index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/qianmo/hua-zally-rules/1.0.1/ai/qianmo/zally
|
java-sources/ai/qianmo/hua-zally-rules/1.0.1/ai/qianmo/zally/rules/CheckTagsRule.java
|
package ai.qianmo.zally.rules;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 - 2022 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import io.swagger.v3.oas.models.Operation;
import io.swagger.v3.oas.models.PathItem;
import io.swagger.v3.oas.models.Paths;
import java.util.ArrayList;
import java.util.List;
import org.zalando.zally.rule.api.Check;
import org.zalando.zally.rule.api.Context;
import org.zalando.zally.rule.api.Rule;
import org.zalando.zally.rule.api.Severity;
import org.zalando.zally.rule.api.Violation;
@Rule(
        ruleSet = HuaJiRuleSet.class,
        id = "HUA-001",
        severity = Severity.MUST,
        title = "tags field is required"
)
public class CheckTagsRule {

    /**
     * Verifies that every GET, POST, PUT and DELETE operation declares at
     * least one tag, reporting one violation per offending path.
     *
     * @param context the Zally validation context holding the parsed API
     * @return the violations found; empty when every operation is tagged
     */
    @Check(severity = Severity.MUST)
    public List<Violation> checkTags(Context context) {
        final List<Violation> violations = new ArrayList<>();
        final Paths paths = context.getApi().getPaths();
        if (paths == null) {
            // A spec without any paths has nothing to validate
            // (the original dereferenced paths unconditionally and NPE'd here).
            return violations;
        }
        for (final String path : paths.keySet()) {
            final PathItem pathItem = paths.get(path);
            final boolean allTagged = checkOperationTags(pathItem.getGet())
                    && checkOperationTags(pathItem.getDelete())
                    && checkOperationTags(pathItem.getPost())
                    && checkOperationTags(pathItem.getPut());
            if (!allTagged) {
                violations.add(context.violation("tags field is required " + path, pathItem));
            }
        }
        return violations;
    }

    /**
     * @param operation the operation to inspect; may be {@code null} when the
     *                  path does not define this HTTP method
     * @return {@code true} when the operation is absent or has at least one tag
     */
    private boolean checkOperationTags(Operation operation) {
        if (operation == null) {
            return true;
        }
        return operation.getTags() != null && !operation.getTags().isEmpty();
    }
}
|
0
|
java-sources/ai/qianmo/hua-zally-rules/1.0.1/ai/qianmo/zally
|
java-sources/ai/qianmo/hua-zally-rules/1.0.1/ai/qianmo/zally/rules/HuaJiRuleSet.java
|
package ai.qianmo.zally.rules;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.net.URI;
import org.jetbrains.annotations.NotNull;
import org.zalando.zally.rule.api.Rule;
import org.zalando.zally.rule.api.RuleSet;
/**
 * Rule set grouping the custom "huaji" Zally rules. Individual rule
 * documentation links are anchored fragments of the project repository URL.
 */
public class HuaJiRuleSet implements RuleSet
{
    private static final String REPOSITORY_URL = "https://github.com/huaji_app/hua-zally-rules";

    /** @return the stable identifier of this rule set */
    @NotNull
    @Override
    public String getId()
    {
        return "huaji";
    }

    /** @return the documentation home page of this rule set */
    @NotNull
    @Override
    public URI getUrl()
    {
        return URI.create(REPOSITORY_URL);
    }

    /** @return a deep link to the given rule's anchor on the documentation page */
    @NotNull
    @Override
    public URI url(@NotNull final Rule rule)
    {
        return URI.create(getUrl() + "#" + rule.id());
    }
}
|
0
|
java-sources/ai/qianmo/hua-zally-rules/1.0.1/ai/qianmo/zally
|
java-sources/ai/qianmo/hua-zally-rules/1.0.1/ai/qianmo/zally/rules/OpenApiParser.java
|
package ai.qianmo.zally.rules;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import io.swagger.parser.util.ParseOptions;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.parser.OpenAPIV3Parser;
import io.swagger.v3.parser.util.ResolverFully;
public class OpenApiParser
{
    /**
     * Parses the OpenAPI document at the given location and fully resolves all
     * {@code $ref} references in place.
     *
     * @param url file path, classpath resource or URL of the document
     * @return the parsed, fully resolved model, or {@code null} when the
     *         document could not be read
     */
    public OpenAPI parse(String url)
    {
        // NOTE: the original built an io.swagger.parser.util.ParseOptions (the
        // swagger-parser v1 options class) that OpenAPIV3Parser.read(String)
        // never consumed — dead code, removed.
        final OpenAPI parseResult = new OpenAPIV3Parser().read(url);
        if (parseResult != null)
        {
            // Inline all $ref references so downstream rules see concrete models.
            new ResolverFully(true).resolveFully(parseResult);
        }
        return parseResult;
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/ApiReporter.java
|
package com.ethlo.zally;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.collect.Iterators;
import io.swagger.models.Method;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.Operation;
import io.swagger.v3.oas.models.PathItem;
import io.swagger.v3.oas.models.Paths;
/**
 * Renders a tree-style, human-readable report of the paths and operations of
 * an OpenAPI document, e.g. for logging during a Maven build.
 */
public class ApiReporter
{
    private final OpenAPI openAPI;

    /**
     * @param openAPI the parsed OpenAPI document to report on
     */
    public ApiReporter(final OpenAPI openAPI)
    {
        this.openAPI = Objects.requireNonNull(openAPI, "openAPI cannot be null");
    }

    /**
     * Collects the operations defined on a path item, keyed by HTTP method.
     * Methods without an operation are omitted.
     */
    private Map<Method, Operation> getOperations(PathItem value)
    {
        final Map<Method, Operation> operations = new LinkedHashMap<>();
        operations.put(Method.GET, value.getGet());
        operations.put(Method.PUT, value.getPut());
        operations.put(Method.DELETE, value.getDelete());
        operations.put(Method.HEAD, value.getHead());
        operations.put(Method.OPTIONS, value.getOptions());
        operations.put(Method.POST, value.getPost());
        operations.put(Method.PATCH, value.getPatch());
        // Drop the HTTP methods this path does not define.
        operations.values().removeIf(Objects::isNull);
        return operations;
    }

    /**
     * Builds the path hierarchy and renders it as a connector-art tree.
     *
     * @return the rendered tree, one node per line
     */
    public String render()
    {
        final Paths paths = this.openAPI.getPaths();
        final Node root = new Node("");
        for (final Map.Entry<String, PathItem> pathEntry : paths.entrySet())
        {
            final Map<Method, Operation> methodOperations = getOperations(pathEntry.getValue());
            for (final Map.Entry<Method, Operation> methodOperationEntry : methodOperations.entrySet())
            {
                root.addPath(pathEntry.getKey(), methodOperationEntry.getKey(), methodOperationEntry.getValue().getOperationId());
            }
        }
        final ByteArrayOutputStream bout = new ByteArrayOutputStream();
        printTree(root, bout);
        return bout.toString(StandardCharsets.UTF_8);
    }

    // Recursively prints a node and its children with box-drawing connectors;
    // the last child of each node gets the "corner" connector.
    private void print(String prefix, Node node, boolean isTail, boolean isRoot, PrintWriter out)
    {
        final String strConnector = isRoot ? "" : isTail ? "└── " : "├── ";
        out.println(prefix + strConnector + node.item);
        final Set<Node> children = node.getChildren();
        if (!children.isEmpty())
        {
            children.stream().limit(children.size() - 1)
                    .forEach(child ->
                            print(prefix + (isTail ? " " : "│ "), child, false, false, out));
            print(prefix + (isTail ? " " : "│ "), Iterators.getLast(children.iterator()), true, false, out);
        }
        out.flush();
    }

    /**
     * Writes the tree rooted at {@code ref} to {@code out}, encoded as UTF-8.
     * <p>
     * Fixes: the original wrapped {@code out} in a platform-default-charset
     * {@link PrintWriter} while {@link #render()} decodes the bytes as UTF-8,
     * which mangled the non-ASCII box-drawing characters on non-UTF-8
     * platforms. The original's {@code roots == null} branch was unreachable
     * ({@link Node#getChildren()} never returns null) and has been removed.
     */
    public void printTree(Node ref, OutputStream out)
    {
        final PrintWriter pw = new PrintWriter(new java.io.OutputStreamWriter(out, StandardCharsets.UTF_8));
        if (ref.getChildren().isEmpty())
        {
            // Nothing to render.
            pw.flush();
            return;
        }
        print("", ref, true, true, pw);
    }

    /**
     * A node of the rendered path tree. Identity (equals/hashCode) is based on
     * the item text only, so adding an equal node to a parent's child set is a
     * no-op.
     */
    static class Node
    {
        private final String item;
        private final Set<Node> children = new LinkedHashSet<>();

        public Node(final String item)
        {
            this.item = Objects.requireNonNull(item, "item cannot be null");
        }

        public String getItem()
        {
            return item;
        }

        public Set<Node> getChildren()
        {
            return children;
        }

        /**
         * Inserts the segments of {@code pathUri} below this node, reusing
         * existing segment nodes, and appends a leaf for the operation.
         */
        public void addPath(final String pathUri, final Method method, final String operationId)
        {
            final AtomicReference<Node> nodeRef = new AtomicReference<>(this);
            Arrays.stream(pathUri.split("/"))
                    .filter(p -> !p.isEmpty())
                    .forEach(pathPart ->
                    {
                        final Node newNode = nodeRef.get().getOrCreateNode(pathPart);
                        nodeRef.get().children.add(newNode);
                        nodeRef.set(newNode);
                    });
            nodeRef.get().children.add(new Node(method.name() + " - " + operationId));
        }

        // Returns the existing child with this item, or a fresh detached node
        // (the caller is responsible for adding it to the child set).
        private Node getOrCreateNode(final String item)
        {
            for (Node child : children)
            {
                if (child.getItem().equals(item))
                {
                    return child;
                }
            }
            return new Node(item);
        }

        @Override
        public boolean equals(final Object o)
        {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Node node = (Node) o;
            return Objects.equals(item, node.item);
        }

        @Override
        public int hashCode()
        {
            return Objects.hash(item);
        }

        @Override
        public String toString()
        {
            return "Node{" +
                    "item='" + item + '\'' +
                    ", children=" + children.size() +
                    '}';
        }
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/HelpMojo.java
|
package com.ethlo.zally;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
/**
 * Display help information on zally-maven-plugin.<br>
 * Call <code>mvn zally:help -Ddetail=true -Dgoal=&lt;goal-name&gt;</code> to display parameter details.
 * @author maven-plugin-tools
 */
@Mojo( name = "help", requiresProject = false, threadSafe = true )
public class HelpMojo
    extends AbstractMojo
{
    /**
     * If <code>true</code>, display all settable properties for each goal.
     */
    @Parameter( property = "detail", defaultValue = "false" )
    private boolean detail;

    /**
     * The name of the goal for which to show help. If unspecified, all goals will be displayed.
     */
    @Parameter( property = "goal" )
    private java.lang.String goal;

    /**
     * The maximum length of a display line, should be positive.
     */
    @Parameter( property = "lineLength", defaultValue = "80" )
    private int lineLength;

    /**
     * The number of spaces per indentation level, should be positive.
     */
    @Parameter( property = "indentSize", defaultValue = "2" )
    private int indentSize;

    // groupId/artifactId/plugin-help.xml
    private static final String PLUGIN_HELP_PATH =
        "/META-INF/maven/ai.qianmo/zally-maven-plugin/plugin-help.xml";

    private static final int DEFAULT_LINE_LENGTH = 80;

    /**
     * Loads and parses the plugin help descriptor bundled inside the plugin jar.
     * Rewritten with try-with-resources (the original closed the stream in a
     * verbose finally block) and multi-catch for the three identical handlers;
     * a missing descriptor now fails with a clear message instead of an NPE.
     *
     * @return the parsed descriptor document
     * @throws MojoExecutionException when the descriptor is missing or unparseable
     */
    private Document build()
        throws MojoExecutionException
    {
        getLog().debug( "load plugin-help.xml: " + PLUGIN_HELP_PATH );
        try ( InputStream is = getClass().getResourceAsStream( PLUGIN_HELP_PATH ) )
        {
            if ( is == null )
            {
                throw new MojoExecutionException( "Could not find " + PLUGIN_HELP_PATH + " on the classpath" );
            }
            DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
            DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
            return dBuilder.parse( is );
        }
        catch ( IOException | ParserConfigurationException | SAXException e )
        {
            throw new MojoExecutionException( e.getMessage(), e );
        }
    }

    /**
     * {@inheritDoc}
     */
    public void execute()
        throws MojoExecutionException
    {
        // Sanitize user-supplied layout parameters before rendering.
        if ( lineLength <= 0 )
        {
            getLog().warn( "The parameter 'lineLength' should be positive, using '80' as default." );
            lineLength = DEFAULT_LINE_LENGTH;
        }
        if ( indentSize <= 0 )
        {
            getLog().warn( "The parameter 'indentSize' should be positive, using '2' as default." );
            indentSize = 2;
        }

        Document doc = build();

        StringBuilder sb = new StringBuilder();
        Node plugin = getSingleChild( doc, "plugin" );
        String name = getValue( plugin, "name" );
        String version = getValue( plugin, "version" );
        String id = getValue( plugin, "groupId" ) + ":" + getValue( plugin, "artifactId" ) + ":" + version;
        // Header line: prefer the human-readable plugin name when it does not already embed the id.
        if ( isNotEmpty( name ) && !name.contains( id ) )
        {
            append( sb, name + " " + version, 0 );
        }
        else
        {
            if ( isNotEmpty( name ) )
            {
                append( sb, name, 0 );
            }
            else
            {
                append( sb, id, 0 );
            }
        }
        append( sb, getValue( plugin, "description" ), 1 );
        append( sb, "", 0 );

        //<goalPrefix>plugin</goalPrefix>
        String goalPrefix = getValue( plugin, "goalPrefix" );

        Node mojos1 = getSingleChild( plugin, "mojos" );
        List<Node> mojos = findNamedChild( mojos1, "mojo" );

        if ( goal == null || goal.length() <= 0 )
        {
            append( sb, "This plugin has " + mojos.size() + ( mojos.size() > 1 ? " goals:" : " goal:" ), 0 );
            append( sb, "", 0 );
        }

        for ( Node mojo : mojos )
        {
            writeGoal( sb, goalPrefix, (Element) mojo );
        }

        if ( getLog().isInfoEnabled() )
        {
            getLog().info( sb.toString() );
        }
    }

    // True when the string is neither null nor empty.
    private static boolean isNotEmpty( String string )
    {
        return string != null && string.length() > 0;
    }

    // Text content of the single, mandatory child element with the given name.
    private String getValue( Node node, String elementName )
        throws MojoExecutionException
    {
        return getSingleChild( node, elementName ).getTextContent();
    }

    // The single, mandatory child element; fails when absent or duplicated.
    private Node getSingleChild( Node node, String elementName )
        throws MojoExecutionException
    {
        List<Node> namedChild = findNamedChild( node, elementName );
        if ( namedChild.isEmpty() )
        {
            throw new MojoExecutionException( "Could not find " + elementName + " in plugin-help.xml" );
        }
        if ( namedChild.size() > 1 )
        {
            throw new MojoExecutionException( "Multiple " + elementName + " in plugin-help.xml" );
        }
        return namedChild.get( 0 );
    }

    // All direct children with the given element name (possibly empty, never null).
    private List<Node> findNamedChild( Node node, String elementName )
    {
        List<Node> result = new ArrayList<Node>();
        NodeList childNodes = node.getChildNodes();
        for ( int i = 0; i < childNodes.getLength(); i++ )
        {
            Node item = childNodes.item( i );
            if ( elementName.equals( item.getNodeName() ) )
            {
                result.add( item );
            }
        }
        return result;
    }

    // The single optional child element, or null when absent; fails on duplicates.
    // Fixed: the "Multiple ..." message was missing a space before "in".
    private Node findSingleChild( Node node, String elementName )
        throws MojoExecutionException
    {
        List<Node> elementsByTagName = findNamedChild( node, elementName );
        if ( elementsByTagName.isEmpty() )
        {
            return null;
        }
        if ( elementsByTagName.size() > 1 )
        {
            throw new MojoExecutionException( "Multiple " + elementName + " in plugin-help.xml" );
        }
        return elementsByTagName.get( 0 );
    }

    // Renders one goal (optionally with its parameters when 'detail' is set).
    private void writeGoal( StringBuilder sb, String goalPrefix, Element mojo )
        throws MojoExecutionException
    {
        String mojoGoal = getValue( mojo, "goal" );
        Node configurationElement = findSingleChild( mojo, "configuration" );
        Node description = findSingleChild( mojo, "description" );
        if ( goal == null || goal.length() <= 0 || mojoGoal.equals( goal ) )
        {
            append( sb, goalPrefix + ":" + mojoGoal, 0 );
            Node deprecated = findSingleChild( mojo, "deprecated" );
            if ( ( deprecated != null ) && isNotEmpty( deprecated.getTextContent() ) )
            {
                append( sb, "Deprecated. " + deprecated.getTextContent(), 1 );
                if ( detail && description != null )
                {
                    append( sb, "", 0 );
                    append( sb, description.getTextContent(), 1 );
                }
            }
            else if ( description != null )
            {
                append( sb, description.getTextContent(), 1 );
            }
            append( sb, "", 0 );

            if ( detail )
            {
                Node parametersNode = getSingleChild( mojo, "parameters" );
                List<Node> parameters = findNamedChild( parametersNode, "parameter" );
                append( sb, "Available parameters:", 1 );
                append( sb, "", 0 );

                for ( Node parameter : parameters )
                {
                    writeParameter( sb, parameter, configurationElement );
                }
            }
        }
    }

    // Renders one parameter: name, default, description, required flag and user property.
    private void writeParameter( StringBuilder sb, Node parameter, Node configurationElement )
        throws MojoExecutionException
    {
        String parameterName = getValue( parameter, "name" );
        String parameterDescription = getValue( parameter, "description" );

        Element fieldConfigurationElement = null;
        if ( configurationElement != null )
        {
            fieldConfigurationElement = (Element) findSingleChild( configurationElement, parameterName );
        }

        String parameterDefaultValue = "";
        if ( fieldConfigurationElement != null && fieldConfigurationElement.hasAttribute( "default-value" ) )
        {
            parameterDefaultValue = " (Default: " + fieldConfigurationElement.getAttribute( "default-value" ) + ")";
        }
        append( sb, parameterName + parameterDefaultValue, 2 );
        Node deprecated = findSingleChild( parameter, "deprecated" );
        if ( ( deprecated != null ) && isNotEmpty( deprecated.getTextContent() ) )
        {
            append( sb, "Deprecated. " + deprecated.getTextContent(), 3 );
            append( sb, "", 0 );
        }
        append( sb, parameterDescription, 3 );
        if ( "true".equals( getValue( parameter, "required" ) ) )
        {
            append( sb, "Required: Yes", 3 );
        }
        if ( ( fieldConfigurationElement != null ) && isNotEmpty( fieldConfigurationElement.getTextContent() ) )
        {
            String property = getPropertyFromExpression( fieldConfigurationElement.getTextContent() );
            append( sb, "User property: " + property, 3 );
        }

        append( sb, "", 0 );
    }

    /**
     * <p>Repeat a String <code>n</code> times to form a new string.</p>
     *
     * @param str String to repeat
     * @param repeat number of times to repeat str
     * @return String with repeated String
     * @throws NegativeArraySizeException if <code>repeat &lt; 0</code>
     * @throws NullPointerException if str is <code>null</code>
     */
    private static String repeat( String str, int repeat )
    {
        // Kept hand-rolled (rather than String.repeat) to preserve the
        // documented NegativeArraySizeException for negative counts.
        StringBuilder buffer = new StringBuilder( repeat * str.length() );

        for ( int i = 0; i < repeat; i++ )
        {
            buffer.append( str );
        }

        return buffer.toString();
    }

    /**
     * Append a description to the buffer by respecting the indentSize and lineLength parameters.
     * <b>Note</b>: The last character is always a new line.
     *
     * @param sb The buffer to append the description, not <code>null</code>.
     * @param description The description, not <code>null</code>.
     * @param indent The base indentation level of each line, must not be negative.
     */
    private void append( StringBuilder sb, String description, int indent )
    {
        for ( String line : toLines( description, indent, indentSize, lineLength ) )
        {
            sb.append( line ).append( '\n' );
        }
    }

    /**
     * Splits the specified text into lines of convenient display length.
     *
     * @param text The text to split into lines, must not be <code>null</code>.
     * @param indent The base indentation level of each line, must not be negative.
     * @param indentSize The size of each indentation, must not be negative.
     * @param lineLength The length of the line, must not be negative.
     * @return The sequence of display lines, never <code>null</code>.
     * @throws NegativeArraySizeException if <code>indent &lt; 0</code>
     */
    private static List<String> toLines( String text, int indent, int indentSize, int lineLength )
    {
        List<String> lines = new ArrayList<String>();

        String ind = repeat( "\t", indent );

        String[] plainLines = text.split( "(\r\n)|(\r)|(\n)" );

        for ( String plainLine : plainLines )
        {
            toLines( lines, ind + plainLine, indentSize, lineLength );
        }

        return lines;
    }

    /**
     * Adds the specified line to the output sequence, performing line wrapping if necessary.
     *
     * @param lines The sequence of display lines, must not be <code>null</code>.
     * @param line The line to add, must not be <code>null</code>.
     * @param indentSize The size of each indentation, must not be negative.
     * @param lineLength The length of the line, must not be negative.
     */
    private static void toLines( List<String> lines, String line, int indentSize, int lineLength )
    {
        int lineIndent = getIndentLevel( line );
        StringBuilder buf = new StringBuilder( 256 );

        String[] tokens = line.split( " +" );

        for ( String token : tokens )
        {
            if ( buf.length() > 0 )
            {
                if ( buf.length() + token.length() >= lineLength )
                {
                    // Wrap: flush the current line and start a new, indented one.
                    lines.add( buf.toString() );
                    buf.setLength( 0 );
                    buf.append( repeat( " ", lineIndent * indentSize ) );
                }
                else
                {
                    buf.append( ' ' );
                }
            }

            for ( int j = 0; j < token.length(); j++ )
            {
                char c = token.charAt( j );
                if ( c == '\t' )
                {
                    // Expand a tab to the next indentation stop.
                    buf.append( repeat( " ", indentSize - buf.length() % indentSize ) );
                }
                else if ( c == '\u00A0' )
                {
                    // Non-breaking space renders as a plain space.
                    buf.append( ' ' );
                }
                else
                {
                    buf.append( c );
                }
            }
        }
        lines.add( buf.toString() );
    }

    /**
     * Gets the indentation level of the specified line.
     *
     * @param line The line whose indentation level should be retrieved, must not be <code>null</code>.
     * @return The indentation level of the line.
     */
    private static int getIndentLevel( String line )
    {
        int level = 0;
        for ( int i = 0; i < line.length() && line.charAt( i ) == '\t'; i++ )
        {
            level++;
        }
        // A tab within the next few characters still counts as one extra level.
        for ( int i = level + 1; i <= level + 4 && i < line.length(); i++ )
        {
            if ( line.charAt( i ) == '\t' )
            {
                level++;
                break;
            }
        }
        return level;
    }

    // Extracts "xxx" from an expression of the form "${xxx}", or null when the
    // text is not a simple, non-nested property expression.
    private String getPropertyFromExpression( String expression )
    {
        if ( expression != null && expression.startsWith( "${" ) && expression.endsWith( "}" )
            && !expression.substring( 2 ).contains( "${" ) )
        {
            // expression="${xxx}" -> property="xxx"
            return expression.substring( 2, expression.length() - 1 );
        }
        // no property can be extracted
        return null;
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/OpenApiParser.java
|
package com.ethlo.zally;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import io.swagger.parser.util.ParseOptions;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.parser.OpenAPIV3Parser;
import io.swagger.v3.parser.util.ResolverFully;
public class OpenApiParser
{
    /**
     * Parses the OpenAPI document at the given location and fully resolves all
     * {@code $ref} references in place.
     *
     * @param url file path, classpath resource or URL of the document
     * @return the parsed, fully resolved model, or {@code null} when the
     *         document could not be read
     */
    public OpenAPI parse(String url)
    {
        // NOTE: the original built an io.swagger.parser.util.ParseOptions (the
        // swagger-parser v1 options class) that OpenAPIV3Parser.read(String)
        // never consumed — dead code, removed.
        final OpenAPI parseResult = new OpenAPIV3Parser().read(url);
        if (parseResult != null)
        {
            // Inline all $ref references so downstream consumers see concrete models.
            new ResolverFully(true).resolveFully(parseResult);
        }
        return parseResult;
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/ReportingMojo.java
|
package com.ethlo.zally;/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.project.MavenProject;
@Mojo(threadSafe = true, name = "report", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
public class ReportingMojo extends AbstractMojo
{
    /** Location of the OpenAPI document: a file-system path or a classpath resource. */
    @Parameter(required = true, defaultValue = "${project.basedir}/src/main/resources/api.yaml", property = "zally.source")
    private String source;

    /** When {@code true} the goal is skipped entirely. */
    @Parameter(property = "zally.skip", defaultValue = "false")
    private boolean skip;

    @Parameter(defaultValue = "${project}", required = true, readonly = true)
    private MavenProject project;

    /**
     * Parses the configured OpenAPI document and logs its path hierarchy.
     *
     * @throws MojoFailureException when the source cannot be located
     */
    @Override
    public void execute() throws MojoFailureException
    {
        if (skip)
        {
            getLog().info("Skipping execution as requested");
            return;
        }
        // The source may live either on the classpath or on disk.
        final boolean existsOnClassPath = classPathResourceExists(source);
        final boolean existsOnFilesystem = Files.exists(Paths.get(source));
        if (!existsOnClassPath && !existsOnFilesystem)
        {
            throw new MojoFailureException("The specified source file could not be found: " + source);
        }
        getLog().info("Analyzing file '" + source + "'");
        getLog().info("");
        getLog().info("API path hierarchy:");
        final String hierarchy = new ApiReporter(new OpenApiParser().parse(source)).render();
        Arrays.stream(hierarchy.split("\n")).forEach(line -> getLog().info(line));
        getLog().info("");
    }

    /**
     * Probes whether a classpath resource exists, closing the probe stream
     * (the original leaked the InputStream returned by getResourceAsStream).
     */
    private boolean classPathResourceExists(final String name)
    {
        try (var in = getClass().getClassLoader().getResourceAsStream(name))
        {
            return in != null;
        }
        catch (final java.io.IOException e)
        {
            // close() failed — treat the resource as unusable.
            return false;
        }
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/ZallyMojo.java
|
package com.ethlo.zally;/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.project.MavenProject;
import org.zalando.zally.core.CheckDetails;
import org.zalando.zally.core.Result;
import org.zalando.zally.core.RuleDetails;
import org.zalando.zally.rule.api.Severity;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
@Mojo(threadSafe = true, name = "validate", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
public class ZallyMojo extends AbstractMojo
{
private final ObjectMapper mapper;
@Parameter(required = true, defaultValue = "${project.basedir}/src/main/resources/api.yaml", property = "zally.source")
private String source;
@Parameter(property = "zally.failOn")
private List<Severity> failOn;
@Parameter(property = "zally.resultFile")
private String resultFile;
@Parameter(property = "zally.skip", defaultValue = "false")
private boolean skip;
@Parameter(defaultValue = "${project}", required = true, readonly = true)
private MavenProject project;
@Parameter
private Map<String, String> ruleConfigs;
@Parameter
private Set<String> skipRules;
public ZallyMojo()
{
mapper = new ObjectMapper(new YAMLFactory());
mapper.setSerializationInclusion(JsonInclude.Include.NON_EMPTY);
}
@Override
public void execute() throws MojoFailureException
{
    if (skip)
    {
        getLog().info("Skipping execution as requested");
        return;
    }
    // User-supplied per-rule configuration overrides the bundled reference defaults.
    final Config config = parseConfigMap(ruleConfigs).withFallback(ConfigFactory.load("reference"));
    final ZallyRunner zallyRunner = new ZallyRunner(config, getLog());
    // The source may live either on the classpath or on disk. Close the probe
    // stream instead of leaking it as the original did.
    boolean existsOnClassPath;
    try (var in = getClass().getClassLoader().getResourceAsStream(source))
    {
        existsOnClassPath = in != null;
    }
    catch (IOException e)
    {
        existsOnClassPath = false;
    }
    final boolean existsOnFilesystem = Files.exists(Paths.get(source));
    if (!existsOnClassPath && !existsOnFilesystem)
    {
        throw new MojoFailureException("The specified source file could not be found: " + source);
    }
    printInfo("Validating file '" + source + "'");
    if (!failOn.isEmpty())
    {
        getLog().info("Will fail build on errors of severity: " + failOn
                .stream()
                .map(Enum::name)
                .collect(Collectors.joining(", ")));
    }
    else
    {
        getLog().warn("No errors will fail the build, reporting only. Adjust 'failOn' " +
                "property to fail on requested severities:" + Arrays.toString(Severity.values()));
    }
    printErrorDescriptionsWithLink(zallyRunner.getRules());
    printSkippedRulesInfo(zallyRunner.getRules());
    final Map<CheckDetails, List<Result>> results = validate(zallyRunner, skipRules, source);
    // Group results by severity (insertion order preserved); the original's
    // nested compute() calls are equivalent to chained computeIfAbsent().
    final Map<Severity, Map<CheckDetails, List<Result>>> resultsBySeverity = new LinkedHashMap<>();
    results.forEach((details, resultList) ->
    {
        for (final Result result : resultList)
        {
            resultsBySeverity
                    .computeIfAbsent(result.getViolationType(), severity -> new LinkedHashMap<>())
                    .computeIfAbsent(details, cd -> new LinkedList<>())
                    .add(result);
        }
    });
    printErrors(resultsBySeverity);
    writeResults(results);
    // Halt the build if any configured severity produced at least one finding.
    for (final Severity severity : failOn)
    {
        final int size = Optional.ofNullable(resultsBySeverity.get(severity))
                .map(Map::size)
                .orElse(0);
        if (size > 0)
        {
            throw new MojoFailureException("Failing build due to errors with severity " + severity);
        }
    }
}
// Logs the message preceded by a blank line so it stands out in the build log.
private void printInfo(final String message)
{
    getLog().info("");
    getLog().info(message);
}
// Flattens the severity -> check -> results structure into one warning line
// per violation and logs them under a counted header.
private void printErrors(final Map<Severity, Map<CheckDetails, List<Result>>> resultsBySeverity)
{
    final List<String> violations = new LinkedList<>();
    for (final Map.Entry<Severity, Map<CheckDetails, List<Result>>> severityEntry : resultsBySeverity.entrySet())
    {
        for (final Map.Entry<CheckDetails, List<Result>> checkEntry : severityEntry.getValue().entrySet())
        {
            final CheckDetails checkDetails = checkEntry.getKey();
            for (final Result result : checkEntry.getValue())
            {
                violations.add(checkDetails.getRule().id()
                        + " - " + severityEntry.getKey()
                        + " - " + checkDetails.getInstance().getClass().getSimpleName()
                        + " - " + result.getDescription()
                        + " - " + result.getPointer());
            }
        }
    }
    printHeader("Rule violations (" + violations.size() + ")");
    violations.forEach(v -> getLog().warn(v));
    getLog().warn("");
}
// Logs the message underlined with a dashed line of matching width.
private void printHeader(final String message)
{
    getLog().info("");
    getLog().info(message);
    getLog().info("-".repeat(message.length()));
}
// Each entry maps a rule name to a YAML snippet; every snippet is parsed and
// the non-null results are collected into a single typesafe Config.
private Config parseConfigMap(final Map<String, String> ruleConfig)
{
    final Map<String, Map<?, ?>> configurations = new LinkedHashMap<>();
    if (ruleConfig != null)
    {
        ruleConfig.forEach((ruleName, rawYaml) ->
        {
            final Map<?, ?> parsed = loadConfig(ruleName, rawYaml);
            if (parsed != null)
            {
                configurations.put(ruleName, parsed);
            }
        });
    }
    return ConfigFactory.parseMap(configurations);
}
// Logs one sorted summary line per rule:
// id - implementation - severity - title - rule-set URL.
private void printErrorDescriptionsWithLink(final List<RuleDetails> rules)
{
    final List<String> summaries = new LinkedList<>();
    for (final RuleDetails details : rules)
    {
        summaries.add(details.getRule().id() + " - "
                + details.getInstance().getClass().getSimpleName() + " - "
                + details.getRule().severity().name() + " - "
                + details.getRule().title() + " - "
                + details.getRuleSet().getUrl());
    }
    summaries.sort(Comparator.naturalOrder());
    printHeader("Rules (" + rules.size() + ")");
    summaries.forEach(line -> getLog().info(line));
}
/**
 * Logs which entries of the configured {@code skipRules} actually matched a
 * loaded rule, and warns about skip entries that match no known rule.
 *
 * @param rules all rules discovered on the classpath
 */
private void printSkippedRulesInfo(List<RuleDetails> rules)
{
    final Set<String> skipped = new LinkedHashSet<>();
    for (String ruleName : skipRules)
    {
        // The original predicate additionally re-checked skipRules.contains(ruleClassName),
        // which is always true once the names match (ruleName is taken from skipRules),
        // so a plain name comparison is sufficient
        final boolean known = rules.stream()
                .anyMatch(r -> r.getInstance().getClass().getSimpleName().equals(ruleName));
        if (known)
        {
            skipped.add(ruleName);
        }
        else
        {
            getLog().warn("Requested to skip rule '" + ruleName + "', but no such rule is known.");
        }
    }
    final List<String> skippedDescription = rules.stream()
            .filter(r -> skipped.contains(r.getInstance().getClass().getSimpleName()))
            .sorted(Comparator.comparing(a -> a.getRule().id()))
            .map(d -> d.getRule().id() + " - " + d.getInstance().getClass().getSimpleName() + " - " + d.getRule().severity() + " - " + d.getRule().title())
            .collect(Collectors.toList());
    if (!skippedDescription.isEmpty())
    {
        printHeader("Skipped rules (" + skippedDescription.size() + ")");
        skippedDescription.forEach(i -> getLog().info(i));
    }
}
/**
 * Parses a single rule's JSON configuration string into a map.
 *
 * @param ruleName   rule name, used only for the error message
 * @param ruleConfig JSON configuration text
 * @return the parsed configuration map
 * @throws UncheckedIOException if the JSON cannot be parsed
 */
private Map<String, Object> loadConfig(final String ruleName, final String ruleConfig)
{
    try
    {
        // Jackson returns a LinkedHashMap for Map.class; the cast is inherently unchecked
        @SuppressWarnings("unchecked")
        final Map<String, Object> parsed = mapper.readValue(ruleConfig, Map.class);
        return parsed;
    }
    catch (JsonProcessingException e)
    {
        throw new UncheckedIOException("Unable to parse configuration for rule name " + ruleName, e);
    }
}
/**
 * Writes all non-empty check results as JSON to {@code resultFile}, creating
 * parent directories as needed. Does nothing when no result file is configured.
 *
 * @param results results per executed check
 * @throws UncheckedIOException if the file cannot be written
 */
private void writeResults(Map<CheckDetails, List<Result>> results)
{
    if (resultFile == null || resultFile.isBlank())
    {
        return;
    }
    try
    {
        printInfo("Writing result file to " + resultFile);
        getLog().info("");
        final Path target = Paths.get(resultFile);
        // A bare filename like "out.json" has no parent; Files.createDirectories(null)
        // would throw a NullPointerException, so only create directories when one exists
        final Path parent = target.getParent();
        if (parent != null)
        {
            Files.createDirectories(parent);
        }
        final List<List<Result>> nonEmpty = results.values().stream()
                .filter(r -> !r.isEmpty())
                .collect(Collectors.toList());
        Files.writeString(target, mapper.writeValueAsString(nonEmpty));
    }
    catch (IOException e)
    {
        throw new UncheckedIOException(e);
    }
}
/**
 * Runs the Zally validation and converts any checked I/O failure into an
 * unchecked exception so it propagates cleanly out of the Mojo.
 *
 * @param zallyRunner the configured runner
 * @param skipped     simple class names of rules to skip
 * @param url         location of the OpenAPI document
 * @return results per executed check
 */
private Map<CheckDetails, List<Result>> validate(ZallyRunner zallyRunner, final Set<String> skipped, String url)
{
    try
    {
        return zallyRunner.validate(url, skipped);
    }
    catch (IOException exc)
    {
        throw new UncheckedIOException(exc.getMessage(), exc);
    }
}
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/ZallyRunner.java
|
package com.ethlo.zally;/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.maven.plugin.logging.Log;
import org.jetbrains.annotations.NotNull;
import org.zalando.zally.core.CheckDetails;
import org.zalando.zally.core.DefaultContext;
import org.zalando.zally.core.Result;
import org.zalando.zally.core.RuleDetails;
import org.zalando.zally.rule.api.Check;
import org.zalando.zally.rule.api.Context;
import org.zalando.zally.rule.api.Rule;
import org.zalando.zally.rule.api.RuleSet;
import org.zalando.zally.rule.api.Violation;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import edu.emory.mathcs.backport.java.util.Collections;
import io.github.classgraph.ClassGraph;
import io.github.classgraph.ClassInfo;
import io.github.classgraph.ClassInfoList;
import io.github.classgraph.ScanResult;
import io.swagger.v3.oas.models.OpenAPI;
/**
 * Discovers Zally rule implementations on the classpath (classes annotated with
 * {@link Rule}) and executes their {@link Check} methods against an OpenAPI document.
 */
public class ZallyRunner
{
    private final List<RuleDetails> rules;
    private final Log logger;

    /**
     * Scans the classpath for {@link Rule}-annotated classes and instantiates each one.
     *
     * @param ruleConfigs configuration handed to rule constructors that accept a {@link Config}
     * @param logger      the Maven plugin logger
     */
    public ZallyRunner(final Config ruleConfigs, final Log logger)
    {
        this.rules = new LinkedList<>();
        this.logger = logger;
        for (Class<?> ruleClass : loadRuleClasses())
        {
            logger.debug("Loading rule " + ruleClass.getSimpleName());
            final Object instance = createRuleInstance(ruleClass, ruleConfigs);
            final Rule ruleAnnotation = ruleClass.getAnnotation(Rule.class);
            this.rules.add(new RuleDetails((RuleSet) createInstance(ruleAnnotation.ruleSet()), ruleAnnotation, instance));
        }
    }

    /**
     * Validates the OpenAPI document at the given location against all loaded rules.
     *
     * @param url     location of the OpenAPI document
     * @param skipped simple class names of rules that must not be evaluated
     * @return one entry per executed check, with its (possibly empty) violation list
     * @throws IOException if the document cannot be read or parsed
     */
    public Map<CheckDetails, List<Result>> validate(String url, final Set<String> skipped) throws IOException
    {
        final OpenAPI openApi = new OpenApiParser().parse(url);
        final Context context = new DefaultContext("", openApi, null);
        final Map<CheckDetails, List<Result>> returnValue = new LinkedHashMap<>();
        for (RuleDetails ruleDetails : rules)
        {
            if (skipped.contains(ruleDetails.getInstance().getClass().getSimpleName()))
            {
                continue;
            }
            final Object instance = ruleDetails.getInstance();
            for (Method method : instance.getClass().getDeclaredMethods())
            {
                final Check checkAnnotation = method.getAnnotation(Check.class);
                // A check method is one annotated with @Check taking exactly one Context parameter
                if (checkAnnotation != null && method.getParameterTypes().length == 1 && method.getParameterTypes()[0] == Context.class)
                {
                    final List<Result> violationList = new LinkedList<>();
                    final CheckDetails checkDetails = performCheck(context, violationList, instance, ruleDetails.getRule(), ruleDetails.getRuleSet(), method, checkAnnotation);
                    returnValue.put(checkDetails, violationList);
                }
            }
        }
        return returnValue;
    }

    /**
     * Invokes a single check method and collects its violations, honoring
     * x-zally-ignore markers in the document.
     *
     * @return the details of the executed check
     */
    @NotNull
    private CheckDetails performCheck(Context context, List<Result> violationList, Object instance, Rule ruleAnnotation, RuleSet ruleSet, Method method, Check checkAnnotation)
    {
        final CheckDetails checkDetails = new CheckDetails(ruleSet, ruleAnnotation, instance, checkAnnotation, method);
        final Object result;
        try
        {
            result = method.invoke(instance, context);
        }
        catch (IllegalAccessException | InvocationTargetException e)
        {
            // Say WHICH check blew up; a bare wrapped cause is hard to trace back to a rule
            throw new RuntimeException("Error invoking check " + instance.getClass().getSimpleName() + "." + method.getName(), e);
        }
        // A null result matches neither instanceof branch, so no explicit null check is needed
        if (result instanceof Iterable)
        {
            //noinspection unchecked
            for (Violation violation : (Iterable<? extends Violation>) result)
            {
                // Ignore violations if there are x-zally-ignore markers.
                if (context.isIgnored(violation.getPointer(), checkDetails.getRule().id())
                        || context.isIgnored(violation.getPointer(), "*"))
                {
                    logger.info(String.format("Ignore violation, rule = %s, at %s", checkDetails.getRule().id(), violation.getPointer()));
                    continue;
                }
                violationList.add(handleViolation(checkDetails, violation));
            }
        }
        else if (result instanceof Violation)
        {
            violationList.add(handleViolation(checkDetails, (Violation) result));
        }
        return checkDetails;
    }

    /**
     * Finds every class on the classpath annotated with {@link Rule}.
     */
    private List<Class<?>> loadRuleClasses()
    {
        try (ScanResult result = new ClassGraph().enableClassInfo().enableAnnotationInfo().scan())
        {
            final ClassInfoList classInfos = result.getClassesWithAnnotation(Rule.class.getName());
            return classInfos.stream().map(ClassInfo::loadClass).collect(Collectors.toList());
        }
    }

    /**
     * Instantiates a rule, preferring a constructor taking a {@link Config} so
     * rules can be parameterized; falls back to the no-arg constructor.
     */
    private Object createRuleInstance(Class<?> ruleClass, Config ruleConfig)
    {
        try
        {
            for (Constructor<?> constructor : ruleClass.getConstructors())
            {
                final Class<?>[] paramTypes = constructor.getParameterTypes();
                if (paramTypes.length == 1 && paramTypes[0].equals(Config.class))
                {
                    return constructor.newInstance(ruleConfig.withFallback(ConfigFactory.parseMap(Collections.emptyMap())));
                }
            }
            return ruleClass.getConstructor().newInstance();
        }
        catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e)
        {
            throw new RuntimeException("Cannot instantiate rule " + ruleClass, e);
        }
    }

    /**
     * Instantiates an arbitrary class via its public no-arg constructor.
     */
    private Object createInstance(Class<?> type)
    {
        try
        {
            final Constructor<?> constructor = type.getConstructor();
            return constructor.newInstance();
        }
        catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e)
        {
            throw new RuntimeException("Cannot instantiate class " + type, e);
        }
    }

    /**
     * Converts a Zally {@link Violation} into a serializable {@link Result}.
     */
    private Result handleViolation(final CheckDetails details, Violation violation)
    {
        // TODO: Handle pointers better to make it easier to know where the error is
        return new Result(
                details.getRule().id(),
                details.getRuleSet().url(details.getRule()),
                details.getRule().title(),
                violation.getDescription(),
                details.getCheck().severity(),
                violation.getPointer(),
                null/*locator.locate(violation.getPointer())*/
        );
    }

    /**
     * @return all loaded rules (live list, not a copy)
     */
    public List<RuleDetails> getRules()
    {
        return rules;
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules/ConfigurableZalandoRuleSet.java
|
package com.ethlo.zally.rules;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.net.URI;
import org.jetbrains.annotations.NotNull;
import org.zalando.zally.rule.api.Rule;
import org.zalando.zally.rule.api.RuleSet;
/**
 * Rule set that anchors the configurable Zalando rules to the public
 * Zalando RESTful API guidelines document.
 */
public class ConfigurableZalandoRuleSet implements RuleSet
{
    private static final String BASE_URL = "https://zalando.github.io/restful-api-guidelines/";

    @NotNull
    @Override
    public String getId()
    {
        return "configurable_zalando";
    }

    @NotNull
    @Override
    public URI getUrl()
    {
        return URI.create(BASE_URL);
    }

    @NotNull
    @Override
    public URI url(@NotNull final Rule rule)
    {
        // Deep link to the individual rule via a fragment anchor
        return URI.create(BASE_URL + "#" + rule.id());
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules/WhiteListedPluralizeNamesForArraysRule.java
|
package com.ethlo.zally.rules;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.zalando.zally.rule.api.Check;
import org.zalando.zally.rule.api.Context;
import org.zalando.zally.rule.api.Rule;
import org.zalando.zally.rule.api.Severity;
import org.zalando.zally.rule.api.Violation;
import com.ethlo.zally.rules.common.PlingStemmer;
import com.typesafe.config.Config;
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.media.Schema;
@Rule(
        ruleSet = ConfigurableZalandoRuleSet.class,
        id = "120",
        severity = Severity.SHOULD,
        title = "Array names should be pluralized"
)
public class WhiteListedPluralizeNamesForArraysRule
{
    // Schema names this rule will inspect.
    // NOTE(review): only names present in this list are ever checked — if "whitelist"
    // was meant to EXEMPT names from the check instead, the contains() filter below
    // should be negated. Confirm against the plugin's configuration docs.
    private final List<String> whiteList;

    /**
     * @param config rule configuration; reads {@code <SimpleClassName>.whitelist} when present,
     *               otherwise no schema names are inspected
     */
    public WhiteListedPluralizeNamesForArraysRule(Config config)
    {
        final String key = getClass().getSimpleName();
        this.whiteList = config.hasPath(key)
                ? config.getConfig(key).getStringList("whitelist")
                : Collections.emptyList();
    }

    /**
     * Returns all component schemas of the API, or an empty map when none are defined.
     */
    public static Map<String, Schema> getAllSchemas(OpenAPI openAPI)
    {
        if (openAPI == null || openAPI.getComponents() == null || openAPI.getComponents().getSchemas() == null)
        {
            return Collections.emptyMap();
        }
        return openAPI.getComponents().getSchemas();
    }

    /**
     * Flags whitelisted, array-typed schema names that do not look plural.
     */
    @Check(severity = Severity.SHOULD)
    public List<Violation> checkArrayPropertyNamesArePlural(final Context context)
    {
        return getAllSchemas(context.getApi()).entrySet().stream()
                .filter(entry -> "array".equals(entry.getValue().getType()))
                .filter(entry -> whiteList.contains(entry.getKey()))
                .filter(entry -> !PlingStemmer.isPlural(entry.getKey()))
                .map(entry -> context.violation("Array property name appears to be singular: " + entry.getKey(), entry.getValue()))
                .collect(Collectors.toList());
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules/common/FinalMap.java
|
package com.ethlo.zally.rules.common;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.util.TreeMap;
/**
* Copyright 2016 Fabian M. Suchanek
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p>
* Provides a nicer constructor for a TreeMap.
* Example:
* <PRE>
* FinalMap<String,Integer> f=new FinalMap(
* "a",1,
* "b",2,
* "c",3);
* System.out.println(f.get("b"));
* --> 2
* </PRE>
*/
public class FinalMap<T1 extends Comparable, T2> extends TreeMap<T1, T2>
{
    private static final long serialVersionUID = 1L;

    /**
     * Constructs a FinalMap from an array of alternating key/value pairs.
     *
     * @param a alternating keys and values; must have even length
     * @throws IllegalArgumentException if an odd number of arguments is given
     * @throws RuntimeException         if the same key appears twice
     */
    @SuppressWarnings("unchecked")
    public FinalMap(Object... a)
    {
        super();
        // The original silently dropped a trailing unpaired key; fail fast instead
        if (a.length % 2 != 0)
        {
            throw new IllegalArgumentException("Expected an even number of key/value arguments, got " + a.length);
        }
        for (int i = 0; i < a.length; i += 2)
        {
            if (containsKey(a[i]))
            {
                throw new RuntimeException("Duplicate key in FinalMap: " + a[i]);
            }
            put((T1) a[i], (T2) a[i + 1]);
        }
    }

    /**
     * Test routine
     */
    public static void main(String[] args)
    {
        FinalMap<String, Integer> f = new FinalMap<>("a", 1, "b", 2);
        System.out.println(f.get("b"));
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules/common/FinalSet.java
|
package com.ethlo.zally.rules.common;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.util.AbstractList;
import java.util.Arrays;
import java.util.Set;
import java.util.Spliterator;
/**
* Copyright 2016 Fabian M. Suchanek
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p>
* This class provides a very simple container implementation with zero
* overhead. A FinalSet bases on a sorted, unmodifiable array. The constructor
* can either be called with a sorted unmodifiable array (default constructor)
* or with an array that can be cloned and sorted beforehand if desired.
* Example:
*
* <PRE>
* FinalSet<String> f=new FinalSet("a","b","c");
* // equivalently:
* // FinalSet<String> f=new FinalSet(new String[]{"a","b","c"});
* // FinalSet<String> f=new FinalSet(SHALLNOTBECLONED,ISSORTED,"a","b","c");
* System.out.println(f.get(1));
* --> b
* </PRE>
*/
public class FinalSet<T extends Comparable<?>> extends AbstractList<T> implements Set<T>
{
    /**
     * The backing array; kept sorted so that lookups can use binary search.
     */
    public T[] data;

    /**
     * Builds a FinalSet over the given elements, copying the array first when requested.
     */
    @SuppressWarnings("unchecked")
    public FinalSet(boolean clone, T... a)
    {
        if (clone)
        {
            // Copy into a fresh Comparable[] so the caller's array is left untouched
            final Comparable<?>[] copy = new Comparable[a.length];
            System.arraycopy(a, 0, copy, 0, a.length);
            a = (T[]) copy;
        }
        Arrays.sort(a);
        data = a;
    }

    /**
     * Builds a FinalSet directly over the given array (no copy is made).
     */
    public FinalSet(T... a)
    {
        this(false, a);
    }

    /**
     * Membership test via binary search on the sorted backing array.
     */
    public boolean contains(T x)
    {
        return Arrays.binarySearch(data, x) >= 0;
    }

    /**
     * Index of x in the sorted backing array, or -1 when absent.
     */
    public int indexOf(T x)
    {
        final int pos = Arrays.binarySearch(data, x);
        return pos < 0 ? -1 : pos;
    }

    /**
     * Element at position i.
     */
    @Override
    public T get(int i)
    {
        return data[i];
    }

    /**
     * Number of elements in this FinalSet.
     */
    @Override
    public int size()
    {
        return data.length;
    }

    /* Choosing default implementation. Comment out this function to compile with Java 6/7 */
    public Spliterator<T> spliterator()
    {
        return super.spliterator();
    }
}
|
0
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules
|
java-sources/ai/qianmo/zally-maven-plugin/1.0.3/com/ethlo/zally/rules/common/PlingStemmer.java
|
package com.ethlo.zally.rules.common;
/*-
* #%L
* zally-maven-plugin
* %%
* Copyright (C) 2021 Morten Haraldsen (ethlo)
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.Map;
import java.util.Set;
/**
* Copyright 2016 Fabian M. Suchanek
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* <p>
* <p>
* The PlingStemmer stems an English noun (plural or singular) to its singular
* form. It deals with "firemen"->"fireman", it knows Greek stuff like
* "appendices"->"appendix" and yes, it was a lot of work to compile these exceptions.
* Examples:
* <PRE>
* System.out.println(PlingStemmer.stem("boy"));
* ----> boy
* System.out.println(PlingStemmer.stem("boys"));
* ----> boy
* System.out.println(PlingStemmer.stem("biophysics"));
* ----> biophysics
* System.out.println(PlingStemmer.stem("automata"));
* ----> automaton
* System.out.println(PlingStemmer.stem("genus"));
* ----> genus
* System.out.println(PlingStemmer.stem("emus"));
* ----> emu
* </PRE><P>
* <p>
* There are a number of word forms that can either be plural or singular.
* Examples include "physics" (the science or the plural of "physic" (the
* medicine)), "quarters" (the housing or the plural of "quarter" (1/4))
* or "people" (the singular of "peoples" or the plural of "person"). In
* these cases, the stemmer assumes the word is a plural form and returns
* the singular form. The methods isPlural, isSingular and isPluralAndSingular
* can be used to differentiate the cases.<P>
* <p>
* It cannot be guaranteed that the stemmer correctly stems a plural word
* or correctly ignores a singular word -- let alone that it treats an
* ambiguous word form in the way expected by the user.<P>
* <p>
* The PlingStemmer uses material from <A HREF=http://wordnet.princeton.edu/>WordNet</A>.<P>
* It requires the class FinalSet from the <A HREF=http://www.mpii.mpg.de/~suchanek/downloads/javatools>
* Java Tools</A>.
*/
public class PlingStemmer
{
// NOTE: FinalSet sorts its backing array on construction, so the listing order of the
// word lists below does not affect lookups; only membership matters.
/**
 * Words that end in "-se" in their plural forms (like "nurse" etc.)
 */
public static Set<String> categorySE_SES = new FinalSet<String>("nurses", "cruises", "premises", "houses", "courses", "cases");
/**
 * Words that do not have a distinct plural form (like "atlas" etc.)
 */
public static Set<String> category00 = new FinalSet<String>("alias", "asbestos", "atlas", "barracks", "bathos", "bias", "breeches", "britches",
"canvas", "chaos", "clippers", "contretemps", "corps", "cosmos", "crossroads", "diabetes", "ethos", "gallows", "gas", "graffiti",
"headquarters", "herpes", "high-jinks", "innings", "jackanapes", "lens", "means", "measles", "mews", "mumps", "news", "pathos", "pincers",
"pliers", "proceedings", "rabies", "rhinoceros", "sassafras", "scissors", "series", "shears", "species", "tuna"
);
/**
 * Words that change from "-um" to "-a" (like "curriculum" etc.), listed in their plural forms
 */
public static Set<String> categoryUM_A = new FinalSet<String>("addenda", "agenda", "aquaria", "bacteria", "candelabra", "compendia", "consortia",
"crania", "curricula", "data", "desiderata", "dicta", "emporia", "enconia", "errata", "extrema", "gymnasia", "honoraria", "interregna",
"lustra", "maxima", "media", "memoranda", "millenia", "minima", "momenta", "optima", "ova", "phyla", "quanta", "rostra", "spectra", "specula",
"stadia", "strata", "symposia", "trapezia", "ultimata", "vacua", "vela"
);
/**
 * Words that change from "-on" to "-a" (like "phenomenon" etc.), listed in their plural forms
 */
public static Set<String> categoryON_A = new FinalSet<String>("aphelia", "asyndeta", "automata", "criteria", "hyperbata", "noumena", "organa",
"perihelia", "phenomena", "prolegomena"
);
/**
 * Words that change from "-o" to "-i" (like "libretto" etc.), listed in their plural forms
 */
public static Set<String> categoryO_I = new FinalSet<String>("alti", "bassi", "canti", "contralti", "crescendi", "libretti", "soli", "soprani",
"tempi", "virtuosi"
);
/**
 * Words that change from "-us" to "-i" (like "fungus" etc.), listed in their plural forms
 */
public static Set<String> categoryUS_I = new FinalSet<String>("alumni", "bacilli", "cacti", "foci", "fungi", "genii", "hippopotami", "incubi",
"nimbi", "nuclei", "nucleoli", "octopi", "radii", "stimuli", "styli", "succubi", "syllabi", "termini", "tori", "umbilici", "uteri"
);
/**
 * Words that change from "-ix" to "-ices" (like "appendix" etc.), listed in their plural forms
 */
public static Set<String> categoryIX_ICES = new FinalSet<String>("appendices", "cervices");
/**
 * Words that change from "-is" to "-es" (like "axis" etc.), listed in their plural forms
 */
public static Set<String> categoryIS_ES = new FinalSet<String>(
// plus everybody ending in theses
"analyses", "axes", "bases", "crises", "diagnoses", "ellipses", "emphases", "neuroses", "oases", "paralyses", "synopses");
/**
 * Words that change from "-oe" to "-oes" (like "toe" etc.), listed in their plural forms
 */
public static Set<String> categoryOE_OES = new FinalSet<String>("aloes", "backhoes", "beroes", "canoes", "chigoes", "cohoes", "does", "felloes",
"floes", "foes", "gumshoes", "hammertoes", "hoes", "hoopoes", "horseshoes", "leucothoes", "mahoes", "mistletoes", "oboes", "overshoes",
"pahoehoes", "pekoes", "roes", "shoes", "sloes", "snowshoes", "throes", "tic-tac-toes", "tick-tack-toes", "ticktacktoes", "tiptoes",
"tit-tat-toes", "toes", "toetoes", "tuckahoes", "woes"
);
/**
 * Words that change from "-ex" to "-ices" (like "index" etc.), listed in their plural forms
 */
public static Set<String> categoryEX_ICES = new FinalSet<String>("apices", "codices", "cortices", "indices", "latices", "murices", "pontifices",
"silices", "simplices", "vertices", "vortices"
);
/**
 * Words that change from "-u" to "-us" (like "emu" etc.), listed in their plural forms
 */
public static Set<String> categoryU_US = new FinalSet<String>("apercus", "barbus", "cornus", "ecrus", "emus", "fondus", "gnus", "iglus", "mus",
"nandus", "napus", "poilus", "quipus", "snafus", "tabus", "tamandus", "tatus", "timucus", "tiramisus", "tofus", "tutus"
);
/**
 * Words that change from "-sse" to "-sses" (like "finesse" etc.), listed in their plural forms
 */
public static Set<String> categorySSE_SSES = new FinalSet<String>(
//plus those ending in mousse
"bouillabaisses", "coulisses", "crevasses", "crosses", "cuisses", "demitasses", "ecrevisses", "fesses", "finesses", "fosses", "impasses",
"lacrosses", "largesses", "masses", "noblesses", "palliasses", "pelisses", "politesses", "posses", "tasses", "wrasses"
);
/**
 * Words that change from "-che" to "-ches" (like "brioche" etc.), listed in their plural forms
 */
public static Set<String> categoryCHE_CHES = new FinalSet<String>("adrenarches", "attaches", "avalanches", "barouches", "brioches", "caches",
"caleches", "caroches", "cartouches", "cliches", "cloches", "creches", "demarches", "douches", "gouaches", "guilloches", "headaches",
"heartaches", "huaraches", "menarches", "microfiches", "moustaches", "mustaches", "niches", "panaches", "panoches", "pastiches", "penuches",
"pinches", "postiches", "psyches", "quiches", "schottisches", "seiches", "soutaches", "synecdoches", "thelarches", "troches"
);
/**
 * Words that end with "-ics" and do not exist as nouns without the 's' (like "aerobics" etc.)
 */
public static Set<String> categoryICS = new FinalSet<String>("aerobatics", "aerobics", "aerodynamics", "aeromechanics", "aeronautics",
"alphanumerics", "animatronics", "apologetics", "architectonics", "astrodynamics", "astronautics", "astrophysics", "athletics", "atmospherics",
"autogenics", "avionics", "ballistics", "bibliotics", "bioethics", "biometrics", "bionics", "bionomics", "biophysics", "biosystematics",
"cacogenics", "calisthenics", "callisthenics", "catoptrics", "civics", "cladistics", "cryogenics", "cryonics", "cryptanalytics", "cybernetics",
"cytoarchitectonics", "cytogenetics", "diagnostics", "dietetics", "dramatics", "dysgenics", "econometrics", "economics", "electromagnetics",
"electronics", "electrostatics", "endodontics", "enterics", "ergonomics", "eugenics", "eurhythmics", "eurythmics", "exodontics", "fibreoptics",
"futuristics", "genetics", "genomics", "geographics", "geophysics", "geopolitics", "geriatrics", "glyptics", "graphics", "gymnastics",
"hermeneutics", "histrionics", "homiletics", "hydraulics", "hydrodynamics", "hydrokinetics", "hydroponics", "hydrostatics", "hygienics",
"informatics", "kinematics", "kinesthetics", "kinetics", "lexicostatistics", "linguistics", "lithoglyptics", "liturgics", "logistics",
"macrobiotics", "macroeconomics", "magnetics", "magnetohydrodynamics", "mathematics", "metamathematics", "metaphysics", "microeconomics",
"microelectronics", "mnemonics", "morphophonemics", "neuroethics", "neurolinguistics", "nucleonics", "numismatics", "obstetrics", "onomastics",
"orthodontics", "orthopaedics", "orthopedics", "orthoptics", "paediatrics", "patristics", "patristics", "pedagogics", "pediatrics",
"periodontics", "pharmaceutics", "pharmacogenetics", "pharmacokinetics", "phonemics", "phonetics", "phonics", "photomechanics", "physiatrics",
"pneumatics", "poetics", "politics", "pragmatics", "prosthetics", "prosthodontics", "proteomics", "proxemics", "psycholinguistics",
"psychometrics", "psychonomics", "psychophysics", "psychotherapeutics", "robotics", "semantics", "semiotics", "semitropics", "sociolinguistics",
"stemmatics", "strategics", "subtropics", "systematics", "tectonics", "telerobotics", "therapeutics", "thermionics", "thermodynamics",
"thermostatics"
// NOTE(review): "patristics" appears twice in the list above — duplicates are harmless
// in FinalSet (binary search still works) but the repetition is likely unintended.
);
/**
 * Words that change from "-ie" to "-ies" (like "auntie" etc.), listed in their plural forms.
 * NOTE(review): the original list contained the token "facts1" between "mollies" and
 * "moxies" — clearly a corrupted entry, not an English plural; restored to "monies",
 * the alphabetical fit that the irregular map (monies -> money) already knows about.
 * ("magpies" also appears twice; duplicates are harmless in FinalSet.)
 */
public static Set<String> categoryIE_IES = new FinalSet<String>("aeries", "anomies", "aunties", "baddies", "beanies", "birdies", "boccies",
"bogies", "bolshies", "bombies", "bonhomies", "bonxies", "booboisies", "boogies", "boogie-woogies", "bookies", "booties", "bosies",
"bourgeoisies", "brasseries", "brassies", "brownies", "budgies", "byrnies", "caddies", "calories", "camaraderies", "capercaillies",
"capercailzies", "cassies", "catties", "causeries", "charcuteries", "chinoiseries", "collies", "commies", "cookies", "coolies", "coonties",
"cooties", "corries", "coteries", "cowpies", "cowries", "cozies", "crappies", "crossties", "curies", "dachsies", "darkies", "dassies",
"dearies", "dickies", "dies", "dixies", "doggies", "dogies", "dominies", "dovekies", "eyries", "faeries", "falsies", "floozies", "folies",
"foodies", "freebies", "gaucheries", "gendarmeries", "genies", "ghillies", "gillies", "goalies", "goonies", "grannies", "grotesqueries",
"groupies", "hankies", "hippies", "hoagies", "honkies", "hymies", "indies", "junkies", "kelpies", "kilocalories", "knobkerries", "koppies",
"kylies", "laddies", "lassies", "lies", "lingeries", "magpies", "magpies", "marqueteries", "mashies", "mealies", "meanies", "menageries",
"millicuries", "mollies", "monies", "moxies", "neckties", "newbies", "nighties", "nookies", "oldies", "organdies", "panties", "parqueteries",
"passementeries", "patisseries", "pies", "pinkies", "pixies", "porkpies", "potpies", "prairies", "preemies", "premies", "punkies", "pyxies",
"quickies", "ramies", "reveries", "rookies", "rotisseries", "scrapies", "sharpies", "smoothies", "softies", "stoolies", "stymies", "swaggies",
"sweeties", "talkies", "techies", "ties", "tooshies", "toughies", "townies", "veggies", "walkie-talkies", "wedgies", "weenies", "weirdies",
"yardies", "yuppies", "zombies"
);
/**
 * Maps irregular Germanic English plural nouns to their singular form.
 * Listed as alternating plural/singular pairs for the FinalMap constructor.
 */
public static Map<String, String> irregular = new FinalMap<String, String>("beefs", "beef", "beeves", "beef", "brethren", "brother", "busses",
"bus", "cattle", "cattlebeast", "children", "child", "corpora", "corpus", "ephemerides", "ephemeris", "firemen", "fireman", "genera", "genus",
"genies", "genie", "genii", "genie", "kine", "cow", "lice", "louse", "men", "man", "mice", "mouse", "mongooses", "mongoose", "monies", "money",
"mythoi", "mythos", "octopodes", "octopus", "octopuses", "octopus", "oxen", "ox", "people", "person", "soliloquies", "soliloquy", "throes",
"throes", "trilbys", "trilby", "women", "woman"
);
/**
 * Contains word forms that can either be plural or singular.
 * NOTE(review): "statistics" appears twice in this list — harmless for membership
 * tests, but likely unintended.
 */
public static Set<String> singAndPlur = new FinalSet<String>("acoustics", "aestetics", "aquatics", "basics", "ceramics", "classics", "cosmetics",
"dermatoglyphics", "dialectics", "dynamics", "esthetics", "ethics", "harmonics", "heroics", "isometrics", "mechanics", "metrics", "statistics",
"optic", "people", "physics", "polemics", "premises", "propaedeutics", "pyrotechnics", "quadratics", "quarters", "statistics", "tactics",
"tropics"
);
/**
* Tells whether a word form is plural. This method just checks whether the
* stem method alters the word
*/
/**
 * Tells whether a word form is plural. A word counts as plural exactly
 * when {@link #stem(String)} maps it to a different word form.
 */
public static boolean isPlural(String s)
{
    return !stem(s).equals(s);
}
/**
* Tells whether a word form is singular. Note that a word can be both plural and singular
*/
/**
 * Tells whether a word form is singular. Words listed as both singular and
 * plural qualify; otherwise any non-plural form is singular.
 */
public static boolean isSingular(String s)
{
    if (singAndPlur.contains(s.toLowerCase())) return true;
    return !isPlural(s);
}
/**
* Tells whether a word form is the singular form of one word and at
* the same time the plural form of another.
*/
/**
 * Tells whether a word form is the singular form of one word and at the
 * same time the plural form of another (case-insensitive lookup).
 */
public static boolean isSingularAndPlural(String s)
{
    String lowerCased = s.toLowerCase();
    return singAndPlur.contains(lowerCased);
}
/**
* Cuts a suffix from a string (that is the number of chars given by the suffix)
*/
/**
 * Removes as many trailing characters from {@code s} as {@code suffix} is
 * long. Only the length of the suffix matters; its content is not checked.
 */
public static String cut(String s, String suffix)
{
    int keep = s.length() - suffix.length();
    return s.substring(0, keep);
}
/**
* Returns true if a word is probably not Latin
*/
/**
 * Returns true if a word is probably not Latin: it contains a letter or
 * letter pair that is rare in Latin (at any position after the first), or
 * ends in "aus".
 */
public static boolean noLatin(String s)
{
    for (char rare : new char[] { 'h', 'j', 'k', 'w', 'y', 'z' })
    {
        if (s.indexOf(rare) > 0) return true;
    }
    if (s.indexOf("ou") > 0 || s.indexOf("sh") > 0 || s.indexOf("ch") > 0) return true;
    return s.endsWith("aus");
}
/**
* Returns true if a word is probably Greek
*/
/**
 * Returns true if a word is probably Greek: it contains "ph" after the
 * first position, or contains a 'y' after the first position while ending
 * in "nges".
 */
private static boolean greek(String s)
{
    boolean hasPh = s.indexOf("ph") > 0;
    boolean yWithNges = s.indexOf('y') > 0 && s.endsWith("nges");
    return hasPh || yWithNges;
}
/**
* Stems an English noun
*/
/**
 * Stems an English noun to its singular form, using the exception tables
 * above plus ordered suffix rules. The rules run from most specific to most
 * general, so their sequence must not be changed.
 *
 * @param s an English noun in lower case, possibly in plural form
 * @return the singular form, or {@code s} unchanged if it is not
 *         recognized as a plural
 */
public static String stem(String s)
{
    String stem = s;
    // Handle irregular ones
    String irreg = irregular.get(s);
    if (irreg != null) return (stem = irreg);
    // -on to -a
    if (categoryON_A.contains(s)) return (stem = cut(s, "a") + "on");
    // -um to -a
    if (categoryUM_A.contains(s)) return (stem = cut(s, "a") + "um");
    // -x to -ices
    if (categoryIX_ICES.contains(s)) return (stem = cut(s, "ices") + "ix");
    // -o to -i
    if (categoryO_I.contains(s)) return (stem = cut(s, "i") + "o");
    // -se to ses
    if (categorySE_SES.contains(s)) return (stem = cut(s, "s"));
    // -is to -es
    if (categoryIS_ES.contains(s) || s.endsWith("theses")) return (stem = cut(s, "es") + "is");
    // -us to -i
    if (categoryUS_I.contains(s)) return (stem = cut(s, "i") + "us");
    // Wrong plural
    if (s.endsWith("uses") && (categoryUS_I.contains(cut(s, "uses") + "i") || s.equals("genuses") || s.equals("corpuses")))
        return (stem = cut(s, "es"));
    // -ex to -ices
    if (categoryEX_ICES.contains(s)) return (stem = cut(s, "ices") + "ex");
    // Words that do not inflect in the plural
    if (s.endsWith("ois") || s.endsWith("itis") || category00.contains(s) || categoryICS.contains(s))
        return (stem = s);
    // -en to -ina
    // No other common words end in -ina
    // Bug fix: cut(s, "en") only dropped two characters ("stamina" -> "stami");
    // the rule must replace the "-ina" suffix with "-en" ("stamina" -> "stamen"),
    // matching the replace-suffix pattern of the -ae and -ata rules below.
    if (s.endsWith("ina")) return (stem = cut(s, "ina") + "en");
    // -a to -ae
    // No other common words end in -ae
    if (s.endsWith("ae")) return (stem = cut(s, "e"));
    // -a to -ata
    // No other common words end in -ata
    if (s.endsWith("ata")) return (stem = cut(s, "ta"));
    // trix to -trices
    // No common word ends with -trice(s)
    if (s.endsWith("trices")) return (stem = cut(s, "trices") + "trix");
    // -us to -us
    // No other common word ends in -us, except for false plurals of French words
    // Catch words that are not latin or known to end in -u
    if (s.endsWith("us") && !s.endsWith("eaus") && !s.endsWith("ieus") && !noLatin(s) && !categoryU_US.contains(s))
        return (stem = s);
    // -tooth to -teeth
    // -goose to -geese
    // -foot to -feet
    // -zoon to -zoa
    // No other common words end with the indicated suffixes
    if (s.endsWith("teeth")) return (stem = cut(s, "teeth") + "tooth");
    if (s.endsWith("geese")) return (stem = cut(s, "geese") + "goose");
    if (s.endsWith("feet")) return (stem = cut(s, "feet") + "foot");
    if (s.endsWith("zoa")) return (stem = cut(s, "zoa") + "zoon");
    // -eau to -eaux
    // No other common words end in eaux
    if (s.endsWith("eaux")) return (stem = cut(s, "x"));
    // -ieu to -ieux
    // No other common words end in ieux
    if (s.endsWith("ieux")) return (stem = cut(s, "x"));
    // -nx to -nges
    // Pay attention not to kill words ending in -nge with plural -nges
    // Take only Greek words (works fine, only a handfull of exceptions)
    if (s.endsWith("nges") && greek(s)) return (stem = cut(s, "nges") + "nx");
    // -[sc]h to -[sc]hes
    // No other common word ends with "shes", "ches" or "she(s)"
    // Quite a lot end with "che(s)", filter them out
    if (s.endsWith("shes") || s.endsWith("ches") && !categoryCHE_CHES.contains(s)) return (stem = cut(s, "es"));
    // -ss to -sses
    // No other common singular word ends with "sses"
    // Filter out those ending in "sse(s)"
    if (s.endsWith("sses") && !categorySSE_SSES.contains(s) && !s.endsWith("mousses")) return (stem = cut(s, "es"));
    // -x to -xes
    // No other common word ends with "xe(s)" except for "axe"
    if (s.endsWith("xes") && !s.equals("axes")) return (stem = cut(s, "es"));
    // -[nlw]ife to -[nlw]ives
    // No other common word ends with "[nlw]ive(s)" except for olive
    if (s.endsWith("nives") || s.endsWith("lives") && !s.endsWith("olives") || s.endsWith("wives"))
        return (stem = cut(s, "ves") + "fe");
    // -[aeo]lf to -ves exceptions: valve, solve
    // -[^d]eaf to -ves exceptions: heave, weave
    // -arf to -ves no exception
    if (s.endsWith("alves") && !s.endsWith("valves") || s.endsWith("olves") && !s.endsWith("solves")
            || s.endsWith("eaves") && !s.endsWith("heaves") && !s.endsWith("weaves") || s.endsWith("arves"))
        return (stem = cut(s, "ves") + "f");
    // -y to -ies
    // -ies is very uncommon as a singular suffix
    // but -ie is quite common, filter them out
    if (s.endsWith("ies") && !categoryIE_IES.contains(s)) return (stem = cut(s, "ies") + "y");
    // -o to -oes
    // Some words end with -oe, so don't kill the "e"
    if (s.endsWith("oes") && !categoryOE_OES.contains(s)) return (stem = cut(s, "es"));
    // -s to -ses
    // -z to -zes
    // no words end with "-ses" or "-zes" in singular
    if (s.endsWith("ses") || s.endsWith("zes")) return (stem = cut(s, "es"));
    // - to -s
    if (s.endsWith("s") && !s.endsWith("ss") && !s.endsWith("is")) return (stem = cut(s, "s"));
    return stem;
}
/**
* Test routine
*/
/**
 * Interactive test routine: reads words from stdin, reports whether each
 * is plural and/or singular, and prints the stemmed (singular) form.
 * Stops on an empty line or end of input.
 */
public static void main(String[] argv) throws Exception
{
    System.out.println("Enter an English word in plural form and press ENTER");
    BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
    while (true)
    {
        String w = in.readLine();
        // Bug fix: readLine() returns null at end of stream (e.g. piped
        // input or Ctrl-D); the original code threw a NullPointerException
        // on w.length() in that case.
        if (w == null || w.length() == 0) break;
        if (isPlural(w)) System.out.println("This word is plural");
        if (isSingular(w)) System.out.println("This word is singular");
        System.out.println("Stemmed to singular: " + stem(w));
    }
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/Aggregation.java
|
/*
*
* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.Arrays;
/**
* Represents an aggregation operation. Please note that not all aggregations work, or even make
* sense in all types of aggregation operations.
*/
abstract class Aggregation {
static {
NativeDepsLoader.loadNativeDeps();
}
/*
* This should be kept in sync with AggregationJni.cpp. Note that the nativeId here is not the
* same as the C++ cudf::aggregation::Kind. They are very closely related, but both are
* implementation details and generally should be hidden from the end user.
* Visible for testing.
*/
enum Kind {
SUM(0),
PRODUCT(1),
MIN(2),
MAX(3),
COUNT(4),
ANY(5),
ALL(6),
SUM_OF_SQUARES(7),
MEAN(8),
VARIANCE(9), // This can take a delta degrees of freedom
STD(10), // This can take a delta degrees of freedom
MEDIAN(11),
QUANTILE(12),
ARGMAX(13),
ARGMIN(14),
NUNIQUE(15),
NTH_ELEMENT(16),
ROW_NUMBER(17),
COLLECT_LIST(18),
COLLECT_SET(19),
MERGE_LISTS(20),
MERGE_SETS(21),
LEAD(22),
LAG(23),
PTX(24),
CUDA(25),
HOST_UDF(26),
M2(27),
MERGE_M2(28),
RANK(29),
DENSE_RANK(30),
PERCENT_RANK(31),
TDIGEST(32), // This can take a delta argument for accuracy level
MERGE_TDIGEST(33), // This can take a delta argument for accuracy level
HISTOGRAM(34),
MERGE_HISTOGRAM(35),
BITWISE_AGG(36);
final int nativeId;
Kind(int nativeId) {this.nativeId = nativeId;}
}
/**
* An Aggregation that only needs a kind and nothing else.
*/
/**
 * An aggregation that is fully described by its {@link Kind} alone and
 * carries no extra parameters.
 */
private static class NoParamAggregation extends Aggregation {
  public NoParamAggregation(Kind kind) {
    super(kind);
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createNoParamAgg(kind.nativeId);
  }

  @Override
  public int hashCode() {
    return kind.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof NoParamAggregation)) {
      return false;
    }
    return kind.equals(((NoParamAggregation) other).kind);
  }
}
/**
 * Nth-element aggregation: picks the element at a given (possibly
 * negative) offset within each group, honoring the null policy.
 */
static final class NthAggregation extends Aggregation {
  private final int offset;
  private final NullPolicy nullPolicy;

  private NthAggregation(int offset, NullPolicy nullPolicy) {
    super(Kind.NTH_ELEMENT);
    this.offset = offset;
    this.nullPolicy = nullPolicy;
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createNthAgg(offset, nullPolicy.includeNulls);
  }

  @Override
  public int hashCode() {
    return 31 * offset + nullPolicy.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof NthAggregation)) {
      return false;
    }
    NthAggregation that = (NthAggregation) other;
    return that.offset == offset && that.nullPolicy == nullPolicy;
  }
}
/**
 * An aggregation parameterized by delta degrees of freedom (used for both
 * VARIANCE and STD).
 */
private static class DdofAggregation extends Aggregation {
  private final int ddof;

  public DdofAggregation(Kind kind, int ddof) {
    super(kind);
    this.ddof = ddof;
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createDdofAgg(kind.nativeId, ddof);
  }

  @Override
  public int hashCode() {
    return 31 * kind.hashCode() + ddof;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    } else if (other instanceof DdofAggregation) {
      DdofAggregation o = (DdofAggregation) other;
      // Bug fix: kind must be compared too. Both VARIANCE and STD use this
      // class, and hashCode() already mixes in kind, so equals() without the
      // kind check made variance(1).equals(standardDeviation(1)) true while
      // their hash codes differed, violating the equals/hashCode contract.
      return o.kind == this.kind && o.ddof == this.ddof;
    }
    return false;
  }
}
/**
 * A count-style aggregation (COUNT or NUNIQUE) parameterized by whether
 * nulls are included.
 */
private static class CountLikeAggregation extends Aggregation {
  private final NullPolicy nullPolicy;

  public CountLikeAggregation(Kind kind, NullPolicy nullPolicy) {
    super(kind);
    this.nullPolicy = nullPolicy;
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createCountLikeAgg(kind.nativeId, nullPolicy.includeNulls);
  }

  @Override
  public int hashCode() {
    return 31 * kind.hashCode() + nullPolicy.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    } else if (other instanceof CountLikeAggregation) {
      CountLikeAggregation o = (CountLikeAggregation) other;
      // Bug fix: kind must be compared too. Both COUNT and NUNIQUE use this
      // class, and hashCode() already mixes in kind, so equals() without the
      // kind check made count(p).equals(nunique(p)) true while their hash
      // codes differed, violating the equals/hashCode contract.
      return o.kind == this.kind && o.nullPolicy == this.nullPolicy;
    }
    return false;
  }
}
/**
 * Quantile aggregation: computes one or more quantiles of a column using
 * the given interpolation method.
 */
private static final class QuantileAggregation extends Aggregation {
// Interpolation method forwarded to the native quantile implementation.
private final QuantileMethod method;
// Requested quantiles. NOTE(review): the array is stored without a
// defensive copy, so callers must not mutate it after construction.
private final double[] quantiles;
public QuantileAggregation(QuantileMethod method, double[] quantiles) {
super(Kind.QUANTILE);
this.method = method;
this.quantiles = quantiles;
}
@Override
long createNativeInstance() {
return Aggregation.createQuantAgg(method.nativeId, quantiles);
}
@Override
public int hashCode() {
// Combines kind, method and the quantile values into one hash.
return 31 * (31 * kind.hashCode() + method.hashCode()) + Arrays.hashCode(quantiles);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (other instanceof QuantileAggregation) {
QuantileAggregation o = (QuantileAggregation) other;
// Element-wise comparison of the quantile arrays, not reference equality.
return this.method == o.method && Arrays.equals(this.quantiles, o.quantiles);
}
return false;
}
}
/**
 * LEAD/LAG window aggregation: reads the value {@code offset} rows away,
 * optionally substituting values from {@code defaultOutput} (may be null)
 * when the offset falls outside the window.
 */
private static class LeadLagAggregation extends Aggregation {
// Row offset; kind distinguishes LEAD from LAG.
private final int offset;
// Optional column of default values; may be null. Not owned by this object
// (see getDefaultOutput below, which only exposes the native view).
private final ColumnVector defaultOutput;
LeadLagAggregation(Kind kind, int offset, ColumnVector defaultOutput) {
super(kind);
this.offset = offset;
this.defaultOutput = defaultOutput;
}
@Override
long createNativeInstance() {
// Default output comes from a different path
return Aggregation.createLeadLagAgg(kind.nativeId, offset);
}
@Override
public int hashCode() {
int ret = 31 * kind.hashCode() + offset;
// defaultOutput is optional, so it only contributes when present.
if (defaultOutput != null) {
ret = 31 * ret + defaultOutput.hashCode();
}
return ret;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (other instanceof LeadLagAggregation) {
LeadLagAggregation o = (LeadLagAggregation) other;
boolean ret = o.kind == this.kind && this.offset == o.offset;
// Null-safe comparison of the optional default column.
if (defaultOutput != null) {
ret = ret && defaultOutput.equals(o.defaultOutput);
} else if (o.defaultOutput != null) {
// defaultOutput == null and o.defaultOutput != null so they are not equal
ret = false;
} // else they are both null which is the same and a noop.
return ret;
}
return false;
}
@Override
long getDefaultOutput() {
// 0 signals "no default column" to the native layer.
return defaultOutput == null ? 0 : defaultOutput.getNativeView();
}
}
/**
 * Collect-list aggregation: gathers the values of each group into a list,
 * including or excluding nulls per the policy.
 */
static final class CollectListAggregation extends Aggregation {
  private final NullPolicy nullPolicy;

  private CollectListAggregation(NullPolicy nullPolicy) {
    super(Kind.COLLECT_LIST);
    this.nullPolicy = nullPolicy;
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createCollectListAgg(nullPolicy.includeNulls);
  }

  @Override
  public int hashCode() {
    return 31 * kind.hashCode() + nullPolicy.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof CollectListAggregation)) {
      return false;
    }
    return ((CollectListAggregation) other).nullPolicy == nullPolicy;
  }
}
/**
 * Collect-set aggregation: gathers the distinct values of each group into
 * a set, with configurable null inclusion and null/NaN equality semantics.
 */
static final class CollectSetAggregation extends Aggregation {
// Whether nulls are collected at all.
private final NullPolicy nullPolicy;
// Whether two nulls compare equal when deduplicating.
private final NullEquality nullEquality;
// Whether two NaNs compare equal when deduplicating.
private final NaNEquality nanEquality;
private CollectSetAggregation(NullPolicy nullPolicy, NullEquality nullEquality, NaNEquality nanEquality) {
super(Kind.COLLECT_SET);
this.nullPolicy = nullPolicy;
this.nullEquality = nullEquality;
this.nanEquality = nanEquality;
}
@Override
long createNativeInstance() {
return Aggregation.createCollectSetAgg(nullPolicy.includeNulls,
nullEquality.nullsEqual,
nanEquality.nansEqual);
}
@Override
public int hashCode() {
// NOTE(review): the three boolean hashes are summed rather than mixed with
// multiplication, so some distinct configurations collide; still a valid
// (if weak) hash since equal objects hash equal.
return 31 * kind.hashCode()
+ Boolean.hashCode(nullPolicy.includeNulls)
+ Boolean.hashCode(nullEquality.nullsEqual)
+ Boolean.hashCode(nanEquality.nansEqual);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (other instanceof CollectSetAggregation) {
CollectSetAggregation o = (CollectSetAggregation) other;
return o.nullPolicy == this.nullPolicy &&
o.nullEquality == this.nullEquality &&
o.nanEquality == this.nanEquality;
}
return false;
}
}
/**
 * Merge-sets aggregation: merges partial sets produced by collect-set
 * aggregations, with configurable null/NaN equality semantics.
 */
static final class MergeSetsAggregation extends Aggregation {
  private final NullEquality nullEquality;
  private final NaNEquality nanEquality;

  private MergeSetsAggregation(NullEquality nullEquality, NaNEquality nanEquality) {
    super(Kind.MERGE_SETS);
    this.nullEquality = nullEquality;
    this.nanEquality = nanEquality;
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createMergeSetsAgg(nullEquality.nullsEqual, nanEquality.nansEqual);
  }

  @Override
  public int hashCode() {
    return 31 * kind.hashCode()
        + Boolean.hashCode(nullEquality.nullsEqual)
        + Boolean.hashCode(nanEquality.nansEqual);
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof MergeSetsAggregation)) {
      return false;
    }
    MergeSetsAggregation that = (MergeSetsAggregation) other;
    return that.nullEquality == nullEquality && that.nanEquality == nanEquality;
  }
}
/**
 * Host UDF aggregation: runs a host-side user-defined function supplied via
 * a {@link HostUDFWrapper}.
 */
static final class HostUDFAggregation extends Aggregation {
// Wrapper around the native host UDF; supplies identity for equals/hashCode.
private final HostUDFWrapper wrapper;
private HostUDFAggregation(HostUDFWrapper wrapper) {
super(Kind.HOST_UDF);
this.wrapper = wrapper;
}
@Override
long createNativeInstance() {
long udf = 0;
try {
udf = wrapper.createUDFInstance();
return Aggregation.createHostUDFAgg(udf);
} finally {
// a new UDF is cloned in `createHostUDFAgg`, here should close the UDF instance.
// The finally block guarantees the temporary instance is released even if
// createHostUDFAgg throws; udf == 0 means creation never succeeded.
if (udf != 0) {
HostUDFWrapper.closeUDFInstance(udf);
}
}
}
@Override
public int hashCode() {
return 31 * kind.hashCode() + wrapper.hashCode();
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (other instanceof HostUDFAggregation) {
return wrapper.equals(((HostUDFAggregation) other).wrapper);
}
return false;
}
}
/**
 * Bitwise AND aggregation.
 */
static final class BitAndAggregation extends Aggregation {
private BitAndAggregation() {
super(Kind.BITWISE_AGG);
}
@Override
long createNativeInstance() {
return Aggregation.createBitAndAgg();
}
@Override
public int hashCode() {
// All three bitwise aggregations share Kind.BITWISE_AGG, so the "AND"
// literal is mixed in to distinguish this one from OR and XOR.
return 31 * kind.hashCode() + ("AND").hashCode();
}
@Override
public boolean equals(Object other) {
// Stateless: any two BitAndAggregation instances are equal.
return other instanceof BitAndAggregation;
}
}
/**
 * Bitwise OR aggregation.
 */
static final class BitOrAggregation extends Aggregation {
  private BitOrAggregation() {
    super(Kind.BITWISE_AGG);
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createBitOrAgg();
  }

  @Override
  public int hashCode() {
    // All three bitwise aggregations share Kind.BITWISE_AGG, so the "OR"
    // literal is mixed in to distinguish this one from AND and XOR.
    return 31 * kind.hashCode() + ("OR").hashCode();
  }

  @Override
  public boolean equals(Object other) {
    // Bug fix: previously tested `other instanceof BitAndAggregation`,
    // which made a bitwise OR aggregation compare equal to a bitwise AND
    // aggregation (and unequal to another bitwise OR).
    return other instanceof BitOrAggregation;
  }
}
/**
 * Bitwise XOR aggregation.
 */
static final class BitXorAggregation extends Aggregation {
  private BitXorAggregation() {
    super(Kind.BITWISE_AGG);
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createBitXorAgg();
  }

  @Override
  public int hashCode() {
    // All three bitwise aggregations share Kind.BITWISE_AGG, so the "XOR"
    // literal is mixed in to distinguish this one from AND and OR.
    return 31 * kind.hashCode() + ("XOR").hashCode();
  }

  @Override
  public boolean equals(Object other) {
    // Bug fix: previously tested `other instanceof BitAndAggregation`,
    // which made a bitwise XOR aggregation compare equal to a bitwise AND
    // aggregation (and unequal to another bitwise XOR).
    return other instanceof BitXorAggregation;
  }
}
// The kind of this aggregation; selects the native implementation and is
// shared by all subclasses.
protected final Kind kind;
protected Aggregation(Kind kind) {
this.kind = kind;
}
/**
 * Get the native view of a ColumnVector that provides default values to be used for some window
 * aggregations when there is not enough data to do the computation. This really only happens
 * for a very few number of window aggregations. Also note that the ownership and life cycle of
 * the column is controlled outside of this, so don't try to close it.
 * @return the native view of the column vector or 0.
 */
long getDefaultOutput() {
// Base implementation: no default column. Overridden by LeadLagAggregation.
return 0;
}
/**
 * returns a <code>cudf::aggregation *</code> cast to a long. We don't want to force users to
 * close an Aggregation. Because of this Aggregation objects are created in pure java, but when
 * it is time to use them this method is called to return a pointer to the c++ aggregation
 * instance. All values returned by this can be used multiple times, and should be closed by
 * calling the static close method. Yes, this creates a lot more JNI calls, but it keeps the
 * user API clean.
 */
abstract long createNativeInstance();
// Subclasses must implement value-based equality consistent with hashCode.
@Override
public abstract int hashCode();
@Override
public abstract boolean equals(Object other);
/**
 * Close each native aggregation pointer in the array; 0 entries are skipped.
 */
static void close(long[] ptrs) {
for (long ptr: ptrs) {
if (ptr != 0) {
close(ptr);
}
}
}
// Releases a single native aggregation created by createNativeInstance().
static native void close(long ptr);
static final class SumAggregation extends NoParamAggregation {
private SumAggregation() {
super(Kind.SUM);
}
}
/**
* Sum reduction.
*/
static SumAggregation sum() {
return new SumAggregation();
}
static final class ProductAggregation extends NoParamAggregation {
private ProductAggregation() {
super(Kind.PRODUCT);
}
}
/**
* Product reduction.
*/
static ProductAggregation product() {
return new ProductAggregation();
}
static final class MinAggregation extends NoParamAggregation {
private MinAggregation() {
super(Kind.MIN);
}
}
/**
* Min reduction.
*/
static MinAggregation min() {
return new MinAggregation();
}
static final class MaxAggregation extends NoParamAggregation {
private MaxAggregation() {
super(Kind.MAX);
}
}
/**
* Max reduction.
*/
static MaxAggregation max() {
return new MaxAggregation();
}
static final class CountAggregation extends CountLikeAggregation {
private CountAggregation(NullPolicy nullPolicy) {
super(Kind.COUNT, nullPolicy);
}
}
/**
* Count number of valid, a.k.a. non-null, elements.
*/
static CountAggregation count() {
return count(NullPolicy.EXCLUDE);
}
/**
* Count number of elements.
* @param nullPolicy INCLUDE if nulls should be counted. EXCLUDE if only non-null values
* should be counted.
*/
static CountAggregation count(NullPolicy nullPolicy) {
return new CountAggregation(nullPolicy);
}
static final class AnyAggregation extends NoParamAggregation {
private AnyAggregation() {
super(Kind.ANY);
}
}
/**
* Any reduction. Produces a true or 1, depending on the output type,
* if any of the elements in the range are true or non-zero, otherwise produces a false or 0.
* Null values are skipped.
*/
static AnyAggregation any() {
return new AnyAggregation();
}
static final class AllAggregation extends NoParamAggregation {
private AllAggregation() {
super(Kind.ALL);
}
}
/**
* All reduction. Produces true or 1, depending on the output type, if all of the elements in
* the range are true or non-zero, otherwise produces a false or 0.
* Null values are skipped.
*/
static AllAggregation all() {
return new AllAggregation();
}
static final class SumOfSquaresAggregation extends NoParamAggregation {
private SumOfSquaresAggregation() {
super(Kind.SUM_OF_SQUARES);
}
}
/**
* Sum of squares reduction.
*/
static SumOfSquaresAggregation sumOfSquares() {
return new SumOfSquaresAggregation();
}
static final class MeanAggregation extends NoParamAggregation {
private MeanAggregation() {
super(Kind.MEAN);
}
}
/**
* Arithmetic mean reduction.
*/
static MeanAggregation mean() {
return new MeanAggregation();
}
static final class M2Aggregation extends NoParamAggregation {
private M2Aggregation() {
super(Kind.M2);
}
}
/**
* Sum of square of differences from mean.
*/
static M2Aggregation M2() {
return new M2Aggregation();
}
static final class VarianceAggregation extends DdofAggregation {
private VarianceAggregation(int ddof) {
super(Kind.VARIANCE, ddof);
}
}
/**
* Variance aggregation with 1 as the delta degrees of freedom.
*/
static VarianceAggregation variance() {
return variance(1);
}
/**
* Variance aggregation.
* @param ddof delta degrees of freedom. The divisor used in calculation of variance is
* <code>N - ddof</code>, where N is the population size.
*/
static VarianceAggregation variance(int ddof) {
return new VarianceAggregation(ddof);
}
static final class StandardDeviationAggregation extends DdofAggregation {
private StandardDeviationAggregation(int ddof) {
super(Kind.STD, ddof);
}
}
/**
* Standard deviation aggregation with 1 as the delta degrees of freedom.
*/
static StandardDeviationAggregation standardDeviation() {
return standardDeviation(1);
}
/**
* Standard deviation aggregation.
* @param ddof delta degrees of freedom. The divisor used in calculation of std is
* <code>N - ddof</code>, where N is the population size.
*/
static StandardDeviationAggregation standardDeviation(int ddof) {
return new StandardDeviationAggregation(ddof);
}
static final class MedianAggregation extends NoParamAggregation {
private MedianAggregation() {
super(Kind.MEDIAN);
}
}
/**
* Median reduction.
*/
static MedianAggregation median() {
return new MedianAggregation();
}
/**
* Aggregate to compute the specified quantiles. Uses linear interpolation by default.
*/
static QuantileAggregation quantile(double ... quantiles) {
return quantile(QuantileMethod.LINEAR, quantiles);
}
/**
* Aggregate to compute various quantiles.
*/
static QuantileAggregation quantile(QuantileMethod method, double ... quantiles) {
return new QuantileAggregation(method, quantiles);
}
static final class ArgMaxAggregation extends NoParamAggregation {
private ArgMaxAggregation() {
super(Kind.ARGMAX);
}
}
/**
* Index of max element. Please note that when using this aggregation with a group by if the
* data is not already sorted by the grouping keys it may be automatically sorted
* prior to doing the aggregation. This would result in an index into the sorted data being
* returned.
*/
static ArgMaxAggregation argMax() {
return new ArgMaxAggregation();
}
static final class ArgMinAggregation extends NoParamAggregation {
private ArgMinAggregation() {
super(Kind.ARGMIN);
}
}
/**
* Index of min element. Please note that when using this aggregation with a group by if the
* data is not already sorted by the grouping keys it may be automatically sorted
* prior to doing the aggregation. This would result in an index into the sorted data being
* returned.
*/
static ArgMinAggregation argMin() {
return new ArgMinAggregation();
}
static final class NuniqueAggregation extends CountLikeAggregation {
private NuniqueAggregation(NullPolicy nullPolicy) {
super(Kind.NUNIQUE, nullPolicy);
}
}
/**
* Number of unique, non-null, elements.
*/
static NuniqueAggregation nunique() {
return nunique(NullPolicy.EXCLUDE);
}
/**
* Number of unique elements.
* @param nullPolicy INCLUDE if nulls should be counted else EXCLUDE. If nulls are counted they
* compare as equal so multiple null values in a range would all only
* increase the count by 1.
*/
static NuniqueAggregation nunique(NullPolicy nullPolicy) {
return new NuniqueAggregation(nullPolicy);
}
/**
* Get the nth, non-null, element in a group.
* @param offset the offset to look at. Negative numbers go from the end of the group. Any
* value outside of the group range results in a null.
*/
static NthAggregation nth(int offset) {
return nth(offset, NullPolicy.INCLUDE);
}
/**
* Get the nth element in a group.
* @param offset the offset to look at. Negative numbers go from the end of the group. Any
* value outside of the group range results in a null.
* @param nullPolicy INCLUDE if nulls should be included in the aggregation or EXCLUDE if they
* should be skipped.
*/
static NthAggregation nth(int offset, NullPolicy nullPolicy) {
return new NthAggregation(offset, nullPolicy);
}
static final class RowNumberAggregation extends NoParamAggregation {
private RowNumberAggregation() {
super(Kind.ROW_NUMBER);
}
}
/**
* Get the row number, only makes sense for a window operations.
*/
static RowNumberAggregation rowNumber() {
return new RowNumberAggregation();
}
static final class RankAggregation extends NoParamAggregation {
private RankAggregation() {
super(Kind.RANK);
}
}
/**
* Get the row's ranking.
*/
static RankAggregation rank() {
return new RankAggregation();
}
static final class DenseRankAggregation extends NoParamAggregation {
private DenseRankAggregation() {
super(Kind.DENSE_RANK);
}
}
/**
* Get the row's dense ranking.
*/
static DenseRankAggregation denseRank() {
return new DenseRankAggregation();
}
static final class PercentRankAggregation extends NoParamAggregation {
private PercentRankAggregation() {
super(Kind.PERCENT_RANK);
}
}
/**
* Get the row's percent ranking.
*/
static PercentRankAggregation percentRank() {
return new PercentRankAggregation();
}
/**
* Collect the values into a list. Nulls will be skipped.
*/
static CollectListAggregation collectList() {
return collectList(NullPolicy.EXCLUDE);
}
/**
* Collect the values into a list.
*
* @param nullPolicy Indicates whether to include/exclude nulls during collection.
*/
static CollectListAggregation collectList(NullPolicy nullPolicy) {
return new CollectListAggregation(nullPolicy);
}
/**
* Collect the values into a set. All null values will be excluded, and all nan values are regarded as
* unique instances.
*/
static CollectSetAggregation collectSet() {
return collectSet(NullPolicy.EXCLUDE, NullEquality.UNEQUAL, NaNEquality.UNEQUAL);
}
/**
* Collect the values into a set.
*
* @param nullPolicy Indicates whether to include/exclude nulls during collection.
* @param nullEquality Flag to specify whether null entries within each list should be considered equal.
* @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
*/
static CollectSetAggregation collectSet(NullPolicy nullPolicy, NullEquality nullEquality, NaNEquality nanEquality) {
return new CollectSetAggregation(nullPolicy, nullEquality, nanEquality);
}
static final class MergeListsAggregation extends NoParamAggregation {
private MergeListsAggregation() {
super(Kind.MERGE_LISTS);
}
}
/**
* Merge the partial lists produced by multiple CollectListAggregations.
* NOTICE: The partial lists to be merged should NOT include any null list element (but can include null list entries).
*/
static MergeListsAggregation mergeLists() {
return new MergeListsAggregation();
}
/**
* Merge the partial sets produced by multiple CollectSetAggregations. Each null/nan value will be regarded as
* a unique instance.
*/
static MergeSetsAggregation mergeSets() {
return mergeSets(NullEquality.UNEQUAL, NaNEquality.UNEQUAL);
}
/**
* Merge the partial sets produced by multiple CollectSetAggregations.
*
* @param nullEquality Flag to specify whether null entries within each list should be considered equal.
* @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
*/
static MergeSetsAggregation mergeSets(NullEquality nullEquality, NaNEquality nanEquality) {
return new MergeSetsAggregation(nullEquality, nanEquality);
}
/**
* Host UDF aggregation, to execute a host-side user-defined function (UDF).
* @param wrapper The wrapper for the native host UDF instance.
* @return A new HostUDFAggregation instance
*/
static HostUDFAggregation hostUDF(HostUDFWrapper wrapper) {
return new HostUDFAggregation(wrapper);
}
static final class LeadAggregation extends LeadLagAggregation {
private LeadAggregation(int offset, ColumnVector defaultOutput) {
super(Kind.LEAD, offset, defaultOutput);
}
}
/**
* In a rolling window return the value offset entries ahead or the corresponding value from
* defaultOutput if it is outside of the window. Note that this does not take any ownership of
* defaultOutput and the caller mush ensure that defaultOutput remains valid during the life
* time of this aggregation operation.
*/
static LeadAggregation lead(int offset, ColumnVector defaultOutput) {
return new LeadAggregation(offset, defaultOutput);
}
static final class LagAggregation extends LeadLagAggregation {
private LagAggregation(int offset, ColumnVector defaultOutput) {
super(Kind.LAG, offset, defaultOutput);
}
}
/**
* In a rolling window return the value offset entries behind or the corresponding value from
* defaultOutput if it is outside of the window. Note that this does not take any ownership of
* defaultOutput and the caller mush ensure that defaultOutput remains valid during the life
* time of this aggregation operation.
*/
static LagAggregation lag(int offset, ColumnVector defaultOutput) {
return new LagAggregation(offset, defaultOutput);
}
public static final class MergeM2Aggregation extends NoParamAggregation {
private MergeM2Aggregation() {
super(Kind.MERGE_M2);
}
}
/**
* Merge the partial M2 values produced by multiple instances of M2Aggregation.
*/
static MergeM2Aggregation mergeM2() {
return new MergeM2Aggregation();
}
/**
 * A t-digest aggregation (TDIGEST or MERGE_TDIGEST) parameterized by a
 * delta value controlling the accuracy level.
 */
static class TDigestAggregation extends Aggregation {
  private final int delta;

  public TDigestAggregation(Kind kind, int delta) {
    super(kind);
    this.delta = delta;
  }

  @Override
  long createNativeInstance() {
    return Aggregation.createTDigestAgg(kind.nativeId, delta);
  }

  @Override
  public int hashCode() {
    return 31 * kind.hashCode() + delta;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    } else if (other instanceof TDigestAggregation) {
      TDigestAggregation o = (TDigestAggregation) other;
      // Bug fix: kind must be compared too. Both TDIGEST and MERGE_TDIGEST
      // use this class, and hashCode() already mixes in kind, so equals()
      // without the kind check made createTDigest(d).equals(mergeTDigest(d))
      // true while their hash codes differed, violating the equals/hashCode
      // contract.
      return o.kind == this.kind && o.delta == this.delta;
    }
    return false;
  }
}
static TDigestAggregation createTDigest(int delta) {
return new TDigestAggregation(Kind.TDIGEST, delta);
}
static TDigestAggregation mergeTDigest(int delta) {
return new TDigestAggregation(Kind.MERGE_TDIGEST, delta);
}
static final class HistogramAggregation extends NoParamAggregation {
private HistogramAggregation() {
super(Kind.HISTOGRAM);
}
}
static final class MergeHistogramAggregation extends NoParamAggregation {
private MergeHistogramAggregation() {
super(Kind.MERGE_HISTOGRAM);
}
}
static HistogramAggregation histogram() {
return new HistogramAggregation();
}
static MergeHistogramAggregation mergeHistogram() {
return new MergeHistogramAggregation();
}
static BitAndAggregation bitAnd() {
return new BitAndAggregation();
}
static BitOrAggregation bitOr() {
return new BitOrAggregation();
}
static BitXorAggregation bitXor() {
return new BitXorAggregation();
}
/**
 * Create one of the aggregations that only needs a kind, no other parameters. This does not
 * work for all types and for code safety reasons each kind is added separately.
 * The returned long is a handle to a native aggregation instance.
 */
private static native long createNoParamAgg(int kind);
/**
 * Create an nth aggregation.
 */
private static native long createNthAgg(int offset, boolean includeNulls);
/**
 * Create an aggregation that uses a ddof (delta degrees of freedom, e.g. variance/stddev).
 */
private static native long createDdofAgg(int kind, int ddof);
/**
 * Create an aggregation that is like count including nulls or not.
 */
private static native long createCountLikeAgg(int kind, boolean includeNulls);
/**
 * Create quantile aggregation.
 */
private static native long createQuantAgg(int method, double[] quantiles);
/**
 * Create a lead or lag aggregation.
 */
private static native long createLeadLagAgg(int kind, int offset);
/**
 * Create a collect list aggregation including nulls or not.
 */
private static native long createCollectListAgg(boolean includeNulls);
/**
 * Create a collect set aggregation.
 */
private static native long createCollectSetAgg(boolean includeNulls, boolean nullsEqual, boolean nansEqual);
/**
 * Create a merge sets aggregation.
 */
private static native long createMergeSetsAgg(boolean nullsEqual, boolean nansEqual);
/**
 * Create a TDigest aggregation (used for both TDIGEST and MERGE_TDIGEST kinds).
 */
private static native long createTDigestAgg(int kind, int delta);
/**
 * Create a HOST_UDF aggregation.
 */
private static native long createHostUDFAgg(long udfNativeHandle);
/**
 * Create a bitwise AND aggregation.
 */
private static native long createBitAndAgg();
/**
 * Create a bitwise OR aggregation.
 */
private static native long createBitOrAgg();
/**
 * Create a bitwise XOR aggregation.
 */
private static native long createBitXorAgg();
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/Aggregation128Utils.java
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Utility methods for breaking apart and reassembling 128-bit values during aggregations
 * to enable hash-based aggregations and detect overflows.
 */
public class Aggregation128Utils {
  static {
    NativeDepsLoader.loadNativeDeps();
  }

  /**
   * Extract a 32-bit chunk from a 128-bit value.
   * @param col column of 128-bit values (e.g.: DECIMAL128)
   * @param outType integer type to use for the output column (e.g.: UINT32 or INT32)
   * @param chunkIdx index of the 32-bit chunk to extract where 0 is the least significant chunk
   *                 and 3 is the most significant chunk
   * @return column containing the specified 32-bit chunk of the input column values. A null input
   *         row will result in a corresponding null output row.
   */
  public static ColumnVector extractInt32Chunk(ColumnView col, DType outType, int chunkIdx) {
    final long inputView = col.getNativeView();
    final int nativeTypeId = outType.getTypeId().getNativeId();
    return new ColumnVector(extractInt32Chunk(inputView, nativeTypeId, chunkIdx));
  }

  /**
   * Reassemble a column of 128-bit values from a table of four 64-bit integer columns and check
   * for overflow. The 128-bit value is reconstructed by overlapping the 64-bit values by 32 bits:
   * the low 32 bits of the least significant input become the low 32 bits of the result, the
   * carry (remaining bits) is added into the next input, and so on up the chain.
   *
   * @param chunks table of four 64-bit integer columns ordered from least significant to most
   *               significant. The last column must be of type INT64.
   * @param type the type to use for the resulting 128-bit value column
   * @return table containing a boolean column and a 128-bit value column of the requested type.
   *         The boolean value will be true if an overflow was detected for that row's value when
   *         it was reassembled. A null input row will result in a corresponding null output row.
   */
  public static Table combineInt64SumChunks(Table chunks, DType type) {
    final long chunksView = chunks.getNativeView();
    final int nativeTypeId = type.getTypeId().getNativeId();
    final int scale = type.getScale();
    return new Table(combineInt64SumChunks(chunksView, nativeTypeId, scale));
  }

  private static native long extractInt32Chunk(long columnView, int outTypeId, int chunkIdx);
  private static native long[] combineInt64SumChunks(long chunksTableView, int dtype, int scale);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/AggregationOverWindow.java
|
/*
*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * An Aggregation instance that also holds a column number and window metadata so the aggregation
 * can be done over a specific window.
 */
public final class AggregationOverWindow {
  /** The rolling aggregation (plus its input column index) this window configuration wraps. */
  private final RollingAggregationOnColumn wrapped;
  protected final WindowOptions windowOptions;

  /**
   * @param wrapped the rolling aggregation to apply
   * @param windowOptions the window configuration; must be non-null and must not use
   *                      column-based (dynamic) preceding/following bounds
   * @throws IllegalArgumentException if windowOptions is null
   * @throws UnsupportedOperationException if dynamic (column-based) windows are requested
   */
  AggregationOverWindow(RollingAggregationOnColumn wrapped, WindowOptions windowOptions) {
    this.wrapped = wrapped;
    this.windowOptions = windowOptions;
    if (windowOptions == null) {
      throw new IllegalArgumentException("WindowOptions cannot be null!");
    }
    if (windowOptions.getPrecedingCol() != null || windowOptions.getFollowingCol() != null) {
      throw new UnsupportedOperationException("Dynamic windows (via columns) are currently unsupported!");
    }
  }

  public WindowOptions getWindowOptions() {
    return windowOptions;
  }

  @Override
  public int hashCode() {
    // BUG FIX: previously this was 31 * super.hashCode() + windowOptions.hashCode().
    // This class extends Object, so super.hashCode() is the identity hash; two
    // instances that compare equal via equals() could hash differently, breaking
    // the equals/hashCode contract for hash-based collections. Hash the same
    // fields equals() compares instead.
    return 31 * wrapped.hashCode() + windowOptions.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof AggregationOverWindow) {
      AggregationOverWindow o = (AggregationOverWindow) other;
      return wrapped.equals(o.wrapped) && windowOptions.equals(o.windowOptions);
    }
    return false;
  }

  int getColumnIndex() {
    return wrapped.getColumnIndex();
  }

  long createNativeInstance() {
    return wrapped.createNativeInstance();
  }

  long getDefaultOutput() {
    return wrapped.getDefaultOutput();
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ArrowColumnBuilder.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.nio.ByteBuffer;
import java.util.ArrayList;
/**
 * Column builder from Arrow data. This builder takes in byte buffers referencing
 * Arrow data and allows efficient building of CUDF ColumnVectors from that Arrow data.
 * The caller can add multiple batches where each batch corresponds to Arrow data
 * and those batches get concatenated together after being converted to CUDF
 * ColumnVectors.
 * This currently only supports primitive types and Strings; Decimals and nested types
 * such as list and struct are not supported.
 */
public final class ArrowColumnBuilder implements AutoCloseable {
  // The cudf type of the resulting column. Assigned once in the constructor,
  // so it is declared final (it was previously a mutable field).
  private final DType type;
  // Parallel lists: entry i of each list describes batch i added via addBatch().
  private final ArrayList<ByteBuffer> data = new ArrayList<>();
  private final ArrayList<ByteBuffer> validity = new ArrayList<>();
  private final ArrayList<ByteBuffer> offsets = new ArrayList<>();
  private final ArrayList<Long> nullCount = new ArrayList<>();
  private final ArrayList<Long> rows = new ArrayList<>();

  public ArrowColumnBuilder(HostColumnVector.DataType type) {
    this.type = type.getType();
  }

  /**
   * Add an Arrow buffer. This API allows you to add multiple if you want them
   * combined into a single ColumnVector.
   * Note, this takes all data, validity, and offsets buffers, but they may not all
   * be needed based on the data type. The buffer should be null if its not used
   * for that type.
   * This API only supports primitive types and Strings; Decimals and nested types
   * such as list and struct are not supported.
   * @param rows - number of rows in this Arrow buffer
   * @param nullCount - number of null values in this Arrow buffer
   * @param data - ByteBuffer of the Arrow data buffer
   * @param validity - ByteBuffer of the Arrow validity buffer
   * @param offsets - ByteBuffer of the Arrow offsets buffer
   */
  public void addBatch(long rows, long nullCount, ByteBuffer data, ByteBuffer validity,
                       ByteBuffer offsets) {
    this.rows.add(rows);
    this.nullCount.add(nullCount);
    this.data.add(data);
    this.validity.add(validity);
    this.offsets.add(offsets);
  }

  /**
   * Create the immutable ColumnVector, copied to the device based on the Arrow data.
   * @return - new ColumnVector
   * @throws IllegalStateException if no batches were added via addBatch()
   */
  public final ColumnVector buildAndPutOnDevice() {
    int numBatches = rows.size();
    ArrayList<ColumnVector> allVecs = new ArrayList<>(numBatches);
    ColumnVector vecRet;
    try {
      for (int i = 0; i < numBatches; i++) {
        allVecs.add(ColumnVector.fromArrow(type, rows.get(i), nullCount.get(i),
            data.get(i), validity.get(i), offsets.get(i)));
      }
      if (numBatches == 1) {
        // Single batch: hand ownership of the one vector straight to the caller.
        vecRet = allVecs.get(0);
      } else if (numBatches > 1) {
        vecRet = ColumnVector.concatenate(allVecs.toArray(new ColumnVector[0]));
      } else {
        throw new IllegalStateException("Can't build a ColumnVector when no Arrow batches specified");
      }
    } finally {
      // Close the per-batch vectors that were concatenated (or partially created
      // if an exception escaped). In the single-batch case the vector is returned
      // to the caller and must not be closed here.
      if (numBatches > 1) {
        allVecs.forEach(cv -> cv.close());
      }
    }
    return vecRet;
  }

  @Override
  public void close() {
    // memory buffers owned outside of this
  }

  @Override
  public String toString() {
    return "ArrowColumnBuilder{" +
        "type=" + type +
        ", data=" + data +
        ", validity=" + validity +
        ", offsets=" + offsets +
        ", nullCount=" + nullCount +
        ", rows=" + rows +
        '}';
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ArrowIPCOptions.java
|
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Options for reading data in Arrow IPC format
 */
public class ArrowIPCOptions {
  /** Callback interface used to signal that GPU work is about to start. */
  public interface NeedGpu {
    /**
     * A callback to indicate that we are about to start putting data on the GPU.
     */
    void needTheGpu();
  }

  // Declared final so the shared default instance cannot be reassigned
  // (it was previously a mutable public static field; ArrowIPCWriterOptions.DEFAULT
  // is already final, so this also restores consistency between the two classes).
  public static final ArrowIPCOptions DEFAULT = new ArrowIPCOptions(new Builder());

  private final NeedGpu callback;

  private ArrowIPCOptions(Builder builder) {
    this.callback = builder.callback;
  }

  /** @return the configured callback; never null (a no-op is used by default). */
  public NeedGpu getCallback() {
    return callback;
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder {
    private NeedGpu callback = () -> {};

    /**
     * Set the callback to invoke before GPU work starts.
     * A null argument is normalized to a no-op so getCallback() never returns null.
     */
    public Builder withCallback(NeedGpu callback) {
      if (callback == null) {
        this.callback = () -> {};
      } else {
        this.callback = callback;
      }
      return this;
    }

    public ArrowIPCOptions build() {
      return new ArrowIPCOptions(this);
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ArrowIPCWriterOptions.java
|
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Settings for writing Arrow IPC data.
 */
public class ArrowIPCWriterOptions extends WriterOptions {
  /** Callback used to signal that a table's data is no longer needed on the GPU. */
  public interface DoneOnGpu {
    /**
     * A callback to indicate that the table is off of the GPU
     * and may be closed, even if all of the data is not yet written.
     * @param table the table that can be closed.
     */
    void doneWithTheGpu(Table table);
  }

  private final long size;
  private final DoneOnGpu callback;

  private ArrowIPCWriterOptions(Builder builder) {
    super(builder);
    this.size = builder.size;
    this.callback = builder.callback;
  }

  /** @return the configured maximum chunk size, or -1 if none was set. */
  public long getMaxChunkSize() {
    return size;
  }

  /** @return the done-on-GPU callback; never null (a no-op is used by default). */
  public DoneOnGpu getCallback() {
    return callback;
  }

  public static class Builder extends WriterBuilder<Builder> {
    // -1 means "no explicit chunk size configured".
    private long size = -1;
    private DoneOnGpu callback = (ignored) -> {};

    public Builder withMaxChunkSize(long size) {
      this.size = size;
      return this;
    }

    public Builder withCallback(DoneOnGpu callback) {
      // Normalize null to a no-op so getCallback() never returns null.
      this.callback = (callback == null) ? (ignored) -> {} : callback;
      return this;
    }

    /**
     * Add the name(s) for nullable column(s).
     *
     * Please note the column names of the nested struct columns should be flattened in sequence.
     * For examples,
     * <pre>
     * A table with an int column and a struct column:
     *   ["int_col", "struct_col":{"field_1", "field_2"}]
     * output:
     *   ["int_col", "struct_col", "field_1", "field_2"]
     *
     * A table with an int column and a list of non-nested type column:
     *   ["int_col", "list_col":[]]
     * output:
     *   ["int_col", "list_col"]
     *
     * A table with an int column and a list of struct column:
     *   ["int_col", "list_struct_col":[{"field_1", "field_2"}]]
     * output:
     *   ["int_col", "list_struct_col", "field_1", "field_2"]
     * </pre>
     *
     * @param columnNames The column names corresponding to the written table(s).
     */
    @Override
    public Builder withColumnNames(String... columnNames) {
      return super.withColumnNames(columnNames);
    }

    /**
     * Add the name(s) for non-nullable column(s).
     *
     * The same flattening rules apply as for {@link #withColumnNames(String...)}:
     * nested struct field names follow their parent column name in sequence,
     * e.g. ["int_col", "struct_col":{"field_1", "field_2"}] becomes
     * ["int_col", "struct_col", "field_1", "field_2"].
     *
     * @param columnNames The column names corresponding to the written table(s).
     */
    @Override
    public Builder withNotNullableColumnNames(String... columnNames) {
      return super.withNotNullableColumnNames(columnNames);
    }

    public ArrowIPCWriterOptions build() {
      return new ArrowIPCWriterOptions(this);
    }
  }

  public static final ArrowIPCWriterOptions DEFAULT = new ArrowIPCWriterOptions(new Builder());

  public static Builder builder() {
    return new Builder();
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/AssertEmptyNulls.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * This class is a Helper class to assert there are no non-empty nulls in a ColumnView
 *
 * The reason for the existence of this class is so that we can turn the asserts on/off when needed
 * by passing "-da:ai.rapids.cudf.AssertEmptyNulls". We need that behavior because we have tests
 * that explicitly test with ColumnViews that contain non-empty nulls but more importantly, there
 * could be cases where an external system may not have a requirement of nulls being empty, so for
 * us to work with those systems, we can turn off this assert in the field.
 */
public class AssertEmptyNulls {
  // Static-only utility class; prevent instantiation.
  private AssertEmptyNulls() {
  }

  /**
   * Assert (when -ea is enabled for this class) that the given column has no
   * non-empty null rows. Only nested types and types with offsets can carry
   * non-empty nulls, so other types are skipped without the native check.
   */
  public static void assertNullsAreEmpty(ColumnView cv) {
    if (cv.type.isNestedType() || cv.type.hasOffsets()) {
      assert !cv.hasNonEmptyNulls() : "Column has non-empty nulls";
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/AvroOptions.java
|
/*
*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Options for reading an Avro file
 */
public class AvroOptions extends ColumnFilterOptions {
  // Declared final so the shared default instance cannot be reassigned
  // (it was previously a mutable public static field).
  public static final AvroOptions DEFAULT = new AvroOptions(new Builder());

  private AvroOptions(Builder builder) {
    super(builder);
  }

  /** @return a new builder for constructing AvroOptions. */
  public static Builder builder() {
    return new Builder();
  }

  public static class Builder extends ColumnFilterOptions.Builder<Builder> {
    public AvroOptions build() {
      return new AvroOptions(this);
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/BaseDeviceMemoryBuffer.java
|
/*
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Base class for all MemoryBuffers that are in device memory.
 *
 * All copy methods bounds-check both source and destination before issuing the
 * CUDA memcpy. Methods without a stream parameter use the default-stream
 * synchronous path; "Async" variants may return before the copy has completed.
 */
public abstract class BaseDeviceMemoryBuffer extends MemoryBuffer {
  protected BaseDeviceMemoryBuffer(long address, long length, MemoryBuffer parent) {
    super(address, length, parent);
  }

  protected BaseDeviceMemoryBuffer(long address, long length, MemoryBufferCleaner cleaner) {
    super(address, length, cleaner);
  }

  /**
   * Copy a subset of src to this buffer starting at destOffset.
   * @param destOffset the offset in this to start copying from.
   * @param src what to copy from
   * @param srcOffset offset into src to start out
   * @param length how many bytes to copy
   */
  public final void copyFromHostBuffer(long destOffset, HostMemoryBuffer src, long srcOffset, long length) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.memcpy(address + destOffset, src.address + srcOffset, length, CudaMemcpyKind.HOST_TO_DEVICE);
  }

  /**
   * Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
   * The copy has completed when this returns, but the memory copy could overlap with
   * operations occurring on other streams.
   * @param destOffset the offset in this to start copying from.
   * @param src what to copy from
   * @param srcOffset offset into src to start out
   * @param length how many bytes to copy
   * @param stream CUDA stream to use
   */
  public final void copyFromHostBuffer(long destOffset, HostMemoryBuffer src,
                                       long srcOffset, long length, Cuda.Stream stream) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.memcpy(address + destOffset, src.address + srcOffset, length,
        CudaMemcpyKind.HOST_TO_DEVICE, stream);
  }

  /**
   * Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
   * The copy is async and may not have completed when this returns.
   * @param destOffset the offset in this to start copying from.
   * @param src what to copy from
   * @param srcOffset offset into src to start out
   * @param length how many bytes to copy
   * @param stream CUDA stream to use
   */
  public final void copyFromHostBufferAsync(long destOffset, HostMemoryBuffer src,
                                            long srcOffset, long length, Cuda.Stream stream) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.asyncMemcpy(address + destOffset, src.address + srcOffset, length,
        CudaMemcpyKind.HOST_TO_DEVICE, stream);
  }

  /**
   * Copy a subset of src (another device buffer) to this buffer starting at destOffset
   * using the specified CUDA stream.
   * The copy is async and may not have completed when this returns.
   * @param destOffset the offset in this to start copying from.
   * @param src what to copy from
   * @param srcOffset offset into src to start out
   * @param length how many bytes to copy
   * @param stream CUDA stream to use
   */
  public final void copyFromDeviceBufferAsync(long destOffset, BaseDeviceMemoryBuffer src,
                                              long srcOffset, long length, Cuda.Stream stream) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.asyncMemcpy(address + destOffset, src.address + srcOffset, length,
        CudaMemcpyKind.DEVICE_TO_DEVICE, stream);
  }

  /**
   * Copy a subset of src to this buffer starting at the beginning of this.
   * @param src what to copy from
   * @param srcOffset offset into src to start out
   * @param length how many bytes to copy
   */
  public final void copyFromHostBuffer(HostMemoryBuffer src, long srcOffset, long length) {
    copyFromHostBuffer(0, src, srcOffset, length);
  }

  /**
   * Copy everything from src to this buffer starting at the beginning of this buffer.
   * @param src - Buffer to copy data from
   */
  public final void copyFromHostBuffer(HostMemoryBuffer src) {
    copyFromHostBuffer(0, src, 0, src.length);
  }

  /**
   * Copy entire host buffer starting at the beginning of this buffer using a CUDA stream.
   * The copy has completed when this returns, but the memory copy could overlap with
   * operations occurring on other streams.
   * @param src host buffer to copy from
   * @param stream CUDA stream to use
   */
  public final void copyFromHostBuffer(HostMemoryBuffer src, Cuda.Stream stream) {
    copyFromHostBuffer(0, src, 0, src.length, stream);
  }

  /**
   * Copy entire host buffer starting at the beginning of this buffer using a CUDA stream.
   * The copy is async and may not have completed when this returns.
   * @param src host buffer to copy from
   * @param stream CUDA stream to use
   */
  public final void copyFromHostBufferAsync(HostMemoryBuffer src, Cuda.Stream stream) {
    copyFromHostBufferAsync(0, src, 0, src.length, stream);
  }

  /**
   * Slice off a part of the device buffer, copying it instead of reference counting it.
   * @param offset where to start the slice at.
   * @param len how many bytes to slice
   * @return a device buffer that will need to be closed independently from this buffer.
   */
  public final DeviceMemoryBuffer sliceWithCopy(long offset, long len) {
    addressOutOfBoundsCheck(address + offset, len, "slice");
    DeviceMemoryBuffer ret = null;
    boolean success = false;
    try {
      ret = DeviceMemoryBuffer.allocate(len);
      Cuda.memcpy(ret.getAddress(), getAddress() + offset, len, CudaMemcpyKind.DEVICE_TO_DEVICE);
      success = true;
      return ret;
    } finally {
      // If allocation succeeded but the copy threw, release the new buffer so it
      // does not leak; on success the caller takes ownership.
      if (!success && ret != null) {
        ret.close();
      }
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/BinaryOp.java
|
/*
* Copyright (c) 2019-2020,2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import java.util.EnumSet;
/**
 * Mathematical binary operations. Each constant carries the id the native cudf
 * library uses for the corresponding operator.
 */
public enum BinaryOp {
  ADD(0),
  SUB(1),
  MUL(2),
  DIV(3), // divide using common type of lhs and rhs
  TRUE_DIV(4), // divide after promoting to FLOAT64 point
  FLOOR_DIV(5), // divide after promoting to FLOAT64 and flooring the result
  MOD(6),
  PMOD(7), // pmod
  PYMOD(8), // mod operator % follow by python's sign rules for negatives
  POW(9),
  INT_POW(10), // int ^ int, used to avoid floating point precision loss
  LOG_BASE(11), // logarithm to the base
  ATAN2(12), // atan2
  SHIFT_LEFT(13), // bitwise shift left (<<)
  SHIFT_RIGHT(14), // bitwise shift right (>>)
  SHIFT_RIGHT_UNSIGNED(15), // bitwise shift right (>>>)
  BITWISE_AND(16),
  BITWISE_OR(17),
  BITWISE_XOR(18),
  LOGICAL_AND(19),
  LOGICAL_OR(20),
  EQUAL(21),
  NOT_EQUAL(22),
  LESS(23),
  GREATER(24),
  LESS_EQUAL(25), // <=
  GREATER_EQUAL(26), // >=
  NULL_EQUALS(27), // like EQUAL but NULL == NULL is TRUE and NULL == not NULL is FALSE
  NULL_NOT_EQUALS(28), // negation of NULL_EQUALS
  NULL_MAX(29), // MAX but NULL < not NULL
  NULL_MIN(30), // MIN but NULL > not NULL
  // native id 31 is reserved for GENERIC_BINARY, which is not implemented here
  NULL_LOGICAL_AND(32),
  NULL_LOGICAL_OR(33);

  /** All comparison operators that yield a boolean result. */
  static final EnumSet<BinaryOp> COMPARISON = EnumSet.of(
      EQUAL, NOT_EQUAL, LESS, GREATER, LESS_EQUAL, GREATER_EQUAL);
  /** Ordering comparisons only (excludes equality checks). */
  static final EnumSet<BinaryOp> INEQUALITY_COMPARISON = EnumSet.of(
      LESS, GREATER, LESS_EQUAL, GREATER_EQUAL);

  // values() allocates a fresh array on every call; cache one copy for lookups.
  private static final BinaryOp[] OPS = BinaryOp.values();

  /** The id the native cudf library uses for this operator. */
  final int nativeId;

  BinaryOp(int nativeId) {
    this.nativeId = nativeId;
  }

  /**
   * Map a native cudf operator id back to its BinaryOp constant.
   * @throws IllegalArgumentException if no constant has the given id
   */
  static BinaryOp fromNative(int nativeId) {
    for (int i = 0; i < OPS.length; i++) {
      if (OPS[i].nativeId == nativeId) {
        return OPS[i];
      }
    }
    throw new IllegalArgumentException("Could not translate " + nativeId + " into a BinaryOp");
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/BinaryOperable.java
|
/*
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
public interface BinaryOperable {
/**
 * Finds the proper DType for an implicit output. This follows the typical rules of
 * C++, Java, and most SQL implementations.
 * FLOAT64/double >
 * FLOAT32/float >
 * INT64/long >
 * INT32/int >
 * INT16/short >
 * INT8/byte/char
 * <p>
 * Currently most TIMESTAMPs are treated the same as INT64. TIMESTAMP_DAYS is treated the same
 * as INT32. All time information is stripped from them. This may change in the future.
 * <p>
 * BOOL8 is treated like an INT8. Math on boolean operations makes little sense. If
 * you want to stay as a BOOL8 you will need to explicitly specify the output type.
 * For decimal types, DECIMAL32 and DECIMAL64 takes in another parameter `scale`. DType is created
 * with scale=0 as scale is required. Dtype is discarded for binary operations for decimal
 * types in cudf as a new DType is created for output type with the new scale.
 */
static DType implicitConversion(BinaryOp op, BinaryOperable lhs, BinaryOperable rhs) {
  DType a = lhs.getType();
  DType b = rhs.getType();
  // Checks are ordered from widest to narrowest type; the first match wins,
  // so the wider of the two operand types determines the result.
  if (a.equals(DType.FLOAT64) || b.equals(DType.FLOAT64)) {
    return DType.FLOAT64;
  }
  if (a.equals(DType.FLOAT32) || b.equals(DType.FLOAT32)) {
    return DType.FLOAT32;
  }
  if (a.equals(DType.UINT64) || b.equals(DType.UINT64)) {
    return DType.UINT64;
  }
  // All non-day timestamps collapse to INT64 (time information is stripped).
  if (a.equals(DType.INT64) || b.equals(DType.INT64) ||
      a.equals(DType.TIMESTAMP_MILLISECONDS) || b.equals(DType.TIMESTAMP_MILLISECONDS) ||
      a.equals(DType.TIMESTAMP_MICROSECONDS) || b.equals(DType.TIMESTAMP_MICROSECONDS) ||
      a.equals(DType.TIMESTAMP_SECONDS) || b.equals(DType.TIMESTAMP_SECONDS) ||
      a.equals(DType.TIMESTAMP_NANOSECONDS) || b.equals(DType.TIMESTAMP_NANOSECONDS)) {
    return DType.INT64;
  }
  if (a.equals(DType.UINT32) || b.equals(DType.UINT32)) {
    return DType.UINT32;
  }
  if (a.equals(DType.INT32) || b.equals(DType.INT32) ||
      a.equals(DType.TIMESTAMP_DAYS) || b.equals(DType.TIMESTAMP_DAYS)) {
    return DType.INT32;
  }
  if (a.equals(DType.UINT16) || b.equals(DType.UINT16)) {
    return DType.UINT16;
  }
  if (a.equals(DType.INT16) || b.equals(DType.INT16)) {
    return DType.INT16;
  }
  if (a.equals(DType.UINT8) || b.equals(DType.UINT8)) {
    return DType.UINT8;
  }
  if (a.equals(DType.INT8) || b.equals(DType.INT8)) {
    return DType.INT8;
  }
  if (a.equals(DType.BOOL8) || b.equals(DType.BOOL8)) {
    return DType.BOOL8;
  }
  if (a.isDecimalType() && b.isDecimalType()) {
    if (a.typeId != b.typeId) {
      throw new IllegalArgumentException("Both columns must be of the same fixed_point type");
    }
    // The output scale is computed natively from the op and the input scales.
    final int scale = ColumnView.getFixedPointOutputScale(op, lhs.getType(), rhs.getType());
    // The output precision/size should be at least as large as the input.
    // It may be larger if room is needed for it based off of the output scale.
    final DType.DTypeEnum outputEnum;
    if (scale <= DType.DECIMAL32_MAX_PRECISION && a.typeId == DType.DTypeEnum.DECIMAL32) {
      outputEnum = DType.DTypeEnum.DECIMAL32;
    } else if (scale <= DType.DECIMAL64_MAX_PRECISION &&
        (a.typeId == DType.DTypeEnum.DECIMAL32 || a.typeId == DType.DTypeEnum.DECIMAL64)) {
      outputEnum = DType.DTypeEnum.DECIMAL64;
    } else {
      outputEnum = DType.DTypeEnum.DECIMAL128;
    }
    return DType.create(outputEnum, scale);
  }
  throw new IllegalArgumentException("Unsupported types " + a + " and " + b);
}
  /**
   * Get the type of this data.
   * @return the DType of the underlying data.
   */
  DType getType();
  /**
   * Multiple different binary operations.
   * @param op the operation to perform
   * @param rhs the rhs of the operation
   * @param outType the type of output you want.
   * @return the result
   */
  ColumnVector binaryOp(BinaryOp op, BinaryOperable rhs, DType outType);
  // ---------------------------------------------------------------------------
  // Arithmetic operations. Each comes in two flavors: one that takes an
  // explicit output DType, and a convenience overload that derives the output
  // type via implicitConversion(op, this, rhs).
  // ---------------------------------------------------------------------------
  /**
   * Add one vector to another with the given output type. this + rhs
   * For decimal inputs the requested output type is ignored; the result is
   * always a decimal type derived from the inputs.
   */
  default ColumnVector add(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.ADD, rhs, outType);
  }
  /**
   * Add + operator. this + rhs
   */
  default ColumnVector add(BinaryOperable rhs) {
    return add(rhs, implicitConversion(BinaryOp.ADD, this, rhs));
  }
  /**
   * Subtract one vector from another with the given output type. this - rhs
   * For decimal inputs the requested output type is ignored; the result is
   * always a decimal type derived from the inputs.
   */
  default ColumnVector sub(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.SUB, rhs, outType);
  }
  /**
   * Subtract one vector from another. this - rhs
   */
  default ColumnVector sub(BinaryOperable rhs) {
    return sub(rhs, implicitConversion(BinaryOp.SUB, this, rhs));
  }
  /**
   * Multiply two vectors together with the given output type. this * rhs
   * For decimal inputs the requested output type is ignored; the result is
   * always a decimal type derived from the inputs.
   */
  default ColumnVector mul(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.MUL, rhs, outType);
  }
  /**
   * Multiply two vectors together. this * rhs
   */
  default ColumnVector mul(BinaryOperable rhs) {
    return mul(rhs, implicitConversion(BinaryOp.MUL, this, rhs));
  }
  /**
   * Divide one vector by another with the given output type. this / rhs
   * For decimal inputs the requested output type is ignored; the result is
   * always a decimal type derived from the inputs.
   */
  default ColumnVector div(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.DIV, rhs, outType);
  }
  /**
   * Divide one vector by another. this / rhs
   */
  default ColumnVector div(BinaryOperable rhs) {
    return div(rhs, implicitConversion(BinaryOp.DIV, this, rhs));
  }
  /**
   * Divide one vector by another converting to FLOAT64 in between with the given output type.
   * (double)this / (double)rhs
   */
  default ColumnVector trueDiv(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.TRUE_DIV, rhs, outType);
  }
  /**
   * Divide one vector by another converting to FLOAT64 in between.
   * (double)this / (double)rhs
   */
  default ColumnVector trueDiv(BinaryOperable rhs) {
    return trueDiv(rhs, implicitConversion(BinaryOp.TRUE_DIV, this, rhs));
  }
  /**
   * Divide one vector by another and calculate the floor of the result with the given output type.
   * Math.floor(this/rhs)
   */
  default ColumnVector floorDiv(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.FLOOR_DIV, rhs, outType);
  }
  /**
   * Divide one vector by another and calculate the floor of the result.
   * Math.floor(this/rhs)
   */
  default ColumnVector floorDiv(BinaryOperable rhs) {
    return floorDiv(rhs, implicitConversion(BinaryOp.FLOOR_DIV, this, rhs));
  }
  /**
   * Compute the modulus with the given output type.
   * this % rhs
   */
  default ColumnVector mod(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.MOD, rhs, outType);
  }
  /**
   * Compute the modulus.
   * this % rhs
   */
  default ColumnVector mod(BinaryOperable rhs) {
    return mod(rhs, implicitConversion(BinaryOp.MOD, this, rhs));
  }
  /**
   * Compute the power with the given output type.
   * Math.pow(this, rhs)
   */
  default ColumnVector pow(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.POW, rhs, outType);
  }
  /**
   * Compute the power.
   * Math.pow(this, rhs)
   */
  default ColumnVector pow(BinaryOperable rhs) {
    return pow(rhs, implicitConversion(BinaryOp.POW, this, rhs));
  }
  // ---------------------------------------------------------------------------
  // Comparison operations. The convenience overloads default the output type
  // to BOOL8 rather than using implicitConversion.
  // ---------------------------------------------------------------------------
  /**
   * this == rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector equalTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.EQUAL, rhs, outType);
  }
  /**
   * this == rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector equalTo(BinaryOperable rhs) {
    return equalTo(rhs, DType.BOOL8);
  }
  /**
   * this != rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector notEqualTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NOT_EQUAL, rhs, outType);
  }
  /**
   * this != rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector notEqualTo(BinaryOperable rhs) {
    return notEqualTo(rhs, DType.BOOL8);
  }
  /**
   * this < rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector lessThan(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LESS, rhs, outType);
  }
  /**
   * this < rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector lessThan(BinaryOperable rhs) {
    return lessThan(rhs, DType.BOOL8);
  }
  /**
   * this > rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector greaterThan(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.GREATER, rhs, outType);
  }
  /**
   * this > rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector greaterThan(BinaryOperable rhs) {
    return greaterThan(rhs, DType.BOOL8);
  }
  /**
   * this <= rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector lessOrEqualTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LESS_EQUAL, rhs, outType);
  }
  /**
   * this <= rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector lessOrEqualTo(BinaryOperable rhs) {
    return lessOrEqualTo(rhs, DType.BOOL8);
  }
  /**
   * this >= rhs 1 is true 0 is false with the output cast to the given type.
   */
  default ColumnVector greaterOrEqualTo(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.GREATER_EQUAL, rhs, outType);
  }
  /**
   * this >= rhs 1 is true 0 is false. The output type is BOOL8.
   */
  default ColumnVector greaterOrEqualTo(BinaryOperable rhs) {
    return greaterOrEqualTo(rhs, DType.BOOL8);
  }
  // ---------------------------------------------------------------------------
  // Bitwise and logical operations.
  // ---------------------------------------------------------------------------
  /**
   * Bit wise and (&) with the given output type. this & rhs
   */
  default ColumnVector bitAnd(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.BITWISE_AND, rhs, outType);
  }
  /**
   * Bit wise and (&). this & rhs
   */
  default ColumnVector bitAnd(BinaryOperable rhs) {
    return bitAnd(rhs, implicitConversion(BinaryOp.BITWISE_AND, this, rhs));
  }
  /**
   * Bit wise or (|) with the given output type. this | rhs
   */
  default ColumnVector bitOr(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.BITWISE_OR, rhs, outType);
  }
  /**
   * Bit wise or (|). this | rhs
   */
  default ColumnVector bitOr(BinaryOperable rhs) {
    return bitOr(rhs, implicitConversion(BinaryOp.BITWISE_OR, this, rhs));
  }
  /**
   * Bit wise xor (^) with the given output type. this ^ rhs
   */
  default ColumnVector bitXor(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.BITWISE_XOR, rhs, outType);
  }
  /**
   * Bit wise xor (^). this ^ rhs
   */
  default ColumnVector bitXor(BinaryOperable rhs) {
    return bitXor(rhs, implicitConversion(BinaryOp.BITWISE_XOR, this, rhs));
  }
  /**
   * Logical and (&&) with the given output type. this && rhs
   */
  default ColumnVector and(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LOGICAL_AND, rhs, outType);
  }
  /**
   * Logical and (&&). this && rhs
   */
  default ColumnVector and(BinaryOperable rhs) {
    return and(rhs, implicitConversion(BinaryOp.LOGICAL_AND, this, rhs));
  }
  /**
   * Logical or (||) with the given output type. this || rhs
   */
  default ColumnVector or(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LOGICAL_OR, rhs, outType);
  }
  /**
   * Logical or (||). this || rhs
   */
  default ColumnVector or(BinaryOperable rhs) {
    return or(rhs, implicitConversion(BinaryOp.LOGICAL_OR, this, rhs));
  }
  // ---------------------------------------------------------------------------
  // Shift operations. Scalar/vector combinations broadcast as documented on
  // each method.
  // ---------------------------------------------------------------------------
  /**
   * Bitwise left shifts the values of this vector by shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] << shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   * with the scalar << shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   * with this[i] << shiftBy
   *
   */
  default ColumnVector shiftLeft(BinaryOperable shiftBy, DType outType) {
    return binaryOp(BinaryOp.SHIFT_LEFT, shiftBy, outType);
  }
  /**
   * Bitwise left shift the values of this vector by the shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] << shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   * with the scalar << shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   * with this[i] << shiftBy
   */
  default ColumnVector shiftLeft(BinaryOperable shiftBy) {
    return shiftLeft(shiftBy, implicitConversion(BinaryOp.SHIFT_LEFT, this, shiftBy));
  }
  /**
   * Bitwise (arithmetic, sign-filling) right shift this vector by the shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] >> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   * with the scalar >> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   * with this[i] >> shiftBy
   */
  default ColumnVector shiftRight(BinaryOperable shiftBy, DType outType) {
    return binaryOp(BinaryOp.SHIFT_RIGHT, shiftBy, outType);
  }
  /**
   * Bitwise (arithmetic, sign-filling) right shift this vector by the shiftBy.
   *
   * If "this" and shiftBy are both vectors then, this[i] >> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   * with the scalar >> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   * with this[i] >> shiftBy
   */
  default ColumnVector shiftRight(BinaryOperable shiftBy) {
    return shiftRight(shiftBy, implicitConversion(BinaryOp.SHIFT_RIGHT, this, shiftBy));
  }
  /**
   * This method bitwise right shifts the values of this vector by the shiftBy.
   * This method always fills 0 irrespective of the sign of the number.
   *
   * If "this" and shiftBy are both vectors then, this[i] >>> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   * with the scalar >>> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   * with this[i] >>> shiftBy
   */
  default ColumnVector shiftRightUnsigned(BinaryOperable shiftBy, DType outType) {
    return binaryOp(BinaryOp.SHIFT_RIGHT_UNSIGNED, shiftBy, outType);
  }
  /**
   * This method bitwise right shifts the values of this vector by the shiftBy.
   * This method always fills 0 irrespective of the sign of the number.
   *
   * If "this" and shiftBy are both vectors then, this[i] >>> shiftBy[i]
   * If "this" is a scalar and shiftBy is a vector then returns a vector of size shiftBy.rows
   * with the scalar >>> shiftBy[i]
   * If "this" is a vector and shiftBy is a scalar then returns a vector of size this.rows
   * with this[i] >>> shiftBy
   */
  default ColumnVector shiftRightUnsigned(BinaryOperable shiftBy) {
    return shiftRightUnsigned(shiftBy, implicitConversion(BinaryOp.SHIFT_RIGHT_UNSIGNED, this,
        shiftBy));
  }
  // ---------------------------------------------------------------------------
  // Math and null-aware operations.
  // ---------------------------------------------------------------------------
  /**
   * Calculate the log with the specified base
   */
  default ColumnVector log(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.LOG_BASE, rhs, outType);
  }
  /**
   * Calculate the log with the specified base, output is the same as this.
   */
  default ColumnVector log(BinaryOperable rhs) {
    return log(rhs, getType());
  }
  /**
   * The function arctan2(y,x) or atan2(y,x) is defined as the angle in the Euclidean plane, given
   * in radians, between the positive x axis and the ray to the point (x, y) ≠ (0, 0).
   */
  default ColumnVector arctan2(BinaryOperable xCoordinate, DType outType) {
    return binaryOp(BinaryOp.ATAN2, xCoordinate, outType);
  }
  /**
   * The function arctan2(y,x) or atan2(y,x) is defined as the angle in the Euclidean plane, given
   * in radians, between the positive x axis and the ray to the point (x, y) ≠ (0, 0).
   */
  default ColumnVector arctan2(BinaryOperable xCoordinate) {
    return arctan2(xCoordinate, implicitConversion(BinaryOp.ATAN2, this, xCoordinate));
  }
  /**
   * Returns the positive value of lhs mod rhs.
   *
   * r = lhs % rhs
   * if r < 0 then (r + rhs) % rhs
   * else r
   *
   */
  default ColumnVector pmod(BinaryOperable rhs, DType outputType) {
    return binaryOp(BinaryOp.PMOD, rhs, outputType);
  }
  /**
   * Returns the positive value of lhs mod rhs.
   *
   * r = lhs % rhs
   * if r < 0 then (r + rhs) % rhs
   * else r
   *
   */
  default ColumnVector pmod(BinaryOperable rhs) {
    return pmod(rhs, implicitConversion(BinaryOp.PMOD, this, rhs));
  }
  /**
   * like equalTo but NULL == NULL is TRUE and NULL == not NULL is FALSE
   */
  default ColumnVector equalToNullAware(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NULL_EQUALS, rhs, outType);
  }
  /**
   * like equalTo but NULL == NULL is TRUE and NULL == not NULL is FALSE
   */
  default ColumnVector equalToNullAware(BinaryOperable rhs) {
    return equalToNullAware(rhs, DType.BOOL8);
  }
  /**
   * like notEqualTo but NULL != NULL is TRUE and NULL != not NULL is FALSE
   */
  default ColumnVector notEqualToNullAware(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NULL_NOT_EQUALS, rhs, outType);
  }
  /**
   * like notEqualTo but NULL != NULL is TRUE and NULL != not NULL is FALSE
   */
  default ColumnVector notEqualToNullAware(BinaryOperable rhs) {
    return notEqualToNullAware(rhs, DType.BOOL8);
  }
  /**
   * Returns the max non null value.
   */
  default ColumnVector maxNullAware(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NULL_MAX, rhs, outType);
  }
  /**
   * Returns the max non null value.
   */
  default ColumnVector maxNullAware(BinaryOperable rhs) {
    return maxNullAware(rhs, implicitConversion(BinaryOp.NULL_MAX, this, rhs));
  }
  /**
   * Returns the min non null value.
   */
  default ColumnVector minNullAware(BinaryOperable rhs, DType outType) {
    return binaryOp(BinaryOp.NULL_MIN, rhs, outType);
  }
  /**
   * Returns the min non null value.
   */
  default ColumnVector minNullAware(BinaryOperable rhs) {
    return minNullAware(rhs, implicitConversion(BinaryOp.NULL_MIN, this, rhs));
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/BitVectorHelper.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * This class does bit manipulation on validity (null mask) vectors using
 * plain byte arithmetic. A validity vector stores one bit per row; a bit
 * value of 0 means the row is null (see {@link #isNull}).
 */
final class BitVectorHelper {
  /**
   * Appends the validity bits from src to dst starting at bit offset dstOffset,
   * shifting src left as needed when the destination is not byte aligned.
   * @param src       source validity buffer, read starting at bit 0.
   * @param dst       destination validity buffer.
   * @param dstOffset destination offset in bits.
   * @param rows      number of validity bits to copy.
   */
  static void append(HostMemoryBuffer src, HostMemoryBuffer dst, long dstOffset, long rows) {
    assert dst.length * 8 - dstOffset >= rows : "validity vector bigger then available space on " +
        "dst: " + (dst.length * 8 - dstOffset) + " copying space needed: " + rows;
    long dstByteIndex = dstOffset / 8;
    int shiftBits = (int) (dstOffset % 8);
    if (shiftBits > 0) {
      // Destination is not byte aligned: every src byte must be shifted into place.
      shiftSrcLeftAndWriteToDst(src, dst, dstByteIndex, shiftBits, rows);
    } else {
      // Byte aligned: a straight byte copy is sufficient.
      dst.copyFromHostBuffer(dstByteIndex, src, 0, getValidityLengthInBytes(rows));
    }
  }

  /**
   * Shifts src left by shiftByBits and writes 'length' bits worth of data into
   * dst starting at byte offset dstOffset, preserving the low bits already
   * present in the first destination byte.
   */
  private static void shiftSrcLeftAndWriteToDst(HostMemoryBuffer src, HostMemoryBuffer dst,
      long dstOffset, int shiftByBits, long length) {
    assert shiftByBits > 0 && shiftByBits < 8 : "shiftByBits out of range";
    // Mask for the low 'shiftByBits' bits of the first dst byte that must be kept.
    int dstMask = 0xFF >> (8 - shiftByBits);
    // the mask to save the left side of the bits before we shift
    int srcLeftMask = dstMask << (8 - shiftByBits);
    int valueFromTheLeftOfTheLastByte = dst.getByte(dstOffset) & dstMask;
    long i;
    long byteLength = getValidityLengthInBytes(length);
    for (i = 0; i < byteLength; i++) {
      int b = src.getByte(i);
      // Bits that fall off the left edge become the low bits of the next byte.
      int fallingBitsOnTheLeft = b & srcLeftMask;
      b <<= shiftByBits;
      b |= valueFromTheLeftOfTheLastByte;
      dst.setByte(dstOffset + i, (byte) b);
      valueFromTheLeftOfTheLastByte = fallingBitsOnTheLeft >>> (8 - shiftByBits);
    }
    if (((length % 8) + shiftByBits > 8) || length % 8 == 0) {
      // Only when the shifted data spills over into one more byte do we need
      // this final write. The high (unused) bits are filled with 1s.
      dst.setByte(dstOffset + i, (byte) (valueFromTheLeftOfTheLastByte | ~dstMask));
    }
  }

  /**
   * Returns the length in bytes needed to represent X number of rows
   * e.g. getValidityLengthInBytes(5) => 1 byte
   * getValidityLengthInBytes(7) => 1 byte
   * getValidityLengthInBytes(14) => 2 bytes
   */
  static long getValidityLengthInBytes(long rows) {
    return (rows + 7) / 8;
  }

  /**
   * Returns the allocation size of the validity vector which is 64-byte aligned
   * e.g. getValidityAllocationSizeInBytes(5) => 64 bytes
   * getValidityAllocationSizeInBytes(14) => 64 bytes
   * getValidityAllocationSizeInBytes(65) => 128 bytes
   */
  static long getValidityAllocationSizeInBytes(long rows) {
    long numBytes = getValidityLengthInBytes(rows);
    return ((numBytes + 63) / 64) * 64;
  }

  /**
   * Set the validity bit to null for the given index.
   * @param valid the buffer to set it in.
   * @param index the index to set it at.
   * @return 1 if validity changed else 0 if it already was null.
   */
  static int setNullAt(HostMemoryBuffer valid, long index) {
    long bucket = index / 8;
    byte currentByte = valid.getByte(bucket);
    int bitmask = ~(1 << (index % 8));
    // Read the old bit at its position *within* the byte (index % 8). The
    // previous code shifted by the full row index; Java masks int shift counts
    // to the low 5 bits, so for index >= 8 it examined the wrong bit and the
    // returned "was valid" flag was incorrect.
    int ret = (currentByte >> (index % 8)) & 0x1;
    currentByte &= bitmask;
    valid.setByte(bucket, currentByte);
    return ret;
  }

  /**
   * Returns true if the validity bit for the given row index is 0 (null).
   */
  static boolean isNull(HostMemoryBuffer valid, long index) {
    int b = valid.getByte(index / 8);
    int i = b & (1 << (index % 8));
    return i == 0;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/BufferType.java
|
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Identifies which backing buffer of a column is being referenced.
 * Used by ColumnVectors and HostColumnVectors when exposing their
 * underlying buffers.
 */
public enum BufferType {
  /** The validity (null mask) buffer: one bit per row, 0 meaning null. */
  VALIDITY,
  /** The offsets buffer used by variable-width data. */
  OFFSET,
  /** The buffer holding the actual data values. */
  DATA
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CSVOptions.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.HashSet;
import java.util.Set;
/**
 * Options for reading a CSV file. Instances are immutable; use
 * {@link #builder()} to construct one.
 */
public class CSVOptions extends ColumnFilterOptions {
  public static CSVOptions DEFAULT = new CSVOptions(new Builder());

  private final int headerRow;
  private final byte delim;
  private final byte quote;
  private final byte comment;
  private final String[] nullValues;
  private final String[] trueValues;
  private final String[] falseValues;
  private final QuoteStyle quoteStyle;

  private CSVOptions(Builder builder) {
    super(builder);
    headerRow = builder.headerRow;
    delim = builder.delim;
    quote = builder.quote;
    comment = builder.comment;
    nullValues = builder.nullValues.toArray(
        new String[builder.nullValues.size()]);
    trueValues = builder.trueValues.toArray(
        new String[builder.trueValues.size()]);
    falseValues = builder.falseValues.toArray(
        new String[builder.falseValues.size()]);
    quoteStyle = builder.quoteStyle;
  }

  /** Strings that should be parsed as null values. */
  String[] getNullValues() {
    return nullValues;
  }

  /** Strings that should be parsed as boolean true. */
  String[] getTrueValues() {
    return trueValues;
  }

  /** Strings that should be parsed as boolean false. */
  String[] getFalseValues() {
    return falseValues;
  }

  /** Row index (0 based) of the header row, or negative if there is no header. */
  int getHeaderRow() {
    return headerRow;
  }

  /** The field delimiter as an ASCII byte. */
  byte getDelim() {
    return delim;
  }

  /** The quote character as an ASCII byte. */
  byte getQuote() {
    return quote;
  }

  /** The comment-start character as an ASCII byte, or 0 if comments are disabled. */
  byte getComment() {
    return comment;
  }

  /** The quoting style expected in the input data. */
  QuoteStyle getQuoteStyle() {
    return quoteStyle;
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder extends ColumnFilterOptions.Builder<Builder> {
    private static final int NO_HEADER_ROW = -1;
    private final Set<String> nullValues = new HashSet<>();
    private final Set<String> trueValues = new HashSet<>();
    private final Set<String> falseValues = new HashSet<>();
    private byte comment = 0;
    private int headerRow = NO_HEADER_ROW;
    private byte delim = ',';
    private byte quote = '"';
    private QuoteStyle quoteStyle = QuoteStyle.MINIMAL;

    /**
     * Row of the header data (0 based counting). Negative is no header.
     */
    public Builder withHeaderAtRow(int index) {
      headerRow = index;
      return this;
    }

    /**
     * Set the row of the header to 0, the first line, if hasHeader is true else disables the
     * header.
     */
    public Builder hasHeader(boolean hasHeader) {
      return withHeaderAtRow(hasHeader ? 0 : NO_HEADER_ROW);
    }

    /**
     * Set the row of the header to 0, the first line.
     */
    public Builder hasHeader() {
      return withHeaderAtRow(0);
    }

    /**
     * Set the entry deliminator. Only ASCII chars are currently supported.
     */
    public Builder withDelim(char delim) {
      // NOTE(review): Character.getNumericValue returns a digit/numeric value,
      // not the code point, so this is a weak ASCII check — confirm intent.
      if (Character.getNumericValue(delim) > 127) {
        throw new IllegalArgumentException("Only ASCII characters are currently supported");
      }
      this.delim = (byte) delim;
      return this;
    }

    /**
     * Set the quote character. Only ASCII chars are currently supported.
     */
    public Builder withQuote(char quote) {
      if (Character.getNumericValue(quote) > 127) {
        throw new IllegalArgumentException("Only ASCII characters are currently supported");
      }
      this.quote = (byte) quote;
      return this;
    }

    /**
     * Quote style to expect in the input CSV data.
     *
     * Note: Only the following quoting styles are supported:
     * 1. MINIMAL: String columns containing special characters like row-delimiters/
     *             field-delimiter/quotes will be quoted.
     * 2. NONE: No quoting is done for any columns.
     */
    public Builder withQuoteStyle(QuoteStyle quoteStyle) {
      if (quoteStyle != QuoteStyle.MINIMAL && quoteStyle != QuoteStyle.NONE) {
        throw new IllegalArgumentException("Only MINIMAL and NONE quoting styles are supported");
      }
      this.quoteStyle = quoteStyle;
      return this;
    }

    /**
     * Set the character that starts the beginning of a comment line. setting to
     * 0 or '\0' will disable comments. The default is to have no comments.
     */
    public Builder withComment(char comment) {
      // Fixed copy/paste bug: this previously validated the 'quote' field
      // instead of the 'comment' argument being set.
      if (Character.getNumericValue(comment) > 127) {
        throw new IllegalArgumentException("Only ASCII characters are currently supported");
      }
      this.comment = (byte) comment;
      return this;
    }

    /** Disable comment handling. */
    public Builder withoutComments() {
      this.comment = 0;
      return this;
    }

    /** Add one or more strings to be treated as null values when read. */
    public Builder withNullValue(String... nvs) {
      for (String nv : nvs) {
        nullValues.add(nv);
      }
      return this;
    }

    /** Add one or more strings to be treated as boolean true when read. */
    public Builder withTrueValue(String... tvs) {
      for (String tv : tvs) {
        trueValues.add(tv);
      }
      return this;
    }

    /** Add one or more strings to be treated as boolean false when read. */
    public Builder withFalseValue(String... fvs) {
      for (String fv : fvs) {
        falseValues.add(fv);
      }
      return this;
    }

    public CSVOptions build() {
      return new CSVOptions(this);
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CSVWriterOptions.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * Options for writing a CSV file. Instances are immutable; use
 * {@link #builder()} to construct one.
 */
public class CSVWriterOptions {
  // Every field is assigned exactly once in the private constructor; the
  // original in-line field initializers were dead stores and have been
  // removed, and the fields made final so the options object is immutable.
  private final String[] columnNames;
  private final Boolean includeHeader;
  private final String rowDelimiter;
  private final byte fieldDelimiter;
  private final String nullValue;
  private final String falseValue;
  private final String trueValue;
  // Quote style used for CSV data.
  // Currently supports only `MINIMAL` and `NONE`.
  private final QuoteStyle quoteStyle;

  private CSVWriterOptions(Builder builder) {
    this.columnNames = builder.columnNames.toArray(new String[builder.columnNames.size()]);
    this.nullValue = builder.nullValue;
    this.includeHeader = builder.includeHeader;
    this.fieldDelimiter = builder.fieldDelimiter;
    this.rowDelimiter = builder.rowDelimiter;
    this.falseValue = builder.falseValue;
    this.trueValue = builder.trueValue;
    this.quoteStyle = builder.quoteStyle;
  }

  /** The column names written in the header, in order. */
  public String[] getColumnNames() {
    return columnNames;
  }

  /** Whether a header row should be written. */
  public Boolean getIncludeHeader() {
    return includeHeader;
  }

  /** The row (line) delimiter string; defaults to "\n". */
  public String getRowDelimiter() {
    return rowDelimiter;
  }

  /** The field delimiter byte; defaults to ','. */
  public byte getFieldDelimiter() {
    return fieldDelimiter;
  }

  /** The string written for null values; defaults to the empty string. */
  public String getNullValue() {
    return nullValue;
  }

  /** The string written for boolean true; defaults to "true". */
  public String getTrueValue() {
    return trueValue;
  }

  /** The string written for boolean false; defaults to "false". */
  public String getFalseValue() {
    return falseValue;
  }

  /**
   * Returns the quoting style used for writing CSV.
   */
  public QuoteStyle getQuoteStyle() {
    return quoteStyle;
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder {
    private List<String> columnNames = Collections.emptyList();
    private Boolean includeHeader = false;
    private String rowDelimiter = "\n";
    private byte fieldDelimiter = ',';
    private String nullValue = "";
    private String falseValue = "false";
    private String trueValue = "true";
    private QuoteStyle quoteStyle = QuoteStyle.MINIMAL;

    public CSVWriterOptions build() {
      return new CSVWriterOptions(this);
    }

    public Builder withColumnNames(List<String> columnNames) {
      this.columnNames = columnNames;
      return this;
    }

    public Builder withColumnNames(String... columnNames) {
      List<String> columnNamesList = new ArrayList<>();
      for (String columnName : columnNames) {
        columnNamesList.add(columnName);
      }
      return withColumnNames(columnNamesList);
    }

    public Builder withIncludeHeader(Boolean includeHeader) {
      this.includeHeader = includeHeader;
      return this;
    }

    public Builder withRowDelimiter(String rowDelimiter) {
      this.rowDelimiter = rowDelimiter;
      return this;
    }

    public Builder withFieldDelimiter(byte fieldDelimiter) {
      this.fieldDelimiter = fieldDelimiter;
      return this;
    }

    public Builder withNullValue(String nullValue) {
      this.nullValue = nullValue;
      return this;
    }

    public Builder withTrueValue(String trueValue) {
      this.trueValue = trueValue;
      return this;
    }

    public Builder withFalseValue(String falseValue) {
      this.falseValue = falseValue;
      return this;
    }

    /**
     * Sets the quote style used when writing CSV.
     *
     * Note: Only the following quoting styles are supported:
     * 1. MINIMAL: String columns containing special characters like row-delimiters/
     *             field-delimiter/quotes will be quoted.
     * 2. NONE: No quoting is done for any columns.
     */
    public Builder withQuoteStyle(QuoteStyle quoteStyle) {
      this.quoteStyle = quoteStyle;
      return this;
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CaptureGroups.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Capture groups setting, closely following cudf::strings::capture_groups.
 *
 * For processing a regex pattern containing capture groups. These can be used
 * to optimize the generated regex instructions where the capture groups do not
 * require extracting the groups.
 */
public enum CaptureGroups {
  EXTRACT(0), // capture groups processed normally for extract
  NON_CAPTURE(1); // convert all capture groups to non-capture groups

  // Native id, for use with libcudf; must stay in sync with the native enum.
  final int nativeId;

  // Only the constant values above should be used; the id is fixed per constant.
  private CaptureGroups(int nativeId) {
    this.nativeId = nativeId;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ChunkedPack.java
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * JNI interface to cudf::chunked_pack.
 *
 * ChunkedPack has an Iterator-like API with the familiar `hasNext` and `next`
 * methods. `next` should be used in a loop until `hasNext` returns false.
 *
 * However, `ChunkedPack.next` is special because it takes a `DeviceMemoryBuffer` as a
 * parameter, which means that the caller can call `next` giving any bounce buffer it
 * may have previously allocated. No requirement exists that the bounce buffer be the
 * same each time, the only requirement is that their sizes are all the same, and match
 * the size that was passed to `Table.makeChunkedPack` (which instantiates this class).
 *
 * The user of `ChunkedPack` must call `.close()` when done using it to clear up both
 * host and device resources.
 */
public class ChunkedPack implements AutoCloseable {
  // Pointer to the native cudf::chunked_pack; 0 once closed.
  long nativePtr;

  /**
   * This constructor is invoked by `Table.makeChunkedPack` after creating a native
   * `cudf::chunked_pack`.
   * @param nativePtr pointer to a `cudf::chunked_pack`
   */
  public ChunkedPack(long nativePtr) {
    this.nativePtr = nativePtr;
  }

  /**
   * Get the final contiguous size of the table we are packing. This is
   * the size that the final buffer should be, just like if the user called
   * `cudf::pack` instead.
   * @return the total number of bytes for the table in contiguous layout
   */
  public long getTotalContiguousSize() {
    return chunkedPackGetTotalContiguousSize(nativePtr);
  }

  /**
   * Method to be called to ensure that `ChunkedPack` has work left.
   * This method should be invoked followed by a call to `next`, until
   * `hasNext` returns false.
   * @return true if there is work left to be done (`next` should be called),
   *         false otherwise.
   */
  public boolean hasNext() {
    return chunkedPackHasNext(nativePtr);
  }

  /**
   * Place the next contiguous chunk of our table into `userPtr`.
   *
   * This method throws if `hasNext` is false.
   * @param userPtr the bounce buffer to use for this iteration
   * @return the number of bytes that we were able to place in `userPtr`. This is
   *         at most `userPtr.getLength()`.
   */
  public long next(DeviceMemoryBuffer userPtr) {
    return chunkedPackNext(nativePtr, userPtr.getAddress(), userPtr.getLength());
  }

  /**
   * Generates opaque table metadata that can be unpacked via `cudf::unpack`
   * at a later time.
   * @return a `PackedColumnMetadata` instance referencing cuDF packed table metadata
   */
  public PackedColumnMetadata buildMetadata() {
    return new PackedColumnMetadata(chunkedPackBuildMetadata(nativePtr));
  }

  @Override
  public void close() {
    // Guard makes close() idempotent: without it a second close() would call
    // chunkedPackDelete(0), passing a null native pointer to JNI.
    if (nativePtr != 0) {
      try {
        chunkedPackDelete(nativePtr);
      } finally {
        nativePtr = 0;
      }
    }
  }

  private static native long chunkedPackGetTotalContiguousSize(long nativePtr);
  private static native boolean chunkedPackHasNext(long nativePtr);
  private static native long chunkedPackNext(long nativePtr, long userPtr, long userPtrSize);
  private static native long chunkedPackBuildMetadata(long nativePtr);
  private static native void chunkedPackDelete(long nativePtr);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CloseableArray.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/** Utility class that wraps an array of closeable instances and can be closed */
public class CloseableArray<T extends AutoCloseable> implements AutoCloseable {
private T[] array;
public static <T extends AutoCloseable> CloseableArray<T> wrap(T[] array) {
return new CloseableArray<T>(array);
}
CloseableArray(T[] array) {
this.array = array;
}
public int size() {
return array.length;
}
public T get(int i) {
return array[i];
}
public T set(int i, T obj) {
array[i] = obj;
return obj;
}
public T[] getArray() {
return array;
}
public T[] release() {
T[] result = array;
array = null;
return result;
}
public void closeAt(int i) {
try {
T toClose = array[i];
array[i] = null;
toClose.close();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void close() {
close(null);
}
public void close(Exception pendingError) {
if (array == null) {
return;
}
T[] toClose = array;
array = null;
RuntimeException error = null;
if (pendingError instanceof RuntimeException) {
error = (RuntimeException) pendingError;
} else if (pendingError != null) {
error = new RuntimeException(pendingError);
}
for (T obj: toClose) {
if (obj != null) {
try {
obj.close();
} catch (RuntimeException e) {
if (error != null) {
error.addSuppressed(e);
} else {
error = e;
}
} catch (Exception e) {
if (error != null) {
error.addSuppressed(e);
} else {
error = new RuntimeException(e);
}
}
}
}
if (error != null) {
throw error;
}
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ColumnFilterOptions.java
|
/*
*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * Base options class for input formats that can filter columns.
 */
public abstract class ColumnFilterOptions {
  // Names of the columns to be returned (other columns are skipped)
  // If empty all columns are returned.
  private final String[] includeColumnNames;

  protected ColumnFilterOptions(Builder<?> builder) {
    includeColumnNames = builder.includeColumnNames.toArray(
        new String[builder.includeColumnNames.size()]);
  }

  String[] getIncludeColumnNames() {
    return includeColumnNames;
  }

  /**
   * Fluent builder for column-filtering options.
   *
   * The self-referential bound {@code T extends Builder<T>} (previously the raw
   * type {@code Builder}) lets subclass builders chain {@code includeColumn}
   * calls while keeping their concrete type; erasure is unchanged, so existing
   * subclasses remain binary compatible.
   */
  public static class Builder<T extends Builder<T>> {
    final List<String> includeColumnNames = new ArrayList<>();

    /**
     * Include one or more specific columns. Any column not included will not be read.
     * @param names the name of the column, or more than one if you want.
     */
    @SuppressWarnings("unchecked") // safe: subclasses declare T as their own type
    public T includeColumn(String... names) {
      for (String name : names) {
        includeColumnNames.add(name);
      }
      return (T) this;
    }

    /**
     * Include one or more specific columns. Any column not included will not be read.
     * @param names the name of the column, or more than one if you want.
     */
    @SuppressWarnings("unchecked") // safe: subclasses declare T as their own type
    public T includeColumn(Collection<String> names) {
      includeColumnNames.addAll(names);
      return (T) this;
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ColumnVector.java
|
/*
*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.HostColumnVector.Builder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Consumer;
/**
* This class represents the immutable vector of data. This class holds
* references to device(GPU) memory and is reference counted to know when to release it. Call
* close to decrement the reference count when you are done with the column, and call incRefCount
* to increment the reference count.
*/
public final class ColumnVector extends ColumnView {
/**
 * Interface to handle events for this ColumnVector. Only invoked during
 * close, hence `onClosed` is the only event.
 */
public interface EventHandler {
  /**
   * `onClosed` is invoked with the updated `refCount` during `close`.
   * The last invocation of `onClosed` will be with `refCount=0`.
   *
   * @note the callback is invoked with this `ColumnVector`'s lock held,
   *       so implementations must not re-enter synchronized methods of the vector.
   *
   * @param cv reference to the ColumnVector we are closing
   * @param refCount the updated ref count for this ColumnVector at the time
   *                 of invocation
   */
  void onClosed(ColumnVector cv, int refCount);
}
private static final Logger log = LoggerFactory.getLogger(ColumnVector.class);

static {
  // Make sure the native cudf libraries are loaded before any native method is called.
  NativeDepsLoader.loadNativeDeps();
}

// Lazily-computed null count; stays empty until getNullCount() queries the native layer.
private Optional<Long> nullCount = Optional.empty();
// Number of outstanding references; guarded by `this` (see incRefCountInternal/close).
private int refCount;
// Optional close callback; may be null. Guarded by `this`.
private EventHandler eventHandler;
/**
 * Wrap an existing on device cudf::column with the corresponding ColumnVector. The new
 * ColumnVector takes ownership of the pointer and will free it when the ref count reaches zero.
 * @param nativePointer host address of the cudf::column object which will be
 *                      owned by this instance.
 */
public ColumnVector(long nativePointer) {
  super(new OffHeapState(nativePointer));
  assert nativePointer != 0;
  // Register with the leak tracker so unclosed vectors get reported.
  MemoryCleaner.register(this, offHeap);
  this.refCount = 0;
  incRefCountInternal(true);
}

/**
 * Build the off-heap state for a flat (non-nested) column from its device buffers.
 * Creates a native column_view over the buffers; the returned OffHeapState takes
 * ownership of the buffers.
 */
private static OffHeapState makeOffHeap(DType type, long rows, Optional<Long> nullCount,
    DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
    DeviceMemoryBuffer offsetBuffer) {
  long viewHandle = initViewHandle(
      type, (int)rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(),
      dataBuffer, validityBuffer, offsetBuffer, null);
  return new OffHeapState(dataBuffer, validityBuffer, offsetBuffer, null, viewHandle);
}

/**
 * Create a new column vector based off of data already on the device.
 * @param type the type of the vector
 * @param rows the number of rows in this vector.
 * @param nullCount the number of nulls in the dataset.
 * @param dataBuffer the data stored on the device.  The column vector takes ownership of the
 *                   buffer.  Do not use the buffer after calling this.
 * @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0. The
 *                      column vector takes ownership of the buffer. Do not use the buffer
 *                      after calling this.
 * @param offsetBuffer a host buffer required for strings and string categories. The column
 *                    vector takes ownership of the buffer. Do not use the buffer after calling
 *                    this.
 */
public ColumnVector(DType type, long rows, Optional<Long> nullCount,
    DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
    DeviceMemoryBuffer offsetBuffer) {
  super(makeOffHeap(type, rows, nullCount, dataBuffer, validityBuffer, offsetBuffer));
  assert !type.equals(DType.LIST) : "This constructor should not be used for list type";
  if (!type.equals(DType.STRING)) {
    assert offsetBuffer == null : "offsets are only supported for STRING";
  }
  // A known null count must fit in an int for the native column_view.
  assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE)
      || !nullCount.isPresent();
  MemoryCleaner.register(this, offHeap);
  this.nullCount = nullCount;
  this.refCount = 0;
  incRefCountInternal(true);
}

/**
 * This method is internal and exposed purely for testing purposes
 */
static OffHeapState makeOffHeap(DType type, long rows, Optional<Long> nullCount,
    DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
    DeviceMemoryBuffer offsetBuffer, List<DeviceMemoryBuffer> toClose, long[] childHandles) {
  long viewHandle = initViewHandle(type, (int)rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(),
      dataBuffer, validityBuffer,
      offsetBuffer, childHandles);
  return new OffHeapState(dataBuffer, validityBuffer, offsetBuffer, toClose, viewHandle);
}
/**
 * Create a new column vector based off of data already on the device with child columns.
 * @param type the type of the vector, typically a nested type
 * @param rows the number of rows in this vector.
 * @param nullCount the number of nulls in the dataset.
 * @param dataBuffer the data stored on the device.  The column vector takes ownership of the
 *                   buffer.  Do not use the buffer after calling this.
 * @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0. The
 *                      column vector takes ownership of the buffer. Do not use the buffer
 *                      after calling this.
 * @param offsetBuffer a host buffer required for strings and string categories. The column
 *                    vector takes ownership of the buffer. Do not use the buffer after calling
 *                    this.
 * @param toClose List of buffers to track and close once done, usually in case of children
 * @param childHandles array of longs for child column view handles.
 */
public ColumnVector(DType type, long rows, Optional<Long> nullCount,
    DeviceMemoryBuffer dataBuffer, DeviceMemoryBuffer validityBuffer,
    DeviceMemoryBuffer offsetBuffer, List<DeviceMemoryBuffer> toClose, long[] childHandles) {
  super(makeOffHeap(type, rows, nullCount, dataBuffer, validityBuffer, offsetBuffer, toClose, childHandles));
  if (!type.equals(DType.STRING) && !type.equals(DType.LIST)) {
    assert offsetBuffer == null : "offsets are only supported for STRING, LISTS";
  }
  // A known null count must fit in an int for the native column_view.
  assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE)
      || !nullCount.isPresent();
  MemoryCleaner.register(this, offHeap);
  this.refCount = 0;
  incRefCountInternal(true);
}

/**
 * This is a very special constructor that should only ever be called by
 * fromViewWithContiguousAllocation.  It takes a cudf::column_view * instead of a cudf::column *.
 * But to maintain memory ownership properly we need to slice the memory in the view off from
 * a separate buffer that actually owns the memory allocation.
 * @param viewAddress the address of the cudf::column_view
 * @param contiguousBuffer the buffer that this is based off of.
 */
private ColumnVector(long viewAddress, DeviceMemoryBuffer contiguousBuffer) {
  super(new OffHeapState(viewAddress, contiguousBuffer));
  MemoryCleaner.register(this, offHeap);
  // TODO we may want to ask for the null count anyways...
  this.nullCount = Optional.empty();
  this.refCount = 0;
  incRefCountInternal(true);
}
/**
 * For a ColumnVector this is really just incrementing the reference count.
 * @return this
 */
@Override
public ColumnVector copyToColumnVector() {
  return incRefCount();
}

/**
 * Retrieves the column_view for a cudf::column and if it fails to do so, the column is deleted
 * and the exception is thrown to the caller.
 * @param nativePointer the cudf::column handle
 * @return the column_view handle
 */
private static long getColumnViewFromColumn(long nativePointer) {
  try {
    return ColumnVector.getNativeColumnView(nativePointer);
  } catch (CudfException ce) {
    // Avoid leaking the native column when we cannot obtain a view over it.
    deleteCudfColumn(nativePointer);
    throw ce;
  }
}

/**
 * Create a native column_view handle over the given device buffers.
 * Absent (null) buffers are passed to the native layer as address 0 with size 0.
 */
static long initViewHandle(DType type, int numRows, int nullCount,
    BaseDeviceMemoryBuffer dataBuffer,
    BaseDeviceMemoryBuffer validityBuffer,
    BaseDeviceMemoryBuffer offsetBuffer, long[] childHandles) {
  long cd = dataBuffer == null ? 0 : dataBuffer.address;
  long cdSize = dataBuffer == null ? 0 : dataBuffer.length;
  long od = offsetBuffer == null ? 0 : offsetBuffer.address;
  long vd = validityBuffer == null ? 0 : validityBuffer.address;
  return makeCudfColumnView(type.typeId.getNativeId(), type.getScale(), cd, cdSize,
      od, vd, nullCount, numRows, childHandles);
}
/**
 * Creates a ColumnVector from a native column_view using a contiguous device allocation.
 *
 * @param columnViewAddress address of the native column_view
 * @param buffer device buffer containing the data referenced by the column view
 */
public static ColumnVector fromViewWithContiguousAllocation(long columnViewAddress, DeviceMemoryBuffer buffer) {
  return new ColumnVector(columnViewAddress, buffer);
}

/**
 * Set an event handler for this vector. This method can be invoked with null
 * to unset the handler.
 *
 * @param newHandler - the EventHandler to use from this point forward
 * @return the prior event handler, or null if not set.
 */
public synchronized EventHandler setEventHandler(EventHandler newHandler) {
  EventHandler prev = this.eventHandler;
  this.eventHandler = newHandler;
  return prev;
}

/**
 * Returns the current event handler for this ColumnVector or null if no handler
 * is associated.
 */
public synchronized EventHandler getEventHandler() {
  return this.eventHandler;
}
/**
 * This is a really ugly API, but it is possible that the lifecycle of a column of
 * data may not have a clear lifecycle thanks to java and GC. This API informs the leak
 * tracking code that this is expected for this column, and big scary warnings should
 * not be printed when this happens.
 */
public void noWarnLeakExpected() {
  offHeap.noWarnLeakExpected();
}

/**
 * Close this Vector and free memory allocated for HostMemoryBuffer and DeviceMemoryBuffer
 */
@Override
public synchronized void close() {
  refCount--;
  offHeap.delRef();
  try {
    if (refCount == 0) {
      // Last reference gone: release view resources then the native column memory.
      super.close();
      offHeap.clean(false);
    } else if (refCount < 0) {
      // More closes than refs: log the ref-count history, then fail fast.
      offHeap.logRefCountDebug("double free " + this);
      throw new IllegalStateException("Close called too many times " + this);
    }
  } finally {
    // Notify the handler even if cleanup threw; refCount already reflects the decrement.
    if (eventHandler != null) {
      eventHandler.onClosed(this, refCount);
    }
  }
}
@Override
public String toString() {
  // Summarize the vector's shape and backing state for logs and debuggers.
  final StringBuilder sb = new StringBuilder("ColumnVector{");
  sb.append("rows=").append(rows);
  sb.append(", type=").append(type);
  sb.append(", nullCount=").append(nullCount);
  sb.append(", offHeap=").append(offHeap);
  return sb.append('}').toString();
}
/////////////////////////////////////////////////////////////////////////////
// METADATA ACCESS
/////////////////////////////////////////////////////////////////////////////

/**
 * Increment the reference count for this column.  You need to call close on this
 * to decrement the reference count again.
 */
public ColumnVector incRefCount() {
  return incRefCountInternal(false);
}

private synchronized ColumnVector incRefCountInternal(boolean isFirstTime) {
  offHeap.addRef();
  // refCount <= 0 means the vector was already closed, unless this is the
  // constructor's initial increment (isFirstTime).
  if (refCount <= 0 && !isFirstTime) {
    offHeap.logRefCountDebug("INC AFTER CLOSE " + this);
    throw new IllegalStateException("Column is already closed");
  }
  refCount++;
  return this;
}

/**
 * Returns the number of nulls in the data. Note that this might end up
 * being a very expensive operation because if the null count is not
 * known it will be calculated.
 */
public long getNullCount() {
  if (!nullCount.isPresent()) {
    // Cache the native computation so repeated calls are cheap.
    nullCount = Optional.of(offHeap.getNativeNullCount());
  }
  return nullCount.get();
}

/**
 * Returns this column's current refcount
 */
public synchronized int getRefCount() {
  return refCount;
}

/**
 * Returns if the vector has a validity vector allocated or not.
 */
public boolean hasValidityVector() {
  return (offHeap.getValid() != null);
}

/**
 * Returns if the vector has nulls.  Note that this might end up
 * being a very expensive operation because if the null count is not
 * known it will be calculated.
 */
public boolean hasNulls() {
  return getNullCount() > 0;
}
/////////////////////////////////////////////////////////////////////////////
// RAW DATA ACCESS
/////////////////////////////////////////////////////////////////////////////

/**
 * Get access to the raw device buffer for this column. This is intended to be used with a lot
 * of caution. The lifetime of the buffer is tied to the lifetime of the column (Do not close
 * the buffer, as the column will take care of it). Do not modify the contents of the buffer or
 * it might negatively impact what happens on the column. The data must be on the device for
 * this to work. Strings and string categories do not currently work because their underlying
 * device layout is currently hidden.
 * @param type the type of buffer to get access to.
 * @return the underlying buffer or null if no buffer is associated with it for this column.
 * Please note that if the column is empty there may be no buffers at all associated with the
 * column.
 */
public BaseDeviceMemoryBuffer getDeviceBufferFor(BufferType type) {
  switch (type) {
    case VALIDITY:
      return offHeap.getValid();
    case DATA:
      return offHeap.getData();
    case OFFSET:
      return offHeap.getOffsets();
    default:
      throw new IllegalArgumentException(type + " is not a supported buffer type.");
  }
}
/**
 * Ensures the ByteBuffer passed in is a direct byte buffer.
 * If it is not then it creates one and copies the data in
 * the byte buffer passed in to the direct byte buffer
 * it created and returns it. Null input is returned as-is.
 */
private static ByteBuffer bufferAsDirect(ByteBuffer buf) {
  if (buf == null || buf.isDirect()) {
    return buf;
  }
  // Copy the remaining bytes into a fresh direct buffer ready for reading.
  final ByteBuffer direct = ByteBuffer.allocateDirect(buf.remaining());
  direct.put(buf);
  direct.flip();
  return direct;
}
/**
 * Create a ColumnVector from the Apache Arrow byte buffers passed in.
 * Any of the buffers not used for that datatype should be set to null.
 * The buffers are expected to be off heap buffers, but if they are not,
 * it will handle copying them to direct byte buffers.
 * This only supports primitive types. Strings, Decimals and nested types
 * such as list and struct are not supported.
 * @param type - type of the column
 * @param numRows - Number of rows in the arrow column
 * @param nullCount - Null count
 * @param data - ByteBuffer of the Arrow data buffer
 * @param validity - ByteBuffer of the Arrow validity buffer
 * @param offsets - ByteBuffer of the Arrow offsets buffer
 * @return - new ColumnVector
 */
public static ColumnVector fromArrow(
    DType type,
    long numRows,
    long nullCount,
    ByteBuffer data,
    ByteBuffer validity,
    ByteBuffer offsets) {
  // The native side requires direct buffers, so convert any heap buffers first.
  final long handle = fromArrow(type.typeId.getNativeId(), numRows, nullCount,
      bufferAsDirect(data), bufferAsDirect(validity), bufferAsDirect(offsets));
  return new ColumnVector(handle);
}
/**
 * Create a new vector of length rows, where each row is filled with the Scalar's
 * value
 * @param scalar - Scalar to use to fill rows
 * @param rows - Number of rows in the new ColumnVector
 * @return - new ColumnVector
 */
public static ColumnVector fromScalar(Scalar scalar, int rows) {
  long columnHandle = fromScalar(scalar.getScalarHandle(), rows);
  return new ColumnVector(columnHandle);
}

/**
 * Create a new struct vector made up of existing columns. Note that this will copy
 * the contents of the input columns to make a new vector. If you only want to
 * do a quick temporary computation you can use ColumnView.makeStructView.
 * @param columns the columns to make the struct from.
 * @return the new ColumnVector
 */
public static ColumnVector makeStruct(ColumnView... columns) {
  // The temporary view is zero-copy; copyToColumnVector materializes the struct.
  try (ColumnView cv = ColumnView.makeStructView(columns)) {
    return cv.copyToColumnVector();
  }
}

/**
 * Create a new struct vector made up of existing columns. Note that this will copy
 * the contents of the input columns to make a new vector. If you only want to
 * do a quick temporary computation you can use ColumnView.makeStructView.
 * @param rows the number of rows in the struct. Used for structs with no children.
 * @param columns the columns to make the struct from.
 * @return the new ColumnVector
 */
public static ColumnVector makeStruct(long rows, ColumnView... columns) {
  try (ColumnView cv = ColumnView.makeStructView(rows, columns)) {
    return cv.copyToColumnVector();
  }
}
/**
 * Create a LIST column from the given columns. Each list in the returned column will have the
 * same number of entries in it as columns passed into this method. Be careful about the
 * number of rows passed in as there are limits on the maximum output size supported for
 * column lists.
 * @param columns the columns to make up the list column, in the order they will appear in the
 *                resulting lists.
 * @return the new LIST ColumnVector
 */
public static ColumnVector makeList(ColumnView... columns) {
  if (columns.length == 0) {
    throw new IllegalArgumentException("At least one column is needed to get the row count");
  }
  // Derive the row count and child type from the first column; the full overload
  // validates that every other column matches.
  final ColumnView first = columns[0];
  return makeList(first.getRowCount(), first.getType(), columns);
}
/**
 * Create a LIST column from the given columns. Each list in the returned column will have the
 * same number of entries in it as columns passed into this method. Be careful about the
 * number of rows passed in as there are limits on the maximum output size supported for
 * column lists.
 * @param rows the number of rows to create, for the special case of an empty list.
 * @param type the type of the child column, for the special case of an empty list.
 * @param columns the columns to make up the list column, in the order they will appear in the
 *                resulting lists.
 * @return the new LIST ColumnVector
 */
public static ColumnVector makeList(long rows, DType type, ColumnView... columns) {
  long[] handles = new long[columns.length];
  for (int i = 0; i < columns.length; i++) {
    ColumnView cv = columns[i];
    // Every input column must agree on row count and type for the rows to zip together.
    if (rows != cv.getRowCount()) {
      throw new IllegalArgumentException("All columns must have the same number of rows");
    }
    if (!type.equals(cv.getType())) {
      throw new IllegalArgumentException("All columns must have the same type");
    }
    handles[i] = cv.getNativeView();
  }
  if (columns.length == 0 && type.isNestedType()) {
    throw new IllegalArgumentException(
        "Creating an empty list column of nested types is not currently supported");
  }
  return new ColumnVector(makeList(handles, type.typeId.nativeId, type.getScale(), rows));
}

/**
 * Create a LIST column from the current column and a given offsets column. The output column will
 * contain lists having elements that are copied from the current column and their sizes are
 * determined by the given offsets.
 *
 * Note that the caller is responsible to make sure the given offsets column is of type INT32 and
 * it contains valid indices to create a LIST column. There will not be any validity check for
 * these offsets during calling to this function. If the given offsets are invalid, we may have
 * bad memory accesses and/or data corruption.
 *
 * @param rows the number of rows to create.
 * @param offsets the offsets pointing to row indices of the current column to create an output
 *                LIST column.
 */
public ColumnVector makeListFromOffsets(long rows, ColumnView offsets) {
  return new ColumnVector(makeListFromOffsets(getNativeView(), offsets.getNativeView(), rows));
}
/**
 * Create a new vector of length rows, starting at the initialValue and going by step each time.
 * Only numeric types are supported.
 * @param initialValue the initial value to start at.
 * @param step the step to add to each subsequent row.
 * @param rows the total number of rows
 * @return the new ColumnVector.
 */
public static ColumnVector sequence(Scalar initialValue, Scalar step, int rows) {
  if (!initialValue.isValid() || !step.isValid()) {
    throw new IllegalArgumentException("nulls are not supported in sequence");
  }
  return new ColumnVector(sequence(initialValue.getScalarHandle(), step.getScalarHandle(), rows));
}

/**
 * Create a new vector of length rows, starting at the initialValue and going by 1 each time.
 * Only numeric types are supported.
 * @param initialValue the initial value to start at.
 * @param rows the total number of rows
 * @return the new ColumnVector.
 */
public static ColumnVector sequence(Scalar initialValue, int rows) {
  if (!initialValue.isValid()) {
    throw new IllegalArgumentException("nulls are not supported in sequence");
  }
  // Step handle 0 tells the native layer to use the default increment of 1.
  return new ColumnVector(sequence(initialValue.getScalarHandle(), 0, rows));
}
/**
 * Create a list column in which each row is a sequence of values starting from a `start` value,
 * incrementing by one, and its cardinality is specified by a `size` value. The `start` and `size`
 * values used to generate each list is taken from the corresponding row of the input start and
 * size columns.
 * @param start first values in the result sequences
 * @param size numbers of values in the result sequences
 * @return the new ColumnVector.
 */
public static ColumnVector sequence(ColumnView start, ColumnView size) {
  // Both inputs must be null-free; the previous `||` let one nullable column through.
  assert start.getNullCount() == 0 && size.getNullCount() == 0 : "starts and sizes input " +
      "columns must not have nulls.";
  // Step handle 0 tells the native layer to use the default increment of 1.
  return new ColumnVector(sequences(start.getNativeView(), size.getNativeView(), 0));
}
/**
 * Create a list column in which each row is a sequence of values starting from a `start` value,
 * incrementing by a `step` value, and its cardinality is specified by a `size` value.
 * The values `start`, `step`, and `size` used to generate each list is taken from the
 * corresponding row of the input starts, steps, and sizes columns.
 * @param start first values in the result sequences
 * @param size numbers of values in the result sequences
 * @param step increment values for the result sequences.
 * @return the new ColumnVector.
 */
public static ColumnVector sequence(ColumnView start, ColumnView size, ColumnView step) {
  // All three inputs must be null-free; the previous `||` let nullable columns through.
  assert start.getNullCount() == 0 && size.getNullCount() == 0 && step.getNullCount() == 0 :
      "start, size and step must not have nulls.";
  // Compare DTypes by value (equals), not reference; == fails for equal decimal
  // types with matching scales, and value comparison matches the rest of this file.
  assert step.getType().equals(start.getType()) : "start and step input columns must" +
      " have the same type.";
  return new ColumnVector(sequences(start.getNativeView(), size.getNativeView(),
      step.getNativeView()));
}
/**
 * Create a new vector by concatenating multiple columns together.
 * Note that all columns must have the same type.
 */
public static ColumnVector concatenate(ColumnView... columns) {
  if (columns.length < 2) {
    throw new IllegalArgumentException("Concatenate requires 2 or more columns");
  }
  // Gather the native view handles to hand to the native concatenate call.
  final long[] views = new long[columns.length];
  for (int i = 0; i < views.length; i++) {
    views[i] = columns[i].getNativeView();
  }
  return new ColumnVector(concatenate(views));
}
/**
 * Concatenate columns of strings together, combining a corresponding row from each column
 * into a single string row of a new column with no separator string inserted between each
 * combined string and maintaining null values in combined rows.
 * @param columns array of columns containing strings, must be non-empty
 * @return A new java column vector containing the concatenated strings.
 */
public static ColumnVector stringConcatenate(ColumnView[] columns) {
  // Empty separator, and a null narep so any null input row yields a null output row.
  try (Scalar emptyString = Scalar.fromString("");
       Scalar nullString = Scalar.fromString(null)) {
    return stringConcatenate(emptyString, nullString, columns);
  }
}

/**
 * Concatenate columns of strings together, combining a corresponding row from each column into
 * a single string row of a new column. This version includes the separator for null rows
 * if 'narep' is valid.
 * @param separator string scalar inserted between each string being merged.
 * @param narep string scalar indicating null behavior. If set to null and any string in the row
 *              is null the resulting string will be null. If not null, null values in any column
 *              will be replaced by the specified string.
 * @param columns array of columns containing strings, must be non-empty
 * @return A new java column vector containing the concatenated strings.
 */
public static ColumnVector stringConcatenate(Scalar separator, Scalar narep, ColumnView[] columns) {
  return stringConcatenate(separator, narep, columns, true);
}

/**
 * Concatenate columns of strings together, combining a corresponding row from each column into
 * a single string row of a new column.
 * @param separator string scalar inserted between each string being merged.
 * @param narep string scalar indicating null behavior. If set to null and any string in the row
 *              is null the resulting string will be null. If not null, null values in any column
 *              will be replaced by the specified string.
 * @param columns array of columns containing strings, must be non-empty
 * @param separateNulls if true, then the separator is included for null rows if
 *                      `narep` is valid.
 * @return A new java column vector containing the concatenated strings.
 */
public static ColumnVector stringConcatenate(Scalar separator, Scalar narep, ColumnView[] columns,
    boolean separateNulls) {
  assert columns != null : "input columns should not be null";
  assert columns.length > 0 : "input columns should not be empty";
  assert separator != null : "separator scalar provided may not be null";
  assert separator.getType().equals(DType.STRING) : "separator scalar must be a string scalar";
  assert narep != null : "narep scalar provided may not be null";
  assert narep.getType().equals(DType.STRING) : "narep scalar must be a string scalar";
  // Gather the native view handles for the native concatenation call.
  long[] columnViews = new long[columns.length];
  for(int i = 0; i < columns.length; i++) {
    assert columns[i] != null : "Column vectors passed may not be null";
    columnViews[i] = columns[i].getNativeView();
  }
  return new ColumnVector(stringConcatenation(columnViews, separator.getScalarHandle(),
      narep.getScalarHandle(), separateNulls));
}
/**
 * Concatenate columns of strings together using a separator specified for each row
 * and returns the result as a string column. If the row separator for a given row is null,
 * output column for that row is null. Null column values for a given row are skipped.
 * @param columns array of columns containing strings
 * @param sepCol strings column that provides the separator for a given row
 * @return A new java column vector containing the concatenated strings with separator between.
 */
public static ColumnVector stringConcatenate(ColumnView[] columns, ColumnView sepCol) {
  // Null separator narep -> null output row; empty colNarep lets null values be skipped.
  try (Scalar nullString = Scalar.fromString(null);
       Scalar emptyString = Scalar.fromString("")) {
    return stringConcatenate(columns, sepCol, nullString, emptyString, false);
  }
}

/**
 * Concatenate columns of strings together using a separator specified for each row
 * and returns the result as a string column. If the row separator for a given row is null,
 * output column for that row is null unless separatorNarep is provided.
 * The separator is applied between two output row values if the separateNulls
 * is `YES` or only between valid rows if separateNulls is `NO`.
 * @param columns array of columns containing strings
 * @param sepCol strings column that provides the separator for a given row
 * @param separatorNarep string scalar indicating null behavior when a separator is null.
 *                       If set to null and the separator is null the resulting string will
 *                       be null. If not null, this string will be used in place of a null
 *                       separator.
 * @param colNarep string that should be used in place of any null strings
 *                 found in any column.
 * @param separateNulls if true, then the separator is included for null rows if
 *                      `colNarep` is valid.
 * @return A new java column vector containing the concatenated strings with separator between.
 */
public static ColumnVector stringConcatenate(ColumnView[] columns,
    ColumnView sepCol, Scalar separatorNarep, Scalar colNarep, boolean separateNulls) {
  assert columns.length >= 1 : ".stringConcatenate() operation requires at least 1 column";
  assert separatorNarep != null : "separator narep scalar provided may not be null";
  assert colNarep != null : "column narep scalar provided may not be null";
  assert separatorNarep.getType().equals(DType.STRING) : "separator naprep scalar must be a string scalar";
  assert colNarep.getType().equals(DType.STRING) : "column narep scalar must be a string scalar";
  // Gather the native view handles for the native concatenation call.
  long[] columnViews = new long[columns.length];
  for(int i = 0; i < columns.length; i++) {
    assert columns[i] != null : "Column vectors passed may not be null";
    columnViews[i] = columns[i].getNativeView();
  }
  return new ColumnVector(stringConcatenationSepCol(columnViews, sepCol.getNativeView(),
      separatorNarep.getScalarHandle(), colNarep.getScalarHandle(), separateNulls));
}
/**
 * Concatenate columns of lists horizontally (row by row), producing a new column where
 * each row is the combination of the corresponding rows of the inputs.
 * NOTICE: Any concatenation involving a null list element will result in a null list.
 *
 * @param columns array of columns containing lists, must be non-empty
 * @return A new java column vector containing the concatenated lists.
 */
public static ColumnVector listConcatenateByRow(ColumnView... columns) {
  // ignoreNull == false: a null list element makes the whole output row null.
  return listConcatenateByRow(false, columns);
}
/**
 * Concatenate columns of lists horizontally (row by row), combining a corresponding row
 * from each column into a single list row of a new column.
 *
 * @param ignoreNull whether to ignore null list element of input columns: If true, null list
 *                   will be ignored from concatenation; Otherwise, any concatenation involving
 *                   a null list element will result in a null list
 * @param columns array of columns containing lists, must be non-empty
 * @return A new java column vector containing the concatenated lists.
 */
public static ColumnVector listConcatenateByRow(boolean ignoreNull, ColumnView... columns) {
  assert columns != null : "input columns should not be null";
  assert columns.length > 0 : "input columns should not be empty";
  long[] columnViews = new long[columns.length];
  for (int i = 0; i < columns.length; i++) {
    // Added for consistency with the other varargs APIs in this class: fail with a
    // clear assertion instead of an opaque NPE on getNativeView().
    assert columns[i] != null : "Column vectors passed may not be null";
    columnViews[i] = columns[i].getNativeView();
  }
  return new ColumnVector(concatListByRow(columnViews, ignoreNull));
}
/**
 * Create a new vector containing the MD5 hash of each row in the table.
 *
 * @param columns array of columns to hash, must have identical number of rows.
 * @return the new ColumnVector of 32 character hex strings representing each row's hash value.
 */
public static ColumnVector md5Hash(ColumnView... columns) {
  if (columns.length < 1) {
    throw new IllegalArgumentException("MD5 hashing requires at least 1 column of input");
  }
  final long expectedRows = columns[0].getRowCount();
  final long[] views = new long[columns.length];
  for (int idx = 0; idx < views.length; idx++) {
    ColumnView col = columns[idx];
    assert col != null : "Column vectors passed may not be null";
    assert col.getRowCount() == expectedRows : "Row count mismatch, all columns must be the same size";
    assert !col.getType().isDurationType() : "Unsupported column type Duration";
    assert !col.getType().isTimestampType() : "Unsupported column type Timestamp";
    // LIST is the only nested type accepted here.
    assert !col.getType().isNestedType() || col.getType().equals(DType.LIST) :
        "Unsupported nested type column";
    views[idx] = col.getNativeView();
  }
  return new ColumnVector(md5(views));
}
/**
 * Create a new column containing the Sha1 hash of each row in the table.
 *
 * @param columns columns to hash
 * @return the new ColumnVector of 40 character hex strings representing each row's hash value.
 */
public static ColumnVector sha1Hash(ColumnView... columns) {
  if (columns.length < 1) {
    throw new IllegalArgumentException("Sha1 hashing requires at least 1 column of input");
  }
  final long[] views = new long[columns.length];
  for (int idx = 0; idx < views.length; idx++) {
    ColumnView col = columns[idx];
    assert col != null : "Column vectors passed may not be null";
    views[idx] = col.getNativeView();
  }
  return new ColumnVector(sha1(views));
}
/**
 * Generic method to cast this ColumnVector to another type.
 * When casting from a Date, Timestamp, or Boolean to a numerical type the underlying numerical
 * representation of the data will be used for the cast.
 *
 * For Strings:
 * Casting strings from/to timestamp isn't supported atm.
 * Please look at {@link ColumnVector#asTimestamp(DType, String)}
 * and {@link ColumnVector#asStrings(String)} for casting string to timestamp when the format
 * is known
 *
 * Float values when converted to String could be different from the expected default behavior in
 * Java
 * e.g.
 * 12.3 => "12.30000019" instead of "12.3"
 * Double.POSITIVE_INFINITY => "Inf" instead of "INFINITY"
 * Double.NEGATIVE_INFINITY => "-Inf" instead of "-INFINITY"
 *
 * @param type type of the resulting ColumnVector
 * @return A new vector allocated on the GPU
 */
@Override
public ColumnVector castTo(DType type) {
  if (!this.type.equals(type)) {
    return super.castTo(type);
  }
  // Already the requested type: just bump the reference count rather than copying.
  return incRefCount();
}
/////////////////////////////////////////////////////////////////////////////
// NATIVE METHODS
/////////////////////////////////////////////////////////////////////////////
// Creates a sequence column of `rows` elements beginning at `initialValue` and
// advancing by `step` each row.
private static native long sequence(long initialValue, long step, int rows);
// Creates sequences from the columns behind the given native handles.
// NOTE(review): exact semantics of the start/size/step handles should be confirmed
// against the JNI implementation.
private static native long sequences(long startHandle, long sizeHandle, long stepHandle)
    throws CudfException;
// Creates a column from Arrow-formatted buffers (data, validity, offsets).
private static native long fromArrow(int type, long col_length,
    long null_count, ByteBuffer data, ByteBuffer validity,
    ByteBuffer offsets) throws CudfException;
// Creates a column of `rowCount` rows from the given scalar.
private static native long fromScalar(long scalarHandle, int rowCount) throws CudfException;
// Builds a LIST column from child column handles with the given type/scale and row count.
private static native long makeList(long[] handles, long typeHandle, int scale, long rows)
    throws CudfException;
// Builds a LIST column from a child column handle and an offsets column handle.
private static native long makeListFromOffsets(long childHandle, long offsetsHandle, long rows)
    throws CudfException;
// Concatenates the given column views into a single column.
private static native long concatenate(long[] viewHandles) throws CudfException;
/**
 * Native method to concatenate columns of lists horizontally (row by row), combining a row
 * from each column into a single list.
 *
 * @param columnViews array of longs holding the native handles of the column_views to combine.
 * @param ignoreNull if true, null list elements are skipped; otherwise they null the output row.
 * @return native handle of the resulting cudf column, used to construct the Java column
 *         by the listConcatenateByRow method.
 */
private static native long concatListByRow(long[] columnViews, boolean ignoreNull);
/**
 * Native method to concatenate columns of strings together, combining a row from
 * each column into a single string.
 *
 * @param columnViews array of longs holding the native handles of the column_views to combine.
 * @param separator string scalar inserted between each string being merged, may not be null.
 * @param narep string scalar indicating null behavior. If set to null and any string in
 *              the row is null the resulting string will be null. If not null, null
 *              values in any column will be replaced by the specified string. The
 *              underlying value in the string scalar may be null, but the object passed
 *              in may not.
 * @param separate_nulls boolean if true, then the separator is included for null rows if
 *                       `narep` is valid.
 * @return native handle of the resulting cudf column, used to construct the Java column
 *         by the stringConcatenate method.
 */
private static native long stringConcatenation(long[] columnViews, long separator, long narep,
    boolean separate_nulls);
/**
 * Native method to concatenate columns of strings together using a separator specified for each row
 * and returns the result as a string column.
 * @param columnViews array of longs holding the native handles of the column_views to combine.
 * @param sep_column long holding the native handle of the strings_column_view used as separators.
 * @param separator_narep string scalar indicating null behavior when a separator is null.
 *                        If set to null and the separator is null the resulting string will
 *                        be null. If not null, this string will be used in place of a null
 *                        separator.
 * @param col_narep string String scalar that should be used in place of any null strings
 *                  found in any column.
 * @param separate_nulls boolean if true, then the separator is included for null rows if
 *                       `col_narep` is valid.
 * @return native handle of the resulting cudf column, used to construct the Java column.
 */
private static native long stringConcatenationSepCol(long[] columnViews,
    long sep_column,
    long separator_narep,
    long col_narep,
    boolean separate_nulls);
/**
 * Native method to MD5 hash each row of the given table
 *
 * @param viewHandles array of native handles to the cudf::column_view columns being operated on.
 * @return native handle of the resulting cudf column containing the hex-string hashing results.
 */
private static native long md5(long[] viewHandles) throws CudfException;
/**
 * Native method to sha1 hash each row of the given table
 *
 * @param viewHandles array of native handles to the columns being operated on.
 * @return native handle of the resulting cudf column containing the hex-string hashing results.
 */
private static native long sha1(long[] viewHandles) throws CudfException;
/////////////////////////////////////////////////////////////////////////////
// INTERNAL/NATIVE ACCESS
/////////////////////////////////////////////////////////////////////////////
////////
// Native methods specific to cudf::column. These either take or create a cudf::column
// instead of a cudf::column_view so they need to be used with caution. These should
// only be called from the OffHeap inner class.
////////
/**
 * Delete the column. This is not private because there are a few cases where Table
 * may get back an array of columns pointers and we want to have best effort in cleaning them up
 * on any failure.
 */
static native void deleteCudfColumn(long cudfColumnHandle) throws CudfException;
// Reads the null count stored on the cudf::column itself.
private static native int getNativeNullCountColumn(long cudfColumnHandle) throws CudfException;
// Stores a known null count on the cudf::column.
private static native void setNativeNullCountColumn(long cudfColumnHandle, int nullCount) throws CudfException;
/**
 * Create a cudf::column_view from a cudf::column.
 * @param cudfColumnHandle the pointer to the cudf::column
 * @return a pointer to a cudf::column_view
 * @throws CudfException on any error
 */
static native long getNativeColumnView(long cudfColumnHandle) throws CudfException;
// Creates an empty cudf::column of the given type id and scale.
static native long makeEmptyCudfColumn(int type, int scale);
/////////////////////////////////////////////////////////////////////////////
// HELPER CLASSES
/////////////////////////////////////////////////////////////////////////////
/**
* Holds the off heap state of the column vector so we can clean it up, even if it is leaked.
*/
protected static final class OffHeapState extends MemoryCleaner.Cleaner {
// This must be kept in sync with the native code
public static final long UNKNOWN_NULL_COUNT = -1;
private long columnHandle;
private long viewHandle = 0;
private List<MemoryBuffer> toClose = new ArrayList<>();
/**
* Make a column form an existing cudf::column *.
*/
public OffHeapState(long columnHandle) {
this.columnHandle = columnHandle;
this.toClose.add(getData());
this.toClose.add(getValid());
this.toClose.add(getOffsets());
}
/**
* Create from existing cudf::column_view and buffers.
*/
public OffHeapState(DeviceMemoryBuffer data, DeviceMemoryBuffer valid, DeviceMemoryBuffer offsets,
List<DeviceMemoryBuffer> buffers,
long viewHandle) {
assert(viewHandle != 0);
if (data != null) {
this.toClose.add(data);
}
if (valid != null) {
this.toClose.add(valid);
}
if (offsets != null) {
this.toClose.add(offsets);
}
if (buffers != null) {
toClose.addAll(buffers);
}
this.viewHandle = viewHandle;
}
/**
* Create from existing cudf::column_view and contiguous buffer.
*/
public OffHeapState(long viewHandle, DeviceMemoryBuffer contiguousBuffer) {
assert viewHandle != 0;
this.viewHandle = viewHandle;
BaseDeviceMemoryBuffer valid = getValid();
BaseDeviceMemoryBuffer data = getData();
BaseDeviceMemoryBuffer offsets = getOffsets();
toClose.add(data);
toClose.add(valid);
toClose.add(offsets);
contiguousBuffer.incRefCount();
toClose.add(contiguousBuffer);
}
public long getViewHandle() {
if (viewHandle == 0) {
viewHandle = ColumnVector.getNativeColumnView(columnHandle);
}
return viewHandle;
}
public long getNativeNullCount() {
if (viewHandle != 0) {
return ColumnView.getNativeNullCount(getViewHandle());
}
return getNativeNullCountColumn(columnHandle);
}
private void setNativeNullCount(int nullCount) throws CudfException {
assert viewHandle == 0 : "Cannot set the null count if a view has already been created";
assert columnHandle != 0;
setNativeNullCountColumn(columnHandle, nullCount);
}
public BaseDeviceMemoryBuffer getData() {
return getDataBuffer(getViewHandle());
}
public BaseDeviceMemoryBuffer getValid() {
return getValidityBuffer(getViewHandle());
}
public BaseDeviceMemoryBuffer getOffsets() {
return getOffsetsBuffer(getViewHandle());
}
@Override
public void noWarnLeakExpected() {
super.noWarnLeakExpected();
BaseDeviceMemoryBuffer valid = getValid();
BaseDeviceMemoryBuffer data = getData();
BaseDeviceMemoryBuffer offsets = getOffsets();
if (valid != null) {
valid.noWarnLeakExpected();
}
if (data != null) {
data.noWarnLeakExpected();
}
if(offsets != null) {
offsets.noWarnLeakExpected();
}
}
@Override
public String toString() {
return "(ID: " + id + " " + Long.toHexString(columnHandle == 0 ? viewHandle : columnHandle) + ")";
}
@Override
protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
boolean neededCleanup = false;
long address = 0;
// Always mark the resource as freed even if an exception is thrown.
// We cannot know how far it progressed before the exception, and
// therefore it is unsafe to retry.
Throwable toThrow = null;
if (viewHandle != 0) {
address = viewHandle;
try {
ColumnView.deleteColumnView(viewHandle);
} catch (Throwable t) {
toThrow = t;
} finally {
viewHandle = 0;
}
neededCleanup = true;
}
if (columnHandle != 0) {
if (address != 0) {
address = columnHandle;
}
try {
ColumnVector.deleteCudfColumn(columnHandle);
} catch (Throwable t) {
if (toThrow != null) {
toThrow.addSuppressed(t);
} else {
toThrow = t;
}
} finally {
columnHandle = 0;
}
neededCleanup = true;
}
if (!toClose.isEmpty()) {
try {
for (MemoryBuffer toCloseBuff : toClose) {
if (toCloseBuff != null) {
try {
toCloseBuff.close();
} catch (Throwable t) {
if (toThrow != null) {
toThrow.addSuppressed(t);
} else {
toThrow = t;
}
}
}
}
} finally {
toClose.clear();
}
neededCleanup = true;
}
if (toThrow != null) {
throw new RuntimeException(toThrow);
}
if (neededCleanup) {
if (logErrorIfNotClean) {
log.error("A DEVICE COLUMN VECTOR WAS LEAKED (ID: " + id + " " + Long.toHexString(address)+ ")");
logRefCountDebug("Leaked vector");
}
}
return neededCleanup;
}
@Override
public boolean isClean() {
return viewHandle == 0 && columnHandle == 0 && toClose.isEmpty();
}
}
/////////////////////////////////////////////////////////////////////////////
// BUILDER
/////////////////////////////////////////////////////////////////////////////
/**
 * Create a new vector.
 * @param type the type of vector to build.
 * @param rows maximum number of rows that the vector can hold.
 * @param init what will initialize the vector.
 * @return the created vector.
 */
public static ColumnVector build(DType type, int rows, Consumer<Builder> init) {
  // Build on the host first, then transfer the finished data to the device.
  try (Builder b = HostColumnVector.builder(type, rows)) {
    init.accept(b);
    return b.buildAndPutOnDevice();
  }
}
/**
 * Create a new vector backed by a pre-sized string buffer.
 * NOTE(review): presumably builds a STRING column, given the stringBufferSize
 * parameter of the two-arg builder — confirm against HostColumnVector.builder.
 * @param rows maximum number of rows that the vector can hold.
 * @param stringBufferSize initial size, in bytes, of the buffer backing the string data.
 * @param init what will initialize the vector.
 * @return the created vector.
 */
public static ColumnVector build(int rows, long stringBufferSize, Consumer<Builder> init) {
  try (Builder builder = HostColumnVector.builder(rows, stringBufferSize)) {
    init.accept(builder);
    return builder.buildAndPutOnDevice();
  }
}
/**
 * Create a new BOOL8 vector from the given byte values.
 */
public static ColumnVector boolFromBytes(byte... values) {
  return build(DType.BOOL8, values.length, (b) -> b.appendArray(values));
}
/**
 * This method is evolving, unstable and currently test only.
 * Please use with caution and expect it to change in the future.
 * @param dataType the type description of the list column to build.
 * @param lists one list per row of the resulting column.
 * @return a device column vector copied from the host-built data.
 */
// Suppresses the heap-pollution warning from the generic varargs parameter; the array
// is only forwarded to HostColumnVector.fromLists here — TODO confirm it is not stored.
@SafeVarargs
public static <T> ColumnVector fromLists(HostColumnVector.DataType dataType, List<T>... lists) {
  try (HostColumnVector host = HostColumnVector.fromLists(dataType, lists)) {
    return host.copyToDevice();
  }
}
/**
 * Create a struct vector from the given host-side struct rows.
 * This method is evolving, unstable and currently test only.
 * Please use with caution and expect it to change in the future.
 * @param dataType the type description of the struct column to build.
 * @param lists one StructData entry per row of the resulting column.
 * @return a device ColumnVector copied from the host-built data.
 */
public static ColumnVector fromStructs(HostColumnVector.DataType dataType,
                                       List<HostColumnVector.StructData> lists) {
  try (HostColumnVector host = HostColumnVector.fromStructs(dataType, lists)) {
    return host.copyToDevice();
  }
}
/**
 * Create a struct vector from the given host-side struct rows.
 * This method is evolving, unstable and currently test only.
 * Please use with caution and expect it to change in the future.
 * @param dataType the type description of the struct column to build.
 * @param lists one StructData entry per row of the resulting column.
 * @return a device ColumnVector copied from the host-built data.
 */
public static ColumnVector fromStructs(HostColumnVector.DataType dataType,
                                       HostColumnVector.StructData... lists) {
  try (HostColumnVector host = HostColumnVector.fromStructs(dataType, lists)) {
    return host.copyToDevice();
  }
}
/**
 * Create a struct vector of {@code numRows} rows as produced by
 * {@code HostColumnVector.emptyStructs}.
 * This method is evolving, unstable and currently test only.
 * Please use with caution and expect it to change in the future.
 * @param dataType the type description of the struct column to build.
 * @param numRows the number of rows to create.
 * @return a device ColumnVector copied from the host-built data.
 */
public static ColumnVector emptyStructs(HostColumnVector.DataType dataType, long numRows) {
  try (HostColumnVector host = HostColumnVector.emptyStructs(dataType, numRows)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new BOOL8 vector from the given boolean values
 * (true maps to byte 1, false to byte 0).
 */
public static ColumnVector fromBooleans(boolean... values) {
  final byte[] asBytes = new byte[values.length];
  for (int idx = 0; idx < asBytes.length; idx++) {
    asBytes[idx] = values[idx] ? (byte) 1 : (byte) 0;
  }
  return build(DType.BOOL8, asBytes.length, (b) -> b.appendArray(asBytes));
}
/**
 * Create a new INT8 vector from the given values.
 */
public static ColumnVector fromBytes(byte... values) {
  return build(DType.INT8, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new UINT8 vector from the given values.
 * <p>
 * Java does not have an unsigned byte type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromUnsignedBytes(byte... values) {
  return build(DType.UINT8, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new INT16 vector from the given values.
 */
public static ColumnVector fromShorts(short... values) {
  return build(DType.INT16, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new UINT16 vector from the given values.
 * <p>
 * Java does not have an unsigned short type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromUnsignedShorts(short... values) {
  return build(DType.UINT16, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new INT32 vector from the given values.
 */
public static ColumnVector fromInts(int... values) {
  return build(DType.INT32, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new UINT32 vector from the given values.
 * <p>
 * Java does not have an unsigned int type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromUnsignedInts(int... values) {
  return build(DType.UINT32, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new INT64 vector from the given values.
 */
public static ColumnVector fromLongs(long... values) {
  return build(DType.INT64, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new UINT64 vector from the given values.
 * <p>
 * Java does not have an unsigned long type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromUnsignedLongs(long... values) {
  return build(DType.UINT64, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new FLOAT32 vector from the given values.
 */
public static ColumnVector fromFloats(float... values) {
  return build(DType.FLOAT32, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new FLOAT64 vector from the given values.
 */
public static ColumnVector fromDoubles(double... values) {
  return build(DType.FLOAT64, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new TIMESTAMP_DAYS vector from the given values.
 */
public static ColumnVector daysFromInts(int... values) {
  return build(DType.TIMESTAMP_DAYS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new DURATION_SECONDS vector from the given values.
 */
public static ColumnVector durationSecondsFromLongs(long... values) {
  return build(DType.DURATION_SECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new TIMESTAMP_SECONDS vector from the given values.
 */
public static ColumnVector timestampSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_SECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new DURATION_DAYS vector from the given values.
 */
public static ColumnVector durationDaysFromInts(int... values) {
  return build(DType.DURATION_DAYS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new DURATION_MILLISECONDS vector from the given values.
 */
public static ColumnVector durationMilliSecondsFromLongs(long... values) {
  return build(DType.DURATION_MILLISECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new TIMESTAMP_MILLISECONDS vector from the given values.
 */
public static ColumnVector timestampMilliSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_MILLISECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new DURATION_MICROSECONDS vector from the given values.
 */
public static ColumnVector durationMicroSecondsFromLongs(long... values) {
  return build(DType.DURATION_MICROSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new TIMESTAMP_MICROSECONDS vector from the given values.
 */
public static ColumnVector timestampMicroSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_MICROSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new DURATION_NANOSECONDS vector from the given values.
 */
public static ColumnVector durationNanoSecondsFromLongs(long... values) {
  return build(DType.DURATION_NANOSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new TIMESTAMP_NANOSECONDS vector from the given values.
 */
public static ColumnVector timestampNanoSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_NANOSECONDS, values.length, (b) -> b.appendArray(values));
}
/**
 * Create a new decimal vector from unscaled values (int array) and scale.
 * The created vector is of type DType.DECIMAL32, whose max precision is 9.
 * Compared with scale of {@link java.math.BigDecimal}, the scale here represents the opposite meaning.
 */
public static ColumnVector decimalFromInts(int scale, int... values) {
  try (HostColumnVector host = HostColumnVector.decimalFromInts(scale, values)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new decimal vector from boxed unscaled values (Integer array) and scale.
 * The created vector is of type DType.DECIMAL32, whose max precision is 9.
 * Compared with scale of {@link java.math.BigDecimal}, the scale here represents the opposite meaning.
 */
public static ColumnVector decimalFromBoxedInts(int scale, Integer... values) {
  try (HostColumnVector host = HostColumnVector.decimalFromBoxedInts(scale, values)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new decimal vector from unscaled values (long array) and scale.
 * The created vector is of type DType.DECIMAL64, whose max precision is 18.
 * Compared with scale of {@link java.math.BigDecimal}, the scale here represents the opposite meaning.
 */
public static ColumnVector decimalFromLongs(int scale, long... values) {
  try (HostColumnVector host = HostColumnVector.decimalFromLongs(scale, values)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new decimal vector from boxed unscaled values (Long array) and scale.
 * The created vector is of type DType.DECIMAL64, whose max precision is 18.
 * Compared with scale of {@link java.math.BigDecimal}, the scale here represents the opposite meaning.
 */
public static ColumnVector decimalFromBoxedLongs(int scale, Long... values) {
  try (HostColumnVector host = HostColumnVector.decimalFromBoxedLongs(scale, values)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new decimal vector from double floats with specific DecimalType and RoundingMode.
 * All doubles will be rescaled if necessary, according to scale of input DecimalType and RoundingMode.
 * If any overflow occurs in extracting integral part, an IllegalArgumentException will be thrown.
 * This API is inefficient because of slow double -> decimal conversion, so it is mainly for testing.
 * Compared with scale of {@link java.math.BigDecimal}, the scale here represents the opposite meaning.
 */
public static ColumnVector decimalFromDoubles(DType type, RoundingMode mode, double... values) {
  try (HostColumnVector host = HostColumnVector.decimalFromDoubles(type, mode, values)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new decimal vector from BigIntegers
 * Compared with scale of {@link java.math.BigDecimal}, the scale here represents the opposite meaning.
 * @param scale the cudf scale (opposite sign of the BigDecimal scale) shared by all values.
 * @param values the unscaled values, one per row.
 * @return a new decimal ColumnVector on the device.
 */
public static ColumnVector decimalFromBigInt(int scale, BigInteger... values) {
  try (HostColumnVector host = HostColumnVector.decimalFromBigIntegers(scale, values)) {
    // Return directly instead of through a redundant local, matching the sibling factories.
    return host.copyToDevice();
  }
}
/**
 * Create a new string vector from the given values. This API
 * supports inline nulls. This is really intended to be used only for testing as
 * it is slow and memory intensive to translate between java strings and UTF8 strings.
 * @param values the strings, where a null element becomes a null row.
 * @return a new STRING ColumnVector on the device.
 */
public static ColumnVector fromStrings(String... values) {
  try (HostColumnVector host = HostColumnVector.fromStrings(values)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new string vector from the given values. This API
 * supports inline nulls.
 * @param values UTF-8 encoded strings, where a null element becomes a null row.
 * @return a new STRING ColumnVector on the device.
 */
public static ColumnVector fromUTF8Strings(byte[]... values) {
  try (HostColumnVector host = HostColumnVector.fromUTF8Strings(values)) {
    return host.copyToDevice();
  }
}
/**
 * Create a new decimal vector from the given values. This API supports inline nulls,
 * but is much slower than building from primitive array of unscaledValues.
 * Notice:
 * 1. All input BigDecimals should share same scale.
 * 2. The scale will be zero if all input values are null.
 */
public static ColumnVector fromDecimals(BigDecimal... values) {
  try (HostColumnVector hcv = HostColumnVector.fromDecimals(values)) {
    return hcv.copyToDevice();
  }
}
/**
 * Create a new BOOL8 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector fromBoxedBooleans(Boolean... values) {
  return build(DType.BOOL8, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new INT8 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector fromBoxedBytes(Byte... values) {
  return build(DType.INT8, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new UINT8 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 * <p>
 * Java does not have an unsigned byte type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromBoxedUnsignedBytes(Byte... values) {
  return build(DType.UINT8, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new INT16 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector fromBoxedShorts(Short... values) {
  return build(DType.INT16, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new UINT16 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 * <p>
 * Java does not have an unsigned short type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromBoxedUnsignedShorts(Short... values) {
  return build(DType.UINT16, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new INT32 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector fromBoxedInts(Integer... values) {
  return build(DType.INT32, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new UINT32 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 * <p>
 * Java does not have an unsigned int type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromBoxedUnsignedInts(Integer... values) {
  return build(DType.UINT32, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new INT64 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector fromBoxedLongs(Long... values) {
  return build(DType.INT64, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new UINT64 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 * <p>
 * Java does not have an unsigned long type, so the values will be
 * treated as if the bits represent an unsigned value.
 */
public static ColumnVector fromBoxedUnsignedLongs(Long... values) {
  return build(DType.UINT64, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new FLOAT32 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector fromBoxedFloats(Float... values) {
  return build(DType.FLOAT32, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new FLOAT64 vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector fromBoxedDoubles(Double... values) {
  return build(DType.FLOAT64, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new TIMESTAMP_DAYS vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector timestampDaysFromBoxedInts(Integer... values) {
  return build(DType.TIMESTAMP_DAYS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new DURATION_DAYS vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector durationDaysFromBoxedInts(Integer... values) {
  return build(DType.DURATION_DAYS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new DURATION_SECONDS vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector durationSecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_SECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new TIMESTAMP_SECONDS vector from the given values. This API supports inline nulls,
 * but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector timestampSecondsFromBoxedLongs(Long... values) {
  return build(DType.TIMESTAMP_SECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new DURATION_MILLISECONDS vector from the given values. This API supports inline
 * nulls, but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector durationMilliSecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_MILLISECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new TIMESTAMP_MILLISECONDS vector from the given values. This API supports inline
 * nulls, but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector timestampMilliSecondsFromBoxedLongs(Long... values) {
  return build(DType.TIMESTAMP_MILLISECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new DURATION_MICROSECONDS vector from the given values. This API supports inline
 * nulls, but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector durationMicroSecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_MICROSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new TIMESTAMP_MICROSECONDS vector from the given values. This API supports inline
 * nulls, but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector timestampMicroSecondsFromBoxedLongs(Long... values) {
  return build(DType.TIMESTAMP_MICROSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new DURATION_NANOSECONDS vector from the given values. This API supports inline
 * nulls, but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector durationNanoSecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_NANOSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new TIMESTAMP_NANOSECONDS vector from the given values. This API supports inline
 * nulls, but is much slower than using a regular array and should really only be used
 * for tests.
 */
public static ColumnVector timestampNanoSecondsFromBoxedLongs(Long... values) {
  return build(DType.TIMESTAMP_NANOSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
 * Creates an empty column (zero rows) matching the given data type, recursively creating
 * empty child columns for every child described by 'colType'.
 *
 * The performance is not good, so use it carefully. We may want to move this implementation
 * to the native once figuring out a way to pass the nested data type to the native.
 *
 * @param colType the data type of the empty column; must be non-null with a non-null DType
 * @return an empty ColumnVector with its children, each containing zero elements.
 *         Users should close the ColumnVector to avoid memory leak.
 */
public static ColumnVector empty(HostColumnVector.DataType colType) {
  if (colType == null || colType.getType() == null) {
    throw new IllegalArgumentException("The data type and its 'DType' should NOT be null.");
  }
  if (colType instanceof HostColumnVector.BasicType) {
    // Flat (non-nested) column: a single empty cudf column of the right type/scale.
    DType dtype = colType.getType();
    return new ColumnVector(makeEmptyCudfColumn(dtype.typeId.getNativeId(), dtype.getScale()));
  }
  if (colType instanceof HostColumnVector.ListType) {
    assert colType.getNumChildren() == 1 : "List type requires one child type";
    // The child is copied by makeList, so it can be closed here.
    try (ColumnVector listChild = empty(colType.getChild(0))) {
      return makeList(listChild);
    }
  }
  if (colType instanceof HostColumnVector.StructType) {
    ColumnVector[] structChildren = new ColumnVector[colType.getNumChildren()];
    try {
      for (int i = 0; i < structChildren.length; i++) {
        structChildren[i] = empty(colType.getChild(i));
      }
      return makeStruct(structChildren);
    } finally {
      // Children are always released; makeStruct copies what it needs.
      for (ColumnVector child : structChildren) {
        if (child != null) {
          child.close();
        }
      }
    }
  }
  throw new IllegalArgumentException("Unsupported data type: " + colType);
}
/**
 * Wraps an array of native cudf::column handles into owning ColumnVectors.
 *
 * On success, ownership of every handle transfers to the returned vectors. On failure,
 * every already-wrapped vector is closed and every still-raw handle is deleted, so no
 * native memory leaks; secondary failures are attached as suppressed exceptions.
 *
 * @param nativeHandles native column handles; entries are zeroed as they are consumed
 * @return one ColumnVector per input handle
 */
static ColumnVector[] getColumnVectorsFromPointers(long[] nativeHandles) {
  ColumnVector[] columns = new ColumnVector[nativeHandles.length];
  try {
    for (int i = 0; i < nativeHandles.length; i++) {
      long nativeHandle = nativeHandles[i];
      // setting address to zero, so we don't clean it in case of an exception as it
      // will be cleaned up by the constructor
      nativeHandles[i] = 0;
      columns[i] = new ColumnVector(nativeHandle);
    }
    return columns;
  } catch (Throwable t) {
    // First close the vectors that were successfully constructed (they own their handles).
    for (ColumnVector columnVector : columns) {
      if (columnVector != null) {
        try {
          columnVector.close();
        } catch (Throwable s) {
          t.addSuppressed(s);
        }
      }
    }
    // Then delete any raw handles that were never wrapped (still non-zero).
    for (long nativeHandle : nativeHandles) {
      if (nativeHandle != 0) {
        try {
          deleteCudfColumn(nativeHandle);
        } catch (Throwable s) {
          t.addSuppressed(s);
        }
      }
    }
    throw t;
  }
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ColumnView.java
|
/*
*
* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.*;
import java.util.stream.IntStream;
import static ai.rapids.cudf.HostColumnVector.OFFSET_SIZE;
/**
* This class represents the column_view of a column analogous to its cudf cpp counterpart.
* It holds view information like the native handle and other metadata for a column_view. It also
* exposes APIs that would allow operations on a view.
*/
public class ColumnView implements AutoCloseable, BinaryOperable {
static {
  // Make sure the JNI native libraries are loaded before any native method is called.
  NativeDepsLoader.loadNativeDeps();
}
/** Sentinel used when the null count of a column has not been computed. */
public static final long UNKNOWN_NULL_COUNT = -1;
// Address of the native cudf::column_view; reset to 0 when this view is closed.
protected long viewHandle;
// Type, row count and null count are read from the native view at construction time.
protected final DType type;
protected final long rows;
protected final long nullCount;
// Owning off-heap state when this view was created from a ColumnVector; null when this
// view only wraps a native handle (see the ColumnView(long) constructor).
protected final ColumnVector.OffHeapState offHeap;
/**
* Constructs a Column View given a native view address. This asserts that if the ColumnView is
* of nested-type it doesn't contain non-empty nulls
* @param address the view handle
* @throws AssertionError if the address points to a nested-type view with non-empty nulls
*/
ColumnView(long address) {
  this.viewHandle = address;
  try {
    this.type = DType.fromNative(ColumnView.getNativeTypeId(viewHandle), ColumnView.getNativeTypeScale(viewHandle));
    this.rows = ColumnView.getNativeRowCount(viewHandle);
    this.nullCount = ColumnView.getNativeNullCount(viewHandle);
    // This view does not own any device memory, only the native view struct itself.
    this.offHeap = null;
    AssertEmptyNulls.assertNullsAreEmpty(this);
  } catch (Throwable t) {
    // offHeap state is null, so there is nothing to clean in offHeap
    // delete ColumnView to avoid memory leak
    deleteColumnView(viewHandle);
    viewHandle = 0;
    throw t;
  }
}
/**
* Intended to be called from ColumnVector when it is being constructed. Because state creates a
* cudf::column_view instance and will close it in all cases, we don't want to have to double
* close it. This asserts that if the offHeapState is of nested-type it doesn't contain non-empty nulls
* @param state the state this view is based off of.
* @throws AssertionError if offHeapState points to a nested-type view with non-empty nulls
*/
protected ColumnView(ColumnVector.OffHeapState state) {
  offHeap = state;
  try {
    viewHandle = state.getViewHandle();
    type = DType.fromNative(ColumnView.getNativeTypeId(viewHandle), ColumnView.getNativeTypeScale(viewHandle));
    rows = ColumnView.getNativeRowCount(viewHandle);
    nullCount = ColumnView.getNativeNullCount(viewHandle);
    AssertEmptyNulls.assertNullsAreEmpty(this);
  } catch (Throwable t) {
    // The off-heap state owns the view handle, so let it clean everything up.
    offHeap.clean(false);
    viewHandle = 0;
    throw t;
  }
}
/**
* Create a new column view based off of data already on the device. Ref count on the buffers
* is not incremented and none of the underlying buffers are owned by this view. The returned
* ColumnView is only valid as long as the underlying buffers remain valid. If the buffers are
* closed before this ColumnView is closed, it will result in undefined behavior.
*
* If ownership is needed, call {@link ColumnView#copyToColumnVector}
*
* @param type the type of the vector
* @param rows the number of rows in this vector.
* @param nullCount the number of nulls in the dataset.
* @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0.
* The ownership doesn't change on this buffer
 * @param offsetBuffer a device buffer required for nested types including strings and string
 *                     categories. The ownership doesn't change on this buffer
* @param children an array of ColumnView children
*/
public ColumnView(DType type, long rows, Optional<Long> nullCount,
                  BaseDeviceMemoryBuffer validityBuffer,
                  BaseDeviceMemoryBuffer offsetBuffer, ColumnView[] children) {
  // No data buffer: this overload is only for nested types (LIST/STRUCT), whose data
  // lives entirely in the children.
  this(type, (int) rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(),
      null, validityBuffer, offsetBuffer, children);
  assert(type.isNestedType());
  assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE)
      || !nullCount.isPresent();
}
/**
* Create a new column view based off of data already on the device. Ref count on the buffers
* is not incremented and none of the underlying buffers are owned by this view. The returned
* ColumnView is only valid as long as the underlying buffers remain valid. If the buffers are
* closed before this ColumnView is closed, it will result in undefined behavior.
*
* If ownership is needed, call {@link ColumnView#copyToColumnVector}
*
* @param type the type of the vector
* @param rows the number of rows in this vector.
* @param nullCount the number of nulls in the dataset.
 * @param dataBuffer a device buffer holding the column's data. The ownership doesn't change
 *                   on this buffer
* @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0.
* The ownership doesn't change on this buffer
*/
public ColumnView(DType type, long rows, Optional<Long> nullCount,
                  BaseDeviceMemoryBuffer dataBuffer,
                  BaseDeviceMemoryBuffer validityBuffer) {
  // No offset buffer and no children: this overload is only for flat fixed-width types.
  this(type, (int) rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(),
      dataBuffer, validityBuffer, null, null);
  assert (!type.isNestedType());
  assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE)
      || !nullCount.isPresent();
}
/**
* Create a new column view based off of data already on the device. Ref count on the buffers
* is not incremented and none of the underlying buffers are owned by this view. The returned
* ColumnView is only valid as long as the underlying buffers remain valid. If the buffers are
* closed before this ColumnView is closed, it will result in undefined behavior.
*
* If ownership is needed, call {@link ColumnView#copyToColumnVector}
*
* @param type the type of the vector
* @param rows the number of rows in this vector.
* @param nullCount the number of nulls in the dataset.
 * @param dataBuffer a device buffer holding the column's data. The ownership doesn't change
 *                   on this buffer
* @param validityBuffer an optional validity buffer. Must be provided if nullCount != 0.
* The ownership doesn't change on this buffer
 * @param offsetBuffer The offset buffer for columns that need an offset buffer
*/
public ColumnView(DType type, long rows, Optional<Long> nullCount,
                  BaseDeviceMemoryBuffer dataBuffer,
                  BaseDeviceMemoryBuffer validityBuffer, BaseDeviceMemoryBuffer offsetBuffer) {
  // No children: this overload is for non-nested types that carry an offset buffer
  // (e.g. strings).
  this(type, (int) rows, nullCount.orElse(UNKNOWN_NULL_COUNT).intValue(),
      dataBuffer, validityBuffer, offsetBuffer, null);
  assert (!type.isNestedType());
  assert (nullCount.isPresent() && nullCount.get() <= Integer.MAX_VALUE)
      || !nullCount.isPresent();
}
/**
 * Shared private constructor: builds a native column_view over the given buffers and
 * child views, then delegates to the handle-based constructor.
 */
private ColumnView(DType type, long rows, int nullCount,
                   BaseDeviceMemoryBuffer dataBuffer, BaseDeviceMemoryBuffer validityBuffer,
                   BaseDeviceMemoryBuffer offsetBuffer, ColumnView[] children) {
  this(ColumnVector.initViewHandle(type, (int) rows, nullCount, dataBuffer, validityBuffer,
      offsetBuffer,
      children == null ? new long[0]
          : Arrays.stream(children).mapToLong(ColumnView::getNativeView).toArray()));
}
/**
 * Copies this view's data into a new owning ColumnVector.
 * @return a new ColumnVector that owns a copy of this view's data
 */
public ColumnVector copyToColumnVector() {
  long copiedHandle = ColumnView.copyColumnViewToCV(getNativeView());
  return new ColumnVector(copiedHandle);
}
/**
* USE WITH CAUTION: This method exposes the address of the native cudf::column_view. This allows
* writing custom kernels or other cuda operations on the data. DO NOT close this column
* vector until you are completely done using the native column_view. DO NOT modify the column in
* any way. This should be treated as a read only data structure. This API is unstable as
* the underlying C/C++ API is still not stabilized. If the underlying data structure
* is renamed this API may be replaced. The underlying data structure can change from release
* to release (it is not stable yet) so be sure that your native code is complied against the
* exact same version of libcudf as this is released for.
*/
public final long getNativeView() {
  return viewHandle;
}
/**
 * Computes the scale of the fixed-point result of a binary operation between two
 * decimal-typed operands.
 * @param op the binary operation
 * @param lhsType decimal type of the left operand
 * @param rhsType decimal type of the right operand
 * @return the scale of the resulting decimal type
 */
static int getFixedPointOutputScale(BinaryOp op, DType lhsType, DType rhsType) {
  assert (lhsType.isDecimalType() && rhsType.isDecimalType());
  return fixedPointOutputScale(op.nativeId, lhsType.getScale(), rhsType.getScale());
}
private static native int fixedPointOutputScale(int op, int lhsScale, int rhsScale);
/** Returns the cudf data type of this view. */
public final DType getType() {
  return type;
}
/**
 * Returns views of all child columns of this view.
 * Please note that it is the responsibility of the caller to close these views.
 * @return an array of child column views, or null if this is not a nested type
 */
public final ColumnView[] getChildColumnViews() {
  if (!getType().isNestedType()) {
    return null;
  }
  int childCount = getNumChildren();
  ColumnView[] result = new ColumnView[childCount];
  try {
    for (int idx = 0; idx < childCount; idx++) {
      result[idx] = getChildColumnView(idx);
    }
    return result;
  } catch (Throwable t) {
    // Close whatever was created before propagating the failure.
    for (ColumnView view : result) {
      if (view != null) {
        view.close();
      }
    }
    throw t;
  }
}
/**
 * Returns the child column view at a given index.
 * Please note that it is the responsibility of the caller to close this view.
 * @param childIndex the index of the child, must be in the range [0, getNumChildren())
 * @return a column view, or null if this is not a nested type
 */
public final ColumnView getChildColumnView(int childIndex) {
  int numChildren = getNumChildren();
  // Guard both ends of the range: a negative index would otherwise be passed straight
  // to native code and read out of bounds.
  assert childIndex >= 0 && childIndex < numChildren :
      "children index should be in the range [0, " + numChildren + ")";
  if (!getType().isNestedType()) {
    return null;
  }
  long childColumnView = ColumnView.getChildCvPointer(viewHandle, childIndex);
  return new ColumnView(childColumnView);
}
/**
 * Get a ColumnView that is the offsets for this list.
 * Please note that it is the responsibility of the caller to close this view, and the parent
 * column must out live this view.
 */
public ColumnView getListOffsetsView() {
  assert(getType().equals(DType.LIST));
  long offsetsHandle = getListOffsetCvPointer(viewHandle);
  return new ColumnView(offsetsHandle);
}
/**
* Gets the data buffer for the current column view (viewHandle).
* If the type is LIST, STRUCT it returns null.
* @return If the type is LIST, STRUCT or data buffer is empty it returns null,
* else return the data device buffer
*/
public final BaseDeviceMemoryBuffer getData() {
  return getDataBuffer(viewHandle);
}
/** Returns the offsets buffer of this view, as exposed by the native layer. */
public final BaseDeviceMemoryBuffer getOffsets() {
  return getOffsetsBuffer(viewHandle);
}
/** Returns the validity (null mask) buffer of this view, as exposed by the native layer. */
public final BaseDeviceMemoryBuffer getValid() {
  return getValidityBuffer(viewHandle);
}
/**
* Returns the number of nulls in the data. Note that this might end up
* being a very expensive operation because if the null count is not
* known it will be calculated.
*/
public long getNullCount() {
  // The count was computed from the native view when this ColumnView was constructed.
  return nullCount;
}
/**
 * Returns the number of rows in this vector.
 */
public final long getRowCount() {
  return rows;
}
/** Returns the number of child columns, or 0 for non-nested types. */
public final int getNumChildren() {
  if (!getType().isNestedType()) {
    return 0;
  }
  return ColumnView.getNativeNumChildren(viewHandle);
}
/**
 * Returns the amount of device memory used.
 */
public long getDeviceMemorySize() {
  return getDeviceMemorySize(getNativeView(), false);
}
/**
 * Releases the native view handle. When the view is backed by off-heap state the state
 * owns the handle and will delete it, so nothing is done here beyond clearing the address.
 */
@Override
public void close() {
  if (offHeap == null) {
    ColumnView.deleteColumnView(viewHandle);
  }
  viewHandle = 0;
}
@Override
public String toString() {
  return String.format("ColumnView{rows=%d, type=%s, nullCount=%d}", rows, type, nullCount);
}
/**
* Used for string strip function.
* Indicates characters to be stripped from the beginning, end, or both of each string.
*/
private enum StripType {
  LEFT(0), // strip characters from the beginning of the string
  RIGHT(1), // strip characters from the end of the string
  BOTH(2); // strip characters from the beginning and end of the string
  // Integer value passed to native code to identify the strip mode.
  final int nativeId;
  StripType(int nativeId) { this.nativeId = nativeId; }
}
/**
 * Returns a new ColumnVector with NaNs converted to nulls, preserving the existing null values.
 * Only valid on FLOAT32/FLOAT64 columns.
 */
public final ColumnVector nansToNulls() {
  assert type.equals(DType.FLOAT32) || type.equals(DType.FLOAT64);
  long resultHandle = nansToNulls(this.getNativeView());
  return new ColumnVector(resultHandle);
}
/////////////////////////////////////////////////////////////////////////////
// DEVICE METADATA
/////////////////////////////////////////////////////////////////////////////
/**
 * Retrieve the number of characters in each string. Null strings will have value of null.
 * @return ColumnVector holding the length of the string at index 'i' in this vector
 */
public final ColumnVector getCharLengths() {
  assert DType.STRING.equals(type) : "char length only available for String type";
  long lengthsHandle = charLengths(getNativeView());
  return new ColumnVector(lengthsHandle);
}
/**
 * Retrieve the number of bytes for each string. Null strings will have value of null.
 * @return ColumnVector where element i is the byte count of the string at index 'i'
 */
public final ColumnVector getByteCount() {
  assert type.equals(DType.STRING) : "type has to be a String";
  long countsHandle = byteCount(getNativeView());
  return new ColumnVector(countsHandle);
}
/**
 * Get the code point values (integers) for each character of each string.
 * @return ColumnVector with code point integer values for each character as INT32
 */
public final ColumnVector codePoints() {
  assert type.equals(DType.STRING) : "type has to be a String";
  long codePointsHandle = codePoints(getNativeView());
  return new ColumnVector(codePointsHandle);
}
/**
 * Get the number of elements for each list. Null lists will have a value of null.
 * @return the number of elements in each list as an INT32 value
 */
public final ColumnVector countElements() {
  assert DType.LIST.equals(type) : "Only lists are supported";
  long countsHandle = countElements(getNativeView());
  return new ColumnVector(countsHandle);
}
/**
 * Returns a Boolean vector with the same number of rows as this instance, holding TRUE for
 * every non-null entry and FALSE for every null entry (as per the validity mask).
 * @return Boolean vector
 */
public final ColumnVector isNotNull() {
  long resultHandle = isNotNullNative(getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Returns a Boolean vector with the same number of rows as this instance, holding FALSE for
 * every non-null entry and TRUE for every null entry (as per the validity mask).
 * @return Boolean vector
 */
public final ColumnVector isNull() {
  long resultHandle = isNullNative(getNativeView());
  return new ColumnVector(resultHandle);
}
/**
* Returns a Boolean vector with the same number of rows as this instance, that has
* TRUE for any entry that is a fixed-point, and FALSE if its not a fixed-point.
* A null will be returned for null entries.
*
* The sign and the exponent is optional. The decimal point may only appear once.
* The integer component must fit within the size limits of the underlying fixed-point
* storage type. The value of the integer component is based on the scale of the target
* decimalType.
*
* Example:
* vec = ["A", "nan", "Inf", "-Inf", "Infinity", "infinity", "2.1474", "112.383", "-2.14748",
* "NULL", "null", null, "1.2", "1.2e-4", "0.00012"]
* vec.isFixedPoint() = [false, false, false, false, false, false, true, true, true, false, false,
* null, true, true, true]
*
* @param decimalType the data type that should be used for bounds checking. Note that only
* Decimal types (fixed-point) are allowed.
* @return Boolean vector
*/
public final ColumnVector isFixedPoint(DType decimalType) {
  assert type.equals(DType.STRING);
  assert decimalType.isDecimalType();
  // Pass the target decimal type's id and scale so the native side can bounds-check values.
  return new ColumnVector(isFixedPoint(getNativeView(),
      decimalType.getTypeId().getNativeId(), decimalType.getScale()));
}
/**
 * Returns a Boolean vector with the same number of rows as this instance, holding TRUE for
 * entries that are integers and FALSE otherwise. Null entries yield null.
 *
 * NOTE: "Integer" here means a number with no fractional part, not a 32-bit integer; a TRUE
 * value may still overflow or underflow when converted to a Java integral type.
 *
 * @return Boolean vector
 */
public final ColumnVector isInteger() {
  assert type.equals(DType.STRING);
  long resultHandle = isInteger(getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Returns a Boolean vector with the same number of rows as this instance, holding TRUE for
 * entries that are integers within the bounds of the given type and FALSE otherwise.
 * Null entries yield null.
 *
 * @param intType the data type used for bounds checking; must be a cudf integer type
 *                (signed/unsigned int8 through int64)
 * @return Boolean vector
 */
public final ColumnVector isInteger(DType intType) {
  assert type.equals(DType.STRING);
  assert intType.isBackedByInt() || intType.isBackedByLong() || intType.isBackedByByte()
      || intType.isBackedByShort();
  int nativeTypeId = intType.getTypeId().getNativeId();
  return new ColumnVector(isIntegerWithType(getNativeView(), nativeTypeId, intType.getScale()));
}
/**
 * Returns a Boolean vector with the same number of rows as this instance, holding TRUE for
 * entries that are floats and FALSE otherwise. Null entries yield null.
 *
 * NOTE: "Float" here means any number that is or can be written as a fraction — integers
 * included — not a 32-bit float. A TRUE value may still overflow or underflow when converted
 * to a Java float or double.
 *
 * @return Boolean vector
 */
public final ColumnVector isFloat() {
  assert type.equals(DType.STRING);
  long resultHandle = isFloat(getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Returns a Boolean vector with the same number of rows as this instance, holding TRUE for
 * entries that are NaN, and FALSE for nulls and valid floating point values.
 * @return Boolean vector
 */
public final ColumnVector isNan() {
  long resultHandle = isNanNative(getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Returns a Boolean vector with the same number of rows as this instance, holding TRUE for
 * entries that are null or a valid floating point value, and FALSE otherwise.
 * @return Boolean vector
 */
public final ColumnVector isNotNan() {
  long resultHandle = isNotNanNative(getNativeView());
  return new ColumnVector(resultHandle);
}
/////////////////////////////////////////////////////////////////////////////
// Replacement
/////////////////////////////////////////////////////////////////////////////
/**
* Returns a vector with all values "oldValues[i]" replaced with "newValues[i]".
* Warning:
* Currently this function doesn't work for Strings or StringCategories.
* NaNs can't be replaced in the original vector but regular values can be replaced with NaNs
* Nulls can't be replaced in the original vector but regular values can be replaced with Nulls
* Mixing of types isn't allowed, the resulting vector will be the same type as the original.
* e.g. You can't replace an integer vector with values from a long vector
*
* Usage:
* this = {1, 4, 5, 1, 5}
* oldValues = {1, 5, 7}
* newValues = {2, 6, 9}
*
* result = this.findAndReplaceAll(oldValues, newValues);
* result = {2, 4, 6, 2, 6} (1 and 5 replaced with 2 and 6 but 7 wasn't found so no change)
*
* @param oldValues - A vector containing values that should be replaced
* @param newValues - A vector containing new values
* @return - A new vector containing the old values replaced with new values
*/
public final ColumnVector findAndReplaceAll(ColumnView oldValues, ColumnView newValues) {
  long resultHandle =
      findAndReplaceAll(oldValues.getNativeView(), newValues.getNativeView(), getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Returns a ColumnVector with any null values replaced with a scalar.
 * The types of the input ColumnVector and Scalar must match, else an error is thrown.
 *
 * @param scalar - Scalar value to use as replacement
 * @return - ColumnVector with nulls replaced by scalar
 */
public final ColumnVector replaceNulls(Scalar scalar) {
  return new ColumnVector(replaceNullsScalar(getNativeView(), scalar.getScalarHandle()));
}
/**
 * Returns a ColumnVector with any null values replaced with the corresponding row in the
 * specified replacement column.
 * This column and the replacement column must have the same type and number of rows.
 *
 * @param replacements column of replacement values
 * @return column with nulls replaced by corresponding row of replacements column
 */
public final ColumnVector replaceNulls(ColumnView replacements) {
  return new ColumnVector(replaceNullsColumn(getNativeView(), replacements.getNativeView()));
}
/**
 * Returns a ColumnVector with null values replaced according to the given replacement
 * policy (the policy's isPreceding flag is forwarded to the native implementation).
 *
 * @param policy the policy used to pick replacement values
 * @return column with nulls replaced per the policy
 */
public final ColumnVector replaceNulls(ReplacePolicy policy) {
  return new ColumnVector(replaceNullsPolicy(getNativeView(), policy.isPreceding));
}
/**
 * For a BOOL8 vector, computes a vector whose rows are selected from two other vectors based
 * on this vector's boolean value in the corresponding row: trueValues where true, falseValues
 * otherwise. Both inputs must be the same length as this vector and share a data type.
 * @param trueValues the values to select if a row in this column is true
 * @param falseValues the values to select if a row in this column is not true
 * @return the computed vector
 */
public final ColumnVector ifElse(ColumnView trueValues, ColumnView falseValues) {
  if (!type.equals(DType.BOOL8)) {
    throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type);
  }
  return new ColumnVector(
      ifElseVV(getNativeView(), trueValues.getNativeView(), falseValues.getNativeView()));
}
/**
 * For a BOOL8 vector, computes a vector whose rows are selected based on this vector's
 * boolean value in the corresponding row: the trueValues row where true, the falseValue
 * scalar otherwise. trueValues must be the same length as this vector, and trueValues and
 * falseValue must share a data type.
 * @param trueValues the values to select if a row in this column is true
 * @param falseValue the value to select if a row in this column is not true
 * @return the computed vector
 */
public final ColumnVector ifElse(ColumnView trueValues, Scalar falseValue) {
  if (!type.equals(DType.BOOL8)) {
    throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type);
  }
  return new ColumnVector(
      ifElseVS(getNativeView(), trueValues.getNativeView(), falseValue.getScalarHandle()));
}
/**
 * For a BOOL8 vector, computes a vector whose rows are selected based on this vector's
 * boolean value in the corresponding row: the trueValue scalar where true, the falseValues
 * row otherwise. falseValues must be the same length as this vector, and trueValue and
 * falseValues must share a data type.
 * @param trueValue the value to select if a row in this column is true
 * @param falseValues the values to select if a row in this column is not true
 * @return the computed vector
 */
public final ColumnVector ifElse(Scalar trueValue, ColumnView falseValues) {
  if (!type.equals(DType.BOOL8)) {
    throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type);
  }
  return new ColumnVector(
      ifElseSV(getNativeView(), trueValue.getScalarHandle(), falseValues.getNativeView()));
}
/**
 * For a BOOL8 vector, computes a vector whose rows are selected based on this vector's
 * boolean value in the corresponding row: the trueValue scalar where true, the falseValue
 * scalar otherwise. The two scalars must share a data type.
 * @param trueValue the value to select if a row in this column is true
 * @param falseValue the value to select if a row in this column is not true
 * @return the computed vector
 */
public final ColumnVector ifElse(Scalar trueValue, Scalar falseValue) {
  if (!type.equals(DType.BOOL8)) {
    throw new IllegalArgumentException("Cannot select with a predicate vector of type " + type);
  }
  return new ColumnVector(
      ifElseSS(getNativeView(), trueValue.getScalarHandle(), falseValue.getScalarHandle()));
}
/////////////////////////////////////////////////////////////////////////////
// Slice/Split and Concatenate
/////////////////////////////////////////////////////////////////////////////
/**
* Slices a column (including null values) into a set of columns
* according to a set of indices. The caller owns the ColumnVectors and is responsible
* closing them
*
* The "slice" function divides part of the input column into multiple intervals
* of rows using the indices values and it stores the intervals into the output
* columns. Regarding the interval of indices, a pair of values are taken from
* the indices array in a consecutive manner. The pair of indices are left-closed
* and right-open.
*
* The pairs of indices in the array are required to comply with the following
* conditions:
* a, b belongs to Range[0, input column size]
* a <= b, where the position of a is less or equal to the position of b.
*
* Exceptional cases for the indices array are:
* When the values in the pair are equal, the function returns an empty column.
* When the values in the pair are 'strictly decreasing', the outcome is
* undefined.
* When any of the values in the pair don't belong to the range[0, input column
* size), the outcome is undefined.
* When the indices array is empty, an empty vector of columns is returned.
*
* The caller owns the output ColumnVectors and is responsible for closing them.
*
* @param indices
* @return A new ColumnVector array with slices from the original ColumnVector
*/
public final ColumnVector[] slice(int... indices) {
  long[] nativeHandles = slice(this.getNativeView(), indices);
  ColumnVector[] columnVectors = new ColumnVector[nativeHandles.length];
  try {
    for (int i = 0; i < nativeHandles.length; i++) {
      long nativeHandle = nativeHandles[i];
      // setting address to zero, so we don't clean it in case of an exception as it
      // will be cleaned up by the constructor
      nativeHandles[i] = 0;
      columnVectors[i] = new ColumnVector(nativeHandle);
    }
  } catch (Throwable t) {
    try {
      // Presumably closes already-wrapped vectors and deletes the remaining raw handles
      // (still non-zero) — see cleanupColumnViews.
      cleanupColumnViews(nativeHandles, columnVectors, t);
    } catch (Throwable s) {
      t.addSuppressed(s);
    } finally {
      throw t;
    }
  }
  return columnVectors;
}
/**
 * Return a subVector from start inclusive to the end of the vector.
 * @param start the index to start at.
 */
public final ColumnVector subVector(int start) {
  return subVector(start, (int) rows);
}
/**
 * Return a subVector.
 * @param start the index to start at (inclusive).
 * @param end the index to end at (exclusive).
 */
public final ColumnVector subVector(int start, int end) {
  ColumnVector[] sliced = slice(start, end);
  assert sliced.length == 1;
  return sliced[0];
}
/**
* Splits a column (including null values) into a set of columns
* according to a set of indices. The caller owns the ColumnVectors and is responsible
* closing them.
*
* The "split" function divides the input column into multiple intervals
* of rows using the splits indices values and it stores the intervals into the
* output columns. Regarding the interval of indices, a pair of values are taken
* from the indices array in a consecutive manner. The pair of indices are
* left-closed and right-open.
*
* The indices array ('splits') is require to be a monotonic non-decreasing set.
* The indices in the array are required to comply with the following conditions:
* a, b belongs to Range[0, input column size]
* a <= b, where the position of a is less or equal to the position of b.
*
* The split function will take a pair of indices from the indices array
* ('splits') in a consecutive manner. For the first pair, the function will
* take the value 0 and the first element of the indices array. For the last pair,
* the function will take the last element of the indices array and the size of
* the input column.
*
* Exceptional cases for the indices array are:
* When the values in the pair are equal, the function return an empty column.
* When the values in the pair are 'strictly decreasing', the outcome is
* undefined.
* When any of the values in the pair don't belong to the range[0, input column
* size), the outcome is undefined.
* When the indices array is empty, an empty vector of columns is returned.
*
* The input columns may have different sizes. The number of
* columns must be equal to the number of indices in the array plus one.
*
* Example:
* input: {10, 12, 14, 16, 18, 20, 22, 24, 26, 28}
* splits: {2, 5, 9}
* output: {{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}}
*
* Note that this is very similar to the output from a PartitionedTable.
*
* @param indices the indexes to split with
* @return A new ColumnVector array with slices from the original ColumnVector
*/
public final ColumnVector[] split(int... indices) {
ColumnView[] views = splitAsViews(indices);
ColumnVector[] columnVectors = new ColumnVector[views.length];
try {
for (int i = 0; i < views.length; i++) {
columnVectors[i] = views[i].copyToColumnVector();
}
return columnVectors;
} catch (Throwable t) {
for (ColumnVector cv : columnVectors) {
if (cv != null) {
cv.close();
}
}
throw t;
} finally {
for (ColumnView view : views) {
view.close();
}
}
}
  /**
   * Splits a ColumnView (including null values) into a set of ColumnViews
   * according to a set of indices. No data is moved or copied.
   *
   * IMPORTANT NOTE: Nothing is copied out from the vector and the slices will only be relevant for
   * the lifecycle of the underlying ColumnVector.
   *
   * The "split" function divides the input column into multiple intervals
   * of rows using the splits indices values and it stores the intervals into the
   * output columns. Regarding the interval of indices, a pair of values are taken
   * from the indices array in a consecutive manner. The pair of indices are
   * left-closed and right-open.
   *
   * The indices array ('splits') is required to be a monotonic non-decreasing set.
   * The indices in the array are required to comply with the following conditions:
   * a, b belongs to Range[0, input column size]
   * a <= b, where the position of 'a' is less or equal to the position of 'b'.
   *
   * The split function will take a pair of indices from the indices array
   * ('splits') in a consecutive manner. For the first pair, the function will
   * take the value 0 and the first element of the indices array. For the last pair,
   * the function will take the last element of the indices array and the size of
   * the input column.
   *
   * Exceptional cases for the indices array are:
   * When the values in the pair are equal, the function returns an empty column.
   * When the values in the pair are 'strictly decreasing', the outcome is
   * undefined.
   * When any of the values in the pair don't belong to the range[0, input column
   * size), the outcome is undefined.
   * When the indices array is empty, an empty array of ColumnViews is returned.
   *
   * The output columns may have different sizes. The number of
   * columns must be equal to the number of indices in the array plus one.
   *
   * Example:
   * input: {10, 12, 14, 16, 18, 20, 22, 24, 26, 28}
   * splits: {2, 5, 9}
   * output: {{10, 12}, {14, 16, 18}, {20, 22, 24, 26}, {28}}
   *
   * Note that this is very similar to the output from a PartitionedTable.
   *
   *
   * @param indices the indices to split with
   * @return A new ColumnView array with slices from the original ColumnView
   */
  public ColumnView[] splitAsViews(int... indices) {
    // The native call returns one view handle per interval derived from `indices`.
    long[] nativeHandles = split(this.getNativeView(), indices);
    ColumnView[] columnViews = new ColumnView[nativeHandles.length];
    try {
      for (int i = 0; i < nativeHandles.length; i++) {
        long nativeHandle = nativeHandles[i];
        // setting address to zero, so we don't clean it in case of an exception as it
        // will be cleaned up by the constructor
        nativeHandles[i] = 0;
        columnViews[i] = new ColumnView(nativeHandle);
      }
    } catch (Throwable t) {
      try {
        // Close wrapped views and delete unadopted handles; secondary failures are
        // attached as suppressed exceptions so the original error propagates.
        cleanupColumnViews(nativeHandles, columnViews, t);
      } catch (Throwable s) {
        t.addSuppressed(s);
      } finally {
        throw t;
      }
    }
    return columnViews;
  }
static void cleanupColumnViews(long[] nativeHandles, ColumnView[] columnViews, Throwable throwable) {
for (ColumnView columnView : columnViews) {
if (columnView != null) {
try {
columnView.close();
} catch (Throwable s) {
throwable.addSuppressed(s);
}
}
}
for (long nativeHandle : nativeHandles) {
if (nativeHandle != 0) {
try {
deleteColumnView(nativeHandle);
} catch (Throwable s) {
throwable.addSuppressed(s);
}
}
}
}
/**
* Create a new vector of "normalized" values, where:
* 1. All representations of NaN (and -NaN) are replaced with the normalized NaN value
* 2. All elements equivalent to 0.0 (including +0.0 and -0.0) are replaced with +0.0.
* 3. All elements that are not equivalent to NaN or 0.0 remain unchanged.
*
* The documentation for {@link Double#longBitsToDouble(long)}
* describes how equivalent values of NaN/-NaN might have different bitwise representations.
*
* This method may be used to compare different bitwise values of 0.0 or NaN as logically
* equivalent. For instance, if these values appear in a groupby key column, without normalization
* 0.0 and -0.0 would be erroneously treated as distinct groups, as will each representation of NaN.
*
* @return A new ColumnVector with all elements equivalent to NaN/0.0 replaced with a normalized equivalent.
*/
public final ColumnVector normalizeNANsAndZeros() {
return new ColumnVector(normalizeNANsAndZeros(getNativeView()));
}
/**
* Create a deep copy of the column while replacing the null mask. The resultant null mask is the
* bitwise merge of null masks in the columns given as arguments.
* The result will be sanitized to not contain any non-empty nulls in case of nested types
*
* @param mergeOp binary operator (BITWISE_AND and BITWISE_OR only)
* @param columns array of columns whose null masks are merged, must have identical number of rows.
* @return the new ColumnVector with merged null mask.
*/
public final ColumnVector mergeAndSetValidity(BinaryOp mergeOp, ColumnView... columns) {
assert mergeOp == BinaryOp.BITWISE_AND || mergeOp == BinaryOp.BITWISE_OR : "Only BITWISE_AND and BITWISE_OR supported right now";
long[] columnViews = new long[columns.length];
long size = getRowCount();
for(int i = 0; i < columns.length; i++) {
assert columns[i] != null : "Column vectors passed may not be null";
assert columns[i].getRowCount() == size : "Row count mismatch, all columns must be the same size";
columnViews[i] = columns[i].getNativeView();
}
return new ColumnVector(bitwiseMergeAndSetValidity(getNativeView(), columnViews, mergeOp.nativeId));
}
/////////////////////////////////////////////////////////////////////////////
// DATE/TIME
/////////////////////////////////////////////////////////////////////////////
/**
* Extract a particular date time component from a timestamp.
* @param component what should be extracted
* @return a column with the extracted information in it.
*/
public final ColumnVector extractDateTimeComponent(DateTimeComponent component) {
assert type.isTimestampType();
return new ColumnVector(extractDateTimeComponent(getNativeView(), component.getNativeId()));
}
  /**
   * Get year from a timestamp.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU.
   */
  public final ColumnVector year() {
    return extractDateTimeComponent(DateTimeComponent.YEAR);
  }
  /**
   * Get month from a timestamp.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU.
   */
  public final ColumnVector month() {
    return extractDateTimeComponent(DateTimeComponent.MONTH);
  }
  /**
   * Get day from a timestamp.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU.
   */
  public final ColumnVector day() {
    return extractDateTimeComponent(DateTimeComponent.DAY);
  }
  /**
   * Get hour from a timestamp with time resolution.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU.
   */
  public final ColumnVector hour() {
    return extractDateTimeComponent(DateTimeComponent.HOUR);
  }
  /**
   * Get minute from a timestamp with time resolution.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU.
   */
  public final ColumnVector minute() {
    return extractDateTimeComponent(DateTimeComponent.MINUTE);
  }
  /**
   * Get second from a timestamp with time resolution.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU.
   */
  public final ColumnVector second() {
    return extractDateTimeComponent(DateTimeComponent.SECOND);
  }
  /**
   * Get the day of the week from a timestamp.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU. Monday=1, ..., Sunday=7
   */
  public final ColumnVector weekDay() {
    return extractDateTimeComponent(DateTimeComponent.WEEKDAY);
  }
  /**
   * Get the date that is the last day of the month for this timestamp.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new TIMESTAMP_DAYS vector allocated on the GPU.
   */
  public final ColumnVector lastDayOfMonth() {
    assert type.isTimestampType();
    return new ColumnVector(lastDayOfMonth(getNativeView()));
  }
  /**
   * Get the day of the year from a timestamp.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU. The value is between [1, {365-366}]
   */
  public final ColumnVector dayOfYear() {
    assert type.isTimestampType();
    return new ColumnVector(dayOfYear(getNativeView()));
  }
  /**
   * Get the quarter of the year from a timestamp.
   * <p>
   * Postconditions - A new vector is allocated with the result. The caller owns the vector and
   * is responsible for its lifecycle.
   * @return A new INT16 vector allocated on the GPU. It will be a value from {1, 2, 3, 4}
   *         corresponding to the quarter of the year.
   */
  public final ColumnVector quarterOfYear() {
    assert type.isTimestampType();
    return new ColumnVector(quarterOfYear(getNativeView()));
  }
/**
* Add the specified number of months to the timestamp.
* @param months must be a INT16 column indicating the number of months to add. A negative number
* of months works too.
* @return the updated timestamp
*/
public final ColumnVector addCalendricalMonths(ColumnView months) {
return new ColumnVector(addCalendricalMonths(getNativeView(), months.getNativeView()));
}
/**
* Add the specified number of months to the timestamp.
* @param months must be a INT16 scalar indicating the number of months to add. A negative number
* of months works too.
* @return the updated timestamp
*/
public final ColumnVector addCalendricalMonths(Scalar months) {
return new ColumnVector(addScalarCalendricalMonths(getNativeView(), months.getScalarHandle()));
}
/**
* Check to see if the year for this timestamp is a leap year or not.
* @return BOOL8 vector of results
*/
public final ColumnVector isLeapYear() {
return new ColumnVector(isLeapYear(getNativeView()));
}
/**
* Extract the number of days in the month
* @return INT16 column of the number of days in the corresponding month
*/
public final ColumnVector daysInMonth() {
assert type.isTimestampType();
return new ColumnVector(daysInMonth(getNativeView()));
}
  /**
   * Round the timestamp up to the given frequency keeping the type the same.
   * The input column must be a timestamp type (checked via assertion).
   * @param freq what part of the timestamp to round.
   * @return a timestamp with the same type, but rounded up.
   */
  public final ColumnVector dateTimeCeil(DateTimeRoundingFrequency freq) {
    assert type.isTimestampType();
    return new ColumnVector(dateTimeCeil(getNativeView(), freq.getNativeId()));
  }
  /**
   * Round the timestamp down to the given frequency keeping the type the same.
   * The input column must be a timestamp type (checked via assertion).
   * @param freq what part of the timestamp to round.
   * @return a timestamp with the same type, but rounded down.
   */
  public final ColumnVector dateTimeFloor(DateTimeRoundingFrequency freq) {
    assert type.isTimestampType();
    return new ColumnVector(dateTimeFloor(getNativeView(), freq.getNativeId()));
  }
  /**
   * Round the timestamp (half up) to the given frequency keeping the type the same.
   * The input column must be a timestamp type (checked via assertion).
   * @param freq what part of the timestamp to round.
   * @return a timestamp with the same type, but rounded (half up).
   */
  public final ColumnVector dateTimeRound(DateTimeRoundingFrequency freq) {
    assert type.isTimestampType();
    return new ColumnVector(dateTimeRound(getNativeView(), freq.getNativeId()));
  }
  /**
   * Rounds all the values in a column to the specified number of decimal places.
   *
   * @param decimalPlaces Number of decimal places to round to. If negative, this
   *                      specifies the number of positions to the left of the decimal point.
   * @param mode Rounding method (either HALF_UP or HALF_EVEN)
   * @return a new ColumnVector with rounded values.
   */
  public ColumnVector round(int decimalPlaces, RoundMode mode) {
    return new ColumnVector(round(this.getNativeView(), decimalPlaces, mode.nativeId));
  }
  /**
   * Rounds all the values in a column using the given rounding method and the default
   * of 0 decimal places.
   *
   * @param round Rounding method (either HALF_UP or HALF_EVEN)
   * @return a new ColumnVector with rounded values.
   */
  public ColumnVector round(RoundMode round) {
    return round(0, round);
  }
  /**
   * Rounds all the values in a column to the specified number of decimal places with HALF_UP
   * (default) as Rounding method.
   *
   * @param decimalPlaces Number of decimal places to round to. If negative, this
   *                      specifies the number of positions to the left of the decimal point.
   * @return a new ColumnVector with rounded values.
   */
  public ColumnVector round(int decimalPlaces) {
    return round(decimalPlaces, RoundMode.HALF_UP);
  }
  /**
   * Rounds all the values in a column with these default values:
   * decimalPlaces = 0
   * Rounding method = RoundMode.HALF_UP
   *
   * @return a new ColumnVector with rounded values.
   */
  public ColumnVector round() {
    return round(0, RoundMode.HALF_UP);
  }
/////////////////////////////////////////////////////////////////////////////
// ARITHMETIC
/////////////////////////////////////////////////////////////////////////////
  /**
   * Transform a vector using a custom function. Be careful this is not
   * simple to do. You need to be positive you know what type of data you are
   * processing and how the data is laid out. This also only works on fixed
   * length types.
   * @param udf This function will be applied to every element in the vector
   * @param isPtx is the code of the function ptx? true or C/C++ false.
   * @return a new vector holding the result of applying the UDF to every element.
   */
  public final ColumnVector transform(String udf, boolean isPtx) {
    return new ColumnVector(transform(getNativeView(), udf, isPtx));
  }
  /**
   * Multiple different unary operations. The output is the same type as input.
   * @param op the operation to perform
   * @return the result
   */
  public final ColumnVector unaryOp(UnaryOp op) {
    return new ColumnVector(unaryOperation(getNativeView(), op.nativeId));
  }
  /**
   * Calculate the sin, output is the same type as input.
   */
  public final ColumnVector sin() {
    return unaryOp(UnaryOp.SIN);
  }
  /**
   * Calculate the cos, output is the same type as input.
   */
  public final ColumnVector cos() {
    return unaryOp(UnaryOp.COS);
  }
  /**
   * Calculate the tan, output is the same type as input.
   */
  public final ColumnVector tan() {
    return unaryOp(UnaryOp.TAN);
  }
  /**
   * Calculate the arcsin, output is the same type as input.
   */
  public final ColumnVector arcsin() {
    return unaryOp(UnaryOp.ARCSIN);
  }
  /**
   * Calculate the arccos, output is the same type as input.
   */
  public final ColumnVector arccos() {
    return unaryOp(UnaryOp.ARCCOS);
  }
  /**
   * Calculate the arctan, output is the same type as input.
   */
  public final ColumnVector arctan() {
    return unaryOp(UnaryOp.ARCTAN);
  }
  /**
   * Calculate the hyperbolic sin, output is the same type as input.
   */
  public final ColumnVector sinh() {
    return unaryOp(UnaryOp.SINH);
  }
  /**
   * Calculate the hyperbolic cos, output is the same type as input.
   */
  public final ColumnVector cosh() {
    return unaryOp(UnaryOp.COSH);
  }
  /**
   * Calculate the hyperbolic tan, output is the same type as input.
   */
  public final ColumnVector tanh() {
    return unaryOp(UnaryOp.TANH);
  }
  /**
   * Calculate the hyperbolic arcsin, output is the same type as input.
   */
  public final ColumnVector arcsinh() {
    return unaryOp(UnaryOp.ARCSINH);
  }
  /**
   * Calculate the hyperbolic arccos, output is the same type as input.
   */
  public final ColumnVector arccosh() {
    return unaryOp(UnaryOp.ARCCOSH);
  }
  /**
   * Calculate the hyperbolic arctan, output is the same type as input.
   */
  public final ColumnVector arctanh() {
    return unaryOp(UnaryOp.ARCTANH);
  }
  /**
   * Calculate the exp, output is the same type as input.
   */
  public final ColumnVector exp() {
    return unaryOp(UnaryOp.EXP);
  }
  /**
   * Calculate the log, output is the same type as input.
   */
  public final ColumnVector log() {
    return unaryOp(UnaryOp.LOG);
  }
  /**
   * Calculate the log with base 2, output is the same type as input.
   * Implemented as a LOG_BASE binary op against a temporary scalar base of 2.
   */
  public final ColumnVector log2() {
    try (Scalar base = Scalar.fromInt(2)) {
      return binaryOp(BinaryOp.LOG_BASE, base, getType());
    }
  }
  /**
   * Calculate the log with base 10, output is the same type as input.
   * Implemented as a LOG_BASE binary op against a temporary scalar base of 10.
   */
  public final ColumnVector log10() {
    try (Scalar base = Scalar.fromInt(10)) {
      return binaryOp(BinaryOp.LOG_BASE, base, getType());
    }
  }
  /**
   * Calculate the sqrt, output is the same type as input.
   */
  public final ColumnVector sqrt() {
    return unaryOp(UnaryOp.SQRT);
  }
  /**
   * Calculate the cube root, output is the same type as input.
   */
  public final ColumnVector cbrt() {
    return unaryOp(UnaryOp.CBRT);
  }
  /**
   * Calculate the ceil, output is the same type as input.
   */
  public final ColumnVector ceil() {
    return unaryOp(UnaryOp.CEIL);
  }
  /**
   * Calculate the floor, output is the same type as input.
   */
  public final ColumnVector floor() {
    return unaryOp(UnaryOp.FLOOR);
  }
  /**
   * Calculate the abs, output is the same type as input.
   */
  public final ColumnVector abs() {
    return unaryOp(UnaryOp.ABS);
  }
  /**
   * Rounds a floating-point argument to the closest integer value, but returns it as a float.
   */
  public final ColumnVector rint() {
    return unaryOp(UnaryOp.RINT);
  }
  /**
   * Count the number of set bits for each integer value.
   */
  public final ColumnVector bitCount() {
    return unaryOp(UnaryOp.BIT_COUNT);
  }
  /**
   * Invert the bits, output is the same type as input.
   * For BOOL8 type, this is equivalent to logical not (UnaryOp.NOT), but this does not
   * matter since Spark does not support bitwise inverting on boolean type.
   */
  public final ColumnVector bitInvert() {
    return unaryOp(UnaryOp.BIT_INVERT);
  }
/**
* Multiple different binary operations.
* @param op the operation to perform
* @param rhs the rhs of the operation
* @param outType the type of output you want.
* @return the result
*/
@Override
public final ColumnVector binaryOp(BinaryOp op, BinaryOperable rhs, DType outType) {
if (rhs instanceof ColumnView) {
assert rows == ((ColumnView) rhs).getRowCount();
return new ColumnVector(binaryOp(this, (ColumnView) rhs, op, outType));
} else {
return new ColumnVector(binaryOp(this, (Scalar) rhs, op, outType));
}
}
  /**
   * Column-vs-column binary op through the native layer. Returns a raw handle to the
   * newly allocated native column; the caller is responsible for wrapping it (e.g. in a
   * ColumnVector) so it gets closed.
   */
  static long binaryOp(ColumnView lhs, ColumnView rhs, BinaryOp op, DType outputType) {
    return binaryOpVV(lhs.getNativeView(), rhs.getNativeView(),
        op.nativeId, outputType.typeId.getNativeId(), outputType.getScale());
  }
  /**
   * Column-vs-scalar binary op through the native layer. Returns a raw handle to the
   * newly allocated native column; the caller is responsible for wrapping it.
   */
  static long binaryOp(ColumnView lhs, Scalar rhs, BinaryOp op, DType outputType) {
    return binaryOpVS(lhs.getNativeView(), rhs.getScalarHandle(),
        op.nativeId, outputType.typeId.getNativeId(), outputType.getScale());
  }
/////////////////////////////////////////////////////////////////////////////
// AGGREGATION
/////////////////////////////////////////////////////////////////////////////
  /**
   * Computes the sum of all values in the column, returning a scalar
   * of the same type as this column.
   * @return the sum as a scalar of this column's type.
   */
  public Scalar sum() {
    return sum(type);
  }
  /**
   * Computes the sum of all values in the column, returning a scalar
   * of the specified type.
   * @param outType the type of the scalar to return.
   */
  public Scalar sum(DType outType) {
    return reduce(ReductionAggregation.sum(), outType);
  }
  /**
   * Returns the minimum of all values in the column, returning a scalar
   * of the same type as this column.
   */
  public Scalar min() {
    return reduce(ReductionAggregation.min(), type);
  }
  /**
   * Returns the minimum of all values in the column, returning a scalar
   * of the specified type.
   * @deprecated the min reduction no longer internally allows for setting the output type, as a
   * work around this API will cast the input type to the output type for you, but this may not
   * work in all cases.
   */
  @Deprecated
  public Scalar min(DType outType) {
    if (!outType.equals(type)) {
      try (ColumnVector tmp = this.castTo(outType)) {
        // tmp's type now matches outType, so this recursive call takes the direct
        // reduce path below (no further recursion).
        return tmp.min(outType);
      }
    }
    return reduce(ReductionAggregation.min(), outType);
  }
  /**
   * Returns the maximum of all values in the column, returning a scalar
   * of the same type as this column.
   */
  public Scalar max() {
    return reduce(ReductionAggregation.max(), type);
  }
  /**
   * Returns the maximum of all values in the column, returning a scalar
   * of the specified type.
   * @deprecated the max reduction no longer internally allows for setting the output type, as a
   * work around this API will cast the input type to the output type for you, but this may not
   * work in all cases.
   */
  @Deprecated
  public Scalar max(DType outType) {
    if (!outType.equals(type)) {
      try (ColumnVector tmp = this.castTo(outType)) {
        // tmp's type now matches outType, so this recursive call takes the direct
        // reduce path below (no further recursion).
        return tmp.max(outType);
      }
    }
    return reduce(ReductionAggregation.max(), outType);
  }
  /**
   * Returns the product of all values in the column, returning a scalar
   * of the same type as this column.
   */
  public Scalar product() {
    return product(type);
  }
  /**
   * Returns the product of all values in the column, returning a scalar
   * of the specified type.
   * @param outType the type of the scalar to return.
   */
  public Scalar product(DType outType) {
    return reduce(ReductionAggregation.product(), outType);
  }
  /**
   * Returns the sum of squares of all values in the column, returning a
   * scalar of the same type as this column.
   */
  public Scalar sumOfSquares() {
    return sumOfSquares(type);
  }
  /**
   * Returns the sum of squares of all values in the column, returning a
   * scalar of the specified type.
   * @param outType the type of the scalar to return.
   */
  public Scalar sumOfSquares(DType outType) {
    return reduce(ReductionAggregation.sumOfSquares(), outType);
  }
/**
* Returns the arithmetic mean of all values in the column, returning a
* FLOAT64 scalar unless the column type is FLOAT32 then a FLOAT32 scalar is returned.
* Null values are skipped.
*/
public Scalar mean() {
DType outType = DType.FLOAT64;
if (type.equals(DType.FLOAT32)) {
outType = type;
}
return mean(outType);
}
/**
* Returns the arithmetic mean of all values in the column, returning a
* scalar of the specified type.
* Null values are skipped.
* @param outType the output type to return. Note that only floating point
* types are currently supported.
*/
public Scalar mean(DType outType) {
return reduce(ReductionAggregation.mean(), outType);
}
/**
* Returns the variance of all values in the column, returning a
* FLOAT64 scalar unless the column type is FLOAT32 then a FLOAT32 scalar is returned.
* Null values are skipped.
*/
public Scalar variance() {
DType outType = DType.FLOAT64;
if (type.equals(DType.FLOAT32)) {
outType = type;
}
return variance(outType);
}
/**
* Returns the variance of all values in the column, returning a
* scalar of the specified type.
* Null values are skipped.
* @param outType the output type to return. Note that only floating point
* types are currently supported.
*/
public Scalar variance(DType outType) {
return reduce(ReductionAggregation.variance(), outType);
}
/**
* Returns the sample standard deviation of all values in the column,
* returning a FLOAT64 scalar unless the column type is FLOAT32 then
* a FLOAT32 scalar is returned. Nulls are not counted as an element
* of the column when calculating the standard deviation.
*/
public Scalar standardDeviation() {
DType outType = DType.FLOAT64;
if (type.equals(DType.FLOAT32)) {
outType = type;
}
return standardDeviation(outType);
}
/**
* Returns the sample standard deviation of all values in the column,
* returning a scalar of the specified type. Null's are not counted as
* an element of the column when calculating the standard deviation.
* @param outType the output type to return. Note that only floating point
* types are currently supported.
*/
public Scalar standardDeviation(DType outType) {
return reduce(ReductionAggregation.standardDeviation(), outType);
}
  /**
   * Returns a boolean scalar that is true if any of the elements in
   * the column are true or non-zero otherwise false.
   * Null values are skipped.
   */
  public Scalar any() {
    return any(DType.BOOL8);
  }
  /**
   * Returns a scalar that is true or 1, depending on the specified type,
   * if any of the elements in the column are true or non-zero,
   * otherwise false or 0.
   * Null values are skipped.
   */
  public Scalar any(DType outType) {
    return reduce(ReductionAggregation.any(), outType);
  }
  /**
   * Returns a boolean scalar that is true if all of the elements in
   * the column are true or non-zero otherwise false.
   * Null values are skipped.
   */
  public Scalar all() {
    return all(DType.BOOL8);
  }
  /**
   * Returns a scalar that is true or 1, depending on the specified type,
   * if all of the elements in the column are true or non-zero,
   * otherwise false or 0.
   * Null values are skipped.
   * @deprecated the only output type supported is BOOL8.
   */
  @Deprecated
  public Scalar all(DType outType) {
    return reduce(ReductionAggregation.all(), outType);
  }
  /**
   * Computes the reduction of the values in all rows of a column.
   * Overflows in reductions are not detected. Specifying a higher precision
   * output type may prevent overflow. Only the MIN and MAX ops are
   * supported for reduction of non-arithmetic types (TIMESTAMP...)
   * The null values are skipped for the operation.
   * @param aggregation The reduction aggregation to perform
   * @return The scalar result of the reduction operation. If the column is
   * empty or the reduction operation fails then the
   * {@link Scalar#isValid()} method of the result will return false.
   */
  public Scalar reduce(ReductionAggregation aggregation) {
    return reduce(aggregation, type);
  }
  /**
   * Computes the reduction of the values in all rows of a column.
   * Overflows in reductions are not detected. Specifying a higher precision
   * output type may prevent overflow. Only the MIN and MAX ops are
   * supported for reduction of non-arithmetic types (TIMESTAMP...)
   * The null values are skipped for the operation.
   * @param aggregation The reduction aggregation to perform
   * @param outType The type of scalar value to return. Not all output types are supported
   * by all aggregation operations.
   * @return The scalar result of the reduction operation. If the column is
   * empty or the reduction operation fails then the
   * {@link Scalar#isValid()} method of the result will return false.
   */
  public Scalar reduce(ReductionAggregation aggregation, DType outType) {
    long nativeId = aggregation.createNativeInstance();
    try {
      return new Scalar(outType, reduce(getNativeView(), nativeId, outType.typeId.getNativeId(), outType.getScale()));
    } finally {
      // The native aggregation instance must always be released, even on failure.
      Aggregation.close(nativeId);
    }
  }
  /**
   * Do a segmented reduce where the offsets column indicates which groups in this to combine. The
   * output type is the same as the input type and nulls are excluded by default.
   * @param offsets an INT32 column with no nulls.
   * @param aggregation the aggregation to do
   * @return the result.
   */
  public ColumnVector segmentedReduce(ColumnView offsets, SegmentedReductionAggregation aggregation) {
    return segmentedReduce(offsets, aggregation, NullPolicy.EXCLUDE, type);
  }
  /**
   * Do a segmented reduce where the offsets column indicates which groups in this to combine.
   * Nulls are excluded by default.
   * @param offsets an INT32 column with no nulls.
   * @param aggregation the aggregation to do
   * @param outType the output data type.
   * @return the result.
   */
  public ColumnVector segmentedReduce(ColumnView offsets, SegmentedReductionAggregation aggregation,
      DType outType) {
    return segmentedReduce(offsets, aggregation, NullPolicy.EXCLUDE, outType);
  }
  /**
   * Do a segmented reduce where the offsets column indicates which groups in this to combine.
   * @param offsets an INT32 column with no nulls.
   * @param aggregation the aggregation to do
   * @param nullPolicy the null policy.
   * @param outType the output data type.
   * @return the result.
   */
  public ColumnVector segmentedReduce(ColumnView offsets, SegmentedReductionAggregation aggregation,
      NullPolicy nullPolicy, DType outType) {
    long nativeId = aggregation.createNativeInstance();
    try {
      return new ColumnVector(segmentedReduce(getNativeView(), offsets.getNativeView(), nativeId,
          nullPolicy.includeNulls, outType.typeId.getNativeId(), outType.getScale()));
    } finally {
      // The native aggregation instance must always be released, even on failure.
      Aggregation.close(nativeId);
    }
  }
/**
* Segmented gather of the elements within a list element in each row of a list column.
* For each list, assuming the size is N, valid indices of gather map ranges in [-N, N).
* Out of bound indices refer to null.
* @param gatherMap ListColumnView carrying lists of integral indices which maps the
* element in list of each row in the source columns to rows of lists in the result columns.
* @return the result.
*/
public ColumnVector segmentedGather(ColumnView gatherMap) {
return segmentedGather(gatherMap, OutOfBoundsPolicy.NULLIFY);
}
/**
* Segmented gather of the elements within a list element in each row of a list column.
* @param gatherMap ListColumnView carrying lists of integral indices which maps the
* element in list of each row in the source columns to rows of lists in the result columns.
* @param policy OutOfBoundsPolicy, `DONT_CHECK` leads to undefined behaviour; `NULLIFY`
* replaces out of bounds with null.
* @return the result.
*/
public ColumnVector segmentedGather(ColumnView gatherMap, OutOfBoundsPolicy policy) {
return new ColumnVector(segmentedGather(getNativeView(), gatherMap.getNativeView(),
policy.equals(OutOfBoundsPolicy.NULLIFY)));
}
/**
* Do a reduction on the values in a list. The output type will be the type of the data column
* of this list.
* @param aggregation the aggregation to perform
*/
public ColumnVector listReduce(SegmentedReductionAggregation aggregation) {
if (!getType().equals(DType.LIST)) {
throw new IllegalArgumentException("listReduce only works on list types");
}
try (ColumnView offsets = getListOffsetsView();
ColumnView data = getChildColumnView(0)) {
return data.segmentedReduce(offsets, aggregation);
}
}
/**
 * Do a reduction on the values in each list, excluding nulls from the aggregation.
 * @param aggregation the aggregation to perform
 * @param outType the type of the output. Typically this should match the child type
 *                of the list.
 */
public ColumnVector listReduce(SegmentedReductionAggregation aggregation, DType outType) {
  // Nulls are excluded by default.
  return listReduce(aggregation, NullPolicy.EXCLUDE, outType);
}
/**
 * Do a reduction on the values in each list.
 * @param aggregation the aggregation to perform
 * @param nullPolicy whether nulls are included in or excluded from the aggregation.
 * @param outType the type of the output. Typically this should match the child type
 *                of the list.
 * @throws IllegalArgumentException if this column is not a LIST column.
 */
public ColumnVector listReduce(SegmentedReductionAggregation aggregation, NullPolicy nullPolicy,
    DType outType) {
  if (!DType.LIST.equals(getType())) {
    throw new IllegalArgumentException("listReduce only works on list types");
  }
  // The list offsets describe the segment boundaries within the child data column.
  try (ColumnView offsets = getListOffsetsView();
       ColumnView data = getChildColumnView(0)) {
    return data.segmentedReduce(offsets, aggregation, nullPolicy, outType);
  }
}
/**
 * Calculate various percentiles of this ColumnVector, which must contain centroids
 * produced by a t-digest aggregation.
 *
 * @param percentiles required percentiles, each in [0,1]
 * @return column containing the approximate percentile values as a list of doubles,
 *         in the same order as the input percentiles
 */
public final ColumnVector approxPercentile(double[] percentiles) {
  // Wrap the primitive array in a temporary device column and reuse the column overload.
  try (ColumnVector percentilesCol = ColumnVector.fromDoubles(percentiles)) {
    return approxPercentile(percentilesCol);
  }
}
/**
 * Calculate various percentiles of this ColumnVector, which must contain centroids
 * produced by a t-digest aggregation.
 *
 * @param percentiles column containing percentiles, each in [0,1]
 * @return column containing the approximate percentile values as a list of doubles,
 *         in the same order as the input percentiles
 */
public final ColumnVector approxPercentile(ColumnVector percentiles) {
  long resultHandle = approxPercentile(getNativeView(), percentiles.getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Calculate various quantiles of this ColumnVector. It is assumed that this column is
 * already sorted in the desired order.
 * @param method the interpolation method used to calculate the quantiles
 * @param quantiles the quantile values, each in [0,1]
 * @return column containing the quantile values, in the same order as the input quantiles
 */
public final ColumnVector quantile(QuantileMethod method, double[] quantiles) {
  long resultHandle = quantile(getNativeView(), method.nativeId, quantiles);
  return new ColumnVector(resultHandle);
}
/**
 * This function aggregates values in a window around each element i of the input
 * column. Please refer to WindowOptions for the various options that can be passed.
 * Note: Only rows-based windows are supported.
 * @param op the operation to perform.
 * @param options various window function arguments.
 * @return Column containing the aggregate function result.
 * @throws IllegalArgumentException if an unsupported window specification
 *         (i.e. other than {@link WindowOptions.FrameType#ROWS}) is used.
 */
public final ColumnVector rollingWindow(RollingAggregation op, WindowOptions options) {
  // Check that only row-based windows are used.
  if (!options.getFrameType().equals(WindowOptions.FrameType.ROWS)) {
    throw new IllegalArgumentException("Expected ROWS-based window specification. Unexpected window type: "
        + options.getFrameType());
  }
  long nativePtr = op.createNativeInstance();
  try {
    // Preceding/following bounds may be supplied as scalars or as per-row columns.
    // A null or invalid scalar is passed down as 0; a missing column is passed as a
    // zero (null) native handle.
    Scalar p = options.getPrecedingScalar();
    Scalar f = options.getFollowingScalar();
    return new ColumnVector(
        rollingWindow(this.getNativeView(),
            op.getDefaultOutput(),
            options.getMinPeriods(),
            nativePtr,
            p == null || !p.isValid() ? 0 : p.getInt(),
            f == null || !f.isValid() ? 0 : f.getInt(),
            options.getPrecedingCol() == null ? 0 : options.getPrecedingCol().getNativeView(),
            options.getFollowingCol() == null ? 0 : options.getFollowingCol().getNativeView()));
  } finally {
    // The native aggregation instance must always be released.
    Aggregation.close(nativePtr);
  }
}
/**
 * Compute the prefix sum (aka cumulative sum) of the values in this column.
 * Equivalent to an inclusive scan with a SUM aggregation that excludes nulls.
 */
public final ColumnVector prefixSum() {
  return scan(ScanAggregation.sum(), ScanType.INCLUSIVE, NullPolicy.EXCLUDE);
}
/**
 * Computes a scan for a column. This is very similar to a running window on the column.
 * @param aggregation the aggregation to perform
 * @param scanType whether the scan is inclusive (includes the current row) or exclusive.
 * @param nullPolicy how nulls are treated. Note that some aggregations also carry a
 *                   null policy of their own; none of those aggregations are currently
 *                   supported, so how the two would interact is undefined.
 */
public final ColumnVector scan(ScanAggregation aggregation, ScanType scanType, NullPolicy nullPolicy) {
  long nativeId = aggregation.createNativeInstance();
  try {
    long resultHandle = scan(getNativeView(), nativeId,
        scanType.isInclusive, nullPolicy.includeNulls);
    return new ColumnVector(resultHandle);
  } finally {
    // Always release the native aggregation instance.
    Aggregation.close(nativeId);
  }
}
/**
 * Computes a scan for a column, excluding nulls from the aggregation.
 * @param aggregation the aggregation to perform
 * @param scanType whether the scan is inclusive (includes the current row) or exclusive.
 */
public final ColumnVector scan(ScanAggregation aggregation, ScanType scanType) {
  // Nulls are excluded by default.
  return scan(aggregation, scanType, NullPolicy.EXCLUDE);
}
/**
 * Computes an inclusive scan for a column, excluding nulls from the aggregation.
 * @param aggregation the aggregation to perform
 */
public final ColumnVector scan(ScanAggregation aggregation) {
  // The two-argument overload already defaults the null policy to EXCLUDE.
  return scan(aggregation, ScanType.INCLUSIVE);
}
/////////////////////////////////////////////////////////////////////////////
// LOGICAL
/////////////////////////////////////////////////////////////////////////////
/**
 * Returns a vector containing the logical {@code not} of each value in this
 * column.
 */
public final ColumnVector not() {
  // Logical NOT is implemented through the generic unary-op path.
  return unaryOp(UnaryOp.NOT);
}
/////////////////////////////////////////////////////////////////////////////
// SEARCH
/////////////////////////////////////////////////////////////////////////////
/**
 * Check whether {@code needle} occurs anywhere in this column.
 *
 * example:
 *
 * col = { 10, 20, 20, 30, 50 }
 * needle = { 20 }
 * result = true
 *
 * @param needle the scalar value to search for
 * @return true if the needle is present, false otherwise
 */
public boolean contains(Scalar needle) {
  return containsScalar(getNativeView(), needle.getScalarHandle());
}
/**
 * Returns a new column of {@link DType#BOOL8} elements having the same size as this
 * column; each output row is true if the corresponding entry of this column is found
 * in the given {@code searchSpace} column and false otherwise.
 * The caller is responsible for the lifecycle of the returned vector.
 *
 * example:
 *
 * col = { 10, 20, 30, 40, 50 }
 * searchSpace = { 20, 40, 60, 80 }
 * result = { false, true, false, true, false }
 *
 * @param searchSpace the set of values to look for
 * @return a new ColumnVector of type {@link DType#BOOL8}
 */
public final ColumnVector contains(ColumnView searchSpace) {
  long resultHandle = containsVector(getNativeView(), searchSpace.getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Returns a column of strings where, for each input row, the first character after any
 * spaces is upper-cased and all remaining characters in a word are lower-cased.
 *
 * Null string entries produce corresponding null output entries.
 */
public final ColumnVector toTitle() {
  assert type.equals(DType.STRING);
  long resultHandle = title(getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Returns a column of capitalized strings.
 *
 * If {@code delimiters} is an empty string, only the first character of each row is
 * capitalized. Otherwise a non-delimiter character is capitalized whenever it follows
 * any delimiter character.
 *
 * Example:
 * input = ["tesT1", "a Test", "Another Test", "a\tb"];
 * delimiters = ""
 * output is ["Test1", "A test", "Another test", "A\tb"]
 * delimiters = " "
 * output is ["Test1", "A Test", "Another Test", "A\tb"]
 *
 * Null string entries produce corresponding null output entries.
 *
 * @param delimiters used to identify words to capitalize. Should not be null.
 * @return a column of capitalized strings. Users should close the returned column.
 * @throws IllegalArgumentException if either this column or the delimiters scalar is
 *         not of string type.
 */
public final ColumnVector capitalize(Scalar delimiters) {
  // Validate up front so the happy path reads straight through.
  if (!DType.STRING.equals(type) || !DType.STRING.equals(delimiters.getType())) {
    throw new IllegalArgumentException("Both input column and delimiters scalar should be" +
        " string type. But got column: " + type + ", scalar: " + delimiters.getType());
  }
  return new ColumnVector(capitalize(getNativeView(), delimiters.getScalarHandle()));
}
/**
 * Concatenates all strings in the column into one new string, delimited by an optional
 * separator string.
 *
 * The result is a column with a single string. Null entries are ignored unless the
 * {@code narep} parameter specifies a (non-null) replacement string.
 *
 * @param separator what to insert between rows.
 * @param narep what to replace nulls with.
 * @return a ColumnVector with a single string in it.
 * @throws IllegalArgumentException if this column, the separator, or narep is not a STRING.
 */
public final ColumnVector joinStrings(Scalar separator, Scalar narep) {
  boolean allStrings = DType.STRING.equals(type) &&
      DType.STRING.equals(separator.getType()) &&
      DType.STRING.equals(narep.getType());
  if (!allStrings) {
    throw new IllegalArgumentException("The column, separator, and narep all need to be STRINGs");
  }
  return new ColumnVector(joinStrings(getNativeView(), separator.getScalarHandle(),
      narep.getScalarHandle()));
}
/////////////////////////////////////////////////////////////////////////////
// TYPE CAST
/////////////////////////////////////////////////////////////////////////////
/**
 * Generic method to cast a ColumnVector.
 * When casting from a Date, Timestamp, or Boolean to a numerical type, the underlying
 * numerical representation of the data is used for the cast.
 *
 * For strings:
 * Casting strings from/to timestamp isn't supported at the moment.
 * See {@link ColumnVector#asTimestamp(DType, String)} and
 * {@link ColumnVector#asStrings(String)} for casting strings to/from timestamps when
 * the format is known.
 *
 * Float values converted to String may differ from Java's default formatting, e.g.
 * 12.3 => "12.30000019" instead of "12.3"
 * Double.POSITIVE_INFINITY => "Inf" instead of "INFINITY"
 * Double.NEGATIVE_INFINITY => "-Inf" instead of "-INFINITY"
 *
 * @param type type of the resulting ColumnVector
 * @return A new vector allocated on the GPU
 */
public ColumnVector castTo(DType type) {
  long resultHandle = castTo(getNativeView(), type.typeId.getNativeId(), type.getScale());
  return new ColumnVector(resultHandle);
}
/**
 * This method takes in a nested type and replaces its children with the given views.
 * Note: make sure the numbers of rows in each replacement are the same as the child it
 * replaces, otherwise the list can point to elements outside of the column values.
 *
 * Note: this method returns a ColumnView that won't live past the ColumnVector that it's
 * pointing to.
 *
 * Ex: List<Int> list = col{{1,3}, {9,3,5}}
 *
 * validNewChild = col{8, 3, 9, 2, 0}
 *
 * list.replaceChildrenWithViews(1, validNewChild) => col{{8, 3}, {9, 2, 0}}
 *
 * invalidNewChild = col{3, 2}
 * list.replaceChildrenWithViews(1, invalidNewChild) => col{{3, 2}, {invalid, invalid, invalid}}
 *
 * invalidNewChild = col{8, 3, 9, 2, 0, 0, 7}
 * list.replaceChildrenWithViews(1, invalidNewChild) => col{{8, 3}, {9, 2, 0}} // undefined result
 *
 * @param indices indices of the children to replace; each index must be valid for this
 *                column and may appear at most once.
 * @param views the replacement views, matched positionally with {@code indices}; each
 *              must have the same row count as the child it replaces.
 * @return a new ColumnView with the requested children replaced.
 * @throws IllegalArgumentException if the array lengths differ, an index is duplicated
 *         or invalid, or a replacement's row count does not match the old child.
 */
public ColumnView replaceChildrenWithViews(int[] indices,
                                           ColumnView[] views) {
  assert (type.isNestedType());
  // A LIST column has exactly one child that may be replaced.
  if (DType.LIST.equals(type)) {
    assert (indices.length == 1);
  }
  // Note: a redundant assert of this same condition was removed; the explicit check
  // below covers it unconditionally.
  if (indices.length != views.length) {
    throw new IllegalArgumentException("The indices size and children size should match");
  }
  // Map child index -> replacement view, rejecting duplicate indices in one lookup.
  Map<Integer, ColumnView> map = new HashMap<>();
  for (int i = 0; i < indices.length; i++) {
    if (map.putIfAbsent(indices[i], views[i]) != null) {
      throw new IllegalArgumentException("Duplicate mapping found for replacing child index");
    }
  }
  List<ColumnView> newChildren = new ArrayList<>(getNumChildren());
  List<ColumnView> toClose = new ArrayList<>(getNumChildren());
  try {
    for (int i = 0; i < getNumChildren(); i++) {
      ColumnView view = map.remove(i);
      ColumnView child = getChildColumnView(i);
      toClose.add(child);
      if (view == null) {
        // No replacement requested for this child; keep the original view.
        newChildren.add(child);
      } else {
        if (child.getRowCount() != view.getRowCount()) {
          throw new IllegalArgumentException("Child row count doesn't match the old child");
        }
        newChildren.add(view);
      }
    }
    // Any index still in the map did not correspond to an actual child.
    if (!map.isEmpty()) {
      throw new IllegalArgumentException("One or more invalid child indices passed to be " +
          "replaced");
    }
    return new ColumnView(type, getRowCount(), Optional.of(getNullCount()), getValid(),
        getOffsets(), newChildren.toArray(new ColumnView[0]));
  } finally {
    // The temporary child views must be closed whether or not construction succeeded.
    for (ColumnView columnView : toClose) {
      columnView.close();
    }
  }
}
/**
 * This method takes in a list and returns a new list with the leaf node replaced with the given
 * view. Make sure the number of rows in the leaf node is the same as the child it replaces,
 * otherwise the list can point to elements outside of the column values.
 *
 * Note: this method returns a ColumnView that won't live past the ColumnVector that it's
 * pointing to.
 *
 * Ex: List<Int> list = col{{1,3}, {9,3,5}}
 *
 * validNewChild = col{8, 3, 9, 2, 0}
 *
 * list.replaceListChild(validNewChild) => col{{8, 3}, {9, 2, 0}}
 *
 * invalidNewChild = col{3, 2}
 * list.replaceListChild(invalidNewChild) => throws an exception (row count mismatch)
 *
 * invalidNewChild = col{8, 3, 9, 2, 0, 0, 7}
 * list.replaceListChild(invalidNewChild) => throws an exception (row count mismatch)
 *
 * @param child the new leaf column view; must have the same row count as the current child.
 * @return a new ColumnView with the single list child replaced.
 */
public ColumnView replaceListChild(ColumnView child) {
  assert(type == DType.LIST);
  // A LIST column has exactly one child, at index 0.
  return replaceChildrenWithViews(new int[]{0}, new ColumnView[]{child});
}
/**
 * Zero-copy cast between types with the same underlying representation.
 *
 * Similar to reinterpret_cast or bit_cast in C++. This takes the underlying data and
 * updates the metadata to reflect a new type. Not all types are supported: the widths
 * of the types must match.
 * @param type the type you want to go to.
 * @return a ColumnView that cannot outlive the Column that owns the actual data it points to.
 * @deprecated the C++ API renamed this operation to bit_cast; use
 *             {@link #bitCastTo(DType)} instead.
 */
@Deprecated
public ColumnView logicalCastTo(DType type) {
  return bitCastTo(type);
}
/**
 * Zero-copy cast between types with the same underlying length.
 *
 * Similar to bit_cast in C++. This takes the underlying data and creates new metadata
 * so it is interpreted as a new type. Not all types are supported: the widths of the
 * types must match.
 * @param type the type you want to go to.
 * @return a ColumnView that cannot outlive the Column that owns the actual data it points to.
 */
public ColumnView bitCastTo(DType type) {
  long viewHandle = bitCastTo(getNativeView(),
      type.typeId.getNativeId(), type.getScale());
  return new ColumnView(viewHandle);
}
/**
 * Cast to Byte - ColumnVector.
 * Casts each value in this column to a byte. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asBytes() {
  return castTo(DType.INT8);
}
/**
 * Cast to list of bytes.
 * Converts each row of this column to a list of bytes with the byte order (endianness)
 * reversed. Numeric and string types are supported, but not timestamps.
 *
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asByteList() {
  // true => flip the byte order.
  return new ColumnVector(byteListCast(getNativeView(), true));
}
/**
 * Cast to list of bytes.
 * Converts each row of this column to a list of bytes. Numeric and string types are
 * supported, but not timestamps.
 *
 * @param config flips the byte order (endianness) if true, retains the byte order otherwise
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asByteList(boolean config) {
  long resultHandle = byteListCast(getNativeView(), config);
  return new ColumnVector(resultHandle);
}
/**
 * Cast to unsigned Byte - ColumnVector.
 * Casts each value in this column to a byte. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * <p>
 * Java has no unsigned byte type, so properly decoding these values requires extra
 * steps on the part of the application. See {@link Byte#toUnsignedInt(byte)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedBytes() {
  return castTo(DType.UINT8);
}
/**
 * Cast to Short - ColumnVector.
 * Casts each value in this column to a short. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asShorts() {
  return castTo(DType.INT16);
}
/**
 * Cast to unsigned Short - ColumnVector.
 * Casts each value in this column to a short. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * <p>
 * Java has no unsigned short type, so properly decoding these values requires extra
 * steps on the part of the application. See {@link Short#toUnsignedInt(short)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedShorts() {
  return castTo(DType.UINT16);
}
/**
 * Cast to Int - ColumnVector.
 * Casts each value in this column to an int. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asInts() {
  return castTo(DType.INT32);
}
/**
 * Cast to unsigned Int - ColumnVector.
 * Casts each value in this column to an int. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * <p>
 * Java has no unsigned int type, so properly decoding these values requires extra
 * steps on the part of the application. See {@link Integer#toUnsignedLong(int)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedInts() {
  return castTo(DType.UINT32);
}
/**
 * Cast to Long - ColumnVector.
 * Casts each value in this column to a long. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asLongs() {
  return castTo(DType.INT64);
}
/**
 * Cast to unsigned Long - ColumnVector.
 * Casts each value in this column to a long. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * <p>
 * Java has no unsigned long type, so properly decoding these values requires extra
 * steps on the part of the application. See {@link Long#toUnsignedString(long)}.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asUnsignedLongs() {
  return castTo(DType.UINT64);
}
/**
 * Cast to Float - ColumnVector.
 * Casts each value in this column to a float. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asFloats() {
  return castTo(DType.FLOAT32);
}
/**
 * Cast to Double - ColumnVector.
 * Casts each value in this column to a double. When casting from a Date, Timestamp, or
 * Boolean, the underlying numerical representation of the data is used.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asDoubles() {
  return castTo(DType.FLOAT64);
}
/**
 * Cast to TIMESTAMP_DAYS - ColumnVector.
 * Casts each value in this column to TIMESTAMP_DAYS; string columns are parsed with a
 * default ISO-8601 style format.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampDays() {
  if (!type.equals(DType.STRING)) {
    return castTo(DType.TIMESTAMP_DAYS);
  }
  return asTimestamp(DType.TIMESTAMP_DAYS, "%Y-%m-%dT%H:%M:%SZ%f");
}
/**
 * Cast to TIMESTAMP_DAYS - ColumnVector.
 * Parses the string values of this column into TIMESTAMP_DAYS using the given format.
 * @param format timestamp string format specifier; this column must be of string type.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampDays(String format) {
  assert type.equals(DType.STRING) : "A column of type string is required when using a format string";
  DType target = DType.TIMESTAMP_DAYS;
  return asTimestamp(target, format);
}
/**
 * Cast to TIMESTAMP_SECONDS - ColumnVector.
 * Casts each value in this column to TIMESTAMP_SECONDS; string columns are parsed with
 * a default ISO-8601 style format.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampSeconds() {
  if (!type.equals(DType.STRING)) {
    return castTo(DType.TIMESTAMP_SECONDS);
  }
  return asTimestamp(DType.TIMESTAMP_SECONDS, "%Y-%m-%dT%H:%M:%SZ%f");
}
/**
 * Cast to TIMESTAMP_SECONDS - ColumnVector.
 * Parses the string values of this column into TIMESTAMP_SECONDS using the given format.
 * @param format timestamp string format specifier; this column must be of string type.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampSeconds(String format) {
  assert type.equals(DType.STRING) : "A column of type string is required when using a format string";
  DType target = DType.TIMESTAMP_SECONDS;
  return asTimestamp(target, format);
}
/**
 * Cast to TIMESTAMP_MICROSECONDS - ColumnVector.
 * Casts each value in this column to TIMESTAMP_MICROSECONDS; string columns are parsed
 * with a default ISO-8601 style format.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampMicroseconds() {
  if (!type.equals(DType.STRING)) {
    return castTo(DType.TIMESTAMP_MICROSECONDS);
  }
  return asTimestamp(DType.TIMESTAMP_MICROSECONDS, "%Y-%m-%dT%H:%M:%SZ%f");
}
/**
 * Cast to TIMESTAMP_MICROSECONDS - ColumnVector.
 * Parses the string values of this column into TIMESTAMP_MICROSECONDS using the given format.
 * @param format timestamp string format specifier; this column must be of string type.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampMicroseconds(String format) {
  assert type.equals(DType.STRING) : "A column of type string is required when using a format string";
  DType target = DType.TIMESTAMP_MICROSECONDS;
  return asTimestamp(target, format);
}
/**
 * Cast to TIMESTAMP_MILLISECONDS - ColumnVector.
 * Casts each value in this column to TIMESTAMP_MILLISECONDS; string columns are parsed
 * with a default ISO-8601 style format.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampMilliseconds() {
  if (!type.equals(DType.STRING)) {
    return castTo(DType.TIMESTAMP_MILLISECONDS);
  }
  return asTimestamp(DType.TIMESTAMP_MILLISECONDS, "%Y-%m-%dT%H:%M:%SZ%f");
}
/**
 * Cast to TIMESTAMP_MILLISECONDS - ColumnVector.
 * Parses the string values of this column into TIMESTAMP_MILLISECONDS using the given format.
 * @param format timestamp string format specifier; this column must be of string type.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampMilliseconds(String format) {
  assert type.equals(DType.STRING) : "A column of type string is required when using a format string";
  DType target = DType.TIMESTAMP_MILLISECONDS;
  return asTimestamp(target, format);
}
/**
 * Cast to TIMESTAMP_NANOSECONDS - ColumnVector.
 * Casts each value in this column to TIMESTAMP_NANOSECONDS; string columns are parsed
 * with a default ISO-8601 style format using nanosecond precision ("%9f").
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampNanoseconds() {
  if (!type.equals(DType.STRING)) {
    return castTo(DType.TIMESTAMP_NANOSECONDS);
  }
  return asTimestamp(DType.TIMESTAMP_NANOSECONDS, "%Y-%m-%dT%H:%M:%SZ%9f");
}
/**
 * Cast to TIMESTAMP_NANOSECONDS - ColumnVector.
 * Parses the string values of this column into TIMESTAMP_NANOSECONDS using the given format.
 * @param format timestamp string format specifier; this column must be of string type.
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asTimestampNanoseconds(String format) {
  assert type.equals(DType.STRING) : "A column of type string is required when using a format string";
  DType target = DType.TIMESTAMP_NANOSECONDS;
  return asTimestamp(target, format);
}
/**
 * Parse a string column to a timestamp column. Strings that fail to parse default to 0,
 * corresponding to 1970-01-01 00:00:00.000.
 * @param timestampType timestamp DType that includes the time unit to parse into.
 * @param format strptime format specifier string of the timestamp. Supports
 *               %Y,%y,%m,%d,%H,%I,%p,%M,%S,%f,%z format specifiers.
 *               See https://github.com/rapidsai/custrings/blob/branch-0.10/docs/source/datetime.md
 *               for the full parsing format specification and documentation.
 * @return A new ColumnVector containing the long representations of the timestamps in
 *         the original column vector.
 */
public final ColumnVector asTimestamp(DType timestampType, String format) {
  assert type.equals(DType.STRING) : "A column of type string " +
      "is required for .to_timestamp() operation";
  assert format != null : "Format string may not be NULL";
  assert timestampType.isTimestampType() : "unsupported conversion to non-timestamp DType";
  // Timestamp types carry no scale, so only the native type id is passed down.
  long resultHandle = stringTimestampToTimestamp(getNativeView(),
      timestampType.typeId.getNativeId(), format);
  return new ColumnVector(resultHandle);
}
/**
 * Cast to Strings.
 * Negative timestamp values are not currently supported and will yield undesired
 * results. See github issue https://github.com/rapidsai/cudf/issues/3116 for details.
 * Timestamp columns are formatted as follows:
 * {@link DType#TIMESTAMP_DAYS} - "%Y-%m-%d"
 * {@link DType#TIMESTAMP_SECONDS} - "%Y-%m-%d %H:%M:%S"
 * {@link DType#TIMESTAMP_MICROSECONDS} - "%Y-%m-%d %H:%M:%S.%f"
 * {@link DType#TIMESTAMP_MILLISECONDS} - "%Y-%m-%d %H:%M:%S.%f"
 * {@link DType#TIMESTAMP_NANOSECONDS} - "%Y-%m-%d %H:%M:%S.%f"
 *
 * @return A new vector allocated on the GPU.
 */
public final ColumnVector asStrings() {
  switch (type.typeId) {
    case TIMESTAMP_DAYS:
      return asStrings("%Y-%m-%d");
    case TIMESTAMP_SECONDS:
      return asStrings("%Y-%m-%d %H:%M:%S");
    case TIMESTAMP_MILLISECONDS:
    case TIMESTAMP_MICROSECONDS:
    case TIMESTAMP_NANOSECONDS:
      return asStrings("%Y-%m-%d %H:%M:%S.%f");
    default:
      // Non-timestamp types go through the generic cast.
      return castTo(DType.STRING);
  }
}
/**
 * Convert a timestamp column to a string column using the given strftime-style format.
 * A unix timestamp is a long value representing how many units since
 * 1970-01-01 00:00:00:000 in either positive or negative direction.
 * No checking is done for invalid formats or invalid timestamp units.
 * Negative timestamp values are not currently supported and will yield undesired
 * results. See github issue https://github.com/rapidsai/cudf/issues/3116 for details.
 *
 * @param format - strftime format specifier string of the timestamp. Its used to parse and convert
 *               the timestamp with. Supports %m,%j,%d,%H,%M,%S,%y,%Y,%f format specifiers.
 *               %d Day of the month: 01-31
 *               %m Month of the year: 01-12
 *               %y Year without century: 00-99
 *               %Y Year with century: 0001-9999
 *               %H 24-hour of the day: 00-23
 *               %M Minute of the hour: 00-59
 *               %S Second of the minute: 00-59
 *               %f 6-digit microsecond: 000000-999999
 *               See https://github.com/rapidsai/custrings/blob/branch-0.10/docs/source/datetime.md
 *
 *               Reported bugs
 *               https://github.com/rapidsai/cudf/issues/4160 after the bug is fixed this method
 *               should also support
 *               %I 12-hour of the day: 01-12
 *               %p Only 'AM', 'PM'
 *               %j day of the year
 *
 * @return A new vector allocated on the GPU
 */
public final ColumnVector asStrings(String format) {
  assert type.isTimestampType() : "unsupported conversion from non-timestamp DType";
  // Bug fix: the original condition (format != null || format.isEmpty()) could never
  // fail as an assertion — it either passed (non-null format, even an empty one) or
  // threw NullPointerException when format was null. The intent, per the message, is
  // to reject null or empty format strings.
  assert format != null && !format.isEmpty() : "Format string may not be NULL or empty";
  return new ColumnVector(timestampToStringTimestamp(this.getNativeView(), format));
}
/**
 * Verifies that a string column can be parsed to timestamps using the provided format
 * pattern.
 *
 * The format pattern can include the following specifiers: "%Y,%y,%m,%d,%H,%I,%p,%M,%S,%f,%z"
 *
 * | Specifier | Description |
 * | :-------: | ----------- |
 * | \%d | Day of the month: 01-31 |
 * | \%m | Month of the year: 01-12 |
 * | \%y | Year without century: 00-99 |
 * | \%Y | Year with century: 0001-9999 |
 * | \%H | 24-hour of the day: 00-23 |
 * | \%I | 12-hour of the day: 01-12 |
 * | \%M | Minute of the hour: 00-59 |
 * | \%S | Second of the minute: 00-59 |
 * | \%f | 6-digit microsecond: 000000-999999 |
 * | \%z | UTC offset with format ±HHMM Example +0500 |
 * | \%j | Day of the year: 001-366 |
 * | \%p | Only 'AM', 'PM' or 'am', 'pm' are recognized |
 *
 * Other specifiers are not currently supported.
 * The "%f" supports a precision value to read the numeric digits. Specify the
 * precision with a single integer value (1-9), e.g. "%3f" for milliseconds, "%6f" for
 * microseconds and "%9f" for nanoseconds.
 *
 * Any null string entry results in a corresponding null row in the output column.
 *
 * The result is a boolean column where a {@code true} row indicates the corresponding
 * input string can be parsed correctly with the given format.
 *
 * @param format String specifying the timestamp format in strings.
 * @return New boolean ColumnVector.
 */
public final ColumnVector isTimestamp(String format) {
  long resultHandle = isTimestamp(getNativeView(), format);
  return new ColumnVector(resultHandle);
}
/////////////////////////////////////////////////////////////////////////////
// LISTS
/////////////////////////////////////////////////////////////////////////////
/**
 * For each list in this column pull out the entry at the given index. If the entry
 * would go off the end of the list, NULL is returned instead.
 * @param index 0-based offset into each list. Negative values go backwards from the
 *              end of the list.
 * @return a new column of the values at those indexes.
 */
public final ColumnVector extractListElement(int index) {
  assert type.equals(DType.LIST) : "A column of type LIST is required for .extractListElement()";
  long resultHandle = extractListElement(getNativeView(), index);
  return new ColumnVector(resultHandle);
}
/**
 * For each list in this column pull out the entry at the corresponding index specified
 * in the index column. If the entry goes off the end of the list, NULL is returned
 * instead.
 *
 * The index column must have the same row count as this list column.
 *
 * @param indices a column of 0-based offsets into the lists. Negative values go
 *                backwards from the end of each list.
 * @return a new column of the values at those indexes.
 */
public final ColumnVector extractListElement(ColumnView indices) {
  assert type.equals(DType.LIST) : "A column of type LIST is required for .extractListElement()";
  assert indices != null && DType.INT32.equals(indices.type)
      : "indices should be non-null and integer type";
  assert indices.getRowCount() == rows
      : "indices must have the same row count with list column";
  long resultHandle = extractListElementV(getNativeView(), indices.getNativeView());
  return new ColumnVector(resultHandle);
}
/**
 * Create a new LIST column by copying elements from the current LIST column, ignoring
 * duplicates, producing a LIST column in which each list contains only unique elements.
 *
 * The relative ordering of elements is kept, but any one of the duplicates may be the
 * survivor. Example: [0,3,4,0] may produce either [0,3,4] or [3,4,0], both valid here.
 *
 * @return A new LIST column having unique list elements.
 */
public final ColumnVector dropListDuplicates() {
  long resultHandle = dropListDuplicates(getNativeView(), DuplicateKeepOption.KEEP_ANY.nativeId);
  return new ColumnVector(resultHandle);
}
/**
 * Create a new LIST column by copying elements from the current LIST column, ignoring
 * duplicates, producing a LIST column in which each list contains only unique elements.
 *
 * The order of the output elements within each list is preserved as in the input.
 *
 * @param keepOption flag specifying which duplicate to keep (first, last, any)
 * @return A new LIST column having unique list elements.
 */
public final ColumnVector dropListDuplicates(DuplicateKeepOption keepOption) {
  return new ColumnVector(dropListDuplicates(getNativeView(), keepOption.nativeId));
}
/**
* Given a LIST column in which each element is a struct containing a <key, value> pair. An output
* LIST column is generated by copying elements of the current column in a way such that if a list
* contains multiple elements having the same key then only the last element will be copied.
*
* @return A new LIST column having list elements with unique keys.
*/
public final ColumnVector dropListDuplicatesWithKeysValues() {
return new ColumnVector(dropListDuplicatesWithKeysValues(getNativeView()));
}
/**
* Flatten each list of lists into a single list.
*
* The column must have rows that are lists of lists.
* Any row containing null list elements will result in a null output row.
*
* @return A new column vector containing the flattened result
*/
public ColumnVector flattenLists() {
return flattenLists(false);
}
/**
* Flatten each list of lists into a single list.
*
* The column must have rows that are lists of lists.
*
* @param ignoreNull Whether to ignore null list elements in the input column from the operation,
* or any row containing null list elements will result in a null output row
* @return A new column vector containing the flattened result
*/
public ColumnVector flattenLists(boolean ignoreNull) {
return new ColumnVector(flattenLists(getNativeView(), ignoreNull));
}
/////////////////////////////////////////////////////////////////////////////
// STRINGS
/////////////////////////////////////////////////////////////////////////////
/**
* Copy the current column to a new column, each string or list of the output column will have
* reverse order of characters or elements.
*
* @return A new column with lists or strings having reverse order.
*/
public final ColumnVector reverseStringsOrLists() {
assert type.equals(DType.STRING) || type.equals(DType.LIST) :
"A column of type string or list is required, actual: " + type;
return new ColumnVector(reverseStringsOrLists(getNativeView()));
}
/**
* Convert a string to upper case.
*/
public final ColumnVector upper() {
assert type.equals(DType.STRING) : "A column of type string is required for .upper() operation";
return new ColumnVector(upperStrings(getNativeView()));
}
/**
* Convert a string to lower case.
*/
public final ColumnVector lower() {
assert type.equals(DType.STRING) : "A column of type string is required for .lower() operation";
return new ColumnVector(lowerStrings(getNativeView()));
}
/**
* Locates the starting index of the first instance of the given string in each row of a column.
* 0 indexing, returns -1 if the substring is not found. Overloading stringLocate to support
* default values for start (0) and end index.
* @param substring scalar containing the string to locate within each row.
*/
public final ColumnVector stringLocate(Scalar substring) {
return stringLocate(substring, 0);
}
  /**
   * Locates the starting index of the first instance of the given string in each row of a column.
   * 0 indexing, returns -1 if the substring is not found. Overloading stringLocate to support
   * default value for end index (-1, the end of each string).
   *
   * @param substring scalar containing the string to locate within each row.
   * @param start character index to start the search from (inclusive).
   * @return a column of starting character indexes, -1 where not found.
   */
  public final ColumnVector stringLocate(Scalar substring, int start) {
    return stringLocate(substring, start, -1);
  }
/**
* Locates the starting index of the first instance of the given string in each row of a column.
* 0 indexing, returns -1 if the substring is not found. Can be be configured to start or end
* the search mid string.
* @param substring scalar containing the string scalar to locate within each row.
* @param start character index to start the search from (inclusive).
* @param end character index to end the search on (exclusive).
*/
public final ColumnVector stringLocate(Scalar substring, int start, int end) {
assert type.equals(DType.STRING) : "column type must be a String";
assert substring != null : "target string may not be null";
assert substring.getType().equals(DType.STRING) : "substring scalar must be a string scalar";
assert start >= 0 : "start index must be a positive value";
assert end >= start || end == -1 : "end index must be -1 or >= the start index";
return new ColumnVector(substringLocate(getNativeView(), substring.getScalarHandle(),
start, end));
}
/**
* Returns a list of columns by splitting each string using the specified pattern. The number of
* rows in the output columns will be the same as the input column. Null entries are added for a
* row where split results have been exhausted. Null input entries result in all nulls in the
* corresponding rows of the output columns.
*
* @param pattern UTF-8 encoded string identifying the split pattern for each input string.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @param splitByRegex a boolean flag indicating whether the input strings will be split by a
* regular expression pattern or just by a string literal delimiter.
* @return list of strings columns as a table.
*/
@Deprecated
public final Table stringSplit(String pattern, int limit, boolean splitByRegex) {
if (splitByRegex) {
return stringSplit(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), limit);
} else {
return stringSplit(pattern, limit);
}
}
  /**
   * Returns a list of columns by splitting each string using the specified regex program pattern.
   * The number of rows in the output columns will be the same as the input column. Null entries
   * are added for the rows where split results have been exhausted. Null input entries result in
   * all nulls in the corresponding rows of the output columns.
   *
   * @param regexProg the regex program with UTF-8 encoded string identifying the split pattern
   *                  for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   * @return list of strings columns as a table.
   */
  public final Table stringSplit(RegexProgram regexProg, int limit) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert regexProg != null : "regex program is null";
    assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported";
    return new Table(stringSplitRe(this.getNativeView(), regexProg.pattern(), regexProg.combinedFlags(),
        regexProg.capture().nativeId, limit));
  }
  /**
   * Returns a list of columns by splitting each string using the specified pattern. The number of
   * rows in the output columns will be the same as the input column. Null entries are added for a
   * row where split results have been exhausted. Null input entries result in all nulls in the
   * corresponding rows of the output columns.
   *
   * @param pattern UTF-8 encoded string identifying the split pattern for each input string.
   * @param splitByRegex a boolean flag indicating whether the input strings will be split by a
   *                     regular expression pattern or just by a string literal delimiter.
   * @return list of strings columns as a table.
   * @deprecated Use {@link #stringSplit(RegexProgram)} or {@link #stringSplit(String)}.
   */
  @Deprecated
  public final Table stringSplit(String pattern, boolean splitByRegex) {
    // Delegates with limit -1 (all possible splits).
    return stringSplit(pattern, -1, splitByRegex);
  }
  /**
   * Returns a list of columns by splitting each string using the specified string literal
   * delimiter. The number of rows in the output columns will be the same as the input column.
   * Null entries are added for a row where split results have been exhausted. Null input entries
   * result in all nulls in the corresponding rows of the output columns.
   *
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @param limit the maximum size of the list resulting from splitting each input string,
   *              or -1 for all possible splits. Note that limit = 0 (all possible splits without
   *              trailing empty strings) and limit = 1 (no split at all) are not supported.
   * @return list of strings columns as a table.
   */
  public final Table stringSplit(String delimiter, int limit) {
    assert type.equals(DType.STRING) : "column type must be a String";
    assert delimiter != null : "delimiter is null";
    assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported";
    return new Table(stringSplit(this.getNativeView(), delimiter, limit));
  }
  /**
   * Returns a list of columns by splitting each string using the specified string literal
   * delimiter. The number of rows in the output columns will be the same as the input column.
   * Null entries are added for a row where split results have been exhausted. Null input entries
   * result in all nulls in the corresponding rows of the output columns.
   *
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @return list of strings columns as a table.
   */
  public final Table stringSplit(String delimiter) {
    // Delegates with limit -1 (all possible splits).
    return stringSplit(delimiter, -1);
  }
  /**
   * Returns a list of columns by splitting each string using the specified regex program pattern.
   * The number of rows in the output columns will be the same as the input column. Null entries
   * are added for the rows where split results have been exhausted. Null input entries result in
   * all nulls in the corresponding rows of the output columns.
   *
   * @param regexProg the regex program with UTF-8 encoded string identifying the split pattern
   *                  for each input string.
   * @return list of strings columns as a table.
   */
  public final Table stringSplit(RegexProgram regexProg) {
    // Delegates with limit -1 (all possible splits).
    return stringSplit(regexProg, -1);
  }
/**
* Returns a column that are lists of strings in which each list is made by splitting the
* corresponding input string using the specified pattern.
*
* @param pattern UTF-8 encoded string identifying the split pattern for each input string.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @param splitByRegex a boolean flag indicating whether the input strings will be split by a
* regular expression pattern or just by a string literal delimiter.
* @return a LIST column of string elements.
*/
@Deprecated
public final ColumnVector stringSplitRecord(String pattern, int limit, boolean splitByRegex) {
if (splitByRegex) {
return stringSplitRecord(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), limit);
} else {
return stringSplitRecord(pattern, limit);
}
}
/**
* Returns a column that are lists of strings in which each list is made by splitting the
* corresponding input string using the specified regex program pattern.
*
* @param regexProg the regex program with UTF-8 encoded string identifying the split pattern
* for each input string.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @return a LIST column of string elements.
*/
public final ColumnVector stringSplitRecord(RegexProgram regexProg, int limit) {
assert type.equals(DType.STRING) : "column type must be String";
assert regexProg != null : "regex program is null";
assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported";
return new ColumnVector(
stringSplitRecordRe(this.getNativeView(), regexProg.pattern(), regexProg.combinedFlags(),
regexProg.capture().nativeId, limit));
}
  /**
   * Returns a column that are lists of strings in which each list is made by splitting the
   * corresponding input string using the specified pattern.
   *
   * @param pattern UTF-8 encoded string identifying the split pattern for each input string.
   * @param splitByRegex a boolean flag indicating whether the input strings will be split by a
   *                     regular expression pattern or just by a string literal delimiter.
   * @return a LIST column of string elements.
   * @deprecated Use {@link #stringSplitRecord(RegexProgram)} or
   *             {@link #stringSplitRecord(String)}.
   */
  @Deprecated
  public final ColumnVector stringSplitRecord(String pattern, boolean splitByRegex) {
    // Delegates with limit -1 (all possible splits).
    return stringSplitRecord(pattern, -1, splitByRegex);
  }
/**
* Returns a column that are lists of strings in which each list is made by splitting the
* corresponding input string using the specified string literal delimiter.
*
* @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @return a LIST column of string elements.
*/
public final ColumnVector stringSplitRecord(String delimiter, int limit) {
assert type.equals(DType.STRING) : "column type must be String";
assert delimiter != null : "delimiter is null";
assert limit != 0 && limit != 1 : "split limit == 0 and limit == 1 are not supported";
return new ColumnVector(stringSplitRecord(this.getNativeView(), delimiter, limit));
}
  /**
   * Returns a column that are lists of strings in which each list is made by splitting the
   * corresponding input string using the specified string literal delimiter.
   *
   * @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
   * @return a LIST column of string elements.
   */
  public final ColumnVector stringSplitRecord(String delimiter) {
    // Delegates with limit -1 (all possible splits).
    return stringSplitRecord(delimiter, -1);
  }
  /**
   * Returns a column that are lists of strings in which each list is made by splitting the
   * corresponding input string using the specified regex program pattern.
   *
   * @param regexProg the regex program with UTF-8 encoded string identifying the split pattern
   *                  for each input string.
   * @return a LIST column of string elements.
   */
  public final ColumnVector stringSplitRecord(RegexProgram regexProg) {
    // Delegates with limit -1 (all possible splits).
    return stringSplitRecord(regexProg, -1);
  }
/**
* Returns a new strings column that contains substrings of the strings in the provided column.
* The character positions to retrieve in each string are `[start, <the string end>)`..
*
* @param start first character index to begin the substring(inclusive).
*/
public final ColumnVector substring(int start) {
assert type.equals(DType.STRING) : "column type must be a String";
return new ColumnVector(substringS(getNativeView(), start));
}
/**
* Returns a new strings column that contains substrings of the strings in the provided column.
* 0-based indexing, If the stop position is past end of a string's length, then end of string is
* used as stop position for that string.
* @param start first character index to begin the substring(inclusive).
* @param end last character index to stop the substring(exclusive)
* @return A new java column vector containing the substrings.
*/
public final ColumnVector substring(int start, int end) {
assert type.equals(DType.STRING) : "column type must be a String";
return new ColumnVector(substring(getNativeView(), start, end));
}
/**
* Returns a new strings column that contains substrings of the strings in the provided column
* which uses unique ranges for each string
* @param start Vector containing start indices of each string
* @param end Vector containing end indices of each string. -1 indicated to read until end of string.
* @return A new java column vector containing the substrings/
*/
public final ColumnVector substring(ColumnView start, ColumnView end) {
assert type.equals(DType.STRING) : "column type must be a String";
assert (rows == start.getRowCount() && rows == end.getRowCount()) : "Number of rows must be equal";
assert (start.getType().equals(DType.INT32) && end.getType().equals(DType.INT32)) : "start and end " +
"vectors must be of integer type";
return new ColumnVector(substringColumn(getNativeView(), start.getNativeView(), end.getNativeView()));
}
/**
* Given a lists column of strings (each row is a list of strings), concatenates the strings
* within each row and returns a single strings column result. Each new string is created by
* concatenating the strings from the same row (same list element) delimited by the separator
* provided. This version of the function relaces nulls with empty string and returns null
* for empty list.
* @param sepCol strings column that provides separators for concatenation.
* @return A new java column vector containing the concatenated strings with separator between.
*/
public final ColumnVector stringConcatenateListElements(ColumnView sepCol) {
try (Scalar nullString = Scalar.fromString(null);
Scalar emptyString = Scalar.fromString("")) {
return stringConcatenateListElements(sepCol, nullString, emptyString,
false, false);
}
}
/**
* Given a lists column of strings (each row is a list of strings), concatenates the strings
* within each row and returns a single strings column result.
* Each new string is created by concatenating the strings from the same row (same list element)
* delimited by the row separator provided in the sepCol strings column.
* @param sepCol strings column that provides separators for concatenation.
* @param separatorNarep string scalar indicating null behavior when a separator is null.
* If set to null and the separator is null the resulting string will
* be null. If not null, this string will be used in place of a null
* separator.
* @param stringNarep string that should be used to replace null strings in any non-null list
* row. If set to null and the string is null the resulting string will
* be null. If not null, this string will be used in place of a null value.
* @param separateNulls if true, then the separator is included for null rows if
* `stringNarep` is valid.
* @param emptyStringOutputIfEmptyList if set to true, any input row that is an empty list
* will result in an empty string. Otherwise, it will result in a null.
* @return A new java column vector containing the concatenated strings with separator between.
*/
public final ColumnVector stringConcatenateListElements(ColumnView sepCol,
Scalar separatorNarep, Scalar stringNarep, boolean separateNulls,
boolean emptyStringOutputIfEmptyList) {
assert type.equals(DType.LIST) : "column type must be a list";
assert separatorNarep != null : "separator narep scalar provided may not be null";
assert stringNarep != null : "string narep scalar provided may not be null";
assert separatorNarep.getType().equals(DType.STRING) : "separator naprep scalar must be a string scalar";
assert stringNarep.getType().equals(DType.STRING) : "string narep scalar must be a string scalar";
return new ColumnVector(stringConcatenationListElementsSepCol(getNativeView(),
sepCol.getNativeView(), separatorNarep.getScalarHandle(), stringNarep.getScalarHandle(),
separateNulls, emptyStringOutputIfEmptyList));
}
/**
* Given a lists column of strings (each row is a list of strings), concatenates the strings
* within each row and returns a single strings column result. Each new string is created by
* concatenating the strings from the same row (same list element) delimited by the
* separator provided.
* @param separator string scalar inserted between each string being merged.
* @param narep string scalar indicating null behavior. If set to null and any string in the row
* is null the resulting string will be null. If not null, null values in any
* column will be replaced by the specified string. The underlying value in the
* string scalar may be null, but the object passed in may not.
* @param separateNulls if true, then the separator is included for null rows if
* `narep` is valid.
* @param emptyStringOutputIfEmptyList if set to true, any input row that is an empty list
* will result in an empty string. Otherwise, it will result in a null.
* @return A new java column vector containing the concatenated strings with separator between.
*/
public final ColumnVector stringConcatenateListElements(Scalar separator,
Scalar narep, boolean separateNulls, boolean emptyStringOutputIfEmptyList) {
assert type.equals(DType.LIST) : "column type must be a list";
assert separator != null : "separator scalar provided may not be null";
assert narep != null : "column narep scalar provided may not be null";
assert narep.getType().equals(DType.STRING) : "narep scalar must be a string scalar";
return new ColumnVector(stringConcatenationListElements(getNativeView(),
separator.getScalarHandle(), narep.getScalarHandle(), separateNulls,
emptyStringOutputIfEmptyList));
}
/**
* Given a strings column, each string in it is repeated a number of times specified by the
* <code>repeatTimes</code> parameter.
*
* In special cases:
* - If <code>repeatTimes</code> is not a positive number, a non-null input string will always
* result in an empty output string.
* - A null input string will always result in a null output string regardless of the value of
* the <code>repeatTimes</code> parameter.
*
* @param repeatTimes The number of times each input string is repeated.
* @return A new java column vector containing repeated strings.
*/
public final ColumnVector repeatStrings(int repeatTimes) {
assert type.equals(DType.STRING) : "column type must be String";
return new ColumnVector(repeatStrings(getNativeView(), repeatTimes));
}
/**
* Given a strings column, an output strings column is generated by repeating each of the input
* string by a number of times given by the corresponding row in a <code>repeatTimes</code>
* numeric column.
*
* In special cases:
* - Any null row (from either the input strings column or the <code>repeatTimes</code> column)
* will always result in a null output string.
* - If any value in the <code>repeatTimes</code> column is not a positive number and its
* corresponding input string is not null, the output string will be an empty string.
*
* @param repeatTimes The column containing numbers of times each input string is repeated.
* @return A new java column vector containing repeated strings.
*/
public final ColumnVector repeatStrings(ColumnView repeatTimes) {
assert type.equals(DType.STRING) : "column type must be String";
return new ColumnVector(repeatStringsWithColumnRepeatTimes(getNativeView(),
repeatTimes.getNativeView()));
}
/**
* Apply a JSONPath string to all rows in an input strings column.
*
* Applies a JSONPath string to an incoming strings column where each row in the column
* is a valid json string. The output is returned by row as a strings column.
*
* For reference, https://tools.ietf.org/id/draft-goessner-dispatch-jsonpath-00.html
* Note: Only implements the operators: $ . [] *
*
* @param path The JSONPath string to be applied to each row
* @param path The GetJsonObjectOptions to control get_json_object behaviour
* @return new strings ColumnVector containing the retrieved json object strings
*/
public final ColumnVector getJSONObject(Scalar path, GetJsonObjectOptions options) {
assert(type.equals(DType.STRING)) : "column type must be a String";
return new ColumnVector(getJSONObject(getNativeView(), path.getScalarHandle(), options.isAllowSingleQuotes(), options.isStripQuotesFromSingleStrings(), options.isMissingFieldsAsNulls()));
}
/**
* Apply a JSONPath string to all rows in an input strings column.
*
* Applies a JSONPath string to an incoming strings column where each row in the column
* is a valid json string. The output is returned by row as a strings column.
*
* For reference, https://tools.ietf.org/id/draft-goessner-dispatch-jsonpath-00.html
* Note: Only implements the operators: $ . [] *
*
* @param path The JSONPath string to be applied to each row
* @return new strings ColumnVector containing the retrieved json object strings
*/
public final ColumnVector getJSONObject(Scalar path) {
assert(type.equals(DType.STRING)) : "column type must be a String";
return getJSONObject(path, GetJsonObjectOptions.DEFAULT);
}
/**
* Returns a new strings column where target string within each string is replaced with the specified
* replacement string.
* The replacement proceeds from the beginning of the string to the end, for example,
* replacing "aa" with "b" in the string "aaa" will result in "ba" rather than "ab".
* Specifying an empty string for replace will essentially remove the target string if found in each string.
* Null string entries will return null output string entries.
* target Scalar should be string and should not be empty or null.
*
* @param target String to search for within each string.
* @param replace Replacement string if target is found.
* @return A new java column vector containing replaced strings
*/
public final ColumnVector stringReplace(Scalar target, Scalar replace) {
assert type.equals(DType.STRING) : "column type must be a String";
assert target != null : "target string may not be null";
assert target.getType().equals(DType.STRING) : "target string must be a string scalar";
assert target.getJavaString().isEmpty() == false : "target scalar may not be empty";
return new ColumnVector(stringReplace(getNativeView(), target.getScalarHandle(),
replace.getScalarHandle()));
}
/**
* Returns a new strings column where target strings with each string are replaced with
* corresponding replacement strings. For each string in the column, the list of targets
* is searched within that string. If a target string is found, it is replaced by the
* corresponding entry in the repls column. All occurrences found in each string are replaced.
* The repls argument can optionally contain a single string. In this case, all matching
* target substrings will be replaced by that single string.
*
* Example:
* cv = ["hello", "goodbye"]
* targets = ["e","o"]
* repls = ["EE","OO"]
* r1 = cv.stringReplace(targets, repls)
* r1 is now ["hEEllO", "gOOOOdbyEE"]
*
* targets = ["e", "o"]
* repls = ["_"]
* r2 = cv.stringReplace(targets, repls)
* r2 is now ["h_ll_", "g__dby_"]
*
* @param targets Strings to search for in each string.
* @param repls Corresponding replacement strings for target strings.
* @return A new java column vector containing the replaced strings.
*/
public final ColumnVector stringReplace(ColumnView targets, ColumnView repls) {
assert type.equals(DType.STRING) : "column type must be a String";
assert targets != null : "target list may not be null";
assert targets.getType().equals(DType.STRING) : "target list must be a string column";
assert repls != null : "replacement list may not be null";
assert repls.getType().equals(DType.STRING) : "replacement list must be a string column";
return new ColumnVector(stringReplaceMulti(getNativeView(), targets.getNativeView(),
repls.getNativeView()));
}
  /**
   * For each string, replaces any character sequence matching the given pattern using the
   * replacement string scalar.
   *
   * @param pattern The regular expression pattern to search within each string.
   * @param repl The string scalar to replace for each pattern match.
   * @return A new column vector containing the string results.
   * @deprecated Use {@link #replaceRegex(RegexProgram, Scalar)} instead.
   */
  @Deprecated
  public final ColumnVector replaceRegex(String pattern, Scalar repl) {
    return replaceRegex(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), repl);
  }
  /**
   * For each string, replaces any character sequence matching the given regex program pattern
   * using the replacement string scalar.
   *
   * @param regexProg The regex program with pattern to search within each string.
   * @param repl The string scalar to replace for each pattern match.
   * @return A new column vector containing the string results.
   */
  public final ColumnVector replaceRegex(RegexProgram regexProg, Scalar repl) {
    // Delegates with maxRepl -1 (replace all matches).
    return replaceRegex(regexProg, repl, -1);
  }
  /**
   * For each string, replaces any character sequence matching the given pattern using the
   * replacement string scalar.
   *
   * @param pattern The regular expression pattern to search within each string.
   * @param repl The string scalar to replace for each pattern match.
   * @param maxRepl The maximum number of times a replacement should occur within each string.
   * @return A new column vector containing the string results.
   * @deprecated Use {@link #replaceRegex(RegexProgram, Scalar, int)} instead.
   */
  @Deprecated
  public final ColumnVector replaceRegex(String pattern, Scalar repl, int maxRepl) {
    return replaceRegex(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), repl, maxRepl);
  }
/**
* For each string, replaces any character sequence matching the given regex program pattern
* using the replacement string scalar.
*
* @param regexProg The regex program with pattern to search within each string.
* @param repl The string scalar to replace for each pattern match.
* @param maxRepl The maximum number of times a replacement should occur within each string.
* @return A new column vector containing the string results.
*/
public final ColumnVector replaceRegex(RegexProgram regexProg, Scalar repl, int maxRepl) {
if (!repl.getType().equals(DType.STRING)) {
throw new IllegalArgumentException("Replacement must be a string scalar");
}
assert regexProg != null : "regex program may not be null";
return new ColumnVector(replaceRegex(getNativeView(), regexProg.pattern(), regexProg.combinedFlags(),
regexProg.capture().nativeId, repl.getScalarHandle(), maxRepl));
}
/**
* For each string, replaces any character sequence matching any of the regular expression
* patterns with the corresponding replacement strings.
*
* @param patterns The regular expression patterns to search within each string.
* @param repls The string scalars to replace for each corresponding pattern match.
* @return A new column vector containing the string results.
*/
public final ColumnVector replaceMultiRegex(String[] patterns, ColumnView repls) {
return new ColumnVector(replaceMultiRegex(getNativeView(), patterns,
repls.getNativeView()));
}
  /**
   * For each string, replaces any character sequence matching the given pattern
   * using the replace template for back-references.
   *
   * Any null string entries return corresponding null output column entries.
   *
   * @param pattern The regular expression patterns to search within each string.
   * @param replace The replacement template for creating the output string.
   * @return A new java column vector containing the string results.
   * @deprecated Use {@link #stringReplaceWithBackrefs(RegexProgram, String)} instead.
   */
  @Deprecated
  public final ColumnVector stringReplaceWithBackrefs(String pattern, String replace) {
    return stringReplaceWithBackrefs(new RegexProgram(pattern), replace);
  }
/**
* For each string, replaces any character sequence matching the given regex program
* pattern using the replace template for back-references.
*
* Any null string entries return corresponding null output column entries.
*
* @param regexProg The regex program with pattern to search within each string.
* @param replace The replacement template for creating the output string.
* @return A new java column vector containing the string results.
*/
public final ColumnVector stringReplaceWithBackrefs(RegexProgram regexProg, String replace) {
assert regexProg != null : "regex program may not be null";
return new ColumnVector(
stringReplaceWithBackrefs(getNativeView(), regexProg.pattern(), regexProg.combinedFlags(),
regexProg.capture().nativeId, replace));
}
/**
* Add '0' as padding to the left of each string.
*
* If the string is already width or more characters, no padding is performed.
* No strings are truncated.
*
* Null string entries result in null entries in the output column.
*
* @param width The minimum number of characters for each string.
* @return New column of strings.
*/
public final ColumnVector zfill(int width) {
return new ColumnVector(zfill(getNativeView(), width));
}
  /**
   * Pad the Strings column until it reaches the desired length with spaces " " on the right.
   *
   * If the string is already width or more characters, no padding is performed.
   * No strings are truncated.
   *
   * Null string entries result in null entries in the output column.
   *
   * @param width the minimum number of characters for each string.
   * @return the new strings column.
   */
  public final ColumnVector pad(int width) {
    // Delegates with right-side padding and a single-space fill string.
    return pad(width, PadSide.RIGHT, " ");
  }
/**
* Pad the Strings column until it reaches the desired length with spaces " ".
*
* If the string is already width or more characters, no padding is performed.
* No strings are truncated.
*
* Null string entries result in null entries in the output column.
*
* @param width the minimum number of characters for each string.
* @param side where to add new characters.
* @return the new strings column.
*/
public final ColumnVector pad(int width, PadSide side) {
return pad(width, side, " ");
}
/**
* Pad the Strings column until it reaches the desired length.
*
* If the string is already width or more characters, no padding is performed.
* No strings are truncated.
*
* Null string entries result in null entries in the output column.
*
* @param width the minimum number of characters for each string.
* @param side where to add new characters.
* @param fillChar a single character string that holds what should be added.
* @return the new strings column.
*/
public final ColumnVector pad(int width, PadSide side, String fillChar) {
assert fillChar != null;
assert fillChar.length() == 1;
return new ColumnVector(pad(getNativeView(), width, side.getNativeId(), fillChar));
}
/**
* Checks if each string in a column starts with a specified comparison string, resulting in a
* parallel column of the boolean results.
* @param pattern scalar containing the string being searched for at the beginning of the column's strings.
* @return A new java column vector containing the boolean results.
*/
public final ColumnVector startsWith(Scalar pattern) {
assert type.equals(DType.STRING) : "column type must be a String";
assert pattern != null : "pattern scalar may not be null";
assert pattern.getType().equals(DType.STRING) : "pattern scalar must be a string scalar";
return new ColumnVector(stringStartWith(getNativeView(), pattern.getScalarHandle()));
}
/**
* Checks if each string in a column ends with a specified comparison string, resulting in a
* parallel column of the boolean results.
* @param pattern scalar containing the string being searched for at the end of the column's strings.
* @return A new java column vector containing the boolean results.
*/
public final ColumnVector endsWith(Scalar pattern) {
assert type.equals(DType.STRING) : "column type must be a String";
assert pattern != null : "pattern scalar may not be null";
assert pattern.getType().equals(DType.STRING) : "pattern scalar must be a string scalar";
return new ColumnVector(stringEndWith(getNativeView(), pattern.getScalarHandle()));
}
/**
* Removes whitespace from the beginning and end of a string.
* @return A new java column vector containing the stripped strings.
*/
public final ColumnVector strip() {
assert type.equals(DType.STRING) : "column type must be a String";
try (Scalar emptyString = Scalar.fromString("")) {
return new ColumnVector(stringStrip(getNativeView(), StripType.BOTH.nativeId,
emptyString.getScalarHandle()));
}
}
/**
* Removes the specified characters from the beginning and end of each string.
* @param toStrip UTF-8 encoded characters to strip from each string.
* @return A new java column vector containing the stripped strings.
*/
public final ColumnVector strip(Scalar toStrip) {
assert type.equals(DType.STRING) : "column type must be a String";
assert toStrip != null : "toStrip scalar may not be null";
assert toStrip.getType().equals(DType.STRING) : "toStrip must be a string scalar";
return new ColumnVector(stringStrip(getNativeView(), StripType.BOTH.nativeId, toStrip.getScalarHandle()));
}
/**
* Removes whitespace from the beginning of a string.
* @return A new java column vector containing the stripped strings.
*/
public final ColumnVector lstrip() {
assert type.equals(DType.STRING) : "column type must be a String";
try (Scalar emptyString = Scalar.fromString("")) {
return new ColumnVector(stringStrip(getNativeView(), StripType.LEFT.nativeId,
emptyString.getScalarHandle()));
}
}
/**
* Removes the specified characters from the beginning of each string.
* @param toStrip UTF-8 encoded characters to strip from each string.
* @return A new java column vector containing the stripped strings.
*/
public final ColumnVector lstrip(Scalar toStrip) {
assert type.equals(DType.STRING) : "column type must be a String";
assert toStrip != null : "toStrip Scalar may not be null";
assert toStrip.getType().equals(DType.STRING) : "toStrip must be a string scalar";
return new ColumnVector(stringStrip(getNativeView(), StripType.LEFT.nativeId, toStrip.getScalarHandle()));
}
/**
* Removes whitespace from the end of a string.
* @return A new java column vector containing the stripped strings.
*/
public final ColumnVector rstrip() {
assert type.equals(DType.STRING) : "column type must be a String";
try (Scalar emptyString = Scalar.fromString("")) {
return new ColumnVector(stringStrip(getNativeView(), StripType.RIGHT.nativeId,
emptyString.getScalarHandle()));
}
}
/**
* Removes the specified characters from the end of each string.
* @param toStrip UTF-8 encoded characters to strip from each string.
* @return A new java column vector containing the stripped strings.
*/
public final ColumnVector rstrip(Scalar toStrip) {
assert type.equals(DType.STRING) : "column type must be a String";
assert toStrip != null : "toStrip Scalar may not be null";
assert toStrip.getType().equals(DType.STRING) : "toStrip must be a string scalar";
return new ColumnVector(stringStrip(getNativeView(), StripType.RIGHT.nativeId, toStrip.getScalarHandle()));
}
/**
* Checks if each string in a column contains a specified comparison string, resulting in a
* parallel column of the boolean results.
* @param compString scalar containing the string being searched for.
* @return A new java column vector containing the boolean results.
*/
public final ColumnVector stringContains(Scalar compString) {
assert type.equals(DType.STRING) : "column type must be a String";
assert compString != null : "compString scalar may not be null";
assert compString.getType().equals(DType.STRING) : "compString scalar must be a string scalar";
return new ColumnVector(stringContains(getNativeView(), compString.getScalarHandle()));
}
/**
* @brief Searches for the given target strings within each string in the provided column
*
* Each column in the result table corresponds to the result for the target string at the same
* ordinal. i.e. 0th column is the BOOL8 column result for the 0th target string, 1th for 1th,
* etc.
*
* If the target is not found for a string, false is returned for that entry in the output column.
* If the target is an empty string, true is returned for all non-null entries in the output column.
*
* Any null input strings return corresponding null entries in the output columns.
*
* input = ["a", "b", "c"]
* targets = ["a", "c"]
* output is a table with two boolean columns:
* column 0: [true, false, false]
* column 1: [false, false, true]
*
* @param targets UTF-8 encoded strings to search for in each string in `input`
* @return BOOL8 columns
*/
public final ColumnVector[] stringContains(ColumnView targets) {
assert type.equals(DType.STRING) : "column type must be a String";
assert targets.getType().equals(DType.STRING) : "targets type must be a string";
assert targets.getNullCount() == 0 : "targets must not contain nulls";
assert targets.getRowCount() > 0 : "targets must not be empty";
long[] resultPointers = stringContainsMulti(getNativeView(), targets.getNativeView());
return Arrays.stream(resultPointers).mapToObj(ColumnVector::new).toArray(ColumnVector[]::new);
}
/**
* Replaces values less than `lo` in `input` with `lo`,
* and values greater than `hi` with `hi`.
*
* if `lo` is invalid, then lo will not be considered while
* evaluating the input (Essentially considered minimum value of that type).
* if `hi` is invalid, then hi will not be considered while
* evaluating the input (Essentially considered maximum value of that type).
*
* ```
* Example:
* input: {1, 2, 3, NULL, 5, 6, 7}
*
* valid lo and hi
* lo: 3, hi: 5, lo_replace : 0, hi_replace : 16
* output:{0, 0, 3, NULL, 5, 16, 16}
*
* invalid lo
* lo: NULL, hi: 5, lo_replace : 0, hi_replace : 16
* output:{1, 2, 3, NULL, 5, 16, 16}
*
* invalid hi
* lo: 3, hi: NULL, lo_replace : 0, hi_replace : 16
* output:{0, 0, 3, NULL, 5, 6, 7}
* ```
* @param lo - Minimum clamp value. All elements less than `lo` will be replaced by `lo`.
* Ignored if null.
* @param hi - Maximum clamp value. All elements greater than `hi` will be replaced by `hi`.
* Ignored if null.
* @return Returns a new clamped column as per `lo` and `hi` boundaries
*/
public final ColumnVector clamp(Scalar lo, Scalar hi) {
return new ColumnVector(clamper(this.getNativeView(), lo.getScalarHandle(),
lo.getScalarHandle(), hi.getScalarHandle(), hi.getScalarHandle()));
}
/**
* Replaces values less than `lo` in `input` with `lo_replace`,
* and values greater than `hi` with `hi_replace`.
*
* if `lo` is invalid, then lo will not be considered while
* evaluating the input (Essentially considered minimum value of that type).
* if `hi` is invalid, then hi will not be considered while
* evaluating the input (Essentially considered maximum value of that type).
*
* @note: If `lo` is valid then `lo_replace` should be valid
* If `hi` is valid then `hi_replace` should be valid
*
* ```
* Example:
* input: {1, 2, 3, NULL, 5, 6, 7}
*
* valid lo and hi
* lo: 3, hi: 5, lo_replace : 0, hi_replace : 16
* output:{0, 0, 3, NULL, 5, 16, 16}
*
* invalid lo
* lo: NULL, hi: 5, lo_replace : 0, hi_replace : 16
* output:{1, 2, 3, NULL, 5, 16, 16}
*
* invalid hi
* lo: 3, hi: NULL, lo_replace : 0, hi_replace : 16
* output:{0, 0, 3, NULL, 5, 6, 7}
* ```
*
* @param lo - Minimum clamp value. All elements less than `lo` will be replaced by `loReplace`. Ignored if null.
* @param loReplace - All elements less than `lo` will be replaced by `loReplace`.
* @param hi - Maximum clamp value. All elements greater than `hi` will be replaced by `hiReplace`. Ignored if null.
* @param hiReplace - All elements greater than `hi` will be replaced by `hiReplace`.
* @return - a new clamped column as per `lo` and `hi` boundaries
*/
public final ColumnVector clamp(Scalar lo, Scalar loReplace, Scalar hi, Scalar hiReplace) {
return new ColumnVector(clamper(this.getNativeView(), lo.getScalarHandle(),
loReplace.getScalarHandle(), hi.getScalarHandle(), hiReplace.getScalarHandle()));
}
/**
* Returns a boolean ColumnVector identifying rows which
* match the given regex pattern but only at the beginning of the string.
*
* ```
* cv = ["abc", "123", "def456"]
* result = cv.matchesRe("\\d+")
* r is now [false, true, false]
* ```
* Any null string entries return corresponding null output column entries.
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
*
* @param pattern Regex pattern to match to each string.
* @return New ColumnVector of boolean results for each string.
*/
@Deprecated
public final ColumnVector matchesRe(String pattern) {
return matchesRe(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE));
}
/**
* Returns a boolean ColumnVector identifying rows which
* match the given regex program pattern but only at the beginning of the string.
*
* ```
* cv = ["abc", "123", "def456"]
* p = new RegexProgram("\\d+", CaptureGroups.NON_CAPTURE)
* r = cv.matchesRe(p)
* r is now [false, true, false]
* ```
* Any null string entries return corresponding null output column entries.
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
*
* @param regexProg Regex program to match to each string.
* @return New ColumnVector of boolean results for each string.
*/
public final ColumnVector matchesRe(RegexProgram regexProg) {
assert type.equals(DType.STRING) : "column type must be a String";
assert regexProg != null : "regex program may not be null";
assert !regexProg.pattern().isEmpty() : "pattern string may not be empty";
return new ColumnVector(matchesRe(getNativeView(), regexProg.pattern(),
regexProg.combinedFlags(), regexProg.capture().nativeId));
}
/**
* Returns a boolean ColumnVector identifying rows which
* match the given regex pattern starting at any location.
*
* ```
* cv = ["abc", "123", "def456"]
* r = cv.containsRe("\\d+")
* r is now [false, true, true]
* ```
* Any null string entries return corresponding null output column entries.
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
*
* @param pattern Regex pattern to match to each string.
* @return New ColumnVector of boolean results for each string.
*/
@Deprecated
public final ColumnVector containsRe(String pattern) {
return containsRe(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE));
}
/**
* Returns a boolean ColumnVector identifying rows which
* match the given RegexProgram pattern starting at any location.
*
* ```
* cv = ["abc", "123", "def456"]
* p = new RegexProgram("\\d+", CaptureGroups.NON_CAPTURE)
* r = cv.containsRe(p)
* r is now [false, true, true]
* ```
* Any null string entries return corresponding null output column entries.
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
*
* @param regexProg Regex program to match to each string.
* @return New ColumnVector of boolean results for each string.
*/
public final ColumnVector containsRe(RegexProgram regexProg) {
assert type.equals(DType.STRING) : "column type must be a String";
assert regexProg != null : "regex program may not be null";
assert !regexProg.pattern().isEmpty() : "pattern string may not be empty";
return new ColumnVector(containsRe(getNativeView(), regexProg.pattern(),
regexProg.combinedFlags(), regexProg.capture().nativeId));
}
/**
* For each captured group specified in the given regular expression
* return a column in the table. Null entries are added if the string
* does not match. Any null inputs also result in null output entries.
*
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
* @param pattern the pattern to use
* @return the table of extracted matches
* @throws CudfException if any error happens including if the RE does
* not contain any capture groups.
*/
@Deprecated
public final Table extractRe(String pattern) throws CudfException {
return extractRe(new RegexProgram(pattern));
}
/**
* For each captured group specified in the given regex program
* return a column in the table. Null entries are added if the string
* does not match. Any null inputs also result in null output entries.
*
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
* @param regexProg the regex program to use
* @return the table of extracted matches
* @throws CudfException if any error happens including if the regex
* program does not contain any capture groups.
*/
public final Table extractRe(RegexProgram regexProg) throws CudfException {
assert type.equals(DType.STRING) : "column type must be a String";
assert regexProg != null : "regex program may not be null";
return new Table(extractRe(this.getNativeView(), regexProg.pattern(),
regexProg.combinedFlags(), regexProg.capture().nativeId));
}
/**
* Extracts all strings that match the given regular expression and corresponds to the
* regular expression group index. Any null inputs also result in null output entries.
*
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
* @param pattern The regex pattern
* @param idx The regex group index
* @return A new column vector of extracted matches
*/
@Deprecated
public final ColumnVector extractAllRecord(String pattern, int idx) {
if (idx == 0) {
return extractAllRecord(new RegexProgram(pattern, CaptureGroups.NON_CAPTURE), idx);
}
return extractAllRecord(new RegexProgram(pattern), idx);
}
/**
* Extracts all strings that match the given regex program pattern and corresponds to the
* regular expression group index. Any null inputs also result in null output entries.
*
* For supported regex patterns refer to:
* @link https://docs.rapids.ai/api/libcudf/nightly/md_regex.html
* @param regexProg The regex program
* @param idx The regex group index
* @return A new column vector of extracted matches
*/
public final ColumnVector extractAllRecord(RegexProgram regexProg, int idx) {
assert type.equals(DType.STRING) : "column type must be a String";
assert idx >= 0 : "group index must be at least 0";
assert regexProg != null : "regex program may not be null";
return new ColumnVector(
extractAllRecord(this.getNativeView(), regexProg.pattern(), regexProg.combinedFlags(),
regexProg.capture().nativeId, idx));
}
/**
* Returns a boolean ColumnVector identifying rows which
* match the given like pattern.
*
* The like pattern expects only 2 wildcard special characters
* - `%` any number of any character (including no characters)
* - `_` any single character
*
* ```
* cv = ["azaa", "ababaabba", "aaxa"]
* r = cv.like("%a_aa%", "\\")
* r is now [true, true, false]
* r = cv.like("a__a", "\\")
* r is now [true, false, true]
* ```
*
* The escape character is specified to include either `%` or `_` in the search,
* which is expected to be either 0 or 1 character.
* If more than one character is specified, only the first character is used.
*
* ```
* cv = ["abc_def", "abc1def", "abc_"]
* r = cv.like("abc/_d%", "/")
* r is now [true, false, false]
* ```
* Any null string entries return corresponding null output column entries.
*
* @param pattern Like pattern to match to each string.
* @param escapeChar Character specifies the escape prefix; default is "\\".
* @return New ColumnVector of boolean results for each string.
*/
public final ColumnVector like(Scalar pattern, Scalar escapeChar) {
assert type.equals(DType.STRING) : "column type must be a String";
assert pattern != null : "pattern scalar must not be null";
assert pattern.getType().equals(DType.STRING) : "pattern scalar must be a string scalar";
assert escapeChar != null : "escapeChar scalar must not be null";
assert escapeChar.getType().equals(DType.STRING) : "escapeChar scalar must be a string scalar";
return new ColumnVector(like(getNativeView(), pattern.getScalarHandle(), escapeChar.getScalarHandle()));
}
/**
* Converts all character sequences starting with '%' into character code-points
* interpreting the 2 following characters as hex values to create the code-point.
* For example, the sequence '%20' is converted into byte (0x20) which is a single
* space character. Another example converts '%C3%A9' into 2 sequential bytes
* (0xc3 and 0xa9 respectively) which is the é character. Overall, 3 characters
* are converted into one char byte whenever a '%%' (single percent) character
* is encountered in the string.
* <p>
* Any null entries will result in corresponding null entries in the output column.
*
* @return a new column instance containing the decoded strings
*/
public final ColumnVector urlDecode() throws CudfException {
assert type.equals(DType.STRING) : "column type must be a String";
return new ColumnVector(urlDecode(getNativeView()));
}
/**
* Converts mostly non-ascii characters and control characters into UTF-8 hex code-points
* prefixed with '%'. For example, the space character must be converted to characters '%20' where
* the '20' indicates the hex value for space in UTF-8. Likewise, multi-byte characters are
* converted to multiple hex characters. For example, the é character is converted to characters
* '%C3%A9' where 'C3A9' is the UTF-8 bytes 0xC3A9 for this character.
* <p>
* Any null entries will result in corresponding null entries in the output column.
*
* @return a new column instance containing the encoded strings
*/
public final ColumnVector urlEncode() throws CudfException {
assert type.equals(DType.STRING) : "column type must be a String";
return new ColumnVector(urlEncode(getNativeView()));
}
private static void assertIsSupportedMapKeyType(DType keyType) {
boolean isSupportedKeyType =
!keyType.equals(DType.EMPTY) && !keyType.equals(DType.LIST) && !keyType.equals(DType.STRUCT);
assert isSupportedKeyType : "Map lookup by STRUCT and LIST keys is not supported.";
}
/**
* Given a column of type List<Struct<X, Y>> and a key column of type X, return a column of type Y,
* where each row in the output column is the Y value corresponding to the X key.
* If the key is not found, the corresponding output value is null.
* @param keys the column view with keys to lookup in the column
* @return a column of values or nulls based on the lookup result
*/
public final ColumnVector getMapValue(ColumnView keys) {
assert type.equals(DType.LIST) : "column type must be a LIST";
assert keys != null : "Lookup key may not be null";
return new ColumnVector(mapLookupForKeys(getNativeView(), keys.getNativeView()));
}
/**
* Given a column of type List<Struct<X, Y>> and a key of type X, return a column of type Y,
* where each row in the output column is the Y value corresponding to the X key.
* If the key is not found, the corresponding output value is null.
* @param key the scalar key to lookup in the column
* @return a column of values or nulls based on the lookup result
*/
public final ColumnVector getMapValue(Scalar key) {
assert type.equals(DType.LIST) : "column type must be a LIST";
assert key != null : "Lookup key may not be null";
assertIsSupportedMapKeyType(key.getType());
return new ColumnVector(mapLookup(getNativeView(), key.getScalarHandle()));
}
/** For a column of type List<Struct<String, String>> and a passed in String key, return a boolean
* column for all keys in the structs, It is true if the key exists in the corresponding map for
* that row, false otherwise. It will never return null for a row.
* @param key the String scalar to lookup in the column
* @return a boolean column based on the lookup result
*/
public final ColumnVector getMapKeyExistence(Scalar key) {
assert type.equals(DType.LIST) : "column type must be a LIST";
assert key != null : "Lookup key may not be null";
assertIsSupportedMapKeyType(key.getType());
return new ColumnVector(mapContains(getNativeView(), key.getScalarHandle()));
}
/** For a column of type List<Struct<_, _>> and a passed in key column, return a boolean
* column for all keys in the map. Each output row is true if the key exists in the corresponding map for
* that row, false otherwise. It will never return null for a row.
* @param keys the keys to lookup in the column
* @return a boolean column based on the lookup result
*/
public final ColumnVector getMapKeyExistence(ColumnView keys) {
assert type.equals(DType.LIST) : "column type must be a LIST";
assert keys != null : "Lookup key may not be null";
assertIsSupportedMapKeyType(keys.getType());
return new ColumnVector(mapContainsKeys(getNativeView(), keys.getNativeView()));
}
/**
* Create a new struct column view of existing column views. Note that this will NOT copy
* the contents of the input columns to make a new vector, but makes a view that must not
* outlive the child views that it references. The resulting column cannot be null.
* @param rows the number of rows in the struct column. This is needed if no columns
* are provided.
* @param columns the columns to add to the struct in the order they should be added
* @return the new column view. It is the responsibility of the caller to close this.
*/
public static ColumnView makeStructView(long rows, ColumnView... columns) {
long[] handles = new long[columns.length];
for (int i = 0; i < columns.length; i++) {
ColumnView cv = columns[i];
if (rows != cv.getRowCount()) {
throw new IllegalArgumentException("All columns must have the same number of rows");
}
handles[i] = cv.getNativeView();
}
return new ColumnView(makeStructView(handles, rows));
}
/**
* Create a new struct column view of existing column views. Note that this will NOT copy
* the contents of the input columns to make a new vector, but makes a view that must not
* outlive the child views that it references. The resulting column cannot be null.
* @param columns the columns to add to the struct in the order they should be added
* @return the new column view. It is the responsibility of the caller to close this.
*/
public static ColumnView makeStructView(ColumnView... columns) {
if (columns.length <= 0) {
throw new IllegalArgumentException("At least one column is needed to get the row count");
}
return makeStructView(columns[0].rows, columns);
}
/**
* Create a new column view from a raw device buffer. Note that this will NOT copy
* the contents of the buffer but only creates a view. The view MUST NOT outlive
* the underlying device buffer. The column view will be created without a validity
* vector, so it is not possible to create a view containing null elements. Additionally
* only fixed-width primitive types are supported.
*
* @param buffer device memory that will back the column view
* @param startOffset byte offset into the device buffer where the column data starts
* @param type type of data in the column view
* @param rows number of data elements in the column view
* @return new column view instance that must not outlive the backing device buffer
*/
public static ColumnView fromDeviceBuffer(BaseDeviceMemoryBuffer buffer,
long startOffset,
DType type,
int rows) {
if (buffer == null) {
throw new NullPointerException("buffer is null");
}
int typeSize = type.getSizeInBytes();
if (typeSize <= 0) {
throw new IllegalArgumentException("Unsupported type: " + type);
}
if (startOffset < 0) {
throw new IllegalArgumentException("Invalid start offset: " + startOffset);
}
if (rows < 0) {
throw new IllegalArgumentException("Invalid row count: " + rows);
}
long dataSize = typeSize * rows;
if (startOffset + dataSize > buffer.length) {
throw new IllegalArgumentException("View extends beyond buffer range");
}
long dataAddress = buffer.getAddress() + startOffset;
if (dataAddress % typeSize != 0) {
throw new IllegalArgumentException("Data address " + Long.toHexString(dataAddress) +
" is misaligned relative to type size of " + typeSize + " bytes");
}
return new ColumnView(makeCudfColumnView(type.typeId.getNativeId(), type.getScale(),
dataAddress, dataSize, 0, 0, 0, rows, null));
}
/**
* Create a column of bool values indicating whether the specified scalar
* is an element of each row of a list column.
* Output `column[i]` is set to null if one or more of the following are true:
* 1. The key is null
* 2. The column vector list value is null
* @param key the scalar to look up
* @return a Boolean ColumnVector with the result of the lookup
*/
public final ColumnVector listContains(Scalar key) {
assert type.equals(DType.LIST) : "column type must be a LIST";
return new ColumnVector(listContains(getNativeView(), key.getScalarHandle()));
}
/**
* Create a column of bool values indicating whether the list rows of the first
* column contain the corresponding values in the second column.
* Output `column[i]` is set to null if one or more of the following are true:
* 1. The key value is null
* 2. The column vector list value is null
* @param key the ColumnVector with look up values
* @return a Boolean ColumnVector with the result of the lookup
*/
public final ColumnVector listContainsColumn(ColumnView key) {
assert type.equals(DType.LIST) : "column type must be a LIST";
return new ColumnVector(listContainsColumn(getNativeView(), key.getNativeView()));
}
/**
* Create a column of bool values indicating whether the list rows of the specified
* column contain null elements.
* Output `column[i]` is set to null iff the input list row is null.
* @return a Boolean ColumnVector with the result of the lookup
*/
public final ColumnVector listContainsNulls() {
assert type.equals(DType.LIST) : "column type must be a LIST";
return new ColumnVector(listContainsNulls(getNativeView()));
}
/**
* Enum to choose behaviour of listIndexOf functions:
* 1. FIND_FIRST finds the first occurrence of a search key.
* 2. FIND_LAST finds the last occurrence of a search key.
*/
public enum FindOptions {FIND_FIRST, FIND_LAST};
/**
* Create a column of int32 indices, indicating the position of the scalar search key
* in each list row.
* All indices are 0-based. If a search key is not found, the index is set to -1.
* The index is set to null if one of the following is true:
* 1. The search key is null.
* 2. The list row is null.
* @param key The scalar search key
* @param findOption Whether to find the first index of the key, or the last.
* @return The resultant column of int32 indices
*/
public final ColumnVector listIndexOf(Scalar key, FindOptions findOption) {
assert type.equals(DType.LIST) : "column type must be a LIST";
boolean isFindFirst = findOption == FindOptions.FIND_FIRST;
return new ColumnVector(listIndexOfScalar(getNativeView(), key.getScalarHandle(), isFindFirst));
}
/**
* Create a column of int32 indices, indicating the position of each row in the
* search key column in the corresponding row of the lists column.
* All indices are 0-based. If a search key is not found, the index is set to -1.
* The index is set to null if one of the following is true:
* 1. The search key row is null.
* 2. The list row is null.
* @param keys ColumnView of search keys.
* @param findOption Whether to find the first index of the key, or the last.
* @return The resultant column of int32 indices
*/
public final ColumnVector listIndexOf(ColumnView keys, FindOptions findOption) {
assert type.equals(DType.LIST) : "column type must be a LIST";
boolean isFindFirst = findOption == FindOptions.FIND_FIRST;
return new ColumnVector(listIndexOfColumn(getNativeView(), keys.getNativeView(), isFindFirst));
}
/**
* Segmented sort of the elements within a list in each row of a list column.
* NOTICE: list columns with nested child are NOT supported yet.
*
* @param isDescending whether sorting each row with descending order (or ascending order)
* @param isNullSmallest whether to regard the null value as the min value (or the max value)
* @return a List ColumnVector with elements in each list sorted
*/
public final ColumnVector listSortRows(boolean isDescending, boolean isNullSmallest) {
assert type.equals(DType.LIST) : "column type must be a LIST";
return new ColumnVector(listSortRows(getNativeView(), isDescending, isNullSmallest));
}
/**
* For each pair of lists from the input lists columns, check if they have any common non-null
* elements.
*
* A null input row in any of the input columns will result in a null output row. During checking
* for common elements, nulls within each list are considered as different values while
* floating-point NaN values are considered as equal.
*
* The input lists columns must have the same size and same data type.
*
* @param lhs The input lists column for one side
* @param rhs The input lists column for the other side
* @return A column of type BOOL8 containing the check result
*/
public static ColumnVector listsHaveOverlap(ColumnView lhs, ColumnView rhs) {
assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
"Input columns type must be of type LIST";
assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
return new ColumnVector(listsHaveOverlap(lhs.getNativeView(), rhs.getNativeView()));
}
/**
* Computes, for every row, the duplicate-free intersection of the two corresponding lists.
*
* A null row in either input produces a null output row. While matching elements, nulls and
* floating-point NaN values inside a list compare as equal.
*
* Both input lists columns must have the same row count and the same data type.
*
* @param lhs the lists column for one side
* @param rhs the lists column for the other side
* @return a lists column holding the per-row intersection
*/
public static ColumnVector listsIntersectDistinct(ColumnView lhs, ColumnView rhs) {
    assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
        "Input columns type must be of type LIST";
    assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
    final long resultHandle = listsIntersectDistinct(lhs.getNativeView(), rhs.getNativeView());
    return new ColumnVector(resultHandle);
}
/**
* Computes, for every row, the duplicate-free union of the two corresponding lists.
*
* A null row in either input produces a null output row. While merging elements, nulls and
* floating-point NaN values inside a list compare as equal.
*
* Both input lists columns must have the same row count and the same data type.
*
* @param lhs the lists column for one side
* @param rhs the lists column for the other side
* @return a lists column holding the per-row union
*/
public static ColumnVector listsUnionDistinct(ColumnView lhs, ColumnView rhs) {
    assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
        "Input columns type must be of type LIST";
    assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
    final long resultHandle = listsUnionDistinct(lhs.getNativeView(), rhs.getNativeView());
    return new ColumnVector(resultHandle);
}
/**
* Computes, for every row, the set difference of the left list against the right list:
* the duplicate-free elements of the {@code lhs} list that do not occur in the
* corresponding {@code rhs} list.
*
* A null row in either input produces a null output row. While matching elements, nulls and
* floating-point NaN values inside a list compare as equal.
*
* Both input lists columns must have the same row count and the same data type.
*
* @param lhs the lists column for one side
* @param rhs the lists column for the other side
* @return a lists column holding the per-row difference
*/
public static ColumnVector listsDifferenceDistinct(ColumnView lhs, ColumnView rhs) {
    assert lhs.getType().equals(DType.LIST) && rhs.getType().equals(DType.LIST) :
        "Input columns type must be of type LIST";
    assert lhs.getRowCount() == rhs.getRowCount() : "Input columns must have the same size";
    final long resultHandle = listsDifferenceDistinct(lhs.getNativeView(), rhs.getNativeView());
    return new ColumnVector(resultHandle);
}
/**
* Generates list offsets from a column holding the size of each list.
* NOTICE: This API only works for INT32. Otherwise, the behavior is undefined. And no null and
* negative value is allowed.
*
* @return a column of list offsets whose size is N + 1
*/
public final ColumnVector generateListOffsets() {
    final long offsetsHandle = generateListOffsets(getNativeView());
    return new ColumnVector(offsetsHandle);
}
/**
* Fetches a single value from the column at the given row index as a Scalar.
*
* Be careful. This is expensive and may involve running a kernel to copy the data out.
*
* @param index the row index to read
* @return the value at that index wrapped in a Scalar
* @throws CudfException if the index is out of bounds.
*/
public final Scalar getScalarElement(int index) {
    final long scalarHandle = getElement(getNativeView(), index);
    return new Scalar(getType(), scalarHandle);
}
/**
* Filters the elements of each list row of this LIST column using a matching
* list-of-booleans column as a mask.
* <p>
* Produces a new `LIST` column of the same type as this column where an element is kept
* in the output row *only* when the corresponding `boolean_mask` entry is non-null and
* `true`.
* <p>
* E.g.
* column = { {0,1,2}, {3,4}, {5,6,7}, {8,9} };
* boolean_mask = { {0,1,1}, {1,0}, {1,1,1}, {0,0} };
* results = { {1,2}, {3}, {5,6,7}, {} };
* <p>
* This column and `boolean_mask` must have the same number of rows, and the output has the
* same number of rows as this column. An output row is invalid only if the input row is
* invalid.
*
* @param booleanMaskView A nullable list of bools column used to filter elements in this column
* @return List column of the same type as this column, containing filtered list rows
* @throws CudfException if `boolean_mask` is not a "lists of bools" column
* @throws CudfException if this column and `boolean_mask` have different number of rows
*/
public final ColumnVector applyBooleanMask(ColumnView booleanMaskView) {
    assert getType().equals(DType.LIST);
    assert booleanMaskView.getType().equals(DType.LIST);
    assert getRowCount() == booleanMaskView.getRowCount();
    final long maskedHandle = applyBooleanMask(getNativeView(), booleanMaskView.getNativeView());
    return new ColumnVector(maskedHandle);
}
/**
* Computes the number of bytes to allocate for a validity buffer covering the given row count.
* Per cudf::bitmask_allocation_size_bytes, null masks are padded to a 64-byte boundary:
* one bit per row, rounded up to whole bytes, then rounded up to a multiple of 64.
*
* @param numRows the number of rows the validity buffer must cover
* @return the padded allocation size in bytes
*/
static long getValidityBufferSize(int numRows) {
    // one validity bit per row, rounded up to whole bytes (ceil(bits / 8))
    final long byteCount = ((long) numRows + 7) >> 3;
    // round up to the 64-byte padding boundary
    return ((byteCount + 63) >> 6) << 6;
}
/**
* Counts how many rows in the column are distinct from one another.
*
* @param nullPolicy whether null rows should participate in the count.
* @return the number of distinct rows
*/
public int distinctCount(NullPolicy nullPolicy) {
    final boolean includeNulls = nullPolicy.includeNulls;
    return distinctCount(getNativeView(), includeNulls);
}
/**
* Counts how many rows in the column are distinct from one another, with nulls included
* in the count.
*
* @return the number of distinct rows
*/
public int distinctCount() {
    // true == nulls are included, matching distinctCount(NullPolicy) with an inclusive policy
    return distinctCount(getNativeView(), true);
}
/////////////////////////////////////////////////////////////////////////////
// INTERNAL/NATIVE ACCESS
/////////////////////////////////////////////////////////////////////////////
// Wraps the data buffer of the given native column view; returns null when the view
// reports no data buffer (address 0).
static DeviceMemoryBufferView getDataBuffer(long viewHandle) {
    final long address = getNativeDataAddress(viewHandle);
    return address == 0 ? null
        : new DeviceMemoryBufferView(address, getNativeDataLength(viewHandle));
}
// Wraps the validity (null mask) buffer of the given native column view; returns null when
// the view reports no validity buffer (address 0).
static DeviceMemoryBufferView getValidityBuffer(long viewHandle) {
    final long address = getNativeValidityAddress(viewHandle);
    return address == 0 ? null
        : new DeviceMemoryBufferView(address, getNativeValidityLength(viewHandle));
}
// Wraps the offsets buffer of the given native column view; returns null when the view
// reports no offsets buffer (address 0).
static DeviceMemoryBufferView getOffsetsBuffer(long viewHandle) {
    final long address = getNativeOffsetsAddress(viewHandle);
    return address == 0 ? null
        : new DeviceMemoryBufferView(address, getNativeOffsetsLength(viewHandle));
}
// Native Methods
// Native counterpart of the public distinctCount() overloads above: counts rows that are
// distinct from one another; nullsIncluded controls whether null rows participate.
private static native int distinctCount(long handle, boolean nullsIncluded);
/**
* Native method to parse and convert a string column vector to unix timestamp. A unix
* timestamp is a long value representing how many units since 1970-01-01 00:00:00.000 in either
* positive or negative direction. This mirrors the functionality spark sql's to_unix_timestamp.
* Strings that fail to parse will default to 0. Supported time units are second, millisecond,
* microsecond, and nanosecond. Larger time units for column vectors are not supported yet in cudf.
* No checking is done for invalid formats or invalid timestamp units.
* Negative timestamp values are not currently supported and will yield undesired results. See
* github issue https://github.com/rapidsai/cudf/issues/3116 for details
*
* @param unit integer native ID of the time unit to parse the timestamp into.
* @param format strptime format specifier string of the timestamp. Used to parse and convert
* the timestamp with. Supports %Y,%y,%m,%d,%H,%I,%p,%M,%S,%f,%z format specifiers.
* See https://github.com/rapidsai/custrings/blob/branch-0.10/docs/source/datetime.md
* for full parsing format specification and documentation.
* @return native handle of the resulting cudf column, used to construct the Java column vector
* by the timestampToLong method.
*/
private static native long stringTimestampToTimestamp(long viewHandle, int unit, String format);
// NOTE(review): presumably checks whether each string can be represented as a fixed-point
// value with the given native type id and scale — confirm against the native implementation.
private static native long isFixedPoint(long viewHandle, int nativeTypeId, int scale);
// NOTE(review): presumably converts each value to its hexadecimal string representation —
// confirm against the native implementation.
private static native long toHex(long viewHandle);
/**
* Native method to concatenate a list column of strings (each row is a list of strings),
* concatenates the strings within each row and returns a single strings column result.
* Each new string is created by concatenating the strings from the same row (same list element)
* delimited by the row separator provided in the `separators` strings column.
* @param listColumnHandle long holding the native handle of the column containing lists of strings
* to concatenate.
* @param sepColumn long holding the native handle of the strings column that provides separators
* for concatenation.
* @param separatorNarep string scalar indicating null behavior when a separator is null.
* If set to null and the separator is null the resulting string will
* be null. If not null, this string will be used in place of a null
* separator.
* @param colNarep string String scalar that should be used in place of any null strings
* found in any column.
* @param separateNulls boolean if true, then the separator is included for null rows if
* `colNarep` is valid.
* @param emptyStringOutputIfEmptyList boolean if true, any input row that is an empty list
* will result in an empty string. Otherwise, it will
* result in a null.
* @return native handle of the resulting cudf column, used to construct the Java column.
*/
private static native long stringConcatenationListElementsSepCol(long listColumnHandle,
                                                                 long sepColumn,
                                                                 long separatorNarep,
                                                                 long colNarep,
                                                                 boolean separateNulls,
                                                                 boolean emptyStringOutputIfEmptyList);
/**
* Native method to concatenate a list column of strings (each row is a list of strings),
* concatenates the strings within each row and returns a single strings column result.
* Each new string is created by concatenating the strings from the same row (same list element)
* delimited by the separator provided.
* @param listColumnHandle long holding the native handle of the column containing lists of strings
* to concatenate.
* @param separator string scalar inserted between each string being merged, may not be null.
* @param narep string scalar indicating null behavior. If set to null and any string in the row
* is null the resulting string will be null. If not null, null values in any
* column will be replaced by the specified string. The underlying value in the
* string scalar may be null, but the object passed in may not.
* @param separateNulls boolean if true, then the separator is included for null rows if
* `narep` is valid.
* @param emptyStringOutputIfEmptyList boolean if true, any input row that is an empty list
* will result in an empty string. Otherwise, it will
* result in a null.
* @return native handle of the resulting cudf column, used to construct the Java column.
*/
private static native long stringConcatenationListElements(long listColumnHandle,
                                                           long separator,
                                                           long narep,
                                                           boolean separateNulls,
                                                           boolean emptyStringOutputIfEmptyList);
/**
* Native method to repeat each string in the given input strings column a number of times
* specified by the <code>repeatTimes</code> parameter.
*
* In special cases:
* - If <code>repeatTimes</code> is not a positive number, a non-null input string will always
* result in an empty output string.
* - A null input string will always result in a null output string regardless of the value of
* the <code>repeatTimes</code> parameter.
*
* @param viewHandle long holding the native handle of the column containing strings to repeat.
* @param repeatTimes The number of times each input string is repeated.
* @return native handle of the resulting cudf strings column containing repeated strings.
*/
private static native long repeatStrings(long viewHandle, int repeatTimes);
/**
* Native method to repeat strings in the given input strings column, each string is repeated
* by a different number of times given by the corresponding row in a <code>repeatTimes</code>
* numeric column.
*
* In special cases:
* - Any null row (from either the input strings column or the <code>repeatTimes</code> column)
* will always result in a null output string.
* - If any value in the <code>repeatTimes</code> column is not a positive number and its
* corresponding input string is not null, the output string will be an empty string.
*
* If the input <code>repeatTimesHandle</code> column does not have a numeric type, or it has a
* size that is different from size of the input strings column, an exception will be thrown.
*
* @param stringsHandle long holding the native handle of the column containing strings to repeat.
* @param repeatTimesHandle long holding the native handle of the column containing the numbers
* of times each input string is repeated.
* @return native handle of the resulting cudf strings column containing repeated strings.
*/
private static native long repeatStringsWithColumnRepeatTimes(long stringsHandle,
                                                              long repeatTimesHandle);
// NOTE(review): presumably evaluates the JSONPath held in the scalar against each JSON string
// row, with the boolean flags tuning quote/missing-field handling — confirm semantics against
// the native get_json_object implementation.
private static native long getJSONObject(long viewHandle, long scalarHandle, boolean allowSingleQuotes, boolean stripQuotesFromSingleStrings, boolean missingFieldsAsNulls) throws CudfException;
/**
* Native method to parse and convert a timestamp column vector to string column vector. A unix
* timestamp is a long value representing how many units since 1970-01-01 00:00:00:000 in either
* positive or negative direction. This mirrors the functionality spark sql's from_unixtime.
* No checking is done for invalid formats or invalid timestamp units.
* Negative timestamp values are not currently supported and will yield undesired results. See
* github issue https://github.com/rapidsai/cudf/issues/3116 for details
*
* @param format - strftime format specifier string of the timestamp. It's used to parse and convert
* the timestamp with. Supports %Y,%y,%m,%d,%H,%M,%S,%f format specifiers.
* %d Day of the month: 01-31
* %m Month of the year: 01-12
* %y Year without century: 00-99c
* %Y Year with century: 0001-9999
* %H 24-hour of the day: 00-23
* %M Minute of the hour: 00-59
* %S Second of the minute: 00-59
* %f 6-digit microsecond: 000000-999999
* See http://man7.org/linux/man-pages/man3/strftime.3.html for details
*
* Reported bugs
* https://github.com/rapidsai/cudf/issues/4160 after the bug is fixed this method should
* also support
* %I 12-hour of the day: 01-12
* %p Only 'AM', 'PM'
* %j day of the year
*
* @return - native handle of the resulting cudf column used to construct the Java column vector
*/
private static native long timestampToStringTimestamp(long viewHandle, String format);
/**
* Native method for locating the starting index of the first instance of a given substring
* in each string in the column. 0 indexing, returns -1 if the substring is not found. Can
* be configured to start or end the search mid string.
* @param columnView native handle of the cudf::column_view containing strings being operated on.
* @param substringScalar string scalar handle containing the string to locate within each row.
* @param start character index to start the search from (inclusive).
* @param end character index to end the search on (exclusive).
* @return native handle of the resulting cudf column of per-row match positions.
*/
private static native long substringLocate(long columnView, long substringScalar, int start, int end);
/**
* Returns a list of columns by splitting each string using the specified string literal
* delimiter. The number of rows in the output columns will be the same as the input column.
* Null entries are added for the rows where split results have been exhausted. Null input entries
* result in all nulls in the corresponding rows of the output columns.
*
* @param nativeHandle native handle of the input strings column that being operated on.
* @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @return native handles of the resulting output columns.
*/
private static native long[] stringSplit(long nativeHandle, String delimiter, int limit);
/**
* Returns a list of columns by splitting each string using the specified regular expression
* pattern. The number of rows in the output columns will be the same as the input column.
* Null entries are added for the rows where split results have been exhausted. Null input entries
* result in all nulls in the corresponding rows of the output columns.
*
* @param nativeHandle native handle of the input strings column that being operated on.
* @param pattern UTF-8 encoded string identifying the split regular expression pattern for
* each input string.
* @param flags regex flags setting.
* @param capture capture groups setting.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @return native handles of the resulting output columns.
*/
private static native long[] stringSplitRe(long nativeHandle, String pattern, int flags,
                                           int capture, int limit);
/**
* Returns a column that are lists of strings in which each list is made by splitting the
* corresponding input string using the specified string literal delimiter.
*
* @param nativeHandle native handle of the input strings column that being operated on.
* @param delimiter UTF-8 encoded string identifying the split delimiter for each input string.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @return native handle of the resulting lists-of-strings column.
*/
private static native long stringSplitRecord(long nativeHandle, String delimiter, int limit);
/**
* Returns a column that are lists of strings in which each list is made by splitting the
* corresponding input string using the specified regular expression pattern.
*
* @param nativeHandle native handle of the input strings column that being operated on.
* @param pattern UTF-8 encoded string identifying the split regular expression pattern for
* each input string.
* @param flags regex flags setting.
* @param capture capture groups setting.
* @param limit the maximum size of the list resulting from splitting each input string,
* or -1 for all possible splits. Note that limit = 0 (all possible splits without
* trailing empty strings) and limit = 1 (no split at all) are not supported.
* @return native handle of the resulting lists-of-strings column.
*/
private static native long stringSplitRecordRe(long nativeHandle, String pattern, int flags,
                                               int capture, int limit);
/**
* Native method to calculate substring from a given string column. 0 indexing.
* @param columnView native handle of the cudf::column_view being operated on.
* @param start first character index to begin the substring(inclusive).
* @param end last character index to stop the substring(exclusive).
* @return native handle of the resulting cudf column.
*/
private static native long substring(long columnView, int start, int end) throws CudfException;
/**
* Native method to extract substrings from a given strings column.
* @param columnView native handle of the cudf::column_view being operated on.
* @param start first character index to begin the substrings (inclusive).
* @return native handle of the resulting cudf column.
*/
private static native long substringS(long columnView, int start) throws CudfException;
/**
* Native method to calculate substring from a given string column.
* @param columnView native handle of the cudf::column_view being operated on.
* @param startColumn handle of cudf::column_view which has start indices of each string.
* @param endColumn handle of cudf::column_view which has end indices of each string.
* @return native handle of the resulting cudf column.
*/
private static native long substringColumn(long columnView, long startColumn, long endColumn)
    throws CudfException;
/**
* Native method to replace target string by repl string.
* @param columnView native handle of the cudf::column_view being operated on.
* @param target handle of scalar containing the string being searched.
* @param repl handle of scalar containing the string to replace.
* @return native handle of the resulting cudf column.
*/
private static native long stringReplace(long columnView, long target, long repl) throws CudfException;
/**
* Native method to replace target strings by corresponding repl strings.
* @param inputCV native handle of the cudf::column_view being operated on.
* @param targetsCV handle of column containing the strings being searched.
* @param replsCV handle of column containing the strings to replace (can optionally contain a single string).
* @return native handle of the resulting cudf column.
*/
private static native long stringReplaceMulti(long inputCV, long targetsCV, long replsCV) throws CudfException;
/**
* Native method for replacing each regular expression pattern match with the specified
* replacement string.
* @param columnView native handle of the cudf::column_view being operated on.
* @param pattern regular expression pattern to search within each string.
* @param flags regex flags setting.
* @param capture capture groups setting.
* @param repl native handle of the cudf::scalar containing the replacement string.
* @param maxRepl maximum number of times to replace the pattern within a string
* @return native handle of the resulting cudf column containing the string results.
*/
private static native long replaceRegex(long columnView, String pattern, int flags, int capture,
                                        long repl, long maxRepl) throws CudfException;
/**
* Native method for multiple instance regular expression replacement.
* @param columnView native handle of the cudf::column_view being operated on.
* @param patterns native handle of the cudf::column_view containing the regex patterns.
* @param repls The replacement template for creating the output string.
* @return native handle of the resulting cudf column containing the string results.
*/
private static native long replaceMultiRegex(long columnView, String[] patterns,
                                             long repls) throws CudfException;
/**
* Native method for replacing any character sequence matching the given regex program
* pattern using the replace template for back-references.
* @param columnView native handle of the cudf::column_view being operated on.
* @param pattern The regular expression patterns to search within each string.
* @param flags Regex flags setting.
* @param capture Capture groups setting.
* @param replace The replacement template for creating the output string.
* @return native handle of the resulting cudf column containing the string results.
*/
private static native long stringReplaceWithBackrefs(long columnView, String pattern, int flags,
                                                     int capture, String replace) throws CudfException;
/**
* Native method for checking if strings in a column starts with a specified comparison string.
* @param cudfViewHandle native handle of the cudf::column_view being operated on.
* @param compString handle of scalar containing the string being searched for at the beginning
* of each string in the column.
* @return native handle of the resulting cudf column containing the boolean results.
*/
private static native long stringStartWith(long cudfViewHandle, long compString) throws CudfException;
/**
* Native method for checking if strings in a column ends with a specified comparison string.
* @param cudfViewHandle native handle of the cudf::column_view being operated on.
* @param compString handle of scalar containing the string being searched for at the end
* of each string in the column.
* @return native handle of the resulting cudf column containing the boolean results.
*/
private static native long stringEndWith(long cudfViewHandle, long compString) throws CudfException;
/**
* Native method to strip whitespace from the start and end of a string.
* @param columnView native handle of the cudf::column_view being operated on.
* @param type native enum selecting the strip variant — presumably left/right/both; TODO confirm
* against the native implementation.
* @param toStrip handle of a string scalar — presumably the set of characters to remove;
* TODO confirm against the native implementation.
*/
private static native long stringStrip(long columnView, int type, long toStrip) throws CudfException;
/**
* Native method for checking if strings match the passed in regex program pattern from the
* beginning of the string.
* @param cudfViewHandle native handle of the cudf::column_view being operated on.
* @param pattern string regex pattern.
* @param flags regex flags setting.
* @param capture capture groups setting.
* @return native handle of the resulting cudf column containing the boolean results.
*/
private static native long matchesRe(long cudfViewHandle, String pattern, int flags, int capture) throws CudfException;
/**
* Native method for checking if strings match the passed in regex program pattern starting at any location.
* @param cudfViewHandle native handle of the cudf::column_view being operated on.
* @param pattern string regex pattern.
* @param flags regex flags setting.
* @param capture capture groups setting.
* @return native handle of the resulting cudf column containing the boolean results.
*/
private static native long containsRe(long cudfViewHandle, String pattern, int flags, int capture) throws CudfException;
/**
* Native method for checking if strings match the passed in like pattern
* and escape character.
* @param cudfViewHandle native handle of the cudf::column_view being operated on.
* @param patternHandle handle of scalar containing the string like pattern.
* @param escapeCharHandle handle of scalar containing the string escape character.
* @return native handle of the resulting cudf column containing the boolean results.
*/
private static native long like(long cudfViewHandle, long patternHandle, long escapeCharHandle) throws CudfException;
/**
* Native method for checking if strings in a column contains a specified comparison string.
* @param cudfViewHandle native handle of the cudf::column_view being operated on.
* @param compString handle of scalar containing the string being searched for.
* @return native handle of the resulting cudf column containing the boolean results.
*/
private static native long stringContains(long cudfViewHandle, long compString) throws CudfException;
/**
* Native method for searching for the given target strings within each string in the provided column.
* @param cudfViewHandle native handle of the cudf::column_view being operated on.
* @param targetViewHandle handle of the column view containing the strings being searched for.
* @return native handles of the result columns — presumably one BOOL8 column per target;
* TODO confirm against the native implementation.
*/
private static native long[] stringContainsMulti(long cudfViewHandle, long targetViewHandle) throws CudfException;
/**
* Native method for extracting results from a regex program pattern. Returns a table handle.
*
* @param cudfViewHandle Native handle of the cudf::column_view being operated on.
* @param pattern String regex pattern.
* @param flags Regex flags setting.
* @param capture Capture groups setting.
*/
private static native long[] extractRe(long cudfViewHandle, String pattern, int flags, int capture) throws CudfException;
/**
* Native method for extracting all results corresponding to group idx from a regex program pattern.
*
* @param nativeHandle Native handle of the cudf::column_view being operated on.
* @param pattern String regex pattern.
* @param flags Regex flags setting.
* @param capture Capture groups setting.
* @param idx Regex group index. A 0 value means matching the entire regex.
* @return Native handle of a string column of the result.
*/
private static native long extractAllRecord(long nativeHandle, String pattern, int flags, int capture, int idx);
// NOTE(review): presumably URL (percent) decoding of each string — confirm against native impl.
private static native long urlDecode(long cudfViewHandle);
// NOTE(review): presumably URL (percent) encoding of each string — confirm against native impl.
private static native long urlEncode(long cudfViewHandle);
/**
* Native method for map lookup over a column of List<Struct<String,String>>
* @param columnView the column view handle of the map
* @param key the string scalar that is the key for lookup
* @return a string column handle of the resultant
* @throws CudfException
*/
private static native long mapLookup(long columnView, long key) throws CudfException;
/**
* Native method for map lookup over a column of List<Struct<String,String>>
* The lookup column must have as many rows as the map column,
* and must match the key-type of the map.
* A column of values is returned, with the same number of rows as the map column.
* If a key is repeated in a map row, the value corresponding to the last matching
* key is returned.
* If a lookup key is null or not found, the corresponding value is null.
* @param columnView the column view handle of the map
* @param keys the column view holding the keys
* @return a column of values corresponding the value of the lookup key.
* @throws CudfException
*/
private static native long mapLookupForKeys(long columnView, long keys) throws CudfException;
/**
* Native method for check the existence of a key over a column of List<Struct<_, _>>
* @param columnView the column view handle of the map
* @param key the column view holding the keys
* @return boolean column handle of the result
* @throws CudfException
*/
private static native long mapContainsKeys(long columnView, long key) throws CudfException;
/**
* Native method for check the existence of a key over a column of List<Struct<String,String>>
* @param columnView the column view handle of the map
* @param key the string scalar that is the key for lookup
* @return boolean column handle of the result
* @throws CudfException
*/
private static native long mapContains(long columnView, long key) throws CudfException;
/**
* Native method to add zeros as padding to the left of each string.
*/
private static native long zfill(long nativeHandle, int width);
private static native long pad(long nativeHandle, int width, int side, String fillChar);
private static native long binaryOpVS(long lhs, long rhs, int op, int dtype, int scale);
private static native long binaryOpVV(long lhs, long rhs, int op, int dtype, int scale);
private static native long countElements(long viewHandle);
private static native long byteCount(long viewHandle) throws CudfException;
private static native long codePoints(long viewHandle);
private static native long extractListElement(long nativeView, int index);
private static native long extractListElementV(long nativeView, long indicesView);
private static native long dropListDuplicates(long nativeView, int keep_option);
private static native long dropListDuplicatesWithKeysValues(long nativeHandle);
// Native method to flatten one level of list nesting; ignoreNull selects how
// null lists are treated — presumably skipped vs. propagated (TODO confirm).
private static native long flattenLists(long inputHandle, boolean ignoreNull);
/**
 * Native method for list lookup
 * @param nativeView the column view handle of the list
 * @param key the scalar key handle
 * @return column handle of the resultant
 */
private static native long listContains(long nativeView, long key);
/**
 * Native method for list lookup
 * @param nativeView the column view handle of the list
 * @param keyColumn the column handle of look up keys
 * @return column handle of the resultant
 */
private static native long listContainsColumn(long nativeView, long keyColumn);
/**
 * Native method to search list rows for null elements.
 * @param nativeView the column view handle of the list
 * @return column handle of the resultant boolean column
 */
private static native long listContainsNulls(long nativeView);
/**
 * Native method to find the first (or last) index of a specified scalar key,
 * in each row of a list column.
 * @param nativeView the column view handle of the list
 * @param scalarKeyHandle handle to the scalar search key
 * @param isFindFirst Whether to find the first index of the key, or the last.
 * @return column handle of the resultant column of int32 indices
 */
private static native long listIndexOfScalar(long nativeView, long scalarKeyHandle, boolean isFindFirst);
/**
 * Native method to find the first (or last) index of each search key in the specified column,
 * in each row of a list column.
 * @param nativeView the column view handle of the list
 * @param keyColumnHandle handle to the search key column
 * @param isFindFirst Whether to find the first index of the key, or the last.
 * @return column handle of the resultant column of int32 indices
 */
private static native long listIndexOfColumn(long nativeView, long keyColumnHandle, boolean isFindFirst);
// Native method to sort the elements within each row of a list column.
private static native long listSortRows(long nativeView, boolean isDescending, boolean isNullSmallest);
// Native row-wise set operations on pairs of list columns; each returns a
// handle to the resulting column.
private static native long listsHaveOverlap(long lhsViewHandle, long rhsViewHandle);
private static native long listsIntersectDistinct(long lhsViewHandle, long rhsViewHandle);
private static native long listsUnionDistinct(long lhsViewHandle, long rhsViewHandle);
private static native long listsDifferenceDistinct(long lhsViewHandle, long rhsViewHandle);
// Native method to extract a single element at the given row index as a scalar handle.
private static native long getElement(long nativeView, int index);
// Native cast operations; `type`/`scale` identify the target cudf data type.
private static native long castTo(long nativeHandle, int type, int scale);
private static native long bitCastTo(long nativeHandle, int type, int scale);
private static native long byteListCast(long nativeHandle, boolean config);
// Native slice/split; both return an array of column handles, one per output range.
private static native long[] slice(long nativeHandle, int[] indices) throws CudfException;
private static native long[] split(long nativeHandle, int[] indices) throws CudfException;
// Native find-and-replace over this column (`myself`), mapping entries of
// `valuesHandle` to the corresponding entries of `replaceHandle`.
private static native long findAndReplaceAll(long valuesHandle, long replaceHandle, long myself) throws CudfException;
// Native rounding to `decimalPlaces` using the given rounding method id.
private static native long round(long nativeHandle, int decimalPlaces, int roundingMethod) throws CudfException;
// Native method to reverse, per row, the characters of each string or the
// elements of each list.
private static native long reverseStringsOrLists(long inputHandle);
/**
 * Native method to switch all characters in a column of strings to lowercase characters.
 * @param cudfViewHandle native handle of the cudf::column_view being operated on.
 * @return native handle of the resulting cudf column, used to construct the Java column
 * by the lower method.
 */
private static native long lowerStrings(long cudfViewHandle);
/**
 * Native method to switch all characters in a column of strings to uppercase characters.
 * @param cudfViewHandle native handle of the cudf::column_view being operated on.
 * @return native handle of the resulting cudf column, used to construct the Java column
 * by the upper method.
 */
private static native long upperStrings(long cudfViewHandle);
/**
 * Native method to compute approx percentiles.
 * @param cudfColumnHandle T-Digest column
 * @param percentilesHandle Percentiles
 * @return native handle of the resulting cudf column, used to construct the Java column
 * by the approxPercentile method.
 */
private static native long approxPercentile(long cudfColumnHandle, long percentilesHandle) throws CudfException;
// Native quantile computation using the given interpolation method id.
private static native long quantile(long cudfColumnHandle, int quantileMethod, double[] quantiles) throws CudfException;
// Native rolling-window aggregation. Window bounds come either from the scalar
// preceding/following values or from the per-row preceding_col/following_col
// column handles (0 when unused).
private static native long rollingWindow(
    long viewHandle,
    long defaultOutputHandle,
    int min_periods,
    long aggPtr,
    int preceding,
    int following,
    long preceding_col,
    long following_col);
// Native inclusive/exclusive scan (cumulative aggregation) over the column.
private static native long scan(long viewHandle, long aggregation,
    boolean isInclusive, boolean includeNulls) throws CudfException;
// Native method converting NaN entries to nulls.
private static native long nansToNulls(long viewHandle) throws CudfException;
// Native per-row character length of a strings column.
private static native long charLengths(long viewHandle) throws CudfException;
// Native null-replacement variants: by scalar, by the corresponding entry of a
// replacement column, or by policy (preceding/following non-null value).
private static native long replaceNullsScalar(long viewHandle, long scalarHandle) throws CudfException;
private static native long replaceNullsColumn(long viewHandle, long replaceViewHandle) throws CudfException;
private static native long replaceNullsPolicy(long nativeView, boolean isPreceding) throws CudfException;
// Native element-wise if/else selection; suffix letters denote the operand
// kinds in (true, false) order: V = vector (column), S = scalar.
private static native long ifElseVV(long predVec, long trueVec, long falseVec) throws CudfException;
private static native long ifElseVS(long predVec, long trueVec, long falseScalar) throws CudfException;
private static native long ifElseSV(long predVec, long trueScalar, long falseVec) throws CudfException;
private static native long ifElseSS(long predVec, long trueScalar, long falseScalar) throws CudfException;
// Native full-column reduction producing a scalar of the given dtype/scale.
private static native long reduce(long viewHandle, long aggregation, int dtype, int scale) throws CudfException;
// Native segmented reduction; segment boundaries come from the offsets column.
private static native long segmentedReduce(long dataViewHandle, long offsetsViewHandle,
    long aggregation, boolean includeNulls, int dtype, int scale) throws CudfException;
// Native per-list gather; isNullifyOutBounds selects whether out-of-bounds
// indices produce nulls.
private static native long segmentedGather(long sourceColumnHandle, long gatherMapListHandle,
    boolean isNullifyOutBounds) throws CudfException;
// Native per-row boolean predicates.
private static native long isNullNative(long viewHandle);
private static native long isNanNative(long viewHandle);
private static native long isFloat(long viewHandle);
private static native long isInteger(long viewHandle);
private static native long isIntegerWithType(long viewHandle, int typeId, int typeScale);
private static native long isNotNanNative(long viewHandle);
private static native long isNotNullNative(long viewHandle);
// Native unary operation identified by the `op` id.
private static native long unaryOperation(long viewHandle, int op);
// Native timestamp component extraction; `component` selects year/month/day/etc.
private static native long extractDateTimeComponent(long viewHandle, int component);
// Native calendar helpers over timestamp columns.
private static native long lastDayOfMonth(long viewHandle) throws CudfException;
private static native long dayOfYear(long viewHandle) throws CudfException;
private static native long quarterOfYear(long viewHandle) throws CudfException;
private static native long addCalendricalMonths(long tsViewHandle, long monthsViewHandle);
private static native long addScalarCalendricalMonths(long tsViewHandle, long scalarHandle);
private static native long isLeapYear(long viewHandle) throws CudfException;
private static native long daysInMonth(long viewHandle) throws CudfException;
// Native timestamp rounding to the frequency identified by `freq`.
private static native long dateTimeCeil(long viewHandle, int freq);
private static native long dateTimeFloor(long viewHandle, int freq);
private static native long dateTimeRound(long viewHandle, int freq);
// Native containment checks: a scalar needle in this column, or each entry of
// `valuesHandle` against the search-space column.
private static native boolean containsScalar(long columnViewHaystack, long scalarHandle) throws CudfException;
private static native long containsVector(long valuesHandle, long searchSpaceHandle) throws CudfException;
// Native UDF transform; `udf` is source text, compiled as PTX when isPtx is true.
private static native long transform(long viewHandle, String udf, boolean isPtx);
// Native clamp; values outside [lo, hi] are replaced with the corresponding
// replacement scalars — presumably loReplace below lo, hiReplace above hi
// (TODO confirm against native implementation).
private static native long clamper(long nativeView, long loScalarHandle, long loScalarReplaceHandle,
    long hiScalarHandle, long hiScalarReplaceHandle);
// Native title-casing of a strings column.
protected static native long title(long handle);
// Native capitalization of a strings column using the given delimiters column.
private static native long capitalize(long strsColHandle, long delimitersHandle);
// Native row-wise string join with separator and null-replacement scalars.
private static native long joinStrings(long strsHandle, long sepHandle, long narepHandle);
// Native construction of a STRUCT view over the given child column handles.
private static native long makeStructView(long[] handles, long rowCount);
// Native per-row check of whether each string parses as a timestamp in `format`.
private static native long isTimestamp(long nativeView, String format);
/**
 * Native method to normalize the various bitwise representations of NAN and zero.
 *
 * All occurrences of -NaN are converted to NaN. Likewise, all -0.0 are converted to 0.0.
 *
 * @param viewHandle `long` representation of pointer to input column_view.
 * @return Pointer to a new `column` of normalized values.
 * @throws CudfException On failure to normalize.
 */
private static native long normalizeNANsAndZeros(long viewHandle) throws CudfException;
/**
 * Native method to deep copy a column while replacing the null mask. The null mask is the
 * bitwise merge of the null masks in the columns given as arguments.
 *
 * @param baseHandle column view of the column that is deep copied.
 * @param viewHandles array of views whose null masks are merged, must have identical row counts.
 * @return native handle of the copied cudf column with replaced null mask.
 */
private static native long bitwiseMergeAndSetValidity(long baseHandle, long[] viewHandles,
    int nullConfig) throws CudfException;
////////
// Native cudf::column_view life cycle and metadata access methods. Life cycle methods
// should typically only be called from the OffHeap inner class.
////////
// Metadata accessors for a native cudf::column_view.
static native int getNativeTypeId(long viewHandle) throws CudfException;
static native int getNativeTypeScale(long viewHandle) throws CudfException;
static native int getNativeRowCount(long viewHandle) throws CudfException;
static native int getNativeNullCount(long viewHandle) throws CudfException;
// Destroys a native column_view created by makeCudfColumnView (or similar).
static native void deleteColumnView(long viewHandle) throws CudfException;
// Raw device addresses and byte lengths of the data, offsets and validity buffers.
private static native long getNativeDataAddress(long viewHandle) throws CudfException;
private static native long getNativeDataLength(long viewHandle) throws CudfException;
private static native long getNativeOffsetsAddress(long viewHandle) throws CudfException;
private static native long getNativeOffsetsLength(long viewHandle) throws CudfException;
private static native long getNativeValidityAddress(long viewHandle) throws CudfException;
private static native long getNativeValidityLength(long viewHandle) throws CudfException;
// Builds a native column_view over caller-owned device buffers; the returned
// handle must later be released with deleteColumnView.
static native long makeCudfColumnView(int type, int scale, long data, long dataSize, long offsets,
    long valid, int nullCount, int size, long[] childHandle);
// Child access for nested (LIST/STRUCT) columns.
static native long getChildCvPointer(long viewHandle, int childIndex) throws CudfException;
private static native long getListOffsetCvPointer(long viewHandle) throws CudfException;
static native int getNativeNumChildren(long viewHandle) throws CudfException;
// calculate the amount of device memory used by this column including any child columns
static native long getDeviceMemorySize(long viewHandle, boolean shouldPadForCpu) throws CudfException;
// Deep-copies the view into an owned column and returns its handle.
static native long copyColumnViewToCV(long viewHandle) throws CudfException;
static native long generateListOffsets(long handle) throws CudfException;
static native long applyBooleanMask(long arrayColumnView, long booleanMaskHandle) throws CudfException;
// Exact check / cleanup of non-empty null rows; see the public wrappers
// hasNonEmptyNulls() and purgeNonEmptyNulls() below.
static native boolean hasNonEmptyNulls(long handle) throws CudfException;
static native long purgeNonEmptyNulls(long handle) throws CudfException;
/**
 * A utility class to create column vector like objects without refcounts and other APIs when
 * creating the device side vector from host side nested vectors. Eventually this can go away or
 * be refactored to hold less state like just the handles and the buffers to close.
 */
static class NestedColumnVector {
  // Device-side buffers backing this level of the column; any of these may be
  // null when the corresponding host buffer was absent.
  private final DeviceMemoryBuffer data;
  private final DeviceMemoryBuffer valid;
  private final DeviceMemoryBuffer offsets;
  private final DType dataType;
  private final long rows;
  // Null count, if known; UNKNOWN_NULL_COUNT is substituted when absent.
  private final Optional<Long> nullCount;
  // Nested children, one per child column (empty for leaf columns).
  List<NestedColumnVector> children;

  private NestedColumnVector(DType type, long rows, Optional<Long> nullCount,
      DeviceMemoryBuffer data, DeviceMemoryBuffer valid,
      DeviceMemoryBuffer offsets, List<NestedColumnVector> children) {
    this.dataType = type;
    this.rows = rows;
    this.nullCount = nullCount;
    this.data = data;
    this.valid = valid;
    this.offsets = offsets;
    this.children = children;
  }

  /**
   * Returns a LIST ColumnVector, for now, after constructing the NestedColumnVector from the host side
   * nested Column Vector - children. This is used for host side to device side copying internally.
   * @param type top level dtype, which is LIST currently
   * @param rows top level number of rows in the LIST column
   * @param data top level host data buffer (may be null)
   * @param valid validity buffer
   * @param offsets offsets buffer
   * @param nullCount nullCount for the LIST column
   * @param child the host side nested column vector list
   * @return new ColumnVector of type LIST at the moment
   */
  static ColumnVector createColumnVector(DType type, int rows, HostMemoryBuffer data,
      HostMemoryBuffer valid, HostMemoryBuffer offsets, Optional<Long> nullCount, List<HostColumnVectorCore> child) {
    // Recursively copy every child column from host to device first.
    List<NestedColumnVector> devChildren = new ArrayList<>();
    for (HostColumnVectorCore c : child) {
      devChildren.add(createNewNestedColumnVector(c));
    }
    int mainColRows = rows;
    DType mainColType = type;
    HostMemoryBuffer mainColValid = valid;
    HostMemoryBuffer mainColOffsets = offsets;
    DeviceMemoryBuffer mainDataDevBuff = null;
    DeviceMemoryBuffer mainValidDevBuff = null;
    DeviceMemoryBuffer mainOffsetsDevBuff = null;
    if (mainColValid != null) {
      long validLen = getValidityBufferSize(mainColRows);
      mainValidDevBuff = DeviceMemoryBuffer.allocate(validLen);
      mainValidDevBuff.copyFromHostBuffer(mainColValid, 0, validLen);
    }
    if (data != null) {
      long dataLen = data.length;
      mainDataDevBuff = DeviceMemoryBuffer.allocate(dataLen);
      mainDataDevBuff.copyFromHostBuffer(data, 0, dataLen);
    }
    if (mainColOffsets != null) {
      // The offset buffer has (no. of rows + 1) entries, where each entry is INT32.sizeInBytes
      long offsetsLen = OFFSET_SIZE * (((long) mainColRows) + 1);
      mainOffsetsDevBuff = DeviceMemoryBuffer.allocate(offsetsLen);
      mainOffsetsDevBuff.copyFromHostBuffer(mainColOffsets, 0, offsetsLen);
    }
    List<DeviceMemoryBuffer> toClose = new ArrayList<>();
    long[] childHandles = new long[devChildren.size()];
    try {
      // The resulting ColumnVector takes ownership of all child device buffers.
      for (ColumnView.NestedColumnVector ncv : devChildren) {
        toClose.addAll(ncv.getBuffersToClose());
      }
      for (int i = 0; i < devChildren.size(); i++) {
        childHandles[i] = devChildren.get(i).createViewHandle();
      }
      return new ColumnVector(mainColType, mainColRows, nullCount, mainDataDevBuff,
          mainValidDevBuff, mainOffsetsDevBuff, toClose, childHandles);
    } finally {
      // Child view handles are deleted on both the success and failure paths;
      // presumably the ColumnVector constructor copies the views it needs
      // before returning — TODO(review): confirm.
      for (int i = 0; i < childHandles.length; i++) {
        if (childHandles[i] != 0) {
          ColumnView.deleteColumnView(childHandles[i]);
          childHandles[i] = 0;
        }
      }
    }
  }

  // Recursively mirrors one host column (and all its descendants) into device memory.
  private static NestedColumnVector createNewNestedColumnVector(
      HostColumnVectorCore nestedChildren) {
    if (nestedChildren == null) {
      return null;
    }
    DType colType = nestedChildren.getType();
    Optional<Long> nullCount = Optional.of(nestedChildren.getNullCount());
    long colRows = nestedChildren.getRowCount();
    // Only leaf columns carry a data buffer; nested levels pass null.
    HostMemoryBuffer colData = nestedChildren.getNestedChildren().isEmpty() ? nestedChildren.getData() : null;
    HostMemoryBuffer colValid = nestedChildren.getValidity();
    HostMemoryBuffer colOffsets = nestedChildren.getOffsets();
    List<NestedColumnVector> children = new ArrayList<>();
    for (HostColumnVectorCore nhcv : nestedChildren.getNestedChildren()) {
      children.add(createNewNestedColumnVector(nhcv));
    }
    return createNestedColumnVector(colType, colRows, nullCount, colData, colValid, colOffsets,
        children);
  }

  // Builds a native column_view handle for this vector, creating child views
  // first. Child views are deleted before returning — presumably
  // makeCudfColumnView copies what it needs — TODO(review): confirm.
  private long createViewHandle() {
    long[] childrenColViews = null;
    try {
      if (children != null) {
        childrenColViews = new long[children.size()];
        for (int i = 0; i < children.size(); i++) {
          childrenColViews[i] = children.get(i).createViewHandle();
        }
      }
      long dataAddr = data == null ? 0 : data.address;
      long dataLen = data == null ? 0 : data.length;
      long offsetAddr = offsets == null ? 0 : offsets.address;
      long validAddr = valid == null ? 0 : valid.address;
      int nc = nullCount.orElse(ColumnVector.OffHeapState.UNKNOWN_NULL_COUNT).intValue();
      return makeCudfColumnView(dataType.typeId.getNativeId(), dataType.getScale(), dataAddr, dataLen,
          offsetAddr, validAddr, nc, (int) rows, childrenColViews);
    } finally {
      if (childrenColViews != null) {
        for (int i = 0; i < childrenColViews.length; i++) {
          if (childrenColViews[i] != 0) {
            deleteColumnView(childrenColViews[i]);
            childrenColViews[i] = 0;
          }
        }
      }
    }
  }

  // Collects every device buffer owned by this vector and its children so the
  // resulting ColumnVector can take ownership and close them later.
  List<DeviceMemoryBuffer> getBuffersToClose() {
    List<DeviceMemoryBuffer> buffers = new ArrayList<>();
    if (children != null) {
      for (NestedColumnVector ncv : children) {
        buffers.addAll(ncv.getBuffersToClose());
      }
    }
    if (data != null) {
      buffers.add(data);
    }
    if (valid != null) {
      buffers.add(valid);
    }
    if (offsets != null) {
      buffers.add(offsets);
    }
    return buffers;
  }

  // Reads the exclusive end byte offset of string row `index` from the offsets
  // buffer; the factor 4 is the byte width of each INT32 offset entry.
  private static long getEndStringOffset(long totalRows, long index, HostMemoryBuffer offsets) {
    assert index < totalRows;
    return offsets.getInt((index + 1) * 4);
  }

  // Copies one level's host buffers to the device and wraps them, together
  // with the already-copied children, in a NestedColumnVector.
  private static NestedColumnVector createNestedColumnVector(DType type, long rows, Optional<Long> nullCount,
      HostMemoryBuffer dataBuffer, HostMemoryBuffer validityBuffer,
      HostMemoryBuffer offsetBuffer, List<NestedColumnVector> child) {
    DeviceMemoryBuffer data = null;
    DeviceMemoryBuffer valid = null;
    DeviceMemoryBuffer offsets = null;
    if (dataBuffer != null) {
      long dataLen = rows * type.getSizeInBytes();
      if (type.equals(DType.STRING)) {
        // This needs a different type: string data length comes from the last
        // offset entry, not from a fixed element width.
        dataLen = getEndStringOffset(rows, rows - 1, offsetBuffer);
        if (dataLen == 0 && nullCount.get() == 0) {
          // This is a work around to an issue where a column of all empty strings must have at
          // least one byte or it will not be interpreted correctly.
          dataLen = 1;
        }
      }
      data = DeviceMemoryBuffer.allocate(dataLen);
      data.copyFromHostBuffer(dataBuffer, 0, dataLen);
    }
    if (validityBuffer != null) {
      long validLen = getValidityBufferSize((int) rows);
      valid = DeviceMemoryBuffer.allocate(validLen);
      valid.copyFromHostBuffer(validityBuffer, 0, validLen);
    }
    if (offsetBuffer != null) {
      long offsetsLen = OFFSET_SIZE * (rows + 1);
      offsets = DeviceMemoryBuffer.allocate(offsetsLen);
      offsets.copyFromHostBuffer(offsetBuffer, 0, offsetsLen);
    }
    NestedColumnVector ret = new NestedColumnVector(type, rows, nullCount, data, valid, offsets,
        child);
    return ret;
  }
}
/////////////////////////////////////////////////////////////////////////////
// DATA MOVEMENT
/////////////////////////////////////////////////////////////////////////////
/**
 * Recursively copies a nested device column view (and all of its children) to
 * host memory on the given stream.
 *
 * The device buffers returned by getData()/getOffsets()/getValid() on
 * {@code deviceCvPointer} are closed before returning; on failure any host
 * buffers allocated so far are closed as well. The copies are asynchronous:
 * the caller must synchronize on {@code stream} before reading the result.
 *
 * @param stream CUDA stream the copies are enqueued on
 * @param deviceCvPointer device column view to copy; may be null
 * @param hostMemoryAllocator allocator used for the host-side buffers
 * @return host-side mirror of the column, or null if the input was null
 */
private static HostColumnVectorCore copyToHostAsyncNestedHelper(
    Cuda.Stream stream, ColumnView deviceCvPointer, HostMemoryAllocator hostMemoryAllocator) {
  if (deviceCvPointer == null) {
    return null;
  }
  HostMemoryBuffer hostOffsets = null;
  HostMemoryBuffer hostValid = null;
  HostMemoryBuffer hostData = null;
  List<HostColumnVectorCore> children = new ArrayList<>();
  BaseDeviceMemoryBuffer currData = null;
  BaseDeviceMemoryBuffer currOffsets = null;
  BaseDeviceMemoryBuffer currValidity = null;
  long currNullCount = 0L; // uppercase 'L' suffix: lowercase 'l' reads as '1'
  boolean needsCleanup = true;
  try {
    long currRows = deviceCvPointer.getRowCount();
    DType currType = deviceCvPointer.getType();
    currData = deviceCvPointer.getData();
    currOffsets = deviceCvPointer.getOffsets();
    currValidity = deviceCvPointer.getValid();
    if (currData != null) {
      hostData = hostMemoryAllocator.allocate(currData.length);
      hostData.copyFromDeviceBufferAsync(currData, stream);
    }
    if (currValidity != null) {
      hostValid = hostMemoryAllocator.allocate(currValidity.length);
      hostValid.copyFromDeviceBufferAsync(currValidity, stream);
    }
    if (currOffsets != null) {
      hostOffsets = hostMemoryAllocator.allocate(currOffsets.length);
      hostOffsets.copyFromDeviceBufferAsync(currOffsets, stream);
    }
    // Recurse into the children; each child view is closed as soon as its
    // host copy has been enqueued.
    int numChildren = deviceCvPointer.getNumChildren();
    for (int i = 0; i < numChildren; i++) {
      try (ColumnView childDevPtr = deviceCvPointer.getChildColumnView(i)) {
        children.add(copyToHostAsyncNestedHelper(stream, childDevPtr, hostMemoryAllocator));
      }
    }
    currNullCount = deviceCvPointer.getNullCount();
    Optional<Long> nullCount = Optional.of(currNullCount);
    HostColumnVectorCore ret =
        new HostColumnVectorCore(currType, currRows, nullCount, hostData,
            hostValid, hostOffsets, children);
    needsCleanup = false;
    return ret;
  } finally {
    // Device buffers are always released; host buffers only when construction
    // failed, otherwise the returned HostColumnVectorCore owns them.
    if (currData != null) {
      currData.close();
    }
    if (currOffsets != null) {
      currOffsets.close();
    }
    if (currValidity != null) {
      currValidity.close();
    }
    if (needsCleanup) {
      if (hostData != null) {
        hostData.close();
      }
      if (hostOffsets != null) {
        hostOffsets.close();
      }
      if (hostValid != null) {
        hostValid.close();
      }
    }
  }
}
/**
 * Copy the data to the host synchronously, using the supplied allocator for
 * the host-side buffers. Blocks until the copy on the default stream completes.
 */
public HostColumnVector copyToHost(HostMemoryAllocator hostMemoryAllocator) {
  HostColumnVector hostCopy = copyToHostAsync(Cuda.DEFAULT_STREAM, hostMemoryAllocator);
  Cuda.DEFAULT_STREAM.sync();
  return hostCopy;
}
/**
 * Copy the data to the host asynchronously. The caller MUST synchronize on the stream
 * before examining the result.
 * @param stream CUDA stream the device-to-host copies are enqueued on
 * @param hostMemoryAllocator allocator used for the host-side buffers
 * @return host-side copy of this column (owns the host buffers on success)
 */
public HostColumnVector copyToHostAsync(Cuda.Stream stream,
    HostMemoryAllocator hostMemoryAllocator) {
  try (NvtxRange toHost = new NvtxRange("toHostAsync", NvtxColor.BLUE)) {
    HostMemoryBuffer hostDataBuffer = null;
    HostMemoryBuffer hostValidityBuffer = null;
    HostMemoryBuffer hostOffsetsBuffer = null;
    BaseDeviceMemoryBuffer valid = getValid();
    BaseDeviceMemoryBuffer offsets = getOffsets();
    BaseDeviceMemoryBuffer data = null;
    DType type = this.type;
    long rows = this.rows;
    // Nested columns carry no top-level data buffer; their data lives in children.
    if (!type.isNestedType()) {
      data = getData();
    }
    boolean needsCleanup = true;
    try {
      // We don't have a good way to tell if it is cached on the device or recalculate it on
      // the host for now, so take the hit here.
      getNullCount();
      if (!type.isNestedType()) {
        if (valid != null) {
          hostValidityBuffer = hostMemoryAllocator.allocate(valid.getLength());
          hostValidityBuffer.copyFromDeviceBufferAsync(valid, stream);
        }
        if (offsets != null) {
          // NOTE(review): this branch reads `offsets.length` while the nested
          // branch below uses `offsets.getLength()` — presumably equivalent;
          // confirm and unify.
          hostOffsetsBuffer = hostMemoryAllocator.allocate(offsets.length);
          hostOffsetsBuffer.copyFromDeviceBufferAsync(offsets, stream);
        }
        // If a strings column is all null values there is no data buffer allocated
        if (data != null) {
          hostDataBuffer = hostMemoryAllocator.allocate(data.length);
          hostDataBuffer.copyFromDeviceBufferAsync(data, stream);
        }
        HostColumnVector ret = new HostColumnVector(type, rows, Optional.of(nullCount),
            hostDataBuffer, hostValidityBuffer, hostOffsetsBuffer);
        needsCleanup = false;
        return ret;
      } else {
        if (data != null) {
          hostDataBuffer = hostMemoryAllocator.allocate(data.length);
          hostDataBuffer.copyFromDeviceBufferAsync(data, stream);
        }
        if (valid != null) {
          hostValidityBuffer = hostMemoryAllocator.allocate(valid.getLength());
          hostValidityBuffer.copyFromDeviceBufferAsync(valid, stream);
        }
        if (offsets != null) {
          hostOffsetsBuffer = hostMemoryAllocator.allocate(offsets.getLength());
          hostOffsetsBuffer.copyFromDeviceBufferAsync(offsets, stream);
        }
        // Copy each child column recursively; child views are closed as soon
        // as their host copies have been enqueued.
        List<HostColumnVectorCore> children = new ArrayList<>();
        for (int i = 0; i < getNumChildren(); i++) {
          try (ColumnView childDevPtr = getChildColumnView(i)) {
            children.add(copyToHostAsyncNestedHelper(stream, childDevPtr, hostMemoryAllocator));
          }
        }
        HostColumnVector ret = new HostColumnVector(type, rows, Optional.of(nullCount),
            hostDataBuffer, hostValidityBuffer, hostOffsetsBuffer, children);
        needsCleanup = false;
        return ret;
      }
    } finally {
      // Device buffers are always released here; host buffers only on failure,
      // otherwise the returned HostColumnVector owns them.
      if (data != null) {
        data.close();
      }
      if (offsets != null) {
        offsets.close();
      }
      if (valid != null) {
        valid.close();
      }
      if (needsCleanup) {
        if (hostOffsetsBuffer != null) {
          hostOffsetsBuffer.close();
        }
        if (hostDataBuffer != null) {
          hostDataBuffer.close();
        }
        if (hostValidityBuffer != null) {
          hostValidityBuffer.close();
        }
      }
    }
  }
}
/** Synchronous host copy using the default host memory allocator. */
public HostColumnVector copyToHost() {
  return this.copyToHost(DefaultHostMemoryAllocator.get());
}
/**
 * Copy the data to the host asynchronously using the default host memory
 * allocator. The caller MUST synchronize on the stream before examining
 * the result.
 */
public HostColumnVector copyToHostAsync(Cuda.Stream stream) {
  return this.copyToHostAsync(stream, DefaultHostMemoryAllocator.get());
}
/**
 * Calculate the total space required to copy the data to the host. This should be padded to
 * the alignment that the CPU requires.
 * @return number of bytes needed on the host, including CPU alignment padding.
 */
public long getHostBytesRequired() {
  return getDeviceMemorySize(getNativeView(), true);
}
/**
 * Get the size that the host will align memory allocations to in bytes.
 */
public static native long hostPaddingSizeInBytes();
/**
 * Exact check if a column or its descendants have non-empty null rows
 *
 * @return Whether the column or its descendants have non-empty null rows
 */
public boolean hasNonEmptyNulls() {
  // Delegates to the static native check using this view's handle.
  return hasNonEmptyNulls(viewHandle);
}
/**
 * Copies this column into output while purging any non-empty null rows in the column or its
 * descendants.
 *
 * If this column is not of compound type (LIST/STRING/STRUCT/DICTIONARY), the output will be
 * the same as input.
 *
 * The purge operation only applies directly to LIST and STRING columns, but it applies indirectly
 * to STRUCT/DICTIONARY columns as well, since these columns may have child columns that
 * are LIST or STRING.
 *
 * Examples:
 * lists = data: [{{0,1}, {2,3}, {4,5}} validity: {true, false, true}]
 * lists[1] is null, but the list's child column still stores `{2,3}`.
 *
 * After purging the contents of the list's null rows, the column's contents will be:
 * lists = [data: {{0,1}, {}, {4,5}} validity: {true, false, true}]
 * (the null row is emptied, not removed — the row count and validity are unchanged)
 *
 * @return A new column with equivalent contents to `input`, but with null rows purged
 */
public ColumnVector purgeNonEmptyNulls() {
  return new ColumnVector(purgeNonEmptyNulls(viewHandle));
}
/**
 * Wraps an array of native column_view handles in ColumnView objects.
 * Ownership of each handle transfers to its ColumnView as it is constructed;
 * on failure, already-wrapped views and the remaining raw handles are cleaned
 * up before the original error is rethrown.
 */
static ColumnView[] getColumnViewsFromPointers(long[] nativeHandles) {
  ColumnView[] columns = new ColumnView[nativeHandles.length];
  try {
    for (int i = 0; i < nativeHandles.length; i++) {
      long nativeHandle = nativeHandles[i];
      // setting address to zero, so we don't clean it in case of an exception as it
      // will be cleaned up by the constructor
      nativeHandles[i] = 0;
      columns[i] = new ColumnView(nativeHandle);
    }
    return columns;
  } catch (Throwable t) {
    try {
      cleanupColumnViews(nativeHandles, columns, t);
    } catch (Throwable s) {
      // Keep the original failure primary; record the cleanup failure on it.
      t.addSuppressed(s);
    } finally {
      throw t;
    }
  }
}
/**
* Convert this integer column to hexadecimal column and return a new strings column
*
* Any null entries will result in corresponding null entries in the output column.
*
* The output character set is '0'-'9' and 'A'-'F'. The output string width will
* be a multiple of 2 depending on the size of the integer type. A single leading
* zero is applied to the first non-zero output byte if it is less than 0x10.
*
* Example:
* input = [123, -1, 0, 27, 342718233]
* s = input.toHex()
* s is [ '04D2', 'FFFFFFFF', '00', '1B', '146D7719']
*
* The example above shows an `INT32` type column where each integer is 4 bytes.
* Leading zeros are suppressed unless filling out a complete byte as in
* `123 -> '04D2'` instead of `000004D2` or `4D2`.
*
* @return new string ColumnVector
*/
public ColumnVector toHex() {
  assert getType().isIntegral() : "Only integers are supported";
  // Delegate to the native conversion and wrap the resulting column handle.
  final long hexHandle = toHex(this.getNativeView());
  return new ColumnVector(hexHandle);
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ColumnWriterOptions.java
|
/*
*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.ArrayList;
import java.util.List;
/**
* Per column settings for writing Parquet/ORC files.
*
* The native also uses the same "column_in_metadata" for both Parquet and ORC.
*/
public class ColumnWriterOptions {
// `isTimestampTypeInt96` is ignored in ORC
private boolean isTimestampTypeInt96;
// Decimal precision; UNKNOWN_PRECISION (-1) when not a decimal column.
private int precision;
private boolean isNullable;
// True when this column should be written as a MAP.
private boolean isMap = false;
// True when this column should be written as BINARY.
private boolean isBinary = false;
private String columnName;
// only for Parquet
private boolean hasParquetFieldId;
private int parquetFieldId;
/**
 * Constructor used for struct columns, copying the accumulated state out of the builder.
 */
private ColumnWriterOptions(AbstractStructBuilder builder) {
  this.columnName = builder.name;
  this.isNullable = builder.isNullable;
  this.hasParquetFieldId = builder.hasParquetFieldId;
  this.parquetFieldId = builder.parquetFieldId;
  // The cast is required: `builder` is a raw AbstractStructBuilder, so the
  // generic signature of List.toArray(T[]) is erased and it returns Object[].
  this.childColumnOptions =
      (ColumnWriterOptions[]) builder.children.toArray(new ColumnWriterOptions[0]);
}
// The sentinel value of unknown precision (default value).
// Declared final: it is a constant sentinel and must never be reassigned.
public static final int UNKNOWN_PRECISION = -1;
/**
 * Constructor used for list columns.
 * @param builder list builder holding exactly one child column's metadata
 */
private ColumnWriterOptions(ListBuilder builder) {
  assert (builder.children.size() == 1) : "Lists can only have one child";
  this.columnName = builder.name;
  this.isNullable = builder.isNullable;
  // we are adding the child twice even though lists have one child only because the way the cudf
  // has implemented this it requires two children to be set for the list, but it drops the
  // first one. This is something that is a lower priority and might be fixed in future
  this.childColumnOptions =
      new ColumnWriterOptions[]{DUMMY_CHILD, builder.children.get(0)};
}
protected ColumnWriterOptions[] childColumnOptions = {};
/**
 * Base builder for struct column metadata; concrete subclasses fix the
 * self-type {@code T} so chained calls return the right builder type.
 */
protected abstract static class AbstractStructBuilder<T extends AbstractStructBuilder,
    V extends ColumnWriterOptions> extends NestedBuilder<T, V> {
  /**
   * Builder specific to build a Struct meta
   */
  public AbstractStructBuilder(String name, boolean isNullable) {
    super(name, isNullable);
  }

  public AbstractStructBuilder(String name, boolean isNullable, int parquetFieldId) {
    super(name, isNullable, parquetFieldId);
  }

  protected AbstractStructBuilder() {
    super();
  }
}
// This child is needed as the first child of a List column meta due to how cudf has been
// implemented. Cudf drops the first child from the meta if a column is a LIST. This is done
// this way due to some complications in the parquet reader. There was change to fix this here:
// https://github.com/rapidsai/cudf/pull/7461/commits/5ce33b40abb87cc7b76b5efeb0a3a0215f9ef6fb
// but it was reverted later on here:
// https://github.com/rapidsai/cudf/pull/7461/commits/f248eb7265de995a95f998d46d897fb0ae47f53e
static ColumnWriterOptions DUMMY_CHILD = new ColumnWriterOptions("DUMMY");
public static abstract class NestedBuilder<T extends NestedBuilder, V extends ColumnWriterOptions> {
// Accumulated child column metadata, in insertion order.
protected List<ColumnWriterOptions> children = new ArrayList<>();
protected boolean isNullable = true;
protected String name = "";
// Parquet structure needs
protected boolean hasParquetFieldId;
protected int parquetFieldId;
/**
 * Builder specific to build a Struct meta
 * @param name column name
 * @param isNullable whether the column may contain nulls
 */
protected NestedBuilder(String name, boolean isNullable) {
  this.name = name;
  this.isNullable = isNullable;
}

/**
 * Builder specific to build a Struct meta, additionally recording an
 * explicit Parquet field id.
 */
protected NestedBuilder(String name, boolean isNullable, int parquetFieldId) {
  this.name = name;
  this.isNullable = isNullable;
  this.hasParquetFieldId = true;
  this.parquetFieldId = parquetFieldId;
}

protected NestedBuilder() {}
// Factory helpers creating leaf column metadata; the ColumnWriterOptions
// constructors used here are declared elsewhere in this class.
protected ColumnWriterOptions withColumn(String name, boolean isNullable) {
  return new ColumnWriterOptions(name, isNullable);
}

protected ColumnWriterOptions withColumn(String name, boolean isNullable, int parquetFieldId) {
  return new ColumnWriterOptions(name, isNullable, parquetFieldId);
}

// Decimal columns: the literal `false` fills the boolean slot that
// withTimestamp uses for isInt96 (timestamps are never decimals).
protected ColumnWriterOptions withDecimal(String name, int precision,
    boolean isNullable) {
  return new ColumnWriterOptions(name, false, precision, isNullable);
}

protected ColumnWriterOptions withDecimal(String name, int precision,
    boolean isNullable, int parquetFieldId) {
  return new ColumnWriterOptions(name, false, precision, isNullable, parquetFieldId);
}

// Timestamp columns: precision is not applicable, so UNKNOWN_PRECISION is passed.
protected ColumnWriterOptions withTimestamp(String name, boolean isInt96,
    boolean isNullable) {
  return new ColumnWriterOptions(name, isInt96, UNKNOWN_PRECISION, isNullable);
}

protected ColumnWriterOptions withTimestamp(String name, boolean isInt96,
    boolean isNullable, int parquetFieldId) {
  return new ColumnWriterOptions(name, isInt96, UNKNOWN_PRECISION, isNullable, parquetFieldId);
}
// Binary columns are modeled as a list of bytes: the metadata is created via
// listBuilder and then flagged as binary.
protected ColumnWriterOptions withBinary(String name, boolean isNullable) {
  ColumnWriterOptions opt = listBuilder(name, isNullable)
      // The name here does not matter. It will not be included in the final file
      // This is just to get the metadata to line up properly for the C++ APIs
      .withColumns(false, "BINARY_DATA")
      .build();
  opt.isBinary = true;
  return opt;
}

protected ColumnWriterOptions withBinary(String name, boolean isNullable, int parquetFieldId) {
  ColumnWriterOptions opt = listBuilder(name, isNullable)
      // The name here does not matter. It will not be included in the final file
      // This is just to get the metadata to line up properly for the C++ APIs
      .withColumn(false, "BINARY_DATA", parquetFieldId)
      .build();
  opt.isBinary = true;
  return opt;
}
/**
 * Set the list column meta.
 * Lists should have only one child in ColumnVector, but the metadata expects a
 * LIST column to have two children and the first child to be the
 * {@link ColumnWriterOptions#DUMMY_CHILD}.
 * This is the current behavior in cudf and will change in future
 * @return this for chaining.
 * @throws IllegalArgumentException if the first child is not DUMMY_CHILD, or
 *         the real child's column name is empty
 */
public T withListColumn(ListColumnWriterOptions child) {
  assert (child.getChildColumnOptions().length == 2) : "Lists can only have two children";
  if (child.getChildColumnOptions()[0] != DUMMY_CHILD) {
    throw new IllegalArgumentException("First child in the list has to be DUMMY_CHILD");
  }
  if (child.getChildColumnOptions()[1].getColumnName().isEmpty()) {
    throw new IllegalArgumentException("Column name can't be empty");
  }
  children.add(child);
  return (T) this;
}
/**
 * Set the map column meta.
 * Unlike list/struct children, no validation is performed here.
 * @return this for chaining.
 */
public T withMapColumn(ColumnWriterOptions child) {
  children.add(child);
  return (T) this;
}
/**
 * Set a child struct meta data
 * Every child of the struct must have a non-empty column name.
 * @return this for chaining.
 * @throws IllegalArgumentException if any child's column name is empty
 */
public T withStructColumn(StructColumnWriterOptions child) {
  for (ColumnWriterOptions opt : child.getChildColumnOptions()) {
    if (opt.getColumnName().isEmpty()) {
      throw new IllegalArgumentException("Column name can't be empty");
    }
  }
  children.add(child);
  return (T) this;
}
/**
 * Add one non-nullable simple child column per name.
 * @return this for chaining.
 */
public T withNonNullableColumns(String... names) {
  withColumns(false, names);
  return (T) this;
}

/**
 * Add one nullable simple child column per name.
 * @return this for chaining.
 */
public T withNullableColumns(String... names) {
  withColumns(true, names);
  return (T) this;
}
/**
 * Set a simple child meta data
 * @param nullable whether each of the named columns may contain nulls
 * @param names one or more column names; one child option is added per name
 * @return this for chaining.
 */
public T withColumns(boolean nullable, String... names) {
  for (String n : names) {
    // withColumn(String, boolean) builds a single plain column option
    // (defined elsewhere in this class).
    children.add(withColumn(n, nullable));
  }
  return (T) this;
}
/**
 * Set a simple child meta data, additionally attaching a Parquet field id.
 * @param nullable whether the column may contain nulls
 * @param name column name
 * @param parquetFieldId the Parquet field id to attach to the column
 * @return this for chaining.
 */
public T withColumn(boolean nullable, String name, int parquetFieldId) {
  children.add(withColumn(name, nullable, parquetFieldId));
  return (T) this;
}
/**
 * Set a Decimal child meta data
 * @param name column name
 * @param precision decimal precision for the column
 * @param nullable whether the column may contain nulls
 * @return this for chaining.
 */
public T withDecimalColumn(String name, int precision, boolean nullable) {
  children.add(withDecimal(name, precision, nullable));
  return (T) this;
}
/**
 * Set a Decimal child meta data, additionally attaching a Parquet field id.
 * @return this for chaining.
 */
public T withDecimalColumn(String name, int precision, boolean nullable, int parquetFieldId) {
  children.add(withDecimal(name, precision, nullable, parquetFieldId));
  return (T) this;
}
/**
 * Add a nullable Decimal child column.
 * @return this for chaining.
 */
public T withNullableDecimalColumn(String name, int precision) {
  return withDecimalColumn(name, precision, true);
}

/**
 * Add a non-nullable Decimal child column.
 * @return this for chaining.
 */
public T withDecimalColumn(String name, int precision) {
  return withDecimalColumn(name, precision, false);
}
/**
 * Set a binary child meta data, additionally attaching a Parquet field id.
 * @param name column name
 * @param nullable whether the column may contain nulls
 * @param parquetFieldId the Parquet field id to attach to the column
 * @return this for chaining.
 */
public T withBinaryColumn(String name, boolean nullable, int parquetFieldId) {
  children.add(withBinary(name, nullable, parquetFieldId));
  return (T) this;
}
/**
 * Set a binary child meta data
 * @return this for chaining.
 */
public T withBinaryColumn(String name, boolean nullable) {
  children.add(withBinary(name, nullable));
  return (T) this;
}
/**
 * Set a timestamp child meta data, additionally attaching a Parquet field id.
 * @param name column name
 * @param isInt96 true if the writer is expected to write timestamps in INT96
 * @param nullable whether the column may contain nulls
 * @param parquetFieldId the Parquet field id to attach to the column
 * @return this for chaining.
 */
public T withTimestampColumn(String name, boolean isInt96, boolean nullable, int parquetFieldId) {
  children.add(withTimestamp(name, isInt96, nullable, parquetFieldId));
  return (T) this;
}
/**
 * Set a timestamp child meta data
 * @return this for chaining.
 */
public T withTimestampColumn(String name, boolean isInt96, boolean nullable) {
  children.add(withTimestamp(name, isInt96, nullable));
  return (T) this;
}
/**
 * Set a non-nullable timestamp child meta data (convenience overload).
 * @return this for chaining.
 */
public T withTimestampColumn(String name, boolean isInt96) {
  withTimestampColumn(name, isInt96, false);
  return (T) this;
}
/**
 * Set a nullable timestamp child meta data (convenience overload).
 * @return this for chaining.
 */
public T withNullableTimestampColumn(String name, boolean isInt96) {
  withTimestampColumn(name, isInt96, true);
  return (T) this;
}
/**
 * Build the options instance described by this builder.
 */
public abstract V build();
}
/**
 * Construct column options with the full set of scalar attributes.
 * @param columnName name of the column
 * @param isTimestampTypeInt96 true if the writer is expected to write timestamps in INT96
 * @param precision decimal precision for the column
 * @param isNullable whether the column may contain nulls
 */
public ColumnWriterOptions(String columnName, boolean isTimestampTypeInt96,
    int precision, boolean isNullable) {
  this.isTimestampTypeInt96 = isTimestampTypeInt96;
  this.precision = precision;
  this.isNullable = isNullable;
  this.columnName = columnName;
}
/**
 * Same as the four-argument constructor, additionally attaching a Parquet field id.
 */
public ColumnWriterOptions(String columnName, boolean isTimestampTypeInt96,
    int precision, boolean isNullable, int parquetFieldId) {
  this(columnName, isTimestampTypeInt96, precision, isNullable);
  this.hasParquetFieldId = true;
  this.parquetFieldId = parquetFieldId;
}
/**
 * Construct options for a non-timestamp, non-decimal column.
 */
public ColumnWriterOptions(String columnName, boolean isNullable) {
  this.isTimestampTypeInt96 = false;
  // UNKNOWN_PRECISION (declared elsewhere in this class) marks "no decimal
  // precision set" for non-decimal columns.
  this.precision = UNKNOWN_PRECISION;
  this.isNullable = isNullable;
  this.columnName = columnName;
}
/**
 * Same as the two-argument constructor, additionally attaching a Parquet field id.
 */
public ColumnWriterOptions(String columnName, boolean isNullable, int parquetFieldId) {
  this(columnName, isNullable);
  this.hasParquetFieldId = true;
  this.parquetFieldId = parquetFieldId;
}
/**
 * Construct options for a nullable column with the given name.
 */
public ColumnWriterOptions(String columnName) {
  this(columnName, true);
}
/**
 * Extracts a flattened boolean[] from a single column option; used by the
 * getFlat* tree walks. (Despite the name, it produces booleans, not bytes.)
 */
@FunctionalInterface
protected interface ByteArrayProducer {
  boolean[] apply(ColumnWriterOptions opt);
}
/**
 * Extracts a flattened int[] from a single column option; used by the
 * getFlat* tree walks.
 */
@FunctionalInterface
protected interface IntArrayProducer {
  int[] apply(ColumnWriterOptions opt);
}
/**
 * Pre-order flattened INT96-timestamp flags: this node's flag first, followed by
 * the flattened flags of all descendants.
 */
boolean[] getFlatIsTimeTypeInt96() {
  boolean[] self = {isTimestampTypeInt96};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatBooleans(self, ColumnWriterOptions::getFlatIsTimeTypeInt96);
}
/**
 * Concatenate {@code ret} with the flattened results of every child, in order
 * (pre-order traversal of the column tree).
 * @param ret already-flattened values for this node; becomes the result's prefix
 * @param producer extracts the flattened boolean[] from each child option
 * @return one array: ret's values first, then each child's values in child order
 */
protected boolean[] getFlatBooleans(boolean[] ret, ByteArrayProducer producer) {
  // First pass: materialize every child's flattened array and total up the size.
  boolean[][] childResults = new boolean[childColumnOptions.length][];
  int totalChildrenFlatLength = ret.length;
  for (int i = 0 ; i < childColumnOptions.length ; i++) {
    ColumnWriterOptions opt = childColumnOptions[i];
    childResults[i] = producer.apply(opt);
    totalChildrenFlatLength += childResults[i].length;
  }
  // Second pass: copy this node's prefix, then each child's block in sequence.
  boolean[] result = new boolean[totalChildrenFlatLength];
  System.arraycopy(ret, 0, result, 0, ret.length);
  int copiedSoFar = ret.length;
  for (int i = 0 ; i < childColumnOptions.length ; i++) {
    System.arraycopy(childResults[i], 0, result, copiedSoFar, childResults[i].length);
    copiedSoFar += childResults[i].length;
  }
  return result;
}
// Flattened views over this column subtree. Each method emits this node's value
// first, then — when children exist — every descendant's values via a pre-order
// walk delegated to getFlatInts / getFlatBooleans.

/** Pre-order flattened decimal precisions. */
int[] getFlatPrecision() {
  int[] self = {precision};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatInts(self, ColumnWriterOptions::getFlatPrecision);
}

/** Pre-order flattened "has a Parquet field id" flags. */
boolean[] getFlatHasParquetFieldId() {
  boolean[] self = {hasParquetFieldId};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatBooleans(self, ColumnWriterOptions::getFlatHasParquetFieldId);
}

/** Pre-order flattened Parquet field ids (meaningful only where the flag is set). */
int[] getFlatParquetFieldId() {
  int[] self = {parquetFieldId};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatInts(self, ColumnWriterOptions::getFlatParquetFieldId);
}

/** Pre-order flattened nullability flags. */
boolean[] getFlatIsNullable() {
  boolean[] self = {isNullable};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatBooleans(self, ColumnWriterOptions::getFlatIsNullable);
}

/** Pre-order flattened map flags. */
boolean[] getFlatIsMap() {
  boolean[] self = {isMap};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatBooleans(self, ColumnWriterOptions::getFlatIsMap);
}

/** Pre-order flattened binary flags. */
boolean[] getFlatIsBinary() {
  boolean[] self = {isBinary};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatBooleans(self, ColumnWriterOptions::getFlatIsBinary);
}

/** Pre-order flattened direct-child counts, this node's count first. */
int[] getFlatNumChildren() {
  int[] self = {childColumnOptions.length};
  if (childColumnOptions.length == 0) {
    return self;
  }
  return getFlatInts(self, ColumnWriterOptions::getFlatNumChildren);
}
/**
 * Concatenate {@code ret} with the flattened results of every child, in order
 * (pre-order traversal of the column tree). Integer twin of getFlatBooleans.
 * @param ret already-flattened values for this node; becomes the result's prefix
 * @param producer extracts the flattened int[] from each child option
 * @return one array: ret's values first, then each child's values in child order
 */
protected int[] getFlatInts(int[] ret, IntArrayProducer producer) {
  // First pass: materialize every child's flattened array and total up the size.
  int[][] childResults = new int[childColumnOptions.length][];
  int totalChildrenFlatLength = ret.length;
  for (int i = 0 ; i < childColumnOptions.length ; i++) {
    ColumnWriterOptions opt = childColumnOptions[i];
    childResults[i] = producer.apply(opt);
    totalChildrenFlatLength += childResults[i].length;
  }
  // Second pass: copy this node's prefix, then each child's block in sequence.
  int[] result = new int[totalChildrenFlatLength];
  System.arraycopy(ret, 0, result, 0, ret.length);
  int copiedSoFar = ret.length;
  for (int i = 0 ; i < childColumnOptions.length ; i++) {
    System.arraycopy(childResults[i], 0, result, copiedSoFar, childResults[i].length);
    copiedSoFar += childResults[i].length;
  }
  return result;
}
/**
 * Pre-order flattened column names: this node's name first, followed by the
 * flattened names of all descendants.
 */
String[] getFlatColumnNames() {
  String[] ret = {columnName};
  if (childColumnOptions.length > 0) {
    return getFlatColumnNames(ret);
  } else {
    return ret;
  }
}
/**
 * Concatenate {@code ret} with the flattened names of every child, in order.
 * String twin of getFlatBooleans/getFlatInts.
 * @param ret already-flattened names for this node; becomes the result's prefix
 */
protected String[] getFlatColumnNames(String[] ret) {
  // First pass: materialize every child's flattened array and total up the size.
  String[][] childResults = new String[childColumnOptions.length][];
  int totalChildrenFlatLength = ret.length;
  for (int i = 0 ; i < childColumnOptions.length ; i++) {
    ColumnWriterOptions opt = childColumnOptions[i];
    childResults[i] = opt.getFlatColumnNames();
    totalChildrenFlatLength += childResults[i].length;
  }
  // Second pass: copy this node's prefix, then each child's block in sequence.
  String[] result = new String[totalChildrenFlatLength];
  System.arraycopy(ret, 0, result, 0, ret.length);
  int copiedSoFar = ret.length;
  for (int i = 0 ; i < childColumnOptions.length ; i++) {
    System.arraycopy(childResults[i], 0, result, copiedSoFar, childResults[i].length);
    copiedSoFar += childResults[i].length;
  }
  return result;
}
/**
 * Add a Map Column to the schema.
 * <p>
 * Maps are List columns with a Struct named 'key_value' with a child named 'key' and a child
 * named 'value'. The caller of this method doesn't need to worry about this as this method will
 * take care of this without the knowledge of the caller.
 *
 * Note: This method always returns a nullable column, it cannot return a non-nullable column.
 * @deprecated use {@link #mapColumn(String, ColumnWriterOptions, ColumnWriterOptions, Boolean)}
 *             and pass {@code isNullable} explicitly.
 */
@Deprecated
public static ColumnWriterOptions mapColumn(String name, ColumnWriterOptions key,
                                            ColumnWriterOptions value) {
  // Delegate to the overload so the key validation and the key_value struct
  // construction live in one place; passing true preserves this method's
  // always-nullable behavior.
  return mapColumn(name, key, value, true);
}
/**
 * Add a Map Column to the schema.
 * <p>
 * Maps are List columns with a Struct named 'key_value' with a child named 'key' and a child
 * named 'value'. The caller of this method doesn't need to worry about this as this method will
 * take care of this without the knowledge of the caller.
 *
 * Note: If this map column is a key of another map, should pass isNullable = false.
 * e.g.: map1(map2(int, int), int) the map2 should be non-nullable.
 *
 * @param isNullable is the returned map nullable.
 *                   NOTE(review): declared as boxed {@code Boolean}; passing null throws a
 *                   NullPointerException when it is unboxed for listBuilder — consider the
 *                   primitive {@code boolean} if compatibility allows.
 */
public static ColumnWriterOptions mapColumn(String name, ColumnWriterOptions key,
    ColumnWriterOptions value, Boolean isNullable) {
  // Map keys must always be present, so a nullable key column is rejected up front.
  if (key.isNullable) {
    throw new IllegalArgumentException("key column can not be nullable");
  }
  StructColumnWriterOptions struct = structBuilder("key_value").build();
  // The struct's children are assigned directly (bypassing the builder) to the
  // required [key, value] layout.
  struct.childColumnOptions = new ColumnWriterOptions[]{key, value};
  ColumnWriterOptions opt = listBuilder(name, isNullable)
      .withStructColumn(struct)
      .build();
  opt.isMap = true;
  return opt;
}
/**
 * Creates a ListBuilder for a nullable column called 'name'
 */
public static ListBuilder listBuilder(String name) {
  return listBuilder(name, true);
}
/**
 * Creates a ListBuilder for column called 'name'
 */
public static ListBuilder listBuilder(String name, boolean isNullable) {
  return new ListBuilder(name, isNullable);
}
/**
 * Creates a StructBuilder for column called 'name'
 */
public static StructBuilder structBuilder(String name, boolean isNullable) {
  return new StructBuilder(name, isNullable);
}
/**
 * Creates a StructBuilder for column called 'name' with a Parquet field id
 */
public static StructBuilder structBuilder(String name, boolean isNullable, int parquetFieldId) {
  return new StructBuilder(name, isNullable, parquetFieldId);
}
/**
 * Creates a StructBuilder for a nullable column called 'name'
 */
public static StructBuilder structBuilder(String name) {
  return structBuilder(name, true);
}
/**
 * Return the name of the column
 */
public String getColumnName() {
  return columnName;
}
/**
 * Return if the column can have null values
 */
public boolean isNullable() {
  return isNullable;
}
/**
 * Return the decimal precision for this column
 */
public int getPrecision() {
  return precision;
}
/**
 * Returns true if the writer is expected to write timestamps in INT96
 */
public boolean isTimestampTypeInt96() {
  return isTimestampTypeInt96;
}
/**
 * Return the child columnOptions for this column
 */
public ColumnWriterOptions[] getChildColumnOptions() {
  return childColumnOptions;
}
/** Options describing a STRUCT column; built via {@link StructBuilder}. */
public static class StructColumnWriterOptions extends ColumnWriterOptions {
  protected StructColumnWriterOptions(AbstractStructBuilder builder) {
    super(builder);
  }
}
/** Options describing a LIST column; built via {@link ListBuilder}. */
public static class ListColumnWriterOptions extends ColumnWriterOptions {
  protected ListColumnWriterOptions(ListBuilder builder) {
    super(builder);
  }
}
/** Builder for {@link StructColumnWriterOptions}. */
public static class StructBuilder extends AbstractStructBuilder<StructBuilder, StructColumnWriterOptions> {
  public StructBuilder(String name, boolean isNullable) {
    super(name, isNullable);
  }
  public StructBuilder(String name, boolean isNullable, int parquetFieldId) {
    super(name, isNullable, parquetFieldId);
  }
  public StructColumnWriterOptions build() {
    return new StructColumnWriterOptions(this);
  }
}
/** Builder for {@link ListColumnWriterOptions}. */
public static class ListBuilder extends NestedBuilder<ListBuilder, ListColumnWriterOptions> {
  public ListBuilder(String name, boolean isNullable) {
    super(name, isNullable);
  }
  public ListColumnWriterOptions build() {
    return new ListColumnWriterOptions(this);
  }
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CompressedMetadataWriterOptions.java
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Writer options that carry a compression type plus free-form key/value metadata
 * on top of the base {@link WriterOptions}.
 */
class CompressedMetadataWriterOptions extends WriterOptions {
  private final CompressionType compressionType;
  private final Map<String, String> metadata;

  <T extends CMWriterBuilder> CompressedMetadataWriterOptions(T builder) {
    super(builder);
    this.compressionType = builder.compressionType;
    // Expose the builder's map through an unmodifiable view.
    this.metadata = Collections.unmodifiableMap(builder.metadata);
  }

  /** The compression codec to use when writing. */
  public CompressionType getCompressionType() {
    return compressionType;
  }

  /** The key/value metadata attached to the writer (unmodifiable). */
  public Map<String, String> getMetadata() {
    return metadata;
  }

  /** Metadata keys, in insertion order. */
  String[] getMetadataKeys() {
    return metadata.keySet().toArray(new String[0]);
  }

  /** Metadata values, in insertion order (parallel to getMetadataKeys). */
  String[] getMetadataValues() {
    return metadata.values().toArray(new String[0]);
  }

  /** Base builder adding metadata and compression options. */
  protected static class CMWriterBuilder<T extends CMWriterBuilder> extends WriterBuilder<T> {
    // LinkedHashMap keeps keys/values aligned with insertion order.
    final Map<String, String> metadata = new LinkedHashMap<>();
    CompressionType compressionType = CompressionType.AUTO;

    /** Add a single metadata key/value pair. */
    public T withMetadata(String key, String value) {
      metadata.put(key, value);
      return (T) this;
    }

    /** Add every entry of the given map to the metadata. */
    public T withMetadata(Map<String, String> metadata) {
      this.metadata.putAll(metadata);
      return (T) this;
    }

    /** Set the compression type to use for writing. */
    public T withCompressionType(CompressionType compression) {
      this.compressionType = compression;
      return (T) this;
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CompressionMetadataWriterOptions.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Struct-style column writer options that also carry a compression type and
 * free-form key/value metadata. Acts as the root of a column tree: every
 * getFlat* override seeds the flattening with an empty prefix, so the root
 * itself contributes no entry — only its children appear in the flat arrays.
 */
public class CompressionMetadataWriterOptions extends ColumnWriterOptions.StructColumnWriterOptions {
  private final CompressionType compressionType;
  private final Map<String, String> metadata;

  protected CompressionMetadataWriterOptions(Builder builder) {
    super(builder);
    this.compressionType = builder.compressionType;
    this.metadata = builder.metadata;
  }

  @Override
  boolean[] getFlatIsTimeTypeInt96() {
    return super.getFlatBooleans(new boolean[0], ColumnWriterOptions::getFlatIsTimeTypeInt96);
  }

  @Override
  int[] getFlatPrecision() {
    return super.getFlatInts(new int[0], ColumnWriterOptions::getFlatPrecision);
  }

  @Override
  boolean[] getFlatHasParquetFieldId() {
    return super.getFlatBooleans(new boolean[0], ColumnWriterOptions::getFlatHasParquetFieldId);
  }

  @Override
  int[] getFlatParquetFieldId() {
    return super.getFlatInts(new int[0], ColumnWriterOptions::getFlatParquetFieldId);
  }

  @Override
  int[] getFlatNumChildren() {
    return super.getFlatInts(new int[0], ColumnWriterOptions::getFlatNumChildren);
  }

  @Override
  boolean[] getFlatIsNullable() {
    return super.getFlatBooleans(new boolean[0], ColumnWriterOptions::getFlatIsNullable);
  }

  @Override
  boolean[] getFlatIsMap() {
    return super.getFlatBooleans(new boolean[0], ColumnWriterOptions::getFlatIsMap);
  }

  @Override
  boolean[] getFlatIsBinary() {
    return super.getFlatBooleans(new boolean[0], ColumnWriterOptions::getFlatIsBinary);
  }

  @Override
  String[] getFlatColumnNames() {
    return super.getFlatColumnNames(new String[0]);
  }

  /** Metadata keys, in insertion order. */
  String[] getMetadataKeys() {
    return metadata.keySet().toArray(new String[0]);
  }

  /** Metadata values, in insertion order (parallel to getMetadataKeys). */
  String[] getMetadataValues() {
    return metadata.values().toArray(new String[0]);
  }

  /** The compression codec to use when writing. */
  public CompressionType getCompressionType() {
    return compressionType;
  }

  /** The key/value metadata attached to the writer. */
  public Map<String, String> getMetadata() {
    return metadata;
  }

  /** Number of direct (top-level) child columns. */
  public int getTopLevelChildren() {
    return childColumnOptions.length;
  }

  /** Base builder adding metadata and compression options on top of the struct builder. */
  public abstract static class Builder<T extends Builder,
      V extends CompressionMetadataWriterOptions> extends AbstractStructBuilder<T, V> {
    // LinkedHashMap keeps keys/values aligned with insertion order.
    final Map<String, String> metadata = new LinkedHashMap<>();
    CompressionType compressionType = CompressionType.AUTO;

    /** Add a single metadata key/value pair. */
    public T withMetadata(String key, String value) {
      metadata.put(key, value);
      return (T) this;
    }

    /** Add every entry of the given map to the metadata. */
    public T withMetadata(Map<String, String> metadata) {
      this.metadata.putAll(metadata);
      return (T) this;
    }

    /** Set the compression type to use for writing. */
    public T withCompressionType(CompressionType compression) {
      this.compressionType = compression;
      return (T) this;
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CompressionType.java
|
/*
*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* Enumeration of compression formats.
*/
/**
 * Enumeration of compression formats. Each constant carries a numeric id
 * ({@code nativeId}) for consumption outside the JVM.
 */
public enum CompressionType {
  /** No compression */
  NONE(0),
  /** Automatically detect or select the compression codec */
  AUTO(1),
  /** Snappy format using byte-oriented LZ77 */
  SNAPPY(2),
  /** GZIP format using the DEFLATE algorithm */
  GZIP(3),
  /** BZIP2 format using Burrows-Wheeler transform */
  BZIP2(4),
  /** BROTLI format using LZ77 + Huffman + 2nd order context modeling */
  BROTLI(5),
  /** ZIP format using DEFLATE algorithm */
  ZIP(6),
  /** XZ format using LZMA(2) algorithm */
  XZ(7),
  /** ZLIB format, using DEFLATE algorithm */
  ZLIB(8),
  /** LZ4 format, using LZ77 */
  LZ4(9),
  /** Lempel–Ziv–Oberhumer format */
  LZO(10),
  /** Zstandard format */
  ZSTD(11);

  /** Numeric id for this type; the name suggests it mirrors a native-side enum. */
  final int nativeId;

  CompressionType(int nativeId) {
    this.nativeId = nativeId;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ContigSplitGroupByResult.java
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* Used to save groups and uniq key table for `Table.contiguousSplitGroupsAndGenUniqKeys`
* Each row in uniq key table is corresponding to a group
* Resource management note:
* This class is the owner of `groups` and
* `uniqKeysTable`(or uniqKeyColumns if table is not constructed)
* 1: Use `closeGroups` and `closeUniqKeyTable` to close the resources separately
* if you want to close eagerly.
* 2: Or auto close them by `AutoCloseable`
* Use `releaseGroups` to release the ownership of the `groups` to the caller,
* then the caller is responsible to close the `groups`
*/
public class ContigSplitGroupByResult implements AutoCloseable {
  // Set by JNI cpp code by field name — do not rename.
  private ContiguousTable[] groups;
  // Set by JNI cpp code by field name — raw native column handles used to lazily
  // construct the unique-key Table; do not rename.
  private long[] uniqKeyColumns;
  // An additional table is introduced to store the group keys,
  // and each key is corresponding to a group. Built lazily from uniqKeyColumns.
  private Table uniqKeysTable;
  /**
   * Get the key table, each row in the key table is corresponding to a group.
   * Note: Close the key table by `closeUniqKeyTable`
   *
   * @return the key table, it could be null if invoking native method `Table.contiguousSplitGroups`
   * with `genUniqKeys` as false
   */
  public Table getUniqKeyTable() {
    if (uniqKeysTable == null && uniqKeyColumns != null && uniqKeyColumns.length > 0) {
      // new `Table` asserts uniqKeyColumns.length > 0
      uniqKeysTable = new Table(uniqKeyColumns);
      // Ownership of the native handles moves to the Table; null the array so
      // closeUniqKeyTable won't delete them a second time.
      uniqKeyColumns = null;
    }
    return uniqKeysTable;
  }
  /**
   * Close the key table or key columns
   */
  public void closeUniqKeyTable() {
    if (uniqKeysTable != null) {
      uniqKeysTable.close();
      uniqKeysTable = null;
    } else if (uniqKeyColumns != null) {
      // The Table was never constructed, so the raw native handles must be
      // released individually.
      for (long handle : uniqKeyColumns) {
        ColumnVector.deleteCudfColumn(handle);
      }
      uniqKeyColumns = null;
    }
  }
  /**
   * Get the split group tables.
   * Note: Close the group tables by `closeGroups`
   *
   * @return the split group tables
   */
  public ContiguousTable[] getGroups() {
    return groups;
  }
  /**
   * Release the ownership of the `groups`
   * The caller is responsible to close the returned groups.
   *
   * @return split group tables
   */
  ContiguousTable[] releaseGroups() {
    ContiguousTable[] copy = groups;
    // After this, close()/closeGroups() become no-ops for the groups.
    groups = null;
    return copy;
  }
  /**
   * Close the split group tables
   */
  public void closeGroups() {
    if (groups != null) {
      for (ContiguousTable contig : groups) {
        contig.close();
      }
      groups = null;
    }
  }
  @Override
  public void close() {
    // try/finally guarantees the groups are closed even if closing the key
    // table throws.
    try {
      closeUniqKeyTable();
    } finally {
      closeGroups();
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/ContiguousTable.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.nio.ByteBuffer;
/**
* A table that is backed by a single contiguous device buffer. This makes transfers of the data
* much simpler.
*/
/**
 * A table that is backed by a single contiguous device buffer. This makes transfers of the data
 * much simpler.
 */
public final class ContiguousTable implements AutoCloseable {
  // Lazily reconstructed from the packed metadata; may stay null until getTable().
  private Table table = null;
  private DeviceMemoryBuffer buffer;
  private final long rowCount;
  private PackedColumnMetadata meta;
  private ByteBuffer metadataBuffer;

  // This method is invoked by JNI — do not rename or change its signature.
  static ContiguousTable fromPackedTable(long metadataHandle,
                                         long dataAddress,
                                         long dataLength,
                                         long rmmBufferAddress,
                                         long rowCount) {
    DeviceMemoryBuffer buffer = DeviceMemoryBuffer.fromRmm(dataAddress, dataLength, rmmBufferAddress);
    return new ContiguousTable(metadataHandle, buffer, rowCount);
  }

  /** Construct a contiguous table instance given a table and the device buffer backing it. */
  ContiguousTable(Table table, DeviceMemoryBuffer buffer) {
    this.meta = new PackedColumnMetadata(createPackedMetadata(table.getNativeView(),
        buffer.getAddress(), buffer.getLength()));
    this.table = table;
    this.buffer = buffer;
    this.rowCount = table.getRowCount();
  }

  /**
   * Construct a contiguous table
   * @param metadataHandle address of the cudf packed_table host-based metadata instance
   * @param buffer buffer containing the packed table data
   * @param rowCount number of rows in the table
   */
  ContiguousTable(long metadataHandle, DeviceMemoryBuffer buffer, long rowCount) {
    this.meta = new PackedColumnMetadata(metadataHandle);
    this.buffer = buffer;
    this.rowCount = rowCount;
  }

  /**
   * Returns the number of rows in the table. This accessor avoids manifesting
   * the Table instance if only the row count is needed.
   */
  public long getRowCount() {
    return rowCount;
  }

  /** Get the table instance, reconstructing it from the metadata if necessary. */
  public synchronized Table getTable() {
    if (table == null) {
      table = Table.fromPackedTable(getMetadataDirectBuffer(), buffer);
    }
    return table;
  }

  /** Get the device buffer backing the contiguous table data. */
  public DeviceMemoryBuffer getBuffer() {
    return buffer;
  }

  /**
   * Get the byte buffer containing the host metadata describing the schema and layout of the
   * contiguous table.
   * <p>
   * NOTE: This is a direct byte buffer that is backed by the underlying native metadata instance
   * and therefore is only valid to be used while this contiguous table instance is valid.
   * Attempts to cache and access the resulting buffer after this instance has been destroyed
   * will result in undefined behavior including the possibility of segmentation faults
   * or data corruption.
   */
  public ByteBuffer getMetadataDirectBuffer() {
    return meta.getMetadataDirectBuffer();
  }

  /**
   * Close the contiguous table instance and its underlying resources.
   * Safe to call more than once: each resource is nulled after closing.
   */
  @Override
  public void close() {
    if (meta != null) {
      meta.close();
      // Null out so a second close() does not close the native metadata twice
      // (table and buffer were already guarded this way).
      meta = null;
    }
    if (table != null) {
      table.close();
      table = null;
    }
    if (buffer != null) {
      buffer.close();
      buffer = null;
    }
  }

  // create packed metadata for a table backed by a single data buffer
  private static native long createPackedMetadata(long tableView, long dataAddress, long dataSize);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFile.java
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
/**
* JNI wrapper for accessing the cuFile API.
* <p>
* Using this wrapper requires GPUDirect Storage (GDS)/cuFile to be installed in the target
* environment, and the jar to be built with `USE_GDS=ON`. Otherwise it will throw an exception when
* loading.
* <p>
* The Java APIs are experimental and subject to change.
*
* @see <a href="https://docs.nvidia.com/gpudirect-storage/">GDS documentation</a>
*/
public class CuFile {
  private static final Logger log = LoggerFactory.getLogger(CuFile.class);
  // Guarded by the class lock in initialize(); true only after the native lib
  // loaded and the driver opened successfully.
  private static boolean initialized = false;
  private static CuFileDriver driver;
  static {
    initialize();
  }
  /**
   * Load the native libraries needed for libcufilejni, if not loaded already; open the cuFile
   * driver, and add a shutdown hook to close it.
   */
  static synchronized void initialize() {
    if (!initialized) {
      try {
        NativeDepsLoader.loadNativeDeps(new String[]{"cufilejni"});
        driver = new CuFileDriver();
        // The driver stays open for the life of the JVM and is closed on exit.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
          driver.close();
        }));
        initialized = true;
      } catch (Throwable t) {
        // Cannot throw an exception here as the CI/CD machine may not have GDS installed.
        log.error("Could not load cuFile jni library...", t);
      }
    }
  }
  /**
   * Check if the libcufilejni library is loaded.
   *
   * @return true if the libcufilejni library has been successfully loaded.
   */
  public static boolean libraryLoaded() {
    return initialized;
  }
  /**
   * Write a device buffer to a given file path synchronously.
   * <p>
   * This method is NOT thread safe if the path points to the same file on disk.
   *
   * @param path The file path to copy to.
   * @param file_offset The file offset from which to write the buffer.
   * @param buffer The device buffer to copy from.
   */
  public static void writeDeviceBufferToFile(File path, long file_offset,
                                             BaseDeviceMemoryBuffer buffer) {
    writeDeviceMemoryToFile(path, file_offset, buffer.getAddress(), buffer.getLength());
  }
  /**
   * Write device memory to a given file path synchronously.
   * <p>
   * This method is NOT thread safe if the path points to the same file on disk.
   *
   * @param path The file path to copy to.
   * @param file_offset The file offset from which to write the buffer.
   * @param address The device memory address to copy from.
   * @param length The length to copy.
   */
  public static void writeDeviceMemoryToFile(File path, long file_offset, long address,
                                             long length) {
    writeToFile(path.getAbsolutePath(), file_offset, address, length);
  }
  /**
   * Append a device buffer to a given file path synchronously.
   * <p>
   * This method is NOT thread safe if the path points to the same file on disk.
   *
   * @param path The file path to copy to.
   * @param buffer The device buffer to copy from.
   * @return The file offset from which the buffer was appended.
   */
  public static long appendDeviceBufferToFile(File path, BaseDeviceMemoryBuffer buffer) {
    return appendDeviceMemoryToFile(path, buffer.getAddress(), buffer.getLength());
  }
  /**
   * Append device memory to a given file path synchronously.
   * <p>
   * This method is NOT thread safe if the path points to the same file on disk.
   *
   * @param path The file path to copy to.
   * @param address The device memory address to copy from.
   * @param length The length to copy.
   * @return The file offset from which the buffer was appended.
   */
  public static long appendDeviceMemoryToFile(File path, long address, long length) {
    return appendToFile(path.getAbsolutePath(), address, length);
  }
  /**
   * Read a file into a device buffer synchronously.
   * <p>
   * This method is NOT thread safe if the path points to the same file on disk.
   *
   * @param buffer The device buffer to copy into.
   * @param path The file path to copy from.
   * @param fileOffset The file offset from which to copy the content.
   */
  public static void readFileToDeviceBuffer(BaseDeviceMemoryBuffer buffer, File path,
                                            long fileOffset) {
    readFileToDeviceMemory(buffer.getAddress(), buffer.getLength(), path, fileOffset);
  }
  /**
   * Read a file into device memory synchronously.
   * <p>
   * This method is NOT thread safe if the path points to the same file on disk.
   *
   * @param address The device memory address to read into.
   * @param length The length to read.
   * @param path The file path to copy from.
   * @param fileOffset The file offset from which to copy the content.
   */
  public static void readFileToDeviceMemory(long address, long length, File path, long fileOffset) {
    readFromFile(address, length, path.getAbsolutePath(), fileOffset);
  }
  // JNI entry points implemented in libcufilejni.
  private static native void writeToFile(String path, long file_offset, long address, long length);
  private static native long appendToFile(String path, long address, long length);
  private static native void readFromFile(long address, long length, String path, long fileOffset);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFileBuffer.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Represents a cuFile buffer: device memory optionally registered with cuFile for GPUDirect
 * Storage I/O. Owns the underlying {@link DeviceMemoryBuffer} and releases it on close.
 */
public final class CuFileBuffer extends BaseDeviceMemoryBuffer {
  // cuFile registration requires the buffer length to be a multiple of this alignment.
  private static final int ALIGNMENT = 4096;

  private final DeviceMemoryBuffer deviceMemoryBuffer;
  private final CuFileResourceCleaner cleaner;

  static {
    // Ensure the cuFile native environment is set up before any buffer is created.
    CuFile.initialize();
  }

  /**
   * Construct a new cuFile buffer.
   *
   * @param buffer The device memory buffer used for the cuFile buffer. This buffer is owned
   *               by the cuFile buffer, and will be closed when the cuFile buffer is closed.
   * @param registerBuffer If true, register the cuFile buffer.
   * @throws IllegalArgumentException if registration is requested and the buffer length is not
   *                                  a multiple of {@code ALIGNMENT}; the owned buffer is closed
   *                                  before throwing so it does not leak.
   */
  private CuFileBuffer(DeviceMemoryBuffer buffer, boolean registerBuffer) {
    super(buffer.address, buffer.length, (MemoryBufferCleaner) null);
    if (registerBuffer && !isAligned(buffer)) {
      // Close the owned buffer before failing so it does not leak.
      buffer.close();
      throw new IllegalArgumentException(
          "To register a cuFile buffer, its length must be a multiple of " + ALIGNMENT);
    }
    deviceMemoryBuffer = buffer;
    cleaner = new CuFileResourceCleaner(create(buffer.address, buffer.length, registerBuffer),
        CuFileBuffer::destroy);
    MemoryCleaner.register(this, cleaner);
  }

  /**
   * Allocate memory for use with cuFile on the GPU. You must close it when done.
   *
   * @param bytes size in bytes to allocate
   * @param registerBuffer If true, register the cuFile buffer.
   * @return the buffer
   */
  public static CuFileBuffer allocate(long bytes, boolean registerBuffer) {
    DeviceMemoryBuffer buffer = DeviceMemoryBuffer.allocate(bytes);
    return new CuFileBuffer(buffer, registerBuffer);
  }

  /**
   * Slicing is not supported for cuFile buffers.
   *
   * @throws UnsupportedOperationException always
   */
  @Override
  public MemoryBuffer slice(long offset, long len) {
    throw new UnsupportedOperationException("Slice on cuFile buffer is not supported");
  }

  /** Release the native cuFile resource and then the owned device memory. */
  @Override
  public void close() {
    cleaner.close(this);
    deviceMemoryBuffer.close();
  }

  /** @return the native cuFile buffer pointer. */
  long getPointer() {
    return cleaner.getPointer();
  }

  // Static: reads no instance state, only the argument's length.
  private static boolean isAligned(BaseDeviceMemoryBuffer buffer) {
    return buffer.length % ALIGNMENT == 0;
  }

  private static native long create(long address, long length, boolean registerBuffer);

  private static native void destroy(long pointer);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFileDriver.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Represents a cuFile driver.
 */
final class CuFileDriver implements AutoCloseable {
  private final CuFileResourceCleaner cleaner;

  CuFileDriver() {
    // Open the native driver resource and track it so leaks are detected and cleaned up.
    cleaner = new CuFileResourceCleaner(create(), CuFileDriver::destroy);
    MemoryCleaner.register(this, cleaner);
  }

  /** Close the native driver; throws IllegalStateException if called more than once. */
  @Override
  public void close() {
    cleaner.close(this);
  }

  // JNI bindings; declarations only, implemented in the native library.
  private static native long create();

  private static native void destroy(long pointer);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFileHandle.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Represents a cuFile file handle.
 */
abstract class CuFileHandle implements AutoCloseable {
  private final CuFileResourceCleaner cleaner;

  static {
    // Ensure the cuFile native environment is set up before any handle is created.
    CuFile.initialize();
  }

  /**
   * Wrap a native cuFile handle; ownership of the pointer transfers to this object.
   *
   * @param pointer the native cuFile handle pointer, created by a subclass.
   */
  protected CuFileHandle(long pointer) {
    cleaner = new CuFileResourceCleaner(pointer, CuFileHandle::destroy);
    MemoryCleaner.register(this, cleaner);
  }

  /** Close the native handle; throws IllegalStateException if called more than once. */
  @Override
  public void close() {
    cleaner.close(this);
  }

  /** @return the native cuFile handle pointer (0 once cleaned). */
  protected long getPointer() {
    return cleaner.getPointer();
  }

  private static native void destroy(long pointer);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFileReadHandle.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * A cuFile file handle used for reading file contents into device memory.
 */
public final class CuFileReadHandle extends CuFileHandle {

  /**
   * Open the file at the given path for cuFile reads.
   *
   * @param path The file path for reading.
   */
  public CuFileReadHandle(String path) {
    super(create(path));
  }

  /**
   * Read the file content into the specified cuFile buffer.
   *
   * @param buffer The cuFile buffer to store the content.
   * @param fileOffset The file offset from which to read.
   */
  public void read(CuFileBuffer buffer, long fileOffset) {
    final long fileHandle = getPointer();
    final long deviceBuffer = buffer.getPointer();
    readIntoBuffer(fileHandle, fileOffset, deviceBuffer);
  }

  private static native long create(String path);

  private static native void readIntoBuffer(long file, long fileOffset, long buffer);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFileResourceCleaner.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Keeps track and cleans a cuFile native resource.
 */
final class CuFileResourceCleaner extends MemoryCleaner.Cleaner {
  private static final Logger log = LoggerFactory.getLogger(CuFileResourceCleaner.class);
  // Native resource pointer; reset to 0 once destroyed.
  private long pointer;
  private final CuFileResourceDestroyer destroyer;
  private boolean closed = false;

  CuFileResourceCleaner(long pointer, CuFileResourceDestroyer destroyer) {
    this.pointer = pointer;
    this.destroyer = destroyer;
    addRef();
  }

  // NOTE(review): not synchronized, unlike close()/cleanImpl(); looks like callers read it
  // from the owning thread only — confirm before relying on cross-thread visibility.
  long getPointer() {
    return pointer;
  }

  synchronized void close(Object resource) {
    delRef();
    if (closed) {
      // Double close: dump ref-count history to help locate the offending caller.
      logRefCountDebug("double free " + resource);
      throw new IllegalStateException("Close called too many times " + resource);
    }
    clean(false);
    closed = true;
  }

  @Override
  protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
    boolean neededCleanup = false;
    long origAddress = pointer;
    if (pointer != 0) {
      try {
        destroyer.destroy(pointer);
      } finally {
        // Always mark the resource as freed even if an exception is thrown.
        // We cannot know how far it progressed before the exception, and
        // therefore it is unsafe to retry.
        pointer = 0;
      }
      neededCleanup = true;
    }
    if (neededCleanup && logErrorIfNotClean) {
      // Cleanup was still needed when leak logging was requested: the resource leaked.
      log.error("A CUFile RESOURCE WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
      logRefCountDebug("Leaked cuFile resource");
    }
    return neededCleanup;
  }

  @Override
  public boolean isClean() {
    return pointer == 0;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFileResourceDestroyer.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Destroys a cuFile native resource. Single-abstract-method interface intended to be
 * implemented with a method reference (e.g. {@code CuFileBuffer::destroy}).
 */
@FunctionalInterface
interface CuFileResourceDestroyer {
  /**
   * Release the native resource behind the pointer.
   *
   * @param pointer the native resource pointer to destroy.
   */
  void destroy(long pointer);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CuFileWriteHandle.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Represents a cuFile file handle for writing.
 */
public final class CuFileWriteHandle extends CuFileHandle {

  /**
   * Construct a writer using the specified file path.
   *
   * @param path The file path for writing.
   */
  public CuFileWriteHandle(String path) {
    super(create(path));
  }

  /**
   * Write the specified cuFile buffer into the file.
   *
   * @param buffer The cuFile buffer to write from.
   * @param length The number of bytes to write.
   * @param fileOffset The starting file offset from which to write.
   */
  public void write(CuFileBuffer buffer, long length, long fileOffset) {
    writeFromBuffer(getPointer(), fileOffset, buffer.getPointer(), length);
  }

  /**
   * Append the specified cuFile buffer to the file.
   *
   * @param buffer The cuFile buffer to append from.
   * @param length The number of bytes to append.
   * @return The file offset from which the buffer was appended.
   */
  public long append(CuFileBuffer buffer, long length) {
    return appendFromBuffer(getPointer(), buffer.getPointer(), length);
  }

  private static native long create(String path);

  private static native void writeFromBuffer(long file, long fileOffset, long buffer, long length);

  private static native long appendFromBuffer(long file, long buffer, long length);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/Cuda.java
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Cuda {
  // This needs to happen first before calling any native methods.
  static {
    NativeDepsLoader.loadNativeDeps();
  }

  // Defined in driver_types.h in cuda library.
  static final int CPU_DEVICE_ID = -1;
  static final long CUDA_STREAM_DEFAULT = 0;
  static final long CUDA_STREAM_LEGACY = 1;
  static final long CUDA_STREAM_PER_THREAD = 2;
  // Pick the built-in stream id matching how the native library was built (PTDS vs legacy).
  private final static long DEFAULT_STREAM_ID = isPtdsEnabled() ? CUDA_STREAM_PER_THREAD : CUDA_STREAM_LEGACY;
  private static final Logger log = LoggerFactory.getLogger(Cuda.class);
  // Lazily computed by isEnvCompatibleForTesting(); null until the first check.
  private static Boolean isCompat = null;

  private static class StreamCleaner extends MemoryCleaner.Cleaner {
    private long stream;

    StreamCleaner(long stream) {
      this.stream = stream;
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      boolean neededCleanup = false;
      long origAddress = stream;
      // Built-in CUDA streams (default/legacy/per-thread) are never destroyed.
      if (stream != CUDA_STREAM_DEFAULT &&
          stream != CUDA_STREAM_LEGACY &&
          stream != CUDA_STREAM_PER_THREAD) {
        destroyStream(stream);
        stream = 0;
        neededCleanup = true;
      }
      if (neededCleanup && logErrorIfNotClean) {
        log.error("A CUDA STREAM WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
        logRefCountDebug("Leaked stream");
      }
      return neededCleanup;
    }

    @Override
    public boolean isClean() {
      return stream == 0;
    }
  }

  /** A class representing a CUDA stream */
  public static final class Stream implements AutoCloseable {
    private final StreamCleaner cleaner;
    boolean closed = false;
    private final long id;

    /**
     * Create a new CUDA stream
     * @param isNonBlocking whether stream should be non-blocking with respect to the default stream
     */
    public Stream(boolean isNonBlocking) {
      this.cleaner = new StreamCleaner(createStream(isNonBlocking));
      this.id = cleaner.id;
      MemoryCleaner.register(this, cleaner);
      cleaner.addRef();
    }

    private Stream() {
      // No cleaner for the default stream...
      this.cleaner = null;
      this.id = -1;
    }

    // Wraps an externally-owned stream id; no cleaner, so this object never destroys it.
    private Stream(long id) {
      this.cleaner = null;
      this.id = id;
    }

    /**
     * Wrap a given stream ID to make it accessible.
     */
    static Stream wrap(long id) {
      if (id == -1) {
        return DEFAULT_STREAM;
      }
      return new Stream(id);
    }

    /**
     * Have this stream not execute new work until the work recorded in event completes.
     * @param event the event to wait on.
     */
    public void waitOn(Event event) {
      streamWaitEvent(getStream(), event.getEvent());
    }

    /** @return the native stream handle, or the process-wide default stream id when unowned. */
    public long getStream() {
      return cleaner == null ? DEFAULT_STREAM_ID : cleaner.stream;
    }

    /**
     * Block the thread to wait until all pending work on this stream completes. Note that this
     * does not follow any of the java threading standards. Interrupt will not work to wake up
     * the thread.
     */
    public void sync() {
      streamSynchronize(getStream());
    }

    @Override
    public String toString() {
      return "CUDA STREAM (ID: " + id + " " + Long.toHexString(getStream()) + ")";
    }

    @Override
    public synchronized void close() {
      // NOTE: when cleaner is null (default/wrapped streams) this is effectively a no-op and
      // 'closed' is never set, so repeated close of such streams does not throw.
      if (cleaner != null) {
        cleaner.delRef();
      }
      if (closed) {
        if (cleaner != null) {
          cleaner.logRefCountDebug("double free " + this);
        }
        throw new IllegalStateException("Close called too many times " + this);
      }
      if (cleaner != null) {
        cleaner.clean(false);
        closed = true;
      }
    }
  }

  public static final Stream DEFAULT_STREAM = new Stream();

  private static class EventCleaner extends MemoryCleaner.Cleaner {
    private long event;

    EventCleaner(long event) {
      this.event = event;
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      boolean neededCleanup = false;
      long origAddress = event;
      if (event != 0) {
        try {
          destroyEvent(event);
        } finally {
          // Always mark the resource as freed even if an exception is thrown.
          // We cannot know how far it progressed before the exception, and
          // therefore it is unsafe to retry.
          event = 0;
        }
        neededCleanup = true;
      }
      if (neededCleanup && logErrorIfNotClean) {
        log.error("A CUDA EVENT WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
        logRefCountDebug("Leaked event");
      }
      return neededCleanup;
    }

    @Override
    public boolean isClean() {
      return event == 0;
    }
  }

  public static final class Event implements AutoCloseable {
    private final EventCleaner cleaner;
    boolean closed = false;

    /**
     * Create an event that is as fast as possible, timing is disabled and no blockingSync.
     */
    public Event() {
      this(false, false);
    }

    /**
     * Create an event to be used for CUDA synchronization.
     * @param enableTiming true if the event should record timing information.
     * @param blockingSync true if event should use blocking synchronization.
     *                     A host thread that calls sync() to wait on an event created with this
     *                     flag will block until the event actually completes.
     */
    public Event(boolean enableTiming, boolean blockingSync) {
      this.cleaner = new EventCleaner(createEvent(enableTiming, blockingSync));
      MemoryCleaner.register(this, cleaner);
      cleaner.addRef();
    }

    long getEvent() {
      return cleaner.event;
    }

    /**
     * Check to see if the event has completed or not. This is the equivalent of cudaEventQuery.
     * @return true it has completed else false.
     */
    public boolean hasCompleted() {
      return eventQuery(getEvent());
    }

    /**
     * Captures the contents of stream at the time of this call. This event and stream must be on
     * the same device. Calls such as hasCompleted() or Stream.waitEvent() will then examine or wait for
     * completion of the work that was captured. Uses of stream after this call do not modify event.
     * @param stream the stream to record the state of.
     */
    public void record(Stream stream) {
      eventRecord(getEvent(), stream.getStream());
    }

    /**
     * Captures the contents of the default stream at the time of this call.
     */
    public void record() {
      record(DEFAULT_STREAM);
    }

    /**
     * Block the thread to wait for the event to complete. Note that this does not follow any of
     * the java threading standards. Interrupt will not work to wake up the thread.
     */
    public void sync() {
      eventSynchronize(getEvent());
    }

    @Override
    public String toString() {
      return "CUDA EVENT (ID: " + cleaner.id + " " + Long.toHexString(getEvent()) + ")";
    }

    @Override
    public synchronized void close() {
      cleaner.delRef();
      if (closed) {
        cleaner.logRefCountDebug("double free " + this);
        throw new IllegalStateException("Close called too many times " + this);
      }
      cleaner.clean(false);
      closed = true;
    }
  }

  /**
   * Gets the CUDA compute mode of the current device.
   *
   * @return the enum value of CudaComputeMode
   */
  public static CudaComputeMode getComputeMode() {
    return CudaComputeMode.fromNative(Cuda.getNativeComputeMode());
  }

  /**
   * Mapping: cudaMemGetInfo(size_t *free, size_t *total)
   */
  public static native CudaMemInfo memGetInfo() throws CudaException;

  /**
   * Allocate pinned memory on the host. This call takes a long time, but can really speed up
   * memory transfers.
   * @param size how much memory, in bytes, to allocate.
   * @return the address to the allocated memory.
   * @throws CudaException on any error.
   */
  static native long hostAllocPinned(long size) throws CudaException;

  /**
   * Free memory allocated with hostAllocPinned.
   * @param ptr the pointer returned by hostAllocPinned.
   * @throws CudaException on any error.
   */
  static native void freePinned(long ptr) throws CudaException;

  /**
   * Copies bytes between buffers using the default CUDA stream.
   * The copy has completed when this returns, but the memory copy could overlap with
   * operations occurring on other streams.
   * Specifying pointers that do not match the copy direction results in undefined behavior.
   * @param dst - Destination memory address
   * @param src - Source memory address
   * @param count - Size in bytes to copy
   * @param kind - Type of transfer. {@link CudaMemcpyKind}
   */
  static void memcpy(long dst, long src, long count, CudaMemcpyKind kind) {
    memcpy(dst, src, count, kind, DEFAULT_STREAM);
  }

  /**
   * Copies bytes between buffers using the default CUDA stream.
   * The copy has not necessarily completed when this returns, but the memory copy could
   * overlap with operations occurring on other streams.
   * Specifying pointers that do not match the copy direction results in undefined behavior.
   * @param dst - Destination memory address
   * @param src - Source memory address
   * @param count - Size in bytes to copy
   * @param kind - Type of transfer. {@link CudaMemcpyKind}
   */
  static void asyncMemcpy(long dst, long src, long count, CudaMemcpyKind kind) {
    asyncMemcpy(dst, src, count, kind, DEFAULT_STREAM);
  }

  /**
   * Sets count bytes starting at the memory area pointed to by dst, with value.
   * The operation has completed when this returns, but it could overlap with operations occurring
   * on other streams.
   * @param dst - Destination memory address
   * @param value - Byte value to set dst with
   * @param count - Size in bytes to set
   */
  public static native void memset(long dst, byte value, long count) throws CudaException;

  /**
   * Sets count bytes starting at the memory area pointed to by dst, with value.
   * The operation has not necessarily completed when this returns, but it could overlap with
   * operations occurring on other streams.
   * @param dst - Destination memory address
   * @param value - Byte value to set dst with
   * @param count - Size in bytes to set
   */
  public static native void asyncMemset(long dst, byte value, long count) throws CudaException;

  /**
   * Get the id of the current device.
   * @return the id of the current device
   * @throws CudaException on any error
   */
  public static native int getDevice() throws CudaException;

  /**
   * Get the device count.
   * @return returns the number of compute-capable devices
   * @throws CudaException on any error
   */
  public static native int getDeviceCount() throws CudaException;

  /**
   * Set the id of the current device.
   * <p>Note this is relative to CUDA_SET_VISIBLE_DEVICES, e.g. if
   * CUDA_SET_VISIBLE_DEVICES=1,0, and you call setDevice(0), you will get device 1.
   * <p>Note if RMM has been initialized and the requested device ID does not
   * match the device used to initialize RMM then this will throw an error.
   * @throws CudaException on any error
   */
  public static native void setDevice(int device) throws CudaException, CudfException;

  /**
   * Set the device for this thread to the appropriate one. Java loves threads, but cuda requires
   * each thread to have the device set explicitly or it falls back to CUDA_VISIBLE_DEVICES. Most
   * JNI calls through the cudf API will do this for you, but if you are writing your own JNI
   * calls that extend cudf you might want to call this before calling into your JNI APIs to
   * ensure that the device is set correctly.
   * @throws CudaException on any error
   */
  public static native void autoSetDevice() throws CudaException;

  /**
   * Get the CUDA Driver version, which is the latest version of CUDA supported by the driver.
   * The version is returned as (1000 major + 10 minor). For example, CUDA 9.2 would be
   * represented by 9020. If no driver is installed,then 0 is returned as the driver version.
   *
   * @return the CUDA driver version
   * @throws CudaException on any error
   */
  public static native int getDriverVersion() throws CudaException;

  /**
   * Get the CUDA Runtime version of the current CUDA Runtime instance. The version is returned
   * as (1000 major + 10 minor). For example, CUDA 9.2 would be represented by 9020.
   *
   * @return the CUDA Runtime version
   * @throws CudaException on any error
   */
  public static native int getRuntimeVersion() throws CudaException;

  /**
   * Gets the CUDA device compute mode of the current device.
   *
   * @return the value of cudaComputeMode
   * @throws CudaException on any error
   */
  static native int getNativeComputeMode() throws CudaException;

  /**
   * Gets the major CUDA compute capability of the current device.
   *
   * For reference: https://developer.nvidia.com/cuda-gpus
   * Hardware Generation Compute Capability
   * Ampere 8.x
   * Turing 7.5
   * Volta 7.0, 7.2
   * Pascal 6.x
   * Maxwell 5.x
   * Kepler 3.x
   * Fermi 2.x
   *
   * @return The Major compute capability version number of the current CUDA device
   * @throws CudaException on any error
   */
  public static native int getComputeCapabilityMajor() throws CudaException;

  /**
   * Gets the minor CUDA compute capability of the current device.
   *
   * For reference: https://developer.nvidia.com/cuda-gpus
   * Hardware Generation Compute Capability
   * Ampere 8.x
   * Turing 7.5
   * Volta 7.0, 7.2
   * Pascal 6.x
   * Maxwell 5.x
   * Kepler 3.x
   * Fermi 2.x
   *
   * @return The Minor compute capability version number of the current CUDA device
   * @throws CudaException on any error
   */
  public static native int getComputeCapabilityMinor() throws CudaException;

  /**
   * Calls cudaFree(0). This can be used to initialize the GPU after a setDevice()
   * @throws CudaException on any error
   */
  public static native void freeZero() throws CudaException;

  /**
   * Create a CUDA stream
   * @param isNonBlocking whether stream should be non-blocking with respect to the default stream
   * @return handle to a CUDA stream
   * @throws CudaException on any error
   */
  static native long createStream(boolean isNonBlocking) throws CudaException;

  /**
   * Destroy a CUDA stream
   * @param stream handle to the CUDA stream to destroy
   * @throws CudaException on any error
   */
  static native void destroyStream(long stream) throws CudaException;

  /**
   * Have this stream not execute new work until the work recorded in event completes.
   * @param stream the stream handle.
   * @param event the event handle.
   */
  static native void streamWaitEvent(long stream, long event) throws CudaException;

  /**
   * Block the thread until the pending execution on the stream completes
   * @param stream the stream handle
   * @throws CudaException on any error.
   */
  static native void streamSynchronize(long stream) throws CudaException;

  /**
   * Create a CUDA event
   * @param enableTiming true if timing should be enabled.
   * @param blockingSync true if blocking sync should be enabled.
   * @return handle to a CUDA event
   * @throws CudaException on any error
   */
  static native long createEvent(boolean enableTiming, boolean blockingSync) throws CudaException;

  /**
   * Destroy a CUDA event
   * @param event handle to the CUDA event to destroy
   * @throws CudaException on any error
   */
  static native void destroyEvent(long event) throws CudaException;

  /**
   * Check to see if the event happened or not.
   * @param event the event handle
   * @return true the event finished else false.
   * @throws CudaException on any error.
   */
  static native boolean eventQuery(long event) throws CudaException;

  /**
   * Reset the state of this event to be what is on the stream right now.
   * @param event the event handle
   * @param stream the stream handle
   * @throws CudaException on any error.
   */
  static native void eventRecord(long event, long stream) throws CudaException;

  /**
   * Block the thread until the execution recorded in the event is complete.
   * @param event the event handle
   * @throws CudaException on any error.
   */
  static native void eventSynchronize(long event) throws CudaException;

  /**
   * Copies bytes between buffers using the specified CUDA stream.
   * The copy has completed when this returns, but the memory copy could overlap with
   * operations occurring on other streams.
   * Specifying pointers that do not match the copy direction results in undefined behavior.
   * @param dst destination memory address
   * @param src source memory address
   * @param count size in bytes to copy
   * @param kind direction of transfer. {@link CudaMemcpyKind}
   * @param stream CUDA stream to use for the copy
   */
  static void memcpy(long dst, long src, long count, CudaMemcpyKind kind, Stream stream) {
    memcpyOnStream(dst, src, count, kind.getValue(), stream.getStream());
  }

  private static native void memcpyOnStream(long dst, long src, long count, int kind,
      long stream) throws CudaException;

  /**
   * Copies bytes between buffers using the specified CUDA stream.
   * The copy has not necessarily completed when this returns, but the memory copy could
   * overlap with operations occurring on other streams.
   * Specifying pointers that do not match the copy direction results in undefined behavior.
   * @param dst destination memory address
   * @param src source memory address
   * @param count size in bytes to copy
   * @param kind direction of transfer. {@link CudaMemcpyKind}
   * @param stream CUDA stream to use for the copy
   */
  static void asyncMemcpy(long dst, long src, long count, CudaMemcpyKind kind, Stream stream) {
    asyncMemcpyOnStream(dst, src, count, kind.getValue(), stream.getStream());
  }

  private static native void asyncMemcpyOnStream(long dst, long src, long count, int kind,
      long stream) throws CudaException;

  /**
   * This should only be used for tests, to enable or disable tests if the current environment
   * is not compatible with this version of the library. Currently it only does some very
   * basic checks, but these may be expanded in the future depending on needs.
   * @return true if it is compatible else false.
   */
  public static synchronized boolean isEnvCompatibleForTesting() {
    // Result is cached in isCompat after the first call.
    if (isCompat == null) {
      if (NativeDepsLoader.libraryLoaded()) {
        try {
          int device = getDevice();
          if (device >= 0) {
            isCompat = true;
            return isCompat;
          }
        } catch (Throwable e) {
          log.error("Error trying to detect device", e);
        }
      }
      isCompat = false;
    }
    return isCompat;
  }

  /**
   * Whether per-thread default stream is enabled.
   */
  public static native boolean isPtdsEnabled();

  /**
   * Copy data from multiple device buffer sources to multiple device buffer destinations.
   * For each buffer to copy there is a corresponding entry in the destination address, source
   * address, and copy size vectors.
   * @param destAddrs vector of device destination addresses
   * @param srcAddrs vector of device source addresses
   * @param copySizes vector of copy sizes
   * @param stream CUDA stream to use for the copy
   */
  public static void multiBufferCopyAsync(long [] destAddrs,
                                          long [] srcAddrs,
                                          long [] copySizes,
                                          Stream stream) {
    // Temporary sub-par stand-in for a multi-buffer copy CUDA kernel
    assert(destAddrs.length == srcAddrs.length);
    assert(copySizes.length == destAddrs.length);
    try (NvtxRange copyRange = new NvtxRange("multiBufferCopyAsync", NvtxColor.CYAN)){
      for (int i = 0; i < destAddrs.length; i++) {
        asyncMemcpy(destAddrs[i], srcAddrs[i], copySizes[i], CudaMemcpyKind.DEVICE_TO_DEVICE, stream);
      }
    }
  }

  /**
   * Begins an Nsight profiling session, if a profiler is currently attached.
   * @note if a profiler session has a already started, `profilerStart` has
   * no effect.
   */
  public static native void profilerStart();

  /**
   * Stops an active Nsight profiling session.
   * @note if a profiler session isn't active, `profilerStop` has
   * no effect.
   */
  public static native void profilerStop();

  /**
   * Synchronizes the whole device using cudaDeviceSynchronize.
   * @note this is very expensive and should almost never be used
   */
  public static native void deviceSynchronize();
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudaComputeMode.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Java mapping of the CUDA device compute modes (cudaComputeMode).
 */
public enum CudaComputeMode {
  /**
   * Default compute mode: multiple threads can use cudaSetDevice() with this device.
   */
  DEFAULT(0),
  /**
   * Compute-exclusive-thread mode: only one thread in one process may use
   * cudaSetDevice() with this device.
   *
   * WARNING: This mode was deprecated! Using EXCLUSIVE_PROCESS instead.
   */
  EXCLUSIVE(1),
  /**
   * Compute-prohibited mode: no threads can use cudaSetDevice() with this device.
   */
  PROHIBITED(2),
  /**
   * Compute-exclusive-process mode: many threads in one process may use
   * cudaSetDevice() with this device.
   */
  EXCLUSIVE_PROCESS(3);

  // The value of the corresponding native compute mode.
  final int nativeId;

  // Cached to avoid cloning the values array on every lookup.
  private static final CudaComputeMode[] COMPUTE_MODES = CudaComputeMode.values();

  private CudaComputeMode(int nativeId) {
    this.nativeId = nativeId;
  }

  /**
   * Translate a native cudaComputeMode value into its enum constant.
   *
   * @param nativeId the native compute mode value.
   * @return the matching enum constant.
   * @throws IllegalArgumentException if no constant matches.
   */
  static CudaComputeMode fromNative(int nativeId) {
    for (int i = 0; i < COMPUTE_MODES.length; i++) {
      CudaComputeMode candidate = COMPUTE_MODES[i];
      if (candidate.nativeId == nativeId) {
        return candidate;
      }
    }
    throw new IllegalArgumentException("Could not translate " + nativeId + " into a CudaComputeMode");
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudaException.java
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import java.util.HashMap;
import java.util.Map;
/**
* Exception from the cuda language/library. Be aware that because of how cuda does asynchronous
* processing exceptions from cuda can be thrown by method calls that did not cause the exception
* to take place. These will take place on the same thread that caused the error.
* <p>
* Please See
* <a href="https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__ERROR.html">the cuda docs</a>
* for more details on how this works.
* <p>
* In general you can recover from cuda errors even in async calls if you make sure that you
* don't switch between threads for different parts of processing that can be retried as a chunk.
*/
public class CudaException extends RuntimeException {
  /** Creates an exception with a placeholder native stacktrace. */
  CudaException(String message, int errorCode) {
    this(message, "No native stacktrace is available.", errorCode);
  }

  /**
   * Creates an exception carrying the stacktrace captured on the native side.
   * @param message human readable description of the failure
   * @param nativeStacktrace stacktrace captured in native code
   * @param errorCode the native cudaError value; unrecognized codes map to UnknownNativeError
   */
  CudaException(String message, String nativeStacktrace, int errorCode) {
    super(message);
    this.nativeStacktrace = nativeStacktrace;
    cudaError = CudaError.parseErrorCode(errorCode);
  }

  /** Variant that also records the underlying cause. */
  CudaException(String message, String nativeStacktrace, int errorCode, Throwable cause) {
    super(message, cause);
    this.nativeStacktrace = nativeStacktrace;
    cudaError = CudaError.parseErrorCode(errorCode);
  }

  /** @return the stacktrace captured on the native side, or a placeholder when unavailable. */
  public String getNativeStacktrace() {
    return nativeStacktrace;
  }

  /** @return the CUDA error behind this exception, or UnknownNativeError if unrecognized. */
  public CudaError getCudaError() {
    return cudaError;
  }

  private final String nativeStacktrace;
  private final CudaError cudaError;

  /**
   * The Java mirror of cudaError, which facilities the tracking of CUDA errors in JVM.
   */
  public enum CudaError {
    UnknownNativeError(-1), // native CUDA error type which Java doesn't have a representation
    cudaErrorInvalidValue(1),
    cudaErrorMemoryAllocation(2),
    cudaErrorInitializationError(3),
    cudaErrorCudartUnloading(4),
    cudaErrorProfilerDisabled(5),
    cudaErrorProfilerNotInitialized(6),
    cudaErrorProfilerAlreadyStarted(7),
    cudaErrorProfilerAlreadyStopped(8),
    cudaErrorInvalidConfiguration(9),
    cudaErrorInvalidPitchValue(12),
    cudaErrorInvalidSymbol(13),
    cudaErrorInvalidHostPointer(16),
    cudaErrorInvalidDevicePointer(17),
    cudaErrorInvalidTexture(18),
    cudaErrorInvalidTextureBinding(19),
    cudaErrorInvalidChannelDescriptor(20),
    cudaErrorInvalidMemcpyDirection(21),
    cudaErrorAddressOfConstant(22),
    cudaErrorTextureFetchFailed(23),
    cudaErrorTextureNotBound(24),
    cudaErrorSynchronizationError(25),
    cudaErrorInvalidFilterSetting(26),
    cudaErrorInvalidNormSetting(27),
    cudaErrorMixedDeviceExecution(28),
    cudaErrorNotYetImplemented(31),
    cudaErrorMemoryValueTooLarge(32),
    cudaErrorStubLibrary(34),
    cudaErrorInsufficientDriver(35),
    cudaErrorCallRequiresNewerDriver(36),
    cudaErrorInvalidSurface(37),
    cudaErrorDuplicateVariableName(43),
    cudaErrorDuplicateTextureName(44),
    cudaErrorDuplicateSurfaceName(45),
    cudaErrorDevicesUnavailable(46),
    cudaErrorIncompatibleDriverContext(49),
    cudaErrorMissingConfiguration(52),
    cudaErrorPriorLaunchFailure(53),
    cudaErrorLaunchMaxDepthExceeded(65),
    cudaErrorLaunchFileScopedTex(66),
    cudaErrorLaunchFileScopedSurf(67),
    cudaErrorSyncDepthExceeded(68),
    cudaErrorLaunchPendingCountExceeded(69),
    cudaErrorInvalidDeviceFunction(98),
    cudaErrorNoDevice(100),
    cudaErrorInvalidDevice(101),
    cudaErrorDeviceNotLicensed(102),
    cudaErrorSoftwareValidityNotEstablished(103),
    cudaErrorStartupFailure(127),
    cudaErrorInvalidKernelImage(200),
    cudaErrorDeviceUninitialized(201),
    cudaErrorMapBufferObjectFailed(205),
    cudaErrorUnmapBufferObjectFailed(206),
    cudaErrorArrayIsMapped(207),
    cudaErrorAlreadyMapped(208),
    cudaErrorNoKernelImageForDevice(209),
    cudaErrorAlreadyAcquired(210),
    cudaErrorNotMapped(211),
    cudaErrorNotMappedAsArray(212),
    cudaErrorNotMappedAsPointer(213),
    cudaErrorECCUncorrectable(214),
    cudaErrorUnsupportedLimit(215),
    cudaErrorDeviceAlreadyInUse(216),
    cudaErrorPeerAccessUnsupported(217),
    cudaErrorInvalidPtx(218),
    cudaErrorInvalidGraphicsContext(219),
    cudaErrorNvlinkUncorrectable(220),
    cudaErrorJitCompilerNotFound(221),
    cudaErrorUnsupportedPtxVersion(222),
    cudaErrorJitCompilationDisabled(223),
    cudaErrorUnsupportedExecAffinity(224),
    cudaErrorInvalidSource(300),
    cudaErrorFileNotFound(301),
    cudaErrorSharedObjectSymbolNotFound(302),
    cudaErrorSharedObjectInitFailed(303),
    cudaErrorOperatingSystem(304),
    cudaErrorInvalidResourceHandle(400),
    cudaErrorIllegalState(401),
    cudaErrorSymbolNotFound(500),
    cudaErrorNotReady(600),
    cudaErrorIllegalAddress(700),
    cudaErrorLaunchOutOfResources(701),
    cudaErrorLaunchTimeout(702),
    cudaErrorLaunchIncompatibleTexturing(703),
    cudaErrorPeerAccessAlreadyEnabled(704),
    cudaErrorPeerAccessNotEnabled(705),
    cudaErrorSetOnActiveProcess(708),
    cudaErrorContextIsDestroyed(709),
    cudaErrorAssert(710),
    cudaErrorTooManyPeers(711),
    cudaErrorHostMemoryAlreadyRegistered(712),
    cudaErrorHostMemoryNotRegistered(713),
    cudaErrorHardwareStackError(714),
    cudaErrorIllegalInstruction(715),
    cudaErrorMisalignedAddress(716),
    cudaErrorInvalidAddressSpace(717),
    cudaErrorInvalidPc(718),
    cudaErrorLaunchFailure(719),
    cudaErrorCooperativeLaunchTooLarge(720),
    cudaErrorNotPermitted(800),
    cudaErrorNotSupported(801),
    cudaErrorSystemNotReady(802),
    cudaErrorSystemDriverMismatch(803),
    cudaErrorCompatNotSupportedOnDevice(804),
    cudaErrorMpsConnectionFailed(805),
    cudaErrorMpsRpcFailure(806),
    cudaErrorMpsServerNotReady(807),
    cudaErrorMpsMaxClientsReached(808),
    cudaErrorMpsMaxConnectionsReached(809),
    cudaErrorStreamCaptureUnsupported(900),
    cudaErrorStreamCaptureInvalidated(901),
    cudaErrorStreamCaptureMerge(902),
    cudaErrorStreamCaptureUnmatched(903),
    cudaErrorStreamCaptureUnjoined(904),
    cudaErrorStreamCaptureIsolation(905),
    cudaErrorStreamCaptureImplicit(906),
    cudaErrorCapturedEvent(907),
    cudaErrorStreamCaptureWrongThread(908),
    cudaErrorTimeout(909),
    cudaErrorGraphExecUpdateFailure(910),
    cudaErrorExternalDevice(911),
    cudaErrorUnknown(999),
    cudaErrorApiFailureBase(10000);

    // mapping to the value of native mode
    final int code;

    // Lookup table from native error code to enum constant. Built once from values()
    // instead of the previous double-brace-initialized map that repeated every constant
    // by hand (an anti-pattern: anonymous HashMap subclass plus ~120 lines that had to
    // be kept in sync with the constant list above).
    private static final Map<Integer, CudaError> CODE_TO_ERROR = new HashMap<>();
    static {
      for (CudaError error : values()) {
        CODE_TO_ERROR.put(error.code, error);
      }
    }

    CudaError(int errorCode) {
      this.code = errorCode;
    }

    /**
     * Maps a native error code to the corresponding enum constant.
     * @param errorCode native cudaError value
     * @return the matching constant, or UnknownNativeError when the code is unrecognized
     */
    public static CudaError parseErrorCode(int errorCode) {
      return CODE_TO_ERROR.getOrDefault(errorCode, UnknownNativeError);
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudaFatalException.java
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* CudaFatalException is a kind of CudaException which leaves the process in an inconsistent state
* and any further CUDA work will return the same error.
* To continue using CUDA, the process must be terminated and relaunched.
*/
public class CudaFatalException extends CudaException {
  // Delegates with a placeholder when no native stacktrace was captured.
  CudaFatalException(String message, int errorCode) {
    this(message, "No native stacktrace is available.", errorCode);
  }
  CudaFatalException(String message, String nativeStacktrace, int errorCode) {
    super(message, nativeStacktrace, errorCode);
  }
  // Variant that also records the underlying cause of the fatal error.
  CudaFatalException(String message, String nativeStacktrace, int errorCode, Throwable cause) {
    super(message, nativeStacktrace, errorCode, cause);
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudaMemInfo.java
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* Represent free and total device memory.
*/
public class CudaMemInfo {
  /** Number of free bytes of device memory. */
  public final long free;

  /** Total number of bytes of device memory. */
  public final long total;

  CudaMemInfo(long freeBytes, long totalBytes) {
    this.free = freeBytes;
    this.total = totalBytes;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudaMemcpyKind.java
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
enum CudaMemcpyKind {
  /** Host -> Host */
  HOST_TO_HOST(0),
  /** Host -> Device */
  HOST_TO_DEVICE(1),
  /** Device -> Host */
  DEVICE_TO_HOST(2),
  /** Device -> Device */
  DEVICE_TO_DEVICE(3),
  /**
   * Direction of the transfer is inferred from the pointer values.
   * Requires unified virtual addressing.
   */
  DEFAULT(4);

  // Value of the corresponding native cudaMemcpyKind constant.
  private final int nativeValue;

  CudaMemcpyKind(int nativeValue) {
    this.nativeValue = nativeValue;
  }

  int getValue() {
    return nativeValue;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudaMemoryBuffer.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class represents data allocated using `cudaMalloc` directly instead of the default RMM
* memory resource. Closing this object will effectively release the memory held by the buffer.
* Note that because of reference counting if a buffer is sliced it may not actually result in the
* memory being released.
*/
public class CudaMemoryBuffer extends BaseDeviceMemoryBuffer {
  private static final Logger log = LoggerFactory.getLogger(CudaMemoryBuffer.class);
  /**
   * Cleaner that releases the raw CUDA allocation exactly once via Rmm.freeCuda.
   * Fields are zeroed after the free attempt so a failed free is never retried.
   */
  private static final class CudaBufferCleaner extends MemoryBufferCleaner {
    // Device address of the allocation; 0 once the buffer has been freed.
    private long address;
    private long lengthInBytes;
    // Stream used to synchronize the free; may be null (interpreted as stream 0 below).
    private Cuda.Stream stream;
    CudaBufferCleaner(long address, long lengthInBytes, Cuda.Stream stream) {
      this.address = address;
      this.lengthInBytes = lengthInBytes;
      this.stream = stream;
    }
    // Frees the allocation if still live. Returns true when cleanup work was needed,
    // which (combined with logErrorIfNotClean) indicates the buffer was leaked.
    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      boolean neededCleanup = false;
      long origAddress = address;
      if (address != 0) {
        long s = stream == null ? 0 : stream.getStream();
        try {
          Rmm.freeCuda(address, lengthInBytes, s);
        } finally {
          // Always mark the resource as freed even if an exception is thrown.
          // We cannot know how far it progressed before the exception, and
          // therefore it is unsafe to retry.
          address = 0;
          lengthInBytes = 0;
          stream = null;
        }
        neededCleanup = true;
      }
      if (neededCleanup && logErrorIfNotClean) {
        log.error("A CUDA BUFFER WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
        logRefCountDebug("Leaked device buffer");
      }
      return neededCleanup;
    }
    @Override
    public boolean isClean() {
      // Clean once the address has been zeroed by cleanImpl.
      return address == 0;
    }
  }
  /**
   * Wrap an existing CUDA allocation in a device memory buffer. The CUDA allocation will be freed
   * when the resulting device memory buffer instance frees its memory resource (i.e.: when its
   * reference count goes to zero).
   * @param address device address of the CUDA memory allocation
   * @param lengthInBytes length of the CUDA allocation in bytes
   * @param stream CUDA stream to use for synchronization when freeing the allocation
   */
  public CudaMemoryBuffer(long address, long lengthInBytes, Cuda.Stream stream) {
    super(address, lengthInBytes, new CudaBufferCleaner(address, lengthInBytes, stream));
  }
  // Slice constructor: shares the parent's cleaner so the memory outlives all slices.
  private CudaMemoryBuffer(long address, long lengthInBytes, CudaMemoryBuffer parent) {
    super(address, lengthInBytes, parent);
  }
  /**
   * Allocate memory for use on the GPU. You must close it when done.
   * @param bytes size in bytes to allocate
   * @return the buffer
   */
  public static CudaMemoryBuffer allocate(long bytes) {
    return allocate(bytes, Cuda.DEFAULT_STREAM);
  }
  /**
   * Allocate memory for use on the GPU. You must close it when done.
   * @param bytes size in bytes to allocate
   * @param stream The stream in which to synchronize this command
   * @return the buffer
   */
  public static CudaMemoryBuffer allocate(long bytes, Cuda.Stream stream) {
    return Rmm.allocCuda(bytes, stream);
  }
  /**
   * Slice off a part of the device buffer. Note that this is a zero copy operation and all
   * slices must be closed along with the original buffer before the memory is released to RMM.
   * So use this with some caution.
   * @param offset where to start the slice at.
   * @param len how many bytes to slice
   * @return a device buffer that will need to be closed independently from this buffer.
   */
  @Override
  public synchronized final CudaMemoryBuffer slice(long offset, long len) {
    // Bounds check first; incRefCount keeps the parent alive for the slice's lifetime.
    addressOutOfBoundsCheck(address + offset, len, "slice");
    incRefCount();
    return new CudaMemoryBuffer(getAddress() + offset, len, this);
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/Cudf.java
|
/*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
public class Cudf {
  static {
    // Load the native cudf library before any native method below can be invoked.
    NativeDepsLoader.loadNativeDeps();
  }
  /**
   * cuDF copies that are smaller than the threshold will use a kernel to copy, instead
   * of cudaMemcpyAsync.
   * @param kernelPinnedCopyThreshold the threshold value (presumably in bytes — confirm
   *        against the native implementation)
   */
  public static native void setKernelPinnedCopyThreshold(long kernelPinnedCopyThreshold);
  /**
   * cudf allocations that are smaller than the threshold will use the pinned host
   * memory resource.
   * @param pinnedAllocationThreshold the threshold value (presumably in bytes — confirm
   *        against the native implementation)
   */
  public static native void setPinnedAllocationThreshold(long pinnedAllocationThreshold);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudfColumnSizeOverflowException.java
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* Exception thrown when CUDF operation results in a column size
* exceeding CUDF column size limits
*/
public class CudfColumnSizeOverflowException extends CudfException {
  // Delegates to CudfException, which substitutes a placeholder native stacktrace.
  CudfColumnSizeOverflowException(String message) {
    super(message);
  }
  CudfColumnSizeOverflowException(String message, String nativeStacktrace) {
    super(message, nativeStacktrace);
  }
  // Variant that also records the underlying cause.
  CudfColumnSizeOverflowException(String message, String nativeStacktrace, Throwable cause) {
    super(message, nativeStacktrace, cause);
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/CudfException.java
|
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
* Exception thrown by cudf itself.
*/
public class CudfException extends RuntimeException {
CudfException(String message) {
this(message, "No native stacktrace is available.");
}
CudfException(String message, String nativeStacktrace) {
super(message);
this.nativeStacktrace = nativeStacktrace;
}
CudfException(String message, String nativeStacktrace, Throwable cause) {
super(message, cause);
this.nativeStacktrace = nativeStacktrace;
}
public final String getNativeStacktrace() {
return nativeStacktrace;
}
private final String nativeStacktrace;
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DType.java
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import java.math.BigDecimal;
import java.util.EnumSet;
import java.util.Objects;
public final class DType {
public static final int DECIMAL32_MAX_PRECISION = 9;
public static final int DECIMAL64_MAX_PRECISION = 18;
public static final int DECIMAL128_MAX_PRECISION = 38;
/* enum representing various types. Whenever a new non-decimal type is added please make sure
below sections are updated as well:
1. Create a singleton object of the new type.
2. Update SINGLETON_DTYPE_LOOKUP to reflect new type. The order should be maintained between
DTypeEnum and SINGLETON_DTYPE_LOOKUP */
public enum DTypeEnum {
  EMPTY(0, 0),
  INT8(1, 1),
  INT16(2, 2),
  INT32(4, 3),
  INT64(8, 4),
  UINT8(1, 5),
  UINT16(2, 6),
  UINT32(4, 7),
  UINT64(8, 8),
  FLOAT32(4, 9),
  FLOAT64(8, 10),
  /**
   * Byte wise true non-0/false 0. In general true will be 1.
   */
  BOOL8(1, 11),
  /**
   * Days since the UNIX epoch
   */
  TIMESTAMP_DAYS(4, 12),
  /**
   * s since the UNIX epoch
   */
  TIMESTAMP_SECONDS(8, 13),
  /**
   * ms since the UNIX epoch
   */
  TIMESTAMP_MILLISECONDS(8, 14),
  /**
   * microseconds since the UNIX epoch
   */
  TIMESTAMP_MICROSECONDS(8, 15),
  /**
   * ns since the UNIX epoch
   */
  TIMESTAMP_NANOSECONDS(8, 16),
  DURATION_DAYS(4, 17),
  DURATION_SECONDS(8, 18),
  DURATION_MILLISECONDS(8, 19),
  DURATION_MICROSECONDS(8, 20),
  DURATION_NANOSECONDS(8, 21),
  //DICTIONARY32(4, 22),
  STRING(0, 23),
  LIST(0, 24),
  DECIMAL32(4, 25),
  DECIMAL64(8, 26),
  DECIMAL128(16, 27),
  STRUCT(0, 28);
  // Size of one element in bytes; 0 for variable-width/nested types (STRING, LIST, STRUCT).
  final int sizeInBytes;
  // Id used to identify this type on the native side.
  final int nativeId;
  DTypeEnum(int sizeInBytes, int nativeId) {
    this.sizeInBytes = sizeInBytes;
    this.nativeId = nativeId;
  }
  public int getNativeId() { return nativeId; }
  // Membership in DType.DECIMALS (declared elsewhere in this class — presumably
  // the DECIMAL32/64/128 set; verify against the DECIMALS declaration).
  public boolean isDecimalType() { return DType.DECIMALS.contains(this); }
}
// Enum identifying which cudf type this DType represents.
final DTypeEnum typeId;
// Base-10 exponent for decimal types; always 0 for non-decimal types.
private final int scale;
// Constructor for non-decimal types (scale is fixed at 0).
private DType(DTypeEnum id) {
  typeId = id;
  scale = 0;
}
/**
 * Constructor for Decimal Type
 * @param id Enum representing data type.
 * @param decimalScale Scale of fixed point decimal type
 */
private DType(DTypeEnum id, int decimalScale) {
  typeId = id;
  scale = decimalScale;
}
// Shared singleton instances for every non-decimal type (decimal types carry a
// per-instance scale and therefore cannot be singletons).
public static final DType EMPTY = new DType(DTypeEnum.EMPTY);
public static final DType INT8 = new DType(DTypeEnum.INT8);
public static final DType INT16 = new DType(DTypeEnum.INT16);
public static final DType INT32 = new DType(DTypeEnum.INT32);
public static final DType INT64 = new DType(DTypeEnum.INT64);
public static final DType UINT8 = new DType(DTypeEnum.UINT8);
public static final DType UINT16 = new DType(DTypeEnum.UINT16);
public static final DType UINT32 = new DType(DTypeEnum.UINT32);
public static final DType UINT64 = new DType(DTypeEnum.UINT64);
public static final DType FLOAT32 = new DType(DTypeEnum.FLOAT32);
public static final DType FLOAT64 = new DType(DTypeEnum.FLOAT64);
public static final DType BOOL8 = new DType(DTypeEnum.BOOL8);
public static final DType TIMESTAMP_DAYS = new DType(DTypeEnum.TIMESTAMP_DAYS);
public static final DType TIMESTAMP_SECONDS = new DType(DTypeEnum.TIMESTAMP_SECONDS);
public static final DType TIMESTAMP_MILLISECONDS = new DType(DTypeEnum.TIMESTAMP_MILLISECONDS);
public static final DType TIMESTAMP_MICROSECONDS = new DType(DTypeEnum.TIMESTAMP_MICROSECONDS);
public static final DType TIMESTAMP_NANOSECONDS = new DType(DTypeEnum.TIMESTAMP_NANOSECONDS);
public static final DType DURATION_DAYS = new DType(DTypeEnum.DURATION_DAYS);
public static final DType DURATION_SECONDS = new DType(DTypeEnum.DURATION_SECONDS);
public static final DType DURATION_MILLISECONDS = new DType(DTypeEnum.DURATION_MILLISECONDS);
public static final DType DURATION_MICROSECONDS = new DType(DTypeEnum.DURATION_MICROSECONDS);
public static final DType DURATION_NANOSECONDS = new DType(DTypeEnum.DURATION_NANOSECONDS);
public static final DType STRING = new DType(DTypeEnum.STRING);
public static final DType LIST = new DType(DTypeEnum.LIST);
public static final DType STRUCT = new DType(DTypeEnum.STRUCT);
/* This is used in fromNative method to return singleton object for non-decimal types.
   Please make sure the order here is same as that of DTypeEnum. Whenever a new non-decimal
   type is added in DTypeEnum, this array needs to be updated as well.*/
// Indexed by DTypeEnum.nativeId; null entries mark types that have no singleton
// (dictionary and decimal types).
private static final DType[] SINGLETON_DTYPE_LOOKUP = new DType[]{
  EMPTY,
  INT8,
  INT16,
  INT32,
  INT64,
  UINT8,
  UINT16,
  UINT32,
  UINT64,
  FLOAT32,
  FLOAT64,
  BOOL8,
  TIMESTAMP_DAYS,
  TIMESTAMP_SECONDS,
  TIMESTAMP_MILLISECONDS,
  TIMESTAMP_MICROSECONDS,
  TIMESTAMP_NANOSECONDS,
  DURATION_DAYS,
  DURATION_SECONDS,
  DURATION_MILLISECONDS,
  DURATION_MICROSECONDS,
  DURATION_NANOSECONDS,
  null, // DICTIONARY32
  STRING,
  LIST,
  null, // DECIMAL32
  null, // DECIMAL64
  null, // DECIMAL128
  STRUCT
};
/**
 * Returns max precision for Decimal Type.
 * @return max precision this Decimal Type can hold
 * @throws IllegalArgumentException if this is not a decimal type
 */
public int getDecimalMaxPrecision() {
  if (!isDecimalType()) {
    throw new IllegalArgumentException("not a decimal type: " + this);
  }
  switch (typeId) {
    case DECIMAL32:
      return DECIMAL32_MAX_PRECISION;
    case DECIMAL64:
      return DECIMAL64_MAX_PRECISION;
    default:
      // Only DECIMAL128 remains once the decimal-type guard above has passed.
      return DECIMAL128_MAX_PRECISION;
  }
}
/**
 * Get the number of decimal places needed to hold the integral type.
 * NOTE: this method is NOT for decimal types but for signed integral types.
 * @return the minimum decimal precision (places) for the integral type
 * @throws IllegalArgumentException for any non signed-integral type
 */
public int getPrecisionForInt() {
  switch (typeId) {
    case INT8:  return 3;  // -128 to 127
    case INT16: return 5;  // -32768 to 32767
    case INT32: return 10; // -2147483648 to 2147483647
    case INT64: return 19; // -9223372036854775808 to 9223372036854775807
    default:
      throw new IllegalArgumentException("not an integral type: " + this);
  }
}
/**
 * Size of a single element of this type in bytes. Only meaningful for fixed width
 * types; for variable width types such as strings the value is undefined and should
 * be ignored.
 * @return size of type in bytes.
 */
public int getSizeInBytes() {
  return typeId.sizeInBytes;
}
/**
 * Returns the scale for decimal types.
 * The decimal value equals the unscaled value times 10^scale.
 * Example: for unscaled value 123456,
 *   scale = -2 gives 123456 * 10^-2 = 1234.56
 *   scale =  2 gives 123456 * 10^2  = 12345600
 * @return base-10 exponent applied to the unscaled value
 */
public int getScale() {
  return scale;
}
/**
 * The {@code DTypeEnum} value backing this DType.
 * @return DTypeEnum for this instance
 */
public DTypeEnum getTypeId() {
  return typeId;
}
/** Two DType instances are equal when both the type id and the scale match. */
@Override
public boolean equals(Object o) {
  if (o == this) {
    return true;
  }
  if (o == null || o.getClass() != getClass()) {
    return false;
  }
  final DType other = (DType) o;
  return typeId == other.typeId && scale == other.scale;
}
// Must stay consistent with equals(): hashes exactly the two fields equals() compares.
@Override
public int hashCode() {
  return Objects.hash(typeId, scale);
}
/** Human-readable form; decimal types additionally include their scale. */
@Override
public String toString() {
  return isDecimalType() ? typeId + " scale:" + scale : String.valueOf(typeId);
}
/**
 * Factory method for non-decimal DType instances.
 * @param dt enum corresponding to the datatype; must not be a decimal type.
 * @return the shared singleton DType for {@code dt}
 * @throws IllegalArgumentException if {@code dt} is a decimal type (a scale is required)
 */
public static DType create(DTypeEnum dt) {
  if (!DType.DECIMALS.contains(dt)) {
    return DType.fromNative(dt.nativeId, 0);
  }
  throw new IllegalArgumentException("Could not create a Decimal DType without scale");
}
/**
 * Factory method specialized for decimal DType instances.
 * @param dt enum corresponding to the datatype; must be a decimal type.
 * @param scale base-10 exponent to multiply the unscaled value to produce the decimal value.
 *              Example: for unscaled value 123456,
 *              scale = -2 gives 1234.56 and scale = 2 gives 12345600.
 * @return a new decimal DType carrying the requested scale
 * @throws IllegalArgumentException if {@code dt} is not a decimal type
 */
public static DType create(DTypeEnum dt, int scale) {
  if (DType.DECIMALS.contains(dt)) {
    return DType.fromNative(dt.nativeId, scale);
  }
  throw new IllegalArgumentException("Could not create a non-Decimal DType with scale");
}
/**
 * Factory method for DType instances.
 * Non-decimal types resolve to the shared singleton; decimal types get a fresh instance
 * carrying the requested scale after validating it against the width's max precision.
 * @param nativeId nativeId of DTypeEnum
 * @param scale base-10 exponent to multiply the unscaled value to produce the decimal value
 *              Example: for unscaled value 123456,
 *              scale = -2 gives 1234.56 and scale = 2 gives 12345600.
 * @return DType
 * @throws IllegalArgumentException if nativeId is unknown or the scale is out of range
 */
public static DType fromNative(int nativeId, int scale) {
  if (nativeId >=0 && nativeId < SINGLETON_DTYPE_LOOKUP.length) {
    // The lookup is indexed by nativeId; null entries are types that have no singleton.
    DType ret = SINGLETON_DTYPE_LOOKUP[nativeId];
    if (ret != null) {
      assert ret.typeId.nativeId == nativeId : "Something went wrong and it looks like " +
        "SINGLETON_DTYPE_LOOKUP is out of sync";
      return ret;
    }
    // A negative cudf scale means digits after the decimal point, so -scale must fit
    // within the max precision of the chosen decimal width.
    if (nativeId == DTypeEnum.DECIMAL32.nativeId) {
      if (-scale > DECIMAL32_MAX_PRECISION) {
        throw new IllegalArgumentException(
          "Scale " + (-scale) + " exceeds DECIMAL32_MAX_PRECISION " + DECIMAL32_MAX_PRECISION);
      }
      return new DType(DTypeEnum.DECIMAL32, scale);
    }
    if (nativeId == DTypeEnum.DECIMAL64.nativeId) {
      if (-scale > DECIMAL64_MAX_PRECISION) {
        throw new IllegalArgumentException(
          "Scale " + (-scale) + " exceeds DECIMAL64_MAX_PRECISION " + DECIMAL64_MAX_PRECISION);
      }
      return new DType(DTypeEnum.DECIMAL64, scale);
    }
    if (nativeId == DTypeEnum.DECIMAL128.nativeId) {
      if (-scale > DECIMAL128_MAX_PRECISION) {
        throw new IllegalArgumentException(
          "Scale " + (-scale) + " exceeds DECIMAL128_MAX_PRECISION " + DECIMAL128_MAX_PRECISION);
      }
      return new DType(DTypeEnum.DECIMAL128, scale);
    }
  }
  throw new IllegalArgumentException("Could not translate " + nativeId + " into a DType");
}
/**
 * Create a decimal DType from the precision and scale of a Java BigDecimal,
 * choosing the narrowest decimal width that can hold the precision.
 *
 * @param dec BigDecimal
 * @return DType
 * @throws IllegalArgumentException if the precision exceeds DECIMAL128_MAX_PRECISION
 */
public static DType fromJavaBigDecimal(BigDecimal dec) {
  // libcudf scales have the opposite sign of java.math.BigDecimal scales,
  // so negate the scale before handing it to the constructor.
  final int precision = dec.precision();
  final int cudfScale = -dec.scale();
  if (precision <= DECIMAL32_MAX_PRECISION) {
    return new DType(DTypeEnum.DECIMAL32, cudfScale);
  }
  if (precision <= DECIMAL64_MAX_PRECISION) {
    return new DType(DTypeEnum.DECIMAL64, cudfScale);
  }
  if (precision <= DECIMAL128_MAX_PRECISION) {
    return new DType(DTypeEnum.DECIMAL128, cudfScale);
  }
  throw new IllegalArgumentException("Precision " + dec.precision() +
      " exceeds max precision cuDF can support " + DECIMAL128_MAX_PRECISION);
}
/**
 * Returns true for timestamps with time level resolution (seconds or finer),
 * as opposed to day level resolution.
 */
public boolean hasTimeResolution() {
  return TIME_RESOLUTION.contains(typeId);
}
/**
 * Returns true if this type is stored as a 32-bit int under the hood, i.e. for:
 * INT32, UINT32, DURATION_DAYS, TIMESTAMP_DAYS and DECIMAL32.
 */
public boolean isBackedByInt() {
  return INTS.contains(typeId);
}
/**
 * Returns true if this type is stored as a 64-bit long under the hood, i.e. for:
 * INT64, UINT64, the time-level DURATION_* and TIMESTAMP_* types
 * (SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS) and DECIMAL64.
 */
public boolean isBackedByLong() {
  return LONGS.contains(typeId);
}
/**
 * Returns true if this type is stored as a 16-bit short under the hood,
 * i.e. for INT16 and UINT16.
 */
public boolean isBackedByShort() {
  return SHORTS.contains(typeId);
}
/**
 * Returns true if this type is stored as a single byte under the hood,
 * i.e. for INT8, UINT8 and BOOL8.
 */
public boolean isBackedByByte() {
  return BYTES.contains(typeId);
}
/**
 * Returns true if this type is of decimal type
 * Namely this method will return true for the following types
 * DType.DECIMAL32,
 * DType.DECIMAL64,
 * DType.DECIMAL128
 */
public boolean isDecimalType() { return this.typeId.isDecimalType(); }
/** Returns true for all DURATION_* types. */
public boolean isDurationType() {
  return DURATION_TYPE.contains(typeId);
}
/**
 * Returns true only for the plain integer types (signed and unsigned INT8 through
 * INT64). Types that are merely backed by ints, such as TIMESTAMP_DAYS, return false.
 */
public boolean isIntegral() {
  return INTEGRALS.contains(typeId);
}
/** Returns true for the nested types LIST and STRUCT. */
public boolean isNestedType() {
  return NESTED_TYPE.contains(typeId);
}
/**
 * Returns true for all TIMESTAMP_* types.
 * @deprecated use {@link #isTimestampType()} instead.
 */
@Deprecated
public boolean isTimestamp() {
  return TIMESTAMPS.contains(this.typeId);
}
/** Returns true for all TIMESTAMP_* types (both day and time level resolution). */
public boolean isTimestampType() {
  return TIMESTAMPS.contains(this.typeId);
}
/** Returns true if the type stores its data with a vector of offsets (STRING and LIST). */
public boolean hasOffsets() {
  return OFFSETS_TYPE.contains(typeId);
}
// All timestamp types, day and time level resolution alike.
private static final EnumSet<DTypeEnum> TIMESTAMPS = EnumSet.of(
    DTypeEnum.TIMESTAMP_DAYS,
    DTypeEnum.TIMESTAMP_SECONDS,
    DTypeEnum.TIMESTAMP_MILLISECONDS,
    DTypeEnum.TIMESTAMP_MICROSECONDS,
    DTypeEnum.TIMESTAMP_NANOSECONDS);
// Timestamps with sub-day resolution; see hasTimeResolution().
private static final EnumSet<DTypeEnum> TIME_RESOLUTION = EnumSet.of(
    DTypeEnum.TIMESTAMP_SECONDS,
    DTypeEnum.TIMESTAMP_MILLISECONDS,
    DTypeEnum.TIMESTAMP_MICROSECONDS,
    DTypeEnum.TIMESTAMP_NANOSECONDS);
// All duration types; see isDurationType().
private static final EnumSet<DTypeEnum> DURATION_TYPE = EnumSet.of(
    DTypeEnum.DURATION_DAYS,
    DTypeEnum.DURATION_MICROSECONDS,
    DTypeEnum.DURATION_MILLISECONDS,
    DTypeEnum.DURATION_NANOSECONDS,
    DTypeEnum.DURATION_SECONDS
);
// Types whose backing storage is a 64-bit long; see isBackedByLong().
private static final EnumSet<DTypeEnum> LONGS = EnumSet.of(
    DTypeEnum.INT64,
    DTypeEnum.UINT64,
    DTypeEnum.DURATION_SECONDS,
    DTypeEnum.DURATION_MILLISECONDS,
    DTypeEnum.DURATION_MICROSECONDS,
    DTypeEnum.DURATION_NANOSECONDS,
    DTypeEnum.TIMESTAMP_SECONDS,
    DTypeEnum.TIMESTAMP_MILLISECONDS,
    DTypeEnum.TIMESTAMP_MICROSECONDS,
    DTypeEnum.TIMESTAMP_NANOSECONDS,
    // The unscaledValue of DECIMAL64 is of type INT64, which means it can be fetched by getLong.
    DTypeEnum.DECIMAL64
);
// Types whose backing storage is a 32-bit int; see isBackedByInt().
private static final EnumSet<DTypeEnum> INTS = EnumSet.of(
    DTypeEnum.INT32,
    DTypeEnum.UINT32,
    DTypeEnum.DURATION_DAYS,
    DTypeEnum.TIMESTAMP_DAYS,
    // The unscaledValue of DECIMAL32 is of type INT32, which means it can be fetched by getInt.
    DTypeEnum.DECIMAL32
);
// Types whose backing storage is a 16-bit short; see isBackedByShort().
private static final EnumSet<DTypeEnum> SHORTS = EnumSet.of(
    DTypeEnum.INT16,
    DTypeEnum.UINT16
);
// Types whose backing storage is a single byte; see isBackedByByte().
private static final EnumSet<DTypeEnum> BYTES = EnumSet.of(
    DTypeEnum.INT8,
    DTypeEnum.UINT8,
    DTypeEnum.BOOL8
);
// All decimal widths; used by the create() factories to enforce scale requirements.
private static final EnumSet<DTypeEnum> DECIMALS = EnumSet.of(
    DTypeEnum.DECIMAL32,
    DTypeEnum.DECIMAL64,
    DTypeEnum.DECIMAL128
);
// Nested (compound) types; see isNestedType().
private static final EnumSet<DTypeEnum> NESTED_TYPE = EnumSet.of(
    DTypeEnum.LIST,
    DTypeEnum.STRUCT
);
// Types that carry a vector of offsets; see hasOffsets().
private static final EnumSet<DTypeEnum> OFFSETS_TYPE = EnumSet.of(
    DTypeEnum.STRING,
    DTypeEnum.LIST
);
// Plain integer types only; see isIntegral().
private static final EnumSet<DTypeEnum> INTEGRALS = EnumSet.of(
    DTypeEnum.INT8,
    DTypeEnum.INT16,
    DTypeEnum.INT32,
    DTypeEnum.INT64,
    DTypeEnum.UINT8,
    DTypeEnum.UINT16,
    DTypeEnum.UINT32,
    DTypeEnum.UINT64
);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DataSource.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
/**
 * Base class that can be used to provide data dynamically to CUDF. This follows somewhat
 * closely with cudf::io::datasource. There are a few main differences.
 * <br/>
 * First this does not expose async device reads. It will call the non-async device read API
 * instead. This might be added in the future, but there was no direct use case for it in java
 * right now to warrant the added complexity.
 * <br/>
 * Second there is no implementation of the device read API that returns a buffer instead of
 * writing into one. This is not used by CUDF yet so testing an implementation that isn't used
 * didn't feel ideal. If it is needed we will add one in the future.
 */
public abstract class DataSource implements AutoCloseable {
  private static final Logger log = LoggerFactory.getLogger(DataSource.class);

  /**
   * This is used to keep track of the HostMemoryBuffers in java land so the C++ layer
   * does not have to do it.
   */
  private final HashMap<Long, HostMemoryBuffer> cachedBuffers = new HashMap<>();

  /**
   * Verify that every host buffer handed out through hostReadBuff has been returned by
   * the native side before the source goes away.
   * @throws IllegalStateException if any returned host buffer is still outstanding.
   */
  @Override
  public void close() {
    if (!cachedBuffers.isEmpty()) {
      throw new IllegalStateException("DataSource closed before all returned host buffers were closed");
    }
  }

  /**
   * Get the size of the source in bytes.
   */
  public abstract long size();

  /**
   * Read data from the source at the given offset. Return a HostMemoryBuffer for the data
   * that was read.
   * @param offset where to start reading from.
   * @param amount the maximum number of bytes to read.
   * @return a buffer that points to the data.
   * @throws IOException on any error.
   */
  public abstract HostMemoryBuffer hostRead(long offset, long amount) throws IOException;

  /**
   * Called when the buffer returned from hostRead is done. The default is to close the buffer.
   */
  protected void onHostBufferDone(HostMemoryBuffer buffer) {
    if (buffer != null) {
      buffer.close();
    }
  }

  /**
   * Read data from the source at the given offset into dest. Note that dest should not be closed,
   * and no reference to it can outlive the call to hostRead. The target amount to read is
   * dest's length.
   * @param offset the offset to start reading from in the source.
   * @param dest where to write the data.
   * @return the actual number of bytes written to dest.
   */
  public abstract long hostRead(long offset, HostMemoryBuffer dest) throws IOException;

  /**
   * Return true if this supports reading directly to the device else false. The default is
   * no device support. This cannot change dynamically. It is typically read just once.
   */
  public boolean supportsDeviceRead() {
    return false;
  }

  /**
   * Get the size cutoff between device reads and host reads when device reads are supported.
   * Anything larger than the cutoff will be a device read and anything smaller will be a
   * host read. By default, the cutoff is 0 so all reads will be device reads if device reads
   * are supported.
   */
  public long getDeviceReadCutoff() {
    return 0;
  }

  /**
   * Read data from the source at the given offset into dest. Note that dest should not be closed,
   * and no reference to it can outlive the call to deviceRead. The target amount to read is
   * dest's length.
   * @param offset the offset to start reading from
   * @param dest where to write the data.
   * @param stream the stream to do the copy on.
   * @return the actual number of bytes written to dest.
   */
  public long deviceRead(long offset, DeviceMemoryBuffer dest,
                         Cuda.Stream stream) throws IOException {
    throw new IllegalStateException("Device read is not implemented");
  }

  /////////////////////////////////////////////////
  // Internal methods called from JNI
  /////////////////////////////////////////////////

  // The wrapped buffers below are views over memory owned by the native side, so this
  // cleaner does nothing: Java must never free that memory.
  private static class NoopCleaner extends MemoryBuffer.MemoryBufferCleaner {
    @Override
    protected boolean cleanImpl(boolean logErrorIfNotClean) {
      return true;
    }

    @Override
    public boolean isClean() {
      return true;
    }
  }
  private static final NoopCleaner cleaner = new NoopCleaner();

  // Called from JNI when the native side is done with a buffer returned by hostReadBuff.
  private void onHostBufferDone(long bufferId) {
    HostMemoryBuffer hmb = cachedBuffers.remove(bufferId);
    if (hmb != null) {
      onHostBufferDone(hmb);
    } else {
      // Called from C++ destructor so avoid throwing...
      log.warn("Got a close callback for a buffer we could not find " + bufferId);
    }
  }

  // Called from JNI
  private long hostRead(long offset, long amount, long dst) throws IOException {
    if (amount < 0) {
      // NOTE(review): a negative amount presumably means the native size_t overflowed
      // Java's signed long -- confirm against the JNI layer. Message reworded from
      // "Cannot allocate ..." to match the sibling read paths; no allocation happens here.
      throw new IllegalArgumentException("Cannot read more than " + Long.MAX_VALUE + " bytes");
    }
    try (HostMemoryBuffer dstBuffer = new HostMemoryBuffer(dst, amount, cleaner)) {
      return hostRead(offset, dstBuffer);
    }
  }

  // Called from JNI
  private long[] hostReadBuff(long offset, long amount) throws IOException {
    if (amount < 0) {
      throw new IllegalArgumentException("Cannot read more than " + Long.MAX_VALUE + " bytes");
    }
    HostMemoryBuffer buff = hostRead(offset, amount);
    long[] ret = new long[3];
    if (buff != null) {
      // Cache the buffer so it can be released later via the onHostBufferDone callback.
      long id = buff.id;
      if (cachedBuffers.put(id, buff) != null) {
        throw new IllegalStateException("Already had a buffer cached for " + buff);
      }
      ret[0] = buff.address;
      ret[1] = buff.length;
      ret[2] = id;
    } // else they are all 0 because java does that already
    return ret;
  }

  // Called from JNI
  private long deviceRead(long offset, long amount, long dst, long stream) throws IOException {
    if (amount < 0) {
      throw new IllegalArgumentException("Cannot read more than " + Long.MAX_VALUE + " bytes");
    }
    Cuda.Stream strm = Cuda.Stream.wrap(stream);
    try (DeviceMemoryBuffer dstBuffer = new DeviceMemoryBuffer(dst, amount, cleaner)) {
      return deviceRead(offset, dstBuffer, strm);
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DataSourceHelper.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * This is here because we need some JNI methods to work with a DataSource, but
 * we also want to cache callback methods at startup for performance reasons. If
 * we put both in the same class we will get a deadlock because of how we load
 * the JNI. We have a static block that blocks loading the class until the JNI
 * library is loaded and the JNI library cannot load until the class is loaded
 * and cached. This breaks the loop.
 */
class DataSourceHelper {
  static {
    // Make sure the native library is loaded before any native method can be touched.
    NativeDepsLoader.loadNativeDeps();
  }

  /**
   * Create a native wrapper around the given DataSource, capturing its size and
   * device-read configuration at creation time.
   * @param ds the Java data source to wrap
   * @return handle to the native wrapper; release with {@link #destroyWrapperDataSource(long)}
   */
  static long createWrapperDataSource(DataSource ds) {
    return createWrapperDataSource(ds, ds.size(), ds.supportsDeviceRead(),
        ds.getDeviceReadCutoff());
  }

  private static native long createWrapperDataSource(DataSource ds, long size,
                                                     boolean deviceReadSupport,
                                                     long deviceReadCutoff);

  /** Destroy a native wrapper previously created by createWrapperDataSource. */
  static native void destroyWrapperDataSource(long handle);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DateTimeComponent.java
|
/*
*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Types of datetime components that may be extracted.
 */
public enum DateTimeComponent {
  /**
   * year as an INT16
   */
  YEAR(0),
  /**
   * month 1 - jan, as an INT16
   */
  MONTH(1),
  /**
   * Day of the month as an INT16
   */
  DAY(2),
  /**
   * day of the week, Monday=1, ..., Sunday=7 as an INT16
   */
  WEEKDAY(3),
  /**
   * hour of the day 24-hour clock as an INT16
   */
  HOUR(4),
  /**
   * minutes past the hour as an INT16
   */
  MINUTE(5),
  /**
   * seconds past the minute as an INT16
   */
  SECOND(6),
  /**
   * milliseconds past the seconds as an INT16
   */
  MILLISECOND(7),
  /**
   * microseconds past the millisecond as an INT16
   */
  MICROSECOND(8),
  /**
   * nanoseconds past the microsecond as an INT16
   */
  NANOSECOND(9);

  // Id handed across JNI; presumably mirrors a native enum ordering -- confirm
  // before renumbering any entry.
  final int id;

  DateTimeComponent(int id) {
    this.id = id;
  }

  /** @return the id used to identify this component on the native side */
  public int getNativeId() {
    return id;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DateTimeRoundingFrequency.java
|
/*
*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Frequencies to which a datetime value may be rounded.
 */
public enum DateTimeRoundingFrequency {
  DAY(0),
  HOUR(1),
  MINUTE(2),
  SECOND(3),
  MILLISECOND(4),
  MICROSECOND(5),
  NANOSECOND(6);

  // Id handed across JNI; presumably mirrors a native enum ordering -- confirm
  // before renumbering any entry.
  final int id;

  DateTimeRoundingFrequency(int id) {
    this.id = id;
  }

  /** @return the id used to identify this frequency on the native side */
  public int getNativeId() {
    return id;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DecimalUtils.java
|
/*
*
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.AbstractMap;
import java.util.Map;
public class DecimalUtils {
/**
* Creates a cuDF decimal type with precision and scale
*/
public static DType createDecimalType(int precision, int scale) {
if (precision <= DType.DECIMAL32_MAX_PRECISION) {
return DType.create(DType.DTypeEnum.DECIMAL32, -scale);
} else if (precision <= DType.DECIMAL64_MAX_PRECISION) {
return DType.create(DType.DTypeEnum.DECIMAL64, -scale);
} else if (precision <= DType.DECIMAL128_MAX_PRECISION) {
return DType.create(DType.DTypeEnum.DECIMAL128, -scale);
}
throw new IllegalArgumentException("precision overflow: " + precision);
}
/**
* Given decimal precision and scale, returns the lower and upper bound of current decimal type.
*
* Be very careful when comparing these CUDF decimal comparisons really only work
* when both types are already the same precision and scale, and when you change the scale
* you end up losing information.
* @param precision the max precision of decimal type
* @param scale the scale of decimal type
* @return a Map Entry of BigDecimal, lower bound as the key, upper bound as the value
*/
public static Map.Entry<BigDecimal, BigDecimal> bounds(int precision, int scale) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < precision; i++) sb.append("9");
sb.append("e");
sb.append(-scale);
String boundStr = sb.toString();
BigDecimal upperBound = new BigDecimal(boundStr);
BigDecimal lowerBound = new BigDecimal("-" + boundStr);
return new AbstractMap.SimpleImmutableEntry<>(lowerBound, upperBound);
}
/**
* With precision and scale, checks each value of input decimal column for out of bound.
* @return the boolean column represents whether specific values are out of bound or not
*/
public static ColumnVector outOfBounds(ColumnView input, int precision, int scale) {
Map.Entry<BigDecimal, BigDecimal> boundPair = bounds(precision, scale);
BigDecimal lowerBound = boundPair.getKey();
BigDecimal upperBound = boundPair.getValue();
try (ColumnVector over = greaterThan(input, upperBound);
ColumnVector under = lessThan(input, lowerBound)) {
return over.or(under);
}
}
/**
* Because the native lessThan operator has issues with comparing decimal values that have different
* precision and scale accurately. This method takes some special steps to get rid of these issues.
*/
public static ColumnVector lessThan(ColumnView lhs, BigDecimal rhs) {
assert (lhs.getType().isDecimalType());
int leftScale = lhs.getType().getScale();
int leftPrecision = lhs.getType().getDecimalMaxPrecision();
// First we have to round the scalar (rhs) to the same scale as lhs.
// For comparing the two values they should be the same scale, we round the value to positive infinity to maintain
// the relation. Ex:
// 10.2 < 10.29 = true, after rounding rhs to ceiling ===> 10.2 < 10.3 = true, relation is maintained
// 10.3 < 10.29 = false, after rounding rhs to ceiling ===> 10.3 < 10.3 = false, relation is maintained
// 10.1 < 10.10 = false, after rounding rhs to ceiling ===> 10.1 < 10.1 = false, relation is maintained
BigDecimal roundedRhs = rhs.setScale(-leftScale, BigDecimal.ROUND_CEILING);
if (roundedRhs.precision() > leftPrecision) {
// converting rhs to the same precision as lhs would result in an overflow/error, but
// the scale is the same so we can still figure this out. For example if LHS precision is
// 4 and RHS precision is 5 we get the following...
// 9999 < 99999 => true
// -9999 < 99999 => true
// 9999 < -99999 => false
// -9999 < -99999 => false
// so the result should be the same as RHS > 0
try (Scalar isPositive = Scalar.fromBool(roundedRhs.compareTo(BigDecimal.ZERO) > 0)) {
return ColumnVector.fromScalar(isPositive, (int) lhs.getRowCount());
}
}
try (Scalar scalarRhs = Scalar.fromDecimal(roundedRhs.unscaledValue(), lhs.getType())) {
return lhs.lessThan(scalarRhs);
}
}
/**
* Because the native lessThan operator has issues with comparing decimal values that have different
* precision and scale accurately. This method takes some special steps to get rid of these issues.
*/
public static ColumnVector lessThan(BinaryOperable lhs, BigDecimal rhs, int numRows) {
if (lhs instanceof ColumnView) {
return lessThan((ColumnView) lhs, rhs);
}
Scalar scalarLhs = (Scalar) lhs;
if (scalarLhs.isValid()) {
try (Scalar isLess = Scalar.fromBool(scalarLhs.getBigDecimal().compareTo(rhs) < 0)) {
return ColumnVector.fromScalar(isLess, numRows);
}
}
try (Scalar nullScalar = Scalar.fromNull(DType.BOOL8)) {
return ColumnVector.fromScalar(nullScalar, numRows);
}
}
/**
* Because the native greaterThan operator has issues with comparing decimal values that have different
* precision and scale accurately. This method takes some special steps to get rid of these issues.
*/
public static ColumnVector greaterThan(ColumnView lhs, BigDecimal rhs) {
assert (lhs.getType().isDecimalType());
int cvScale = lhs.getType().getScale();
int maxPrecision = lhs.getType().getDecimalMaxPrecision();
// First we have to round the scalar (rhs) to the same scale as lhs.
// For comparing the two values they should be the same scale, we round the value to negative infinity to maintain
// the relation. Ex:
// 10.3 > 10.29 = true, after rounding rhs to floor ===> 10.3 > 10.2 = true, relation is maintained
// 10.2 > 10.29 = false, after rounding rhs to floor ===> 10.2 > 10.2 = false, relation is maintained
// 10.1 > 10.10 = false, after rounding rhs to floor ===> 10.1 > 10.1 = false, relation is maintained
BigDecimal roundedRhs = rhs.setScale(-cvScale, BigDecimal.ROUND_FLOOR);
if (roundedRhs.precision() > maxPrecision) {
// converting rhs to the same precision as lhs would result in an overflow/error, but
// the scale is the same so we can still figure this out. For example if LHS precision is
// 4 and RHS precision is 5 we get the following...
// 9999 > 99999 => false
// -9999 > 99999 => false
// 9999 > -99999 => true
// -9999 > -99999 => true
// so the result should be the same as RHS < 0
try (Scalar isNegative = Scalar.fromBool(roundedRhs.compareTo(BigDecimal.ZERO) < 0)) {
return ColumnVector.fromScalar(isNegative, (int) lhs.getRowCount());
}
}
try (Scalar scalarRhs = Scalar.fromDecimal(roundedRhs.unscaledValue(), lhs.getType())) {
return lhs.greaterThan(scalarRhs);
}
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DefaultHostMemoryAllocator.java
|
/*
*
* Copyright (c) 2023-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
public class DefaultHostMemoryAllocator implements HostMemoryAllocator {
  // volatile so an allocator installed via set() becomes visible to all threads
  // without further synchronization.
  private static volatile HostMemoryAllocator instance = new DefaultHostMemoryAllocator();

  /**
   * Retrieve current host memory allocator used by default if not passed directly to API
   *
   * @return current default HostMemoryAllocator implementation
   */
  public static HostMemoryAllocator get() {
    return instance;
  }

  /**
   * Sets a new default host memory allocator implementation by default.
   * @param hostMemoryAllocator the new allocator to use.
   */
  public static void set(HostMemoryAllocator hostMemoryAllocator) {
    instance = hostMemoryAllocator;
  }

  @Override
  public HostMemoryBuffer allocate(long bytes, boolean preferPinned) {
    if (preferPinned) {
      // Try the pinned pool first; fall through to regular host memory if it cannot
      // satisfy the request.
      HostMemoryBuffer pinnedBuffer = PinnedMemoryPool.tryAllocate(bytes);
      if (pinnedBuffer != null) {
        return pinnedBuffer;
      }
    }
    return new HostMemoryBuffer(UnsafeMemoryAccessor.allocate(bytes), bytes);
  }

  @Override
  public HostMemoryBuffer allocate(long bytes) {
    return allocate(bytes, HostMemoryBuffer.defaultPreferPinned);
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DeviceMemoryBuffer.java
|
/*
*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class represents data in some form on the GPU. Closing this object will effectively release
* the memory held by the buffer. Note that because of pooling in RMM or reference counting if a
* buffer is sliced it may not actually result in the memory being released.
*/
public class DeviceMemoryBuffer extends BaseDeviceMemoryBuffer {
private static final Logger log = LoggerFactory.getLogger(DeviceMemoryBuffer.class);
// Frees a raw RMM allocation (address/length) on the stream it was allocated with.
private static final class DeviceBufferCleaner extends MemoryBufferCleaner {
  private long address;
  private long lengthInBytes;
  private Cuda.Stream stream;

  DeviceBufferCleaner(long address, long lengthInBytes, Cuda.Stream stream) {
    this.address = address;
    this.lengthInBytes = lengthInBytes;
    this.stream = stream;
  }

  // synchronized so concurrent clean attempts cannot double-free.
  @Override
  protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
    boolean neededCleanup = false;
    long origAddress = address;
    if (address != 0) {
      long s = stream == null ? 0 : stream.getStream();
      try {
        Rmm.free(address, lengthInBytes, s);
      } finally {
        // Always mark the resource as freed even if an exception is thrown.
        // We cannot know how far it progressed before the exception, and
        // therefore it is unsafe to retry.
        address = 0;
        lengthInBytes = 0;
        stream = null;
      }
      neededCleanup = true;
    }
    if (neededCleanup && logErrorIfNotClean) {
      log.error("A DEVICE BUFFER WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
      logRefCountDebug("Leaked device buffer");
    }
    return neededCleanup;
  }

  @Override
  public boolean isClean() {
    return address == 0;
  }
}
private static final class RmmDeviceBufferCleaner extends MemoryBufferCleaner {
private long rmmBufferAddress;
RmmDeviceBufferCleaner(long rmmBufferAddress) {
this.rmmBufferAddress = rmmBufferAddress;
}
@Override
protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
boolean neededCleanup = false;
if (rmmBufferAddress != 0) {
Rmm.freeDeviceBuffer(rmmBufferAddress);
rmmBufferAddress = 0;
neededCleanup = true;
}
if (neededCleanup && logErrorIfNotClean) {
log.error("WE LEAKED A DEVICE BUFFER!!!!");
logRefCountDebug("Leaked device buffer");
}
return neededCleanup;
}
@Override
public boolean isClean() {
return rmmBufferAddress == 0;
}
}
/**
 * Wrap an existing RMM allocation in a device memory buffer. The RMM allocation will be freed
 * when the resulting device memory buffer instance frees its memory resource (i.e.: when its
 * reference count goes to zero).
 * @param address device address of the RMM allocation
 * @param lengthInBytes length of the RMM allocation in bytes
 * @param rmmBufferAddress host address of the rmm::device_buffer that owns the device memory
 * @return new device memory buffer instance that wraps the existing RMM allocation
 */
public static DeviceMemoryBuffer fromRmm(long address, long lengthInBytes, long rmmBufferAddress) {
  return new DeviceMemoryBuffer(address, lengthInBytes, rmmBufferAddress);
}
// General-purpose constructor: cleanup of the memory is fully delegated to the given cleaner.
DeviceMemoryBuffer(long address, long lengthInBytes, MemoryBufferCleaner cleaner) {
  super(address, lengthInBytes, cleaner);
}

// Wraps memory owned by a native rmm::device_buffer; released via RmmDeviceBufferCleaner.
DeviceMemoryBuffer(long address, long lengthInBytes, long rmmBufferAddress) {
  super(address, lengthInBytes, new RmmDeviceBufferCleaner(rmmBufferAddress));
}

// Wraps a raw RMM allocation; released via DeviceBufferCleaner (Rmm.free on the given stream).
DeviceMemoryBuffer(long address, long lengthInBytes, Cuda.Stream stream) {
  super(address, lengthInBytes, new DeviceBufferCleaner(address, lengthInBytes, stream));
}

// Slice constructor: lifetime is tied to the parent buffer, whose reference count was
// bumped by slice()/sliceFrom() before this is invoked.
private DeviceMemoryBuffer(long address, long lengthInBytes, DeviceMemoryBuffer parent) {
  super(address, lengthInBytes, parent);
}
/**
 * Allocate memory for use on the GPU, synchronized on the CUDA default stream.
 * You must close it when done.
 * @param bytes size in bytes to allocate
 * @return the buffer
 */
public static DeviceMemoryBuffer allocate(long bytes) {
  return allocate(bytes, Cuda.DEFAULT_STREAM);
}
/**
 * Allocate memory for use on the GPU via RMM. You must close it when done.
 * @param bytes size in bytes to allocate
 * @param stream The stream in which to synchronize this command
 * @return the buffer
 */
public static DeviceMemoryBuffer allocate(long bytes, Cuda.Stream stream) {
  return Rmm.alloc(bytes, stream);
}
/**
 * Slice off a part of the device buffer. Note that this is a zero copy operation and all
 * slices must be closed along with the original buffer before the memory is released to RMM.
 * So use this with some caution.
 * @param offset where to start the slice at.
 * @param len how many bytes to slice
 * @return a device buffer that will need to be closed independently from this buffer.
 */
@Override
public synchronized final DeviceMemoryBuffer slice(long offset, long len) {
  // NOTE(review): the bounds check uses the `address` field while the slice uses
  // getAddress() — presumably equivalent; confirm against BaseMemoryBuffer.
  addressOutOfBoundsCheck(address + offset, len, "slice");
  // The slice holds a reference on this buffer so the parent cannot be freed first.
  incRefCount();
  return new DeviceMemoryBuffer(getAddress() + offset, len, this);
}
/**
 * Convert a view that is a subset of this Buffer by slicing this.
 * @param view the view to use as a reference; may be null, in which case null is returned.
 * @return the sliced buffer, holding a new reference on this buffer.
 */
synchronized final BaseDeviceMemoryBuffer sliceFrom(DeviceMemoryBufferView view) {
  if (view == null) {
    return null;
  }
  // The view's address/length are absolute, so they are validated directly against this buffer.
  addressOutOfBoundsCheck(view.address, view.length, "sliceFrom");
  incRefCount();
  return new DeviceMemoryBuffer(view.address, view.length, this);
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DeviceMemoryBufferView.java
|
/*
*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * A non-owning view of device memory. The memory referenced by this view belongs to some
 * other buffer, so instances must never outlive the buffer that backs them.
 */
public class DeviceMemoryBufferView extends BaseDeviceMemoryBuffer {
  public DeviceMemoryBufferView(long address, long lengthInBytes) {
    // A null cleaner means closing this view releases nothing; the owning buffer
    // remains responsible for freeing the underlying memory.
    super(address, lengthInBytes, (MemoryBufferCleaner) null);
  }

  /**
   * Slicing a view is intentionally unsupported at the moment.
   * @throws UnsupportedOperationException always
   */
  @Override
  public synchronized final DeviceMemoryBufferView slice(long offset, long len) {
    throw new UnsupportedOperationException("Slice on view is not supported");
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/DuplicateKeepOption.java
|
/*
*
* Copyright (c) 2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Specifies which duplicate to keep; used by the dropListDuplicates function.
 * Mirrors cudf::duplicate_keep_option in /cpp/include/cudf/stream_compaction.hpp;
 * the numeric values must be kept in sync with the native enum.
 */
public enum DuplicateKeepOption {
  /** Keep any single instance of a duplicated value. */
  KEEP_ANY(0),
  /** Keep only the first instance of a duplicated value. */
  KEEP_FIRST(1),
  /** Keep only the last instance of a duplicated value. */
  KEEP_LAST(2),
  /** Remove all instances of values that have duplicates. */
  KEEP_NONE(3);

  /** Value of the corresponding native enum entry. */
  final int nativeId;

  DuplicateKeepOption(int nativeId) {
    this.nativeId = nativeId;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/GatherMap.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * This class tracks the data associated with a gather map, a buffer of INT32 elements that index
 * a source table and can be passed to a table gather operation.
 */
public class GatherMap implements AutoCloseable {
  // Backing device buffer; null once released via releaseBuffer() or close().
  private DeviceMemoryBuffer buffer;

  /**
   * Construct a gather map instance from a device buffer. The buffer length must be a multiple of
   * the {@link DType#INT32} size, as each row of the gather map is an INT32.
   * @param buffer device buffer backing the gather map data
   */
  public GatherMap(DeviceMemoryBuffer buffer) {
    if (buffer.getLength() % DType.INT32.getSizeInBytes() != 0) {
      throw new IllegalArgumentException("buffer length not a multiple of 4");
    }
    this.buffer = buffer;
  }

  /**
   * Return the number of rows in the gather map.
   * @throws IllegalStateException if this instance or its buffer has been closed
   */
  public long getRowCount() {
    ensureOpen();
    // Use the INT32 element size rather than a bare literal so this stays consistent
    // with the validation performed in the constructor.
    return buffer.getLength() / DType.INT32.getSizeInBytes();
  }

  /**
   * Create a column view that can be used to perform a gather operation. Note that the resulting
   * column view MUST NOT outlive the underlying device buffer within this instance!
   * @param startRow row offset where the resulting gather map will start
   * @param numRows number of rows in the resulting gather map
   * @return column view of gather map data
   */
  public ColumnView toColumnView(long startRow, int numRows) {
    ensureOpen();
    return ColumnView.fromDeviceBuffer(buffer, startRow * DType.INT32.getSizeInBytes(),
        DType.INT32, numRows);
  }

  /**
   * Release the underlying device buffer instance. After this is called, closing this instance
   * will not close the underlying device buffer. It is the responsibility of the caller to close
   * the returned device buffer.
   * @return device buffer backing gather map data or null if the buffer has already been released
   */
  public DeviceMemoryBuffer releaseBuffer() {
    DeviceMemoryBuffer result = buffer;
    buffer = null;
    return result;
  }

  /** Close the device buffer backing the gather map data. */
  @Override
  public void close() {
    if (buffer != null) {
      buffer.close();
      buffer = null;
    }
  }

  // Verifies the map has not been released/closed and its buffer is still live.
  private void ensureOpen() {
    if (buffer == null) {
      throw new IllegalStateException("instance is closed");
    }
    if (buffer.closed) {
      throw new IllegalStateException("buffer is closed");
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/GetJsonObjectOptions.java
|
/*
*
* Copyright (c) 2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Immutable set of options for JSON object extraction. Use {@link #builder()} to construct
 * instances, or {@link #DEFAULT} for the default settings.
 */
public final class GetJsonObjectOptions {
  /**
   * Shared instance with all default settings. Declared final so the shared constant
   * cannot be reassigned.
   */
  public static final GetJsonObjectOptions DEFAULT = new GetJsonObjectOptions.Builder().build();

  private final boolean allowSingleQuotes;
  private final boolean stripQuotesFromSingleStrings;
  private final boolean missingFieldsAsNulls;

  private GetJsonObjectOptions(Builder builder) {
    this.allowSingleQuotes = builder.allowSingleQuotes;
    this.stripQuotesFromSingleStrings = builder.stripQuotesFromSingleStrings;
    this.missingFieldsAsNulls = builder.missingFieldsAsNulls;
  }

  /** @return true if single quotes are allowed (default false). */
  public boolean isAllowSingleQuotes() {
    return allowSingleQuotes;
  }

  /** @return true if quotes are stripped from single strings (default true). */
  public boolean isStripQuotesFromSingleStrings() {
    return stripQuotesFromSingleStrings;
  }

  /** @return true if missing fields are returned as nulls (default false). */
  public boolean isMissingFieldsAsNulls() {
    return missingFieldsAsNulls;
  }

  /** @return a new builder initialized with the default settings. */
  public static Builder builder() {
    return new Builder();
  }

  /** Builder for {@link GetJsonObjectOptions}. */
  public static final class Builder {
    private boolean allowSingleQuotes = false;
    private boolean stripQuotesFromSingleStrings = true;
    private boolean missingFieldsAsNulls = false;

    public Builder allowSingleQuotes(boolean allowSingleQuotes) {
      this.allowSingleQuotes = allowSingleQuotes;
      return this;
    }

    public Builder stripQuotesFromSingleStrings(boolean stripQuotesFromSingleStrings) {
      this.stripQuotesFromSingleStrings = stripQuotesFromSingleStrings;
      return this;
    }

    public Builder missingFieldsAsNulls(boolean missingFieldsAsNulls) {
      this.missingFieldsAsNulls = missingFieldsAsNulls;
      return this;
    }

    public GetJsonObjectOptions build() {
      return new GetJsonObjectOptions(this);
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/GroupByAggregation.java
|
/*
*
* Copyright (c) 2021-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * An aggregation that can be used for a group by operation. Instances are immutable
 * wrappers around an {@link Aggregation}, created via the static factory methods below.
 */
public final class GroupByAggregation {
  private final Aggregation wrapped;

  private GroupByAggregation(Aggregation wrapped) {
    this.wrapped = wrapped;
  }

  // Package-private accessor for the underlying Aggregation this wraps.
  Aggregation getWrapped() {
    return wrapped;
  }

  /**
   * Add a column to the Aggregation so it can be used on a specific column of data.
   * @param columnIndex the index of the column to operate on.
   * @return this aggregation bound to the given column.
   */
  public GroupByAggregationOnColumn onColumn(int columnIndex) {
    return new GroupByAggregationOnColumn(this, columnIndex);
  }

  @Override
  public int hashCode() {
    return wrapped.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof GroupByAggregation) {
      GroupByAggregation o = (GroupByAggregation) other;
      return wrapped.equals(o.wrapped);
    }
    return false;
  }

  /**
   * Count number of valid, a.k.a. non-null, elements.
   */
  public static GroupByAggregation count() {
    return new GroupByAggregation(Aggregation.count());
  }

  /**
   * Count number of elements.
   * @param nullPolicy INCLUDE if nulls should be counted. EXCLUDE if only non-null values
   *                   should be counted.
   */
  public static GroupByAggregation count(NullPolicy nullPolicy) {
    return new GroupByAggregation(Aggregation.count(nullPolicy));
  }

  /**
   * Sum Aggregation
   */
  public static GroupByAggregation sum() {
    return new GroupByAggregation(Aggregation.sum());
  }

  /**
   * Product Aggregation.
   */
  public static GroupByAggregation product() {
    return new GroupByAggregation(Aggregation.product());
  }

  /**
   * Index of max element. Please note that when using this aggregation if the
   * data is not already sorted by the grouping keys it may be automatically sorted
   * prior to doing the aggregation. This would result in an index into the sorted data being
   * returned.
   */
  public static GroupByAggregation argMax() {
    return new GroupByAggregation(Aggregation.argMax());
  }

  /**
   * Index of min element. Please note that when using this aggregation if the
   * data is not already sorted by the grouping keys it may be automatically sorted
   * prior to doing the aggregation. This would result in an index into the sorted data being
   * returned.
   */
  public static GroupByAggregation argMin() {
    return new GroupByAggregation(Aggregation.argMin());
  }

  /**
   * Min Aggregation
   */
  public static GroupByAggregation min() {
    return new GroupByAggregation(Aggregation.min());
  }

  /**
   * Max Aggregation
   */
  public static GroupByAggregation max() {
    return new GroupByAggregation(Aggregation.max());
  }

  /**
   * Arithmetic mean reduction.
   */
  public static GroupByAggregation mean() {
    return new GroupByAggregation(Aggregation.mean());
  }

  /**
   * Sum of square of differences from mean.
   */
  public static GroupByAggregation M2() {
    return new GroupByAggregation(Aggregation.M2());
  }

  /**
   * Variance aggregation with 1 as the delta degrees of freedom.
   */
  public static GroupByAggregation variance() {
    return new GroupByAggregation(Aggregation.variance());
  }

  /**
   * Variance aggregation.
   * @param ddof delta degrees of freedom. The divisor used in calculation of variance is
   *             <code>N - ddof</code>, where N is the population size.
   */
  public static GroupByAggregation variance(int ddof) {
    return new GroupByAggregation(Aggregation.variance(ddof));
  }

  /**
   * Standard deviation aggregation with 1 as the delta degrees of freedom.
   */
  public static GroupByAggregation standardDeviation() {
    return new GroupByAggregation(Aggregation.standardDeviation());
  }

  /**
   * Standard deviation aggregation.
   * @param ddof delta degrees of freedom. The divisor used in calculation of std is
   *             <code>N - ddof</code>, where N is the population size.
   */
  public static GroupByAggregation standardDeviation(int ddof) {
    return new GroupByAggregation(Aggregation.standardDeviation(ddof));
  }

  /**
   * Aggregate to compute the specified quantiles. Uses linear interpolation by default.
   */
  public static GroupByAggregation quantile(double ... quantiles) {
    return new GroupByAggregation(Aggregation.quantile(quantiles));
  }

  /**
   * Aggregate to compute various quantiles.
   */
  public static GroupByAggregation quantile(QuantileMethod method, double ... quantiles) {
    return new GroupByAggregation(Aggregation.quantile(method, quantiles));
  }

  /**
   * Median reduction.
   */
  public static GroupByAggregation median() {
    return new GroupByAggregation(Aggregation.median());
  }

  /**
   * Number of unique, non-null, elements.
   */
  public static GroupByAggregation nunique() {
    return new GroupByAggregation(Aggregation.nunique());
  }

  /**
   * Number of unique elements.
   * @param nullPolicy INCLUDE if nulls should be counted else EXCLUDE. If nulls are counted they
   *                   compare as equal so multiple null values in a range would all only
   *                   increase the count by 1.
   */
  public static GroupByAggregation nunique(NullPolicy nullPolicy) {
    return new GroupByAggregation(Aggregation.nunique(nullPolicy));
  }

  /**
   * Get the nth, non-null, element in a group.
   * @param offset the offset to look at. Negative numbers go from the end of the group. Any
   *               value outside of the group range results in a null.
   */
  public static GroupByAggregation nth(int offset) {
    return new GroupByAggregation(Aggregation.nth(offset));
  }

  /**
   * Get the nth element in a group.
   * @param offset the offset to look at. Negative numbers go from the end of the group. Any
   *               value outside of the group range results in a null.
   * @param nullPolicy INCLUDE if nulls should be included in the aggregation or EXCLUDE if they
   *                   should be skipped.
   */
  public static GroupByAggregation nth(int offset, NullPolicy nullPolicy) {
    return new GroupByAggregation(Aggregation.nth(offset, nullPolicy));
  }

  /**
   * Collect the values into a list. Nulls will be skipped.
   */
  public static GroupByAggregation collectList() {
    return new GroupByAggregation(Aggregation.collectList());
  }

  /**
   * Collect the values into a list.
   *
   * @param nullPolicy Indicates whether to include/exclude nulls during collection.
   */
  public static GroupByAggregation collectList(NullPolicy nullPolicy) {
    return new GroupByAggregation(Aggregation.collectList(nullPolicy));
  }

  /**
   * Collect the values into a set. All null values will be excluded, and all NaN values are regarded as
   * unique instances.
   */
  public static GroupByAggregation collectSet() {
    return new GroupByAggregation(Aggregation.collectSet());
  }

  /**
   * Collect the values into a set.
   *
   * @param nullPolicy Indicates whether to include/exclude nulls during collection.
   * @param nullEquality Flag to specify whether null entries within each list should be considered equal.
   * @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
   */
  public static GroupByAggregation collectSet(NullPolicy nullPolicy, NullEquality nullEquality, NaNEquality nanEquality) {
    return new GroupByAggregation(Aggregation.collectSet(nullPolicy, nullEquality, nanEquality));
  }

  /**
   * Merge the partial lists produced by multiple CollectListAggregations.
   * NOTICE: The partial lists to be merged should NOT include any null list element (but can include null list entries).
   */
  public static GroupByAggregation mergeLists() {
    return new GroupByAggregation(Aggregation.mergeLists());
  }

  /**
   * Merge the partial sets produced by multiple CollectSetAggregations. Each null/NaN value will be regarded as
   * a unique instance.
   */
  public static GroupByAggregation mergeSets() {
    return new GroupByAggregation(Aggregation.mergeSets());
  }

  /**
   * Execute an aggregation using a host-side user-defined function (UDF).
   * @param wrapper The wrapper for the native host UDF instance.
   * @return A new GroupByAggregation instance
   */
  public static GroupByAggregation hostUDF(HostUDFWrapper wrapper) {
    return new GroupByAggregation(Aggregation.hostUDF(wrapper));
  }

  /**
   * Merge the partial sets produced by multiple CollectSetAggregations.
   *
   * @param nullEquality Flag to specify whether null entries within each list should be considered equal.
   * @param nanEquality Flag to specify whether NaN values in floating point column should be considered equal.
   */
  public static GroupByAggregation mergeSets(NullEquality nullEquality, NaNEquality nanEquality) {
    return new GroupByAggregation(Aggregation.mergeSets(nullEquality, nanEquality));
  }

  /**
   * Merge the partial M2 values produced by multiple instances of M2Aggregation.
   */
  public static GroupByAggregation mergeM2() {
    return new GroupByAggregation(Aggregation.mergeM2());
  }

  /**
   * Compute a t-digest from on a fixed-width numeric input column.
   *
   * @param delta Required accuracy (number of buckets).
   * @return A list of centroids per grouping, where each centroid has a mean value and a
   *         weight. The number of centroids will be &lt;= delta.
   */
  public static GroupByAggregation createTDigest(int delta) {
    return new GroupByAggregation(Aggregation.createTDigest(delta));
  }

  /**
   * Merge t-digests.
   *
   * @param delta Required accuracy (number of buckets).
   * @return A list of centroids per grouping, where each centroid has a mean value and a
   *         weight. The number of centroids will be &lt;= delta.
   */
  public static GroupByAggregation mergeTDigest(int delta) {
    return new GroupByAggregation(Aggregation.mergeTDigest(delta));
  }

  /**
   * Histogram aggregation, computing the frequencies for each unique row.
   *
   * A histogram is given as a lists column, in which the first child stores unique rows from
   * the input values and the second child stores their corresponding frequencies.
   *
   * @return A lists of structs column in which each list contains a histogram corresponding to
   *         an input key.
   */
  public static GroupByAggregation histogram() {
    return new GroupByAggregation(Aggregation.histogram());
  }

  /**
   * MergeHistogram aggregation, to merge multiple histograms.
   *
   * @return A new histogram in which the frequencies of the unique rows are sum up.
   */
  public static GroupByAggregation mergeHistogram() {
    return new GroupByAggregation(Aggregation.mergeHistogram());
  }

  /**
   * Bitwise AND aggregation, computing the bitwise AND of all non-null values in a group.
   */
  public static GroupByAggregation bitAnd() {
    return new GroupByAggregation(Aggregation.bitAnd());
  }

  /**
   * Bitwise OR aggregation, computing the bitwise OR of all non-null values in a group.
   */
  public static GroupByAggregation bitOr() {
    return new GroupByAggregation(Aggregation.bitOr());
  }

  /**
   * Bitwise XOR aggregation, computing the bitwise XOR of all non-null values in a group.
   */
  public static GroupByAggregation bitXor() {
    return new GroupByAggregation(Aggregation.bitXor());
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/GroupByAggregationOnColumn.java
|
/*
*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * A GroupByAggregation bound to a specific column in a table.
 */
public final class GroupByAggregationOnColumn {
  protected final GroupByAggregation wrapped;
  protected final int columnIndex;

  GroupByAggregationOnColumn(GroupByAggregation wrapped, int columnIndex) {
    this.wrapped = wrapped;
    this.columnIndex = columnIndex;
  }

  /** @return the index of the column this aggregation operates on. */
  public int getColumnIndex() {
    return columnIndex;
  }

  // Package-private accessor for the wrapped aggregation.
  GroupByAggregation getWrapped() {
    return wrapped;
  }

  @Override
  public int hashCode() {
    // Standard 31-based mixing, kept free of boxing.
    return 31 * wrapped.hashCode() + columnIndex;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (!(other instanceof GroupByAggregationOnColumn)) {
      return false;
    }
    GroupByAggregationOnColumn that = (GroupByAggregationOnColumn) other;
    return wrapped.equals(that.wrapped) && columnIndex == that.columnIndex;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/GroupByOptions.java
|
/*
*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Options for groupby (see cudf::groupby::groupby's constructor)
 */
public class GroupByOptions {
  /** Shared instance with all default settings. Declared final so it cannot be reassigned. */
  public static final GroupByOptions DEFAULT = new GroupByOptions(new Builder());

  private final boolean ignoreNullKeys;
  private final boolean keysSorted;
  private final boolean[] keysDescending;
  private final boolean[] keysNullSmallest;

  private GroupByOptions(Builder builder) {
    ignoreNullKeys = builder.ignoreNullKeys;
    keysSorted = builder.keysSorted;
    keysDescending = builder.keysDescending;
    keysNullSmallest = builder.keysNullSmallest;
  }

  /** @return true if null grouping keys should be ignored. */
  boolean getIgnoreNullKeys() {
    return ignoreNullKeys;
  }

  /** @return true if the key rows were declared as already sorted. */
  boolean getKeySorted() {
    return keysSorted;
  }

  // Note: returns the internal array without copying; callers must not modify it.
  boolean[] getKeysDescending() {
    return keysDescending;
  }

  // Note: returns the internal array without copying; callers must not modify it.
  boolean[] getKeysNullSmallest() {
    return keysNullSmallest;
  }

  /** @return a new builder initialized with the default settings. */
  public static Builder builder() {
    return new Builder();
  }

  /** Builder for {@link GroupByOptions}. */
  public static class Builder {
    private boolean ignoreNullKeys = false;
    private boolean keysSorted = false;
    private boolean[] keysDescending = new boolean[0];
    private boolean[] keysNullSmallest = new boolean[0];

    /**
     * If true, the cudf groupby will ignore grouping keys that are null.
     * The default value is false, so a null in the grouping column will produce a
     * group.
     */
    public Builder withIgnoreNullKeys(boolean ignoreNullKeys) {
      this.ignoreNullKeys = ignoreNullKeys;
      return this;
    }

    /**
     * Indicates whether rows in `keys` are already sorted.
     * The default value is false.
     *
     * If the `keys` are already sorted, better performance may be achieved by
     * passing `keysSorted == true` and indicating the ascending/descending
     * order of each column and null order by calling `withKeysDescending` and
     * `withKeysNullSmallest`, respectively.
     */
    public Builder withKeysSorted(boolean keysSorted) {
      this.keysSorted = keysSorted;
      return this;
    }

    /**
     * If `keysSorted == true`, indicates whether each
     * column is ascending/descending. If empty or null, assumes all columns are
     * ascending. Ignored if `keysSorted == false`.
     */
    public Builder withKeysDescending(boolean... keysDescending) {
      if (keysDescending == null) {
        // Normalize null to an empty array so downstream code never sees null
        this.keysDescending = new boolean[0];
      } else {
        this.keysDescending = keysDescending;
      }
      return this;
    }

    /**
     * If `keysSorted == true`, indicates the ordering
     * of null values in each column. If empty or null, assumes all columns
     * use 'null smallest'. Ignored if `keysSorted == false`.
     */
    public Builder withKeysNullSmallest(boolean... keysNullSmallest) {
      if (keysNullSmallest == null) {
        // Normalize null to an empty array so downstream code never sees null
        this.keysNullSmallest = new boolean[0];
      } else {
        this.keysNullSmallest = keysNullSmallest;
      }
      return this;
    }

    public GroupByOptions build() {
      return new GroupByOptions(this);
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/GroupByScanAggregation.java
|
/*
*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * An aggregation that can be used for a grouped scan. Instances are immutable wrappers
 * around an {@link Aggregation}, created via the static factory methods below.
 */
public final class GroupByScanAggregation {
  private final Aggregation wrapped;

  private GroupByScanAggregation(Aggregation wrapped) {
    this.wrapped = wrapped;
  }

  // Creates the native aggregation instance backing this scan; delegates to the wrapped
  // Aggregation.
  long createNativeInstance() {
    return wrapped.createNativeInstance();
  }

  // Default output value handle for this aggregation; delegates to the wrapped Aggregation.
  long getDefaultOutput() {
    return wrapped.getDefaultOutput();
  }

  // Package-private accessor for the underlying Aggregation this wraps.
  Aggregation getWrapped() {
    return wrapped;
  }

  /**
   * Add a column to the Aggregation so it can be used on a specific column of data.
   * @param columnIndex the index of the column to operate on.
   * @return this aggregation bound to the given column.
   */
  public GroupByScanAggregationOnColumn onColumn(int columnIndex) {
    return new GroupByScanAggregationOnColumn(this, columnIndex);
  }

  @Override
  public int hashCode() {
    return wrapped.hashCode();
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    } else if (other instanceof GroupByScanAggregation) {
      GroupByScanAggregation o = (GroupByScanAggregation) other;
      return wrapped.equals(o.wrapped);
    }
    return false;
  }

  /**
   * Sum Aggregation
   */
  public static GroupByScanAggregation sum() {
    return new GroupByScanAggregation(Aggregation.sum());
  }

  /**
   * Product Aggregation.
   */
  public static GroupByScanAggregation product() {
    return new GroupByScanAggregation(Aggregation.product());
  }

  /**
   * Min Aggregation
   */
  public static GroupByScanAggregation min() {
    return new GroupByScanAggregation(Aggregation.min());
  }

  /**
   * Max Aggregation
   */
  public static GroupByScanAggregation max() {
    return new GroupByScanAggregation(Aggregation.max());
  }

  /**
   * Count number of elements.
   * @param nullPolicy INCLUDE if nulls should be counted. EXCLUDE if only non-null values
   *                   should be counted.
   */
  public static GroupByScanAggregation count(NullPolicy nullPolicy) {
    return new GroupByScanAggregation(Aggregation.count(nullPolicy));
  }

  /**
   * Get the row's ranking.
   */
  public static GroupByScanAggregation rank() {
    return new GroupByScanAggregation(Aggregation.rank());
  }

  /**
   * Get the row's dense ranking.
   */
  public static GroupByScanAggregation denseRank() {
    return new GroupByScanAggregation(Aggregation.denseRank());
  }

  /**
   * Get the row's percent ranking.
   */
  public static GroupByScanAggregation percentRank() {
    return new GroupByScanAggregation(Aggregation.percentRank());
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/GroupByScanAggregationOnColumn.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * A GroupByScanAggregation bound to a specific column in a table.
 */
public final class GroupByScanAggregationOnColumn {
  protected final GroupByScanAggregation wrapped;
  protected final int columnIndex;

  GroupByScanAggregationOnColumn(GroupByScanAggregation wrapped, int columnIndex) {
    this.wrapped = wrapped;
    this.columnIndex = columnIndex;
  }

  /** @return the index of the column this aggregation operates on. */
  public int getColumnIndex() {
    return columnIndex;
  }

  @Override
  public int hashCode() {
    // Standard 31-based mixing, kept free of boxing.
    return 31 * wrapped.hashCode() + columnIndex;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (!(other instanceof GroupByScanAggregationOnColumn)) {
      return false;
    }
    GroupByScanAggregationOnColumn that = (GroupByScanAggregationOnColumn) other;
    return wrapped.equals(that.wrapped) && columnIndex == that.columnIndex;
  }

  // Delegates to the wrapped aggregation.
  long createNativeInstance() {
    return wrapped.createNativeInstance();
  }

  // Delegates to the wrapped aggregation.
  long getDefaultOutput() {
    return wrapped.getDefaultOutput();
  }

  // Package-private accessor for the wrapped aggregation.
  GroupByScanAggregation getWrapped() {
    return wrapped;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HashJoin.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class represents a hash table built from the join keys of the right-side table for a
* join operation. This hash table can then be reused across a series of left probe tables
* to compute gather maps for joins more efficiently when the right-side table is not changing.
* It can also be used to query the output row count of a join and then pass that result to the
* operation that generates the join gather maps to avoid redundant computation when the output
* row count must be checked before manifesting the join gather maps.
*/
public class HashJoin implements AutoCloseable {
  static {
    // Make sure the cudf native library is loaded before any native method below is used.
    NativeDepsLoader.loadNativeDeps();
  }

  private static final Logger log = LoggerFactory.getLogger(HashJoin.class);

  /**
   * Owns the native hash-table handle plus the copied build-key Table, and releases both
   * exactly once — either through an explicit close() or via the MemoryCleaner leak tracker.
   */
  private static class HashJoinCleaner extends MemoryCleaner.Cleaner {
    private Table buildKeys;
    private long nativeHandle;

    HashJoinCleaner(Table buildKeys, long nativeHandle) {
      this.buildKeys = buildKeys;
      this.nativeHandle = nativeHandle;
      addRef();
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      long origAddress = nativeHandle;
      boolean neededCleanup = nativeHandle != 0;
      if (neededCleanup) {
        try {
          destroy(nativeHandle);
          buildKeys.close();
          buildKeys = null;
        } finally {
          // Zero the handle even if destroy/close throws so we can never double-free it.
          nativeHandle = 0;
        }
        if (logErrorIfNotClean) {
          // Only reached when cleanup was triggered by leak detection rather than close().
          log.error("A HASH TABLE WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress));
        }
      }
      return neededCleanup;
    }

    @Override
    public boolean isClean() {
      return nativeHandle == 0;
    }
  }

  private final HashJoinCleaner cleaner;
  private final boolean compareNulls;
  private boolean isClosed = false;

  /**
   * Construct a hash table for a join from a table representing the join key columns from the
   * right-side table in the join. The resulting instance must be closed to release the
   * GPU resources associated with the instance.
   * @param buildKeys table view containing the join keys for the right-side join table
   * @param compareNulls true if null key values should match otherwise false
   */
  public HashJoin(Table buildKeys, boolean compareNulls) {
    this.compareNulls = compareNulls;
    // Take an independent copy of the key columns so the caller remains free to close theirs.
    Table buildTable = new Table(buildKeys.getColumns());
    try {
      long handle = create(buildTable.getNativeView(), compareNulls);
      this.cleaner = new HashJoinCleaner(buildTable, handle);
      MemoryCleaner.register(this, cleaner);
    } catch (Throwable t) {
      // Construction failed after copying the table: release the copy before rethrowing,
      // keeping any secondary failure attached as a suppressed exception.
      try {
        buildTable.close();
      } catch (Throwable t2) {
        t.addSuppressed(t2);
      }
      throw t;
    }
  }

  /**
   * Release the native hash table and the copied build keys.
   * @throws IllegalStateException if called more than once
   */
  @Override
  public synchronized void close() {
    cleaner.delRef();
    if (isClosed) {
      cleaner.logRefCountDebug("double free " + this);
      throw new IllegalStateException("Close called too many times " + this);
    }
    cleaner.clean(false);
    isClosed = true;
  }

  // Raw native handle of the hash table; becomes 0 once cleaned.
  long getNativeView() {
    return cleaner.nativeHandle;
  }

  /** Get the number of join key columns for the table that was used to generate the hash table. */
  public long getNumberOfColumns() {
    return cleaner.buildKeys.getNumberOfColumns();
  }

  /** Returns true if the hash table was built to match on nulls otherwise false. */
  public boolean getCompareNulls() {
    return compareNulls;
  }

  private static native long create(long tableView, boolean nullEqual);

  private static native void destroy(long handle);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HashType.java
|
/*
*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* Hash algorithm identifiers, mirroring native enum cudf::hash_id
*/
/**
 * Hash algorithm identifiers, mirroring native enum cudf::hash_id.
 */
public enum HashType {
  IDENTITY(0),
  MURMUR3(1);

  // Cached so lookups do not re-allocate the values() array each call.
  private static final HashType[] HASH_TYPES = HashType.values();

  final int nativeId;

  HashType(int nativeId) {
    this.nativeId = nativeId;
  }

  /** @return the numeric id used for this algorithm on the native side. */
  public int getNativeId() {
    return nativeId;
  }

  /**
   * Map a native hash id back to its Java enum constant.
   * @throws IllegalArgumentException if the id matches no known HashType
   */
  public static HashType fromNative(int nativeId) {
    for (HashType candidate : HASH_TYPES) {
      if (candidate.nativeId == nativeId) {
        return candidate;
      }
    }
    throw new IllegalArgumentException("Could not translate " + nativeId + " into a HashType");
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostBufferConsumer.java
|
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Provides a set of APIs for consuming host buffers. This is typically used
 * when writing out Tables in various file formats.
 */
public interface HostBufferConsumer {
  /**
   * Consume a buffer.
   * @param buffer the buffer. Be sure to close this buffer when you are done
   *               with it or it will leak.
   * @param len the length of the buffer that is valid. The valid data will be 0 until len.
   */
  void handleBuffer(HostMemoryBuffer buffer, long len);

  /**
   * Indicates that no more buffers will be supplied.
   * The default implementation is a no-op.
   */
  default void done() {}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostBufferProvider.java
|
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Provides a set of APIs for providing host buffers to be read.
 */
public interface HostBufferProvider extends AutoCloseable {
  /**
   * Place data into the given buffer.
   * @param buffer the buffer to put data into.
   * @param len the maximum amount of data to put into buffer. Less is okay if at EOF.
   * @return the actual amount of data put into the buffer.
   */
  long readInto(HostMemoryBuffer buffer, long len);

  /**
   * Indicates that no more buffers will be supplied.
   * The default implementation releases nothing.
   */
  @Override
  default void close() {}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostColumnVector.java
|
/*
*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.StringJoiner;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
/**
* Similar to a ColumnVector, but the data is stored in host memory and accessible directly from
* the JVM. This class holds references to off heap memory and is reference counted to know when
* to release it. Call close to decrement the reference count when you are done with the column,
* and call incRefCount to increment the reference count.
*/
public final class HostColumnVector extends HostColumnVectorCore {
  /**
   * Interface to handle events for this HostColumnVector. Only invoked during
   * close, hence onClosed is the only event.
   */
  public interface EventHandler {
    /**
     * onClosed is invoked with the updated refCount during close.
     * The last invocation of onClosed will be with refCount=0.
     *
     * Note: the callback is invoked while this HostColumnVector's lock is held.
     *
     * @param cv reference to the HostColumnVector we are closing
     * @param refCount the updated ref count for this HostColumnVector at
     *                 the time of invocation
     */
    void onClosed(HostColumnVector cv, int refCount);
  }

  /**
   * The size in bytes of an offset entry
   */
  static final int OFFSET_SIZE = DType.INT32.getSizeInBytes();

  // Number of live references; guarded by this instance's monitor (see close/incRefCountInternal).
  private int refCount;
  // Optional close-event callback; guarded by this instance's monitor.
  private EventHandler eventHandler;
/**
 * Create a new column vector with data populated on the host.
 * Convenience overload of the offset-buffer constructor with a null offset buffer,
 * i.e. for types that do not carry offsets.
 */
HostColumnVector(DType type, long rows, Optional<Long> nullCount,
                 HostMemoryBuffer hostDataBuffer, HostMemoryBuffer hostValidityBuffer) {
  this(type, rows, nullCount, hostDataBuffer, hostValidityBuffer, null);
}
/**
 * Create a new column vector with data populated on the host.
 * @param type the type of the vector
 * @param rows the number of rows in the vector.
 * @param nullCount the number of nulls in the vector.
 * @param hostDataBuffer The host side data for the vector. In the case of STRING
 *                       this is the string data stored as bytes.
 * @param hostValidityBuffer Arrow-like validity buffer 1 bit per row, with padding for
 *                           64-bit alignment.
 * @param offsetBuffer only valid for STRING this is the offsets into
 *                     the hostDataBuffer indicating the start and end of a string
 *                     entry. It should be (rows + 1) ints.
 * @param nestedHcv list of child HostColumnVectorCore(s) for complex types
 */
//Constructor for lists and struct
public HostColumnVector(DType type, long rows, Optional<Long> nullCount,
                        HostMemoryBuffer hostDataBuffer, HostMemoryBuffer hostValidityBuffer,
                        HostMemoryBuffer offsetBuffer, List<HostColumnVectorCore> nestedHcv) {
  // NOTE: This constructor MUST NOT examine the contents of any host buffers, as they may be
  // asynchronously written by the device.
  super(type, rows, nullCount, hostDataBuffer, hostValidityBuffer, offsetBuffer, nestedHcv);
  // Constructors start from 0 and take the initial reference themselves.
  refCount = 0;
  incRefCountInternal(true);
}
// Package-private constructor for non-LIST columns (no child vectors).
HostColumnVector(DType type, long rows, Optional<Long> nullCount,
                 HostMemoryBuffer hostDataBuffer, HostMemoryBuffer hostValidityBuffer,
                 HostMemoryBuffer offsetBuffer) {
  // NOTE: This constructor MUST NOT examine the contents of any host buffers, as they may be
  // asynchronously written by the device.
  super(type, rows, nullCount, hostDataBuffer, hostValidityBuffer, offsetBuffer, new ArrayList<>());
  assert !type.equals(DType.LIST) : "This constructor should not be used for list type";
  // A non-zero null count is meaningless without a validity mask to locate the nulls.
  if (nullCount.isPresent() && nullCount.get() > 0 && hostValidityBuffer == null) {
    throw new IllegalStateException("Buffer cannot have a nullCount without a validity buffer");
  }
  // The LIST half of this check is always true here given the assert above; kept for symmetry.
  if (!type.equals(DType.STRING) && !type.equals(DType.LIST)) {
    assert offsetBuffer == null : "offsets are only supported for STRING and LIST";
  }
  refCount = 0;
  incRefCountInternal(true);
}
/**
 * Set an event handler for this host vector. Passing null clears any
 * previously installed handler.
 *
 * @param newHandler the EventHandler to use from this point forward
 * @return the previously registered handler, or null if none was set
 */
public synchronized EventHandler setEventHandler(EventHandler newHandler) {
  final EventHandler previous = this.eventHandler;
  this.eventHandler = newHandler;
  return previous;
}

/**
 * Returns the event handler currently registered on this HostColumnVector,
 * or null if no handler is associated.
 */
public synchronized EventHandler getEventHandler() {
  return this.eventHandler;
}
/**
 * This is a really ugly API, but it is possible that the lifecycle of a column of
 * data may not have a clear lifecycle thanks to java and GC. This API informs the leak
 * tracking code that this is expected for this column, and big scary warnings should
 * not be printed when this happens.
 */
public void noWarnLeakExpected() {
  // Delegates to the off-heap tracker; affects leak logging only, not resource release.
  offHeap.noWarnLeakExpected();
}
/**
 * Close this Vector and free memory allocated for HostMemoryBuffer and DeviceMemoryBuffer.
 * The registered EventHandler (if any) is notified with the updated refCount on every close.
 * @throws IllegalStateException if close is called more times than incRefCount
 */
@Override
public synchronized void close() {
  refCount--;
  offHeap.delRef();
  try {
    if (refCount == 0) {
      // Last reference: release the off-heap buffers and cascade to child vectors.
      offHeap.clean(false);
      for (HostColumnVectorCore child : children) {
        child.close();
      }
    } else if (refCount < 0) {
      // More closes than references: log the ref-count history and fail loudly.
      offHeap.logRefCountDebug("double free " + this);
      throw new IllegalStateException("Close called too many times " + this);
    }
  } finally {
    // Notify the handler even on the exceptional paths above.
    if (eventHandler != null) {
      eventHandler.onClosed(this, refCount);
    }
  }
}
/** Human-readable summary of this vector's shape, type and off-heap state. */
@Override
public String toString() {
  final StringBuilder text = new StringBuilder("HostColumnVector{");
  text.append("rows=").append(rows);
  text.append(", type=").append(type);
  text.append(", nullCount=").append(nullCount);
  text.append(", offHeap=").append(offHeap);
  return text.append('}').toString();
}
/////////////////////////////////////////////////////////////////////////////
// METADATA ACCESS
/////////////////////////////////////////////////////////////////////////////

/**
 * Increment the reference count for this column. You need to call close on this
 * to decrement the reference count again.
 */
public HostColumnVector incRefCount() {
  return incRefCountInternal(false);
}

// Shared ref-count increment. isFirstTime is true only from constructors, where
// refCount is still 0 and must not be treated as "already closed".
private synchronized HostColumnVector incRefCountInternal(boolean isFirstTime) {
  offHeap.addRef();
  if (refCount <= 0 && !isFirstTime) {
    offHeap.logRefCountDebug("INC AFTER CLOSE " + this);
    throw new IllegalStateException("Column is already closed");
  }
  refCount++;
  return this;
}

/**
 * Returns this column's current refcount
 */
public synchronized int getRefCount() {
  return refCount;
}
/////////////////////////////////////////////////////////////////////////////
// DATA MOVEMENT
/////////////////////////////////////////////////////////////////////////////

/**
 * Copy the data to the device.
 * @return a new device-side ColumnVector owning its own buffers; the caller must close it.
 */
public ColumnVector copyToDevice() {
  if (rows == 0) {
    // Empty columns need no buffer copies at all.
    if (type.isNestedType()) {
      return ColumnView.NestedColumnVector.createColumnVector(type, 0,
          null, null, null, Optional.of(0L), children);
    } else {
      return new ColumnVector(type, 0, Optional.of(0L), null, null, null);
    }
  }
  // The simplest way is just to copy the buffers and pass them down.
  DeviceMemoryBuffer data = null;
  DeviceMemoryBuffer valid = null;
  DeviceMemoryBuffer offsets = null;
  try {
    if (!type.isNestedType()) {
      HostMemoryBuffer hdata = this.offHeap.data;
      if (hdata != null) {
        long dataLen = rows * type.getSizeInBytes();
        if (type.equals(DType.STRING)) {
          // This needs a different type: string data length comes from the last offset.
          dataLen = getEndStringOffset(rows - 1);
          if (dataLen == 0 && getNullCount() == 0) {
            // This is a work around to an issue where a column of all empty strings must have at
            // least one byte or it will not be interpreted correctly.
            dataLen = 1;
          }
        }
        data = DeviceMemoryBuffer.allocate(dataLen);
        data.copyFromHostBuffer(hdata, 0, dataLen);
      }
      HostMemoryBuffer hvalid = this.offHeap.valid;
      if (hvalid != null) {
        long validLen = ColumnView.getValidityBufferSize((int) rows);
        valid = DeviceMemoryBuffer.allocate(validLen);
        valid.copyFromHostBuffer(hvalid, 0, validLen);
      }
      HostMemoryBuffer hoff = this.offHeap.offsets;
      if (hoff != null) {
        // rows + 1 offset entries bracket every row's start and end.
        long offsetsLen = OFFSET_SIZE * (rows + 1);
        offsets = DeviceMemoryBuffer.allocate(offsetsLen);
        offsets.copyFromHostBuffer(hoff, 0, offsetsLen);
      }
      ColumnVector ret = new ColumnVector(type, rows, nullCount, data, valid, offsets);
      // Ownership transferred to ret; null the locals so the finally block does not free them.
      data = null;
      valid = null;
      offsets = null;
      return ret;
    } else {
      return ColumnView.NestedColumnVector.createColumnVector(
          type, (int) rows, offHeap.data, offHeap.valid, offHeap.offsets, nullCount, children);
    }
  } finally {
    // Release any device buffer that was allocated but not handed off (error paths).
    if (data != null) {
      data.close();
    }
    if (valid != null) {
      valid.close();
    }
    if (offsets != null) {
      offsets.close();
    }
  }
}
/////////////////////////////////////////////////////////////////////////////
// BUILDER
/////////////////////////////////////////////////////////////////////////////

/**
 * Create a new Builder to hold the specified number of rows. Be sure to close the builder when
 * done with it. Please try to use {@see #build(int, Consumer)} instead to avoid needing to
 * close the builder.
 * @param type the type of vector to build.
 * @param rows the number of rows this builder can hold
 * @return the builder to use.
 */
public static Builder builder(DType type, int rows) {
  return new Builder(type, rows, 0);
}

/**
 * Create a new Builder to hold the specified number of rows and with enough space to hold the
 * given amount of string data. Be sure to close the builder when done with it. Please try to
 * use {@see #build(int, int, Consumer)} instead to avoid needing to close the builder.
 * @param rows the number of rows this builder can hold
 * @param stringBufferSize the size of the string buffer to allocate.
 * @return the builder to use.
 */
public static Builder builder(int rows, long stringBufferSize) {
  return new Builder(DType.STRING, rows, stringBufferSize);
}
/**
 * Create a new vector.
 * @param type the type of vector to build.
 * @param rows maximum number of rows that the vector can hold.
 * @param init what will initialize the vector.
 * @return the created vector.
 */
public static HostColumnVector build(DType type, int rows, Consumer<Builder> init) {
  try (Builder columnBuilder = builder(type, rows)) {
    init.accept(columnBuilder);
    return columnBuilder.build();
  }
}

/**
 * Create a new STRING vector with room for the given amount of string data.
 * @param rows maximum number of rows that the vector can hold.
 * @param stringBufferSize bytes reserved for string data.
 * @param init what will initialize the vector.
 * @return the created vector.
 */
public static HostColumnVector build(int rows, long stringBufferSize, Consumer<Builder> init) {
  try (Builder columnBuilder = builder(rows, stringBufferSize)) {
    init.accept(columnBuilder);
    return columnBuilder.build();
  }
}
/**
 * Create a new LIST vector from the given Java lists, one list per row.
 * @param dataType description of the LIST column to build
 * @param values one List per output row
 * @return the created vector; the caller must close it.
 */
// @SafeVarargs is valid here (static method) and suppresses the heap-pollution
// warning callers would otherwise get for the generic varargs parameter; the
// array is only read, never written, so the annotation's contract holds.
@SafeVarargs
public static <T> HostColumnVector fromLists(DataType dataType, List<T>... values) {
  try (ColumnBuilder cb = new ColumnBuilder(dataType, values.length)) {
    cb.appendLists(values);
    return cb.build();
  }
}
/**
 * Create a new STRUCT vector from the given struct values, one per row.
 */
public static HostColumnVector fromStructs(DataType dataType,
                                           List<StructData> values) {
  try (ColumnBuilder columnBuilder = new ColumnBuilder(dataType, values.size())) {
    columnBuilder.appendStructValues(values);
    return columnBuilder.build();
  }
}

/**
 * Varargs convenience overload of {@link #fromStructs(DataType, List)}.
 */
public static HostColumnVector fromStructs(DataType dataType, StructData... values) {
  try (ColumnBuilder columnBuilder = new ColumnBuilder(dataType, values.length)) {
    columnBuilder.appendStructValues(values);
    return columnBuilder.build();
  }
}

/**
 * Create a STRUCT vector containing the given number of empty struct entries.
 */
public static HostColumnVector emptyStructs(DataType dataType, long rows) {
  StructData emptyStruct = new StructData();
  try (ColumnBuilder columnBuilder = new ColumnBuilder(dataType, rows)) {
    for (long i = 0; i < rows; i++) {
      columnBuilder.append(emptyStruct);
    }
    return columnBuilder.build();
  }
}
/** Create a new BOOL8 vector from the given byte values. */
public static HostColumnVector boolFromBytes(byte... values) {
  return build(DType.BOOL8, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new INT8 vector from the given values. */
public static HostColumnVector fromBytes(byte... values) {
  return build(DType.INT8, values.length, (builder) -> builder.appendArray(values));
}

/**
 * Create a new UINT8 vector from the given values. Java has no unsigned byte
 * type, so each byte's bit pattern is interpreted as an unsigned value.
 */
public static HostColumnVector fromUnsignedBytes(byte... values) {
  return build(DType.UINT8, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new INT16 vector from the given values. */
public static HostColumnVector fromShorts(short... values) {
  return build(DType.INT16, values.length, (builder) -> builder.appendArray(values));
}

/**
 * Create a new UINT16 vector from the given values. Java has no unsigned short
 * type, so each short's bit pattern is interpreted as an unsigned value.
 */
public static HostColumnVector fromUnsignedShorts(short... values) {
  return build(DType.UINT16, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new DURATION_NANOSECONDS vector from the given values. */
public static HostColumnVector durationNanosecondsFromLongs(long... values) {
  return build(DType.DURATION_NANOSECONDS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new DURATION_MICROSECONDS vector from the given values. */
public static HostColumnVector durationMicrosecondsFromLongs(long... values) {
  return build(DType.DURATION_MICROSECONDS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new DURATION_MILLISECONDS vector from the given values. */
public static HostColumnVector durationMillisecondsFromLongs(long... values) {
  return build(DType.DURATION_MILLISECONDS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new DURATION_SECONDS vector from the given values. */
public static HostColumnVector durationSecondsFromLongs(long... values) {
  return build(DType.DURATION_SECONDS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new DURATION_DAYS vector from the given values. */
public static HostColumnVector durationDaysFromInts(int... values) {
  return build(DType.DURATION_DAYS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new INT32 vector from the given values. */
public static HostColumnVector fromInts(int... values) {
  return build(DType.INT32, values.length, (builder) -> builder.appendArray(values));
}

/**
 * Create a new UINT32 vector from the given values. Java has no unsigned int
 * type, so each int's bit pattern is interpreted as an unsigned value.
 */
public static HostColumnVector fromUnsignedInts(int... values) {
  return build(DType.UINT32, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new INT64 vector from the given values. */
public static HostColumnVector fromLongs(long... values) {
  return build(DType.INT64, values.length, (builder) -> builder.appendArray(values));
}

/**
 * Create a new UINT64 vector from the given values. Java has no unsigned long
 * type, so each long's bit pattern is interpreted as an unsigned value.
 */
public static HostColumnVector fromUnsignedLongs(long... values) {
  return build(DType.UINT64, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new FLOAT32 vector from the given values. */
public static HostColumnVector fromFloats(float... values) {
  return build(DType.FLOAT32, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new FLOAT64 vector from the given values. */
public static HostColumnVector fromDoubles(double... values) {
  return build(DType.FLOAT64, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new TIMESTAMP_DAYS vector from the given values. */
public static HostColumnVector daysFromInts(int... values) {
  return build(DType.TIMESTAMP_DAYS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new TIMESTAMP_SECONDS vector from the given values. */
public static HostColumnVector timestampSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_SECONDS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new TIMESTAMP_MILLISECONDS vector from the given values. */
public static HostColumnVector timestampMilliSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_MILLISECONDS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new TIMESTAMP_MICROSECONDS vector from the given values. */
public static HostColumnVector timestampMicroSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_MICROSECONDS, values.length, (builder) -> builder.appendArray(values));
}

/** Create a new TIMESTAMP_NANOSECONDS vector from the given values. */
public static HostColumnVector timestampNanoSecondsFromLongs(long... values) {
  return build(DType.TIMESTAMP_NANOSECONDS, values.length, (builder) -> builder.appendArray(values));
}
/**
 * Create a new DECIMAL32 vector (max precision 9) from unscaled int values and a scale.
 * Note: the scale here has the opposite sign convention to java.math.BigDecimal's scale.
 */
public static HostColumnVector decimalFromInts(int scale, int... values) {
  return build(DType.create(DType.DTypeEnum.DECIMAL32, scale), values.length,
      (builder) -> builder.appendUnscaledDecimalArray(values));
}

/**
 * Create a new DECIMAL32 vector (max precision 9) from boxed unscaled Integer values
 * (nulls allowed) and a scale.
 * Note: the scale here has the opposite sign convention to java.math.BigDecimal's scale.
 */
public static HostColumnVector decimalFromBoxedInts(int scale, Integer... values) {
  return build(DType.create(DType.DTypeEnum.DECIMAL32, scale), values.length, (builder) -> {
    for (Integer value : values) {
      if (value == null) {
        builder.appendNull();
      } else {
        builder.appendUnscaledDecimal(value);
      }
    }
  });
}

/**
 * Create a new DECIMAL64 vector (max precision 18) from unscaled long values and a scale.
 * Note: the scale here has the opposite sign convention to java.math.BigDecimal's scale.
 */
public static HostColumnVector decimalFromLongs(int scale, long... values) {
  return build(DType.create(DType.DTypeEnum.DECIMAL64, scale), values.length,
      (builder) -> builder.appendUnscaledDecimalArray(values));
}

/**
 * Create a new DECIMAL64 vector (max precision 18) from boxed unscaled Long values
 * (nulls allowed) and a scale.
 * Note: the scale here has the opposite sign convention to java.math.BigDecimal's scale.
 */
public static HostColumnVector decimalFromBoxedLongs(int scale, Long... values) {
  return build(DType.create(DType.DTypeEnum.DECIMAL64, scale), values.length, (builder) -> {
    for (Long value : values) {
      if (value == null) {
        builder.appendNull();
      } else {
        builder.appendUnscaledDecimal(value);
      }
    }
  });
}

/**
 * Create a new DECIMAL128 vector from unscaled BigInteger values (nulls allowed) and a scale.
 * Note: the scale here has the opposite sign convention to java.math.BigDecimal's scale.
 */
public static HostColumnVector decimalFromBigIntegers(int scale, BigInteger... values) {
  return build(DType.create(DType.DTypeEnum.DECIMAL128, scale), values.length, (builder) -> {
    for (BigInteger value : values) {
      if (value == null) {
        builder.appendNull();
      } else {
        builder.appendUnscaledDecimal(value);
      }
    }
  });
}
/**
 * Create a new decimal vector from double floats with specific DecimalType and RoundingMode.
 * All doubles will be rescaled if necessary, according to scale of input DecimalType and RoundingMode.
 * If any overflow occurs in extracting integral part, an IllegalArgumentException will be thrown.
 * This API is inefficient because of slow double -> decimal conversion, so it is mainly for testing.
 * Compared with scale of [[java.math.BigDecimal]], the scale here represents the opposite meaning.
 */
public static HostColumnVector decimalFromDoubles(DType type, RoundingMode mode, double... values) {
  assert type.isDecimalType();
  if (type.typeId == DType.DTypeEnum.DECIMAL64) {
    long[] data = new long[values.length];
    for (int i = 0; i < values.length; i++) {
      // The cudf scale is negated to obtain the BigDecimal scale convention.
      BigDecimal dec = BigDecimal.valueOf(values[i]).setScale(-type.getScale(), mode);
      data[i] = dec.unscaledValue().longValueExact();
    }
    return build(type, values.length, (b) -> b.appendUnscaledDecimalArray(data));
  } else {
    // NOTE(review): this branch narrows each unscaled value to int, which only fits
    // DECIMAL32; a DECIMAL128 type would hit intValueExact's ArithmeticException for
    // values outside int range — confirm whether DECIMAL128 is intended to be supported.
    int[] data = new int[values.length];
    for (int i = 0; i < values.length; i++) {
      BigDecimal dec = BigDecimal.valueOf(values[i]).setScale(-type.getScale(), mode);
      data[i] = dec.unscaledValue().intValueExact();
    }
    return build(type, values.length, (b) -> b.appendUnscaledDecimalArray(data));
  }
}
/**
 * Create a new string vector from the given values. This API
 * supports inline nulls. This is really intended to be used only for testing as
 * it is slow and memory intensive to translate between java strings and UTF8 strings.
 */
public static HostColumnVector fromStrings(String... values) {
  final int rows = values.length;
  boolean hasNulls = false;
  // Pre-compute how many bytes of UTF-8 data the buffer must hold. Sorry, this is
  // expensive because every string is encoded once just to measure it.
  long bufferSize = 0;
  for (String value : values) {
    if (value == null) {
      hasNulls = true;
    } else {
      bufferSize += value.getBytes(StandardCharsets.UTF_8).length;
    }
  }
  if (hasNulls) {
    // appendBoxed handles the per-element null checks for us.
    return build(rows, bufferSize, (b) -> b.appendBoxed(values));
  }
  return build(rows, bufferSize, (b) -> {
    for (String value : values) {
      b.append(value);
    }
  });
}
/**
 * Create a new string vector from the given pre-encoded UTF-8 byte arrays.
 * This API supports inline nulls.
 */
public static HostColumnVector fromUTF8Strings(byte[]... values) {
  final int rows = values.length;
  // Sum the bytes needed to hold all of the (non-null) string data.
  long bufferSize = 0;
  for (byte[] utf8 : values) {
    if (utf8 != null) {
      bufferSize += utf8.length;
    }
  }
  return build(rows, bufferSize, (b) -> {
    for (byte[] utf8 : values) {
      if (utf8 == null) {
        b.appendNull();
      } else {
        b.appendUTF8String(utf8);
      }
    }
  });
}
/**
 * Create a new vector from the given values. This API supports inline nulls,
 * but is much slower than building from a primitive array of unscaled values.
 * Notice:
 * 1. Input values will be rescaled with min scale (max scale in terms of java.math.BigDecimal),
 * which avoids potential precision loss due to rounding. But there exists risk of precision overflow.
 * 2. The scale will be zero if all input values are null.
 */
public static HostColumnVector fromDecimals(BigDecimal... values) {
  // Find the element with the greatest precision (ZERO when all inputs are null),
  // find the greatest scale (0 when all inputs are null), then rescale the widest
  // element to that scale to derive the decimal type that can hold every value.
  BigDecimal widest = Arrays.stream(values)
      .filter(Objects::nonNull)
      .max(Comparator.comparingInt(BigDecimal::precision))
      .orElse(BigDecimal.ZERO);
  int maxScale = Arrays.stream(values)
      .filter(Objects::nonNull)
      .mapToInt(BigDecimal::scale)
      .max()
      .orElse(0);
  // Scaling up never rounds, so UNNECESSARY cannot throw here.
  widest = widest.setScale(maxScale, RoundingMode.UNNECESSARY);
  return build(DType.fromJavaBigDecimal(widest), values.length, (b) -> b.appendBoxed(values));
}
/**
 * Create a new BOOL8 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector fromBoxedBooleans(Boolean... values) {
  return build(DType.BOOL8, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new INT8 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector fromBoxedBytes(Byte... values) {
  return build(DType.INT8, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new UINT8 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 * Java has no unsigned byte type, so each byte's bit pattern is interpreted as
 * an unsigned value.
 */
public static HostColumnVector fromBoxedUnsignedBytes(Byte... values) {
  return build(DType.UINT8, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new INT16 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector fromBoxedShorts(Short... values) {
  return build(DType.INT16, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new UINT16 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 * Java has no unsigned short type, so each short's bit pattern is interpreted as
 * an unsigned value.
 */
public static HostColumnVector fromBoxedUnsignedShorts(Short... values) {
  return build(DType.UINT16, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new DURATION_NANOSECONDS vector from boxed values with inline nulls.
 * Much slower than the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector durationNanosecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_NANOSECONDS, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new DURATION_MICROSECONDS vector from boxed values with inline nulls.
 * Much slower than the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector durationMicrosecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_MICROSECONDS, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new DURATION_MILLISECONDS vector from boxed values with inline nulls.
 * Much slower than the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector durationMillisecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_MILLISECONDS, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new DURATION_SECONDS vector from boxed values with inline nulls.
 * Much slower than the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector durationSecondsFromBoxedLongs(Long... values) {
  return build(DType.DURATION_SECONDS, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new DURATION_DAYS vector from boxed values with inline nulls.
 * Much slower than the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector durationDaysFromBoxedInts(Integer... values) {
  return build(DType.DURATION_DAYS, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new INT32 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector fromBoxedInts(Integer... values) {
  return build(DType.INT32, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new UINT32 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 * Java has no unsigned int type, so each int's bit pattern is interpreted as
 * an unsigned value.
 */
public static HostColumnVector fromBoxedUnsignedInts(Integer... values) {
  return build(DType.UINT32, values.length, (builder) -> builder.appendBoxed(values));
}

/**
 * Create a new INT64 vector from boxed values with inline nulls. Much slower than
 * the primitive-array factories; really only intended for tests.
 */
public static HostColumnVector fromBoxedLongs(Long... values) {
  return build(DType.INT64, values.length, (builder) -> builder.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
* <p>
* Java does not have an unsigned long type, so the values will be
* treated as if the bits represent an unsigned value.
*/
public static HostColumnVector fromBoxedUnsignedLongs(Long... values) {
return build(DType.UINT64, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static HostColumnVector fromBoxedFloats(Float... values) {
return build(DType.FLOAT32, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static HostColumnVector fromBoxedDoubles(Double... values) {
return build(DType.FLOAT64, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static HostColumnVector timestampDaysFromBoxedInts(Integer... values) {
return build(DType.TIMESTAMP_DAYS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static HostColumnVector timestampSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_SECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static HostColumnVector timestampMilliSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_MILLISECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static HostColumnVector timestampMicroSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_MICROSECONDS, values.length, (b) -> b.appendBoxed(values));
}
/**
* Create a new vector from the given values. This API supports inline nulls,
* but is much slower than using a regular array and should really only be used
* for tests.
*/
public static HostColumnVector timestampNanoSecondsFromBoxedLongs(Long... values) {
return build(DType.TIMESTAMP_NANOSECONDS, values.length, (b) -> b.appendBoxed(values));
}
  /**
   * Incrementally builds a single {@link HostColumnVector}, growing its backing host buffers
   * on demand. Supports fixed-width, STRING, LIST and STRUCT columns, including nesting.
   */
public static final class ColumnBuilder implements AutoCloseable {
private DType type;
private HostMemoryBuffer data;
private HostMemoryBuffer valid;
private HostMemoryBuffer offsets;
private long nullCount = 0l;
//TODO nullable currently not used
private boolean nullable;
private long rows;
private long estimatedRows;
private long rowCapacity = 0L;
private long validCapacity = 0L;
private boolean built = false;
private List<ColumnBuilder> childBuilders = new ArrayList<>();
private Runnable nullHandler;
// The value of currentIndex can't exceed Int32.Max. Storing currentIndex as a long is to
// adapt HostMemoryBuffer.setXXX, which requires a long offset.
private long currentIndex = 0;
// Only for Strings: pointer of the byte (data) buffer
private int currentStringByteIndex = 0;
// Use bit shift instead of multiply to transform row offset to byte offset
private int bitShiftBySize = 0;
private static final int bitShiftByOffset = (int)(Math.log(OFFSET_SIZE) / Math.log(2));
public ColumnBuilder(HostColumnVector.DataType type, long estimatedRows) {
this.type = type.getType();
this.nullable = type.isNullable();
this.rows = 0;
this.estimatedRows = Math.max(estimatedRows, 1L);
this.bitShiftBySize = (int)(Math.log(this.type.getSizeInBytes()) / Math.log(2));
// initialize the null handler according to the data type
this.setupNullHandler();
for (int i = 0; i < type.getNumChildren(); i++) {
childBuilders.add(new ColumnBuilder(type.getChild(i), estimatedRows));
}
}
private void setupNullHandler() {
if (this.type == DType.LIST) {
this.nullHandler = () -> {
this.growListBuffersAndRows();
this.growValidBuffer();
setNullAt(currentIndex++);
offsets.setInt(currentIndex << bitShiftByOffset, childBuilders.get(0).getCurrentIndex());
};
} else if (this.type == DType.STRING) {
this.nullHandler = () -> {
this.growStringBuffersAndRows(0);
this.growValidBuffer();
setNullAt(currentIndex++);
offsets.setInt(currentIndex << bitShiftByOffset, currentStringByteIndex);
};
} else if (this.type == DType.STRUCT) {
this.nullHandler = () -> {
this.growStructBuffersAndRows();
this.growValidBuffer();
setNullAt(currentIndex++);
for (ColumnBuilder childBuilder : childBuilders) {
childBuilder.appendNull();
}
};
} else {
this.nullHandler = () -> {
this.growFixedWidthBuffersAndRows();
this.growValidBuffer();
setNullAt(currentIndex++);
};
}
}
public HostColumnVector build() {
List<HostColumnVectorCore> hostColumnVectorCoreList = new ArrayList<>();
for (ColumnBuilder childBuilder : childBuilders) {
hostColumnVectorCoreList.add(childBuilder.buildNestedInternal());
}
// Aligns the valid buffer size with other buffers in terms of row size, because it grows lazily.
if (valid != null) {
growValidBuffer();
}
HostColumnVector hostColumnVector = new HostColumnVector(type, rows, Optional.of(nullCount), data, valid, offsets,
hostColumnVectorCoreList);
built = true;
return hostColumnVector;
}
private HostColumnVectorCore buildNestedInternal() {
List<HostColumnVectorCore> hostColumnVectorCoreList = new ArrayList<>();
for (ColumnBuilder childBuilder : childBuilders) {
hostColumnVectorCoreList.add(childBuilder.buildNestedInternal());
}
// Aligns the valid buffer size with other buffers in terms of row size, because it grows lazily.
if (valid != null) {
growValidBuffer();
}
return new HostColumnVectorCore(type, rows, Optional.of(nullCount), data, valid, offsets, hostColumnVectorCoreList);
}
public ColumnBuilder appendLists(List... inputLists) {
for (List inputList : inputLists) {
// one row
append(inputList);
}
return this;
}
public ColumnBuilder appendStructValues(List<StructData> inputList) {
for (StructData structInput : inputList) {
// one row
append(structInput);
}
return this;
}
public ColumnBuilder appendStructValues(StructData... inputList) {
for (StructData structInput : inputList) {
append(structInput);
}
return this;
}
/**
* Grows valid buffer lazily. The valid buffer won't be materialized until the first null
* value appended. This method reuses the rowCapacity to track the sizes of column.
* Therefore, please call specific growBuffer method to update rowCapacity before calling
* this method.
*/
private void growValidBuffer() {
if (valid == null) {
long maskBytes = ColumnView.getValidityBufferSize((int) rowCapacity);
valid = HostMemoryBuffer.allocate(maskBytes);
valid.setMemory(0, valid.length, (byte) 0xFF);
validCapacity = rowCapacity;
return;
}
if (validCapacity < rowCapacity) {
long maskBytes = ColumnView.getValidityBufferSize((int) rowCapacity);
HostMemoryBuffer newValid = HostMemoryBuffer.allocate(maskBytes);
newValid.setMemory(0, newValid.length, (byte) 0xFF);
valid = copyBuffer(newValid, valid);
validCapacity = rowCapacity;
}
}
/**
* A method automatically grows data buffer for fixed-width columns as needed along with
* incrementing the row counts. Please call this method before appending any value or null.
*/
private void growFixedWidthBuffersAndRows() {
growFixedWidthBuffersAndRows(1);
}
/**
* A method automatically grows data buffer for fixed-width columns for a given size as needed
* along with incrementing the row counts. Please call this method before appending
* multiple values or nulls.
*/
private void growFixedWidthBuffersAndRows(int numRows) {
assert rows + numRows <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE";
rows += numRows;
if (data == null) {
long neededSize = Math.max(rows, estimatedRows);
data = HostMemoryBuffer.allocate(neededSize << bitShiftBySize);
rowCapacity = neededSize;
} else if (rows > rowCapacity) {
long neededSize = Math.max(rows, rowCapacity * 2);
long newCap = Math.min(neededSize, Integer.MAX_VALUE - 1);
data = copyBuffer(HostMemoryBuffer.allocate(newCap << bitShiftBySize), data);
rowCapacity = newCap;
}
}
/**
* A method automatically grows offsets buffer for list columns as needed along with
* incrementing the row counts. Please call this method before appending any value or null.
*/
private void growListBuffersAndRows() {
assert rows + 2 <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE";
rows++;
if (offsets == null) {
offsets = HostMemoryBuffer.allocate((estimatedRows + 1) << bitShiftByOffset);
offsets.setInt(0, 0);
rowCapacity = estimatedRows;
} else if (rows > rowCapacity) {
long newCap = Math.min(rowCapacity * 2, Integer.MAX_VALUE - 2);
offsets = copyBuffer(HostMemoryBuffer.allocate((newCap + 1) << bitShiftByOffset), offsets);
rowCapacity = newCap;
}
}
/**
* A method automatically grows offsets and data buffer for string columns as needed along with
* incrementing the row counts. Please call this method before appending any value or null.
*
* @param stringLength number of bytes required by the next row
*/
private void growStringBuffersAndRows(int stringLength) {
assert rows + 2 <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE";
rows++;
if (offsets == null) {
// Initialize data buffer with at least 1 byte in case the first appended value is null.
data = HostMemoryBuffer.allocate(Math.max(1, stringLength));
offsets = HostMemoryBuffer.allocate((estimatedRows + 1) << bitShiftByOffset);
offsets.setInt(0, 0);
rowCapacity = estimatedRows;
return;
}
if (rows > rowCapacity) {
long newCap = Math.min(rowCapacity * 2, Integer.MAX_VALUE - 2);
offsets = copyBuffer(HostMemoryBuffer.allocate((newCap + 1) << bitShiftByOffset), offsets);
rowCapacity = newCap;
}
long currentLength = currentStringByteIndex + stringLength;
if (currentLength > data.length) {
long requiredLength = data.length;
do {
requiredLength = requiredLength * 2;
} while (currentLength > requiredLength);
data = copyBuffer(HostMemoryBuffer.allocate(requiredLength), data);
}
}
/**
* For struct columns, we only need to update rows and rowCapacity (for the growth of
* valid buffer), because struct columns hold no buffer itself.
* Please call this method before appending any value or null.
*/
private void growStructBuffersAndRows() {
assert rows + 1 <= Integer.MAX_VALUE : "Row count cannot go over Integer.MAX_VALUE";
rows++;
if (rowCapacity == 0) {
rowCapacity = estimatedRows;
} else if (rows > rowCapacity) {
rowCapacity = Math.min(rowCapacity * 2, Integer.MAX_VALUE - 1);
}
}
private HostMemoryBuffer copyBuffer(HostMemoryBuffer targetBuffer, HostMemoryBuffer buffer) {
try {
targetBuffer.copyFromHostBuffer(0, buffer, 0, buffer.length);
buffer.close();
buffer = targetBuffer;
targetBuffer = null;
} finally {
if (targetBuffer != null) {
targetBuffer.close();
}
}
return buffer;
}
/**
* Method that sets the null bit in the validity vector
* @param index the row index at which the null is marked
*/
private void setNullAt(long index) {
assert index < rows : "Index for null value should fit the column with " + rows + " rows";
nullCount += BitVectorHelper.setNullAt(valid, index);
}
public final ColumnBuilder appendNull() {
nullHandler.run();
return this;
}
//For structs
private ColumnBuilder append(StructData structData) {
assert type.isNestedType();
if (type.equals(DType.STRUCT)) {
if (structData == null || structData.isNull()) {
return appendNull();
} else {
for (int i = 0; i < structData.getNumFields(); i++) {
ColumnBuilder childBuilder = childBuilders.get(i);
appendChildOrNull(childBuilder, structData.getField(i));
}
endStruct();
}
}
return this;
}
private boolean allChildrenHaveSameIndex() {
if (childBuilders.size() > 0) {
int expected = childBuilders.get(0).getCurrentIndex();
for (ColumnBuilder child: childBuilders) {
if (child.getCurrentIndex() != expected) {
return false;
}
}
}
return true;
}
/**
* If you want to build up a struct column you can get each child `builder.getChild(N)` and
* append to all of them, then when you are done call `endStruct` to update this builder.
* Do not start to append to the child and then append a null to this without ending the struct
* first or you might not get the results that you expected.
* @return this for chaining.
*/
public ColumnBuilder endStruct() {
assert type.equals(DType.STRUCT) : "This only works for structs";
assert allChildrenHaveSameIndex() : "Appending structs data appears to be off " +
childBuilders + " should all have the same currentIndex " + type;
growStructBuffersAndRows();
currentIndex++;
return this;
}
/**
* If you want to build up a list column you can get `builder.getChild(0)` and append to than,
* then when you are done call `endList` and everything that was appended to that builder
* will now be in the next list. Do not start to append to the child and then append a null
* to this without ending the list first or you might not get the results that you expected.
* @return this for chaining.
*/
public ColumnBuilder endList() {
assert type.equals(DType.LIST);
growListBuffersAndRows();
offsets.setInt(++currentIndex << bitShiftByOffset, childBuilders.get(0).getCurrentIndex());
return this;
}
// For lists
private <T> ColumnBuilder append(List<T> inputList) {
if (inputList == null) {
appendNull();
} else {
ColumnBuilder childBuilder = childBuilders.get(0);
for (Object listElement : inputList) {
appendChildOrNull(childBuilder, listElement);
}
endList();
}
return this;
}
private void appendChildOrNull(ColumnBuilder childBuilder, Object listElement) {
if (listElement == null) {
childBuilder.appendNull();
} else if (listElement instanceof Integer) {
childBuilder.append((Integer) listElement);
} else if (listElement instanceof String) {
childBuilder.append((String) listElement);
} else if (listElement instanceof Double) {
childBuilder.append((Double) listElement);
} else if (listElement instanceof Float) {
childBuilder.append((Float) listElement);
} else if (listElement instanceof Boolean) {
childBuilder.append((Boolean) listElement);
} else if (listElement instanceof Long) {
childBuilder.append((Long) listElement);
} else if (listElement instanceof Byte) {
childBuilder.append((Byte) listElement);
} else if (listElement instanceof Short) {
childBuilder.append((Short) listElement);
} else if (listElement instanceof BigDecimal) {
childBuilder.append((BigDecimal) listElement);
} else if (listElement instanceof BigInteger) {
childBuilder.append((BigInteger) listElement);
} else if (listElement instanceof List) {
childBuilder.append((List<?>) listElement);
} else if (listElement instanceof StructData) {
childBuilder.append((StructData) listElement);
} else if (listElement instanceof byte[]) {
childBuilder.appendUTF8String((byte[]) listElement);
} else {
throw new IllegalStateException("Unexpected element type: " + listElement.getClass());
}
}
@Deprecated
public void incrCurrentIndex() {
currentIndex = currentIndex + 1;
}
public int getCurrentIndex() {
return (int) currentIndex;
}
@Deprecated
public int getCurrentByteIndex() {
return currentStringByteIndex;
}
public final ColumnBuilder append(byte value) {
growFixedWidthBuffersAndRows();
assert type.isBackedByByte();
assert currentIndex < rows;
data.setByte(currentIndex++ << bitShiftBySize, value);
return this;
}
public final ColumnBuilder append(short value) {
growFixedWidthBuffersAndRows();
assert type.isBackedByShort();
assert currentIndex < rows;
data.setShort(currentIndex++ << bitShiftBySize, value);
return this;
}
public final ColumnBuilder append(int value) {
growFixedWidthBuffersAndRows();
assert type.isBackedByInt();
assert currentIndex < rows;
data.setInt(currentIndex++ << bitShiftBySize, value);
return this;
}
public final ColumnBuilder append(long value) {
growFixedWidthBuffersAndRows();
assert type.isBackedByLong();
assert currentIndex < rows;
data.setLong(currentIndex++ << bitShiftBySize, value);
return this;
}
public final ColumnBuilder append(float value) {
growFixedWidthBuffersAndRows();
assert type.equals(DType.FLOAT32);
assert currentIndex < rows;
data.setFloat(currentIndex++ << bitShiftBySize, value);
return this;
}
public final ColumnBuilder append(double value) {
growFixedWidthBuffersAndRows();
assert type.equals(DType.FLOAT64);
assert currentIndex < rows;
data.setDouble(currentIndex++ << bitShiftBySize, value);
return this;
}
public final ColumnBuilder append(boolean value) {
growFixedWidthBuffersAndRows();
assert type.equals(DType.BOOL8);
assert currentIndex < rows;
data.setBoolean(currentIndex++ << bitShiftBySize, value);
return this;
}
public ColumnBuilder append(BigDecimal value) {
return append(value.setScale(-type.getScale(), RoundingMode.UNNECESSARY).unscaledValue());
}
public ColumnBuilder append(BigInteger unscaledVal) {
growFixedWidthBuffersAndRows();
assert currentIndex < rows;
if (type.typeId == DType.DTypeEnum.DECIMAL32) {
data.setInt(currentIndex++ << bitShiftBySize, unscaledVal.intValueExact());
} else if (type.typeId == DType.DTypeEnum.DECIMAL64) {
data.setLong(currentIndex++ << bitShiftBySize, unscaledVal.longValueExact());
} else if (type.typeId == DType.DTypeEnum.DECIMAL128) {
byte[] unscaledValueBytes = unscaledVal.toByteArray();
byte[] result = convertDecimal128FromJavaToCudf(unscaledValueBytes);
data.setBytes(currentIndex++ << bitShiftBySize, result, 0, result.length);
} else {
throw new IllegalStateException(type + " is not a supported decimal type.");
}
return this;
}
public ColumnBuilder append(String value) {
assert value != null : "appendNull must be used to append null strings";
return appendUTF8String(value.getBytes(StandardCharsets.UTF_8));
}
public ColumnBuilder appendUTF8String(byte[] value) {
return appendUTF8String(value, 0, value.length);
}
public ColumnBuilder appendUTF8String(byte[] value, int srcOffset, int length) {
assert value != null : "appendNull must be used to append null strings";
assert srcOffset >= 0;
assert length >= 0;
assert value.length + srcOffset <= length;
assert type.equals(DType.STRING) : " type " + type + " is not String";
growStringBuffersAndRows(length);
assert currentIndex < rows;
if (length > 0) {
data.setBytes(currentStringByteIndex, value, srcOffset, length);
}
currentStringByteIndex += length;
offsets.setInt(++currentIndex << bitShiftByOffset, currentStringByteIndex);
return this;
}
/**
* Append multiple non-null byte values.
*/
public ColumnBuilder append(byte[] value, int srcOffset, int length) {
assert type.isBackedByByte();
assert srcOffset >= 0;
assert length >= 0;
assert length + srcOffset <= value.length;
if (length > 0) {
growFixedWidthBuffersAndRows(length);
assert currentIndex < rows;
data.setBytes(currentIndex, value, srcOffset, length);
}
currentIndex += length;
return this;
}
/**
* Appends byte to a LIST of INT8/UINT8
*/
public ColumnBuilder appendByteList(byte[] value) {
return appendByteList(value, 0, value.length);
}
/**
* Appends bytes to a LIST of INT8/UINT8
*/
public ColumnBuilder appendByteList(byte[] value, int srcOffset, int length) {
assert value != null : "appendNull must be used to append null bytes";
assert type.equals(DType.LIST) : " type " + type + " is not LIST";
getChild(0).append(value, srcOffset, length);
return endList();
}
/**
* Accepts a byte array containing the two's-complement representation of the unscaled value, which
* is in big-endian byte-order. Then, transforms it into the representation of cuDF Decimal128 for
* appending.
* This method is more efficient than `append(BigInteger unscaledVal)` if we can directly access the
* two's-complement representation of a BigDecimal without encoding via the method `toByteArray`.
*/
public ColumnBuilder appendDecimal128(byte[] binary) {
growFixedWidthBuffersAndRows();
assert type.getTypeId().equals(DType.DTypeEnum.DECIMAL128);
assert currentIndex < rows;
assert binary.length <= type.getSizeInBytes();
byte[] cuBinary = convertDecimal128FromJavaToCudf(binary);
data.setBytes(currentIndex++ << bitShiftBySize, cuBinary, 0, cuBinary.length);
return this;
}
public ColumnBuilder getChild(int index) {
return childBuilders.get(index);
}
/**
* Finish and create the immutable ColumnVector, copied to the device.
*/
public final ColumnVector buildAndPutOnDevice() {
try (HostColumnVector tmp = build()) {
return tmp.copyToDevice();
}
}
@Override
public void close() {
if (!built) {
if (data != null) {
data.close();
data = null;
}
if (valid != null) {
valid.close();
valid = null;
}
if (offsets != null) {
offsets.close();
offsets = null;
}
for (ColumnBuilder childBuilder : childBuilders) {
childBuilder.close();
}
built = true;
}
}
@Override
public String toString() {
StringJoiner sj = new StringJoiner(",");
for (ColumnBuilder cb : childBuilders) {
sj.add(cb.toString());
}
return "ColumnBuilder{" +
"type=" + type +
", children=" + sj +
", data=" + data +
", valid=" + valid +
", currentIndex=" + currentIndex +
", nullCount=" + nullCount +
", estimatedRows=" + estimatedRows +
", populatedRows=" + rows +
", built=" + built +
'}';
}
}
public static final class Builder implements AutoCloseable {
private final long rows;
private final DType type;
private HostMemoryBuffer data;
private HostMemoryBuffer valid;
private HostMemoryBuffer offsets;
private long currentIndex = 0;
private long nullCount;
private int currentStringByteIndex = 0;
private boolean built;
/**
* Create a builder with a buffer of size rows
* @param type datatype
* @param rows number of rows to allocate.
* @param stringBufferSize the size of the string data buffer if we are
* working with Strings. It is ignored otherwise.
*/
Builder(DType type, long rows, long stringBufferSize) {
this.type = type;
this.rows = rows;
if (type.equals(DType.STRING)) {
if (stringBufferSize <= 0) {
// We need at least one byte or we will get NULL back for data
stringBufferSize = 1;
}
this.data = HostMemoryBuffer.allocate(stringBufferSize);
// The offsets are ints and there is 1 more than the number of rows.
this.offsets = HostMemoryBuffer.allocate((rows + 1) * OFFSET_SIZE);
// The first offset is always 0
this.offsets.setInt(0, 0);
} else {
this.data = HostMemoryBuffer.allocate(rows * type.getSizeInBytes());
}
}
/**
* Create a builder with a buffer of size rows (for testing ONLY).
* @param type datatype
* @param rows number of rows to allocate.
* @param testData a buffer to hold the data (should be large enough to hold rows entries).
* @param testValid a buffer to hold the validity vector (should be large enough to hold
* rows entries or is null).
* @param testOffsets a buffer to hold the offsets for strings and string categories.
*/
Builder(DType type, long rows, HostMemoryBuffer testData,
HostMemoryBuffer testValid, HostMemoryBuffer testOffsets) {
this.type = type;
this.rows = rows;
this.data = testData;
this.valid = testValid;
}
public final Builder append(boolean value) {
assert type.equals(DType.BOOL8);
assert currentIndex < rows;
data.setByte(currentIndex * type.getSizeInBytes(), value ? (byte)1 : (byte)0);
currentIndex++;
return this;
}
public final Builder append(byte value) {
assert type.isBackedByByte();
assert currentIndex < rows;
data.setByte(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
public final Builder append(byte value, long count) {
assert (count + currentIndex) <= rows;
assert type.isBackedByByte();
data.setMemory(currentIndex * type.getSizeInBytes(), count, value);
currentIndex += count;
return this;
}
public final Builder append(short value) {
assert type.isBackedByShort();
assert currentIndex < rows;
data.setShort(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
public final Builder append(int value) {
assert type.isBackedByInt();
assert currentIndex < rows;
data.setInt(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
public final Builder append(long value) {
assert type.isBackedByLong();
assert currentIndex < rows;
data.setLong(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
public final Builder append(float value) {
assert type.equals(DType.FLOAT32);
assert currentIndex < rows;
data.setFloat(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
public final Builder append(double value) {
assert type.equals(DType.FLOAT64);
assert currentIndex < rows;
data.setDouble(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
    /**
     * Append java.math.BigDecimal into HostColumnVector with UNNECESSARY RoundingMode.
     * Input decimal should have a larger scale than the column vector. Otherwise, an
     * ArithmeticException will be thrown while rescaling.
     * If the unscaled value after rescaling exceeds the max precision of the rapids type,
     * an ArithmeticException will be thrown while extracting the integral value.
     *
     * @param value BigDecimal value to be appended
     */
public final Builder append(BigDecimal value) {
return append(value, RoundingMode.UNNECESSARY);
}
/**
* Append java.math.BigDecimal into HostColumnVector with user-defined RoundingMode.
* Input decimal will be rescaled according to scale of column type and RoundingMode before appended.
* If unscaledValue after rescaling exceeds the max precision of rapids type, an ArithmeticException will be thrown.
*
* @param value BigDecimal value to be appended
* @param roundingMode rounding mode determines rescaling behavior
*/
public final Builder append(BigDecimal value, RoundingMode roundingMode) {
assert type.isDecimalType();
assert currentIndex < rows: "appended too many values " + currentIndex + " out of total rows " + rows;
BigInteger unscaledValue = value.setScale(-type.getScale(), roundingMode).unscaledValue();
if (type.typeId == DType.DTypeEnum.DECIMAL32) {
assert value.precision() <= DType.DECIMAL32_MAX_PRECISION : "value exceeds maximum precision for DECIMAL32";
data.setInt(currentIndex * type.getSizeInBytes(), unscaledValue.intValueExact());
} else if (type.typeId == DType.DTypeEnum.DECIMAL64) {
assert value.precision() <= DType.DECIMAL64_MAX_PRECISION : "value exceeds maximum precision for DECIMAL64 ";
data.setLong(currentIndex * type.getSizeInBytes(), unscaledValue.longValueExact());
} else if (type.typeId == DType.DTypeEnum.DECIMAL128) {
assert value.precision() <= DType.DECIMAL128_MAX_PRECISION : "value exceeds maximum precision for DECIMAL128 ";
appendUnscaledDecimal(value.unscaledValue());
return this;
} else {
throw new IllegalStateException(type + " is not a supported decimal type.");
}
currentIndex++;
return this;
}
public final Builder appendUnscaledDecimal(int value) {
assert type.typeId == DType.DTypeEnum.DECIMAL32;
assert currentIndex < rows;
data.setInt(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
public final Builder appendUnscaledDecimal(long value) {
assert type.typeId == DType.DTypeEnum.DECIMAL64;
assert currentIndex < rows;
data.setLong(currentIndex * type.getSizeInBytes(), value);
currentIndex++;
return this;
}
public final Builder appendUnscaledDecimal(BigInteger value) {
assert type.typeId == DType.DTypeEnum.DECIMAL128;
assert currentIndex < rows;
byte[] unscaledValueBytes = value.toByteArray();
byte[] result = convertDecimal128FromJavaToCudf(unscaledValueBytes);
data.setBytes(currentIndex*DType.DTypeEnum.DECIMAL128.sizeInBytes, result, 0, result.length);
currentIndex++;
return this;
}
public Builder append(String value) {
assert value != null : "appendNull must be used to append null strings";
return appendUTF8String(value.getBytes(StandardCharsets.UTF_8));
}
public Builder appendUTF8String(byte[] value) {
return appendUTF8String(value, 0, value.length);
}
    /**
     * Appends {@code length} UTF-8 bytes from {@code value} starting at {@code offset} as one
     * string row, growing the byte buffer by doubling when it would overflow.
     * Order here is load-bearing: bytes are written first, then the byte cursor and row index
     * advance, and finally the offsets entry for the new row is recorded.
     */
    public Builder appendUTF8String(byte[] value, int offset, int length) {
      assert value != null : "appendNull must be used to append null strings";
      assert offset >= 0;
      assert length >= 0;
      assert length + offset <= value.length;
      assert type.equals(DType.STRING);
      assert currentIndex < rows;
      // just for strings we want to throw a real exception if we would overrun the buffer
      long oldLen = data.getLength();
      long newLen = oldLen;
      while (currentStringByteIndex + length > newLen) {
        newLen *= 2;
      }
      if (newLen > Integer.MAX_VALUE) {
        throw new IllegalStateException("A string buffer is not supported over 2GB in size");
      }
      if (newLen != oldLen) {
        // need to grow the size of the buffer.
        HostMemoryBuffer newData = HostMemoryBuffer.allocate(newLen);
        try {
          // Only the bytes written so far need to be carried over to the new buffer.
          newData.copyFromHostBuffer(0, data, 0, currentStringByteIndex);
          data.close();
          data = newData;
          newData = null;
        } finally {
          // Non-null here means the copy threw; release the new allocation to avoid a leak.
          if (newData != null) {
            newData.close();
          }
        }
      }
      if (length > 0) {
        data.setBytes(currentStringByteIndex, value, offset, length);
      }
      currentStringByteIndex += length;
      currentIndex++;
      // offsets[currentIndex] marks the end of the row just appended.
      offsets.setInt(currentIndex * OFFSET_SIZE, currentStringByteIndex);
      return this;
    }
public Builder appendArray(byte... values) {
assert (values.length + currentIndex) <= rows;
assert type.isBackedByByte();
data.setBytes(currentIndex * type.getSizeInBytes(), values, 0, values.length);
currentIndex += values.length;
return this;
}
public Builder appendArray(short... values) {
assert type.isBackedByShort();
assert (values.length + currentIndex) <= rows;
data.setShorts(currentIndex * type.getSizeInBytes(), values, 0, values.length);
currentIndex += values.length;
return this;
}
public Builder appendArray(int... values) {
assert type.isBackedByInt();
assert (values.length + currentIndex) <= rows;
data.setInts(currentIndex * type.getSizeInBytes(), values, 0, values.length);
currentIndex += values.length;
return this;
}
public Builder appendArray(long... values) {
assert type.isBackedByLong();
assert (values.length + currentIndex) <= rows;
data.setLongs(currentIndex * type.getSizeInBytes(), values, 0, values.length);
currentIndex += values.length;
return this;
}
/**
 * Append an array of float values to the end of this builder.
 * @param values the values to append; they must all fit in the remaining rows.
 * @return this for chaining.
 */
public Builder appendArray(float... values) {
  assert type.equals(DType.FLOAT32);
  assert (values.length + currentIndex) <= rows;
  long dstOffset = currentIndex * type.getSizeInBytes();
  data.setFloats(dstOffset, values, 0, values.length);
  currentIndex += values.length;
  return this;
}
/**
 * Append an array of double values to the end of this builder.
 * @param values the values to append; they must all fit in the remaining rows.
 * @return this for chaining.
 */
public Builder appendArray(double... values) {
  assert type.equals(DType.FLOAT64);
  assert (values.length + currentIndex) <= rows;
  long dstOffset = currentIndex * type.getSizeInBytes();
  data.setDoubles(dstOffset, values, 0, values.length);
  currentIndex += values.length;
  return this;
}
/**
 * Append an array of unscaled DECIMAL32 values to the end of this builder.
 * @param values the unscaled integer representations to append.
 * @return this for chaining.
 */
public Builder appendUnscaledDecimalArray(int... values) {
  assert type.typeId == DType.DTypeEnum.DECIMAL32;
  assert (values.length + currentIndex) <= rows;
  long dstOffset = currentIndex * type.getSizeInBytes();
  data.setInts(dstOffset, values, 0, values.length);
  currentIndex += values.length;
  return this;
}
/**
 * Append an array of unscaled DECIMAL64 values to the end of this builder.
 * @param values the unscaled long representations to append.
 * @return this for chaining.
 */
public Builder appendUnscaledDecimalArray(long... values) {
  assert type.typeId == DType.DTypeEnum.DECIMAL64;
  assert (values.length + currentIndex) <= rows;
  long dstOffset = currentIndex * type.getSizeInBytes();
  data.setLongs(dstOffset, values, 0, values.length);
  currentIndex += values.length;
  return this;
}
/**
 * Append multiple values. This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public Builder appendBoxed(BigDecimal... values) throws IndexOutOfBoundsException {
  assert type.isDecimalType();
  for (BigDecimal v : values) {
    if (v == null) {
      appendNull();
    } else {
      append(v);
    }
  }
  return this;
}
/**
 * Append multiple boxed values, converting null references into null rows.
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(Byte... values) throws IndexOutOfBoundsException {
  for (Byte value : values) {
    if (value != null) {
      append(value);
    } else {
      appendNull();
    }
  }
  return this;
}
/**
 * Append multiple boxed values, converting null references into null rows.
 * Booleans are stored as a single byte (1 for true, 0 for false).
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(Boolean... values) throws IndexOutOfBoundsException {
  for (Boolean value : values) {
    if (value != null) {
      append(value ? (byte) 1 : (byte) 0);
    } else {
      appendNull();
    }
  }
  return this;
}
/**
 * Append multiple boxed values, converting null references into null rows.
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(Short... values) throws IndexOutOfBoundsException {
  for (Short value : values) {
    if (value != null) {
      append(value);
    } else {
      appendNull();
    }
  }
  return this;
}
/**
 * Append multiple boxed values, converting null references into null rows.
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(Integer... values) throws IndexOutOfBoundsException {
  for (Integer value : values) {
    if (value != null) {
      append(value);
    } else {
      appendNull();
    }
  }
  return this;
}
/**
 * Append multiple boxed values, converting null references into null rows.
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(Long... values) throws IndexOutOfBoundsException {
  for (Long value : values) {
    if (value != null) {
      append(value);
    } else {
      appendNull();
    }
  }
  return this;
}
/**
 * Append multiple boxed values, converting null references into null rows.
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(Float... values) throws IndexOutOfBoundsException {
  for (Float value : values) {
    if (value != null) {
      append(value);
    } else {
      appendNull();
    }
  }
  return this;
}
/**
 * Append multiple boxed values, converting null references into null rows.
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(Double... values) throws IndexOutOfBoundsException {
  for (Double value : values) {
    if (value != null) {
      append(value);
    } else {
      appendNull();
    }
  }
  return this;
}
/**
 * Append multiple strings, converting null references into null rows.
 * This is very slow and should really only be used for tests.
 * @param values the values to append, including nulls.
 * @return this for chaining.
 * @throws IndexOutOfBoundsException if more values are appended than the builder was sized for.
 */
public final Builder appendBoxed(String... values) throws IndexOutOfBoundsException {
  for (String value : values) {
    if (value != null) {
      append(value);
    } else {
      appendNull();
    }
  }
  return this;
}
// TODO see if we can remove this...
/**
 * Append the contents of an entire host column vector to the end of this builder.
 * The vector must have the same type as this builder and must fit in the remaining rows.
 * STRING columns are not supported by this host-side copy.
 * @param columnVector the vector whose rows are to be appended.
 * @return this for chaining.
 */
public final Builder append(HostColumnVector columnVector) {
  assert columnVector.rows <= (rows - currentIndex);
  assert columnVector.type.equals(type);
  if (type.equals(DType.STRING)) {
    throw new UnsupportedOperationException(
        "Appending a string column vector client side is not currently supported");
  } else {
    // Fixed-width types: one bulk copy of the raw data bytes at the current row offset.
    data.copyFromHostBuffer(currentIndex * type.getSizeInBytes(), columnVector.offHeap.data,
        0L,
        columnVector.getRowCount() * type.getSizeInBytes());
  }
  //As this is doing the append on the host assume that a null count is available
  long otherNc = columnVector.getNullCount();
  if (otherNc != 0) {
    if (valid == null) {
      // Lazily allocate our validity mask (all rows marked valid) before merging.
      allocateBitmaskAndSetDefaultValues();
    }
    //copy validity bits from the appended column into this builder's mask
    BitVectorHelper.append(columnVector.offHeap.valid, valid, currentIndex,
        columnVector.rows);
    nullCount += otherNc;
  }
  currentIndex += columnVector.rows;
  return this;
}
/** Allocate the validity bitmask sized for this builder's rows, with every row marked valid. */
private void allocateBitmaskAndSetDefaultValues() {
  long maskBytes = ColumnView.getValidityBufferSize((int) rows);
  valid = HostMemoryBuffer.allocate(maskBytes);
  // 0xFF sets every bit, i.e. all rows start out valid.
  valid.setMemory(0, maskBytes, (byte) 0xFF);
}
/**
 * Append a null value, marking the current row invalid in the validity mask.
 * For STRING columns the next offset entry repeats the current byte position,
 * recording a zero-length (null) entry.
 */
public final Builder appendNull() {
  setNullAt(currentIndex);
  currentIndex++;
  if (type.equals(DType.STRING)) {
    offsets.setInt(currentIndex * OFFSET_SIZE, currentStringByteIndex);
  }
  return this;
}
/**
 * Mark a specific row as null, allocating the validity mask on first use.
 * @param index the row to mark null; must be less than the builder's row capacity.
 * @return this for chaining.
 */
public final Builder setNullAt(long index) {
  assert index < rows;
  if (valid == null) {
    // First null seen: lazily allocate the mask with all rows valid.
    allocateBitmaskAndSetDefaultValues();
  }
  // setNullAt returns 1 only if the bit actually flipped, keeping the count exact.
  nullCount += BitVectorHelper.setNullAt(valid, index);
  return this;
}
/**
 * Finish and create the immutable host column vector. Ownership of the buffers
 * transfers to the returned vector; this builder is marked as built.
 */
public final HostColumnVector build() {
  HostColumnVector result =
      new HostColumnVector(type, currentIndex, Optional.of(nullCount), data, valid, offsets);
  built = true;
  return result;
}
/**
 * Finish building, copy the result to the device, and release the host copy.
 */
public final ColumnVector buildAndPutOnDevice() {
  try (HostColumnVector hostVec = build()) {
    return hostVec.copyToDevice();
  }
}
/**
 * Close this builder and free memory if the column was never built. Safe to call
 * from a finally/try-with-resources path; a successful build() transfers buffer
 * ownership, so this becomes a no-op afterwards.
 */
@Override
public final void close() {
  if (built) {
    return;
  }
  data.close();
  data = null;
  if (valid != null) {
    valid.close();
    valid = null;
  }
  if (offsets != null) {
    offsets.close();
    offsets = null;
  }
  // Mark built so a second close() does not double-free.
  built = true;
}
/**
 * Debug representation of this builder's state.
 * Fix: the original concatenation was missing the ", " separator between the
 * data and type fields, producing output like {@code data=Xtype=Y}.
 */
@Override
public String toString() {
  return "Builder{" +
      "data=" + data +
      ", type=" + type +
      ", valid=" + valid +
      ", currentIndex=" + currentIndex +
      ", nullCount=" + nullCount +
      ", rows=" + rows +
      ", built=" + built +
      '}';
}
}
/**
 * Describes the schema of a (possibly nested) host column: its cudf type,
 * nullability, and child types for LIST/STRUCT columns.
 */
public static abstract class DataType {
  /** Returns the cudf type this node describes. */
  public abstract DType getType();
  /** Returns true if the column described by this node may contain nulls. */
  public abstract boolean isNullable();
  /** Returns the child type at the given index, or null if there is none. */
  public abstract DataType getChild(int index);
  /** Returns the number of child types this node has. */
  public abstract int getNumChildren();
}
/**
 * Schema node for a LIST column: one child describing the element type.
 */
public static class ListType extends HostColumnVector.DataType {
  private boolean isNullable;
  private HostColumnVector.DataType child;

  public ListType(boolean isNullable, DataType child) {
    this.isNullable = isNullable;
    this.child = child;
  }

  @Override
  public DType getType() {
    return DType.LIST;
  }

  @Override
  public boolean isNullable() {
    return isNullable;
  }

  /**
   * Returns the element type for index 0, otherwise null.
   * Fix: the original test {@code index > 0} also returned the child for
   * negative indexes; a list has exactly one child at index 0.
   */
  @Override
  public HostColumnVector.DataType getChild(int index) {
    return index == 0 ? child : null;
  }

  @Override
  public int getNumChildren() {
    return 1;
  }
}
/**
 * Host-side representation of one STRUCT row: an ordered list of field values,
 * or a null record for a null struct.
 */
public static class StructData {
  List<Object> dataRecord;

  public StructData(List<Object> dataRecord) {
    this.dataRecord = dataRecord;
  }

  public StructData(Object... data) {
    this(Arrays.asList(data));
  }

  /** Returns the number of fields, or 0 for a null struct. */
  public int getNumFields() {
    return dataRecord == null ? 0 : dataRecord.size();
  }

  /** Returns true if this struct row is null. */
  public boolean isNull() {
    return dataRecord == null;
  }

  /** Returns the field value at the given position. */
  public Object getField(int index) {
    return dataRecord.get(index);
  }
}
/**
 * Schema node for a STRUCT column: an ordered list of child field types.
 */
public static class StructType extends HostColumnVector.DataType {
  private boolean isNullable;
  private List<HostColumnVector.DataType> children;

  public StructType(boolean isNullable, List<HostColumnVector.DataType> children) {
    this.isNullable = isNullable;
    this.children = children;
  }

  public StructType(boolean isNullable, DataType... children) {
    this(isNullable, Arrays.asList(children));
  }

  @Override
  public DType getType() {
    return DType.STRUCT;
  }

  @Override
  public boolean isNullable() {
    return isNullable;
  }

  /** Returns the field type at the given position. */
  @Override
  public HostColumnVector.DataType getChild(int index) {
    return children.get(index);
  }

  @Override
  public int getNumChildren() {
    return children.size();
  }
}
/**
 * Schema node for a leaf (non-nested) column: just a cudf type and nullability.
 */
public static class BasicType extends HostColumnVector.DataType {
  private DType type;
  private boolean isNullable;

  public BasicType(boolean isNullable, DType type) {
    this.isNullable = isNullable;
    this.type = type;
  }

  @Override
  public DType getType() {
    return type;
  }

  @Override
  public boolean isNullable() {
    return isNullable;
  }

  /** Leaf columns have no children; always returns null. */
  @Override
  public HostColumnVector.DataType getChild(int index) {
    return null;
  }

  @Override
  public int getNumChildren() {
    return 0;
  }
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostColumnVectorCore.java
|
/*
*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* A class that holds Host side Column Vector APIs and the OffHeapState.
* Any children of a HostColumnVector will be instantiated via this class.
*/
public class HostColumnVectorCore implements AutoCloseable {
private static final Logger log = LoggerFactory.getLogger(HostColumnVector.class);
protected final OffHeapState offHeap;
protected final DType type;
protected long rows;
protected Optional<Long> nullCount;
protected List<HostColumnVectorCore> children;
/**
 * Wraps host buffers (data, validity, offsets) and any nested children into a
 * host column. Registers the off-heap state with MemoryCleaner for leak tracking.
 * @param type the cudf type of this column.
 * @param rows number of rows in the column.
 * @param nullCount known null count, or empty if unknown.
 * @param data raw data buffer (may be null for some nested types).
 * @param validity validity bitmask buffer, or null if there are no nulls.
 * @param offsets offsets buffer for STRING/LIST columns, or null.
 * @param nestedChildren child columns for nested types.
 */
public HostColumnVectorCore(DType type, long rows,
    Optional<Long> nullCount, HostMemoryBuffer data, HostMemoryBuffer validity,
    HostMemoryBuffer offsets, List<HostColumnVectorCore> nestedChildren) {
  // NOTE: This constructor MUST NOT examine the contents of any host buffers, as they may be
  // asynchronously written by the device.
  this.offHeap = new OffHeapState(data, validity, offsets);
  MemoryCleaner.register(this, offHeap);
  this.type = type;
  this.rows = rows;
  this.nullCount = nullCount;
  this.children = nestedChildren;
}
/**
 * Returns the type of this vector.
 */
public DType getType() {
  return type;
}
/**
 * Returns the data buffer for a given host side column vector
 */
public HostMemoryBuffer getData() {
  return offHeap.data;
}
/**
 * Returns the validity buffer for a given host side column vector
 */
public HostMemoryBuffer getValidity() {
  return offHeap.valid;
}
/**
 * Returns the offset buffer
 */
public HostMemoryBuffer getOffsets() {
  return offHeap.offsets;
}
/**
 * Returns the child column at the given index for a nested (LIST/STRUCT) column.
 */
public HostColumnVectorCore getChildColumnView(int childIndex) {
  return getNestedChildren().get(childIndex);
}
/**
 * Returns the number of nulls in the data. If the null count is not already
 * known an IllegalStateException is thrown, since calculating it on the host
 * is not currently supported.
 */
public long getNullCount() {
  if (!nullCount.isPresent()) {
    throw new IllegalStateException("Calculating an unknown null count on the host is not currently supported");
  }
  return nullCount.get();
}
/**
 * Returns the list of child host column vectors for a given host side column
 */
List<HostColumnVectorCore> getNestedChildren() {
  return children;
}
/**
 * Returns the number of rows for a given host side column vector
 */
public long getRowCount() {
  return rows;
}
/**
 * Returns the number of children for this column
 */
public int getNumChildren() {
  return children.size();
}
/**
 * Return the element at a given row for a given data type.
 * Nested types (LIST/STRUCT) handle nulls internally; for all other types a
 * null row is returned as a Java null.
 * @param rowIndex the row number
 * @return an object that would need to be casted to appropriate type based on this vector's data type
 */
Object getElement(int rowIndex) {
  if (type.equals(DType.LIST)) {
    return getList(rowIndex);
  } else if (type.equals(DType.STRUCT)) {
    return getStruct(rowIndex);
  } else {
    if (isNull(rowIndex)) {
      return null;
    }
    return readValue(rowIndex);
  }
}
/**
 * Read the string at the given row, or null for a null row.
 * Fix: decode explicitly as UTF-8 — cudf stores strings as UTF-8 bytes and
 * getJavaString already decodes with StandardCharsets.UTF_8; the original
 * {@code new String(rawData)} used the platform-default charset, giving
 * platform-dependent results for non-ASCII data.
 */
private Object getString(int rowIndex) {
  if (isNull(rowIndex)) {
    return null;
  }
  int start = (int)getStartListOffset(rowIndex);
  int end = (int)getEndListOffset(rowIndex);
  int size = end - start;
  if (size == 0) {
    return "";
  }
  byte[] rawData = new byte[size];
  offHeap.data.getBytes(rawData, 0, start, size);
  return new String(rawData, StandardCharsets.UTF_8);
}
/////////////////////////////////////////////////////////////////////////////
// DATA ACCESS
/////////////////////////////////////////////////////////////////////////////
/**
 * For testing only. Allows null checks to go past the number of rows, but not past the end
 * of the buffer. NOTE: If the validity vector was allocated by cudf itself it is not
 * guaranteed to have the same padding, but for all practical purposes it does. This is
 * just to verify that the buffer was allocated and initialized properly.
 */
boolean isNullExtendedRange(long index) {
  // Upper bound is in bits: allocation size in bytes * 8.
  long maxNullRow = BitVectorHelper.getValidityAllocationSizeInBytes(rows) * 8;
  assert (index >= 0 && index < maxNullRow) : "TEST: index is out of range 0 <= " + index + " <" +
      " " + maxNullRow;
  if (hasValidityVector()) {
    // A present-and-zero null count means no bit can be unset; skip the buffer read.
    if (nullCount.isPresent() && !hasNulls()) {
      return false;
    }
    return BitVectorHelper.isNull(offHeap.valid, index);
  }
  // No validity vector at all means every row is valid.
  return false;
}
/**
 * Get access to the raw host buffer for this column. Use with a lot of caution:
 * the buffer's lifetime is tied to the column's (do not close it) and its
 * contents must not be modified. The data must be on the host for this to work.
 * @param type the type of buffer to get access to.
 * @return the underlying buffer, or null if no buffer is associated with it for this
 *         column. An empty column may have no buffers at all.
 */
public HostMemoryBuffer getHostBufferFor(BufferType type) {
  switch (type) {
    case VALIDITY:
      return offHeap.valid;
    case OFFSET:
      return offHeap.offsets;
    case DATA:
      return offHeap.data;
    default:
      throw new IllegalArgumentException(type + " is not a supported buffer type.");
  }
}
/**
 * Copy bytes from one of this column's host buffers into a Java byte array.
 * @param dst destination array.
 * @param dstOffset starting position in {@code dst}.
 * @param src which buffer (DATA/VALIDITY/OFFSET) to copy from.
 * @param srcOffset starting byte position within the source buffer.
 * @param length number of bytes to copy.
 */
void copyHostBufferBytes(byte[] dst, int dstOffset, BufferType src, long srcOffset,
    int length) {
  assert dstOffset >= 0;
  assert srcOffset >= 0;
  assert length >= 0;
  assert dstOffset + length <= dst.length;
  HostMemoryBuffer srcBuffer = getHostBufferFor(src);
  assert srcOffset + length <= srcBuffer.length : "would copy off end of buffer "
      + srcOffset + " + " + length + " > " + srcBuffer.length;
  // Direct off-heap to on-heap copy starting at the buffer's native address.
  UnsafeMemoryAccessor.getBytes(dst, dstOffset,
      srcBuffer.getAddress() + srcOffset, length);
}
/**
 * Generic type independent asserts when getting a value from a single index:
 * the index must be in range and the row must not be null.
 * @param index where to get the data from.
 */
private void assertsForGet(long index) {
  assert (index >= 0 && index < rows) : "index is out of range 0 <= " + index + " < " + rows;
  assert !isNull(index) : " value at " + index + " is null";
}
/**
 * Get the byte value at index. The type must be backed by a single byte.
 */
public byte getByte(long index) {
  assert type.isBackedByByte() : type + " is not stored as a byte.";
  assertsForGet(index);
  return offHeap.data.getByte(index * type.getSizeInBytes());
}
/**
 * Get the short value at index. The type must be backed by a short.
 */
public final short getShort(long index) {
  assert type.isBackedByShort() : type + " is not stored as a short.";
  assertsForGet(index);
  return offHeap.data.getShort(index * type.getSizeInBytes());
}
/**
 * Get the int value at index. The type must be backed by an int.
 */
public final int getInt(long index) {
  assert type.isBackedByInt() : type + " is not stored as a int.";
  assertsForGet(index);
  return offHeap.data.getInt(index * type.getSizeInBytes());
}
/**
 * Get the starting byte offset for the string at index
 * Wraps getStartListOffset for backwards compatibility
 */
long getStartStringOffset(long index) {
  return getStartListOffset(index);
}
/**
 * Get the starting element offset for the list or string at index
 */
public long getStartListOffset(long index) {
  assert type.equals(DType.STRING) || type.equals(DType.LIST): type +
      " is not a supported string or list type.";
  assert (index >= 0 && index < rows) : "index is out of range 0 <= " + index + " < " + rows;
  // Offsets are stored as 4-byte ints.
  return offHeap.offsets.getInt(index * 4);
}
/**
 * Get the ending byte offset for the string at index.
 * Wraps getEndListOffset for backwards compatibility
 */
long getEndStringOffset(long index) {
  return getEndListOffset(index);
}
/**
 * Get the ending element offset for the list or string at index.
 */
public long getEndListOffset(long index) {
  assert type.equals(DType.STRING) || type.equals(DType.LIST): type +
      " is not a supported string or list type.";
  assert (index >= 0 && index < rows) : "index is out of range 0 <= " + index + " < " + rows;
  // The offsets has one more entry than there are rows.
  return offHeap.offsets.getInt((index + 1) * 4);
}
/**
 * Get the long value at index. The type must be backed by a long.
 */
public final long getLong(long index) {
  // Timestamps with time values are stored as longs
  assert type.isBackedByLong(): type + " is not stored as a long.";
  assertsForGet(index);
  return offHeap.data.getLong(index * type.getSizeInBytes());
}
/**
 * Get the float value at index. The type must be FLOAT32.
 */
public final float getFloat(long index) {
  assert type.equals(DType.FLOAT32) : type + " is not a supported float type.";
  assertsForGet(index);
  return offHeap.data.getFloat(index * type.getSizeInBytes());
}
/**
 * Get the double value at index. The type must be FLOAT64.
 */
public final double getDouble(long index) {
  assert type.equals(DType.FLOAT64) : type + " is not a supported double type.";
  assertsForGet(index);
  return offHeap.data.getDouble(index * type.getSizeInBytes());
}
/**
 * Get the boolean value at index. The type must be BOOL8.
 */
public final boolean getBoolean(long index) {
  assert type.equals(DType.BOOL8) : type + " is not a supported boolean type.";
  assertsForGet(index);
  return offHeap.data.getBoolean(index * type.getSizeInBytes());
}
/**
 * Get the BigDecimal value at index, rebuilding it from the stored unscaled
 * value and the column type's scale (cudf stores scale negated).
 */
public final BigDecimal getBigDecimal(long index) {
  assert type.isDecimalType() : type + " is not a supported decimal type.";
  assertsForGet(index);
  if (type.typeId == DType.DTypeEnum.DECIMAL32) {
    int unscaledValue = offHeap.data.getInt(index * type.getSizeInBytes());
    return BigDecimal.valueOf(unscaledValue, -type.getScale());
  } else if (type.typeId == DType.DTypeEnum.DECIMAL64) {
    long unscaledValue = offHeap.data.getLong(index * type.getSizeInBytes());
    return BigDecimal.valueOf(unscaledValue, -type.getScale());
  } else if (type.typeId == DType.DTypeEnum.DECIMAL128) {
    int sizeInBytes = DType.DTypeEnum.DECIMAL128.sizeInBytes;
    byte[] dst = new byte[sizeInBytes];
    // We need to switch the endianness for decimal128 byte arrays between java and native code.
    offHeap.data.getBytes(dst, 0, (index * sizeInBytes), sizeInBytes);
    convertInPlaceToBigEndian(dst);
    return new BigDecimal(new BigInteger(dst), -type.getScale());
  } else {
    throw new IllegalStateException(type + " is not a supported decimal type.");
  }
}
/**
 * Get the raw UTF8 bytes at index. This API is faster than getJavaString, but still not
 * ideal because it is copying the data onto the heap.
 */
public byte[] getUTF8(long index) {
  assert type.equals(DType.STRING) : type + " is not a supported string type.";
  assertsForGet(index);
  // The offsets buffer bounds this row's byte range within the data buffer.
  int start = (int)getStartListOffset(index);
  int size = (int)getEndListOffset(index) - start;
  byte[] rawData = new byte[size];
  if (size > 0) {
    offHeap.data.getBytes(rawData, 0, start, size);
  }
  return rawData;
}
/**
 * Get the value at index as a Java String. This API is slow as it has to translate the
 * string representation. Please use it with caution.
 */
public String getJavaString(long index) {
  byte[] rawData = getUTF8(index);
  return new String(rawData, StandardCharsets.UTF_8);
}
/**
 * WARNING: Special case for lists of int8 or uint8, does not support null list values or lists
 *
 * Get array of bytes at index from a list column of int8 or uint8. The column may not be a list
 * of lists and may not have nulls.
 */
public byte[] getBytesFromList(long rowIndex) {
  assert type.equals(DType.LIST) : type + " is not a supported list of bytes type.";
  // A list column's single child holds the element data.
  HostColumnVectorCore listData = children.get(0);
  assert listData.type.equals(DType.INT8) || listData.type.equals(DType.UINT8) : type +
      " is not a supported list of bytes type.";
  assert !listData.hasNulls() : "byte list column with nulls are not supported";
  assertsForGet(rowIndex);
  // This row's element range in the child data buffer.
  int start = (int)getStartListOffset(rowIndex);
  int end = (int)getEndListOffset(rowIndex);
  int size = end - start;
  byte[] result = new byte[size];
  if (size > 0) {
    listData.offHeap.data.getBytes(result, 0, start, size);
  }
  return result;
}
/**
 * WARNING: Strictly for test only. This call is not efficient for production.
 * Materializes the list at the given row by reading each element from the
 * single child column, or returns null for a null row.
 * Fix: check for a null row before reading the offsets buffer — the original
 * read both offsets first and then discarded them for null rows.
 */
public List getList(long rowIndex) {
  assert rowIndex < rows;
  assert type.equals(DType.LIST);
  if (isNull(rowIndex)) {
    return null;
  }
  int start = (int)getStartListOffset(rowIndex);
  int end = (int)getEndListOffset(rowIndex);
  List retList = new ArrayList();
  for (int j = start; j < end; j++) {
    // lists have only 1 child
    for (HostColumnVectorCore childHcv : children) {
      retList.add(childHcv.getElement(j));
    }
  }
  return retList;
}
/**
 * WARNING: Strictly for test only. This call is not efficient for production.
 * Materializes the struct at the given row by reading the matching row from
 * each child column, or returns null for a null row.
 */
public HostColumnVector.StructData getStruct(int rowIndex) {
  assert rowIndex < rows;
  assert type.equals(DType.STRUCT);
  List<Object> fields = new ArrayList<>();
  if (isNull(rowIndex)) {
    return null;
  }
  int numFields = getNumChildren();
  for (int i = 0; i < numFields; i++) {
    fields.add(children.get(i).getElement(rowIndex));
  }
  return new HostColumnVector.StructData(fields);
}
/**
 * Method that returns a boolean to indicate if the element at a given row index is null.
 * Out-of-range indexes are reported as null rather than throwing.
 * @param rowIndex the row index
 * @return true if null else false
 */
public boolean isNull(long rowIndex) {
  return rowIndex < 0 || rowIndex >= rows // unknown, hence NULL
      || hasValidityVector() && BitVectorHelper.isNull(offHeap.valid, rowIndex);
}
/**
 * Returns if the vector has a validity vector allocated or not.
 */
public boolean hasValidityVector() {
  return (offHeap.valid != null);
}
/**
 * Returns if the vector has nulls. Throws IllegalStateException (via
 * getNullCount) if the null count is not already known, since calculating it
 * on the host is not currently supported.
 */
public boolean hasNulls() {
  return getNullCount() > 0;
}
/**
 * Helper method that reads in a non-null value at a given row index, boxing it
 * according to this vector's data type. Callers must have already checked
 * {@link #isNull(long)}.
 * @param rowIndex the row index
 * @return an object that would need to be casted to appropriate type based on this vector's data type
 */
private Object readValue(int rowIndex) {
  assert rowIndex < rows;
  // Byte offset into the fixed-width data buffer (unused for STRING).
  int rowOffset = rowIndex * type.getSizeInBytes();
  switch (type.typeId) {
    // 4-byte types read as int.
    case INT32: // fall through
    case UINT32: // fall through
    case TIMESTAMP_DAYS:
    case DURATION_DAYS: return offHeap.data.getInt(rowOffset);
    // 8-byte types (including all time-based timestamps/durations) read as long.
    case INT64: // fall through
    case UINT64: // fall through
    case DURATION_MICROSECONDS: // fall through
    case DURATION_MILLISECONDS: // fall through
    case DURATION_NANOSECONDS: // fall through
    case DURATION_SECONDS: // fall through
    case TIMESTAMP_MICROSECONDS: // fall through
    case TIMESTAMP_MILLISECONDS: // fall through
    case TIMESTAMP_NANOSECONDS: // fall through
    case TIMESTAMP_SECONDS: return offHeap.data.getLong(rowOffset);
    case FLOAT32: return offHeap.data.getFloat(rowOffset);
    case FLOAT64: return offHeap.data.getDouble(rowOffset);
    case UINT8: // fall through
    case INT8: return offHeap.data.getByte(rowOffset);
    case UINT16: // fall through
    case INT16: return offHeap.data.getShort(rowOffset);
    case BOOL8: return offHeap.data.getBoolean(rowOffset);
    case STRING: return getString(rowIndex);
    // Decimals are stored as unscaled values; cudf scale is negated.
    case DECIMAL32: return BigDecimal.valueOf(offHeap.data.getInt(rowOffset), -type.getScale());
    case DECIMAL64: return BigDecimal.valueOf(offHeap.data.getLong(rowOffset), -type.getScale());
    default: throw new UnsupportedOperationException("Do not support " + type);
  }
}
/**
 * Returns the amount of host memory used to store column/validity data (not metadata),
 * including the buffers of all nested children.
 */
public long getHostMemorySize() {
  long total = offHeap.getHostMemorySize();
  for (HostColumnVectorCore child : children) {
    total += child.getHostMemorySize();
  }
  return total;
}
/**
 * Close method for the column. Closes all nested children first, then drops
 * this reference and eagerly frees the off-heap buffers.
 */
@Override
public synchronized void close() {
  for (HostColumnVectorCore child : children) {
    if (child != null) {
      child.close();
    }
  }
  offHeap.delRef();
  // false: this is a normal close, not a leak report.
  offHeap.cleanImpl(false);
}
/** Debug representation of this host column's shape and state. */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("HostColumnVectorCore{");
  sb.append("rows=").append(rows);
  sb.append(", type=").append(type);
  sb.append(", nullCount=").append(nullCount);
  sb.append(", offHeap=").append(offHeap);
  sb.append('}');
  return sb.toString();
}
/**
 * Convert a Java BigInteger two's-complement big-endian byte array into the
 * little-endian 16-byte layout cudf expects for DECIMAL128, sign-extending to
 * the full width.
 * @param bytes big-endian two's-complement bytes (at most 16).
 * @return a 16-byte little-endian representation.
 */
protected static byte[] convertDecimal128FromJavaToCudf(byte[] bytes) {
  byte[] finalBytes = new byte[DType.DTypeEnum.DECIMAL128.sizeInBytes];
  // bytes[0] is the most significant byte in big-endian order; its high bit is the sign.
  byte lastByte = bytes[0];
  //Convert to 2's complement representation and make sure the sign bit is extended correctly
  byte setByte = (lastByte & 0x80) > 0 ? (byte)0xff : (byte)0x00;
  for(int i = bytes.length; i < finalBytes.length; i++) {
    finalBytes[i] = setByte;
  }
  // After setting the sign bits, reverse the rest of the bytes for endianness
  for(int k = 0; k < bytes.length; k++) {
    finalBytes[k] = bytes[bytes.length - k - 1];
  }
  return finalBytes;
}
/**
 * Reverse the byte array in place, flipping a little-endian value to
 * big-endian (and vice versa). Only valid on little-endian hosts.
 */
private void convertInPlaceToBigEndian(byte[] dst) {
  assert ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
  for (int lo = 0, hi = dst.length - 1; lo < hi; lo++, hi--) {
    byte tmp = dst[lo];
    dst[lo] = dst[hi];
    dst[hi] = tmp;
  }
}
/////////////////////////////////////////////////////////////////////////////
// HELPER CLASSES
/////////////////////////////////////////////////////////////////////////////
/**
 * Holds the off heap state of the column vector so we can clean it up, even if it is leaked.
 */
protected static final class OffHeapState extends MemoryCleaner.Cleaner {
  public HostMemoryBuffer data;
  public HostMemoryBuffer valid = null;
  public HostMemoryBuffer offsets = null;
  OffHeapState(HostMemoryBuffer data, HostMemoryBuffer valid, HostMemoryBuffer offsets) {
    this.data = data;
    this.valid = valid;
    this.offsets = offsets;
  }
  /**
   * Close any still-open buffers. Returns true if cleanup was needed, and
   * logs a leak when {@code logErrorIfNotClean} is set.
   */
  @Override
  protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
    boolean neededCleanup = false;
    if (data != null || valid != null || offsets != null) {
      try {
        if (data != null) {
          data.close();
        }
        if (offsets != null) {
          offsets.close();
        }
        if (valid != null) {
          valid.close();
        }
      } finally {
        // Always mark the resource as freed even if an exception is thrown.
        // We cannot know how far it progressed before the exception, and
        // therefore it is unsafe to retry.
        data = null;
        valid = null;
        offsets = null;
      }
      neededCleanup = true;
    }
    if (neededCleanup && logErrorIfNotClean) {
      log.error("A HOST COLUMN VECTOR WAS LEAKED (ID: " + id + ")");
      logRefCountDebug("Leaked vector");
    }
    return neededCleanup;
  }
  /** Suppress leak warnings for this state and all of its buffers. */
  @Override
  public void noWarnLeakExpected() {
    super.noWarnLeakExpected();
    if (data != null) {
      data.noWarnLeakExpected();
    }
    if (valid != null) {
      valid.noWarnLeakExpected();
    }
    if (offsets != null) {
      offsets.noWarnLeakExpected();
    }
  }
  /** Returns true once every buffer has been released. */
  @Override
  public boolean isClean() {
    return data == null && valid == null && offsets == null;
  }
  /**
   * This returns total memory allocated on the host for the ColumnVector.
   */
  public long getHostMemorySize() {
    long total = 0;
    if (valid != null) {
      total += valid.length;
    }
    if (data != null) {
      total += data.length;
    }
    if (offsets != null) {
      total += offsets.length;
    }
    return total;
  }
  @Override
  public String toString() {
    return "(ID: " + id + ")";
  }
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostMemoryAllocator.java
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * Strategy interface for allocating host (CPU) memory buffers, allowing callers
 * to control whether pinned memory is used.
 */
public interface HostMemoryAllocator {
  /**
   * Allocate memory, but be sure to close the returned buffer to avoid memory leaks.
   * @param bytes size in bytes to allocate
   * @param preferPinned If set to true, the pinned memory pool will be used if possible with a
   *                     fallback to off-heap memory. If set to false, the allocation will always
   *                     be from off-heap memory.
   * @return the newly created buffer
   */
  HostMemoryBuffer allocate(long bytes, boolean preferPinned);
  /**
   * Allocate memory, but be sure to close the returned buffer to avoid memory leaks. Pinned memory
   * for allocations preference is up to the implementor
   *
   * @param bytes size in bytes to allocate
   * @return the newly created buffer
   */
  HostMemoryBuffer allocate(long bytes);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostMemoryBuffer.java
|
/*
*
* Copyright (c) 2019-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel.MapMode;
/**
* This class holds an off-heap buffer in the host/CPU memory.
* Please note that instances must be explicitly closed or native memory will be leaked!
*
* Internally this class will try to use PinnedMemoryPool to allocate and free the memory
* it uses by default. To avoid using the pinned memory pool for allocations by default
* set the Java system property ai.rapids.cudf.prefer-pinned to false.
*
* Be aware that the off heap memory limits set by Java do not apply to these buffers.
*/
public class HostMemoryBuffer extends MemoryBuffer {
static final boolean defaultPreferPinned;
private static final Logger log = LoggerFactory.getLogger(HostMemoryBuffer.class);

static {
  // Pinned memory is preferred unless the user explicitly opts out via the
  // "ai.rapids.cudf.prefer-pinned" system property.
  String prop = System.getProperty("ai.rapids.cudf.prefer-pinned");
  defaultPreferPinned = (prop == null) || Boolean.parseBoolean(prop);
}
private static final class HostBufferCleaner extends MemoryBufferCleaner {
private long address;
private final long length;
HostBufferCleaner(long address, long length) {
this.address = address;
this.length = length;
}
@Override
protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
boolean neededCleanup = false;
long origAddress = address;
if (address != 0) {
try {
UnsafeMemoryAccessor.free(address);
} finally {
// Always mark the resource as freed even if an exception is thrown.
// We cannot know how far it progressed before the exception, and
// therefore it is unsafe to retry.
address = 0;
}
neededCleanup = true;
}
if (neededCleanup && logErrorIfNotClean) {
log.error("A HOST BUFFER WAS LEAKED (ID: " + id + " " + Long.toHexString(origAddress) + ")");
logRefCountDebug("Leaked host buffer");
}
return neededCleanup;
}
@Override
public boolean isClean() {
return address == 0;
}
}
/** Unmaps file-backed (mmap) regions when the owning buffer is closed or leaked. */
private static final class MmapCleaner extends MemoryBufferCleaner {
  private long address;
  private final long length;

  MmapCleaner(long address, long length) {
    this.address = address;
    this.length = length;
  }

  @Override
  protected boolean cleanImpl(boolean logErrorIfNotClean) {
    if (address == 0) {
      return false; // nothing mapped, nothing to clean
    }
    try {
      HostMemoryBufferNativeUtils.munmap(address, length);
    } finally {
      // Always mark the resource as freed even if an exception is thrown.
      // We cannot know how far it progressed before the exception, and
      // therefore it is unsafe to retry.
      address = 0;
    }
    if (logErrorIfNotClean) {
      log.error("A MEMORY MAPPED BUFFER WAS LEAKED!!!!");
      logRefCountDebug("Leaked mmap buffer");
    }
    return true;
  }

  @Override
  public boolean isClean() {
    return address == 0;
  }
}
/**
 * Allocate memory, but be sure to close the returned buffer to avoid memory leaks.
 * @param bytes size in bytes to allocate
 * @param preferPinned If set to true, the pinned memory pool will be used if possible with a
 *                     fallback to off-heap memory. If set to false, the allocation will always
 *                     be from off-heap memory.
 * @return the newly created buffer
 */
public static HostMemoryBuffer allocate(long bytes, boolean preferPinned) {
  // Delegates to the process-wide default allocator so the allocation strategy
  // can be replaced in a single place.
  return DefaultHostMemoryAllocator.get().allocate(bytes, preferPinned);
}

/**
 * Allocate memory, but be sure to close the returned buffer to avoid memory leaks. Pinned memory
 * will be preferred for allocations if the java system property ai.rapids.cudf.prefer-pinned is
 * set to true (the default, see the static initializer above).
 * @param bytes size in bytes to allocate
 * @return the newly created buffer
 */
public static HostMemoryBuffer allocate(long bytes) {
  return allocate(bytes, defaultPreferPinned);
}

/**
 * Allocate host memory bypassing the default allocator. This is intended to only be used by
 * other allocators. Pinned memory will not be used for these allocations.
 * @param bytes size in bytes to allocate
 * @return the newly created buffer
 */
public static HostMemoryBuffer allocateRaw(long bytes) {
  return new HostMemoryBuffer(UnsafeMemoryAccessor.allocate(bytes), bytes);
}
/**
 * Create a host buffer that is memory-mapped to a file.
 * @param path path to the file to map into host memory
 * @param mode mapping type
 * @param offset file offset where the map will start
 * @param length the number of bytes to map
 * @return file-mapped buffer
 * @throws IOException if the underlying mmap of the file fails
 */
public static HostMemoryBuffer mapFile(File path, MapMode mode,
    long offset, long length) throws IOException {
  // mapping offset must be a multiple of the system page size, so map from the
  // page boundary at or below the requested offset and expose a view that
  // starts offsetDelta bytes into the mapping.
  long offsetDelta = offset & (UnsafeMemoryAccessor.pageSize() - 1);
  long address;
  try {
    address = HostMemoryBufferNativeUtils.mmap(path.getPath(),
        modeAsInt(mode), offset - offsetDelta, length + offsetDelta);
  } catch (IOException e) {
    throw new IOException("Error creating memory map for " + path, e);
  }
  // The cleaner must unmap the full page-aligned region, not just the visible part.
  return new HostMemoryBuffer(address + offsetDelta, length,
      new MmapCleaner(address, length + offsetDelta));
}
/** Translate a NIO map mode into the int code the native mmap wrapper expects (0=read, 1=read+write). */
private static int modeAsInt(MapMode mode) {
  if (MapMode.READ_ONLY.equals(mode)) {
    return 0;
  }
  if (MapMode.READ_WRITE.equals(mode)) {
    return 1;
  }
  throw new UnsupportedOperationException("Unsupported mapping mode: " + mode);
}
// Owning buffer: a HostBufferCleaner frees the native allocation when the
// buffer is closed (or leaked).
HostMemoryBuffer(long address, long length) {
  this(address, length, new HostBufferCleaner(address, length));
}

// Buffer with a caller-supplied cleanup strategy (e.g. mmap'd regions).
HostMemoryBuffer(long address, long length, MemoryBufferCleaner cleaner) {
  super(address, length, cleaner);
}

private HostMemoryBuffer(long address, long lengthInBytes, HostMemoryBuffer parent) {
  super(address, lengthInBytes, parent);
  // This is a slice so we are not going to mark it as allocated
}
/**
 * Return a ByteBuffer that provides access to the underlying memory. Please note: if the buffer
 * is larger than a ByteBuffer can handle (2GB) an exception will be thrown. Also
 * be aware that the ByteBuffer will be in native endian order, which is different from regular
 * ByteBuffers that are big endian by default.
 * @return a ByteBuffer view over the whole buffer; it does not own the memory
 */
public final ByteBuffer asByteBuffer() {
  assert length <= Integer.MAX_VALUE : "2GB limit on ByteBuffers";
  return asByteBuffer(0, (int) length);
}

/**
 * Return a ByteBuffer that provides access to the underlying memory. Be aware that the
 * ByteBuffer will be in native endian order, which is different from regular
 * ByteBuffers that are big endian by default.
 * @param offset the offset to start at
 * @param length how many bytes to include.
 * @return a ByteBuffer view over the requested range; it does not own the memory
 */
public final ByteBuffer asByteBuffer(long offset, int length) {
  addressOutOfBoundsCheck(address + offset, length, "asByteBuffer");
  return HostMemoryBufferNativeUtils.wrapRangeInBuffer(address + offset, length)
      .order(ByteOrder.nativeOrder());
}
/**
 * Copy the contents of the given buffer to this buffer.
 * @param destOffset offset in bytes in this buffer to start copying to
 * @param srcData Buffer to be copied from
 * @param srcOffset offset in bytes to start copying from in srcData
 * @param length number of bytes to copy
 */
public final void copyFromHostBuffer(long destOffset, HostMemoryBuffer srcData, long srcOffset,
    long length) {
  // Bounds-check both ends of the copy before touching native memory.
  addressOutOfBoundsCheck(address + destOffset, length, "copy from dest");
  srcData.addressOutOfBoundsCheck(srcData.address + srcOffset, length, "copy from source");
  UnsafeMemoryAccessor.copyMemory(null, srcData.address + srcOffset, null,
      address + destOffset, length);
}
/**
 * Copy len bytes from in to this buffer.
 * @param destOffset offset in bytes in this buffer to start copying to
 * @param in input stream to copy bytes from
 * @param byteLength number of bytes to copy
 * @throws EOFException If there are not enough bytes in the stream to copy.
 * @throws IOException If there is an error reading from the stream.
 */
public final void copyFromStream(long destOffset, InputStream in, long byteLength) throws IOException {
  addressOutOfBoundsCheck(address + destOffset, byteLength, "copy from stream");
  // Stage through a bounded heap array (at most 128 KiB) since the stream
  // cannot write directly into off-heap memory.
  final byte[] chunk = new byte[(int) Math.min(1024 * 128, byteLength)];
  long remaining = byteLength;
  long writeOffset = destOffset;
  while (remaining > 0) {
    int read = in.read(chunk, 0, (int) Math.min(chunk.length, remaining));
    if (read < 0) {
      throw new EOFException("Unexpected end of stream, expected " + remaining + " more bytes");
    }
    setBytes(writeOffset, chunk, 0, read);
    writeOffset += read;
    remaining -= read;
  }
}
/**
 * Returns the byte value at that offset
 * @param offset - offset from the address
 * @return - value
 */
public final byte getByte(long offset) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 1, "getByte");
  return UnsafeMemoryAccessor.getByte(requestedAddress);
}

/**
 * Sets the byte value at that offset
 * @param offset - offset from the address
 * @param value - value to be set
 */
public final void setByte(long offset, byte value) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 1, "setByte");
  UnsafeMemoryAccessor.setByte(requestedAddress, value);
}

/**
 * Copy a set of bytes to an array from the buffer starting at offset.
 * @param dst destination byte array
 * @param dstOffset starting offset within the destination array
 * @param srcOffset starting offset within this buffer
 * @param len number of bytes to copy
 */
public final void getBytes(byte[] dst, long dstOffset, long srcOffset, long len) {
  assert len >= 0;
  assert len <= dst.length - dstOffset;
  assert srcOffset >= 0;
  long requestedAddress = this.address + srcOffset;
  addressOutOfBoundsCheck(requestedAddress, len, "getBytes");
  UnsafeMemoryAccessor.getBytes(dst, dstOffset, requestedAddress, len);
}

/**
 * Copy a set of bytes from an array into the buffer at offset.
 * @param offset the offset from the address to start copying to
 * @param data the data to be copied.
 * @param srcOffset starting index within {@code data} to copy from
 * @param len number of bytes to copy
 */
public final void setBytes(long offset, byte[] data, long srcOffset, long len) {
  assert len >= 0 : "length is not allowed " + len;
  assert len <= data.length - srcOffset;
  assert srcOffset >= 0;
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, len, "setBytes");
  UnsafeMemoryAccessor.setBytes(requestedAddress, data, srcOffset, len);
}
/**
 * Returns the Short value at that offset
 * @param offset - offset from the address
 * @return - value
 */
public final short getShort(long offset) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 2, "getShort");
  return UnsafeMemoryAccessor.getShort(requestedAddress);
}

/**
 * Sets the Short value at that offset
 * @param offset - offset from the address
 * @param value - value to be set
 */
public final void setShort(long offset, short value) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 2, "setShort");
  UnsafeMemoryAccessor.setShort(requestedAddress, value);
}

/**
 * Copy a set of shorts from an array into the buffer at offset.
 * @param offset the offset from the address to start copying to
 * @param data the data to be copied.
 * @param srcOffset index in data to start at.
 * @param len number of shorts to copy
 */
public final void setShorts(long offset, short[] data, long srcOffset, long len) {
  assert len >= 0 : "length is not allowed " + len;
  assert len <= data.length - srcOffset;
  // Validate the source offset as getBytes/setBytes do; a negative srcOffset
  // would otherwise slip past the length check above.
  assert srcOffset >= 0;
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, len * 2, "setShorts");
  UnsafeMemoryAccessor.setShorts(requestedAddress, data, srcOffset, len);
}
/**
 * Returns the Integer value at that offset
 * @param offset - offset from the address
 * @return - value
 */
public final int getInt(long offset) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 4, "getInt");
  return UnsafeMemoryAccessor.getInt(requestedAddress);
}

/**
 * Copy a set of ints to an array from the buffer starting at offset.
 * @param dst destination int array
 * @param dstIndex starting index within the destination array
 * @param srcOffset starting offset within this buffer
 * @param count number of ints to copy
 */
public final void getInts(int[] dst, long dstIndex, long srcOffset, int count) {
  assert count >= 0;
  assert count <= dst.length - dstIndex;
  assert srcOffset >= 0;
  long requestedAddress = this.address + srcOffset;
  addressOutOfBoundsCheck(requestedAddress, count * 4L, "getInts");
  UnsafeMemoryAccessor.getInts(dst, dstIndex, requestedAddress, count);
}

/**
 * Sets the Integer value at that offset
 * @param offset - offset from the address
 * @param value - value to be set
 */
public final void setInt(long offset, int value) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 4, "setInt");
  UnsafeMemoryAccessor.setInt(requestedAddress, value);
}

/**
 * Copy a set of ints from an array into the buffer at offset.
 * @param offset the offset from the address to start copying to
 * @param data the data to be copied.
 * @param srcOffset index into data to start at
 * @param len number of ints to copy
 */
public final void setInts(long offset, int[] data, long srcOffset, long len) {
  assert len >= 0 : "length is not allowed " + len;
  assert len <= data.length - srcOffset;
  // Validate the source offset as getInts/setBytes do; a negative srcOffset
  // would otherwise slip past the length check above.
  assert srcOffset >= 0;
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, len * 4, "setInts");
  UnsafeMemoryAccessor.setInts(requestedAddress, data, srcOffset, len);
}
/**
 * Returns the Long value at that offset
 * @param offset - offset from the address
 * @return - value
 */
public final long getLong(long offset) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 8, "getLong");
  return UnsafeMemoryAccessor.getLong(requestedAddress);
}

/**
 * Sets the Long value at that offset
 * @param offset - offset from the address
 * @param value - value to be set
 */
public final void setLong(long offset, long value) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 8, "setLong");
  UnsafeMemoryAccessor.setLong(requestedAddress, value);
}

/**
 * Copy a set of longs to an array from the buffer starting at offset.
 * @param dst destination long array
 * @param dstIndex starting index within the destination array
 * @param srcOffset starting offset within this buffer
 * @param count number of longs to copy
 */
public final void getLongs(long[] dst, long dstIndex, long srcOffset, int count) {
  assert count >= 0;
  assert count <= dst.length - dstIndex;
  assert srcOffset >= 0;
  long requestedAddress = this.address + srcOffset;
  addressOutOfBoundsCheck(requestedAddress, count * 8L, "getLongs");
  UnsafeMemoryAccessor.getLongs(dst, dstIndex, requestedAddress, count);
}

/**
 * Copy a set of longs from an array into the buffer at offset.
 * @param offset the offset from the address to start copying to
 * @param data the data to be copied.
 * @param srcOffset index into data to start at.
 * @param len number of longs to copy
 */
public final void setLongs(long offset, long[] data, long srcOffset, long len) {
  assert len >= 0 : "length is not allowed " + len;
  assert len <= data.length - srcOffset;
  // Validate the source offset as getLongs/setBytes do; a negative srcOffset
  // would otherwise slip past the length check above.
  assert srcOffset >= 0;
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, len * 8, "setLongs");
  UnsafeMemoryAccessor.setLongs(requestedAddress, data, srcOffset, len);
}
/**
 * Returns the Float value at that offset
 * @param offset - offset from the address
 * @return - value
 */
public final float getFloat(long offset) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 4, "getFloat");
  return UnsafeMemoryAccessor.getFloat(requestedAddress);
}

/**
 * Sets the Float value at that offset
 * @param offset - offset from the address
 * @param value - value to be set
 */
public final void setFloat(long offset, float value) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 4, "setFloat");
  UnsafeMemoryAccessor.setFloat(requestedAddress, value);
}

/**
 * Copy a set of floats from an array into the buffer at offset.
 * @param offset the offset from the address to start copying to
 * @param data the data to be copied.
 * @param srcOffset index into data to start at
 * @param len number of floats to copy
 */
public final void setFloats(long offset, float[] data, long srcOffset, long len) {
  assert len >= 0 : "length is not allowed " + len;
  assert len <= data.length - srcOffset;
  // Validate the source offset as getBytes/setBytes do; a negative srcOffset
  // would otherwise slip past the length check above.
  assert srcOffset >= 0;
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, len * 4, "setFloats");
  UnsafeMemoryAccessor.setFloats(requestedAddress, data, srcOffset, len);
}
/**
 * Returns the Double value at that offset
 * @param offset - offset from the address
 * @return - value
 */
public final double getDouble(long offset) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 8, "getDouble");
  return UnsafeMemoryAccessor.getDouble(requestedAddress);
}

/**
 * Sets the Double value at that offset
 * @param offset - offset from the address
 * @param value - value to be set
 */
public final void setDouble(long offset, double value) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 8, "setDouble");
  UnsafeMemoryAccessor.setDouble(requestedAddress, value);
}

/**
 * Copy a set of doubles from an array into the buffer at offset.
 * @param offset the offset from the address to start copying to
 * @param data the data to be copied.
 * @param srcOffset index into data to start at
 * @param len number of doubles to copy
 */
public final void setDoubles(long offset, double[] data, long srcOffset, long len) {
  assert len >= 0 : "length is not allowed " + len;
  assert len <= data.length - srcOffset;
  // Validate the source offset as getBytes/setBytes do; a negative srcOffset
  // would otherwise slip past the length check above.
  assert srcOffset >= 0;
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, len * 8, "setDoubles");
  UnsafeMemoryAccessor.setDoubles(requestedAddress, data, srcOffset, len);
}
/**
 * Returns the Boolean value at that offset (stored as a single byte).
 * @param offset - offset from the address
 * @return - value
 */
public final boolean getBoolean(long offset) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 1, "getBoolean");
  return UnsafeMemoryAccessor.getBoolean(requestedAddress);
}

/**
 * Sets the Boolean value at that offset (stored as a single byte).
 * @param offset - offset from the address
 * @param value - value to be set
 */
public final void setBoolean(long offset, boolean value) {
  long requestedAddress = this.address + offset;
  addressOutOfBoundsCheck(requestedAddress, 1, "setBoolean");
  UnsafeMemoryAccessor.setBoolean(requestedAddress, value);
}
/**
 * Sets the values in this buffer repeatedly (memset-style fill).
 * @param offset - offset from the address
 * @param length - number of bytes to set
 * @param value - value to be set
 */
public final void setMemory(long offset, long length, byte value) {
  addressOutOfBoundsCheck(address + offset, length, "set memory");
  UnsafeMemoryAccessor.setMemory(address + offset, length, value);
}

/**
 * Copy len bytes from the given native address into the start of this buffer.
 * @param fromAddress source native address to read from
 * @param len number of bytes to copy
 */
final void copyFromMemory(long fromAddress, long len) {
  addressOutOfBoundsCheck(address, len, "copy from memory");
  UnsafeMemoryAccessor.copyMemory(null, fromAddress, null, address, len);
}

/**
 * Copy data from this buffer to the given address.
 * @param toAddress where the data should go
 * @param len how much data to copy
 */
final void copyToMemory(long toAddress, long len) {
  addressOutOfBoundsCheck(address, len, "copy to memory");
  UnsafeMemoryAccessor.copyMemory(null, address, null, toAddress, len);
}
/**
 * Synchronously copy from a DeviceMemoryBuffer to a HostMemoryBuffer
 * @param deviceMemoryBuffer buffer to copy data from
 */
public final void copyFromDeviceBuffer(BaseDeviceMemoryBuffer deviceMemoryBuffer) {
  addressOutOfBoundsCheck(address, deviceMemoryBuffer.length, "copy range dest");
  assert !deviceMemoryBuffer.closed;
  Cuda.memcpy(address, deviceMemoryBuffer.address, deviceMemoryBuffer.length,
      CudaMemcpyKind.DEVICE_TO_HOST);
}

/**
 * Copy from a DeviceMemoryBuffer to a HostMemoryBuffer using the specified stream.
 * The copy has completed when this returns, but the memory copy could overlap with
 * operations occurring on other streams.
 * @param deviceMemoryBuffer buffer to copy data from
 * @param stream CUDA stream to use
 */
public final void copyFromDeviceBuffer(BaseDeviceMemoryBuffer deviceMemoryBuffer,
    Cuda.Stream stream) {
  addressOutOfBoundsCheck(address, deviceMemoryBuffer.length, "copy range dest");
  assert !deviceMemoryBuffer.closed;
  Cuda.memcpy(address, deviceMemoryBuffer.address, deviceMemoryBuffer.length,
      CudaMemcpyKind.DEVICE_TO_HOST, stream);
}

/**
 * Copy from a DeviceMemoryBuffer to a HostMemoryBuffer using the specified stream.
 * The copy is async and may not have completed when this returns.
 * @param deviceMemoryBuffer buffer to copy data from
 * @param stream CUDA stream to use
 */
public final void copyFromDeviceBufferAsync(BaseDeviceMemoryBuffer deviceMemoryBuffer,
    Cuda.Stream stream) {
  addressOutOfBoundsCheck(address, deviceMemoryBuffer.length, "copy range dest");
  assert !deviceMemoryBuffer.closed;
  Cuda.asyncMemcpy(address, deviceMemoryBuffer.address, deviceMemoryBuffer.length,
      CudaMemcpyKind.DEVICE_TO_HOST, stream);
}

/**
 * Slice off a part of the host buffer.
 * @param offset where to start the slice at.
 * @param len how many bytes to slice
 * @return a host buffer that will need to be closed independently from this buffer.
 */
@Override
public final synchronized HostMemoryBuffer slice(long offset, long len) {
  addressOutOfBoundsCheck(address + offset, len, "slice");
  // The slice shares this buffer's memory, so bump both this buffer's ref count
  // and the cleaner's so the memory stays alive until the slice is closed too.
  refCount++;
  cleaner.addRef();
  return new HostMemoryBuffer(address + offset, len, this);
}
/**
 * Slice off a part of the host buffer, actually making a copy of the data.
 * @param offset where to start the slice at.
 * @param len how many bytes to slice
 * @return a host buffer that will need to be closed independently from this buffer.
 */
public final HostMemoryBuffer sliceWithCopy(long offset, long len) {
  addressOutOfBoundsCheck(address + offset, len, "slice");
  HostMemoryBuffer copy = allocate(len);
  try {
    UnsafeMemoryAccessor.copyMemory(null, address + offset, null, copy.getAddress(), len);
    return copy;
  } catch (Throwable t) {
    // Don't leak the freshly allocated buffer if the copy fails.
    copy.close();
    throw t;
  }
}
/**
 * WARNING: Debug only method to print buffer. Does not work for buffers over 2GB.
 */
public void printBuffer() {
  printBuffer(5);
}

/**
 * WARNING: Debug only method to print buffer. Does not work for buffers over 2GB.
 * @param wordsPerRow the number of 32 bit works to print per row.
 */
public void printBuffer(int wordsPerRow) {
  final int bytesPerWord = 4;
  final int bytesPerRow = bytesPerWord * wordsPerRow;
  assert (length == (int) length) : "The buffer is too large to be printed";
  byte[] data = new byte[(int) length];
  getBytes(data, 0, 0, length);
  // Build the entire dump in memory, then emit it with a single println so the
  // output is identical to printing piecewise.
  StringBuilder dump = new StringBuilder("BUFFER length = ").append(data.length);
  for (int i = 0; i < data.length; i++) {
    if (i % bytesPerWord == 0) {
      dump.append(i % bytesPerRow == 0 ? System.lineSeparator() : " ");
    }
    dump.append(String.format("%02x", ((long) data[i]) & 0xFF));
  }
  System.out.println(dump);
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostMemoryBufferNativeUtils.java
|
/*
*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
 * Wrapper for {@link HostMemoryBuffer} native callbacks so that class avoids
 * loading the native libraries unless one of its methods requires it.
 */
class HostMemoryBufferNativeUtils {
  static {
    // Native dependencies are only loaded when this class is first touched.
    NativeDepsLoader.loadNativeDeps();
  }

  /**
   * This will turn an address into a ByteBuffer. The buffer will NOT own the memory
   * so closing it has no impact on the underlying memory. It should never
   * be used if the corresponding HostMemoryBuffer is closed.
   */
  static native ByteBuffer wrapRangeInBuffer(long address, long len);

  /**
   * Memory map a portion of a local file
   * @param file path to the local file to be mapped
   * @param mode 0=read, 1=read+write
   * @param offset file offset where map starts. Must be a system page boundary.
   * @param len number of bytes to map
   * @return address of the memory-mapped region
   * @throws IOException I/O error during mapping
   */
  static native long mmap(String file, int mode, long offset, long len) throws IOException;

  /**
   * Unmap a memory region that was memory-mapped.
   * @param address address of the memory-mapped region
   * @param length size of the mapped region in bytes
   */
  static native void munmap(long address, long length);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/HostUDFWrapper.java
|
/*
* Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * A wrapper around native host UDF aggregations.
 * <p>
 * This class is used to create the native handle of a host UDF aggregation and is used as
 * a proxy object to compute hash code and compare two host UDF aggregations for equality.
 * <p>
 * A new host UDF aggregation implementation must extend this class and override the
 * {@code computeHashCode} and {@code isEqual} methods for such purposes.
 *
 */
public abstract class HostUDFWrapper {
  /**
   * Create a derived host UDF native instance.
   * The instance created by this function MUST be closed by `closeUDFInstance`
   * <p>Typical usage, refer to Aggregation.java:</p>
   * <pre>
   * long udf = 0;
   * try {
   *   udf = wrapper.createUDFInstance();
   *   return Aggregation.createHostUDFAgg(udf);
   * } finally {
   *   // a new UDF is cloned in `createHostUDFAgg`, here should close the UDF instance.
   *   if (udf != 0) {
   *     HostUDFWrapper.closeUDFInstance(udf);
   *   }
   * }
   * </pre>
   *
   * @return the native handle of the new UDF instance
   */
  public abstract long createUDFInstance();

  /**
   * Close the derived UDF instance created by `createUDFInstance`.
   * @param hostUDFInstance the UDF instance
   */
  public static void closeUDFInstance(long hostUDFInstance) {
    close(hostUDFInstance);
  }

  /**
   * Compute the hash code for this UDF aggregation. Must be consistent with
   * {@link #isEqual(Object)}: equal wrappers must produce the same hash code.
   */
  public abstract int computeHashCode();

  // hashCode/equals delegate to the abstract methods so subclasses define identity.
  @Override
  public int hashCode() {
    return computeHashCode();
  }

  /**
   * Compare this UDF aggregation with another object. Implementations must follow the
   * {@code Object.equals} contract, including handling {@code null} and non-matching types.
   */
  public abstract boolean isEqual(Object obj);

  @Override
  public boolean equals(Object obj) {
    return isEqual(obj);
  }

  static native void close(long hostUDFInstance);
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/JCudfSerialization.java
|
/*
*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* Serialize and deserialize CUDF tables and columns using a custom format. The goal of this is
* to provide a way to efficiently serialize and deserialize cudf data for distributed
* processing within a single application. Typically after a partition like operation has happened.
* It is not intended for inter-application communication or for long term storage of data, there
* are much better standards based formats for all of that.
* <p>
* The goal is to transfer data from a local GPU to a remote GPU as quickly and efficiently as
* possible using build in java communication channels. There is no guarantee of compatibility
* between different releases of CUDF. This is to allow us to adapt if internal memory layouts
* and formats change.
* <p>
* This version optimizes for reduced memory transfers, and as such will try to do the fewest number
* of transfers possible when putting the data back onto the GPU. This means that it will slice
* a single large memory buffer into smaller buffers used by the resulting ColumnVectors. The
* downside of this is that generally none of the memory can be released until all of the
* ColumnVectors are closed. It is assumed that this will not be a problem because for processing
* efficiency after the data is transferred it will likely be combined with other similar batches
* from other processes into a single larger buffer.
*/
public class JCudfSerialization {
/**
 * Magic number "CUDF" in ASCII, which is 1178883395 if read in LE from big endian, which is
 * too large for any reasonable metadata for arrow, so we should probably be okay detecting
 * this, and switching back/forth at a later time.
 */
private static final int SER_FORMAT_MAGIC_NUMBER = 0x43554446;
// Serialization format version; readFrom() rejects any stream with a different version.
private static final short VERSION_NUMBER = 0x0000;
/**
 * Immutable holder for the locations of a single column's validity, offsets and data
 * buffers plus the length of the data buffer.
 */
private static final class ColumnOffsets {
  private final long validity;  // location of the validity (null mask) buffer
  private final long offsets;   // location of the offsets buffer (for variable-width types)
  private final long data;      // location of the data buffer
  private final long dataLen;   // length in bytes of the data buffer

  public ColumnOffsets(long validity, long offsets, long data, long dataLen) {
    this.validity = validity;
    this.offsets = offsets;
    this.data = data;
    this.dataLen = dataLen;
  }
}
/**
* Holds the metadata about a serialized table. If this is being read from a stream
* isInitialized will return true if the metadata was read correctly from the stream.
* It will return false if an EOF was encountered at the beginning indicating that
* there was no data to be read.
*/
public static final class SerializedTableHeader {
private SerializedColumnHeader[] columns;
private int numRows;
private long dataLen;
// True once the header metadata has been fully read (or supplied directly).
private boolean initialized = false;
// True once the table data (not just the header) has been read; see wasDataRead().
private boolean dataRead = false;

/** Read a table header from the given stream; check wasInitialized() for EOF. */
public SerializedTableHeader(DataInputStream din) throws IOException {
  readFrom(din);
}

// Package-private constructor used when the header is built in-process rather
// than parsed from a stream, so both flags are set immediately.
SerializedTableHeader(SerializedColumnHeader[] columns, int numRows, long dataLen) {
  this.columns = columns;
  this.numRows = numRows;
  this.dataLen = dataLen;
  initialized = true;
  dataRead = true;
}

/** Constructor for a row-count only table (no columns) */
public SerializedTableHeader(int numRows) {
  this(new SerializedColumnHeader[0], numRows, 0);
}
/** Get the column header for the corresponding column index */
public SerializedColumnHeader getColumnHeader(int columnIndex) {
  return columns[columnIndex];
}

/**
 * Set to true once data is successfully read from a stream by readTableIntoBuffer.
 * @return true if data was read, else false.
 */
public boolean wasDataRead() {
  return dataRead;
}

/**
 * Returns the size of a buffer needed to read data into the stream.
 */
public long getDataLen() {
  return dataLen;
}

/**
 * Returns the number of rows stored in this table.
 */
public int getNumRows() {
  return numRows;
}

/**
 * Returns the number of columns stored in this table
 */
public int getNumColumns() {
  return columns != null ? columns.length : 0;
}

/**
 * Returns true if the metadata for this table was read, else false indicating an EOF was
 * encountered.
 */
public boolean wasInitialized() {
  return initialized;
}

/**
 * Returns the number of bytes needed to serialize this table header.
 * Note that this is only the metadata for the table (i.e.: column types, row counts, etc.)
 * and does not include the bytes needed to serialize the table data.
 */
public long getSerializedHeaderSizeInBytes() {
  // table header always has:
  // - 4-byte magic number
  // - 2-byte version number
  // - 4-byte column count
  // - 4-byte row count
  // - 8-byte data buffer length
  long total = 4 + 2 + 4 + 4 + 8;
  for (SerializedColumnHeader column : columns) {
    total += column.getSerializedHeaderSizeInBytes();
  }
  return total;
}

/** Returns the number of bytes needed to serialize this table header and the table data. */
public long getTotalSerializedSizeInBytes() {
  return getSerializedHeaderSizeInBytes() + dataLen;
}
// Parses the header in the exact order writeTo() emits it: magic, version,
// column count, row count, per-column headers, then the data buffer length.
// On EOF at the very first read this returns with initialized == false.
private void readFrom(DataInputStream din) throws IOException {
  try {
    int num = din.readInt();
    if (num != SER_FORMAT_MAGIC_NUMBER) {
      throw new IllegalStateException("THIS DOES NOT LOOK LIKE CUDF SERIALIZED DATA. " +
          "Expected magic number " + SER_FORMAT_MAGIC_NUMBER + " Found " + num);
    }
  } catch (EOFException e) {
    // If we get an EOF at the very beginning don't treat it as an error because we may
    // have finished reading everything...
    return;
  }
  short version = din.readShort();
  if (version != VERSION_NUMBER) {
    throw new IllegalStateException("READING THE WRONG SERIALIZATION FORMAT VERSION FOUND "
        + version + " EXPECTED " + VERSION_NUMBER);
  }
  int numColumns = din.readInt();
  numRows = din.readInt();
  columns = new SerializedColumnHeader[numColumns];
  for (int i = 0; i < numColumns; i++) {
    columns[i] = SerializedColumnHeader.readFrom(din, numRows);
  }
  dataLen = din.readLong();
  initialized = true;
}
/**
 * Serialize this table header to the given writer. The write order must exactly
 * mirror the read order in readFrom.
 * @param dout destination writer
 * @throws IOException if writing fails
 */
public void writeTo(DataWriter dout) throws IOException {
  // Now write out the data
  dout.writeInt(SER_FORMAT_MAGIC_NUMBER);
  dout.writeShort(VERSION_NUMBER);
  dout.writeInt(columns.length);
  dout.writeInt(numRows);
  // Header for each column...
  for (SerializedColumnHeader column : columns) {
    column.writeTo(dout);
  }
  dout.writeLong(dataLen);
}
}
/** Holds the metadata about a serialized column. */
public static final class SerializedColumnHeader {
  /** Data type of the column. */
  public final DType dtype;
  /** Null count of the column, or ColumnView.UNKNOWN_NULL_COUNT if not computable. */
  public final long nullCount;
  /** Number of rows in the column. */
  public final long rowCount;
  /** Headers for child columns, or null when the column has no children. */
  public final SerializedColumnHeader[] children;

  SerializedColumnHeader(DType dtype, long rowCount, long nullCount,
                         SerializedColumnHeader[] children) {
    this.dtype = dtype;
    this.rowCount = rowCount;
    this.nullCount = nullCount;
    this.children = children;
  }

  /**
   * Build a header describing a row slice of the given column (and recursively
   * of all of its child columns).
   * @param column provider for the column data
   * @param rowOffset first row of the slice
   * @param numRows number of rows in the slice
   */
  SerializedColumnHeader(ColumnBufferProvider column, long rowOffset, long numRows) {
    this.dtype = column.getType();
    this.rowCount = numRows;
    long columnNullCount = column.getNullCount();
    // For a subset of the original column we do not know the null count unless
    // the original column is either all nulls or no nulls.
    if (column.getRowCount() == numRows
        || columnNullCount == 0 || columnNullCount == column.getRowCount()) {
      this.nullCount = Math.min(columnNullCount, numRows);
    } else {
      this.nullCount = ColumnView.UNKNOWN_NULL_COUNT;
    }
    ColumnBufferProvider[] childProviders = column.getChildProviders();
    if (childProviders != null) {
      children = new SerializedColumnHeader[childProviders.length];
      long childRowOffset = rowOffset;
      long childNumRows = numRows;
      if (dtype.equals(DType.LIST)) {
        // A list column's child rows are the range referenced by its offsets buffer,
        // so translate the parent row window through the offsets.
        if (numRows > 0) {
          childRowOffset = column.getOffset(rowOffset);
          childNumRows = column.getOffset(rowOffset + numRows) - childRowOffset;
        }
      }
      for (int i = 0; i < children.length; i++) {
        children[i] = new SerializedColumnHeader(childProviders[i], childRowOffset, childNumRows);
      }
    } else {
      children = null;
    }
  }

  /** Get the data type of the column */
  public DType getType() {
    return dtype;
  }

  /** Get the row count of the column */
  public long getRowCount() {
    return rowCount;
  }

  /** Get the null count of the column */
  public long getNullCount() {
    return nullCount;
  }

  /** Get the metadata for any child columns or null if there are no children */
  public SerializedColumnHeader[] getChildren() {
    return children;
  }

  /** Get the number of child columns */
  public int getNumChildren() {
    return children != null ? children.length : 0;
  }

  /** Return the number of bytes needed to store this column header in serialized form. */
  public long getSerializedHeaderSizeInBytes() {
    // column header always has:
    // - 4-byte type ID
    // - 4-byte type scale
    // - 4-byte null count
    long total = 4 + 4 + 4;
    if (dtype.isNestedType()) {
      assert children != null;
      if (dtype.equals(DType.LIST)) {
        total += 4; // 4-byte child row count
      } else if (dtype.equals(DType.STRUCT)) {
        total += 4; // 4-byte child count
      } else {
        throw new IllegalStateException("Unexpected nested type: " + dtype);
      }
      for (SerializedColumnHeader child : children) {
        total += child.getSerializedHeaderSizeInBytes();
      }
    }
    return total;
  }

  /** Write this column header to the specified writer */
  public void writeTo(DataWriter dout) throws IOException {
    // Write order must exactly mirror readFrom below.
    dout.writeInt(dtype.typeId.getNativeId());
    dout.writeInt(dtype.getScale());
    dout.writeInt((int) nullCount);
    if (dtype.isNestedType()) {
      assert children != null;
      if (dtype.equals(DType.LIST)) {
        // A list always has exactly one child, so only its row count is stored.
        dout.writeInt((int) children[0].getRowCount());
      } else if (dtype.equals(DType.STRUCT)) {
        dout.writeInt(getNumChildren());
      } else {
        throw new IllegalStateException("Unexpected nested type: " + dtype);
      }
      for (SerializedColumnHeader child : children) {
        child.writeTo(dout);
      }
    }
  }

  /**
   * Read a column header (and recursively its children) from the stream.
   * @param din source stream positioned at a serialized column header
   * @param rowCount row count of this column, supplied by the caller since it is
   *                 stored in the table header (or parent) rather than per column
   */
  static SerializedColumnHeader readFrom(DataInputStream din, long rowCount) throws IOException {
    DType dtype = DType.fromNative(din.readInt(), din.readInt());
    long nullCount = din.readInt();
    SerializedColumnHeader[] children = null;
    if (dtype.isNestedType()) {
      int numChildren;
      long childRowCount;
      if (dtype.equals(DType.LIST)) {
        numChildren = 1;
        childRowCount = din.readInt();
      } else if (dtype.equals(DType.STRUCT)) {
        numChildren = din.readInt();
        // Struct children share the parent's row count.
        childRowCount = rowCount;
      } else {
        throw new IllegalStateException("Unexpected nested type: " + dtype);
      }
      children = new SerializedColumnHeader[numChildren];
      for (int i = 0; i < numChildren; i++) {
        children[i] = readFrom(din, childRowCount);
      }
    }
    return new SerializedColumnHeader(dtype, rowCount, nullCount, children);
  }
}
/** Class to hold the header and buffer pair result from host-side concatenation */
public static final class HostConcatResult implements AutoCloseable {
  private final SerializedTableHeader tableHeader;
  private final HostMemoryBuffer hostBuffer;

  public HostConcatResult(SerializedTableHeader tableHeader, HostMemoryBuffer tableBuffer) {
    this.tableHeader = tableHeader;
    this.hostBuffer = tableBuffer;
  }

  public SerializedTableHeader getTableHeader() {
    return tableHeader;
  }

  public HostMemoryBuffer getHostBuffer() {
    return hostBuffer;
  }

  /** Build a contiguous table in device memory from this host-concatenated result */
  public ContiguousTable toContiguousTable() {
    DeviceMemoryBuffer devBuffer = DeviceMemoryBuffer.allocate(hostBuffer.length);
    try {
      if (hostBuffer.length > 0) {
        devBuffer.copyFromHostBuffer(hostBuffer);
      }
      Table table = sliceUpColumnVectors(tableHeader, devBuffer, hostBuffer);
      try {
        // On success the returned ContiguousTable holds both the table and the device buffer.
        return new ContiguousTable(table, devBuffer);
      } catch (Exception e) {
        // ContiguousTable construction failed, so we must release the table ourselves.
        table.close();
        throw e;
      }
    } catch (Exception e) {
      // Any earlier failure: release the device buffer before rethrowing.
      devBuffer.close();
      throw e;
    }
  }

  @Override
  public void close() {
    // Only the host buffer is owned/closed here; the header holds no resources.
    hostBuffer.close();
  }
}
/**
 * Visible for testing
 * Abstraction over a source of column data so serialization can work either from
 * host column vectors or from previously serialized buffers.
 */
static abstract class ColumnBufferProvider implements AutoCloseable {
  /** Data type of the column. */
  public abstract DType getType();
  /** Null count of the column; may be ColumnView.UNKNOWN_NULL_COUNT. */
  public abstract long getNullCount();
  /** Read the offsets entry at the given row index (columns with offsets only). */
  public abstract long getOffset(long index);
  /** Number of rows in the column. */
  public abstract long getRowCount();
  /** Host buffer that backs the requested buffer type. */
  public abstract HostMemoryBuffer getHostBufferFor(BufferType buffType);
  /** Byte offset within that host buffer where the requested buffer type starts. */
  public abstract long getBufferStartOffset(BufferType buffType);
  /** Providers for child columns, or null if there are no children. */
  public abstract ColumnBufferProvider[] getChildProviders();
  @Override
  public abstract void close();
}
/**
 * Visible for testing
 * ColumnBufferProvider backed directly by a host column vector.
 */
static class ColumnProvider extends ColumnBufferProvider {
  private final HostColumnVectorCore column;
  // Whether close() should also close the wrapped column.
  private final boolean closeAtEnd;
  private final ColumnBufferProvider[] childProviders;

  /**
   * @param column host column to wrap
   * @param closeAtEnd true if closing this provider should close the column too
   */
  ColumnProvider(HostColumnVectorCore column, boolean closeAtEnd) {
    this.column = column;
    this.closeAtEnd = closeAtEnd;
    if (getType().isNestedType()) {
      // Child providers never own their columns; the parent column owns them.
      int numChildren = column.getNumChildren();
      childProviders = new ColumnBufferProvider[numChildren];
      for (int i = 0; i < numChildren; i++) {
        childProviders[i] = new ColumnProvider(column.getChildColumnView(i), false);
      }
    } else {
      childProviders = null;
    }
  }

  @Override
  public DType getType() {
    return column.getType();
  }

  @Override
  public long getNullCount() {
    return column.getNullCount();
  }

  @Override
  public long getOffset(long index) {
    // Offsets are stored as 4-byte ints.
    return column.getOffsets().getInt(index * Integer.BYTES);
  }

  @Override
  public long getRowCount() {
    return column.getRowCount();
  }

  @Override
  public HostMemoryBuffer getHostBufferFor(BufferType buffType) {
    switch (buffType) {
      case VALIDITY: return column.getValidity();
      case OFFSET: return column.getOffsets();
      case DATA: return column.getData();
      default: throw new IllegalStateException("Unexpected buffer type: " + buffType);
    }
  }

  @Override
  public long getBufferStartOffset(BufferType buffType) {
    // All of the buffers start at 0 for this.
    return 0;
  }

  @Override
  public ColumnBufferProvider[] getChildProviders() {
    return childProviders;
  }

  @Override
  public void close() {
    if (closeAtEnd) {
      column.close();
    }
  }
}
/**
 * ColumnBufferProvider backed by a single host buffer of previously serialized data,
 * locating each sub-buffer via precomputed offsets (see buildIndex).
 */
private static class BufferOffsetProvider extends ColumnBufferProvider {
  private final SerializedColumnHeader header;
  // Offsets of validity/offsets/data regions inside the shared buffer.
  private final ColumnOffsets offsets;
  private final HostMemoryBuffer buffer;
  private final ColumnBufferProvider[] childProviders;

  private BufferOffsetProvider(SerializedColumnHeader header,
                               ColumnOffsets offsets,
                               HostMemoryBuffer buffer,
                               ColumnBufferProvider[] childProviders) {
    this.header = header;
    this.offsets = offsets;
    this.buffer = buffer;
    this.childProviders = childProviders;
  }

  @Override
  public DType getType() {
    return header.getType();
  }

  @Override
  public long getNullCount() {
    return header.getNullCount();
  }

  @Override
  public long getRowCount() {
    return header.getRowCount();
  }

  @Override
  public HostMemoryBuffer getHostBufferFor(BufferType buffType) {
    // All buffer types live in the same backing buffer; the type only affects the offset.
    return buffer;
  }

  @Override
  public long getBufferStartOffset(BufferType buffType) {
    switch (buffType) {
      case DATA:
        return offsets.data;
      case OFFSET:
        return offsets.offsets;
      case VALIDITY:
        return offsets.validity;
      default:
        throw new IllegalArgumentException("Buffer type " + buffType + " is not supported");
    }
  }

  @Override
  public long getOffset(long index) {
    assert getType().hasOffsets();
    assert (index >= 0 && index <= getRowCount()) : "index is out of range 0 <= " + index + " <= " + getRowCount();
    // Offsets entries are 4-byte ints stored at the column's offsets region.
    return buffer.getInt(offsets.offsets + (index * Integer.BYTES));
  }

  @Override
  public ColumnBufferProvider[] getChildProviders() {
    return childProviders;
  }

  @Override
  public void close() {
    // NOOP -- the shared backing buffer is owned and closed by the caller.
  }
}
/**
 * Visible for testing
 * Destination abstraction for serialized output; implemented for streams and host buffers.
 */
static abstract class DataWriter {
  public abstract void writeByte(byte b) throws IOException;

  public abstract void writeShort(short s) throws IOException;

  public abstract void writeInt(int i) throws IOException;

  /** Write an int in the machine's native byte order rather than stream order. */
  public abstract void writeIntNativeOrder(int i) throws IOException;

  public abstract void writeLong(long val) throws IOException;

  /**
   * Copy data from src starting at srcOffset and going for len bytes.
   * @param src where to copy from.
   * @param srcOffset offset to start at.
   * @param len amount to copy.
   */
  public abstract void copyDataFrom(HostMemoryBuffer src, long srcOffset, long len)
      throws IOException;

  /** Copy a range of a column's buffer of the given type to this writer. */
  public void copyDataFrom(ColumnBufferProvider column, BufferType buffType,
                           long offset, long length) throws IOException {
    HostMemoryBuffer buff = column.getHostBufferFor(buffType);
    long startOffset = column.getBufferStartOffset(buffType);
    copyDataFrom(buff, startOffset + offset, length);
  }

  public void flush() throws IOException {
    // NOOP by default
  }

  public abstract void write(byte[] arr, int offset, int length) throws IOException;
}
/**
 * Visible for testing
 * DataWriter that forwards to a DataOutputStream, staging buffer copies through a
 * reusable byte array to avoid per-byte stream calls.
 */
static final class DataOutputStreamWriter extends DataWriter {
  // Scratch space for chunked copies out of host memory (128 KiB).
  private final byte[] arrayBuffer = new byte[1024 * 128];
  private final DataOutputStream dout;

  public DataOutputStreamWriter(DataOutputStream dout) {
    this.dout = dout;
  }

  @Override
  public void writeByte(byte b) throws IOException {
    dout.writeByte(b);
  }

  @Override
  public void writeShort(short s) throws IOException {
    dout.writeShort(s);
  }

  @Override
  public void writeInt(int i) throws IOException {
    dout.writeInt(i);
  }

  @Override
  public void writeIntNativeOrder(int i) throws IOException {
    // TODO this only works on Little Endian Architectures, x86. If we need
    // to support others we need to detect the endianness and switch on the right implementation.
    writeInt(Integer.reverseBytes(i));
  }

  @Override
  public void writeLong(long val) throws IOException {
    dout.writeLong(val);
  }

  @Override
  public void copyDataFrom(HostMemoryBuffer src, long srcOffset, long len) throws IOException {
    // Copy in arrayBuffer-sized chunks: host buffer -> byte[] -> stream.
    long dataLeft = len;
    while (dataLeft > 0) {
      int amountToCopy = (int)Math.min(arrayBuffer.length, dataLeft);
      src.getBytes(arrayBuffer, 0, srcOffset, amountToCopy);
      dout.write(arrayBuffer, 0, amountToCopy);
      srcOffset += amountToCopy;
      dataLeft -= amountToCopy;
    }
  }

  @Override
  public void flush() throws IOException {
    dout.flush();
  }

  @Override
  public void write(byte[] arr, int offset, int length) throws IOException {
    dout.write(arr, offset, length);
  }
}
/**
 * DataWriter that writes directly into a host memory buffer, tracking the current
 * write position internally. The caller must size the buffer appropriately.
 */
private static final class HostDataWriter extends DataWriter {
  private final HostMemoryBuffer buffer;
  // Current write position within the buffer.
  private long offset = 0;

  public HostDataWriter(HostMemoryBuffer buffer) {
    this.buffer = buffer;
  }

  @Override
  public void writeByte(byte b) {
    buffer.setByte(offset, b);
    offset += 1;
  }

  @Override
  public void writeShort(short s) {
    buffer.setShort(offset, s);
    offset += 2;
  }

  @Override
  public void writeInt(int i) {
    buffer.setInt(offset, i);
    offset += 4;
  }

  @Override
  public void writeIntNativeOrder(int i) {
    // This is already in the native order...
    writeInt(i);
  }

  @Override
  public void writeLong(long val) {
    buffer.setLong(offset, val);
    offset += 8;
  }

  @Override
  public void copyDataFrom(HostMemoryBuffer src, long srcOffset, long len) {
    buffer.copyFromHostBuffer(offset, src, srcOffset, len);
    offset += len;
  }

  @Override
  public void write(byte[] arr, int srcOffset, int length) {
    buffer.setBytes(offset, arr, srcOffset, length);
    offset += length;
  }
}
/////////////////////////////////////////////
// METHODS
/////////////////////////////////////////////
/////////////////////////////////////////////
// PADDING FOR ALIGNMENT
/////////////////////////////////////////////
/**
 * Round a byte count up to the next multiple of 64 (the alignment required by
 * the serialized buffer layout).
 * @param orig original size in bytes
 * @return smallest multiple of 64 that is >= orig
 */
private static long padFor64byteAlignment(long orig) {
  long bumped = orig + 63;
  // a - (a % 64) is identical to (a / 64) * 64 for Java's truncating division.
  return bumped - (bumped % 64);
}
/**
 * Write zero bytes to the output until the given byte count reaches a 64-byte
 * alignment boundary.
 * @param out destination writer
 * @param bytes number of bytes already written for the current buffer
 * @return the padded (aligned) byte count
 * @throws IOException if writing the padding fails
 */
private static long padFor64byteAlignment(DataWriter out, long bytes) throws IOException {
  final long alignedSize = padFor64byteAlignment(bytes);
  for (long written = bytes; written < alignedSize; written++) {
    out.writeByte((byte) 0);
  }
  return alignedSize;
}
/////////////////////////////////////////////
// SERIALIZED SIZE
/////////////////////////////////////////////
/**
 * Compute the number of raw string bytes covered by a row slice of a string column,
 * using the column's offsets buffer.
 * @param column string column provider
 * @param rowOffset first row of the slice
 * @param numRows number of rows in the slice
 * @return byte length of the string data for the slice, 0 for an empty slice
 */
private static long getRawStringDataLength(ColumnBufferProvider column, long rowOffset, long numRows) {
  if (numRows <= 0) {
    return 0;
  }
  long firstByte = column.getOffset(rowOffset);
  long endByte = column.getOffset(rowOffset + numRows);
  return endByte - firstByte;
}
/**
 * Sum the serialized data sizes of a row slice across all of the given columns.
 * @param columns column providers to measure
 * @param rowOffset first row of the slice
 * @param numRows number of rows in the slice
 * @return total serialized data size in bytes
 */
private static long getSlicedSerializedDataSizeInBytes(ColumnBufferProvider[] columns, long rowOffset, long numRows) {
  long result = 0;
  for (int columnIdx = 0; columnIdx < columns.length; columnIdx++) {
    result += getSlicedSerializedDataSizeInBytes(columns[columnIdx], rowOffset, numRows);
  }
  return result;
}
/**
 * Compute the number of bytes needed to serialize the data of a row slice of a single
 * column, including all child columns, with each buffer padded to 64-byte alignment.
 * @param column provider for the column data
 * @param rowOffset first row of the slice
 * @param numRows number of rows in the slice
 * @return serialized data size in bytes (header metadata not included)
 */
private static long getSlicedSerializedDataSizeInBytes(ColumnBufferProvider column, long rowOffset, long numRows) {
  long totalDataSize = 0;
  DType type = column.getType();
  if (needsValidityBuffer(column.getNullCount())) {
    totalDataSize += padFor64byteAlignment(BitVectorHelper.getValidityLengthInBytes(numRows));
  }
  if (type.hasOffsets()) {
    if (numRows > 0) {
      // Add in size of offsets vector
      totalDataSize += padFor64byteAlignment((numRows + 1) * Integer.BYTES);
      if (type.equals(DType.STRING)) {
        totalDataSize += padFor64byteAlignment(getRawStringDataLength(column, rowOffset, numRows));
      }
    }
  } else if (type.getSizeInBytes() > 0) {
    // Fixed-width data. Use the already-fetched type for consistency with the rest of
    // this method instead of re-querying column.getType().
    totalDataSize += padFor64byteAlignment(type.getSizeInBytes() * numRows);
  }
  if (numRows > 0 && type.isNestedType()) {
    if (type.equals(DType.LIST)) {
      // A list's child rows are the range referenced by the list offsets for this slice.
      ColumnBufferProvider child = column.getChildProviders()[0];
      long childStartRow = column.getOffset(rowOffset);
      long childNumRows = column.getOffset(rowOffset + numRows) - childStartRow;
      totalDataSize += getSlicedSerializedDataSizeInBytes(child, childStartRow, childNumRows);
    } else if (type.equals(DType.STRUCT)) {
      // Struct children share the parent's row window.
      for (ColumnBufferProvider childProvider : column.getChildProviders()) {
        totalDataSize += getSlicedSerializedDataSizeInBytes(childProvider, rowOffset, numRows);
      }
    } else {
      throw new IllegalStateException("Unexpected nested type: " + type);
    }
  }
  return totalDataSize;
}
/**
 * Get the size in bytes needed to serialize the given data. The columns should be in host memory
 * before calling this.
 * @param columns columns to be serialized.
 * @param rowOffset the first row to serialize.
 * @param numRows the number of rows to serialize.
 * @return the size in bytes needed to serialize the data including the header.
 */
public static long getSerializedSizeInBytes(HostColumnVector[] columns, long rowOffset, long numRows) {
  // Providers are created with closeAtEnd=false, so closing them does not close the
  // caller's columns.
  ColumnBufferProvider[] providers = providersFrom(columns, false);
  try {
    SerializedColumnHeader[] columnHeaders = new SerializedColumnHeader[providers.length];
    for (int i = 0; i < columnHeaders.length; i++) {
      columnHeaders[i] = new SerializedColumnHeader(providers[i], rowOffset, numRows);
    }
    long dataLen = getSlicedSerializedDataSizeInBytes(providers, rowOffset, numRows);
    SerializedTableHeader tableHeader = new SerializedTableHeader(columnHeaders,
        (int) numRows, dataLen);
    return tableHeader.getTotalSerializedSizeInBytes();
  } finally {
    closeAll(providers);
  }
}
/////////////////////////////////////////////
// HELPER METHODS buildIndex
/////////////////////////////////////////////
/**
 * Build a list of column offset descriptors using a pre-order traversal of the columns.
 * @param header table header describing the serialized columns
 * @param buffer host buffer holding the serialized table data
 * @return deque of per-column offset descriptors in pre-order
 */
static ArrayDeque<ColumnOffsets> buildIndex(SerializedTableHeader header,
                                            HostMemoryBuffer buffer) {
  ArrayDeque<ColumnOffsets> result = new ArrayDeque<>();
  long nextOffset = 0;
  int topColumnCount = header.getNumColumns();
  for (int columnIdx = 0; columnIdx < topColumnCount; columnIdx++) {
    nextOffset = buildIndex(header.getColumnHeader(columnIdx), buffer, result, nextOffset);
  }
  return result;
}
/**
 * Append a list of column offset descriptors using a pre-order traversal of the column
 * @param column column offset descriptors will be built for this column and its child columns
 * @param buffer host buffer backing the column data
 * @param offsetsList list where column offset descriptors will be appended during traversal
 * @param bufferOffset offset in the host buffer where the column data begins
 * @return buffer offset at the end of this column's data including all child columns
 */
private static long buildIndex(SerializedColumnHeader column, HostMemoryBuffer buffer,
                               ArrayDeque<ColumnOffsets> offsetsList, long bufferOffset) {
  long validity = 0;
  long offsets = 0;
  long data = 0;
  long dataLen = 0;
  long rowCount = column.getRowCount();
  // Buffers are laid out per column as: validity, then offsets, then data,
  // each padded to 64-byte alignment.
  if (needsValidityBuffer(column.getNullCount())) {
    long validityLen = padFor64byteAlignment(BitVectorHelper.getValidityLengthInBytes(rowCount));
    validity = bufferOffset;
    bufferOffset += validityLen;
  }
  DType dtype = column.getType();
  if (dtype.hasOffsets()) {
    if (rowCount > 0) {
      long offsetsLen = (rowCount + 1) * Integer.BYTES;
      offsets = bufferOffset;
      // First and last offsets bound the raw data region (used for string length below).
      int startOffset = buffer.getInt(bufferOffset);
      int endOffset = buffer.getInt(bufferOffset + (rowCount * Integer.BYTES));
      bufferOffset += padFor64byteAlignment(offsetsLen);
      if (dtype.equals(DType.STRING)) {
        dataLen = endOffset - startOffset;
        data = bufferOffset;
        bufferOffset += padFor64byteAlignment(dataLen);
      }
    }
  } else if (dtype.getSizeInBytes() > 0) {
    // Fixed-width column data.
    dataLen = dtype.getSizeInBytes() * rowCount;
    data = bufferOffset;
    bufferOffset += padFor64byteAlignment(dataLen);
  }
  offsetsList.add(new ColumnOffsets(validity, offsets, data, dataLen));
  SerializedColumnHeader[] children = column.getChildren();
  if (children != null) {
    // Children follow their parent's buffers (pre-order layout).
    for (SerializedColumnHeader child : children) {
      bufferOffset = buildIndex(child, buffer, offsetsList, bufferOffset);
    }
  }
  return bufferOffset;
}
/////////////////////////////////////////////
// HELPER METHODS FOR PROVIDERS
/////////////////////////////////////////////
/** Close every provider in the array. */
private static void closeAll(ColumnBufferProvider[] providers) {
  for (ColumnBufferProvider provider : providers) {
    provider.close();
  }
}
/** Close every provider in a two-dimensional array, skipping null sub-arrays. */
private static void closeAll(ColumnBufferProvider[][] providers) {
  for (ColumnBufferProvider[] group : providers) {
    if (group != null) {
      closeAll(group);
    }
  }
}
/**
 * Copy device columns to host asynchronously and wrap the host copies in providers
 * that own them (closing a provider closes its host copy). On any failure the host
 * copies made so far are cleaned up before the exception propagates.
 */
private static ColumnBufferProvider[] providersFrom(ColumnVector[] columns) {
  HostColumnVector[] onHost = new HostColumnVector[columns.length];
  boolean success = false;
  try {
    for (int i = 0; i < columns.length; i++) {
      onHost[i] = columns[i].copyToHostAsync(Cuda.DEFAULT_STREAM);
    }
    // Wait for all async device-to-host copies to finish before the data is used.
    Cuda.DEFAULT_STREAM.sync();
    ColumnBufferProvider[] ret = providersFrom(onHost, true);
    success = true;
    return ret;
  } finally {
    if (!success) {
      // Release any host copies created before the failure.
      for (int i = 0; i < onHost.length; i++) {
        if (onHost[i] != null) {
          onHost[i].close();
          onHost[i] = null;
        }
      }
    }
  }
}
/**
 * Wrap each host column in a ColumnProvider.
 * @param columns host columns to wrap
 * @param closeAtEnd true if closing a provider should also close its column
 * @return one provider per input column, in order
 */
private static ColumnBufferProvider[] providersFrom(HostColumnVector[] columns, boolean closeAtEnd) {
  int count = columns.length;
  ColumnBufferProvider[] result = new ColumnBufferProvider[count];
  for (int columnIdx = 0; columnIdx < count; columnIdx++) {
    result[columnIdx] = new ColumnProvider(columns[columnIdx], closeAtEnd);
  }
  return result;
}
/**
 * For a batch of tables described by a header and corresponding buffer, return a mapping of
 * top column index to the corresponding column providers for that column across all tables.
 */
private static ColumnBufferProvider[][] providersFrom(SerializedTableHeader[] headers,
                                                      HostMemoryBuffer[] dataBuffers) {
  int numColumns = 0;
  int numTables = headers.length;
  int numNonEmptyTables = 0;
  ArrayList<ArrayList<ColumnBufferProvider>> providersPerColumn = null;
  for (int tableIdx = 0; tableIdx < numTables; tableIdx++) {
    SerializedTableHeader header = headers[tableIdx];
    if (tableIdx == 0) {
      // The first table fixes the expected schema.
      numColumns = header.getNumColumns();
      providersPerColumn = new ArrayList<>(numColumns);
      for (int i = 0; i < numColumns; i++) {
        providersPerColumn.add(new ArrayList<>(numTables));
      }
    } else {
      // All subsequent tables must match the first table's schema.
      checkCompatibleTypes(headers[0], header, tableIdx);
    }
    // filter out empty tables but keep at least one if all were empty
    if (headers[tableIdx].getNumRows() > 0 ||
        (numNonEmptyTables == 0 && tableIdx == numTables - 1)) {
      numNonEmptyTables++;
      HostMemoryBuffer dataBuffer = dataBuffers[tableIdx];
      ArrayDeque<ColumnOffsets> offsets = buildIndex(header, dataBuffer);
      for (int columnIdx = 0; columnIdx < numColumns; columnIdx++) {
        // buildBufferOffsetProvider consumes offsets in the same pre-order that
        // buildIndex produced them.
        ColumnBufferProvider provider = buildBufferOffsetProvider(
            header.getColumnHeader(columnIdx), offsets, dataBuffer);
        providersPerColumn.get(columnIdx).add(provider);
      }
      // Every offset descriptor should have been consumed exactly once.
      assert offsets.isEmpty();
    } else {
      assert headers[tableIdx].dataLen == 0;
    }
  }
  ColumnBufferProvider[][] result = new ColumnBufferProvider[numColumns][];
  for (int i = 0; i < numColumns; i++) {
    result[i] = providersPerColumn.get(i).toArray(new ColumnBufferProvider[0]);
  }
  return result;
}
/**
 * Verify another table header has the same column count and column-by-column
 * compatible types as the expected (first) header.
 * @param expected reference header from the first table
 * @param other header being validated
 * @param tableIdx index of the table being validated, for error reporting
 * @throws IllegalArgumentException on any mismatch
 */
private static void checkCompatibleTypes(SerializedTableHeader expected,
                                         SerializedTableHeader other,
                                         int tableIdx) {
  int expectedColumns = expected.getNumColumns();
  if (other.getNumColumns() != expectedColumns) {
    throw new IllegalArgumentException("The number of columns did not match " + tableIdx
        + " " + other.getNumColumns() + " != " + expectedColumns);
  }
  for (int columnIdx = 0; columnIdx < expectedColumns; columnIdx++) {
    checkCompatibleTypes(expected.getColumnHeader(columnIdx), other.getColumnHeader(columnIdx),
        tableIdx, columnIdx);
  }
}
/**
 * Verify a column in another table has a type layout compatible with the corresponding
 * column in the expected header, recursing into child columns.
 * @param expected reference column header (from the first table)
 * @param other column header being validated
 * @param tableIdx index of the table being validated, for error reporting
 * @param columnIdx index of the column being validated, for error reporting
 * @throws IllegalArgumentException if the types or child counts do not match
 */
private static void checkCompatibleTypes(SerializedColumnHeader expected,
                                         SerializedColumnHeader other,
                                         int tableIdx, int columnIdx) {
  DType dtype = expected.getType();
  if (!dtype.equals(other.getType())) {
    // Fixed: the message previously read e.g. "at table 3column 2" (missing space).
    throw new IllegalArgumentException("Type mismatch at table " + tableIdx +
        " column " + columnIdx + " expected " + dtype + " but found " + other.getType());
  }
  if (dtype.isNestedType()) {
    SerializedColumnHeader[] expectedChildren = expected.getChildren();
    SerializedColumnHeader[] otherChildren = other.getChildren();
    if (expectedChildren.length != otherChildren.length) {
      // Fixed: same missing-space problem as above.
      throw new IllegalArgumentException("Child count mismatch at table " + tableIdx +
          " column " + columnIdx + " expected " + expectedChildren.length + " but found " +
          otherChildren.length);
    }
    for (int i = 0; i < expectedChildren.length; i++) {
      checkCompatibleTypes(expectedChildren[i], otherChildren[i], tableIdx, columnIdx);
    }
  }
}
/**
 * Build a BufferOffsetProvider for a column and its children, consuming entries from
 * the offsets deque in the same pre-order in which buildIndex appended them.
 */
private static BufferOffsetProvider buildBufferOffsetProvider(SerializedColumnHeader header,
                                                              ArrayDeque<ColumnOffsets> offsets,
                                                              HostMemoryBuffer dataBuffer) {
  // This column's offsets must be removed before recursing to preserve pre-order.
  ColumnOffsets columnOffsets = offsets.remove();
  ColumnBufferProvider[] childProviders = null;
  SerializedColumnHeader[] children = header.getChildren();
  if (children != null) {
    childProviders = new ColumnBufferProvider[children.length];
    for (int i = 0; i < children.length; i++) {
      childProviders[i] = buildBufferOffsetProvider(children[i], offsets, dataBuffer);
    }
  }
  return new BufferOffsetProvider(header, columnOffsets, dataBuffer, childProviders);
}
/////////////////////////////////////////////
// HELPER METHODS FOR SerializedTableHeader
/////////////////////////////////////////////
/**
 * Build the table header describing a row slice of the given columns.
 * @param columns providers for the columns being serialized
 * @param rowOffset first row of the slice
 * @param numRows number of rows in the slice
 * @return header with per-column metadata and the total data length
 */
private static SerializedTableHeader calcHeader(ColumnBufferProvider[] columns,
                                                long rowOffset,
                                                int numRows) {
  SerializedColumnHeader[] columnHeaders = new SerializedColumnHeader[columns.length];
  for (int columnIdx = 0; columnIdx < columnHeaders.length; columnIdx++) {
    columnHeaders[columnIdx] = new SerializedColumnHeader(columns[columnIdx], rowOffset, numRows);
  }
  long dataLen = getSlicedSerializedDataSizeInBytes(columns, rowOffset, numRows);
  return new SerializedTableHeader(columnHeaders, numRows, dataLen);
}
/**
 * Calculate the new header for a concatenated set of columns.
 * @param providersPerColumn first index is the column, second index is the table.
 * @return the new header.
 */
private static SerializedTableHeader calcConcatHeader(ColumnBufferProvider[][] providersPerColumn) {
  int numColumns = providersPerColumn.length;
  long rowCount = 0;
  long totalDataSize = 0;
  ArrayList<SerializedColumnHeader> headers = new ArrayList<>(numColumns);
  for (int columnIdx = 0; columnIdx < numColumns; columnIdx++) {
    totalDataSize += calcConcatColumnHeaderAndSize(headers, providersPerColumn[columnIdx]);
    if (columnIdx == 0) {
      // The first column establishes the row count for the whole table.
      rowCount = headers.get(0).getRowCount();
    } else {
      // Every other column must concatenate to the same number of rows.
      assert rowCount == headers.get(columnIdx).getRowCount();
    }
  }
  SerializedColumnHeader[] columnHeaders = headers.toArray(new SerializedColumnHeader[0]);
  return new SerializedTableHeader(columnHeaders, (int)rowCount, totalDataSize);
}
/**
 * Calculate a column header describing all of the columns concatenated together
 * @param outHeaders list that will be appended with the new column header
 * @param providers columns to be concatenated
 * @return total bytes needed to store the data for the result column and its children
 */
private static long calcConcatColumnHeaderAndSize(ArrayList<SerializedColumnHeader> outHeaders,
                                                  ColumnBufferProvider[] providers) {
  long totalSize = 0;
  int numTables = providers.length;
  long rowCount = 0;
  long nullCount = 0;
  // Sum rows and null counts across all tables; once any table reports an unknown
  // null count the combined count becomes unknown too.
  for (ColumnBufferProvider provider : providers) {
    rowCount += provider.getRowCount();
    if (nullCount != ColumnView.UNKNOWN_NULL_COUNT) {
      long providerNullCount = provider.getNullCount();
      if (providerNullCount == ColumnView.UNKNOWN_NULL_COUNT) {
        nullCount = ColumnView.UNKNOWN_NULL_COUNT;
      } else {
        nullCount += providerNullCount;
      }
    }
  }
  if (rowCount > Integer.MAX_VALUE) {
    throw new IllegalArgumentException("Cannot build a batch larger than " + Integer.MAX_VALUE + " rows");
  }
  if (needsValidityBuffer(nullCount)) {
    totalSize += padFor64byteAlignment(BitVectorHelper.getValidityLengthInBytes(rowCount));
  }
  // All providers were already type-checked, so the first one is representative.
  ColumnBufferProvider firstProvider = providers[0];
  DType dtype = firstProvider.getType();
  if (dtype.hasOffsets()) {
    if (rowCount > 0) {
      // Combined offsets vector.
      totalSize += padFor64byteAlignment((rowCount + 1) * Integer.BYTES);
      if (dtype.equals(DType.STRING)) {
        // Raw string bytes are the sum of each table's full string data.
        long stringDataLen = 0;
        for (ColumnBufferProvider provider : providers) {
          stringDataLen += getRawStringDataLength(provider, 0, provider.getRowCount());
        }
        totalSize += padFor64byteAlignment(stringDataLen);
      }
    }
  } else if (dtype.getSizeInBytes() > 0) {
    // Fixed-width data.
    totalSize += padFor64byteAlignment(dtype.getSizeInBytes() * rowCount);
  }
  SerializedColumnHeader[] children = null;
  if (dtype.isNestedType()) {
    int numChildren = firstProvider.getChildProviders().length;
    ArrayList<SerializedColumnHeader> childHeaders = new ArrayList<>(numChildren);
    ColumnBufferProvider[] childColumnProviders = new ColumnBufferProvider[numTables];
    for (int childIdx = 0; childIdx < numChildren; childIdx++) {
      // collect all the providers for the current child and build the child's header
      for (int tableIdx = 0; tableIdx < numTables; tableIdx++) {
        childColumnProviders[tableIdx] = providers[tableIdx].getChildProviders()[childIdx];
      }
      totalSize += calcConcatColumnHeaderAndSize(childHeaders, childColumnProviders);
    }
    children = childHeaders.toArray(new SerializedColumnHeader[0]);
  }
  outHeaders.add(new SerializedColumnHeader(dtype, rowCount, nullCount, children));
  return totalSize;
}
/////////////////////////////////////////////
// HELPER METHODS FOR DataWriters
/////////////////////////////////////////////
/**
 * Wrap an output stream in a DataWriter, adding buffering when the stream is not
 * already a DataOutputStream.
 */
private static DataWriter writerFrom(OutputStream out) {
  DataOutputStream dataOut;
  if (out instanceof DataOutputStream) {
    dataOut = (DataOutputStream) out;
  } else {
    dataOut = new DataOutputStream(new BufferedOutputStream(out));
  }
  return new DataOutputStreamWriter(dataOut);
}
/** Create a DataWriter that writes directly into the given host memory buffer. */
private static DataWriter writerFrom(HostMemoryBuffer buffer) {
  return new HostDataWriter(buffer);
}
/////////////////////////////////////////////
// Serialize Data Methods
/////////////////////////////////////////////
/**
 * Copy a slice of one of a column's buffers to the output and pad the output to
 * the next 64-byte alignment boundary.
 * @return the number of bytes written including padding
 */
private static long copySlicedAndPad(DataWriter out,
                                     ColumnBufferProvider column,
                                     BufferType buffer,
                                     long offset,
                                     long length) throws IOException {
  out.copyDataFrom(column, buffer, offset, length);
  return padFor64byteAlignment(out, length);
}
/////////////////////////////////////////////
// VALIDITY
/////////////////////////////////////////////
/**
 * Decide whether a validity buffer must be serialized for a column.
 * An unknown null count is treated conservatively as possibly containing nulls.
 */
private static boolean needsValidityBuffer(long nullCount) {
  return nullCount == ColumnView.UNKNOWN_NULL_COUNT || nullCount > 0;
}
/**
 * Copy validity bits from a provider's validity buffer into a byte array at an
 * arbitrary bit offset, handling the bit-level realignment between source and
 * destination.
 * @param dest destination array receiving validity bits
 * @param destBitOffset bit position in dest to start writing at
 * @param provider source column whose validity buffer is read
 * @param srcBitOffset bit position in the source validity buffer to start reading at
 * @param lengthBits number of bits requested
 * @return the number of bits actually copied (limited by remaining space in dest)
 */
private static int copyPartialValidity(byte[] dest,
                                       int destBitOffset,
                                       ColumnBufferProvider provider,
                                       int srcBitOffset,
                                       int lengthBits) {
  HostMemoryBuffer src = provider.getHostBufferFor(BufferType.VALIDITY);
  long baseSrcByteOffset = provider.getBufferStartOffset(BufferType.VALIDITY);
  int destStartBytes = destBitOffset / 8;
  int destStartBitOffset = destBitOffset % 8;
  long srcStartBytes = baseSrcByteOffset + (srcBitOffset / 8);
  int srcStartBitOffset = srcBitOffset % 8;
  int availableDestBits = (dest.length * 8) - destBitOffset;
  int bitsToCopy = Math.min(lengthBits, availableDestBits);
  int lastIndex = (bitsToCopy + destStartBitOffset + 7) / 8;
  // Mask of the bits in the first destination byte that this copy owns.
  byte allBitsSet = ~0;
  byte firstSrcMask = (byte)(allBitsSet << destStartBitOffset);
  int srcShift = destStartBitOffset - srcStartBitOffset;
  if (srcShift > 0) {
    // Shift left. If we are going to shift this is the path typically taken.
    byte current = src.getByte(srcStartBytes);
    byte result = (byte)(current << srcShift);
    // The first time we need to include any data already in dest.
    result |= dest[destStartBytes] & ~firstSrcMask;
    dest[destStartBytes] = result;
    // Keep the previous bytes around so we don't have to keep reading from src, which is not free
    byte previous = current;
    for (int index = 1; index < lastIndex; index++) {
      current = src.getByte(index + srcStartBytes);
      result = (byte)(current << srcShift);
      // Carry the high bits of the previous source byte into the low bits here.
      result |= (previous & 0xFF) >>> (8 - srcShift);
      dest[index + destStartBytes] = result;
      previous = current;
    }
    return bitsToCopy;
  } else if (srcShift < 0) {
    srcShift = -srcShift;
    // shifting right only happens when the buffer runs out of space.
    byte result = src.getByte(srcStartBytes);
    result = (byte)((result & 0xFF) >>> srcShift);
    byte next = 0;
    if (srcStartBytes + 1 < src.length) {
      next = src.getByte(srcStartBytes + 1);
    }
    result |= (byte)(next << 8 - srcShift);
    result &= firstSrcMask;
    // The first time through we need to include the data already in dest.
    result |= dest[destStartBytes] & ~firstSrcMask;
    dest[destStartBytes] = result;
    for (int index = 1; index < lastIndex - 1; index++) {
      result = next;
      result = (byte)((result & 0xFF) >>> srcShift);
      next = src.getByte(srcStartBytes + index + 1);
      result |= (byte)(next << 8 - srcShift);
      dest[index + destStartBytes] = result;
    }
    // Final byte: the next source byte may be past the end of the buffer, so
    // substitute zero bits if so.
    int idx = lastIndex - 1;
    if (idx > 0) {
      result = next;
      result = (byte) ((result & 0xFF) >>> srcShift);
      next = 0;
      if (srcStartBytes + idx + 1 < src.length) {
        next = src.getByte(srcStartBytes + idx + 1);
      }
      result |= (byte) (next << 8 - srcShift);
      dest[idx + destStartBytes] = result;
    }
    return bitsToCopy;
  } else {
    // Source and destination are byte-aligned with each other: bulk copy.
    src.getBytes(dest, destStartBytes, srcStartBytes, (bitsToCopy + 7) / 8);
    return bitsToCopy;
  }
}
// package-private for testing
/**
 * Write the validity bits for a row slice of a column to the output, padded to
 * 64-byte alignment. When the slice starts on a byte boundary the bytes are copied
 * directly; otherwise the bits are realigned through a staging array.
 * @return the number of bytes written including padding
 */
static long copySlicedValidity(DataWriter out,
                               ColumnBufferProvider column,
                               long rowOffset,
                               long numRows) throws IOException {
  long validityLen = BitVectorHelper.getValidityLengthInBytes(numRows);
  long byteOffset = (rowOffset / 8);
  long bytesLeft = validityLen;
  int lshift = (int) rowOffset % 8;
  if (lshift == 0) {
    // Slice starts on a byte boundary: copy bytes directly.
    out.copyDataFrom(column, BufferType.VALIDITY, byteOffset, bytesLeft);
  } else {
    // Slice starts mid-byte: realign bit-by-bit through a 128 KiB staging array.
    byte[] arrayBuffer = new byte[128 * 1024];
    int rowsStoredInArray = 0;
    int rowsLeftInBatch = (int) numRows;
    int validityBitOffset = (int) rowOffset;
    while(rowsLeftInBatch > 0) {
      int rowsStoredJustNow = copyPartialValidity(arrayBuffer, rowsStoredInArray, column, validityBitOffset, rowsLeftInBatch);
      assert rowsStoredJustNow > 0;
      rowsLeftInBatch -= rowsStoredJustNow;
      rowsStoredInArray += rowsStoredJustNow;
      validityBitOffset += rowsStoredJustNow;
      // Flush the staging array whenever it fills completely.
      if (rowsStoredInArray == arrayBuffer.length * 8) {
        out.write(arrayBuffer, 0, arrayBuffer.length);
        rowsStoredInArray = 0;
      }
    }
    // Flush any remaining partial bytes.
    if (rowsStoredInArray > 0) {
      out.write(arrayBuffer, 0, (rowsStoredInArray + 7) / 8);
    }
  }
  return padFor64byteAlignment(out, validityLen);
}
// Package private for testing
/**
 * Mark a run of validity bits as all-valid (set to 1) in a destination byte
 * array, starting at an arbitrary bit offset.
 * @param dest the byte array holding packed validity bits
 * @param destBitOffset the first bit to set
 * @param lengthBits how many bits are requested
 * @return how many bits were actually set, capped at lengthBits and limited
 *         by the space available in dest
 */
static int fillValidity(byte[] dest, int destBitOffset, int lengthBits) {
  int byteIndex = destBitOffset / 8;
  int bitInByte = destBitOffset % 8;
  long bytesNeeded = BitVectorHelper.getValidityLengthInBytes(lengthBits);
  int bitsSet = 0;
  if (bitInByte != 0) {
    // Set the high bits of the partially-filled first byte so the remaining
    // bits can be written a whole byte at a time.
    dest[byteIndex] |= (0xFF << bitInByte);
    byteIndex += 1;
    bitsSet = 8 - bitInByte;
    // Not used again, but keep it consistent.
    bitInByte = 0;
  }
  int wholeBytes = (int) Math.min(bytesNeeded, dest.length - byteIndex);
  for (int i = 0; i < wholeBytes; i++) {
    dest[byteIndex + i] = (byte) 0xFF;
  }
  bitsSet += wholeBytes * 8;
  return Math.min(bitsSet, lengthBits);
}
/**
 * Write the validity bits of multiple columns packed back-to-back as if they
 * were one column of numRows rows. Providers without a validity buffer are
 * treated as all-valid.
 * @param out where to write the combined validity bytes
 * @param numRows total rows across all providers
 * @param providers the per-table buffer providers for this column
 * @return the number of bytes written including 64-byte alignment padding
 */
private static long concatValidity(DataWriter out, long numRows,
ColumnBufferProvider[] providers) throws IOException {
long validityLen = BitVectorHelper.getValidityLengthInBytes(numRows);
// Bits from successive providers are packed contiguously through this scratch buffer.
byte[] arrayBuffer = new byte[128 * 1024];
int rowsStoredInArray = 0;
for (ColumnBufferProvider provider : providers) {
int rowsLeftInBatch = (int) provider.getRowCount();
int validityBitOffset = 0;
while(rowsLeftInBatch > 0) {
int rowsStoredJustNow;
if (needsValidityBuffer(provider.getNullCount())) {
// There is a real validity buffer; copy its bits (handles misalignment).
rowsStoredJustNow = copyPartialValidity(arrayBuffer, rowsStoredInArray, provider, validityBitOffset, rowsLeftInBatch);
} else {
// No validity buffer means every row is valid: synthesize all-1 bits.
rowsStoredJustNow = fillValidity(arrayBuffer, rowsStoredInArray, rowsLeftInBatch);
}
assert rowsStoredJustNow > 0;
assert rowsStoredJustNow <= rowsLeftInBatch;
rowsLeftInBatch -= rowsStoredJustNow;
rowsStoredInArray += rowsStoredJustNow;
validityBitOffset += rowsStoredJustNow;
// Flush whenever the scratch buffer is completely full of bits.
if (rowsStoredInArray == arrayBuffer.length * 8) {
out.write(arrayBuffer, 0, arrayBuffer.length);
rowsStoredInArray = 0;
}
}
}
// Flush the final partial buffer, rounding bits up to whole bytes.
if (rowsStoredInArray > 0) {
int len = (rowsStoredInArray + 7) / 8;
out.write(arrayBuffer, 0, len);
}
return padFor64byteAlignment(out, validityLen);
}
/////////////////////////////////////////////
// STRING
/////////////////////////////////////////////
/**
 * Write the character data for a slice of a string column. The byte range to
 * copy is derived from the column's offsets at the slice boundaries.
 * @return the number of bytes written including alignment padding, 0 for an
 *         empty slice
 */
private static long copySlicedStringData(DataWriter out, ColumnBufferProvider column, long rowOffset,
    long numRows) throws IOException {
  if (numRows <= 0) {
    return 0;
  }
  final long firstByte = column.getOffset(rowOffset);
  final long lastByte = column.getOffset(rowOffset + numRows);
  return copySlicedAndPad(out, column, BufferType.DATA, firstByte, lastByte - firstByte);
}
/**
 * Write the character data of multiple string columns back-to-back, followed
 * by 64-byte alignment padding for the combined length.
 */
private static void copyConcatStringData(DataWriter out,
    ColumnBufferProvider[] providers) throws IOException {
  long bytesWritten = 0;
  for (ColumnBufferProvider provider : providers) {
    long rows = provider.getRowCount();
    if (rows <= 0) {
      continue;
    }
    HostMemoryBuffer data = provider.getHostBufferFor(BufferType.DATA);
    long srcOffset = provider.getBufferStartOffset(BufferType.DATA);
    // The final offset is the total number of character bytes in this chunk.
    long numBytes = provider.getOffset(rows);
    out.copyDataFrom(data, srcOffset, numBytes);
    bytesWritten += numBytes;
  }
  padFor64byteAlignment(out, bytesWritten);
}
/**
 * Write the offsets for a slice of a column with an offsets buffer. When the
 * slice does not start at row 0 every offset is rebased so the first written
 * offset is 0.
 * @param out where to write the offsets
 * @param column provider of the source offsets buffer
 * @param rowOffset first row of the slice
 * @param numRows number of rows in the slice
 * @return the number of bytes written including 64-byte alignment padding
 */
private static long copySlicedOffsets(DataWriter out, ColumnBufferProvider column, long rowOffset,
long numRows) throws IOException {
if (numRows <= 0) {
// Don't copy anything, there are no rows
return 0;
}
// One 32-bit offset per row plus the trailing end offset.
long bytesToCopy = (numRows + 1) * Integer.BYTES;
long srcOffset = rowOffset * Integer.BYTES;
if (rowOffset == 0) {
// Slice starts at the beginning so offsets are already 0-based; copy directly.
return copySlicedAndPad(out, column, BufferType.OFFSET, srcOffset, bytesToCopy);
}
HostMemoryBuffer buff = column.getHostBufferFor(BufferType.OFFSET);
long startOffset = column.getBufferStartOffset(BufferType.OFFSET) + srcOffset;
if (bytesToCopy >= Integer.MAX_VALUE) {
throw new IllegalStateException("Copy is too large, need to do chunked copy");
}
ByteBuffer bb = buff.asByteBuffer(startOffset, (int)bytesToCopy);
// Rebase: subtract the first offset so the written slice starts at 0.
int start = bb.getInt();
out.writeIntNativeOrder(0);
long total = Integer.BYTES;
for (int i = 1; i < (numRows + 1); i++) {
int offset = bb.getInt();
out.writeIntNativeOrder(offset - start);
total += Integer.BYTES;
}
assert total == bytesToCopy;
long ret = padFor64byteAlignment(out, total);
return ret;
}
/**
 * Write the offsets of multiple chunks of the same column as one combined
 * offsets buffer. The first chunk is copied verbatim; later chunks have each
 * offset shifted by the running total of data referenced by earlier chunks.
 */
private static void copyConcatOffsets(DataWriter out,
ColumnBufferProvider[] providers) throws IOException {
long totalCopied = 0;
// Running amount to add to each subsequent chunk's offsets.
int offsetToAdd = 0;
for (ColumnBufferProvider provider : providers) {
long rowCount = provider.getRowCount();
if (rowCount > 0) {
HostMemoryBuffer offsetsBuffer = provider.getHostBufferFor(BufferType.OFFSET);
long currentOffset = provider.getBufferStartOffset(BufferType.OFFSET);
if (totalCopied == 0) {
// first chunk of offsets can be copied verbatim
totalCopied = (rowCount + 1) * Integer.BYTES;
out.copyDataFrom(offsetsBuffer, currentOffset, totalCopied);
// The last offset of the chunk is the number of data bytes it references.
offsetToAdd = offsetsBuffer.getInt(currentOffset + (rowCount * Integer.BYTES));
} else {
int localOffset = 0;
// first row's offset has already been written when processing the previous table
for (int row = 1; row < rowCount + 1; row++) {
localOffset = offsetsBuffer.getInt(currentOffset + (row * Integer.BYTES));
out.writeIntNativeOrder(localOffset + offsetToAdd);
}
// last local offset of this chunk is the length of data referenced by offsets
offsetToAdd += localOffset;
totalCopied += rowCount * Integer.BYTES;
}
}
}
padFor64byteAlignment(out, totalCopied);
}
/////////////////////////////////////////////
// BASIC DATA
/////////////////////////////////////////////
/**
 * Write the fixed-width data for a slice of a column. The byte range is the
 * row range scaled by the element size of the column's type.
 * @return the number of bytes written including alignment padding
 */
private static long sliceBasicData(DataWriter out,
    ColumnBufferProvider column,
    long rowOffset,
    long numRows) throws IOException {
  DType type = column.getType();
  long elementSize = type.getSizeInBytes();
  return copySlicedAndPad(out, column, BufferType.DATA,
      rowOffset * elementSize, numRows * elementSize);
}
/**
 * Write the fixed-width data of multiple chunks of the same column
 * back-to-back, followed by 64-byte alignment padding for the total length.
 */
private static void concatBasicData(DataWriter out,
    DType type,
    ColumnBufferProvider[] providers) throws IOException {
  long bytesWritten = 0;
  for (ColumnBufferProvider provider : providers) {
    long rows = provider.getRowCount();
    if (rows <= 0) {
      continue;
    }
    HostMemoryBuffer data = provider.getHostBufferFor(BufferType.DATA);
    long srcOffset = provider.getBufferStartOffset(BufferType.DATA);
    long numBytes = rows * type.getSizeInBytes();
    out.copyDataFrom(data, srcOffset, numBytes);
    bytesWritten += numBytes;
  }
  padFor64byteAlignment(out, bytesWritten);
}
/////////////////////////////////////////////
// COLUMN AND TABLE WRITE
/////////////////////////////////////////////
/**
 * Recursively write the data for one concatenated column, in the order
 * validity, offsets, (string) data, then children for nested types.
 * @param out where to write the serialized bytes
 * @param header the combined header describing this column across all inputs
 * @param providers one buffer provider per input table for this column
 */
private static void writeConcat(DataWriter out, SerializedColumnHeader header,
ColumnBufferProvider[] providers) throws IOException {
if (needsValidityBuffer(header.getNullCount())) {
concatValidity(out, header.getRowCount(), providers);
}
DType dtype = header.getType();
if (dtype.hasOffsets()) {
if (header.getRowCount() > 0) {
copyConcatOffsets(out, providers);
if (dtype.equals(DType.STRING)) {
copyConcatStringData(out, providers);
}
}
} else if (dtype.getSizeInBytes() > 0) {
// Fixed-width type: write the raw data buffers.
concatBasicData(out, dtype, providers);
}
if (dtype.isNestedType()) {
int numTables = providers.length;
SerializedColumnHeader[] childHeaders = header.getChildren();
ColumnBufferProvider[] childColumnProviders = new ColumnBufferProvider[numTables];
for (int childIdx = 0; childIdx < childHeaders.length; childIdx++) {
// collect all the providers for the current child column
for (int tableIdx = 0; tableIdx < numTables; tableIdx++) {
childColumnProviders[tableIdx] = providers[tableIdx].getChildProviders()[childIdx];
}
writeConcat(out, childHeaders[childIdx], childColumnProviders);
}
}
}
/**
 * Recursively write a slice of one column, in the order validity, offsets,
 * (string) data, then children for nested types. For LIST children the row
 * range is translated through the offsets; for STRUCT children the same row
 * range is used directly.
 * @param out where to write the serialized bytes
 * @param column provider of the column's buffers
 * @param rowOffset first row of the slice
 * @param numRows number of rows in the slice
 */
private static void writeSliced(DataWriter out,
ColumnBufferProvider column,
long rowOffset,
long numRows) throws IOException {
if (needsValidityBuffer(column.getNullCount())) {
try (NvtxRange range = new NvtxRange("Write Validity", NvtxColor.DARK_GREEN)) {
copySlicedValidity(out, column, rowOffset, numRows);
}
}
DType type = column.getType();
if (type.hasOffsets()) {
if (numRows > 0) {
try (NvtxRange offsetRange = new NvtxRange("Write Offset Data", NvtxColor.ORANGE)) {
copySlicedOffsets(out, column, rowOffset, numRows);
if (type.equals(DType.STRING)) {
try (NvtxRange dataRange = new NvtxRange("Write String Data", NvtxColor.RED)) {
copySlicedStringData(out, column, rowOffset, numRows);
}
}
}
}
} else if (type.getSizeInBytes() > 0){
// Fixed-width type: write the raw data slice.
try (NvtxRange range = new NvtxRange("Write Data", NvtxColor.BLUE)) {
sliceBasicData(out, column, rowOffset, numRows);
}
}
if (numRows > 0 && type.isNestedType()) {
if (type.equals(DType.LIST)) {
try (NvtxRange range = new NvtxRange("Write List Child", NvtxColor.PURPLE)) {
ColumnBufferProvider child = column.getChildProviders()[0];
// Translate the parent's row range into the child's row range via offsets.
long childStartRow = column.getOffset(rowOffset);
long childNumRows = column.getOffset(rowOffset + numRows) - childStartRow;
writeSliced(out, child, childStartRow, childNumRows);
}
} else if (type.equals(DType.STRUCT)) {
try (NvtxRange range = new NvtxRange("Write Struct Children", NvtxColor.PURPLE)) {
for (ColumnBufferProvider child : column.getChildProviders()) {
writeSliced(out, child, rowOffset, numRows);
}
}
} else {
throw new IllegalStateException("Unexpected nested type: " + type);
}
}
}
/**
 * Write a slice of a set of columns: header first, then each column's data,
 * flushing the writer at the end.
 * @param columns providers for all the columns to write
 * @param out where to write the serialized table
 * @param rowOffset first row of the slice
 * @param numRows number of rows in the slice
 */
private static void writeSliced(ColumnBufferProvider[] columns,
DataWriter out,
long rowOffset,
long numRows) throws IOException {
assert rowOffset >= 0;
assert numRows >= 0;
for (int i = 0; i < columns.length; i++) {
long rows = columns[i].getRowCount();
// The slice must fit inside every column, and row counts must fit in an int.
assert rowOffset + numRows <= rows;
assert rows == (int) rows : "can only support an int for indexes";
}
SerializedTableHeader header = calcHeader(columns, rowOffset, (int) numRows);
header.writeTo(out);
try (NvtxRange range = new NvtxRange("Write Sliced", NvtxColor.GREEN)) {
for (int i = 0; i < columns.length; i++) {
writeSliced(out, columns[i], rowOffset, numRows);
}
}
out.flush();
}
/**
 * Write all or part of a table out in an internal format.
 * @param t the table to be written.
 * @param out the stream to write the serialized table out to.
 * @param rowOffset the first row to write out.
 * @param numRows the number of rows to write out.
 */
public static void writeToStream(Table t, OutputStream out, long rowOffset, long numRows)
    throws IOException {
  ColumnVector[] columns = t.getColumns();
  writeToStream(columns, out, rowOffset, numRows);
}
/**
 * Write all or part of a set of columns out in an internal format.
 * @param columns the columns to be written.
 * @param out the stream to write the serialized table out to.
 * @param rowOffset the first row to write out.
 * @param numRows the number of rows to write out.
 */
public static void writeToStream(ColumnVector[] columns, OutputStream out, long rowOffset,
    long numRows) throws IOException {
  ColumnBufferProvider[] providers = providersFrom(columns);
  try {
    writeSliced(providers, writerFrom(out), rowOffset, numRows);
  } finally {
    // Providers are always released, even if the write fails part way through.
    closeAll(providers);
  }
}
/**
 * Write all or part of a set of host-side columns out in an internal format.
 * @param columns the columns to be written.
 * @param out the stream to write the serialized table out to.
 * @param rowOffset the first row to write out.
 * @param numRows the number of rows to write out.
 */
public static void writeToStream(HostColumnVector[] columns, OutputStream out, long rowOffset,
    long numRows) throws IOException {
  ColumnBufferProvider[] providers = providersFrom(columns, false);
  try {
    writeSliced(providers, writerFrom(out), rowOffset, numRows);
  } finally {
    // Providers are always released, even if the write fails part way through.
    closeAll(providers);
  }
}
/**
 * Write a rowcount-only header to the output stream, used when a columnar
 * batch with no columns but a non-zero row count is received.
 * @param out the stream to write the serialized table out to.
 * @param numRows the number of rows to write out.
 */
public static void writeRowsToStream(OutputStream out, long numRows) throws IOException {
  SerializedTableHeader header = new SerializedTableHeader((int) numRows);
  DataWriter writer = writerFrom(out);
  header.writeTo(writer);
  writer.flush();
}
/**
 * Take the data from multiple batches stored in the parsed headers and the dataBuffer and write
 * it out to out as if it were a single buffer.
 * @param headers the headers parsed from multiple streams.
 * @param dataBuffers an array of buffers that hold the data, one per header.
 * @param out what to write the data out to.
 * @throws IOException on any error.
 */
public static void writeConcatedStream(SerializedTableHeader[] headers,
                                       HostMemoryBuffer[] dataBuffers,
                                       OutputStream out) throws IOException {
  ColumnBufferProvider[][] providersPerColumn = providersFrom(headers, dataBuffers);
  try {
    SerializedTableHeader combined = calcConcatHeader(providersPerColumn);
    DataWriter writer = writerFrom(out);
    combined.writeTo(writer);
    try (NvtxRange range = new NvtxRange("Concat Host Side", NvtxColor.GREEN)) {
      int numColumns = combined.getNumColumns();
      for (int columnIdx = 0; columnIdx < numColumns; columnIdx++) {
        // Fix: the previous version declared an unused local copy of
        // providersPerColumn[columnIdx]; index directly, matching concatToHostBuffer.
        writeConcat(writer, combined.getColumnHeader(columnIdx), providersPerColumn[columnIdx]);
      }
    }
    writer.flush();
  } finally {
    // Providers are always released, even on failure.
    closeAll(providersPerColumn);
  }
}
/////////////////////////////////////////////
// COLUMN AND TABLE READ
/////////////////////////////////////////////
/**
 * Recursively build a host column (and its children) from the serialized
 * header and pre-computed buffer offsets, slicing views out of the combined
 * host buffer. Offsets are consumed from columnOffsets in the same order they
 * were produced by buildIndex.
 * @param column header describing the column to build
 * @param columnOffsets queue of buffer offsets, consumed front-to-back
 * @param buffer the combined host buffer holding all the column data
 * @param isRootColumn true to build a HostColumnVector (root, owns its
 *                     children), false for an interior HostColumnVectorCore
 * @return the constructed column; on failure any already-built children are closed
 */
private static HostColumnVectorCore buildHostColumn(SerializedColumnHeader column,
ArrayDeque<ColumnOffsets> columnOffsets,
HostMemoryBuffer buffer,
boolean isRootColumn) {
ColumnOffsets offsetsInfo = columnOffsets.remove();
SerializedColumnHeader[] children = column.getChildren();
int numChildren = children != null ? children.length : 0;
List<HostColumnVectorCore> childColumns = new ArrayList<>(numChildren);
try {
if (children != null) {
for (SerializedColumnHeader child : children) {
childColumns.add(buildHostColumn(child, columnOffsets, buffer, false));
}
}
DType dtype = column.getType();
long rowCount = column.getRowCount();
long nullCount = column.getNullCount();
HostMemoryBuffer dataBuffer = null;
HostMemoryBuffer validityBuffer = null;
HostMemoryBuffer offsetsBuffer = null;
// Nested types carry their data in the children, not a data buffer of their own.
if (!dtype.isNestedType()) {
dataBuffer = buffer.slice(offsetsInfo.data, offsetsInfo.dataLen);
}
if (needsValidityBuffer(nullCount)) {
long validitySize = BitVectorHelper.getValidityLengthInBytes(rowCount);
validityBuffer = buffer.slice(offsetsInfo.validity, validitySize);
}
if (dtype.hasOffsets()) {
// one 32-bit integer offset per row plus one additional offset at the end
long offsetsSize = rowCount > 0 ? (rowCount + 1) * Integer.BYTES : 0;
offsetsBuffer = buffer.slice(offsetsInfo.offsets, offsetsSize);
}
HostColumnVectorCore result;
// Only creates HostColumnVector for root columns, since child columns are managed by their parents.
if (isRootColumn) {
result = new HostColumnVector(dtype, rowCount,
Optional.of(nullCount), dataBuffer, validityBuffer, offsetsBuffer,
childColumns);
} else {
result = new HostColumnVectorCore(dtype, rowCount,
Optional.of(nullCount), dataBuffer, validityBuffer, offsetsBuffer,
childColumns);
}
// Ownership of the children transferred to result; null out so finally skips them.
childColumns = null;
return result;
} finally {
if (childColumns != null) {
for (HostColumnVectorCore c : childColumns) {
c.close();
}
}
}
}
/**
 * Recursively build a native cudf column view (and child views) over the data
 * in the combined device buffer, consuming buffer offsets from columnOffsets
 * in the same order they were produced by buildIndex.
 * @param column header describing the column to build
 * @param columnOffsets queue of buffer offsets, consumed front-to-back
 * @param combinedBuffer device buffer holding all the column data
 * @return the native handle of the new column view; child view handles are
 *         always released here because makeCudfColumnView copies them
 */
private static long buildColumnView(SerializedColumnHeader column,
ArrayDeque<ColumnOffsets> columnOffsets,
DeviceMemoryBuffer combinedBuffer) {
ColumnOffsets offsetsInfo = columnOffsets.remove();
long[] childViews = null;
try {
SerializedColumnHeader[] children = column.getChildren();
if (children != null) {
childViews = new long[children.length];
for (int i = 0; i < childViews.length; i++) {
childViews[i] = buildColumnView(children[i], columnOffsets, combinedBuffer);
}
}
DType dtype = column.getType();
long bufferAddress = combinedBuffer.getAddress();
// A zero-length data buffer is represented by a null (0) address.
long dataAddress = offsetsInfo.dataLen == 0 ? 0 : bufferAddress + offsetsInfo.data;
long validityAddress = needsValidityBuffer(column.getNullCount())
? bufferAddress + offsetsInfo.validity : 0;
long offsetsAddress = dtype.hasOffsets() ? bufferAddress + offsetsInfo.offsets : 0;
return ColumnView.makeCudfColumnView(
dtype.typeId.getNativeId(), dtype.getScale(),
dataAddress, offsetsInfo.dataLen,
offsetsAddress, validityAddress,
(int) column.getNullCount(), (int) column.getRowCount(),
childViews);
} finally {
if (childViews != null) {
for (long childView : childViews) {
ColumnView.deleteColumnView(childView);
}
}
}
}
/**
 * Slice the combined device buffer into per-column views and assemble a Table
 * from them. The host copy of the buffer is used to compute the per-column
 * offsets.
 * @param header describes the columns in the buffer
 * @param combinedBuffer device buffer holding all the table data
 * @param combinedBufferOnHost host copy of the same data, used for indexing
 * @return the assembled Table
 */
private static Table sliceUpColumnVectors(SerializedTableHeader header,
DeviceMemoryBuffer combinedBuffer,
HostMemoryBuffer combinedBufferOnHost) {
try (NvtxRange range = new NvtxRange("bufferToTable", NvtxColor.PURPLE)) {
ArrayDeque<ColumnOffsets> columnOffsets = buildIndex(header, combinedBufferOnHost);
int numColumns = header.getNumColumns();
ColumnVector[] vectors = new ColumnVector[numColumns];
try {
for (int i = 0; i < numColumns; i++) {
SerializedColumnHeader column = header.getColumnHeader(i);
long columnView = buildColumnView(column, columnOffsets, combinedBuffer);
vectors[i] = ColumnVector.fromViewWithContiguousAllocation(columnView, combinedBuffer);
}
// Every entry produced by buildIndex must have been consumed.
assert columnOffsets.isEmpty();
return new Table(vectors);
} finally {
// The Table (if created) holds its own references; always release ours.
for (ColumnVector cv: vectors) {
if (cv != null) {
cv.close();
}
}
}
}
}
/**
 * Concatenate multiple serialized tables into a single device Table.
 * @param headers table headers corresponding to the host table buffers
 * @param dataBuffers host table buffer for each input table to be concatenated
 * @return the concatenated table in device memory
 */
public static Table readAndConcat(SerializedTableHeader[] headers,
                                  HostMemoryBuffer[] dataBuffers) throws IOException {
  ContiguousTable ct = concatToContiguousTable(headers, dataBuffers);
  try {
    return ct.getTable();
  } finally {
    // Fix: close the buffer in a finally block so it is not leaked if
    // getTable() throws; previously it was closed unconditionally before use.
    ct.getBuffer().close();
  }
}
/**
 * Concatenate multiple tables in host memory into a contiguous table in device memory.
 * @param headers table headers corresponding to the host table buffers
 * @param dataBuffers host table buffer for each input table to be concatenated
 * @return contiguous table in device memory
 */
public static ContiguousTable concatToContiguousTable(SerializedTableHeader[] headers,
    HostMemoryBuffer[] dataBuffers) throws IOException {
  HostConcatResult hostResult = concatToHostBuffer(headers, dataBuffers);
  try {
    return hostResult.toContiguousTable();
  } finally {
    // The host-side intermediate is always released once the device copy exists.
    hostResult.close();
  }
}
/**
 * Concatenate multiple tables in host memory into a single host table buffer.
 * @param headers table headers corresponding to the host table buffers
 * @param dataBuffers host table buffer for each input table to be concatenated
 * @param hostMemoryAllocator allocator for host memory buffers
 * @return host table header and buffer
 */
public static HostConcatResult concatToHostBuffer(SerializedTableHeader[] headers,
HostMemoryBuffer[] dataBuffers,
HostMemoryAllocator hostMemoryAllocator
) throws IOException {
ColumnBufferProvider[][] providersPerColumn = providersFrom(headers, dataBuffers);
try {
SerializedTableHeader combined = calcConcatHeader(providersPerColumn);
// combined.dataLen is the exact size of the concatenated data section.
HostMemoryBuffer hostBuffer = hostMemoryAllocator.allocate(combined.dataLen);
try {
try (NvtxRange range = new NvtxRange("Concat Host Side", NvtxColor.GREEN)) {
DataWriter writer = writerFrom(hostBuffer);
int numColumns = combined.getNumColumns();
for (int columnIdx = 0; columnIdx < numColumns; columnIdx++) {
writeConcat(writer, combined.getColumnHeader(columnIdx), providersPerColumn[columnIdx]);
}
}
} catch (Exception e) {
// On failure the buffer has no owner yet, so release it before rethrowing.
hostBuffer.close();
throw e;
}
// Ownership of hostBuffer transfers to the HostConcatResult.
return new HostConcatResult(combined, hostBuffer);
} finally {
closeAll(providersPerColumn);
}
}
/**
 * Concatenate multiple tables in host memory into a single host table buffer,
 * using the default host memory allocator.
 * @param headers table headers corresponding to the host table buffers
 * @param dataBuffers host table buffer for each input table to be concatenated
 * @return host table header and buffer
 */
public static HostConcatResult concatToHostBuffer(SerializedTableHeader[] headers,
    HostMemoryBuffer[] dataBuffers) throws IOException {
  HostMemoryAllocator allocator = DefaultHostMemoryAllocator.get();
  return concatToHostBuffer(headers, dataBuffers, allocator);
}
/**
 * Deserialize a serialized contiguous table into an array of host columns.
 *
 * @param header serialized table header
 * @param hostBuffer buffer containing the data for all columns in the serialized table
 * @return array of host columns representing the data from the serialized table
 */
public static HostColumnVector[] unpackHostColumnVectors(SerializedTableHeader header,
HostMemoryBuffer hostBuffer) {
ArrayDeque<ColumnOffsets> columnOffsets = buildIndex(header, hostBuffer);
int numColumns = header.getNumColumns();
HostColumnVector[] columns = new HostColumnVector[numColumns];
// Track success so partially-built columns can be cleaned up on failure.
boolean succeeded = false;
try {
for (int i = 0; i < numColumns; i++) {
SerializedColumnHeader column = header.getColumnHeader(i);
columns[i] = (HostColumnVector) buildHostColumn(column, columnOffsets, hostBuffer, true);
}
// Every entry produced by buildIndex must have been consumed.
assert columnOffsets.isEmpty();
succeeded = true;
} finally {
if (!succeeded) {
for (HostColumnVector c : columns) {
if (c != null) {
c.close();
}
}
}
}
return columns;
}
/**
 * After reading a header for a table read the data portion into a host side buffer.
 * @param in the stream to read the data from.
 * @param header the header that finished just moments ago.
 * @param buffer the buffer to write the data into. If there is not enough room to store
 *               the data in buffer it will not be read and header will still have dataRead
 *               set to false.
 * @throws IOException on any error reading from the stream.
 */
public static void readTableIntoBuffer(InputStream in,
    SerializedTableHeader header,
    HostMemoryBuffer buffer) throws IOException {
  if (!header.initialized || buffer.length < header.dataLen) {
    // Either no header was read or the buffer is too small; leave dataRead false.
    return;
  }
  try (NvtxRange range = new NvtxRange("Read Data", NvtxColor.RED)) {
    buffer.copyFromStream(0, in, header.dataLen);
  }
  header.dataRead = true;
}
/**
 * Build a device-side table from a parsed header and its host data buffer.
 * The host data is copied to the device and sliced into a contiguous table.
 * @param header describes the serialized table held in hostBuffer
 * @param hostBuffer the serialized table data
 * @return the row count and the table, or a null table when the header has no columns
 */
public static TableAndRowCountPair readTableFrom(SerializedTableHeader header,
HostMemoryBuffer hostBuffer) {
ContiguousTable contigTable = null;
DeviceMemoryBuffer devBuffer = DeviceMemoryBuffer.allocate(hostBuffer.length);
try {
if (hostBuffer.length > 0) {
try (NvtxRange range = new NvtxRange("Copy Data To Device", NvtxColor.WHITE)) {
devBuffer.copyFromHostBuffer(hostBuffer);
}
}
if (header.getNumColumns() > 0) {
Table table = sliceUpColumnVectors(header, devBuffer, hostBuffer);
// Ownership of devBuffer transfers to the ContiguousTable.
contigTable = new ContiguousTable(table, devBuffer);
}
} finally {
// If no table took ownership (no columns, or a failure above), release the buffer.
if (contigTable == null) {
devBuffer.close();
}
}
return new TableAndRowCountPair(header.numRows, contigTable);
}
/**
 * Read a serialize table from the given InputStream.
 * @param in the stream to read the table data from.
 * @param hostMemoryAllocator a host memory allocator for an intermediate host memory buffer
 * @return the deserialized table in device memory, or null if the stream has no table to read
 * from, an end of the stream at the very beginning.
 * @throws IOException on any error.
 * @throws EOFException if the data stream ended unexpectedly in the middle of processing.
 */
public static TableAndRowCountPair readTableFrom(InputStream in,
HostMemoryAllocator hostMemoryAllocator) throws IOException {
// Wrap the stream only if it is not already a DataInputStream.
DataInputStream din;
if (in instanceof DataInputStream) {
din = (DataInputStream) in;
} else {
din = new DataInputStream(in);
}
SerializedTableHeader header = new SerializedTableHeader(din);
if (!header.initialized) {
// End of stream before any header: report zero rows and no table.
return new TableAndRowCountPair(0, null);
}
try (HostMemoryBuffer hostBuffer = hostMemoryAllocator.allocate(header.dataLen)) {
if (header.dataLen > 0) {
readTableIntoBuffer(din, header, hostBuffer);
}
return readTableFrom(header, hostBuffer);
}
}
/**
 * Read a serialized table from the given InputStream using the default host
 * memory allocator.
 * @param in the stream to read the table data from.
 * @return the deserialized table, see {@code readTableFrom(InputStream, HostMemoryAllocator)}
 */
public static TableAndRowCountPair readTableFrom(InputStream in) throws IOException {
  HostMemoryAllocator allocator = DefaultHostMemoryAllocator.get();
  return readTableFrom(in, allocator);
}
/** Holds the result of deserializing a table. */
public static final class TableAndRowCountPair implements Closeable {
  private final int rowCount;
  private final ContiguousTable contiguousTable;

  public TableAndRowCountPair(int numRows, ContiguousTable table) {
    this.rowCount = numRows;
    this.contiguousTable = table;
  }

  @Override
  public void close() {
    if (contiguousTable != null) {
      contiguousTable.close();
    }
  }

  /** Get the number of rows that were deserialized. */
  public int getNumRows() {
    return rowCount;
  }

  /**
   * Get the Table that was deserialized or null if there was no data
   * (e.g.: rows without columns).
   * <p>NOTE: Ownership of the table is not transferred by this method.
   * The table is still owned by this instance and will be closed when this
   * instance is closed.
   */
  public Table getTable() {
    return contiguousTable == null ? null : contiguousTable.getTable();
  }

  /**
   * Get the ContiguousTable that was deserialized or null if there was no
   * data (e.g.: rows without columns).
   * <p>NOTE: Ownership of the contiguous table is not transferred by this
   * method. The contiguous table is still owned by this instance and will
   * be closed when this instance is closed.
   */
  public ContiguousTable getContiguousTable() {
    return contiguousTable;
  }
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/JSONOptions.java
|
/*
*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import java.util.Collection;
/**
* Options for reading in JSON encoded data.
*/
public final class JSONOptions extends ColumnFilterOptions {
public static JSONOptions DEFAULT = new JSONOptions(builder());
private final boolean dayFirst;
private final boolean lines;
private final boolean recoverWithNull;
private final boolean normalizeSingleQuotes;
private final boolean normalizeWhitespace;
private final boolean mixedTypesAsStrings;
private final boolean keepStringQuotes;
private final boolean strictValidation;
private final boolean allowLeadingZeros;
private final boolean allowNonNumericNumbers;
private final boolean allowUnquotedControlChars;
private final boolean cudfPruneSchema;
private final boolean experimental;
private final byte lineDelimiter;
private JSONOptions(Builder builder) {
super(builder);
dayFirst = builder.dayFirst;
lines = builder.lines;
recoverWithNull = builder.recoverWithNull;
normalizeSingleQuotes = builder.normalizeSingleQuotes;
normalizeWhitespace = builder.normalizeWhitespace;
mixedTypesAsStrings = builder.mixedTypesAsStrings;
keepStringQuotes = builder.keepQuotes;
strictValidation = builder.strictValidation;
allowLeadingZeros = builder.allowLeadingZeros;
allowNonNumericNumbers = builder.allowNonNumericNumbers;
allowUnquotedControlChars = builder.allowUnquotedControlChars;
cudfPruneSchema = builder.cudfPruneSchema;
experimental = builder.experimental;
lineDelimiter = builder.lineDelimiter;
}
public boolean shouldCudfPruneSchema() {
return cudfPruneSchema;
}
public byte getLineDelimiter() {
return lineDelimiter;
}
public boolean isDayFirst() {
return dayFirst;
}
public boolean isLines() {
return lines;
}
/** Return the value of the recoverWithNull option */
public boolean isRecoverWithNull() {
return recoverWithNull;
}
public boolean isNormalizeSingleQuotes() {
return normalizeSingleQuotes;
}
public boolean isNormalizeWhitespace() {
return normalizeWhitespace;
}
public boolean isMixedTypesAsStrings() {
return mixedTypesAsStrings;
}
public boolean keepStringQuotes() {
return keepStringQuotes;
}
public boolean strictValidation() {
return strictValidation;
}
public boolean leadingZerosAllowed() {
return allowLeadingZeros;
}
public boolean nonNumericNumbersAllowed() {
return allowNonNumericNumbers;
}
public boolean unquotedControlChars() {
return allowUnquotedControlChars;
}
public boolean experimental() {
return experimental;
}
@Override
String[] getIncludeColumnNames() {
throw new UnsupportedOperationException("JSON reader didn't support column prune");
}
public static Builder builder() {
return new Builder();
}
public static final class Builder extends ColumnFilterOptions.Builder<JSONOptions.Builder> {
private boolean strictValidation = false;
private boolean allowUnquotedControlChars = true;
private boolean allowNonNumericNumbers = false;
private boolean allowLeadingZeros = false;
private boolean dayFirst = false;
private boolean lines = true;
private boolean recoverWithNull = false;
private boolean normalizeSingleQuotes = false;
private boolean normalizeWhitespace = false;
private boolean mixedTypesAsStrings = false;
private boolean keepQuotes = false;
private boolean cudfPruneSchema = false;
private boolean experimental = false;
private byte lineDelimiter = '\n';
public Builder withCudfPruneSchema(boolean prune) {
cudfPruneSchema = prune;
return this;
}
public Builder withLineDelimiter(char delimiter) {
if (delimiter > Byte.MAX_VALUE) {
throw new IllegalArgumentException("Only basic ASCII values are supported as line delimiters " + delimiter);
}
lineDelimiter = (byte)delimiter;
return this;
}
/**
* Should json validation be strict or not
*/
public Builder withStrictValidation(boolean isAllowed) {
strictValidation = isAllowed;
return this;
}
/**
* Should experimental features be enabled or not
*/
public Builder withExperimental(boolean isAllowed) {
experimental = isAllowed;
return this;
}
/**
* Should leading zeros on numbers be allowed or not. Strict validation
* must be enabled for this to have any effect.
*/
public Builder withLeadingZeros(boolean isAllowed) {
allowLeadingZeros = isAllowed;
return this;
}
/**
* Should non-numeric numbers be allowed or not. Strict validation
* must be enabled for this to have any effect.
*/
public Builder withNonNumericNumbers(boolean isAllowed) {
allowNonNumericNumbers = isAllowed;
return this;
}
/**
* Should unquoted control chars be allowed in strings. Strict validation
* must be enabled for this to have any effect.
*/
public Builder withUnquotedControlChars(boolean isAllowed) {
allowUnquotedControlChars = isAllowed;
return this;
}
/**
* Whether to parse dates as DD/MM versus MM/DD
* @param dayFirst true: DD/MM, false, MM/DD
* @return builder for chaining
*/
public Builder withDayFirst(boolean dayFirst) {
this.dayFirst = dayFirst;
return this;
}
/**
* Whether to read the file as a json object per line
* @param perLine true: per line, false: multi-line
* @return builder for chaining
*/
public Builder withLines(boolean perLine) {
assert perLine == true : "Cudf does not support multi-line";
this.lines = perLine;
return this;
}
/**
* Specify how to handle invalid lines when parsing json. Setting
* recoverWithNull to true will cause null values to be returned
* for invalid lines. Setting recoverWithNull to false will cause
* the parsing to fail with an exception.
*
* @param recoverWithNull true: return nulls, false: throw exception
* @return builder for chaining
*/
public Builder withRecoverWithNull(boolean recoverWithNull) {
this.recoverWithNull = recoverWithNull;
return this;
}
/**
* Should the single quotes be normalized.
*/
public Builder withNormalizeSingleQuotes(boolean normalizeSingleQuotes) {
this.normalizeSingleQuotes = normalizeSingleQuotes;
return this;
}
/**
* Should the unquoted whitespace be removed.
*/
public Builder withNormalizeWhitespace(boolean normalizeWhitespace) {
this.normalizeWhitespace = normalizeWhitespace;
return this;
}
/**
* Specify how to handle columns that contain mixed types.
*
* @param mixedTypesAsStrings true: return unparsed JSON, false: throw exception
* @return builder for chaining
*/
public Builder withMixedTypesAsStrings(boolean mixedTypesAsStrings) {
this.mixedTypesAsStrings = mixedTypesAsStrings;
return this;
}
/**
* Set whether the reader should keep quotes of string values.
* @param keepQuotes true to keep them, else false.
* @return this for chaining.
*/
public Builder withKeepQuotes(boolean keepQuotes) {
this.keepQuotes = keepQuotes;
return this;
}
@Override
public Builder includeColumn(String... names) {
throw new UnsupportedOperationException("JSON reader didn't support column prune");
}
@Override
public Builder includeColumn(Collection<String> names) {
  // Column pruning is not implemented for the JSON reader; fail fast with a clear message.
  throw new UnsupportedOperationException("JSON reader does not support column pruning");
}
/**
 * Build an immutable {@link JSONOptions} instance from the current builder state.
 * @return the configured JSON reader options
 */
public JSONOptions build() {
return new JSONOptions(this);
}
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/MaskState.java
|
/*
*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
enum MaskState {
  UNALLOCATED(0),
  UNINITIALIZED(1),
  ALL_VALID(2),
  ALL_NULL(3);

  /** Cached copy of values() so lookups do not allocate a fresh array on every call. */
  private static final MaskState[] MASK_STATES = MaskState.values();

  /** Identifier this state is known by on the native side. */
  final int nativeId;

  MaskState(int nativeId) {
    this.nativeId = nativeId;
  }

  /**
   * Translate a native identifier back into the corresponding MaskState.
   * @param nativeId identifier produced by the native layer
   * @return the matching MaskState
   * @throws IllegalArgumentException if no state uses the given identifier
   */
  static MaskState fromNative(int nativeId) {
    for (int i = 0; i < MASK_STATES.length; i++) {
      MaskState candidate = MASK_STATES[i];
      if (candidate.nativeId == nativeId) {
        return candidate;
      }
    }
    throw new IllegalArgumentException("Could not translate " + nativeId + " into a MaskState");
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/MemoryBuffer.java
|
/*
*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Abstract class for representing the Memory Buffer
*
* NOTE: MemoryBuffer is public to make it easier to work with the class hierarchy,
* subclassing beyond what is included in CUDF is not recommended and not supported.
*/
abstract public class MemoryBuffer implements AutoCloseable {
  /**
   * Interface to handle events for this MemoryBuffer. Only invoked during
   * close, hence `onClosed` is the only event.
   */
  public interface EventHandler {
    /**
     * `onClosed` is invoked with the updated `refCount` during `close`.
     * The last invocation of `onClosed` will be with `refCount=0`.
     *
     * @note the callback is invoked with this `MemoryBuffer`'s lock held.
     *
     * @param refCount - the updated ref count for this MemoryBuffer at the time
     * of invocation
     */
    void onClosed(int refCount);
  }

  private static final Logger log = LoggerFactory.getLogger(MemoryBuffer.class);
  protected final long address;
  protected final long length;
  protected boolean closed = false;
  protected int refCount = 0;
  protected final MemoryBufferCleaner cleaner;
  protected final long id;
  private EventHandler eventHandler;

  public static abstract class MemoryBufferCleaner extends MemoryCleaner.Cleaner{}

  /**
   * Cleaner for a sliced view of a parent buffer. A slice owns a reference on the
   * parent rather than any memory of its own, so cleaning a slice simply closes
   * the parent (dropping that reference).
   */
  private static final class SlicedBufferCleaner extends MemoryBufferCleaner {
    private MemoryBuffer parent;

    SlicedBufferCleaner(MemoryBuffer parent) {
      this.parent = parent;
    }

    @Override
    protected synchronized boolean cleanImpl(boolean logErrorIfNotClean) {
      if (parent != null) {
        if (logErrorIfNotClean) {
          log.error("A SLICED BUFFER WAS LEAKED(ID: " + id + " parent: " + parent + ")");
          logRefCountDebug("Leaked sliced buffer");
        }
        try {
          parent.close();
        } finally {
          // Always mark the resource as freed even if an exception is thrown.
          // We cannot know how far it progressed before the exception, and
          // therefore it is unsafe to retry.
          parent = null;
        }
        return true;
      }
      return false;
    }

    @Override
    public boolean isClean() {
      return parent == null;
    }
  }

  /**
   * This is a really ugly API, but it is possible that the lifecycle of a column of
   * data may not have a clear lifecycle thanks to java and GC. This API informs the leak
   * tracking code that this is expected for this column, and big scary warnings should
   * not be printed when this happens.
   */
  public void noWarnLeakExpected() {
    if (cleaner != null) {
      cleaner.noWarnLeakExpected();
    }
  }

  /**
   * Constructor
   * @param address location in memory
   * @param length size of this buffer
   * @param cleaner used to clean up the memory. May be null if no cleanup is needed.
   */
  protected MemoryBuffer(long address, long length, MemoryBufferCleaner cleaner) {
    this.address = address;
    this.length = length;
    this.cleaner = cleaner;
    if (cleaner != null) {
      this.id = cleaner.id;
      incRefCount();
      MemoryCleaner.register(this, cleaner);
    } else {
      // No cleaner means no leak tracking; use -1 as a sentinel id.
      this.id = -1;
    }
  }

  /**
   * Constructor
   * @param address location in memory
   * @param length size of this buffer
   */
  protected MemoryBuffer(long address, long length) {
    this(address, length, (MemoryBufferCleaner)null);
  }

  /**
   * Internal constructor used when creating a slice.
   * @param address location in memory
   * @param length size of this buffer
   * @param parent the buffer that should be closed instead of closing this one.
   */
  protected MemoryBuffer(long address, long length, MemoryBuffer parent) {
    this(address, length, new SlicedBufferCleaner(parent));
  }

  /**
   * Returns the size of this buffer
   * @return - size
   */
  public final long getLength() {
    return length;
  }

  /**
   * Assert that the [address, address + size) range lies entirely within this buffer.
   * @param address absolute start address of the range being checked
   * @param size number of bytes in the range
   * @param type label used in the assertion messages to identify the caller's range
   */
  protected final void addressOutOfBoundsCheck(long address, long size, String type) {
    assert !closed : "Buffer is already closed " + Long.toHexString(this.address);
    assert size >= 0 : "A positive size is required";
    assert address >= this.address : "Start address is too low for " + type +
        " 0x" + Long.toHexString(address) + " < 0x" + Long.toHexString(this.address);
    // NOTE: the failing relation here is end > limit, so the message uses '>'
    // (it previously printed '<', which contradicted the actual failure).
    assert (address + size) <= (this.address + length) : "End address is too high for " + type +
        " 0x" + Long.toHexString(address + size) + " > 0x" + Long.toHexString(this.address + length);
  }

  /**
   * Returns the location of the data pointed to by this buffer
   * @return - data address
   */
  public final long getAddress() {
    return address;
  }

  /**
   * Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
   * The copy has completed when this returns, but the memory copy could overlap with
   * operations occurring on other streams.
   * @param destOffset the offset in this to start copying from.
   * @param src what to copy from
   * @param srcOffset offset into src to start out
   * @param length how many bytes to copy
   * @param stream CUDA stream to use
   */
  public final void copyFromMemoryBuffer(
      long destOffset, MemoryBuffer src, long srcOffset, long length, Cuda.Stream stream) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.memcpy(address + destOffset, src.address + srcOffset, length, CudaMemcpyKind.DEFAULT, stream);
  }

  /**
   * Copy a subset of src to this buffer starting at destOffset using the specified CUDA stream.
   * The copy is async and may not have completed when this returns.
   * @param destOffset the offset in this to start copying from.
   * @param src what to copy from
   * @param srcOffset offset into src to start out
   * @param length how many bytes to copy
   * @param stream CUDA stream to use
   */
  public final void copyFromMemoryBufferAsync(
      long destOffset, MemoryBuffer src, long srcOffset, long length, Cuda.Stream stream) {
    addressOutOfBoundsCheck(address + destOffset, length, "copy range dest");
    src.addressOutOfBoundsCheck(src.address + srcOffset, length, "copy range src");
    Cuda.asyncMemcpy(address + destOffset, src.address + srcOffset, length, CudaMemcpyKind.DEFAULT, stream);
  }

  /**
   * Slice off a part of the buffer. Note that this is a zero copy operation and all
   * slices must be closed along with the original buffer before the memory is released.
   * So use this with some caution.
   *
   * Note that [[DeviceMemoryBuffer]] and [[HostMemoryBuffer]] support slicing, and override this
   * function.
   *
   * @param offset where to start the slice at.
   * @param len how many bytes to slice
   * @return a slice of the original buffer that will need to be closed independently
   */
  public abstract MemoryBuffer slice(long offset, long len);

  /**
   * Set an event handler for this buffer. This method can be invoked with null
   * to unset the handler.
   *
   * @param newHandler - the EventHandler to use from this point forward
   * @return the prior event handler, or null if not set.
   */
  public synchronized EventHandler setEventHandler(EventHandler newHandler) {
    EventHandler prev = this.eventHandler;
    this.eventHandler = newHandler;
    return prev;
  }

  /**
   * Returns the current event handler for this buffer or null if no handler
   * is associated or this buffer is closed.
   */
  public synchronized EventHandler getEventHandler() {
    return this.eventHandler;
  }

  /**
   * Close this buffer and free memory
   */
  public synchronized void close() {
    if (cleaner != null) {
      refCount--;
      cleaner.delRef();
      try {
        if (refCount == 0) {
          cleaner.clean(false);
          closed = true;
        } else if (refCount < 0) {
          cleaner.logRefCountDebug("double free " + this);
          throw new IllegalStateException("Close called too many times " + this);
        }
      } finally {
        // The handler is notified even on a double-free so it always sees the
        // final refCount value; invoked with this buffer's lock held.
        if (eventHandler != null) {
          eventHandler.onClosed(refCount);
        }
      }
    }
  }

  @Override
  public String toString() {
    long id = -1;
    if (cleaner != null) {
      id = cleaner.id;
    }
    String name = this.getClass().getSimpleName();
    return name + "{" +
        "address=0x" + Long.toHexString(address) +
        ", length=" + length +
        ", id=" + id + "}";
  }

  /**
   * Increment the reference count for this column. You need to call close on this
   * to decrement the reference count again.
   *
   * NOTE(review): this assumes a non-null cleaner; a buffer constructed without a
   * cleaner will NPE here — confirm callers never take a reference on such buffers.
   */
  public synchronized void incRefCount() {
    refCount++;
    cleaner.addRef();
  }

  /**
   * Get the current reference count for this buffer.
   */
  public synchronized int getRefCount() {
    return refCount;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/MemoryCleaner.java
|
/*
*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
import ai.rapids.cudf.ast.CompiledExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
/**
* ColumnVectors may store data off heap, and because of complicated processing the life time of
* an individual vector can vary a lot. Typically a java finalizer could be used for this but
* they can cause a number of performance issues related to gc, and in some cases may effectively
* leak resources if the heap is large and GC's end up being delayed.
* <p>
* To address these issues the primary way to releasing the resources of a ColumnVector that is
* stored off of the java heap should be through reference counting. Because memory leaks are
* really bad for long lived daemons this is intended to be a backup.
* <p>
* When a ColumnVector first allocates off heap resources it should register itself with this
* along with a Cleaner instance. The Cleaner instance should have no direct links to the
* ColumnVector that would prevent the ColumnVector from being garbage collected. This will
* use WeakReferences internally to know when the resources have been leaked.
* A ColumnVector may keep a reference to the Cleaner instance and either update it as new
* resources are allocated or use it to release the resources it is holding. Once the
* ColumnVector's reference count reaches 0 and the resources are released. At some point
* later the Cleaner itself will be released.
*/
public final class MemoryCleaner {
  private static final boolean REF_COUNT_DEBUG = Boolean.getBoolean("ai.rapids.refcount.debug");
  private static final Logger log = LoggerFactory.getLogger(MemoryCleaner.class);
  private static final AtomicLong idGen = new AtomicLong(0);

  /**
   * Check if configured the shutdown hook which checks leaks at shutdown time.
   *
   * @return true if configured, false otherwise.
   */
  public static boolean configuredDefaultShutdownHook() {
    return REF_COUNT_DEBUG;
  }

  /**
   * API that can be used to clean up the resources for a vector, even if there was a leak
   */
  public static abstract class Cleaner {
    // Per-instance inc/dec history; only populated when ref-count debugging is enabled.
    private final List<RefCountDebugItem> refCountDebug;
    public final long id = idGen.incrementAndGet();
    private boolean leakExpected = false;

    public Cleaner() {
      if (REF_COUNT_DEBUG) {
        refCountDebug = new LinkedList<>();
      } else {
        refCountDebug = null;
      }
    }

    public final void addRef() {
      if (REF_COUNT_DEBUG && refCountDebug != null) {
        synchronized(this) {
          refCountDebug.add(new MemoryCleaner.RefCountDebugItem("INC"));
        }
      }
    }

    public final void delRef() {
      if (REF_COUNT_DEBUG && refCountDebug != null) {
        synchronized(this) {
          refCountDebug.add(new MemoryCleaner.RefCountDebugItem("DEC"));
        }
      }
    }

    public final void logRefCountDebug(String message) {
      if (REF_COUNT_DEBUG && refCountDebug != null) {
        synchronized(this) {
          log.error("{} (ID: {}): {}", message, id, MemoryCleaner.stringJoin("\n", refCountDebug));
        }
      }
    }

    /**
     * Clean up any resources not previously released.
     * @param logErrorIfNotClean if true we should log a leak unless it is expected.
     * @return true if resources were cleaned up else false.
     */
    public final boolean clean(boolean logErrorIfNotClean) {
      boolean cleaned = cleanImpl(logErrorIfNotClean && !leakExpected);
      if (cleaned) {
        // Once cleaned there is nothing left to track for this id.
        all.remove(id);
      }
      return cleaned;
    }

    /**
     * Return true if a leak is expected for this object else false.
     */
    public final boolean isLeakExpected() {
      return leakExpected;
    }

    /**
     * Clean up any resources not previously released.
     * @param logErrorIfNotClean if true and there are resources to clean up a leak has happened
     *                           so log it.
     * @return true if resources were cleaned up else false.
     */
    protected abstract boolean cleanImpl(boolean logErrorIfNotClean);

    public void noWarnLeakExpected() {
      leakExpected = true;
    }

    /**
     * Check if the underlying memory has been cleaned up or not.
     * @return true this is clean else false.
     */
    public abstract boolean isClean();
  }

  static final AtomicLong leakCount = new AtomicLong();
  private static final Map<Long, CleanerWeakReference> all =
      new ConcurrentHashMap<>(); // We want to be thread safe
  private static final ReferenceQueue<?> collected = new ReferenceQueue<>();

  private static class CleanerWeakReference<T> extends WeakReference<T> {
    private final Cleaner cleaner;
    final boolean isRmmBlocker;

    public CleanerWeakReference(T orig, Cleaner cleaner, ReferenceQueue collected, boolean isRmmBlocker) {
      super(orig, collected);
      this.cleaner = cleaner;
      this.isRmmBlocker = isRmmBlocker;
    }

    public void clean() {
      if (cleaner.clean(true)) {
        leakCount.incrementAndGet();
      }
    }
  }

  /**
   * The default GPU as set by user threads.
   */
  private static volatile int defaultGpu = -1;

  /**
   * This should be called from RMM when it is initialized.
   */
  static void setDefaultGpu(int defaultGpuId) {
    defaultGpu = defaultGpuId;
  }

  // Daemon thread that drains the reference queue and cleans leaked resources.
  private static final Thread t = new Thread(() -> {
    try {
      int currentGpuId = -1;
      while (true) {
        CleanerWeakReference next = (CleanerWeakReference)collected.remove(100);
        if (next != null) {
          try {
            if (currentGpuId != defaultGpu) {
              Cuda.setDevice(defaultGpu);
              currentGpuId = defaultGpu;
            }
          } catch (Throwable t) {
            log.error("ERROR TRYING TO SET GPU ID TO " + defaultGpu, t);
          }
          try {
            next.clean();
          } catch (Throwable t) {
            log.error("CAUGHT EXCEPTION WHILE TRYING TO CLEAN " + next, t);
          }
          all.remove(next.cleaner.id);
        }
      }
    } catch (InterruptedException e) {
      // Ignored just exit
    }
  }, "Cleaner Thread");

  /**
   * Default shutdown runnable used to be added to Java default shutdown hook.
   * It checks the leaks at shutdown time.
   */
  private static final Runnable DEFAULT_SHUTDOWN_RUNNABLE = () -> {
    // If we are debugging things do a best effort to check for leaks at the end
    System.gc();
    // Avoid issues on shutdown with the cleaner thread.
    t.interrupt();
    try {
      t.join(1000);
    } catch (InterruptedException e) {
      // Ignored
    }
    if (defaultGpu >= 0) {
      Cuda.setDevice(defaultGpu);
    }
    for (CleanerWeakReference cwr : all.values()) {
      cwr.clean();
    }
  };

  private static final Thread DEFAULT_SHUTDOWN_THREAD = new Thread(DEFAULT_SHUTDOWN_RUNNABLE);

  static {
    t.setDaemon(true);
    t.start();
    if (REF_COUNT_DEBUG) {
      Runtime.getRuntime().addShutdownHook(DEFAULT_SHUTDOWN_THREAD);
    }
  }

  /**
   * De-register the default shutdown hook from Java default Runtime, then return the corresponding
   * shutdown runnable.
   * If you want to register the default shutdown runnable in a custom shutdown hook manager
   * instead of Java default Runtime, should first remove it using this method and then add it
   *
   * @return the default shutdown runnable
   */
  public static Runnable removeDefaultShutdownHook() {
    Runtime.getRuntime().removeShutdownHook(DEFAULT_SHUTDOWN_THREAD);
    return DEFAULT_SHUTDOWN_RUNNABLE;
  }

  static void register(ColumnVector vec, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(vec, cleaner, collected, true));
  }

  static void register(Scalar s, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(s, cleaner, collected, true));
  }

  static void register(HostColumnVectorCore vec, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(vec, cleaner, collected, false));
  }

  static void register(MemoryBuffer buf, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(buf, cleaner, collected, buf instanceof BaseDeviceMemoryBuffer));
  }

  static void register(Cuda.Stream stream, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(stream, cleaner, collected, false));
  }

  static void register(Cuda.Event event, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(event, cleaner, collected, false));
  }

  static void register(CuFileDriver driver, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(driver, cleaner, collected, false));
  }

  static void register(CuFileBuffer buffer, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(buffer, cleaner, collected, false));
  }

  static void register(CuFileHandle handle, Cleaner cleaner) {
    // It is now registered...
    all.put(cleaner.id, new CleanerWeakReference(handle, cleaner, collected, false));
  }

  public static void register(CompiledExpression expr, Cleaner cleaner) {
    all.put(cleaner.id, new CleanerWeakReference(expr, cleaner, collected, false));
  }

  static void register(HashJoin hashJoin, Cleaner cleaner) {
    all.put(cleaner.id, new CleanerWeakReference(hashJoin, cleaner, collected, true));
  }

  /**
   * This is not 100% perfect and we can still run into situations where RMM buffers were not
   * collected and this returns false because of thread race conditions. This is just a best effort.
   * @return true if there are rmm blockers else false.
   */
  static boolean bestEffortHasRmmBlockers() {
    return all.values().stream().anyMatch(cwr -> cwr.isRmmBlocker && !cwr.cleaner.isClean());
  }

  /**
   * Convert elements in it to a String and join them together. Only use for debug messages
   * where the code execution itself can be disabled as this is not fast.
   */
  private static <T> String stringJoin(String delim, Iterable<T> it) {
    return String.join(delim,
        StreamSupport.stream(it.spliterator(), false)
            .map((i) -> i.toString())
            .collect(Collectors.toList()));
  }

  /**
   * When debug is enabled holds information about inc and dec of ref count.
   */
  private static final class RefCountDebugItem {
    final StackTraceElement[] stackTrace;
    final long timeMs;
    final String op;

    public RefCountDebugItem(String op) {
      this.stackTrace = Thread.currentThread().getStackTrace();
      this.timeMs = System.currentTimeMillis();
      this.op = op;
    }

    public String toString() {
      Date date = new Date(timeMs);
      // Simple Date Format is horribly expensive only do this when debug is turned on!
      SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSSS z");
      return dateFormat.format(date) + ": " + op + "\n"
          + stringJoin("\n", Arrays.asList(stackTrace))
          + "\n";
    }
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/MixedJoinSize.java
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/** This class tracks size information associated with a mixed table join. */
public final class MixedJoinSize implements AutoCloseable {
  /** Number of rows the mixed join would produce. */
  private final long outputRowCount;
  // This is in flux, avoid exposing publicly until the dust settles.
  private ColumnVector matchCounts;

  MixedJoinSize(long outputRowCount, ColumnVector matches) {
    this.outputRowCount = outputRowCount;
    this.matchCounts = matches;
  }

  /** Return the number of output rows that would be generated from the mixed join */
  public long getOutputRowCount() {
    return outputRowCount;
  }

  ColumnVector getMatches() {
    return matchCounts;
  }

  @Override
  public synchronized void close() {
    matchCounts.close();
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/MultiBufferDataSource.java
|
/*
*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* This is a DataSource that can take multiple HostMemoryBuffers. They
* are treated as if they are all part of a single file connected end to end.
*/
public class MultiBufferDataSource extends DataSource {
  private final long sizeInBytes;
  private final HostMemoryBuffer[] hostBuffers;
  // startOffsets[i] is the logical file offset at which hostBuffers[i] begins.
  private final long[] startOffsets;
  private final HostMemoryAllocator allocator;
  // Metrics
  private long hostReads = 0;
  private long hostReadBytes = 0;
  private long devReads = 0;
  private long devReadBytes = 0;

  /**
   * Create a new data source backed by multiple buffers.
   * @param buffers the buffers that will back the data source.
   */
  public MultiBufferDataSource(HostMemoryBuffer ... buffers) {
    this(DefaultHostMemoryAllocator.get(), buffers);
  }

  /**
   * Create a new data source backed by multiple buffers.
   * @param allocator the allocator to use for host buffers, if needed.
   * @param buffers the buffers that will back the data source.
   */
  public MultiBufferDataSource(HostMemoryAllocator allocator, HostMemoryBuffer ... buffers) {
    int numBuffers = buffers.length;
    hostBuffers = new HostMemoryBuffer[numBuffers];
    startOffsets = new long[numBuffers];
    long currentOffset = 0;
    for (int i = 0; i < numBuffers; i++) {
      HostMemoryBuffer hmb = buffers[i];
      // Hold a reference on each backing buffer until close().
      hmb.incRefCount();
      hostBuffers[i] = hmb;
      startOffsets[i] = currentOffset;
      currentOffset += hmb.getLength();
    }
    sizeInBytes = currentOffset;
    this.allocator = allocator;
  }

  @Override
  public long size() {
    return sizeInBytes;
  }

  /**
   * Binary search for the index of the buffer containing the given logical offset.
   * @param offset logical offset into the concatenated buffers; must be >= 0.
   * @return index of the buffer whose range [startOffsets[i], startOffsets[i+1]) holds offset
   */
  private int getStartBufferIndexForOffset(long offset) {
    assert (offset >= 0);
    // It is super common to read from the start or end of a file (the header or footer)
    // so special case them
    if (offset == 0) {
      return 0;
    }
    int startIndex = 0;
    int endIndex = startOffsets.length - 1;
    if (offset >= startOffsets[endIndex]) {
      return endIndex;
    }
    while (startIndex != endIndex) {
      int midIndex = (int)(((long)startIndex + endIndex) / 2);
      long midStartOffset = startOffsets[midIndex];
      if (offset >= midStartOffset) {
        // It is either in mid or after mid.
        // The comparison must be strict: an offset equal to startOffsets[midIndex + 1]
        // belongs to the NEXT buffer. The previous `<=` mapped boundary offsets to the
        // already-exhausted buffer, which produced a zero-length first copy in read()
        // and forced hostRead() onto the allocate-and-copy path where a zero-copy
        // slice of the next buffer was correct.
        if (midIndex == endIndex || offset < startOffsets[midIndex + 1]) {
          // We found it in mid
          return midIndex;
        } else {
          // It is after mid
          startIndex = midIndex + 1;
        }
      } else {
        // It is before mid
        endIndex = midIndex - 1;
      }
    }
    return startIndex;
  }

  interface DoCopy<T extends MemoryBuffer> {
    void copyFromHostBuffer(T dest, long destOffset, HostMemoryBuffer src,
                            long srcOffset, long srcAmount);
  }

  /**
   * Copy as much data as fits in dest starting at the given logical offset,
   * walking across backing buffers as needed.
   * @return the number of bytes actually copied (clamped to the remaining size).
   */
  private <T extends MemoryBuffer> long read(long offset, T dest, DoCopy<T> doCopy) {
    assert (offset >= 0);
    long realOffset = Math.min(offset, sizeInBytes);
    long realAmount = Math.min(sizeInBytes - realOffset, dest.getLength());
    int index = getStartBufferIndexForOffset(realOffset);
    HostMemoryBuffer buffer = hostBuffers[index];
    long bufferOffset = realOffset - startOffsets[index];
    long bufferAmount = Math.min(buffer.length - bufferOffset, realAmount);
    long remainingAmount = realAmount;
    long currentOffset = realOffset;
    long outputOffset = 0;
    while (remainingAmount > 0) {
      doCopy.copyFromHostBuffer(dest, outputOffset, buffer,
          bufferOffset, bufferAmount);
      remainingAmount -= bufferAmount;
      outputOffset += bufferAmount;
      currentOffset += bufferAmount;
      index++;
      if (index < hostBuffers.length) {
        buffer = hostBuffers[index];
        bufferOffset = currentOffset - startOffsets[index];
        bufferAmount = Math.min(buffer.length - bufferOffset, remainingAmount);
      }
    }
    return realAmount;
  }

  @Override
  public HostMemoryBuffer hostRead(long offset, long amount) {
    assert (offset >= 0);
    assert (amount >= 0);
    long realOffset = Math.min(offset, sizeInBytes);
    long realAmount = Math.min(sizeInBytes - realOffset, amount);
    int index = getStartBufferIndexForOffset(realOffset);
    HostMemoryBuffer buffer = hostBuffers[index];
    long bufferOffset = realOffset - startOffsets[index];
    long bufferAmount = Math.min(buffer.length - bufferOffset, realAmount);
    if (bufferAmount == realAmount) {
      hostReads += 1;
      hostReadBytes += realAmount;
      // It all fits in a single buffer, so do a zero copy operation
      return buffer.slice(bufferOffset, bufferAmount);
    } else {
      // We will have to allocate a new buffer and copy data into it.
      boolean success = false;
      HostMemoryBuffer ret = allocator.allocate(realAmount, true);
      try {
        long amountRead = read(offset, ret, HostMemoryBuffer::copyFromHostBuffer);
        assert(amountRead == realAmount);
        hostReads += 1;
        hostReadBytes += amountRead;
        success = true;
        return ret;
      } finally {
        if (!success) {
          ret.close();
        }
      }
    }
  }

  @Override
  public long hostRead(long offset, HostMemoryBuffer dest) {
    long ret = read(offset, dest, HostMemoryBuffer::copyFromHostBuffer);
    hostReads += 1;
    hostReadBytes += ret;
    return ret;
  }

  @Override
  public boolean supportsDeviceRead() {
    return true;
  }

  @Override
  public long deviceRead(long offset, DeviceMemoryBuffer dest,
                         Cuda.Stream stream) {
    long ret = read(offset, dest, (destParam, destOffset, src, srcOffset, srcAmount) ->
        destParam.copyFromHostBufferAsync(destOffset, src, srcOffset, srcAmount, stream));
    devReads += 1;
    devReadBytes += ret;
    return ret;
  }

  @Override
  public void close() {
    try {
      super.close();
    } finally {
      // Release our references even if super.close() throws.
      for (HostMemoryBuffer hmb: hostBuffers) {
        if (hmb != null) {
          hmb.close();
        }
      }
    }
  }

  public long getHostReads() {
    return hostReads;
  }

  public long getHostReadBytes() {
    return hostReadBytes;
  }

  public long getDevReads() {
    return devReads;
  }

  public long getDevReadBytes() {
    return devReadBytes;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/NaNEquality.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
* How should NaNs be compared in an operation. In floating point there are multiple
* different binary representations for NaN.
*/
public enum NaNEquality {
  /**
   * No NaN representation is considered equal to any NaN representation, even for the
   * exact same representation.
   */
  UNEQUAL(false),
  /**
   * All representations of NaN are considered to be equal.
   */
  ALL_EQUAL(true);

  /** True when every NaN bit pattern compares equal to every other NaN bit pattern. */
  final boolean nansEqual;

  NaNEquality(boolean nansEqual) {
    this.nansEqual = nansEqual;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/NativeDepsLoader.java
|
/*
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
* This class will load the native dependencies.
*/
public class NativeDepsLoader {
private static final Logger log = LoggerFactory.getLogger(NativeDepsLoader.class);
/**
 * Set this system property to true to prevent unpacked dependency files from
 * being deleted immediately after they are loaded. The files will still be
 * scheduled for deletion upon exit.
 */
private static final Boolean preserveDepsAfterLoad = Boolean.getBoolean(
    "ai.rapids.cudf.preserve-dependencies");
/**
 * Defines the loading order for the dependencies. Dependencies are loaded in
 * stages where all the dependencies in a stage are not interdependent and
 * therefore can be loaded in parallel. All dependencies within an earlier
 * stage are guaranteed to have finished loading before any dependencies in
 * subsequent stages are loaded.
 */
private static final String[][] loadOrder = new String[][]{
    // stage 1: compression library has no dependency on the others
    new String[]{
        "nvcomp"
    },
    // stage 2: libcudf depends on nvcomp
    new String[]{
        "cudf"
    },
    // stage 3: the JNI bindings depend on libcudf
    new String[]{
        "cudfjni"
    }
};
// Class loader used to locate the packaged native libraries on the classpath.
private static final ClassLoader loader = NativeDepsLoader.class.getClassLoader();
// Guarded by the synchronized loadNativeDeps() method below.
private static boolean loaded = false;
/**
* Load the native libraries needed for libcudf, if not loaded already.
*/
public static synchronized void loadNativeDeps() {
if (!loaded) {
try {
loadNativeDeps(loadOrder, preserveDepsAfterLoad);
loaded = true;
} catch (Throwable t) {
log.error("Could not load cudf jni library...", t);
}
}
}
/**
* Allows other libraries to reuse the same native deps loading logic. Libraries will be searched
* for under ${os.arch}/${os.name}/ in the class path using the class loader for this class.
* <br/>
* Because this just loads the libraries and loading the libraries themselves needs to be a
* singleton operation it is recommended that any library using this provide their own wrapper
* function similar to
* <pre>
* private static boolean loaded = false;
* static synchronized void loadNativeDeps() {
* if (!loaded) {
* try {
* // If you also depend on the cudf liobrary being loaded, be sure it is loaded
* // first
* ai.rapids.cudf.NativeDepsLoader.loadNativeDeps();
* ai.rapids.cudf.NativeDepsLoader.loadNativeDeps(new String[]{...});
* loaded = true;
* } catch (Throwable t) {
* log.error("Could not load ...", t);
* }
* }
* }
* </pre>
* This function should be called from the static initialization block of any class that uses
* JNI. For example
* <pre>
* public class UsesJNI {
* static {
* MyNativeDepsLoader.loadNativeDeps();
* }
* }
* </pre>
* @param loadOrder the base name of the libraries. For example libfoo.so would be passed in as
* "foo". The libraries are loaded in the order provided.
* @throws IOException on any error trying to load the libraries.
*/
public static void loadNativeDeps(String[] loadOrder) throws IOException {
loadNativeDeps(loadOrder, preserveDepsAfterLoad);
}
/**
* Allows other libraries to reuse the same native deps loading logic. Libraries will be searched
* for under ${os.arch}/${os.name}/ in the class path using the class loader for this class.
* <br/>
* Because this just loads the libraries and loading the libraries themselves needs to be a
* singleton operation it is recommended that any library using this provide their own wrapper
* function similar to
* <pre>
* private static boolean loaded = false;
* static synchronized void loadNativeDeps() {
* if (!loaded) {
* try {
* // If you also depend on the cudf liobrary being loaded, be sure it is loaded
* // first
* ai.rapids.cudf.NativeDepsLoader.loadNativeDeps();
* ai.rapids.cudf.NativeDepsLoader.loadNativeDeps(new String[]{...});
* loaded = true;
* } catch (Throwable t) {
* log.error("Could not load ...", t);
* }
* }
* }
* </pre>
* This function should be called from the static initialization block of any class that uses
* JNI. For example
* <pre>
* public class UsesJNI {
* static {
* MyNativeDepsLoader.loadNativeDeps();
* }
* }
* </pre>
* @param loadOrder the base name of the libraries. For example libfoo.so would be passed in as
* "foo". The libraries are loaded in the order provided.
* @param preserveDeps if false the dependencies will be deleted immediately after loading
* rather than on exit.
* @throws IOException on any error trying to load the libraries.
*/
public static void loadNativeDeps(String[] loadOrder, boolean preserveDeps) throws IOException {
String os = System.getProperty("os.name");
String arch = System.getProperty("os.arch");
for (String toLoad : loadOrder) {
loadDep(os, arch, toLoad, preserveDeps);
}
}
/**
* Load native dependencies in stages, where the dependency libraries in each stage
* are loaded only after all libraries in earlier stages have completed loading.
* @param loadOrder array of stages with an array of dependency library names in each stage
* @param preserveDeps if false the dependencies will be deleted immediately after loading
* rather than on exit.
* @throws IOException on any error trying to load the libraries
*/
private static void loadNativeDeps(String[][] loadOrder, boolean preserveDeps) throws IOException {
String os = System.getProperty("os.name");
String arch = System.getProperty("os.arch");
ExecutorService executor = Executors.newCachedThreadPool();
List<List<Future<File>>> allFileFutures = new ArrayList<>();
// Start unpacking and creating the temporary files for each dependency.
// Unpacking a dependency does not depend on stage order.
for (String[] stageDependencies : loadOrder) {
List<Future<File>> stageFileFutures = new ArrayList<>();
allFileFutures.add(stageFileFutures);
for (String name : stageDependencies) {
stageFileFutures.add(executor.submit(() -> createFile(os, arch, name)));
}
}
List<Future<?>> loadCompletionFutures = new ArrayList<>();
// Proceed stage-by-stage waiting for the dependency file to have been
// produced then submit them to the thread pool to be loaded.
for (List<Future<File>> stageFileFutures : allFileFutures) {
// Submit all dependencies in the stage to be loaded in parallel
loadCompletionFutures.clear();
for (Future<File> fileFuture : stageFileFutures) {
loadCompletionFutures.add(executor.submit(() -> loadDep(fileFuture, preserveDeps)));
}
// Wait for all dependencies in this stage to have been loaded
for (Future<?> loadCompletionFuture : loadCompletionFutures) {
try {
loadCompletionFuture.get();
} catch (ExecutionException | InterruptedException e) {
throw new IOException("Error loading dependencies", e);
}
}
}
executor.shutdownNow();
}
/**
* Allows other libraries to reuse the same native deps loading logic. Library will be searched
* for under ${os.arch}/${os.name}/ in the class path using the class loader for this class.
* @param depName the base name of the library. For example libfoo.so would be passed in as
* "foo". The libraries are loaded in the order provided.
* @param preserveDep if false the dependencies will be deleted immediately after loading
* rather than on exit.
* @return path where the dependency was loaded
* @throws IOException on any error trying to load the libraries.
*/
public static File loadNativeDep(String depName, boolean preserveDep) throws IOException {
String os = System.getProperty("os.name");
String arch = System.getProperty("os.arch");
return loadDep(os, arch, depName, preserveDep);
}
private static File loadDep(String os, String arch, String baseName, boolean preserveDep)
throws IOException {
File path = createFile(os, arch, baseName);
loadDep(path, preserveDep);
return path;
}
/** Load a library at the specified path */
private static void loadDep(File path, boolean preserveDep) {
System.load(path.getAbsolutePath());
if (!preserveDep) {
path.delete();
}
}
/** Load a library, waiting for the specified future to produce the path before loading */
private static void loadDep(Future<File> fileFuture, boolean preserveDep) {
File path;
try {
path = fileFuture.get();
} catch (ExecutionException | InterruptedException e) {
throw new RuntimeException("Error loading dependencies", e);
}
loadDep(path, preserveDep);
}
/** Extract the contents of a library resource into a temporary file */
private static File createFile(String os, String arch, String baseName) throws IOException {
String path = arch + "/" + os + "/" + System.mapLibraryName(baseName);
File loc;
URL resource = loader.getResource(path);
if (resource == null) {
throw new FileNotFoundException("Could not locate native dependency " + path);
}
try (InputStream in = resource.openStream()) {
loc = File.createTempFile(baseName, ".so");
loc.deleteOnExit();
try (OutputStream out = new FileOutputStream(loc)) {
byte[] buffer = new byte[1024 * 16];
int read = 0;
while ((read = in.read(buffer)) >= 0) {
out.write(buffer, 0, read);
}
}
}
return loc;
}
public static boolean libraryLoaded() {
if (!loaded) {
loadNativeDeps();
}
return loaded;
}
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/NullEquality.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * How should nulls be compared in an operation.
 */
public enum NullEquality {
  UNEQUAL(false),
  EQUAL(true);

  // True when two null values compare as equal to each other.
  final boolean nullsEqual;

  NullEquality(boolean nullsEqual) {
    this.nullsEqual = nullsEqual;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/NullPolicy.java
|
/*
*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.rapids.cudf;
/**
 * Specify whether to include nulls or exclude nulls in an operation.
 */
public enum NullPolicy {
  EXCLUDE(false),
  INCLUDE(true);

  // True when null values participate in the operation.
  final boolean includeNulls;

  NullPolicy(boolean includeNulls) {
    this.includeNulls = includeNulls;
  }
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/NvtxColor.java
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/** Predefined ARGB colors usable for NVTX profiling ranges. */
public enum NvtxColor {
  GREEN(0xff00ff00),
  BLUE(0xff0000ff),
  YELLOW(0xffffff00),
  PURPLE(0xffff00ff),
  CYAN(0xff00ffff),
  RED(0xffff0000),
  WHITE(0xffffffff),
  DARK_GREEN(0xff006600),
  ORANGE(0xffffa500);

  NvtxColor(int colorBits) {
    this.colorBits = colorBits;
  }

  // Packed 0xAARRGGBB value handed to the NVTX API.
  final int colorBits;
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/NvtxRange.java
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * This class supports push/pop NVTX profiling ranges, or "scoped" ranges.
 *
 * The constructor pushes an NVTX range and the close method pops off the most recent range that
 * was pushed. Therefore instances of this class should always be used in a try-with-resources
 * block to guarantee that ranges are always closed in the proper order. For example:
 * <pre>
 *   try (NvtxRange a = new NvtxRange("a", NvtxColor.RED)) {
 *     ...
 *     try (NvtxRange b = new NvtxRange("b", NvtxColor.BLUE)) {
 *       ...
 *     }
 *     ...
 *   }
 * </pre>
 *
 * Instances should be associated with a single thread to avoid pushing an NVTX range in
 * one thread and then trying to pop the range in a different thread.
 *
 * Push/pop ranges show a stacking behavior in tools such as Nsight, where newly pushed
 * ranges are correlated and enclosed by the prior pushed range (in the example above,
 * "b" is enclosed by "a").
 */
public class NvtxRange implements AutoCloseable {
  // All NVTX calls are no-ops unless this property was set at JVM startup.
  private static final boolean isEnabled = Boolean.getBoolean("ai.rapids.cudf.nvtx.enabled");

  static {
    // Only pull in the native libraries when profiling is actually on.
    if (isEnabled) {
      NativeDepsLoader.loadNativeDeps();
    }
  }

  /** Push a range named {@code name} using one of the predefined colors. */
  public NvtxRange(String name, NvtxColor color) {
    this(name, color.colorBits);
  }

  /** Push a range named {@code name} with an explicit 0xAARRGGBB color value. */
  public NvtxRange(String name, int colorBits) {
    if (!isEnabled) {
      return;
    }
    push(name, colorBits);
  }

  /** Push a range without creating an instance; pair with {@link #popRange()}. */
  public static void pushRange(String name, NvtxColor color) {
    if (!isEnabled) {
      return;
    }
    push(name, color.colorBits);
  }

  /** Pop the most recently pushed range on this thread. */
  public static void popRange() {
    if (!isEnabled) {
      return;
    }
    pop();
  }

  /** Pop the range pushed by the constructor. */
  @Override
  public void close() {
    if (!isEnabled) {
      return;
    }
    pop();
  }

  private static native void push(String name, int colorBits);

  private static native void pop();
}
|
0
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids
|
java-sources/ai/rapids/cudf/25.08.0/ai/rapids/cudf/NvtxUniqueRange.java
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.rapids.cudf;
/**
 * This class supports start/end NVTX profiling ranges.
 *
 * Start/end:
 *
 * The constructor instantiates a new NVTX range and keeps a unique handle that comes back
 * from the NVTX api (nvtxRangeId). The handle is used to later close such a range. This type
 * of range does not have the same order-of-operation requirements that the push/pop ranges have:
 * the `NvtxUniqueRange` instance can be passed to other scopes, and even to other threads
 * for the eventual call to close.
 *
 * It can be used in the same try-with-resources way as push/pop, or interleaved with other
 * ranges, like so:
 *
 * <pre>
 *   NvtxUniqueRange a = new NvtxUniqueRange("a", NvtxColor.RED);
 *   NvtxUniqueRange b = new NvtxUniqueRange("b", NvtxColor.BLUE);
 *   a.close();
 *   b.close();
 * </pre>
 */
public class NvtxUniqueRange implements AutoCloseable {
  // All NVTX calls are no-ops unless this property was set at JVM startup.
  private static final boolean isEnabled = Boolean.getBoolean("ai.rapids.cudf.nvtx.enabled");

  // this is a nvtxRangeId_t in the C++ api side
  private final long nvtxRangeId;

  // true if this range is already closed
  private boolean closed;

  static {
    // Only pull in the native libraries when profiling is actually on.
    if (isEnabled) {
      NativeDepsLoader.loadNativeDeps();
    }
  }

  /** Start a range named {@code name} using one of the predefined colors. */
  public NvtxUniqueRange(String name, NvtxColor color) {
    this(name, color.colorBits);
  }

  /** Start a range named {@code name} with an explicit 0xAARRGGBB color value. */
  public NvtxUniqueRange(String name, int colorBits) {
    // Following the implementation in nvtx3, the default value of 0
    // is given when NVTX is disabled.
    this.nvtxRangeId = isEnabled ? start(name, colorBits) : 0;
  }

  /**
   * End this range. May be called from any thread, but only once.
   * @throws IllegalStateException if the range was already closed.
   */
  @Override
  public synchronized void close() {
    if (closed) {
      throw new IllegalStateException(
          "Cannot call close on an already closed NvtxUniqueRange!");
    }
    closed = true;
    if (isEnabled) {
      end(this.nvtxRangeId);
    }
  }

  private native long start(String name, int colorBits);

  private native void end(long nvtxRangeId);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.