| index (int64) | repo_id (string) | file_path (string) | content (string) |
|---|---|---|---|
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/MultiVersionCursor.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
* The cursor implementation for the multi-version index.
*/
public class MultiVersionCursor implements Cursor {
private final MultiVersionIndex index;
private final Session session;
private final Cursor baseCursor, deltaCursor;
private final Object sync;
private SearchRow baseRow;
private Row deltaRow;
private boolean onBase;
private boolean end;
private boolean needNewDelta, needNewBase;
private boolean reverse;
MultiVersionCursor(Session session, MultiVersionIndex index, Cursor base,
Cursor delta, Object sync) {
this.session = session;
this.index = index;
this.baseCursor = base;
this.deltaCursor = delta;
this.sync = sync;
needNewDelta = true;
needNewBase = true;
}
/**
* Load the current row.
*/
void loadCurrent() {
synchronized (sync) {
baseRow = baseCursor.getSearchRow();
deltaRow = deltaCursor.get();
needNewDelta = false;
needNewBase = false;
}
}
private void loadNext(boolean base) {
synchronized (sync) {
if (base) {
if (step(baseCursor)) {
baseRow = baseCursor.getSearchRow();
} else {
baseRow = null;
}
} else {
if (step(deltaCursor)) {
deltaRow = deltaCursor.get();
} else {
deltaRow = null;
}
}
}
}
private boolean step(Cursor cursor) {
return reverse ? cursor.previous() : cursor.next();
}
@Override
public Row get() {
synchronized (sync) {
if (end) {
return null;
}
return onBase ? baseCursor.get() : deltaCursor.get();
}
}
@Override
public SearchRow getSearchRow() {
synchronized (sync) {
if (end) {
return null;
}
return onBase ? baseCursor.getSearchRow() : deltaCursor.getSearchRow();
}
}
@Override
public boolean next() {
synchronized (sync) {
if (SysProperties.CHECK && end) {
DbException.throwInternalError();
}
while (true) {
if (needNewDelta) {
loadNext(false);
needNewDelta = false;
}
if (needNewBase) {
loadNext(true);
needNewBase = false;
}
if (deltaRow == null) {
if (baseRow == null) {
end = true;
return false;
}
onBase = true;
needNewBase = true;
return true;
}
int sessionId = deltaRow.getSessionId();
boolean isThisSession = sessionId == session.getId();
boolean isDeleted = deltaRow.isDeleted();
if (isThisSession && isDeleted) {
needNewDelta = true;
continue;
}
if (baseRow == null) {
if (isDeleted) {
if (isThisSession) {
end = true;
return false;
}
// the row was deleted by another session: return it
onBase = false;
needNewDelta = true;
return true;
}
DbException.throwInternalError();
}
int compare = index.compareRows(deltaRow, baseRow);
if (compare == 0) {
// can't use compareKeys because the
// version would be compared as well
long k1 = deltaRow.getKey();
long k2 = baseRow.getKey();
compare = Long.compare(k1, k2);
}
if (compare == 0) {
if (isDeleted) {
if (isThisSession) {
DbException.throwInternalError();
}
// another session updated the row
} else {
if (isThisSession) {
onBase = false;
needNewBase = true;
needNewDelta = true;
return true;
}
// another session inserted the row: ignore
needNewBase = true;
needNewDelta = true;
continue;
}
}
if (compare > 0) {
onBase = true;
needNewBase = true;
return true;
}
onBase = false;
needNewDelta = true;
return true;
}
}
}
@Override
public boolean previous() {
reverse = true;
try {
return next();
} finally {
reverse = false;
}
}
}
|
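The next() method above is essentially a two-way merge of two sorted streams, with the delta (uncommitted) entry shadowing the base entry when both carry the same key. The following self-contained sketch (not H2 code; all names hypothetical) shows the same pattern in isolation:

// Minimal sketch of the merge pattern implemented by next() above: walk two
// sorted streams in lock-step, and let the delta entry shadow the base entry
// when keys collide.
class MergeSketch {

    // Prints the merge of two sorted key arrays; delta wins on ties.
    static void merge(int[] base, int[] delta) {
        int b = 0, d = 0;
        while (b < base.length || d < delta.length) {
            if (b >= base.length) {
                System.out.println("delta " + delta[d++]); // base exhausted
            } else if (d >= delta.length) {
                System.out.println("base  " + base[b++]);  // delta exhausted
            } else if (delta[d] < base[b]) {
                System.out.println("delta " + delta[d++]);
            } else if (base[b] < delta[d]) {
                System.out.println("base  " + base[b++]);
            } else {
                // same key: the uncommitted delta version shadows the base row
                System.out.println("delta " + delta[d++]);
                b++;
            }
        }
    }

    public static void main(String[] args) {
        merge(new int[] {1, 3, 5}, new int[] {2, 3, 6});
    }
}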
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/MultiVersionIndex.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.ArrayList;
import java.util.HashSet;
import org.h2.api.ErrorCode;
import org.h2.engine.Database;
import org.h2.engine.DbObject;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.schema.Schema;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.Table;
import org.h2.table.TableFilter;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
* A multi-version index is a combination of a regular index
* and an in-memory tree index that contains uncommitted changes.
* Uncommitted changes can include new rows and deleted rows.
*/
public class MultiVersionIndex implements Index {
private final Index base;
private final TreeIndex delta;
private final RegularTable table;
private final Object sync;
private final Column firstColumn;
public MultiVersionIndex(Index base, RegularTable table) {
this.base = base;
this.table = table;
IndexType deltaIndexType = IndexType.createNonUnique(false);
if (base instanceof SpatialIndex) {
throw DbException.get(ErrorCode.FEATURE_NOT_SUPPORTED_1,
"MVCC & spatial index");
}
this.delta = new TreeIndex(table, -1, "DELTA", base.getIndexColumns(),
deltaIndexType);
delta.setMultiVersion(true);
this.sync = base.getDatabase();
this.firstColumn = base.getColumns()[0];
}
@Override
public void add(Session session, Row row) {
synchronized (sync) {
base.add(session, row);
if (removeIfExists(session, row)) {
// for example rolling back a delete operation
} else if (row.getSessionId() != 0) {
// don't insert rows that are added when creating an index
delta.add(session, row);
}
}
}
@Override
public void close(Session session) {
synchronized (sync) {
base.close(session);
}
}
@Override
public boolean isFindUsingFullTableScan() {
return base.isFindUsingFullTableScan();
}
@Override
public Cursor find(TableFilter filter, SearchRow first, SearchRow last) {
synchronized (sync) {
Cursor baseCursor = base.find(filter, first, last);
Cursor deltaCursor = delta.find(filter, first, last);
return new MultiVersionCursor(filter.getSession(), this,
baseCursor, deltaCursor, sync);
}
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
synchronized (sync) {
Cursor baseCursor = base.find(session, first, last);
Cursor deltaCursor = delta.find(session, first, last);
return new MultiVersionCursor(session, this, baseCursor, deltaCursor, sync);
}
}
@Override
public Cursor findNext(Session session, SearchRow first, SearchRow last) {
throw DbException.throwInternalError(toString());
}
@Override
public boolean canFindNext() {
// TODO possible, but more complicated
return false;
}
@Override
public boolean canGetFirstOrLast() {
return base.canGetFirstOrLast() && delta.canGetFirstOrLast();
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
if (first) {
// TODO optimization: this loops through NULL elements
Cursor cursor = find(session, null, null);
while (cursor.next()) {
SearchRow row = cursor.getSearchRow();
Value v = row.getValue(firstColumn.getColumnId());
if (v != ValueNull.INSTANCE) {
return cursor;
}
}
return cursor;
}
Cursor baseCursor = base.findFirstOrLast(session, false);
Cursor deltaCursor = delta.findFirstOrLast(session, false);
MultiVersionCursor cursor = new MultiVersionCursor(session, this,
baseCursor, deltaCursor, sync);
cursor.loadCurrent();
// TODO optimization: this loops through NULL elements
while (cursor.previous()) {
SearchRow row = cursor.getSearchRow();
if (row == null) {
break;
}
Value v = row.getValue(firstColumn.getColumnId());
if (v != ValueNull.INSTANCE) {
return cursor;
}
}
return cursor;
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return base.getCost(session, masks, filters, filter, sortOrder, allColumnsSet);
}
@Override
public boolean needRebuild() {
return base.needRebuild();
}
/**
* Check if there is an uncommitted row with the given key
* in a different session.
*
* @param session the original session
* @param row the row (only the key is checked)
* @return true if there is an uncommitted row
*/
public boolean isUncommittedFromOtherSession(Session session, Row row) {
Cursor c = delta.find(session, row, row);
while (c.next()) {
Row r = c.get();
return r.getSessionId() != session.getId();
}
return false;
}
private boolean removeIfExists(Session session, Row row) {
// maybe it was inserted by the same session just before
Cursor c = delta.find(session, row, row);
while (c.next()) {
Row r = c.get();
if (r.getKey() == row.getKey() && r.getVersion() == row.getVersion()) {
if (r != row && table.getScanIndex(session).compareRows(r, row) != 0) {
row.setVersion(r.getVersion() + 1);
} else {
delta.remove(session, r);
return true;
}
}
}
return false;
}
@Override
public void remove(Session session, Row row) {
synchronized (sync) {
base.remove(session, row);
if (removeIfExists(session, row)) {
// added and deleted in the same transaction: no change
} else {
delta.add(session, row);
}
}
}
@Override
public void remove(Session session) {
synchronized (sync) {
base.remove(session);
}
}
@Override
public void truncate(Session session) {
synchronized (sync) {
delta.truncate(session);
base.truncate(session);
}
}
@Override
public void commit(int operation, Row row) {
synchronized (sync) {
removeIfExists(null, row);
}
}
@Override
public int compareRows(SearchRow rowData, SearchRow compare) {
return base.compareRows(rowData, compare);
}
@Override
public int getColumnIndex(Column col) {
return base.getColumnIndex(col);
}
@Override
public boolean isFirstColumn(Column column) {
return base.isFirstColumn(column);
}
@Override
public Column[] getColumns() {
return base.getColumns();
}
@Override
public IndexColumn[] getIndexColumns() {
return base.getIndexColumns();
}
@Override
public String getCreateSQL() {
return base.getCreateSQL();
}
@Override
public String getCreateSQLForCopy(Table forTable, String quotedName) {
return base.getCreateSQLForCopy(forTable, quotedName);
}
@Override
public String getDropSQL() {
return base.getDropSQL();
}
@Override
public IndexType getIndexType() {
return base.getIndexType();
}
@Override
public String getPlanSQL() {
return base.getPlanSQL();
}
@Override
public long getRowCount(Session session) {
return base.getRowCount(session);
}
@Override
public Table getTable() {
return base.getTable();
}
@Override
public int getType() {
return base.getType();
}
@Override
public void removeChildrenAndResources(Session session) {
synchronized (sync) {
table.removeIndex(this);
remove(session);
}
}
@Override
public String getSQL() {
return base.getSQL();
}
@Override
public Schema getSchema() {
return base.getSchema();
}
@Override
public void checkRename() {
base.checkRename();
}
@Override
public ArrayList<DbObject> getChildren() {
return base.getChildren();
}
@Override
public String getComment() {
return base.getComment();
}
@Override
public Database getDatabase() {
return base.getDatabase();
}
@Override
public int getId() {
return base.getId();
}
@Override
public String getName() {
return base.getName();
}
@Override
public boolean isTemporary() {
return base.isTemporary();
}
@Override
public void rename(String newName) {
base.rename(newName);
}
@Override
public void setComment(String comment) {
base.setComment(comment);
}
@Override
public void setTemporary(boolean temporary) {
base.setTemporary(temporary);
}
@Override
public long getRowCountApproximation() {
return base.getRowCountApproximation();
}
@Override
public long getDiskSpaceUsed() {
return base.getDiskSpaceUsed();
}
public Index getBaseIndex() {
return base;
}
@Override
public Row getRow(Session session, long key) {
return base.getRow(session, key);
}
@Override
public boolean isHidden() {
return base.isHidden();
}
@Override
public boolean isRowIdIndex() {
return base.isRowIdIndex() && delta.isRowIdIndex();
}
@Override
public boolean canScan() {
return base.canScan();
}
@Override
public void setSortedInsertMode(boolean sortedInsertMode) {
base.setSortedInsertMode(sortedInsertMode);
delta.setSortedInsertMode(sortedInsertMode);
}
@Override
public IndexLookupBatch createLookupBatch(TableFilter[] filters, int filter) {
// Lookup batching is not supported.
return null;
}
}
|
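At the SQL level, the visibility rules this index implements can be observed with two connections. A minimal runnable sketch, assuming the H2 1.4.196 jar on the classpath and the legacy MVCC=TRUE URL setting (both assumptions, not shown in the source above):

// A session sees its own uncommitted rows (the delta index), while other
// sessions see only the committed base data.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

class MvccVisibilityDemo {

    public static void main(String[] args) throws Exception {
        String url = "jdbc:h2:mem:demo;MVCC=TRUE;DB_CLOSE_DELAY=-1";
        try (Connection a = DriverManager.getConnection(url, "sa", "");
                Connection b = DriverManager.getConnection(url, "sa", "")) {
            a.createStatement().execute("CREATE TABLE t(id INT PRIMARY KEY)");
            a.setAutoCommit(false);
            a.createStatement().execute("INSERT INTO t VALUES(1)"); // uncommitted
            System.out.println(count(a)); // 1: own delta row is visible
            System.out.println(count(b)); // 0: other session sees base only
            a.commit();
            System.out.println(count(b)); // 1: row is now in the base index
        }
    }

    static int count(Connection c) throws SQLException {
        try (ResultSet rs = c.createStatement()
                .executeQuery("SELECT COUNT(*) FROM t")) {
            rs.next();
            return rs.getInt(1);
        }
    }
}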
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/NonUniqueHashCursor.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.ArrayList;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.table.RegularTable;
/**
* Cursor implementation for non-unique hash index
*
* @author Sergi Vladykin
*/
public class NonUniqueHashCursor implements Cursor {
private final Session session;
private final ArrayList<Long> positions;
private final RegularTable tableData;
private int index = -1;
public NonUniqueHashCursor(Session session, RegularTable tableData,
ArrayList<Long> positions) {
this.session = session;
this.tableData = tableData;
this.positions = positions;
}
@Override
public Row get() {
if (index < 0 || index >= positions.size()) {
return null;
}
return tableData.getRow(session, positions.get(index));
}
@Override
public SearchRow getSearchRow() {
return get();
}
@Override
public boolean next() {
return positions != null && ++index < positions.size();
}
@Override
public boolean previous() {
return positions != null && --index >= 0;
}
}
|
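Like the other cursors in this package, the class above starts positioned one slot before the first element (index = -1), so next() must be called once before get() returns anything. A stripped-down sketch of that contract (hypothetical class, plain JDK types):

import java.util.Arrays;
import java.util.List;

class ListCursorSketch {
    private final List<Long> positions;
    private int index = -1; // before the first element, as in the class above

    ListCursorSketch(List<Long> positions) {
        this.positions = positions;
    }

    Long get() {
        if (index < 0 || index >= positions.size()) {
            return null; // not positioned on a row
        }
        return positions.get(index);
    }

    boolean next() {
        return ++index < positions.size();
    }

    public static void main(String[] args) {
        ListCursorSketch c = new ListCursorSketch(Arrays.asList(10L, 20L, 30L));
        while (c.next()) {
            System.out.println(c.get()); // 10, 20, 30
        }
    }
}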
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/NonUniqueHashIndex.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.ArrayList;
import java.util.HashSet;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.TableFilter;
import org.h2.util.New;
import org.h2.util.ValueHashMap;
import org.h2.value.Value;
/**
* A non-unique index based on an in-memory hash map.
*
* @author Sergi Vladykin
*/
public class NonUniqueHashIndex extends BaseIndex {
/**
* The index of the indexed column.
*/
private final int indexColumn;
private ValueHashMap<ArrayList<Long>> rows;
private final RegularTable tableData;
private long rowCount;
public NonUniqueHashIndex(RegularTable table, int id, String indexName,
IndexColumn[] columns, IndexType indexType) {
initBaseIndex(table, id, indexName, columns, indexType);
this.indexColumn = columns[0].column.getColumnId();
this.tableData = table;
reset();
}
private void reset() {
rows = ValueHashMap.newInstance();
rowCount = 0;
}
@Override
public void truncate(Session session) {
reset();
}
@Override
public void add(Session session, Row row) {
Value key = row.getValue(indexColumn);
ArrayList<Long> positions = rows.get(key);
if (positions == null) {
positions = New.arrayList();
rows.put(key, positions);
}
positions.add(row.getKey());
rowCount++;
}
@Override
public void remove(Session session, Row row) {
if (rowCount == 1) {
// last row in table
reset();
} else {
Value key = row.getValue(indexColumn);
ArrayList<Long> positions = rows.get(key);
if (positions.size() == 1) {
// last row with such key
rows.remove(key);
} else {
positions.remove(row.getKey());
}
rowCount--;
}
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
if (first == null || last == null) {
throw DbException.throwInternalError(first + " " + last);
}
if (first != last) {
if (compareKeys(first, last) != 0) {
throw DbException.throwInternalError();
}
}
Value v = first.getValue(indexColumn);
/*
* Sometimes the incoming search key has a similar, but not identical,
* type: for example, the search value is an INT while the index column
* is a LONG. In that case we need to convert, otherwise the
* ValueHashMap will not find the result.
*/
v = v.convertTo(tableData.getColumn(indexColumn).getType());
ArrayList<Long> positions = rows.get(v);
return new NonUniqueHashCursor(session, tableData, positions);
}
@Override
public long getRowCount(Session session) {
return rowCount;
}
@Override
public long getRowCountApproximation() {
return rowCount;
}
@Override
public long getDiskSpaceUsed() {
return 0;
}
@Override
public void close(Session session) {
// nothing to do
}
@Override
public void remove(Session session) {
// nothing to do
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
for (Column column : columns) {
int index = column.getColumnId();
int mask = masks[index];
if ((mask & IndexCondition.EQUALITY) != IndexCondition.EQUALITY) {
return Long.MAX_VALUE;
}
}
return 2;
}
@Override
public void checkRename() {
// ok
}
@Override
public boolean needRebuild() {
return true;
}
@Override
public boolean canGetFirstOrLast() {
return false;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
throw DbException.getUnsupportedException("HASH");
}
@Override
public boolean canScan() {
return false;
}
}
|
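The core data structure above is a map from column value to the list of row keys. A minimal sketch with plain JDK collections (HashMap instead of H2's ValueHashMap; all names hypothetical). Note the same subtlety as in remove(Session, Row) above: List.remove must receive a boxed Long so it removes by value rather than by index:

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class HashIndexSketch {
    private final Map<Object, List<Long>> rows = new HashMap<>();

    void add(Object key, long rowKey) {
        rows.computeIfAbsent(key, k -> new ArrayList<>()).add(rowKey);
    }

    void remove(Object key, long rowKey) {
        List<Long> positions = rows.get(key);
        if (positions != null) {
            positions.remove(Long.valueOf(rowKey)); // remove by value
            if (positions.isEmpty()) {
                rows.remove(key); // last row with this key
            }
        }
    }

    List<Long> find(Object key) {
        return rows.getOrDefault(key, Collections.emptyList());
    }

    public static void main(String[] args) {
        HashIndexSketch idx = new HashIndexSketch();
        idx.add("a", 1L);
        idx.add("a", 2L);
        idx.add("b", 3L);
        idx.remove("a", 1L);
        System.out.println(idx.find("a")); // [2]
        System.out.println(idx.find("c")); // []
    }
}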
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageBtree.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.result.SearchRow;
import org.h2.store.Data;
import org.h2.store.Page;
/**
* A page that contains index data.
*/
public abstract class PageBtree extends Page {
/**
* This is a root page.
*/
static final int ROOT = 0;
/**
* Indicator that the row count is not known.
*/
static final int UNKNOWN_ROWCOUNT = -1;
/**
* The index.
*/
protected final PageBtreeIndex index;
/**
* The page number of the parent.
*/
protected int parentPageId;
/**
* The data page.
*/
protected final Data data;
/**
* The row offsets.
*/
protected int[] offsets;
/**
* The number of entries.
*/
protected int entryCount;
/**
* The index data.
*/
protected SearchRow[] rows;
/**
* The start of the data area.
*/
protected int start;
/**
* Whether only the position of the row is stored in the page.
*/
protected boolean onlyPosition;
/**
* Whether the data page is up-to-date.
*/
protected boolean written;
/**
* The estimated memory used by this object.
*/
private final int memoryEstimated;
PageBtree(PageBtreeIndex index, int pageId, Data data) {
this.index = index;
this.data = data;
setPos(pageId);
memoryEstimated = index.getMemoryPerPage();
}
/**
* Get the real row count. If required, this will read all child pages.
*
* @return the row count
*/
abstract int getRowCount();
/**
* Set the stored row count. This will write the page.
*
* @param rowCount the stored row count
*/
abstract void setRowCountStored(int rowCount);
/**
* Find an entry.
*
* @param compare the row
* @param bigger if looking for a larger row
* @param add if the row should be added (check for duplicate keys)
* @param compareKeys compare the row keys as well
* @return the index of the found row
*/
int find(SearchRow compare, boolean bigger, boolean add, boolean compareKeys) {
if (compare == null) {
return 0;
}
int l = 0, r = entryCount;
int comp = 1;
while (l < r) {
int i = (l + r) >>> 1;
SearchRow row = getRow(i);
comp = index.compareRows(row, compare);
if (comp == 0) {
if (add && index.indexType.isUnique()) {
if (!index.mayHaveNullDuplicates(compare)) {
throw index.getDuplicateKeyException(compare.toString());
}
}
if (compareKeys) {
comp = index.compareKeys(row, compare);
if (comp == 0) {
return i;
}
}
}
if (comp > 0 || (!bigger && comp == 0)) {
r = i;
} else {
l = i + 1;
}
}
return l;
}
/**
* Add a row if possible. If there is room, the row is added and -1 is
* returned; otherwise the split point is returned. It is always possible
* to add one row.
*
* @param row the row to add
* @return the split point of this page, or -1 if no split is required
*/
abstract int addRowTry(SearchRow row);
/**
* Find the first row.
*
* @param cursor the cursor
* @param first the row to find
* @param bigger if the row should be bigger
*/
abstract void find(PageBtreeCursor cursor, SearchRow first, boolean bigger);
/**
* Find the last row.
*
* @param cursor the cursor
*/
abstract void last(PageBtreeCursor cursor);
/**
* Get the row at this position.
*
* @param at the index
* @return the row
*/
SearchRow getRow(int at) {
SearchRow row = rows[at];
if (row == null) {
row = index.readRow(data, offsets[at], onlyPosition, true);
memoryChange();
rows[at] = row;
} else if (!index.hasData(row)) {
row = index.readRow(row.getKey());
memoryChange();
rows[at] = row;
}
return row;
}
/**
* The memory usage of this page was changed. Propagate the change if
* needed.
*/
protected void memoryChange() {
// nothing to do
}
/**
* Split the index page at the given point.
*
* @param splitPoint the index where to split
* @return the new page that contains about half the entries
*/
abstract PageBtree split(int splitPoint);
/**
* Change the page id.
*
* @param id the new page id
*/
void setPageId(int id) {
changeCount = index.getPageStore().getChangeCount();
written = false;
index.getPageStore().removeFromCache(getPos());
setPos(id);
index.getPageStore().logUndo(this, null);
remapChildren();
}
/**
* Get the first child leaf page of a page.
*
* @return the page
*/
abstract PageBtreeLeaf getFirstLeaf();
/**
* Get the first child leaf page of a page.
*
* @return the page
*/
abstract PageBtreeLeaf getLastLeaf();
/**
* Change the parent page id.
*
* @param id the new parent page id
*/
void setParentPageId(int id) {
index.getPageStore().logUndo(this, data);
changeCount = index.getPageStore().getChangeCount();
written = false;
parentPageId = id;
}
/**
* Update the parent id of all children.
*/
abstract void remapChildren();
/**
* Remove a row.
*
* @param row the row to remove
* @return null if the last row didn't change,
* the deleted row if the page is now empty,
* otherwise the new last row of this page
*/
abstract SearchRow remove(SearchRow row);
/**
* Free this page and all child pages.
*/
abstract void freeRecursive();
/**
* Ensure all rows are read in memory.
*/
protected void readAllRows() {
for (int i = 0; i < entryCount; i++) {
SearchRow row = rows[i];
if (row == null) {
row = index.readRow(data, offsets[i], onlyPosition, false);
rows[i] = row;
}
}
}
/**
* Get the estimated memory size.
*
* @return number of double words (4 bytes)
*/
@Override
public int getMemory() {
// need to always return the same value for the same object (otherwise
// the cache size would change after adding and then removing the same
// page from the cache) but index.getMemoryPerPage() can adapt according
// to how much memory a row needs on average
return memoryEstimated;
}
@Override
public boolean canRemove() {
return changeCount < index.getPageStore().getChangeCount();
}
}
|
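The find() method above is a binary search that returns the insertion point (the index of the first entry not smaller than the search row) rather than failing when the row is absent. The same loop over a plain int array (hypothetical names, no H2 types):

class BtreeFindSketch {

    static int find(int[] sorted, int key) {
        int l = 0, r = sorted.length;
        while (l < r) {
            int i = (l + r) >>> 1; // unsigned shift: no overflow for large l + r
            if (sorted[i] >= key) {
                r = i;
            } else {
                l = i + 1;
            }
        }
        return l;
    }

    public static void main(String[] args) {
        int[] a = {2, 4, 4, 8};
        System.out.println(find(a, 4)); // 1: first occurrence of 4
        System.out.println(find(a, 5)); // 3: insertion point for missing key
        System.out.println(find(a, 9)); // 4: past the last entry
    }
}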
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageBtreeCursor.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
* The cursor implementation for the page b-tree index.
*/
public class PageBtreeCursor implements Cursor {
private final Session session;
private final PageBtreeIndex index;
private final SearchRow last;
private PageBtreeLeaf current;
private int i;
private SearchRow currentSearchRow;
private Row currentRow;
PageBtreeCursor(Session session, PageBtreeIndex index, SearchRow last) {
this.session = session;
this.index = index;
this.last = last;
}
/**
* Set the position of the current row.
*
* @param current the leaf page
* @param i the index within the page
*/
void setCurrent(PageBtreeLeaf current, int i) {
this.current = current;
this.i = i;
}
@Override
public Row get() {
if (currentRow == null && currentSearchRow != null) {
currentRow = index.getRow(session, currentSearchRow.getKey());
}
return currentRow;
}
@Override
public SearchRow getSearchRow() {
return currentSearchRow;
}
@Override
public boolean next() {
if (current == null) {
return false;
}
if (i >= current.getEntryCount()) {
current.nextPage(this);
if (current == null) {
return false;
}
}
currentSearchRow = current.getRow(i);
currentRow = null;
if (last != null && index.compareRows(currentSearchRow, last) > 0) {
currentSearchRow = null;
return false;
}
i++;
return true;
}
@Override
public boolean previous() {
if (current == null) {
return false;
}
if (i < 0) {
current.previousPage(this);
if (current == null) {
return false;
}
}
currentSearchRow = current.getRow(i);
currentRow = null;
i--;
return true;
}
}
|
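get() above materializes the full row lazily: the cursor carries only the cheap SearchRow and fetches the complete row from the table on first access. A generic sketch of that caching pattern (hypothetical types, not H2 API):

import java.util.function.LongFunction;

class LazyRowSketch {
    private final long key;                    // stands in for the search row key
    private final LongFunction<String> loader; // stands in for index.getRow(...)
    private String row;                        // cached full row, null until used

    LazyRowSketch(long key, LongFunction<String> loader) {
        this.key = key;
        this.loader = loader;
    }

    String get() {
        if (row == null) {
            row = loader.apply(key); // loaded at most once
        }
        return row;
    }

    public static void main(String[] args) {
        LazyRowSketch cursor = new LazyRowSketch(42L, k -> {
            System.out.println("loading row " + k);
            return "row-" + k;
        });
        System.out.println(cursor.get()); // triggers the load
        System.out.println(cursor.get()); // served from the cache
    }
}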
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageBtreeIndex.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.HashSet;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.store.Data;
import org.h2.store.Page;
import org.h2.store.PageStore;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.TableFilter;
import org.h2.util.MathUtils;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
* This is the most common type of index, a b tree index.
* Only the data of the indexed columns are stored in the index.
*/
public class PageBtreeIndex extends PageIndex {
private static int memoryChangeRequired;
private final PageStore store;
private final RegularTable tableData;
private final boolean needRebuild;
private long rowCount;
private int memoryPerPage;
private int memoryCount;
public PageBtreeIndex(RegularTable table, int id, String indexName,
IndexColumn[] columns,
IndexType indexType, boolean create, Session session) {
initBaseIndex(table, id, indexName, columns, indexType);
if (!database.isStarting() && create) {
checkIndexColumnTypes(columns);
}
tableData = table;
if (!database.isPersistent() || id < 0) {
throw DbException.throwInternalError("" + indexName);
}
this.store = database.getPageStore();
store.addIndex(this);
if (create) {
// new index
rootPageId = store.allocatePage();
// TODO currently the head position is stored in the log
// it should not for new tables, otherwise redo of other operations
// must ensure this page is not used for other things
store.addMeta(this, session);
PageBtreeLeaf root = PageBtreeLeaf.create(this, rootPageId, PageBtree.ROOT);
store.logUndo(root, null);
store.update(root);
} else {
rootPageId = store.getRootPageId(id);
PageBtree root = getPage(rootPageId);
rowCount = root.getRowCount();
}
this.needRebuild = create || (rowCount == 0 && store.isRecoveryRunning());
if (trace.isDebugEnabled()) {
trace.debug("opened {0} rows: {1}", getName() , rowCount);
}
memoryPerPage = (Constants.MEMORY_PAGE_BTREE + store.getPageSize()) >> 2;
}
@Override
public void add(Session session, Row row) {
if (trace.isDebugEnabled()) {
trace.debug("{0} add {1}", getName(), row);
}
// save memory: only the indexed columns are kept, not the full row
SearchRow newRow = getSearchRow(row);
try {
addRow(newRow);
} finally {
store.incrementChangeCount();
}
}
private void addRow(SearchRow newRow) {
while (true) {
PageBtree root = getPage(rootPageId);
int splitPoint = root.addRowTry(newRow);
if (splitPoint == -1) {
break;
}
if (trace.isDebugEnabled()) {
trace.debug("split {0}", splitPoint);
}
SearchRow pivot = root.getRow(splitPoint - 1);
store.logUndo(root, root.data);
PageBtree page1 = root;
PageBtree page2 = root.split(splitPoint);
store.logUndo(page2, null);
int id = store.allocatePage();
page1.setPageId(id);
page1.setParentPageId(rootPageId);
page2.setParentPageId(rootPageId);
PageBtreeNode newRoot = PageBtreeNode.create(
this, rootPageId, PageBtree.ROOT);
store.logUndo(newRoot, null);
newRoot.init(page1, pivot, page2);
store.update(page1);
store.update(page2);
store.update(newRoot);
root = newRoot;
}
invalidateRowCount();
rowCount++;
}
/**
* Create a search row for this row.
*
* @param row the row
* @return the search row
*/
private SearchRow getSearchRow(Row row) {
SearchRow r = table.getTemplateSimpleRow(columns.length == 1);
r.setKeyAndVersion(row);
for (Column c : columns) {
int idx = c.getColumnId();
r.setValue(idx, row.getValue(idx));
}
return r;
}
/**
* Read the given page.
*
* @param id the page id
* @return the page
*/
PageBtree getPage(int id) {
Page p = store.getPage(id);
if (p == null) {
PageBtreeLeaf empty = PageBtreeLeaf.create(this, id, PageBtree.ROOT);
// could have been created before, but never committed
store.logUndo(empty, null);
store.update(empty);
return empty;
} else if (!(p instanceof PageBtree)) {
throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "" + p);
}
return (PageBtree) p;
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public Cursor findNext(Session session, SearchRow first, SearchRow last) {
return find(session, first, true, last);
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
return find(session, first, false, last);
}
private Cursor find(Session session, SearchRow first, boolean bigger,
SearchRow last) {
if (SysProperties.CHECK && store == null) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
PageBtree root = getPage(rootPageId);
PageBtreeCursor cursor = new PageBtreeCursor(session, this, last);
root.find(cursor, first, bigger);
return cursor;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
if (first) {
// TODO optimization: this loops through NULL elements
Cursor cursor = find(session, null, false, null);
while (cursor.next()) {
SearchRow row = cursor.getSearchRow();
Value v = row.getValue(columnIds[0]);
if (v != ValueNull.INSTANCE) {
return cursor;
}
}
return cursor;
}
PageBtree root = getPage(rootPageId);
PageBtreeCursor cursor = new PageBtreeCursor(session, this, null);
root.last(cursor);
cursor.previous();
// TODO optimization: this loops through NULL elements
do {
SearchRow row = cursor.getSearchRow();
if (row == null) {
break;
}
Value v = row.getValue(columnIds[0]);
if (v != ValueNull.INSTANCE) {
return cursor;
}
} while (cursor.previous());
return cursor;
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return 10 * getCostRangeIndex(masks, tableData.getRowCount(session),
filters, filter, sortOrder, false, allColumnsSet);
}
@Override
public boolean needRebuild() {
return needRebuild;
}
@Override
public void remove(Session session, Row row) {
if (trace.isDebugEnabled()) {
trace.debug("{0} remove {1}", getName(), row);
}
// TODO invalidate row count
// setChanged(session);
if (rowCount == 1) {
removeAllRows();
} else {
try {
PageBtree root = getPage(rootPageId);
root.remove(row);
invalidateRowCount();
rowCount--;
} finally {
store.incrementChangeCount();
}
}
}
@Override
public void remove(Session session) {
if (trace.isDebugEnabled()) {
trace.debug("remove");
}
removeAllRows();
store.free(rootPageId);
store.removeMeta(this, session);
}
@Override
public void truncate(Session session) {
if (trace.isDebugEnabled()) {
trace.debug("truncate");
}
removeAllRows();
if (tableData.getContainsLargeObject()) {
database.getLobStorage().removeAllForTable(table.getId());
}
tableData.setRowCount(0);
}
private void removeAllRows() {
try {
PageBtree root = getPage(rootPageId);
root.freeRecursive();
root = PageBtreeLeaf.create(this, rootPageId, PageBtree.ROOT);
store.removeFromCache(rootPageId);
store.update(root);
rowCount = 0;
} finally {
store.incrementChangeCount();
}
}
@Override
public void checkRename() {
// ok
}
/**
* Get a row from the main index.
*
* @param session the session
* @param key the row key
* @return the row
*/
@Override
public Row getRow(Session session, long key) {
return tableData.getRow(session, key);
}
PageStore getPageStore() {
return store;
}
@Override
public long getRowCountApproximation() {
return tableData.getRowCountApproximation();
}
@Override
public long getDiskSpaceUsed() {
return tableData.getDiskSpaceUsed();
}
@Override
public long getRowCount(Session session) {
return rowCount;
}
@Override
public void close(Session session) {
if (trace.isDebugEnabled()) {
trace.debug("close");
}
// cannot close the index because it might get used afterwards,
// for example after running recovery
try {
writeRowCount();
} finally {
store.incrementChangeCount();
}
}
/**
* Read a row from the data page at the given offset.
*
* @param data the data
* @param offset the offset
* @param onlyPosition whether only the position of the row is stored
* @param needData whether the row data is required
* @return the row
*/
SearchRow readRow(Data data, int offset, boolean onlyPosition,
boolean needData) {
synchronized (data) {
data.setPos(offset);
long key = data.readVarLong();
if (onlyPosition) {
if (needData) {
return tableData.getRow(null, key);
}
SearchRow row = table.getTemplateSimpleRow(true);
row.setKey(key);
return row;
}
SearchRow row = table.getTemplateSimpleRow(columns.length == 1);
row.setKey(key);
for (Column col : columns) {
int idx = col.getColumnId();
row.setValue(idx, data.readValue());
}
return row;
}
}
/**
* Get the complete row from the data index.
*
* @param key the key
* @return the row
*/
SearchRow readRow(long key) {
return tableData.getRow(null, key);
}
/**
* Write a row to the data page at the given offset.
*
* @param data the data
* @param offset the offset
* @param onlyPosition whether only the position of the row is stored
* @param row the row to write
*/
void writeRow(Data data, int offset, SearchRow row, boolean onlyPosition) {
data.setPos(offset);
data.writeVarLong(row.getKey());
if (!onlyPosition) {
for (Column col : columns) {
int idx = col.getColumnId();
data.writeValue(row.getValue(idx));
}
}
}
/**
* Get the size of a row (only the part that is stored in the index).
*
* @param dummy a dummy data page to calculate the size
* @param row the row
* @param onlyPosition whether only the position of the row is stored
* @return the number of bytes
*/
int getRowSize(Data dummy, SearchRow row, boolean onlyPosition) {
int rowsize = Data.getVarLongLen(row.getKey());
if (!onlyPosition) {
for (Column col : columns) {
Value v = row.getValue(col.getColumnId());
rowsize += dummy.getValueLen(v);
}
}
return rowsize;
}
@Override
public boolean canFindNext() {
return true;
}
/**
* The root page has changed.
*
* @param session the session
* @param newPos the new position
*/
void setRootPageId(Session session, int newPos) {
store.removeMeta(this, session);
this.rootPageId = newPos;
store.addMeta(this, session);
store.addIndex(this);
}
private void invalidateRowCount() {
PageBtree root = getPage(rootPageId);
root.setRowCountStored(PageData.UNKNOWN_ROWCOUNT);
}
@Override
public void writeRowCount() {
if (SysProperties.MODIFY_ON_WRITE && rootPageId == 0) {
// currently creating the index
return;
}
PageBtree root = getPage(rootPageId);
root.setRowCountStored(MathUtils.convertLongToInt(rowCount));
}
/**
* Check whether the given row contains data.
*
* @param row the row
* @return true if it contains data
*/
boolean hasData(SearchRow row) {
return row.getValue(columns[0].getColumnId()) != null;
}
int getMemoryPerPage() {
return memoryPerPage;
}
/**
* The memory usage of a page was changed. The new value is used to adapt
* the average estimated memory size of a page.
*
* @param x the new memory size
*/
void memoryChange(int x) {
if (memoryCount < Constants.MEMORY_FACTOR) {
memoryPerPage += (x - memoryPerPage) / ++memoryCount;
} else {
memoryPerPage += (x > memoryPerPage ? 1 : -1) +
((x - memoryPerPage) / Constants.MEMORY_FACTOR);
}
}
/**
* Check if calculating the memory is required.
*
* @return true if it is
*/
static boolean isMemoryChangeRequired() {
if (memoryChangeRequired-- <= 0) {
memoryChangeRequired = 10;
return true;
}
return false;
}
}
|
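memoryChange(int) above maintains a running estimate without storing past samples: for the first MEMORY_FACTOR samples it uses the incremental-mean update mean += (x - mean) / ++count, and afterwards it only drifts toward new values. A sketch of the first phase (hypothetical class; integer division makes the average approximate):

class RunningMeanSketch {
    private int mean;
    private int count;

    void add(int x) {
        mean += (x - mean) / ++count; // incremental running average
    }

    int value() {
        return mean;
    }

    public static void main(String[] args) {
        RunningMeanSketch m = new RunningMeanSketch();
        for (int x : new int[] {100, 200, 300}) {
            m.add(x);
            System.out.println(m.value()); // 100, 150, 200
        }
    }
}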
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageBtreeLeaf.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.Arrays;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.result.SearchRow;
import org.h2.store.Data;
import org.h2.store.Page;
import org.h2.store.PageStore;
/**
* A b-tree leaf page that contains index data. Format:
* <ul>
* <li>page type: byte</li>
* <li>checksum: short</li>
* <li>parent page id (0 for root): int</li>
* <li>index id: varInt</li>
* <li>entry count: short</li>
* <li>list of offsets: short</li>
* <li>data (key: varLong, value,...)</li>
* </ul>
*/
public class PageBtreeLeaf extends PageBtree {
private static final int OFFSET_LENGTH = 2;
private final boolean optimizeUpdate;
private boolean writtenData;
private PageBtreeLeaf(PageBtreeIndex index, int pageId, Data data) {
super(index, pageId, data);
this.optimizeUpdate = index.getDatabase().getSettings().optimizeUpdate;
}
/**
* Read a b-tree leaf page.
*
* @param index the index
* @param data the data
* @param pageId the page id
* @return the page
*/
public static Page read(PageBtreeIndex index, Data data, int pageId) {
PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, data);
p.read();
return p;
}
/**
* Create a new page.
*
* @param index the index
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
static PageBtreeLeaf create(PageBtreeIndex index, int pageId,
int parentPageId) {
PageBtreeLeaf p = new PageBtreeLeaf(index, pageId, index.getPageStore()
.createData());
index.getPageStore().logUndo(p, null);
p.rows = SearchRow.EMPTY_ARRAY;
p.parentPageId = parentPageId;
p.writeHead();
p.start = p.data.length();
return p;
}
private void read() {
data.reset();
int type = data.readByte();
data.readShortInt();
this.parentPageId = data.readInt();
onlyPosition = (type & Page.FLAG_LAST) == 0;
int indexId = data.readVarInt();
if (indexId != index.getId()) {
throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPos() + " expected index:" + index.getId() +
"got:" + indexId);
}
entryCount = data.readShortInt();
offsets = new int[entryCount];
rows = new SearchRow[entryCount];
for (int i = 0; i < entryCount; i++) {
offsets[i] = data.readShortInt();
}
start = data.length();
written = true;
writtenData = true;
}
@Override
int addRowTry(SearchRow row) {
int x = addRow(row, true);
memoryChange();
return x;
}
private int addRow(SearchRow row, boolean tryOnly) {
int rowLength = index.getRowSize(data, row, onlyPosition);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (last - rowLength < start + OFFSET_LENGTH) {
if (tryOnly && entryCount > 1) {
int x = find(row, false, true, true);
if (entryCount < 5) {
// required, otherwise the index doesn't work correctly
return entryCount / 2;
}
// split near the insertion point to better fill pages
// split in half would be:
// return entryCount / 2;
int third = entryCount / 3;
return x < third ? third : x >= 2 * third ? 2 * third : x;
}
readAllRows();
writtenData = false;
onlyPosition = true;
// change the offsets (now storing only positions)
int o = pageSize;
for (int i = 0; i < entryCount; i++) {
o -= index.getRowSize(data, getRow(i), true);
offsets[i] = o;
}
last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
rowLength = index.getRowSize(data, row, true);
if (SysProperties.CHECK && last - rowLength < start + OFFSET_LENGTH) {
throw DbException.throwInternalError();
}
}
index.getPageStore().logUndo(this, data);
if (!optimizeUpdate) {
readAllRows();
}
changeCount = index.getPageStore().getChangeCount();
written = false;
int x;
if (entryCount == 0) {
x = 0;
} else {
x = find(row, false, true, true);
}
start += OFFSET_LENGTH;
int offset = (x == 0 ? pageSize : offsets[x - 1]) - rowLength;
if (optimizeUpdate && writtenData) {
if (entryCount > 0) {
byte[] d = data.getBytes();
int dataStart = offsets[entryCount - 1];
System.arraycopy(d, dataStart, d, dataStart - rowLength,
offset - dataStart + rowLength);
}
index.writeRow(data, offset, row, onlyPosition);
}
offsets = insert(offsets, entryCount, x, offset);
add(offsets, x + 1, entryCount + 1, -rowLength);
rows = insert(rows, entryCount, x, row);
entryCount++;
index.getPageStore().update(this);
return -1;
}
private void removeRow(int at) {
if (!optimizeUpdate) {
readAllRows();
}
index.getPageStore().logUndo(this, data);
entryCount--;
written = false;
changeCount = index.getPageStore().getChangeCount();
if (entryCount <= 0) {
DbException.throwInternalError("" + entryCount);
}
int startNext = at > 0 ? offsets[at - 1] : index.getPageStore().getPageSize();
int rowLength = startNext - offsets[at];
start -= OFFSET_LENGTH;
if (optimizeUpdate) {
if (writtenData) {
byte[] d = data.getBytes();
int dataStart = offsets[entryCount];
System.arraycopy(d, dataStart, d,
dataStart + rowLength, offsets[at] - dataStart);
Arrays.fill(d, dataStart, dataStart + rowLength, (byte) 0);
}
}
offsets = remove(offsets, entryCount + 1, at);
add(offsets, at, entryCount, rowLength);
rows = remove(rows, entryCount + 1, at);
}
int getEntryCount() {
return entryCount;
}
@Override
PageBtree split(int splitPoint) {
int newPageId = index.getPageStore().allocatePage();
PageBtreeLeaf p2 = PageBtreeLeaf.create(index, newPageId, parentPageId);
while (splitPoint < entryCount) {
p2.addRow(getRow(splitPoint), false);
removeRow(splitPoint);
}
memoryChange();
p2.memoryChange();
return p2;
}
@Override
PageBtreeLeaf getFirstLeaf() {
return this;
}
@Override
PageBtreeLeaf getLastLeaf() {
return this;
}
@Override
SearchRow remove(SearchRow row) {
int at = find(row, false, false, true);
SearchRow delete = getRow(at);
if (index.compareRows(row, delete) != 0 || delete.getKey() != row.getKey()) {
throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1,
index.getSQL() + ": " + row);
}
index.getPageStore().logUndo(this, data);
if (entryCount == 1) {
// the page is now empty
return row;
}
removeRow(at);
memoryChange();
index.getPageStore().update(this);
if (at == entryCount) {
// the last row changed
return getRow(at - 1);
}
// the last row didn't change
return null;
}
@Override
void freeRecursive() {
index.getPageStore().logUndo(this, data);
index.getPageStore().free(getPos());
}
@Override
int getRowCount() {
return entryCount;
}
@Override
void setRowCountStored(int rowCount) {
// ignore
}
@Override
public void write() {
writeData();
index.getPageStore().writePage(getPos(), data);
}
private void writeHead() {
data.reset();
data.writeByte((byte) (Page.TYPE_BTREE_LEAF |
(onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeShortInt(0);
data.writeInt(parentPageId);
data.writeVarInt(index.getId());
data.writeShortInt(entryCount);
}
private void writeData() {
if (written) {
return;
}
if (!optimizeUpdate) {
readAllRows();
}
writeHead();
for (int i = 0; i < entryCount; i++) {
data.writeShortInt(offsets[i]);
}
if (!writtenData || !optimizeUpdate) {
for (int i = 0; i < entryCount; i++) {
index.writeRow(data, offsets[i], rows[i], onlyPosition);
}
writtenData = true;
}
written = true;
memoryChange();
}
@Override
void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) {
int i = find(first, bigger, false, false);
if (i > entryCount) {
if (parentPageId == PageBtree.ROOT) {
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.find(cursor, first, bigger);
return;
}
cursor.setCurrent(this, i);
}
@Override
void last(PageBtreeCursor cursor) {
cursor.setCurrent(this, entryCount - 1);
}
@Override
void remapChildren() {
// nothing to do
}
/**
* Set the cursor to the first row of the next page.
*
* @param cursor the cursor
*/
void nextPage(PageBtreeCursor cursor) {
if (parentPageId == PageBtree.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.nextPage(cursor, getPos());
}
/**
* Set the cursor to the last row of the previous page.
*
* @param cursor the cursor
*/
void previousPage(PageBtreeCursor cursor) {
if (parentPageId == PageBtree.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.previousPage(cursor, getPos());
}
@Override
public String toString() {
return "page[" + getPos() + "] b-tree leaf table:" +
index.getId() + " entries:" + entryCount;
}
@Override
public void moveTo(Session session, int newPos) {
PageStore store = index.getPageStore();
readAllRows();
PageBtreeLeaf p2 = PageBtreeLeaf.create(index, newPos, parentPageId);
store.logUndo(this, data);
store.logUndo(p2, null);
p2.rows = rows;
p2.entryCount = entryCount;
p2.offsets = offsets;
p2.onlyPosition = onlyPosition;
p2.parentPageId = parentPageId;
p2.start = start;
store.update(p2);
if (parentPageId == ROOT) {
index.setRootPageId(session, newPos);
} else {
PageBtreeNode p = (PageBtreeNode) store.getPage(parentPageId);
p.moveChild(getPos(), newPos);
}
store.free(getPos());
}
@Override
protected void memoryChange() {
if (!PageBtreeIndex.isMemoryChangeRequired()) {
return;
}
int memory = Constants.MEMORY_PAGE_BTREE + index.getPageStore().getPageSize();
if (rows != null) {
memory += getEntryCount() * (4 + Constants.MEMORY_POINTER);
for (int i = 0; i < entryCount; i++) {
SearchRow r = rows[i];
if (r != null) {
memory += r.getMemory();
}
}
}
index.memoryChange(memory >> 2);
}
}
|
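The space check in addRow() above (last - rowLength < start + OFFSET_LENGTH) reflects the page layout: the header and the 2-byte offset list grow upward from the start of the page while row data grows downward from the end, and the page must split when the two regions would meet. A toy illustration (all sizes hypothetical):

class PageLayoutSketch {

    public static void main(String[] args) {
        int pageSize = 64;
        int start = 8;       // header bytes; grows by 2 per entry (offset list)
        int last = pageSize; // bottom of the data area; grows downward
        for (int rowLength : new int[] {10, 7, 12, 30}) {
            if (last - rowLength < start + 2) {
                System.out.println("page full, split needed");
                return;
            }
            last -= rowLength;
            start += 2;
            System.out.println("row at offset " + last
                    + ", free bytes " + (last - start));
        }
    }
}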
0 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2 | java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageBtreeNode.java |
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.api.DatabaseEventListener;
import org.h2.api.ErrorCode;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.result.SearchRow;
import org.h2.store.Data;
import org.h2.store.Page;
import org.h2.store.PageStore;
import org.h2.util.Utils;
/**
* A b-tree node page that contains index data. Format:
* <ul>
* <li>page type: byte</li>
* <li>checksum: short</li>
* <li>parent page id (0 for root): int</li>
* <li>index id: varInt</li>
* <li>count of all children (-1 if not known): int</li>
* <li>entry count: short</li>
* <li>rightmost child page id: int</li>
* <li>entries (child page id: int, offset: short)</li>
* </ul>
* The row contains the largest key of the respective child,
* meaning row[0] contains the largest key of child[0].
*/
public class PageBtreeNode extends PageBtree {
private static final int CHILD_OFFSET_PAIR_LENGTH = 6;
private static final int MAX_KEY_LENGTH = 10;
private final boolean pageStoreInternalCount;
/**
* The page ids of the children.
*/
private int[] childPageIds;
private int rowCountStored = UNKNOWN_ROWCOUNT;
private int rowCount = UNKNOWN_ROWCOUNT;
private PageBtreeNode(PageBtreeIndex index, int pageId, Data data) {
super(index, pageId, data);
this.pageStoreInternalCount = index.getDatabase().
getSettings().pageStoreInternalCount;
}
/**
* Read a b-tree node page.
*
* @param index the index
* @param data the data
* @param pageId the page id
* @return the page
*/
public static Page read(PageBtreeIndex index, Data data, int pageId) {
PageBtreeNode p = new PageBtreeNode(index, pageId, data);
p.read();
return p;
}
/**
* Create a new b-tree node page.
*
* @param index the index
* @param pageId the page id
* @param parentPageId the parent page id
* @return the page
*/
static PageBtreeNode create(PageBtreeIndex index, int pageId,
int parentPageId) {
PageBtreeNode p = new PageBtreeNode(index, pageId, index.getPageStore()
.createData());
index.getPageStore().logUndo(p, null);
p.parentPageId = parentPageId;
p.writeHead();
// 4 bytes for the rightmost child page id
p.start = p.data.length() + 4;
p.rows = SearchRow.EMPTY_ARRAY;
if (p.pageStoreInternalCount) {
p.rowCount = 0;
}
return p;
}
private void read() {
data.reset();
int type = data.readByte();
data.readShortInt();
this.parentPageId = data.readInt();
onlyPosition = (type & Page.FLAG_LAST) == 0;
int indexId = data.readVarInt();
if (indexId != index.getId()) {
throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPos() + " expected index:" + index.getId() +
"got:" + indexId);
}
rowCount = rowCountStored = data.readInt();
entryCount = data.readShortInt();
childPageIds = new int[entryCount + 1];
childPageIds[entryCount] = data.readInt();
rows = entryCount == 0 ? SearchRow.EMPTY_ARRAY : new SearchRow[entryCount];
offsets = Utils.newIntArray(entryCount);
for (int i = 0; i < entryCount; i++) {
childPageIds[i] = data.readInt();
offsets[i] = data.readShortInt();
}
check();
start = data.length();
written = true;
}
/**
* Add a row if possible. If there is room, this method returns -1,
* otherwise the split point. It is always possible to add two rows.
*
* @param row the row to add
* @return the split point of this page, or -1 if no split is required
*/
private int addChildTry(SearchRow row) {
if (entryCount < 4) {
return -1;
}
int startData;
if (onlyPosition) {
// if we only store the position, we may at most store as many
// entries as there is space for keys, because the current data area
// might get larger when _removing_ a child (if the new key needs
// more space) - and removing a child can't split this page
startData = index.getPageStore().getPageSize() - (entryCount + 1) * MAX_KEY_LENGTH;
} else {
int rowLength = index.getRowSize(data, row, onlyPosition);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
startData = last - rowLength;
}
if (startData < start + CHILD_OFFSET_PAIR_LENGTH) {
return entryCount / 2;
}
return -1;
}
/**
* Add a child at the given position.
*
* @param x the position
* @param childPageId the child
* @param row the row smaller than the first row of the child and its
* children
*/
private void addChild(int x, int childPageId, SearchRow row) {
int rowLength = index.getRowSize(data, row, onlyPosition);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
if (last - rowLength < start + CHILD_OFFSET_PAIR_LENGTH) {
readAllRows();
onlyPosition = true;
// change the offsets (now storing only positions)
int o = pageSize;
for (int i = 0; i < entryCount; i++) {
o -= index.getRowSize(data, getRow(i), true);
offsets[i] = o;
}
last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
rowLength = index.getRowSize(data, row, true);
if (SysProperties.CHECK && last - rowLength <
start + CHILD_OFFSET_PAIR_LENGTH) {
throw DbException.throwInternalError();
}
}
int offset = last - rowLength;
if (entryCount > 0) {
if (x < entryCount) {
offset = (x == 0 ? pageSize : offsets[x - 1]) - rowLength;
}
}
rows = insert(rows, entryCount, x, row);
offsets = insert(offsets, entryCount, x, offset);
add(offsets, x + 1, entryCount + 1, -rowLength);
childPageIds = insert(childPageIds, entryCount + 1, x + 1, childPageId);
start += CHILD_OFFSET_PAIR_LENGTH;
if (pageStoreInternalCount) {
if (rowCount != UNKNOWN_ROWCOUNT) {
rowCount += offset;
}
}
entryCount++;
written = false;
changeCount = index.getPageStore().getChangeCount();
}
@Override
int addRowTry(SearchRow row) {
while (true) {
int x = find(row, false, true, true);
PageBtree page = index.getPage(childPageIds[x]);
int splitPoint = page.addRowTry(row);
if (splitPoint == -1) {
break;
}
SearchRow pivot = page.getRow(splitPoint - 1);
index.getPageStore().logUndo(this, data);
int splitPoint2 = addChildTry(pivot);
if (splitPoint2 != -1) {
return splitPoint2;
}
PageBtree page2 = page.split(splitPoint);
readAllRows();
addChild(x, page2.getPos(), pivot);
index.getPageStore().update(page);
index.getPageStore().update(page2);
index.getPageStore().update(this);
}
updateRowCount(1);
written = false;
changeCount = index.getPageStore().getChangeCount();
return -1;
}
private void updateRowCount(int offset) {
if (rowCount != UNKNOWN_ROWCOUNT) {
rowCount += offset;
}
if (rowCountStored != UNKNOWN_ROWCOUNT) {
rowCountStored = UNKNOWN_ROWCOUNT;
index.getPageStore().logUndo(this, data);
if (written) {
writeHead();
}
index.getPageStore().update(this);
}
}
@Override
PageBtree split(int splitPoint) {
int newPageId = index.getPageStore().allocatePage();
PageBtreeNode p2 = PageBtreeNode.create(index, newPageId, parentPageId);
index.getPageStore().logUndo(this, data);
if (onlyPosition) {
// TODO optimize: maybe not required
p2.onlyPosition = true;
}
int firstChild = childPageIds[splitPoint];
readAllRows();
while (splitPoint < entryCount) {
p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], getRow(splitPoint));
removeChild(splitPoint);
}
int lastChild = childPageIds[splitPoint - 1];
removeChild(splitPoint - 1);
childPageIds[splitPoint - 1] = lastChild;
if (p2.childPageIds == null) {
p2.childPageIds = new int[1];
}
p2.childPageIds[0] = firstChild;
p2.remapChildren();
return p2;
}
@Override
protected void remapChildren() {
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
PageBtree p = index.getPage(child);
p.setParentPageId(getPos());
index.getPageStore().update(p);
}
}
/**
* Initialize the page.
*
* @param page1 the first child page
* @param pivot the pivot key
* @param page2 the last child page
*/
void init(PageBtree page1, SearchRow pivot, PageBtree page2) {
entryCount = 0;
childPageIds = new int[] { page1.getPos() };
rows = SearchRow.EMPTY_ARRAY;
offsets = Utils.EMPTY_INT_ARRAY;
addChild(0, page2.getPos(), pivot);
if (pageStoreInternalCount) {
rowCount = page1.getRowCount() + page2.getRowCount();
}
check();
}
@Override
void find(PageBtreeCursor cursor, SearchRow first, boolean bigger) {
int i = find(first, bigger, false, false);
if (i > entryCount) {
if (parentPageId == PageBtree.ROOT) {
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.find(cursor, first, bigger);
return;
}
PageBtree page = index.getPage(childPageIds[i]);
page.find(cursor, first, bigger);
}
@Override
void last(PageBtreeCursor cursor) {
int child = childPageIds[entryCount];
index.getPage(child).last(cursor);
}
@Override
PageBtreeLeaf getFirstLeaf() {
int child = childPageIds[0];
return index.getPage(child).getFirstLeaf();
}
@Override
PageBtreeLeaf getLastLeaf() {
int child = childPageIds[entryCount];
return index.getPage(child).getLastLeaf();
}
@Override
SearchRow remove(SearchRow row) {
int at = find(row, false, false, true);
// merge is not implemented to allow concurrent usage
// TODO maybe implement merge
PageBtree page = index.getPage(childPageIds[at]);
SearchRow last = page.remove(row);
index.getPageStore().logUndo(this, data);
updateRowCount(-1);
written = false;
changeCount = index.getPageStore().getChangeCount();
if (last == null) {
// the last row didn't change - nothing to do
return null;
} else if (last == row) {
// this child is now empty
index.getPageStore().free(page.getPos());
if (entryCount < 1) {
// no more children - this page is empty as well
return row;
}
if (at == entryCount) {
// removing the last child
last = getRow(at - 1);
} else {
last = null;
}
removeChild(at);
index.getPageStore().update(this);
return last;
}
// the last row is in the last child
if (at == entryCount) {
return last;
}
int child = childPageIds[at];
removeChild(at);
// TODO this can mean only the position is now stored
// should split at the next possible moment
addChild(at, child, last);
// remove and add swapped two children, fix that
int temp = childPageIds[at];
childPageIds[at] = childPageIds[at + 1];
childPageIds[at + 1] = temp;
index.getPageStore().update(this);
return null;
}
@Override
int getRowCount() {
if (rowCount == UNKNOWN_ROWCOUNT) {
int count = 0;
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
PageBtree page = index.getPage(child);
count += page.getRowCount();
index.getDatabase().setProgress(
DatabaseEventListener.STATE_SCAN_FILE,
index.getName(), count, Integer.MAX_VALUE);
}
rowCount = count;
}
return rowCount;
}
@Override
void setRowCountStored(int rowCount) {
if (rowCount < 0 && pageStoreInternalCount) {
return;
}
this.rowCount = rowCount;
if (rowCountStored != rowCount) {
rowCountStored = rowCount;
index.getPageStore().logUndo(this, data);
if (written) {
changeCount = index.getPageStore().getChangeCount();
writeHead();
}
index.getPageStore().update(this);
}
}
private void check() {
if (SysProperties.CHECK) {
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
if (child == 0) {
DbException.throwInternalError();
}
}
}
}
@Override
public void write() {
check();
writeData();
index.getPageStore().writePage(getPos(), data);
}
private void writeHead() {
data.reset();
data.writeByte((byte) (Page.TYPE_BTREE_NODE |
(onlyPosition ? 0 : Page.FLAG_LAST)));
data.writeShortInt(0);
data.writeInt(parentPageId);
data.writeVarInt(index.getId());
data.writeInt(rowCountStored);
data.writeShortInt(entryCount);
}
private void writeData() {
if (written) {
return;
}
readAllRows();
writeHead();
data.writeInt(childPageIds[entryCount]);
for (int i = 0; i < entryCount; i++) {
data.writeInt(childPageIds[i]);
data.writeShortInt(offsets[i]);
}
for (int i = 0; i < entryCount; i++) {
index.writeRow(data, offsets[i], rows[i], onlyPosition);
}
written = true;
}
@Override
void freeRecursive() {
index.getPageStore().logUndo(this, data);
index.getPageStore().free(getPos());
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
index.getPage(child).freeRecursive();
}
}
private void removeChild(int i) {
readAllRows();
entryCount--;
if (pageStoreInternalCount) {
updateRowCount(-index.getPage(childPageIds[i]).getRowCount());
}
written = false;
changeCount = index.getPageStore().getChangeCount();
if (entryCount < 0) {
DbException.throwInternalError("" + entryCount);
}
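        // row data is stored back to front from the end of the page, so
        // removing a row shifts the offsets of all following entries by the
        // length of the removed row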
if (entryCount > i) {
int startNext = i > 0 ? offsets[i - 1] : index.getPageStore().getPageSize();
int rowLength = startNext - offsets[i];
add(offsets, i, entryCount + 1, rowLength);
}
rows = remove(rows, entryCount + 1, i);
offsets = remove(offsets, entryCount + 1, i);
childPageIds = remove(childPageIds, entryCount + 2, i);
start -= CHILD_OFFSET_PAIR_LENGTH;
}
/**
* Set the cursor to the first row of the next page.
*
* @param cursor the cursor
* @param pageId id of the next page
*/
void nextPage(PageBtreeCursor cursor, int pageId) {
int i;
// TODO maybe keep the index in the child page (transiently)
for (i = 0; i < entryCount + 1; i++) {
if (childPageIds[i] == pageId) {
i++;
break;
}
}
if (i > entryCount) {
if (parentPageId == PageBtree.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode next = (PageBtreeNode) index.getPage(parentPageId);
next.nextPage(cursor, getPos());
return;
}
PageBtree page = index.getPage(childPageIds[i]);
PageBtreeLeaf leaf = page.getFirstLeaf();
cursor.setCurrent(leaf, 0);
}
/**
* Set the cursor to the last row of the previous page.
*
* @param cursor the cursor
* @param pageId id of the previous page
*/
void previousPage(PageBtreeCursor cursor, int pageId) {
int i;
// TODO maybe keep the index in the child page (transiently)
for (i = entryCount; i >= 0; i--) {
if (childPageIds[i] == pageId) {
i--;
break;
}
}
if (i < 0) {
if (parentPageId == PageBtree.ROOT) {
cursor.setCurrent(null, 0);
return;
}
PageBtreeNode previous = (PageBtreeNode) index.getPage(parentPageId);
previous.previousPage(cursor, getPos());
return;
}
PageBtree page = index.getPage(childPageIds[i]);
PageBtreeLeaf leaf = page.getLastLeaf();
cursor.setCurrent(leaf, leaf.entryCount - 1);
}
@Override
public String toString() {
return "page[" + getPos() + "] b-tree node table:" +
index.getId() + " entries:" + entryCount;
}
@Override
public void moveTo(Session session, int newPos) {
PageStore store = index.getPageStore();
store.logUndo(this, data);
PageBtreeNode p2 = PageBtreeNode.create(index, newPos, parentPageId);
readAllRows();
p2.rowCountStored = rowCountStored;
p2.rowCount = rowCount;
p2.childPageIds = childPageIds;
p2.rows = rows;
p2.entryCount = entryCount;
p2.offsets = offsets;
p2.onlyPosition = onlyPosition;
p2.parentPageId = parentPageId;
p2.start = start;
store.update(p2);
if (parentPageId == ROOT) {
index.setRootPageId(session, newPos);
} else {
Page p = store.getPage(parentPageId);
if (!(p instanceof PageBtreeNode)) {
throw DbException.throwInternalError();
}
PageBtreeNode n = (PageBtreeNode) p;
n.moveChild(getPos(), newPos);
}
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
PageBtree p = index.getPage(child);
p.setParentPageId(newPos);
store.update(p);
}
store.free(getPos());
}
/**
* One of the children has moved to a new page.
*
* @param oldPos the old position
* @param newPos the new position
*/
void moveChild(int oldPos, int newPos) {
for (int i = 0; i < entryCount + 1; i++) {
if (childPageIds[i] == oldPos) {
index.getPageStore().logUndo(this, data);
written = false;
changeCount = index.getPageStore().getChangeCount();
childPageIds[i] = newPos;
index.getPageStore().update(this);
return;
}
}
throw DbException.throwInternalError(oldPos + " " + newPos);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageData.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.engine.Session;
import org.h2.result.Row;
import org.h2.store.Data;
import org.h2.store.Page;
/**
* A page that contains data rows.
*/
abstract class PageData extends Page {
/**
* The position of the parent page id.
*/
static final int START_PARENT = 3;
/**
* This is a root page.
*/
static final int ROOT = 0;
/**
* Indicator that the row count is not known.
*/
static final int UNKNOWN_ROWCOUNT = -1;
/**
* The index.
*/
protected final PageDataIndex index;
/**
* The page number of the parent.
*/
protected int parentPageId;
/**
* The data page.
*/
protected final Data data;
/**
* The number of entries.
*/
protected int entryCount;
/**
* The row keys.
*/
protected long[] keys;
/**
* Whether the data page is up-to-date.
*/
protected boolean written;
/**
* The estimated heap memory used by this object, in number of double words
* (4 bytes each).
*/
private final int memoryEstimated;
PageData(PageDataIndex index, int pageId, Data data) {
this.index = index;
this.data = data;
setPos(pageId);
memoryEstimated = index.getMemoryPerPage();
}
/**
* Get the real row count. If required, this will read all child pages.
*
* @return the row count
*/
abstract int getRowCount();
/**
* Set the stored row count. This will write the page.
*
* @param rowCount the stored row count
*/
abstract void setRowCountStored(int rowCount);
/**
* Get the used disk space for this index.
*
* @return the estimated number of bytes
*/
abstract long getDiskSpaceUsed();
/**
* Find an entry by key.
*
* @param key the key (may not exist)
* @return the matching or next index
*/
int find(long key) {
int l = 0, r = entryCount;
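        // binary search over the sorted keys; (l + r) >>> 1 is the
        // overflow-safe midpoint idiom. If the key is absent, l ends at the
        // insertion point, e.g. keys = [10, 20, 30] and key = 25 returns 2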
while (l < r) {
int i = (l + r) >>> 1;
long k = keys[i];
if (k == key) {
return i;
} else if (k > key) {
r = i;
} else {
l = i + 1;
}
}
return l;
}
/**
     * Add a row if possible. If there is room, this method returns -1;
     * otherwise it returns the split point. It is always possible to add
     * one row.
     *
     * @param row the row to add
* @return the split point of this page, or -1 if no split is required
*/
abstract int addRowTry(Row row);
/**
* Get a cursor.
*
* @param session the session
* @param minKey the smallest key
* @param maxKey the largest key
* @param multiVersion if the delta should be used
* @return the cursor
*/
abstract Cursor find(Session session, long minKey, long maxKey,
boolean multiVersion);
/**
* Get the key at this position.
*
* @param at the index
* @return the key
*/
long getKey(int at) {
return keys[at];
}
/**
* Split the index page at the given point.
*
* @param splitPoint the index where to split
* @return the new page that contains about half the entries
*/
abstract PageData split(int splitPoint);
/**
* Change the page id.
*
* @param id the new page id
*/
void setPageId(int id) {
int old = getPos();
index.getPageStore().removeFromCache(getPos());
setPos(id);
index.getPageStore().logUndo(this, null);
remapChildren(old);
}
/**
* Get the last key of a page.
*
* @return the last key
*/
abstract long getLastKey();
/**
* Get the first child leaf page of a page.
*
* @return the page
*/
abstract PageDataLeaf getFirstLeaf();
/**
* Change the parent page id.
*
* @param id the new parent page id
*/
void setParentPageId(int id) {
index.getPageStore().logUndo(this, data);
parentPageId = id;
if (written) {
changeCount = index.getPageStore().getChangeCount();
data.setInt(START_PARENT, parentPageId);
}
}
/**
* Update the parent id of all children.
*
* @param old the previous position
*/
abstract void remapChildren(int old);
/**
* Remove a row.
*
* @param key the key of the row to remove
* @return true if this page is now empty
*/
abstract boolean remove(long key);
/**
* Free this page and all child pages.
*/
abstract void freeRecursive();
/**
* Get the row for the given key.
*
* @param key the key
* @return the row
*/
abstract Row getRowWithKey(long key);
/**
* Get the estimated heap memory size.
*
* @return number of double words (4 bytes each)
*/
@Override
public int getMemory() {
// need to always return the same value for the same object (otherwise
// the cache size would change after adding and then removing the same
        // page from the cache) but index.getMemoryPerPage() can adapt according
        // to how much memory a row needs on average
return memoryEstimated;
}
int getParentPageId() {
return parentPageId;
}
@Override
public boolean canRemove() {
return changeCount < index.getPageStore().getChangeCount();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageDataCursor.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.Iterator;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
* The cursor implementation for the page scan index.
*/
class PageDataCursor implements Cursor {
private PageDataLeaf current;
private int idx;
private final long maxKey;
private Row row;
private final boolean multiVersion;
private final Session session;
private Iterator<Row> delta;
PageDataCursor(Session session, PageDataLeaf current, int idx, long maxKey,
boolean multiVersion) {
this.current = current;
this.idx = idx;
this.maxKey = maxKey;
this.multiVersion = multiVersion;
this.session = session;
if (multiVersion) {
delta = current.index.getDelta();
}
}
@Override
public Row get() {
return row;
}
@Override
public SearchRow getSearchRow() {
return get();
}
@Override
public boolean next() {
if (!multiVersion) {
nextRow();
return checkMax();
}
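        // multi-version scan: first walk the delta and return only rows that
        // another session has deleted but not yet committed (they are still
        // visible to this session); then walk the base pages, skipping
        // uncommitted rows that belong to other sessions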
while (true) {
if (delta != null) {
if (!delta.hasNext()) {
delta = null;
row = null;
continue;
}
row = delta.next();
if (!row.isDeleted() || row.getSessionId() == session.getId()) {
continue;
}
} else {
nextRow();
if (row != null && row.getSessionId() != 0 &&
row.getSessionId() != session.getId()) {
continue;
}
}
break;
}
return checkMax();
}
private boolean checkMax() {
if (row != null) {
if (maxKey != Long.MAX_VALUE) {
long x = current.index.getKey(row, Long.MAX_VALUE, Long.MAX_VALUE);
if (x > maxKey) {
row = null;
return false;
}
}
return true;
}
return false;
}
private void nextRow() {
if (idx >= current.getEntryCount()) {
current = current.getNextPage();
idx = 0;
if (current == null) {
row = null;
return;
}
}
row = current.getRowAt(idx);
idx++;
}
@Override
public boolean previous() {
throw DbException.throwInternalError(toString());
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageDataIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.engine.UndoLogRecord;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.store.Page;
import org.h2.store.PageStore;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.TableFilter;
import org.h2.util.MathUtils;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
 * The scan index allows access to a row by key. It can be used to iterate over
* all rows of a table. Each regular table has one such object, even if no
* primary key or indexes are defined.
*/
public class PageDataIndex extends PageIndex {
private final PageStore store;
private final RegularTable tableData;
private long lastKey;
private long rowCount;
private HashSet<Row> delta;
private int rowCountDiff;
private final HashMap<Integer, Integer> sessionRowCount;
private int mainIndexColumn = -1;
private DbException fastDuplicateKeyException;
/**
* The estimated heap memory per page, in number of double words (4 bytes
* each).
*/
private int memoryPerPage;
private int memoryCount;
private final boolean multiVersion;
public PageDataIndex(RegularTable table, int id, IndexColumn[] columns,
IndexType indexType, boolean create, Session session) {
initBaseIndex(table, id, table.getName() + "_DATA", columns, indexType);
this.multiVersion = database.isMultiVersion();
// trace = database.getTrace(Trace.PAGE_STORE + "_di");
// trace.setLevel(TraceSystem.DEBUG);
if (multiVersion) {
sessionRowCount = new HashMap<>();
isMultiVersion = true;
} else {
sessionRowCount = null;
}
tableData = table;
this.store = database.getPageStore();
store.addIndex(this);
if (!database.isPersistent()) {
throw DbException.throwInternalError(table.getName());
}
if (create) {
rootPageId = store.allocatePage();
store.addMeta(this, session);
PageDataLeaf root = PageDataLeaf.create(this, rootPageId, PageData.ROOT);
store.update(root);
} else {
rootPageId = store.getRootPageId(id);
PageData root = getPage(rootPageId, 0);
lastKey = root.getLastKey();
rowCount = root.getRowCount();
}
if (trace.isDebugEnabled()) {
trace.debug("{0} opened rows: {1}", this, rowCount);
}
table.setRowCount(rowCount);
memoryPerPage = (Constants.MEMORY_PAGE_DATA + store.getPageSize()) >> 2;
}
@Override
public DbException getDuplicateKeyException(String key) {
if (fastDuplicateKeyException == null) {
fastDuplicateKeyException = super.getDuplicateKeyException(null);
}
return fastDuplicateKeyException;
}
@Override
public void add(Session session, Row row) {
boolean retry = false;
if (mainIndexColumn != -1) {
row.setKey(row.getValue(mainIndexColumn).getLong());
} else {
if (row.getKey() == 0) {
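                // the key is truncated to int here; a collision with an
                // existing key is possible and is handled by the retry loop
                // below via fastDuplicateKeyException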
row.setKey((int) ++lastKey);
retry = true;
}
}
if (tableData.getContainsLargeObject()) {
for (int i = 0, len = row.getColumnCount(); i < len; i++) {
Value v = row.getValue(i);
Value v2 = v.copy(database, getId());
if (v2.isLinkedToTable()) {
session.removeAtCommitStop(v2);
}
if (v != v2) {
row.setValue(i, v2);
}
}
}
// when using auto-generated values, it's possible that multiple
        // tries are required (especially if there was originally a primary key)
if (trace.isDebugEnabled()) {
trace.debug("{0} add {1}", getName(), row);
}
long add = 0;
while (true) {
try {
addTry(session, row);
break;
} catch (DbException e) {
if (e != fastDuplicateKeyException) {
throw e;
}
if (!retry) {
throw getNewDuplicateKeyException();
}
if (add == 0) {
                    // on the first retry add a small random number,
                    // to avoid collisions after a restart
row.setKey((long) (row.getKey() + Math.random() * 10_000));
} else {
row.setKey(row.getKey() + add);
}
add++;
} finally {
store.incrementChangeCount();
}
}
lastKey = Math.max(lastKey, row.getKey());
}
public DbException getNewDuplicateKeyException() {
String sql = "PRIMARY KEY ON " + table.getSQL();
if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) {
sql += "(" + indexColumns[mainIndexColumn].getSQL() + ")";
}
DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, sql);
e.setSource(this);
return e;
}
private void addTry(Session session, Row row) {
while (true) {
PageData root = getPage(rootPageId, 0);
int splitPoint = root.addRowTry(row);
if (splitPoint == -1) {
break;
}
if (trace.isDebugEnabled()) {
trace.debug("{0} split", this);
}
long pivot = splitPoint == 0 ? row.getKey() : root.getKey(splitPoint - 1);
PageData page1 = root;
PageData page2 = root.split(splitPoint);
int id = store.allocatePage();
page1.setPageId(id);
page1.setParentPageId(rootPageId);
page2.setParentPageId(rootPageId);
PageDataNode newRoot = PageDataNode.create(this, rootPageId, PageData.ROOT);
newRoot.init(page1, pivot, page2);
store.update(page1);
store.update(page2);
store.update(newRoot);
root = newRoot;
}
row.setDeleted(false);
if (multiVersion) {
if (delta == null) {
delta = new HashSet<>();
}
boolean wasDeleted = delta.remove(row);
if (!wasDeleted) {
delta.add(row);
}
incrementRowCount(session.getId(), 1);
}
invalidateRowCount();
rowCount++;
store.logAddOrRemoveRow(session, tableData.getId(), row, true);
}
/**
     * Read an overflow page.
*
* @param id the page id
* @return the page
*/
PageDataOverflow getPageOverflow(int id) {
Page p = store.getPage(id);
if (p instanceof PageDataOverflow) {
return (PageDataOverflow) p;
}
throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
p == null ? "null" : p.toString());
}
/**
* Read the given page.
*
* @param id the page id
* @param parent the parent, or -1 if unknown
* @return the page
*/
PageData getPage(int id, int parent) {
Page pd = store.getPage(id);
if (pd == null) {
PageDataLeaf empty = PageDataLeaf.create(this, id, parent);
// could have been created before, but never committed
store.logUndo(empty, null);
store.update(empty);
return empty;
} else if (!(pd instanceof PageData)) {
throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "" + pd);
}
PageData p = (PageData) pd;
if (parent != -1) {
if (p.getParentPageId() != parent) {
throw DbException.throwInternalError(p +
" parent " + p.getParentPageId() + " expected " + parent);
}
}
return p;
}
@Override
public boolean canGetFirstOrLast() {
return false;
}
/**
* Get the key from the row.
*
* @param row the row
* @param ifEmpty the value to use if the row is empty
* @param ifNull the value to use if the column is NULL
* @return the key
*/
long getKey(SearchRow row, long ifEmpty, long ifNull) {
if (row == null) {
return ifEmpty;
}
Value v = row.getValue(mainIndexColumn);
if (v == null) {
throw DbException.throwInternalError(row.toString());
} else if (v == ValueNull.INSTANCE) {
return ifNull;
}
return v.getLong();
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
long from = first == null ? Long.MIN_VALUE : first.getKey();
long to = last == null ? Long.MAX_VALUE : last.getKey();
PageData root = getPage(rootPageId, 0);
return root.find(session, from, to, isMultiVersion);
}
/**
* Search for a specific row or a set of rows.
*
* @param session the session
* @param first the key of the first row
* @param last the key of the last row
* @param multiVersion if mvcc should be used
* @return the cursor
*/
Cursor find(Session session, long first, long last, boolean multiVersion) {
PageData root = getPage(rootPageId, 0);
return root.find(session, first, last, multiVersion);
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
throw DbException.throwInternalError(toString());
}
long getLastKey() {
PageData root = getPage(rootPageId, 0);
return root.getLastKey();
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return 10 * (tableData.getRowCountApproximation() +
Constants.COST_ROW_OFFSET);
}
@Override
public boolean needRebuild() {
return false;
}
@Override
public void remove(Session session, Row row) {
if (tableData.getContainsLargeObject()) {
for (int i = 0, len = row.getColumnCount(); i < len; i++) {
Value v = row.getValue(i);
if (v.isLinkedToTable()) {
session.removeAtCommitStop(v);
}
}
}
if (trace.isDebugEnabled()) {
trace.debug("{0} remove {1}", getName(), row);
}
if (rowCount == 1) {
removeAllRows();
} else {
try {
long key = row.getKey();
PageData root = getPage(rootPageId, 0);
root.remove(key);
invalidateRowCount();
rowCount--;
} finally {
store.incrementChangeCount();
}
}
if (multiVersion) {
// if storage is null, the delete flag is not yet set
row.setDeleted(true);
if (delta == null) {
delta = new HashSet<>();
}
boolean wasAdded = delta.remove(row);
if (!wasAdded) {
delta.add(row);
}
incrementRowCount(session.getId(), -1);
}
store.logAddOrRemoveRow(session, tableData.getId(), row, false);
}
@Override
public void remove(Session session) {
if (trace.isDebugEnabled()) {
trace.debug("{0} remove", this);
}
removeAllRows();
store.free(rootPageId);
store.removeMeta(this, session);
}
@Override
public void truncate(Session session) {
if (trace.isDebugEnabled()) {
trace.debug("{0} truncate", this);
}
store.logTruncate(session, tableData.getId());
removeAllRows();
if (tableData.getContainsLargeObject() && tableData.isPersistData()) {
// unfortunately, the data is gone on rollback
session.commit(false);
database.getLobStorage().removeAllForTable(table.getId());
}
if (multiVersion) {
sessionRowCount.clear();
}
tableData.setRowCount(0);
}
private void removeAllRows() {
try {
PageData root = getPage(rootPageId, 0);
root.freeRecursive();
root = PageDataLeaf.create(this, rootPageId, PageData.ROOT);
store.removeFromCache(rootPageId);
store.update(root);
rowCount = 0;
lastKey = 0;
} finally {
store.incrementChangeCount();
}
}
@Override
public void checkRename() {
throw DbException.getUnsupportedException("PAGE");
}
@Override
public Row getRow(Session session, long key) {
return getRowWithKey(key);
}
/**
* Get the row with the given key.
*
* @param key the key
* @return the row
*/
public Row getRowWithKey(long key) {
PageData root = getPage(rootPageId, 0);
return root.getRowWithKey(key);
}
PageStore getPageStore() {
return store;
}
@Override
public long getRowCountApproximation() {
return rowCount;
}
@Override
public long getRowCount(Session session) {
if (multiVersion) {
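            // rowCount includes uncommitted changes of all sessions: subtract
            // the total uncommitted delta (rowCountDiff) and add back this
            // session's own uncommitted changes, which are visible to it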
Integer i = sessionRowCount.get(session.getId());
long count = i == null ? 0 : i.intValue();
count += rowCount;
count -= rowCountDiff;
return count;
}
return rowCount;
}
@Override
public long getDiskSpaceUsed() {
PageData root = getPage(rootPageId, 0);
return root.getDiskSpaceUsed();
}
@Override
public String getCreateSQL() {
return null;
}
@Override
public int getColumnIndex(Column col) {
// can not use this index - use the PageDelegateIndex instead
return -1;
}
@Override
public boolean isFirstColumn(Column column) {
return false;
}
@Override
public void close(Session session) {
if (trace.isDebugEnabled()) {
trace.debug("{0} close", this);
}
if (delta != null) {
delta.clear();
}
rowCountDiff = 0;
if (sessionRowCount != null) {
sessionRowCount.clear();
}
// can not close the index because it might get used afterwards,
// for example after running recovery
writeRowCount();
}
Iterator<Row> getDelta() {
if (delta == null) {
List<Row> e = Collections.emptyList();
return e.iterator();
}
return delta.iterator();
}
private void incrementRowCount(int sessionId, int count) {
if (multiVersion) {
Integer id = sessionId;
Integer c = sessionRowCount.get(id);
int current = c == null ? 0 : c.intValue();
sessionRowCount.put(id, current + count);
rowCountDiff += count;
}
}
@Override
public void commit(int operation, Row row) {
if (multiVersion) {
if (delta != null) {
delta.remove(row);
}
incrementRowCount(row.getSessionId(),
operation == UndoLogRecord.DELETE ? 1 : -1);
}
}
/**
* The root page has changed.
*
* @param session the session
* @param newPos the new position
*/
void setRootPageId(Session session, int newPos) {
store.removeMeta(this, session);
this.rootPageId = newPos;
store.addMeta(this, session);
store.addIndex(this);
}
public void setMainIndexColumn(int mainIndexColumn) {
this.mainIndexColumn = mainIndexColumn;
}
public int getMainIndexColumn() {
return mainIndexColumn;
}
@Override
public String toString() {
return getName();
}
private void invalidateRowCount() {
PageData root = getPage(rootPageId, 0);
root.setRowCountStored(PageData.UNKNOWN_ROWCOUNT);
}
@Override
public void writeRowCount() {
if (SysProperties.MODIFY_ON_WRITE && rootPageId == 0) {
// currently creating the index
return;
}
try {
PageData root = getPage(rootPageId, 0);
root.setRowCountStored(MathUtils.convertLongToInt(rowCount));
} finally {
store.incrementChangeCount();
}
}
@Override
public String getPlanSQL() {
return table.getSQL() + ".tableScan";
}
int getMemoryPerPage() {
return memoryPerPage;
}
/**
     * The memory usage of a page was changed. The new value is used to adapt
     * the average estimated memory size of a page.
*
* @param x the new memory size
*/
void memoryChange(int x) {
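        // running estimate of the average page memory usage: a plain
        // cumulative average for the first MEMORY_FACTOR samples, then an
        // exponentially weighted moving average; the +1/-1 term keeps the
        // estimate moving when the difference is below MEMORY_FACTOR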
if (memoryCount < Constants.MEMORY_FACTOR) {
memoryPerPage += (x - memoryPerPage) / ++memoryCount;
} else {
memoryPerPage += (x > memoryPerPage ? 1 : -1) +
((x - memoryPerPage) / Constants.MEMORY_FACTOR);
}
}
@Override
public boolean isRowIdIndex() {
return true;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageDataLeaf.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.lang.ref.SoftReference;
import java.util.Arrays;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.store.Data;
import org.h2.store.Page;
import org.h2.store.PageStore;
import org.h2.value.Value;
/**
* A leaf page that contains data of one or multiple rows. Format:
* <ul>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>parent page id (0 for root): int (3-6)</li>
* <li>table id: varInt</li>
* <li>column count: varInt</li>
* <li>entry count: short</li>
* <li>with overflow: the first overflow page id: int</li>
* <li>list of key / offset pairs (key: varLong, offset: shortInt)</li>
* <li>data</li>
* </ul>
*/
public class PageDataLeaf extends PageData {
private final boolean optimizeUpdate;
/**
* The row offsets.
*/
private int[] offsets;
/**
* The rows.
*/
private Row[] rows;
/**
* For pages with overflow: the soft reference to the row
*/
private SoftReference<Row> rowRef;
/**
* The page id of the first overflow page (0 if no overflow).
*/
private int firstOverflowPageId;
/**
* The start of the data area.
*/
private int start;
/**
* The size of the row in bytes for large rows.
*/
private int overflowRowSize;
private int columnCount;
private int memoryData;
private boolean writtenData;
private PageDataLeaf(PageDataIndex index, int pageId, Data data) {
super(index, pageId, data);
this.optimizeUpdate = index.getDatabase().getSettings().optimizeUpdate;
}
/**
* Create a new page.
*
* @param index the index
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
static PageDataLeaf create(PageDataIndex index, int pageId, int parentPageId) {
PageDataLeaf p = new PageDataLeaf(index, pageId, index.getPageStore()
.createData());
index.getPageStore().logUndo(p, null);
p.rows = Row.EMPTY_ARRAY;
p.parentPageId = parentPageId;
p.columnCount = index.getTable().getColumns().length;
p.writeHead();
p.start = p.data.length();
return p;
}
/**
* Read a data leaf page.
*
* @param index the index
* @param data the data
* @param pageId the page id
* @return the page
*/
public static Page read(PageDataIndex index, Data data, int pageId) {
PageDataLeaf p = new PageDataLeaf(index, pageId, data);
p.read();
return p;
}
private void read() {
data.reset();
int type = data.readByte();
data.readShortInt();
this.parentPageId = data.readInt();
int tableId = data.readVarInt();
if (tableId != index.getId()) {
throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
"page:" + getPos() + " expected table:" + index.getId() +
" got:" + tableId + " type:" + type);
}
columnCount = data.readVarInt();
entryCount = data.readShortInt();
offsets = new int[entryCount];
keys = new long[entryCount];
rows = new Row[entryCount];
if (type == Page.TYPE_DATA_LEAF) {
if (entryCount != 1) {
DbException.throwInternalError("entries: " + entryCount);
}
firstOverflowPageId = data.readInt();
}
for (int i = 0; i < entryCount; i++) {
keys[i] = data.readVarLong();
offsets[i] = data.readShortInt();
}
start = data.length();
written = true;
writtenData = true;
}
private int getRowLength(Row row) {
int size = 0;
for (int i = 0; i < columnCount; i++) {
size += data.getValueLen(row.getValue(i));
}
return size;
}
private int findInsertionPoint(long key) {
int x = find(key);
if (x < entryCount && keys[x] == key) {
            throw index.getDuplicateKeyException("" + key);
}
return x;
}
@Override
int addRowTry(Row row) {
index.getPageStore().logUndo(this, data);
int rowLength = getRowLength(row);
int pageSize = index.getPageStore().getPageSize();
int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
int keyOffsetPairLen = 2 + Data.getVarLongLen(row.getKey());
if (entryCount > 0 && last - rowLength < start + keyOffsetPairLen) {
int x = findInsertionPoint(row.getKey());
if (entryCount > 1) {
if (entryCount < 5) {
// required, otherwise the index doesn't work correctly
return entryCount / 2;
}
if (index.isSortedInsertMode()) {
return x < 2 ? 1 : x > entryCount - 1 ? entryCount - 1 : x;
}
// split near the insertion point to better fill pages
// split in half would be:
// return entryCount / 2;
int third = entryCount / 3;
return x < third ? third : x >= 2 * third ? 2 * third : x;
}
return x;
}
index.getPageStore().logUndo(this, data);
int x;
if (entryCount == 0) {
x = 0;
} else {
if (!optimizeUpdate) {
readAllRows();
}
x = findInsertionPoint(row.getKey());
}
written = false;
changeCount = index.getPageStore().getChangeCount();
last = x == 0 ? pageSize : offsets[x - 1];
int offset = last - rowLength;
start += keyOffsetPairLen;
offsets = insert(offsets, entryCount, x, offset);
add(offsets, x + 1, entryCount + 1, -rowLength);
keys = insert(keys, entryCount, x, row.getKey());
rows = insert(rows, entryCount, x, row);
entryCount++;
index.getPageStore().update(this);
if (optimizeUpdate) {
if (writtenData && offset >= start) {
byte[] d = data.getBytes();
int dataStart = offsets[entryCount - 1] + rowLength;
int dataEnd = offsets[x];
System.arraycopy(d, dataStart, d, dataStart - rowLength,
dataEnd - dataStart + rowLength);
data.setPos(dataEnd);
for (int j = 0; j < columnCount; j++) {
data.writeValue(row.getValue(j));
}
}
}
if (offset < start) {
writtenData = false;
if (entryCount > 1) {
DbException.throwInternalError("" + entryCount);
}
// need to write the overflow page id
start += 4;
int remaining = rowLength - (pageSize - start);
// fix offset
offset = start;
offsets[x] = offset;
int previous = getPos();
int dataOffset = pageSize;
int page = index.getPageStore().allocatePage();
firstOverflowPageId = page;
this.overflowRowSize = pageSize + rowLength;
writeData();
// free up the space used by the row
Row r = rows[0];
rowRef = new SoftReference<>(r);
rows[0] = null;
Data all = index.getPageStore().createData();
all.checkCapacity(data.length());
all.write(data.getBytes(), 0, data.length());
data.truncate(index.getPageStore().getPageSize());
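            // write the remainder of the row across a chain of overflow
            // pages: each page except the last holds (pageSize - START_MORE)
            // bytes plus the id of the next page; the last page holds the
            // remaining bytes and is marked with FLAG_LAST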
do {
int type, size, next;
if (remaining <= pageSize - PageDataOverflow.START_LAST) {
type = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST;
size = remaining;
next = 0;
} else {
type = Page.TYPE_DATA_OVERFLOW;
size = pageSize - PageDataOverflow.START_MORE;
next = index.getPageStore().allocatePage();
}
PageDataOverflow overflow = PageDataOverflow.create(index.getPageStore(),
page, type, previous, next, all, dataOffset, size);
index.getPageStore().update(overflow);
dataOffset += size;
remaining -= size;
previous = page;
page = next;
} while (remaining > 0);
}
if (rowRef == null) {
memoryChange(true, row);
} else {
memoryChange(true, null);
}
return -1;
}
private void removeRow(int i) {
index.getPageStore().logUndo(this, data);
written = false;
changeCount = index.getPageStore().getChangeCount();
if (!optimizeUpdate) {
readAllRows();
}
Row r = getRowAt(i);
if (r != null) {
memoryChange(false, r);
}
entryCount--;
if (entryCount < 0) {
DbException.throwInternalError("" + entryCount);
}
if (firstOverflowPageId != 0) {
start -= 4;
freeOverflow();
firstOverflowPageId = 0;
overflowRowSize = 0;
rowRef = null;
}
int keyOffsetPairLen = 2 + Data.getVarLongLen(keys[i]);
int startNext = i > 0 ? offsets[i - 1] : index.getPageStore().getPageSize();
int rowLength = startNext - offsets[i];
if (optimizeUpdate) {
if (writtenData) {
byte[] d = data.getBytes();
int dataStart = offsets[entryCount];
System.arraycopy(d, dataStart, d, dataStart + rowLength,
offsets[i] - dataStart);
Arrays.fill(d, dataStart, dataStart + rowLength, (byte) 0);
}
} else {
int clearStart = offsets[entryCount];
Arrays.fill(data.getBytes(), clearStart, clearStart + rowLength, (byte) 0);
}
start -= keyOffsetPairLen;
offsets = remove(offsets, entryCount + 1, i);
add(offsets, i, entryCount, rowLength);
keys = remove(keys, entryCount + 1, i);
rows = remove(rows, entryCount + 1, i);
}
@Override
Cursor find(Session session, long minKey, long maxKey, boolean multiVersion) {
int x = find(minKey);
return new PageDataCursor(session, this, x, maxKey, multiVersion);
}
/**
* Get the row at the given index.
*
* @param at the index
* @return the row
*/
Row getRowAt(int at) {
Row r = rows[at];
if (r == null) {
if (firstOverflowPageId == 0) {
r = readRow(data, offsets[at], columnCount);
} else {
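                // overflow rows are not pinned in rows[]; they are cached
                // behind a SoftReference so large rows can be garbage
                // collected under memory pressure and re-read when needed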
if (rowRef != null) {
r = rowRef.get();
if (r != null) {
return r;
}
}
PageStore store = index.getPageStore();
Data buff = store.createData();
int pageSize = store.getPageSize();
int offset = offsets[at];
buff.write(data.getBytes(), offset, pageSize - offset);
int next = firstOverflowPageId;
do {
PageDataOverflow page = index.getPageOverflow(next);
next = page.readInto(buff);
} while (next != 0);
overflowRowSize = pageSize + buff.length();
r = readRow(buff, 0, columnCount);
}
r.setKey(keys[at]);
if (firstOverflowPageId != 0) {
rowRef = new SoftReference<>(r);
} else {
rows[at] = r;
memoryChange(true, r);
}
}
return r;
}
int getEntryCount() {
return entryCount;
}
@Override
PageData split(int splitPoint) {
int newPageId = index.getPageStore().allocatePage();
PageDataLeaf p2 = PageDataLeaf.create(index, newPageId, parentPageId);
while (splitPoint < entryCount) {
int split = p2.addRowTry(getRowAt(splitPoint));
if (split != -1) {
DbException.throwInternalError("split " + split);
}
removeRow(splitPoint);
}
return p2;
}
@Override
long getLastKey() {
// TODO re-use keys, but remove this mechanism
if (entryCount == 0) {
return 0;
}
return getRowAt(entryCount - 1).getKey();
}
PageDataLeaf getNextPage() {
if (parentPageId == PageData.ROOT) {
return null;
}
PageDataNode next = (PageDataNode) index.getPage(parentPageId, -1);
return next.getNextPage(keys[entryCount - 1]);
}
@Override
PageDataLeaf getFirstLeaf() {
return this;
}
@Override
protected void remapChildren(int old) {
if (firstOverflowPageId == 0) {
return;
}
PageDataOverflow overflow = index.getPageOverflow(firstOverflowPageId);
overflow.setParentPageId(getPos());
index.getPageStore().update(overflow);
}
@Override
boolean remove(long key) {
int i = find(key);
if (keys == null || keys[i] != key) {
throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1,
index.getSQL() + ": " + key + " " + (keys == null ? -1 : keys[i]));
}
index.getPageStore().logUndo(this, data);
if (entryCount == 1) {
freeRecursive();
return true;
}
removeRow(i);
index.getPageStore().update(this);
return false;
}
@Override
void freeRecursive() {
index.getPageStore().logUndo(this, data);
index.getPageStore().free(getPos());
freeOverflow();
}
private void freeOverflow() {
if (firstOverflowPageId != 0) {
int next = firstOverflowPageId;
do {
PageDataOverflow page = index.getPageOverflow(next);
page.free();
next = page.getNextOverflow();
} while (next != 0);
}
}
@Override
Row getRowWithKey(long key) {
int at = find(key);
return getRowAt(at);
}
@Override
int getRowCount() {
return entryCount;
}
@Override
void setRowCountStored(int rowCount) {
// ignore
}
@Override
long getDiskSpaceUsed() {
return index.getPageStore().getPageSize();
}
@Override
public void write() {
writeData();
index.getPageStore().writePage(getPos(), data);
data.truncate(index.getPageStore().getPageSize());
}
private void readAllRows() {
for (int i = 0; i < entryCount; i++) {
getRowAt(i);
}
}
private void writeHead() {
data.reset();
int type;
if (firstOverflowPageId == 0) {
type = Page.TYPE_DATA_LEAF | Page.FLAG_LAST;
} else {
type = Page.TYPE_DATA_LEAF;
}
data.writeByte((byte) type);
data.writeShortInt(0);
if (SysProperties.CHECK2) {
if (data.length() != START_PARENT) {
DbException.throwInternalError();
}
}
data.writeInt(parentPageId);
data.writeVarInt(index.getId());
data.writeVarInt(columnCount);
data.writeShortInt(entryCount);
}
private void writeData() {
if (written) {
return;
}
if (!optimizeUpdate) {
readAllRows();
}
writeHead();
if (firstOverflowPageId != 0) {
data.writeInt(firstOverflowPageId);
data.checkCapacity(overflowRowSize);
}
for (int i = 0; i < entryCount; i++) {
data.writeVarLong(keys[i]);
data.writeShortInt(offsets[i]);
}
if (!writtenData || !optimizeUpdate) {
for (int i = 0; i < entryCount; i++) {
data.setPos(offsets[i]);
Row r = getRowAt(i);
for (int j = 0; j < columnCount; j++) {
data.writeValue(r.getValue(j));
}
}
writtenData = true;
}
written = true;
}
@Override
public String toString() {
return "page[" + getPos() + "] data leaf table:" +
index.getId() + " " + index.getTable().getName() +
" entries:" + entryCount + " parent:" + parentPageId +
(firstOverflowPageId == 0 ? "" : " overflow:" + firstOverflowPageId) +
" keys:" + Arrays.toString(keys) + " offsets:" + Arrays.toString(offsets);
}
@Override
public void moveTo(Session session, int newPos) {
PageStore store = index.getPageStore();
// load the pages into the cache, to ensure old pages
// are written
if (parentPageId != ROOT) {
store.getPage(parentPageId);
}
store.logUndo(this, data);
PageDataLeaf p2 = PageDataLeaf.create(index, newPos, parentPageId);
readAllRows();
p2.keys = keys;
p2.overflowRowSize = overflowRowSize;
p2.firstOverflowPageId = firstOverflowPageId;
p2.rowRef = rowRef;
p2.rows = rows;
if (firstOverflowPageId != 0) {
p2.rows[0] = getRowAt(0);
}
p2.entryCount = entryCount;
p2.offsets = offsets;
p2.start = start;
p2.remapChildren(getPos());
p2.writeData();
p2.data.truncate(index.getPageStore().getPageSize());
store.update(p2);
if (parentPageId == ROOT) {
index.setRootPageId(session, newPos);
} else {
PageDataNode p = (PageDataNode) store.getPage(parentPageId);
p.moveChild(getPos(), newPos);
}
store.free(getPos());
}
/**
* Set the overflow page id.
*
* @param old the old overflow page id
* @param overflow the new overflow page id
*/
void setOverflow(int old, int overflow) {
if (SysProperties.CHECK && old != firstOverflowPageId) {
DbException.throwInternalError("move " + this + " " + firstOverflowPageId);
}
index.getPageStore().logUndo(this, data);
firstOverflowPageId = overflow;
if (written) {
changeCount = index.getPageStore().getChangeCount();
writeHead();
data.writeInt(firstOverflowPageId);
}
index.getPageStore().update(this);
}
private void memoryChange(boolean add, Row r) {
int diff = r == null ? 0 : 4 + 8 + Constants.MEMORY_POINTER + r.getMemory();
memoryData += add ? diff : -diff;
index.memoryChange((Constants.MEMORY_PAGE_DATA +
memoryData + index.getPageStore().getPageSize()) >> 2);
}
@Override
public boolean isStream() {
return firstOverflowPageId > 0;
}
/**
* Read a row from the data page at the given position.
*
* @param data the data page
* @param pos the position to read from
* @param columnCount the number of columns
* @return the row
*/
private Row readRow(Data data, int pos, int columnCount) {
Value[] values = new Value[columnCount];
synchronized (data) {
data.setPos(pos);
for (int i = 0; i < columnCount; i++) {
values[i] = data.readValue();
}
}
return index.getDatabase().createRow(values, Row.MEMORY_CALCULATE);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageDataNode.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.Arrays;
import org.h2.api.DatabaseEventListener;
import org.h2.api.ErrorCode;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.store.Data;
import org.h2.store.Page;
import org.h2.store.PageStore;
import org.h2.util.Utils;
/**
 * A data node page that contains the keys and page ids of child pages. Format:
* <ul>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>parent page id (0 for root): int (3-6)</li>
 * <li>index id: varInt</li>
* <li>count of all children (-1 if not known): int</li>
* <li>entry count: short</li>
* <li>rightmost child page id: int</li>
* <li>entries (child page id: int, key: varLong)</li>
* </ul>
* The key is the largest key of the respective child, meaning key[0] is the
* largest key of child[0].
*/
public class PageDataNode extends PageData {
/**
* The page ids of the children.
*/
private int[] childPageIds;
private int rowCountStored = UNKNOWN_ROWCOUNT;
private int rowCount = UNKNOWN_ROWCOUNT;
/**
* The number of bytes used in the page
*/
private int length;
private PageDataNode(PageDataIndex index, int pageId, Data data) {
super(index, pageId, data);
}
/**
* Create a new page.
*
* @param index the index
* @param pageId the page id
* @param parentPageId the parent
* @return the page
*/
static PageDataNode create(PageDataIndex index, int pageId, int parentPageId) {
PageDataNode p = new PageDataNode(index, pageId,
index.getPageStore().createData());
index.getPageStore().logUndo(p, null);
p.parentPageId = parentPageId;
p.writeHead();
// 4 bytes for the rightmost child page id
p.length = p.data.length() + 4;
return p;
}
/**
* Read a data node page.
*
* @param index the index
* @param data the data
* @param pageId the page id
* @return the page
*/
public static Page read(PageDataIndex index, Data data, int pageId) {
PageDataNode p = new PageDataNode(index, pageId, data);
p.read();
return p;
}
private void read() {
data.reset();
data.readByte();
data.readShortInt();
this.parentPageId = data.readInt();
int indexId = data.readVarInt();
if (indexId != index.getId()) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "page:" + getPos() + " expected index:" + index.getId() +
                    " got:" + indexId);
}
rowCount = rowCountStored = data.readInt();
entryCount = data.readShortInt();
childPageIds = new int[entryCount + 1];
childPageIds[entryCount] = data.readInt();
keys = Utils.newLongArray(entryCount);
for (int i = 0; i < entryCount; i++) {
childPageIds[i] = data.readInt();
keys[i] = data.readVarLong();
}
length = data.length();
check();
written = true;
}
private void addChild(int x, int childPageId, long key) {
index.getPageStore().logUndo(this, data);
written = false;
changeCount = index.getPageStore().getChangeCount();
childPageIds = insert(childPageIds, entryCount + 1, x + 1, childPageId);
keys = insert(keys, entryCount, x, key);
entryCount++;
length += 4 + Data.getVarLongLen(key);
}
@Override
int addRowTry(Row row) {
index.getPageStore().logUndo(this, data);
int keyOffsetPairLen = 4 + Data.getVarLongLen(row.getKey());
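        // descend to the child covering the key; if the child reports a
        // split point, split it, register the new child and pivot here, and
        // retry; if this node itself is full, return its own split point so
        // the caller splits this node first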
while (true) {
int x = find(row.getKey());
PageData page = index.getPage(childPageIds[x], getPos());
int splitPoint = page.addRowTry(row);
if (splitPoint == -1) {
break;
}
if (length + keyOffsetPairLen > index.getPageStore().getPageSize()) {
return entryCount / 2;
}
long pivot = splitPoint == 0 ? row.getKey() : page.getKey(splitPoint - 1);
PageData page2 = page.split(splitPoint);
index.getPageStore().update(page);
index.getPageStore().update(page2);
addChild(x, page2.getPos(), pivot);
index.getPageStore().update(this);
}
updateRowCount(1);
return -1;
}
private void updateRowCount(int offset) {
if (rowCount != UNKNOWN_ROWCOUNT) {
rowCount += offset;
}
if (rowCountStored != UNKNOWN_ROWCOUNT) {
rowCountStored = UNKNOWN_ROWCOUNT;
index.getPageStore().logUndo(this, data);
if (written) {
writeHead();
}
index.getPageStore().update(this);
}
}
@Override
Cursor find(Session session, long minKey, long maxKey, boolean multiVersion) {
int x = find(minKey);
int child = childPageIds[x];
return index.getPage(child, getPos()).find(session, minKey, maxKey,
multiVersion);
}
@Override
PageData split(int splitPoint) {
int newPageId = index.getPageStore().allocatePage();
PageDataNode p2 = PageDataNode.create(index, newPageId, parentPageId);
int firstChild = childPageIds[splitPoint];
while (splitPoint < entryCount) {
p2.addChild(p2.entryCount, childPageIds[splitPoint + 1], keys[splitPoint]);
removeChild(splitPoint);
}
int lastChild = childPageIds[splitPoint - 1];
removeChild(splitPoint - 1);
childPageIds[splitPoint - 1] = lastChild;
p2.childPageIds[0] = firstChild;
p2.remapChildren(getPos());
return p2;
}
@Override
protected void remapChildren(int old) {
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
PageData p = index.getPage(child, old);
p.setParentPageId(getPos());
index.getPageStore().update(p);
}
}
/**
* Initialize the page.
*
* @param page1 the first child page
* @param pivot the pivot key
* @param page2 the last child page
*/
void init(PageData page1, long pivot, PageData page2) {
entryCount = 1;
childPageIds = new int[] { page1.getPos(), page2.getPos() };
keys = new long[] { pivot };
length += 4 + Data.getVarLongLen(pivot);
check();
}
@Override
long getLastKey() {
return index.getPage(childPageIds[entryCount], getPos()).getLastKey();
}
/**
* Get the next leaf page.
*
* @param key the last key of the current page
* @return the next leaf page
*/
PageDataLeaf getNextPage(long key) {
int i = find(key) + 1;
if (i > entryCount) {
if (parentPageId == PageData.ROOT) {
return null;
}
PageDataNode next = (PageDataNode) index.getPage(parentPageId, -1);
return next.getNextPage(key);
}
PageData page = index.getPage(childPageIds[i], getPos());
return page.getFirstLeaf();
}
@Override
PageDataLeaf getFirstLeaf() {
int child = childPageIds[0];
return index.getPage(child, getPos()).getFirstLeaf();
}
@Override
boolean remove(long key) {
int at = find(key);
// merge is not implemented to allow concurrent usage
// TODO maybe implement merge
PageData page = index.getPage(childPageIds[at], getPos());
boolean empty = page.remove(key);
index.getPageStore().logUndo(this, data);
updateRowCount(-1);
if (!empty) {
            // the child page is not empty - nothing else to do here
return false;
}
// this child is now empty
index.getPageStore().free(page.getPos());
if (entryCount < 1) {
// no more children - this page is empty as well
return true;
}
removeChild(at);
index.getPageStore().update(this);
return false;
}
@Override
void freeRecursive() {
index.getPageStore().logUndo(this, data);
index.getPageStore().free(getPos());
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
index.getPage(child, getPos()).freeRecursive();
}
}
@Override
Row getRowWithKey(long key) {
int at = find(key);
PageData page = index.getPage(childPageIds[at], getPos());
return page.getRowWithKey(key);
}
@Override
int getRowCount() {
if (rowCount == UNKNOWN_ROWCOUNT) {
int count = 0;
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
PageData page = index.getPage(child, getPos());
if (getPos() == page.getPos()) {
throw DbException.throwInternalError("Page is its own child: " + getPos());
}
count += page.getRowCount();
index.getDatabase().setProgress(DatabaseEventListener.STATE_SCAN_FILE,
index.getTable() + "." + index.getName(), count, Integer.MAX_VALUE);
}
rowCount = count;
}
return rowCount;
}
@Override
long getDiskSpaceUsed() {
long count = 0;
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
PageData page = index.getPage(child, getPos());
if (getPos() == page.getPos()) {
throw DbException.throwInternalError("Page is its own child: " + getPos());
}
count += page.getDiskSpaceUsed();
index.getDatabase().setProgress(DatabaseEventListener.STATE_SCAN_FILE,
index.getTable() + "." + index.getName(),
(int) (count >> 16), Integer.MAX_VALUE);
}
return count;
}
@Override
void setRowCountStored(int rowCount) {
this.rowCount = rowCount;
if (rowCountStored != rowCount) {
rowCountStored = rowCount;
index.getPageStore().logUndo(this, data);
if (written) {
changeCount = index.getPageStore().getChangeCount();
writeHead();
}
index.getPageStore().update(this);
}
}
private void check() {
if (SysProperties.CHECK) {
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
if (child == 0) {
DbException.throwInternalError();
}
}
}
}
@Override
public void write() {
writeData();
index.getPageStore().writePage(getPos(), data);
}
private void writeHead() {
data.reset();
data.writeByte((byte) Page.TYPE_DATA_NODE);
data.writeShortInt(0);
if (SysProperties.CHECK2) {
if (data.length() != START_PARENT) {
DbException.throwInternalError();
}
}
data.writeInt(parentPageId);
data.writeVarInt(index.getId());
data.writeInt(rowCountStored);
data.writeShortInt(entryCount);
}
private void writeData() {
if (written) {
return;
}
check();
writeHead();
data.writeInt(childPageIds[entryCount]);
for (int i = 0; i < entryCount; i++) {
data.writeInt(childPageIds[i]);
data.writeVarLong(keys[i]);
}
if (length != data.length()) {
DbException.throwInternalError("expected pos: " + length +
" got: " + data.length());
}
written = true;
}
private void removeChild(int i) {
index.getPageStore().logUndo(this, data);
written = false;
changeCount = index.getPageStore().getChangeCount();
int removedKeyIndex = i < entryCount ? i : i - 1;
entryCount--;
length -= 4 + Data.getVarLongLen(keys[removedKeyIndex]);
if (entryCount < 0) {
DbException.throwInternalError("" + entryCount);
}
keys = remove(keys, entryCount + 1, removedKeyIndex);
childPageIds = remove(childPageIds, entryCount + 2, i);
}
@Override
public String toString() {
return "page[" + getPos() + "] data node table:" + index.getId() +
" entries:" + entryCount + " " + Arrays.toString(childPageIds);
}
@Override
public void moveTo(Session session, int newPos) {
PageStore store = index.getPageStore();
// load the pages into the cache, to ensure old pages
// are written
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
store.getPage(child);
}
if (parentPageId != ROOT) {
store.getPage(parentPageId);
}
store.logUndo(this, data);
PageDataNode p2 = PageDataNode.create(index, newPos, parentPageId);
p2.rowCountStored = rowCountStored;
p2.rowCount = rowCount;
p2.childPageIds = childPageIds;
p2.keys = keys;
p2.entryCount = entryCount;
p2.length = length;
store.update(p2);
if (parentPageId == ROOT) {
index.setRootPageId(session, newPos);
} else {
PageDataNode p = (PageDataNode) store.getPage(parentPageId);
p.moveChild(getPos(), newPos);
}
for (int i = 0; i < entryCount + 1; i++) {
int child = childPageIds[i];
PageData p = (PageData) store.getPage(child);
p.setParentPageId(newPos);
store.update(p);
}
store.free(getPos());
}
/**
* One of the children has moved to another page.
*
* @param oldPos the old position
* @param newPos the new position
*/
void moveChild(int oldPos, int newPos) {
for (int i = 0; i < entryCount + 1; i++) {
if (childPageIds[i] == oldPos) {
index.getPageStore().logUndo(this, data);
written = false;
changeCount = index.getPageStore().getChangeCount();
childPageIds[i] = newPos;
index.getPageStore().update(this);
return;
}
}
throw DbException.throwInternalError(oldPos + " " + newPos);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageDataOverflow.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.store.Data;
import org.h2.store.Page;
import org.h2.store.PageStore;
/**
* Overflow data for a leaf page. Format:
* <ul>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>parent page id (0 for root): int (3-6)</li>
* <li>more data: next overflow page id: int (7-10)</li>
* <li>last remaining size: short (7-8)</li>
* <li>data (11-/9-)</li>
* </ul>
*/
public class PageDataOverflow extends Page {
/**
* The start of the data in the last overflow page.
*/
static final int START_LAST = 9;
/**
     * The start of the data in an overflow page that is not the last one.
*/
static final int START_MORE = 11;
private static final int START_NEXT_OVERFLOW = 7;
/**
* The page store.
*/
private final PageStore store;
/**
* The page type.
*/
private int type;
/**
* The parent page (overflow or leaf).
*/
private int parentPageId;
/**
* The next overflow page, or 0.
*/
private int nextPage;
private final Data data;
private int start;
private int size;
/**
* Create an object from the given data page.
*
* @param store the page store
* @param pageId the page id
* @param data the data page
*/
private PageDataOverflow(PageStore store, int pageId, Data data) {
this.store = store;
setPos(pageId);
this.data = data;
}
/**
* Read an overflow page.
*
* @param store the page store
* @param data the data
* @param pageId the page id
* @return the page
*/
public static Page read(PageStore store, Data data, int pageId) {
PageDataOverflow p = new PageDataOverflow(store, pageId, data);
p.read();
return p;
}
/**
* Create a new overflow page.
*
* @param store the page store
* @param page the page id
* @param type the page type
* @param parentPageId the parent page id
* @param next the next page or 0
* @param all the data
* @param offset the offset within the data
* @param size the number of bytes
* @return the page
*/
static PageDataOverflow create(PageStore store, int page,
int type, int parentPageId, int next,
Data all, int offset, int size) {
Data data = store.createData();
PageDataOverflow p = new PageDataOverflow(store, page, data);
store.logUndo(p, null);
data.writeByte((byte) type);
data.writeShortInt(0);
data.writeInt(parentPageId);
if (type == Page.TYPE_DATA_OVERFLOW) {
data.writeInt(next);
} else {
data.writeShortInt(size);
}
p.start = data.length();
data.write(all.getBytes(), offset, size);
p.type = type;
p.parentPageId = parentPageId;
p.nextPage = next;
p.size = size;
return p;
}
/**
* Read the page.
*/
private void read() {
data.reset();
type = data.readByte();
data.readShortInt();
parentPageId = data.readInt();
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
size = data.readShortInt();
nextPage = 0;
} else if (type == Page.TYPE_DATA_OVERFLOW) {
nextPage = data.readInt();
size = store.getPageSize() - data.length();
} else {
throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "page:" +
getPos() + " type:" + type);
}
start = data.length();
}
/**
* Read the data into a target buffer.
*
* @param target the target data page
* @return the next page, or 0 if no next page
*/
int readInto(Data target) {
target.checkCapacity(size);
if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) {
target.write(data.getBytes(), START_LAST, size);
return 0;
}
target.write(data.getBytes(), START_MORE, size);
return nextPage;
}
int getNextOverflow() {
return nextPage;
}
private void writeHead() {
data.writeByte((byte) type);
data.writeShortInt(0);
data.writeInt(parentPageId);
}
@Override
public void write() {
writeData();
store.writePage(getPos(), data);
}
private void writeData() {
data.reset();
writeHead();
if (type == Page.TYPE_DATA_OVERFLOW) {
data.writeInt(nextPage);
} else {
data.writeShortInt(size);
}
}
@Override
public String toString() {
return "page[" + getPos() + "] data leaf overflow parent:" +
parentPageId + " next:" + nextPage;
}
/**
* Get the estimated memory size.
*
* @return number of double words (4 bytes)
*/
@Override
public int getMemory() {
return (Constants.MEMORY_PAGE_DATA_OVERFLOW + store.getPageSize()) >> 2;
}
void setParentPageId(int parent) {
store.logUndo(this, data);
this.parentPageId = parent;
}
@Override
public void moveTo(Session session, int newPos) {
// load the pages into the cache, to ensure old pages
// are written
Page parent = store.getPage(parentPageId);
if (parent == null) {
throw DbException.throwInternalError();
}
PageDataOverflow next = null;
if (nextPage != 0) {
next = (PageDataOverflow) store.getPage(nextPage);
}
store.logUndo(this, data);
PageDataOverflow p2 = PageDataOverflow.create(store, newPos, type,
parentPageId, nextPage, data, start, size);
store.update(p2);
if (next != null) {
next.setParentPageId(newPos);
store.update(next);
}
if (parent instanceof PageDataOverflow) {
PageDataOverflow p1 = (PageDataOverflow) parent;
p1.setNext(getPos(), newPos);
} else {
PageDataLeaf p1 = (PageDataLeaf) parent;
p1.setOverflow(getPos(), newPos);
}
store.update(parent);
store.free(getPos());
}
private void setNext(int old, int nextPage) {
if (SysProperties.CHECK && old != this.nextPage) {
DbException.throwInternalError("move " + this + " " + nextPage);
}
store.logUndo(this, data);
this.nextPage = nextPage;
data.setInt(START_NEXT_OVERFLOW, nextPage);
}
/**
* Free this page.
*/
void free() {
store.logUndo(this, data);
store.free(getPos());
}
@Override
public boolean canRemove() {
return true;
}
@Override
public boolean isStream() {
return true;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageDelegateIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.HashSet;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.store.PageStore;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.TableFilter;
/**
* An index that delegates indexing to the page data index.
*/
public class PageDelegateIndex extends PageIndex {
private final PageDataIndex mainIndex;
public PageDelegateIndex(RegularTable table, int id, String name,
IndexType indexType, PageDataIndex mainIndex, boolean create,
Session session) {
IndexColumn[] cols = IndexColumn.wrap(
new Column[] { table.getColumn(mainIndex.getMainIndexColumn())});
this.initBaseIndex(table, id, name, cols, indexType);
this.mainIndex = mainIndex;
if (!database.isPersistent() || id < 0) {
throw DbException.throwInternalError("" + name);
}
PageStore store = database.getPageStore();
store.addIndex(this);
if (create) {
store.addMeta(this, session);
}
}
@Override
public void add(Session session, Row row) {
// nothing to do
}
@Override
public boolean canFindNext() {
return false;
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public void close(Session session) {
// nothing to do
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
long min = mainIndex.getKey(first, Long.MIN_VALUE, Long.MIN_VALUE);
        // the ifNull value is MIN_VALUE as well, because the column is never
        // NULL, so this avoids returning all rows (returning one row is OK)
long max = mainIndex.getKey(last, Long.MAX_VALUE, Long.MIN_VALUE);
return mainIndex.find(session, min, max, false);
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
Cursor cursor;
if (first) {
cursor = mainIndex.find(session, Long.MIN_VALUE, Long.MAX_VALUE, false);
} else {
long x = mainIndex.getLastKey();
cursor = mainIndex.find(session, x, x, false);
}
cursor.next();
return cursor;
}
@Override
public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) {
throw DbException.throwInternalError(toString());
}
@Override
public int getColumnIndex(Column col) {
if (col.getColumnId() == mainIndex.getMainIndexColumn()) {
return 0;
}
return -1;
}
@Override
public boolean isFirstColumn(Column column) {
return getColumnIndex(column) == 0;
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return 10 * getCostRangeIndex(masks, mainIndex.getRowCount(session),
filters, filter, sortOrder, false, allColumnsSet);
}
@Override
public boolean needRebuild() {
return false;
}
@Override
public void remove(Session session, Row row) {
// nothing to do
}
@Override
public void remove(Session session) {
mainIndex.setMainIndexColumn(-1);
session.getDatabase().getPageStore().removeMeta(this, session);
}
@Override
public void truncate(Session session) {
// nothing to do
}
@Override
public void checkRename() {
// ok
}
@Override
public long getRowCount(Session session) {
return mainIndex.getRowCount(session);
}
@Override
public long getRowCountApproximation() {
return mainIndex.getRowCountApproximation();
}
@Override
public long getDiskSpaceUsed() {
return mainIndex.getDiskSpaceUsed();
}
@Override
public void writeRowCount() {
// ignore
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/PageIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
/**
* A page store index.
*/
public abstract class PageIndex extends BaseIndex {
/**
* The root page of this index.
*/
protected int rootPageId;
private boolean sortedInsertMode;
/**
* Get the root page of this index.
*
* @return the root page id
*/
public int getRootPageId() {
return rootPageId;
}
/**
* Write back the row count if it has changed.
*/
public abstract void writeRowCount();
@Override
public void setSortedInsertMode(boolean sortedInsertMode) {
this.sortedInsertMode = sortedInsertMode;
}
boolean isSortedInsertMode() {
return sortedInsertMode;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/RangeCursor.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.value.Value;
import org.h2.value.ValueLong;
/**
* The cursor implementation for the range index.
*/
class RangeCursor implements Cursor {
private final Session session;
private boolean beforeFirst;
private long current;
private Row currentRow;
private final long start, end, step;
RangeCursor(Session session, long start, long end) {
this(session, start, end, 1);
}
RangeCursor(Session session, long start, long end, long step) {
this.session = session;
this.start = start;
this.end = end;
this.step = step;
beforeFirst = true;
}
@Override
public Row get() {
return currentRow;
}
@Override
public SearchRow getSearchRow() {
return currentRow;
}
@Override
public boolean next() {
if (beforeFirst) {
beforeFirst = false;
current = start;
} else {
current += step;
}
currentRow = session.createRow(new Value[]{ValueLong.get(current)}, 1);
return step > 0 ? current <= end : current >= end;
}
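    // Added example (illustrative): for start=1, end=10, step=3 this cursor
    // yields 1, 4, 7, 10, and the following call returns false (current=13
    // exceeds end). With a negative step the second branch applies and the
    // cursor counts down until current < end.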
@Override
public boolean previous() {
throw DbException.throwInternalError(toString());
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/RangeIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.HashSet;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RangeTable;
import org.h2.table.TableFilter;
/**
* An index for the SYSTEM_RANGE table.
 * This index can only scan through all rows; search is not supported.
*/
public class RangeIndex extends BaseIndex {
private final RangeTable rangeTable;
public RangeIndex(RangeTable table, IndexColumn[] columns) {
initBaseIndex(table, 0, "RANGE_INDEX", columns,
IndexType.createNonUnique(true));
this.rangeTable = table;
}
@Override
public void close(Session session) {
// nothing to do
}
@Override
public void add(Session session, Row row) {
throw DbException.getUnsupportedException("SYSTEM_RANGE");
}
@Override
public void remove(Session session, Row row) {
throw DbException.getUnsupportedException("SYSTEM_RANGE");
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
long min = rangeTable.getMin(session), start = min;
long max = rangeTable.getMax(session), end = max;
long step = rangeTable.getStep(session);
try {
start = Math.max(min, first == null ? min : first.getValue(0).getLong());
} catch (Exception e) {
// error when converting the value - ignore
}
try {
end = Math.min(max, last == null ? max : last.getValue(0).getLong());
} catch (Exception e) {
// error when converting the value - ignore
}
return new RangeCursor(session, start, end, step);
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return 1;
}
@Override
public String getCreateSQL() {
return null;
}
@Override
public void remove(Session session) {
throw DbException.getUnsupportedException("SYSTEM_RANGE");
}
@Override
public void truncate(Session session) {
throw DbException.getUnsupportedException("SYSTEM_RANGE");
}
@Override
public boolean needRebuild() {
return false;
}
@Override
public void checkRename() {
throw DbException.getUnsupportedException("SYSTEM_RANGE");
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
long pos = first ? rangeTable.getMin(session) : rangeTable.getMax(session);
return new RangeCursor(session, pos, pos);
}
@Override
public long getRowCount(Session session) {
return rangeTable.getRowCountApproximation();
}
@Override
public long getRowCountApproximation() {
return rangeTable.getRowCountApproximation();
}
@Override
public long getDiskSpaceUsed() {
return 0;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/ScanCursor.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.Iterator;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
* The cursor implementation for the scan index.
*/
public class ScanCursor implements Cursor {
private final ScanIndex scan;
private Row row;
private final Session session;
private final boolean multiVersion;
private Iterator<Row> delta;
ScanCursor(Session session, ScanIndex scan, boolean multiVersion) {
this.session = session;
this.scan = scan;
this.multiVersion = multiVersion;
if (multiVersion) {
delta = scan.getDelta();
}
row = null;
}
@Override
public Row get() {
return row;
}
@Override
public SearchRow getSearchRow() {
return row;
}
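    // Added note (an interpretation of next() below, not original
    // documentation): in multi-version mode the iteration has two phases.
    // The delta set is drained first, returning only rows deleted by
    // *another* session, since those must remain visible here; then the base
    // rows are scanned, skipping uncommitted rows of other sessions.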
@Override
public boolean next() {
if (multiVersion) {
while (true) {
if (delta != null) {
if (!delta.hasNext()) {
delta = null;
row = null;
continue;
}
row = delta.next();
if (!row.isDeleted() || row.getSessionId() == session.getId()) {
continue;
}
} else {
row = scan.getNextRow(row);
if (row != null && row.getSessionId() != 0 &&
row.getSessionId() != session.getId()) {
continue;
}
}
break;
}
return row != null;
}
row = scan.getNextRow(row);
return row != null;
}
@Override
public boolean previous() {
throw DbException.throwInternalError(toString());
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/ScanIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.engine.UndoLogRecord;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.TableFilter;
import org.h2.util.New;
/**
 * The scan index is not really an 'index' in the strict sense, because it
 * cannot be used for direct lookup. It can only be used to iterate over all rows
* of a table. Each regular table has one such object, even if no primary key or
* indexes are defined.
*/
public class ScanIndex extends BaseIndex {
private long firstFree = -1;
private ArrayList<Row> rows = New.arrayList();
private final RegularTable tableData;
private int rowCountDiff;
private final HashMap<Integer, Integer> sessionRowCount;
private HashSet<Row> delta;
private long rowCount;
public ScanIndex(RegularTable table, int id, IndexColumn[] columns,
IndexType indexType) {
initBaseIndex(table, id, table.getName() + "_DATA", columns, indexType);
if (database.isMultiVersion()) {
sessionRowCount = new HashMap<>();
} else {
sessionRowCount = null;
}
tableData = table;
}
@Override
public void remove(Session session) {
truncate(session);
}
@Override
public void truncate(Session session) {
rows = New.arrayList();
firstFree = -1;
if (tableData.getContainsLargeObject() && tableData.isPersistData()) {
database.getLobStorage().removeAllForTable(table.getId());
}
tableData.setRowCount(0);
rowCount = 0;
rowCountDiff = 0;
if (database.isMultiVersion()) {
sessionRowCount.clear();
}
}
@Override
public String getCreateSQL() {
return null;
}
@Override
public void close(Session session) {
// nothing to do
}
@Override
public Row getRow(Session session, long key) {
return rows.get((int) key);
}
@Override
public void add(Session session, Row row) {
// in-memory
if (firstFree == -1) {
int key = rows.size();
row.setKey(key);
rows.add(row);
} else {
long key = firstFree;
Row free = rows.get((int) key);
firstFree = free.getKey();
row.setKey(key);
rows.set((int) key, row);
}
row.setDeleted(false);
if (database.isMultiVersion()) {
if (delta == null) {
delta = new HashSet<>();
}
boolean wasDeleted = delta.remove(row);
if (!wasDeleted) {
delta.add(row);
}
incrementRowCount(session.getId(), 1);
}
rowCount++;
}
@Override
public void commit(int operation, Row row) {
if (database.isMultiVersion()) {
if (delta != null) {
delta.remove(row);
}
incrementRowCount(row.getSessionId(),
operation == UndoLogRecord.DELETE ? 1 : -1);
}
}
private void incrementRowCount(int sessionId, int count) {
if (database.isMultiVersion()) {
Integer id = sessionId;
Integer c = sessionRowCount.get(id);
int current = c == null ? 0 : c.intValue();
sessionRowCount.put(id, current + count);
rowCountDiff += count;
}
}
@Override
public void remove(Session session, Row row) {
// in-memory
if (!database.isMultiVersion() && rowCount == 1) {
rows = New.arrayList();
firstFree = -1;
} else {
Row free = session.createRow(null, 1);
free.setKey(firstFree);
long key = row.getKey();
if (rows.size() <= key) {
throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1,
rows.size() + ": " + key);
}
rows.set((int) key, free);
firstFree = key;
}
if (database.isMultiVersion()) {
// if storage is null, the delete flag is not yet set
row.setDeleted(true);
if (delta == null) {
delta = new HashSet<>();
}
boolean wasAdded = delta.remove(row);
if (!wasAdded) {
delta.add(row);
}
incrementRowCount(session.getId(), -1);
}
rowCount--;
}
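    // Added sketch (illustrative, not original source): deleted slots form a
    // free list threaded through the keys of placeholder rows. Assuming rows
    // [r0, r1, r2] and firstFree == -1:
    //
    //   remove(r1): rows == [r0, free(key=-1), r2], firstFree == 1
    //   remove(r0): rows == [free(key=1), free(key=-1), r2], firstFree == 0
    //   add(rX):    rX reuses slot 0 and firstFree becomes 1 again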
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
return new ScanCursor(session, this, database.isMultiVersion());
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return tableData.getRowCountApproximation() + Constants.COST_ROW_OFFSET;
}
@Override
public long getRowCount(Session session) {
if (database.isMultiVersion()) {
Integer i = sessionRowCount.get(session.getId());
long count = i == null ? 0 : i.intValue();
count += rowCount;
count -= rowCountDiff;
return count;
}
return rowCount;
}
/**
* Get the next row that is stored after this row.
*
* @param row the current row or null to start the scan
* @return the next row or null if there are no more rows
*/
Row getNextRow(Row row) {
long key;
if (row == null) {
key = -1;
} else {
key = row.getKey();
}
while (true) {
key++;
if (key >= rows.size()) {
return null;
}
row = rows.get((int) key);
if (!row.isEmpty()) {
return row;
}
}
}
@Override
public int getColumnIndex(Column col) {
// the scan index cannot use any columns
return -1;
}
@Override
public boolean isFirstColumn(Column column) {
return false;
}
@Override
public void checkRename() {
throw DbException.getUnsupportedException("SCAN");
}
@Override
public boolean needRebuild() {
return false;
}
@Override
public boolean canGetFirstOrLast() {
return false;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
throw DbException.getUnsupportedException("SCAN");
}
Iterator<Row> getDelta() {
if (delta == null) {
List<Row> e = Collections.emptyList();
return e.iterator();
}
return delta.iterator();
}
@Override
public long getRowCountApproximation() {
return rowCount;
}
@Override
public long getDiskSpaceUsed() {
return 0;
}
@Override
public String getPlanSQL() {
return table.getSQL() + ".tableScan";
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/SingleRowCursor.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
* A cursor with at most one row.
*/
public class SingleRowCursor implements Cursor {
private Row row;
private boolean end;
/**
* Create a new cursor.
*
     * @param row the single row, or null for an empty cursor
*/
public SingleRowCursor(Row row) {
this.row = row;
}
@Override
public Row get() {
return row;
}
@Override
public SearchRow getSearchRow() {
return row;
}
@Override
public boolean next() {
if (row == null || end) {
row = null;
return false;
}
end = true;
return true;
}
@Override
public boolean previous() {
throw DbException.throwInternalError(toString());
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/SpatialIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.result.SearchRow;
import org.h2.table.TableFilter;
/**
* A spatial index. Spatial indexes are used to speed up searching
* spatial/geometric data.
*/
public interface SpatialIndex extends Index {
/**
* Find a row or a list of rows and create a cursor to iterate over the
* result.
*
* @param filter the table filter (which possibly knows about additional
* conditions)
* @param first the lower bound
* @param last the upper bound
     * @param intersection the geometry with which values should intersect, or
* null for anything
* @return the cursor to iterate over the results
*/
Cursor findByGeometry(TableFilter filter, SearchRow first, SearchRow last,
SearchRow intersection);
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/SpatialTreeIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.HashSet;
import java.util.Iterator;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.mvstore.MVStore;
import org.h2.mvstore.db.MVTableEngine;
import org.h2.mvstore.rtree.MVRTreeMap;
import org.h2.mvstore.rtree.SpatialKey;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.Table;
import org.h2.table.TableFilter;
import org.h2.value.Value;
import org.h2.value.ValueGeometry;
import org.h2.value.ValueNull;
import org.locationtech.jts.geom.Envelope;
import org.locationtech.jts.geom.Geometry;
/**
 * This is an index based on an MVR-TreeMap.
*
* @author Thomas Mueller
* @author Noel Grandin
* @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888
*/
public class SpatialTreeIndex extends BaseIndex implements SpatialIndex {
private static final String MAP_PREFIX = "RTREE_";
private final MVRTreeMap<Long> treeMap;
private final MVStore store;
private boolean closed;
private boolean needRebuild;
/**
* Constructor.
*
* @param table the table instance
* @param id the index id
* @param indexName the index name
* @param columns the indexed columns (only one geometry column allowed)
* @param persistent whether the index should be persisted
* @param indexType the index type (only spatial index)
* @param create whether to create a new index
* @param session the session.
*/
public SpatialTreeIndex(Table table, int id, String indexName,
IndexColumn[] columns, IndexType indexType, boolean persistent,
boolean create, Session session) {
if (indexType.isUnique()) {
throw DbException.getUnsupportedException("not unique");
}
if (!persistent && !create) {
throw DbException.getUnsupportedException(
"Non persistent index called with create==false");
}
if (columns.length > 1) {
throw DbException.getUnsupportedException(
"can only do one column");
}
if ((columns[0].sortType & SortOrder.DESCENDING) != 0) {
throw DbException.getUnsupportedException(
"cannot do descending");
}
if ((columns[0].sortType & SortOrder.NULLS_FIRST) != 0) {
throw DbException.getUnsupportedException(
"cannot do nulls first");
}
if ((columns[0].sortType & SortOrder.NULLS_LAST) != 0) {
throw DbException.getUnsupportedException(
"cannot do nulls last");
}
initBaseIndex(table, id, indexName, columns, indexType);
this.needRebuild = create;
this.table = table;
if (!database.isStarting()) {
if (columns[0].column.getType() != Value.GEOMETRY) {
throw DbException.getUnsupportedException(
"spatial index on non-geometry column, " +
columns[0].column.getCreateSQL());
}
}
if (!persistent) {
// Index in memory
store = MVStore.open(null);
treeMap = store.openMap("spatialIndex",
new MVRTreeMap.Builder<Long>());
} else {
if (id < 0) {
throw DbException.getUnsupportedException(
"Persistent index with id<0");
}
MVTableEngine.init(session.getDatabase());
store = session.getDatabase().getMvStore().getStore();
// Called after CREATE SPATIAL INDEX or
// by PageStore.addMeta
treeMap = store.openMap(MAP_PREFIX + getId(),
new MVRTreeMap.Builder<Long>());
if (treeMap.isEmpty()) {
needRebuild = true;
}
}
}
@Override
public void close(Session session) {
store.close();
closed = true;
}
@Override
public void add(Session session, Row row) {
if (closed) {
throw DbException.throwInternalError();
}
treeMap.add(getKey(row), row.getKey());
}
private SpatialKey getKey(SearchRow row) {
if (row == null) {
return null;
}
Value v = row.getValue(columnIds[0]);
if (v == ValueNull.INSTANCE) {
return null;
}
Geometry g = ((ValueGeometry) v.convertTo(Value.GEOMETRY)).getGeometryNoCopy();
Envelope env = g.getEnvelopeInternal();
return new SpatialKey(row.getKey(),
(float) env.getMinX(), (float) env.getMaxX(),
(float) env.getMinY(), (float) env.getMaxY());
}
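    // Added example (illustrative): for a geometry value POINT (1 2) the
    // envelope is degenerate, so the resulting SpatialKey spans
    // minX == maxX == 1.0f and minY == maxY == 2.0f, keyed by the row key.
    // Note the double-to-float casts above: bounding boxes are stored with
    // float precision only.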
@Override
public void remove(Session session, Row row) {
if (closed) {
throw DbException.throwInternalError();
}
if (!treeMap.remove(getKey(row), row.getKey())) {
throw DbException.throwInternalError("row not found");
}
}
@Override
public Cursor find(TableFilter filter, SearchRow first, SearchRow last) {
return find(filter.getSession());
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
return find(session);
}
private Cursor find(Session session) {
return new SpatialCursor(treeMap.keySet().iterator(), table, session);
}
@Override
public Cursor findByGeometry(TableFilter filter, SearchRow first,
SearchRow last, SearchRow intersection) {
if (intersection == null) {
return find(filter.getSession(), first, last);
}
return new SpatialCursor(
treeMap.findIntersectingKeys(getKey(intersection)), table,
filter.getSession());
}
/**
* Compute spatial index cost
* @param masks Search mask
* @param columns Table columns
* @return Index cost hint
*/
public static long getCostRangeIndex(int[] masks, Column[] columns) {
// Never use spatial tree index without spatial filter
if (columns.length == 0) {
return Long.MAX_VALUE;
}
for (Column column : columns) {
int index = column.getColumnId();
int mask = masks[index];
if ((mask & IndexCondition.SPATIAL_INTERSECTS) != IndexCondition.SPATIAL_INTERSECTS) {
return Long.MAX_VALUE;
}
}
return 2;
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return getCostRangeIndex(masks, columns);
}
@Override
public void remove(Session session) {
if (!treeMap.isClosed()) {
store.removeMap(treeMap);
}
}
@Override
public void truncate(Session session) {
treeMap.clear();
}
@Override
public void checkRename() {
// nothing to do
}
@Override
public boolean needRebuild() {
return needRebuild;
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
if (closed) {
throw DbException.throwInternalError(toString());
}
if (!first) {
            throw DbException.throwInternalError(
                    "Spatial index can only be fetched in ascending order");
}
return find(session);
}
@Override
public long getRowCount(Session session) {
return treeMap.sizeAsLong();
}
@Override
public long getRowCountApproximation() {
return treeMap.sizeAsLong();
}
@Override
public long getDiskSpaceUsed() {
// TODO estimate disk space usage
return 0;
}
/**
* A cursor to iterate over spatial keys.
*/
private static final class SpatialCursor implements Cursor {
private final Iterator<SpatialKey> it;
private SpatialKey current;
private final Table table;
private Session session;
public SpatialCursor(Iterator<SpatialKey> it, Table table, Session session) {
this.it = it;
this.table = table;
this.session = session;
}
@Override
public Row get() {
return table.getRow(session, current.getId());
}
@Override
public SearchRow getSearchRow() {
return get();
}
@Override
public boolean next() {
if (!it.hasNext()) {
return false;
}
current = it.next();
return true;
}
@Override
public boolean previous() {
return false;
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/TreeCursor.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.result.Row;
import org.h2.result.SearchRow;
/**
* The cursor implementation for a tree index.
*/
public class TreeCursor implements Cursor {
private final TreeIndex tree;
private TreeNode node;
private boolean beforeFirst;
private final SearchRow first, last;
TreeCursor(TreeIndex tree, TreeNode node, SearchRow first, SearchRow last) {
this.tree = tree;
this.node = node;
this.first = first;
this.last = last;
beforeFirst = true;
}
@Override
public Row get() {
return node == null ? null : node.row;
}
@Override
public SearchRow getSearchRow() {
return get();
}
@Override
public boolean next() {
if (beforeFirst) {
beforeFirst = false;
if (node == null) {
return false;
}
if (first != null && tree.compareRows(node.row, first) < 0) {
node = next(node);
}
} else {
node = next(node);
}
if (node != null && last != null) {
if (tree.compareRows(node.row, last) > 0) {
node = null;
}
}
return node != null;
}
@Override
public boolean previous() {
node = previous(node);
return node != null;
}
/**
* Get the next node if there is one.
*
* @param x the node
* @return the next node or null
*/
private static TreeNode next(TreeNode x) {
if (x == null) {
return null;
}
TreeNode r = x.right;
if (r != null) {
x = r;
TreeNode l = x.left;
while (l != null) {
x = l;
l = x.left;
}
return x;
}
TreeNode ch = x;
x = x.parent;
while (x != null && ch == x.right) {
ch = x;
x = x.parent;
}
return x;
}
/**
* Get the previous node if there is one.
*
* @param x the node
* @return the previous node or null
*/
private static TreeNode previous(TreeNode x) {
if (x == null) {
return null;
}
TreeNode l = x.left;
if (l != null) {
x = l;
TreeNode r = x.right;
while (r != null) {
x = r;
r = x.right;
}
return x;
}
TreeNode ch = x;
x = x.parent;
while (x != null && ch == x.left) {
ch = x;
x = x.parent;
}
return x;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/TreeIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.HashSet;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.TableFilter;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
 * The tree index is an in-memory index based on a binary AVL tree.
*/
public class TreeIndex extends BaseIndex {
private TreeNode root;
private final RegularTable tableData;
private long rowCount;
private boolean closed;
public TreeIndex(RegularTable table, int id, String indexName,
IndexColumn[] columns, IndexType indexType) {
initBaseIndex(table, id, indexName, columns, indexType);
tableData = table;
if (!database.isStarting()) {
checkIndexColumnTypes(columns);
}
}
@Override
public void close(Session session) {
root = null;
closed = true;
}
@Override
public void add(Session session, Row row) {
if (closed) {
throw DbException.throwInternalError();
}
TreeNode i = new TreeNode(row);
TreeNode n = root, x = n;
boolean isLeft = true;
while (true) {
if (n == null) {
if (x == null) {
root = i;
rowCount++;
return;
}
set(x, isLeft, i);
break;
}
Row r = n.row;
int compare = compareRows(row, r);
if (compare == 0) {
if (indexType.isUnique()) {
if (!mayHaveNullDuplicates(row)) {
throw getDuplicateKeyException(row.toString());
}
}
compare = compareKeys(row, r);
}
isLeft = compare < 0;
x = n;
n = child(x, isLeft);
}
balance(x, isLeft);
rowCount++;
}
private void balance(TreeNode x, boolean isLeft) {
while (true) {
int sign = isLeft ? 1 : -1;
switch (x.balance * sign) {
case 1:
x.balance = 0;
return;
case 0:
x.balance = -sign;
break;
case -1:
TreeNode l = child(x, isLeft);
if (l.balance == -sign) {
replace(x, l);
set(x, isLeft, child(l, !isLeft));
set(l, !isLeft, x);
x.balance = 0;
l.balance = 0;
} else {
TreeNode r = child(l, !isLeft);
replace(x, r);
set(l, !isLeft, child(r, isLeft));
set(r, isLeft, l);
set(x, isLeft, child(r, !isLeft));
set(r, !isLeft, x);
int rb = r.balance;
x.balance = (rb == -sign) ? sign : 0;
l.balance = (rb == sign) ? -sign : 0;
r.balance = 0;
}
return;
default:
DbException.throwInternalError("b:" + x.balance * sign);
}
if (x == root) {
return;
}
isLeft = x.isFromLeft();
x = x.parent;
}
}
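    // Added sketch (illustrative): the "case -1" branch above performs the
    // classic AVL rotations. For isLeft == true (sign == 1) with a left child
    // l where l.balance == -1, the single rotation rearranges
    //
    //         x                l
    //        / \              / \
    //       l   C    ==>     A   x
    //      / \                  / \
    //     A   B                B   C
    //
    // which is exactly replace(x, l); set(x, true, l.right); set(l, false, x)
    // in the code above; the else branch is the corresponding double rotation.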
private static TreeNode child(TreeNode x, boolean isLeft) {
return isLeft ? x.left : x.right;
}
private void replace(TreeNode x, TreeNode n) {
if (x == root) {
root = n;
if (n != null) {
n.parent = null;
}
} else {
set(x.parent, x.isFromLeft(), n);
}
}
private static void set(TreeNode parent, boolean left, TreeNode n) {
if (left) {
parent.left = n;
} else {
parent.right = n;
}
if (n != null) {
n.parent = parent;
}
}
@Override
public void remove(Session session, Row row) {
if (closed) {
throw DbException.throwInternalError();
}
TreeNode x = findFirstNode(row, true);
if (x == null) {
throw DbException.throwInternalError("not found!");
}
TreeNode n;
if (x.left == null) {
n = x.right;
} else if (x.right == null) {
n = x.left;
} else {
TreeNode d = x;
x = x.left;
for (TreeNode temp = x; (temp = temp.right) != null;) {
x = temp;
}
// x will be replaced with n later
n = x.left;
// swap d and x
int b = x.balance;
x.balance = d.balance;
d.balance = b;
// set x.parent
TreeNode xp = x.parent;
TreeNode dp = d.parent;
if (d == root) {
root = x;
}
x.parent = dp;
if (dp != null) {
if (dp.right == d) {
dp.right = x;
} else {
dp.left = x;
}
}
// TODO index / tree: link d.r = x(p?).r directly
if (xp == d) {
d.parent = x;
if (d.left == x) {
x.left = d;
x.right = d.right;
} else {
x.right = d;
x.left = d.left;
}
} else {
d.parent = xp;
xp.right = d;
x.right = d.right;
x.left = d.left;
}
if (SysProperties.CHECK && x.right == null) {
DbException.throwInternalError("tree corrupted");
}
x.right.parent = x;
x.left.parent = x;
// set d.left, d.right
d.left = n;
if (n != null) {
n.parent = d;
}
d.right = null;
x = d;
}
rowCount--;
boolean isLeft = x.isFromLeft();
replace(x, n);
n = x.parent;
while (n != null) {
x = n;
int sign = isLeft ? 1 : -1;
switch (x.balance * sign) {
case -1:
x.balance = 0;
break;
case 0:
x.balance = sign;
return;
case 1:
TreeNode r = child(x, !isLeft);
int b = r.balance;
if (b * sign >= 0) {
replace(x, r);
set(x, !isLeft, child(r, isLeft));
set(r, isLeft, x);
if (b == 0) {
x.balance = sign;
r.balance = -sign;
return;
}
x.balance = 0;
r.balance = 0;
x = r;
} else {
TreeNode l = child(r, isLeft);
replace(x, l);
b = l.balance;
set(r, isLeft, child(l, !isLeft));
set(l, !isLeft, r);
set(x, !isLeft, child(l, isLeft));
set(l, isLeft, x);
x.balance = (b == sign) ? -sign : 0;
r.balance = (b == -sign) ? sign : 0;
l.balance = 0;
x = l;
}
break;
default:
DbException.throwInternalError("b: " + x.balance * sign);
}
isLeft = x.isFromLeft();
n = x.parent;
}
}
private TreeNode findFirstNode(SearchRow row, boolean withKey) {
TreeNode x = root, result = x;
while (x != null) {
result = x;
int compare = compareRows(x.row, row);
if (compare == 0 && withKey) {
compare = compareKeys(x.row, row);
}
if (compare == 0) {
if (withKey) {
return x;
}
x = x.left;
} else if (compare > 0) {
x = x.left;
} else {
x = x.right;
}
}
return result;
}
@Override
public Cursor find(TableFilter filter, SearchRow first, SearchRow last) {
return find(first, last);
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
return find(first, last);
}
private Cursor find(SearchRow first, SearchRow last) {
if (first == null) {
TreeNode x = root, n;
while (x != null) {
n = x.left;
if (n == null) {
break;
}
x = n;
}
return new TreeCursor(this, x, null, last);
}
TreeNode x = findFirstNode(first, false);
return new TreeCursor(this, x, first, last);
}
@Override
public double getCost(Session session, int[] masks, TableFilter[] filters, int filter,
SortOrder sortOrder, HashSet<Column> allColumnsSet) {
return getCostRangeIndex(masks, tableData.getRowCountApproximation(),
filters, filter, sortOrder, false, allColumnsSet);
}
@Override
public void remove(Session session) {
truncate(session);
}
@Override
public void truncate(Session session) {
root = null;
rowCount = 0;
}
@Override
public void checkRename() {
// nothing to do
}
@Override
public boolean needRebuild() {
return true;
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
if (closed) {
throw DbException.throwInternalError(toString());
}
if (first) {
            // TODO optimization: this loops through NULL elements
Cursor cursor = find(session, null, null);
while (cursor.next()) {
SearchRow row = cursor.getSearchRow();
Value v = row.getValue(columnIds[0]);
if (v != ValueNull.INSTANCE) {
return cursor;
}
}
return cursor;
}
TreeNode x = root, n;
while (x != null) {
n = x.right;
if (n == null) {
break;
}
x = n;
}
TreeCursor cursor = new TreeCursor(this, x, null, null);
if (x == null) {
return cursor;
}
// TODO optimization: this loops through NULL elements
do {
SearchRow row = cursor.getSearchRow();
if (row == null) {
break;
}
Value v = row.getValue(columnIds[0]);
if (v != ValueNull.INSTANCE) {
return cursor;
}
} while (cursor.previous());
return cursor;
}
@Override
public long getRowCount(Session session) {
return rowCount;
}
@Override
public long getRowCountApproximation() {
return rowCount;
}
@Override
public long getDiskSpaceUsed() {
return 0;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/TreeNode.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.result.Row;
/**
 * Represents an index node of a tree index.
*/
class TreeNode {
/**
* The balance. For more information, see the AVL tree documentation.
*/
int balance;
/**
* The left child node or null.
*/
TreeNode left;
/**
* The right child node or null.
*/
TreeNode right;
/**
* The parent node or null if this is the root node.
*/
TreeNode parent;
/**
* The row.
*/
final Row row;
TreeNode(Row row) {
this.row = row;
}
/**
* Check if this node is the left child of its parent. This method returns
* true if this is the root node.
*
* @return true if this node is the root or a left child
*/
boolean isFromLeft() {
return parent == null || parent.left == this;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/ViewCursor.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import org.h2.message.DbException;
import org.h2.result.ResultInterface;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.table.Table;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
* The cursor implementation of a view index.
*/
public class ViewCursor implements Cursor {
private final Table table;
private final ViewIndex index;
private final ResultInterface result;
private final SearchRow first, last;
private Row current;
public ViewCursor(ViewIndex index, ResultInterface result, SearchRow first,
SearchRow last) {
this.table = index.getTable();
this.index = index;
this.result = result;
this.first = first;
this.last = last;
}
@Override
public Row get() {
return current;
}
@Override
public SearchRow getSearchRow() {
return current;
}
@Override
public boolean next() {
while (true) {
boolean res = result.next();
if (!res) {
if (index.isRecursive()) {
result.reset();
} else {
result.close();
}
current = null;
return false;
}
current = table.getTemplateRow();
Value[] values = result.currentRow();
for (int i = 0, len = current.getColumnCount(); i < len; i++) {
Value v = i < values.length ? values[i] : ValueNull.INSTANCE;
current.setValue(i, v);
}
int comp;
if (first != null) {
comp = index.compareRows(current, first);
if (comp < 0) {
continue;
}
}
if (last != null) {
comp = index.compareRows(current, last);
if (comp > 0) {
continue;
}
}
return true;
}
}
@Override
public boolean previous() {
throw DbException.throwInternalError(toString());
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/index/ViewIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.index;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.concurrent.TimeUnit;
import org.h2.api.ErrorCode;
import org.h2.command.Parser;
import org.h2.command.Prepared;
import org.h2.command.dml.Query;
import org.h2.command.dml.SelectUnion;
import org.h2.engine.Constants;
import org.h2.engine.Session;
import org.h2.expression.Comparison;
import org.h2.expression.Parameter;
import org.h2.message.DbException;
import org.h2.result.LocalResult;
import org.h2.result.ResultInterface;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.JoinBatch;
import org.h2.table.TableFilter;
import org.h2.table.TableView;
import org.h2.util.IntArray;
import org.h2.util.New;
import org.h2.value.Value;
/**
* This object represents a virtual index for a query.
* Actually it only represents a prepared SELECT statement.
*/
public class ViewIndex extends BaseIndex implements SpatialIndex {
private static final long MAX_AGE_NANOS =
TimeUnit.MILLISECONDS.toNanos(Constants.VIEW_COST_CACHE_MAX_AGE);
private final TableView view;
private final String querySQL;
private final ArrayList<Parameter> originalParameters;
private boolean recursive;
private final int[] indexMasks;
private Query query;
private final Session createSession;
/**
* The time in nanoseconds when this index (and its cost) was calculated.
*/
private final long evaluatedAt;
/**
* Constructor for the original index in {@link TableView}.
*
* @param view the table view
* @param querySQL the query SQL
* @param originalParameters the original parameters
* @param recursive if the view is recursive
*/
public ViewIndex(TableView view, String querySQL,
ArrayList<Parameter> originalParameters, boolean recursive) {
initBaseIndex(view, 0, null, null, IndexType.createNonUnique(false));
this.view = view;
this.querySQL = querySQL;
this.originalParameters = originalParameters;
this.recursive = recursive;
columns = new Column[0];
this.createSession = null;
this.indexMasks = null;
        // this is the main index of a TableView; it does not need an
        // eviction timestamp
evaluatedAt = Long.MIN_VALUE;
}
/**
* Constructor for plan item generation. Over this index the query will be
* executed.
*
* @param view the table view
* @param index the view index
* @param session the session
* @param masks the masks
* @param filters table filters
* @param filter current filter
* @param sortOrder sort order
*/
public ViewIndex(TableView view, ViewIndex index, Session session,
int[] masks, TableFilter[] filters, int filter, SortOrder sortOrder) {
initBaseIndex(view, 0, null, null, IndexType.createNonUnique(false));
this.view = view;
this.querySQL = index.querySQL;
this.originalParameters = index.originalParameters;
this.recursive = index.recursive;
this.indexMasks = masks;
this.createSession = session;
columns = new Column[0];
if (!recursive) {
query = getQuery(session, masks, filters, filter, sortOrder);
}
        // we don't need eviction for recursive views, since we can't calculate
        // their cost; if it is a sub-query we don't need eviction either,
        // because the whole ViewIndex cache is dropped in
        // Session.prepareLocal
evaluatedAt = recursive || view.getTopQuery() != null ? Long.MAX_VALUE : System.nanoTime();
}
@Override
public IndexLookupBatch createLookupBatch(TableFilter[] filters, int filter) {
if (recursive) {
// we do not support batching for recursive queries
return null;
}
return JoinBatch.createViewIndexLookupBatch(this);
}
public Session getSession() {
return createSession;
}
public boolean isExpired() {
assert evaluatedAt != Long.MIN_VALUE : "must not be called for main index of TableView";
return !recursive && view.getTopQuery() == null &&
System.nanoTime() - evaluatedAt > MAX_AGE_NANOS;
}
@Override
public String getPlanSQL() {
return query == null ? null : query.getPlanSQL();
}
@Override
public void close(Session session) {
// nothing to do
}
@Override
public void add(Session session, Row row) {
throw DbException.getUnsupportedException("VIEW");
}
@Override
public void remove(Session session, Row row) {
throw DbException.getUnsupportedException("VIEW");
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return recursive ? 1000 : query.getCost();
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
return find(session, first, last, null);
}
@Override
public Cursor findByGeometry(TableFilter filter, SearchRow first,
SearchRow last, SearchRow intersection) {
return find(filter.getSession(), first, last, intersection);
}
private static Query prepareSubQuery(String sql, Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder) {
Prepared p;
session.pushSubQueryInfo(masks, filters, filter, sortOrder);
try {
p = session.prepare(sql, true, true);
} finally {
session.popSubQueryInfo();
}
return (Query) p;
}
private Cursor findRecursive(SearchRow first, SearchRow last) {
assert recursive;
ResultInterface recursiveResult = view.getRecursiveResult();
if (recursiveResult != null) {
recursiveResult.reset();
return new ViewCursor(this, recursiveResult, first, last);
}
if (query == null) {
Parser parser = new Parser(createSession);
parser.setRightsChecked(true);
parser.setSuppliedParameterList(originalParameters);
query = (Query) parser.prepare(querySQL);
query.setNeverLazy(true);
}
if (!query.isUnion()) {
throw DbException.get(ErrorCode.SYNTAX_ERROR_2,
"recursive queries without UNION");
}
SelectUnion union = (SelectUnion) query;
Query left = union.getLeft();
left.setNeverLazy(true);
// to ensure the last result is not closed
left.disableCache();
ResultInterface resultInterface = left.query(0);
LocalResult localResult = union.getEmptyResult();
// ensure it is not written to disk,
// because it is not closed normally
localResult.setMaxMemoryRows(Integer.MAX_VALUE);
while (resultInterface.next()) {
Value[] cr = resultInterface.currentRow();
localResult.addRow(cr);
}
Query right = union.getRight();
right.setNeverLazy(true);
resultInterface.reset();
view.setRecursiveResult(resultInterface);
// to ensure the last result is not closed
right.disableCache();
while (true) {
resultInterface = right.query(0);
if (!resultInterface.hasNext()) {
break;
}
while (resultInterface.next()) {
Value[] cr = resultInterface.currentRow();
localResult.addRow(cr);
}
resultInterface.reset();
view.setRecursiveResult(resultInterface);
}
view.setRecursiveResult(null);
localResult.done();
return new ViewCursor(this, localResult, first, last);
}
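    // Added note (an interpretation of findRecursive above): this is the
    // usual fixpoint evaluation of a recursive query, e.g. (illustrative SQL)
    //
    //   WITH RECURSIVE t(n) AS (
    //       SELECT 1                            -- left side: seeds the result
    //       UNION ALL
    //       SELECT n + 1 FROM t WHERE n < 10    -- right side: re-run until
    //   )                                       -- it produces no more rows
    //   SELECT * FROM t
    //
    // Each iteration of the right side sees the previous one through
    // view.setRecursiveResult(...), which feeds the recursive ViewCursor.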
/**
* Set the query parameters.
*
* @param session the session
* @param first the lower bound
* @param last the upper bound
* @param intersection the intersection
*/
public void setupQueryParameters(Session session, SearchRow first, SearchRow last,
SearchRow intersection) {
ArrayList<Parameter> paramList = query.getParameters();
if (originalParameters != null) {
for (Parameter orig : originalParameters) {
int idx = orig.getIndex();
Value value = orig.getValue(session);
setParameter(paramList, idx, value);
}
}
int len;
if (first != null) {
len = first.getColumnCount();
} else if (last != null) {
len = last.getColumnCount();
} else if (intersection != null) {
len = intersection.getColumnCount();
} else {
len = 0;
}
int idx = view.getParameterOffset(originalParameters);
for (int i = 0; i < len; i++) {
int mask = indexMasks[i];
if ((mask & IndexCondition.EQUALITY) != 0) {
setParameter(paramList, idx++, first.getValue(i));
}
if ((mask & IndexCondition.START) != 0) {
setParameter(paramList, idx++, first.getValue(i));
}
if ((mask & IndexCondition.END) != 0) {
setParameter(paramList, idx++, last.getValue(i));
}
if ((mask & IndexCondition.SPATIAL_INTERSECTS) != 0) {
setParameter(paramList, idx++, intersection.getValue(i));
}
}
}
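    // Added example (illustrative, assumed condition shape): for a view
    // queried with WHERE a = ? AND b BETWEEN ? AND ?, column a has the
    // EQUALITY bit set and consumes one value from first, while column b has
    // both START and END set and consumes the lower bound from first and the
    // upper bound from last, in the mask order shown above.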
private Cursor find(Session session, SearchRow first, SearchRow last,
SearchRow intersection) {
if (recursive) {
return findRecursive(first, last);
}
setupQueryParameters(session, first, last, intersection);
ResultInterface result = query.query(0);
return new ViewCursor(this, result, first, last);
}
private static void setParameter(ArrayList<Parameter> paramList, int x,
Value v) {
if (x >= paramList.size()) {
// the parameter may be optimized away as in
// select * from (select null as x) where x=1;
return;
}
Parameter param = paramList.get(x);
param.setValue(v);
}
public Query getQuery() {
return query;
}
private Query getQuery(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder) {
Query q = prepareSubQuery(querySQL, session, masks, filters, filter, sortOrder);
if (masks == null) {
return q;
}
if (!q.allowGlobalConditions()) {
return q;
}
int firstIndexParam = view.getParameterOffset(originalParameters);
// the column index of each parameter
        // (for example: paramColumnIndex {0, 0} means
// param[0] is column 0, and param[1] is also column 0)
IntArray paramColumnIndex = new IntArray();
int indexColumnCount = 0;
for (int i = 0; i < masks.length; i++) {
int mask = masks[i];
if (mask == 0) {
continue;
}
indexColumnCount++;
// the number of parameters depends on the mask;
// for range queries it is 2: >= x AND <= y
            // but the mask could also be 7 (=, >=, and <=)
int bitCount = Integer.bitCount(mask);
for (int j = 0; j < bitCount; j++) {
paramColumnIndex.add(i);
}
}
int len = paramColumnIndex.size();
ArrayList<Column> columnList = New.arrayList();
for (int i = 0; i < len;) {
int idx = paramColumnIndex.get(i);
columnList.add(table.getColumn(idx));
int mask = masks[idx];
if ((mask & IndexCondition.EQUALITY) != 0) {
Parameter param = new Parameter(firstIndexParam + i);
q.addGlobalCondition(param, idx, Comparison.EQUAL_NULL_SAFE);
i++;
}
if ((mask & IndexCondition.START) != 0) {
Parameter param = new Parameter(firstIndexParam + i);
q.addGlobalCondition(param, idx, Comparison.BIGGER_EQUAL);
i++;
}
if ((mask & IndexCondition.END) != 0) {
Parameter param = new Parameter(firstIndexParam + i);
q.addGlobalCondition(param, idx, Comparison.SMALLER_EQUAL);
i++;
}
if ((mask & IndexCondition.SPATIAL_INTERSECTS) != 0) {
Parameter param = new Parameter(firstIndexParam + i);
q.addGlobalCondition(param, idx, Comparison.SPATIAL_INTERSECTS);
i++;
}
}
columns = columnList.toArray(new Column[0]);
// reconstruct the index columns from the masks
this.indexColumns = new IndexColumn[indexColumnCount];
this.columnIds = new int[indexColumnCount];
for (int type = 0, indexColumnId = 0; type < 2; type++) {
for (int i = 0; i < masks.length; i++) {
int mask = masks[i];
if (mask == 0) {
continue;
}
if (type == 0) {
if ((mask & IndexCondition.EQUALITY) == 0) {
// the first columns need to be equality conditions
continue;
}
} else {
if ((mask & IndexCondition.EQUALITY) != 0) {
// after that only range conditions
continue;
}
}
IndexColumn c = new IndexColumn();
c.column = table.getColumn(i);
indexColumns[indexColumnId] = c;
columnIds[indexColumnId] = c.column.getColumnId();
indexColumnId++;
}
}
String sql = q.getPlanSQL();
q = prepareSubQuery(sql, session, masks, filters, filter, sortOrder);
return q;
}
@Override
public void remove(Session session) {
throw DbException.getUnsupportedException("VIEW");
}
@Override
public void truncate(Session session) {
throw DbException.getUnsupportedException("VIEW");
}
@Override
public void checkRename() {
throw DbException.getUnsupportedException("VIEW");
}
@Override
public boolean needRebuild() {
return false;
}
@Override
public boolean canGetFirstOrLast() {
return false;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
throw DbException.getUnsupportedException("VIEW");
}
public void setRecursive(boolean value) {
this.recursive = value;
}
@Override
public long getRowCount(Session session) {
return 0;
}
@Override
public long getRowCountApproximation() {
return 0;
}
@Override
public long getDiskSpaceUsed() {
return 0;
}
public boolean isRecursive() {
return recursive;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcArray.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.Array;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Arrays;
import java.util.Map;
import org.h2.api.ErrorCode;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.tools.SimpleResultSet;
import org.h2.value.Value;
/**
* Represents an ARRAY value.
*/
public class JdbcArray extends TraceObject implements Array {
private Value value;
private final JdbcConnection conn;
/**
* INTERNAL
*/
public JdbcArray(JdbcConnection conn, Value value, int id) {
setTrace(conn.getSession().getTrace(), TraceObject.ARRAY, id);
this.conn = conn;
this.value = value;
}
/**
* Returns the value as a Java array.
* This method always returns an Object[].
*
* @return the Object array
*/
@Override
public Object getArray() throws SQLException {
try {
debugCodeCall("getArray");
checkClosed();
return get();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value as a Java array.
* This method always returns an Object[].
*
* @param map is ignored. Only empty or null maps are supported
* @return the Object array
*/
@Override
public Object getArray(Map<String, Class<?>> map) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getArray("+quoteMap(map)+");");
}
JdbcConnection.checkMap(map);
checkClosed();
return get();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value as a Java array. A subset of the array is returned,
* starting from the index (1 meaning the first element) and up to the given
* object count. This method always returns an Object[].
*
* @param index the start index of the subset (starting with 1)
* @param count the maximum number of values
* @return the Object array
*/
@Override
public Object getArray(long index, int count) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getArray(" + index + ", " + count + ");");
}
checkClosed();
return get(index, count);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value as a Java array. A subset of the array is returned,
* starting from the index (1 meaning the first element) and up to the given
* object count. This method always returns an Object[].
*
* @param index the start index of the subset (starting with 1)
* @param count the maximum number of values
* @param map is ignored. Only empty or null maps are supported
* @return the Object array
*/
@Override
public Object getArray(long index, int count, Map<String, Class<?>> map)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getArray(" + index + ", " + count + ", " + quoteMap(map)+");");
}
checkClosed();
JdbcConnection.checkMap(map);
return get(index, count);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the base type of the array. This database does support mixed type
* arrays and therefore there is no base type.
*
* @return Types.NULL
*/
@Override
public int getBaseType() throws SQLException {
try {
debugCodeCall("getBaseType");
checkClosed();
return Types.NULL;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the base type name of the array. This database does support mixed
* type arrays and therefore there is no base type.
*
* @return "NULL"
*/
@Override
public String getBaseTypeName() throws SQLException {
try {
debugCodeCall("getBaseTypeName");
checkClosed();
return "NULL";
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value as a result set.
* The first column contains the index
* (starting with 1) and the second column the value.
*
* @return the result set
*/
@Override
public ResultSet getResultSet() throws SQLException {
try {
debugCodeCall("getResultSet");
checkClosed();
return getResultSet(get(), 0);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value as a result set. The first column contains the index
* (starting with 1) and the second column the value.
*
* @param map is ignored. Only empty or null maps are supported
* @return the result set
*/
@Override
public ResultSet getResultSet(Map<String, Class<?>> map) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getResultSet("+quoteMap(map)+");");
}
checkClosed();
JdbcConnection.checkMap(map);
return getResultSet(get(), 0);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value as a result set. The first column contains the index
* (starting with 1) and the second column the value. A subset of the array
* is returned, starting from the index (1 meaning the first element) and
* up to the given object count.
*
* @param index the start index of the subset (starting with 1)
* @param count the maximum number of values
* @return the result set
*/
@Override
public ResultSet getResultSet(long index, int count) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getResultSet("+index+", " + count+");");
}
checkClosed();
return getResultSet(get(index, count), index - 1);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value as a result set.
* The first column contains the index
* (starting with 1) and the second column the value.
* A subset of the array is returned, starting from the index
* (1 meaning the first element) and up to the given object count.
*
* @param index the start index of the subset (starting with 1)
* @param count the maximum number of values
* @param map is ignored. Only empty or null maps are supported
* @return the result set
*/
@Override
public ResultSet getResultSet(long index, int count,
Map<String, Class<?>> map) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getResultSet("+index+", " + count+", " + quoteMap(map)+");");
}
checkClosed();
JdbcConnection.checkMap(map);
return getResultSet(get(index, count), index - 1);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Release all resources of this object.
*/
@Override
public void free() {
debugCodeCall("free");
value = null;
}
private static ResultSet getResultSet(Object[] array, long offset) {
SimpleResultSet rs = new SimpleResultSet();
rs.addColumn("INDEX", Types.BIGINT, 0, 0);
// TODO array result set: there are multiple data types possible
rs.addColumn("VALUE", Types.NULL, 0, 0);
for (int i = 0; i < array.length; i++) {
rs.addRow(offset + i + 1, array[i]);
}
return rs;
}
private void checkClosed() {
conn.checkClosed();
if (value == null) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
}
private Object[] get() {
return (Object[]) value.convertTo(Value.ARRAY).getObject();
}
private Object[] get(long index, int count) {
Object[] array = get();
if (count < 0 || count > array.length) {
throw DbException.getInvalidValueException("count (1.."
+ array.length + ")", count);
}
if (index < 1 || index > array.length) {
throw DbException.getInvalidValueException("index (1.."
+ array.length + ")", index);
}
int offset = (int) (index - 1);
return Arrays.copyOfRange(array, offset, offset + count);
}
/**
* INTERNAL
*/
@Override
public String toString() {
return value == null ? "null" :
(getTraceObjectName() + ": " + value.getTraceSQL());
}
}
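For reference, a minimal usage sketch of the Array API above, assuming an in-memory H2 database and H2's classic (v1, v2, ...) array literal syntax; all names and values are illustrative:

import java.sql.Array;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;

public class ArraySubsetDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                Statement stat = conn.createStatement();
                ResultSet rs = stat.executeQuery("SELECT (10, 20, 30, 40)")) {
            rs.next();
            Array array = rs.getArray(1);
            // getArray(index, count): index is 1-based, count caps the subset size
            Object[] middle = (Object[]) array.getArray(2, 2);
            System.out.println(Arrays.toString(middle)); // [20, 30]
            // getResultSet(): column 1 is the 1-based INDEX, column 2 the VALUE
            try (ResultSet ars = array.getResultSet()) {
                while (ars.next()) {
                    System.out.println(ars.getLong(1) + " -> " + ars.getObject(2));
                }
            }
            array.free(); // releases the underlying value
        }
    }
}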
// File: java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcBatchUpdateException.java
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.sql.BatchUpdateException;
import java.sql.SQLException;
/**
* Represents a batch update database exception.
*/
public class JdbcBatchUpdateException extends BatchUpdateException {
private static final long serialVersionUID = 1L;
/**
* INTERNAL
*/
JdbcBatchUpdateException(SQLException next, int[] updateCounts) {
super(next.getMessage(), next.getSQLState(), next.getErrorCode(), updateCounts);
setNextException(next);
}
/**
* INTERNAL
*/
@Override
public void printStackTrace() {
        // The default implementation already delegates to
        // printStackTrace(System.err), but we override it explicitly so that
        // a future change in the superclass cannot silently alter this
        // behavior.
printStackTrace(System.err);
}
/**
* INTERNAL
*/
@Override
public void printStackTrace(PrintWriter s) {
if (s != null) {
super.printStackTrace(s);
if (getNextException() != null) {
getNextException().printStackTrace(s);
}
}
}
/**
* INTERNAL
*/
@Override
public void printStackTrace(PrintStream s) {
if (s != null) {
super.printStackTrace(s);
if (getNextException() != null) {
getNextException().printStackTrace(s);
}
}
}
}
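A hedged sketch of how this exception reaches client code when one statement in a batch fails; the table and data are illustrative:

import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;

public class BatchFailureDemo {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE t(id INT PRIMARY KEY)");
            stat.addBatch("INSERT INTO t VALUES(1)");
            stat.addBatch("INSERT INTO t VALUES(1)"); // duplicate key: this one fails
            try {
                stat.executeBatch();
            } catch (BatchUpdateException e) {
                // per-statement update counts (failed entries are EXECUTE_FAILED)
                System.out.println(Arrays.toString(e.getUpdateCounts()));
                // the causing SQLException is chained, as set in the constructor above
                System.out.println(e.getNextException().getMessage());
            }
        }
    }
}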
// File: java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcBlob.java
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.sql.Blob;
import java.sql.SQLException;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.util.IOUtils;
import org.h2.util.Task;
import org.h2.value.Value;
/**
* Represents a BLOB value.
*/
public class JdbcBlob extends TraceObject implements Blob {
Value value;
private final JdbcConnection conn;
/**
* INTERNAL
*/
public JdbcBlob(JdbcConnection conn, Value value, int id) {
setTrace(conn.getSession().getTrace(), TraceObject.BLOB, id);
this.conn = conn;
this.value = value;
}
/**
* Returns the length.
*
* @return the length
*/
@Override
public long length() throws SQLException {
try {
debugCodeCall("length");
checkClosed();
if (value.getType() == Value.BLOB) {
long precision = value.getPrecision();
if (precision > 0) {
return precision;
}
}
return IOUtils.copyAndCloseInput(value.getInputStream(), null);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Truncates the object.
*
* @param len the new length
*/
@Override
public void truncate(long len) throws SQLException {
throw unsupported("LOB update");
}
/**
* Returns some bytes of the object.
*
* @param pos the index, the first byte is at position 1
* @param length the number of bytes
* @return the bytes, at most length bytes
*/
@Override
public byte[] getBytes(long pos, int length) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getBytes("+pos+", "+length+");");
}
checkClosed();
ByteArrayOutputStream out = new ByteArrayOutputStream();
try (InputStream in = value.getInputStream()) {
IOUtils.skipFully(in, pos - 1);
IOUtils.copy(in, out, length);
}
return out.toByteArray();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Fills the Blob. This is only supported for new, empty Blob objects that
* were created with Connection.createBlob(). The position
* must be 1, meaning the whole Blob data is set.
*
* @param pos where to start writing (the first byte is at position 1)
* @param bytes the bytes to set
* @return the length of the added data
*/
@Override
public int setBytes(long pos, byte[] bytes) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBytes("+pos+", "+quoteBytes(bytes)+");");
}
checkClosed();
if (pos != 1) {
throw DbException.getInvalidValueException("pos", pos);
}
value = conn.createBlob(new ByteArrayInputStream(bytes), -1);
return bytes.length;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Sets some bytes of the object. As with {@link #setBytes(long, byte[])},
     * this is only supported for new, empty Blob objects, and the position
     * must be 1.
*
* @param pos the write position
* @param bytes the bytes to set
* @param offset the bytes offset
* @param len the number of bytes to write
* @return how many bytes have been written
*/
@Override
public int setBytes(long pos, byte[] bytes, int offset, int len)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBytes(" + pos + ", " + quoteBytes(bytes) + ", " + offset + ", " + len + ");");
}
checkClosed();
if (pos != 1) {
throw DbException.getInvalidValueException("pos", pos);
}
value = conn.createBlob(new ByteArrayInputStream(bytes, offset, len), -1);
return (int) value.getPrecision();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the input stream.
*
* @return the input stream
*/
@Override
public InputStream getBinaryStream() throws SQLException {
try {
debugCodeCall("getBinaryStream");
checkClosed();
return value.getInputStream();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Get a writer to update the Blob. This is only supported for new, empty
* Blob objects that were created with Connection.createBlob(). The Blob is
* created in a separate thread, and the object is only updated when
* OutputStream.close() is called. The position must be 1, meaning the whole
* Blob data is set.
*
* @param pos where to start writing (the first byte is at position 1)
* @return an output stream
*/
@Override
public OutputStream setBinaryStream(long pos) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBinaryStream("+pos+");");
}
checkClosed();
if (pos != 1) {
throw DbException.getInvalidValueException("pos", pos);
}
if (value.getPrecision() != 0) {
throw DbException.getInvalidValueException("length", value.getPrecision());
}
final JdbcConnection c = conn; // local variable avoids generating synthetic accessor method
final PipedInputStream in = new PipedInputStream();
final Task task = new Task() {
@Override
public void call() {
value = c.createBlob(in, -1);
}
};
PipedOutputStream out = new PipedOutputStream(in) {
@Override
public void close() throws IOException {
super.close();
try {
task.get();
} catch (Exception e) {
throw DbException.convertToIOException(e);
}
}
};
task.execute();
return new BufferedOutputStream(out);
} catch (Exception e) {
throw logAndConvert(e);
}
}
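    // Usage sketch (illustrative, not part of this class): stream data into a
    // new, empty Blob created with Connection.createBlob(). The piped streams
    // above feed a background Task, and close() blocks until the value exists.
    //
    //   Blob blob = conn.createBlob();         // 'conn' is an open connection
    //   try (OutputStream out = blob.setBinaryStream(1)) {
    //       out.write(data);                   // consumed by the background thread
    //   }                                      // close() waits for task.get()
    //   preparedStatement.setBlob(1, blob);    // 'preparedStatement' is hypothetical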
/**
     * [Not supported] Searches for a pattern and returns the position.
*
* @param pattern the pattern to search
* @param start the index, the first byte is at position 1
* @return the position (first byte is at position 1), or -1 for not found
*/
@Override
public long position(byte[] pattern, long start) throws SQLException {
if (isDebugEnabled()) {
debugCode("position("+quoteBytes(pattern)+", "+start+");");
}
if (Constants.BLOB_SEARCH) {
try {
checkClosed();
if (pattern == null) {
return -1;
}
if (pattern.length == 0) {
return 1;
}
// TODO performance: blob pattern search is slow
BufferedInputStream in = new BufferedInputStream(value.getInputStream());
IOUtils.skipFully(in, start - 1);
int pos = 0;
int patternPos = 0;
                while (true) {
                    int x = in.read();
                    if (x < 0) {
                        break;
                    }
                    if (x == (pattern[patternPos] & 0xff)) {
                        if (patternPos == 0) {
                            // remember the position just after the first matched byte
                            in.mark(pattern.length);
                        }
                        patternPos++;
                        if (patternPos == pattern.length) {
                            // full match: return the 1-based position of its first byte
                            return start + pos - pattern.length + 1;
                        }
                    } else {
                        if (patternPos > 0) {
                            // partial match failed: rewind and restart one byte
                            // after where the partial match began
                            in.reset();
                            pos -= patternPos;
                            patternPos = 0;
                        }
                    }
                    pos++;
                }
return -1;
} catch (Exception e) {
throw logAndConvert(e);
}
}
throw unsupported("LOB search");
}
/**
     * [Not supported] Searches for a pattern and returns the position.
*
* @param blobPattern the pattern to search
* @param start the index, the first byte is at position 1
* @return the position (first byte is at position 1), or -1 for not found
*/
@Override
public long position(Blob blobPattern, long start) throws SQLException {
if (isDebugEnabled()) {
debugCode("position(blobPattern, "+start+");");
}
if (Constants.BLOB_SEARCH) {
try {
checkClosed();
if (blobPattern == null) {
return -1;
}
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                try (InputStream in = blobPattern.getBinaryStream()) {
                    while (true) {
                        int x = in.read();
                        if (x < 0) {
                            break;
                        }
                        out.write(x);
                    }
                }
return position(out.toByteArray(), start);
} catch (Exception e) {
throw logAndConvert(e);
}
}
throw unsupported("LOB subset");
}
/**
* Release all resources of this object.
*/
@Override
public void free() {
debugCodeCall("free");
value = null;
}
/**
* Returns the input stream, starting from an offset.
*
* @param pos where to start reading
* @param length the number of bytes that will be read
* @return the input stream to read
*/
@Override
public InputStream getBinaryStream(long pos, long length) throws SQLException {
try {
debugCodeCall("getBinaryStream(pos, length)");
checkClosed();
return value.getInputStream(pos, length);
} catch (Exception e) {
throw logAndConvert(e);
}
}
private void checkClosed() {
conn.checkClosed();
if (value == null) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": " +
(value == null ? "null" : value.getTraceSQL());
}
}
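A hedged read-side sketch for the Blob API above; the table, column, and hex literal are illustrative:

import java.io.InputStream;
import java.sql.Blob;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class BlobReadDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE img(data BLOB)");
            stat.execute("INSERT INTO img VALUES(X'0102030405')");
            try (ResultSet rs = stat.executeQuery("SELECT data FROM img")) {
                rs.next();
                Blob blob = rs.getBlob(1);
                System.out.println(blob.length()); // 5
                byte[] part = blob.getBytes(2, 3); // pos is 1-based: {2, 3, 4}
                System.out.println(part.length);   // 3
                try (InputStream in = blob.getBinaryStream()) {
                    System.out.println(in.read()); // 1
                }
                blob.free();
            }
        }
    }
}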
// File: java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcCallableStatement.java
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import org.h2.api.ErrorCode;
import org.h2.expression.ParameterInterface;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.util.BitField;
import org.h2.value.ValueNull;
/**
* Represents a callable statement.
*
* @author Sergi Vladykin
* @author Thomas Mueller
*/
public class JdbcCallableStatement extends JdbcPreparedStatement implements
CallableStatement, JdbcCallableStatementBackwardsCompat {
private BitField outParameters;
private int maxOutParameters;
private HashMap<String, Integer> namedParameters;
JdbcCallableStatement(JdbcConnection conn, String sql, int id,
int resultSetType, int resultSetConcurrency) {
super(conn, sql, id, resultSetType, resultSetConcurrency, false, false);
setTrace(session.getTrace(), TraceObject.CALLABLE_STATEMENT, id);
}
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, this will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
     * @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback)
* @throws SQLException if this object is closed or invalid
*/
@Override
public int executeUpdate() throws SQLException {
try {
checkClosed();
if (command.isQuery()) {
super.executeQuery();
return 0;
}
return super.executeUpdate();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, this will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
     * @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback)
* @throws SQLException if this object is closed or invalid
*/
@Override
public long executeLargeUpdate() throws SQLException {
try {
checkClosed();
if (command.isQuery()) {
super.executeQuery();
return 0;
}
return super.executeLargeUpdate();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Registers the given OUT parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param sqlType the data type (Types.x) - ignored
*/
@Override
public void registerOutParameter(int parameterIndex, int sqlType)
throws SQLException {
registerOutParameter(parameterIndex);
}
/**
* Registers the given OUT parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param sqlType the data type (Types.x) - ignored
* @param typeName the SQL type name - ignored
*/
@Override
public void registerOutParameter(int parameterIndex, int sqlType,
String typeName) throws SQLException {
registerOutParameter(parameterIndex);
}
/**
* Registers the given OUT parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
     * @param sqlType the data type (Types.x) - ignored
* @param scale is ignored
*/
@Override
public void registerOutParameter(int parameterIndex, int sqlType, int scale)
throws SQLException {
registerOutParameter(parameterIndex);
}
/**
* Registers the given OUT parameter.
*
* @param parameterName the parameter name
* @param sqlType the data type (Types.x) - ignored
* @param typeName the SQL type name - ignored
*/
@Override
public void registerOutParameter(String parameterName, int sqlType,
String typeName) throws SQLException {
registerOutParameter(getIndexForName(parameterName), sqlType, typeName);
}
/**
* Registers the given OUT parameter.
*
* @param parameterName the parameter name
* @param sqlType the data type (Types.x) - ignored
* @param scale is ignored
*/
@Override
public void registerOutParameter(String parameterName, int sqlType,
int scale) throws SQLException {
registerOutParameter(getIndexForName(parameterName), sqlType, scale);
}
/**
* Registers the given OUT parameter.
*
* @param parameterName the parameter name
* @param sqlType the data type (Types.x) - ignored
*/
@Override
public void registerOutParameter(String parameterName, int sqlType)
throws SQLException {
registerOutParameter(getIndexForName(parameterName), sqlType);
}
/**
* Returns whether the last column accessed was null.
*
* @return true if the last column accessed was null
*/
@Override
public boolean wasNull() throws SQLException {
return getOpenResultSet().wasNull();
}
/**
* [Not supported]
*/
@Override
public URL getURL(int parameterIndex) throws SQLException {
throw unsupported("url");
}
/**
* Returns the value of the specified column as a String.
*
* @param parameterIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public String getString(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getString(parameterIndex);
}
/**
* Returns the value of the specified column as a boolean.
*
* @param parameterIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public boolean getBoolean(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getBoolean(parameterIndex);
}
/**
* Returns the value of the specified column as a byte.
*
* @param parameterIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public byte getByte(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getByte(parameterIndex);
}
/**
* Returns the value of the specified column as a short.
*
* @param parameterIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public short getShort(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getShort(parameterIndex);
}
/**
* Returns the value of the specified column as an int.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public int getInt(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getInt(parameterIndex);
}
/**
* Returns the value of the specified column as a long.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public long getLong(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getLong(parameterIndex);
}
/**
* Returns the value of the specified column as a float.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public float getFloat(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getFloat(parameterIndex);
}
/**
* Returns the value of the specified column as a double.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public double getDouble(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getDouble(parameterIndex);
}
/**
* Returns the value of the specified column as a BigDecimal.
*
* @deprecated use {@link #getBigDecimal(int)}
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param scale is ignored
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Deprecated
@Override
public BigDecimal getBigDecimal(int parameterIndex, int scale)
throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getBigDecimal(parameterIndex, scale);
}
/**
* Returns the value of the specified column as a byte array.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public byte[] getBytes(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getBytes(parameterIndex);
}
/**
* Returns the value of the specified column as a java.sql.Date.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Date getDate(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getDate(parameterIndex);
}
/**
* Returns the value of the specified column as a java.sql.Time.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Time getTime(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getTime(parameterIndex);
}
/**
* Returns the value of the specified column as a java.sql.Timestamp.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Timestamp getTimestamp(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getTimestamp(parameterIndex);
}
/**
* Returns a column value as a Java object. The data is
* de-serialized into a Java object (on the client side).
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value or null
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Object getObject(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getObject(parameterIndex);
}
/**
* Returns the value of the specified column as a BigDecimal.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getBigDecimal(parameterIndex);
}
/**
     * [Not supported] Gets a column as an object using the specified type
* mapping.
*/
@Override
public Object getObject(int parameterIndex, Map<String, Class<?>> map)
throws SQLException {
throw unsupported("map");
}
/**
* [Not supported] Gets a column as a reference.
*/
@Override
public Ref getRef(int parameterIndex) throws SQLException {
throw unsupported("ref");
}
/**
* Returns the value of the specified column as a Blob.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Blob getBlob(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getBlob(parameterIndex);
}
/**
* Returns the value of the specified column as a Clob.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Clob getClob(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getClob(parameterIndex);
}
/**
* Returns the value of the specified column as an Array.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Array getArray(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getArray(parameterIndex);
}
/**
* Returns the value of the specified column as a java.sql.Date using a
* specified time zone.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param cal the calendar
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getDate(parameterIndex, cal);
}
/**
* Returns the value of the specified column as a java.sql.Time using a
* specified time zone.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param cal the calendar
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getTime(parameterIndex, cal);
}
/**
* Returns the value of the specified column as a java.sql.Timestamp using a
* specified time zone.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param cal the calendar
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Timestamp getTimestamp(int parameterIndex, Calendar cal)
throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getTimestamp(parameterIndex, cal);
}
/**
* [Not supported]
*/
@Override
public URL getURL(String parameterName) throws SQLException {
throw unsupported("url");
}
/**
* Returns the value of the specified column as a java.sql.Timestamp using a
* specified time zone.
*
* @param parameterName the parameter name
* @param cal the calendar
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Timestamp getTimestamp(String parameterName, Calendar cal)
throws SQLException {
return getTimestamp(getIndexForName(parameterName), cal);
}
/**
* Returns the value of the specified column as a java.sql.Time using a
* specified time zone.
*
* @param parameterName the parameter name
* @param cal the calendar
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Time getTime(String parameterName, Calendar cal) throws SQLException {
return getTime(getIndexForName(parameterName), cal);
}
/**
* Returns the value of the specified column as a java.sql.Date using a
* specified time zone.
*
* @param parameterName the parameter name
* @param cal the calendar
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Date getDate(String parameterName, Calendar cal) throws SQLException {
return getDate(getIndexForName(parameterName), cal);
}
/**
* Returns the value of the specified column as an Array.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Array getArray(String parameterName) throws SQLException {
return getArray(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a Clob.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Clob getClob(String parameterName) throws SQLException {
return getClob(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a Blob.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Blob getBlob(String parameterName) throws SQLException {
return getBlob(getIndexForName(parameterName));
}
/**
* [Not supported] Gets a column as a reference.
*/
@Override
public Ref getRef(String parameterName) throws SQLException {
throw unsupported("ref");
}
/**
     * [Not supported] Gets a column as an object using the specified type
* mapping.
*/
@Override
public Object getObject(String parameterName, Map<String, Class<?>> map)
throws SQLException {
throw unsupported("map");
}
/**
* Returns the value of the specified column as a BigDecimal.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public BigDecimal getBigDecimal(String parameterName) throws SQLException {
return getBigDecimal(getIndexForName(parameterName));
}
/**
* Returns a column value as a Java object. The data is
* de-serialized into a Java object (on the client side).
*
* @param parameterName the parameter name
* @return the value or null
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Object getObject(String parameterName) throws SQLException {
return getObject(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a java.sql.Timestamp.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Timestamp getTimestamp(String parameterName) throws SQLException {
return getTimestamp(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a java.sql.Time.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Time getTime(String parameterName) throws SQLException {
return getTime(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a java.sql.Date.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Date getDate(String parameterName) throws SQLException {
return getDate(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a byte array.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public byte[] getBytes(String parameterName) throws SQLException {
return getBytes(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a double.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public double getDouble(String parameterName) throws SQLException {
return getDouble(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a float.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public float getFloat(String parameterName) throws SQLException {
return getFloat(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a long.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public long getLong(String parameterName) throws SQLException {
return getLong(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as an int.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public int getInt(String parameterName) throws SQLException {
return getInt(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a short.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public short getShort(String parameterName) throws SQLException {
return getShort(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a byte.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public byte getByte(String parameterName) throws SQLException {
return getByte(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a boolean.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public boolean getBoolean(String parameterName) throws SQLException {
return getBoolean(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a String.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public String getString(String parameterName) throws SQLException {
return getString(getIndexForName(parameterName));
}
/**
* [Not supported] Returns the value of the specified column as a row id.
*
* @param parameterIndex the parameter index (1, 2, ...)
*/
@Override
public RowId getRowId(int parameterIndex) throws SQLException {
throw unsupported("rowId");
}
/**
* [Not supported] Returns the value of the specified column as a row id.
*
* @param parameterName the parameter name
*/
@Override
public RowId getRowId(String parameterName) throws SQLException {
throw unsupported("rowId");
}
/**
     * Returns the value of the specified column as an NClob.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public NClob getNClob(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getNClob(parameterIndex);
}
/**
     * Returns the value of the specified column as an NClob.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public NClob getNClob(String parameterName) throws SQLException {
return getNClob(getIndexForName(parameterName));
}
/**
* [Not supported] Returns the value of the specified column as a SQLXML
* object.
*/
@Override
public SQLXML getSQLXML(int parameterIndex) throws SQLException {
throw unsupported("SQLXML");
}
/**
* [Not supported] Returns the value of the specified column as a SQLXML
* object.
*/
@Override
public SQLXML getSQLXML(String parameterName) throws SQLException {
throw unsupported("SQLXML");
}
/**
* Returns the value of the specified column as a String.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public String getNString(int parameterIndex) throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getNString(parameterIndex);
}
/**
* Returns the value of the specified column as a String.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public String getNString(String parameterName) throws SQLException {
return getNString(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a reader.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Reader getNCharacterStream(int parameterIndex)
throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getNCharacterStream(parameterIndex);
}
/**
* Returns the value of the specified column as a reader.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Reader getNCharacterStream(String parameterName)
throws SQLException {
return getNCharacterStream(getIndexForName(parameterName));
}
/**
* Returns the value of the specified column as a reader.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Reader getCharacterStream(int parameterIndex)
throws SQLException {
checkRegistered(parameterIndex);
return getOpenResultSet().getCharacterStream(parameterIndex);
}
/**
* Returns the value of the specified column as a reader.
*
* @param parameterName the parameter name
* @return the value
* @throws SQLException if the column is not found or if this object is
* closed
*/
@Override
public Reader getCharacterStream(String parameterName)
throws SQLException {
return getCharacterStream(getIndexForName(parameterName));
}
// =============================================================
/**
* Sets a parameter to null.
*
* @param parameterName the parameter name
* @param sqlType the data type (Types.x)
* @param typeName this parameter is ignored
* @throws SQLException if this object is closed
*/
@Override
public void setNull(String parameterName, int sqlType, String typeName)
throws SQLException {
setNull(getIndexForName(parameterName), sqlType, typeName);
}
/**
* Sets a parameter to null.
*
* @param parameterName the parameter name
* @param sqlType the data type (Types.x)
* @throws SQLException if this object is closed
*/
@Override
public void setNull(String parameterName, int sqlType) throws SQLException {
setNull(getIndexForName(parameterName), sqlType);
}
/**
* Sets the timestamp using a specified time zone. The value will be
* converted to the local time zone.
*
* @param parameterName the parameter name
* @param x the value
* @param cal the calendar
* @throws SQLException if this object is closed
*/
@Override
public void setTimestamp(String parameterName, Timestamp x, Calendar cal)
throws SQLException {
setTimestamp(getIndexForName(parameterName), x, cal);
}
/**
* Sets the time using a specified time zone. The value will be converted to
* the local time zone.
*
* @param parameterName the parameter name
* @param x the value
* @param cal the calendar
* @throws SQLException if this object is closed
*/
@Override
public void setTime(String parameterName, Time x, Calendar cal)
throws SQLException {
setTime(getIndexForName(parameterName), x, cal);
}
/**
* Sets the date using a specified time zone. The value will be converted to
* the local time zone.
*
* @param parameterName the parameter name
* @param x the value
* @param cal the calendar
* @throws SQLException if this object is closed
*/
@Override
public void setDate(String parameterName, Date x, Calendar cal)
throws SQLException {
setDate(getIndexForName(parameterName), x, cal);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setCharacterStream(String parameterName, Reader x, int length)
throws SQLException {
setCharacterStream(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter.
* Objects of unknown classes are serialized (on the client side).
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setObject(String parameterName, Object x) throws SQLException {
setObject(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter. The object is converted, if required, to
* the specified data type before sending to the database.
* Objects of unknown classes are serialized (on the client side).
*
* @param parameterName the parameter name
* @param x the value, null is allowed
* @param targetSqlType the type as defined in java.sql.Types
* @throws SQLException if this object is closed
*/
@Override
public void setObject(String parameterName, Object x, int targetSqlType)
throws SQLException {
setObject(getIndexForName(parameterName), x, targetSqlType);
}
/**
* Sets the value of a parameter. The object is converted, if required, to
* the specified data type before sending to the database.
* Objects of unknown classes are serialized (on the client side).
*
* @param parameterName the parameter name
* @param x the value, null is allowed
* @param targetSqlType the type as defined in java.sql.Types
* @param scale is ignored
* @throws SQLException if this object is closed
*/
@Override
public void setObject(String parameterName, Object x, int targetSqlType,
int scale) throws SQLException {
setObject(getIndexForName(parameterName), x, targetSqlType, scale);
}
/**
* Sets the value of a parameter as an input stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setBinaryStream(String parameterName, InputStream x, int length)
throws SQLException {
setBinaryStream(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter as an ASCII stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setAsciiStream(String parameterName,
InputStream x, long length) throws SQLException {
setAsciiStream(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setTimestamp(String parameterName, Timestamp x)
throws SQLException {
setTimestamp(getIndexForName(parameterName), x);
}
/**
     * Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setTime(String parameterName, Time x) throws SQLException {
setTime(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setDate(String parameterName, Date x) throws SQLException {
setDate(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a byte array.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBytes(String parameterName, byte[] x) throws SQLException {
setBytes(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setString(String parameterName, String x) throws SQLException {
setString(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBigDecimal(String parameterName, BigDecimal x)
throws SQLException {
setBigDecimal(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setDouble(String parameterName, double x) throws SQLException {
setDouble(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setFloat(String parameterName, float x) throws SQLException {
setFloat(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setLong(String parameterName, long x) throws SQLException {
setLong(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setInt(String parameterName, int x) throws SQLException {
setInt(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setShort(String parameterName, short x) throws SQLException {
setShort(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setByte(String parameterName, byte x) throws SQLException {
setByte(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBoolean(String parameterName, boolean x) throws SQLException {
setBoolean(getIndexForName(parameterName), x);
}
/**
* [Not supported]
*/
@Override
public void setURL(String parameterName, URL val) throws SQLException {
throw unsupported("url");
}
/**
* [Not supported] Sets the value of a parameter as a row id.
*/
@Override
public void setRowId(String parameterName, RowId x)
throws SQLException {
throw unsupported("rowId");
}
/**
* Sets the value of a parameter.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNString(String parameterName, String x)
throws SQLException {
setNString(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setNCharacterStream(String parameterName,
Reader x, long length) throws SQLException {
setNCharacterStream(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter as a Clob.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNClob(String parameterName, NClob x)
throws SQLException {
setNClob(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a Clob.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setClob(String parameterName, Reader x,
long length) throws SQLException {
setClob(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter as a Blob.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setBlob(String parameterName, InputStream x,
long length) throws SQLException {
setBlob(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter as a Clob.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setNClob(String parameterName, Reader x,
long length) throws SQLException {
setNClob(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter as a Blob.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBlob(String parameterName, Blob x)
throws SQLException {
setBlob(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a Clob.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setClob(String parameterName, Clob x) throws SQLException {
setClob(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as an ASCII stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setAsciiStream(String parameterName, InputStream x)
throws SQLException {
setAsciiStream(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as an ASCII stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setAsciiStream(String parameterName,
InputStream x, int length) throws SQLException {
setAsciiStream(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter as an input stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBinaryStream(String parameterName,
InputStream x) throws SQLException {
setBinaryStream(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as an input stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setBinaryStream(String parameterName,
InputStream x, long length) throws SQLException {
setBinaryStream(getIndexForName(parameterName), x, length);
}
/**
* Sets the value of a parameter as a Blob.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBlob(String parameterName, InputStream x)
throws SQLException {
setBlob(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setCharacterStream(String parameterName, Reader x)
throws SQLException {
setCharacterStream(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setCharacterStream(String parameterName,
Reader x, long length) throws SQLException {
setCharacterStream(getIndexForName(parameterName), x, length);
}
/**
     * Sets the value of a parameter as a Clob.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setClob(String parameterName, Reader x) throws SQLException {
setClob(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNCharacterStream(String parameterName, Reader x)
throws SQLException {
setNCharacterStream(getIndexForName(parameterName), x);
}
/**
* Sets the value of a parameter as a Clob.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterName the parameter name
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNClob(String parameterName, Reader x)
throws SQLException {
setNClob(getIndexForName(parameterName), x);
}
/**
* [Not supported] Sets the value of a parameter as a SQLXML object.
*/
@Override
public void setSQLXML(String parameterName, SQLXML x)
throws SQLException {
throw unsupported("SQLXML");
}
/**
     * Returns the value of the specified column as an object of the
     * specified type.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param type the class of the returned value
*/
@Override
public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException {
return getOpenResultSet().getObject(parameterIndex, type);
}
/**
     * Returns the value of the specified column as an object of the
     * specified type.
*
* @param parameterName the parameter name
* @param type the class of the returned value
*/
@Override
public <T> T getObject(String parameterName, Class<T> type) throws SQLException {
return getObject(getIndexForName(parameterName), type);
}
private ResultSetMetaData getCheckedMetaData() throws SQLException {
ResultSetMetaData meta = getMetaData();
if (meta == null) {
throw DbException.getUnsupportedException(
"Supported only for calling stored procedures");
}
return meta;
}
private void checkIndexBounds(int parameterIndex) {
checkClosed();
if (parameterIndex < 1 || parameterIndex > maxOutParameters) {
throw DbException.getInvalidValueException("parameterIndex", parameterIndex);
}
}
private void registerOutParameter(int parameterIndex) throws SQLException {
try {
checkClosed();
if (outParameters == null) {
maxOutParameters = Math.min(
getParameterMetaData().getParameterCount(),
getCheckedMetaData().getColumnCount());
outParameters = new BitField();
}
checkIndexBounds(parameterIndex);
            int index = parameterIndex - 1;
            ParameterInterface param = command.getParameters().get(index);
            if (!param.isValueSet()) {
                param.setValue(ValueNull.INSTANCE, false);
            }
            outParameters.set(index);
} catch (Exception e) {
throw logAndConvert(e);
}
}
private void checkRegistered(int parameterIndex) throws SQLException {
try {
checkIndexBounds(parameterIndex);
if (!outParameters.get(parameterIndex - 1)) {
throw DbException.getInvalidValueException("parameterIndex", parameterIndex);
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
private int getIndexForName(String parameterName) throws SQLException {
try {
checkClosed();
if (namedParameters == null) {
ResultSetMetaData meta = getCheckedMetaData();
int columnCount = meta.getColumnCount();
HashMap<String, Integer> map = new HashMap<>(columnCount);
for (int i = 1; i <= columnCount; i++) {
map.put(meta.getColumnLabel(i), i);
}
namedParameters = map;
}
Integer index = namedParameters.get(parameterName);
if (index == null) {
throw DbException.getInvalidValueException("parameterName", parameterName);
}
return index;
} catch (Exception e) {
throw logAndConvert(e);
}
}
private JdbcResultSet getOpenResultSet() throws SQLException {
try {
checkClosed();
if (resultSet == null) {
throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
}
if (resultSet.isBeforeFirst()) {
resultSet.next();
}
return resultSet;
} catch (Exception e) {
throw logAndConvert(e);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcCallableStatementBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcCallableStatementBackwardsCompat {
// compatibility interface
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcClob.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.io.Reader;
import java.io.StringReader;
import java.io.StringWriter;
import java.io.Writer;
import java.sql.Clob;
import java.sql.NClob;
import java.sql.SQLException;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.store.RangeReader;
import org.h2.util.IOUtils;
import org.h2.util.Task;
import org.h2.value.Value;
/**
* Represents a CLOB value.
*/
public class JdbcClob extends TraceObject implements NClob {
Value value;
private final JdbcConnection conn;
/**
* INTERNAL
*/
public JdbcClob(JdbcConnection conn, Value value, int id) {
setTrace(conn.getSession().getTrace(), TraceObject.CLOB, id);
this.conn = conn;
this.value = value;
}
/**
* Returns the length.
*
* @return the length
*/
@Override
public long length() throws SQLException {
try {
debugCodeCall("length");
checkClosed();
if (value.getType() == Value.CLOB) {
long precision = value.getPrecision();
if (precision > 0) {
return precision;
}
}
return IOUtils.copyAndCloseInput(value.getReader(), null, Long.MAX_VALUE);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Truncates the object.
*/
@Override
public void truncate(long len) throws SQLException {
throw unsupported("LOB update");
}
/**
* Returns the input stream.
*
* @return the input stream
*/
@Override
public InputStream getAsciiStream() throws SQLException {
try {
debugCodeCall("getAsciiStream");
checkClosed();
String s = value.getString();
return IOUtils.getInputStreamFromString(s);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Returns an output stream.
*/
@Override
public OutputStream setAsciiStream(long pos) throws SQLException {
throw unsupported("LOB update");
}
/**
* Returns the reader.
*
* @return the reader
*/
@Override
public Reader getCharacterStream() throws SQLException {
try {
debugCodeCall("getCharacterStream");
checkClosed();
return value.getReader();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Get a writer to update the Clob. This is only supported for new, empty
* Clob objects that were created with Connection.createClob() or
* createNClob(). The Clob is created in a separate thread, and the object
* is only updated when Writer.close() is called. The position must be 1,
* meaning the whole Clob data is set.
*
* @param pos where to start writing (the first character is at position 1)
* @return a writer
*/
@Override
public Writer setCharacterStream(long pos) throws SQLException {
try {
if (isDebugEnabled()) {
debugCodeCall("setCharacterStream(" + pos + ");");
}
checkClosed();
if (pos != 1) {
throw DbException.getInvalidValueException("pos", pos);
}
if (value.getPrecision() != 0) {
throw DbException.getInvalidValueException("length", value.getPrecision());
}
final JdbcConnection c = conn; // required to avoid synthetic method creation
// PipedReader / PipedWriter are a lot slower
// than PipedInputStream / PipedOutputStream
// (Sun/Oracle Java 1.6.0_20)
final PipedInputStream in = new PipedInputStream();
final Task task = new Task() {
@Override
public void call() {
value = c.createClob(IOUtils.getReader(in), -1);
}
};
PipedOutputStream out = new PipedOutputStream(in) {
@Override
public void close() throws IOException {
super.close();
try {
task.get();
} catch (Exception e) {
throw DbException.convertToIOException(e);
}
}
};
task.execute();
return IOUtils.getBufferedWriter(out);
} catch (Exception e) {
throw logAndConvert(e);
}
}
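/*
* A minimal usage sketch (illustrative only; assumes an open connection
* "conn"). The Clob content becomes available once the writer is closed,
* as described above:
*
*   Clob clob = conn.createClob();
*   try (Writer w = clob.setCharacterStream(1)) {
*       w.write("Hello, H2");
*   }
*/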
/**
* Returns a substring.
*
* @param pos the position (the first character is at position 1)
* @param length the number of characters
* @return the string
*/
@Override
public String getSubString(long pos, int length) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getSubString(" + pos + ", " + length + ");");
}
checkClosed();
if (pos < 1) {
throw DbException.getInvalidValueException("pos", pos);
}
if (length < 0) {
throw DbException.getInvalidValueException("length", length);
}
StringWriter writer = new StringWriter(
Math.min(Constants.IO_BUFFER_SIZE, length));
try (Reader reader = value.getReader()) {
IOUtils.skipFully(reader, pos - 1);
IOUtils.copyAndCloseInput(reader, writer, length);
}
return writer.toString();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Fills the Clob. This is only supported for new, empty Clob objects that
* were created with Connection.createClob() or createNClob(). The position
* must be 1, meaning the whole Clob data is set.
*
* @param pos where to start writing (the first character is at position 1)
* @param str the string to add
* @return the length of the added text
*/
@Override
public int setString(long pos, String str) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setString(" + pos + ", " + quote(str) + ");");
}
checkClosed();
if (pos != 1) {
throw DbException.getInvalidValueException("pos", pos);
} else if (str == null) {
throw DbException.getInvalidValueException("str", str);
}
value = conn.createClob(new StringReader(str), -1);
return str.length();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Fills the Clob. This is only supported for new, empty Clob objects that
* were created with Connection.createClob() or createNClob(). The position
* must be 1, meaning the whole Clob data is set.
*
* @param pos where to start writing (the first character is at position 1)
* @param str the string to add
* @param offset the string offset
* @param len the number of characters to read
* @return the length of the added text
*/
@Override
public int setString(long pos, String str, int offset, int len)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setString(" + pos + ", " + quote(str) + ", " + offset + ", " + len + ");");
}
checkClosed();
if (pos != 1) {
throw DbException.getInvalidValueException("pos", pos);
} else if (str == null) {
throw DbException.getInvalidValueException("str", str);
}
value = conn.createClob(new RangeReader(new StringReader(str), offset, len), -1);
return (int) value.getPrecision();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Searches for a pattern and returns the position.
*/
@Override
public long position(String pattern, long start) throws SQLException {
throw unsupported("LOB search");
}
/**
* [Not supported] Searches for a pattern and returns the position.
*/
@Override
public long position(Clob clobPattern, long start) throws SQLException {
throw unsupported("LOB search");
}
/**
* Release all resources of this object.
*/
@Override
public void free() {
debugCodeCall("free");
value = null;
}
/**
* Returns the reader, starting from an offset.
*
* @param pos 1-based offset
* @param length length of requested area
* @return the reader
*/
@Override
public Reader getCharacterStream(long pos, long length) throws SQLException {
try {
debugCodeCall("getCharacterStream(pos, length)");
checkClosed();
return value.getReader(pos, length);
} catch (Exception e) {
throw logAndConvert(e);
}
}
private void checkClosed() {
conn.checkClosed();
if (value == null) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": " + (value == null ?
"null" : value.getTraceSQL());
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcConnection.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0, and the
* EPL 1.0 (http://h2database.com/html/license.html). Initial Developer: H2
* Group
*/
package org.h2.jdbc;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
import java.sql.ClientInfoStatus;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.regex.Pattern;
import org.h2.api.ErrorCode;
import org.h2.command.CommandInterface;
import org.h2.engine.ConnectionInfo;
import org.h2.engine.Constants;
import org.h2.engine.Mode;
import org.h2.engine.SessionInterface;
import org.h2.engine.SessionRemote;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.result.ResultInterface;
import org.h2.util.CloseWatcher;
import org.h2.util.JdbcUtils;
import org.h2.util.Utils;
import org.h2.value.CompareMode;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueInt;
import org.h2.value.ValueNull;
import org.h2.value.ValueString;
/**
* <p>
* Represents a connection (session) to a database.
* </p>
* <p>
* Thread safety: the connection is thread-safe, because access is synchronized.
* However, for compatibility with other databases, a connection should only be
* used in one thread at any time.
* </p>
*/
public class JdbcConnection extends TraceObject
implements Connection, JdbcConnectionBackwardsCompat {
private static final String NUM_SERVERS = "numServers";
private static final String PREFIX_SERVER = "server";
private static boolean keepOpenStackTrace;
private final String url;
private final String user;
// ResultSet.HOLD_CURSORS_OVER_COMMIT
private int holdability = 1;
private SessionInterface session;
private CommandInterface commit, rollback;
private CommandInterface getReadOnly, getGeneratedKeys;
private CommandInterface setLockMode, getLockMode;
private CommandInterface setQueryTimeout, getQueryTimeout;
private int savepointId;
private String catalog;
private Statement executingStatement;
private final CloseWatcher watcher;
private int queryTimeoutCache = -1;
private Map<String, String> clientInfo;
private String mode;
private final boolean scopeGeneratedKeys;
/**
* INTERNAL
*/
public JdbcConnection(String url, Properties info) throws SQLException {
this(new ConnectionInfo(url, info), true);
}
/**
* INTERNAL
*/
/*
* The session closeable object does not leak, despite the Eclipse warning,
* because it is tracked by the CloseWatcher.
*/
@SuppressWarnings("resource")
public JdbcConnection(ConnectionInfo ci, boolean useBaseDir)
throws SQLException {
try {
if (useBaseDir) {
String baseDir = SysProperties.getBaseDir();
if (baseDir != null) {
ci.setBaseDir(baseDir);
}
}
// this will return an embedded or server connection
session = new SessionRemote(ci).connectEmbeddedOrServer(false);
trace = session.getTrace();
int id = getNextId(TraceObject.CONNECTION);
setTrace(trace, TraceObject.CONNECTION, id);
this.user = ci.getUserName();
if (isInfoEnabled()) {
trace.infoCode("Connection " + getTraceObjectName()
+ " = DriverManager.getConnection("
+ quote(ci.getOriginalURL()) + ", " + quote(user)
+ ", \"\");");
}
this.url = ci.getURL();
scopeGeneratedKeys = ci.getProperty("SCOPE_GENERATED_KEYS", false);
closeOld();
watcher = CloseWatcher.register(this, session, keepOpenStackTrace);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* INTERNAL
*/
public JdbcConnection(JdbcConnection clone) {
this.session = clone.session;
trace = session.getTrace();
int id = getNextId(TraceObject.CONNECTION);
setTrace(trace, TraceObject.CONNECTION, id);
this.user = clone.user;
this.url = clone.url;
this.catalog = clone.catalog;
this.commit = clone.commit;
this.getGeneratedKeys = clone.getGeneratedKeys;
this.getLockMode = clone.getLockMode;
this.getQueryTimeout = clone.getQueryTimeout;
this.getReadOnly = clone.getReadOnly;
this.rollback = clone.rollback;
this.scopeGeneratedKeys = clone.scopeGeneratedKeys;
this.watcher = null;
if (clone.clientInfo != null) {
this.clientInfo = new HashMap<>(clone.clientInfo);
}
}
/**
* INTERNAL
*/
public JdbcConnection(SessionInterface session, String user, String url) {
this.session = session;
trace = session.getTrace();
int id = getNextId(TraceObject.CONNECTION);
setTrace(trace, TraceObject.CONNECTION, id);
this.user = user;
this.url = url;
this.scopeGeneratedKeys = false;
this.watcher = null;
}
private void closeOld() {
while (true) {
CloseWatcher w = CloseWatcher.pollUnclosed();
if (w == null) {
break;
}
try {
w.getCloseable().close();
} catch (Exception e) {
trace.error(e, "closing session");
}
// there was an unclosed object -
// keep the stack trace from now on
keepOpenStackTrace = true;
String s = w.getOpenStackTrace();
Exception ex = DbException
.get(ErrorCode.TRACE_CONNECTION_NOT_CLOSED);
trace.error(ex, s);
}
}
/**
* Creates a new statement.
*
* @return the new statement
* @throws SQLException if the connection is closed
*/
@Override
public Statement createStatement() throws SQLException {
try {
int id = getNextId(TraceObject.STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("Statement", TraceObject.STATEMENT, id,
"createStatement()");
}
checkClosed();
return new JdbcStatement(this, id, ResultSet.TYPE_FORWARD_ONLY,
Constants.DEFAULT_RESULT_SET_CONCURRENCY, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a statement with the specified result set type and concurrency.
*
* @param resultSetType the result set type (ResultSet.TYPE_*)
* @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*)
* @return the statement
* @throws SQLException if the connection is closed or the result set type
* or concurrency are not supported
*/
@Override
public Statement createStatement(int resultSetType,
int resultSetConcurrency) throws SQLException {
try {
int id = getNextId(TraceObject.STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("Statement", TraceObject.STATEMENT, id,
"createStatement(" + resultSetType + ", "
+ resultSetConcurrency + ")");
}
checkTypeConcurrency(resultSetType, resultSetConcurrency);
checkClosed();
return new JdbcStatement(this, id, resultSetType,
resultSetConcurrency, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a statement with the specified result set type, concurrency, and
* holdability.
*
* @param resultSetType the result set type (ResultSet.TYPE_*)
* @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*)
* @param resultSetHoldability the holdability (ResultSet.HOLD* / CLOSE*)
* @return the statement
* @throws SQLException if the connection is closed or the result set type,
* concurrency, or holdability are not supported
*/
@Override
public Statement createStatement(int resultSetType,
int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
try {
int id = getNextId(TraceObject.STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("Statement", TraceObject.STATEMENT, id,
"createStatement(" + resultSetType + ", "
+ resultSetConcurrency + ", "
+ resultSetHoldability + ")");
}
checkTypeConcurrency(resultSetType, resultSetConcurrency);
checkHoldability(resultSetHoldability);
checkClosed();
return new JdbcStatement(this, id, resultSetType,
resultSetConcurrency, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a new prepared statement.
*
* @param sql the SQL statement
* @return the prepared statement
* @throws SQLException if the connection is closed
*/
@Override
public PreparedStatement prepareStatement(String sql) throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("PreparedStatement",
TraceObject.PREPARED_STATEMENT, id,
"prepareStatement(" + quote(sql) + ")");
}
checkClosed();
sql = translateSQL(sql);
return new JdbcPreparedStatement(this, sql, id,
ResultSet.TYPE_FORWARD_ONLY,
Constants.DEFAULT_RESULT_SET_CONCURRENCY, false, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Prepare a statement that will automatically close when the result set is
* closed. This method is used to retrieve database meta data.
*
* @param sql the SQL statement
* @return the prepared statement
*/
PreparedStatement prepareAutoCloseStatement(String sql)
throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("PreparedStatement",
TraceObject.PREPARED_STATEMENT, id,
"prepareStatement(" + quote(sql) + ")");
}
checkClosed();
sql = translateSQL(sql);
return new JdbcPreparedStatement(this, sql, id,
ResultSet.TYPE_FORWARD_ONLY,
Constants.DEFAULT_RESULT_SET_CONCURRENCY, true, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the database meta data for this database.
*
* @return the database meta data
* @throws SQLException if the connection is closed
*/
@Override
public DatabaseMetaData getMetaData() throws SQLException {
try {
int id = getNextId(TraceObject.DATABASE_META_DATA);
if (isDebugEnabled()) {
debugCodeAssign("DatabaseMetaData",
TraceObject.DATABASE_META_DATA, id, "getMetaData()");
}
checkClosed();
return new JdbcDatabaseMetaData(this, trace, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* INTERNAL
*/
public SessionInterface getSession() {
return session;
}
/**
* Closes this connection. All open statements, prepared statements and
* result sets that were created by this connection become invalid after
* calling this method. If there is an uncommitted transaction, it will be
* rolled back.
*/
@Override
public synchronized void close() throws SQLException {
try {
debugCodeCall("close");
if (session == null) {
return;
}
CloseWatcher.unregister(watcher);
session.cancel();
synchronized (session) {
if (executingStatement != null) {
try {
executingStatement.cancel();
} catch (NullPointerException e) {
// ignore
}
}
try {
if (!session.isClosed()) {
try {
if (session.hasPendingTransaction()) {
// roll back unless that would require to
// re-connect (the transaction can't be rolled
// back after re-connecting)
if (!session.isReconnectNeeded(true)) {
try {
rollbackInternal();
} catch (DbException e) {
// ignore if the connection is broken
// right now
if (e.getErrorCode() != ErrorCode.CONNECTION_BROKEN_1) {
throw e;
}
}
}
session.afterWriting();
}
closePreparedCommands();
} finally {
session.close();
}
}
} finally {
session = null;
}
}
} catch (Throwable e) {
throw logAndConvert(e);
}
}
private void closePreparedCommands() {
commit = closeAndSetNull(commit);
rollback = closeAndSetNull(rollback);
getReadOnly = closeAndSetNull(getReadOnly);
getGeneratedKeys = closeAndSetNull(getGeneratedKeys);
getLockMode = closeAndSetNull(getLockMode);
setLockMode = closeAndSetNull(setLockMode);
getQueryTimeout = closeAndSetNull(getQueryTimeout);
setQueryTimeout = closeAndSetNull(setQueryTimeout);
}
private static CommandInterface closeAndSetNull(CommandInterface command) {
if (command != null) {
command.close();
}
return null;
}
/**
* Switches auto commit on or off. Enabling it commits an uncommitted
* transaction, if there is one.
*
* @param autoCommit true for auto commit on, false for off
* @throws SQLException if the connection is closed
*/
@Override
public synchronized void setAutoCommit(boolean autoCommit)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setAutoCommit(" + autoCommit + ");");
}
checkClosed();
if (autoCommit && !session.getAutoCommit()) {
commit();
}
synchronized (session) {
session.setAutoCommit(autoCommit);
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the current setting for auto commit.
*
* @return true for on, false for off
* @throws SQLException if the connection is closed
*/
@Override
public synchronized boolean getAutoCommit() throws SQLException {
try {
checkClosed();
debugCodeCall("getAutoCommit");
return session.getAutoCommit();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Commits the current transaction. This call only has an effect if auto
* commit is switched off.
*
* @throws SQLException if the connection is closed
*/
@Override
public synchronized void commit() throws SQLException {
try {
debugCodeCall("commit");
checkClosedForWrite();
try {
commit = prepareCommand("COMMIT", commit);
commit.executeUpdate(false);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
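/*
* A minimal manual-transaction sketch (illustrative only; the table TEST
* and its data are assumptions):
*
*   conn.setAutoCommit(false);
*   try (Statement stat = conn.createStatement()) {
*       stat.executeUpdate("INSERT INTO TEST VALUES (1)");
*       conn.commit();
*   } catch (SQLException e) {
*       conn.rollback();
*       throw e;
*   }
*/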
/**
* Rolls back the current transaction. This call only has an effect if auto
* commit is switched off.
*
* @throws SQLException if the connection is closed
*/
@Override
public synchronized void rollback() throws SQLException {
try {
debugCodeCall("rollback");
checkClosedForWrite();
try {
rollbackInternal();
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns true if this connection has been closed.
*
* @return true if close was called
*/
@Override
public boolean isClosed() throws SQLException {
try {
debugCodeCall("isClosed");
return session == null || session.isClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Translates a SQL statement into the database grammar.
*
* @param sql the SQL statement with or without JDBC escape sequences
* @return the translated statement
* @throws SQLException if the connection is closed
*/
@Override
public String nativeSQL(String sql) throws SQLException {
try {
debugCodeCall("nativeSQL", sql);
checkClosed();
return translateSQL(sql);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* According to the JDBC specs, this setting is only a hint to the database
* to enable optimizations - it does not cause writes to be prohibited.
*
* @param readOnly ignored
* @throws SQLException if the connection is closed
*/
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setReadOnly(" + readOnly + ");");
}
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns true if the database is read-only.
*
* @return true if the database is read-only
* @throws SQLException if the connection is closed
*/
@Override
public boolean isReadOnly() throws SQLException {
try {
debugCodeCall("isReadOnly");
checkClosed();
getReadOnly = prepareCommand("CALL READONLY()", getReadOnly);
ResultInterface result = getReadOnly.executeQuery(0, false);
result.next();
return result.currentRow()[0].getBoolean();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Set the default catalog name. This call is ignored.
*
* @param catalog ignored
* @throws SQLException if the connection is closed
*/
@Override
public void setCatalog(String catalog) throws SQLException {
try {
debugCodeCall("setCatalog", catalog);
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the current catalog name.
*
* @return the catalog name
* @throws SQLException if the connection is closed
*/
@Override
public String getCatalog() throws SQLException {
try {
debugCodeCall("getCatalog");
checkClosed();
if (catalog == null) {
CommandInterface cat = prepareCommand("CALL DATABASE()",
Integer.MAX_VALUE);
ResultInterface result = cat.executeQuery(0, false);
result.next();
catalog = result.currentRow()[0].getString();
cat.close();
}
return catalog;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the first warning reported by calls on this object.
*
* @return null
*/
@Override
public SQLWarning getWarnings() throws SQLException {
try {
debugCodeCall("getWarnings");
checkClosed();
return null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Clears all warnings.
*/
@Override
public void clearWarnings() throws SQLException {
try {
debugCodeCall("clearWarnings");
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a prepared statement with the specified result set type and
* concurrency.
*
* @param sql the SQL statement
* @param resultSetType the result set type (ResultSet.TYPE_*)
* @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*)
* @return the prepared statement
* @throws SQLException if the connection is closed or the result set type
* or concurrency are not supported
*/
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType,
int resultSetConcurrency) throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("PreparedStatement",
TraceObject.PREPARED_STATEMENT, id,
"prepareStatement(" + quote(sql) + ", " + resultSetType
+ ", " + resultSetConcurrency + ")");
}
checkTypeConcurrency(resultSetType, resultSetConcurrency);
checkClosed();
sql = translateSQL(sql);
return new JdbcPreparedStatement(this, sql, id, resultSetType,
resultSetConcurrency, false, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Changes the current transaction isolation level. Calling this method will
* commit an open transaction, even if the new level is the same as the old
* one, unless the level is not supported. Internally, this method calls
* SET LOCK_MODE, which affects all connections. The following isolation
* levels are supported:
* <ul>
* <li>Connection.TRANSACTION_READ_UNCOMMITTED = SET LOCK_MODE 0: no locking
* (should only be used for testing).</li>
* <li>Connection.TRANSACTION_SERIALIZABLE = SET LOCK_MODE 1: table level
* locking.</li>
* <li>Connection.TRANSACTION_READ_COMMITTED = SET LOCK_MODE 3: table level
* locking, but read locks are released immediately (default).</li>
* </ul>
* This setting is not persistent. Note that using
* TRANSACTION_READ_UNCOMMITTED together with multiple concurrent
* connections may result in inconsistent transactions.
*
* @param level the new transaction isolation level:
* Connection.TRANSACTION_READ_UNCOMMITTED,
* Connection.TRANSACTION_READ_COMMITTED, or
* Connection.TRANSACTION_SERIALIZABLE
* @throws SQLException if the connection is closed or the isolation level
* is not supported
*/
@Override
public void setTransactionIsolation(int level) throws SQLException {
try {
debugCodeCall("setTransactionIsolation", level);
checkClosed();
int lockMode;
switch (level) {
case Connection.TRANSACTION_READ_UNCOMMITTED:
lockMode = Constants.LOCK_MODE_OFF;
break;
case Connection.TRANSACTION_READ_COMMITTED:
lockMode = Constants.LOCK_MODE_READ_COMMITTED;
break;
case Connection.TRANSACTION_REPEATABLE_READ:
case Connection.TRANSACTION_SERIALIZABLE:
lockMode = Constants.LOCK_MODE_TABLE;
break;
default:
throw DbException.getInvalidValueException("level", level);
}
commit();
setLockMode = prepareCommand("SET LOCK_MODE ?", setLockMode);
setLockMode.getParameters().get(0).setValue(ValueInt.get(lockMode),
false);
setLockMode.executeUpdate(false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
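/*
* A minimal usage sketch (illustrative only): switching to serializable
* isolation, which maps to SET LOCK_MODE 1 as documented above:
*
*   conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
*   // getTransactionIsolation() now reports TRANSACTION_SERIALIZABLE
*/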
/**
* INTERNAL
*/
public void setQueryTimeout(int seconds) throws SQLException {
try {
debugCodeCall("setQueryTimeout", seconds);
checkClosed();
setQueryTimeout = prepareCommand("SET QUERY_TIMEOUT ?",
setQueryTimeout);
setQueryTimeout.getParameters().get(0)
.setValue(ValueInt.get(seconds * 1000), false);
setQueryTimeout.executeUpdate(false);
queryTimeoutCache = seconds;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* INTERNAL
*/
int getQueryTimeout() throws SQLException {
try {
if (queryTimeoutCache == -1) {
checkClosed();
getQueryTimeout = prepareCommand(
"SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS "
+ "WHERE NAME=?",
getQueryTimeout);
getQueryTimeout.getParameters().get(0)
.setValue(ValueString.get("QUERY_TIMEOUT"), false);
ResultInterface result = getQueryTimeout.executeQuery(0, false);
result.next();
int queryTimeout = result.currentRow()[0].getInt();
result.close();
if (queryTimeout != 0) {
// round to the next second, otherwise 999 millis would
// return 0 seconds
queryTimeout = (queryTimeout + 999) / 1000;
}
queryTimeoutCache = queryTimeout;
}
return queryTimeoutCache;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the current transaction isolation level.
*
* @return the isolation level.
* @throws SQLException if the connection is closed
*/
@Override
public int getTransactionIsolation() throws SQLException {
try {
debugCodeCall("getTransactionIsolation");
checkClosed();
getLockMode = prepareCommand("CALL LOCK_MODE()", getLockMode);
ResultInterface result = getLockMode.executeQuery(0, false);
result.next();
int lockMode = result.currentRow()[0].getInt();
result.close();
int transactionIsolationLevel;
switch (lockMode) {
case Constants.LOCK_MODE_OFF:
transactionIsolationLevel = Connection.TRANSACTION_READ_UNCOMMITTED;
break;
case Constants.LOCK_MODE_READ_COMMITTED:
transactionIsolationLevel = Connection.TRANSACTION_READ_COMMITTED;
break;
case Constants.LOCK_MODE_TABLE:
case Constants.LOCK_MODE_TABLE_GC:
transactionIsolationLevel = Connection.TRANSACTION_SERIALIZABLE;
break;
default:
throw DbException.throwInternalError("lockMode:" + lockMode);
}
return transactionIsolationLevel;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Changes the current result set holdability.
*
* @param holdability ResultSet.HOLD_CURSORS_OVER_COMMIT or
* ResultSet.CLOSE_CURSORS_AT_COMMIT
* @throws SQLException if the connection is closed or the holdability is
* not supported
*/
@Override
public void setHoldability(int holdability) throws SQLException {
try {
debugCodeCall("setHoldability", holdability);
checkClosed();
checkHoldability(holdability);
this.holdability = holdability;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the current result set holdability.
*
* @return the holdability
* @throws SQLException if the connection is closed
*/
@Override
public int getHoldability() throws SQLException {
try {
debugCodeCall("getHoldability");
checkClosed();
return holdability;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the type map.
*
* @return null
* @throws SQLException if the connection is closed
*/
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
try {
debugCodeCall("getTypeMap");
checkClosed();
return null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Partially supported] Sets the type map. This is only supported if the
* map is empty or null.
*/
@Override
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setTypeMap(" + quoteMap(map) + ");");
}
checkMap(map);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a new callable statement.
*
* @param sql the SQL statement
* @return the callable statement
* @throws SQLException if the connection is closed or the statement is not
* valid
*/
@Override
public CallableStatement prepareCall(String sql) throws SQLException {
try {
int id = getNextId(TraceObject.CALLABLE_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("CallableStatement",
TraceObject.CALLABLE_STATEMENT, id,
"prepareCall(" + quote(sql) + ")");
}
checkClosed();
sql = translateSQL(sql);
return new JdbcCallableStatement(this, sql, id,
ResultSet.TYPE_FORWARD_ONLY,
Constants.DEFAULT_RESULT_SET_CONCURRENCY);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a callable statement with the specified result set type and
* concurrency.
*
* @param sql the SQL statement
* @param resultSetType the result set type (ResultSet.TYPE_*)
* @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*)
* @return the callable statement
* @throws SQLException if the connection is closed or the result set type
* or concurrency are not supported
*/
@Override
public CallableStatement prepareCall(String sql, int resultSetType,
int resultSetConcurrency) throws SQLException {
try {
int id = getNextId(TraceObject.CALLABLE_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("CallableStatement",
TraceObject.CALLABLE_STATEMENT, id,
"prepareCall(" + quote(sql) + ", " + resultSetType
+ ", " + resultSetConcurrency + ")");
}
checkTypeConcurrency(resultSetType, resultSetConcurrency);
checkClosed();
sql = translateSQL(sql);
return new JdbcCallableStatement(this, sql, id, resultSetType,
resultSetConcurrency);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a callable statement with the specified result set type,
* concurrency, and holdability.
*
* @param sql the SQL statement
* @param resultSetType the result set type (ResultSet.TYPE_*)
* @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*)
* @param resultSetHoldability the holdability (ResultSet.HOLD* / CLOSE*)
* @return the callable statement
* @throws SQLException if the connection is closed or the result set type,
* concurrency, or holdability are not supported
*/
@Override
public CallableStatement prepareCall(String sql, int resultSetType,
int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
try {
int id = getNextId(TraceObject.CALLABLE_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("CallableStatement",
TraceObject.CALLABLE_STATEMENT, id,
"prepareCall(" + quote(sql) + ", " + resultSetType
+ ", " + resultSetConcurrency + ", "
+ resultSetHoldability + ")");
}
checkTypeConcurrency(resultSetType, resultSetConcurrency);
checkHoldability(resultSetHoldability);
checkClosed();
sql = translateSQL(sql);
return new JdbcCallableStatement(this, sql, id, resultSetType,
resultSetConcurrency);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a new unnamed savepoint.
*
* @return the new savepoint
*/
@Override
public Savepoint setSavepoint() throws SQLException {
try {
int id = getNextId(TraceObject.SAVEPOINT);
if (isDebugEnabled()) {
debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id,
"setSavepoint()");
}
checkClosed();
CommandInterface set = prepareCommand(
"SAVEPOINT " + JdbcSavepoint.getName(null, savepointId),
Integer.MAX_VALUE);
set.executeUpdate(false);
JdbcSavepoint savepoint = new JdbcSavepoint(this, savepointId, null,
trace, id);
savepointId++;
return savepoint;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a new named savepoint.
*
* @param name the savepoint name
* @return the new savepoint
*/
@Override
public Savepoint setSavepoint(String name) throws SQLException {
try {
int id = getNextId(TraceObject.SAVEPOINT);
if (isDebugEnabled()) {
debugCodeAssign("Savepoint", TraceObject.SAVEPOINT, id,
"setSavepoint(" + quote(name) + ")");
}
checkClosed();
CommandInterface set = prepareCommand(
"SAVEPOINT " + JdbcSavepoint.getName(name, 0),
Integer.MAX_VALUE);
set.executeUpdate(false);
return new JdbcSavepoint(this, 0, name, trace,
id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Rolls back to a savepoint.
*
* @param savepoint the savepoint
*/
@Override
public void rollback(Savepoint savepoint) throws SQLException {
try {
JdbcSavepoint sp = convertSavepoint(savepoint);
if (isDebugEnabled()) {
debugCode("rollback(" + sp.getTraceObjectName() + ");");
}
checkClosedForWrite();
try {
sp.rollback();
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Releases a savepoint.
*
* @param savepoint the savepoint to release
*/
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
try {
debugCode("releaseSavepoint(savepoint);");
checkClosed();
convertSavepoint(savepoint).release();
} catch (Exception e) {
throw logAndConvert(e);
}
}
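/*
* A minimal savepoint sketch (illustrative only; the table TEST and its
* values are assumptions):
*
*   conn.setAutoCommit(false);
*   stat.executeUpdate("INSERT INTO TEST VALUES (1)");
*   Savepoint sp = conn.setSavepoint("SP1");
*   stat.executeUpdate("INSERT INTO TEST VALUES (2)");
*   conn.rollback(sp); // undoes only the second insert
*   conn.commit();     // persists the first insert
*/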
private static JdbcSavepoint convertSavepoint(Savepoint savepoint) {
if (!(savepoint instanceof JdbcSavepoint)) {
throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1,
"" + savepoint);
}
return (JdbcSavepoint) savepoint;
}
/**
* Creates a prepared statement with the specified result set type,
* concurrency, and holdability.
*
* @param sql the SQL statement
* @param resultSetType the result set type (ResultSet.TYPE_*)
* @param resultSetConcurrency the concurrency (ResultSet.CONCUR_*)
* @param resultSetHoldability the holdability (ResultSet.HOLD* / CLOSE*)
* @return the prepared statement
* @throws SQLException if the connection is closed or the result set type,
* concurrency, or holdability are not supported
*/
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType,
int resultSetConcurrency, int resultSetHoldability)
throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("PreparedStatement",
TraceObject.PREPARED_STATEMENT, id,
"prepareStatement(" + quote(sql) + ", " + resultSetType
+ ", " + resultSetConcurrency + ", "
+ resultSetHoldability + ")");
}
checkTypeConcurrency(resultSetType, resultSetConcurrency);
checkHoldability(resultSetHoldability);
checkClosed();
sql = translateSQL(sql);
return new JdbcPreparedStatement(this, sql, id, resultSetType,
resultSetConcurrency, false, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a new prepared statement.
*
* @param sql the SQL statement
* @param autoGeneratedKeys
* {@link Statement#RETURN_GENERATED_KEYS} if generated keys should
* be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if
* generated keys should not be available
* @return the prepared statement
* @throws SQLException if the connection is closed
*/
@Override
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)
throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("PreparedStatement",
TraceObject.PREPARED_STATEMENT, id,
"prepareStatement(" + quote(sql) + ", "
+ autoGeneratedKeys + ");");
}
checkClosed();
sql = translateSQL(sql);
return new JdbcPreparedStatement(this, sql, id,
ResultSet.TYPE_FORWARD_ONLY,
Constants.DEFAULT_RESULT_SET_CONCURRENCY, false,
autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS);
} catch (Exception e) {
throw logAndConvert(e);
}
}
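/*
* A minimal generated-keys sketch (illustrative only; assumes a table TEST
* with an auto-increment column):
*
*   PreparedStatement prep = conn.prepareStatement(
*           "INSERT INTO TEST(NAME) VALUES (?)",
*           Statement.RETURN_GENERATED_KEYS);
*   prep.setString(1, "Hello");
*   prep.executeUpdate();
*   try (ResultSet keys = prep.getGeneratedKeys()) {
*       if (keys.next()) {
*           long id = keys.getLong(1);
*       }
*   }
*/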
/**
* Creates a new prepared statement.
*
* @param sql the SQL statement
* @param columnIndexes
* an array of column indexes indicating the columns with generated
* keys that should be returned from the inserted row
* @return the prepared statement
* @throws SQLException if the connection is closed
*/
@Override
public PreparedStatement prepareStatement(String sql, int[] columnIndexes)
throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("PreparedStatement",
TraceObject.PREPARED_STATEMENT, id,
"prepareStatement(" + quote(sql) + ", "
+ quoteIntArray(columnIndexes) + ");");
}
checkClosed();
sql = translateSQL(sql);
return new JdbcPreparedStatement(this, sql, id,
ResultSet.TYPE_FORWARD_ONLY,
Constants.DEFAULT_RESULT_SET_CONCURRENCY, false, columnIndexes);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Creates a new prepared statement.
*
* @param sql the SQL statement
* @param columnNames
* an array of column names indicating the columns with generated
* keys that should be returned from the inserted row
* @return the prepared statement
* @throws SQLException if the connection is closed
*/
@Override
public PreparedStatement prepareStatement(String sql, String[] columnNames)
throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
if (isDebugEnabled()) {
debugCodeAssign("PreparedStatement",
TraceObject.PREPARED_STATEMENT, id,
"prepareStatement(" + quote(sql) + ", "
+ quoteArray(columnNames) + ");");
}
checkClosed();
sql = translateSQL(sql);
return new JdbcPreparedStatement(this, sql, id,
ResultSet.TYPE_FORWARD_ONLY,
Constants.DEFAULT_RESULT_SET_CONCURRENCY, false, columnNames);
} catch (Exception e) {
throw logAndConvert(e);
}
}
// =============================================================
/**
* Prepare a command. This will parse the SQL statement.
*
* @param sql the SQL statement
* @param fetchSize the fetch size (used in remote connections)
* @return the command
*/
CommandInterface prepareCommand(String sql, int fetchSize) {
return session.prepareCommand(sql, fetchSize);
}
private CommandInterface prepareCommand(String sql, CommandInterface old) {
return old == null ? session.prepareCommand(sql, Integer.MAX_VALUE)
: old;
}
private static int translateGetEnd(String sql, int i, char c) {
int len = sql.length();
switch (c) {
case '$': {
if (i < len - 1 && sql.charAt(i + 1) == '$'
&& (i == 0 || sql.charAt(i - 1) <= ' ')) {
int j = sql.indexOf("$$", i + 2);
if (j < 0) {
throw DbException.getSyntaxError(sql, i);
}
return j + 1;
}
return i;
}
case '\'': {
int j = sql.indexOf('\'', i + 1);
if (j < 0) {
throw DbException.getSyntaxError(sql, i);
}
return j;
}
case '"': {
int j = sql.indexOf('"', i + 1);
if (j < 0) {
throw DbException.getSyntaxError(sql, i);
}
return j;
}
case '/': {
checkRunOver(i + 1, len, sql);
if (sql.charAt(i + 1) == '*') {
// block comment
int j = sql.indexOf("*/", i + 2);
if (j < 0) {
throw DbException.getSyntaxError(sql, i);
}
i = j + 1;
} else if (sql.charAt(i + 1) == '/') {
// single line comment
i += 2;
while (i < len && (c = sql.charAt(i)) != '\r' && c != '\n') {
i++;
}
}
return i;
}
case '-': {
checkRunOver(i + 1, len, sql);
if (sql.charAt(i + 1) == '-') {
// single line comment
i += 2;
while (i < len && (c = sql.charAt(i)) != '\r' && c != '\n') {
i++;
}
}
return i;
}
default:
throw DbException.throwInternalError("c=" + c);
}
}
/**
* Convert JDBC escape sequences in the SQL statement. This method throws an
* exception if the SQL statement is null.
*
* @param sql the SQL statement with or without JDBC escape sequences
* @return the SQL statement without JDBC escape sequences
*/
private static String translateSQL(String sql) {
return translateSQL(sql, true);
}
/**
* Convert JDBC escape sequences in the SQL statement if required. This
* method throws an exception if the SQL statement is null.
*
* @param sql the SQL statement with or without JDBC escape sequences
* @param escapeProcessing whether escape sequences should be replaced
* @return the SQL statement without JDBC escape sequences
*/
static String translateSQL(String sql, boolean escapeProcessing) {
if (sql == null) {
throw DbException.getInvalidValueException("SQL", null);
}
if (!escapeProcessing) {
return sql;
}
if (sql.indexOf('{') < 0) {
return sql;
}
int len = sql.length();
char[] chars = null;
int level = 0;
for (int i = 0; i < len; i++) {
char c = sql.charAt(i);
switch (c) {
case '\'':
case '"':
case '/':
case '-':
i = translateGetEnd(sql, i, c);
break;
case '{':
level++;
if (chars == null) {
chars = sql.toCharArray();
}
chars[i] = ' ';
while (Character.isSpaceChar(chars[i])) {
i++;
checkRunOver(i, len, sql);
}
int start = i;
if (chars[i] >= '0' && chars[i] <= '9') {
chars[i - 1] = '{';
while (true) {
checkRunOver(i, len, sql);
c = chars[i];
if (c == '}') {
break;
}
switch (c) {
case '\'':
case '"':
case '/':
case '-':
i = translateGetEnd(sql, i, c);
break;
default:
}
i++;
}
level--;
break;
} else if (chars[i] == '?') {
i++;
checkRunOver(i, len, sql);
while (Character.isSpaceChar(chars[i])) {
i++;
checkRunOver(i, len, sql);
}
if (sql.charAt(i) != '=') {
throw DbException.getSyntaxError(sql, i, "=");
}
i++;
checkRunOver(i, len, sql);
while (Character.isSpaceChar(chars[i])) {
i++;
checkRunOver(i, len, sql);
}
}
while (!Character.isSpaceChar(chars[i])) {
i++;
checkRunOver(i, len, sql);
}
int remove = 0;
if (found(sql, start, "fn")) {
remove = 2;
} else if (found(sql, start, "escape")) {
break;
} else if (found(sql, start, "call")) {
break;
} else if (found(sql, start, "oj")) {
remove = 2;
} else if (found(sql, start, "ts")) {
break;
} else if (found(sql, start, "t")) {
break;
} else if (found(sql, start, "d")) {
break;
} else if (found(sql, start, "params")) {
remove = "params".length();
}
for (i = start; remove > 0; i++, remove--) {
chars[i] = ' ';
}
break;
case '}':
if (--level < 0) {
throw DbException.getSyntaxError(sql, i);
}
chars[i] = ' ';
break;
case '$':
i = translateGetEnd(sql, i, c);
break;
default:
}
}
if (level != 0) {
throw DbException.getSyntaxError(sql, sql.length() - 1);
}
if (chars != null) {
sql = new String(chars);
}
return sql;
}
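/*
* A minimal escape-translation sketch (illustrative only): nativeSQL(String)
* exposes this conversion; the braces and the escape keyword are replaced
* with spaces, so the result is roughly:
*
*   conn.nativeSQL("SELECT {fn UCASE(NAME)} FROM TEST");
*   // -> "SELECT    UCASE(NAME)  FROM TEST" (whitespace approximate)
*/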
private static void checkRunOver(int i, int len, String sql) {
if (i >= len) {
throw DbException.getSyntaxError(sql, i);
}
}
private static boolean found(String sql, int start, String other) {
return sql.regionMatches(true, start, other, 0, other.length());
}
private static void checkTypeConcurrency(int resultSetType,
int resultSetConcurrency) {
switch (resultSetType) {
case ResultSet.TYPE_FORWARD_ONLY:
case ResultSet.TYPE_SCROLL_INSENSITIVE:
case ResultSet.TYPE_SCROLL_SENSITIVE:
break;
default:
throw DbException.getInvalidValueException("resultSetType",
resultSetType);
}
switch (resultSetConcurrency) {
case ResultSet.CONCUR_READ_ONLY:
case ResultSet.CONCUR_UPDATABLE:
break;
default:
throw DbException.getInvalidValueException("resultSetConcurrency",
resultSetConcurrency);
}
}
private static void checkHoldability(int resultSetHoldability) {
// TODO compatibility / correctness: DBPool uses
// ResultSet.HOLD_CURSORS_OVER_COMMIT
if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT
&& resultSetHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) {
throw DbException.getInvalidValueException("resultSetHoldability",
resultSetHoldability);
}
}
/**
* INTERNAL. Check if this connection is closed. The next operation is a
* read request.
*
* @throws DbException if the connection or session is closed
*/
protected void checkClosed() {
checkClosed(false);
}
/**
* Check if this connection is closed. The next operation may be a write
* request.
*
* @throws DbException if the connection or session is closed
*/
private void checkClosedForWrite() {
checkClosed(true);
}
/**
* INTERNAL. Check if this connection is closed.
*
* @param write if the next operation is possibly writing
* @throws DbException if the connection or session is closed
*/
protected void checkClosed(boolean write) {
if (session == null) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
if (session.isClosed()) {
throw DbException.get(ErrorCode.DATABASE_CALLED_AT_SHUTDOWN);
}
if (session.isReconnectNeeded(write)) {
trace.debug("reconnect");
closePreparedCommands();
session = session.reconnect(write);
trace = session.getTrace();
}
}
/**
* INTERNAL. Called after executing a command that could have written
* something.
*/
protected void afterWriting() {
if (session != null) {
session.afterWriting();
}
}
String getURL() {
checkClosed();
return url;
}
String getUser() {
checkClosed();
return user;
}
private void rollbackInternal() {
rollback = prepareCommand("ROLLBACK", rollback);
rollback.executeUpdate(false);
}
/**
* INTERNAL
*/
public int getPowerOffCount() {
return (session == null || session.isClosed()) ? 0
: session.getPowerOffCount();
}
/**
* INTERNAL
*/
public void setPowerOffCount(int count) {
if (session != null) {
session.setPowerOffCount(count);
}
}
/**
* INTERNAL
*/
public void setExecutingStatement(Statement stat) {
executingStatement = stat;
}
/**
* INTERNAL
*/
boolean scopeGeneratedKeys() {
return scopeGeneratedKeys;
}
/**
* INTERNAL
*/
ResultSet getGeneratedKeys(JdbcStatement stat, int id) {
getGeneratedKeys = prepareCommand(
"SELECT SCOPE_IDENTITY() "
+ "WHERE SCOPE_IDENTITY() IS NOT NULL",
getGeneratedKeys);
ResultInterface result = getGeneratedKeys.executeQuery(0, false);
return new JdbcResultSet(this, stat, getGeneratedKeys, result,
id, false, true, false);
}
/**
* Create a new empty Clob object.
*
* @return the object
*/
@Override
public Clob createClob() throws SQLException {
try {
int id = getNextId(TraceObject.CLOB);
debugCodeAssign("Clob", TraceObject.CLOB, id, "createClob()");
checkClosedForWrite();
try {
Value v = session.getDataHandler().getLobStorage()
.createClob(new InputStreamReader(
new ByteArrayInputStream(Utils.EMPTY_BYTES)),
0);
session.addTemporaryLob(v);
return new JdbcClob(this, v, id);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Create a new empty Blob object.
*
* @return the object
*/
@Override
public Blob createBlob() throws SQLException {
try {
int id = getNextId(TraceObject.BLOB);
debugCodeAssign("Blob", TraceObject.BLOB, id, "createClob()");
checkClosedForWrite();
try {
Value v = session.getDataHandler().getLobStorage().createBlob(
new ByteArrayInputStream(Utils.EMPTY_BYTES), 0);
synchronized (session) {
session.addTemporaryLob(v);
}
return new JdbcBlob(this, v, id);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Create a new empty NClob object.
*
* @return the object
*/
@Override
public NClob createNClob() throws SQLException {
try {
int id = getNextId(TraceObject.CLOB);
debugCodeAssign("NClob", TraceObject.CLOB, id, "createNClob()");
checkClosedForWrite();
try {
Value v = session.getDataHandler().getLobStorage()
.createClob(new InputStreamReader(
new ByteArrayInputStream(Utils.EMPTY_BYTES)),
0);
session.addTemporaryLob(v);
return new JdbcClob(this, v, id);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Create a new empty SQLXML object.
*/
@Override
public SQLXML createSQLXML() throws SQLException {
throw unsupported("SQLXML");
}
/**
* Create a new Array object.
*
* @param typeName the type name
* @param elements the values
* @return the array
*/
@Override
public Array createArrayOf(String typeName, Object[] elements)
throws SQLException {
try {
int id = getNextId(TraceObject.ARRAY);
debugCodeAssign("Array", TraceObject.ARRAY, id, "createArrayOf()");
checkClosed();
Value value = DataType.convertToValue(session, elements,
Value.ARRAY);
return new JdbcArray(this, value, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
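/*
* A minimal array sketch (illustrative only; note that this implementation
* converts the elements to an ARRAY value and does not validate typeName):
*
*   Array array = conn.createArrayOf("INTEGER", new Object[] { 1, 2, 3 });
*   Object[] values = (Object[]) array.getArray();
*/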
/**
* [Not supported] Create a new empty Struct object.
*/
@Override
public Struct createStruct(String typeName, Object[] attributes)
throws SQLException {
throw unsupported("Struct");
}
/**
* Returns true if this connection is still valid.
*
* @param timeout the number of seconds to wait for the database to respond
* (ignored)
* @return true if the connection is valid.
*/
@Override
public synchronized boolean isValid(int timeout) {
try {
debugCodeCall("isValid", timeout);
if (session == null || session.isClosed()) {
return false;
}
// force a network round trip (if networked)
getTransactionIsolation();
return true;
} catch (Exception e) {
// this method doesn't throw an exception, but it logs it
logAndConvert(e);
return false;
}
}
/**
* Set a client property. This method always throws a SQLClientInfoException
* in standard mode. In compatibility mode the following properties are
* supported:
* <ul>
* <li>DB2: The properties: ApplicationName, ClientAccountingInformation,
* ClientUser and ClientCorrelationToken are supported.</li>
* <li>MySQL: All property names are supported.</li>
* <li>Oracle: All properties in the form <namespace>.<key name>
* are supported.</li>
* <li>PostgreSQL: The ApplicationName property is supported.</li>
* </ul>
* For unsupported properties a SQLClientInfoException is thrown.
*
* @param name the name of the property
* @param value the value
*/
@Override
public void setClientInfo(String name, String value)
throws SQLClientInfoException {
try {
if (isDebugEnabled()) {
debugCode("setClientInfo(" + quote(name) + ", " + quote(value)
+ ");");
}
checkClosed();
// no change to property: Ignore call. This early exit fixes a
// problem with WebSphere Liberty resetting the client info of a
// pooled connection to its initial values.
if (Objects.equals(value, getClientInfo(name))) {
return;
}
if (isInternalProperty(name)) {
throw new SQLClientInfoException(
"Property name '" + name + " is used internally by H2.",
Collections.<String, ClientInfoStatus> emptyMap());
}
Pattern clientInfoNameRegEx = Mode
.getInstance(getMode()).supportedClientInfoPropertiesRegEx;
if (clientInfoNameRegEx != null
&& clientInfoNameRegEx.matcher(name).matches()) {
if (clientInfo == null) {
clientInfo = new HashMap<>();
}
clientInfo.put(name, value);
} else {
throw new SQLClientInfoException(
"Client info name '" + name + "' not supported.",
Collections.<String, ClientInfoStatus> emptyMap());
}
} catch (Exception e) {
throw convertToClientInfoException(logAndConvert(e));
}
}
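/*
* A minimal usage sketch (illustrative only; assumes a compatibility mode
* such as MODE=MySQL in the JDBC URL, where all property names are
* accepted per the list above):
*
*   conn.setClientInfo("ApplicationName", "demo");
*   String app = conn.getClientInfo("ApplicationName"); // "demo"
*/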
private static boolean isInternalProperty(String name) {
return NUM_SERVERS.equals(name) || name.startsWith(PREFIX_SERVER);
}
private static SQLClientInfoException convertToClientInfoException(
SQLException x) {
if (x instanceof SQLClientInfoException) {
return (SQLClientInfoException) x;
}
return new SQLClientInfoException(x.getMessage(), x.getSQLState(),
x.getErrorCode(), null, null);
}
/**
* Set the client properties. This replaces all existing properties. This
* method always throws a SQLClientInfoException in standard mode. In
* compatibility mode some properties may be supported (see
* setProperty(String, String) for details).
*
* @param properties the properties (ignored)
*/
@Override
public void setClientInfo(Properties properties)
throws SQLClientInfoException {
try {
if (isDebugEnabled()) {
debugCode("setClientInfo(properties);");
}
checkClosed();
if (clientInfo == null) {
clientInfo = new HashMap<>();
} else {
clientInfo.clear();
}
for (Map.Entry<Object, Object> entry : properties.entrySet()) {
setClientInfo((String) entry.getKey(),
(String) entry.getValue());
}
} catch (Exception e) {
throw convertToClientInfoException(logAndConvert(e));
}
}
/**
* Get the client properties.
*
* @return the property list
*/
@Override
public Properties getClientInfo() throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getClientInfo();");
}
checkClosed();
ArrayList<String> serverList = session.getClusterServers();
Properties p = new Properties();
if (clientInfo != null) {
for (Map.Entry<String, String> entry : clientInfo.entrySet()) {
p.setProperty(entry.getKey(), entry.getValue());
}
}
p.setProperty(NUM_SERVERS, String.valueOf(serverList.size()));
for (int i = 0; i < serverList.size(); i++) {
p.setProperty(PREFIX_SERVER + String.valueOf(i),
serverList.get(i));
}
return p;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Get a client property.
*
* @param name the client info name
* @return the property value or null if the property is not found or not
* supported.
*/
@Override
public String getClientInfo(String name) throws SQLException {
try {
if (isDebugEnabled()) {
debugCodeCall("getClientInfo", name);
}
checkClosed();
if (name == null) {
throw DbException.getInvalidValueException("name", null);
}
return getClientInfo().getProperty(name);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Return an object of this class if possible.
*
* @param iface the class
* @return this
*/
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
if (isWrapperFor(iface)) {
return (T) this;
}
throw DbException.getInvalidValueException("iface", iface);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if unwrap can return an object of this class.
*
* @param iface the class
* @return whether or not the interface is assignable from this class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface != null && iface.isAssignableFrom(getClass());
}
/**
* Create a Clob value from this reader.
*
* @param x the reader
* @param length the length (if smaller than or equal to 0, all data until the
* end of file is read)
* @return the value
*/
public Value createClob(Reader x, long length) {
if (x == null) {
return ValueNull.INSTANCE;
}
if (length <= 0) {
length = -1;
}
Value v = session.getDataHandler().getLobStorage().createClob(x,
length);
session.addTemporaryLob(v);
return v;
}
/**
* Create a Blob value from this input stream.
*
* @param x the input stream
* @param length the length (if smaller than or equal to 0, all data until the
* end of file is read)
* @return the value
*/
public Value createBlob(InputStream x, long length) {
if (x == null) {
return ValueNull.INSTANCE;
}
if (length <= 0) {
length = -1;
}
Value v = session.getDataHandler().getLobStorage().createBlob(x,
length);
session.addTemporaryLob(v);
return v;
}
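    /**
     * Illustrative sketch, not part of the original class: building a CLOB
     * value from an in-memory string with createClob above. A non-positive
     * length is normalized to -1, meaning the reader is consumed until the
     * end of the stream.
     */
    private Value createClobExample(String text) {
        Reader reader = new java.io.StringReader(text);
        // -1: read everything the reader provides
        return createClob(reader, -1);
    }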
/**
     * Sets the given schema name to access. The current implementation is
     * case sensitive, i.e. the schema name must be passed in the correct
     * case (uppercase for unquoted names).
*
* @param schema the schema name
*/
@Override
public void setSchema(String schema) throws SQLException {
try {
if (isDebugEnabled()) {
debugCodeCall("setSchema", schema);
}
checkClosed();
session.setCurrentSchemaName(schema);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Retrieves the current schema name for this connection.
*
* @return current schema name
*/
@Override
public String getSchema() throws SQLException {
try {
if (isDebugEnabled()) {
debugCodeCall("getSchema");
}
checkClosed();
return session.getCurrentSchemaName();
} catch (Exception e) {
throw logAndConvert(e);
}
}
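    /**
     * Illustrative sketch, not part of the original class: switching the
     * current schema and reading it back. Unquoted schema names must be
     * passed in uppercase because the lookup is case sensitive.
     */
    private void switchSchemaExample() throws SQLException {
        setSchema("PUBLIC");
        // getSchema() reflects the change immediately
        assert "PUBLIC".equals(getSchema());
    }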
/**
* [Not supported]
*
* @param executor the executor used by this method
*/
@Override
public void abort(Executor executor) {
// not supported
}
/**
* [Not supported]
*
* @param executor the executor used by this method
* @param milliseconds the TCP connection timeout
*/
@Override
public void setNetworkTimeout(Executor executor, int milliseconds) {
// not supported
}
/**
* [Not supported]
*/
@Override
public int getNetworkTimeout() {
return 0;
}
/**
* Check that the given type map is either null or empty.
*
* @param map the type map
* @throws DbException if the map is not empty
*/
static void checkMap(Map<String, Class<?>> map) {
if (map != null && map.size() > 0) {
throw DbException.getUnsupportedException("map.size > 0");
}
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": url=" + url + " user=" + user;
}
/**
* Convert an object to the default Java object for the given SQL type. For
* example, LOB objects are converted to java.sql.Clob / java.sql.Blob.
*
* @param v the value
* @return the object
*/
Object convertToDefaultObject(Value v) {
switch (v.getType()) {
case Value.CLOB: {
int id = getNextId(TraceObject.CLOB);
return new JdbcClob(this, v, id);
}
case Value.BLOB: {
int id = getNextId(TraceObject.BLOB);
return new JdbcBlob(this, v, id);
}
case Value.JAVA_OBJECT:
if (SysProperties.serializeJavaObject) {
return JdbcUtils.deserialize(v.getBytesNoCopy(),
session.getDataHandler());
}
break;
case Value.BYTE:
case Value.SHORT:
if (!SysProperties.OLD_RESULT_SET_GET_OBJECT) {
return v.getInt();
}
break;
}
return v.getObject();
}
CompareMode getCompareMode() {
return session.getDataHandler().getCompareMode();
}
/**
* INTERNAL
*/
public void setTraceLevel(int level) {
trace.setLevel(level);
}
String getMode() throws SQLException {
        if (mode == null) {
            // lazily read and cache the compatibility mode setting
            PreparedStatement prep = prepareStatement(
"SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME=?");
prep.setString(1, "MODE");
ResultSet rs = prep.executeQuery();
rs.next();
mode = rs.getString(1);
prep.close();
}
return mode;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcConnectionBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcConnectionBackwardsCompat {
// compatibility interface
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcDatabaseMetaData.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.RowIdLifetime;
import java.sql.SQLException;
import java.sql.Types;
import java.util.Arrays;
import java.util.Properties;
import org.h2.engine.Constants;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.message.TraceObject;
import org.h2.tools.SimpleResultSet;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
/**
* Represents the meta data for a database.
*/
public class JdbcDatabaseMetaData extends TraceObject implements
DatabaseMetaData, JdbcDatabaseMetaDataBackwardsCompat {
private final JdbcConnection conn;
JdbcDatabaseMetaData(JdbcConnection conn, Trace trace, int id) {
setTrace(trace, TraceObject.DATABASE_META_DATA, id);
this.conn = conn;
}
/**
* Returns the major version of this driver.
*
* @return the major version number
*/
@Override
public int getDriverMajorVersion() {
debugCodeCall("getDriverMajorVersion");
return Constants.VERSION_MAJOR;
}
/**
* Returns the minor version of this driver.
*
* @return the minor version number
*/
@Override
public int getDriverMinorVersion() {
debugCodeCall("getDriverMinorVersion");
return Constants.VERSION_MINOR;
}
/**
* Gets the database product name.
*
* @return the product name ("H2")
*/
@Override
public String getDatabaseProductName() {
debugCodeCall("getDatabaseProductName");
// This value must stay like that, see
// http://opensource.atlassian.com/projects/hibernate/browse/HHH-2682
return "H2";
}
/**
* Gets the product version of the database.
*
* @return the product version
*/
@Override
public String getDatabaseProductVersion() {
debugCodeCall("getDatabaseProductVersion");
return Constants.getFullVersion();
}
/**
* Gets the name of the JDBC driver.
*
* @return the driver name ("H2 JDBC Driver")
*/
@Override
public String getDriverName() {
debugCodeCall("getDriverName");
return "H2 JDBC Driver";
}
/**
* Gets the version number of the driver. The format is
* [MajorVersion].[MinorVersion].
*
* @return the version number
*/
@Override
public String getDriverVersion() {
debugCodeCall("getDriverVersion");
return Constants.getFullVersion();
}
/**
* Gets the list of tables in the database. The result set is sorted by
* TABLE_TYPE, TABLE_SCHEM, and TABLE_NAME.
*
* <ul>
* <li>1 TABLE_CAT (String) table catalog </li>
* <li>2 TABLE_SCHEM (String) table schema </li>
* <li>3 TABLE_NAME (String) table name </li>
* <li>4 TABLE_TYPE (String) table type </li>
* <li>5 REMARKS (String) comment </li>
* <li>6 TYPE_CAT (String) always null </li>
* <li>7 TYPE_SCHEM (String) always null </li>
* <li>8 TYPE_NAME (String) always null </li>
* <li>9 SELF_REFERENCING_COL_NAME (String) always null </li>
* <li>10 REF_GENERATION (String) always null </li>
     * <li>11 SQL (String) the create table statement or NULL for system tables
* </li>
* </ul>
*
* @param catalogPattern null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableNamePattern null (to get all objects) or a table name
* (uppercase for unquoted names)
* @param types null or a list of table types
     * @return the list of tables
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getTables(String catalogPattern, String schemaPattern,
String tableNamePattern, String[] types) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getTables(" + quote(catalogPattern) + ", " +
quote(schemaPattern) + ", " + quote(tableNamePattern) +
", " + quoteArray(types) + ");");
}
checkClosed();
String tableType;
if (types != null && types.length > 0) {
StatementBuilder buff = new StatementBuilder("TABLE_TYPE IN(");
for (String ignored : types) {
buff.appendExceptFirst(", ");
buff.append('?');
}
tableType = buff.append(')').toString();
} else {
tableType = "TRUE";
}
String tableSelect = "SELECT "
+ "TABLE_CATALOG TABLE_CAT, "
+ "TABLE_SCHEMA TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "TABLE_TYPE, "
+ "REMARKS, "
+ "TYPE_NAME TYPE_CAT, "
+ "TYPE_NAME TYPE_SCHEM, "
+ "TYPE_NAME, "
+ "TYPE_NAME SELF_REFERENCING_COL_NAME, "
+ "TYPE_NAME REF_GENERATION, "
+ "SQL "
+ "FROM INFORMATION_SCHEMA.TABLES "
+ "WHERE TABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND TABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND TABLE_NAME LIKE ? ESCAPE ? "
+ "AND (" + tableType + ") ";
boolean includeSynonyms = types == null || Arrays.asList(types).contains("SYNONYM");
String synonymSelect = "SELECT "
+ "SYNONYM_CATALOG TABLE_CAT, "
+ "SYNONYM_SCHEMA TABLE_SCHEM, "
+ "SYNONYM_NAME as TABLE_NAME, "
+ "TYPE_NAME AS TABLE_TYPE, "
+ "REMARKS, "
+ "TYPE_NAME TYPE_CAT, "
+ "TYPE_NAME TYPE_SCHEM, "
+ "TYPE_NAME AS TYPE_NAME, "
+ "TYPE_NAME SELF_REFERENCING_COL_NAME, "
+ "TYPE_NAME REF_GENERATION, "
+ "NULL AS SQL "
+ "FROM INFORMATION_SCHEMA.SYNONYMS "
+ "WHERE SYNONYM_CATALOG LIKE ? ESCAPE ? "
+ "AND SYNONYM_SCHEMA LIKE ? ESCAPE ? "
+ "AND SYNONYM_NAME LIKE ? ESCAPE ? "
+ "AND (" + includeSynonyms + ") ";
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TABLE_CAT, "
+ "TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "TABLE_TYPE, "
+ "REMARKS, "
+ "TYPE_CAT, "
+ "TYPE_SCHEM, "
+ "TYPE_NAME, "
+ "SELF_REFERENCING_COL_NAME, "
+ "REF_GENERATION, "
+ "SQL "
+ "FROM (" + synonymSelect + " UNION " + tableSelect + ") "
+ "ORDER BY TABLE_TYPE, TABLE_SCHEM, TABLE_NAME");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, getPattern(tableNamePattern));
prep.setString(6, "\\");
prep.setString(7, getCatalogPattern(catalogPattern));
prep.setString(8, "\\");
prep.setString(9, getSchemaPattern(schemaPattern));
prep.setString(10, "\\");
prep.setString(11, getPattern(tableNamePattern));
prep.setString(12, "\\");
for (int i = 0; types != null && i < types.length; i++) {
prep.setString(13 + i, types[i]);
}
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
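    /**
     * Illustrative sketch, not part of the original class: listing the user
     * tables of the PUBLIC schema through getTables above. Unquoted names
     * are matched in uppercase.
     */
    private void printPublicTables() throws SQLException {
        ResultSet rs = getTables(null, "PUBLIC", "%", new String[] { "TABLE" });
        while (rs.next()) {
            // columns 2 and 3 are TABLE_SCHEM and TABLE_NAME
            System.out.println(rs.getString(2) + "." + rs.getString(3));
        }
        rs.close();
    }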
/**
* Gets the list of columns. The result set is sorted by TABLE_SCHEM,
* TABLE_NAME, and ORDINAL_POSITION.
*
* <ul>
* <li>1 TABLE_CAT (String) table catalog </li>
* <li>2 TABLE_SCHEM (String) table schema </li>
* <li>3 TABLE_NAME (String) table name </li>
* <li>4 COLUMN_NAME (String) column name </li>
* <li>5 DATA_TYPE (short) data type (see java.sql.Types) </li>
* <li>6 TYPE_NAME (String) data type name ("INTEGER", "VARCHAR",...) </li>
* <li>7 COLUMN_SIZE (int) precision
* (values larger than 2 GB are returned as 2 GB)</li>
* <li>8 BUFFER_LENGTH (int) unused </li>
* <li>9 DECIMAL_DIGITS (int) scale (0 for INTEGER and VARCHAR) </li>
* <li>10 NUM_PREC_RADIX (int) radix (always 10) </li>
* <li>11 NULLABLE (int) columnNoNulls or columnNullable</li>
* <li>12 REMARKS (String) comment (always empty) </li>
* <li>13 COLUMN_DEF (String) default value </li>
* <li>14 SQL_DATA_TYPE (int) unused </li>
* <li>15 SQL_DATETIME_SUB (int) unused </li>
* <li>16 CHAR_OCTET_LENGTH (int) unused </li>
* <li>17 ORDINAL_POSITION (int) the column index (1,2,...) </li>
* <li>18 IS_NULLABLE (String) "NO" or "YES" </li>
* <li>19 SCOPE_CATALOG (String) always null </li>
* <li>20 SCOPE_SCHEMA (String) always null </li>
* <li>21 SCOPE_TABLE (String) always null </li>
* <li>22 SOURCE_DATA_TYPE (short) null </li>
* <li>23 IS_AUTOINCREMENT (String) "NO" or "YES" </li>
* <li>24 SCOPE_CATLOG (String) always null (the typo is on purpose,
* for compatibility with the JDBC specification prior to 4.1)</li>
* </ul>
*
* @param catalogPattern null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableNamePattern null (to get all objects) or a table name
* (uppercase for unquoted names)
* @param columnNamePattern null (to get all objects) or a column name
* (uppercase for unquoted names)
* @return the list of columns
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getColumns(String catalogPattern, String schemaPattern,
String tableNamePattern, String columnNamePattern)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getColumns(" + quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(tableNamePattern)+", "
+quote(columnNamePattern)+");");
}
checkClosed();
String tableSql = "SELECT "
+ "TABLE_CATALOG TABLE_CAT, "
+ "TABLE_SCHEMA TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "COLUMN_NAME, "
+ "DATA_TYPE, "
+ "TYPE_NAME, "
+ "CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, "
+ "CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, "
+ "NUMERIC_SCALE DECIMAL_DIGITS, "
+ "NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, "
+ "NULLABLE, "
+ "REMARKS, "
+ "COLUMN_DEFAULT COLUMN_DEF, "
+ "DATA_TYPE SQL_DATA_TYPE, "
+ "ZERO() SQL_DATETIME_SUB, "
+ "CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, "
+ "ORDINAL_POSITION, "
+ "IS_NULLABLE IS_NULLABLE, "
+ "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, "
+ "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, "
+ "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, "
+ "SOURCE_DATA_TYPE, "
+ "CASE WHEN SEQUENCE_NAME IS NULL THEN "
+ "CAST(? AS VARCHAR) ELSE CAST(? AS VARCHAR) END IS_AUTOINCREMENT, "
+ "CAST(SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATLOG "
+ "FROM INFORMATION_SCHEMA.COLUMNS "
+ "WHERE TABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND TABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND TABLE_NAME LIKE ? ESCAPE ? "
+ "AND COLUMN_NAME LIKE ? ESCAPE ? "
+ "ORDER BY TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION";
String synonymSql = "SELECT "
+ "s.SYNONYM_CATALOG TABLE_CAT, "
+ "s.SYNONYM_SCHEMA TABLE_SCHEM, "
+ "s.SYNONYM_NAME TABLE_NAME, "
+ "c.COLUMN_NAME, "
+ "c.DATA_TYPE, "
+ "c.TYPE_NAME, "
+ "c.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, "
+ "c.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, "
+ "c.NUMERIC_SCALE DECIMAL_DIGITS, "
+ "c.NUMERIC_PRECISION_RADIX NUM_PREC_RADIX, "
+ "c.NULLABLE, "
+ "c.REMARKS, "
+ "c.COLUMN_DEFAULT COLUMN_DEF, "
+ "c.DATA_TYPE SQL_DATA_TYPE, "
+ "ZERO() SQL_DATETIME_SUB, "
+ "c.CHARACTER_OCTET_LENGTH CHAR_OCTET_LENGTH, "
+ "c.ORDINAL_POSITION, "
+ "c.IS_NULLABLE IS_NULLABLE, "
+ "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATALOG, "
+ "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_SCHEMA, "
+ "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_TABLE, "
+ "c.SOURCE_DATA_TYPE, "
+ "CASE WHEN c.SEQUENCE_NAME IS NULL THEN "
+ "CAST(? AS VARCHAR) ELSE CAST(? AS VARCHAR) END IS_AUTOINCREMENT, "
+ "CAST(c.SOURCE_DATA_TYPE AS VARCHAR) SCOPE_CATLOG "
+ "FROM INFORMATION_SCHEMA.COLUMNS c JOIN INFORMATION_SCHEMA.SYNONYMS s ON "
+ "s.SYNONYM_FOR = c.TABLE_NAME "
+ "AND s.SYNONYM_FOR_SCHEMA = c.TABLE_SCHEMA "
+ "WHERE s.SYNONYM_CATALOG LIKE ? ESCAPE ? "
+ "AND s.SYNONYM_SCHEMA LIKE ? ESCAPE ? "
+ "AND s.SYNONYM_NAME LIKE ? ESCAPE ? "
+ "AND c.COLUMN_NAME LIKE ? ESCAPE ? ";
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TABLE_CAT, "
+ "TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "COLUMN_NAME, "
+ "DATA_TYPE, "
+ "TYPE_NAME, "
+ "COLUMN_SIZE, "
+ "BUFFER_LENGTH, "
+ "DECIMAL_DIGITS, "
+ "NUM_PREC_RADIX, "
+ "NULLABLE, "
+ "REMARKS, "
+ "COLUMN_DEF, "
+ "SQL_DATA_TYPE, "
+ "SQL_DATETIME_SUB, "
+ "CHAR_OCTET_LENGTH, "
+ "ORDINAL_POSITION, "
+ "IS_NULLABLE, "
+ "SCOPE_CATALOG, "
+ "SCOPE_SCHEMA, "
+ "SCOPE_TABLE, "
+ "SOURCE_DATA_TYPE, "
+ "IS_AUTOINCREMENT, "
+ "SCOPE_CATLOG "
+ "FROM ((" + tableSql + ") UNION (" + synonymSql
+ ")) ORDER BY TABLE_SCHEM, TABLE_NAME, ORDINAL_POSITION");
prep.setString(1, "NO");
prep.setString(2, "YES");
prep.setString(3, getCatalogPattern(catalogPattern));
prep.setString(4, "\\");
prep.setString(5, getSchemaPattern(schemaPattern));
prep.setString(6, "\\");
prep.setString(7, getPattern(tableNamePattern));
prep.setString(8, "\\");
prep.setString(9, getPattern(columnNamePattern));
prep.setString(10, "\\");
prep.setString(11, "NO");
prep.setString(12, "YES");
prep.setString(13, getCatalogPattern(catalogPattern));
prep.setString(14, "\\");
prep.setString(15, getSchemaPattern(schemaPattern));
prep.setString(16, "\\");
prep.setString(17, getPattern(tableNamePattern));
prep.setString(18, "\\");
prep.setString(19, getPattern(columnNamePattern));
prep.setString(20, "\\");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
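    /**
     * Illustrative sketch, not part of the original class: dumping the name
     * and type of every column of one table via getColumns above.
     */
    private void printColumns(String tableName) throws SQLException {
        ResultSet rs = getColumns(null, null, tableName, "%");
        while (rs.next()) {
            // COLUMN_NAME and TYPE_NAME are columns 4 and 6 of the result
            System.out.println(rs.getString("COLUMN_NAME") + " "
                    + rs.getString("TYPE_NAME"));
        }
        rs.close();
    }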
/**
     * Gets the list of indexes for a table. The primary key index (if
* there is one) is also listed, with the name PRIMARY_KEY. The result set
* is sorted by NON_UNIQUE ('false' first), TYPE, TABLE_SCHEM, INDEX_NAME,
* and ORDINAL_POSITION.
*
* <ul>
* <li>1 TABLE_CAT (String) table catalog </li>
* <li>2 TABLE_SCHEM (String) table schema </li>
* <li>3 TABLE_NAME (String) table name </li>
* <li>4 NON_UNIQUE (boolean) 'true' if non-unique</li>
* <li>5 INDEX_QUALIFIER (String) index catalog </li>
* <li>6 INDEX_NAME (String) index name </li>
* <li>7 TYPE (short) the index type (always tableIndexOther) </li>
* <li>8 ORDINAL_POSITION (short) column index (1, 2, ...) </li>
* <li>9 COLUMN_NAME (String) column name </li>
* <li>10 ASC_OR_DESC (String) ascending or descending (always 'A') </li>
* <li>11 CARDINALITY (int) numbers of unique values </li>
     * <li>12 PAGES (int) number of pages used (always 0) </li>
* <li>13 FILTER_CONDITION (String) filter condition (always empty) </li>
* <li>14 SORT_TYPE (int) the sort type bit map: 1=DESCENDING,
* 2=NULLS_FIRST, 4=NULLS_LAST </li>
* </ul>
*
* @param catalogPattern null or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableName table name (must be specified)
* @param unique only unique indexes
* @param approximate is ignored
* @return the list of indexes and columns
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getIndexInfo(String catalogPattern, String schemaPattern,
String tableName, boolean unique, boolean approximate)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getIndexInfo(" + quote(catalogPattern) + ", " +
quote(schemaPattern) + ", " + quote(tableName) + ", " +
unique + ", " + approximate + ");");
}
String uniqueCondition;
if (unique) {
uniqueCondition = "NON_UNIQUE=FALSE";
} else {
uniqueCondition = "TRUE";
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TABLE_CATALOG TABLE_CAT, "
+ "TABLE_SCHEMA TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "NON_UNIQUE, "
+ "TABLE_CATALOG INDEX_QUALIFIER, "
+ "INDEX_NAME, "
+ "INDEX_TYPE TYPE, "
+ "ORDINAL_POSITION, "
+ "COLUMN_NAME, "
+ "ASC_OR_DESC, "
// TODO meta data for number of unique values in an index
+ "CARDINALITY, "
+ "PAGES, "
+ "FILTER_CONDITION, "
+ "SORT_TYPE "
+ "FROM INFORMATION_SCHEMA.INDEXES "
+ "WHERE TABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND TABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND (" + uniqueCondition + ") "
+ "AND TABLE_NAME = ? "
+ "ORDER BY NON_UNIQUE, TYPE, TABLE_SCHEM, INDEX_NAME, ORDINAL_POSITION");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, tableName);
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
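    /**
     * Illustrative sketch, not part of the original class: listing the
     * unique indexes of a table. Unlike the schema parameter, tableName is
     * compared with '=' rather than LIKE, so it is not a pattern.
     */
    private void printUniqueIndexes(String tableName) throws SQLException {
        ResultSet rs = getIndexInfo(null, null, tableName, true, false);
        while (rs.next()) {
            System.out.println(rs.getString("INDEX_NAME") + " on "
                    + rs.getString("COLUMN_NAME"));
        }
        rs.close();
    }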
/**
* Gets the primary key columns for a table. The result set is sorted by
* TABLE_SCHEM, and COLUMN_NAME (and not by KEY_SEQ).
*
* <ul>
* <li>1 TABLE_CAT (String) table catalog</li>
* <li>2 TABLE_SCHEM (String) table schema</li>
* <li>3 TABLE_NAME (String) table name</li>
* <li>4 COLUMN_NAME (String) column name</li>
* <li>5 KEY_SEQ (short) the column index of this column (1,2,...)</li>
* <li>6 PK_NAME (String) the name of the primary key index</li>
* </ul>
*
* @param catalogPattern null or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableName table name (must be specified)
* @return the list of primary key columns
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getPrimaryKeys(String catalogPattern,
String schemaPattern, String tableName) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getPrimaryKeys("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(tableName)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TABLE_CATALOG TABLE_CAT, "
+ "TABLE_SCHEMA TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "COLUMN_NAME, "
+ "ORDINAL_POSITION KEY_SEQ, "
+ "IFNULL(CONSTRAINT_NAME, INDEX_NAME) PK_NAME "
+ "FROM INFORMATION_SCHEMA.INDEXES "
+ "WHERE TABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND TABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND TABLE_NAME = ? "
+ "AND PRIMARY_KEY = TRUE "
+ "ORDER BY COLUMN_NAME");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, tableName);
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
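    /**
     * Illustrative sketch, not part of the original class: collecting the
     * primary key columns of a table in key order. Because the result set is
     * sorted by COLUMN_NAME rather than KEY_SEQ, the caller has to re-sort
     * by KEY_SEQ, as done here with a TreeMap.
     */
    private java.util.SortedMap<Short, String> primaryKeyColumns(String tableName)
            throws SQLException {
        java.util.TreeMap<Short, String> byKeySeq = new java.util.TreeMap<>();
        ResultSet rs = getPrimaryKeys(null, null, tableName);
        while (rs.next()) {
            byKeySeq.put(rs.getShort("KEY_SEQ"), rs.getString("COLUMN_NAME"));
        }
        rs.close();
        return byKeySeq;
    }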
/**
     * Checks if all procedures are callable.
*
* @return true
*/
@Override
public boolean allProceduresAreCallable() {
debugCodeCall("allProceduresAreCallable");
return true;
}
/**
     * Checks if it is possible to query all tables returned by getTables.
*
* @return true
*/
@Override
public boolean allTablesAreSelectable() {
debugCodeCall("allTablesAreSelectable");
return true;
}
/**
* Returns the database URL for this connection.
*
* @return the url
*/
@Override
public String getURL() throws SQLException {
try {
debugCodeCall("getURL");
return conn.getURL();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the user name as passed to DriverManager.getConnection(url, user,
* password).
*
* @return the user name
*/
@Override
public String getUserName() throws SQLException {
try {
debugCodeCall("getUserName");
return conn.getUser();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the same as Connection.isReadOnly().
*
     * @return whether the connection is read-only
*/
@Override
public boolean isReadOnly() throws SQLException {
try {
debugCodeCall("isReadOnly");
return conn.isReadOnly();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if NULL is sorted high (bigger than anything that is not null).
*
* @return false by default; true if the system property h2.sortNullsHigh is
* set to true
*/
@Override
public boolean nullsAreSortedHigh() {
debugCodeCall("nullsAreSortedHigh");
return SysProperties.SORT_NULLS_HIGH;
}
/**
* Checks if NULL is sorted low (smaller than anything that is not null).
*
* @return true by default; false if the system property h2.sortNullsHigh is
* set to true
*/
@Override
public boolean nullsAreSortedLow() {
debugCodeCall("nullsAreSortedLow");
return !SysProperties.SORT_NULLS_HIGH;
}
/**
* Checks if NULL is sorted at the beginning (no matter if ASC or DESC is
* used).
*
* @return false
*/
@Override
public boolean nullsAreSortedAtStart() {
debugCodeCall("nullsAreSortedAtStart");
return false;
}
/**
* Checks if NULL is sorted at the end (no matter if ASC or DESC is used).
*
* @return false
*/
@Override
public boolean nullsAreSortedAtEnd() {
debugCodeCall("nullsAreSortedAtEnd");
return false;
}
/**
* Returns the connection that created this object.
*
* @return the connection
*/
@Override
public Connection getConnection() {
debugCodeCall("getConnection");
return conn;
}
/**
* Gets the list of procedures. The result set is sorted by PROCEDURE_SCHEM,
* PROCEDURE_NAME, and NUM_INPUT_PARAMS. There are potentially multiple
* procedures with the same name, each with a different number of input
* parameters.
*
* <ul>
* <li>1 PROCEDURE_CAT (String) catalog </li>
* <li>2 PROCEDURE_SCHEM (String) schema </li>
* <li>3 PROCEDURE_NAME (String) name </li>
* <li>4 NUM_INPUT_PARAMS (int) the number of arguments </li>
* <li>5 NUM_OUTPUT_PARAMS (int) for future use, always 0 </li>
* <li>6 NUM_RESULT_SETS (int) for future use, always 0 </li>
* <li>7 REMARKS (String) description </li>
* <li>8 PROCEDURE_TYPE (short) if this procedure returns a result
* (procedureNoResult or procedureReturnsResult) </li>
* <li>9 SPECIFIC_NAME (String) name </li>
* </ul>
*
* @param catalogPattern null or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param procedureNamePattern the procedure name pattern
* @return the procedures
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getProcedures(String catalogPattern, String schemaPattern,
String procedureNamePattern) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getProcedures("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(procedureNamePattern)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "ALIAS_CATALOG PROCEDURE_CAT, "
+ "ALIAS_SCHEMA PROCEDURE_SCHEM, "
+ "ALIAS_NAME PROCEDURE_NAME, "
+ "COLUMN_COUNT NUM_INPUT_PARAMS, "
+ "ZERO() NUM_OUTPUT_PARAMS, "
+ "ZERO() NUM_RESULT_SETS, "
+ "REMARKS, "
+ "RETURNS_RESULT PROCEDURE_TYPE, "
+ "ALIAS_NAME SPECIFIC_NAME "
+ "FROM INFORMATION_SCHEMA.FUNCTION_ALIASES "
+ "WHERE ALIAS_CATALOG LIKE ? ESCAPE ? "
+ "AND ALIAS_SCHEMA LIKE ? ESCAPE ? "
+ "AND ALIAS_NAME LIKE ? ESCAPE ? "
+ "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, NUM_INPUT_PARAMS");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, getPattern(procedureNamePattern));
prep.setString(6, "\\");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of procedure columns. The result set is sorted by
* PROCEDURE_SCHEM, PROCEDURE_NAME, NUM_INPUT_PARAMS, and POS.
* There are potentially multiple procedures with the same name, each with a
* different number of input parameters.
*
* <ul>
* <li>1 PROCEDURE_CAT (String) catalog </li>
* <li>2 PROCEDURE_SCHEM (String) schema </li>
* <li>3 PROCEDURE_NAME (String) name </li>
* <li>4 COLUMN_NAME (String) column name </li>
* <li>5 COLUMN_TYPE (short) column type
* (always DatabaseMetaData.procedureColumnIn)</li>
* <li>6 DATA_TYPE (short) sql type </li>
* <li>7 TYPE_NAME (String) type name </li>
* <li>8 PRECISION (int) precision </li>
* <li>9 LENGTH (int) length </li>
* <li>10 SCALE (short) scale </li>
* <li>11 RADIX (int) always 10 </li>
* <li>12 NULLABLE (short) nullable
* (DatabaseMetaData.columnNoNulls for primitive data types,
* DatabaseMetaData.columnNullable otherwise)</li>
* <li>13 REMARKS (String) description </li>
* <li>14 COLUMN_DEF (String) always null </li>
* <li>15 SQL_DATA_TYPE (int) for future use, always 0 </li>
* <li>16 SQL_DATETIME_SUB (int) for future use, always 0 </li>
* <li>17 CHAR_OCTET_LENGTH (int) always null </li>
* <li>18 ORDINAL_POSITION (int) the parameter index
* starting from 1 (0 is the return value) </li>
* <li>19 IS_NULLABLE (String) always "YES" </li>
* <li>20 SPECIFIC_NAME (String) name </li>
* </ul>
*
* @param catalogPattern null or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param procedureNamePattern the procedure name pattern
     * @param columnNamePattern the column name pattern
* @return the procedure columns
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getProcedureColumns(String catalogPattern,
String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getProcedureColumns("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(procedureNamePattern)+", "
+quote(columnNamePattern)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "ALIAS_CATALOG PROCEDURE_CAT, "
+ "ALIAS_SCHEMA PROCEDURE_SCHEM, "
+ "ALIAS_NAME PROCEDURE_NAME, "
+ "COLUMN_NAME, "
+ "COLUMN_TYPE, "
+ "DATA_TYPE, "
+ "TYPE_NAME, "
+ "PRECISION, "
+ "PRECISION LENGTH, "
+ "SCALE, "
+ "RADIX, "
+ "NULLABLE, "
+ "REMARKS, "
+ "COLUMN_DEFAULT COLUMN_DEF, "
+ "ZERO() SQL_DATA_TYPE, "
+ "ZERO() SQL_DATETIME_SUB, "
+ "ZERO() CHAR_OCTET_LENGTH, "
+ "POS ORDINAL_POSITION, "
+ "? IS_NULLABLE, "
+ "ALIAS_NAME SPECIFIC_NAME "
+ "FROM INFORMATION_SCHEMA.FUNCTION_COLUMNS "
+ "WHERE ALIAS_CATALOG LIKE ? ESCAPE ? "
+ "AND ALIAS_SCHEMA LIKE ? ESCAPE ? "
+ "AND ALIAS_NAME LIKE ? ESCAPE ? "
+ "AND COLUMN_NAME LIKE ? ESCAPE ? "
+ "ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, ORDINAL_POSITION");
prep.setString(1, "YES");
prep.setString(2, getCatalogPattern(catalogPattern));
prep.setString(3, "\\");
prep.setString(4, getSchemaPattern(schemaPattern));
prep.setString(5, "\\");
prep.setString(6, getPattern(procedureNamePattern));
prep.setString(7, "\\");
prep.setString(8, getPattern(columnNamePattern));
prep.setString(9, "\\");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of schemas.
* The result set is sorted by TABLE_SCHEM.
*
* <ul>
* <li>1 TABLE_SCHEM (String) schema name
* </li><li>2 TABLE_CATALOG (String) catalog name
* </li><li>3 IS_DEFAULT (boolean) if this is the default schema
* </li></ul>
*
* @return the schema list
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getSchemas() throws SQLException {
try {
debugCodeCall("getSchemas");
checkClosed();
PreparedStatement prep = conn
.prepareAutoCloseStatement("SELECT "
+ "SCHEMA_NAME TABLE_SCHEM, "
+ "CATALOG_NAME TABLE_CATALOG, "
+" IS_DEFAULT "
+ "FROM INFORMATION_SCHEMA.SCHEMATA "
+ "ORDER BY SCHEMA_NAME");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
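    /**
     * Illustrative sketch, not part of the original class: printing each
     * schema together with its catalog, using getSchemas above.
     */
    private void printSchemas() throws SQLException {
        ResultSet rs = getSchemas();
        while (rs.next()) {
            System.out.println(rs.getString("TABLE_CATALOG") + "."
                    + rs.getString("TABLE_SCHEM"));
        }
        rs.close();
    }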
/**
* Gets the list of catalogs.
* The result set is sorted by TABLE_CAT.
*
* <ul>
* <li>1 TABLE_CAT (String) catalog name
* </li></ul>
*
* @return the catalog list
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getCatalogs() throws SQLException {
try {
debugCodeCall("getCatalogs");
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement(
"SELECT CATALOG_NAME TABLE_CAT "
+ "FROM INFORMATION_SCHEMA.CATALOGS");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of table types. This call returns a result set with five
* records: "SYSTEM TABLE", "TABLE", "VIEW", "TABLE LINK" and "EXTERNAL".
* <ul>
* <li>1 TABLE_TYPE (String) table type
* </li></ul>
*
* @return the table types
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getTableTypes() throws SQLException {
try {
debugCodeCall("getTableTypes");
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TYPE TABLE_TYPE "
+ "FROM INFORMATION_SCHEMA.TABLE_TYPES "
+ "ORDER BY TABLE_TYPE");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of column privileges. The result set is sorted by
     * COLUMN_NAME and PRIVILEGE.
*
* <ul>
* <li>1 TABLE_CAT (String) table catalog</li>
* <li>2 TABLE_SCHEM (String) table schema</li>
* <li>3 TABLE_NAME (String) table name</li>
* <li>4 COLUMN_NAME (String) column name</li>
* <li>5 GRANTOR (String) grantor of access</li>
* <li>6 GRANTEE (String) grantee of access</li>
* <li>7 PRIVILEGE (String) SELECT, INSERT, UPDATE, DELETE or REFERENCES
* (only one per row)</li>
* <li>8 IS_GRANTABLE (String) YES means the grantee can grant access to
* others</li>
* </ul>
*
* @param catalogPattern null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param table a table name (uppercase for unquoted names)
* @param columnNamePattern null (to get all objects) or a column name
* (uppercase for unquoted names)
* @return the list of privileges
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getColumnPrivileges(String catalogPattern,
String schemaPattern, String table, String columnNamePattern)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getColumnPrivileges("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(table)+", "
+quote(columnNamePattern)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TABLE_CATALOG TABLE_CAT, "
+ "TABLE_SCHEMA TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "COLUMN_NAME, "
+ "GRANTOR, "
+ "GRANTEE, "
+ "PRIVILEGE_TYPE PRIVILEGE, "
+ "IS_GRANTABLE "
+ "FROM INFORMATION_SCHEMA.COLUMN_PRIVILEGES "
+ "WHERE TABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND TABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND TABLE_NAME = ? "
+ "AND COLUMN_NAME LIKE ? ESCAPE ? "
+ "ORDER BY COLUMN_NAME, PRIVILEGE");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, table);
prep.setString(6, getPattern(columnNamePattern));
prep.setString(7, "\\");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of table privileges. The result set is sorted by
* TABLE_SCHEM, TABLE_NAME, and PRIVILEGE.
*
* <ul>
* <li>1 TABLE_CAT (String) table catalog </li>
* <li>2 TABLE_SCHEM (String) table schema </li>
* <li>3 TABLE_NAME (String) table name </li>
* <li>4 GRANTOR (String) grantor of access </li>
* <li>5 GRANTEE (String) grantee of access </li>
* <li>6 PRIVILEGE (String) SELECT, INSERT, UPDATE, DELETE or REFERENCES
* (only one per row) </li>
* <li>7 IS_GRANTABLE (String) YES means the grantee can grant access to
* others </li>
* </ul>
*
* @param catalogPattern null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableNamePattern null (to get all objects) or a table name
* (uppercase for unquoted names)
* @return the list of privileges
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getTablePrivileges(String catalogPattern,
String schemaPattern, String tableNamePattern) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getTablePrivileges("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(tableNamePattern)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TABLE_CATALOG TABLE_CAT, "
+ "TABLE_SCHEMA TABLE_SCHEM, "
+ "TABLE_NAME, "
+ "GRANTOR, "
+ "GRANTEE, "
+ "PRIVILEGE_TYPE PRIVILEGE, "
+ "IS_GRANTABLE "
+ "FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES "
+ "WHERE TABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND TABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND TABLE_NAME LIKE ? ESCAPE ? "
+ "ORDER BY TABLE_SCHEM, TABLE_NAME, PRIVILEGE");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, getPattern(tableNamePattern));
prep.setString(6, "\\");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Gets the list of columns that best identify a row in a table.
* The list is ordered by SCOPE.
*
* <ul>
* <li>1 SCOPE (short) scope of result (always bestRowSession)
* </li><li>2 COLUMN_NAME (String) column name
* </li><li>3 DATA_TYPE (short) SQL data type, see also java.sql.Types
* </li><li>4 TYPE_NAME (String) type name
* </li><li>5 COLUMN_SIZE (int) precision
* (values larger than 2 GB are returned as 2 GB)
* </li><li>6 BUFFER_LENGTH (int) unused
* </li><li>7 DECIMAL_DIGITS (short) scale
* </li><li>8 PSEUDO_COLUMN (short) (always bestRowNotPseudo)
* </li></ul>
*
* @param catalogPattern null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableName table name (must be specified)
* @param scope ignored
* @param nullable ignored
* @return the primary key index
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getBestRowIdentifier(String catalogPattern,
String schemaPattern, String tableName, int scope, boolean nullable)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getBestRowIdentifier("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(tableName)+", "
+scope+", "+nullable+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "CAST(? AS SMALLINT) SCOPE, "
+ "C.COLUMN_NAME, "
+ "C.DATA_TYPE, "
+ "C.TYPE_NAME, "
+ "C.CHARACTER_MAXIMUM_LENGTH COLUMN_SIZE, "
+ "C.CHARACTER_MAXIMUM_LENGTH BUFFER_LENGTH, "
+ "CAST(C.NUMERIC_SCALE AS SMALLINT) DECIMAL_DIGITS, "
+ "CAST(? AS SMALLINT) PSEUDO_COLUMN "
+ "FROM INFORMATION_SCHEMA.INDEXES I, "
+" INFORMATION_SCHEMA.COLUMNS C "
+ "WHERE C.TABLE_NAME = I.TABLE_NAME "
+ "AND C.COLUMN_NAME = I.COLUMN_NAME "
+ "AND C.TABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND C.TABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND C.TABLE_NAME = ? "
+ "AND I.PRIMARY_KEY = TRUE "
+ "ORDER BY SCOPE");
// SCOPE
prep.setInt(1, DatabaseMetaData.bestRowSession);
// PSEUDO_COLUMN
prep.setInt(2, DatabaseMetaData.bestRowNotPseudo);
prep.setString(3, getCatalogPattern(catalogPattern));
prep.setString(4, "\\");
prep.setString(5, getSchemaPattern(schemaPattern));
prep.setString(6, "\\");
prep.setString(7, tableName);
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Gets the list of columns that are updated when any value is updated.
* The result set is always empty.
*
* <ul>
* <li>1 SCOPE (int) not used
* </li><li>2 COLUMN_NAME (String) column name
* </li><li>3 DATA_TYPE (int) SQL data type - see also java.sql.Types
* </li><li>4 TYPE_NAME (String) data type name
* </li><li>5 COLUMN_SIZE (int) precision
* (values larger than 2 GB are returned as 2 GB)
* </li><li>6 BUFFER_LENGTH (int) length (bytes)
* </li><li>7 DECIMAL_DIGITS (int) scale
* </li><li>8 PSEUDO_COLUMN (int) is this column a pseudo column
* </li></ul>
*
* @param catalog null (to get all objects) or the catalog name
* @param schema null (to get all objects) or a schema name
* @param tableName table name (must be specified)
* @return an empty result set
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getVersionColumns(String catalog, String schema,
String tableName) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getVersionColumns("
+quote(catalog)+", "
+quote(schema)+", "
+quote(tableName)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "ZERO() SCOPE, "
+ "COLUMN_NAME, "
+ "CAST(DATA_TYPE AS INT) DATA_TYPE, "
+ "TYPE_NAME, "
+ "NUMERIC_PRECISION COLUMN_SIZE, "
+ "NUMERIC_PRECISION BUFFER_LENGTH, "
+ "NUMERIC_PRECISION DECIMAL_DIGITS, "
+ "ZERO() PSEUDO_COLUMN "
+ "FROM INFORMATION_SCHEMA.COLUMNS "
+ "WHERE FALSE");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of primary key columns that are referenced by a table. The
* result set is sorted by PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME,
* FK_NAME, KEY_SEQ.
*
* <ul>
* <li>1 PKTABLE_CAT (String) primary catalog </li>
* <li>2 PKTABLE_SCHEM (String) primary schema </li>
* <li>3 PKTABLE_NAME (String) primary table </li>
* <li>4 PKCOLUMN_NAME (String) primary column </li>
* <li>5 FKTABLE_CAT (String) foreign catalog </li>
* <li>6 FKTABLE_SCHEM (String) foreign schema </li>
* <li>7 FKTABLE_NAME (String) foreign table </li>
* <li>8 FKCOLUMN_NAME (String) foreign column </li>
* <li>9 KEY_SEQ (short) sequence number (1, 2, ...) </li>
* <li>10 UPDATE_RULE (short) action on update (see
* DatabaseMetaData.importedKey...) </li>
* <li>11 DELETE_RULE (short) action on delete (see
* DatabaseMetaData.importedKey...) </li>
* <li>12 FK_NAME (String) foreign key name </li>
* <li>13 PK_NAME (String) primary key name </li>
* <li>14 DEFERRABILITY (short) deferrable or not (always
* importedKeyNotDeferrable) </li>
* </ul>
*
* @param catalogPattern null (to get all objects) or the catalog name
* @param schemaPattern the schema name of the foreign table
* @param tableName the name of the foreign table
* @return the result set
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getImportedKeys(String catalogPattern,
String schemaPattern, String tableName) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getImportedKeys("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(tableName)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "PKTABLE_CATALOG PKTABLE_CAT, "
+ "PKTABLE_SCHEMA PKTABLE_SCHEM, "
+ "PKTABLE_NAME PKTABLE_NAME, "
+ "PKCOLUMN_NAME, "
+ "FKTABLE_CATALOG FKTABLE_CAT, "
+ "FKTABLE_SCHEMA FKTABLE_SCHEM, "
+ "FKTABLE_NAME, "
+ "FKCOLUMN_NAME, "
+ "ORDINAL_POSITION KEY_SEQ, "
+ "UPDATE_RULE, "
+ "DELETE_RULE, "
+ "FK_NAME, "
+ "PK_NAME, "
+ "DEFERRABILITY "
+ "FROM INFORMATION_SCHEMA.CROSS_REFERENCES "
+ "WHERE FKTABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND FKTABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND FKTABLE_NAME = ? "
+ "ORDER BY PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, FK_NAME, KEY_SEQ");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, tableName);
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of foreign key columns that reference a table. The result
* set is sorted by FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME,
* KEY_SEQ.
*
* <ul>
* <li>1 PKTABLE_CAT (String) primary catalog </li>
* <li>2 PKTABLE_SCHEM (String) primary schema </li>
* <li>3 PKTABLE_NAME (String) primary table </li>
* <li>4 PKCOLUMN_NAME (String) primary column </li>
* <li>5 FKTABLE_CAT (String) foreign catalog </li>
* <li>6 FKTABLE_SCHEM (String) foreign schema </li>
* <li>7 FKTABLE_NAME (String) foreign table </li>
* <li>8 FKCOLUMN_NAME (String) foreign column </li>
* <li>9 KEY_SEQ (short) sequence number (1,2,...) </li>
* <li>10 UPDATE_RULE (short) action on update (see
* DatabaseMetaData.importedKey...) </li>
* <li>11 DELETE_RULE (short) action on delete (see
* DatabaseMetaData.importedKey...) </li>
* <li>12 FK_NAME (String) foreign key name </li>
* <li>13 PK_NAME (String) primary key name </li>
* <li>14 DEFERRABILITY (short) deferrable or not (always
* importedKeyNotDeferrable) </li>
* </ul>
*
* @param catalogPattern null or the catalog name
* @param schemaPattern the schema name of the primary table
* @param tableName the name of the primary table
* @return the result set
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getExportedKeys(String catalogPattern,
String schemaPattern, String tableName) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getExportedKeys("
+quote(catalogPattern)+", "
+quote(schemaPattern)+", "
+quote(tableName)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "PKTABLE_CATALOG PKTABLE_CAT, "
+ "PKTABLE_SCHEMA PKTABLE_SCHEM, "
+ "PKTABLE_NAME PKTABLE_NAME, "
+ "PKCOLUMN_NAME, "
+ "FKTABLE_CATALOG FKTABLE_CAT, "
+ "FKTABLE_SCHEMA FKTABLE_SCHEM, "
+ "FKTABLE_NAME, "
+ "FKCOLUMN_NAME, "
+ "ORDINAL_POSITION KEY_SEQ, "
+ "UPDATE_RULE, "
+ "DELETE_RULE, "
+ "FK_NAME, "
+ "PK_NAME, "
+ "DEFERRABILITY "
+ "FROM INFORMATION_SCHEMA.CROSS_REFERENCES "
+ "WHERE PKTABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND PKTABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND PKTABLE_NAME = ? "
+ "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
prep.setString(5, tableName);
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
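    /**
     * Illustrative sketch, not part of the original class: resolving the
     * foreign key relationships of a table in both directions with
     * getImportedKeys and getExportedKeys above.
     */
    private void printForeignKeys(String tableName) throws SQLException {
        // keys this table references (the table is on the foreign side)
        ResultSet imported = getImportedKeys(null, null, tableName);
        while (imported.next()) {
            System.out.println(imported.getString("FK_NAME") + " -> "
                    + imported.getString("PKTABLE_NAME"));
        }
        imported.close();
        // keys referencing this table (the table is on the primary side)
        ResultSet exported = getExportedKeys(null, null, tableName);
        while (exported.next()) {
            System.out.println(exported.getString("FKTABLE_NAME") + " via "
                    + exported.getString("FK_NAME"));
        }
        exported.close();
    }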
/**
     * Gets the list of foreign key columns that reference a table, as well as
     * the list of primary key columns that are referenced by a table. The
* result set is sorted by FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME,
* FK_NAME, KEY_SEQ.
*
* <ul>
* <li>1 PKTABLE_CAT (String) primary catalog </li>
* <li>2 PKTABLE_SCHEM (String) primary schema </li>
* <li>3 PKTABLE_NAME (String) primary table </li>
* <li>4 PKCOLUMN_NAME (String) primary column </li>
* <li>5 FKTABLE_CAT (String) foreign catalog </li>
* <li>6 FKTABLE_SCHEM (String) foreign schema </li>
* <li>7 FKTABLE_NAME (String) foreign table </li>
* <li>8 FKCOLUMN_NAME (String) foreign column </li>
* <li>9 KEY_SEQ (short) sequence number (1,2,...) </li>
* <li>10 UPDATE_RULE (short) action on update (see
* DatabaseMetaData.importedKey...) </li>
* <li>11 DELETE_RULE (short) action on delete (see
* DatabaseMetaData.importedKey...) </li>
* <li>12 FK_NAME (String) foreign key name </li>
* <li>13 PK_NAME (String) primary key name </li>
* <li>14 DEFERRABILITY (short) deferrable or not (always
* importedKeyNotDeferrable) </li>
* </ul>
*
* @param primaryCatalogPattern null or the catalog name
* @param primarySchemaPattern the schema name of the primary table
* (optional)
* @param primaryTable the name of the primary table (must be specified)
* @param foreignCatalogPattern null or the catalog name
* @param foreignSchemaPattern the schema name of the foreign table
* (optional)
* @param foreignTable the name of the foreign table (must be specified)
* @return the result set
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getCrossReference(String primaryCatalogPattern,
String primarySchemaPattern, String primaryTable, String foreignCatalogPattern,
String foreignSchemaPattern, String foreignTable) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getCrossReference("
+quote(primaryCatalogPattern)+", "
+quote(primarySchemaPattern)+", "
+quote(primaryTable)+", "
+quote(foreignCatalogPattern)+", "
+quote(foreignSchemaPattern)+", "
+quote(foreignTable)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "PKTABLE_CATALOG PKTABLE_CAT, "
+ "PKTABLE_SCHEMA PKTABLE_SCHEM, "
+ "PKTABLE_NAME PKTABLE_NAME, "
+ "PKCOLUMN_NAME, "
+ "FKTABLE_CATALOG FKTABLE_CAT, "
+ "FKTABLE_SCHEMA FKTABLE_SCHEM, "
+ "FKTABLE_NAME, "
+ "FKCOLUMN_NAME, "
+ "ORDINAL_POSITION KEY_SEQ, "
+ "UPDATE_RULE, "
+ "DELETE_RULE, "
+ "FK_NAME, "
+ "PK_NAME, "
+ "DEFERRABILITY "
+ "FROM INFORMATION_SCHEMA.CROSS_REFERENCES "
+ "WHERE PKTABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND PKTABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND PKTABLE_NAME = ? "
+ "AND FKTABLE_CATALOG LIKE ? ESCAPE ? "
+ "AND FKTABLE_SCHEMA LIKE ? ESCAPE ? "
+ "AND FKTABLE_NAME = ? "
+ "ORDER BY FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FK_NAME, KEY_SEQ");
prep.setString(1, getCatalogPattern(primaryCatalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(primarySchemaPattern));
prep.setString(4, "\\");
prep.setString(5, primaryTable);
prep.setString(6, getCatalogPattern(foreignCatalogPattern));
prep.setString(7, "\\");
prep.setString(8, getSchemaPattern(foreignSchemaPattern));
prep.setString(9, "\\");
prep.setString(10, foreignTable);
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of user-defined data types.
* This call returns an empty result set.
*
* <ul>
* <li>1 TYPE_CAT (String) catalog
* </li><li>2 TYPE_SCHEM (String) schema
* </li><li>3 TYPE_NAME (String) type name
* </li><li>4 CLASS_NAME (String) Java class
* </li><li>5 DATA_TYPE (short) SQL Type - see also java.sql.Types
* </li><li>6 REMARKS (String) description
* </li><li>7 BASE_TYPE (short) base type - see also java.sql.Types
* </li></ul>
*
* @param catalog ignored
* @param schemaPattern ignored
* @param typeNamePattern ignored
* @param types ignored
* @return an empty result set
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getUDTs(String catalog, String schemaPattern,
String typeNamePattern, int[] types) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getUDTs("
+quote(catalog)+", "
+quote(schemaPattern)+", "
+quote(typeNamePattern)+", "
+quoteIntArray(types)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "CAST(NULL AS VARCHAR) TYPE_CAT, "
+ "CAST(NULL AS VARCHAR) TYPE_SCHEM, "
+ "CAST(NULL AS VARCHAR) TYPE_NAME, "
+ "CAST(NULL AS VARCHAR) CLASS_NAME, "
+ "CAST(NULL AS SMALLINT) DATA_TYPE, "
+ "CAST(NULL AS VARCHAR) REMARKS, "
+ "CAST(NULL AS SMALLINT) BASE_TYPE "
+ "FROM DUAL WHERE FALSE");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the list of data types. The result set is sorted by DATA_TYPE and
* afterwards by how closely the data type maps to the corresponding JDBC
* SQL type (best match first).
*
* <ul>
* <li>1 TYPE_NAME (String) type name </li>
* <li>2 DATA_TYPE (short) SQL data type - see also java.sql.Types </li>
* <li>3 PRECISION (int) maximum precision </li>
* <li>4 LITERAL_PREFIX (String) prefix used to quote a literal </li>
* <li>5 LITERAL_SUFFIX (String) suffix used to quote a literal </li>
* <li>6 CREATE_PARAMS (String) parameters used (may be null) </li>
* <li>7 NULLABLE (short) typeNoNulls (NULL not allowed) or typeNullable
* </li>
* <li>8 CASE_SENSITIVE (boolean) case sensitive </li>
* <li>9 SEARCHABLE (short) typeSearchable </li>
* <li>10 UNSIGNED_ATTRIBUTE (boolean) unsigned </li>
* <li>11 FIXED_PREC_SCALE (boolean) fixed precision </li>
* <li>12 AUTO_INCREMENT (boolean) auto increment </li>
* <li>13 LOCAL_TYPE_NAME (String) localized version of the data type </li>
* <li>14 MINIMUM_SCALE (short) minimum scale </li>
* <li>15 MAXIMUM_SCALE (short) maximum scale </li>
* <li>16 SQL_DATA_TYPE (int) unused </li>
* <li>17 SQL_DATETIME_SUB (int) unused </li>
* <li>18 NUM_PREC_RADIX (int) 2 for binary, 10 for decimal </li>
* </ul>
*
* @return the list of data types
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getTypeInfo() throws SQLException {
try {
debugCodeCall("getTypeInfo");
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "TYPE_NAME, "
+ "DATA_TYPE, "
+ "PRECISION, "
+ "PREFIX LITERAL_PREFIX, "
+ "SUFFIX LITERAL_SUFFIX, "
+ "PARAMS CREATE_PARAMS, "
+ "NULLABLE, "
+ "CASE_SENSITIVE, "
+ "SEARCHABLE, "
+ "FALSE UNSIGNED_ATTRIBUTE, "
+ "FALSE FIXED_PREC_SCALE, "
+ "AUTO_INCREMENT, "
+ "TYPE_NAME LOCAL_TYPE_NAME, "
+ "MINIMUM_SCALE, "
+ "MAXIMUM_SCALE, "
+ "DATA_TYPE SQL_DATA_TYPE, "
+ "ZERO() SQL_DATETIME_SUB, "
+ "RADIX NUM_PREC_RADIX "
+ "FROM INFORMATION_SCHEMA.TYPE_INFO "
+ "ORDER BY DATA_TYPE, POS");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
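    /**
     * Illustrative sketch, not part of the original class: mapping each
     * java.sql.Types code to the best matching type name. getTypeInfo()
     * returns rows sorted by DATA_TYPE with the best match first, so keeping
     * only the first name seen per code is sufficient.
     */
    private java.util.Map<Integer, String> bestTypeNames() throws SQLException {
        java.util.HashMap<Integer, String> best = new java.util.HashMap<>();
        ResultSet rs = getTypeInfo();
        while (rs.next()) {
            int dataType = rs.getInt("DATA_TYPE");
            if (!best.containsKey(dataType)) {
                best.put(dataType, rs.getString("TYPE_NAME"));
            }
        }
        rs.close();
        return best;
    }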
/**
     * Checks if this database stores data in local files.
*
* @return true
*/
@Override
public boolean usesLocalFiles() {
debugCodeCall("usesLocalFiles");
return true;
}
/**
     * Checks if this database uses one file per table.
*
* @return false
*/
@Override
public boolean usesLocalFilePerTable() {
debugCodeCall("usesLocalFilePerTable");
return false;
}
/**
* Returns the string used to quote identifiers.
*
* @return a double quote
*/
@Override
public String getIdentifierQuoteString() {
debugCodeCall("getIdentifierQuoteString");
return "\"";
}
/**
* Gets the comma-separated list of all SQL keywords that are not supported
     * as table/column/index names, in addition to the SQL-2003 keywords. The list
* returned is:
* <pre>
* LIMIT,MINUS,OFFSET,ROWNUM,SYSDATE,SYSTIME,SYSTIMESTAMP,TODAY
* </pre>
* The complete list of keywords (including SQL-2003 keywords) is:
* <pre>
* ALL, CHECK, CONSTRAINT, CROSS, CURRENT_DATE, CURRENT_TIME,
* CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FOREIGN,
* FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT,
* MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT,
* SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE, WITH
* </pre>
*
     * @return a list of the additional keywords
*/
@Override
public String getSQLKeywords() {
debugCodeCall("getSQLKeywords");
return "LIMIT,MINUS,OFFSET,ROWNUM,SYSDATE,SYSTIME,SYSTIMESTAMP,TODAY";
}
/**
* Returns the list of numeric functions supported by this database.
*
* @return the list
*/
@Override
public String getNumericFunctions() throws SQLException {
debugCodeCall("getNumericFunctions");
return getFunctions("Functions (Numeric)");
}
/**
* Returns the list of string functions supported by this database.
*
* @return the list
*/
@Override
public String getStringFunctions() throws SQLException {
debugCodeCall("getStringFunctions");
return getFunctions("Functions (String)");
}
/**
* Returns the list of system functions supported by this database.
*
* @return the list
*/
@Override
public String getSystemFunctions() throws SQLException {
debugCodeCall("getSystemFunctions");
return getFunctions("Functions (System)");
}
/**
* Returns the list of date and time functions supported by this database.
*
* @return the list
*/
@Override
public String getTimeDateFunctions() throws SQLException {
debugCodeCall("getTimeDateFunctions");
return getFunctions("Functions (Time and Date)");
}
private String getFunctions(String section) throws SQLException {
try {
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT TOPIC "
+ "FROM INFORMATION_SCHEMA.HELP WHERE SECTION = ?");
prep.setString(1, section);
ResultSet rs = prep.executeQuery();
StatementBuilder buff = new StatementBuilder();
while (rs.next()) {
String s = rs.getString(1).trim();
String[] array = StringUtils.arraySplit(s, ',', true);
for (String a : array) {
buff.appendExceptFirst(",");
String f = a.trim();
if (f.indexOf(' ') >= 0) {
// remove 'Function' from 'INSERT Function'
f = f.substring(0, f.indexOf(' ')).trim();
}
buff.append(f);
}
}
rs.close();
prep.close();
return buff.toString();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the default escape character for DatabaseMetaData search
* patterns.
*
     * @return the default escape character (always '\', independent of the
* mode)
*/
@Override
public String getSearchStringEscape() {
debugCodeCall("getSearchStringEscape");
return "\\";
}
/**
     * Returns the characters that are allowed for identifiers in addition to
* A-Z, a-z, 0-9 and '_'.
*
* @return an empty String ("")
*/
@Override
public String getExtraNameCharacters() {
debugCodeCall("getExtraNameCharacters");
return "";
}
/**
* Returns whether alter table with add column is supported.
* @return true
*/
@Override
public boolean supportsAlterTableWithAddColumn() {
debugCodeCall("supportsAlterTableWithAddColumn");
return true;
}
/**
* Returns whether alter table with drop column is supported.
*
* @return true
*/
@Override
public boolean supportsAlterTableWithDropColumn() {
debugCodeCall("supportsAlterTableWithDropColumn");
return true;
}
/**
* Returns whether column aliasing is supported.
*
* @return true
*/
@Override
public boolean supportsColumnAliasing() {
debugCodeCall("supportsColumnAliasing");
return true;
}
/**
* Returns whether NULL+1 is NULL or not.
*
* @return true
*/
@Override
public boolean nullPlusNonNullIsNull() {
debugCodeCall("nullPlusNonNullIsNull");
return true;
}
/**
* Returns whether CONVERT is supported.
*
* @return true
*/
@Override
public boolean supportsConvert() {
debugCodeCall("supportsConvert");
return true;
}
/**
     * Returns whether CONVERT from one datatype to another is supported.
*
* @param fromType the source SQL type
* @param toType the target SQL type
* @return true
*/
@Override
public boolean supportsConvert(int fromType, int toType) {
if (isDebugEnabled()) {
debugCode("supportsConvert("+fromType+", "+fromType+");");
}
return true;
}
/**
* Returns whether table correlation names (table alias) are supported.
*
* @return true
*/
@Override
public boolean supportsTableCorrelationNames() {
debugCodeCall("supportsTableCorrelationNames");
return true;
}
/**
* Returns whether table correlation names (table alias) are restricted to
     * be different from table names.
*
* @return false
*/
@Override
public boolean supportsDifferentTableCorrelationNames() {
debugCodeCall("supportsDifferentTableCorrelationNames");
return false;
}
/**
     * Returns whether expressions in ORDER BY are supported.
*
* @return true
*/
@Override
public boolean supportsExpressionsInOrderBy() {
debugCodeCall("supportsExpressionsInOrderBy");
return true;
}
/**
* Returns whether ORDER BY is supported if the column is not in the SELECT
* list.
*
* @return true
*/
@Override
public boolean supportsOrderByUnrelated() {
debugCodeCall("supportsOrderByUnrelated");
return true;
}
/**
* Returns whether GROUP BY is supported.
*
* @return true
*/
@Override
public boolean supportsGroupBy() {
debugCodeCall("supportsGroupBy");
return true;
}
/**
* Returns whether GROUP BY is supported if the column is not in the SELECT
* list.
*
* @return true
*/
@Override
public boolean supportsGroupByUnrelated() {
debugCodeCall("supportsGroupByUnrelated");
return true;
}
/**
* Checks whether a GROUP BY clause can use columns that are not in the
* SELECT clause, provided that it specifies all the columns in the SELECT
* clause.
*
* @return true
*/
@Override
public boolean supportsGroupByBeyondSelect() {
debugCodeCall("supportsGroupByBeyondSelect");
return true;
}
/**
* Returns whether LIKE... ESCAPE is supported.
*
* @return true
*/
@Override
public boolean supportsLikeEscapeClause() {
debugCodeCall("supportsLikeEscapeClause");
return true;
}
/**
* Returns whether multiple result sets are supported.
*
* @return false
*/
@Override
public boolean supportsMultipleResultSets() {
debugCodeCall("supportsMultipleResultSets");
return false;
}
/**
* Returns whether multiple transactions (on different connections) are
* supported.
*
* @return true
*/
@Override
public boolean supportsMultipleTransactions() {
debugCodeCall("supportsMultipleTransactions");
return true;
}
/**
* Returns whether columns with NOT NULL are supported.
*
* @return true
*/
@Override
public boolean supportsNonNullableColumns() {
debugCodeCall("supportsNonNullableColumns");
return true;
}
/**
* Returns whether ODBC Minimum SQL grammar is supported.
*
* @return true
*/
@Override
public boolean supportsMinimumSQLGrammar() {
debugCodeCall("supportsMinimumSQLGrammar");
return true;
}
/**
* Returns whether ODBC Core SQL grammar is supported.
*
* @return true
*/
@Override
public boolean supportsCoreSQLGrammar() {
debugCodeCall("supportsCoreSQLGrammar");
return true;
}
/**
* Returns whether ODBC Extended SQL grammar is supported.
*
* @return false
*/
@Override
public boolean supportsExtendedSQLGrammar() {
debugCodeCall("supportsExtendedSQLGrammar");
return false;
}
/**
* Returns whether SQL-92 entry level grammar is supported.
*
* @return true
*/
@Override
public boolean supportsANSI92EntryLevelSQL() {
debugCodeCall("supportsANSI92EntryLevelSQL");
return true;
}
/**
* Returns whether SQL-92 intermediate level grammar is supported.
*
* @return false
*/
@Override
public boolean supportsANSI92IntermediateSQL() {
debugCodeCall("supportsANSI92IntermediateSQL");
return false;
}
/**
* Returns whether SQL-92 full level grammar is supported.
*
* @return false
*/
@Override
public boolean supportsANSI92FullSQL() {
debugCodeCall("supportsANSI92FullSQL");
return false;
}
/**
* Returns whether referential integrity is supported.
*
* @return true
*/
@Override
public boolean supportsIntegrityEnhancementFacility() {
debugCodeCall("supportsIntegrityEnhancementFacility");
return true;
}
/**
* Returns whether outer joins are supported.
*
* @return true
*/
@Override
public boolean supportsOuterJoins() {
debugCodeCall("supportsOuterJoins");
return true;
}
/**
* Returns whether full outer joins are supported.
*
* @return false
*/
@Override
public boolean supportsFullOuterJoins() {
debugCodeCall("supportsFullOuterJoins");
return false;
}
/**
* Returns whether limited outer joins are supported.
*
* @return true
*/
@Override
public boolean supportsLimitedOuterJoins() {
debugCodeCall("supportsLimitedOuterJoins");
return true;
}
/**
* Returns the term for "schema".
*
* @return "schema"
*/
@Override
public String getSchemaTerm() {
debugCodeCall("getSchemaTerm");
return "schema";
}
/**
* Returns the term for "procedure".
*
* @return "procedure"
*/
@Override
public String getProcedureTerm() {
debugCodeCall("getProcedureTerm");
return "procedure";
}
/**
* Returns the term for "catalog".
*
* @return "catalog"
*/
@Override
public String getCatalogTerm() {
debugCodeCall("getCatalogTerm");
return "catalog";
}
/**
* Returns whether the catalog name appears at the start of a fully
* qualified table name.
*
* @return true
*/
@Override
public boolean isCatalogAtStart() {
debugCodeCall("isCatalogAtStart");
return true;
}
/**
* Returns the catalog separator.
*
* @return "."
*/
@Override
public String getCatalogSeparator() {
debugCodeCall("getCatalogSeparator");
return ".";
}
/**
* Returns whether the schema name in INSERT, UPDATE, DELETE is supported.
*
* @return true
*/
@Override
public boolean supportsSchemasInDataManipulation() {
debugCodeCall("supportsSchemasInDataManipulation");
return true;
}
/**
* Returns whether the schema name in procedure calls is supported.
*
* @return true
*/
@Override
public boolean supportsSchemasInProcedureCalls() {
debugCodeCall("supportsSchemasInProcedureCalls");
return true;
}
/**
* Returns whether the schema name in CREATE TABLE is supported.
*
* @return true
*/
@Override
public boolean supportsSchemasInTableDefinitions() {
debugCodeCall("supportsSchemasInTableDefinitions");
return true;
}
/**
* Returns whether the schema name in CREATE INDEX is supported.
*
* @return true
*/
@Override
public boolean supportsSchemasInIndexDefinitions() {
debugCodeCall("supportsSchemasInIndexDefinitions");
return true;
}
/**
* Returns whether the schema name in GRANT is supported.
*
* @return true
*/
@Override
public boolean supportsSchemasInPrivilegeDefinitions() {
debugCodeCall("supportsSchemasInPrivilegeDefinitions");
return true;
}
/**
* Returns whether the catalog name in INSERT, UPDATE, DELETE is supported.
*
* @return true
*/
@Override
public boolean supportsCatalogsInDataManipulation() {
debugCodeCall("supportsCatalogsInDataManipulation");
return true;
}
/**
* Returns whether the catalog name in procedure calls is supported.
*
* @return false
*/
@Override
public boolean supportsCatalogsInProcedureCalls() {
debugCodeCall("supportsCatalogsInProcedureCalls");
return false;
}
/**
* Returns whether the catalog name in CREATE TABLE is supported.
*
* @return true
*/
@Override
public boolean supportsCatalogsInTableDefinitions() {
debugCodeCall("supportsCatalogsInTableDefinitions");
return true;
}
/**
* Returns whether the catalog name in CREATE INDEX is supported.
*
* @return true
*/
@Override
public boolean supportsCatalogsInIndexDefinitions() {
debugCodeCall("supportsCatalogsInIndexDefinitions");
return true;
}
/**
* Returns whether the catalog name in GRANT is supported.
*
* @return true
*/
@Override
public boolean supportsCatalogsInPrivilegeDefinitions() {
debugCodeCall("supportsCatalogsInPrivilegeDefinitions");
return true;
}
/**
* Returns whether positioned deletes are supported.
*
* @return true
*/
@Override
public boolean supportsPositionedDelete() {
debugCodeCall("supportsPositionedDelete");
return true;
}
/**
* Returns whether positioned updates are supported.
*
* @return true
*/
@Override
public boolean supportsPositionedUpdate() {
debugCodeCall("supportsPositionedUpdate");
return true;
}
/**
* Returns whether SELECT ... FOR UPDATE is supported.
*
* @return true
*/
@Override
public boolean supportsSelectForUpdate() {
debugCodeCall("supportsSelectForUpdate");
return true;
}
/**
* Returns whether stored procedures are supported.
*
* @return false
*/
@Override
public boolean supportsStoredProcedures() {
debugCodeCall("supportsStoredProcedures");
return false;
}
/**
* Returns whether subqueries (SELECT) in comparisons are supported.
*
* @return true
*/
@Override
public boolean supportsSubqueriesInComparisons() {
debugCodeCall("supportsSubqueriesInComparisons");
return true;
}
/**
* Returns whether SELECT in EXISTS is supported.
*
* @return true
*/
@Override
public boolean supportsSubqueriesInExists() {
debugCodeCall("supportsSubqueriesInExists");
return true;
}
/**
* Returns whether IN(SELECT...) is supported.
*
* @return true
*/
@Override
public boolean supportsSubqueriesInIns() {
debugCodeCall("supportsSubqueriesInIns");
return true;
}
/**
* Returns whether subqueries in quantified expressions are supported.
*
* @return true
*/
@Override
public boolean supportsSubqueriesInQuantifieds() {
debugCodeCall("supportsSubqueriesInQuantifieds");
return true;
}
/**
* Returns whether correlated subqueries are supported.
*
* @return true
*/
@Override
public boolean supportsCorrelatedSubqueries() {
debugCodeCall("supportsCorrelatedSubqueries");
return true;
}
/**
* Returns whether UNION SELECT is supported.
*
* @return true
*/
@Override
public boolean supportsUnion() {
debugCodeCall("supportsUnion");
return true;
}
/**
* Returns whether UNION ALL SELECT is supported.
*
* @return true
*/
@Override
public boolean supportsUnionAll() {
debugCodeCall("supportsUnionAll");
return true;
}
/**
* Returns whether open result sets across commits are supported.
*
* @return false
*/
@Override
public boolean supportsOpenCursorsAcrossCommit() {
debugCodeCall("supportsOpenCursorsAcrossCommit");
return false;
}
/**
* Returns whether open result sets across rollback are supported.
*
* @return false
*/
@Override
public boolean supportsOpenCursorsAcrossRollback() {
debugCodeCall("supportsOpenCursorsAcrossRollback");
return false;
}
/**
* Returns whether open statements across commit are supported.
*
* @return true
*/
@Override
public boolean supportsOpenStatementsAcrossCommit() {
debugCodeCall("supportsOpenStatementsAcrossCommit");
return true;
}
/**
* Returns whether open statements across rollback are supported.
*
* @return true
*/
@Override
public boolean supportsOpenStatementsAcrossRollback() {
debugCodeCall("supportsOpenStatementsAcrossRollback");
return true;
}
/**
* Returns whether transactions are supported.
*
* @return true
*/
@Override
public boolean supportsTransactions() {
debugCodeCall("supportsTransactions");
return true;
}
/**
* Returns whether a specific transaction isolation level is supported.
*
* @param level the transaction isolation level (Connection.TRANSACTION_*)
* @return true
*/
@Override
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
debugCodeCall("supportsTransactionIsolationLevel");
if (level == Connection.TRANSACTION_READ_UNCOMMITTED) {
// currently the combination of LOCK_MODE=0 and MULTI_THREADED
// is not supported, also see code in Database#setLockMode(int)
PreparedStatement prep = conn.prepareAutoCloseStatement(
"SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME=?");
prep.setString(1, "MULTI_THREADED");
ResultSet rs = prep.executeQuery();
return !rs.next() || !rs.getString(1).equals("1");
}
return true;
}
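// Illustrative usage (not part of this class): a client can probe an
// isolation level before switching to it. "conn" is assumed to be an
// open java.sql.Connection.
//
// DatabaseMetaData meta = conn.getMetaData();
// if (meta.supportsTransactionIsolationLevel(
//         Connection.TRANSACTION_READ_UNCOMMITTED)) {
//     conn.setTransactionIsolation(Connection.TRANSACTION_READ_UNCOMMITTED);
// }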
/**
* Returns whether data manipulation and CREATE/DROP are supported in
* transactions.
*
* @return false
*/
@Override
public boolean supportsDataDefinitionAndDataManipulationTransactions() {
debugCodeCall("supportsDataDefinitionAndDataManipulationTransactions");
return false;
}
/**
* Returns whether only data manipulations are supported in transactions.
*
* @return true
*/
@Override
public boolean supportsDataManipulationTransactionsOnly() {
debugCodeCall("supportsDataManipulationTransactionsOnly");
return true;
}
/**
* Returns whether CREATE/DROP commit an open transaction.
*
* @return true
*/
@Override
public boolean dataDefinitionCausesTransactionCommit() {
debugCodeCall("dataDefinitionCausesTransactionCommit");
return true;
}
/**
* Returns whether CREATE/DROP do not affect transactions.
*
* @return false
*/
@Override
public boolean dataDefinitionIgnoredInTransactions() {
debugCodeCall("dataDefinitionIgnoredInTransactions");
return false;
}
/**
* Returns whether a specific result set type is supported.
* ResultSet.TYPE_SCROLL_SENSITIVE is not supported.
*
* @param type the result set type
* @return true for all types except ResultSet.TYPE_SCROLL_SENSITIVE
*/
@Override
public boolean supportsResultSetType(int type) {
debugCodeCall("supportsResultSetType", type);
return type != ResultSet.TYPE_SCROLL_SENSITIVE;
}
/**
* Returns whether a specific result set concurrency is supported.
* ResultSet.TYPE_SCROLL_SENSITIVE is not supported.
*
* @param type the result set type
* @param concurrency the result set concurrency
* @return true if the type is not ResultSet.TYPE_SCROLL_SENSITIVE
*/
@Override
public boolean supportsResultSetConcurrency(int type, int concurrency) {
if (isDebugEnabled()) {
debugCode("supportsResultSetConcurrency("+type+", "+concurrency+");");
}
return type != ResultSet.TYPE_SCROLL_SENSITIVE;
}
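// Illustrative usage (not part of this class): request a scrollable
// result set only when the driver reports support for it. "conn" is an
// assumed open java.sql.Connection.
//
// DatabaseMetaData meta = conn.getMetaData();
// int type = meta.supportsResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)
//         ? ResultSet.TYPE_SCROLL_INSENSITIVE
//         : ResultSet.TYPE_FORWARD_ONLY;
// Statement stat = conn.createStatement(type, ResultSet.CONCUR_READ_ONLY);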
/**
* Returns whether own updates are visible.
*
* @param type the result set type
* @return true
*/
@Override
public boolean ownUpdatesAreVisible(int type) {
debugCodeCall("ownUpdatesAreVisible", type);
return true;
}
/**
* Returns whether own deletes are visible.
*
* @param type the result set type
* @return false
*/
@Override
public boolean ownDeletesAreVisible(int type) {
debugCodeCall("ownDeletesAreVisible", type);
return false;
}
/**
* Returns whether own inserts are visible.
*
* @param type the result set type
* @return false
*/
@Override
public boolean ownInsertsAreVisible(int type) {
debugCodeCall("ownInsertsAreVisible", type);
return false;
}
/**
* Returns whether other updates are visible.
*
* @param type the result set type
* @return false
*/
@Override
public boolean othersUpdatesAreVisible(int type) {
debugCodeCall("othersUpdatesAreVisible", type);
return false;
}
/**
* Returns whether other deletes are visible.
*
* @param type the result set type
* @return false
*/
@Override
public boolean othersDeletesAreVisible(int type) {
debugCodeCall("othersDeletesAreVisible", type);
return false;
}
/**
* Returns whether other inserts are visible.
*
* @param type the result set type
* @return false
*/
@Override
public boolean othersInsertsAreVisible(int type) {
debugCodeCall("othersInsertsAreVisible", type);
return false;
}
/**
* Returns whether updates are detected.
*
* @param type the result set type
* @return false
*/
@Override
public boolean updatesAreDetected(int type) {
debugCodeCall("updatesAreDetected", type);
return false;
}
/**
* Returns whether deletes are detected.
*
* @param type the result set type
* @return false
*/
@Override
public boolean deletesAreDetected(int type) {
debugCodeCall("deletesAreDetected", type);
return false;
}
/**
* Returns whether inserts are detected.
*
* @param type the result set type
* @return false
*/
@Override
public boolean insertsAreDetected(int type) {
debugCodeCall("insertsAreDetected", type);
return false;
}
/**
* Returns whether batch updates are supported.
*
* @return true
*/
@Override
public boolean supportsBatchUpdates() {
debugCodeCall("supportsBatchUpdates");
return true;
}
/**
* Returns whether the maximum row size includes blobs.
*
* @return false
*/
@Override
public boolean doesMaxRowSizeIncludeBlobs() {
debugCodeCall("doesMaxRowSizeIncludeBlobs");
return false;
}
/**
* Returns the default transaction isolation level.
*
* @return Connection.TRANSACTION_READ_COMMITTED
*/
@Override
public int getDefaultTransactionIsolation() {
debugCodeCall("getDefaultTransactionIsolation");
return Connection.TRANSACTION_READ_COMMITTED;
}
/**
* Checks if for CREATE TABLE Test(ID INT), getTables returns Test as the
* table name.
*
* @return false
*/
@Override
public boolean supportsMixedCaseIdentifiers() {
debugCodeCall("supportsMixedCaseIdentifiers");
return false;
}
/**
* Checks if a table created with CREATE TABLE "Test"(ID INT) is a different
* table from a table created with CREATE TABLE TEST(ID INT).
*
* @return true usually, and false in MySQL mode
*/
@Override
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
debugCodeCall("supportsMixedCaseQuotedIdentifiers");
String m = conn.getMode();
return !m.equals("MySQL");
}
/**
* Checks if for CREATE TABLE Test(ID INT), getTables returns TEST as the
* table name.
*
* @return true usually, and false in MySQL mode
*/
@Override
public boolean storesUpperCaseIdentifiers() throws SQLException {
debugCodeCall("storesUpperCaseIdentifiers");
String m = conn.getMode();
return !m.equals("MySQL");
}
/**
* Checks if for CREATE TABLE Test(ID INT), getTables returns test as the
* table name.
*
* @return false usually, and true in MySQL mode
*/
@Override
public boolean storesLowerCaseIdentifiers() throws SQLException {
debugCodeCall("storesLowerCaseIdentifiers");
String m = conn.getMode();
return m.equals("MySQL");
}
/**
* Checks if for CREATE TABLE Test(ID INT), getTables returns Test as the
* table name.
*
* @return false
*/
@Override
public boolean storesMixedCaseIdentifiers() {
debugCodeCall("storesMixedCaseIdentifiers");
return false;
}
/**
* Checks if for CREATE TABLE "Test"(ID INT), getTables returns TEST as the
* table name.
*
* @return false usually, and true in MySQL mode
*/
@Override
public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
debugCodeCall("storesUpperCaseQuotedIdentifiers");
String m = conn.getMode();
return m.equals("MySQL");
}
/**
* Checks if for CREATE TABLE "Test"(ID INT), getTables returns test as the
* table name.
*
* @return false usually, and true in MySQL mode
*/
@Override
public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
debugCodeCall("storesLowerCaseQuotedIdentifiers");
String m = conn.getMode();
return m.equals("MySQL");
}
/**
* Checks if for CREATE TABLE "Test"(ID INT), getTables returns Test as the
* table name.
*
* @return true usually, and false in MySQL mode
*/
@Override
public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
debugCodeCall("storesMixedCaseQuotedIdentifiers");
String m = conn.getMode();
return !m.equals("MySQL");
}
/**
* Returns the maximum length for hex values (characters).
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxBinaryLiteralLength() {
debugCodeCall("getMaxBinaryLiteralLength");
return 0;
}
/**
* Returns the maximum length for literals.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxCharLiteralLength() {
debugCodeCall("getMaxCharLiteralLength");
return 0;
}
/**
* Returns the maximum length for column names.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxColumnNameLength() {
debugCodeCall("getMaxColumnNameLength");
return 0;
}
/**
* Returns the maximum number of columns in GROUP BY.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxColumnsInGroupBy() {
debugCodeCall("getMaxColumnsInGroupBy");
return 0;
}
/**
* Returns the maximum number of columns in CREATE INDEX.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxColumnsInIndex() {
debugCodeCall("getMaxColumnsInIndex");
return 0;
}
/**
* Returns the maximum number of columns in ORDER BY.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxColumnsInOrderBy() {
debugCodeCall("getMaxColumnsInOrderBy");
return 0;
}
/**
* Returns the maximum number of columns in SELECT.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxColumnsInSelect() {
debugCodeCall("getMaxColumnsInSelect");
return 0;
}
/**
* Returns the maximum number of columns in CREATE TABLE.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxColumnsInTable() {
debugCodeCall("getMaxColumnsInTable");
return 0;
}
/**
* Returns the maximum number of open connections.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxConnections() {
debugCodeCall("getMaxConnections");
return 0;
}
/**
* Returns the maximum length for a cursor name.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxCursorNameLength() {
debugCodeCall("getMaxCursorNameLength");
return 0;
}
/**
* Returns the maximum length for an index (in bytes).
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxIndexLength() {
debugCodeCall("getMaxIndexLength");
return 0;
}
/**
* Returns the maximum length for a schema name.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxSchemaNameLength() {
debugCodeCall("getMaxSchemaNameLength");
return 0;
}
/**
* Returns the maximum length for a procedure name.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxProcedureNameLength() {
debugCodeCall("getMaxProcedureNameLength");
return 0;
}
/**
* Returns the maximum length for a catalog name.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxCatalogNameLength() {
debugCodeCall("getMaxCatalogNameLength");
return 0;
}
/**
* Returns the maximum size of a row (in bytes).
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxRowSize() {
debugCodeCall("getMaxRowSize");
return 0;
}
/**
* Returns the maximum length of a statement.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxStatementLength() {
debugCodeCall("getMaxStatementLength");
return 0;
}
/**
* Returns the maximum number of open statements.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxStatements() {
debugCodeCall("getMaxStatements");
return 0;
}
/**
* Returns the maximum length for a table name.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxTableNameLength() {
debugCodeCall("getMaxTableNameLength");
return 0;
}
/**
* Returns the maximum number of tables in a SELECT.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxTablesInSelect() {
debugCodeCall("getMaxTablesInSelect");
return 0;
}
/**
* Returns the maximum length for a user name.
*
* @return 0, meaning the limit is unknown
*/
@Override
public int getMaxUserNameLength() {
debugCodeCall("getMaxUserNameLength");
return 0;
}
/**
* Returns whether the database supports savepoints.
*
* @return true
*/
@Override
public boolean supportsSavepoints() {
debugCodeCall("supportsSavepoints");
return true;
}
/**
* Returns whether the database supports named parameters.
*
* @return false
*/
@Override
public boolean supportsNamedParameters() {
debugCodeCall("supportsNamedParameters");
return false;
}
/**
* Returns whether the database supports multiple open result sets.
*
* @return true
*/
@Override
public boolean supportsMultipleOpenResults() {
debugCodeCall("supportsMultipleOpenResults");
return true;
}
/**
* Returns whether the database supports getGeneratedKeys.
*
* @return true
*/
@Override
public boolean supportsGetGeneratedKeys() {
debugCodeCall("supportsGetGeneratedKeys");
return true;
}
/**
* [Not supported]
*/
@Override
public ResultSet getSuperTypes(String catalog, String schemaPattern,
String typeNamePattern) throws SQLException {
throw unsupported("superTypes");
}
/**
* Get the list of super tables of a table. This method currently returns an
* empty result set.
* <ul>
* <li>1 TABLE_CAT (String) table catalog</li>
* <li>2 TABLE_SCHEM (String) table schema</li>
* <li>3 TABLE_NAME (String) table name</li>
* <li>4 SUPERTABLE_NAME (String) the name of the super table</li>
* </ul>
*
* @param catalog null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableNamePattern null (to get all objects) or a table name pattern
* (uppercase for unquoted names)
* @return an empty result set
*/
@Override
public ResultSet getSuperTables(String catalog, String schemaPattern,
String tableNamePattern) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getSuperTables("
+quote(catalog)+", "
+quote(schemaPattern)+", "
+quote(tableNamePattern)+");");
}
checkClosed();
PreparedStatement prep = conn.prepareAutoCloseStatement("SELECT "
+ "CATALOG_NAME TABLE_CAT, "
+ "CATALOG_NAME TABLE_SCHEM, "
+ "CATALOG_NAME TABLE_NAME, "
+ "CATALOG_NAME SUPERTABLE_NAME "
+ "FROM INFORMATION_SCHEMA.CATALOGS "
+ "WHERE FALSE");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
*/
@Override
public ResultSet getAttributes(String catalog, String schemaPattern,
String typeNamePattern, String attributeNamePattern)
throws SQLException {
throw unsupported("attributes");
}
/**
* Returns whether this database supports a result set holdability.
*
* @param holdability ResultSet.HOLD_CURSORS_OVER_COMMIT or
* CLOSE_CURSORS_AT_COMMIT
* @return true if the holdability is ResultSet.CLOSE_CURSORS_AT_COMMIT
*/
@Override
public boolean supportsResultSetHoldability(int holdability) {
debugCodeCall("supportsResultSetHoldability", holdability);
return holdability == ResultSet.CLOSE_CURSORS_AT_COMMIT;
}
/**
* Gets the result set holdability.
*
* @return ResultSet.CLOSE_CURSORS_AT_COMMIT
*/
@Override
public int getResultSetHoldability() {
debugCodeCall("getResultSetHoldability");
return ResultSet.CLOSE_CURSORS_AT_COMMIT;
}
/**
* Gets the major version of the database.
*
* @return the major version
*/
@Override
public int getDatabaseMajorVersion() {
debugCodeCall("getDatabaseMajorVersion");
return Constants.VERSION_MAJOR;
}
/**
* Gets the minor version of the database.
*
* @return the minor version
*/
@Override
public int getDatabaseMinorVersion() {
debugCodeCall("getDatabaseMinorVersion");
return Constants.VERSION_MINOR;
}
/**
* Gets the major version of the supported JDBC API.
*
* @return the major version (4)
*/
@Override
public int getJDBCMajorVersion() {
debugCodeCall("getJDBCMajorVersion");
return 4;
}
/**
* Gets the minor version of the supported JDBC API.
*
* @return the minor version (0)
*/
@Override
public int getJDBCMinorVersion() {
debugCodeCall("getJDBCMinorVersion");
return 0;
}
/**
* Gets the SQL State type.
*
* @return DatabaseMetaData.sqlStateSQL99
*/
@Override
public int getSQLStateType() {
debugCodeCall("getSQLStateType");
return DatabaseMetaData.sqlStateSQL99;
}
/**
* Returns whether the database makes a copy of the data before updating.
*
* @return false
*/
@Override
public boolean locatorsUpdateCopy() {
debugCodeCall("locatorsUpdateCopy");
return false;
}
/**
* Returns whether the database supports statement pooling.
*
* @return false
*/
@Override
public boolean supportsStatementPooling() {
debugCodeCall("supportsStatementPooling");
return false;
}
// =============================================================
private void checkClosed() {
conn.checkClosed();
}
private static String getPattern(String pattern) {
return pattern == null ? "%" : pattern;
}
private static String getSchemaPattern(String pattern) {
return pattern == null ? "%" : pattern.length() == 0 ?
Constants.SCHEMA_MAIN : pattern;
}
private static String getCatalogPattern(String catalogPattern) {
// Workaround for OpenOffice: getColumns is called with "" as the
// catalog
return catalogPattern == null || catalogPattern.length() == 0 ?
"%" : catalogPattern;
}
/**
* Get the lifetime of a rowid.
*
* @return ROWID_UNSUPPORTED
*/
@Override
public RowIdLifetime getRowIdLifetime() {
debugCodeCall("getRowIdLifetime");
return RowIdLifetime.ROWID_UNSUPPORTED;
}
/**
* Gets the list of schemas in the database.
* The result set is sorted by TABLE_SCHEM.
*
* <ul>
* <li>1 TABLE_SCHEM (String) schema name
* </li><li>2 TABLE_CATALOG (String) catalog name
* </li><li>3 IS_DEFAULT (boolean) if this is the default schema
* </li></ul>
*
* @param catalogPattern null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @return the schema list
* @throws SQLException if the connection is closed
*/
@Override
public ResultSet getSchemas(String catalogPattern, String schemaPattern)
throws SQLException {
try {
debugCodeCall("getSchemas(String,String)");
checkClosed();
PreparedStatement prep = conn
.prepareAutoCloseStatement("SELECT "
+ "SCHEMA_NAME TABLE_SCHEM, "
+ "CATALOG_NAME TABLE_CATALOG, "
+" IS_DEFAULT "
+ "FROM INFORMATION_SCHEMA.SCHEMATA "
+ "WHERE CATALOG_NAME LIKE ? ESCAPE ? "
+ "AND SCHEMA_NAME LIKE ? ESCAPE ? "
+ "ORDER BY SCHEMA_NAME");
prep.setString(1, getCatalogPattern(catalogPattern));
prep.setString(2, "\\");
prep.setString(3, getSchemaPattern(schemaPattern));
prep.setString(4, "\\");
return prep.executeQuery();
} catch (Exception e) {
throw logAndConvert(e);
}
}
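// Illustrative usage (not part of this class): list all schemas via the
// standard JDBC entry point. "conn" is an assumed open
// java.sql.Connection.
//
// try (ResultSet rs = conn.getMetaData().getSchemas(null, null)) {
//     while (rs.next()) {
//         System.out.println(rs.getString("TABLE_SCHEM"));
//     }
// }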
/**
* Returns whether the database supports calling functions using the call
* syntax.
*
* @return true
*/
@Override
public boolean supportsStoredFunctionsUsingCallSyntax() {
debugCodeCall("supportsStoredFunctionsUsingCallSyntax");
return true;
}
/**
* Returns whether an exception that occurs while auto commit is on closes
* all result sets.
*
* @return false
*/
@Override
public boolean autoCommitFailureClosesAllResultSets() {
debugCodeCall("autoCommitFailureClosesAllResultSets");
return false;
}
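/**
 * Returns the client info properties of this connection, as a result set
 * with the columns Name (String) and Value (String).
 *
 * @return the client info properties
 */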
@Override
public ResultSet getClientInfoProperties() throws SQLException {
Properties clientInfo = conn.getClientInfo();
SimpleResultSet result = new SimpleResultSet();
result.addColumn("Name", Types.VARCHAR, 0, 0);
result.addColumn("Value", Types.VARCHAR, 0, 0);
for (Object key : clientInfo.keySet()) {
result.addRow(key, clientInfo.get(key));
}
return result;
}
/**
* Return an object of this class if possible.
*
* @param iface the class
* @return this
*/
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
if (isWrapperFor(iface)) {
return (T) this;
}
throw DbException.getInvalidValueException("iface", iface);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if unwrap can return an object of this class.
*
* @param iface the class
* @return whether or not the interface is assignable from this class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface != null && iface.isAssignableFrom(getClass());
}
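// Illustrative usage (not part of this class): unwrap is mainly useful
// for reaching a vendor-specific interface behind a generic or pooled
// wrapper.
//
// DatabaseMetaData meta = conn.getMetaData();
// if (meta.isWrapperFor(JdbcDatabaseMetaData.class)) {
//     JdbcDatabaseMetaData h2Meta = meta.unwrap(JdbcDatabaseMetaData.class);
// }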
/**
* [Not supported] Gets the list of function columns.
*/
@Override
public ResultSet getFunctionColumns(String catalog, String schemaPattern,
String functionNamePattern, String columnNamePattern)
throws SQLException {
throw unsupported("getFunctionColumns");
}
/**
* [Not supported] Gets the list of functions.
*/
@Override
public ResultSet getFunctions(String catalog, String schemaPattern,
String functionNamePattern) throws SQLException {
throw unsupported("getFunctions");
}
/**
 * Returns whether a generated key is always returned when requested.
 *
 * @return true
 */
@Override
public boolean generatedKeyAlwaysReturned() {
return true;
}
/**
* [Not supported]
*
* @param catalog null (to get all objects) or the catalog name
* @param schemaPattern null (to get all objects) or a schema name
* (uppercase for unquoted names)
* @param tableNamePattern null (to get all objects) or a table name
* (uppercase for unquoted names)
* @param columnNamePattern null (to get all objects) or a column name
* (uppercase for unquoted names)
*/
@Override
public ResultSet getPseudoColumns(String catalog, String schemaPattern,
String tableNamePattern, String columnNamePattern) {
return null;
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": " + conn;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcDatabaseMetaDataBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0, and the
* EPL 1.0 (http://h2database.com/html/license.html). Initial Developer: H2
* Group
*/
package org.h2.jdbc;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcDatabaseMetaDataBackwardsCompat {
// compatibility interface
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcParameterMetaData.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.ParameterMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import org.h2.command.CommandInterface;
import org.h2.expression.ParameterInterface;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.message.TraceObject;
import org.h2.util.MathUtils;
import org.h2.value.DataType;
import org.h2.value.Value;
/**
* Information about the parameters of a prepared statement.
*/
public class JdbcParameterMetaData extends TraceObject implements
ParameterMetaData {
private final JdbcPreparedStatement prep;
private final int paramCount;
private final ArrayList<? extends ParameterInterface> parameters;
JdbcParameterMetaData(Trace trace, JdbcPreparedStatement prep,
CommandInterface command, int id) {
setTrace(trace, TraceObject.PARAMETER_META_DATA, id);
this.prep = prep;
this.parameters = command.getParameters();
this.paramCount = parameters.size();
}
/**
* Returns the number of parameters.
*
* @return the number
*/
@Override
public int getParameterCount() throws SQLException {
try {
debugCodeCall("getParameterCount");
checkClosed();
return paramCount;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the parameter mode.
* Always returns parameterModeIn.
*
* @param param the column index (1,2,...)
* @return parameterModeIn
*/
@Override
public int getParameterMode(int param) throws SQLException {
try {
debugCodeCall("getParameterMode", param);
getParameter(param);
return parameterModeIn;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the parameter type.
* java.sql.Types.VARCHAR is returned if the data type is not known.
*
* @param param the column index (1,2,...)
* @return the data type
*/
@Override
public int getParameterType(int param) throws SQLException {
try {
debugCodeCall("getParameterType", param);
ParameterInterface p = getParameter(param);
int type = p.getType();
if (type == Value.UNKNOWN) {
type = Value.STRING;
}
return DataType.getDataType(type).sqlType;
} catch (Exception e) {
throw logAndConvert(e);
}
}
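// Illustrative usage (not part of this class): inspect the parameters of
// a prepared statement. The table name TEST is a placeholder.
//
// PreparedStatement prep = conn.prepareStatement(
//         "SELECT * FROM TEST WHERE ID = ?");
// ParameterMetaData meta = prep.getParameterMetaData();
// for (int i = 1; i <= meta.getParameterCount(); i++) {
//     System.out.println(i + ": " + meta.getParameterTypeName(i));
// }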
/**
* Returns the parameter precision.
* The value 0 is returned if the precision is not known.
*
* @param param the column index (1,2,...)
* @return the precision
*/
@Override
public int getPrecision(int param) throws SQLException {
try {
debugCodeCall("getPrecision", param);
ParameterInterface p = getParameter(param);
return MathUtils.convertLongToInt(p.getPrecision());
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the parameter scale.
* The value 0 is returned if the scale is not known.
*
* @param param the column index (1,2,...)
* @return the scale
*/
@Override
public int getScale(int param) throws SQLException {
try {
debugCodeCall("getScale", param);
ParameterInterface p = getParameter(param);
return p.getScale();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this parameter is nullable.
* Returns ResultSetMetaData.columnNullableUnknown.
*
* @param param the column index (1,2,...)
* @return ResultSetMetaData.columnNullableUnknown
*/
@Override
public int isNullable(int param) throws SQLException {
try {
debugCodeCall("isNullable", param);
return getParameter(param).getNullable();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this parameter is signed.
* It always returns true.
*
* @param param the column index (1,2,...)
* @return true
*/
@Override
public boolean isSigned(int param) throws SQLException {
try {
debugCodeCall("isSigned", param);
getParameter(param);
return true;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the Java class name of the parameter.
* "java.lang.String" is returned if the type is not known.
*
* @param param the column index (1,2,...)
* @return the Java class name
*/
@Override
public String getParameterClassName(int param) throws SQLException {
try {
debugCodeCall("getParameterClassName", param);
ParameterInterface p = getParameter(param);
int type = p.getType();
if (type == Value.UNKNOWN) {
type = Value.STRING;
}
return DataType.getTypeClassName(type);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the parameter type name.
* "VARCHAR" is returned if the type is not known.
*
* @param param the column index (1,2,...)
* @return the type name
*/
@Override
public String getParameterTypeName(int param) throws SQLException {
try {
debugCodeCall("getParameterTypeName", param);
ParameterInterface p = getParameter(param);
int type = p.getType();
if (type == Value.UNKNOWN) {
type = Value.STRING;
}
return DataType.getDataType(type).name;
} catch (Exception e) {
throw logAndConvert(e);
}
}
private ParameterInterface getParameter(int param) {
checkClosed();
if (param < 1 || param > paramCount) {
throw DbException.getInvalidValueException("param", param);
}
return parameters.get(param - 1);
}
private void checkClosed() {
prep.checkClosed();
}
/**
* Return an object of this class if possible.
*
* @param iface the class
* @return this
*/
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
if (isWrapperFor(iface)) {
return (T) this;
}
throw DbException.getInvalidValueException("iface", iface);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if unwrap can return an object of this class.
*
* @param iface the class
* @return whether or not the interface is assignable from this class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface != null && iface.isAssignableFrom(getClass());
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": parameterCount=" + paramCount;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcPreparedStatement.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.HashMap;
import org.h2.api.ErrorCode;
import org.h2.command.CommandInterface;
import org.h2.expression.ParameterInterface;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.result.ResultInterface;
import org.h2.result.ResultWithGeneratedKeys;
import org.h2.util.DateTimeUtils;
import org.h2.util.IOUtils;
import org.h2.util.MergedResultSet;
import org.h2.util.New;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueBoolean;
import org.h2.value.ValueByte;
import org.h2.value.ValueBytes;
import org.h2.value.ValueDate;
import org.h2.value.ValueDecimal;
import org.h2.value.ValueDouble;
import org.h2.value.ValueFloat;
import org.h2.value.ValueInt;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
import org.h2.value.ValueShort;
import org.h2.value.ValueString;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;
/**
* Represents a prepared statement.
*/
public class JdbcPreparedStatement extends JdbcStatement implements
PreparedStatement, JdbcPreparedStatementBackwardsCompat {
protected CommandInterface command;
private final String sqlStatement;
private ArrayList<Value[]> batchParameters;
private MergedResultSet batchIdentities;
private HashMap<String, Integer> cachedColumnLabelMap;
private final Object generatedKeysRequest;
JdbcPreparedStatement(JdbcConnection conn, String sql, int id,
int resultSetType, int resultSetConcurrency,
boolean closeWithResultSet, Object generatedKeysRequest) {
super(conn, id, resultSetType, resultSetConcurrency, closeWithResultSet);
this.generatedKeysRequest = conn.scopeGeneratedKeys() ? false : generatedKeysRequest;
setTrace(session.getTrace(), TraceObject.PREPARED_STATEMENT, id);
this.sqlStatement = sql;
command = conn.prepareCommand(sql, fetchSize);
}
/**
* Cache the column labels (looking up the column index can sometimes show
* up on the performance profile).
*
* @param cachedColumnLabelMap the column map
*/
void setCachedColumnLabelMap(HashMap<String, Integer> cachedColumnLabelMap) {
this.cachedColumnLabelMap = cachedColumnLabelMap;
}
/**
* Executes a query (select statement) and returns the result set. If
* another result set exists for this statement, it will be closed (even
* if this statement fails).
*
* @return the result set
* @throws SQLException if this object is closed or invalid
*/
@Override
public ResultSet executeQuery() throws SQLException {
try {
int id = getNextId(TraceObject.RESULT_SET);
if (isDebugEnabled()) {
debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "executeQuery()");
}
batchIdentities = null;
synchronized (session) {
checkClosed();
closeOldResultSet();
ResultInterface result;
boolean lazy = false;
boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY;
boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE;
try {
setExecutingStatement(command);
result = command.executeQuery(maxRows, scrollable);
lazy = result.isLazy();
} finally {
if (!lazy) {
setExecutingStatement(null);
}
}
resultSet = new JdbcResultSet(conn, this, command, result, id,
closedByResultSet, scrollable, updatable, cachedColumnLabelMap);
}
return resultSet;
} catch (Exception e) {
throw logAndConvert(e);
}
}
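// Illustrative usage (not part of this class): bind parameters by index,
// then execute. The table name TEST is a placeholder.
//
// PreparedStatement prep = conn.prepareStatement(
//         "SELECT NAME FROM TEST WHERE ID = ?");
// prep.setInt(1, 42);
// try (ResultSet rs = prep.executeQuery()) {
//     while (rs.next()) {
//         System.out.println(rs.getString(1));
//     }
// }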
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, it will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
* @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback)
* @throws SQLException if this object is closed or invalid
*/
@Override
public int executeUpdate() throws SQLException {
try {
debugCodeCall("executeUpdate");
checkClosedForWrite();
batchIdentities = null;
try {
return executeUpdateInternal();
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, it will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
* @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback)
* @throws SQLException if this object is closed or invalid
*/
@Override
public long executeLargeUpdate() throws SQLException {
try {
debugCodeCall("executeLargeUpdate");
checkClosedForWrite();
batchIdentities = null;
try {
return executeUpdateInternal();
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
private int executeUpdateInternal() throws SQLException {
closeOldResultSet();
synchronized (session) {
try {
setExecutingStatement(command);
ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest);
updateCount = result.getUpdateCount();
ResultInterface gk = result.getGeneratedKeys();
if (gk != null) {
int id = getNextId(TraceObject.RESULT_SET);
generatedKeys = new JdbcResultSet(conn, this, command, gk, id,
false, true, false);
}
} finally {
setExecutingStatement(null);
}
}
return updateCount;
}
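// Illustrative usage (not part of this class): retrieve the generated key
// of an insert. The table name TEST is a placeholder.
//
// PreparedStatement prep = conn.prepareStatement(
//         "INSERT INTO TEST(NAME) VALUES(?)",
//         Statement.RETURN_GENERATED_KEYS);
// prep.setString(1, "Hello");
// prep.executeUpdate();
// try (ResultSet keys = prep.getGeneratedKeys()) {
//     if (keys.next()) {
//         long id = keys.getLong(1);
//     }
// }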
/**
* Executes an arbitrary statement. If another result set exists for this
* statement, it will be closed (even if this statement fails). If auto
* commit is on, and the statement is not a select, this statement will be
* committed.
*
* @return true if a result set is available, false if not
* @throws SQLException if this object is closed or invalid
*/
@Override
public boolean execute() throws SQLException {
try {
int id = getNextId(TraceObject.RESULT_SET);
if (isDebugEnabled()) {
debugCodeCall("execute");
}
checkClosedForWrite();
try {
boolean returnsResultSet;
synchronized (conn.getSession()) {
closeOldResultSet();
boolean lazy = false;
try {
setExecutingStatement(command);
if (command.isQuery()) {
returnsResultSet = true;
boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY;
boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE;
ResultInterface result = command.executeQuery(maxRows, scrollable);
lazy = result.isLazy();
resultSet = new JdbcResultSet(conn, this, command, result,
id, closedByResultSet, scrollable,
updatable, cachedColumnLabelMap);
} else {
returnsResultSet = false;
ResultWithGeneratedKeys result = command.executeUpdate(generatedKeysRequest);
updateCount = result.getUpdateCount();
ResultInterface gk = result.getGeneratedKeys();
if (gk != null) {
generatedKeys = new JdbcResultSet(conn, this, command, gk, id,
false, true, false);
}
}
} finally {
if (!lazy) {
setExecutingStatement(null);
}
}
}
return returnsResultSet;
} finally {
afterWriting();
}
} catch (Throwable e) {
throw logAndConvert(e);
}
}
/**
* Clears all parameters.
*
* @throws SQLException if this object is closed or invalid
*/
@Override
public void clearParameters() throws SQLException {
try {
debugCodeCall("clearParameters");
checkClosed();
ArrayList<? extends ParameterInterface> parameters = command.getParameters();
for (ParameterInterface param : parameters) {
// can only delete old temp files if they are not in the batch
param.setValue(null, batchParameters == null);
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @throws SQLException Unsupported Feature
*/
@Override
public ResultSet executeQuery(String sql) throws SQLException {
try {
debugCodeCall("executeQuery", sql);
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @throws SQLException Unsupported Feature
*/
@Override
public void addBatch(String sql) throws SQLException {
try {
debugCodeCall("addBatch", sql);
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @throws SQLException Unsupported Feature
*/
@Override
public int executeUpdate(String sql) throws SQLException {
try {
debugCodeCall("executeUpdate", sql);
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @throws SQLException Unsupported Feature
*/
@Override
public long executeLargeUpdate(String sql) throws SQLException {
try {
debugCodeCall("executeLargeUpdate", sql);
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @throws SQLException Unsupported Feature
*/
@Override
public boolean execute(String sql) throws SQLException {
try {
debugCodeCall("execute", sql);
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
// =============================================================
/**
* Sets a parameter to null.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param sqlType the data type (Types.x)
* @throws SQLException if this object is closed
*/
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setNull("+parameterIndex+", "+sqlType+");");
}
setParameter(parameterIndex, ValueNull.INSTANCE);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setInt(int parameterIndex, int x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setInt("+parameterIndex+", "+x+");");
}
setParameter(parameterIndex, ValueInt.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setString(int parameterIndex, String x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setString("+parameterIndex+", "+quote(x)+");");
}
Value v = x == null ? (Value) ValueNull.INSTANCE : ValueString.get(x);
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBigDecimal(int parameterIndex, BigDecimal x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBigDecimal("+parameterIndex+", " + quoteBigDecimal(x) + ");");
}
Value v = x == null ? (Value) ValueNull.INSTANCE : ValueDecimal.get(x);
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setDate(int parameterIndex, java.sql.Date x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setDate("+parameterIndex+", " + quoteDate(x) + ");");
}
Value v = x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x);
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setTime(int parameterIndex, java.sql.Time x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setTime("+parameterIndex+", " + quoteTime(x) + ");");
}
Value v = x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x);
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setTimestamp(int parameterIndex, java.sql.Timestamp x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setTimestamp("+parameterIndex+", " + quoteTimestamp(x) + ");");
}
Value v = x == null ? (Value) ValueNull.INSTANCE : ValueTimestamp.get(x);
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
* Objects of unknown classes are serialized (on the client side).
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setObject("+parameterIndex+", x);");
}
if (x == null) {
// throw Errors.getInvalidValueException("null", "x");
setParameter(parameterIndex, ValueNull.INSTANCE);
} else {
setParameter(parameterIndex,
DataType.convertToValue(session, x, Value.UNKNOWN));
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter. The object is converted, if required, to
* the specified data type before sending to the database.
* Objects of unknown classes are serialized (on the client side).
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value, null is allowed
* @param targetSqlType the type as defined in java.sql.Types
* @throws SQLException if this object is closed
*/
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setObject("+parameterIndex+", x, "+targetSqlType+");");
}
int type = DataType.convertSQLTypeToValueType(targetSqlType);
if (x == null) {
setParameter(parameterIndex, ValueNull.INSTANCE);
} else {
Value v = DataType.convertToValue(conn.getSession(), x, type);
setParameter(parameterIndex, v.convertTo(type));
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
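// Illustrative usage (not part of this class): an explicit target type
// forces a client-side conversion before the value is sent.
//
// prep.setObject(1, "123", java.sql.Types.INTEGER);
// // behaves like prep.setInt(1, 123) once the string is converted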
/**
* Sets the value of a parameter. The object is converted, if required, to
* the specified data type before sending to the database.
* Objects of unknown classes are serialized (on the client side).
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value, null is allowed
* @param targetSqlType the type as defined in java.sql.Types
* @param scale is ignored
* @throws SQLException if this object is closed
*/
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType,
int scale) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setObject("+parameterIndex+", x, "+targetSqlType+", "+scale+");");
}
setObject(parameterIndex, x, targetSqlType);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBoolean(int parameterIndex, boolean x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBoolean("+parameterIndex+", "+x+");");
}
setParameter(parameterIndex, ValueBoolean.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setByte(int parameterIndex, byte x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setByte("+parameterIndex+", "+x+");");
}
setParameter(parameterIndex, ValueByte.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setShort(int parameterIndex, short x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setShort("+parameterIndex+", (short) "+x+");");
}
setParameter(parameterIndex, ValueShort.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setLong(int parameterIndex, long x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setLong("+parameterIndex+", "+x+"L);");
}
setParameter(parameterIndex, ValueLong.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setFloat(int parameterIndex, float x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setFloat("+parameterIndex+", "+x+"f);");
}
setParameter(parameterIndex, ValueFloat.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setDouble(int parameterIndex, double x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setDouble("+parameterIndex+", "+x+"d);");
}
setParameter(parameterIndex, ValueDouble.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Sets the value of a column as a reference.
*/
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
throw unsupported("ref");
}
/**
* Sets the date using a specified time zone. The value will be converted to
* the local time zone.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param calendar the calendar
* @throws SQLException if this object is closed
*/
@Override
public void setDate(int parameterIndex, java.sql.Date x, Calendar calendar)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setDate("+parameterIndex+", " + quoteDate(x) + ", calendar);");
}
if (x == null) {
setParameter(parameterIndex, ValueNull.INSTANCE);
} else {
setParameter(parameterIndex, DateTimeUtils.convertDate(x, calendar));
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the time using a specified time zone. The value will be converted to
* the local time zone.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param calendar the calendar
* @throws SQLException if this object is closed
*/
@Override
public void setTime(int parameterIndex, java.sql.Time x, Calendar calendar)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setTime("+parameterIndex+", " + quoteTime(x) + ", calendar);");
}
if (x == null) {
setParameter(parameterIndex, ValueNull.INSTANCE);
} else {
setParameter(parameterIndex, DateTimeUtils.convertTime(x, calendar));
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the timestamp using a specified time zone. The value will be
* converted to the local time zone.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param calendar the calendar
* @throws SQLException if this object is closed
*/
@Override
public void setTimestamp(int parameterIndex, java.sql.Timestamp x,
Calendar calendar) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setTimestamp(" + parameterIndex + ", " +
quoteTimestamp(x) + ", calendar);");
}
if (x == null) {
setParameter(parameterIndex, ValueNull.INSTANCE);
} else {
setParameter(parameterIndex, DateTimeUtils.convertTimestamp(x, calendar));
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
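// Illustrative usage (not part of this class): bind a timestamp that was
// produced in another time zone, so that it is converted correctly.
//
// Calendar utc = Calendar.getInstance(java.util.TimeZone.getTimeZone("UTC"));
// prep.setTimestamp(1, new java.sql.Timestamp(0L), utc);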
/**
* [Not supported] This feature is deprecated and not supported.
*
* @deprecated since JDBC 2.0, use setCharacterStream
*/
@Deprecated
@Override
public void setUnicodeStream(int parameterIndex, InputStream x, int length)
throws SQLException {
throw unsupported("unicodeStream");
}
/**
* Sets a parameter to null.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param sqlType the data type (Types.x)
* @param typeName this parameter is ignored
* @throws SQLException if this object is closed
*/
@Override
public void setNull(int parameterIndex, int sqlType, String typeName)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setNull("+parameterIndex+", "+sqlType+", "+quote(typeName)+");");
}
setNull(parameterIndex, sqlType);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Blob.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBlob(int parameterIndex, Blob x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBlob("+parameterIndex+", x);");
}
checkClosedForWrite();
try {
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createBlob(x.getBinaryStream(), -1);
}
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Blob.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBlob(int parameterIndex, InputStream x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBlob("+parameterIndex+", x);");
}
checkClosedForWrite();
try {
Value v = conn.createBlob(x, -1);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Clob.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setClob(int parameterIndex, Clob x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setClob("+parameterIndex+", x);");
}
checkClosedForWrite();
try {
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createClob(x.getCharacterStream(), -1);
}
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Clob.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setClob(int parameterIndex, Reader x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setClob("+parameterIndex+", x);");
}
checkClosedForWrite();
try {
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createClob(x, -1);
}
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
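    /*
     * Usage sketch (illustrative only, not part of the original sources):
     * streaming LOB parameters instead of materializing them in memory. The
     * file name is an assumption made for the example; the caller remains
     * responsible for closing the stream after the statement has executed.
     *
     *     try (InputStream in = new FileInputStream("data.bin")) {
     *         prep.setBlob(1, in); // length unknown: pass the raw stream
     *         prep.setClob(2, new StringReader("some text"));
     *         prep.executeUpdate();
     *     }
     */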
/**
* Sets the value of a parameter as an Array.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setArray(int parameterIndex, Array x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setArray("+parameterIndex+", x);");
}
checkClosed();
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = DataType.convertToValue(session, x.getArray(), Value.ARRAY);
}
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a byte array.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBytes(int parameterIndex, byte[] x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBytes("+parameterIndex+", "+quoteBytes(x)+");");
}
Value v = x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x);
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as an input stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setBinaryStream(int parameterIndex, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBinaryStream("+parameterIndex+", x, "+length+"L);");
}
checkClosedForWrite();
try {
Value v = conn.createBlob(x, length);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as an input stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setBinaryStream(int parameterIndex, InputStream x, int length)
throws SQLException {
setBinaryStream(parameterIndex, x, (long) length);
}
/**
* Sets the value of a parameter as an input stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setBinaryStream(int parameterIndex, InputStream x)
throws SQLException {
setBinaryStream(parameterIndex, x, -1);
}
/**
* Sets the value of a parameter as an ASCII stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setAsciiStream(int parameterIndex, InputStream x, int length)
throws SQLException {
setAsciiStream(parameterIndex, x, (long) length);
}
/**
* Sets the value of a parameter as an ASCII stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setAsciiStream(int parameterIndex, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setAsciiStream("+parameterIndex+", x, "+length+"L);");
}
checkClosedForWrite();
try {
Value v = conn.createClob(IOUtils.getAsciiReader(x), length);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as an ASCII stream.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setAsciiStream(int parameterIndex, InputStream x)
throws SQLException {
setAsciiStream(parameterIndex, x, -1);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setCharacterStream(int parameterIndex, Reader x, int length)
throws SQLException {
setCharacterStream(parameterIndex, x, (long) length);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setCharacterStream(int parameterIndex, Reader x)
throws SQLException {
setCharacterStream(parameterIndex, x, -1);
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setCharacterStream(int parameterIndex, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setCharacterStream("+parameterIndex+", x, "+length+"L);");
}
checkClosedForWrite();
try {
Value v = conn.createClob(x, length);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
*/
@Override
public void setURL(int parameterIndex, URL x) throws SQLException {
throw unsupported("url");
}
/**
* Gets the result set metadata of the query returned when the statement is
* executed. If this is not a query, this method returns null.
*
* @return the meta data or null if this is not a query
* @throws SQLException if this object is closed
*/
@Override
public ResultSetMetaData getMetaData() throws SQLException {
try {
debugCodeCall("getMetaData");
checkClosed();
ResultInterface result = command.getMetaData();
if (result == null) {
return null;
}
int id = getNextId(TraceObject.RESULT_SET_META_DATA);
if (isDebugEnabled()) {
debugCodeAssign("ResultSetMetaData",
TraceObject.RESULT_SET_META_DATA, id, "getMetaData()");
}
String catalog = conn.getCatalog();
return new JdbcResultSetMetaData(
null, this, result, catalog, session.getTrace(), id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
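    /*
     * Usage sketch (illustrative only, not part of the original sources): a
     * prepared query exposes its result metadata before execution, which lets
     * tools build column lists up front. The query is an assumption made for
     * the example.
     *
     *     PreparedStatement prep = conn.prepareStatement(
     *             "SELECT ID, NAME FROM TEST");
     *     ResultSetMetaData meta = prep.getMetaData(); // no execution needed
     *     for (int i = 1; i <= meta.getColumnCount(); i++) {
     *         System.out.println(meta.getColumnLabel(i)
     *                 + " " + meta.getColumnTypeName(i));
     *     }
     */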
/**
* Clears the batch.
*/
@Override
public void clearBatch() throws SQLException {
try {
debugCodeCall("clearBatch");
checkClosed();
batchParameters = null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Closes this statement.
     * All result sets that were created by this statement
* become invalid after calling this method.
*/
@Override
public void close() throws SQLException {
try {
super.close();
batchParameters = null;
if (command != null) {
command.close();
command = null;
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes the batch.
     * If one of the batched statements fails, processing continues with the
     * remaining statements.
*
* @return the array of update counts
*/
@Override
public int[] executeBatch() throws SQLException {
try {
int id = getNextId(TraceObject.PREPARED_STATEMENT);
debugCodeCall("executeBatch");
if (batchParameters == null) {
                // TODO batch: check what other databases do if no parameters are
// set
batchParameters = New.arrayList();
}
batchIdentities = new MergedResultSet();
int size = batchParameters.size();
int[] result = new int[size];
boolean error = false;
SQLException next = null;
checkClosedForWrite();
try {
for (int i = 0; i < size; i++) {
Value[] set = batchParameters.get(i);
ArrayList<? extends ParameterInterface> parameters =
command.getParameters();
for (int j = 0; j < set.length; j++) {
Value value = set[j];
ParameterInterface param = parameters.get(j);
param.setValue(value, false);
}
try {
result[i] = executeUpdateInternal();
                        // cannot use our own getGeneratedKeys() here, because
                        // it would return the merged batch identities
ResultSet rs = super.getGeneratedKeys();
batchIdentities.add(rs);
} catch (Exception re) {
SQLException e = logAndConvert(re);
if (next == null) {
next = e;
} else {
e.setNextException(next);
next = e;
}
result[i] = Statement.EXECUTE_FAILED;
error = true;
}
}
batchParameters = null;
if (error) {
throw new JdbcBatchUpdateException(next, result);
}
return result;
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
@Override
public ResultSet getGeneratedKeys() throws SQLException {
if (batchIdentities != null) {
return batchIdentities.getResult();
}
return super.getGeneratedKeys();
}
/**
* Adds the current settings to the batch.
*/
@Override
public void addBatch() throws SQLException {
try {
debugCodeCall("addBatch");
checkClosedForWrite();
try {
ArrayList<? extends ParameterInterface> parameters =
command.getParameters();
int size = parameters.size();
Value[] set = new Value[size];
for (int i = 0; i < size; i++) {
ParameterInterface param = parameters.get(i);
param.checkSet();
Value value = param.getParamValue();
set[i] = value;
}
if (batchParameters == null) {
batchParameters = New.arrayList();
}
batchParameters.add(set);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
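    /*
     * Usage sketch (illustrative only, not part of the original sources):
     * batched inserts. Each addBatch() call snapshots the current parameter
     * values; executeBatch() runs every snapshot, continues after failures,
     * and reports them through a single BatchUpdateException at the end. The
     * table name is an assumption made for the example.
     *
     *     PreparedStatement prep = conn.prepareStatement(
     *             "INSERT INTO TEST(ID, NAME) VALUES(?, ?)");
     *     for (int i = 0; i < 100; i++) {
     *         prep.setInt(1, i);
     *         prep.setString(2, "row " + i);
     *         prep.addBatch();
     *     }
     *     int[] counts = prep.executeBatch(); // one count per batch entry
     */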
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param autoGeneratedKeys ignored
* @throws SQLException Unsupported Feature
*/
@Override
public int executeUpdate(String sql, int autoGeneratedKeys)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeUpdate("+quote(sql)+", "+autoGeneratedKeys+");");
}
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param autoGeneratedKeys ignored
* @throws SQLException Unsupported Feature
*/
@Override
public long executeLargeUpdate(String sql, int autoGeneratedKeys)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeLargeUpdate("+quote(sql)+", "+autoGeneratedKeys+");");
}
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param columnIndexes ignored
* @throws SQLException Unsupported Feature
*/
@Override
public int executeUpdate(String sql, int[] columnIndexes)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeUpdate(" + quote(sql) + ", " +
quoteIntArray(columnIndexes) + ");");
}
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param columnIndexes ignored
* @throws SQLException Unsupported Feature
*/
@Override
public long executeLargeUpdate(String sql, int[] columnIndexes)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeLargeUpdate(" + quote(sql) + ", " +
quoteIntArray(columnIndexes) + ");");
}
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param columnNames ignored
* @throws SQLException Unsupported Feature
*/
@Override
public int executeUpdate(String sql, String[] columnNames)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeUpdate(" + quote(sql) + ", " +
quoteArray(columnNames) + ");");
}
throw DbException.get(
ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param columnNames ignored
* @throws SQLException Unsupported Feature
*/
@Override
public long executeLargeUpdate(String sql, String[] columnNames)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeLargeUpdate(" + quote(sql) + ", " +
quoteArray(columnNames) + ");");
}
throw DbException.get(
ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param autoGeneratedKeys ignored
* @throws SQLException Unsupported Feature
*/
@Override
public boolean execute(String sql, int autoGeneratedKeys)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("execute(" + quote(sql) + ", " + autoGeneratedKeys + ");");
}
throw DbException.get(
ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param columnIndexes ignored
* @throws SQLException Unsupported Feature
*/
@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("execute(" + quote(sql) + ", " + quoteIntArray(columnIndexes) + ");");
}
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Calling this method is not legal on a PreparedStatement.
*
* @param sql ignored
* @param columnNames ignored
* @throws SQLException Unsupported Feature
*/
@Override
public boolean execute(String sql, String[] columnNames)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("execute(" + quote(sql) + ", " + quoteArray(columnNames) + ");");
}
throw DbException.get(
ErrorCode.METHOD_NOT_ALLOWED_FOR_PREPARED_STATEMENT);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Get the parameter meta data of this prepared statement.
*
* @return the meta data
*/
@Override
public ParameterMetaData getParameterMetaData() throws SQLException {
try {
int id = getNextId(TraceObject.PARAMETER_META_DATA);
if (isDebugEnabled()) {
debugCodeAssign("ParameterMetaData",
TraceObject.PARAMETER_META_DATA, id, "getParameterMetaData()");
}
checkClosed();
return new JdbcParameterMetaData(
session.getTrace(), this, command, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
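    /*
     * Usage sketch (illustrative only, not part of the original sources):
     * discovering how many parameters a prepared statement expects.
     *
     *     ParameterMetaData pm = prep.getParameterMetaData();
     *     int params = pm.getParameterCount(); // 2 for "VALUES(?, ?)"
     */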
// =============================================================
private void setParameter(int parameterIndex, Value value) {
checkClosed();
parameterIndex--;
ArrayList<? extends ParameterInterface> parameters = command.getParameters();
if (parameterIndex < 0 || parameterIndex >= parameters.size()) {
throw DbException.getInvalidValueException("parameterIndex",
parameterIndex + 1);
}
ParameterInterface param = parameters.get(parameterIndex);
// can only delete old temp files if they are not in the batch
param.setValue(value, batchParameters == null);
}
/**
* [Not supported] Sets the value of a parameter as a row id.
*/
@Override
public void setRowId(int parameterIndex, RowId x) throws SQLException {
throw unsupported("rowId");
}
/**
* Sets the value of a parameter.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNString(int parameterIndex, String x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setNString("+parameterIndex+", "+quote(x)+");");
}
Value v = x == null ? (Value) ValueNull.INSTANCE : ValueString.get(x);
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setNCharacterStream(int parameterIndex, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setNCharacterStream("+
parameterIndex+", x, "+length+"L);");
}
checkClosedForWrite();
try {
Value v = conn.createClob(x, length);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a character stream.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNCharacterStream(int parameterIndex, Reader x)
throws SQLException {
setNCharacterStream(parameterIndex, x, -1);
}
/**
* Sets the value of a parameter as a Clob.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNClob(int parameterIndex, NClob x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setNClob("+parameterIndex+", x);");
}
checkClosedForWrite();
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createClob(x.getCharacterStream(), -1);
}
setParameter(parameterIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Clob.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @throws SQLException if this object is closed
*/
@Override
public void setNClob(int parameterIndex, Reader x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setNClob("+parameterIndex+", x);");
}
checkClosedForWrite();
try {
Value v = conn.createClob(x, -1);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Clob. This method does not close the
* reader. The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setClob(int parameterIndex, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setClob("+parameterIndex+", x, "+length+"L);");
}
checkClosedForWrite();
try {
Value v = conn.createClob(x, length);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Blob.
* This method does not close the stream.
* The stream may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of bytes
* @throws SQLException if this object is closed
*/
@Override
public void setBlob(int parameterIndex, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setBlob("+parameterIndex+", x, "+length+"L);");
}
checkClosedForWrite();
try {
Value v = conn.createBlob(x, length);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the value of a parameter as a Clob.
* This method does not close the reader.
* The reader may be closed after executing the statement.
*
* @param parameterIndex the parameter index (1, 2, ...)
* @param x the value
* @param length the maximum number of characters
* @throws SQLException if this object is closed
*/
@Override
public void setNClob(int parameterIndex, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setNClob("+parameterIndex+", x, "+length+"L);");
}
checkClosedForWrite();
try {
Value v = conn.createClob(x, length);
setParameter(parameterIndex, v);
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Sets the value of a parameter as a SQLXML object.
*/
@Override
public void setSQLXML(int parameterIndex, SQLXML x) throws SQLException {
throw unsupported("SQLXML");
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": " + command;
}
@Override
protected boolean checkClosed(boolean write) {
if (super.checkClosed(write)) {
// if the session was re-connected, re-prepare the statement
ArrayList<? extends ParameterInterface> oldParams = command.getParameters();
command = conn.prepareCommand(sqlStatement, fetchSize);
ArrayList<? extends ParameterInterface> newParams = command.getParameters();
for (int i = 0, size = oldParams.size(); i < size; i++) {
ParameterInterface old = oldParams.get(i);
Value value = old.getParamValue();
if (value != null) {
ParameterInterface n = newParams.get(i);
n.setValue(value, false);
}
}
return true;
}
return false;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcPreparedStatementBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.SQLException;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcPreparedStatementBackwardsCompat {
// compatibility interface
// JDBC 4.2 (incomplete)
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, this will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
 * @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback)
* @throws SQLException if this object is closed or invalid
*/
long executeLargeUpdate() throws SQLException;
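    /*
     * Usage sketch (illustrative only, not part of the original sources): on
     * Java 8 and later this method is also reachable through the standard
     * JDBC 4.2 API, and matters when a statement may affect more than
     * Integer.MAX_VALUE rows.
     *
     *     long affected = prep.executeLargeUpdate();
     */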
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcResultSet.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.h2.api.ErrorCode;
import org.h2.api.TimestampWithTimeZone;
import org.h2.command.CommandInterface;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.result.ResultInterface;
import org.h2.result.UpdatableRow;
import org.h2.util.DateTimeUtils;
import org.h2.util.IOUtils;
import org.h2.util.LocalDateTimeUtils;
import org.h2.util.StringUtils;
import org.h2.value.CompareMode;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueBoolean;
import org.h2.value.ValueByte;
import org.h2.value.ValueBytes;
import org.h2.value.ValueDate;
import org.h2.value.ValueDecimal;
import org.h2.value.ValueDouble;
import org.h2.value.ValueFloat;
import org.h2.value.ValueInt;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
import org.h2.value.ValueShort;
import org.h2.value.ValueString;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;
/**
* <p>
* Represents a result set.
* </p>
* <p>
* Column labels are case-insensitive, quotes are not supported. The first
* column has the column index 1.
* </p>
* <p>
* Updatable result sets: Result sets are updatable when the result only
* contains columns from one table, and if it contains all columns of a unique
* index (primary key or other) of this table. Key columns may not contain NULL
* (because multiple rows with NULL could exist). In updatable result sets, own
* changes are visible, but not own inserts and deletes.
* </p>
*/
public class JdbcResultSet extends TraceObject implements ResultSet, JdbcResultSetBackwardsCompat {
private final boolean closeStatement;
private final boolean scrollable;
private final boolean updatable;
private ResultInterface result;
private JdbcConnection conn;
private JdbcStatement stat;
private int columnCount;
private boolean wasNull;
private Value[] insertRow;
private Value[] updateRow;
private HashMap<String, Integer> columnLabelMap;
private HashMap<Integer, Value[]> patchedRows;
private JdbcPreparedStatement preparedStatement;
private final CommandInterface command;
JdbcResultSet(JdbcConnection conn, JdbcStatement stat, CommandInterface command,
ResultInterface result, int id, boolean closeStatement,
boolean scrollable, boolean updatable) {
setTrace(conn.getSession().getTrace(), TraceObject.RESULT_SET, id);
this.conn = conn;
this.stat = stat;
this.command = command;
this.result = result;
this.columnCount = result.getVisibleColumnCount();
this.closeStatement = closeStatement;
this.scrollable = scrollable;
this.updatable = updatable;
}
JdbcResultSet(JdbcConnection conn, JdbcPreparedStatement preparedStatement,
CommandInterface command, ResultInterface result, int id, boolean closeStatement,
boolean scrollable, boolean updatable,
HashMap<String, Integer> columnLabelMap) {
this(conn, preparedStatement, command, result, id, closeStatement, scrollable,
updatable);
this.columnLabelMap = columnLabelMap;
this.preparedStatement = preparedStatement;
}
/**
* Moves the cursor to the next row of the result set.
*
* @return true if successful, false if there are no more rows
*/
@Override
public boolean next() throws SQLException {
try {
debugCodeCall("next");
checkClosed();
return nextRow();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the meta data of this result set.
*
* @return the meta data
*/
@Override
public ResultSetMetaData getMetaData() throws SQLException {
try {
int id = getNextId(TraceObject.RESULT_SET_META_DATA);
if (isDebugEnabled()) {
debugCodeAssign("ResultSetMetaData",
TraceObject.RESULT_SET_META_DATA, id, "getMetaData()");
}
checkClosed();
String catalog = conn.getCatalog();
return new JdbcResultSetMetaData(this, null, result, catalog, conn.getSession().getTrace(), id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns whether the last column accessed was null.
*
* @return true if the last column accessed was null
*/
@Override
public boolean wasNull() throws SQLException {
try {
debugCodeCall("wasNull");
checkClosed();
return wasNull;
} catch (Exception e) {
throw logAndConvert(e);
}
}
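    /*
     * Usage sketch (illustrative only, not part of the original sources):
     * primitive getters such as getInt() return 0 for SQL NULL, so wasNull()
     * is the only way to tell NULL and 0 apart after the fact. The column
     * name is an assumption made for the example.
     *
     *     int amount = rs.getInt("AMOUNT");
     *     if (rs.wasNull()) {
     *         // the column really was NULL, not zero
     *     }
     */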
/**
* Searches for a specific column in the result set. A case-insensitive
* search is made.
*
* @param columnLabel the column label
* @return the column index (1,2,...)
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public int findColumn(String columnLabel) throws SQLException {
try {
debugCodeCall("findColumn", columnLabel);
return getColumnIndex(columnLabel);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Closes the result set.
*/
@Override
public void close() throws SQLException {
try {
debugCodeCall("close");
closeInternal();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Close the result set. This method also closes the statement if required.
*/
void closeInternal() throws SQLException {
if (result != null) {
try {
if (result.isLazy()) {
stat.onLazyResultSetClose(command, preparedStatement == null);
}
result.close();
if (closeStatement && stat != null) {
stat.close();
}
} finally {
columnCount = 0;
result = null;
stat = null;
conn = null;
insertRow = null;
updateRow = null;
}
}
}
/**
* Returns the statement that created this object.
*
* @return the statement or prepared statement, or null if created by a
* DatabaseMetaData call.
*/
@Override
public Statement getStatement() throws SQLException {
try {
debugCodeCall("getStatement");
checkClosed();
if (closeStatement) {
// if the result set was opened by a DatabaseMetaData call
return null;
}
return stat;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the first warning reported by calls on this object.
*
* @return null
*/
@Override
public SQLWarning getWarnings() throws SQLException {
try {
debugCodeCall("getWarnings");
checkClosed();
return null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Clears all warnings.
*/
@Override
public void clearWarnings() throws SQLException {
try {
debugCodeCall("clearWarnings");
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
// =============================================================
/**
* Returns the value of the specified column as a String.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public String getString(int columnIndex) throws SQLException {
try {
debugCodeCall("getString", columnIndex);
return get(columnIndex).getString();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a String.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public String getString(String columnLabel) throws SQLException {
try {
debugCodeCall("getString", columnLabel);
return get(columnLabel).getString();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as an int.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public int getInt(int columnIndex) throws SQLException {
try {
debugCodeCall("getInt", columnIndex);
return get(columnIndex).getInt();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as an int.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public int getInt(String columnLabel) throws SQLException {
try {
debugCodeCall("getInt", columnLabel);
return get(columnLabel).getInt();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a BigDecimal.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
try {
debugCodeCall("getBigDecimal", columnIndex);
return get(columnIndex).getBigDecimal();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Date.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Date getDate(int columnIndex) throws SQLException {
try {
debugCodeCall("getDate", columnIndex);
return get(columnIndex).getDate();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Time.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Time getTime(int columnIndex) throws SQLException {
try {
debugCodeCall("getTime", columnIndex);
return get(columnIndex).getTime();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Timestamp.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Timestamp getTimestamp(int columnIndex) throws SQLException {
try {
debugCodeCall("getTimestamp", columnIndex);
return get(columnIndex).getTimestamp();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a BigDecimal.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public BigDecimal getBigDecimal(String columnLabel) throws SQLException {
try {
debugCodeCall("getBigDecimal", columnLabel);
return get(columnLabel).getBigDecimal();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Date.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Date getDate(String columnLabel) throws SQLException {
try {
debugCodeCall("getDate", columnLabel);
return get(columnLabel).getDate();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Time.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Time getTime(String columnLabel) throws SQLException {
try {
debugCodeCall("getTime", columnLabel);
return get(columnLabel).getTime();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Timestamp.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Timestamp getTimestamp(String columnLabel) throws SQLException {
try {
debugCodeCall("getTimestamp", columnLabel);
return get(columnLabel).getTimestamp();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns a column value as a Java object. The data is
* de-serialized into a Java object (on the client side).
*
* @param columnIndex (1,2,...)
* @return the value or null
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Object getObject(int columnIndex) throws SQLException {
try {
debugCodeCall("getObject", columnIndex);
Value v = get(columnIndex);
return conn.convertToDefaultObject(v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns a column value as a Java object. The data is
* de-serialized into a Java object (on the client side).
*
* @param columnLabel the column label
* @return the value or null
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Object getObject(String columnLabel) throws SQLException {
try {
debugCodeCall("getObject", columnLabel);
Value v = get(columnLabel);
return conn.convertToDefaultObject(v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a boolean.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
try {
debugCodeCall("getBoolean", columnIndex);
return get(columnIndex).getBoolean();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a boolean.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public boolean getBoolean(String columnLabel) throws SQLException {
try {
debugCodeCall("getBoolean", columnLabel);
return get(columnLabel).getBoolean();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a byte.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public byte getByte(int columnIndex) throws SQLException {
try {
debugCodeCall("getByte", columnIndex);
return get(columnIndex).getByte();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a byte.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public byte getByte(String columnLabel) throws SQLException {
try {
debugCodeCall("getByte", columnLabel);
return get(columnLabel).getByte();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a short.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public short getShort(int columnIndex) throws SQLException {
try {
debugCodeCall("getShort", columnIndex);
return get(columnIndex).getShort();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a short.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public short getShort(String columnLabel) throws SQLException {
try {
debugCodeCall("getShort", columnLabel);
return get(columnLabel).getShort();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a long.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public long getLong(int columnIndex) throws SQLException {
try {
debugCodeCall("getLong", columnIndex);
return get(columnIndex).getLong();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a long.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public long getLong(String columnLabel) throws SQLException {
try {
debugCodeCall("getLong", columnLabel);
return get(columnLabel).getLong();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a float.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public float getFloat(int columnIndex) throws SQLException {
try {
debugCodeCall("getFloat", columnIndex);
return get(columnIndex).getFloat();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a float.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public float getFloat(String columnLabel) throws SQLException {
try {
debugCodeCall("getFloat", columnLabel);
return get(columnLabel).getFloat();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a double.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public double getDouble(int columnIndex) throws SQLException {
try {
debugCodeCall("getDouble", columnIndex);
return get(columnIndex).getDouble();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a double.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public double getDouble(String columnLabel) throws SQLException {
try {
debugCodeCall("getDouble", columnLabel);
return get(columnLabel).getDouble();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a BigDecimal.
*
* @deprecated use {@link #getBigDecimal(String)}
*
* @param columnLabel the column label
* @param scale the scale of the returned value
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Deprecated
@Override
public BigDecimal getBigDecimal(String columnLabel, int scale)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getBigDecimal(" +
StringUtils.quoteJavaString(columnLabel)+", "+scale+");");
}
if (scale < 0) {
throw DbException.getInvalidValueException("scale", scale);
}
BigDecimal bd = get(columnLabel).getBigDecimal();
return bd == null ? null : ValueDecimal.setScale(bd, scale);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a BigDecimal.
*
* @deprecated use {@link #getBigDecimal(int)}
*
* @param columnIndex (1,2,...)
* @param scale the scale of the returned value
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Deprecated
@Override
public BigDecimal getBigDecimal(int columnIndex, int scale)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getBigDecimal(" + columnIndex + ", " + scale + ");");
}
if (scale < 0) {
throw DbException.getInvalidValueException("scale", scale);
}
BigDecimal bd = get(columnIndex).getBigDecimal();
return bd == null ? null : ValueDecimal.setScale(bd, scale);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
* @deprecated since JDBC 2.0, use getCharacterStream
*/
@Deprecated
@Override
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
throw unsupported("unicodeStream");
}
/**
* [Not supported]
 * @deprecated since JDBC 2.0, use getCharacterStream
*/
@Deprecated
@Override
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
throw unsupported("unicodeStream");
}
/**
 * [Not supported] Gets a column as an object using the specified type
* mapping.
*/
@Override
public Object getObject(int columnIndex, Map<String, Class<?>> map)
throws SQLException {
throw unsupported("map");
}
/**
 * [Not supported] Gets a column as an object using the specified type
* mapping.
*/
@Override
public Object getObject(String columnLabel, Map<String, Class<?>> map)
throws SQLException {
throw unsupported("map");
}
/**
* [Not supported] Gets a column as a reference.
*/
@Override
public Ref getRef(int columnIndex) throws SQLException {
throw unsupported("ref");
}
/**
* [Not supported] Gets a column as a reference.
*/
@Override
public Ref getRef(String columnLabel) throws SQLException {
throw unsupported("ref");
}
/**
* Returns the value of the specified column as a java.sql.Date using a
* specified time zone.
*
* @param columnIndex (1,2,...)
* @param calendar the calendar
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Date getDate(int columnIndex, Calendar calendar) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getDate(" + columnIndex + ", calendar)");
}
return DateTimeUtils.convertDate(get(columnIndex), calendar);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Date using a
* specified time zone.
*
* @param columnLabel the column label
* @param calendar the calendar
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Date getDate(String columnLabel, Calendar calendar)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getDate(" +
StringUtils.quoteJavaString(columnLabel) +
", calendar)");
}
return DateTimeUtils.convertDate(get(columnLabel), calendar);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Time using a
* specified time zone.
*
* @param columnIndex (1,2,...)
* @param calendar the calendar
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Time getTime(int columnIndex, Calendar calendar) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getTime(" + columnIndex + ", calendar)");
}
return DateTimeUtils.convertTime(get(columnIndex), calendar);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Time using a
* specified time zone.
*
* @param columnLabel the column label
* @param calendar the calendar
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Time getTime(String columnLabel, Calendar calendar)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getTime(" +
StringUtils.quoteJavaString(columnLabel) +
", calendar)");
}
return DateTimeUtils.convertTime(get(columnLabel), calendar);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a java.sql.Timestamp using a
* specified time zone.
*
* @param columnIndex (1,2,...)
* @param calendar the calendar
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Timestamp getTimestamp(int columnIndex, Calendar calendar)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getTimestamp(" + columnIndex + ", calendar)");
}
Value value = get(columnIndex);
return DateTimeUtils.convertTimestamp(value, calendar);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
 * Returns the value of the specified column as a java.sql.Timestamp using
 * a specified time zone.
*
* @param columnLabel the column label
* @param calendar the calendar
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Timestamp getTimestamp(String columnLabel, Calendar calendar)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("getTimestamp(" +
StringUtils.quoteJavaString(columnLabel) +
", calendar)");
}
Value value = get(columnLabel);
return DateTimeUtils.convertTimestamp(value, calendar);
} catch (Exception e) {
throw logAndConvert(e);
}
}
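    /*
     * Usage sketch (illustrative only, not part of the original sources):
     * reading a timestamp relative to an explicit time zone, mirroring the
     * Calendar variants of the setters. The column name is an assumption
     * made for the example.
     *
     *     Calendar utc = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
     *     Timestamp ts = rs.getTimestamp("TS", utc);
     */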
/**
* Returns the value of the specified column as a Blob.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Blob getBlob(int columnIndex) throws SQLException {
try {
int id = getNextId(TraceObject.BLOB);
if (isDebugEnabled()) {
debugCodeAssign("Blob", TraceObject.BLOB,
id, "getBlob(" + columnIndex + ")");
}
Value v = get(columnIndex);
return v == ValueNull.INSTANCE ? null : new JdbcBlob(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a Blob.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Blob getBlob(String columnLabel) throws SQLException {
try {
int id = getNextId(TraceObject.BLOB);
if (isDebugEnabled()) {
debugCodeAssign("Blob", TraceObject.BLOB,
id, "getBlob(" + quote(columnLabel) + ")");
}
Value v = get(columnLabel);
return v == ValueNull.INSTANCE ? null : new JdbcBlob(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a byte array.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
try {
debugCodeCall("getBytes", columnIndex);
return get(columnIndex).getBytes();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a byte array.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public byte[] getBytes(String columnLabel) throws SQLException {
try {
debugCodeCall("getBytes", columnLabel);
return get(columnLabel).getBytes();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as an input stream.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public InputStream getBinaryStream(int columnIndex) throws SQLException {
try {
debugCodeCall("getBinaryStream", columnIndex);
return get(columnIndex).getInputStream();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as an input stream.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public InputStream getBinaryStream(String columnLabel) throws SQLException {
try {
debugCodeCall("getBinaryStream", columnLabel);
return get(columnLabel).getInputStream();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a Clob.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Clob getClob(int columnIndex) throws SQLException {
try {
int id = getNextId(TraceObject.CLOB);
if (isDebugEnabled()) {
debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" + columnIndex + ")");
}
Value v = get(columnIndex);
return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a Clob.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Clob getClob(String columnLabel) throws SQLException {
try {
int id = getNextId(TraceObject.CLOB);
if (isDebugEnabled()) {
debugCodeAssign("Clob", TraceObject.CLOB, id, "getClob(" +
quote(columnLabel) + ")");
}
Value v = get(columnLabel);
return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
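    /*
     * Usage sketch (illustrative only, not part of the original sources):
     * reading LOB columns as streams rather than as materialized arrays or
     * strings. The column name is an assumption made for the example.
     *
     *     Blob blob = rs.getBlob("DATA");
     *     if (blob != null) {
     *         try (InputStream in = blob.getBinaryStream()) {
     *             // consume the binary data incrementally
     *         }
     *     }
     */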
/**
* Returns the value of the specified column as an Array.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Array getArray(int columnIndex) throws SQLException {
try {
int id = getNextId(TraceObject.ARRAY);
if (isDebugEnabled()) {
debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" + columnIndex + ")");
}
Value v = get(columnIndex);
return v == ValueNull.INSTANCE ? null : new JdbcArray(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as an Array.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Array getArray(String columnLabel) throws SQLException {
try {
int id = getNextId(TraceObject.ARRAY);
if (isDebugEnabled()) {
debugCodeAssign("Array", TraceObject.ARRAY, id, "getArray(" +
quote(columnLabel) + ")");
}
Value v = get(columnLabel);
return v == ValueNull.INSTANCE ? null : new JdbcArray(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as an input stream.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public InputStream getAsciiStream(int columnIndex) throws SQLException {
try {
debugCodeCall("getAsciiStream", columnIndex);
String s = get(columnIndex).getString();
return s == null ? null : IOUtils.getInputStreamFromString(s);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as an input stream.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public InputStream getAsciiStream(String columnLabel) throws SQLException {
try {
debugCodeCall("getAsciiStream", columnLabel);
String s = get(columnLabel).getString();
            return s == null ? null : IOUtils.getInputStreamFromString(s);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a reader.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Reader getCharacterStream(int columnIndex) throws SQLException {
try {
debugCodeCall("getCharacterStream", columnIndex);
return get(columnIndex).getReader();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a reader.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Reader getCharacterStream(String columnLabel) throws SQLException {
try {
debugCodeCall("getCharacterStream", columnLabel);
return get(columnLabel).getReader();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
*/
@Override
public URL getURL(int columnIndex) throws SQLException {
throw unsupported("url");
}
/**
* [Not supported]
*/
@Override
public URL getURL(String columnLabel) throws SQLException {
throw unsupported("url");
}
// =============================================================
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNull(int columnIndex) throws SQLException {
try {
debugCodeCall("updateNull", columnIndex);
update(columnIndex, ValueNull.INSTANCE);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNull(String columnLabel) throws SQLException {
try {
debugCodeCall("updateNull", columnLabel);
update(columnLabel, ValueNull.INSTANCE);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBoolean(int columnIndex, boolean x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBoolean("+columnIndex+", "+x+");");
}
update(columnIndex, ValueBoolean.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if result set is closed or not updatable
*/
@Override
public void updateBoolean(String columnLabel, boolean x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBoolean("+quote(columnLabel)+", "+x+");");
}
update(columnLabel, ValueBoolean.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateByte(int columnIndex, byte x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateByte("+columnIndex+", "+x+");");
}
update(columnIndex, ValueByte.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateByte(String columnLabel, byte x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateByte("+columnLabel+", "+x+");");
}
update(columnLabel, ValueByte.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBytes(int columnIndex, byte[] x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBytes("+columnIndex+", x);");
}
update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBytes(String columnLabel, byte[] x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBytes("+quote(columnLabel)+", x);");
}
update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueBytes.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateShort(int columnIndex, short x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateShort("+columnIndex+", (short) "+x+");");
}
update(columnIndex, ValueShort.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateShort(String columnLabel, short x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateShort("+quote(columnLabel)+", (short) "+x+");");
}
update(columnLabel, ValueShort.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateInt(int columnIndex, int x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateInt("+columnIndex+", "+x+");");
}
update(columnIndex, ValueInt.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateInt(String columnLabel, int x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateInt("+quote(columnLabel)+", "+x+");");
}
update(columnLabel, ValueInt.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateLong(int columnIndex, long x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateLong("+columnIndex+", "+x+"L);");
}
update(columnIndex, ValueLong.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateLong(String columnLabel, long x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateLong("+quote(columnLabel)+", "+x+"L);");
}
update(columnLabel, ValueLong.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateFloat(int columnIndex, float x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateFloat("+columnIndex+", "+x+"f);");
}
update(columnIndex, ValueFloat.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateFloat(String columnLabel, float x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateFloat("+quote(columnLabel)+", "+x+"f);");
}
update(columnLabel, ValueFloat.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateDouble(int columnIndex, double x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateDouble("+columnIndex+", "+x+"d);");
}
update(columnIndex, ValueDouble.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateDouble(String columnLabel, double x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateDouble("+quote(columnLabel)+", "+x+"d);");
}
update(columnLabel, ValueDouble.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBigDecimal(int columnIndex, BigDecimal x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBigDecimal("+columnIndex+", " + quoteBigDecimal(x) + ");");
}
update(columnIndex, x == null ? (Value) ValueNull.INSTANCE
: ValueDecimal.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBigDecimal(String columnLabel, BigDecimal x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBigDecimal(" + quote(columnLabel) + ", " +
quoteBigDecimal(x) + ");");
}
update(columnLabel, x == null ? (Value) ValueNull.INSTANCE
: ValueDecimal.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateString(int columnIndex, String x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateString("+columnIndex+", "+quote(x)+");");
}
update(columnIndex, x == null ? (Value) ValueNull.INSTANCE
: ValueString.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateString(String columnLabel, String x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateString("+quote(columnLabel)+", "+quote(x)+");");
}
update(columnLabel, x == null ? (Value) ValueNull.INSTANCE
: ValueString.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateDate(int columnIndex, Date x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateDate("+columnIndex+", x);");
}
update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateDate(String columnLabel, Date x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateDate("+quote(columnLabel)+", x);");
}
update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueDate.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateTime(int columnIndex, Time x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateTime("+columnIndex+", x);");
}
update(columnIndex, x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateTime(String columnLabel, Time x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateTime("+quote(columnLabel)+", x);");
}
update(columnLabel, x == null ? (Value) ValueNull.INSTANCE : ValueTime.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateTimestamp(int columnIndex, Timestamp x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateTimestamp("+columnIndex+", x);");
}
update(columnIndex, x == null ? (Value) ValueNull.INSTANCE
: ValueTimestamp.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateTimestamp(String columnLabel, Timestamp x)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateTimestamp("+quote(columnLabel)+", x);");
}
update(columnLabel, x == null ? (Value) ValueNull.INSTANCE
: ValueTimestamp.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateAsciiStream(int columnIndex, InputStream x, int length)
throws SQLException {
updateAsciiStream(columnIndex, x, (long) length);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateAsciiStream(int columnIndex, InputStream x)
throws SQLException {
updateAsciiStream(columnIndex, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateAsciiStream(int columnIndex, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateAsciiStream("+columnIndex+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createClob(IOUtils.getAsciiReader(x), length);
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateAsciiStream(String columnLabel, InputStream x, int length)
throws SQLException {
updateAsciiStream(columnLabel, x, (long) length);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
     * @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateAsciiStream(String columnLabel, InputStream x)
throws SQLException {
updateAsciiStream(columnLabel, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateAsciiStream(String columnLabel, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateAsciiStream("+quote(columnLabel)+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createClob(IOUtils.getAsciiReader(x), length);
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBinaryStream(int columnIndex, InputStream x, int length)
throws SQLException {
updateBinaryStream(columnIndex, x, (long) length);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBinaryStream(int columnIndex, InputStream x)
throws SQLException {
updateBinaryStream(columnIndex, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBinaryStream(int columnIndex, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBinaryStream("+columnIndex+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createBlob(x, length);
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBinaryStream(String columnLabel, InputStream x)
throws SQLException {
updateBinaryStream(columnLabel, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBinaryStream(String columnLabel, InputStream x, int length)
throws SQLException {
updateBinaryStream(columnLabel, x, (long) length);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBinaryStream(String columnLabel, InputStream x,
long length) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBinaryStream("+quote(columnLabel)+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createBlob(x, length);
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateCharacterStream(int columnIndex, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateCharacterStream("+columnIndex+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createClob(x, length);
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateCharacterStream(int columnIndex, Reader x, int length)
throws SQLException {
updateCharacterStream(columnIndex, x, (long) length);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateCharacterStream(int columnIndex, Reader x)
throws SQLException {
updateCharacterStream(columnIndex, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateCharacterStream(String columnLabel, Reader x, int length)
throws SQLException {
updateCharacterStream(columnLabel, x, (long) length);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateCharacterStream(String columnLabel, Reader x)
throws SQLException {
updateCharacterStream(columnLabel, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateCharacterStream(String columnLabel, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateCharacterStream("+quote(columnLabel)+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createClob(x, length);
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param scale is ignored
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateObject(int columnIndex, Object x, int scale)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateObject("+columnIndex+", x, "+scale+");");
}
update(columnIndex, convertToUnknownValue(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param scale is ignored
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateObject(String columnLabel, Object x, int scale)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateObject("+quote(columnLabel)+", x, "+scale+");");
}
update(columnLabel, convertToUnknownValue(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateObject(int columnIndex, Object x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateObject("+columnIndex+", x);");
}
update(columnIndex, convertToUnknownValue(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateObject(String columnLabel, Object x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateObject("+quote(columnLabel)+", x);");
}
update(columnLabel, convertToUnknownValue(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
*/
@Override
public void updateRef(int columnIndex, Ref x) throws SQLException {
throw unsupported("ref");
}
/**
* [Not supported]
*/
@Override
public void updateRef(String columnLabel, Ref x) throws SQLException {
throw unsupported("ref");
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBlob(int columnIndex, InputStream x) throws SQLException {
updateBlob(columnIndex, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the length
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBlob(int columnIndex, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBlob("+columnIndex+", x, " + length + "L);");
}
checkClosed();
Value v = conn.createBlob(x, length);
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBlob(int columnIndex, Blob x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBlob("+columnIndex+", x);");
}
checkClosed();
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createBlob(x.getBinaryStream(), -1);
}
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBlob(String columnLabel, Blob x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBlob("+quote(columnLabel)+", x);");
}
checkClosed();
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createBlob(x.getBinaryStream(), -1);
}
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBlob(String columnLabel, InputStream x) throws SQLException {
updateBlob(columnLabel, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the length
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateBlob(String columnLabel, InputStream x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateBlob("+quote(columnLabel)+", x, " + length + "L);");
}
checkClosed();
            Value v = conn.createBlob(x, length);
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateClob(int columnIndex, Clob x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateClob("+columnIndex+", x);");
}
checkClosed();
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createClob(x.getCharacterStream(), -1);
}
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateClob(int columnIndex, Reader x) throws SQLException {
updateClob(columnIndex, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the length
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateClob(int columnIndex, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateClob("+columnIndex+", x, " + length + "L);");
}
checkClosed();
Value v = conn.createClob(x, length);
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateClob(String columnLabel, Clob x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateClob("+quote(columnLabel)+", x);");
}
checkClosed();
Value v;
if (x == null) {
v = ValueNull.INSTANCE;
} else {
v = conn.createClob(x.getCharacterStream(), -1);
}
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateClob(String columnLabel, Reader x) throws SQLException {
updateClob(columnLabel, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the length
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateClob(String columnLabel, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateClob("+quote(columnLabel)+", x, " + length + "L);");
}
checkClosed();
Value v = conn.createClob(x, length);
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
*/
@Override
public void updateArray(int columnIndex, Array x) throws SQLException {
throw unsupported("setArray");
}
/**
* [Not supported]
*/
@Override
public void updateArray(String columnLabel, Array x) throws SQLException {
throw unsupported("setArray");
}
/**
* [Not supported] Gets the cursor name if it was defined. This feature is
* superseded by updateX methods. This method throws a SQLException because
* cursor names are not supported.
*/
@Override
public String getCursorName() throws SQLException {
throw unsupported("cursorName");
}
/**
* Gets the current row number. The first row is row 1, the second 2 and so
* on. This method returns 0 before the first and after the last row.
*
* @return the row number
*/
@Override
public int getRow() throws SQLException {
try {
debugCodeCall("getRow");
checkClosed();
if (result.isAfterLast()) {
return 0;
}
int rowId = result.getRowId();
return rowId + 1;
} catch (Exception e) {
throw logAndConvert(e);
}
}
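    /*
     * Usage sketch (editorial illustration): the row number is 0 both before
     * the first row and after the last row.
     *
     *   rs.getRow();  // 0 before the first call to next()
     *   rs.next();
     *   rs.getRow();  // 1 on the first row
     */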
/**
* Gets the result set concurrency. Result sets are only updatable if the
* statement was created with updatable concurrency, and if the result set
* contains all columns of the primary key or of a unique index of a table.
*
* @return ResultSet.CONCUR_UPDATABLE if the result set is updatable, or
* ResultSet.CONCUR_READ_ONLY otherwise
*/
@Override
public int getConcurrency() throws SQLException {
try {
debugCodeCall("getConcurrency");
checkClosed();
if (!updatable) {
return ResultSet.CONCUR_READ_ONLY;
}
UpdatableRow row = new UpdatableRow(conn, result);
return row.isUpdatable() ? ResultSet.CONCUR_UPDATABLE
: ResultSet.CONCUR_READ_ONLY;
} catch (Exception e) {
throw logAndConvert(e);
}
}
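    /*
     * Usage sketch (editorial illustration; assumes a connection "conn" and a
     * table with a primary key, neither of which is shown here):
     *
     *   Statement stat = conn.createStatement(
     *           ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
     *   ResultSet rs = stat.executeQuery("SELECT ID, NAME FROM TEST");
     *   boolean canUpdate = rs.getConcurrency() == ResultSet.CONCUR_UPDATABLE;
     */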
/**
* Gets the fetch direction.
*
* @return the direction: FETCH_FORWARD
*/
@Override
public int getFetchDirection() throws SQLException {
try {
debugCodeCall("getFetchDirection");
checkClosed();
return ResultSet.FETCH_FORWARD;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Gets the suggested number of rows to read in one step.
*
* @return the current fetch size
*/
@Override
public int getFetchSize() throws SQLException {
try {
debugCodeCall("getFetchSize");
checkClosed();
return result.getFetchSize();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Sets the suggested number of rows to read in one step. This value
     * cannot be higher than the maximum number of rows (setMaxRows) set by
     * the statement or prepared statement, otherwise an exception is thrown.
     * Setting the value
* to 0 will set the default value. The default value can be changed using
* the system property h2.serverResultSetFetchSize.
*
* @param rows the number of rows
*/
@Override
public void setFetchSize(int rows) throws SQLException {
try {
debugCodeCall("setFetchSize", rows);
checkClosed();
if (rows < 0) {
throw DbException.getInvalidValueException("rows", rows);
} else if (rows > 0) {
if (stat != null) {
int maxRows = stat.getMaxRows();
if (maxRows > 0 && rows > maxRows) {
throw DbException.getInvalidValueException("rows", rows);
}
}
} else {
rows = SysProperties.SERVER_RESULT_SET_FETCH_SIZE;
}
result.setFetchSize(rows);
} catch (Exception e) {
throw logAndConvert(e);
}
}
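    /*
     * Usage sketch (editorial illustration):
     *
     *   rs.setFetchSize(100); // suggest reading about 100 rows per step
     *   rs.setFetchSize(0);   // restore the configured default
     */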
/**
* [Not supported]
     * Sets (changes) the fetch direction for this result set. This driver
     * only supports FETCH_FORWARD (the default); requesting any other
     * direction throws an exception.
     *
     * @param direction the new fetch direction
     * @throws SQLException Unsupported Feature if a direction other than
     *             FETCH_FORWARD is requested
*/
@Override
public void setFetchDirection(int direction) throws SQLException {
debugCodeCall("setFetchDirection", direction);
// ignore FETCH_FORWARD, that's the default value, which we do support
if (direction != ResultSet.FETCH_FORWARD) {
throw unsupported("setFetchDirection");
}
}
/**
* Get the result set type.
*
* @return the result set type (TYPE_FORWARD_ONLY, TYPE_SCROLL_INSENSITIVE
* or TYPE_SCROLL_SENSITIVE)
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public int getType() throws SQLException {
try {
debugCodeCall("getType");
checkClosed();
return stat == null ? ResultSet.TYPE_FORWARD_ONLY : stat.resultSetType;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if the current position is before the first row, that means next()
* was not called yet, and there is at least one row.
*
* @return if there are results and the current position is before the first
* row
* @throws SQLException if the result set is closed
*/
@Override
public boolean isBeforeFirst() throws SQLException {
try {
debugCodeCall("isBeforeFirst");
checkClosed();
return result.getRowId() < 0 && result.hasNext();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if the current position is after the last row, that means next()
* was called and returned false, and there was at least one row.
*
* @return if there are results and the current position is after the last
* row
* @throws SQLException if the result set is closed
*/
@Override
public boolean isAfterLast() throws SQLException {
try {
debugCodeCall("isAfterLast");
checkClosed();
return result.getRowId() > 0 && result.isAfterLast();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if the current position is row 1, that means next() was called
* once and returned true.
*
* @return if the current position is the first row
* @throws SQLException if the result set is closed
*/
@Override
public boolean isFirst() throws SQLException {
try {
debugCodeCall("isFirst");
checkClosed();
return result.getRowId() == 0 && !result.isAfterLast();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Checks if the current position is the last row, that means next() was
     * called and has not yet returned false, but will on the next call.
*
* @return if the current position is the last row
* @throws SQLException if the result set is closed
*/
@Override
public boolean isLast() throws SQLException {
try {
debugCodeCall("isLast");
checkClosed();
int rowId = result.getRowId();
return rowId >= 0 && !result.isAfterLast() && !result.hasNext();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Moves the current position to before the first row, that is, it resets
     * the result set.
*
* @throws SQLException if the result set is closed
*/
@Override
public void beforeFirst() throws SQLException {
try {
debugCodeCall("beforeFirst");
checkClosed();
if (result.getRowId() >= 0) {
resetResult();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Moves the current position to after the last row, that is, past the
     * end of the result set.
*
* @throws SQLException if the result set is closed
*/
@Override
public void afterLast() throws SQLException {
try {
debugCodeCall("afterLast");
checkClosed();
while (nextRow()) {
// nothing
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Moves the current position to the first row. This is the same as calling
* beforeFirst() followed by next().
*
* @return true if there is a row available, false if not
* @throws SQLException if the result set is closed
*/
@Override
public boolean first() throws SQLException {
try {
debugCodeCall("first");
checkClosed();
if (result.getRowId() >= 0) {
resetResult();
}
return nextRow();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Moves the current position to the last row.
*
* @return true if there is a row available, false if not
* @throws SQLException if the result set is closed
*/
@Override
public boolean last() throws SQLException {
try {
debugCodeCall("last");
checkClosed();
if (result.isAfterLast()) {
resetResult();
}
while (result.hasNext()) {
nextRow();
}
return isOnValidRow();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Moves the current position to a specific row.
*
* @param rowNumber the row number. 0 is not allowed, 1 means the first row,
* 2 the second. -1 means the last row, -2 the row before the
* last row. If the value is too large, the position is moved
     * after the last row; if the value is too small it is moved
* before the first row.
* @return true if there is a row available, false if not
* @throws SQLException if the result set is closed
*/
@Override
public boolean absolute(int rowNumber) throws SQLException {
try {
debugCodeCall("absolute", rowNumber);
checkClosed();
if (rowNumber < 0) {
rowNumber = result.getRowCount() + rowNumber + 1;
}
if (--rowNumber < result.getRowId()) {
resetResult();
}
while (result.getRowId() < rowNumber) {
if (!nextRow()) {
return false;
}
}
return isOnValidRow();
} catch (Exception e) {
throw logAndConvert(e);
}
}
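    /*
     * Usage sketch (editorial illustration; requires a scrollable result set):
     *
     *   rs.absolute(1);  // move to the first row
     *   rs.absolute(-1); // move to the last row
     *   rs.absolute(-2); // move to the row before the last row
     */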
/**
* Moves the current position to a specific row relative to the current row.
*
* @param rowCount 0 means don't do anything, 1 is the next row, -1 the
* previous. If the value is too large, the position is moved
     * after the last row; if the value is too small it is moved
* before the first row.
* @return true if there is a row available, false if not
* @throws SQLException if the result set is closed
*/
@Override
public boolean relative(int rowCount) throws SQLException {
try {
debugCodeCall("relative", rowCount);
checkClosed();
if (rowCount < 0) {
rowCount = result.getRowId() + rowCount + 1;
resetResult();
}
for (int i = 0; i < rowCount; i++) {
if (!nextRow()) {
return false;
}
}
return isOnValidRow();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Moves the cursor to the previous row, or before the first row if the
     * current position is the first row.
*
* @return true if there is a row available, false if not
* @throws SQLException if the result set is closed
*/
@Override
public boolean previous() throws SQLException {
try {
debugCodeCall("previous");
checkClosed();
return relative(-1);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Moves the current position to the insert row. The current row is
* remembered.
*
* @throws SQLException if the result set is closed or is not updatable
*/
@Override
public void moveToInsertRow() throws SQLException {
try {
debugCodeCall("moveToInsertRow");
checkUpdatable();
insertRow = new Value[columnCount];
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Moves the current position to the current row.
*
* @throws SQLException if the result set is closed or is not updatable
*/
@Override
public void moveToCurrentRow() throws SQLException {
try {
debugCodeCall("moveToCurrentRow");
checkUpdatable();
insertRow = null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Detects if the row was updated (by somebody else or the caller).
*
* @return false because this driver does not detect this
*/
@Override
public boolean rowUpdated() throws SQLException {
try {
debugCodeCall("rowUpdated");
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Detects if the row was inserted.
*
* @return false because this driver does not detect this
*/
@Override
public boolean rowInserted() throws SQLException {
try {
debugCodeCall("rowInserted");
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Detects if the row was deleted (by somebody else or the caller).
*
* @return false because this driver does not detect this
*/
@Override
public boolean rowDeleted() throws SQLException {
try {
debugCodeCall("rowDeleted");
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Inserts the current row. The current position must be the insert row.
*
* @throws SQLException if the result set is closed or if not on the insert
     * row, or if the result set is not updatable
*/
@Override
public void insertRow() throws SQLException {
try {
debugCodeCall("insertRow");
checkUpdatable();
if (insertRow == null) {
throw DbException.get(ErrorCode.NOT_ON_UPDATABLE_ROW);
}
getUpdatableRow().insertRow(insertRow);
insertRow = null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
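    /*
     * Usage sketch (editorial illustration; the column names are made up):
     *
     *   rs.moveToInsertRow();
     *   rs.updateInt("ID", 1);
     *   rs.updateString("NAME", "Hello");
     *   rs.insertRow();
     *   rs.moveToCurrentRow();
     */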
/**
* Updates the current row.
*
* @throws SQLException if the result set is closed, if the current row is
* the insert row or if not on a valid row, or if the result set
     * is not updatable
*/
@Override
public void updateRow() throws SQLException {
try {
debugCodeCall("updateRow");
checkUpdatable();
if (insertRow != null) {
throw DbException.get(ErrorCode.NOT_ON_UPDATABLE_ROW);
}
checkOnValidRow();
if (updateRow != null) {
UpdatableRow row = getUpdatableRow();
Value[] current = new Value[columnCount];
for (int i = 0; i < updateRow.length; i++) {
current[i] = get(i + 1);
}
row.updateRow(current, updateRow);
for (int i = 0; i < updateRow.length; i++) {
if (updateRow[i] == null) {
updateRow[i] = current[i];
}
}
Value[] patch = row.readRow(updateRow);
patchCurrentRow(patch);
updateRow = null;
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
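    /*
     * Usage sketch (editorial illustration): columns that were not explicitly
     * updated keep their current value.
     *
     *   rs.next();
     *   rs.updateString("NAME", "World");
     *   rs.updateRow();
     */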
/**
* Deletes the current row.
*
* @throws SQLException if the result set is closed, if the current row is
* the insert row or if not on a valid row, or if the result set
     * is not updatable
*/
@Override
public void deleteRow() throws SQLException {
try {
debugCodeCall("deleteRow");
checkUpdatable();
if (insertRow != null) {
throw DbException.get(ErrorCode.NOT_ON_UPDATABLE_ROW);
}
checkOnValidRow();
getUpdatableRow().deleteRow(result.currentRow());
updateRow = null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Re-reads the current row from the database.
*
* @throws SQLException if the result set is closed or if the current row is
* the insert row or if the row has been deleted or if not on a
* valid row
*/
@Override
public void refreshRow() throws SQLException {
try {
debugCodeCall("refreshRow");
checkClosed();
if (insertRow != null) {
throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
}
checkOnValidRow();
patchCurrentRow(getUpdatableRow().readRow(result.currentRow()));
updateRow = null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Cancels updating a row.
*
* @throws SQLException if the result set is closed or if the current row is
* the insert row
*/
@Override
public void cancelRowUpdates() throws SQLException {
try {
debugCodeCall("cancelRowUpdates");
checkClosed();
if (insertRow != null) {
throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
}
updateRow = null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
// =============================================================
private UpdatableRow getUpdatableRow() throws SQLException {
UpdatableRow row = new UpdatableRow(conn, result);
if (!row.isUpdatable()) {
throw DbException.get(ErrorCode.RESULT_SET_NOT_UPDATABLE);
}
return row;
}
private int getColumnIndex(String columnLabel) {
checkClosed();
if (columnLabel == null) {
throw DbException.getInvalidValueException("columnLabel", null);
}
if (columnCount >= 3) {
// use a hash table if more than 2 columns
if (columnLabelMap == null) {
HashMap<String, Integer> map = new HashMap<>(columnCount);
// column labels have higher priority
for (int i = 0; i < columnCount; i++) {
String c = StringUtils.toUpperEnglish(result.getAlias(i));
mapColumn(map, c, i);
}
for (int i = 0; i < columnCount; i++) {
String colName = result.getColumnName(i);
if (colName != null) {
colName = StringUtils.toUpperEnglish(colName);
mapColumn(map, colName, i);
String tabName = result.getTableName(i);
if (tabName != null) {
colName = StringUtils.toUpperEnglish(tabName) + "." + colName;
mapColumn(map, colName, i);
}
}
}
// assign at the end so concurrent access is supported
columnLabelMap = map;
if (preparedStatement != null) {
preparedStatement.setCachedColumnLabelMap(columnLabelMap);
}
}
Integer index = columnLabelMap.get(StringUtils.toUpperEnglish(columnLabel));
if (index == null) {
throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnLabel);
}
return index.intValue() + 1;
}
for (int i = 0; i < columnCount; i++) {
if (columnLabel.equalsIgnoreCase(result.getAlias(i))) {
return i + 1;
}
}
int idx = columnLabel.indexOf('.');
if (idx > 0) {
String table = columnLabel.substring(0, idx);
String col = columnLabel.substring(idx+1);
for (int i = 0; i < columnCount; i++) {
if (table.equalsIgnoreCase(result.getTableName(i)) &&
col.equalsIgnoreCase(result.getColumnName(i))) {
return i + 1;
}
}
} else {
for (int i = 0; i < columnCount; i++) {
if (columnLabel.equalsIgnoreCase(result.getColumnName(i))) {
return i + 1;
}
}
}
throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnLabel);
}
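    /*
     * Resolution sketch (editorial illustration): for a query such as
     * "SELECT NAME AS N FROM TEST", getColumnIndex("N") matches the alias,
     * getColumnIndex("NAME") falls back to the column name, and
     * getColumnIndex("TEST.NAME") matches the table-qualified name; all
     * lookups are case-insensitive.
     */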
private static void mapColumn(HashMap<String, Integer> map, String label,
int index) {
// put the index (usually that's the only operation)
Integer old = map.put(label, index);
if (old != null) {
// if there was a clash (which is seldom),
// put the old one back
map.put(label, old);
}
}
private void checkColumnIndex(int columnIndex) {
checkClosed();
if (columnIndex < 1 || columnIndex > columnCount) {
throw DbException.getInvalidValueException("columnIndex", columnIndex);
}
}
/**
* Check if this result set is closed.
*
* @throws DbException if it is closed
*/
void checkClosed() {
if (result == null) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
if (stat != null) {
stat.checkClosed();
}
if (conn != null) {
conn.checkClosed();
}
}
private boolean isOnValidRow() {
return result.getRowId() >= 0 && !result.isAfterLast();
}
private void checkOnValidRow() {
if (!isOnValidRow()) {
throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
}
}
/**
* INTERNAL
*
* @param columnIndex
* index of a column
* @return internal representation of the value in the specified column
*/
public Value get(int columnIndex) {
checkColumnIndex(columnIndex);
checkOnValidRow();
Value[] list;
if (patchedRows == null) {
list = result.currentRow();
} else {
list = patchedRows.get(result.getRowId());
if (list == null) {
list = result.currentRow();
}
}
Value value = list[columnIndex - 1];
wasNull = value == ValueNull.INSTANCE;
return value;
}
private Value get(String columnLabel) {
int columnIndex = getColumnIndex(columnLabel);
return get(columnIndex);
}
private void update(String columnLabel, Value v) {
int columnIndex = getColumnIndex(columnLabel);
update(columnIndex, v);
}
private void update(int columnIndex, Value v) {
checkUpdatable();
checkColumnIndex(columnIndex);
if (insertRow != null) {
insertRow[columnIndex - 1] = v;
} else {
if (updateRow == null) {
updateRow = new Value[columnCount];
}
updateRow[columnIndex - 1] = v;
}
}
private boolean nextRow() {
if (result.isLazy() && stat.isCancelled()) {
throw DbException.get(ErrorCode.STATEMENT_WAS_CANCELED);
}
boolean next = result.next();
if (!next && !scrollable) {
result.close();
}
return next;
}
private void resetResult() {
if (!scrollable) {
throw DbException.get(ErrorCode.RESULT_SET_NOT_SCROLLABLE);
}
result.reset();
}
/**
* [Not supported] Returns the value of the specified column as a row id.
*
* @param columnIndex (1,2,...)
*/
@Override
public RowId getRowId(int columnIndex) throws SQLException {
throw unsupported("rowId");
}
/**
* [Not supported] Returns the value of the specified column as a row id.
*
* @param columnLabel the column label
*/
@Override
public RowId getRowId(String columnLabel) throws SQLException {
throw unsupported("rowId");
}
/**
* [Not supported] Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
*/
@Override
public void updateRowId(int columnIndex, RowId x) throws SQLException {
throw unsupported("rowId");
}
/**
* [Not supported] Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
*/
@Override
public void updateRowId(String columnLabel, RowId x) throws SQLException {
throw unsupported("rowId");
}
/**
* Returns the current result set holdability.
*
* @return the holdability
* @throws SQLException if the connection is closed
*/
@Override
public int getHoldability() throws SQLException {
try {
debugCodeCall("getHoldability");
checkClosed();
return conn.getHoldability();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns whether this result set is closed.
*
* @return true if the result set is closed
*/
@Override
public boolean isClosed() throws SQLException {
try {
debugCodeCall("isClosed");
return result == null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNString(int columnIndex, String x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateNString("+columnIndex+", "+quote(x)+");");
}
update(columnIndex, x == null ? (Value)
ValueNull.INSTANCE : ValueString.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNString(String columnLabel, String x) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateNString("+quote(columnLabel)+", "+quote(x)+");");
}
update(columnLabel, x == null ? (Value) ValueNull.INSTANCE :
ValueString.get(x));
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
*/
@Override
public void updateNClob(int columnIndex, NClob x) throws SQLException {
throw unsupported("NClob");
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNClob(int columnIndex, Reader x) throws SQLException {
updateClob(columnIndex, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the length
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNClob(int columnIndex, Reader x, long length)
throws SQLException {
updateClob(columnIndex, x, length);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNClob(String columnLabel, Reader x) throws SQLException {
updateClob(columnLabel, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the length
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNClob(String columnLabel, Reader x, long length)
throws SQLException {
updateClob(columnLabel, x, length);
}
/**
* [Not supported]
*/
@Override
public void updateNClob(String columnLabel, NClob x) throws SQLException {
throw unsupported("NClob");
}
/**
     * Returns the value of the specified column as an NClob.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public NClob getNClob(int columnIndex) throws SQLException {
try {
int id = getNextId(TraceObject.CLOB);
if (isDebugEnabled()) {
debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnIndex + ")");
}
Value v = get(columnIndex);
return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
     * Returns the value of the specified column as an NClob.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public NClob getNClob(String columnLabel) throws SQLException {
try {
int id = getNextId(TraceObject.CLOB);
if (isDebugEnabled()) {
debugCodeAssign("NClob", TraceObject.CLOB, id, "getNClob(" + columnLabel + ")");
}
Value v = get(columnLabel);
return v == ValueNull.INSTANCE ? null : new JdbcClob(conn, v, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported] Returns the value of the specified column as a SQLXML
* object.
*/
@Override
public SQLXML getSQLXML(int columnIndex) throws SQLException {
throw unsupported("SQLXML");
}
/**
* [Not supported] Returns the value of the specified column as a SQLXML
* object.
*/
@Override
public SQLXML getSQLXML(String columnLabel) throws SQLException {
throw unsupported("SQLXML");
}
/**
* [Not supported] Updates a column in the current or insert row.
*/
@Override
public void updateSQLXML(int columnIndex, SQLXML xmlObject)
throws SQLException {
throw unsupported("SQLXML");
}
/**
* [Not supported] Updates a column in the current or insert row.
*/
@Override
public void updateSQLXML(String columnLabel, SQLXML xmlObject)
throws SQLException {
throw unsupported("SQLXML");
}
/**
* Returns the value of the specified column as a String.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public String getNString(int columnIndex) throws SQLException {
try {
debugCodeCall("getNString", columnIndex);
return get(columnIndex).getString();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a String.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public String getNString(String columnLabel) throws SQLException {
try {
debugCodeCall("getNString", columnLabel);
return get(columnLabel).getString();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a reader.
*
* @param columnIndex (1,2,...)
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Reader getNCharacterStream(int columnIndex) throws SQLException {
try {
debugCodeCall("getNCharacterStream", columnIndex);
return get(columnIndex).getReader();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the value of the specified column as a reader.
*
* @param columnLabel the column label
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public Reader getNCharacterStream(String columnLabel) throws SQLException {
try {
debugCodeCall("getNCharacterStream", columnLabel);
return get(columnLabel).getReader();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNCharacterStream(int columnIndex, Reader x)
throws SQLException {
updateNCharacterStream(columnIndex, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnIndex (1,2,...)
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNCharacterStream(int columnIndex, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateNCharacterStream("+columnIndex+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createClob(x, length);
update(columnIndex, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNCharacterStream(String columnLabel, Reader x)
throws SQLException {
updateNCharacterStream(columnLabel, x, -1);
}
/**
* Updates a column in the current or insert row.
*
* @param columnLabel the column label
* @param x the value
* @param length the number of characters
* @throws SQLException if the result set is closed or not updatable
*/
@Override
public void updateNCharacterStream(String columnLabel, Reader x, long length)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("updateNCharacterStream("+quote(columnLabel)+", x, "+length+"L);");
}
checkClosed();
Value v = conn.createClob(x, length);
update(columnLabel, v);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Return an object of this class if possible.
*
* @param iface the class
* @return this
*/
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
if (isWrapperFor(iface)) {
return (T) this;
}
throw DbException.getInvalidValueException("iface", iface);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if unwrap can return an object of this class.
*
* @param iface the class
* @return whether or not the interface is assignable from this class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface != null && iface.isAssignableFrom(getClass());
}
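// Usage sketch (illustrative, not part of the original source): the wrapper
// pattern above lets a caller reach this H2-specific class behind a plain
// java.sql.ResultSet reference; "rs" is a hypothetical reference.
//
//     if (rs.isWrapperFor(JdbcResultSet.class)) {
//         JdbcResultSet h2ResultSet = rs.unwrap(JdbcResultSet.class);
//     }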
/**
* Returns a column value as a Java object. The data is
* de-serialized into a Java object (on the client side).
*
* @param columnIndex the column index (1, 2, ...)
* @param type the class of the returned value
* @return the value
* @throws SQLException if the column is not found or if the result set is
* closed
*/
@Override
public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
try {
if (type == null) {
throw DbException.getInvalidValueException("type", type);
}
debugCodeCall("getObject", columnIndex);
Value value = get(columnIndex);
return extractObjectOfType(type, value);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns a column value as a Java object. The data is
* de-serialized into a Java object (on the client side).
*
* @param columnName the column name
* @param type the class of the returned value
* @return the value
*/
@Override
public <T> T getObject(String columnName, Class<T> type) throws SQLException {
try {
if (type == null) {
throw DbException.getInvalidValueException("type", type);
}
debugCodeCall("getObject", columnName);
Value value = get(columnName);
return extractObjectOfType(type, value);
} catch (Exception e) {
throw logAndConvert(e);
}
}
private <T> T extractObjectOfType(Class<T> type, Value value) throws SQLException {
if (value == ValueNull.INSTANCE) {
return null;
}
if (type == BigDecimal.class) {
return type.cast(value.getBigDecimal());
} else if (type == BigInteger.class) {
return type.cast(value.getBigDecimal().toBigInteger());
} else if (type == String.class) {
return type.cast(value.getString());
} else if (type == Boolean.class) {
return type.cast(value.getBoolean());
} else if (type == Byte.class) {
return type.cast(value.getByte());
} else if (type == Short.class) {
return type.cast(value.getShort());
} else if (type == Integer.class) {
return type.cast(value.getInt());
} else if (type == Long.class) {
return type.cast(value.getLong());
} else if (type == Float.class) {
return type.cast(value.getFloat());
} else if (type == Double.class) {
return type.cast(value.getDouble());
} else if (type == Date.class) {
return type.cast(value.getDate());
} else if (type == Time.class) {
return type.cast(value.getTime());
} else if (type == Timestamp.class) {
return type.cast(value.getTimestamp());
} else if (type == java.util.Date.class) {
return type.cast(new java.util.Date(value.getTimestamp().getTime()));
} else if (type == Calendar.class) {
Calendar calendar = DateTimeUtils.createGregorianCalendar();
calendar.setTime(value.getTimestamp());
return type.cast(calendar);
} else if (type == UUID.class) {
return type.cast(value.getObject());
} else if (type == byte[].class) {
return type.cast(value.getBytes());
} else if (type == java.sql.Array.class) {
int id = getNextId(TraceObject.ARRAY);
return type.cast(value == ValueNull.INSTANCE ? null : new JdbcArray(conn, value, id));
} else if (type == Blob.class) {
int id = getNextId(TraceObject.BLOB);
return type.cast(value == ValueNull.INSTANCE ? null : new JdbcBlob(conn, value, id));
} else if (type == Clob.class) {
int id = getNextId(TraceObject.CLOB);
return type.cast(value == ValueNull.INSTANCE ? null : new JdbcClob(conn, value, id));
} else if (type == TimestampWithTimeZone.class) {
return type.cast(value.getObject());
} else if (DataType.isGeometryClass(type)) {
return type.cast(value.getObject());
} else if (type == LocalDateTimeUtils.LOCAL_DATE) {
return type.cast(LocalDateTimeUtils.valueToLocalDate(value));
} else if (type == LocalDateTimeUtils.LOCAL_TIME) {
return type.cast(LocalDateTimeUtils.valueToLocalTime(value));
} else if (type == LocalDateTimeUtils.LOCAL_DATE_TIME) {
return type.cast(LocalDateTimeUtils.valueToLocalDateTime(value));
} else if (type == LocalDateTimeUtils.INSTANT) {
return type.cast(LocalDateTimeUtils.valueToInstant(value));
} else if (type == LocalDateTimeUtils.OFFSET_DATE_TIME) {
return type.cast(LocalDateTimeUtils.valueToOffsetDateTime(value));
} else {
throw unsupported(type.getName());
}
}
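// Usage sketch (illustrative; statement and column names are hypothetical):
// getObject(int, Class) delegates to extractObjectOfType above, so callers
// can request java.time types directly where the JVM supports them.
//
//     try (ResultSet rs = stat.executeQuery("SELECT CREATED FROM EVENTS")) {
//         while (rs.next()) {
//             java.time.LocalDateTime created =
//                     rs.getObject(1, java.time.LocalDateTime.class);
//         }
//     }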
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": " + result;
}
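// Note: patchCurrentRow below caches an updated row image keyed by the
// current row id, presumably so that reads of the current row observe
// pending updates before they are written back; when the new values equal
// the current row, any earlier patch for that row id is discarded.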
private void patchCurrentRow(Value[] row) {
boolean changed = false;
Value[] current = result.currentRow();
CompareMode mode = conn.getCompareMode();
for (int i = 0; i < row.length; i++) {
if (row[i].compareTo(current[i], mode) != 0) {
changed = true;
break;
}
}
if (patchedRows == null) {
patchedRows = new HashMap<>();
}
Integer rowId = result.getRowId();
if (!changed) {
patchedRows.remove(rowId);
} else {
patchedRows.put(rowId, row);
}
}
private Value convertToUnknownValue(Object x) {
checkClosed();
return DataType.convertToValue(conn.getSession(), x, Value.UNKNOWN);
}
private void checkUpdatable() {
checkClosed();
if (!updatable) {
throw DbException.get(ErrorCode.RESULT_SET_READONLY);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcResultSetBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcResultSetBackwardsCompat {
// compatibility interface
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcResultSetMetaData.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.message.TraceObject;
import org.h2.result.ResultInterface;
import org.h2.util.MathUtils;
import org.h2.value.DataType;
/**
* Represents the meta data for a ResultSet.
*/
public class JdbcResultSetMetaData extends TraceObject implements
ResultSetMetaData {
private final String catalog;
private final JdbcResultSet rs;
private final JdbcPreparedStatement prep;
private final ResultInterface result;
private final int columnCount;
JdbcResultSetMetaData(JdbcResultSet rs, JdbcPreparedStatement prep,
ResultInterface result, String catalog, Trace trace, int id) {
setTrace(trace, TraceObject.RESULT_SET_META_DATA, id);
this.catalog = catalog;
this.rs = rs;
this.prep = prep;
this.result = result;
this.columnCount = result.getVisibleColumnCount();
}
/**
* Returns the number of columns.
*
* @return the number of columns
* @throws SQLException if the result set is closed or invalid
*/
@Override
public int getColumnCount() throws SQLException {
try {
debugCodeCall("getColumnCount");
checkClosed();
return columnCount;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the column label.
*
* @param column the column index (1,2,...)
* @return the column label
* @throws SQLException if the result set is closed or invalid
*/
@Override
public String getColumnLabel(int column) throws SQLException {
try {
debugCodeCall("getColumnLabel", column);
checkColumnIndex(column);
return result.getAlias(--column);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the column name.
*
* @param column the column index (1,2,...)
* @return the column name
* @throws SQLException if the result set is closed or invalid
*/
@Override
public String getColumnName(int column) throws SQLException {
try {
debugCodeCall("getColumnName", column);
checkColumnIndex(column);
return result.getColumnName(--column);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the data type of a column.
* See also java.sql.Type.
*
* @param column the column index (1,2,...)
* @return the data type
* @throws SQLException if the result set is closed or invalid
*/
@Override
public int getColumnType(int column) throws SQLException {
try {
debugCodeCall("getColumnType", column);
checkColumnIndex(column);
int type = result.getColumnType(--column);
return DataType.convertTypeToSQLType(type);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the data type name of a column.
*
* @param column the column index (1,2,...)
* @return the data type name
* @throws SQLException if the result set is closed or invalid
*/
@Override
public String getColumnTypeName(int column) throws SQLException {
try {
debugCodeCall("getColumnTypeName", column);
checkColumnIndex(column);
int type = result.getColumnType(--column);
return DataType.getDataType(type).name;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the schema name.
*
* @param column the column index (1,2,...)
* @return the schema name, or "" (an empty string) if not applicable
* @throws SQLException if the result set is closed or invalid
*/
@Override
public String getSchemaName(int column) throws SQLException {
try {
debugCodeCall("getSchemaName", column);
checkColumnIndex(column);
String schema = result.getSchemaName(--column);
return schema == null ? "" : schema;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the table name.
*
* @param column the column index (1,2,...)
* @return the table name
* @throws SQLException if the result set is closed or invalid
*/
@Override
public String getTableName(int column) throws SQLException {
try {
debugCodeCall("getTableName", column);
checkColumnIndex(column);
String table = result.getTableName(--column);
return table == null ? "" : table;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the catalog name.
*
* @param column the column index (1,2,...)
* @return the catalog name
* @throws SQLException if the result set is closed or invalid
*/
@Override
public String getCatalogName(int column) throws SQLException {
try {
debugCodeCall("getCatalogName", column);
checkColumnIndex(column);
return catalog == null ? "" : catalog;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this is an autoincrement column.
*
* @param column the column index (1,2,...)
* @return whether the column is an autoincrement column
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isAutoIncrement(int column) throws SQLException {
try {
debugCodeCall("isAutoIncrement", column);
checkColumnIndex(column);
return result.isAutoIncrement(--column);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this column is case sensitive.
* It always returns true.
*
* @param column the column index (1,2,...)
* @return true
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isCaseSensitive(int column) throws SQLException {
try {
debugCodeCall("isCaseSensitive", column);
checkColumnIndex(column);
return true;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this column is searchable.
* It always returns true.
*
* @param column the column index (1,2,...)
* @return true
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isSearchable(int column) throws SQLException {
try {
debugCodeCall("isSearchable", column);
checkColumnIndex(column);
return true;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this is a currency column.
* It always returns false.
*
* @param column the column index (1,2,...)
* @return false
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isCurrency(int column) throws SQLException {
try {
debugCodeCall("isCurrency", column);
checkColumnIndex(column);
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this column is nullable. Returns
* ResultSetMetaData.columnNullableUnknown if this is not a column of a
* table. Otherwise, it returns ResultSetMetaData.columnNoNulls if the
* column is not nullable, and ResultSetMetaData.columnNullable if it is
* nullable.
*
* @param column the column index (1,2,...)
* @return ResultSetMetaData.column*
* @throws SQLException if the result set is closed or invalid
*/
@Override
public int isNullable(int column) throws SQLException {
try {
debugCodeCall("isNullable", column);
checkColumnIndex(column);
return result.getNullable(--column);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this column is signed.
* It always returns true.
*
* @param column the column index (1,2,...)
* @return true
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isSigned(int column) throws SQLException {
try {
debugCodeCall("isSigned", column);
checkColumnIndex(column);
return true;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if this column is read only.
* It always returns false.
*
* @param column the column index (1,2,...)
* @return false
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isReadOnly(int column) throws SQLException {
try {
debugCodeCall("isReadOnly", column);
checkColumnIndex(column);
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks whether it is possible for a write on this column to succeed.
* It always returns true.
*
* @param column the column index (1,2,...)
* @return true
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isWritable(int column) throws SQLException {
try {
debugCodeCall("isWritable", column);
checkColumnIndex(column);
return true;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks whether a write on this column will definitely succeed.
* It always returns false.
*
* @param column the column index (1,2,...)
* @return false
* @throws SQLException if the result set is closed or invalid
*/
@Override
public boolean isDefinitelyWritable(int column) throws SQLException {
try {
debugCodeCall("isDefinitelyWritable", column);
checkColumnIndex(column);
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the Java class name of the object that will be returned
* if ResultSet.getObject is called.
*
* @param column the column index (1,2,...)
* @return the Java class name
* @throws SQLException if the result set is closed or invalid
*/
@Override
public String getColumnClassName(int column) throws SQLException {
try {
debugCodeCall("getColumnClassName", column);
checkColumnIndex(column);
int type = result.getColumnType(--column);
return DataType.getTypeClassName(type);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the precision for this column.
*
* @param column the column index (1,2,...)
* @return the precision
* @throws SQLException if the result set is closed or invalid
*/
@Override
public int getPrecision(int column) throws SQLException {
try {
debugCodeCall("getPrecision", column);
checkColumnIndex(column);
long prec = result.getColumnPrecision(--column);
return MathUtils.convertLongToInt(prec);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the scale for this column.
*
* @param column the column index (1,2,...)
* @return the scale
* @throws SQLException if the result set is closed or invalid
*/
@Override
public int getScale(int column) throws SQLException {
try {
debugCodeCall("getScale", column);
checkColumnIndex(column);
return result.getColumnScale(--column);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the maximum display size for this column.
*
* @param column the column index (1,2,...)
* @return the display size
* @throws SQLException if the result set is closed or invalid
*/
@Override
public int getColumnDisplaySize(int column) throws SQLException {
try {
debugCodeCall("getColumnDisplaySize", column);
checkColumnIndex(column);
return result.getDisplaySize(--column);
} catch (Exception e) {
throw logAndConvert(e);
}
}
private void checkClosed() {
if (rs != null) {
rs.checkClosed();
}
if (prep != null) {
prep.checkClosed();
}
}
private void checkColumnIndex(int columnIndex) {
checkClosed();
if (columnIndex < 1 || columnIndex > columnCount) {
throw DbException.getInvalidValueException("columnIndex", columnIndex);
}
}
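// Usage sketch (illustrative; the result set is hypothetical): iterating
// over result set metadata. The public methods above are 1-based, while
// the underlying ResultInterface is 0-based, hence the --column decrement.
//
//     ResultSetMetaData meta = rs.getMetaData();
//     for (int i = 1; i <= meta.getColumnCount(); i++) {
//         System.out.println(meta.getColumnLabel(i)
//                 + " " + meta.getColumnTypeName(i));
//     }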
/**
* Return an object of this class if possible.
*
* @param iface the class
* @return this
*/
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
if (isWrapperFor(iface)) {
return (T) this;
}
throw DbException.getInvalidValueException("iface", iface);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if unwrap can return an object of this class.
*
* @param iface the class
* @return whether or not the interface is assignable from this class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface != null && iface.isAssignableFrom(getClass());
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": columns=" + columnCount;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcSQLException.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.sql.SQLException;
import org.h2.engine.Constants;
/**
* Represents a database exception.
*/
public class JdbcSQLException extends SQLException {
/**
* If the SQL statement contains this text, then it is never added to the
* SQL exception. Hiding the SQL statement may be important if it contains a
* password, such as in a CREATE LINKED TABLE statement.
*/
public static final String HIDE_SQL = "--hide--";
private static final long serialVersionUID = 1L;
private final String originalMessage;
private final Throwable cause;
private final String stackTrace;
private String message;
private String sql;
/**
* Creates a SQLException.
*
* @param message the reason
* @param sql the SQL statement
* @param state the SQL state
* @param errorCode the error code
* @param cause the exception that was the reason for this exception
* @param stackTrace the stack trace
*/
public JdbcSQLException(String message, String sql, String state,
int errorCode, Throwable cause, String stackTrace) {
super(message, state, errorCode);
this.originalMessage = message;
setSQL(sql);
this.cause = cause;
this.stackTrace = stackTrace;
buildMessage();
initCause(cause);
}
/**
* Get the detail error message.
*
* @return the message
*/
@Override
public String getMessage() {
return message;
}
/**
* INTERNAL
*/
public String getOriginalMessage() {
return originalMessage;
}
/**
* Prints the stack trace to the standard error stream.
*/
@Override
public void printStackTrace() {
// The default implementation already does that,
// but we do it again to be safe: if this class did not
// override the method, somebody might implement it later on,
// which would be a problem if done in the wrong way.
printStackTrace(System.err);
}
/**
* Prints the stack trace to the specified print writer.
*
* @param s the print writer
*/
@Override
public void printStackTrace(PrintWriter s) {
if (s != null) {
super.printStackTrace(s);
// getNextException().printStackTrace(s) would be very very slow
// if many exceptions are joined
SQLException next = getNextException();
for (int i = 0; i < 100 && next != null; i++) {
s.println(next.toString());
next = next.getNextException();
}
if (next != null) {
s.println("(truncated)");
}
}
}
/**
* Prints the stack trace to the specified print stream.
*
* @param s the print stream
*/
@Override
public void printStackTrace(PrintStream s) {
if (s != null) {
super.printStackTrace(s);
// getNextException().printStackTrace(s) would be very very slow
// if many exceptions are joined
SQLException next = getNextException();
for (int i = 0; i < 100 && next != null; i++) {
s.println(next.toString());
next = next.getNextException();
}
if (next != null) {
s.println("(truncated)");
}
}
}
/**
* INTERNAL
*/
public Throwable getOriginalCause() {
return cause;
}
/**
* Returns the SQL statement.
* SQL statements that contain '--hide--' are not listed.
*
* @return the SQL statement
*/
public String getSQL() {
return sql;
}
/**
* INTERNAL
*/
public void setSQL(String sql) {
if (sql != null && sql.contains(HIDE_SQL)) {
sql = "-";
}
this.sql = sql;
buildMessage();
}
private void buildMessage() {
StringBuilder buff = new StringBuilder(originalMessage == null ?
"- " : originalMessage);
if (sql != null) {
buff.append("; SQL statement:\n").append(sql);
}
buff.append(" [").append(getErrorCode()).
append('-').append(Constants.BUILD_ID).append(']');
message = buff.toString();
}
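// Example of the message format produced by buildMessage (values are
// illustrative): the original message, the SQL statement (or "-" if it
// was hidden via HIDE_SQL), then the error code and build id:
//
//     Table "TEST" not found; SQL statement:
//     SELECT * FROM TEST [42102-196]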
/**
* Returns the class name, the message, and in the server mode, the stack
* trace of the server
*
* @return the string representation
*/
@Override
public String toString() {
if (stackTrace == null) {
return super.toString();
}
return stackTrace;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcSavepoint.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.SQLException;
import java.sql.Savepoint;
import org.h2.api.ErrorCode;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.message.TraceObject;
import org.h2.util.StringUtils;
/**
* A savepoint is a point inside a transaction to which the transaction can be
* rolled back. The tasks that were done before the savepoint are not rolled
* back in this case.
*/
public class JdbcSavepoint extends TraceObject implements Savepoint {
private static final String SYSTEM_SAVEPOINT_PREFIX = "SYSTEM_SAVEPOINT_";
private final int savepointId;
private final String name;
private JdbcConnection conn;
JdbcSavepoint(JdbcConnection conn, int savepointId, String name,
Trace trace, int id) {
setTrace(trace, TraceObject.SAVEPOINT, id);
this.conn = conn;
this.savepointId = savepointId;
this.name = name;
}
/**
* Release this savepoint. This method only sets the connection to null and
* does not execute a statement.
*/
void release() {
this.conn = null;
}
/**
* Get the savepoint name for this name or id.
* If the name is null, the id is used.
*
* @param name the name (may be null)
* @param id the id
* @return the savepoint name
*/
static String getName(String name, int id) {
if (name != null) {
return StringUtils.quoteJavaString(name);
}
return SYSTEM_SAVEPOINT_PREFIX + id;
}
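// Usage sketch (illustrative; the statements are hypothetical): savepoints
// are created through Connection.setSavepoint and rolled back through the
// connection, which ends up calling rollback() below.
//
//     conn.setAutoCommit(false);
//     Savepoint sp = conn.setSavepoint("BEFORE_UPDATE");
//     stat.executeUpdate("UPDATE TEST SET NAME = 'X'");
//     conn.rollback(sp); // undoes the update, keeps earlier work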
/**
* Roll back to this savepoint.
*/
void rollback() {
checkValid();
conn.prepareCommand(
"ROLLBACK TO SAVEPOINT " + getName(name, savepointId),
Integer.MAX_VALUE).executeUpdate(false);
}
private void checkValid() {
if (conn == null) {
throw DbException.get(ErrorCode.SAVEPOINT_IS_INVALID_1,
getName(name, savepointId));
}
}
/**
* Get the generated id of this savepoint.
* @return the id
*/
@Override
public int getSavepointId() throws SQLException {
try {
debugCodeCall("getSavepointId");
checkValid();
if (name != null) {
throw DbException.get(ErrorCode.SAVEPOINT_IS_NAMED);
}
return savepointId;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Get the name of this savepoint.
* @return the name
*/
@Override
public String getSavepointName() throws SQLException {
try {
debugCodeCall("getSavepointName");
checkValid();
if (name == null) {
throw DbException.get(ErrorCode.SAVEPOINT_IS_UNNAMED);
}
return name;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": id=" + savepointId + " name=" + name;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcStatement.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Statement;
import java.util.ArrayList;
import org.h2.api.ErrorCode;
import org.h2.command.CommandInterface;
import org.h2.engine.SessionInterface;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.result.ResultInterface;
import org.h2.result.ResultWithGeneratedKeys;
import org.h2.tools.SimpleResultSet;
import org.h2.util.New;
import org.h2.util.ParserUtil;
import org.h2.util.StringUtils;
/**
* Represents a statement.
*/
public class JdbcStatement extends TraceObject implements Statement, JdbcStatementBackwardsCompat {
protected JdbcConnection conn;
protected SessionInterface session;
protected JdbcResultSet resultSet;
protected int maxRows;
protected int fetchSize = SysProperties.SERVER_RESULT_SET_FETCH_SIZE;
protected int updateCount;
protected JdbcResultSet generatedKeys;
protected final int resultSetType;
protected final int resultSetConcurrency;
protected final boolean closedByResultSet;
private volatile CommandInterface executingCommand;
private int lastExecutedCommandType;
private ArrayList<String> batchCommands;
private boolean escapeProcessing = true;
private volatile boolean cancelled;
JdbcStatement(JdbcConnection conn, int id, int resultSetType,
int resultSetConcurrency, boolean closeWithResultSet) {
this.conn = conn;
this.session = conn.getSession();
setTrace(session.getTrace(), TraceObject.STATEMENT, id);
this.resultSetType = resultSetType;
this.resultSetConcurrency = resultSetConcurrency;
this.closedByResultSet = closeWithResultSet;
}
/**
* Executes a query (select statement) and returns the result set.
* If another result set exists for this statement, this will be closed
* (even if this statement fails).
*
* @param sql the SQL statement to execute
* @return the result set
*/
@Override
public ResultSet executeQuery(String sql) throws SQLException {
try {
int id = getNextId(TraceObject.RESULT_SET);
if (isDebugEnabled()) {
debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id,
"executeQuery(" + quote(sql) + ")");
}
synchronized (session) {
checkClosed();
closeOldResultSet();
sql = JdbcConnection.translateSQL(sql, escapeProcessing);
CommandInterface command = conn.prepareCommand(sql, fetchSize);
ResultInterface result;
boolean lazy = false;
boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY;
boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE;
setExecutingStatement(command);
try {
result = command.executeQuery(maxRows, scrollable);
lazy = result.isLazy();
} finally {
if (!lazy) {
setExecutingStatement(null);
}
}
if (!lazy) {
command.close();
}
resultSet = new JdbcResultSet(conn, this, command, result, id,
closedByResultSet, scrollable, updatable);
}
return resultSet;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, this will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
* @param sql the SQL statement
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public int executeUpdate(String sql) throws SQLException {
try {
debugCodeCall("executeUpdate", sql);
return executeUpdateInternal(sql, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, this will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
* @param sql the SQL statement
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public long executeLargeUpdate(String sql) throws SQLException {
try {
debugCodeCall("executeLargeUpdate", sql);
return executeUpdateInternal(sql, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
private int executeUpdateInternal(String sql, Object generatedKeysRequest) throws SQLException {
checkClosedForWrite();
try {
closeOldResultSet();
sql = JdbcConnection.translateSQL(sql, escapeProcessing);
CommandInterface command = conn.prepareCommand(sql, fetchSize);
synchronized (session) {
setExecutingStatement(command);
try {
ResultWithGeneratedKeys result = command.executeUpdate(
conn.scopeGeneratedKeys() ? false : generatedKeysRequest);
updateCount = result.getUpdateCount();
ResultInterface gk = result.getGeneratedKeys();
if (gk != null) {
int id = getNextId(TraceObject.RESULT_SET);
generatedKeys = new JdbcResultSet(conn, this, command, gk, id,
false, true, false);
}
} finally {
setExecutingStatement(null);
}
}
command.close();
return updateCount;
} finally {
afterWriting();
}
}
/**
* Executes an arbitrary statement. If another result set exists for this
* statement, this will be closed (even if this statement fails).
*
* If the statement is a create or drop and does not throw an exception, the
* current transaction (if any) is committed after executing the statement.
* If auto commit is on, and the statement is not a select, this statement
* will be committed.
*
* @param sql the SQL statement to execute
* @return true if a result set is available, false if not
*/
@Override
public boolean execute(String sql) throws SQLException {
try {
debugCodeCall("execute", sql);
return executeInternal(sql, false);
} catch (Exception e) {
throw logAndConvert(e);
}
}
private boolean executeInternal(String sql, Object generatedKeysRequest) throws SQLException {
int id = getNextId(TraceObject.RESULT_SET);
checkClosedForWrite();
try {
closeOldResultSet();
sql = JdbcConnection.translateSQL(sql, escapeProcessing);
CommandInterface command = conn.prepareCommand(sql, fetchSize);
boolean lazy = false;
boolean returnsResultSet;
synchronized (session) {
setExecutingStatement(command);
try {
if (command.isQuery()) {
returnsResultSet = true;
boolean scrollable = resultSetType != ResultSet.TYPE_FORWARD_ONLY;
boolean updatable = resultSetConcurrency == ResultSet.CONCUR_UPDATABLE;
ResultInterface result = command.executeQuery(maxRows, scrollable);
lazy = result.isLazy();
resultSet = new JdbcResultSet(conn, this, command, result, id,
closedByResultSet, scrollable, updatable);
} else {
returnsResultSet = false;
ResultWithGeneratedKeys result = command.executeUpdate(
conn.scopeGeneratedKeys() ? false : generatedKeysRequest);
updateCount = result.getUpdateCount();
ResultInterface gk = result.getGeneratedKeys();
if (gk != null) {
generatedKeys = new JdbcResultSet(conn, this, command, gk, id,
false, true, false);
}
}
} finally {
if (!lazy) {
setExecutingStatement(null);
}
}
}
if (!lazy) {
command.close();
}
return returnsResultSet;
} finally {
afterWriting();
}
}
/**
* Returns the last result set produced by this statement.
*
* @return the result set
*/
@Override
public ResultSet getResultSet() throws SQLException {
try {
checkClosed();
if (resultSet != null) {
int id = resultSet.getTraceId();
debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getResultSet()");
} else {
debugCodeCall("getResultSet");
}
return resultSet;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the last update count of this statement.
*
* @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback; -1 if the statement was a select).
* @throws SQLException if this object is closed or invalid
*/
@Override
public int getUpdateCount() throws SQLException {
try {
debugCodeCall("getUpdateCount");
checkClosed();
return updateCount;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the last update count of this statement.
*
* @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback; -1 if the statement was a select).
* @throws SQLException if this object is closed or invalid
*/
@Override
public long getLargeUpdateCount() throws SQLException {
try {
debugCodeCall("getLargeUpdateCount");
checkClosed();
return updateCount;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Closes this statement.
* All result sets that were created by this statement
* become invalid after calling this method.
*/
@Override
public void close() throws SQLException {
try {
debugCodeCall("close");
synchronized (session) {
closeOldResultSet();
if (conn != null) {
conn = null;
}
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Returns the connection that created this object.
*
* @return the connection
*/
@Override
public Connection getConnection() {
debugCodeCall("getConnection");
return conn;
}
/**
* Gets the first warning reported by calls on this object.
* This driver does not support warnings, and will always return null.
*
* @return null
*/
@Override
public SQLWarning getWarnings() throws SQLException {
try {
debugCodeCall("getWarnings");
checkClosed();
return null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Clears all warnings. As this driver does not support warnings,
* this call is ignored.
*/
@Override
public void clearWarnings() throws SQLException {
try {
debugCodeCall("clearWarnings");
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the name of the cursor. This call is ignored.
*
* @param name ignored
* @throws SQLException if this object is closed
*/
@Override
public void setCursorName(String name) throws SQLException {
try {
debugCodeCall("setCursorName", name);
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the fetch direction.
* This call is ignored by this driver.
*
* @param direction ignored
* @throws SQLException if this object is closed
*/
@Override
public void setFetchDirection(int direction) throws SQLException {
try {
debugCodeCall("setFetchDirection", direction);
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the fetch direction.
*
* @return FETCH_FORWARD
* @throws SQLException if this object is closed
*/
@Override
public int getFetchDirection() throws SQLException {
try {
debugCodeCall("getFetchDirection");
checkClosed();
return ResultSet.FETCH_FORWARD;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the maximum number of rows for a ResultSet.
*
* @return the number of rows where 0 means no limit
* @throws SQLException if this object is closed
*/
@Override
public int getMaxRows() throws SQLException {
try {
debugCodeCall("getMaxRows");
checkClosed();
return maxRows;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the maximum number of rows for a ResultSet.
*
* @return the number of rows where 0 means no limit
* @throws SQLException if this object is closed
*/
@Override
public long getLargeMaxRows() throws SQLException {
try {
debugCodeCall("getLargeMaxRows");
checkClosed();
return maxRows;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the maximum number of rows for a ResultSet.
*
* @param maxRows the number of rows where 0 means no limit
* @throws SQLException if this object is closed
*/
@Override
public void setMaxRows(int maxRows) throws SQLException {
try {
debugCodeCall("setMaxRows", maxRows);
checkClosed();
if (maxRows < 0) {
throw DbException.getInvalidValueException("maxRows", maxRows);
}
this.maxRows = maxRows;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the maximum number of rows for a ResultSet.
*
* @param maxRows the number of rows where 0 means no limit
* @throws SQLException if this object is closed
*/
@Override
public void setLargeMaxRows(long maxRows) throws SQLException {
try {
debugCodeCall("setLargeMaxRows", maxRows);
checkClosed();
if (maxRows < 0) {
throw DbException.getInvalidValueException("maxRows", maxRows);
}
this.maxRows = maxRows <= Integer.MAX_VALUE ? (int) maxRows : 0;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the number of rows suggested to read in one step.
* This value cannot be higher than the maximum rows (setMaxRows)
* set by the statement or prepared statement, otherwise an exception
* is thrown. Setting the value to 0 will set the default value.
* The default value can be changed using the system property
* h2.serverResultSetFetchSize.
*
* @param rows the number of rows
* @throws SQLException if this object is closed
*/
@Override
public void setFetchSize(int rows) throws SQLException {
try {
debugCodeCall("setFetchSize", rows);
checkClosed();
if (rows < 0 || (rows > 0 && maxRows > 0 && rows > maxRows)) {
throw DbException.getInvalidValueException("rows", rows);
}
if (rows == 0) {
rows = SysProperties.SERVER_RESULT_SET_FETCH_SIZE;
}
fetchSize = rows;
} catch (Exception e) {
throw logAndConvert(e);
}
}
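// Usage sketch (illustrative; the query is hypothetical): a smaller fetch
// size limits how many rows are transferred per round trip in
// client/server mode, at the price of more round trips.
//
//     stat.setFetchSize(100);
//     ResultSet rs = stat.executeQuery("SELECT * FROM BIG_TABLE");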
/**
* Gets the number of rows suggested to read in one step.
*
* @return the current fetch size
* @throws SQLException if this object is closed
*/
@Override
public int getFetchSize() throws SQLException {
try {
debugCodeCall("getFetchSize");
checkClosed();
return fetchSize;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the result set concurrency created by this object.
*
* @return the concurrency
*/
@Override
public int getResultSetConcurrency() throws SQLException {
try {
debugCodeCall("getResultSetConcurrency");
checkClosed();
return resultSetConcurrency;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the result set type.
*
* @return the type
* @throws SQLException if this object is closed
*/
@Override
public int getResultSetType() throws SQLException {
try {
debugCodeCall("getResultSetType");
checkClosed();
return resultSetType;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the maximum number of bytes for a result set column.
*
* @return always 0 for no limit
* @throws SQLException if this object is closed
*/
@Override
public int getMaxFieldSize() throws SQLException {
try {
debugCodeCall("getMaxFieldSize");
checkClosed();
return 0;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the maximum number of bytes for a result set column.
* This method currently does nothing for this driver.
*
* @param max the maximum size - ignored
* @throws SQLException if this object is closed
*/
@Override
public void setMaxFieldSize(int max) throws SQLException {
try {
debugCodeCall("setMaxFieldSize", max);
checkClosed();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Enables or disables processing of JDBC escape syntax.
* See also Connection.nativeSQL.
*
* @param enable - true (default) or false (no conversion is attempted)
* @throws SQLException if this object is closed
*/
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("setEscapeProcessing("+enable+");");
}
checkClosed();
escapeProcessing = enable;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Cancels a currently running statement.
* This method must be called from a different
* thread than the one executing the statement.
* Operations on large objects are not interrupted,
* only operations that process many rows.
*
* @throws SQLException if this object is closed
*/
@Override
public void cancel() throws SQLException {
try {
debugCodeCall("cancel");
checkClosed();
// executingCommand can be reset by another thread
CommandInterface c = executingCommand;
try {
if (c != null) {
c.cancel();
cancelled = true;
}
} finally {
setExecutingStatement(null);
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
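// Usage sketch (illustrative; the query is hypothetical): cancelling a
// long-running statement from a second thread.
//
//     final Statement stat = conn.createStatement();
//     new Thread(new Runnable() {
//         @Override
//         public void run() {
//             try {
//                 Thread.sleep(1000);
//                 stat.cancel();
//             } catch (Exception e) {
//                 // ignore in this sketch
//             }
//         }
//     }).start();
//     stat.execute("SELECT COUNT(*) FROM HUGE_TABLE");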
/**
* Check whether the statement was cancelled.
*
* @return true if yes
*/
public boolean isCancelled() {
return cancelled;
}
/**
* Gets the current query timeout in seconds.
* This method will return 0 if no query timeout is set.
* The result is rounded to the next second.
* For performance reasons, only the first call to this method
* will query the database. If the query timeout was changed by some
* means other than setQueryTimeout, this method will keep returning
* the last cached value.
*
* @return the timeout in seconds
* @throws SQLException if this object is closed
*/
@Override
public int getQueryTimeout() throws SQLException {
try {
debugCodeCall("getQueryTimeout");
checkClosed();
return conn.getQueryTimeout();
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Sets the current query timeout in seconds.
* Changing the value will affect all statements of this connection.
* This method does not commit a transaction,
* and rolling back a transaction does not affect this setting.
*
* @param seconds the timeout in seconds - 0 means no timeout, values
* smaller than 0 will throw an exception
* @throws SQLException if this object is closed
*/
@Override
public void setQueryTimeout(int seconds) throws SQLException {
try {
debugCodeCall("setQueryTimeout", seconds);
checkClosed();
if (seconds < 0) {
throw DbException.getInvalidValueException("seconds", seconds);
}
conn.setQueryTimeout(seconds);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Adds a statement to the batch.
*
* @param sql the SQL statement
*/
@Override
public void addBatch(String sql) throws SQLException {
try {
debugCodeCall("addBatch", sql);
checkClosed();
sql = JdbcConnection.translateSQL(sql, escapeProcessing);
if (batchCommands == null) {
batchCommands = New.arrayList();
}
batchCommands.add(sql);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Clears the batch.
*/
@Override
public void clearBatch() throws SQLException {
try {
debugCodeCall("clearBatch");
checkClosed();
batchCommands = null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes the batch.
* If one of the batched statements fails, this database will continue
* with the remaining statements.
*
* @return the array of update counts
*/
@Override
public int[] executeBatch() throws SQLException {
try {
debugCodeCall("executeBatch");
checkClosedForWrite();
try {
if (batchCommands == null) {
// TODO batch: check what other databases do if no commands
// are set
batchCommands = New.arrayList();
}
int size = batchCommands.size();
int[] result = new int[size];
boolean error = false;
SQLException next = null;
for (int i = 0; i < size; i++) {
String sql = batchCommands.get(i);
try {
result[i] = executeUpdateInternal(sql, false);
} catch (Exception re) {
SQLException e = logAndConvert(re);
if (next == null) {
next = e;
} else {
e.setNextException(next);
next = e;
}
result[i] = Statement.EXECUTE_FAILED;
error = true;
}
}
batchCommands = null;
if (error) {
throw new JdbcBatchUpdateException(next, result);
}
return result;
} finally {
afterWriting();
}
} catch (Exception e) {
throw logAndConvert(e);
}
}
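// Usage sketch (illustrative; the statements are hypothetical). A failed
// statement leaves Statement.EXECUTE_FAILED in its slot of the returned
// array, and the individual failures are chained on the thrown
// JdbcBatchUpdateException.
//
//     stat.addBatch("INSERT INTO TEST VALUES (1, 'a')");
//     stat.addBatch("INSERT INTO TEST VALUES (2, 'b')");
//     int[] counts = stat.executeBatch();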
/**
* Executes the batch.
* If one of the batched statements fails, this database will continue
* with the remaining statements.
*
* @return the array of update counts
*/
@Override
public long[] executeLargeBatch() throws SQLException {
int[] intResult = executeBatch();
int count = intResult.length;
long[] longResult = new long[count];
for (int i = 0; i < count; i++) {
longResult[i] = intResult[i];
}
return longResult;
}
/**
* Return a result set that contains the last generated auto-increment key
* for this connection, if there was one. If no key was generated by the
* last modification statement, then an empty result set is returned.
* The returned result set only contains the data for the very last row.
*
* @return the result set with one row and one column containing the key
* @throws SQLException if this object is closed
*/
@Override
public ResultSet getGeneratedKeys() throws SQLException {
try {
int id = getNextId(TraceObject.RESULT_SET);
if (isDebugEnabled()) {
debugCodeAssign("ResultSet", TraceObject.RESULT_SET, id, "getGeneratedKeys()");
}
checkClosed();
if (!conn.scopeGeneratedKeys()) {
if (generatedKeys != null) {
return generatedKeys;
}
if (session.isSupportsGeneratedKeys()) {
return new SimpleResultSet();
}
}
// Compatibility mode or an old server, so use SCOPE_IDENTITY()
return conn.getGeneratedKeys(this, id);
} catch (Exception e) {
throw logAndConvert(e);
}
}
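// Usage sketch (illustrative; the table is hypothetical): requesting
// generated keys at execution time and reading them afterwards.
//
//     stat.executeUpdate("INSERT INTO TEST(NAME) VALUES ('a')",
//             Statement.RETURN_GENERATED_KEYS);
//     try (ResultSet keys = stat.getGeneratedKeys()) {
//         while (keys.next()) {
//             long id = keys.getLong(1);
//         }
//     }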
/**
* Moves to the next result set; however, there is always at most one
* result set. This call also closes the current result set (if there is
* one). Since there is never a next result set, this method always
* returns false.
*
* @return false
* @throws SQLException if this object is closed.
*/
@Override
public boolean getMoreResults() throws SQLException {
try {
debugCodeCall("getMoreResults");
checkClosed();
closeOldResultSet();
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Move to the next result set.
* This method always returns false.
*
* @param current Statement.CLOSE_CURRENT_RESULT,
* Statement.KEEP_CURRENT_RESULT,
* or Statement.CLOSE_ALL_RESULTS
* @return false
*/
@Override
public boolean getMoreResults(int current) throws SQLException {
try {
debugCodeCall("getMoreResults", current);
switch (current) {
case Statement.CLOSE_CURRENT_RESULT:
case Statement.CLOSE_ALL_RESULTS:
checkClosed();
closeOldResultSet();
break;
case Statement.KEEP_CURRENT_RESULT:
// nothing to do
break;
default:
throw DbException.getInvalidValueException("current", current);
}
return false;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement and returns the update count.
*
* @param sql the SQL statement
* @param autoGeneratedKeys
* {@link Statement#RETURN_GENERATED_KEYS} if generated keys should
* be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if
* generated keys should not be available
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public int executeUpdate(String sql, int autoGeneratedKeys)
throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeUpdate("+quote(sql)+", "+autoGeneratedKeys+");");
}
return executeUpdateInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement and returns the update count.
*
* @param sql the SQL statement
* @param autoGeneratedKeys
* {@link Statement#RETURN_GENERATED_KEYS} if generated keys should
* be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if
* generated keys should not be available
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeLargeUpdate("+quote(sql)+", "+autoGeneratedKeys+");");
}
return executeUpdateInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement and returns the update count.
*
* @param sql the SQL statement
* @param columnIndexes
* an array of column indexes indicating the columns with generated
* keys that should be returned from the inserted row
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeUpdate("+quote(sql)+", "+quoteIntArray(columnIndexes)+");");
}
return executeUpdateInternal(sql, columnIndexes);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement and returns the update count.
*
* @param sql the SQL statement
* @param columnIndexes
* an array of column indexes indicating the columns with generated
* keys that should be returned from the inserted row
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeLargeUpdate("+quote(sql)+", "+quoteIntArray(columnIndexes)+");");
}
return executeUpdateInternal(sql, columnIndexes);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement and returns the update count.
*
* @param sql the SQL statement
* @param columnNames
* an array of column names indicating the columns with generated
* keys that should be returned from the inserted row
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeUpdate("+quote(sql)+", "+quoteArray(columnNames)+");");
}
return executeUpdateInternal(sql, columnNames);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes a statement and returns the update count.
*
* @param sql the SQL statement
* @param columnNames
* an array of column names indicating the columns with generated
* keys that should be returned from the inserted row
* @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
@Override
public long executeLargeUpdate(String sql, String columnNames[]) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("executeLargeUpdate("+quote(sql)+", "+quoteArray(columnNames)+");");
}
return executeUpdateInternal(sql, columnNames);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes an arbitrary statement that may return generated keys.
*
* @param sql the SQL statement
* @param autoGeneratedKeys
* {@link Statement#RETURN_GENERATED_KEYS} if generated keys should
* be available for retrieval, {@link Statement#NO_GENERATED_KEYS} if
* generated keys should not be available
* @return true if a result set is available, false if not
* @throws SQLException if a database error occurred
*/
@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("execute("+quote(sql)+", "+autoGeneratedKeys+");");
}
return executeInternal(sql, autoGeneratedKeys == RETURN_GENERATED_KEYS);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes an arbitrary statement that may return generated keys.
*
* @param sql the SQL statement
* @param columnIndexes
* an array of column indexes indicating the columns with generated
* keys that should be returned from the inserted row
* @return true if a result set is available, false if not
* @throws SQLException if a database error occurred
*/
@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("execute("+quote(sql)+", "+quoteIntArray(columnIndexes)+");");
}
return executeInternal(sql, columnIndexes);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Executes an arbitrary statement that may return generated keys.
*
* @param sql the SQL statement
* @param columnNames
* an array of column names indicating the columns with generated
* keys that should be returned from the inserted row
* @return true if a result set is available, false if not
* @throws SQLException if a database error occurred
*/
@Override
public boolean execute(String sql, String[] columnNames) throws SQLException {
try {
if (isDebugEnabled()) {
debugCode("execute("+quote(sql)+", "+quoteArray(columnNames)+");");
}
return executeInternal(sql, columnNames);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Gets the result set holdability.
*
* @return the holdability
*/
@Override
public int getResultSetHoldability() throws SQLException {
try {
debugCodeCall("getResultSetHoldability");
checkClosed();
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* [Not supported]
*/
@Override
public void closeOnCompletion() {
// not supported
}
/**
* [Not supported]
*/
@Override
public boolean isCloseOnCompletion() {
return true;
}
// =============================================================
/**
* Check if this connection is closed.
* The next operation is a read request.
*
* @return true if the session was re-connected
* @throws DbException if the connection or session is closed
*/
boolean checkClosed() {
return checkClosed(false);
}
/**
* Check if this connection is closed.
* The next operation may be a write request.
*
* @return true if the session was re-connected
* @throws DbException if the connection or session is closed
*/
boolean checkClosedForWrite() {
return checkClosed(true);
}
/**
* INTERNAL.
* Check if the statement is closed.
*
* @param write if the next operation is possibly writing
* @return true if a reconnect was required
* @throws DbException if it is closed
*/
protected boolean checkClosed(boolean write) {
if (conn == null) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
conn.checkClosed(write);
SessionInterface s = conn.getSession();
if (s != session) {
session = s;
trace = session.getTrace();
return true;
}
return false;
}
/**
* Called after each write operation.
*/
void afterWriting() {
if (conn != null) {
conn.afterWriting();
}
}
/**
* INTERNAL.
* Close an old result set if there is still one open.
*/
protected void closeOldResultSet() throws SQLException {
try {
if (!closedByResultSet) {
if (resultSet != null) {
resultSet.closeInternal();
}
if (generatedKeys != null) {
generatedKeys.closeInternal();
}
}
} finally {
cancelled = false;
resultSet = null;
updateCount = -1;
generatedKeys = null;
}
}
/**
* INTERNAL.
* Set the statement that is currently running.
*
* @param c the command
*/
protected void setExecutingStatement(CommandInterface c) {
if (c == null) {
conn.setExecutingStatement(null);
} else {
conn.setExecutingStatement(this);
lastExecutedCommandType = c.getCommandType();
}
executingCommand = c;
}
/**
* Called when the result set is closed.
*
* @param command the command
* @param closeCommand whether to close the command
*/
void onLazyResultSetClose(CommandInterface command, boolean closeCommand) {
setExecutingStatement(null);
command.stop();
if (closeCommand) {
command.close();
}
}
/**
* INTERNAL.
* Get the command type of the last executed command.
*/
public int getLastExecutedCommandType() {
return lastExecutedCommandType;
}
/**
* Returns whether this statement is closed.
*
* @return true if the statement is closed
*/
@Override
public boolean isClosed() throws SQLException {
try {
debugCodeCall("isClosed");
return conn == null;
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Return an object of this class if possible.
*
* @param iface the class
* @return this
*/
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
if (isWrapperFor(iface)) {
return (T) this;
}
throw DbException.getInvalidValueException("iface", iface);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if unwrap can return an object of this class.
*
* @param iface the class
* @return whether or not the interface is assignable from this class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface != null && iface.isAssignableFrom(getClass());
}
/**
* Returns whether this object is poolable.
* @return false
*/
@Override
public boolean isPoolable() {
debugCodeCall("isPoolable");
return false;
}
/**
* Requests that this object should be pooled or not.
* This call is ignored.
*
* @param poolable the requested value
*/
@Override
public void setPoolable(boolean poolable) {
if (isDebugEnabled()) {
debugCode("setPoolable("+poolable+");");
}
}
    /**
     * Enquotes the specified identifier.
     *
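     * <p>
     * A minimal illustrative sketch, where {@code stat} is this statement and
     * the identifier values are arbitrary examples (assuming the default
     * parser rules):
     * <pre>
     * stat.enquoteIdentifier("MY_TABLE", false); // MY_TABLE
     * stat.enquoteIdentifier("my table", false); // "my table"
     * stat.enquoteIdentifier("MY_TABLE", true);  // "MY_TABLE"
     * </pre>
     *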
* @param identifier
* identifier to quote if required
* @param alwaysQuote
* if {@code true} identifier will be quoted unconditionally
* @return specified identifier quoted if required or explicitly requested
*/
@Override
public String enquoteIdentifier(String identifier, boolean alwaysQuote) throws SQLException {
if (alwaysQuote || !isSimpleIdentifier(identifier)) {
return StringUtils.quoteIdentifier(identifier);
}
return identifier;
}
    /**
     * Checks if the specified identifier may be used without quotes.
     *
     * @param identifier
     *            identifier to check
     * @return whether the specified identifier may be used without quotes
*/
@Override
public boolean isSimpleIdentifier(String identifier) throws SQLException {
return ParserUtil.isSimpleIdentifier(identifier, true);
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbc/JdbcStatementBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbc;
import java.sql.SQLException;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcStatementBackwardsCompat {
// compatibility interface
// JDBC 4.2
/**
* Returns the last update count of this statement.
*
     * @return the update count (number of rows affected by an insert, update or
* delete, or 0 if no rows or the statement was a create, drop,
* commit or rollback; -1 if the statement was a select).
* @throws SQLException if this object is closed or invalid
*/
long getLargeUpdateCount() throws SQLException;
/**
     * Sets the maximum number of rows for a ResultSet.
*
* @param max the number of rows where 0 means no limit
* @throws SQLException if this object is closed
*/
void setLargeMaxRows(long max) throws SQLException;
/**
* Gets the maximum number of rows for a ResultSet.
*
* @return the number of rows where 0 means no limit
* @throws SQLException if this object is closed
*/
long getLargeMaxRows() throws SQLException;
/**
* Executes the batch.
* If one of the batched statements fails, this database will continue.
*
* @return the array of update counts
*/
long[] executeLargeBatch() throws SQLException;
/**
* Executes a statement (insert, update, delete, create, drop)
* and returns the update count.
* If another result set exists for this statement, this will be closed
* (even if this statement fails).
*
* If auto commit is on, this statement will be committed.
* If the statement is a DDL statement (create, drop, alter) and does not
* throw an exception, the current transaction (if any) is committed after
* executing the statement.
*
* @param sql the SQL statement
     * @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
long executeLargeUpdate(String sql) throws SQLException;
/**
* Executes a statement and returns the update count.
* This method just calls executeUpdate(String sql) internally.
     * The method getGeneratedKeys supports at most one column and row.
*
* @param sql the SQL statement
* @param autoGeneratedKeys ignored
     * @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException;
/**
* Executes a statement and returns the update count.
* This method just calls executeUpdate(String sql) internally.
     * The method getGeneratedKeys supports at most one column and row.
*
* @param sql the SQL statement
* @param columnIndexes ignored
     * @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLException;
/**
* Executes a statement and returns the update count.
* This method just calls executeUpdate(String sql) internally.
     * The method getGeneratedKeys supports at most one column and row.
*
* @param sql the SQL statement
* @param columnNames ignored
     * @return the update count (number of rows affected by an insert,
* update or delete, or 0 if no rows or the statement was a
* create, drop, commit or rollback)
* @throws SQLException if a database error occurred or a
* select statement was executed
*/
long executeLargeUpdate(String sql, String columnNames[]) throws SQLException;
// JDBC 4.3 (incomplete)
/**
* Enquotes the specified identifier.
*
* @param identifier
* identifier to quote if required
* @param alwaysQuote
* if {@code true} identifier will be quoted unconditionally
* @return specified identifier quoted if required or explicitly requested
*/
String enquoteIdentifier(String identifier, boolean alwaysQuote) throws SQLException;
/**
* Checks if specified identifier may be used without quotes.
*
* @param identifier
* identifier to check
     * @return whether the specified identifier may be used without quotes
*/
boolean isSimpleIdentifier(String identifier) throws SQLException;
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbcx/JdbcConnectionPool.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: Christian d'Heureuse, www.source-code.biz
*
* This class is multi-licensed under LGPL, MPL 2.0, and EPL 1.0.
*
* This module is free software: you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, either
* version 3 of the License, or (at your option) any later version.
* See http://www.gnu.org/licenses/lgpl.html
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*/
package org.h2.jdbcx;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.ConnectionPoolDataSource;
import javax.sql.DataSource;
import javax.sql.PooledConnection;
import org.h2.message.DbException;
import org.h2.util.New;
/**
* A simple standalone JDBC connection pool.
* It is based on the
* <a href="http://www.source-code.biz/snippets/java/8.htm">
* MiniConnectionPoolManager written by Christian d'Heureuse (Java 1.5)
* </a>. It is used as follows:
* <pre>
* import java.sql.*;
* import org.h2.jdbcx.JdbcConnectionPool;
* public class Test {
* public static void main(String... args) throws Exception {
* JdbcConnectionPool cp = JdbcConnectionPool.create(
* "jdbc:h2:~/test", "sa", "sa");
* for (String sql : args) {
* Connection conn = cp.getConnection();
* conn.createStatement().execute(sql);
* conn.close();
* }
* cp.dispose();
* }
* }
* </pre>
*
* @author Christian d'Heureuse
* (<a href="http://www.source-code.biz">www.source-code.biz</a>)
* @author Thomas Mueller
*/
public class JdbcConnectionPool implements DataSource, ConnectionEventListener,
JdbcConnectionPoolBackwardsCompat {
private static final int DEFAULT_TIMEOUT = 30;
private static final int DEFAULT_MAX_CONNECTIONS = 10;
private final ConnectionPoolDataSource dataSource;
private final ArrayList<PooledConnection> recycledConnections = New.arrayList();
private PrintWriter logWriter;
private int maxConnections = DEFAULT_MAX_CONNECTIONS;
private int timeout = DEFAULT_TIMEOUT;
private int activeConnections;
private boolean isDisposed;
protected JdbcConnectionPool(ConnectionPoolDataSource dataSource) {
this.dataSource = dataSource;
if (dataSource != null) {
try {
logWriter = dataSource.getLogWriter();
} catch (SQLException e) {
// ignore
}
}
}
/**
* Constructs a new connection pool.
*
* @param dataSource the data source to create connections
* @return the connection pool
*/
public static JdbcConnectionPool create(ConnectionPoolDataSource dataSource) {
return new JdbcConnectionPool(dataSource);
}
/**
* Constructs a new connection pool for H2 databases.
*
* @param url the database URL of the H2 connection
* @param user the user name
* @param password the password
* @return the connection pool
*/
public static JdbcConnectionPool create(String url, String user,
String password) {
JdbcDataSource ds = new JdbcDataSource();
ds.setURL(url);
ds.setUser(user);
ds.setPassword(password);
return new JdbcConnectionPool(ds);
}
/**
* Sets the maximum number of connections to use from now on.
* The default value is 10 connections.
*
* @param max the maximum number of connections
*/
public synchronized void setMaxConnections(int max) {
if (max < 1) {
throw new IllegalArgumentException("Invalid maxConnections value: " + max);
}
this.maxConnections = max;
// notify waiting threads if the value was increased
notifyAll();
}
/**
* Gets the maximum number of connections to use.
*
     * @return the maximum number of connections
*/
public synchronized int getMaxConnections() {
return maxConnections;
}
/**
* Gets the maximum time in seconds to wait for a free connection.
*
* @return the timeout in seconds
*/
@Override
public synchronized int getLoginTimeout() {
return timeout;
}
/**
* Sets the maximum time in seconds to wait for a free connection.
* The default timeout is 30 seconds. Calling this method with the
* value 0 will set the timeout to the default value.
*
* @param seconds the timeout, 0 meaning the default
*/
@Override
public synchronized void setLoginTimeout(int seconds) {
if (seconds == 0) {
seconds = DEFAULT_TIMEOUT;
}
this.timeout = seconds;
}
/**
* Closes all unused pooled connections.
* Exceptions while closing are written to the log stream (if set).
*/
public synchronized void dispose() {
if (isDisposed) {
return;
}
isDisposed = true;
        for (PooledConnection pc : recycledConnections) {
            closeConnection(pc);
}
}
/**
* Retrieves a connection from the connection pool. If
* <code>maxConnections</code> connections are already in use, the method
* waits until a connection becomes available or <code>timeout</code>
     * seconds have elapsed. When the application is finished using the connection,
* it must close it in order to return it to the pool.
* If no connection becomes available within the given timeout, an exception
* with SQL state 08001 and vendor code 8001 is thrown.
*
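     * <p>
     * A minimal usage sketch, where {@code cp} is this pool and the SQL
     * statement is an arbitrary example:
     * <pre>
     * Connection conn = cp.getConnection();
     * try {
     *     conn.createStatement().execute("SELECT 1");
     * } finally {
     *     conn.close(); // returns the connection to the pool
     * }
     * </pre>
     *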
* @return a new Connection object.
* @throws SQLException when a new connection could not be established,
* or a timeout occurred
*/
@Override
public Connection getConnection() throws SQLException {
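        // compute the deadline once, then poll for a free slot in one-second
        // slices until a connection is available or the deadline has passed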
long max = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeout);
do {
synchronized (this) {
if (activeConnections < maxConnections) {
return getConnectionNow();
}
try {
wait(1000);
} catch (InterruptedException e) {
// ignore
}
}
        } while (System.nanoTime() - max <= 0); // overflow-safe deadline check
throw new SQLException("Login timeout", "08001", 8001);
}
/**
* INTERNAL
*/
@Override
public Connection getConnection(String user, String password) {
throw new UnsupportedOperationException();
}
private Connection getConnectionNow() throws SQLException {
if (isDisposed) {
throw new IllegalStateException("Connection pool has been disposed.");
}
PooledConnection pc;
if (!recycledConnections.isEmpty()) {
pc = recycledConnections.remove(recycledConnections.size() - 1);
} else {
pc = dataSource.getPooledConnection();
}
Connection conn = pc.getConnection();
activeConnections++;
pc.addConnectionEventListener(this);
return conn;
}
/**
     * This method usually puts the connection back into the pool. There are
     * two exceptions: if the pool has been disposed or if the pool is full,
     * the connection is closed instead.
*
* @param pc the pooled connection
*/
synchronized void recycleConnection(PooledConnection pc) {
if (activeConnections <= 0) {
throw new AssertionError();
}
activeConnections--;
if (!isDisposed && activeConnections < maxConnections) {
recycledConnections.add(pc);
} else {
closeConnection(pc);
}
if (activeConnections >= maxConnections - 1) {
notifyAll();
}
}
private void closeConnection(PooledConnection pc) {
try {
pc.close();
} catch (SQLException e) {
if (logWriter != null) {
e.printStackTrace(logWriter);
}
}
}
/**
* INTERNAL
*/
@Override
public void connectionClosed(ConnectionEvent event) {
PooledConnection pc = (PooledConnection) event.getSource();
pc.removeConnectionEventListener(this);
recycleConnection(pc);
}
/**
* INTERNAL
*/
@Override
public void connectionErrorOccurred(ConnectionEvent event) {
// not used
}
/**
* Returns the number of active (open) connections of this pool. This is the
* number of <code>Connection</code> objects that have been issued by
* getConnection() for which <code>Connection.close()</code> has
* not yet been called.
*
* @return the number of active connections.
*/
public synchronized int getActiveConnections() {
return activeConnections;
}
/**
* INTERNAL
*/
@Override
public PrintWriter getLogWriter() {
return logWriter;
}
/**
* INTERNAL
*/
@Override
public void setLogWriter(PrintWriter logWriter) {
this.logWriter = logWriter;
}
/**
* [Not supported] Return an object of this class if possible.
*
* @param iface the class
*/
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
throw DbException.getUnsupportedException("unwrap");
}
/**
* [Not supported] Checks if unwrap can return an object of this class.
*
* @param iface the class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw DbException.getUnsupportedException("isWrapperFor");
}
/**
* [Not supported]
*/
@Override
public Logger getParentLogger() {
return null;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbcx/JdbcConnectionPoolBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbcx;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcConnectionPoolBackwardsCompat {
// compatibility interface
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbcx/JdbcDataSource.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbcx;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.PrintWriter;
import java.io.Serializable;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;
import java.util.logging.Logger;
import javax.naming.Reference;
import javax.naming.Referenceable;
import javax.naming.StringRefAddr;
import javax.sql.ConnectionPoolDataSource;
import javax.sql.DataSource;
import javax.sql.PooledConnection;
import javax.sql.XAConnection;
import javax.sql.XADataSource;
import org.h2.Driver;
import org.h2.jdbc.JdbcConnection;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.util.StringUtils;
/**
* A data source for H2 database connections. It is a factory for XAConnection
* and Connection objects. This class is usually registered in a JNDI naming
* service. To create a data source object and register it with a JNDI service,
* use the following code:
*
* <pre>
* import org.h2.jdbcx.JdbcDataSource;
* import javax.naming.Context;
* import javax.naming.InitialContext;
* JdbcDataSource ds = new JdbcDataSource();
* ds.setURL("jdbc:h2:˜/test");
* ds.setUser("sa");
* ds.setPassword("sa");
* Context ctx = new InitialContext();
* ctx.bind("jdbc/dsName", ds);
* </pre>
*
* To use a data source that is already registered, use the following code:
*
* <pre>
* import java.sql.Connection;
* import javax.sql.DataSource;
* import javax.naming.Context;
* import javax.naming.InitialContext;
* Context ctx = new InitialContext();
* DataSource ds = (DataSource) ctx.lookup("jdbc/dsName");
* Connection conn = ds.getConnection();
* </pre>
*
* In this example the user name and password are serialized as
* well; this may be a security problem in some cases.
*/
public class JdbcDataSource extends TraceObject implements XADataSource,
DataSource, ConnectionPoolDataSource, Serializable, Referenceable,
JdbcDataSourceBackwardsCompat {
private static final long serialVersionUID = 1288136338451857771L;
private transient JdbcDataSourceFactory factory;
private transient PrintWriter logWriter;
private int loginTimeout;
private String userName = "";
private char[] passwordChars = { };
private String url = "";
private String description;
static {
org.h2.Driver.load();
}
/**
* The public constructor.
*/
public JdbcDataSource() {
initFactory();
int id = getNextId(TraceObject.DATA_SOURCE);
setTrace(factory.getTrace(), TraceObject.DATA_SOURCE, id);
}
/**
* Called when de-serializing the object.
*
* @param in the input stream
*/
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
initFactory();
in.defaultReadObject();
}
private void initFactory() {
factory = new JdbcDataSourceFactory();
}
/**
* Get the login timeout in seconds, 0 meaning no timeout.
*
* @return the timeout in seconds
*/
@Override
public int getLoginTimeout() {
debugCodeCall("getLoginTimeout");
return loginTimeout;
}
/**
* Set the login timeout in seconds, 0 meaning no timeout.
* The default value is 0.
* This value is ignored by this database.
*
* @param timeout the timeout in seconds
*/
@Override
public void setLoginTimeout(int timeout) {
debugCodeCall("setLoginTimeout", timeout);
this.loginTimeout = timeout;
}
/**
* Get the current log writer for this object.
*
* @return the log writer
*/
@Override
public PrintWriter getLogWriter() {
debugCodeCall("getLogWriter");
return logWriter;
}
/**
* Set the current log writer for this object.
* This value is ignored by this database.
*
* @param out the log writer
*/
@Override
public void setLogWriter(PrintWriter out) {
debugCodeCall("setLogWriter(out)");
logWriter = out;
}
/**
* Open a new connection using the current URL, user name and password.
*
* @return the connection
*/
@Override
public Connection getConnection() throws SQLException {
debugCodeCall("getConnection");
return getJdbcConnection(userName,
StringUtils.cloneCharArray(passwordChars));
}
/**
* Open a new connection using the current URL and the specified user name
* and password.
*
* @param user the user name
* @param password the password
* @return the connection
*/
@Override
public Connection getConnection(String user, String password)
throws SQLException {
if (isDebugEnabled()) {
debugCode("getConnection("+quote(user)+", \"\");");
}
return getJdbcConnection(user, convertToCharArray(password));
}
private JdbcConnection getJdbcConnection(String user, char[] password)
throws SQLException {
if (isDebugEnabled()) {
debugCode("getJdbcConnection("+quote(user)+", new char[0]);");
}
Properties info = new Properties();
info.setProperty("user", user);
info.put("password", password);
Connection conn = Driver.load().connect(url, info);
if (conn == null) {
throw new SQLException("No suitable driver found for " + url,
"08001", 8001);
} else if (!(conn instanceof JdbcConnection)) {
throw new SQLException(
"Connecting with old version is not supported: " + url,
"08001", 8001);
}
return (JdbcConnection) conn;
}
/**
* Get the current URL.
*
* @return the URL
*/
public String getURL() {
debugCodeCall("getURL");
return url;
}
/**
* Set the current URL.
*
* @param url the new URL
*/
public void setURL(String url) {
debugCodeCall("setURL", url);
this.url = url;
}
/**
* Get the current URL.
     * This method does the same as getURL, but this method's signature
     * conforms to the JavaBean naming convention.
*
* @return the URL
*/
public String getUrl() {
debugCodeCall("getUrl");
return url;
}
/**
* Set the current URL.
     * This method does the same as setURL, but this method's signature
     * conforms to the JavaBean naming convention.
*
* @param url the new URL
*/
public void setUrl(String url) {
debugCodeCall("setUrl", url);
this.url = url;
}
/**
* Set the current password.
*
* @param password the new password.
*/
public void setPassword(String password) {
debugCodeCall("setPassword", "");
this.passwordChars = convertToCharArray(password);
}
/**
* Set the current password in the form of a char array.
*
* @param password the new password in the form of a char array.
*/
public void setPasswordChars(char[] password) {
if (isDebugEnabled()) {
debugCode("setPasswordChars(new char[0]);");
}
this.passwordChars = password;
}
private static char[] convertToCharArray(String s) {
return s == null ? null : s.toCharArray();
}
private static String convertToString(char[] a) {
return a == null ? null : new String(a);
}
/**
* Get the current password.
*
* @return the password
*/
public String getPassword() {
debugCodeCall("getPassword");
return convertToString(passwordChars);
}
/**
* Get the current user name.
*
* @return the user name
*/
public String getUser() {
debugCodeCall("getUser");
return userName;
}
/**
* Set the current user name.
*
* @param user the new user name
*/
public void setUser(String user) {
debugCodeCall("setUser", user);
this.userName = user;
}
/**
* Get the current description.
*
* @return the description
*/
public String getDescription() {
debugCodeCall("getDescription");
return description;
}
/**
* Set the description.
*
* @param description the new description
*/
public void setDescription(String description) {
debugCodeCall("getDescription", description);
this.description = description;
}
/**
* Get a new reference for this object, using the current settings.
*
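     * <p>
     * The reference can be bound in a JNDI context and is resolved back into
     * a data source by JdbcDataSourceFactory. An illustrative sketch, where
     * {@code ds} is this data source and the JNDI name {@code jdbc/dsName}
     * is an arbitrary example:
     * <pre>
     * Context ctx = new InitialContext();
     * ctx.bind("jdbc/dsName", ds.getReference());
     * </pre>
     *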
* @return the new reference
*/
@Override
public Reference getReference() {
debugCodeCall("getReference");
String factoryClassName = JdbcDataSourceFactory.class.getName();
Reference ref = new Reference(getClass().getName(), factoryClassName, null);
ref.add(new StringRefAddr("url", url));
ref.add(new StringRefAddr("user", userName));
ref.add(new StringRefAddr("password", convertToString(passwordChars)));
ref.add(new StringRefAddr("loginTimeout", String.valueOf(loginTimeout)));
ref.add(new StringRefAddr("description", description));
return ref;
}
/**
* Open a new XA connection using the current URL, user name and password.
*
* @return the connection
*/
@Override
public XAConnection getXAConnection() throws SQLException {
debugCodeCall("getXAConnection");
int id = getNextId(XA_DATA_SOURCE);
return new JdbcXAConnection(factory, id, getJdbcConnection(userName,
StringUtils.cloneCharArray(passwordChars)));
}
/**
* Open a new XA connection using the current URL and the specified user
* name and password.
*
* @param user the user name
* @param password the password
* @return the connection
*/
@Override
public XAConnection getXAConnection(String user, String password)
throws SQLException {
if (isDebugEnabled()) {
debugCode("getXAConnection("+quote(user)+", \"\");");
}
int id = getNextId(XA_DATA_SOURCE);
return new JdbcXAConnection(factory, id, getJdbcConnection(user,
convertToCharArray(password)));
}
/**
* Open a new pooled connection using the current URL, user name and
* password.
*
* @return the connection
*/
@Override
public PooledConnection getPooledConnection() throws SQLException {
debugCodeCall("getPooledConnection");
return getXAConnection();
}
/**
* Open a new pooled connection using the current URL and the specified user
* name and password.
*
* @param user the user name
* @param password the password
* @return the connection
*/
@Override
public PooledConnection getPooledConnection(String user, String password)
throws SQLException {
if (isDebugEnabled()) {
debugCode("getPooledConnection("+quote(user)+", \"\");");
}
return getXAConnection(user, password);
}
/**
* Return an object of this class if possible.
*
* @param iface the class
* @return this
*/
@Override
@SuppressWarnings("unchecked")
public <T> T unwrap(Class<T> iface) throws SQLException {
try {
if (isWrapperFor(iface)) {
return (T) this;
}
throw DbException.getInvalidValueException("iface", iface);
} catch (Exception e) {
throw logAndConvert(e);
}
}
/**
* Checks if unwrap can return an object of this class.
*
* @param iface the class
* @return whether or not the interface is assignable from this class
*/
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
return iface != null && iface.isAssignableFrom(getClass());
}
/**
* [Not supported]
*/
@Override
public Logger getParentLogger() {
return null;
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": url=" + url + " user=" + userName;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbcx/JdbcDataSourceBackwardsCompat.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbcx;
/**
* Allows us to compile on older platforms, while still implementing the methods
* from the newer JDBC API.
*/
public interface JdbcDataSourceBackwardsCompat {
// compatibility interface
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbcx/JdbcDataSourceFactory.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbcx;
import java.util.Hashtable;
import javax.naming.Context;
import javax.naming.Name;
import javax.naming.Reference;
import javax.naming.spi.ObjectFactory;
import org.h2.engine.Constants;
import org.h2.engine.SysProperties;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
/**
* This class is used to create new DataSource objects.
* An application should not use this class directly.
*/
public class JdbcDataSourceFactory implements ObjectFactory {
private static TraceSystem cachedTraceSystem;
private final Trace trace;
static {
org.h2.Driver.load();
}
/**
* The public constructor to create new factory objects.
*/
public JdbcDataSourceFactory() {
trace = getTraceSystem().getTrace(Trace.JDBCX);
}
/**
* Creates a new object using the specified location or reference
* information.
*
* @param obj the reference (this factory only supports objects of type
* javax.naming.Reference)
* @param name unused
* @param nameCtx unused
* @param environment unused
* @return the new JdbcDataSource, or null if the reference class name is
* not JdbcDataSource.
*/
@Override
public synchronized Object getObjectInstance(Object obj, Name name,
Context nameCtx, Hashtable<?, ?> environment) {
if (trace.isDebugEnabled()) {
trace.debug("getObjectInstance obj={0} name={1} " +
"nameCtx={2} environment={3}", obj, name, nameCtx, environment);
}
if (obj instanceof Reference) {
Reference ref = (Reference) obj;
if (ref.getClassName().equals(JdbcDataSource.class.getName())) {
JdbcDataSource dataSource = new JdbcDataSource();
dataSource.setURL((String) ref.get("url").getContent());
dataSource.setUser((String) ref.get("user").getContent());
dataSource.setPassword((String) ref.get("password").getContent());
dataSource.setDescription((String) ref.get("description").getContent());
String s = (String) ref.get("loginTimeout").getContent();
dataSource.setLoginTimeout(Integer.parseInt(s));
return dataSource;
}
}
return null;
}
/**
* INTERNAL
*/
public static TraceSystem getTraceSystem() {
synchronized (JdbcDataSourceFactory.class) {
if (cachedTraceSystem == null) {
cachedTraceSystem = new TraceSystem(
SysProperties.CLIENT_TRACE_DIRECTORY + "h2datasource" +
Constants.SUFFIX_TRACE_FILE);
cachedTraceSystem.setLevelFile(SysProperties.DATASOURCE_TRACE_LEVEL);
}
return cachedTraceSystem;
}
}
Trace getTrace() {
return trace;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbcx/JdbcXAConnection.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbcx;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.StatementEventListener;
import javax.sql.XAConnection;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.h2.api.ErrorCode;
import org.h2.jdbc.JdbcConnection;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.util.New;
/**
* This class provides support for distributed transactions.
* An application developer usually does not use this interface.
* It is used by the transaction manager internally.
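 * <p>
 * A typical two-phase lifecycle, as driven by a transaction manager; this is
 * an illustrative sketch only, and the {@code xid} is assumed to be supplied
 * by the transaction manager:
 * <pre>
 * XAResource xa = xaConnection.getXAResource();
 * xa.start(xid, XAResource.TMNOFLAGS);
 * // ... run SQL statements on xaConnection.getConnection() ...
 * xa.end(xid, XAResource.TMSUCCESS);
 * if (xa.prepare(xid) == XAResource.XA_OK) {
 *     xa.commit(xid, false);
 * }
 * </pre>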
*/
public class JdbcXAConnection extends TraceObject implements XAConnection,
XAResource {
private final JdbcDataSourceFactory factory;
// this connection is kept open as long as the XAConnection is alive
private JdbcConnection physicalConn;
// this connection is replaced whenever getConnection is called
private volatile Connection handleConn;
private final ArrayList<ConnectionEventListener> listeners = New.arrayList();
private Xid currentTransaction;
private boolean prepared;
static {
org.h2.Driver.load();
}
JdbcXAConnection(JdbcDataSourceFactory factory, int id,
JdbcConnection physicalConn) {
this.factory = factory;
setTrace(factory.getTrace(), TraceObject.XA_DATA_SOURCE, id);
this.physicalConn = physicalConn;
}
/**
* Get the XAResource object.
*
* @return itself
*/
@Override
public XAResource getXAResource() {
debugCodeCall("getXAResource");
return this;
}
/**
* Close the physical connection.
* This method is usually called by the connection pool.
*/
@Override
public void close() throws SQLException {
debugCodeCall("close");
Connection lastHandle = handleConn;
if (lastHandle != null) {
listeners.clear();
lastHandle.close();
}
if (physicalConn != null) {
try {
physicalConn.close();
} finally {
physicalConn = null;
}
}
}
/**
* Get a connection that is a handle to the physical connection. This method
* is usually called by the connection pool. This method closes the last
* connection handle if one exists.
*
* @return the connection
*/
@Override
public Connection getConnection() throws SQLException {
debugCodeCall("getConnection");
Connection lastHandle = handleConn;
if (lastHandle != null) {
lastHandle.close();
}
// this will ensure the rollback command is cached
physicalConn.rollback();
handleConn = new PooledJdbcConnection(physicalConn);
return handleConn;
}
/**
* Register a new listener for the connection.
*
* @param listener the event listener
*/
@Override
public void addConnectionEventListener(ConnectionEventListener listener) {
debugCode("addConnectionEventListener(listener);");
listeners.add(listener);
}
/**
* Remove the event listener.
*
* @param listener the event listener
*/
@Override
public void removeConnectionEventListener(ConnectionEventListener listener) {
debugCode("removeConnectionEventListener(listener);");
listeners.remove(listener);
}
/**
* INTERNAL
*/
void closedHandle() {
debugCode("closedHandle();");
ConnectionEvent event = new ConnectionEvent(this);
// go backward so that a listener can remove itself
// (otherwise we need to clone the list)
for (int i = listeners.size() - 1; i >= 0; i--) {
ConnectionEventListener listener = listeners.get(i);
listener.connectionClosed(event);
}
handleConn = null;
}
/**
* Get the transaction timeout.
*
* @return 0
*/
@Override
public int getTransactionTimeout() {
debugCodeCall("getTransactionTimeout");
return 0;
}
/**
* Set the transaction timeout.
*
* @param seconds ignored
* @return false
*/
@Override
public boolean setTransactionTimeout(int seconds) {
debugCodeCall("setTransactionTimeout", seconds);
return false;
}
/**
* Checks if this is the same XAResource.
*
* @param xares the other object
* @return true if this is the same object
*/
@Override
public boolean isSameRM(XAResource xares) {
debugCode("isSameRM(xares);");
return xares == this;
}
/**
* Get the list of prepared transaction branches. This method is called by
* the transaction manager during recovery.
*
* @param flag TMSTARTRSCAN, TMENDRSCAN, or TMNOFLAGS. If no other flags are
* set, TMNOFLAGS must be used.
* @return zero or more Xid objects
*/
@Override
public Xid[] recover(int flag) throws XAException {
debugCodeCall("recover", quoteFlags(flag));
checkOpen();
try (Statement stat = physicalConn.createStatement()) {
ResultSet rs = stat.executeQuery("SELECT * FROM " +
"INFORMATION_SCHEMA.IN_DOUBT ORDER BY TRANSACTION");
ArrayList<Xid> list = New.arrayList();
while (rs.next()) {
String tid = rs.getString("TRANSACTION");
int id = getNextId(XID);
Xid xid = new JdbcXid(factory, id, tid);
list.add(xid);
}
rs.close();
Xid[] result = list.toArray(new Xid[0]);
if (!list.isEmpty()) {
prepared = true;
}
return result;
} catch (SQLException e) {
XAException xa = new XAException(XAException.XAER_RMERR);
xa.initCause(e);
throw xa;
}
}
/**
* Prepare a transaction.
*
* @param xid the transaction id
* @return XA_OK
*/
@Override
public int prepare(Xid xid) throws XAException {
if (isDebugEnabled()) {
debugCode("prepare("+JdbcXid.toString(xid)+");");
}
checkOpen();
if (!currentTransaction.equals(xid)) {
throw new XAException(XAException.XAER_INVAL);
}
try (Statement stat = physicalConn.createStatement()) {
stat.execute("PREPARE COMMIT " + JdbcXid.toString(xid));
prepared = true;
} catch (SQLException e) {
throw convertException(e);
}
return XA_OK;
}
/**
* Forget a transaction.
     * This method has no effect for this database.
*
* @param xid the transaction id
*/
@Override
public void forget(Xid xid) {
if (isDebugEnabled()) {
debugCode("forget("+JdbcXid.toString(xid)+");");
}
prepared = false;
}
/**
* Roll back a transaction.
*
* @param xid the transaction id
*/
@Override
public void rollback(Xid xid) throws XAException {
if (isDebugEnabled()) {
debugCode("rollback("+JdbcXid.toString(xid)+");");
}
try {
if (prepared) {
try (Statement stat = physicalConn.createStatement()) {
stat.execute("ROLLBACK TRANSACTION " + JdbcXid.toString(xid));
}
prepared = false;
} else {
physicalConn.rollback();
}
physicalConn.setAutoCommit(true);
} catch (SQLException e) {
throw convertException(e);
}
currentTransaction = null;
}
/**
* End a transaction.
*
* @param xid the transaction id
* @param flags TMSUCCESS, TMFAIL, or TMSUSPEND
*/
@Override
public void end(Xid xid, int flags) throws XAException {
if (isDebugEnabled()) {
debugCode("end("+JdbcXid.toString(xid)+", "+quoteFlags(flags)+");");
}
// TODO transaction end: implement this method
if (flags == TMSUSPEND) {
return;
}
if (!currentTransaction.equals(xid)) {
throw new XAException(XAException.XAER_OUTSIDE);
}
prepared = false;
}
/**
* Start or continue to work on a transaction.
*
* @param xid the transaction id
* @param flags TMNOFLAGS, TMJOIN, or TMRESUME
*/
@Override
public void start(Xid xid, int flags) throws XAException {
if (isDebugEnabled()) {
debugCode("start("+JdbcXid.toString(xid)+", "+quoteFlags(flags)+");");
}
if (flags == TMRESUME) {
return;
}
if (flags == TMJOIN) {
if (currentTransaction != null && !currentTransaction.equals(xid)) {
throw new XAException(XAException.XAER_RMERR);
}
} else if (currentTransaction != null) {
throw new XAException(XAException.XAER_NOTA);
}
try {
physicalConn.setAutoCommit(false);
} catch (SQLException e) {
throw convertException(e);
}
currentTransaction = xid;
prepared = false;
}
/**
* Commit a transaction.
*
* @param xid the transaction id
* @param onePhase use a one-phase protocol if true
*/
@Override
public void commit(Xid xid, boolean onePhase) throws XAException {
if (isDebugEnabled()) {
debugCode("commit("+JdbcXid.toString(xid)+", "+onePhase+");");
}
try {
if (onePhase) {
physicalConn.commit();
} else {
try (Statement stat = physicalConn.createStatement()) {
stat.execute("COMMIT TRANSACTION " + JdbcXid.toString(xid));
prepared = false;
}
}
physicalConn.setAutoCommit(true);
} catch (SQLException e) {
throw convertException(e);
}
currentTransaction = null;
}
/**
* [Not supported] Add a statement event listener.
*
* @param listener the new statement event listener
*/
@Override
public void addStatementEventListener(StatementEventListener listener) {
throw new UnsupportedOperationException();
}
/**
* [Not supported] Remove a statement event listener.
*
* @param listener the statement event listener
*/
@Override
public void removeStatementEventListener(StatementEventListener listener) {
throw new UnsupportedOperationException();
}
/**
* INTERNAL
*/
@Override
public String toString() {
return getTraceObjectName() + ": " + physicalConn;
}
private static XAException convertException(SQLException e) {
XAException xa = new XAException(e.getMessage());
xa.initCause(e);
return xa;
}
private static String quoteFlags(int flags) {
StringBuilder buff = new StringBuilder();
if ((flags & XAResource.TMENDRSCAN) != 0) {
buff.append("|XAResource.TMENDRSCAN");
}
if ((flags & XAResource.TMFAIL) != 0) {
buff.append("|XAResource.TMFAIL");
}
if ((flags & XAResource.TMJOIN) != 0) {
buff.append("|XAResource.TMJOIN");
}
if ((flags & XAResource.TMONEPHASE) != 0) {
buff.append("|XAResource.TMONEPHASE");
}
if ((flags & XAResource.TMRESUME) != 0) {
buff.append("|XAResource.TMRESUME");
}
if ((flags & XAResource.TMSTARTRSCAN) != 0) {
buff.append("|XAResource.TMSTARTRSCAN");
}
if ((flags & XAResource.TMSUCCESS) != 0) {
buff.append("|XAResource.TMSUCCESS");
}
if ((flags & XAResource.TMSUSPEND) != 0) {
buff.append("|XAResource.TMSUSPEND");
}
if ((flags & XAResource.XA_RDONLY) != 0) {
buff.append("|XAResource.XA_RDONLY");
}
if (buff.length() == 0) {
buff.append("|XAResource.TMNOFLAGS");
}
return buff.toString().substring(1);
}
private void checkOpen() throws XAException {
if (physicalConn == null) {
throw new XAException(XAException.XAER_RMERR);
}
}
/**
* A pooled connection.
*/
class PooledJdbcConnection extends JdbcConnection {
private boolean isClosed;
public PooledJdbcConnection(JdbcConnection conn) {
super(conn);
}
@Override
public synchronized void close() throws SQLException {
if (!isClosed) {
try {
rollback();
setAutoCommit(true);
} catch (SQLException e) {
// ignore
}
closedHandle();
isClosed = true;
}
}
@Override
public synchronized boolean isClosed() throws SQLException {
return isClosed || super.isClosed();
}
@Override
protected synchronized void checkClosed(boolean write) {
if (isClosed) {
throw DbException.get(ErrorCode.OBJECT_CLOSED);
}
super.checkClosed(write);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jdbcx/JdbcXid.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jdbcx;
import java.util.StringTokenizer;
import javax.transaction.xa.Xid;
import org.h2.api.ErrorCode;
import org.h2.message.DbException;
import org.h2.message.TraceObject;
import org.h2.util.StringUtils;
/**
* An object of this class represents a transaction id.
*/
public class JdbcXid extends TraceObject implements Xid {
private static final String PREFIX = "XID";
private final int formatId;
private final byte[] branchQualifier;
private final byte[] globalTransactionId;
JdbcXid(JdbcDataSourceFactory factory, int id, String tid) {
setTrace(factory.getTrace(), TraceObject.XID, id);
try {
StringTokenizer tokenizer = new StringTokenizer(tid, "_");
String prefix = tokenizer.nextToken();
if (!PREFIX.equals(prefix)) {
throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid);
}
formatId = Integer.parseInt(tokenizer.nextToken());
branchQualifier = StringUtils.convertHexToBytes(tokenizer.nextToken());
globalTransactionId = StringUtils.convertHexToBytes(tokenizer.nextToken());
} catch (RuntimeException e) {
throw DbException.get(ErrorCode.WRONG_XID_FORMAT_1, tid);
}
}
/**
* INTERNAL
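     * <p>
     * Converts the given Xid into the textual form that the constructor
     * parses back: {@code XID} + '_' + format id + '_' + branch qualifier
     * (hex) + '_' + global transaction id (hex).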
*/
public static String toString(Xid xid) {
return PREFIX + '_' + xid.getFormatId() + '_' + StringUtils.convertBytesToHex(xid.getBranchQualifier()) + '_'
+ StringUtils.convertBytesToHex(xid.getGlobalTransactionId());
}
/**
* Get the format id.
*
* @return the format id
*/
@Override
public int getFormatId() {
debugCodeCall("getFormatId");
return formatId;
}
/**
* The transaction branch identifier.
*
* @return the identifier
*/
@Override
public byte[] getBranchQualifier() {
debugCodeCall("getBranchQualifier");
return branchQualifier;
}
/**
* The global transaction identifier.
*
* @return the transaction id
*/
@Override
public byte[] getGlobalTransactionId() {
debugCodeCall("getGlobalTransactionId");
return globalTransactionId;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jmx/DatabaseInfo.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jmx;
import java.lang.management.ManagementFactory;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;
import java.util.TreeMap;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.h2.command.Command;
import org.h2.engine.ConnectionInfo;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.store.PageStore;
import org.h2.table.Table;
/**
* The MBean implementation.
*
* @author Eric Dong
* @author Thomas Mueller
*/
public class DatabaseInfo implements DatabaseInfoMBean {
private static final Map<String, ObjectName> MBEANS = new HashMap<>();
/** Database. */
private final Database database;
private DatabaseInfo(Database database) {
if (database == null) {
throw new IllegalArgumentException("Argument 'database' must not be null");
}
this.database = database;
}
/**
     * Returns a new JMX ObjectName instance.
*
* @param name name of the MBean
* @param path the path
* @return a new ObjectName instance
* @throws JMException if the ObjectName could not be created
*/
private static ObjectName getObjectName(String name, String path)
throws JMException {
name = name.replace(':', '_');
path = path.replace(':', '_');
Hashtable<String, String> map = new Hashtable<>();
map.put("name", name);
map.put("path", path);
return new ObjectName("org.h2", map);
}
/**
* Registers an MBean for the database.
*
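     * The bean is registered under the JMX domain {@code org.h2} with the
     * key properties {@code name} (the short database name) and {@code path}
     * (the connection name); any ':' in these values is replaced with '_'.
     *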
* @param connectionInfo connection info
* @param database database
*/
public static void registerMBean(ConnectionInfo connectionInfo,
Database database) throws JMException {
String path = connectionInfo.getName();
if (!MBEANS.containsKey(path)) {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
String name = database.getShortName();
ObjectName mbeanObjectName = getObjectName(name, path);
MBEANS.put(path, mbeanObjectName);
DatabaseInfo info = new DatabaseInfo(database);
Object mbean = new DocumentedMBean(info, DatabaseInfoMBean.class);
mbeanServer.registerMBean(mbean, mbeanObjectName);
}
}
/**
* Unregisters the MBean for the database if one is registered.
*
* @param name database name
*/
public static void unregisterMBean(String name) throws Exception {
ObjectName mbeanObjectName = MBEANS.remove(name);
if (mbeanObjectName != null) {
MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
mbeanServer.unregisterMBean(mbeanObjectName);
}
}
@Override
public boolean isExclusive() {
return database.getExclusiveSession() != null;
}
@Override
public boolean isReadOnly() {
return database.isReadOnly();
}
@Override
public String getMode() {
return database.getMode().getName();
}
@Override
public boolean isMultiThreaded() {
return database.isMultiThreaded();
}
@Override
public boolean isMvcc() {
return database.isMultiVersion();
}
@Override
public int getLogMode() {
return database.getLogMode();
}
@Override
public void setLogMode(int value) {
database.setLogMode(value);
}
@Override
public int getTraceLevel() {
return database.getTraceSystem().getLevelFile();
}
@Override
public void setTraceLevel(int level) {
database.getTraceSystem().setLevelFile(level);
}
@Override
public long getFileWriteCountTotal() {
if (!database.isPersistent()) {
return 0;
}
PageStore p = database.getPageStore();
if (p != null) {
return p.getWriteCountTotal();
}
// TODO remove this method when removing the page store
// (the MVStore doesn't support it)
return 0;
}
@Override
public long getFileWriteCount() {
if (!database.isPersistent()) {
return 0;
}
PageStore p = database.getPageStore();
if (p != null) {
return p.getWriteCount();
}
        return database.getMvStore().getStore().getFileStore().getWriteCount();
}
@Override
public long getFileReadCount() {
if (!database.isPersistent()) {
return 0;
}
PageStore p = database.getPageStore();
if (p != null) {
return p.getReadCount();
}
return database.getMvStore().getStore().getFileStore().getReadCount();
}
@Override
public long getFileSize() {
if (!database.isPersistent()) {
return 0;
}
PageStore p = database.getPageStore();
if (p != null) {
return p.getPageCount() * p.getPageSize() / 1024;
}
return database.getMvStore().getStore().getFileStore().size();
}
@Override
public int getCacheSizeMax() {
if (!database.isPersistent()) {
return 0;
}
PageStore p = database.getPageStore();
if (p != null) {
return p.getCache().getMaxMemory();
}
return database.getMvStore().getStore().getCacheSize() * 1024;
}
@Override
public void setCacheSizeMax(int kb) {
if (database.isPersistent()) {
database.setCacheSize(kb);
}
}
@Override
public int getCacheSize() {
if (!database.isPersistent()) {
return 0;
}
PageStore p = database.getPageStore();
if (p != null) {
return p.getCache().getMemory();
}
return database.getMvStore().getStore().getCacheSizeUsed() * 1024;
}
@Override
public String getVersion() {
return Constants.getFullVersion();
}
@Override
public String listSettings() {
StringBuilder buff = new StringBuilder();
for (Map.Entry<String, String> e :
new TreeMap<>(
database.getSettings().getSettings()).entrySet()) {
buff.append(e.getKey()).append(" = ").append(e.getValue()).append('\n');
}
return buff.toString();
}
@Override
public String listSessions() {
StringBuilder buff = new StringBuilder();
for (Session session : database.getSessions(false)) {
buff.append("session id: ").append(session.getId());
buff.append(" user: ").
append(session.getUser().getName()).
append('\n');
buff.append("connected: ").
append(new Timestamp(session.getSessionStart())).
append('\n');
Command command = session.getCurrentCommand();
if (command != null) {
buff.append("statement: ").
                        append(command).
append('\n');
long commandStart = session.getCurrentCommandStart();
if (commandStart != 0) {
buff.append("started: ").append(
new Timestamp(commandStart)).
append('\n');
}
}
            Table[] locks = session.getLocks();
            if (locks.length > 0) {
                for (Table table : locks) {
if (table.isLockedExclusivelyBy(session)) {
buff.append("write lock on ");
} else {
buff.append("read lock on ");
}
buff.append(table.getSchema().getName()).
append('.').append(table.getName()).
append('\n');
}
}
buff.append('\n');
}
return buff.toString();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jmx/DatabaseInfoMBean.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jmx;
/**
* Information and management operations for the given database.
* @h2.resource
*
* @author Eric Dong
* @author Thomas Mueller
*/
public interface DatabaseInfoMBean {
/**
* Is the database open in exclusive mode?
* @h2.resource
*
* @return true if the database is open in exclusive mode, false otherwise
*/
boolean isExclusive();
/**
* Is the database read-only?
* @h2.resource
*
* @return true if the database is read-only, false otherwise
*/
boolean isReadOnly();
/**
* The database compatibility mode (REGULAR if no compatibility mode is
* used).
* @h2.resource
*
* @return the database mode
*/
String getMode();
/**
* Is multi-threading enabled?
* @h2.resource
*
* @return true if multi-threading is enabled, false otherwise
*/
boolean isMultiThreaded();
/**
* Is MVCC (multi version concurrency) enabled?
* @h2.resource
*
* @return true if MVCC is enabled, false otherwise
*/
boolean isMvcc();
/**
* The transaction log mode (0 disabled, 1 without sync, 2 enabled).
* @h2.resource
*
* @return the transaction log mode
*/
int getLogMode();
/**
* Set the transaction log mode.
*
* @param value the new log mode
*/
void setLogMode(int value);
/**
* The number of write operations since the database was created.
* @h2.resource
*
* @return the total write count
*/
long getFileWriteCountTotal();
/**
* The number of write operations since the database was opened.
* @h2.resource
*
* @return the write count
*/
long getFileWriteCount();
/**
* The file read count since the database was opened.
* @h2.resource
*
* @return the read count
*/
long getFileReadCount();
/**
* The database file size in KB.
* @h2.resource
*
     * @return the file size in KB
*/
long getFileSize();
/**
* The maximum cache size in KB.
* @h2.resource
*
* @return the maximum size
*/
int getCacheSizeMax();
/**
     * Change the maximum cache size.
*
* @param kb the cache size in KB.
*/
void setCacheSizeMax(int kb);
/**
* The current cache size in KB.
* @h2.resource
*
* @return the current size
*/
int getCacheSize();
/**
* The database version.
* @h2.resource
*
* @return the version
*/
String getVersion();
/**
* The trace level (0 disabled, 1 error, 2 info, 3 debug).
* @h2.resource
*
* @return the level
*/
int getTraceLevel();
/**
* Set the trace level.
*
* @param level the new value
*/
void setTraceLevel(int level);
/**
* List the database settings.
* @h2.resource
*
* @return the database settings
*/
String listSettings();
/**
* List sessions, including the queries that are in
* progress, and locked tables.
* @h2.resource
*
* @return information about the sessions
*/
String listSessions();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/jmx/DocumentedMBean.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.jmx;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Properties;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanOperationInfo;
import javax.management.NotCompliantMBeanException;
import javax.management.StandardMBean;
import org.h2.util.Utils;
/**
* An MBean that reads the documentation from a resource file.
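 * <p>
 * Descriptions are looked up in {@code /org/h2/res/javadoc.properties} using
 * the MBean interface name as the key prefix: the interface name itself for
 * the bean description, {@code <interface>.<operationName>} for operations,
 * and {@code <interface>.get<AttributeName>} (or {@code is...}) for
 * attributes, for example {@code org.h2.jmx.DatabaseInfoMBean.getCacheSize}.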
*/
public class DocumentedMBean extends StandardMBean {
private final String interfaceName;
private Properties resources;
public <T> DocumentedMBean(T impl, Class<T> mbeanInterface)
throws NotCompliantMBeanException {
super(impl, mbeanInterface);
this.interfaceName = impl.getClass().getName() + "MBean";
}
private Properties getResources() {
if (resources == null) {
resources = new Properties();
String resourceName = "/org/h2/res/javadoc.properties";
try {
byte[] buff = Utils.getResource(resourceName);
if (buff != null) {
resources.load(new ByteArrayInputStream(buff));
}
} catch (IOException e) {
// ignore
}
}
return resources;
}
@Override
protected String getDescription(MBeanInfo info) {
String s = getResources().getProperty(interfaceName);
return s == null ? super.getDescription(info) : s;
}
@Override
protected String getDescription(MBeanOperationInfo op) {
String s = getResources().getProperty(interfaceName + "." + op.getName());
return s == null ? super.getDescription(op) : s;
}
@Override
protected String getDescription(MBeanAttributeInfo info) {
String prefix = info.isIs() ? "is" : "get";
String s = getResources().getProperty(
interfaceName + "." + prefix + info.getName());
return s == null ? super.getDescription(info) : s;
}
@Override
protected int getImpact(MBeanOperationInfo info) {
if (info.getName().startsWith("list")) {
return MBeanOperationInfo.INFO;
}
return MBeanOperationInfo.ACTION;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/message/DbException.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.message;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.reflect.InvocationTargetException;
import java.nio.charset.StandardCharsets;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.text.MessageFormat;
import java.util.Locale;
import java.util.Map.Entry;
import java.util.Properties;
import org.h2.api.ErrorCode;
import org.h2.jdbc.JdbcSQLException;
import org.h2.util.SortedProperties;
import org.h2.util.StringUtils;
import org.h2.util.Utils;
/**
* This exception wraps a checked exception.
* It is used in methods where checked exceptions are not supported,
* for example in a Comparator.
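 * <p>
 * A minimal illustrative sketch of the usual pattern (the error code is an
 * arbitrary example):
 * <pre>
 * if (conn == null) {
 *     throw DbException.get(ErrorCode.OBJECT_CLOSED);
 * }
 * </pre>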
*/
public class DbException extends RuntimeException {
private static final long serialVersionUID = 1L;
private static final Properties MESSAGES = new Properties();
private Object source;
static {
try {
byte[] messages = Utils.getResource(
"/org/h2/res/_messages_en.prop");
if (messages != null) {
MESSAGES.load(new ByteArrayInputStream(messages));
}
String language = Locale.getDefault().getLanguage();
if (!"en".equals(language)) {
byte[] translations = Utils.getResource(
"/org/h2/res/_messages_" + language + ".prop");
// message: translated message + english
// (otherwise certain applications don't work)
if (translations != null) {
Properties p = SortedProperties.fromLines(
new String(translations, StandardCharsets.UTF_8));
for (Entry<Object, Object> e : p.entrySet()) {
String key = (String) e.getKey();
String translation = (String) e.getValue();
if (translation != null && !translation.startsWith("#")) {
String original = MESSAGES.getProperty(key);
String message = translation + "\n" + original;
MESSAGES.put(key, message);
}
}
}
}
} catch (OutOfMemoryError e) {
DbException.traceThrowable(e);
} catch (IOException e) {
DbException.traceThrowable(e);
}
}
private DbException(SQLException e) {
super(e.getMessage(), e);
}
private static String translate(String key, String... params) {
String message = null;
if (MESSAGES != null) {
// Tomcat sets final static fields to null sometimes
message = MESSAGES.getProperty(key);
}
if (message == null) {
message = "(Message " + key + " not found)";
}
if (params != null) {
for (int i = 0; i < params.length; i++) {
String s = params[i];
if (s != null && s.length() > 0) {
params[i] = StringUtils.quoteIdentifier(s);
}
}
message = MessageFormat.format(message, (Object[]) params);
}
return message;
}
/**
* Get the SQLException object.
*
* @return the exception
*/
public SQLException getSQLException() {
return (SQLException) getCause();
}
/**
* Get the error code.
*
* @return the error code
*/
public int getErrorCode() {
return getSQLException().getErrorCode();
}
/**
     * Set the SQL statement of this exception.
* This method may create a new object.
*
* @param sql the SQL statement
* @return the exception
*/
public DbException addSQL(String sql) {
SQLException e = getSQLException();
if (e instanceof JdbcSQLException) {
JdbcSQLException j = (JdbcSQLException) e;
if (j.getSQL() == null) {
j.setSQL(sql);
}
return this;
}
e = new JdbcSQLException(e.getMessage(), sql, e.getSQLState(),
e.getErrorCode(), e, null);
return new DbException(e);
}
/**
* Create a database exception for a specific error code.
*
* @param errorCode the error code
* @return the exception
*/
public static DbException get(int errorCode) {
return get(errorCode, (String) null);
}
/**
* Create a database exception for a specific error code.
*
* @param errorCode the error code
* @param p1 the first parameter of the message
* @return the exception
*/
public static DbException get(int errorCode, String p1) {
return get(errorCode, new String[] { p1 });
}
/**
* Create a database exception for a specific error code.
*
* @param errorCode the error code
* @param cause the cause of the exception
* @param params the list of parameters of the message
* @return the exception
*/
public static DbException get(int errorCode, Throwable cause,
String... params) {
return new DbException(getJdbcSQLException(errorCode, cause, params));
}
/**
* Create a database exception for a specific error code.
*
* @param errorCode the error code
* @param params the list of parameters of the message
* @return the exception
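* <p>
* Illustrative usage sketch (the error code and parameter are examples,
* not taken from the surrounding source):
* <pre>
* // the parameter is quoted as an identifier and substituted into
* // the translated message via MessageFormat
* throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, "TEST");
* </pre>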
*/
public static DbException get(int errorCode, String... params) {
return new DbException(getJdbcSQLException(errorCode, null, params));
}
/**
* Create a database exception for an arbitrary SQLState.
*
* @param sqlstate the state to use
* @param message the message to use
* @return the exception
*/
public static DbException fromUser(String sqlstate, String message) {
// do not translate, as the sqlstate is arbitrary: avoid "message not found"
return new DbException(new JdbcSQLException(message, null, sqlstate, 0, null, null));
}
/**
* Create a syntax error exception.
*
* @param sql the SQL statement
* @param index the position of the error in the SQL statement
* @return the exception
*/
public static DbException getSyntaxError(String sql, int index) {
sql = StringUtils.addAsterisk(sql, index);
return get(ErrorCode.SYNTAX_ERROR_1, sql);
}
/**
* Create a syntax error exception.
*
* @param sql the SQL statement
* @param index the position of the error in the SQL statement
* @param message the message
* @return the exception
*/
public static DbException getSyntaxError(String sql, int index,
String message) {
sql = StringUtils.addAsterisk(sql, index);
return new DbException(getJdbcSQLException(ErrorCode.SYNTAX_ERROR_2,
null, sql, message));
}
/**
* Gets a SQL exception meaning this feature is not supported.
*
* @param message what exactly is not supported
* @return the exception
*/
public static DbException getUnsupportedException(String message) {
return get(ErrorCode.FEATURE_NOT_SUPPORTED_1, message);
}
/**
* Gets a SQL exception meaning this value is invalid.
*
* @param param the name of the parameter
* @param value the value passed
* @return the exception
*/
public static DbException getInvalidValueException(String param,
Object value) {
return get(ErrorCode.INVALID_VALUE_2,
value == null ? "null" : value.toString(), param);
}
/**
* Throw an internal error. This method seems to return an exception object,
* so that it can be used instead of 'return', but in fact it always throws
* the exception.
*
* @param s the message
* @return the RuntimeException object
* @throws RuntimeException the exception
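* <p>
* Typical call pattern (illustrative): in a method that must return or
* throw on every path, write
* <pre>
* throw DbException.throwInternalError("unexpected state");
* </pre>
* so the compiler can prove the statement terminates.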
*/
public static RuntimeException throwInternalError(String s) {
RuntimeException e = new RuntimeException(s);
DbException.traceThrowable(e);
throw e;
}
/**
* Throw an internal error. This method seems to return an exception object,
* so that it can be used instead of 'return', but in fact it always throws
* the exception.
*
* @return the RuntimeException object
*/
public static RuntimeException throwInternalError() {
return throwInternalError("Unexpected code path");
}
/**
* Convert an exception to a SQL exception using the default mapping.
*
* @param e the root cause
* @return the SQL exception object
*/
public static SQLException toSQLException(Throwable e) {
if (e instanceof SQLException) {
return (SQLException) e;
}
return convert(e).getSQLException();
}
/**
* Convert a throwable to an SQL exception using the default mapping.
* OutOfMemoryError, StackOverflowError and LinkageError are converted
* as well; all other errors are re-thrown.
*
* @param e the root cause
* @return the exception object
*/
public static DbException convert(Throwable e) {
if (e instanceof DbException) {
return (DbException) e;
} else if (e instanceof SQLException) {
return new DbException((SQLException) e);
} else if (e instanceof InvocationTargetException) {
return convertInvocation((InvocationTargetException) e, null);
} else if (e instanceof IOException) {
return get(ErrorCode.IO_EXCEPTION_1, e, e.toString());
} else if (e instanceof OutOfMemoryError) {
return get(ErrorCode.OUT_OF_MEMORY, e);
} else if (e instanceof StackOverflowError || e instanceof LinkageError) {
return get(ErrorCode.GENERAL_ERROR_1, e, e.toString());
} else if (e instanceof Error) {
throw (Error) e;
}
return get(ErrorCode.GENERAL_ERROR_1, e, e.toString());
}
/**
* Convert an InvocationTarget exception to a database exception.
*
* @param te the root cause
* @param message the added message or null
* @return the database exception object
*/
public static DbException convertInvocation(InvocationTargetException te,
String message) {
Throwable t = te.getTargetException();
if (t instanceof SQLException || t instanceof DbException) {
return convert(t);
}
message = message == null ? t.getMessage() : message + ": " + t.getMessage();
return get(ErrorCode.EXCEPTION_IN_FUNCTION_1, t, message);
}
/**
* Convert an IO exception to a database exception.
*
* @param e the root cause
* @param message the message or null
* @return the database exception object
*/
public static DbException convertIOException(IOException e, String message) {
if (message == null) {
Throwable t = e.getCause();
if (t instanceof DbException) {
return (DbException) t;
}
return get(ErrorCode.IO_EXCEPTION_1, e, e.toString());
}
return get(ErrorCode.IO_EXCEPTION_2, e, e.toString(), message);
}
/**
* Gets the SQL exception object for a specific error code.
*
* @param errorCode the error code
* @param cause the cause of the exception
* @param params the list of parameters of the message
* @return the SQLException object
*/
private static JdbcSQLException getJdbcSQLException(int errorCode,
Throwable cause, String... params) {
String sqlstate = ErrorCode.getState(errorCode);
String message = translate(sqlstate, params);
return new JdbcSQLException(message, null, sqlstate, errorCode, cause, null);
}
/**
* Convert an exception to an IO exception.
*
* @param e the root cause
* @return the IO exception
*/
public static IOException convertToIOException(Throwable e) {
if (e instanceof IOException) {
return (IOException) e;
}
if (e instanceof JdbcSQLException) {
JdbcSQLException e2 = (JdbcSQLException) e;
if (e2.getOriginalCause() != null) {
e = e2.getOriginalCause();
}
}
return new IOException(e.toString(), e);
}
public Object getSource() {
return source;
}
public void setSource(Object source) {
this.source = source;
}
/**
* Write the exception to the driver manager log writer if configured.
*
* @param e the exception
*/
public static void traceThrowable(Throwable e) {
PrintWriter writer = DriverManager.getLogWriter();
if (writer != null) {
e.printStackTrace(writer);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/message/Trace.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.message;
import java.text.MessageFormat;
import java.util.ArrayList;
import org.h2.engine.SysProperties;
import org.h2.expression.ParameterInterface;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.value.Value;
/**
* This class represents a trace module.
*/
public class Trace {
/**
* The trace module id for commands.
*/
public static final int COMMAND = 0;
/**
* The trace module id for constraints.
*/
public static final int CONSTRAINT = 1;
/**
* The trace module id for databases.
*/
public static final int DATABASE = 2;
/**
* The trace module id for functions.
*/
public static final int FUNCTION = 3;
/**
* The trace module id for file locks.
*/
public static final int FILE_LOCK = 4;
/**
* The trace module id for indexes.
*/
public static final int INDEX = 5;
/**
* The trace module id for the JDBC API.
*/
public static final int JDBC = 6;
/**
* The trace module id for locks.
*/
public static final int LOCK = 7;
/**
* The trace module id for schemas.
*/
public static final int SCHEMA = 8;
/**
* The trace module id for sequences.
*/
public static final int SEQUENCE = 9;
/**
* The trace module id for settings.
*/
public static final int SETTING = 10;
/**
* The trace module id for tables.
*/
public static final int TABLE = 11;
/**
* The trace module id for triggers.
*/
public static final int TRIGGER = 12;
/**
* The trace module id for users.
*/
public static final int USER = 13;
/**
* The trace module id for the page store.
*/
public static final int PAGE_STORE = 14;
/**
* The trace module id for the JDBCX API
*/
public static final int JDBCX = 15;
/**
* Module names by their ids as array indexes.
*/
public static final String[] MODULE_NAMES = {
"command",
"constraint",
"database",
"function",
"fileLock",
"index",
"jdbc",
"lock",
"schema",
"sequence",
"setting",
"table",
"trigger",
"user",
"pageStore",
"JDBCX"
};
private final TraceWriter traceWriter;
private final String module;
private final String lineSeparator;
private int traceLevel = TraceSystem.PARENT;
Trace(TraceWriter traceWriter, int moduleId) {
this(traceWriter, MODULE_NAMES[moduleId]);
}
Trace(TraceWriter traceWriter, String module) {
this.traceWriter = traceWriter;
this.module = module;
this.lineSeparator = SysProperties.LINE_SEPARATOR;
}
/**
* Set the trace level of this component. This setting overrides the parent
* trace level.
*
* @param level the new level
*/
public void setLevel(int level) {
this.traceLevel = level;
}
private boolean isEnabled(int level) {
if (this.traceLevel == TraceSystem.PARENT) {
return traceWriter.isEnabled(level);
}
return level <= this.traceLevel;
}
/**
* Check if the trace level is equal or higher than INFO.
*
* @return true if it is
*/
public boolean isInfoEnabled() {
return isEnabled(TraceSystem.INFO);
}
/**
* Check if the trace level is equal or higher than DEBUG.
*
* @return true if it is
*/
public boolean isDebugEnabled() {
return isEnabled(TraceSystem.DEBUG);
}
/**
* Write a message with trace level ERROR to the trace system.
*
* @param t the exception
* @param s the message
*/
public void error(Throwable t, String s) {
if (isEnabled(TraceSystem.ERROR)) {
traceWriter.write(TraceSystem.ERROR, module, s, t);
}
}
/**
* Write a message with trace level ERROR to the trace system.
*
* @param t the exception
* @param s the message
* @param params the parameters
*/
public void error(Throwable t, String s, Object... params) {
if (isEnabled(TraceSystem.ERROR)) {
s = MessageFormat.format(s, params);
traceWriter.write(TraceSystem.ERROR, module, s, t);
}
}
/**
* Write a message with trace level INFO to the trace system.
*
* @param s the message
*/
public void info(String s) {
if (isEnabled(TraceSystem.INFO)) {
traceWriter.write(TraceSystem.INFO, module, s, null);
}
}
/**
* Write a message with trace level INFO to the trace system.
*
* @param s the message
* @param params the parameters
*/
public void info(String s, Object... params) {
if (isEnabled(TraceSystem.INFO)) {
s = MessageFormat.format(s, params);
traceWriter.write(TraceSystem.INFO, module, s, null);
}
}
/**
* Write a message with trace level INFO to the trace system.
*
* @param t the exception
* @param s the message
*/
void info(Throwable t, String s) {
if (isEnabled(TraceSystem.INFO)) {
traceWriter.write(TraceSystem.INFO, module, s, t);
}
}
/**
* Format the parameter list.
*
* @param parameters the parameter list
* @return the formatted text
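* <p>
* For example (illustrative values), two set parameters are formatted as
* <pre>
*  {1: 'Hello', 2: 5}
* </pre>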
*/
public static String formatParams(
ArrayList<? extends ParameterInterface> parameters) {
if (parameters.isEmpty()) {
return "";
}
StatementBuilder buff = new StatementBuilder();
int i = 0;
boolean params = false;
for (ParameterInterface p : parameters) {
if (p.isValueSet()) {
if (!params) {
buff.append(" {");
params = true;
}
buff.appendExceptFirst(", ");
Value v = p.getParamValue();
buff.append(++i).append(": ").append(v.getTraceSQL());
}
}
if (params) {
buff.append('}');
}
return buff.toString();
}
/**
* Write a SQL statement with trace level INFO to the trace system.
*
* @param sql the SQL statement
* @param params the parameters used, in the form " {1: ...}"
* @param count the update count
* @param time the time it took to run the statement in ms
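* <p>
* A resulting trace line looks like this (illustrative values; the
* closing asterisk-slash is escaped here to keep the Javadoc valid):
* <pre>
* /*SQL l:31 #:1 t:5*&#47;SELECT * FROM TEST WHERE ID = ? {1: 10};
* </pre>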
*/
public void infoSQL(String sql, String params, int count, long time) {
if (!isEnabled(TraceSystem.INFO)) {
return;
}
StringBuilder buff = new StringBuilder(sql.length() + params.length() + 20);
buff.append(lineSeparator).append("/*SQL");
boolean space = false;
if (params.length() > 0) {
// This looks like a bug, but it is intentional:
// If there are no parameters, the SQL statement is
// the rest of the line. If there are parameters, they
// are appended at the end of the line. Knowing the size
// of the statement simplifies separating the SQL statement
// from the parameters (no need to parse).
space = true;
buff.append(" l:").append(sql.length());
}
if (count > 0) {
space = true;
buff.append(" #:").append(count);
}
if (time > 0) {
space = true;
buff.append(" t:").append(time);
}
if (!space) {
buff.append(' ');
}
buff.append("*/").
append(StringUtils.javaEncode(sql)).
append(StringUtils.javaEncode(params)).
append(';');
sql = buff.toString();
traceWriter.write(TraceSystem.INFO, module, sql, null);
}
/**
* Write a message with trace level DEBUG to the trace system.
*
* @param s the message
* @param params the parameters
*/
public void debug(String s, Object... params) {
if (isEnabled(TraceSystem.DEBUG)) {
s = MessageFormat.format(s, params);
traceWriter.write(TraceSystem.DEBUG, module, s, null);
}
}
/**
* Write a message with trace level DEBUG to the trace system.
*
* @param s the message
*/
public void debug(String s) {
if (isEnabled(TraceSystem.DEBUG)) {
traceWriter.write(TraceSystem.DEBUG, module, s, null);
}
}
/**
* Write a message with trace level DEBUG to the trace system.
* @param t the exception
* @param s the message
*/
public void debug(Throwable t, String s) {
if (isEnabled(TraceSystem.DEBUG)) {
traceWriter.write(TraceSystem.DEBUG, module, s, t);
}
}
/**
* Write Java source code with trace level INFO to the trace system.
*
* @param java the source code
*/
public void infoCode(String java) {
if (isEnabled(TraceSystem.INFO)) {
traceWriter.write(TraceSystem.INFO, module, lineSeparator +
"/**/" + java, null);
}
}
/**
* Write Java source code with trace level DEBUG to the trace system.
*
* @param java the source code
*/
void debugCode(String java) {
if (isEnabled(TraceSystem.DEBUG)) {
traceWriter.write(TraceSystem.DEBUG, module, lineSeparator +
"/**/" + java, null);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/message/TraceObject.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.message;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicIntegerArray;
import org.h2.util.StringUtils;
/**
* The base class for objects that can print trace information about themselves.
*/
public class TraceObject {
/**
* The trace type id for callable statements.
*/
protected static final int CALLABLE_STATEMENT = 0;
/**
* The trace type id for connections.
*/
protected static final int CONNECTION = 1;
/**
* The trace type id for database meta data objects.
*/
protected static final int DATABASE_META_DATA = 2;
/**
* The trace type id for prepared statements.
*/
protected static final int PREPARED_STATEMENT = 3;
/**
* The trace type id for result sets.
*/
protected static final int RESULT_SET = 4;
/**
* The trace type id for result set meta data objects.
*/
protected static final int RESULT_SET_META_DATA = 5;
/**
* The trace type id for savepoint objects.
*/
protected static final int SAVEPOINT = 6;
/**
* The trace type id for statements.
*/
protected static final int STATEMENT = 8;
/**
* The trace type id for blobs.
*/
protected static final int BLOB = 9;
/**
* The trace type id for clobs.
*/
protected static final int CLOB = 10;
/**
* The trace type id for parameter meta data objects.
*/
protected static final int PARAMETER_META_DATA = 11;
/**
* The trace type id for data sources.
*/
protected static final int DATA_SOURCE = 12;
/**
* The trace type id for XA data sources.
*/
protected static final int XA_DATA_SOURCE = 13;
/**
* The trace type id for transaction ids.
*/
protected static final int XID = 15;
/**
* The trace type id for array objects.
*/
protected static final int ARRAY = 16;
private static final int LAST = ARRAY + 1;
private static final AtomicIntegerArray ID = new AtomicIntegerArray(LAST);
private static final String[] PREFIX = { "call", "conn", "dbMeta", "prep",
"rs", "rsMeta", "sp", "ex", "stat", "blob", "clob", "pMeta", "ds",
"xads", "xares", "xid", "ar" };
/**
* The trace module used by this object.
*/
protected Trace trace;
private int traceType;
private int id;
/**
* Set the options to use when writing trace messages.
*
* @param trace the trace object
* @param type the trace object type
* @param id the trace object id
*/
protected void setTrace(Trace trace, int type, int id) {
this.trace = trace;
this.traceType = type;
this.id = id;
}
/**
* INTERNAL
*/
public int getTraceId() {
return id;
}
/**
* INTERNAL
*/
public String getTraceObjectName() {
return PREFIX[traceType] + id;
}
/**
* Get the next trace object id for this object type.
*
* @param type the object type
* @return the new trace object id
*/
protected static int getNextId(int type) {
return ID.getAndIncrement(type);
}
/**
* Check if the debug trace level is enabled.
*
* @return true if it is
*/
protected boolean isDebugEnabled() {
return trace.isDebugEnabled();
}
/**
* Check if info trace level is enabled.
*
* @return true if it is
*/
protected boolean isInfoEnabled() {
return trace.isInfoEnabled();
}
/**
* Write trace information as an assignment in the form
* className prefixId = objectName.value.
*
* @param className the class name of the result
* @param newType the prefix type
* @param newId the trace object id of the created object
* @param value the value to assign this new object to
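* <p>
* For example (illustrative), creating a statement from a connection is
* traced as
* <pre>
* Statement stat1 = conn0.createStatement();
* </pre>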
*/
protected void debugCodeAssign(String className, int newType, int newId,
String value) {
if (trace.isDebugEnabled()) {
trace.debugCode(className + " " + PREFIX[newType] +
newId + " = " + getTraceObjectName() + "." + value + ";");
}
}
/**
* Write trace information as a method call in the form
* objectName.methodName().
*
* @param methodName the method name
*/
protected void debugCodeCall(String methodName) {
if (trace.isDebugEnabled()) {
trace.debugCode(getTraceObjectName() + "." + methodName + "();");
}
}
/**
* Write trace information as a method call in the form
* objectName.methodName(param) where the parameter is formatted as a long
* value.
*
* @param methodName the method name
* @param param one single long parameter
*/
protected void debugCodeCall(String methodName, long param) {
if (trace.isDebugEnabled()) {
trace.debugCode(getTraceObjectName() + "." +
methodName + "(" + param + ");");
}
}
/**
* Write trace information as a method call in the form
* objectName.methodName(param) where the parameter is formatted as a Java
* string.
*
* @param methodName the method name
* @param param one single string parameter
*/
protected void debugCodeCall(String methodName, String param) {
if (trace.isDebugEnabled()) {
trace.debugCode(getTraceObjectName() + "." +
methodName + "(" + quote(param) + ");");
}
}
/**
* Write trace information in the form objectName.text.
*
* @param text the trace text
*/
protected void debugCode(String text) {
if (trace.isDebugEnabled()) {
trace.debugCode(getTraceObjectName() + "." + text);
}
}
/**
* Format a string as a Java string literal.
*
* @param s the string to convert
* @return the Java string literal
*/
protected static String quote(String s) {
return StringUtils.quoteJavaString(s);
}
/**
* Format a time to the Java source code that represents this object.
*
* @param x the time to convert
* @return the Java source code
*/
protected static String quoteTime(java.sql.Time x) {
if (x == null) {
return "null";
}
return "Time.valueOf(\"" + x.toString() + "\")";
}
/**
* Format a timestamp to the Java source code that represents this object.
*
* @param x the timestamp to convert
* @return the Java source code
*/
protected static String quoteTimestamp(java.sql.Timestamp x) {
if (x == null) {
return "null";
}
return "Timestamp.valueOf(\"" + x.toString() + "\")";
}
/**
* Format a date to the Java source code that represents this object.
*
* @param x the date to convert
* @return the Java source code
*/
protected static String quoteDate(java.sql.Date x) {
if (x == null) {
return "null";
}
return "Date.valueOf(\"" + x.toString() + "\")";
}
/**
* Format a big decimal to the Java source code that represents this object.
*
* @param x the big decimal to convert
* @return the Java source code
*/
protected static String quoteBigDecimal(BigDecimal x) {
if (x == null) {
return "null";
}
return "new BigDecimal(\"" + x.toString() + "\")";
}
/**
* Format a byte array to the Java source code that represents this object.
*
* @param x the byte array to convert
* @return the Java source code
*/
protected static String quoteBytes(byte[] x) {
if (x == null) {
return "null";
}
return "org.h2.util.StringUtils.convertHexToBytes(\"" +
StringUtils.convertBytesToHex(x) + "\")";
}
/**
* Format a string array to the Java source code that represents this
* object.
*
* @param s the string array to convert
* @return the Java source code
*/
protected static String quoteArray(String[] s) {
return StringUtils.quoteJavaStringArray(s);
}
/**
* Format an int array to the Java source code that represents this object.
*
* @param s the int array to convert
* @return the Java source code
*/
protected static String quoteIntArray(int[] s) {
return StringUtils.quoteJavaIntArray(s);
}
/**
* Format a map to the Java source code that represents this object.
*
* @param map the map to convert
* @return the Java source code
*/
protected static String quoteMap(Map<String, Class<?>> map) {
if (map == null) {
return "null";
}
if (map.size() == 0) {
return "new Map()";
}
return "new Map() /* " + map.toString() + " */";
}
/**
* Log an exception and convert it to a SQL exception if required.
*
* @param ex the exception
* @return the SQL exception object
*/
protected SQLException logAndConvert(Throwable ex) {
SQLException e = null;
try {
e = DbException.toSQLException(ex);
if (trace == null) {
DbException.traceThrowable(e);
} else {
int errorCode = e.getErrorCode();
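// SQLSTATE class 23 (integrity constraint violation) marks
// expected application-level errors, so they are logged at
// INFO rather than ERROR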
if (errorCode >= 23000 && errorCode < 24000) {
trace.info(e, "exception");
} else {
trace.error(e, "exception");
}
}
} catch(Throwable ignore) {
if (e == null) {
e = new SQLException("", "HY000", ex);
}
e.addSuppressed(ignore);
}
return e;
}
/**
* Get a SQL exception meaning this feature is not supported.
*
* @param message the message
* @return the SQL exception
*/
protected SQLException unsupported(String message) {
try {
throw DbException.getUnsupportedException(message);
} catch (Exception e) {
return logAndConvert(e);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/message/TraceSystem.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.message;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.text.SimpleDateFormat;
import java.util.concurrent.atomic.AtomicReferenceArray;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.jdbc.JdbcSQLException;
import org.h2.store.fs.FileUtils;
import org.h2.util.IOUtils;
/**
* The trace mechanism is the logging facility of this database. There is
* usually one trace system per database. It is called 'trace' because the term
* 'log' is already used in the database domain and means 'transaction log'. It
* is possible to write after close was called, but that means for each write
* the file will be opened and closed again (which is slower).
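* <p>
* Minimal usage sketch (illustrative; the file name is an example):
* <pre>
* TraceSystem ts = new TraceSystem("test.trace.db");
* ts.setLevelFile(TraceSystem.INFO);
* Trace trace = ts.getTrace(Trace.DATABASE);
* trace.info("database opened");
* ts.close();
* </pre>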
*/
public class TraceSystem implements TraceWriter {
/**
* The parent trace level should be used.
*/
public static final int PARENT = -1;
/**
* This trace level means nothing should be written.
*/
public static final int OFF = 0;
/**
* This trace level means only errors should be written.
*/
public static final int ERROR = 1;
/**
* This trace level means errors and informational messages should be
* written.
*/
public static final int INFO = 2;
/**
* This trace level means all types of messages should be written.
*/
public static final int DEBUG = 3;
/**
* This trace level means all types of messages should be written, but
* instead of using the trace file the messages should be written to SLF4J.
*/
public static final int ADAPTER = 4;
/**
* The default level for system out trace messages.
*/
public static final int DEFAULT_TRACE_LEVEL_SYSTEM_OUT = OFF;
/**
* The default level for file trace messages.
*/
public static final int DEFAULT_TRACE_LEVEL_FILE = ERROR;
/**
* The default maximum trace file size. It is currently 64 MB. Additionally,
* there could be a .old file of the same size.
*/
private static final int DEFAULT_MAX_FILE_SIZE = 64 * 1024 * 1024;
private static final int CHECK_SIZE_EACH_WRITES = 4096;
private int levelSystemOut = DEFAULT_TRACE_LEVEL_SYSTEM_OUT;
private int levelFile = DEFAULT_TRACE_LEVEL_FILE;
private int levelMax;
private int maxFileSize = DEFAULT_MAX_FILE_SIZE;
private String fileName;
private final AtomicReferenceArray<Trace> traces =
new AtomicReferenceArray<>(Trace.MODULE_NAMES.length);
private SimpleDateFormat dateFormat;
private Writer fileWriter;
private PrintWriter printWriter;
private int checkSize;
private boolean closed;
private boolean writingErrorLogged;
private TraceWriter writer = this;
private PrintStream sysOut = System.out;
/**
* Create a new trace system object.
*
* @param fileName the file name
*/
public TraceSystem(String fileName) {
this.fileName = fileName;
updateLevel();
}
private void updateLevel() {
levelMax = Math.max(levelSystemOut, levelFile);
}
/**
* Set the print stream to use instead of System.out.
*
* @param out the new print stream
*/
public void setSysOut(PrintStream out) {
this.sysOut = out;
}
/**
* Get or create a trace object for this module id. Trace objects
* accessed by module id are cached.
*
* @param moduleId module id
* @return the trace object
*/
public Trace getTrace(int moduleId) {
Trace t = traces.get(moduleId);
if (t == null) {
t = new Trace(writer, moduleId);
if (!traces.compareAndSet(moduleId, null, t)) {
t = traces.get(moduleId);
}
}
return t;
}
/**
* Create a trace object for this module. Trace objects accessed by
* module name are not cached.
*
* @param module the module name
* @return the trace object
*/
public Trace getTrace(String module) {
return new Trace(writer, module);
}
@Override
public boolean isEnabled(int level) {
if (levelMax == ADAPTER) {
return writer.isEnabled(level);
}
return level <= this.levelMax;
}
/**
* Set the trace file name.
*
* @param name the file name
*/
public void setFileName(String name) {
this.fileName = name;
}
/**
* Set the maximum trace file size in bytes.
*
* @param max the maximum size
*/
public void setMaxFileSize(int max) {
this.maxFileSize = max;
}
/**
* Set the trace level to use for System.out
*
* @param level the new level
*/
public void setLevelSystemOut(int level) {
levelSystemOut = level;
updateLevel();
}
/**
* Set the file trace level.
*
* @param level the new level
*/
public void setLevelFile(int level) {
if (level == ADAPTER) {
String adapterClass = "org.h2.message.TraceWriterAdapter";
try {
writer = (TraceWriter) Class.forName(adapterClass).newInstance();
} catch (Throwable e) {
e = DbException.get(ErrorCode.CLASS_NOT_FOUND_1, e, adapterClass);
write(ERROR, Trace.DATABASE, adapterClass, e);
return;
}
String name = fileName;
if (name != null) {
if (name.endsWith(Constants.SUFFIX_TRACE_FILE)) {
name = name.substring(0, name.length() - Constants.SUFFIX_TRACE_FILE.length());
}
int idx = Math.max(name.lastIndexOf('/'), name.lastIndexOf('\\'));
if (idx >= 0) {
name = name.substring(idx + 1);
}
writer.setName(name);
}
}
levelFile = level;
updateLevel();
}
public int getLevelFile() {
return levelFile;
}
private synchronized String format(String module, String s) {
if (dateFormat == null) {
dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss ");
}
return dateFormat.format(System.currentTimeMillis()) + module + ": " + s;
}
@Override
public void write(int level, int moduleId, String s, Throwable t) {
write(level, Trace.MODULE_NAMES[moduleId], s, t);
}
@Override
public void write(int level, String module, String s, Throwable t) {
if (level <= levelSystemOut || level > this.levelMax) {
// level <= levelSystemOut: the system out level is set higher
// level > this.levelMax: the level for this module is set higher
sysOut.println(format(module, s));
if (t != null && levelSystemOut == DEBUG) {
t.printStackTrace(sysOut);
}
}
if (fileName != null) {
if (level <= levelFile) {
writeFile(format(module, s), t);
}
}
}
private synchronized void writeFile(String s, Throwable t) {
try {
if (checkSize++ >= CHECK_SIZE_EACH_WRITES) {
checkSize = 0;
closeWriter();
if (maxFileSize > 0 && FileUtils.size(fileName) > maxFileSize) {
String old = fileName + ".old";
FileUtils.delete(old);
FileUtils.move(fileName, old);
}
}
if (!openWriter()) {
return;
}
printWriter.println(s);
if (t != null) {
if (levelFile == ERROR && t instanceof JdbcSQLException) {
JdbcSQLException se = (JdbcSQLException) t;
int code = se.getErrorCode();
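// common error codes are logged without a stack trace
// to keep the trace file small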
if (ErrorCode.isCommon(code)) {
printWriter.println(t.toString());
} else {
t.printStackTrace(printWriter);
}
} else {
t.printStackTrace(printWriter);
}
}
printWriter.flush();
if (closed) {
closeWriter();
}
} catch (Exception e) {
logWritingError(e);
}
}
private void logWritingError(Exception e) {
if (writingErrorLogged) {
return;
}
writingErrorLogged = true;
Exception se = DbException.get(
ErrorCode.TRACE_FILE_ERROR_2, e, fileName, e.toString());
// print this error only once
fileName = null;
sysOut.println(se);
se.printStackTrace();
}
private boolean openWriter() {
if (printWriter == null) {
try {
FileUtils.createDirectories(FileUtils.getParent(fileName));
if (FileUtils.exists(fileName) && !FileUtils.canWrite(fileName)) {
// read only database: don't log error if the trace file
// can't be opened
return false;
}
fileWriter = IOUtils.getBufferedWriter(
FileUtils.newOutputStream(fileName, true));
printWriter = new PrintWriter(fileWriter, true);
} catch (Exception e) {
logWritingError(e);
return false;
}
}
return true;
}
private synchronized void closeWriter() {
if (printWriter != null) {
printWriter.flush();
printWriter.close();
printWriter = null;
}
if (fileWriter != null) {
try {
fileWriter.close();
} catch (IOException e) {
// ignore
}
fileWriter = null;
}
}
/**
* Close the writers, and the files if required. It is still possible to
* write after closing, however after each write the file is closed again
* (slowing down tracing).
*/
public void close() {
closeWriter();
closed = true;
}
@Override
public void setName(String name) {
// nothing to do (the file name is already set)
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/message/TraceWriter.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.message;
/**
* The backend of the trace system must implement this interface. Two
* implementations are supported: the (default) native trace writer
* implementation that can write to a file and to system out, and an adapter
* that uses SLF4J (Simple Logging Facade for Java).
*/
interface TraceWriter {
/**
* Set the name of the database or trace object.
*
* @param name the new name
*/
void setName(String name);
/**
* Write a message.
*
* @param level the trace level
* @param module the name of the module
* @param s the message
* @param t the exception (may be null)
*/
void write(int level, String module, String s, Throwable t);
/**
* Write a message.
*
* @param level the trace level
* @param moduleId the id of the module
* @param s the message
* @param t the exception (may be null)
*/
void write(int level, int moduleId, String s, Throwable t);
/**
* Check whether the given trace / log level is enabled.
*
* @param level the level
* @return true if the level is enabled
*/
boolean isEnabled(int level);
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/message/TraceWriterAdapter.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This adapter sends log output to SLF4J. SLF4J supports multiple
* implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), JDK
* 1.4 logging, x4juli, and Simple Log. To use SLF4J, you need to add the
* required jar files to the classpath, and set the trace level to 4 when
* opening a database:
*
* <pre>
* jdbc:h2:~/test;TRACE_LEVEL_FILE=4
* </pre>
*
* The logger name is 'h2database'.
*/
public class TraceWriterAdapter implements TraceWriter {
private String name;
private final Logger logger = LoggerFactory.getLogger("h2database");
@Override
public void setName(String name) {
this.name = name;
}
@Override
public boolean isEnabled(int level) {
switch (level) {
case TraceSystem.DEBUG:
return logger.isDebugEnabled();
case TraceSystem.INFO:
return logger.isInfoEnabled();
case TraceSystem.ERROR:
return logger.isErrorEnabled();
default:
return false;
}
}
@Override
public void write(int level, int moduleId, String s, Throwable t) {
write(level, Trace.MODULE_NAMES[moduleId], s, t);
}
@Override
public void write(int level, String module, String s, Throwable t) {
if (isEnabled(level)) {
if (name != null) {
s = name + ":" + module + " " + s;
} else {
s = module + " " + s;
}
switch (level) {
case TraceSystem.DEBUG:
logger.debug(s, t);
break;
case TraceSystem.INFO:
logger.info(s, t);
break;
case TraceSystem.ERROR:
logger.error(s, t);
break;
default:
}
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mode/FunctionsMySQL.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: Jason Brittain (jason.brittain at gmail.com)
*/
package org.h2.mode;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import org.h2.util.StringUtils;
/**
* This class implements some MySQL-specific functions.
*
* @author Jason Brittain
* @author Thomas Mueller
*/
public class FunctionsMySQL {
/**
* The date format of a MySQL formatted date/time.
* Example: 2008-09-25 08:40:59
*/
private static final String DATE_TIME_FORMAT = "yyyy-MM-dd HH:mm:ss";
/**
* Format replacements for MySQL date formats.
* See
* http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_date-format
*/
private static final String[] FORMAT_REPLACE = {
"%a", "EEE",
"%b", "MMM",
"%c", "MM",
"%d", "dd",
"%e", "d",
"%H", "HH",
"%h", "hh",
"%I", "hh",
"%i", "mm",
"%j", "DDD",
"%k", "H",
"%l", "h",
"%M", "MMMM",
"%m", "MM",
"%p", "a",
"%r", "hh:mm:ss a",
"%S", "ss",
"%s", "ss",
"%T", "HH:mm:ss",
"%W", "EEEE",
"%w", "F",
"%Y", "yyyy",
"%y", "yy",
"%%", "%",
};
/**
* Register the functionality in the database.
* Nothing happens if the functions are already registered.
*
* @param conn the connection
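* <p>
* After registration the aliases can be used directly in SQL
* (illustrative):
* <pre>
* SELECT UNIX_TIMESTAMP(), FROM_UNIXTIME(1222350059), DATE('2008-09-25 08:40:59');
* </pre>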
*/
public static void register(Connection conn) throws SQLException {
String[] init = {
"UNIX_TIMESTAMP", "unixTimestamp",
"FROM_UNIXTIME", "fromUnixTime",
"DATE", "date",
};
Statement stat = conn.createStatement();
for (int i = 0; i < init.length; i += 2) {
String alias = init[i], method = init[i + 1];
stat.execute(
"CREATE ALIAS IF NOT EXISTS " + alias +
" FOR \"" + FunctionsMySQL.class.getName() + "." + method + "\"");
}
}
/**
* Get the seconds since 1970-01-01 00:00:00 UTC.
* See
* http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_unix-timestamp
*
* @return the current timestamp in seconds (not milliseconds).
*/
public static int unixTimestamp() {
return (int) (System.currentTimeMillis() / 1000L);
}
/**
* Get the seconds since 1970-01-01 00:00:00 UTC of the given timestamp.
* See
* http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_unix-timestamp
*
* @param timestamp the timestamp
* @return the given timestamp in seconds (not milliseconds).
*/
public static int unixTimestamp(java.sql.Timestamp timestamp) {
return (int) (timestamp.getTime() / 1000L);
}
/**
* See
* http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime
*
* @param seconds The current timestamp in seconds.
* @return a formatted date/time String in the format "yyyy-MM-dd HH:mm:ss".
*/
public static String fromUnixTime(int seconds) {
SimpleDateFormat formatter = new SimpleDateFormat(DATE_TIME_FORMAT,
Locale.ENGLISH);
return formatter.format(new Date(seconds * 1000L));
}
/**
* See
* http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_from-unixtime
*
* @param seconds The current timestamp in seconds.
* @param format The format of the date/time String to return.
* @return a formatted date/time String in the given format.
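* <p>
* For example (illustrative), the MySQL format "%Y-%m-%d %T" is
* converted to the SimpleDateFormat pattern "yyyy-MM-dd HH:mm:ss".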
*/
public static String fromUnixTime(int seconds, String format) {
format = convertToSimpleDateFormat(format);
SimpleDateFormat formatter = new SimpleDateFormat(format, Locale.ENGLISH);
return formatter.format(new Date(seconds * 1000L));
}
private static String convertToSimpleDateFormat(String format) {
String[] replace = FORMAT_REPLACE;
for (int i = 0; i < replace.length; i += 2) {
format = StringUtils.replaceAll(format, replace[i], replace[i + 1]);
}
return format;
}
/**
* See
* http://dev.mysql.com/doc/refman/5.1/en/date-and-time-functions.html#function_date
* This function is dependent on the exact formatting of the MySQL date/time
* string.
*
* @param dateTime The date/time String from which to extract just the date
* part.
* @return the date part of the given date/time String argument.
*/
public static String date(String dateTime) {
if (dateTime == null) {
return null;
}
int index = dateTime.indexOf(' ');
if (index != -1) {
return dateTime.substring(0, index);
}
return dateTime;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/Chunk.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
/**
* A chunk of data, containing one or multiple pages.
* <p>
* Chunks are page aligned (each page is usually 4096 bytes).
* There are at most 67 million (2^26) chunks,
* each chunk is at most 2 GB large.
*/
public class Chunk {
/**
* The maximum chunk id.
*/
public static final int MAX_ID = (1 << 26) - 1;
/**
* The maximum length of a chunk header, in bytes.
*/
static final int MAX_HEADER_LENGTH = 1024;
/**
* The length of the chunk footer. The longest footer is:
* chunk:ffffffff,block:ffffffffffffffff,
* version:ffffffffffffffff,fletcher:ffffffff
*/
static final int FOOTER_LENGTH = 128;
/**
* The chunk id.
*/
public final int id;
/**
* The start block number within the file.
*/
public long block;
/**
* The length in number of blocks.
*/
public int len;
/**
* The total number of pages in this chunk.
*/
public int pageCount;
/**
* The number of pages still alive.
*/
public int pageCountLive;
/**
* The sum of the max length of all pages.
*/
public long maxLen;
/**
* The sum of the max length of all pages that are in use.
*/
public long maxLenLive;
/**
* The garbage collection priority. Priority 0 means it needs to be
* collected, a high value means low priority.
*/
public int collectPriority;
/**
* The position of the meta root.
*/
public long metaRootPos;
/**
* The version stored in this chunk.
*/
public long version;
/**
* When this chunk was created, in milliseconds after the store was created.
*/
public long time;
/**
* When this chunk was no longer needed, in milliseconds after the store was
* created. After this, the chunk is kept alive a bit longer (in case it is
* referenced in older versions).
*/
public long unused;
/**
* The last used map id.
*/
public int mapId;
/**
* The predicted position of the next chunk.
*/
public long next;
Chunk(int id) {
this.id = id;
}
/**
* Read the header from the byte buffer.
*
* @param buff the source buffer
* @param start the start of the chunk in the file
* @return the chunk
*/
static Chunk readChunkHeader(ByteBuffer buff, long start) {
int pos = buff.position();
byte[] data = new byte[Math.min(buff.remaining(), MAX_HEADER_LENGTH)];
buff.get(data);
try {
for (int i = 0; i < data.length; i++) {
if (data[i] == '\n') {
// set the position to the start of the first page
buff.position(pos + i + 1);
String s = new String(data, 0, i, StandardCharsets.ISO_8859_1).trim();
return fromString(s);
}
}
} catch (Exception e) {
// there could be various reasons
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupt reading chunk at position {0}", start, e);
}
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupt reading chunk at position {0}", start);
}
/**
* Write the chunk header.
*
* @param buff the target buffer
* @param minLength the minimum length
*/
void writeChunkHeader(WriteBuffer buff, int minLength) {
long pos = buff.position();
buff.put(asString().getBytes(StandardCharsets.ISO_8859_1));
while (buff.position() - pos < minLength - 1) {
buff.put((byte) ' ');
}
if (minLength != 0 && buff.position() > minLength) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_INTERNAL,
"Chunk metadata too long");
}
buff.put((byte) '\n');
}
/**
* Get the metadata key for the given chunk id.
*
* @param chunkId the chunk id
* @return the metadata key
*/
static String getMetaKey(int chunkId) {
return "chunk." + Integer.toHexString(chunkId);
}
/**
* Build a chunk from the given string.
*
* @param s the string
* @return the chunk
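* <p>
* The string is a comma-separated map of hexadecimal values, for
* example (illustrative):
* <pre>
* chunk:1,block:2,len:1,map:6,max:1c0,pages:2,root:4000004f8c,time:1c3,version:1
* </pre>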
*/
public static Chunk fromString(String s) {
HashMap<String, String> map = DataUtils.parseMap(s);
int id = DataUtils.readHexInt(map, "chunk", 0);
Chunk c = new Chunk(id);
c.block = DataUtils.readHexLong(map, "block", 0);
c.len = DataUtils.readHexInt(map, "len", 0);
c.pageCount = DataUtils.readHexInt(map, "pages", 0);
c.pageCountLive = DataUtils.readHexInt(map, "livePages", c.pageCount);
c.mapId = DataUtils.readHexInt(map, "map", 0);
c.maxLen = DataUtils.readHexLong(map, "max", 0);
c.maxLenLive = DataUtils.readHexLong(map, "liveMax", c.maxLen);
c.metaRootPos = DataUtils.readHexLong(map, "root", 0);
c.time = DataUtils.readHexLong(map, "time", 0);
c.unused = DataUtils.readHexLong(map, "unused", 0);
c.version = DataUtils.readHexLong(map, "version", id);
c.next = DataUtils.readHexLong(map, "next", 0);
return c;
}
/**
* Calculate the fill rate in %. 0 means empty, 100 means full.
*
* @return the fill rate
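* <p>
* For example, a chunk with maxLen = 200 and maxLenLive = 100 has a
* fill rate of 1 + (98 * 100) / 200 = 50.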
*/
public int getFillRate() {
if (maxLenLive <= 0) {
return 0;
} else if (maxLenLive == maxLen) {
return 100;
}
return 1 + (int) (98 * maxLenLive / maxLen);
}
@Override
public int hashCode() {
return id;
}
@Override
public boolean equals(Object o) {
return o instanceof Chunk && ((Chunk) o).id == id;
}
/**
* Get the chunk data as a string.
*
* @return the string
*/
public String asString() {
StringBuilder buff = new StringBuilder(240);
DataUtils.appendMap(buff, "chunk", id);
DataUtils.appendMap(buff, "block", block);
DataUtils.appendMap(buff, "len", len);
if (maxLen != maxLenLive) {
DataUtils.appendMap(buff, "liveMax", maxLenLive);
}
if (pageCount != pageCountLive) {
DataUtils.appendMap(buff, "livePages", pageCountLive);
}
DataUtils.appendMap(buff, "map", mapId);
DataUtils.appendMap(buff, "max", maxLen);
if (next != 0) {
DataUtils.appendMap(buff, "next", next);
}
DataUtils.appendMap(buff, "pages", pageCount);
DataUtils.appendMap(buff, "root", metaRootPos);
DataUtils.appendMap(buff, "time", time);
if (unused != 0) {
DataUtils.appendMap(buff, "unused", unused);
}
DataUtils.appendMap(buff, "version", version);
return buff.toString();
}
byte[] getFooterBytes() {
StringBuilder buff = new StringBuilder(FOOTER_LENGTH);
DataUtils.appendMap(buff, "chunk", id);
DataUtils.appendMap(buff, "block", block);
DataUtils.appendMap(buff, "version", version);
byte[] bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1);
int checksum = DataUtils.getFletcher32(bytes, 0, bytes.length);
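// the checksum covers only the bytes written so far,
// not the fletcher entry itself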
DataUtils.appendMap(buff, "fletcher", checksum);
while (buff.length() < FOOTER_LENGTH - 1) {
buff.append(' ');
}
buff.append('\n');
return buff.toString().getBytes(StandardCharsets.ISO_8859_1);
}
@Override
public String toString() {
return asString();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/ConcurrentArrayList.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.util.Arrays;
import java.util.Iterator;
/**
* A very simple array list that supports concurrent access.
* Internally, it uses immutable objects.
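* <p>
* Readers see an immutable snapshot of the backing array, so iteration is
* never disturbed by concurrent writers; each write copies the whole
* array, which is only efficient for small lists. Illustrative use:
* <pre>
* ConcurrentArrayList&lt;String&gt; list = new ConcurrentArrayList&lt;&gt;();
* list.add("a");
* Iterator&lt;String&gt; it = list.iterator();
* list.removeFirst("a"); // the iterator still sees "a"
* </pre>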
*
* @param <K> the element type
*/
public class ConcurrentArrayList<K> {
/**
* The array.
*/
@SuppressWarnings("unchecked")
K[] array = (K[]) new Object[0];
/**
* Get the first element, or null if none.
*
* @return the first element
*/
public K peekFirst() {
K[] a = array;
return a.length == 0 ? null : a[0];
}
/**
* Get the last element, or null if none.
*
* @return the last element
*/
public K peekLast() {
K[] a = array;
int len = a.length;
return len == 0 ? null : a[len - 1];
}
/**
* Add an element at the end.
*
* @param obj the element
*/
public synchronized void add(K obj) {
if (obj == null) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_INTERNAL, "adding null value to list");
}
int len = array.length;
array = Arrays.copyOf(array, len + 1);
array[len] = obj;
}
/**
* Remove the first element, if it matches.
*
* @param obj the element to remove
* @return true if the element matched and was removed
*/
public synchronized boolean removeFirst(K obj) {
if (peekFirst() != obj) {
return false;
}
int len = array.length;
@SuppressWarnings("unchecked")
K[] a = (K[]) new Object[len - 1];
System.arraycopy(array, 1, a, 0, len - 1);
array = a;
return true;
}
/**
* Remove the last element, if it matches.
*
* @param obj the element to remove
* @return true if the element matched and was removed
*/
public synchronized boolean removeLast(K obj) {
if (peekLast() != obj) {
return false;
}
array = Arrays.copyOf(array, array.length - 1);
return true;
}
/**
* Get an iterator over all entries.
*
* @return the iterator
*/
public Iterator<K> iterator() {
return new Iterator<K>() {
K[] a = array;
int index;
@Override
public boolean hasNext() {
return index < a.length;
}
@Override
public K next() {
return a[index++];
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException("remove");
}
};
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/Cursor.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.util.Iterator;
/**
* A cursor to iterate over elements in ascending order.
*
* @param <K> the key type
* @param <V> the value type
*/
public class Cursor<K, V> implements Iterator<K> {
private final MVMap<K, ?> map;
private final K from;
private CursorPos pos;
private K current, last;
private V currentValue, lastValue;
private Page lastPage;
private final Page root;
private boolean initialized;
Cursor(MVMap<K, ?> map, Page root, K from) {
this.map = map;
this.root = root;
this.from = from;
}
@Override
public boolean hasNext() {
if (!initialized) {
min(root, from);
initialized = true;
fetchNext();
}
return current != null;
}
@Override
public K next() {
hasNext();
K c = current;
last = current;
lastValue = currentValue;
lastPage = pos == null ? null : pos.page;
fetchNext();
return c;
}
/**
* Get the last read key if there was one.
*
* @return the key or null
*/
public K getKey() {
return last;
}
/**
* Get the last read value if there was one.
*
* @return the value or null
*/
public V getValue() {
return lastValue;
}
Page getPage() {
return lastPage;
}
/**
* Skip over the given number of entries. This method is relatively fast
* (for this map implementation) even if many entries need to be skipped.
*
* @param n the number of entries to skip
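* <p>
* For a small n the entries are fetched one by one; for a larger n the
* cursor computes the absolute key index and re-positions directly.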
*/
public void skip(long n) {
if (!hasNext()) {
return;
}
if (n < 10) {
while (n-- > 0) {
fetchNext();
}
return;
}
long index = map.getKeyIndex(current);
K k = map.getKey(index + n);
pos = null;
min(root, k);
fetchNext();
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException(
"Removing is not supported");
}
/**
* Fetch the next entry that is equal to or larger than the given key,
* starting from the given page. This method retains the stack.
*
* @param p the page to start
* @param from the key to search
*/
private void min(Page p, K from) {
while (true) {
if (p.isLeaf()) {
int x = from == null ? 0 : p.binarySearch(from);
if (x < 0) {
x = -x - 1;
}
pos = new CursorPos(p, x, pos);
break;
}
int x = from == null ? -1 : p.binarySearch(from);
if (x < 0) {
x = -x - 1;
} else {
x++;
}
pos = new CursorPos(p, x + 1, pos);
p = p.getChildPage(x);
}
}
/**
* Fetch the next entry if there is one.
*/
@SuppressWarnings("unchecked")
private void fetchNext() {
while (pos != null) {
if (pos.index < pos.page.getKeyCount()) {
int index = pos.index++;
current = (K) pos.page.getKey(index);
currentValue = (V) pos.page.getValue(index);
return;
}
pos = pos.parent;
if (pos == null) {
break;
}
if (pos.index < map.getChildPageCount(pos.page)) {
min(pos.page.getChildPage(pos.index++), null);
}
}
current = null;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/CursorPos.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
/**
* A position in a cursor
*/
public class CursorPos {
/**
* The current page.
*/
public Page page;
/**
* The current index.
*/
public int index;
/**
* The position in the parent page, if any.
*/
public final CursorPos parent;
public CursorPos(Page page, int index, CursorPos parent) {
this.page = page;
this.index = index;
this.parent = parent;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/DataUtils.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.h2.engine.Constants;
/**
* Utility methods
*/
public final class DataUtils {
/**
* An error occurred while reading from the file.
*/
public static final int ERROR_READING_FAILED = 1;
/**
* An error occurred when trying to write to the file.
*/
public static final int ERROR_WRITING_FAILED = 2;
/**
* An internal error occurred. This could be a bug, or a memory corruption
* (for example caused by out of memory).
*/
public static final int ERROR_INTERNAL = 3;
/**
* The object is already closed.
*/
public static final int ERROR_CLOSED = 4;
/**
* The file format is not supported.
*/
public static final int ERROR_UNSUPPORTED_FORMAT = 5;
/**
* The file is corrupt or (for encrypted files) the encryption key is wrong.
*/
public static final int ERROR_FILE_CORRUPT = 6;
/**
* The file is locked.
*/
public static final int ERROR_FILE_LOCKED = 7;
/**
* An error occurred when serializing or de-serializing.
*/
public static final int ERROR_SERIALIZATION = 8;
/**
* The application was trying to read data from a chunk that is no longer
* available.
*/
public static final int ERROR_CHUNK_NOT_FOUND = 9;
/**
* The block in the stream store was not found.
*/
public static final int ERROR_BLOCK_NOT_FOUND = 50;
/**
* The transaction store is corrupt.
*/
public static final int ERROR_TRANSACTION_CORRUPT = 100;
/**
* An entry is still locked by another transaction.
*/
public static final int ERROR_TRANSACTION_LOCKED = 101;
/**
* There are too many open transactions.
*/
public static final int ERROR_TOO_MANY_OPEN_TRANSACTIONS = 102;
/**
* The transaction store is in an illegal state (for example, not yet
* initialized).
*/
public static final int ERROR_TRANSACTION_ILLEGAL_STATE = 103;
/**
* The type for leaf page.
*/
public static final int PAGE_TYPE_LEAF = 0;
/**
* The type for node page.
*/
public static final int PAGE_TYPE_NODE = 1;
/**
* The bit mask for compressed pages (compression level fast).
*/
public static final int PAGE_COMPRESSED = 2;
/**
* The bit mask for compressed pages (compression level high).
*/
public static final int PAGE_COMPRESSED_HIGH = 2 + 4;
/**
* The maximum length of a variable size int.
*/
public static final int MAX_VAR_INT_LEN = 5;
/**
* The maximum length of a variable size long.
*/
public static final int MAX_VAR_LONG_LEN = 10;
/**
* The maximum integer that needs less space when using variable size
* encoding (only 3 bytes instead of 4).
*/
public static final int COMPRESSED_VAR_INT_MAX = 0x1fffff;
/**
* The maximum long that needs less space when using variable size
* encoding (only 7 bytes instead of 8).
*/
public static final long COMPRESSED_VAR_LONG_MAX = 0x1ffffffffffffL;
/**
* The estimated number of bytes used per page object.
*/
public static final int PAGE_MEMORY = 128;
/**
* The estimated number of bytes used per child entry.
*/
public static final int PAGE_MEMORY_CHILD = 16;
/**
* The marker size of a very large page.
*/
public static final int PAGE_LARGE = 2 * 1024 * 1024;
/**
* Get the length of the variable size int.
*
* @param x the value
* @return the length in bytes
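* <p>
* The variable size encoding stores 7 bits per byte, least significant
* group first, with the high bit set on all but the last byte. For
* example, 300 (binary 1_0010_1100) is encoded as the two bytes
* 0xAC 0x02.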
*/
public static int getVarIntLen(int x) {
if ((x & (-1 << 7)) == 0) {
return 1;
} else if ((x & (-1 << 14)) == 0) {
return 2;
} else if ((x & (-1 << 21)) == 0) {
return 3;
} else if ((x & (-1 << 28)) == 0) {
return 4;
}
return 5;
}
/**
* Get the length of the variable size long.
*
* @param x the value
* @return the length in bytes
*/
public static int getVarLongLen(long x) {
int i = 1;
while (true) {
x >>>= 7;
if (x == 0) {
return i;
}
i++;
}
}
/**
* Read a variable size int.
*
* @param buff the source buffer
* @return the value
*/
public static int readVarInt(ByteBuffer buff) {
int b = buff.get();
if (b >= 0) {
return b;
}
// a separate function so that this one can be inlined
return readVarIntRest(buff, b);
}
private static int readVarIntRest(ByteBuffer buff, int b) {
int x = b & 0x7f;
b = buff.get();
if (b >= 0) {
return x | (b << 7);
}
x |= (b & 0x7f) << 7;
b = buff.get();
if (b >= 0) {
return x | (b << 14);
}
x |= (b & 0x7f) << 14;
b = buff.get();
if (b >= 0) {
return x | b << 21;
}
x |= ((b & 0x7f) << 21) | (buff.get() << 28);
return x;
}
/**
* Read a variable size long.
*
* @param buff the source buffer
* @return the value
*/
public static long readVarLong(ByteBuffer buff) {
long x = buff.get();
if (x >= 0) {
return x;
}
x &= 0x7f;
for (int s = 7; s < 64; s += 7) {
long b = buff.get();
x |= (b & 0x7f) << s;
if (b >= 0) {
break;
}
}
return x;
}
/**
* Write a variable size int.
*
* @param out the output stream
* @param x the value
*/
public static void writeVarInt(OutputStream out, int x) throws IOException {
while ((x & ~0x7f) != 0) {
out.write((byte) (0x80 | (x & 0x7f)));
x >>>= 7;
}
out.write((byte) x);
}
/**
* Write a variable size int.
*
* @param buff the target buffer
* @param x the value
*/
public static void writeVarInt(ByteBuffer buff, int x) {
while ((x & ~0x7f) != 0) {
buff.put((byte) (0x80 | (x & 0x7f)));
x >>>= 7;
}
buff.put((byte) x);
}
/**
* Write characters from a string (without the length).
*
* @param buff the target buffer (must be large enough)
* @param s the string
* @param len the number of characters
*/
public static void writeStringData(ByteBuffer buff,
String s, int len) {
for (int i = 0; i < len; i++) {
int c = s.charAt(i);
if (c < 0x80) {
buff.put((byte) c);
} else if (c >= 0x800) {
buff.put((byte) (0xe0 | (c >> 12)));
buff.put((byte) (((c >> 6) & 0x3f)));
buff.put((byte) (c & 0x3f));
} else {
buff.put((byte) (0xc0 | (c >> 6)));
buff.put((byte) (c & 0x3f));
}
}
}
/**
* Read a string.
*
* @param buff the source buffer
* @param len the number of characters
* @return the value
*/
public static String readString(ByteBuffer buff, int len) {
char[] chars = new char[len];
for (int i = 0; i < len; i++) {
int x = buff.get() & 0xff;
if (x < 0x80) {
chars[i] = (char) x;
} else if (x >= 0xe0) {
chars[i] = (char) (((x & 0xf) << 12)
+ ((buff.get() & 0x3f) << 6) + (buff.get() & 0x3f));
} else {
chars[i] = (char) (((x & 0x1f) << 6) + (buff.get() & 0x3f));
}
}
return new String(chars);
}
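// Note (editorial assumption based on the code above): this is a compact
// UTF-8-like encoding, but not standard UTF-8, because the trailing bytes
// do not carry the 0x80 continuation bit and surrogate pairs are written
// as two separate 3-byte sequences. For example, 'A' (0x41) is the single
// byte 0x41, while the euro sign (0x20AC) becomes 0xE2, 0x02, 0x2C;
// readString reverses this:
// ((0xE2 & 0xF) << 12) + ((0x02 & 0x3F) << 6) + (0x2C & 0x3F) == 0x20AC.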
/**
* Write a variable size long.
*
* @param buff the target buffer
* @param x the value
*/
public static void writeVarLong(ByteBuffer buff, long x) {
while ((x & ~0x7f) != 0) {
buff.put((byte) (0x80 | (x & 0x7f)));
x >>>= 7;
}
buff.put((byte) x);
}
/**
* Write a variable size long.
*
* @param out the output stream
* @param x the value
*/
public static void writeVarLong(OutputStream out, long x)
throws IOException {
while ((x & ~0x7f) != 0) {
out.write((byte) (0x80 | (x & 0x7f)));
x >>>= 7;
}
out.write((byte) x);
}
/**
* Copy the elements of an array, with a gap.
*
* @param src the source array
* @param dst the target array
* @param oldSize the size of the old array
* @param gapIndex the index of the gap
*/
public static void copyWithGap(Object src, Object dst, int oldSize,
int gapIndex) {
if (gapIndex > 0) {
System.arraycopy(src, 0, dst, 0, gapIndex);
}
if (gapIndex < oldSize) {
System.arraycopy(src, gapIndex, dst, gapIndex + 1, oldSize
- gapIndex);
}
}
/**
* Copy the elements of an array, and remove one element.
*
* @param src the source array
* @param dst the target array
* @param oldSize the size of the old array
* @param removeIndex the index of the entry to remove
*/
public static void copyExcept(Object src, Object dst, int oldSize,
int removeIndex) {
if (removeIndex > 0 && oldSize > 0) {
System.arraycopy(src, 0, dst, 0, removeIndex);
}
if (removeIndex < oldSize) {
System.arraycopy(src, removeIndex + 1, dst, removeIndex, oldSize
- removeIndex - 1);
}
}
/**
* Read from a file channel until the buffer is full.
* The buffer is rewound after reading.
*
* @param file the file channel
* @param pos the absolute position within the file
* @param dst the byte buffer
* @throws IllegalStateException if some data could not be read
*/
public static void readFully(FileChannel file, long pos, ByteBuffer dst) {
try {
do {
int len = file.read(dst, pos);
if (len < 0) {
throw new EOFException();
}
pos += len;
} while (dst.remaining() > 0);
dst.rewind();
} catch (IOException e) {
long size;
try {
size = file.size();
} catch (IOException e2) {
size = -1;
}
throw newIllegalStateException(
ERROR_READING_FAILED,
"Reading from {0} failed; file length {1} " +
"read length {2} at {3}",
file, size, dst.remaining(), pos, e);
}
}
/**
* Write to a file channel.
*
* @param file the file channel
* @param pos the absolute position within the file
* @param src the source buffer
*/
public static void writeFully(FileChannel file, long pos, ByteBuffer src) {
try {
int off = 0;
do {
int len = file.write(src, pos + off);
off += len;
} while (src.remaining() > 0);
} catch (IOException e) {
throw newIllegalStateException(
ERROR_WRITING_FAILED,
"Writing to {0} failed; length {1} at {2}",
file, src.remaining(), pos, e);
}
}
/**
* Convert the length to a length code 0..31. 31 means more than 1 MB.
*
* @param len the length
* @return the length code
*/
public static int encodeLength(int len) {
if (len <= 32) {
return 0;
}
int code = Integer.numberOfLeadingZeros(len);
int remaining = len << (code + 1);
code += code;
if ((remaining & (1 << 31)) != 0) {
code--;
}
if ((remaining << 1) != 0) {
code--;
}
code = Math.min(31, 52 - code);
// alternative code (slower):
// int x = len;
// int shift = 0;
// while (x > 3) {
// shift++;
// x = (x >>> 1) + (x & 1);
// }
// shift = Math.max(0, shift - 4);
// int code = (shift << 1) + (x & 1);
// code = Math.min(31, code);
return code;
}
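// Worked example (illustrative): encodeLength(32) == 0 (maximum length 32),
// encodeLength(33) == 1 (maximum length 48), encodeLength(50) == 2 (maximum
// length 64); getPageMaxLength below decodes a code c back to
// (2 + (c & 1)) << ((c >> 1) + 4).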
/**
* Get the chunk id from the position.
*
* @param pos the position
* @return the chunk id
*/
public static int getPageChunkId(long pos) {
return (int) (pos >>> 38);
}
/**
* Get the maximum length for the given code.
* For the code 31, PAGE_LARGE is returned.
*
* @param pos the position
* @return the maximum length
*/
public static int getPageMaxLength(long pos) {
int code = (int) ((pos >> 1) & 31);
if (code == 31) {
return PAGE_LARGE;
}
return (2 + (code & 1)) << ((code >> 1) + 4);
}
/**
* Get the offset from the position.
*
* @param pos the position
* @return the offset
*/
public static int getPageOffset(long pos) {
return (int) (pos >> 6);
}
/**
* Get the page type from the position.
*
* @param pos the position
* @return the page type (PAGE_TYPE_NODE or PAGE_TYPE_LEAF)
*/
public static int getPageType(long pos) {
return ((int) pos) & 1;
}
/**
* Get the position of this page. The following information is encoded in
* the position: the chunk id, the offset, the maximum length, and the type
* (node or leaf).
*
* @param chunkId the chunk id
* @param offset the offset
* @param length the length
* @param type the page type (1 for node, 0 for leaf)
* @return the position
*/
public static long getPagePos(int chunkId, int offset,
int length, int type) {
long pos = (long) chunkId << 38;
pos |= (long) offset << 6;
pos |= encodeLength(length) << 1;
pos |= type;
return pos;
}
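// Worked example (illustrative): getPagePos(1, 64, 33, PAGE_TYPE_NODE)
// returns (1L << 38) | (64L << 6) | (1L << 1) | 1 == 0x4000001003L, because
// encodeLength(33) == 1. Decoding: getPageChunkId == 1, getPageOffset == 64,
// getPageMaxLength == 48, and getPageType == PAGE_TYPE_NODE.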
/**
* Calculate a check value for the given integer. A check value is meant to
* verify the data is consistent with a high probability, but not meant to
* protect against media failure or deliberate changes.
*
* @param x the value
* @return the check value
*/
public static short getCheckValue(int x) {
return (short) ((x >> 16) ^ x);
}
/**
* Append a map to the string builder, sorted by key.
*
* @param buff the target buffer
* @param map the map
* @return the string builder
*/
public static StringBuilder appendMap(StringBuilder buff, HashMap<String, ?> map) {
Object[] keys = map.keySet().toArray();
Arrays.sort(keys);
for (Object k : keys) {
String key = (String) k;
Object value = map.get(key);
if (value instanceof Long) {
appendMap(buff, key, (long) value);
} else if (value instanceof Integer) {
appendMap(buff, key, (int) value);
} else {
appendMap(buff, key, value.toString());
}
}
return buff;
}
private static StringBuilder appendMapKey(StringBuilder buff, String key) {
if (buff.length() > 0) {
buff.append(',');
}
return buff.append(key).append(':');
}
/**
* Append a key-value pair to the string builder. Keys may not contain a
* colon. Values that contain a comma or a double quote are enclosed in
* double quotes, with special characters escaped using a backslash.
*
* @param buff the target buffer
* @param key the key
* @param value the value
*/
public static void appendMap(StringBuilder buff, String key, String value) {
appendMapKey(buff, key);
if (value.indexOf(',') < 0 && value.indexOf('\"') < 0) {
buff.append(value);
} else {
buff.append('\"');
for (int i = 0, size = value.length(); i < size; i++) {
char c = value.charAt(i);
if (c == '\"') {
buff.append('\\');
}
buff.append(c);
}
buff.append('\"');
}
}
/**
* Append a key-value pair to the string builder. Keys may not contain a
* colon.
*
* @param buff the target buffer
* @param key the key
* @param value the value
*/
public static void appendMap(StringBuilder buff, String key, long value) {
appendMapKey(buff, key).append(Long.toHexString(value));
}
/**
* Append a key-value pair to the string builder. Keys may not contain a
* colon.
*
* @param buff the target buffer
* @param key the key
* @param value the value
*/
public static void appendMap(StringBuilder buff, String key, int value) {
appendMapKey(buff, key).append(Integer.toHexString(value));
}
/**
* @param buff output buffer, should be empty
* @param s parsed string
* @param i offset to parse from
* @param size stop offset (exclusive)
* @return new offset
*/
private static int parseMapValue(StringBuilder buff, String s, int i, int size) {
while (i < size) {
char c = s.charAt(i++);
if (c == ',') {
break;
} else if (c == '\"') {
while (i < size) {
c = s.charAt(i++);
if (c == '\\') {
if (i == size) {
throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s);
}
c = s.charAt(i++);
} else if (c == '\"') {
break;
}
buff.append(c);
}
} else {
buff.append(c);
}
}
return i;
}
/**
* Parse a key-value pair list.
*
* @param s the list
* @return the map
* @throws IllegalStateException if parsing failed
*/
public static HashMap<String, String> parseMap(String s) {
HashMap<String, String> map = new HashMap<>();
StringBuilder buff = new StringBuilder();
for (int i = 0, size = s.length(); i < size;) {
int startKey = i;
i = s.indexOf(':', i);
if (i < 0) {
throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s);
}
String key = s.substring(startKey, i++);
i = parseMapValue(buff, s, i, size);
map.put(key, buff.toString());
buff.setLength(0);
}
return map;
}
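// Illustrative example (not part of the original source): the serialized
// form is a comma separated list of key:value pairs, where numeric values
// are stored in hex and values containing a comma or quote are enclosed in
// double quotes. For example, parseMap("id:ff,name:data") returns
// {id=ff, name=data}, and parseMap("name:\"a,b\"") returns {name=a,b}.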
/**
* Parse a key-value pair list and checks its checksum.
*
* @param bytes encoded map
* @return the map without the {@code "fletcher"} mapping, or {@code null} if the checksum is wrong
* @throws IllegalStateException if parsing failed
*/
public static HashMap<String, String> parseChecksummedMap(byte[] bytes) {
int start = 0, end = bytes.length;
while (start < end && bytes[start] <= ' ') {
start++;
}
while (start < end && bytes[end - 1] <= ' ') {
end--;
}
String s = new String(bytes, start, end - start, StandardCharsets.ISO_8859_1);
HashMap<String, String> map = new HashMap<>();
StringBuilder buff = new StringBuilder();
for (int i = 0, size = s.length(); i < size;) {
int startKey = i;
i = s.indexOf(':', i);
if (i < 0) {
throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s);
}
if (i - startKey == 8 && s.regionMatches(startKey, "fletcher", 0, 8)) {
parseMapValue(buff, s, i + 1, size);
int check = (int) Long.parseLong(buff.toString(), 16);
if (check == getFletcher32(bytes, start, startKey - 1)) {
return map;
}
// Corrupted map
return null;
}
String key = s.substring(startKey, i++);
i = parseMapValue(buff, s, i, size);
map.put(key, buff.toString());
buff.setLength(0);
}
// Corrupted map
return null;
}
/**
* Parse a name from key-value pair list.
*
* @param s the list
* @return value of name item, or {@code null}
* @throws IllegalStateException if parsing failed
*/
public static String getMapName(String s) {
return getFromMap(s, "name");
}
/**
* Parse a specified pair from key-value pair list.
*
* @param s the list
* @param key the name of the key
* @return value of the specified item, or {@code null}
* @throws IllegalStateException if parsing failed
*/
public static String getFromMap(String s, String key) {
int keyLength = key.length();
for (int i = 0, size = s.length(); i < size;) {
int startKey = i;
i = s.indexOf(':', i);
if (i < 0) {
throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s);
}
if (i++ - startKey == keyLength && s.regionMatches(startKey, key, 0, keyLength)) {
StringBuilder buff = new StringBuilder();
parseMapValue(buff, s, i, size);
return buff.toString();
} else {
while (i < size) {
char c = s.charAt(i++);
if (c == ',') {
break;
} else if (c == '\"') {
while (i < size) {
c = s.charAt(i++);
if (c == '\\') {
if (i++ == size) {
throw newIllegalStateException(ERROR_FILE_CORRUPT, "Not a map: {0}", s);
}
} else if (c == '\"') {
break;
}
}
}
}
}
}
return null;
}
/**
* Calculate the Fletcher32 checksum.
*
* @param bytes the bytes
* @param offset initial offset
* @param length the message length (if odd, 0 is appended)
* @return the checksum
*/
public static int getFletcher32(byte[] bytes, int offset, int length) {
int s1 = 0xffff, s2 = 0xffff;
int i = offset, len = offset + (length & ~1);
while (i < len) {
// reduce after 360 words (each word is two bytes)
for (int end = Math.min(i + 720, len); i < end;) {
int x = ((bytes[i++] & 0xff) << 8) | (bytes[i++] & 0xff);
s2 += s1 += x;
}
s1 = (s1 & 0xffff) + (s1 >>> 16);
s2 = (s2 & 0xffff) + (s2 >>> 16);
}
if ((length & 1) != 0) {
// odd length: append 0
int x = (bytes[i] & 0xff) << 8;
s2 += s1 += x;
}
s1 = (s1 & 0xffff) + (s1 >>> 16);
s2 = (s2 & 0xffff) + (s2 >>> 16);
return (s2 << 16) | s1;
}
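// Worked example (illustrative): both sums start at 0xffff and are folded
// back into 16 bits, so getFletcher32(new byte[] { 1, 2 }, 0, 2) processes
// the single word 0x0102 and returns 0x01020102.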
/**
* Throw an IllegalArgumentException if the argument is invalid.
*
* @param test true if the argument is valid
* @param message the message
* @param arguments the arguments
* @throws IllegalArgumentException if the argument is invalid
*/
public static void checkArgument(boolean test, String message,
Object... arguments) {
if (!test) {
throw newIllegalArgumentException(message, arguments);
}
}
/**
* Create a new IllegalArgumentException.
*
* @param message the message
* @param arguments the arguments
* @return the exception
*/
public static IllegalArgumentException newIllegalArgumentException(
String message, Object... arguments) {
return initCause(new IllegalArgumentException(
formatMessage(0, message, arguments)),
arguments);
}
/**
* Create a new UnsupportedOperationException.
*
* @param message the message
* @return the exception
*/
public static UnsupportedOperationException
newUnsupportedOperationException(String message) {
return new UnsupportedOperationException(formatMessage(0, message));
}
/**
* Create a new IllegalStateException.
*
* @param errorCode the error code
* @param message the message
* @param arguments the arguments
* @return the exception
*/
public static IllegalStateException newIllegalStateException(
int errorCode, String message, Object... arguments) {
return initCause(new IllegalStateException(
formatMessage(errorCode, message, arguments)),
arguments);
}
private static <T extends Exception> T initCause(T e, Object... arguments) {
int size = arguments.length;
if (size > 0) {
Object o = arguments[size - 1];
if (o instanceof Throwable) {
e.initCause((Throwable) o);
}
}
return e;
}
/**
* Format an error message.
*
* @param errorCode the error code
* @param message the message
* @param arguments the arguments
* @return the formatted message
*/
public static String formatMessage(int errorCode, String message,
Object... arguments) {
// convert arguments to strings, to avoid locale specific formatting
arguments = arguments.clone();
for (int i = 0; i < arguments.length; i++) {
Object a = arguments[i];
if (!(a instanceof Exception)) {
String s = a == null ? "null" : a.toString();
if (s.length() > 1000) {
s = s.substring(0, 1000) + "...";
}
arguments[i] = s;
}
}
return MessageFormat.format(message, arguments) +
" [" + Constants.VERSION_MAJOR + "." +
Constants.VERSION_MINOR + "." + Constants.BUILD_ID +
"/" + errorCode + "]";
}
/**
* Get the error code from an exception message.
*
* @param m the message
* @return the error code, or 0 if none
*/
public static int getErrorCode(String m) {
if (m != null && m.endsWith("]")) {
int dash = m.lastIndexOf('/');
if (dash >= 0) {
String s = m.substring(dash + 1, m.length() - 1);
try {
return Integer.parseInt(s);
} catch (NumberFormatException e) {
// no error code
}
}
}
return 0;
}
/**
* Read a hex long value from a map.
*
* @param map the map
* @param key the key
* @param defaultValue if the value is null
* @return the parsed value
* @throws IllegalStateException if parsing fails
*/
public static long readHexLong(Map<String, ?> map, String key, long defaultValue) {
Object v = map.get(key);
if (v == null) {
return defaultValue;
} else if (v instanceof Long) {
return (Long) v;
}
try {
return parseHexLong((String) v);
} catch (NumberFormatException e) {
throw newIllegalStateException(ERROR_FILE_CORRUPT,
"Error parsing the value {0}", v, e);
}
}
/**
* Parse an unsigned, hex long.
*
* @param x the string
* @return the parsed value
* @throws IllegalStateException if parsing fails
*/
public static long parseHexLong(String x) {
try {
if (x.length() == 16) {
// avoid problems with overflow
// in Java 8, this special case is not needed
return (Long.parseLong(x.substring(0, 8), 16) << 32) |
Long.parseLong(x.substring(8, 16), 16);
}
return Long.parseLong(x, 16);
} catch (NumberFormatException e) {
throw newIllegalStateException(ERROR_FILE_CORRUPT,
"Error parsing the value {0}", x, e);
}
}
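// Worked example (illustrative): the 16-digit special case allows unsigned
// values that would overflow Long.parseLong, for example
// parseHexLong("ffffffffffffffff") == -1L.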
/**
* Parse an unsigned, hex int.
*
* @param x the string
* @return the parsed value
* @throws IllegalStateException if parsing fails
*/
public static int parseHexInt(String x) {
try {
// avoid problems with overflow
// in Java 8, we could use Integer.parseUnsignedInt(x, 16)
return (int) Long.parseLong(x, 16);
} catch (NumberFormatException e) {
throw newIllegalStateException(ERROR_FILE_CORRUPT,
"Error parsing the value {0}", x, e);
}
}
/**
* Read a hex int value from a map.
*
* @param map the map
* @param key the key
* @param defaultValue if the value is null
* @return the parsed value
* @throws IllegalStateException if parsing fails
*/
public static int readHexInt(HashMap<String, ?> map, String key, int defaultValue) {
Object v = map.get(key);
if (v == null) {
return defaultValue;
} else if (v instanceof Integer) {
return (Integer) v;
}
try {
// support unsigned hex value
return (int) Long.parseLong((String) v, 16);
} catch (NumberFormatException e) {
throw newIllegalStateException(ERROR_FILE_CORRUPT,
"Error parsing the value {0}", v, e);
}
}
/**
* An entry of a map.
*
* @param <K> the key type
* @param <V> the value type
*/
public static final class MapEntry<K, V> implements Map.Entry<K, V> {
private final K key;
private final V value;
public MapEntry(K key, V value) {
this.key = key;
this.value = value;
}
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return value;
}
@Override
public V setValue(V value) {
throw newUnsupportedOperationException("Updating the value is not supported");
}
}
/**
* Get the configuration parameter value, or default.
*
* @param config the configuration
* @param key the key
* @param defaultValue the default
* @return the configured value or default
*/
public static int getConfigParam(Map<String, ?> config, String key, int defaultValue) {
Object o = config.get(key);
if (o instanceof Number) {
return ((Number) o).intValue();
} else if (o != null) {
try {
return Integer.decode(o.toString());
} catch (NumberFormatException e) {
// ignore
}
}
return defaultValue;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/FileStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.util.concurrent.atomic.AtomicLong;
import org.h2.mvstore.cache.FilePathCache;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FilePathDisk;
import org.h2.store.fs.FilePathEncrypt;
import org.h2.store.fs.FilePathNio;
/**
* The default storage mechanism of the MVStore. This implementation persists
* data to a file. The file store is responsible for persisting data and for
* free space management.
*/
public class FileStore {
/**
* The number of read operations.
*/
protected final AtomicLong readCount = new AtomicLong(0);
/**
* The number of read bytes.
*/
protected final AtomicLong readBytes = new AtomicLong(0);
/**
* The number of write operations.
*/
protected final AtomicLong writeCount = new AtomicLong(0);
/**
* The number of written bytes.
*/
protected final AtomicLong writeBytes = new AtomicLong(0);
/**
* The free spaces between the chunks. The first block to use is block 2
* (the first two blocks are the store header).
*/
protected final FreeSpaceBitSet freeSpace =
new FreeSpaceBitSet(2, MVStore.BLOCK_SIZE);
/**
* The file name.
*/
protected String fileName;
/**
* Whether this store is read-only.
*/
protected boolean readOnly;
/**
* The file size (cached).
*/
protected long fileSize;
/**
* The file.
*/
protected FileChannel file;
/**
* The encrypted file (if encryption is used).
*/
protected FileChannel encryptedFile;
/**
* The file lock.
*/
protected FileLock fileLock;
@Override
public String toString() {
return fileName;
}
/**
* Read from the file.
*
* @param pos the read position
* @param len the number of bytes to read
* @return the byte buffer
*/
public ByteBuffer readFully(long pos, int len) {
ByteBuffer dst = ByteBuffer.allocate(len);
DataUtils.readFully(file, pos, dst);
readCount.incrementAndGet();
readBytes.addAndGet(len);
return dst;
}
/**
* Write to the file.
*
* @param pos the write position
* @param src the source buffer
*/
public void writeFully(long pos, ByteBuffer src) {
int len = src.remaining();
fileSize = Math.max(fileSize, pos + len);
DataUtils.writeFully(file, pos, src);
writeCount.incrementAndGet();
writeBytes.addAndGet(len);
}
/**
* Try to open the file.
*
* @param fileName the file name
* @param readOnly whether the file should be opened in read-only mode,
* even if the file is writable
* @param encryptionKey the encryption key, or null if encryption is not
* used
*/
public void open(String fileName, boolean readOnly, char[] encryptionKey) {
if (file != null) {
return;
}
if (fileName != null) {
// ensure the Cache file system is registered
FilePathCache.INSTANCE.getScheme();
FilePath p = FilePath.get(fileName);
// if no explicit scheme was specified, NIO is used
if (p instanceof FilePathDisk &&
!fileName.startsWith(p.getScheme() + ":")) {
// ensure the NIO file system is registered
FilePathNio.class.getName();
fileName = "nio:" + fileName;
}
}
this.fileName = fileName;
FilePath f = FilePath.get(fileName);
FilePath parent = f.getParent();
if (parent != null && !parent.exists()) {
throw DataUtils.newIllegalArgumentException(
"Directory does not exist: {0}", parent);
}
if (f.exists() && !f.canWrite()) {
readOnly = true;
}
this.readOnly = readOnly;
try {
file = f.open(readOnly ? "r" : "rw");
if (encryptionKey != null) {
byte[] key = FilePathEncrypt.getPasswordBytes(encryptionKey);
encryptedFile = file;
file = new FilePathEncrypt.FileEncrypt(fileName, key, file);
}
try {
if (readOnly) {
fileLock = file.tryLock(0, Long.MAX_VALUE, true);
} else {
fileLock = file.tryLock();
}
} catch (OverlappingFileLockException e) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_LOCKED,
"The file is locked: {0}", fileName, e);
}
if (fileLock == null) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_LOCKED,
"The file is locked: {0}", fileName);
}
fileSize = file.size();
} catch (IOException e) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_READING_FAILED,
"Could not open file {0}", fileName, e);
}
}
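// Illustrative usage sketch (editorial assumption; the store is normally
// opened and managed by MVStore rather than used directly):
//
// FileStore fs = new FileStore();
// fs.open("data.mv", false, null); // read-write, no encryption
// ByteBuffer header = fs.readFully(0, 4096);
// fs.close();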
/**
* Close this store.
*/
public void close() {
try {
if (fileLock != null) {
fileLock.release();
fileLock = null;
}
file.close();
freeSpace.clear();
} catch (Exception e) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_WRITING_FAILED,
"Closing failed for file {0}", fileName, e);
} finally {
file = null;
}
}
/**
* Flush all changes.
*/
public void sync() {
try {
file.force(true);
} catch (IOException e) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_WRITING_FAILED,
"Could not sync file {0}", fileName, e);
}
}
/**
* Get the file size.
*
* @return the file size
*/
public long size() {
return fileSize;
}
/**
* Truncate the file.
*
* @param size the new file size
*/
public void truncate(long size) {
try {
writeCount.incrementAndGet();
file.truncate(size);
fileSize = Math.min(fileSize, size);
} catch (IOException e) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_WRITING_FAILED,
"Could not truncate file {0} to size {1}",
fileName, size, e);
}
}
/**
* Get the file instance in use.
* <p>
* The application may read from the file (for example for online backup),
* but not write to it or truncate it.
*
* @return the file
*/
public FileChannel getFile() {
return file;
}
/**
* Get the encrypted file instance, if encryption is used.
* <p>
* The application may read from the file (for example for online backup),
* but not write to it or truncate it.
*
* @return the encrypted file, or null if encryption is not used
*/
public FileChannel getEncryptedFile() {
return encryptedFile;
}
/**
* Get the number of write operations since this store was opened.
* For file based stores, this is the number of file write operations.
*
* @return the number of write operations
*/
public long getWriteCount() {
return writeCount.get();
}
/**
* Get the number of written bytes since this store was opened.
*
* @return the number of written bytes
*/
public long getWriteBytes() {
return writeBytes.get();
}
/**
* Get the number of read operations since this store was opened.
* For file based stores, this is the number of file read operations.
*
* @return the number of read operations
*/
public long getReadCount() {
return readCount.get();
}
/**
* Get the number of read bytes since this store was opened.
*
* @return the number of read bytes
*/
public long getReadBytes() {
return readBytes.get();
}
public boolean isReadOnly() {
return readOnly;
}
/**
* Get the default retention time for this store in milliseconds.
*
* @return the retention time
*/
public int getDefaultRetentionTime() {
return 45_000;
}
/**
* Mark the space as in use.
*
* @param pos the position in bytes
* @param length the number of bytes
*/
public void markUsed(long pos, int length) {
freeSpace.markUsed(pos, length);
}
/**
* Allocate a number of blocks and mark them as used.
*
* @param length the number of bytes to allocate
* @return the start position in bytes
*/
public long allocate(int length) {
return freeSpace.allocate(length);
}
/**
* Mark the space as free.
*
* @param pos the position in bytes
* @param length the number of bytes
*/
public void free(long pos, int length) {
freeSpace.free(pos, length);
}
public int getFillRate() {
return freeSpace.getFillRate();
}
long getFirstFree() {
return freeSpace.getFirstFree();
}
long getFileLengthInUse() {
return freeSpace.getLastFree();
}
/**
* Mark the file as empty.
*/
public void clear() {
freeSpace.clear();
}
/**
* Get the file name.
*
* @return the file name
*/
public String getFileName() {
return fileName;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/FreeSpaceBitSet.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.util.BitSet;
import org.h2.util.MathUtils;
/**
* A free space bit set.
*/
public class FreeSpaceBitSet {
private static final boolean DETAILED_INFO = false;
/**
* The first usable block.
*/
private final int firstFreeBlock;
/**
* The block size in bytes.
*/
private final int blockSize;
/**
* The bit set.
*/
private final BitSet set = new BitSet();
/**
* Create a new free space map.
*
* @param firstFreeBlock the first free block
* @param blockSize the block size
*/
public FreeSpaceBitSet(int firstFreeBlock, int blockSize) {
this.firstFreeBlock = firstFreeBlock;
this.blockSize = blockSize;
clear();
}
/**
* Reset the free space map; only the header blocks before the first usable block remain marked as used.
*/
public void clear() {
set.clear();
set.set(0, firstFreeBlock);
}
/**
* Check whether one of the blocks is in use.
*
* @param pos the position in bytes
* @param length the number of bytes
* @return true if a block is in use
*/
public boolean isUsed(long pos, int length) {
int start = getBlock(pos);
int blocks = getBlockCount(length);
for (int i = start; i < start + blocks; i++) {
if (!set.get(i)) {
return false;
}
}
return true;
}
/**
* Check whether one of the blocks is free.
*
* @param pos the position in bytes
* @param length the number of bytes
* @return true if a block is free
*/
public boolean isFree(long pos, int length) {
int start = getBlock(pos);
int blocks = getBlockCount(length);
for (int i = start; i < start + blocks; i++) {
if (set.get(i)) {
return false;
}
}
return true;
}
/**
* Allocate a number of blocks and mark them as used.
*
* @param length the number of bytes to allocate
* @return the start position in bytes
*/
public long allocate(int length) {
int blocks = getBlockCount(length);
for (int i = 0;;) {
int start = set.nextClearBit(i);
int end = set.nextSetBit(start + 1);
if (end < 0 || end - start >= blocks) {
set.set(start, start + blocks);
return getPos(start);
}
i = end;
}
}
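// Worked example (illustrative): with new FreeSpaceBitSet(2, 4096), blocks
// 0 and 1 are reserved, so a fresh bit set returns allocate(8192) == 8192
// (blocks 2-3), and a subsequent allocate(4096) == 16384 (block 4):
// first-fit over the clear bits.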
/**
* Mark the space as in use.
*
* @param pos the position in bytes
* @param length the number of bytes
*/
public void markUsed(long pos, int length) {
int start = getBlock(pos);
int blocks = getBlockCount(length);
set.set(start, start + blocks);
}
/**
* Mark the space as free.
*
* @param pos the position in bytes
* @param length the number of bytes
*/
public void free(long pos, int length) {
int start = getBlock(pos);
int blocks = getBlockCount(length);
set.clear(start, start + blocks);
}
private long getPos(int block) {
return (long) block * (long) blockSize;
}
private int getBlock(long pos) {
return (int) (pos / blockSize);
}
private int getBlockCount(int length) {
return MathUtils.roundUpInt(length, blockSize) / blockSize;
}
/**
* Get the fill rate of the space in percent. The value 0 means the space is
* completely free, and 100 means it is completely full.
*
* @return the fill rate (0 - 100)
*/
public int getFillRate() {
int total = set.length(), count = 0;
for (int i = 0; i < total; i++) {
if (set.get(i)) {
count++;
}
}
if (count == 0) {
return 0;
}
return Math.max(1, (int) (100L * count / total));
}
/**
* Get the position of the first free space.
*
* @return the position.
*/
public long getFirstFree() {
return getPos(set.nextClearBit(0));
}
/**
* Get the start position of the trailing free space that extends to the end.
*
* @return the position.
*/
public long getLastFree() {
return getPos(set.previousSetBit(set.size()-1) + 1);
}
@Override
public String toString() {
StringBuilder buff = new StringBuilder();
if (DETAILED_INFO) {
int onCount = 0, offCount = 0;
int on = 0;
for (int i = 0; i < set.length(); i++) {
if (set.get(i)) {
onCount++;
on++;
} else {
offCount++;
}
if ((i & 1023) == 1023) {
buff.append(String.format("%3x", on)).append(' ');
on = 0;
}
}
buff.append('\n')
.append(" on ").append(onCount).append(" off ").append(offCount)
.append(' ').append(100 * onCount / (onCount+offCount)).append("% used ");
}
buff.append('[');
for (int i = 0;;) {
if (i > 0) {
buff.append(", ");
}
int start = set.nextClearBit(i);
buff.append(Integer.toHexString(start)).append('-');
int end = set.nextSetBit(start + 1);
if (end < 0) {
break;
}
buff.append(Integer.toHexString(end - 1));
i = end + 1;
}
buff.append(']');
return buff.toString();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/MVMap.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.util.AbstractList;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import org.h2.mvstore.type.DataType;
import org.h2.mvstore.type.ObjectDataType;
/**
* A stored map.
* <p>
* Read operations can happen concurrently with all other
* operations, without risk of corruption.
* <p>
* Write operations first read the relevant area from disk to memory
* concurrently, and only then modify the data. The in-memory part of write
* operations is synchronized. For scalable concurrent in-memory write
* operations, the map should be split into multiple smaller sub-maps that are
* then synchronized independently.
*
* @param <K> the key class
* @param <V> the value class
*/
public class MVMap<K, V> extends AbstractMap<K, V>
implements ConcurrentMap<K, V> {
/**
* The store.
*/
protected MVStore store;
/**
* The current root page (may not be null).
*/
protected volatile Page root;
/**
* The version used for writing.
*/
protected volatile long writeVersion;
private int id;
private long createVersion;
private final DataType keyType;
private final DataType valueType;
private final ConcurrentArrayList<Page> oldRoots =
new ConcurrentArrayList<>();
/**
* Whether the map is closed. Volatile so we don't accidentally write to a
* closed map in multithreaded mode.
*/
private volatile boolean closed;
private boolean readOnly;
private boolean isVolatile;
protected MVMap(DataType keyType, DataType valueType) {
this.keyType = keyType;
this.valueType = valueType;
}
/**
* Get the metadata key for the root of the given map id.
*
* @param mapId the map id
* @return the metadata key
*/
static String getMapRootKey(int mapId) {
return "root." + Integer.toHexString(mapId);
}
/**
* Get the metadata key for the given map id.
*
* @param mapId the map id
* @return the metadata key
*/
static String getMapKey(int mapId) {
return "map." + Integer.toHexString(mapId);
}
/**
* Open this map with the given store and configuration.
*
* @param store the store
* @param id map id
* @param createVersion version in which this map was created
*/
protected void init(MVStore store, int id, long createVersion) {
this.store = store;
this.id = id;
this.createVersion = createVersion;
this.writeVersion = store.getCurrentVersion();
this.root = Page.createEmpty(this, -1);
}
/**
* Add or replace a key-value pair.
*
* @param key the key (may not be null)
* @param value the value (may not be null)
* @return the old value if the key existed, or null otherwise
*/
@Override
@SuppressWarnings("unchecked")
public synchronized V put(K key, V value) {
DataUtils.checkArgument(value != null, "The value may not be null");
beforeWrite();
long v = writeVersion;
Page p = root.copy(v);
p = splitRootIfNeeded(p, v);
Object result = put(p, v, key, value);
newRoot(p);
return (V) result;
}
/**
* Split the root page if necessary.
*
* @param p the page
* @param writeVersion the write version
* @return the new root page
*/
protected Page splitRootIfNeeded(Page p, long writeVersion) {
if (p.getMemory() <= store.getPageSplitSize() || p.getKeyCount() <= 1) {
return p;
}
int at = p.getKeyCount() / 2;
long totalCount = p.getTotalCount();
Object k = p.getKey(at);
Page split = p.split(at);
Object[] keys = { k };
Page.PageReference[] children = {
new Page.PageReference(p, p.getPos(), p.getTotalCount()),
new Page.PageReference(split, split.getPos(), split.getTotalCount()),
};
p = Page.create(this, writeVersion,
keys, null,
children,
totalCount, 0);
return p;
}
/**
* Add or update a key-value pair.
*
* @param p the page
* @param writeVersion the write version
* @param key the key (may not be null)
* @param value the value (may not be null)
* @return the old value, or null
*/
protected Object put(Page p, long writeVersion, Object key, Object value) {
int index = p.binarySearch(key);
if (p.isLeaf()) {
if (index < 0) {
index = -index - 1;
p.insertLeaf(index, key, value);
return null;
}
return p.setValue(index, value);
}
// p is a node
if (index < 0) {
index = -index - 1;
} else {
index++;
}
Page c = p.getChildPage(index).copy(writeVersion);
if (c.getMemory() > store.getPageSplitSize() && c.getKeyCount() > 1) {
// split on the way down
int at = c.getKeyCount() / 2;
Object k = c.getKey(at);
Page split = c.split(at);
p.setChild(index, split);
p.insertNode(index, k, c);
// after the split it is not clear in which child to add, so retry
return put(p, writeVersion, key, value);
}
Object result = put(c, writeVersion, key, value);
p.setChild(index, c);
return result;
}
/**
* Get the first key, or null if the map is empty.
*
* @return the first key, or null
*/
public K firstKey() {
return getFirstLast(true);
}
/**
* Get the last key, or null if the map is empty.
*
* @return the last key, or null
*/
public K lastKey() {
return getFirstLast(false);
}
/**
* Get the key at the given index.
* <p>
* This is a O(log(size)) operation.
*
* @param index the index
* @return the key
*/
@SuppressWarnings("unchecked")
public K getKey(long index) {
if (index < 0 || index >= size()) {
return null;
}
Page p = root;
long offset = 0;
while (true) {
if (p.isLeaf()) {
if (index >= offset + p.getKeyCount()) {
return null;
}
return (K) p.getKey((int) (index - offset));
}
int i = 0, size = getChildPageCount(p);
for (; i < size; i++) {
long c = p.getCounts(i);
if (index < c + offset) {
break;
}
offset += c;
}
if (i == size) {
return null;
}
p = p.getChildPage(i);
}
}
/**
* Get the key list. The list is a read-only representation of all keys.
* <p>
* The get and indexOf methods are O(log(size)) operations. The result of
* indexOf is cast to an int.
*
* @return the key list
*/
public List<K> keyList() {
return new AbstractList<K>() {
@Override
public K get(int index) {
return getKey(index);
}
@Override
public int size() {
return MVMap.this.size();
}
@Override
@SuppressWarnings("unchecked")
public int indexOf(Object key) {
return (int) getKeyIndex((K) key);
}
};
}
/**
* Get the index of the given key in the map.
* <p>
* This is a O(log(size)) operation.
* <p>
* If the key was found, the returned value is the index in the key array.
* If not found, the returned value is negative, where -1 means the provided
* key is smaller than any key in the map. See also Arrays.binarySearch.
*
* @param key the key
* @return the index
*/
public long getKeyIndex(K key) {
if (size() == 0) {
return -1;
}
Page p = root;
long offset = 0;
while (true) {
int x = p.binarySearch(key);
if (p.isLeaf()) {
if (x < 0) {
return -offset + x;
}
return offset + x;
}
if (x < 0) {
x = -x - 1;
} else {
x++;
}
for (int i = 0; i < x; i++) {
offset += p.getCounts(i);
}
p = p.getChildPage(x);
}
}
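// Worked example (illustrative): for a map containing the keys 10 and 20,
// getKeyIndex(10) == 0, getKeyIndex(20) == 1, getKeyIndex(5) == -1, and
// getKeyIndex(15) == -2 (negative results encode the insertion point, as in
// Arrays.binarySearch).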
/**
* Get the first (lowest) or last (largest) key.
*
* @param first whether to retrieve the first key
* @return the key, or null if the map is empty
*/
@SuppressWarnings("unchecked")
protected K getFirstLast(boolean first) {
if (size() == 0) {
return null;
}
Page p = root;
while (true) {
if (p.isLeaf()) {
return (K) p.getKey(first ? 0 : p.getKeyCount() - 1);
}
p = p.getChildPage(first ? 0 : getChildPageCount(p) - 1);
}
}
/**
* Get the smallest key that is larger than the given key, or null if no
* such key exists.
*
* @param key the key
* @return the result
*/
public K higherKey(K key) {
return getMinMax(key, false, true);
}
/**
* Get the smallest key that is larger or equal to this key.
*
* @param key the key
* @return the result
*/
public K ceilingKey(K key) {
return getMinMax(key, false, false);
}
/**
* Get the largest key that is smaller or equal to this key.
*
* @param key the key
* @return the result
*/
public K floorKey(K key) {
return getMinMax(key, true, false);
}
/**
* Get the largest key that is smaller than the given key, or null if no
* such key exists.
*
* @param key the key
* @return the result
*/
public K lowerKey(K key) {
return getMinMax(key, true, true);
}
/**
* Get the smallest or largest key using the given bounds.
*
* @param key the key
* @param min whether to retrieve the smallest key
* @param excluding if the given upper/lower bound is exclusive
* @return the key, or null if no such key exists
*/
protected K getMinMax(K key, boolean min, boolean excluding) {
return getMinMax(root, key, min, excluding);
}
@SuppressWarnings("unchecked")
private K getMinMax(Page p, K key, boolean min, boolean excluding) {
if (p.isLeaf()) {
int x = p.binarySearch(key);
if (x < 0) {
x = -x - (min ? 2 : 1);
} else if (excluding) {
x += min ? -1 : 1;
}
if (x < 0 || x >= p.getKeyCount()) {
return null;
}
return (K) p.getKey(x);
}
int x = p.binarySearch(key);
if (x < 0) {
x = -x - 1;
} else {
x++;
}
while (true) {
if (x < 0 || x >= getChildPageCount(p)) {
return null;
}
K k = getMinMax(p.getChildPage(x), key, min, excluding);
if (k != null) {
return k;
}
x += min ? -1 : 1;
}
}
/**
* Get a value.
*
* @param key the key
* @return the value, or null if not found
*/
@Override
@SuppressWarnings("unchecked")
public V get(Object key) {
return (V) binarySearch(root, key);
}
/**
* Get the value for the given key, or null if not found.
*
* @param p the page
* @param key the key
* @return the value or null
*/
protected Object binarySearch(Page p, Object key) {
int x = p.binarySearch(key);
if (!p.isLeaf()) {
if (x < 0) {
x = -x - 1;
} else {
x++;
}
p = p.getChildPage(x);
return binarySearch(p, key);
}
if (x >= 0) {
return p.getValue(x);
}
return null;
}
@Override
public boolean containsKey(Object key) {
return get(key) != null;
}
/**
* Remove all entries.
*/
@Override
public synchronized void clear() {
beforeWrite();
root.removeAllRecursive();
newRoot(Page.createEmpty(this, writeVersion));
}
/**
* Close the map. Accessing the data is still possible (to allow concurrent
* reads), but it is marked as closed.
*/
void close() {
closed = true;
}
public boolean isClosed() {
return closed;
}
/**
* Remove a key-value pair, if the key exists.
*
* @param key the key (may not be null)
* @return the old value if the key existed, or null otherwise
*/
@Override
@SuppressWarnings("unchecked")
public V remove(Object key) {
beforeWrite();
V result = get(key);
if (result == null) {
return null;
}
long v = writeVersion;
synchronized (this) {
Page p = root.copy(v);
result = (V) remove(p, v, key);
if (!p.isLeaf() && p.getTotalCount() == 0) {
p.removePage();
p = Page.createEmpty(this, p.getVersion());
}
newRoot(p);
}
return result;
}
/**
* Add a key-value pair if it does not yet exist.
*
* @param key the key (may not be null)
* @param value the new value
* @return the old value if the key existed, or null otherwise
*/
@Override
public synchronized V putIfAbsent(K key, V value) {
V old = get(key);
if (old == null) {
put(key, value);
}
return old;
}
/**
* Remove a key-value pair if the value matches the stored one.
*
* @param key the key (may not be null)
* @param value the expected value
* @return true if the item was removed
*/
@Override
public synchronized boolean remove(Object key, Object value) {
V old = get(key);
if (areValuesEqual(old, value)) {
remove(key);
return true;
}
return false;
}
/**
* Check whether the two values are equal.
*
* @param a the first value
* @param b the second value
* @return true if they are equal
*/
public boolean areValuesEqual(Object a, Object b) {
if (a == b) {
return true;
} else if (a == null || b == null) {
return false;
}
return valueType.compare(a, b) == 0;
}
/**
* Replace a value for an existing key, if the value matches.
*
* @param key the key (may not be null)
* @param oldValue the expected value
* @param newValue the new value
* @return true if the value was replaced
*/
@Override
public synchronized boolean replace(K key, V oldValue, V newValue) {
V old = get(key);
if (areValuesEqual(old, oldValue)) {
put(key, newValue);
return true;
}
return false;
}
/**
* Replace a value for an existing key.
*
* @param key the key (may not be null)
* @param value the new value
* @return the old value, if the value was replaced, or null
*/
@Override
public synchronized V replace(K key, V value) {
V old = get(key);
if (old != null) {
put(key, value);
return old;
}
return null;
}
/**
* Remove a key-value pair.
*
* @param p the page (may not be null)
* @param writeVersion the write version
* @param key the key
* @return the old value, or null if the key did not exist
*/
protected Object remove(Page p, long writeVersion, Object key) {
int index = p.binarySearch(key);
Object result = null;
if (p.isLeaf()) {
if (index >= 0) {
result = p.getValue(index);
p.remove(index);
}
return result;
}
// node
if (index < 0) {
index = -index - 1;
} else {
index++;
}
Page cOld = p.getChildPage(index);
Page c = cOld.copy(writeVersion);
result = remove(c, writeVersion, key);
if (result == null || c.getTotalCount() != 0) {
// no change, or
// there are more nodes
p.setChild(index, c);
} else {
// this child was deleted
if (p.getKeyCount() == 0) {
p.setChild(index, c);
c.removePage();
} else {
p.remove(index);
}
}
return result;
}
/**
* Use the new root page from now on.
*
* @param newRoot the new root page
*/
protected void newRoot(Page newRoot) {
if (root != newRoot) {
removeUnusedOldVersions();
if (root.getVersion() != newRoot.getVersion()) {
Page last = oldRoots.peekLast();
if (last == null || last.getVersion() != root.getVersion()) {
oldRoots.add(root);
}
}
root = newRoot;
}
}
/**
* Compare two keys.
*
* @param a the first key
* @param b the second key
* @return -1 if the first key is smaller, 1 if bigger, 0 if equal
*/
int compare(Object a, Object b) {
return keyType.compare(a, b);
}
/**
* Get the key type.
*
* @return the key type
*/
public DataType getKeyType() {
return keyType;
}
/**
* Get the value type.
*
* @return the value type
*/
public DataType getValueType() {
return valueType;
}
/**
* Read a page.
*
* @param pos the position of the page
* @return the page
*/
Page readPage(long pos) {
return store.readPage(this, pos);
}
/**
* Set the position of the root page.
*
* @param rootPos the position, 0 for empty
* @param version the version of the root
*/
void setRootPos(long rootPos, long version) {
root = rootPos == 0 ? Page.createEmpty(this, -1) : readPage(rootPos);
root.setVersion(version);
}
/**
* Iterate over a number of keys.
*
* @param from the first key to return
* @return the iterator
*/
public Iterator<K> keyIterator(K from) {
return new Cursor<K, V>(this, root, from);
}
/**
* Re-write any pages that belong to one of the chunks in the given set.
*
* @param set the set of chunk ids
* @return whether rewriting was successful
*/
boolean rewrite(Set<Integer> set) {
// read from old version, to avoid concurrent reads
long previousVersion = store.getCurrentVersion() - 1;
if (previousVersion < createVersion) {
// a new map
return true;
}
MVMap<K, V> readMap;
try {
readMap = openVersion(previousVersion);
} catch (IllegalArgumentException e) {
// unknown version: ok
// TODO should not rely on exception handling
return true;
}
try {
rewrite(readMap.root, set);
return true;
} catch (IllegalStateException e) {
// TODO should not rely on exception handling
if (DataUtils.getErrorCode(e.getMessage()) == DataUtils.ERROR_CHUNK_NOT_FOUND) {
// ignore
return false;
}
throw e;
}
}
private int rewrite(Page p, Set<Integer> set) {
if (p.isLeaf()) {
long pos = p.getPos();
int chunkId = DataUtils.getPageChunkId(pos);
if (!set.contains(chunkId)) {
return 0;
}
if (p.getKeyCount() > 0) {
@SuppressWarnings("unchecked")
K key = (K) p.getKey(0);
V value = get(key);
if (value != null) {
replace(key, value, value);
}
}
return 1;
}
int writtenPageCount = 0;
for (int i = 0; i < getChildPageCount(p); i++) {
long childPos = p.getChildPagePos(i);
if (childPos != 0 && DataUtils.getPageType(childPos) == DataUtils.PAGE_TYPE_LEAF) {
// we would need to load the page, and it's a leaf:
// only do that if it's within the set of chunks we are
// interested in
int chunkId = DataUtils.getPageChunkId(childPos);
if (!set.contains(chunkId)) {
continue;
}
}
writtenPageCount += rewrite(p.getChildPage(i), set);
}
if (writtenPageCount == 0) {
long pos = p.getPos();
int chunkId = DataUtils.getPageChunkId(pos);
if (set.contains(chunkId)) {
// an inner node page that is in one of the chunks,
// but only points to chunks that are not in the set:
// if no child was changed, we need to do that now
// (this is not needed if one of the children was
// changed anyway, as that would have updated this
// page as well)
Page p2 = p;
while (!p2.isLeaf()) {
p2 = p2.getChildPage(0);
}
@SuppressWarnings("unchecked")
K key = (K) p2.getKey(0);
V value = get(key);
if (value != null) {
replace(key, value, value);
}
writtenPageCount++;
}
}
return writtenPageCount;
}
/**
* Get a cursor to iterate over a number of keys and values.
*
* @param from the first key to return
* @return the cursor
*/
public Cursor<K, V> cursor(K from) {
return new Cursor<>(this, root, from);
}
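// Illustrative usage sketch (editorial assumption): iterate over all
// entries with keys greater than or equal to a starting key:
//
// Cursor<String, String> c = map.cursor("k1");
// while (c.hasNext()) {
//     String key = c.next();
//     String value = c.getValue();
// }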
@Override
public Set<Map.Entry<K, V>> entrySet() {
final MVMap<K, V> map = this;
final Page root = this.root;
return new AbstractSet<Entry<K, V>>() {
@Override
public Iterator<Entry<K, V>> iterator() {
final Cursor<K, V> cursor = new Cursor<>(map, root, null);
return new Iterator<Entry<K, V>>() {
@Override
public boolean hasNext() {
return cursor.hasNext();
}
@Override
public Entry<K, V> next() {
K k = cursor.next();
return new DataUtils.MapEntry<>(k, cursor.getValue());
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException(
"Removing is not supported");
}
};
}
@Override
public int size() {
return MVMap.this.size();
}
@Override
public boolean contains(Object o) {
return MVMap.this.containsKey(o);
}
};
}
@Override
public Set<K> keySet() {
final MVMap<K, V> map = this;
final Page root = this.root;
return new AbstractSet<K>() {
@Override
public Iterator<K> iterator() {
return new Cursor<K, V>(map, root, null);
}
@Override
public int size() {
return MVMap.this.size();
}
@Override
public boolean contains(Object o) {
return MVMap.this.containsKey(o);
}
};
}
/**
* Get the root page.
*
* @return the root page
*/
public Page getRoot() {
return root;
}
/**
* Get the map name.
*
* @return the name
*/
public String getName() {
return store.getMapName(id);
}
public MVStore getStore() {
return store;
}
/**
* Get the map id. Please note the map id may be different after compacting
* a store.
*
* @return the map id
*/
public int getId() {
return id;
}
/**
* Rollback to the given version.
*
* @param version the version
*/
void rollbackTo(long version) {
beforeWrite();
if (version <= createVersion) {
// the map is removed later
} else if (root.getVersion() >= version) {
while (true) {
Page last = oldRoots.peekLast();
if (last == null) {
break;
}
// slow, but rollback is not a common operation
oldRoots.removeLast(last);
root = last;
if (root.getVersion() < version) {
break;
}
}
}
}
/**
* Forget those old versions that are no longer needed.
*/
void removeUnusedOldVersions() {
long oldest = store.getOldestVersionToKeep();
if (oldest == -1) {
return;
}
Page last = oldRoots.peekLast();
while (true) {
Page p = oldRoots.peekFirst();
if (p == null || p.getVersion() >= oldest || p == last) {
break;
}
oldRoots.removeFirst(p);
}
}
public boolean isReadOnly() {
return readOnly;
}
/**
* Set the volatile flag of the map.
*
* @param isVolatile the volatile flag
*/
public void setVolatile(boolean isVolatile) {
this.isVolatile = isVolatile;
}
/**
* Whether this is a volatile map, meaning that changes
* are not persisted. By default (even if the store is not persisted),
* maps are not volatile.
*
* @return whether this map is volatile
*/
public boolean isVolatile() {
return isVolatile;
}
/**
* This method is called before writing to the map. The default
* implementation checks whether writing is allowed, and tries
* to detect concurrent modification.
*
* @throws UnsupportedOperationException if the map is read-only,
* or if another thread is concurrently writing
*/
protected void beforeWrite() {
if (closed) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_CLOSED, "This map is closed");
}
if (readOnly) {
throw DataUtils.newUnsupportedOperationException(
"This map is read-only");
}
store.beforeWrite(this);
}
@Override
public int hashCode() {
return id;
}
@Override
public boolean equals(Object o) {
return this == o;
}
/**
* Get the number of entries, as an integer. Integer.MAX_VALUE is returned
* if there are more entries than that.
*
* @return the number of entries, as an integer
*/
@Override
public int size() {
long size = sizeAsLong();
return size > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) size;
}
/**
* Get the number of entries, as a long.
*
* @return the number of entries
*/
public long sizeAsLong() {
return root.getTotalCount();
}
@Override
public boolean isEmpty() {
// could also use (sizeAsLong() == 0)
return root.isLeaf() && root.getKeyCount() == 0;
}
public long getCreateVersion() {
return createVersion;
}
/**
* Remove the given page (make the space available).
*
* @param pos the position of the page to remove
* @param memory the number of bytes used for this page
*/
protected void removePage(long pos, int memory) {
store.removePage(this, pos, memory);
}
/**
* Open an old version for the given map.
*
* @param version the version
* @return the map
*/
public MVMap<K, V> openVersion(long version) {
if (readOnly) {
throw DataUtils.newUnsupportedOperationException(
"This map is read-only; need to call " +
"the method on the writable map");
}
DataUtils.checkArgument(version >= createVersion,
"Unknown version {0}; this map was created in version is {1}",
version, createVersion);
Page newest = null;
// need to copy because it can change
Page r = root;
if (version >= r.getVersion() &&
(version == writeVersion ||
r.getVersion() >= 0 ||
version <= createVersion ||
store.getFileStore() == null)) {
newest = r;
} else {
Page last = oldRoots.peekFirst();
if (last == null || version < last.getVersion()) {
// smaller than all in-memory versions
return store.openMapVersion(version, id, this);
}
Iterator<Page> it = oldRoots.iterator();
while (it.hasNext()) {
Page p = it.next();
if (p.getVersion() > version) {
break;
}
last = p;
}
newest = last;
}
MVMap<K, V> m = openReadOnly();
m.root = newest;
return m;
}
/**
* Open a copy of the map in read-only mode.
*
* @return the opened map
*/
MVMap<K, V> openReadOnly() {
MVMap<K, V> m = new MVMap<>(keyType, valueType);
m.readOnly = true;
m.init(store, id, createVersion);
m.root = root;
return m;
}
public long getVersion() {
return root.getVersion();
}
/**
* Get the child page count for this page. This is to allow another map
* implementation to override the default, in case the last child is not to
* be used.
*
* @param p the page
* @return the number of direct children
*/
protected int getChildPageCount(Page p) {
return p.getRawChildPageCount();
}
/**
* Get the map type. When opening an existing map, the map type must match.
*
* @return the map type
*/
public String getType() {
return null;
}
/**
* Get the map metadata as a string.
*
* @param name the map name (or null)
* @return the string
*/
String asString(String name) {
StringBuilder buff = new StringBuilder();
if (name != null) {
DataUtils.appendMap(buff, "name", name);
}
if (createVersion != 0) {
DataUtils.appendMap(buff, "createVersion", createVersion);
}
String type = getType();
if (type != null) {
DataUtils.appendMap(buff, "type", type);
}
return buff.toString();
}
void setWriteVersion(long writeVersion) {
this.writeVersion = writeVersion;
}
/**
* Copy a map. All pages are copied.
*
* @param sourceMap the source map
*/
void copyFrom(MVMap<K, V> sourceMap) {
beforeWrite();
newRoot(copy(sourceMap.root, null));
}
private Page copy(Page source, CursorPos parent) {
Page target = Page.create(this, writeVersion, source);
if (source.isLeaf()) {
Page child = target;
for (CursorPos p = parent; p != null; p = p.parent) {
p.page.setChild(p.index, child);
p.page = p.page.copy(writeVersion);
child = p.page;
if (p.parent == null) {
newRoot(p.page);
beforeWrite();
}
}
} else {
// temporarily, replace child pages with empty pages,
// to ensure there are no links to the old store
for (int i = 0; i < getChildPageCount(target); i++) {
target.setChild(i, null);
}
CursorPos pos = new CursorPos(target, 0, parent);
for (int i = 0; i < getChildPageCount(target); i++) {
pos.index = i;
long p = source.getChildPagePos(i);
if (p != 0) {
// p == 0 means no child
// (for example the last entry of an r-tree node)
// (the MVMap is also used for r-trees for compacting)
copy(source.getChildPage(i), pos);
}
}
target = pos.page;
}
return target;
}
@Override
public String toString() {
return asString(null);
}
/**
* A builder for maps.
*
* @param <M> the map type
* @param <K> the key type
* @param <V> the value type
*/
public interface MapBuilder<M extends MVMap<K, V>, K, V> {
/**
* Create a new map of the given type.
*
* @return the map
*/
M create();
}
/**
* A builder for this class.
*
* @param <K> the key type
* @param <V> the value type
*/
public static class Builder<K, V> implements MapBuilder<MVMap<K, V>, K, V> {
protected DataType keyType;
protected DataType valueType;
/**
* Create a new builder with the default key and value data types.
*/
public Builder() {
// ignore
}
/**
* Set the key data type.
*
* @param keyType the key type
* @return this
*/
public Builder<K, V> keyType(DataType keyType) {
this.keyType = keyType;
return this;
}
public DataType getKeyType() {
return keyType;
}
public DataType getValueType() {
return valueType;
}
/**
* Set the value data type.
*
* @param valueType the value type
* @return this
*/
public Builder<K, V> valueType(DataType valueType) {
this.valueType = valueType;
return this;
}
@Override
public MVMap<K, V> create() {
if (keyType == null) {
keyType = new ObjectDataType();
}
if (valueType == null) {
valueType = new ObjectDataType();
}
return new MVMap<>(keyType, valueType);
}
}
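// Usage sketch (illustrative): building a map with explicit data types.
// ObjectDataType is what create() falls back to when no type is set.
// An open MVStore named "store" is assumed:
//
//     MVMap.Builder<String, Integer> b = new MVMap.Builder<String, Integer>()
//             .keyType(new ObjectDataType())
//             .valueType(new ObjectDataType());
//     MVMap<String, Integer> map = store.openMap("data", b);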
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/MVMapConcurrent.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import org.h2.mvstore.type.DataType;
import org.h2.mvstore.type.ObjectDataType;
/**
* A class used for backward compatibility.
*
* @param <K> the key type
* @param <V> the value type
*/
public class MVMapConcurrent<K, V> extends MVMap<K, V> {
public MVMapConcurrent(DataType keyType, DataType valueType) {
super(keyType, valueType);
}
/**
* A builder for this class.
*
* @param <K> the key type
* @param <V> the value type
*/
public static class Builder<K, V> implements
MapBuilder<MVMapConcurrent<K, V>, K, V> {
protected DataType keyType;
protected DataType valueType;
/**
* Create a new builder with the default key and value data types.
*/
public Builder() {
// ignore
}
/**
* Set the key data type.
*
* @param keyType the key type
* @return this
*/
public Builder<K, V> keyType(DataType keyType) {
this.keyType = keyType;
return this;
}
/**
* Set the value data type.
*
* @param valueType the value type
* @return this
*/
public Builder<K, V> valueType(DataType valueType) {
this.valueType = valueType;
return this;
}
@Override
public MVMapConcurrent<K, V> create() {
if (keyType == null) {
keyType = new ObjectDataType();
}
if (valueType == null) {
valueType = new ObjectDataType();
}
return new MVMapConcurrent<>(keyType, valueType);
}
}
}
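// Usage sketch (illustrative): MVMapConcurrent only exists so that code
// written against the older API keeps compiling; new code can use
// MVMap.Builder directly. An open MVStore named "store" is assumed:
//
//     MVMapConcurrent<String, String> map = store.openMap("data",
//             new MVMapConcurrent.Builder<String, String>());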
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/MVStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.lang.Thread.UncaughtExceptionHandler;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.h2.compress.CompressDeflate;
import org.h2.compress.CompressLZF;
import org.h2.compress.Compressor;
import org.h2.mvstore.Page.PageChildren;
import org.h2.mvstore.cache.CacheLongKeyLIRS;
import org.h2.mvstore.type.StringDataType;
import org.h2.util.MathUtils;
import org.h2.util.New;
/*
TODO:
Documentation
- rolling docs review: at "Metadata Map"
- better document that writes are in background thread
- better document how to do non-unique indexes
- document pluggable store and OffHeapStore
TransactionStore:
- ability to disable the transaction log,
if there is only one connection
MVStore:
- better and clearer memory usage accounting rules
(heap memory versus disk memory), so that there is never
an out-of-memory situation even for a small heap, and so
that chunks are still relatively big on average
- make sure serialization / deserialization errors don't corrupt the file
- test and possibly improve compact operation (for large dbs)
- automated 'kill process' and 'power failure' test
- defragment (re-creating maps, specially those with small pages)
- store number of write operations per page (maybe defragment
if much different than count)
- r-tree: nearest neighbor search
- use a small object value cache (StringCache), test on Android
for default serialization
- MVStoreTool.dump should dump the data if possible;
possibly using a callback for serialization
- implement a sharded map (in one store, multiple stores)
to support concurrent updates and writes, and very large maps
- to save space when persisting very small transactions,
use a transaction log where only the deltas are stored
- serialization for lists, sets, sorted sets, maps, sorted maps
- maybe rename 'rollback' to 'revert' to distinguish from transactions
- support other compression algorithms (deflate, LZ4,...)
- remove features that are not really needed; simplify the code
possibly using a separate layer or tools
(retainVersion?)
- optional pluggable checksum mechanism (per page), which
requires that everything is a page (including headers)
- rename "store" to "save", as "store" is used in "storeVersion"
- rename setStoreVersion to setDataVersion, setSchemaVersion or similar
- temporary file storage
- simple rollback method (rollback to last committed version)
- MVMap to implement SortedMap, then NavigableMap
- storage that splits database into multiple files,
to speed up compact and allow using trim
(by truncating / deleting empty files)
- add new feature to the file system API to avoid copying data
(reads that returns a ByteBuffer instead of writing into one)
for memory mapped files and off-heap storage
- support log structured merge style operations (blind writes)
using one map per level plus bloom filter
- have a strict call order MVStore -> MVMap -> Page -> FileStore
- autocommit commits, stores, and compacts from time to time;
the background thread should wait at least 90% of the
configured write delay to store changes
- compact* should also store uncommitted changes (if there are any)
- write a LSM-tree (log structured merge tree) utility on top of the MVStore
with blind writes and/or a bloom filter that
internally uses regular maps and merge sort
- chunk metadata: maybe split into static and variable,
or use a small page size for metadata
- data type "string": maybe use prefix compression for keys
- test chunk id rollover
- feature to auto-compact from time to time and on close
- compact very small chunks
- Page: to save memory, combine keys & values into one array
(also children & counts). Maybe remove some other
fields (childrenCount for example)
- Support SortedMap for MVMap
- compact: copy whole pages (without having to open all maps)
- maybe change the length code to have lower gaps
- test with very low limits (such as: short chunks, small pages)
- maybe allow to read beyond the retention time:
when compacting, move live pages in old chunks
to a map (possibly the metadata map) -
this requires a change in the compaction code, plus
a map lookup when reading old data; also, this
old data map needs to be cleaned up somehow;
maybe using an additional timeout
- rollback of removeMap should restore the data -
which has big consequences, as the metadata map
would probably need references to the root nodes of all maps
*/
/**
* A persistent storage for maps.
*/
public final class MVStore {
/**
* Whether assertions are enabled.
*/
public static final boolean ASSERT = false;
/**
* The block size (physical sector size) of the disk. The store header is
* written twice, one copy in each block, to ensure it survives a crash.
*/
static final int BLOCK_SIZE = 4 * 1024;
private static final int FORMAT_WRITE = 1;
private static final int FORMAT_READ = 1;
/**
* Used to mark a chunk as free, when it was detected that live bookkeeping
* is incorrect.
*/
private static final int MARKED_FREE = 10_000_000;
/**
* The background thread, if any.
*/
volatile BackgroundWriterThread backgroundWriterThread;
private volatile boolean reuseSpace = true;
private volatile boolean closed;
private final FileStore fileStore;
private final boolean fileStoreIsProvided;
private final int pageSplitSize;
/**
* The page cache. The default size is 16 MB, and the average size is 2 KB.
* It is split in 16 segments. The stack move distance is 2% of the expected
* number of entries.
*/
private final CacheLongKeyLIRS<Page> cache;
/**
* The page chunk references cache. The default size is 4 MB, and the
* average size is 2 KB. It is split in 16 segments. The stack move distance
* is 2% of the expected number of entries.
*/
private final CacheLongKeyLIRS<PageChildren> cacheChunkRef;
/**
* The newest chunk. If nothing was stored yet, this field is not set.
*/
private Chunk lastChunk;
/**
* The map of chunks.
*/
private final ConcurrentHashMap<Integer, Chunk> chunks =
new ConcurrentHashMap<>();
/**
* The map of temporarily freed storage space caused by freed pages. The key
* is the unsaved version, the value is the map of chunks. The maps contain
* the number of freed entries per chunk.
* <p>
* Access is partially synchronized, hence the need for concurrent maps.
* Sometimes we hold the MVStore lock, sometimes the MVMap lock, and sometimes
* we even sync on the ConcurrentHashMap<Integer, Chunk> object.
*/
private final ConcurrentHashMap<Long,
ConcurrentHashMap<Integer, Chunk>> freedPageSpace =
new ConcurrentHashMap<>();
/**
* The metadata map. Write access to this map needs to be synchronized on
* the store.
*/
private final MVMap<String, String> meta;
private final ConcurrentHashMap<Integer, MVMap<?, ?>> maps =
new ConcurrentHashMap<>();
private final HashMap<String, Object> storeHeader = new HashMap<>();
private WriteBuffer writeBuffer;
private int lastMapId;
private int versionsToKeep = 5;
/**
* The compression level for new pages (0 for disabled, 1 for fast, 2 for
* high). Even if disabled, the store may contain (old) compressed pages.
*/
private final int compressionLevel;
private Compressor compressorFast;
private Compressor compressorHigh;
private final UncaughtExceptionHandler backgroundExceptionHandler;
private volatile long currentVersion;
/**
* The version of the last stored chunk, or -1 if nothing was stored so far.
*/
private long lastStoredVersion;
/**
* The estimated memory used by unsaved pages. This number is not accurate,
* both because it may be changed concurrently and because temporary pages
* are counted as well.
*/
private int unsavedMemory;
private final int autoCommitMemory;
private boolean saveNeeded;
/**
* The time the store was created, in milliseconds since 1970.
*/
private long creationTime;
/**
* How long to retain old, persisted chunks, in milliseconds. For values
* greater than or equal to zero, a chunk is never directly overwritten if
* unused; instead, its unused field is set. For values smaller than zero,
* chunks are directly overwritten if unused.
*/
private int retentionTime;
private long lastCommitTime;
/**
* The earliest chunk to retain, if any.
*/
private Chunk retainChunk;
/**
* The version of the current store operation (if any).
*/
private volatile long currentStoreVersion = -1;
private Thread currentStoreThread;
private volatile boolean metaChanged;
/**
* The delay in milliseconds to automatically commit and write changes.
*/
private int autoCommitDelay;
private final int autoCompactFillRate;
private long autoCompactLastFileOpCount;
private final Object compactSync = new Object();
private IllegalStateException panicException;
private long lastTimeAbsolute;
private long lastFreeUnusedChunks;
/**
* Create and open the store.
*
* @param config the configuration to use
* @throws IllegalStateException if the file is corrupt, or an exception
* occurred while opening
* @throws IllegalArgumentException if the directory does not exist
*/
MVStore(Map<String, Object> config) {
this.compressionLevel = DataUtils.getConfigParam(config, "compress", 0);
String fileName = (String) config.get("fileName");
FileStore fileStore = (FileStore) config.get("fileStore");
fileStoreIsProvided = fileStore != null;
if (fileStore == null && fileName != null) {
fileStore = new FileStore();
}
this.fileStore = fileStore;
int pgSplitSize = 48; // for "mem:" case it is # of keys
CacheLongKeyLIRS.Config cc = null;
if (this.fileStore != null) {
int mb = DataUtils.getConfigParam(config, "cacheSize", 16);
if (mb > 0) {
cc = new CacheLongKeyLIRS.Config();
cc.maxMemory = mb * 1024L * 1024L;
Object o = config.get("cacheConcurrency");
if (o != null) {
cc.segmentCount = (Integer)o;
}
}
pgSplitSize = 16 * 1024;
}
if (cc != null) {
cache = new CacheLongKeyLIRS<>(cc);
cc.maxMemory /= 4;
cacheChunkRef = new CacheLongKeyLIRS<>(cc);
} else {
cache = null;
cacheChunkRef = null;
}
pgSplitSize = DataUtils.getConfigParam(config, "pageSplitSize", pgSplitSize);
// Make sure pages will fit into cache
if (cache != null && pgSplitSize > cache.getMaxItemSize()) {
pgSplitSize = (int)cache.getMaxItemSize();
}
pageSplitSize = pgSplitSize;
backgroundExceptionHandler =
(UncaughtExceptionHandler)config.get("backgroundExceptionHandler");
meta = new MVMap<>(StringDataType.INSTANCE,
StringDataType.INSTANCE);
meta.init(this, 0, currentVersion);
if (this.fileStore != null) {
retentionTime = this.fileStore.getDefaultRetentionTime();
int kb = DataUtils.getConfigParam(config, "autoCommitBufferSize", 1024);
// 19 KB memory is about 1 KB storage
autoCommitMemory = kb * 1024 * 19;
autoCompactFillRate = DataUtils.getConfigParam(config, "autoCompactFillRate", 40);
char[] encryptionKey = (char[]) config.get("encryptionKey");
try {
if (!fileStoreIsProvided) {
boolean readOnly = config.containsKey("readOnly");
this.fileStore.open(fileName, readOnly, encryptionKey);
}
if (this.fileStore.size() == 0) {
creationTime = getTimeAbsolute();
lastCommitTime = creationTime;
storeHeader.put("H", 2);
storeHeader.put("blockSize", BLOCK_SIZE);
storeHeader.put("format", FORMAT_WRITE);
storeHeader.put("created", creationTime);
writeStoreHeader();
} else {
readStoreHeader();
}
} catch (IllegalStateException e) {
panic(e);
} finally {
if (encryptionKey != null) {
Arrays.fill(encryptionKey, (char) 0);
}
}
lastCommitTime = getTimeSinceCreation();
// setAutoCommitDelay starts the thread, but only if
// the parameter is different from the old value
int delay = DataUtils.getConfigParam(config, "autoCommitDelay", 1000);
setAutoCommitDelay(delay);
} else {
autoCommitMemory = 0;
autoCompactFillRate = 0;
}
}
private void panic(IllegalStateException e) {
handleException(e);
panicException = e;
closeImmediately();
throw e;
}
/**
* Open a store in exclusive mode. For a file-based store, the parent
* directory must already exist.
*
* @param fileName the file name (null for in-memory)
* @return the store
*/
public static MVStore open(String fileName) {
HashMap<String, Object> config = new HashMap<>();
config.put("fileName", fileName);
return new MVStore(config);
}
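// Usage sketch (illustrative): open a file-based store, or pass null for a
// purely in-memory store. The file name here is just an example:
//
//     MVStore store = MVStore.open("/tmp/sample.mv.db");
//     try {
//         MVMap<String, Integer> map = store.openMap("data");
//         map.put("answer", 42);
//     } finally {
//         store.close();
//     }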
/**
* Open an old, stored version of a map.
*
* @param version the version
* @param mapId the map id
* @param template the template map
* @return the read-only map
*/
@SuppressWarnings("unchecked")
<T extends MVMap<?, ?>> T openMapVersion(long version, int mapId,
MVMap<?, ?> template) {
MVMap<String, String> oldMeta = getMetaMap(version);
long rootPos = getRootPos(oldMeta, mapId);
MVMap<?, ?> m = template.openReadOnly();
m.setRootPos(rootPos, version);
return (T) m;
}
/**
* Open a map with the default settings. The map is automatically created if
* it does not yet exist. If a map with this name is already open, this map
* is returned.
*
* @param <K> the key type
* @param <V> the value type
* @param name the name of the map
* @return the map
*/
public <K, V> MVMap<K, V> openMap(String name) {
return openMap(name, new MVMap.Builder<K, V>());
}
/**
* Open a map with the given builder. The map is automatically created if it
* does not yet exist. If a map with this name is already open, this map is
* returned.
*
* @param <K> the key type
* @param <V> the value type
* @param name the name of the map
* @param builder the map builder
* @return the map
*/
public synchronized <M extends MVMap<K, V>, K, V> M openMap(
String name, MVMap.MapBuilder<M, K, V> builder) {
checkOpen();
String x = meta.get("name." + name);
int id;
long root;
M map;
if (x != null) {
id = DataUtils.parseHexInt(x);
@SuppressWarnings("unchecked")
M old = (M) maps.get(id);
if (old != null) {
return old;
}
map = builder.create();
String config = meta.get(MVMap.getMapKey(id));
String v = DataUtils.getFromMap(config, "createVersion");
map.init(this, id, v != null ? DataUtils.parseHexLong(v): 0);
root = getRootPos(meta, id);
} else {
id = ++lastMapId;
map = builder.create();
map.init(this, id, currentVersion);
markMetaChanged();
x = Integer.toHexString(id);
meta.put(MVMap.getMapKey(id), map.asString(name));
meta.put("name." + name, x);
root = 0;
}
map.setRootPos(root, -1);
maps.put(id, map);
return map;
}
/**
* Get the set of all map names.
*
* @return the set of names
*/
public synchronized Set<String> getMapNames() {
HashSet<String> set = new HashSet<>();
checkOpen();
for (Iterator<String> it = meta.keyIterator("name."); it.hasNext();) {
String x = it.next();
if (!x.startsWith("name.")) {
break;
}
set.add(x.substring("name.".length()));
}
return set;
}
/**
* Get the metadata map. This data is for informational purposes only. The
* data is subject to change in future versions.
* <p>
* The data in this map should not be modified (changing system data may
* corrupt the store). If modifications are needed, they need to be
* synchronized on the store.
* <p>
* The metadata map contains the following entries:
* <pre>
* chunk.{chunkId} = {chunk metadata}
* name.{name} = {mapId}
* map.{mapId} = {map metadata}
* root.{mapId} = {root position}
* setting.storeVersion = {version}
* </pre>
*
* @return the metadata map
*/
public MVMap<String, String> getMetaMap() {
checkOpen();
return meta;
}
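// Usage sketch (illustrative): listing the stored maps via the metadata map,
// mirroring what getMapNames() does with the "name." key prefix:
//
//     MVMap<String, String> meta = store.getMetaMap();
//     for (Iterator<String> it = meta.keyIterator("name."); it.hasNext();) {
//         String key = it.next();
//         if (!key.startsWith("name.")) {
//             break;
//         }
//         String mapId = meta.get(key);  // the map id, in hexadecimal
//     }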
private MVMap<String, String> getMetaMap(long version) {
Chunk c = getChunkForVersion(version);
DataUtils.checkArgument(c != null, "Unknown version {0}", version);
c = readChunkHeader(c.block);
MVMap<String, String> oldMeta = meta.openReadOnly();
oldMeta.setRootPos(c.metaRootPos, version);
return oldMeta;
}
private Chunk getChunkForVersion(long version) {
Chunk newest = null;
for (Chunk c : chunks.values()) {
if (c.version <= version) {
if (newest == null || c.id > newest.id) {
newest = c;
}
}
}
return newest;
}
/**
* Check whether a given map exists.
*
* @param name the map name
* @return true if it exists
*/
public boolean hasMap(String name) {
return meta.containsKey("name." + name);
}
private void markMetaChanged() {
// changes in the metadata alone are usually not detected, as the meta
// map is changed after storing
metaChanged = true;
}
private synchronized void readStoreHeader() {
Chunk newest = null;
boolean validStoreHeader = false;
// find out which chunk and version are the newest
// read the first two blocks
ByteBuffer fileHeaderBlocks = fileStore.readFully(0, 2 * BLOCK_SIZE);
byte[] buff = new byte[BLOCK_SIZE];
for (int i = 0; i <= BLOCK_SIZE; i += BLOCK_SIZE) {
fileHeaderBlocks.get(buff);
// the following can fail for various reasons
try {
HashMap<String, String> m = DataUtils.parseChecksummedMap(buff);
if (m == null) {
continue;
}
int blockSize = DataUtils.readHexInt(
m, "blockSize", BLOCK_SIZE);
if (blockSize != BLOCK_SIZE) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_UNSUPPORTED_FORMAT,
"Block size {0} is currently not supported",
blockSize);
}
long version = DataUtils.readHexLong(m, "version", 0);
if (newest == null || version > newest.version) {
validStoreHeader = true;
storeHeader.putAll(m);
creationTime = DataUtils.readHexLong(m, "created", 0);
int chunkId = DataUtils.readHexInt(m, "chunk", 0);
long block = DataUtils.readHexLong(m, "block", 0);
Chunk test = readChunkHeaderAndFooter(block);
if (test != null && test.id == chunkId) {
newest = test;
}
}
} catch (Exception e) {
}
}
if (!validStoreHeader) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Store header is corrupt: {0}", fileStore);
}
long format = DataUtils.readHexLong(storeHeader, "format", 1);
if (format > FORMAT_WRITE && !fileStore.isReadOnly()) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_UNSUPPORTED_FORMAT,
"The write format {0} is larger " +
"than the supported format {1}, " +
"and the file was not opened in read-only mode",
format, FORMAT_WRITE);
}
format = DataUtils.readHexLong(storeHeader, "formatRead", format);
if (format > FORMAT_READ) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_UNSUPPORTED_FORMAT,
"The read format {0} is larger " +
"than the supported format {1}",
format, FORMAT_READ);
}
lastStoredVersion = -1;
chunks.clear();
long now = System.currentTimeMillis();
// calculate the year (doesn't have to be exact;
// we assume 365.25 days per year, * 4 = 1461)
int year = 1970 + (int) (now / (1000L * 60 * 60 * 6 * 1461));
if (year < 2014) {
// if the year is before 2014,
// we assume the system doesn't have a real-time clock,
// and we set the creationTime to the past, so that
// existing chunks are overwritten
creationTime = now - fileStore.getDefaultRetentionTime();
} else if (now < creationTime) {
// the system time was set to the past:
// we change the creation time
creationTime = now;
storeHeader.put("created", creationTime);
}
Chunk test = readChunkFooter(fileStore.size());
if (test != null) {
test = readChunkHeaderAndFooter(test.block);
if (test != null) {
if (newest == null || test.version > newest.version) {
newest = test;
}
}
}
if (newest == null) {
// no chunk
return;
}
// read the chunk header and footer,
// and follow the chain of next chunks
while (true) {
if (newest.next == 0 ||
newest.next >= fileStore.size() / BLOCK_SIZE) {
// no (valid) next
break;
}
test = readChunkHeaderAndFooter(newest.next);
if (test == null || test.id <= newest.id) {
break;
}
newest = test;
}
setLastChunk(newest);
loadChunkMeta();
// read all chunk headers and footers within the retention time,
// to detect unwritten data after a power failure
verifyLastChunks();
// build the free space list
for (Chunk c : chunks.values()) {
if (c.pageCountLive == 0) {
// remove this chunk in the next save operation
registerFreePage(currentVersion, c.id, 0, 0);
}
long start = c.block * BLOCK_SIZE;
int length = c.len * BLOCK_SIZE;
fileStore.markUsed(start, length);
}
}
private void loadChunkMeta() {
// load the chunk metadata: we can load in any order,
// because loading chunk metadata might recursively load another chunk
for (Iterator<String> it = meta.keyIterator("chunk."); it.hasNext();) {
String s = it.next();
if (!s.startsWith("chunk.")) {
break;
}
s = meta.get(s);
Chunk c = Chunk.fromString(s);
if (chunks.putIfAbsent(c.id, c) == null) {
if (c.block == Long.MAX_VALUE) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Chunk {0} is invalid", c.id);
}
}
}
}
private void setLastChunk(Chunk last) {
lastChunk = last;
if (last == null) {
// no valid chunk
lastMapId = 0;
currentVersion = 0;
meta.setRootPos(0, -1);
} else {
lastMapId = last.mapId;
currentVersion = last.version;
chunks.put(last.id, last);
meta.setRootPos(last.metaRootPos, -1);
}
setWriteVersion(currentVersion);
}
private void verifyLastChunks() {
long time = getTimeSinceCreation();
ArrayList<Integer> ids = new ArrayList<>(chunks.keySet());
Collections.sort(ids);
int newestValidChunk = -1;
Chunk old = null;
for (Integer chunkId : ids) {
Chunk c = chunks.get(chunkId);
if (old != null && c.time < old.time) {
// old chunk (maybe leftover from a previous crash)
break;
}
old = c;
if (c.time + retentionTime < time) {
// old chunk, no need to verify
newestValidChunk = c.id;
continue;
}
Chunk test = readChunkHeaderAndFooter(c.block);
if (test == null || test.id != c.id) {
break;
}
newestValidChunk = chunkId;
}
Chunk newest = chunks.get(newestValidChunk);
if (newest != lastChunk) {
// to avoid re-using newer chunks later on, we could clear
// the headers and footers of those, but we might not know about all
// of them, so that could be incomplete - but we check that newer
// chunks are written after older chunks, so we are safe
rollbackTo(newest == null ? 0 : newest.version);
}
}
/**
* Read a chunk header and footer, and verify the stored data is consistent.
*
* @param block the block
* @return the chunk, or null if the header or footer don't match or are not
* consistent
*/
private Chunk readChunkHeaderAndFooter(long block) {
Chunk header;
try {
header = readChunkHeader(block);
} catch (Exception e) {
// invalid chunk header: ignore, but stop
return null;
}
if (header == null) {
return null;
}
Chunk footer = readChunkFooter((block + header.len) * BLOCK_SIZE);
if (footer == null || footer.id != header.id) {
return null;
}
return header;
}
/**
* Try to read a chunk footer.
*
* @param end the end of the chunk
* @return the chunk, or null if not successful
*/
private Chunk readChunkFooter(long end) {
// the following can fail for various reasons
try {
// read the chunk footer of the last block of the file
ByteBuffer lastBlock = fileStore.readFully(
end - Chunk.FOOTER_LENGTH, Chunk.FOOTER_LENGTH);
byte[] buff = new byte[Chunk.FOOTER_LENGTH];
lastBlock.get(buff);
HashMap<String, String> m = DataUtils.parseChecksummedMap(buff);
if (m != null) {
int chunk = DataUtils.readHexInt(m, "chunk", 0);
Chunk c = new Chunk(chunk);
c.version = DataUtils.readHexLong(m, "version", 0);
c.block = DataUtils.readHexLong(m, "block", 0);
return c;
}
} catch (Exception e) {
// ignore
}
return null;
}
private void writeStoreHeader() {
StringBuilder buff = new StringBuilder(112);
if (lastChunk != null) {
storeHeader.put("block", lastChunk.block);
storeHeader.put("chunk", lastChunk.id);
storeHeader.put("version", lastChunk.version);
}
DataUtils.appendMap(buff, storeHeader);
byte[] bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1);
int checksum = DataUtils.getFletcher32(bytes, 0, bytes.length);
DataUtils.appendMap(buff, "fletcher", checksum);
buff.append('\n');
bytes = buff.toString().getBytes(StandardCharsets.ISO_8859_1);
ByteBuffer header = ByteBuffer.allocate(2 * BLOCK_SIZE);
header.put(bytes);
header.position(BLOCK_SIZE);
header.put(bytes);
header.rewind();
write(0, header);
}
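// Illustrative note (not from the original source): the header written above
// is a single checksummed key/value line, stored twice (once per block).
// A typical line looks roughly like
//
//     H:2,block:2,blockSize:1000,chunk:8,created:155d9e25cbe,format:1,version:8,fletcher:a3b57c33
//
// with values in hexadecimal; the exact set of keys depends on whether a
// chunk has already been stored.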
private void write(long pos, ByteBuffer buffer) {
try {
fileStore.writeFully(pos, buffer);
} catch (IllegalStateException e) {
panic(e);
throw e;
}
}
/**
* Close the file and the store. Unsaved changes are written to disk first.
*/
public void close() {
if (closed) {
return;
}
FileStore f = fileStore;
if (f != null && !f.isReadOnly()) {
stopBackgroundThread();
if (hasUnsavedChanges()) {
commitAndSave();
}
}
closeStore(true);
}
/**
* Close the file and the store, without writing anything. This will stop
* the background thread. This method ignores all errors.
*/
public void closeImmediately() {
try {
closeStore(false);
} catch (Throwable e) {
handleException(e);
}
}
private void closeStore(boolean shrinkIfPossible) {
if (closed) {
return;
}
// can not synchronize on this yet, because
// the background thread also synchronizes on this, which
// could result in a deadlock
stopBackgroundThread();
closed = true;
synchronized (this) {
if (fileStore != null && shrinkIfPossible) {
shrinkFileIfPossible(0);
}
// release memory early - this is important when called
// because of out of memory
if (cache != null) {
cache.clear();
}
if (cacheChunkRef != null) {
cacheChunkRef.clear();
}
for (MVMap<?, ?> m : new ArrayList<>(maps.values())) {
m.close();
}
chunks.clear();
maps.clear();
if (fileStore != null && !fileStoreIsProvided) {
fileStore.close();
}
}
}
/**
* Get the chunk for the given position.
*
* @param pos the position
* @return the chunk
*/
private Chunk getChunk(long pos) {
Chunk c = getChunkIfFound(pos);
if (c == null) {
int chunkId = DataUtils.getPageChunkId(pos);
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Chunk {0} not found", chunkId);
}
return c;
}
private Chunk getChunkIfFound(long pos) {
int chunkId = DataUtils.getPageChunkId(pos);
Chunk c = chunks.get(chunkId);
if (c == null) {
checkOpen();
if (!Thread.holdsLock(this)) {
// it could also be unsynchronized metadata
// access (if synchronization on this was forgotten)
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_CHUNK_NOT_FOUND,
"Chunk {0} no longer exists",
chunkId);
}
String s = meta.get(Chunk.getMetaKey(chunkId));
if (s == null) {
return null;
}
c = Chunk.fromString(s);
if (c.block == Long.MAX_VALUE) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Chunk {0} is invalid", chunkId);
}
chunks.put(c.id, c);
}
return c;
}
private void setWriteVersion(long version) {
for (MVMap<?, ?> map : maps.values()) {
map.setWriteVersion(version);
}
MVMap<String, String> m = meta;
if (m == null) {
checkOpen();
}
m.setWriteVersion(version);
}
/**
* Commit the changes.
* <p>
* For in-memory stores, this method increments the version.
* <p>
* For persistent stores, it also writes changes to disk. It does nothing if
* there are no unsaved changes, and returns the old version. It is not
* necessary to call this method when auto-commit is enabled (the default
* setting), as in this case it is automatically called from time to time or
* when enough changes have accumulated. However, it may still be called to
* flush all changes to disk.
*
* @return the new version
*/
public synchronized long commit() {
if (fileStore != null) {
return commitAndSave();
}
long v = ++currentVersion;
setWriteVersion(v);
return v;
}
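// Usage sketch (illustrative): with auto-commit enabled (the default for
// file-based stores), calling commit() is only needed to force changes to
// disk at a specific point:
//
//     map.put("k", "v");
//     long newVersion = store.commit();  // flushes and returns the new version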
/**
* Commit all changes and persist them to disk. This method does nothing if
* there are no unsaved changes, otherwise it increments the current version
* and stores the data (for file based stores).
* <p>
* At most one store operation may run at any time.
*
* @return the new version (incremented if there were changes)
*/
private synchronized long commitAndSave() {
if (closed) {
return currentVersion;
}
if (fileStore == null) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_WRITING_FAILED,
"This is an in-memory store");
}
if (currentStoreVersion >= 0) {
// store is possibly called within store, if the meta map changed
return currentVersion;
}
if (!hasUnsavedChanges()) {
return currentVersion;
}
if (fileStore.isReadOnly()) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_WRITING_FAILED, "This store is read-only");
}
try {
currentStoreVersion = currentVersion;
currentStoreThread = Thread.currentThread();
return storeNow();
} finally {
// in any case reset the current store version,
// to allow closing the store
currentStoreVersion = -1;
currentStoreThread = null;
}
}
private long storeNow() {
try {
return storeNowTry();
} catch (IllegalStateException e) {
panic(e);
return -1;
}
}
private long storeNowTry() {
long time = getTimeSinceCreation();
freeUnusedIfNeeded(time);
int currentUnsavedPageCount = unsavedMemory;
long storeVersion = currentStoreVersion;
long version = ++currentVersion;
lastCommitTime = time;
retainChunk = null;
// the metadata of the last chunk was not stored so far, and needs to be
// set now (it's better not to update right after storing, because that
// would modify the meta map again)
int lastChunkId;
if (lastChunk == null) {
lastChunkId = 0;
} else {
lastChunkId = lastChunk.id;
meta.put(Chunk.getMetaKey(lastChunkId), lastChunk.asString());
// never go backward in time
time = Math.max(lastChunk.time, time);
}
int newChunkId = lastChunkId;
while (true) {
newChunkId = (newChunkId + 1) % Chunk.MAX_ID;
Chunk old = chunks.get(newChunkId);
if (old == null) {
break;
}
if (old.block == Long.MAX_VALUE) {
IllegalStateException e = DataUtils.newIllegalStateException(
DataUtils.ERROR_INTERNAL,
"Last block not stored, possibly due to out-of-memory");
panic(e);
}
}
Chunk c = new Chunk(newChunkId);
c.pageCount = Integer.MAX_VALUE;
c.pageCountLive = Integer.MAX_VALUE;
c.maxLen = Long.MAX_VALUE;
c.maxLenLive = Long.MAX_VALUE;
c.metaRootPos = Long.MAX_VALUE;
c.block = Long.MAX_VALUE;
c.len = Integer.MAX_VALUE;
c.time = time;
c.version = version;
c.mapId = lastMapId;
c.next = Long.MAX_VALUE;
chunks.put(c.id, c);
// force a metadata update
meta.put(Chunk.getMetaKey(c.id), c.asString());
meta.remove(Chunk.getMetaKey(c.id));
ArrayList<MVMap<?, ?>> list = new ArrayList<>(maps.values());
ArrayList<MVMap<?, ?>> changed = New.arrayList();
for (MVMap<?, ?> m : list) {
m.setWriteVersion(version);
long v = m.getVersion();
if (m.getCreateVersion() > storeVersion) {
// the map was created after storing started
continue;
}
if (m.isVolatile()) {
continue;
}
if (v >= 0 && v >= lastStoredVersion) {
MVMap<?, ?> r = m.openVersion(storeVersion);
if (r.getRoot().getPos() == 0) {
changed.add(r);
}
}
}
applyFreedSpace(storeVersion);
WriteBuffer buff = getWriteBuffer();
// need to patch the header later
c.writeChunkHeader(buff, 0);
int headerLength = buff.position();
c.pageCount = 0;
c.pageCountLive = 0;
c.maxLen = 0;
c.maxLenLive = 0;
for (MVMap<?, ?> m : changed) {
Page p = m.getRoot();
String key = MVMap.getMapRootKey(m.getId());
if (p.getTotalCount() == 0) {
meta.put(key, "0");
} else {
p.writeUnsavedRecursive(c, buff);
long root = p.getPos();
meta.put(key, Long.toHexString(root));
}
}
meta.setWriteVersion(version);
Page metaRoot = meta.getRoot();
metaRoot.writeUnsavedRecursive(c, buff);
int chunkLength = buff.position();
// add the store header and round to the next block
int length = MathUtils.roundUpInt(chunkLength +
Chunk.FOOTER_LENGTH, BLOCK_SIZE);
buff.limit(length);
// the length of the file that is still in use
// (not necessarily the end of the file)
long end = getFileLengthInUse();
long filePos;
if (reuseSpace) {
filePos = fileStore.allocate(length);
} else {
filePos = end;
}
// end is not necessarily the end of the file
boolean storeAtEndOfFile = filePos + length >= fileStore.size();
if (!reuseSpace) {
// we can not mark it earlier, because it
// might have been allocated by one of the
// removed chunks
fileStore.markUsed(end, length);
}
c.block = filePos / BLOCK_SIZE;
c.len = length / BLOCK_SIZE;
c.metaRootPos = metaRoot.getPos();
// calculate and set the likely next position
if (reuseSpace) {
int predictBlocks = c.len;
long predictedNextStart = fileStore.allocate(
predictBlocks * BLOCK_SIZE);
fileStore.free(predictedNextStart, predictBlocks * BLOCK_SIZE);
c.next = predictedNextStart / BLOCK_SIZE;
} else {
// just after this chunk
c.next = 0;
}
buff.position(0);
c.writeChunkHeader(buff, headerLength);
revertTemp(storeVersion);
buff.position(buff.limit() - Chunk.FOOTER_LENGTH);
buff.put(c.getFooterBytes());
buff.position(0);
write(filePos, buff.getBuffer());
releaseWriteBuffer(buff);
// whether we need to write the store header
boolean writeStoreHeader = false;
if (!storeAtEndOfFile) {
if (lastChunk == null) {
writeStoreHeader = true;
} else if (lastChunk.next != c.block) {
// the last prediction did not match
writeStoreHeader = true;
} else {
long headerVersion = DataUtils.readHexLong(
storeHeader, "version", 0);
if (lastChunk.version - headerVersion > 20) {
// we write after at least 20 entries
writeStoreHeader = true;
} else {
int chunkId = DataUtils.readHexInt(storeHeader, "chunk", 0);
while (true) {
Chunk old = chunks.get(chunkId);
if (old == null) {
// one of the chunks in between
// was removed
writeStoreHeader = true;
break;
}
if (chunkId == lastChunk.id) {
break;
}
chunkId++;
}
}
}
}
lastChunk = c;
if (writeStoreHeader) {
writeStoreHeader();
}
if (!storeAtEndOfFile) {
// may only shrink after the store header was written
shrinkFileIfPossible(1);
}
for (MVMap<?, ?> m : changed) {
Page p = m.getRoot();
if (p.getTotalCount() > 0) {
p.writeEnd();
}
}
metaRoot.writeEnd();
// some pages might have been changed in the meantime (in the newest
// version)
unsavedMemory = Math.max(0, unsavedMemory
- currentUnsavedPageCount);
metaChanged = false;
lastStoredVersion = storeVersion;
return version;
}
/**
* Try to free unused chunks. This method doesn't directly write, but can
* change the metadata, and therefore cause a background write.
*/
private void freeUnusedIfNeeded(long time) {
int freeDelay = retentionTime / 5;
if (time >= lastFreeUnusedChunks + freeDelay) {
// set early in case it fails (out of memory or so)
lastFreeUnusedChunks = time;
freeUnusedChunks();
// set it here as well, to avoid calling it often if it was slow
lastFreeUnusedChunks = getTimeSinceCreation();
}
}
private synchronized void freeUnusedChunks() {
if (lastChunk == null || !reuseSpace) {
return;
}
Set<Integer> referenced = collectReferencedChunks();
long time = getTimeSinceCreation();
for (Iterator<Chunk> it = chunks.values().iterator(); it.hasNext(); ) {
Chunk c = it.next();
if (!referenced.contains(c.id)) {
if (canOverwriteChunk(c, time)) {
it.remove();
markMetaChanged();
meta.remove(Chunk.getMetaKey(c.id));
long start = c.block * BLOCK_SIZE;
int length = c.len * BLOCK_SIZE;
fileStore.free(start, length);
} else {
if (c.unused == 0) {
c.unused = time;
meta.put(Chunk.getMetaKey(c.id), c.asString());
markMetaChanged();
}
}
}
}
}
private Set<Integer> collectReferencedChunks() {
long testVersion = lastChunk.version;
DataUtils.checkArgument(testVersion > 0, "Collect references on version 0");
long readCount = getFileStore().readCount.get();
Set<Integer> referenced = new HashSet<>();
for (Cursor<String, String> c = meta.cursor("root."); c.hasNext();) {
String key = c.next();
if (!key.startsWith("root.")) {
break;
}
long pos = DataUtils.parseHexLong(c.getValue());
if (pos == 0) {
continue;
}
int mapId = DataUtils.parseHexInt(key.substring("root.".length()));
collectReferencedChunks(referenced, mapId, pos, 0);
}
long pos = lastChunk.metaRootPos;
collectReferencedChunks(referenced, 0, pos, 0);
readCount = fileStore.readCount.get() - readCount;
return referenced;
}
private void collectReferencedChunks(Set<Integer> targetChunkSet,
int mapId, long pos, int level) {
int c = DataUtils.getPageChunkId(pos);
targetChunkSet.add(c);
if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
return;
}
PageChildren refs = readPageChunkReferences(mapId, pos, -1);
if (!refs.chunkList) {
Set<Integer> target = new HashSet<>();
for (int i = 0; i < refs.children.length; i++) {
long p = refs.children[i];
collectReferencedChunks(target, mapId, p, level + 1);
}
// we don't need a reference to this chunk
target.remove(c);
long[] children = new long[target.size()];
int i = 0;
for (Integer p : target) {
children[i++] = DataUtils.getPagePos(p, 0, 0,
DataUtils.PAGE_TYPE_LEAF);
}
refs.children = children;
refs.chunkList = true;
if (cacheChunkRef != null) {
cacheChunkRef.put(refs.pos, refs, refs.getMemory());
}
}
for (long p : refs.children) {
targetChunkSet.add(DataUtils.getPageChunkId(p));
}
}
private PageChildren readPageChunkReferences(int mapId, long pos, int parentChunk) {
if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
return null;
}
PageChildren r;
if (cacheChunkRef != null) {
r = cacheChunkRef.get(pos);
} else {
r = null;
}
if (r == null) {
// if possible, create it from the cached page
if (cache != null) {
Page p = cache.get(pos);
if (p != null) {
r = new PageChildren(p);
}
}
if (r == null) {
// page was not cached: read the data
Chunk c = getChunk(pos);
long filePos = c.block * BLOCK_SIZE;
filePos += DataUtils.getPageOffset(pos);
if (filePos < 0) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Negative position {0}; p={1}, c={2}", filePos, pos, c.toString());
}
long maxPos = (c.block + c.len) * BLOCK_SIZE;
r = PageChildren.read(fileStore, pos, mapId, filePos, maxPos);
}
r.removeDuplicateChunkReferences();
if (cacheChunkRef != null) {
cacheChunkRef.put(pos, r, r.getMemory());
}
}
if (r.children.length == 0) {
int chunk = DataUtils.getPageChunkId(pos);
if (chunk == parentChunk) {
return null;
}
}
return r;
}
/**
* Get a buffer for writing. The caller must synchronize on the store
* before calling the method and until after using the buffer.
*
* @return the buffer
*/
private WriteBuffer getWriteBuffer() {
WriteBuffer buff;
if (writeBuffer != null) {
buff = writeBuffer;
buff.clear();
} else {
buff = new WriteBuffer();
}
return buff;
}
/**
* Release a buffer for writing. The caller must synchronize on the store
* before calling the method and until after using the buffer.
*
* @param buff the buffer that can be re-used
*/
private void releaseWriteBuffer(WriteBuffer buff) {
if (buff.capacity() <= 4 * 1024 * 1024) {
writeBuffer = buff;
}
}
private boolean canOverwriteChunk(Chunk c, long time) {
if (retentionTime >= 0) {
if (c.time + retentionTime > time) {
return false;
}
if (c.unused == 0 || c.unused + retentionTime / 2 > time) {
return false;
}
}
Chunk r = retainChunk;
if (r != null && c.version > r.version) {
return false;
}
return true;
}
private long getTimeSinceCreation() {
return Math.max(0, getTimeAbsolute() - creationTime);
}
private long getTimeAbsolute() {
long now = System.currentTimeMillis();
if (lastTimeAbsolute != 0 && now < lastTimeAbsolute) {
// time seems to have run backwards - this can happen
// when the system time is adjusted, for example
// on a leap second
now = lastTimeAbsolute;
} else {
lastTimeAbsolute = now;
}
return now;
}
/**
* Apply the freed space to the chunk metadata. The metadata is updated, but
* completely free chunks are not removed from the set of chunks, and the
* disk space is not yet marked as free.
*
* @param storeVersion apply up to the given version
*/
private void applyFreedSpace(long storeVersion) {
while (true) {
ArrayList<Chunk> modified = New.arrayList();
Iterator<Entry<Long, ConcurrentHashMap<Integer, Chunk>>> it;
it = freedPageSpace.entrySet().iterator();
while (it.hasNext()) {
Entry<Long, ConcurrentHashMap<Integer, Chunk>> e = it.next();
long v = e.getKey();
if (v > storeVersion) {
continue;
}
ConcurrentHashMap<Integer, Chunk> freed = e.getValue();
for (Chunk f : freed.values()) {
Chunk c = chunks.get(f.id);
if (c == null) {
// already removed
continue;
}
// no need to synchronize, as old entries
// are not concurrently modified
c.maxLenLive += f.maxLenLive;
c.pageCountLive += f.pageCountLive;
if (c.pageCountLive < 0 && c.pageCountLive > -MARKED_FREE) {
// can happen after a rollback
c.pageCountLive = 0;
}
if (c.maxLenLive < 0 && c.maxLenLive > -MARKED_FREE) {
// can happen after a rollback
c.maxLenLive = 0;
}
modified.add(c);
}
it.remove();
}
for (Chunk c : modified) {
meta.put(Chunk.getMetaKey(c.id), c.asString());
}
if (modified.isEmpty()) {
break;
}
}
}
/**
* Shrink the file if possible, and if at least a given percentage can be
* saved.
*
* @param minPercent the minimum percentage to save
*/
private void shrinkFileIfPossible(int minPercent) {
if (fileStore.isReadOnly()) {
return;
}
long end = getFileLengthInUse();
long fileSize = fileStore.size();
if (end >= fileSize) {
return;
}
if (minPercent > 0 && fileSize - end < BLOCK_SIZE) {
return;
}
int savedPercent = (int) (100 - (end * 100 / fileSize));
if (savedPercent < minPercent) {
return;
}
if (!closed) {
sync();
}
fileStore.truncate(end);
}
/**
* Get the position right after the last used byte.
*
* @return the position
*/
private long getFileLengthInUse() {
long result = fileStore.getFileLengthInUse();
assert result == measureFileLengthInUse() : result + " != " + measureFileLengthInUse();
return result;
}
private long measureFileLengthInUse() {
long size = 2;
for (Chunk c : chunks.values()) {
if (c.len != Integer.MAX_VALUE) {
size = Math.max(size, c.block + c.len);
}
}
return size * BLOCK_SIZE;
}
/**
* Check whether there are any unsaved changes.
*
* @return if there are any changes
*/
public boolean hasUnsavedChanges() {
checkOpen();
if (metaChanged) {
return true;
}
for (MVMap<?, ?> m : maps.values()) {
if (!m.isClosed()) {
long v = m.getVersion();
if (v >= 0 && v > lastStoredVersion) {
return true;
}
}
}
return false;
}
private Chunk readChunkHeader(long block) {
long p = block * BLOCK_SIZE;
ByteBuffer buff = fileStore.readFully(p, Chunk.MAX_HEADER_LENGTH);
return Chunk.readChunkHeader(buff, p);
}
/**
* Compact the store by moving all live pages to new chunks.
*
* @return if anything was written
*/
public synchronized boolean compactRewriteFully() {
checkOpen();
if (lastChunk == null) {
// nothing to do
return false;
}
for (MVMap<?, ?> m : maps.values()) {
@SuppressWarnings("unchecked")
MVMap<Object, Object> map = (MVMap<Object, Object>) m;
Cursor<Object, Object> cursor = map.cursor(null);
Page lastPage = null;
while (cursor.hasNext()) {
cursor.next();
Page p = cursor.getPage();
if (p == lastPage) {
continue;
}
Object k = p.getKey(0);
Object v = p.getValue(0);
map.put(k, v);
lastPage = p;
}
}
commitAndSave();
return true;
}
/**
* Compact by moving all chunks next to each other.
*
* @return if anything was written
*/
public synchronized boolean compactMoveChunks() {
return compactMoveChunks(100, Long.MAX_VALUE);
}
/**
* Compact the store by moving all chunks next to each other, if there is
* free space between chunks. This might temporarily increase the file size.
* Chunks are overwritten irrespective of the current retention time. Before
* overwriting chunks and before resizing the file, syncFile() is called.
*
* @param targetFillRate do nothing if the file store fill rate is higher
* than this
* @param moveSize the number of bytes to move
* @return if anything was written
*/
public synchronized boolean compactMoveChunks(int targetFillRate, long moveSize) {
checkOpen();
if (lastChunk == null || !reuseSpace) {
// nothing to do
return false;
}
int oldRetentionTime = retentionTime;
boolean oldReuse = reuseSpace;
try {
retentionTime = -1;
freeUnusedChunks();
if (fileStore.getFillRate() > targetFillRate) {
return false;
}
long start = fileStore.getFirstFree() / BLOCK_SIZE;
ArrayList<Chunk> move = compactGetMoveBlocks(start, moveSize);
compactMoveChunks(move);
freeUnusedChunks();
storeNow();
} finally {
reuseSpace = oldReuse;
retentionTime = oldRetentionTime;
}
return true;
}
private ArrayList<Chunk> compactGetMoveBlocks(long startBlock, long moveSize) {
ArrayList<Chunk> move = New.arrayList();
for (Chunk c : chunks.values()) {
if (c.block > startBlock) {
move.add(c);
}
}
// sort by block
Collections.sort(move, new Comparator<Chunk>() {
@Override
public int compare(Chunk o1, Chunk o2) {
return Long.signum(o1.block - o2.block);
}
});
// find which is the last block to keep
int count = 0;
long size = 0;
for (Chunk c : move) {
long chunkSize = c.len * (long) BLOCK_SIZE;
if (size + chunkSize > moveSize) {
break;
}
size += chunkSize;
count++;
}
// move the first block (so the first gap is moved),
// and the one at the end (so the file shrinks)
while (move.size() > count && move.size() > 1) {
move.remove(1);
}
return move;
}
private void compactMoveChunks(ArrayList<Chunk> move) {
for (Chunk c : move) {
WriteBuffer buff = getWriteBuffer();
long start = c.block * BLOCK_SIZE;
int length = c.len * BLOCK_SIZE;
buff.limit(length);
ByteBuffer readBuff = fileStore.readFully(start, length);
Chunk.readChunkHeader(readBuff, start);
int chunkHeaderLen = readBuff.position();
buff.position(chunkHeaderLen);
buff.put(readBuff);
long end = getFileLengthInUse();
fileStore.markUsed(end, length);
fileStore.free(start, length);
c.block = end / BLOCK_SIZE;
c.next = 0;
buff.position(0);
c.writeChunkHeader(buff, chunkHeaderLen);
buff.position(length - Chunk.FOOTER_LENGTH);
buff.put(c.getFooterBytes());
buff.position(0);
write(end, buff.getBuffer());
releaseWriteBuffer(buff);
markMetaChanged();
meta.put(Chunk.getMetaKey(c.id), c.asString());
}
// update the metadata (store at the end of the file)
reuseSpace = false;
commitAndSave();
sync();
// now re-use the empty space
reuseSpace = true;
for (Chunk c : move) {
if (!chunks.containsKey(c.id)) {
// already removed during the
// previous store operation
continue;
}
WriteBuffer buff = getWriteBuffer();
long start = c.block * BLOCK_SIZE;
int length = c.len * BLOCK_SIZE;
buff.limit(length);
ByteBuffer readBuff = fileStore.readFully(start, length);
Chunk.readChunkHeader(readBuff, 0);
int chunkHeaderLen = readBuff.position();
buff.position(chunkHeaderLen);
buff.put(readBuff);
long pos = fileStore.allocate(length);
fileStore.free(start, length);
buff.position(0);
c.block = pos / BLOCK_SIZE;
c.writeChunkHeader(buff, chunkHeaderLen);
buff.position(length - Chunk.FOOTER_LENGTH);
buff.put(c.getFooterBytes());
buff.position(0);
write(pos, buff.getBuffer());
releaseWriteBuffer(buff);
markMetaChanged();
meta.put(Chunk.getMetaKey(c.id), c.asString());
}
// update the metadata (within the file)
commitAndSave();
sync();
shrinkFileIfPossible(0);
}
/**
* Force all stored changes to be written to the storage. The default
* implementation calls FileChannel.force(true).
*/
public void sync() {
checkOpen();
FileStore f = fileStore;
if (f != null) {
f.sync();
}
}
/**
* Try to increase the fill rate by re-writing partially full chunks. Chunks
* with a low number of live items are re-written.
* <p>
* If the current fill rate is higher than the target fill rate, nothing is
* done.
* <p>
* Please note this method will not necessarily reduce the file size, as
* empty chunks are not overwritten.
* <p>
* Only data of open maps can be moved. For maps that are not open, the old
* chunk is still referenced. Therefore, it is recommended to open all maps
* before calling this method.
*
* @param targetFillRate the minimum percentage of live entries
* @param write the minimum number of bytes to write
* @return if a chunk was re-written
*/
public boolean compact(int targetFillRate, int write) {
if (!reuseSpace) {
return false;
}
synchronized (compactSync) {
checkOpen();
ArrayList<Chunk> old;
synchronized (this) {
old = compactGetOldChunks(targetFillRate, write);
}
if (old == null || old.isEmpty()) {
return false;
}
compactRewrite(old);
return true;
}
}
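// Usage sketch (illustrative): a simple compaction loop that calls compact()
// until it reports that nothing more was re-written. The parameters (90%
// target fill rate, a 16 MB write budget per pass) are assumed values:
//
//     while (store.compact(90, 16 * 1024 * 1024)) {
//         // keep re-writing chunks until the target fill rate is reached
//     }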
/**
* Get the current fill rate (percentage of used space in the file). Unlike
* the fill rate of the store, here we only account for chunk data; the fill
* rate here is how much of the chunk data is live (still referenced). Young
* chunks are considered live.
*
* @return the fill rate, in percent (100 is completely full)
*/
public int getCurrentFillRate() {
long maxLengthSum = 1;
long maxLengthLiveSum = 1;
long time = getTimeSinceCreation();
for (Chunk c : chunks.values()) {
maxLengthSum += c.maxLen;
if (c.time + retentionTime > time) {
// young chunks (we don't optimize those):
// assume they are fully live, so that we
// don't try to optimize them until they get old
maxLengthLiveSum += c.maxLen;
} else {
maxLengthLiveSum += c.maxLenLive;
}
}
// the fill rate of all chunks combined
if (maxLengthSum <= 0) {
// avoid division by 0
maxLengthSum = 1;
}
int fillRate = (int) (100 * maxLengthLiveSum / maxLengthSum);
return fillRate;
}
private ArrayList<Chunk> compactGetOldChunks(int targetFillRate, int write) {
if (lastChunk == null) {
// nothing to do
return null;
}
long time = getTimeSinceCreation();
int fillRate = getCurrentFillRate();
if (fillRate >= targetFillRate) {
return null;
}
// the 'old' list contains the chunks we want to free up
ArrayList<Chunk> old = New.arrayList();
Chunk last = chunks.get(lastChunk.id);
for (Chunk c : chunks.values()) {
// only look at chunk older than the retention time
// (it's possible to compact chunks earlier, but right
// now we don't do that)
if (c.time + retentionTime > time) {
continue;
}
long age = last.version - c.version + 1;
c.collectPriority = (int) (c.getFillRate() * 1000 / age);
old.add(c);
}
if (old.isEmpty()) {
return null;
}
// sort the list, so the first entry should be collected first
Collections.sort(old, new Comparator<Chunk>() {
@Override
public int compare(Chunk o1, Chunk o2) {
int comp = Integer.compare(o1.collectPriority,
o2.collectPriority);
if (comp == 0) {
comp = Long.compare(o1.maxLenLive,
o2.maxLenLive);
}
return comp;
}
});
// find out up to where in the old list we need to move
long written = 0;
int chunkCount = 0;
Chunk move = null;
for (Chunk c : old) {
if (move != null) {
if (c.collectPriority > 0 && written > write) {
break;
}
}
written += c.maxLenLive;
chunkCount++;
move = c;
}
if (chunkCount < 1) {
return null;
}
// remove the chunks we want to keep from this list
boolean remove = false;
for (Iterator<Chunk> it = old.iterator(); it.hasNext();) {
Chunk c = it.next();
if (move == c) {
remove = true;
} else if (remove) {
it.remove();
}
}
return old;
}
private void compactRewrite(ArrayList<Chunk> old) {
HashSet<Integer> set = new HashSet<>();
for (Chunk c : old) {
set.add(c.id);
}
for (MVMap<?, ?> m : maps.values()) {
@SuppressWarnings("unchecked")
MVMap<Object, Object> map = (MVMap<Object, Object>) m;
if (!map.rewrite(set)) {
return;
}
}
if (!meta.rewrite(set)) {
return;
}
freeUnusedChunks();
commitAndSave();
}
/**
* Read a page.
*
* @param map the map
* @param pos the page position
* @return the page
*/
Page readPage(MVMap<?, ?> map, long pos) {
if (pos == 0) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT, "Position 0");
}
Page p = cache == null ? null : cache.get(pos);
if (p == null) {
Chunk c = getChunk(pos);
long filePos = c.block * BLOCK_SIZE;
filePos += DataUtils.getPageOffset(pos);
if (filePos < 0) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Negative position {0}", filePos);
}
long maxPos = (c.block + c.len) * BLOCK_SIZE;
p = Page.read(fileStore, pos, map, filePos, maxPos);
cachePage(pos, p, p.getMemory());
}
return p;
}
/**
* Remove a page.
*
* @param map the map the page belongs to
* @param pos the position of the page
* @param memory the memory usage
*/
void removePage(MVMap<?, ?> map, long pos, int memory) {
// we need to keep temporary pages,
// to support reading old versions and rollback
if (pos == 0) {
// the page was not yet stored:
// just using "unsavedMemory -= memory" could result in negative
// values, because in some cases a page is allocated, but never
// stored, so we need to use max
unsavedMemory = Math.max(0, unsavedMemory - memory);
return;
}
// This could result in a cache miss if the operation is rolled back,
// but we don't optimize for rollback.
// We could also keep the page in the cache, as somebody
// could still read it (reading the old version).
if (cache != null) {
if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) {
// only remove leaf pages; internal nodes are kept in the
// cache, because they are still used for garbage collection
cache.remove(pos);
}
}
Chunk c = getChunk(pos);
long version = currentVersion;
if (map == meta && currentStoreVersion >= 0) {
if (Thread.currentThread() == currentStoreThread) {
// if the meta map is modified while storing,
// then this freed page needs to be registered
// with the stored chunk, so that the old chunk
// can be re-used
version = currentStoreVersion;
}
}
registerFreePage(version, c.id,
DataUtils.getPageMaxLength(pos), 1);
}
private void registerFreePage(long version, int chunkId,
long maxLengthLive, int pageCount) {
ConcurrentHashMap<Integer, Chunk> freed = freedPageSpace.get(version);
if (freed == null) {
freed = new ConcurrentHashMap<>();
ConcurrentHashMap<Integer, Chunk> f2 = freedPageSpace.putIfAbsent(version,
freed);
if (f2 != null) {
freed = f2;
}
}
// synchronize, because pages could be freed concurrently
synchronized (freed) {
Chunk chunk = freed.get(chunkId);
if (chunk == null) {
chunk = new Chunk(chunkId);
Chunk chunk2 = freed.putIfAbsent(chunkId, chunk);
if (chunk2 != null) {
chunk = chunk2;
}
}
chunk.maxLenLive -= maxLengthLive;
chunk.pageCountLive -= pageCount;
}
}
Compressor getCompressorFast() {
if (compressorFast == null) {
compressorFast = new CompressLZF();
}
return compressorFast;
}
Compressor getCompressorHigh() {
if (compressorHigh == null) {
compressorHigh = new CompressDeflate();
}
return compressorHigh;
}
int getCompressionLevel() {
return compressionLevel;
}
public int getPageSplitSize() {
return pageSplitSize;
}
public boolean getReuseSpace() {
return reuseSpace;
}
/**
* Whether empty space in the file should be re-used. If enabled, old data
* is overwritten (default). If disabled, writes are appended at the end of
* the file.
* <p>
* This setting is specially useful for online backup. To create an online
* backup, disable this setting, then copy the file (starting at the
* beginning of the file). In this case, concurrent backup and write
* operations are possible (obviously the backup process needs to be faster
* than the write operations).
*
* @param reuseSpace the new value
*/
public void setReuseSpace(boolean reuseSpace) {
this.reuseSpace = reuseSpace;
}
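// Usage sketch (illustrative): the online-backup pattern described above.
// While reuseSpace is disabled, new chunks are only appended, so the file
// can be copied from the beginning while writes continue:
//
//     store.setReuseSpace(false);
//     try {
//         // copy the store file here (e.g. with java.nio.file.Files.copy)
//     } finally {
//         store.setReuseSpace(true);
//     }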
public int getRetentionTime() {
return retentionTime;
}
/**
* How long to retain old, persisted chunks, in milliseconds. Chunks that
* are older may be overwritten once they contain no live data.
* <p>
* The default value is 45000 (45 seconds) when using the default file
* store. It is assumed that a file system and hard disk will flush all
* write buffers within this time. Using a lower value might be dangerous,
* unless the file system and hard disk flush the buffers earlier. To
* manually flush the buffers, use
* <code>MVStore.getFile().force(true)</code>, however please note that
* according to various tests this does not always work as expected
* depending on the operating system and hardware.
* <p>
* The retention time needs to be long enough to allow reading old chunks
* while traversing over the entries of a map.
* <p>
* This setting is not persisted.
*
* @param ms how many milliseconds to retain old chunks (0 to overwrite them
* as early as possible)
*/
public void setRetentionTime(int ms) {
this.retentionTime = ms;
}
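    // Illustrative sketch; the store variable and values are assumptions
    // for demonstration only:
    //
    //   store.setRetentionTime(0);      // overwrite old chunks immediately
    //   store.setRetentionTime(90000);  // keep old chunks for 90 seconds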
/**
* How many versions to retain for in-memory stores. If not set, 5 old
* versions are retained.
*
* @param count the number of versions to keep
*/
public void setVersionsToKeep(int count) {
this.versionsToKeep = count;
}
/**
     * Get the number of versions to retain in memory (for in-memory stores).
     *
     * @return the number of versions to keep
*/
public long getVersionsToKeep() {
return versionsToKeep;
}
/**
* Get the oldest version to retain in memory, which is the manually set
* retain version, or the current store version (whatever is older).
*
* @return the version
*/
long getOldestVersionToKeep() {
long v = currentVersion;
if (fileStore == null) {
return v - versionsToKeep;
}
long storeVersion = currentStoreVersion;
if (storeVersion > -1) {
v = Math.min(v, storeVersion);
}
return v;
}
/**
* Check whether all data can be read from this version. This requires that
* all chunks referenced by this version are still available (not
* overwritten).
*
* @param version the version
* @return true if all data can be read
*/
private boolean isKnownVersion(long version) {
if (version > currentVersion || version < 0) {
return false;
}
if (version == currentVersion || chunks.size() == 0) {
// no stored data
return true;
}
// need to check if a chunk for this version exists
Chunk c = getChunkForVersion(version);
if (c == null) {
return false;
}
// also, all chunks referenced by this version
// need to be available in the file
MVMap<String, String> oldMeta = getMetaMap(version);
if (oldMeta == null) {
return false;
}
try {
for (Iterator<String> it = oldMeta.keyIterator("chunk.");
it.hasNext();) {
String chunkKey = it.next();
if (!chunkKey.startsWith("chunk.")) {
break;
}
if (!meta.containsKey(chunkKey)) {
String s = oldMeta.get(chunkKey);
Chunk c2 = Chunk.fromString(s);
Chunk test = readChunkHeaderAndFooter(c2.block);
if (test == null || test.id != c2.id) {
return false;
}
// we store this chunk
chunks.put(c2.id, c2);
}
}
} catch (IllegalStateException e) {
            // a chunk is missing where the metadata is stored
return false;
}
return true;
}
/**
* Increment the number of unsaved pages.
*
* @param memory the memory usage of the page
*/
void registerUnsavedPage(int memory) {
unsavedMemory += memory;
int newValue = unsavedMemory;
if (newValue > autoCommitMemory && autoCommitMemory > 0) {
saveNeeded = true;
}
}
/**
* This method is called before writing to a map.
*
* @param map the map
*/
void beforeWrite(MVMap<?, ?> map) {
if (saveNeeded) {
if (map == meta) {
                // don't save while the metadata map is locked;
// this is to avoid deadlocks that could occur when we
// synchronize on the store and then on the metadata map
// TODO there should be no deadlocks possible
return;
}
saveNeeded = false;
// check again, because it could have been written by now
if (unsavedMemory > autoCommitMemory && autoCommitMemory > 0) {
commitAndSave();
}
}
}
/**
* Get the store version. The store version is usually used to upgrade the
* structure of the store after upgrading the application. Initially the
* store version is 0, until it is changed.
*
* @return the store version
*/
public int getStoreVersion() {
checkOpen();
String x = meta.get("setting.storeVersion");
return x == null ? 0 : DataUtils.parseHexInt(x);
}
/**
* Update the store version.
*
* @param version the new store version
*/
public synchronized void setStoreVersion(int version) {
checkOpen();
markMetaChanged();
meta.put("setting.storeVersion", Integer.toHexString(version));
}
/**
* Revert to the beginning of the current version, reverting all uncommitted
* changes.
*/
public void rollback() {
rollbackTo(currentVersion);
}
/**
* Revert to the beginning of the given version. All later changes (stored
* or not) are forgotten. All maps that were created later are closed. A
* rollback to a version before the last stored version is immediately
* persisted. Rollback to version 0 means all data is removed.
*
* @param version the version to revert to
*/
public synchronized void rollbackTo(long version) {
checkOpen();
if (version == 0) {
// special case: remove all data
for (MVMap<?, ?> m : maps.values()) {
m.close();
}
meta.clear();
chunks.clear();
if (fileStore != null) {
fileStore.clear();
}
maps.clear();
freedPageSpace.clear();
currentVersion = version;
setWriteVersion(version);
metaChanged = false;
return;
}
DataUtils.checkArgument(
isKnownVersion(version),
"Unknown version {0}", version);
for (MVMap<?, ?> m : maps.values()) {
m.rollbackTo(version);
}
for (long v = currentVersion; v >= version; v--) {
if (freedPageSpace.size() == 0) {
break;
}
freedPageSpace.remove(v);
}
meta.rollbackTo(version);
metaChanged = false;
boolean loadFromFile = false;
// find out which chunks to remove,
// and which is the newest chunk to keep
// (the chunk list can have gaps)
ArrayList<Integer> remove = new ArrayList<>();
Chunk keep = null;
for (Chunk c : chunks.values()) {
if (c.version > version) {
remove.add(c.id);
} else if (keep == null || keep.id < c.id) {
keep = c;
}
}
if (!remove.isEmpty()) {
// remove the youngest first, so we don't create gaps
// (in case we remove many chunks)
Collections.sort(remove, Collections.reverseOrder());
revertTemp(version);
loadFromFile = true;
for (int id : remove) {
Chunk c = chunks.remove(id);
long start = c.block * BLOCK_SIZE;
int length = c.len * BLOCK_SIZE;
fileStore.free(start, length);
// overwrite the chunk,
                // so it is not used later on
WriteBuffer buff = getWriteBuffer();
buff.limit(length);
// buff.clear() does not set the data
Arrays.fill(buff.getBuffer().array(), (byte) 0);
write(start, buff.getBuffer());
releaseWriteBuffer(buff);
// only really needed if we remove many chunks, when writes are
// re-ordered - but we do it always, because rollback is not
// performance critical
sync();
}
lastChunk = keep;
writeStoreHeader();
readStoreHeader();
}
for (MVMap<?, ?> m : new ArrayList<>(maps.values())) {
int id = m.getId();
if (m.getCreateVersion() >= version) {
m.close();
maps.remove(id);
} else {
if (loadFromFile) {
m.setRootPos(getRootPos(meta, id), -1);
}
}
}
// rollback might have rolled back the stored chunk metadata as well
if (lastChunk != null) {
for (Chunk c : chunks.values()) {
meta.put(Chunk.getMetaKey(c.id), c.asString());
}
}
currentVersion = version;
setWriteVersion(version);
}
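    // Illustrative sketch of a rollback; the store and map variables are
    // assumptions. Changes made after the recorded version are discarded:
    //
    //   long v = store.getCurrentVersion();
    //   map.put("key", "newValue");
    //   store.rollbackTo(v);   // the put above is reverted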
private static long getRootPos(MVMap<String, String> map, int mapId) {
String root = map.get(MVMap.getMapRootKey(mapId));
return root == null ? 0 : DataUtils.parseHexLong(root);
}
private void revertTemp(long storeVersion) {
for (Iterator<Entry<Long, ConcurrentHashMap<Integer, Chunk>>> it =
freedPageSpace.entrySet().iterator(); it.hasNext(); ) {
Entry<Long, ConcurrentHashMap<Integer, Chunk>> entry = it.next();
Long v = entry.getKey();
if (v <= storeVersion) {
it.remove();
}
}
for (MVMap<?, ?> m : maps.values()) {
m.removeUnusedOldVersions();
}
}
/**
* Get the current version of the data. When a new store is created, the
* version is 0.
*
* @return the version
*/
public long getCurrentVersion() {
return currentVersion;
}
/**
* Get the file store.
*
* @return the file store
*/
public FileStore getFileStore() {
return fileStore;
}
/**
* Get the store header. This data is for informational purposes only. The
* data is subject to change in future versions. The data should not be
* modified (doing so may corrupt the store).
*
* @return the store header
*/
public Map<String, Object> getStoreHeader() {
return storeHeader;
}
private void checkOpen() {
if (closed) {
throw DataUtils.newIllegalStateException(DataUtils.ERROR_CLOSED,
"This store is closed", panicException);
}
}
/**
* Rename a map.
*
* @param map the map
* @param newName the new name
*/
public synchronized void renameMap(MVMap<?, ?> map, String newName) {
checkOpen();
DataUtils.checkArgument(map != meta,
"Renaming the meta map is not allowed");
int id = map.getId();
String oldName = getMapName(id);
if (oldName.equals(newName)) {
return;
}
DataUtils.checkArgument(
!meta.containsKey("name." + newName),
"A map named {0} already exists", newName);
markMetaChanged();
String x = Integer.toHexString(id);
meta.remove("name." + oldName);
meta.put(MVMap.getMapKey(id), map.asString(newName));
meta.put("name." + newName, x);
}
/**
* Remove a map. Please note rolling back this operation does not restore
* the data; if you need this ability, use Map.clear().
*
* @param map the map to remove
*/
public synchronized void removeMap(MVMap<?, ?> map) {
checkOpen();
DataUtils.checkArgument(map != meta,
"Removing the meta map is not allowed");
map.clear();
int id = map.getId();
String name = getMapName(id);
markMetaChanged();
meta.remove(MVMap.getMapKey(id));
meta.remove("name." + name);
meta.remove(MVMap.getMapRootKey(id));
maps.remove(id);
}
/**
* Get the name of the given map.
*
* @param id the map id
* @return the name, or null if not found
*/
public synchronized String getMapName(int id) {
checkOpen();
String m = meta.get(MVMap.getMapKey(id));
return m == null ? null : DataUtils.getMapName(m);
}
/**
* Commit and save all changes, if there are any, and compact the store if
* needed.
*/
void writeInBackground() {
try {
if (closed) {
return;
}
// could also commit when there are many unsaved pages,
// but according to a test it doesn't really help
long time = getTimeSinceCreation();
if (time <= lastCommitTime + autoCommitDelay) {
return;
}
if (hasUnsavedChanges()) {
try {
commitAndSave();
} catch (Throwable e) {
handleException(e);
return;
}
}
if (autoCompactFillRate > 0) {
// whether there were file read or write operations since
// the last time
                long fileOpCount = fileStore.getWriteCount() + fileStore.getReadCount();
                boolean fileOps = autoCompactLastFileOpCount != fileOpCount;
// use a lower fill rate if there were any file operations
int targetFillRate = fileOps ? autoCompactFillRate / 3 : autoCompactFillRate;
compact(targetFillRate, autoCommitMemory);
autoCompactLastFileOpCount = fileStore.getWriteCount() + fileStore.getReadCount();
}
} catch (Throwable e) {
handleException(e);
}
}
private void handleException(Throwable ex) {
if (backgroundExceptionHandler != null) {
try {
backgroundExceptionHandler.uncaughtException(null, ex);
} catch(Throwable ignore) {
if (ex != ignore) { // OOME may be the same
ex.addSuppressed(ignore);
}
}
}
}
/**
* Set the read cache size in MB.
*
* @param mb the cache size in MB.
*/
public void setCacheSize(int mb) {
final long bytes = (long) mb * 1024 * 1024;
if (cache != null) {
cache.setMaxMemory(bytes);
cache.clear();
}
if (cacheChunkRef != null) {
cacheChunkRef.setMaxMemory(bytes / 4);
cacheChunkRef.clear();
}
}
public boolean isClosed() {
return closed;
}
private void stopBackgroundThread() {
BackgroundWriterThread t = backgroundWriterThread;
if (t == null) {
return;
}
backgroundWriterThread = null;
if (Thread.currentThread() == t) {
            // within the thread itself - cannot join
return;
}
synchronized (t.sync) {
t.sync.notifyAll();
}
if (Thread.holdsLock(this)) {
            // called from storeNow: cannot join,
// because that could result in a deadlock
return;
}
try {
t.join();
} catch (Exception e) {
// ignore
}
}
/**
* Set the maximum delay in milliseconds to auto-commit changes.
* <p>
* To disable auto-commit, set the value to 0. In this case, changes are
* only committed when explicitly calling commit.
* <p>
* The default is 1000, meaning all changes are committed after at most one
* second.
*
* @param millis the maximum delay
*/
public void setAutoCommitDelay(int millis) {
if (autoCommitDelay == millis) {
return;
}
autoCommitDelay = millis;
if (fileStore == null || fileStore.isReadOnly()) {
return;
}
stopBackgroundThread();
// start the background thread if needed
if (millis > 0) {
int sleep = Math.max(1, millis / 10);
BackgroundWriterThread t =
new BackgroundWriterThread(this, sleep,
fileStore.toString());
t.start();
backgroundWriterThread = t;
}
}
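    // Illustrative sketch: suspend the background writer during a bulk
    // load, then restore the default delay (the store variable is an
    // assumption):
    //
    //   store.setAutoCommitDelay(0);     // stop the background thread
    //   // ... perform many changes ...
    //   store.commit();
    //   store.setAutoCommitDelay(1000);  // restart with the default delay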
/**
* Get the auto-commit delay.
*
* @return the delay in milliseconds, or 0 if auto-commit is disabled.
*/
public int getAutoCommitDelay() {
return autoCommitDelay;
}
/**
* Get the maximum memory (in bytes) used for unsaved pages. If this number
* is exceeded, unsaved changes are stored to disk.
*
* @return the memory in bytes
*/
public int getAutoCommitMemory() {
return autoCommitMemory;
}
/**
* Get the estimated memory (in bytes) of unsaved data. If the value exceeds
* the auto-commit memory, the changes are committed.
* <p>
* The returned value is an estimation only.
*
* @return the memory in bytes
*/
public int getUnsavedMemory() {
return unsavedMemory;
}
/**
* Put the page in the cache.
*
* @param pos the page position
* @param page the page
* @param memory the memory used
*/
void cachePage(long pos, Page page, int memory) {
if (cache != null) {
cache.put(pos, page, memory);
}
}
/**
* Get the amount of memory used for caching, in MB.
* Note that this does not include the page chunk references cache, which is
* 25% of the size of the page cache.
*
* @return the amount of memory used for caching
*/
public int getCacheSizeUsed() {
if (cache == null) {
return 0;
}
return (int) (cache.getUsedMemory() / 1024 / 1024);
}
/**
* Get the maximum cache size, in MB.
* Note that this does not include the page chunk references cache, which is
* 25% of the size of the page cache.
*
* @return the cache size
*/
public int getCacheSize() {
if (cache == null) {
return 0;
}
return (int) (cache.getMaxMemory() / 1024 / 1024);
}
/**
* Get the cache.
*
* @return the cache
*/
public CacheLongKeyLIRS<Page> getCache() {
return cache;
}
/**
* Whether the store is read-only.
*
* @return true if it is
*/
public boolean isReadOnly() {
        return fileStore != null && fileStore.isReadOnly();
}
/**
* A background writer thread to automatically store changes from time to
* time.
*/
private static class BackgroundWriterThread extends Thread {
public final Object sync = new Object();
private final MVStore store;
private final int sleep;
BackgroundWriterThread(MVStore store, int sleep, String fileStoreName) {
super("MVStore background writer " + fileStoreName);
this.store = store;
this.sleep = sleep;
setDaemon(true);
}
@Override
public void run() {
while (true) {
Thread t = store.backgroundWriterThread;
if (t == null) {
break;
}
synchronized (sync) {
try {
sync.wait(sleep);
} catch (InterruptedException e) {
continue;
}
}
store.writeInBackground();
}
}
}
/**
* A builder for an MVStore.
*/
public static class Builder {
private final HashMap<String, Object> config;
private Builder(HashMap<String, Object> config) {
this.config = config;
}
/**
* Creates new instance of MVStore.Builder.
*/
public Builder() {
config = new HashMap<>();
}
private Builder set(String key, Object value) {
config.put(key, value);
return this;
}
/**
* Disable auto-commit, by setting the auto-commit delay and auto-commit
* buffer size to 0.
*
* @return this
*/
public Builder autoCommitDisabled() {
// we have a separate config option so that
// no thread is started if the write delay is 0
// (if we only had a setter in the MVStore,
// the thread would need to be started in any case)
set("autoCommitBufferSize", 0);
return set("autoCommitDelay", 0);
}
/**
* Set the size of the write buffer, in KB disk space (for file-based
* stores). Unless auto-commit is disabled, changes are automatically
* saved if there are more than this amount of changes.
* <p>
* The default is 1024 KB.
* <p>
* When the value is set to 0 or lower, data is not automatically
* stored.
*
* @param kb the write buffer size, in kilobytes
* @return this
*/
public Builder autoCommitBufferSize(int kb) {
return set("autoCommitBufferSize", kb);
}
/**
* Set the auto-compact target fill rate. If the average fill rate (the
* percentage of the storage space that contains active data) of the
* chunks is lower, then the chunks with a low fill rate are re-written.
* Also, if the percentage of empty space between chunks is higher than
* this value, then chunks at the end of the file are moved. Compaction
* stops if the target fill rate is reached.
* <p>
* The default value is 40 (40%). The value 0 disables auto-compacting.
         *
* @param percent the target fill rate
* @return this
*/
public Builder autoCompactFillRate(int percent) {
return set("autoCompactFillRate", percent);
}
/**
* Use the following file name. If the file does not exist, it is
* automatically created. The parent directory already must exist.
*
* @param fileName the file name
* @return this
*/
public Builder fileName(String fileName) {
return set("fileName", fileName);
}
/**
* Encrypt / decrypt the file using the given password. This method has
* no effect for in-memory stores. The password is passed as a
* char array so that it can be cleared as soon as possible. Please note
* there is still a small risk that password stays in memory (due to
* Java garbage collection). Also, the hashed encryption key is kept in
* memory as long as the file is open.
*
* @param password the password
* @return this
*/
public Builder encryptionKey(char[] password) {
return set("encryptionKey", password);
}
/**
* Open the file in read-only mode. In this case, a shared lock will be
* acquired to ensure the file is not concurrently opened in write mode.
* <p>
* If this option is not used, the file is locked exclusively.
* <p>
* Please note a store may only be opened once in every JVM (no matter
* whether it is opened in read-only or read-write mode), because each
* file may be locked only once in a process.
*
* @return this
*/
public Builder readOnly() {
return set("readOnly", 1);
}
/**
* Set the read cache size in MB. The default is 16 MB.
*
* @param mb the cache size in megabytes
* @return this
*/
public Builder cacheSize(int mb) {
return set("cacheSize", mb);
}
/**
* Set the read cache concurrency. The default is 16, meaning 16
* segments are used.
*
* @param concurrency the cache concurrency
* @return this
*/
public Builder cacheConcurrency(int concurrency) {
return set("cacheConcurrency", concurrency);
}
/**
* Compress data before writing using the LZF algorithm. This will save
* about 50% of the disk space, but will slow down read and write
* operations slightly.
* <p>
* This setting only affects writes; it is not necessary to enable
* compression when reading, even if compression was enabled when
* writing.
*
* @return this
*/
public Builder compress() {
return set("compress", 1);
}
/**
* Compress data before writing using the Deflate algorithm. This will
* save more disk space, but will slow down read and write operations
* quite a bit.
* <p>
* This setting only affects writes; it is not necessary to enable
* compression when reading, even if compression was enabled when
* writing.
*
* @return this
*/
public Builder compressHigh() {
return set("compress", 2);
}
/**
* Set the amount of memory a page should contain at most, in bytes,
* before it is split. The default is 16 KB for persistent stores and 4
         * KB for in-memory stores. This is not a limit on the page size, as
* pages with one entry can get larger. It is just the point where pages
* that contain more than one entry are split.
*
* @param pageSplitSize the page size
* @return this
*/
public Builder pageSplitSize(int pageSplitSize) {
return set("pageSplitSize", pageSplitSize);
}
/**
* Set the listener to be used for exceptions that occur when writing in
* the background thread.
*
* @param exceptionHandler the handler
* @return this
*/
public Builder backgroundExceptionHandler(
Thread.UncaughtExceptionHandler exceptionHandler) {
return set("backgroundExceptionHandler", exceptionHandler);
}
/**
* Use the provided file store instead of the default one.
* <p>
* File stores passed in this way need to be open. They are not closed
* when closing the store.
* <p>
* Please note that any kind of store (including an off-heap store) is
* considered a "persistence", while an "in-memory store" means objects
* are not persisted and fully kept in the JVM heap.
*
* @param store the file store
* @return this
*/
public Builder fileStore(FileStore store) {
return set("fileStore", store);
}
/**
* Open the store.
*
* @return the opened store
*/
public MVStore open() {
return new MVStore(config);
}
@Override
public String toString() {
return DataUtils.appendMap(new StringBuilder(), config).toString();
}
/**
* Read the configuration from a string.
*
* @param s the string representation
* @return the builder
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public static Builder fromString(String s) {
// Cast from HashMap<String, String> to HashMap<String, Object> is safe
return new Builder((HashMap) DataUtils.parseMap(s));
}
}
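    // Illustrative sketch of typical builder usage; the file name and
    // cache size are assumptions:
    //
    //   MVStore store = new MVStore.Builder()
    //           .fileName("data.mv")
    //           .cacheSize(32)
    //           .compress()
    //           .open();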
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/MVStoreTool.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.Writer;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.h2.compress.CompressDeflate;
import org.h2.compress.CompressLZF;
import org.h2.compress.Compressor;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.mvstore.type.DataType;
import org.h2.mvstore.type.StringDataType;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FileUtils;
import org.h2.util.Utils;
/**
* Utility methods used in combination with the MVStore.
*/
public class MVStoreTool {
/**
* Runs this tool.
* Options are case sensitive. Supported options are:
* <table summary="command line options">
* <tr><td>[-dump <fileName>]</td>
     * <td>Dump the contents of the file</td></tr>
* <tr><td>[-info <fileName>]</td>
* <td>Get summary information about a file</td></tr>
* <tr><td>[-compact <fileName>]</td>
* <td>Compact a store</td></tr>
     * <tr><td>[-compress <fileName>]</td>
     * <td>Compact a store with compression enabled</td></tr>
     * <tr><td>[-rollback <fileName> <version>]</td>
     * <td>Roll back to the given version</td></tr>
     * <tr><td>[-repair <fileName>]</td>
     * <td>Repair a store by rolling back to the newest good version</td></tr>
     * </table>
*
* @param args the command line arguments
*/
public static void main(String... args) {
for (int i = 0; i < args.length; i++) {
if ("-dump".equals(args[i])) {
String fileName = args[++i];
dump(fileName, new PrintWriter(System.out), true);
} else if ("-info".equals(args[i])) {
String fileName = args[++i];
info(fileName, new PrintWriter(System.out));
} else if ("-compact".equals(args[i])) {
String fileName = args[++i];
compact(fileName, false);
} else if ("-compress".equals(args[i])) {
String fileName = args[++i];
compact(fileName, true);
} else if ("-rollback".equals(args[i])) {
String fileName = args[++i];
long targetVersion = Long.decode(args[++i]);
rollback(fileName, targetVersion, new PrintWriter(System.out));
} else if ("-repair".equals(args[i])) {
String fileName = args[++i];
repair(fileName);
}
}
}
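    // Example invocations, matching the options parsed above (the file name
    // and version are placeholders):
    //
    //   java org.h2.mvstore.MVStoreTool -dump data.mv
    //   java org.h2.mvstore.MVStoreTool -compact data.mv
    //   java org.h2.mvstore.MVStoreTool -rollback data.mv 42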
/**
* Read the contents of the file and write them to system out.
*
* @param fileName the name of the file
* @param details whether to print details
*/
public static void dump(String fileName, boolean details) {
dump(fileName, new PrintWriter(System.out), details);
}
/**
     * Read the summary information of the file and write it to system out.
*
* @param fileName the name of the file
*/
public static void info(String fileName) {
info(fileName, new PrintWriter(System.out));
}
/**
* Read the contents of the file and display them in a human-readable
* format.
*
* @param fileName the name of the file
* @param writer the print writer
* @param details print the page details
*/
public static void dump(String fileName, Writer writer, boolean details) {
PrintWriter pw = new PrintWriter(writer, true);
if (!FilePath.get(fileName).exists()) {
pw.println("File not found: " + fileName);
return;
}
long size = FileUtils.size(fileName);
pw.printf("File %s, %d bytes, %d MB\n", fileName, size, size / 1024 / 1024);
FileChannel file = null;
int blockSize = MVStore.BLOCK_SIZE;
TreeMap<Integer, Long> mapSizesTotal =
new TreeMap<>();
long pageSizeTotal = 0;
try {
file = FilePath.get(fileName).open("r");
long fileSize = file.size();
int len = Long.toHexString(fileSize).length();
ByteBuffer block = ByteBuffer.allocate(4096);
long pageCount = 0;
for (long pos = 0; pos < fileSize;) {
block.rewind();
DataUtils.readFully(file, pos, block);
block.rewind();
int headerType = block.get();
if (headerType == 'H') {
String header = new String(block.array(), StandardCharsets.ISO_8859_1).trim();
pw.printf("%0" + len + "x fileHeader %s%n",
pos, header);
pos += blockSize;
continue;
}
if (headerType != 'c') {
pos += blockSize;
continue;
}
block.position(0);
Chunk c = null;
try {
c = Chunk.readChunkHeader(block, pos);
} catch (IllegalStateException e) {
pos += blockSize;
continue;
}
if (c.len <= 0) {
// not a chunk
pos += blockSize;
continue;
}
int length = c.len * MVStore.BLOCK_SIZE;
pw.printf("%n%0" + len + "x chunkHeader %s%n",
pos, c.toString());
ByteBuffer chunk = ByteBuffer.allocate(length);
DataUtils.readFully(file, pos, chunk);
int p = block.position();
pos += length;
int remaining = c.pageCount;
pageCount += c.pageCount;
TreeMap<Integer, Integer> mapSizes =
new TreeMap<>();
int pageSizeSum = 0;
while (remaining > 0) {
int start = p;
try {
chunk.position(p);
} catch (IllegalArgumentException e) {
// too far
pw.printf("ERROR illegal position %d%n", p);
break;
}
int pageSize = chunk.getInt();
// check value (ignored)
chunk.getShort();
int mapId = DataUtils.readVarInt(chunk);
int entries = DataUtils.readVarInt(chunk);
int type = chunk.get();
boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
boolean node = (type & 1) != 0;
if (details) {
pw.printf(
"+%0" + len +
"x %s, map %x, %d entries, %d bytes, maxLen %x%n",
p,
(node ? "node" : "leaf") +
(compressed ? " compressed" : ""),
mapId,
node ? entries + 1 : entries,
pageSize,
DataUtils.getPageMaxLength(DataUtils.getPagePos(0, 0, pageSize, 0))
);
}
p += pageSize;
Integer mapSize = mapSizes.get(mapId);
if (mapSize == null) {
mapSize = 0;
}
mapSizes.put(mapId, mapSize + pageSize);
Long total = mapSizesTotal.get(mapId);
if (total == null) {
total = 0L;
}
mapSizesTotal.put(mapId, total + pageSize);
pageSizeSum += pageSize;
pageSizeTotal += pageSize;
remaining--;
long[] children = null;
long[] counts = null;
if (node) {
children = new long[entries + 1];
for (int i = 0; i <= entries; i++) {
children[i] = chunk.getLong();
}
counts = new long[entries + 1];
for (int i = 0; i <= entries; i++) {
long s = DataUtils.readVarLong(chunk);
counts[i] = s;
}
}
String[] keys = new String[entries];
if (mapId == 0 && details) {
ByteBuffer data;
if (compressed) {
boolean fast = !((type & DataUtils.PAGE_COMPRESSED_HIGH) ==
DataUtils.PAGE_COMPRESSED_HIGH);
Compressor compressor = getCompressor(fast);
int lenAdd = DataUtils.readVarInt(chunk);
int compLen = pageSize + start - chunk.position();
byte[] comp = Utils.newBytes(compLen);
chunk.get(comp);
int l = compLen + lenAdd;
data = ByteBuffer.allocate(l);
compressor.expand(comp, 0, compLen, data.array(), 0, l);
} else {
data = chunk;
}
for (int i = 0; i < entries; i++) {
String k = StringDataType.INSTANCE.read(data);
keys[i] = k;
}
if (node) {
// meta map node
for (int i = 0; i < entries; i++) {
long cp = children[i];
pw.printf(" %d children < %s @ " +
"chunk %x +%0" +
len + "x%n",
counts[i],
keys[i],
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
}
long cp = children[entries];
pw.printf(" %d children >= %s @ chunk %x +%0" +
len + "x%n",
counts[entries],
                                    entries == 0 ? null : keys[entries - 1],
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
} else {
// meta map leaf
String[] values = new String[entries];
for (int i = 0; i < entries; i++) {
String v = StringDataType.INSTANCE.read(data);
values[i] = v;
}
for (int i = 0; i < entries; i++) {
pw.println(" " + keys[i] +
" = " + values[i]);
}
}
} else {
if (node && details) {
for (int i = 0; i <= entries; i++) {
long cp = children[i];
pw.printf(" %d children @ chunk %x +%0" +
len + "x%n",
counts[i],
DataUtils.getPageChunkId(cp),
DataUtils.getPageOffset(cp));
}
}
}
}
pageSizeSum = Math.max(1, pageSizeSum);
for (Integer mapId : mapSizes.keySet()) {
int percent = 100 * mapSizes.get(mapId) / pageSizeSum;
pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizes.get(mapId), percent);
}
int footerPos = chunk.limit() - Chunk.FOOTER_LENGTH;
try {
chunk.position(footerPos);
pw.printf(
"+%0" + len + "x chunkFooter %s%n",
footerPos,
new String(chunk.array(), chunk.position(),
Chunk.FOOTER_LENGTH, StandardCharsets.ISO_8859_1).trim());
} catch (IllegalArgumentException e) {
// too far
pw.printf("ERROR illegal footer position %d%n", footerPos);
}
}
pw.printf("%n%0" + len + "x eof%n", fileSize);
pw.printf("\n");
pageCount = Math.max(1, pageCount);
pw.printf("page size total: %d bytes, page count: %d, average page size: %d bytes\n",
pageSizeTotal, pageCount, pageSizeTotal / pageCount);
pageSizeTotal = Math.max(1, pageSizeTotal);
for (Integer mapId : mapSizesTotal.keySet()) {
int percent = (int) (100 * mapSizesTotal.get(mapId) / pageSizeTotal);
pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizesTotal.get(mapId), percent);
}
} catch (IOException e) {
pw.println("ERROR: " + e);
e.printStackTrace(pw);
} finally {
if (file != null) {
try {
file.close();
} catch (IOException e) {
// ignore
}
}
}
pw.flush();
}
private static Compressor getCompressor(boolean fast) {
return fast ? new CompressLZF() : new CompressDeflate();
}
/**
     * Read the summary information of the file and write it to system out.
*
* @param fileName the name of the file
* @param writer the print writer
* @return null if successful (if there was no error), otherwise the error
* message
*/
public static String info(String fileName, Writer writer) {
PrintWriter pw = new PrintWriter(writer, true);
if (!FilePath.get(fileName).exists()) {
pw.println("File not found: " + fileName);
return "File not found: " + fileName;
}
long fileLength = FileUtils.size(fileName);
MVStore store = new MVStore.Builder().
fileName(fileName).
readOnly().open();
try {
MVMap<String, String> meta = store.getMetaMap();
Map<String, Object> header = store.getStoreHeader();
long fileCreated = DataUtils.readHexLong(header, "created", 0L);
TreeMap<Integer, Chunk> chunks = new TreeMap<>();
long chunkLength = 0;
long maxLength = 0;
long maxLengthLive = 0;
long maxLengthNotEmpty = 0;
for (Entry<String, String> e : meta.entrySet()) {
String k = e.getKey();
if (k.startsWith("chunk.")) {
Chunk c = Chunk.fromString(e.getValue());
chunks.put(c.id, c);
chunkLength += c.len * MVStore.BLOCK_SIZE;
maxLength += c.maxLen;
maxLengthLive += c.maxLenLive;
if (c.maxLenLive > 0) {
maxLengthNotEmpty += c.maxLen;
}
}
}
pw.printf("Created: %s\n", formatTimestamp(fileCreated, fileCreated));
pw.printf("Last modified: %s\n",
formatTimestamp(FileUtils.lastModified(fileName), fileCreated));
pw.printf("File length: %d\n", fileLength);
pw.printf("The last chunk is not listed\n");
pw.printf("Chunk length: %d\n", chunkLength);
pw.printf("Chunk count: %d\n", chunks.size());
pw.printf("Used space: %d%%\n", getPercent(chunkLength, fileLength));
pw.printf("Chunk fill rate: %d%%\n", maxLength == 0 ? 100 :
getPercent(maxLengthLive, maxLength));
pw.printf("Chunk fill rate excluding empty chunks: %d%%\n",
maxLengthNotEmpty == 0 ? 100 :
getPercent(maxLengthLive, maxLengthNotEmpty));
for (Entry<Integer, Chunk> e : chunks.entrySet()) {
Chunk c = e.getValue();
long created = fileCreated + c.time;
pw.printf(" Chunk %d: %s, %d%% used, %d blocks",
c.id, formatTimestamp(created, fileCreated),
getPercent(c.maxLenLive, c.maxLen),
c.len
);
if (c.maxLenLive == 0) {
pw.printf(", unused: %s",
formatTimestamp(fileCreated + c.unused, fileCreated));
}
pw.printf("\n");
}
pw.printf("\n");
} catch (Exception e) {
pw.println("ERROR: " + e);
e.printStackTrace(pw);
return e.getMessage();
} finally {
store.close();
}
pw.flush();
return null;
}
private static String formatTimestamp(long t, long start) {
String x = new Timestamp(t).toString();
String s = x.substring(0, 19);
s += " (+" + ((t - start) / 1000) + " s)";
return s;
}
private static int getPercent(long value, long max) {
if (value == 0) {
return 0;
} else if (value == max) {
return 100;
}
return (int) (1 + 98 * value / Math.max(1, max));
}
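    // Worked example: getPercent(500, 1000) = 1 + 98 * 500 / 1000 = 50.
    // Non-zero values are mapped into 1..99, so a nearly empty chunk still
    // shows at least 1% and a nearly full one at most 99%.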
/**
* Compress the store by creating a new file and copying the live pages
* there. Temporarily, a file with the suffix ".tempFile" is created. This
* file is then renamed, replacing the original file, if possible. If not,
* the new file is renamed to ".newFile", then the old file is removed, and
* the new file is renamed. This might be interrupted, so it's better to
* compactCleanUp before opening a store, in case this method was used.
*
* @param fileName the file name
* @param compress whether to compress the data
*/
public static void compact(String fileName, boolean compress) {
String tempName = fileName + Constants.SUFFIX_MV_STORE_TEMP_FILE;
FileUtils.delete(tempName);
compact(fileName, tempName, compress);
try {
FileUtils.moveAtomicReplace(tempName, fileName);
} catch (DbException e) {
String newName = fileName + Constants.SUFFIX_MV_STORE_NEW_FILE;
FileUtils.delete(newName);
FileUtils.move(tempName, newName);
FileUtils.delete(fileName);
FileUtils.move(newName, fileName);
}
}
/**
     * Clean up if needed, in case a compact operation was interrupted due to
     * the process being killed or a power failure. This will delete temporary files
* (if any), and in case atomic file replacements were not used, rename the
* new file.
*
* @param fileName the file name
*/
public static void compactCleanUp(String fileName) {
String tempName = fileName + Constants.SUFFIX_MV_STORE_TEMP_FILE;
if (FileUtils.exists(tempName)) {
FileUtils.delete(tempName);
}
String newName = fileName + Constants.SUFFIX_MV_STORE_NEW_FILE;
if (FileUtils.exists(newName)) {
if (FileUtils.exists(fileName)) {
FileUtils.delete(newName);
} else {
FileUtils.move(newName, fileName);
}
}
}
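    // Illustrative sketch: run the cleanup before opening a store that may
    // have been left behind by an interrupted compaction (the file name is
    // a placeholder):
    //
    //   MVStoreTool.compactCleanUp("data.mv");
    //   MVStore store = new MVStore.Builder().fileName("data.mv").open();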
/**
* Copy all live pages from the source store to the target store.
*
* @param sourceFileName the name of the source store
* @param targetFileName the name of the target store
* @param compress whether to compress the data
*/
public static void compact(String sourceFileName, String targetFileName, boolean compress) {
MVStore source = new MVStore.Builder().
fileName(sourceFileName).
readOnly().
open();
FileUtils.delete(targetFileName);
MVStore.Builder b = new MVStore.Builder().
fileName(targetFileName);
if (compress) {
b.compress();
}
MVStore target = b.open();
compact(source, target);
target.close();
source.close();
}
/**
* Copy all live pages from the source store to the target store.
*
* @param source the source store
* @param target the target store
*/
public static void compact(MVStore source, MVStore target) {
MVMap<String, String> sourceMeta = source.getMetaMap();
MVMap<String, String> targetMeta = target.getMetaMap();
for (Entry<String, String> m : sourceMeta.entrySet()) {
String key = m.getKey();
if (key.startsWith("chunk.")) {
// ignore
} else if (key.startsWith("map.")) {
// ignore
} else if (key.startsWith("name.")) {
// ignore
} else if (key.startsWith("root.")) {
// ignore
} else {
targetMeta.put(key, m.getValue());
}
}
for (String mapName : source.getMapNames()) {
MVMap.Builder<Object, Object> mp =
new MVMap.Builder<>().
keyType(new GenericDataType()).
valueType(new GenericDataType());
MVMap<Object, Object> sourceMap = source.openMap(mapName, mp);
MVMap<Object, Object> targetMap = target.openMap(mapName, mp);
targetMap.copyFrom(sourceMap);
}
}
/**
* Repair a store by rolling back to the newest good version.
*
* @param fileName the file name
*/
public static void repair(String fileName) {
PrintWriter pw = new PrintWriter(System.out);
long version = Long.MAX_VALUE;
OutputStream ignore = new OutputStream() {
@Override
public void write(int b) throws IOException {
// ignore
}
};
while (version >= 0) {
pw.println(version == Long.MAX_VALUE ? "Trying latest version"
: ("Trying version " + version));
pw.flush();
version = rollback(fileName, version, new PrintWriter(ignore));
try {
String error = info(fileName + ".temp", new PrintWriter(ignore));
if (error == null) {
FilePath.get(fileName).moveTo(FilePath.get(fileName + ".back"), true);
FilePath.get(fileName + ".temp").moveTo(FilePath.get(fileName), true);
pw.println("Success");
break;
}
pw.println(" ... failed: " + error);
} catch (Exception e) {
pw.println("Fail: " + e.getMessage());
pw.flush();
}
version--;
}
pw.flush();
}
/**
     * Roll back to a given revision, writing the result into a file called *.temp.
*
* @param fileName the file name
* @param targetVersion the version to roll back to (Long.MAX_VALUE for the
* latest version)
* @param writer the log writer
* @return the version rolled back to (-1 if no version)
*/
public static long rollback(String fileName, long targetVersion, Writer writer) {
long newestVersion = -1;
PrintWriter pw = new PrintWriter(writer, true);
if (!FilePath.get(fileName).exists()) {
pw.println("File not found: " + fileName);
return newestVersion;
}
FileChannel file = null;
FileChannel target = null;
int blockSize = MVStore.BLOCK_SIZE;
try {
file = FilePath.get(fileName).open("r");
FilePath.get(fileName + ".temp").delete();
target = FilePath.get(fileName + ".temp").open("rw");
long fileSize = file.size();
ByteBuffer block = ByteBuffer.allocate(4096);
Chunk newestChunk = null;
for (long pos = 0; pos < fileSize;) {
block.rewind();
DataUtils.readFully(file, pos, block);
block.rewind();
int headerType = block.get();
if (headerType == 'H') {
block.rewind();
target.write(block, pos);
pos += blockSize;
continue;
}
if (headerType != 'c') {
pos += blockSize;
continue;
}
Chunk c = null;
try {
c = Chunk.readChunkHeader(block, pos);
} catch (IllegalStateException e) {
pos += blockSize;
continue;
}
if (c.len <= 0) {
// not a chunk
pos += blockSize;
continue;
}
int length = c.len * MVStore.BLOCK_SIZE;
ByteBuffer chunk = ByteBuffer.allocate(length);
DataUtils.readFully(file, pos, chunk);
if (c.version > targetVersion) {
// newer than the requested version
pos += length;
continue;
}
chunk.rewind();
target.write(chunk, pos);
if (newestChunk == null || c.version > newestChunk.version) {
newestChunk = c;
newestVersion = c.version;
}
pos += length;
}
            if (newestChunk != null) {
                // append a copy of the newest kept chunk, so that the store
                // opens at the rolled-back version
                int length = newestChunk.len * MVStore.BLOCK_SIZE;
                ByteBuffer chunk = ByteBuffer.allocate(length);
                DataUtils.readFully(file, newestChunk.block * MVStore.BLOCK_SIZE, chunk);
                chunk.rewind();
                target.write(chunk, fileSize);
            }
} catch (IOException e) {
pw.println("ERROR: " + e);
e.printStackTrace(pw);
} finally {
if (file != null) {
try {
file.close();
} catch (IOException e) {
// ignore
}
}
if (target != null) {
try {
target.close();
} catch (IOException e) {
// ignore
}
}
}
pw.flush();
return newestVersion;
}
/**
* A data type that can read any data that is persisted, and converts it to
* a byte array.
*/
static class GenericDataType implements DataType {
@Override
public int compare(Object a, Object b) {
throw DataUtils.newUnsupportedOperationException("Can not compare");
}
@Override
public int getMemory(Object obj) {
return obj == null ? 0 : ((byte[]) obj).length * 8;
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (obj != null) {
buff.put((byte[]) obj);
}
}
@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
for (Object o : obj) {
write(buff, o);
}
}
@Override
public Object read(ByteBuffer buff) {
int len = buff.remaining();
if (len == 0) {
return null;
}
byte[] data = new byte[len];
buff.get(data);
return data;
}
@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < obj.length; i++) {
obj[i] = read(buff);
}
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/OffHeapStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.TreeMap;
/**
* A storage mechanism that "persists" data in the off-heap area of the main
* memory.
*/
public class OffHeapStore extends FileStore {
private final TreeMap<Long, ByteBuffer> memory =
new TreeMap<>();
@Override
public void open(String fileName, boolean readOnly, char[] encryptionKey) {
memory.clear();
}
@Override
public String toString() {
return memory.toString();
}
@Override
public ByteBuffer readFully(long pos, int len) {
Entry<Long, ByteBuffer> memEntry = memory.floorEntry(pos);
if (memEntry == null) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_READING_FAILED,
"Could not read from position {0}", pos);
}
readCount.incrementAndGet();
readBytes.addAndGet(len);
ByteBuffer buff = memEntry.getValue();
ByteBuffer read = buff.duplicate();
int offset = (int) (pos - memEntry.getKey());
read.position(offset);
read.limit(len + offset);
return read.slice();
}
@Override
public void free(long pos, int length) {
freeSpace.free(pos, length);
ByteBuffer buff = memory.remove(pos);
if (buff == null) {
// nothing was written (just allocated)
} else if (buff.remaining() != length) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_READING_FAILED,
"Partial remove is not supported at position {0}", pos);
}
}
@Override
public void writeFully(long pos, ByteBuffer src) {
fileSize = Math.max(fileSize, pos + src.remaining());
Entry<Long, ByteBuffer> mem = memory.floorEntry(pos);
if (mem == null) {
// not found: create a new entry
writeNewEntry(pos, src);
return;
}
long prevPos = mem.getKey();
ByteBuffer buff = mem.getValue();
int prevLength = buff.capacity();
int length = src.remaining();
if (prevPos == pos) {
if (prevLength != length) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_READING_FAILED,
"Could not write to position {0}; " +
"partial overwrite is not supported", pos);
}
writeCount.incrementAndGet();
writeBytes.addAndGet(length);
buff.rewind();
buff.put(src);
return;
}
if (prevPos + prevLength > pos) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_READING_FAILED,
"Could not write to position {0}; " +
"partial overwrite is not supported", pos);
}
writeNewEntry(pos, src);
}
private void writeNewEntry(long pos, ByteBuffer src) {
int length = src.remaining();
writeCount.incrementAndGet();
writeBytes.addAndGet(length);
ByteBuffer buff = ByteBuffer.allocateDirect(length);
buff.put(src);
buff.rewind();
memory.put(pos, buff);
}
@Override
public void truncate(long size) {
writeCount.incrementAndGet();
if (size == 0) {
fileSize = 0;
memory.clear();
return;
}
fileSize = size;
for (Iterator<Long> it = memory.keySet().iterator(); it.hasNext();) {
long pos = it.next();
if (pos < size) {
break;
}
ByteBuffer buff = memory.get(pos);
if (buff.capacity() > size) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_READING_FAILED,
"Could not truncate to {0}; " +
"partial truncate is not supported", pos);
}
it.remove();
}
}
@Override
public void close() {
memory.clear();
}
@Override
public void sync() {
// nothing to do
}
@Override
public int getDefaultRetentionTime() {
return 0;
}
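    // Illustrative sketch: plug this store into an MVStore through the
    // builder (see MVStore.Builder.fileStore); the variable names are
    // assumptions:
    //
    //   OffHeapStore offHeap = new OffHeapStore();
    //   MVStore store = new MVStore.Builder().fileStore(offHeap).open();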
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/Page.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.nio.ByteBuffer;
import java.util.HashSet;
import org.h2.compress.Compressor;
import org.h2.mvstore.type.DataType;
import org.h2.util.Utils;
/**
* A page (a node or a leaf).
* <p>
* For b-tree nodes, the key at a given index is larger than the largest key of
* the child at the same index.
* <p>
* File format:
* page length (including length): int
* check value: short
* map id: varInt
* number of keys: varInt
* type: byte (0: leaf, 1: node; +2: compressed)
* compressed: bytes saved (varInt)
* keys
* leaf: values (one for each key)
* node: children (1 more than keys)
*/
public class Page {
/**
* An empty object array.
*/
public static final Object[] EMPTY_OBJECT_ARRAY = new Object[0];
private static final int IN_MEMORY = Integer.MIN_VALUE;
private final MVMap<?, ?> map;
private long version;
private long pos;
/**
* The total entry count of this page and all children.
*/
private long totalCount;
/**
* The last result of a find operation is cached.
*/
private int cachedCompare;
/**
* The estimated memory used in persistent case, IN_MEMORY marker value otherwise.
*/
private int memory;
/**
* The keys.
* <p>
* The array might be larger than needed, to avoid frequent re-sizing.
*/
private Object[] keys;
/**
* The values.
* <p>
* The array might be larger than needed, to avoid frequent re-sizing.
*/
private Object[] values;
/**
* The child page references.
* <p>
* The array might be larger than needed, to avoid frequent re-sizing.
*/
private PageReference[] children;
/**
* Whether the page is an in-memory (not stored, or not yet stored) page,
* and it is removed. This is to keep track of pages that concurrently
* changed while they are being stored, in which case the live bookkeeping
* needs to be aware of such cases.
*/
private volatile boolean removedInMemory;
Page(MVMap<?, ?> map, long version) {
this.map = map;
this.version = version;
}
/**
* Create a new, empty page.
*
* @param map the map
* @param version the version
* @return the new page
*/
static Page createEmpty(MVMap<?, ?> map, long version) {
return create(map, version,
EMPTY_OBJECT_ARRAY, EMPTY_OBJECT_ARRAY,
null,
0, DataUtils.PAGE_MEMORY);
}
/**
* Create a new page. The arrays are not cloned.
*
* @param map the map
* @param version the version
* @param keys the keys
* @param values the values
* @param children the child page positions
* @param totalCount the total number of keys
* @param memory the memory used in bytes
* @return the page
*/
public static Page create(MVMap<?, ?> map, long version,
Object[] keys, Object[] values, PageReference[] children,
long totalCount, int memory) {
Page p = new Page(map, version);
// the position is 0
p.keys = keys;
p.values = values;
p.children = children;
p.totalCount = totalCount;
MVStore store = map.store;
if(store.getFileStore() == null) {
p.memory = IN_MEMORY;
} else if (memory == 0) {
p.recalculateMemory();
} else {
p.addMemory(memory);
}
if(store.getFileStore() != null) {
store.registerUnsavedPage(p.memory);
}
return p;
}
/**
* Create a copy of a page.
*
* @param map the map
* @param version the version
* @param source the source page
* @return the page
*/
public static Page create(MVMap<?, ?> map, long version, Page source) {
return create(map, version, source.keys, source.values, source.children,
source.totalCount, source.memory);
}
/**
* Read a page.
*
* @param fileStore the file store
* @param pos the position
* @param map the map
* @param filePos the position in the file
* @param maxPos the maximum position (the end of the chunk)
* @return the page
*/
static Page read(FileStore fileStore, long pos, MVMap<?, ?> map,
long filePos, long maxPos) {
ByteBuffer buff;
int maxLength = DataUtils.getPageMaxLength(pos);
if (maxLength == DataUtils.PAGE_LARGE) {
buff = fileStore.readFully(filePos, 128);
maxLength = buff.getInt();
// read the first bytes again
}
maxLength = (int) Math.min(maxPos - filePos, maxLength);
int length = maxLength;
if (length < 0) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Illegal page length {0} reading at {1}; max pos {2} ",
length, filePos, maxPos);
}
buff = fileStore.readFully(filePos, length);
Page p = new Page(map, 0);
p.pos = pos;
int chunkId = DataUtils.getPageChunkId(pos);
int offset = DataUtils.getPageOffset(pos);
p.read(buff, chunkId, offset, maxLength);
return p;
}
/**
* Get the key at the given index.
*
* @param index the index
* @return the key
*/
public Object getKey(int index) {
return keys[index];
}
/**
* Get the child page at the given index.
*
* @param index the index
* @return the child page
*/
public Page getChildPage(int index) {
PageReference ref = children[index];
return ref.page != null ? ref.page : map.readPage(ref.pos);
}
/**
* Get the position of the child.
*
* @param index the index
* @return the position
*/
public long getChildPagePos(int index) {
return children[index].pos;
}
/**
* Get the value at the given index.
*
* @param index the index
* @return the value
*/
public Object getValue(int index) {
return values[index];
}
/**
* Get the number of keys in this page.
*
* @return the number of keys
*/
public int getKeyCount() {
return keys.length;
}
/**
* Check whether this is a leaf page.
*
* @return true if it is a leaf
*/
public boolean isLeaf() {
return children == null;
}
/**
* Get the position of the page
*
* @return the position
*/
public long getPos() {
return pos;
}
@Override
public String toString() {
StringBuilder buff = new StringBuilder();
buff.append("id: ").append(System.identityHashCode(this)).append('\n');
buff.append("version: ").append(Long.toHexString(version)).append('\n');
buff.append("pos: ").append(Long.toHexString(pos)).append('\n');
if (pos != 0) {
int chunkId = DataUtils.getPageChunkId(pos);
buff.append("chunk: ").append(Long.toHexString(chunkId)).append('\n');
}
for (int i = 0; i <= keys.length; i++) {
if (i > 0) {
buff.append(" ");
}
if (children != null) {
buff.append('[').append(Long.toHexString(children[i].pos)).append("] ");
}
if (i < keys.length) {
buff.append(keys[i]);
if (values != null) {
buff.append(':');
buff.append(values[i]);
}
}
}
return buff.toString();
}
/**
* Create a copy of this page.
*
* @param version the new version
* @return a page with the given version
*/
public Page copy(long version) {
Page newPage = create(map, version,
keys, values,
children, totalCount,
memory);
// mark the old as deleted
removePage();
newPage.cachedCompare = cachedCompare;
return newPage;
}
/**
* Search the key in this page using a binary search. Instead of always
* starting the search in the middle, the last found index is cached.
* <p>
* If the key was found, the returned value is the index in the key array.
* If not found, the returned value is negative, where -1 means the provided
* key is smaller than any keys in this page. See also Arrays.binarySearch.
*
* @param key the key
     * @return the index of the key, or a negative value if not found
*/
public int binarySearch(Object key) {
int low = 0, high = keys.length - 1;
// the cached index minus one, so that
// for the first time (when cachedCompare is 0),
// the default value is used
int x = cachedCompare - 1;
if (x < 0 || x > high) {
x = high >>> 1;
}
Object[] k = keys;
while (low <= high) {
int compare = map.compare(key, k[x]);
if (compare > 0) {
low = x + 1;
} else if (compare < 0) {
high = x - 1;
} else {
cachedCompare = x + 1;
return x;
}
x = (low + high) >>> 1;
}
cachedCompare = low;
return -(low + 1);
// regular binary search (without caching)
// int low = 0, high = keys.length - 1;
// while (low <= high) {
// int x = (low + high) >>> 1;
// int compare = map.compare(key, keys[x]);
// if (compare > 0) {
// low = x + 1;
// } else if (compare < 0) {
// high = x - 1;
// } else {
// return x;
// }
// }
// return -(low + 1);
}
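    // Illustrative sketch of decoding the result, mirroring
    // java.util.Arrays.binarySearch semantics; the page and key variables
    // are assumptions:
    //
    //   int idx = page.binarySearch(key);
    //   if (idx < 0) {
    //       int insertionPoint = -(idx + 1); // first index with a larger key
    //   }
    //   // idx >= 0 means the key exists at index idx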
/**
* Split the page. This modifies the current page.
*
* @param at the split index
* @return the page with the entries after the split index
*/
Page split(int at) {
Page page = isLeaf() ? splitLeaf(at) : splitNode(at);
if(isPersistent()) {
recalculateMemory();
}
return page;
}
private Page splitLeaf(int at) {
int b = keys.length - at;
Object[] aKeys = new Object[at];
Object[] bKeys = new Object[b];
System.arraycopy(keys, 0, aKeys, 0, at);
System.arraycopy(keys, at, bKeys, 0, b);
keys = aKeys;
Object[] aValues = new Object[at];
        Object[] bValues = new Object[b];
System.arraycopy(values, 0, aValues, 0, at);
System.arraycopy(values, at, bValues, 0, b);
values = aValues;
totalCount = at;
return create(map, version,
bKeys, bValues,
null,
b, 0);
}
private Page splitNode(int at) {
int b = keys.length - at;
Object[] aKeys = new Object[at];
Object[] bKeys = new Object[b - 1];
System.arraycopy(keys, 0, aKeys, 0, at);
System.arraycopy(keys, at + 1, bKeys, 0, b - 1);
keys = aKeys;
PageReference[] aChildren = new PageReference[at + 1];
PageReference[] bChildren = new PageReference[b];
System.arraycopy(children, 0, aChildren, 0, at + 1);
System.arraycopy(children, at + 1, bChildren, 0, b);
children = aChildren;
long t = 0;
for (PageReference x : aChildren) {
t += x.count;
}
totalCount = t;
t = 0;
for (PageReference x : bChildren) {
t += x.count;
}
return create(map, version,
bKeys, null,
bChildren,
t, 0);
}
/**
* Get the total number of key-value pairs, including child pages.
*
* @return the number of key-value pairs
*/
public long getTotalCount() {
if (MVStore.ASSERT) {
long check = 0;
if (isLeaf()) {
check = keys.length;
} else {
for (PageReference x : children) {
check += x.count;
}
}
if (check != totalCount) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_INTERNAL,
"Expected: {0} got: {1}", check, totalCount);
}
}
return totalCount;
}
/**
* Get the descendant counts for the given child.
*
* @param index the child index
* @return the descendant count
*/
long getCounts(int index) {
return children[index].count;
}
/**
* Replace the child page.
*
* @param index the index
* @param c the new child page
*/
public void setChild(int index, Page c) {
if (c == null) {
long oldCount = children[index].count;
// this is slightly slower:
// children = Arrays.copyOf(children, children.length);
children = children.clone();
PageReference ref = new PageReference(null, 0, 0);
children[index] = ref;
totalCount -= oldCount;
} else if (c != children[index].page ||
c.getPos() != children[index].pos) {
long oldCount = children[index].count;
// this is slightly slower:
// children = Arrays.copyOf(children, children.length);
children = children.clone();
PageReference ref = new PageReference(c, c.pos, c.totalCount);
children[index] = ref;
totalCount += c.totalCount - oldCount;
}
}
/**
* Replace the key at an index in this page.
*
* @param index the index
* @param key the new key
*/
public void setKey(int index, Object key) {
// this is slightly slower:
// keys = Arrays.copyOf(keys, keys.length);
keys = keys.clone();
if(isPersistent()) {
Object old = keys[index];
DataType keyType = map.getKeyType();
int mem = keyType.getMemory(key);
if (old != null) {
mem -= keyType.getMemory(old);
}
addMemory(mem);
}
keys[index] = key;
}
/**
* Replace the value at an index in this page.
*
* @param index the index
* @param value the new value
* @return the old value
*/
public Object setValue(int index, Object value) {
Object old = values[index];
// this is slightly slower:
// values = Arrays.copyOf(values, values.length);
values = values.clone();
DataType valueType = map.getValueType();
if(isPersistent()) {
addMemory(valueType.getMemory(value) -
valueType.getMemory(old));
}
values[index] = value;
return old;
}
/**
* Remove this page and all child pages.
*/
void removeAllRecursive() {
if (children != null) {
for (int i = 0, size = map.getChildPageCount(this); i < size; i++) {
PageReference ref = children[i];
if (ref.page != null) {
ref.page.removeAllRecursive();
} else {
long c = children[i].pos;
int type = DataUtils.getPageType(c);
if (type == DataUtils.PAGE_TYPE_LEAF) {
int mem = DataUtils.getPageMaxLength(c);
map.removePage(c, mem);
} else {
map.readPage(c).removeAllRecursive();
}
}
}
}
removePage();
}
/**
* Insert a key-value pair into this leaf.
*
* @param index the index
* @param key the key
* @param value the value
*/
public void insertLeaf(int index, Object key, Object value) {
int len = keys.length + 1;
Object[] newKeys = new Object[len];
DataUtils.copyWithGap(keys, newKeys, len - 1, index);
keys = newKeys;
Object[] newValues = new Object[len];
DataUtils.copyWithGap(values, newValues, len - 1, index);
values = newValues;
keys[index] = key;
values[index] = value;
totalCount++;
if(isPersistent()) {
addMemory(map.getKeyType().getMemory(key) +
map.getValueType().getMemory(value));
}
}
/**
* Insert a child page into this node.
*
* @param index the index
* @param key the key
* @param childPage the child page
*/
public void insertNode(int index, Object key, Page childPage) {
Object[] newKeys = new Object[keys.length + 1];
DataUtils.copyWithGap(keys, newKeys, keys.length, index);
newKeys[index] = key;
keys = newKeys;
int childCount = children.length;
PageReference[] newChildren = new PageReference[childCount + 1];
DataUtils.copyWithGap(children, newChildren, childCount, index);
newChildren[index] = new PageReference(
childPage, childPage.getPos(), childPage.totalCount);
children = newChildren;
totalCount += childPage.totalCount;
if(isPersistent()) {
addMemory(map.getKeyType().getMemory(key) +
DataUtils.PAGE_MEMORY_CHILD);
}
}
/**
* Remove the key and value (or child) at the given index.
*
* @param index the index
*/
public void remove(int index) {
int keyLength = keys.length;
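// a node page has one more child than it has keys, so removing the last
// child (index == keyLength) removes the preceding key instead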
int keyIndex = index >= keyLength ? index - 1 : index;
if(isPersistent()) {
Object old = keys[keyIndex];
addMemory(-map.getKeyType().getMemory(old));
}
Object[] newKeys = new Object[keyLength - 1];
DataUtils.copyExcept(keys, newKeys, keyLength, keyIndex);
keys = newKeys;
if (values != null) {
if(isPersistent()) {
Object old = values[index];
addMemory(-map.getValueType().getMemory(old));
}
Object[] newValues = new Object[keyLength - 1];
DataUtils.copyExcept(values, newValues, keyLength, index);
values = newValues;
totalCount--;
}
if (children != null) {
if(isPersistent()) {
addMemory(-DataUtils.PAGE_MEMORY_CHILD);
}
long countOffset = children[index].count;
int childCount = children.length;
PageReference[] newChildren = new PageReference[childCount - 1];
DataUtils.copyExcept(children, newChildren, childCount, index);
children = newChildren;
totalCount -= countOffset;
}
}
/**
* Read the page from the buffer.
*
* @param buff the buffer
* @param chunkId the chunk id
* @param offset the offset within the chunk
* @param maxLength the maximum length
*/
void read(ByteBuffer buff, int chunkId, int offset, int maxLength) {
int start = buff.position();
int pageLength = buff.getInt();
if (pageLength > maxLength || pageLength < 4) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupted in chunk {0}, expected page length 4..{1}, got {2}",
chunkId, maxLength, pageLength);
}
buff.limit(start + pageLength);
short check = buff.getShort();
int mapId = DataUtils.readVarInt(buff);
if (mapId != map.getId()) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupted in chunk {0}, expected map id {1}, got {2}",
chunkId, map.getId(), mapId);
}
int checkTest = DataUtils.getCheckValue(chunkId)
^ DataUtils.getCheckValue(offset)
^ DataUtils.getCheckValue(pageLength);
if (check != (short) checkTest) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupted in chunk {0}, expected check value {1}, got {2}",
chunkId, checkTest, check);
}
int len = DataUtils.readVarInt(buff);
keys = new Object[len];
int type = buff.get();
boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE;
if (node) {
children = new PageReference[len + 1];
long[] p = new long[len + 1];
for (int i = 0; i <= len; i++) {
p[i] = buff.getLong();
}
long total = 0;
for (int i = 0; i <= len; i++) {
long s = DataUtils.readVarLong(buff);
total += s;
children[i] = new PageReference(null, p[i], s);
}
totalCount = total;
}
boolean compressed = (type & DataUtils.PAGE_COMPRESSED) != 0;
if (compressed) {
Compressor compressor;
if ((type & DataUtils.PAGE_COMPRESSED_HIGH) ==
DataUtils.PAGE_COMPRESSED_HIGH) {
compressor = map.getStore().getCompressorHigh();
} else {
compressor = map.getStore().getCompressorFast();
}
int lenAdd = DataUtils.readVarInt(buff);
int compLen = pageLength + start - buff.position();
byte[] comp = Utils.newBytes(compLen);
buff.get(comp);
int l = compLen + lenAdd;
buff = ByteBuffer.allocate(l);
compressor.expand(comp, 0, compLen, buff.array(),
buff.arrayOffset(), l);
}
map.getKeyType().read(buff, keys, len, true);
if (!node) {
values = new Object[len];
map.getValueType().read(buff, values, len, false);
totalCount = len;
}
recalculateMemory();
}
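// on-disk page layout, as parsed above:
//   int    pageLength  (total length, including this header)
//   short  check       (over chunk id, offset and page length)
//   varint mapId
//   varint keyCount
//   byte   type        (bit 0: leaf/node; compression flag bits)
//   node pages only:   keyCount+1 longs (child positions),
//                      then keyCount+1 varlongs (descendant counts)
//   compressed pages:  varint lenAdd, then the compressed payload
//   keys, then (leaf pages only) values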
/**
* Store the page and update the position.
*
* @param chunk the chunk
* @param buff the target buffer
* @return the position of the buffer just after the type
*/
private int write(Chunk chunk, WriteBuffer buff) {
int start = buff.position();
int len = keys.length;
int type = children != null ? DataUtils.PAGE_TYPE_NODE
: DataUtils.PAGE_TYPE_LEAF;
buff.putInt(0).
putShort((byte) 0).
putVarInt(map.getId()).
putVarInt(len);
int typePos = buff.position();
buff.put((byte) type);
if (type == DataUtils.PAGE_TYPE_NODE) {
writeChildren(buff);
for (int i = 0; i <= len; i++) {
buff.putVarLong(children[i].count);
}
}
int compressStart = buff.position();
map.getKeyType().write(buff, keys, len, true);
if (type == DataUtils.PAGE_TYPE_LEAF) {
map.getValueType().write(buff, values, len, false);
}
MVStore store = map.getStore();
int expLen = buff.position() - compressStart;
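// the compressed form is kept only if it is smaller than the raw payload
// even after adding the varint that records the expanded length
// difference (see the size check below)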
if (expLen > 16) {
int compressionLevel = store.getCompressionLevel();
if (compressionLevel > 0) {
Compressor compressor;
int compressType;
if (compressionLevel == 1) {
compressor = map.getStore().getCompressorFast();
compressType = DataUtils.PAGE_COMPRESSED;
} else {
compressor = map.getStore().getCompressorHigh();
compressType = DataUtils.PAGE_COMPRESSED_HIGH;
}
byte[] exp = new byte[expLen];
buff.position(compressStart).get(exp);
byte[] comp = new byte[expLen * 2];
int compLen = compressor.compress(exp, expLen, comp, 0);
int plus = DataUtils.getVarIntLen(compLen - expLen);
if (compLen + plus < expLen) {
buff.position(typePos).
put((byte) (type + compressType));
buff.position(compressStart).
putVarInt(expLen - compLen).
put(comp, 0, compLen);
}
}
}
int pageLength = buff.position() - start;
int chunkId = chunk.id;
int check = DataUtils.getCheckValue(chunkId)
^ DataUtils.getCheckValue(start)
^ DataUtils.getCheckValue(pageLength);
buff.putInt(start, pageLength).
putShort(start + 4, (short) check);
if (pos != 0) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_INTERNAL, "Page already stored");
}
pos = DataUtils.getPagePos(chunkId, start, pageLength, type);
store.cachePage(pos, this, getMemory());
if (type == DataUtils.PAGE_TYPE_NODE) {
// cache again - this will make sure nodes stay in the cache
// for a longer time
store.cachePage(pos, this, getMemory());
}
long max = DataUtils.getPageMaxLength(pos);
chunk.maxLen += max;
chunk.maxLenLive += max;
chunk.pageCount++;
chunk.pageCountLive++;
if (removedInMemory) {
// if the page was removed _before_ the position was assigned, we
// need to mark it removed here, so the fields are updated
// when the next chunk is stored
map.removePage(pos, memory);
}
return typePos + 1;
}
private void writeChildren(WriteBuffer buff) {
int len = keys.length;
for (int i = 0; i <= len; i++) {
buff.putLong(children[i].pos);
}
}
/**
* Store this page and all children that are changed, in reverse order, and
* update the position and the children.
*
* @param chunk the chunk
* @param buff the target buffer
*/
void writeUnsavedRecursive(Chunk chunk, WriteBuffer buff) {
if (pos != 0) {
// already stored before
return;
}
int patch = write(chunk, buff);
if (!isLeaf()) {
int len = children.length;
for (int i = 0; i < len; i++) {
Page p = children[i].page;
if (p != null) {
p.writeUnsavedRecursive(chunk, buff);
children[i] = new PageReference(p, p.getPos(), p.totalCount);
}
}
int old = buff.position();
buff.position(patch);
writeChildren(buff);
buff.position(old);
}
}
/**
* Unlink the children recursively after all data is written.
*/
void writeEnd() {
if (isLeaf()) {
return;
}
int len = children.length;
for (int i = 0; i < len; i++) {
PageReference ref = children[i];
if (ref.page != null) {
if (ref.page.getPos() == 0) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_INTERNAL, "Page not written");
}
ref.page.writeEnd();
children[i] = new PageReference(null, ref.pos, ref.count);
}
}
}
long getVersion() {
return version;
}
public int getRawChildPageCount() {
return children.length;
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
}
if (other instanceof Page) {
if (pos != 0 && ((Page) other).pos == pos) {
return true;
}
return this == other;
}
return false;
}
@Override
public int hashCode() {
return pos != 0 ? (int) (pos | (pos >>> 32)) : super.hashCode();
}
private boolean isPersistent() {
return memory != IN_MEMORY;
}
public int getMemory() {
if (isPersistent()) {
if (MVStore.ASSERT) {
int mem = memory;
recalculateMemory();
if (mem != memory) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_INTERNAL, "Memory calculation error");
}
}
return memory;
}
return getKeyCount();
}
private void addMemory(int mem) {
memory += mem;
}
private void recalculateMemory() {
int mem = DataUtils.PAGE_MEMORY;
DataType keyType = map.getKeyType();
for (Object key : keys) {
mem += keyType.getMemory(key);
}
if (this.isLeaf()) {
DataType valueType = map.getValueType();
for (int i = 0; i < keys.length; i++) {
mem += valueType.getMemory(values[i]);
}
} else {
mem += this.getRawChildPageCount() * DataUtils.PAGE_MEMORY_CHILD;
}
addMemory(mem - memory);
}
void setVersion(long version) {
this.version = version;
}
/**
* Remove the page.
*/
public void removePage() {
if(isPersistent()) {
long p = pos;
if (p == 0) {
removedInMemory = true;
}
map.removePage(p, memory);
}
}
/**
* A pointer to a page, either in-memory or using a page position.
*/
public static class PageReference {
/**
* The position, if known, or 0.
*/
final long pos;
/**
* The page, if in memory, or null.
*/
final Page page;
/**
* The descendant count for this child page.
*/
final long count;
public PageReference(Page page, long pos, long count) {
this.page = page;
this.pos = pos;
this.count = count;
}
}
/**
* Contains information about which other pages are referenced (directly or
* indirectly) by the given page. This is a subset of the page data, for
* pages of type node. This information is used for garbage collection (to
* quickly find out which chunks are still in use).
*/
public static class PageChildren {
/**
* An empty array of type long.
*/
public static final long[] EMPTY_ARRAY = new long[0];
/**
* The position of the page.
*/
final long pos;
/**
* The page positions of (direct or indirect) children. Depending on the
* use case, this can be the complete list, or only a subset of all
* children, for example only one reference to a child in another
* chunk.
*/
long[] children;
/**
* Whether this object only contains the list of chunks.
*/
boolean chunkList;
private PageChildren(long pos, long[] children) {
this.pos = pos;
this.children = children;
}
PageChildren(Page p) {
this.pos = p.getPos();
int count = p.getRawChildPageCount();
this.children = new long[count];
for (int i = 0; i < count; i++) {
children[i] = p.getChildPagePos(i);
}
}
int getMemory() {
return 64 + 8 * children.length;
}
/**
* Read an inner node page from the buffer, but ignore the keys and
* values.
*
* @param fileStore the file store
* @param pos the position
* @param mapId the map id
* @param filePos the position in the file
* @param maxPos the maximum position (the end of the chunk)
* @return the page children object
*/
static PageChildren read(FileStore fileStore, long pos, int mapId,
long filePos, long maxPos) {
ByteBuffer buff;
int maxLength = DataUtils.getPageMaxLength(pos);
if (maxLength == DataUtils.PAGE_LARGE) {
buff = fileStore.readFully(filePos, 128);
maxLength = buff.getInt();
// read the first bytes again
}
maxLength = (int) Math.min(maxPos - filePos, maxLength);
int length = maxLength;
if (length < 0) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"Illegal page length {0} reading at {1}; max pos {2} ",
length, filePos, maxPos);
}
buff = fileStore.readFully(filePos, length);
int chunkId = DataUtils.getPageChunkId(pos);
int offset = DataUtils.getPageOffset(pos);
int start = buff.position();
int pageLength = buff.getInt();
if (pageLength > maxLength) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupted in chunk {0}, expected page length =< {1}, got {2}",
chunkId, maxLength, pageLength);
}
buff.limit(start + pageLength);
short check = buff.getShort();
int m = DataUtils.readVarInt(buff);
if (m != mapId) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupted in chunk {0}, expected map id {1}, got {2}",
chunkId, mapId, m);
}
int checkTest = DataUtils.getCheckValue(chunkId)
^ DataUtils.getCheckValue(offset)
^ DataUtils.getCheckValue(pageLength);
if (check != (short) checkTest) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT,
"File corrupted in chunk {0}, expected check value {1}, got {2}",
chunkId, checkTest, check);
}
int len = DataUtils.readVarInt(buff);
int type = buff.get();
boolean node = (type & 1) == DataUtils.PAGE_TYPE_NODE;
if (!node) {
return null;
}
long[] children = new long[len + 1];
for (int i = 0; i <= len; i++) {
children[i] = buff.getLong();
}
return new PageChildren(pos, children);
}
/**
* Only keep one reference to the same chunk. Only leaf references are
* removed (references to inner nodes are not removed, as they could
* indirectly point to other chunks).
*/
void removeDuplicateChunkReferences() {
HashSet<Integer> chunks = new HashSet<>();
// we don't need references to leaves in the same chunk
chunks.add(DataUtils.getPageChunkId(pos));
for (int i = 0; i < children.length; i++) {
long p = children[i];
int chunkId = DataUtils.getPageChunkId(p);
boolean wasNew = chunks.add(chunkId);
if (DataUtils.getPageType(p) == DataUtils.PAGE_TYPE_NODE) {
continue;
}
if (wasNew) {
continue;
}
removeChild(i--);
}
}
private void removeChild(int index) {
if (index == 0 && children.length == 1) {
children = EMPTY_ARRAY;
return;
}
long[] c2 = new long[children.length - 1];
DataUtils.copyExcept(children, c2, children.length, index);
children = c2;
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/StreamStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
* A facility to store streams in a map. Streams are split into blocks, which
* are stored in a map. Very small streams are inlined in the stream id.
* <p>
* The key of the map is a long (incremented for each stored block). The default
* initial value is 0. Before storing blocks into the map, the stream store
* checks if there is already a block with the next key, and if necessary
* searches the next free entry using a binary search (0 to Long.MAX_VALUE).
* <p>
* The format of the binary id is: An empty id represents 0 bytes of data.
* In-place data is encoded as 0, the size (a variable size int), then the data.
* A stored block is encoded as 1, the length of the block (a variable size
* int), then the key (a variable size long). Multiple ids can be
* concatenated, in which case the data is the concatenation of their
* contents. If the id is large, it is stored itself, which is
* encoded as 2, the total length (a variable size long), and the key of the
* block that contains the id (a variable size long).
*/
public class StreamStore {
private final Map<Long, byte[]> map;
private int minBlockSize = 256;
private int maxBlockSize = 256 * 1024;
private final AtomicLong nextKey = new AtomicLong();
private final AtomicReference<byte[]> nextBuffer =
new AtomicReference<>();
/**
* Create a stream store instance.
*
* @param map the map to store blocks of data
*/
public StreamStore(Map<Long, byte[]> map) {
this.map = map;
}
public Map<Long, byte[]> getMap() {
return map;
}
public void setNextKey(long nextKey) {
this.nextKey.set(nextKey);
}
public long getNextKey() {
return nextKey.get();
}
/**
* Set the minimum block size. The default is 256 bytes.
*
* @param minBlockSize the new value
*/
public void setMinBlockSize(int minBlockSize) {
this.minBlockSize = minBlockSize;
}
public int getMinBlockSize() {
return minBlockSize;
}
/**
* Set the maximum block size. The default is 256 KB.
*
* @param maxBlockSize the new value
*/
public void setMaxBlockSize(int maxBlockSize) {
this.maxBlockSize = maxBlockSize;
}
public long getMaxBlockSize() {
return maxBlockSize;
}
/**
* Store the stream, and return the id. The stream is not closed.
*
* @param in the stream
* @return the id (potentially an empty array)
*/
@SuppressWarnings("resource")
public byte[] put(InputStream in) throws IOException {
ByteArrayOutputStream id = new ByteArrayOutputStream();
int level = 0;
try {
while (!put(id, in, level)) {
if (id.size() > maxBlockSize / 2) {
id = putIndirectId(id);
level++;
}
}
} catch (IOException e) {
remove(id.toByteArray());
throw e;
}
if (id.size() > minBlockSize * 2) {
id = putIndirectId(id);
}
return id.toByteArray();
}
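// Illustrative usage sketch ("store" and "blobs" are hypothetical names,
// not part of this class); a minimal round trip, assuming an in-memory
// MVStore backs the block map:
//
//   MVStore store = MVStore.open(null);
//   StreamStore ss = new StreamStore(store.<Long, byte[]>openMap("blobs"));
//   byte[] id = ss.put(new ByteArrayInputStream(
//           "hello".getBytes(StandardCharsets.UTF_8)));
//   long n = ss.length(id);       // 5, decoded from the id, no map reads
//   InputStream back = ss.get(id);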
private boolean put(ByteArrayOutputStream id, InputStream in, int level)
throws IOException {
if (level > 0) {
ByteArrayOutputStream id2 = new ByteArrayOutputStream();
while (true) {
boolean eof = put(id2, in, level - 1);
if (id2.size() > maxBlockSize / 2) {
id2 = putIndirectId(id2);
id2.writeTo(id);
return eof;
} else if (eof) {
id2.writeTo(id);
return true;
}
}
}
byte[] readBuffer = nextBuffer.getAndSet(null);
if (readBuffer == null) {
readBuffer = new byte[maxBlockSize];
}
byte[] buff = read(in, readBuffer);
if (buff != readBuffer) {
// re-use the buffer if the result was shorter
nextBuffer.set(readBuffer);
}
int len = buff.length;
if (len == 0) {
return true;
}
boolean eof = len < maxBlockSize;
if (len < minBlockSize) {
// in-place: 0, len (int), data
id.write(0);
DataUtils.writeVarInt(id, len);
id.write(buff);
} else {
// block: 1, len (int), blockId (long)
id.write(1);
DataUtils.writeVarInt(id, len);
DataUtils.writeVarLong(id, writeBlock(buff));
}
return eof;
}
private static byte[] read(InputStream in, byte[] target)
throws IOException {
int copied = 0;
int remaining = target.length;
while (remaining > 0) {
try {
int len = in.read(target, copied, remaining);
if (len < 0) {
return Arrays.copyOf(target, copied);
}
copied += len;
remaining -= len;
} catch (RuntimeException e) {
throw new IOException(e);
}
}
return target;
}
private ByteArrayOutputStream putIndirectId(ByteArrayOutputStream id)
throws IOException {
byte[] data = id.toByteArray();
id = new ByteArrayOutputStream();
// indirect: 2, total len (long), blockId (long)
id.write(2);
DataUtils.writeVarLong(id, length(data));
DataUtils.writeVarLong(id, writeBlock(data));
return id;
}
private long writeBlock(byte[] data) {
long key = getAndIncrementNextKey();
map.put(key, data);
onStore(data.length);
return key;
}
/**
* This method is called after a block of data is stored. Override this
* method to persist data if necessary.
*
* @param len the length of the stored block.
*/
@SuppressWarnings("unused")
protected void onStore(int len) {
// do nothing by default
}
/**
* Generate a new key.
*
* @return the new key
*/
private long getAndIncrementNextKey() {
long key = nextKey.getAndIncrement();
if (!map.containsKey(key)) {
return key;
}
// search the next free id using binary search
synchronized (this) {
long low = key, high = Long.MAX_VALUE;
while (low < high) {
long x = (low + high) >>> 1;
if (map.containsKey(x)) {
low = x + 1;
} else {
high = x;
}
}
key = low;
nextKey.set(key + 1);
return key;
}
}
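// example: if keys 10..14 are already taken and nextKey handed out 10,
// the search above converges on 15, the first free key (assuming the
// occupied keys form a contiguous range)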
/**
* Get the key of the biggest block, or -1 for inline data.
* This method is used to garbage collect orphaned blocks.
*
* @param id the id
* @return the key, or -1
*/
public long getMaxBlockKey(byte[] id) {
long maxKey = -1;
ByteBuffer idBuffer = ByteBuffer.wrap(id);
while (idBuffer.hasRemaining()) {
switch (idBuffer.get()) {
case 0:
// in-place: 0, len (int), data
int len = DataUtils.readVarInt(idBuffer);
idBuffer.position(idBuffer.position() + len);
break;
case 1:
// block: 1, len (int), blockId (long)
DataUtils.readVarInt(idBuffer);
long k = DataUtils.readVarLong(idBuffer);
maxKey = Math.max(maxKey, k);
break;
case 2:
// indirect: 2, total len (long), blockId (long)
DataUtils.readVarLong(idBuffer);
long k2 = DataUtils.readVarLong(idBuffer);
maxKey = k2;
byte[] r = map.get(k2);
// recurse
long m = getMaxBlockKey(r);
if (m >= 0) {
maxKey = Math.max(maxKey, m);
}
break;
default:
throw DataUtils.newIllegalArgumentException(
"Unsupported id {0}", Arrays.toString(id));
}
}
return maxKey;
}
/**
* Remove all stored blocks for the given id.
*
* @param id the id
*/
public void remove(byte[] id) {
ByteBuffer idBuffer = ByteBuffer.wrap(id);
while (idBuffer.hasRemaining()) {
switch (idBuffer.get()) {
case 0:
// in-place: 0, len (int), data
int len = DataUtils.readVarInt(idBuffer);
idBuffer.position(idBuffer.position() + len);
break;
case 1:
// block: 1, len (int), blockId (long)
DataUtils.readVarInt(idBuffer);
long k = DataUtils.readVarLong(idBuffer);
map.remove(k);
break;
case 2:
// indirect: 2, total len (long), blockId (long)
DataUtils.readVarLong(idBuffer);
long k2 = DataUtils.readVarLong(idBuffer);
// recurse
remove(map.get(k2));
map.remove(k2);
break;
default:
throw DataUtils.newIllegalArgumentException(
"Unsupported id {0}", Arrays.toString(id));
}
}
}
/**
* Convert the id to a human readable string.
*
* @param id the stream id
* @return the string
*/
public static String toString(byte[] id) {
StringBuilder buff = new StringBuilder();
ByteBuffer idBuffer = ByteBuffer.wrap(id);
long length = 0;
while (idBuffer.hasRemaining()) {
long block;
int len;
switch (idBuffer.get()) {
case 0:
// in-place: 0, len (int), data
len = DataUtils.readVarInt(idBuffer);
idBuffer.position(idBuffer.position() + len);
buff.append("data len=").append(len);
length += len;
break;
case 1:
// block: 1, len (int), blockId (long)
len = DataUtils.readVarInt(idBuffer);
length += len;
block = DataUtils.readVarLong(idBuffer);
buff.append("block ").append(block).append(" len=").append(len);
break;
case 2:
// indirect: 2, total len (long), blockId (long)
long indirectLen = DataUtils.readVarLong(idBuffer);
length += indirectLen;
block = DataUtils.readVarLong(idBuffer);
buff.append("indirect block ").append(block).append(" len=").append(indirectLen);
break;
default:
buff.append("error");
}
buff.append(", ");
}
buff.append("length=").append(length);
return buff.toString();
}
/**
* Calculate the number of data bytes for the given id. As the length is
* encoded in the id, this operation does not cause any reads in the map.
*
* @param id the id
* @return the length
*/
public long length(byte[] id) {
ByteBuffer idBuffer = ByteBuffer.wrap(id);
long length = 0;
while (idBuffer.hasRemaining()) {
switch (idBuffer.get()) {
case 0:
// in-place: 0, len (int), data
int len = DataUtils.readVarInt(idBuffer);
idBuffer.position(idBuffer.position() + len);
length += len;
break;
case 1:
// block: 1, len (int), blockId (long)
length += DataUtils.readVarInt(idBuffer);
DataUtils.readVarLong(idBuffer);
break;
case 2:
// indirect: 2, total len (long), blockId (long)
length += DataUtils.readVarLong(idBuffer);
DataUtils.readVarLong(idBuffer);
break;
default:
throw DataUtils.newIllegalArgumentException(
"Unsupported id {0}", Arrays.toString(id));
}
}
return length;
}
/**
* Check whether the id itself contains all the data. This operation does
* not cause any reads in the map.
*
* @param id the id
* @return if the id contains the data
*/
public boolean isInPlace(byte[] id) {
ByteBuffer idBuffer = ByteBuffer.wrap(id);
while (idBuffer.hasRemaining()) {
if (idBuffer.get() != 0) {
return false;
}
int len = DataUtils.readVarInt(idBuffer);
idBuffer.position(idBuffer.position() + len);
}
return true;
}
/**
* Open an input stream to read data.
*
* @param id the id
* @return the stream
*/
public InputStream get(byte[] id) {
return new Stream(this, id);
}
/**
* Get the block.
*
* @param key the key
* @return the block
*/
byte[] getBlock(long key) {
byte[] data = map.get(key);
if (data == null) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_BLOCK_NOT_FOUND,
"Block {0} not found", key);
}
return data;
}
/**
* A stream backed by a map.
*/
static class Stream extends InputStream {
private final StreamStore store;
private byte[] oneByteBuffer;
private ByteBuffer idBuffer;
private ByteArrayInputStream buffer;
private long skip;
private final long length;
private long pos;
Stream(StreamStore store, byte[] id) {
this.store = store;
this.length = store.length(id);
this.idBuffer = ByteBuffer.wrap(id);
}
@Override
public int read() throws IOException {
byte[] buffer = oneByteBuffer;
if (buffer == null) {
buffer = oneByteBuffer = new byte[1];
}
int len = read(buffer, 0, 1);
return len == -1 ? -1 : (buffer[0] & 255);
}
@Override
public long skip(long n) {
n = Math.min(length - pos, n);
if (n == 0) {
return 0;
}
if (buffer != null) {
long s = buffer.skip(n);
if (s > 0) {
n = s;
} else {
buffer = null;
skip += n;
}
} else {
skip += n;
}
pos += n;
return n;
}
@Override
public void close() {
buffer = null;
idBuffer.position(idBuffer.limit());
pos = length;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (len <= 0) {
return 0;
}
while (true) {
if (buffer == null) {
try {
buffer = nextBuffer();
} catch (IllegalStateException e) {
String msg = DataUtils.formatMessage(
DataUtils.ERROR_BLOCK_NOT_FOUND,
"Block not found in id {0}",
Arrays.toString(idBuffer.array()));
throw new IOException(msg, e);
}
if (buffer == null) {
return -1;
}
}
int result = buffer.read(b, off, len);
if (result > 0) {
pos += result;
return result;
}
buffer = null;
}
}
private ByteArrayInputStream nextBuffer() {
while (idBuffer.hasRemaining()) {
switch (idBuffer.get()) {
case 0: {
int len = DataUtils.readVarInt(idBuffer);
if (skip >= len) {
skip -= len;
idBuffer.position(idBuffer.position() + len);
continue;
}
int p = (int) (idBuffer.position() + skip);
int l = (int) (len - skip);
idBuffer.position(p + l);
return new ByteArrayInputStream(idBuffer.array(), p, l);
}
case 1: {
int len = DataUtils.readVarInt(idBuffer);
long key = DataUtils.readVarLong(idBuffer);
if (skip >= len) {
skip -= len;
continue;
}
byte[] data = store.getBlock(key);
int s = (int) skip;
skip = 0;
return new ByteArrayInputStream(data, s, data.length - s);
}
case 2: {
long len = DataUtils.readVarLong(idBuffer);
long key = DataUtils.readVarLong(idBuffer);
if (skip >= len) {
skip -= len;
continue;
}
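// the stored block holds a nested id: splice it in front of the
// remaining outer id bytes and parse the combined buffer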
byte[] k = store.getBlock(key);
ByteBuffer newBuffer = ByteBuffer.allocate(k.length
+ idBuffer.limit() - idBuffer.position());
newBuffer.put(k);
newBuffer.put(idBuffer);
newBuffer.flip();
idBuffer = newBuffer;
return nextBuffer();
}
default:
throw DataUtils.newIllegalArgumentException(
"Unsupported id {0}",
Arrays.toString(idBuffer.array()));
}
}
return null;
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/WriteBuffer.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore;
import java.nio.ByteBuffer;
/**
* An auto-resize buffer to write data into a ByteBuffer.
*/
public class WriteBuffer {
/**
* The maximum size of a buffer that will be re-used after a clear
* operation.
*/
private static final int MAX_REUSE_CAPACITY = 4 * 1024 * 1024;
/**
* The minimum number of bytes to grow a buffer at a time.
*/
private static final int MIN_GROW = 1024 * 1024;
/**
* The buffer that is used after a clear operation.
*/
private ByteBuffer reuse;
/**
* The current buffer (may be replaced if it is too small).
*/
private ByteBuffer buff;
public WriteBuffer(int initialSize) {
reuse = ByteBuffer.allocate(initialSize);
buff = reuse;
}
public WriteBuffer() {
this(MIN_GROW);
}
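// Illustrative usage sketch: the fluent API grows the buffer on demand,
// so no capacity needs to be declared up front:
//
//   WriteBuffer b = new WriteBuffer();
//   b.putVarInt(300).putLong(42L).put((byte) 1);
//   ByteBuffer raw = b.getBuffer(); // position() == bytes written so far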
/**
* Write a variable size integer.
*
* @param x the value
* @return this
*/
public WriteBuffer putVarInt(int x) {
DataUtils.writeVarInt(ensureCapacity(5), x);
return this;
}
/**
* Write a variable size long.
*
* @param x the value
* @return this
*/
public WriteBuffer putVarLong(long x) {
DataUtils.writeVarLong(ensureCapacity(10), x);
return this;
}
/**
* Write the characters of a string in a format similar to UTF-8.
*
* @param s the string
* @param len the number of characters to write
* @return this
*/
public WriteBuffer putStringData(String s, int len) {
ByteBuffer b = ensureCapacity(3 * len);
DataUtils.writeStringData(b, s, len);
return this;
}
/**
* Put a byte.
*
* @param x the value
* @return this
*/
public WriteBuffer put(byte x) {
ensureCapacity(1).put(x);
return this;
}
/**
* Put a character.
*
* @param x the value
* @return this
*/
public WriteBuffer putChar(char x) {
ensureCapacity(2).putChar(x);
return this;
}
/**
* Put a short.
*
* @param x the value
* @return this
*/
public WriteBuffer putShort(short x) {
ensureCapacity(2).putShort(x);
return this;
}
/**
* Put an integer.
*
* @param x the value
* @return this
*/
public WriteBuffer putInt(int x) {
ensureCapacity(4).putInt(x);
return this;
}
/**
* Put a long.
*
* @param x the value
* @return this
*/
public WriteBuffer putLong(long x) {
ensureCapacity(8).putLong(x);
return this;
}
/**
* Put a float.
*
* @param x the value
* @return this
*/
public WriteBuffer putFloat(float x) {
ensureCapacity(4).putFloat(x);
return this;
}
/**
* Put a double.
*
* @param x the value
* @return this
*/
public WriteBuffer putDouble(double x) {
ensureCapacity(8).putDouble(x);
return this;
}
/**
* Put a byte array.
*
* @param bytes the value
* @return this
*/
public WriteBuffer put(byte[] bytes) {
ensureCapacity(bytes.length).put(bytes);
return this;
}
/**
* Put a byte array.
*
* @param bytes the value
* @param offset the source offset
* @param length the number of bytes
* @return this
*/
public WriteBuffer put(byte[] bytes, int offset, int length) {
ensureCapacity(length).put(bytes, offset, length);
return this;
}
/**
* Put the contents of a byte buffer.
*
* @param src the source buffer
* @return this
*/
public WriteBuffer put(ByteBuffer src) {
ensureCapacity(src.remaining()).put(src);
return this;
}
/**
* Set the limit, possibly growing the buffer.
*
* @param newLimit the new limit
* @return this
*/
public WriteBuffer limit(int newLimit) {
ensureCapacity(newLimit - buff.position()).limit(newLimit);
return this;
}
/**
* Get the capacity.
*
* @return the capacity
*/
public int capacity() {
return buff.capacity();
}
/**
* Set the position.
*
* @param newPosition the new position
* @return this
*/
public WriteBuffer position(int newPosition) {
buff.position(newPosition);
return this;
}
/**
* Get the limit.
*
* @return the limit
*/
public int limit() {
return buff.limit();
}
/**
* Get the current position.
*
* @return the position
*/
public int position() {
return buff.position();
}
/**
* Copy the data into the destination array.
*
* @param dst the destination array
* @return this
*/
public WriteBuffer get(byte[] dst) {
buff.get(dst);
return this;
}
/**
* Update an integer at the given index.
*
* @param index the index
* @param value the value
* @return this
*/
public WriteBuffer putInt(int index, int value) {
buff.putInt(index, value);
return this;
}
/**
* Update a short at the given index.
*
* @param index the index
* @param value the value
* @return this
*/
public WriteBuffer putShort(int index, short value) {
buff.putShort(index, value);
return this;
}
/**
* Clear the buffer after use.
*
* @return this
*/
public WriteBuffer clear() {
if (buff.limit() > MAX_REUSE_CAPACITY) {
buff = reuse;
} else if (buff != reuse) {
reuse = buff;
}
buff.clear();
return this;
}
/**
* Get the byte buffer.
*
* @return the byte buffer
*/
public ByteBuffer getBuffer() {
return buff;
}
private ByteBuffer ensureCapacity(int len) {
if (buff.remaining() < len) {
grow(len);
}
return buff;
}
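// worked example: a 4 MB buffer that needs 10 bytes more than it has
// remaining grows by max(10, MIN_GROW = 1 MB, capacity / 2 = 2 MB),
// i.e. to 6 MB; geometric growth keeps repeated appends amortized O(1)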
private void grow(int additional) {
ByteBuffer temp = buff;
int needed = additional - temp.remaining();
// grow at least MIN_GROW
long grow = Math.max(needed, MIN_GROW);
// grow at least 50% of the current size
grow = Math.max(temp.capacity() / 2, grow);
// the new capacity is at most Integer.MAX_VALUE
int newCapacity = (int) Math.min(Integer.MAX_VALUE, temp.capacity() + grow);
if (newCapacity < needed) {
throw new OutOfMemoryError("Capacity: " + newCapacity + " needed: " + needed);
}
try {
buff = ByteBuffer.allocate(newCapacity);
} catch (OutOfMemoryError e) {
throw new OutOfMemoryError("Capacity: " + newCapacity);
}
temp.flip();
buff.put(temp);
if (newCapacity <= MAX_REUSE_CAPACITY) {
reuse = buff;
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/cache/CacheLongKeyLIRS.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.cache;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.h2.mvstore.DataUtils;
/**
* A scan resistant cache that uses keys of type long. It is meant to cache
* objects that are relatively costly to acquire, for example file content.
* <p>
* This implementation is thread-safe and supports concurrent access.
* Keys are primitive longs and can never be null; null values are not
* allowed. The map fill factor is at most 75%.
* <p>
* Each entry is assigned a distinct memory size, and the cache will try to use
* at most the specified amount of memory. The memory unit is not relevant,
* however it is suggested to use bytes as the unit.
* <p>
* This class implements an approximation of the LIRS replacement algorithm
* invented by Xiaodong Zhang and Song Jiang as described in
* http://www.cse.ohio-state.edu/~zhang/lirs-sigmetrics-02.html with a few
* smaller changes: An additional queue for non-resident entries is used, to
* prevent unbounded memory usage. The maximum size of this queue is at most the
* size of the rest of the stack. About 6.25% of the mapped entries are cold.
* <p>
* Internally, the cache is split into a number of segments, and each segment is
* an individual LIRS cache.
* <p>
* Accessed entries are only moved to the top of the stack if at least a
* number of other entries have been moved to the front (32 per segment by
* default, matching Config.stackMoveDistance). Write access and moving
* entries to the top of the stack are synchronized per segment.
*
* @author Thomas Mueller
* @param <V> the value type
*/
public class CacheLongKeyLIRS<V> {
/**
* The maximum memory this cache should use.
*/
private long maxMemory;
private final Segment<V>[] segments;
private final int segmentCount;
private final int segmentShift;
private final int segmentMask;
private final int stackMoveDistance;
private final int nonResidentQueueSize;
/**
* Create a new cache with the given memory size.
*
* @param config the configuration
*/
@SuppressWarnings("unchecked")
public CacheLongKeyLIRS(Config config) {
setMaxMemory(config.maxMemory);
this.nonResidentQueueSize = config.nonResidentQueueSize;
DataUtils.checkArgument(
Integer.bitCount(config.segmentCount) == 1,
"The segment count must be a power of 2, is {0}", config.segmentCount);
this.segmentCount = config.segmentCount;
this.segmentMask = segmentCount - 1;
this.stackMoveDistance = config.stackMoveDistance;
segments = new Segment[segmentCount];
clear();
// use the high bits for the segment
this.segmentShift = 32 - Integer.bitCount(segmentMask);
}
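// Illustrative usage sketch (sizes are hypothetical):
//
//   CacheLongKeyLIRS.Config cfg = new CacheLongKeyLIRS.Config();
//   cfg.maxMemory = 16 * 1024 * 1024;    // budget, e.g. in bytes
//   CacheLongKeyLIRS<byte[]> cache = new CacheLongKeyLIRS<>(cfg);
//   cache.put(1L, new byte[100], 100);   // explicit per-entry weight
//   byte[] v = cache.get(1L);            // may promote the entry to hot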
/**
* Remove all entries.
*/
public void clear() {
long max = getMaxItemSize();
for (int i = 0; i < segmentCount; i++) {
segments[i] = new Segment<>(
max, stackMoveDistance, 8, nonResidentQueueSize);
}
}
/**
* Determines the maximum size of a data item that fits into the cache.
* @return the data item size limit
*/
public long getMaxItemSize() {
return Math.max(1, maxMemory / segmentCount);
}
private Entry<V> find(long key) {
int hash = getHash(key);
return getSegment(hash).find(key, hash);
}
/**
* Check whether there is a resident entry for the given key. This
* method does not adjust the internal state of the cache.
*
* @param key the key
* @return true if there is a resident entry
*/
public boolean containsKey(long key) {
int hash = getHash(key);
return getSegment(hash).containsKey(key, hash);
}
/**
* Get the value for the given key if the entry is cached. This method does
* not modify the internal state.
*
* @param key the key
* @return the value, or null if there is no resident entry
*/
public V peek(long key) {
Entry<V> e = find(key);
return e == null ? null : e.value;
}
/**
* Add an entry to the cache using the average memory size.
*
* @param key the key
* @param value the value (may not be null)
* @return the old value, or null if there was no resident entry
*/
public V put(long key, V value) {
return put(key, value, sizeOf(value));
}
/**
* Add an entry to the cache. The entry may or may not exist in the
* cache yet. This method will usually mark unknown entries as cold and
* known entries as hot.
*
* @param key the key
* @param value the value (may not be null)
* @param memory the memory used for the given entry
* @return the old value, or null if there was no resident entry
*/
public V put(long key, V value, int memory) {
int hash = getHash(key);
int segmentIndex = getSegmentIndex(hash);
Segment<V> s = segments[segmentIndex];
// check whether resize is required: synchronize on s, to avoid
// concurrent resizes (concurrent reads read
// from the old segment)
synchronized (s) {
s = resizeIfNeeded(s, segmentIndex);
return s.put(key, hash, value, memory);
}
}
private Segment<V> resizeIfNeeded(Segment<V> s, int segmentIndex) {
int newLen = s.getNewMapLen();
if (newLen == 0) {
return s;
}
// another thread might have resized
// (as we retrieved the segment before synchronizing on it)
Segment<V> s2 = segments[segmentIndex];
if (s == s2) {
// no other thread resized, so we do
s = new Segment<>(s, newLen);
segments[segmentIndex] = s;
}
return s;
}
/**
* Get the size of the given value. The default implementation returns 1.
*
* @param value the value
* @return the size
*/
@SuppressWarnings("unused")
protected int sizeOf(V value) {
return 1;
}
/**
* Remove an entry. Both resident and non-resident entries can be
* removed.
*
* @param key the key
* @return the old value, or null if there was no resident entry
*/
public V remove(long key) {
int hash = getHash(key);
int segmentIndex = getSegmentIndex(hash);
Segment<V> s = segments[segmentIndex];
// check whether resize is required: synchronize on s, to avoid
// concurrent resizes (concurrent reads read
// from the old segment)
synchronized (s) {
s = resizeIfNeeded(s, segmentIndex);
return s.remove(key, hash);
}
}
/**
* Get the memory used for the given key.
*
* @param key the key
* @return the memory, or 0 if there is no resident entry
*/
public int getMemory(long key) {
int hash = getHash(key);
return getSegment(hash).getMemory(key, hash);
}
/**
* Get the value for the given key if the entry is cached. This method
* adjusts the internal state of the cache sometimes, to ensure commonly
* used entries stay in the cache.
*
* @param key the key
* @return the value, or null if there is no resident entry
*/
public V get(long key) {
int hash = getHash(key);
return getSegment(hash).get(key, hash);
}
private Segment<V> getSegment(int hash) {
return segments[getSegmentIndex(hash)];
}
private int getSegmentIndex(int hash) {
return (hash >>> segmentShift) & segmentMask;
}
/**
* Get the hash code for the given key. The hash code is
* further enhanced to spread the values more evenly.
*
* @param key the key
* @return the hash code
*/
static int getHash(long key) {
int hash = (int) ((key >>> 32) ^ key);
// a supplemental secondary hash function
// to protect against hash codes that don't differ much
hash = ((hash >>> 16) ^ hash) * 0x45d9f3b;
hash = ((hash >>> 16) ^ hash) * 0x45d9f3b;
hash = (hash >>> 16) ^ hash;
return hash;
}
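// note: getSegmentIndex uses the high bits of this hash, so the mixing
// steps above must spread low-entropy keys (such as sequential page
// positions) into the high bits as well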
/**
* Get the currently used memory.
*
* @return the used memory
*/
public long getUsedMemory() {
long x = 0;
for (Segment<V> s : segments) {
x += s.usedMemory;
}
return x;
}
/**
* Set the maximum memory this cache should use. This will not
* immediately cause entries to be removed; it only changes
* the limit. To resize the internal array, call the clear method.
*
* @param maxMemory the maximum size (1 or larger) in bytes
*/
public void setMaxMemory(long maxMemory) {
DataUtils.checkArgument(
maxMemory > 0,
"Max memory must be larger than 0, is {0}", maxMemory);
this.maxMemory = maxMemory;
if (segments != null) {
long max = 1 + maxMemory / segments.length;
for (Segment<V> s : segments) {
s.setMaxMemory(max);
}
}
}
/**
* Get the maximum memory to use.
*
* @return the maximum memory
*/
public long getMaxMemory() {
return maxMemory;
}
/**
* Get the entry set for all resident entries.
*
* @return the entry set
*/
public synchronized Set<Map.Entry<Long, V>> entrySet() {
HashMap<Long, V> map = new HashMap<>();
for (long k : keySet()) {
map.put(k, find(k).value);
}
return map.entrySet();
}
/**
* Get the set of keys for resident entries.
*
* @return the set of keys
*/
public Set<Long> keySet() {
HashSet<Long> set = new HashSet<>();
for (Segment<V> s : segments) {
set.addAll(s.keySet());
}
return set;
}
/**
* Get the number of non-resident entries in the cache.
*
* @return the number of non-resident entries
*/
public int sizeNonResident() {
int x = 0;
for (Segment<V> s : segments) {
x += s.queue2Size;
}
return x;
}
/**
* Get the length of the internal map array.
*
* @return the size of the array
*/
public int sizeMapArray() {
int x = 0;
for (Segment<V> s : segments) {
x += s.entries.length;
}
return x;
}
/**
* Get the number of hot entries in the cache.
*
* @return the number of hot entries
*/
public int sizeHot() {
int x = 0;
for (Segment<V> s : segments) {
x += s.mapSize - s.queueSize - s.queue2Size;
}
return x;
}
/**
* Get the number of cache hits.
*
* @return the cache hits
*/
public long getHits() {
long x = 0;
for (Segment<V> s : segments) {
x += s.hits;
}
return x;
}
/**
* Get the number of cache misses.
*
* @return the cache misses
*/
public long getMisses() {
long x = 0;
for (Segment<V> s : segments) {
x += s.misses;
}
return x;
}
/**
* Get the number of resident entries.
*
* @return the number of entries
*/
public int size() {
int x = 0;
for (Segment<V> s : segments) {
x += s.mapSize - s.queue2Size;
}
return x;
}
/**
* Get the list of keys. This method allows reading the internal state of
* the cache.
*
* @param cold if true, only keys for the cold entries are returned
* @param nonResident true for non-resident entries
* @return the key list
*/
public List<Long> keys(boolean cold, boolean nonResident) {
ArrayList<Long> keys = new ArrayList<>();
for (Segment<V> s : segments) {
keys.addAll(s.keys(cold, nonResident));
}
return keys;
}
/**
* Get the values for all resident entries.
*
* @return the values
*/
public List<V> values() {
ArrayList<V> list = new ArrayList<>();
for (long k : keySet()) {
V value = find(k).value;
if (value != null) {
list.add(value);
}
}
return list;
}
/**
* Check whether the cache is empty.
*
* @return true if it is empty
*/
public boolean isEmpty() {
return size() == 0;
}
/**
* Check whether the given value is stored.
*
* @param value the value
* @return true if it is stored
*/
public boolean containsValue(Object value) {
return getMap().containsValue(value);
}
/**
* Convert this cache to a map.
*
* @return the map
*/
public Map<Long, V> getMap() {
HashMap<Long, V> map = new HashMap<>();
for (long k : keySet()) {
V x = find(k).value;
if (x != null) {
map.put(k, x);
}
}
return map;
}
/**
* Add all elements of the map to this cache.
*
* @param m the map
*/
public void putAll(Map<Long, ? extends V> m) {
for (Map.Entry<Long, ? extends V> e : m.entrySet()) {
// null values are not allowed and will be rejected by put
put(e.getKey(), e.getValue());
}
}
/**
* A cache segment
*
* @param <V> the value type
*/
private static class Segment<V> {
/**
* The number of (hot, cold, and non-resident) entries in the map.
*/
int mapSize;
/**
* The size of the LIRS queue for resident cold entries.
*/
int queueSize;
/**
* The size of the LIRS queue for non-resident cold entries.
*/
int queue2Size;
/**
* The number of cache hits.
*/
long hits;
/**
* The number of cache misses.
*/
long misses;
/**
* The map array. The size is always a power of 2.
*/
final Entry<V>[] entries;
/**
* The currently used memory.
*/
long usedMemory;
/**
* How many other items are to be moved to the top of the stack before
* the current item is moved.
*/
private final int stackMoveDistance;
/**
* The maximum memory this cache should use in bytes.
*/
private long maxMemory;
/**
* The bit mask that is applied to the key hash code to get the index in
* the map array. The mask is the length of the array minus one.
*/
private final int mask;
/**
* The number of entries in the non-resident queue, as a factor of the
* number of entries in the map.
*/
private final int nonResidentQueueSize;
/**
* The stack of recently referenced elements. This includes all hot
* entries, and the recently referenced cold entries. Resident cold
* entries that were not recently referenced, as well as non-resident
* cold entries, are not in the stack.
* <p>
* There is always at least one entry: the head entry.
*/
private final Entry<V> stack;
/**
* The number of entries in the stack.
*/
private int stackSize;
/**
* The queue of resident cold entries.
* <p>
* There is always at least one entry: the head entry.
*/
private final Entry<V> queue;
/**
* The queue of non-resident cold entries.
* <p>
* There is always at least one entry: the head entry.
*/
private final Entry<V> queue2;
/**
* The number of times any item was moved to the top of the stack.
*/
private int stackMoveCounter;
/**
* Create a new cache segment.
*
* @param maxMemory the maximum memory to use
* @param stackMoveDistance the number of other entries to be moved to
* the top of the stack before moving an entry to the top
* @param len the number of hash table buckets (must be a power of 2)
* @param nonResidentQueueSize the non-resident queue size factor
*/
Segment(long maxMemory, int stackMoveDistance, int len,
int nonResidentQueueSize) {
setMaxMemory(maxMemory);
this.stackMoveDistance = stackMoveDistance;
this.nonResidentQueueSize = nonResidentQueueSize;
// the bit mask has all bits set
mask = len - 1;
// initialize the stack and queue heads
stack = new Entry<>();
stack.stackPrev = stack.stackNext = stack;
queue = new Entry<>();
queue.queuePrev = queue.queueNext = queue;
queue2 = new Entry<>();
queue2.queuePrev = queue2.queueNext = queue2;
@SuppressWarnings("unchecked")
Entry<V>[] e = new Entry[len];
entries = e;
}
/**
* Create a new cache segment from an existing one.
* The caller must synchronize on the old segment, to avoid
* concurrent modifications.
*
* @param old the old segment
* @param len the number of hash table buckets (must be a power of 2)
*/
Segment(Segment<V> old, int len) {
this(old.maxMemory, old.stackMoveDistance, len, old.nonResidentQueueSize);
hits = old.hits;
misses = old.misses;
Entry<V> s = old.stack.stackPrev;
while (s != old.stack) {
Entry<V> e = copy(s);
addToMap(e);
addToStack(e);
s = s.stackPrev;
}
s = old.queue.queuePrev;
while (s != old.queue) {
Entry<V> e = find(s.key, getHash(s.key));
if (e == null) {
e = copy(s);
addToMap(e);
}
addToQueue(queue, e);
s = s.queuePrev;
}
s = old.queue2.queuePrev;
while (s != old.queue2) {
Entry<V> e = find(s.key, getHash(s.key));
if (e == null) {
e = copy(s);
addToMap(e);
}
addToQueue(queue2, e);
s = s.queuePrev;
}
}
/**
* Calculate the new number of hash table buckets if the internal map
* should be re-sized.
*
* @return 0 if no resizing is needed, or the new length
*/
int getNewMapLen() {
int len = mask + 1;
if (len * 3 < mapSize * 4 && len < (1 << 28)) {
// more than 75% usage
return len * 2;
} else if (len > 32 && len / 8 > mapSize) {
// less than 12% usage
return len / 2;
}
return 0;
}
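// worked example: len = 16 buckets with mapSize = 13 gives 16 * 3 = 48 <
// 13 * 4 = 52, i.e. above 75% fill, so the table doubles to 32 buckets;
// it shrinks only below roughly 12% usage (and only while len > 32)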
private void addToMap(Entry<V> e) {
int index = getHash(e.key) & mask;
e.mapNext = entries[index];
entries[index] = e;
usedMemory += e.memory;
mapSize++;
}
private static <V> Entry<V> copy(Entry<V> old) {
Entry<V> e = new Entry<>();
e.key = old.key;
e.value = old.value;
e.memory = old.memory;
e.topMove = old.topMove;
return e;
}
/**
* Get the memory used for the given key.
*
* @param key the key
* @param hash the hash
* @return the memory, or 0 if there is no resident entry
*/
int getMemory(long key, int hash) {
Entry<V> e = find(key, hash);
return e == null ? 0 : e.memory;
}
/**
* Get the value for the given key if the entry is cached. This method
* adjusts the internal state of the cache sometimes, to ensure commonly
* used entries stay in the cache.
*
* @param key the key
* @param hash the hash
* @return the value, or null if there is no resident entry
*/
V get(long key, int hash) {
Entry<V> e = find(key, hash);
if (e == null) {
// the entry was not found
misses++;
return null;
}
V value = e.value;
if (value == null) {
// it was a non-resident entry
misses++;
return null;
}
if (e.isHot()) {
if (e != stack.stackNext) {
if (stackMoveDistance == 0 ||
stackMoveCounter - e.topMove > stackMoveDistance) {
access(key, hash);
}
}
} else {
access(key, hash);
}
hits++;
return value;
}
/**
* Access an item, moving the entry to the top of the stack or front of
* the queue if found.
*
* @param key the key
*/
private synchronized void access(long key, int hash) {
Entry<V> e = find(key, hash);
if (e == null || e.value == null) {
return;
}
if (e.isHot()) {
if (e != stack.stackNext) {
if (stackMoveDistance == 0 ||
stackMoveCounter - e.topMove > stackMoveDistance) {
// move a hot entry to the top of the stack
// unless it is already there
boolean wasEnd = e == stack.stackPrev;
removeFromStack(e);
if (wasEnd) {
// if moving the last entry, the last entry
// could now be cold, which is not allowed
pruneStack();
}
addToStack(e);
}
}
} else {
removeFromQueue(e);
if (e.stackNext != null) {
// resident cold entries become hot
// if they are on the stack
removeFromStack(e);
// which means a hot entry needs to become cold
// (this entry is cold, that means there is at least one
// more entry in the stack, which must be hot)
convertOldestHotToCold();
} else {
// cold entries that are not on the stack
// move to the front of the queue
addToQueue(queue, e);
}
// in any case, the cold entry is moved to the top of the stack
addToStack(e);
}
}
/**
* Add an entry to the cache. The entry may or may not exist in the
* cache yet. This method will usually mark unknown entries as cold and
* known entries as hot.
*
* @param key the key
* @param hash the hash
* @param value the value (may not be null)
* @param memory the memory used for the given entry
* @return the old value, or null if there was no resident entry
*/
synchronized V put(long key, int hash, V value, int memory) {
if (value == null) {
throw DataUtils.newIllegalArgumentException(
"The value may not be null");
}
V old;
Entry<V> e = find(key, hash);
boolean existed;
if (e == null) {
existed = false;
old = null;
} else {
existed = true;
old = e.value;
remove(key, hash);
}
if (memory > maxMemory) {
// the new entry is too big to fit
return old;
}
e = new Entry<>();
e.key = key;
e.value = value;
e.memory = memory;
int index = hash & mask;
e.mapNext = entries[index];
entries[index] = e;
usedMemory += memory;
if (usedMemory > maxMemory) {
// old entries need to be removed
evict();
// if the cache is full, the new entry is
// cold if possible
if (stackSize > 0) {
// the new cold entry is at the top of the queue
addToQueue(queue, e);
}
}
mapSize++;
// added entries are always added to the stack
addToStack(e);
if (existed) {
// if it was there before (even non-resident), it becomes hot
access(key, hash);
}
return old;
}
/**
* Remove an entry. Both resident and non-resident entries can be
* removed.
*
* @param key the key
* @param hash the hash
* @return the old value, or null if there was no resident entry
*/
synchronized V remove(long key, int hash) {
int index = hash & mask;
Entry<V> e = entries[index];
if (e == null) {
return null;
}
V old;
if (e.key == key) {
old = e.value;
entries[index] = e.mapNext;
} else {
Entry<V> last;
do {
last = e;
e = e.mapNext;
if (e == null) {
return null;
}
} while (e.key != key);
old = e.value;
last.mapNext = e.mapNext;
}
mapSize--;
usedMemory -= e.memory;
if (e.stackNext != null) {
removeFromStack(e);
}
if (e.isHot()) {
// when removing a hot entry, the newest cold entry gets hot,
// so the number of hot entries does not change
e = queue.queueNext;
if (e != queue) {
removeFromQueue(e);
if (e.stackNext == null) {
addToStackBottom(e);
}
}
} else {
removeFromQueue(e);
}
pruneStack();
return old;
}
/**
* Evict cold entries (resident and non-resident) until the memory limit
* is reached. The new entry is added as a cold entry, except if it is
* the only entry.
*/
private void evict() {
do {
evictBlock();
} while (usedMemory > maxMemory);
}
private void evictBlock() {
// ensure there are not too many hot entries: right shift of 5 is
// division by 32; that means if only 1/32 (3.125%) or fewer of the
// entries are cold, a hot entry needs to become cold
while (queueSize <= (mapSize >>> 5) && stackSize > 0) {
convertOldestHotToCold();
}
// the oldest resident cold entries become non-resident
while (usedMemory > maxMemory && queueSize > 0) {
Entry<V> e = queue.queuePrev;
usedMemory -= e.memory;
removeFromQueue(e);
e.value = null;
e.memory = 0;
addToQueue(queue2, e);
// the size of the non-resident-cold entries needs to be limited
int maxQueue2Size = nonResidentQueueSize * (mapSize - queue2Size);
if (maxQueue2Size >= 0) {
while (queue2Size > maxQueue2Size) {
e = queue2.queuePrev;
int hash = getHash(e.key);
remove(e.key, hash);
}
}
}
}
private void convertOldestHotToCold() {
// the last entry of the stack is known to be hot
Entry<V> last = stack.stackPrev;
if (last == stack) {
// never remove the stack head itself (this would mean the
// internal structure of the cache is corrupt)
throw new IllegalStateException();
}
// remove from stack - which is done anyway in the stack pruning,
// but we can do it here as well
removeFromStack(last);
// adding an entry to the queue will make it cold
addToQueue(queue, last);
pruneStack();
}
/**
* Ensure the last entry of the stack is cold.
*/
private void pruneStack() {
while (true) {
Entry<V> last = stack.stackPrev;
// must stop at a hot entry or the stack head,
// but the stack head itself is also hot, so we
// don't have to test it
if (last.isHot()) {
break;
}
// the cold entry is still in the queue
removeFromStack(last);
}
}
/**
* Try to find an entry in the map.
*
* @param key the key
* @param hash the hash
* @return the entry (might be a non-resident)
*/
Entry<V> find(long key, int hash) {
int index = hash & mask;
Entry<V> e = entries[index];
while (e != null && e.key != key) {
e = e.mapNext;
}
return e;
}
private void addToStack(Entry<V> e) {
e.stackPrev = stack;
e.stackNext = stack.stackNext;
e.stackNext.stackPrev = e;
stack.stackNext = e;
stackSize++;
e.topMove = stackMoveCounter++;
}
private void addToStackBottom(Entry<V> e) {
e.stackNext = stack;
e.stackPrev = stack.stackPrev;
e.stackPrev.stackNext = e;
stack.stackPrev = e;
stackSize++;
}
/**
* Remove the entry from the stack. The head itself must not be removed.
*
* @param e the entry
*/
private void removeFromStack(Entry<V> e) {
e.stackPrev.stackNext = e.stackNext;
e.stackNext.stackPrev = e.stackPrev;
e.stackPrev = e.stackNext = null;
stackSize--;
}
private void addToQueue(Entry<V> q, Entry<V> e) {
e.queuePrev = q;
e.queueNext = q.queueNext;
e.queueNext.queuePrev = e;
q.queueNext = e;
if (e.value != null) {
queueSize++;
} else {
queue2Size++;
}
}
private void removeFromQueue(Entry<V> e) {
e.queuePrev.queueNext = e.queueNext;
e.queueNext.queuePrev = e.queuePrev;
e.queuePrev = e.queueNext = null;
if (e.value != null) {
queueSize--;
} else {
queue2Size--;
}
}
/**
* Get the list of keys. This method allows reading the internal state
* of the cache.
*
* @param cold if true, only keys for the cold entries are returned
* @param nonResident true for non-resident entries
* @return the key list
*/
synchronized List<Long> keys(boolean cold, boolean nonResident) {
ArrayList<Long> keys = new ArrayList<>();
if (cold) {
Entry<V> start = nonResident ? queue2 : queue;
for (Entry<V> e = start.queueNext; e != start;
e = e.queueNext) {
keys.add(e.key);
}
} else {
for (Entry<V> e = stack.stackNext; e != stack;
e = e.stackNext) {
keys.add(e.key);
}
}
return keys;
}
/**
* Check whether there is a resident entry for the given key. This
* method does not adjust the internal state of the cache.
*
* @param key the key
* @param hash the hash
* @return true if there is a resident entry
*/
boolean containsKey(long key, int hash) {
Entry<V> e = find(key, hash);
return e != null && e.value != null;
}
/**
* Get the set of keys for resident entries.
*
* @return the set of keys
*/
synchronized Set<Long> keySet() {
HashSet<Long> set = new HashSet<>();
for (Entry<V> e = stack.stackNext; e != stack; e = e.stackNext) {
set.add(e.key);
}
for (Entry<V> e = queue.queueNext; e != queue; e = e.queueNext) {
set.add(e.key);
}
return set;
}
/**
* Set the maximum memory this cache should use. This will not
* immediately cause entries to be removed; it only changes
* the limit. To resize the internal array, call the clear method.
*
* @param maxMemory the maximum size (1 or larger) in bytes
*/
void setMaxMemory(long maxMemory) {
this.maxMemory = maxMemory;
}
}
/**
* A cache entry. Each entry is either hot (low inter-reference recency;
* LIR), cold (high inter-reference recency; HIR), or non-resident-cold. Hot
* entries are in the stack only. Cold entries are in the queue, and may be
* in the stack. Non-resident-cold entries have their value set to null and
* are in the stack and in the non-resident queue.
*
* @param <V> the value type
*/
static class Entry<V> {
/**
* The key.
*/
long key;
/**
* The value. Set to null for non-resident-cold entries.
*/
V value;
/**
* The estimated memory used.
*/
int memory;
/**
* When the item was last moved to the top of the stack.
*/
int topMove;
/**
* The next entry in the stack.
*/
Entry<V> stackNext;
/**
* The previous entry in the stack.
*/
Entry<V> stackPrev;
/**
* The next entry in the queue (either the resident queue or the
* non-resident queue).
*/
Entry<V> queueNext;
/**
* The previous entry in the queue.
*/
Entry<V> queuePrev;
/**
* The next entry in the map (the chained entry).
*/
Entry<V> mapNext;
/**
* Whether this entry is hot. Cold entries are in one of the two queues.
*
* @return whether the entry is hot
*/
boolean isHot() {
return queueNext == null;
}
}
/**
* The cache configuration.
*/
public static class Config {
/**
* The maximum memory to use (1 or larger).
*/
public long maxMemory = 1;
/**
* The number of cache segments (must be a power of 2).
*/
public int segmentCount = 16;
/**
         * How many other items are to be moved to the top of the stack before
* the current item is moved.
*/
public int stackMoveDistance = 32;
/**
* The number of entries in the non-resident queue, as a factor of the
* number of all other entries in the map.
*/
public final int nonResidentQueueSize = 3;
}
}
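/*
 * Editor's illustration, not part of the original H2 source: a minimal
 * usage sketch of the cache above. It assumes the public Config fields
 * shown in this file and the cache's public constructor, put(long, V, int)
 * and get(long) methods; adjust if those signatures differ in your version.
 */
class CacheLongKeyLIRSUsageSketch {
    public static void main(String[] args) {
        CacheLongKeyLIRS.Config config = new CacheLongKeyLIRS.Config();
        // 1 MB budget, spread over the (default) 16 segments
        config.maxMemory = 1024 * 1024;
        CacheLongKeyLIRS<String> cache = new CacheLongKeyLIRS<>(config);
        // the third argument is the caller-estimated memory cost of the entry
        cache.put(1L, "one", 16);
        cache.put(2L, "two", 16);
        // a hit moves the entry towards the top of the LIRS stack,
        // keeping it "hot" (low inter-reference recency)
        System.out.println(cache.get(1L));
    }
}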
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/cache/FilePathCache.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.cache;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import org.h2.store.fs.FileBase;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FilePathWrapper;
/**
* A file with a read cache.
*/
public class FilePathCache extends FilePathWrapper {
/**
* The instance.
*/
public static final FilePathCache INSTANCE = new FilePathCache();
/**
* Register the file system.
*/
static {
FilePath.register(INSTANCE);
}
public static FileChannel wrap(FileChannel f) {
return new FileCache(f);
}
@Override
public FileChannel open(String mode) throws IOException {
return new FileCache(getBase().open(mode));
}
@Override
public String getScheme() {
return "cache";
}
/**
* A file with a read cache.
*/
public static class FileCache extends FileBase {
private static final int CACHE_BLOCK_SIZE = 4 * 1024;
private final FileChannel base;
private final CacheLongKeyLIRS<ByteBuffer> cache;
{
CacheLongKeyLIRS.Config cc = new CacheLongKeyLIRS.Config();
// 1 MB cache size
cc.maxMemory = 1024 * 1024;
cache = new CacheLongKeyLIRS<>(cc);
}
FileCache(FileChannel base) {
this.base = base;
}
@Override
protected void implCloseChannel() throws IOException {
base.close();
}
@Override
public FileChannel position(long newPosition) throws IOException {
base.position(newPosition);
return this;
}
@Override
public long position() throws IOException {
return base.position();
}
@Override
public int read(ByteBuffer dst) throws IOException {
return base.read(dst);
}
@Override
public synchronized int read(ByteBuffer dst, long position) throws IOException {
long cachePos = getCachePos(position);
int off = (int) (position - cachePos);
int len = CACHE_BLOCK_SIZE - off;
len = Math.min(len, dst.remaining());
ByteBuffer buff = cache.get(cachePos);
if (buff == null) {
buff = ByteBuffer.allocate(CACHE_BLOCK_SIZE);
long pos = cachePos;
while (true) {
int read = base.read(buff, pos);
if (read <= 0) {
break;
}
if (buff.remaining() == 0) {
break;
}
pos += read;
}
int read = buff.position();
if (read == CACHE_BLOCK_SIZE) {
cache.put(cachePos, buff, CACHE_BLOCK_SIZE);
} else {
if (read <= 0) {
return -1;
}
len = Math.min(len, read - off);
}
}
dst.put(buff.array(), off, len);
return len == 0 ? -1 : len;
}
private static long getCachePos(long pos) {
return (pos / CACHE_BLOCK_SIZE) * CACHE_BLOCK_SIZE;
}
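        // Worked example (editor's illustration): for position = 10_000 and
        // CACHE_BLOCK_SIZE = 4096, getCachePos returns 8192; read(dst, 10_000)
        // then uses off = 10_000 - 8192 = 1808 and serves at most
        // len = 4096 - 1808 = 2288 bytes from that single cached block.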
@Override
public long size() throws IOException {
return base.size();
}
@Override
public synchronized FileChannel truncate(long newSize) throws IOException {
cache.clear();
base.truncate(newSize);
return this;
}
@Override
public synchronized int write(ByteBuffer src, long position) throws IOException {
clearCache(src, position);
return base.write(src, position);
}
@Override
public synchronized int write(ByteBuffer src) throws IOException {
clearCache(src, position());
return base.write(src);
}
private void clearCache(ByteBuffer src, long position) {
if (cache.size() > 0) {
int len = src.remaining();
long p = getCachePos(position);
while (len > 0) {
cache.remove(p);
p += CACHE_BLOCK_SIZE;
len -= CACHE_BLOCK_SIZE;
}
}
}
@Override
public void force(boolean metaData) throws IOException {
base.force(metaData);
}
@Override
public FileLock tryLock(long position, long size, boolean shared)
throws IOException {
return base.tryLock(position, size, shared);
}
@Override
public String toString() {
return "cache:" + base.toString();
}
}
}
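/*
 * Editor's illustration, not part of the original source: wrapping an
 * arbitrary FileChannel with the 4 KB block read cache above. Only the
 * public static wrap(FileChannel) method defined in this file is assumed;
 * the file name is hypothetical.
 */
class FilePathCacheUsageSketch {
    public static void main(String[] args) throws java.io.IOException {
        java.nio.channels.FileChannel base = java.nio.channels.FileChannel.open(
                java.nio.file.Paths.get("data.bin"),
                java.nio.file.StandardOpenOption.READ);
        java.nio.channels.FileChannel cached = FilePathCache.wrap(base);
        java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocate(100);
        // positional reads go through the block cache; plain read(dst) does not
        cached.read(buf, 0);
        cached.close(); // also closes the underlying channel
    }
}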
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/MVDelegateIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.util.HashSet;
import java.util.List;
import org.h2.engine.Session;
import org.h2.index.BaseIndex;
import org.h2.index.Cursor;
import org.h2.index.IndexType;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableFilter;
import org.h2.value.ValueLong;
/**
* An index that delegates indexing to another index.
*/
public class MVDelegateIndex extends BaseIndex implements MVIndex {
private final MVPrimaryIndex mainIndex;
public MVDelegateIndex(MVTable table, int id, String name,
MVPrimaryIndex mainIndex,
IndexType indexType) {
IndexColumn[] cols = IndexColumn.wrap(new Column[] { table
.getColumn(mainIndex.getMainIndexColumn()) });
this.initBaseIndex(table, id, name, cols, indexType);
this.mainIndex = mainIndex;
if (id < 0) {
throw DbException.throwInternalError("" + name);
}
}
@Override
public void addRowsToBuffer(List<Row> rows, String bufferName) {
throw DbException.throwInternalError();
}
@Override
public void addBufferedRows(List<String> bufferNames) {
throw DbException.throwInternalError();
}
@Override
public void add(Session session, Row row) {
// nothing to do
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public void close(Session session) {
// nothing to do
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
ValueLong min = mainIndex.getKey(first, ValueLong.MIN, ValueLong.MIN);
        // ifNull is MIN as well, because the column is never NULL,
        // so we avoid returning all rows (returning one row is OK)
ValueLong max = mainIndex.getKey(last, ValueLong.MAX, ValueLong.MIN);
return mainIndex.find(session, min, max);
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
return mainIndex.findFirstOrLast(session, first);
}
@Override
public int getColumnIndex(Column col) {
if (col.getColumnId() == mainIndex.getMainIndexColumn()) {
return 0;
}
return -1;
}
@Override
public boolean isFirstColumn(Column column) {
return getColumnIndex(column) == 0;
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return 10 * getCostRangeIndex(masks, mainIndex.getRowCountApproximation(),
filters, filter, sortOrder, true, allColumnsSet);
}
@Override
public boolean needRebuild() {
return false;
}
@Override
public void remove(Session session, Row row) {
// nothing to do
}
@Override
public void remove(Session session) {
mainIndex.setMainIndexColumn(-1);
}
@Override
public void truncate(Session session) {
// nothing to do
}
@Override
public void checkRename() {
// ok
}
@Override
public long getRowCount(Session session) {
return mainIndex.getRowCount(session);
}
@Override
public long getRowCountApproximation() {
return mainIndex.getRowCountApproximation();
}
@Override
public long getDiskSpaceUsed() {
return 0;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/MVIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.util.List;
import org.h2.index.Index;
import org.h2.result.Row;
/**
* An index that stores the data in an MVStore.
*/
public interface MVIndex extends Index {
/**
     * Add the rows to temporary storage (not to the index yet). The rows are
     * sorted by the index columns, which speeds up building the index.
*
* @param rows the rows
* @param bufferName the name of the temporary storage
*/
void addRowsToBuffer(List<Row> rows, String bufferName);
/**
* Add all the index data from the buffers to the index. The index will
* typically use merge sort to add the data more quickly in sorted order.
*
* @param bufferNames the names of the temporary storage
*/
void addBufferedRows(List<String> bufferNames);
}
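/*
 * Editor's illustration, not part of the original source: the call sequence
 * a bulk index rebuild is expected to drive through this interface (see
 * MVTable.rebuildIndexBlockMerge later in this dump). The chunk source and
 * nextTemporaryMapName() follow that caller.
 *
 *   List<String> buffers = new ArrayList<>();
 *   for (List<Row> sortedChunk : chunks) {      // each chunk sorted in memory
 *       String name = store.nextTemporaryMapName();
 *       index.addRowsToBuffer(sortedChunk, name);
 *       buffers.add(name);
 *   }
 *   index.addBufferedRows(buffers);             // k-way merge into the index
 */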
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/MVPrimaryIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicLong;
import org.h2.api.ErrorCode;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.index.BaseIndex;
import org.h2.index.Cursor;
import org.h2.index.IndexType;
import org.h2.message.DbException;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.db.TransactionStore.Transaction;
import org.h2.mvstore.db.TransactionStore.TransactionMap;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableFilter;
import org.h2.value.Value;
import org.h2.value.ValueArray;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
/**
* A table stored in a MVStore.
*/
public class MVPrimaryIndex extends BaseIndex {
private final MVTable mvTable;
private final String mapName;
private final TransactionMap<Value, Value> dataMap;
private final AtomicLong lastKey = new AtomicLong(0);
private int mainIndexColumn = -1;
public MVPrimaryIndex(Database db, MVTable table, int id,
IndexColumn[] columns, IndexType indexType) {
this.mvTable = table;
initBaseIndex(table, id, table.getName() + "_DATA", columns, indexType);
int[] sortTypes = new int[columns.length];
for (int i = 0; i < columns.length; i++) {
sortTypes[i] = SortOrder.ASCENDING;
}
ValueDataType keyType = new ValueDataType(null, null, null);
ValueDataType valueType = new ValueDataType(db.getCompareMode(), db,
sortTypes);
mapName = "table." + getId();
Transaction t = mvTable.getTransactionBegin();
dataMap = t.openMap(mapName, keyType, valueType);
t.commit();
if (!table.isPersistData()) {
dataMap.map.setVolatile(true);
}
Value k = dataMap.map.lastKey(); // include uncommitted keys as well
lastKey.set(k == null ? 0 : k.getLong());
}
@Override
public String getCreateSQL() {
return null;
}
@Override
public String getPlanSQL() {
return table.getSQL() + ".tableScan";
}
public void setMainIndexColumn(int mainIndexColumn) {
this.mainIndexColumn = mainIndexColumn;
}
public int getMainIndexColumn() {
return mainIndexColumn;
}
@Override
public void close(Session session) {
// ok
}
@Override
public void add(Session session, Row row) {
if (mainIndexColumn == -1) {
if (row.getKey() == 0) {
row.setKey(lastKey.incrementAndGet());
}
} else {
long c = row.getValue(mainIndexColumn).getLong();
row.setKey(c);
}
if (mvTable.getContainsLargeObject()) {
for (int i = 0, len = row.getColumnCount(); i < len; i++) {
Value v = row.getValue(i);
Value v2 = v.copy(database, getId());
if (v2.isLinkedToTable()) {
session.removeAtCommitStop(v2);
}
if (v != v2) {
row.setValue(i, v2);
}
}
}
TransactionMap<Value, Value> map = getMap(session);
Value key = ValueLong.get(row.getKey());
Value old = map.getLatest(key);
if (old != null) {
String sql = "PRIMARY KEY ON " + table.getSQL();
if (mainIndexColumn >= 0 && mainIndexColumn < indexColumns.length) {
sql += "(" + indexColumns[mainIndexColumn].getSQL() + ")";
}
DbException e = DbException.get(ErrorCode.DUPLICATE_KEY_1, sql);
e.setSource(this);
throw e;
}
try {
map.put(key, ValueArray.get(row.getValueList()));
} catch (IllegalStateException e) {
throw mvTable.convertException(e);
}
// because it's possible to directly update the key using the _rowid_
// syntax
if (row.getKey() > lastKey.get()) {
lastKey.set(row.getKey());
}
}
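    // Worked example (editor's illustration): with mainIndexColumn == -1
    // and lastKey == 42, inserting a row with key 0 assigns key 43. An
    // insert that sets _rowid_ = 100 explicitly keeps key 100, and the
    // check at the end of add() then advances lastKey from 43 to 100 so
    // that later auto-assigned keys cannot collide with it.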
@Override
public void remove(Session session, Row row) {
if (mvTable.getContainsLargeObject()) {
for (int i = 0, len = row.getColumnCount(); i < len; i++) {
Value v = row.getValue(i);
if (v.isLinkedToTable()) {
session.removeAtCommit(v);
}
}
}
TransactionMap<Value, Value> map = getMap(session);
try {
Value old = map.remove(ValueLong.get(row.getKey()));
if (old == null) {
throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1,
getSQL() + ": " + row.getKey());
}
} catch (IllegalStateException e) {
throw mvTable.convertException(e);
}
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
ValueLong min, max;
if (first == null) {
min = ValueLong.MIN;
} else if (mainIndexColumn < 0) {
min = ValueLong.get(first.getKey());
} else {
ValueLong v = (ValueLong) first.getValue(mainIndexColumn);
if (v == null) {
min = ValueLong.get(first.getKey());
} else {
min = v;
}
}
if (last == null) {
max = ValueLong.MAX;
} else if (mainIndexColumn < 0) {
max = ValueLong.get(last.getKey());
} else {
ValueLong v = (ValueLong) last.getValue(mainIndexColumn);
if (v == null) {
max = ValueLong.get(last.getKey());
} else {
max = v;
}
}
TransactionMap<Value, Value> map = getMap(session);
return new MVStoreCursor(session, map.entryIterator(min, max));
}
@Override
public MVTable getTable() {
return mvTable;
}
@Override
public Row getRow(Session session, long key) {
TransactionMap<Value, Value> map = getMap(session);
Value v = map.get(ValueLong.get(key));
if (v == null) {
throw DbException.get(ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX,
getSQL() + ": " + key);
}
ValueArray array = (ValueArray) v;
Row row = session.createRow(array.getList(), 0);
row.setKey(key);
return row;
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
try {
return 10 * getCostRangeIndex(masks, dataMap.sizeAsLongMax(),
filters, filter, sortOrder, true, allColumnsSet);
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
}
}
@Override
public int getColumnIndex(Column col) {
        // cannot use this index - use the delegate index instead
return -1;
}
@Override
public boolean isFirstColumn(Column column) {
return false;
}
@Override
public void remove(Session session) {
TransactionMap<Value, Value> map = getMap(session);
if (!map.isClosed()) {
Transaction t = session.getTransaction();
t.removeMap(map);
}
}
@Override
public void truncate(Session session) {
TransactionMap<Value, Value> map = getMap(session);
if (mvTable.getContainsLargeObject()) {
database.getLobStorage().removeAllForTable(table.getId());
}
map.clear();
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
TransactionMap<Value, Value> map = getMap(session);
ValueLong v = (ValueLong) (first ? map.firstKey() : map.lastKey());
if (v == null) {
return new MVStoreCursor(session,
Collections.<Entry<Value, Value>> emptyList().iterator());
}
Value value = map.get(v);
Entry<Value, Value> e = new DataUtils.MapEntry<Value, Value>(v, value);
List<Entry<Value, Value>> list = Collections.singletonList(e);
MVStoreCursor c = new MVStoreCursor(session, list.iterator());
c.next();
return c;
}
@Override
public boolean needRebuild() {
return false;
}
@Override
public long getRowCount(Session session) {
TransactionMap<Value, Value> map = getMap(session);
return map.sizeAsLong();
}
/**
* The maximum number of rows, including uncommitted rows of any session.
*
* @return the maximum number of rows
*/
public long getRowCountMax() {
try {
return dataMap.sizeAsLongMax();
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
}
}
@Override
public long getRowCountApproximation() {
return getRowCountMax();
}
@Override
public long getDiskSpaceUsed() {
// TODO estimate disk space usage
return 0;
}
public String getMapName() {
return mapName;
}
@Override
public void checkRename() {
// ok
}
/**
* Get the key from the row.
*
* @param row the row
* @param ifEmpty the value to use if the row is empty
* @param ifNull the value to use if the column is NULL
* @return the key
*/
ValueLong getKey(SearchRow row, ValueLong ifEmpty, ValueLong ifNull) {
if (row == null) {
return ifEmpty;
}
Value v = row.getValue(mainIndexColumn);
if (v == null) {
throw DbException.throwInternalError(row.toString());
} else if (v == ValueNull.INSTANCE) {
return ifNull;
}
return (ValueLong) v.convertTo(Value.LONG);
}
/**
* Search for a specific row or a set of rows.
*
* @param session the session
* @param first the key of the first row
* @param last the key of the last row
* @return the cursor
*/
Cursor find(Session session, ValueLong first, ValueLong last) {
TransactionMap<Value, Value> map = getMap(session);
return new MVStoreCursor(session, map.entryIterator(first, last));
}
@Override
public boolean isRowIdIndex() {
return true;
}
/**
* Get the map to store the data.
*
* @param session the session
* @return the map
*/
TransactionMap<Value, Value> getMap(Session session) {
if (session == null) {
return dataMap;
}
Transaction t = session.getTransaction();
return dataMap.getInstance(t, Long.MAX_VALUE);
}
/**
* A cursor.
*/
class MVStoreCursor implements Cursor {
private final Session session;
private final Iterator<Entry<Value, Value>> it;
private Entry<Value, Value> current;
private Row row;
public MVStoreCursor(Session session, Iterator<Entry<Value, Value>> it) {
this.session = session;
this.it = it;
}
@Override
public Row get() {
if (row == null) {
if (current != null) {
ValueArray array = (ValueArray) current.getValue();
row = session.createRow(array.getList(), 0);
row.setKey(current.getKey().getLong());
}
}
return row;
}
@Override
public SearchRow getSearchRow() {
return get();
}
@Override
public boolean next() {
current = it.hasNext() ? it.next() : null;
row = null;
return current != null;
}
@Override
public boolean previous() {
throw DbException.getUnsupportedException("previous");
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/MVSecondaryIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Queue;
import org.h2.api.ErrorCode;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.index.BaseIndex;
import org.h2.index.Cursor;
import org.h2.index.IndexType;
import org.h2.message.DbException;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.db.TransactionStore.Transaction;
import org.h2.mvstore.db.TransactionStore.TransactionMap;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableFilter;
import org.h2.util.New;
import org.h2.value.CompareMode;
import org.h2.value.Value;
import org.h2.value.ValueArray;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
/**
* A table stored in a MVStore.
*/
public final class MVSecondaryIndex extends BaseIndex implements MVIndex {
/**
* The multi-value table.
*/
final MVTable mvTable;
private final int keyColumns;
private final TransactionMap<Value,Value> dataMap;
public MVSecondaryIndex(Database db, MVTable table, int id, String indexName,
IndexColumn[] columns, IndexType indexType) {
this.mvTable = table;
initBaseIndex(table, id, indexName, columns, indexType);
if (!database.isStarting()) {
checkIndexColumnTypes(columns);
}
// always store the row key in the map key,
// even for unique indexes, as some of the index columns could be null
keyColumns = columns.length + 1;
String mapName = "index." + getId();
int[] sortTypes = new int[keyColumns];
for (int i = 0; i < columns.length; i++) {
sortTypes[i] = columns[i].sortType;
}
sortTypes[keyColumns - 1] = SortOrder.ASCENDING;
ValueDataType keyType = new ValueDataType(
db.getCompareMode(), db, sortTypes);
ValueDataType valueType = new ValueDataType(null, null, null);
Transaction t = mvTable.getTransactionBegin();
dataMap = t.openMap(mapName, keyType, valueType);
t.commit();
if (!keyType.equals(dataMap.getKeyType())) {
throw DbException.throwInternalError("Incompatible key type");
}
}
@Override
public void addRowsToBuffer(List<Row> rows, String bufferName) {
MVMap<ValueArray, Value> map = openMap(bufferName);
for (Row row : rows) {
ValueArray key = convertToKey(row);
map.put(key, ValueNull.INSTANCE);
}
}
private static final class Source {
private final Iterator<ValueArray> iterator;
ValueArray currentRowData;
public Source(Iterator<ValueArray> iterator) {
this.iterator = iterator;
this.currentRowData = iterator.next();
}
        // note: hasNext() advances to the next row, while next() returns the
        // row fetched by the previous hasNext() call (or by the constructor)
        public boolean hasNext() {
            boolean result = iterator.hasNext();
            if (result) {
                currentRowData = iterator.next();
            }
            return result;
        }
public ValueArray next() {
return currentRowData;
}
public static final class Comparator implements java.util.Comparator<Source> {
private final CompareMode compareMode;
public Comparator(CompareMode compareMode) {
this.compareMode = compareMode;
}
@Override
public int compare(Source one, Source two) {
return one.currentRowData.compareTo(two.currentRowData, compareMode);
}
}
}
@Override
public void addBufferedRows(List<String> bufferNames) {
ArrayList<String> mapNames = new ArrayList<>(bufferNames);
CompareMode compareMode = database.getCompareMode();
int buffersCount = bufferNames.size();
Queue<Source> queue = new PriorityQueue<>(buffersCount, new Source.Comparator(compareMode));
for (String bufferName : bufferNames) {
Iterator<ValueArray> iter = openMap(bufferName).keyIterator(null);
if (iter.hasNext()) {
queue.add(new Source(iter));
}
}
try {
while (!queue.isEmpty()) {
Source s = queue.remove();
ValueArray rowData = s.next();
if (indexType.isUnique()) {
Value[] array = rowData.getList();
// don't change the original value
array = array.clone();
array[keyColumns - 1] = ValueLong.MIN;
ValueArray unique = ValueArray.get(array);
SearchRow row = convertToSearchRow(rowData);
if (!mayHaveNullDuplicates(row)) {
requireUnique(row, dataMap, unique);
}
}
dataMap.putCommitted(rowData, ValueNull.INSTANCE);
if (s.hasNext()) {
queue.offer(s);
}
}
} finally {
for (String tempMapName : mapNames) {
MVMap<ValueArray, Value> map = openMap(tempMapName);
map.getStore().removeMap(map);
}
}
}
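    /*
     * Editor's illustration, not part of the original source: the generic
     * k-way merge pattern used by addBufferedRows above, shown on plain
     * sorted iterators instead of MVMap key iterators. All names here are
     * hypothetical.
     */
    static <T extends Comparable<T>> java.util.List<T> kWayMergeSketch(
            java.util.List<java.util.Iterator<T>> sortedInputs) {
        // each queue element carries one input plus its current head value
        class Head {
            final java.util.Iterator<T> it;
            T value;
            Head(java.util.Iterator<T> it) {
                this.it = it;
                this.value = it.next();
            }
        }
        java.util.PriorityQueue<Head> heads = new java.util.PriorityQueue<>(
                Math.max(1, sortedInputs.size()),
                new java.util.Comparator<Head>() {
                    @Override
                    public int compare(Head a, Head b) {
                        return a.value.compareTo(b.value);
                    }
                });
        for (java.util.Iterator<T> it : sortedInputs) {
            if (it.hasNext()) {
                heads.add(new Head(it));
            }
        }
        java.util.List<T> result = new java.util.ArrayList<>();
        while (!heads.isEmpty()) {
            // the smallest current head across all inputs comes next
            Head smallest = heads.remove();
            result.add(smallest.value);
            if (smallest.it.hasNext()) {
                smallest.value = smallest.it.next();
                heads.add(smallest); // re-insert with its new head value
            }
        }
        return result;
    }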
private MVMap<ValueArray, Value> openMap(String mapName) {
int[] sortTypes = new int[keyColumns];
for (int i = 0; i < indexColumns.length; i++) {
sortTypes[i] = indexColumns[i].sortType;
}
sortTypes[keyColumns - 1] = SortOrder.ASCENDING;
ValueDataType keyType = new ValueDataType(
database.getCompareMode(), database, sortTypes);
ValueDataType valueType = new ValueDataType(null, null, null);
MVMap.Builder<ValueArray, Value> builder =
new MVMap.Builder<ValueArray, Value>().keyType(keyType).valueType(valueType);
MVMap<ValueArray, Value> map = database.getMvStore().
getStore().openMap(mapName, builder);
if (!keyType.equals(map.getKeyType())) {
throw DbException.throwInternalError("Incompatible key type");
}
return map;
}
@Override
public void close(Session session) {
// ok
}
@Override
public void add(Session session, Row row) {
TransactionMap<Value, Value> map = getMap(session);
ValueArray array = convertToKey(row);
ValueArray unique = null;
if (indexType.isUnique()) {
// this will detect committed entries only
unique = convertToKey(row);
unique.getList()[keyColumns - 1] = ValueLong.MIN;
if (mayHaveNullDuplicates(row)) {
// No further unique checks required
unique = null;
} else {
requireUnique(row, map, unique);
}
}
try {
map.put(array, ValueNull.INSTANCE);
} catch (IllegalStateException e) {
throw mvTable.convertException(e);
}
if (unique != null) {
            // This code expects that mayHaveNullDuplicates(row) == false
Iterator<Value> it = map.keyIterator(unique, true);
while (it.hasNext()) {
ValueArray k = (ValueArray) it.next();
if (compareRows(row, convertToSearchRow(k)) != 0) {
break;
}
if (map.isSameTransaction(k)) {
continue;
}
if (map.get(k) != null) {
// committed
throw getDuplicateKeyException(k.toString());
}
throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName());
}
}
}
private void requireUnique(SearchRow row, TransactionMap<Value, Value> map, ValueArray unique) {
Value key = map.ceilingKey(unique);
if (key != null) {
ValueArray k = (ValueArray) key;
if (compareRows(row, convertToSearchRow(k)) == 0) {
// committed
throw getDuplicateKeyException(k.toString());
}
}
}
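    // Worked example (editor's illustration): for a unique index on column A,
    // a new row with A = 7 and row key 42 is probed with the key [7, MIN].
    // Any committed entry [7, k] sorts at or after [7, MIN], so ceilingKey
    // finds it, and compareRows (which ignores the trailing row-key column)
    // reports the duplicate; an entry such as [8, k] compares unequal and
    // the check passes.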
@Override
public void remove(Session session, Row row) {
ValueArray array = convertToKey(row);
TransactionMap<Value, Value> map = getMap(session);
try {
Value old = map.remove(array);
if (old == null) {
throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1,
getSQL() + ": " + row.getKey());
}
} catch (IllegalStateException e) {
throw mvTable.convertException(e);
}
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
return find(session, first, false, last);
}
private Cursor find(Session session, SearchRow first, boolean bigger, SearchRow last) {
ValueArray min = convertToKey(first);
if (min != null) {
min.getList()[keyColumns - 1] = ValueLong.MIN;
}
TransactionMap<Value, Value> map = getMap(session);
if (bigger && min != null) {
            // search for the next key: first skip 1, then 2, 4, 8, ...
            // entries until a higher key is found; then halve the step
            // (binary search) back down to 1
int offset = 1;
while (true) {
ValueArray v = (ValueArray) map.relativeKey(min, offset);
if (v != null) {
boolean foundHigher = false;
for (int i = 0; i < keyColumns - 1; i++) {
int idx = columnIds[i];
Value b = first.getValue(idx);
if (b == null) {
break;
}
Value a = v.getList()[i];
if (database.compare(a, b) > 0) {
foundHigher = true;
break;
}
}
if (!foundHigher) {
offset += offset;
min = v;
continue;
}
}
if (offset > 1) {
offset /= 2;
continue;
}
if (map.get(v) == null) {
min = (ValueArray) map.higherKey(min);
if (min == null) {
break;
}
continue;
}
min = v;
break;
}
if (min == null) {
return new MVStoreCursor(session,
Collections.<Value>emptyList().iterator(), null);
}
}
return new MVStoreCursor(session, map.keyIterator(min), last);
}
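    /*
     * Editor's illustration, not part of the original source: the
     * "skip 1, 2, 4, 8, ... then halve" probing used by find(..., bigger, ...)
     * above, shown as a plain exponential (galloping) search over a sorted
     * array. Given a start index with sorted[start] <= target, it returns
     * the index of the first element strictly greater than target (or
     * sorted.length if there is none).
     */
    static int gallopingSearchSketch(long[] sorted, int start, long target) {
        int pos = start;
        int offset = 1;
        while (true) {
            int probe = pos + offset;
            if (probe < sorted.length && sorted[probe] <= target) {
                // still not past the target: advance and double the step
                pos = probe;
                offset += offset;
                continue;
            }
            if (offset > 1) {
                // overshot (or ran off the end): halve the step and retry
                offset /= 2;
                continue;
            }
            // offset == 1: sorted[pos] <= target < sorted[pos + 1] (if any)
            return pos + 1;
        }
    }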
private ValueArray convertToKey(SearchRow r) {
if (r == null) {
return null;
}
Value[] array = new Value[keyColumns];
for (int i = 0; i < columns.length; i++) {
Column c = columns[i];
int idx = c.getColumnId();
Value v = r.getValue(idx);
if (v != null) {
array[i] = v.convertTo(c.getType());
}
}
array[keyColumns - 1] = ValueLong.get(r.getKey());
return ValueArray.get(array);
}
/**
* Convert array of values to a SearchRow.
*
* @param key the index key
* @return the row
*/
SearchRow convertToSearchRow(ValueArray key) {
Value[] array = key.getList();
SearchRow searchRow = mvTable.getTemplateRow();
searchRow.setKey((array[array.length - 1]).getLong());
Column[] cols = getColumns();
for (int i = 0; i < array.length - 1; i++) {
Column c = cols[i];
int idx = c.getColumnId();
Value v = array[i];
searchRow.setValue(idx, v);
}
return searchRow;
}
@Override
public MVTable getTable() {
return mvTable;
}
@Override
public double getCost(Session session, int[] masks,
TableFilter[] filters, int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
try {
return 10 * getCostRangeIndex(masks, dataMap.sizeAsLongMax(),
filters, filter, sortOrder, false, allColumnsSet);
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
}
}
@Override
public void remove(Session session) {
TransactionMap<Value, Value> map = getMap(session);
if (!map.isClosed()) {
Transaction t = session.getTransaction();
t.removeMap(map);
}
}
@Override
public void truncate(Session session) {
TransactionMap<Value, Value> map = getMap(session);
map.clear();
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
TransactionMap<Value, Value> map = getMap(session);
Value key = first ? map.firstKey() : map.lastKey();
while (true) {
if (key == null) {
return new MVStoreCursor(session,
Collections.<Value>emptyList().iterator(), null);
}
if (((ValueArray) key).getList()[0] != ValueNull.INSTANCE) {
break;
}
key = first ? map.higherKey(key) : map.lowerKey(key);
}
ArrayList<Value> list = New.arrayList();
list.add(key);
MVStoreCursor cursor = new MVStoreCursor(session, list.iterator(), null);
cursor.next();
return cursor;
}
@Override
public boolean needRebuild() {
try {
return dataMap.sizeAsLongMax() == 0;
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
}
}
@Override
public long getRowCount(Session session) {
TransactionMap<Value, Value> map = getMap(session);
return map.sizeAsLong();
}
@Override
public long getRowCountApproximation() {
try {
return dataMap.sizeAsLongMax();
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
}
}
@Override
public long getDiskSpaceUsed() {
// TODO estimate disk space usage
return 0;
}
@Override
public boolean canFindNext() {
return true;
}
@Override
public Cursor findNext(Session session, SearchRow higherThan, SearchRow last) {
return find(session, higherThan, true, last);
}
@Override
public void checkRename() {
// ok
}
/**
* Get the map to store the data.
*
* @param session the session
* @return the map
*/
private TransactionMap<Value, Value> getMap(Session session) {
if (session == null) {
return dataMap;
}
Transaction t = session.getTransaction();
return dataMap.getInstance(t, Long.MAX_VALUE);
}
/**
* A cursor.
*/
final class MVStoreCursor implements Cursor {
private final Session session;
private final Iterator<Value> it;
private final SearchRow last;
private Value current;
private SearchRow searchRow;
private Row row;
MVStoreCursor(Session session, Iterator<Value> it, SearchRow last) {
this.session = session;
this.it = it;
this.last = last;
}
@Override
public Row get() {
if (row == null) {
SearchRow r = getSearchRow();
if (r != null) {
row = mvTable.getRow(session, r.getKey());
}
}
return row;
}
@Override
public SearchRow getSearchRow() {
if (searchRow == null) {
if (current != null) {
searchRow = convertToSearchRow((ValueArray) current);
}
}
return searchRow;
}
@Override
public boolean next() {
current = it.hasNext() ? it.next() : null;
searchRow = null;
if (current != null) {
if (last != null && compareRows(getSearchRow(), last) > 0) {
searchRow = null;
current = null;
}
}
row = null;
return current != null;
}
@Override
public boolean previous() {
throw DbException.getUnsupportedException("previous");
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/MVSpatialIndex.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import org.h2.api.ErrorCode;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.index.BaseIndex;
import org.h2.index.Cursor;
import org.h2.index.IndexType;
import org.h2.index.SpatialIndex;
import org.h2.index.SpatialTreeIndex;
import org.h2.message.DbException;
import org.h2.mvstore.db.TransactionStore.Transaction;
import org.h2.mvstore.db.TransactionStore.TransactionMap;
import org.h2.mvstore.db.TransactionStore.VersionedValue;
import org.h2.mvstore.db.TransactionStore.VersionedValueType;
import org.h2.mvstore.rtree.MVRTreeMap;
import org.h2.mvstore.rtree.MVRTreeMap.RTreeCursor;
import org.h2.mvstore.rtree.SpatialKey;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.TableFilter;
import org.h2.value.Value;
import org.h2.value.ValueGeometry;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
import org.locationtech.jts.geom.Envelope;
import org.locationtech.jts.geom.Geometry;
/**
* This is an index based on a MVRTreeMap.
*
* @author Thomas Mueller
* @author Noel Grandin
* @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888
*/
public class MVSpatialIndex extends BaseIndex implements SpatialIndex, MVIndex {
/**
* The multi-value table.
*/
final MVTable mvTable;
private final String mapName;
private final TransactionMap<SpatialKey, Value> dataMap;
private final MVRTreeMap<VersionedValue> spatialMap;
/**
* Constructor.
*
* @param db the database
* @param table the table instance
* @param id the index id
* @param indexName the index name
* @param columns the indexed columns (only one geometry column allowed)
* @param indexType the index type (only spatial index)
*/
public MVSpatialIndex(
Database db, MVTable table, int id, String indexName,
IndexColumn[] columns, IndexType indexType) {
if (columns.length != 1) {
throw DbException.getUnsupportedException(
"Can only index one column");
}
IndexColumn col = columns[0];
if ((col.sortType & SortOrder.DESCENDING) != 0) {
throw DbException.getUnsupportedException(
"Cannot index in descending order");
}
if ((col.sortType & SortOrder.NULLS_FIRST) != 0) {
throw DbException.getUnsupportedException(
"Nulls first is not supported");
}
if ((col.sortType & SortOrder.NULLS_LAST) != 0) {
throw DbException.getUnsupportedException(
"Nulls last is not supported");
}
if (col.column.getType() != Value.GEOMETRY) {
throw DbException.getUnsupportedException(
"Spatial index on non-geometry column, "
+ col.column.getCreateSQL());
}
this.mvTable = table;
initBaseIndex(table, id, indexName, columns, indexType);
if (!database.isStarting()) {
checkIndexColumnTypes(columns);
}
mapName = "index." + getId();
ValueDataType vt = new ValueDataType(null, null, null);
VersionedValueType valueType = new VersionedValueType(vt);
MVRTreeMap.Builder<VersionedValue> mapBuilder =
new MVRTreeMap.Builder<VersionedValue>().
valueType(valueType);
spatialMap = db.getMvStore().getStore().openMap(mapName, mapBuilder);
Transaction t = mvTable.getTransactionBegin();
dataMap = t.openMap(spatialMap);
t.commit();
}
@Override
public void addRowsToBuffer(List<Row> rows, String bufferName) {
throw DbException.throwInternalError();
}
@Override
public void addBufferedRows(List<String> bufferNames) {
throw DbException.throwInternalError();
}
@Override
public void close(Session session) {
// ok
}
@Override
public void add(Session session, Row row) {
TransactionMap<SpatialKey, Value> map = getMap(session);
SpatialKey key = getKey(row);
if (key.isNull()) {
return;
}
if (indexType.isUnique()) {
// this will detect committed entries only
RTreeCursor cursor = spatialMap.findContainedKeys(key);
Iterator<SpatialKey> it = map.wrapIterator(cursor, false);
while (it.hasNext()) {
SpatialKey k = it.next();
if (k.equalsIgnoringId(key)) {
throw getDuplicateKeyException(key.toString());
}
}
}
try {
map.put(key, ValueLong.get(0));
} catch (IllegalStateException e) {
throw mvTable.convertException(e);
}
if (indexType.isUnique()) {
// check if there is another (uncommitted) entry
RTreeCursor cursor = spatialMap.findContainedKeys(key);
Iterator<SpatialKey> it = map.wrapIterator(cursor, true);
while (it.hasNext()) {
SpatialKey k = it.next();
if (k.equalsIgnoringId(key)) {
if (map.isSameTransaction(k)) {
continue;
}
map.remove(key);
if (map.get(k) != null) {
// committed
throw getDuplicateKeyException(k.toString());
}
throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1, table.getName());
}
}
}
}
@Override
public void remove(Session session, Row row) {
SpatialKey key = getKey(row);
if (key.isNull()) {
return;
}
TransactionMap<SpatialKey, Value> map = getMap(session);
try {
Value old = map.remove(key);
if (old == null) {
throw DbException.get(ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1,
getSQL() + ": " + row.getKey());
}
} catch (IllegalStateException e) {
throw mvTable.convertException(e);
}
}
@Override
public Cursor find(TableFilter filter, SearchRow first, SearchRow last) {
return find(filter.getSession());
}
@Override
public Cursor find(Session session, SearchRow first, SearchRow last) {
return find(session);
}
private Cursor find(Session session) {
Iterator<SpatialKey> cursor = spatialMap.keyIterator(null);
TransactionMap<SpatialKey, Value> map = getMap(session);
Iterator<SpatialKey> it = map.wrapIterator(cursor, false);
return new MVStoreCursor(session, it);
}
@Override
public Cursor findByGeometry(TableFilter filter, SearchRow first,
SearchRow last, SearchRow intersection) {
Session session = filter.getSession();
if (intersection == null) {
return find(session, first, last);
}
Iterator<SpatialKey> cursor =
spatialMap.findIntersectingKeys(getKey(intersection));
TransactionMap<SpatialKey, Value> map = getMap(session);
Iterator<SpatialKey> it = map.wrapIterator(cursor, false);
return new MVStoreCursor(session, it);
}
private SpatialKey getKey(SearchRow row) {
Value v = row.getValue(columnIds[0]);
if (v == ValueNull.INSTANCE) {
return new SpatialKey(row.getKey());
}
Geometry g = ((ValueGeometry) v.convertTo(Value.GEOMETRY)).getGeometryNoCopy();
Envelope env = g.getEnvelopeInternal();
return new SpatialKey(row.getKey(),
(float) env.getMinX(), (float) env.getMaxX(),
(float) env.getMinY(), (float) env.getMaxY());
}
/**
* Get the row with the given index key.
*
* @param key the index key
* @return the row
*/
SearchRow getRow(SpatialKey key) {
SearchRow searchRow = mvTable.getTemplateRow();
searchRow.setKey(key.getId());
return searchRow;
}
@Override
public MVTable getTable() {
return mvTable;
}
@Override
public double getCost(Session session, int[] masks, TableFilter[] filters,
int filter, SortOrder sortOrder,
HashSet<Column> allColumnsSet) {
return SpatialTreeIndex.getCostRangeIndex(masks, columns);
}
@Override
public void remove(Session session) {
TransactionMap<SpatialKey, Value> map = getMap(session);
if (!map.isClosed()) {
Transaction t = session.getTransaction();
t.removeMap(map);
}
}
@Override
public void truncate(Session session) {
TransactionMap<SpatialKey, Value> map = getMap(session);
map.clear();
}
@Override
public boolean canGetFirstOrLast() {
return true;
}
@Override
public Cursor findFirstOrLast(Session session, boolean first) {
if (!first) {
throw DbException.throwInternalError(
"Spatial Index can only be fetch in ascending order");
}
return find(session);
}
@Override
public boolean needRebuild() {
try {
return dataMap.sizeAsLongMax() == 0;
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
}
}
@Override
public long getRowCount(Session session) {
TransactionMap<SpatialKey, Value> map = getMap(session);
return map.sizeAsLong();
}
@Override
public long getRowCountApproximation() {
try {
return dataMap.sizeAsLongMax();
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
}
}
@Override
public long getDiskSpaceUsed() {
// TODO estimate disk space usage
return 0;
}
@Override
public void checkRename() {
// ok
}
/**
* Get the map to store the data.
*
* @param session the session
* @return the map
*/
TransactionMap<SpatialKey, Value> getMap(Session session) {
if (session == null) {
return dataMap;
}
Transaction t = session.getTransaction();
return dataMap.getInstance(t, Long.MAX_VALUE);
}
/**
* A cursor.
*/
class MVStoreCursor implements Cursor {
private final Session session;
private final Iterator<SpatialKey> it;
private SpatialKey current;
private SearchRow searchRow;
private Row row;
public MVStoreCursor(Session session, Iterator<SpatialKey> it) {
this.session = session;
this.it = it;
}
@Override
public Row get() {
if (row == null) {
SearchRow r = getSearchRow();
if (r != null) {
row = mvTable.getRow(session, r.getKey());
}
}
return row;
}
@Override
public SearchRow getSearchRow() {
if (searchRow == null) {
if (current != null) {
searchRow = getRow(current);
}
}
return searchRow;
}
@Override
public boolean next() {
            current = it.hasNext() ? it.next() : null;
searchRow = null;
row = null;
return current != null;
}
@Override
public boolean previous() {
throw DbException.getUnsupportedException("previous");
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/MVTable.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import org.h2.api.DatabaseEventListener;
import org.h2.api.ErrorCode;
import org.h2.command.ddl.CreateTableData;
import org.h2.constraint.Constraint;
import org.h2.constraint.ConstraintReferential;
import org.h2.engine.Constants;
import org.h2.engine.DbObject;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.index.Cursor;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.index.MultiVersionIndex;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.db.MVTableEngine.Store;
import org.h2.mvstore.db.TransactionStore.Transaction;
import org.h2.result.Row;
import org.h2.result.SortOrder;
import org.h2.schema.SchemaObject;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.Table;
import org.h2.table.TableBase;
import org.h2.table.TableType;
import org.h2.util.DebuggingThreadLocal;
import org.h2.util.MathUtils;
import org.h2.util.New;
import org.h2.value.DataType;
import org.h2.value.Value;
/**
* A table stored in a MVStore.
*/
public class MVTable extends TableBase {
/**
* The table name this thread is waiting to lock.
*/
public static final DebuggingThreadLocal<String> WAITING_FOR_LOCK;
/**
* The table names this thread has exclusively locked.
*/
public static final DebuggingThreadLocal<ArrayList<String>> EXCLUSIVE_LOCKS;
/**
     * The table names this thread has a shared lock on.
*/
public static final DebuggingThreadLocal<ArrayList<String>> SHARED_LOCKS;
/**
     * The types of trace lock events.
*/
private enum TraceLockEvent{
TRACE_LOCK_OK("ok"),
TRACE_LOCK_WAITING_FOR("waiting for"),
TRACE_LOCK_REQUESTING_FOR("requesting for"),
TRACE_LOCK_TIMEOUT_AFTER("timeout after "),
TRACE_LOCK_UNLOCK("unlock"),
TRACE_LOCK_ADDED_FOR("added for"),
TRACE_LOCK_ADD_UPGRADED_FOR("add (upgraded) for ");
private final String eventText;
TraceLockEvent(String eventText) {
this.eventText = eventText;
}
public String getEventText() {
return eventText;
}
}
private static final String NO_EXTRA_INFO = "";
static {
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
WAITING_FOR_LOCK = new DebuggingThreadLocal<>();
EXCLUSIVE_LOCKS = new DebuggingThreadLocal<>();
SHARED_LOCKS = new DebuggingThreadLocal<>();
} else {
WAITING_FOR_LOCK = null;
EXCLUSIVE_LOCKS = null;
SHARED_LOCKS = null;
}
}
private MVPrimaryIndex primaryIndex;
private final ArrayList<Index> indexes = New.arrayList();
private volatile long lastModificationId;
private volatile Session lockExclusiveSession;
// using a ConcurrentHashMap as a set
private final ConcurrentHashMap<Session, Session> lockSharedSessions =
new ConcurrentHashMap<>();
/**
     * The queue of sessions waiting to lock the table. It is a FIFO queue to
     * prevent starvation, since Java's synchronized locking is not fair.
*/
private final ArrayDeque<Session> waitingSessions = new ArrayDeque<>();
private final Trace traceLock;
private int changesSinceAnalyze;
private int nextAnalyze;
private final boolean containsLargeObject;
private Column rowIdColumn;
private final MVTableEngine.Store store;
private final TransactionStore transactionStore;
public MVTable(CreateTableData data, MVTableEngine.Store store) {
super(data);
nextAnalyze = database.getSettings().analyzeAuto;
this.store = store;
this.transactionStore = store.getTransactionStore();
this.isHidden = data.isHidden;
boolean b = false;
for (Column col : getColumns()) {
if (DataType.isLargeObject(col.getType())) {
b = true;
break;
}
}
containsLargeObject = b;
traceLock = database.getTrace(Trace.LOCK);
}
/**
* Initialize the table.
*
* @param session the session
*/
void init(Session session) {
primaryIndex = new MVPrimaryIndex(session.getDatabase(), this, getId(),
IndexColumn.wrap(getColumns()), IndexType.createScan(true));
indexes.add(primaryIndex);
}
public String getMapName() {
return primaryIndex.getMapName();
}
@Override
public boolean lock(Session session, boolean exclusive,
boolean forceLockEvenInMvcc) {
int lockMode = database.getLockMode();
if (lockMode == Constants.LOCK_MODE_OFF) {
return false;
}
if (!forceLockEvenInMvcc && database.isMultiVersion()) {
// MVCC: update, delete, and insert use a shared lock.
// Select doesn't lock except when using FOR UPDATE and
// the system property h2.selectForUpdateMvcc
// is not enabled
if (exclusive) {
exclusive = false;
} else {
if (lockExclusiveSession == null) {
return false;
}
}
}
if (lockExclusiveSession == session) {
return true;
}
if (!exclusive && lockSharedSessions.containsKey(session)) {
return true;
}
synchronized (getLockSyncObject()) {
if (!exclusive && lockSharedSessions.containsKey(session)) {
return true;
}
session.setWaitForLock(this, Thread.currentThread());
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
WAITING_FOR_LOCK.set(getName());
}
waitingSessions.addLast(session);
try {
doLock1(session, lockMode, exclusive);
} finally {
session.setWaitForLock(null, null);
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
WAITING_FOR_LOCK.remove();
}
waitingSessions.remove(session);
}
}
return false;
}
/**
     * The object on which to synchronize and wait. In multi-threaded mode
     * this is the table itself; in single-threaded mode it is the database,
     * as in that case all operations are synchronized on the database
     * object.
*
* @return the lock sync object
*/
private Object getLockSyncObject() {
if (database.isMultiThreaded()) {
return this;
}
return database;
}
private void doLock1(Session session, int lockMode, boolean exclusive) {
traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_REQUESTING_FOR, NO_EXTRA_INFO);
// don't get the current time unless necessary
long max = 0;
boolean checkDeadlock = false;
while (true) {
// if I'm the next one in the queue
if (waitingSessions.getFirst() == session) {
if (doLock2(session, lockMode, exclusive)) {
return;
}
}
if (checkDeadlock) {
ArrayList<Session> sessions = checkDeadlock(session, null, null);
if (sessions != null) {
throw DbException.get(ErrorCode.DEADLOCK_1,
getDeadlockDetails(sessions, exclusive));
}
} else {
// check for deadlocks from now on
checkDeadlock = true;
}
long now = System.nanoTime();
if (max == 0) {
// try at least one more time
max = now + TimeUnit.MILLISECONDS.toNanos(session.getLockTimeout());
} else if (now >= max) {
traceLock(session, exclusive,
TraceLockEvent.TRACE_LOCK_TIMEOUT_AFTER, NO_EXTRA_INFO+session.getLockTimeout());
throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, getName());
}
try {
traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_WAITING_FOR, NO_EXTRA_INFO);
if (database.getLockMode() == Constants.LOCK_MODE_TABLE_GC) {
for (int i = 0; i < 20; i++) {
long free = Runtime.getRuntime().freeMemory();
System.gc();
long free2 = Runtime.getRuntime().freeMemory();
if (free == free2) {
break;
}
}
}
// don't wait too long so that deadlocks are detected early
long sleep = Math.min(Constants.DEADLOCK_CHECK,
TimeUnit.NANOSECONDS.toMillis(max - now));
if (sleep == 0) {
sleep = 1;
}
getLockSyncObject().wait(sleep);
} catch (InterruptedException e) {
// ignore
}
}
}
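    /*
     * Editor's illustration, not part of the original source: the FIFO
     * wait/notify pattern behind doLock1 above, reduced to a minimal
     * stand-alone exclusive lock. The real code adds shared locks, timeouts
     * and deadlock checks; all names here are hypothetical.
     */
    static class FifoLockSketch {
        private final ArrayDeque<Thread> waiting = new ArrayDeque<>();
        private Thread owner;
        synchronized void lock() throws InterruptedException {
            waiting.addLast(Thread.currentThread());
            try {
                // only the head of the queue may acquire: prevents starvation
                while (waiting.getFirst() != Thread.currentThread()
                        || owner != null) {
                    wait();
                }
                owner = Thread.currentThread();
            } finally {
                waiting.remove(Thread.currentThread());
            }
        }
        synchronized void unlock() {
            owner = null;
            if (!waiting.isEmpty()) {
                notifyAll(); // wake everyone; only the new queue head proceeds
            }
        }
    }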
private boolean doLock2(Session session, int lockMode, boolean exclusive) {
if (exclusive) {
if (lockExclusiveSession == null) {
if (lockSharedSessions.isEmpty()) {
traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_ADDED_FOR, NO_EXTRA_INFO);
session.addLock(this);
lockExclusiveSession = session;
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
if (EXCLUSIVE_LOCKS.get() == null) {
EXCLUSIVE_LOCKS.set(new ArrayList<String>());
}
EXCLUSIVE_LOCKS.get().add(getName());
}
return true;
} else if (lockSharedSessions.size() == 1 &&
lockSharedSessions.containsKey(session)) {
traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_ADD_UPGRADED_FOR, NO_EXTRA_INFO);
lockExclusiveSession = session;
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
if (EXCLUSIVE_LOCKS.get() == null) {
EXCLUSIVE_LOCKS.set(new ArrayList<String>());
}
EXCLUSIVE_LOCKS.get().add(getName());
}
return true;
}
}
} else {
if (lockExclusiveSession == null) {
if (lockMode == Constants.LOCK_MODE_READ_COMMITTED) {
if (!database.isMultiThreaded() &&
!database.isMultiVersion()) {
// READ_COMMITTED: a read lock is acquired,
// but released immediately after the operation
// is complete.
// When allowing only one thread, no lock is
// required.
// Row level locks work like read committed.
return true;
}
}
if (!lockSharedSessions.containsKey(session)) {
traceLock(session, exclusive, TraceLockEvent.TRACE_LOCK_OK, NO_EXTRA_INFO);
session.addLock(this);
lockSharedSessions.put(session, session);
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
if (SHARED_LOCKS.get() == null) {
SHARED_LOCKS.set(new ArrayList<String>());
}
SHARED_LOCKS.get().add(getName());
}
}
return true;
}
}
return false;
}
private static String getDeadlockDetails(ArrayList<Session> sessions, boolean exclusive) {
// We add the thread details here to make it easier for customers to
// match up these error messages with their own logs.
StringBuilder buff = new StringBuilder();
for (Session s : sessions) {
Table lock = s.getWaitForLock();
Thread thread = s.getWaitForLockThread();
buff.append("\nSession ").append(s.toString())
.append(" on thread ").append(thread.getName())
.append(" is waiting to lock ").append(lock.toString())
.append(exclusive ? " (exclusive)" : " (shared)")
.append(" while locking ");
int i = 0;
for (Table t : s.getLocks()) {
if (i++ > 0) {
buff.append(", ");
}
buff.append(t.toString());
if (t instanceof MVTable) {
if (((MVTable) t).lockExclusiveSession == s) {
buff.append(" (exclusive)");
} else {
buff.append(" (shared)");
}
}
}
buff.append('.');
}
return buff.toString();
}
@Override
public ArrayList<Session> checkDeadlock(Session session, Session clash,
Set<Session> visited) {
// only one deadlock check at any given time
synchronized (MVTable.class) {
if (clash == null) {
// verification is started
clash = session;
visited = new HashSet<>();
} else if (clash == session) {
                // we found a cycle in which this session is involved
return New.arrayList();
} else if (visited.contains(session)) {
// we have already checked this session.
                // there is a cycle, but the sessions in the cycle need to
                // find it out themselves
return null;
}
visited.add(session);
ArrayList<Session> error = null;
for (Session s : lockSharedSessions.keySet()) {
if (s == session) {
// it doesn't matter if we have locked the object already
continue;
}
Table t = s.getWaitForLock();
if (t != null) {
error = t.checkDeadlock(s, clash, visited);
if (error != null) {
error.add(session);
break;
}
}
}
// take a local copy so we don't see inconsistent data, since we are
// not locked while checking the lockExclusiveSession value
Session copyOfLockExclusiveSession = lockExclusiveSession;
if (error == null && copyOfLockExclusiveSession != null) {
Table t = copyOfLockExclusiveSession.getWaitForLock();
if (t != null) {
error = t.checkDeadlock(copyOfLockExclusiveSession, clash,
visited);
if (error != null) {
error.add(session);
}
}
}
return error;
}
}
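    /*
     * Editor's illustration, not part of the original source: the wait-for
     * graph traversal behind checkDeadlock above, on a plain adjacency map
     * keyed by hypothetical session names. A deadlock exists if following
     * "waits for" edges from a session leads back to that same session.
     */
    static boolean hasWaitCycleSketch(
            java.util.Map<String, java.util.Set<String>> waitsFor,
            String start, String current, Set<String> visited) {
        if (!visited.add(current)) {
            // already explored from this session; let it find its own cycle
            return false;
        }
        java.util.Set<String> blockers = waitsFor.get(current);
        if (blockers != null) {
            for (String s : blockers) {
                if (s.equals(start)
                        || hasWaitCycleSketch(waitsFor, start, s, visited)) {
                    return true;
                }
            }
        }
        return false;
    }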
private void traceLock(Session session, boolean exclusive, TraceLockEvent eventEnum, String extraInfo) {
if (traceLock.isDebugEnabled()) {
traceLock.debug("{0} {1} {2} {3} {4}", session.getId(),
exclusive ? "exclusive write lock" : "shared read lock", eventEnum.getEventText(),
getName(), extraInfo);
}
}
@Override
public boolean isLockedExclusively() {
return lockExclusiveSession != null;
}
@Override
public boolean isLockedExclusivelyBy(Session session) {
return lockExclusiveSession == session;
}
@Override
public void unlock(Session s) {
if (database != null) {
traceLock(s, lockExclusiveSession == s, TraceLockEvent.TRACE_LOCK_UNLOCK, NO_EXTRA_INFO);
if (lockExclusiveSession == s) {
lockSharedSessions.remove(s);
lockExclusiveSession = null;
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
if (EXCLUSIVE_LOCKS.get() != null) {
EXCLUSIVE_LOCKS.get().remove(getName());
}
}
}
synchronized (getLockSyncObject()) {
if (lockSharedSessions.size() > 0) {
lockSharedSessions.remove(s);
if (SysProperties.THREAD_DEADLOCK_DETECTOR) {
if (SHARED_LOCKS.get() != null) {
SHARED_LOCKS.get().remove(getName());
}
}
}
if (!waitingSessions.isEmpty()) {
getLockSyncObject().notifyAll();
}
}
}
}
@Override
public boolean canTruncate() {
if (getCheckForeignKeyConstraints() &&
database.getReferentialIntegrity()) {
ArrayList<Constraint> constraints = getConstraints();
if (constraints != null) {
for (Constraint c : constraints) {
if (c.getConstraintType() != Constraint.Type.REFERENTIAL) {
continue;
}
ConstraintReferential ref = (ConstraintReferential) c;
if (ref.getRefTable() == this) {
return false;
}
}
}
}
return true;
}
@Override
public void close(Session session) {
// ignore
}
@Override
public Row getRow(Session session, long key) {
return primaryIndex.getRow(session, key);
}
@Override
public Index addIndex(Session session, String indexName, int indexId,
IndexColumn[] cols, IndexType indexType, boolean create,
String indexComment) {
if (indexType.isPrimaryKey()) {
for (IndexColumn c : cols) {
Column column = c.column;
if (column.isNullable()) {
throw DbException.get(
ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1,
column.getName());
}
column.setPrimaryKey(true);
}
}
boolean isSessionTemporary = isTemporary() && !isGlobalTemporary();
if (!isSessionTemporary) {
database.lockMeta(session);
}
MVIndex index;
int mainIndexColumn;
mainIndexColumn = getMainIndexColumn(indexType, cols);
if (database.isStarting()) {
if (transactionStore.store.hasMap("index." + indexId)) {
mainIndexColumn = -1;
}
} else if (primaryIndex.getRowCountMax() != 0) {
mainIndexColumn = -1;
}
if (mainIndexColumn != -1) {
primaryIndex.setMainIndexColumn(mainIndexColumn);
index = new MVDelegateIndex(this, indexId, indexName, primaryIndex,
indexType);
} else if (indexType.isSpatial()) {
index = new MVSpatialIndex(session.getDatabase(), this, indexId,
indexName, cols, indexType);
} else {
index = new MVSecondaryIndex(session.getDatabase(), this, indexId,
indexName, cols, indexType);
}
if (index.needRebuild()) {
rebuildIndex(session, index, indexName);
}
index.setTemporary(isTemporary());
if (index.getCreateSQL() != null) {
index.setComment(indexComment);
if (isSessionTemporary) {
session.addLocalTempTableIndex(index);
} else {
database.addSchemaObject(session, index);
}
}
indexes.add(index);
setModified();
return index;
}
private void rebuildIndex(Session session, MVIndex index, String indexName) {
try {
if (session.getDatabase().getMvStore() == null ||
index instanceof MVSpatialIndex) {
// in-memory
rebuildIndexBuffered(session, index);
} else {
rebuildIndexBlockMerge(session, index);
}
} catch (DbException e) {
getSchema().freeUniqueName(indexName);
try {
index.remove(session);
} catch (DbException e2) {
                // this can happen, for example, on a storage failure;
                // otherwise it means something is wrong with the database
trace.error(e2, "could not remove index");
throw e2;
}
throw e;
}
}
private void rebuildIndexBlockMerge(Session session, MVIndex index) {
        if (index instanceof MVSpatialIndex) {
            // the spatial index doesn't support multi-way merge sort
            rebuildIndexBuffered(session, index);
            return;
        }
        // Read entries in memory, sort them, write to a new map (in sorted
        // order); repeat (using a new map for every block of 1 MB) until all
        // records are read. Merge all maps to the target (using merge sort;
        // duplicates are detected in the target). For randomly ordered data,
        // this should use relatively few write operations.
        // A possible optimization is: change the buffer size from "row count"
        // to "amount of memory", and buffer index keys instead of rows.
Index scan = getScanIndex(session);
long remaining = scan.getRowCount(session);
long total = remaining;
Cursor cursor = scan.find(session, null, null);
long i = 0;
Store store = session.getDatabase().getMvStore();
int bufferSize = database.getMaxMemoryRows() / 2;
ArrayList<Row> buffer = new ArrayList<>(bufferSize);
String n = getName() + ":" + index.getName();
int t = MathUtils.convertLongToInt(total);
ArrayList<String> bufferNames = New.arrayList();
while (cursor.next()) {
Row row = cursor.get();
buffer.add(row);
database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n,
MathUtils.convertLongToInt(i++), t);
if (buffer.size() >= bufferSize) {
sortRows(buffer, index);
String mapName = store.nextTemporaryMapName();
index.addRowsToBuffer(buffer, mapName);
bufferNames.add(mapName);
buffer.clear();
}
remaining--;
}
sortRows(buffer, index);
if (!bufferNames.isEmpty()) {
String mapName = store.nextTemporaryMapName();
index.addRowsToBuffer(buffer, mapName);
bufferNames.add(mapName);
buffer.clear();
index.addBufferedRows(bufferNames);
} else {
addRowsToIndex(session, buffer, index);
}
if (SysProperties.CHECK && remaining != 0) {
DbException.throwInternalError("rowcount remaining=" + remaining +
" " + getName());
}
}
private void rebuildIndexBuffered(Session session, Index index) {
Index scan = getScanIndex(session);
long remaining = scan.getRowCount(session);
long total = remaining;
Cursor cursor = scan.find(session, null, null);
long i = 0;
int bufferSize = (int) Math.min(total, database.getMaxMemoryRows());
ArrayList<Row> buffer = new ArrayList<>(bufferSize);
String n = getName() + ":" + index.getName();
int t = MathUtils.convertLongToInt(total);
while (cursor.next()) {
Row row = cursor.get();
buffer.add(row);
database.setProgress(DatabaseEventListener.STATE_CREATE_INDEX, n,
MathUtils.convertLongToInt(i++), t);
if (buffer.size() >= bufferSize) {
addRowsToIndex(session, buffer, index);
}
remaining--;
}
addRowsToIndex(session, buffer, index);
if (SysProperties.CHECK && remaining != 0) {
DbException.throwInternalError("rowcount remaining=" + remaining +
" " + getName());
}
}
private int getMainIndexColumn(IndexType indexType, IndexColumn[] cols) {
if (primaryIndex.getMainIndexColumn() != -1) {
return -1;
}
if (!indexType.isPrimaryKey() || cols.length != 1) {
return -1;
}
IndexColumn first = cols[0];
if (first.sortType != SortOrder.ASCENDING) {
return -1;
}
switch (first.column.getType()) {
case Value.BYTE:
case Value.SHORT:
case Value.INT:
case Value.LONG:
break;
default:
return -1;
}
return first.column.getColumnId();
}
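    // Editorial note: a main index column (and with it an MVDelegateIndex in
    // addIndex above) is only possible for a single-column, ascending
    // PRIMARY KEY of an integer family type; the key value then doubles as
    // the row key of the primary index, so no separate index map is needed.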
private static void addRowsToIndex(Session session, ArrayList<Row> list,
Index index) {
sortRows(list, index);
for (Row row : list) {
index.add(session, row);
}
list.clear();
}
private static void sortRows(ArrayList<Row> list, final Index index) {
Collections.sort(list, new Comparator<Row>() {
@Override
public int compare(Row r1, Row r2) {
return index.compareRows(r1, r2);
}
});
}
@Override
public void removeRow(Session session, Row row) {
lastModificationId = database.getNextModificationDataId();
Transaction t = session.getTransaction();
long savepoint = t.setSavepoint();
try {
for (int i = indexes.size() - 1; i >= 0; i--) {
Index index = indexes.get(i);
index.remove(session, row);
}
} catch (Throwable e) {
t.rollbackToSavepoint(savepoint);
throw DbException.convert(e);
}
analyzeIfRequired(session);
}
@Override
public void truncate(Session session) {
lastModificationId = database.getNextModificationDataId();
for (int i = indexes.size() - 1; i >= 0; i--) {
Index index = indexes.get(i);
index.truncate(session);
}
changesSinceAnalyze = 0;
}
@Override
public void addRow(Session session, Row row) {
lastModificationId = database.getNextModificationDataId();
Transaction t = session.getTransaction();
long savepoint = t.setSavepoint();
try {
for (Index index : indexes) {
index.add(session, row);
}
} catch (Throwable e) {
t.rollbackToSavepoint(savepoint);
DbException de = DbException.convert(e);
if (de.getErrorCode() == ErrorCode.DUPLICATE_KEY_1) {
for (Index index : indexes) {
if (index.getIndexType().isUnique() &&
index instanceof MultiVersionIndex) {
MultiVersionIndex mv = (MultiVersionIndex) index;
if (mv.isUncommittedFromOtherSession(session, row)) {
throw DbException.get(
ErrorCode.CONCURRENT_UPDATE_1,
index.getName());
}
}
}
}
throw de;
}
analyzeIfRequired(session);
}
private void analyzeIfRequired(Session session) {
synchronized (this) {
if (nextAnalyze == 0 || nextAnalyze > changesSinceAnalyze++) {
return;
}
changesSinceAnalyze = 0;
int n = 2 * nextAnalyze;
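            // back off exponentially; if doubling overflowed to a
            // non-positive value, keep the previous threshold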
if (n > 0) {
nextAnalyze = n;
}
}
session.markTableForAnalyze(this);
}
@Override
public void checkSupportAlter() {
// ok
}
@Override
public TableType getTableType() {
return TableType.TABLE;
}
@Override
public Index getScanIndex(Session session) {
return primaryIndex;
}
@Override
public Index getUniqueIndex() {
return primaryIndex;
}
@Override
public ArrayList<Index> getIndexes() {
return indexes;
}
@Override
public long getMaxDataModificationId() {
return lastModificationId;
}
public boolean getContainsLargeObject() {
return containsLargeObject;
}
@Override
public boolean isDeterministic() {
return true;
}
@Override
public boolean canGetRowCount() {
return true;
}
@Override
public boolean canDrop() {
return true;
}
@Override
public void removeChildrenAndResources(Session session) {
if (containsLargeObject) {
// unfortunately, the data is gone on rollback
truncate(session);
database.getLobStorage().removeAllForTable(getId());
database.lockMeta(session);
}
database.getMvStore().removeTable(this);
super.removeChildrenAndResources(session);
// go backwards because database.removeIndex will
// call table.removeIndex
while (indexes.size() > 1) {
Index index = indexes.get(1);
if (index.getName() != null) {
database.removeSchemaObject(session, index);
}
// needed for session temporary indexes
indexes.remove(index);
}
if (SysProperties.CHECK) {
for (SchemaObject obj : database
.getAllSchemaObjects(DbObject.INDEX)) {
Index index = (Index) obj;
if (index.getTable() == this) {
DbException.throwInternalError("index not dropped: " +
index.getName());
}
}
}
primaryIndex.remove(session);
database.removeMeta(session, getId());
close(session);
invalidate();
}
@Override
public long getRowCount(Session session) {
return primaryIndex.getRowCount(session);
}
@Override
public long getRowCountApproximation() {
return primaryIndex.getRowCountApproximation();
}
@Override
public long getDiskSpaceUsed() {
return primaryIndex.getDiskSpaceUsed();
}
@Override
public void checkRename() {
// ok
}
/**
* Get a new transaction.
*
* @return the transaction
*/
Transaction getTransactionBegin() {
// TODO need to commit/rollback the transaction
return transactionStore.begin();
}
@Override
public Column getRowIdColumn() {
if (rowIdColumn == null) {
rowIdColumn = new Column(Column.ROWID, Value.LONG);
rowIdColumn.setTable(this, -1);
}
return rowIdColumn;
}
@Override
public String toString() {
return getSQL();
}
@Override
public boolean isMVStore() {
return true;
}
/**
* Mark the transaction as committed, so that the modification counter of
* the database is incremented.
*/
public void commit() {
if (database != null) {
lastModificationId = database.getNextModificationDataId();
}
}
/**
* Convert the illegal state exception to a database exception.
*
* @param e the illegal state exception
* @return the database exception
*/
DbException convertException(IllegalStateException e) {
if (DataUtils.getErrorCode(e.getMessage()) ==
DataUtils.ERROR_TRANSACTION_LOCKED) {
throw DbException.get(ErrorCode.CONCURRENT_UPDATE_1,
e, getName());
}
return store.convertIllegalStateException(e);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/MVTableEngine.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.io.InputStream;
import java.lang.Thread.UncaughtExceptionHandler;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import org.h2.api.ErrorCode;
import org.h2.api.TableEngine;
import org.h2.command.ddl.CreateTableData;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.FileStore;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;
import org.h2.mvstore.MVStoreTool;
import org.h2.mvstore.db.TransactionStore.Transaction;
import org.h2.mvstore.db.TransactionStore.TransactionMap;
import org.h2.store.InDoubtTransaction;
import org.h2.store.fs.FileChannelInputStream;
import org.h2.store.fs.FileUtils;
import org.h2.table.TableBase;
import org.h2.util.BitField;
import org.h2.util.New;
/**
* A table engine that internally uses the MVStore.
*/
public class MVTableEngine implements TableEngine {
/**
* Initialize the MVStore.
*
* @param db the database
* @return the store
*/
public static Store init(final Database db) {
Store store = db.getMvStore();
if (store != null) {
return store;
}
byte[] key = db.getFileEncryptionKey();
String dbPath = db.getDatabasePath();
MVStore.Builder builder = new MVStore.Builder();
store = new Store();
boolean encrypted = false;
if (dbPath != null) {
String fileName = dbPath + Constants.SUFFIX_MV_FILE;
MVStoreTool.compactCleanUp(fileName);
builder.fileName(fileName);
builder.pageSplitSize(db.getPageSize());
if (db.isReadOnly()) {
builder.readOnly();
} else {
// possibly create the directory
boolean exists = FileUtils.exists(fileName);
if (exists && !FileUtils.canWrite(fileName)) {
// read only
} else {
String dir = FileUtils.getParent(fileName);
FileUtils.createDirectories(dir);
}
}
if (key != null) {
encrypted = true;
char[] password = new char[key.length / 2];
for (int i = 0; i < password.length; i++) {
password[i] = (char) (((key[i + i] & 255) << 16) |
((key[i + i + 1]) & 255));
}
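                    // Editorial note: the (char) cast keeps only the low 16
                    // bits, so the "<< 16" above discards key[i + i] entirely
                    // and each password char is derived from key[i + i + 1]
                    // alone; the derivation is left unchanged here because
                    // altering it would make existing encrypted files
                    // unreadable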
builder.encryptionKey(password);
}
if (db.getSettings().compressData) {
builder.compress();
// use a larger page split size to improve the compression ratio
builder.pageSplitSize(64 * 1024);
}
builder.backgroundExceptionHandler(new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
db.setBackgroundException(DbException.convert(e));
}
});
}
store.open(db, builder, encrypted);
db.setMvStore(store);
return store;
}
@Override
public TableBase createTable(CreateTableData data) {
Database db = data.session.getDatabase();
Store store = init(db);
MVTable table = new MVTable(data, store);
table.init(data.session);
store.tableMap.put(table.getMapName(), table);
return table;
}
/**
* A store with open tables.
*/
public static class Store {
/**
* The map of open tables.
* Key: the map name, value: the table.
*/
final ConcurrentHashMap<String, MVTable> tableMap =
new ConcurrentHashMap<>();
/**
* The store.
*/
private MVStore store;
/**
* The transaction store.
*/
private TransactionStore transactionStore;
private long statisticsStart;
private int temporaryMapId;
private boolean encrypted;
private String fileName;
/**
* Open the store for this database.
*
* @param db the database
* @param builder the builder
* @param encrypted whether the store is encrypted
*/
void open(Database db, MVStore.Builder builder, boolean encrypted) {
this.encrypted = encrypted;
try {
this.store = builder.open();
FileStore fs = store.getFileStore();
if (fs != null) {
this.fileName = fs.getFileName();
}
if (!db.getSettings().reuseSpace) {
store.setReuseSpace(false);
}
this.transactionStore = new TransactionStore(
store,
new ValueDataType(db.getCompareMode(), db, null));
transactionStore.init();
} catch (IllegalStateException e) {
throw convertIllegalStateException(e);
}
}
/**
* Convert the illegal state exception to the correct database
* exception.
*
* @param e the illegal state exception
* @return the database exception
*/
DbException convertIllegalStateException(IllegalStateException e) {
int errorCode = DataUtils.getErrorCode(e.getMessage());
if (errorCode == DataUtils.ERROR_FILE_CORRUPT) {
if (encrypted) {
throw DbException.get(
ErrorCode.FILE_ENCRYPTION_ERROR_1,
e, fileName);
}
} else if (errorCode == DataUtils.ERROR_FILE_LOCKED) {
throw DbException.get(
ErrorCode.DATABASE_ALREADY_OPEN_1,
e, fileName);
} else if (errorCode == DataUtils.ERROR_READING_FAILED) {
throw DbException.get(
ErrorCode.IO_EXCEPTION_1,
e, fileName);
}
throw DbException.get(
ErrorCode.FILE_CORRUPTED_1,
e, fileName);
}
public MVStore getStore() {
return store;
}
public TransactionStore getTransactionStore() {
return transactionStore;
}
public HashMap<String, MVTable> getTables() {
return new HashMap<>(tableMap);
}
/**
* Remove a table.
*
* @param table the table
*/
public void removeTable(MVTable table) {
tableMap.remove(table.getMapName());
}
/**
* Store all pending changes.
*/
public void flush() {
FileStore s = store.getFileStore();
if (s == null || s.isReadOnly()) {
return;
}
if (!store.compact(50, 4 * 1024 * 1024)) {
store.commit();
}
}
/**
* Close the store, without persisting changes.
*/
public void closeImmediately() {
if (store.isClosed()) {
return;
}
store.closeImmediately();
}
/**
* Commit all transactions that are in the committing state, and
* rollback all open transactions.
*/
public void initTransactions() {
List<Transaction> list = transactionStore.getOpenTransactions();
for (Transaction t : list) {
if (t.getStatus() == Transaction.STATUS_COMMITTING) {
t.commit();
} else if (t.getStatus() != Transaction.STATUS_PREPARED) {
t.rollback();
}
}
}
/**
* Remove all temporary maps.
*
* @param objectIds the ids of the objects to keep
*/
public void removeTemporaryMaps(BitField objectIds) {
for (String mapName : store.getMapNames()) {
if (mapName.startsWith("temp.")) {
MVMap<?, ?> map = store.openMap(mapName);
store.removeMap(map);
} else if (mapName.startsWith("table.") || mapName.startsWith("index.")) {
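                    // map names follow the pattern "table.<objectId>" or
                    // "index.<objectId>"; remove the map if the object with
                    // that id no longer exists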
int id = Integer.parseInt(mapName.substring(1 + mapName.indexOf('.')));
if (!objectIds.get(id)) {
ValueDataType keyType = new ValueDataType(null, null, null);
ValueDataType valueType = new ValueDataType(null, null, null);
Transaction t = transactionStore.begin();
TransactionMap<?, ?> m = t.openMap(mapName, keyType, valueType);
transactionStore.removeMap(m);
t.commit();
}
}
}
}
/**
* Get the name of the next available temporary map.
*
* @return the map name
*/
public synchronized String nextTemporaryMapName() {
return "temp." + temporaryMapId++;
}
/**
* Prepare a transaction.
*
* @param session the session
* @param transactionName the transaction name (may be null)
*/
public void prepareCommit(Session session, String transactionName) {
Transaction t = session.getTransaction();
t.setName(transactionName);
t.prepare();
store.commit();
}
public ArrayList<InDoubtTransaction> getInDoubtTransactions() {
List<Transaction> list = transactionStore.getOpenTransactions();
ArrayList<InDoubtTransaction> result = New.arrayList();
for (Transaction t : list) {
if (t.getStatus() == Transaction.STATUS_PREPARED) {
result.add(new MVInDoubtTransaction(store, t));
}
}
return result;
}
/**
* Set the maximum memory to be used by the cache.
*
* @param kb the maximum size in KB
*/
public void setCacheSize(int kb) {
store.setCacheSize(Math.max(1, kb / 1024));
}
public InputStream getInputStream() {
FileChannel fc = store.getFileStore().getEncryptedFile();
if (fc == null) {
fc = store.getFileStore().getFile();
}
return new FileChannelInputStream(fc, false);
}
/**
* Force the changes to disk.
*/
public void sync() {
flush();
store.sync();
}
/**
* Compact the database file, that is, compact blocks that have a low
* fill rate, and move chunks next to each other. This will typically
* shrink the database file. Changes are flushed to the file, and old
* chunks are overwritten.
*
* @param maxCompactTime the maximum time in milliseconds to compact
*/
public void compactFile(long maxCompactTime) {
store.setRetentionTime(0);
long start = System.nanoTime();
while (store.compact(95, 16 * 1024 * 1024)) {
store.sync();
store.compactMoveChunks(95, 16 * 1024 * 1024);
long time = System.nanoTime() - start;
if (time > TimeUnit.MILLISECONDS.toNanos(maxCompactTime)) {
break;
}
}
}
/**
* Close the store. Pending changes are persisted. Chunks with a low
* fill rate are compacted, but old chunks are kept for some time, so
* most likely the database file will not shrink.
*
* @param maxCompactTime the maximum time in milliseconds to compact
*/
public void close(long maxCompactTime) {
try {
if (!store.isClosed() && store.getFileStore() != null) {
boolean compactFully = false;
if (!store.getFileStore().isReadOnly()) {
transactionStore.close();
if (maxCompactTime == Long.MAX_VALUE) {
compactFully = true;
}
}
String fileName = store.getFileStore().getFileName();
store.close();
if (compactFully && FileUtils.exists(fileName)) {
// the file could have been deleted concurrently,
// so only compact if the file still exists
MVStoreTool.compact(fileName, true);
}
}
} catch (IllegalStateException e) {
int errorCode = DataUtils.getErrorCode(e.getMessage());
if (errorCode == DataUtils.ERROR_WRITING_FAILED) {
                    // disk full: close without saving
                } else if (errorCode == DataUtils.ERROR_FILE_CORRUPT) {
                    // wrong encryption key: close without saving
}
store.closeImmediately();
throw DbException.get(ErrorCode.IO_EXCEPTION_1, e, "Closing");
}
}
/**
* Start collecting statistics.
*/
public void statisticsStart() {
FileStore fs = store.getFileStore();
statisticsStart = fs == null ? 0 : fs.getReadCount();
}
/**
* Stop collecting statistics.
*
* @return the statistics
*/
public Map<String, Integer> statisticsEnd() {
HashMap<String, Integer> map = new HashMap<>();
FileStore fs = store.getFileStore();
int reads = fs == null ? 0 : (int) (fs.getReadCount() - statisticsStart);
map.put("reads", reads);
return map;
}
}
/**
* An in-doubt transaction.
*/
private static class MVInDoubtTransaction implements InDoubtTransaction {
private final MVStore store;
private final Transaction transaction;
private int state = InDoubtTransaction.IN_DOUBT;
MVInDoubtTransaction(MVStore store, Transaction transaction) {
this.store = store;
this.transaction = transaction;
}
@Override
public void setState(int state) {
if (state == InDoubtTransaction.COMMIT) {
transaction.commit();
} else {
transaction.rollback();
}
store.commit();
this.state = state;
}
@Override
public String getState() {
switch (state) {
case IN_DOUBT:
return "IN_DOUBT";
case COMMIT:
return "COMMIT";
case ROLLBACK:
return "ROLLBACK";
default:
throw DbException.throwInternalError("state="+state);
}
}
@Override
public String getTransactionName() {
return transaction.getName();
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/TransactionStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.h2.mvstore.Cursor;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;
import org.h2.mvstore.WriteBuffer;
import org.h2.mvstore.type.DataType;
import org.h2.mvstore.type.ObjectDataType;
import org.h2.util.New;
/**
* A store that supports concurrent MVCC read-committed transactions.
*/
public class TransactionStore {
/**
* The store.
*/
final MVStore store;
/**
* The persisted map of prepared transactions.
* Key: transactionId, value: [ status, name ].
*/
final MVMap<Integer, Object[]> preparedTransactions;
/**
* The undo log.
* <p>
* If the first entry for a transaction doesn't have a logId
* of 0, then the transaction is partially committed (which means rollback
* is not possible). Log entries are written before the data is changed
* (write-ahead).
* <p>
* Key: opId, value: [ mapId, key, oldValue ].
*/
final MVMap<Long, Object[]> undoLog;
/**
     * The reader/writer lock for the undo log. It allows multiple selects
     * to be processed in parallel.
*/
final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
/**
* The map of maps.
*/
private final HashMap<Integer, MVMap<Object, VersionedValue>> maps =
new HashMap<>();
private final DataType dataType;
private final BitSet openTransactions = new BitSet();
private boolean init;
private int maxTransactionId = 0xffff;
/**
* The next id of a temporary map.
*/
private int nextTempMapId;
/**
* Create a new transaction store.
*
* @param store the store
*/
public TransactionStore(MVStore store) {
this(store, new ObjectDataType());
}
/**
* Create a new transaction store.
*
* @param store the store
* @param dataType the data type for map keys and values
*/
public TransactionStore(MVStore store, DataType dataType) {
this.store = store;
this.dataType = dataType;
preparedTransactions = store.openMap("openTransactions",
new MVMap.Builder<Integer, Object[]>());
VersionedValueType oldValueType = new VersionedValueType(dataType);
ArrayType undoLogValueType = new ArrayType(new DataType[]{
new ObjectDataType(), dataType, oldValueType
});
MVMap.Builder<Long, Object[]> builder =
new MVMap.Builder<Long, Object[]>().
valueType(undoLogValueType);
undoLog = store.openMap("undoLog", builder);
if (undoLog.getValueType() != undoLogValueType) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_TRANSACTION_CORRUPT,
"Undo map open with a different value type");
}
}
/**
* Initialize the store. This is needed before a transaction can be opened.
* If the transaction store is corrupt, this method can throw an exception,
* in which case the store can only be used for reading.
*/
public synchronized void init() {
init = true;
// remove all temporary maps
for (String mapName : store.getMapNames()) {
if (mapName.startsWith("temp.")) {
MVMap<Object, Integer> temp = openTempMap(mapName);
store.removeMap(temp);
}
}
rwLock.writeLock().lock();
try {
if (undoLog.size() > 0) {
for (Long key : undoLog.keySet()) {
int transactionId = getTransactionId(key);
openTransactions.set(transactionId);
}
}
} finally {
rwLock.writeLock().unlock();
}
}
/**
* Set the maximum transaction id, after which ids are re-used. If the old
* transaction is still in use when re-using an old id, the new transaction
* fails.
*
* @param max the maximum id
*/
public void setMaxTransactionId(int max) {
this.maxTransactionId = max;
}
/**
* Combine the transaction id and the log id to an operation id.
*
* @param transactionId the transaction id
* @param logId the log id
* @return the operation id
*/
static long getOperationId(int transactionId, long logId) {
DataUtils.checkArgument(transactionId >= 0 && transactionId < (1 << 24),
"Transaction id out of range: {0}", transactionId);
DataUtils.checkArgument(logId >= 0 && logId < (1L << 40),
"Transaction log id out of range: {0}", logId);
return ((long) transactionId << 40) | logId;
}
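    // Editorial illustration: getOperationId(3, 5) yields
    // (3L << 40) | 5 = 0x0000030000000005; getTransactionId() and getLogId()
    // below recover 3 and 5 again by shifting and masking.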
/**
* Get the transaction id for the given operation id.
*
* @param operationId the operation id
* @return the transaction id
*/
static int getTransactionId(long operationId) {
return (int) (operationId >>> 40);
}
/**
* Get the log id for the given operation id.
*
* @param operationId the operation id
* @return the log id
*/
static long getLogId(long operationId) {
return operationId & ((1L << 40) - 1);
}
/**
* Get the list of unclosed transactions that have pending writes.
*
* @return the list of transactions (sorted by id)
*/
public List<Transaction> getOpenTransactions() {
rwLock.readLock().lock();
try {
ArrayList<Transaction> list = New.arrayList();
Long key = undoLog.firstKey();
while (key != null) {
int transactionId = getTransactionId(key);
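                // the largest undo-log key below the first possible key of
                // transaction (transactionId + 1) is this transaction's last
                // entry, so its log id + 1 is the next log id to use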
key = undoLog.lowerKey(getOperationId(transactionId + 1, 0));
long logId = getLogId(key) + 1;
Object[] data = preparedTransactions.get(transactionId);
int status;
String name;
if (data == null) {
if (undoLog.containsKey(getOperationId(transactionId, 0))) {
status = Transaction.STATUS_OPEN;
} else {
status = Transaction.STATUS_COMMITTING;
}
name = null;
} else {
status = (Integer) data[0];
name = (String) data[1];
}
Transaction t = new Transaction(this, transactionId, status,
name, logId);
list.add(t);
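                // skip ahead to the first undo-log entry of the next
                // transaction that still has entries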
key = undoLog.ceilingKey(getOperationId(transactionId + 1, 0));
}
return list;
} finally {
rwLock.readLock().unlock();
}
}
/**
* Close the transaction store.
*/
public synchronized void close() {
store.commit();
}
/**
* Begin a new transaction.
*
* @return the transaction
*/
public synchronized Transaction begin() {
int transactionId;
int status;
if (!init) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE,
"Not initialized");
}
transactionId = openTransactions.nextClearBit(1);
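        // ids start at 1 (bit 0 is never used); the lowest currently unused
        // id is picked, so ids of closed transactions get re-used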
if (transactionId > maxTransactionId) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS,
"There are {0} open transactions",
transactionId - 1);
}
openTransactions.set(transactionId);
status = Transaction.STATUS_OPEN;
return new Transaction(this, transactionId, status, null, 0);
}
/**
* Store a transaction.
*
* @param t the transaction
*/
synchronized void storeTransaction(Transaction t) {
if (t.getStatus() == Transaction.STATUS_PREPARED ||
t.getName() != null) {
Object[] v = { t.getStatus(), t.getName() };
preparedTransactions.put(t.getId(), v);
}
}
/**
* Log an entry.
*
* @param t the transaction
* @param logId the log id
* @param mapId the map id
* @param key the key
* @param oldValue the old value
*/
void log(Transaction t, long logId, int mapId,
Object key, Object oldValue) {
Long undoKey = getOperationId(t.getId(), logId);
Object[] log = { mapId, key, oldValue };
rwLock.writeLock().lock();
try {
if (logId == 0) {
if (undoLog.containsKey(undoKey)) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_TOO_MANY_OPEN_TRANSACTIONS,
"An old transaction with the same id " +
"is still open: {0}",
t.getId());
}
}
undoLog.put(undoKey, log);
} finally {
rwLock.writeLock().unlock();
}
}
/**
* Remove a log entry.
*
* @param t the transaction
* @param logId the log id
*/
public void logUndo(Transaction t, long logId) {
Long undoKey = getOperationId(t.getId(), logId);
rwLock.writeLock().lock();
try {
Object[] old = undoLog.remove(undoKey);
if (old == null) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE,
"Transaction {0} was concurrently rolled back",
t.getId());
}
} finally {
rwLock.writeLock().unlock();
}
}
/**
* Remove the given map.
*
* @param <K> the key type
* @param <V> the value type
* @param map the map
*/
synchronized <K, V> void removeMap(TransactionMap<K, V> map) {
maps.remove(map.mapId);
store.removeMap(map.map);
}
/**
* Commit a transaction.
*
* @param t the transaction
* @param maxLogId the last log id
*/
void commit(Transaction t, long maxLogId) {
if (store.isClosed()) {
return;
}
// TODO could synchronize on blocks (100 at a time or so)
rwLock.writeLock().lock();
int oldStatus = t.getStatus();
try {
t.setStatus(Transaction.STATUS_COMMITTING);
for (long logId = 0; logId < maxLogId; logId++) {
Long undoKey = getOperationId(t.getId(), logId);
Object[] op = undoLog.get(undoKey);
if (op == null) {
// partially committed: load next
undoKey = undoLog.ceilingKey(undoKey);
if (undoKey == null ||
getTransactionId(undoKey) != t.getId()) {
break;
}
logId = getLogId(undoKey) - 1;
continue;
}
int mapId = (Integer) op[0];
MVMap<Object, VersionedValue> map = openMap(mapId);
if (map != null) { // might be null if map was removed later
Object key = op[1];
VersionedValue value = map.get(key);
if (value != null) {
// only commit (remove/update) value if we've reached
// last undoLog entry for a given key
if (value.operationId == undoKey) {
if (value.value == null) {
map.remove(key);
} else {
map.put(key, new VersionedValue(0L, value.value));
}
}
}
}
undoLog.remove(undoKey);
}
} finally {
rwLock.writeLock().unlock();
}
endTransaction(t, oldStatus);
}
/**
* Open the map with the given name.
*
* @param <K> the key type
* @param name the map name
* @param keyType the key type
* @param valueType the value type
* @return the map
*/
synchronized <K> MVMap<K, VersionedValue> openMap(String name,
DataType keyType, DataType valueType) {
if (keyType == null) {
keyType = new ObjectDataType();
}
if (valueType == null) {
valueType = new ObjectDataType();
}
VersionedValueType vt = new VersionedValueType(valueType);
MVMap<K, VersionedValue> map;
MVMap.Builder<K, VersionedValue> builder =
new MVMap.Builder<K, VersionedValue>().
keyType(keyType).valueType(vt);
map = store.openMap(name, builder);
@SuppressWarnings("unchecked")
MVMap<Object, VersionedValue> m = (MVMap<Object, VersionedValue>) map;
maps.put(map.getId(), m);
return map;
}
/**
* Open the map with the given id.
*
* @param mapId the id
* @return the map
*/
synchronized MVMap<Object, VersionedValue> openMap(int mapId) {
MVMap<Object, VersionedValue> map = maps.get(mapId);
if (map != null) {
return map;
}
String mapName = store.getMapName(mapId);
if (mapName == null) {
// the map was removed later on
return null;
}
VersionedValueType vt = new VersionedValueType(dataType);
MVMap.Builder<Object, VersionedValue> mapBuilder =
new MVMap.Builder<Object, VersionedValue>().
keyType(dataType).valueType(vt);
map = store.openMap(mapName, mapBuilder);
maps.put(mapId, map);
return map;
}
/**
* Create a temporary map. Such maps are removed when opening the store.
*
* @return the map
*/
synchronized MVMap<Object, Integer> createTempMap() {
String mapName = "temp." + nextTempMapId++;
return openTempMap(mapName);
}
/**
* Open a temporary map.
*
* @param mapName the map name
* @return the map
*/
MVMap<Object, Integer> openTempMap(String mapName) {
MVMap.Builder<Object, Integer> mapBuilder =
new MVMap.Builder<Object, Integer>().
keyType(dataType);
return store.openMap(mapName, mapBuilder);
}
/**
     * End this transaction.
     *
     * @param t the transaction
     * @param oldStatus the old status of this transaction
*/
synchronized void endTransaction(Transaction t, int oldStatus) {
if (oldStatus == Transaction.STATUS_PREPARED) {
preparedTransactions.remove(t.getId());
}
t.setStatus(Transaction.STATUS_CLOSED);
openTransactions.clear(t.transactionId);
if (oldStatus == Transaction.STATUS_PREPARED || store.getAutoCommitDelay() == 0) {
store.commit();
return;
}
            // if no transaction is open and many changes have accumulated,
            // commit now, so that the transaction log does not need to be
            // stored
if (undoLog.isEmpty()) {
int unsaved = store.getUnsavedMemory();
int max = store.getAutoCommitMemory();
// save at 3/4 capacity
if (unsaved * 4 > max * 3) {
store.commit();
}
}
}
/**
* Rollback to an old savepoint.
*
* @param t the transaction
* @param maxLogId the last log id
* @param toLogId the log id to roll back to
*/
void rollbackTo(Transaction t, long maxLogId, long toLogId) {
// TODO could synchronize on blocks (100 at a time or so)
rwLock.writeLock().lock();
try {
for (long logId = maxLogId - 1; logId >= toLogId; logId--) {
Long undoKey = getOperationId(t.getId(), logId);
Object[] op = undoLog.get(undoKey);
if (op == null) {
// partially rolled back: load previous
undoKey = undoLog.floorKey(undoKey);
if (undoKey == null ||
getTransactionId(undoKey) != t.getId()) {
break;
}
logId = getLogId(undoKey) + 1;
continue;
}
int mapId = ((Integer) op[0]).intValue();
MVMap<Object, VersionedValue> map = openMap(mapId);
if (map != null) {
Object key = op[1];
VersionedValue oldValue = (VersionedValue) op[2];
if (oldValue == null) {
// this transaction added the value
map.remove(key);
} else {
// this transaction updated the value
map.put(key, oldValue);
}
}
undoLog.remove(undoKey);
}
} finally {
rwLock.writeLock().unlock();
}
}
/**
* Get the changes of the given transaction, starting from the latest log id
* back to the given log id.
*
* @param t the transaction
* @param maxLogId the maximum log id
* @param toLogId the minimum log id
* @return the changes
*/
Iterator<Change> getChanges(final Transaction t, final long maxLogId,
final long toLogId) {
return new Iterator<Change>() {
private long logId = maxLogId - 1;
private Change current;
{
fetchNext();
}
private void fetchNext() {
rwLock.writeLock().lock();
try {
while (logId >= toLogId) {
Long undoKey = getOperationId(t.getId(), logId);
Object[] op = undoLog.get(undoKey);
logId--;
if (op == null) {
// partially rolled back: load previous
undoKey = undoLog.floorKey(undoKey);
if (undoKey == null ||
getTransactionId(undoKey) != t.getId()) {
break;
}
logId = getLogId(undoKey);
continue;
}
int mapId = ((Integer) op[0]).intValue();
MVMap<Object, VersionedValue> m = openMap(mapId);
if (m == null) {
// map was removed later on
} else {
current = new Change();
current.mapName = m.getName();
current.key = op[1];
VersionedValue oldValue = (VersionedValue) op[2];
current.value = oldValue == null ?
null : oldValue.value;
return;
}
}
} finally {
rwLock.writeLock().unlock();
}
current = null;
}
@Override
public boolean hasNext() {
return current != null;
}
@Override
public Change next() {
if (current == null) {
throw DataUtils.newUnsupportedOperationException("no data");
}
Change result = current;
fetchNext();
return result;
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException("remove");
}
};
}
/**
* A change in a map.
*/
public static class Change {
/**
* The name of the map where the change occurred.
*/
public String mapName;
/**
* The key.
*/
public Object key;
/**
* The value.
*/
public Object value;
}
/**
* A transaction.
*/
public static class Transaction {
/**
* The status of a closed transaction (committed or rolled back).
*/
public static final int STATUS_CLOSED = 0;
/**
* The status of an open transaction.
*/
public static final int STATUS_OPEN = 1;
/**
* The status of a prepared transaction.
*/
public static final int STATUS_PREPARED = 2;
/**
* The status of a transaction that is being committed, but possibly not
         * yet finished. A transaction can go into this state when the store is
* closed while the transaction is committing. When opening a store,
* such transactions should be committed.
*/
public static final int STATUS_COMMITTING = 3;
/**
* The transaction store.
*/
final TransactionStore store;
/**
* The transaction id.
*/
final int transactionId;
/**
         * The log id of the next undo log entry to write for this
         * transaction (which equals the number of entries written so far).
*/
long logId;
private int status;
private String name;
Transaction(TransactionStore store, int transactionId, int status,
String name, long logId) {
this.store = store;
this.transactionId = transactionId;
this.status = status;
this.name = name;
this.logId = logId;
}
public int getId() {
return transactionId;
}
public int getStatus() {
return status;
}
void setStatus(int status) {
this.status = status;
}
public void setName(String name) {
checkNotClosed();
this.name = name;
store.storeTransaction(this);
}
public String getName() {
return name;
}
/**
* Create a new savepoint.
*
* @return the savepoint id
*/
public long setSavepoint() {
return logId;
}
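        // Usage sketch (editorial; this mirrors what MVTable.addRow and
        // MVTable.removeRow do):
        //
        //   long sp = tx.setSavepoint();
        //   try {
        //       map.put(key, value);            // writes undo log entries
        //   } catch (Throwable e) {
        //       tx.rollbackToSavepoint(sp);     // undoes entries >= sp
        //       throw e;
        //   }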
/**
* Add a log entry.
*
* @param mapId the map id
* @param key the key
* @param oldValue the old value
*/
void log(int mapId, Object key, Object oldValue) {
store.log(this, logId, mapId, key, oldValue);
// only increment the log id if logging was successful
logId++;
}
/**
* Remove the last log entry.
*/
void logUndo() {
store.logUndo(this, --logId);
}
/**
* Open a data map.
*
* @param <K> the key type
* @param <V> the value type
* @param name the name of the map
* @return the transaction map
*/
public <K, V> TransactionMap<K, V> openMap(String name) {
return openMap(name, null, null);
}
/**
* Open the map to store the data.
*
* @param <K> the key type
* @param <V> the value type
* @param name the name of the map
* @param keyType the key data type
* @param valueType the value data type
* @return the transaction map
*/
public <K, V> TransactionMap<K, V> openMap(String name,
DataType keyType, DataType valueType) {
checkNotClosed();
MVMap<K, VersionedValue> map = store.openMap(name, keyType,
valueType);
int mapId = map.getId();
return new TransactionMap<>(this, map, mapId);
}
/**
* Open the transactional version of the given map.
*
* @param <K> the key type
* @param <V> the value type
* @param map the base map
* @return the transactional map
*/
public <K, V> TransactionMap<K, V> openMap(
MVMap<K, VersionedValue> map) {
checkNotClosed();
int mapId = map.getId();
return new TransactionMap<>(this, map, mapId);
}
/**
* Prepare the transaction. Afterwards, the transaction can only be
* committed or rolled back.
*/
public void prepare() {
checkNotClosed();
status = STATUS_PREPARED;
store.storeTransaction(this);
}
/**
* Commit the transaction. Afterwards, this transaction is closed.
*/
public void commit() {
checkNotClosed();
store.commit(this, logId);
}
/**
* Roll back to the given savepoint. This is only allowed if the
* transaction is open.
*
* @param savepointId the savepoint id
*/
public void rollbackToSavepoint(long savepointId) {
checkNotClosed();
store.rollbackTo(this, logId, savepointId);
logId = savepointId;
}
/**
* Roll the transaction back. Afterwards, this transaction is closed.
*/
public void rollback() {
checkNotClosed();
store.rollbackTo(this, logId, 0);
store.endTransaction(this, status);
}
/**
* Get the list of changes, starting with the latest change, up to the
* given savepoint (in reverse order than they occurred). The value of
* the change is the value before the change was applied.
*
* @param savepointId the savepoint id, 0 meaning the beginning of the
* transaction
* @return the changes
*/
public Iterator<Change> getChanges(long savepointId) {
return store.getChanges(this, logId, savepointId);
}
/**
* Check whether this transaction is open or prepared.
*/
void checkNotClosed() {
if (status == STATUS_CLOSED) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_CLOSED, "Transaction is closed");
}
}
/**
* Remove the map.
*
* @param map the map
*/
public <K, V> void removeMap(TransactionMap<K, V> map) {
store.removeMap(map);
}
@Override
public String toString() {
return "" + transactionId;
}
}
/**
* A map that supports transactions.
*
* @param <K> the key type
* @param <V> the value type
*/
public static class TransactionMap<K, V> {
/**
* The map id.
*/
final int mapId;
/**
* If a record was read that was updated by this transaction, and the
* update occurred before this log id, the older version is read. This
* is so that changes are not immediately visible, to support statement
* processing (for example "update test set id = id + 1").
*/
long readLogId = Long.MAX_VALUE;
/**
* The map used for writing (the latest version).
* <p>
* Key: key the key of the data.
* Value: { transactionId, oldVersion, value }
*/
final MVMap<K, VersionedValue> map;
/**
* The transaction which is used for this map.
*/
final Transaction transaction;
TransactionMap(Transaction transaction, MVMap<K, VersionedValue> map,
int mapId) {
this.transaction = transaction;
this.map = map;
this.mapId = mapId;
}
/**
* Set the savepoint. Afterwards, reads are based on the specified
* savepoint.
*
* @param savepoint the savepoint
*/
public void setSavepoint(long savepoint) {
this.readLogId = savepoint;
}
/**
* Get a clone of this map for the given transaction.
*
* @param transaction the transaction
* @param savepoint the savepoint
* @return the map
*/
public TransactionMap<K, V> getInstance(Transaction transaction,
long savepoint) {
TransactionMap<K, V> m =
new TransactionMap<>(transaction, map, mapId);
m.setSavepoint(savepoint);
return m;
}
/**
* Get the size of the raw map. This includes uncommitted entries, and
* transiently removed entries, so it is the maximum number of entries.
*
* @return the maximum size
*/
public long sizeAsLongMax() {
return map.sizeAsLong();
}
/**
* Get the size of the map as seen by this transaction.
*
* @return the size
*/
public long sizeAsLong() {
transaction.store.rwLock.readLock().lock();
try {
long sizeRaw = map.sizeAsLong();
MVMap<Long, Object[]> undo = transaction.store.undoLog;
long undoLogSize;
synchronized (undo) {
undoLogSize = undo.sizeAsLong();
}
if (undoLogSize == 0) {
return sizeRaw;
}
if (undoLogSize > sizeRaw) {
// the undo log is larger than the map -
// count the entries of the map
long size = 0;
Cursor<K, VersionedValue> cursor = map.cursor(null);
while (cursor.hasNext()) {
K key = cursor.next();
// cursor.getValue() returns outdated value
VersionedValue data = map.get(key);
data = getValue(key, readLogId, data);
if (data != null && data.value != null) {
size++;
}
}
return size;
}
// the undo log is smaller than the map -
// scan the undo log and subtract invisible entries
synchronized (undo) {
// re-fetch in case any transaction was committed now
long size = map.sizeAsLong();
MVMap<Object, Integer> temp = transaction.store
.createTempMap();
try {
for (Entry<Long, Object[]> e : undo.entrySet()) {
Object[] op = e.getValue();
int m = (Integer) op[0];
if (m != mapId) {
// a different map - ignore
continue;
}
@SuppressWarnings("unchecked")
K key = (K) op[1];
if (get(key) == null) {
Integer old = temp.put(key, 1);
                            // count each key only once (there might be
                            // multiple changes for the same key)
if (old == null) {
size--;
}
}
}
} finally {
transaction.store.store.removeMap(temp);
}
return size;
}
} finally {
transaction.store.rwLock.readLock().unlock();
}
}
/**
* Remove an entry.
* <p>
         * If the row is locked by another open transaction, this method fails
         * immediately; callers are expected to retry until the lock timeout.
         *
         * @param key the key
         * @throws IllegalStateException if the entry is locked by another
         *             transaction
*/
public V remove(K key) {
return set(key, null);
}
/**
* Update the value for the given key.
* <p>
         * If the row is locked by another open transaction, this method fails
         * immediately; callers are expected to retry until the lock timeout.
         *
         * @param key the key
         * @param value the new value (not null)
         * @return the old value
         * @throws IllegalStateException if the entry is locked by another
         *             transaction
*/
public V put(K key, V value) {
DataUtils.checkArgument(value != null, "The value may not be null");
return set(key, value);
}
/**
* Update the value for the given key, without adding an undo log entry.
*
* @param key the key
* @param value the value
* @return the old value
*/
@SuppressWarnings("unchecked")
public V putCommitted(K key, V value) {
DataUtils.checkArgument(value != null, "The value may not be null");
VersionedValue newValue = new VersionedValue(0L, value);
VersionedValue oldValue = map.put(key, newValue);
return (V) (oldValue == null ? null : oldValue.value);
}
private V set(K key, V value) {
transaction.checkNotClosed();
V old = get(key);
boolean ok = trySet(key, value, false);
if (ok) {
return old;
}
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_TRANSACTION_LOCKED, "Entry is locked");
}
/**
* Try to remove the value for the given key.
* <p>
* This will fail if the row is locked by another transaction (that
* means, if another open transaction changed the row).
*
* @param key the key
* @return whether the entry could be removed
*/
public boolean tryRemove(K key) {
return trySet(key, null, false);
}
/**
* Try to update the value for the given key.
* <p>
* This will fail if the row is locked by another transaction (that
* means, if another open transaction changed the row).
*
* @param key the key
* @param value the new value
* @return whether the entry could be updated
*/
public boolean tryPut(K key, V value) {
DataUtils.checkArgument(value != null, "The value may not be null");
return trySet(key, value, false);
}
/**
* Try to set or remove the value. When updating only unchanged entries,
* then the value is only changed if it was not changed after opening
* the map.
*
* @param key the key
* @param value the new value (null to remove the value)
* @param onlyIfUnchanged only set the value if it was not changed (by
* this or another transaction) since the map was opened
* @return true if the value was set, false if there was a concurrent
* update
*/
public boolean trySet(K key, V value, boolean onlyIfUnchanged) {
VersionedValue current = map.get(key);
if (onlyIfUnchanged) {
VersionedValue old = getValue(key, readLogId);
if (!map.areValuesEqual(old, current)) {
                    int tx = getTransactionId(current.operationId);
if (tx == transaction.transactionId) {
if (value == null) {
// ignore removing an entry
// if it was added or changed
// in the same statement
return true;
} else if (current.value == null) {
// add an entry that was removed
// in the same statement
} else {
return false;
}
} else {
return false;
}
}
}
VersionedValue newValue = new VersionedValue(
getOperationId(transaction.transactionId, transaction.logId),
value);
if (current == null) {
// a new value
transaction.log(mapId, key, current);
VersionedValue old = map.putIfAbsent(key, newValue);
if (old != null) {
transaction.logUndo();
return false;
}
return true;
}
long id = current.operationId;
if (id == 0) {
// committed
transaction.log(mapId, key, current);
// the transaction is committed:
// overwrite the value
if (!map.replace(key, current, newValue)) {
// somebody else was faster
transaction.logUndo();
return false;
}
return true;
}
int tx = getTransactionId(current.operationId);
if (tx == transaction.transactionId) {
// added or updated by this transaction
transaction.log(mapId, key, current);
if (!map.replace(key, current, newValue)) {
// strange, somebody overwrote the value
// even though the change was not committed
transaction.logUndo();
return false;
}
return true;
}
// the transaction is not yet committed
return false;
}
/**
* Get the value for the given key at the time when this map was opened.
*
* @param key the key
* @return the value or null
*/
public V get(K key) {
return get(key, readLogId);
}
/**
* Get the most recent value for the given key.
*
* @param key the key
* @return the value or null
*/
public V getLatest(K key) {
return get(key, Long.MAX_VALUE);
}
/**
* Whether the map contains the key.
*
* @param key the key
* @return true if the map contains an entry for this key
*/
public boolean containsKey(K key) {
return get(key) != null;
}
/**
* Get the value for the given key.
*
* @param key the key
* @param maxLogId the maximum log id
* @return the value or null
*/
@SuppressWarnings("unchecked")
public V get(K key, long maxLogId) {
VersionedValue data = getValue(key, maxLogId);
return data == null ? null : (V) data.value;
}
/**
* Whether the entry for this key was added or removed from this
* session.
*
* @param key the key
* @return true if yes
*/
public boolean isSameTransaction(K key) {
VersionedValue data = map.get(key);
if (data == null) {
// doesn't exist or deleted by a committed transaction
return false;
}
int tx = getTransactionId(data.operationId);
return tx == transaction.transactionId;
}
private VersionedValue getValue(K key, long maxLog) {
transaction.store.rwLock.readLock().lock();
try {
VersionedValue data = map.get(key);
return getValue(key, maxLog, data);
} finally {
transaction.store.rwLock.readLock().unlock();
}
}
/**
* Get the versioned value for the given key.
*
* @param key the key
* @param maxLog the maximum log id of the entry
* @param data the value stored in the main map
* @return the value
*/
VersionedValue getValue(K key, long maxLog, VersionedValue data) {
while (true) {
if (data == null) {
// doesn't exist or deleted by a committed transaction
return null;
}
long id = data.operationId;
if (id == 0) {
// it is committed
return data;
}
int tx = getTransactionId(id);
if (tx == transaction.transactionId) {
// added by this transaction
if (getLogId(id) < maxLog) {
return data;
}
}
// get the value before the uncommitted transaction
Object[] d;
d = transaction.store.undoLog.get(id);
if (d == null) {
if (transaction.store.store.isReadOnly()) {
// uncommitted transaction for a read-only store
return null;
}
// this entry should be committed or rolled back
// in the meantime (the transaction might still be open)
// or it might be changed again in a different
// transaction (possibly one with the same id)
data = map.get(key);
} else {
data = (VersionedValue) d[2];
}
}
}
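        // Editorial note: the loop above implements the MVCC visibility rule:
        // a committed value (operationId == 0) is returned as-is; an
        // uncommitted value is visible only to its own transaction (and only
        // below the statement's read log id); every other reader follows the
        // undo log back to the value that existed before the open transaction.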
/**
* Check whether this map is closed.
*
* @return true if closed
*/
public boolean isClosed() {
return map.isClosed();
}
/**
* Clear the map.
*/
public void clear() {
// TODO truncate transactionally?
map.clear();
}
/**
* Get the first key.
*
* @return the first key, or null if empty
*/
public K firstKey() {
Iterator<K> it = keyIterator(null);
return it.hasNext() ? it.next() : null;
}
/**
* Get the last key.
*
* @return the last key, or null if empty
*/
public K lastKey() {
K k = map.lastKey();
while (k != null && get(k) == null) {
k = map.lowerKey(k);
}
return k;
}
/**
* Get the smallest key that is larger than the given key, or null if no
* such key exists.
*
* @param key the key (may not be null)
* @return the result
*/
public K higherKey(K key) {
do {
key = map.higherKey(key);
} while (key != null && get(key) == null);
return key;
}
/**
* Get the smallest key that is larger than or equal to this key,
* or null if no such key exists.
*
* @param key the key (may not be null)
* @return the result
*/
public K ceilingKey(K key) {
Iterator<K> it = keyIterator(key);
return it.hasNext() ? it.next() : null;
}
/**
* Get one of the previous or next keys. There might be no value
* available for the returned key.
*
* @param key the key (may not be null)
* @param offset how many keys to skip (-1 for previous, 1 for next)
* @return the key
*/
public K relativeKey(K key, long offset) {
K k = offset > 0 ? map.ceilingKey(key) : map.floorKey(key);
if (k == null) {
return k;
}
long index = map.getKeyIndex(k);
return map.getKey(index + offset);
}
/**
* Get the largest key that is smaller than or equal to this key,
* or null if no such key exists.
*
* @param key the key (may not be null)
* @return the result
*/
public K floorKey(K key) {
key = map.floorKey(key);
while (key != null && get(key) == null) {
// Use lowerKey() for the next attempts, otherwise we'll get an infinite loop
key = map.lowerKey(key);
}
return key;
}
/**
* Get the largest key that is smaller than the given key, or null if no
* such key exists.
*
* @param key the key (may not be null)
* @return the result
*/
public K lowerKey(K key) {
do {
key = map.lowerKey(key);
} while (key != null && get(key) == null);
return key;
}
/**
* Iterate over keys.
*
* @param from the first key to return
* @return the iterator
*/
public Iterator<K> keyIterator(K from) {
return keyIterator(from, false);
}
/**
* Iterate over keys.
*
* @param from the first key to return
* @param includeUncommitted whether uncommitted entries should be
* included
* @return the iterator
*/
public Iterator<K> keyIterator(final K from, final boolean includeUncommitted) {
return new Iterator<K>() {
private K currentKey = from;
private Cursor<K, VersionedValue> cursor = map.cursor(currentKey);
{
fetchNext();
}
private void fetchNext() {
while (cursor.hasNext()) {
K k;
try {
k = cursor.next();
} catch (IllegalStateException e) {
// TODO this is a bit ugly
if (DataUtils.getErrorCode(e.getMessage()) ==
DataUtils.ERROR_CHUNK_NOT_FOUND) {
cursor = map.cursor(currentKey);
                            // the new cursor most likely returns the current
                            // key again; that one needs to be skipped
if (!cursor.hasNext()) {
break;
}
cursor.next();
if (!cursor.hasNext()) {
break;
}
k = cursor.next();
} else {
throw e;
}
}
currentKey = k;
if (includeUncommitted) {
return;
}
if (containsKey(k)) {
return;
}
}
currentKey = null;
}
@Override
public boolean hasNext() {
return currentKey != null;
}
@Override
public K next() {
K result = currentKey;
fetchNext();
return result;
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException(
"Removing is not supported");
}
};
}
/**
* Iterate over entries.
*
* @param from the first key to return
* @param to the last key to return
* @return the iterator
*/
public Iterator<Entry<K, V>> entryIterator(final K from, final K to) {
return new Iterator<Entry<K, V>>() {
private Entry<K, V> current;
private K currentKey = from;
private Cursor<K, VersionedValue> cursor = map.cursor(currentKey);
{
fetchNext();
}
private void fetchNext() {
while (cursor.hasNext()) {
transaction.store.rwLock.readLock().lock();
try {
K k;
try {
k = cursor.next();
} catch (IllegalStateException e) {
// TODO this is a bit ugly
if (DataUtils.getErrorCode(e.getMessage()) ==
DataUtils.ERROR_CHUNK_NOT_FOUND) {
cursor = map.cursor(currentKey);
                                // the new cursor most likely returns the
                                // current key again; that one needs to be
                                // skipped
if (!cursor.hasNext()) {
break;
}
cursor.next();
if (!cursor.hasNext()) {
break;
}
k = cursor.next();
} else {
throw e;
}
}
final K key = k;
if (to != null && map.getKeyType().compare(k, to) > 0) {
break;
}
// cursor.getValue() returns outdated value
VersionedValue data = map.get(key);
data = getValue(key, readLogId, data);
if (data != null && data.value != null) {
@SuppressWarnings("unchecked")
final V value = (V) data.value;
current = new DataUtils.MapEntry<>(key, value);
currentKey = key;
return;
}
} finally {
transaction.store.rwLock.readLock().unlock();
}
}
current = null;
currentKey = null;
}
@Override
public boolean hasNext() {
return current != null;
}
@Override
public Entry<K, V> next() {
Entry<K, V> result = current;
fetchNext();
return result;
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException(
"Removing is not supported");
}
};
}
/**
* Iterate over keys.
*
* @param iterator the iterator to wrap
* @param includeUncommitted whether uncommitted entries should be
* included
* @return the iterator
*/
public Iterator<K> wrapIterator(final Iterator<K> iterator,
final boolean includeUncommitted) {
// TODO duplicate code for wrapIterator and entryIterator
return new Iterator<K>() {
private K current;
{
fetchNext();
}
private void fetchNext() {
while (iterator.hasNext()) {
current = iterator.next();
if (includeUncommitted) {
return;
}
if (containsKey(current)) {
return;
}
}
current = null;
}
@Override
public boolean hasNext() {
return current != null;
}
@Override
public K next() {
K result = current;
fetchNext();
return result;
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException(
"Removing is not supported");
}
};
}
public Transaction getTransaction() {
return transaction;
}
public DataType getKeyType() {
return map.getKeyType();
}
}
/**
* A versioned value (possibly null). It contains a pointer to the old
* value, and the value itself.
*/
static class VersionedValue {
/**
* The operation id.
*/
final long operationId;
/**
* The value.
*/
final Object value;
VersionedValue(long operationId, Object value) {
this.operationId = operationId;
this.value = value;
}
@Override
public String toString() {
return value + (operationId == 0 ? "" : (
" " +
getTransactionId(operationId) + "/" +
getLogId(operationId)));
}
}
/**
* The value type for a versioned value.
*/
public static class VersionedValueType implements DataType {
private final DataType valueType;
VersionedValueType(DataType valueType) {
this.valueType = valueType;
}
@Override
public int getMemory(Object obj) {
VersionedValue v = (VersionedValue) obj;
return valueType.getMemory(v.value) + 8;
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj == bObj) {
return 0;
}
VersionedValue a = (VersionedValue) aObj;
VersionedValue b = (VersionedValue) bObj;
            int comp = Long.compare(a.operationId, b.operationId);
            if (comp == 0) {
                return valueType.compare(a.value, b.value);
            }
            return comp;
}
@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
if (buff.get() == 0) {
// fast path (no op ids or null entries)
for (int i = 0; i < len; i++) {
obj[i] = new VersionedValue(0L, valueType.read(buff));
}
} else {
// slow path (some entries may be null)
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
}
@Override
public Object read(ByteBuffer buff) {
long operationId = DataUtils.readVarLong(buff);
Object value;
if (buff.get() == 1) {
value = valueType.read(buff);
} else {
value = null;
}
return new VersionedValue(operationId, value);
}
@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
boolean fastPath = true;
for (int i = 0; i < len; i++) {
VersionedValue v = (VersionedValue) obj[i];
if (v.operationId != 0 || v.value == null) {
fastPath = false;
}
}
if (fastPath) {
buff.put((byte) 0);
for (int i = 0; i < len; i++) {
VersionedValue v = (VersionedValue) obj[i];
valueType.write(buff, v.value);
}
} else {
// slow path:
// store op ids, and some entries may be null
buff.put((byte) 1);
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
}
@Override
public void write(WriteBuffer buff, Object obj) {
VersionedValue v = (VersionedValue) obj;
buff.putVarLong(v.operationId);
if (v.value == null) {
buff.put((byte) 0);
} else {
buff.put((byte) 1);
valueType.write(buff, v.value);
}
}
}
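// Round-trip sketch (added for illustration; the helper is hypothetical
// and assumes WriteBuffer's default constructor): writes a single
// committed value (operation id 0) with the format above and reads it back.
static VersionedValue exampleRoundTrip(VersionedValueType type, Object value) {
WriteBuffer out = new WriteBuffer();
type.write(out, new VersionedValue(0L, value));
ByteBuffer in = out.getBuffer();
in.flip(); // switch the buffer from writing to reading
return (VersionedValue) type.read(in);
}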
/**
* A data type that contains an array of objects with the specified data
* types.
*/
public static class ArrayType implements DataType {
private final int arrayLength;
private final DataType[] elementTypes;
ArrayType(DataType[] elementTypes) {
this.arrayLength = elementTypes.length;
this.elementTypes = elementTypes;
}
@Override
public int getMemory(Object obj) {
Object[] array = (Object[]) obj;
int size = 0;
for (int i = 0; i < arrayLength; i++) {
DataType t = elementTypes[i];
Object o = array[i];
if (o != null) {
size += t.getMemory(o);
}
}
return size;
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj == bObj) {
return 0;
}
Object[] a = (Object[]) aObj;
Object[] b = (Object[]) bObj;
for (int i = 0; i < arrayLength; i++) {
DataType t = elementTypes[i];
int comp = t.compare(a[i], b[i]);
if (comp != 0) {
return comp;
}
}
return 0;
}
@Override
public void read(ByteBuffer buff, Object[] obj,
int len, boolean key) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
@Override
public void write(WriteBuffer buff, Object[] obj,
int len, boolean key) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
@Override
public void write(WriteBuffer buff, Object obj) {
Object[] array = (Object[]) obj;
for (int i = 0; i < arrayLength; i++) {
DataType t = elementTypes[i];
Object o = array[i];
if (o == null) {
buff.put((byte) 0);
} else {
buff.put((byte) 1);
t.write(buff, o);
}
}
}
@Override
public Object read(ByteBuffer buff) {
Object[] array = new Object[arrayLength];
for (int i = 0; i < arrayLength; i++) {
DataType t = elementTypes[i];
if (buff.get() == 1) {
array[i] = t.read(buff);
}
}
return array;
}
}
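// Construction sketch (added for illustration; the helper is hypothetical):
// an ArrayType for two-element rows. ObjectDataType serves as a generic
// element type purely for this demo; real callers pass the concrete types.
static ArrayType exampleTwoColumnType() {
DataType generic = new org.h2.mvstore.type.ObjectDataType();
return new ArrayType(new DataType[] { generic, generic });
}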
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/db/ValueDataType.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.db;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.Arrays;
import org.h2.api.ErrorCode;
import org.h2.message.DbException;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.WriteBuffer;
import org.h2.mvstore.rtree.SpatialDataType;
import org.h2.mvstore.rtree.SpatialKey;
import org.h2.mvstore.type.DataType;
import org.h2.result.SortOrder;
import org.h2.store.DataHandler;
import org.h2.tools.SimpleResultSet;
import org.h2.util.JdbcUtils;
import org.h2.util.Utils;
import org.h2.value.CompareMode;
import org.h2.value.Value;
import org.h2.value.ValueArray;
import org.h2.value.ValueBoolean;
import org.h2.value.ValueByte;
import org.h2.value.ValueBytes;
import org.h2.value.ValueDate;
import org.h2.value.ValueDecimal;
import org.h2.value.ValueDouble;
import org.h2.value.ValueFloat;
import org.h2.value.ValueGeometry;
import org.h2.value.ValueInt;
import org.h2.value.ValueJavaObject;
import org.h2.value.ValueLobDb;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
import org.h2.value.ValueResultSet;
import org.h2.value.ValueShort;
import org.h2.value.ValueString;
import org.h2.value.ValueStringFixed;
import org.h2.value.ValueStringIgnoreCase;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;
import org.h2.value.ValueTimestampTimeZone;
import org.h2.value.ValueUuid;
/**
* A row type.
*/
public class ValueDataType implements DataType {
private static final int INT_0_15 = 32;
private static final int LONG_0_7 = 48;
private static final int DECIMAL_0_1 = 56;
private static final int DECIMAL_SMALL_0 = 58;
private static final int DECIMAL_SMALL = 59;
private static final int DOUBLE_0_1 = 60;
private static final int FLOAT_0_1 = 62;
private static final int BOOLEAN_FALSE = 64;
private static final int BOOLEAN_TRUE = 65;
private static final int INT_NEG = 66;
private static final int LONG_NEG = 67;
private static final int STRING_0_31 = 68;
private static final int BYTES_0_31 = 100;
private static final int SPATIAL_KEY_2D = 132;
private static final int CUSTOM_DATA_TYPE = 133;
final DataHandler handler;
final CompareMode compareMode;
final int[] sortTypes;
SpatialDataType spatialType;
public ValueDataType(CompareMode compareMode, DataHandler handler,
int[] sortTypes) {
this.compareMode = compareMode;
this.handler = handler;
this.sortTypes = sortTypes;
}
private SpatialDataType getSpatialDataType() {
if (spatialType == null) {
spatialType = new SpatialDataType(2);
}
return spatialType;
}
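// Construction sketch (added for illustration; the helper is hypothetical
// and assumes the two-argument CompareMode.getInstance overload): a row
// type for two ascending columns. Passing a null DataHandler only works
// when no value needs one (e.g. no LOBs).
static ValueDataType exampleRowType() {
return new ValueDataType(CompareMode.getInstance(null, 0), null,
new int[] { SortOrder.ASCENDING, SortOrder.ASCENDING });
}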
@Override
public int compare(Object a, Object b) {
if (a == b) {
return 0;
}
if (a instanceof ValueArray && b instanceof ValueArray) {
Value[] ax = ((ValueArray) a).getList();
Value[] bx = ((ValueArray) b).getList();
int al = ax.length;
int bl = bx.length;
int len = Math.min(al, bl);
for (int i = 0; i < len; i++) {
int sortType = sortTypes == null ? SortOrder.ASCENDING : sortTypes[i];
int comp = compareValues(ax[i], bx[i], sortType);
if (comp != 0) {
return comp;
}
}
if (len < al) {
return -1;
} else if (len < bl) {
return 1;
}
return 0;
}
return compareValues((Value) a, (Value) b, SortOrder.ASCENDING);
}
private int compareValues(Value a, Value b, int sortType) {
if (a == b) {
return 0;
}
// null is never stored;
// comparison with null is used to retrieve all entries
// in which case null is always lower than all entries
// (even for descending ordered indexes)
if (a == null) {
return -1;
} else if (b == null) {
return 1;
}
boolean aNull = a == ValueNull.INSTANCE;
boolean bNull = b == ValueNull.INSTANCE;
if (aNull || bNull) {
return SortOrder.compareNull(aNull, sortType);
}
int comp = a.compareTypeSafe(b, compareMode);
if ((sortType & SortOrder.DESCENDING) != 0) {
comp = -comp;
}
return comp;
}
@Override
public int getMemory(Object obj) {
if (obj instanceof SpatialKey) {
return getSpatialDataType().getMemory(obj);
}
return getMemory((Value) obj);
}
private static int getMemory(Value v) {
return v == null ? 0 : v.getMemory();
}
@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
@Override
public Object read(ByteBuffer buff) {
return readValue(buff);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (obj instanceof SpatialKey) {
buff.put((byte) SPATIAL_KEY_2D);
getSpatialDataType().write(buff, obj);
return;
}
Value x = (Value) obj;
writeValue(buff, x);
}
private void writeValue(WriteBuffer buff, Value v) {
if (v == ValueNull.INSTANCE) {
buff.put((byte) 0);
return;
}
int type = v.getType();
switch (type) {
case Value.BOOLEAN:
buff.put((byte) (v.getBoolean() ? BOOLEAN_TRUE : BOOLEAN_FALSE));
break;
case Value.BYTE:
buff.put((byte) type).put(v.getByte());
break;
case Value.SHORT:
buff.put((byte) type).putShort(v.getShort());
break;
case Value.ENUM:
case Value.INT: {
int x = v.getInt();
if (x < 0) {
buff.put((byte) INT_NEG).putVarInt(-x);
} else if (x < 16) {
buff.put((byte) (INT_0_15 + x));
} else {
buff.put((byte) type).putVarInt(x);
}
break;
}
case Value.LONG: {
long x = v.getLong();
if (x < 0) {
buff.put((byte) LONG_NEG).putVarLong(-x);
} else if (x < 8) {
buff.put((byte) (LONG_0_7 + x));
} else {
buff.put((byte) type).putVarLong(x);
}
break;
}
case Value.DECIMAL: {
BigDecimal x = v.getBigDecimal();
if (BigDecimal.ZERO.equals(x)) {
buff.put((byte) DECIMAL_0_1);
} else if (BigDecimal.ONE.equals(x)) {
buff.put((byte) (DECIMAL_0_1 + 1));
} else {
int scale = x.scale();
BigInteger b = x.unscaledValue();
int bits = b.bitLength();
if (bits <= 63) {
if (scale == 0) {
buff.put((byte) DECIMAL_SMALL_0).
putVarLong(b.longValue());
} else {
buff.put((byte) DECIMAL_SMALL).
putVarInt(scale).
putVarLong(b.longValue());
}
} else {
byte[] bytes = b.toByteArray();
buff.put((byte) type).
putVarInt(scale).
putVarInt(bytes.length).
put(bytes);
}
}
break;
}
case Value.TIME: {
ValueTime t = (ValueTime) v;
long nanos = t.getNanos();
long millis = nanos / 1000000;
nanos -= millis * 1000000;
buff.put((byte) type).
putVarLong(millis).
putVarLong(nanos);
break;
}
case Value.DATE: {
long x = ((ValueDate) v).getDateValue();
buff.put((byte) type).putVarLong(x);
break;
}
case Value.TIMESTAMP: {
ValueTimestamp ts = (ValueTimestamp) v;
long dateValue = ts.getDateValue();
long nanos = ts.getTimeNanos();
long millis = nanos / 1000000;
nanos -= millis * 1000000;
buff.put((byte) type).
putVarLong(dateValue).
putVarLong(millis).
putVarLong(nanos);
break;
}
case Value.TIMESTAMP_TZ: {
ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v;
long dateValue = ts.getDateValue();
long nanos = ts.getTimeNanos();
long millis = nanos / 1000000;
nanos -= millis * 1000000;
buff.put((byte) type).
putVarLong(dateValue).
putVarLong(millis).
putVarLong(nanos).
putVarInt(ts.getTimeZoneOffsetMins());
break;
}
case Value.JAVA_OBJECT: {
byte[] b = v.getBytesNoCopy();
buff.put((byte) type).
putVarInt(b.length).
put(b);
break;
}
case Value.BYTES: {
byte[] b = v.getBytesNoCopy();
int len = b.length;
if (len < 32) {
buff.put((byte) (BYTES_0_31 + len)).
put(b);
} else {
buff.put((byte) type).
putVarInt(b.length).
put(b);
}
break;
}
case Value.UUID: {
ValueUuid uuid = (ValueUuid) v;
buff.put((byte) type).
putLong(uuid.getHigh()).
putLong(uuid.getLow());
break;
}
case Value.STRING: {
String s = v.getString();
int len = s.length();
if (len < 32) {
buff.put((byte) (STRING_0_31 + len)).
putStringData(s, len);
} else {
buff.put((byte) type);
writeString(buff, s);
}
break;
}
case Value.STRING_IGNORECASE:
case Value.STRING_FIXED:
buff.put((byte) type);
writeString(buff, v.getString());
break;
case Value.DOUBLE: {
double x = v.getDouble();
if (x == 1.0d) {
buff.put((byte) (DOUBLE_0_1 + 1));
} else {
long d = Double.doubleToLongBits(x);
if (d == ValueDouble.ZERO_BITS) {
buff.put((byte) DOUBLE_0_1);
} else {
buff.put((byte) type).
putVarLong(Long.reverse(d));
}
}
break;
}
case Value.FLOAT: {
float x = v.getFloat();
if (x == 1.0f) {
buff.put((byte) (FLOAT_0_1 + 1));
} else {
int f = Float.floatToIntBits(x);
if (f == ValueFloat.ZERO_BITS) {
buff.put((byte) FLOAT_0_1);
} else {
buff.put((byte) type).
putVarInt(Integer.reverse(f));
}
}
break;
}
case Value.BLOB:
case Value.CLOB: {
buff.put((byte) type);
ValueLobDb lob = (ValueLobDb) v;
byte[] small = lob.getSmall();
if (small == null) {
buff.putVarInt(-3).
putVarInt(lob.getTableId()).
putVarLong(lob.getLobId()).
putVarLong(lob.getPrecision());
} else {
buff.putVarInt(small.length).
put(small);
}
break;
}
case Value.ARRAY: {
Value[] list = ((ValueArray) v).getList();
buff.put((byte) type).putVarInt(list.length);
for (Value x : list) {
writeValue(buff, x);
}
break;
}
case Value.RESULT_SET: {
buff.put((byte) type);
try {
ResultSet rs = ((ValueResultSet) v).getResultSet();
rs.beforeFirst();
ResultSetMetaData meta = rs.getMetaData();
int columnCount = meta.getColumnCount();
buff.putVarInt(columnCount);
for (int i = 0; i < columnCount; i++) {
writeString(buff, meta.getColumnName(i + 1));
buff.putVarInt(meta.getColumnType(i + 1)).
putVarInt(meta.getPrecision(i + 1)).
putVarInt(meta.getScale(i + 1));
}
while (rs.next()) {
buff.put((byte) 1);
for (int i = 0; i < columnCount; i++) {
int t = org.h2.value.DataType.
getValueTypeFromResultSet(meta, i + 1);
Value val = org.h2.value.DataType.readValue(
null, rs, i + 1, t);
writeValue(buff, val);
}
}
buff.put((byte) 0);
rs.beforeFirst();
} catch (SQLException e) {
throw DbException.convert(e);
}
break;
}
case Value.GEOMETRY: {
byte[] b = v.getBytes();
int len = b.length;
buff.put((byte) type).
putVarInt(len).
put(b);
break;
}
default:
if (JdbcUtils.customDataTypesHandler != null) {
byte[] b = v.getBytesNoCopy();
buff.put((byte)CUSTOM_DATA_TYPE).
putVarInt(type).
putVarInt(b.length).
put(b);
break;
}
DbException.throwInternalError("type=" + v.getType());
}
}
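// Encoding sketch (added for illustration; the helper is hypothetical):
// small non-negative ints are folded into the tag byte, so the INT case
// above writes the value 7 as the single byte INT_0_15 + 7 = 39; anything
// outside 0..15 costs a type byte plus a variable-length int.
private static byte exampleSmallIntTag(int x) {
// valid only for 0 <= x <= 15, mirroring the fast path in writeValue
return (byte) (INT_0_15 + x);
}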
private static void writeString(WriteBuffer buff, String s) {
int len = s.length();
buff.putVarInt(len).putStringData(s, len);
}
/**
* Read a value.
*
* @return the value
*/
private Object readValue(ByteBuffer buff) {
int type = buff.get() & 255;
switch (type) {
case Value.NULL:
return ValueNull.INSTANCE;
case BOOLEAN_TRUE:
return ValueBoolean.TRUE;
case BOOLEAN_FALSE:
return ValueBoolean.FALSE;
case INT_NEG:
return ValueInt.get(-readVarInt(buff));
case Value.ENUM:
case Value.INT:
return ValueInt.get(readVarInt(buff));
case LONG_NEG:
return ValueLong.get(-readVarLong(buff));
case Value.LONG:
return ValueLong.get(readVarLong(buff));
case Value.BYTE:
return ValueByte.get(buff.get());
case Value.SHORT:
return ValueShort.get(buff.getShort());
case DECIMAL_0_1:
return ValueDecimal.ZERO;
case DECIMAL_0_1 + 1:
return ValueDecimal.ONE;
case DECIMAL_SMALL_0:
return ValueDecimal.get(BigDecimal.valueOf(
readVarLong(buff)));
case DECIMAL_SMALL: {
int scale = readVarInt(buff);
return ValueDecimal.get(BigDecimal.valueOf(
readVarLong(buff), scale));
}
case Value.DECIMAL: {
int scale = readVarInt(buff);
int len = readVarInt(buff);
byte[] buff2 = Utils.newBytes(len);
buff.get(buff2, 0, len);
BigInteger b = new BigInteger(buff2);
return ValueDecimal.get(new BigDecimal(b, scale));
}
case Value.DATE: {
return ValueDate.fromDateValue(readVarLong(buff));
}
case Value.TIME: {
long nanos = readVarLong(buff) * 1000000 + readVarLong(buff);
return ValueTime.fromNanos(nanos);
}
case Value.TIMESTAMP: {
long dateValue = readVarLong(buff);
long nanos = readVarLong(buff) * 1000000 + readVarLong(buff);
return ValueTimestamp.fromDateValueAndNanos(dateValue, nanos);
}
case Value.TIMESTAMP_TZ: {
long dateValue = readVarLong(buff);
long nanos = readVarLong(buff) * 1000000 + readVarLong(buff);
short tz = (short) readVarInt(buff);
return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, nanos, tz);
}
case Value.BYTES: {
int len = readVarInt(buff);
byte[] b = Utils.newBytes(len);
buff.get(b, 0, len);
return ValueBytes.getNoCopy(b);
}
case Value.JAVA_OBJECT: {
int len = readVarInt(buff);
byte[] b = Utils.newBytes(len);
buff.get(b, 0, len);
return ValueJavaObject.getNoCopy(null, b, handler);
}
case Value.UUID:
return ValueUuid.get(buff.getLong(), buff.getLong());
case Value.STRING:
return ValueString.get(readString(buff));
case Value.STRING_IGNORECASE:
return ValueStringIgnoreCase.get(readString(buff));
case Value.STRING_FIXED:
return ValueStringFixed.get(readString(buff));
case FLOAT_0_1:
return ValueFloat.get(0);
case FLOAT_0_1 + 1:
return ValueFloat.get(1);
case DOUBLE_0_1:
return ValueDouble.get(0);
case DOUBLE_0_1 + 1:
return ValueDouble.get(1);
case Value.DOUBLE:
return ValueDouble.get(Double.longBitsToDouble(
Long.reverse(readVarLong(buff))));
case Value.FLOAT:
return ValueFloat.get(Float.intBitsToFloat(
Integer.reverse(readVarInt(buff))));
case Value.BLOB:
case Value.CLOB: {
int smallLen = readVarInt(buff);
if (smallLen >= 0) {
byte[] small = Utils.newBytes(smallLen);
buff.get(small, 0, smallLen);
return ValueLobDb.createSmallLob(type, small);
} else if (smallLen == -3) {
int tableId = readVarInt(buff);
long lobId = readVarLong(buff);
long precision = readVarLong(buff);
return ValueLobDb.create(type,
handler, tableId, lobId, null, precision);
} else {
throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
"lob type: " + smallLen);
}
}
case Value.ARRAY: {
int len = readVarInt(buff);
Value[] list = new Value[len];
for (int i = 0; i < len; i++) {
list[i] = (Value) readValue(buff);
}
return ValueArray.get(list);
}
case Value.RESULT_SET: {
SimpleResultSet rs = new SimpleResultSet();
rs.setAutoClose(false);
int columns = readVarInt(buff);
for (int i = 0; i < columns; i++) {
rs.addColumn(readString(buff),
readVarInt(buff),
readVarInt(buff),
readVarInt(buff));
}
while (buff.get() != 0) {
Object[] o = new Object[columns];
for (int i = 0; i < columns; i++) {
o[i] = ((Value) readValue(buff)).getObject();
}
rs.addRow(o);
}
return ValueResultSet.get(rs);
}
case Value.GEOMETRY: {
int len = readVarInt(buff);
byte[] b = Utils.newBytes(len);
buff.get(b, 0, len);
return ValueGeometry.get(b);
}
case SPATIAL_KEY_2D:
return getSpatialDataType().read(buff);
case CUSTOM_DATA_TYPE: {
if (JdbcUtils.customDataTypesHandler != null) {
int customType = readVarInt(buff);
int len = readVarInt(buff);
byte[] b = Utils.newBytes(len);
buff.get(b, 0, len);
return JdbcUtils.customDataTypesHandler.convert(
ValueBytes.getNoCopy(b), customType);
}
throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1,
"No CustomDataTypesHandler has been set up");
}
default:
if (type >= INT_0_15 && type < INT_0_15 + 16) {
return ValueInt.get(type - INT_0_15);
} else if (type >= LONG_0_7 && type < LONG_0_7 + 8) {
return ValueLong.get(type - LONG_0_7);
} else if (type >= BYTES_0_31 && type < BYTES_0_31 + 32) {
int len = type - BYTES_0_31;
byte[] b = Utils.newBytes(len);
buff.get(b, 0, len);
return ValueBytes.getNoCopy(b);
} else if (type >= STRING_0_31 && type < STRING_0_31 + 32) {
return ValueString.get(readString(buff, type - STRING_0_31));
}
throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "type: " + type);
}
}
private static int readVarInt(ByteBuffer buff) {
return DataUtils.readVarInt(buff);
}
private static long readVarLong(ByteBuffer buff) {
return DataUtils.readVarLong(buff);
}
private static String readString(ByteBuffer buff, int len) {
return DataUtils.readString(buff, len);
}
private static String readString(ByteBuffer buff) {
int len = readVarInt(buff);
return DataUtils.readString(buff, len);
}
@Override
public int hashCode() {
return compareMode.hashCode() ^ Arrays.hashCode(sortTypes);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
} else if (!(obj instanceof ValueDataType)) {
return false;
}
ValueDataType v = (ValueDataType) obj;
if (!compareMode.equals(v.compareMode)) {
return false;
}
return Arrays.equals(sortTypes, v.sortTypes);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/rtree/MVRTreeMap.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.rtree;
import java.util.ArrayList;
import java.util.Iterator;
import org.h2.mvstore.CursorPos;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.Page;
import org.h2.mvstore.type.DataType;
import org.h2.mvstore.type.ObjectDataType;
import org.h2.util.New;
/**
* An r-tree implementation. It supports both the linear and the quadratic split
* algorithm.
*
* @param <V> the value class
*/
public class MVRTreeMap<V> extends MVMap<SpatialKey, V> {
/**
* The spatial key type.
*/
final SpatialDataType keyType;
private boolean quadraticSplit;
public MVRTreeMap(int dimensions, DataType valueType) {
super(new SpatialDataType(dimensions), valueType);
this.keyType = (SpatialDataType) getKeyType();
}
/**
* Create a new map with the given dimensions and value type.
*
* @param <V> the value type
* @param dimensions the number of dimensions
* @param valueType the value type
* @return the map
*/
public static <V> MVRTreeMap<V> create(int dimensions, DataType valueType) {
return new MVRTreeMap<>(dimensions, valueType);
}
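// Usage sketch (added for illustration; the helper is hypothetical and
// assumes a store opened elsewhere): keys are bounding boxes, so lookups
// are spatial rather than ordered.
static void exampleUsage(org.h2.mvstore.MVStore store) {
MVRTreeMap<String> map = store.openMap("spatial", new MVRTreeMap.Builder<String>());
// SpatialKey(id, minX, maxX, minY, maxY) for two dimensions
map.add(new SpatialKey(1, 0f, 10f, 0f, 10f), "first");
RTreeCursor it = map.findIntersectingKeys(new SpatialKey(0, 5f, 15f, 5f, 15f));
while (it.hasNext()) {
SpatialKey hit = it.next(); // each key overlapping the query box
}
}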
@Override
@SuppressWarnings("unchecked")
public V get(Object key) {
return (V) get(root, key);
}
/**
* Iterate over all keys that have an intersection with the given rectangle.
*
* @param x the rectangle
* @return the iterator
*/
public RTreeCursor findIntersectingKeys(SpatialKey x) {
return new RTreeCursor(root, x) {
@Override
protected boolean check(boolean leaf, SpatialKey key,
SpatialKey test) {
return keyType.isOverlap(key, test);
}
};
}
/**
* Iterate over all keys that are fully contained within the given
* rectangle.
*
* @param x the rectangle
* @return the iterator
*/
public RTreeCursor findContainedKeys(SpatialKey x) {
return new RTreeCursor(root, x) {
@Override
protected boolean check(boolean leaf, SpatialKey key,
SpatialKey test) {
if (leaf) {
return keyType.isInside(key, test);
}
return keyType.isOverlap(key, test);
}
};
}
private boolean contains(Page p, int index, Object key) {
return keyType.contains(p.getKey(index), key);
}
/**
* Get the object for the given key. An exact match is required.
*
* @param p the page
* @param key the key
* @return the value, or null if not found
*/
protected Object get(Page p, Object key) {
if (!p.isLeaf()) {
for (int i = 0; i < p.getKeyCount(); i++) {
if (contains(p, i, key)) {
Object o = get(p.getChildPage(i), key);
if (o != null) {
return o;
}
}
}
} else {
for (int i = 0; i < p.getKeyCount(); i++) {
if (keyType.equals(p.getKey(i), key)) {
return p.getValue(i);
}
}
}
return null;
}
@Override
protected synchronized Object remove(Page p, long writeVersion, Object key) {
Object result = null;
if (p.isLeaf()) {
for (int i = 0; i < p.getKeyCount(); i++) {
if (keyType.equals(p.getKey(i), key)) {
result = p.getValue(i);
p.remove(i);
break;
}
}
return result;
}
for (int i = 0; i < p.getKeyCount(); i++) {
if (contains(p, i, key)) {
Page cOld = p.getChildPage(i);
// this will mark the old page as deleted
// so we need to update the parent in any case
// (otherwise the old page might be deleted again)
Page c = cOld.copy(writeVersion);
long oldSize = c.getTotalCount();
result = remove(c, writeVersion, key);
p.setChild(i, c);
if (oldSize == c.getTotalCount()) {
continue;
}
if (c.getTotalCount() == 0) {
// this child was deleted
p.remove(i);
if (p.getKeyCount() == 0) {
c.removePage();
}
break;
}
Object oldBounds = p.getKey(i);
if (!keyType.isInside(key, oldBounds)) {
p.setKey(i, getBounds(c));
}
break;
}
}
return result;
}
private Object getBounds(Page x) {
Object bounds = keyType.createBoundingBox(x.getKey(0));
for (int i = 1; i < x.getKeyCount(); i++) {
keyType.increaseBounds(bounds, x.getKey(i));
}
return bounds;
}
@Override
@SuppressWarnings("unchecked")
public V put(SpatialKey key, V value) {
return (V) putOrAdd(key, value, false);
}
/**
* Add a given key-value pair. The key should not exist (if it exists, the
* result is undefined).
*
* @param key the key
* @param value the value
*/
public void add(SpatialKey key, V value) {
putOrAdd(key, value, true);
}
private synchronized Object putOrAdd(SpatialKey key, V value, boolean alwaysAdd) {
beforeWrite();
long v = writeVersion;
Page p = root.copy(v);
Object result;
if (alwaysAdd || get(key) == null) {
if (p.getMemory() > store.getPageSplitSize() &&
p.getKeyCount() > 3) {
// only possible if this is the root, else we would have
// split earlier (this requires that pageSplitSize is fixed)
long totalCount = p.getTotalCount();
Page split = split(p, v);
Object k1 = getBounds(p);
Object k2 = getBounds(split);
Object[] keys = { k1, k2 };
Page.PageReference[] children = {
new Page.PageReference(p, p.getPos(), p.getTotalCount()),
new Page.PageReference(split, split.getPos(), split.getTotalCount()),
new Page.PageReference(null, 0, 0)
};
p = Page.create(this, v,
keys, null,
children,
totalCount, 0);
// now p is a node; the insert continues below
}
add(p, v, key, value);
result = null;
} else {
result = set(p, v, key, value);
}
newRoot(p);
return result;
}
/**
* Update the value for the given key. The key must exist.
*
* @param p the page
* @param writeVersion the write version
* @param key the key
* @param value the new value
* @return the old value (never null)
*/
private Object set(Page p, long writeVersion, Object key, Object value) {
if (p.isLeaf()) {
for (int i = 0; i < p.getKeyCount(); i++) {
if (keyType.equals(p.getKey(i), key)) {
p.setKey(i, key);
return p.setValue(i, value);
}
}
} else {
for (int i = 0; i < p.getKeyCount(); i++) {
if (contains(p, i, key)) {
Page c = p.getChildPage(i);
if (get(c, key) != null) {
c = c.copy(writeVersion);
Object result = set(c, writeVersion, key, value);
p.setChild(i, c);
return result;
}
}
}
}
throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL,
"Not found: {0}", key);
}
private void add(Page p, long writeVersion, Object key, Object value) {
if (p.isLeaf()) {
p.insertLeaf(p.getKeyCount(), key, value);
return;
}
// p is a node
int index = -1;
for (int i = 0; i < p.getKeyCount(); i++) {
if (contains(p, i, key)) {
index = i;
break;
}
}
if (index < 0) {
// a new entry: pick the child whose bounds grow the least
float min = Float.MAX_VALUE;
for (int i = 0; i < p.getKeyCount(); i++) {
Object k = p.getKey(i);
float areaIncrease = keyType.getAreaIncrease(k, key);
if (areaIncrease < min) {
index = i;
min = areaIncrease;
}
}
}
Page c = p.getChildPage(index).copy(writeVersion);
if (c.getMemory() > store.getPageSplitSize() && c.getKeyCount() > 4) {
// split on the way down
Page split = split(c, writeVersion);
p.setKey(index, getBounds(c));
p.setChild(index, c);
p.insertNode(index, getBounds(split), split);
// after the split we no longer know which child fits best, so retry
add(p, writeVersion, key, value);
return;
}
add(c, writeVersion, key, value);
Object bounds = p.getKey(index);
keyType.increaseBounds(bounds, key);
p.setKey(index, bounds);
p.setChild(index, c);
}
private Page split(Page p, long writeVersion) {
return quadraticSplit ?
splitQuadratic(p, writeVersion) :
splitLinear(p, writeVersion);
}
private Page splitLinear(Page p, long writeVersion) {
ArrayList<Object> keys = New.arrayList();
for (int i = 0; i < p.getKeyCount(); i++) {
keys.add(p.getKey(i));
}
int[] extremes = keyType.getExtremes(keys);
if (extremes == null) {
return splitQuadratic(p, writeVersion);
}
Page splitA = newPage(p.isLeaf(), writeVersion);
Page splitB = newPage(p.isLeaf(), writeVersion);
move(p, splitA, extremes[0]);
if (extremes[1] > extremes[0]) {
extremes[1]--;
}
move(p, splitB, extremes[1]);
Object boundsA = keyType.createBoundingBox(splitA.getKey(0));
Object boundsB = keyType.createBoundingBox(splitB.getKey(0));
while (p.getKeyCount() > 0) {
Object o = p.getKey(0);
float a = keyType.getAreaIncrease(boundsA, o);
float b = keyType.getAreaIncrease(boundsB, o);
if (a < b) {
keyType.increaseBounds(boundsA, o);
move(p, splitA, 0);
} else {
keyType.increaseBounds(boundsB, o);
move(p, splitB, 0);
}
}
while (splitB.getKeyCount() > 0) {
move(splitB, p, 0);
}
return splitA;
}
private Page splitQuadratic(Page p, long writeVersion) {
Page splitA = newPage(p.isLeaf(), writeVersion);
Page splitB = newPage(p.isLeaf(), writeVersion);
float largest = Float.MIN_VALUE;
int ia = 0, ib = 0;
for (int a = 0; a < p.getKeyCount(); a++) {
Object objA = p.getKey(a);
for (int b = 0; b < p.getKeyCount(); b++) {
if (a == b) {
continue;
}
Object objB = p.getKey(b);
float area = keyType.getCombinedArea(objA, objB);
if (area > largest) {
largest = area;
ia = a;
ib = b;
}
}
}
move(p, splitA, ia);
if (ia < ib) {
ib--;
}
move(p, splitB, ib);
Object boundsA = keyType.createBoundingBox(splitA.getKey(0));
Object boundsB = keyType.createBoundingBox(splitB.getKey(0));
while (p.getKeyCount() > 0) {
float diff = 0, bestA = 0, bestB = 0;
int best = 0;
for (int i = 0; i < p.getKeyCount(); i++) {
Object o = p.getKey(i);
float incA = keyType.getAreaIncrease(boundsA, o);
float incB = keyType.getAreaIncrease(boundsB, o);
float d = Math.abs(incA - incB);
if (d > diff) {
diff = d;
bestA = incA;
bestB = incB;
best = i;
}
}
if (bestA < bestB) {
keyType.increaseBounds(boundsA, p.getKey(best));
move(p, splitA, best);
} else {
keyType.increaseBounds(boundsB, p.getKey(best));
move(p, splitB, best);
}
}
while (splitB.getKeyCount() > 0) {
move(splitB, p, 0);
}
return splitA;
}
private Page newPage(boolean leaf, long writeVersion) {
Object[] values;
Page.PageReference[] refs;
if (leaf) {
values = Page.EMPTY_OBJECT_ARRAY;
refs = null;
} else {
values = null;
refs = new Page.PageReference[] {
new Page.PageReference(null, 0, 0)};
}
return Page.create(this, writeVersion,
Page.EMPTY_OBJECT_ARRAY, values,
refs, 0, 0);
}
private static void move(Page source, Page target, int sourceIndex) {
Object k = source.getKey(sourceIndex);
if (source.isLeaf()) {
Object v = source.getValue(sourceIndex);
target.insertLeaf(0, k, v);
} else {
Page c = source.getChildPage(sourceIndex);
target.insertNode(0, k, c);
}
source.remove(sourceIndex);
}
/**
* Add all node keys (including internal bounds) to the given list.
* This is mainly used to visualize the internal splits.
*
* @param list the list
* @param p the root page
*/
public void addNodeKeys(ArrayList<SpatialKey> list, Page p) {
if (p != null && !p.isLeaf()) {
for (int i = 0; i < p.getKeyCount(); i++) {
list.add((SpatialKey) p.getKey(i));
addNodeKeys(list, p.getChildPage(i));
}
}
}
public boolean isQuadraticSplit() {
return quadraticSplit;
}
public void setQuadraticSplit(boolean quadraticSplit) {
this.quadraticSplit = quadraticSplit;
}
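// Note (added for illustration): the linear split is the default; the
// quadratic split usually clusters entries better at a higher split cost,
// e.g. call map.setQuadraticSplit(true) before bulk loading.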
@Override
protected int getChildPageCount(Page p) {
return p.getRawChildPageCount() - 1;
}
/**
* A cursor to iterate over a subset of the keys.
*/
public static class RTreeCursor implements Iterator<SpatialKey> {
private final SpatialKey filter;
private CursorPos pos;
private SpatialKey current;
private final Page root;
private boolean initialized;
protected RTreeCursor(Page root, SpatialKey filter) {
this.root = root;
this.filter = filter;
}
@Override
public boolean hasNext() {
if (!initialized) {
// init
pos = new CursorPos(root, 0, null);
fetchNext();
initialized = true;
}
return current != null;
}
/**
* Skip over the given number of entries. For this cursor implementation
* each skipped entry is still fetched, so the cost grows linearly with n.
*
* @param n the number of entries to skip
*/
public void skip(long n) {
while (hasNext() && n-- > 0) {
fetchNext();
}
}
@Override
public SpatialKey next() {
if (!hasNext()) {
return null;
}
SpatialKey c = current;
fetchNext();
return c;
}
@Override
public void remove() {
throw DataUtils.newUnsupportedOperationException(
"Removing is not supported");
}
/**
* Fetch the next entry if there is one.
*/
protected void fetchNext() {
while (pos != null) {
Page p = pos.page;
if (p.isLeaf()) {
while (pos.index < p.getKeyCount()) {
SpatialKey c = (SpatialKey) p.getKey(pos.index++);
if (filter == null || check(true, c, filter)) {
current = c;
return;
}
}
} else {
boolean found = false;
while (pos.index < p.getKeyCount()) {
int index = pos.index++;
SpatialKey c = (SpatialKey) p.getKey(index);
if (filter == null || check(false, c, filter)) {
Page child = pos.page.getChildPage(index);
pos = new CursorPos(child, 0, pos);
found = true;
break;
}
}
if (found) {
continue;
}
}
// parent
pos = pos.parent;
}
current = null;
}
/**
* Check a given key.
*
* @param leaf if the key is from a leaf page
* @param key the stored key
* @param test the user-supplied test key
* @return true if there is a match
*/
@SuppressWarnings("unused")
protected boolean check(boolean leaf, SpatialKey key, SpatialKey test) {
return true;
}
}
@Override
public String getType() {
return "rtree";
}
/**
* A builder for this class.
*
* @param <V> the value type
*/
public static class Builder<V> implements
MVMap.MapBuilder<MVRTreeMap<V>, SpatialKey, V> {
private int dimensions = 2;
private DataType valueType;
/**
* Create a new builder for maps with 2 dimensions.
*/
public Builder() {
// default
}
/**
* Set the dimensions.
*
* @param dimensions the dimensions to use
* @return this
*/
public Builder<V> dimensions(int dimensions) {
this.dimensions = dimensions;
return this;
}
/**
* Set the key data type.
*
* @param valueType the value type
* @return this
*/
public Builder<V> valueType(DataType valueType) {
this.valueType = valueType;
return this;
}
@Override
public MVRTreeMap<V> create() {
if (valueType == null) {
valueType = new ObjectDataType();
}
return new MVRTreeMap<>(dimensions, valueType);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/rtree/SpatialDataType.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.rtree;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.WriteBuffer;
import org.h2.mvstore.type.DataType;
import org.h2.util.New;
/**
* A spatial data type. This class supports up to 31 dimensions. Each dimension
* can have a minimum and a maximum value of type float. For each dimension, the
* maximum value is only stored when it is not the same as the minimum.
*/
public class SpatialDataType implements DataType {
private final int dimensions;
public SpatialDataType(int dimensions) {
// Because the min/max flags are packed into a single
// int bit mask in the read/write methods,
// the number of dimensions must be < 32.
DataUtils.checkArgument(
dimensions >= 1 && dimensions < 32,
"Dimensions must be between 1 and 31, is {0}", dimensions);
this.dimensions = dimensions;
}
@Override
public int compare(Object a, Object b) {
if (a == b) {
return 0;
} else if (a == null) {
return -1;
} else if (b == null) {
return 1;
}
long la = ((SpatialKey) a).getId();
long lb = ((SpatialKey) b).getId();
return Long.compare(la, lb);
}
/**
* Check whether two spatial values are equal.
*
* @param a the first value
* @param b the second value
* @return true if they are equal
*/
public boolean equals(Object a, Object b) {
if (a == b) {
return true;
} else if (a == null || b == null) {
return false;
}
long la = ((SpatialKey) a).getId();
long lb = ((SpatialKey) b).getId();
return la == lb;
}
@Override
public int getMemory(Object obj) {
return 40 + dimensions * 4;
}
@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
@Override
public void write(WriteBuffer buff, Object obj) {
SpatialKey k = (SpatialKey) obj;
if (k.isNull()) {
buff.putVarInt(-1);
buff.putVarLong(k.getId());
return;
}
int flags = 0;
for (int i = 0; i < dimensions; i++) {
if (k.min(i) == k.max(i)) {
flags |= 1 << i;
}
}
buff.putVarInt(flags);
for (int i = 0; i < dimensions; i++) {
buff.putFloat(k.min(i));
if ((flags & (1 << i)) == 0) {
buff.putFloat(k.max(i));
}
}
buff.putVarLong(k.getId());
}
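// Encoding sketch (added for illustration; the helper is hypothetical):
// for every dimension where min == max only one float is stored and the
// matching flag bit is set, so a 2D point costs two floats instead of four
// (plus the flag varint and the id).
private int exampleFlags(SpatialKey k) {
int flags = 0;
for (int i = 0; i < dimensions; i++) {
if (k.min(i) == k.max(i)) {
flags |= 1 << i; // this dimension is stored as a single float
}
}
return flags;
}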
@Override
public Object read(ByteBuffer buff) {
int flags = DataUtils.readVarInt(buff);
if (flags == -1) {
long id = DataUtils.readVarLong(buff);
return new SpatialKey(id);
}
float[] minMax = new float[dimensions * 2];
for (int i = 0; i < dimensions; i++) {
float min = buff.getFloat();
float max;
if ((flags & (1 << i)) != 0) {
max = min;
} else {
max = buff.getFloat();
}
minMax[i + i] = min;
minMax[i + i + 1] = max;
}
long id = DataUtils.readVarLong(buff);
return new SpatialKey(id, minMax);
}
/**
* Check whether the two objects overlap.
*
* @param objA the first object
* @param objB the second object
* @return true if they overlap
*/
public boolean isOverlap(Object objA, Object objB) {
SpatialKey a = (SpatialKey) objA;
SpatialKey b = (SpatialKey) objB;
if (a.isNull() || b.isNull()) {
return false;
}
for (int i = 0; i < dimensions; i++) {
if (a.max(i) < b.min(i) || a.min(i) > b.max(i)) {
return false;
}
}
return true;
}
/**
* Increase the bounds in the given spatial object.
*
* @param bounds the bounds (may be modified)
* @param add the value
*/
public void increaseBounds(Object bounds, Object add) {
SpatialKey a = (SpatialKey) add;
SpatialKey b = (SpatialKey) bounds;
if (a.isNull() || b.isNull()) {
return;
}
for (int i = 0; i < dimensions; i++) {
b.setMin(i, Math.min(b.min(i), a.min(i)));
b.setMax(i, Math.max(b.max(i), a.max(i)));
}
}
/**
* Get the area increase by extending a to contain b.
*
* @param objA the bounding box
* @param objB the object
* @return the area
*/
public float getAreaIncrease(Object objA, Object objB) {
SpatialKey b = (SpatialKey) objB;
SpatialKey a = (SpatialKey) objA;
if (a.isNull() || b.isNull()) {
return 0;
}
float min = a.min(0);
float max = a.max(0);
float areaOld = max - min;
min = Math.min(min, b.min(0));
max = Math.max(max, b.max(0));
float areaNew = max - min;
for (int i = 1; i < dimensions; i++) {
min = a.min(i);
max = a.max(i);
areaOld *= max - min;
min = Math.min(min, b.min(i));
max = Math.max(max, b.max(i));
areaNew *= max - min;
}
return areaNew - areaOld;
}
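// Worked example (added for illustration): growing the box [0,2]x[0,2]
// (area 4) to also cover [1,3]x[1,3] gives [0,3]x[0,3] (area 9), so
// getAreaIncrease returns 9 - 4 = 5.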
/**
* Get the combined area of both objects.
*
* @param objA the first object
* @param objB the second object
* @return the area
*/
float getCombinedArea(Object objA, Object objB) {
SpatialKey a = (SpatialKey) objA;
SpatialKey b = (SpatialKey) objB;
if (a.isNull()) {
return getArea(b);
} else if (b.isNull()) {
return getArea(a);
}
float area = 1;
for (int i = 0; i < dimensions; i++) {
float min = Math.min(a.min(i), b.min(i));
float max = Math.max(a.max(i), b.max(i));
area *= max - min;
}
return area;
}
private float getArea(SpatialKey a) {
if (a.isNull()) {
return 0;
}
float area = 1;
for (int i = 0; i < dimensions; i++) {
area *= a.max(i) - a.min(i);
}
return area;
}
/**
* Check whether a contains b.
*
* @param objA the bounding box
* @param objB the object
* @return true if a contains b
*/
public boolean contains(Object objA, Object objB) {
SpatialKey a = (SpatialKey) objA;
SpatialKey b = (SpatialKey) objB;
if (a.isNull() || b.isNull()) {
return false;
}
for (int i = 0; i < dimensions; i++) {
if (a.min(i) > b.min(i) || a.max(i) < b.max(i)) {
return false;
}
}
return true;
}
/**
* Check whether a is completely inside b and does not touch the
* bounds of b.
*
* @param objA the object to check
* @param objB the bounds
* @return true if a is completely inside b
*/
public boolean isInside(Object objA, Object objB) {
SpatialKey a = (SpatialKey) objA;
SpatialKey b = (SpatialKey) objB;
if (a.isNull() || b.isNull()) {
return false;
}
for (int i = 0; i < dimensions; i++) {
if (a.min(i) <= b.min(i) || a.max(i) >= b.max(i)) {
return false;
}
}
return true;
}
/**
* Create a bounding box starting with the given object.
*
* @param objA the object
* @return the bounding box
*/
Object createBoundingBox(Object objA) {
SpatialKey a = (SpatialKey) objA;
if (a.isNull()) {
return a;
}
float[] minMax = new float[dimensions * 2];
for (int i = 0; i < dimensions; i++) {
minMax[i + i] = a.min(i);
minMax[i + i + 1] = a.max(i);
}
return new SpatialKey(0, minMax);
}
/**
* Get the most extreme pair (elements that are as far apart as possible).
* This method is used to split a page (linear split). If no extreme objects
* could be found, this method returns null.
*
* @param list the objects
* @return the indexes of the extremes
*/
public int[] getExtremes(ArrayList<Object> list) {
list = getNotNull(list);
if (list.isEmpty()) {
return null;
}
SpatialKey bounds = (SpatialKey) createBoundingBox(list.get(0));
SpatialKey boundsInner = (SpatialKey) createBoundingBox(bounds);
for (int i = 0; i < dimensions; i++) {
float t = boundsInner.min(i);
boundsInner.setMin(i, boundsInner.max(i));
boundsInner.setMax(i, t);
}
for (Object o : list) {
increaseBounds(bounds, o);
increaseMaxInnerBounds(boundsInner, o);
}
double best = 0;
int bestDim = 0;
for (int i = 0; i < dimensions; i++) {
float inner = boundsInner.max(i) - boundsInner.min(i);
if (inner < 0) {
continue;
}
float outer = bounds.max(i) - bounds.min(i);
float d = inner / outer;
if (d > best) {
best = d;
bestDim = i;
}
}
if (best <= 0) {
return null;
}
float min = boundsInner.min(bestDim);
float max = boundsInner.max(bestDim);
int firstIndex = -1, lastIndex = -1;
for (int i = 0; i < list.size() &&
(firstIndex < 0 || lastIndex < 0); i++) {
SpatialKey o = (SpatialKey) list.get(i);
if (firstIndex < 0 && o.max(bestDim) == min) {
firstIndex = i;
} else if (lastIndex < 0 && o.min(bestDim) == max) {
lastIndex = i;
}
}
return new int[] { firstIndex, lastIndex };
}
private static ArrayList<Object> getNotNull(ArrayList<Object> list) {
ArrayList<Object> result = null;
for (Object o : list) {
SpatialKey a = (SpatialKey) o;
if (a.isNull()) {
result = New.arrayList();
break;
}
}
if (result == null) {
return list;
}
for (Object o : list) {
SpatialKey a = (SpatialKey) o;
if (!a.isNull()) {
result.add(a);
}
}
return result;
}
private void increaseMaxInnerBounds(Object bounds, Object add) {
SpatialKey b = (SpatialKey) bounds;
SpatialKey a = (SpatialKey) add;
for (int i = 0; i < dimensions; i++) {
b.setMin(i, Math.min(b.min(i), a.max(i)));
b.setMax(i, Math.max(b.max(i), a.min(i)));
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/rtree/SpatialKey.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.rtree;
import java.util.Arrays;
/**
* A unique spatial key.
*/
public class SpatialKey {
private final long id;
private final float[] minMax;
/**
* Create a new key.
*
* @param id the id
* @param minMax min x, max x, min y, max y, and so on
*/
public SpatialKey(long id, float... minMax) {
this.id = id;
this.minMax = minMax;
}
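// Construction sketch (added for illustration): a 2D box is created as
// new SpatialKey(id, minX, maxX, minY, maxY); passing no coordinates at
// all creates the "null" key recognized by isNull() below.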
/**
* Get the minimum value for the given dimension.
*
* @param dim the dimension
* @return the value
*/
public float min(int dim) {
return minMax[dim + dim];
}
/**
* Set the minimum value for the given dimension.
*
* @param dim the dimension
* @param x the value
*/
public void setMin(int dim, float x) {
minMax[dim + dim] = x;
}
/**
* Get the maximum value for the given dimension.
*
* @param dim the dimension
* @return the value
*/
public float max(int dim) {
return minMax[dim + dim + 1];
}
/**
* Set the maximum value for the given dimension.
*
* @param dim the dimension
* @param x the value
*/
public void setMax(int dim, float x) {
minMax[dim + dim + 1] = x;
}
public long getId() {
return id;
}
public boolean isNull() {
return minMax.length == 0;
}
@Override
public String toString() {
StringBuilder buff = new StringBuilder();
buff.append(id).append(": (");
for (int i = 0; i < minMax.length; i += 2) {
if (i > 0) {
buff.append(", ");
}
buff.append(minMax[i]).append('/').append(minMax[i + 1]);
}
return buff.append(")").toString();
}
@Override
public int hashCode() {
return (int) ((id >>> 32) ^ id);
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (!(other instanceof SpatialKey)) {
return false;
}
SpatialKey o = (SpatialKey) other;
if (id != o.id) {
return false;
}
return equalsIgnoringId(o);
}
/**
* Check whether two keys are equal, without comparing the id fields.
*
* @param o the other key
* @return true if the contents are the same
*/
public boolean equalsIgnoringId(SpatialKey o) {
return Arrays.equals(minMax, o.minMax);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/type/DataType.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.type;
import java.nio.ByteBuffer;
import org.h2.mvstore.WriteBuffer;
/**
* A data type.
*/
public interface DataType {
/**
* Compare two keys.
*
* @param a the first key
* @param b the second key
* @return -1 if the first key is smaller, 1 if larger, and 0 if equal
* @throws UnsupportedOperationException if the type is not orderable
*/
int compare(Object a, Object b);
/**
* Estimate the used memory in bytes.
*
* @param obj the object
* @return the used memory
*/
int getMemory(Object obj);
/**
* Write an object.
*
* @param buff the target buffer
* @param obj the value
*/
void write(WriteBuffer buff, Object obj);
/**
* Write a list of objects.
*
* @param buff the target buffer
* @param obj the objects
* @param len the number of objects to write
* @param key whether the objects are keys
*/
void write(WriteBuffer buff, Object[] obj, int len, boolean key);
/**
* Read an object.
*
* @param buff the source buffer
* @return the object
*/
Object read(ByteBuffer buff);
/**
* Read a list of objects.
*
* @param buff the source buffer
* @param obj the objects
* @param len the number of objects to read
* @param key whether the objects are keys
*/
void read(ByteBuffer buff, Object[] obj, int len, boolean key);
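/**
* Implementation sketch (added for illustration, not part of the original
* interface): a minimal type for Integer values, showing the expected
* symmetry between the write and read methods.
*/
static class ExampleIntType implements DataType {
@Override
public int compare(Object a, Object b) {
return Integer.compare((Integer) a, (Integer) b);
}
@Override
public int getMemory(Object obj) {
return 16; // rough estimate for a boxed Integer
}
@Override
public void write(WriteBuffer buff, Object obj) {
buff.putInt((Integer) obj);
}
@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
@Override
public Object read(ByteBuffer buff) {
return buff.getInt();
}
@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
}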
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/type/ObjectDataType.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.type;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.lang.reflect.Array;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.UUID;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.WriteBuffer;
import org.h2.util.Utils;
/**
* A data type implementation for the most common data types, including
* serializable objects.
*/
public class ObjectDataType implements DataType {
/**
* The type constants are also used as tag values.
*/
static final int TYPE_NULL = 0;
static final int TYPE_BOOLEAN = 1;
static final int TYPE_BYTE = 2;
static final int TYPE_SHORT = 3;
static final int TYPE_INT = 4;
static final int TYPE_LONG = 5;
static final int TYPE_BIG_INTEGER = 6;
static final int TYPE_FLOAT = 7;
static final int TYPE_DOUBLE = 8;
static final int TYPE_BIG_DECIMAL = 9;
static final int TYPE_CHAR = 10;
static final int TYPE_STRING = 11;
static final int TYPE_UUID = 12;
static final int TYPE_DATE = 13;
static final int TYPE_ARRAY = 14;
static final int TYPE_SERIALIZED_OBJECT = 19;
/**
* For very common values (such as 0 and 1) the value is encoded directly
* in the tag to save space, for example TAG_BOOLEAN_TRUE and TAG_FLOAT_0.
*/
static final int TAG_BOOLEAN_TRUE = 32;
static final int TAG_INTEGER_NEGATIVE = 33;
static final int TAG_INTEGER_FIXED = 34;
static final int TAG_LONG_NEGATIVE = 35;
static final int TAG_LONG_FIXED = 36;
static final int TAG_BIG_INTEGER_0 = 37;
static final int TAG_BIG_INTEGER_1 = 38;
static final int TAG_BIG_INTEGER_SMALL = 39;
static final int TAG_FLOAT_0 = 40;
static final int TAG_FLOAT_1 = 41;
static final int TAG_FLOAT_FIXED = 42;
static final int TAG_DOUBLE_0 = 43;
static final int TAG_DOUBLE_1 = 44;
static final int TAG_DOUBLE_FIXED = 45;
static final int TAG_BIG_DECIMAL_0 = 46;
static final int TAG_BIG_DECIMAL_1 = 47;
static final int TAG_BIG_DECIMAL_SMALL = 48;
static final int TAG_BIG_DECIMAL_SMALL_SCALED = 49;
/**
* For small values and small arrays, the value or the array length is
* encoded directly in the tag.
*/
static final int TAG_INTEGER_0_15 = 64;
static final int TAG_LONG_0_7 = 80;
static final int TAG_STRING_0_15 = 88;
static final int TAG_BYTE_ARRAY_0_15 = 104;
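// Tag sketch (added for illustration): the integer 5 is encoded as the
// single byte TAG_INTEGER_0_15 + 5 = 69, and the string "ab" as
// TAG_STRING_0_15 + 2 = 90 followed by its two characters; values outside
// the small ranges fall back to a TYPE_* tag plus a variable-length body.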
/**
* Bit patterns of the floating point values 0 and 1, used to detect
* them for the compact tag encoding.
*/
static final int FLOAT_ZERO_BITS = Float.floatToIntBits(0.0f);
static final int FLOAT_ONE_BITS = Float.floatToIntBits(1.0f);
static final long DOUBLE_ZERO_BITS = Double.doubleToLongBits(0.0d);
static final long DOUBLE_ONE_BITS = Double.doubleToLongBits(1.0d);
static final Class<?>[] COMMON_CLASSES = { boolean.class, byte.class,
short.class, char.class, int.class, long.class, float.class,
double.class, Object.class, Boolean.class, Byte.class, Short.class,
Character.class, Integer.class, Long.class, BigInteger.class,
Float.class, Double.class, BigDecimal.class, String.class,
UUID.class, Date.class };
private static final HashMap<Class<?>, Integer> COMMON_CLASSES_MAP = new HashMap<>(COMMON_CLASSES.length);
private AutoDetectDataType last = new StringType(this);
@Override
public int compare(Object a, Object b) {
return last.compare(a, b);
}
@Override
public int getMemory(Object obj) {
return last.getMemory(obj);
}
@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
@Override
public void write(WriteBuffer buff, Object obj) {
last.write(buff, obj);
}
private AutoDetectDataType newType(int typeId) {
switch (typeId) {
case TYPE_NULL:
return new NullType(this);
case TYPE_BOOLEAN:
return new BooleanType(this);
case TYPE_BYTE:
return new ByteType(this);
case TYPE_SHORT:
return new ShortType(this);
case TYPE_CHAR:
return new CharacterType(this);
case TYPE_INT:
return new IntegerType(this);
case TYPE_LONG:
return new LongType(this);
case TYPE_FLOAT:
return new FloatType(this);
case TYPE_DOUBLE:
return new DoubleType(this);
case TYPE_BIG_INTEGER:
return new BigIntegerType(this);
case TYPE_BIG_DECIMAL:
return new BigDecimalType(this);
case TYPE_STRING:
return new StringType(this);
case TYPE_UUID:
return new UUIDType(this);
case TYPE_DATE:
return new DateType(this);
case TYPE_ARRAY:
return new ObjectArrayType(this);
case TYPE_SERIALIZED_OBJECT:
return new SerializedObjectType(this);
}
throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL,
"Unsupported type {0}", typeId);
}
@Override
public Object read(ByteBuffer buff) {
int tag = buff.get();
int typeId;
if (tag <= TYPE_SERIALIZED_OBJECT) {
typeId = tag;
} else {
switch (tag) {
case TAG_BOOLEAN_TRUE:
typeId = TYPE_BOOLEAN;
break;
case TAG_INTEGER_NEGATIVE:
case TAG_INTEGER_FIXED:
typeId = TYPE_INT;
break;
case TAG_LONG_NEGATIVE:
case TAG_LONG_FIXED:
typeId = TYPE_LONG;
break;
case TAG_BIG_INTEGER_0:
case TAG_BIG_INTEGER_1:
case TAG_BIG_INTEGER_SMALL:
typeId = TYPE_BIG_INTEGER;
break;
case TAG_FLOAT_0:
case TAG_FLOAT_1:
case TAG_FLOAT_FIXED:
typeId = TYPE_FLOAT;
break;
case TAG_DOUBLE_0:
case TAG_DOUBLE_1:
case TAG_DOUBLE_FIXED:
typeId = TYPE_DOUBLE;
break;
case TAG_BIG_DECIMAL_0:
case TAG_BIG_DECIMAL_1:
case TAG_BIG_DECIMAL_SMALL:
case TAG_BIG_DECIMAL_SMALL_SCALED:
typeId = TYPE_BIG_DECIMAL;
break;
default:
if (tag >= TAG_INTEGER_0_15 && tag <= TAG_INTEGER_0_15 + 15) {
typeId = TYPE_INT;
} else if (tag >= TAG_STRING_0_15
&& tag <= TAG_STRING_0_15 + 15) {
typeId = TYPE_STRING;
} else if (tag >= TAG_LONG_0_7 && tag <= TAG_LONG_0_7 + 7) {
typeId = TYPE_LONG;
} else if (tag >= TAG_BYTE_ARRAY_0_15
&& tag <= TAG_BYTE_ARRAY_0_15 + 15) {
typeId = TYPE_ARRAY;
} else {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_FILE_CORRUPT, "Unknown tag {0}",
tag);
}
}
}
AutoDetectDataType t = last;
if (typeId != t.typeId) {
last = t = newType(typeId);
}
return t.read(buff, tag);
}
private static int getTypeId(Object obj) {
if (obj instanceof Integer) {
return TYPE_INT;
} else if (obj instanceof String) {
return TYPE_STRING;
} else if (obj instanceof Long) {
return TYPE_LONG;
} else if (obj instanceof Double) {
return TYPE_DOUBLE;
} else if (obj instanceof Float) {
return TYPE_FLOAT;
} else if (obj instanceof Boolean) {
return TYPE_BOOLEAN;
} else if (obj instanceof UUID) {
return TYPE_UUID;
} else if (obj instanceof Byte) {
return TYPE_BYTE;
} else if (obj instanceof Short) {
return TYPE_SHORT;
} else if (obj instanceof Character) {
return TYPE_CHAR;
} else if (obj == null) {
return TYPE_NULL;
} else if (isDate(obj)) {
return TYPE_DATE;
} else if (isBigInteger(obj)) {
return TYPE_BIG_INTEGER;
} else if (isBigDecimal(obj)) {
return TYPE_BIG_DECIMAL;
} else if (obj.getClass().isArray()) {
return TYPE_ARRAY;
}
return TYPE_SERIALIZED_OBJECT;
}
/**
* Switch the last remembered type to match the type of the given object.
*
* @param obj the object
* @return the auto-detected type used
*/
AutoDetectDataType switchType(Object obj) {
int typeId = getTypeId(obj);
AutoDetectDataType l = last;
if (typeId != l.typeId) {
last = l = newType(typeId);
}
return l;
}
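// Behavior note (added for illustration): switchType re-detects the type
// id on every call but reuses the cached "last" instance while the id is
// unchanged, so homogeneous maps avoid re-allocating type objects.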
/**
* Check whether this object is a BigInteger.
*
* @param obj the object
* @return true if yes
*/
static boolean isBigInteger(Object obj) {
return obj instanceof BigInteger && obj.getClass() == BigInteger.class;
}
/**
* Check whether this object is a BigDecimal.
*
* @param obj the object
* @return true if yes
*/
static boolean isBigDecimal(Object obj) {
return obj instanceof BigDecimal && obj.getClass() == BigDecimal.class;
}
/**
* Check whether this object is a date.
*
* @param obj the object
* @return true if yes
*/
static boolean isDate(Object obj) {
return obj instanceof Date && obj.getClass() == Date.class;
}
/**
* Check whether this object is an array.
*
* @param obj the object
* @return true if yes
*/
static boolean isArray(Object obj) {
return obj != null && obj.getClass().isArray();
}
/**
* Get the class id, or null if not found.
*
* @param clazz the class
* @return the class id or null
*/
static Integer getCommonClassId(Class<?> clazz) {
HashMap<Class<?>, Integer> map = COMMON_CLASSES_MAP;
if (map.size() == 0) {
// lazy initialization
// synchronized, because COMMON_CLASSES_MAP itself is not thread-safe
synchronized (map) {
if (map.size() == 0) {
for (int i = 0, size = COMMON_CLASSES.length; i < size; i++) {
map.put(COMMON_CLASSES[i], i);
}
}
}
}
return map.get(clazz);
}
/**
* Serialize the object to a byte array.
*
* @param obj the object to serialize
* @return the byte array
*/
public static byte[] serialize(Object obj) {
try {
ByteArrayOutputStream out = new ByteArrayOutputStream();
ObjectOutputStream os = new ObjectOutputStream(out);
os.writeObject(obj);
return out.toByteArray();
} catch (Throwable e) {
throw DataUtils.newIllegalArgumentException(
"Could not serialize {0}", obj, e);
}
}
/**
* De-serialize the byte array to an object.
*
* @param data the byte array
* @return the object
*/
public static Object deserialize(byte[] data) {
try {
ByteArrayInputStream in = new ByteArrayInputStream(data);
ObjectInputStream is = new ObjectInputStream(in);
return is.readObject();
} catch (Throwable e) {
throw DataUtils.newIllegalArgumentException(
"Could not deserialize {0}", Arrays.toString(data), e);
}
}
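// Round-trip sketch (added for illustration; the helper is hypothetical):
// the serialize/deserialize pair above is the fallback used for
// TYPE_SERIALIZED_OBJECT when no more specific type applies.
static Object exampleSerializedRoundTrip(java.io.Serializable obj) {
return deserialize(serialize(obj));
}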
/**
* Compare the contents of two byte arrays. If the content or length of the
* first array is smaller than the second array, -1 is returned. If the
* content or length of the second array is smaller than the first array, 1
* is returned. If the contents and lengths are the same, 0 is returned.
* <p>
* This method interprets bytes as unsigned.
*
* @param data1 the first byte array (must not be null)
* @param data2 the second byte array (must not be null)
* @return the result of the comparison (-1, 1 or 0)
*/
public static int compareNotNull(byte[] data1, byte[] data2) {
if (data1 == data2) {
return 0;
}
int len = Math.min(data1.length, data2.length);
for (int i = 0; i < len; i++) {
int b = data1[i] & 255;
int b2 = data2[i] & 255;
if (b != b2) {
return b > b2 ? 1 : -1;
}
}
return Integer.signum(data1.length - data2.length);
}
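// Worked example (added for illustration): compareNotNull(new byte[] { 1 },
// new byte[] { (byte) 0xFF }) returns -1, because bytes are compared as
// unsigned values (1 < 255) even though (byte) 0xFF is negative in Java.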
/**
* The base class for auto-detect data types.
*/
abstract static class AutoDetectDataType implements DataType {
protected final ObjectDataType base;
protected final int typeId;
AutoDetectDataType(ObjectDataType base, int typeId) {
this.base = base;
this.typeId = typeId;
}
@Override
public int getMemory(Object o) {
return getType(o).getMemory(o);
}
@Override
public int compare(Object aObj, Object bObj) {
AutoDetectDataType aType = getType(aObj);
AutoDetectDataType bType = getType(bObj);
int typeDiff = aType.typeId - bType.typeId;
if (typeDiff == 0) {
return aType.compare(aObj, bObj);
}
return Integer.signum(typeDiff);
}
@Override
public void write(WriteBuffer buff, Object[] obj,
int len, boolean key) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
@Override
public void write(WriteBuffer buff, Object o) {
getType(o).write(buff, o);
}
@Override
public void read(ByteBuffer buff, Object[] obj,
int len, boolean key) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
@Override
public final Object read(ByteBuffer buff) {
throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL,
"Internal error");
}
/**
* Get the type for the given object.
*
* @param o the object
* @return the type
*/
AutoDetectDataType getType(Object o) {
return base.switchType(o);
}
/**
* Read an object from the buffer.
*
* @param buff the buffer
* @param tag the first byte of the object (usually the type)
* @return the read object
*/
abstract Object read(ByteBuffer buff, int tag);
}
/**
     * The type for the null value.
*/
static class NullType extends AutoDetectDataType {
NullType(ObjectDataType base) {
super(base, TYPE_NULL);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj == null && bObj == null) {
return 0;
} else if (aObj == null) {
return -1;
} else if (bObj == null) {
return 1;
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj == null ? 0 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (obj != null) {
super.write(buff, obj);
return;
}
buff.put((byte) TYPE_NULL);
}
@Override
public Object read(ByteBuffer buff, int tag) {
return null;
}
}
/**
* The type for boolean true and false.
*/
static class BooleanType extends AutoDetectDataType {
BooleanType(ObjectDataType base) {
super(base, TYPE_BOOLEAN);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Boolean && bObj instanceof Boolean) {
Boolean a = (Boolean) aObj;
Boolean b = (Boolean) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Boolean ? 0 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Boolean)) {
super.write(buff, obj);
return;
}
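            // false is written as the plain TYPE_BOOLEAN tag and true gets
            // its own tag, so neither value needs a payload byte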
int tag = ((Boolean) obj) ? TAG_BOOLEAN_TRUE : TYPE_BOOLEAN;
buff.put((byte) tag);
}
@Override
public Object read(ByteBuffer buff, int tag) {
return tag == TYPE_BOOLEAN ? Boolean.FALSE : Boolean.TRUE;
}
}
/**
* The type for byte objects.
*/
static class ByteType extends AutoDetectDataType {
ByteType(ObjectDataType base) {
super(base, TYPE_BYTE);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Byte && bObj instanceof Byte) {
Byte a = (Byte) aObj;
Byte b = (Byte) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Byte ? 0 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Byte)) {
super.write(buff, obj);
return;
}
buff.put((byte) TYPE_BYTE);
buff.put(((Byte) obj).byteValue());
}
@Override
public Object read(ByteBuffer buff, int tag) {
return buff.get();
}
}
/**
* The type for character objects.
*/
static class CharacterType extends AutoDetectDataType {
CharacterType(ObjectDataType base) {
super(base, TYPE_CHAR);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Character && bObj instanceof Character) {
Character a = (Character) aObj;
Character b = (Character) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Character ? 24 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Character)) {
super.write(buff, obj);
return;
}
buff.put((byte) TYPE_CHAR);
buff.putChar(((Character) obj).charValue());
}
@Override
public Object read(ByteBuffer buff, int tag) {
return buff.getChar();
}
}
/**
* The type for short objects.
*/
static class ShortType extends AutoDetectDataType {
ShortType(ObjectDataType base) {
super(base, TYPE_SHORT);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Short && bObj instanceof Short) {
Short a = (Short) aObj;
Short b = (Short) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Short ? 24 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Short)) {
super.write(buff, obj);
return;
}
buff.put((byte) TYPE_SHORT);
buff.putShort(((Short) obj).shortValue());
}
@Override
public Object read(ByteBuffer buff, int tag) {
return buff.getShort();
}
}
/**
* The type for integer objects.
*/
static class IntegerType extends AutoDetectDataType {
IntegerType(ObjectDataType base) {
super(base, TYPE_INT);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Integer && bObj instanceof Integer) {
Integer a = (Integer) aObj;
Integer b = (Integer) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Integer ? 24 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Integer)) {
super.write(buff, obj);
return;
}
int x = (Integer) obj;
if (x < 0) {
                // negating Integer.MIN_VALUE overflows, so -x can still be negative
if (-x < 0 || -x > DataUtils.COMPRESSED_VAR_INT_MAX) {
buff.put((byte) TAG_INTEGER_FIXED).putInt(x);
} else {
buff.put((byte) TAG_INTEGER_NEGATIVE).putVarInt(-x);
}
} else if (x <= 15) {
buff.put((byte) (TAG_INTEGER_0_15 + x));
} else if (x <= DataUtils.COMPRESSED_VAR_INT_MAX) {
buff.put((byte) TYPE_INT).putVarInt(x);
} else {
buff.put((byte) TAG_INTEGER_FIXED).putInt(x);
}
}
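        // Encoding summary (tag byte first):
        //   0..15                      -> single byte TAG_INTEGER_0_15 + x
        //   16..COMPRESSED_VAR_INT_MAX -> TYPE_INT, then a var-int
        //   small negative             -> TAG_INTEGER_NEGATIVE, then var-int of -x
        //   everything else            -> TAG_INTEGER_FIXED, then 4 fixed bytes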
@Override
public Object read(ByteBuffer buff, int tag) {
switch (tag) {
case TYPE_INT:
return DataUtils.readVarInt(buff);
case TAG_INTEGER_NEGATIVE:
return -DataUtils.readVarInt(buff);
case TAG_INTEGER_FIXED:
return buff.getInt();
}
return tag - TAG_INTEGER_0_15;
}
}
/**
* The type for long objects.
*/
static class LongType extends AutoDetectDataType {
LongType(ObjectDataType base) {
super(base, TYPE_LONG);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Long && bObj instanceof Long) {
Long a = (Long) aObj;
Long b = (Long) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Long ? 30 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Long)) {
super.write(buff, obj);
return;
}
long x = (Long) obj;
if (x < 0) {
                // negating Long.MIN_VALUE overflows, so -x can still be negative
if (-x < 0 || -x > DataUtils.COMPRESSED_VAR_LONG_MAX) {
buff.put((byte) TAG_LONG_FIXED);
buff.putLong(x);
} else {
buff.put((byte) TAG_LONG_NEGATIVE);
buff.putVarLong(-x);
}
} else if (x <= 7) {
buff.put((byte) (TAG_LONG_0_7 + x));
} else if (x <= DataUtils.COMPRESSED_VAR_LONG_MAX) {
buff.put((byte) TYPE_LONG);
buff.putVarLong(x);
} else {
buff.put((byte) TAG_LONG_FIXED);
buff.putLong(x);
}
}
@Override
public Object read(ByteBuffer buff, int tag) {
switch (tag) {
case TYPE_LONG:
return DataUtils.readVarLong(buff);
case TAG_LONG_NEGATIVE:
return -DataUtils.readVarLong(buff);
case TAG_LONG_FIXED:
return buff.getLong();
}
return (long) (tag - TAG_LONG_0_7);
}
}
/**
* The type for float objects.
*/
static class FloatType extends AutoDetectDataType {
FloatType(ObjectDataType base) {
super(base, TYPE_FLOAT);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Float && bObj instanceof Float) {
Float a = (Float) aObj;
Float b = (Float) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Float ? 24 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Float)) {
super.write(buff, obj);
return;
}
float x = (Float) obj;
int f = Float.floatToIntBits(x);
if (f == ObjectDataType.FLOAT_ZERO_BITS) {
buff.put((byte) TAG_FLOAT_0);
} else if (f == ObjectDataType.FLOAT_ONE_BITS) {
buff.put((byte) TAG_FLOAT_1);
} else {
int value = Integer.reverse(f);
if (value >= 0 && value <= DataUtils.COMPRESSED_VAR_INT_MAX) {
buff.put((byte) TYPE_FLOAT).putVarInt(value);
} else {
buff.put((byte) TAG_FLOAT_FIXED).putFloat(x);
}
}
}
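        // Why Integer.reverse: most float values have many trailing zero
        // bits in the mantissa; reversing the bit order turns them into
        // leading zeros, so common values become small non-negative ints
        // that fit in a short var-int.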
@Override
public Object read(ByteBuffer buff, int tag) {
switch (tag) {
case TAG_FLOAT_0:
return 0f;
case TAG_FLOAT_1:
return 1f;
case TAG_FLOAT_FIXED:
return buff.getFloat();
}
return Float.intBitsToFloat(Integer.reverse(DataUtils
.readVarInt(buff)));
}
}
/**
* The type for double objects.
*/
static class DoubleType extends AutoDetectDataType {
DoubleType(ObjectDataType base) {
super(base, TYPE_DOUBLE);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof Double && bObj instanceof Double) {
Double a = (Double) aObj;
Double b = (Double) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return obj instanceof Double ? 30 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof Double)) {
super.write(buff, obj);
return;
}
double x = (Double) obj;
long d = Double.doubleToLongBits(x);
if (d == ObjectDataType.DOUBLE_ZERO_BITS) {
buff.put((byte) TAG_DOUBLE_0);
} else if (d == ObjectDataType.DOUBLE_ONE_BITS) {
buff.put((byte) TAG_DOUBLE_1);
} else {
long value = Long.reverse(d);
if (value >= 0 && value <= DataUtils.COMPRESSED_VAR_LONG_MAX) {
buff.put((byte) TYPE_DOUBLE);
buff.putVarLong(value);
} else {
buff.put((byte) TAG_DOUBLE_FIXED);
buff.putDouble(x);
}
}
}
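        // Same bit-reversal trick as FloatType, using Long.reverse and a
        // var-long for the common case.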
@Override
public Object read(ByteBuffer buff, int tag) {
switch (tag) {
case TAG_DOUBLE_0:
return 0d;
case TAG_DOUBLE_1:
return 1d;
case TAG_DOUBLE_FIXED:
return buff.getDouble();
}
return Double.longBitsToDouble(Long.reverse(DataUtils
.readVarLong(buff)));
}
}
/**
* The type for BigInteger objects.
*/
static class BigIntegerType extends AutoDetectDataType {
BigIntegerType(ObjectDataType base) {
super(base, TYPE_BIG_INTEGER);
}
@Override
public int compare(Object aObj, Object bObj) {
if (isBigInteger(aObj) && isBigInteger(bObj)) {
BigInteger a = (BigInteger) aObj;
BigInteger b = (BigInteger) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return isBigInteger(obj) ? 100 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!isBigInteger(obj)) {
super.write(buff, obj);
return;
}
BigInteger x = (BigInteger) obj;
if (BigInteger.ZERO.equals(x)) {
buff.put((byte) TAG_BIG_INTEGER_0);
} else if (BigInteger.ONE.equals(x)) {
buff.put((byte) TAG_BIG_INTEGER_1);
} else {
int bits = x.bitLength();
if (bits <= 63) {
buff.put((byte) TAG_BIG_INTEGER_SMALL).putVarLong(
x.longValue());
} else {
byte[] bytes = x.toByteArray();
buff.put((byte) TYPE_BIG_INTEGER).putVarInt(bytes.length)
.put(bytes);
}
}
}
@Override
public Object read(ByteBuffer buff, int tag) {
switch (tag) {
case TAG_BIG_INTEGER_0:
return BigInteger.ZERO;
case TAG_BIG_INTEGER_1:
return BigInteger.ONE;
case TAG_BIG_INTEGER_SMALL:
return BigInteger.valueOf(DataUtils.readVarLong(buff));
}
int len = DataUtils.readVarInt(buff);
byte[] bytes = Utils.newBytes(len);
buff.get(bytes);
return new BigInteger(bytes);
}
}
/**
* The type for BigDecimal objects.
*/
static class BigDecimalType extends AutoDetectDataType {
BigDecimalType(ObjectDataType base) {
super(base, TYPE_BIG_DECIMAL);
}
@Override
public int compare(Object aObj, Object bObj) {
if (isBigDecimal(aObj) && isBigDecimal(bObj)) {
BigDecimal a = (BigDecimal) aObj;
BigDecimal b = (BigDecimal) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public int getMemory(Object obj) {
return isBigDecimal(obj) ? 150 : super.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!isBigDecimal(obj)) {
super.write(buff, obj);
return;
}
BigDecimal x = (BigDecimal) obj;
if (BigDecimal.ZERO.equals(x)) {
buff.put((byte) TAG_BIG_DECIMAL_0);
} else if (BigDecimal.ONE.equals(x)) {
buff.put((byte) TAG_BIG_DECIMAL_1);
} else {
int scale = x.scale();
BigInteger b = x.unscaledValue();
int bits = b.bitLength();
if (bits < 64) {
if (scale == 0) {
buff.put((byte) TAG_BIG_DECIMAL_SMALL);
} else {
buff.put((byte) TAG_BIG_DECIMAL_SMALL_SCALED)
.putVarInt(scale);
}
buff.putVarLong(b.longValue());
} else {
byte[] bytes = b.toByteArray();
buff.put((byte) TYPE_BIG_DECIMAL).putVarInt(scale)
.putVarInt(bytes.length).put(bytes);
}
}
}
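        // Encoding summary: ZERO and ONE get dedicated tags; a value whose
        // unscaled part has at most 63 significant bits is written as an
        // optional var-int scale plus a var-long; everything else falls
        // back to scale, length, and the two's-complement bytes of the
        // unscaled BigInteger.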
@Override
public Object read(ByteBuffer buff, int tag) {
switch (tag) {
case TAG_BIG_DECIMAL_0:
return BigDecimal.ZERO;
case TAG_BIG_DECIMAL_1:
return BigDecimal.ONE;
case TAG_BIG_DECIMAL_SMALL:
return BigDecimal.valueOf(DataUtils.readVarLong(buff));
case TAG_BIG_DECIMAL_SMALL_SCALED:
int scale = DataUtils.readVarInt(buff);
return BigDecimal.valueOf(DataUtils.readVarLong(buff), scale);
}
int scale = DataUtils.readVarInt(buff);
int len = DataUtils.readVarInt(buff);
byte[] bytes = Utils.newBytes(len);
buff.get(bytes);
BigInteger b = new BigInteger(bytes);
return new BigDecimal(b, scale);
}
}
/**
* The type for string objects.
*/
static class StringType extends AutoDetectDataType {
StringType(ObjectDataType base) {
super(base, TYPE_STRING);
}
@Override
public int getMemory(Object obj) {
if (!(obj instanceof String)) {
return super.getMemory(obj);
}
return 24 + 2 * obj.toString().length();
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof String && bObj instanceof String) {
return aObj.toString().compareTo(bObj.toString());
}
return super.compare(aObj, bObj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof String)) {
super.write(buff, obj);
return;
}
String s = (String) obj;
int len = s.length();
if (len <= 15) {
buff.put((byte) (TAG_STRING_0_15 + len));
} else {
buff.put((byte) TYPE_STRING).putVarInt(len);
}
buff.putStringData(s, len);
}
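        // Strings of at most 15 characters need no separate length field:
        // the length is folded into the tag byte (TAG_STRING_0_15 + len).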
@Override
public Object read(ByteBuffer buff, int tag) {
int len;
if (tag == TYPE_STRING) {
len = DataUtils.readVarInt(buff);
} else {
len = tag - TAG_STRING_0_15;
}
return DataUtils.readString(buff, len);
}
}
/**
* The type for UUID objects.
*/
static class UUIDType extends AutoDetectDataType {
UUIDType(ObjectDataType base) {
super(base, TYPE_UUID);
}
@Override
public int getMemory(Object obj) {
return obj instanceof UUID ? 40 : super.getMemory(obj);
}
@Override
public int compare(Object aObj, Object bObj) {
if (aObj instanceof UUID && bObj instanceof UUID) {
UUID a = (UUID) aObj;
UUID b = (UUID) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!(obj instanceof UUID)) {
super.write(buff, obj);
return;
}
buff.put((byte) TYPE_UUID);
UUID a = (UUID) obj;
buff.putLong(a.getMostSignificantBits());
buff.putLong(a.getLeastSignificantBits());
}
@Override
public Object read(ByteBuffer buff, int tag) {
long a = buff.getLong(), b = buff.getLong();
return new UUID(a, b);
}
}
/**
* The type for java.util.Date objects.
*/
static class DateType extends AutoDetectDataType {
DateType(ObjectDataType base) {
super(base, TYPE_DATE);
}
@Override
public int getMemory(Object obj) {
return isDate(obj) ? 40 : super.getMemory(obj);
}
@Override
public int compare(Object aObj, Object bObj) {
if (isDate(aObj) && isDate(bObj)) {
Date a = (Date) aObj;
Date b = (Date) bObj;
return a.compareTo(b);
}
return super.compare(aObj, bObj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
if (!isDate(obj)) {
super.write(buff, obj);
return;
}
buff.put((byte) TYPE_DATE);
Date a = (Date) obj;
buff.putLong(a.getTime());
}
@Override
public Object read(ByteBuffer buff, int tag) {
long a = buff.getLong();
return new Date(a);
}
}
/**
* The type for object arrays.
*/
static class ObjectArrayType extends AutoDetectDataType {
private final ObjectDataType elementType = new ObjectDataType();
ObjectArrayType(ObjectDataType base) {
super(base, TYPE_ARRAY);
}
@Override
public int getMemory(Object obj) {
if (!isArray(obj)) {
return super.getMemory(obj);
}
int size = 64;
Class<?> type = obj.getClass().getComponentType();
if (type.isPrimitive()) {
int len = Array.getLength(obj);
if (type == boolean.class) {
size += len;
} else if (type == byte.class) {
size += len;
} else if (type == char.class) {
size += len * 2;
} else if (type == short.class) {
size += len * 2;
} else if (type == int.class) {
size += len * 4;
} else if (type == float.class) {
size += len * 4;
} else if (type == double.class) {
size += len * 8;
} else if (type == long.class) {
size += len * 8;
}
} else {
for (Object x : (Object[]) obj) {
if (x != null) {
size += elementType.getMemory(x);
}
}
}
            // report a larger size, because these objects
            // use quite a lot of disk space
return size * 2;
}
@Override
public int compare(Object aObj, Object bObj) {
if (!isArray(aObj) || !isArray(bObj)) {
return super.compare(aObj, bObj);
}
if (aObj == bObj) {
return 0;
}
Class<?> type = aObj.getClass().getComponentType();
Class<?> bType = bObj.getClass().getComponentType();
if (type != bType) {
Integer classA = getCommonClassId(type);
Integer classB = getCommonClassId(bType);
if (classA != null) {
if (classB != null) {
return classA.compareTo(classB);
}
return -1;
} else if (classB != null) {
return 1;
}
return type.getName().compareTo(bType.getName());
}
int aLen = Array.getLength(aObj);
int bLen = Array.getLength(bObj);
int len = Math.min(aLen, bLen);
if (type.isPrimitive()) {
if (type == byte.class) {
byte[] a = (byte[]) aObj;
byte[] b = (byte[]) bObj;
return compareNotNull(a, b);
}
for (int i = 0; i < len; i++) {
int x;
if (type == boolean.class) {
x = Integer.signum((((boolean[]) aObj)[i] ? 1 : 0)
- (((boolean[]) bObj)[i] ? 1 : 0));
} else if (type == char.class) {
x = Integer.signum((((char[]) aObj)[i])
- (((char[]) bObj)[i]));
} else if (type == short.class) {
x = Integer.signum((((short[]) aObj)[i])
- (((short[]) bObj)[i]));
} else if (type == int.class) {
int a = ((int[]) aObj)[i];
int b = ((int[]) bObj)[i];
x = Integer.compare(a, b);
} else if (type == float.class) {
x = Float.compare(((float[]) aObj)[i],
((float[]) bObj)[i]);
} else if (type == double.class) {
x = Double.compare(((double[]) aObj)[i],
((double[]) bObj)[i]);
} else {
long a = ((long[]) aObj)[i];
long b = ((long[]) bObj)[i];
x = Long.compare(a, b);
}
if (x != 0) {
return x;
}
}
} else {
Object[] a = (Object[]) aObj;
Object[] b = (Object[]) bObj;
for (int i = 0; i < len; i++) {
int comp = elementType.compare(a[i], b[i]);
if (comp != 0) {
return comp;
}
}
}
return Integer.compare(aLen, bLen);
}
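        // Ordering rules: arrays with different component types compare by
        // common-class id (known types first), then by class name; arrays
        // of the same type compare element by element, and an equal prefix
        // is decided by length, so {1, 2} < {1, 2, 0}.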
@Override
public void write(WriteBuffer buff, Object obj) {
if (!isArray(obj)) {
super.write(buff, obj);
return;
}
Class<?> type = obj.getClass().getComponentType();
Integer classId = getCommonClassId(type);
if (classId != null) {
if (type.isPrimitive()) {
if (type == byte.class) {
byte[] data = (byte[]) obj;
int len = data.length;
if (len <= 15) {
buff.put((byte) (TAG_BYTE_ARRAY_0_15 + len));
} else {
buff.put((byte) TYPE_ARRAY)
.put((byte) classId.intValue())
.putVarInt(len);
}
buff.put(data);
return;
}
int len = Array.getLength(obj);
buff.put((byte) TYPE_ARRAY).put((byte) classId.intValue())
.putVarInt(len);
for (int i = 0; i < len; i++) {
if (type == boolean.class) {
buff.put((byte) (((boolean[]) obj)[i] ? 1 : 0));
} else if (type == char.class) {
buff.putChar(((char[]) obj)[i]);
} else if (type == short.class) {
buff.putShort(((short[]) obj)[i]);
} else if (type == int.class) {
buff.putInt(((int[]) obj)[i]);
} else if (type == float.class) {
buff.putFloat(((float[]) obj)[i]);
} else if (type == double.class) {
buff.putDouble(((double[]) obj)[i]);
} else {
buff.putLong(((long[]) obj)[i]);
}
}
return;
}
buff.put((byte) TYPE_ARRAY).put((byte) classId.intValue());
} else {
buff.put((byte) TYPE_ARRAY).put((byte) -1);
String c = type.getName();
StringDataType.INSTANCE.write(buff, c);
}
Object[] array = (Object[]) obj;
int len = array.length;
buff.putVarInt(len);
for (Object x : array) {
elementType.write(buff, x);
}
}
@Override
public Object read(ByteBuffer buff, int tag) {
if (tag != TYPE_ARRAY) {
byte[] data;
int len = tag - TAG_BYTE_ARRAY_0_15;
data = Utils.newBytes(len);
buff.get(data);
return data;
}
int ct = buff.get();
Class<?> clazz;
Object obj;
if (ct == -1) {
String componentType = StringDataType.INSTANCE.read(buff);
try {
clazz = Class.forName(componentType);
} catch (Exception e) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_SERIALIZATION,
"Could not get class {0}", componentType, e);
}
} else {
clazz = COMMON_CLASSES[ct];
}
int len = DataUtils.readVarInt(buff);
try {
obj = Array.newInstance(clazz, len);
} catch (Exception e) {
throw DataUtils.newIllegalStateException(
DataUtils.ERROR_SERIALIZATION,
"Could not create array of type {0} length {1}", clazz,
len, e);
}
if (clazz.isPrimitive()) {
for (int i = 0; i < len; i++) {
if (clazz == boolean.class) {
((boolean[]) obj)[i] = buff.get() == 1;
} else if (clazz == byte.class) {
((byte[]) obj)[i] = buff.get();
} else if (clazz == char.class) {
((char[]) obj)[i] = buff.getChar();
} else if (clazz == short.class) {
((short[]) obj)[i] = buff.getShort();
} else if (clazz == int.class) {
((int[]) obj)[i] = buff.getInt();
} else if (clazz == float.class) {
((float[]) obj)[i] = buff.getFloat();
} else if (clazz == double.class) {
((double[]) obj)[i] = buff.getDouble();
} else {
((long[]) obj)[i] = buff.getLong();
}
}
} else {
Object[] array = (Object[]) obj;
for (int i = 0; i < len; i++) {
array[i] = elementType.read(buff);
}
}
return obj;
}
}
/**
* The type for serialized objects.
*/
static class SerializedObjectType extends AutoDetectDataType {
private int averageSize = 10_000;
SerializedObjectType(ObjectDataType base) {
super(base, TYPE_SERIALIZED_OBJECT);
}
@SuppressWarnings("unchecked")
@Override
public int compare(Object aObj, Object bObj) {
if (aObj == bObj) {
return 0;
}
DataType ta = getType(aObj);
DataType tb = getType(bObj);
if (ta != this || tb != this) {
if (ta == tb) {
return ta.compare(aObj, bObj);
}
return super.compare(aObj, bObj);
}
// TODO ensure comparable type (both may be comparable but not
// with each other)
if (aObj instanceof Comparable) {
if (aObj.getClass().isAssignableFrom(bObj.getClass())) {
return ((Comparable<Object>) aObj).compareTo(bObj);
}
}
if (bObj instanceof Comparable) {
if (bObj.getClass().isAssignableFrom(aObj.getClass())) {
return -((Comparable<Object>) bObj).compareTo(aObj);
}
}
byte[] a = serialize(aObj);
byte[] b = serialize(bObj);
return compareNotNull(a, b);
}
@Override
public int getMemory(Object obj) {
DataType t = getType(obj);
if (t == this) {
return averageSize;
}
return t.getMemory(obj);
}
@Override
public void write(WriteBuffer buff, Object obj) {
DataType t = getType(obj);
if (t != this) {
t.write(buff, obj);
return;
}
byte[] data = serialize(obj);
            // report a larger size, because these objects
            // use quite a lot of disk space
int size = data.length * 2;
// adjust the average size
// using an exponential moving average
averageSize = (size + 15 * averageSize) / 16;
buff.put((byte) TYPE_SERIALIZED_OBJECT).putVarInt(data.length)
.put(data);
}
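        // The running average uses an exponential moving average with
        // alpha = 1/16: averageSize' = size / 16 + averageSize * 15 / 16.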
@Override
public Object read(ByteBuffer buff, int tag) {
int len = DataUtils.readVarInt(buff);
byte[] data = Utils.newBytes(len);
int size = data.length * 2;
// adjust the average size
// using an exponential moving average
averageSize = (size + 15 * averageSize) / 16;
buff.get(data);
return deserialize(data);
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/mvstore/type/StringDataType.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.mvstore.type;
import java.nio.ByteBuffer;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.WriteBuffer;
/**
* A string type.
*/
public class StringDataType implements DataType {
public static final StringDataType INSTANCE = new StringDataType();
@Override
public int compare(Object a, Object b) {
return a.toString().compareTo(b.toString());
}
@Override
public int getMemory(Object obj) {
return 24 + 2 * obj.toString().length();
}
@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
}
}
@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
}
}
@Override
public String read(ByteBuffer buff) {
int len = DataUtils.readVarInt(buff);
return DataUtils.readString(buff, len);
}
@Override
public void write(WriteBuffer buff, Object obj) {
String s = obj.toString();
int len = s.length();
buff.putVarInt(len).putStringData(s, len);
}
}
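// A minimal round-trip sketch (assuming WriteBuffer's no-arg constructor
// and getBuffer(), as used elsewhere in the MVStore code):
//
//   WriteBuffer buff = new WriteBuffer();
//   StringDataType.INSTANCE.write(buff, "hello");
//   ByteBuffer bb = buff.getBuffer();
//   bb.flip();
//   String s = StringDataType.INSTANCE.read(bb); // "hello"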
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/LazyResult.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.engine.SessionInterface;
import org.h2.expression.Expression;
import org.h2.message.DbException;
import org.h2.value.Value;
/**
* Lazy execution support for queries.
*
* @author Sergi Vladykin
*/
public abstract class LazyResult implements ResultInterface {
private final Expression[] expressions;
private int rowId = -1;
private Value[] currentRow;
private Value[] nextRow;
private boolean closed;
private boolean afterLast;
private int limit;
public LazyResult(Expression[] expressions) {
this.expressions = expressions;
}
public void setLimit(int limit) {
this.limit = limit;
}
@Override
public boolean isLazy() {
return true;
}
@Override
public void reset() {
if (closed) {
throw DbException.throwInternalError();
}
rowId = -1;
afterLast = false;
currentRow = null;
nextRow = null;
}
@Override
public Value[] currentRow() {
return currentRow;
}
@Override
public boolean next() {
if (hasNext()) {
rowId++;
currentRow = nextRow;
nextRow = null;
return true;
}
if (!afterLast) {
rowId++;
currentRow = null;
afterLast = true;
}
return false;
}
@Override
public boolean hasNext() {
if (closed || afterLast) {
return false;
}
if (nextRow == null && (limit <= 0 || rowId + 1 < limit)) {
nextRow = fetchNextRow();
}
return nextRow != null;
}
/**
     * Fetch the next row, or null if none is available.
*
* @return next row or null
*/
protected abstract Value[] fetchNextRow();
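    // A minimal subclass only needs to produce rows on demand; a hedged
    // sketch generating n single-column rows (n and expressions are
    // assumed to be in scope and effectively final):
    //
    //   LazyResult r = new LazyResult(expressions) {
    //       private int i;
    //       @Override
    //       protected Value[] fetchNextRow() {
    //           return i < n ? new Value[] { ValueInt.get(i++) } : null;
    //       }
    //   };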
@Override
public boolean isAfterLast() {
return afterLast;
}
@Override
public int getRowId() {
return rowId;
}
@Override
public int getRowCount() {
throw DbException.getUnsupportedException("Row count is unknown for lazy result.");
}
@Override
public boolean needToClose() {
return true;
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public void close() {
closed = true;
}
@Override
public String getAlias(int i) {
return expressions[i].getAlias();
}
@Override
public String getSchemaName(int i) {
return expressions[i].getSchemaName();
}
@Override
public String getTableName(int i) {
return expressions[i].getTableName();
}
@Override
public String getColumnName(int i) {
return expressions[i].getColumnName();
}
@Override
public int getColumnType(int i) {
return expressions[i].getType();
}
@Override
public long getColumnPrecision(int i) {
return expressions[i].getPrecision();
}
@Override
public int getColumnScale(int i) {
return expressions[i].getScale();
}
@Override
public int getDisplaySize(int i) {
return expressions[i].getDisplaySize();
}
@Override
public boolean isAutoIncrement(int i) {
return expressions[i].isAutoIncrement();
}
@Override
public int getNullable(int i) {
return expressions[i].getNullable();
}
@Override
public void setFetchSize(int fetchSize) {
// ignore
}
@Override
public int getFetchSize() {
// We always fetch rows one by one.
return 1;
}
@Override
public ResultInterface createShallowCopy(SessionInterface targetSession) {
// Copying is impossible with lazy result.
return null;
}
@Override
public boolean containsDistinct(Value[] values) {
// We have to make sure that we do not allow lazy
// evaluation when this call is needed:
// WHERE x IN (SELECT ...).
throw DbException.throwInternalError();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/LocalResult.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.engine.SessionInterface;
import org.h2.expression.Expression;
import org.h2.message.DbException;
import org.h2.util.New;
import org.h2.util.ValueHashMap;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueArray;
/**
* A local result set contains all row data of a result set.
 * This is the object generated by the engine,
* and it is also used directly by the ResultSet class in the embedded mode.
* If the result does not fit in memory, it is written to a temporary file.
*/
public class LocalResult implements ResultInterface, ResultTarget {
private int maxMemoryRows;
private Session session;
private int visibleColumnCount;
private Expression[] expressions;
private int rowId, rowCount;
private ArrayList<Value[]> rows;
private SortOrder sort;
private ValueHashMap<Value[]> distinctRows;
private Value[] currentRow;
private int offset;
private int limit = -1;
private ResultExternal external;
private int diskOffset;
private boolean distinct;
private boolean randomAccess;
private boolean closed;
private boolean containsLobs;
/**
* Construct a local result object.
*/
public LocalResult() {
// nothing to do
}
/**
* Construct a local result object.
*
* @param session the session
* @param expressions the expression array
* @param visibleColumnCount the number of visible columns
*/
public LocalResult(Session session, Expression[] expressions,
int visibleColumnCount) {
this.session = session;
if (session == null) {
this.maxMemoryRows = Integer.MAX_VALUE;
} else {
Database db = session.getDatabase();
if (db.isPersistent() && !db.isReadOnly()) {
this.maxMemoryRows = session.getDatabase().getMaxMemoryRows();
} else {
this.maxMemoryRows = Integer.MAX_VALUE;
}
}
rows = New.arrayList();
this.visibleColumnCount = visibleColumnCount;
rowId = -1;
this.expressions = expressions;
}
@Override
public boolean isLazy() {
return false;
}
public void setMaxMemoryRows(int maxValue) {
this.maxMemoryRows = maxValue;
}
/**
* Construct a local result set by reading all data from a regular result
* set.
*
* @param session the session
* @param rs the result set
* @param maxrows the maximum number of rows to read (0 for no limit)
* @return the local result set
*/
public static LocalResult read(Session session, ResultSet rs, int maxrows) {
Expression[] cols = Expression.getExpressionColumns(session, rs);
int columnCount = cols.length;
LocalResult result = new LocalResult(session, cols, columnCount);
try {
for (int i = 0; (maxrows == 0 || i < maxrows) && rs.next(); i++) {
Value[] list = new Value[columnCount];
for (int j = 0; j < columnCount; j++) {
int type = result.getColumnType(j);
list[j] = DataType.readValue(session, rs, j + 1, type);
}
result.addRow(list);
}
} catch (SQLException e) {
throw DbException.convert(e);
}
result.done();
return result;
}
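    // Typical use when materializing a JDBC result set (session and rs
    // are assumed to be in scope):
    //
    //   LocalResult result = LocalResult.read(session, rs, 0); // no limit
    //   while (result.next()) {
    //       Value[] row = result.currentRow();
    //       // ... process row ...
    //   }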
/**
     * Create a shallow copy of the result set. The data and the temporary
     * table (if there is one) are not copied.
*
* @param targetSession the session of the copy
* @return the copy if possible, or null if copying is not possible
*/
@Override
public LocalResult createShallowCopy(SessionInterface targetSession) {
if (external == null && (rows == null || rows.size() < rowCount)) {
return null;
}
if (containsLobs) {
return null;
}
ResultExternal e2 = null;
if (external != null) {
e2 = external.createShallowCopy();
if (e2 == null) {
return null;
}
}
LocalResult copy = new LocalResult();
copy.maxMemoryRows = this.maxMemoryRows;
copy.session = (Session) targetSession;
copy.visibleColumnCount = this.visibleColumnCount;
copy.expressions = this.expressions;
copy.rowId = -1;
copy.rowCount = this.rowCount;
copy.rows = this.rows;
copy.sort = this.sort;
copy.distinctRows = this.distinctRows;
copy.distinct = distinct;
copy.randomAccess = randomAccess;
copy.currentRow = null;
copy.offset = 0;
copy.limit = -1;
copy.external = e2;
copy.diskOffset = this.diskOffset;
return copy;
}
/**
* Set the sort order.
*
* @param sort the sort order
*/
public void setSortOrder(SortOrder sort) {
this.sort = sort;
}
/**
* Remove duplicate rows.
*/
public void setDistinct() {
distinct = true;
distinctRows = ValueHashMap.newInstance();
}
/**
* Random access is required (containsDistinct).
*/
public void setRandomAccess() {
this.randomAccess = true;
}
/**
* Remove the row from the result set if it exists.
*
* @param values the row
*/
public void removeDistinct(Value[] values) {
if (!distinct) {
DbException.throwInternalError();
}
if (distinctRows != null) {
ValueArray array = ValueArray.get(values);
distinctRows.remove(array);
rowCount = distinctRows.size();
} else {
rowCount = external.removeRow(values);
}
}
/**
* Check if this result set contains the given row.
*
* @param values the row
* @return true if the row exists
*/
@Override
public boolean containsDistinct(Value[] values) {
if (external != null) {
return external.contains(values);
}
if (distinctRows == null) {
distinctRows = ValueHashMap.newInstance();
for (Value[] row : rows) {
ValueArray array = getArrayOfVisible(row);
distinctRows.put(array, array.getList());
}
}
ValueArray array = ValueArray.get(values);
return distinctRows.get(array) != null;
}
@Override
public void reset() {
rowId = -1;
currentRow = null;
if (external != null) {
external.reset();
if (diskOffset > 0) {
for (int i = 0; i < diskOffset; i++) {
external.next();
}
}
}
}
@Override
public Value[] currentRow() {
return currentRow;
}
@Override
public boolean next() {
if (!closed && rowId < rowCount) {
rowId++;
if (rowId < rowCount) {
if (external != null) {
currentRow = external.next();
} else {
currentRow = rows.get(rowId);
}
return true;
}
currentRow = null;
}
return false;
}
@Override
public int getRowId() {
return rowId;
}
@Override
public boolean isAfterLast() {
return rowId >= rowCount;
}
private void cloneLobs(Value[] values) {
for (int i = 0; i < values.length; i++) {
Value v = values[i];
Value v2 = v.copyToResult();
if (v2 != v) {
containsLobs = true;
session.addTemporaryLob(v2);
values[i] = v2;
}
}
}
private ValueArray getArrayOfVisible(Value[] values) {
if (values.length > visibleColumnCount) {
values = Arrays.copyOf(values, visibleColumnCount);
}
return ValueArray.get(values);
}
/**
* Add a row to this object.
*
* @param values the row to add
*/
@Override
public void addRow(Value[] values) {
cloneLobs(values);
if (distinct) {
if (distinctRows != null) {
ValueArray array = getArrayOfVisible(values);
distinctRows.put(array, values);
rowCount = distinctRows.size();
if (rowCount > maxMemoryRows) {
external = new ResultTempTable(session, expressions, true, sort);
rowCount = external.addRows(distinctRows.values());
distinctRows = null;
}
} else {
rowCount = external.addRow(values);
}
return;
}
rows.add(values);
rowCount++;
if (rows.size() > maxMemoryRows) {
if (external == null) {
external = new ResultTempTable(session, expressions, false, sort);
}
addRowsToDisk();
}
}
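    // Rows are buffered in memory until maxMemoryRows is exceeded; from
    // then on they are flushed to a ResultTempTable, so memory use stays
    // bounded regardless of the result size.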
private void addRowsToDisk() {
rowCount = external.addRows(rows);
rows.clear();
}
@Override
public int getVisibleColumnCount() {
return visibleColumnCount;
}
/**
* This method is called after all rows have been added.
*/
public void done() {
if (distinct) {
if (distinctRows != null) {
rows = distinctRows.values();
} else {
if (external != null && sort != null) {
// external sort
ResultExternal temp = external;
external = null;
temp.reset();
rows = New.arrayList();
// TODO use offset directly if possible
while (true) {
Value[] list = temp.next();
if (list == null) {
break;
}
if (external == null) {
external = new ResultTempTable(session, expressions, true, sort);
}
rows.add(list);
if (rows.size() > maxMemoryRows) {
rowCount = external.addRows(rows);
rows.clear();
}
}
temp.close();
// the remaining data in rows is written in the following
// lines
}
}
}
if (external != null) {
addRowsToDisk();
external.done();
} else {
if (sort != null) {
if (offset > 0 || limit > 0) {
sort.sort(rows, offset, limit < 0 ? rows.size() : limit);
} else {
sort.sort(rows);
}
}
}
applyOffset();
applyLimit();
reset();
}
@Override
public int getRowCount() {
return rowCount;
}
@Override
public boolean hasNext() {
return !closed && rowId < rowCount - 1;
}
/**
* Set the number of rows that this result will return at the maximum.
*
* @param limit the limit (-1 means no limit, 0 means no rows)
*/
public void setLimit(int limit) {
this.limit = limit;
}
private void applyLimit() {
if (limit < 0) {
return;
}
if (external == null) {
if (rows.size() > limit) {
rows = new ArrayList<>(rows.subList(0, limit));
rowCount = limit;
distinctRows = null;
}
} else {
if (limit < rowCount) {
rowCount = limit;
distinctRows = null;
}
}
}
@Override
public boolean needToClose() {
return external != null;
}
@Override
public void close() {
if (external != null) {
external.close();
external = null;
closed = true;
}
}
@Override
public String getAlias(int i) {
return expressions[i].getAlias();
}
@Override
public String getTableName(int i) {
return expressions[i].getTableName();
}
@Override
public String getSchemaName(int i) {
return expressions[i].getSchemaName();
}
@Override
public int getDisplaySize(int i) {
return expressions[i].getDisplaySize();
}
@Override
public String getColumnName(int i) {
return expressions[i].getColumnName();
}
@Override
public int getColumnType(int i) {
return expressions[i].getType();
}
@Override
public long getColumnPrecision(int i) {
return expressions[i].getPrecision();
}
@Override
public int getNullable(int i) {
return expressions[i].getNullable();
}
@Override
public boolean isAutoIncrement(int i) {
return expressions[i].isAutoIncrement();
}
@Override
public int getColumnScale(int i) {
return expressions[i].getScale();
}
/**
* Set the offset of the first row to return.
*
* @param offset the offset
*/
public void setOffset(int offset) {
this.offset = offset;
}
private void applyOffset() {
if (offset <= 0) {
return;
}
if (external == null) {
if (offset >= rows.size()) {
rows.clear();
rowCount = 0;
} else {
// avoid copying the whole array for each row
int remove = Math.min(offset, rows.size());
rows = new ArrayList<>(rows.subList(remove, rows.size()));
rowCount -= remove;
}
} else {
if (offset >= rowCount) {
rowCount = 0;
} else {
diskOffset = offset;
rowCount -= offset;
}
}
distinctRows = null;
}
@Override
public String toString() {
return super.toString() + " columns: " + visibleColumnCount +
" rows: " + rowCount + " pos: " + rowId;
}
/**
* Check if this result set is closed.
*
* @return true if it is
*/
@Override
public boolean isClosed() {
return closed;
}
@Override
public int getFetchSize() {
return 0;
}
@Override
public void setFetchSize(int fetchSize) {
// ignore
}
}
|