index
int64 | repo_id
string | file_path
string | content
string |
|---|---|---|---|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/ResultColumn.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.io.IOException;
import org.h2.value.Transfer;
/**
* A result set column of a remote result.
*/
public class ResultColumn {

    /**
     * The column alias.
     */
    final String alias;

    /**
     * The schema name or null.
     */
    final String schemaName;

    /**
     * The table name or null.
     */
    final String tableName;

    /**
     * The column name or null.
     */
    final String columnName;

    /**
     * The value type of this column.
     */
    final int columnType;

    /**
     * The precision.
     */
    final long precision;

    /**
     * The scale.
     */
    final int scale;

    /**
     * The expected display size.
     */
    final int displaySize;

    /**
     * True if this is an autoincrement column.
     */
    final boolean autoIncrement;

    /**
     * The nullable flag of this column. Note that this is an int, not a
     * boolean (presumably one of the Column.NULLABLE_* constants, matching
     * the @return doc of ResultInterface.getNullable — confirm).
     */
    final int nullable;

    /**
     * Read a result column from the given transfer object. The read order
     * here must stay in sync with the write order in {@link #writeColumn}.
     *
     * @param in the object from where to read the data
     * @throws IOException on a transfer error
     */
    ResultColumn(Transfer in) throws IOException {
        alias = in.readString();
        schemaName = in.readString();
        tableName = in.readString();
        columnName = in.readString();
        columnType = in.readInt();
        precision = in.readLong();
        scale = in.readInt();
        displaySize = in.readInt();
        autoIncrement = in.readBoolean();
        nullable = in.readInt();
    }

    /**
     * Write a result column to the given output. The write order here must
     * stay in sync with the read order in the constructor.
     *
     * @param out the object to where to write the data
     * @param result the result
     * @param i the column index
     * @throws IOException on a transfer error
     */
    public static void writeColumn(Transfer out, ResultInterface result, int i)
            throws IOException {
        out.writeString(result.getAlias(i));
        out.writeString(result.getSchemaName(i));
        out.writeString(result.getTableName(i));
        out.writeString(result.getColumnName(i));
        out.writeInt(result.getColumnType(i));
        out.writeLong(result.getColumnPrecision(i));
        out.writeInt(result.getColumnScale(i));
        out.writeInt(result.getDisplaySize(i));
        out.writeBoolean(result.isAutoIncrement(i));
        out.writeInt(result.getNullable(i));
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/ResultExternal.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.util.ArrayList;
import org.h2.value.Value;
/**
* This interface is used to extend the LocalResult class, if data does not fit
* in memory.
*/
public interface ResultExternal {

    /**
     * Reset the current read position of this object to before the first row.
     */
    void reset();

    /**
     * Get the next row from the result.
     *
     * @return the next row, or null if there are no more rows
     */
    Value[] next();

    /**
     * Add a row to this object.
     *
     * @param values the row to add
     * @return the new number of rows in this object
     */
    int addRow(Value[] values);

    /**
     * Add a number of rows to the result.
     *
     * @param rows the list of rows to add
     * @return the new number of rows in this object
     */
    int addRows(ArrayList<Value[]> rows);

    /**
     * This method is called after all rows have been added.
     */
    void done();

    /**
     * Close this object and delete the temporary file.
     */
    void close();

    /**
     * Remove the row with the given values from this object if such a row
     * exists.
     *
     * @param values the row
     * @return the new row count
     */
    int removeRow(Value[] values);

    /**
     * Check if the given row exists in this object.
     *
     * @param values the row
     * @return true if it exists
     */
    boolean contains(Value[] values);

    /**
     * Create a shallow copy of this object if possible.
     *
     * @return the shallow copy, or null if copying is not supported
     */
    ResultExternal createShallowCopy();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/ResultInterface.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.engine.SessionInterface;
import org.h2.value.Value;
/**
* The result interface is used by the LocalResult and ResultRemote class.
* A result may contain rows, or just an update count.
*/
public interface ResultInterface extends AutoCloseable {

    /**
     * Go to the beginning of the result, that means
     * before the first row.
     */
    void reset();

    /**
     * Get the current row.
     *
     * @return the row
     */
    Value[] currentRow();

    /**
     * Go to the next row.
     *
     * @return true if a row exists
     */
    boolean next();

    /**
     * Get the current row id, starting with 0.
     * -1 is returned when next() was not called yet.
     *
     * @return the row id
     */
    int getRowId();

    /**
     * Check if the current position is after the last row.
     *
     * @return true if after last
     */
    boolean isAfterLast();

    /**
     * Get the number of visible columns.
     * More columns may exist internally for sorting or grouping.
     *
     * @return the number of columns
     */
    int getVisibleColumnCount();

    /**
     * Get the number of rows in this object.
     *
     * @return the number of rows
     */
    int getRowCount();

    /**
     * Check if this result has more rows to fetch.
     *
     * @return true if it has
     */
    boolean hasNext();

    /**
     * Check if this result set should be closed, for example because it is
     * buffered using a temporary file.
     *
     * @return true if close should be called.
     */
    boolean needToClose();

    /**
     * Close the result and delete any temporary files.
     */
    @Override
    void close();

    /**
     * Get the column alias name for the column.
     *
     * @param i the column number (starting with 0)
     * @return the alias name
     */
    String getAlias(int i);

    /**
     * Get the schema name for the column, if one exists.
     *
     * @param i the column number (starting with 0)
     * @return the schema name or null
     */
    String getSchemaName(int i);

    /**
     * Get the table name for the column, if one exists.
     *
     * @param i the column number (starting with 0)
     * @return the table name or null
     */
    String getTableName(int i);

    /**
     * Get the column name.
     *
     * @param i the column number (starting with 0)
     * @return the column name
     */
    String getColumnName(int i);

    /**
     * Get the column data type.
     *
     * @param i the column number (starting with 0)
     * @return the column data type
     */
    int getColumnType(int i);

    /**
     * Get the precision for this column.
     *
     * @param i the column number (starting with 0)
     * @return the precision
     */
    long getColumnPrecision(int i);

    /**
     * Get the scale for this column.
     *
     * @param i the column number (starting with 0)
     * @return the scale
     */
    int getColumnScale(int i);

    /**
     * Get the display size for this column.
     *
     * @param i the column number (starting with 0)
     * @return the display size
     */
    int getDisplaySize(int i);

    /**
     * Check if this is an auto-increment column.
     *
     * @param i the column number (starting with 0)
     * @return true for auto-increment columns
     */
    boolean isAutoIncrement(int i);

    /**
     * Check if this column is nullable.
     *
     * @param i the column number (starting with 0)
     * @return Column.NULLABLE_*
     */
    int getNullable(int i);

    /**
     * Set the fetch size for this result set.
     *
     * @param fetchSize the new fetch size
     */
    void setFetchSize(int fetchSize);

    /**
     * Get the current fetch size for this result set.
     *
     * @return the fetch size
     */
    int getFetchSize();

    /**
     * Check if this is a lazy execution result.
     *
     * @return true if it is a lazy result
     */
    boolean isLazy();

    /**
     * Check if this result set is closed.
     *
     * @return true if it is
     */
    boolean isClosed();

    /**
     * Create a shallow copy of the result set. The data and a temporary table
     * (if there is any) is not copied.
     *
     * @param targetSession the session of the copy
     * @return the copy if possible, or null if copying is not possible
     */
    ResultInterface createShallowCopy(SessionInterface targetSession);

    /**
     * Check if this result set contains the given row.
     *
     * @param values the row
     * @return true if the row exists
     */
    boolean containsDistinct(Value[] values);
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/ResultRemote.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.io.IOException;
import java.util.ArrayList;
import org.h2.engine.SessionInterface;
import org.h2.engine.SessionRemote;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.util.New;
import org.h2.value.Transfer;
import org.h2.value.Value;
/**
* The client side part of a result set that is kept on the server.
* In many cases, the complete data is kept on the client side,
* but for large results only a subset is in-memory.
*/
public class ResultRemote implements ResultInterface {

    private int fetchSize;
    // session and transfer are both set to null once the server-side result
    // has been closed (see sendClose); a null session is the "closed" marker
    private SessionRemote session;
    private Transfer transfer;
    // the object id of this result on the server; may be remapped by
    // remapIfOld() when it becomes too old
    private int id;
    private final ResultColumn[] columns;
    private Value[] currentRow;
    private final int rowCount;
    // rowId: current absolute row position (-1 before the first row);
    // rowOffset: absolute position of the first row buffered in 'result'
    private int rowId, rowOffset;
    // the locally buffered batch of rows; set to null on close()
    private ArrayList<Value[]> result;
    private final Trace trace;

    /**
     * Read the result header (row count and column metadata) and the first
     * batch of rows from the server.
     *
     * @param session the remote session
     * @param transfer the transfer object to read from
     * @param id the object id of this result on the server
     * @param columnCount the number of columns
     * @param fetchSize the number of rows to fetch at once
     * @throws IOException on a transfer error
     */
    public ResultRemote(SessionRemote session, Transfer transfer, int id,
            int columnCount, int fetchSize) throws IOException {
        this.session = session;
        trace = session.getTrace();
        this.transfer = transfer;
        this.id = id;
        this.columns = new ResultColumn[columnCount];
        rowCount = transfer.readInt();
        for (int i = 0; i < columnCount; i++) {
            columns[i] = new ResultColumn(transfer);
        }
        rowId = -1;
        result = New.arrayList();
        this.fetchSize = fetchSize;
        // the server sends the first batch together with the header, so no
        // explicit fetch request is sent here
        fetchRows(false);
    }

    @Override
    public boolean isLazy() {
        return false;
    }

    @Override
    public String getAlias(int i) {
        return columns[i].alias;
    }

    @Override
    public String getSchemaName(int i) {
        return columns[i].schemaName;
    }

    @Override
    public String getTableName(int i) {
        return columns[i].tableName;
    }

    @Override
    public String getColumnName(int i) {
        return columns[i].columnName;
    }

    @Override
    public int getColumnType(int i) {
        return columns[i].columnType;
    }

    @Override
    public long getColumnPrecision(int i) {
        return columns[i].precision;
    }

    @Override
    public int getColumnScale(int i) {
        return columns[i].scale;
    }

    @Override
    public int getDisplaySize(int i) {
        return columns[i].displaySize;
    }

    @Override
    public boolean isAutoIncrement(int i) {
        return columns[i].autoIncrement;
    }

    @Override
    public int getNullable(int i) {
        return columns[i].nullable;
    }

    /**
     * Reset the local position and ask the server to reset its cursor.
     * NOTE(review): rowOffset and the buffered rows are not reset here, so
     * resetting appears to only work while all fetched rows are still
     * buffered locally — see the TODO in sendClose about large results.
     */
    @Override
    public void reset() {
        rowId = -1;
        currentRow = null;
        if (session == null) {
            // already closed: nothing to reset on the server
            return;
        }
        synchronized (session) {
            session.checkClosed();
            try {
                session.traceOperation("RESULT_RESET", id);
                transfer.writeInt(SessionRemote.RESULT_RESET).writeInt(id).flush();
            } catch (IOException e) {
                throw DbException.convertIOException(e, null);
            }
        }
    }

    @Override
    public Value[] currentRow() {
        return currentRow;
    }

    /**
     * Advance to the next row, fetching the next batch from the server when
     * the locally buffered batch is exhausted.
     *
     * @return true if a row exists
     */
    @Override
    public boolean next() {
        if (rowId < rowCount) {
            rowId++;
            remapIfOld();
            if (rowId < rowCount) {
                // rowId - rowOffset is the index into the local buffer
                if (rowId - rowOffset >= result.size()) {
                    fetchRows(true);
                }
                currentRow = result.get(rowId - rowOffset);
                return true;
            }
            currentRow = null;
        }
        return false;
    }

    @Override
    public int getRowId() {
        return rowId;
    }

    @Override
    public boolean isAfterLast() {
        return rowId >= rowCount;
    }

    @Override
    public int getVisibleColumnCount() {
        return columns.length;
    }

    @Override
    public int getRowCount() {
        return rowCount;
    }

    @Override
    public boolean hasNext() {
        return rowId < rowCount - 1;
    }

    /**
     * Tell the server to close its side of the result, then mark this object
     * as closed by nulling out transfer and session. The close request is
     * only written to the transfer buffer here, not flushed; it goes out
     * with the next flush on this connection.
     */
    private void sendClose() {
        if (session == null) {
            return;
        }
        // TODO result sets: no reset possible for larger remote result sets
        try {
            synchronized (session) {
                session.traceOperation("RESULT_CLOSE", id);
                transfer.writeInt(SessionRemote.RESULT_CLOSE).writeInt(id);
            }
        } catch (IOException e) {
            trace.error(e, "close");
        } finally {
            transfer = null;
            session = null;
        }
    }

    @Override
    public void close() {
        result = null;
        sendClose();
    }

    /**
     * Remap this result to a new server-side object id if the current id is
     * so old that the server may be about to evict it from its cache.
     */
    private void remapIfOld() {
        if (session == null) {
            return;
        }
        try {
            if (id <= session.getCurrentId() - SysProperties.SERVER_CACHED_OBJECTS / 2) {
                // object is too old - we need to map it to a new id
                int newId = session.getNextId();
                session.traceOperation("CHANGE_ID", id);
                transfer.writeInt(SessionRemote.CHANGE_ID).writeInt(id).writeInt(newId);
                id = newId;
                // TODO remote result set: very old result sets may be
                // already removed on the server (theoretically) - how to
                // solve this?
            }
        } catch (IOException e) {
            throw DbException.convertIOException(e, null);
        }
    }

    /**
     * Replace the local row buffer with the next batch of rows.
     * NOTE(review): unlike reset()/remapIfOld(), this method does not
     * null-check session before synchronizing on it, so calling it after
     * sendClose() would throw a NullPointerException — presumably callers
     * guarantee the result is not closed here; confirm.
     *
     * @param sendFetch whether to send a fetch request first (false only
     *            for the initial batch, which the server sends unprompted)
     */
    private void fetchRows(boolean sendFetch) {
        synchronized (session) {
            session.checkClosed();
            try {
                rowOffset += result.size();
                result.clear();
                int fetch = Math.min(fetchSize, rowCount - rowOffset);
                if (sendFetch) {
                    session.traceOperation("RESULT_FETCH_ROWS", id);
                    transfer.writeInt(SessionRemote.RESULT_FETCH_ROWS).
                            writeInt(id).writeInt(fetch);
                    session.done(transfer);
                }
                for (int r = 0; r < fetch; r++) {
                    // each row is preceded by a boolean "row present" flag
                    boolean row = transfer.readBoolean();
                    if (!row) {
                        break;
                    }
                    int len = columns.length;
                    Value[] values = new Value[len];
                    for (int i = 0; i < len; i++) {
                        Value v = transfer.readValue();
                        values[i] = v;
                    }
                    result.add(values);
                }
                // once everything is buffered locally, the server-side
                // object is no longer needed
                if (rowOffset + result.size() >= rowCount) {
                    sendClose();
                }
            } catch (IOException e) {
                throw DbException.convertIOException(e, null);
            }
        }
    }

    @Override
    public String toString() {
        return "columns: " + columns.length + " rows: " + rowCount + " pos: " + rowId;
    }

    @Override
    public int getFetchSize() {
        return fetchSize;
    }

    @Override
    public void setFetchSize(int fetchSize) {
        this.fetchSize = fetchSize;
    }

    @Override
    public boolean needToClose() {
        return true;
    }

    @Override
    public ResultInterface createShallowCopy(SessionInterface targetSession) {
        // The operation is not supported on remote result.
        return null;
    }

    @Override
    public boolean isClosed() {
        return result == null;
    }

    @Override
    public boolean containsDistinct(Value[] values) {
        // We should never do this on remote result.
        throw DbException.throwInternalError();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/ResultTarget.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.value.Value;
/**
* An object where rows are written to.
*/
public interface ResultTarget {

    /**
     * Add the row to the result set.
     *
     * @param values the values
     */
    void addRow(Value[] values);

    /**
     * Get the number of rows.
     *
     * @return the number of rows
     */
    int getRowCount();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/ResultTempTable.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.util.ArrayList;
import java.util.Arrays;
import org.h2.command.ddl.CreateTableData;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.expression.Expression;
import org.h2.index.Cursor;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.schema.Schema;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.Table;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
* This class implements the temp table buffer for the LocalResult class.
*/
public class ResultTempTable implements ResultExternal {

    private static final String COLUMN_NAME = "DATA";
    private final boolean distinct;
    private final SortOrder sort;
    // index used for distinct checks and sorted iteration; may also be
    // created lazily in find() for the "in(select ...)" case
    private Index index;
    private final Session session;
    private Table table;
    private Cursor resultCursor;
    private int rowCount;
    private final int columnCount;
    // the root object this shallow copy was created from, or null if this
    // is the root itself
    private final ResultTempTable parent;
    private boolean closed;
    // number of not-yet-closed shallow copies (tracked on the root only)
    private int childCount;
    // true if any column is a CLOB or BLOB; in that case the table is not
    // truncated on drop, see dropTable()
    private final boolean containsLob;

    /**
     * Create a temporary-table buffer for the given column expressions.
     *
     * @param session the session
     * @param expressions the column expressions (one table column per entry)
     * @param distinct whether only distinct rows must be kept
     * @param sort the sort order, or null
     */
    ResultTempTable(Session session, Expression[] expressions, boolean distinct, SortOrder sort) {
        this.session = session;
        this.distinct = distinct;
        this.sort = sort;
        this.columnCount = expressions.length;
        Schema schema = session.getDatabase().getSchema(Constants.SCHEMA_MAIN);
        CreateTableData data = new CreateTableData();
        boolean b = false;
        for (int i = 0; i < expressions.length; i++) {
            int type = expressions[i].getType();
            Column col = new Column(COLUMN_NAME + i,
                    type);
            if (type == Value.CLOB || type == Value.BLOB) {
                b = true;
            }
            data.columns.add(col);
        }
        containsLob = b;
        data.id = session.getDatabase().allocateObjectId();
        data.tableName = "TEMP_RESULT_SET_" + data.id;
        data.temporary = true;
        data.persistIndexes = false;
        data.persistData = true;
        data.create = true;
        data.session = session;
        table = schema.createTable(data);
        if (sort != null || distinct) {
            createIndex();
        }
        parent = null;
    }

    /**
     * Create a shallow copy that shares the table and index of the parent
     * but has its own cursor position.
     *
     * @param parent the root object to copy from
     */
    private ResultTempTable(ResultTempTable parent) {
        this.parent = parent;
        this.columnCount = parent.columnCount;
        this.distinct = parent.distinct;
        this.session = parent.session;
        this.table = parent.table;
        this.index = parent.index;
        this.rowCount = parent.rowCount;
        this.sort = parent.sort;
        this.containsLob = parent.containsLob;
        reset();
    }

    /**
     * Create the index used for sorted iteration or distinct checks.
     */
    private void createIndex() {
        IndexColumn[] indexCols = null;
        // If we need to do distinct, the distinct columns may not match the
        // sort columns. So we need to disregard the sort. Not ideal.
        if (sort != null && !distinct) {
            int[] colIndex = sort.getQueryColumnIndexes();
            indexCols = new IndexColumn[colIndex.length];
            for (int i = 0; i < colIndex.length; i++) {
                IndexColumn indexColumn = new IndexColumn();
                indexColumn.column = table.getColumn(colIndex[i]);
                indexColumn.sortType = sort.getSortTypes()[i];
                indexColumn.columnName = COLUMN_NAME + i;
                indexCols[i] = indexColumn;
            }
        } else {
            // distinct (or neither): index over all columns
            indexCols = new IndexColumn[columnCount];
            for (int i = 0; i < columnCount; i++) {
                IndexColumn indexColumn = new IndexColumn();
                indexColumn.column = table.getColumn(i);
                indexColumn.columnName = COLUMN_NAME + i;
                indexCols[i] = indexColumn;
            }
        }
        String indexName = table.getSchema().getUniqueIndexName(session,
                table, Constants.PREFIX_INDEX);
        int indexId = session.getDatabase().allocateObjectId();
        IndexType indexType = IndexType.createNonUnique(true);
        index = table.addIndex(session, indexName, indexId, indexCols,
                indexType, true, null);
    }

    @Override
    public synchronized ResultExternal createShallowCopy() {
        // copies are always created from the root object
        if (parent != null) {
            return parent.createShallowCopy();
        }
        if (closed) {
            return null;
        }
        childCount++;
        return new ResultTempTable(this);
    }

    @Override
    public int removeRow(Value[] values) {
        Row row = convertToRow(values);
        Cursor cursor = find(row);
        if (cursor != null) {
            row = cursor.get();
            table.removeRow(session, row);
            rowCount--;
        }
        return rowCount;
    }

    @Override
    public boolean contains(Value[] values) {
        return find(convertToRow(values)) != null;
    }

    @Override
    public int addRow(Value[] values) {
        Row row = convertToRow(values);
        if (distinct) {
            // only insert when no equal row exists yet
            Cursor cursor = find(row);
            if (cursor == null) {
                table.addRow(session, row);
                rowCount++;
            }
        } else {
            table.addRow(session, row);
            rowCount++;
        }
        return rowCount;
    }

    @Override
    public int addRows(ArrayList<Value[]> rows) {
        // speeds up inserting, but not really needed:
        if (sort != null) {
            sort.sort(rows);
        }
        for (Value[] values : rows) {
            addRow(values);
        }
        return rowCount;
    }

    /**
     * Called by a shallow copy when it is closed; drops the table once the
     * root is closed and the last copy has been closed.
     */
    private synchronized void closeChild() {
        if (--childCount == 0 && closed) {
            dropTable();
        }
    }

    @Override
    public synchronized void close() {
        if (closed) {
            return;
        }
        closed = true;
        if (parent != null) {
            parent.closeChild();
        } else {
            // root: only drop once all shallow copies are closed as well
            if (childCount == 0) {
                dropTable();
            }
        }
    }

    /**
     * Truncate and remove the backing temporary table (unless it contains
     * lobs, in which case it is left in place).
     */
    private void dropTable() {
        if (table == null) {
            return;
        }
        if (containsLob) {
            // contains BLOB or CLOB: can not truncate now,
            // otherwise the BLOB and CLOB entries are removed
            return;
        }
        try {
            Database database = session.getDatabase();
            // Need to lock because not all of the code-paths
            // that reach here have already taken this lock,
            // notably via the close() paths.
            synchronized (session) {
                synchronized (database) {
                    table.truncate(session);
                }
            }
            // This session may not lock the sys table (except if it already has
            // locked it) because it must be committed immediately, otherwise
            // other threads can not access the sys table. If the table is not
            // removed now, it will be when the database is opened the next
            // time. (the table is truncated, so this is just one record)
            if (!database.isSysTableLocked()) {
                Session sysSession = database.getSystemSession();
                table.removeChildrenAndResources(sysSession);
                if (index != null) {
                    // need to explicitly do this,
                    // as it's not registered in the system session
                    session.removeLocalTempTableIndex(index);
                }
                // the transaction must be committed immediately
                // TODO this synchronization cascade is very ugly
                synchronized (session) {
                    synchronized (sysSession) {
                        synchronized (database) {
                            sysSession.commit(false);
                        }
                    }
                }
            }
        } finally {
            table = null;
        }
    }

    @Override
    public void done() {
        // nothing to do
    }

    @Override
    public Value[] next() {
        if (resultCursor == null) {
            // first call after reset(): open a cursor over the index (for
            // sorted/distinct results) or a table scan otherwise
            Index idx;
            if (distinct || sort != null) {
                idx = index;
            } else {
                idx = table.getScanIndex(session);
            }
            if (session.getDatabase().getMvStore() != null) {
                // sometimes the transaction is already committed,
                // in which case we can't use the session
                if (idx.getRowCount(session) == 0 && rowCount > 0) {
                    // this means querying is not transactional
                    resultCursor = idx.find((Session) null, null, null);
                } else {
                    // the transaction is still open
                    resultCursor = idx.find(session, null, null);
                }
            } else {
                resultCursor = idx.find(session, null, null);
            }
        }
        if (!resultCursor.next()) {
            return null;
        }
        Row row = resultCursor.get();
        return row.getValueList();
    }

    @Override
    public void reset() {
        // the cursor is re-opened lazily on the next call to next()
        resultCursor = null;
    }

    /**
     * Convert a value array to a table row, padding missing trailing
     * columns with NULL.
     */
    private Row convertToRow(Value[] values) {
        if (values.length < columnCount) {
            Value[] v2 = Arrays.copyOf(values, columnCount);
            for (int i = values.length; i < columnCount; i++) {
                v2[i] = ValueNull.INSTANCE;
            }
            values = v2;
        }
        return session.createRow(values, Row.MEMORY_CALCULATE);
    }

    /**
     * Find a row equal to the given one, creating the index on demand.
     *
     * @param row the row to look for
     * @return a cursor positioned on the matching row, or null if none
     */
    private Cursor find(Row row) {
        if (index == null) {
            // for the case "in(select ...)", the query might
            // use an optimization and not create the index
            // up front
            createIndex();
        }
        Cursor cursor = index.find(session, row, row);
        while (cursor.next()) {
            SearchRow found = cursor.getSearchRow();
            boolean ok = true;
            Database db = session.getDatabase();
            // the index lookup may return near-matches; verify all columns
            for (int i = 0; i < row.getColumnCount(); i++) {
                if (!db.areEqual(row.getValue(i), found.getValue(i))) {
                    ok = false;
                    break;
                }
            }
            if (ok) {
                return cursor;
            }
        }
        return null;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/ResultWithGeneratedKeys.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
/**
* Result of update command with optional generated keys.
*/
public class ResultWithGeneratedKeys {

    private final int updateCount;

    ResultWithGeneratedKeys(int updateCount) {
        this.updateCount = updateCount;
    }

    /**
     * Returns a result that carries only an update count.
     *
     * @param updateCount
     *            update count
     * @return the result.
     */
    public static ResultWithGeneratedKeys of(int updateCount) {
        return new ResultWithGeneratedKeys(updateCount);
    }

    /**
     * Returns the generated keys, or {@code null} if there are none.
     *
     * @return generated keys, or {@code null}
     */
    public ResultInterface getGeneratedKeys() {
        return null;
    }

    /**
     * Returns the update count of the command.
     *
     * @return update count
     */
    public int getUpdateCount() {
        return updateCount;
    }

    /**
     * Result of an update command that also carries generated keys.
     */
    public static final class WithKeys extends ResultWithGeneratedKeys {

        private final ResultInterface generatedKeys;

        /**
         * Creates a result with an update count and generated keys.
         *
         * @param updateCount
         *            update count
         * @param generatedKeys
         *            generated keys
         */
        public WithKeys(int updateCount, ResultInterface generatedKeys) {
            super(updateCount);
            this.generatedKeys = generatedKeys;
        }

        @Override
        public ResultInterface getGeneratedKeys() {
            return generatedKeys;
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/Row.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.store.Data;
import org.h2.value.Value;
/**
* Represents a row in a table.
*/
public interface Row extends SearchRow {

    /**
     * Memory-usage marker: the memory of this row is not known yet and must
     * be calculated on demand (see RowImpl.getMemory()).
     */
    int MEMORY_CALCULATE = -1;

    /**
     * An empty, shared array of rows.
     */
    Row[] EMPTY_ARRAY = {};

    /**
     * Get a copy of the row that is distinct from (not equal to) this row.
     * This is used for FOR UPDATE to allow pseudo-updating a row.
     *
     * @return a new row with the same data
     */
    Row getCopy();

    /**
     * Set version.
     *
     * @param version row version
     */
    void setVersion(int version);

    /**
     * Get the number of bytes required for the data.
     *
     * @param dummy the template buffer
     * @return the number of bytes
     */
    int getByteCount(Data dummy);

    /**
     * Check if this is an empty row.
     *
     * @return {@code true} if the row is empty
     */
    boolean isEmpty();

    /**
     * Mark the row as deleted.
     *
     * @param deleted deleted flag
     */
    void setDeleted(boolean deleted);

    /**
     * Set session id.
     *
     * @param sessionId the session id
     */
    void setSessionId(int sessionId);

    /**
     * Get session id.
     *
     * @return the session id
     */
    int getSessionId();

    /**
     * This record has been committed. The session id is reset.
     */
    void commit();

    /**
     * Check if the row is deleted.
     *
     * @return {@code true} if the row is deleted
     */
    boolean isDeleted();

    /**
     * Get values.
     *
     * @return values
     */
    Value[] getValueList();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/RowFactory.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.value.Value;
/**
* Creates rows.
*
* @author Sergi Vladykin
*/
public abstract class RowFactory {

    /**
     * Create a new row from the given values.
     *
     * @param data the values
     * @param memory whether the row is in memory
     * @return the created row
     */
    public abstract Row createRow(Value[] data, int memory);

    /**
     * The shared default row factory, producing plain RowImpl rows.
     */
    public static final RowFactory DEFAULT = new DefaultRowFactory();

    /**
     * The default row factory implementation.
     */
    static final class DefaultRowFactory extends RowFactory {

        @Override
        public Row createRow(Value[] data, int memory) {
            return new RowImpl(data, memory);
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/RowImpl.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.util.Arrays;
import org.h2.engine.Constants;
import org.h2.store.Data;
import org.h2.util.StatementBuilder;
import org.h2.value.Value;
import org.h2.value.ValueLong;
/**
* Default row implementation.
*/
public class RowImpl implements Row {

    private long key;
    private final Value[] data;
    // memory estimate in bytes, or MEMORY_CALCULATE until first computed
    private int memory;
    private int version;
    private boolean deleted;
    private int sessionId;

    public RowImpl(Value[] data, int memory) {
        this.data = data;
        this.memory = memory;
    }

    /**
     * Get a copy of the row that is distinct from (not equal to) this row:
     * the copy carries the same key, data and session id, but a version
     * incremented by one. This is used for FOR UPDATE to allow
     * pseudo-updating a row.
     *
     * @return a new row with the same data
     */
    @Override
    public Row getCopy() {
        RowImpl copy = new RowImpl(Arrays.copyOf(data, data.length), memory);
        copy.key = key;
        copy.version = version + 1;
        copy.sessionId = sessionId;
        return copy;
    }

    @Override
    public void setKeyAndVersion(SearchRow row) {
        setKey(row.getKey());
        setVersion(row.getVersion());
    }

    @Override
    public int getVersion() {
        return version;
    }

    @Override
    public void setVersion(int version) {
        this.version = version;
    }

    @Override
    public long getKey() {
        return key;
    }

    @Override
    public void setKey(long key) {
        this.key = key;
    }

    @Override
    public Value getValue(int i) {
        // column index -1 addresses the row key itself
        if (i == -1) {
            return ValueLong.get(key);
        }
        return data[i];
    }

    /**
     * Get the number of bytes required for the data.
     *
     * @param dummy the template buffer
     * @return the number of bytes
     */
    @Override
    public int getByteCount(Data dummy) {
        int total = 0;
        for (Value value : data) {
            total += dummy.getValueLen(value);
        }
        return total;
    }

    @Override
    public void setValue(int i, Value v) {
        // column index -1 addresses the row key itself
        if (i != -1) {
            data[i] = v;
        } else {
            this.key = v.getLong();
        }
    }

    @Override
    public boolean isEmpty() {
        return data == null;
    }

    @Override
    public int getColumnCount() {
        return data.length;
    }

    /**
     * Get the (possibly cached) estimated memory usage of this row.
     *
     * @return the estimate in bytes
     */
    @Override
    public int getMemory() {
        if (memory == MEMORY_CALCULATE) {
            // compute once and cache the estimate
            int estimate = Constants.MEMORY_ROW;
            if (data != null) {
                estimate += Constants.MEMORY_OBJECT + data.length * Constants.MEMORY_POINTER;
                for (Value value : data) {
                    if (value != null) {
                        estimate += value.getMemory();
                    }
                }
            }
            memory = estimate;
        }
        return memory;
    }

    @Override
    public String toString() {
        StatementBuilder builder = new StatementBuilder("( /* key:");
        builder.append(getKey());
        if (version != 0) {
            builder.append(" v:").append(version);
        }
        if (isDeleted()) {
            builder.append(" deleted");
        }
        builder.append(" */ ");
        if (data != null) {
            for (Value value : data) {
                builder.appendExceptFirst(", ");
                builder.append(value == null ? "null" : value.getTraceSQL());
            }
        }
        return builder.append(')').toString();
    }

    @Override
    public void setDeleted(boolean deleted) {
        this.deleted = deleted;
    }

    @Override
    public void setSessionId(int sessionId) {
        this.sessionId = sessionId;
    }

    @Override
    public int getSessionId() {
        return sessionId;
    }

    /**
     * This record has been committed. The session id is reset.
     */
    @Override
    public void commit() {
        this.sessionId = 0;
    }

    @Override
    public boolean isDeleted() {
        return deleted;
    }

    @Override
    public Value[] getValueList() {
        return data;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/RowList.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.util.ArrayList;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.store.Data;
import org.h2.store.FileStore;
import org.h2.util.New;
import org.h2.value.Value;
/**
* A list of rows. If the list grows too large, it is buffered to disk
* automatically.
*/
public class RowList {
private final Session session;
private final ArrayList<Row> list = New.arrayList();
private int size;
private int index, listIndex;
private FileStore file;
private Data rowBuff;
private ArrayList<Value> lobs;
private final int maxMemory;
private int memory;
private boolean written;
private boolean readUncached;
/**
* Construct a new row list for this session.
*
* @param session the session
*/
public RowList(Session session) {
    this.session = session;
    if (session.getDatabase().isPersistent()) {
        maxMemory = session.getDatabase().getMaxOperationMemory();
    } else {
        // in-memory database: never spill rows to disk
        maxMemory = 0;
    }
}
/**
 * Serialize a single row into the buffer. On-disk layout: a flag byte 1,
 * memory, column count, key, version, deleted flag, session id, then for
 * each column a present-flag byte followed by the value (must stay in
 * sync with the read side).
 *
 * @param buff the target buffer
 * @param r the row to write
 */
private void writeRow(Data buff, Row r) {
    buff.checkCapacity(1 + Data.LENGTH_INT * 8);
    buff.writeByte((byte) 1);
    buff.writeInt(r.getMemory());
    int columnCount = r.getColumnCount();
    buff.writeInt(columnCount);
    buff.writeLong(r.getKey());
    buff.writeInt(r.getVersion());
    buff.writeInt(r.isDeleted() ? 1 : 0);
    buff.writeInt(r.getSessionId());
    for (int i = 0; i < columnCount; i++) {
        Value v = r.getValue(i);
        buff.checkCapacity(1);
        if (v == null) {
            // marker byte 0: null value
            buff.writeByte((byte) 0);
        } else {
            buff.writeByte((byte) 1);
            if (v.getType() == Value.CLOB || v.getType() == Value.BLOB) {
                // need to keep a reference to temporary lobs,
                // otherwise the temp file is deleted
                if (v.getSmall() == null && v.getTableId() == 0) {
                    if (lobs == null) {
                        lobs = New.arrayList();
                    }
                    // need to create a copy, otherwise,
                    // if stored multiple times, it may be renamed
                    // and then not found
                    v = v.copyToTemp();
                    lobs.add(v);
                }
            }
            buff.checkCapacity(buff.getValueLen(v));
            buff.writeValue(v);
        }
    }
}
private void writeAllRows() {
if (file == null) {
Database db = session.getDatabase();
String fileName = db.createTempFile();
file = db.openFile(fileName, "rw", false);
file.setCheckedWriting(false);
file.seek(FileStore.HEADER_LENGTH);
rowBuff = Data.create(db, Constants.DEFAULT_PAGE_SIZE);
file.seek(FileStore.HEADER_LENGTH);
}
Data buff = rowBuff;
initBuffer(buff);
for (int i = 0, size = list.size(); i < size; i++) {
if (i > 0 && buff.length() > Constants.IO_BUFFER_SIZE) {
flushBuffer(buff);
initBuffer(buff);
}
Row r = list.get(i);
writeRow(buff, r);
}
flushBuffer(buff);
file.autoDelete();
list.clear();
memory = 0;
}
private static void initBuffer(Data buff) {
buff.reset();
buff.writeInt(0);
}
private void flushBuffer(Data buff) {
buff.checkCapacity(1);
buff.writeByte((byte) 0);
buff.fillAligned();
buff.setInt(0, buff.length() / Constants.FILE_BLOCK_SIZE);
file.write(buff.getBytes(), 0, buff.length());
}
/**
* Add a row to the list.
*
* @param r the row to add
*/
public void add(Row r) {
list.add(r);
memory += r.getMemory() + Constants.MEMORY_POINTER;
if (maxMemory > 0 && memory > maxMemory) {
writeAllRows();
}
size++;
}
/**
* Remove all rows from the list.
*/
public void reset() {
index = 0;
if (file != null) {
listIndex = 0;
if (!written) {
writeAllRows();
written = true;
}
list.clear();
file.seek(FileStore.HEADER_LENGTH);
}
}
/**
* Check if there are more rows in this list.
*
* @return true it there are more rows
*/
public boolean hasNext() {
return index < size;
}
private Row readRow(Data buff) {
if (buff.readByte() == 0) {
return null;
}
int mem = buff.readInt();
int columnCount = buff.readInt();
long key = buff.readLong();
int version = buff.readInt();
if (readUncached) {
key = 0;
}
boolean deleted = buff.readInt() == 1;
int sessionId = buff.readInt();
Value[] values = new Value[columnCount];
for (int i = 0; i < columnCount; i++) {
Value v;
if (buff.readByte() == 0) {
v = null;
} else {
v = buff.readValue();
if (v.isLinkedToTable()) {
// the table id is 0 if it was linked when writing
// a temporary entry
if (v.getTableId() == 0) {
session.removeAtCommit(v);
}
}
}
values[i] = v;
}
Row row = session.createRow(values, mem);
row.setKey(key);
row.setVersion(version);
row.setDeleted(deleted);
row.setSessionId(sessionId);
return row;
}
/**
* Get the next row from the list.
*
* @return the next row
*/
public Row next() {
Row r;
if (file == null) {
r = list.get(index++);
} else {
if (listIndex >= list.size()) {
list.clear();
listIndex = 0;
Data buff = rowBuff;
buff.reset();
int min = Constants.FILE_BLOCK_SIZE;
file.readFully(buff.getBytes(), 0, min);
int len = buff.readInt() * Constants.FILE_BLOCK_SIZE;
buff.checkCapacity(len);
if (len - min > 0) {
file.readFully(buff.getBytes(), min, len - min);
}
while (true) {
r = readRow(buff);
if (r == null) {
break;
}
list.add(r);
}
}
index++;
r = list.get(listIndex++);
}
return r;
}
/**
* Get the number of rows in this list.
*
* @return the number of rows
*/
public int size() {
return size;
}
/**
* Do not use the cache.
*/
public void invalidateCache() {
readUncached = true;
}
/**
* Close the result list and delete the temporary file.
*/
public void close() {
if (file != null) {
file.autoDelete();
file.closeAndDeleteSilently();
file = null;
rowBuff = null;
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/SearchRow.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.value.Value;
/**
 * The interface for rows stored in a table, and for partial rows stored in the
 * index.
 */
public interface SearchRow {

    /**
     * An empty array of SearchRow objects.
     */
    SearchRow[] EMPTY_ARRAY = {};

    /**
     * Get the column count.
     *
     * @return the column count
     */
    int getColumnCount();

    /**
     * Get the value for the column.
     *
     * @param index the column number (starting with 0)
     * @return the value, or null if not set
     */
    Value getValue(int index);

    /**
     * Set the value for the given column.
     *
     * @param index the column number (starting with 0)
     * @param v the new value
     */
    void setValue(int index, Value v);

    /**
     * Set the position and version to match another row: copies both the
     * unique key and the version of the given row into this one.
     *
     * @param old the other row.
     */
    void setKeyAndVersion(SearchRow old);

    /**
     * Get the version of the row.
     *
     * @return the version
     */
    int getVersion();

    /**
     * Set the unique key of the row.
     *
     * @param key the key
     */
    void setKey(long key);

    /**
     * Get the unique key of the row.
     *
     * @return the key
     */
    long getKey();

    /**
     * Get the estimated memory used for this row, in bytes.
     *
     * @return the memory estimate
     */
    int getMemory();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/SimpleRow.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.engine.Constants;
import org.h2.util.StatementBuilder;
import org.h2.value.Value;
/**
 * Represents a simple row without state.
 */
public class SimpleRow implements SearchRow {

    /** The column values of this row. */
    private final Value[] data;

    /** The unique row key. */
    private long key;

    /** The row version. */
    private int version;

    /** Lazily computed memory estimate; 0 means not yet computed. */
    private int memory;

    public SimpleRow(Value[] data) {
        this.data = data;
    }

    @Override
    public int getColumnCount() {
        return data.length;
    }

    @Override
    public long getKey() {
        return key;
    }

    @Override
    public void setKey(long key) {
        this.key = key;
    }

    @Override
    public void setKeyAndVersion(SearchRow row) {
        this.key = row.getKey();
        this.version = row.getVersion();
    }

    @Override
    public int getVersion() {
        return version;
    }

    @Override
    public void setValue(int i, Value v) {
        data[i] = v;
    }

    @Override
    public Value getValue(int i) {
        return data[i];
    }

    @Override
    public String toString() {
        StatementBuilder builder = new StatementBuilder("( /* key:");
        builder.append(getKey());
        if (version != 0) {
            builder.append(" v:").append(version);
        }
        builder.append(" */ ");
        for (int i = 0; i < data.length; i++) {
            builder.appendExceptFirst(", ");
            Value v = data[i];
            builder.append(v == null ? "null" : v.getTraceSQL());
        }
        builder.append(')');
        return builder.toString();
    }

    @Override
    public int getMemory() {
        if (memory == 0) {
            // object overhead plus one pointer per column, plus each value
            int estimate = Constants.MEMORY_OBJECT + data.length * Constants.MEMORY_POINTER;
            for (int i = 0; i < data.length; i++) {
                Value v = data[i];
                if (v != null) {
                    estimate += v.getMemory();
                }
            }
            memory = estimate;
        }
        return memory;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/SimpleRowValue.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.engine.Constants;
import org.h2.value.Value;
/**
 * A simple row that contains data for only one column.
 */
public class SimpleRowValue implements SearchRow {

    /** The unique row key. */
    private long key;

    /** The row version. */
    private int version;

    /** The index of the one column that actually stores a value. */
    private int index;

    /** The column count reported to callers. */
    private final int virtualColumnCount;

    /** The single stored value. */
    private Value data;

    public SimpleRowValue(int columnCount) {
        this.virtualColumnCount = columnCount;
    }

    @Override
    public void setKeyAndVersion(SearchRow row) {
        this.key = row.getKey();
        this.version = row.getVersion();
    }

    @Override
    public int getVersion() {
        return version;
    }

    @Override
    public int getColumnCount() {
        return virtualColumnCount;
    }

    @Override
    public long getKey() {
        return key;
    }

    @Override
    public void setKey(long key) {
        this.key = key;
    }

    @Override
    public Value getValue(int idx) {
        // only the one stored column has a value; all others are null
        if (idx == index) {
            return data;
        }
        return null;
    }

    @Override
    public void setValue(int idx, Value v) {
        // remember which column was set; older values are discarded
        index = idx;
        data = v;
    }

    @Override
    public String toString() {
        String v;
        if (data == null) {
            v = "null";
        } else {
            v = data.getTraceSQL();
        }
        return "( /* " + key + " */ " + v + " )";
    }

    @Override
    public int getMemory() {
        int valueMemory = data == null ? 0 : data.getMemory();
        return Constants.MEMORY_OBJECT + valueMemory;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/SortOrder.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import org.h2.command.dml.SelectOrderBy;
import org.h2.engine.Database;
import org.h2.engine.SysProperties;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionColumn;
import org.h2.table.Column;
import org.h2.table.TableFilter;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.util.Utils;
import org.h2.value.Value;
import org.h2.value.ValueNull;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
/**
 * A sort order represents an ORDER BY clause in a query.
 */
public class SortOrder implements Comparator<Value[]> {

    /**
     * This bit mask means the values should be sorted in ascending order.
     */
    public static final int ASCENDING = 0;

    /**
     * This bit mask means the values should be sorted in descending order.
     */
    public static final int DESCENDING = 1;

    /**
     * This bit mask means NULLs should be sorted before other data, no matter
     * if ascending or descending order is used.
     */
    public static final int NULLS_FIRST = 2;

    /**
     * This bit mask means NULLs should be sorted after other data, no matter
     * if ascending or descending order is used.
     */
    public static final int NULLS_LAST = 4;

    /**
     * The default sort order for NULL: 1 when NULLs sort high, -1 otherwise.
     */
    private static final int DEFAULT_NULL_SORT =
            SysProperties.SORT_NULLS_HIGH ? 1 : -1;

    /**
     * The default sort order bit for NULLs last.
     */
    private static final int DEFAULT_NULLS_LAST = SysProperties.SORT_NULLS_HIGH ? NULLS_LAST : NULLS_FIRST;

    /**
     * The default sort order bit for NULLs first.
     */
    private static final int DEFAULT_NULLS_FIRST = SysProperties.SORT_NULLS_HIGH ? NULLS_FIRST : NULLS_LAST;

    /** The database, used for value comparison according to its compare mode. */
    private final Database database;

    /**
     * The column indexes of the order by expressions within the query.
     */
    private final int[] queryColumnIndexes;

    /**
     * The sort type bit mask (DESCENDING, NULLS_FIRST, NULLS_LAST).
     */
    private final int[] sortTypes;

    /**
     * The order list.
     */
    private final ArrayList<SelectOrderBy> orderList;

    /**
     * Construct a new sort order object.
     *
     * @param database the database
     * @param queryColumnIndexes the column index list
     * @param sortType the sort order bit masks
     * @param orderList the original query order list (if this is a query)
     */
    public SortOrder(Database database, int[] queryColumnIndexes,
            int[] sortType, ArrayList<SelectOrderBy> orderList) {
        this.database = database;
        this.queryColumnIndexes = queryColumnIndexes;
        this.sortTypes = sortType;
        this.orderList = orderList;
    }

    /**
     * Create the SQL snippet that describes this sort order.
     * This is the SQL snippet that usually appears after the ORDER BY clause.
     *
     * @param list the expression list
     * @param visible the number of columns in the select list
     * @return the SQL snippet
     */
    public String getSQL(Expression[] list, int visible) {
        StatementBuilder buff = new StatementBuilder();
        int i = 0;
        for (int idx : queryColumnIndexes) {
            buff.appendExceptFirst(", ");
            if (idx < visible) {
                // a visible select-list column: refer to it by 1-based position
                buff.append(idx + 1);
            } else {
                // otherwise emit the expression itself
                buff.append('=').append(StringUtils.unEnclose(list[idx].getSQL()));
            }
            int type = sortTypes[i++];
            if ((type & DESCENDING) != 0) {
                buff.append(" DESC");
            }
            if ((type & NULLS_FIRST) != 0) {
                buff.append(" NULLS FIRST");
            } else if ((type & NULLS_LAST) != 0) {
                buff.append(" NULLS LAST");
            }
        }
        return buff.toString();
    }

    /**
     * Compare two expressions where one of them is NULL.
     *
     * @param aNull whether the first expression is null
     * @param sortType the sort bit mask to use
     * @return the result of the comparison (-1 meaning the first expression
     *         should appear before the second, 0 if they are equal)
     */
    public static int compareNull(boolean aNull, int sortType) {
        if ((sortType & NULLS_FIRST) != 0) {
            return aNull ? -1 : 1;
        } else if ((sortType & NULLS_LAST) != 0) {
            return aNull ? 1 : -1;
        } else {
            // see also JdbcDatabaseMetaData.nullsAreSorted*
            int comp = aNull ? DEFAULT_NULL_SORT : -DEFAULT_NULL_SORT;
            return (sortType & DESCENDING) == 0 ? comp : -comp;
        }
    }

    /**
     * Compare two expression lists.
     *
     * @param a the first expression list
     * @param b the second expression list
     * @return the result of the comparison
     */
    @Override
    public int compare(Value[] a, Value[] b) {
        for (int i = 0, len = queryColumnIndexes.length; i < len; i++) {
            int idx = queryColumnIndexes[i];
            int type = sortTypes[i];
            Value ao = a[idx];
            Value bo = b[idx];
            boolean aNull = ao == ValueNull.INSTANCE, bNull = bo == ValueNull.INSTANCE;
            if (aNull || bNull) {
                if (aNull == bNull) {
                    // both NULL: equal on this key, move to the next one
                    continue;
                }
                return compareNull(aNull, type);
            }
            int comp = database.compare(ao, bo);
            if (comp != 0) {
                // invert the comparison result for descending order
                return (type & DESCENDING) == 0 ? comp : -comp;
            }
        }
        return 0;
    }

    /**
     * Sort a list of rows.
     *
     * @param rows the list of rows
     */
    public void sort(ArrayList<Value[]> rows) {
        Collections.sort(rows, this);
    }

    /**
     * Sort a list of rows using offset and limit. Only the rows within
     * [offset, offset + limit) are guaranteed to be in sorted positions.
     *
     * @param rows the list of rows
     * @param offset the offset
     * @param limit the limit
     */
    public void sort(ArrayList<Value[]> rows, int offset, int limit) {
        int rowsSize = rows.size();
        if (rows.isEmpty() || offset >= rowsSize || limit == 0) {
            return;
        }
        if (offset < 0) {
            offset = 0;
        }
        if (offset + limit > rowsSize) {
            limit = rowsSize - offset;
        }
        if (limit == 1 && offset == 0) {
            // just the minimum is needed: avoid a full partial sort
            rows.set(0, Collections.min(rows, this));
            return;
        }
        Value[][] arr = rows.toArray(new Value[0][]);
        Utils.sortTopN(arr, offset, limit, this);
        // copy the sorted prefix back into the list
        for (int i = 0, end = Math.min(offset + limit, rowsSize); i < end; i++) {
            rows.set(i, arr[i]);
        }
    }

    /**
     * Get the column index list. This is the column indexes of the order by
     * expressions within the query.
     * <p>
     * For the query "select name, id from test order by id, name" this is {1,
     * 0} as the first order by expression (the column "id") is the second
     * column of the query, and the second order by expression ("name") is the
     * first column of the query.
     *
     * @return the list
     */
    public int[] getQueryColumnIndexes() {
        return queryColumnIndexes;
    }

    /**
     * Get the column for the given table filter, if the sort column is for this
     * filter.
     *
     * @param index the column index (0, 1,..)
     * @param filter the table filter
     * @return the column, or null
     */
    public Column getColumn(int index, TableFilter filter) {
        if (orderList == null) {
            return null;
        }
        SelectOrderBy order = orderList.get(index);
        Expression expr = order.expression;
        if (expr == null) {
            return null;
        }
        // resolve aliases to the underlying expression
        expr = expr.getNonAliasExpression();
        if (expr.isConstant()) {
            return null;
        }
        if (!(expr instanceof ExpressionColumn)) {
            return null;
        }
        ExpressionColumn exprCol = (ExpressionColumn) expr;
        if (exprCol.getTableFilter() != filter) {
            return null;
        }
        return exprCol.getColumn();
    }

    /**
     * Get the sort order bit masks.
     *
     * @return the list
     */
    public int[] getSortTypes() {
        return sortTypes;
    }

    /**
     * Returns sort order bit masks with {@link #NULLS_FIRST} or {@link #NULLS_LAST}
     * explicitly set, depending on {@link SysProperties#SORT_NULLS_HIGH}.
     *
     * @return bit masks with either {@link #NULLS_FIRST} or {@link #NULLS_LAST} explicitly set.
     */
    public int[] getSortTypesWithNullPosition() {
        // work on a copy so the shared sortTypes array is not modified
        final int[] sortTypes = this.sortTypes.clone();
        for (int i=0, length = sortTypes.length; i<length; i++) {
            sortTypes[i] = addExplicitNullPosition(sortTypes[i]);
        }
        return sortTypes;
    }

    /**
     * Returns a sort type bit mask with {@link #NULLS_FIRST} or {@link #NULLS_LAST}
     * explicitly set, depending on {@link SysProperties#SORT_NULLS_HIGH}.
     *
     * @param sortType sort type bit mask
     * @return bit mask with either {@link #NULLS_FIRST} or {@link #NULLS_LAST} explicitly set.
     */
    public static int addExplicitNullPosition(final int sortType) {
        if ((sortType & NULLS_FIRST) != NULLS_FIRST && (sortType & NULLS_LAST) != NULLS_LAST) {
            // neither is set: derive the default from the sort direction
            return sortType | ((sortType & DESCENDING) == ASCENDING ? DEFAULT_NULLS_LAST : DEFAULT_NULLS_FIRST);
        } else {
            return sortType;
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/result/UpdatableRow.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.result;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import org.h2.api.ErrorCode;
import org.h2.jdbc.JdbcConnection;
import org.h2.message.DbException;
import org.h2.util.New;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
 * This class is used for updatable result sets. An updatable row provides
 * functions to update the current row in a result set.
 */
public class UpdatableRow {

    private final JdbcConnection conn;
    private final ResultInterface result;
    private final int columnCount;

    /** The single schema all result columns belong to, or null. */
    private String schemaName;

    /** The single table all result columns belong to, or null. */
    private String tableName;

    /** The column names of the key (primary key or a unique index) used to address rows. */
    private ArrayList<String> key;

    /** Whether a usable key was found and the result set is updatable. */
    private boolean isUpdatable;

    /**
     * Construct a new object that is linked to the result set. The constructor
     * reads the database meta data to find out if the result set is updatable.
     *
     * @param conn the database connection
     * @param result the result
     */
    public UpdatableRow(JdbcConnection conn, ResultInterface result)
            throws SQLException {
        this.conn = conn;
        this.result = result;
        columnCount = result.getVisibleColumnCount();
        // all columns must come from the same schema and table,
        // otherwise the result is not updatable
        for (int i = 0; i < columnCount; i++) {
            String t = result.getTableName(i);
            String s = result.getSchemaName(i);
            if (t == null || s == null) {
                return;
            }
            if (tableName == null) {
                tableName = t;
            } else if (!tableName.equals(t)) {
                return;
            }
            if (schemaName == null) {
                schemaName = s;
            } else if (!schemaName.equals(s)) {
                return;
            }
        }
        final DatabaseMetaData meta = conn.getMetaData();
        ResultSet rs = meta.getTables(null,
                StringUtils.escapeMetaDataPattern(schemaName),
                StringUtils.escapeMetaDataPattern(tableName),
                new String[] { "TABLE" });
        if (!rs.next()) {
            return;
        }
        if (rs.getString("SQL") == null) {
            // system table
            return;
        }
        String table = rs.getString("TABLE_NAME");
        // if the table name in the database meta data is lower case,
        // but the table in the result set meta data is not, then the column
        // in the database meta data is also lower case
        boolean toUpper = !table.equals(tableName) && table.equalsIgnoreCase(tableName);
        key = New.arrayList();
        // first try the primary key
        rs = meta.getPrimaryKeys(null,
                StringUtils.escapeMetaDataPattern(schemaName),
                tableName);
        while (rs.next()) {
            String c = rs.getString("COLUMN_NAME");
            key.add(toUpper ? StringUtils.toUpperEnglish(c) : c);
        }
        if (isIndexUsable(key)) {
            isUpdatable = true;
            return;
        }
        // no usable primary key: look for a unique index whose columns
        // are all present in the result set
        key.clear();
        rs = meta.getIndexInfo(null,
                StringUtils.escapeMetaDataPattern(schemaName),
                tableName, true, true);
        while (rs.next()) {
            int pos = rs.getShort("ORDINAL_POSITION");
            if (pos == 1) {
                // check the last key if there was any
                if (isIndexUsable(key)) {
                    isUpdatable = true;
                    return;
                }
                key.clear();
            }
            String c = rs.getString("COLUMN_NAME");
            key.add(toUpper ? StringUtils.toUpperEnglish(c) : c);
        }
        if (isIndexUsable(key)) {
            isUpdatable = true;
            return;
        }
        key = null;
    }

    /**
     * Check whether all columns of the candidate key are present in the
     * result set, so that the key can address rows.
     *
     * @param indexColumns the candidate key column names
     * @return true if the key is usable
     */
    private boolean isIndexUsable(ArrayList<String> indexColumns) {
        if (indexColumns.isEmpty()) {
            return false;
        }
        for (String c : indexColumns) {
            if (findColumnIndex(c) < 0) {
                return false;
            }
        }
        return true;
    }

    /**
     * Check if this result set is updatable.
     *
     * @return true if it is
     */
    public boolean isUpdatable() {
        return isUpdatable;
    }

    /**
     * Find the result-set index of the given column name.
     *
     * @param columnName the column name (case sensitive)
     * @return the 0-based index, or -1 if not found
     */
    private int findColumnIndex(String columnName) {
        for (int i = 0; i < columnCount; i++) {
            String col = result.getColumnName(i);
            if (col.equals(columnName)) {
                return i;
            }
        }
        return -1;
    }

    /**
     * Get the result-set index of the given column name.
     *
     * @param columnName the column name
     * @return the 0-based index
     * @throws DbException if the column is not found
     */
    private int getColumnIndex(String columnName) {
        int index = findColumnIndex(columnName);
        if (index < 0) {
            throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, columnName);
        }
        return index;
    }

    /**
     * Append the quoted list of all result columns to the buffer,
     * optionally as "col=? " assignments for an UPDATE SET clause.
     *
     * @param buff the statement builder
     * @param set whether to append "=? " after each column
     */
    private void appendColumnList(StatementBuilder buff, boolean set) {
        buff.resetCount();
        for (int i = 0; i < columnCount; i++) {
            buff.appendExceptFirst(",");
            String col = result.getColumnName(i);
            buff.append(StringUtils.quoteIdentifier(col));
            if (set) {
                buff.append("=? ");
            }
        }
    }

    /**
     * Append a WHERE clause matching all key columns as parameters.
     *
     * @param buff the statement builder
     */
    private void appendKeyCondition(StatementBuilder buff) {
        buff.append(" WHERE ");
        buff.resetCount();
        for (String k : key) {
            buff.appendExceptFirst(" AND ");
            buff.append(StringUtils.quoteIdentifier(k)).append("=?");
        }
    }

    /**
     * Bind the key column values of the given row to the prepared statement.
     *
     * @param prep the prepared statement
     * @param start the 1-based index of the first parameter to bind
     * @param current the row providing the key values
     */
    private void setKey(PreparedStatement prep, int start, Value[] current)
            throws SQLException {
        for (int i = 0, size = key.size(); i < size; i++) {
            String col = key.get(i);
            int idx = getColumnIndex(col);
            Value v = current[idx];
            if (v == null || v == ValueNull.INSTANCE) {
                // rows with a unique key containing NULL are not supported,
                // as multiple such rows could exist
                throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
            }
            v.set(prep, start + i);
        }
    }

//    public boolean isRowDeleted(Value[] row) throws SQLException {
//        StringBuilder buff = new StringBuilder();
//        buff.append("SELECT COUNT(*) FROM ").
//            append(StringUtils.quoteIdentifier(tableName));
//        appendKeyCondition(buff);
//        PreparedStatement prep = conn.prepareStatement(buff.toString());
//        setKey(prep, 1, row);
//        ResultSet rs = prep.executeQuery();
//        rs.next();
//        return rs.getInt(1) == 0;
//    }

    /**
     * Append the (optionally schema-qualified) quoted table name.
     *
     * @param buff the statement builder
     */
    private void appendTableName(StatementBuilder buff) {
        if (schemaName != null && schemaName.length() > 0) {
            buff.append(StringUtils.quoteIdentifier(schemaName)).append('.');
        }
        buff.append(StringUtils.quoteIdentifier(tableName));
    }

    /**
     * Re-reads a row from the database and updates the values in the array.
     *
     * @param row the values that contain the key
     * @return the row
     */
    public Value[] readRow(Value[] row) throws SQLException {
        StatementBuilder buff = new StatementBuilder("SELECT ");
        appendColumnList(buff, false);
        buff.append(" FROM ");
        appendTableName(buff);
        appendKeyCondition(buff);
        PreparedStatement prep = conn.prepareStatement(buff.toString());
        setKey(prep, 1, row);
        ResultSet rs = prep.executeQuery();
        if (!rs.next()) {
            throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
        }
        Value[] newRow = new Value[columnCount];
        for (int i = 0; i < columnCount; i++) {
            int type = result.getColumnType(i);
            newRow[i] = DataType.readValue(conn.getSession(), rs, i + 1, type);
        }
        return newRow;
    }

    /**
     * Delete the given row in the database.
     *
     * @param current the row
     * @throws SQLException if this row has already been deleted
     */
    public void deleteRow(Value[] current) throws SQLException {
        StatementBuilder buff = new StatementBuilder("DELETE FROM ");
        appendTableName(buff);
        appendKeyCondition(buff);
        PreparedStatement prep = conn.prepareStatement(buff.toString());
        setKey(prep, 1, current);
        int count = prep.executeUpdate();
        if (count != 1) {
            // the row has already been deleted
            throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
        }
    }

    /**
     * Update a row in the database.
     *
     * @param current the old row
     * @param updateRow the new row
     * @throws SQLException if the row has been deleted
     */
    public void updateRow(Value[] current, Value[] updateRow) throws SQLException {
        StatementBuilder buff = new StatementBuilder("UPDATE ");
        appendTableName(buff);
        buff.append(" SET ");
        appendColumnList(buff, true);
        // TODO updatable result set: we could add all current values to the
        // where clause
        // - like this optimistic ('no') locking is possible
        appendKeyCondition(buff);
        PreparedStatement prep = conn.prepareStatement(buff.toString());
        int j = 1;
        for (int i = 0; i < columnCount; i++) {
            Value v = updateRow[i];
            if (v == null) {
                // a null entry means "not changed": keep the current value
                v = current[i];
            }
            v.set(prep, j++);
        }
        setKey(prep, j, current);
        int count = prep.executeUpdate();
        if (count != 1) {
            // the row has been deleted
            throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
        }
    }

    /**
     * Insert a new row into the database.
     *
     * @param row the new row
     * @throws SQLException if the row could not be inserted
     */
    public void insertRow(Value[] row) throws SQLException {
        StatementBuilder buff = new StatementBuilder("INSERT INTO ");
        appendTableName(buff);
        buff.append('(');
        appendColumnList(buff, false);
        buff.append(")VALUES(");
        buff.resetCount();
        for (int i = 0; i < columnCount; i++) {
            buff.appendExceptFirst(",");
            Value v = row[i];
            if (v == null) {
                // a null entry means "use the column default"
                buff.append("DEFAULT");
            } else {
                buff.append('?');
            }
        }
        buff.append(')');
        PreparedStatement prep = conn.prepareStatement(buff.toString());
        // bind only the non-default values, in order
        for (int i = 0, j = 0; i < columnCount; i++) {
            Value v = row[i];
            if (v != null) {
                v.set(prep, j++ + 1);
            }
        }
        int count = prep.executeUpdate();
        if (count != 1) {
            throw DbException.get(ErrorCode.NO_DATA_AVAILABLE);
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/schema/Constant.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.schema;
import org.h2.engine.DbObject;
import org.h2.engine.Session;
import org.h2.expression.ValueExpression;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.table.Table;
import org.h2.value.Value;
/**
 * A user-defined constant as created by the SQL statement
 * CREATE CONSTANT
 */
public class Constant extends SchemaObjectBase {

    /** The value of this constant. */
    private Value value;

    /** The value wrapped as an expression, cached when the value is set. */
    private ValueExpression expression;

    public Constant(Schema schema, int id, String name) {
        initSchemaObjectBase(schema, id, name, Trace.SCHEMA);
    }

    public void setValue(Value value) {
        this.value = value;
        this.expression = ValueExpression.get(value);
    }

    public ValueExpression getValue() {
        return expression;
    }

    @Override
    public int getType() {
        return DbObject.CONSTANT;
    }

    @Override
    public String getCreateSQL() {
        StringBuilder builder = new StringBuilder("CREATE CONSTANT ");
        builder.append(getSQL()).append(" VALUE ").append(value.getSQL());
        return builder.toString();
    }

    @Override
    public String getCreateSQLForCopy(Table table, String quotedName) {
        // constants are never copied together with a table
        throw DbException.throwInternalError(toString());
    }

    @Override
    public String getDropSQL() {
        return null;
    }

    @Override
    public void removeChildrenAndResources(Session session) {
        database.removeMeta(session, getId());
        invalidate();
    }

    @Override
    public void checkRename() {
        // ok
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/schema/Schema.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.schema;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.h2.api.ErrorCode;
import org.h2.command.ddl.CreateSynonymData;
import org.h2.command.ddl.CreateTableData;
import org.h2.constraint.Constraint;
import org.h2.engine.Database;
import org.h2.engine.DbObject;
import org.h2.engine.DbObjectBase;
import org.h2.engine.DbSettings;
import org.h2.engine.FunctionAlias;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.engine.User;
import org.h2.ext.pulsar.PulsarExtension;
import org.h2.index.Index;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.mvstore.db.MVTableEngine;
import org.h2.table.RegularTable;
import org.h2.table.Table;
import org.h2.table.TableLink;
import org.h2.table.TableSynonym;
import org.h2.util.New;
import org.h2.util.StringUtils;
/**
* A schema as created by the SQL statement
* CREATE SCHEMA
*/
public class Schema extends DbObjectBase {
private User owner;
private final boolean system;
private ArrayList<String> tableEngineParams;
private final ConcurrentHashMap<String, Table> tablesAndViews;
private final ConcurrentHashMap<String, TableSynonym> synonyms;
private final ConcurrentHashMap<String, Index> indexes;
private final ConcurrentHashMap<String, Sequence> sequences;
private final ConcurrentHashMap<String, TriggerObject> triggers;
private final ConcurrentHashMap<String, Constraint> constraints;
private final ConcurrentHashMap<String, Constant> constants;
private final ConcurrentHashMap<String, FunctionAlias> functions;
/**
* The set of returned unique names that are not yet stored. It is used to
* avoid returning the same unique name twice when multiple threads
* concurrently create objects.
*/
private final HashSet<String> temporaryUniqueNames = new HashSet<>();
/**
* Create a new schema object.
*
* @param database the database
* @param id the object id
* @param schemaName the schema name
* @param owner the owner of the schema
* @param system if this is a system schema (such a schema can not be
* dropped)
*/
public Schema(Database database, int id, String schemaName, User owner,
boolean system) {
tablesAndViews = database.newConcurrentStringMap();
synonyms = database.newConcurrentStringMap();
indexes = database.newConcurrentStringMap();
sequences = database.newConcurrentStringMap();
triggers = database.newConcurrentStringMap();
constraints = database.newConcurrentStringMap();
constants = database.newConcurrentStringMap();
functions = database.newConcurrentStringMap();
initDbObjectBase(database, id, schemaName, Trace.SCHEMA);
this.owner = owner;
this.system = system;
}
/**
* Check if this schema can be dropped. System schemas can not be dropped.
*
* @return true if it can be dropped
*/
public boolean canDrop() {
return !system;
}
@Override
public String getCreateSQLForCopy(Table table, String quotedName) {
throw DbException.throwInternalError(toString());
}
@Override
public String getDropSQL() {
return null;
}
@Override
public String getCreateSQL() {
if (system) {
return null;
}
return "CREATE SCHEMA IF NOT EXISTS " +
getSQL() + " AUTHORIZATION " + owner.getSQL();
}
@Override
public int getType() {
return DbObject.SCHEMA;
}
/**
* Return whether is this schema is empty (does not contain any objects).
*
* @return {@code true} if this schema is empty, {@code false} otherwise
*/
public boolean isEmpty() {
return tablesAndViews.isEmpty() && synonyms.isEmpty() && indexes.isEmpty() && sequences.isEmpty()
&& triggers.isEmpty() && constraints.isEmpty() && constants.isEmpty() && functions.isEmpty();
}
@Override
public void removeChildrenAndResources(Session session) {
while (triggers != null && triggers.size() > 0) {
TriggerObject obj = (TriggerObject) triggers.values().toArray()[0];
database.removeSchemaObject(session, obj);
}
while (constraints != null && constraints.size() > 0) {
Constraint obj = (Constraint) constraints.values().toArray()[0];
database.removeSchemaObject(session, obj);
}
// There can be dependencies between tables e.g. using computed columns,
// so we might need to loop over them multiple times.
boolean runLoopAgain = false;
do {
runLoopAgain = false;
if (tablesAndViews != null) {
// Loop over a copy because the map is modified underneath us.
for (Table obj : new ArrayList<>(tablesAndViews.values())) {
// Check for null because multiple tables might be deleted
// in one go underneath us.
if (obj.getName() != null) {
if (database.getDependentTable(obj, obj) == null) {
database.removeSchemaObject(session, obj);
} else {
runLoopAgain = true;
}
}
}
}
} while (runLoopAgain);
while (indexes != null && indexes.size() > 0) {
Index obj = (Index) indexes.values().toArray()[0];
database.removeSchemaObject(session, obj);
}
while (sequences != null && sequences.size() > 0) {
Sequence obj = (Sequence) sequences.values().toArray()[0];
database.removeSchemaObject(session, obj);
}
while (constants != null && constants.size() > 0) {
Constant obj = (Constant) constants.values().toArray()[0];
database.removeSchemaObject(session, obj);
}
while (functions != null && functions.size() > 0) {
FunctionAlias obj = (FunctionAlias) functions.values().toArray()[0];
database.removeSchemaObject(session, obj);
}
database.removeMeta(session, getId());
owner = null;
invalidate();
}
@Override
public void checkRename() {
// ok
}
/**
* Get the owner of this schema.
*
* @return the owner
*/
public User getOwner() {
return owner;
}
/**
* Get table engine params of this schema.
*
* @return default table engine params
*/
public ArrayList<String> getTableEngineParams() {
return tableEngineParams;
}
/**
* Set table engine params of this schema.
* @param tableEngineParams default table engine params
*/
public void setTableEngineParams(ArrayList<String> tableEngineParams) {
this.tableEngineParams = tableEngineParams;
}
/**
 * Return the name-to-object map that stores schema objects of the given
 * type. The cast is safe because each map only ever holds objects of its
 * own type.
 *
 * @param type the object type (one of the DbObject type constants)
 * @return the map for that object type
 */
@SuppressWarnings("unchecked")
private Map<String, SchemaObject> getMap(int type) {
    switch (type) {
    case DbObject.TABLE_OR_VIEW:
        return (Map<String, SchemaObject>) (Map<String, ?>) tablesAndViews;
    case DbObject.SYNONYM:
        return (Map<String, SchemaObject>) (Map<String, ?>) synonyms;
    case DbObject.SEQUENCE:
        return (Map<String, SchemaObject>) (Map<String, ?>) sequences;
    case DbObject.INDEX:
        return (Map<String, SchemaObject>) (Map<String, ?>) indexes;
    case DbObject.TRIGGER:
        return (Map<String, SchemaObject>) (Map<String, ?>) triggers;
    case DbObject.CONSTRAINT:
        return (Map<String, SchemaObject>) (Map<String, ?>) constraints;
    case DbObject.CONSTANT:
        return (Map<String, SchemaObject>) (Map<String, ?>) constants;
    case DbObject.FUNCTION_ALIAS:
        return (Map<String, SchemaObject>) (Map<String, ?>) functions;
    default:
        throw DbException.throwInternalError("type=" + type);
    }
}
/**
 * Add an object to this schema.
 * This method must not be called within CreateSchemaObject;
 * use Database.addSchemaObject() instead.
 *
 * @param obj the object to add
 */
public void add(SchemaObject obj) {
    if (SysProperties.CHECK && obj.getSchema() != this) {
        DbException.throwInternalError("wrong schema");
    }
    String objectName = obj.getName();
    Map<String, SchemaObject> objects = getMap(obj.getType());
    if (SysProperties.CHECK && objects.get(objectName) != null) {
        DbException.throwInternalError("object already exists: " + objectName);
    }
    objects.put(objectName, obj);
    // the name is now taken, so it no longer needs to be reserved
    freeUniqueName(objectName);
}
/**
 * Rename an object within this schema.
 *
 * @param obj the object to rename
 * @param newName the new name
 */
public void rename(SchemaObject obj, String newName) {
    Map<String, SchemaObject> objects = getMap(obj.getType());
    String oldName = obj.getName();
    if (SysProperties.CHECK) {
        if (!objects.containsKey(oldName)) {
            DbException.throwInternalError("not found: " + oldName);
        }
        if (oldName.equals(newName) || objects.containsKey(newName)) {
            DbException.throwInternalError("object already exists: " + newName);
        }
    }
    obj.checkRename();
    // re-key the map entry under the new name and release both reserved names
    objects.remove(oldName);
    freeUniqueName(oldName);
    obj.rename(newName);
    objects.put(newName, obj);
    freeUniqueName(newName);
}
/**
 * Try to find a table or view with this name. This method returns null if
 * no object with this name exists. Local temporary tables are also
 * returned. Synonyms are not returned or resolved.
 *
 * @param session the session
 * @param name the object name
 * @return the object or null
 */
public Table findTableOrView(Session session, String name) {
    Table found = tablesAndViews.get(name);
    if (found != null || session == null) {
        return found;
    }
    // fall back to the session's local temporary tables
    return session.findLocalTempTable(name);
}
/**
 * Try to find a table or view with this name. This method returns null if
 * no object with this name exists. Local temporary tables are also
 * returned. If a synonym with this name exists, the backing table of the
 * synonym is returned.
 *
 * @param session the session
 * @param name the object name
 * @return the object or null
 */
public Table resolveTableOrView(Session session, String name) {
    Table table = findTableOrView(session, name);
    if (table != null) {
        return table;
    }
    // no direct match: try to resolve through a synonym
    TableSynonym synonym = synonyms.get(name);
    return synonym == null ? null : synonym.getSynonymFor();
}
/**
 * Try to find a synonym with this name.
 *
 * @param name the object name
 * @return the synonym, or null if no synonym with this name exists
 */
public TableSynonym getSynonym(String name) {
    return synonyms.get(name);
}
/**
 * Try to find an index with this name. This method returns null if
 * no object with this name exists. Indexes of local temporary tables are
 * also considered.
 *
 * @param session the session
 * @param name the object name
 * @return the object or null
 */
public Index findIndex(Session session, String name) {
    Index found = indexes.get(name);
    return found != null ? found : session.findLocalTempTableIndex(name);
}
/**
 * Try to find a trigger with this name.
 *
 * @param name the object name
 * @return the trigger, or null if no trigger with this name exists
 */
public TriggerObject findTrigger(String name) {
    return triggers.get(name);
}
/**
 * Try to find a sequence with this name.
 *
 * @param sequenceName the object name
 * @return the sequence, or null if no sequence with this name exists
 */
public Sequence findSequence(String sequenceName) {
    return sequences.get(sequenceName);
}
/**
 * Try to find a constraint with this name. This method returns null if no
 * object with this name exists. Constraints of local temporary tables are
 * also considered.
 *
 * @param session the session
 * @param name the object name
 * @return the object or null
 */
public Constraint findConstraint(Session session, String name) {
    Constraint found = constraints.get(name);
    return found != null ? found : session.findLocalTempTableConstraint(name);
}
/**
 * Try to find a user defined constant with this name.
 *
 * @param constantName the object name
 * @return the constant, or null if no constant with this name exists
 */
public Constant findConstant(String constantName) {
    return constants.get(constantName);
}
/**
 * Try to find a user defined function with this name. Lookup is delegated
 * to the Pulsar extension instead of a plain map access.
 *
 * @author Vincent Zhang ivincent.zhang@gmail.com 2020/08/04
 *
 * @param functionAlias the object name
 * @return the function alias, or null if no function with this name exists
 */
public FunctionAlias findFunction(String functionAlias) {
    return PulsarExtension.findFunction(functions, functionAlias);
}
/**
 * Release a previously reserved unique object name.
 *
 * @param name the object name; null is silently ignored
 */
public void freeUniqueName(String name) {
    if (name == null) {
        return;
    }
    synchronized (temporaryUniqueNames) {
        temporaryUniqueNames.remove(name);
    }
}
/**
 * Generate a name that is unique within the given map and not currently
 * reserved. The name is derived from the hex hash of the object's name:
 * progressively longer prefixes of the hash are tried first, and if all of
 * them collide, a numeric suffix is appended. The chosen name is reserved
 * in {@code temporaryUniqueNames} until released via freeUniqueName.
 *
 * @param obj the object the name is generated for (its name seeds the hash)
 * @param map existing objects whose names must be avoided
 * @param prefix the prefix of the generated name (e.g. "CONSTRAINT_")
 * @return the reserved unique name
 */
private String getUniqueName(DbObject obj,
        Map<String, ? extends SchemaObject> map, String prefix) {
    String hash = StringUtils.toUpperEnglish(Integer.toHexString(obj.getName().hashCode()));
    String name = null;
    synchronized (temporaryUniqueNames) {
        // first pass: prefix + growing slice of the hash (shortest wins)
        for (int i = 1, len = hash.length(); i < len; i++) {
            name = prefix + hash.substring(0, i);
            if (!map.containsKey(name) && !temporaryUniqueNames.contains(name)) {
                break;
            }
            name = null;
        }
        if (name == null) {
            // second pass: full hash plus an incrementing counter; this
            // always terminates because the counter is unbounded
            prefix = prefix + hash + "_";
            for (int i = 0;; i++) {
                name = prefix + i;
                if (!map.containsKey(name) && !temporaryUniqueNames.contains(name)) {
                    break;
                }
            }
        }
        // reserve the name so concurrent callers do not pick the same one
        temporaryUniqueNames.add(name);
    }
    return name;
}
/**
 * Create a unique constraint name for the given table.
 *
 * @param session the session
 * @param table the constraint table
 * @return the unique name
 */
public String getUniqueConstraintName(Session session, Table table) {
    // local temporary tables keep their constraints in the session,
    // not in the schema
    Map<String, Constraint> candidates =
            table.isTemporary() && !table.isGlobalTemporary()
                    ? session.getLocalTempTableConstraints()
                    : constraints;
    return getUniqueName(table, candidates, "CONSTRAINT_");
}
/**
 * Create a unique index name for the given table.
 *
 * @param session the session
 * @param table the indexed table
 * @param prefix the index name prefix
 * @return the unique name
 */
public String getUniqueIndexName(Session session, Table table, String prefix) {
    // local temporary tables keep their indexes in the session,
    // not in the schema
    Map<String, Index> candidates =
            table.isTemporary() && !table.isGlobalTemporary()
                    ? session.getLocalTempTableIndexes()
                    : indexes;
    return getUniqueName(table, candidates, prefix);
}
/**
 * Get the table or view with the given name.
 * Local temporary tables are also returned.
 *
 * @param session the session
 * @param name the table or view name
 * @return the table or view
 * @throws DbException if no such object exists
 */
public Table getTableOrView(Session session, String name) {
    Table table = tablesAndViews.get(name);
    if (table != null) {
        return table;
    }
    if (session != null) {
        table = session.findLocalTempTable(name);
    }
    if (table == null) {
        throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, name);
    }
    return table;
}
/**
 * Get the index with the given name.
 *
 * @param name the index name
 * @return the index
 * @throws DbException if no such object exists
 */
public Index getIndex(String name) {
    Index found = indexes.get(name);
    if (found == null) {
        throw DbException.get(ErrorCode.INDEX_NOT_FOUND_1, name);
    }
    return found;
}
/**
 * Get the constraint with the given name.
 *
 * @param name the constraint name
 * @return the constraint
 * @throws DbException if no such object exists
 */
public Constraint getConstraint(String name) {
    Constraint found = constraints.get(name);
    if (found == null) {
        throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, name);
    }
    return found;
}
/**
 * Get the user defined constant with the given name.
 *
 * @param constantName the constant name
 * @return the constant
 * @throws DbException if no such object exists
 */
public Constant getConstant(String constantName) {
    Constant found = constants.get(constantName);
    if (found == null) {
        throw DbException.get(ErrorCode.CONSTANT_NOT_FOUND_1, constantName);
    }
    return found;
}
/**
 * Get the sequence with the given name.
 *
 * @param sequenceName the sequence name
 * @return the sequence
 * @throws DbException if no such object exists
 */
public Sequence getSequence(String sequenceName) {
    Sequence found = sequences.get(sequenceName);
    if (found == null) {
        throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, sequenceName);
    }
    return found;
}
/**
 * Get all objects of every type in this schema.
 *
 * @return a (possibly empty) list of all objects
 */
public ArrayList<SchemaObject> getAll() {
    ArrayList<SchemaObject> all = New.arrayList();
    // keep the same type ordering as the individual maps were added in
    int[] types = { DbObject.TABLE_OR_VIEW, DbObject.SYNONYM,
            DbObject.SEQUENCE, DbObject.INDEX, DbObject.TRIGGER,
            DbObject.CONSTRAINT, DbObject.CONSTANT, DbObject.FUNCTION_ALIAS };
    for (int type : types) {
        all.addAll(getMap(type).values());
    }
    return all;
}
/**
 * Get all objects of the given type.
 *
 * @param type the object type (one of the DbObject type constants)
 * @return a (possibly empty) snapshot list of all objects of that type
 */
public ArrayList<SchemaObject> getAll(int type) {
    return new ArrayList<>(getMap(type).values());
}
/**
 * Get a snapshot of all tables and views in this schema.
 *
 * @return a (possibly empty) list of all tables and views
 */
public ArrayList<Table> getAllTablesAndViews() {
    // synchronize on the database so the map is not modified while copying
    synchronized (database) {
        return new ArrayList<>(tablesAndViews.values());
    }
}
/**
 * Get a snapshot of all table synonyms in this schema.
 *
 * @return a (possibly empty) list of all synonyms
 */
public ArrayList<TableSynonym> getAllSynonyms() {
    // synchronize on the database so the map is not modified while copying
    synchronized (database) {
        return new ArrayList<>(synonyms.values());
    }
}
/**
 * Get the table with the given name, if any.
 *
 * @param name the table name
 * @return the table or null if not found
 */
public Table getTableOrViewByName(String name) {
    synchronized (database) {
        return tablesAndViews.get(name);
    }
}
/**
 * Remove an object from this schema.
 *
 * @param obj the object to remove
 */
public void remove(SchemaObject obj) {
    String objectName = obj.getName();
    Map<String, SchemaObject> objects = getMap(obj.getType());
    if (SysProperties.CHECK && !objects.containsKey(objectName)) {
        DbException.throwInternalError("not found: " + objectName);
    }
    objects.remove(objectName);
    // the name can be reused once the object is gone
    freeUniqueName(objectName);
}
/**
 * Add a table to the schema.
 *
 * @param data the create table information
 * @return the created {@link Table} object
 */
public Table createTable(CreateTableData data) {
    synchronized (database) {
        // only persistent (non-local-temporary) tables take the meta lock
        if (!data.temporary || data.globalTemporary) {
            database.lockMeta(data.session);
        }
        data.schema = this;
        // choose a table engine if none was requested: the configured
        // default engine wins, otherwise the MVStore engine when enabled
        if (data.tableEngine == null) {
            DbSettings s = database.getSettings();
            if (s.defaultTableEngine != null) {
                data.tableEngine = s.defaultTableEngine;
            } else if (s.mvStore) {
                data.tableEngine = MVTableEngine.class.getName();
            }
        }
        if (data.tableEngine != null) {
            if (data.tableEngineParams == null) {
                // fall back to the schema-wide default engine parameters
                data.tableEngineParams = this.tableEngineParams;
            }
            return database.getTableEngine(data.tableEngine).createTable(data);
        }
        // no engine at all: plain in-memory/page-store table
        return new RegularTable(data);
    }
}
/**
 * Add a table synonym to the schema.
 *
 * @param data the create synonym information
 * @return the created {@link TableSynonym} object
 */
public TableSynonym createSynonym(CreateSynonymData data) {
    synchronized (database) {
        // synonyms are stored in the meta data, so the meta table is locked
        database.lockMeta(data.session);
        data.schema = this;
        return new TableSynonym(data);
    }
}
/**
 * Add a linked table to the schema.
 *
 * @param id the object id
 * @param tableName the table name of the alias
 * @param driver the driver class name
 * @param url the database URL
 * @param user the user name
 * @param password the password
 * @param originalSchema the schema name of the target table
 * @param originalTable the table name of the target table
 * @param emitUpdates if updates should be emitted instead of delete/insert
 * @param force create the object even if the database can not be accessed
 * @return the {@link TableLink} object
 */
public TableLink createTableLink(int id, String tableName, String driver,
        String url, String user, String password, String originalSchema,
        String originalTable, boolean emitUpdates, boolean force) {
    synchronized (database) {
        return new TableLink(this, id, tableName, driver, url, user,
                password, originalSchema, originalTable, emitUpdates, force);
    }
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/schema/SchemaObject.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.schema;
import org.h2.engine.DbObject;
/**
 * Any database object that is stored in a schema.
 */
public interface SchemaObject extends DbObject {

    /**
     * Get the schema in which this object is defined.
     *
     * @return the schema
     */
    Schema getSchema();

    /**
     * Check whether this is a hidden object that doesn't appear in the meta
     * data and in the script, and is not dropped on DROP ALL OBJECTS.
     *
     * @return true if it is hidden
     */
    boolean isHidden();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/schema/SchemaObjectBase.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.schema;
import org.h2.engine.DbObjectBase;
/**
 * The base class for classes implementing SchemaObject. It stores the owning
 * schema and qualifies the object's SQL name with the schema name.
 */
public abstract class SchemaObjectBase extends DbObjectBase implements
        SchemaObject {

    // the schema this object belongs to; set once in initSchemaObjectBase
    private Schema schema;

    /**
     * Initialize some attributes of this object.
     *
     * @param newSchema the schema
     * @param id the object id
     * @param name the name
     * @param traceModuleId the trace module id
     */
    protected void initSchemaObjectBase(Schema newSchema, int id, String name,
            int traceModuleId) {
        initDbObjectBase(newSchema.getDatabase(), id, name, traceModuleId);
        this.schema = newSchema;
    }

    @Override
    public Schema getSchema() {
        return schema;
    }

    @Override
    public String getSQL() {
        // schema-qualified name, e.g. "SCHEMA"."OBJECT"
        return schema.getSQL() + "." + super.getSQL();
    }

    @Override
    public boolean isHidden() {
        // schema objects are visible by default; subclasses may override
        return false;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/schema/Sequence.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.schema;
import java.math.BigInteger;
import org.h2.api.ErrorCode;
import org.h2.engine.DbObject;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.table.Table;
/**
 * A sequence is created using the statement
 * CREATE SEQUENCE.
 * <p>
 * Values are handed out from an in-memory counter; only every
 * {@code cacheSize} values the persisted value ("value with margin") is
 * advanced and flushed, so a crash can skip at most one cache window.
 */
public class Sequence extends SchemaObjectBase {

    /**
     * The default cache size for sequences.
     */
    public static final int DEFAULT_CACHE_SIZE = 32;

    // next value to hand out (guarded by synchronizing on this)
    private long value;
    // upper (or lower, for negative increments) bound of pre-allocated
    // values; persisted to the meta data instead of the exact value
    private long valueWithMargin;
    private long increment;
    private long cacheSize;
    private long minValue;
    private long maxValue;
    private boolean cycle;
    private boolean belongsToTable;
    // when true, getCreateSQL() writes valueWithMargin instead of value
    private boolean writeWithMargin;

    /**
     * Creates a new sequence for an auto-increment column.
     *
     * @param schema the schema
     * @param id the object id
     * @param name the sequence name
     * @param startValue the first value to return
     * @param increment the increment count
     */
    public Sequence(Schema schema, int id, String name, long startValue,
            long increment) {
        this(schema, id, name, startValue, increment, null, null, null, false,
                true);
    }

    /**
     * Creates a new sequence.
     *
     * @param schema the schema
     * @param id the object id
     * @param name the sequence name
     * @param startValue the first value to return, or null for the default
     * @param increment the increment count, or null for 1
     * @param cacheSize the number of entries to pre-fetch, or null for the
     *            default
     * @param minValue the minimum value, or null for the default
     * @param maxValue the maximum value, or null for the default
     * @param cycle whether to jump back to the min value if needed
     * @param belongsToTable whether this sequence belongs to a table (for
     *            auto-increment columns)
     * @throws DbException if the resulting attributes are inconsistent
     */
    public Sequence(Schema schema, int id, String name, Long startValue,
            Long increment, Long cacheSize, Long minValue, Long maxValue,
            boolean cycle, boolean belongsToTable) {
        initSchemaObjectBase(schema, id, name, Trace.SEQUENCE);
        this.increment = increment != null ?
                increment : 1;
        this.minValue = minValue != null ?
                minValue : getDefaultMinValue(startValue, this.increment);
        this.maxValue = maxValue != null ?
                maxValue : getDefaultMaxValue(startValue, this.increment);
        this.value = startValue != null ?
                startValue : getDefaultStartValue(this.increment);
        this.valueWithMargin = value;
        // a cache size below 1 would stall getNext(), so clamp it
        this.cacheSize = cacheSize != null ?
                Math.max(1, cacheSize) : DEFAULT_CACHE_SIZE;
        this.cycle = cycle;
        this.belongsToTable = belongsToTable;
        if (!isValid(this.value, this.minValue, this.maxValue, this.increment)) {
            throw DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID, name,
                    String.valueOf(this.value), String.valueOf(this.minValue),
                    String.valueOf(this.maxValue),
                    String.valueOf(this.increment));
        }
    }

    /**
     * Allows the start value, increment, min value and max value to be updated
     * atomically, including atomic validation. Useful because setting these
     * attributes one after the other could otherwise result in an invalid
     * sequence state (e.g. min value &gt; max value, start value &lt; min
     * value, etc).
     *
     * @param startValue the new start value (<code>null</code> if no change)
     * @param minValue the new min value (<code>null</code> if no change)
     * @param maxValue the new max value (<code>null</code> if no change)
     * @param increment the new increment (<code>null</code> if no change)
     * @throws DbException if the combined attributes are inconsistent
     */
    public synchronized void modify(Long startValue, Long minValue,
            Long maxValue, Long increment) {
        if (startValue == null) {
            startValue = this.value;
        }
        if (minValue == null) {
            minValue = this.minValue;
        }
        if (maxValue == null) {
            maxValue = this.maxValue;
        }
        if (increment == null) {
            increment = this.increment;
        }
        // validate the full combination before applying any of it
        if (!isValid(startValue, minValue, maxValue, increment)) {
            throw DbException.get(ErrorCode.SEQUENCE_ATTRIBUTES_INVALID,
                    getName(), String.valueOf(startValue),
                    String.valueOf(minValue),
                    String.valueOf(maxValue),
                    String.valueOf(increment));
        }
        this.value = startValue;
        this.valueWithMargin = startValue;
        this.minValue = minValue;
        this.maxValue = maxValue;
        this.increment = increment;
    }

    /**
     * Validates the specified prospective start value, min value, max value and
     * increment relative to each other, since each of their respective
     * validities are contingent on the values of the other parameters.
     *
     * @param value the prospective start value
     * @param minValue the prospective min value
     * @param maxValue the prospective max value
     * @param increment the prospective increment
     * @return true if the combination is consistent
     */
    private static boolean isValid(long value, long minValue, long maxValue,
            long increment) {
        return minValue <= value &&
            maxValue >= value &&
            maxValue > minValue &&
            increment != 0 &&
            // Math.abs(increment) < maxValue - minValue
            // use BigInteger to avoid overflows when maxValue and minValue
            // are really big
            BigInteger.valueOf(increment).abs().compareTo(
                    BigInteger.valueOf(maxValue).subtract(BigInteger.valueOf(minValue))) < 0;
    }

    // default MINVALUE: 1 for ascending sequences (or the start value if
    // smaller), Long.MIN_VALUE for descending ones
    private static long getDefaultMinValue(Long startValue, long increment) {
        long v = increment >= 0 ? 1 : Long.MIN_VALUE;
        if (startValue != null && increment >= 0 && startValue < v) {
            v = startValue;
        }
        return v;
    }

    // default MAXVALUE: Long.MAX_VALUE for ascending sequences, -1 for
    // descending ones (or the start value if larger)
    private static long getDefaultMaxValue(Long startValue, long increment) {
        long v = increment >= 0 ? Long.MAX_VALUE : -1;
        if (startValue != null && increment < 0 && startValue > v) {
            v = startValue;
        }
        return v;
    }

    // ascending sequences start at the minimum, descending at the maximum
    private long getDefaultStartValue(long increment) {
        return increment >= 0 ? minValue : maxValue;
    }

    public boolean getBelongsToTable() {
        return belongsToTable;
    }

    public long getIncrement() {
        return increment;
    }

    public long getMinValue() {
        return minValue;
    }

    public long getMaxValue() {
        return maxValue;
    }

    public boolean getCycle() {
        return cycle;
    }

    public void setCycle(boolean cycle) {
        this.cycle = cycle;
    }

    @Override
    public String getDropSQL() {
        // sequences owned by a table are dropped with the table, so no
        // stand-alone DROP statement is generated for them
        if (getBelongsToTable()) {
            return null;
        }
        return "DROP SEQUENCE IF EXISTS " + getSQL();
    }

    @Override
    public String getCreateSQLForCopy(Table table, String quotedName) {
        // sequences are never copied this way
        throw DbException.throwInternalError(toString());
    }

    @Override
    public synchronized String getCreateSQL() {
        // when flushing meta data, persist the margin so a crash cannot
        // hand out duplicate values
        long v = writeWithMargin ? valueWithMargin : value;
        StringBuilder buff = new StringBuilder("CREATE SEQUENCE ");
        buff.append(getSQL()).append(" START WITH ").append(v);
        if (increment != 1) {
            buff.append(" INCREMENT BY ").append(increment);
        }
        if (minValue != getDefaultMinValue(v, increment)) {
            buff.append(" MINVALUE ").append(minValue);
        }
        if (maxValue != getDefaultMaxValue(v, increment)) {
            buff.append(" MAXVALUE ").append(maxValue);
        }
        if (cycle) {
            buff.append(" CYCLE");
        }
        if (cacheSize != DEFAULT_CACHE_SIZE) {
            buff.append(" CACHE ").append(cacheSize);
        }
        if (belongsToTable) {
            buff.append(" BELONGS_TO_TABLE");
        }
        return buff.toString();
    }

    /**
     * Get the next value for this sequence.
     *
     * @param session the session
     * @return the next value
     * @throws DbException if the sequence is exhausted and does not cycle
     */
    public long getNext(Session session) {
        boolean needsFlush = false;
        long result;
        synchronized (this) {
            // pre-allocate another cache window once the margin is reached
            if ((increment > 0 && value >= valueWithMargin) ||
                    (increment < 0 && value <= valueWithMargin)) {
                valueWithMargin += increment * cacheSize;
                needsFlush = true;
            }
            if ((increment > 0 && value > maxValue) ||
                    (increment < 0 && value < minValue)) {
                if (cycle) {
                    // wrap around to the other end of the range
                    value = increment > 0 ? minValue : maxValue;
                    valueWithMargin = value + (increment * cacheSize);
                    needsFlush = true;
                } else {
                    throw DbException.get(ErrorCode.SEQUENCE_EXHAUSTED, getName());
                }
            }
            result = value;
            value += increment;
        }
        // flush outside the monitor to avoid holding it during meta updates
        if (needsFlush) {
            flush(session);
        }
        return result;
    }

    /**
     * Flush the current value to disk.
     */
    public void flushWithoutMargin() {
        if (valueWithMargin != value) {
            valueWithMargin = value;
            flush(null);
        }
    }

    /**
     * Flush the current value, including the margin, to disk.
     *
     * @param session the session
     */
    public void flush(Session session) {
        if (isTemporary()) {
            // temporary sequences are not persisted
            return;
        }
        if (session == null || !database.isSysTableLockedBy(session)) {
            // This session may not lock the sys table (except if it has already
            // locked it) because it must be committed immediately, otherwise
            // other threads can not access the sys table.
            Session sysSession = database.getSystemSession();
            synchronized (database.isMultiThreaded() ? sysSession : database) {
                flushInternal(sysSession);
                sysSession.commit(false);
            }
        } else {
            synchronized (database.isMultiThreaded() ? session : database) {
                flushInternal(session);
            }
        }
    }

    private void flushInternal(Session session) {
        final boolean metaWasLocked = database.lockMeta(session);
        // just for this case, use the value with the margin
        try {
            writeWithMargin = true;
            database.updateMeta(session, this);
        } finally {
            writeWithMargin = false;
        }
        if (!metaWasLocked) {
            // only release the lock if this call acquired it
            database.unlockMeta(session);
        }
    }

    /**
     * Flush the current value to disk and close this object.
     */
    public void close() {
        flushWithoutMargin();
    }

    @Override
    public int getType() {
        return DbObject.SEQUENCE;
    }

    @Override
    public void removeChildrenAndResources(Session session) {
        database.removeMeta(session, getId());
        invalidate();
    }

    @Override
    public void checkRename() {
        // renaming a sequence is always allowed
    }

    /**
     * @return the value most recently returned by getNext (the counter is
     *         one increment ahead of it)
     */
    public synchronized long getCurrentValue() {
        return value - increment;
    }

    public void setBelongsToTable(boolean b) {
        this.belongsToTable = b;
    }

    public void setCacheSize(long cacheSize) {
        // a cache size below 1 would stall getNext(), so clamp it
        this.cacheSize = Math.max(1, cacheSize);
    }

    public long getCacheSize() {
        return cacheSize;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/schema/TriggerObject.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.schema;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Arrays;
import org.h2.api.ErrorCode;
import org.h2.api.Trigger;
import org.h2.command.Parser;
import org.h2.engine.Constants;
import org.h2.engine.DbObject;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.result.Row;
import org.h2.table.Table;
import org.h2.util.JdbcUtils;
import org.h2.util.SourceCompiler;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.value.DataType;
import org.h2.value.Value;
/**
*A trigger is created using the statement
* CREATE TRIGGER
*/
public class TriggerObject extends SchemaObjectBase {
/**
* The default queue size.
*/
public static final int DEFAULT_QUEUE_SIZE = 1024;
private boolean insteadOf;
private boolean before;
private int typeMask;
private boolean rowBased;
private boolean onRollback;
// TODO trigger: support queue and noWait = false as well
private int queueSize = DEFAULT_QUEUE_SIZE;
private boolean noWait;
private Table table;
private String triggerClassName;
private String triggerSource;
private Trigger triggerCallback;
/**
 * Create a trigger for the given table.
 *
 * @param schema the schema the trigger belongs to
 * @param id the object id
 * @param name the trigger name
 * @param table the table the trigger is defined on
 */
public TriggerObject(Schema schema, int id, String name, Table table) {
    initSchemaObjectBase(schema, id, name, Trace.TRIGGER);
    this.table = table;
    // a trigger on a temporary table is itself temporary
    setTemporary(table.isTemporary());
}
/**
 * Set whether this trigger fires before the operation (BEFORE trigger)
 * or after it (AFTER trigger).
 *
 * @param before true for a BEFORE trigger
 */
public void setBefore(boolean before) {
    this.before = before;
}
/**
 * Set whether this is an INSTEAD OF trigger.
 *
 * @param insteadOf true for an INSTEAD OF trigger
 */
public void setInsteadOf(boolean insteadOf) {
    this.insteadOf = insteadOf;
}
/**
 * Instantiate and initialize the user trigger implementation, either from
 * a class name or from compiled source. Does nothing if it is already
 * loaded. On failure the callback is reset so a later call retries.
 *
 * @throws DbException if the trigger object cannot be created
 */
private synchronized void load() {
    if (triggerCallback != null) {
        // already loaded
        return;
    }
    try {
        Session sysSession = database.getSystemSession();
        // connection handed to the user code's init() callback
        Connection c2 = sysSession.createConnection(false);
        Object obj;
        if (triggerClassName != null) {
            obj = JdbcUtils.loadUserClass(triggerClassName).newInstance();
        } else {
            obj = loadFromSource();
        }
        triggerCallback = (Trigger) obj;
        triggerCallback.init(c2, getSchema().getName(), getName(),
                table.getName(), before, typeMask);
    } catch (Throwable e) {
        // try again later
        triggerCallback = null;
        throw DbException.get(ErrorCode.ERROR_CREATING_TRIGGER_OBJECT_3, e, getName(),
                triggerClassName != null ? triggerClassName : "..source..", e.toString());
    }
}
/**
 * Compile the trigger's source code and return a Trigger instance.
 * Supports both javax.script sources (evaluated directly) and Java
 * sources (a zero-argument method returning a Trigger).
 *
 * @return the trigger instance created from the source
 * @throws DbException on compilation or invocation failure
 */
private Trigger loadFromSource() {
    SourceCompiler compiler = database.getCompiler();
    // the compiler instance is shared, so serialize access to it
    synchronized (compiler) {
        String fullClassName = Constants.USER_PACKAGE + ".trigger." + getName();
        compiler.setSource(fullClassName, triggerSource);
        try {
            if (SourceCompiler.isJavaxScriptSource(triggerSource)) {
                return (Trigger) compiler.getCompiledScript(fullClassName).eval();
            } else {
                final Method m = compiler.getMethod(fullClassName);
                if (m.getParameterTypes().length > 0) {
                    throw new IllegalStateException("No parameters are allowed for a trigger");
                }
                // static factory method; null receiver
                return (Trigger) m.invoke(null);
            }
        } catch (DbException e) {
            throw e;
        } catch (Exception e) {
            throw DbException.get(ErrorCode.SYNTAX_ERROR_1, e, triggerSource);
        }
    }
}
/**
 * Set the trigger class name and load the class if possible.
 *
 * @param triggerClassName the name of the trigger class
 * @param force whether exceptions (due to missing class or access rights)
 *            should be ignored
 */
public void setTriggerClassName(String triggerClassName, boolean force) {
    setTriggerAction(triggerClassName, null, force);
}
/**
 * Set the trigger source code and compile it if possible.
 *
 * @param source the source code of a method returning a {@link Trigger}
 * @param force whether exceptions (due to syntax error) should be ignored
 */
public void setTriggerSource(String source, boolean force) {
    setTriggerAction(null, source, force);
}
/**
 * Store the trigger implementation (exactly one of class name or source)
 * and attempt to load it immediately.
 *
 * @param triggerClassName the trigger class name, or null
 * @param source the trigger source code, or null
 * @param force when true, load failures are swallowed so the trigger can
 *            be created anyway
 */
private void setTriggerAction(String triggerClassName, String source, boolean force) {
    this.triggerClassName = triggerClassName;
    this.triggerSource = source;
    try {
        load();
    } catch (DbException e) {
        if (force) {
            // forced creation: keep the definition even though it does
            // not load right now
            return;
        }
        throw e;
    }
}
/**
 * Call the trigger class if required. This method does nothing if the
 * trigger is not defined for the given action. This method is called before
 * or after any rows have been processed, once for each statement.
 *
 * @param session the session
 * @param type the trigger type
 * @param beforeAction if this method is called before applying the changes
 * @throws DbException if the user trigger code throws
 */
public void fire(Session session, int type, boolean beforeAction) {
    // statement-level path: skip row-based triggers, wrong timing,
    // and types this trigger is not registered for
    if (rowBased || before != beforeAction || (typeMask & type) == 0) {
        return;
    }
    load();
    Connection c2 = session.createConnection(false);
    boolean old = false;
    if (type != Trigger.SELECT) {
        // user code must not commit/roll back the surrounding statement
        old = session.setCommitOrRollbackDisabled(true);
    }
    // remember the identity so the trigger's own inserts don't leak into
    // the caller's scope identity
    Value identity = session.getLastScopeIdentity();
    try {
        // statement-level call: no old/new row data
        triggerCallback.fire(c2, null, null);
    } catch (Throwable e) {
        throw DbException.get(ErrorCode.ERROR_EXECUTING_TRIGGER_3, e, getName(),
                triggerClassName != null ? triggerClassName : "..source..", e.toString());
    } finally {
        if (session.getLastTriggerIdentity() != null) {
            session.setLastScopeIdentity(session.getLastTriggerIdentity());
            session.setLastTriggerIdentity(null);
        } else {
            session.setLastScopeIdentity(identity);
        }
        if (type != Trigger.SELECT) {
            session.setCommitOrRollbackDisabled(old);
        }
    }
}
/**
 * Convert a row to an array of column values, or null for a null row.
 *
 * @param row the row to convert, may be null
 * @return the column values as Java objects, or null
 */
private static Object[] convertToObjectList(Row row) {
    if (row == null) {
        return null;
    }
    int columnCount = row.getColumnCount();
    Object[] values = new Object[columnCount];
    for (int col = 0; col < columnCount; col++) {
        values[col] = row.getValue(col).getObject();
    }
    return values;
}
/**
 * Call the fire method of the user-defined trigger class if required. This
 * method does nothing if the trigger is not defined for the given action.
 * This method is called before or after a row is processed, possibly many
 * times for each statement.
 *
 * @param session the session
 * @param table the table
 * @param oldRow the old row
 * @param newRow the new row
 * @param beforeAction true if this method is called before the operation is
 *            applied
 * @param rollback when the operation occurred within a rollback
 * @return true if no further action is required (for 'instead of' triggers)
 */
public boolean fireRow(Session session, Table table, Row oldRow, Row newRow,
        boolean beforeAction, boolean rollback) {
    // row-level path: skip statement-level triggers and wrong timing
    if (!rowBased || before != beforeAction) {
        return false;
    }
    if (rollback && !onRollback) {
        return false;
    }
    load();
    Object[] oldList;
    Object[] newList;
    // determine the operation from the old/new row combination:
    // INSERT = (null, row), UPDATE = (row, row), DELETE = (row, null)
    boolean fire = false;
    if ((typeMask & Trigger.INSERT) != 0) {
        if (oldRow == null && newRow != null) {
            fire = true;
        }
    }
    if ((typeMask & Trigger.UPDATE) != 0) {
        if (oldRow != null && newRow != null) {
            fire = true;
        }
    }
    if ((typeMask & Trigger.DELETE) != 0) {
        if (oldRow != null && newRow == null) {
            fire = true;
        }
    }
    if (!fire) {
        return false;
    }
    oldList = convertToObjectList(oldRow);
    newList = convertToObjectList(newRow);
    Object[] newListBackup;
    if (before && newList != null) {
        // keep a copy so changes made by a BEFORE trigger can be detected
        // and written back into the new row
        newListBackup = Arrays.copyOf(newList, newList.length);
    } else {
        newListBackup = null;
    }
    Connection c2 = session.createConnection(false);
    boolean old = session.getAutoCommit();
    boolean oldDisabled = session.setCommitOrRollbackDisabled(true);
    Value identity = session.getLastScopeIdentity();
    try {
        session.setAutoCommit(false);
        triggerCallback.fire(c2, oldList, newList);
        if (newListBackup != null) {
            // apply any column values the trigger modified in-place
            for (int i = 0; i < newList.length; i++) {
                Object o = newList[i];
                if (o != newListBackup[i]) {
                    Value v = DataType.convertToValue(session, o, Value.UNKNOWN);
                    session.getGeneratedKeys().add(table.getColumn(i));
                    newRow.setValue(i, v);
                }
            }
        }
    } catch (Exception e) {
        if (onRollback) {
            // ignore failures while rolling back; the rollback must proceed
        } else {
            throw DbException.convert(e);
        }
    } finally {
        // restore the caller's identity and commit state
        if (session.getLastTriggerIdentity() != null) {
            session.setLastScopeIdentity(session.getLastTriggerIdentity());
            session.setLastTriggerIdentity(null);
        } else {
            session.setLastScopeIdentity(identity);
        }
        session.setCommitOrRollbackDisabled(oldDisabled);
        session.setAutoCommit(old);
    }
    return insteadOf;
}
/**
 * Set the trigger type mask (a combination of the Trigger type bits).
 *
 * @param typeMask the type mask
 */
public void setTypeMask(int typeMask) {
    this.typeMask = typeMask;
}
/**
 * Set whether this trigger is a row based trigger (FOR EACH ROW).
 *
 * @param rowBased true if row based
 */
public void setRowBased(boolean rowBased) {
    this.rowBased = rowBased;
}
/**
 * Set the queue size.
 *
 * @param size the queue size
 */
public void setQueueSize(int size) {
    this.queueSize = size;
}
/**
 * Get the queue size.
 *
 * @return the queue size
 */
public int getQueueSize() {
    return queueSize;
}
/**
 * Set whether the NOWAIT option is used.
 *
 * @param noWait true if NOWAIT is set
 */
public void setNoWait(boolean noWait) {
    this.noWait = noWait;
}
/**
 * Check whether the NOWAIT option is set.
 *
 * @return true if it is
 */
public boolean isNoWait() {
    return noWait;
}
/**
 * Set whether this trigger also fires on rollback.
 *
 * @param onRollback true if the trigger fires on rollback
 */
public void setOnRollback(boolean onRollback) {
    this.onRollback = onRollback;
}
@Override
public String getDropSQL() {
    // no separate DROP statement is generated for this object
    return null;
}
/**
 * Build the CREATE TRIGGER statement for this trigger, referencing the
 * given table and using the given (already quoted) trigger name.
 *
 * @param targetTable the table the statement should reference
 * @param quotedName the quoted trigger name
 * @return the SQL statement
 */
@Override
public String getCreateSQLForCopy(Table targetTable, String quotedName) {
    StringBuilder sql = new StringBuilder("CREATE FORCE TRIGGER ");
    sql.append(quotedName);
    // exactly one of INSTEAD OF / BEFORE / AFTER applies
    sql.append(insteadOf ? " INSTEAD OF " : before ? " BEFORE " : " AFTER ")
            .append(getTypeNameList())
            .append(" ON ")
            .append(targetTable.getSQL());
    if (rowBased) {
        sql.append(" FOR EACH ROW");
    }
    if (noWait) {
        sql.append(" NOWAIT");
    } else {
        sql.append(" QUEUE ").append(queueSize);
    }
    // either a Java class implementing the trigger, or inline source code
    if (triggerClassName != null) {
        sql.append(" CALL ").append(Parser.quoteIdentifier(triggerClassName));
    } else {
        sql.append(" AS ").append(StringUtils.quoteStringSQL(triggerSource));
    }
    return sql.toString();
}
/**
 * Build the comma separated list of event names (INSERT, UPDATE, DELETE,
 * SELECT, ROLLBACK) that this trigger is declared for.
 *
 * @return the comma separated list
 */
public String getTypeNameList() {
    StringBuilder names = new StringBuilder();
    appendTypeName(names, (typeMask & Trigger.INSERT) != 0, "INSERT");
    appendTypeName(names, (typeMask & Trigger.UPDATE) != 0, "UPDATE");
    appendTypeName(names, (typeMask & Trigger.DELETE) != 0, "DELETE");
    appendTypeName(names, (typeMask & Trigger.SELECT) != 0, "SELECT");
    appendTypeName(names, onRollback, "ROLLBACK");
    return names.toString();
}

/**
 * Append the given event name if it is enabled, preceded by ", " for all
 * but the first appended name.
 */
private static void appendTypeName(StringBuilder buff, boolean enabled, String name) {
    if (enabled) {
        if (buff.length() > 0) {
            buff.append(", ");
        }
        buff.append(name);
    }
}
@Override
public String getCreateSQL() {
    // same statement as for a copy, but for this trigger's own table and name
    return getCreateSQLForCopy(table, getSQL());
}
@Override
public int getType() {
    // the database object type of this object is TRIGGER
    return DbObject.TRIGGER;
}
@Override
public void removeChildrenAndResources(Session session) {
    // unlink from the table and remove the persistent meta data
    table.removeTrigger(this);
    database.removeMeta(session, getId());
    if (triggerCallback != null) {
        try {
            // give the trigger implementation a chance to clean up
            triggerCallback.remove();
        } catch (SQLException e) {
            throw DbException.convert(e);
        }
    }
    // release references and mark this object as invalid
    table = null;
    triggerClassName = null;
    triggerSource = null;
    triggerCallback = null;
    invalidate();
}
@Override
public void checkRename() {
    // nothing to do: renaming this object is not restricted
}
/**
 * Get the table that this trigger is defined on.
 *
 * @return the table
 */
public Table getTable() {
    return table;
}
/**
 * Check if this is a BEFORE trigger.
 *
 * @return true if it is
 */
public boolean isBefore() {
    return before;
}
/**
 * Get the name of the Java class implementing this trigger (the CALL
 * variant of CREATE TRIGGER).
 *
 * @return the class name
 */
public String getTriggerClassName() {
    return triggerClassName;
}
/**
 * Get the inline source code of this trigger (the AS variant of
 * CREATE TRIGGER).
 *
 * @return the source code
 */
public String getTriggerSource() {
    return triggerSource;
}
/**
 * Close the trigger, delegating to the user trigger implementation if
 * one was loaded.
 */
public void close() throws SQLException {
    if (triggerCallback != null) {
        triggerCallback.close();
    }
}
/**
 * Check whether this is a select trigger (the SELECT bit is set in the
 * type mask).
 *
 * @return true if it is
 */
public boolean isSelectTrigger() {
    return (typeMask & Trigger.SELECT) != 0;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/security/AES.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.security;
import org.h2.util.Bits;
/**
* An implementation of the AES block cipher algorithm,
* also known as Rijndael. Only AES-128 is supported by this class.
*/
public class AES implements BlockCipher {

    // Round constants for the key schedule, placed in the high byte.
    private static final int[] RCON = new int[10];
    // Forward S-box.
    private static final int[] FS = new int[256];
    // Forward round tables (S-box combined with the column mixing
    // multiplication), byte-rotated by 0..3 positions.
    private static final int[] FT0 = new int[256];
    private static final int[] FT1 = new int[256];
    private static final int[] FT2 = new int[256];
    private static final int[] FT3 = new int[256];
    // Reverse S-box.
    private static final int[] RS = new int[256];
    // Reverse round tables for decryption, byte-rotated by 0..3 positions.
    private static final int[] RT0 = new int[256];
    private static final int[] RT1 = new int[256];
    private static final int[] RT2 = new int[256];
    private static final int[] RT3 = new int[256];
    // Expanded round keys: 11 round keys of 4 words each.
    private final int[] encKey = new int[44];
    private final int[] decKey = new int[44];

    // Rotate the 32-bit value right by 8 bits.
    private static int rot8(int x) {
        return (x >>> 8) | (x << 24);
    }

    // Multiply by 2 in GF(2^8) (the AES 'xtime' operation).
    private static int xtime(int x) {
        return ((x << 1) ^ (((x & 0x80) != 0) ? 0x1b : 0)) & 255;
    }

    // Multiply two values in GF(2^8) using the given power and log tables.
    private static int mul(int[] pow, int[] log, int x, int y) {
        return (x != 0 && y != 0) ? pow[(log[x] + log[y]) % 255] : 0;
    }

    static {
        // Build power/log tables for GF(2^8), then derive the S-boxes
        // and the combined round tables from them.
        int[] pow = new int[256];
        int[] log = new int[256];
        for (int i = 0, x = 1; i < 256; i++, x ^= xtime(x)) {
            pow[i] = x;
            log[x] = i;
        }
        for (int i = 0, x = 1; i < 10; i++, x = xtime(x)) {
            RCON[i] = x << 24;
        }
        // 0 maps to 0x63; all other S-box entries are computed from the
        // multiplicative inverse followed by the affine transformation.
        FS[0x00] = 0x63;
        RS[0x63] = 0x00;
        for (int i = 1; i < 256; i++) {
            int x = pow[255 - log[i]], y = x;
            y = ((y << 1) | (y >> 7)) & 255;
            x ^= y;
            y = ((y << 1) | (y >> 7)) & 255;
            x ^= y;
            y = ((y << 1) | (y >> 7)) & 255;
            x ^= y;
            y = ((y << 1) | (y >> 7)) & 255;
            x ^= y ^ 0x63;
            FS[i] = x & 255;
            RS[x] = i & 255;
        }
        for (int i = 0; i < 256; i++) {
            int x = FS[i], y = xtime(x);
            FT0[i] = (x ^ y) ^ (x << 8) ^ (x << 16) ^ (y << 24);
            FT1[i] = rot8(FT0[i]);
            FT2[i] = rot8(FT1[i]);
            FT3[i] = rot8(FT2[i]);
            y = RS[i];
            RT0[i] = mul(pow, log, 0x0b, y) ^ (mul(pow, log, 0x0d, y) << 8)
                    ^ (mul(pow, log, 0x09, y) << 16) ^ (mul(pow, log, 0x0e, y) << 24);
            RT1[i] = rot8(RT0[i]);
            RT2[i] = rot8(RT1[i]);
            RT3[i] = rot8(RT2[i]);
        }
    }

    // Transform one word of the encryption key schedule for use in the
    // decryption key schedule (lookup via forward S-box and reverse tables).
    private static int getDec(int t) {
        return RT0[FS[(t >> 24) & 255]] ^ RT1[FS[(t >> 16) & 255]]
                ^ RT2[FS[(t >> 8) & 255]] ^ RT3[FS[t & 255]];
    }

    @Override
    public void setKey(byte[] key) {
        // Load the 16 key bytes as 4 big-endian words.
        for (int i = 0, j = 0; i < 4; i++) {
            encKey[i] = decKey[i] = ((key[j++] & 255) << 24)
                    | ((key[j++] & 255) << 16) | ((key[j++] & 255) << 8)
                    | (key[j++] & 255);
        }
        // Expand the encryption key schedule (10 rounds, 4 words each).
        int e = 0;
        for (int i = 0; i < 10; i++, e += 4) {
            encKey[e + 4] = encKey[e] ^ RCON[i]
                    ^ (FS[(encKey[e + 3] >> 16) & 255] << 24)
                    ^ (FS[(encKey[e + 3] >> 8) & 255] << 16)
                    ^ (FS[(encKey[e + 3]) & 255] << 8)
                    ^ FS[(encKey[e + 3] >> 24) & 255];
            encKey[e + 5] = encKey[e + 1] ^ encKey[e + 4];
            encKey[e + 6] = encKey[e + 2] ^ encKey[e + 5];
            encKey[e + 7] = encKey[e + 3] ^ encKey[e + 6];
        }
        // The decryption schedule is the encryption schedule in reverse
        // round order; the inner round keys are additionally transformed
        // by getDec(), the first and last round keys are copied verbatim.
        int d = 0;
        decKey[d++] = encKey[e++];
        decKey[d++] = encKey[e++];
        decKey[d++] = encKey[e++];
        decKey[d++] = encKey[e++];
        for (int i = 1; i < 10; i++) {
            e -= 8;
            decKey[d++] = getDec(encKey[e++]);
            decKey[d++] = getDec(encKey[e++]);
            decKey[d++] = getDec(encKey[e++]);
            decKey[d++] = getDec(encKey[e++]);
        }
        e -= 8;
        decKey[d++] = encKey[e++];
        decKey[d++] = encKey[e++];
        decKey[d++] = encKey[e++];
        decKey[d] = encKey[e];
    }

    @Override
    public void encrypt(byte[] bytes, int off, int len) {
        // in-place; len must be a multiple of the block size (16 bytes)
        for (int i = off; i < off + len; i += 16) {
            encryptBlock(bytes, bytes, i);
        }
    }

    @Override
    public void decrypt(byte[] bytes, int off, int len) {
        // in-place; len must be a multiple of the block size (16 bytes)
        for (int i = off; i < off + len; i += 16) {
            decryptBlock(bytes, bytes, i);
        }
    }

    // Encrypt one 16-byte block; all 10 rounds are fully unrolled for speed.
    private void encryptBlock(byte[] in, byte[] out, int off) {
        int[] k = encKey;
        // initial round key addition
        int x0 = Bits.readInt(in, off) ^ k[0];
        int x1 = Bits.readInt(in, off + 4) ^ k[1];
        int x2 = Bits.readInt(in, off + 8) ^ k[2];
        int x3 = Bits.readInt(in, off + 12) ^ k[3];
        // rounds 1..9: table lookups combine the byte substitution,
        // row shifting and column mixing steps
        int y0 = FT0[(x0 >> 24) & 255] ^ FT1[(x1 >> 16) & 255]
                ^ FT2[(x2 >> 8) & 255] ^ FT3[x3 & 255] ^ k[4];
        int y1 = FT0[(x1 >> 24) & 255] ^ FT1[(x2 >> 16) & 255]
                ^ FT2[(x3 >> 8) & 255] ^ FT3[x0 & 255] ^ k[5];
        int y2 = FT0[(x2 >> 24) & 255] ^ FT1[(x3 >> 16) & 255]
                ^ FT2[(x0 >> 8) & 255] ^ FT3[x1 & 255] ^ k[6];
        int y3 = FT0[(x3 >> 24) & 255] ^ FT1[(x0 >> 16) & 255]
                ^ FT2[(x1 >> 8) & 255] ^ FT3[x2 & 255] ^ k[7];
        x0 = FT0[(y0 >> 24) & 255] ^ FT1[(y1 >> 16) & 255]
                ^ FT2[(y2 >> 8) & 255] ^ FT3[y3 & 255] ^ k[8];
        x1 = FT0[(y1 >> 24) & 255] ^ FT1[(y2 >> 16) & 255]
                ^ FT2[(y3 >> 8) & 255] ^ FT3[y0 & 255] ^ k[9];
        x2 = FT0[(y2 >> 24) & 255] ^ FT1[(y3 >> 16) & 255]
                ^ FT2[(y0 >> 8) & 255] ^ FT3[y1 & 255] ^ k[10];
        x3 = FT0[(y3 >> 24) & 255] ^ FT1[(y0 >> 16) & 255]
                ^ FT2[(y1 >> 8) & 255] ^ FT3[y2 & 255] ^ k[11];
        y0 = FT0[(x0 >> 24) & 255] ^ FT1[(x1 >> 16) & 255]
                ^ FT2[(x2 >> 8) & 255] ^ FT3[x3 & 255] ^ k[12];
        y1 = FT0[(x1 >> 24) & 255] ^ FT1[(x2 >> 16) & 255]
                ^ FT2[(x3 >> 8) & 255] ^ FT3[x0 & 255] ^ k[13];
        y2 = FT0[(x2 >> 24) & 255] ^ FT1[(x3 >> 16) & 255]
                ^ FT2[(x0 >> 8) & 255] ^ FT3[x1 & 255] ^ k[14];
        y3 = FT0[(x3 >> 24) & 255] ^ FT1[(x0 >> 16) & 255]
                ^ FT2[(x1 >> 8) & 255] ^ FT3[x2 & 255] ^ k[15];
        x0 = FT0[(y0 >> 24) & 255] ^ FT1[(y1 >> 16) & 255]
                ^ FT2[(y2 >> 8) & 255] ^ FT3[y3 & 255] ^ k[16];
        x1 = FT0[(y1 >> 24) & 255] ^ FT1[(y2 >> 16) & 255]
                ^ FT2[(y3 >> 8) & 255] ^ FT3[y0 & 255] ^ k[17];
        x2 = FT0[(y2 >> 24) & 255] ^ FT1[(y3 >> 16) & 255]
                ^ FT2[(y0 >> 8) & 255] ^ FT3[y1 & 255] ^ k[18];
        x3 = FT0[(y3 >> 24) & 255] ^ FT1[(y0 >> 16) & 255]
                ^ FT2[(y1 >> 8) & 255] ^ FT3[y2 & 255] ^ k[19];
        y0 = FT0[(x0 >> 24) & 255] ^ FT1[(x1 >> 16) & 255]
                ^ FT2[(x2 >> 8) & 255] ^ FT3[x3 & 255] ^ k[20];
        y1 = FT0[(x1 >> 24) & 255] ^ FT1[(x2 >> 16) & 255]
                ^ FT2[(x3 >> 8) & 255] ^ FT3[x0 & 255] ^ k[21];
        y2 = FT0[(x2 >> 24) & 255] ^ FT1[(x3 >> 16) & 255]
                ^ FT2[(x0 >> 8) & 255] ^ FT3[x1 & 255] ^ k[22];
        y3 = FT0[(x3 >> 24) & 255] ^ FT1[(x0 >> 16) & 255]
                ^ FT2[(x1 >> 8) & 255] ^ FT3[x2 & 255] ^ k[23];
        x0 = FT0[(y0 >> 24) & 255] ^ FT1[(y1 >> 16) & 255]
                ^ FT2[(y2 >> 8) & 255] ^ FT3[y3 & 255] ^ k[24];
        x1 = FT0[(y1 >> 24) & 255] ^ FT1[(y2 >> 16) & 255]
                ^ FT2[(y3 >> 8) & 255] ^ FT3[y0 & 255] ^ k[25];
        x2 = FT0[(y2 >> 24) & 255] ^ FT1[(y3 >> 16) & 255]
                ^ FT2[(y0 >> 8) & 255] ^ FT3[y1 & 255] ^ k[26];
        x3 = FT0[(y3 >> 24) & 255] ^ FT1[(y0 >> 16) & 255]
                ^ FT2[(y1 >> 8) & 255] ^ FT3[y2 & 255] ^ k[27];
        y0 = FT0[(x0 >> 24) & 255] ^ FT1[(x1 >> 16) & 255]
                ^ FT2[(x2 >> 8) & 255] ^ FT3[x3 & 255] ^ k[28];
        y1 = FT0[(x1 >> 24) & 255] ^ FT1[(x2 >> 16) & 255]
                ^ FT2[(x3 >> 8) & 255] ^ FT3[x0 & 255] ^ k[29];
        y2 = FT0[(x2 >> 24) & 255] ^ FT1[(x3 >> 16) & 255]
                ^ FT2[(x0 >> 8) & 255] ^ FT3[x1 & 255] ^ k[30];
        y3 = FT0[(x3 >> 24) & 255] ^ FT1[(x0 >> 16) & 255]
                ^ FT2[(x1 >> 8) & 255] ^ FT3[x2 & 255] ^ k[31];
        x0 = FT0[(y0 >> 24) & 255] ^ FT1[(y1 >> 16) & 255]
                ^ FT2[(y2 >> 8) & 255] ^ FT3[y3 & 255] ^ k[32];
        x1 = FT0[(y1 >> 24) & 255] ^ FT1[(y2 >> 16) & 255]
                ^ FT2[(y3 >> 8) & 255] ^ FT3[y0 & 255] ^ k[33];
        x2 = FT0[(y2 >> 24) & 255] ^ FT1[(y3 >> 16) & 255]
                ^ FT2[(y0 >> 8) & 255] ^ FT3[y1 & 255] ^ k[34];
        x3 = FT0[(y3 >> 24) & 255] ^ FT1[(y0 >> 16) & 255]
                ^ FT2[(y1 >> 8) & 255] ^ FT3[y2 & 255] ^ k[35];
        y0 = FT0[(x0 >> 24) & 255] ^ FT1[(x1 >> 16) & 255]
                ^ FT2[(x2 >> 8) & 255] ^ FT3[x3 & 255] ^ k[36];
        y1 = FT0[(x1 >> 24) & 255] ^ FT1[(x2 >> 16) & 255]
                ^ FT2[(x3 >> 8) & 255] ^ FT3[x0 & 255] ^ k[37];
        y2 = FT0[(x2 >> 24) & 255] ^ FT1[(x3 >> 16) & 255]
                ^ FT2[(x0 >> 8) & 255] ^ FT3[x1 & 255] ^ k[38];
        y3 = FT0[(x3 >> 24) & 255] ^ FT1[(x0 >> 16) & 255]
                ^ FT2[(x1 >> 8) & 255] ^ FT3[x2 & 255] ^ k[39];
        // final round: byte substitution and row shift only (no column mix)
        x0 = ((FS[(y0 >> 24) & 255] << 24) | (FS[(y1 >> 16) & 255] << 16)
                | (FS[(y2 >> 8) & 255] << 8) | FS[y3 & 255]) ^ k[40];
        x1 = ((FS[(y1 >> 24) & 255] << 24) | (FS[(y2 >> 16) & 255] << 16)
                | (FS[(y3 >> 8) & 255] << 8) | FS[y0 & 255]) ^ k[41];
        x2 = ((FS[(y2 >> 24) & 255] << 24) | (FS[(y3 >> 16) & 255] << 16)
                | (FS[(y0 >> 8) & 255] << 8) | FS[y1 & 255]) ^ k[42];
        x3 = ((FS[(y3 >> 24) & 255] << 24) | (FS[(y0 >> 16) & 255] << 16)
                | (FS[(y1 >> 8) & 255] << 8) | FS[y2 & 255]) ^ k[43];
        Bits.writeInt(out, off, x0);
        Bits.writeInt(out, off + 4, x1);
        Bits.writeInt(out, off + 8, x2);
        Bits.writeInt(out, off + 12, x3);
    }

    // Decrypt one 16-byte block; mirrors encryptBlock with the reverse
    // tables, the reverse key schedule and the opposite row rotation.
    private void decryptBlock(byte[] in, byte[] out, int off) {
        int[] k = decKey;
        int x0 = Bits.readInt(in, off) ^ k[0];
        int x1 = Bits.readInt(in, off + 4) ^ k[1];
        int x2 = Bits.readInt(in, off + 8) ^ k[2];
        int x3 = Bits.readInt(in, off + 12) ^ k[3];
        int y0 = RT0[(x0 >> 24) & 255] ^ RT1[(x3 >> 16) & 255]
                ^ RT2[(x2 >> 8) & 255] ^ RT3[x1 & 255] ^ k[4];
        int y1 = RT0[(x1 >> 24) & 255] ^ RT1[(x0 >> 16) & 255]
                ^ RT2[(x3 >> 8) & 255] ^ RT3[x2 & 255] ^ k[5];
        int y2 = RT0[(x2 >> 24) & 255] ^ RT1[(x1 >> 16) & 255]
                ^ RT2[(x0 >> 8) & 255] ^ RT3[x3 & 255] ^ k[6];
        int y3 = RT0[(x3 >> 24) & 255] ^ RT1[(x2 >> 16) & 255]
                ^ RT2[(x1 >> 8) & 255] ^ RT3[x0 & 255] ^ k[7];
        x0 = RT0[(y0 >> 24) & 255] ^ RT1[(y3 >> 16) & 255]
                ^ RT2[(y2 >> 8) & 255] ^ RT3[y1 & 255] ^ k[8];
        x1 = RT0[(y1 >> 24) & 255] ^ RT1[(y0 >> 16) & 255]
                ^ RT2[(y3 >> 8) & 255] ^ RT3[y2 & 255] ^ k[9];
        x2 = RT0[(y2 >> 24) & 255] ^ RT1[(y1 >> 16) & 255]
                ^ RT2[(y0 >> 8) & 255] ^ RT3[y3 & 255] ^ k[10];
        x3 = RT0[(y3 >> 24) & 255] ^ RT1[(y2 >> 16) & 255]
                ^ RT2[(y1 >> 8) & 255] ^ RT3[y0 & 255] ^ k[11];
        y0 = RT0[(x0 >> 24) & 255] ^ RT1[(x3 >> 16) & 255]
                ^ RT2[(x2 >> 8) & 255] ^ RT3[x1 & 255] ^ k[12];
        y1 = RT0[(x1 >> 24) & 255] ^ RT1[(x0 >> 16) & 255]
                ^ RT2[(x3 >> 8) & 255] ^ RT3[x2 & 255] ^ k[13];
        y2 = RT0[(x2 >> 24) & 255] ^ RT1[(x1 >> 16) & 255]
                ^ RT2[(x0 >> 8) & 255] ^ RT3[x3 & 255] ^ k[14];
        y3 = RT0[(x3 >> 24) & 255] ^ RT1[(x2 >> 16) & 255]
                ^ RT2[(x1 >> 8) & 255] ^ RT3[x0 & 255] ^ k[15];
        x0 = RT0[(y0 >> 24) & 255] ^ RT1[(y3 >> 16) & 255]
                ^ RT2[(y2 >> 8) & 255] ^ RT3[y1 & 255] ^ k[16];
        x1 = RT0[(y1 >> 24) & 255] ^ RT1[(y0 >> 16) & 255]
                ^ RT2[(y3 >> 8) & 255] ^ RT3[y2 & 255] ^ k[17];
        x2 = RT0[(y2 >> 24) & 255] ^ RT1[(y1 >> 16) & 255]
                ^ RT2[(y0 >> 8) & 255] ^ RT3[y3 & 255] ^ k[18];
        x3 = RT0[(y3 >> 24) & 255] ^ RT1[(y2 >> 16) & 255]
                ^ RT2[(y1 >> 8) & 255] ^ RT3[y0 & 255] ^ k[19];
        y0 = RT0[(x0 >> 24) & 255] ^ RT1[(x3 >> 16) & 255]
                ^ RT2[(x2 >> 8) & 255] ^ RT3[x1 & 255] ^ k[20];
        y1 = RT0[(x1 >> 24) & 255] ^ RT1[(x0 >> 16) & 255]
                ^ RT2[(x3 >> 8) & 255] ^ RT3[x2 & 255] ^ k[21];
        y2 = RT0[(x2 >> 24) & 255] ^ RT1[(x1 >> 16) & 255]
                ^ RT2[(x0 >> 8) & 255] ^ RT3[x3 & 255] ^ k[22];
        y3 = RT0[(x3 >> 24) & 255] ^ RT1[(x2 >> 16) & 255]
                ^ RT2[(x1 >> 8) & 255] ^ RT3[x0 & 255] ^ k[23];
        x0 = RT0[(y0 >> 24) & 255] ^ RT1[(y3 >> 16) & 255]
                ^ RT2[(y2 >> 8) & 255] ^ RT3[y1 & 255] ^ k[24];
        x1 = RT0[(y1 >> 24) & 255] ^ RT1[(y0 >> 16) & 255]
                ^ RT2[(y3 >> 8) & 255] ^ RT3[y2 & 255] ^ k[25];
        x2 = RT0[(y2 >> 24) & 255] ^ RT1[(y1 >> 16) & 255]
                ^ RT2[(y0 >> 8) & 255] ^ RT3[y3 & 255] ^ k[26];
        x3 = RT0[(y3 >> 24) & 255] ^ RT1[(y2 >> 16) & 255]
                ^ RT2[(y1 >> 8) & 255] ^ RT3[y0 & 255] ^ k[27];
        y0 = RT0[(x0 >> 24) & 255] ^ RT1[(x3 >> 16) & 255]
                ^ RT2[(x2 >> 8) & 255] ^ RT3[x1 & 255] ^ k[28];
        y1 = RT0[(x1 >> 24) & 255] ^ RT1[(x0 >> 16) & 255]
                ^ RT2[(x3 >> 8) & 255] ^ RT3[x2 & 255] ^ k[29];
        y2 = RT0[(x2 >> 24) & 255] ^ RT1[(x1 >> 16) & 255]
                ^ RT2[(x0 >> 8) & 255] ^ RT3[x3 & 255] ^ k[30];
        y3 = RT0[(x3 >> 24) & 255] ^ RT1[(x2 >> 16) & 255]
                ^ RT2[(x1 >> 8) & 255] ^ RT3[x0 & 255] ^ k[31];
        x0 = RT0[(y0 >> 24) & 255] ^ RT1[(y3 >> 16) & 255]
                ^ RT2[(y2 >> 8) & 255] ^ RT3[y1 & 255] ^ k[32];
        x1 = RT0[(y1 >> 24) & 255] ^ RT1[(y0 >> 16) & 255]
                ^ RT2[(y3 >> 8) & 255] ^ RT3[y2 & 255] ^ k[33];
        x2 = RT0[(y2 >> 24) & 255] ^ RT1[(y1 >> 16) & 255]
                ^ RT2[(y0 >> 8) & 255] ^ RT3[y3 & 255] ^ k[34];
        x3 = RT0[(y3 >> 24) & 255] ^ RT1[(y2 >> 16) & 255]
                ^ RT2[(y1 >> 8) & 255] ^ RT3[y0 & 255] ^ k[35];
        y0 = RT0[(x0 >> 24) & 255] ^ RT1[(x3 >> 16) & 255]
                ^ RT2[(x2 >> 8) & 255] ^ RT3[x1 & 255] ^ k[36];
        y1 = RT0[(x1 >> 24) & 255] ^ RT1[(x0 >> 16) & 255]
                ^ RT2[(x3 >> 8) & 255] ^ RT3[x2 & 255] ^ k[37];
        y2 = RT0[(x2 >> 24) & 255] ^ RT1[(x1 >> 16) & 255]
                ^ RT2[(x0 >> 8) & 255] ^ RT3[x3 & 255] ^ k[38];
        y3 = RT0[(x3 >> 24) & 255] ^ RT1[(x2 >> 16) & 255]
                ^ RT2[(x1 >> 8) & 255] ^ RT3[x0 & 255] ^ k[39];
        // final round with the reverse S-box only
        x0 = ((RS[(y0 >> 24) & 255] << 24) | (RS[(y3 >> 16) & 255] << 16)
                | (RS[(y2 >> 8) & 255] << 8) | RS[y1 & 255]) ^ k[40];
        x1 = ((RS[(y1 >> 24) & 255] << 24) | (RS[(y0 >> 16) & 255] << 16)
                | (RS[(y3 >> 8) & 255] << 8) | RS[y2 & 255]) ^ k[41];
        x2 = ((RS[(y2 >> 24) & 255] << 24) | (RS[(y1 >> 16) & 255] << 16)
                | (RS[(y0 >> 8) & 255] << 8) | RS[y3 & 255]) ^ k[42];
        x3 = ((RS[(y3 >> 24) & 255] << 24) | (RS[(y2 >> 16) & 255] << 16)
                | (RS[(y1 >> 8) & 255] << 8) | RS[y0 & 255]) ^ k[43];
        Bits.writeInt(out, off, x0);
        Bits.writeInt(out, off + 4, x1);
        Bits.writeInt(out, off + 8, x2);
        Bits.writeInt(out, off + 12, x3);
    }

    @Override
    public int getKeyLength() {
        // AES-128 uses a 16 byte key
        return 16;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/security/BlockCipher.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.security;
/**
* A block cipher is a data encryption algorithm that operates on blocks.
*/
public interface BlockCipher {

    /**
     * Block sizes are always multiples of this number (16 bytes).
     */
    int ALIGN = 16;

    /**
     * Set the encryption key used for encrypting and decrypting.
     * The key needs to be 16 bytes long.
     *
     * @param key the key
     */
    void setKey(byte[] key);

    /**
     * Encrypt a number of bytes. This is done in-place, that
     * means the bytes are overwritten.
     *
     * @param bytes the byte array
     * @param off the start index
     * @param len the number of bytes to encrypt
     */
    void encrypt(byte[] bytes, int off, int len);

    /**
     * Decrypt a number of bytes. This is done in-place, that
     * means the bytes are overwritten.
     *
     * @param bytes the byte array
     * @param off the start index
     * @param len the number of bytes to decrypt
     */
    void decrypt(byte[] bytes, int off, int len);

    /**
     * Get the length of the key in bytes.
     *
     * @return the length of the key
     */
    int getKeyLength();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/security/CipherFactory.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.security;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.security.KeyFactory;
import java.security.KeyStore;
import java.security.PrivateKey;
import java.security.Security;
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Properties;
import javax.net.ServerSocketFactory;
import javax.net.ssl.SSLServerSocket;
import javax.net.ssl.SSLServerSocketFactory;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
import org.h2.api.ErrorCode;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.store.fs.FileUtils;
import org.h2.util.IOUtils;
import org.h2.util.StringUtils;
/**
* A factory to create new block cipher objects.
*/
public class CipherFactory {

    /**
     * The default password to use for the .h2.keystore file
     */
    public static final String KEYSTORE_PASSWORD =
            "h2pass";

    /**
     * The security property which can prevent anonymous TLS connections.
     * Introduced into Java 6, 7, 8 in updates from July 2015.
     */
    public static final String LEGACY_ALGORITHMS_SECURITY_KEY =
            "jdk.tls.legacyAlgorithms";

    /**
     * The value of {@value #LEGACY_ALGORITHMS_SECURITY_KEY} security
     * property at the time of class initialization.
     * Null if it is not set.
     */
    public static final String DEFAULT_LEGACY_ALGORITHMS = getLegacyAlgorithmsSilently();

    // Location of the generated keystore file.
    private static final String KEYSTORE =
            "~/.h2.keystore";
    // Standard JSSE system property names for keystore configuration.
    private static final String KEYSTORE_KEY =
            "javax.net.ssl.keyStore";
    private static final String KEYSTORE_PASSWORD_KEY =
            "javax.net.ssl.keyStorePassword";

    private CipherFactory() {
        // utility class
    }

    /**
     * Get a new block cipher object for the given algorithm.
     *
     * @param algorithm the algorithm (AES, XTEA, or FOG; case insensitive)
     * @return a new cipher object
     */
    public static BlockCipher getBlockCipher(String algorithm) {
        if ("XTEA".equalsIgnoreCase(algorithm)) {
            return new XTEA();
        } else if ("AES".equalsIgnoreCase(algorithm)) {
            return new AES();
        } else if ("FOG".equalsIgnoreCase(algorithm)) {
            return new Fog();
        }
        throw DbException.get(ErrorCode.UNSUPPORTED_CIPHER, algorithm);
    }

    /**
     * Create a secure client socket that is connected to the given address and
     * port.
     *
     * @param address the address to connect to
     * @param port the port
     * @return the socket
     */
    public static Socket createSocket(InetAddress address, int port)
            throws IOException {
        Socket socket = null;
        // make sure the keystore system properties are configured first
        setKeystore();
        SSLSocketFactory f = (SSLSocketFactory) SSLSocketFactory.getDefault();
        SSLSocket secureSocket = (SSLSocket) f.createSocket();
        secureSocket.connect(new InetSocketAddress(address, port),
                SysProperties.SOCKET_CONNECT_TIMEOUT);
        // only allow TLS protocols, never plain SSL
        secureSocket.setEnabledProtocols(
                disableSSL(secureSocket.getEnabledProtocols()));
        if (SysProperties.ENABLE_ANONYMOUS_TLS) {
            String[] list = enableAnonymous(
                    secureSocket.getEnabledCipherSuites(),
                    secureSocket.getSupportedCipherSuites());
            secureSocket.setEnabledCipherSuites(list);
        }
        socket = secureSocket;
        return socket;
    }

    /**
     * Create a secure server socket. If a bind address is specified, the
     * socket is only bound to this address.
     * If h2.enableAnonymousTLS is true, an attempt is made to modify
     * the security property jdk.tls.legacyAlgorithms (in newer JVMs) to allow
     * anonymous TLS. This system change is effectively permanent for the
     * lifetime of the JVM.
     * @see #removeAnonFromLegacyAlgorithms()
     *
     * @param port the port to listen on
     * @param bindAddress the address to bind to, or null to bind to all
     *            addresses
     * @return the server socket
     */
    public static ServerSocket createServerSocket(int port,
            InetAddress bindAddress) throws IOException {
        ServerSocket socket = null;
        if (SysProperties.ENABLE_ANONYMOUS_TLS) {
            // must happen before the first server SSL socket is used,
            // see removeAnonFromLegacyAlgorithms()
            removeAnonFromLegacyAlgorithms();
        }
        setKeystore();
        ServerSocketFactory f = SSLServerSocketFactory.getDefault();
        SSLServerSocket secureSocket;
        if (bindAddress == null) {
            secureSocket = (SSLServerSocket) f.createServerSocket(port);
        } else {
            secureSocket = (SSLServerSocket) f.createServerSocket(port, 0, bindAddress);
        }
        // only allow TLS protocols, never plain SSL
        secureSocket.setEnabledProtocols(
                disableSSL(secureSocket.getEnabledProtocols()));
        if (SysProperties.ENABLE_ANONYMOUS_TLS) {
            String[] list = enableAnonymous(
                    secureSocket.getEnabledCipherSuites(),
                    secureSocket.getSupportedCipherSuites());
            secureSocket.setEnabledCipherSuites(list);
        }
        socket = secureSocket;
        return socket;
    }

    /**
     * Removes DH_anon and ECDH_anon from a comma separated list of ciphers.
     * Only the first occurrence is removed.
     * If there is nothing to remove, returns the reference to the argument.
     * @param list a list of names separated by commas (and spaces)
     * @return a new string without DH_anon and ECDH_anon items,
     *         or the original if none were found
     */
    public static String removeDhAnonFromCommaSeparatedList(String list) {
        if (list == null) {
            return list;
        }
        List<String> algorithms = new LinkedList<>(Arrays.asList(list.split("\\s*,\\s*")));
        boolean dhAnonRemoved = algorithms.remove("DH_anon");
        boolean ecdhAnonRemoved = algorithms.remove("ECDH_anon");
        if (dhAnonRemoved || ecdhAnonRemoved) {
            // Arrays.toString() produces "[a, b, c]"; strip the brackets to
            // get back a comma separated list
            String string = Arrays.toString(algorithms.toArray(new String[algorithms.size()]));
            return (!algorithms.isEmpty()) ? string.substring(1, string.length() - 1): "";
        }
        return list;
    }

    /**
     * Attempts to weaken the security properties to allow anonymous TLS.
     * New JREs would not choose an anonymous cipher suite in a TLS handshake
     * if server-side security property
     * {@value #LEGACY_ALGORITHMS_SECURITY_KEY}
     * were not modified from the default value.
     * <p>
     * NOTE: In current (as of 2016) default implementations of JSSE which use
     * this security property, the value is permanently cached inside the
     * ServerHandshake class upon its first use.
     * Therefore the modification accomplished by this method has to be done
     * before the first use of a server SSL socket.
     * Later changes to this property will not have any effect on server socket
     * behavior.
     */
    public static synchronized void removeAnonFromLegacyAlgorithms() {
        String legacyOriginal = getLegacyAlgorithmsSilently();
        if (legacyOriginal == null) {
            return;
        }
        String legacyNew = removeDhAnonFromCommaSeparatedList(legacyOriginal);
        if (!legacyOriginal.equals(legacyNew)) {
            setLegacyAlgorithmsSilently(legacyNew);
        }
    }

    /**
     * Attempts to resets the security property to the default value.
     * The default value of {@value #LEGACY_ALGORITHMS_SECURITY_KEY} was
     * obtained at time of class initialization.
     * <p>
     * NOTE: Resetting the property might not have any effect on server
     * socket behavior.
     * @see #removeAnonFromLegacyAlgorithms()
     */
    public static synchronized void resetDefaultLegacyAlgorithms() {
        setLegacyAlgorithmsSilently(DEFAULT_LEGACY_ALGORITHMS);
    }

    /**
     * Returns the security property {@value #LEGACY_ALGORITHMS_SECURITY_KEY}.
     * Ignores security exceptions.
     *
     * @return the value of the security property, or null if not set
     *         or not accessible
     */
    public static String getLegacyAlgorithmsSilently() {
        String defaultLegacyAlgorithms = null;
        try {
            defaultLegacyAlgorithms = Security.getProperty(LEGACY_ALGORITHMS_SECURITY_KEY);
        } catch (SecurityException e) {
            // ignore: if the property cannot be read, behave as if unset
        }
        return defaultLegacyAlgorithms;
    }

    // Set the security property, ignoring security exceptions and null values.
    private static void setLegacyAlgorithmsSilently(String legacyAlgorithms) {
        if (legacyAlgorithms == null) {
            return;
        }
        try {
            Security.setProperty(LEGACY_ALGORITHMS_SECURITY_KEY, legacyAlgorithms);
        } catch (SecurityException e) {
            // ignore: if the property cannot be set, leave it unchanged
        }
    }

    // Serialize the keystore to a byte array using the given password.
    private static byte[] getKeyStoreBytes(KeyStore store, String password)
            throws IOException {
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        try {
            store.store(bout, password.toCharArray());
        } catch (Exception e) {
            throw DbException.convertToIOException(e);
        }
        return bout.toByteArray();
    }

    /**
     * Get the keystore object using the given password. The keystore
     * contains a hard-coded RSA key pair and self-signed certificate
     * (see the generated code below).
     *
     * @param password the keystore password
     * @return the keystore
     */
    public static KeyStore getKeyStore(String password) throws IOException {
        try {
            // The following source code can be re-generated
            // if you have a keystore file.
            // This code is (hopefully) more Java version independent
            // than using keystores directly. See also:
            // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4887561
            // (1.4.2 cannot read keystore written with 1.4.1)
            // --- generated code start ---
            KeyStore store = KeyStore.getInstance(KeyStore.getDefaultType());
            store.load(null, password.toCharArray());
            KeyFactory keyFactory = KeyFactory.getInstance("RSA");
            store.load(null, password.toCharArray());
            PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(
                    StringUtils.convertHexToBytes(
                    "30820277020100300d06092a864886f70d010101" +
                    "0500048202613082025d02010002818100dc0a13" +
                    "c602b7141110eade2f051b54777b060d0f74e6a1" +
                    "10f9cce81159f271ebc88d8e8aa1f743b505fc2e" +
                    "7dfe38d33b8d3f64d1b363d1af4d877833897954" +
                    "cbaec2fa384c22a415498cf306bb07ac09b76b00" +
                    "1cd68bf77ea0a628f5101959cf2993a9c23dbee7" +
                    "9b19305977f8715ae78d023471194cc900b231ee" +
                    "cb0aaea98d02030100010281810099aa4ff4d0a0" +
                    "9a5af0bd953cb10c4d08c3d98df565664ac5582e" +
                    "494314d5c3c92dddedd5d316a32a206be4ec0846" +
                    "16fe57be15e27cad111aa3c21fa79e32258c6ca8" +
                    "430afc69eddd52d3b751b37da6b6860910b94653" +
                    "192c0db1d02abcfd6ce14c01f238eec7c20bd3bb" +
                    "750940004bacba2880349a9494d10e139ecb2355" +
                    "d101024100ffdc3defd9c05a2d377ef6019fa62b" +
                    "3fbd5b0020a04cc8533bca730e1f6fcf5dfceea1" +
                    "b044fbe17d9eababfbc7d955edad6bc60f9be826" +
                    "ad2c22ba77d19a9f65024100dc28d43fdbbc9385" +
                    "2cc3567093157702bc16f156f709fb7db0d9eec0" +
                    "28f41fd0edcd17224c866e66be1744141fb724a1" +
                    "0fd741c8a96afdd9141b36d67fff6309024077b1" +
                    "cddbde0f69604bdcfe33263fb36ddf24aa3b9922" +
                    "327915b890f8a36648295d0139ecdf68c245652c" +
                    "4489c6257b58744fbdd961834a4cab201801a3b1" +
                    "e52d024100b17142e8991d1b350a0802624759d4" +
                    "8ae2b8071a158ff91fabeb6a8f7c328e762143dc" +
                    "726b8529f42b1fab6220d1c676fdc27ba5d44e84" +
                    "7c72c52064afd351a902407c6e23fe35bcfcd1a6" +
                    "62aa82a2aa725fcece311644d5b6e3894853fd4c" +
                    "e9fe78218c957b1ff03fc9e5ef8ffeb6bd58235f" +
                    "6a215c97d354fdace7e781e4a63e8b"));
            PrivateKey privateKey = keyFactory.generatePrivate(keySpec);
            Certificate[] certs = { CertificateFactory
                    .getInstance("X.509")
                    .generateCertificate(
                            new ByteArrayInputStream(
                                    StringUtils.convertHexToBytes(
                    "3082018b3081f502044295ce6b300d06092a8648" +
                    "86f70d0101040500300d310b3009060355040313" +
                    "024832301e170d3035303532363133323630335a" +
                    "170d3337303933303036353734375a300d310b30" +
                    "0906035504031302483230819f300d06092a8648" +
                    "86f70d010101050003818d0030818902818100dc" +
                    "0a13c602b7141110eade2f051b54777b060d0f74" +
                    "e6a110f9cce81159f271ebc88d8e8aa1f743b505" +
                    "fc2e7dfe38d33b8d3f64d1b363d1af4d87783389" +
                    "7954cbaec2fa384c22a415498cf306bb07ac09b7" +
                    "6b001cd68bf77ea0a628f5101959cf2993a9c23d" +
                    "bee79b19305977f8715ae78d023471194cc900b2" +
                    "31eecb0aaea98d0203010001300d06092a864886" +
                    "f70d01010405000381810083f4401a279453701b" +
                    "ef9a7681a5b8b24f153f7d18c7c892133d97bd5f" +
                    "13736be7505290a445a7d5ceb75522403e509751" +
                    "5cd966ded6351ff60d5193de34cd36e5cb04d380" +
                    "398e66286f99923fd92296645fd4ada45844d194" +
                    "dfd815e6cd57f385c117be982809028bba1116c8" +
                    "5740b3d27a55b1a0948bf291ddba44bed337b9"))), };
            store.setKeyEntry("h2", privateKey, password.toCharArray(), certs);
            // --- generated code end ---
            return store;
        } catch (Exception e) {
            throw DbException.convertToIOException(e);
        }
    }

    // Ensure the JSSE keystore system properties are set; if not, write the
    // built-in keystore to ~/.h2.keystore (unless an identical file already
    // exists) and point the system properties at it.
    private static void setKeystore() throws IOException {
        Properties p = System.getProperties();
        if (p.getProperty(KEYSTORE_KEY) == null) {
            String fileName = KEYSTORE;
            byte[] data = getKeyStoreBytes(getKeyStore(
                    KEYSTORE_PASSWORD), KEYSTORE_PASSWORD);
            boolean needWrite = true;
            if (FileUtils.exists(fileName) && FileUtils.size(fileName) == data.length) {
                // don't need to overwrite the file if it did not change
                InputStream fin = FileUtils.newInputStream(fileName);
                byte[] now = IOUtils.readBytesAndClose(fin, 0);
                if (now != null && Arrays.equals(data, now)) {
                    needWrite = false;
                }
            }
            if (needWrite) {
                try {
                    OutputStream out = FileUtils.newOutputStream(fileName, false);
                    out.write(data);
                    out.close();
                } catch (Exception e) {
                    throw DbException.convertToIOException(e);
                }
            }
            String absolutePath = FileUtils.toRealPath(fileName);
            System.setProperty(KEYSTORE_KEY, absolutePath);
        }
        if (p.getProperty(KEYSTORE_PASSWORD_KEY) == null) {
            System.setProperty(KEYSTORE_PASSWORD_KEY, KEYSTORE_PASSWORD);
        }
    }

    // Add the supported anonymous TLS cipher suites (AES or 3DES with SHA)
    // to the enabled list, keeping all already enabled suites.
    private static String[] enableAnonymous(String[] enabled, String[] supported) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        for (String x : supported) {
            if (!x.startsWith("SSL") && x.contains("_anon_") &&
                    (x.contains("_AES_") || x.contains("_3DES_")) && x.contains("_SHA")) {
                set.add(x);
            }
        }
        Collections.addAll(set, enabled);
        return set.toArray(new String[0]);
    }

    // Remove all protocols whose name starts with "SSL", keeping only TLS.
    private static String[] disableSSL(String[] enabled) {
        HashSet<String> set = new HashSet<>();
        for (String x : enabled) {
            if (!x.startsWith("SSL")) {
                set.add(x);
            }
        }
        return set.toArray(new String[0]);
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/security/Fog.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.security;
import org.h2.util.Bits;
/**
* A pseudo-encryption algorithm that makes the data appear to be
* encrypted. This algorithm is cryptographically extremely weak, and should
* only be used to hide data from reading the plain text using a text editor.
*/
/**
 * A pseudo-encryption algorithm that only makes data look encrypted.
 * It is cryptographically extremely weak and must only be used to keep
 * the plain text from being read with a text editor.
 */
public class Fog implements BlockCipher {

    // Obfuscation key, derived from the first 8 bytes of the supplied key.
    private int key;

    @Override
    public void encrypt(byte[] bytes, int off, int len) {
        int end = off + len;
        for (int pos = off; pos < end; pos += 16) {
            encryptBlock(bytes, bytes, pos);
        }
    }

    @Override
    public void decrypt(byte[] bytes, int off, int len) {
        int end = off + len;
        for (int pos = off; pos < end; pos += 16) {
            decryptBlock(bytes, bytes, pos);
        }
    }

    private void encryptBlock(byte[] in, byte[] out, int off) {
        int k = key;
        int a = Bits.readInt(in, off);
        int b = Bits.readInt(in, off + 4);
        int c = Bits.readInt(in, off + 8);
        int d = Bits.readInt(in, off + 12);
        // First mix the even words using the still-unmodified word b as the
        // rotation distance, then mix the odd words using the new value of a.
        a = Integer.rotateLeft(a ^ k, b);
        c = Integer.rotateLeft(c ^ k, b);
        b = Integer.rotateLeft(b ^ k, a);
        d = Integer.rotateLeft(d ^ k, a);
        Bits.writeInt(out, off, a);
        Bits.writeInt(out, off + 4, b);
        Bits.writeInt(out, off + 8, c);
        Bits.writeInt(out, off + 12, d);
    }

    private void decryptBlock(byte[] in, byte[] out, int off) {
        int k = key;
        int a = Bits.readInt(in, off);
        int b = Bits.readInt(in, off + 4);
        int c = Bits.readInt(in, off + 8);
        int d = Bits.readInt(in, off + 12);
        // Undo the two mixing steps in reverse order: first recover the odd
        // words (which were rotated by the encrypted a), then the even words.
        b = Integer.rotateRight(b, a) ^ k;
        d = Integer.rotateRight(d, a) ^ k;
        a = Integer.rotateRight(a, b) ^ k;
        c = Integer.rotateRight(c, b) ^ k;
        Bits.writeInt(out, off, a);
        Bits.writeInt(out, off + 4, b);
        Bits.writeInt(out, off + 8, c);
        Bits.writeInt(out, off + 12, d);
    }

    @Override
    public int getKeyLength() {
        return 16;
    }

    @Override
    public void setKey(byte[] key) {
        // only the first 8 bytes are read; the long is narrowed to an int
        this.key = (int) Bits.readLong(key, 0);
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/security/SHA256.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.security;
import java.security.GeneralSecurityException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import org.h2.util.Bits;
/**
* This class implements the cryptographic hash function SHA-256.
*/
public class SHA256 {

    private SHA256() {
        // utility class - no instances
    }

    /**
     * Calculate the hash code by using the given salt. The salt is appended
     * after the data before the hash code is calculated. After generating the
     * hash code, the data and all internal buffers are filled with zeros to
     * avoid keeping insecure data in memory longer than required (and possibly
     * swapped to disk).
     *
     * @param data the data to hash
     * @param salt the salt to use
     * @return the hash code
     */
    public static byte[] getHashWithSalt(byte[] data, byte[] salt) {
        // Concatenate data || salt into a temporary buffer; getHash(buff, true)
        // zeroes that buffer afterwards. NOTE(review): the 'data' array passed
        // in is NOT zeroed here, only the internal copy - confirm against the
        // javadoc claim above if callers rely on it.
        byte[] buff = new byte[data.length + salt.length];
        System.arraycopy(data, 0, buff, 0, data.length);
        System.arraycopy(salt, 0, buff, data.length, salt.length);
        return getHash(buff, true);
    }

    /**
     * Calculate the hash of a password by prepending the user name and a '@'
     * character. Both the user name and the password are encoded to a byte
     * array using UTF-16. After generating the hash code, the password array
     * and all internal buffers are filled with zeros to avoid keeping the plain
     * text password in memory longer than required (and possibly swapped to
     * disk).
     *
     * @param userName the user name
     * @param password the password
     * @return the hash code
     */
    public static byte[] getKeyPasswordHash(String userName, char[] password) {
        String user = userName + "@";
        // Two bytes per char: the chars are encoded manually as UTF-16
        // big-endian (high byte first).
        byte[] buff = new byte[2 * (user.length() + password.length)];
        int n = 0;
        for (int i = 0, length = user.length(); i < length; i++) {
            char c = user.charAt(i);
            buff[n++] = (byte) (c >> 8);
            buff[n++] = (byte) c;
        }
        for (char c : password) {
            buff[n++] = (byte) (c >> 8);
            buff[n++] = (byte) c;
        }
        // Wipe the caller's password array as soon as it has been copied.
        Arrays.fill(password, (char) 0);
        return getHash(buff, true);
    }

    /**
     * Calculate the hash-based message authentication code (HMAC-SHA-256).
     *
     * @param key the key
     * @param message the message
     * @return the hash
     */
    public static byte[] getHMAC(byte[] key, byte[] message) {
        return initMac(key).doFinal(message);
    }

    /**
     * Create an initialized HmacSHA256 Mac for the given key.
     * An empty key is replaced by a single zero byte because the JCE
     * rejects zero-length keys.
     */
    private static Mac initMac(byte[] key) {
        // Java forbids empty keys
        if (key.length == 0) {
            key = new byte[1];
        }
        try {
            Mac mac = Mac.getInstance("HmacSHA256");
            mac.init(new SecretKeySpec(key, "HmacSHA256"));
            return mac;
        } catch (GeneralSecurityException e) {
            // HmacSHA256 is mandated by the JCA spec, so this should not happen
            throw new RuntimeException(e);
        }
    }

    /**
     * Calculate the hash using the password-based key derivation function 2
     * (PBKDF2 with HMAC-SHA-256; see RFC 2898). The password array is filled
     * with zeros before returning.
     *
     * @param password the password
     * @param salt the salt
     * @param iterations the number of iterations
     * @param resultLen the number of bytes in the result
     * @return the result
     */
    public static byte[] getPBKDF2(byte[] password, byte[] salt,
            int iterations, int resultLen) {
        byte[] result = new byte[resultLen];
        Mac mac = initMac(password);
        // Scratch buffer large enough for either salt||INT(k) or a 32-byte
        // intermediate MAC; the extra 64 bytes look like headroom - TODO
        // confirm, only the first 'len' bytes are ever used below.
        int len = 64 + Math.max(32, salt.length + 4);
        byte[] message = new byte[len];
        byte[] macRes = null;
        // Outer loop: one 32-byte output block per k (1-based block index).
        for (int k = 1, offset = 0; offset < resultLen; k++, offset += 32) {
            for (int i = 0; i < iterations; i++) {
                if (i == 0) {
                    // U_1 = HMAC(password, salt || INT_32_BE(k))
                    System.arraycopy(salt, 0, message, 0, salt.length);
                    Bits.writeInt(message, salt.length, k);
                    len = salt.length + 4;
                } else {
                    // U_i = HMAC(password, U_{i-1})
                    System.arraycopy(macRes, 0, message, 0, 32);
                    len = 32;
                }
                mac.update(message, 0, len);
                macRes = mac.doFinal();
                // XOR-accumulate U_i into the output block (truncated at
                // resultLen for the final partial block).
                for (int j = 0; j < 32 && j + offset < resultLen; j++) {
                    result[j + offset] ^= macRes[j];
                }
            }
        }
        // Wipe the password material before returning.
        Arrays.fill(password, (byte) 0);
        return result;
    }

    /**
     * Calculate the hash code for the given data.
     *
     * @param data the data to hash
     * @param nullData if the data should be filled with zeros after calculating
     *            the hash code
     * @return the hash code
     */
    public static byte[] getHash(byte[] data, boolean nullData) {
        byte[] result;
        try {
            result = MessageDigest.getInstance("SHA-256").digest(data);
        } catch (NoSuchAlgorithmException e) {
            // SHA-256 is mandated by the JCA spec, so this should not happen
            throw new RuntimeException(e);
        }
        if (nullData) {
            Arrays.fill(data, (byte) 0);
        }
        return result;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/security/SecureFileStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.security;
import org.h2.engine.Constants;
import org.h2.store.DataHandler;
import org.h2.store.FileStore;
import org.h2.util.Bits;
import org.h2.util.MathUtils;
/**
* A file store that encrypts all data before writing, and decrypts all data
* after reading. Areas that were never written to (for example after calling
* setLength to enlarge the file) are not encrypted (contains 0 bytes).
*/
public class SecureFileStore extends FileStore {

    // Key material; repeatedly replaced by derived hashes in initKey.
    private byte[] key;
    // Cipher for the data itself.
    private final BlockCipher cipher;
    // Separate cipher (keyed with a further hash of the key) used only to
    // generate the position-dependent init vector stream.
    private final BlockCipher cipherForInitVector;
    // Scratch buffer for write(); grown on demand, never shrunk below need.
    private byte[] buffer = new byte[4];
    // Current logical file position, tracked so the IV can depend on it.
    private long pos;
    private final byte[] bufferForInitVector;
    private final int keyIterations;

    public SecureFileStore(DataHandler handler, String name, String mode,
            String cipher, byte[] key, int keyIterations) {
        super(handler, name, mode);
        this.key = key;
        this.cipher = CipherFactory.getBlockCipher(cipher);
        this.cipherForInitVector = CipherFactory.getBlockCipher(cipher);
        this.keyIterations = keyIterations;
        bufferForInitVector = new byte[Constants.FILE_BLOCK_SIZE];
    }

    @Override
    protected byte[] generateSalt() {
        return MathUtils.secureRandomBytes(Constants.FILE_BLOCK_SIZE);
    }

    /**
     * Derive the data key and the IV key from the raw key and the salt.
     * The data cipher gets the salted hash iterated keyIterations times;
     * the IV cipher gets one further hash so the two keys differ.
     */
    @Override
    protected void initKey(byte[] salt) {
        key = SHA256.getHashWithSalt(key, salt);
        for (int i = 0; i < keyIterations; i++) {
            key = SHA256.getHash(key, true);
        }
        cipher.setKey(key);
        key = SHA256.getHash(key, true);
        cipherForInitVector.setKey(key);
    }

    @Override
    protected void writeDirect(byte[] b, int off, int len) {
        // Write without encryption (used for the unencrypted file header).
        super.write(b, off, len);
        pos += len;
    }

    @Override
    public void write(byte[] b, int off, int len) {
        // Grow the scratch buffer only when it is too small for this write.
        // (The original compared against b.length, which reallocated
        // needlessly when writing a short range out of a large array.)
        if (buffer.length < len) {
            buffer = new byte[len];
        }
        // Encrypt a copy so the caller's array is left untouched.
        System.arraycopy(b, off, buffer, 0, len);
        xorInitVector(buffer, 0, len, pos);
        cipher.encrypt(buffer, 0, len);
        super.write(buffer, 0, len);
        pos += len;
    }

    @Override
    protected void readFullyDirect(byte[] b, int off, int len) {
        // Read without decryption (used for the unencrypted file header).
        super.readFully(b, off, len);
        pos += len;
    }

    @Override
    public void readFully(byte[] b, int off, int len) {
        super.readFully(b, off, len);
        // Areas that were never written (for example after setLength) contain
        // only zero bytes and were never encrypted: decrypt only if at least
        // one byte is non-zero.
        for (int i = 0; i < len; i++) {
            if (b[i] != 0) {
                cipher.decrypt(b, off, len);
                xorInitVector(b, off, len, pos);
                break;
            }
        }
        pos += len;
    }

    @Override
    public void seek(long x) {
        this.pos = x;
        super.seek(x);
    }

    /**
     * XOR the data with a keystream derived from the file position, so that
     * identical plaintext blocks at different positions encrypt differently.
     * For each FILE_BLOCK_SIZE block, an IV of encrypted block counters is
     * generated and XOR-ed in. Assumes off/len are FILE_BLOCK_SIZE aligned -
     * TODO confirm against FileStore's calling convention.
     */
    private void xorInitVector(byte[] b, int off, int len, long p) {
        byte[] iv = bufferForInitVector;
        while (len > 0) {
            for (int i = 0; i < Constants.FILE_BLOCK_SIZE; i += 8) {
                // Each 8-byte word of the IV encodes the block counter
                // ((p + i) / 8) for position-dependence.
                Bits.writeLong(iv, i, (p + i) >>> 3);
            }
            cipherForInitVector.encrypt(iv, 0, Constants.FILE_BLOCK_SIZE);
            for (int i = 0; i < Constants.FILE_BLOCK_SIZE; i++) {
                b[off + i] ^= iv[i];
            }
            p += Constants.FILE_BLOCK_SIZE;
            off += Constants.FILE_BLOCK_SIZE;
            len -= Constants.FILE_BLOCK_SIZE;
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/security/XTEA.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.security;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.util.Bits;
/**
* An implementation of the XTEA block cipher algorithm.
* <p>
* This implementation uses 32 rounds.
* The best attack reported as of 2009 is 36 rounds (Wikipedia).
*/
public class XTEA implements BlockCipher {

    // The XTEA key-schedule constant (2^32 / golden ratio).
    private static final int DELTA = 0x9E3779B9;
    // Precomputed round keys (the key schedule), expanded in setKey and
    // stored in individual fields so the unrolled rounds below need no
    // array indexing.
    private int k0, k1, k2, k3, k4, k5, k6, k7;
    private int k8, k9, k10, k11, k12, k13, k14, k15;
    private int k16, k17, k18, k19, k20, k21, k22, k23;
    private int k24, k25, k26, k27, k28, k29, k30, k31;

    /**
     * Expand the 16-byte key into the 32 round keys.
     * Two round keys are produced per iteration: one from the low bits of
     * 'sum' and one from bits 11..12 of the incremented 'sum', per the
     * standard XTEA key schedule.
     */
    @Override
    public void setKey(byte[] b) {
        int[] key = new int[4];
        for (int i = 0; i < 16; i += 4) {
            key[i / 4] = Bits.readInt(b, i);
        }
        int[] r = new int[32];
        for (int i = 0, sum = 0; i < 32;) {
            r[i++] = sum + key[sum & 3];
            sum += DELTA;
            r[i++] = sum + key[ (sum >>> 11) & 3];
        }
        k0 = r[0]; k1 = r[1]; k2 = r[2]; k3 = r[3];
        k4 = r[4]; k5 = r[5]; k6 = r[6]; k7 = r[7];
        k8 = r[8]; k9 = r[9]; k10 = r[10]; k11 = r[11];
        k12 = r[12]; k13 = r[13]; k14 = r[14]; k15 = r[15];
        k16 = r[16]; k17 = r[17]; k18 = r[18]; k19 = r[19];
        k20 = r[20]; k21 = r[21]; k22 = r[22]; k23 = r[23];
        k24 = r[24]; k25 = r[25]; k26 = r[26]; k27 = r[27];
        k28 = r[28]; k29 = r[29]; k30 = r[30]; k31 = r[31];
    }

    @Override
    public void encrypt(byte[] bytes, int off, int len) {
        if (SysProperties.CHECK) {
            // ALIGN is inherited from BlockCipher (not visible in this chunk)
            if (len % ALIGN != 0) {
                DbException.throwInternalError("unaligned len " + len);
            }
        }
        // XTEA operates on 8-byte blocks, in place.
        for (int i = off; i < off + len; i += 8) {
            encryptBlock(bytes, bytes, i);
        }
    }

    @Override
    public void decrypt(byte[] bytes, int off, int len) {
        if (SysProperties.CHECK) {
            if (len % ALIGN != 0) {
                DbException.throwInternalError("unaligned len " + len);
            }
        }
        for (int i = off; i < off + len; i += 8) {
            decryptBlock(bytes, bytes, i);
        }
    }

    /**
     * Encrypt one 8-byte block: 32 Feistel-like rounds, fully unrolled with
     * the precomputed round keys k0..k31 applied in ascending order.
     */
    private void encryptBlock(byte[] in, byte[] out, int off) {
        int y = Bits.readInt(in, off);
        int z = Bits.readInt(in, off + 4);
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k0;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k1;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k2;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k3;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k4;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k5;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k6;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k7;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k8;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k9;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k10;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k11;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k12;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k13;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k14;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k15;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k16;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k17;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k18;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k19;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k20;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k21;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k22;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k23;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k24;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k25;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k26;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k27;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k28;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k29;
        y += (((z << 4) ^ (z >>> 5)) + z) ^ k30;
        z += (((y >>> 5) ^ (y << 4)) + y) ^ k31;
        Bits.writeInt(out, off, y);
        Bits.writeInt(out, off + 4, z);
    }

    /**
     * Decrypt one 8-byte block: the exact inverse of encryptBlock, applying
     * the round keys k31..k0 in descending order with subtraction.
     */
    private void decryptBlock(byte[] in, byte[] out, int off) {
        int y = Bits.readInt(in, off);
        int z = Bits.readInt(in, off + 4);
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k31;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k30;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k29;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k28;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k27;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k26;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k25;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k24;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k23;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k22;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k21;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k20;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k19;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k18;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k17;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k16;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k15;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k14;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k13;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k12;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k11;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k10;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k9;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k8;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k7;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k6;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k5;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k4;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k3;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k2;
        z -= (((y >>> 5) ^ (y << 4)) + y) ^ k1;
        y -= (((z << 4) ^ (z >>> 5)) + z) ^ k0;
        Bits.writeInt(out, off, y);
        Bits.writeInt(out, off + 4, z);
    }

    @Override
    public int getKeyLength() {
        return 16;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/Service.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server;
import java.sql.SQLException;
/**
* Classes implementing this interface usually provide a
* TCP/IP listener such as an FTP server.
 * They can be started and stopped, and may or may not
* allow remote connections.
*/
public interface Service {

    /**
     * Initialize the service from command line options.
     *
     * @param args the command line options
     */
    void init(String... args) throws Exception;

    /**
     * Get the URL of this service in a human readable form.
     *
     * @return the url
     */
    String getURL();

    /**
     * Start the service. This usually means creating the server socket.
     * This method must not block.
     */
    void start() throws SQLException;

    /**
     * Listen for incoming connections.
     * This method blocks until the service is stopped.
     */
    void listen();

    /**
     * Stop the service.
     */
    void stop();

    /**
     * Check if the service is running.
     *
     * @param traceError if errors should be written
     * @return if the server is running
     */
    boolean isRunning(boolean traceError);

    /**
     * Check if remote connections are allowed.
     *
     * @return true if remote connections are allowed
     */
    boolean getAllowOthers();

    /**
     * Get the human readable name of the service.
     *
     * @return the name
     */
    String getName();

    /**
     * Get the human readable short name of the service.
     *
     * @return the type
     */
    String getType();

    /**
     * Gets the port this service is listening on.
     *
     * @return the port
     */
    int getPort();

    /**
     * Check if a daemon thread should be used.
     *
     * @return true if a daemon thread should be used
     */
    boolean isDaemon();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/ShutdownHandler.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server;
/**
* A shutdown handler is a listener for shutdown events.
*/
public interface ShutdownHandler {

    /**
     * Tell the listener to shut down. Called by the server when it receives
     * a shutdown request, so the listener can release its own resources.
     */
    void shutdown();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/TcpServer.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.h2.Driver;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.ext.pulsar.PulsarExtension;
import org.h2.message.DbException;
import org.h2.util.JdbcUtils;
import org.h2.util.NetUtils;
import org.h2.util.StringUtils;
import org.h2.util.Tool;
/**
* The TCP server implements the native H2 database server protocol.
* It supports multiple client connections to multiple databases
* (many to many). The same database may be opened by multiple clients.
* Also supported is the mixed mode: opening databases in embedded mode,
* and at the same time start a TCP server to allow clients to connect to
* the same database over the network.
*/
public class TcpServer implements Service {

    // Shutdown modes accepted by stopServer(): NORMAL lets running sessions
    // finish; FORCE closes everything immediately.
    private static final int SHUTDOWN_NORMAL = 0;
    private static final int SHUTDOWN_FORCE = 1;

    /**
     * The name of the in-memory management database used by the TCP server
     * to keep the active sessions.
     */
    private static final String MANAGEMENT_DB_PREFIX = "management_db_";

    // All TCP servers in this JVM, keyed by port. Used by the static
    // stopServer() which is invoked via reflection from SQL.
    private static final Map<Integer, TcpServer> SERVERS =
            Collections.synchronizedMap(new HashMap<Integer, TcpServer>());

    private int port;
    // true if -tcpPort was given explicitly; then a bind failure is fatal
    // instead of falling back to an OS-assigned port
    private boolean portIsSet;
    private boolean trace;
    private boolean ssl;
    // set to true to make the accept loop in listen() terminate
    private boolean stop;
    private ShutdownHandler shutdownHandler;
    private ServerSocket serverSocket;
    // one TcpServerThread per open client connection
    private final Set<TcpServerThread> running =
            Collections.synchronizedSet(new HashSet<TcpServerThread>());
    private String baseDir;
    private boolean allowOthers;
    private boolean isDaemon;
    private boolean ifExists;
    // connection + prepared statements for the in-memory management database
    private Connection managementDb;
    private PreparedStatement managementDbAdd;
    private PreparedStatement managementDbRemove;
    private String managementPassword = "";
    private Thread listenerThread;
    private int nextThreadId;
    // optional key/keyDatabase pair: clients must present 'key' as the
    // database name, which is then mapped to 'keyDatabase'
    private String key, keyDatabase;

    /**
     * Get the database name of the management database.
     * The management database contains a table with active sessions (SESSIONS).
     *
     * @param port the TCP server port
     * @return the database name (usually starting with mem:)
     */
    public static String getManagementDbName(int port) {
        return "mem:" + MANAGEMENT_DB_PREFIX + port;
    }

    /**
     * Open the in-memory management database, create the SESSIONS table and
     * the STOP_SERVER alias (which lets clients stop this server via SQL;
     * see the static shutdown() method), and register this server in SERVERS.
     */
    private void initManagementDb() throws SQLException {
        Properties prop = new Properties();
        prop.setProperty("user", "");
        prop.setProperty("password", managementPassword);
        // avoid using the driver manager
        Connection conn = Driver.load().connect("jdbc:h2:" +
                getManagementDbName(port), prop);
        managementDb = conn;
        try (Statement stat = conn.createStatement()) {
            stat.execute("CREATE ALIAS IF NOT EXISTS STOP_SERVER FOR \"" +
                    TcpServer.class.getName() + ".stopServer\"");
            stat.execute("CREATE TABLE IF NOT EXISTS SESSIONS" +
                    "(ID INT PRIMARY KEY, URL VARCHAR, USER VARCHAR, " +
                    "CONNECTED TIMESTAMP)");
            managementDbAdd = conn.prepareStatement(
                    "INSERT INTO SESSIONS VALUES(?, ?, ?, NOW())");
            managementDbRemove = conn.prepareStatement(
                    "DELETE FROM SESSIONS WHERE ID=?");
        }
        SERVERS.put(port, this);
    }

    /**
     * Shut down this server by notifying the shutdown handler, if any.
     */
    void shutdown() {
        if (shutdownHandler != null) {
            shutdownHandler.shutdown();
        }
    }

    public void setShutdownHandler(ShutdownHandler shutdownHandler) {
        this.shutdownHandler = shutdownHandler;
    }

    /**
     * Add a connection to the management database.
     *
     * @param id the connection id
     * @param url the database URL
     * @param user the user name
     */
    synchronized void addConnection(int id, String url, String user) {
        try {
            managementDbAdd.setInt(1, id);
            managementDbAdd.setString(2, url);
            managementDbAdd.setString(3, user);
            managementDbAdd.execute();
        } catch (SQLException e) {
            // bookkeeping only - a failure here must not kill the connection
            DbException.traceThrowable(e);
        }
    }

    /**
     * Remove a connection from the management database.
     *
     * @param id the connection id
     */
    synchronized void removeConnection(int id) {
        try {
            managementDbRemove.setInt(1, id);
            managementDbRemove.execute();
        } catch (SQLException e) {
            DbException.traceThrowable(e);
        }
    }

    /**
     * Close the management database, ignoring (but tracing) close errors.
     */
    private synchronized void stopManagementDb() {
        if (managementDb != null) {
            try {
                managementDb.close();
            } catch (SQLException e) {
                DbException.traceThrowable(e);
            }
            managementDb = null;
        }
    }

    /**
     * Parse the command line options (-tcpPort, -tcpSSL, -tcpPassword,
     * -baseDir, -key, -tcpAllowOthers, -tcpDaemon, -ifExists, -trace)
     * and load the H2 driver.
     */
    @Override
    public void init(String... args) {
        port = Constants.DEFAULT_TCP_PORT;
        for (int i = 0; args != null && i < args.length; i++) {
            String a = args[i];
            if (Tool.isOption(a, "-trace")) {
                trace = true;
            } else if (Tool.isOption(a, "-tcpSSL")) {
                ssl = true;
            } else if (Tool.isOption(a, "-tcpPort")) {
                port = Integer.decode(args[++i]);
                portIsSet = true;
            } else if (Tool.isOption(a, "-tcpPassword")) {
                managementPassword = args[++i];
            } else if (Tool.isOption(a, "-baseDir")) {
                baseDir = args[++i];
            } else if (Tool.isOption(a, "-key")) {
                // -key takes two values: the key and the real database name
                key = args[++i];
                keyDatabase = args[++i];
            } else if (Tool.isOption(a, "-tcpAllowOthers")) {
                allowOthers = true;
            } else if (Tool.isOption(a, "-tcpDaemon")) {
                isDaemon = true;
            } else if (Tool.isOption(a, "-ifExists")) {
                ifExists = true;
            }
        }
        org.h2.Driver.load();
    }

    @Override
    public String getURL() {
        return (ssl ? "ssl" : "tcp") + "://" + NetUtils.getLocalAddress() + ":" + port;
    }

    @Override
    public int getPort() {
        return port;
    }

    /**
     * Check if this socket may connect to this server. Remote connections are
     * not allowed if the flag allowOthers is set.
     *
     * @param socket the socket
     * @return true if this client may connect
     */
    boolean allow(Socket socket) {
        if (allowOthers) {
            return true;
        }
        try {
            return NetUtils.isLocalAddress(socket);
        } catch (UnknownHostException e) {
            // if the peer address cannot be resolved, deny the connection
            traceError(e);
            return false;
        }
    }

    /**
     * Bind the server socket and open the management database.
     * If the configured port is busy and was not set explicitly, fall back
     * to an OS-assigned free port.
     */
    @Override
    public synchronized void start() throws SQLException {
        stop = false;
        try {
            serverSocket = NetUtils.createServerSocket(port, ssl);
        } catch (DbException e) {
            if (!portIsSet) {
                serverSocket = NetUtils.createServerSocket(0, ssl);
            } else {
                throw e;
            }
        }
        port = serverSocket.getLocalPort();
        initManagementDb();
    }

    /**
     * Accept loop: one TcpServerThread (and one Java thread) per client
     * connection. Blocks until stop is set; closing the server socket from
     * stop() makes accept() throw and ends the loop.
     */
    @Override
    public void listen() {
        listenerThread = Thread.currentThread();
        String threadName = listenerThread.getName();
        try {
            while (!stop) {
                Socket s = serverSocket.accept();
                TcpServerThread c = new TcpServerThread(s, this, nextThreadId++);
                running.add(c);
                Thread thread = new Thread(c, threadName + " thread");
                thread.setDaemon(isDaemon);
                c.setThread(thread);
                thread.start();
            }
            serverSocket = NetUtils.closeSilently(serverSocket);
        } catch (Exception e) {
            // an exception during a requested stop is expected - ignore it
            if (!stop) {
                DbException.traceThrowable(e);
            }
        }
        stopManagementDb();
    }

    /**
     * Probe whether the server is reachable by opening (and immediately
     * closing) a loopback socket to the listening port.
     */
    @Override
    public synchronized boolean isRunning(boolean traceError) {
        if (serverSocket == null) {
            return false;
        }
        try {
            Socket s = NetUtils.createLoopbackSocket(port, ssl);
            s.close();
            return true;
        } catch (Exception e) {
            if (traceError) {
                traceError(e);
            }
            return false;
        }
    }

    @Override
    public void stop() {
        // TODO server: share code between web and tcp servers
        // need to remove the server first, otherwise the connection is broken
        // while the server is still registered in this map
        SERVERS.remove(port);
        if (!stop) {
            stopManagementDb();
            stop = true;
            if (serverSocket != null) {
                try {
                    // closing the socket unblocks accept() in listen()
                    serverSocket.close();
                } catch (IOException e) {
                    DbException.traceThrowable(e);
                } catch (NullPointerException e) {
                    // ignore
                }
                serverSocket = null;
            }
            if (listenerThread != null) {
                try {
                    listenerThread.join(1000);
                } catch (InterruptedException e) {
                    DbException.traceThrowable(e);
                }
            }
        }
        // TODO server: using a boolean 'now' argument? a timeout?
        // iterate over a snapshot: c.close() removes entries from 'running'
        for (TcpServerThread c : new ArrayList<>(running)) {
            if (c != null) {
                c.close();
                try {
                    c.getThread().join(100);
                } catch (Exception e) {
                    DbException.traceThrowable(e);
                }
            }
        }
        // @author Vincent Zhang ivincent.zhang@gmail.com 2020/08/04
        PulsarExtension.shutdownSessionFactory();
    }

    /**
     * Stop a running server. This method is called via reflection from the
     * STOP_SERVER function.
     *
     * @param port the port where the server runs, or 0 for all running servers
     * @param password the password (or null)
     * @param shutdownMode the shutdown mode, SHUTDOWN_NORMAL or SHUTDOWN_FORCE.
     */
    public static void stopServer(int port, String password, int shutdownMode) {
        if (port == 0) {
            // recurse once per registered server
            for (int p : SERVERS.keySet().toArray(new Integer[0])) {
                if (p != 0) {
                    stopServer(p, password, shutdownMode);
                }
            }
            return;
        }
        TcpServer server = SERVERS.get(port);
        if (server == null) {
            return;
        }
        // silently ignore requests with the wrong management password
        if (!server.managementPassword.equals(password)) {
            return;
        }
        if (shutdownMode == SHUTDOWN_NORMAL) {
            server.stopManagementDb();
            server.stop = true;
            try {
                Socket s = NetUtils.createLoopbackSocket(port, false);
                s.close();
            } catch (Exception e) {
                // try to connect - so that accept returns
            }
        } else if (shutdownMode == SHUTDOWN_FORCE) {
            server.stop();
        }
        server.shutdown();
    }

    /**
     * Remove a thread from the list.
     *
     * @param t the thread to remove
     */
    void remove(TcpServerThread t) {
        running.remove(t);
    }

    /**
     * Get the configured base directory.
     *
     * @return the base directory
     */
    String getBaseDir() {
        return baseDir;
    }

    /**
     * Print a message if the trace flag is enabled.
     *
     * @param s the message
     */
    void trace(String s) {
        if (trace) {
            System.out.println(s);
        }
    }

    /**
     * Print a stack trace if the trace flag is enabled.
     *
     * @param e the exception
     */
    void traceError(Throwable e) {
        if (trace) {
            e.printStackTrace();
        }
    }

    @Override
    public boolean getAllowOthers() {
        return allowOthers;
    }

    @Override
    public String getType() {
        return "TCP";
    }

    @Override
    public String getName() {
        return "H2 TCP Server";
    }

    boolean getIfExists() {
        return ifExists;
    }

    /**
     * Stop the TCP server with the given URL.
     *
     * @param url the database URL
     * @param password the password
     * @param force if the server should be stopped immediately
     * @param all whether all TCP servers that are running in the JVM should be
     *            stopped
     */
    public static synchronized void shutdown(String url, String password,
            boolean force, boolean all) throws SQLException {
        try {
            int port = Constants.DEFAULT_TCP_PORT;
            int idx = url.lastIndexOf(':');
            if (idx >= 0) {
                String p = url.substring(idx + 1);
                if (StringUtils.isNumber(p)) {
                    port = Integer.decode(p);
                }
            }
            String db = getManagementDbName(port);
            try {
                org.h2.Driver.load();
            } catch (Throwable e) {
                throw DbException.convert(e);
            }
            // connect to the management database and invoke STOP_SERVER;
            // try at most twice (the first attempt may race with a server
            // that is just shutting down)
            for (int i = 0; i < 2; i++) {
                Connection conn = null;
                PreparedStatement prep = null;
                try {
                    conn = DriverManager.getConnection("jdbc:h2:" + url + "/" + db, "", password);
                    prep = conn.prepareStatement("CALL STOP_SERVER(?, ?, ?)");
                    prep.setInt(1, all ? 0 : port);
                    prep.setString(2, password);
                    prep.setInt(3, force ? SHUTDOWN_FORCE : SHUTDOWN_NORMAL);
                    try {
                        prep.execute();
                    } catch (SQLException e) {
                        if (force) {
                            // ignore
                        } else {
                            // a broken connection is expected when the server
                            // stops while we are still connected
                            if (e.getErrorCode() != ErrorCode.CONNECTION_BROKEN_1) {
                                throw e;
                            }
                        }
                    }
                    break;
                } catch (SQLException e) {
                    if (i == 1) {
                        throw e;
                    }
                } finally {
                    JdbcUtils.closeSilently(prep);
                    JdbcUtils.closeSilently(conn);
                }
            }
        } catch (Exception e) {
            throw DbException.toSQLException(e);
        }
    }

    /**
     * Cancel a running statement.
     *
     * @param sessionId the session id
     * @param statementId the statement id
     */
    void cancelStatement(String sessionId, int statementId) {
        // broadcast: each thread checks whether the session id matches
        for (TcpServerThread c : new ArrayList<>(running)) {
            if (c != null) {
                c.cancelStatement(sessionId, statementId);
            }
        }
    }

    /**
     * If no key is set, return the original database name. If a key is set,
     * check if the key matches. If yes, return the correct database name. If
     * not, throw an exception.
     *
     * @param db the key to test (or database name if no key is used)
     * @return the database name
     * @throws DbException if a key is set but doesn't match
     */
    public String checkKeyAndGetDatabaseName(String db) {
        if (key == null) {
            return db;
        }
        if (key.equals(db)) {
            return keyDatabase;
        }
        throw DbException.get(ErrorCode.WRONG_USER_OR_PASSWORD);
    }

    @Override
    public boolean isDaemon() {
        return isDaemon;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/TcpServerThread.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server;
import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.Socket;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Objects;
import org.h2.api.ErrorCode;
import org.h2.command.Command;
import org.h2.engine.ConnectionInfo;
import org.h2.engine.Constants;
import org.h2.engine.GeneratedKeysMode;
import org.h2.engine.Session;
import org.h2.engine.SessionRemote;
import org.h2.engine.SysProperties;
import org.h2.expression.Parameter;
import org.h2.expression.ParameterInterface;
import org.h2.expression.ParameterRemote;
import org.h2.ext.pulsar.PulsarExtension;
import org.h2.jdbc.JdbcSQLException;
import org.h2.message.DbException;
import org.h2.result.ResultColumn;
import org.h2.result.ResultInterface;
import org.h2.result.ResultWithGeneratedKeys;
import org.h2.store.LobStorageInterface;
import org.h2.util.IOUtils;
import org.h2.util.SmallLRUCache;
import org.h2.util.SmallMap;
import org.h2.value.Transfer;
import org.h2.value.Value;
import org.h2.value.ValueLobDb;
/**
* One server thread is opened per client connection.
*/
public class TcpServerThread implements Runnable {
    // Wire protocol reader/writer for this client connection.
    protected final Transfer transfer;
    // The server that accepted this connection.
    private final TcpServer server;
    // The database session; null before login and after close.
    private Session session;
    // Set to true to end the processing loop in run().
    private boolean stop;
    // The Java thread executing this runnable (set by the server).
    private Thread thread;
    // NOTE(review): used by code outside this chunk - presumably a cached
    // COMMIT command; confirm against the rest of the class.
    private Command commit;
    // Cache of server-side objects referenced by id from the client.
    private final SmallMap cache =
            new SmallMap(SysProperties.SERVER_CACHED_OBJECTS);
    // LRU cache of LOB streams, keyed by lob id.
    private final SmallLRUCache<Long, CachedInputStream> lobs =
            SmallLRUCache.newInstance(Math.max(
                    SysProperties.SERVER_CACHED_OBJECTS,
                    SysProperties.SERVER_RESULT_SET_FETCH_SIZE * 5));
    // Unique id of this connection, used as key in the management database.
    private final int threadId;
    // The negotiated TCP protocol version (see run()).
    private int clientVersion;
    // The session id, if set by code outside this chunk.
    private String sessionId;
    /**
     * Create a server thread for one client connection.
     *
     * @param socket the client socket
     * @param server the owning TCP server
     * @param id the unique connection id
     */
    TcpServerThread(Socket socket, TcpServer server, int id) {
        this.server = server;
        this.threadId = id;
        transfer = new Transfer(null, socket);
    }
    /**
     * Trace a message, prefixed with this connection's identity,
     * if tracing is enabled on the server.
     *
     * @param s the message
     */
    private void trace(String s) {
        server.trace(this + " " + s);
    }
    @Override
    public void run() {
        // Connection lifecycle: handshake (version negotiation + login),
        // then a command-processing loop until the client disconnects or
        // an unrecoverable error occurs.
        try {
            transfer.init();
            trace("Connect");
            // TODO server: should support a list of allowed databases
            // and a list of allowed clients
            try {
                Socket socket = transfer.getSocket();
                if (socket == null) {
                    // the transfer is already closed, prevent NPE in TcpServer#allow(Socket)
                    return;
                }
                if (!server.allow(transfer.getSocket())) {
                    throw DbException.get(ErrorCode.REMOTE_CONNECTION_NOT_ALLOWED);
                }
                // The client announces the protocol version range it
                // supports; pick the highest mutually supported version or
                // reject the connection.
                int minClientVersion = transfer.readInt();
                int maxClientVersion = transfer.readInt();
                if (maxClientVersion < Constants.TCP_PROTOCOL_VERSION_MIN_SUPPORTED) {
                    throw DbException.get(ErrorCode.DRIVER_VERSION_ERROR_2,
                            "" + clientVersion, "" + Constants.TCP_PROTOCOL_VERSION_MIN_SUPPORTED);
                } else if (minClientVersion > Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED) {
                    throw DbException.get(ErrorCode.DRIVER_VERSION_ERROR_2,
                            "" + clientVersion, "" + Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED);
                }
                if (maxClientVersion >= Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED) {
                    clientVersion = Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED;
                } else {
                    clientVersion = maxClientVersion;
                }
                transfer.setVersion(clientVersion);
                String db = transfer.readString();
                String originalURL = transfer.readString();
                if (db == null && originalURL == null) {
                    // Administrative connection: not a database session, but
                    // a one-shot command targeting another session.
                    String targetSessionId = transfer.readString();
                    int command = transfer.readInt();
                    stop = true;
                    if (command == SessionRemote.SESSION_CANCEL_STATEMENT) {
                        // cancel a running statement
                        int statementId = transfer.readInt();
                        server.cancelStatement(targetSessionId, statementId);
                    } else if (command == SessionRemote.SESSION_CHECK_KEY) {
                        // check if this is the correct server
                        db = server.checkKeyAndGetDatabaseName(targetSessionId);
                        if (!targetSessionId.equals(db)) {
                            transfer.writeInt(SessionRemote.STATUS_OK);
                        } else {
                            transfer.writeInt(SessionRemote.STATUS_ERROR);
                        }
                    }
                    // NOTE(review): control falls through to the normal
                    // connect path below even though stop is set; any
                    // failure there is reported via sendError. Confirm this
                    // matches the client's expectation for admin requests.
                }
                String baseDir = server.getBaseDir();
                if (baseDir == null) {
                    baseDir = SysProperties.getBaseDir();
                }
                db = server.checkKeyAndGetDatabaseName(db);
                ConnectionInfo ci = new ConnectionInfo(db);
                ci.setOriginalURL(originalURL);
                ci.setUserName(transfer.readString());
                ci.setUserPasswordHash(transfer.readBytes());
                ci.setFilePasswordHash(transfer.readBytes());
                int len = transfer.readInt();
                for (int i = 0; i < len; i++) {
                    ci.setProperty(transfer.readString(), transfer.readString());
                }
                // override client's requested properties with server settings
                if (baseDir != null) {
                    ci.setBaseDir(baseDir);
                }
                if (server.getIfExists()) {
                    ci.setProperty("IFEXISTS", "TRUE");
                }
                transfer.writeInt(SessionRemote.STATUS_OK);
                transfer.writeInt(clientVersion);
                transfer.flush();
                if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_13) {
                    // newer protocols transmit the file encryption key in a
                    // separate message after the status exchange
                    if (ci.getFilePasswordHash() != null) {
                        ci.setFileEncryptionKey(transfer.readBytes());
                    }
                }
                // session = Engine.getInstance().createSession(ci);
                // @author Vincent Zhang ivincent.zhang@gmail.com 2020/08/04
                session = (Session) PulsarExtension.createSession(ci);
                transfer.setSession(session);
                server.addConnection(threadId, originalURL, ci.getUserName());
                trace("Connected");
            } catch (Throwable e) {
                // login failed: report the error and skip the command loop
                sendError(e);
                stop = true;
            }
            // Command loop: process requests until the client disconnects
            // or close() is called; individual errors are reported to the
            // client without ending the loop.
            while (!stop) {
                try {
                    process();
                } catch (Throwable e) {
                    sendError(e);
                }
            }
            trace("Disconnect");
        } catch (Throwable e) {
            server.traceError(e);
        } finally {
            close();
        }
    }
/**
 * Roll back and close the current session, if any. The first runtime
 * failure encountered during cleanup is re-thrown after all cleanup
 * steps have run; the session field is always reset to null.
 */
private void closeSession() {
    if (session == null) {
        return;
    }
    RuntimeException deferred = null;
    try {
        // undo any open transaction before shutting the session down
        Command rollback = session.prepareLocal("ROLLBACK");
        rollback.executeUpdate(false);
    } catch (RuntimeException e) {
        deferred = e;
        server.traceError(e);
    } catch (Exception e) {
        server.traceError(e);
    }
    try {
        session.close();
        server.removeConnection(threadId);
    } catch (RuntimeException e) {
        // keep only the first runtime failure
        if (deferred == null) {
            deferred = e;
            server.traceError(e);
        }
    } catch (Exception e) {
        server.traceError(e);
    } finally {
        session = null;
    }
    if (deferred != null) {
        throw deferred;
    }
}
/**
 * Close this connection: stop the processing loop, tear down the
 * session, and release the transfer object and server bookkeeping.
 */
void close() {
    stop = true;
    try {
        closeSession();
    } catch (Exception e) {
        server.traceError(e);
    } finally {
        // always release the socket and unregister from the server
        transfer.close();
        trace("Close");
        server.remove(this);
    }
}
/**
 * Report an exception to the client as a STATUS_ERROR packet,
 * including SQL state, message, SQL statement (if known), error code,
 * and the full stack trace as text. If the error itself cannot be
 * written, the connection is marked for shutdown.
 *
 * @param t the exception to send
 */
private void sendError(Throwable t) {
    try {
        SQLException e = DbException.convert(t).getSQLException();
        // render the stack trace into a string for the client
        StringWriter sw = new StringWriter();
        e.printStackTrace(new PrintWriter(sw));
        String stackTrace = sw.toString();
        String message;
        String sql;
        if (e instanceof JdbcSQLException) {
            JdbcSQLException jdbc = (JdbcSQLException) e;
            message = jdbc.getOriginalMessage();
            sql = jdbc.getSQL();
        } else {
            message = e.getMessage();
            sql = null;
        }
        transfer.writeInt(SessionRemote.STATUS_ERROR);
        transfer.writeString(e.getSQLState());
        transfer.writeString(message);
        transfer.writeString(sql);
        transfer.writeInt(e.getErrorCode());
        transfer.writeString(stackTrace);
        transfer.flush();
    } catch (Exception e2) {
        if (!transfer.isClosed()) {
            server.traceError(e2);
        }
        // if writing the error does not work, close the connection
        stop = true;
    }
}
/**
 * Read the parameter values sent by the client and bind them to the
 * given command, in order.
 *
 * @param command the prepared command whose parameters are set
 */
private void setParameters(Command command) throws IOException {
    int count = transfer.readInt();
    ArrayList<? extends ParameterInterface> params = command.getParameters();
    for (int i = 0; i < count; i++) {
        ((Parameter) params.get(i)).setValue(transfer.readValue());
    }
}
/**
 * Read a single operation from the client, execute it, and write the
 * response. Called in a loop until the connection is stopped. The reads
 * and writes in each case mirror the client side in SessionRemote, so
 * their exact order is part of the wire protocol.
 */
private void process() throws IOException {
    int operation = transfer.readInt();
    switch (operation) {
    case SessionRemote.SESSION_PREPARE_READ_PARAMS:
    case SessionRemote.SESSION_PREPARE_READ_PARAMS2:
    case SessionRemote.SESSION_PREPARE: {
        int id = transfer.readInt();
        String sql = transfer.readString();
        int old = session.getModificationId();
        Command command = session.prepareLocal(sql);
        boolean readonly = command.isReadOnly();
        cache.addObject(id, command);
        boolean isQuery = command.isQuery();
        transfer.writeInt(getState(old)).writeBoolean(isQuery).
                writeBoolean(readonly);
        // newer clients also expect the command type
        if (operation == SessionRemote.SESSION_PREPARE_READ_PARAMS2) {
            transfer.writeInt(command.getCommandType());
        }
        ArrayList<? extends ParameterInterface> params = command.getParameters();
        transfer.writeInt(params.size());
        // parameter metadata is only sent for the READ_PARAMS variants
        if (operation != SessionRemote.SESSION_PREPARE) {
            for (ParameterInterface p : params) {
                ParameterRemote.writeMetaData(transfer, p);
            }
        }
        transfer.flush();
        break;
    }
    case SessionRemote.SESSION_CLOSE: {
        stop = true;
        closeSession();
        transfer.writeInt(SessionRemote.STATUS_OK).flush();
        close();
        break;
    }
    case SessionRemote.COMMAND_COMMIT: {
        // the COMMIT command is prepared lazily and then reused
        if (commit == null) {
            commit = session.prepareLocal("COMMIT");
        }
        int old = session.getModificationId();
        commit.executeUpdate(false);
        transfer.writeInt(getState(old)).flush();
        break;
    }
    case SessionRemote.COMMAND_GET_META_DATA: {
        int id = transfer.readInt();
        int objectId = transfer.readInt();
        Command command = (Command) cache.getObject(id, false);
        ResultInterface result = command.getMetaData();
        cache.addObject(objectId, result);
        int columnCount = result.getVisibleColumnCount();
        // metadata results have no rows, hence the trailing 0
        transfer.writeInt(SessionRemote.STATUS_OK).
                writeInt(columnCount).writeInt(0);
        for (int i = 0; i < columnCount; i++) {
            ResultColumn.writeColumn(transfer, result, i);
        }
        transfer.flush();
        break;
    }
    case SessionRemote.COMMAND_EXECUTE_QUERY: {
        int id = transfer.readInt();
        int objectId = transfer.readInt();
        int maxRows = transfer.readInt();
        int fetchSize = transfer.readInt();
        Command command = (Command) cache.getObject(id, false);
        setParameters(command);
        int old = session.getModificationId();
        ResultInterface result;
        // the session is not thread-safe; serialize command execution
        synchronized (session) {
            result = command.executeQuery(maxRows, false);
        }
        cache.addObject(objectId, result);
        int columnCount = result.getVisibleColumnCount();
        int state = getState(old);
        transfer.writeInt(state).writeInt(columnCount);
        int rowCount = result.getRowCount();
        transfer.writeInt(rowCount);
        for (int i = 0; i < columnCount; i++) {
            ResultColumn.writeColumn(transfer, result, i);
        }
        // send only the first fetchSize rows now; the client requests
        // the rest with RESULT_FETCH_ROWS
        int fetch = Math.min(rowCount, fetchSize);
        for (int i = 0; i < fetch; i++) {
            sendRow(result);
        }
        transfer.flush();
        break;
    }
    case SessionRemote.COMMAND_EXECUTE_UPDATE: {
        int id = transfer.readInt();
        Command command = (Command) cache.getObject(id, false);
        setParameters(command);
        boolean supportsGeneratedKeys = clientVersion >= Constants.TCP_PROTOCOL_VERSION_17;
        boolean writeGeneratedKeys = supportsGeneratedKeys;
        Object generatedKeysRequest;
        // decode which generated keys the client wants (protocol >= 17)
        if (supportsGeneratedKeys) {
            int mode = transfer.readInt();
            switch (mode) {
            case GeneratedKeysMode.NONE:
                generatedKeysRequest = false;
                writeGeneratedKeys = false;
                break;
            case GeneratedKeysMode.AUTO:
                generatedKeysRequest = true;
                break;
            case GeneratedKeysMode.COLUMN_NUMBERS: {
                int len = transfer.readInt();
                int[] keys = new int[len];
                for (int i = 0; i < len; i++) {
                    keys[i] = transfer.readInt();
                }
                generatedKeysRequest = keys;
                break;
            }
            case GeneratedKeysMode.COLUMN_NAMES: {
                int len = transfer.readInt();
                String[] keys = new String[len];
                for (int i = 0; i < len; i++) {
                    keys[i] = transfer.readString();
                }
                generatedKeysRequest = keys;
                break;
            }
            default:
                throw DbException.get(ErrorCode.CONNECTION_BROKEN_1,
                        "Unsupported generated keys' mode " + mode);
            }
        } else {
            generatedKeysRequest = false;
        }
        int old = session.getModificationId();
        ResultWithGeneratedKeys result;
        synchronized (session) {
            result = command.executeUpdate(generatedKeysRequest);
        }
        int status;
        if (session.isClosed()) {
            // e.g. a SHUTDOWN statement: reply, then stop this thread
            status = SessionRemote.STATUS_CLOSED;
            stop = true;
        } else {
            status = getState(old);
        }
        transfer.writeInt(status).writeInt(result.getUpdateCount()).
                writeBoolean(session.getAutoCommit());
        if (writeGeneratedKeys) {
            ResultInterface generatedKeys = result.getGeneratedKeys();
            int columnCount = generatedKeys.getVisibleColumnCount();
            transfer.writeInt(columnCount);
            int rowCount = generatedKeys.getRowCount();
            transfer.writeInt(rowCount);
            for (int i = 0; i < columnCount; i++) {
                ResultColumn.writeColumn(transfer, generatedKeys, i);
            }
            for (int i = 0; i < rowCount; i++) {
                sendRow(generatedKeys);
            }
            generatedKeys.close();
        }
        transfer.flush();
        break;
    }
    case SessionRemote.COMMAND_CLOSE: {
        int id = transfer.readInt();
        Command command = (Command) cache.getObject(id, true);
        if (command != null) {
            command.close();
            cache.freeObject(id);
        }
        break;
    }
    case SessionRemote.RESULT_FETCH_ROWS: {
        int id = transfer.readInt();
        int count = transfer.readInt();
        ResultInterface result = (ResultInterface) cache.getObject(id, false);
        transfer.writeInt(SessionRemote.STATUS_OK);
        for (int i = 0; i < count; i++) {
            sendRow(result);
        }
        transfer.flush();
        break;
    }
    case SessionRemote.RESULT_RESET: {
        int id = transfer.readInt();
        ResultInterface result = (ResultInterface) cache.getObject(id, false);
        result.reset();
        break;
    }
    case SessionRemote.RESULT_CLOSE: {
        int id = transfer.readInt();
        ResultInterface result = (ResultInterface) cache.getObject(id, true);
        if (result != null) {
            result.close();
            cache.freeObject(id);
        }
        break;
    }
    case SessionRemote.CHANGE_ID: {
        // re-register a cached object under a new id
        int oldId = transfer.readInt();
        int newId = transfer.readInt();
        Object obj = cache.getObject(oldId, false);
        cache.freeObject(oldId);
        cache.addObject(newId, obj);
        break;
    }
    case SessionRemote.SESSION_SET_ID: {
        sessionId = transfer.readString();
        transfer.writeInt(SessionRemote.STATUS_OK);
        // protocol >= 15 also reports the auto-commit state
        if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_15) {
            transfer.writeBoolean(session.getAutoCommit());
        }
        transfer.flush();
        break;
    }
    case SessionRemote.SESSION_SET_AUTOCOMMIT: {
        boolean autoCommit = transfer.readBoolean();
        session.setAutoCommit(autoCommit);
        transfer.writeInt(SessionRemote.STATUS_OK).flush();
        break;
    }
    case SessionRemote.SESSION_HAS_PENDING_TRANSACTION: {
        transfer.writeInt(SessionRemote.STATUS_OK).
                writeInt(session.hasPendingTransaction() ? 1 : 0).flush();
        break;
    }
    case SessionRemote.LOB_READ: {
        long lobId = transfer.readLong();
        byte[] hmac;
        CachedInputStream in;
        boolean verifyMac;
        // protocol >= 12 sends an HMAC so that only the client that
        // received the LOB reference can read its data
        if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_11) {
            if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) {
                hmac = transfer.readBytes();
                verifyMac = true;
            } else {
                hmac = null;
                verifyMac = false;
            }
            in = lobs.get(lobId);
            if (in == null && verifyMac) {
                in = new CachedInputStream(null);
                lobs.put(lobId, in);
            }
        } else {
            verifyMac = false;
            hmac = null;
            in = lobs.get(lobId);
        }
        long offset = transfer.readLong();
        int length = transfer.readInt();
        if (verifyMac) {
            transfer.verifyLobMac(hmac, lobId);
        }
        if (in == null) {
            throw DbException.get(ErrorCode.OBJECT_CLOSED);
        }
        // non-sequential read: re-open the LOB and skip to the offset
        if (in.getPos() != offset) {
            LobStorageInterface lobStorage = session.getDataHandler().getLobStorage();
            // only the lob id is used
            ValueLobDb lob = ValueLobDb.create(Value.BLOB, null, -1, lobId, hmac, -1);
            InputStream lobIn = lobStorage.getInputStream(lob, hmac, -1);
            in = new CachedInputStream(lobIn);
            lobs.put(lobId, in);
            lobIn.skip(offset);
        }
        // limit the buffer size
        length = Math.min(16 * Constants.IO_BUFFER_SIZE, length);
        byte[] buff = new byte[length];
        length = IOUtils.readFully(in, buff, length);
        transfer.writeInt(SessionRemote.STATUS_OK);
        transfer.writeInt(length);
        transfer.writeBytes(buff, 0, length);
        transfer.flush();
        break;
    }
    default:
        // unknown operation: the protocol state is undefined, so shut
        // down this connection
        trace("Unknown operation: " + operation);
        closeSession();
        close();
    }
}
/**
 * Map the session's modification counter to a client status code.
 *
 * @param oldModificationId the counter value captured before the operation
 * @return STATUS_OK if nothing changed, STATUS_OK_STATE_CHANGED otherwise
 */
private int getState(int oldModificationId) {
    boolean unchanged = session.getModificationId() == oldModificationId;
    return unchanged ? SessionRemote.STATUS_OK
            : SessionRemote.STATUS_OK_STATE_CHANGED;
}
/**
 * Write the next row of the given result to the client: a boolean
 * "row present" marker followed by the column values, or just "false"
 * when the result is exhausted.
 *
 * @param result the result to read the next row from
 */
private void sendRow(ResultInterface result) throws IOException {
    if (!result.next()) {
        transfer.writeBoolean(false);
        return;
    }
    transfer.writeBoolean(true);
    Value[] row = result.currentRow();
    for (int i = 0; i < result.getVisibleColumnCount(); i++) {
        // old clients (protocol < 12) need LOB bookkeeping via writeValue
        if (clientVersion >= Constants.TCP_PROTOCOL_VERSION_12) {
            transfer.writeValue(row[i]);
        } else {
            writeValue(row[i]);
        }
    }
}
/**
 * Write a value to the client, registering stored LOBs first so that a
 * later LOB_READ request for them can be served.
 *
 * @param v the value to write
 */
private void writeValue(Value v) throws IOException {
    int type = v.getType();
    if ((type == Value.CLOB || type == Value.BLOB)
            && v instanceof ValueLobDb) {
        ValueLobDb lob = (ValueLobDb) v;
        if (lob.isStored()) {
            // placeholder entry; the stream is opened on first read
            lobs.put(lob.getLobId(), new CachedInputStream(null));
        }
    }
    transfer.writeValue(v);
}
/**
 * Set the thread that runs this connection handler.
 *
 * @param thread the worker thread
 */
void setThread(Thread thread) {
    this.thread = thread;
}
/**
 * Get the thread that runs this connection handler.
 *
 * @return the worker thread
 */
Thread getThread() {
    return thread;
}
/**
 * Cancel a running statement, but only if the cancel request is
 * addressed to this session.
 *
 * @param targetSessionId the session id the request is aimed at
 * @param statementId the statement to cancel
 */
void cancelStatement(String targetSessionId, int statementId) {
    if (!Objects.equals(targetSessionId, this.sessionId)) {
        return;
    }
    Command cmd = (Command) cache.getObject(statementId, false);
    cmd.cancel();
}
/**
 * An input stream that tracks how many bytes have been consumed, so a
 * sequential LOB read can be detected and served without re-opening
 * the underlying stream.
 */
static class CachedInputStream extends FilterInputStream {

    // shared empty stream used for placeholder entries
    private static final ByteArrayInputStream DUMMY =
            new ByteArrayInputStream(new byte[0]);

    // bytes consumed so far; -1 marks a placeholder without a backing
    // stream, so it can never match a requested offset
    private long pos;

    CachedInputStream(InputStream in) {
        super(in == null ? DUMMY : in);
        if (in == null) {
            pos = -1;
        }
    }

    @Override
    public int read(byte[] buff, int off, int len) throws IOException {
        int n = super.read(buff, off, len);
        if (n > 0) {
            pos += n;
        }
        return n;
    }

    @Override
    public int read() throws IOException {
        int b = in.read();
        if (b != -1) {
            pos++;
        }
        return b;
    }

    @Override
    public long skip(long n) throws IOException {
        long skipped = super.skip(n);
        if (skipped > 0) {
            pos += skipped;
        }
        return skipped;
    }

    /**
     * @return the number of bytes consumed so far, or -1 for a
     *         placeholder entry
     */
    public long getPos() {
        return pos;
    }
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/pg/PgServer.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.pg;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.server.Service;
import org.h2.util.NetUtils;
import org.h2.util.Tool;
/**
* This class implements a subset of the PostgreSQL protocol as described here:
* http://developer.postgresql.org/pgdocs/postgres/protocol.html
* The PostgreSQL catalog is described here:
* http://www.postgresql.org/docs/7.4/static/catalogs.html
*
* @author Thomas Mueller
* @author Sergi Vladykin 2009-07-03 (convertType)
*/
public class PgServer implements Service {

    /**
     * The default port to use for the PG server.
     * This value is also in the documentation and in the Server javadoc.
     */
    public static final int DEFAULT_PORT = 5435;

    /**
     * The VARCHAR type.
     */
    public static final int PG_TYPE_VARCHAR = 1043;

    // Further PostgreSQL type OIDs, as defined in pg_catalog.pg_type.
    public static final int PG_TYPE_BOOL = 16;
    public static final int PG_TYPE_BYTEA = 17;
    public static final int PG_TYPE_BPCHAR = 1042;
    public static final int PG_TYPE_INT8 = 20;
    public static final int PG_TYPE_INT2 = 21;
    public static final int PG_TYPE_INT4 = 23;
    public static final int PG_TYPE_TEXT = 25;
    public static final int PG_TYPE_OID = 26;
    public static final int PG_TYPE_FLOAT4 = 700;
    public static final int PG_TYPE_FLOAT8 = 701;
    public static final int PG_TYPE_UNKNOWN = 705;
    public static final int PG_TYPE_TEXTARRAY = 1009;
    public static final int PG_TYPE_DATE = 1082;
    public static final int PG_TYPE_TIME = 1083;
    public static final int PG_TYPE_TIMESTAMP_NO_TMZONE = 1114;
    public static final int PG_TYPE_NUMERIC = 1700;

    // type OIDs registered as supported (see getTypeSet / checkType)
    private final HashSet<Integer> typeSet = new HashSet<>();

    private int port = PgServer.DEFAULT_PORT;
    // true if -pgPort was given; then a busy port is a hard error
    private boolean portIsSet;
    private boolean stop;
    private boolean trace;
    private ServerSocket serverSocket;
    // all currently connected client handler threads
    private final Set<PgServerThread> running = Collections.
            synchronizedSet(new HashSet<PgServerThread>());
    // generator for the fake backend process ids sent to clients
    private final AtomicInteger pid = new AtomicInteger();
    private String baseDir;
    private boolean allowOthers;
    private boolean isDaemon;
    private boolean ifExists;
    // if a key is set, clients must pass 'key' as the database name and
    // are then connected to 'keyDatabase' instead
    private String key, keyDatabase;

    @Override
    public void init(String... args) {
        port = DEFAULT_PORT;
        for (int i = 0; args != null && i < args.length; i++) {
            String a = args[i];
            if (Tool.isOption(a, "-trace")) {
                trace = true;
            } else if (Tool.isOption(a, "-pgPort")) {
                port = Integer.decode(args[++i]);
                portIsSet = true;
            } else if (Tool.isOption(a, "-baseDir")) {
                baseDir = args[++i];
            } else if (Tool.isOption(a, "-pgAllowOthers")) {
                allowOthers = true;
            } else if (Tool.isOption(a, "-pgDaemon")) {
                isDaemon = true;
            } else if (Tool.isOption(a, "-ifExists")) {
                ifExists = true;
            } else if (Tool.isOption(a, "-key")) {
                key = args[++i];
                keyDatabase = args[++i];
            }
        }
        org.h2.Driver.load();
        // int testing;
        // trace = true;
    }

    /**
     * Whether trace output is enabled.
     */
    boolean getTrace() {
        return trace;
    }

    /**
     * Print a message if the trace flag is enabled.
     *
     * @param s the message
     */
    void trace(String s) {
        if (trace) {
            System.out.println(s);
        }
    }

    /**
     * Remove a thread from the list.
     *
     * @param t the thread to remove
     */
    synchronized void remove(PgServerThread t) {
        running.remove(t);
    }

    /**
     * Print the stack trace if the trace flag is enabled.
     *
     * @param e the exception
     */
    void traceError(Exception e) {
        if (trace) {
            e.printStackTrace();
        }
    }

    @Override
    public String getURL() {
        return "pg://" + NetUtils.getLocalAddress() + ":" + port;
    }

    @Override
    public int getPort() {
        return port;
    }

    /**
     * Check whether a client may connect; remote clients are only
     * accepted when -pgAllowOthers is set.
     */
    private boolean allow(Socket socket) {
        if (allowOthers) {
            return true;
        }
        try {
            return NetUtils.isLocalAddress(socket);
        } catch (UnknownHostException e) {
            traceError(e);
            return false;
        }
    }

    @Override
    public void start() {
        stop = false;
        try {
            serverSocket = NetUtils.createServerSocket(port, false);
        } catch (DbException e) {
            // fall back to an ephemeral port unless a port was
            // explicitly requested on the command line
            if (!portIsSet) {
                serverSocket = NetUtils.createServerSocket(0, false);
            } else {
                throw e;
            }
        }
        port = serverSocket.getLocalPort();
    }

    @Override
    public void listen() {
        String threadName = Thread.currentThread().getName();
        try {
            // accept loop: one handler thread per client connection
            while (!stop) {
                Socket s = serverSocket.accept();
                if (!allow(s)) {
                    trace("Connection not allowed");
                    s.close();
                } else {
                    PgServerThread c = new PgServerThread(s, this);
                    running.add(c);
                    c.setProcessId(pid.incrementAndGet());
                    Thread thread = new Thread(c, threadName+" thread");
                    thread.setDaemon(isDaemon);
                    c.setThread(thread);
                    thread.start();
                }
            }
        } catch (Exception e) {
            // accept() fails with an exception when the socket is
            // closed by stop(); only report unexpected failures
            if (!stop) {
                e.printStackTrace();
            }
        }
    }

    @Override
    public void stop() {
        // TODO server: combine with tcp server
        if (!stop) {
            stop = true;
            if (serverSocket != null) {
                try {
                    serverSocket.close();
                } catch (IOException e) {
                    // TODO log exception
                    e.printStackTrace();
                }
                serverSocket = null;
            }
        }
        // TODO server: using a boolean 'now' argument? a timeout?
        for (PgServerThread c : new ArrayList<>(running)) {
            c.close();
            try {
                Thread t = c.getThread();
                if (t != null) {
                    t.join(100);
                }
            } catch (Exception e) {
                // TODO log exception
                e.printStackTrace();
            }
        }
    }

    @Override
    public boolean isRunning(boolean traceError) {
        if (serverSocket == null) {
            return false;
        }
        try {
            // probe the listener with a loopback connection
            Socket s = NetUtils.createLoopbackSocket(serverSocket.getLocalPort(), false);
            s.close();
            return true;
        } catch (Exception e) {
            if (traceError) {
                traceError(e);
            }
            return false;
        }
    }

    /**
     * Get the thread with the given process id.
     *
     * @param processId the process id
     * @return the thread
     */
    PgServerThread getThread(int processId) {
        for (PgServerThread c : new ArrayList<>(running)) {
            if (c.getProcessId() == processId) {
                return c;
            }
        }
        return null;
    }

    /**
     * The base directory for databases, or null if not set.
     */
    String getBaseDir() {
        return baseDir;
    }

    @Override
    public boolean getAllowOthers() {
        return allowOthers;
    }

    @Override
    public String getType() {
        return "PG";
    }

    @Override
    public String getName() {
        return "H2 PG Server";
    }

    /**
     * Whether only already-existing databases may be opened.
     */
    boolean getIfExists() {
        return ifExists;
    }

    /**
     * The Java implementation of the PostgreSQL function pg_get_indexdef. The
     * method is used to get CREATE INDEX command for an index, or the column
     * definition of one column in the index.
     *
     * @param conn the connection
     * @param indexId the index id
     * @param ordinalPosition the ordinal position (null if the SQL statement
     *            should be returned)
     * @param pretty this flag is ignored
     * @return the SQL statement or the column name
     */
    @SuppressWarnings("unused")
    public static String getIndexColumn(Connection conn, int indexId,
            Integer ordinalPosition, Boolean pretty) throws SQLException {
        if (ordinalPosition == null || ordinalPosition.intValue() == 0) {
            PreparedStatement prep = conn.prepareStatement(
                    "select sql from information_schema.indexes where id=?");
            prep.setInt(1, indexId);
            ResultSet rs = prep.executeQuery();
            if (rs.next()) {
                return rs.getString(1);
            }
            return "";
        }
        PreparedStatement prep = conn.prepareStatement(
                "select column_name from information_schema.indexes " +
                "where id=? and ordinal_position=?");
        prep.setInt(1, indexId);
        prep.setInt(2, ordinalPosition.intValue());
        ResultSet rs = prep.executeQuery();
        if (rs.next()) {
            return rs.getString(1);
        }
        return "";
    }

    /**
     * Get the name of the current schema.
     * This method is called by the database.
     *
     * @param conn the connection
     * @return the schema name
     */
    public static String getCurrentSchema(Connection conn) throws SQLException {
        ResultSet rs = conn.createStatement().executeQuery("call schema()");
        rs.next();
        return rs.getString(1);
    }

    /**
     * Get the OID of an object. This method is called by the database.
     *
     * @param conn the connection
     * @param tableName the table name
     * @return the oid
     */
    public static int getOid(Connection conn, String tableName)
            throws SQLException {
        // strip surrounding double quotes from a quoted identifier
        if (tableName.startsWith("\"") && tableName.endsWith("\"")) {
            tableName = tableName.substring(1, tableName.length() - 1);
        }
        PreparedStatement prep = conn.prepareStatement(
                "select oid from pg_class where relName = ?");
        prep.setString(1, tableName);
        ResultSet rs = prep.executeQuery();
        if (!rs.next()) {
            return 0;
        }
        return rs.getInt(1);
    }

    /**
     * Get the name of this encoding code.
     * This method is called by the database.
     *
     * @param code the encoding code
     * @return the encoding name
     */
    public static String getEncodingName(int code) {
        switch (code) {
        case 0:
            return "SQL_ASCII";
        case 6:
            return "UTF8";
        case 8:
            return "LATIN1";
        default:
            return code < 40 ? "UTF8" : "";
        }
    }

    /**
     * Get the version. This method must return PostgreSQL to keep some clients
     * happy. This method is called by the database.
     *
     * @return the server name and version
     */
    public static String getVersion() {
        return "PostgreSQL " + Constants.PG_VERSION + " server protocol using H2 " +
                Constants.getFullVersion();
    }

    /**
     * Get the current system time.
     * This method is called by the database.
     *
     * @return the current system time
     */
    public static Timestamp getStartTime() {
        return new Timestamp(System.currentTimeMillis());
    }

    /**
     * Get the user name for this id.
     * This method is called by the database.
     *
     * @param conn the connection
     * @param id the user id
     * @return the user name
     */
    public static String getUserById(Connection conn, int id) throws SQLException {
        PreparedStatement prep = conn.prepareStatement(
                "SELECT NAME FROM INFORMATION_SCHEMA.USERS WHERE ID=?");
        prep.setInt(1, id);
        ResultSet rs = prep.executeQuery();
        if (rs.next()) {
            return rs.getString(1);
        }
        return null;
    }

    /**
     * Check if this session has the given database privilege.
     * This method is called by the database; it always grants access.
     *
     * @param id the session id
     * @param privilege the privilege to check
     * @return true
     */
    @SuppressWarnings("unused")
    public static boolean hasDatabasePrivilege(int id, String privilege) {
        return true;
    }

    /**
     * Check if the current session has access to this table.
     * This method is called by the database; it always grants access.
     *
     * @param table the table name
     * @param privilege the privilege to check
     * @return true
     */
    @SuppressWarnings("unused")
    public static boolean hasTablePrivilege(String table, String privilege) {
        return true;
    }

    /**
     * Get the current transaction id.
     * This method is called by the database.
     *
     * @param table the table name
     * @param id the id
     * @return 1
     */
    @SuppressWarnings("unused")
    public static int getCurrentTid(String table, String id) {
        return 1;
    }

    /**
     * A fake wrapper around pg_get_expr(expr_text, relation_oid), in PostgreSQL
     * it "decompiles the internal form of an expression, assuming that any vars
     * in it refer to the relation indicated by the second parameter".
     *
     * @param exprText the expression text
     * @param relationOid the relation object id
     * @return always null
     */
    @SuppressWarnings("unused")
    public static String getPgExpr(String exprText, int relationOid) {
        return null;
    }

    /**
     * Get the name (pg_type.typname) of the given type, or null if it is
     * not in the catalog. This method is called by the database.
     *
     * @param conn the connection
     * @param pgType the PostgreSQL type oid
     * @param typeMod the type modifier (typically -1)
     * @return the name of the given type
     */
    public static String formatType(Connection conn, int pgType, int typeMod)
            throws SQLException {
        PreparedStatement prep = conn.prepareStatement(
                "select typname from pg_catalog.pg_type where oid = ? and typtypmod = ?");
        prep.setInt(1, pgType);
        prep.setInt(2, typeMod);
        ResultSet rs = prep.executeQuery();
        if (rs.next()) {
            return rs.getString(1);
        }
        return null;
    }

    /**
     * Convert the SQL type to a PostgreSQL type
     *
     * @param type the SQL type
     * @return the PostgreSQL type
     */
    public static int convertType(final int type) {
        switch (type) {
        case Types.BOOLEAN:
            return PG_TYPE_BOOL;
        case Types.VARCHAR:
            return PG_TYPE_VARCHAR;
        case Types.CLOB:
            return PG_TYPE_TEXT;
        case Types.CHAR:
            return PG_TYPE_BPCHAR;
        case Types.SMALLINT:
            return PG_TYPE_INT2;
        case Types.INTEGER:
            return PG_TYPE_INT4;
        case Types.BIGINT:
            return PG_TYPE_INT8;
        case Types.DECIMAL:
            return PG_TYPE_NUMERIC;
        case Types.REAL:
            return PG_TYPE_FLOAT4;
        case Types.DOUBLE:
            return PG_TYPE_FLOAT8;
        case Types.TIME:
            return PG_TYPE_TIME;
        case Types.DATE:
            return PG_TYPE_DATE;
        case Types.TIMESTAMP:
            return PG_TYPE_TIMESTAMP_NO_TMZONE;
        case Types.VARBINARY:
            return PG_TYPE_BYTEA;
        case Types.BLOB:
            return PG_TYPE_OID;
        case Types.ARRAY:
            return PG_TYPE_TEXTARRAY;
        default:
            return PG_TYPE_UNKNOWN;
        }
    }

    /**
     * Get the type hash set.
     *
     * @return the type set
     */
    HashSet<Integer> getTypeSet() {
        return typeSet;
    }

    /**
     * Check whether a data type is supported.
     * A warning is logged if not.
     *
     * @param type the type
     */
    void checkType(int type) {
        if (!typeSet.contains(type)) {
            trace("Unsupported type: " + type);
        }
    }

    /**
     * If no key is set, return the original database name. If a key is set,
     * check if the key matches. If yes, return the correct database name. If
     * not, throw an exception.
     *
     * @param db the key to test (or database name if no key is used)
     * @return the database name
     * @throws DbException if a key is set but doesn't match
     */
    public String checkKeyAndGetDatabaseName(String db) {
        if (key == null) {
            return db;
        }
        if (key.equals(db)) {
            return keyDatabase;
        }
        throw DbException.get(ErrorCode.WRONG_USER_OR_PASSWORD);
    }

    @Override
    public boolean isDaemon() {
        return isDaemon;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/pg/PgServerThread.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.pg;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Reader;
import java.io.StringReader;
import java.net.Socket;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Properties;
import org.h2.command.CommandInterface;
import org.h2.engine.ConnectionInfo;
import org.h2.engine.Constants;
import org.h2.engine.SysProperties;
import org.h2.jdbc.JdbcConnection;
import org.h2.jdbc.JdbcPreparedStatement;
import org.h2.jdbc.JdbcResultSet;
import org.h2.jdbc.JdbcStatement;
import org.h2.message.DbException;
import org.h2.util.DateTimeUtils;
import org.h2.util.JdbcUtils;
import org.h2.util.MathUtils;
import org.h2.util.ScriptReader;
import org.h2.util.StringUtils;
import org.h2.util.Utils;
import org.h2.value.CaseInsensitiveMap;
import org.h2.value.Value;
import org.h2.value.ValueDate;
import org.h2.value.ValueNull;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;
/**
* One server thread is opened for each client.
*/
public class PgServerThread implements Runnable {
private static final boolean INTEGER_DATE_TYPES = false;
private final PgServer server;
private Socket socket;
private Connection conn;
private boolean stop;
private DataInputStream dataInRaw;
private DataInputStream dataIn;
private OutputStream out;
private int messageType;
private ByteArrayOutputStream outBuffer;
private DataOutputStream dataOut;
private Thread thread;
private boolean initDone;
private String userName;
private String databaseName;
private int processId;
private final int secret;
private JdbcStatement activeRequest;
private String clientEncoding = SysProperties.PG_DEFAULT_CLIENT_ENCODING;
private String dateStyle = "ISO, MDY";
private final HashMap<String, Prepared> prepared =
new CaseInsensitiveMap<>();
private final HashMap<String, Portal> portals =
new CaseInsensitiveMap<>();
/**
 * Create a handler for one client connection.
 *
 * @param socket the accepted client socket
 * @param server the owning PG server
 */
PgServerThread(Socket socket, PgServer server) {
    this.socket = socket;
    this.server = server;
    // random secret used to authenticate CancelRequest messages
    this.secret = (int) MathUtils.secureRandomLong();
}
@Override
public void run() {
    try {
        server.trace("Connect");
        dataInRaw = new DataInputStream(socket.getInputStream());
        out = socket.getOutputStream();
        // handle protocol messages until the client disconnects or an
        // error stops this handler
        while (!stop) {
            process();
            out.flush();
        }
    } catch (EOFException ignored) {
        // a plain client disconnect; nothing to report
    } catch (Exception e) {
        server.traceError(e);
    } finally {
        server.trace("Disconnect");
        close();
    }
}
/**
 * Read a zero-terminated string from the current message payload,
 * decoded with the client's encoding.
 *
 * @return the decoded string (empty if the terminator comes first)
 */
private String readString() throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    // strings on the wire end with a 0 byte; also stop at end of stream
    for (int b = dataIn.read(); b > 0; b = dataIn.read()) {
        bytes.write(b);
    }
    return new String(bytes.toByteArray(), getEncoding());
}
// Read a 4-byte big-endian integer from the current message payload.
private int readInt() throws IOException {
    return dataIn.readInt();
}
// Read a 2-byte big-endian integer from the current message payload.
private short readShort() throws IOException {
    return dataIn.readShort();
}
// Read a single byte from the current message payload.
private byte readByte() throws IOException {
    return dataIn.readByte();
}
// Fill the given buffer completely from the current message payload.
private void readFully(byte[] buff) throws IOException {
    dataIn.readFully(buff);
}
private void process() throws IOException {
int x;
if (initDone) {
x = dataInRaw.read();
if (x < 0) {
stop = true;
return;
}
} else {
x = 0;
}
int len = dataInRaw.readInt();
len -= 4;
byte[] data = Utils.newBytes(len);
dataInRaw.readFully(data, 0, len);
dataIn = new DataInputStream(new ByteArrayInputStream(data, 0, len));
switch (x) {
case 0:
server.trace("Init");
int version = readInt();
if (version == 80877102) {
server.trace("CancelRequest");
int pid = readInt();
int key = readInt();
PgServerThread c = server.getThread(pid);
if (c != null && key == c.secret) {
c.cancelRequest();
} else {
// According to the PostgreSQL documentation, when canceling
// a request, if an invalid secret is provided then no
// exception should be sent back to the client.
server.trace("Invalid CancelRequest: pid=" + pid + ", key=" + key);
}
close();
} else if (version == 80877103) {
server.trace("SSLRequest");
out.write('N');
} else {
server.trace("StartupMessage");
server.trace(" version " + version +
" (" + (version >> 16) + "." + (version & 0xff) + ")");
while (true) {
String param = readString();
if (param.length() == 0) {
break;
}
String value = readString();
if ("user".equals(param)) {
this.userName = value;
} else if ("database".equals(param)) {
this.databaseName = server.checkKeyAndGetDatabaseName(value);
} else if ("client_encoding".equals(param)) {
// UTF8
clientEncoding = value;
} else if ("DateStyle".equals(param)) {
if (value.indexOf(',') < 0) {
value += ", MDY";
}
dateStyle = value;
}
// extra_float_digits 2
// geqo on (Genetic Query Optimization)
server.trace(" param " + param + "=" + value);
}
sendAuthenticationCleartextPassword();
initDone = true;
}
break;
case 'p': {
server.trace("PasswordMessage");
String password = readString();
try {
Properties info = new Properties();
info.put("MODE", "PostgreSQL");
info.put("USER", userName);
info.put("PASSWORD", password);
String url = "jdbc:h2:" + databaseName;
ConnectionInfo ci = new ConnectionInfo(url, info);
String baseDir = server.getBaseDir();
if (baseDir == null) {
baseDir = SysProperties.getBaseDir();
}
if (baseDir != null) {
ci.setBaseDir(baseDir);
}
if (server.getIfExists()) {
ci.setProperty("IFEXISTS", "TRUE");
}
conn = new JdbcConnection(ci, false);
// can not do this because when called inside
// DriverManager.getConnection, a deadlock occurs
// conn = DriverManager.getConnection(url, userName, password);
initDb();
sendAuthenticationOk();
} catch (Exception e) {
e.printStackTrace();
stop = true;
}
break;
}
case 'P': {
server.trace("Parse");
Prepared p = new Prepared();
p.name = readString();
p.sql = getSQL(readString());
int paramTypesCount = readShort();
int[] paramTypes = null;
if (paramTypesCount > 0) {
paramTypes = new int[paramTypesCount];
for (int i = 0; i < paramTypesCount; i++) {
paramTypes[i] = readInt();
}
}
try {
p.prep = (JdbcPreparedStatement) conn.prepareStatement(p.sql);
ParameterMetaData meta = p.prep.getParameterMetaData();
p.paramType = new int[meta.getParameterCount()];
for (int i = 0; i < p.paramType.length; i++) {
int type;
if (i < paramTypesCount && paramTypes[i] != 0) {
type = paramTypes[i];
server.checkType(type);
} else {
type = PgServer.convertType(meta.getParameterType(i + 1));
}
p.paramType[i] = type;
}
prepared.put(p.name, p);
sendParseComplete();
} catch (Exception e) {
sendErrorResponse(e);
}
break;
}
case 'B': {
server.trace("Bind");
Portal portal = new Portal();
portal.name = readString();
String prepName = readString();
Prepared prep = prepared.get(prepName);
if (prep == null) {
sendErrorResponse("Prepared not found");
break;
}
portal.prep = prep;
portals.put(portal.name, portal);
int formatCodeCount = readShort();
int[] formatCodes = new int[formatCodeCount];
for (int i = 0; i < formatCodeCount; i++) {
formatCodes[i] = readShort();
}
int paramCount = readShort();
try {
for (int i = 0; i < paramCount; i++) {
setParameter(prep.prep, prep.paramType[i], i, formatCodes);
}
} catch (Exception e) {
sendErrorResponse(e);
break;
}
int resultCodeCount = readShort();
portal.resultColumnFormat = new int[resultCodeCount];
for (int i = 0; i < resultCodeCount; i++) {
portal.resultColumnFormat[i] = readShort();
}
sendBindComplete();
break;
}
case 'C': {
char type = (char) readByte();
String name = readString();
server.trace("Close");
if (type == 'S') {
Prepared p = prepared.remove(name);
if (p != null) {
JdbcUtils.closeSilently(p.prep);
}
} else if (type == 'P') {
portals.remove(name);
} else {
server.trace("expected S or P, got " + type);
sendErrorResponse("expected S or P");
break;
}
sendCloseComplete();
break;
}
case 'D': {
char type = (char) readByte();
String name = readString();
server.trace("Describe");
if (type == 'S') {
Prepared p = prepared.get(name);
if (p == null) {
sendErrorResponse("Prepared not found: " + name);
} else {
try {
sendParameterDescription(p.prep.getParameterMetaData(), p.paramType);
sendRowDescription(p.prep.getMetaData());
} catch (Exception e) {
sendErrorResponse(e);
}
}
} else if (type == 'P') {
Portal p = portals.get(name);
if (p == null) {
sendErrorResponse("Portal not found: " + name);
} else {
PreparedStatement prep = p.prep.prep;
try {
ResultSetMetaData meta = prep.getMetaData();
sendRowDescription(meta);
} catch (Exception e) {
sendErrorResponse(e);
}
}
} else {
server.trace("expected S or P, got " + type);
sendErrorResponse("expected S or P");
}
break;
}
case 'E': {
String name = readString();
server.trace("Execute");
Portal p = portals.get(name);
if (p == null) {
sendErrorResponse("Portal not found: " + name);
break;
}
int maxRows = readShort();
Prepared prepared = p.prep;
JdbcPreparedStatement prep = prepared.prep;
server.trace(prepared.sql);
try {
prep.setMaxRows(maxRows);
setActiveRequest(prep);
boolean result = prep.execute();
if (result) {
try {
ResultSet rs = prep.getResultSet();
// the meta-data is sent in the prior 'Describe'
while (rs.next()) {
sendDataRow(rs, p.resultColumnFormat);
}
sendCommandComplete(prep, 0);
} catch (Exception e) {
sendErrorResponse(e);
}
} else {
sendCommandComplete(prep, prep.getUpdateCount());
}
} catch (Exception e) {
if (prep.isCancelled()) {
sendCancelQueryResponse();
} else {
sendErrorResponse(e);
}
} finally {
setActiveRequest(null);
}
break;
}
case 'S': {
server.trace("Sync");
sendReadyForQuery();
break;
}
case 'Q': {
server.trace("Query");
String query = readString();
ScriptReader reader = new ScriptReader(new StringReader(query));
while (true) {
JdbcStatement stat = null;
try {
String s = reader.readStatement();
if (s == null) {
break;
}
s = getSQL(s);
stat = (JdbcStatement) conn.createStatement();
setActiveRequest(stat);
boolean result = stat.execute(s);
if (result) {
ResultSet rs = stat.getResultSet();
ResultSetMetaData meta = rs.getMetaData();
try {
sendRowDescription(meta);
while (rs.next()) {
sendDataRow(rs, null);
}
sendCommandComplete(stat, 0);
} catch (Exception e) {
sendErrorResponse(e);
break;
}
} else {
sendCommandComplete(stat, stat.getUpdateCount());
}
} catch (SQLException e) {
if (stat != null && stat.isCancelled()) {
sendCancelQueryResponse();
} else {
sendErrorResponse(e);
}
break;
} finally {
JdbcUtils.closeSilently(stat);
setActiveRequest(null);
}
}
sendReadyForQuery();
break;
}
case 'X': {
server.trace("Terminate");
close();
break;
}
default:
server.trace("Unsupported: " + x + " (" + (char) x + ")");
break;
}
}
private String getSQL(String s) {
String lower = StringUtils.toLowerEnglish(s);
if (lower.startsWith("show max_identifier_length")) {
s = "CALL 63";
} else if (lower.startsWith("set client_encoding to")) {
s = "set DATESTYLE ISO";
}
// s = StringUtils.replaceAll(s, "i.indkey[ia.attnum-1]", "0");
if (server.getTrace()) {
server.trace(s + ";");
}
return s;
}
    /**
     * Send a CommandComplete ('C') message whose tag matches the last
     * executed command type (e.g. "INSERT 0 n", "UPDATE n", "SELECT").
     *
     * @param stat the statement that was executed
     * @param updateCount the update count to report (0 for queries)
     */
    private void sendCommandComplete(JdbcStatement stat, int updateCount)
            throws IOException {
        startMessage('C');
        switch (stat.getLastExecutedCommandType()) {
        case CommandInterface.INSERT:
            // PostgreSQL tag format is "INSERT <oid> <rows>"; oid is always 0
            writeStringPart("INSERT 0 ");
            writeString(Integer.toString(updateCount));
            break;
        case CommandInterface.UPDATE:
            writeStringPart("UPDATE ");
            writeString(Integer.toString(updateCount));
            break;
        case CommandInterface.DELETE:
            writeStringPart("DELETE ");
            writeString(Integer.toString(updateCount));
            break;
        case CommandInterface.SELECT:
        case CommandInterface.CALL:
            writeString("SELECT");
            break;
        case CommandInterface.BEGIN:
            writeString("BEGIN");
            break;
        default:
            // unknown command type: fall back to an UPDATE tag
            server.trace("check CommandComplete tag for command " + stat);
            writeStringPart("UPDATE ");
            writeString(Integer.toString(updateCount));
        }
        sendMessage();
    }
    /**
     * Send one DataRow ('D') message for the current row of the result set.
     *
     * @param rs the result set positioned on the row to send
     * @param formatCodes the format codes from the Bind message, or null for
     *            the simple query protocol (all columns as text)
     */
    private void sendDataRow(ResultSet rs, int[] formatCodes) throws Exception {
        ResultSetMetaData metaData = rs.getMetaData();
        int columns = metaData.getColumnCount();
        startMessage('D');
        writeShort(columns);
        for (int i = 1; i <= columns; i++) {
            int pgType = PgServer.convertType(metaData.getColumnType(i));
            boolean text = formatAsText(pgType);
            if (formatCodes != null) {
                // per the protocol: no codes = all text, one code = applies
                // to every column, otherwise one code per column (0 = text)
                if (formatCodes.length == 0) {
                    text = true;
                } else if (formatCodes.length == 1) {
                    text = formatCodes[0] == 0;
                } else if (i - 1 < formatCodes.length) {
                    text = formatCodes[i - 1] == 0;
                }
            }
            writeDataColumn(rs, i, pgType, text);
        }
        sendMessage();
    }
    /**
     * Convert an H2 encoded date value to PostgreSQL binary-format days.
     * PostgreSQL's binary DATE epoch is 2000-01-01; 10_957 is the number of
     * days from 1970-01-01 to 2000-01-01 (assumes the absolute day returned
     * by DateTimeUtils is relative to 1970-01-01 — consistent with the
     * timestamp handling in writeDataColumn).
     */
    private static long toPostgreDays(long dateValue) {
        return DateTimeUtils.prolepticGregorianAbsoluteDayFromDateValue(dateValue) - 10_957;
    }
    /**
     * Write one column of a DataRow message, either as text or in the
     * PostgreSQL binary wire format. Each column is written as a 4-byte
     * length followed by the payload; NULL is encoded as length -1 with
     * no payload.
     *
     * @param rs the result set (must be a JdbcResultSet)
     * @param column the 1-based column index
     * @param pgType the PostgreSQL type oid of the column
     * @param text true to send as text, false to send in binary format
     */
    private void writeDataColumn(ResultSet rs, int column, int pgType, boolean text)
            throws Exception {
        Value v = ((JdbcResultSet) rs).get(column);
        if (v == ValueNull.INSTANCE) {
            // NULL: length -1, no data bytes
            writeInt(-1);
            return;
        }
        if (text) {
            // plain text
            switch (pgType) {
            case PgServer.PG_TYPE_BOOL:
                // PostgreSQL text booleans are 't' / 'f'
                writeInt(1);
                dataOut.writeByte(v.getBoolean() ? 't' : 'f');
                break;
            default:
                byte[] data = v.getString().getBytes(getEncoding());
                writeInt(data.length);
                write(data);
            }
        } else {
            // binary
            switch (pgType) {
            case PgServer.PG_TYPE_INT2:
                writeInt(2);
                writeShort(v.getShort());
                break;
            case PgServer.PG_TYPE_INT4:
                writeInt(4);
                writeInt(v.getInt());
                break;
            case PgServer.PG_TYPE_INT8:
                writeInt(8);
                dataOut.writeLong(v.getLong());
                break;
            case PgServer.PG_TYPE_FLOAT4:
                writeInt(4);
                dataOut.writeFloat(v.getFloat());
                break;
            case PgServer.PG_TYPE_FLOAT8:
                writeInt(8);
                dataOut.writeDouble(v.getDouble());
                break;
            case PgServer.PG_TYPE_BYTEA: {
                byte[] data = v.getBytesNoCopy();
                writeInt(data.length);
                write(data);
                break;
            }
            case PgServer.PG_TYPE_DATE: {
                ValueDate d = (ValueDate) v.convertTo(Value.DATE);
                // binary DATE: days since 2000-01-01 as a 4-byte int
                writeInt(4);
                writeInt((int) (toPostgreDays(d.getDateValue())));
                break;
            }
            case PgServer.PG_TYPE_TIME: {
                ValueTime t = (ValueTime) v.convertTo(Value.TIME);
                writeInt(8);
                long m = t.getNanos();
                if (INTEGER_DATE_TYPES) {
                    // long format: microseconds since midnight
                    m /= 1_000;
                } else {
                    // double format: seconds since midnight as IEEE bits
                    m = Double.doubleToLongBits(m * 0.000_000_001);
                }
                dataOut.writeLong(m);
                break;
            }
            case PgServer.PG_TYPE_TIMESTAMP_NO_TMZONE: {
                ValueTimestamp t = (ValueTimestamp) v.convertTo(Value.TIMESTAMP);
                writeInt(8);
                // seconds since 2000-01-01, then scaled to micros or doubles
                long m = toPostgreDays(t.getDateValue()) * 86_400;
                long nanos = t.getTimeNanos();
                if (INTEGER_DATE_TYPES) {
                    // long format: microseconds since 2000-01-01
                    m = m * 1_000_000 + nanos / 1_000;
                } else {
                    // double format: seconds since 2000-01-01 as IEEE bits
                    m = Double.doubleToLongBits(m + nanos * 0.000_000_001);
                }
                dataOut.writeLong(m);
                break;
            }
            default: throw new IllegalStateException("output binary format is undefined");
            }
        }
    }
private Charset getEncoding() {
if ("UNICODE".equals(clientEncoding)) {
return StandardCharsets.UTF_8;
}
return Charset.forName(clientEncoding);
}
    /**
     * Read one parameter value from a Bind message and set it on the
     * prepared statement, honoring the per-parameter format code
     * (0 = text, 1 = binary).
     *
     * @param prep the prepared statement to bind into
     * @param pgType the PostgreSQL type oid expected for this parameter
     * @param i the 0-based parameter index
     * @param formatCodes the format codes from the Bind message
     */
    private void setParameter(PreparedStatement prep,
            int pgType, int i, int[] formatCodes) throws SQLException, IOException {
        // out-of-range index means text, per the protocol defaulting rules
        boolean text = (i >= formatCodes.length) || (formatCodes[i] == 0);
        int col = i + 1;
        int paramLen = readInt();
        if (paramLen == -1) {
            // length -1 encodes SQL NULL
            prep.setNull(col, Types.NULL);
        } else if (text) {
            // plain text
            byte[] data = Utils.newBytes(paramLen);
            readFully(data);
            String str = new String(data, getEncoding());
            switch (pgType) {
            case PgServer.PG_TYPE_DATE: {
                // Strip timezone offset
                int idx = str.indexOf(' ');
                if (idx > 0) {
                    str = str.substring(0, idx);
                }
                break;
            }
            case PgServer.PG_TYPE_TIME: {
                // Strip timezone offset
                int idx = str.indexOf('+');
                if (idx <= 0) {
                    idx = str.indexOf('-');
                }
                if (idx > 0) {
                    str = str.substring(0, idx);
                }
                break;
            }
            }
            // H2 converts the string to the column's type on execution
            prep.setString(col, str);
        } else {
            // binary
            switch (pgType) {
            case PgServer.PG_TYPE_INT2:
                checkParamLength(2, paramLen);
                prep.setShort(col, readShort());
                break;
            case PgServer.PG_TYPE_INT4:
                checkParamLength(4, paramLen);
                prep.setInt(col, readInt());
                break;
            case PgServer.PG_TYPE_INT8:
                checkParamLength(8, paramLen);
                prep.setLong(col, dataIn.readLong());
                break;
            case PgServer.PG_TYPE_FLOAT4:
                checkParamLength(4, paramLen);
                prep.setFloat(col, dataIn.readFloat());
                break;
            case PgServer.PG_TYPE_FLOAT8:
                checkParamLength(8, paramLen);
                prep.setDouble(col, dataIn.readDouble());
                break;
            case PgServer.PG_TYPE_BYTEA:
                byte[] d1 = Utils.newBytes(paramLen);
                readFully(d1);
                prep.setBytes(col, d1);
                break;
            default:
                // unknown binary format: treat the payload as an encoded string
                server.trace("Binary format for type: "+pgType+" is unsupported");
                byte[] d2 = Utils.newBytes(paramLen);
                readFully(d2);
                prep.setString(col, new String(d2, getEncoding()));
            }
        }
    }
    /**
     * Verify that a binary parameter payload has exactly the length
     * required by its type.
     *
     * @param expected the required length in bytes
     * @param got the length received from the client
     * @throws org.h2.message.DbException if the lengths differ
     */
    private static void checkParamLength(int expected, int got) {
        if (expected != got) {
            throw DbException.getInvalidValueException("paramLen", got);
        }
    }
    /**
     * Send an ErrorResponse ('E') message for the given exception.
     * Fields: S = severity, C = SQLSTATE, M = message, D = detail;
     * the field list is terminated by a zero byte.
     *
     * @param re the exception to report to the client
     */
    private void sendErrorResponse(Exception re) throws IOException {
        SQLException e = DbException.toSQLException(re);
        server.traceError(e);
        startMessage('E');
        write('S');
        writeString("ERROR");
        write('C');
        writeString(e.getSQLState());
        write('M');
        writeString(e.getMessage());
        write('D');
        writeString(e.toString());
        // terminator of the error field list
        write(0);
        sendMessage();
    }
    /**
     * Send the ErrorResponse that PostgreSQL clients expect after a
     * successful query cancellation (SQLSTATE 57014, query_canceled).
     */
    private void sendCancelQueryResponse() throws IOException {
        server.trace("CancelSuccessResponse");
        startMessage('E');
        write('S');
        writeString("ERROR");
        write('C');
        writeString("57014");
        write('M');
        writeString("canceling statement due to user request");
        write(0);
        sendMessage();
    }
private void sendParameterDescription(ParameterMetaData meta,
int[] paramTypes) throws Exception {
int count = meta.getParameterCount();
startMessage('t');
writeShort(count);
for (int i = 0; i < count; i++) {
int type;
if (paramTypes != null && paramTypes[i] != 0) {
type = paramTypes[i];
} else {
type = PgServer.PG_TYPE_VARCHAR;
}
server.checkType(type);
writeInt(type);
}
sendMessage();
}
    /** Send a NoData ('n') message: the statement returns no rows. */
    private void sendNoData() throws IOException {
        startMessage('n');
        sendMessage();
    }
    /**
     * Send a RowDescription ('T') message describing the columns of a
     * result set, or NoData when there is no result set meta data.
     *
     * @param meta the result set meta data, or null for no result
     */
    private void sendRowDescription(ResultSetMetaData meta) throws Exception {
        if (meta == null) {
            sendNoData();
        } else {
            int columns = meta.getColumnCount();
            int[] types = new int[columns];
            int[] precision = new int[columns];
            String[] names = new String[columns];
            for (int i = 0; i < columns; i++) {
                String name = meta.getColumnName(i + 1);
                names[i] = name;
                int type = meta.getColumnType(i + 1);
                int pgType = PgServer.convertType(type);
                // the ODBC client needs the column pg_catalog.pg_index
                // to be of type 'int2vector'
                // if (name.equalsIgnoreCase("indkey") &&
                //         "pg_index".equalsIgnoreCase(
                //         meta.getTableName(i + 1))) {
                //     type = PgServer.PG_TYPE_INT2VECTOR;
                // }
                precision[i] = meta.getColumnDisplaySize(i + 1);
                if (type != Types.NULL) {
                    server.checkType(pgType);
                }
                types[i] = pgType;
            }
            startMessage('T');
            writeShort(columns);
            for (int i = 0; i < columns; i++) {
                // PostgreSQL column names are conventionally lower case
                writeString(StringUtils.toLowerEnglish(names[i]));
                // object ID
                writeInt(0);
                // attribute number of the column
                writeShort(0);
                // data type
                writeInt(types[i]);
                // pg_type.typlen
                writeShort(getTypeSize(types[i], precision[i]));
                // pg_attribute.atttypmod
                writeInt(-1);
                // the format type: text = 0, binary = 1
                writeShort(formatAsText(types[i]) ? 0 : 1);
            }
            sendMessage();
        }
    }
    /**
     * Check whether the given type should be formatted as text.
     * Only BYTEA is sent in binary format; everything else is text.
     *
     * @param pgType the PostgreSQL type oid
     * @return true for text format, false for binary
     */
    private static boolean formatAsText(int pgType) {
        switch (pgType) {
        // TODO: add more types to send as binary once compatibility is
        // confirmed
        case PgServer.PG_TYPE_BYTEA:
            return false;
        }
        return true;
    }
    /**
     * Estimate the pg_type.typlen value reported in RowDescription.
     *
     * @param pgType the PostgreSQL type oid
     * @param precision the column display size
     * @return the reported type size in bytes
     */
    private static int getTypeSize(int pgType, int precision) {
        switch (pgType) {
        case PgServer.PG_TYPE_BOOL:
            return 1;
        case PgServer.PG_TYPE_VARCHAR:
            return Math.max(255, precision + 10);
        default:
            return precision + 4;
        }
    }
private void sendErrorResponse(String message) throws IOException {
server.trace("Exception: " + message);
startMessage('E');
write('S');
writeString("ERROR");
write('C');
// PROTOCOL VIOLATION
writeString("08P01");
write('M');
writeString(message);
sendMessage();
}
    /** Send a ParseComplete ('1') message. */
    private void sendParseComplete() throws IOException {
        startMessage('1');
        sendMessage();
    }
    /** Send a BindComplete ('2') message. */
    private void sendBindComplete() throws IOException {
        startMessage('2');
        sendMessage();
    }
    /** Send a CloseComplete ('3') message. */
    private void sendCloseComplete() throws IOException {
        startMessage('3');
        sendMessage();
    }
    /**
     * Initialize the session after login: install or upgrade the PG_CATALOG
     * emulation schema if needed, set the search path, and cache the known
     * type oids in the server's type set.
     *
     * @throws SQLException on any database error
     */
    private void initDb() throws SQLException {
        Statement stat = null;
        try {
            synchronized (server) {
                // better would be: set the database to exclusive mode
                boolean tableFound;
                try (ResultSet rs = conn.getMetaData().getTables(null, "PG_CATALOG", "PG_VERSION", null)) {
                    tableFound = rs.next();
                }
                stat = conn.createStatement();
                if (!tableFound) {
                    installPgCatalog(stat);
                }
                try (ResultSet rs = stat.executeQuery("select * from pg_catalog.pg_version")) {
                    if (!rs.next() || rs.getInt(1) < 2) {
                        // installation incomplete, or old version
                        installPgCatalog(stat);
                    } else {
                        // version 2 or newer: check the read version
                        int versionRead = rs.getInt(2);
                        if (versionRead > 2) {
                            throw DbException.throwInternalError("Incompatible PG_VERSION");
                        }
                    }
                }
            }
            stat.execute("set search_path = PUBLIC, pg_catalog");
            // cache the type oids so later checkType calls can validate them
            HashSet<Integer> typeSet = server.getTypeSet();
            if (typeSet.isEmpty()) {
                try (ResultSet rs = stat.executeQuery("select oid from pg_catalog.pg_type")) {
                    while (rs.next()) {
                        typeSet.add(rs.getInt(1));
                    }
                }
            }
        } finally {
            JdbcUtils.closeSilently(stat);
        }
    }
private static void installPgCatalog(Statement stat) throws SQLException {
try (Reader r = new InputStreamReader(new ByteArrayInputStream(Utils
.getResource("/org/h2/server/pg/pg_catalog.sql")))) {
ScriptReader reader = new ScriptReader(r);
while (true) {
String sql = reader.readStatement();
if (sql == null) {
break;
}
stat.execute(sql);
}
reader.close();
} catch (IOException e) {
throw DbException.convertIOException(e, "Can not read pg_catalog resource");
}
}
    /**
     * Close this connection: stop the read loop, close the JDBC connection
     * and the socket, and deregister from the server. Errors during close
     * are traced but otherwise ignored.
     */
    void close() {
        try {
            stop = true;
            JdbcUtils.closeSilently(conn);
            if (socket != null) {
                socket.close();
            }
            server.trace("Close");
        } catch (Exception e) {
            server.traceError(e);
        }
        conn = null;
        socket = null;
        server.remove(this);
    }
    /**
     * Send AuthenticationCleartextPassword ('R' with code 3), asking the
     * client to send its password in clear text.
     */
    private void sendAuthenticationCleartextPassword() throws IOException {
        startMessage('R');
        writeInt(3);
        sendMessage();
    }
private void sendAuthenticationOk() throws IOException {
startMessage('R');
writeInt(0);
sendMessage();
sendParameterStatus("client_encoding", clientEncoding);
sendParameterStatus("DateStyle", dateStyle);
sendParameterStatus("integer_datetimes", "off");
sendParameterStatus("is_superuser", "off");
sendParameterStatus("server_encoding", "SQL_ASCII");
sendParameterStatus("server_version", Constants.PG_VERSION);
sendParameterStatus("session_authorization", userName);
sendParameterStatus("standard_conforming_strings", "off");
// TODO PostgreSQL TimeZone
sendParameterStatus("TimeZone", "CET");
sendParameterStatus("integer_datetimes", INTEGER_DATE_TYPES ? "on" : "off");
sendBackendKeyData();
sendReadyForQuery();
}
    /**
     * Send ReadyForQuery ('Z') with the transaction status indicator:
     * 'I' = idle, 'T' = in a transaction block, 'E' = failed transaction.
     */
    private void sendReadyForQuery() throws IOException {
        startMessage('Z');
        char c;
        try {
            if (conn.getAutoCommit()) {
                // idle
                c = 'I';
            } else {
                // in a transaction block
                c = 'T';
            }
        } catch (SQLException e) {
            // failed transaction block
            c = 'E';
        }
        write((byte) c);
        sendMessage();
    }
    /**
     * Send BackendKeyData ('K'): the process id and secret the client must
     * echo in a CancelRequest to cancel a running query.
     */
    private void sendBackendKeyData() throws IOException {
        startMessage('K');
        writeInt(processId);
        writeInt(secret);
        sendMessage();
    }
    /** Write a null-terminated string in the client encoding. */
    private void writeString(String s) throws IOException {
        writeStringPart(s);
        write(0);
    }
    /** Write a string in the client encoding without a terminator. */
    private void writeStringPart(String s) throws IOException {
        write(s.getBytes(getEncoding()));
    }
    /** Write a 4-byte big-endian int to the current message buffer. */
    private void writeInt(int i) throws IOException {
        dataOut.writeInt(i);
    }
    /** Write a 2-byte big-endian short to the current message buffer. */
    private void writeShort(int i) throws IOException {
        dataOut.writeShort(i);
    }
    /** Write raw bytes to the current message buffer. */
    private void write(byte[] data) throws IOException {
        dataOut.write(data);
    }
    /** Write a single byte to the current message buffer. */
    private void write(int b) throws IOException {
        dataOut.write(b);
    }
    /**
     * Begin a new protocol message of the given type. The body is buffered
     * so that sendMessage can prepend the type byte and the length.
     *
     * @param newMessageType the one-byte message type code
     */
    private void startMessage(int newMessageType) {
        this.messageType = newMessageType;
        outBuffer = new ByteArrayOutputStream();
        dataOut = new DataOutputStream(outBuffer);
    }
    /**
     * Flush the buffered message to the socket: type byte, then the length
     * (which per the protocol includes the 4 length bytes themselves),
     * then the body.
     */
    private void sendMessage() throws IOException {
        dataOut.flush();
        byte[] buff = outBuffer.toByteArray();
        int len = buff.length;
        // redirect subsequent writes to the socket stream
        dataOut = new DataOutputStream(out);
        dataOut.write(messageType);
        dataOut.writeInt(len + 4);
        dataOut.write(buff);
        dataOut.flush();
    }
    /**
     * Send a ParameterStatus ('S') message reporting a run-time setting.
     *
     * @param param the parameter name
     * @param value the parameter value
     */
    private void sendParameterStatus(String param, String value)
            throws IOException {
        startMessage('S');
        writeString(param);
        writeString(value);
        sendMessage();
    }
    /** Set the thread that runs this connection's read loop. */
    void setThread(Thread thread) {
        this.thread = thread;
    }
    /** Get the thread that runs this connection's read loop. */
    Thread getThread() {
        return thread;
    }
    /** Set the process id reported in BackendKeyData. */
    void setProcessId(int id) {
        this.processId = id;
    }
    /** Get the process id reported in BackendKeyData. */
    int getProcessId() {
        return this.processId;
    }
    /**
     * Remember the statement currently executing, so cancelRequest can
     * cancel it; synchronized against concurrent cancelRequest calls.
     */
    private synchronized void setActiveRequest(JdbcStatement statement) {
        activeRequest = statement;
    }
    /**
     * Kill a currently running query on this thread.
     * No-op when no statement is active; synchronized against
     * setActiveRequest so the statement cannot change mid-cancel.
     */
    private synchronized void cancelRequest() {
        if (activeRequest != null) {
            try {
                activeRequest.cancel();
                activeRequest = null;
            } catch (SQLException e) {
                throw DbException.convert(e);
            }
        }
    }
    /**
     * Represents a PostgreSQL Prepared object: a named, parsed statement
     * created by the 'P' (Parse) message and referenced by Bind/Describe/
     * Close messages.
     */
    static class Prepared {

        /**
         * The object name.
         */
        String name;

        /**
         * The SQL statement.
         */
        String sql;

        /**
         * The prepared statement.
         */
        JdbcPreparedStatement prep;

        /**
         * The list of parameter types (if set).
         */
        int[] paramType;
    }
    /**
     * Represents a PostgreSQL Portal object: a prepared statement bound to
     * parameter values by the 'B' (Bind) message, ready for Execute.
     */
    static class Portal {

        /**
         * The portal name.
         */
        String name;

        /**
         * The format used in the result set columns (if set).
         */
        int[] resultColumnFormat;

        /**
         * The prepared object.
         */
        Prepared prep;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/ConnectionInfo.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import org.h2.util.StringUtils;
/**
* The connection info object is a wrapper for database connection information
* such as the database URL, user name and password.
* This class is used by the H2 Console.
*/
public class ConnectionInfo implements Comparable<ConnectionInfo> {

    /**
     * The driver class name.
     */
    public String driver;

    /**
     * The database URL.
     */
    public String url;

    /**
     * The user name.
     */
    public String user;

    /**
     * The connection display name.
     */
    String name;

    /**
     * The last time this connection was used.
     */
    int lastAccess;

    ConnectionInfo() {
        // nothing to do
    }

    /**
     * Parse a connection entry serialized by getString().
     *
     * @param data the '|'-separated fields: name, driver, url, user
     */
    public ConnectionInfo(String data) {
        String[] parts = StringUtils.arraySplit(data, '|', false);
        name = get(parts, 0);
        driver = get(parts, 1);
        url = get(parts, 2);
        user = get(parts, 3);
    }

    // Returns the i-th element, or "" when the array is missing or too short.
    private static String get(String[] parts, int index) {
        if (parts == null || index >= parts.length) {
            return "";
        }
        return parts[index];
    }

    // Serialize the entry in the format the String constructor parses.
    String getString() {
        String[] fields = { name, driver, url, user };
        return StringUtils.arrayCombine(fields, '|');
    }

    @Override
    public int compareTo(ConnectionInfo o) {
        // most recently used entries sort first (descending lastAccess);
        // NOTE(review): ordering is not consistent with equals/hashCode
        return Integer.compare(o.lastAccess, lastAccess);
    }

}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/DbStarter.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import org.h2.tools.Server;
import org.h2.util.StringUtils;
/**
* This class can be used to start the H2 TCP server (or other H2 servers, for
* example the PG server) inside a web application container such as Tomcat or
* Jetty. It can also open a database connection.
*/
public class DbStarter implements ServletContextListener {

    // connection opened on startup and shared via the "connection" attribute
    private Connection conn;
    // optional TCP (or other) server started from the db.tcpServer parameter
    private Server server;

    /**
     * Load the H2 driver, optionally start an H2 server, open a database
     * connection, and publish it as the servlet context attribute
     * "connection". Failures are printed but do not abort deployment.
     */
    @Override
    public void contextInitialized(ServletContextEvent servletContextEvent) {
        try {
            org.h2.Driver.load();
            // This will get the setting from a context-param in web.xml if
            // defined:
            ServletContext servletContext = servletContextEvent.getServletContext();
            String url = getParameter(servletContext, "db.url", "jdbc:h2:~/test");
            String user = getParameter(servletContext, "db.user", "sa");
            String password = getParameter(servletContext, "db.password", "sa");
            // Start the server if configured to do so
            String serverParams = getParameter(servletContext, "db.tcpServer", null);
            if (serverParams != null) {
                String[] params = StringUtils.arraySplit(serverParams, ' ', true);
                server = Server.createTcpServer(params);
                server.start();
            }
            // To access the database in server mode, use the database URL:
            // jdbc:h2:tcp://localhost/~/test
            conn = DriverManager.getConnection(url, user, password);
            servletContext.setAttribute("connection", conn);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Read a context-param, falling back to the default when absent.
    private static String getParameter(ServletContext servletContext,
            String key, String defaultValue) {
        String value = servletContext.getInitParameter(key);
        return value == null ? defaultValue : value;
    }

    /**
     * Get the connection.
     *
     * @return the connection
     */
    public Connection getConnection() {
        return conn;
    }

    /**
     * Shut down the database, close the connection, and stop the server
     * (if one was started). Errors are printed but ignored so the rest of
     * the teardown still runs.
     */
    @Override
    public void contextDestroyed(ServletContextEvent servletContextEvent) {
        try {
            Statement stat = conn.createStatement();
            stat.execute("SHUTDOWN");
            stat.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
        try {
            conn.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
        if (server != null) {
            server.stop();
            server = null;
        }
    }

}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/PageParser.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import java.text.ParseException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.h2.util.New;
/**
* A page parser can parse an HTML page and replace the tags there.
* This class is used by the H2 Console.
*/
public class PageParser {
private static final int TAB_WIDTH = 4;
private final String page;
private int pos;
private final Map<String, Object> settings;
private final int len;
private StringBuilder result;
private PageParser(String page, Map<String, Object> settings, int pos) {
this.page = page;
this.pos = pos;
this.len = page.length();
this.settings = settings;
result = new StringBuilder(len);
}
/**
* Replace the tags in the HTML page with the given settings.
*
* @param page the HTML page
* @param settings the settings
* @return the converted page
*/
public static String parse(String page, Map<String, Object> settings) {
PageParser block = new PageParser(page, settings, 0);
return block.replaceTags();
}
private void setError(int i) {
String s = page.substring(0, i) + "####BUG####" + page.substring(i);
s = PageParser.escapeHtml(s);
result = new StringBuilder();
result.append(s);
}
private String parseBlockUntil(String end) throws ParseException {
PageParser block = new PageParser(page, settings, pos);
block.parseAll();
if (!block.readIf(end)) {
throw new ParseException(page, block.pos);
}
pos = block.pos;
return block.result.toString();
}
private String replaceTags() {
try {
parseAll();
if (pos != len) {
setError(pos);
}
} catch (ParseException e) {
setError(pos);
}
return result.toString();
}
@SuppressWarnings("unchecked")
private void parseAll() throws ParseException {
StringBuilder buff = result;
String p = page;
int i = pos;
for (; i < len; i++) {
char c = p.charAt(i);
switch (c) {
case '<': {
if (p.charAt(i + 3) == ':' && p.charAt(i + 1) == '/') {
// end tag
pos = i;
return;
} else if (p.charAt(i + 2) == ':') {
pos = i;
if (readIf("<c:forEach")) {
String var = readParam("var");
String items = readParam("items");
read(">");
int start = pos;
List<Object> list = (List<Object>) get(items);
if (list == null) {
result.append("?items?");
list = New.arrayList();
}
if (list.isEmpty()) {
parseBlockUntil("</c:forEach>");
}
for (Object o : list) {
settings.put(var, o);
pos = start;
String block = parseBlockUntil("</c:forEach>");
result.append(block);
}
} else if (readIf("<c:if")) {
String test = readParam("test");
int eq = test.indexOf("=='");
if (eq < 0) {
setError(i);
return;
}
String val = test.substring(eq + 3, test.length() - 1);
test = test.substring(0, eq);
String value = (String) get(test);
read(">");
String block = parseBlockUntil("</c:if>");
pos--;
if (value.equals(val)) {
result.append(block);
}
} else {
setError(i);
return;
}
i = pos;
} else {
buff.append(c);
}
break;
}
case '$':
if (p.length() > i + 1 && p.charAt(i + 1) == '{') {
i += 2;
int j = p.indexOf('}', i);
if (j < 0) {
setError(i);
return;
}
String item = p.substring(i, j).trim();
i = j;
String s = (String) get(item);
replaceTags(s);
} else {
buff.append(c);
}
break;
default:
buff.append(c);
break;
}
}
pos = i;
}
@SuppressWarnings("unchecked")
private Object get(String item) {
int dot = item.indexOf('.');
if (dot >= 0) {
String sub = item.substring(dot + 1);
item = item.substring(0, dot);
HashMap<String, Object> map = (HashMap<String, Object>) settings.get(item);
if (map == null) {
return "?" + item + "?";
}
return map.get(sub);
}
return settings.get(item);
}
private void replaceTags(String s) {
if (s != null) {
result.append(PageParser.parse(s, settings));
}
}
private String readParam(String name) throws ParseException {
read(name);
read("=");
read("\"");
int start = pos;
while (page.charAt(pos) != '"') {
pos++;
}
int end = pos;
read("\"");
String s = page.substring(start, end);
return PageParser.parse(s, settings);
}
private void skipSpaces() {
while (page.charAt(pos) == ' ') {
pos++;
}
}
private void read(String s) throws ParseException {
if (!readIf(s)) {
throw new ParseException(s, pos);
}
}
private boolean readIf(String s) {
skipSpaces();
if (page.regionMatches(pos, s, 0, s.length())) {
pos += s.length();
skipSpaces();
return true;
}
return false;
}
/**
* Convert data to HTML, but don't convert newlines and multiple spaces.
*
* @param s the data
* @return the escaped html text
*/
static String escapeHtmlData(String s) {
return escapeHtml(s, false);
}
/**
* Convert data to HTML, including newlines and multiple spaces.
*
* @param s the data
* @return the escaped html text
*/
public static String escapeHtml(String s) {
return escapeHtml(s, true);
}
private static String escapeHtml(String s, boolean convertBreakAndSpace) {
if (s == null) {
return null;
}
if (convertBreakAndSpace) {
if (s.length() == 0) {
return " ";
}
}
StringBuilder buff = new StringBuilder(s.length());
boolean convertSpace = true;
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
if (c == ' ' || c == '\t') {
// convert tabs into spaces
for (int j = 0; j < (c == ' ' ? 1 : TAB_WIDTH); j++) {
if (convertSpace && convertBreakAndSpace) {
buff.append(" ");
} else {
buff.append(' ');
convertSpace = true;
}
}
continue;
}
convertSpace = false;
switch (c) {
case '$':
// so that ${ } in the text is interpreted correctly
buff.append("$");
break;
case '<':
buff.append("<");
break;
case '>':
buff.append(">");
break;
case '&':
buff.append("&");
break;
case '"':
buff.append(""");
break;
case '\'':
buff.append("'");
break;
case '\n':
if (convertBreakAndSpace) {
buff.append("<br />");
convertSpace = true;
} else {
buff.append(c);
}
break;
default:
if (c >= 128) {
buff.append("&#").append((int) c).append(';');
} else {
buff.append(c);
}
break;
}
}
return buff.toString();
}
/**
* Escape text as a the javascript string.
*
* @param s the text
* @return the javascript string
*/
static String escapeJavaScript(String s) {
if (s == null) {
return null;
}
if (s.length() == 0) {
return "";
}
StringBuilder buff = new StringBuilder(s.length());
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
switch (c) {
case '"':
buff.append("\\\"");
break;
case '\'':
buff.append("\\'");
break;
case '\\':
buff.append("\\\\");
break;
case '\n':
buff.append("\\n");
break;
case '\r':
buff.append("\\r");
break;
case '\t':
buff.append("\\t");
break;
default:
buff.append(c);
break;
}
}
return buff.toString();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/WebApp.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.StringReader;
import java.io.StringWriter;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import org.h2.api.ErrorCode;
import org.h2.bnf.Bnf;
import org.h2.bnf.context.DbColumn;
import org.h2.bnf.context.DbContents;
import org.h2.bnf.context.DbSchema;
import org.h2.bnf.context.DbTableOrView;
import org.h2.engine.Constants;
import org.h2.engine.SysProperties;
import org.h2.jdbc.JdbcSQLException;
import org.h2.message.DbException;
import org.h2.security.SHA256;
import org.h2.tools.Backup;
import org.h2.tools.ChangeFileEncryption;
import org.h2.tools.ConvertTraceFile;
import org.h2.tools.CreateCluster;
import org.h2.tools.DeleteDbFiles;
import org.h2.tools.Recover;
import org.h2.tools.Restore;
import org.h2.tools.RunScript;
import org.h2.tools.Script;
import org.h2.tools.SimpleResultSet;
import org.h2.util.JdbcUtils;
import org.h2.util.New;
import org.h2.util.Profiler;
import org.h2.util.ScriptReader;
import org.h2.util.SortedProperties;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.util.Tool;
import org.h2.util.Utils;
/**
* For each connection to a session, an object of this class is created.
* This class is used by the H2 Console.
*/
public class WebApp {
/**
* The web server that owns this application instance.
*/
protected final WebServer server;
/**
* The session, or null until a session has been created for this client.
*/
protected WebSession session;
/**
* The session attributes (the parameters of the current request).
*/
protected Properties attributes;
/**
* The mime type of the current response.
*/
protected String mimeType;
/**
* Whether the response can be cached.
*/
protected boolean cache;
/**
* Whether to close the connection.
*/
protected boolean stop;
/**
* The language in the HTTP header.
*/
protected String headerLanguage;
// CPU profiler consumed by the @prof_stop built-in command; started
// elsewhere (presumably by a matching @prof_start command - not visible here)
private Profiler profiler;
/**
* Create a new application instance for the given web server.
*
* @param server the owning web server
*/
WebApp(WebServer server) {
this.server = server;
}
/**
* Set the web session and attributes for the current request.
* Called once per request before processing starts.
*
* @param session the session
* @param attributes the attributes (request parameters)
*/
void setSession(WebSession session, Properties attributes) {
this.session = session;
this.attributes = attributes;
}
/**
 * Process an HTTP request: determine the mime type and cacheability from
 * the file suffix, create a session for dynamic pages if needed, and
 * dispatch ".do" actions.
 *
 * @param file the file that was requested
 * @param hostAddr the host address
 * @return the name of the file to return to the client
 */
String processRequest(String file, String hostAddr) {
    int dot = file.lastIndexOf('.');
    String suffix = dot < 0 ? "" : file.substring(dot + 1);
    switch (suffix) {
    case "ico":
        mimeType = "image/x-icon";
        cache = true;
        break;
    case "gif":
        mimeType = "image/gif";
        cache = true;
        break;
    case "css":
        cache = true;
        mimeType = "text/css";
        break;
    case "html":
    case "do":
    case "jsp":
        // dynamic pages: never cache, and make sure a session exists
        cache = false;
        mimeType = "text/html";
        if (session == null) {
            session = server.createNewSession(hostAddr);
            if (!"notAllowed.jsp".equals(file)) {
                file = "index.do";
            }
        }
        break;
    case "js":
        cache = true;
        mimeType = "text/javascript";
        break;
    default:
        cache = true;
        mimeType = "application/octet-stream";
    }
    trace("mimeType=" + mimeType);
    trace(file);
    if (file.endsWith(".do")) {
        file = process(file);
    }
    return file;
}
/**
 * Build the HTML option list for a combo box where each element is used
 * as both the option value and the visible label.
 *
 * @param elements the list of values
 * @param selected the value to mark as selected (may not match any)
 * @return the HTML fragment
 */
private static String getComboBox(String[] elements, String selected) {
    StringBuilder html = new StringBuilder();
    for (String element : elements) {
        html.append("<option value=\"");
        html.append(PageParser.escapeHtmlData(element));
        html.append('\"');
        if (element.equals(selected)) {
            html.append(" selected");
        }
        html.append('>');
        html.append(PageParser.escapeHtml(element));
        html.append("</option>");
    }
    return html.toString();
}
/**
 * Build the HTML option list for a combo box from value/label pairs.
 *
 * @param elements pairs where [0] is the option value and [1] the label
 * @param selected the value to mark as selected (may not match any)
 * @return the HTML fragment
 */
private static String getComboBox(String[][] elements, String selected) {
    StringBuilder html = new StringBuilder();
    for (String[] pair : elements) {
        html.append("<option value=\"");
        html.append(PageParser.escapeHtmlData(pair[0]));
        html.append('\"');
        if (pair[0].equals(selected)) {
            html.append(" selected");
        }
        html.append('>');
        html.append(PageParser.escapeHtml(pair[1]));
        html.append("</option>");
    }
    return html.toString();
}
/**
 * Dispatch a ".do" action to its handler. Handlers may return another
 * ".do" action (for example logout returns "index.do"), so this loops
 * until a plain page name is produced.
 *
 * @param file the requested action
 * @return the page to display
 */
private String process(String file) {
    trace("process " + file);
    while (file.endsWith(".do")) {
        switch (file) {
        case "login.do":
            file = login();
            break;
        case "index.do":
            file = index();
            break;
        case "logout.do":
            file = logout();
            break;
        case "settingRemove.do":
            file = settingRemove();
            break;
        case "settingSave.do":
            file = settingSave();
            break;
        case "test.do":
            file = test();
            break;
        case "query.do":
            file = query();
            break;
        case "tables.do":
            file = tables();
            break;
        case "editResult.do":
            file = editResult();
            break;
        case "getHistory.do":
            file = getHistory();
            break;
        case "admin.do":
            file = admin();
            break;
        case "adminSave.do":
            file = adminSave();
            break;
        case "adminStartTranslate.do":
            file = adminStartTranslate();
            break;
        case "adminShutdown.do":
            file = adminShutdown();
            break;
        case "autoCompleteList.do":
            file = autoCompleteList();
            break;
        case "tools.do":
            file = tools();
            break;
        default:
            file = "error.jsp";
        }
    }
    trace("return " + file);
    return file;
}
/**
* Compute the auto-complete suggestion list for the current query text and
* store it in the session under "autoCompleteList". Entries are encoded as
* "type#key#value" and joined with '|'; the page autoCompleteList.jsp
* renders them.
*
* @return the page to display
*/
private String autoCompleteList() {
String query = (String) attributes.get("query");
// if the user types in lowercase, suggest lowercase keywords as well
boolean lowercase = false;
if (query.trim().length() > 0 &&
Character.isLowerCase(query.trim().charAt(0))) {
lowercase = true;
}
try {
String sql = query;
if (sql.endsWith(";")) {
sql += " ";
}
// only the last statement of the script is completed
ScriptReader reader = new ScriptReader(new StringReader(sql));
reader.setSkipRemarks(true);
String lastSql = "";
while (true) {
String n = reader.readStatement();
if (n == null) {
break;
}
lastSql = n;
}
String result = "";
if (reader.isInsideRemark()) {
// inside a comment: only suggest how to close it
if (reader.isBlockRemark()) {
result = "1#(End Remark)# */\n" + result;
} else {
result = "1#(Newline)#\n" + result;
}
} else {
sql = lastSql;
// strip leading whitespace/control characters
while (sql.length() > 0 && sql.charAt(0) <= ' ') {
sql = sql.substring(1);
}
if (sql.trim().length() > 0 && Character.isLowerCase(sql.trim().charAt(0))) {
lowercase = true;
}
Bnf bnf = session.getBnf();
if (bnf == null) {
return "autoCompleteList.jsp";
}
HashMap<String, String> map = bnf.getNextTokenList(sql);
// whether a space must be inserted before the next token
String space = "";
if (sql.length() > 0) {
char last = sql.charAt(sql.length() - 1);
if (!Character.isWhitespace(last) && (last != '.' &&
last >= ' ' && last != '\'' && last != '"')) {
space = " ";
}
}
ArrayList<String> list = new ArrayList<>(map.size());
for (Map.Entry<String, String> entry : map.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
// the first character of the key is the suggestion category;
// categories above 2 are not shown
String type = "" + key.charAt(0);
if (Integer.parseInt(type) > 2) {
continue;
}
key = key.substring(2);
if (Character.isLetter(key.charAt(0)) && lowercase) {
key = StringUtils.toLowerEnglish(key);
value = StringUtils.toLowerEnglish(value);
}
if (key.equals(value) && !".".equals(value)) {
value = space + value;
}
// URL-encode but keep spaces readable
key = StringUtils.urlEncode(key);
key = key.replace('+', ' ');
value = StringUtils.urlEncode(value);
value = value.replace('+', ' ');
list.add(type + "#" + key + "#" + value);
}
Collections.sort(list);
if (query.endsWith("\n") || query.trim().endsWith(";")) {
list.add(0, "1#(Newline)#\n");
}
StatementBuilder buff = new StatementBuilder();
for (String s : list) {
buff.appendExceptFirst("|");
buff.append(s);
}
result = buff.toString();
}
session.put("autoCompleteList", result);
} catch (Throwable e) {
// auto-complete is best-effort: never fail the request because of it
server.traceError(e);
}
return "autoCompleteList.jsp";
}
/**
 * Populate the session with the current server settings for the admin page.
 *
 * @return the page to display
 */
private String admin() {
    session.put("port", String.valueOf(server.getPort()));
    session.put("allowOthers", String.valueOf(server.getAllowOthers()));
    session.put("ssl", String.valueOf(server.getSSL()));
    session.put("sessions", server.getSessions());
    return "admin.jsp";
}
/**
 * Apply and persist the server settings submitted from the admin page
 * (port, remote access, SSL). Errors are traced and otherwise ignored.
 *
 * @return the admin page
 */
private String adminSave() {
    try {
        Properties settings = new SortedProperties();
        int port = Integer.decode((String) attributes.get("port"));
        settings.setProperty("webPort", String.valueOf(port));
        server.setPort(port);
        boolean allowOthers = Utils.parseBoolean(
                (String) attributes.get("allowOthers"), false, false);
        settings.setProperty("webAllowOthers", String.valueOf(allowOthers));
        server.setAllowOthers(allowOthers);
        boolean ssl = Utils.parseBoolean(
                (String) attributes.get("ssl"), false, false);
        settings.setProperty("webSSL", String.valueOf(ssl));
        server.setSSL(ssl);
        server.saveProperties(settings);
    } catch (Exception e) {
        trace(e.toString());
    }
    return admin();
}
/**
 * Run one of the command line tools from the tools page and store its
 * (HTML-escaped) console output, or the stack trace on failure, in the
 * session under "toolResult".
 *
 * @return the page to display
 */
private String tools() {
    try {
        String toolName = (String) attributes.get("tool");
        session.put("tool", toolName);
        String[] argList = StringUtils.arraySplit(
                (String) attributes.get("args"), ',', false);
        if (toolName == null) {
            throw DbException.throwInternalError(toolName);
        }
        Tool tool;
        switch (toolName) {
        case "Backup":
            tool = new Backup();
            break;
        case "Restore":
            tool = new Restore();
            break;
        case "Recover":
            tool = new Recover();
            break;
        case "DeleteDbFiles":
            tool = new DeleteDbFiles();
            break;
        case "ChangeFileEncryption":
            tool = new ChangeFileEncryption();
            break;
        case "Script":
            tool = new Script();
            break;
        case "RunScript":
            tool = new RunScript();
            break;
        case "ConvertTraceFile":
            tool = new ConvertTraceFile();
            break;
        case "CreateCluster":
            tool = new CreateCluster();
            break;
        default:
            throw DbException.throwInternalError(toolName);
        }
        // capture the tool's console output in memory
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        PrintStream out = new PrintStream(buffer, false, "UTF-8");
        tool.setOut(out);
        try {
            tool.runTool(argList);
            out.flush();
            String output = new String(buffer.toByteArray(), StandardCharsets.UTF_8);
            session.put("toolResult", PageParser.escapeHtml(output));
        } catch (Exception e) {
            session.put("toolResult", getStackTrace(0, e, true));
        }
    } catch (Exception e) {
        server.traceError(e);
    }
    return "tools.jsp";
}
/**
 * Start a translation based on the currently loaded text resources and
 * remember the name of the generated translation file in the session.
 *
 * @return the page to display
 */
private String adminStartTranslate() {
    // runtime-checked cast first, then the (erased) generic cast
    @SuppressWarnings("unchecked")
    Map<Object, Object> translation =
            (Map<Object, Object>) Map.class.cast(session.map.get("text"));
    String file = server.startTranslate(translation);
    session.put("translationFile", file);
    return "helpTranslate.jsp";
}
/**
* Stop the application and the server.
* Note: after this call the server no longer accepts requests.
*
* @return the page to display
*/
protected String adminShutdown() {
server.shutdown();
return "admin.jsp";
}
/**
* Prepare the login (index) page: resolve the UI language, build the
* language and saved-settings combo boxes, and fill in the connection
* fields from the selected saved setting.
*
* @return the page to display
*/
private String index() {
String[][] languageArray = WebServer.LANGUAGES;
String language = (String) attributes.get("language");
Locale locale = session.locale;
if (language != null) {
// switch the session language if it differs from the requested one
if (locale == null || !StringUtils.toLowerEnglish(
locale.getLanguage()).equals(language)) {
locale = new Locale(language, "");
server.readTranslations(session, locale.getLanguage());
session.put("language", language);
session.locale = locale;
}
} else {
language = (String) session.get("language");
}
if (language == null) {
// if the language is not yet known
// use the last header
language = headerLanguage;
}
session.put("languageCombo", getComboBox(languageArray, language));
String[] settingNames = server.getSettingNames();
String setting = attributes.getProperty("setting");
// default to the first saved setting if none was selected
if (setting == null && settingNames.length > 0) {
setting = settingNames[0];
}
String combobox = getComboBox(settingNames, setting);
session.put("settingsList", combobox);
ConnectionInfo info = server.getSetting(setting);
if (info == null) {
info = new ConnectionInfo();
}
session.put("setting", PageParser.escapeHtmlData(setting));
session.put("name", PageParser.escapeHtmlData(setting));
session.put("driver", PageParser.escapeHtmlData(info.driver));
session.put("url", PageParser.escapeHtmlData(info.url));
session.put("user", PageParser.escapeHtmlData(info.user));
return "index.jsp";
}
/**
 * Load a command from the session history into the query field.
 *
 * @return the page to display
 */
private String getHistory() {
    String id = attributes.getProperty("id");
    String sql = session.getCommand(Integer.parseInt(id));
    session.put("query", PageParser.escapeHtmlData(sql));
    return "query.jsp";
}
/**
 * Append tree nodes for the columns of a table to the JavaScript buffer,
 * and collect the plain column names (space separated) for auto-complete.
 *
 * @param mainSchema whether this is the default schema
 * @param table the table or view
 * @param buff the target JavaScript buffer
 * @param treeIndex the next free tree node id
 * @param showColumnTypes whether to add a child node with the data type
 * @param columnsBuffer receives the space separated column names
 * @return the next free tree node id
 */
private static int addColumns(boolean mainSchema, DbTableOrView table,
        StringBuilder buff, int treeIndex, boolean showColumnTypes,
        StringBuilder columnsBuffer) {
    DbColumn[] columns = table.getColumns();
    if (columns != null) {
        // tree depth differs for the default schema
        String level = mainSchema ? ", 1, 1" : ", 2, 2";
        for (DbColumn column : columns) {
            if (columnsBuffer.length() > 0) {
                columnsBuffer.append(' ');
            }
            columnsBuffer.append(column.getName());
            String col = escapeIdentifier(column.getName());
            buff.append("setNode(").append(treeIndex).append(level)
                .append(", 'column', '")
                .append(PageParser.escapeJavaScript(column.getName()))
                .append("', 'javascript:ins(\\'").append(col).append("\\')');\n");
            treeIndex++;
            if (mainSchema && showColumnTypes) {
                buff.append("setNode(").append(treeIndex)
                    .append(", 2, 2, 'type', '")
                    .append(PageParser.escapeJavaScript(column.getDataType()))
                    .append("', null);\n");
                treeIndex++;
            }
        }
    }
    return treeIndex;
}
/**
 * Escape an identifier for embedding in a generated JavaScript link:
 * JavaScript-escape, then URL-encode, keeping spaces readable.
 *
 * @param name the identifier
 * @return the escaped identifier
 */
private static String escapeIdentifier(String name) {
    String escaped = PageParser.escapeJavaScript(name);
    return StringUtils.urlEncode(escaped).replace('+', ' ');
}
/**
* This class represents index information for the GUI
* (one entry per index, accumulated over the getIndexInfo rows).
*/
static class IndexInfo {
/**
* The index name.
*/
String name;
/**
* The index type label, e.g. unique/non-unique plus hashed,
* using ${text...} translation placeholders.
*/
String type;
/**
* The indexed columns, comma separated.
*/
String columns;
}
/**
* Append tree nodes for the indexes of a table to the JavaScript buffer.
* Rows from DatabaseMetaData.getIndexInfo (one per indexed column) are
* first grouped by index name, then emitted as one node per index with
* type and column-list child nodes.
*
* @param mainSchema whether this is the default schema (affects tree depth)
* @param meta the database meta data
* @param table the table name
* @param schema the schema name
* @param buff the target JavaScript buffer
* @param treeIndex the next free tree node id
* @return the next free tree node id
*/
private static int addIndexes(boolean mainSchema, DatabaseMetaData meta,
String table, String schema, StringBuilder buff, int treeIndex)
throws SQLException {
ResultSet rs;
try {
rs = meta.getIndexInfo(null, schema, table, false, true);
} catch (SQLException e) {
// SQLite
return treeIndex;
}
// group the per-column rows by index name
HashMap<String, IndexInfo> indexMap = new HashMap<>();
while (rs.next()) {
String name = rs.getString("INDEX_NAME");
IndexInfo info = indexMap.get(name);
if (info == null) {
int t = rs.getInt("TYPE");
String type;
if (t == DatabaseMetaData.tableIndexClustered) {
type = "";
} else if (t == DatabaseMetaData.tableIndexHashed) {
type = " (${text.tree.hashed})";
} else if (t == DatabaseMetaData.tableIndexOther) {
type = "";
} else {
// statistics rows and unknown types are skipped
type = null;
}
if (name != null && type != null) {
info = new IndexInfo();
info.name = name;
type = (rs.getBoolean("NON_UNIQUE") ?
"${text.tree.nonUnique}" : "${text.tree.unique}") + type;
info.type = type;
info.columns = rs.getString("COLUMN_NAME");
indexMap.put(name, info);
}
} else {
// additional column of an already-seen index
info.columns += ", " + rs.getString("COLUMN_NAME");
}
}
rs.close();
if (indexMap.size() > 0) {
// tree depth differs for the default schema
String level = mainSchema ? ", 1, 1" : ", 2, 1";
String levelIndex = mainSchema ? ", 2, 1" : ", 3, 1";
String levelColumnType = mainSchema ? ", 3, 2" : ", 4, 2";
buff.append("setNode(").append(treeIndex).append(level)
.append(", 'index_az', '${text.tree.indexes}', null);\n");
treeIndex++;
for (IndexInfo info : indexMap.values()) {
buff.append("setNode(").append(treeIndex).append(levelIndex)
.append(", 'index', '")
.append(PageParser.escapeJavaScript(info.name))
.append("', null);\n");
treeIndex++;
buff.append("setNode(").append(treeIndex).append(levelColumnType)
.append(", 'type', '").append(info.type).append("', null);\n");
treeIndex++;
buff.append("setNode(").append(treeIndex).append(levelColumnType)
.append(", 'type', '")
.append(PageParser.escapeJavaScript(info.columns))
.append("', null);\n");
treeIndex++;
}
}
return treeIndex;
}
/**
* Append tree nodes for all tables and views of a schema to the
* JavaScript buffer: first the tables (with columns and, when the table
* count is small enough, indexes), then the views (with columns and, for
* H2, the view definition SQL).
*
* @param schema the schema, may be null
* @param mainSchema whether this is the default schema (affects tree depth)
* @param buff the target JavaScript buffer
* @param treeIndex the next free tree node id
* @return the next free tree node id
*/
private int addTablesAndViews(DbSchema schema, boolean mainSchema,
StringBuilder buff, int treeIndex) throws SQLException {
if (schema == null) {
return treeIndex;
}
Connection conn = session.getConnection();
DatabaseMetaData meta = session.getMetaData();
int level = mainSchema ? 0 : 1;
boolean showColumns = mainSchema || !schema.isSystem;
String indentation = ", " + level + ", " + (showColumns ? "1" : "2") + ", ";
String indentNode = ", " + (level + 1) + ", 2, ";
DbTableOrView[] tables = schema.getTables();
if (tables == null) {
return treeIndex;
}
boolean isOracle = schema.getContents().isOracle();
// reading index metadata is expensive; skip it for large schemas
boolean notManyTables = tables.length < SysProperties.CONSOLE_MAX_TABLES_LIST_INDEXES;
// first pass: tables only
for (DbTableOrView table : tables) {
if (table.isView()) {
continue;
}
int tableId = treeIndex;
String tab = table.getQuotedName();
if (!mainSchema) {
tab = schema.quotedName + "." + tab;
}
tab = escapeIdentifier(tab);
buff.append("setNode(").append(treeIndex).append(indentation)
.append(" 'table', '")
.append(PageParser.escapeJavaScript(table.getName()))
.append("', 'javascript:ins(\\'").append(tab).append("\\',true)');\n");
treeIndex++;
if (mainSchema || showColumns) {
StringBuilder columnsBuffer = new StringBuilder();
treeIndex = addColumns(mainSchema, table, buff, treeIndex,
notManyTables, columnsBuffer);
if (!isOracle && notManyTables) {
treeIndex = addIndexes(mainSchema, meta, table.getName(),
schema.name, buff, treeIndex);
}
// register the table and its columns for auto-complete
buff.append("addTable('")
.append(PageParser.escapeJavaScript(table.getName())).append("', '")
.append(PageParser.escapeJavaScript(columnsBuffer.toString())).append("', ")
.append(tableId).append(");\n");
}
}
// second pass: views only
tables = schema.getTables();
for (DbTableOrView view : tables) {
if (!view.isView()) {
continue;
}
int tableId = treeIndex;
String tab = view.getQuotedName();
if (!mainSchema) {
tab = view.getSchema().quotedName + "." + tab;
}
tab = escapeIdentifier(tab);
buff.append("setNode(").append(treeIndex).append(indentation)
.append(" 'view', '")
.append(PageParser.escapeJavaScript(view.getName()))
.append("', 'javascript:ins(\\'").append(tab).append("\\',true)');\n");
treeIndex++;
if (mainSchema) {
StringBuilder columnsBuffer = new StringBuilder();
treeIndex = addColumns(mainSchema, view, buff,
treeIndex, notManyTables, columnsBuffer);
if (schema.getContents().isH2()) {
// for H2, show the view definition SQL as a child node
try (PreparedStatement prep = conn.prepareStatement("SELECT * FROM " +
"INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=?")) {
prep.setString(1, view.getName());
ResultSet rs = prep.executeQuery();
if (rs.next()) {
String sql = rs.getString("SQL");
buff.append("setNode(").append(treeIndex)
.append(indentNode)
.append(" 'type', '")
.append(PageParser.escapeJavaScript(sql))
.append("', null);\n");
treeIndex++;
}
rs.close();
}
}
buff.append("addTable('")
.append(PageParser.escapeJavaScript(view.getName())).append("', '")
.append(PageParser.escapeJavaScript(columnsBuffer.toString())).append("', ")
.append(tableId).append(");\n");
}
}
return treeIndex;
}
/**
* Build the database object tree (schemas, tables, views, and for H2 also
* sequences and users) as a JavaScript fragment and store it in the
* session under "tree". On error the tree is cleared and the stack trace
* is stored under "error".
*
* @return the page to display
*/
private String tables() {
DbContents contents = session.getContents();
boolean isH2 = false;
try {
String url = (String) session.get("url");
Connection conn = session.getConnection();
contents.readContents(url, conn);
session.loadBnf();
isH2 = contents.isH2();
// node 0 is the database root, labeled with the URL
StringBuilder buff = new StringBuilder()
.append("setNode(0, 0, 0, 'database', '")
.append(PageParser.escapeJavaScript(url))
.append("', null);\n");
int treeIndex = 1;
DbSchema defaultSchema = contents.getDefaultSchema();
treeIndex = addTablesAndViews(defaultSchema, true, buff, treeIndex);
DbSchema[] schemas = contents.getSchemas();
for (DbSchema schema : schemas) {
if (schema == defaultSchema || schema == null) {
continue;
}
buff.append("setNode(").append(treeIndex).append(", 0, 1, 'folder', '")
.append(PageParser.escapeJavaScript(schema.name))
.append("', null);\n");
treeIndex++;
treeIndex = addTablesAndViews(schema, false, buff, treeIndex);
}
if (isH2) {
// H2 only: list sequences and users from INFORMATION_SCHEMA
try (Statement stat = conn.createStatement()) {
ResultSet rs = stat.executeQuery("SELECT * FROM " +
"INFORMATION_SCHEMA.SEQUENCES ORDER BY SEQUENCE_NAME");
for (int i = 0; rs.next(); i++) {
if (i == 0) {
// add the folder node only if there is at least one row
buff.append("setNode(").append(treeIndex)
.append(", 0, 1, 'sequences', '${text.tree.sequences}', null);\n");
treeIndex++;
}
String name = rs.getString("SEQUENCE_NAME");
String current = rs.getString("CURRENT_VALUE");
String increment = rs.getString("INCREMENT");
buff.append("setNode(").append(treeIndex)
.append(", 1, 1, 'sequence', '")
.append(PageParser.escapeJavaScript(name))
.append("', null);\n");
treeIndex++;
buff.append("setNode(").append(treeIndex)
.append(", 2, 2, 'type', '${text.tree.current}: ")
.append(PageParser.escapeJavaScript(current))
.append("', null);\n");
treeIndex++;
if (!"1".equals(increment)) {
buff.append("setNode(").append(treeIndex)
.append(", 2, 2, 'type', '${text.tree.increment}: ")
.append(PageParser.escapeJavaScript(increment))
.append("', null);\n");
treeIndex++;
}
}
rs.close();
rs = stat.executeQuery("SELECT * FROM " +
"INFORMATION_SCHEMA.USERS ORDER BY NAME");
for (int i = 0; rs.next(); i++) {
if (i == 0) {
buff.append("setNode(").append(treeIndex)
.append(", 0, 1, 'users', '${text.tree.users}', null);\n");
treeIndex++;
}
String name = rs.getString("NAME");
String admin = rs.getString("ADMIN");
buff.append("setNode(").append(treeIndex)
.append(", 1, 1, 'user', '")
.append(PageParser.escapeJavaScript(name))
.append("', null);\n");
treeIndex++;
if (admin.equalsIgnoreCase("TRUE")) {
buff.append("setNode(").append(treeIndex)
.append(", 2, 2, 'type', '${text.tree.admin}', null);\n");
treeIndex++;
}
}
rs.close();
}
}
DatabaseMetaData meta = session.getMetaData();
String version = meta.getDatabaseProductName() + " " +
meta.getDatabaseProductVersion();
buff.append("setNode(").append(treeIndex)
.append(", 0, 0, 'info', '")
.append(PageParser.escapeJavaScript(version))
.append("', null);\n")
.append("refreshQueryTables();");
session.put("tree", buff.toString());
} catch (Exception e) {
session.put("tree", "");
session.put("error", getStackTrace(0, e, isH2));
}
return "tables.jsp";
}
/**
* Format an exception as an HTML error fragment: a clickable message that
* toggles the (initially hidden) stack trace. For SQLExceptions the SQL
* state and error code are appended; for H2 the stack trace is linked to
* the online source viewer and a help link is added.
*
* @param id a unique id used for the toggle element on the page
* @param e the exception
* @param isH2 whether the current database is an H2 database
* @return the HTML fragment
*/
private String getStackTrace(int id, Throwable e, boolean isH2) {
try {
StringWriter writer = new StringWriter();
e.printStackTrace(new PrintWriter(writer));
String stackTrace = writer.toString();
stackTrace = PageParser.escapeHtml(stackTrace);
if (isH2) {
stackTrace = linkToSource(stackTrace);
}
stackTrace = StringUtils.replaceAll(stackTrace, "\t",
"&nbsp;&nbsp;&nbsp;&nbsp;");
String message = PageParser.escapeHtml(e.getMessage());
// clicking the message toggles the stack trace element below
String error = "<a class=\"error\" href=\"#\" " +
"onclick=\"var x=document.getElementById('st" + id +
"').style;x.display=x.display==''?'none':'';\">" + message +
"</a>";
if (e instanceof SQLException) {
SQLException se = (SQLException) e;
error += " " + se.getSQLState() + "/" + se.getErrorCode();
if (isH2) {
int code = se.getErrorCode();
error += " <a href=\"http://h2database.com/javadoc/" +
"org/h2/api/ErrorCode.html#c" + code +
"\">(${text.a.help})</a>";
}
}
error += "<span style=\"display: none;\" id=\"st" + id +
"\"><br />" + stackTrace + "</span>";
error = formatAsError(error);
return error;
} catch (OutOfMemoryError e2) {
// formatting a huge trace can itself run out of memory
server.traceError(e);
return e.toString();
}
}
/**
* Rewrite "org.h2..." stack trace elements in an HTML-escaped trace into
* links to the online H2 source code viewer. Any parse problem returns
* the input unchanged.
*
* @param s the HTML-escaped stack trace
* @return the stack trace with source links added
*/
private static String linkToSource(String s) {
try {
StringBuilder result = new StringBuilder(s.length());
// keep the first line (the message) unchanged
int idx = s.indexOf("&lt;br /&gt;");
result.append(s, 0, idx);
while (true) {
int start = s.indexOf("org.h2.", idx);
if (start < 0) {
result.append(s.substring(idx));
break;
}
result.append(s, idx, start);
int end = s.indexOf(')', start);
if (end < 0) {
// malformed element: emit the rest verbatim
result.append(s.substring(idx));
break;
}
// element looks like: org.h2.Pkg.Class.method(File.java:123
String element = s.substring(start, end);
int open = element.lastIndexOf('(');
int dotMethod = element.lastIndexOf('.', open - 1);
int dotClass = element.lastIndexOf('.', dotMethod - 1);
String packageName = element.substring(0, dotClass);
int colon = element.lastIndexOf(':');
String file = element.substring(open + 1, colon);
String lineNumber = element.substring(colon + 1, element.length());
String fullFileName = packageName.replace('.', '/') + "/" + file;
result.append("<a href=\"http://h2database.com/html/source.html?file=");
result.append(fullFileName);
result.append("&line=");
result.append(lineNumber);
result.append("&build=");
result.append(Constants.BUILD_ID);
result.append("\">");
result.append(element);
result.append("</a>");
idx = end;
}
return result.toString();
} catch (Throwable t) {
// linking is best-effort; fall back to the plain trace
return s;
}
}
/**
 * Wrap an HTML fragment in the standard error div.
 *
 * @param s the (already escaped) HTML fragment
 * @return the wrapped fragment
 */
private static String formatAsError(String s) {
    StringBuilder html = new StringBuilder("<div class=\"error\">");
    html.append(s).append("</div>");
    return html.toString();
}
/**
* Test the database connection settings entered on the login page: open
* and close a connection, and report success. If the round trip takes
* more than a second, profiling data for open and close is attached to
* the success message.
*
* @return the page to display
*/
private String test() {
String driver = attributes.getProperty("driver", "");
String url = attributes.getProperty("url", "");
String user = attributes.getProperty("user", "");
String password = attributes.getProperty("password", "");
session.put("driver", driver);
session.put("url", url);
session.put("user", user);
boolean isH2 = url.startsWith("jdbc:h2:");
try {
long start = System.currentTimeMillis();
String profOpen = "", profClose = "";
// profile the connection open
Profiler prof = new Profiler();
prof.startCollecting();
Connection conn;
try {
conn = server.getConnection(driver, url, user, password);
} finally {
prof.stopCollecting();
profOpen = prof.getTop(3);
}
// profile the connection close
prof = new Profiler();
prof.startCollecting();
try {
JdbcUtils.closeSilently(conn);
} finally {
prof.stopCollecting();
profClose = prof.getTop(3);
}
long time = System.currentTimeMillis() - start;
String success;
if (time > 1000) {
// slow: show a toggle link that reveals the profiling data
success = "<a class=\"error\" href=\"#\" " +
"onclick=\"var x=document.getElementById('prof').style;x." +
"display=x.display==''?'none':'';\">" +
"${text.login.testSuccessful}</a>" +
"<span style=\"display: none;\" id=\"prof\"><br />" +
PageParser.escapeHtml(profOpen) +
"<br />" +
PageParser.escapeHtml(profClose) +
"</span>";
} else {
success = "<div class=\"success\">${text.login.testSuccessful}</div>";
}
session.put("error", success);
// session.put("error", "${text.login.testSuccessful}");
return "login.jsp";
} catch (Exception e) {
session.put("error", getLoginError(e, isH2));
return "login.jsp";
}
}
/**
 * Get the formatted login error message.
 *
 * @param e the exception
 * @param isH2 if the current database is a H2 database
 * @return the formatted error message
 */
private String getLoginError(Exception e, boolean isH2) {
    String trace = getStackTrace(0, e, isH2);
    boolean driverMissing = e instanceof JdbcSQLException
            && ((JdbcSQLException) e).getErrorCode() == ErrorCode.CLASS_NOT_FOUND_1;
    if (driverMissing) {
        // the driver class could not be loaded: add a hint
        return "${text.login.driverNotFound}<br />" + trace;
    }
    return trace;
}
/**
 * Open a database connection with the submitted settings, store it in the
 * session, and persist the settings on success.
 *
 * @return the frame page on success, the login page on failure
 */
private String login() {
    String driver = attributes.getProperty("driver", "");
    String url = attributes.getProperty("url", "");
    String user = attributes.getProperty("user", "");
    String password = attributes.getProperty("password", "");
    // reset the per-connection UI defaults
    session.put("autoCommit", "checked");
    session.put("autoComplete", "1");
    session.put("maxrows", "1000");
    try {
        Connection conn = server.getConnection(driver, url, user, password);
        session.setConnection(conn);
        session.put("url", url);
        session.put("user", user);
        session.remove("error");
        settingSave();
        return "frame.jsp";
    } catch (Exception e) {
        session.put("error", getLoginError(e, url.startsWith("jdbc:h2:")));
        return "login.jsp";
    }
}
/**
 * Close the current database connection (or shut down the server if this
 * session requested shutdown-on-disconnect) and clear the session state.
 *
 * @return the next action to process
 */
private String logout() {
    try {
        Connection conn = session.getConnection();
        session.setConnection(null);
        for (String key : new String[] { "conn", "result", "tables", "user", "tool" }) {
            session.remove(key);
        }
        if (conn != null) {
            if (session.getShutdownServerOnDisconnect()) {
                server.shutdown();
            } else {
                conn.close();
            }
        }
    } catch (Exception e) {
        trace(e.toString());
    }
    return "index.do";
}
/**
* Execute the SQL script entered in the query field and store the HTML
* result in the session. When chunked streaming is enabled, a lazy
* iterator over page-header / per-statement-result / page-footer chunks is
* stored instead, so results are streamed statement by statement.
*
* @return the page to display
*/
private String query() {
String sql = attributes.getProperty("sql").trim();
try {
// split the script into individual statements
ScriptReader r = new ScriptReader(new StringReader(sql));
final ArrayList<String> list = New.arrayList();
while (true) {
String s = r.readStatement();
if (s == null) {
break;
}
list.add(s);
}
final Connection conn = session.getConnection();
if (SysProperties.CONSOLE_STREAM && server.getAllowChunked()) {
String page = new String(server.getFile("result.jsp"), StandardCharsets.UTF_8);
int idx = page.indexOf("${result}");
// the first element of the list is the header, the last the
// footer
list.add(0, page.substring(0, idx));
list.add(page.substring(idx + "${result}".length()));
session.put("chunks", new Iterator<String>() {
private int i;
@Override
public boolean hasNext() {
return i < list.size();
}
@Override
public String next() {
String s = list.get(i++);
// header and footer are emitted verbatim;
// everything in between is a statement to execute
if (i == 1 || i == list.size()) {
return s;
}
StringBuilder b = new StringBuilder();
query(conn, s, i - 1, list.size() - 2, b);
return b.toString();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
});
return "result.jsp";
}
// non-streaming mode: execute all statements now
String result;
StringBuilder buff = new StringBuilder();
for (int i = 0; i < list.size(); i++) {
String s = list.get(i);
query(conn, s, i, list.size(), buff);
}
result = buff.toString();
session.put("result", result);
} catch (Throwable e) {
session.put("result", getStackTrace(0, e, session.getContents().isH2()));
}
return "result.jsp";
}
/**
 * Execute a query and append the result to the buffer. The statement text
 * is echoed first unless it is a built-in command of the form "@...".
 *
 * @param conn the connection
 * @param s the statement
 * @param i the index
 * @param size the number of statements
 * @param buff the target buffer
 */
void query(Connection conn, String s, int i, int size, StringBuilder buff) {
    boolean builtIn = s.startsWith("@") && s.endsWith(".");
    if (!builtIn) {
        buff.append(PageParser.escapeHtml(s + ";")).append("<br />");
    }
    boolean forceEdit = s.startsWith("@edit");
    buff.append(getResult(conn, i + 1, s, size == 1, forceEdit));
    buff.append("<br />");
}
/**
* Apply an edit operation (op 1: insert/update a row, op 2: delete a row,
* op 3: cancel) to the updatable result set stored in the session, then
* re-run the query in edit mode and store the new result.
*
* @return the page to display
*/
private String editResult() {
ResultSet rs = session.result;
// row < 0 means a new row is being inserted
int row = Integer.parseInt(attributes.getProperty("row"));
int op = Integer.parseInt(attributes.getProperty("op"));
String result = "", error = "";
try {
if (op == 1) {
boolean insert = row < 0;
if (insert) {
rs.moveToInsertRow();
} else {
rs.absolute(row);
}
// cell values are posted as parameters named r<row>c<column>
for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) {
String x = attributes.getProperty("r" + row + "c" + (i + 1));
unescapeData(x, rs, i + 1);
}
if (insert) {
rs.insertRow();
} else {
rs.updateRow();
}
} else if (op == 2) {
rs.absolute(row);
rs.deleteRow();
} else if (op == 3) {
// cancel
}
} catch (Throwable e) {
result = "<br />" + getStackTrace(0, e, session.getContents().isH2());
error = formatAsError(e.getMessage());
}
// re-run the original query in edit mode to refresh the grid
String sql = "@edit " + (String) session.get("resultSetSQL");
Connection conn = session.getConnection();
result = error + getResult(conn, -1, sql, true, true) + result;
session.put("result", result);
return "result.jsp";
}
/**
* Execute one of the built-in "@..." meta data commands and return its
* result set, or null if the command is not a meta data command. Most
* commands map directly to a DatabaseMetaData call, with space separated
* arguments (the literal "null" becomes a null argument, see split()).
*
* @param conn the connection
* @param sql the built-in command, e.g. "@tables null null PUBLIC"
* @return the result set, or null if not handled here
*/
private ResultSet getMetaResultSet(Connection conn, String sql)
throws SQLException {
DatabaseMetaData meta = conn.getMetaData();
if (isBuiltIn(sql, "@best_row_identifier")) {
String[] p = split(sql);
int scale = p[4] == null ? 0 : Integer.parseInt(p[4]);
boolean nullable = Boolean.parseBoolean(p[5]);
return meta.getBestRowIdentifier(p[1], p[2], p[3], scale, nullable);
} else if (isBuiltIn(sql, "@catalogs")) {
return meta.getCatalogs();
} else if (isBuiltIn(sql, "@columns")) {
String[] p = split(sql);
return meta.getColumns(p[1], p[2], p[3], p[4]);
} else if (isBuiltIn(sql, "@column_privileges")) {
String[] p = split(sql);
return meta.getColumnPrivileges(p[1], p[2], p[3], p[4]);
} else if (isBuiltIn(sql, "@cross_references")) {
String[] p = split(sql);
return meta.getCrossReference(p[1], p[2], p[3], p[4], p[5], p[6]);
} else if (isBuiltIn(sql, "@exported_keys")) {
String[] p = split(sql);
return meta.getExportedKeys(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@imported_keys")) {
String[] p = split(sql);
return meta.getImportedKeys(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@index_info")) {
String[] p = split(sql);
boolean unique = Boolean.parseBoolean(p[4]);
boolean approx = Boolean.parseBoolean(p[5]);
return meta.getIndexInfo(p[1], p[2], p[3], unique, approx);
} else if (isBuiltIn(sql, "@primary_keys")) {
String[] p = split(sql);
return meta.getPrimaryKeys(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@procedures")) {
String[] p = split(sql);
return meta.getProcedures(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@procedure_columns")) {
String[] p = split(sql);
return meta.getProcedureColumns(p[1], p[2], p[3], p[4]);
} else if (isBuiltIn(sql, "@schemas")) {
return meta.getSchemas();
} else if (isBuiltIn(sql, "@tables")) {
String[] p = split(sql);
String[] types = p[4] == null ? null : StringUtils.arraySplit(p[4], ',', false);
return meta.getTables(p[1], p[2], p[3], types);
} else if (isBuiltIn(sql, "@table_privileges")) {
String[] p = split(sql);
return meta.getTablePrivileges(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@table_types")) {
return meta.getTableTypes();
} else if (isBuiltIn(sql, "@type_info")) {
return meta.getTypeInfo();
} else if (isBuiltIn(sql, "@udts")) {
String[] p = split(sql);
// the UDT type codes are passed as a comma separated int list
int[] types;
if (p[4] == null) {
types = null;
} else {
String[] t = StringUtils.arraySplit(p[4], ',', false);
types = new int[t.length];
for (int i = 0; i < t.length; i++) {
types[i] = Integer.parseInt(t[i]);
}
}
return meta.getUDTs(p[1], p[2], p[3], types);
} else if (isBuiltIn(sql, "@version_columns")) {
String[] p = split(sql);
return meta.getVersionColumns(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@memory")) {
// synthetic result: current JVM memory usage
SimpleResultSet rs = new SimpleResultSet();
rs.addColumn("Type", Types.VARCHAR, 0, 0);
rs.addColumn("KB", Types.VARCHAR, 0, 0);
rs.addRow("Used Memory", "" + Utils.getMemoryUsed());
rs.addRow("Free Memory", "" + Utils.getMemoryFree());
return rs;
} else if (isBuiltIn(sql, "@info")) {
// synthetic result: connection state plus all no-arg meta data getters
SimpleResultSet rs = new SimpleResultSet();
rs.addColumn("KEY", Types.VARCHAR, 0, 0);
rs.addColumn("VALUE", Types.VARCHAR, 0, 0);
rs.addRow("conn.getCatalog", conn.getCatalog());
rs.addRow("conn.getAutoCommit", "" + conn.getAutoCommit());
rs.addRow("conn.getTransactionIsolation", "" + conn.getTransactionIsolation());
rs.addRow("conn.getWarnings", "" + conn.getWarnings());
String map;
try {
map = "" + conn.getTypeMap();
} catch (SQLException e) {
// some drivers do not support getTypeMap
map = e.toString();
}
rs.addRow("conn.getTypeMap", "" + map);
rs.addRow("conn.isReadOnly", "" + conn.isReadOnly());
rs.addRow("conn.getHoldability", "" + conn.getHoldability());
addDatabaseMetaData(rs, meta);
return rs;
} else if (isBuiltIn(sql, "@attributes")) {
String[] p = split(sql);
return meta.getAttributes(p[1], p[2], p[3], p[4]);
} else if (isBuiltIn(sql, "@super_tables")) {
String[] p = split(sql);
return meta.getSuperTables(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@super_types")) {
String[] p = split(sql);
return meta.getSuperTypes(p[1], p[2], p[3]);
} else if (isBuiltIn(sql, "@prof_stop")) {
// stop a running profiling session and return its top stack traces
if (profiler != null) {
profiler.stopCollecting();
SimpleResultSet rs = new SimpleResultSet();
rs.addColumn("Top Stack Trace(s)", Types.VARCHAR, 0, 0);
rs.addRow(profiler.getTop(3));
profiler = null;
return rs;
}
}
return null;
}
/**
 * Invoke every no-argument method of DatabaseMetaData (sorted by
 * signature) and append one "meta.&lt;name&gt;" / value row per method.
 * If a call fails, the exception text is shown as the value instead.
 */
private static void addDatabaseMetaData(SimpleResultSet rs,
        DatabaseMetaData meta) {
    Method[] methods = DatabaseMetaData.class.getDeclaredMethods();
    // Sort by the full signature so the output order is stable.
    Arrays.sort(methods, new Comparator<Method>() {
        @Override
        public int compare(Method a, Method b) {
            return a.toString().compareTo(b.toString());
        }
    });
    for (Method method : methods) {
        if (method.getParameterTypes().length != 0) {
            continue;
        }
        String label = "meta." + method.getName();
        try {
            Object value = method.invoke(meta);
            rs.addRow(label, "" + value);
        } catch (InvocationTargetException e) {
            // Report the underlying cause, not the reflection wrapper.
            rs.addRow(label, e.getTargetException().toString());
        } catch (Exception e) {
            rs.addRow(label, e.toString());
        }
    }
}
/**
 * Split a console command into at most 10 space-separated arguments.
 * Missing trailing arguments remain null, and the literal token "null"
 * is converted to a real null.
 */
private static String[] split(String s) {
    String[] args = new String[10];
    String[] tokens = StringUtils.arraySplit(s, ' ', true);
    System.arraycopy(tokens, 0, args, 0, tokens.length);
    for (int i = 0; i < args.length; i++) {
        // "null" is the textual placeholder for a null argument.
        if ("null".equals(args[i])) {
            args[i] = null;
        }
    }
    return args;
}
/**
 * Read the row limit stored in the session ("maxrows"), 0 meaning
 * no limit.
 */
private int getMaxrows() {
    String value = (String) session.get("maxrows");
    if (value == null) {
        return 0;
    }
    return Integer.parseInt(value);
}
/**
 * Execute a SQL statement or a built-in console command ("@..."-prefixed)
 * and render the outcome as an HTML fragment (or a "${text...}" template
 * key that is substituted later).
 *
 * @param conn the connection to execute on
 * @param id the request id, used when rendering a stack trace
 * @param sql the statement or console command text
 * @param allowEdit whether an editable result may be offered
 * @param forceEdit whether an updatable statement must be used
 * @return the rendered HTML
 */
private String getResult(Connection conn, int id, String sql,
        boolean allowEdit, boolean forceEdit) {
    try {
        sql = sql.trim();
        StringBuilder buff = new StringBuilder();
        String sqlUpper = StringUtils.toUpperEnglish(sql);
        // DDL may change the object tree: make the menu frame reload
        // the table list.
        if (sqlUpper.contains("CREATE") ||
                sqlUpper.contains("DROP") ||
                sqlUpper.contains("ALTER") ||
                sqlUpper.contains("RUNSCRIPT")) {
            String sessionId = attributes.getProperty("jsessionid");
            buff.append("<script type=\"text/javascript\">parent['h2menu'].location='tables.do?jsessionid=")
                .append(sessionId).append("';</script>");
        }
        Statement stat;
        DbContents contents = session.getContents();
        // Scrollable/updatable statements are only requested for H2 (or
        // when editing is forced); other databases may not support them.
        if (forceEdit || (allowEdit && contents.isH2())) {
            stat = conn.createStatement(
                    ResultSet.TYPE_SCROLL_INSENSITIVE,
                    ResultSet.CONCUR_UPDATABLE);
        } else {
            stat = conn.createStatement();
        }
        ResultSet rs;
        long time = System.currentTimeMillis();
        boolean metadata = false;
        int generatedKeys = Statement.NO_GENERATED_KEYS;
        boolean edit = false;
        boolean list = false;
        // Built-in commands that finish immediately:
        if (isBuiltIn(sql, "@autocommit_true")) {
            conn.setAutoCommit(true);
            return "${text.result.autoCommitOn}";
        } else if (isBuiltIn(sql, "@autocommit_false")) {
            conn.setAutoCommit(false);
            return "${text.result.autoCommitOff}";
        } else if (isBuiltIn(sql, "@cancel")) {
            // Cancel whatever statement this session is currently running.
            stat = session.executingStatement;
            if (stat != null) {
                stat.cancel();
                buff.append("${text.result.statementWasCanceled}");
            } else {
                buff.append("${text.result.noRunningStatement}");
            }
            return buff.toString();
        } else if (isBuiltIn(sql, "@edit")) {
            // Strip the prefix and fall through: the rest is executed as
            // SQL with editing enabled.
            edit = true;
            sql = sql.substring("@edit".length()).trim();
            session.put("resultSetSQL", sql);
        }
        // Modifier prefixes (@list, @meta) can be combined and stack on
        // the remaining command.
        if (isBuiltIn(sql, "@list")) {
            list = true;
            sql = sql.substring("@list".length()).trim();
        }
        if (isBuiltIn(sql, "@meta")) {
            metadata = true;
            sql = sql.substring("@meta".length()).trim();
        }
        if (isBuiltIn(sql, "@generated")) {
            generatedKeys = Statement.RETURN_GENERATED_KEYS;
            sql = sql.substring("@generated".length()).trim();
        } else if (isBuiltIn(sql, "@history")) {
            buff.append(getCommandHistoryString());
            return buff.toString();
        } else if (isBuiltIn(sql, "@loop")) {
            // "@loop <count> <sql>": benchmark helper.
            sql = sql.substring("@loop".length()).trim();
            int idx = sql.indexOf(' ');
            int count = Integer.decode(sql.substring(0, idx));
            sql = sql.substring(idx).trim();
            return executeLoop(conn, count, sql);
        } else if (isBuiltIn(sql, "@maxrows")) {
            int maxrows = (int) Double.parseDouble(
                    sql.substring("@maxrows".length()).trim());
            session.put("maxrows", "" + maxrows);
            return "${text.result.maxrowsSet}";
        } else if (isBuiltIn(sql, "@parameter_meta")) {
            sql = sql.substring("@parameter_meta".length()).trim();
            PreparedStatement prep = conn.prepareStatement(sql);
            buff.append(getParameterResultSet(prep.getParameterMetaData()));
            return buff.toString();
        } else if (isBuiltIn(sql, "@password_hash")) {
            sql = sql.substring("@password_hash".length()).trim();
            String[] p = split(sql);
            return StringUtils.convertBytesToHex(
                    SHA256.getKeyPasswordHash(p[0], p[1].toCharArray()));
        } else if (isBuiltIn(sql, "@prof_start")) {
            // Restart profiling; @prof_stop (handled elsewhere) reports.
            if (profiler != null) {
                profiler.stopCollecting();
            }
            profiler = new Profiler();
            profiler.startCollecting();
            return "Ok";
        } else if (isBuiltIn(sql, "@sleep")) {
            String s = sql.substring("@sleep".length()).trim();
            int sleep = 1;
            if (s.length() > 0) {
                sleep = Integer.parseInt(s);
            }
            Thread.sleep(sleep * 1000);
            return "Ok";
        } else if (isBuiltIn(sql, "@transaction_isolation")) {
            // With an argument: set the level; always print the current
            // level and the legend of possible values.
            String s = sql.substring("@transaction_isolation".length()).trim();
            if (s.length() > 0) {
                int level = Integer.parseInt(s);
                conn.setTransactionIsolation(level);
            }
            buff.append("Transaction Isolation: ")
                .append(conn.getTransactionIsolation())
                .append("<br />");
            buff.append(Connection.TRANSACTION_READ_UNCOMMITTED)
                .append(": read_uncommitted<br />");
            buff.append(Connection.TRANSACTION_READ_COMMITTED)
                .append(": read_committed<br />");
            buff.append(Connection.TRANSACTION_REPEATABLE_READ)
                .append(": repeatable_read<br />");
            buff.append(Connection.TRANSACTION_SERIALIZABLE)
                .append(": serializable");
        }
        if (sql.startsWith("@")) {
            // Remaining "@" commands map to DatabaseMetaData queries.
            rs = getMetaResultSet(conn, sql);
            if (rs == null) {
                buff.append("?: ").append(sql);
                return buff.toString();
            }
        } else {
            // Plain SQL: execute, record in the command history.
            int maxrows = getMaxrows();
            stat.setMaxRows(maxrows);
            session.executingStatement = stat;
            boolean isResultSet = stat.execute(sql, generatedKeys);
            session.addCommand(sql);
            if (generatedKeys == Statement.RETURN_GENERATED_KEYS) {
                rs = null;
                rs = stat.getGeneratedKeys();
            } else {
                if (!isResultSet) {
                    // Update count only: render it with the elapsed time.
                    buff.append("${text.result.updateCount}: ")
                        .append(stat.getUpdateCount());
                    time = System.currentTimeMillis() - time;
                    buff.append("<br />(").append(time).append(" ms)");
                    stat.close();
                    return buff.toString();
                }
                rs = stat.getResultSet();
            }
        }
        time = System.currentTimeMillis() - time;
        buff.append(getResultSet(sql, rs, metadata, list, edit, time, allowEdit));
        // SQLWarning warning = stat.getWarnings();
        // if (warning != null) {
        //     buff.append("<br />Warning:<br />").
        //     append(getStackTrace(id, warning));
        // }
        // In edit mode the statement stays open: the result set is kept
        // in the session for later updates.
        if (!edit) {
            stat.close();
        }
        return buff.toString();
    } catch (Throwable e) {
        // throwable: including OutOfMemoryError and so on
        return getStackTrace(id, e, session.getContents().isH2());
    } finally {
        session.executingStatement = null;
    }
}
/**
 * Check whether the command starts with the given built-in console
 * command prefix (case-insensitive).
 */
private static boolean isBuiltIn(String sql, String builtIn) {
    return StringUtils.startsWithIgnoreCase(sql, builtIn);
}
/**
 * Execute a statement repeatedly ("@loop" benchmark helper).
 * Each '?' in the SQL is replaced per iteration: by the loop counter, or
 * by a random value below {@code count} if written as "?/*rnd*&#47;".
 * With the "@statement" prefix a plain Statement is used per iteration;
 * otherwise a single PreparedStatement is reused.
 *
 * @param conn the connection
 * @param count the number of iterations
 * @param sql the statement to run
 * @return a one-line summary: elapsed time, iteration count, mode,
 *         parameter kinds, and the SQL
 */
private String executeLoop(Connection conn, int count, String sql)
        throws SQLException {
    // params: per '?', 1 = random value, 0 = loop counter.
    ArrayList<Integer> params = New.arrayList();
    int idx = 0;
    while (!stop) {
        idx = sql.indexOf('?', idx);
        if (idx < 0) {
            break;
        }
        if (isBuiltIn(sql.substring(idx), "?/*rnd*/")) {
            params.add(1);
            // Strip the marker comment, keeping the bare '?'.
            sql = sql.substring(0, idx) + "?" + sql.substring(idx + "/*rnd*/".length() + 1);
        } else {
            params.add(0);
        }
        idx++;
    }
    boolean prepared;
    // Fixed seed so repeated benchmark runs are comparable.
    Random random = new Random(1);
    long time = System.currentTimeMillis();
    if (isBuiltIn(sql, "@statement")) {
        sql = sql.substring("@statement".length()).trim();
        prepared = false;
        Statement stat = conn.createStatement();
        for (int i = 0; !stop && i < count; i++) {
            // Substitute each '?' textually for this iteration.
            String s = sql;
            for (Integer type : params) {
                idx = s.indexOf('?');
                if (type.intValue() == 1) {
                    s = s.substring(0, idx) + random.nextInt(count) + s.substring(idx + 1);
                } else {
                    s = s.substring(0, idx) + i + s.substring(idx + 1);
                }
            }
            if (stat.execute(s)) {
                ResultSet rs = stat.getResultSet();
                while (!stop && rs.next()) {
                    // maybe get the data as well
                }
                rs.close();
            }
        }
    } else {
        prepared = true;
        PreparedStatement prep = conn.prepareStatement(sql);
        for (int i = 0; !stop && i < count; i++) {
            for (int j = 0; j < params.size(); j++) {
                Integer type = params.get(j);
                if (type.intValue() == 1) {
                    prep.setInt(j + 1, random.nextInt(count));
                } else {
                    prep.setInt(j + 1, i);
                }
            }
            if (session.getContents().isSQLite()) {
                // SQLite currently throws an exception on prep.execute()
                prep.executeUpdate();
            } else {
                if (prep.execute()) {
                    ResultSet rs = prep.getResultSet();
                    while (!stop && rs.next()) {
                        // maybe get the data as well
                    }
                    rs.close();
                }
            }
        }
    }
    time = System.currentTimeMillis() - time;
    // Summary: "<ms> ms: <count> * (Prepared|Statement) (i, rnd, ...) <sql>"
    StatementBuilder buff = new StatementBuilder();
    buff.append(time).append(" ms: ").append(count).append(" * ");
    if (prepared) {
        buff.append("(Prepared) ");
    } else {
        buff.append("(Statement) ");
    }
    buff.append('(');
    for (int p : params) {
        buff.appendExceptFirst(", ");
        buff.append(p == 0 ? "i" : "rnd");
    }
    return buff.append(") ").append(sql).toString();
}
/**
 * Render the session's command history, newest first, as an HTML table.
 * Each row carries an edit icon that loads the command back into the
 * query frame via getHistory.do.
 */
private String getCommandHistoryString() {
    ArrayList<String> history = session.getCommandHistory();
    StringBuilder html = new StringBuilder();
    html.append("<table cellspacing=0 cellpadding=0>" +
            "<tr><th></th><th>Command</th></tr>");
    // Walk backwards so the most recent command is listed on top.
    for (int i = history.size() - 1; i >= 0; i--) {
        html.append("<tr><td><a href=\"getHistory.do?id=")
                .append(i)
                .append("&jsessionid=${sessionId}\" target=\"h2query\" >")
                .append("<img width=16 height=16 src=\"ico_write.gif\" " +
                        "onmouseover = \"this.className ='icon_hover'\" ")
                .append("onmouseout = \"this.className ='icon'\" " +
                        "class=\"icon\" alt=\"${text.resultEdit.edit}\" ")
                .append("title=\"${text.resultEdit.edit}\" border=\"1\"/></a>")
                .append("</td><td>")
                .append(PageParser.escapeHtml(history.get(i)))
                .append("</td></tr>");
    }
    return html.append("</table>").toString();
}
/**
 * Render the parameter meta data of a prepared statement as an HTML
 * table with one row per parameter.
 *
 * @param meta the parameter meta data, may be null
 * @return the HTML table, or a plain message if there is no meta data
 * @throws SQLException on meta data access errors
 */
private static String getParameterResultSet(ParameterMetaData meta)
        throws SQLException {
    StringBuilder buff = new StringBuilder();
    if (meta == null) {
        return "No parameter meta data";
    }
    buff.append("<table cellspacing=0 cellpadding=0>").
        append("<tr><th>className</th><th>mode</th><th>type</th>").
        append("<th>typeName</th><th>precision</th><th>scale</th></tr>");
    // JDBC parameter indexes are 1-based.
    for (int i = 0; i < meta.getParameterCount(); i++) {
        // Fixed: was "</tr><td>", which emitted a stray closing tag
        // instead of opening the row, producing malformed HTML.
        buff.append("<tr><td>").
            append(meta.getParameterClassName(i + 1)).
            append("</td><td>").
            append(meta.getParameterMode(i + 1)).
            append("</td><td>").
            append(meta.getParameterType(i + 1)).
            append("</td><td>").
            append(meta.getParameterTypeName(i + 1)).
            append("</td><td>").
            append(meta.getPrecision(i + 1)).
            append("</td><td>").
            append(meta.getScale(i + 1)).
            append("</td></tr>");
    }
    buff.append("</table>");
    return buff.toString();
}
/**
 * Render a result set as an HTML table. Depending on the flags the
 * output is a plain table, a key/value listing (one row per column), the
 * result set meta data, or an editable form whose result set is kept
 * open in the session for later updates.
 *
 * @param sql the SQL that produced the result (re-emitted in the
 *            "edit result" form)
 * @param rs the result set to render (closed here unless editing)
 * @param metadata render column meta data instead of the data itself
 * @param list render as a column/value listing
 * @param edit render as an editable form
 * @param time start/elapsed time bookkeeping value from the caller
 * @param allowEdit whether the "edit result" button may be offered
 * @return the rendered HTML
 */
private String getResultSet(String sql, ResultSet rs, boolean metadata,
        boolean list, boolean edit, long time, boolean allowEdit)
        throws SQLException {
    int maxrows = getMaxrows();
    // NOTE(review): 'time' is re-derived from the clock here and again
    // before printing below - confirm the intended elapsed-time math
    // against the caller's handling in getResult().
    time = System.currentTimeMillis() - time;
    StringBuilder buff = new StringBuilder();
    if (edit) {
        buff.append("<form id=\"editing\" name=\"editing\" method=\"post\" " +
                "action=\"editResult.do?jsessionid=${sessionId}\" " +
                "id=\"mainForm\" target=\"h2result\">" +
                "<input type=\"hidden\" name=\"op\" value=\"1\" />" +
                "<input type=\"hidden\" name=\"row\" value=\"\" />" +
                "<table cellspacing=0 cellpadding=0 id=\"editTable\">");
    } else {
        buff.append("<table cellspacing=0 cellpadding=0>");
    }
    if (metadata) {
        // Replace the result set by a synthetic one describing its
        // columns; the rendering below then works unchanged.
        SimpleResultSet r = new SimpleResultSet();
        r.addColumn("#", Types.INTEGER, 0, 0);
        r.addColumn("label", Types.VARCHAR, 0, 0);
        r.addColumn("catalog", Types.VARCHAR, 0, 0);
        r.addColumn("schema", Types.VARCHAR, 0, 0);
        r.addColumn("table", Types.VARCHAR, 0, 0);
        r.addColumn("column", Types.VARCHAR, 0, 0);
        r.addColumn("type", Types.INTEGER, 0, 0);
        r.addColumn("typeName", Types.VARCHAR, 0, 0);
        r.addColumn("class", Types.VARCHAR, 0, 0);
        r.addColumn("precision", Types.INTEGER, 0, 0);
        r.addColumn("scale", Types.INTEGER, 0, 0);
        r.addColumn("displaySize", Types.INTEGER, 0, 0);
        r.addColumn("autoIncrement", Types.BOOLEAN, 0, 0);
        r.addColumn("caseSensitive", Types.BOOLEAN, 0, 0);
        r.addColumn("currency", Types.BOOLEAN, 0, 0);
        r.addColumn("nullable", Types.INTEGER, 0, 0);
        r.addColumn("readOnly", Types.BOOLEAN, 0, 0);
        r.addColumn("searchable", Types.BOOLEAN, 0, 0);
        r.addColumn("signed", Types.BOOLEAN, 0, 0);
        r.addColumn("writable", Types.BOOLEAN, 0, 0);
        r.addColumn("definitelyWritable", Types.BOOLEAN, 0, 0);
        ResultSetMetaData m = rs.getMetaData();
        for (int i = 1; i <= m.getColumnCount(); i++) {
            r.addRow(i,
                    m.getColumnLabel(i),
                    m.getCatalogName(i),
                    m.getSchemaName(i),
                    m.getTableName(i),
                    m.getColumnName(i),
                    m.getColumnType(i),
                    m.getColumnTypeName(i),
                    m.getColumnClassName(i),
                    m.getPrecision(i),
                    m.getScale(i),
                    m.getColumnDisplaySize(i),
                    m.isAutoIncrement(i),
                    m.isCaseSensitive(i),
                    m.isCurrency(i),
                    m.isNullable(i),
                    m.isReadOnly(i),
                    m.isSearchable(i),
                    m.isSigned(i),
                    m.isWritable(i),
                    m.isDefinitelyWritable(i));
        }
        rs = r;
    }
    ResultSetMetaData meta = rs.getMetaData();
    int columns = meta.getColumnCount();
    int rows = 0;
    if (list) {
        // Listing mode: one table row per column of each data row.
        buff.append("<tr><th>Column</th><th>Data</th></tr><tr>");
        while (rs.next()) {
            if (maxrows > 0 && rows >= maxrows) {
                break;
            }
            rows++;
            buff.append("<tr><td>Row #</td><td>").
                append(rows).append("</tr>");
            for (int i = 0; i < columns; i++) {
                buff.append("<tr><td>").
                    append(PageParser.escapeHtml(meta.getColumnLabel(i + 1))).
                    append("</td><td>").
                    append(escapeData(rs, i + 1)).
                    append("</td></tr>");
            }
        }
    } else {
        // Table mode: header row, then one row per data row; in edit
        // mode an extra action column with edit/delete icons.
        buff.append("<tr>");
        if (edit) {
            buff.append("<th>${text.resultEdit.action}</th>");
        }
        for (int i = 0; i < columns; i++) {
            buff.append("<th>").
                append(PageParser.escapeHtml(meta.getColumnLabel(i + 1))).
                append("</th>");
        }
        buff.append("</tr>");
        while (rs.next()) {
            if (maxrows > 0 && rows >= maxrows) {
                break;
            }
            rows++;
            buff.append("<tr>");
            if (edit) {
                buff.append("<td>").
                    append("<img onclick=\"javascript:editRow(").
                    append(rs.getRow()).
                    append(",'${sessionId}', '${text.resultEdit.save}', " +
                            "'${text.resultEdit.cancel}'").
                    append(")\" width=16 height=16 src=\"ico_write.gif\" " +
                            "onmouseover = \"this.className ='icon_hover'\" " +
                            "onmouseout = \"this.className ='icon'\" " +
                            "class=\"icon\" alt=\"${text.resultEdit.edit}\" " +
                            "title=\"${text.resultEdit.edit}\" border=\"1\"/>").
                    append("<img onclick=\"javascript:deleteRow(").
                    append(rs.getRow()).
                    append(",'${sessionId}', '${text.resultEdit.delete}', " +
                            "'${text.resultEdit.cancel}'").
                    append(")\" width=16 height=16 src=\"ico_remove.gif\" " +
                            "onmouseover = \"this.className ='icon_hover'\" " +
                            "onmouseout = \"this.className ='icon'\" " +
                            "class=\"icon\" alt=\"${text.resultEdit.delete}\" " +
                            "title=\"${text.resultEdit.delete}\" border=\"1\" /></a>").
                    append("</td>");
            }
            for (int i = 0; i < columns; i++) {
                buff.append("<td>").
                    append(escapeData(rs, i + 1)).
                    append("</td>");
            }
            buff.append("</tr>");
        }
    }
    boolean isUpdatable = false;
    try {
        if (!session.getContents().isDB2()) {
            isUpdatable = rs.getConcurrency() == ResultSet.CONCUR_UPDATABLE
                    && rs.getType() != ResultSet.TYPE_FORWARD_ONLY;
        }
    } catch (NullPointerException e) {
        // ignore
        // workaround for a JDBC-ODBC bridge problem
    }
    if (edit) {
        // Keep the result set open for editResult.do; close any result
        // set left over from a previous edit.
        ResultSet old = session.result;
        if (old != null) {
            old.close();
        }
        session.result = rs;
    } else {
        rs.close();
    }
    if (edit) {
        // Extra empty row with an "add" icon for inserting new rows.
        buff.append("<tr><td>").
            append("<img onclick=\"javascript:editRow(-1, " +
                    "'${sessionId}', '${text.resultEdit.save}', '${text.resultEdit.cancel}'").
            append(")\" width=16 height=16 src=\"ico_add.gif\" " +
                    "onmouseover = \"this.className ='icon_hover'\" " +
                    "onmouseout = \"this.className ='icon'\" " +
                    "class=\"icon\" alt=\"${text.resultEdit.add}\" " +
                    "title=\"${text.resultEdit.add}\" border=\"1\"/>").
            append("</td>");
        for (int i = 0; i < columns; i++) {
            buff.append("<td></td>");
        }
        buff.append("</tr>");
    }
    buff.append("</table>");
    if (edit) {
        buff.append("</form>");
    }
    // Footer: row count and elapsed time.
    if (rows == 0) {
        buff.append("(${text.result.noRows}");
    } else if (rows == 1) {
        buff.append("(${text.result.1row}");
    } else {
        buff.append('(').append(rows).append(" ${text.result.rows}");
    }
    buff.append(", ");
    time = System.currentTimeMillis() - time;
    buff.append(time).append(" ms)");
    // Offer the "edit result" button when the result could be re-run
    // in editable mode.
    if (!edit && isUpdatable && allowEdit) {
        buff.append("<br /><br />" +
                "<form name=\"editResult\" method=\"post\" " +
                "action=\"query.do?jsessionid=${sessionId}\" target=\"h2result\">" +
                "<input type=\"submit\" class=\"button\" " +
                "value=\"${text.resultEdit.editResult}\" />" +
                "<input type=\"hidden\" name=\"sql\" value=\"@edit ").
            append(PageParser.escapeHtmlData(sql)).
            append("\" /></form>");
    }
    return buff.toString();
}
/**
 * Save the current connection settings to the properties file.
 *
 * @return the file to open afterwards
 */
private String settingSave() {
    // Build the entry from the submitted form attributes; missing
    // fields default to the empty string.
    ConnectionInfo info = new ConnectionInfo();
    info.name = attributes.getProperty("name", "");
    info.driver = attributes.getProperty("driver", "");
    info.url = attributes.getProperty("url", "");
    info.user = attributes.getProperty("user", "");
    server.updateSetting(info);
    // Remember the saved entry as the currently selected setting.
    attributes.put("setting", info.name);
    server.saveProperties(null);
    return "index.do";
}
/**
 * Render a single result cell as HTML. Very large values are truncated,
 * and values that could be confused with the editor's markers ("null",
 * "= ", "=+") are prefixed with an invisible marker div so they can be
 * told apart from real markers when editing.
 */
private static String escapeData(ResultSet rs, int columnIndex)
        throws SQLException {
    String value = rs.getString(columnIndex);
    if (value == null) {
        return "<i>null</i>";
    }
    if (value.length() > 100_000) {
        // Truncate huge values; binary columns arrive as hex strings,
        // so two characters correspond to one byte.
        String shortened;
        if (isBinary(rs.getMetaData().getColumnType(columnIndex))) {
            shortened = PageParser.escapeHtml(value.substring(0, 6)) +
                    "... (" + (value.length() / 2) + " ${text.result.bytes})";
        } else {
            shortened = PageParser.escapeHtml(value.substring(0, 100)) +
                    "... (" + value.length() + " ${text.result.characters})";
        }
        return "<div style='display: none'>=+</div>" + shortened;
    }
    if (value.equals("null") || value.startsWith("= ") || value.startsWith("=+")) {
        return "<div style='display: none'>= </div>" + PageParser.escapeHtml(value);
    }
    if (value.equals("")) {
        // PageParser.escapeHtml replaces "" with a non-breaking space
        return "";
    }
    return PageParser.escapeHtml(value);
}
/**
 * Whether the given JDBC type is rendered as a hex dump rather than
 * plain text.
 */
private static boolean isBinary(int sqlType) {
    return sqlType == Types.BINARY
            || sqlType == Types.BLOB
            || sqlType == Types.JAVA_OBJECT
            || sqlType == Types.LONGVARBINARY
            || sqlType == Types.OTHER
            || sqlType == Types.VARBINARY;
}
/**
 * Apply an edited cell value to an updatable result set, interpreting
 * the editor's markers: "null" sets NULL, "=+" leaves the cell
 * unchanged, "=*" inserts a type-appropriate default, and a leading
 * "= " escapes a literal value that would otherwise look like a marker.
 *
 * @param x the edited value including possible markers
 * @param rs the updatable result set positioned on the row
 * @param columnIndex the 1-based column index
 */
private void unescapeData(String x, ResultSet rs, int columnIndex)
        throws SQLException {
    if (x.equals("null")) {
        rs.updateNull(columnIndex);
        return;
    } else if (x.startsWith("=+")) {
        // don't update
        return;
    } else if (x.equals("=*")) {
        // set an appropriate default value
        int type = rs.getMetaData().getColumnType(columnIndex);
        switch (type) {
        case Types.TIME:
            rs.updateString(columnIndex, "12:00:00");
            break;
        case Types.TIMESTAMP:
        case Types.DATE:
            rs.updateString(columnIndex, "2001-01-01");
            break;
        default:
            rs.updateString(columnIndex, "1");
            break;
        }
        return;
    } else if (x.startsWith("= ")) {
        // Strip the escape prefix; the rest is the literal value.
        x = x.substring(2);
    }
    ResultSetMetaData meta = rs.getMetaData();
    int type = meta.getColumnType(columnIndex);
    // H2 converts strings to the column type itself, so a plain string
    // update is enough there.
    if (session.getContents().isH2()) {
        rs.updateString(columnIndex, x);
        return;
    }
    // Other databases: convert to the column's Java type explicitly.
    switch (type) {
    case Types.BIGINT:
        rs.updateLong(columnIndex, Long.decode(x));
        break;
    case Types.DECIMAL:
        rs.updateBigDecimal(columnIndex, new BigDecimal(x));
        break;
    case Types.DOUBLE:
    case Types.FLOAT:
        rs.updateDouble(columnIndex, Double.parseDouble(x));
        break;
    case Types.REAL:
        rs.updateFloat(columnIndex, Float.parseFloat(x));
        break;
    case Types.INTEGER:
        rs.updateInt(columnIndex, Integer.decode(x));
        break;
    case Types.TINYINT:
        rs.updateShort(columnIndex, Short.decode(x));
        break;
    default:
        rs.updateString(columnIndex, x);
    }
}
/**
 * Remove a saved connection setting and select the first remaining one
 * (if any) instead.
 *
 * @return the file to open afterwards
 */
private String settingRemove() {
    String setting = attributes.getProperty("name", "");
    server.removeSetting(setting);
    ArrayList<ConnectionInfo> settings = server.getSettings();
    if (!settings.isEmpty()) {
        // NOTE(review): this stores the ConnectionInfo object itself,
        // while settingSave() stores the setting name - confirm which
        // form consumers of the "setting" attribute expect.
        attributes.put("setting", settings.get(0));
    }
    server.saveProperties(null);
    return "index.do";
}
/**
 * Get the current mime type.
 *
 * @return the mime type
 */
String getMimeType() {
    return mimeType;
}
/**
 * Whether the current response may be cached by the browser.
 */
boolean getCache() {
    return cache;
}
/**
 * Get the web session of the current request.
 */
WebSession getSession() {
    return session;
}
/**
 * Forward a trace message to the server's trace output.
 */
private void trace(String s) {
    server.trace(s);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/WebServer.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.ServerSocket;
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import org.h2.engine.Constants;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.server.Service;
import org.h2.server.ShutdownHandler;
import org.h2.store.fs.FileUtils;
import org.h2.util.DateTimeUtils;
import org.h2.util.JdbcUtils;
import org.h2.util.MathUtils;
import org.h2.util.NetUtils;
import org.h2.util.New;
import org.h2.util.SortedProperties;
import org.h2.util.StringUtils;
import org.h2.util.Tool;
import org.h2.util.Utils;
/**
* The web server is a simple standalone HTTP server that implements the H2
* Console application. It is not optimized for performance.
*/
public class WebServer implements Service {
static final String[][] LANGUAGES = {
{ "cs", "\u010ce\u0161tina" },
{ "de", "Deutsch" },
{ "en", "English" },
{ "es", "Espa\u00f1ol" },
{ "fr", "Fran\u00e7ais" },
{ "hu", "Magyar"},
{ "ko", "\ud55c\uad6d\uc5b4"},
{ "in", "Indonesia"},
{ "it", "Italiano"},
{ "ja", "\u65e5\u672c\u8a9e"},
{ "nl", "Nederlands"},
{ "pl", "Polski"},
{ "pt_BR", "Portugu\u00eas (Brasil)"},
{ "pt_PT", "Portugu\u00eas (Europeu)"},
{ "ru", "\u0440\u0443\u0441\u0441\u043a\u0438\u0439"},
{ "sk", "Slovensky"},
{ "tr", "T\u00fcrk\u00e7e"},
{ "uk", "\u0423\u043A\u0440\u0430\u0457\u043D\u0441\u044C\u043A\u0430"},
{ "zh_CN", "\u4e2d\u6587 (\u7b80\u4f53)"},
{ "zh_TW", "\u4e2d\u6587 (\u7e41\u9ad4)"},
};
private static final String COMMAND_HISTORY = "commandHistory";
private static final String DEFAULT_LANGUAGE = "en";
private static final String[] GENERIC = {
"Generic JNDI Data Source|javax.naming.InitialContext|" +
"java:comp/env/jdbc/Test|sa",
"Generic Teradata|com.teradata.jdbc.TeraDriver|" +
"jdbc:teradata://whomooz/|",
"Generic Snowflake|com.snowflake.client.jdbc.SnowflakeDriver|" +
"jdbc:snowflake://accountName.snowflakecomputing.com|",
"Generic Redshift|com.amazon.redshift.jdbc42.Driver|" +
"jdbc:redshift://endpoint:5439/database|",
"Generic Impala|org.cloudera.impala.jdbc41.Driver|" +
"jdbc:impala://clustername:21050/default|",
"Generic Hive 2|org.apache.hive.jdbc.HiveDriver|" +
"jdbc:hive2://clustername:10000/default|",
"Generic Hive|org.apache.hadoop.hive.jdbc.HiveDriver|" +
"jdbc:hive://clustername:10000/default|",
"Generic Azure SQL|com.microsoft.sqlserver.jdbc.SQLServerDriver|" +
"jdbc:sqlserver://name.database.windows.net:1433|",
"Generic Firebird Server|org.firebirdsql.jdbc.FBDriver|" +
"jdbc:firebirdsql:localhost:c:/temp/firebird/test|sysdba",
"Generic SQLite|org.sqlite.JDBC|" +
"jdbc:sqlite:test|sa",
"Generic DB2|com.ibm.db2.jcc.DB2Driver|" +
"jdbc:db2://localhost/test|" ,
"Generic Oracle|oracle.jdbc.driver.OracleDriver|" +
"jdbc:oracle:thin:@localhost:1521:XE|sa" ,
"Generic MS SQL Server 2000|com.microsoft.jdbc.sqlserver.SQLServerDriver|" +
"jdbc:microsoft:sqlserver://localhost:1433;DatabaseName=sqlexpress|sa",
"Generic MS SQL Server 2005|com.microsoft.sqlserver.jdbc.SQLServerDriver|" +
"jdbc:sqlserver://localhost;DatabaseName=test|sa",
"Generic PostgreSQL|org.postgresql.Driver|" +
"jdbc:postgresql:test|" ,
"Generic MySQL|com.mysql.jdbc.Driver|" +
"jdbc:mysql://localhost:3306/test|" ,
"Generic HSQLDB|org.hsqldb.jdbcDriver|" +
"jdbc:hsqldb:test;hsqldb.default_table_type=cached|sa" ,
"Generic Derby (Server)|org.apache.derby.jdbc.ClientDriver|" +
"jdbc:derby://localhost:1527/test;create=true|sa",
"Generic Derby (Embedded)|org.apache.derby.jdbc.EmbeddedDriver|" +
"jdbc:derby:test;create=true|sa",
"Generic H2 (Server)|org.h2.Driver|" +
"jdbc:h2:tcp://localhost/~/test|sa",
// this will be listed on top for new installations
"Generic H2 (Embedded)|org.h2.Driver|" +
"jdbc:h2:~/test|sa",
};
private static int ticker;
/**
* The session timeout (the default is 30 minutes).
*/
private static final long SESSION_TIMEOUT = SysProperties.CONSOLE_TIMEOUT;
// public static void main(String... args) throws IOException {
// String s = IOUtils.readStringAndClose(new java.io.FileReader(
// // "src/main/org/h2/server/web/res/_text_cs.prop"), -1);
// "src/main/org/h2/res/_messages_cs.prop"), -1);
// System.out.println(StringUtils.javaEncode("..."));
// String[] list = Locale.getISOLanguages();
// for (int i = 0; i < list.length; i++) {
// System.out.print(list[i] + " ");
// }
// System.out.println();
// String l = "de";
// String lang = new java.util.Locale(l).
// getDisplayLanguage(new java.util.Locale(l));
// System.out.println(new java.util.Locale(l).getDisplayLanguage());
// System.out.println(lang);
// java.util.Locale.CHINESE.getDisplayLanguage(java.util.Locale.CHINESE);
// for (int i = 0; i < lang.length(); i++) {
// System.out.println(Integer.toHexString(lang.charAt(i)) + " ");
// }
// }
// private URLClassLoader urlClassLoader;
private int port;
private boolean allowOthers;
private boolean isDaemon;
private final Set<WebThread> running =
Collections.synchronizedSet(new HashSet<WebThread>());
private boolean ssl;
private final HashMap<String, ConnectionInfo> connInfoMap = new HashMap<>();
private long lastTimeoutCheck;
private final HashMap<String, WebSession> sessions = new HashMap<>();
private final HashSet<String> languages = new HashSet<>();
private String startDateTime;
private ServerSocket serverSocket;
private String url;
private ShutdownHandler shutdownHandler;
private Thread listenerThread;
private boolean ifExists;
private boolean trace;
private TranslateThread translateThread;
private boolean allowChunked = true;
private String serverPropertiesDir = Constants.SERVER_PROPERTIES_DIR;
// null means the history is not allowed to be stored
private String commandHistoryString;
/**
 * Read the given file from the file system or from the resources.
 *
 * @param file the file name
 * @return the data, or null if the resource does not exist
 */
byte[] getFile(String file) throws IOException {
    trace("getFile <" + file + ">");
    byte[] data = Utils.getResource("/org/h2/server/web/res/" + file);
    trace(data == null ? "    null" : "    size=" + data.length);
    return data;
}
/**
 * Remove this web thread from the set of running threads.
 *
 * @param t the thread to remove
 */
synchronized void remove(WebThread t) {
    running.remove(t);
}
/**
 * Create a new session id: 16 bytes from a secure random source,
 * rendered as 32 hex characters.
 */
private static String generateSessionId() {
    return StringUtils.convertBytesToHex(MathUtils.secureRandomBytes(16));
}
/**
 * Get the web session object for the given session id.
 *
 * @param sessionId the session id
 * @return the web session or null
 */
WebSession getSession(String sessionId) {
    long now = System.currentTimeMillis();
    // Lazily expire idle sessions, at most once per timeout interval.
    if (lastTimeoutCheck + SESSION_TIMEOUT < now) {
        // Iterate over a snapshot of the keys so removal is safe.
        for (String id : new ArrayList<>(sessions.keySet())) {
            WebSession session = sessions.get(id);
            if (session.lastAccess + SESSION_TIMEOUT < now) {
                trace("timeout for " + id);
                sessions.remove(id);
            }
        }
        lastTimeoutCheck = now;
    }
    WebSession session = sessions.get(sessionId);
    if (session != null) {
        // Touch the session so it is not expired while in use.
        session.lastAccess = System.currentTimeMillis();
    }
    return session;
}
/**
 * Create a new web session id and object.
 *
 * @param hostAddr the host address
 * @return the web session object
 */
WebSession createNewSession(String hostAddr) {
    // Pick an id that is not in use yet.
    String id;
    do {
        id = generateSessionId();
    } while (sessions.get(id) != null);
    WebSession session = new WebSession(this);
    session.lastAccess = System.currentTimeMillis();
    session.put("sessionId", id);
    session.put("ip", hostAddr);
    session.put("language", DEFAULT_LANGUAGE);
    session.put("frame-border", "0");
    session.put("frameset-border", "4");
    sessions.put(id, session);
    // Always read the English translation first, so that untranslated
    // text still appears, at least in English.
    readTranslations(session, DEFAULT_LANGUAGE);
    // Return via getSession() so lastAccess bookkeeping stays in one place.
    return getSession(id);
}
/**
 * Get the server start time, formatted once (lazily) as an RFC-1123
 * style UTC timestamp with an English locale.
 */
String getStartDateTime() {
    if (startDateTime == null) {
        SimpleDateFormat format = new SimpleDateFormat(
                "EEE, d MMM yyyy HH:mm:ss z", new Locale("en", ""));
        format.setTimeZone(DateTimeUtils.UTC);
        startDateTime = format.format(System.currentTimeMillis());
    }
    return startDateTime;
}
/**
 * Initialize the web server: load the stored properties and then apply
 * the command line arguments, which take precedence.
 *
 * @param args the command line arguments
 */
@Override
public void init(String... args) {
    // set the serverPropertiesDir, because it's used in loadProperties()
    for (int i = 0; args != null && i < args.length; i++) {
        if ("-properties".equals(args[i])) {
            serverPropertiesDir = args[++i];
        }
    }
    // Defaults come from the properties file ...
    Properties prop = loadProperties();
    port = SortedProperties.getIntProperty(prop,
            "webPort", Constants.DEFAULT_HTTP_PORT);
    ssl = SortedProperties.getBooleanProperty(prop,
            "webSSL", false);
    allowOthers = SortedProperties.getBooleanProperty(prop,
            "webAllowOthers", false);
    commandHistoryString = prop.getProperty(COMMAND_HISTORY);
    // ... and are overridden by command line options.
    for (int i = 0; args != null && i < args.length; i++) {
        String a = args[i];
        if (Tool.isOption(a, "-webPort")) {
            port = Integer.decode(args[++i]);
        } else if (Tool.isOption(a, "-webSSL")) {
            ssl = true;
        } else if (Tool.isOption(a, "-webAllowOthers")) {
            allowOthers = true;
        } else if (Tool.isOption(a, "-webDaemon")) {
            isDaemon = true;
        } else if (Tool.isOption(a, "-baseDir")) {
            String baseDir = args[++i];
            SysProperties.setBaseDir(baseDir);
        } else if (Tool.isOption(a, "-ifExists")) {
            ifExists = true;
        } else if (Tool.isOption(a, "-properties")) {
            // already set
            i++;
        } else if (Tool.isOption(a, "-trace")) {
            trace = true;
        }
    }
//            if (driverList != null) {
//                try {
//                    String[] drivers =
//                        StringUtils.arraySplit(driverList, ',', false);
//                    URL[] urls = new URL[drivers.length];
//                    for(int i=0; i<drivers.length; i++) {
//                        urls[i] = new URL(drivers[i]);
//                    }
//                    urlClassLoader = URLClassLoader.newInstance(urls);
//                } catch (MalformedURLException e) {
//                    TraceSystem.traceThrowable(e);
//                }
//            }
    // Cache the set of supported language codes for supportsLanguage().
    for (String[] lang : LANGUAGES) {
        languages.add(lang[0]);
    }
    updateURL();
}
/**
 * Get the URL of the console, refreshing it first in case the local
 * address or port changed.
 */
@Override
public String getURL() {
    updateURL();
    return url;
}
// Rebuild the console URL from the current protocol, local address
// and port.
private void updateURL() {
    try {
        url = (ssl ? "https" : "http") + "://" +
                NetUtils.getLocalAddress() + ":" + port;
    } catch (NoClassDefFoundError e) {
        // Google App Engine does not allow java.net.InetAddress
    }
}
/**
 * Open the server socket. The port is re-read afterwards because a
 * configured port of 0 lets the OS pick a free one.
 */
@Override
public void start() {
    serverSocket = NetUtils.createServerSocket(port, ssl);
    port = serverSocket.getLocalPort();
    updateURL();
}
/**
 * Accept connections in a loop, starting one WebThread per client,
 * until the server socket is closed (stop() sets it to null, which
 * also makes accept() throw and end the loop).
 */
@Override
public void listen() {
    this.listenerThread = Thread.currentThread();
    try {
        while (serverSocket != null) {
            Socket s = serverSocket.accept();
            WebThread c = new WebThread(s, this);
            running.add(c);
            c.start();
        }
    } catch (Exception e) {
        trace(e.toString());
    }
}
/**
 * Check whether the server is accepting connections by opening a
 * loopback socket to its own port.
 *
 * @param traceError whether a failed check should be traced
 * @return true if the server answered
 */
@Override
public boolean isRunning(boolean traceError) {
    if (serverSocket == null) {
        return false;
    }
    try {
        Socket s = NetUtils.createLoopbackSocket(port, ssl);
        s.close();
        return true;
    } catch (Exception e) {
        if (traceError) {
            traceError(e);
        }
        return false;
    }
}
/**
 * Whether the server has been stopped (the server socket released).
 */
public boolean isStopped() {
    return serverSocket == null;
}
/**
 * Stop the server: close the server socket, wait briefly for the
 * listener thread, then close all sessions and stop all client threads.
 */
@Override
public void stop() {
    if (serverSocket != null) {
        try {
            serverSocket.close();
        } catch (IOException e) {
            traceError(e);
        }
        serverSocket = null;
    }
    if (listenerThread != null) {
        try {
            listenerThread.join(1000);
        } catch (InterruptedException e) {
            DbException.traceThrowable(e);
        }
    }
    // TODO server: using a boolean 'now' argument? a timeout?
    // Iterate over snapshots: closing sessions/threads mutates the
    // underlying collections.
    for (WebSession session : new ArrayList<>(sessions.values())) {
        session.close();
    }
    for (WebThread c : new ArrayList<>(running)) {
        try {
            c.stopNow();
            c.join(100);
        } catch (Exception e) {
            traceError(e);
        }
    }
}
/**
 * Write trace information if trace is enabled.
 *
 * @param s the message to write
 */
void trace(String s) {
    if (trace) {
        System.out.println(s);
    }
}
/**
 * Write the stack trace if trace is enabled.
 *
 * @param e the exception
 */
void traceError(Throwable e) {
    if (trace) {
        e.printStackTrace();
    }
}
/**
 * Check if this language is supported / translated.
 *
 * @param language the language
 * @return true if a translation is available
 */
boolean supportsLanguage(String language) {
    return languages.contains(language);
}
/**
 * Read the translation for this language and save them in the 'text'
 * property of this session.
 *
 * @param session the session
 * @param language the language
 */
void readTranslations(WebSession session, String language) {
    Properties text = new Properties();
    try {
        trace("translation: " + language);
        // NOTE(review): getFile() may return null for an unknown
        // language, which would cause an NPE below - confirm callers
        // only pass languages from LANGUAGES.
        byte[] trans = getFile("_text_" + language + ".prop");
        // Fixed: decode explicitly as UTF-8 for the trace output too;
        // previously new String(trans) used the platform default
        // charset, inconsistent with the UTF-8 decode that follows.
        String lines = new String(trans, StandardCharsets.UTF_8);
        trace("  " + lines);
        text = SortedProperties.fromLines(lines);
        // remove starting # (if not translated yet)
        for (Entry<Object, Object> entry : text.entrySet()) {
            String value = (String) entry.getValue();
            if (value.startsWith("#")) {
                entry.setValue(value.substring(1));
            }
        }
    } catch (IOException e) {
        DbException.traceThrowable(e);
    }
    session.put("text", new HashMap<>(text));
}
/**
 * Collect the meta data of all active sessions.
 *
 * @return one info map per session
 */
ArrayList<HashMap<String, Object>> getSessions() {
    ArrayList<HashMap<String, Object>> infoList = New.arrayList();
    for (WebSession session : sessions.values()) {
        infoList.add(session.getInfo());
    }
    return infoList;
}
/**
 * Get the type of this service.
 */
@Override
public String getType() {
    return "Web Console";
}
/**
 * Get the human-readable name of this service.
 */
@Override
public String getName() {
    return "H2 Console Server";
}
/**
 * Allow or disallow connections from other hosts.
 *
 * @param b the new value
 */
void setAllowOthers(boolean b) {
    this.allowOthers = b;
}
/**
 * Check whether connections from other hosts are allowed.
 */
@Override
public boolean getAllowOthers() {
    return this.allowOthers;
}
/**
 * Enable or disable SSL for the server socket.
 *
 * @param b the new value
 */
void setSSL(boolean b) {
    this.ssl = b;
}
/**
 * Set the port the server listens on.
 *
 * @param port the port number
 */
void setPort(int port) {
    this.port = port;
}
/**
 * Check whether SSL is enabled.
 */
boolean getSSL() {
    return this.ssl;
}
/**
 * Get the port the server listens on.
 */
@Override
public int getPort() {
    return this.port;
}
/**
 * Check whether the command history may be persisted.
 * A null history string marks the feature as disabled.
 */
public boolean isCommandHistoryAllowed() {
    return commandHistoryString != null;
}
/**
 * Enable or disable persisting the command history.
 *
 * @param allowed whether storing the history is allowed
 */
public void setCommandHistoryAllowed(boolean allowed) {
    if (!allowed) {
        // null marks the history as disabled
        commandHistoryString = null;
    } else if (commandHistoryString == null) {
        commandHistoryString = "";
    }
}
/**
 * Get the saved command history. The history is stored as a single
 * string where commands are separated by non-escaped semicolons, and a
 * backslash escapes the following character (see saveCommandHistoryList).
 *
 * @return the list of commands, or an empty list if history is disabled
 */
public ArrayList<String> getCommandHistoryList() {
    ArrayList<String> result = New.arrayList();
    if (commandHistoryString == null) {
        // command history is disabled
        return result;
    }
    // Split the commandHistoryString on non-escaped semicolons
    // and unescape it.
    StringBuilder sb = new StringBuilder();
    for (int end = 0;; end++) {
        if (end == commandHistoryString.length() ||
                commandHistoryString.charAt(end) == ';') {
            if (sb.length() > 0) {
                result.add(sb.toString());
                sb.delete(0, sb.length());
            }
            if (end == commandHistoryString.length()) {
                break;
            }
        } else if (commandHistoryString.charAt(end) == '\\' &&
                end < commandHistoryString.length() - 1) {
            // escaped character: append the character after the backslash
            sb.append(commandHistoryString.charAt(++end));
        } else {
            sb.append(commandHistoryString.charAt(end));
        }
    }
    return result;
}
/**
 * Save the command history to the properties file. Commands are joined
 * with ';'; backslash and ';' inside a command are escaped with a
 * backslash (the inverse of getCommandHistoryList).
 *
 * @param commandHistory the history
 */
public void saveCommandHistoryList(ArrayList<String> commandHistory) {
    StringBuilder buff = new StringBuilder();
    boolean first = true;
    for (String command : commandHistory) {
        if (first) {
            first = false;
        } else {
            buff.append(';');
        }
        buff.append(command.replace("\\", "\\\\").replace(";", "\\;"));
    }
    commandHistoryString = buff.toString();
    saveProperties(null);
}
/**
 * Get the connection information for this setting.
 *
 * @param name the setting name
 * @return the connection information, or null if unknown
 */
ConnectionInfo getSetting(String name) {
    return connInfoMap.get(name);
}
/**
 * Store (or replace) a connection information setting and stamp it with
 * the next ticker value.
 *
 * @param info the connection information
 */
void updateSetting(ConnectionInfo info) {
    info.lastAccess = ticker++;
    connInfoMap.put(info.name, info);
}
/**
 * Remove a connection information setting from the list.
 *
 * @param name the setting to remove
 */
void removeSetting(String name) {
    connInfoMap.remove(name);
}
/**
 * Load the server properties file. Returns empty properties when the
 * directory is the literal string "null" or when loading fails.
 */
private Properties loadProperties() {
    if ("null".equals(serverPropertiesDir)) {
        return new Properties();
    }
    try {
        String fileName = serverPropertiesDir + "/" + Constants.SERVER_PROPERTIES_NAME;
        return SortedProperties.loadProperties(fileName);
    } catch (Exception e) {
        DbException.traceThrowable(e);
        return new Properties();
    }
}
/**
 * Get the list of connection information setting names.
 *
 * @return the connection info names, in the order of getSettings()
 */
String[] getSettingNames() {
    ArrayList<ConnectionInfo> settings = getSettings();
    String[] names = new String[settings.size()];
    int i = 0;
    for (ConnectionInfo info : settings) {
        names[i++] = info.name;
    }
    return names;
}
/**
 * Get the list of connection info objects. On first use, the settings
 * are loaded from the properties file, or initialized from the GENERIC
 * templates if no saved settings exist.
 *
 * @return the sorted list
 */
synchronized ArrayList<ConnectionInfo> getSettings() {
    ArrayList<ConnectionInfo> settings = New.arrayList();
    if (connInfoMap.size() == 0) {
        Properties prop = loadProperties();
        if (prop.size() == 0) {
            // no saved settings: fall back to the built-in templates
            for (String gen : GENERIC) {
                ConnectionInfo info = new ConnectionInfo(gen);
                settings.add(info);
                updateSetting(info);
            }
        } else {
            // entries are stored under the keys "0", "1", ... without gaps;
            // stop at the first missing index
            for (int i = 0;; i++) {
                String data = prop.getProperty(String.valueOf(i));
                if (data == null) {
                    break;
                }
                ConnectionInfo info = new ConnectionInfo(data);
                settings.add(info);
                updateSetting(info);
            }
        }
    } else {
        settings.addAll(connInfoMap.values());
    }
    Collections.sort(settings);
    return settings;
}
/**
 * Save the settings to the properties file.
 *
 * @param prop null or the properties webPort, webAllowOthers, and webSSL
 */
synchronized void saveProperties(Properties prop) {
    try {
        if (prop == null) {
            // no properties passed in: keep the previously stored server
            // settings, falling back to the current in-memory values
            Properties old = loadProperties();
            prop = new SortedProperties();
            prop.setProperty("webPort",
                    "" + SortedProperties.getIntProperty(old,
                    "webPort", port));
            prop.setProperty("webAllowOthers",
                    "" + SortedProperties.getBooleanProperty(old,
                    "webAllowOthers", allowOthers));
            prop.setProperty("webSSL",
                    "" + SortedProperties.getBooleanProperty(old,
                    "webSSL", ssl));
            if (commandHistoryString != null) {
                prop.setProperty(COMMAND_HISTORY, commandHistoryString);
            }
        }
        ArrayList<ConnectionInfo> settings = getSettings();
        int len = settings.size();
        for (int i = 0; i < len; i++) {
            ConnectionInfo info = settings.get(i);
            if (info != null) {
                // entries are stored under numeric keys, in reverse order
                // of the sorted settings list
                prop.setProperty(String.valueOf(len - i - 1), info.getString());
            }
        }
        if (!"null".equals(serverPropertiesDir)) {
            // try-with-resources: the stream is closed even if store() fails
            // (the original code leaked the stream on exception)
            try (OutputStream out = FileUtils.newOutputStream(
                    serverPropertiesDir + "/" + Constants.SERVER_PROPERTIES_NAME, false)) {
                prop.store(out, "H2 Server Properties");
            }
        }
    } catch (Exception e) {
        DbException.traceThrowable(e);
    }
}
/**
 * Open a database connection.
 *
 * @param driver the driver class name
 * @param databaseUrl the database URL
 * @param user the user name
 * @param password the password
 * @return the database connection
 */
Connection getConnection(String driver, String databaseUrl, String user,
        String password) throws SQLException {
    driver = driver.trim();
    databaseUrl = databaseUrl.trim();
    org.h2.Driver.load();
    Properties p = new Properties();
    p.setProperty("user", user.trim());
    // do not trim the password, otherwise an
    // encrypted H2 database with empty user password doesn't work
    p.setProperty("password", password);
    if (databaseUrl.startsWith("jdbc:h2:")) {
        if (ifExists) {
            // do not implicitly create new databases from the console
            databaseUrl += ";IFEXISTS=TRUE";
        }
        // PostgreSQL would throw a NullPointerException
        // if it is loaded before the H2 driver
        // because it can't deal with non-String objects in the connection
        // Properties
        return org.h2.Driver.load().connect(databaseUrl, p);
    }
    // try {
    // Driver dr = (Driver) urlClassLoader.
    // loadClass(driver).newInstance();
    // return dr.connect(url, p);
    // } catch(ClassNotFoundException e2) {
    // throw e2;
    // }
    return JdbcUtils.getConnection(driver, databaseUrl, p);
}
/**
 * Shut down the web server via the registered shutdown handler, if any.
 */
void shutdown() {
    ShutdownHandler handler = shutdownHandler;
    if (handler != null) {
        handler.shutdown();
    }
}
/**
 * Register the handler that is called when the server should shut down.
 *
 * @param shutdownHandler the handler
 */
public void setShutdownHandler(ShutdownHandler shutdownHandler) {
    this.shutdownHandler = shutdownHandler;
}
/**
 * Create a session with a given connection.
 *
 * @param conn the connection
 * @return the URL of the web site to access this connection
 */
public String addSession(Connection conn) throws SQLException {
    WebSession session = createNewSession("local");
    session.setShutdownServerOnDisconnect();
    session.setConnection(conn);
    session.put("url", conn.getMetaData().getURL());
    String sessionId = (String) session.get("sessionId");
    return url + "/frame.jsp?jsessionid=" + sessionId;
}
/**
 * The translate thread reads and writes the file translation.properties
 * once a second.
 */
private class TranslateThread extends Thread {

    private final File file = new File("translation.properties");
    private final Map<Object, Object> translation;
    private volatile boolean stopNow;

    TranslateThread(Map<Object, Object> translation) {
        this.translation = translation;
    }

    /**
     * Get the absolute path of the translation file.
     */
    public String getFileName() {
        return file.getAbsolutePath();
    }

    /**
     * Ask the thread to stop and wait until it has terminated.
     */
    public void stopNow() {
        this.stopNow = true;
        try {
            join();
        } catch (InterruptedException e) {
            // ignore
        }
    }

    @Override
    public void run() {
        while (!stopNow) {
            try {
                SortedProperties sp = new SortedProperties();
                if (file.exists()) {
                    // read the translation file and merge it into the map;
                    // close the stream to avoid leaking a file handle on
                    // every iteration (the original code never closed it)
                    try (InputStream in = FileUtils.newInputStream(file.getName())) {
                        sp.load(in);
                    }
                    translation.putAll(sp);
                } else {
                    // create the file from the current translation map
                    sp.putAll(translation);
                    try (OutputStream out = FileUtils.newOutputStream(file.getName(), false)) {
                        sp.store(out, "Translation");
                    }
                }
                Thread.sleep(1000);
            } catch (Exception e) {
                traceError(e);
            }
        }
    }
}
/**
 * Start the translation thread that reads the file once a second.
 *
 * @param translation the translation map
 * @return the name of the file to translate
 */
String startTranslate(Map<Object, Object> translation) {
    TranslateThread previous = translateThread;
    if (previous != null) {
        previous.stopNow();
    }
    TranslateThread newThread = new TranslateThread(translation);
    newThread.setDaemon(true);
    newThread.start();
    translateThread = newThread;
    return newThread.getFileName();
}
/**
 * Check whether the server runs as a daemon.
 */
@Override
public boolean isDaemon() {
    return this.isDaemon;
}
/**
 * Enable or disable chunked transfer encoding for responses.
 *
 * @param allowChunked the new value
 */
void setAllowChunked(boolean allowChunked) {
    this.allowChunked = allowChunked;
}
/**
 * Check whether chunked transfer encoding is allowed.
 */
boolean getAllowChunked() {
    return this.allowChunked;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/WebServlet.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Properties;
import javax.servlet.ServletConfig;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.h2.util.New;
/**
* This servlet lets the H2 Console be used in a standard servlet container
* such as Tomcat or Jetty.
*/
public class WebServlet extends HttpServlet {

    private static final long serialVersionUID = 1L;

    // the embedded console server doing the actual work; transient because
    // the servlet container may serialize servlet instances
    private transient WebServer server;

    @Override
    public void init() {
        ServletConfig config = getServletConfig();
        Enumeration<?> en = config.getInitParameterNames();
        ArrayList<String> list = New.arrayList();
        while (en.hasMoreElements()) {
            String name = en.nextElement().toString();
            String value = config.getInitParameter(name);
            // convert servlet init parameters into command line style
            // arguments ("-name" [value]) for WebServer.init
            if (!name.startsWith("-")) {
                name = "-" + name;
            }
            list.add(name);
            if (value.length() > 0) {
                list.add(value);
            }
        }
        String[] args = list.toArray(new String[0]);
        server = new WebServer();
        // the servlet container handles the HTTP transport,
        // so chunked streaming is not used here
        server.setAllowChunked(false);
        server.init(args);
    }

    @Override
    public void destroy() {
        server.stop();
    }

    /**
     * Check if the request may be served: either remote connections are
     * allowed, or the request must come from a loopback address.
     *
     * @param req the request
     * @return true if the client may connect
     */
    private boolean allow(HttpServletRequest req) {
        if (server.getAllowOthers()) {
            return true;
        }
        String addr = req.getRemoteAddr();
        try {
            InetAddress address = InetAddress.getByName(addr);
            return address.isLoopbackAddress();
        } catch (UnknownHostException e) {
            return false;
        } catch (NoClassDefFoundError e) {
            // Google App Engine does not allow java.net.InetAddress
            return false;
        }
    }

    /**
     * Map the requested file to the file that is actually served:
     * disallowed clients get the "not allowed" page, and an empty request
     * path maps to the index page.
     *
     * @param req the request
     * @param requestedFile the requested file name
     * @return the name of the file to serve
     */
    private String getAllowedFile(HttpServletRequest req, String requestedFile) {
        if (!allow(req)) {
            return "notAllowed.jsp";
        }
        if (requestedFile.length() == 0) {
            return "index.do";
        }
        return requestedFile;
    }

    @Override
    public void doGet(HttpServletRequest req, HttpServletResponse resp)
            throws IOException {
        req.setCharacterEncoding("utf-8");
        String file = req.getPathInfo();
        if (file == null) {
            // redirect to get a trailing '/' so relative links resolve
            resp.sendRedirect(req.getRequestURI() + "/");
            return;
        } else if (file.startsWith("/")) {
            file = file.substring(1);
        }
        file = getAllowedFile(req, file);
        // extract the request attributes
        Properties attributes = new Properties();
        Enumeration<?> en = req.getAttributeNames();
        while (en.hasMoreElements()) {
            String name = en.nextElement().toString();
            String value = req.getAttribute(name).toString();
            attributes.put(name, value);
        }
        // request parameters are added after (and can override) attributes
        en = req.getParameterNames();
        while (en.hasMoreElements()) {
            String name = en.nextElement().toString();
            String value = req.getParameter(name);
            attributes.put(name, value);
        }
        WebSession session = null;
        String sessionId = attributes.getProperty("jsessionid");
        if (sessionId != null) {
            session = server.getSession(sessionId);
        }
        WebApp app = new WebApp(server);
        app.setSession(session, attributes);
        String ifModifiedSince = req.getHeader("if-modified-since");
        String hostAddr = req.getRemoteAddr();
        file = app.processRequest(file, hostAddr);
        session = app.getSession();
        String mimeType = app.getMimeType();
        boolean cache = app.getCache();
        // cacheable files use the server start time as the version stamp
        if (cache && server.getStartDateTime().equals(ifModifiedSince)) {
            resp.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
            return;
        }
        byte[] bytes = server.getFile(file);
        if (bytes == null) {
            resp.sendError(HttpServletResponse.SC_NOT_FOUND);
            bytes = ("File not found: " + file).getBytes(StandardCharsets.UTF_8);
        } else {
            if (session != null && file.endsWith(".jsp")) {
                // expand the server-side template using the session data
                String page = new String(bytes, StandardCharsets.UTF_8);
                page = PageParser.parse(page, session.map);
                bytes = page.getBytes(StandardCharsets.UTF_8);
            }
            resp.setContentType(mimeType);
            if (!cache) {
                resp.setHeader("Cache-Control", "no-cache");
            } else {
                resp.setHeader("Cache-Control", "max-age=10");
                resp.setHeader("Last-Modified", server.getStartDateTime());
            }
        }
        if (bytes != null) {
            ServletOutputStream out = resp.getOutputStream();
            out.write(bytes);
        }
    }

    @Override
    public void doPost(HttpServletRequest req, HttpServletResponse resp)
            throws IOException {
        // POST requests are handled exactly like GET requests
        doGet(req, resp);
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/WebSession.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Locale;
import org.h2.bnf.Bnf;
import org.h2.bnf.context.DbContents;
import org.h2.bnf.context.DbContextRule;
import org.h2.message.DbException;
/**
* The web session keeps all data of a user session.
* This class is used by the H2 Console.
*/
class WebSession {

    // maximum number of entries kept in the command history
    private static final int MAX_HISTORY = 1000;

    /**
     * The last time this client sent a request.
     */
    long lastAccess;

    /**
     * The session attribute map.
     */
    final HashMap<String, Object> map = new HashMap<>();

    /**
     * The current locale.
     */
    Locale locale;

    /**
     * The currently executing statement.
     */
    Statement executingStatement;

    /**
     * The current updatable result set.
     */
    ResultSet result;

    private final WebServer server;

    private final ArrayList<String> commandHistory;

    private Connection conn;
    private DatabaseMetaData meta;
    private DbContents contents = new DbContents();
    private Bnf bnf;
    // if set, the whole server is shut down when this session disconnects
    private boolean shutdownServerOnDisconnect;

    WebSession(WebServer server) {
        this.server = server;
        // This must be stored in the session rather than in the server.
        // Otherwise, one client could allow
        // saving history for others (insecure).
        this.commandHistory = server.getCommandHistoryList();
    }

    /**
     * Put an attribute value in the map.
     *
     * @param key the key
     * @param value the new value
     */
    void put(String key, Object value) {
        map.put(key, value);
    }

    /**
     * Get the value for the given key. The special key "sessions" returns
     * the server's current session list instead of a map entry.
     *
     * @param key the key
     * @return the value
     */
    Object get(String key) {
        if ("sessions".equals(key)) {
            return server.getSessions();
        }
        return map.get(key);
    }

    /**
     * Remove a session attribute from the map.
     *
     * @param key the key
     */
    void remove(String key) {
        map.remove(key);
    }

    /**
     * Get the BNF object.
     *
     * @return the BNF object
     */
    Bnf getBnf() {
        return bnf;
    }

    /**
     * Load the SQL grammar BNF and wire in the database-specific context
     * rules used for auto-completion.
     */
    void loadBnf() {
        try {
            Bnf newBnf = Bnf.getInstance(null);
            DbContextRule columnRule =
                    new DbContextRule(contents, DbContextRule.COLUMN);
            DbContextRule newAliasRule =
                    new DbContextRule(contents, DbContextRule.NEW_TABLE_ALIAS);
            DbContextRule aliasRule =
                    new DbContextRule(contents, DbContextRule.TABLE_ALIAS);
            DbContextRule tableRule =
                    new DbContextRule(contents, DbContextRule.TABLE);
            DbContextRule schemaRule =
                    new DbContextRule(contents, DbContextRule.SCHEMA);
            DbContextRule columnAliasRule =
                    new DbContextRule(contents, DbContextRule.COLUMN_ALIAS);
            DbContextRule procedure =
                    new DbContextRule(contents, DbContextRule.PROCEDURE);
            newBnf.updateTopic("procedure", procedure);
            newBnf.updateTopic("column_name", columnRule);
            newBnf.updateTopic("new_table_alias", newAliasRule);
            newBnf.updateTopic("table_alias", aliasRule);
            newBnf.updateTopic("column_alias", columnAliasRule);
            newBnf.updateTopic("table_name", tableRule);
            newBnf.updateTopic("schema_name", schemaRule);
            newBnf.linkStatements();
            // only publish the new BNF once it is fully initialized
            bnf = newBnf;
        } catch (Exception e) {
            // ok we don't have the bnf
            server.traceError(e);
        }
    }

    /**
     * Get the SQL statement from history.
     *
     * @param id the history id
     * @return the SQL statement
     */
    String getCommand(int id) {
        return commandHistory.get(id);
    }

    /**
     * Add a SQL statement to the history. A statement that is already in
     * the history is moved to the end.
     *
     * @param sql the SQL statement
     */
    void addCommand(String sql) {
        if (sql == null) {
            return;
        }
        sql = sql.trim();
        if (sql.length() == 0) {
            return;
        }
        if (commandHistory.size() > MAX_HISTORY) {
            // drop the oldest entry
            commandHistory.remove(0);
        }
        int idx = commandHistory.indexOf(sql);
        if (idx >= 0) {
            commandHistory.remove(idx);
        }
        commandHistory.add(sql);
        if (server.isCommandHistoryAllowed()) {
            server.saveCommandHistoryList(commandHistory);
        }
    }

    /**
     * Get the list of SQL statements in the history.
     *
     * @return the commands
     */
    ArrayList<String> getCommandHistory() {
        return commandHistory;
    }

    /**
     * Update session meta data information and get the information in a map.
     *
     * @return a map containing the session meta data
     */
    HashMap<String, Object> getInfo() {
        HashMap<String, Object> m = new HashMap<>(map.size() + 5);
        m.putAll(map);
        m.put("lastAccess", new Timestamp(lastAccess).toString());
        try {
            m.put("url", conn == null ?
                    "${text.admin.notConnected}" : conn.getMetaData().getURL());
            m.put("user", conn == null ?
                    "-" : conn.getMetaData().getUserName());
            // NOTE(review): this reports the first (oldest) history entry,
            // while addCommand appends new entries at the end - confirm
            // that "lastQuery" is really meant to be the oldest one
            m.put("lastQuery", commandHistory.isEmpty() ?
                    "" : commandHistory.get(0));
            m.put("executing", executingStatement == null ?
                    "${text.admin.no}" : "${text.admin.yes}");
        } catch (SQLException e) {
            DbException.traceThrowable(e);
        }
        return m;
    }

    /**
     * Set (or clear) the current connection and cache its meta data.
     *
     * @param conn the new connection, or null to disconnect
     */
    void setConnection(Connection conn) throws SQLException {
        this.conn = conn;
        if (conn == null) {
            meta = null;
        } else {
            meta = conn.getMetaData();
        }
        // reset the cached database contents for the new connection
        contents = new DbContents();
    }

    DatabaseMetaData getMetaData() {
        return meta;
    }

    Connection getConnection() {
        return conn;
    }

    DbContents getContents() {
        return contents;
    }

    /**
     * Shutdown the server when disconnecting.
     */
    void setShutdownServerOnDisconnect() {
        this.shutdownServerOnDisconnect = true;
    }

    boolean getShutdownServerOnDisconnect() {
        return shutdownServerOnDisconnect;
    }

    /**
     * Close the connection and stop the statement if one is currently
     * executing.
     */
    void close() {
        if (executingStatement != null) {
            try {
                executingStatement.cancel();
            } catch (Exception e) {
                // ignore
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (Exception e) {
                // ignore
            }
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/server/web/WebThread.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.server.web;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.Locale;
import java.util.Properties;
import java.util.StringTokenizer;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.util.IOUtils;
import org.h2.util.NetUtils;
import org.h2.util.StringUtils;
import org.h2.util.Utils;
/**
* For each connection to a session, an object of this class is created.
* This class is used by the H2 Console.
*/
class WebThread extends WebApp implements Runnable {

    // buffered stream to the client; protected so WebApp subclass code
    // can write asynchronous responses
    protected OutputStream output;

    protected final Socket socket;

    private final Thread thread;
    private InputStream input;
    // value of the last "if-modified-since" request header, or null
    private String ifModifiedSince;

    WebThread(Socket socket, WebServer server) {
        super(server);
        this.socket = socket;
        thread = new Thread(this, "H2 Console thread");
    }

    /**
     * Start the thread.
     */
    void start() {
        thread.start();
    }

    /**
     * Wait until the thread is stopped.
     *
     * @param millis the maximum number of milliseconds to wait
     */
    void join(int millis) throws InterruptedException {
        thread.join(millis);
    }

    /**
     * Close the connection now.
     */
    void stopNow() {
        this.stop = true;
        try {
            socket.close();
        } catch (IOException e) {
            // ignore
        }
    }

    /**
     * Map the requested file to the file that is actually served:
     * disallowed clients get the "not allowed" page, and an empty request
     * maps to the index page.
     *
     * @param requestedFile the requested file name
     * @return the name of the file to serve
     */
    private String getAllowedFile(String requestedFile) {
        if (!allow()) {
            return "notAllowed.jsp";
        }
        if (requestedFile.length() == 0) {
            return "index.do";
        }
        return requestedFile;
    }

    @Override
    public void run() {
        try {
            input = new BufferedInputStream(socket.getInputStream());
            output = new BufferedOutputStream(socket.getOutputStream());
            // serve requests on this connection until keep-alive ends
            // or the thread is stopped
            while (!stop) {
                if (!process()) {
                    break;
                }
            }
        } catch (Exception e) {
            DbException.traceThrowable(e);
        }
        IOUtils.closeSilently(output);
        IOUtils.closeSilently(input);
        try {
            socket.close();
        } catch (IOException e) {
            // ignore
        } finally {
            // unregister this thread from the server
            server.remove(this);
        }
    }

    /**
     * Read and answer one HTTP request.
     *
     * @return true if the connection should be kept alive
     */
    @SuppressWarnings("unchecked")
    private boolean process() throws IOException {
        boolean keepAlive = false;
        String head = readHeaderLine();
        if (head.startsWith("GET ") || head.startsWith("POST ")) {
            // request line format: "GET /file?params HTTP/1.1"
            int begin = head.indexOf('/'), end = head.lastIndexOf(' ');
            String file;
            if (begin < 0 || end < begin) {
                file = "";
            } else {
                file = head.substring(begin + 1, end).trim();
            }
            trace(head + ": " + file);
            file = getAllowedFile(file);
            attributes = new Properties();
            int paramIndex = file.indexOf('?');
            session = null;
            if (paramIndex >= 0) {
                // parse the query string and resolve the session id
                String attrib = file.substring(paramIndex + 1);
                parseAttributes(attrib);
                String sessionId = attributes.getProperty("jsessionid");
                file = file.substring(0, paramIndex);
                session = server.getSession(sessionId);
            }
            keepAlive = parseHeader();
            String hostAddr = socket.getInetAddress().getHostAddress();
            file = processRequest(file, hostAddr);
            if (file.length() == 0) {
                // asynchronous request
                return true;
            }
            String message;
            byte[] bytes;
            if (cache && ifModifiedSince != null &&
                    ifModifiedSince.equals(server.getStartDateTime())) {
                // the client already has the current version
                bytes = null;
                message = "HTTP/1.1 304 Not Modified\r\n";
            } else {
                bytes = server.getFile(file);
                if (bytes == null) {
                    message = "HTTP/1.1 404 Not Found\r\n";
                    bytes = ("File not found: " + file).getBytes(StandardCharsets.UTF_8);
                    message += "Content-Length: " + bytes.length + "\r\n";
                } else {
                    if (session != null && file.endsWith(".jsp")) {
                        String page = new String(bytes, StandardCharsets.UTF_8);
                        if (SysProperties.CONSOLE_STREAM) {
                            // stream prepared fragments using HTTP chunked
                            // transfer encoding
                            Iterator<String> it = (Iterator<String>) session.map.remove("chunks");
                            if (it != null) {
                                message = "HTTP/1.1 200 OK\r\n";
                                message += "Content-Type: " + mimeType + "\r\n";
                                message += "Cache-Control: no-cache\r\n";
                                message += "Transfer-Encoding: chunked\r\n";
                                message += "\r\n";
                                trace(message);
                                output.write(message.getBytes());
                                while (it.hasNext()) {
                                    String s = it.next();
                                    s = PageParser.parse(s, session.map);
                                    bytes = s.getBytes(StandardCharsets.UTF_8);
                                    if (bytes.length == 0) {
                                        // a zero-length chunk would end
                                        // the chunked stream prematurely
                                        continue;
                                    }
                                    // chunk = hex length, CRLF, data, CRLF
                                    output.write(Integer.toHexString(bytes.length).getBytes());
                                    output.write("\r\n".getBytes());
                                    output.write(bytes);
                                    output.write("\r\n".getBytes());
                                    output.flush();
                                }
                                // terminating zero-length chunk
                                output.write("0\r\n\r\n".getBytes());
                                output.flush();
                                return keepAlive;
                            }
                        }
                        // expand the server-side template
                        page = PageParser.parse(page, session.map);
                        bytes = page.getBytes(StandardCharsets.UTF_8);
                    }
                    message = "HTTP/1.1 200 OK\r\n";
                    message += "Content-Type: " + mimeType + "\r\n";
                    if (!cache) {
                        message += "Cache-Control: no-cache\r\n";
                    } else {
                        message += "Cache-Control: max-age=10\r\n";
                        message += "Last-Modified: " + server.getStartDateTime() + "\r\n";
                    }
                    message += "Content-Length: " + bytes.length + "\r\n";
                }
            }
            message += "\r\n";
            trace(message);
            output.write(message.getBytes());
            if (bytes != null) {
                output.write(bytes);
            }
            output.flush();
        }
        return keepAlive;
    }

    /**
     * Read one header line, terminated by CR+LF or a bare LF.
     *
     * @return the line, or null if the line was empty
     */
    private String readHeaderLine() throws IOException {
        StringBuilder buff = new StringBuilder();
        while (true) {
            int c = input.read();
            if (c == -1) {
                throw new IOException("Unexpected EOF");
            } else if (c == '\r') {
                if (input.read() == '\n') {
                    return buff.length() > 0 ? buff.toString() : null;
                }
            } else if (c == '\n') {
                return buff.length() > 0 ? buff.toString() : null;
            } else {
                buff.append((char) c);
            }
        }
    }

    /**
     * Parse a query string / form body of the form name1=value1&name2=value2
     * into the attributes map. Values are URL-decoded; names are used as-is.
     *
     * @param s the string to parse
     */
    private void parseAttributes(String s) {
        trace("data=" + s);
        while (s != null) {
            int idx = s.indexOf('=');
            if (idx >= 0) {
                String property = s.substring(0, idx);
                s = s.substring(idx + 1);
                idx = s.indexOf('&');
                String value;
                if (idx >= 0) {
                    value = s.substring(0, idx);
                    s = s.substring(idx + 1);
                } else {
                    value = s;
                }
                String attr = StringUtils.urlDecode(value);
                attributes.put(property, attr);
            } else {
                break;
            }
        }
        trace(attributes.toString());
    }

    /**
     * Read the remaining request headers and, for POST requests, the body
     * (which is parsed into the attributes map).
     *
     * @return true if the connection should be kept alive
     */
    private boolean parseHeader() throws IOException {
        boolean keepAlive = false;
        trace("parseHeader");
        int len = 0;
        ifModifiedSince = null;
        boolean multipart = false;
        while (true) {
            String line = readHeaderLine();
            if (line == null) {
                // empty line: end of the headers
                break;
            }
            trace(" " + line);
            String lower = StringUtils.toLowerEnglish(line);
            if (lower.startsWith("if-modified-since")) {
                ifModifiedSince = getHeaderLineValue(line);
            } else if (lower.startsWith("connection")) {
                String conn = getHeaderLineValue(line);
                if ("keep-alive".equals(conn)) {
                    keepAlive = true;
                }
            } else if (lower.startsWith("content-type")) {
                String type = getHeaderLineValue(line);
                if (type.startsWith("multipart/form-data")) {
                    multipart = true;
                }
            } else if (lower.startsWith("content-length")) {
                len = Integer.parseInt(getHeaderLineValue(line));
                trace("len=" + len);
            } else if (lower.startsWith("user-agent")) {
                boolean isWebKit = lower.contains("webkit/");
                if (isWebKit && session != null) {
                    // workaround for what seems to be a WebKit bug:
                    // http://code.google.com/p/chromium/issues/detail?id=6402
                    session.put("frame-border", "1");
                    session.put("frameset-border", "2");
                }
            } else if (lower.startsWith("accept-language")) {
                Locale locale = session == null ? null : session.locale;
                if (locale == null) {
                    // pick the first accepted language with a translation
                    String languages = getHeaderLineValue(line);
                    StringTokenizer tokenizer = new StringTokenizer(languages, ",;");
                    while (tokenizer.hasMoreTokens()) {
                        String token = tokenizer.nextToken();
                        if (!token.startsWith("q=")) {
                            if (server.supportsLanguage(token)) {
                                int dash = token.indexOf('-');
                                if (dash >= 0) {
                                    String language = token.substring(0, dash);
                                    String country = token.substring(dash + 1);
                                    locale = new Locale(language, country);
                                } else {
                                    locale = new Locale(token, "");
                                }
                                headerLanguage = locale.getLanguage();
                                if (session != null) {
                                    session.locale = locale;
                                    session.put("language", headerLanguage);
                                    server.readTranslations(session, headerLanguage);
                                }
                                break;
                            }
                        }
                    }
                }
            } else if (line.trim().length() == 0) {
                break;
            }
        }
        if (multipart) {
            // not supported
        } else if (session != null && len > 0) {
            // read the POST body completely
            byte[] bytes = Utils.newBytes(len);
            for (int pos = 0; pos < len;) {
                // NOTE(review): read() returns -1 at end of stream, which
                // would make this loop spin forever on a truncated request
                // - verify that upstream guarantees the full body arrives
                pos += input.read(bytes, pos, len - pos);
            }
            String s = new String(bytes);
            parseAttributes(s);
        }
        return keepAlive;
    }

    /**
     * Get the value part of a "Name: value" header line.
     */
    private static String getHeaderLineValue(String line) {
        return line.substring(line.indexOf(':') + 1).trim();
    }

    @Override
    protected String adminShutdown() {
        // close this connection before delegating the shutdown
        stopNow();
        return super.adminShutdown();
    }

    /**
     * Check if the connecting client is allowed: either remote connections
     * are enabled, or the socket must be a local address.
     */
    private boolean allow() {
        if (server.getAllowOthers()) {
            return true;
        }
        try {
            return NetUtils.isLocalAddress(socket);
        } catch (UnknownHostException e) {
            server.traceError(e);
            return false;
        }
    }

    private void trace(String s) {
        server.trace(s);
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/CountingReaderInputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;
import org.h2.engine.Constants;
/**
 * An input stream that reads the data from a reader and limits the number of
 * characters that can be read. The characters are encoded as UTF-8.
 */
public class CountingReaderInputStream extends InputStream {

    private final Reader reader;

    // staging area for characters read from the reader
    private final CharBuffer charBuffer =
            CharBuffer.allocate(Constants.IO_BUFFER_SIZE);

    // malformed or unmappable input is replaced, never thrown
    private final CharsetEncoder encoder = StandardCharsets.UTF_8.newEncoder().
            onMalformedInput(CodingErrorAction.REPLACE).
            onUnmappableCharacter(CodingErrorAction.REPLACE);

    // encoded bytes not yet handed to the caller; null signals EOF
    private ByteBuffer byteBuffer = ByteBuffer.allocate(0);

    // number of characters (not bytes) read from the reader so far
    private long length;

    // number of characters that may still be read from the reader
    private long remaining;

    CountingReaderInputStream(Reader reader, long maxLength) {
        this.reader = reader;
        this.remaining = maxLength;
    }

    @Override
    public int read(byte[] buff, int offset, int len) throws IOException {
        if (len == 0) {
            // the InputStream contract requires returning 0 (not -1) for a
            // zero-length request, even at the end of the stream
            return 0;
        }
        if (!fetch()) {
            return -1;
        }
        len = Math.min(len, byteBuffer.remaining());
        byteBuffer.get(buff, offset, len);
        return len;
    }

    @Override
    public int read() throws IOException {
        if (!fetch()) {
            return -1;
        }
        return byteBuffer.get() & 255;
    }

    /**
     * Make sure the byte buffer contains at least one byte, refilling it
     * from the reader if necessary.
     *
     * @return false if the end of the stream has been reached
     */
    private boolean fetch() throws IOException {
        if (byteBuffer != null && byteBuffer.remaining() == 0) {
            fillBuffer();
        }
        return byteBuffer != null;
    }

    private void fillBuffer() throws IOException {
        // read at most 'remaining' more characters
        int len = (int) Math.min(charBuffer.capacity() - charBuffer.position(),
                remaining);
        if (len > 0) {
            len = reader.read(charBuffer.array(), charBuffer.position(), len);
        }
        if (len > 0) {
            remaining -= len;
        } else {
            // end of the reader, or the character limit was reached
            len = 0;
            remaining = 0;
        }
        length += len;
        charBuffer.limit(charBuffer.position() + len);
        charBuffer.rewind();
        byteBuffer = ByteBuffer.allocate(Constants.IO_BUFFER_SIZE);
        // at the end of the input the encoder may flush a pending
        // incomplete sequence
        boolean end = remaining == 0;
        encoder.encode(charBuffer, byteBuffer, end);
        if (end && byteBuffer.position() == 0) {
            // EOF
            byteBuffer = null;
            return;
        }
        byteBuffer.flip();
        // keep any characters the encoder could not consume yet
        charBuffer.compact();
        charBuffer.flip();
        charBuffer.position(charBuffer.limit());
    }

    /**
     * The number of characters read so far (but there might still be some bytes
     * in the buffer).
     *
     * @return the number of characters
     */
    public long getLength() {
        return length;
    }

    @Override
    public void close() throws IOException {
        reader.close();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/Data.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*
* The variable size number format code is a port from SQLite,
* but stored in reverse order (least significant bits in the first byte).
*/
package org.h2.store;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.Arrays;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.tools.SimpleResultSet;
import org.h2.util.Bits;
import org.h2.util.DateTimeUtils;
import org.h2.util.MathUtils;
import org.h2.util.Utils;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueArray;
import org.h2.value.ValueBoolean;
import org.h2.value.ValueByte;
import org.h2.value.ValueBytes;
import org.h2.value.ValueDate;
import org.h2.value.ValueDecimal;
import org.h2.value.ValueDouble;
import org.h2.value.ValueFloat;
import org.h2.value.ValueGeometry;
import org.h2.value.ValueInt;
import org.h2.value.ValueJavaObject;
import org.h2.value.ValueLob;
import org.h2.value.ValueLobDb;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
import org.h2.value.ValueResultSet;
import org.h2.value.ValueShort;
import org.h2.value.ValueString;
import org.h2.value.ValueStringFixed;
import org.h2.value.ValueStringIgnoreCase;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;
import org.h2.value.ValueTimestampTimeZone;
import org.h2.value.ValueUuid;
/**
* This class represents a byte buffer that contains persistent data of a page.
*
* @author Thomas Mueller
* @author Noel Grandin
* @author Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888
*/
public class Data {
/**
 * The length of an integer value.
 */
public static final int LENGTH_INT = 4;

/**
 * The length of a long value.
 */
private static final int LENGTH_LONG = 8;

// Type tags of the serialized value format (see writeValue / readValue).
// Ranges encode small values directly in the tag byte:
//   32..47  -> INT 0..15
//   48..55  -> LONG 0..7
//   68..99  -> STRING of length 0..31
//  100..131 -> BYTES of length 0..31
private static final int INT_0_15 = 32;
private static final int LONG_0_7 = 48;
// DECIMAL_0_1 and DECIMAL_0_1 + 1 encode BigDecimal.ZERO and ONE
private static final int DECIMAL_0_1 = 56;
// compact decimals whose unscaled value fits in a long
private static final int DECIMAL_SMALL_0 = 58;
private static final int DECIMAL_SMALL = 59;
// DOUBLE_0_1 / FLOAT_0_1 (and + 1) encode the constants 0.0/0.0f and 1.0/1.0f
private static final int DOUBLE_0_1 = 60;
private static final int FLOAT_0_1 = 62;
private static final int BOOLEAN_FALSE = 64;
private static final int BOOLEAN_TRUE = 65;
// negative ints / longs: tag plus the positive magnitude as a varint
private static final int INT_NEG = 66;
private static final int LONG_NEG = 67;
private static final int STRING_0_31 = 68;
private static final int BYTES_0_31 = 100;
// tags of the (currently disabled) local time storage format,
// see STORE_LOCAL_TIME below
private static final int LOCAL_TIME = 132;
private static final int LOCAL_DATE = 133;
private static final int LOCAL_TIMESTAMP = 134;

private static final long MILLIS_PER_MINUTE = 1000 * 60;

/**
 * Can not store the local time, because doing so with old database files
 * that didn't do it could result in an ArrayIndexOutOfBoundsException. The
 * reason is that adding a row to a page only allocated space for the new
 * row, but didn't take into account that existing rows now can use more
 * space, due to the changed format.
 */
private static final boolean STORE_LOCAL_TIME = false;

/**
 * The data itself.
 */
private byte[] data;

/**
 * The current write or read position.
 */
private int pos;

/**
 * The data handler responsible for lob objects.
 */
private final DataHandler handler;
/**
 * Create a buffer wrapping the given byte array (the array is not
 * copied). Use the static {@code create} factory methods instead of
 * calling this constructor directly.
 *
 * @param handler the data handler for lob objects (may be null, as in
 *        {@code copyString})
 * @param data the byte array to wrap
 */
private Data(DataHandler handler, byte[] data) {
    this.handler = handler;
    this.data = data;
}
/**
 * Update an integer at the given position.
 * The current position is not changed.
 *
 * @param pos the position
 * @param x the value
 */
public void setInt(int pos, int x) {
    Bits.writeInt(data, pos, x);
}
/**
 * Write an integer at the current position.
 * The current position is incremented by 4.
 *
 * @param x the value
 */
public void writeInt(int x) {
    Bits.writeInt(data, pos, x);
    pos += 4;
}

/**
 * Read an integer at the current position.
 * The current position is incremented by 4.
 *
 * @return the value
 */
public int readInt() {
    int x = Bits.readInt(data, pos);
    pos += 4;
    return x;
}
/**
 * Get the length of a String. This includes the bytes required to encode
 * the length itself (as a variable size int).
 *
 * @param s the string
 * @return the number of bytes required
 */
public static int getStringLen(String s) {
    int len = s.length();
    return getStringWithoutLengthLen(s, len) + getVarIntLen(len);
}
/**
 * Calculate the length of String, excluding the bytes required to encode
 * the length.
 * <p>
 * For performance reasons the internal representation of a String is
 * similar to UTF-8, but not exactly UTF-8: each character is stored in
 * one, two or three bytes.
 *
 * @param s the string
 * @param len the length of the string
 * @return the number of bytes required
 */
private static int getStringWithoutLengthLen(String s, int len) {
    // start with one byte per character and add the extra bytes that
    // two- and three-byte encodings need
    int bytes = len;
    for (int i = 0; i < len; i++) {
        char ch = s.charAt(i);
        if (ch >= 0x800) {
            // three-byte form
            bytes += 2;
        } else if (ch >= 0x80) {
            // two-byte form
            bytes++;
        }
    }
    return bytes;
}
/**
 * Read a String value (a variable size length, followed by the encoded
 * characters). The current position is incremented.
 *
 * @return the value
 */
public String readString() {
    int len = readVarInt();
    return readString(len);
}

/**
 * Read a String from the byte array.
 * <p>
 * For performance reasons the internal representation of a String is
 * similar to UTF-8, but not exactly UTF-8: each character was written
 * in one, two or three bytes (see writeStringWithoutLength).
 *
 * @param len the length of the resulting string
 * @return the String
 */
private String readString(int len) {
    byte[] buff = data;
    int p = pos;
    char[] chars = new char[len];
    for (int i = 0; i < len; i++) {
        int x = buff[p++] & 0xff;
        if (x < 0x80) {
            // single byte (ASCII)
            chars[i] = (char) x;
        } else if (x >= 0xe0) {
            // three-byte sequence: 4 + 6 + 6 bits
            chars[i] = (char) (((x & 0xf) << 12) +
                    ((buff[p++] & 0x3f) << 6) +
                    (buff[p++] & 0x3f));
        } else {
            // two-byte sequence: 5 + 6 bits
            chars[i] = (char) (((x & 0x1f) << 6) +
                    (buff[p++] & 0x3f));
        }
    }
    pos = p;
    return new String(chars);
}
/**
 * Write a String: the character count as a variable size int, followed
 * by the encoded characters. The current position is incremented.
 *
 * @param s the value
 */
public void writeString(String s) {
    int len = s.length();
    writeVarInt(len);
    writeStringWithoutLength(s, len);
}
/**
 * Write a String without a length prefix.
 * <p>
 * For performance reasons the internal representation of a String is
 * similar to UTF-8, but not exactly UTF-8: each character is written in
 * one, two or three bytes.
 *
 * @param s the string
 * @param len the number of characters to write
 */
private void writeStringWithoutLength(String s, int len) {
    int idx = pos;
    byte[] b = data;
    for (int i = 0; i < len; i++) {
        int ch = s.charAt(i);
        if (ch >= 0x800) {
            // three-byte form: 4 + 6 + 6 bits
            b[idx++] = (byte) (0xe0 | (ch >> 12));
            b[idx++] = (byte) ((ch >> 6) & 0x3f);
            b[idx++] = (byte) (ch & 0x3f);
        } else if (ch >= 0x80) {
            // two-byte form: 5 + 6 bits
            b[idx++] = (byte) (0xc0 | (ch >> 6));
            b[idx++] = (byte) (ch & 0x3f);
        } else {
            // single byte (ASCII)
            b[idx++] = (byte) ch;
        }
    }
    pos = idx;
}
/**
 * Write characters from an array without a length prefix, using the same
 * one-, two- or three-byte per character encoding as the String variant.
 *
 * @param chars the character array
 * @param len the number of characters to write
 */
private void writeStringWithoutLength(char[] chars, int len) {
    int idx = pos;
    byte[] b = data;
    for (int i = 0; i < len; i++) {
        int ch = chars[i];
        if (ch >= 0x800) {
            // three-byte form: 4 + 6 + 6 bits
            b[idx++] = (byte) (0xe0 | (ch >> 12));
            b[idx++] = (byte) ((ch >> 6) & 0x3f);
            b[idx++] = (byte) (ch & 0x3f);
        } else if (ch >= 0x80) {
            // two-byte form: 5 + 6 bits
            b[idx++] = (byte) (0xc0 | (ch >> 6));
            b[idx++] = (byte) (ch & 0x3f);
        } else {
            // single byte (ASCII)
            b[idx++] = (byte) ch;
        }
    }
    pos = idx;
}
/**
 * Create a new buffer for the given handler. The
 * handler will decide what type of buffer is created.
 *
 * @param handler the data handler
 * @param capacity the initial capacity of the buffer
 * @return the buffer
 */
public static Data create(DataHandler handler, int capacity) {
    return new Data(handler, new byte[capacity]);
}

/**
 * Create a new buffer using the given data for the given handler. The
 * handler will decide what type of buffer is created. The byte array is
 * used directly, not copied.
 *
 * @param handler the data handler
 * @param buff the data
 * @return the buffer
 */
public static Data create(DataHandler handler, byte[] buff) {
    return new Data(handler, buff);
}

/**
 * Get the current write position of this buffer, which is the current
 * length.
 *
 * @return the length
 */
public int length() {
    return pos;
}

/**
 * Get the byte array used for this page. Returns the internal array, not
 * a copy.
 *
 * @return the byte array
 */
public byte[] getBytes() {
    return data;
}

/**
 * Set the position to 0.
 */
public void reset() {
    pos = 0;
}
/**
 * Append a number of bytes to this buffer. The current position is
 * incremented by the length.
 *
 * @param buff the data
 * @param off the offset in the data
 * @param len the length in bytes
 */
public void write(byte[] buff, int off, int len) {
    System.arraycopy(buff, off, data, pos, len);
    pos += len;
}

/**
 * Copy a number of bytes to the given buffer from the current position. The
 * current position is incremented accordingly.
 *
 * @param buff the output buffer
 * @param off the offset in the output buffer
 * @param len the number of bytes to copy
 */
public void read(byte[] buff, int off, int len) {
    System.arraycopy(data, pos, buff, off, len);
    pos += len;
}
/**
 * Append one single byte. The current position is incremented.
 *
 * @param x the value
 */
public void writeByte(byte x) {
    data[pos++] = x;
}

/**
 * Read one single byte. The current position is incremented.
 *
 * @return the value
 */
public byte readByte() {
    return data[pos++];
}
/**
 * Read a long value. The current position is incremented by 8.
 *
 * @return the long value
 */
public long readLong() {
    long x = Bits.readLong(data, pos);
    pos += 8;
    return x;
}

/**
 * Append a long value. The current position is incremented by 8.
 *
 * @param x the value
 */
public void writeLong(long x) {
    Bits.writeLong(data, pos, x);
    pos += 8;
}
/**
 * Append a value.
 * <p>
 * A value is stored as a one byte type tag followed by a type specific
 * payload. Frequently used small values (booleans, ints 0..15, longs
 * 0..7, the decimals/doubles/floats 0 and 1, strings and byte arrays
 * shorter than 32) are packed into the tag byte itself or into a compact
 * form; see the tag constants declared above. The inverse operation is
 * {@code readValue}, and {@code getValueLen} must always agree with the
 * number of bytes written here.
 *
 * @param v the value
 */
public void writeValue(Value v) {
    int start = pos;
    if (v == ValueNull.INSTANCE) {
        // NULL is a single 0 byte
        data[pos++] = 0;
        return;
    }
    int type = v.getType();
    switch (type) {
    case Value.BOOLEAN:
        writeByte((byte) (v.getBoolean() ? BOOLEAN_TRUE : BOOLEAN_FALSE));
        break;
    case Value.BYTE:
        writeByte((byte) type);
        writeByte(v.getByte());
        break;
    case Value.SHORT:
        writeByte((byte) type);
        writeShortInt(v.getShort());
        break;
    case Value.ENUM:
    case Value.INT: {
        int x = v.getInt();
        if (x < 0) {
            // negative: INT_NEG tag plus the positive magnitude as varint
            writeByte((byte) INT_NEG);
            writeVarInt(-x);
        } else if (x < 16) {
            // 0..15 is encoded in the tag byte itself
            writeByte((byte) (INT_0_15 + x));
        } else {
            writeByte((byte) type);
            writeVarInt(x);
        }
        break;
    }
    case Value.LONG: {
        long x = v.getLong();
        if (x < 0) {
            writeByte((byte) LONG_NEG);
            writeVarLong(-x);
        } else if (x < 8) {
            // 0..7 is encoded in the tag byte itself
            writeByte((byte) (LONG_0_7 + x));
        } else {
            writeByte((byte) type);
            writeVarLong(x);
        }
        break;
    }
    case Value.DECIMAL: {
        BigDecimal x = v.getBigDecimal();
        if (BigDecimal.ZERO.equals(x)) {
            writeByte((byte) DECIMAL_0_1);
        } else if (BigDecimal.ONE.equals(x)) {
            writeByte((byte) (DECIMAL_0_1 + 1));
        } else {
            int scale = x.scale();
            BigInteger b = x.unscaledValue();
            int bits = b.bitLength();
            if (bits <= 63) {
                // the unscaled value fits in a long
                if (scale == 0) {
                    writeByte((byte) DECIMAL_SMALL_0);
                    writeVarLong(b.longValue());
                } else {
                    writeByte((byte) DECIMAL_SMALL);
                    writeVarInt(scale);
                    writeVarLong(b.longValue());
                }
            } else {
                // general case: scale, then the two's complement bytes
                writeByte((byte) type);
                writeVarInt(scale);
                byte[] bytes = b.toByteArray();
                writeVarInt(bytes.length);
                write(bytes, 0, bytes.length);
            }
        }
        break;
    }
    case Value.TIME:
        if (STORE_LOCAL_TIME) {
            writeByte((byte) LOCAL_TIME);
            ValueTime t = (ValueTime) v;
            long nanos = t.getNanos();
            long millis = nanos / 1_000_000;
            nanos -= millis * 1_000_000;
            writeVarLong(millis);
            writeVarLong(nanos);
        } else {
            writeByte((byte) type);
            writeVarLong(DateTimeUtils.getTimeLocalWithoutDst(v.getTime()));
        }
        break;
    case Value.DATE: {
        if (STORE_LOCAL_TIME) {
            writeByte((byte) LOCAL_DATE);
            long x = ((ValueDate) v).getDateValue();
            writeVarLong(x);
        } else {
            writeByte((byte) type);
            long x = DateTimeUtils.getTimeLocalWithoutDst(v.getDate());
            // dates are stored with minute resolution to save space
            writeVarLong(x / MILLIS_PER_MINUTE);
        }
        break;
    }
    case Value.TIMESTAMP: {
        if (STORE_LOCAL_TIME) {
            writeByte((byte) LOCAL_TIMESTAMP);
            ValueTimestamp ts = (ValueTimestamp) v;
            long dateValue = ts.getDateValue();
            writeVarLong(dateValue);
            long nanos = ts.getTimeNanos();
            long millis = nanos / 1_000_000;
            nanos -= millis * 1_000_000;
            writeVarLong(millis);
            writeVarLong(nanos);
        } else {
            Timestamp ts = v.getTimestamp();
            writeByte((byte) type);
            writeVarLong(DateTimeUtils.getTimeLocalWithoutDst(ts));
            // only the sub-millisecond part of the nanoseconds
            writeVarInt(ts.getNanos() % 1_000_000);
        }
        break;
    }
    case Value.TIMESTAMP_TZ: {
        ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v;
        writeByte((byte) type);
        writeVarLong(ts.getDateValue());
        writeVarLong(ts.getTimeNanos());
        writeVarInt(ts.getTimeZoneOffsetMins());
        break;
    }
    case Value.GEOMETRY:
        // fall through
    case Value.JAVA_OBJECT: {
        writeByte((byte) type);
        byte[] b = v.getBytesNoCopy();
        int len = b.length;
        writeVarInt(len);
        write(b, 0, len);
        break;
    }
    case Value.BYTES: {
        byte[] b = v.getBytesNoCopy();
        int len = b.length;
        if (len < 32) {
            // short arrays: the length is encoded in the tag byte
            writeByte((byte) (BYTES_0_31 + len));
            write(b, 0, len);
        } else {
            writeByte((byte) type);
            writeVarInt(len);
            write(b, 0, len);
        }
        break;
    }
    case Value.UUID: {
        writeByte((byte) type);
        ValueUuid uuid = (ValueUuid) v;
        writeLong(uuid.getHigh());
        writeLong(uuid.getLow());
        break;
    }
    case Value.STRING: {
        String s = v.getString();
        int len = s.length();
        if (len < 32) {
            // short strings: the length is encoded in the tag byte
            writeByte((byte) (STRING_0_31 + len));
            writeStringWithoutLength(s, len);
        } else {
            writeByte((byte) type);
            writeString(s);
        }
        break;
    }
    case Value.STRING_IGNORECASE:
    case Value.STRING_FIXED:
        writeByte((byte) type);
        writeString(v.getString());
        break;
    case Value.DOUBLE: {
        double x = v.getDouble();
        if (x == 1.0d) {
            writeByte((byte) (DOUBLE_0_1 + 1));
        } else {
            long d = Double.doubleToLongBits(x);
            if (d == ValueDouble.ZERO_BITS) {
                writeByte((byte) DOUBLE_0_1);
            } else {
                // stored bit-reversed; readValue reverses it back
                writeByte((byte) type);
                writeVarLong(Long.reverse(d));
            }
        }
        break;
    }
    case Value.FLOAT: {
        float x = v.getFloat();
        if (x == 1.0f) {
            writeByte((byte) (FLOAT_0_1 + 1));
        } else {
            int f = Float.floatToIntBits(x);
            if (f == ValueFloat.ZERO_BITS) {
                writeByte((byte) FLOAT_0_1);
            } else {
                // stored bit-reversed; readValue reverses it back
                writeByte((byte) type);
                writeVarInt(Integer.reverse(f));
            }
        }
        break;
    }
    case Value.BLOB:
    case Value.CLOB: {
        writeByte((byte) type);
        if (v instanceof ValueLob) {
            ValueLob lob = (ValueLob) v;
            lob.convertToFileIfRequired(handler);
            byte[] small = lob.getSmall();
            if (small == null) {
                // -1: linked to a table; -2: not linked (file name follows)
                int t = -1;
                if (!lob.isLinkedToTable()) {
                    t = -2;
                }
                writeVarInt(t);
                writeVarInt(lob.getTableId());
                writeVarInt(lob.getObjectId());
                writeVarLong(lob.getPrecision());
                writeByte((byte) (lob.isCompressed() ? 1 : 0));
                if (t == -2) {
                    writeString(lob.getFileName());
                }
            } else {
                // inline (small) lob: non-negative length plus the data
                writeVarInt(small.length);
                write(small, 0, small.length);
            }
        } else {
            ValueLobDb lob = (ValueLobDb) v;
            byte[] small = lob.getSmall();
            if (small == null) {
                // -3: lob stored in the database lob storage
                writeVarInt(-3);
                writeVarInt(lob.getTableId());
                writeVarLong(lob.getLobId());
                writeVarLong(lob.getPrecision());
            } else {
                writeVarInt(small.length);
                write(small, 0, small.length);
            }
        }
        break;
    }
    case Value.ARRAY: {
        writeByte((byte) type);
        Value[] list = ((ValueArray) v).getList();
        writeVarInt(list.length);
        for (Value x : list) {
            // elements are written recursively using the same format
            writeValue(x);
        }
        break;
    }
    case Value.RESULT_SET: {
        writeByte((byte) type);
        try {
            ResultSet rs = ((ValueResultSet) v).getResultSet();
            rs.beforeFirst();
            ResultSetMetaData meta = rs.getMetaData();
            int columnCount = meta.getColumnCount();
            writeVarInt(columnCount);
            for (int i = 0; i < columnCount; i++) {
                writeString(meta.getColumnName(i + 1));
                writeVarInt(meta.getColumnType(i + 1));
                writeVarInt(meta.getPrecision(i + 1));
                writeVarInt(meta.getScale(i + 1));
            }
            while (rs.next()) {
                // each row is prefixed with 1; a 0 byte terminates the rows
                writeByte((byte) 1);
                for (int i = 0; i < columnCount; i++) {
                    int t = DataType.getValueTypeFromResultSet(meta, i + 1);
                    Value val = DataType.readValue(null, rs, i + 1, t);
                    writeValue(val);
                }
            }
            writeByte((byte) 0);
            rs.beforeFirst();
        } catch (SQLException e) {
            throw DbException.convert(e);
        }
        break;
    }
    default:
        DbException.throwInternalError("type=" + v.getType());
    }
    if (SysProperties.CHECK2) {
        // self-check: the number of bytes written must match getValueLen
        if (pos - start != getValueLen(v, handler)) {
            throw DbException.throwInternalError(
                    "value size error: got " + (pos - start) +
                    " expected " + getValueLen(v, handler));
        }
    }
}
/**
 * Read a value that was written with {@code writeValue}. The one byte
 * type tag is read first; tags outside the explicit cases encode small
 * ints, small longs, and short strings / byte arrays directly (handled
 * in the default branch).
 *
 * @return the value
 */
public Value readValue() {
    int type = data[pos++] & 255;
    switch (type) {
    case Value.NULL:
        return ValueNull.INSTANCE;
    case BOOLEAN_TRUE:
        return ValueBoolean.TRUE;
    case BOOLEAN_FALSE:
        return ValueBoolean.FALSE;
    case INT_NEG:
        return ValueInt.get(-readVarInt());
    case Value.ENUM:
    case Value.INT:
        return ValueInt.get(readVarInt());
    case LONG_NEG:
        return ValueLong.get(-readVarLong());
    case Value.LONG:
        return ValueLong.get(readVarLong());
    case Value.BYTE:
        return ValueByte.get(readByte());
    case Value.SHORT:
        return ValueShort.get(readShortInt());
    case DECIMAL_0_1:
        return (ValueDecimal) ValueDecimal.ZERO;
    case DECIMAL_0_1 + 1:
        return (ValueDecimal) ValueDecimal.ONE;
    case DECIMAL_SMALL_0:
        // scale 0, unscaled value fits in a long
        return ValueDecimal.get(BigDecimal.valueOf(readVarLong()));
    case DECIMAL_SMALL: {
        int scale = readVarInt();
        return ValueDecimal.get(BigDecimal.valueOf(readVarLong(), scale));
    }
    case Value.DECIMAL: {
        // general case: scale, length, two's complement bytes
        int scale = readVarInt();
        int len = readVarInt();
        byte[] buff = Utils.newBytes(len);
        read(buff, 0, len);
        BigInteger b = new BigInteger(buff);
        return ValueDecimal.get(new BigDecimal(b, scale));
    }
    case LOCAL_DATE: {
        return ValueDate.fromDateValue(readVarLong());
    }
    case Value.DATE: {
        // stored with minute resolution, see writeValue
        long x = readVarLong() * MILLIS_PER_MINUTE;
        return ValueDate.fromMillis(DateTimeUtils.getTimeUTCWithoutDst(x));
    }
    case LOCAL_TIME: {
        long nanos = readVarLong() * 1_000_000 + readVarLong();
        return ValueTime.fromNanos(nanos);
    }
    case Value.TIME:
        // need to normalize the year, month and day
        return ValueTime.fromMillis(
                DateTimeUtils.getTimeUTCWithoutDst(readVarLong()));
    case LOCAL_TIMESTAMP: {
        long dateValue = readVarLong();
        long nanos = readVarLong() * 1_000_000 + readVarLong();
        return ValueTimestamp.fromDateValueAndNanos(dateValue, nanos);
    }
    case Value.TIMESTAMP: {
        return ValueTimestamp.fromMillisNanos(
                DateTimeUtils.getTimeUTCWithoutDst(readVarLong()),
                readVarInt());
    }
    case Value.TIMESTAMP_TZ: {
        long dateValue = readVarLong();
        long nanos = readVarLong();
        short tz = (short) readVarInt();
        return ValueTimestampTimeZone.fromDateValueAndNanos(dateValue, nanos, tz);
    }
    case Value.BYTES: {
        int len = readVarInt();
        byte[] b = Utils.newBytes(len);
        read(b, 0, len);
        return ValueBytes.getNoCopy(b);
    }
    case Value.GEOMETRY: {
        int len = readVarInt();
        byte[] b = Utils.newBytes(len);
        read(b, 0, len);
        return ValueGeometry.get(b);
    }
    case Value.JAVA_OBJECT: {
        int len = readVarInt();
        byte[] b = Utils.newBytes(len);
        read(b, 0, len);
        return ValueJavaObject.getNoCopy(null, b, handler);
    }
    case Value.UUID:
        return ValueUuid.get(readLong(), readLong());
    case Value.STRING:
        return ValueString.get(readString());
    case Value.STRING_IGNORECASE:
        return ValueStringIgnoreCase.get(readString());
    case Value.STRING_FIXED:
        return ValueStringFixed.get(readString());
    case FLOAT_0_1:
        return ValueFloat.get(0);
    case FLOAT_0_1 + 1:
        return ValueFloat.get(1);
    case DOUBLE_0_1:
        return ValueDouble.get(0);
    case DOUBLE_0_1 + 1:
        return ValueDouble.get(1);
    case Value.DOUBLE:
        // the bits were stored reversed, see writeValue
        return ValueDouble.get(Double.longBitsToDouble(
                Long.reverse(readVarLong())));
    case Value.FLOAT:
        return ValueFloat.get(Float.intBitsToFloat(
                Integer.reverse(readVarInt())));
    case Value.BLOB:
    case Value.CLOB: {
        int smallLen = readVarInt();
        if (smallLen >= 0) {
            // inline (small) lob
            byte[] small = Utils.newBytes(smallLen);
            read(small, 0, smallLen);
            return ValueLobDb.createSmallLob(type, small);
        } else if (smallLen == -3) {
            // lob stored in the database lob storage
            int tableId = readVarInt();
            long lobId = readVarLong();
            long precision = readVarLong();
            return ValueLobDb.create(type, handler, tableId,
                    lobId, null, precision);
        } else {
            int tableId = readVarInt();
            int objectId = readVarInt();
            long precision = 0;
            boolean compression = false;
            // -1: regular; -2: regular, but not linked (in this case:
            // including file name)
            if (smallLen == -1 || smallLen == -2) {
                precision = readVarLong();
                compression = readByte() == 1;
            }
            if (smallLen == -2) {
                String filename = readString();
                return ValueLob.openUnlinked(type, handler, tableId,
                        objectId, precision, compression, filename);
            }
            return ValueLob.openLinked(type, handler, tableId,
                    objectId, precision, compression);
        }
    }
    case Value.ARRAY: {
        int len = readVarInt();
        Value[] list = new Value[len];
        for (int i = 0; i < len; i++) {
            list[i] = readValue();
        }
        return ValueArray.get(list);
    }
    case Value.RESULT_SET: {
        SimpleResultSet rs = new SimpleResultSet();
        rs.setAutoClose(false);
        int columns = readVarInt();
        for (int i = 0; i < columns; i++) {
            rs.addColumn(readString(), readVarInt(), readVarInt(), readVarInt());
        }
        // rows are prefixed with 1; a 0 byte terminates the row list
        while (readByte() != 0) {
            Object[] o = new Object[columns];
            for (int i = 0; i < columns; i++) {
                o[i] = readValue().getObject();
            }
            rs.addRow(o);
        }
        return ValueResultSet.get(rs);
    }
    default:
        // tags that encode small values directly (see the tag constants)
        if (type >= INT_0_15 && type < INT_0_15 + 16) {
            return ValueInt.get(type - INT_0_15);
        } else if (type >= LONG_0_7 && type < LONG_0_7 + 8) {
            return ValueLong.get(type - LONG_0_7);
        } else if (type >= BYTES_0_31 && type < BYTES_0_31 + 32) {
            int len = type - BYTES_0_31;
            byte[] b = Utils.newBytes(len);
            read(b, 0, len);
            return ValueBytes.getNoCopy(b);
        } else if (type >= STRING_0_31 && type < STRING_0_31 + 32) {
            return ValueString.get(readString(type - STRING_0_31));
        }
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "type: " + type);
    }
}
/**
 * Calculate the number of bytes required to encode the given value,
 * using this buffer's data handler for lobs.
 *
 * @param v the value
 * @return the number of bytes required to store this value
 */
public int getValueLen(Value v) {
    return getValueLen(v, handler);
}
/**
 * Calculate the number of bytes required to encode the given value.
 * This must always match exactly what {@code writeValue} writes; the
 * cases below mirror that method one to one.
 *
 * @param v the value
 * @param handler the data handler for lobs
 * @return the number of bytes required to store this value
 */
public static int getValueLen(Value v, DataHandler handler) {
    if (v == ValueNull.INSTANCE) {
        return 1;
    }
    switch (v.getType()) {
    case Value.BOOLEAN:
        return 1;
    case Value.BYTE:
        return 2;
    case Value.SHORT:
        return 3;
    case Value.ENUM:
    case Value.INT: {
        int x = v.getInt();
        if (x < 0) {
            return 1 + getVarIntLen(-x);
        } else if (x < 16) {
            // encoded in the tag byte
            return 1;
        } else {
            return 1 + getVarIntLen(x);
        }
    }
    case Value.LONG: {
        long x = v.getLong();
        if (x < 0) {
            return 1 + getVarLongLen(-x);
        } else if (x < 8) {
            // encoded in the tag byte
            return 1;
        } else {
            return 1 + getVarLongLen(x);
        }
    }
    case Value.DOUBLE: {
        double x = v.getDouble();
        if (x == 1.0d) {
            return 1;
        }
        long d = Double.doubleToLongBits(x);
        if (d == ValueDouble.ZERO_BITS) {
            return 1;
        }
        return 1 + getVarLongLen(Long.reverse(d));
    }
    case Value.FLOAT: {
        float x = v.getFloat();
        if (x == 1.0f) {
            return 1;
        }
        int f = Float.floatToIntBits(x);
        if (f == ValueFloat.ZERO_BITS) {
            return 1;
        }
        return 1 + getVarIntLen(Integer.reverse(f));
    }
    case Value.STRING: {
        String s = v.getString();
        int len = s.length();
        if (len < 32) {
            // length encoded in the tag byte
            return 1 + getStringWithoutLengthLen(s, len);
        }
        return 1 + getStringLen(s);
    }
    case Value.STRING_IGNORECASE:
    case Value.STRING_FIXED:
        return 1 + getStringLen(v.getString());
    case Value.DECIMAL: {
        BigDecimal x = v.getBigDecimal();
        if (BigDecimal.ZERO.equals(x)) {
            return 1;
        } else if (BigDecimal.ONE.equals(x)) {
            return 1;
        }
        int scale = x.scale();
        BigInteger b = x.unscaledValue();
        int bits = b.bitLength();
        if (bits <= 63) {
            if (scale == 0) {
                return 1 + getVarLongLen(b.longValue());
            }
            return 1 + getVarIntLen(scale) + getVarLongLen(b.longValue());
        }
        byte[] bytes = b.toByteArray();
        return 1 + getVarIntLen(scale) + getVarIntLen(bytes.length) + bytes.length;
    }
    case Value.TIME:
        if (STORE_LOCAL_TIME) {
            long nanos = ((ValueTime) v).getNanos();
            long millis = nanos / 1_000_000;
            nanos -= millis * 1_000_000;
            return 1 + getVarLongLen(millis) + getVarLongLen(nanos);
        }
        return 1 + getVarLongLen(DateTimeUtils.getTimeLocalWithoutDst(v.getTime()));
    case Value.DATE: {
        if (STORE_LOCAL_TIME) {
            long dateValue = ((ValueDate) v).getDateValue();
            return 1 + getVarLongLen(dateValue);
        }
        long x = DateTimeUtils.getTimeLocalWithoutDst(v.getDate());
        return 1 + getVarLongLen(x / MILLIS_PER_MINUTE);
    }
    case Value.TIMESTAMP: {
        if (STORE_LOCAL_TIME) {
            ValueTimestamp ts = (ValueTimestamp) v;
            long dateValue = ts.getDateValue();
            long nanos = ts.getTimeNanos();
            long millis = nanos / 1_000_000;
            nanos -= millis * 1_000_000;
            return 1 + getVarLongLen(dateValue) + getVarLongLen(millis) +
                    getVarLongLen(nanos);
        }
        Timestamp ts = v.getTimestamp();
        return 1 + getVarLongLen(DateTimeUtils.getTimeLocalWithoutDst(ts)) +
                getVarIntLen(ts.getNanos() % 1_000_000);
    }
    case Value.TIMESTAMP_TZ: {
        ValueTimestampTimeZone ts = (ValueTimestampTimeZone) v;
        long dateValue = ts.getDateValue();
        long nanos = ts.getTimeNanos();
        short tz = ts.getTimeZoneOffsetMins();
        return 1 + getVarLongLen(dateValue) + getVarLongLen(nanos) +
                getVarIntLen(tz);
    }
    case Value.GEOMETRY:
    case Value.JAVA_OBJECT: {
        byte[] b = v.getBytesNoCopy();
        return 1 + getVarIntLen(b.length) + b.length;
    }
    case Value.BYTES: {
        byte[] b = v.getBytesNoCopy();
        int len = b.length;
        if (len < 32) {
            // length encoded in the tag byte
            return 1 + b.length;
        }
        return 1 + getVarIntLen(b.length) + b.length;
    }
    case Value.UUID:
        return 1 + LENGTH_LONG + LENGTH_LONG;
    case Value.BLOB:
    case Value.CLOB: {
        int len = 1;
        if (v instanceof ValueLob) {
            ValueLob lob = (ValueLob) v;
            lob.convertToFileIfRequired(handler);
            byte[] small = lob.getSmall();
            if (small == null) {
                int t = -1;
                if (!lob.isLinkedToTable()) {
                    t = -2;
                }
                len += getVarIntLen(t);
                len += getVarIntLen(lob.getTableId());
                len += getVarIntLen(lob.getObjectId());
                len += getVarLongLen(lob.getPrecision());
                // one byte for the compression flag
                len += 1;
                if (t == -2) {
                    len += getStringLen(lob.getFileName());
                }
            } else {
                len += getVarIntLen(small.length);
                len += small.length;
            }
        } else {
            ValueLobDb lob = (ValueLobDb) v;
            byte[] small = lob.getSmall();
            if (small == null) {
                len += getVarIntLen(-3);
                len += getVarIntLen(lob.getTableId());
                len += getVarLongLen(lob.getLobId());
                len += getVarLongLen(lob.getPrecision());
            } else {
                len += getVarIntLen(small.length);
                len += small.length;
            }
        }
        return len;
    }
    case Value.ARRAY: {
        Value[] list = ((ValueArray) v).getList();
        int len = 1 + getVarIntLen(list.length);
        for (Value x : list) {
            len += getValueLen(x, handler);
        }
        return len;
    }
    case Value.RESULT_SET: {
        int len = 1;
        try {
            ResultSet rs = ((ValueResultSet) v).getResultSet();
            rs.beforeFirst();
            ResultSetMetaData meta = rs.getMetaData();
            int columnCount = meta.getColumnCount();
            len += getVarIntLen(columnCount);
            for (int i = 0; i < columnCount; i++) {
                len += getStringLen(meta.getColumnName(i + 1));
                len += getVarIntLen(meta.getColumnType(i + 1));
                len += getVarIntLen(meta.getPrecision(i + 1));
                len += getVarIntLen(meta.getScale(i + 1));
            }
            while (rs.next()) {
                // one byte row marker
                len++;
                for (int i = 0; i < columnCount; i++) {
                    int t = DataType.getValueTypeFromResultSet(meta, i + 1);
                    Value val = DataType.readValue(null, rs, i + 1, t);
                    len += getValueLen(val, handler);
                }
            }
            // terminating 0 byte
            len++;
            rs.beforeFirst();
        } catch (SQLException e) {
            throw DbException.convert(e);
        }
        return len;
    }
    default:
        throw DbException.throwInternalError("type=" + v.getType());
    }
}
/**
 * Set the current read / write position.
 *
 * @param pos the new position
 */
public void setPos(int pos) {
    this.pos = pos;
}
/**
 * Write a short integer (big-endian: high byte first) at the current
 * position. The current position is incremented by 2.
 *
 * @param x the value
 */
public void writeShortInt(int x) {
    byte[] buff = data;
    buff[pos++] = (byte) (x >> 8);
    buff[pos++] = (byte) x;
}

/**
 * Read a short integer (big-endian: high byte first) at the current
 * position. The current position is incremented by 2.
 *
 * @return the value
 */
public short readShortInt() {
    byte[] buff = data;
    return (short) (((buff[pos++] & 0xff) << 8) + (buff[pos++] & 0xff));
}
/**
 * Shrink the array to this size, discarding anything beyond it.
 * Does nothing if the current position is already within the given size.
 *
 * @param size the new size
 */
public void truncate(int size) {
    if (pos <= size) {
        return;
    }
    data = Arrays.copyOf(data, size);
    pos = size;
}
/**
 * The number of bytes required for a variable size int.
 * Each byte holds 7 bits of payload, so negative values (with the sign
 * bit set) always need the maximum of 5 bytes.
 *
 * @param x the value
 * @return the len
 */
private static int getVarIntLen(int x) {
    int len = 1;
    // one more byte for every additional 7 significant bits
    while ((x & ~0x7f) != 0) {
        x >>>= 7;
        len++;
    }
    return len;
}
/**
 * Write a variable size int. Seven bits are stored per byte, least
 * significant group first (as noted in the file header, this is the
 * SQLite varint format stored in reverse byte order); the high bit of a
 * byte marks that another byte follows. Negative values always take 5
 * bytes.
 *
 * @param x the value
 */
public void writeVarInt(int x) {
    while ((x & ~0x7f) != 0) {
        data[pos++] = (byte) (0x80 | (x & 0x7f));
        x >>>= 7;
    }
    data[pos++] = (byte) x;
}
/**
 * Read a variable size int. A byte with the high bit clear terminates
 * the sequence.
 *
 * @return the value
 */
public int readVarInt() {
    int b = data[pos];
    if (b >= 0) {
        // common case: single byte value
        pos++;
        return b;
    }
    // a separate function so that this one can be inlined
    return readVarIntRest(b);
}

// Continuation of readVarInt: unrolled handling of the 2..5 byte forms.
// The fifth byte (if present) contributes the top 4 bits.
private int readVarIntRest(int b) {
    int x = b & 0x7f;
    b = data[pos + 1];
    if (b >= 0) {
        pos += 2;
        return x | (b << 7);
    }
    x |= (b & 0x7f) << 7;
    b = data[pos + 2];
    if (b >= 0) {
        pos += 3;
        return x | (b << 14);
    }
    x |= (b & 0x7f) << 14;
    b = data[pos + 3];
    if (b >= 0) {
        pos += 4;
        return x | b << 21;
    }
    x |= ((b & 0x7f) << 21) | (data[pos + 4] << 28);
    pos += 5;
    return x;
}
/**
 * The number of bytes required for a variable size long.
 * Each byte holds 7 bits of payload, so negative values always need the
 * maximum of 10 bytes.
 *
 * @param x the value
 * @return the len
 */
public static int getVarLongLen(long x) {
    int len = 1;
    // one more byte for every additional 7 significant bits
    for (x >>>= 7; x != 0; x >>>= 7) {
        len++;
    }
    return len;
}
/**
 * Write a variable size long. Seven bits are stored per byte, least
 * significant group first; the high bit of a byte marks that another
 * byte follows. Negative values always take 10 bytes.
 *
 * @param x the value
 */
public void writeVarLong(long x) {
    while ((x & ~0x7f) != 0) {
        data[pos++] = (byte) ((x & 0x7f) | 0x80);
        x >>>= 7;
    }
    data[pos++] = (byte) x;
}
/**
 * Read a variable size long. A byte with the high bit clear terminates
 * the sequence.
 *
 * @return the value
 */
public long readVarLong() {
    long x = data[pos++];
    if (x >= 0) {
        // common case: single byte value
        return x;
    }
    x &= 0x7f;
    for (int s = 7;; s += 7) {
        long b = data[pos++];
        x |= (b & 0x7f) << s;
        if (b >= 0) {
            return x;
        }
    }
}
/**
 * Check if there is still enough capacity in the buffer.
 * This method extends the buffer if required.
 *
 * @param plus the number of additional bytes required
 */
public void checkCapacity(int plus) {
    if (pos + plus >= data.length) {
        // a separate method to simplify inlining
        expand(plus);
    }
}

// Grow the backing array to twice the required size, copying the whole
// old array over.
private void expand(int plus) {
    // must copy everything, because pos could be 0 and data may be
    // still required
    data = Utils.copyBytes(data, (data.length + plus) * 2);
}
/**
 * Fill up the buffer with empty space and an (initially empty) checksum
 * until the size is a multiple of Constants.FILE_BLOCK_SIZE.
 * Two extra bytes are reserved (for the checksum) before rounding up.
 */
public void fillAligned() {
    // 0..6 > 8, 7..14 > 16, 15..22 > 24, ...
    int len = MathUtils.roundUpInt(pos + 2, Constants.FILE_BLOCK_SIZE);
    pos = len;
    if (data.length < len) {
        checkCapacity(len - data.length);
    }
}
/**
 * Copy a String from a reader to an output stream, using the internal
 * (UTF-8-like) character encoding of this class.
 *
 * @param source the reader
 * @param target the output stream
 */
public static void copyString(Reader source, OutputStream target)
        throws IOException {
    char[] buff = new char[Constants.IO_BUFFER_SIZE];
    // each character encodes to at most 3 bytes, so a buffer of three
    // times the character count can never overflow
    Data d = new Data(null, new byte[3 * Constants.IO_BUFFER_SIZE]);
    while (true) {
        int l = source.read(buff);
        if (l < 0) {
            // end of stream
            break;
        }
        d.writeStringWithoutLength(buff, l);
        target.write(d.data, 0, d.pos);
        d.reset();
    }
}
/**
 * Get the data handler responsible for lob objects.
 *
 * @return the data handler, or null if none was set
 */
public DataHandler getHandler() {
    return handler;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/DataHandler.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import org.h2.api.JavaObjectSerializer;
import org.h2.message.DbException;
import org.h2.util.SmallLRUCache;
import org.h2.util.TempFileDeleter;
import org.h2.value.CompareMode;
/**
* A data handler contains a number of callback methods, mostly related to CLOB
* and BLOB handling. The most important implementing class is a database.
*/
public interface DataHandler {

    /**
     * Get the database path.
     *
     * @return the database path
     */
    String getDatabasePath();

    /**
     * Open a file at the given location.
     *
     * @param name the file name
     * @param mode the mode
     * @param mustExist whether the file must already exist
     * @return the file
     */
    FileStore openFile(String name, String mode, boolean mustExist);

    /**
     * Check if the simulated power failure occurred.
     * This call will decrement the countdown.
     *
     * @throws DbException if the simulated power failure occurred
     */
    void checkPowerOff() throws DbException;

    /**
     * Check if writing is allowed.
     *
     * @throws DbException if it is not allowed
     */
    void checkWritingAllowed() throws DbException;

    /**
     * Get the maximum length of an in-place large object.
     *
     * @return the maximum size
     */
    int getMaxLengthInplaceLob();

    /**
     * Get the compression algorithm used for large objects.
     *
     * @param type the data type (CLOB or BLOB)
     * @return the compression algorithm, or null
     */
    String getLobCompressionAlgorithm(int type);

    /**
     * Get the temp file deleter mechanism.
     *
     * @return the temp file deleter
     */
    TempFileDeleter getTempFileDeleter();

    /**
     * Get the synchronization object for lob operations.
     *
     * @return the synchronization object
     */
    Object getLobSyncObject();

    /**
     * Get the lob file list cache if it is used.
     *
     * @return the cache or null
     */
    SmallLRUCache<String, String[]> getLobFileListCache();

    /**
     * Get the lob storage mechanism to use.
     *
     * @return the lob storage mechanism
     */
    LobStorageInterface getLobStorage();

    /**
     * Read from a lob.
     *
     * @param lobId the lob id
     * @param hmac the message authentication code
     * @param offset the offset within the lob
     * @param buff the target buffer
     * @param off the offset within the target buffer
     * @param length the number of bytes to read
     * @return the number of bytes read
     */
    int readLob(long lobId, byte[] hmac, long offset, byte[] buff, int off,
            int length);

    /**
     * Return the serializer to be used for java objects being stored in
     * column of type OTHER.
     *
     * @return the serializer to be used for java objects being stored in
     *         column of type OTHER
     */
    JavaObjectSerializer getJavaObjectSerializer();

    /**
     * Return compare mode.
     *
     * @return Compare mode.
     */
    CompareMode getCompareMode();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/DataReader.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import org.h2.util.IOUtils;
/**
* This class is backed by an input stream and supports reading values and
* variable size data.
*/
public class DataReader extends Reader {
// the underlying stream; all reads are delegated to it
private final InputStream in;

/**
 * Create a new data reader.
 *
 * @param in the input stream
 */
public DataReader(InputStream in) {
    this.in = in;
}
/**
* Read a byte.
*
* @return the byte
*/
public byte readByte() throws IOException {
int x = in.read();
if (x < 0) {
throw new FastEOFException();
}
return (byte) x;
}
/**
* Read a variable size integer.
*
* @return the value
*/
public int readVarInt() throws IOException {
int b = readByte();
if (b >= 0) {
return b;
}
int x = b & 0x7f;
b = readByte();
if (b >= 0) {
return x | (b << 7);
}
x |= (b & 0x7f) << 7;
b = readByte();
if (b >= 0) {
return x | (b << 14);
}
x |= (b & 0x7f) << 14;
b = readByte();
if (b >= 0) {
return x | b << 21;
}
return x | ((b & 0x7f) << 21) | (readByte() << 28);
}
/**
* Read a variable size long.
*
* @return the value
*/
public long readVarLong() throws IOException {
long x = readByte();
if (x >= 0) {
return x;
}
x &= 0x7f;
for (int s = 7;; s += 7) {
long b = readByte();
x |= (b & 0x7f) << s;
if (b >= 0) {
return x;
}
}
}
/**
* Read an integer.
*
* @return the value
*/
// public int readInt() throws IOException {
// return (read() << 24) + ((read() & 0xff) << 16) +
// ((read() & 0xff) << 8) + (read() & 0xff);
//}
/**
* Read a long.
*
* @return the value
*/
// public long readLong() throws IOException {
// return ((long) (readInt()) << 32) + (readInt() & 0xffffffffL);
// }
/**
* Read a number of bytes.
*
* @param buff the target buffer
* @param len the number of bytes to read
*/
public void readFully(byte[] buff, int len) throws IOException {
int got = IOUtils.readFully(in, buff, len);
if (got < len) {
throw new FastEOFException();
}
}
/**
* Read a string from the stream.
*
* @return the string
*/
public String readString() throws IOException {
int len = readVarInt();
return readString(len);
}
private String readString(int len) throws IOException {
char[] chars = new char[len];
for (int i = 0; i < len; i++) {
chars[i] = readChar();
}
return new String(chars);
}
/**
* Read one character from the input stream.
*
* @return the character
*/
private char readChar() throws IOException {
int x = readByte() & 0xff;
if (x < 0x80) {
return (char) x;
} else if (x >= 0xe0) {
return (char) (((x & 0xf) << 12) +
((readByte() & 0x3f) << 6) +
(readByte() & 0x3f));
} else {
return (char) (((x & 0x1f) << 6) +
(readByte() & 0x3f));
}
}
@Override
public void close() throws IOException {
// ignore
}
@Override
public int read(char[] buff, int off, int len) throws IOException {
if (len == 0) {
return 0;
}
int i = 0;
try {
for (; i < len; i++) {
buff[off + i] = readChar();
}
return len;
} catch (EOFException e) {
if (i == 0) {
return -1;
}
return i;
}
}
/**
* Constructing such an EOF exception is fast, because the stack trace is
* not filled in. If used in a static context, this will also avoid
* classloader memory leaks.
*/
static class FastEOFException extends EOFException {
private static final long serialVersionUID = 1L;
@Override
public synchronized Throwable fillInStackTrace() {
return null;
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/FileLister.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.nio.channels.FileChannel;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.message.TraceSystem;
import org.h2.store.fs.FilePath;
import org.h2.store.fs.FileUtils;
import org.h2.util.New;
/**
 * Utility class to list the files of a database.
 */
public class FileLister {

    private FileLister() {
        // utility class
    }

    /**
     * Try to lock the database, and then unlock it. If this worked, the
     * .lock.db file will be removed.
     *
     * @param files the database files to check
     * @param message the text to include in the error message
     * @throws SQLException if the database is currently in use
     */
    public static void tryUnlockDatabase(List<String> files, String message)
            throws SQLException {
        for (String fileName : files) {
            if (fileName.endsWith(Constants.SUFFIX_LOCK_FILE)) {
                // cooperative lock file: locking and unlocking removes it
                FileLock lock = new FileLock(new TraceSystem(null), fileName,
                        Constants.LOCK_SLEEP);
                try {
                    lock.lock(FileLockMethod.FILE);
                    lock.unlock();
                } catch (DbException e) {
                    throw DbException.get(
                            ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1,
                            message).getSQLException();
                }
            } else if (fileName.endsWith(Constants.SUFFIX_MV_FILE)) {
                // MVStore file: try to acquire a shared file-system lock
                try (FileChannel f = FilePath.get(fileName).open("r")) {
                    java.nio.channels.FileLock lock = f.tryLock(0, Long.MAX_VALUE, true);
                    if (lock == null) {
                        // tryLock returns null if another program holds the
                        // lock; fail explicitly instead of relying on the
                        // NullPointerException from lock.release()
                        throw new java.io.IOException(
                                "File is locked: " + fileName);
                    }
                    lock.release();
                } catch (Exception e) {
                    throw DbException.get(
                            ErrorCode.CANNOT_CHANGE_SETTING_WHEN_OPEN_1, e,
                            message).getSQLException();
                }
            }
        }
    }

    /**
     * Normalize the directory name.
     *
     * @param dir the directory (null for the current directory)
     * @return the normalized directory name
     */
    public static String getDir(String dir) {
        if (dir == null || dir.equals("")) {
            return ".";
        }
        return FileUtils.toRealPath(dir);
    }

    /**
     * Get the list of database files.
     *
     * @param dir the directory (must be normalized)
     * @param db the database name (null for all databases)
     * @param all if true, files such as the lock, trace, and lob
     *            files are included. If false, only data, index, log,
     *            and lob files are returned
     * @return the list of files
     */
    public static ArrayList<String> getDatabaseFiles(String dir, String db,
            boolean all) {
        ArrayList<String> files = New.arrayList();
        // for Windows, File.getCanonicalPath("...b.") returns just "...b"
        String start = db == null ? null : (FileUtils.toRealPath(dir + "/" + db) + ".");
        for (String f : FileUtils.newDirectoryStream(dir)) {
            boolean ok = false;
            if (f.endsWith(Constants.SUFFIX_LOBS_DIRECTORY)) {
                // lob directories are scanned recursively
                if (start == null || f.startsWith(start)) {
                    files.addAll(getDatabaseFiles(f, null, all));
                    ok = true;
                }
            } else if (f.endsWith(Constants.SUFFIX_LOB_FILE)) {
                ok = true;
            } else if (f.endsWith(Constants.SUFFIX_PAGE_FILE)) {
                ok = true;
            } else if (f.endsWith(Constants.SUFFIX_MV_FILE)) {
                ok = true;
            } else if (all) {
                if (f.endsWith(Constants.SUFFIX_LOCK_FILE)) {
                    ok = true;
                } else if (f.endsWith(Constants.SUFFIX_TEMP_FILE)) {
                    ok = true;
                } else if (f.endsWith(Constants.SUFFIX_TRACE_FILE)) {
                    ok = true;
                }
            }
            if (ok) {
                if (db == null || f.startsWith(start)) {
                    files.add(f);
                }
            }
        }
        return files;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/FileLock.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.OutputStream;
import java.net.BindException;
import java.net.ConnectException;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.util.Properties;
import org.h2.Driver;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.SessionRemote;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
import org.h2.store.fs.FileUtils;
import org.h2.util.MathUtils;
import org.h2.util.NetUtils;
import org.h2.util.SortedProperties;
import org.h2.util.StringUtils;
import org.h2.value.Transfer;
/**
 * The file lock is used to lock a database so that only one process can write
 * to it. It uses a cooperative locking protocol. Usually a .lock.db file is
 * used, but locking by creating a socket is supported as well.
 * <p>
 * While locked, a watchdog thread keeps the lock alive: it re-writes the lock
 * file when it was modified or deleted (FILE method), or accepts connections
 * on a server socket (SOCKET method).
 */
public class FileLock implements Runnable {

    private static final String MAGIC = "FileLock";
    private static final String FILE = "file";
    private static final String SOCKET = "socket";
    private static final String SERIALIZED = "serialized";
    // number of random bytes in the unique lock id
    private static final int RANDOM_BYTES = 16;
    // polling interval (ms) used while waiting for the lock file to age
    private static final int SLEEP_GAP = 25;
    // assumed granularity of file system timestamps (ms)
    // NOTE(review): 2 s looks like a worst-case assumption - confirm
    private static final int TIME_GRANULARITY = 2000;

    /**
     * The lock file name.
     */
    private volatile String fileName;

    /**
     * The server socket (only used when using the SOCKET mode).
     */
    private volatile ServerSocket serverSocket;

    /**
     * Whether the file is locked.
     */
    private volatile boolean locked;

    /**
     * The number of milliseconds to sleep after checking a file.
     */
    private final int sleep;

    /**
     * The trace object. May be null if no trace system was given to the
     * constructor; most methods dereference it without a null check.
     */
    private final Trace trace;

    /**
     * The last time the lock file was written.
     */
    private long lastWrite;

    // the lock method name and (for SOCKET mode) the local IP address
    private String method, ipAddress;
    // the content of the lock file
    private Properties properties;
    // time stamp plus random bytes, identifying this lock holder
    private String uniqueId;
    // background thread that keeps the lock alive
    private Thread watchdog;

    /**
     * Create a new file locking object.
     *
     * @param traceSystem the trace system to use (may be null)
     * @param fileName the file name
     * @param sleep the number of milliseconds to sleep
     */
    public FileLock(TraceSystem traceSystem, String fileName, int sleep) {
        this.trace = traceSystem == null ?
                null : traceSystem.getTrace(Trace.FILE_LOCK);
        this.fileName = fileName;
        this.sleep = sleep;
    }

    /**
     * Lock the file if possible. A file may only be locked once.
     *
     * @param fileLockMethod the file locking method to use
     * @throws DbException if locking was not successful
     */
    public synchronized void lock(FileLockMethod fileLockMethod) {
        checkServer();
        if (locked) {
            DbException.throwInternalError("already locked");
        }
        switch (fileLockMethod) {
        case FILE:
            lockFile();
            break;
        case SOCKET:
            lockSocket();
            break;
        case SERIALIZED:
            lockSerialized();
            break;
        case FS:
        case NO:
            // no explicit lock file / socket is used
            break;
        }
        locked = true;
    }

    /**
     * Unlock the file. The watchdog thread is stopped. This method does nothing
     * if the file is already unlocked.
     */
    public synchronized void unlock() {
        if (!locked) {
            return;
        }
        locked = false;
        try {
            if (watchdog != null) {
                watchdog.interrupt();
            }
        } catch (Exception e) {
            trace.debug(e, "unlock");
        }
        try {
            if (fileName != null) {
                // only delete the lock file if it is still ours
                if (load().equals(properties)) {
                    FileUtils.delete(fileName);
                }
            }
            if (serverSocket != null) {
                serverSocket.close();
            }
        } catch (Exception e) {
            trace.debug(e, "unlock");
        } finally {
            fileName = null;
            serverSocket = null;
        }
        try {
            // wait until the watchdog thread has terminated
            if (watchdog != null) {
                watchdog.join();
            }
        } catch (Exception e) {
            trace.debug(e, "unlock");
        } finally {
            watchdog = null;
        }
    }

    /**
     * Add or change a setting to the properties. This call does not save the
     * file.
     *
     * @param key the key
     * @param value the value (null removes the key)
     */
    public void setProperty(String key, String value) {
        if (value == null) {
            properties.remove(key);
        } else {
            properties.put(key, value);
        }
    }

    /**
     * Save the lock file.
     *
     * @return the saved properties
     */
    public Properties save() {
        try {
            try (OutputStream out = FileUtils.newOutputStream(fileName, false)) {
                properties.store(out, MAGIC);
            }
            // remember the modification time so the watchdog can detect
            // changes made by other processes
            lastWrite = FileUtils.lastModified(fileName);
            if (trace.isDebugEnabled()) {
                trace.debug("save " + properties);
            }
            return properties;
        } catch (IOException e) {
            throw getExceptionFatal("Could not save properties " + fileName, e);
        }
    }

    /**
     * If the lock file names a server, try to connect to it and check whether
     * that session is still alive. Throws if the database is already open.
     */
    private void checkServer() {
        Properties prop = load();
        String server = prop.getProperty("server");
        if (server == null) {
            return;
        }
        boolean running = false;
        String id = prop.getProperty("id");
        try {
            Socket socket = NetUtils.createSocket(server,
                    Constants.DEFAULT_TCP_PORT, false);
            Transfer transfer = new Transfer(null, socket);
            transfer.init();
            transfer.writeInt(Constants.TCP_PROTOCOL_VERSION_MIN_SUPPORTED);
            transfer.writeInt(Constants.TCP_PROTOCOL_VERSION_MAX_SUPPORTED);
            transfer.writeString(null);
            transfer.writeString(null);
            transfer.writeString(id);
            transfer.writeInt(SessionRemote.SESSION_CHECK_KEY);
            transfer.flush();
            int state = transfer.readInt();
            if (state == SessionRemote.STATUS_OK) {
                running = true;
            }
            transfer.close();
            socket.close();
        } catch (IOException e) {
            // the server is not running (or not reachable): nothing to do
            return;
        }
        if (running) {
            DbException e = DbException.get(
                    ErrorCode.DATABASE_ALREADY_OPEN_1, "Server is running");
            throw e.addSQL(server + "/" + id);
        }
    }

    /**
     * Load the properties file. Reading is retried a few times, because
     * another process may be writing the file concurrently.
     *
     * @return the properties
     */
    public Properties load() {
        IOException lastException = null;
        for (int i = 0; i < 5; i++) {
            try {
                Properties p2 = SortedProperties.loadProperties(fileName);
                if (trace.isDebugEnabled()) {
                    trace.debug("load " + p2);
                }
                return p2;
            } catch (IOException e) {
                lastException = e;
            }
        }
        throw getExceptionFatal(
                "Could not load properties " + fileName, lastException);
    }

    /**
     * Wait until the lock file is older than TIME_GRANULARITY, meaning the
     * previous holder has stopped updating it.
     */
    private void waitUntilOld() {
        for (int i = 0; i < 2 * TIME_GRANULARITY / SLEEP_GAP; i++) {
            long last = FileUtils.lastModified(fileName);
            long dist = System.currentTimeMillis() - last;
            if (dist < -TIME_GRANULARITY) {
                // lock file modified in the future -
                // wait for a bit longer than usual
                try {
                    Thread.sleep(2 * (long) sleep);
                } catch (Exception e) {
                    trace.debug(e, "sleep");
                }
                return;
            } else if (dist > TIME_GRANULARITY) {
                return;
            }
            try {
                Thread.sleep(SLEEP_GAP);
            } catch (Exception e) {
                trace.debug(e, "sleep");
            }
        }
        throw getExceptionFatal("Lock file recently modified", null);
    }

    // generate a unique id (time stamp plus random bytes) and store it
    // in the properties under "id"
    private void setUniqueId() {
        byte[] bytes = MathUtils.secureRandomBytes(RANDOM_BYTES);
        String random = StringUtils.convertBytesToHex(bytes);
        uniqueId = Long.toHexString(System.currentTimeMillis()) + random;
        properties.setProperty("id", uniqueId);
    }

    // SERIALIZED mode: multiple processes may hold the file; only the
    // first one creates and writes the lock file
    private void lockSerialized() {
        method = SERIALIZED;
        FileUtils.createDirectories(FileUtils.getParent(fileName));
        if (FileUtils.createFile(fileName)) {
            properties = new SortedProperties();
            properties.setProperty("method", String.valueOf(method));
            setUniqueId();
            save();
        } else {
            while (true) {
                try {
                    properties = load();
                } catch (DbException e) {
                    // ignore
                }
                return;
            }
        }
    }

    // FILE mode: create the lock file, or take over an old one by
    // overwriting it and verifying (after a delay) that nobody else
    // overwrote it in the meantime
    private void lockFile() {
        method = FILE;
        properties = new SortedProperties();
        properties.setProperty("method", String.valueOf(method));
        setUniqueId();
        FileUtils.createDirectories(FileUtils.getParent(fileName));
        if (!FileUtils.createFile(fileName)) {
            waitUntilOld();
            String m2 = load().getProperty("method", FILE);
            if (!m2.equals(FILE)) {
                throw getExceptionFatal("Unsupported lock method " + m2, null);
            }
            save();
            sleep(2 * sleep);
            if (!load().equals(properties)) {
                throw getExceptionAlreadyInUse("Locked by another process: " + fileName);
            }
            FileUtils.delete(fileName);
            if (!FileUtils.createFile(fileName)) {
                throw getExceptionFatal("Another process was faster", null);
            }
        }
        save();
        sleep(SLEEP_GAP);
        if (!load().equals(properties)) {
            fileName = null;
            throw getExceptionFatal("Concurrent update", null);
        }
        locked = true;
        watchdog = new Thread(this, "H2 File Lock Watchdog " + fileName);
        Driver.setThreadContextClassLoader(watchdog);
        watchdog.setDaemon(true);
        watchdog.setPriority(Thread.MAX_PRIORITY - 1);
        watchdog.start();
    }

    // SOCKET mode: the lock file stores the address and port of a server
    // socket; the lock is considered stale if nothing listens there
    private void lockSocket() {
        method = SOCKET;
        properties = new SortedProperties();
        properties.setProperty("method", String.valueOf(method));
        setUniqueId();
        // if this returns 127.0.0.1,
        // the computer is probably not networked
        ipAddress = NetUtils.getLocalAddress();
        FileUtils.createDirectories(FileUtils.getParent(fileName));
        if (!FileUtils.createFile(fileName)) {
            waitUntilOld();
            long read = FileUtils.lastModified(fileName);
            Properties p2 = load();
            String m2 = p2.getProperty("method", SOCKET);
            if (m2.equals(FILE)) {
                // fall back to the FILE protocol used by the previous holder
                lockFile();
                return;
            } else if (!m2.equals(SOCKET)) {
                throw getExceptionFatal("Unsupported lock method " + m2, null);
            }
            String ip = p2.getProperty("ipAddress", ipAddress);
            if (!ipAddress.equals(ip)) {
                throw getExceptionAlreadyInUse("Locked by another computer: " + ip);
            }
            String port = p2.getProperty("port", "0");
            int portId = Integer.parseInt(port);
            InetAddress address;
            try {
                address = InetAddress.getByName(ip);
            } catch (UnknownHostException e) {
                throw getExceptionFatal("Unknown host " + ip, e);
            }
            for (int i = 0; i < 3; i++) {
                try {
                    // if the connect succeeds, the lock holder is alive
                    Socket s = new Socket(address, portId);
                    s.close();
                    throw getExceptionAlreadyInUse("Locked by another process");
                } catch (BindException e) {
                    throw getExceptionFatal("Bind Exception", null);
                } catch (ConnectException e) {
                    trace.debug(e, "socket not connected to port " + port);
                } catch (IOException e) {
                    throw getExceptionFatal("IOException", null);
                }
            }
            if (read != FileUtils.lastModified(fileName)) {
                throw getExceptionFatal("Concurrent update", null);
            }
            FileUtils.delete(fileName);
            if (!FileUtils.createFile(fileName)) {
                throw getExceptionFatal("Another process was faster", null);
            }
        }
        try {
            // 0 to use any free port
            serverSocket = NetUtils.createServerSocket(0, false);
            int port = serverSocket.getLocalPort();
            properties.setProperty("ipAddress", ipAddress);
            properties.setProperty("port", String.valueOf(port));
        } catch (Exception e) {
            // creating the socket failed: fall back to the FILE protocol
            trace.debug(e, "lock");
            serverSocket = null;
            lockFile();
            return;
        }
        save();
        locked = true;
        watchdog = new Thread(this,
                "H2 File Lock Watchdog (Socket) " + fileName);
        watchdog.setDaemon(true);
        watchdog.start();
    }

    private static void sleep(int time) {
        try {
            Thread.sleep(time);
        } catch (InterruptedException e) {
            throw getExceptionFatal("Sleep interrupted", e);
        }
    }

    private static DbException getExceptionFatal(String reason, Throwable t) {
        return DbException.get(
                ErrorCode.ERROR_OPENING_DATABASE_1, t, reason);
    }

    // build a "database already open" exception, adding the server id from
    // the lock file when available
    private DbException getExceptionAlreadyInUse(String reason) {
        DbException e = DbException.get(
                ErrorCode.DATABASE_ALREADY_OPEN_1, reason);
        if (fileName != null) {
            try {
                Properties prop = load();
                String server = prop.getProperty("server");
                if (server != null) {
                    String serverId = server + "/" + prop.getProperty("id");
                    e = e.addSQL(serverId);
                }
            } catch (DbException e2) {
                // ignore
            }
        }
        return e;
    }

    /**
     * Get the file locking method type given a method name.
     *
     * @param method the method name (case insensitive; null means FILE)
     * @return the method type
     * @throws DbException if the method name is unknown
     */
    public static FileLockMethod getFileLockMethod(String method) {
        if (method == null || method.equalsIgnoreCase("FILE")) {
            return FileLockMethod.FILE;
        } else if (method.equalsIgnoreCase("NO")) {
            return FileLockMethod.NO;
        } else if (method.equalsIgnoreCase("SOCKET")) {
            return FileLockMethod.SOCKET;
        } else if (method.equalsIgnoreCase("SERIALIZED")) {
            return FileLockMethod.SERIALIZED;
        } else if (method.equalsIgnoreCase("FS")) {
            return FileLockMethod.FS;
        } else {
            throw DbException.get(
                    ErrorCode.UNSUPPORTED_LOCK_METHOD_1, method);
        }
    }

    public String getUniqueId() {
        return uniqueId;
    }

    /**
     * The watchdog thread: re-writes the lock file if it was changed or
     * deleted (FILE mode), then accepts and closes connections on the
     * server socket (SOCKET mode) until the lock is released.
     */
    @Override
    public void run() {
        try {
            while (locked && fileName != null) {
                // trace.debug("watchdog check");
                try {
                    if (!FileUtils.exists(fileName) ||
                            FileUtils.lastModified(fileName) != lastWrite) {
                        save();
                    }
                    Thread.sleep(sleep);
                } catch (OutOfMemoryError e) {
                    // ignore
                } catch (InterruptedException e) {
                    // ignore
                } catch (NullPointerException e) {
                    // ignore
                } catch (Exception e) {
                    trace.debug(e, "watchdog");
                }
            }
            while (true) {
                // take a copy so we don't get an NPE between checking it and using it
                ServerSocket local = serverSocket;
                if (local == null) {
                    break;
                }
                try {
                    trace.debug("watchdog accept");
                    Socket s = local.accept();
                    s.close();
                } catch (Exception e) {
                    trace.debug(e, "watchdog");
                }
            }
        } catch (Exception e) {
            trace.debug(e, "watchdog");
        }
        trace.debug("watchdog end");
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/FileLockMethod.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
/**
 * The list of supported database file locking methods.
 * See also {@code FileLock.getFileLockMethod(String)}.
 */
public enum FileLockMethod {
    /**
     * This locking method means no locking is used at all.
     */
    NO,

    /**
     * This locking method means the cooperative file locking protocol should be
     * used.
     */
    FILE,

    /**
     * This locking method means a socket is created on the given machine.
     */
    SOCKET,

    /**
     * This locking method means multiple writers are allowed, and they
     * synchronize themselves.
     */
    SERIALIZED,

    /**
     * Use the file system to lock the file; don't use a separate lock file.
     */
    FS
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/FileStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.lang.ref.Reference;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.security.SecureFileStore;
import org.h2.store.fs.FileUtils;
/**
 * This class is an abstraction of a random access file.
 * Each file contains a magic header, and reading / writing is done in blocks.
 * See also {@link SecureFileStore}
 * <p>
 * All reads, writes and seeks must be aligned to
 * {@code Constants.FILE_BLOCK_SIZE}; the current position and file length
 * are tracked in {@code filePos} and {@code fileLength}.
 */
public class FileStore {

    /**
     * The size of the file header in bytes.
     */
    public static final int HEADER_LENGTH = 3 * Constants.FILE_BLOCK_SIZE;

    /**
     * The magic file header. Padded / truncated to exactly one file block.
     */
    private static final String HEADER =
            "-- H2 0.5/B -- ".substring(0, Constants.FILE_BLOCK_SIZE - 1) + "\n";

    /**
     * The file name.
     */
    protected String name;

    /**
     * The callback object is responsible to check access rights, and free up
     * disk space if required.
     */
    private final DataHandler handler;

    private FileChannel file;
    // the current position within the file; kept in sync with the channel
    private long filePos;
    // the cached file length; updated on write and setLength
    private long fileLength;
    // reference used by the temp file deleter when autoDelete is enabled
    private Reference<?> autoDeleteReference;
    private boolean checkedWriting = true;
    private final String mode;
    private java.nio.channels.FileLock lock;

    /**
     * Create a new file using the given settings.
     *
     * @param handler the callback object
     * @param name the file name
     * @param mode the access mode ("r", "rw", "rws", "rwd")
     */
    protected FileStore(DataHandler handler, String name, String mode) {
        this.handler = handler;
        this.name = name;
        try {
            boolean exists = FileUtils.exists(name);
            if (exists && !FileUtils.canWrite(name)) {
                // fall back to read-only if the file cannot be written
                mode = "r";
            } else {
                FileUtils.createDirectories(FileUtils.getParent(name));
            }
            file = FileUtils.open(name, mode);
            if (exists) {
                fileLength = file.size();
            }
        } catch (IOException e) {
            throw DbException.convertIOException(
                    e, "name: " + name + " mode: " + mode);
        }
        this.mode = mode;
    }

    /**
     * Open a non encrypted file store with the given settings.
     *
     * @param handler the data handler
     * @param name the file name
     * @param mode the access mode (r, rw, rws, rwd)
     * @return the created object
     */
    public static FileStore open(DataHandler handler, String name, String mode) {
        return open(handler, name, mode, null, null, 0);
    }

    /**
     * Open an encrypted file store with the given settings.
     *
     * @param handler the data handler
     * @param name the file name
     * @param mode the access mode (r, rw, rws, rwd)
     * @param cipher the name of the cipher algorithm
     * @param key the encryption key
     * @return the created object
     */
    public static FileStore open(DataHandler handler, String name, String mode,
            String cipher, byte[] key) {
        return open(handler, name, mode, cipher, key,
                Constants.ENCRYPTION_KEY_HASH_ITERATIONS);
    }

    /**
     * Open an encrypted file store with the given settings.
     *
     * @param handler the data handler
     * @param name the file name
     * @param mode the access mode (r, rw, rws, rwd)
     * @param cipher the name of the cipher algorithm (null for unencrypted)
     * @param key the encryption key
     * @param keyIterations the number of iterations the key should be hashed
     * @return the created object
     */
    public static FileStore open(DataHandler handler, String name, String mode,
            String cipher, byte[] key, int keyIterations) {
        FileStore store;
        if (cipher == null) {
            store = new FileStore(handler, name, mode);
        } else {
            store = new SecureFileStore(handler, name, mode,
                    cipher, key, keyIterations);
        }
        return store;
    }

    /**
     * Generate the random salt bytes if required. This base implementation
     * returns the magic header; subclasses may return real random salt.
     *
     * @return the random salt or the magic
     */
    protected byte[] generateSalt() {
        return HEADER.getBytes(StandardCharsets.UTF_8);
    }

    /**
     * Initialize the key using the given salt. This base implementation does
     * nothing; subclasses that encrypt derive the key here.
     *
     * @param salt the salt
     */
    @SuppressWarnings("unused")
    protected void initKey(byte[] salt) {
        // do nothing
    }

    public void setCheckedWriting(boolean value) {
        this.checkedWriting = value;
    }

    private void checkWritingAllowed() {
        if (handler != null && checkedWriting) {
            handler.checkWritingAllowed();
        }
    }

    private void checkPowerOff() {
        if (handler != null) {
            handler.checkPowerOff();
        }
    }

    /**
     * Initialize the file. This method will write or check the file header if
     * required. The header consists of the plain magic block, the salt block,
     * and a (possibly encrypted) magic block.
     */
    public void init() {
        int len = Constants.FILE_BLOCK_SIZE;
        byte[] salt;
        byte[] magic = HEADER.getBytes(StandardCharsets.UTF_8);
        if (length() < HEADER_LENGTH) {
            // write unencrypted
            checkedWriting = false;
            writeDirect(magic, 0, len);
            salt = generateSalt();
            writeDirect(salt, 0, len);
            initKey(salt);
            // write (maybe) encrypted
            write(magic, 0, len);
            checkedWriting = true;
        } else {
            // read unencrypted
            seek(0);
            byte[] buff = new byte[len];
            readFullyDirect(buff, 0, len);
            if (!Arrays.equals(buff, magic)) {
                throw DbException.get(ErrorCode.FILE_VERSION_ERROR_1, name);
            }
            salt = new byte[len];
            readFullyDirect(salt, 0, len);
            initKey(salt);
            // read (maybe) encrypted
            readFully(buff, 0, Constants.FILE_BLOCK_SIZE);
            if (!Arrays.equals(buff, magic)) {
                // decryption failed, most likely a wrong password
                throw DbException.get(ErrorCode.FILE_ENCRYPTION_ERROR_1, name);
            }
        }
    }

    /**
     * Close the file.
     */
    public void close() {
        if (file != null) {
            try {
                trace("close", name, file);
                file.close();
            } catch (IOException e) {
                throw DbException.convertIOException(e, name);
            } finally {
                file = null;
            }
        }
    }

    /**
     * Close the file without throwing any exceptions. Exceptions are simply
     * ignored.
     */
    public void closeSilently() {
        try {
            close();
        } catch (Exception e) {
            // ignore
        }
    }

    /**
     * Close the file (ignoring exceptions) and delete the file.
     */
    public void closeAndDeleteSilently() {
        if (file != null) {
            closeSilently();
            handler.getTempFileDeleter().deleteFile(autoDeleteReference, name);
            name = null;
        }
    }

    /**
     * Read a number of bytes without decrypting.
     *
     * @param b the target buffer
     * @param off the offset
     * @param len the number of bytes to read
     */
    protected void readFullyDirect(byte[] b, int off, int len) {
        readFully(b, off, len);
    }

    /**
     * Read a number of bytes. The length must be a multiple of the file
     * block size.
     *
     * @param b the target buffer
     * @param off the offset
     * @param len the number of bytes to read
     */
    public void readFully(byte[] b, int off, int len) {
        if (SysProperties.CHECK &&
                (len < 0 || len % Constants.FILE_BLOCK_SIZE != 0)) {
            DbException.throwInternalError(
                    "unaligned read " + name + " len " + len);
        }
        checkPowerOff();
        try {
            FileUtils.readFully(file, ByteBuffer.wrap(b, off, len));
        } catch (IOException e) {
            throw DbException.convertIOException(e, name);
        }
        filePos += len;
    }

    /**
     * Go to the specified file location. The position must be a multiple of
     * the file block size.
     *
     * @param pos the location
     */
    public void seek(long pos) {
        if (SysProperties.CHECK &&
                pos % Constants.FILE_BLOCK_SIZE != 0) {
            DbException.throwInternalError(
                    "unaligned seek " + name + " pos " + pos);
        }
        try {
            if (pos != filePos) {
                file.position(pos);
                filePos = pos;
            }
        } catch (IOException e) {
            throw DbException.convertIOException(e, name);
        }
    }

    /**
     * Write a number of bytes without encrypting.
     *
     * @param b the source buffer
     * @param off the offset
     * @param len the number of bytes to write
     */
    protected void writeDirect(byte[] b, int off, int len) {
        write(b, off, len);
    }

    /**
     * Write a number of bytes. The length must be a multiple of the file
     * block size.
     *
     * @param b the source buffer
     * @param off the offset
     * @param len the number of bytes to write
     */
    public void write(byte[] b, int off, int len) {
        if (SysProperties.CHECK && (len < 0 ||
                len % Constants.FILE_BLOCK_SIZE != 0)) {
            DbException.throwInternalError(
                    "unaligned write " + name + " len " + len);
        }
        checkWritingAllowed();
        checkPowerOff();
        try {
            FileUtils.writeFully(file, ByteBuffer.wrap(b, off, len));
        } catch (IOException e) {
            // close the file to avoid later NullPointerExceptions
            closeFileSilently();
            throw DbException.convertIOException(e, name);
        }
        filePos += len;
        fileLength = Math.max(filePos, fileLength);
    }

    /**
     * Set the length of the file. This will expand or shrink the file.
     *
     * @param newLength the new file size (must be block aligned)
     */
    public void setLength(long newLength) {
        if (SysProperties.CHECK && newLength % Constants.FILE_BLOCK_SIZE != 0) {
            DbException.throwInternalError(
                    "unaligned setLength " + name + " pos " + newLength);
        }
        checkPowerOff();
        checkWritingAllowed();
        try {
            if (newLength > fileLength) {
                // grow the file by writing a byte at the new end,
                // then restore the old position
                long pos = filePos;
                file.position(newLength - 1);
                FileUtils.writeFully(file, ByteBuffer.wrap(new byte[1]));
                file.position(pos);
            } else {
                file.truncate(newLength);
            }
            fileLength = newLength;
        } catch (IOException e) {
            closeFileSilently();
            throw DbException.convertIOException(e, name);
        }
    }

    /**
     * Get the file size in bytes.
     *
     * @return the file size
     */
    public long length() {
        try {
            long len = fileLength;
            if (SysProperties.CHECK2) {
                // expensive verification of the cached length
                len = file.size();
                if (len != fileLength) {
                    DbException.throwInternalError(
                            "file " + name + " length " + len + " expected " + fileLength);
                }
            }
            if (SysProperties.CHECK2 && len % Constants.FILE_BLOCK_SIZE != 0) {
                // re-align the file before reporting the error
                long newLength = len + Constants.FILE_BLOCK_SIZE -
                        (len % Constants.FILE_BLOCK_SIZE);
                file.truncate(newLength);
                fileLength = newLength;
                DbException.throwInternalError(
                        "unaligned file length " + name + " len " + len);
            }
            return len;
        } catch (IOException e) {
            throw DbException.convertIOException(e, name);
        }
    }

    /**
     * Get the current location of the file pointer.
     *
     * @return the location
     */
    public long getFilePointer() {
        if (SysProperties.CHECK2) {
            try {
                if (file.position() != filePos) {
                    DbException.throwInternalError(file.position() + " " + filePos);
                }
            } catch (IOException e) {
                throw DbException.convertIOException(e, name);
            }
        }
        return filePos;
    }

    /**
     * Call fsync. Depending on the operating system and hardware, this may or
     * may not in fact write the changes.
     */
    public void sync() {
        try {
            file.force(true);
        } catch (IOException e) {
            closeFileSilently();
            throw DbException.convertIOException(e, name);
        }
    }

    /**
     * Automatically delete the file once it is no longer in use.
     */
    public void autoDelete() {
        if (autoDeleteReference == null) {
            autoDeleteReference = handler.getTempFileDeleter().addFile(name, this);
        }
    }

    /**
     * No longer automatically delete the file once it is no longer in use.
     */
    public void stopAutoDelete() {
        handler.getTempFileDeleter().stopAutoDelete(autoDeleteReference, name);
        autoDeleteReference = null;
    }

    /**
     * Close the file. The file may later be re-opened using openFile.
     */
    public void closeFile() throws IOException {
        file.close();
        file = null;
    }

    /**
     * Just close the file, without setting the reference to null. This method
     * is called when writing failed. The reference is not set to null so that
     * there are no NullPointerExceptions later on.
     */
    private void closeFileSilently() {
        try {
            file.close();
        } catch (IOException e) {
            // ignore
        }
    }

    /**
     * Re-open the file. The file pointer will be reset to the previous
     * location.
     */
    public void openFile() throws IOException {
        if (file == null) {
            file = FileUtils.open(name, mode);
            file.position(filePos);
        }
    }

    private static void trace(String method, String fileName, Object o) {
        if (SysProperties.TRACE_IO) {
            System.out.println("FileStore." + method + " " + fileName + " " + o);
        }
    }

    /**
     * Try to lock the file.
     *
     * @return true if successful
     */
    public synchronized boolean tryLock() {
        try {
            lock = file.tryLock();
            return lock != null;
        } catch (Exception e) {
            // ignore OverlappingFileLockException
            return false;
        }
    }

    /**
     * Release the file lock.
     */
    public synchronized void releaseLock() {
        if (file != null && lock != null) {
            try {
                lock.release();
            } catch (Exception e) {
                // ignore
            }
            lock = null;
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/FileStoreInputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.InputStream;
import org.h2.engine.Constants;
import org.h2.message.DbException;
import org.h2.tools.CompressTool;
import org.h2.util.Utils;
/**
 * An input stream that is backed by a file store. Data is read page by page:
 * each page is block-aligned (a multiple of Constants.FILE_BLOCK_SIZE) and
 * starts with an int payload-length header, followed by a second int (the
 * uncompressed length) when compression is enabled.
 */
public class FileStoreInputStream extends InputStream {

    // The underlying store; set to null by close().
    private FileStore store;
    // Buffer holding the current page.
    private final Data page;
    // Payload bytes of the current page not yet consumed.
    private int remainingInBuffer;
    // Non-null when the stream was created with compression enabled.
    private final CompressTool compress;
    private boolean endOfFile;
    // If set, the underlying file is closed after each page read and
    // re-opened lazily (see fillBuffer / FileStore.openFile).
    private final boolean alwaysClose;

    /**
     * Create an input stream over the given store. The first page is read
     * eagerly, so construction fails fast on I/O errors.
     *
     * @param store the file store to read from
     * @param handler the data handler used to create the page buffer
     * @param compression whether the stored data is compressed
     * @param alwaysClose whether to close the file after each page read
     */
    public FileStoreInputStream(FileStore store, DataHandler handler,
            boolean compression, boolean alwaysClose) {
        this.store = store;
        this.alwaysClose = alwaysClose;
        if (compression) {
            compress = CompressTool.getInstance();
        } else {
            compress = null;
        }
        page = Data.create(handler, Constants.FILE_BLOCK_SIZE);
        try {
            if (store.length() <= FileStore.HEADER_LENGTH) {
                // nothing but the file header: the stream is empty
                close();
            } else {
                fillBuffer();
            }
        } catch (IOException e) {
            throw DbException.convertIOException(e, store.name);
        }
    }

    @Override
    public int available() {
        // only what is already buffered is available without further I/O
        return remainingInBuffer <= 0 ? 0 : remainingInBuffer;
    }

    @Override
    public int read(byte[] buff) throws IOException {
        return read(buff, 0, buff.length);
    }

    /**
     * Read up to len bytes, refilling the page buffer as needed.
     *
     * @return the number of bytes read, or -1 at end of file
     */
    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        if (len == 0) {
            return 0;
        }
        int read = 0;
        while (len > 0) {
            int r = readBlock(b, off, len);
            if (r < 0) {
                break;
            }
            read += r;
            off += r;
            len -= r;
        }
        return read == 0 ? -1 : read;
    }

    // Copy at most min(len, remainingInBuffer) bytes out of the current
    // page; returns -1 at end of file.
    private int readBlock(byte[] buff, int off, int len) throws IOException {
        fillBuffer();
        if (endOfFile) {
            return -1;
        }
        int l = Math.min(remainingInBuffer, len);
        page.read(buff, off, l);
        remainingInBuffer -= l;
        return l;
    }

    // Read the next page into the buffer, if the current one is exhausted.
    private void fillBuffer() throws IOException {
        if (remainingInBuffer > 0 || endOfFile) {
            return;
        }
        page.reset();
        store.openFile();
        if (store.length() == store.getFilePointer()) {
            // reached the end of the store
            close();
            return;
        }
        // read the first block, which starts with the payload length header
        store.readFully(page.getBytes(), 0, Constants.FILE_BLOCK_SIZE);
        page.reset();
        remainingInBuffer = page.readInt();
        if (remainingInBuffer < 0) {
            // negative length: treated as end of stream
            close();
            return;
        }
        page.checkCapacity(remainingInBuffer);
        // get the length to read
        if (compress != null) {
            // skip the uncompressed-length header for now; it is re-read
            // below after the full page is in memory
            page.checkCapacity(Data.LENGTH_INT);
            page.readInt();
        }
        // advance past the payload and align to compute the full page size
        page.setPos(page.length() + remainingInBuffer);
        page.fillAligned();
        int len = page.length() - Constants.FILE_BLOCK_SIZE;
        page.reset();
        page.readInt();
        // read the remainder of the page after the first block
        store.readFully(page.getBytes(), Constants.FILE_BLOCK_SIZE, len);
        page.reset();
        page.readInt();
        if (compress != null) {
            // expand the compressed payload back into the page buffer
            int uncompressed = page.readInt();
            byte[] buff = Utils.newBytes(remainingInBuffer);
            page.read(buff, 0, remainingInBuffer);
            page.reset();
            page.checkCapacity(uncompressed);
            CompressTool.expand(buff, page.getBytes(), 0);
            remainingInBuffer = uncompressed;
        }
        if (alwaysClose) {
            store.closeFile();
        }
    }

    /**
     * Close the stream. Idempotent: the store reference is cleared so a
     * second call is a no-op.
     */
    @Override
    public void close() {
        if (store != null) {
            try {
                store.close();
                endOfFile = true;
            } finally {
                store = null;
            }
        }
    }

    // Safety net: make sure the file handle is released on GC.
    @Override
    protected void finalize() {
        close();
    }

    @Override
    public int read() throws IOException {
        fillBuffer();
        if (endOfFile) {
            return -1;
        }
        int i = page.readByte() & 0xff;
        remainingInBuffer--;
        return i;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/FileStoreOutputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.OutputStream;
import java.util.Arrays;
import org.h2.engine.Constants;
import org.h2.tools.CompressTool;
/**
 * An output stream that is backed by a file store. Every write call emits
 * one block-aligned page: an int payload-length header (plus an int
 * uncompressed-length header when compression is enabled), followed by the
 * payload.
 */
public class FileStoreOutputStream extends OutputStream {

    private FileStore store;
    private final Data page;
    private final String compressionAlgorithm;
    private final CompressTool compress;
    // Scratch buffer for the single-byte write(int) variant.
    private final byte[] buffer = { 0 };

    /**
     * Create an output stream over the given store.
     *
     * @param store the file store to write to
     * @param handler the data handler used to create the page buffer
     * @param compressionAlgorithm the compression algorithm, or null for none
     */
    public FileStoreOutputStream(FileStore store, DataHandler handler,
            String compressionAlgorithm) {
        this.store = store;
        boolean compressed = compressionAlgorithm != null;
        this.compress = compressed ? CompressTool.getInstance() : null;
        this.compressionAlgorithm = compressed ? compressionAlgorithm : null;
        page = Data.create(handler, Constants.FILE_BLOCK_SIZE);
    }

    @Override
    public void write(int b) {
        buffer[0] = (byte) b;
        write(buffer);
    }

    @Override
    public void write(byte[] buff) {
        write(buff, 0, buff.length);
    }

    @Override
    public void write(byte[] buff, int off, int len) {
        if (len <= 0) {
            return;
        }
        page.reset();
        if (compress == null) {
            // plain page: [payload length][payload]
            page.checkCapacity(Data.LENGTH_INT + len);
            page.writeInt(len);
            page.write(buff, off, len);
        } else {
            // compressed page: [compressed length][uncompressed length][data];
            // the compressor needs a buffer that is exactly the payload
            byte[] chunk = (off == 0 && len == buff.length)
                    ? buff
                    : Arrays.copyOfRange(buff, off, off + len);
            int uncompressed = len;
            byte[] comp = compress.compress(chunk, compressionAlgorithm);
            page.checkCapacity(2 * Data.LENGTH_INT + comp.length);
            page.writeInt(comp.length);
            page.writeInt(uncompressed);
            page.write(comp, 0, comp.length);
        }
        // pad to the block size before handing the page to the store
        page.fillAligned();
        store.write(page.getBytes(), 0, page.length());
    }

    /**
     * Close the stream. Idempotent: the store reference is cleared first,
     * so a second call is a no-op even if closing the store fails.
     */
    @Override
    public void close() {
        FileStore s = store;
        if (s != null) {
            store = null;
            s.close();
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/InDoubtTransaction.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
/**
 * Represents an in-doubt transaction (a transaction in the prepare phase):
 * it was prepared but neither committed nor rolled back.
 */
public interface InDoubtTransaction {

    /**
     * The transaction state meaning this transaction is not committed yet, but
     * also not rolled back (in-doubt).
     */
    int IN_DOUBT = 0;

    /**
     * The transaction state meaning this transaction is committed.
     */
    int COMMIT = 1;

    /**
     * The transaction state meaning this transaction is rolled back.
     */
    int ROLLBACK = 2;

    /**
     * Change the state of this transaction.
     * This will also update the transaction log.
     *
     * @param state the new state: one of {@link #IN_DOUBT}, {@link #COMMIT},
     *            or {@link #ROLLBACK}
     */
    void setState(int state);

    /**
     * Get the state of this transaction as a text.
     *
     * @return the transaction state text
     */
    String getState();

    /**
     * Get the name of the transaction.
     *
     * @return the transaction name
     */
    String getTransactionName();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/LobStorageBackend.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import org.h2.api.ErrorCode;
import org.h2.engine.Database;
import org.h2.engine.SysProperties;
import org.h2.jdbc.JdbcConnection;
import org.h2.message.DbException;
import org.h2.tools.CompressTool;
import org.h2.util.IOUtils;
import org.h2.util.MathUtils;
import org.h2.util.New;
import org.h2.value.Value;
import org.h2.value.ValueLobDb;
/**
* This class stores LOB objects in the database, in tables. This is the
* back-end i.e. the server side of the LOB storage.
* <p>
* Using the system session
* <p>
* Why do we use the system session to store the data? Some LOB operations can
* take a very long time. If we did them on a normal session, we would be
* locking the LOB tables for long periods of time, which is extremely
* detrimental to the rest of the system. Perhaps when we shift to the MVStore
* engine, we can revisit this design decision (using the StreamStore, that is,
* no connection at all).
* <p>
* Locking
* <p>
* Normally, the locking order in H2 is: first lock the Session object, then
* lock the Database object. However, in the case of the LOB data, we are using
* the system session to store the data. If we locked the normal way, we see
* deadlocks caused by the following pattern:
*
* <pre>
* Thread 1:
* locks normal session
* locks database
* waiting to lock system session
* Thread 2:
* locks system session
* waiting to lock database.
* </pre>
*
* So, in this class alone, we do two things: we have our very own dedicated
* session, the LOB session, and we take the locks in this order: first the
* Database object, and then the LOB session. Since we own the LOB session,
* no-one else can lock on it, and we are safe.
*/
public class LobStorageBackend implements LobStorageInterface {
/**
* The name of the lob data table. If this table exists, then lob storage is
* used.
*/
public static final String LOB_DATA_TABLE = "LOB_DATA";
private static final String LOB_SCHEMA = "INFORMATION_SCHEMA";
private static final String LOBS = LOB_SCHEMA + ".LOBS";
private static final String LOB_MAP = LOB_SCHEMA + ".LOB_MAP";
private static final String LOB_DATA = LOB_SCHEMA + "." + LOB_DATA_TABLE;
/**
* The size of the chunks we use when storing LOBs inside the database file.
*/
private static final int BLOCK_LENGTH = 20_000;
/**
* The size of cache for lob block hashes. Each entry needs 2 longs (16
* bytes), therefore, the size 4096 means 64 KB.
*/
private static final int HASH_CACHE_SIZE = 4 * 1024;
JdbcConnection conn;
final Database database;
private final HashMap<String, PreparedStatement> prepared = new HashMap<>();
private long nextBlock;
private final CompressTool compress = CompressTool.getInstance();
private long[] hashBlocks;
private boolean init;
public LobStorageBackend(Database database) {
this.database = database;
}
@Override
public void init() {
if (init) {
return;
}
synchronized (database) {
// have to check this again or we might miss an update on another
// thread
if (init) {
return;
}
init = true;
conn = database.getLobConnectionForRegularUse();
JdbcConnection initConn = database.getLobConnectionForInit();
try {
Statement stat = initConn.createStatement();
// stat.execute("SET UNDO_LOG 0");
// stat.execute("SET REDO_LOG_BINARY 0");
boolean create = true;
PreparedStatement prep = initConn.prepareStatement(
"SELECT ZERO() FROM INFORMATION_SCHEMA.COLUMNS WHERE " +
"TABLE_SCHEMA=? AND TABLE_NAME=? AND COLUMN_NAME=?");
prep.setString(1, "INFORMATION_SCHEMA");
prep.setString(2, "LOB_MAP");
prep.setString(3, "POS");
ResultSet rs;
rs = prep.executeQuery();
if (rs.next()) {
prep = initConn.prepareStatement(
"SELECT ZERO() FROM INFORMATION_SCHEMA.TABLES WHERE " +
"TABLE_SCHEMA=? AND TABLE_NAME=?");
prep.setString(1, "INFORMATION_SCHEMA");
prep.setString(2, "LOB_DATA");
rs = prep.executeQuery();
if (rs.next()) {
create = false;
}
}
if (create) {
stat.execute("CREATE CACHED TABLE IF NOT EXISTS " + LOBS +
"(ID BIGINT PRIMARY KEY, BYTE_COUNT BIGINT, TABLE INT) HIDDEN");
stat.execute("CREATE INDEX IF NOT EXISTS " +
"INFORMATION_SCHEMA.INDEX_LOB_TABLE ON " +
LOBS + "(TABLE)");
stat.execute("CREATE CACHED TABLE IF NOT EXISTS " + LOB_MAP +
"(LOB BIGINT, SEQ INT, POS BIGINT, HASH INT, " +
"BLOCK BIGINT, PRIMARY KEY(LOB, SEQ)) HIDDEN");
stat.execute("ALTER TABLE " + LOB_MAP +
" RENAME TO " + LOB_MAP + " HIDDEN");
stat.execute("ALTER TABLE " + LOB_MAP +
" ADD IF NOT EXISTS POS BIGINT BEFORE HASH");
// TODO the column name OFFSET was used in version 1.3.156,
// so this can be remove in a later version
stat.execute("ALTER TABLE " + LOB_MAP +
" DROP COLUMN IF EXISTS \"OFFSET\"");
stat.execute("CREATE INDEX IF NOT EXISTS " +
"INFORMATION_SCHEMA.INDEX_LOB_MAP_DATA_LOB ON " +
LOB_MAP + "(BLOCK, LOB)");
stat.execute("CREATE CACHED TABLE IF NOT EXISTS " +
LOB_DATA +
"(BLOCK BIGINT PRIMARY KEY, COMPRESSED INT, DATA BINARY) HIDDEN");
}
rs = stat.executeQuery("SELECT MAX(BLOCK) FROM " + LOB_DATA);
rs.next();
nextBlock = rs.getLong(1) + 1;
stat.close();
} catch (SQLException e) {
throw DbException.convert(e);
}
}
}
private long getNextLobId() throws SQLException {
String sql = "SELECT MAX(LOB) FROM " + LOB_MAP;
PreparedStatement prep = prepare(sql);
ResultSet rs = prep.executeQuery();
rs.next();
long x = rs.getLong(1) + 1;
reuse(sql, prep);
sql = "SELECT MAX(ID) FROM " + LOBS;
prep = prepare(sql);
rs = prep.executeQuery();
rs.next();
x = Math.max(x, rs.getLong(1) + 1);
reuse(sql, prep);
return x;
}
@Override
public void removeAllForTable(int tableId) {
init();
try {
String sql = "SELECT ID FROM " + LOBS + " WHERE TABLE = ?";
PreparedStatement prep = prepare(sql);
prep.setInt(1, tableId);
ResultSet rs = prep.executeQuery();
while (rs.next()) {
removeLob(rs.getLong(1));
}
reuse(sql, prep);
} catch (SQLException e) {
throw DbException.convert(e);
}
if (tableId == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE) {
removeAllForTable(LobStorageFrontend.TABLE_TEMP);
removeAllForTable(LobStorageFrontend.TABLE_RESULT);
}
}
/**
* Read a block of data from the given LOB.
*
* @param block the block number
* @return the block (expanded if stored compressed)
*/
byte[] readBlock(long block) throws SQLException {
// see locking discussion at the top
assertNotHolds(conn.getSession());
synchronized (database) {
synchronized (conn.getSession()) {
String sql = "SELECT COMPRESSED, DATA FROM " +
LOB_DATA + " WHERE BLOCK = ?";
PreparedStatement prep = prepare(sql);
prep.setLong(1, block);
ResultSet rs = prep.executeQuery();
if (!rs.next()) {
throw DbException.get(ErrorCode.IO_EXCEPTION_1,
"Missing lob entry, block: " + block)
.getSQLException();
}
int compressed = rs.getInt(1);
byte[] buffer = rs.getBytes(2);
if (compressed != 0) {
buffer = compress.expand(buffer);
}
reuse(sql, prep);
return buffer;
}
}
}
/**
* Create a prepared statement, or re-use an existing one.
*
* @param sql the SQL statement
* @return the prepared statement
*/
PreparedStatement prepare(String sql) throws SQLException {
if (SysProperties.CHECK2) {
if (!Thread.holdsLock(database)) {
throw DbException.throwInternalError();
}
}
PreparedStatement prep = prepared.remove(sql);
if (prep == null) {
prep = conn.prepareStatement(sql);
}
return prep;
}
/**
* Allow to re-use the prepared statement.
*
* @param sql the SQL statement
* @param prep the prepared statement
*/
void reuse(String sql, PreparedStatement prep) {
if (SysProperties.CHECK2) {
if (!Thread.holdsLock(database)) {
throw DbException.throwInternalError();
}
}
prepared.put(sql, prep);
}
@Override
public void removeLob(ValueLobDb lob) {
removeLob(lob.getLobId());
}
private void removeLob(long lobId) {
try {
// see locking discussion at the top
assertNotHolds(conn.getSession());
synchronized (database) {
synchronized (conn.getSession()) {
String sql = "SELECT BLOCK, HASH FROM " + LOB_MAP + " D WHERE D.LOB = ? " +
"AND NOT EXISTS(SELECT 1 FROM " + LOB_MAP + " O " +
"WHERE O.BLOCK = D.BLOCK AND O.LOB <> ?)";
PreparedStatement prep = prepare(sql);
prep.setLong(1, lobId);
prep.setLong(2, lobId);
ResultSet rs = prep.executeQuery();
ArrayList<Long> blocks = New.arrayList();
while (rs.next()) {
blocks.add(rs.getLong(1));
int hash = rs.getInt(2);
setHashCacheBlock(hash, -1);
}
reuse(sql, prep);
sql = "DELETE FROM " + LOB_MAP + " WHERE LOB = ?";
prep = prepare(sql);
prep.setLong(1, lobId);
prep.execute();
reuse(sql, prep);
sql = "DELETE FROM " + LOB_DATA + " WHERE BLOCK = ?";
prep = prepare(sql);
for (long block : blocks) {
prep.setLong(1, block);
prep.execute();
}
reuse(sql, prep);
sql = "DELETE FROM " + LOBS + " WHERE ID = ?";
prep = prepare(sql);
prep.setLong(1, lobId);
prep.execute();
reuse(sql, prep);
}
}
} catch (SQLException e) {
throw DbException.convert(e);
}
}
@Override
public InputStream getInputStream(ValueLobDb lob, byte[] hmac,
long byteCount) throws IOException {
try {
init();
assertNotHolds(conn.getSession());
// see locking discussion at the top
synchronized (database) {
synchronized (conn.getSession()) {
long lobId = lob.getLobId();
return new LobInputStream(lobId, byteCount);
}
}
} catch (SQLException e) {
throw DbException.convertToIOException(e);
}
}
private ValueLobDb addLob(InputStream in, long maxLength, int type,
CountingReaderInputStream countingReaderForClob) {
try {
byte[] buff = new byte[BLOCK_LENGTH];
if (maxLength < 0) {
maxLength = Long.MAX_VALUE;
}
long length = 0;
long lobId = -1;
int maxLengthInPlaceLob = database.getMaxLengthInplaceLob();
String compressAlgorithm = database.getLobCompressionAlgorithm(type);
try {
byte[] small = null;
for (int seq = 0; maxLength > 0; seq++) {
int len = (int) Math.min(BLOCK_LENGTH, maxLength);
len = IOUtils.readFully(in, buff, len);
if (len <= 0) {
break;
}
maxLength -= len;
// if we had a short read, trim the buffer
byte[] b;
if (len != buff.length) {
b = Arrays.copyOf(buff, len);
} else {
b = buff;
}
if (seq == 0 && b.length < BLOCK_LENGTH &&
b.length <= maxLengthInPlaceLob) {
small = b;
break;
}
assertNotHolds(conn.getSession());
// see locking discussion at the top
synchronized (database) {
synchronized (conn.getSession()) {
if (seq == 0) {
lobId = getNextLobId();
}
storeBlock(lobId, seq, length, b, compressAlgorithm);
}
}
length += len;
}
if (lobId == -1 && small == null) {
// zero length
small = new byte[0];
}
if (small != null) {
// For a BLOB, precision is length in bytes.
// For a CLOB, precision is length in chars
long precision = countingReaderForClob == null ?
small.length : countingReaderForClob.getLength();
return ValueLobDb.createSmallLob(type, small, precision);
}
// For a BLOB, precision is length in bytes.
// For a CLOB, precision is length in chars
long precision = countingReaderForClob == null ?
length : countingReaderForClob.getLength();
return registerLob(type, lobId,
LobStorageFrontend.TABLE_TEMP, length, precision);
} catch (IOException e) {
if (lobId != -1) {
removeLob(lobId);
}
throw DbException.convertIOException(e, null);
}
} catch (SQLException e) {
throw DbException.convert(e);
}
}
private ValueLobDb registerLob(int type, long lobId, int tableId,
long byteCount, long precision) throws SQLException {
assertNotHolds(conn.getSession());
// see locking discussion at the top
synchronized (database) {
synchronized (conn.getSession()) {
String sql = "INSERT INTO " + LOBS +
"(ID, BYTE_COUNT, TABLE) VALUES(?, ?, ?)";
PreparedStatement prep = prepare(sql);
prep.setLong(1, lobId);
prep.setLong(2, byteCount);
prep.setInt(3, tableId);
prep.execute();
reuse(sql, prep);
return ValueLobDb.create(type,
database, tableId, lobId, null, precision);
}
}
}
@Override
public boolean isReadOnly() {
return database.isReadOnly();
}
@Override
public ValueLobDb copyLob(ValueLobDb old, int tableId, long length) {
int type = old.getType();
long oldLobId = old.getLobId();
assertNotHolds(conn.getSession());
// see locking discussion at the top
synchronized (database) {
synchronized (conn.getSession()) {
try {
init();
ValueLobDb v = null;
if (!old.isRecoveryReference()) {
long lobId = getNextLobId();
String sql = "INSERT INTO " + LOB_MAP +
"(LOB, SEQ, POS, HASH, BLOCK) " +
"SELECT ?, SEQ, POS, HASH, BLOCK FROM " +
LOB_MAP + " WHERE LOB = ?";
PreparedStatement prep = prepare(sql);
prep.setLong(1, lobId);
prep.setLong(2, oldLobId);
prep.executeUpdate();
reuse(sql, prep);
sql = "INSERT INTO " + LOBS +
"(ID, BYTE_COUNT, TABLE) " +
"SELECT ?, BYTE_COUNT, ? FROM " + LOBS +
" WHERE ID = ?";
prep = prepare(sql);
prep.setLong(1, lobId);
prep.setLong(2, tableId);
prep.setLong(3, oldLobId);
prep.executeUpdate();
reuse(sql, prep);
v = ValueLobDb.create(type, database, tableId, lobId, null, length);
} else {
// Recovery process, no need to copy LOB using normal
// infrastructure
v = ValueLobDb.create(type, database, tableId, oldLobId, null, length);
}
return v;
} catch (SQLException e) {
throw DbException.convert(e);
}
}
}
}
private long getHashCacheBlock(int hash) {
if (HASH_CACHE_SIZE > 0) {
initHashCache();
int index = hash & (HASH_CACHE_SIZE - 1);
long oldHash = hashBlocks[index];
if (oldHash == hash) {
return hashBlocks[index + HASH_CACHE_SIZE];
}
}
return -1;
}
private void setHashCacheBlock(int hash, long block) {
if (HASH_CACHE_SIZE > 0) {
initHashCache();
int index = hash & (HASH_CACHE_SIZE - 1);
hashBlocks[index] = hash;
hashBlocks[index + HASH_CACHE_SIZE] = block;
}
}
private void initHashCache() {
if (hashBlocks == null) {
hashBlocks = new long[HASH_CACHE_SIZE * 2];
}
}
/**
* Store a block in the LOB storage.
*
* @param lobId the lob id
* @param seq the sequence number
* @param pos the position within the lob
* @param b the data
* @param compressAlgorithm the compression algorithm (may be null)
*/
void storeBlock(long lobId, int seq, long pos, byte[] b,
String compressAlgorithm) throws SQLException {
long block;
boolean blockExists = false;
if (compressAlgorithm != null) {
b = compress.compress(b, compressAlgorithm);
}
int hash = Arrays.hashCode(b);
assertHoldsLock(conn.getSession());
assertHoldsLock(database);
block = getHashCacheBlock(hash);
if (block != -1) {
String sql = "SELECT COMPRESSED, DATA FROM " + LOB_DATA +
" WHERE BLOCK = ?";
PreparedStatement prep = prepare(sql);
prep.setLong(1, block);
ResultSet rs = prep.executeQuery();
if (rs.next()) {
boolean compressed = rs.getInt(1) != 0;
byte[] compare = rs.getBytes(2);
if (compressed == (compressAlgorithm != null) && Arrays.equals(b, compare)) {
blockExists = true;
}
}
reuse(sql, prep);
}
if (!blockExists) {
block = nextBlock++;
setHashCacheBlock(hash, block);
String sql = "INSERT INTO " + LOB_DATA +
"(BLOCK, COMPRESSED, DATA) VALUES(?, ?, ?)";
PreparedStatement prep = prepare(sql);
prep.setLong(1, block);
prep.setInt(2, compressAlgorithm == null ? 0 : 1);
prep.setBytes(3, b);
prep.execute();
reuse(sql, prep);
}
String sql = "INSERT INTO " + LOB_MAP +
"(LOB, SEQ, POS, HASH, BLOCK) VALUES(?, ?, ?, ?, ?)";
PreparedStatement prep = prepare(sql);
prep.setLong(1, lobId);
prep.setInt(2, seq);
prep.setLong(3, pos);
prep.setLong(4, hash);
prep.setLong(5, block);
prep.execute();
reuse(sql, prep);
}
@Override
public Value createBlob(InputStream in, long maxLength) {
init();
return addLob(in, maxLength, Value.BLOB, null);
}
@Override
public Value createClob(Reader reader, long maxLength) {
init();
long max = maxLength == -1 ? Long.MAX_VALUE : maxLength;
CountingReaderInputStream in = new CountingReaderInputStream(reader, max);
return addLob(in, Long.MAX_VALUE, Value.CLOB, in);
}
private static void assertNotHolds(Object lock) {
if (Thread.holdsLock(lock)) {
throw DbException.throwInternalError(lock.toString());
}
}
/**
* Check whether this thread has synchronized on this object.
*
* @param lock the object
*/
static void assertHoldsLock(Object lock) {
if (!Thread.holdsLock(lock)) {
throw DbException.throwInternalError(lock.toString());
}
}
/**
* An input stream that reads from a LOB.
*/
public class LobInputStream extends InputStream {
/**
* Data from the LOB_MAP table. We cache this to prevent other updates
* to the table that contains the LOB column from changing the data
* under us.
*/
private final long[] lobMapBlocks;
/**
* index into the lobMapBlocks array.
*/
private int lobMapIndex;
/**
* The remaining bytes in the lob.
*/
private long remainingBytes;
/**
* The temporary buffer.
*/
private byte[] buffer;
/**
* The position within the buffer.
*/
private int bufferPos;
public LobInputStream(long lobId, long byteCount) throws SQLException {
// we have to take the lock on the session
// before the lock on the database to prevent ABBA deadlocks
assertHoldsLock(conn.getSession());
assertHoldsLock(database);
if (byteCount == -1) {
String sql = "SELECT BYTE_COUNT FROM " + LOBS + " WHERE ID = ?";
PreparedStatement prep = prepare(sql);
prep.setLong(1, lobId);
ResultSet rs = prep.executeQuery();
if (!rs.next()) {
throw DbException.get(ErrorCode.IO_EXCEPTION_1,
"Missing lob entry: " + lobId).getSQLException();
}
byteCount = rs.getLong(1);
reuse(sql, prep);
}
this.remainingBytes = byteCount;
String sql = "SELECT COUNT(*) FROM " + LOB_MAP + " WHERE LOB = ?";
PreparedStatement prep = prepare(sql);
prep.setLong(1, lobId);
ResultSet rs = prep.executeQuery();
rs.next();
int lobMapCount = rs.getInt(1);
if (lobMapCount == 0) {
throw DbException.get(ErrorCode.IO_EXCEPTION_1,
"Missing lob entry: " + lobId).getSQLException();
}
reuse(sql, prep);
this.lobMapBlocks = new long[lobMapCount];
sql = "SELECT BLOCK FROM " + LOB_MAP + " WHERE LOB = ? ORDER BY SEQ";
prep = prepare(sql);
prep.setLong(1, lobId);
rs = prep.executeQuery();
int i = 0;
while (rs.next()) {
this.lobMapBlocks[i] = rs.getLong(1);
i++;
}
reuse(sql, prep);
}
@Override
public int read() throws IOException {
fillBuffer();
if (remainingBytes <= 0) {
return -1;
}
remainingBytes--;
return buffer[bufferPos++] & 255;
}
@Override
public long skip(long n) throws IOException {
if (n <= 0) {
return 0;
}
long remaining = n;
remaining -= skipSmall(remaining);
if (remaining > BLOCK_LENGTH) {
while (remaining > BLOCK_LENGTH) {
remaining -= BLOCK_LENGTH;
remainingBytes -= BLOCK_LENGTH;
lobMapIndex++;
}
bufferPos = 0;
buffer = null;
}
fillBuffer();
remaining -= skipSmall(remaining);
remaining -= super.skip(remaining);
return n - remaining;
}
private int skipSmall(long n) {
if (buffer != null && bufferPos < buffer.length) {
int x = MathUtils.convertLongToInt(Math.min(n, buffer.length - bufferPos));
bufferPos += x;
remainingBytes -= x;
return x;
}
return 0;
}
@Override
public int available() throws IOException {
return MathUtils.convertLongToInt(remainingBytes);
}
@Override
public int read(byte[] buff) throws IOException {
return readFully(buff, 0, buff.length);
}
@Override
public int read(byte[] buff, int off, int length) throws IOException {
return readFully(buff, off, length);
}
private int readFully(byte[] buff, int off, int length) throws IOException {
if (length == 0) {
return 0;
}
int read = 0;
while (length > 0) {
fillBuffer();
if (remainingBytes <= 0) {
break;
}
int len = (int) Math.min(length, remainingBytes);
len = Math.min(len, buffer.length - bufferPos);
System.arraycopy(buffer, bufferPos, buff, off, len);
bufferPos += len;
read += len;
remainingBytes -= len;
off += len;
length -= len;
}
return read == 0 ? -1 : read;
}
private void fillBuffer() throws IOException {
if (buffer != null && bufferPos < buffer.length) {
return;
}
if (remainingBytes <= 0) {
return;
}
if (lobMapIndex >= lobMapBlocks.length) {
System.out.println("halt!");
}
try {
buffer = readBlock(lobMapBlocks[lobMapIndex]);
lobMapIndex++;
bufferPos = 0;
} catch (SQLException e) {
throw DbException.convertToIOException(e);
}
}
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/LobStorageFrontend.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import org.h2.value.Value;
import org.h2.value.ValueLobDb;
/**
 * This factory creates in-memory objects and temporary files. It is used on the
 * client side.
 */
public class LobStorageFrontend implements LobStorageInterface {

    /**
     * The table id for session variables (LOBs not assigned to a table).
     */
    public static final int TABLE_ID_SESSION_VARIABLE = -1;

    /**
     * The table id for temporary objects (not assigned to any object).
     */
    public static final int TABLE_TEMP = -2;

    /**
     * The table id for result sets.
     */
    public static final int TABLE_RESULT = -3;

    private final DataHandler handler;

    public LobStorageFrontend(DataHandler handler) {
        this.handler = handler;
    }

    @Override
    public void init() {
        // nothing to do on the client side
    }

    @Override
    public boolean isReadOnly() {
        return false;
    }

    /**
     * Get the input stream for the given lob, reading from the remote side.
     *
     * @param lob the lob
     * @param hmac the message authentication code (for remote input streams)
     * @param byteCount the number of bytes to read, or -1 if not known
     * @return the stream
     */
    @Override
    public InputStream getInputStream(ValueLobDb lob, byte[] hmac,
            long byteCount) throws IOException {
        long count = byteCount < 0 ? Long.MAX_VALUE : byteCount;
        InputStream remote = new LobStorageRemoteInputStream(
                handler, lob, hmac, count);
        return new BufferedInputStream(remote);
    }

    @Override
    public Value createBlob(InputStream in, long maxLength) {
        // need to use a temp file, because the input stream could come from
        // the same database, which would create a weird situation (trying
        // to read a block while writing something)
        return ValueLobDb.createTempBlob(in, maxLength, handler);
    }

    /**
     * Create a CLOB object.
     *
     * @param reader the reader
     * @param maxLength the maximum length (-1 if not known)
     * @return the LOB
     */
    @Override
    public Value createClob(Reader reader, long maxLength) {
        // buffered into a temp file for the same reason as createBlob
        return ValueLobDb.createTempClob(reader, maxLength, handler);
    }

    @Override
    public ValueLobDb copyLob(ValueLobDb old, int tableId, long length) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void removeLob(ValueLobDb lob) {
        // lobs created by the front-end are not stored in the database
    }

    @Override
    public void removeAllForTable(int tableId) {
        throw new UnsupportedOperationException();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/LobStorageInterface.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import org.h2.value.Value;
import org.h2.value.ValueLobDb;
/**
 * A mechanism to store and retrieve lob data.
 */
public interface LobStorageInterface {

    /**
     * Create a CLOB object.
     *
     * @param reader the reader
     * @param maxLength the maximum length (-1 if not known)
     * @return the LOB
     */
    Value createClob(Reader reader, long maxLength);

    /**
     * Create a BLOB object.
     *
     * @param in the input stream
     * @param maxLength the maximum length (-1 if not known)
     * @return the LOB
     */
    Value createBlob(InputStream in, long maxLength);

    /**
     * Copy a lob.
     *
     * @param old the old lob
     * @param tableId the new table id
     * @param length the length
     * @return the new lob
     */
    ValueLobDb copyLob(ValueLobDb old, int tableId, long length);

    /**
     * Get the input stream for the given lob.
     *
     * @param lob the lob
     * @param hmac the message authentication code (for remote input streams)
     * @param byteCount the number of bytes to read, or -1 if not known
     * @return the stream
     */
    InputStream getInputStream(ValueLobDb lob, byte[] hmac, long byteCount)
            throws IOException;

    /**
     * Delete a LOB (from the database, if it is stored there).
     *
     * @param lob the lob
     */
    void removeLob(ValueLobDb lob);

    /**
     * Remove all LOBs for this table.
     *
     * @param tableId the table id
     */
    void removeAllForTable(int tableId);

    /**
     * Initialize the lob storage.
     */
    void init();

    /**
     * Whether the storage is read-only
     *
     * @return true if yes
     */
    boolean isReadOnly();
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/LobStorageMap.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map.Entry;
import org.h2.api.ErrorCode;
import org.h2.engine.Database;
import org.h2.message.DbException;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;
import org.h2.mvstore.StreamStore;
import org.h2.mvstore.db.MVTableEngine.Store;
import org.h2.util.IOUtils;
import org.h2.util.New;
import org.h2.util.StringUtils;
import org.h2.value.Value;
import org.h2.value.ValueLobDb;
/**
* This class stores LOB objects in the database, in maps. This is the back-end
* i.e. the server side of the LOB storage.
*/
public class LobStorageMap implements LobStorageInterface {
// Compile-time switch for verbose tracing via trace().
private static final boolean TRACE = false;

private final Database database;

// Whether init() has already run (the maps below are open).
private boolean init;

// Guards allocation of nextLobId.
private final Object nextLobIdSync = new Object();
private long nextLobId;

/**
 * The lob metadata map. It contains the mapping from the lob id
 * (which is a long) to the stream store id (which is a byte array).
 *
 * Key: lobId (long)
 * Value: { streamStoreId (byte[]), tableId (int),
 *          byteCount (long), hash (long) }.
 */
private MVMap<Long, Object[]> lobMap;

/**
 * The reference map. It is used to remove data from the stream store: if no
 * more entries for the given streamStoreId exist, the data is removed from
 * the stream store.
 *
 * Key: { streamStoreId (byte[]), lobId (long) }.
 * Value: true (boolean).
 */
private MVMap<Object[], Boolean> refMap;

/**
 * The stream store data map.
 *
 * Key: stream store block id (long).
 * Value: data (byte[]).
 */
private MVMap<Long, byte[]> dataMap;

// Chunked stream storage layered on top of dataMap; created in init().
private StreamStore streamStore;

public LobStorageMap(Database database) {
    this.database = database;
}
@Override
public void init() {
if (init) {
return;
}
init = true;
Store s = database.getMvStore();
MVStore mvStore;
if (s == null) {
// in-memory database
mvStore = MVStore.open(null);
} else {
mvStore = s.getStore();
}
lobMap = mvStore.openMap("lobMap");
refMap = mvStore.openMap("lobRef");
dataMap = mvStore.openMap("lobData");
streamStore = new StreamStore(dataMap);
// garbage collection of the last blocks
if (database.isReadOnly()) {
return;
}
if (dataMap.isEmpty()) {
return;
}
// search for the last block
// (in theory, only the latest lob can have unreferenced blocks,
// but the latest lob could be a copy of another one, and
// we don't know that, so we iterate over all lobs)
long lastUsedKey = -1;
for (Entry<Long, Object[]> e : lobMap.entrySet()) {
long lobId = e.getKey();
Object[] v = e.getValue();
byte[] id = (byte[]) v[0];
long max = streamStore.getMaxBlockKey(id);
// a lob may not have a referenced blocks if data is kept inline
if (max != -1 && max > lastUsedKey) {
lastUsedKey = max;
if (TRACE) {
trace("lob " + lobId + " lastUsedKey=" + lastUsedKey);
}
}
}
if (TRACE) {
trace("lastUsedKey=" + lastUsedKey);
}
// delete all blocks that are newer
while (true) {
Long last = dataMap.lastKey();
if (last == null || last <= lastUsedKey) {
break;
}
if (TRACE) {
trace("gc " + last);
}
dataMap.remove(last);
}
// don't re-use block ids, except at the very end
Long last = dataMap.lastKey();
if (last != null) {
streamStore.setNextKey(last + 1);
}
}
@Override
public Value createBlob(InputStream in, long maxLength) {
init();
int type = Value.BLOB;
try {
if (maxLength != -1
&& maxLength <= database.getMaxLengthInplaceLob()) {
byte[] small = new byte[(int) maxLength];
int len = IOUtils.readFully(in, small, (int) maxLength);
if (len > maxLength) {
throw new IllegalStateException(
"len > blobLength, " + len + " > " + maxLength);
}
if (len < small.length) {
small = Arrays.copyOf(small, len);
}
return ValueLobDb.createSmallLob(type, small);
}
if (maxLength != -1) {
in = new RangeInputStream(in, 0L, maxLength);
}
return createLob(in, type);
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
} catch (IOException e) {
throw DbException.convertIOException(e, null);
}
}
@Override
public Value createClob(Reader reader, long maxLength) {
init();
int type = Value.CLOB;
try {
// we multiple by 3 here to get the worst-case size in bytes
if (maxLength != -1
&& maxLength * 3 <= database.getMaxLengthInplaceLob()) {
char[] small = new char[(int) maxLength];
int len = IOUtils.readFully(reader, small, (int) maxLength);
if (len > maxLength) {
throw new IllegalStateException(
"len > blobLength, " + len + " > " + maxLength);
}
byte[] utf8 = new String(small, 0, len)
.getBytes(StandardCharsets.UTF_8);
if (utf8.length > database.getMaxLengthInplaceLob()) {
throw new IllegalStateException(
"len > maxinplace, " + utf8.length + " > "
+ database.getMaxLengthInplaceLob());
}
return ValueLobDb.createSmallLob(type, utf8);
}
if (maxLength < 0) {
maxLength = Long.MAX_VALUE;
}
CountingReaderInputStream in = new CountingReaderInputStream(reader,
maxLength);
ValueLobDb lob = createLob(in, type);
// the length is not correct
lob = ValueLobDb.create(type, database, lob.getTableId(),
lob.getLobId(), null, in.getLength());
return lob;
} catch (IllegalStateException e) {
throw DbException.get(ErrorCode.OBJECT_CLOSED, e);
} catch (IOException e) {
throw DbException.convertIOException(e, null);
}
}
private ValueLobDb createLob(InputStream in, int type) throws IOException {
byte[] streamStoreId;
try {
streamStoreId = streamStore.put(in);
} catch (Exception e) {
throw DbException.convertToIOException(e);
}
long lobId = generateLobId();
long length = streamStore.length(streamStoreId);
int tableId = LobStorageFrontend.TABLE_TEMP;
Object[] value = { streamStoreId, tableId, length, 0 };
lobMap.put(lobId, value);
Object[] key = { streamStoreId, lobId };
refMap.put(key, Boolean.TRUE);
ValueLobDb lob = ValueLobDb.create(
type, database, tableId, lobId, null, length);
if (TRACE) {
trace("create " + tableId + "/" + lobId);
}
return lob;
}
private long generateLobId() {
synchronized (nextLobIdSync) {
if (nextLobId == 0) {
Long id = lobMap.lastKey();
nextLobId = id == null ? 1 : id + 1;
}
return nextLobId++;
}
}
@Override
public boolean isReadOnly() {
return database.isReadOnly();
}
@Override
public ValueLobDb copyLob(ValueLobDb old, int tableId, long length) {
init();
int type = old.getType();
long oldLobId = old.getLobId();
long oldLength = old.getPrecision();
if (oldLength != length) {
throw DbException.throwInternalError("Length is different");
}
Object[] value = lobMap.get(oldLobId);
value = value.clone();
byte[] streamStoreId = (byte[]) value[0];
long lobId = generateLobId();
value[1] = tableId;
lobMap.put(lobId, value);
Object[] key = { streamStoreId, lobId };
refMap.put(key, Boolean.TRUE);
ValueLobDb lob = ValueLobDb.create(
type, database, tableId, lobId, null, length);
if (TRACE) {
trace("copy " + old.getTableId() + "/" + old.getLobId() +
" > " + tableId + "/" + lobId);
}
return lob;
}
@Override
public InputStream getInputStream(ValueLobDb lob, byte[] hmac, long byteCount)
throws IOException {
init();
Object[] value = lobMap.get(lob.getLobId());
if (value == null) {
if (lob.getTableId() == LobStorageFrontend.TABLE_RESULT ||
lob.getTableId() == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE) {
throw DbException.get(
ErrorCode.LOB_CLOSED_ON_TIMEOUT_1, "" +
lob.getLobId() + "/" + lob.getTableId());
}
throw DbException.throwInternalError("Lob not found: " +
lob.getLobId() + "/" + lob.getTableId());
}
byte[] streamStoreId = (byte[]) value[0];
return streamStore.get(streamStoreId);
}
@Override
public void removeAllForTable(int tableId) {
init();
if (database.getMvStore().getStore().isClosed()) {
return;
}
// this might not be very efficient -
// to speed it up, we would need yet another map
ArrayList<Long> list = New.arrayList();
for (Entry<Long, Object[]> e : lobMap.entrySet()) {
Object[] value = e.getValue();
int t = (Integer) value[1];
if (t == tableId) {
list.add(e.getKey());
}
}
for (long lobId : list) {
removeLob(tableId, lobId);
}
if (tableId == LobStorageFrontend.TABLE_ID_SESSION_VARIABLE) {
removeAllForTable(LobStorageFrontend.TABLE_TEMP);
removeAllForTable(LobStorageFrontend.TABLE_RESULT);
}
}
@Override
public void removeLob(ValueLobDb lob) {
init();
int tableId = lob.getTableId();
long lobId = lob.getLobId();
removeLob(tableId, lobId);
}
private void removeLob(int tableId, long lobId) {
if (TRACE) {
trace("remove " + tableId + "/" + lobId);
}
Object[] value = lobMap.remove(lobId);
if (value == null) {
// already removed
return;
}
byte[] streamStoreId = (byte[]) value[0];
Object[] key = {streamStoreId, lobId };
refMap.remove(key);
// check if there are more entries for this streamStoreId
key = new Object[] {streamStoreId, 0L };
value = refMap.ceilingKey(key);
boolean hasMoreEntries = false;
if (value != null) {
byte[] s2 = (byte[]) value[0];
if (Arrays.equals(streamStoreId, s2)) {
if (TRACE) {
trace(" stream still needed in lob " + value[1]);
}
hasMoreEntries = true;
}
}
if (!hasMoreEntries) {
if (TRACE) {
trace(" remove stream " + StringUtils.convertBytesToHex(streamStoreId));
}
streamStore.remove(streamStoreId);
}
}
private static void trace(String op) {
System.out.println("[" + Thread.currentThread().getName() + "] LOB " + op);
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/LobStorageRemoteInputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.InputStream;
import org.h2.message.DbException;
import org.h2.value.ValueLobDb;
/**
* An input stream that reads from a remote LOB.
*/
class LobStorageRemoteInputStream extends InputStream {
    /**
     * The data handler used to fetch lob bytes from the server.
     */
    private final DataHandler handler;
    /**
     * The id of the lob being read.
     */
    private final long lobId;
    /**
     * The message authentication code for this lob.
     */
    private final byte[] hmac;
    /**
     * The current read offset within the lob.
     */
    private long offset;
    /**
     * How many bytes may still be read before the end of the lob.
     */
    private long bytesLeft;
    public LobStorageRemoteInputStream(DataHandler handler, ValueLobDb lob,
            byte[] hmac, long byteCount) {
        this.handler = handler;
        this.lobId = lob.getLobId();
        this.hmac = hmac;
        this.bytesLeft = byteCount;
    }
    @Override
    public int read() throws IOException {
        byte[] one = new byte[1];
        if (read(one, 0, 1) < 0) {
            // end of the lob
            return -1;
        }
        return one[0] & 0xff;
    }
    @Override
    public int read(byte[] buff) throws IOException {
        return read(buff, 0, buff.length);
    }
    @Override
    public int read(byte[] buff, int off, int length) throws IOException {
        if (length == 0) {
            return 0;
        }
        // never request more than the remaining byte count
        int toRead = (int) Math.min(length, bytesLeft);
        if (toRead == 0) {
            return -1;
        }
        int got;
        try {
            got = handler.readLob(lobId, hmac, offset, buff, off, toRead);
        } catch (DbException e) {
            throw DbException.convertToIOException(e);
        }
        if (got == 0) {
            // the handler returned no data: treat as end of stream
            return -1;
        }
        bytesLeft -= got;
        offset += got;
        return got;
    }
    @Override
    public long skip(long n) {
        // simply advance the position; reads past the end return -1 anyway
        bytesLeft -= n;
        offset += n;
        return n;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/Page.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.lang.reflect.Array;
import org.h2.engine.Session;
import org.h2.util.CacheObject;
/**
* A page. Format:
* <ul><li>0-3: parent page id (0 for root)
* </li><li>4-4: page type
* </li><li>page-type specific data
* </li></ul>
*/
public abstract class Page extends CacheObject {
    /**
     * This is the last page of a chain.
     */
    public static final int FLAG_LAST = 16;
    /**
     * An empty page.
     */
    public static final int TYPE_EMPTY = 0;
    /**
     * A data leaf page (without overflow: + FLAG_LAST).
     */
    public static final int TYPE_DATA_LEAF = 1;
    /**
     * A data node page (never has overflow pages).
     */
    public static final int TYPE_DATA_NODE = 2;
    /**
     * A data overflow page (the last page: + FLAG_LAST).
     */
    public static final int TYPE_DATA_OVERFLOW = 3;
    /**
     * A b-tree leaf page (without overflow: + FLAG_LAST).
     */
    public static final int TYPE_BTREE_LEAF = 4;
    /**
     * A b-tree node page (never has overflow pages).
     */
    public static final int TYPE_BTREE_NODE = 5;
    /**
     * A page containing a list of free pages (the last page: + FLAG_LAST).
     */
    public static final int TYPE_FREE_LIST = 6;
    /**
     * A stream trunk page.
     */
    public static final int TYPE_STREAM_TRUNK = 7;
    /**
     * A stream data page.
     */
    public static final int TYPE_STREAM_DATA = 8;
    /**
     * Number of spare slots added when growing an array, and the maximum
     * number of unused slots kept when shrinking one; this avoids
     * re-allocating on every single insert or remove.
     */
    private static final int COPY_THRESHOLD = 4;
    /**
     * When this page was changed the last time.
     */
    protected long changeCount;
    /**
     * Copy the data to a new location, change the parent to point to the new
     * location, and free up the current page.
     *
     * @param session the session
     * @param newPos the new position
     */
    public abstract void moveTo(Session session, int newPos);
    /**
     * Write the page.
     */
    public abstract void write();
    /**
     * Insert a value in an array. A new array is created if required.
     * The old array may be returned (and modified in place) if it has
     * spare capacity.
     *
     * @param old the old array
     * @param oldSize the old size
     * @param pos the position
     * @param x the value to insert
     * @return the (new) array
     */
    @SuppressWarnings("unchecked")
    public static <T> T[] insert(T[] old, int oldSize, int pos, T x) {
        T[] result;
        if (old.length > oldSize) {
            // enough spare capacity: shift in place
            result = old;
        } else {
            // according to a test, this is as fast as "new Row[..]"
            result = (T[]) Array.newInstance(
                    old.getClass().getComponentType(), oldSize + 1 + COPY_THRESHOLD);
            if (pos > 0) {
                System.arraycopy(old, 0, result, 0, pos);
            }
        }
        if (oldSize - pos > 0) {
            // shift the tail one slot to the right
            System.arraycopy(old, pos, result, pos + 1, oldSize - pos);
        }
        result[pos] = x;
        return result;
    }
    /**
     * Delete a value in an array. A new array is created if required.
     * The old array may be returned (and modified in place) if the number
     * of unused slots stays below the copy threshold.
     *
     * @param old the old array
     * @param oldSize the old size
     * @param pos the position
     * @return the (new) array
     */
    @SuppressWarnings("unchecked")
    public static <T> T[] remove(T[] old, int oldSize, int pos) {
        T[] result;
        if (old.length - oldSize < COPY_THRESHOLD) {
            result = old;
        } else {
            // according to a test, this is as fast as "new Row[..]"
            result = (T[]) Array.newInstance(
                    old.getClass().getComponentType(), oldSize - 1);
            System.arraycopy(old, 0, result, 0, Math.min(oldSize - 1, pos));
        }
        if (pos < oldSize) {
            // shift the tail one slot to the left
            System.arraycopy(old, pos + 1, result, pos, oldSize - pos - 1);
        }
        return result;
    }
    /**
     * Insert a value in an array. A new array is created if required.
     * A null old array is treated as empty (only valid with pos == 0).
     *
     * @param old the old array (may be null)
     * @param oldSize the old size
     * @param pos the position
     * @param x the value to insert
     * @return the (new) array
     */
    protected static long[] insert(long[] old, int oldSize, int pos, long x) {
        long[] result;
        if (old != null && old.length > oldSize) {
            result = old;
        } else {
            result = new long[oldSize + 1 + COPY_THRESHOLD];
            // guard against old == null, consistent with the int[] overload
            if (pos > 0 && old != null) {
                System.arraycopy(old, 0, result, 0, pos);
            }
        }
        if (old != null && oldSize - pos > 0) {
            System.arraycopy(old, pos, result, pos + 1, oldSize - pos);
        }
        result[pos] = x;
        return result;
    }
    /**
     * Delete a value in an array. A new array is created if required.
     *
     * @param old the old array
     * @param oldSize the old size
     * @param pos the position
     * @return the (new) array
     */
    protected static long[] remove(long[] old, int oldSize, int pos) {
        long[] result;
        if (old.length - oldSize < COPY_THRESHOLD) {
            result = old;
        } else {
            result = new long[oldSize - 1];
            // clamp the prefix length, consistent with the other overloads
            System.arraycopy(old, 0, result, 0, Math.min(oldSize - 1, pos));
        }
        // guard against pos == oldSize (would be a negative-length copy),
        // consistent with the T[] and int[] overloads
        if (pos < oldSize) {
            System.arraycopy(old, pos + 1, result, pos, oldSize - pos - 1);
        }
        return result;
    }
    /**
     * Insert a value in an array. A new array is created if required.
     * A null old array is treated as empty (only valid with pos == 0).
     *
     * @param old the old array (may be null)
     * @param oldSize the old size
     * @param pos the position
     * @param x the value to insert
     * @return the (new) array
     */
    protected static int[] insert(int[] old, int oldSize, int pos, int x) {
        int[] result;
        if (old != null && old.length > oldSize) {
            result = old;
        } else {
            result = new int[oldSize + 1 + COPY_THRESHOLD];
            if (pos > 0 && old != null) {
                System.arraycopy(old, 0, result, 0, pos);
            }
        }
        if (old != null && oldSize - pos > 0) {
            System.arraycopy(old, pos, result, pos + 1, oldSize - pos);
        }
        result[pos] = x;
        return result;
    }
    /**
     * Delete a value in an array. A new array is created if required.
     *
     * @param old the old array
     * @param oldSize the old size
     * @param pos the position
     * @return the (new) array
     */
    protected static int[] remove(int[] old, int oldSize, int pos) {
        int[] result;
        if (old.length - oldSize < COPY_THRESHOLD) {
            result = old;
        } else {
            result = new int[oldSize - 1];
            System.arraycopy(old, 0, result, 0, Math.min(oldSize - 1, pos));
        }
        if (pos < oldSize) {
            System.arraycopy(old, pos + 1, result, pos, oldSize - pos - 1);
        }
        return result;
    }
    /**
     * Add a value to a subset of the array.
     *
     * @param array the array
     * @param from the index of the first element (including)
     * @param to the index of the last element (excluding)
     * @param x the value to add
     */
    protected static void add(int[] array, int from, int to, int x) {
        for (int i = from; i < to; i++) {
            array[i] += x;
        }
    }
    /**
     * If this page can be moved. Transaction log and free-list pages can not.
     *
     * @return true if moving is allowed
     */
    public boolean canMove() {
        return true;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageFreeList.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import org.h2.engine.Session;
import org.h2.util.BitField;
/**
* The list of free pages of a page store. The format of a free list trunk page
* is:
* <ul>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>data (3-)</li>
* </ul>
*/
public class PageFreeList extends Page {
    // Offset of the bitmap within the page (after type byte and checksum).
    private static final int DATA_START = 3;
    private final PageStore store;
    // The bitmap: bit i set means page (getPos() + i) is in use.
    private final BitField used;
    // How many pages this free-list page keeps track of.
    private final int pageCount;
    // Set when a scan found no free bit; cleared again by free().
    private boolean full;
    private Data data;
    private PageFreeList(PageStore store, int pageId) {
        // kept in cache, and array list in page store
        setPos(pageId);
        this.store = store;
        pageCount = (store.getPageSize() - DATA_START) * 8;
        used = new BitField(pageCount);
        // bit 0 is this free-list page itself, which is always in use
        used.set(0);
    }
    /**
     * Read a free-list page.
     *
     * @param store the page store
     * @param data the data
     * @param pageId the page id
     * @return the page
     */
    static PageFreeList read(PageStore store, Data data, int pageId) {
        PageFreeList p = new PageFreeList(store, pageId);
        p.data = data;
        p.read();
        return p;
    }
    /**
     * Create a new free-list page.
     *
     * @param store the page store
     * @param pageId the page id
     * @return the page
     */
    static PageFreeList create(PageStore store, int pageId) {
        return new PageFreeList(store, pageId);
    }
    /**
     * Allocate a page from the free list.
     *
     * @param exclude the exclude list or null
     * @param first the first page to look for
     * @return the page, or -1 if all pages are used
     */
    int allocate(BitField exclude, int first) {
        if (full) {
            return -1;
        }
        // TODO cache last result
        // translate the absolute page number into a local bit index
        int start = Math.max(0, first - getPos());
        while (true) {
            int free = used.nextClearBit(start);
            if (free >= pageCount) {
                // only declare the page full when the whole range was scanned
                if (start == 0) {
                    full = true;
                }
                return -1;
            }
            if (exclude != null && exclude.get(free + getPos())) {
                // skip forward past the excluded run
                start = exclude.nextClearBit(free + getPos()) - getPos();
                if (start >= pageCount) {
                    return -1;
                }
            } else {
                // set the bit first, because logUndo can
                // allocate other pages, and we must not
                // return the same page twice
                used.set(free);
                store.logUndo(this, data);
                store.update(this);
                return free + getPos();
            }
        }
    }
    /**
     * Get the first free page starting at the given offset.
     *
     * @param first the page number to start the search
     * @return the page number, or -1
     */
    int getFirstFree(int first) {
        if (full) {
            return -1;
        }
        int start = Math.max(0, first - getPos());
        int free = used.nextClearBit(start);
        if (free >= pageCount) {
            return -1;
        }
        return free + getPos();
    }
    // Returns the absolute page number of the highest used page tracked
    // here, or -1 if only this free-list page itself is in use.
    int getLastUsed() {
        int last = used.length() - 1;
        return last <= 0 ? -1 : last + getPos();
    }
    /**
     * Mark a page as used.
     *
     * @param pageId the page id
     */
    void allocate(int pageId) {
        int idx = pageId - getPos();
        if (idx >= 0 && !used.get(idx)) {
            // set the bit first, because logUndo can
            // allocate other pages, and we must not
            // return the same page twice
            used.set(idx);
            store.logUndo(this, data);
            store.update(this);
        }
    }
    /**
     * Add a page to the free list.
     *
     * @param pageId the page id to add
     */
    void free(int pageId) {
        full = false;
        store.logUndo(this, data);
        used.clear(pageId - getPos());
        store.update(this);
    }
    /**
     * Read the page from the disk.
     */
    private void read() {
        data.reset();
        // skip the page type byte and the checksum
        data.readByte();
        data.readShortInt();
        for (int i = 0; i < pageCount; i += 8) {
            int x = data.readByte() & 255;
            used.setByte(i, x);
        }
        full = false;
    }
    @Override
    public void write() {
        data = store.createData();
        data.writeByte((byte) Page.TYPE_FREE_LIST);
        // checksum placeholder; filled in by the store when writing
        data.writeShortInt(0);
        for (int i = 0; i < pageCount; i += 8) {
            data.writeByte((byte) used.getByte(i));
        }
        store.writePage(getPos(), data);
    }
    /**
     * Get the number of pages that can fit in a free list.
     *
     * @param pageSize the page size
     * @return the number of pages
     */
    public static int getPagesAddressed(int pageSize) {
        return (pageSize - DATA_START) * 8;
    }
    /**
     * Get the estimated memory size.
     *
     * @return number of double words (4 bytes)
     */
    @Override
    public int getMemory() {
        return store.getPageSize() >> 2;
    }
    /**
     * Check if a page is already in use.
     *
     * @param pageId the page to check
     * @return true if it is in use
     */
    boolean isUsed(int pageId) {
        return used.get(pageId - getPos());
    }
    @Override
    public void moveTo(Session session, int newPos) {
        // the old data does not need to be copied, as free-list pages
        // at the end of the file are not required
        store.free(getPos(), false);
    }
    @Override
    public String toString() {
        return "page [" + getPos() + "] freeList" + (full ? "full" : "");
    }
    @Override
    public boolean canRemove() {
        return true;
    }
    @Override
    public boolean canMove() {
        // free-list pages are at fixed positions and must not be relocated
        return false;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageInputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.util.BitField;
/**
* An input stream that reads from a page store.
*/
public class PageInputStream extends InputStream {
    private final PageStore store;
    private final Trace trace;
    // First trunk page of the stream chain; starting point for iteration.
    private final int firstTrunkPage;
    private final PageStreamTrunk.Iterator trunkIterator;
    // The data page to start reading at, or -1 to read all pages in order.
    private int dataPage;
    // Current trunk page, or null when the next trunk must be fetched.
    private PageStreamTrunk trunk;
    // Index of the next data page entry within the current trunk.
    private int trunkIndex;
    // Current data page being read, or null.
    private PageStreamData data;
    // Read position within the current data page.
    private int dataPos;
    private boolean endOfFile;
    // Unread bytes left in the current data page.
    private int remaining;
    // One-byte scratch buffer for the single-byte read().
    private final byte[] buffer = { 0 };
    // Expected log key; pages with a different key belong to an older log.
    private int logKey;
    PageInputStream(PageStore store, int logKey, int firstTrunkPage, int dataPage) {
        this.store = store;
        this.trace = store.getTrace();
        // minus one because we increment before comparing
        this.logKey = logKey - 1;
        this.firstTrunkPage = firstTrunkPage;
        trunkIterator = new PageStreamTrunk.Iterator(store, firstTrunkPage);
        this.dataPage = dataPage;
    }
    @Override
    public int read() throws IOException {
        int len = read(buffer);
        return len < 0 ? -1 : (buffer[0] & 255);
    }
    @Override
    public int read(byte[] b) throws IOException {
        return read(b, 0, b.length);
    }
    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        if (len == 0) {
            return 0;
        }
        int read = 0;
        // keep reading blocks until the request is filled or the stream ends
        while (len > 0) {
            int r = readBlock(b, off, len);
            if (r < 0) {
                break;
            }
            read += r;
            off += r;
            len -= r;
        }
        return read == 0 ? -1 : read;
    }
    // Read at most one data page worth of bytes; returns -1 at end of file.
    private int readBlock(byte[] buff, int off, int len) throws IOException {
        try {
            fillBuffer();
            if (endOfFile) {
                return -1;
            }
            int l = Math.min(remaining, len);
            data.read(dataPos, buff, off, l);
            remaining -= l;
            dataPos += l;
            return l;
        } catch (DbException e) {
            // a corrupt or missing page is treated as end of stream
            throw new EOFException();
        }
    }
    // Advance to the next data page if the current one is exhausted.
    // Sets endOfFile when the chain ends or a log-key mismatch is found.
    private void fillBuffer() {
        if (remaining > 0 || endOfFile) {
            return;
        }
        int next;
        while (true) {
            if (trunk == null) {
                trunk = trunkIterator.next();
                trunkIndex = 0;
                // each trunk page increments the expected key; a mismatch
                // means the page belongs to an older log generation
                logKey++;
                if (trunk == null || trunk.getLogKey() != logKey) {
                    endOfFile = true;
                    return;
                }
            }
            if (trunk != null) {
                next = trunk.getPageData(trunkIndex++);
                if (next == -1) {
                    // trunk exhausted; move on to the next trunk page
                    trunk = null;
                } else if (dataPage == -1 || dataPage == next) {
                    // found the requested start page (or reading sequentially)
                    break;
                }
            }
        }
        if (trace.isDebugEnabled()) {
            trace.debug("pageIn.readPage " + next);
        }
        // after the start page was found, read all following pages in order
        dataPage = -1;
        data = null;
        Page p = store.getPage(next);
        if (p instanceof PageStreamData) {
            data = (PageStreamData) p;
        }
        if (data == null || data.getLogKey() != logKey) {
            endOfFile = true;
            return;
        }
        dataPos = PageStreamData.getReadStart();
        remaining = store.getPageSize() - dataPos;
    }
    /**
     * Set all pages as 'allocated' in the page store.
     *
     * @return the bit set
     */
    BitField allocateAllPages() {
        BitField pages = new BitField();
        int key = logKey;
        PageStreamTrunk.Iterator it = new PageStreamTrunk.Iterator(
                store, firstTrunkPage);
        while (true) {
            PageStreamTrunk t = it.next();
            key++;
            if (it.canDelete()) {
                // reserve the iterator's page even if the trunk is stale
                store.allocatePage(it.getCurrentPageId());
            }
            if (t == null || t.getLogKey() != key) {
                break;
            }
            pages.set(t.getPos());
            for (int i = 0;; i++) {
                int n = t.getPageData(i);
                if (n == -1) {
                    break;
                }
                pages.set(n);
                store.allocatePage(n);
            }
        }
        return pages;
    }
    // Returns the page id of the current data page.
    // NOTE(review): assumes a data page was already read - TODO confirm callers
    int getDataPage() {
        return data.getPos();
    }
    @Override
    public void close() {
        // nothing to do
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageLog.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import org.h2.api.ErrorCode;
import org.h2.compress.CompressLZF;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.result.Row;
import org.h2.result.RowFactory;
import org.h2.util.BitField;
import org.h2.util.IntArray;
import org.h2.util.IntIntHashMap;
import org.h2.util.New;
import org.h2.value.Value;
import org.h2.value.ValueNull;
/**
* Transaction log mechanism. The stream contains a list of records. The data
* format for a record is:
* <ul>
* <li>type (0: no-op, 1: undo, 2: commit, ...)</li>
* <li>data</li>
* </ul>
* The transaction log is split into sections.
* A checkpoint starts a new section.
*/
public class PageLog {
/**
* No operation.
*/
public static final int NOOP = 0;
/**
* An undo log entry. Format: page id: varInt, size, page. Size 0 means
* uncompressed, size 1 means empty page, otherwise the size is the number
* of compressed bytes.
*/
public static final int UNDO = 1;
/**
* A commit entry of a session.
* Format: session id: varInt.
*/
public static final int COMMIT = 2;
/**
* A prepare commit entry for a session.
* Format: session id: varInt, transaction name: string.
*/
public static final int PREPARE_COMMIT = 3;
/**
* Roll back a prepared transaction.
* Format: session id: varInt.
*/
public static final int ROLLBACK = 4;
/**
* Add a record to a table.
* Format: session id: varInt, table id: varInt, row.
*/
public static final int ADD = 5;
/**
* Remove a record from a table.
* Format: session id: varInt, table id: varInt, row.
*/
public static final int REMOVE = 6;
/**
* Truncate a table.
* Format: session id: varInt, table id: varInt.
*/
public static final int TRUNCATE = 7;
/**
* Perform a checkpoint. The log section id is incremented.
* Format: -
*/
public static final int CHECKPOINT = 8;
/**
* Free a log page.
* Format: count: varInt, page ids: varInt
*/
public static final int FREE_LOG = 9;
/**
* The recovery stage to undo changes (re-apply the backup).
*/
static final int RECOVERY_STAGE_UNDO = 0;
/**
* The recovery stage to allocate pages used by the transaction log.
*/
static final int RECOVERY_STAGE_ALLOCATE = 1;
/**
* The recovery stage to redo operations.
*/
static final int RECOVERY_STAGE_REDO = 2;
private static final boolean COMPRESS_UNDO = true;
private final PageStore store;
private final Trace trace;
private Data writeBuffer;
private PageOutputStream pageOut;
private int firstTrunkPage;
private int firstDataPage;
private final Data dataBuffer;
private int logKey;
private int logSectionId, logPos;
private int firstSectionId;
private final CompressLZF compress;
private final byte[] compressBuffer;
/**
* If the bit is set, the given page was written to the current log section.
* The undo entry of these pages doesn't need to be written again.
*/
private BitField undo = new BitField();
/**
* The undo entry of those pages was written in any log section.
* These pages may not be used in the transaction log.
*/
private final BitField undoAll = new BitField();
/**
* The map of section ids (key) and data page where the section starts
* (value).
*/
private final IntIntHashMap logSectionPageMap = new IntIntHashMap();
/**
* The session state map.
* Only used during recovery.
*/
private HashMap<Integer, SessionState> sessionStates = new HashMap<>();
/**
* The map of pages used by the transaction log.
* Only used during recovery.
*/
private BitField usedLogPages;
/**
* This flag is set while freeing up pages.
*/
private boolean freeing;
    // Create a transaction log for the given page store.
    PageLog(PageStore store) {
        this.store = store;
        dataBuffer = store.createData();
        trace = store.getTrace();
        compress = new CompressLZF();
        // scratch buffer for (de)compressing undo pages; twice the page size
        // so that even incompressible pages fit
        compressBuffer = new byte[store.getPageSize() * 2];
    }
/**
* Open the log for writing. For an existing database, the recovery
* must be run first.
*
* @param newFirstTrunkPage the first trunk page
* @param atEnd whether only pages at the end of the file should be used
*/
    void openForWriting(int newFirstTrunkPage, boolean atEnd) {
        trace.debug("log openForWriting firstPage: " + newFirstTrunkPage);
        this.firstTrunkPage = newFirstTrunkPage;
        // each opened log gets a new key, so pages of an older log
        // generation can be detected and ignored during recovery
        logKey++;
        pageOut = new PageOutputStream(store,
                newFirstTrunkPage, undoAll, logKey, atEnd);
        // make sure the first data page exists before publishing its id
        pageOut.reserve(1);
        // pageBuffer = new BufferedOutputStream(pageOut, 8 * 1024);
        store.setLogFirstPage(logKey, newFirstTrunkPage,
                pageOut.getCurrentDataPageId());
        writeBuffer = store.createData();
    }
/**
* Free up all pages allocated by the log.
*/
    void free() {
        if (trace.isDebugEnabled()) {
            trace.debug("log free");
        }
        int currentDataPage = 0;
        if (pageOut != null) {
            // the page currently reserved by the writer must not be freed
            currentDataPage = pageOut.getCurrentDataPageId();
            pageOut.freeReserved();
        }
        try {
            // the freeing flag is reset in the finally block even on error
            freeing = true;
            int first = 0;
            // cycle detection: remember a trunk page id and double the check
            // interval; seeing the same page again means the chain is cyclic
            int loopDetect = 1024, loopCount = 0;
            PageStreamTrunk.Iterator it = new PageStreamTrunk.Iterator(
                    store, firstTrunkPage);
            while (firstTrunkPage != 0 && firstTrunkPage < store.getPageCount()) {
                PageStreamTrunk t = it.next();
                if (t == null) {
                    if (it.canDelete()) {
                        store.free(firstTrunkPage, false);
                    }
                    break;
                }
                if (loopCount++ >= loopDetect) {
                    first = t.getPos();
                    loopCount = 0;
                    loopDetect *= 2;
                } else if (first != 0 && first == t.getPos()) {
                    throw DbException.throwInternalError(
                            "endless loop at " + t);
                }
                // free this trunk and its data pages (except the reserved one)
                t.free(currentDataPage);
                firstTrunkPage = t.getNextTrunk();
            }
        } finally {
            freeing = false;
        }
    }
/**
* Open the log for reading.
*
* @param newLogKey the first expected log key
* @param newFirstTrunkPage the first trunk page
* @param newFirstDataPage the index of the first data page
*/
    void openForReading(int newLogKey, int newFirstTrunkPage,
            int newFirstDataPage) {
        // only record where to start; the actual reading happens in recover()
        this.logKey = newLogKey;
        this.firstTrunkPage = newFirstTrunkPage;
        this.firstDataPage = newFirstDataPage;
    }
/**
* Run one recovery stage. There are three recovery stages: 0: only the undo
* steps are run (restoring the state before the last checkpoint). 1: the
* pages that are used by the transaction log are allocated. 2: the
* committed operations are re-applied.
*
* @param stage the recovery stage
* @return whether the transaction log was empty
*/
boolean recover(int stage) {
    if (trace.isDebugEnabled()) {
        trace.debug("log recover stage: " + stage);
    }
    if (stage == RECOVERY_STAGE_ALLOCATE) {
        // first stage: only mark the pages used by the log itself as
        // allocated, so the redo stage does not overwrite them
        PageInputStream in = new PageInputStream(store,
                logKey, firstTrunkPage, firstDataPage);
        usedLogPages = in.allocateAllPages();
        in.close();
        return true;
    }
    PageInputStream pageIn = new PageInputStream(store,
            logKey, firstTrunkPage, firstDataPage);
    DataReader in = new DataReader(pageIn);
    int logId = 0;
    Data data = store.createData();
    boolean isEmpty = true;
    try {
        // pos counts log entries; sessions remember commit positions
        // as (logId, pos) pairs - see isSessionCommitted
        int pos = 0;
        while (true) {
            int x = in.readByte();
            if (x < 0) {
                // end of the log stream
                break;
            }
            pos++;
            isEmpty = false;
            if (x == UNDO) {
                int pageId = in.readVarInt();
                // size encoding: 0 = uncompressed page follows,
                // 1 = empty (all-zero) page, otherwise 'size'
                // compressed bytes follow
                int size = in.readVarInt();
                if (size == 0) {
                    in.readFully(data.getBytes(), store.getPageSize());
                } else if (size == 1) {
                    // empty
                    Arrays.fill(data.getBytes(), 0, store.getPageSize(), (byte) 0);
                } else {
                    in.readFully(compressBuffer, size);
                    try {
                        compress.expand(compressBuffer, 0, size,
                                data.getBytes(), 0, store.getPageSize());
                    } catch (ArrayIndexOutOfBoundsException e) {
                        // NOTE(review): the converted IOException is created
                        // but never thrown, so a corrupt compressed entry is
                        // silently ignored here - confirm whether a 'throw'
                        // is missing
                        DbException.convertToIOException(e);
                    }
                }
                if (stage == RECOVERY_STAGE_UNDO) {
                    // only the first undo entry per page is applied; it
                    // holds the oldest pre-image of that page
                    if (!undo.get(pageId)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log undo {0}", pageId);
                        }
                        store.writePage(pageId, data);
                        undo.set(pageId);
                        undoAll.set(pageId);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log undo skip {0}", pageId);
                        }
                    }
                }
            } else if (x == ADD) {
                int sessionId = in.readVarInt();
                int tableId = in.readVarInt();
                Row row = readRow(store.getDatabase().getRowFactory(), in, data);
                if (stage == RECOVERY_STAGE_UNDO) {
                    store.allocateIfIndexRoot(pos, tableId, row);
                } else if (stage == RECOVERY_STAGE_REDO) {
                    // only redo entries of sessions that committed after
                    // this log position
                    if (isSessionCommitted(sessionId, logId, pos)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log redo + table: " + tableId +
                                    " s: " + sessionId + " " + row);
                        }
                        store.redo(tableId, row, true);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log ignore s: " + sessionId +
                                    " + table: " + tableId + " " + row);
                        }
                    }
                }
            } else if (x == REMOVE) {
                int sessionId = in.readVarInt();
                int tableId = in.readVarInt();
                long key = in.readVarLong();
                if (stage == RECOVERY_STAGE_REDO) {
                    if (isSessionCommitted(sessionId, logId, pos)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log redo - table: " + tableId +
                                    " s:" + sessionId + " key: " + key);
                        }
                        store.redoDelete(tableId, key);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log ignore s: " + sessionId +
                                    " - table: " + tableId + " " + key);
                        }
                    }
                }
            } else if (x == TRUNCATE) {
                int sessionId = in.readVarInt();
                int tableId = in.readVarInt();
                if (stage == RECOVERY_STAGE_REDO) {
                    if (isSessionCommitted(sessionId, logId, pos)) {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log redo truncate table: " + tableId);
                        }
                        store.redoTruncate(tableId);
                    } else {
                        if (trace.isDebugEnabled()) {
                            trace.debug("log ignore s: "+ sessionId +
                                    " truncate table: " + tableId);
                        }
                    }
                }
            } else if (x == PREPARE_COMMIT) {
                int sessionId = in.readVarInt();
                String transaction = in.readString();
                if (trace.isDebugEnabled()) {
                    trace.debug("log prepare commit " + sessionId + " " +
                            transaction + " pos: " + pos);
                }
                if (stage == RECOVERY_STAGE_UNDO) {
                    // remember the page so the in-doubt transaction can be
                    // resolved in place later (setInDoubtTransactionState)
                    int page = pageIn.getDataPage();
                    setPrepareCommit(sessionId, page, transaction);
                }
            } else if (x == ROLLBACK) {
                int sessionId = in.readVarInt();
                if (trace.isDebugEnabled()) {
                    trace.debug("log rollback " + sessionId + " pos: " + pos);
                }
                // ignore - this entry is just informational
            } else if (x == COMMIT) {
                int sessionId = in.readVarInt();
                if (trace.isDebugEnabled()) {
                    trace.debug("log commit " + sessionId + " pos: " + pos);
                }
                if (stage == RECOVERY_STAGE_UNDO) {
                    setLastCommitForSession(sessionId, logId, pos);
                }
            } else if (x == NOOP) {
                // nothing to do
            } else if (x == CHECKPOINT) {
                // a checkpoint starts a new log section
                logId++;
            } else if (x == FREE_LOG) {
                int count = in.readVarInt();
                for (int i = 0; i < count; i++) {
                    int pageId = in.readVarInt();
                    if (stage == RECOVERY_STAGE_REDO) {
                        // free the page unless the current log still uses it
                        if (!usedLogPages.get(pageId)) {
                            store.free(pageId, false);
                        }
                    }
                }
            } else {
                // unknown entry type
                // NOTE(review): the 'break' is inside the isDebugEnabled()
                // check, so with tracing disabled the loop keeps reading -
                // confirm whether the break should be unconditional
                if (trace.isDebugEnabled()) {
                    trace.debug("log end");
                    break;
                }
            }
        }
    } catch (DbException e) {
        if (e.getErrorCode() == ErrorCode.FILE_CORRUPTED_1) {
            // a partially written log entry ends recovery
            trace.debug("log recovery stopped");
        } else {
            throw e;
        }
    } catch (IOException e) {
        // reaching the end of the log stream is the normal exit
        trace.debug("log recovery completed");
    }
    undo = new BitField();
    if (stage == RECOVERY_STAGE_REDO) {
        usedLogPages = null;
    }
    return isEmpty;
}
/**
 * Remember a 'prepare commit' entry that was found while reading the
 * transaction log during database startup. The session is then marked as
 * having an in-doubt transaction (or none, when rolling back).
 *
 * @param sessionId the session id
 * @param pageId the data page that contains the prepare entry
 * @param transaction the transaction name, or null to rollback
 */
private void setPrepareCommit(int sessionId, int pageId, String transaction) {
    SessionState state = getOrAddSessionState(sessionId);
    state.inDoubtTransaction = transaction == null ? null
            : new PageStoreInDoubtTransaction(store, sessionId, pageId,
                    transaction);
}
/**
 * Read a row from an input stream.
 *
 * @param rowFactory the row factory
 * @param in the input stream
 * @param data a temporary buffer
 * @return the row
 */
public static Row readRow(RowFactory rowFactory, DataReader in, Data data) throws IOException {
    // entry layout: key, byte length, then the serialized values
    long key = in.readVarLong();
    int length = in.readVarInt();
    data.reset();
    data.checkCapacity(length);
    in.readFully(data.getBytes(), length);
    int columns = data.readVarInt();
    Value[] values = new Value[columns];
    for (int column = 0; column < columns; column++) {
        values[column] = data.readValue();
    }
    Row result = rowFactory.createRow(values, Row.MEMORY_CALCULATE);
    result.setKey(key);
    return result;
}
/**
 * Check if the undo entry was already written for the given page
 * (since the last checkpoint).
 *
 * @param pageId the page
 * @return true if it was written
 */
boolean getUndo(int pageId) {
    return undo.get(pageId);
}
/**
 * Add an undo entry to the log. The page data is only written once until
 * the next checkpoint.
 *
 * @param pageId the page id
 * @param page the old page data
 */
void addUndo(int pageId, Data page) {
    // skip if already logged in this section, or while freeing log pages
    if (undo.get(pageId) || freeing) {
        return;
    }
    if (trace.isDebugEnabled()) {
        trace.debug("log undo " + pageId);
    }
    if (SysProperties.CHECK) {
        if (page == null) {
            DbException.throwInternalError("Undo entry not written");
        }
    }
    undo.set(pageId);
    undoAll.set(pageId);
    Data buffer = getBuffer();
    buffer.writeByte((byte) UNDO);
    buffer.writeVarInt(pageId);
    if (page.getBytes()[0] == 0) {
        // first byte 0: presumably a free/empty page - only the size
        // marker 1 is stored; recover() restores an all-zero page
        buffer.writeVarInt(1);
    } else {
        int pageSize = store.getPageSize();
        if (COMPRESS_UNDO) {
            int size = compress.compress(page.getBytes(),
                    pageSize, compressBuffer, 0);
            if (size < pageSize) {
                // store compressed: a size > 1 means 'size' compressed bytes
                buffer.writeVarInt(size);
                buffer.checkCapacity(size);
                buffer.write(compressBuffer, 0, size);
            } else {
                // compression did not help: size marker 0 = raw page follows
                buffer.writeVarInt(0);
                buffer.checkCapacity(pageSize);
                buffer.write(page.getBytes(), 0, pageSize);
            }
        } else {
            // compression disabled: size marker 0 = raw page follows
            buffer.writeVarInt(0);
            buffer.checkCapacity(pageSize);
            buffer.write(page.getBytes(), 0, pageSize);
        }
    }
    write(buffer);
}
/**
 * Write a FREE_LOG entry listing the given pages, so that recovery can
 * free them once they are no longer needed.
 *
 * @param pages the page ids to free
 */
private void freeLogPages(IntArray pages) {
    if (trace.isDebugEnabled()) {
        trace.debug("log frees " + pages.get(0) + ".." +
                pages.get(pages.size() - 1));
    }
    Data entry = getBuffer();
    entry.writeByte((byte) FREE_LOG);
    int count = pages.size();
    entry.writeVarInt(count);
    for (int i = 0; i < count; i++) {
        entry.writeVarInt(pages.get(i));
    }
    write(entry);
}
/**
 * Append the contents of the given buffer to the log output stream and
 * reset the buffer so it can be reused (getBuffer() hands out the shared
 * buffer only while it is empty).
 *
 * @param data the buffer to write out
 */
private void write(Data data) {
    pageOut.write(data.getBytes(), 0, data.length());
    data.reset();
}
/**
 * Mark a transaction as committed.
 *
 * @param sessionId the session
 */
void commit(int sessionId) {
    if (trace.isDebugEnabled()) {
        trace.debug("log commit s: " + sessionId);
    }
    if (store.getDatabase().getPageStore() == null) {
        // the database has already been closed
        return;
    }
    Data entry = getBuffer();
    entry.writeByte((byte) COMMIT);
    entry.writeVarInt(sessionId);
    write(entry);
    if (store.getDatabase().getFlushOnEachCommit()) {
        flush();
    }
}
/**
 * Prepare a transaction.
 *
 * @param session the session
 * @param transaction the name of the transaction
 */
void prepareCommit(Session session, String transaction) {
    if (trace.isDebugEnabled()) {
        trace.debug("log prepare commit s: " + session.getId() + ", " + transaction);
    }
    if (store.getDatabase().getPageStore() == null) {
        // database already closed
        return;
    }
    // store it on a separate log page, so that the page can later be
    // rewritten in place to resolve the in-doubt transaction
    // (see setInDoubtTransactionState)
    int pageSize = store.getPageSize();
    pageOut.flush();
    pageOut.fillPage();
    Data buffer = getBuffer();
    buffer.writeByte((byte) PREPARE_COMMIT);
    buffer.writeVarInt(session.getId());
    buffer.writeString(transaction);
    if (buffer.length() >= PageStreamData.getCapacity(pageSize)) {
        // the whole entry must fit into a single data page
        throw DbException.getInvalidValueException(
                "transaction name (too long)", transaction);
    }
    write(buffer);
    // store it on a separate log page
    flushOut();
    pageOut.fillPage();
    if (store.getDatabase().getFlushOnEachCommit()) {
        flush();
    }
}
/**
 * A record is added to a table, or removed from a table.
 *
 * @param session the session
 * @param tableId the table id
 * @param row the row to add
 * @param add true if the row is added, false if it is removed
 */
void logAddOrRemoveRow(Session session, int tableId, Row row, boolean add) {
    if (trace.isDebugEnabled()) {
        trace.debug("log " + (add ? "+" : "-") +
                " s: " + session.getId() + " table: " + tableId + " row: " + row);
    }
    // remember the log position so the session can later be checked
    // for commit (isSessionCommitted), then advance it
    session.addLogPos(logSectionId, logPos);
    logPos++;
    // serialize the row values into a temporary buffer first
    Data data = dataBuffer;
    data.reset();
    int columns = row.getColumnCount();
    data.writeVarInt(columns);
    data.checkCapacity(row.getByteCount(data));
    if (session.isRedoLogBinaryEnabled()) {
        for (int i = 0; i < columns; i++) {
            data.writeValue(row.getValue(i));
        }
    } else {
        // binary logging disabled: BYTES values are replaced with NULL
        for (int i = 0; i < columns; i++) {
            Value v = row.getValue(i);
            if (v.getType() == Value.BYTES) {
                data.writeValue(ValueNull.INSTANCE);
            } else {
                data.writeValue(v);
            }
        }
    }
    // entry layout: type, sessionId, tableId, key[, length, values]
    // (must match what recover()/readRow() read back)
    Data buffer = getBuffer();
    buffer.writeByte((byte) (add ? ADD : REMOVE));
    buffer.writeVarInt(session.getId());
    buffer.writeVarInt(tableId);
    buffer.writeVarLong(row.getKey());
    if (add) {
        // only ADD entries carry the row data; REMOVE needs just the key
        buffer.writeVarInt(data.length());
        buffer.checkCapacity(data.length());
        buffer.write(data.getBytes(), 0, data.length());
    }
    write(buffer);
}
/**
 * A table is truncated.
 *
 * @param session the session
 * @param tableId the table id
 */
void logTruncate(Session session, int tableId) {
    if (trace.isDebugEnabled()) {
        trace.debug("log truncate s: " + session.getId() + " table: " + tableId);
    }
    // record the log position for commit tracking, then advance it
    session.addLogPos(logSectionId, logPos);
    logPos++;
    Data entry = getBuffer();
    entry.writeByte((byte) TRUNCATE);
    entry.writeVarInt(session.getId());
    entry.writeVarInt(tableId);
    write(entry);
}
/**
 * Flush the transaction log.
 * This is a no-op after close() (pageOut is null then).
 */
void flush() {
    if (pageOut != null) {
        flushOut();
    }
}
/**
 * Switch to a new log section.
 */
void checkpoint() {
    Data buffer = getBuffer();
    buffer.writeByte((byte) CHECKPOINT);
    write(buffer);
    // after a checkpoint every page gets a fresh undo entry again
    undo = new BitField();
    logSectionId++;
    logPos = 0;
    // start the new section on a fresh data page, and remember that page
    // so removeUntil() can later free everything before this section
    pageOut.flush();
    pageOut.fillPage();
    int currentDataPage = pageOut.getCurrentDataPageId();
    logSectionPageMap.put(logSectionId, currentDataPage);
}
// The current log section id (incremented at each checkpoint).
int getLogSectionId() {
    return logSectionId;
}

// The oldest log section id that is still kept (advanced by removeUntil).
int getLogFirstSectionId() {
    return firstSectionId;
}

// The entry counter within the current log section (reset at checkpoint).
int getLogPos() {
    return logPos;
}
/**
 * Remove all pages until the given log (excluding).
 *
 * @param firstUncommittedSection the first log section to keep
 */
void removeUntil(int firstUncommittedSection) {
    if (firstUncommittedSection == 0) {
        return;
    }
    int keepDataPage = logSectionPageMap.get(firstUncommittedSection);
    firstTrunkPage = removeUntil(firstTrunkPage, keepDataPage);
    store.setLogFirstPage(logKey, firstTrunkPage, keepDataPage);
    // drop the section map entries of all removed sections
    for (; firstSectionId < firstUncommittedSection; firstSectionId++) {
        if (firstSectionId > 0) {
            // there is no entry for log 0
            logSectionPageMap.remove(firstSectionId);
        }
    }
}
/**
 * Remove all pages until the given data page.
 *
 * @param trunkPage the first trunk page
 * @param firstDataPageToKeep the first data page to keep
 * @return the trunk page of the data page to keep
 */
private int removeUntil(int trunkPage, int firstDataPageToKeep) {
    trace.debug("log.removeUntil " + trunkPage + " " + firstDataPageToKeep);
    int last = trunkPage;
    while (true) {
        Page p = store.getPage(trunkPage);
        PageStreamTrunk t = (PageStreamTrunk) p;
        if (t == null) {
            throw DbException.throwInternalError(
                    "log.removeUntil not found: " + firstDataPageToKeep + " last " + last);
        }
        // side effect: logKey tracks the key of the last trunk page seen,
        // so the caller can store it with the new first log page
        logKey = t.getLogKey();
        last = t.getPos();
        if (t.contains(firstDataPageToKeep)) {
            // this trunk addresses the page to keep: it becomes the
            // new first trunk page
            return last;
        }
        trunkPage = t.getNextTrunk();
        // log a FREE_LOG entry for the trunk page and all of its data
        // pages, then free them in the output stream
        IntArray list = new IntArray();
        list.add(t.getPos());
        for (int i = 0;; i++) {
            int next = t.getPageData(i);
            if (next == -1) {
                break;
            }
            list.add(next);
        }
        freeLogPages(list);
        pageOut.free(t);
    }
}
/**
 * Close without further writing.
 */
void close() {
    trace.debug("log close");
    PageOutputStream out = pageOut;
    if (out != null) {
        pageOut = null;
        out.close();
    }
    writeBuffer = null;
}
/**
 * Check if the session committed after than the given position.
 *
 * @param sessionId the session id
 * @param logId the log id
 * @param pos the position in the log
 * @return true if it is committed
 */
private boolean isSessionCommitted(int sessionId, int logId, int pos) {
    SessionState state = sessionStates.get(sessionId);
    // a session without recorded state never committed
    return state != null && state.isCommitted(logId, pos);
}
/**
 * Set the last commit record for a session.
 *
 * @param sessionId the session id
 * @param logId the log id
 * @param pos the position in the log
 */
private void setLastCommitForSession(int sessionId, int logId, int pos) {
    SessionState s = getOrAddSessionState(sessionId);
    s.lastCommitLog = logId;
    s.lastCommitPos = pos;
    // a commit resolves any prepared (in-doubt) transaction
    s.inDoubtTransaction = null;
}
/**
 * Get the session state for this session. A new object is created if there
 * is no session state yet.
 *
 * @param sessionId the session id
 * @return the session state object
 */
private SessionState getOrAddSessionState(int sessionId) {
    Integer key = sessionId;
    SessionState state = sessionStates.get(key);
    if (state != null) {
        return state;
    }
    state = new SessionState();
    state.sessionId = sessionId;
    sessionStates.put(key, state);
    return state;
}
// Size of the transaction log in bytes, or 0 if the log is closed.
long getSize() {
    return pageOut == null ? 0 : pageOut.getSize();
}
/**
 * Collect the in-doubt (prepared but unresolved) transactions of all
 * known sessions.
 *
 * @return the list of in-doubt transactions
 */
ArrayList<InDoubtTransaction> getInDoubtTransactions() {
    ArrayList<InDoubtTransaction> result = New.arrayList();
    for (SessionState state : sessionStates.values()) {
        if (state.inDoubtTransaction != null) {
            result.add(state.inDoubtTransaction);
        }
    }
    return result;
}
/**
 * Set the state of an in-doubt transaction.
 *
 * @param sessionId the session
 * @param pageId the page where the commit was prepared
 * @param commit whether the transaction should be committed
 */
void setInDoubtTransactionState(int sessionId, int pageId, boolean commit) {
    PageStreamData d = (PageStreamData) store.getPage(pageId);
    d.initWrite();
    Data buff = store.createData();
    // overwrite the prepared entry's page with a plain COMMIT or ROLLBACK
    buff.writeByte((byte) (commit ? COMMIT : ROLLBACK));
    buff.writeVarInt(sessionId);
    byte[] bytes = buff.getBytes();
    d.write(bytes, 0, bytes.length);
    // fill the rest of the page with zero bytes so no stale log data remains
    bytes = new byte[d.getRemaining()];
    d.write(bytes, 0, bytes.length);
    d.write();
}
/**
 * Called after the recovery has been completed.
 * Discards all per-session recovery state (commit positions and
 * in-doubt transactions collected while reading the log).
 */
void recoverEnd() {
    sessionStates = new HashMap<>();
}
// Flush the underlying page output stream to the store.
private void flushOut() {
    pageOut.flush();
}
// Return the shared write buffer if it is currently unused (empty);
// otherwise create a temporary one - presumably so a nested log write
// cannot clobber data pending in the shared buffer.
private Data getBuffer() {
    if (writeBuffer.length() == 0) {
        return writeBuffer;
    }
    return store.createData();
}
/**
 * Get the smallest possible page id used. This is the trunk page if only
 * appending at the end of the file, or 0. Returns 0 if the log is closed.
 *
 * @return the smallest possible page.
 */
int getMinPageId() {
    return pageOut == null ? 0 : pageOut.getMinPageId();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageOutputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.util.BitField;
import org.h2.util.IntArray;
/**
 * An output stream that writes into a page store.
 * The stream consists of a linked list of trunk pages, each of which
 * addresses a number of data pages holding the actual stream bytes.
 */
public class PageOutputStream {

    private PageStore store;
    private final Trace trace;
    // pages that must not be allocated for this stream
    private final BitField exclude;
    // whether only pages at the end of the file should be used
    private final boolean atEnd;
    private final int minPageId;
    private int trunkPageId;
    private int trunkNext;
    // pages pre-allocated by reserve() but not yet turned into trunk/data pages
    private IntArray reservedPages = new IntArray();
    private PageStreamTrunk trunk;
    private int trunkIndex;
    private PageStreamData data;
    // number of reserved bytes that can still be written without allocating
    private int reserved;
    private boolean needFlush;
    // guards against re-entrant writes (see write())
    private boolean writing;
    // number of trunk and data pages created by this stream
    private int pageCount;
    private int logKey;

    /**
     * Create a new page output stream.
     *
     * @param store the page store
     * @param trunkPage the first trunk page (already allocated)
     * @param exclude the pages not to use
     * @param logKey the log key of the first trunk page
     * @param atEnd whether only pages at the end of the file should be used
     */
    public PageOutputStream(PageStore store, int trunkPage, BitField exclude,
            int logKey, boolean atEnd) {
        this.trace = store.getTrace();
        this.store = store;
        this.trunkPageId = trunkPage;
        this.exclude = exclude;
        // minus one, because we increment before creating a trunk page
        this.logKey = logKey - 1;
        this.atEnd = atEnd;
        minPageId = atEnd ? trunkPage : 0;
    }

    /**
     * Allocate the required pages so that no pages need to be allocated while
     * writing.
     *
     * @param minBuffer the number of bytes to allocate
     */
    void reserve(int minBuffer) {
        if (reserved < minBuffer) {
            int pageSize = store.getPageSize();
            int capacityPerPage = PageStreamData.getCapacity(pageSize);
            int pages = PageStreamTrunk.getPagesAddressed(pageSize);
            int pagesToAllocate = 0, totalCapacity = 0;
            do {
                // allocate x data pages plus one trunk page
                pagesToAllocate += pages + 1;
                totalCapacity += pages * capacityPerPage;
            } while (totalCapacity < minBuffer);
            int firstPageToUse = atEnd ? trunkPageId : 0;
            store.allocatePages(reservedPages, pagesToAllocate, exclude, firstPageToUse);
            reserved += totalCapacity;
            if (data == null) {
                initNextData();
            }
        }
    }

    /**
     * Move to the next data page, creating a new trunk page from the
     * reserved pages when the current trunk is exhausted (or none exists).
     */
    private void initNextData() {
        int nextData = trunk == null ? -1 : trunk.getPageData(trunkIndex++);
        if (nextData == -1) {
            // the current trunk page is full: create the next one
            int parent = trunkPageId;
            if (trunkNext != 0) {
                trunkPageId = trunkNext;
            }
            int len = PageStreamTrunk.getPagesAddressed(store.getPageSize());
            int[] pageIds = new int[len];
            for (int i = 0; i < len; i++) {
                pageIds[i] = reservedPages.get(i);
            }
            // the page after the addressed ones becomes the next trunk
            trunkNext = reservedPages.get(len);
            logKey++;
            trunk = PageStreamTrunk.create(store, parent, trunkPageId,
                    trunkNext, logKey, pageIds);
            trunkIndex = 0;
            pageCount++;
            trunk.write();
            reservedPages.removeRange(0, len + 1);
            nextData = trunk.getPageData(trunkIndex++);
        }
        data = PageStreamData.create(store, nextData, trunk.getPos(), logKey);
        pageCount++;
        data.initWrite();
    }

    /**
     * Write the data.
     *
     * @param b the buffer
     * @param off the offset
     * @param len the length
     */
    public void write(byte[] b, int off, int len) {
        if (len <= 0) {
            return;
        }
        if (writing) {
            DbException.throwInternalError("writing while still writing");
        }
        try {
            reserve(len);
            writing = true;
            while (len > 0) {
                int l = data.write(b, off, len);
                if (l < len) {
                    // current data page is full: flush it and move on
                    storePage();
                    initNextData();
                }
                reserved -= l;
                off += l;
                len -= l;
            }
            needFlush = true;
        } finally {
            writing = false;
        }
    }

    /** Write the current data page to the store. */
    private void storePage() {
        if (trace.isDebugEnabled()) {
            trace.debug("pageOut.storePage " + data);
        }
        data.write();
    }

    /**
     * Write all data.
     */
    public void flush() {
        if (needFlush) {
            storePage();
            needFlush = false;
        }
    }

    /**
     * Close the stream.
     */
    public void close() {
        store = null;
    }

    // The page id of the data page currently being written.
    int getCurrentDataPageId() {
        return data.getPos();
    }

    /**
     * Fill the data page with zeros and write it.
     * This is required for a checkpoint.
     */
    void fillPage() {
        if (trace.isDebugEnabled()) {
            trace.debug("pageOut.storePage fill " + data.getPos());
        }
        // ensure the next data page is already reserved (+1 byte)
        reserve(data.getRemaining() + 1);
        reserved -= data.getRemaining();
        data.write();
        initNextData();
    }

    /**
     * Get the number of bytes used by this stream.
     *
     * @return the size in bytes
     */
    long getSize() {
        // widen before multiplying to avoid int overflow for streams
        // of 2 GiB or more (pageCount and page size are both int)
        return (long) pageCount * store.getPageSize();
    }

    /**
     * Remove a trunk page from the stream.
     *
     * @param t the trunk page
     */
    void free(PageStreamTrunk t) {
        pageCount -= t.free(0);
    }

    /**
     * Free up all reserved pages.
     */
    void freeReserved() {
        if (reservedPages.size() > 0) {
            int[] array = new int[reservedPages.size()];
            reservedPages.toArray(array);
            reservedPages = new IntArray();
            reserved = 0;
            for (int p : array) {
                store.free(p, false);
            }
        }
    }

    /**
     * Get the smallest possible page id used. This is the trunk page if only
     * appending at the end of the file, or 0.
     *
     * @return the smallest possible page.
     */
    int getMinPageId() {
        return minPageId;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageStore.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
import java.util.zip.CRC32;
import org.h2.api.ErrorCode;
import org.h2.command.CommandInterface;
import org.h2.command.ddl.CreateTableData;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.engine.SysProperties;
import org.h2.index.Cursor;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.index.MultiVersionIndex;
import org.h2.index.PageBtreeIndex;
import org.h2.index.PageBtreeLeaf;
import org.h2.index.PageBtreeNode;
import org.h2.index.PageDataIndex;
import org.h2.index.PageDataLeaf;
import org.h2.index.PageDataNode;
import org.h2.index.PageDataOverflow;
import org.h2.index.PageDelegateIndex;
import org.h2.index.PageIndex;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.result.Row;
import org.h2.schema.Schema;
import org.h2.store.fs.FileUtils;
import org.h2.table.Column;
import org.h2.table.IndexColumn;
import org.h2.table.RegularTable;
import org.h2.table.Table;
import org.h2.table.TableType;
import org.h2.util.BitField;
import org.h2.util.Cache;
import org.h2.util.CacheLRU;
import org.h2.util.CacheObject;
import org.h2.util.CacheWriter;
import org.h2.util.IntArray;
import org.h2.util.IntIntHashMap;
import org.h2.util.New;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.value.CompareMode;
import org.h2.value.Value;
import org.h2.value.ValueInt;
import org.h2.value.ValueString;
/**
* This class represents a file that is organized as a number of pages. Page 0
* contains a static file header, and pages 1 and 2 both contain the variable
* file header (page 2 is a copy of page 1 and is only read if the checksum of
* page 1 is invalid). The format of page 0 is:
* <ul>
* <li>0-47: file header (3 time "-- H2 0.5/B -- \n")</li>
* <li>48-51: page size in bytes (512 - 32768, must be a power of 2)</li>
* <li>52: write version (read-only if larger than 1)</li>
* <li>53: read version (opening fails if larger than 1)</li>
* </ul>
* The format of page 1 and 2 is:
* <ul>
* <li>CRC32 of the remaining data: int (0-3)</li>
* <li>write counter (incremented on each write): long (4-11)</li>
* <li>log trunk key: int (12-15)</li>
* <li>log trunk page (0 for none): int (16-19)</li>
* <li>log data page (0 for none): int (20-23)</li>
* </ul>
* Page 3 contains the first free list page.
* Page 4 contains the meta table root page.
*/
public class PageStore implements CacheWriter {
// TODO test running out of disk space (using a special file system)
// TODO unused pages should be freed once in a while
// TODO node row counts are incorrect (it's not splitting row counts)
// TODO after opening the database, delay writing until required
// TODO optimization: try to avoid allocating a byte array per page
// TODO optimization: check if calling Data.getValueLen slows things down
// TODO order pages so that searching for a key only seeks forward
// TODO optimization: update: only log the key and changed values
// TODO index creation: use less space (ordered, split at insertion point)
// TODO detect circles in linked lists
// (input stream, free list, extend pages...)
// at runtime and recovery
// TODO remove trace or use isDebugEnabled
// TODO recover tool: support syntax to delete a row with a key
// TODO don't store default values (store a special value)
// TODO check for file size (exception if not exact size expected)
// TODO online backup using bsdiff
/**
* The smallest possible page size.
*/
public static final int PAGE_SIZE_MIN = 64;
/**
* The biggest possible page size.
*/
public static final int PAGE_SIZE_MAX = 32768;
/**
* This log mode means the transaction log is not used.
*/
public static final int LOG_MODE_OFF = 0;
/**
* This log mode means the transaction log is used and FileDescriptor.sync()
* is called for each checkpoint. This is the default level.
*/
public static final int LOG_MODE_SYNC = 2;
private static final int PAGE_ID_FREE_LIST_ROOT = 3;
private static final int PAGE_ID_META_ROOT = 4;
private static final int MIN_PAGE_COUNT = 5;
private static final int INCREMENT_KB = 1024;
private static final int INCREMENT_PERCENT_MIN = 35;
private static final int READ_VERSION = 3;
private static final int WRITE_VERSION = 3;
private static final int META_TYPE_DATA_INDEX = 0;
private static final int META_TYPE_BTREE_INDEX = 1;
private static final int META_TABLE_ID = -1;
private static final int COMPACT_BLOCK_SIZE = 1536;
private final Database database;
private final Trace trace;
private final String fileName;
private FileStore file;
private String accessMode;
private int pageSize = Constants.DEFAULT_PAGE_SIZE;
private int pageSizeShift;
private long writeCountBase, writeCount, readCount;
private int logKey, logFirstTrunkPage, logFirstDataPage;
private final Cache cache;
private int freeListPagesPerList;
private boolean recoveryRunning;
private boolean ignoreBigLog;
/**
* The index to the first free-list page that potentially has free space.
*/
private int firstFreeListIndex;
/**
* The file size in bytes.
*/
private long fileLength;
/**
* Number of pages (including free pages).
*/
private int pageCount;
private PageLog log;
private Schema metaSchema;
private RegularTable metaTable;
private PageDataIndex metaIndex;
private final IntIntHashMap metaRootPageId = new IntIntHashMap();
private final HashMap<Integer, PageIndex> metaObjects = new HashMap<>();
private HashMap<Integer, PageIndex> tempObjects;
/**
* The map of reserved pages, to ensure index head pages
* are not used for regular data during recovery. The key is the page id,
* and the value the latest transaction position where this page is used.
*/
private HashMap<Integer, Integer> reservedPages;
private boolean isNew;
private long maxLogSize = Constants.DEFAULT_MAX_LOG_SIZE;
private final Session pageStoreSession;
/**
* Each free page is marked with a set bit.
*/
private final BitField freed = new BitField();
private final ArrayList<PageFreeList> freeLists = New.arrayList();
private boolean recordPageReads;
private ArrayList<Integer> recordedPagesList;
private IntIntHashMap recordedPagesIndex;
/**
* The change count is something like a "micro-transaction-id".
* It is used to ensure that changed pages are not written to the file
* before the the current operation is not finished. This is only a problem
* when using a very small cache size. The value starts at 1 so that
* pages with change count 0 can be evicted from the cache.
*/
private long changeCount = 1;
private Data emptyPage;
private long logSizeBase;
private HashMap<String, Integer> statistics;
private int logMode = LOG_MODE_SYNC;
private boolean lockFile;
private boolean readMode;
private int backupLevel;
/**
 * Create a new page store object.
 *
 * @param database the database
 * @param fileName the file name
 * @param accessMode the access mode
 * @param cacheSizeDefault the default cache size
 */
public PageStore(Database database, String fileName, String accessMode,
        int cacheSizeDefault) {
    this.fileName = fileName;
    this.accessMode = accessMode;
    this.database = database;
    trace = database.getTrace(Trace.PAGE_STORE);
    // if (fileName.endsWith("X.h2.db"))
    // trace.setLevel(TraceSystem.DEBUG);
    String cacheType = database.getCacheType();
    this.cache = CacheLRU.getCache(this, cacheType, cacheSizeDefault);
    // internal session (id 0) used for meta data operations
    pageStoreSession = new Session(database, null, 0);
}
/**
 * Start collecting statistics.
 * A non-null map enables counting in statisticsIncrement().
 */
public void statisticsStart() {
    statistics = new HashMap<>();
}
/**
 * Stop collecting statistics.
 *
 * @return the statistics
 */
public HashMap<String, Integer> statisticsEnd() {
    HashMap<String, Integer> collected = statistics;
    // disable further counting
    statistics = null;
    return collected;
}
/**
 * Increment the counter for the given key, if statistics collection
 * is currently enabled.
 *
 * @param key the statistics key
 */
private void statisticsIncrement(String key) {
    if (statistics == null) {
        return;
    }
    Integer old = statistics.get(key);
    statistics.put(key, old == null ? 1 : old + 1);
}
/**
 * Copy the next page to the output stream.
 *
 * @param pageId the page to copy
 * @param out the output stream
 * @return the new position, or -1 if there is no more data to copy
 */
public synchronized int copyDirect(int pageId, OutputStream out)
        throws IOException {
    if (pageId >= pageCount) {
        return -1;
    }
    byte[] page = new byte[pageSize];
    file.seek((long) pageId << pageSizeShift);
    file.readFullyDirect(page, 0, pageSize);
    readCount++;
    out.write(page, 0, pageSize);
    return pageId + 1;
}
/**
 * Open the file and read the header.
 */
public synchronized void open() {
    try {
        metaRootPageId.put(META_TABLE_ID, PAGE_ID_META_ROOT);
        if (!FileUtils.exists(fileName)) {
            openNew();
            return;
        }
        long length = FileUtils.size(fileName);
        if (length >= MIN_PAGE_COUNT * PAGE_SIZE_MIN) {
            openExisting();
        } else {
            if (database.isReadOnly()) {
                throw DbException.get(
                        ErrorCode.FILE_CORRUPTED_1, fileName + " length: " + length);
            }
            // the database was not fully created: start over
            openNew();
        }
    } catch (DbException e) {
        close();
        throw e;
    }
}
// Create a brand-new database file, write its headers, and open
// the transaction log for writing.
private void openNew() {
    // derives per-size settings (presumably pageSizeShift) - TODO confirm
    setPageSize(pageSize);
    freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize);
    file = database.openFile(fileName, accessMode, false);
    lockFile();
    // while creating the file, changes must not be logged
    recoveryRunning = true;
    writeStaticHeader();
    writeVariableHeader();
    log = new PageLog(this);
    increaseFileSize(MIN_PAGE_COUNT);
    openMetaIndex();
    logFirstTrunkPage = allocatePage();
    log.openForWriting(logFirstTrunkPage, false);
    isNew = true;
    recoveryRunning = false;
    increaseFileSize();
}
/**
 * Try to lock the database file, if file locking is enabled.
 * Throws DATABASE_ALREADY_OPEN_1 when the lock cannot be obtained.
 */
private void lockFile() {
    if (!lockFile) {
        return;
    }
    if (!file.tryLock()) {
        throw DbException.get(
                ErrorCode.DATABASE_ALREADY_OPEN_1, fileName);
    }
}
// Open an existing database file, run crash recovery from the
// transaction log, and (when allowed) prepare the store for writing.
private void openExisting() {
    try {
        file = database.openFile(fileName, accessMode, true);
    } catch (DbException e) {
        if (e.getErrorCode() == ErrorCode.IO_EXCEPTION_2) {
            if (e.getMessage().contains("locked")) {
                // in Windows, you can't open a locked file
                // (in other operating systems, you can)
                // the exact error message is:
                // "The process cannot access the file because
                // another process has locked a portion of the file"
                throw DbException.get(
                        ErrorCode.DATABASE_ALREADY_OPEN_1, e, fileName);
            }
        }
        throw e;
    }
    lockFile();
    readStaticHeader();
    freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize);
    fileLength = file.length();
    pageCount = (int) (fileLength / pageSize);
    if (pageCount < MIN_PAGE_COUNT) {
        // too small to be a valid database: recreate it (if allowed)
        if (database.isReadOnly()) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    fileName + " pageCount: " + pageCount);
        }
        file.releaseLock();
        file.close();
        FileUtils.delete(fileName);
        openNew();
        return;
    }
    readVariableHeader();
    log = new PageLog(this);
    log.openForReading(logKey, logFirstTrunkPage, logFirstDataPage);
    boolean old = database.isMultiVersion();
    // temporarily disabling multi-version concurrency, because
    // the multi-version index sometimes compares rows
    // and the LOB storage is not yet available.
    database.setMultiVersion(false);
    boolean isEmpty = recover();
    database.setMultiVersion(old);
    if (!database.isReadOnly()) {
        // stay in read mode until a write is actually required
        readMode = true;
        if (!isEmpty || !SysProperties.MODIFY_ON_WRITE || tempObjects != null) {
            openForWriting();
            removeOldTempIndexes();
        }
    }
}
// Switch from read mode to write mode: allocate a fresh transaction log
// and run an initial checkpoint. No-op when already writable or read-only.
private void openForWriting() {
    if (!readMode || database.isReadOnly()) {
        return;
    }
    readMode = false;
    // setting up the new log must not itself be logged
    recoveryRunning = true;
    log.free();
    logFirstTrunkPage = allocatePage();
    log.openForWriting(logFirstTrunkPage, false);
    recoveryRunning = false;
    // mark all pages as freed so checkpoint() re-checks and zeroes
    // the ones that are actually unused
    freed.set(0, pageCount, true);
    checkpoint();
}
// Drop indexes of temporary tables left over from a previous run,
// then reset the meta object map to only the meta index.
private void removeOldTempIndexes() {
    if (tempObjects != null) {
        // make the temporary indexes visible so they can be removed
        metaObjects.putAll(tempObjects);
        for (PageIndex index: tempObjects.values()) {
            if (index.getTable().isTemporary()) {
                index.truncate(pageStoreSession);
                index.remove(pageStoreSession);
            }
        }
        pageStoreSession.commit(true);
        tempObjects = null;
    }
    metaObjects.clear();
    metaObjects.put(-1, metaIndex);
}
/**
 * Persist the row count of every known index, so it does not need to be
 * recomputed when the database is reopened.
 */
private void writeIndexRowCounts() {
    for (PageIndex idx : metaObjects.values()) {
        idx.writeRowCount();
    }
}
/**
 * Write all changed pages from the cache back to the file,
 * in ascending page order.
 */
private void writeBack() {
    ArrayList<CacheObject> changed = cache.getAllChanged();
    Collections.sort(changed);
    for (int i = 0; i < changed.size(); i++) {
        writeBack(changed.get(i));
    }
}
/**
 * Flush all pending changes to disk, and switch the new transaction log.
 */
public synchronized void checkpoint() {
    trace.debug("checkpoint");
    if (log == null || readMode || database.isReadOnly() || backupLevel > 0) {
        // the file was never fully opened, or is read-only,
        // or checkpoint is currently disabled
        return;
    }
    database.checkPowerOff();
    writeIndexRowCounts();
    log.checkpoint();
    writeBack();
    // old log sections whose transactions are all committed can be removed
    int firstUncommittedSection = getFirstUncommittedSection();
    log.removeUntil(firstUncommittedSection);
    // write back the free list
    writeBack();
    // ensure the free list is backed up again
    log.checkpoint();
    if (trace.isDebugEnabled()) {
        trace.debug("writeFree");
    }
    // zero out pages that became free; 'test' holds the first 16 bytes
    // of a page, read to skip pages that are already zeroed
    byte[] test = new byte[16];
    byte[] empty = new byte[pageSize];
    for (int i = PAGE_ID_FREE_LIST_ROOT; i < pageCount; i++) {
        if (isUsed(i)) {
            freed.clear(i);
        } else if (!freed.get(i)) {
            if (trace.isDebugEnabled()) {
                trace.debug("free " + i);
            }
            file.seek((long) i << pageSizeShift);
            file.readFully(test, 0, 16);
            // only overwrite when the first byte is not already 0
            if (test[0] != 0) {
                file.seek((long) i << pageSizeShift);
                file.write(empty, 0, pageSize);
                writeCount++;
            }
            freed.set(i);
        }
    }
}
/**
 * Shrink the file so there are no empty pages at the end.
 *
 * @param compactMode 0 if no compacting should happen, otherwise
 *      TransactionCommand.SHUTDOWN_COMPACT or TransactionCommand.SHUTDOWN_DEFRAG
 */
public synchronized void compact(int compactMode) {
    if (!database.getSettings().pageStoreTrim) {
        return;
    }
    if (SysProperties.MODIFY_ON_WRITE && readMode &&
            compactMode == 0) {
        return;
    }
    openForWriting();
    // find the last used page
    int lastUsed = -1;
    for (int i = getFreeListId(pageCount); i >= 0; i--) {
        lastUsed = getFreeList(i).getLastUsed();
        if (lastUsed != -1) {
            break;
        }
    }
    // open a new log at the very end
    // (to be truncated later)
    writeBack();
    log.free();
    // recoveryRunning suppresses undo logging while the log is relocated
    recoveryRunning = true;
    try {
        logFirstTrunkPage = lastUsed + 1;
        allocatePage(logFirstTrunkPage);
        log.openForWriting(logFirstTrunkPage, true);
        // ensure the free list is backed up again
        log.checkpoint();
    } finally {
        recoveryRunning = false;
    }
    long start = System.nanoTime();
    boolean isCompactFully = compactMode ==
            CommandInterface.SHUTDOWN_COMPACT;
    boolean isDefrag = compactMode ==
            CommandInterface.SHUTDOWN_DEFRAG;
    if (database.getSettings().defragAlways) {
        isCompactFully = isDefrag = true;
    }
    int maxCompactTime = database.getSettings().maxCompactTime;
    int maxMove = database.getSettings().maxCompactCount;
    if (isCompactFully || isDefrag) {
        // full compact/defrag has no time or move-count budget
        maxCompactTime = Integer.MAX_VALUE;
        maxMove = Integer.MAX_VALUE;
    }
    // phase 1: move used pages from the end of the file into free slots
    // near the start, scanning backwards in blocks
    int blockSize = isCompactFully ? COMPACT_BLOCK_SIZE : 1;
    int firstFree = MIN_PAGE_COUNT;
    for (int x = lastUsed, j = 0; x > MIN_PAGE_COUNT &&
            j < maxMove; x -= blockSize) {
        for (int full = x - blockSize + 1; full <= x; full++) {
            if (full > MIN_PAGE_COUNT && isUsed(full)) {
                synchronized (this) {
                    firstFree = getFirstFree(firstFree);
                    if (firstFree == -1 || firstFree >= full) {
                        // no free slot below the page: nothing left to do
                        j = maxMove;
                        break;
                    }
                    if (compact(full, firstFree)) {
                        j++;
                        long now = System.nanoTime();
                        if (now > start + TimeUnit.MILLISECONDS.toNanos(maxCompactTime)) {
                            // time budget exhausted
                            j = maxMove;
                            break;
                        }
                    }
                }
            }
        }
    }
    // phase 2 (defrag only): re-order pages into index scan order
    if (isDefrag) {
        log.checkpoint();
        writeBack();
        cache.clear();
        ArrayList<Table> tables = database.getAllTablesAndViews(false);
        recordedPagesList = New.arrayList();
        recordedPagesIndex = new IntIntHashMap();
        // record the order in which pages are read during full scans
        recordPageReads = true;
        Session sysSession = database.getSystemSession();
        for (Table table : tables) {
            if (!table.isTemporary() && TableType.TABLE == table.getTableType()) {
                Index scanIndex = table.getScanIndex(sysSession);
                Cursor cursor = scanIndex.find(sysSession, null, null);
                while (cursor.next()) {
                    cursor.get();
                }
                for (Index index : table.getIndexes()) {
                    if (index != scanIndex && index.canScan()) {
                        cursor = index.find(sysSession, null, null);
                        while (cursor.next()) {
                            // the data is already read
                        }
                    }
                }
            }
        }
        recordPageReads = false;
        // move each recorded page to the next target position, using a
        // free page as scratch space for the three-way swap
        int target = MIN_PAGE_COUNT - 1;
        int temp = 0;
        for (int i = 0, size = recordedPagesList.size(); i < size; i++) {
            log.checkpoint();
            writeBack();
            int source = recordedPagesList.get(i);
            Page pageSource = getPage(source);
            if (!pageSource.canMove()) {
                continue;
            }
            while (true) {
                Page pageTarget = getPage(++target);
                if (pageTarget == null || pageTarget.canMove()) {
                    break;
                }
            }
            if (target == source) {
                continue;
            }
            temp = getFirstFree(temp);
            if (temp == -1) {
                DbException.throwInternalError("no free page for defrag");
            }
            cache.clear();
            swap(source, target, temp);
            // keep the recorded position map consistent after the swap
            int index = recordedPagesIndex.get(target);
            if (index != IntIntHashMap.NOT_FOUND) {
                recordedPagesList.set(index, source);
                recordedPagesIndex.put(source, index);
            }
            recordedPagesList.set(i, target);
            recordedPagesIndex.put(target, i);
        }
        recordedPagesList = null;
        recordedPagesIndex = null;
    }
    // TODO can most likely be simplified
    checkpoint();
    log.checkpoint();
    writeIndexRowCounts();
    log.checkpoint();
    writeBack();
    commit(pageStoreSession);
    writeBack();
    log.checkpoint();
    log.free();
    // truncate the log
    recoveryRunning = true;
    try {
        setLogFirstPage(++logKey, 0, 0);
    } finally {
        recoveryRunning = false;
    }
    writeBack();
    // re-compute the last used page after all moves, then truncate the file
    for (int i = getFreeListId(pageCount); i >= 0; i--) {
        lastUsed = getFreeList(i).getLastUsed();
        if (lastUsed != -1) {
            break;
        }
    }
    int newPageCount = lastUsed + 1;
    if (newPageCount < pageCount) {
        freed.set(newPageCount, pageCount, false);
    }
    pageCount = newPageCount;
    // the easiest way to remove superfluous entries
    freeLists.clear();
    trace.debug("pageCount: " + pageCount);
    long newLength = (long) pageCount << pageSizeShift;
    if (file.length() != newLength) {
        file.setLength(newLength);
        writeCount++;
    }
}
/**
 * Get the id of the first free page at or after the given position,
 * or -1 if there is none.
 *
 * @param start the first page id to consider
 * @return the free page id, or -1
 */
private int getFirstFree(int start) {
    int free = -1;
    // NOTE(review): the loop condition tests 'start', which never changes
    // here; it only becomes false when pageCount shrinks or start is past
    // the end. Termination otherwise relies on getFreeList() growing the
    // file until a free page is found - TODO confirm this is intended.
    for (int id = getFreeListId(start); start < pageCount; id++) {
        free = getFreeList(id).getFirstFree(start);
        if (free != -1) {
            break;
        }
    }
    return free;
}
/**
 * Swap the contents of pages a and b, using the given free page as
 * scratch space. Both pages must be movable; the free page must be
 * unused.
 *
 * @param a the first page id
 * @param b the second page id
 * @param free the id of an unused page used as temporary storage
 */
private void swap(int a, int b, int free) {
    if (a < MIN_PAGE_COUNT || b < MIN_PAGE_COUNT) {
        // include the usage state in the error instead of printing
        // debug output to stdout
        DbException.throwInternalError("can't swap " + a + " and " + b +
                " (used: " + isUsed(a) + " " + isUsed(b) + ")");
    }
    Page f = (Page) cache.get(free);
    if (f != null) {
        DbException.throwInternalError("not free: " + f);
    }
    if (trace.isDebugEnabled()) {
        trace.debug("swap " + a + " and " + b + " via " + free);
    }
    // move a -> free, b -> a, free -> b
    Page pageA = null;
    if (isUsed(a)) {
        pageA = getPage(a);
        if (pageA != null) {
            pageA.moveTo(pageStoreSession, free);
        }
        free(a);
    }
    if (free != b) {
        if (isUsed(b)) {
            Page pageB = getPage(b);
            if (pageB != null) {
                pageB.moveTo(pageStoreSession, a);
            }
            free(b);
        }
        if (pageA != null) {
            // the copy of a now lives at 'free'; move it to b
            f = getPage(free);
            if (f != null) {
                f.moveTo(pageStoreSession, b);
            }
            free(free);
        }
    }
}
/**
 * Try to move the contents of one used page to a free page with a lower
 * id. Stale transaction-log pages are simply freed instead of moved.
 *
 * @param full the id of the used page to move
 * @param free the id of the target free page (must be lower than full)
 * @return true if the page was moved or freed
 */
private boolean compact(int full, int free) {
    if (full < MIN_PAGE_COUNT || free == -1 || free >= full || !isUsed(full)) {
        return false;
    }
    Page f = (Page) cache.get(free);
    if (f != null) {
        DbException.throwInternalError("not free: " + f);
    }
    Page p = getPage(full);
    if (p == null) {
        // empty page: just mark it free
        freePage(full);
    } else if (p instanceof PageStreamData || p instanceof PageStreamTrunk) {
        if (p.getPos() < log.getMinPageId()) {
            // an old transaction log page
            // probably a leftover from a crash
            freePage(full);
        }
    } else {
        if (trace.isDebugEnabled()) {
            trace.debug("move " + p.getPos() + " to " + free);
        }
        try {
            p.moveTo(pageStoreSession, free);
        } finally {
            // bump the change counter even if the move failed
            changeCount++;
            if (SysProperties.CHECK && changeCount < 0) {
                throw DbException.throwInternalError(
                        "changeCount has wrapped");
            }
        }
    }
    return true;
}
/**
 * Read a page from the store. The page is looked up in the cache first;
 * on a miss it is read from disk, checksum-verified, materialized as the
 * concrete Page subtype indicated by its type byte, and cached.
 *
 * @param pageId the page id
 * @return the page, or null if the page is empty
 * @throws DbException with FILE_CORRUPTED_1 if the checksum is wrong,
 *      the referenced index is unknown or of the wrong kind, or the
 *      page type byte is invalid
 */
public synchronized Page getPage(int pageId) {
    Page p = (Page) cache.get(pageId);
    if (p != null) {
        return p;
    }
    Data data = createData();
    readPage(pageId, data);
    int type = data.readByte();
    if (type == Page.TYPE_EMPTY) {
        return null;
    }
    // skip the checksum (short) and the parent page id (int);
    // the checksum is verified over the raw bytes below
    data.readShortInt();
    data.readInt();
    if (!checksumTest(data.getBytes(), pageId, pageSize)) {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "wrong checksum");
    }
    // the FLAG_LAST bit is orthogonal to the page type
    switch (type & ~Page.FLAG_LAST) {
    case Page.TYPE_FREE_LIST:
        p = PageFreeList.read(this, data, pageId);
        break;
    case Page.TYPE_DATA_LEAF: {
        int indexId = data.readVarInt();
        PageIndex idx = metaObjects.get(indexId);
        if (idx == null) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "index not found " + indexId);
        }
        if (!(idx instanceof PageDataIndex)) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "not a data index " + indexId + " " + idx);
        }
        PageDataIndex index = (PageDataIndex) idx;
        if (statistics != null) {
            statisticsIncrement(index.getTable().getName() + "." +
                    index.getName() + " read");
        }
        p = PageDataLeaf.read(index, data, pageId);
        break;
    }
    case Page.TYPE_DATA_NODE: {
        int indexId = data.readVarInt();
        PageIndex idx = metaObjects.get(indexId);
        if (idx == null) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "index not found " + indexId);
        }
        if (!(idx instanceof PageDataIndex)) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "not a data index " + indexId + " " + idx);
        }
        PageDataIndex index = (PageDataIndex) idx;
        if (statistics != null) {
            statisticsIncrement(index.getTable().getName() + "." +
                    index.getName() + " read");
        }
        p = PageDataNode.read(index, data, pageId);
        break;
    }
    case Page.TYPE_DATA_OVERFLOW: {
        p = PageDataOverflow.read(this, data, pageId);
        if (statistics != null) {
            statisticsIncrement("overflow read");
        }
        break;
    }
    case Page.TYPE_BTREE_LEAF: {
        int indexId = data.readVarInt();
        PageIndex idx = metaObjects.get(indexId);
        if (idx == null) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "index not found " + indexId);
        }
        if (!(idx instanceof PageBtreeIndex)) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "not a btree index " + indexId + " " + idx);
        }
        PageBtreeIndex index = (PageBtreeIndex) idx;
        if (statistics != null) {
            statisticsIncrement(index.getTable().getName() + "." +
                    index.getName() + " read");
        }
        p = PageBtreeLeaf.read(index, data, pageId);
        break;
    }
    case Page.TYPE_BTREE_NODE: {
        int indexId = data.readVarInt();
        PageIndex idx = metaObjects.get(indexId);
        if (idx == null) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "index not found " + indexId);
        }
        if (!(idx instanceof PageBtreeIndex)) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "not a btree index " + indexId + " " + idx);
        }
        PageBtreeIndex index = (PageBtreeIndex) idx;
        if (statistics != null) {
            statisticsIncrement(index.getTable().getName() +
                    "." + index.getName() + " read");
        }
        p = PageBtreeNode.read(index, data, pageId);
        break;
    }
    case Page.TYPE_STREAM_TRUNK:
        p = PageStreamTrunk.read(this, data, pageId);
        break;
    case Page.TYPE_STREAM_DATA:
        p = PageStreamData.read(this, data, pageId);
        break;
    default:
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "page=" + pageId + " type=" + type);
    }
    cache.put(p);
    return p;
}
/**
 * Determine the oldest log section that still contains uncommitted data
 * of any open session. Sections before it may be removed.
 */
private int getFirstUncommittedSection() {
    trace.debug("getFirstUncommittedSection");
    int result = log.getLogSectionId();
    for (Session s : database.getSessions(true)) {
        int first = s.getFirstUncommittedLog();
        if (first != Session.LOG_WRITTEN) {
            result = Math.min(result, first);
        }
    }
    return result;
}
/**
 * Read the static file header (page size and format versions), located
 * directly after the FileStore header. If the file was written by a
 * newer version, either fail (read format too new) or fall back to
 * read-only mode (write format too new).
 */
private void readStaticHeader() {
    file.seek(FileStore.HEADER_LENGTH);
    Data page = Data.create(database,
            new byte[PAGE_SIZE_MIN - FileStore.HEADER_LENGTH]);
    file.readFully(page.getBytes(), 0,
            PAGE_SIZE_MIN - FileStore.HEADER_LENGTH);
    readCount++;
    setPageSize(page.readInt());
    int writeVersion = page.readByte();
    int readVersion = page.readByte();
    if (readVersion > READ_VERSION) {
        // cannot even read this format
        throw DbException.get(
                ErrorCode.FILE_VERSION_ERROR_1, fileName);
    }
    if (writeVersion > WRITE_VERSION) {
        // readable but not writable: re-open read-only
        close();
        database.setReadOnly(true);
        accessMode = "r";
        file = database.openFile(fileName, accessMode, true);
    }
}
/**
 * Read the variable file header (write counter and transaction log
 * position). Two redundant copies are stored in pages 1 and 2; the
 * first copy with a valid CRC wins. Fails if both copies are corrupt.
 */
private void readVariableHeader() {
    Data page = createData();
    for (int i = 1;; i++) {
        if (i == 3) {
            // both copies were corrupt
            throw DbException.get(
                    ErrorCode.FILE_CORRUPTED_1, fileName);
        }
        page.reset();
        readPage(i, page);
        // CRC covers everything after the 4-byte checksum field itself
        CRC32 crc = new CRC32();
        crc.update(page.getBytes(), 4, pageSize - 4);
        int expected = (int) crc.getValue();
        int got = page.readInt();
        if (expected == got) {
            writeCountBase = page.readLong();
            logKey = page.readInt();
            logFirstTrunkPage = page.readInt();
            logFirstDataPage = page.readInt();
            break;
        }
    }
}
/**
 * Set the page size. The size must be a power of two. This method must be
 * called before opening.
 *
 * @param size the page size
 * @throws DbException with FILE_CORRUPTED_1 if the size is out of range
 *      or not a power of two
 */
public void setPageSize(int size) {
    if (size < PAGE_SIZE_MIN || size > PAGE_SIZE_MAX) {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                fileName + " pageSize: " + size);
    }
    // a power of two has exactly one bit set
    if (Integer.bitCount(size) != 1) {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1, fileName);
    }
    pageSize = size;
    emptyPage = createData();
    // log2(size), used for fast pageId -> file offset conversion
    pageSizeShift = Integer.numberOfTrailingZeros(size);
}
/**
 * Write the static file header (page size, write version, read version)
 * directly after the FileStore header.
 */
private void writeStaticHeader() {
    Data page = Data.create(database, new byte[pageSize - FileStore.HEADER_LENGTH]);
    page.writeInt(pageSize);
    page.writeByte((byte) WRITE_VERSION);
    page.writeByte((byte) READ_VERSION);
    file.seek(FileStore.HEADER_LENGTH);
    file.write(page.getBytes(), 0, pageSize - FileStore.HEADER_LENGTH);
    writeCount++;
}
/**
 * Set the trunk page and data page id of the log, and persist them in
 * the variable file header.
 *
 * @param logKey the log key of the trunk page
 * @param trunkPageId the trunk page id
 * @param dataPageId the data page id
 */
void setLogFirstPage(int logKey, int trunkPageId, int dataPageId) {
    if (trace.isDebugEnabled()) {
        trace.debug("setLogFirstPage key: " + logKey +
                " trunk: "+ trunkPageId +" data: " + dataPageId);
    }
    this.logKey = logKey;
    this.logFirstTrunkPage = trunkPageId;
    this.logFirstDataPage = dataPageId;
    // make the new log position durable
    writeVariableHeader();
}
/**
 * Write the variable file header (CRC, write counter, log position) to
 * its two redundant copies in pages 1 and 2.
 */
private void writeVariableHeader() {
    trace.debug("writeVariableHeader");
    if (logMode == LOG_MODE_SYNC) {
        // in sync mode, make sure earlier writes are on disk before
        // the header points at them
        file.sync();
    }
    Data page = createData();
    // placeholder for the CRC, filled in below
    page.writeInt(0);
    page.writeLong(getWriteCountTotal());
    page.writeInt(logKey);
    page.writeInt(logFirstTrunkPage);
    page.writeInt(logFirstDataPage);
    CRC32 crc = new CRC32();
    crc.update(page.getBytes(), 4, pageSize - 4);
    page.setInt(0, (int) crc.getValue());
    // write both redundant copies (pages 1 and 2)
    file.seek(pageSize);
    file.write(page.getBytes(), 0, pageSize);
    file.seek(pageSize + pageSize);
    file.write(page.getBytes(), 0, pageSize);
    // don't increment the write counter, because it was just written
}
/**
 * Close the file without further writing. Releases the file lock; the
 * file reference is cleared even if closing fails.
 */
public synchronized void close() {
    trace.debug("close");
    if (log != null) {
        log.close();
        log = null;
    }
    if (file != null) {
        try {
            file.releaseLock();
            file.close();
        } finally {
            // mark as closed even if releasing/closing threw
            file = null;
        }
    }
}
/**
 * Flush the transaction log (no-op if the store is closed).
 */
@Override
public synchronized void flushLog() {
    if (file == null) {
        return;
    }
    log.flush();
}
/**
 * Flush the transaction log and sync the file.
 */
public synchronized void sync() {
    if (file == null) {
        return;
    }
    log.flush();
    file.sync();
}
/**
 * Get the trace object of this store.
 *
 * @return the trace object
 */
@Override
public Trace getTrace() {
    return trace;
}
/**
 * Write a single dirty page back to the file and mark it clean.
 *
 * @param obj the cache object (always a Page)
 */
@Override
public synchronized void writeBack(CacheObject obj) {
    Page page = (Page) obj;
    if (trace.isDebugEnabled()) {
        trace.debug("writeBack " + page);
    }
    page.write();
    page.setChanged(false);
}
/**
 * Write an undo log entry if required. Skipped when logging is off, or
 * during recovery, or when an undo entry for this page already exists
 * in the current log.
 *
 * @param page the page
 * @param old the old data (if known) or null; if null, the current page
 *      content is read from disk
 */
public synchronized void logUndo(Page page, Data old) {
    if (logMode == LOG_MODE_OFF) {
        return;
    }
    checkOpen();
    database.checkWritingAllowed();
    if (!recoveryRunning) {
        int pos = page.getPos();
        if (!log.getUndo(pos)) {
            if (old == null) {
                // read the pre-image from disk
                old = readPage(pos);
            }
            openForWriting();
            log.addUndo(pos, old);
        }
    }
}
/**
 * Update a page: mark it changed, ensure its undo entry exists, mark its
 * page id allocated, and refresh it in the cache.
 *
 * @param page the page
 */
public synchronized void update(Page page) {
    if (trace.isDebugEnabled()) {
        if (!page.isChanged()) {
            trace.debug("updateRecord " + page.toString());
        }
    }
    checkOpen();
    database.checkWritingAllowed();
    page.setChanged(true);
    int pos = page.getPos();
    if (SysProperties.CHECK && !recoveryRunning) {
        // ensure the undo entry is already written
        if (logMode != LOG_MODE_OFF) {
            log.addUndo(pos, null);
        }
    }
    allocatePage(pos);
    cache.update(pos, page);
}
/**
 * Compute which free-list page is responsible for the given page id.
 */
private int getFreeListId(int pageId) {
    int offset = pageId - PAGE_ID_FREE_LIST_ROOT;
    return offset / freeListPagesPerList;
}
/**
 * Get the free-list page that tracks the given page id.
 */
private PageFreeList getFreeListForPage(int pageId) {
    int listId = getFreeListId(pageId);
    return getFreeList(listId);
}
/**
 * Get the free-list page with the given index, creating it (and growing
 * the file) if necessary. Resolved lists are memoized in 'freeLists'.
 *
 * @param i the free-list index
 * @return the free-list page
 */
private PageFreeList getFreeList(int i) {
    PageFreeList list = null;
    if (i < freeLists.size()) {
        list = freeLists.get(i);
        if (list != null) {
            // already cached
            return list;
        }
    }
    int p = PAGE_ID_FREE_LIST_ROOT + i * freeListPagesPerList;
    // grow the file until the free-list page position exists
    while (p >= pageCount) {
        increaseFileSize();
    }
    if (p < pageCount) {
        list = (PageFreeList) getPage(p);
    }
    if (list == null) {
        // page is empty on disk: create a fresh free-list page
        list = PageFreeList.create(this, p);
        cache.put(list);
    }
    // extend the memo list and store the result
    while (freeLists.size() <= i) {
        freeLists.add(null);
    }
    freeLists.set(i, list);
    return list;
}
/**
 * Mark a page as free in its free list, and remember the lowest
 * free-list index that may contain a free page.
 */
private void freePage(int pageId) {
    int listId = getFreeListId(pageId);
    PageFreeList freeList = getFreeList(listId);
    firstFreeListIndex = Math.min(listId, firstFreeListIndex);
    freeList.free(pageId);
}
/**
 * Set the bit of an already allocated page.
 *
 * @param pageId the page to allocate
 */
void allocatePage(int pageId) {
    getFreeListForPage(pageId).allocate(pageId);
}
/**
 * Check whether the given page is currently marked as used.
 */
private boolean isUsed(int pageId) {
    PageFreeList freeList = getFreeListForPage(pageId);
    return freeList.isUsed(pageId);
}
/**
 * Allocate a number of pages.
 *
 * @param list the list where to add the allocated pages
 * @param pagesToAllocate the number of pages to allocate
 * @param exclude the exclude list
 * @param after all allocated pages are higher than this page
 */
void allocatePages(IntArray list, int pagesToAllocate, BitField exclude,
        int after) {
    list.ensureCapacity(list.size() + pagesToAllocate);
    int last = after;
    for (int n = 0; n < pagesToAllocate; n++) {
        // each allocation starts searching after the previous one,
        // so the pages are strictly increasing
        last = allocatePage(exclude, last);
        list.add(last);
    }
}
/**
 * Allocate a page.
 *
 * @return the page id
 */
public synchronized int allocatePage() {
    openForWriting();
    int pos = allocatePage(null, 0);
    if (!recoveryRunning) {
        if (logMode != LOG_MODE_OFF) {
            // newly allocated pages start out empty, so the undo
            // pre-image is the empty page
            log.addUndo(pos, emptyPage);
        }
    }
    return pos;
}
/**
 * Find and allocate a free page, scanning the free lists starting from
 * the lowest list known to possibly contain a free page.
 *
 * @param exclude pages that must not be allocated, or null
 * @param first all returned pages are higher than this page
 * @return the allocated page id
 */
private int allocatePage(BitField exclude, int first) {
    int page;
    // termination relies on getFreeList() growing the file, which
    // eventually yields a list with free space
    for (int i = firstFreeListIndex;; i++) {
        PageFreeList list = getFreeList(i);
        page = list.allocate(exclude, first);
        if (page >= 0) {
            firstFreeListIndex = i;
            break;
        }
    }
    // ensure the file is large enough to contain the allocated page
    while (page >= pageCount) {
        increaseFileSize();
    }
    if (trace.isDebugEnabled()) {
        // trace.debug("allocatePage " + pos);
    }
    return page;
}
/**
 * Grow the file by the configured increment: at least INCREMENT_KB,
 * scaled up to a minimum percentage of the current size, and capped by
 * the pageStoreMaxGrowth setting.
 */
private void increaseFileSize() {
    int increment = INCREMENT_KB * 1024 / pageSize;
    int percent = pageCount * INCREMENT_PERCENT_MIN / 100;
    if (increment < percent) {
        // round up to a whole multiple of the base increment
        increment = (1 + (percent / increment)) * increment;
    }
    increment = Math.min(increment, database.getSettings().pageStoreMaxGrowth);
    increaseFileSize(increment);
}
/**
 * Grow the file by the given number of pages. The new pages are marked
 * as already zeroed in the 'freed' bit set.
 *
 * @param increment the number of pages to add
 */
private void increaseFileSize(int increment) {
    int newPageCount = pageCount + increment;
    for (int i = pageCount; i < newPageCount; i++) {
        freed.set(i);
    }
    pageCount = newPageCount;
    long newLength = (long) pageCount << pageSizeShift;
    file.setLength(newLength);
    writeCount++;
    fileLength = newLength;
}
/**
 * Add a page to the free list. The undo log entry must have been written.
 *
 * @param pageId the page id
 */
public synchronized void free(int pageId) {
    free(pageId, true);
}
/**
 * Add a page to the free list.
 *
 * @param pageId the page id
 * @param undo if the undo record must have been written
 */
void free(int pageId, boolean undo) {
    if (trace.isDebugEnabled()) {
        // trace.debug("free " + pageId + " " + undo);
    }
    cache.remove(pageId);
    if (SysProperties.CHECK && !recoveryRunning && undo) {
        // ensure the undo entry is already written
        if (logMode != LOG_MODE_OFF) {
            log.addUndo(pageId, null);
        }
    }
    freePage(pageId);
    if (recoveryRunning) {
        // during recovery the freed page is physically zeroed
        writePage(pageId, createData());
        if (reservedPages != null && reservedPages.containsKey(pageId)) {
            // re-allocate the page if it is used later on again
            int latestPos = reservedPages.get(pageId);
            if (latestPos > log.getLogPos()) {
                allocatePage(pageId);
            }
        }
    }
}
/**
 * Add a page to the free list. The page is not used, therefore doesn't need
 * to be overwritten.
 *
 * @param pageId the page id
 */
void freeUnused(int pageId) {
    if (trace.isDebugEnabled()) {
        trace.debug("freeUnused " + pageId);
    }
    cache.remove(pageId);
    freePage(pageId);
    // already empty: mark as zeroed so checkpoint skips it
    freed.set(pageId);
}
/**
 * Create a data object.
 *
 * @return the data page.
 */
public Data createData() {
    byte[] buffer = new byte[pageSize];
    return Data.create(database, buffer);
}
/**
 * Read a page.
 *
 * @param pos the page id
 * @return the page
 */
public synchronized Data readPage(int pos) {
    Data result = createData();
    readPage(pos, result);
    return result;
}
/**
 * Read a page.
 *
 * @param pos the page id
 * @param page the page
 * @throws DbException with FILE_CORRUPTED_1 if the position is out of
 *      range
 */
void readPage(int pos, Data page) {
    if (recordPageReads) {
        // during defrag, remember first-read order of each page
        if (pos >= MIN_PAGE_COUNT &&
                recordedPagesIndex.get(pos) == IntIntHashMap.NOT_FOUND) {
            recordedPagesIndex.put(pos, recordedPagesList.size());
            recordedPagesList.add(pos);
        }
    }
    if (pos < 0 || pos >= pageCount) {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1, pos +
                " of " + pageCount);
    }
    file.seek((long) pos << pageSizeShift);
    file.readFully(page.getBytes(), 0, pageSize);
    readCount++;
}
/**
 * Get the page size.
 *
 * @return the page size in bytes
 */
public int getPageSize() {
    return pageSize;
}
/**
 * Get the number of pages (including free pages).
 *
 * @return the page count
 */
public int getPageCount() {
    return pageCount;
}
/**
 * Write a page. The checksum is computed and stored in the page bytes
 * before writing.
 *
 * @param pageId the page id (must be positive)
 * @param data the data
 */
public synchronized void writePage(int pageId, Data data) {
    if (pageId <= 0) {
        DbException.throwInternalError("write to page " + pageId);
    }
    byte[] bytes = data.getBytes();
    if (SysProperties.CHECK) {
        // sanity check: free-list pages must appear exactly at their
        // fixed positions, and nowhere else
        boolean shouldBeFreeList = (pageId - PAGE_ID_FREE_LIST_ROOT) %
                freeListPagesPerList == 0;
        boolean isFreeList = bytes[0] == Page.TYPE_FREE_LIST;
        if (bytes[0] != 0 && shouldBeFreeList != isFreeList) {
            throw DbException.throwInternalError();
        }
    }
    checksumSet(bytes, pageId);
    file.seek((long) pageId << pageSizeShift);
    file.write(bytes, 0, pageSize);
    writeCount++;
}
/**
 * Remove a page from the cache.
 *
 * @param pageId the page id
 */
public synchronized void removeFromCache(int pageId) {
    this.cache.remove(pageId);
}
/**
 * Get the database this store belongs to.
 *
 * @return the database
 */
Database getDatabase() {
    return database;
}
/**
 * Run recovery. Replays the transaction log in three stages (undo,
 * allocate, redo), then closes or keeps temporary indexes for later
 * removal, and leaves the store in a clean state. If in-doubt
 * transactions remain, the database is switched to read-only.
 *
 * @return whether the transaction log was empty
 */
private boolean recover() {
    trace.debug("log recover");
    recoveryRunning = true;
    boolean isEmpty = true;
    // stage 1: restore pre-images of changed pages
    isEmpty &= log.recover(PageLog.RECOVERY_STAGE_UNDO);
    if (reservedPages != null) {
        // keep future index root pages from being allocated for data
        for (int r : reservedPages.keySet()) {
            if (trace.isDebugEnabled()) {
                trace.debug("reserve " + r);
            }
            allocatePage(r);
        }
    }
    // stage 2: re-establish page allocation state
    isEmpty &= log.recover(PageLog.RECOVERY_STAGE_ALLOCATE);
    openMetaIndex();
    readMetaData();
    // stage 3: re-apply committed row operations
    isEmpty &= log.recover(PageLog.RECOVERY_STAGE_REDO);
    boolean setReadOnly = false;
    if (!database.isReadOnly()) {
        if (log.getInDoubtTransactions().isEmpty()) {
            log.recoverEnd();
            int firstUncommittedSection = getFirstUncommittedSection();
            log.removeUntil(firstUncommittedSection);
        } else {
            // unresolved prepared transactions: don't modify the file
            setReadOnly = true;
        }
    }
    PageDataIndex systemTable = (PageDataIndex) metaObjects.get(0);
    isNew = systemTable == null;
    for (PageIndex index : metaObjects.values()) {
        if (index.getTable().isTemporary()) {
            // temporary indexes are removed after opening
            if (tempObjects == null) {
                tempObjects = new HashMap<>();
            }
            tempObjects.put(index.getId(), index);
        } else {
            index.close(pageStoreSession);
        }
    }
    allocatePage(PAGE_ID_META_ROOT);
    writeIndexRowCounts();
    recoveryRunning = false;
    reservedPages = null;
    writeBack();
    // clear the cache because it contains pages with closed indexes
    cache.clear();
    freeLists.clear();
    metaObjects.clear();
    metaObjects.put(-1, metaIndex);
    if (setReadOnly) {
        database.setReadOnly(true);
    }
    trace.debug("log recover done");
    return isEmpty;
}
/**
 * A record is added to a table, or removed from a table.
 *
 * @param session the session
 * @param tableId the table id
 * @param row the row to add
 * @param add true if the row is added, false if it is removed
 */
public synchronized void logAddOrRemoveRow(Session session, int tableId,
        Row row, boolean add) {
    // nothing to log while recovering or with logging disabled
    if (logMode != LOG_MODE_OFF && !recoveryRunning) {
        log.logAddOrRemoveRow(session, tableId, row, add);
    }
}
/**
 * Mark a committed transaction. If the log has grown past half the
 * maximum size, a checkpoint is attempted to truncate it; when
 * truncation is impossible (e.g. a long-running uncommitted
 * transaction), a warning is logged once and the base size is advanced
 * to suppress repeated checkpoints.
 *
 * @param session the session
 */
public synchronized void commit(Session session) {
    checkOpen();
    openForWriting();
    log.commit(session.getId());
    long size = log.getSize();
    if (size - logSizeBase > maxLogSize / 2) {
        int firstSection = log.getLogFirstSectionId();
        checkpoint();
        int newSection = log.getLogSectionId();
        if (newSection - firstSection <= 2) {
            // one section is always kept, and checkpoint
            // advances two sections each time it is called
            return;
        }
        long newSize = log.getSize();
        if (newSize < size || size < maxLogSize) {
            // the checkpoint freed space (or the log is still small):
            // re-enable the warning
            ignoreBigLog = false;
            return;
        }
        if (!ignoreBigLog) {
            // warn only once until the log shrinks again
            ignoreBigLog = true;
            trace.error(null,
                    "Transaction log could not be truncated; size: " +
                    (newSize / 1024 / 1024) + " MB");
        }
        logSizeBase = log.getSize();
    }
}
/**
 * Prepare a transaction.
 *
 * @param session the session
 * @param transaction the name of the transaction
 */
public synchronized void prepareCommit(Session session, String transaction) {
    log.prepareCommit(session, transaction);
}
/**
 * Check whether this is a new database. Determined during recovery:
 * the database is new if the system table does not exist yet.
 *
 * @return true if it is
 */
public boolean isNew() {
    return isNew;
}
/**
 * Reserve the page if this is a index root page entry.
 *
 * @param logPos the redo log position
 * @param tableId the table id
 * @param row the row
 */
void allocateIfIndexRoot(int logPos, int tableId, Row row) {
    if (tableId != META_TABLE_ID) {
        return;
    }
    // column 3 of a meta row is the root page id of the index
    int rootPageId = row.getValue(3).getInt();
    if (reservedPages == null) {
        reservedPages = new HashMap<>();
    }
    reservedPages.put(rootPageId, logPos);
}
/**
 * Redo a delete in a table.
 *
 * @param tableId the object id of the table
 * @param key the key of the row to delete
 */
void redoDelete(int tableId, long key) {
    Index index = metaObjects.get(tableId);
    PageDataIndex scan = (PageDataIndex) index;
    Row row = scan.getRowWithKey(key);
    if (row == null || row.getKey() != key) {
        // best-effort: a missing row is logged and skipped, not fatal
        trace.error(null, "Entry not found: " + key +
                " found instead: " + row + " - ignoring");
        return;
    }
    redo(tableId, row, false);
}
/**
 * Redo a change in a table. Changes to the meta table additionally
 * update the in-memory index map.
 *
 * @param tableId the object id of the table
 * @param row the row
 * @param add true if the record is added, false if deleted
 */
void redo(int tableId, Row row, boolean add) {
    if (tableId == META_TABLE_ID) {
        // keep the meta objects in sync before touching the data
        if (add) {
            addMeta(row, pageStoreSession, true);
        } else {
            removeMeta(row);
        }
    }
    Index index = metaObjects.get(tableId);
    if (index == null) {
        throw DbException.throwInternalError(
                "Table not found: " + tableId + " " + row + " " + add);
    }
    Table table = index.getTable();
    if (add) {
        table.addRow(pageStoreSession, row);
    } else {
        table.removeRow(pageStoreSession, row);
    }
}
/**
 * Redo a truncate.
 *
 * @param tableId the object id of the table
 */
void redoTruncate(int tableId) {
    Index idx = metaObjects.get(tableId);
    idx.getTable().truncate(pageStoreSession);
}
/**
 * Create the in-memory representation of the meta table (PAGE_INDEX),
 * which stores one row per persistent index, and reset the meta object
 * map to contain only the meta index itself (under key -1).
 */
private void openMetaIndex() {
    CreateTableData data = new CreateTableData();
    ArrayList<Column> cols = data.columns;
    // meta row layout: id, type, parent table id, root page id,
    // options string, column list string
    cols.add(new Column("ID", Value.INT));
    cols.add(new Column("TYPE", Value.INT));
    cols.add(new Column("PARENT", Value.INT));
    cols.add(new Column("HEAD", Value.INT));
    cols.add(new Column("OPTIONS", Value.STRING));
    cols.add(new Column("COLUMNS", Value.STRING));
    metaSchema = new Schema(database, 0, "", null, true);
    data.schema = metaSchema;
    data.tableName = "PAGE_INDEX";
    data.id = META_TABLE_ID;
    data.temporary = false;
    data.persistData = true;
    data.persistIndexes = true;
    data.create = false;
    data.session = pageStoreSession;
    metaTable = new RegularTable(data);
    metaIndex = (PageDataIndex) metaTable.getScanIndex(
            pageStoreSession);
    metaObjects.clear();
    metaObjects.put(-1, metaIndex);
}
/**
 * Re-create all persistent indexes from the meta table. Two passes are
 * needed: tables (data indexes) must exist before their secondary
 * indexes can be attached.
 */
private void readMetaData() {
    Cursor cursor = metaIndex.find(pageStoreSession, null, null);
    // first, create all tables
    while (cursor.next()) {
        Row row = cursor.get();
        int type = row.getValue(1).getInt();
        if (type == META_TYPE_DATA_INDEX) {
            addMeta(row, pageStoreSession, false);
        }
    }
    // now create all secondary indexes
    // otherwise the table might not be created yet
    cursor = metaIndex.find(pageStoreSession, null, null);
    while (cursor.next()) {
        Row row = cursor.get();
        int type = row.getValue(1).getInt();
        if (type != META_TYPE_DATA_INDEX) {
            addMeta(row, pageStoreSession, false);
        }
    }
}
/**
 * Remove the index described by the given meta row from its table,
 * schema/session bookkeeping, and the in-memory meta object map.
 *
 * @param row the meta table row (column 0 is the index id)
 */
private void removeMeta(Row row) {
    int id = row.getValue(0).getInt();
    PageIndex index = metaObjects.get(id);
    index.getTable().removeIndex(index);
    if (index instanceof PageBtreeIndex || index instanceof PageDelegateIndex) {
        if (index.isTemporary()) {
            pageStoreSession.removeLocalTempTableIndex(index);
        } else {
            index.getSchema().remove(index);
        }
    }
    index.remove(pageStoreSession);
    metaObjects.remove(id);
}
/**
 * Re-create an index (and, for data indexes, its table) from a meta
 * table row and register it in the meta object map.
 *
 * Meta row layout: 0=id, 1=type, 2=parent table id, 3=root page id,
 * 4=options (compare mode name, strength, "temp" flag, "d" for
 * delegate/primary-key, binary-unsigned flag), 5=comma-separated
 * column list (for btree indexes each entry is columnId or
 * columnId/sortType).
 *
 * @param row the meta table row
 * @param session the session to create the objects with
 * @param redo true when called while replaying the redo log; the root
 *      page is then cleared and reserved
 */
private void addMeta(Row row, Session session, boolean redo) {
    int id = row.getValue(0).getInt();
    int type = row.getValue(1).getInt();
    int parent = row.getValue(2).getInt();
    int rootPageId = row.getValue(3).getInt();
    String[] options = StringUtils.arraySplit(
            row.getValue(4).getString(), ',', false);
    String columnList = row.getValue(5).getString();
    String[] columns = StringUtils.arraySplit(columnList, ',', false);
    Index meta;
    if (trace.isDebugEnabled()) {
        trace.debug("addMeta id="+ id +" type=" + type +
                " root=" + rootPageId + " parent=" + parent + " columns=" + columnList);
    }
    if (redo && rootPageId != 0) {
        // ensure the page is empty, but not used by regular data
        writePage(rootPageId, createData());
        allocatePage(rootPageId);
    }
    metaRootPageId.put(id, rootPageId);
    if (type == META_TYPE_DATA_INDEX) {
        // a table scan index: re-create the table with placeholder
        // INT columns named C0..Cn (real types are not needed here)
        CreateTableData data = new CreateTableData();
        if (SysProperties.CHECK) {
            if (columns == null) {
                throw DbException.throwInternalError(row.toString());
            }
        }
        for (int i = 0, len = columns.length; i < len; i++) {
            Column col = new Column("C" + i, Value.INT);
            data.columns.add(col);
        }
        data.schema = metaSchema;
        data.tableName = "T" + id;
        data.id = id;
        data.temporary = options[2].equals("temp");
        data.persistData = true;
        data.persistIndexes = true;
        data.create = false;
        data.session = session;
        RegularTable table = new RegularTable(data);
        boolean binaryUnsigned = SysProperties.SORT_BINARY_UNSIGNED;
        if (options.length > 3) {
            binaryUnsigned = Boolean.parseBoolean(options[3]);
        }
        CompareMode mode = CompareMode.getInstance(
                options[0], Integer.parseInt(options[1]), binaryUnsigned);
        table.setCompareMode(mode);
        meta = table.getScanIndex(session);
    } else {
        // a secondary (btree) index on an already-created table
        Index p = metaObjects.get(parent);
        if (p == null) {
            throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                    "Table not found:" + parent + " for " + row + " meta:" + metaObjects);
        }
        RegularTable table = (RegularTable) p.getTable();
        Column[] tableCols = table.getColumns();
        int len = columns.length;
        IndexColumn[] cols = new IndexColumn[len];
        for (int i = 0; i < len; i++) {
            String c = columns[i];
            IndexColumn ic = new IndexColumn();
            // "columnId/sortType" or just "columnId"
            int idx = c.indexOf('/');
            if (idx >= 0) {
                String s = c.substring(idx + 1);
                ic.sortType = Integer.parseInt(s);
                c = c.substring(0, idx);
            }
            ic.column = tableCols[Integer.parseInt(c)];
            cols[i] = ic;
        }
        IndexType indexType;
        if (options[3].equals("d")) {
            // "d" marks a delegate (primary key) index
            indexType = IndexType.createPrimaryKey(true, false);
            Column[] tableColumns = table.getColumns();
            for (IndexColumn indexColumn : cols) {
                tableColumns[indexColumn.column.getColumnId()].setNullable(false);
            }
        } else {
            indexType = IndexType.createNonUnique(true);
        }
        meta = table.addIndex(session, "I" + id, id, cols, indexType, false, null);
    }
    PageIndex index;
    if (meta instanceof MultiVersionIndex) {
        // unwrap: the page store always tracks the base index
        index = (PageIndex) ((MultiVersionIndex) meta).getBaseIndex();
    } else {
        index = (PageIndex) meta;
    }
    metaObjects.put(id, index);
}
/**
 * Add an index to the in-memory index map.
 *
 * @param index the index
 */
public synchronized void addIndex(PageIndex index) {
    int id = index.getId();
    metaObjects.put(id, index);
}
/**
 * Add the meta data of an index: build its meta table row (options and
 * column-list strings in the persisted format parsed by
 * addMeta(Row, Session, boolean)) and insert it.
 *
 * @param index the index to add
 * @param session the session
 */
public void addMeta(PageIndex index, Session session) {
    Table table = index.getTable();
    if (SysProperties.CHECK) {
        if (!table.isTemporary()) {
            // to prevent ABBA locking problems, we need to always take
            // the Database lock before we take the PageStore lock
            synchronized (database) {
                synchronized (this) {
                    database.verifyMetaLocked(session);
                }
            }
        }
    }
    synchronized (this) {
        int type = index instanceof PageDataIndex ?
                META_TYPE_DATA_INDEX : META_TYPE_BTREE_INDEX;
        IndexColumn[] columns = index.getIndexColumns();
        StatementBuilder buff = new StatementBuilder();
        // column list entries are "columnId" or "columnId/sortType"
        for (IndexColumn col : columns) {
            buff.appendExceptFirst(",");
            int id = col.column.getColumnId();
            buff.append(id);
            int sortType = col.sortType;
            if (sortType != 0) {
                buff.append('/');
                buff.append(sortType);
            }
        }
        String columnList = buff.toString();
        CompareMode mode = table.getCompareMode();
        // options format: name,strength,[temp],[d],binaryUnsigned
        String options = mode.getName()+ "," + mode.getStrength() + ",";
        if (table.isTemporary()) {
            options += "temp";
        }
        options += ",";
        if (index instanceof PageDelegateIndex) {
            options += "d";
        }
        options += "," + mode.isBinaryUnsigned();
        Row row = metaTable.getTemplateRow();
        row.setValue(0, ValueInt.get(index.getId()));
        row.setValue(1, ValueInt.get(type));
        row.setValue(2, ValueInt.get(table.getId()));
        row.setValue(3, ValueInt.get(index.getRootPageId()));
        row.setValue(4, ValueString.get(options));
        row.setValue(5, ValueString.get(columnList));
        // row key is index id + 1 (key 0 is not used)
        row.setKey(index.getId() + 1);
        metaIndex.add(session, row);
    }
}
/**
 * Remove the meta data of an index. Skipped during recovery, when the
 * redo log drives the meta table instead.
 *
 * @param index the index to remove
 * @param session the session
 */
public void removeMeta(Index index, Session session) {
    if (SysProperties.CHECK) {
        if (!index.getTable().isTemporary()) {
            // to prevent ABBA locking problems, we need to always take
            // the Database lock before we take the PageStore lock
            synchronized (database) {
                synchronized (this) {
                    database.verifyMetaLocked(session);
                }
            }
        }
    }
    synchronized (this) {
        if (!recoveryRunning) {
            removeMetaIndex(index, session);
            metaObjects.remove(index.getId());
        }
    }
}
/**
 * Delete the meta table row of the given index.
 *
 * @param index the index whose meta row to delete
 * @param session the session
 * @throws DbException with FILE_CORRUPTED_1 if the row for this index
 *      is missing
 */
private void removeMetaIndex(Index index, Session session) {
    // the row key is index id + 1 (see addMeta)
    int key = index.getId() + 1;
    Row row = metaIndex.getRow(session, key);
    if (row.getKey() != key) {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "key: " + key + " index: " + index +
                " table: " + index.getTable() + " row: " + row);
    }
    metaIndex.remove(session, row);
}
/**
 * Set the maximum transaction log size in megabytes.
 *
 * NOTE(review): the value is stored as-is without unit conversion;
 * confirm against callers whether they pass megabytes or bytes.
 *
 * @param maxSize the new maximum log size
 */
public void setMaxLogSize(long maxSize) {
    this.maxLogSize = maxSize;
}
/**
 * Commit or rollback a prepared transaction after opening a database with
 * in-doubt transactions.
 *
 * @param sessionId the session id
 * @param pageId the page where the transaction was prepared
 * @param commit if the transaction should be committed
 */
public synchronized void setInDoubtTransactionState(int sessionId,
        int pageId, boolean commit) {
    boolean wasReadOnly = database.isReadOnly();
    try {
        // the log must be writable to resolve the transaction, even if
        // the database was opened read-only; the previous setting is
        // restored afterwards in any case
        database.setReadOnly(false);
        log.setInDoubtTransactionState(sessionId, pageId, commit);
    } finally {
        database.setReadOnly(wasReadOnly);
    }
}
/**
 * Get the list of in-doubt transaction.
 *
 * @return the list
 */
public ArrayList<InDoubtTransaction> getInDoubtTransactions() {
    // delegates to the transaction log, which tracks prepared but not yet
    // resolved transactions
    return log.getInDoubtTransactions();
}
/**
 * Check whether the recovery process is currently running.
 *
 * @return true if it is
 */
public boolean isRecoveryRunning() {
    return recoveryRunning;
}

/**
 * Verify that the page store file is open.
 *
 * @throws DbException with error code DATABASE_IS_CLOSED if the file
 *         has not been opened or was already closed
 */
private void checkOpen() {
    if (file == null) {
        throw DbException.get(ErrorCode.DATABASE_IS_CLOSED);
    }
}
/**
 * Get the file write count since the database was created.
 *
 * @return the write count
 */
public long getWriteCountTotal() {
    // writeCountBase presumably holds the count accumulated before this
    // session opened the file - confirm where it is initialized
    return writeCount + writeCountBase;
}

/**
 * Get the file write count since the database was opened.
 *
 * @return the write count
 */
public long getWriteCount() {
    return writeCount;
}

/**
 * Get the file read count since the database was opened.
 *
 * @return the read count
 */
public long getReadCount() {
    return readCount;
}
/**
 * A table is truncated.
 *
 * @param session the session
 * @param tableId the table id
 */
public synchronized void logTruncate(Session session, int tableId) {
    // skipped while recovery replays the log: replayed operations must
    // not be written to the log again
    if (!recoveryRunning) {
        openForWriting();
        log.logTruncate(session, tableId);
    }
}
/**
 * Get the root page of an index.
 *
 * @param indexId the index id
 * @return the root page
 */
public int getRootPageId(int indexId) {
    return metaRootPageId.get(indexId);
}

/**
 * Get the page cache used by this store.
 *
 * @return the cache
 */
public Cache getCache() {
    return cache;
}
/**
 * Compute and store the checksum of a page. Two running sums are taken
 * over a fixed sample of bytes (the type byte, byte 6, the two bytes
 * around the middle of the page, and the last two bytes); the folded sums,
 * XOR-ed with the page id, are stored in bytes 1 and 2. Empty pages are
 * left untouched.
 *
 * @param d the page data (modified in place)
 * @param pageId the page id
 */
private void checksumSet(byte[] d, int pageId) {
    int ps = pageSize;
    int type = d[0];
    if (type == Page.TYPE_EMPTY) {
        // empty pages carry no checksum
        return;
    }
    // note: "s2 += s1 += x" first adds x to s1, then the new s1 to s2;
    // the statement order below must match checksumTest exactly
    int s1 = 255 + (type & 255), s2 = 255 + s1;
    s2 += s1 += d[6] & 255;
    s2 += s1 += d[(ps >> 1) - 1] & 255;
    s2 += s1 += d[ps >> 1] & 255;
    s2 += s1 += d[ps - 2] & 255;
    s2 += s1 += d[ps - 1] & 255;
    // fold each sum into one byte and mix in the page id
    d[1] = (byte) (((s1 & 255) + (s1 >> 8)) ^ pageId);
    d[2] = (byte) (((s2 & 255) + (s2 >> 8)) ^ (pageId >> 8));
}
/**
 * Check if the stored checksum is correct.
 *
 * @param d the page data
 * @param pageId the page id
 * @param pageSize the page size in bytes
 * @return true if the stored checksum matches the recomputed one
 */
public static boolean checksumTest(byte[] d, int pageId, int pageSize) {
    // Recompute the two running sums over the same sampled bytes used
    // when the checksum was written: the type byte, byte 6, the two
    // bytes around the middle of the page, and the last two bytes.
    int sum1 = 255 + (d[0] & 255);
    int sum2 = 255 + sum1;
    sum1 += d[6] & 255;
    sum2 += sum1;
    sum1 += d[(pageSize >> 1) - 1] & 255;
    sum2 += sum1;
    sum1 += d[pageSize >> 1] & 255;
    sum2 += sum1;
    sum1 += d[pageSize - 2] & 255;
    sum2 += sum1;
    sum1 += d[pageSize - 1] & 255;
    sum2 += sum1;
    // fold each sum into one byte and mix in the page id, then compare
    // against the stored checksum bytes
    byte expected1 = (byte) (((sum1 & 255) + (sum1 >> 8)) ^ pageId);
    byte expected2 = (byte) (((sum2 & 255) + (sum2 >> 8)) ^ (pageId >> 8));
    return d[1] == expected1 && d[2] == expected2;
}
/**
 * Increment the change count. To be done after the operation has finished.
 */
public void incrementChangeCount() {
    changeCount++;
    // the counter is a long, so wrapping is practically impossible;
    // fail fast anyway when assertions are enabled
    if (SysProperties.CHECK && changeCount < 0) {
        throw DbException.throwInternalError("changeCount has wrapped");
    }
}

/**
 * Get the current change count. The first value is 1
 *
 * @return the change count
 */
public long getChangeCount() {
    return changeCount;
}
// Transaction log mode; presumably one of the LOG_MODE_* constants of
// this class - confirm against callers.
public void setLogMode(int logMode) {
    this.logMode = logMode;
}

public int getLogMode() {
    return logMode;
}

// Whether a lock file should be used for this database.
public void setLockFile(boolean lockFile) {
    this.lockFile = lockFile;
}
/**
 * Collect the ids of all objects registered in the meta table.
 *
 * @return a bit field with one bit set for each used object id (id 0 and
 *         negative ids are skipped)
 */
public BitField getObjectIds() {
    BitField f = new BitField();
    Cursor cursor = metaIndex.find(pageStoreSession, null, null);
    while (cursor.next()) {
        Row row = cursor.get();
        // column 0 of a meta row holds the object id
        int id = row.getValue(0).getInt();
        if (id > 0) {
            f.set(id);
        }
    }
    return f;
}
/**
 * Get the internal session used by the page store itself.
 *
 * @return the page store session
 */
public Session getPageStoreSession() {
    return pageStoreSession;
}

/**
 * Increment or decrement the backup reference count. A backup is in
 * progress while the level is greater than zero.
 *
 * @param start true to increment, false to decrement the level
 */
public synchronized void setBackup(boolean start) {
    backupLevel += start ? 1 : -1;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageStoreInDoubtTransaction.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import org.h2.message.DbException;
/**
* Represents an in-doubt transaction (a transaction in the prepare phase).
*/
public class PageStoreInDoubtTransaction implements InDoubtTransaction {

    private final PageStore store;
    private final int sessionId;
    private final int pos;
    private final String transactionName;
    private int state;

    /**
     * Create a new in-doubt transaction info object.
     *
     * @param store the page store
     * @param sessionId the session id
     * @param pos the position
     * @param transaction the transaction name
     */
    public PageStoreInDoubtTransaction(PageStore store, int sessionId, int pos,
            String transaction) {
        this.store = store;
        this.sessionId = sessionId;
        this.pos = pos;
        this.transactionName = transaction;
        this.state = IN_DOUBT;
    }

    @Override
    public void setState(int state) {
        // resolve the prepared transaction in the store first; an unknown
        // state throws and leaves the current state unchanged
        if (state == COMMIT) {
            store.setInDoubtTransactionState(sessionId, pos, true);
        } else if (state == ROLLBACK) {
            store.setInDoubtTransactionState(sessionId, pos, false);
        } else {
            DbException.throwInternalError("state=" + state);
        }
        this.state = state;
    }

    @Override
    public String getState() {
        if (state == IN_DOUBT) {
            return "IN_DOUBT";
        }
        if (state == COMMIT) {
            return "COMMIT";
        }
        if (state == ROLLBACK) {
            return "ROLLBACK";
        }
        throw DbException.throwInternalError("state=" + state);
    }

    @Override
    public String getTransactionName() {
        return transactionName;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageStreamData.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import org.h2.engine.Session;
/**
* A data page of a stream. The format is:
* <ul>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>the trunk page id: int (3-6)</li>
* <li>log key: int (7-10)</li>
* <li>data (11-)</li>
* </ul>
*/
public class PageStreamData extends Page {

    // offset of the first payload byte within the page; bytes 0-10 hold
    // the header described in the class javadoc
    private static final int DATA_START = 11;

    private final PageStore store;
    // page id of the trunk page this data page belongs to
    private int trunk;
    // log key of the stream; presumably distinguishes stream generations -
    // confirm against the log writer
    private int logKey;
    private Data data;
    // payload bytes that still fit; only valid between initWrite() and write()
    private int remaining;

    private PageStreamData(PageStore store, int pageId, int trunk, int logKey) {
        setPos(pageId);
        this.store = store;
        this.trunk = trunk;
        this.logKey = logKey;
    }

    /**
     * Read a stream data page.
     *
     * @param store the page store
     * @param data the data
     * @param pageId the page id
     * @return the page
     */
    static PageStreamData read(PageStore store, Data data, int pageId) {
        PageStreamData p = new PageStreamData(store, pageId, 0, 0);
        p.data = data;
        p.read();
        return p;
    }

    /**
     * Create a new stream trunk page.
     *
     * @param store the page store
     * @param pageId the page id
     * @param trunk the trunk page
     * @param logKey the log key
     * @return the page
     */
    static PageStreamData create(PageStore store, int pageId, int trunk,
            int logKey) {
        return new PageStreamData(store, pageId, trunk, logKey);
    }

    /**
     * Read the page from the disk.
     */
    private void read() {
        // parse the header in the on-disk order: type, checksum, trunk, key
        data.reset();
        data.readByte();
        data.readShortInt();
        trunk = data.readInt();
        logKey = data.readInt();
    }

    /**
     * Write the header data.
     */
    void initWrite() {
        // write the header in the same order read() parses it;
        // the checksum field is written as 0 here
        data = store.createData();
        data.writeByte((byte) Page.TYPE_STREAM_DATA);
        data.writeShortInt(0);
        data.writeInt(trunk);
        data.writeInt(logKey);
        remaining = store.getPageSize() - data.length();
    }

    /**
     * Write the data to the buffer.
     *
     * @param buff the source data
     * @param offset the offset in the source buffer
     * @param len the number of bytes to write
     * @return the number of bytes written
     */
    int write(byte[] buff, int offset, int len) {
        // write at most the remaining capacity of this page
        int max = Math.min(remaining, len);
        data.write(buff, offset, max);
        remaining -= max;
        return max;
    }

    @Override
    public void write() {
        store.writePage(getPos(), data);
    }

    /**
     * Get the number of bytes that fit in a page.
     *
     * @param pageSize the page size
     * @return the number of bytes
     */
    static int getCapacity(int pageSize) {
        return pageSize - DATA_START;
    }

    /**
     * Read the next bytes from the buffer.
     *
     * @param startPos the position in the data page
     * @param buff the target buffer
     * @param off the offset in the target buffer
     * @param len the number of bytes to read
     */
    void read(int startPos, byte[] buff, int off, int len) {
        System.arraycopy(data.getBytes(), startPos, buff, off, len);
    }

    /**
     * Get the number of remaining data bytes of this page.
     *
     * @return the remaining byte count
     */
    int getRemaining() {
        return remaining;
    }

    /**
     * Get the estimated memory size.
     *
     * @return number of double words (4 bytes)
     */
    @Override
    public int getMemory() {
        return store.getPageSize() >> 2;
    }

    @Override
    public void moveTo(Session session, int newPos) {
        // not required
    }

    int getLogKey() {
        return logKey;
    }

    @Override
    public String toString() {
        return "[" + getPos() + "] stream data key:" + logKey +
                " pos:" + data.length() + " remaining:" + remaining;
    }

    @Override
    public boolean canRemove() {
        return true;
    }

    // first payload offset for readers of this page type
    public static int getReadStart() {
        return DATA_START;
    }

    @Override
    public boolean canMove() {
        return false;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/PageStreamTrunk.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import org.h2.api.ErrorCode;
import org.h2.engine.Session;
import org.h2.message.DbException;
/**
* A trunk page of a stream. It contains the page numbers of the stream, and the
* page number of the next trunk. The format is:
* <ul>
* <li>page type: byte (0)</li>
* <li>checksum: short (1-2)</li>
* <li>previous trunk page, or 0 if none: int (3-6)</li>
* <li>log key: int (7-10)</li>
* <li>next trunk page: int (11-14)</li>
* <li>number of pages: short (15-16)</li>
* <li>page ids (17-)</li>
* </ul>
*/
public class PageStreamTrunk extends Page {

    // offset of the first page id within the page; bytes 0-16 hold the
    // header described in the class javadoc
    private static final int DATA_START = 17;

    /**
     * The previous stream trunk.
     */
    int parent;

    /**
     * The next stream trunk.
     */
    int nextTrunk;

    private final PageStore store;
    // log key of the stream; presumably distinguishes stream generations -
    // confirm against the log writer
    private int logKey;
    // ids of the data pages addressed by this trunk page
    private int[] pageIds;
    private int pageCount;
    private Data data;

    private PageStreamTrunk(PageStore store, int parent, int pageId, int next,
            int logKey, int[] pageIds) {
        setPos(pageId);
        this.parent = parent;
        this.store = store;
        this.nextTrunk = next;
        this.logKey = logKey;
        this.pageCount = pageIds.length;
        this.pageIds = pageIds;
    }

    // constructor used when reading an existing page from disk; the
    // fields are filled in by read()
    private PageStreamTrunk(PageStore store, Data data, int pageId) {
        setPos(pageId);
        this.data = data;
        this.store = store;
    }

    /**
     * Read a stream trunk page.
     *
     * @param store the page store
     * @param data the data
     * @param pageId the page id
     * @return the page
     */
    static PageStreamTrunk read(PageStore store, Data data, int pageId) {
        PageStreamTrunk p = new PageStreamTrunk(store, data, pageId);
        p.read();
        return p;
    }

    /**
     * Create a new stream trunk page.
     *
     * @param store the page store
     * @param parent the parent page
     * @param pageId the page id
     * @param next the next trunk page
     * @param logKey the log key
     * @param pageIds the stream data page ids
     * @return the page
     */
    static PageStreamTrunk create(PageStore store, int parent, int pageId,
            int next, int logKey, int[] pageIds) {
        return new PageStreamTrunk(store, parent, pageId, next, logKey, pageIds);
    }

    /**
     * Read the page from the disk.
     */
    private void read() {
        // parse the header in the on-disk order documented in the class
        // javadoc, then the list of data page ids
        data.reset();
        data.readByte();
        data.readShortInt();
        parent = data.readInt();
        logKey = data.readInt();
        nextTrunk = data.readInt();
        pageCount = data.readShortInt();
        pageIds = new int[pageCount];
        for (int i = 0; i < pageCount; i++) {
            pageIds[i] = data.readInt();
        }
    }

    /**
     * Get the data page id at the given position.
     *
     * @param index the index (0, 1, ...)
     * @return the value, or -1 if the index is too large
     */
    int getPageData(int index) {
        if (index >= pageIds.length) {
            return -1;
        }
        return pageIds[index];
    }

    @Override
    public void write() {
        // serialize in the exact order read() parses; the checksum field
        // is written as 0 here
        data = store.createData();
        data.writeByte((byte) Page.TYPE_STREAM_TRUNK);
        data.writeShortInt(0);
        data.writeInt(parent);
        data.writeInt(logKey);
        data.writeInt(nextTrunk);
        data.writeShortInt(pageCount);
        for (int i = 0; i < pageCount; i++) {
            data.writeInt(pageIds[i]);
        }
        store.writePage(getPos(), data);
    }

    /**
     * Get the number of pages that can be addressed in a stream trunk page.
     *
     * @param pageSize the page size
     * @return the number of pages
     */
    static int getPagesAddressed(int pageSize) {
        // 4 bytes per page id after the fixed header
        return (pageSize - DATA_START) / 4;
    }

    /**
     * Check if the given data page is in this trunk page.
     *
     * @param dataPageId the page id
     * @return true if it is
     */
    boolean contains(int dataPageId) {
        for (int i = 0; i < pageCount; i++) {
            if (pageIds[i] == dataPageId) {
                return true;
            }
        }
        return false;
    }

    /**
     * Free this page and all data pages. Pages after the last used data page
     * (if within this list) are empty and therefore not just freed, but marked
     * as not used.
     *
     * @param lastUsedPage the last used data page
     * @return the number of pages freed
     */
    int free(int lastUsedPage) {
        store.free(getPos(), false);
        int freed = 1;
        // pages listed after lastUsedPage were never written, so they are
        // released with freeUnused instead of free
        boolean notUsed = false;
        for (int i = 0; i < pageCount; i++) {
            int page = pageIds[i];
            if (notUsed) {
                store.freeUnused(page);
            } else {
                store.free(page, false);
            }
            freed++;
            if (page == lastUsedPage) {
                notUsed = true;
            }
        }
        return freed;
    }

    /**
     * Get the estimated memory size.
     *
     * @return number of double words (4 bytes)
     */
    @Override
    public int getMemory() {
        return store.getPageSize() >> 2;
    }

    @Override
    public void moveTo(Session session, int newPos) {
        // not required
    }

    int getLogKey() {
        return logKey;
    }

    public int getNextTrunk() {
        return nextTrunk;
    }

    /**
     * An iterator over page stream trunk pages.
     */
    static class Iterator {

        private final PageStore store;
        // id of the first trunk page seen; used to detect a cycle in the
        // trunk chain and stop
        private int first;
        private int next;
        private int previous;
        private boolean canDelete;
        private int current;

        Iterator(PageStore store, int first) {
            this.store = store;
            this.next = first;
        }

        int getCurrentPageId() {
            return current;
        }

        /**
         * Get the next trunk page or null if no next trunk page.
         *
         * @return the next trunk page or null
         */
        PageStreamTrunk next() {
            canDelete = false;
            // remember the starting page; if we come back to it the chain
            // is circular and iteration ends
            if (first == 0) {
                first = next;
            } else if (first == next) {
                return null;
            }
            if (next == 0 || next >= store.getPageCount()) {
                return null;
            }
            Page p;
            current = next;
            try {
                p = store.getPage(next);
            } catch (DbException e) {
                if (e.getErrorCode() == ErrorCode.FILE_CORRUPTED_1) {
                    // wrong checksum means end of stream
                    return null;
                }
                throw e;
            }
            if (p == null || p instanceof PageStreamTrunk ||
                    p instanceof PageStreamData) {
                canDelete = true;
            }
            if (!(p instanceof PageStreamTrunk)) {
                return null;
            }
            PageStreamTrunk t = (PageStreamTrunk) p;
            // a parent mismatch means this trunk belongs to another chain
            if (previous > 0 && t.parent != previous) {
                return null;
            }
            previous = next;
            next = t.nextTrunk;
            return t;
        }

        /**
         * Check if the current page can be deleted. It can if it's empty, a
         * stream trunk, or a stream data page.
         *
         * @return true if it can be deleted
         */
        boolean canDelete() {
            return canDelete;
        }

    }

    @Override
    public boolean canRemove() {
        return true;
    }

    @Override
    public String toString() {
        return "page[" + getPos() + "] stream trunk key:" + logKey +
                " next:" + nextTrunk;
    }

    @Override
    public boolean canMove() {
        return false;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/RangeInputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.h2.util.IOUtils;
/**
* Input stream that reads only a specified range from the source stream.
*/
public final class RangeInputStream extends FilterInputStream {

    /** Number of bytes that may still be read from the range. */
    private long limit;

    /**
     * Creates new instance of range input stream.
     *
     * @param in
     *            source stream
     * @param offset
     *            offset of the range
     * @param limit
     *            length of the range
     * @throws IOException
     *             on I/O exception during seeking to the specified offset
     */
    public RangeInputStream(InputStream in, long offset, long limit) throws IOException {
        super(in);
        this.limit = limit;
        IOUtils.skipFully(in, offset);
    }

    @Override
    public int read() throws IOException {
        if (limit <= 0) {
            // range exhausted: report end of stream
            return -1;
        }
        int b = in.read();
        if (b >= 0) {
            limit--;
        }
        return b;
    }

    @Override
    public int read(byte b[], int off, int len) throws IOException {
        if (limit <= 0) {
            return -1;
        }
        if (len > limit) {
            // limit < len <= Integer.MAX_VALUE here, so the cast is safe
            len = (int) limit;
        }
        int cnt = in.read(b, off, len);
        if (cnt > 0) {
            limit -= cnt;
        }
        return cnt;
    }

    @Override
    public long skip(long n) throws IOException {
        if (n > limit) {
            // Bug fix: the previous code cast the long limit to int here,
            // which truncated (possibly to a negative value) for remaining
            // ranges larger than Integer.MAX_VALUE.
            n = limit;
        }
        n = in.skip(n);
        limit -= n;
        return n;
    }

    @Override
    public int available() throws IOException {
        int cnt = in.available();
        if (cnt > limit) {
            return (int) limit;
        }
        return cnt;
    }

    @Override
    public void close() throws IOException {
        // closes the underlying stream
        in.close();
    }

    @Override
    public void mark(int readlimit) {
        // mark/reset is not supported, see markSupported()
    }

    @Override
    public synchronized void reset() throws IOException {
        throw new IOException("mark/reset not supported");
    }

    @Override
    public boolean markSupported() {
        return false;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/RangeReader.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.Reader;
import org.h2.util.IOUtils;
/**
* Reader that reads only a specified range from the source reader.
*/
public final class RangeReader extends Reader {

    private final Reader r;

    /** Number of characters that may still be read from the range. */
    private long limit;

    /**
     * Creates new instance of range reader.
     *
     * @param r
     *            source reader
     * @param offset
     *            offset of the range
     * @param limit
     *            length of the range
     * @throws IOException
     *             on I/O exception during seeking to the specified offset
     */
    public RangeReader(Reader r, long offset, long limit) throws IOException {
        this.r = r;
        this.limit = limit;
        IOUtils.skipFully(r, offset);
    }

    @Override
    public int read() throws IOException {
        if (limit <= 0) {
            // range exhausted: report end of stream
            return -1;
        }
        int c = r.read();
        if (c >= 0) {
            limit--;
        }
        return c;
    }

    @Override
    public int read(char cbuf[], int off, int len) throws IOException {
        if (limit <= 0) {
            return -1;
        }
        if (len > limit) {
            // limit < len <= Integer.MAX_VALUE here, so the cast is safe
            len = (int) limit;
        }
        int cnt = r.read(cbuf, off, len);
        if (cnt > 0) {
            limit -= cnt;
        }
        return cnt;
    }

    @Override
    public long skip(long n) throws IOException {
        if (n > limit) {
            // Bug fix: the previous code cast the long limit to int here,
            // which truncated (possibly to a negative value) for remaining
            // ranges larger than Integer.MAX_VALUE.
            n = limit;
        }
        n = r.skip(n);
        limit -= n;
        return n;
    }

    @Override
    public boolean ready() throws IOException {
        if (limit > 0) {
            return r.ready();
        }
        return false;
    }

    @Override
    public boolean markSupported() {
        return false;
    }

    @Override
    public void mark(int readAheadLimit) throws IOException {
        throw new IOException("mark() not supported");
    }

    @Override
    public void reset() throws IOException {
        throw new IOException("reset() not supported");
    }

    @Override
    public void close() throws IOException {
        r.close();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/RecoverTester.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.sql.SQLException;
import java.util.HashSet;
import java.util.Properties;
import org.h2.api.ErrorCode;
import org.h2.engine.ConnectionInfo;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Session;
import org.h2.message.DbException;
import org.h2.store.fs.FilePathRec;
import org.h2.store.fs.FileUtils;
import org.h2.store.fs.Recorder;
import org.h2.tools.Recover;
import org.h2.util.IOUtils;
import org.h2.util.StringUtils;
import org.h2.util.Utils;
/**
* A tool that simulates a crash while writing to the database, and then
* verifies the database doesn't get corrupt.
*/
public class RecoverTester implements Recorder {

    private static RecoverTester instance;

    // name of the scratch database that each snapshot is copied to
    private String testDatabase = "memFS:reopen";
    private int writeCount = Utils.getProperty("h2.recoverTestOffset", 0);
    // every testEvery-th write triggers a verification run
    private int testEvery = Utils.getProperty("h2.recoverTest", 64);
    private final long maxFileSize = Utils.getProperty(
            "h2.recoverTestMaxFileSize", Integer.MAX_VALUE) * 1024L * 1024;
    private int verifyCount;
    // stack traces of failures already reported, to avoid log spam
    private final HashSet<String> knownErrors = new HashSet<>();
    // true while a verification is in progress (re-entrancy guard)
    private volatile boolean testing;

    /**
     * Initialize the recover test.
     *
     * @param recoverTest the value of the recover test parameter
     */
    public static synchronized void init(String recoverTest) {
        RecoverTester tester = RecoverTester.getInstance();
        if (StringUtils.isNumber(recoverTest)) {
            tester.setTestEvery(Integer.parseInt(recoverTest));
        }
        FilePathRec.setRecorder(tester);
    }

    public static synchronized RecoverTester getInstance() {
        if (instance == null) {
            instance = new RecoverTester();
        }
        return instance;
    }

    @Override
    public void log(int op, String fileName, byte[] data, long x) {
        // only mutating operations on database files are interesting
        if (op != Recorder.WRITE && op != Recorder.TRUNCATE) {
            return;
        }
        if (!fileName.endsWith(Constants.SUFFIX_PAGE_FILE) &&
                !fileName.endsWith(Constants.SUFFIX_MV_FILE)) {
            return;
        }
        writeCount++;
        if ((writeCount % testEvery) != 0) {
            return;
        }
        if (FileUtils.size(fileName) > maxFileSize) {
            // System.out.println(fileName + " " + IOUtils.length(fileName));
            return;
        }
        if (testing) {
            // avoid deadlocks
            return;
        }
        testing = true;
        PrintWriter out = null;
        try {
            out = new PrintWriter(
                    new OutputStreamWriter(
                    FileUtils.newOutputStream(fileName + ".log", true)));
            testDatabase(fileName, out);
        } catch (IOException e) {
            throw DbException.convertIOException(e, null);
        } finally {
            IOUtils.closeSilently(out);
            testing = false;
        }
    }

    // Copy the current database files to the scratch location and try to
    // open them, simulating a crash at this point in the write stream.
    private synchronized void testDatabase(String fileName, PrintWriter out) {
        out.println("+ write #" + writeCount + " verify #" + verifyCount);
        try {
            IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_PAGE_FILE);
            String mvFileName = fileName.substring(0, fileName.length() -
                    Constants.SUFFIX_PAGE_FILE.length()) +
                    Constants.SUFFIX_MV_FILE;
            if (FileUtils.exists(mvFileName)) {
                IOUtils.copyFiles(mvFileName, testDatabase + Constants.SUFFIX_MV_FILE);
            }
            verifyCount++;
            // avoid using the Engine class to avoid deadlocks
            Properties p = new Properties();
            p.setProperty("user", "");
            p.setProperty("password", "");
            ConnectionInfo ci = new ConnectionInfo("jdbc:h2:" + testDatabase +
                    ";FILE_LOCK=NO;TRACE_LEVEL_FILE=0", p);
            Database database = new Database(ci, null);
            // close the database
            Session sysSession = database.getSystemSession();
            sysSession.prepare("script to '" + testDatabase + ".sql'").query(0);
            sysSession.prepare("shutdown immediately").update();
            database.removeSession(null);
            // everything OK - return
            return;
        } catch (DbException e) {
            SQLException e2 = DbException.toSQLException(e);
            int errorCode = e2.getErrorCode();
            if (errorCode == ErrorCode.WRONG_USER_OR_PASSWORD) {
                return;
            } else if (errorCode == ErrorCode.FILE_ENCRYPTION_ERROR_1) {
                return;
            }
            e.printStackTrace(System.out);
        } catch (Exception e) {
            // failed
            int errorCode = 0;
            if (e instanceof SQLException) {
                errorCode = ((SQLException) e).getErrorCode();
            }
            if (errorCode == ErrorCode.WRONG_USER_OR_PASSWORD) {
                return;
            } else if (errorCode == ErrorCode.FILE_ENCRYPTION_ERROR_1) {
                return;
            }
            e.printStackTrace(System.out);
        }
        out.println("begin ------------------------------ " + writeCount);
        try {
            Recover.execute(fileName.substring(0, fileName.lastIndexOf('/')), null);
        } catch (SQLException e) {
            // ignore
        }
        // use a fresh scratch database name for the second attempt
        testDatabase += "X";
        try {
            IOUtils.copyFiles(fileName, testDatabase + Constants.SUFFIX_PAGE_FILE);
            // avoid using the Engine class to avoid deadlocks
            Properties p = new Properties();
            ConnectionInfo ci = new ConnectionInfo("jdbc:h2:" +
                    testDatabase + ";FILE_LOCK=NO", p);
            Database database = new Database(ci, null);
            // close the database
            database.removeSession(null);
        } catch (Exception e) {
            int errorCode = 0;
            if (e instanceof DbException) {
                e = ((DbException) e).getSQLException();
                errorCode = ((SQLException) e).getErrorCode();
            }
            if (errorCode == ErrorCode.WRONG_USER_OR_PASSWORD) {
                return;
            } else if (errorCode == ErrorCode.FILE_ENCRYPTION_ERROR_1) {
                return;
            }
            // report each distinct failure (identified by the top of its
            // stack trace) only once
            StringBuilder buff = new StringBuilder();
            StackTraceElement[] list = e.getStackTrace();
            for (int i = 0; i < 10 && i < list.length; i++) {
                buff.append(list[i].toString()).append('\n');
            }
            String s = buff.toString();
            if (!knownErrors.contains(s)) {
                out.println(writeCount + " code: " + errorCode + " " + e.toString());
                e.printStackTrace(System.out);
                knownErrors.add(s);
            } else {
                out.println(writeCount + " code: " + errorCode);
            }
        }
    }

    public void setTestEvery(int testEvery) {
        this.testEvery = testEvery;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/SessionState.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
/**
* The session state contains information about when was the last commit of a
* session. It is only used during recovery.
*/
class SessionState {

    /**
     * The session id
     */
    public int sessionId;

    /**
     * The last log id where a commit for this session is found.
     */
    public int lastCommitLog;

    /**
     * The position where a commit for this session is found.
     */
    public int lastCommitPos;

    /**
     * The in-doubt transaction if there is one.
     */
    public PageStoreInDoubtTransaction inDoubtTransaction;

    /**
     * Check if this session state is already committed at this point.
     *
     * @param logId the log id
     * @param pos the position in the log
     * @return true if it is committed
     */
    public boolean isCommitted(int logId, int pos) {
        // within the same log, compare positions; across logs, a later
        // commit log means everything before it is committed
        if (logId == lastCommitLog) {
            return lastCommitPos >= pos;
        }
        return lastCommitLog > logId;
    }

    @Override
    public String toString() {
        return "sessionId:" + sessionId + " log:" + lastCommitLog +
                " pos:" + lastCommitPos + " inDoubt:" + inDoubtTransaction;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/WriterThread.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store;
import java.lang.ref.WeakReference;
import java.security.AccessControlException;
import org.h2.Driver;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.message.Trace;
import org.h2.message.TraceSystem;
/**
* The writer thread is responsible to flush the transaction transaction log
* from time to time.
*/
public class WriterThread implements Runnable {

    /**
     * The reference to the database.
     *
     * Thread objects are not garbage collected
     * until they returned from the run() method
     * (even if they where never started)
     * so if the connection was not closed,
     * the database object cannot get reclaimed
     * by the garbage collector if we use a hard reference.
     */
    private volatile WeakReference<Database> databaseRef;

    // delay between flushes in milliseconds
    private int writeDelay;
    private Thread thread;
    // set by stopThread(); checked by the run loop
    private volatile boolean stop;

    private WriterThread(Database database, int writeDelay) {
        this.databaseRef = new WeakReference<>(database);
        this.writeDelay = writeDelay;
    }

    /**
     * Change the write delay
     *
     * @param writeDelay the new write delay
     */
    public void setWriteDelay(int writeDelay) {
        this.writeDelay = writeDelay;
    }

    /**
     * Create and start a new writer thread for the given database. If the
     * thread can't be created, this method returns null.
     *
     * @param database the database
     * @param writeDelay the delay
     * @return the writer thread object or null
     */
    public static WriterThread create(Database database, int writeDelay) {
        try {
            WriterThread writer = new WriterThread(database, writeDelay);
            writer.thread = new Thread(writer, "H2 Log Writer " + database.getShortName());
            Driver.setThreadContextClassLoader(writer.thread);
            writer.thread.setDaemon(true);
            return writer;
        } catch (AccessControlException e) {
            // // Google App Engine does not allow threads
            return null;
        }
    }

    @Override
    public void run() {
        while (!stop) {
            Database database = databaseRef.get();
            if (database == null) {
                // the database was garbage collected: nothing left to flush
                break;
            }
            int wait = writeDelay;
            try {
                if (database.isFileLockSerialized()) {
                    wait = Constants.MIN_WRITE_DELAY;
                    database.checkpointIfRequired();
                } else {
                    database.flush();
                }
            } catch (Exception e) {
                // log and keep running; a failed flush must not kill the thread
                TraceSystem traceSystem = database.getTraceSystem();
                if (traceSystem != null) {
                    traceSystem.getTrace(Trace.DATABASE).error(e, "flush");
                }
            }
            // wait 0 mean wait forever, which is not what we want
            wait = Math.max(wait, Constants.MIN_WRITE_DELAY);
            synchronized (this) {
                // sleep in 100 ms slices so stopThread() is noticed quickly
                while (!stop && wait > 0) {
                    // wait 100 ms at a time
                    int w = Math.min(wait, 100);
                    try {
                        wait(w);
                    } catch (InterruptedException e) {
                        // ignore
                    }
                    wait -= w;
                }
            }
        }
        databaseRef = null;
    }

    /**
     * Stop the thread. This method is called when closing the database.
     */
    public void stopThread() {
        stop = true;
        synchronized (this) {
            notify();
        }
        // can't do thread.join(), because this thread might be holding
        // a lock that the writer thread is waiting for
    }

    /**
     * Start the thread. This method is called after opening the database
     * (to avoid deadlocks)
     */
    public void startThread() {
        thread.start();
        // drop the reference so the Thread object is not retained here
        thread = null;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FakeFileChannel.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
/**
* Fake file channel to use by in-memory and ZIP file systems.
*/
public class FakeFileChannel extends FileChannel {

    /**
     * Channels of this type are placeholders only: every operation,
     * including closing, fails with a plain {@link IOException}.
     */
    private static IOException unsupported() {
        return new IOException();
    }

    @Override
    protected void implCloseChannel() throws IOException {
        throw unsupported();
    }

    @Override
    public FileLock lock(long position, long size, boolean shared) throws IOException {
        throw unsupported();
    }

    @Override
    public MappedByteBuffer map(MapMode mode, long position, long size) throws IOException {
        throw unsupported();
    }

    @Override
    public long position() throws IOException {
        throw unsupported();
    }

    @Override
    public FileChannel position(long newPosition) throws IOException {
        throw unsupported();
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        throw unsupported();
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        throw unsupported();
    }

    @Override
    public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
        throw unsupported();
    }

    @Override
    public long size() throws IOException {
        throw unsupported();
    }

    @Override
    public long transferFrom(ReadableByteChannel src, long position, long count) throws IOException {
        throw unsupported();
    }

    @Override
    public long transferTo(long position, long count, WritableByteChannel target) throws IOException {
        throw unsupported();
    }

    @Override
    public FileChannel truncate(long size) throws IOException {
        throw unsupported();
    }

    @Override
    public FileLock tryLock(long position, long size, boolean shared) throws IOException {
        throw unsupported();
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        throw unsupported();
    }

    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        throw unsupported();
    }

    @Override
    public long write(ByteBuffer[] srcs, int offset, int len) throws IOException {
        throw unsupported();
    }

    @Override
    public void force(boolean metaData) throws IOException {
        throw unsupported();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FileBase.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
/**
 * Base implementation of {@link FileChannel} for the custom file systems.
 * Subclasses supply the positional primitives (size, position, relative
 * read/write, truncate); this class derives the absolute-position read and
 * write variants from them and stubs out the operations the H2 file systems
 * do not support.
 */
public abstract class FileBase extends FileChannel {

    @Override
    public abstract long size() throws IOException;

    @Override
    public abstract long position() throws IOException;

    @Override
    public abstract FileChannel position(long newPosition) throws IOException;

    @Override
    public abstract int read(ByteBuffer dst) throws IOException;

    @Override
    public abstract int write(ByteBuffer src) throws IOException;

    @Override
    public abstract FileChannel truncate(long size) throws IOException;

    @Override
    public synchronized int read(ByteBuffer dst, long position)
            throws IOException {
        // emulate a positional read: remember the current position,
        // read at the requested offset, then restore the old position
        long previous = position();
        position(position);
        int count = read(dst);
        position(previous);
        return count;
    }

    @Override
    public synchronized int write(ByteBuffer src, long position)
            throws IOException {
        // emulate a positional write, restoring the position afterwards
        long previous = position();
        position(position);
        int count = write(src);
        position(previous);
        return count;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        // ignore
    }

    @Override
    protected void implCloseChannel() throws IOException {
        // ignore
    }

    @Override
    public FileLock lock(long position, long size, boolean shared)
            throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public FileLock tryLock(long position, long size, boolean shared)
            throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public MappedByteBuffer map(MapMode mode, long position, long size)
            throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long read(ByteBuffer[] dsts, int offset, int length)
            throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long write(ByteBuffer[] srcs, int offset, int length)
            throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long transferFrom(ReadableByteChannel src, long position, long count)
            throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public long transferTo(long position, long count, WritableByteChannel target)
            throws IOException {
        throw new UnsupportedOperationException();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FileChannelInputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
/**
 * An input stream view over a file channel. Reads are positional, so the
 * channel's own position is never moved by this stream.
 */
public class FileChannelInputStream extends InputStream {

    private final FileChannel channel;
    private final boolean closeChannel;

    // lazily allocated one-byte buffer, used only by the single-byte read()
    private ByteBuffer singleByte;

    // the position within the channel for the next read
    private long pos;

    /**
     * Create a new file object input stream from the file channel.
     *
     * @param channel the file channel
     * @param closeChannel whether closing the stream should close the channel
     */
    public FileChannelInputStream(FileChannel channel, boolean closeChannel) {
        this.channel = channel;
        this.closeChannel = closeChannel;
    }

    @Override
    public int read() throws IOException {
        if (singleByte == null) {
            singleByte = ByteBuffer.allocate(1);
        }
        singleByte.rewind();
        int count = channel.read(singleByte, pos++);
        return count < 0 ? -1 : singleByte.get(0) & 0xff;
    }

    @Override
    public int read(byte[] b) throws IOException {
        return read(b, 0, b.length);
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int count = channel.read(ByteBuffer.wrap(b, off, len), pos);
        if (count == -1) {
            return -1;
        }
        pos += count;
        return count;
    }

    @Override
    public void close() throws IOException {
        if (closeChannel) {
            channel.close();
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FileChannelOutputStream.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
/**
 * An output stream view over a file channel. Writes go through the channel's
 * current position, which is set up once in the constructor.
 */
public class FileChannelOutputStream extends OutputStream {

    private final FileChannel channel;

    // reusable one-byte buffer for the single-byte write(int)
    private final byte[] single = { 0 };

    /**
     * Create a new file object output stream from the file channel.
     *
     * @param channel the file channel
     * @param append true for append mode, false for truncate and overwrite
     */
    public FileChannelOutputStream(FileChannel channel, boolean append)
            throws IOException {
        this.channel = channel;
        if (append) {
            // continue writing after the existing data
            channel.position(channel.size());
        } else {
            // discard any existing content and start from the beginning
            channel.position(0);
            channel.truncate(0);
        }
    }

    @Override
    public void write(int b) throws IOException {
        single[0] = (byte) b;
        FileUtils.writeFully(channel, ByteBuffer.wrap(single));
    }

    @Override
    public void write(byte[] b) throws IOException {
        FileUtils.writeFully(channel, ByteBuffer.wrap(b));
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        FileUtils.writeFully(channel, ByteBuffer.wrap(b, off, len));
    }

    @Override
    public void close() throws IOException {
        channel.close();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePath.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.FileChannel;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.h2.util.MathUtils;
/**
 * A path to a file. It is similar to the Java 7 <code>java.nio.file.Path</code>,
 * but simpler, and works with older versions of Java. It also implements the
 * relevant methods found in <code>java.nio.file.FileSystem</code> and
 * <code>FileSystems</code>
 */
public abstract class FilePath {

    private static FilePath defaultProvider;

    private static Map<String, FilePath> providers;

    /**
     * The prefix for temporary files.
     */
    private static String tempRandom;
    private static long tempSequence;

    /**
     * The complete path (which may be absolute or relative, depending on the
     * file system).
     */
    protected String name;

    /**
     * Get the file path object for the given path.
     * Windows-style '\' is replaced with '/'.
     *
     * @param path the path
     * @return the file path object
     */
    public static FilePath get(String path) {
        path = path.replace('\\', '/');
        int index = path.indexOf(':');
        registerDefaultProviders();
        if (index < 2) {
            // use the default provider if no prefix or
            // only a single character (drive name)
            return defaultProvider.getPath(path);
        }
        String scheme = path.substring(0, index);
        FilePath p = providers.get(scheme);
        if (p == null) {
            // provider not found - use the default
            p = defaultProvider;
        }
        return p.getPath(path);
    }

    /**
     * Lazily initialize the built-in file path providers. Synchronized so
     * that concurrent first calls (from get / register / unregister) cannot
     * build the provider map twice or observe it half-initialized.
     */
    private static synchronized void registerDefaultProviders() {
        if (providers == null || defaultProvider == null) {
            Map<String, FilePath> map = Collections.synchronizedMap(
                    new HashMap<String, FilePath>());
            for (String c : new String[] {
                    "org.h2.store.fs.FilePathDisk",
                    "org.h2.store.fs.FilePathMem",
                    "org.h2.store.fs.FilePathMemLZF",
                    "org.h2.store.fs.FilePathNioMem",
                    "org.h2.store.fs.FilePathNioMemLZF",
                    "org.h2.store.fs.FilePathSplit",
                    "org.h2.store.fs.FilePathNio",
                    "org.h2.store.fs.FilePathNioMapped",
                    "org.h2.store.fs.FilePathZip",
                    "org.h2.store.fs.FilePathRetryOnInterrupt"
            }) {
                try {
                    FilePath p = (FilePath) Class.forName(c).newInstance();
                    map.put(p.getScheme(), p);
                    // the first provider that loads becomes the default
                    if (defaultProvider == null) {
                        defaultProvider = p;
                    }
                } catch (Exception e) {
                    // ignore - the files may be excluded on purpose
                }
            }
            providers = map;
        }
    }

    /**
     * Register a file provider.
     *
     * @param provider the file provider
     */
    public static void register(FilePath provider) {
        registerDefaultProviders();
        providers.put(provider.getScheme(), provider);
    }

    /**
     * Unregister a file provider.
     *
     * @param provider the file provider
     */
    public static void unregister(FilePath provider) {
        registerDefaultProviders();
        providers.remove(provider.getScheme());
    }

    /**
     * Get the size of a file in bytes
     *
     * @return the size in bytes
     */
    public abstract long size();

    /**
     * Rename a file if this is allowed.
     *
     * @param newName the new fully qualified file name
     * @param atomicReplace whether the move should be atomic, and the target
     *            file should be replaced if it exists and replacing is possible
     */
    public abstract void moveTo(FilePath newName, boolean atomicReplace);

    /**
     * Create a new file.
     *
     * @return true if creating was successful
     */
    public abstract boolean createFile();

    /**
     * Checks if a file exists.
     *
     * @return true if it exists
     */
    public abstract boolean exists();

    /**
     * Delete a file or directory if it exists.
     * Directories may only be deleted if they are empty.
     */
    public abstract void delete();

    /**
     * List the files and directories in the given directory.
     *
     * @return the list of fully qualified file names
     */
    public abstract List<FilePath> newDirectoryStream();

    /**
     * Normalize a file name.
     *
     * @return the normalized file name
     */
    public abstract FilePath toRealPath();

    /**
     * Get the parent directory of a file or directory.
     *
     * @return the parent directory name
     */
    public abstract FilePath getParent();

    /**
     * Check if it is a file or a directory.
     *
     * @return true if it is a directory
     */
    public abstract boolean isDirectory();

    /**
     * Check if the file name includes a path.
     *
     * @return if the file name is absolute
     */
    public abstract boolean isAbsolute();

    /**
     * Get the last modified date of a file
     *
     * @return the last modified date
     */
    public abstract long lastModified();

    /**
     * Check if the file is writable.
     *
     * @return if the file is writable
     */
    public abstract boolean canWrite();

    /**
     * Create a directory (all required parent directories already exist).
     */
    public abstract void createDirectory();

    /**
     * Get the file or directory name (the last element of the path).
     *
     * @return the last element of the path
     */
    public String getName() {
        // everything after the last scheme separator or path separator
        int idx = Math.max(name.indexOf(':'), name.lastIndexOf('/'));
        return idx < 0 ? name : name.substring(idx + 1);
    }

    /**
     * Create an output stream to write into the file.
     *
     * @param append if true, the file will grow, if false, the file will be
     *            truncated first
     * @return the output stream
     */
    public abstract OutputStream newOutputStream(boolean append) throws IOException;

    /**
     * Open a random access file object.
     *
     * @param mode the access mode. Supported are r, rw, rws, rwd
     * @return the file object
     */
    public abstract FileChannel open(String mode) throws IOException;

    /**
     * Create an input stream to read from the file.
     *
     * @return the input stream
     */
    public abstract InputStream newInputStream() throws IOException;

    /**
     * Disable the ability to write.
     *
     * @return true if the call was successful
     */
    public abstract boolean setReadOnly();

    /**
     * Create a new temporary file.
     *
     * @param suffix the suffix
     * @param deleteOnExit if the file should be deleted when the virtual
     *            machine exits
     * @param inTempDir if the file should be stored in the temporary directory
     * @return the name of the created file
     */
    @SuppressWarnings("unused")
    public FilePath createTempFile(String suffix, boolean deleteOnExit,
            boolean inTempDir) throws IOException {
        while (true) {
            FilePath p = getPath(name + getNextTempFileNamePart(false) + suffix);
            if (p.exists() || !p.createFile()) {
                // in theory, the random number could collide
                getNextTempFileNamePart(true);
                continue;
            }
            p.open("rw").close();
            return p;
        }
    }

    /**
     * Get the next temporary file name part (the part in the middle).
     *
     * @param newRandom if the random part of the filename should change
     * @return the file name part
     */
    protected static synchronized String getNextTempFileNamePart(
            boolean newRandom) {
        if (newRandom || tempRandom == null) {
            tempRandom = MathUtils.randomInt(Integer.MAX_VALUE) + ".";
        }
        return tempRandom + tempSequence++;
    }

    /**
     * Get the string representation. The returned string can be used to
     * construct a new object.
     *
     * @return the path as a string
     */
    @Override
    public String toString() {
        return name;
    }

    /**
     * Get the scheme (prefix) for this file provider.
     * This is similar to
     * <code>java.nio.file.spi.FileSystemProvider.getScheme</code>.
     *
     * @return the scheme
     */
    public abstract String getScheme();

    /**
     * Convert a file to a path. This is similar to
     * <code>java.nio.file.spi.FileSystemProvider.getPath</code>, but may
     * return an object even if the scheme doesn't match in case of the
     * default file provider.
     *
     * @param path the path
     * @return the file path object
     */
    public abstract FilePath getPath(String path);

    /**
     * Get the unwrapped file name (without wrapper prefixes if wrapping /
     * delegating file systems are used).
     *
     * @return the unwrapped path
     */
    public FilePath unwrap() {
        return this;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathDisk.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import java.util.ArrayList;
import java.util.List;
import org.h2.api.ErrorCode;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.util.IOUtils;
import org.h2.util.New;
/**
 * This file system stores files on disk.
 * This is the most common file system.
 */
public class FilePathDisk extends FilePath {

    private static final String CLASSPATH_PREFIX = "classpath:";

    @Override
    public FilePathDisk getPath(String path) {
        FilePathDisk p = new FilePathDisk();
        p.name = translateFileName(path);
        return p;
    }

    @Override
    public long size() {
        return new File(name).length();
    }

    /**
     * Translate the file name to the native format. This will replace '\' with
     * '/' and expand the home directory ('~').
     *
     * @param fileName the file name
     * @return the native file name
     */
    protected static String translateFileName(String fileName) {
        fileName = fileName.replace('\\', '/');
        if (fileName.startsWith("file:")) {
            fileName = fileName.substring("file:".length());
        }
        return expandUserHomeDirectory(fileName);
    }

    /**
     * Expand '~' to the user home directory. It is only expanded if the '~'
     * stands alone, or is followed by '/' or '\'.
     *
     * @param fileName the file name
     * @return the native file name
     */
    public static String expandUserHomeDirectory(String fileName) {
        if (fileName.startsWith("~") && (fileName.length() == 1 ||
                fileName.startsWith("~/"))) {
            String userDir = SysProperties.USER_HOME;
            fileName = userDir + fileName.substring(1);
        }
        return fileName;
    }

    @Override
    public void moveTo(FilePath newName, boolean atomicReplace) {
        File oldFile = new File(name);
        File newFile = new File(newName.name);
        if (oldFile.getAbsolutePath().equals(newFile.getAbsolutePath())) {
            return;
        }
        if (!oldFile.exists()) {
            throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2,
                    name + " (not found)",
                    newName.name);
        }
        // Java 7: use java.nio.file.Files.move(Path source, Path target,
        // CopyOption... options)
        // with CopyOptions "REPLACE_EXISTING" and "ATOMIC_MOVE".
        if (atomicReplace) {
            boolean ok = oldFile.renameTo(newFile);
            if (ok) {
                return;
            }
            throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName.name);
        }
        if (newFile.exists()) {
            throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)");
        }
        // renaming may fail transiently (e.g. virus scanner or concurrent
        // access on Windows), so retry a limited number of times
        for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) {
            IOUtils.trace("rename", name + " >" + newName, null);
            boolean ok = oldFile.renameTo(newFile);
            if (ok) {
                return;
            }
            wait(i);
        }
        throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName.name);
    }

    /**
     * Sleep between retries of a file operation. At the 8th attempt the
     * garbage collector is invoked, because unreferenced but not yet
     * finalized file handles can block file operations on some platforms.
     * Note: this static method intentionally hides Object.wait(long).
     *
     * @param i the retry counter
     */
    private static void wait(int i) {
        if (i == 8) {
            System.gc();
        }
        try {
            // sleep at most 256 ms
            long sleep = Math.min(256, i * i);
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            // ignore
        }
    }

    @Override
    public boolean createFile() {
        File file = new File(name);
        for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) {
            try {
                return file.createNewFile();
            } catch (IOException e) {
                // 'access denied' is really a concurrent access problem
                wait(i);
            }
        }
        return false;
    }

    @Override
    public boolean exists() {
        return new File(name).exists();
    }

    @Override
    public void delete() {
        File file = new File(name);
        for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) {
            IOUtils.trace("delete", name, null);
            boolean ok = file.delete();
            if (ok || !file.exists()) {
                return;
            }
            wait(i);
        }
        throw DbException.get(ErrorCode.FILE_DELETE_FAILED_1, name);
    }

    @Override
    public List<FilePath> newDirectoryStream() {
        ArrayList<FilePath> list = New.arrayList();
        File f = new File(name);
        try {
            String[] files = f.list();
            if (files != null) {
                String base = f.getCanonicalPath();
                if (!base.endsWith(SysProperties.FILE_SEPARATOR)) {
                    base += SysProperties.FILE_SEPARATOR;
                }
                for (String file : files) {
                    list.add(getPath(base + file));
                }
            }
            return list;
        } catch (IOException e) {
            throw DbException.convertIOException(e, name);
        }
    }

    @Override
    public boolean canWrite() {
        return canWriteInternal(new File(name));
    }

    @Override
    public boolean setReadOnly() {
        File f = new File(name);
        return f.setReadOnly();
    }

    @Override
    public FilePathDisk toRealPath() {
        try {
            String fileName = new File(name).getCanonicalPath();
            return getPath(fileName);
        } catch (IOException e) {
            throw DbException.convertIOException(e, name);
        }
    }

    @Override
    public FilePath getParent() {
        String p = new File(name).getParent();
        return p == null ? null : getPath(p);
    }

    @Override
    public boolean isDirectory() {
        return new File(name).isDirectory();
    }

    @Override
    public boolean isAbsolute() {
        return new File(name).isAbsolute();
    }

    @Override
    public long lastModified() {
        return new File(name).lastModified();
    }

    private static boolean canWriteInternal(File file) {
        try {
            if (!file.canWrite()) {
                return false;
            }
        } catch (Exception e) {
            // workaround for GAE which throws a
            // java.security.AccessControlException
            return false;
        }
        // File.canWrite() does not respect windows user permissions,
        // so we must try to open it using the mode "rw".
        // See also http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4420020
        RandomAccessFile r = null;
        try {
            r = new RandomAccessFile(file, "rw");
            return true;
        } catch (FileNotFoundException e) {
            return false;
        } finally {
            if (r != null) {
                try {
                    r.close();
                } catch (IOException e) {
                    // ignore
                }
            }
        }
    }

    @Override
    public void createDirectory() {
        File dir = new File(name);
        for (int i = 0; i < SysProperties.MAX_FILE_RETRY; i++) {
            if (dir.exists()) {
                if (dir.isDirectory()) {
                    return;
                }
                throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1,
                        name + " (a file with this name already exists)");
            } else if (dir.mkdir()) {
                return;
            }
            wait(i);
        }
        throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1, name);
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        try {
            File file = new File(name);
            File parent = file.getParentFile();
            if (parent != null) {
                FileUtils.createDirectories(parent.getAbsolutePath());
            }
            FileOutputStream out = new FileOutputStream(name, append);
            IOUtils.trace("openFileOutputStream", name, out);
            return out;
        } catch (IOException e) {
            freeMemoryAndFinalize();
            // retry once after freeing memory; keep the requested append
            // mode (previously the retry always truncated the file)
            return new FileOutputStream(name, append);
        }
    }

    @Override
    public InputStream newInputStream() throws IOException {
        if (name.matches("[a-zA-Z]{2,19}:.*")) {
            // if the ':' is in position 1, a windows file access is assumed:
            // C:.. or D:, and if the ':' is not at the beginning, assume its a
            // file name with a colon
            if (name.startsWith(CLASSPATH_PREFIX)) {
                String fileName = name.substring(CLASSPATH_PREFIX.length());
                // Force absolute resolution in Class.getResourceAsStream
                if (!fileName.startsWith("/")) {
                    fileName = "/" + fileName;
                }
                InputStream in = getClass().getResourceAsStream(fileName);
                if (in == null) {
                    // ClassLoader.getResourceAsStream doesn't need leading "/"
                    in = Thread.currentThread().getContextClassLoader().
                            getResourceAsStream(fileName.substring(1));
                }
                if (in == null) {
                    throw new FileNotFoundException("resource " + fileName);
                }
                return in;
            }
            // otherwise an URL is assumed
            URL url = new URL(name);
            return url.openStream();
        }
        FileInputStream in = new FileInputStream(name);
        IOUtils.trace("openFileInputStream", name, in);
        return in;
    }

    /**
     * Call the garbage collection and run finalization. This close all files
     * that were not closed, and are no longer referenced.
     */
    static void freeMemoryAndFinalize() {
        IOUtils.trace("freeMemoryAndFinalize", null, null);
        Runtime rt = Runtime.getRuntime();
        long mem = rt.freeMemory();
        for (int i = 0; i < 16; i++) {
            rt.gc();
            long now = rt.freeMemory();
            rt.runFinalization();
            if (now == mem) {
                break;
            }
            mem = now;
        }
    }

    @Override
    public FileChannel open(String mode) throws IOException {
        FileDisk f;
        try {
            f = new FileDisk(name, mode);
            IOUtils.trace("open", name, f);
        } catch (IOException e) {
            // retry once after freeing memory; if that fails as well,
            // report the original exception
            freeMemoryAndFinalize();
            try {
                f = new FileDisk(name, mode);
            } catch (IOException e2) {
                throw e;
            }
        }
        return f;
    }

    @Override
    public String getScheme() {
        return "file";
    }

    @Override
    public FilePath createTempFile(String suffix, boolean deleteOnExit,
            boolean inTempDir) throws IOException {
        String fileName = name + ".";
        String prefix = new File(fileName).getName();
        File dir;
        if (inTempDir) {
            dir = new File(System.getProperty("java.io.tmpdir", "."));
        } else {
            dir = new File(fileName).getAbsoluteFile().getParentFile();
        }
        FileUtils.createDirectories(dir.getAbsolutePath());
        while (true) {
            File f = new File(dir, prefix + getNextTempFileNamePart(false) + suffix);
            if (f.exists() || !f.createNewFile()) {
                // in theory, the random number could collide
                getNextTempFileNamePart(true);
                continue;
            }
            if (deleteOnExit) {
                try {
                    f.deleteOnExit();
                } catch (Throwable e) {
                    // sometimes this throws a NullPointerException
                    // at java.io.DeleteOnExitHook.add(DeleteOnExitHook.java:33)
                    // we can ignore it
                }
            }
            return get(f.getCanonicalPath());
        }
    }
}
/**
 * Uses java.io.RandomAccessFile to access a file.
 */
class FileDisk extends FileBase {

    private final RandomAccessFile file;
    private final String name;
    private final boolean readOnly;

    FileDisk(String fileName, String mode) throws FileNotFoundException {
        this.file = new RandomAccessFile(fileName, mode);
        this.name = fileName;
        this.readOnly = mode.equals("r");
    }

    @Override
    public long position() throws IOException {
        return file.getFilePointer();
    }

    @Override
    public FileChannel position(long pos) throws IOException {
        file.seek(pos);
        return this;
    }

    @Override
    public long size() throws IOException {
        return file.length();
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        // NOTE(review): assumes dst is an array-backed (heap) buffer;
        // a direct buffer would fail in array()
        int offset = dst.arrayOffset() + dst.position();
        int count = file.read(dst.array(), offset, dst.remaining());
        if (count > 0) {
            dst.position(dst.position() + count);
        }
        return count;
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        int count = src.remaining();
        int offset = src.arrayOffset() + src.position();
        file.write(src.array(), offset, count);
        src.position(src.position() + count);
        return count;
    }

    @Override
    public FileChannel truncate(long newLength) throws IOException {
        // compatibility with JDK FileChannel#truncate
        if (readOnly) {
            throw new NonWritableChannelException();
        }
        /*
         * RandomAccessFile.setLength() does not always work here since Java 9
         * for unknown reason so use FileChannel.truncate().
         */
        file.getChannel().truncate(newLength);
        return this;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        String method = SysProperties.SYNC_METHOD;
        if ("".equals(method)) {
            // syncing is disabled by configuration
            return;
        }
        if ("force".equals(method)) {
            file.getChannel().force(true);
        } else if ("forceFalse".equals(method)) {
            file.getChannel().force(false);
        } else {
            // "sync", as well as any unrecognized setting
            file.getFD().sync();
        }
    }

    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        return file.getChannel().tryLock(position, size, shared);
    }

    @Override
    public void implCloseChannel() throws IOException {
        file.close();
    }

    @Override
    public String toString() {
        return name;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathEncrypt.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.h2.security.AES;
import org.h2.security.BlockCipher;
import org.h2.security.SHA256;
import org.h2.util.MathUtils;
/**
* An encrypted file.
*/
public class FilePathEncrypt extends FilePathWrapper {
private static final String SCHEME = "encrypt";
/**
* Register this file system.
*/
public static void register() {
FilePath.register(new FilePathEncrypt());
}
@Override
public FileChannel open(String mode) throws IOException {
String[] parsed = parse(name);
FileChannel file = FileUtils.open(parsed[1], mode);
byte[] passwordBytes = parsed[0].getBytes(StandardCharsets.UTF_8);
return new FileEncrypt(name, passwordBytes, file);
}
@Override
public String getScheme() {
return SCHEME;
}
@Override
protected String getPrefix() {
String[] parsed = parse(name);
return getScheme() + ":" + parsed[0] + ":";
}
@Override
public FilePath unwrap(String fileName) {
return FilePath.get(parse(fileName)[1]);
}
@Override
public long size() {
long size = getBase().size() - FileEncrypt.HEADER_LENGTH;
size = Math.max(0, size);
if ((size & FileEncrypt.BLOCK_SIZE_MASK) != 0) {
size -= FileEncrypt.BLOCK_SIZE;
}
return size;
}
@Override
public OutputStream newOutputStream(boolean append) throws IOException {
return new FileChannelOutputStream(open("rw"), append);
}
@Override
public InputStream newInputStream() throws IOException {
return new FileChannelInputStream(open("r"), true);
}
/**
* Split the file name into algorithm, password, and base file name.
*
* @param fileName the file name
* @return an array with algorithm, password, and base file name
*/
private String[] parse(String fileName) {
if (!fileName.startsWith(getScheme())) {
throw new IllegalArgumentException(fileName +
" doesn't start with " + getScheme());
}
fileName = fileName.substring(getScheme().length() + 1);
int idx = fileName.indexOf(':');
String password;
if (idx < 0) {
throw new IllegalArgumentException(fileName +
" doesn't contain encryption algorithm and password");
}
password = fileName.substring(0, idx);
fileName = fileName.substring(idx + 1);
return new String[] { password, fileName };
}
/**
* Convert a char array to a byte array, in UTF-16 format. The char array is
* not cleared after use (this must be done by the caller).
*
* @param passwordChars the password characters
* @return the byte array
*/
public static byte[] getPasswordBytes(char[] passwordChars) {
// using UTF-16
int len = passwordChars.length;
byte[] password = new byte[len * 2];
for (int i = 0; i < len; i++) {
char c = passwordChars[i];
password[i + i] = (byte) (c >>> 8);
password[i + i + 1] = (byte) c;
}
return password;
}
/**
* An encrypted file with a read cache.
*/
public static class FileEncrypt extends FileBase {
/**
* The block size.
*/
static final int BLOCK_SIZE = 4096;
/**
* The block size bit mask.
*/
static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1;
/**
* The length of the file header. Using a smaller header is possible,
* but would mean reads and writes are not aligned to the block size.
*/
static final int HEADER_LENGTH = BLOCK_SIZE;
private static final byte[] HEADER = "H2encrypt\n".getBytes();
private static final int SALT_POS = HEADER.length;
/**
* The length of the salt, in bytes.
*/
private static final int SALT_LENGTH = 8;
/**
* The number of iterations. It is relatively low; a higher value would
* slow down opening files on Android too much.
*/
private static final int HASH_ITERATIONS = 10;
private final FileChannel base;
/**
* The current position within the file, from a user perspective.
*/
private long pos;
/**
* The current file size, from a user perspective.
*/
private long size;
private final String name;
private XTS xts;
private byte[] encryptionKey;
public FileEncrypt(String name, byte[] encryptionKey, FileChannel base) {
// don't do any read or write operations here, because they could
// fail if the file is locked, and we want to give the caller a
// chance to lock the file first
this.name = name;
this.base = base;
this.encryptionKey = encryptionKey;
}
private void init() throws IOException {
if (xts != null) {
return;
}
this.size = base.size() - HEADER_LENGTH;
boolean newFile = size < 0;
byte[] salt;
if (newFile) {
byte[] header = Arrays.copyOf(HEADER, BLOCK_SIZE);
salt = MathUtils.secureRandomBytes(SALT_LENGTH);
System.arraycopy(salt, 0, header, SALT_POS, salt.length);
writeFully(base, 0, ByteBuffer.wrap(header));
size = 0;
} else {
salt = new byte[SALT_LENGTH];
readFully(base, SALT_POS, ByteBuffer.wrap(salt));
if ((size & BLOCK_SIZE_MASK) != 0) {
size -= BLOCK_SIZE;
}
}
AES cipher = new AES();
cipher.setKey(SHA256.getPBKDF2(
encryptionKey, salt, HASH_ITERATIONS, 16));
encryptionKey = null;
xts = new XTS(cipher);
}
@Override
protected void implCloseChannel() throws IOException {
base.close();
}
@Override
public FileChannel position(long newPosition) throws IOException {
this.pos = newPosition;
return this;
}
@Override
public long position() throws IOException {
return pos;
}
@Override
public int read(ByteBuffer dst) throws IOException {
int len = read(dst, pos);
if (len > 0) {
pos += len;
}
return len;
}
@Override
public int read(ByteBuffer dst, long position) throws IOException {
int len = dst.remaining();
if (len == 0) {
return 0;
}
init();
len = (int) Math.min(len, size - position);
if (position >= size) {
return -1;
} else if (position < 0) {
throw new IllegalArgumentException("pos: " + position);
}
if ((position & BLOCK_SIZE_MASK) != 0 ||
(len & BLOCK_SIZE_MASK) != 0) {
// either the position or the len is unaligned:
// read aligned, and then truncate
long p = position / BLOCK_SIZE * BLOCK_SIZE;
int offset = (int) (position - p);
int l = (len + offset + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
ByteBuffer temp = ByteBuffer.allocate(l);
readInternal(temp, p, l);
temp.flip();
temp.limit(offset + len);
temp.position(offset);
dst.put(temp);
return len;
}
readInternal(dst, position, len);
return len;
}
    /**
     * Read and decrypt an aligned range. Both position and len must be
     * multiples of BLOCK_SIZE; the data is decrypted in place in the
     * destination buffer's backing array.
     */
    private void readInternal(ByteBuffer dst, long position, int len)
            throws IOException {
        int x = dst.position();
        // shift by the header length when addressing the underlying file
        readFully(base, position + HEADER_LENGTH, dst);
        // the block number serves as the XTS tweak (sector id)
        long block = position / BLOCK_SIZE;
        while (len > 0) {
            xts.decrypt(block++, BLOCK_SIZE, dst.array(), dst.arrayOffset() + x);
            x += BLOCK_SIZE;
            len -= BLOCK_SIZE;
        }
    }
private static void readFully(FileChannel file, long pos, ByteBuffer dst)
throws IOException {
do {
int len = file.read(dst, pos);
if (len < 0) {
throw new EOFException();
}
pos += len;
} while (dst.remaining() > 0);
}
    /**
     * Encrypt and write the buffer at the given absolute (plaintext)
     * position. Unaligned writes are handled read-modify-write: the
     * affected whole blocks are read and decrypted, patched with the new
     * data, then re-encrypted and written back. When the resulting file
     * size is unaligned, the remainder is materialized as extra marker
     * bytes appended after the encrypted data (see init(), which subtracts
     * one block when it finds an unaligned physical size).
     */
    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        init();
        int len = src.remaining();
        if ((position & BLOCK_SIZE_MASK) != 0 ||
                (len & BLOCK_SIZE_MASK) != 0) {
            // either the position or the len is unaligned:
            // read aligned, and then truncate
            long p = position / BLOCK_SIZE * BLOCK_SIZE;
            int offset = (int) (position - p);
            int l = (len + offset + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
            ByteBuffer temp = ByteBuffer.allocate(l);
            // only read back the part of the range that currently exists
            int available = (int) (size - p + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
            int readLen = Math.min(l, available);
            if (readLen > 0) {
                readInternal(temp, p, readLen);
                temp.rewind();
            }
            // splice the new data into the decrypted window
            temp.limit(offset + len);
            temp.position(offset);
            temp.put(src);
            temp.limit(l);
            temp.rewind();
            writeInternal(temp, p, l);
            long p2 = position + len;
            size = Math.max(size, p2);
            int plus = (int) (size & BLOCK_SIZE_MASK);
            if (plus > 0) {
                // zero marker bytes encoding the unaligned remainder
                temp = ByteBuffer.allocate(plus);
                writeFully(base, p + HEADER_LENGTH + l, temp);
            }
            return len;
        }
        writeInternal(src, position, len);
        long p2 = position + len;
        size = Math.max(size, p2);
        return len;
    }
    /**
     * Encrypt an aligned range into a scratch buffer and write it to the
     * underlying file. The source buffer's contents are not modified.
     */
    private void writeInternal(ByteBuffer src, long position, int len)
            throws IOException {
        ByteBuffer crypt = ByteBuffer.allocate(len);
        crypt.put(src);
        crypt.flip();
        // the block number serves as the XTS tweak (sector id)
        long block = position / BLOCK_SIZE;
        int x = 0, l = len;
        while (l > 0) {
            xts.encrypt(block++, BLOCK_SIZE, crypt.array(), crypt.arrayOffset() + x);
            x += BLOCK_SIZE;
            l -= BLOCK_SIZE;
        }
        writeFully(base, position + HEADER_LENGTH, crypt);
    }
private static void writeFully(FileChannel file, long pos,
ByteBuffer src) throws IOException {
int off = 0;
do {
int len = file.write(src, pos + off);
off += len;
} while (src.remaining() > 0);
}
@Override
public int write(ByteBuffer src) throws IOException {
int len = write(src, pos);
if (len > 0) {
pos += len;
}
return len;
}
@Override
public long size() throws IOException {
init();
return size;
}
    /**
     * Truncate the file to the given (logical) size. Growing the file is
     * a no-op, matching FileChannel semantics. If the new size is not a
     * multiple of the block size, one extra physical block is kept so the
     * partial last block's data survives.
     */
    @Override
    public FileChannel truncate(long newSize) throws IOException {
        init();
        if (newSize > size) {
            return this;
        }
        if (newSize < 0) {
            throw new IllegalArgumentException("newSize: " + newSize);
        }
        int offset = (int) (newSize & BLOCK_SIZE_MASK);
        if (offset > 0) {
            base.truncate(newSize + HEADER_LENGTH + BLOCK_SIZE);
        } else {
            base.truncate(newSize + HEADER_LENGTH);
        }
        this.size = newSize;
        // the channel position must not point past the end of file
        pos = Math.min(pos, size);
        return this;
    }
    @Override
    public void force(boolean metaData) throws IOException {
        // flushing is fully delegated; encryption is stateless per block
        base.force(metaData);
    }

    /**
     * Locking is delegated to the underlying channel. Note that the lock
     * region coordinates are passed through unchanged (not shifted by
     * HEADER_LENGTH).
     */
    @Override
    public FileLock tryLock(long position, long size, boolean shared)
            throws IOException {
        return base.tryLock(position, size, shared);
    }

    @Override
    public String toString() {
        return name;
    }
}
/**
* An XTS implementation as described in
* IEEE P1619 (Standard Architecture for Encrypted Shared Storage Media).
* See also
* http://axelkenzo.ru/downloads/1619-2007-NIST-Submission.pdf
*/
    static class XTS {

        /**
         * Galois field feedback.
         */
        private static final int GF_128_FEEDBACK = 0x87;

        /**
         * The AES encryption block size.
         */
        private static final int CIPHER_BLOCK_SIZE = 16;

        private final BlockCipher cipher;

        XTS(BlockCipher cipher) {
            this.cipher = cipher;
        }

        /**
         * Encrypt the data.
         *
         * @param id the (sector) id
         * @param len the number of bytes
         * @param data the data
         * @param offset the offset within the data
         */
        void encrypt(long id, int len, byte[] data, int offset) {
            byte[] tweak = initTweak(id);
            int i = 0;
            for (; i + CIPHER_BLOCK_SIZE <= len; i += CIPHER_BLOCK_SIZE) {
                if (i > 0) {
                    updateTweak(tweak);
                }
                // XEX: xor with the tweak, encrypt, xor with the tweak again
                xorTweak(data, i + offset, tweak);
                cipher.encrypt(data, i + offset, CIPHER_BLOCK_SIZE);
                xorTweak(data, i + offset, tweak);
            }
            if (i < len) {
                // ciphertext stealing for a trailing partial cipher block
                updateTweak(tweak);
                swap(data, i + offset, i - CIPHER_BLOCK_SIZE + offset, len - i);
                xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweak);
                cipher.encrypt(data, i - CIPHER_BLOCK_SIZE + offset, CIPHER_BLOCK_SIZE);
                xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweak);
            }
        }

        /**
         * Decrypt the data.
         *
         * @param id the (sector) id
         * @param len the number of bytes
         * @param data the data
         * @param offset the offset within the data
         */
        void decrypt(long id, int len, byte[] data, int offset) {
            // tweakEnd keeps the second-to-last tweak: with ciphertext
            // stealing the last two blocks are processed with swapped tweaks
            byte[] tweak = initTweak(id), tweakEnd = tweak;
            int i = 0;
            for (; i + CIPHER_BLOCK_SIZE <= len; i += CIPHER_BLOCK_SIZE) {
                if (i > 0) {
                    updateTweak(tweak);
                    if (i + CIPHER_BLOCK_SIZE + CIPHER_BLOCK_SIZE > len &&
                            i + CIPHER_BLOCK_SIZE < len) {
                        tweakEnd = tweak.clone();
                        updateTweak(tweak);
                    }
                }
                xorTweak(data, i + offset, tweak);
                cipher.decrypt(data, i + offset, CIPHER_BLOCK_SIZE);
                xorTweak(data, i + offset, tweak);
            }
            if (i < len) {
                // NOTE(review): unlike encrypt(), the first swap argument is
                // 'i' without '+ offset' and the length is 'len - i + offset';
                // this branch only runs when len is not a multiple of 16,
                // which the file cipher (whole 4096-byte blocks) never
                // triggers - verify before using unaligned lengths here.
                swap(data, i, i - CIPHER_BLOCK_SIZE + offset, len - i + offset);
                xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweakEnd);
                cipher.decrypt(data, i - CIPHER_BLOCK_SIZE + offset, CIPHER_BLOCK_SIZE);
                xorTweak(data, i - CIPHER_BLOCK_SIZE + offset, tweakEnd);
            }
        }

        /**
         * Build the initial tweak: encode the sector id little-endian into
         * one cipher block and encrypt it with the tweak key.
         */
        private byte[] initTweak(long id) {
            byte[] tweak = new byte[CIPHER_BLOCK_SIZE];
            for (int j = 0; j < CIPHER_BLOCK_SIZE; j++, id >>>= 8) {
                tweak[j] = (byte) (id & 0xff);
            }
            cipher.encrypt(tweak, 0, CIPHER_BLOCK_SIZE);
            return tweak;
        }

        private static void xorTweak(byte[] data, int pos, byte[] tweak) {
            for (int i = 0; i < CIPHER_BLOCK_SIZE; i++) {
                data[pos + i] ^= tweak[i];
            }
        }

        /**
         * Multiply the tweak by x (alpha) in GF(2^128), little-endian,
         * reducing with the standard feedback value 0x87 on overflow.
         */
        private static void updateTweak(byte[] tweak) {
            byte ci = 0, co = 0;
            for (int i = 0; i < CIPHER_BLOCK_SIZE; i++) {
                co = (byte) ((tweak[i] >> 7) & 1);
                tweak[i] = (byte) (((tweak[i] << 1) + ci) & 255);
                ci = co;
            }
            if (co != 0) {
                tweak[0] ^= GF_128_FEEDBACK;
            }
        }

        /** Exchange two equally long regions of the array in place. */
        private static void swap(byte[] data, int source, int target, int len) {
            for (int i = 0; i < len; i++) {
                byte temp = data[source + i];
                data[source + i] = data[target + i];
                data[target + i] = temp;
            }
        }
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathMem.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;
import org.h2.api.ErrorCode;
import org.h2.compress.CompressLZF;
import org.h2.message.DbException;
import org.h2.util.MathUtils;
import org.h2.util.New;
/**
* This file system keeps files fully in memory. There is an option to compress
* file blocks to save memory.
*/
public class FilePathMem extends FilePath {

    // all in-memory files keyed by canonical name; also serves as the lock
    // guarding every structural file-system operation
    private static final TreeMap<String, FileMemData> MEMORY_FILES =
            new TreeMap<>();

    // sentinel entry that marks a name as a directory
    private static final FileMemData DIRECTORY = new FileMemData("", false);

    @Override
    public FilePathMem getPath(String path) {
        FilePathMem p = new FilePathMem();
        p.name = getCanonicalPath(path);
        return p;
    }

    @Override
    public long size() {
        return getMemoryFile().length();
    }

    @Override
    public void moveTo(FilePath newName, boolean atomicReplace) {
        synchronized (MEMORY_FILES) {
            if (!atomicReplace && !newName.name.equals(name) &&
                    MEMORY_FILES.containsKey(newName.name)) {
                throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)");
            }
            FileMemData f = getMemoryFile();
            f.setName(newName.name);
            MEMORY_FILES.remove(name);
            MEMORY_FILES.put(newName.name, f);
        }
    }

    @Override
    public boolean createFile() {
        synchronized (MEMORY_FILES) {
            if (exists()) {
                return false;
            }
            // creates and registers the file data as a side effect
            getMemoryFile();
        }
        return true;
    }

    @Override
    public boolean exists() {
        if (isRoot()) {
            return true;
        }
        synchronized (MEMORY_FILES) {
            return MEMORY_FILES.get(name) != null;
        }
    }

    @Override
    public void delete() {
        if (isRoot()) {
            return;
        }
        synchronized (MEMORY_FILES) {
            FileMemData old = MEMORY_FILES.remove(name);
            if (old != null) {
                // free the memory held by the file data
                old.truncate(0);
            }
        }
    }

    @Override
    public List<FilePath> newDirectoryStream() {
        ArrayList<FilePath> list = New.arrayList();
        synchronized (MEMORY_FILES) {
            // tailMap starts at this name; entries share the prefix until
            // the first unrelated name is reached
            for (String n : MEMORY_FILES.tailMap(name).keySet()) {
                if (n.startsWith(name)) {
                    // direct children only: no further '/' after the prefix
                    if (!n.equals(name) && n.indexOf('/', name.length() + 1) < 0) {
                        list.add(getPath(n));
                    }
                } else {
                    break;
                }
            }
            return list;
        }
    }

    @Override
    public boolean setReadOnly() {
        return getMemoryFile().setReadOnly();
    }

    @Override
    public boolean canWrite() {
        return getMemoryFile().canWrite();
    }

    @Override
    public FilePathMem getParent() {
        int idx = name.lastIndexOf('/');
        return idx < 0 ? null : getPath(name.substring(0, idx));
    }

    @Override
    public boolean isDirectory() {
        if (isRoot()) {
            return true;
        }
        synchronized (MEMORY_FILES) {
            FileMemData d = MEMORY_FILES.get(name);
            return d == DIRECTORY;
        }
    }

    @Override
    public boolean isAbsolute() {
        // TODO relative files are not supported
        return true;
    }

    @Override
    public FilePathMem toRealPath() {
        return this;
    }

    @Override
    public long lastModified() {
        return getMemoryFile().getLastModified();
    }

    @Override
    public void createDirectory() {
        // NOTE(review): exists() is evaluated outside the lock taken below,
        // so a concurrent createFile()/createDirectory() for the same name
        // could interleave here - confirm callers serialize these operations
        if (exists()) {
            throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1,
                    name + " (a file with this name already exists)");
        }
        synchronized (MEMORY_FILES) {
            MEMORY_FILES.put(name, DIRECTORY);
        }
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        FileMemData obj = getMemoryFile();
        FileMem m = new FileMem(obj, false);
        return new FileChannelOutputStream(m, append);
    }

    @Override
    public InputStream newInputStream() {
        FileMemData obj = getMemoryFile();
        FileMem m = new FileMem(obj, true);
        return new FileChannelInputStream(m, true);
    }

    @Override
    public FileChannel open(String mode) {
        FileMemData obj = getMemoryFile();
        return new FileMem(obj, "r".equals(mode));
    }

    /**
     * Get (or lazily create and register) the data object for this file
     * name. Throws if the name denotes a directory.
     */
    private FileMemData getMemoryFile() {
        synchronized (MEMORY_FILES) {
            FileMemData m = MEMORY_FILES.get(name);
            if (m == DIRECTORY) {
                throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1,
                        name + " (a directory with this name already exists)");
            }
            if (m == null) {
                m = new FileMemData(name, compressed());
                MEMORY_FILES.put(name, m);
            }
            return m;
        }
    }

    private boolean isRoot() {
        return name.equals(getScheme() + ":");
    }

    /**
     * Get the canonical path for this file name.
     *
     * @param fileName the file name
     * @return the canonical path
     */
    protected static String getCanonicalPath(String fileName) {
        fileName = fileName.replace('\\', '/');
        // ensure a '/' directly follows the scheme separator
        int idx = fileName.indexOf(':') + 1;
        if (fileName.length() > idx && fileName.charAt(idx) != '/') {
            fileName = fileName.substring(0, idx) + "/" + fileName.substring(idx);
        }
        return fileName;
    }

    @Override
    public String getScheme() {
        return "memFS";
    }

    /**
     * Whether the file should be compressed.
     *
     * @return if it should be compressed.
     */
    boolean compressed() {
        return false;
    }
}
/**
* A memory file system that compresses blocks to conserve memory.
*/
class FilePathMemLZF extends FilePathMem {

    @Override
    public FilePathMem getPath(String path) {
        FilePathMemLZF result = new FilePathMemLZF();
        result.name = getCanonicalPath(path);
        return result;
    }

    @Override
    boolean compressed() {
        // this variant stores file blocks LZF-compressed
        return true;
    }

    @Override
    public String getScheme() {
        return "memLZF";
    }
}
/**
* This class represents an in-memory file.
*/
class FileMem extends FileBase {

    /**
     * The file data.
     */
    final FileMemData data;

    private final boolean readOnly;

    // current channel position; each channel has its own, the data is shared
    private long pos;

    FileMem(FileMemData data, boolean readOnly) {
        this.data = data;
        this.readOnly = readOnly;
    }

    @Override
    public long size() {
        return data.length();
    }

    @Override
    public FileChannel truncate(long newLength) throws IOException {
        // compatibility with JDK FileChannel#truncate
        if (readOnly) {
            throw new NonWritableChannelException();
        }
        // growing the file is a no-op, as for FileChannel
        if (newLength < size()) {
            data.touch(readOnly);
            pos = Math.min(pos, newLength);
            data.truncate(newLength);
        }
        return this;
    }

    @Override
    public FileChannel position(long newPos) {
        this.pos = newPos;
        return this;
    }

    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        int len = src.remaining();
        if (len == 0) {
            return 0;
        }
        data.touch(readOnly);
        // positional write: the channel position is not advanced
        data.readWrite(position, src.array(),
                src.arrayOffset() + src.position(), len, true);
        src.position(src.position() + len);
        return len;
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        int len = src.remaining();
        if (len == 0) {
            return 0;
        }
        data.touch(readOnly);
        pos = data.readWrite(pos, src.array(),
                src.arrayOffset() + src.position(), len, true);
        src.position(src.position() + len);
        return len;
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        long newPos = data.readWrite(position, dst.array(),
                dst.arrayOffset() + dst.position(), len, false);
        len = (int) (newPos - position);
        if (len <= 0) {
            // nothing could be read: end of file
            return -1;
        }
        dst.position(dst.position() + len);
        return len;
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        long newPos = data.readWrite(pos, dst.array(),
                dst.arrayOffset() + dst.position(), len, false);
        len = (int) (newPos - pos);
        if (len <= 0) {
            return -1;
        }
        dst.position(dst.position() + len);
        pos = newPos;
        return len;
    }

    @Override
    public long position() {
        return pos;
    }

    @Override
    public void implCloseChannel() throws IOException {
        pos = 0;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        // do nothing - the data already lives in memory
    }

    /**
     * Locking is implemented on the shared data object, so locks are
     * honored across all channels opened on the same in-memory file.
     */
    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        if (shared) {
            if (!data.lockShared()) {
                return null;
            }
        } else {
            if (!data.lockExclusive()) {
                return null;
            }
        }
        return new FileLock(new FakeFileChannel(), position, size, shared) {

            @Override
            public boolean isValid() {
                return true;
            }

            @Override
            public void release() throws IOException {
                data.unlock();
            }
        };
    }

    @Override
    public String toString() {
        return data.getName();
    }
}
/**
* This class contains the data of an in-memory random access file.
* Data compression using the LZF algorithm is supported as well.
*/
class FileMemData {

    private static final int CACHE_SIZE = 8;
    // pages of 1 KB
    private static final int BLOCK_SIZE_SHIFT = 10;
    private static final int BLOCK_SIZE = 1 << BLOCK_SIZE_SHIFT;
    private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1;
    private static final CompressLZF LZF = new CompressLZF();
    // scratch buffer shared by all files; access is guarded by
    // synchronized (LZF) (the static initializer runs before any contention)
    private static final byte[] BUFFER = new byte[BLOCK_SIZE * 2];
    private static final byte[] COMPRESSED_EMPTY_BLOCK;

    private static final Cache<CompressItem, CompressItem> COMPRESS_LATER =
            new Cache<>(CACHE_SIZE);

    private String name;
    private final int id;
    private final boolean compress;
    private long length;
    // one atomic reference per page; the array itself is replaced wholesale
    // on resize, so readers snapshot the field into a local first
    private AtomicReference<byte[]>[] data;
    private long lastModified;
    private boolean isReadOnly;
    private boolean isLockedExclusive;
    private int sharedLockCount;

    static {
        byte[] n = new byte[BLOCK_SIZE];
        int len = LZF.compress(n, BLOCK_SIZE, BUFFER, 0);
        COMPRESSED_EMPTY_BLOCK = Arrays.copyOf(BUFFER, len);
    }

    @SuppressWarnings("unchecked")
    FileMemData(String name, boolean compress) {
        this.name = name;
        this.id = name.hashCode();
        this.compress = compress;
        this.data = new AtomicReference[0];
        lastModified = System.currentTimeMillis();
    }

    /**
     * Get the page if it exists.
     *
     * @param page the page id
     * @return the byte array, or null
     */
    byte[] getPage(int page) {
        AtomicReference<byte[]>[] b = data;
        if (page >= b.length) {
            return null;
        }
        return b[page].get();
    }

    /**
     * Set the page data.
     *
     * @param page the page id
     * @param oldData the old data
     * @param newData the new data
     * @param force whether the data should be overwritten even if the old data
     *            doesn't match
     */
    void setPage(int page, byte[] oldData, byte[] newData, boolean force) {
        AtomicReference<byte[]>[] b = data;
        if (page >= b.length) {
            return;
        }
        if (force) {
            b[page].set(newData);
        } else {
            // CAS: lose the race silently if another thread updated the page
            b[page].compareAndSet(oldData, newData);
        }
    }

    int getId() {
        return id;
    }

    /**
     * Lock the file in exclusive mode if possible.
     *
     * @return if locking was successful
     */
    synchronized boolean lockExclusive() {
        if (sharedLockCount > 0 || isLockedExclusive) {
            return false;
        }
        isLockedExclusive = true;
        return true;
    }

    /**
     * Lock the file in shared mode if possible.
     *
     * @return if locking was successful
     */
    synchronized boolean lockShared() {
        if (isLockedExclusive) {
            return false;
        }
        sharedLockCount++;
        return true;
    }

    /**
     * Unlock the file.
     */
    synchronized void unlock() {
        if (isLockedExclusive) {
            isLockedExclusive = false;
        } else {
            sharedLockCount = Math.max(0, sharedLockCount - 1);
        }
    }

    /**
     * This small cache compresses the data if an element leaves the cache.
     */
    static class Cache<K, V> extends LinkedHashMap<K, V> {

        private static final long serialVersionUID = 1L;
        private final int size;

        Cache(int size) {
            // access order: least recently used entries are evicted first
            super(size, (float) 0.75, true);
            this.size = size;
        }

        @Override
        public synchronized V put(K key, V value) {
            return super.put(key, value);
        }

        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            if (size() < size) {
                return false;
            }
            // evicting an entry compresses the page it points to
            CompressItem c = (CompressItem) eldest.getKey();
            c.file.compress(c.page);
            return true;
        }
    }

    /**
     * Points to a block of bytes that needs to be compressed.
     */
    static class CompressItem {

        /**
         * The file.
         */
        FileMemData file;

        /**
         * The page to compress.
         */
        int page;

        @Override
        public int hashCode() {
            return page ^ file.getId();
        }

        @Override
        public boolean equals(Object o) {
            if (o instanceof CompressItem) {
                CompressItem c = (CompressItem) o;
                return c.page == page && c.file == file;
            }
            return false;
        }
    }

    // schedule a page for compression; it is compressed once evicted from
    // the COMPRESS_LATER cache
    private void compressLater(int page) {
        CompressItem c = new CompressItem();
        c.file = this;
        c.page = page;
        synchronized (LZF) {
            COMPRESS_LATER.put(c, c);
        }
    }

    // return the uncompressed form of a page, expanding it if necessary
    private byte[] expand(int page) {
        byte[] d = getPage(page);
        if (d.length == BLOCK_SIZE) {
            // already uncompressed
            return d;
        }
        byte[] out = new byte[BLOCK_SIZE];
        if (d != COMPRESSED_EMPTY_BLOCK) {
            synchronized (LZF) {
                LZF.expand(d, 0, d.length, out, 0, BLOCK_SIZE);
            }
        }
        setPage(page, d, out, false);
        return out;
    }

    /**
     * Compress the data in a byte array.
     *
     * @param page which page to compress
     */
    void compress(int page) {
        byte[] old = getPage(page);
        if (old == null || old.length != BLOCK_SIZE) {
            // not yet initialized or already compressed
            return;
        }
        synchronized (LZF) {
            int len = LZF.compress(old, BLOCK_SIZE, BUFFER, 0);
            if (len <= BLOCK_SIZE) {
                byte[] d = Arrays.copyOf(BUFFER, len);
                // maybe data was changed in the meantime
                setPage(page, old, d, false);
            }
        }
    }

    /**
     * Update the last modified time.
     *
     * @param openReadOnly if the file was opened in read-only mode
     */
    void touch(boolean openReadOnly) throws IOException {
        if (isReadOnly || openReadOnly) {
            throw new IOException("Read only");
        }
        lastModified = System.currentTimeMillis();
    }

    /**
     * Get the file length.
     *
     * @return the length
     */
    long length() {
        return length;
    }

    /**
     * Truncate the file.
     *
     * @param newLength the new length
     */
    void truncate(long newLength) {
        changeLength(newLength);
        long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE);
        if (end != newLength) {
            // zero out the tail of the (kept) partial last page
            int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT);
            byte[] d = expand(lastPage);
            byte[] d2 = Arrays.copyOf(d, d.length);
            for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) {
                d2[i] = 0;
            }
            setPage(lastPage, d, d2, true);
            if (compress) {
                compressLater(lastPage);
            }
        }
    }

    // resize the page array to match the new length; new pages start out
    // as the shared compressed empty block
    private void changeLength(long len) {
        length = len;
        len = MathUtils.roundUpLong(len, BLOCK_SIZE);
        int blocks = (int) (len >>> BLOCK_SIZE_SHIFT);
        if (blocks != data.length) {
            AtomicReference<byte[]>[] n = Arrays.copyOf(data, blocks);
            for (int i = data.length; i < blocks; i++) {
                n[i] = new AtomicReference<>(COMPRESSED_EMPTY_BLOCK);
            }
            data = n;
        }
    }

    /**
     * Read or write.
     *
     * @param pos the position
     * @param b the byte array
     * @param off the offset within the byte array
     * @param len the number of bytes
     * @param write true for writing
     * @return the new position
     */
    long readWrite(long pos, byte[] b, int off, int len, boolean write) {
        long end = pos + len;
        if (end > length) {
            if (write) {
                // writes past the end grow the file
                changeLength(end);
            } else {
                // reads are clamped to the current length
                len = (int) (length - pos);
            }
        }
        while (len > 0) {
            // process at most up to the current page boundary
            int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK));
            int page = (int) (pos >>> BLOCK_SIZE_SHIFT);
            byte[] block = expand(page);
            int blockOffset = (int) (pos & BLOCK_SIZE_MASK);
            if (write) {
                // copy-on-write so concurrent readers see a consistent page
                byte[] p2 = Arrays.copyOf(block, block.length);
                System.arraycopy(b, off, p2, blockOffset, l);
                setPage(page, block, p2, true);
            } else {
                System.arraycopy(block, blockOffset, b, off, l);
            }
            if (compress) {
                compressLater(page);
            }
            off += l;
            pos += l;
            len -= l;
        }
        return pos;
    }

    /**
     * Set the file name.
     *
     * @param name the name
     */
    void setName(String name) {
        this.name = name;
    }

    /**
     * Get the file name
     *
     * @return the name
     */
    String getName() {
        return name;
    }

    /**
     * Get the last modified time.
     *
     * @return the time
     */
    long getLastModified() {
        return lastModified;
    }

    /**
     * Check whether writing is allowed.
     *
     * @return true if it is
     */
    boolean canWrite() {
        return !isReadOnly;
    }

    /**
     * Set the read-only flag.
     *
     * @return true
     */
    boolean setReadOnly() {
        isReadOnly = true;
        return true;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathNio.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
/**
* This file system stores files on disk and uses java.nio to access the files.
* This class uses FileChannel.
*/
public class FilePathNio extends FilePathWrapper {

    @Override
    public FileChannel open(String mode) throws IOException {
        // strip the "nio:" scheme prefix to obtain the real file name
        String fileName = name.substring(getScheme().length() + 1);
        return new FileNio(fileName, mode);
    }

    @Override
    public String getScheme() {
        return "nio";
    }
}
/**
* File which uses NIO FileChannel.
*/
class FileNio extends FileBase {

    private final String name;
    private final FileChannel channel;

    FileNio(String fileName, String mode) throws IOException {
        this.name = fileName;
        channel = new RandomAccessFile(fileName, mode).getChannel();
    }

    @Override
    public void implCloseChannel() throws IOException {
        channel.close();
    }

    @Override
    public long position() throws IOException {
        return channel.position();
    }

    @Override
    public long size() throws IOException {
        return channel.size();
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        return channel.read(dst);
    }

    @Override
    public FileChannel position(long pos) throws IOException {
        channel.position(pos);
        return this;
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        return channel.read(dst, position);
    }

    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        return channel.write(src, position);
    }

    /**
     * Truncate the file, working around FileChannel implementations that
     * move the position when they should not (and vice versa).
     */
    @Override
    public FileChannel truncate(long newLength) throws IOException {
        long size = channel.size();
        if (newLength < size) {
            long pos = channel.position();
            channel.truncate(newLength);
            long newPos = channel.position();
            if (pos < newLength) {
                // position should stay
                // in theory, this should not be needed
                if (newPos != pos) {
                    channel.position(pos);
                }
            } else if (newPos > newLength) {
                // looks like a bug in this FileChannel implementation, as
                // the documentation says the position needs to be changed
                channel.position(newLength);
            }
        }
        return this;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        channel.force(metaData);
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        try {
            return channel.write(src);
        } catch (NonWritableChannelException e) {
            // translate to the IOException callers expect
            throw new IOException("read only");
        }
    }

    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        return channel.tryLock(position, size, shared);
    }

    @Override
    public String toString() {
        return "nio:" + name;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathNioMapped.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.EOFException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.ref.WeakReference;
import java.lang.reflect.Method;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import java.util.concurrent.TimeUnit;
import org.h2.engine.SysProperties;
/**
* This file system stores files on disk and uses java.nio to access the files.
* This class used memory mapped files.
*/
public class FilePathNioMapped extends FilePathNio {

    @Override
    public FileChannel open(String mode) throws IOException {
        // strip the "nioMapped:" scheme prefix to obtain the real file name
        String fileName = name.substring(getScheme().length() + 1);
        return new FileNioMapped(fileName, mode);
    }

    @Override
    public String getScheme() {
        return "nioMapped";
    }
}
/**
* Uses memory mapped files.
* The file size is limited to 2 GB.
*/
class FileNioMapped extends FileBase {

    private static final long GC_TIMEOUT_MS = 10_000;
    private final String name;
    private final MapMode mode;
    private RandomAccessFile file;
    private MappedByteBuffer mapped;
    private long fileLength;

    /**
     * The position within the file. Can't use the position of the mapped buffer
     * because it doesn't support seeking past the end of the file.
     */
    private int pos;

    FileNioMapped(String fileName, String mode) throws IOException {
        if ("r".equals(mode)) {
            this.mode = MapMode.READ_ONLY;
        } else {
            this.mode = MapMode.READ_WRITE;
        }
        this.name = fileName;
        file = new RandomAccessFile(fileName, mode);
        reMap();
    }

    /**
     * Flush and dispose the current mapping. Either uses the (non-public)
     * cleaner of the direct buffer, or falls back to waiting for the GC to
     * collect the buffer.
     */
    private void unMap() throws IOException {
        if (mapped == null) {
            return;
        }
        // first write all data
        mapped.force();
        // need to dispose old direct buffer, see bug
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038
        boolean useSystemGc = true;
        if (SysProperties.NIO_CLEANER_HACK) {
            try {
                Method cleanerMethod = mapped.getClass().getMethod("cleaner");
                cleanerMethod.setAccessible(true);
                Object cleaner = cleanerMethod.invoke(mapped);
                if (cleaner != null) {
                    Method clearMethod = cleaner.getClass().getMethod("clean");
                    clearMethod.invoke(cleaner);
                }
                useSystemGc = false;
            } catch (Throwable e) {
                // useSystemGc is already true
            } finally {
                // NOTE(review): mapped is nulled here even when the cleaner
                // hack failed, so in that case the WeakReference below wraps
                // null and the GC wait loop exits immediately - confirm this
                // fallback behaves as intended when the hack is enabled
                mapped = null;
            }
        }
        if (useSystemGc) {
            WeakReference<MappedByteBuffer> bufferWeakRef =
                    new WeakReference<>(mapped);
            mapped = null;
            long start = System.nanoTime();
            while (bufferWeakRef.get() != null) {
                if (System.nanoTime() - start > TimeUnit.MILLISECONDS.toNanos(GC_TIMEOUT_MS)) {
                    throw new IOException("Timeout (" + GC_TIMEOUT_MS
                            + " ms) reached while trying to GC mapped buffer");
                }
                System.gc();
                Thread.yield();
            }
        }
    }

    /**
     * Re-map byte buffer into memory, called when file size has changed or file
     * was created.
     */
    private void reMap() throws IOException {
        int oldPos = 0;
        if (mapped != null) {
            oldPos = pos;
            unMap();
        }
        fileLength = file.length();
        checkFileSizeLimit(fileLength);
        // maps new MappedByteBuffer; the old one is disposed during GC
        mapped = file.getChannel().map(mode, 0, fileLength);
        int limit = mapped.limit();
        int capacity = mapped.capacity();
        if (limit < fileLength || capacity < fileLength) {
            throw new IOException("Unable to map: length=" + limit +
                    " capacity=" + capacity + " length=" + fileLength);
        }
        if (SysProperties.NIO_LOAD_MAPPED) {
            mapped.load();
        }
        this.pos = Math.min(oldPos, (int) fileLength);
    }

    // mapped buffers are indexed with int, so files over 2 GB can't be mapped
    private static void checkFileSizeLimit(long length) throws IOException {
        if (length > Integer.MAX_VALUE) {
            throw new IOException(
                    "File over 2GB is not supported yet when using this file system");
        }
    }

    @Override
    public void implCloseChannel() throws IOException {
        if (file != null) {
            unMap();
            file.close();
            file = null;
        }
    }

    @Override
    public long position() {
        return pos;
    }

    @Override
    public String toString() {
        return "nioMapped:" + name;
    }

    @Override
    public synchronized long size() throws IOException {
        return fileLength;
    }

    @Override
    public synchronized int read(ByteBuffer dst) throws IOException {
        try {
            int len = dst.remaining();
            if (len == 0) {
                return 0;
            }
            len = (int) Math.min(len, fileLength - pos);
            if (len <= 0) {
                return -1;
            }
            mapped.position(pos);
            mapped.get(dst.array(), dst.arrayOffset() + dst.position(), len);
            dst.position(dst.position() + len);
            pos += len;
            return len;
        } catch (IllegalArgumentException e) {
            // translate buffer range errors into the expected EOF
            EOFException e2 = new EOFException("EOF");
            e2.initCause(e);
            throw e2;
        } catch (BufferUnderflowException e) {
            EOFException e2 = new EOFException("EOF");
            e2.initCause(e);
            throw e2;
        }
    }

    @Override
    public FileChannel position(long pos) throws IOException {
        checkFileSizeLimit(pos);
        this.pos = (int) pos;
        return this;
    }

    @Override
    public synchronized FileChannel truncate(long newLength) throws IOException {
        // compatibility with JDK FileChannel#truncate
        if (mode == MapMode.READ_ONLY) {
            throw new NonWritableChannelException();
        }
        if (newLength < size()) {
            setFileLength(newLength);
        }
        return this;
    }

    /**
     * Resize the file: unmap, change the length (retrying while the OS
     * still holds the old mapping), then map again.
     */
    public synchronized void setFileLength(long newLength) throws IOException {
        checkFileSizeLimit(newLength);
        int oldPos = pos;
        unMap();
        for (int i = 0;; i++) {
            try {
                file.setLength(newLength);
                break;
            } catch (IOException e) {
                // on Windows the old mapping may not be released yet
                if (i > 16 || !e.toString().contains("user-mapped section open")) {
                    throw e;
                }
            }
            System.gc();
        }
        reMap();
        pos = (int) Math.min(newLength, oldPos);
    }

    @Override
    public void force(boolean metaData) throws IOException {
        mapped.force();
        file.getFD().sync();
    }

    @Override
    public synchronized int write(ByteBuffer src) throws IOException {
        int len = src.remaining();
        // check if need to expand file
        if (mapped.capacity() < pos + len) {
            setFileLength(pos + len);
        }
        mapped.position(pos);
        mapped.put(src);
        pos += len;
        return len;
    }

    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        return file.getChannel().tryLock(position, size, shared);
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathNioMem.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.channels.NonWritableChannelException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.h2.api.ErrorCode;
import org.h2.compress.CompressLZF;
import org.h2.message.DbException;
import org.h2.util.MathUtils;
import org.h2.util.New;
/**
 * This file system keeps files fully in memory. There is an option to compress
 * file blocks to save memory.
 */
public class FilePathNioMem extends FilePath {
    // registry of all in-memory files, keyed by canonical name;
    // all access is synchronized on the map itself
    private static final TreeMap<String, FileNioMemData> MEMORY_FILES =
            new TreeMap<>();
    /**
     * The percentage of uncompressed (cached) entries.
     */
    float compressLaterCachePercent = 1;
    /**
     * Get a path object for the given path string (canonicalized).
     *
     * @param path the path
     * @return the path object
     */
    @Override
    public FilePathNioMem getPath(String path) {
        FilePathNioMem p = new FilePathNioMem();
        p.name = getCanonicalPath(path);
        return p;
    }
    @Override
    public long size() {
        return getMemoryFile().length();
    }
    @Override
    public void moveTo(FilePath newName, boolean atomicReplace) {
        synchronized (MEMORY_FILES) {
            // unless an atomic replace was requested, renaming over a
            // different existing file is an error
            if (!atomicReplace && !name.equals(newName.name) &&
                    MEMORY_FILES.containsKey(newName.name)) {
                throw DbException.get(ErrorCode.FILE_RENAME_FAILED_2, name, newName + " (exists)");
            }
            FileNioMemData f = getMemoryFile();
            f.setName(newName.name);
            MEMORY_FILES.remove(name);
            MEMORY_FILES.put(newName.name, f);
        }
    }
    @Override
    public boolean createFile() {
        synchronized (MEMORY_FILES) {
            if (exists()) {
                return false;
            }
            // getMemoryFile() creates the file entry as a side effect
            getMemoryFile();
        }
        return true;
    }
    @Override
    public boolean exists() {
        if (isRoot()) {
            return true;
        }
        synchronized (MEMORY_FILES) {
            return MEMORY_FILES.get(name) != null;
        }
    }
    @Override
    public void delete() {
        if (isRoot()) {
            // the root itself cannot be deleted
            return;
        }
        synchronized (MEMORY_FILES) {
            MEMORY_FILES.remove(name);
        }
    }
    @Override
    public List<FilePath> newDirectoryStream() {
        ArrayList<FilePath> list = New.arrayList();
        synchronized (MEMORY_FILES) {
            // the map is sorted, so all names under this prefix form a
            // contiguous range starting at tailMap(name)
            for (String n : MEMORY_FILES.tailMap(name).keySet()) {
                if (n.startsWith(name)) {
                    list.add(getPath(n));
                } else {
                    break;
                }
            }
            return list;
        }
    }
    @Override
    public boolean setReadOnly() {
        return getMemoryFile().setReadOnly();
    }
    @Override
    public boolean canWrite() {
        return getMemoryFile().canWrite();
    }
    @Override
    public FilePathNioMem getParent() {
        int idx = name.lastIndexOf('/');
        return idx < 0 ? null : getPath(name.substring(0, idx));
    }
    @Override
    public boolean isDirectory() {
        if (isRoot()) {
            return true;
        }
        // TODO in memory file system currently
        // does not really support directories
        synchronized (MEMORY_FILES) {
            // any name that is not a registered file is treated as a directory
            return MEMORY_FILES.get(name) == null;
        }
    }
    @Override
    public boolean isAbsolute() {
        // TODO relative files are not supported
        return true;
    }
    @Override
    public FilePathNioMem toRealPath() {
        return this;
    }
    @Override
    public long lastModified() {
        return getMemoryFile().getLastModified();
    }
    @Override
    public void createDirectory() {
        if (exists() && isDirectory()) {
            throw DbException.get(ErrorCode.FILE_CREATION_FAILED_1,
                    name + " (a file with this name already exists)");
        }
        // TODO directories are not really supported
    }
    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        FileNioMemData obj = getMemoryFile();
        FileNioMem m = new FileNioMem(obj, false);
        return new FileChannelOutputStream(m, append);
    }
    @Override
    public InputStream newInputStream() {
        FileNioMemData obj = getMemoryFile();
        FileNioMem m = new FileNioMem(obj, true);
        return new FileChannelInputStream(m, true);
    }
    @Override
    public FileChannel open(String mode) {
        FileNioMemData obj = getMemoryFile();
        // only plain "r" opens the file read-only
        return new FileNioMem(obj, "r".equals(mode));
    }
    /**
     * Get (or create) the in-memory data object for this file name.
     *
     * @return the file data
     */
    private FileNioMemData getMemoryFile() {
        synchronized (MEMORY_FILES) {
            FileNioMemData m = MEMORY_FILES.get(name);
            if (m == null) {
                m = new FileNioMemData(name, compressed(), compressLaterCachePercent);
                MEMORY_FILES.put(name, m);
            }
            return m;
        }
    }
    /**
     * Whether this path is the file system root ("scheme:").
     *
     * @return true if it is the root
     */
    protected boolean isRoot() {
        return name.equals(getScheme() + ":");
    }
    /**
     * Get the canonical path of a file (with backslashes replaced with forward
     * slashes).
     *
     * @param fileName the file name
     * @return the canonical path
     */
    protected static String getCanonicalPath(String fileName) {
        fileName = fileName.replace('\\', '/');
        int idx = fileName.lastIndexOf(':') + 1;
        // ensure a "/" directly after the scheme prefix
        if (fileName.length() > idx && fileName.charAt(idx) != '/') {
            fileName = fileName.substring(0, idx) + "/" + fileName.substring(idx);
        }
        return fileName;
    }
    @Override
    public String getScheme() {
        return "nioMemFS";
    }
    /**
     * Whether the file should be compressed.
     *
     * @return true if it should be compressed.
     */
    boolean compressed() {
        return false;
    }
}
/**
 * A memory file system that compresses blocks to conserve memory.
 */
class FilePathNioMemLZF extends FilePathNioMem {

    @Override
    public FilePathNioMem getPath(String path) {
        if (!path.startsWith(getScheme())) {
            throw new IllegalArgumentException(path +
                    " doesn't start with " + getScheme());
        }
        // an optional cache percentage may be embedded between two colons,
        // for example nioMemLZF:12:fileName
        final int first = path.indexOf(':');
        final int last = path.lastIndexOf(':');
        final FilePathNioMemLZF result = new FilePathNioMemLZF();
        if (first != -1 && first != last) {
            result.compressLaterCachePercent =
                    Float.parseFloat(path.substring(first + 1, last));
        }
        result.name = getCanonicalPath(path);
        return result;
    }

    /** Compression is enabled for this file system. */
    @Override
    boolean compressed() {
        return true;
    }

    /** The root path is any name ending with the scheme separator colon. */
    @Override
    protected boolean isRoot() {
        return name.lastIndexOf(':') == name.length() - 1;
    }

    @Override
    public String getScheme() {
        return "nioMemLZF";
    }
}
/**
 * This class represents an in-memory file.
 */
class FileNioMem extends FileBase {

    /**
     * The file data.
     */
    final FileNioMemData data;

    /** Whether this channel was opened read-only. */
    private final boolean readOnly;

    /** The current position; a long so files may exceed 2 GB. */
    private long pos;

    FileNioMem(FileNioMemData data, boolean readOnly) {
        this.data = data;
        this.readOnly = readOnly;
    }

    @Override
    public long size() {
        return data.length();
    }

    /**
     * Truncate the file; never grows it (for compatibility with the JDK
     * FileChannel#truncate contract).
     *
     * @param newLength the new length
     * @return this channel
     */
    @Override
    public FileChannel truncate(long newLength) throws IOException {
        // compatibility with JDK FileChannel#truncate
        if (readOnly) {
            throw new NonWritableChannelException();
        }
        if (newLength < size()) {
            data.touch(readOnly);
            pos = Math.min(pos, newLength);
            data.truncate(newLength);
        }
        return this;
    }

    @Override
    public FileChannel position(long newPos) {
        // bug fix: previously the position was narrowed with "(int) newPos",
        // silently truncating positions at or beyond 2 GB
        this.pos = newPos;
        return this;
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        int len = src.remaining();
        if (len == 0) {
            return 0;
        }
        data.touch(readOnly);
        // offset is 0 because we start writing from src.position()
        pos = data.readWrite(pos, src, 0, len, true);
        src.position(src.position() + len);
        return len;
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        long newPos = data.readWrite(pos, dst, dst.position(), len, false);
        len = (int) (newPos - pos);
        if (len <= 0) {
            // end of file
            return -1;
        }
        dst.position(dst.position() + len);
        pos = newPos;
        return len;
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        long newPos;
        newPos = data.readWrite(position, dst, dst.position(), len, false);
        len = (int) (newPos - position);
        if (len <= 0) {
            // end of file
            return -1;
        }
        dst.position(dst.position() + len);
        // a positional read does not change the channel position
        return len;
    }

    @Override
    public long position() {
        return pos;
    }

    @Override
    public void implCloseChannel() throws IOException {
        pos = 0;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        // do nothing: the data only lives in memory
    }

    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        if (shared) {
            if (!data.lockShared()) {
                return null;
            }
        } else {
            if (!data.lockExclusive()) {
                return null;
            }
        }
        // the FakeFileChannel only satisfies the FileLock constructor;
        // unlocking goes through the data object
        return new FileLock(new FakeFileChannel(), position, size, shared) {
            @Override
            public boolean isValid() {
                return true;
            }
            @Override
            public void release() throws IOException {
                data.unlock();
            }
        };
    }

    @Override
    public String toString() {
        return data.getName();
    }
}
/**
 * This class contains the data of an in-memory random access file.
 * Data compression using the LZF algorithm is supported as well.
 */
class FileNioMemData {
    private static final int CACHE_MIN_SIZE = 8;
    // files are stored in blocks of 64 KB
    private static final int BLOCK_SIZE_SHIFT = 16;
    private static final int BLOCK_SIZE = 1 << BLOCK_SIZE_SHIFT;
    private static final int BLOCK_SIZE_MASK = BLOCK_SIZE - 1;
    // the compressed form of an all-zero block, shared by all blocks that
    // have never been written to (initialized in the static block below)
    private static final ByteBuffer COMPRESSED_EMPTY_BLOCK;
    // one CompressLZF instance per thread
    private static final ThreadLocal<CompressLZF> LZF_THREAD_LOCAL =
            new ThreadLocal<CompressLZF>() {
        @Override
        protected CompressLZF initialValue() {
            return new CompressLZF();
        }
    };
    /** the output buffer when compressing */
    private static final ThreadLocal<byte[] > COMPRESS_OUT_BUF_THREAD_LOCAL =
            new ThreadLocal<byte[] >() {
        @Override
        protected byte[] initialValue() {
            return new byte[BLOCK_SIZE * 2];
        }
    };
    /**
     * The hash code of the name.
     */
    final int nameHashCode;
    // pages evicted from this cache get compressed (see removeEldestEntry)
    private final CompressLaterCache<CompressItem, CompressItem> compressLaterCache =
        new CompressLaterCache<>(CACHE_MIN_SIZE);
    private String name;
    private final boolean compress;
    private final float compressLaterCachePercent;
    // the logical file length in bytes
    private long length;
    // one entry per block; each holds either an expanded (BLOCK_SIZE)
    // buffer or a compressed (smaller) one
    private AtomicReference<ByteBuffer>[] buffers;
    private long lastModified;
    private boolean isReadOnly;
    private boolean isLockedExclusive;
    private int sharedLockCount;
    // guards length / block-array changes against concurrent reads and writes
    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    static {
        // pre-compress an all-zero block once; new blocks share this buffer
        // until they are first written to
        final byte[] n = new byte[BLOCK_SIZE];
        final byte[] output = new byte[BLOCK_SIZE * 2];
        int len = new CompressLZF().compress(n, BLOCK_SIZE, output, 0);
        COMPRESSED_EMPTY_BLOCK = ByteBuffer.allocateDirect(len);
        COMPRESSED_EMPTY_BLOCK.put(output, 0, len);
    }
    @SuppressWarnings("unchecked")
    FileNioMemData(String name, boolean compress, float compressLaterCachePercent) {
        this.name = name;
        this.nameHashCode = name.hashCode();
        this.compress = compress;
        this.compressLaterCachePercent = compressLaterCachePercent;
        buffers = new AtomicReference[0];
        lastModified = System.currentTimeMillis();
    }
    /**
     * Lock the file in exclusive mode if possible.
     *
     * @return if locking was successful
     */
    synchronized boolean lockExclusive() {
        if (sharedLockCount > 0 || isLockedExclusive) {
            return false;
        }
        isLockedExclusive = true;
        return true;
    }
    /**
     * Lock the file in shared mode if possible.
     *
     * @return if locking was successful
     */
    synchronized boolean lockShared() {
        if (isLockedExclusive) {
            return false;
        }
        sharedLockCount++;
        return true;
    }
    /**
     * Unlock the file (releases the exclusive lock, or one shared lock).
     */
    synchronized void unlock() {
        if (isLockedExclusive) {
            isLockedExclusive = false;
        } else {
            sharedLockCount = Math.max(0, sharedLockCount - 1);
        }
    }
    /**
     * This small cache compresses the data if an element leaves the cache.
     */
    static class CompressLaterCache<K, V> extends LinkedHashMap<K, V> {
        private static final long serialVersionUID = 1L;
        private int size;
        CompressLaterCache(int size) {
            // access-ordered, so the least recently used page is evicted first
            super(size, (float) 0.75, true);
            this.size = size;
        }
        @Override
        public synchronized V put(K key, V value) {
            return super.put(key, value);
        }
        @Override
        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            if (size() < size) {
                return false;
            }
            // compress the page that drops out of the cache
            CompressItem c = (CompressItem) eldest.getKey();
            c.data.compressPage(c.page);
            return true;
        }
        public void setCacheSize(int size) {
            this.size = size;
        }
    }
    /**
     * Represents a compressed item.
     */
    static class CompressItem {
        /**
         * The file data.
         */
        public final FileNioMemData data;
        /**
         * The page to compress.
         */
        public final int page;
        public CompressItem(FileNioMemData data, int page) {
            this.data = data;
            this.page = page;
        }
        @Override
        public int hashCode() {
            return page ^ data.nameHashCode;
        }
        @Override
        public boolean equals(Object o) {
            if (o instanceof CompressItem) {
                CompressItem c = (CompressItem) o;
                return c.data == data && c.page == page;
            }
            return false;
        }
    }
    /**
     * Mark a page as recently used, so it is only compressed once it falls
     * out of the cache.
     *
     * @param page the page index
     */
    private void addToCompressLaterCache(int page) {
        CompressItem c = new CompressItem(this, page);
        compressLaterCache.put(c, c);
    }
    /**
     * Get the uncompressed buffer of a page, decompressing it first if
     * necessary.
     *
     * @param page the page index
     * @return the uncompressed buffer (capacity BLOCK_SIZE)
     */
    private ByteBuffer expandPage(int page) {
        final ByteBuffer d = buffers[page].get();
        if (d.capacity() == BLOCK_SIZE) {
            // already expanded, or not compressed
            return d;
        }
        synchronized (d) {
            // double-check after acquiring the buffer lock
            if (d.capacity() == BLOCK_SIZE) {
                return d;
            }
            ByteBuffer out = ByteBuffer.allocateDirect(BLOCK_SIZE);
            if (d != COMPRESSED_EMPTY_BLOCK) {
                // the shared empty block expands to all zeros, so only
                // non-empty blocks need actual decompression
                d.position(0);
                CompressLZF.expand(d, out);
            }
            // only install the expanded buffer if no other thread has
            // replaced it in the meantime
            buffers[page].compareAndSet(d, out);
            return out;
        }
    }
    /**
     * Compress the data in a byte array.
     *
     * @param page which page to compress
     */
    void compressPage(int page) {
        final ByteBuffer d = buffers[page].get();
        synchronized (d) {
            if (d.capacity() != BLOCK_SIZE) {
                // already compressed
                return;
            }
            final byte[] compressOutputBuffer = COMPRESS_OUT_BUF_THREAD_LOCAL.get();
            int len = LZF_THREAD_LOCAL.get().compress(d, 0, compressOutputBuffer, 0);
            // store the compressed data in a right-sized buffer
            ByteBuffer out = ByteBuffer.allocateDirect(len);
            out.put(compressOutputBuffer, 0, len);
            buffers[page].compareAndSet(d, out);
        }
    }
    /**
     * Update the last modified time.
     *
     * @param openReadOnly if the file was opened in read-only mode
     */
    void touch(boolean openReadOnly) throws IOException {
        if (isReadOnly || openReadOnly) {
            throw new IOException("Read only");
        }
        lastModified = System.currentTimeMillis();
    }
    /**
     * Get the file length.
     *
     * @return the length
     */
    long length() {
        return length;
    }
    /**
     * Truncate the file.
     *
     * @param newLength the new length
     */
    void truncate(long newLength) {
        rwLock.writeLock().lock();
        try {
            changeLength(newLength);
            long end = MathUtils.roundUpLong(newLength, BLOCK_SIZE);
            if (end != newLength) {
                // zero out the tail of the last (partial) block, so that
                // re-extending the file later reads zeros
                int lastPage = (int) (newLength >>> BLOCK_SIZE_SHIFT);
                ByteBuffer d = expandPage(lastPage);
                for (int i = (int) (newLength & BLOCK_SIZE_MASK); i < BLOCK_SIZE; i++) {
                    d.put(i, (byte) 0);
                }
                if (compress) {
                    addToCompressLaterCache(lastPage);
                }
            }
        } finally {
            rwLock.writeLock().unlock();
        }
    }
    /**
     * Set the logical length and resize the block array accordingly. New
     * blocks start out as the shared compressed empty block. The
     * compress-later cache size is scaled with the number of blocks.
     *
     * @param len the new length
     */
    @SuppressWarnings("unchecked")
    private void changeLength(long len) {
        length = len;
        len = MathUtils.roundUpLong(len, BLOCK_SIZE);
        int blocks = (int) (len >>> BLOCK_SIZE_SHIFT);
        if (blocks != buffers.length) {
            final AtomicReference<ByteBuffer>[] newBuffers = new AtomicReference[blocks];
            System.arraycopy(buffers, 0, newBuffers, 0,
                    Math.min(buffers.length, newBuffers.length));
            for (int i = buffers.length; i < blocks; i++) {
                newBuffers[i] = new AtomicReference<>(COMPRESSED_EMPTY_BLOCK);
            }
            buffers = newBuffers;
        }
        compressLaterCache.setCacheSize(Math.max(CACHE_MIN_SIZE, (int) (blocks *
                compressLaterCachePercent / 100)));
    }
    /**
     * Read or write.
     *
     * @param pos the position
     * @param b the byte array
     * @param off the offset within the byte array
     * @param len the number of bytes
     * @param write true for writing
     * @return the new position
     */
    long readWrite(long pos, ByteBuffer b, int off, int len, boolean write) {
        // writes may grow the file and replace the block array, so they take
        // the write lock; reads only need the read lock
        final java.util.concurrent.locks.Lock lock = write ? rwLock.writeLock()
                : rwLock.readLock();
        lock.lock();
        try {
            long end = pos + len;
            if (end > length) {
                if (write) {
                    // grow the file
                    changeLength(end);
                } else {
                    // clamp reads at the end of the file
                    len = (int) (length - pos);
                }
            }
            // copy block by block
            while (len > 0) {
                final int l = (int) Math.min(len, BLOCK_SIZE - (pos & BLOCK_SIZE_MASK));
                final int page = (int) (pos >>> BLOCK_SIZE_SHIFT);
                final ByteBuffer block = expandPage(page);
                int blockOffset = (int) (pos & BLOCK_SIZE_MASK);
                if (write) {
                    final ByteBuffer srcTmp = b.slice();
                    final ByteBuffer dstTmp = block.duplicate();
                    srcTmp.position(off);
                    srcTmp.limit(off + l);
                    dstTmp.position(blockOffset);
                    dstTmp.put(srcTmp);
                } else {
                    // duplicate, so this can be done concurrently
                    final ByteBuffer tmp = block.duplicate();
                    tmp.position(blockOffset);
                    tmp.limit(l + blockOffset);
                    int oldPosition = b.position();
                    b.position(off);
                    b.put(tmp);
                    // restore old position
                    b.position(oldPosition);
                }
                if (compress) {
                    // mark the page as recently used
                    addToCompressLaterCache(page);
                }
                off += l;
                pos += l;
                len -= l;
            }
            return pos;
        } finally {
            lock.unlock();
        }
    }
    /**
     * Set the file name.
     *
     * @param name the name
     */
    void setName(String name) {
        this.name = name;
    }
    /**
     * Get the file name
     *
     * @return the name
     */
    String getName() {
        return name;
    }
    /**
     * Get the last modified time.
     *
     * @return the time
     */
    long getLastModified() {
        return lastModified;
    }
    /**
     * Check whether writing is allowed.
     *
     * @return true if it is
     */
    boolean canWrite() {
        return !isReadOnly;
    }
    /**
     * Set the read-only flag.
     *
     * @return true
     */
    boolean setReadOnly() {
        isReadOnly = true;
        return true;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathRec.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.util.Arrays;
/**
 * A file system that records all write operations and can re-play them.
 */
public class FilePathRec extends FilePathWrapper {

    private static final FilePathRec INSTANCE = new FilePathRec();

    private static Recorder recorder;

    private boolean trace;

    /**
     * Register this file system with the FilePath registry.
     */
    public static void register() {
        FilePath.register(INSTANCE);
    }

    /**
     * Set the recorder class.
     *
     * @param recorder the recorder
     */
    public static void setRecorder(Recorder recorder) {
        FilePathRec.recorder = recorder;
    }

    @Override
    public boolean createFile() {
        log(Recorder.CREATE_NEW_FILE, name);
        return super.createFile();
    }

    @Override
    public FilePath createTempFile(String suffix, boolean deleteOnExit,
            boolean inTempDir) throws IOException {
        String args = unwrap(name) + ":" + suffix + ":" +
                deleteOnExit + ":" + inTempDir;
        log(Recorder.CREATE_TEMP_FILE, args);
        return super.createTempFile(suffix, deleteOnExit, inTempDir);
    }

    @Override
    public void delete() {
        log(Recorder.DELETE, name);
        super.delete();
    }

    @Override
    public FileChannel open(String mode) throws IOException {
        // the returned channel records each write operation
        return new FileRec(this, super.open(mode), name);
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        log(Recorder.OPEN_OUTPUT_STREAM, name);
        return super.newOutputStream(append);
    }

    @Override
    public void moveTo(FilePath newPath, boolean atomicReplace) {
        String args = unwrap(name) + ":" + unwrap(newPath.name);
        log(Recorder.RENAME, args);
        super.moveTo(newPath, atomicReplace);
    }

    public boolean isTrace() {
        return trace;
    }

    public void setTrace(boolean trace) {
        this.trace = trace;
    }

    /**
     * Log an operation that carries no data.
     *
     * @param op the operation
     * @param fileName the file name(s)
     */
    void log(int op, String fileName) {
        log(op, fileName, null, 0);
    }

    /**
     * Forward an operation to the registered recorder, if any.
     *
     * @param op the operation
     * @param fileName the file name
     * @param data the data or null
     * @param x the value or 0
     */
    void log(int op, String fileName, byte[] data, long x) {
        if (recorder != null) {
            recorder.log(op, fileName, data, x);
        }
    }

    /**
     * Get the prefix for this file system.
     *
     * @return the prefix
     */
    @Override
    public String getScheme() {
        return "rec";
    }
}
/**
 * A file object that records all write operations and can re-play them.
 */
class FileRec extends FileBase {

    private final FilePathRec rec;
    private final FileChannel channel;
    private final String name;

    FileRec(FilePathRec rec, FileChannel file, String fileName) {
        this.rec = rec;
        this.channel = file;
        this.name = fileName;
    }

    @Override
    public void implCloseChannel() throws IOException {
        channel.close();
    }

    @Override
    public long position() throws IOException {
        return channel.position();
    }

    @Override
    public long size() throws IOException {
        return channel.size();
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        return channel.read(dst);
    }

    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        return channel.read(dst, position);
    }

    @Override
    public FileChannel position(long pos) throws IOException {
        channel.position(pos);
        return this;
    }

    @Override
    public FileChannel truncate(long newLength) throws IOException {
        rec.log(Recorder.TRUNCATE, name, null, newLength);
        channel.truncate(newLength);
        return this;
    }

    @Override
    public void force(boolean metaData) throws IOException {
        channel.force(metaData);
    }

    /**
     * Snapshot the remaining bytes of a buffer so they can be logged
     * (previously duplicated in both write methods). Only copies when the
     * buffer does not cover its whole backing array.
     * NOTE(review): requires a heap buffer - array() throws for direct
     * buffers, same as the previous code.
     *
     * @param src the source buffer (its position/limit are not modified)
     * @return the bytes that will be written
     */
    private static byte[] getBytes(ByteBuffer src) {
        byte[] buff = src.array();
        int len = src.remaining();
        if (src.position() != 0 || len != buff.length) {
            int offset = src.arrayOffset() + src.position();
            buff = Arrays.copyOfRange(buff, offset, offset + len);
        }
        return buff;
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        byte[] buff = getBytes(src);
        int result = channel.write(src);
        rec.log(Recorder.WRITE, name, buff, channel.position());
        return result;
    }

    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        byte[] buff = getBytes(src);
        int result = channel.write(src, position);
        rec.log(Recorder.WRITE, name, buff, position);
        return result;
    }

    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        return channel.tryLock(position, size, shared);
    }

    @Override
    public String toString() {
        return name;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathRetryOnInterrupt.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
/**
 * A file system that re-opens and re-tries the operation if the file was
 * closed, because a thread was interrupted. This will clear the interrupt flag.
 * It is mainly useful for applications that call Thread.interrupt by mistake.
 */
public class FilePathRetryOnInterrupt extends FilePathWrapper {

    /**
     * The prefix.
     */
    static final String SCHEME = "retry";

    @Override
    public String getScheme() {
        return SCHEME;
    }

    @Override
    public FileChannel open(String mode) throws IOException {
        // strip the "retry:" prefix before opening the underlying file
        String baseName = name.substring(getScheme().length() + 1);
        return new FileRetryOnInterrupt(baseName, mode);
    }
}
/**
 * A file object that re-opens and re-tries the operation if the file was
 * closed.
 */
class FileRetryOnInterrupt extends FileBase {
    private final String fileName;
    private final String mode;
    // the current channel; replaced by reopen() after an interrupt
    private FileChannel channel;
    // the last lock handed out by tryLock, so it can be re-acquired on reopen
    private FileLockRetry lock;
    FileRetryOnInterrupt(String fileName, String mode) throws IOException {
        this.fileName = fileName;
        this.mode = mode;
        open();
    }
    private void open() throws IOException {
        channel = FileUtils.open(fileName, mode);
    }
    /**
     * Handle an I/O failure: if the channel was closed because of an
     * interrupt, re-open it so the caller's retry loop can continue;
     * otherwise, or after too many attempts, re-throw the exception.
     *
     * @param i the retry attempt number
     * @param e the exception that aborted the operation
     */
    private void reopen(int i, IOException e) throws IOException {
        if (i > 20) {
            // give up after 20 retries
            throw e;
        }
        if (!(e instanceof ClosedByInterruptException) &&
                !(e instanceof ClosedChannelException)) {
            // only interrupt-related channel closes are retried
            throw e;
        }
        // clear the interrupt flag, to avoid re-opening many times
        Thread.interrupted();
        FileChannel before = channel;
        // ensure we don't re-open concurrently;
        // sometimes we don't re-open, which is fine,
        // as this method is called in a loop
        synchronized (this) {
            if (before == channel) {
                open();
                reLock();
            }
        }
    }
    /**
     * Re-acquire the file lock on the newly opened channel, keeping the
     * FileLockRetry wrapper held by the caller valid.
     */
    private void reLock() throws IOException {
        if (lock == null) {
            return;
        }
        try {
            // release the lock of the old (closed) channel
            lock.base.release();
        } catch (IOException e) {
            // ignore
        }
        FileLock l2 = channel.tryLock(lock.position(), lock.size(), lock.isShared());
        if (l2 == null) {
            throw new IOException("Re-locking failed");
        }
        lock.base = l2;
    }
    @Override
    public void implCloseChannel() throws IOException {
        try {
            channel.close();
        } catch (IOException e) {
            // ignore
        }
    }
    @Override
    public long position() throws IOException {
        for (int i = 0;; i++) {
            try {
                return channel.position();
            } catch (IOException e) {
                reopen(i, e);
            }
        }
    }
    @Override
    public long size() throws IOException {
        for (int i = 0;; i++) {
            try {
                return channel.size();
            } catch (IOException e) {
                reopen(i, e);
            }
        }
    }
    @Override
    public int read(ByteBuffer dst) throws IOException {
        // remember the position: the re-opened channel starts over
        long pos = position();
        for (int i = 0;; i++) {
            try {
                return channel.read(dst);
            } catch (IOException e) {
                reopen(i, e);
                // restore the position before retrying
                position(pos);
            }
        }
    }
    @Override
    public int read(ByteBuffer dst, long position) throws IOException {
        for (int i = 0;; i++) {
            try {
                return channel.read(dst, position);
            } catch (IOException e) {
                reopen(i, e);
            }
        }
    }
    @Override
    public FileChannel position(long pos) throws IOException {
        for (int i = 0;; i++) {
            try {
                channel.position(pos);
                return this;
            } catch (IOException e) {
                reopen(i, e);
            }
        }
    }
    @Override
    public FileChannel truncate(long newLength) throws IOException {
        for (int i = 0;; i++) {
            try {
                channel.truncate(newLength);
                return this;
            } catch (IOException e) {
                reopen(i, e);
            }
        }
    }
    @Override
    public void force(boolean metaData) throws IOException {
        for (int i = 0;; i++) {
            try {
                channel.force(metaData);
                return;
            } catch (IOException e) {
                reopen(i, e);
            }
        }
    }
    @Override
    public int write(ByteBuffer src) throws IOException {
        // remember the position: the re-opened channel starts over
        long pos = position();
        for (int i = 0;; i++) {
            try {
                return channel.write(src);
            } catch (IOException e) {
                reopen(i, e);
                // restore the position before retrying
                position(pos);
            }
        }
    }
    @Override
    public int write(ByteBuffer src, long position) throws IOException {
        for (int i = 0;; i++) {
            try {
                return channel.write(src, position);
            } catch (IOException e) {
                reopen(i, e);
            }
        }
    }
    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        FileLock l = channel.tryLock(position, size, shared);
        if (l == null) {
            return null;
        }
        // wrap the lock so it can be replaced transparently on reopen
        lock = new FileLockRetry(l, this);
        return lock;
    }
    /**
     * A wrapped file lock.
     */
    static class FileLockRetry extends FileLock {
        /**
         * The base lock.
         */
        FileLock base;
        protected FileLockRetry(FileLock base, FileChannel channel) {
            super(channel, base.position(), base.size(), base.isShared());
            this.base = base;
        }
        @Override
        public boolean isValid() {
            return base.isValid();
        }
        @Override
        public void release() throws IOException {
            base.release();
        }
    }
    @Override
    public String toString() {
        return FilePathRetryOnInterrupt.SCHEME + ":" + fileName;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathSplit.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.SequenceInputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.util.ArrayList;
import java.util.List;
import org.h2.engine.SysProperties;
import org.h2.message.DbException;
import org.h2.util.New;
/**
 * A file system that may split files into multiple smaller files.
 * (required for a FAT32 because it only support files up to 2 GB).
 */
public class FilePathSplit extends FilePathWrapper {

    private static final String PART_SUFFIX = ".part";

    @Override
    protected String getPrefix() {
        return getScheme() + ":" + parse(name)[0] + ":";
    }

    @Override
    public FilePath unwrap(String fileName) {
        return FilePath.get(parse(fileName)[1]);
    }

    /**
     * Set all existing part files read-only.
     *
     * @return the result of the last part, or false if there is none
     */
    @Override
    public boolean setReadOnly() {
        boolean result = false;
        for (int i = 0;; i++) {
            FilePath f = getBase(i);
            if (f.exists()) {
                result = f.setReadOnly();
            } else {
                break;
            }
        }
        return result;
    }

    /** Delete all part files. */
    @Override
    public void delete() {
        for (int i = 0;; i++) {
            FilePath f = getBase(i);
            if (f.exists()) {
                f.delete();
            } else {
                break;
            }
        }
    }

    /**
     * Get the latest modification time over all part files.
     */
    @Override
    public long lastModified() {
        long lastModified = 0;
        for (int i = 0;; i++) {
            FilePath f = getBase(i);
            if (f.exists()) {
                long l = f.lastModified();
                lastModified = Math.max(lastModified, l);
            } else {
                break;
            }
        }
        return lastModified;
    }

    /**
     * Get the total size (the sum of all part sizes).
     */
    @Override
    public long size() {
        long length = 0;
        for (int i = 0;; i++) {
            FilePath f = getBase(i);
            if (f.exists()) {
                length += f.size();
            } else {
                break;
            }
        }
        return length;
    }

    /**
     * List the directory, hiding the ".part" files of split files.
     */
    @Override
    public ArrayList<FilePath> newDirectoryStream() {
        List<FilePath> list = getBase().newDirectoryStream();
        ArrayList<FilePath> newList = New.arrayList();
        for (FilePath f : list) {
            if (!f.getName().endsWith(PART_SUFFIX)) {
                newList.add(wrap(f));
            }
        }
        return newList;
    }

    /**
     * Open an input stream that reads all part files in sequence.
     */
    @Override
    public InputStream newInputStream() throws IOException {
        InputStream input = getBase().newInputStream();
        for (int i = 1;; i++) {
            FilePath f = getBase(i);
            if (f.exists()) {
                InputStream i2 = f.newInputStream();
                input = new SequenceInputStream(input, i2);
            } else {
                break;
            }
        }
        return input;
    }

    /**
     * Open all existing part files and validate their sizes: every part
     * except the last must have exactly the maximum length, and the last
     * must not be longer.
     */
    @Override
    public FileChannel open(String mode) throws IOException {
        ArrayList<FileChannel> list = New.arrayList();
        list.add(getBase().open(mode));
        for (int i = 1;; i++) {
            FilePath f = getBase(i);
            if (f.exists()) {
                list.add(f.open(mode));
            } else {
                break;
            }
        }
        FileChannel[] array = list.toArray(new FileChannel[0]);
        long maxLength = array[0].size();
        long length = maxLength;
        if (array.length == 1) {
            // a single part: use the maximum part size from the name prefix
            long defaultMaxLength = getDefaultMaxLength();
            if (maxLength < defaultMaxLength) {
                maxLength = defaultMaxLength;
            }
        } else {
            if (maxLength == 0) {
                closeAndThrow(0, array, array[0], maxLength);
            }
            for (int i = 1; i < array.length - 1; i++) {
                FileChannel c = array[i];
                long l = c.size();
                length += l;
                if (l != maxLength) {
                    closeAndThrow(i, array, c, maxLength);
                }
            }
            FileChannel c = array[array.length - 1];
            long l = c.size();
            length += l;
            if (l > maxLength) {
                closeAndThrow(array.length - 1, array, c, maxLength);
            }
        }
        return new FileSplit(this, mode, array, length, maxLength);
    }

    /**
     * The default maximum part size: 2^x, where x is the size shift taken
     * from the file name prefix.
     */
    private long getDefaultMaxLength() {
        return 1L << Integer.decode(parse(name)[0]).intValue();
    }

    /**
     * Close all channels and fail with a size-mismatch error.
     *
     * @param id the part id with the wrong size
     * @param array all open channels (all are closed)
     * @param o the channel with the wrong size
     * @param maxLength the expected part length
     */
    private void closeAndThrow(int id, FileChannel[] array, FileChannel o,
            long maxLength) throws IOException {
        String message = "Expected file length: " + maxLength + " got: " +
                o.size() + " for " + getName(id);
        for (FileChannel f : array) {
            f.close();
        }
        throw new IOException(message);
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        return new FileChannelOutputStream(open("rw"), append);
    }

    /** Move all part files to the new name. */
    @Override
    public void moveTo(FilePath path, boolean atomicReplace) {
        FilePathSplit newName = (FilePathSplit) path;
        for (int i = 0;; i++) {
            FilePath o = getBase(i);
            if (o.exists()) {
                o.moveTo(newName.getBase(i), atomicReplace);
            } else {
                break;
            }
        }
    }

    /**
     * Split the file name into size and base file name.
     *
     * @param fileName the file name
     * @return an array with size and file name
     */
    private String[] parse(String fileName) {
        if (!fileName.startsWith(getScheme())) {
            DbException.throwInternalError(fileName + " doesn't start with " + getScheme());
        }
        fileName = fileName.substring(getScheme().length() + 1);
        String size;
        if (fileName.length() > 0 && Character.isDigit(fileName.charAt(0))) {
            // the size shift is embedded as "split:<sizeShift>:<fileName>"
            int idx = fileName.indexOf(':');
            size = fileName.substring(0, idx);
            // removed a dead catch of NumberFormatException that wrapped this
            // substring call: substring never throws that exception
            fileName = fileName.substring(idx + 1);
        } else {
            size = Long.toString(SysProperties.SPLIT_FILE_SIZE_SHIFT);
        }
        return new String[] { size, fileName };
    }

    /**
     * Get the file name of a part file.
     *
     * @param id the part id
     * @return the file name including the part id
     */
    FilePath getBase(int id) {
        return FilePath.get(getName(id));
    }

    // part 0 is the base file itself; later parts get ".<id>.part" appended
    private String getName(int id) {
        return id > 0 ? getBase().name + "." + id + PART_SUFFIX : getBase().name;
    }

    @Override
    public String getScheme() {
        return "split";
    }
}
/**
* A file that may be split into multiple smaller files.
*/
class FileSplit extends FileBase {
private final FilePathSplit file;
private final String mode;
private final long maxLength;
private FileChannel[] list;
private long filePointer;
private long length;
    /**
     * Create a split file channel.
     *
     * @param file the path object (used to open further part files)
     * @param mode the access mode used when opening additional parts
     * @param list the already open part channels
     * @param length the total (logical) file length
     * @param maxLength the maximum length of each part file
     */
    FileSplit(FilePathSplit file, String mode, FileChannel[] list, long length,
            long maxLength) {
        this.file = file;
        this.mode = mode;
        this.list = list;
        this.length = length;
        this.maxLength = maxLength;
    }
    /** Close all part channels. */
    @Override
    public void implCloseChannel() throws IOException {
        for (FileChannel c : list) {
            c.close();
        }
    }
    /** @return the current (logical) file position */
    @Override
    public long position() {
        return filePointer;
    }
    /** @return the total logical length over all parts */
    @Override
    public long size() {
        return length;
    }
    /**
     * Read from the given absolute position. Reads at most up to the end of
     * the part file containing the position, so fewer bytes than requested
     * may be returned.
     *
     * @param dst the target buffer
     * @param position the absolute position
     * @return the number of bytes read, or -1 at or past the end of the file
     */
    @Override
    public synchronized int read(ByteBuffer dst, long position)
            throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        // clamp to the remaining file length
        len = (int) Math.min(len, length - position);
        if (len <= 0) {
            return -1;
        }
        // offset within the part file that contains the position
        long offset = position % maxLength;
        len = (int) Math.min(len, maxLength - offset);
        FileChannel channel = getFileChannel(position);
        return channel.read(dst, offset);
    }
    /**
     * Read from the current position, at most up to the end of the current
     * part file.
     *
     * @param dst the target buffer
     * @return the number of bytes read, or -1 at the end of the file
     */
    @Override
    public int read(ByteBuffer dst) throws IOException {
        int len = dst.remaining();
        if (len == 0) {
            return 0;
        }
        // clamp to the remaining file length
        len = (int) Math.min(len, length - filePointer);
        if (len <= 0) {
            return -1;
        }
        long offset = filePointer % maxLength;
        len = (int) Math.min(len, maxLength - offset);
        FileChannel channel = getFileChannel(filePointer);
        channel.position(offset);
        len = channel.read(dst);
        // NOTE(review): if the underlying read returned -1 here the pointer
        // would move backwards; len was clamped to > 0 above, so the part is
        // expected to hold data - verify for externally truncated part files
        filePointer += len;
        return len;
    }
    /** Set the (logical) file position. */
    @Override
    public FileChannel position(long pos) {
        filePointer = pos;
        return this;
    }
    /**
     * Get the channel of the part file containing the given absolute
     * position, opening (and appending to the list) new part files as needed.
     *
     * @param position the absolute position
     * @return the channel of the containing part
     */
    private FileChannel getFileChannel(long position) throws IOException {
        int id = (int) (position / maxLength);
        while (id >= list.length) {
            // open the next part file and grow the channel array by one
            int i = list.length;
            FileChannel[] newList = new FileChannel[i + 1];
            System.arraycopy(list, 0, newList, 0, i);
            FilePath f = file.getBase(i);
            newList[i] = f.open(mode);
            list = newList;
        }
        return list[id];
    }
    /**
     * Truncate the file, deleting the part files that are no longer needed
     * and shortening the new last part. Never grows the file.
     *
     * @param newLength the new length
     * @return this channel
     */
    @Override
    public FileChannel truncate(long newLength) throws IOException {
        if (newLength >= length) {
            return this;
        }
        filePointer = Math.min(filePointer, newLength);
        int newFileCount = 1 + (int) (newLength / maxLength);
        if (newFileCount < list.length) {
            // delete some of the files
            FileChannel[] newList = new FileChannel[newFileCount];
            // delete backwards, so that truncating is somewhat transactional
            for (int i = list.length - 1; i >= newFileCount; i--) {
                // verify the file is writable
                list[i].truncate(0);
                list[i].close();
                try {
                    file.getBase(i).delete();
                } catch (DbException e) {
                    throw DbException.convertToIOException(e);
                }
            }
            System.arraycopy(list, 0, newList, 0, newList.length);
            list = newList;
        }
        // shorten the (new) last part to the remainder
        long size = newLength - maxLength * (newFileCount - 1);
        list[list.length - 1].truncate(size);
        this.length = newLength;
        return this;
    }
@Override
public void force(boolean metaData) throws IOException {
    // Flush every underlying segment channel.
    for (int i = 0; i < list.length; i++) {
        list[i].force(metaData);
    }
}
@Override
public int write(ByteBuffer src, long position) throws IOException {
    if (position >= length && position > maxLength) {
        // may need to extend and create files
        long oldFilePointer = position;
        // first position after the last complete segment
        long x = length - (length % maxLength) + maxLength;
        for (; x < position; x += maxLength) {
            if (x > length) {
                // expand the file size by writing a single byte at the end
                // of each missing segment (uses the relative write path)
                position(x - 1);
                write(ByteBuffer.wrap(new byte[1]));
            }
            // NOTE(review): this assignment is a no-op ('position' is never
            // modified in this loop); presumably it was meant to restore
            // filePointer, which position(x - 1) above changed — confirm.
            position = oldFilePointer;
        }
    }
    // Offset within the segment that contains 'position'.
    long offset = position % maxLength;
    int len = src.remaining();
    FileChannel channel = getFileChannel(position);
    // write at most up to the end of the current segment
    int l = (int) Math.min(len, maxLength - offset);
    if (l == len) {
        l = channel.write(src, offset);
    } else {
        // temporarily limit the buffer to the segment boundary
        int oldLimit = src.limit();
        src.limit(src.position() + l);
        l = channel.write(src, offset);
        src.limit(oldLimit);
    }
    length = Math.max(length, position + l);
    return l;
}
@Override
public int write(ByteBuffer src) throws IOException {
    if (filePointer >= length && filePointer > maxLength) {
        // may need to extend and create files
        long oldFilePointer = filePointer;
        // first position after the last complete segment
        long x = length - (length % maxLength) + maxLength;
        for (; x < filePointer; x += maxLength) {
            if (x > length) {
                // expand the file size by writing a single byte at the end
                // of each missing segment (position()/write() move
                // filePointer)
                position(x - 1);
                write(ByteBuffer.wrap(new byte[1]));
            }
            // restore the caller's position after the recursive write
            filePointer = oldFilePointer;
        }
    }
    // Offset within the segment that contains filePointer.
    long offset = filePointer % maxLength;
    int len = src.remaining();
    FileChannel channel = getFileChannel(filePointer);
    channel.position(offset);
    // write at most up to the end of the current segment
    int l = (int) Math.min(len, maxLength - offset);
    if (l == len) {
        l = channel.write(src);
    } else {
        // temporarily limit the buffer to the segment boundary
        int oldLimit = src.limit();
        src.limit(src.position() + l);
        l = channel.write(src);
        src.limit(oldLimit);
    }
    filePointer += l;
    length = Math.max(length, filePointer);
    return l;
}
@Override
public synchronized FileLock tryLock(long position, long size,
        boolean shared) throws IOException {
    // Locking the first segment is sufficient to guard the whole
    // virtual file, since all access goes through this channel.
    return list[0].tryLock(position, size, shared);
}
@Override
public String toString() {
    // Delegates to the wrapped path (includes the scheme prefix).
    return file.toString();
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathWrapper.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.FileChannel;
import java.util.List;
/**
* The base class for wrapping / delegating file systems such as
* the split file system.
*/
public abstract class FilePathWrapper extends FilePath {

    /** The underlying (unwrapped) path all operations delegate to. */
    private FilePath base;

    @Override
    public FilePathWrapper getPath(String path) {
        return create(path, unwrap(path));
    }

    /**
     * Create a wrapped path instance for the given base path.
     *
     * @param base the base path
     * @return the wrapped path, or null if the base path was null
     */
    public FilePathWrapper wrap(FilePath base) {
        return base == null ? null : create(getPrefix() + base.name, base);
    }

    @Override
    public FilePath unwrap() {
        return unwrap(name);
    }

    /**
     * Create a new instance of this wrapper class for the given path.
     *
     * @param path the wrapped path (including the scheme prefix)
     * @param base the base path to delegate to
     * @return the new wrapper instance
     * @throws IllegalArgumentException if the instance could not be created
     */
    private FilePathWrapper create(String path, FilePath base) {
        try {
            // getDeclaredConstructor().newInstance() instead of the
            // deprecated Class.newInstance(), which propagates checked
            // exceptions thrown by the constructor without declaring them
            FilePathWrapper p = getClass().getDeclaredConstructor().newInstance();
            p.name = path;
            p.base = base;
            return p;
        } catch (Exception e) {
            throw new IllegalArgumentException("Path: " + path, e);
        }
    }

    /**
     * Get the scheme prefix of this wrapper, for example "split:".
     *
     * @return the prefix including the trailing colon
     */
    protected String getPrefix() {
        return getScheme() + ":";
    }

    /**
     * Get the base path for the given wrapped path.
     *
     * @param path the path including the scheme prefix
     * @return the base file path
     */
    protected FilePath unwrap(String path) {
        return FilePath.get(path.substring(getScheme().length() + 1));
    }

    protected FilePath getBase() {
        return base;
    }

    @Override
    public boolean canWrite() {
        return base.canWrite();
    }

    @Override
    public void createDirectory() {
        base.createDirectory();
    }

    @Override
    public boolean createFile() {
        return base.createFile();
    }

    @Override
    public void delete() {
        base.delete();
    }

    @Override
    public boolean exists() {
        return base.exists();
    }

    @Override
    public FilePath getParent() {
        // re-wrap so the result keeps this wrapper's scheme
        return wrap(base.getParent());
    }

    @Override
    public boolean isAbsolute() {
        return base.isAbsolute();
    }

    @Override
    public boolean isDirectory() {
        return base.isDirectory();
    }

    @Override
    public long lastModified() {
        return base.lastModified();
    }

    @Override
    public FilePath toRealPath() {
        return wrap(base.toRealPath());
    }

    @Override
    public List<FilePath> newDirectoryStream() {
        // wrap each entry in place, preserving the returned list instance
        List<FilePath> list = base.newDirectoryStream();
        for (int i = 0, len = list.size(); i < len; i++) {
            list.set(i, wrap(list.get(i)));
        }
        return list;
    }

    @Override
    public void moveTo(FilePath newName, boolean atomicReplace) {
        // the target must be a path of the same wrapper type
        base.moveTo(((FilePathWrapper) newName).base, atomicReplace);
    }

    @Override
    public InputStream newInputStream() throws IOException {
        return base.newInputStream();
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        return base.newOutputStream(append);
    }

    @Override
    public FileChannel open(String mode) throws IOException {
        return base.open(mode);
    }

    @Override
    public boolean setReadOnly() {
        return base.setReadOnly();
    }

    @Override
    public long size() {
        return base.size();
    }

    @Override
    public FilePath createTempFile(String suffix, boolean deleteOnExit,
            boolean inTempDir) throws IOException {
        return wrap(base.createTempFile(suffix, deleteOnExit, inTempDir));
    }

}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FilePathZip.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.h2.message.DbException;
import org.h2.util.IOUtils;
import org.h2.util.New;
/**
* This is a read-only file system that allows
* to access databases stored in a .zip or .jar file.
*/
public class FilePathZip extends FilePath {

    @Override
    public FilePathZip getPath(String path) {
        // paths are plain strings of the form "zip:<archive>!<entry>";
        // no validation or normalization happens here
        FilePathZip p = new FilePathZip();
        p.name = path;
        return p;
    }

    @Override
    public void createDirectory() {
        // ignore: directories inside an archive cannot be created
    }

    @Override
    public boolean createFile() {
        throw DbException.getUnsupportedException("write");
    }

    @Override
    public void delete() {
        throw DbException.getUnsupportedException("write");
    }

    @Override
    public boolean exists() {
        try {
            String entryName = getEntryName();
            if (entryName.length() == 0) {
                // the archive root itself
                return true;
            }
            try (ZipFile file = openZipFile()) {
                return file.getEntry(entryName) != null;
            }
        } catch (IOException e) {
            // an unreadable archive is treated as "does not exist"
            return false;
        }
    }

    @Override
    public long lastModified() {
        // not tracked for archive entries
        return 0;
    }

    @Override
    public FilePath getParent() {
        int idx = name.lastIndexOf('/');
        return idx < 0 ? null : getPath(name.substring(0, idx));
    }

    @Override
    public boolean isAbsolute() {
        // delegate to the file system hosting the archive file
        String fileName = translateFileName(name);
        return FilePath.get(fileName).isAbsolute();
    }

    @Override
    public FilePath unwrap() {
        // strip the "zip:" scheme prefix
        return FilePath.get(name.substring(getScheme().length() + 1));
    }

    @Override
    public boolean isDirectory() {
        try {
            String entryName = getEntryName();
            if (entryName.length() == 0) {
                // the archive root is a directory
                return true;
            }
            try (ZipFile file = openZipFile()) {
                Enumeration<? extends ZipEntry> en = file.entries();
                while (en.hasMoreElements()) {
                    ZipEntry entry = en.nextElement();
                    String n = entry.getName();
                    if (n.equals(entryName)) {
                        return entry.isDirectory();
                    } else if (n.startsWith(entryName)) {
                        // also accept "<entryName>/" as a directory marker
                        if (n.length() == entryName.length() + 1) {
                            if (n.equals(entryName + "/")) {
                                return true;
                            }
                        }
                    }
                }
            }
            return false;
        } catch (IOException e) {
            return false;
        }
    }

    @Override
    public boolean canWrite() {
        // this file system is read-only
        return false;
    }

    @Override
    public boolean setReadOnly() {
        // already read-only, so this always succeeds
        return true;
    }

    @Override
    public long size() {
        try {
            try (ZipFile file = openZipFile()) {
                ZipEntry entry = file.getEntry(getEntryName());
                // uncompressed entry size; 0 for a missing entry
                return entry == null ? 0 : entry.getSize();
            }
        } catch (IOException e) {
            return 0;
        }
    }

    @Override
    public ArrayList<FilePath> newDirectoryStream() {
        String path = name;
        ArrayList<FilePath> list = New.arrayList();
        try {
            // normalize to "...!<dir>/" so entry-name matching works
            if (path.indexOf('!') < 0) {
                path += "!";
            }
            if (!path.endsWith("/")) {
                path += "/";
            }
            try (ZipFile file = openZipFile()) {
                String dirName = getEntryName();
                // "zip:<archive>!/" part, to rebuild full path names
                String prefix = path.substring(0, path.length() - dirName.length());
                Enumeration<? extends ZipEntry> en = file.entries();
                while (en.hasMoreElements()) {
                    ZipEntry entry = en.nextElement();
                    String name = entry.getName();
                    if (!name.startsWith(dirName)) {
                        continue;
                    }
                    if (name.length() <= dirName.length()) {
                        continue;
                    }
                    // only direct children: no further '/' except a trailing one
                    int idx = name.indexOf('/', dirName.length());
                    if (idx < 0 || idx >= name.length() - 1) {
                        list.add(getPath(prefix + name));
                    }
                }
            }
            return list;
        } catch (IOException e) {
            throw DbException.convertIOException(e, "listFiles " + path);
        }
    }

    @Override
    public InputStream newInputStream() throws IOException {
        return new FileChannelInputStream(open("r"), true);
    }

    @Override
    public FileChannel open(String mode) throws IOException {
        // the returned channel owns the ZipFile and closes it on close()
        ZipFile file = openZipFile();
        ZipEntry entry = file.getEntry(getEntryName());
        if (entry == null) {
            file.close();
            throw new FileNotFoundException(name);
        }
        return new FileZip(file, entry);
    }

    @Override
    public OutputStream newOutputStream(boolean append) throws IOException {
        throw new IOException("write");
    }

    @Override
    public void moveTo(FilePath newName, boolean atomicReplace) {
        throw DbException.getUnsupportedException("write");
    }

    /**
     * Get the name of the archive file on the underlying file system:
     * strip the "zip:" prefix and anything from '!' on, then expand '~'.
     *
     * @param fileName the full path including scheme and entry
     * @return the archive file name
     */
    private static String translateFileName(String fileName) {
        if (fileName.startsWith("zip:")) {
            fileName = fileName.substring("zip:".length());
        }
        int idx = fileName.indexOf('!');
        if (idx >= 0) {
            fileName = fileName.substring(0, idx);
        }
        return FilePathDisk.expandUserHomeDirectory(fileName);
    }

    @Override
    public FilePath toRealPath() {
        // archive entry names are already canonical
        return this;
    }

    /**
     * Get the entry name within the archive: the part after '!', with
     * backslashes normalized to '/' and without a leading '/'.
     *
     * @return the entry name, or "" for the archive root
     */
    private String getEntryName() {
        int idx = name.indexOf('!');
        String fileName;
        if (idx <= 0) {
            fileName = "";
        } else {
            fileName = name.substring(idx + 1);
        }
        fileName = fileName.replace('\\', '/');
        if (fileName.startsWith("/")) {
            fileName = fileName.substring(1);
        }
        return fileName;
    }

    /**
     * Open the archive file. The caller is responsible for closing it.
     */
    private ZipFile openZipFile() throws IOException {
        String fileName = translateFileName(name);
        return new ZipFile(fileName);
    }

    @Override
    public FilePath createTempFile(String suffix, boolean deleteOnExit,
            boolean inTempDir) throws IOException {
        if (!inTempDir) {
            throw new IOException("File system is read-only");
        }
        // create the temporary file on the regular disk file system
        return new FilePathDisk().getPath(name).createTempFile(suffix,
                deleteOnExit, true);
    }

    @Override
    public String getScheme() {
        return "zip";
    }

}
/**
* The file is read from a stream. When reading from start to end, the same
* input stream is re-used, however when reading from end to start, a new input
* stream is opened for each request.
*/
class FileZip extends FileBase {

    /** Scratch buffer used when skipping must be emulated with read(). */
    private static final byte[] SKIP_BUFFER = new byte[1024];

    private final ZipFile file;

    private final ZipEntry entry;

    /** Logical read position within the (uncompressed) entry. */
    private long pos;

    /** The current input stream over the entry, or null if none is open. */
    private InputStream in;

    /** Position of 'in' within the entry. */
    private long inPos;

    /** Uncompressed size of the entry. */
    private final long length;

    /** Set once InputStream.skip turned out to be broken (Android). */
    private boolean skipUsingRead;

    FileZip(ZipFile file, ZipEntry entry) {
        this.file = file;
        this.entry = entry;
        length = entry.getSize();
    }

    @Override
    public long position() {
        return pos;
    }

    @Override
    public long size() {
        return length;
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        seek();
        // NOTE(review): assumes dst is array-backed (a heap buffer); a
        // direct buffer would throw from dst.array() — confirm callers.
        int len = in.read(dst.array(), dst.arrayOffset() + dst.position(),
                dst.remaining());
        if (len > 0) {
            dst.position(dst.position() + len);
            pos += len;
            inPos += len;
        }
        return len;
    }

    /**
     * Position the input stream at 'pos'. Reading forward re-uses the open
     * stream; moving backwards closes it and re-opens from the start of
     * the entry, then skips forward.
     */
    private void seek() throws IOException {
        if (inPos > pos) {
            // the stream can only move forward: restart from the beginning
            if (in != null) {
                in.close();
            }
            in = null;
        }
        if (in == null) {
            in = file.getInputStream(entry);
            inPos = 0;
        }
        if (inPos < pos) {
            long skip = pos - inPos;
            if (!skipUsingRead) {
                try {
                    IOUtils.skipFully(in, skip);
                } catch (NullPointerException e) {
                    // workaround for Android
                    skipUsingRead = true;
                }
            }
            if (skipUsingRead) {
                while (skip > 0) {
                    int s = (int) Math.min(SKIP_BUFFER.length, skip);
                    s = in.read(SKIP_BUFFER, 0, s);
                    if (s < 0) {
                        // end of stream before the target position: fail
                        // fast instead of looping forever (skip would
                        // otherwise grow by 1 on each iteration)
                        throw new IOException(
                                "Unexpected end of stream seeking to " + pos);
                    }
                    skip -= s;
                }
            }
            inPos = pos;
        }
    }

    @Override
    public FileChannel position(long newPos) {
        this.pos = newPos;
        return this;
    }

    @Override
    public FileChannel truncate(long newLength) throws IOException {
        throw new IOException("File is read-only");
    }

    @Override
    public void force(boolean metaData) throws IOException {
        // nothing to do: the file is read-only
    }

    @Override
    public int write(ByteBuffer src) throws IOException {
        throw new IOException("File is read-only");
    }

    @Override
    public synchronized FileLock tryLock(long position, long size,
            boolean shared) throws IOException {
        // only shared locks make sense on a read-only file; exclusive
        // lock requests are refused (return null)
        if (shared) {
            return new FileLock(new FakeFileChannel(), position, size, shared) {

                @Override
                public boolean isValid() {
                    return true;
                }

                @Override
                public void release() throws IOException {
                    // ignore
                }};
        }
        return null;
    }

    @Override
    protected void implCloseChannel() throws IOException {
        // close both the entry stream and the archive itself
        if (in != null) {
            in.close();
            in = null;
        }
        file.close();
    }

}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/FileUtils.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;
/**
* This utility class contains utility functions that use the file system
* abstraction.
*/
public class FileUtils {

    private FileUtils() {
        // utility class: static methods only, no instances
    }

    /**
     * Checks if a file exists.
     * This method is similar to Java 7 <code>java.nio.file.Path.exists</code>.
     *
     * @param fileName the file name
     * @return true if it exists
     */
    public static boolean exists(String fileName) {
        return FilePath.get(fileName).exists();
    }

    /**
     * Create a directory (all required parent directories must already exist).
     * This method is similar to Java 7
     * <code>java.nio.file.Path.createDirectory</code>.
     *
     * @param directoryName the directory name
     */
    public static void createDirectory(String directoryName) {
        FilePath.get(directoryName).createDirectory();
    }

    /**
     * Create a new file. This method is similar to Java 7
     * <code>java.nio.file.Path.createFile</code>, but returns false instead of
     * throwing a exception if the file already existed.
     *
     * @param fileName the file name
     * @return true if creating was successful
     */
    public static boolean createFile(String fileName) {
        return FilePath.get(fileName).createFile();
    }

    /**
     * Delete a file or directory if it exists.
     * Directories may only be deleted if they are empty.
     * This method is similar to Java 7
     * <code>java.nio.file.Path.deleteIfExists</code>.
     *
     * @param path the file or directory name
     */
    public static void delete(String path) {
        FilePath.get(path).delete();
    }

    /**
     * Get the canonical file or directory name. This method is similar to Java
     * 7 <code>java.nio.file.Path.toRealPath</code>.
     *
     * @param fileName the file name
     * @return the normalized file name
     */
    public static String toRealPath(String fileName) {
        return FilePath.get(fileName).toRealPath().toString();
    }

    /**
     * Get the parent directory of a file or directory. This method returns null
     * if there is no parent. This method is similar to Java 7
     * <code>java.nio.file.Path.getParent</code>.
     *
     * @param fileName the file or directory name
     * @return the parent directory name
     */
    public static String getParent(String fileName) {
        FilePath p = FilePath.get(fileName).getParent();
        return p == null ? null : p.toString();
    }

    /**
     * Check if the file name includes a path. This method is similar to Java 7
     * <code>java.nio.file.Path.isAbsolute</code>.
     *
     * @param fileName the file name
     * @return if the file name is absolute
     */
    public static boolean isAbsolute(String fileName) {
        return FilePath.get(fileName).isAbsolute()
                // Allows Windows to recognize "/path" as absolute.
                // Makes the same configuration work on all platforms.
                || fileName.startsWith("/");
    }

    /**
     * Rename a file if this is allowed. This method is similar to Java 7
     * <code>java.nio.file.Files.move</code>.
     *
     * @param source the old fully qualified file name
     * @param target the new fully qualified file name
     */
    public static void move(String source, String target) {
        FilePath.get(source).moveTo(FilePath.get(target), false);
    }

    /**
     * Rename a file if this is allowed, and try to atomically replace an
     * existing file. This method is similar to Java 7
     * <code>java.nio.file.Files.move</code>.
     *
     * @param source the old fully qualified file name
     * @param target the new fully qualified file name
     */
    public static void moveAtomicReplace(String source, String target) {
        FilePath.get(source).moveTo(FilePath.get(target), true);
    }

    /**
     * Get the file or directory name (the last element of the path).
     * This method is similar to Java 7 <code>java.nio.file.Path.getName</code>.
     *
     * @param path the directory and file name
     * @return just the file name
     */
    public static String getName(String path) {
        return FilePath.get(path).getName();
    }

    /**
     * List the files and directories in the given directory.
     * This method is similar to Java 7
     * <code>java.nio.file.Path.newDirectoryStream</code>.
     *
     * @param path the directory
     * @return the list of fully qualified file names
     */
    public static List<String> newDirectoryStream(String path) {
        List<FilePath> list = FilePath.get(path).newDirectoryStream();
        int len = list.size();
        List<String> result = new ArrayList<>(len);
        for (FilePath filePath : list) {
            result.add(filePath.toString());
        }
        return result;
    }

    /**
     * Get the last modified date of a file.
     * This method is similar to Java 7
     * <code>java.nio.file.attribute.Attributes.
     * readBasicFileAttributes(file).lastModified().toMillis()</code>
     *
     * @param fileName the file name
     * @return the last modified date
     */
    public static long lastModified(String fileName) {
        return FilePath.get(fileName).lastModified();
    }

    /**
     * Get the size of a file in bytes
     * This method is similar to Java 7
     * <code>java.nio.file.attribute.Attributes.
     * readBasicFileAttributes(file).size()</code>
     *
     * @param fileName the file name
     * @return the size in bytes
     */
    public static long size(String fileName) {
        return FilePath.get(fileName).size();
    }

    /**
     * Check if it is a file or a directory.
     * <code>java.nio.file.attribute.Attributes.
     * readBasicFileAttributes(file).isDirectory()</code>
     *
     * @param fileName the file or directory name
     * @return true if it is a directory
     */
    public static boolean isDirectory(String fileName) {
        return FilePath.get(fileName).isDirectory();
    }

    /**
     * Open a random access file object.
     * This method is similar to Java 7
     * <code>java.nio.channels.FileChannel.open</code>.
     *
     * @param fileName the file name
     * @param mode the access mode. Supported are r, rw, rws, rwd
     * @return the file object
     */
    public static FileChannel open(String fileName, String mode)
            throws IOException {
        return FilePath.get(fileName).open(mode);
    }

    /**
     * Create an input stream to read from the file.
     * This method is similar to Java 7
     * <code>java.nio.file.Path.newInputStream</code>.
     *
     * @param fileName the file name
     * @return the input stream
     */
    public static InputStream newInputStream(String fileName)
            throws IOException {
        return FilePath.get(fileName).newInputStream();
    }

    /**
     * Create an output stream to write into the file.
     * This method is similar to Java 7
     * <code>java.nio.file.Path.newOutputStream</code>.
     *
     * @param fileName the file name
     * @param append if true, the file will grow, if false, the file will be
     *            truncated first
     * @return the output stream
     */
    public static OutputStream newOutputStream(String fileName, boolean append)
            throws IOException {
        return FilePath.get(fileName).newOutputStream(append);
    }

    /**
     * Check if the file is writable.
     * This method is similar to Java 7
     * <code>java.nio.file.Path.checkAccess(AccessMode.WRITE)</code>
     *
     * @param fileName the file name
     * @return if the file is writable
     */
    public static boolean canWrite(String fileName) {
        return FilePath.get(fileName).canWrite();
    }

    // special methods =======================================

    /**
     * Disable the ability to write. The file can still be deleted afterwards.
     *
     * @param fileName the file name
     * @return true if the call was successful
     */
    public static boolean setReadOnly(String fileName) {
        return FilePath.get(fileName).setReadOnly();
    }

    /**
     * Get the unwrapped file name (without wrapper prefixes if wrapping /
     * delegating file systems are used).
     *
     * @param fileName the file name
     * @return the unwrapped
     */
    public static String unwrap(String fileName) {
        return FilePath.get(fileName).unwrap().toString();
    }

    // utility methods =======================================

    /**
     * Delete a directory or file and all subdirectories and files.
     *
     * @param path the path
     * @param tryOnly whether errors should be ignored
     */
    public static void deleteRecursive(String path, boolean tryOnly) {
        if (exists(path)) {
            if (isDirectory(path)) {
                // delete the contents before the directory itself
                for (String s : newDirectoryStream(path)) {
                    deleteRecursive(s, tryOnly);
                }
            }
            if (tryOnly) {
                tryDelete(path);
            } else {
                delete(path);
            }
        }
    }

    /**
     * Create the directory and all required parent directories.
     *
     * @param dir the directory name
     */
    public static void createDirectories(String dir) {
        if (dir != null) {
            if (exists(dir)) {
                if (!isDirectory(dir)) {
                    // this will fail
                    createDirectory(dir);
                }
            } else {
                String parent = getParent(dir);
                createDirectories(parent);
                createDirectory(dir);
            }
        }
    }

    /**
     * Try to delete a file or directory (ignoring errors).
     *
     * @param path the file or directory name
     * @return true if it worked
     */
    public static boolean tryDelete(String path) {
        try {
            FilePath.get(path).delete();
            return true;
        } catch (Exception e) {
            return false;
        }
    }

    /**
     * Create a new temporary file.
     *
     * @param prefix the prefix of the file name (including directory name if
     *            required)
     * @param suffix the suffix
     * @param deleteOnExit if the file should be deleted when the virtual
     *            machine exists
     * @param inTempDir if the file should be stored in the temporary directory
     * @return the name of the created file
     */
    public static String createTempFile(String prefix, String suffix,
            boolean deleteOnExit, boolean inTempDir) throws IOException {
        return FilePath.get(prefix).createTempFile(
                suffix, deleteOnExit, inTempDir).toString();
    }

    /**
     * Fully read from the file. This will read all remaining bytes,
     * or throw an EOFException if not successful.
     *
     * @param channel the file channel
     * @param dst the byte buffer
     */
    public static void readFully(FileChannel channel, ByteBuffer dst)
            throws IOException {
        do {
            int r = channel.read(dst);
            if (r < 0) {
                throw new EOFException();
            }
        } while (dst.remaining() > 0);
    }

    /**
     * Fully write to the file. This will write all remaining bytes.
     *
     * @param channel the file channel
     * @param src the byte buffer
     */
    public static void writeFully(FileChannel channel, ByteBuffer src)
            throws IOException {
        do {
            channel.write(src);
        } while (src.remaining() > 0);
    }

}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/store/fs/Recorder.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.store.fs;
/**
* A recorder for the recording file system.
*/
public interface Recorder {

    // NOTE(review): operation codes start at 2; presumably 0 and 1 are
    // reserved or historical — confirm against the recording file system.

    /**
     * Create a new file.
     */
    int CREATE_NEW_FILE = 2;

    /**
     * Create a temporary file.
     */
    int CREATE_TEMP_FILE = 3;

    /**
     * Delete a file.
     */
    int DELETE = 4;

    /**
     * Open a file output stream.
     */
    int OPEN_OUTPUT_STREAM = 5;

    /**
     * Rename a file. The file name contains the source and the target file
     * separated with a colon.
     */
    int RENAME = 6;

    /**
     * Truncate the file.
     */
    int TRUNCATE = 7;

    /**
     * Write to the file.
     */
    int WRITE = 8;

    /**
     * Record the method.
     *
     * @param op the operation (one of the constants above)
     * @param fileName the file name or file name list
     * @param data the data or null
     * @param x the value or 0
     */
    void log(int op, String fileName, byte[] data, long x);

}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/Column.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import java.sql.ResultSetMetaData;
import java.util.Arrays;
import org.h2.api.ErrorCode;
import org.h2.command.Parser;
import org.h2.engine.Constants;
import org.h2.engine.Mode;
import org.h2.engine.Session;
import org.h2.expression.ConditionAndOr;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.expression.SequenceValue;
import org.h2.expression.ValueExpression;
import org.h2.message.DbException;
import org.h2.result.Row;
import org.h2.schema.Schema;
import org.h2.schema.Sequence;
import org.h2.util.DateTimeUtils;
import org.h2.util.MathUtils;
import org.h2.util.StringUtils;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueDate;
import org.h2.value.ValueEnum;
import org.h2.value.ValueInt;
import org.h2.value.ValueLong;
import org.h2.value.ValueNull;
import org.h2.value.ValueString;
import org.h2.value.ValueTime;
import org.h2.value.ValueTimestamp;
import org.h2.value.ValueTimestampTimeZone;
import org.h2.value.ValueUuid;
/**
* This class represents a column in a table.
*/
public class Column {
    /**
     * The name of the rowid pseudo column.
     */
    public static final String ROWID = "_ROWID_";

    /**
     * This column is not nullable.
     */
    public static final int NOT_NULLABLE =
            ResultSetMetaData.columnNoNulls;

    /**
     * This column is nullable.
     */
    public static final int NULLABLE =
            ResultSetMetaData.columnNullable;

    /**
     * It is not known whether this column is nullable.
     */
    public static final int NULLABLE_UNKNOWN =
            ResultSetMetaData.columnNullableUnknown;

    // value type of the column (one of the Value.* type codes)
    private final int type;
    private long precision;
    private int scale;
    // allowed values when type == Value.ENUM, otherwise null
    private String[] enumerators;
    private int displaySize;
    // the table this column belongs to, set via setTable()
    private Table table;
    private String name;
    // index of this column within the table, set via setTable()
    private int columnId;
    private boolean nullable = true;
    private Expression defaultExpression;
    private Expression onUpdateExpression;
    private Expression checkConstraint;
    private String checkConstraintSQL;
    private String originalSQL;
    private boolean autoIncrement;
    private long start;
    private long increment;
    // if set, NULL values are replaced with the default expression
    private boolean convertNullToDefault;
    private Sequence sequence;
    // true for computed columns (value derived from other columns)
    private boolean isComputed;
    // filter used to evaluate computed-column expressions against a row
    private TableFilter computeTableFilter;
    private int selectivity;
    // resolver used when evaluating the check constraint
    private SingleColumnResolver resolver;
    private String comment;
    private boolean primaryKey;
    private boolean visible = true;
    /**
     * Create a column using the default precision, scale and display size
     * of the given data type.
     *
     * @param name the column name
     * @param type the value type
     */
    public Column(String name, int type) {
        this(name, type, -1, -1, -1, null);
    }

    /**
     * Create a non-enum column.
     *
     * @param name the column name
     * @param type the value type
     * @param precision the precision, or -1 for the type default
     * @param scale the scale, or -1 for the type default
     * @param displaySize the display size, or -1 for the type default
     */
    public Column(String name, int type, long precision, int scale,
            int displaySize) {
        this(name, type, precision, scale, displaySize, null);
    }

    /**
     * Create a column.
     *
     * @param name the column name
     * @param type the value type
     * @param precision the precision, or -1 for the type default
     * @param scale the scale, or -1 for the type default
     * @param displaySize the display size, or -1 for the type default
     * @param enumerators the allowed enum values, or null
     */
    public Column(String name, int type, long precision, int scale,
            int displaySize, String[] enumerators) {
        this.name = name;
        this.type = type;
        // -1 everywhere means "use the defaults of the data type"
        if (precision == -1 && scale == -1 && displaySize == -1 && type != Value.UNKNOWN) {
            DataType dt = DataType.getDataType(type);
            precision = dt.defaultPrecision;
            scale = dt.defaultScale;
            displaySize = dt.defaultDisplaySize;
        }
        this.precision = precision;
        this.scale = scale;
        this.displaySize = displaySize;
        this.enumerators = enumerators;
    }
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (!(o instanceof Column)) {
return false;
}
Column other = (Column) o;
if (table == null || other.table == null ||
name == null || other.name == null) {
return false;
}
if (table != other.table) {
return false;
}
return name.equals(other.name);
}
@Override
public int hashCode() {
if (table == null || name == null) {
return 0;
}
return table.getId() ^ name.hashCode();
}
    // Whether this column holds values of the ENUM type.
    public boolean isEnumerated() {
        return type == Value.ENUM;
    }

    /**
     * Create a copy of this column, including all attributes copied by
     * {@code copy()}.
     */
    public Column getClone() {
        Column newColumn = new Column(name, type, precision, scale, displaySize, enumerators);
        newColumn.copy(this);
        return newColumn;
    }
    /**
     * Convert a value to this column's type.
     *
     * @param v the value
     * @return the value
     */
    public Value convert(Value v) {
        return convert(v, null);
    }

    /**
     * Convert a value to this column's type using the given {@link Mode}.
     * <p>
     * Use this method in case the conversion is Mode-dependent.
     *
     * @param v the value
     * @param mode the database {@link Mode} to use
     * @return the value
     */
    public Value convert(Value v, Mode mode) {
        try {
            return v.convertTo(type, MathUtils.convertLongToInt(precision), mode, this, getEnumerators());
        } catch (DbException e) {
            if (e.getErrorCode() == ErrorCode.DATA_CONVERSION_ERROR_1) {
                // re-throw with table/column context so the user can see
                // which column the failing value was destined for
                String target = (table == null ? "" : table.getName() + ": ") +
                        getCreateSQL();
                throw DbException.get(
                        ErrorCode.DATA_CONVERSION_ERROR_1, e,
                        v.getSQL() + " (" + target + ")");
            }
            throw e;
        }
    }
    // Whether this is a computed column.
    boolean getComputed() {
        return isComputed;
    }

    /**
     * Compute the value of this computed column.
     *
     * @param session the session
     * @param row the row
     * @return the value
     */
    synchronized Value computeValue(Session session, Row row) {
        // bind the row to the compute filter, then evaluate the
        // stored expression against it
        computeTableFilter.setSession(session);
        computeTableFilter.set(row);
        return defaultExpression.getValue(session);
    }

    /**
     * Set the default value in the form of a computed expression of other
     * columns.
     *
     * @param expression the computed expression
     */
    public void setComputedExpression(Expression expression) {
        this.isComputed = true;
        this.defaultExpression = expression;
    }
    /**
     * Set the table and column id.
     *
     * @param table the table
     * @param columnId the column index
     */
    public void setTable(Table table, int columnId) {
        this.table = table;
        this.columnId = columnId;
    }

    public Table getTable() {
        return table;
    }
    /**
     * Set the default expression.
     *
     * @param session the session
     * @param defaultExpression the default expression
     */
    public void setDefaultExpression(Session session,
            Expression defaultExpression) {
        // also to test that no column names are used
        if (defaultExpression != null) {
            defaultExpression = defaultExpression.optimize(session);
            if (defaultExpression.isConstant()) {
                // fold constants once so later evaluations are cheap
                defaultExpression = ValueExpression.get(
                        defaultExpression.getValue(session));
            }
        }
        this.defaultExpression = defaultExpression;
    }
    /**
     * Set the on update expression.
     *
     * @param session the session
     * @param onUpdateExpression the on update expression
     */
    public void setOnUpdateExpression(Session session, Expression onUpdateExpression) {
        // also to test that no column names are used
        if (onUpdateExpression != null) {
            onUpdateExpression = onUpdateExpression.optimize(session);
            if (onUpdateExpression.isConstant()) {
                // fold constants once so later evaluations are cheap
                onUpdateExpression = ValueExpression.get(onUpdateExpression.getValue(session));
            }
        }
        this.onUpdateExpression = onUpdateExpression;
    }
    public int getColumnId() {
        return columnId;
    }

    // The quoted SQL form of the column name.
    public String getSQL() {
        return Parser.quoteIdentifier(name);
    }

    public String getName() {
        return name;
    }

    public int getType() {
        return type;
    }

    public long getPrecision() {
        return precision;
    }

    public void setPrecision(long p) {
        precision = p;
    }

    public int getDisplaySize() {
        return displaySize;
    }

    public int getScale() {
        return scale;
    }

    public void setNullable(boolean b) {
        nullable = b;
    }

    // Allowed values when this is an ENUM column, otherwise null.
    public String[] getEnumerators() {
        return enumerators;
    }

    public void setEnumerators(String[] enumerators) {
        this.enumerators = enumerators;
    }

    public boolean getVisible() {
        return visible;
    }

    public void setVisible(boolean b) {
        visible = b;
    }
/**
 * Validate the value, convert it if required, and update the sequence value
 * if required. If the value is null, the default value (NULL if no default
 * is set) is returned. Check constraints are validated as well.
 *
 * @param session the session
 * @param value the value or null
 * @return the new or converted value
 */
public Value validateConvertUpdateSequence(Session session, Value value) {
// take a local copy of defaultExpression to avoid holding the lock
// while calling getValue
final Expression localDefaultExpression;
synchronized (this) {
localDefaultExpression = defaultExpression;
}
// no value supplied: substitute the default, or NULL if there is none
if (value == null) {
if (localDefaultExpression == null) {
value = ValueNull.INSTANCE;
} else {
value = localDefaultExpression.getValue(session).convertTo(type);
session.getGeneratedKeys().add(this);
if (primaryKey) {
session.setLastIdentity(value);
}
}
}
Mode mode = session.getDatabase().getMode();
if (value == ValueNull.INSTANCE) {
// NULL_TO_DEFAULT: an explicit NULL is also replaced by the default
// NOTE(review): assumes a default expression exists whenever
// convertNullToDefault is set; localDefaultExpression would NPE
// otherwise — confirm this invariant is enforced by the parser
if (convertNullToDefault) {
value = localDefaultExpression.getValue(session).convertTo(type);
session.getGeneratedKeys().add(this);
}
if (value == ValueNull.INSTANCE && !nullable) {
// compatibility modes may map NULL to a type-specific zero value
if (mode.convertInsertNullToZero) {
DataType dt = DataType.getDataType(type);
if (dt.decimal) {
value = ValueInt.get(0).convertTo(type);
} else if (dt.type == Value.TIMESTAMP) {
value = ValueTimestamp.fromMillis(session.getTransactionStart());
} else if (dt.type == Value.TIMESTAMP_TZ) {
long ms = session.getTransactionStart();
value = ValueTimestampTimeZone.fromDateValueAndNanos(
DateTimeUtils.dateValueFromDate(ms),
DateTimeUtils.nanosFromDate(ms), (short) 0);
} else if (dt.type == Value.TIME) {
value = ValueTime.fromNanos(0);
} else if (dt.type == Value.DATE) {
value = ValueDate.fromMillis(session.getTransactionStart());
} else {
value = ValueString.get("").convertTo(type);
}
} else {
throw DbException.get(ErrorCode.NULL_NOT_ALLOWED, name);
}
}
}
// evaluate the CHECK constraint against the candidate value
if (checkConstraint != null) {
resolver.setValue(value);
Value v;
synchronized (this) {
v = checkConstraint.getValue(session);
}
// Both TRUE and NULL are ok
if (v != ValueNull.INSTANCE && !v.getBoolean()) {
throw DbException.get(
ErrorCode.CHECK_CONSTRAINT_VIOLATED_1,
checkConstraint.getSQL());
}
}
value = value.convertScale(mode.convertOnlyToSmallerScale, scale);
if (precision > 0) {
if (!value.checkPrecision(precision)) {
// truncate the reported value so error messages stay short
String s = value.getTraceSQL();
if (s.length() > 127) {
s = s.substring(0, 128) + "...";
}
throw DbException.get(ErrorCode.VALUE_TOO_LONG_2,
getCreateSQL(), s + " (" + value.getPrecision() + ")");
}
}
// ENUM values are normalized to their canonical representation
if (isEnumerated() && value != ValueNull.INSTANCE) {
if (!ValueEnum.isValid(enumerators, value)) {
String s = value.getTraceSQL();
if (s.length() > 127) {
s = s.substring(0, 128) + "...";
}
throw DbException.get(ErrorCode.ENUM_VALUE_NOT_PERMITTED,
getCreateSQL(), s);
}
value = ValueEnum.get(enumerators, value.getInt());
}
updateSequenceIfRequired(session, value);
return value;
}
// Advance the backing sequence when an explicitly inserted value is ahead
// of it, so subsequent generated values do not collide.
private void updateSequenceIfRequired(Session session, Value value) {
    if (sequence == null) {
        // not a sequence-backed column, nothing to do
        return;
    }
    long currentValue = sequence.getCurrentValue();
    long seqIncrement = sequence.getIncrement();
    long insertedValue = value.getLong();
    // "ahead" depends on the direction the sequence moves in
    boolean ahead = seqIncrement > 0
            ? insertedValue > currentValue
            : seqIncrement < 0 && insertedValue < currentValue;
    if (ahead) {
        sequence.modify(insertedValue + seqIncrement, null, null, null);
        session.setLastIdentity(ValueLong.get(insertedValue));
        sequence.flush(session);
    }
}
/**
 * Convert the auto-increment flag to a sequence that is linked with this
 * table.
 *
 * @param session the session
 * @param schema the schema where the sequence should be generated
 * @param id the object id
 * @param temporary true if the sequence is temporary and does not need to
 *            be stored
 */
public void convertAutoIncrementToSequence(Session session, Schema schema,
int id, boolean temporary) {
if (!autoIncrement) {
DbException.throwInternalError();
}
// IDENTITY / SERIAL are aliases; persist the underlying base type instead
if ("IDENTITY".equals(originalSQL)) {
originalSQL = "BIGINT";
} else if ("SERIAL".equals(originalSQL)) {
originalSQL = "INT";
}
// generate a unique system sequence name from a random UUID
String sequenceName;
do {
ValueUuid uuid = ValueUuid.getNewRandom();
String s = uuid.getString();
s = StringUtils.toUpperEnglish(s.replace('-', '_'));
sequenceName = "SYSTEM_SEQUENCE_" + s;
} while (schema.findSequence(sequenceName) != null);
Sequence seq = new Sequence(schema, id, sequenceName, start, increment);
seq.setTemporary(temporary);
session.getDatabase().addSchemaObject(session, seq);
// the column now draws its default from the sequence instead of the flag
setAutoIncrement(false, 0, 0);
SequenceValue seqValue = new SequenceValue(seq);
setDefaultExpression(session, seqValue);
setSequence(seq);
}
/**
 * Prepare all expressions of this column.
 *
 * @param session the session
 */
public void prepareExpression(Session session) {
if (defaultExpression != null || onUpdateExpression != null) {
// a filter over the owning table so computed-column expressions can
// resolve references to sibling columns
computeTableFilter = new TableFilter(session, table, null, false, null, 0, null);
if (defaultExpression != null) {
defaultExpression.mapColumns(computeTableFilter, 0);
defaultExpression = defaultExpression.optimize(session);
}
if (onUpdateExpression != null) {
onUpdateExpression.mapColumns(computeTableFilter, 0);
onUpdateExpression = onUpdateExpression.optimize(session);
}
}
}
/** @return the CREATE SQL snippet for this column without the column name */
public String getCreateSQLWithoutName() {
return getCreateSQL(false);
}
/** @return the complete CREATE SQL snippet for this column */
public String getCreateSQL() {
return getCreateSQL(true);
}
/**
 * Build the SQL snippet that defines this column in a CREATE TABLE
 * statement: name, type, and all modifiers (INVISIBLE, DEFAULT / AS,
 * ON UPDATE, NOT NULL, NULL_TO_DEFAULT, SEQUENCE, SELECTIVITY, COMMENT,
 * CHECK).
 *
 * @param includeName whether to prepend the quoted column name
 * @return the SQL snippet
 */
private String getCreateSQL(boolean includeName) {
    StringBuilder buff = new StringBuilder();
    if (includeName && name != null) {
        buff.append(Parser.quoteIdentifier(name)).append(' ');
    }
    if (originalSQL != null) {
        // keep the exact type text the user originally wrote
        buff.append(originalSQL);
    } else {
        buff.append(DataType.getDataType(type).name);
        switch (type) {
        case Value.DECIMAL:
            buff.append('(').append(precision).append(", ").append(scale).append(')');
            break;
        case Value.ENUM:
            buff.append('(');
            for (int i = 0; i < enumerators.length; i++) {
                if (i > 0) {
                    buff.append(',');
                }
                buff.append('\'').append(enumerators[i]).append('\'');
            }
            buff.append(')');
            break;
        case Value.BYTES:
        case Value.STRING:
        case Value.STRING_IGNORECASE:
        case Value.STRING_FIXED:
            if (precision < Integer.MAX_VALUE) {
                buff.append('(').append(precision).append(')');
            }
            break;
        default:
        }
    }
    if (!visible) {
        buff.append(" INVISIBLE ");
    }
    if (defaultExpression != null) {
        String sql = defaultExpression.getSQL();
        if (sql != null) {
            // Computed columns are emitted as "AS <expr>"; otherwise the
            // expression is a plain DEFAULT. (The former redundant
            // "defaultExpression != null" re-check was removed: it is
            // always true inside this branch.)
            buff.append(isComputed ? " AS " : " DEFAULT ").append(sql);
        }
    }
    if (onUpdateExpression != null) {
        String sql = onUpdateExpression.getSQL();
        if (sql != null) {
            buff.append(" ON UPDATE ").append(sql);
        }
    }
    if (!nullable) {
        buff.append(" NOT NULL");
    }
    if (convertNullToDefault) {
        buff.append(" NULL_TO_DEFAULT");
    }
    if (sequence != null) {
        buff.append(" SEQUENCE ").append(sequence.getSQL());
    }
    if (selectivity != 0) {
        buff.append(" SELECTIVITY ").append(selectivity);
    }
    if (comment != null) {
        buff.append(" COMMENT ").append(StringUtils.quoteStringSQL(comment));
    }
    if (checkConstraint != null) {
        buff.append(" CHECK ").append(checkConstraintSQL);
    }
    return buff.toString();
}
/** @return whether NULL values are allowed */
public boolean isNullable() {
return nullable;
}
/** Set the original SQL type text. @param original the original type SQL */
public void setOriginalSQL(String original) {
originalSQL = original;
}
/** @return the original SQL type text, or null */
public String getOriginalSQL() {
return originalSQL;
}
/** @return the default expression, or null */
public Expression getDefaultExpression() {
return defaultExpression;
}
/** @return the ON UPDATE expression, or null */
public Expression getOnUpdateExpression() {
return onUpdateExpression;
}
/** @return whether this is an auto-increment column */
public boolean isAutoIncrement() {
return autoIncrement;
}
/**
 * Set the autoincrement flag and related properties of this column.
 *
 * @param autoInc the new autoincrement flag
 * @param start the sequence start value
 * @param increment the sequence increment
 */
public void setAutoIncrement(boolean autoInc, long start, long increment) {
this.autoIncrement = autoInc;
this.start = start;
this.increment = increment;
// auto-increment columns are implicitly NOT NULL
this.nullable = false;
if (autoInc) {
convertNullToDefault = true;
}
}
/** Set whether NULL is converted to the default value. @param convert the flag */
public void setConvertNullToDefault(boolean convert) {
this.convertNullToDefault = convert;
}
/**
 * Rename the column. This method will only set the column name to the new
 * value.
 *
 * @param newName the new column name
 */
public void rename(String newName) {
this.name = newName;
}
/** Link this column to a sequence. @param sequence the sequence or null */
public void setSequence(Sequence sequence) {
this.sequence = sequence;
}
/** @return the linked sequence, or null */
public Sequence getSequence() {
return sequence;
}
/**
 * Get the selectivity of the column. Selectivity 100 means values are
 * unique, 10 means every distinct value appears 10 times on average.
 *
 * @return the selectivity, or the default if none was set
 */
public int getSelectivity() {
    return selectivity == 0 ? Constants.SELECTIVITY_DEFAULT : selectivity;
}
/**
 * Set the new selectivity of a column, clamped to the valid range 0..100.
 *
 * @param selectivity the new value
 */
public void setSelectivity(int selectivity) {
    this.selectivity = Math.min(100, Math.max(0, selectivity));
}
/**
 * Add a check constraint expression to this column. An existing check
 * constraint constraint is added using AND.
 *
 * @param session the session
 * @param expr the (additional) constraint
 */
public void addCheckConstraint(Session session, Expression expr) {
if (expr == null) {
return;
}
resolver = new SingleColumnResolver(this);
// temporarily rename an unnamed column to VALUE so the constraint
// expression can refer to it; the lock guards the name swap
synchronized (this) {
String oldName = name;
if (name == null) {
name = "VALUE";
}
expr.mapColumns(resolver, 0);
name = oldName;
}
expr = expr.optimize(session);
resolver.setValue(ValueNull.INSTANCE);
// check if the column is mapped
synchronized (this) {
expr.getValue(session);
}
// chain multiple constraints with AND
if (checkConstraint == null) {
checkConstraint = expr;
} else {
checkConstraint = new ConditionAndOr(ConditionAndOr.AND, checkConstraint, expr);
}
checkConstraintSQL = getCheckConstraintSQL(session, name);
}
/**
 * Remove the check constraint if there is one.
 */
public void removeCheckConstraint() {
checkConstraint = null;
checkConstraintSQL = null;
}
/**
 * Get the check constraint expression for this column if set.
 *
 * @param session the session
 * @param asColumnName the column name to use
 * @return the constraint expression
 */
public Expression getCheckConstraint(Session session, String asColumnName) {
if (checkConstraint == null) {
return null;
}
Parser parser = new Parser(session);
String sql;
// temporarily swap in the requested column name so the generated SQL
// refers to it; the lock guards the name swap
synchronized (this) {
String oldName = name;
name = asColumnName;
sql = checkConstraint.getSQL();
name = oldName;
}
// re-parse to get an independent expression tree bound to the new name
return parser.parseExpression(sql);
}
/** @return the SQL of the default expression, or null if there is none */
String getDefaultSQL() {
return defaultExpression == null ? null : defaultExpression.getSQL();
}
/** @return the SQL of the ON UPDATE expression, or null if there is none */
String getOnUpdateSQL() {
return onUpdateExpression == null ? null : onUpdateExpression.getSQL();
}
/** @return the precision clamped into int range */
int getPrecisionAsInt() {
return MathUtils.convertLongToInt(precision);
}
/** @return the DataType descriptor of this column's value type */
DataType getDataType() {
return DataType.getDataType(type);
}
/**
 * Get the check constraint SQL snippet.
 *
 * @param session the session
 * @param asColumnName the column name to use
 * @return the SQL snippet, or an empty string if there is no constraint
 */
String getCheckConstraintSQL(Session session, String asColumnName) {
Expression constraint = getCheckConstraint(session, asColumnName);
return constraint == null ? "" : constraint.getSQL();
}
/** Set the column comment. @param comment the comment or null */
public void setComment(String comment) {
this.comment = comment;
}
/** @return the column comment, or null */
public String getComment() {
return comment;
}
/** Mark this column as (part of) the primary key. @param primaryKey the flag */
public void setPrimaryKey(boolean primaryKey) {
this.primaryKey = primaryKey;
}
/**
 * Visit the default expression, the on update expression, the check
 * constraint, and the sequence (if any).
 *
 * @param visitor the visitor
 * @return true if every visited expression returned true, or if there are
 *         no expressions
 */
boolean isEverything(ExpressionVisitor visitor) {
    if (visitor.getType() == ExpressionVisitor.GET_DEPENDENCIES) {
        if (sequence != null) {
            visitor.getDependencies().add(sequence);
        }
    }
    if (defaultExpression != null && !defaultExpression.isEverything(visitor)) {
        return false;
    }
    // Fix: the ON UPDATE expression was previously never visited, so its
    // dependencies/properties were silently ignored by visitors.
    if (onUpdateExpression != null && !onUpdateExpression.isEverything(visitor)) {
        return false;
    }
    if (checkConstraint != null && !checkConstraint.isEverything(visitor)) {
        return false;
    }
    return true;
}
/** @return whether this column is (part of) the primary key */
public boolean isPrimaryKey() {
return primaryKey;
}
@Override
public String toString() {
return name;
}
/**
 * Check whether the new column is of the same type and not more restricted
 * than this column.
 *
 * @param newColumn the new (target) column
 * @return true if the new column is compatible
 */
public boolean isWideningConversion(Column newColumn) {
    if (type != newColumn.type) {
        return false;
    }
    if (precision > newColumn.precision) {
        return false;
    }
    if (scale != newColumn.scale) {
        return false;
    }
    // a nullable column cannot be narrowed to NOT NULL
    if (nullable && !newColumn.nullable) {
        return false;
    }
    if (primaryKey != newColumn.primaryKey) {
        return false;
    }
    if (autoIncrement || newColumn.autoIncrement) {
        return false;
    }
    if (checkConstraint != null || newColumn.checkConstraint != null) {
        return false;
    }
    // This single test subsumes the former additional
    // "convertNullToDefault != newColumn.convertNullToDefault" check:
    // if either flag is set the conversion is rejected anyway.
    if (convertNullToDefault || newColumn.convertNullToDefault) {
        return false;
    }
    if (defaultExpression != null || newColumn.defaultExpression != null) {
        return false;
    }
    if (isComputed || newColumn.isComputed) {
        return false;
    }
    if (onUpdateExpression != null || newColumn.onUpdateExpression != null) {
        return false;
    }
    return true;
}
/**
 * Copy the data of the source column into the current column.
 *
 * @param source the source column
 */
public void copy(Column source) {
checkConstraint = source.checkConstraint;
checkConstraintSQL = source.checkConstraintSQL;
displaySize = source.displaySize;
name = source.name;
precision = source.precision;
// defensive copy so the two columns don't share the enumerator array
enumerators = source.enumerators == null ? null :
Arrays.copyOf(source.enumerators, source.enumerators.length);
scale = source.scale;
// table is not set
// columnId is not set
nullable = source.nullable;
defaultExpression = source.defaultExpression;
onUpdateExpression = source.onUpdateExpression;
originalSQL = source.originalSQL;
// autoIncrement, start, increment is not set
convertNullToDefault = source.convertNullToDefault;
sequence = source.sequence;
comment = source.comment;
computeTableFilter = source.computeTableFilter;
isComputed = source.isComputed;
selectivity = source.selectivity;
primaryKey = source.primaryKey;
visible = source.visible;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/ColumnResolver.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import org.h2.command.dml.Select;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionColumn;
import org.h2.value.Value;
/**
 * A column resolver is a list of columns (for example, a table) that can map
 * a column name to an actual column.
 */
public interface ColumnResolver {
/**
 * Get the table alias.
 *
 * @return the table alias
 */
String getTableAlias();
/**
 * Get the column list.
 *
 * @return the column list
 */
Column[] getColumns();
/**
 * Get derived column name, or {@code null}.
 *
 * @param column column
 * @return derived column name, or {@code null}
 */
String getDerivedColumnName(Column column);
/**
 * Get the list of system columns, if any.
 *
 * @return the system columns or null
 */
Column[] getSystemColumns();
/**
 * Get the row id pseudo column, if there is one.
 *
 * @return the row id column or null
 */
Column getRowIdColumn();
/**
 * Get the schema name.
 *
 * @return the schema name
 */
String getSchemaName();
/**
 * Get the value for the given column.
 *
 * @param column the column
 * @return the value
 */
Value getValue(Column column);
/**
 * Get the table filter.
 *
 * @return the table filter
 */
TableFilter getTableFilter();
/**
 * Get the select statement.
 *
 * @return the select statement
 */
Select getSelect();
/**
 * Get the expression that represents this column.
 *
 * @param expressionColumn the expression column
 * @param column the column
 * @return the optimized expression
 */
Expression optimize(ExpressionColumn expressionColumn, Column column);
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/FunctionTable.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import org.h2.api.ErrorCode;
import org.h2.engine.Session;
import org.h2.expression.Expression;
import org.h2.expression.FunctionCall;
import org.h2.expression.TableFunction;
import org.h2.index.FunctionIndex;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.message.DbException;
import org.h2.result.LocalResult;
import org.h2.result.ResultInterface;
import org.h2.result.Row;
import org.h2.schema.Schema;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueNull;
import org.h2.value.ValueResultSet;
/**
 * A table backed by a system or user-defined function that returns a result
 * set.
 */
public class FunctionTable extends Table {
// the function call backing this table
private final FunctionCall function;
// row count if known (table functions), otherwise Long.MAX_VALUE
private final long rowCount;
// the expression wrapping the function call; re-optimized on each read
private Expression functionExpr;
// cached buffered result for deterministic functions
private LocalResult cachedResult;
// the value the cached result was produced from (compared by identity)
private Value cachedValue;
/**
 * Create a function table. The function must return a result set; its
 * metadata is used to derive the column definitions.
 */
public FunctionTable(Schema schema, Session session,
Expression functionExpr, FunctionCall function) {
super(schema, 0, function.getName(), false, true);
this.functionExpr = functionExpr;
this.function = function;
// only table functions can report a row count up front
if (function instanceof TableFunction) {
rowCount = ((TableFunction) function).getRowCount();
} else {
rowCount = Long.MAX_VALUE;
}
function.optimize(session);
int type = function.getType();
if (type != Value.RESULT_SET) {
throw DbException.get(
ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, function.getName());
}
Expression[] args = function.getArgs();
int numParams = args.length;
Expression[] columnListArgs = new Expression[numParams];
for (int i = 0; i < numParams; i++) {
args[i] = args[i].optimize(session);
columnListArgs[i] = args[i];
}
// obtain a template result set purely to read its column metadata
ValueResultSet template = function.getValueForColumnList(
session, columnListArgs);
if (template == null) {
throw DbException.get(
ErrorCode.FUNCTION_MUST_RETURN_RESULT_SET_1, function.getName());
}
ResultSet rs = template.getResultSet();
try {
ResultSetMetaData meta = rs.getMetaData();
int columnCount = meta.getColumnCount();
Column[] cols = new Column[columnCount];
for (int i = 0; i < columnCount; i++) {
cols[i] = new Column(meta.getColumnName(i + 1),
DataType.getValueTypeFromResultSet(meta, i + 1),
meta.getPrecision(i + 1),
meta.getScale(i + 1), meta.getColumnDisplaySize(i + 1));
}
setColumns(cols);
} catch (SQLException e) {
throw DbException.convert(e);
}
}
@Override
public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) {
// nothing to do
return false;
}
@Override
public void close(Session session) {
// nothing to do
}
@Override
public void unlock(Session s) {
// nothing to do
}
@Override
public boolean isLockedExclusively() {
return false;
}
// function tables are read-only views over a function; all mutating
// operations below are unsupported
@Override
public Index addIndex(Session session, String indexName, int indexId,
IndexColumn[] cols, IndexType indexType, boolean create,
String indexComment) {
throw DbException.getUnsupportedException("ALIAS");
}
@Override
public void removeRow(Session session, Row row) {
throw DbException.getUnsupportedException("ALIAS");
}
@Override
public void truncate(Session session) {
throw DbException.getUnsupportedException("ALIAS");
}
@Override
public boolean canDrop() {
throw DbException.throwInternalError(toString());
}
@Override
public void addRow(Session session, Row row) {
throw DbException.getUnsupportedException("ALIAS");
}
@Override
public void checkSupportAlter() {
throw DbException.getUnsupportedException("ALIAS");
}
@Override
public TableType getTableType() {
return null;
}
@Override
public Index getScanIndex(Session session) {
return new FunctionIndex(this, IndexColumn.wrap(columns));
}
@Override
public ArrayList<Index> getIndexes() {
return null;
}
@Override
public boolean canGetRowCount() {
return rowCount != Long.MAX_VALUE;
}
@Override
public long getRowCount(Session session) {
return rowCount;
}
@Override
public String getCreateSQL() {
return null;
}
@Override
public String getDropSQL() {
return null;
}
@Override
public void checkRename() {
throw DbException.getUnsupportedException("ALIAS");
}
/**
 * Read the result from the function. This method buffers the result in a
 * temporary file.
 *
 * @param session the session
 * @return the result
 */
public ResultInterface getResult(Session session) {
ValueResultSet v = getValueResultSet(session);
if (v == null) {
return null;
}
// NOTE(review): the cache hit test uses reference identity (==) on the
// value — presumably deterministic functions return the same instance;
// confirm before relying on it
if (cachedResult != null && cachedValue == v) {
cachedResult.reset();
return cachedResult;
}
LocalResult result = LocalResult.read(session, v.getResultSet(), 0);
if (function.isDeterministic()) {
cachedResult = result;
cachedValue = v;
}
return result;
}
/**
 * Read the result set from the function. This method doesn't cache.
 *
 * @param session the session
 * @return the result set
 */
public ResultSet getResultSet(Session session) {
ValueResultSet v = getValueResultSet(session);
return v == null ? null : v.getResultSet();
}
/** Evaluate the function expression; null if it evaluates to SQL NULL. */
private ValueResultSet getValueResultSet(Session session) {
functionExpr = functionExpr.optimize(session);
Value v = functionExpr.getValue(session);
if (v == ValueNull.INSTANCE) {
return null;
}
return (ValueResultSet) v;
}
/** @return whether the result set should be buffered in a local temp object */
public boolean isBufferResultSetToLocalTemp() {
return function.isBufferResultSetToLocalTemp();
}
@Override
public long getMaxDataModificationId() {
// TODO optimization: table-as-a-function currently doesn't know the
// last modified date
return Long.MAX_VALUE;
}
@Override
public Index getUniqueIndex() {
return null;
}
@Override
public String getSQL() {
return function.getSQL();
}
@Override
public long getRowCountApproximation() {
return rowCount;
}
@Override
public long getDiskSpaceUsed() {
return 0;
}
@Override
public boolean isDeterministic() {
return function.isDeterministic();
}
@Override
public boolean canReference() {
// function tables cannot be the target of a foreign key
return false;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/IndexColumn.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import org.h2.result.SortOrder;
/**
 * This represents a column item of an index. This is required because some
 * indexes support descending sorted columns.
 */
public class IndexColumn {
    /**
     * The column name.
     */
    public String columnName;
    /**
     * The column, or null if not set.
     */
    public Column column;
    /**
     * The sort type. Ascending (the default) and descending are supported;
     * nulls can be sorted first or last.
     */
    public int sortType = SortOrder.ASCENDING;
    /**
     * Get the SQL snippet for this index column.
     *
     * @return the SQL snippet
     */
    public String getSQL() {
        StringBuilder builder = new StringBuilder(column.getSQL());
        if ((sortType & SortOrder.DESCENDING) != 0) {
            builder.append(" DESC");
        }
        // NULLS FIRST takes precedence if both flags happen to be set
        if ((sortType & SortOrder.NULLS_FIRST) != 0) {
            builder.append(" NULLS FIRST");
        } else if ((sortType & SortOrder.NULLS_LAST) != 0) {
            builder.append(" NULLS LAST");
        }
        return builder.toString();
    }
    /**
     * Create an array of index columns from a list of columns. The default sort
     * type is used.
     *
     * @param columns the column list
     * @return the index column array
     */
    public static IndexColumn[] wrap(Column[] columns) {
        IndexColumn[] result = new IndexColumn[columns.length];
        int i = 0;
        for (Column col : columns) {
            IndexColumn indexColumn = new IndexColumn();
            indexColumn.column = col;
            result[i++] = indexColumn;
        }
        return result;
    }
    /**
     * Map the columns using the column names and the specified table.
     *
     * @param indexColumns the column list with column names set
     * @param table the table from where to map the column names to columns
     */
    public static void mapColumns(IndexColumn[] indexColumns, Table table) {
        for (IndexColumn indexColumn : indexColumns) {
            indexColumn.column = table.getColumn(indexColumn.columnName);
        }
    }
    @Override
    public String toString() {
        return "IndexColumn " + getSQL();
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/IndexHints.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import org.h2.index.Index;
import java.util.LinkedHashSet;
import java.util.Set;
/**
 * Contains the hints for which index to use for a specific table. Currently
 * allows a list of "use indexes" to be specified.
 * <p>
 * Use the factory method IndexHints.createUseIndexHints(listOfIndexes) to limit
 * the query planner to only use specific indexes when determining which index
 * to use for a table
 **/
public final class IndexHints {
    // insertion-ordered set of index names the planner may consider
    private final LinkedHashSet<String> allowedIndexes;
    private IndexHints(LinkedHashSet<String> allowedIndexes) {
        this.allowedIndexes = allowedIndexes;
    }
    /**
     * Create an index hint object.
     *
     * @param allowedIndexes the set of allowed indexes
     * @return the hint object
     */
    public static IndexHints createUseIndexHints(LinkedHashSet<String> allowedIndexes) {
        return new IndexHints(allowedIndexes);
    }
    /** @return the set of allowed index names */
    public Set<String> getAllowedIndexes() {
        return allowedIndexes;
    }
    @Override
    public String toString() {
        return "IndexHints{allowedIndexes=" + allowedIndexes + '}';
    }
    /**
     * Allow an index to be used.
     *
     * @param index the index
     * @return whether it was already allowed
     */
    public boolean allowIndex(Index index) {
        // membership is decided purely by index name
        return allowedIndexes.contains(index.getName());
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/JoinBatch.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Future;
import org.h2.command.dml.Query;
import org.h2.command.dml.Select;
import org.h2.command.dml.SelectUnion;
import org.h2.index.BaseIndex;
import org.h2.index.Cursor;
import org.h2.index.IndexCursor;
import org.h2.index.IndexLookupBatch;
import org.h2.index.ViewCursor;
import org.h2.index.ViewIndex;
import org.h2.message.DbException;
import org.h2.result.ResultInterface;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.util.DoneFuture;
import org.h2.util.LazyFuture;
import org.h2.util.New;
import org.h2.value.Value;
import org.h2.value.ValueLong;
/**
* Support for asynchronous batched index lookups on joins.
*
* @see BaseIndex#createLookupBatch(org.h2.table.TableFilter[], int)
* @see IndexLookupBatch
* @author Sergi Vladykin
*/
public final class JoinBatch {
/**
 * An empty cursor.
 */
static final Cursor EMPTY_CURSOR = new Cursor() {
@Override
public boolean previous() {
return false;
}
@Override
public boolean next() {
return false;
}
@Override
public SearchRow getSearchRow() {
return null;
}
@Override
public Row get() {
return null;
}
@Override
public String toString() {
return "EMPTY_CURSOR";
}
};
/**
 * An empty future cursor (already completed with EMPTY_CURSOR).
 */
static final Future<Cursor> EMPTY_FUTURE_CURSOR = new DoneFuture<>(EMPTY_CURSOR);
/**
 * The top cursor.
 */
Future<Cursor> viewTopFutureCursor;
/**
 * The top filter.
 */
JoinFilter top;
/**
 * The filters.
 */
final JoinFilter[] filters;
/**
 * Whether this is a batched subquery.
 */
boolean batchedSubQuery;
// true once start() has initialized the first row
private boolean started;
// the row currently being assembled/iterated
private JoinRow current;
// whether a complete joined row is currently available for additionalFilter
private boolean found;
/**
 * This filter joined after this batched join and can be used normally.
 */
private final TableFilter additionalFilter;
/**
 * @param filtersCount number of filters participating in this batched join
 * @param additionalFilter table filter after this batched join.
 */
public JoinBatch(int filtersCount, TableFilter additionalFilter) {
if (filtersCount > 32) {
// This is because we store state in a 64 bit field, 2 bits per
// joined table.
throw DbException.getUnsupportedException(
"Too many tables in join (at most 32 supported).");
}
filters = new JoinFilter[filtersCount];
this.additionalFilter = additionalFilter;
}
/**
 * Get the lookup batch for the given table filter.
 *
 * @param joinFilterId joined table filter id
 * @return lookup batch
 */
public IndexLookupBatch getLookupBatch(int joinFilterId) {
return filters[joinFilterId].lookupBatch;
}
/**
 * Reset state of this batch.
 *
 * @param beforeQuery {@code true} if reset was called before the query run,
 *            {@code false} if after
 */
public void reset(boolean beforeQuery) {
current = null;
started = false;
found = false;
for (JoinFilter jf : filters) {
jf.reset(beforeQuery);
}
// the trailing non-batched filter only needs resetting before a run
if (beforeQuery && additionalFilter != null) {
additionalFilter.reset();
}
}
/**
 * Register the table filter and lookup batch.
 *
 * @param filter table filter
 * @param lookupBatch lookup batch
 */
public void register(TableFilter filter, IndexLookupBatch lookupBatch) {
assert filter != null;
// filters register in reverse join order; each new one becomes the top
top = new JoinFilter(lookupBatch, filter, top);
filters[top.id] = top;
}
/**
 * Get the value for the given column.
 *
 * @param filterId table filter id
 * @param column the column
 * @return column value for current row, or null if there is no current row
 */
public Value getValue(int filterId, Column column) {
    if (current == null) {
        return null;
    }
    Object x = current.row(filterId);
    assert x != null;
    // the slot either holds a materialized Row or a Cursor positioned on one
    Row row = current.isRow(filterId) ? (Row) x : ((Cursor) x).get();
    int columnId = column.getColumnId();
    if (columnId == -1) {
        // -1 is the pseudo column id of the row key
        return ValueLong.get(row.getKey());
    }
    // reuse the column id computed above instead of fetching it a second time
    Value value = row.getValue(columnId);
    if (value == null) {
        throw DbException.throwInternalError("value is null: " + column + " " + row);
    }
    return value;
}
// Initialize the batch: build the first (fake) row and the top cursor.
private void start() {
// initialize current row
current = new JoinRow(new Object[filters.length]);
// initialize top cursor
Cursor cursor;
if (batchedSubQuery) {
assert viewTopFutureCursor != null;
cursor = get(viewTopFutureCursor);
} else {
// setup usual index cursor
TableFilter f = top.filter;
IndexCursor indexCursor = f.getIndexCursor();
indexCursor.find(f.getSession(), f.getIndexConditions());
cursor = indexCursor;
}
current.updateRow(top.id, cursor, JoinRow.S_NULL, JoinRow.S_CURSOR);
// we need fake first row because batchedNext always will move to the
// next row
JoinRow fake = new JoinRow(null);
fake.next = current;
current = fake;
}
/**
 * Get next row from the join batch.
 *
 * @return true if there is a next row
 */
public boolean next() {
if (!started) {
start();
started = true;
}
if (additionalFilter == null) {
if (batchedNext()) {
assert current.isComplete();
return true;
}
return false;
}
// with a trailing non-batched filter, each complete batched row is
// expanded through that filter before fetching the next batched row
while (true) {
if (!found) {
if (!batchedNext()) {
return false;
}
assert current.isComplete();
found = true;
additionalFilter.reset();
}
// we call furtherFilter in usual way outside of this batch because
// it is more effective
if (additionalFilter.next()) {
return true;
}
found = false;
}
}
// Await a future cursor, converting any failure to a DbException and
// normalizing a null result to the shared EMPTY_CURSOR.
private static Cursor get(Future<Cursor> f) {
Cursor c;
try {
c = f.get();
} catch (Exception e) {
throw DbException.convert(e);
}
return c == null ? EMPTY_CURSOR : c;
}
// Advance to the next complete joined row. Walks the linked list of
// partially-fetched JoinRows, fetching cursors left-to-right (filter id 0
// is the leftmost/top table) and batching index lookups for joined tables.
private boolean batchedNext() {
if (current == null) {
// after last
return false;
}
// go next
current = current.next;
if (current == null) {
return false;
}
current.prev = null;
final int lastJfId = filters.length - 1;
int jfId = lastJfId;
while (current.row(jfId) == null) {
// lookup for the first non fetched filter for the current row
jfId--;
}
while (true) {
fetchCurrent(jfId);
if (!current.isDropped()) {
// if current was not dropped then it must be fetched
// successfully
if (jfId == lastJfId) {
// the whole join row is ready to be returned
return true;
}
JoinFilter join = filters[jfId + 1];
if (join.isBatchFull()) {
// get future cursors for join and go right to fetch them
current = join.find(current);
}
if (current.row(join.id) != null) {
// either find called or outer join with null-row
jfId = join.id;
continue;
}
}
// we have to go down and fetch next cursors for jfId if it is
// possible
if (current.next == null) {
// either dropped or null-row
if (current.isDropped()) {
current = current.prev;
if (current == null) {
return false;
}
}
assert !current.isDropped();
assert jfId != lastJfId;
jfId = 0;
while (current.row(jfId) != null) {
jfId++;
}
// force find on half filled batch (there must be either
// searchRows or Cursor.EMPTY set for null-rows)
current = filters[jfId].find(current);
} else {
// here we don't care if the current was dropped
current = current.next;
assert !current.isRow(jfId);
while (current.row(jfId) == null) {
assert jfId != top.id;
// need to go left and fetch more search rows
jfId--;
assert !current.isRow(jfId);
}
}
}
}
@SuppressWarnings("unchecked")
// Resolve the slot jfId of the current JoinRow from a future/cursor into a
// concrete row, handling outer-join null-rows and join condition checks.
private void fetchCurrent(final int jfId) {
assert current.prev == null || current.prev.isRow(jfId) : "prev must be already fetched";
assert jfId == 0 || current.isRow(jfId - 1) : "left must be already fetched";
assert !current.isRow(jfId) : "double fetching";
Object x = current.row(jfId);
assert x != null : "x null";
// in case of outer join we don't have any future around empty cursor
boolean newCursor = x == EMPTY_CURSOR;
if (newCursor) {
if (jfId == 0) {
// the top cursor is new and empty, then the whole select will
// not produce any rows
current.drop();
return;
}
} else if (current.isFuture(jfId)) {
// get cursor from a future
x = get((Future<Cursor>) x);
current.updateRow(jfId, x, JoinRow.S_FUTURE, JoinRow.S_CURSOR);
newCursor = true;
}
final JoinFilter jf = filters[jfId];
Cursor c = (Cursor) x;
assert c != null;
JoinFilter join = jf.join;
while (true) {
if (c == null || !c.next()) {
if (newCursor && jf.isOuterJoin()) {
// replace cursor with null-row
current.updateRow(jfId, jf.getNullRow(), JoinRow.S_CURSOR, JoinRow.S_ROW);
c = null;
newCursor = false;
} else {
// cursor is done, drop it
current.drop();
return;
}
}
if (!jf.isOk(c == null)) {
// try another row from the cursor
continue;
}
boolean joinEmpty = false;
if (join != null && !join.collectSearchRows()) {
if (join.isOuterJoin()) {
joinEmpty = true;
} else {
// join will fail, try next row in the cursor
continue;
}
}
if (c != null) {
// copy so the remaining cursor rows keep their own JoinRow
current = current.copyBehind(jfId);
// update jf, set current row from cursor
current.updateRow(jfId, c.get(), JoinRow.S_CURSOR, JoinRow.S_ROW);
}
if (joinEmpty) {
// update jf.join, set an empty cursor
current.updateRow(join.id, EMPTY_CURSOR, JoinRow.S_NULL, JoinRow.S_CURSOR);
}
return;
}
}
/**
 * Create an adapter that allows joining to this batch from sub-queries
 * and views.
 *
 * @param viewIndex the view index to wrap
 * @return the lookup batch adapter
 */
private IndexLookupBatch viewIndexLookupBatch(ViewIndex viewIndex) {
    return new ViewIndexLookupBatch(viewIndex);
}
/**
 * Create index lookup batch for a view index.
 *
 * @param viewIndex view index
 * @return index lookup batch or {@code null} if batching is not supported
 *         for this query
 */
public static IndexLookupBatch createViewIndexLookupBatch(ViewIndex viewIndex) {
    Query query = viewIndex.getQuery();
    if (query.isUnion()) {
        // unions have a dedicated lookup batch implementation
        ViewIndexLookupBatchUnion union = new ViewIndexLookupBatchUnion(viewIndex);
        if (union.initialize()) {
            return union;
        }
        return null;
    }
    JoinBatch batch = ((Select) query).getJoinBatch();
    if (batch == null || batch.getLookupBatch(0) == null) {
        // our sub-query is not batched or is top batched sub-query
        return null;
    }
    assert !batch.batchedSubQuery;
    batch.batchedSubQuery = true;
    return batch.viewIndexLookupBatch(viewIndex);
}
/**
 * Create fake index lookup batch for a non-batched table filter, so that
 * it can still participate in a batched join.
 *
 * @param filter the table filter
 * @return fake index lookup batch
 */
public static IndexLookupBatch createFakeIndexLookupBatch(TableFilter filter) {
    return new FakeLookupBatch(filter);
}
@Override
public String toString() {
    // show the current row and its neighbors in the in-progress list
    StringBuilder buff = new StringBuilder("JoinBatch->\n");
    buff.append("prev->").append(current == null ? null : current.prev).append('\n');
    buff.append("curr->").append(current).append('\n');
    buff.append("next->").append(current == null ? null : current.next);
    return buff.toString();
}
/**
 * Table filter participating in batched join. Wraps a {@link TableFilter}
 * together with its index lookup batch and its position in the join order.
 */
private static final class JoinFilter {
    // the index lookup batch; may be null only for the top filter (id == 0)
    final IndexLookupBatch lookupBatch;
    // position of this filter in the join order
    final int id;
    // the next filter to the right in the join order, or null for the last
    final JoinFilter join;
    final TableFilter filter;
    JoinFilter(IndexLookupBatch lookupBatch, TableFilter filter, JoinFilter join) {
        this.filter = filter;
        this.id = filter.getJoinFilterId();
        this.join = join;
        this.lookupBatch = lookupBatch;
        assert lookupBatch != null || id == 0;
    }
    // Reset the lookup batch (if any) before or after a query.
    void reset(boolean beforeQuery) {
        if (lookupBatch != null) {
            lookupBatch.reset(beforeQuery);
        }
    }
    // Null-row of the underlying table, used for outer joins.
    Row getNullRow() {
        return filter.getTable().getNullRow();
    }
    boolean isOuterJoin() {
        return filter.isJoinOuter();
    }
    boolean isBatchFull() {
        return lookupBatch.isBatchFull();
    }
    // Check filter condition and, unless ignored, the join condition.
    boolean isOk(boolean ignoreJoinCondition) {
        boolean filterOk = filter.isOk(filter.getFilterCondition());
        boolean joinOk = filter.isOk(filter.getJoinCondition());
        return filterOk && (ignoreJoinCondition || joinOk);
    }
    // Add the current search range to the batch; false if it is always-false.
    boolean collectSearchRows() {
        assert !isBatchFull();
        IndexCursor c = filter.getIndexCursor();
        c.prepare(filter.getSession(), filter.getIndexConditions());
        if (c.isAlwaysFalse()) {
            return false;
        }
        return lookupBatch.addSearchRows(c.getStart(), c.getEnd());
    }
    List<Future<Cursor>> find() {
        return lookupBatch.find();
    }
    /**
     * Run the batched lookup and assign the resulting future cursors to the
     * pending JoinRows, walking the list backwards from {@code current}.
     *
     * @param current the last JoinRow awaiting a cursor for this filter
     * @return the first (leftmost) JoinRow that was updated
     */
    JoinRow find(JoinRow current) {
        assert current != null;
        // lookupBatch is allowed to be empty when we have some null-rows
        // and forced find call
        List<Future<Cursor>> result = lookupBatch.find();
        // go backwards and assign futures
        for (int i = result.size(); i > 0;) {
            assert current.isRow(id - 1);
            if (current.row(id) == EMPTY_CURSOR) {
                // outer join support - skip row with existing empty cursor
                current = current.prev;
                continue;
            }
            assert current.row(id) == null;
            Future<Cursor> future = result.get(--i);
            if (future == null) {
                current.updateRow(id, EMPTY_CURSOR, JoinRow.S_NULL, JoinRow.S_CURSOR);
            } else {
                current.updateRow(id, future, JoinRow.S_NULL, JoinRow.S_FUTURE);
            }
            if (current.prev == null || i == 0) {
                break;
            }
            current = current.prev;
        }
        // handle empty cursors (because of outer joins) at the beginning
        while (current.prev != null && current.prev.row(id) == EMPTY_CURSOR) {
            current = current.prev;
        }
        assert current.prev == null || current.prev.isRow(id);
        assert current.row(id) != null;
        assert !current.isRow(id);
        // the last updated row
        return current;
    }
    @Override
    public String toString() {
        return "JoinFilter->" + filter;
    }
}
/**
 * Linked row in batched join. Each slot of {@link #row} corresponds to one
 * join filter; the per-slot fetch state (two bits each) is packed into
 * {@link #state}.
 */
private static final class JoinRow {
    // per-slot states; each slot occupies two bits in the state field
    private static final long S_NULL = 0;
    private static final long S_FUTURE = 1;
    private static final long S_CURSOR = 2;
    private static final long S_ROW = 3;
    private static final long S_MASK = 3;
    JoinRow prev;
    JoinRow next;
    /**
     * May contain one of the following:
     * <ul>
     * <li>{@code null}: means that we need to get future cursor
     * for this row</li>
     * <li>{@link Future}: means that we need to get a new {@link Cursor}
     * from the {@link Future}</li>
     * <li>{@link Cursor}: means that we need to fetch {@link Row}s from the
     * {@link Cursor}</li>
     * <li>{@link Row}: the {@link Row} is already fetched and is ready to
     * be used</li>
     * </ul>
     */
    private Object[] row;
    // packed 2-bit slot states: bits (2*id, 2*id+1) belong to filter id
    private long state;
    /**
     * @param row Row.
     */
    JoinRow(Object[] row) {
        this.row = row;
    }
    /**
     * @param joinFilterId Join filter id.
     * @return Row state.
     */
    private long getState(int joinFilterId) {
        return (state >>> (joinFilterId << 1)) & S_MASK;
    }
    /**
     * Allows to do a state transition in the following order:
     * 0. Slot contains {@code null} ({@link #S_NULL}).
     * 1. Slot contains {@link Future} ({@link #S_FUTURE}).
     * 2. Slot contains {@link Cursor} ({@link #S_CURSOR}).
     * 3. Slot contains {@link Row} ({@link #S_ROW}).
     *
     * @param joinFilterId {@link JoinRow} filter id.
     * @param i Increment by this number of moves.
     */
    private void incrementState(int joinFilterId, long i) {
        assert i > 0 : i;
        state += i << (joinFilterId << 1);
    }
    // Replace the slot content, asserting the expected state transition.
    void updateRow(int joinFilterId, Object x, long oldState, long newState) {
        assert getState(joinFilterId) == oldState : "old state: " + getState(joinFilterId);
        row[joinFilterId] = x;
        incrementState(joinFilterId, newState - oldState);
        assert getState(joinFilterId) == newState : "new state: " + getState(joinFilterId);
    }
    Object row(int joinFilterId) {
        return row[joinFilterId];
    }
    boolean isRow(int joinFilterId) {
        return getState(joinFilterId) == S_ROW;
    }
    boolean isFuture(int joinFilterId) {
        return getState(joinFilterId) == S_FUTURE;
    }
    private boolean isCursor(int joinFilterId) {
        return getState(joinFilterId) == S_CURSOR;
    }
    // Complete when the rightmost slot holds a fetched Row.
    boolean isComplete() {
        return isRow(row.length - 1);
    }
    // Dropped rows are marked by a null row array (see drop()).
    boolean isDropped() {
        return row == null;
    }
    // Unlink this row from the list and mark it as dropped.
    void drop() {
        if (prev != null) {
            prev.next = next;
        }
        if (next != null) {
            next.prev = prev;
        }
        row = null;
    }
    /**
     * Copy this JoinRow behind itself in linked list of all in progress
     * rows.
     *
     * @param jfId The last fetched filter id.
     * @return The copy.
     */
    JoinRow copyBehind(int jfId) {
        assert isCursor(jfId);
        assert jfId + 1 == row.length || row[jfId + 1] == null;
        // the copy shares only the slots to the left of jfId
        Object[] r = new Object[row.length];
        if (jfId != 0) {
            System.arraycopy(row, 0, r, 0, jfId);
        }
        JoinRow copy = new JoinRow(r);
        copy.state = state;
        if (prev != null) {
            copy.prev = prev;
            prev.next = copy;
        }
        prev = copy;
        copy.next = this;
        return copy;
    }
    @Override
    public String toString() {
        return "JoinRow->" + Arrays.toString(row);
    }
}
/**
 * Fake lookup batch for indexes which do not support batching but have to
 * participate in batched joins. It holds at most one search range and runs
 * the index lookup eagerly when {@link #find()} is called.
 */
private static final class FakeLookupBatch implements IndexLookupBatch {
    private final TableFilter filter;
    private SearchRow first;
    private SearchRow last;
    private boolean full;
    private final List<Future<Cursor>> result = new SingletonList<>();

    FakeLookupBatch(TableFilter filter) {
        this.filter = filter;
    }

    @Override
    public String getPlanSQL() {
        return "fake";
    }

    @Override
    public void reset(boolean beforeQuery) {
        // forget the pending range and the previously produced cursor
        full = false;
        first = null;
        last = null;
        result.set(0, null);
    }

    @Override
    public boolean addSearchRows(SearchRow first, SearchRow last) {
        assert !full;
        this.first = first;
        this.last = last;
        full = true;
        return true;
    }

    @Override
    public boolean isBatchFull() {
        return full;
    }

    @Override
    public List<Future<Cursor>> find() {
        if (!full) {
            return Collections.emptyList();
        }
        // run the lookup right away and wrap the cursor in a completed future
        result.set(0, new DoneFuture<>(filter.getIndex().find(filter, first, last)));
        full = false;
        first = null;
        last = null;
        return result;
    }
}
/**
* Simple singleton list.
* @param <E> Element type.
*/
static final class SingletonList<E> extends AbstractList<E> {
    // The single element; may be null.
    private E element;

    @Override
    public E get(int index) {
        assert index == 0;
        return element;
    }

    /**
     * Replace the single element.
     *
     * @param index must be 0
     * @param element the new element, may be null
     * @return the element previously stored, as required by the
     *         {@link java.util.List#set(int, Object)} contract
     *         (the original implementation incorrectly returned null)
     */
    @Override
    public E set(int index, E element) {
        assert index == 0;
        E old = this.element;
        this.element = element;
        return old;
    }

    @Override
    public int size() {
        return 1;
    }
}
/**
 * Base class for SELECT and SELECT UNION view index lookup batches.
 * Collects search ranges into reusable query runners and hands them out
 * as future cursors from {@link #find()}.
 *
 * @param <R> Runner type.
 */
private abstract static class ViewIndexLookupBatchBase<R extends QueryRunnerBase>
        implements IndexLookupBatch {
    protected final ViewIndex viewIndex;
    // pool of reusable runners; only the first resultSize entries are active
    private final ArrayList<Future<Cursor>> result = New.arrayList();
    private int resultSize;
    // true after find() until the runners have been reset for reuse
    private boolean findCalled;
    protected ViewIndexLookupBatchBase(ViewIndex viewIndex) {
        this.viewIndex = viewIndex;
    }
    @Override
    public String getPlanSQL() {
        return "view";
    }
    protected abstract boolean collectSearchRows(R r);
    protected abstract R newQueryRunner();
    protected abstract void startQueryRunners(int resultSize);
    // Reset all active runners after a find() call so they can be reused;
    // returns false when find() was never called since the last reset.
    protected final boolean resetAfterFind() {
        if (!findCalled) {
            return false;
        }
        findCalled = false;
        // method find was called, we need to reset futures to initial state
        // for reuse
        for (int i = 0; i < resultSize; i++) {
            queryRunner(i).reset();
        }
        resultSize = 0;
        return true;
    }
    @SuppressWarnings("unchecked")
    protected R queryRunner(int i) {
        return (R) result.get(i);
    }
    @Override
    public final boolean addSearchRows(SearchRow first, SearchRow last) {
        resetAfterFind();
        viewIndex.setupQueryParameters(viewIndex.getSession(), first, last, null);
        R r;
        if (resultSize < result.size()) {
            // get reused runner
            r = queryRunner(resultSize);
        } else {
            // create new runner
            result.add(r = newQueryRunner());
        }
        r.first = first;
        r.last = last;
        if (!collectSearchRows(r)) {
            r.clear();
            return false;
        }
        resultSize++;
        return true;
    }
    @Override
    public void reset(boolean beforeQuery) {
        if (resultSize != 0 && !resetAfterFind()) {
            // find was not called, need to just clear runners
            for (int i = 0; i < resultSize; i++) {
                queryRunner(i).clear();
            }
            resultSize = 0;
        }
    }
    @Override
    public final List<Future<Cursor>> find() {
        if (resultSize == 0) {
            return Collections.emptyList();
        }
        findCalled = true;
        startQueryRunners(resultSize);
        // expose only the active prefix of the runner pool
        return resultSize == result.size() ? result : result.subList(0, resultSize);
    }
}
/**
 * Lazy query runner base for subqueries and views. Extends
 * {@code LazyFuture} so the view query only executes when the future
 * cursor is actually requested.
 */
private abstract static class QueryRunnerBase extends LazyFuture<Cursor> {
    protected final ViewIndex viewIndex;
    protected SearchRow first;
    protected SearchRow last;
    // true when the last produced result was lazy; a lazy result keeps the
    // view's top future cursor installed until this runner is reset
    private boolean isLazyResult;
    QueryRunnerBase(ViewIndex viewIndex) {
        this.viewIndex = viewIndex;
    }
    protected void clear() {
        first = last = null;
    }
    @Override
    public final boolean reset() {
        if (isLazyResult) {
            // release the top future cursor held by the lazy result
            resetViewTopFutureCursorAfterQuery();
        }
        if (super.reset()) {
            return true;
        }
        // this query runner was never executed, need to clear manually
        clear();
        return false;
    }
    // Wrap the local result into a cursor and clear the search range.
    protected final ViewCursor newCursor(ResultInterface localResult) {
        isLazyResult = localResult.isLazy();
        ViewCursor cursor = new ViewCursor(viewIndex, localResult, first, last);
        clear();
        return cursor;
    }
    protected abstract void resetViewTopFutureCursorAfterQuery();
}
/**
 * View index lookup batch for a simple (non-union) SELECT. Batching is
 * delegated to the top table filter of this join batch: search rows are
 * collected into the top filter's batch, and each received top future
 * cursor later drives one lazy {@link QueryRunner} execution of the view
 * query.
 */
private final class ViewIndexLookupBatch extends ViewIndexLookupBatchBase<QueryRunner> {
    ViewIndexLookupBatch(ViewIndex viewIndex) {
        super(viewIndex);
    }
    @Override
    protected QueryRunner newQueryRunner() {
        return new QueryRunner(viewIndex);
    }
    @Override
    protected boolean collectSearchRows(QueryRunner r) {
        return top.collectSearchRows();
    }
    @Override
    public boolean isBatchFull() {
        return top.isBatchFull();
    }
    @Override
    protected void startQueryRunners(int resultSize) {
        // we do batched find only for top table filter and then lazily run
        // the ViewIndex query for each received top future cursor
        List<Future<Cursor>> topFutureCursors = top.find();
        if (topFutureCursors.size() != resultSize) {
            // message formatting fixed: was ", expected :" with a
            // misplaced space before the colon
            throw DbException
                    .throwInternalError("Unexpected result size: " +
                            topFutureCursors.size() + ", expected: " +
                            resultSize);
        }
        // pair each runner with its future cursor from the top filter
        for (int i = 0; i < resultSize; i++) {
            QueryRunner r = queryRunner(i);
            r.topFutureCursor = topFutureCursors.get(i);
        }
    }
}
/**
 * Query runner for SELECT: installs the received top future cursor into
 * the enclosing {@link JoinBatch} and runs the view query against it.
 */
private final class QueryRunner extends QueryRunnerBase {
    // future cursor received from the top filter; null means empty result
    Future<Cursor> topFutureCursor;
    QueryRunner(ViewIndex viewIndex) {
        super(viewIndex);
    }
    @Override
    protected void clear() {
        super.clear();
        topFutureCursor = null;
    }
    @Override
    protected Cursor run() throws Exception {
        if (topFutureCursor == null) {
            // if the top cursor is empty then the whole query will produce
            // empty result
            return EMPTY_CURSOR;
        }
        viewIndex.setupQueryParameters(viewIndex.getSession(), first, last, null);
        // expose the top future cursor to the view query being executed
        JoinBatch.this.viewTopFutureCursor = topFutureCursor;
        ResultInterface localResult;
        boolean lazy = false;
        try {
            localResult = viewIndex.getQuery().query(0);
            lazy = localResult.isLazy();
        } finally {
            // a lazy result still needs the cursor; it is released on reset
            if (!lazy) {
                resetViewTopFutureCursorAfterQuery();
            }
        }
        return newCursor(localResult);
    }
    @Override
    protected void resetViewTopFutureCursorAfterQuery() {
        JoinBatch.this.viewTopFutureCursor = null;
    }
}
/**
 * View index lookup batch for UNION queries. Delegates batching to the
 * top filters of all batched sub-queries of the union.
 */
private static final class ViewIndexLookupBatchUnion
        extends ViewIndexLookupBatchBase<QueryRunnerUnion> {
    // top join filters of all batched sub-queries
    ArrayList<JoinFilter> filters;
    // join batches of the batched sub-queries, parallel to filters
    ArrayList<JoinBatch> joinBatches;
    // false when at least one sub-query of the union is not batched
    private boolean onlyBatchedQueries = true;
    protected ViewIndexLookupBatchUnion(ViewIndex viewIndex) {
        super(viewIndex);
    }
    // True if this union can be batched: at least one batched sub-query was
    // found and none of them turned out to be the top batched sub-query.
    boolean initialize() {
        return collectJoinBatches(viewIndex.getQuery()) && joinBatches != null;
    }
    private boolean collectJoinBatches(Query query) {
        if (query.isUnion()) {
            // recurse into both sides of the union
            SelectUnion union = (SelectUnion) query;
            return collectJoinBatches(union.getLeft()) &&
                    collectJoinBatches(union.getRight());
        }
        Select select = (Select) query;
        JoinBatch jb = select.getJoinBatch();
        if (jb == null) {
            onlyBatchedQueries = false;
        } else {
            if (jb.getLookupBatch(0) == null) {
                // we are top sub-query
                return false;
            }
            assert !jb.batchedSubQuery;
            jb.batchedSubQuery = true;
            if (joinBatches == null) {
                joinBatches = New.arrayList();
                filters = New.arrayList();
            }
            filters.add(jb.filters[0]);
            joinBatches.add(jb);
        }
        return true;
    }
    @Override
    public boolean isBatchFull() {
        // if at least one is full
        for (JoinFilter filter : filters) {
            if (filter.isBatchFull()) {
                return true;
            }
        }
        return false;
    }
    @Override
    protected boolean collectSearchRows(QueryRunnerUnion r) {
        boolean collected = false;
        for (int i = 0; i < filters.size(); i++) {
            if (filters.get(i).collectSearchRows()) {
                collected = true;
            } else {
                // this sub-query produces no rows for this search range
                r.topFutureCursors[i] = EMPTY_FUTURE_CURSOR;
            }
        }
        return collected || !onlyBatchedQueries;
    }
    @Override
    protected QueryRunnerUnion newQueryRunner() {
        return new QueryRunnerUnion(this);
    }
    @Override
    protected void startQueryRunners(int resultSize) {
        // distribute each filter's future cursors over the runners whose
        // slot for that filter was not pre-filled with EMPTY_FUTURE_CURSOR
        for (int f = 0; f < filters.size(); f++) {
            List<Future<Cursor>> topFutureCursors = filters.get(f).find();
            int r = 0, c = 0;
            for (; r < resultSize; r++) {
                Future<Cursor>[] cs = queryRunner(r).topFutureCursors;
                if (cs[f] == null) {
                    cs[f] = topFutureCursors.get(c++);
                }
            }
            assert r == resultSize;
            assert c == topFutureCursors.size();
        }
    }
}
/**
 * Query runner for UNION: installs one top future cursor into each batched
 * sub-query's join batch, then runs the union query.
 */
private static class QueryRunnerUnion extends QueryRunnerBase {
    // one future cursor per batched sub-query, parallel to batchUnion.filters
    Future<Cursor>[] topFutureCursors;
    private ViewIndexLookupBatchUnion batchUnion;
    @SuppressWarnings("unchecked")
    QueryRunnerUnion(ViewIndexLookupBatchUnion batchUnion) {
        super(batchUnion.viewIndex);
        this.batchUnion = batchUnion;
        topFutureCursors = new Future[batchUnion.filters.size()];
    }
    @Override
    protected void clear() {
        super.clear();
        for (int i = 0; i < topFutureCursors.length; i++) {
            topFutureCursors[i] = null;
        }
    }
    @Override
    protected Cursor run() throws Exception {
        viewIndex.setupQueryParameters(viewIndex.getSession(), first, last, null);
        // expose a top future cursor to every batched sub-query
        ArrayList<JoinBatch> joinBatches = batchUnion.joinBatches;
        for (int i = 0, size = joinBatches.size(); i < size; i++) {
            assert topFutureCursors[i] != null;
            joinBatches.get(i).viewTopFutureCursor = topFutureCursors[i];
        }
        ResultInterface localResult;
        boolean lazy = false;
        try {
            localResult = viewIndex.getQuery().query(0);
            lazy = localResult.isLazy();
        } finally {
            // a lazy result still needs the cursors; released on reset
            if (!lazy) {
                resetViewTopFutureCursorAfterQuery();
            }
        }
        return newCursor(localResult);
    }
    @Override
    protected void resetViewTopFutureCursorAfterQuery() {
        ArrayList<JoinBatch> joinBatches = batchUnion.joinBatches;
        if (joinBatches == null) {
            return;
        }
        for (JoinBatch joinBatch : joinBatches) {
            joinBatch.viewTopFutureCursor = null;
        }
    }
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/LinkSchema.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import org.h2.message.DbException;
import org.h2.tools.SimpleResultSet;
import org.h2.util.JdbcUtils;
import org.h2.util.StringUtils;
/**
 * A utility class to create table links for a whole schema.
 */
public class LinkSchema {
    private LinkSchema() {
        // utility class
    }
    /**
     * Link all tables of a schema to the database.
     *
     * @param conn the connection to the database where the links are to be
     *            created
     * @param targetSchema the schema name where the objects should be created
     * @param driver the driver class name of the linked database
     * @param url the database URL of the linked database
     * @param user the user name
     * @param password the password
     * @param sourceSchema the schema where the existing tables are
     * @return a result set with the created tables
     */
    public static ResultSet linkSchema(Connection conn, String targetSchema,
            String driver, String url, String user, String password,
            String sourceSchema) {
        Connection c2 = null;
        Statement stat = null;
        ResultSet rs = null;
        SimpleResultSet result = new SimpleResultSet();
        result.setAutoClose(false);
        result.addColumn("TABLE_NAME", Types.VARCHAR, Integer.MAX_VALUE, 0);
        try {
            // open a connection to the source database to read its metadata
            c2 = JdbcUtils.getConnection(driver, url, user, password);
            stat = conn.createStatement();
            stat.execute("CREATE SCHEMA IF NOT EXISTS " +
                    StringUtils.quoteIdentifier(targetSchema));
            //Workaround for PostgreSQL to avoid index names
            if (url.startsWith("jdbc:postgresql:")) {
                rs = c2.getMetaData().getTables(null, sourceSchema, null,
                        new String[] { "TABLE", "LINKED TABLE", "VIEW", "EXTERNAL" });
            } else {
                rs = c2.getMetaData().getTables(null, sourceSchema, null, null);
            }
            while (rs.next()) {
                String table = rs.getString("TABLE_NAME");
                // re-create the link: drop any existing table of that name
                StringBuilder buff = new StringBuilder();
                buff.append("DROP TABLE IF EXISTS ").
                        append(StringUtils.quoteIdentifier(targetSchema)).
                        append('.').
                        append(StringUtils.quoteIdentifier(table));
                stat.execute(buff.toString());
                // NOTE(review): the password is embedded (quoted) in the
                // CREATE LINKED TABLE statement executed on conn
                buff = new StringBuilder();
                buff.append("CREATE LINKED TABLE ").
                        append(StringUtils.quoteIdentifier(targetSchema)).
                        append('.').
                        append(StringUtils.quoteIdentifier(table)).
                        append('(').
                        append(StringUtils.quoteStringSQL(driver)).
                        append(", ").
                        append(StringUtils.quoteStringSQL(url)).
                        append(", ").
                        append(StringUtils.quoteStringSQL(user)).
                        append(", ").
                        append(StringUtils.quoteStringSQL(password)).
                        append(", ").
                        append(StringUtils.quoteStringSQL(sourceSchema)).
                        append(", ").
                        append(StringUtils.quoteStringSQL(table)).
                        append(')');
                stat.execute(buff.toString());
                result.addRow(table);
            }
        } catch (SQLException e) {
            throw DbException.convert(e);
        } finally {
            JdbcUtils.closeSilently(rs);
            JdbcUtils.closeSilently(c2);
            JdbcUtils.closeSilently(stat);
        }
        return result;
    }
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/MetaTable.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.Timestamp;
import java.text.Collator;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import org.h2.command.Command;
import org.h2.constraint.Constraint;
import org.h2.constraint.ConstraintActionType;
import org.h2.constraint.ConstraintCheck;
import org.h2.constraint.ConstraintReferential;
import org.h2.constraint.ConstraintUnique;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.DbObject;
import org.h2.engine.FunctionAlias;
import org.h2.engine.FunctionAlias.JavaMethod;
import org.h2.engine.QueryStatisticsData;
import org.h2.engine.Right;
import org.h2.engine.Role;
import org.h2.engine.Session;
import org.h2.engine.Setting;
import org.h2.engine.User;
import org.h2.engine.UserAggregate;
import org.h2.engine.UserDataType;
import org.h2.expression.ValueExpression;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.index.MetaIndex;
import org.h2.index.MultiVersionIndex;
import org.h2.jdbc.JdbcSQLException;
import org.h2.message.DbException;
import org.h2.mvstore.FileStore;
import org.h2.mvstore.db.MVTableEngine.Store;
import org.h2.result.Row;
import org.h2.result.SearchRow;
import org.h2.result.SortOrder;
import org.h2.schema.Constant;
import org.h2.schema.Schema;
import org.h2.schema.SchemaObject;
import org.h2.schema.Sequence;
import org.h2.schema.TriggerObject;
import org.h2.store.InDoubtTransaction;
import org.h2.store.PageStore;
import org.h2.tools.Csv;
import org.h2.util.MathUtils;
import org.h2.util.New;
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;
import org.h2.util.Utils;
import org.h2.value.CompareMode;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueNull;
import org.h2.value.ValueString;
import org.h2.value.ValueStringIgnoreCase;
/**
* This class is responsible to build the database meta data pseudo tables.
*/
public class MetaTable extends Table {
/**
 * The approximate number of rows of a meta table.
 */
public static final long ROW_COUNT_APPROXIMATION = 1000;
private static final String CHARACTER_SET_NAME = "Unicode";
// Meta table type ids: each constant selects one pseudo table in the
// constructor's switch; they must stay consecutive starting from 0.
private static final int TABLES = 0;
private static final int COLUMNS = 1;
private static final int INDEXES = 2;
private static final int TABLE_TYPES = 3;
private static final int TYPE_INFO = 4;
private static final int CATALOGS = 5;
private static final int SETTINGS = 6;
private static final int HELP = 7;
private static final int SEQUENCES = 8;
private static final int USERS = 9;
private static final int ROLES = 10;
private static final int RIGHTS = 11;
private static final int FUNCTION_ALIASES = 12;
private static final int SCHEMATA = 13;
private static final int TABLE_PRIVILEGES = 14;
private static final int COLUMN_PRIVILEGES = 15;
private static final int COLLATIONS = 16;
private static final int VIEWS = 17;
private static final int IN_DOUBT = 18;
private static final int CROSS_REFERENCES = 19;
private static final int CONSTRAINTS = 20;
private static final int FUNCTION_COLUMNS = 21;
private static final int CONSTANTS = 22;
private static final int DOMAINS = 23;
private static final int TRIGGERS = 24;
private static final int SESSIONS = 25;
private static final int LOCKS = 26;
private static final int SESSION_STATE = 27;
private static final int QUERY_STATISTICS = 28;
private static final int SYNONYMS = 29;
private static final int TABLE_CONSTRAINTS = 30;
private static final int KEY_COLUMN_USAGE = 31;
private static final int REFERENTIAL_CONSTRAINTS = 32;
private static final int META_TABLE_TYPE_COUNT = REFERENTIAL_CONSTRAINTS + 1;
// the meta table type (one of the constants above)
private final int type;
// column id of the indexed column, or -1 when this table has no index
private final int indexColumn;
// pseudo index over indexColumn, or null when there is no index column
private final MetaIndex metaIndex;
/**
 * Create a new metadata table. The table type selects the set of columns
 * and, for some types, the column backing the pseudo meta index.
 *
 * @param schema the schema
 * @param id the object id
 * @param type the meta table type
 */
public MetaTable(Schema schema, int id, int type) {
    // tableName will be set later
    super(schema, id, null, true, true);
    this.type = type;
    Column[] cols;
    // when non-null, a MetaIndex is created over this column below
    String indexColumnName = null;
    switch (type) {
    case TABLES:
        setObjectName("TABLES");
        cols = createColumns(
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "TABLE_TYPE",
                // extensions
                "STORAGE_TYPE",
                "SQL",
                "REMARKS",
                "LAST_MODIFICATION BIGINT",
                "ID INT",
                "TYPE_NAME",
                "TABLE_CLASS",
                "ROW_COUNT_ESTIMATE BIGINT"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case COLUMNS:
        setObjectName("COLUMNS");
        cols = createColumns(
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "COLUMN_NAME",
                "ORDINAL_POSITION INT",
                "COLUMN_DEFAULT",
                "IS_NULLABLE",
                "DATA_TYPE INT",
                "CHARACTER_MAXIMUM_LENGTH INT",
                "CHARACTER_OCTET_LENGTH INT",
                "NUMERIC_PRECISION INT",
                "NUMERIC_PRECISION_RADIX INT",
                "NUMERIC_SCALE INT",
                "CHARACTER_SET_NAME",
                "COLLATION_NAME",
                // extensions
                "TYPE_NAME",
                "NULLABLE INT",
                "IS_COMPUTED BIT",
                "SELECTIVITY INT",
                "CHECK_CONSTRAINT",
                "SEQUENCE_NAME",
                "REMARKS",
                "SOURCE_DATA_TYPE SMALLINT",
                "COLUMN_TYPE",
                "COLUMN_ON_UPDATE"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case INDEXES:
        setObjectName("INDEXES");
        cols = createColumns(
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "NON_UNIQUE BIT",
                "INDEX_NAME",
                "ORDINAL_POSITION SMALLINT",
                "COLUMN_NAME",
                "CARDINALITY INT",
                "PRIMARY_KEY BIT",
                "INDEX_TYPE_NAME",
                "IS_GENERATED BIT",
                "INDEX_TYPE SMALLINT",
                "ASC_OR_DESC",
                "PAGES INT",
                "FILTER_CONDITION",
                "REMARKS",
                "SQL",
                "ID INT",
                "SORT_TYPE INT",
                "CONSTRAINT_NAME",
                "INDEX_CLASS",
                "AFFINITY BIT"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case TABLE_TYPES:
        setObjectName("TABLE_TYPES");
        cols = createColumns("TYPE");
        break;
    case TYPE_INFO:
        setObjectName("TYPE_INFO");
        cols = createColumns(
                "TYPE_NAME",
                "DATA_TYPE INT",
                "PRECISION INT",
                "PREFIX",
                "SUFFIX",
                "PARAMS",
                "AUTO_INCREMENT BIT",
                "MINIMUM_SCALE SMALLINT",
                "MAXIMUM_SCALE SMALLINT",
                "RADIX INT",
                "POS INT",
                "CASE_SENSITIVE BIT",
                "NULLABLE SMALLINT",
                "SEARCHABLE SMALLINT"
        );
        break;
    case CATALOGS:
        setObjectName("CATALOGS");
        cols = createColumns("CATALOG_NAME");
        break;
    case SETTINGS:
        setObjectName("SETTINGS");
        cols = createColumns("NAME", "VALUE");
        break;
    case HELP:
        setObjectName("HELP");
        cols = createColumns(
                "ID INT",
                "SECTION",
                "TOPIC",
                "SYNTAX",
                "TEXT"
        );
        break;
    case SEQUENCES:
        setObjectName("SEQUENCES");
        cols = createColumns(
                "SEQUENCE_CATALOG",
                "SEQUENCE_SCHEMA",
                "SEQUENCE_NAME",
                "CURRENT_VALUE BIGINT",
                "INCREMENT BIGINT",
                "IS_GENERATED BIT",
                "REMARKS",
                "CACHE BIGINT",
                "MIN_VALUE BIGINT",
                "MAX_VALUE BIGINT",
                "IS_CYCLE BIT",
                "ID INT"
        );
        break;
    case USERS:
        setObjectName("USERS");
        cols = createColumns(
                "NAME",
                "ADMIN",
                "REMARKS",
                "ID INT"
        );
        break;
    case ROLES:
        setObjectName("ROLES");
        cols = createColumns(
                "NAME",
                "REMARKS",
                "ID INT"
        );
        break;
    case RIGHTS:
        setObjectName("RIGHTS");
        cols = createColumns(
                "GRANTEE",
                "GRANTEETYPE",
                "GRANTEDROLE",
                "RIGHTS",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "ID INT"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case FUNCTION_ALIASES:
        setObjectName("FUNCTION_ALIASES");
        cols = createColumns(
                "ALIAS_CATALOG",
                "ALIAS_SCHEMA",
                "ALIAS_NAME",
                "JAVA_CLASS",
                "JAVA_METHOD",
                "DATA_TYPE INT",
                "TYPE_NAME",
                "COLUMN_COUNT INT",
                "RETURNS_RESULT SMALLINT",
                "REMARKS",
                "ID INT",
                "SOURCE"
        );
        break;
    case FUNCTION_COLUMNS:
        setObjectName("FUNCTION_COLUMNS");
        cols = createColumns(
                "ALIAS_CATALOG",
                "ALIAS_SCHEMA",
                "ALIAS_NAME",
                "JAVA_CLASS",
                "JAVA_METHOD",
                "COLUMN_COUNT INT",
                "POS INT",
                "COLUMN_NAME",
                "DATA_TYPE INT",
                "TYPE_NAME",
                "PRECISION INT",
                "SCALE SMALLINT",
                "RADIX SMALLINT",
                "NULLABLE SMALLINT",
                "COLUMN_TYPE SMALLINT",
                "REMARKS",
                "COLUMN_DEFAULT"
        );
        break;
    case SCHEMATA:
        setObjectName("SCHEMATA");
        cols = createColumns(
                "CATALOG_NAME",
                "SCHEMA_NAME",
                "SCHEMA_OWNER",
                "DEFAULT_CHARACTER_SET_NAME",
                "DEFAULT_COLLATION_NAME",
                "IS_DEFAULT BIT",
                "REMARKS",
                "ID INT"
        );
        break;
    case TABLE_PRIVILEGES:
        setObjectName("TABLE_PRIVILEGES");
        cols = createColumns(
                "GRANTOR",
                "GRANTEE",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "PRIVILEGE_TYPE",
                "IS_GRANTABLE"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case COLUMN_PRIVILEGES:
        setObjectName("COLUMN_PRIVILEGES");
        cols = createColumns(
                "GRANTOR",
                "GRANTEE",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "COLUMN_NAME",
                "PRIVILEGE_TYPE",
                "IS_GRANTABLE"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case COLLATIONS:
        setObjectName("COLLATIONS");
        cols = createColumns(
                "NAME",
                "KEY"
        );
        break;
    case VIEWS:
        setObjectName("VIEWS");
        cols = createColumns(
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "VIEW_DEFINITION",
                "CHECK_OPTION",
                "IS_UPDATABLE",
                "STATUS",
                "REMARKS",
                "ID INT"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case IN_DOUBT:
        setObjectName("IN_DOUBT");
        cols = createColumns(
                "TRANSACTION",
                "STATE"
        );
        break;
    case CROSS_REFERENCES:
        setObjectName("CROSS_REFERENCES");
        cols = createColumns(
                "PKTABLE_CATALOG",
                "PKTABLE_SCHEMA",
                "PKTABLE_NAME",
                "PKCOLUMN_NAME",
                "FKTABLE_CATALOG",
                "FKTABLE_SCHEMA",
                "FKTABLE_NAME",
                "FKCOLUMN_NAME",
                "ORDINAL_POSITION SMALLINT",
                "UPDATE_RULE SMALLINT",
                "DELETE_RULE SMALLINT",
                "FK_NAME",
                "PK_NAME",
                "DEFERRABILITY SMALLINT"
        );
        indexColumnName = "PKTABLE_NAME";
        break;
    case CONSTRAINTS:
        setObjectName("CONSTRAINTS");
        cols = createColumns(
                "CONSTRAINT_CATALOG",
                "CONSTRAINT_SCHEMA",
                "CONSTRAINT_NAME",
                "CONSTRAINT_TYPE",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "UNIQUE_INDEX_NAME",
                "CHECK_EXPRESSION",
                "COLUMN_LIST",
                "REMARKS",
                "SQL",
                "ID INT"
        );
        indexColumnName = "TABLE_NAME";
        break;
    case CONSTANTS:
        setObjectName("CONSTANTS");
        cols = createColumns(
                "CONSTANT_CATALOG",
                "CONSTANT_SCHEMA",
                "CONSTANT_NAME",
                "DATA_TYPE INT",
                "REMARKS",
                "SQL",
                "ID INT"
        );
        break;
    case DOMAINS:
        setObjectName("DOMAINS");
        cols = createColumns(
                "DOMAIN_CATALOG",
                "DOMAIN_SCHEMA",
                "DOMAIN_NAME",
                "COLUMN_DEFAULT",
                "IS_NULLABLE",
                "DATA_TYPE INT",
                "PRECISION INT",
                "SCALE INT",
                "TYPE_NAME",
                "SELECTIVITY INT",
                "CHECK_CONSTRAINT",
                "REMARKS",
                "SQL",
                "ID INT"
        );
        break;
    case TRIGGERS:
        setObjectName("TRIGGERS");
        cols = createColumns(
                "TRIGGER_CATALOG",
                "TRIGGER_SCHEMA",
                "TRIGGER_NAME",
                "TRIGGER_TYPE",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "BEFORE BIT",
                "JAVA_CLASS",
                "QUEUE_SIZE INT",
                "NO_WAIT BIT",
                "REMARKS",
                "SQL",
                "ID INT"
        );
        break;
    case SESSIONS: {
        setObjectName("SESSIONS");
        cols = createColumns(
                "ID INT",
                "USER_NAME",
                "SESSION_START",
                "STATEMENT",
                "STATEMENT_START",
                "CONTAINS_UNCOMMITTED"
        );
        break;
    }
    case LOCKS: {
        setObjectName("LOCKS");
        cols = createColumns(
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "SESSION_ID INT",
                "LOCK_TYPE"
        );
        break;
    }
    case SESSION_STATE: {
        setObjectName("SESSION_STATE");
        cols = createColumns(
                "KEY",
                "SQL"
        );
        break;
    }
    case QUERY_STATISTICS: {
        setObjectName("QUERY_STATISTICS");
        cols = createColumns(
                "SQL_STATEMENT",
                "EXECUTION_COUNT INT",
                "MIN_EXECUTION_TIME DOUBLE",
                "MAX_EXECUTION_TIME DOUBLE",
                "CUMULATIVE_EXECUTION_TIME DOUBLE",
                "AVERAGE_EXECUTION_TIME DOUBLE",
                "STD_DEV_EXECUTION_TIME DOUBLE",
                "MIN_ROW_COUNT INT",
                "MAX_ROW_COUNT INT",
                "CUMULATIVE_ROW_COUNT LONG",
                "AVERAGE_ROW_COUNT DOUBLE",
                "STD_DEV_ROW_COUNT DOUBLE"
        );
        break;
    }
    case SYNONYMS: {
        setObjectName("SYNONYMS");
        cols = createColumns(
                "SYNONYM_CATALOG",
                "SYNONYM_SCHEMA",
                "SYNONYM_NAME",
                "SYNONYM_FOR",
                "SYNONYM_FOR_SCHEMA",
                "TYPE_NAME",
                "STATUS",
                "REMARKS",
                "ID INT"
        );
        indexColumnName = "SYNONYM_NAME";
        break;
    }
    case TABLE_CONSTRAINTS: {
        setObjectName("TABLE_CONSTRAINTS");
        cols = createColumns(
                "CONSTRAINT_CATALOG",
                "CONSTRAINT_SCHEMA",
                "CONSTRAINT_NAME",
                "CONSTRAINT_TYPE",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "IS_DEFERRABLE",
                "INITIALLY_DEFERRED"
        );
        indexColumnName = "TABLE_NAME";
        break;
    }
    case KEY_COLUMN_USAGE: {
        setObjectName("KEY_COLUMN_USAGE");
        cols = createColumns(
                "CONSTRAINT_CATALOG",
                "CONSTRAINT_SCHEMA",
                "CONSTRAINT_NAME",
                "TABLE_CATALOG",
                "TABLE_SCHEMA",
                "TABLE_NAME",
                "COLUMN_NAME",
                "ORDINAL_POSITION",
                "POSITION_IN_UNIQUE_CONSTRAINT"
        );
        indexColumnName = "TABLE_NAME";
        break;
    }
    case REFERENTIAL_CONSTRAINTS: {
        setObjectName("REFERENTIAL_CONSTRAINTS");
        cols = createColumns(
                "CONSTRAINT_CATALOG",
                "CONSTRAINT_SCHEMA",
                "CONSTRAINT_NAME",
                "UNIQUE_CONSTRAINT_CATALOG",
                "UNIQUE_CONSTRAINT_SCHEMA",
                "UNIQUE_CONSTRAINT_NAME",
                "MATCH_OPTION",
                "UPDATE_RULE",
                "DELETE_RULE"
        );
        break;
    }
    default:
        throw DbException.throwInternalError("type="+type);
    }
    setColumns(cols);
    // create the pseudo meta index over the chosen column, if any
    if (indexColumnName == null) {
        indexColumn = -1;
        metaIndex = null;
    } else {
        indexColumn = getColumn(indexColumnName).getColumnId();
        IndexColumn[] indexCols = IndexColumn.wrap(
                new Column[] { cols[indexColumn] });
        metaIndex = new MetaIndex(this, indexCols, false);
    }
}
private Column[] createColumns(String... names) {
Column[] cols = new Column[names.length];
for (int i = 0; i < names.length; i++) {
String nameType = names[i];
int idx = nameType.indexOf(' ');
int dataType;
String name;
if (idx < 0) {
dataType = database.getMode().lowerCaseIdentifiers ?
Value.STRING_IGNORECASE : Value.STRING;
name = nameType;
} else {
dataType = DataType.getTypeByName(nameType.substring(idx + 1), database.getMode()).type;
name = nameType.substring(0, idx);
}
cols[i] = new Column(name, dataType);
}
return cols;
}
    /**
     * Metadata tables are virtual, so there is no DROP statement for them.
     *
     * @return null, always
     */
    @Override
    public String getDropSQL() {
        return null;
    }
    /**
     * Metadata tables are virtual, so there is no CREATE statement for them.
     *
     * @return null, always
     */
    @Override
    public String getCreateSQL() {
        return null;
    }
    /**
     * Adding an index to a metadata table is not supported.
     *
     * @throws DbException always ("META" feature not supported)
     */
    @Override
    public Index addIndex(Session session, String indexName, int indexId,
            IndexColumn[] cols, IndexType indexType, boolean create,
            String indexComment) {
        throw DbException.getUnsupportedException("META");
    }
    /**
     * Locking a metadata table is a no-op: the table is generated on the fly
     * and no lock is ever acquired.
     *
     * @return false, always (no lock was taken)
     */
    @Override
    public boolean lock(Session session, boolean exclusive, boolean forceLockEvenInMvcc) {
        // nothing to do
        return false;
    }
    /**
     * Metadata tables are never locked exclusively (see {@code lock}, which
     * is a no-op).
     *
     * @return false, always
     */
    @Override
    public boolean isLockedExclusively() {
        return false;
    }
private String identifier(String s) {
if (database.getMode().lowerCaseIdentifiers) {
s = s == null ? null : StringUtils.toLowerEnglish(s);
}
return s;
}
private ArrayList<Table> getAllTables(Session session) {
ArrayList<Table> tables = database.getAllTablesAndViews(true);
ArrayList<Table> tempTables = session.getLocalTempTables();
tables.addAll(tempTables);
return tables;
}
private ArrayList<Table> getTablesByName(Session session, String tableName) {
if (database.getMode().lowerCaseIdentifiers) {
tableName = StringUtils.toUpperEnglish(tableName);
}
ArrayList<Table> tables = database.getTableOrViewByName(tableName);
for (Table temp : session.getLocalTempTables()) {
if (temp.getName().equals(tableName)) {
tables.add(temp);
}
}
return tables;
}
private boolean checkIndex(Session session, String value, Value indexFrom,
Value indexTo) {
if (value == null || (indexFrom == null && indexTo == null)) {
return true;
}
Database db = session.getDatabase();
Value v;
if (database.getMode().lowerCaseIdentifiers) {
v = ValueStringIgnoreCase.get(value);
} else {
v = ValueString.get(value);
}
if (indexFrom != null && db.compare(v, indexFrom) < 0) {
return false;
}
if (indexTo != null && db.compare(v, indexTo) > 0) {
return false;
}
return true;
}
    /**
     * Replace null with an empty string, so generated rows (e.g. the REMARKS
     * columns) never contain null.
     *
     * @param s the string, may be null
     * @return the string itself, or "" if it was null
     */
    private static String replaceNullWithEmpty(String s) {
        return s == null ? "" : s;
    }
    /**
     * Whether the table must be hidden from the given session: hidden tables
     * are only visible to the database's system session.
     *
     * @param table the table to check
     * @param session the current session
     * @return true if the table should not be listed
     */
    private boolean hideTable(Table table, Session session) {
        return table.isHidden() && session != database.getSystemSession();
    }
/**
* Generate the data for the given metadata table using the given first and
* last row filters.
*
* @param session the session
* @param first the first row to return
* @param last the last row to return
* @return the generated rows
*/
public ArrayList<Row> generateRows(Session session, SearchRow first,
SearchRow last) {
Value indexFrom = null, indexTo = null;
if (indexColumn >= 0) {
if (first != null) {
indexFrom = first.getValue(indexColumn);
}
if (last != null) {
indexTo = last.getValue(indexColumn);
}
}
ArrayList<Row> rows = New.arrayList();
String catalog = identifier(database.getShortName());
boolean admin = session.getUser().isAdmin();
switch (type) {
case TABLES: {
for (Table table : getAllTables(session)) {
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
if (hideTable(table, session)) {
continue;
}
String storageType;
if (table.isTemporary()) {
if (table.isGlobalTemporary()) {
storageType = "GLOBAL TEMPORARY";
} else {
storageType = "LOCAL TEMPORARY";
}
} else {
storageType = table.isPersistIndexes() ?
"CACHED" : "MEMORY";
}
String sql = table.getCreateSQL();
if (!admin) {
if (sql != null && sql.contains(JdbcSQLException.HIDE_SQL)) {
// hide the password of linked tables
sql = "-";
}
}
add(rows,
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
tableName,
// TABLE_TYPE
table.getTableType().toString(),
// STORAGE_TYPE
storageType,
// SQL
sql,
// REMARKS
replaceNullWithEmpty(table.getComment()),
// LAST_MODIFICATION
"" + table.getMaxDataModificationId(),
// ID
"" + table.getId(),
// TYPE_NAME
null,
// TABLE_CLASS
table.getClass().getName(),
// ROW_COUNT_ESTIMATE
"" + table.getRowCountApproximation()
);
}
break;
}
case COLUMNS: {
// reduce the number of tables to scan - makes some metadata queries
// 10x faster
final ArrayList<Table> tablesToList;
if (indexFrom != null && indexFrom.equals(indexTo)) {
String tableName = identifier(indexFrom.getString());
tablesToList = getTablesByName(session, tableName);
} else {
tablesToList = getAllTables(session);
}
for (Table table : tablesToList) {
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
if (hideTable(table, session)) {
continue;
}
Column[] cols = table.getColumns();
String collation = database.getCompareMode().getName();
for (int j = 0; j < cols.length; j++) {
Column c = cols[j];
Sequence sequence = c.getSequence();
add(rows,
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
tableName,
// COLUMN_NAME
identifier(c.getName()),
// ORDINAL_POSITION
String.valueOf(j + 1),
// COLUMN_DEFAULT
c.getDefaultSQL(),
// IS_NULLABLE
c.isNullable() ? "YES" : "NO",
// DATA_TYPE
"" + DataType.convertTypeToSQLType(c.getType()),
// CHARACTER_MAXIMUM_LENGTH
"" + c.getPrecisionAsInt(),
// CHARACTER_OCTET_LENGTH
"" + c.getPrecisionAsInt(),
// NUMERIC_PRECISION
"" + c.getPrecisionAsInt(),
// NUMERIC_PRECISION_RADIX
"10",
// NUMERIC_SCALE
"" + c.getScale(),
// CHARACTER_SET_NAME
CHARACTER_SET_NAME,
// COLLATION_NAME
collation,
// TYPE_NAME
identifier(DataType.getDataType(c.getType()).name),
// NULLABLE
"" + (c.isNullable() ?
DatabaseMetaData.columnNullable :
DatabaseMetaData.columnNoNulls) ,
// IS_COMPUTED
"" + (c.getComputed() ? "TRUE" : "FALSE"),
// SELECTIVITY
"" + (c.getSelectivity()),
// CHECK_CONSTRAINT
c.getCheckConstraintSQL(session, c.getName()),
// SEQUENCE_NAME
sequence == null ? null : sequence.getName(),
// REMARKS
replaceNullWithEmpty(c.getComment()),
// SOURCE_DATA_TYPE
null,
// COLUMN_TYPE
c.getCreateSQLWithoutName(),
// COLUMN_ON_UPDATE
c.getOnUpdateSQL()
);
}
}
break;
}
case INDEXES: {
// reduce the number of tables to scan - makes some metadata queries
// 10x faster
final ArrayList<Table> tablesToList;
if (indexFrom != null && indexFrom.equals(indexTo)) {
String tableName = identifier(indexFrom.getString());
tablesToList = getTablesByName(session, tableName);
} else {
tablesToList = getAllTables(session);
}
for (Table table : tablesToList) {
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
if (hideTable(table, session)) {
continue;
}
ArrayList<Index> indexes = table.getIndexes();
ArrayList<Constraint> constraints = table.getConstraints();
for (int j = 0; indexes != null && j < indexes.size(); j++) {
Index index = indexes.get(j);
if (index.getCreateSQL() == null) {
continue;
}
String constraintName = null;
for (int k = 0; constraints != null && k < constraints.size(); k++) {
Constraint constraint = constraints.get(k);
if (constraint.usesIndex(index)) {
if (index.getIndexType().isPrimaryKey()) {
if (constraint.getConstraintType() == Constraint.Type.PRIMARY_KEY) {
constraintName = constraint.getName();
}
} else {
constraintName = constraint.getName();
}
}
}
IndexColumn[] cols = index.getIndexColumns();
String indexClass;
if (index instanceof MultiVersionIndex) {
indexClass = ((MultiVersionIndex) index).
getBaseIndex().getClass().getName();
} else {
indexClass = index.getClass().getName();
}
for (int k = 0; k < cols.length; k++) {
IndexColumn idxCol = cols[k];
Column column = idxCol.column;
add(rows,
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
tableName,
// NON_UNIQUE
index.getIndexType().isUnique() ?
"FALSE" : "TRUE",
// INDEX_NAME
identifier(index.getName()),
// ORDINAL_POSITION
"" + (k+1),
// COLUMN_NAME
identifier(column.getName()),
// CARDINALITY
"0",
// PRIMARY_KEY
index.getIndexType().isPrimaryKey() ?
"TRUE" : "FALSE",
// INDEX_TYPE_NAME
index.getIndexType().getSQL(),
// IS_GENERATED
index.getIndexType().getBelongsToConstraint() ?
"TRUE" : "FALSE",
// INDEX_TYPE
"" + DatabaseMetaData.tableIndexOther,
// ASC_OR_DESC
(idxCol.sortType & SortOrder.DESCENDING) != 0 ?
"D" : "A",
// PAGES
"0",
// FILTER_CONDITION
"",
// REMARKS
replaceNullWithEmpty(index.getComment()),
// SQL
index.getCreateSQL(),
// ID
"" + index.getId(),
// SORT_TYPE
"" + idxCol.sortType,
// CONSTRAINT_NAME
constraintName,
// INDEX_CLASS
indexClass,
// AFFINITY
index.getIndexType().isAffinity() ?
"TRUE" : "FALSE"
);
}
}
}
break;
}
case TABLE_TYPES: {
add(rows, TableType.TABLE.toString());
add(rows, TableType.TABLE_LINK.toString());
add(rows, TableType.SYSTEM_TABLE.toString());
add(rows, TableType.VIEW.toString());
add(rows, TableType.EXTERNAL_TABLE_ENGINE.toString());
break;
}
case CATALOGS: {
add(rows, catalog);
break;
}
case SETTINGS: {
for (Setting s : database.getAllSettings()) {
String value = s.getStringValue();
if (value == null) {
value = "" + s.getIntValue();
}
add(rows,
identifier(s.getName()),
value
);
}
add(rows, "info.BUILD_ID", "" + Constants.BUILD_ID);
add(rows, "info.VERSION_MAJOR", "" + Constants.VERSION_MAJOR);
add(rows, "info.VERSION_MINOR", "" + Constants.VERSION_MINOR);
add(rows, "info.VERSION", "" + Constants.getFullVersion());
if (admin) {
String[] settings = {
"java.runtime.version", "java.vm.name",
"java.vendor", "os.name", "os.arch", "os.version",
"sun.os.patch.level", "file.separator",
"path.separator", "line.separator", "user.country",
"user.language", "user.variant", "file.encoding" };
for (String s : settings) {
add(rows, "property." + s, Utils.getProperty(s, ""));
}
}
add(rows, "EXCLUSIVE", database.getExclusiveSession() == null ?
"FALSE" : "TRUE");
add(rows, "MODE", database.getMode().getName());
add(rows, "MULTI_THREADED", database.isMultiThreaded() ? "1" : "0");
add(rows, "MVCC", database.isMultiVersion() ? "TRUE" : "FALSE");
add(rows, "QUERY_TIMEOUT", "" + session.getQueryTimeout());
add(rows, "RETENTION_TIME", "" + database.getRetentionTime());
add(rows, "LOG", "" + database.getLogMode());
// database settings
ArrayList<String> settingNames = New.arrayList();
HashMap<String, String> s = database.getSettings().getSettings();
settingNames.addAll(s.keySet());
Collections.sort(settingNames);
for (String k : settingNames) {
add(rows, k, s.get(k));
}
if (database.isPersistent()) {
PageStore store = database.getPageStore();
if (store != null) {
add(rows, "info.FILE_WRITE_TOTAL",
"" + store.getWriteCountTotal());
add(rows, "info.FILE_WRITE",
"" + store.getWriteCount());
add(rows, "info.FILE_READ",
"" + store.getReadCount());
add(rows, "info.PAGE_COUNT",
"" + store.getPageCount());
add(rows, "info.PAGE_SIZE",
"" + store.getPageSize());
add(rows, "info.CACHE_MAX_SIZE",
"" + store.getCache().getMaxMemory());
add(rows, "info.CACHE_SIZE",
"" + store.getCache().getMemory());
}
Store mvStore = database.getMvStore();
if (mvStore != null) {
FileStore fs = mvStore.getStore().getFileStore();
add(rows, "info.FILE_WRITE", "" +
fs.getWriteCount());
add(rows, "info.FILE_READ", "" +
fs.getReadCount());
long size;
try {
size = fs.getFile().size();
} catch (IOException e) {
throw DbException.convertIOException(e, "Can not get size");
}
int pageSize = 4 * 1024;
long pageCount = size / pageSize;
add(rows, "info.PAGE_COUNT", "" +
pageCount);
add(rows, "info.PAGE_SIZE", "" +
pageSize);
add(rows, "info.CACHE_MAX_SIZE", "" +
mvStore.getStore().getCacheSize());
add(rows, "info.CACHE_SIZE", "" +
mvStore.getStore().getCacheSizeUsed());
}
}
break;
}
case TYPE_INFO: {
for (DataType t : DataType.getTypes()) {
if (t.hidden || t.sqlType == Value.NULL) {
continue;
}
add(rows,
// TYPE_NAME
t.name,
// DATA_TYPE
String.valueOf(t.sqlType),
// PRECISION
String.valueOf(MathUtils.convertLongToInt(t.maxPrecision)),
// PREFIX
t.prefix,
// SUFFIX
t.suffix,
// PARAMS
t.params,
// AUTO_INCREMENT
String.valueOf(t.autoIncrement),
// MINIMUM_SCALE
String.valueOf(t.minScale),
// MAXIMUM_SCALE
String.valueOf(t.maxScale),
// RADIX
t.decimal ? "10" : null,
// POS
String.valueOf(t.sqlTypePos),
// CASE_SENSITIVE
String.valueOf(t.caseSensitive),
// NULLABLE
"" + DatabaseMetaData.typeNullable,
// SEARCHABLE
"" + DatabaseMetaData.typeSearchable
);
}
break;
}
case HELP: {
String resource = "/org/h2/res/help.csv";
try {
byte[] data = Utils.getResource(resource);
Reader reader = new InputStreamReader(
new ByteArrayInputStream(data));
Csv csv = new Csv();
csv.setLineCommentCharacter('#');
ResultSet rs = csv.read(reader, null);
for (int i = 0; rs.next(); i++) {
add(rows,
// ID
String.valueOf(i),
// SECTION
rs.getString(1).trim(),
// TOPIC
rs.getString(2).trim(),
// SYNTAX
rs.getString(3).trim(),
// TEXT
rs.getString(4).trim()
);
}
} catch (Exception e) {
throw DbException.convert(e);
}
break;
}
case SEQUENCES: {
for (SchemaObject obj : database.getAllSchemaObjects(
DbObject.SEQUENCE)) {
Sequence s = (Sequence) obj;
add(rows,
// SEQUENCE_CATALOG
catalog,
// SEQUENCE_SCHEMA
identifier(s.getSchema().getName()),
// SEQUENCE_NAME
identifier(s.getName()),
// CURRENT_VALUE
String.valueOf(s.getCurrentValue()),
// INCREMENT
String.valueOf(s.getIncrement()),
// IS_GENERATED
s.getBelongsToTable() ? "TRUE" : "FALSE",
// REMARKS
replaceNullWithEmpty(s.getComment()),
// CACHE
String.valueOf(s.getCacheSize()),
// MIN_VALUE
String.valueOf(s.getMinValue()),
// MAX_VALUE
String.valueOf(s.getMaxValue()),
// IS_CYCLE
s.getCycle() ? "TRUE" : "FALSE",
// ID
"" + s.getId()
);
}
break;
}
case USERS: {
for (User u : database.getAllUsers()) {
if (admin || session.getUser() == u) {
add(rows,
// NAME
identifier(u.getName()),
// ADMIN
String.valueOf(u.isAdmin()),
// REMARKS
replaceNullWithEmpty(u.getComment()),
// ID
"" + u.getId()
);
}
}
break;
}
case ROLES: {
for (Role r : database.getAllRoles()) {
if (admin || session.getUser().isRoleGranted(r)) {
add(rows,
// NAME
identifier(r.getName()),
// REMARKS
replaceNullWithEmpty(r.getComment()),
// ID
"" + r.getId()
);
}
}
break;
}
case RIGHTS: {
if (admin) {
for (Right r : database.getAllRights()) {
Role role = r.getGrantedRole();
DbObject grantee = r.getGrantee();
String rightType = grantee.getType() == DbObject.USER ?
"USER" : "ROLE";
if (role == null) {
DbObject object = r.getGrantedObject();
Schema schema = null;
Table table = null;
if (object != null) {
if (object instanceof Schema) {
schema = (Schema) object;
} else if (object instanceof Table) {
table = (Table) object;
schema = table.getSchema();
}
}
String tableName = (table != null) ? identifier(table.getName()) : "";
String schemaName = (schema != null) ? identifier(schema.getName()) : "";
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
add(rows,
// GRANTEE
identifier(grantee.getName()),
// GRANTEETYPE
rightType,
// GRANTEDROLE
"",
// RIGHTS
r.getRights(),
// TABLE_SCHEMA
schemaName,
// TABLE_NAME
tableName,
// ID
"" + r.getId()
);
} else {
add(rows,
// GRANTEE
identifier(grantee.getName()),
// GRANTEETYPE
rightType,
// GRANTEDROLE
identifier(role.getName()),
// RIGHTS
"",
// TABLE_SCHEMA
"",
// TABLE_NAME
"",
// ID
"" + r.getId()
);
}
}
}
break;
}
case FUNCTION_ALIASES: {
for (SchemaObject aliasAsSchemaObject :
database.getAllSchemaObjects(DbObject.FUNCTION_ALIAS)) {
FunctionAlias alias = (FunctionAlias) aliasAsSchemaObject;
JavaMethod[] methods;
try {
methods = alias.getJavaMethods();
} catch (DbException e) {
methods = new JavaMethod[0];
}
for (FunctionAlias.JavaMethod method : methods) {
int returnsResult = method.getDataType() == Value.NULL ?
DatabaseMetaData.procedureNoResult :
DatabaseMetaData.procedureReturnsResult;
add(rows,
// ALIAS_CATALOG
catalog,
// ALIAS_SCHEMA
alias.getSchema().getName(),
// ALIAS_NAME
identifier(alias.getName()),
// JAVA_CLASS
alias.getJavaClassName(),
// JAVA_METHOD
alias.getJavaMethodName(),
// DATA_TYPE
"" + DataType.convertTypeToSQLType(method.getDataType()),
// TYPE_NAME
DataType.getDataType(method.getDataType()).name,
// COLUMN_COUNT INT
"" + method.getParameterCount(),
// RETURNS_RESULT SMALLINT
"" + returnsResult,
// REMARKS
replaceNullWithEmpty(alias.getComment()),
// ID
"" + alias.getId(),
// SOURCE
alias.getSource()
// when adding more columns, see also below
);
}
}
for (UserAggregate agg : database.getAllAggregates()) {
int returnsResult = DatabaseMetaData.procedureReturnsResult;
add(rows,
// ALIAS_CATALOG
catalog,
// ALIAS_SCHEMA
Constants.SCHEMA_MAIN,
// ALIAS_NAME
identifier(agg.getName()),
// JAVA_CLASS
agg.getJavaClassName(),
// JAVA_METHOD
"",
// DATA_TYPE
"" + DataType.convertTypeToSQLType(Value.NULL),
// TYPE_NAME
DataType.getDataType(Value.NULL).name,
// COLUMN_COUNT INT
"1",
// RETURNS_RESULT SMALLINT
"" + returnsResult,
// REMARKS
replaceNullWithEmpty(agg.getComment()),
// ID
"" + agg.getId(),
// SOURCE
""
// when adding more columns, see also below
);
}
break;
}
case FUNCTION_COLUMNS: {
for (SchemaObject aliasAsSchemaObject :
database.getAllSchemaObjects(DbObject.FUNCTION_ALIAS)) {
FunctionAlias alias = (FunctionAlias) aliasAsSchemaObject;
JavaMethod[] methods;
try {
methods = alias.getJavaMethods();
} catch (DbException e) {
methods = new JavaMethod[0];
}
for (FunctionAlias.JavaMethod method : methods) {
// Add return column index 0
if (method.getDataType() != Value.NULL) {
DataType dt = DataType.getDataType(method.getDataType());
add(rows,
// ALIAS_CATALOG
catalog,
// ALIAS_SCHEMA
alias.getSchema().getName(),
// ALIAS_NAME
identifier(alias.getName()),
// JAVA_CLASS
alias.getJavaClassName(),
// JAVA_METHOD
alias.getJavaMethodName(),
// COLUMN_COUNT
"" + method.getParameterCount(),
// POS INT
"0",
// COLUMN_NAME
"P0",
// DATA_TYPE
"" + DataType.convertTypeToSQLType(method.getDataType()),
// TYPE_NAME
dt.name,
// PRECISION INT
"" + MathUtils.convertLongToInt(dt.defaultPrecision),
// SCALE
"" + dt.defaultScale,
// RADIX
"10",
// NULLABLE SMALLINT
"" + DatabaseMetaData.columnNullableUnknown,
// COLUMN_TYPE
"" + DatabaseMetaData.procedureColumnReturn,
// REMARKS
"",
// COLUMN_DEFAULT
null
);
}
Class<?>[] columnList = method.getColumnClasses();
for (int k = 0; k < columnList.length; k++) {
if (method.hasConnectionParam() && k == 0) {
continue;
}
Class<?> clazz = columnList[k];
int dataType = DataType.getTypeFromClass(clazz);
DataType dt = DataType.getDataType(dataType);
int nullable = clazz.isPrimitive() ? DatabaseMetaData.columnNoNulls
: DatabaseMetaData.columnNullable;
add(rows,
// ALIAS_CATALOG
catalog,
// ALIAS_SCHEMA
alias.getSchema().getName(),
// ALIAS_NAME
identifier(alias.getName()),
// JAVA_CLASS
alias.getJavaClassName(),
// JAVA_METHOD
alias.getJavaMethodName(),
// COLUMN_COUNT
"" + method.getParameterCount(),
// POS INT
"" + (k + (method.hasConnectionParam() ? 0 : 1)),
// COLUMN_NAME
"P" + (k + 1),
// DATA_TYPE
"" + DataType.convertTypeToSQLType(dt.type),
// TYPE_NAME
dt.name,
// PRECISION INT
"" + MathUtils.convertLongToInt(dt.defaultPrecision),
// SCALE
"" + dt.defaultScale,
// RADIX
"10",
// NULLABLE SMALLINT
"" + nullable,
// COLUMN_TYPE
"" + DatabaseMetaData.procedureColumnIn,
// REMARKS
"",
// COLUMN_DEFAULT
null
);
}
}
}
break;
}
case SCHEMATA: {
String collation = database.getCompareMode().getName();
for (Schema schema : database.getAllSchemas()) {
add(rows,
// CATALOG_NAME
catalog,
// SCHEMA_NAME
identifier(schema.getName()),
// SCHEMA_OWNER
identifier(schema.getOwner().getName()),
// DEFAULT_CHARACTER_SET_NAME
CHARACTER_SET_NAME,
// DEFAULT_COLLATION_NAME
collation,
// IS_DEFAULT
Constants.SCHEMA_MAIN.equals(
schema.getName()) ? "TRUE" : "FALSE",
// REMARKS
replaceNullWithEmpty(schema.getComment()),
// ID
"" + schema.getId()
);
}
break;
}
case TABLE_PRIVILEGES: {
for (Right r : database.getAllRights()) {
DbObject object = r.getGrantedObject();
if (!(object instanceof Table)) {
continue;
}
Table table = (Table) object;
if (hideTable(table, session)) {
continue;
}
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
addPrivileges(rows, r.getGrantee(), catalog, table, null,
r.getRightMask());
}
break;
}
case COLUMN_PRIVILEGES: {
for (Right r : database.getAllRights()) {
DbObject object = r.getGrantedObject();
if (!(object instanceof Table)) {
continue;
}
Table table = (Table) object;
if (hideTable(table, session)) {
continue;
}
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
DbObject grantee = r.getGrantee();
int mask = r.getRightMask();
for (Column column : table.getColumns()) {
addPrivileges(rows, grantee, catalog, table,
column.getName(), mask);
}
}
break;
}
case COLLATIONS: {
for (Locale l : Collator.getAvailableLocales()) {
add(rows,
// NAME
CompareMode.getName(l),
// KEY
l.toString()
);
}
break;
}
case VIEWS: {
for (Table table : getAllTables(session)) {
if (table.getTableType() != TableType.VIEW) {
continue;
}
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
TableView view = (TableView) table;
add(rows,
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
tableName,
// VIEW_DEFINITION
table.getCreateSQL(),
// CHECK_OPTION
"NONE",
// IS_UPDATABLE
"NO",
// STATUS
view.isInvalid() ? "INVALID" : "VALID",
// REMARKS
replaceNullWithEmpty(view.getComment()),
// ID
"" + view.getId()
);
}
break;
}
case IN_DOUBT: {
ArrayList<InDoubtTransaction> prepared = database.getInDoubtTransactions();
if (prepared != null && admin) {
for (InDoubtTransaction prep : prepared) {
add(rows,
// TRANSACTION
prep.getTransactionName(),
// STATE
prep.getState()
);
}
}
break;
}
case CROSS_REFERENCES: {
for (SchemaObject obj : database.getAllSchemaObjects(
DbObject.CONSTRAINT)) {
Constraint constraint = (Constraint) obj;
if (constraint.getConstraintType() != Constraint.Type.REFERENTIAL) {
continue;
}
ConstraintReferential ref = (ConstraintReferential) constraint;
IndexColumn[] cols = ref.getColumns();
IndexColumn[] refCols = ref.getRefColumns();
Table tab = ref.getTable();
Table refTab = ref.getRefTable();
String tableName = identifier(refTab.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
int update = getRefAction(ref.getUpdateAction());
int delete = getRefAction(ref.getDeleteAction());
for (int j = 0; j < cols.length; j++) {
add(rows,
// PKTABLE_CATALOG
catalog,
// PKTABLE_SCHEMA
identifier(refTab.getSchema().getName()),
// PKTABLE_NAME
identifier(refTab.getName()),
// PKCOLUMN_NAME
identifier(refCols[j].column.getName()),
// FKTABLE_CATALOG
catalog,
// FKTABLE_SCHEMA
identifier(tab.getSchema().getName()),
// FKTABLE_NAME
identifier(tab.getName()),
// FKCOLUMN_NAME
identifier(cols[j].column.getName()),
// ORDINAL_POSITION
String.valueOf(j + 1),
// UPDATE_RULE SMALLINT
String.valueOf(update),
// DELETE_RULE SMALLINT
String.valueOf(delete),
// FK_NAME
identifier(ref.getName()),
// PK_NAME
identifier(ref.getUniqueIndex().getName()),
// DEFERRABILITY
"" + DatabaseMetaData.importedKeyNotDeferrable
);
}
}
break;
}
case CONSTRAINTS: {
for (SchemaObject obj : database.getAllSchemaObjects(
DbObject.CONSTRAINT)) {
Constraint constraint = (Constraint) obj;
Constraint.Type constraintType = constraint.getConstraintType();
String checkExpression = null;
IndexColumn[] indexColumns = null;
Table table = constraint.getTable();
if (hideTable(table, session)) {
continue;
}
Index index = constraint.getUniqueIndex();
String uniqueIndexName = null;
if (index != null) {
uniqueIndexName = index.getName();
}
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
if (constraintType == Constraint.Type.CHECK) {
checkExpression = ((ConstraintCheck) constraint).getExpression().getSQL();
} else if (constraintType == Constraint.Type.UNIQUE ||
constraintType == Constraint.Type.PRIMARY_KEY) {
indexColumns = ((ConstraintUnique) constraint).getColumns();
} else if (constraintType == Constraint.Type.REFERENTIAL) {
indexColumns = ((ConstraintReferential) constraint).getColumns();
}
String columnList = null;
if (indexColumns != null) {
StatementBuilder buff = new StatementBuilder();
for (IndexColumn col : indexColumns) {
buff.appendExceptFirst(",");
buff.append(col.column.getName());
}
columnList = buff.toString();
}
add(rows,
// CONSTRAINT_CATALOG
catalog,
// CONSTRAINT_SCHEMA
identifier(constraint.getSchema().getName()),
// CONSTRAINT_NAME
identifier(constraint.getName()),
// CONSTRAINT_TYPE
constraintType.toString(),
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
tableName,
// UNIQUE_INDEX_NAME
uniqueIndexName,
// CHECK_EXPRESSION
checkExpression,
// COLUMN_LIST
columnList,
// REMARKS
replaceNullWithEmpty(constraint.getComment()),
// SQL
constraint.getCreateSQL(),
// ID
"" + constraint.getId()
);
}
break;
}
case CONSTANTS: {
for (SchemaObject obj : database.getAllSchemaObjects(
DbObject.CONSTANT)) {
Constant constant = (Constant) obj;
ValueExpression expr = constant.getValue();
add(rows,
// CONSTANT_CATALOG
catalog,
// CONSTANT_SCHEMA
identifier(constant.getSchema().getName()),
// CONSTANT_NAME
identifier(constant.getName()),
// CONSTANT_TYPE
"" + DataType.convertTypeToSQLType(expr.getType()),
// REMARKS
replaceNullWithEmpty(constant.getComment()),
// SQL
expr.getSQL(),
// ID
"" + constant.getId()
);
}
break;
}
case DOMAINS: {
for (UserDataType dt : database.getAllUserDataTypes()) {
Column col = dt.getColumn();
add(rows,
// DOMAIN_CATALOG
catalog,
// DOMAIN_SCHEMA
Constants.SCHEMA_MAIN,
// DOMAIN_NAME
identifier(dt.getName()),
// COLUMN_DEFAULT
col.getDefaultSQL(),
// IS_NULLABLE
col.isNullable() ? "YES" : "NO",
// DATA_TYPE
"" + col.getDataType().sqlType,
// PRECISION INT
"" + col.getPrecisionAsInt(),
// SCALE INT
"" + col.getScale(),
// TYPE_NAME
col.getDataType().name,
// SELECTIVITY INT
"" + col.getSelectivity(),
// CHECK_CONSTRAINT
"" + col.getCheckConstraintSQL(session, "VALUE"),
// REMARKS
replaceNullWithEmpty(dt.getComment()),
// SQL
"" + dt.getCreateSQL(),
// ID
"" + dt.getId()
);
}
break;
}
case TRIGGERS: {
for (SchemaObject obj : database.getAllSchemaObjects(
DbObject.TRIGGER)) {
TriggerObject trigger = (TriggerObject) obj;
Table table = trigger.getTable();
add(rows,
// TRIGGER_CATALOG
catalog,
// TRIGGER_SCHEMA
identifier(trigger.getSchema().getName()),
// TRIGGER_NAME
identifier(trigger.getName()),
// TRIGGER_TYPE
trigger.getTypeNameList(),
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
identifier(table.getName()),
// BEFORE BIT
"" + trigger.isBefore(),
// JAVA_CLASS
trigger.getTriggerClassName(),
// QUEUE_SIZE INT
"" + trigger.getQueueSize(),
// NO_WAIT BIT
"" + trigger.isNoWait(),
// REMARKS
replaceNullWithEmpty(trigger.getComment()),
// SQL
trigger.getCreateSQL(),
// ID
"" + trigger.getId()
);
}
break;
}
case SESSIONS: {
long now = System.currentTimeMillis();
for (Session s : database.getSessions(false)) {
if (admin || s == session) {
Command command = s.getCurrentCommand();
long start = s.getCurrentCommandStart();
if (start == 0) {
start = now;
}
add(rows,
// ID
"" + s.getId(),
// USER_NAME
s.getUser().getName(),
// SESSION_START
new Timestamp(s.getSessionStart()).toString(),
// STATEMENT
command == null ? null : command.toString(),
// STATEMENT_START
new Timestamp(start).toString(),
// CONTAINS_UNCOMMITTED
"" + s.containsUncommitted()
);
}
}
break;
}
case LOCKS: {
for (Session s : database.getSessions(false)) {
if (admin || s == session) {
for (Table table : s.getLocks()) {
add(rows,
// TABLE_SCHEMA
table.getSchema().getName(),
// TABLE_NAME
table.getName(),
// SESSION_ID
"" + s.getId(),
// LOCK_TYPE
table.isLockedExclusivelyBy(s) ? "WRITE" : "READ"
);
}
}
}
break;
}
case SESSION_STATE: {
for (String name : session.getVariableNames()) {
Value v = session.getVariable(name);
add(rows,
// KEY
"@" + name,
// SQL
"SET @" + name + " " + v.getSQL()
);
}
for (Table table : session.getLocalTempTables()) {
add(rows,
// KEY
"TABLE " + table.getName(),
// SQL
table.getCreateSQL()
);
}
String[] path = session.getSchemaSearchPath();
if (path != null && path.length > 0) {
StatementBuilder buff = new StatementBuilder(
"SET SCHEMA_SEARCH_PATH ");
for (String p : path) {
buff.appendExceptFirst(", ");
buff.append(StringUtils.quoteIdentifier(p));
}
add(rows,
// KEY
"SCHEMA_SEARCH_PATH",
// SQL
buff.toString()
);
}
String schema = session.getCurrentSchemaName();
if (schema != null) {
add(rows,
// KEY
"SCHEMA",
// SQL
"SET SCHEMA " + StringUtils.quoteIdentifier(schema)
);
}
break;
}
case QUERY_STATISTICS: {
QueryStatisticsData control = database.getQueryStatisticsData();
if (control != null) {
for (QueryStatisticsData.QueryEntry entry : control.getQueries()) {
add(rows,
// SQL_STATEMENT
entry.sqlStatement,
// EXECUTION_COUNT
"" + entry.count,
// MIN_EXECUTION_TIME
"" + entry.executionTimeMinNanos / 1000d / 1000,
// MAX_EXECUTION_TIME
"" + entry.executionTimeMaxNanos / 1000d / 1000,
// CUMULATIVE_EXECUTION_TIME
"" + entry.executionTimeCumulativeNanos / 1000d / 1000,
// AVERAGE_EXECUTION_TIME
"" + entry.executionTimeMeanNanos / 1000d / 1000,
// STD_DEV_EXECUTION_TIME
"" + entry.getExecutionTimeStandardDeviation() / 1000d / 1000,
// MIN_ROW_COUNT
"" + entry.rowCountMin,
// MAX_ROW_COUNT
"" + entry.rowCountMax,
// CUMULATIVE_ROW_COUNT
"" + entry.rowCountCumulative,
// AVERAGE_ROW_COUNT
"" + entry.rowCountMean,
// STD_DEV_ROW_COUNT
"" + entry.getRowCountStandardDeviation()
);
}
}
break;
}
case SYNONYMS: {
for (TableSynonym synonym : database.getAllSynonyms()) {
add(rows,
// SYNONYM_CATALOG
catalog,
// SYNONYM_SCHEMA
identifier(synonym.getSchema().getName()),
// SYNONYM_NAME
identifier(synonym.getName()),
// SYNONYM_FOR
synonym.getSynonymForName(),
// SYNONYM_FOR_SCHEMA
synonym.getSynonymForSchema().getName(),
// TYPE NAME
"SYNONYM",
// STATUS
"VALID",
// REMARKS
replaceNullWithEmpty(synonym.getComment()),
// ID
"" + synonym.getId()
);
}
break;
}
case TABLE_CONSTRAINTS: {
for (SchemaObject obj : database.getAllSchemaObjects(DbObject.CONSTRAINT)) {
Constraint constraint = (Constraint) obj;
Constraint.Type constraintType = constraint.getConstraintType();
Table table = constraint.getTable();
if (hideTable(table, session)) {
continue;
}
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
add(rows,
// CONSTRAINT_CATALOG
catalog,
// CONSTRAINT_SCHEMA
identifier(constraint.getSchema().getName()),
// CONSTRAINT_NAME
identifier(constraint.getName()),
// CONSTRAINT_TYPE
constraintType.getSqlName(),
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
tableName,
// IS_DEFERRABLE
"NO",
// INITIALLY_DEFERRED
"NO"
);
}
break;
}
case KEY_COLUMN_USAGE: {
for (SchemaObject obj : database.getAllSchemaObjects(DbObject.CONSTRAINT)) {
Constraint constraint = (Constraint) obj;
Constraint.Type constraintType = constraint.getConstraintType();
IndexColumn[] indexColumns = null;
Table table = constraint.getTable();
if (hideTable(table, session)) {
continue;
}
String tableName = identifier(table.getName());
if (!checkIndex(session, tableName, indexFrom, indexTo)) {
continue;
}
if (constraintType == Constraint.Type.UNIQUE ||
constraintType == Constraint.Type.PRIMARY_KEY) {
indexColumns = ((ConstraintUnique) constraint).getColumns();
} else if (constraintType == Constraint.Type.REFERENTIAL) {
indexColumns = ((ConstraintReferential) constraint).getColumns();
}
if (indexColumns == null) {
continue;
}
ConstraintUnique referenced;
if (constraintType == Constraint.Type.REFERENTIAL) {
referenced = lookupUniqueForReferential((ConstraintReferential) constraint);
} else {
referenced = null;
}
for (int i = 0; i < indexColumns.length; i++) {
IndexColumn indexColumn = indexColumns[i];
String ordinalPosition = Integer.toString(i + 1);
String positionInUniqueConstraint;
if (constraintType == Constraint.Type.REFERENTIAL) {
positionInUniqueConstraint = ordinalPosition;
if (referenced != null) {
Column c = ((ConstraintReferential) constraint).getRefColumns()[i].column;
IndexColumn[] refColumns = referenced.getColumns();
for (int j = 0; j < refColumns.length; j++) {
if (refColumns[j].column.equals(c)) {
positionInUniqueConstraint = Integer.toString(j + 1);
break;
}
}
}
} else {
positionInUniqueConstraint = null;
}
add(rows,
// CONSTRAINT_CATALOG
catalog,
// CONSTRAINT_SCHEMA
identifier(constraint.getSchema().getName()),
// CONSTRAINT_NAME
identifier(constraint.getName()),
// TABLE_CATALOG
catalog,
// TABLE_SCHEMA
identifier(table.getSchema().getName()),
// TABLE_NAME
tableName,
// COLUMN_NAME
indexColumn.columnName,
// ORDINAL_POSITION
ordinalPosition,
// POSITION_IN_UNIQUE_CONSTRAINT
positionInUniqueConstraint
);
}
}
break;
}
case REFERENTIAL_CONSTRAINTS: {
for (SchemaObject obj : database.getAllSchemaObjects(DbObject.CONSTRAINT)) {
if (((Constraint) obj).getConstraintType() != Constraint.Type.REFERENTIAL) {
continue;
}
ConstraintReferential constraint = (ConstraintReferential) obj;
Table table = constraint.getTable();
if (hideTable(table, session)) {
continue;
}
// Should be referenced unique constraint, but H2 uses indexes instead.
// So try to find matching unique constraint first and there is no such
// constraint use index name to return something.
SchemaObject unique = lookupUniqueForReferential(constraint);
if (unique == null) {
unique = constraint.getUniqueIndex();
}
add(rows,
// CONSTRAINT_CATALOG
catalog,
// CONSTRAINT_SCHEMA
identifier(constraint.getSchema().getName()),
// CONSTRAINT_NAME
identifier(constraint.getName()),
// UNIQUE_CONSTRAINT_CATALOG
catalog,
// UNIQUE_CONSTRAINT_SCHEMA
identifier(unique.getSchema().getName()),
// UNIQUE_CONSTRAINT_NAME
unique.getName(),
// MATCH_OPTION
"NONE",
// UPDATE_RULE
constraint.getUpdateAction().getSqlName(),
// DELETE_RULE
constraint.getDeleteAction().getSqlName()
);
}
break;
}
default:
DbException.throwInternalError("type="+type);
}
return rows;
}
/**
 * Map an H2 referential constraint action to the corresponding JDBC
 * {@link DatabaseMetaData} imported-key action code.
 *
 * @param action the constraint action (CASCADE, RESTRICT, SET_DEFAULT or SET_NULL)
 * @return the matching {@code DatabaseMetaData.importedKey*} constant
 */
private static int getRefAction(ConstraintActionType action) {
    if (action == ConstraintActionType.CASCADE) {
        return DatabaseMetaData.importedKeyCascade;
    } else if (action == ConstraintActionType.RESTRICT) {
        return DatabaseMetaData.importedKeyRestrict;
    } else if (action == ConstraintActionType.SET_DEFAULT) {
        return DatabaseMetaData.importedKeySetDefault;
    } else if (action == ConstraintActionType.SET_NULL) {
        return DatabaseMetaData.importedKeySetNull;
    }
    // Unknown action values indicate a programming error.
    throw DbException.throwInternalError("action=" + action);
}
/**
 * Find a unique constraint on the referenced table whose referenced column
 * set matches the columns referenced by the given referential constraint.
 *
 * @param referential the referential (foreign key) constraint
 * @return the matching unique constraint, or null if none exists
 */
private static ConstraintUnique lookupUniqueForReferential(ConstraintReferential referential) {
    Table refTable = referential.getRefTable();
    for (Constraint candidate : refTable.getConstraints()) {
        if (candidate.getConstraintType() != Constraint.Type.UNIQUE) {
            continue;
        }
        ConstraintUnique unique = (ConstraintUnique) candidate;
        if (unique.getReferencedColumns(refTable)
                .equals(referential.getReferencedColumns(refTable))) {
            return unique;
        }
    }
    return null;
}
/**
 * Not supported: the information schema meta tables are read-only.
 *
 * @throws DbException always, with the "META" feature name
 */
@Override
public void removeRow(Session session, Row row) {
    throw DbException.getUnsupportedException("META");
}
/**
 * Not supported: the information schema meta tables are read-only.
 *
 * @throws DbException always, with the "META" feature name
 */
@Override
public void addRow(Session session, Row row) {
    throw DbException.getUnsupportedException("META");
}
/**
 * Not supported: meta tables cannot be dropped, so there are no
 * children or resources to remove.
 *
 * @throws DbException always, with the "META" feature name
 */
@Override
public void removeChildrenAndResources(Session session) {
    throw DbException.getUnsupportedException("META");
}
/**
 * Close the table. Meta tables hold no open resources, so this is a no-op.
 */
@Override
public void close(Session session) {
    // nothing to do
}
/**
 * Release a lock. Meta tables are never locked, so this is a no-op.
 */
@Override
public void unlock(Session s) {
    // nothing to do
}
/**
 * Add one privilege row per granted right (SELECT, INSERT, UPDATE, DELETE)
 * according to the bits set in the right mask.
 *
 * @param rows the list of rows to append to
 * @param grantee the user or role the rights were granted to
 * @param catalog the catalog name
 * @param table the table the rights apply to
 * @param column the column name, or null for table-level rights
 * @param rightMask bit mask built from the Right.* constants
 */
private void addPrivileges(ArrayList<Row> rows, DbObject grantee,
        String catalog, Table table, String column, int rightMask) {
    // Emit the privileges in the fixed order SELECT, INSERT, UPDATE, DELETE.
    int[] rightBits = { Right.SELECT, Right.INSERT, Right.UPDATE, Right.DELETE };
    String[] rightNames = { "SELECT", "INSERT", "UPDATE", "DELETE" };
    for (int i = 0; i < rightBits.length; i++) {
        if ((rightMask & rightBits[i]) != 0) {
            addPrivilege(rows, grantee, catalog, table, column, rightNames[i]);
        }
    }
}
/**
 * Add a single privilege row, either table-level (column == null) or
 * column-level.
 *
 * @param rows the list of rows to append to
 * @param grantee the user or role the right was granted to
 * @param catalog the catalog name
 * @param table the table the right applies to
 * @param column the column name, or null for a table-level right
 * @param right the privilege name, such as "SELECT"
 */
private void addPrivilege(ArrayList<Row> rows, DbObject grantee,
        String catalog, Table table, String column, String right) {
    // The right is grantable only if the grantee is an admin user.
    boolean adminUser = grantee.getType() == DbObject.USER
            && ((User) grantee).isAdmin();
    String isGrantable = adminUser ? "YES" : "NO";
    String granteeName = identifier(grantee.getName());
    String schemaName = identifier(table.getSchema().getName());
    String tableName = identifier(table.getName());
    if (column == null) {
        add(rows,
                // GRANTOR
                null,
                // GRANTEE
                granteeName,
                // TABLE_CATALOG
                catalog,
                // TABLE_SCHEMA
                schemaName,
                // TABLE_NAME
                tableName,
                // PRIVILEGE_TYPE
                right,
                // IS_GRANTABLE
                isGrantable
        );
    } else {
        add(rows,
                // GRANTOR
                null,
                // GRANTEE
                granteeName,
                // TABLE_CATALOG
                catalog,
                // TABLE_SCHEMA
                schemaName,
                // TABLE_NAME
                tableName,
                // COLUMN_NAME
                identifier(column),
                // PRIVILEGE_TYPE
                right,
                // IS_GRANTABLE
                isGrantable
        );
    }
}
/**
 * Convert the given strings to column values and append them to the row
 * list as a new row. Null strings become SQL NULL; every value is
 * converted to the declared type of its column. The row key is the
 * current list size, giving sequential keys starting at 0.
 *
 * @param rows the list of rows to append to
 * @param strings one string per column of this meta table
 */
private void add(ArrayList<Row> rows, String... strings) {
    int columnCount = strings.length;
    Value[] values = new Value[columnCount];
    for (int i = 0; i < columnCount; i++) {
        String s = strings[i];
        Value raw = (s == null) ? (Value) ValueNull.INSTANCE : ValueString.get(s);
        values[i] = columns[i].convert(raw);
    }
    Row row = database.createRow(values, 1);
    row.setKey(rows.size());
    rows.add(row);
}
/**
 * Not supported: meta tables cannot be renamed.
 *
 * @throws DbException always, with the "META" feature name
 */
@Override
public void checkRename() {
    throw DbException.getUnsupportedException("META");
}
/**
 * Not supported: meta tables cannot be altered.
 *
 * @throws DbException always, with the "META" feature name
 */
@Override
public void checkSupportAlter() {
    throw DbException.getUnsupportedException("META");
}
/**
 * Not supported: meta tables cannot be truncated.
 *
 * @throws DbException always, with the "META" feature name
 */
@Override
public void truncate(Session session) {
    throw DbException.getUnsupportedException("META");
}
/**
 * Never called in practice because {@code canGetRowCount()} returns false;
 * reaching this method indicates a programming error.
 *
 * @throws DbException always (internal error)
 */
@Override
public long getRowCount(Session session) {
    throw DbException.throwInternalError(toString());
}
/**
 * Meta tables generate their rows on demand, so an exact row count
 * is not available.
 *
 * @return false, always
 */
@Override
public boolean canGetRowCount() {
    return false;
}
/**
 * Meta tables are built-in system tables and cannot be dropped.
 *
 * @return false, always
 */
@Override
public boolean canDrop() {
    return false;
}
/**
 * Get the type of this table.
 *
 * @return always {@code TableType.SYSTEM_TABLE}
 */
@Override
public TableType getTableType() {
    return TableType.SYSTEM_TABLE;
}
/**
 * Get the scan index over all columns of this meta table. A new index
 * instance is created on each call; the session argument is not used.
 *
 * @param session the session (ignored)
 * @return a new scan index over this table's columns
 */
@Override
public Index getScanIndex(Session session) {
    return new MetaIndex(this, IndexColumn.wrap(columns), true);
}
/**
 * Get the indexes of this meta table: the scan index plus the cached
 * lookup index, or an empty list when no lookup index exists.
 *
 * @return the list of indexes (possibly empty)
 */
@Override
public ArrayList<Index> getIndexes() {
    ArrayList<Index> list = New.arrayList();
    if (metaIndex == null) {
        return list;
    }
    // Delegate to getScanIndex instead of duplicating the MetaIndex
    // construction here; the session argument is not used there.
    list.add(getScanIndex(null));
    // TODO re-use the scan index instead of creating a new one each call
    list.add(metaIndex);
    return list;
}
/**
 * Get the highest data modification id relevant for this meta table.
 * Tables whose contents can change independently of schema modifications
 * (settings, sessions, locks, and so on) always report the maximum value
 * so that cached results are never considered up to date.
 *
 * @return the data modification id
 */
@Override
public long getMaxDataModificationId() {
    boolean alwaysChanging = type == SETTINGS || type == IN_DOUBT
            || type == SESSIONS || type == LOCKS || type == SESSION_STATE;
    if (alwaysChanging) {
        return Long.MAX_VALUE;
    }
    return database.getModificationDataId();
}
/**
 * Meta tables have no unique index.
 *
 * @return null, always
 */
@Override
public Index getUniqueIndex() {
    return null;
}
/**
 * Get the number of supported meta table types. Valid meta table
 * types are the values 0 .. this value - 1.
 *
 * @return the number of meta table types
 */
public static int getMetaTableTypeCount() {
    return META_TABLE_TYPE_COUNT;
}
/**
 * Get an approximate row count used by the query optimizer. Meta tables
 * return a fixed constant since the real count is not known in advance.
 *
 * @return the constant {@code ROW_COUNT_APPROXIMATION}
 */
@Override
public long getRowCountApproximation() {
    return ROW_COUNT_APPROXIMATION;
}
/**
 * Meta tables are generated in memory and occupy no disk space.
 *
 * @return 0, always
 */
@Override
public long getDiskSpaceUsed() {
    return 0;
}
/**
 * Whether this table's contents are deterministic for a given state.
 *
 * @return true, always
 */
@Override
public boolean isDeterministic() {
    return true;
}
/**
 * Meta tables cannot be the target of a referential constraint.
 *
 * @return false, always
 */
@Override
public boolean canReference() {
    return false;
}
}
|
0
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2
|
java-sources/ai/platon/pulsar/pulsar-h2/1.4.196/org/h2/table/Plan.java
|
/*
* Copyright 2004-2018 H2 Group. Multiple-Licensed under the MPL 2.0,
* and the EPL 1.0 (http://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.table;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import org.h2.engine.Session;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.message.Trace;
import org.h2.table.TableFilter.TableFilterVisitor;
import org.h2.util.New;
/**
* A possible query execution plan. The time required to execute a query depends
* on the order the tables are accessed.
*/
public class Plan {

    // The tables of the query in the join order represented by this plan.
    private final TableFilter[] filters;
    // The best access path chosen per table; filled in by calculateCost().
    private final HashMap<TableFilter, PlanItem> planItems = new HashMap<>();
    // The WHERE condition plus all join conditions of all (nested) filters.
    private final Expression[] allConditions;
    // All filters, including those reachable through nested joins.
    private final TableFilter[] allFilters;

    /**
     * Create a query plan with the given order.
     *
     * @param filters the tables of the query
     * @param count the number of table items
     * @param condition the condition in the WHERE clause
     */
    public Plan(TableFilter[] filters, int count, Expression condition) {
        this.filters = new TableFilter[count];
        System.arraycopy(filters, 0, this.filters, 0, count);
        final ArrayList<Expression> allCond = New.arrayList();
        final ArrayList<TableFilter> all = New.arrayList();
        if (condition != null) {
            allCond.add(condition);
        }
        for (int i = 0; i < count; i++) {
            TableFilter f = filters[i];
            // Visit each filter tree to flatten all filters and collect
            // every join condition into the shared lists.
            f.visit(new TableFilterVisitor() {
                @Override
                public void accept(TableFilter f) {
                    all.add(f);
                    if (f.getJoinCondition() != null) {
                        allCond.add(f.getJoinCondition());
                    }
                }
            });
        }
        allConditions = allCond.toArray(new Expression[0]);
        allFilters = all.toArray(new TableFilter[0]);
    }

    /**
     * Get the plan item for the given table.
     *
     * @param filter the table
     * @return the plan item
     */
    public PlanItem getItem(TableFilter filter) {
        return planItems.get(filter);
    }

    /**
     * Get the list of tables.
     *
     * @return the list of tables
     */
    public TableFilter[] getFilters() {
        return filters;
    }

    /**
     * Remove all index conditions that can not be used.
     * Filters are temporarily marked evaluatable while checking, and the
     * marks are cleared again before returning.
     */
    public void removeUnusableIndexConditions() {
        for (int i = 0; i < allFilters.length; i++) {
            TableFilter f = allFilters[i];
            setEvaluatable(f, true);
            if (i < allFilters.length - 1 ||
                    f.getSession().getDatabase().getSettings().earlyFilter) {
                // the last table doesn't need the optimization,
                // otherwise the expression is calculated twice unnecessarily
                // (not that bad but not optimal)
                f.optimizeFullCondition(false);
            }
            f.removeUnusableIndexConditions();
        }
        for (TableFilter f : allFilters) {
            setEvaluatable(f, false);
        }
    }

    /**
     * Calculate the cost of this query plan.
     *
     * @param session the session
     * @return the cost
     */
    public double calculateCost(Session session) {
        Trace t = session.getTrace();
        if (t.isDebugEnabled()) {
            t.debug("Plan : calculate cost for plan {0}", Arrays.toString(allFilters));
        }
        double cost = 1;
        boolean invalidPlan = false;
        final HashSet<Column> allColumnsSet = ExpressionVisitor
                .allColumnsForTableFilters(allFilters);
        for (int i = 0; i < allFilters.length; i++) {
            TableFilter tableFilter = allFilters[i];
            if (t.isDebugEnabled()) {
                t.debug("Plan : for table filter {0}", tableFilter);
            }
            PlanItem item = tableFilter.getBestPlanItem(session, allFilters, i, allColumnsSet);
            planItems.put(tableFilter, item);
            if (t.isDebugEnabled()) {
                t.debug("Plan : best plan item cost {0} index {1}",
                        item.cost, item.getIndex().getPlanSQL());
            }
            // Costs are combined multiplicatively: each joined table scans
            // its rows once per row of the tables before it in the order.
            cost += cost * item.cost;
            // Mark this filter evaluatable so that later filters in the
            // order may rely on its columns when picking their plan.
            setEvaluatable(tableFilter, true);
            Expression on = tableFilter.getJoinCondition();
            if (on != null) {
                if (!on.isEverything(ExpressionVisitor.EVALUATABLE_VISITOR)) {
                    // The join condition references a filter that comes
                    // later in this order, so this order is not usable.
                    invalidPlan = true;
                    break;
                }
            }
        }
        if (invalidPlan) {
            cost = Double.POSITIVE_INFINITY;
        }
        if (t.isDebugEnabled()) {
            session.getTrace().debug("Plan : plan cost {0}", cost);
        }
        // Reset the evaluatable marks set during the cost calculation.
        for (TableFilter f : allFilters) {
            setEvaluatable(f, false);
        }
        return cost;
    }

    // Mark the given filter as evaluatable (or not) on itself and on
    // every collected condition.
    private void setEvaluatable(TableFilter filter, boolean b) {
        filter.setEvaluatable(filter, b);
        for (Expression e : allConditions) {
            e.setEvaluatable(filter, b);
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.