code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Returns the index of the n'th occurrence of {@code ch} in {@code str},
 * skipping occurrences that fall inside single-quoted text. Quote state is
 * tracked by counting single quotes between successive candidate matches
 * (via hasOddNumberOfSingleQuotes): an odd count toggles in/out of a quote.
 * Returns -1 when there are fewer than n unquoted occurrences.
 */
static private int indexOfNthOccurrenceOfCharIn(String str, char ch, int n) { boolean inMiddleOfQuote = false; int index = -1, previousIndex = 0; for (int i=0; i < n; i++) { do { // next candidate occurrence, starting just past the previous one
index = str.indexOf(ch, index+1); if (index < 0) { return -1; } // an odd number of quotes in the gap flips the in-quote state
if (hasOddNumberOfSingleQuotes(str.substring(previousIndex, index))) { inMiddleOfQuote = !inMiddleOfQuote; } previousIndex = index; } while (inMiddleOfQuote); /* quoted occurrences do not count */ } return index; }
java
/** Runs a DML statement, optionally rewriting it first, and logs the rewrite. */
protected VoltTable runDML(String dml, boolean transformDml) {
    String modifiedDml;
    if (transformDml) {
        modifiedDml = transformDML(dml);
    } else {
        modifiedDml = dml;
    }
    printTransformedSql(dml, modifiedDml);
    return super.runDML(modifiedDml);
}
java
/**
 * Maps a Class to its internal type code: non-primitive classes collapse to
 * CLASS_CODE_OBJECT, primitives are looked up in classCodeMap (-1 default).
 */
static int getClassCode(Class cla) {
    return cla.isPrimitive()
            ? classCodeMap.get(cla, -1)
            : ArrayUtil.CLASS_CODE_OBJECT;
}
java
/**
 * Resets the elements of {@code data} in the half-open range [from, to) to
 * the type's default value (numeric zero, false, or null). {@code type}
 * selects the array's component type via the ArrayUtil.CLASS_CODE_* codes;
 * any unrecognized code is treated as an Object[].
 */
public static void clearArray(int type, Object data, int from, int to) {
    switch (type) {
        case ArrayUtil.CLASS_CODE_BYTE : {
            byte[] array = (byte[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_CHAR : {
            // BUG FIX: this case previously cast to byte[], which throws
            // ClassCastException for every char[] argument.
            char[] array = (char[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_SHORT : {
            short[] array = (short[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_INT : {
            int[] array = (int[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_LONG : {
            long[] array = (long[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_FLOAT : {
            float[] array = (float[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_DOUBLE : {
            double[] array = (double[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_BOOLEAN : {
            boolean[] array = (boolean[]) data;
            while (--to >= from) {
                array[to] = false;
            }
            return;
        }
        default : {
            Object[] array = (Object[]) data;
            while (--to >= from) {
                array[to] = null;
            }
            return;
        }
    }
}
java
/**
 * Shifts the used portion of an array to insert or delete elements in place.
 * A positive {@code count} opens a gap of {@code count} slots at {@code index}
 * (tail shifted right); a negative {@code count} removes {@code -count}
 * elements starting at {@code index} (tail shifted left) and clears the
 * vacated slots at the end. No-op when {@code index} is past the used region.
 */
public static void adjustArray(int type, Object array, int usedElements, int index, int count) { if (index >= usedElements) { return; } // element count after the adjustment
int newCount = usedElements + count; int source; int target; int size; if (count >= 0) { // inserting: move tail [index, usedElements) right by count
source = index; target = index + count; size = usedElements - index; } else { // deleting: count is negative, so source = index + |count|
source = index - count; target = index; size = usedElements - index + count; } if (size > 0) { System.arraycopy(array, source, array, target, size); } if (count < 0) { // zero/null the now-unused tail slots so stale references don't linger
clearArray(type, array, newCount, usedElements); } }
java
/**
 * Sorts the given int array in place, in ascending order.
 *
 * Implementation: bubble sort with a shrinking scan bound. After each pass
 * the largest remaining element has settled at the end of the scanned range,
 * so rescanning those settled slots (as the previous version did) is wasted
 * work; shrinking the bound roughly halves the comparison count. Already
 * sorted input still terminates after a single pass.
 */
public static void sortArray(int[] array) {
    int limit = array.length - 1;
    boolean swapped;
    do {
        swapped = false;
        for (int i = 0; i < limit; i++) {
            if (array[i] > array[i + 1]) {
                int temp = array[i + 1];
                array[i + 1] = array[i];
                array[i] = temp;
                swapped = true;
            }
        }
        limit--;
    } while (swapped);
}
java
public static int find(Object[] array, Object object) { for (int i = 0; i < array.length; i++) { if (array[i] == object) { // hadles both nulls return i; } if (object != null && object.equals(array[i])) { return i; } } return -1; }
java
/** Returns the index of the first element differing from {@code value}, or -1. */
public static int findNot(int[] array, int value) {
    int i = 0;
    for (int element : array) {
        if (element != value) {
            return i;
        }
        i++;
    }
    return -1;
}
java
/**
 * True when the two arrays contain the same elements as sets: equal length
 * plus the set comparison delegated to ArrayUtil.haveEqualSets.
 */
public static boolean areEqualSets(int[] arra, int[] arrb) {
    if (arra.length != arrb.length) {
        return false;
    }
    return ArrayUtil.haveEqualSets(arra, arrb, arra.length);
}
java
/**
 * True when the first {@code count} elements of both arrays match pairwise.
 * Returns false (rather than throwing) when either array is shorter than count.
 */
public static boolean haveEqualArrays(int[] arra, int[] arrb, int count) {
    if (arra.length < count || arrb.length < count) {
        return false;
    }
    int j = 0;
    while (j < count) {
        if (arra[j] != arrb[j]) {
            return false;
        }
        j++;
    }
    return true;
}
java
/**
 * True when the first {@code count} elements of both arrays are pairwise
 * equal, treating two nulls (or the same reference) as equal. Returns false
 * when either array is shorter than count.
 */
public static boolean haveEqualArrays(Object[] arra, Object[] arrb, int count) {
    if (count > arra.length || count > arrb.length) {
        return false;
    }
    for (int j = 0; j < count; j++) {
        Object a = arra[j];
        Object b = arrb[j];
        if (a == b) {
            continue;   // same reference, or both null
        }
        if (a == null || !a.equals(b)) {
            return false;
        }
    }
    return true;
}
java
/**
 * Counts how many leading elements of {@code arrb} match the elements of
 * {@code arra} starting at {@code start}; stops at the first mismatch or
 * when either array runs out.
 */
public static int countSameElements(byte[] arra, int start, byte[] arrb) {
    int limit = Math.min(arra.length - start, arrb.length);
    int count = 0;
    while (count < limit && arra[start + count] == arrb[count]) {
        count++;
    }
    return count;
}
java
/**
 * Searches {@code arra} in [start, limit) for the first full occurrence of
 * the pattern {@code arrb}; returns its start index, or -1. The first pattern
 * byte is checked inline as a fast pre-filter before the full comparison.
 */
public static int find(byte[] arra, int start, int limit, byte[] arrb) {
    int lastStart = limit - arrb.length + 1;
    int firstByte = arrb[0];
    for (int k = start; k < lastStart; k++) {
        if (arra[k] != firstByte) {
            continue;
        }
        if (arrb.length == 1 || containsAt(arra, k, arrb)) {
            return k;
        }
    }
    return -1;
}
java
/**
 * Returns the index of the first element in [start, limit) equal to either
 * {@code b} or {@code c}, or -1 when neither occurs.
 *
 * BUG FIX: the scan previously began at index 0, silently ignoring the
 * {@code start} parameter — unlike every sibling find() overload, which
 * honors it.
 */
public static int find(byte[] arra, int start, int limit, int b, int c) {
    for (int k = start; k < limit; k++) {
        if (arra[k] == b || arra[k] == c) {
            return k;
        }
    }
    return -1;
}
java
/**
 * Sets arrb[i] = true for every value i in {@code arra} that is a valid
 * index into {@code arrb}; out-of-range values are ignored.
 */
public static void intIndexesToBooleanArray(int[] arra, boolean[] arrb) {
    for (int index : arra) {
        if (index < arrb.length) {
            arrb[index] = true;
        }
    }
}
java
/**
 * True when every position that is true in {@code arrb} (over arra's length)
 * is also true in {@code arra}.
 */
public static boolean containsAllTrueElements(boolean[] arra, boolean[] arrb) {
    int i = arra.length;
    while (--i >= 0) {
        if (arrb[i] && !arra[i]) {
            return false;
        }
    }
    return true;
}
java
/** Returns the number of true elements in the array. */
public static int countTrueElements(boolean[] arra) {
    int total = 0;
    for (boolean element : arra) {
        if (element) {
            total++;
        }
    }
    return total;
}
java
/**
 * True when {@code array} holds null at any of the positions listed in
 * {@code columnMap}.
 */
public static boolean hasNull(Object[] array, int[] columnMap) {
    for (int column : columnMap) {
        if (array[column] == null) {
            return true;
        }
    }
    return false;
}
java
/** True when {@code arra} contains the entire pattern {@code arrb} at offset {@code start}. */
public static boolean containsAt(byte[] arra, int start, byte[] arrb) {
    int matched = countSameElements(arra, start, arrb);
    return matched == arrb.length;
}
java
/**
 * Counts the consecutive elements of {@code arra}, beginning at {@code start},
 * that are members of the set of bytes in {@code arrb}; stops at the first
 * non-member or the end of the array.
 */
public static int countStartElementsAt(byte[] arra, int start, byte[] arrb) {
    int count = 0;
    for (int i = start; i < arra.length; i++) {
        boolean member = false;
        for (byte candidate : arrb) {
            if (arra[i] == candidate) {
                member = true;
                break;
            }
        }
        if (!member) {
            break;
        }
        count++;
    }
    return count;
}
java
/** Returns a new array holding {@code count} elements of {@code source} starting at {@code start}. */
public static int[] arraySlice(int[] source, int start, int count) {
    int[] result = new int[count];
    for (int i = 0; i < count; i++) {
        result[i] = source[start + i];
    }
    return result;
}
java
/** Sets every element of the array to {@code value}. */
public static void fillArray(Object[] array, Object value) {
    for (int i = 0; i < array.length; i++) {
        array[i] = value;
    }
}
java
/**
 * Returns a shallow copy of any array (primitive or reference), using
 * reflection so the caller can stay type-agnostic.
 */
public static Object duplicateArray(Object source) {
    int length = Array.getLength(source);
    Class<?> componentType = source.getClass().getComponentType();
    Object copy = Array.newInstance(componentType, length);
    System.arraycopy(source, 0, copy, 0, length);
    return copy;
}
java
/**
 * Returns {@code source} unchanged when it already has {@code newsize}
 * elements; otherwise returns a new array of that size with as many leading
 * elements copied over as fit.
 */
public static Object resizeArrayIfDifferent(Object source, int newsize) {
    int oldsize = Array.getLength(source);
    if (oldsize == newsize) {
        return source;
    }
    Object resized = Array.newInstance(source.getClass().getComponentType(), newsize);
    int copyCount = Math.min(oldsize, newsize);
    System.arraycopy(source, 0, resized, 0, copyCount);
    return resized;
}
java
/**
 * Copies {@code source} into {@code dest} while inserting, replacing, or
 * deleting one element at {@code colindex}. adjust == 0 replaces the element
 * with {@code addition}; adjust &lt; 0 deletes it (dest is one shorter);
 * adjust &gt; 0 inserts {@code addition} before it (dest is one longer).
 * colindex &lt; 0 means a straight copy with no adjustment.
 */
public static void copyAdjustArray(Object source, Object dest, Object addition, int colindex, int adjust) { int length = Array.getLength(source); if (colindex < 0) { // no adjustment requested: plain copy
System.arraycopy(source, 0, dest, 0, length); return; } // prefix [0, colindex) is common to all three cases
System.arraycopy(source, 0, dest, 0, colindex); if (adjust == 0) { // replace: set addition at colindex, copy the tail unshifted
int endcount = length - colindex - 1; Array.set(dest, colindex, addition); if (endcount > 0) { System.arraycopy(source, colindex + 1, dest, colindex + 1, endcount); } } else if (adjust < 0) { // delete: shift the tail left over the removed slot
int endcount = length - colindex - 1; if (endcount > 0) { System.arraycopy(source, colindex + 1, dest, colindex, endcount); } } else { // insert: place addition, shift the tail right by one
int endcount = length - colindex; Array.set(dest, colindex, addition); if (endcount > 0) { System.arraycopy(source, colindex, dest, colindex + 1, endcount); } } }
java
/**
 * Returns a new array with {@code firstColumn} prepended to {@code columns}.
 * Uses System.arraycopy for the bulk move instead of an element-by-element loop.
 */
private static ColumnInfo[] prependColumn(ColumnInfo firstColumn, ColumnInfo[] columns) {
    ColumnInfo[] allColumns = new ColumnInfo[columns.length + 1];
    allColumns[0] = firstColumn;
    System.arraycopy(columns, 0, allColumns, 1, columns.length);
    return allColumns;
}
java
/**
 * Returns the name of the column at {@code index} by walking the
 * length-prefixed name entries in the metadata region of the backing buffer.
 *
 * @param index zero-based column index
 * @throws IllegalArgumentException when index is out of [0, m_colCount)
 */
public final String getColumnName(int index) { assert(verifyTableInvariants()); if ((index < 0) || (index >= m_colCount)) { throw new IllegalArgumentException("Not a valid column index."); } // move to the start of the list of column names
int pos = POS_COL_TYPES + m_colCount; String name = null; // each entry is a 4-byte length followed by that many bytes of name;
// skip 'index' entries to land on the requested one
for (int i = 0; i < index; i++) { pos += m_buffer.getInt(pos) + 4; } name = readString(pos, METADATA_ENCODING); assert(name != null); assert(verifyTableInvariants()); return name; }
java
/**
 * Appends one row to this table, serializing each value into the backing
 * ByteBuffer according to the corresponding column's declared VoltType.
 * On a type error or oversized row the buffer position is rolled back and
 * the exception rethrown; on buffer exhaustion the buffer is grown and the
 * whole row retried.
 *
 * @param values one value per column, in schema order
 * @throws IllegalStateException when the table is read-only or has no columns
 * @throws IllegalArgumentException on a column-count mismatch
 */
public final void addRow(Object... values) {
    assert(verifyTableInvariants());
    if (m_readOnly) {
        throw new IllegalStateException("Table is read-only. Make a copy before changing.");
    }
    if (m_colCount == 0) {
        throw new IllegalStateException("Table has no columns defined");
    }
    if (values.length != m_colCount) {
        throw new IllegalArgumentException(values.length + " arguments but table has " + m_colCount + " columns");
    }
    // memoize the start of this row in case we roll back
    final int pos = m_buffer.position();
    try {
        // Allow the buffer to grow to max capacity
        m_buffer.limit(m_buffer.capacity());
        // advance the row size value
        m_buffer.position(pos + 4);
        // where does the type bytes start
        // skip rowstart + status code + colcount
        int typePos = POS_COL_TYPES;
        for (int col = 0; col < m_colCount; col++) {
            Object value = values[col];
            VoltType columnType = VoltType.get(m_buffer.get(typePos + col));
            addColumnValue(value, columnType, col);
        }
        //
        // Note, there is some near-identical code in both row add methods.
        // [ add(..) and addRow(..) ]
        // If you change code below here, change it in the other method too.
        // (It would be nice to re-factor, but I couldn't make a clean go at
        // it quickly - Hugg)
        //
        final int rowsize = m_buffer.position() - pos - 4;
        assert(rowsize >= 0);
        // check for too big rows
        if (rowsize > VoltTableRow.MAX_TUPLE_LENGTH) {
            throw new VoltOverflowException(
                    "Table row total length larger than allowed max " + VoltTableRow.MAX_TUPLE_LENGTH_STR);
        }
        // buffer overflow is caught and handled below.
        // back-patch the row's size prefix, then bump the row count
        m_buffer.putInt(pos, rowsize);
        m_rowCount++;
        m_buffer.putInt(m_rowStart, m_rowCount);
    } catch (VoltTypeException vte) {
        // revert the row size advance and any other
        // buffer additions
        m_buffer.position(pos);
        throw vte;
    } catch (BufferOverflowException e) {
        // out of room: rewind, grow the buffer, and retry the whole row
        m_buffer.position(pos);
        expandBuffer();
        addRow(values);
    }
    // row was too big, reset and rethrow
    catch (VoltOverflowException e) {
        m_buffer.position(pos);
        throw e;
    } catch (IllegalArgumentException e) {
        m_buffer.position(pos);
        // if this was thrown because of a lack of space
        // then grow the buffer
        // the number 32 was picked out of a hat ( maybe a bug if str > 32 )
        if (m_buffer.limit() - m_buffer.position() < 32) {
            expandBuffer();
            addRow(values);
        } else {
            throw e;
        }
    } finally {
        // constrain buffer limit back to the new position
        m_buffer.limit(m_buffer.position());
    }
    assert(verifyTableInvariants());
}
java
/**
 * Renders a varbinary value as "bin[crc:&lt;crc32&gt;,value:0x&lt;hex&gt;]",
 * truncating the hex dump to its first 10 characters plus "..." when the
 * full encoding is longer than 13 characters.
 */
public static String varbinaryToPrintableString(byte[] bin) {
    PureJavaCrc32 checksum = new PureJavaCrc32();
    checksum.update(bin);
    String hex = Encoder.hexEncode(bin);
    StringBuilder out = new StringBuilder();
    out.append("bin[crc:").append(checksum.getValue()).append(",value:0x");
    if (hex.length() > 13) {
        out.append(hex, 0, 10).append("...");
    } else {
        out.append(hex);
    }
    return out.append("]").toString();
}
java
@Override public String toJSONString() { JSONStringer js = new JSONStringer(); try { js.object(); // status code (1 byte) js.keySymbolValuePair(JSON_STATUS_KEY, getStatusCode()); // column schema js.key(JSON_SCHEMA_KEY).array(); for (int i = 0; i < getColumnCount(); i++) { js.object(); js.keySymbolValuePair(JSON_NAME_KEY, getColumnName(i)); js.keySymbolValuePair(JSON_TYPE_KEY, getColumnType(i).getValue()); js.endObject(); } js.endArray(); // row data js.key(JSON_DATA_KEY).array(); VoltTableRow row = cloneRow(); row.resetRowPosition(); while (row.advanceRow()) { js.array(); for (int i = 0; i < getColumnCount(); i++) { row.putJSONRep(i, js); } js.endArray(); } js.endArray(); js.endObject(); } catch (JSONException e) { e.printStackTrace(); throw new RuntimeException("Failed to serialized a table to JSON.", e); } return js.toString(); }
java
/** Parses a JSON string and reconstructs the VoltTable it encodes. */
public static VoltTable fromJSONString(String json) throws JSONException, IOException {
    return fromJSONObject(new JSONObject(json));
}
java
/**
 * Returns a read-only copy of this table that shares the underlying byte
 * data (via ByteBuffer.duplicate — independent position/limit, shared
 * contents) while cloning the mutable bookkeeping arrays (m_offsets,
 * m_schemaString). Cursor/memoization state is carried over as-is.
 */
VoltTable semiDeepCopy() { assert(verifyTableInvariants()); // share the immutable metadata if it's present for tests
final VoltTable cloned = new VoltTable(m_extraMetadata); cloned.m_colCount = m_colCount; cloned.m_rowCount = m_rowCount; cloned.m_rowStart = m_rowStart; // duplicate() shares bytes but gives the clone its own position/limit
cloned.m_buffer = m_buffer.duplicate(); cloned.m_activeRowIndex = m_activeRowIndex; cloned.m_hasCalculatedOffsets = m_hasCalculatedOffsets; cloned.m_memoizedBufferOffset = m_memoizedBufferOffset; cloned.m_memoizedRowOffset = m_memoizedRowOffset; // clone mutable arrays so the copies can't alias each other
cloned.m_offsets = m_offsets == null ? null : m_offsets.clone(); cloned.m_position = m_position; cloned.m_schemaString = m_schemaString == null ? null : m_schemaString.clone(); cloned.m_wasNull = m_wasNull; // make the new table read only
cloned.m_readOnly = true; assert(verifyTableInvariants()); assert(cloned.verifyTableInvariants()); return cloned; }
java
/** Builds and returns a fresh ColumnInfo array describing this table's columns in order. */
public ColumnInfo[] getTableSchema() {
    ColumnInfo[] schema = new ColumnInfo[m_colCount];
    for (int i = 0; i < m_colCount; i++) {
        schema[i] = new ColumnInfo(getColumnName(i), getColumnType(i));
    }
    return schema;
}
java
/**
 * Validates the export processor configuration by instantiating the
 * configured export client class and letting it check its own properties.
 * Any failure (missing class, bad config, etc.) is wrapped and rethrown.
 */
@Override public void checkProcessorConfig(Properties properties) {
    String exportClientClass = properties.getProperty(EXPORT_TO_TYPE);
    Preconditions.checkNotNull(exportClientClass, "export to type is undefined or custom export plugin class missing.");
    try {
        final Class<?> clientClass = Class.forName(exportClientClass);
        // getDeclaredConstructor().newInstance() replaces the deprecated
        // Class.newInstance(), which propagated checked exceptions unsafely;
        // all throwables are wrapped below either way.
        ExportClientBase client = (ExportClientBase) clientClass.getDeclaredConstructor().newInstance();
        client.configure(properties);
    } catch(Throwable t) {
        throw new RuntimeException(t);
    }
}
java
private long extractCommittedSpHandle(ExportRow row, long committedSeqNo) { long ret = 0; if (committedSeqNo == ExportDataSource.NULL_COMMITTED_SEQNO) { return ret; } // Get the rows's sequence number (3rd column) long seqNo = (long) row.values[2]; if (seqNo != committedSeqNo) { return ret; } // Get the row's sp handle (1rst column) ret = (long) row.values[0]; return ret; }
java
/**
 * Emits compiler warnings for materialized views whose maintenance may be
 * slow: single-table views whose min()/max() fallback queries would require
 * a sequential scan, and join-query views whose refresh plan uses no index
 * scans (ENG-10864).
 *
 * @throws VoltCompilerException propagated from plan-tree extraction
 */
public void processMaterializedViewWarnings(Database db, HashMap<Table, String> matViewMap) throws VoltCompilerException { for (Table table : db.getTables()) { for (MaterializedViewInfo mvInfo : table.getViews()) { for (Statement stmt : mvInfo.getFallbackquerystmts()) { // If there is any statement in the fallBackQueryStmts map, then
// there must be some min/max columns.
// Only check if the plan uses index scan.
if (needsWarningForSingleTableView( getPlanNodeTreeFromCatalogStatement(db, stmt))) { // If we are using IS NOT DISTINCT FROM as our equality operator (which is necessary
// to get correct answers), then there will often be no index scans in the plan,
// since we cannot optimize IS NOT DISTINCT FROM.
m_compiler.addWarn( "No index found to support UPDATE and DELETE on some of the min() / max() columns " + "in the materialized view " + mvInfo.getTypeName() + ", and a sequential scan might be issued when current min / max value is updated / deleted."); // one warning per view is enough
break; } } } // If it's a view on join query case, we check if the join can utilize indices.
// We throw out warning only if no index scan is used in the plan (ENG-10864).
MaterializedViewHandlerInfo mvHandlerInfo = table.getMvhandlerinfo().get("mvHandlerInfo"); if (mvHandlerInfo != null) { Statement createQueryStatement = mvHandlerInfo.getCreatequery().get("createQuery"); if (needsWarningForJoinQueryView( getPlanNodeTreeFromCatalogStatement(db, createQueryStatement))) { m_compiler.addWarn( "No index found to support some of the join operations required to refresh the materialized view " + table.getTypeName() + ". The refreshing may be slow."); } } } }
java
/**
 * Looks up the MaterializedViewInfo for a view table via its source
 * (materializer) table; returns null when {@code tbl} is not a view.
 */
public static MaterializedViewInfo getMaterializedViewInfo(Table tbl) {
    Table source = tbl.getMaterializer();
    if (source == null) {
        return null;
    }
    return source.getViews().get(tbl.getTypeName());
}
java
/**
 * Returns the fragment id registered for the given plan hash. The entry is
 * expected to exist; map access is synchronized on FragInfo.class like the
 * other repository accessors.
 */
public static long getFragmentIdForPlanHash(byte[] planHash) {
    Sha1Wrapper hashKey = new Sha1Wrapper(planHash);
    FragInfo entry;
    synchronized (FragInfo.class) {
        entry = m_plansByHash.get(hashKey);
    }
    assert(entry != null);
    return entry.fragId;
}
java
public static String getStmtTextForPlanHash(byte[] planHash) { Sha1Wrapper key = new Sha1Wrapper(planHash); FragInfo frag = null; synchronized (FragInfo.class) { frag = m_plansByHash.get(key); } assert(frag != null); // SQL statement text is not stored in the repository for ad hoc statements // -- it may be inaccurate because we parameterize the statement on its constants. // Callers know if they are asking about ad hoc or pre-planned fragments, // and shouldn't call this method for the ad hoc case. assert(frag.stmtText != null); return frag.stmtText; }
java
/**
 * Registers (or re-references) a plan fragment keyed by its plan hash and
 * returns its fragment id. New fragments get a fresh id and may trigger LRU
 * eviction when the cache exceeds EE_PLAN_CACHE_SIZE; existing fragments
 * get their refCount bumped so they survive eviction.
 */
public static long loadOrAddRefPlanFragment(byte[] planHash, byte[] plan, String stmtText) { Sha1Wrapper key = new Sha1Wrapper(planHash); synchronized (FragInfo.class) { FragInfo frag = m_plansByHash.get(key); if (frag == null) { // first sighting: mint an id and index by both hash and id
frag = new FragInfo(key, plan, m_nextFragId++, stmtText); m_plansByHash.put(frag.hash, frag); m_plansById.put(frag.fragId, frag); if (m_plansById.size() > ExecutionEngine.EE_PLAN_CACHE_SIZE) { evictLRUfragment(); } } // Bit of a hack to work around an issue where a statement-less adhoc
// fragment could be identical to a statement-needing regular procedure.
// This doesn't really address the broader issue that fragment hashes
// are not 1-1 with SQL statements.
if (frag.stmtText == null) { frag.stmtText = stmtText; } // The fragment MAY be in the LRU map.
// An incremented refCount is a lazy way to keep it safe from eviction
// without having to update the map.
// This optimizes for popular fragments in a small or stable cache that may be reused
// many times before the eviction process needs to take any notice.
frag.refCount++; return frag.fragId; } }
java
/** Returns the serialized plan registered under {@code fragmentId}; the entry must exist. */
public static byte[] planForFragmentId(long fragmentId) {
    assert(fragmentId > 0);
    FragInfo entry;
    synchronized (FragInfo.class) {
        entry = m_plansById.get(fragmentId);
    }
    assert(entry != null);
    return entry.plan;
}
java
/**
 * Tests whether this expression structurally matches the indexed expression
 * {@code expr}, recursing through left/right children and argument lists.
 * Returns the accumulated list of parameter bindings on a match (possibly
 * empty), or null on any mismatch.
 */
public List<AbstractExpression> bindingToIndexedExpression( AbstractExpression expr) { // Defer the result construction for as long as possible on the
// assumption that this function mostly gets applied to eliminate
// negative cases.
if (m_type != expr.m_type) { // The only allowed difference in expression types is between a
// parameter and its original constant value.
// That's handled in the independent override.
return null; } // From here, this is much like the straight equality check,
// except that this function and "equals" must each call themselves
// in their recursions.
// Delegating to this factored-out component of the "equals"
// implementation eases simultaneous refinement of both methods.
if ( ! hasEqualAttributes(expr)) { return null; } // The derived classes have verified that any added attributes
// are identical.
// Check that the presence, or lack, of children is the same
if ((expr.m_left == null) != (m_left == null)) { return null; } if ((expr.m_right == null) != (m_right == null)) { return null; } if ((expr.m_args == null) != (m_args == null)) { return null; } // Check that the children identify themselves as matching
List<AbstractExpression> leftBindings = null; if (m_left != null) { leftBindings = m_left.bindingToIndexedExpression(expr.m_left); if (leftBindings == null) { return null; } } List<AbstractExpression> rightBindings = null; if (m_right != null) { rightBindings = m_right.bindingToIndexedExpression(expr.m_right); if (rightBindings == null) { return null; } } List<AbstractExpression> argBindings = null; if (m_args != null) { if (m_args.size() != expr.m_args.size()) { return null; } argBindings = new ArrayList<>(); int ii = 0; // iterate the args lists in parallel, binding pairwise
for (AbstractExpression rhs : expr.m_args) { AbstractExpression lhs = m_args.get(ii++); List<AbstractExpression> moreBindings = lhs.bindingToIndexedExpression(rhs); if (moreBindings == null) { // fail on any non-match
return null; }
argBindings.addAll(moreBindings); } } // It's a match, so gather up the details.
// It's rare (if even possible) for the same bound parameter to get
// listed twice, so don't worry about duplicate entries, here.
// That should not cause any issue for the caller.
List<AbstractExpression> result = new ArrayList<>(); if (leftBindings != null) { // null here can only mean no left child
result.addAll(leftBindings); } if (rightBindings != null) { // null here can only mean no right child
result.addAll(rightBindings); } if (argBindings != null) { // null here can only mean no args
result.addAll(argBindings); } return result; }
java
/**
 * Serializes parallel sort-expression / sort-direction lists into a JSON
 * array under the SORT_COLUMNS key: one object per sort column, each with a
 * SORT_EXPRESSION object and (when directions are supplied) a SORT_DIRECTION
 * string.
 *
 * @param sortDirections may be null, in which case directions are omitted
 */
public static void toJSONArrayFromSortList( JSONStringer stringer, List<AbstractExpression> sortExpressions, List<SortDirectionType> sortDirections) throws JSONException { stringer.key(SortMembers.SORT_COLUMNS); stringer.array(); int listSize = sortExpressions.size(); for (int ii = 0; ii < listSize; ii++) { stringer.object(); // nested object holding the serialized expression itself
stringer.key(SortMembers.SORT_EXPRESSION).object(); sortExpressions.get(ii).toJSONString(stringer); stringer.endObject(); if (sortDirections != null) { stringer.keySymbolValuePair(SortMembers.SORT_DIRECTION, sortDirections.get(ii).toString()); } stringer.endObject(); } stringer.endArray(); }
java
/**
 * Inverse of toJSONArrayFromSortList: repopulates the given lists from the
 * SORT_COLUMNS array in {@code jobj}, clearing them first. Direction entries
 * are only read when {@code sortDirections} is non-null and the column
 * object carries a SORT_DIRECTION. No-op when SORT_COLUMNS is absent.
 */
public static void loadSortListFromJSONArray( List<AbstractExpression> sortExpressions, List<SortDirectionType> sortDirections, JSONObject jobj) throws JSONException { if (jobj.has(SortMembers.SORT_COLUMNS)) { sortExpressions.clear(); if (sortDirections != null) { sortDirections.clear(); } JSONArray jarray = jobj.getJSONArray(SortMembers.SORT_COLUMNS); int size = jarray.length(); for (int ii = 0; ii < size; ++ii) { JSONObject tempObj = jarray.getJSONObject(ii); sortExpressions.add( fromJSONChild(tempObj, SortMembers.SORT_EXPRESSION)); if (sortDirections == null || ! tempObj.has(SortMembers.SORT_DIRECTION)) { continue; } String sdAsString = tempObj.getString(SortMembers.SORT_DIRECTION); sortDirections.add(SortDirectionType.get(sdAsString)); } } // the two lists must stay parallel when directions are tracked
assert(sortDirections == null || sortExpressions.size() == sortDirections.size()); }
java
/**
 * Loads a list of expressions from the JSON array stored under {@code label}
 * in {@code parent}; returns null when the child is absent or JSON null.
 */
public static List<AbstractExpression> loadFromJSONArrayChild( List<AbstractExpression> starter, JSONObject parent, String label, StmtTableScan tableScan) throws JSONException {
    if (parent.isNull(label)) {
        return null;
    }
    return loadFromJSONArray(starter, parent.getJSONArray(label), tableScan);
}
java
/**
 * Rewrites this expression tree, replacing any subexpression found in
 * {@code aggTableIndexMap} with a TupleValueExpression referencing the
 * corresponding aggregate-output column. Unchanged subtrees are returned
 * as-is (identity-compared) so a clone is only made when something below
 * actually changed.
 */
public AbstractExpression replaceWithTVE( Map<AbstractExpression, Integer> aggTableIndexMap, Map<Integer, ParsedColInfo> indexToColumnMap) { Integer ii = aggTableIndexMap.get(this); if (ii != null) { // this whole subtree maps to an output column: replace with a TVE
ParsedColInfo col = indexToColumnMap.get(ii); TupleValueExpression tve = new TupleValueExpression( col.m_tableName, col.m_tableAlias, col.m_columnName, col.m_alias, this, ii); if (this instanceof TupleValueExpression) { tve.setOrigStmtId(((TupleValueExpression)this).getOrigStmtId()); } // To prevent pushdown of LIMIT when ORDER BY references an agg. ENG-3487.
if (hasAnySubexpressionOfClass(AggregateExpression.class)) { tve.setHasAggregate(true); } return tve; } // recurse into children
AbstractExpression lnode = null; AbstractExpression rnode = null; if (m_left != null) { lnode = m_left.replaceWithTVE(aggTableIndexMap, indexToColumnMap); } if (m_right != null) { rnode = m_right.replaceWithTVE(aggTableIndexMap, indexToColumnMap); } ArrayList<AbstractExpression> newArgs = null; boolean changed = false; if (m_args != null) { newArgs = new ArrayList<>(); for (AbstractExpression expr: m_args) { AbstractExpression ex = expr.replaceWithTVE(aggTableIndexMap, indexToColumnMap); newArgs.add(ex); if (ex != expr) { changed = true; } } } // only clone when some child was actually replaced
if (m_left != lnode || m_right != rnode || changed) { AbstractExpression resExpr = clone(); resExpr.setLeft(lnode); resExpr.setRight(rnode); resExpr.setArgs(newArgs); return resExpr; } return this; }
java
/**
 * Depth-first search over this expression tree (node itself, left, right,
 * then args) for any node satisfying {@code pred}.
 */
public boolean hasAnySubexpressionWithPredicate(SubexprFinderPredicate pred) {
    if (pred.matches(this)) {
        return true;
    }
    if (m_left != null && m_left.hasAnySubexpressionWithPredicate(pred)) {
        return true;
    }
    if (m_right != null && m_right.hasAnySubexpressionWithPredicate(pred)) {
        return true;
    }
    if (m_args == null) {
        return false;
    }
    for (AbstractExpression argument : m_args) {
        if (argument.hasAnySubexpressionWithPredicate(pred)) {
            return true;
        }
    }
    return false;
}
java
/**
 * Resolves a still-ambiguous NUMERIC operand to a concrete type: DECIMAL
 * when the peer value is DECIMAL, FLOAT otherwise. Already-concrete operands
 * are left alone.
 */
void refineOperandType(VoltType valueType) {
    if (m_valueType != VoltType.NUMERIC) {
        return;
    }
    VoltType refined =
            (valueType == VoltType.DECIMAL) ? VoltType.DECIMAL : VoltType.FLOAT;
    m_valueType = refined;
    m_valueSize = refined.getLengthInBytesForFixedTypes();
}
java
/**
 * Finalizes value types of all children (left, right, then args, in that
 * order) and folds each child's content-determinism message into this node.
 */
protected final void finalizeChildValueTypes() {
    AbstractExpression[] sides = { m_left, m_right };
    for (AbstractExpression child : sides) {
        if (child != null) {
            child.finalizeValueTypes();
            updateContentDeterminismMessage(child.getContentDeterminismMessage());
        }
    }
    if (m_args != null) {
        for (AbstractExpression argument : m_args) {
            argument.finalizeValueTypes();
            updateContentDeterminismMessage(argument.getContentDeterminismMessage());
        }
    }
}
java
/** Resolves all children (left, right, then args) against the given table. */
protected final void resolveChildrenForTable(Table table) {
    AbstractExpression[] sides = { m_left, m_right };
    for (AbstractExpression child : sides) {
        if (child != null) {
            child.resolveForTable(table);
        }
    }
    if (m_args != null) {
        for (AbstractExpression argument : m_args) {
            argument.resolveForTable(table);
        }
    }
}
java
public boolean isValidExprForIndexesAndMVs(StringBuffer msg, boolean isMV) { if (containsFunctionById(FunctionSQL.voltGetCurrentTimestampId())) { msg.append("cannot include the function NOW or CURRENT_TIMESTAMP."); return false; } else if (hasAnySubexpressionOfClass(AggregateExpression.class)) { msg.append("cannot contain aggregate expressions."); return false; } else if (hasAnySubexpressionOfClass(AbstractSubqueryExpression.class)) { // There may not be any of these in HSQL1.9.3b. However, in // HSQL2.3.2 subqueries are stored as expressions. So, we may // find some here. We will keep it here for the moment. if (isMV) { msg.append("cannot contain subquery sources."); } else { msg.append("cannot contain subqueries."); } return false; } else if (hasUserDefinedFunctionExpression()) { msg.append("cannot contain calls to user defined functions."); return false; } else { return true; } }
java
/**
 * Applies isValidExprForIndexesAndMVs to every expression in the list,
 * stopping (and reporting false) at the first invalid one.
 */
public static boolean validateExprsForIndexesAndMVs(List<AbstractExpression> checkList, StringBuffer msg, boolean isMV) {
    for (AbstractExpression expr : checkList) {
        boolean valid = expr.isValidExprForIndexesAndMVs(msg, isMV);
        if (!valid) {
            return false;
        }
    }
    return true;
}
java
/**
 * True when any function subexpression of this tree has the given function
 * id. Value expressions are leaves with no function calls, so they short-circuit.
 */
private boolean containsFunctionById(int functionId) {
    if (this instanceof AbstractValueExpression) {
        return false;
    }
    for (AbstractExpression funcExpr : findAllFunctionSubexpressions()) {
        assert(funcExpr instanceof FunctionExpression);
        if (((FunctionExpression) funcExpr).hasFunctionId(functionId)) {
            return true;
        }
    }
    return false;
}
java
/**
 * True when this expression's value type can be indexed; otherwise appends
 * a description of the offending type to {@code msg} and returns false.
 */
public boolean isValueTypeIndexable(StringBuffer msg) {
    if (m_valueType.isIndexable()) {
        return true;
    }
    msg.append("expression of type " + m_valueType.getName());
    return false;
}
java
public boolean isValueTypeUniqueIndexable(StringBuffer msg) { // This call to isValueTypeIndexable is needed because // all comparison, all conjunction, and some operator expressions // need to refine it to compensate for their false claims that // their value types (actually non-indexable boolean) is BIGINT. // that their value type is actually boolean. // If they were fixed, isValueTypeIndexable and // isValueTypeUniqueIndexable could be replaced by VoltType functions. if (!isValueTypeIndexable(msg)) { return false; } if (!m_valueType.isUniqueIndexable()) { msg.append("expression of type " + m_valueType.getName()); return false; } return true; }
java
/**
 * Recursively collects into {@code ops} the symbol of every operator in this
 * tree that is not safe to appear in DDL.
 */
public void findUnsafeOperatorsForDDL(UnsafeOperatorsForDDL ops) {
    if (!m_type.isSafeForDDL()) {
        ops.add(m_type.symbol());
    }
    AbstractExpression[] sides = { m_left, m_right };
    for (AbstractExpression child : sides) {
        if (child != null) {
            child.findUnsafeOperatorsForDDL(ops);
        }
    }
    if (m_args != null) {
        for (AbstractExpression arg : m_args) {
            arg.findUnsafeOperatorsForDDL(ops);
        }
    }
}
java
/**
 * Returns this expression's first operand: the left child when present,
 * else the first element of the args list, else null. An expression uses
 * either children or args, never both — asserted below.
 */
public AbstractExpression getFirstArgument() {
    if (m_left != null) {
        assert(m_args == null);
        return m_left;
    }
    if (m_args == null || m_args.isEmpty()) {
        return null;
    }
    assert(m_left == null && m_right == null);
    return m_args.get(0);
}
java
/**
 * Hashes a password (UTF-8 bytes) with the digest algorithm dictated by the
 * auth scheme. Returns null for a null password.
 *
 * Previously an unavailable digest algorithm printed a stack trace and
 * called System.exit(-1); a library method must not kill the JVM, so it now
 * throws with the cause preserved. Every supported scheme maps to a JDK
 * built-in digest, so this path indicates a broken JRE.
 *
 * @throws IllegalStateException when the scheme's digest is unavailable
 */
public static byte[] getHashedPassword(ClientAuthScheme scheme, String password) {
    if (password == null) {
        return null;
    }
    MessageDigest md;
    try {
        md = MessageDigest.getInstance(ClientAuthScheme.getDigestScheme(scheme));
    } catch (NoSuchAlgorithmException e) {
        throw new IllegalStateException(
                "Digest algorithm for auth scheme " + scheme + " is unavailable", e);
    }
    return md.digest(password.getBytes(Constants.UTF8ENCODING));
}
java
/**
 * Convenience overload: picks the service name ("database", or the Kerberos
 * service when a JAAS Subject is supplied) and delegates to the full-form
 * getAuthenticatedConnection with a null SSL engine.
 */
public static Object[] getAuthenticatedConnection(String host, String username, byte[] hashedPassword, int port, final Subject subject, ClientAuthScheme scheme, long timeoutMillis) throws IOException {
    final String service = (subject == null) ? "database" : Constants.KERBEROS;
    return getAuthenticatedConnection(service, host, username, hashedPassword, port, subject, scheme, null, timeoutMillis);
}
java
/**
 * Serializes this snapshot request into the JSON form stored in ZooKeeper.
 * Optional fields (data, terminus) are added via putOpt so nulls are skipped.
 */
public JSONObject getJSONObjectForZK() throws JSONException {
    final JSONObject obj = new JSONObject();
    obj.put(SnapshotUtil.JSON_PATH, m_path);
    obj.put(SnapshotUtil.JSON_PATH_TYPE, m_stype.toString());
    obj.put(SnapshotUtil.JSON_NONCE, m_nonce);
    obj.put(SnapshotUtil.JSON_BLOCK, m_blocking);
    obj.put(SnapshotUtil.JSON_FORMAT, m_format.toString());
    obj.putOpt(SnapshotUtil.JSON_DATA, m_data);
    obj.putOpt(SnapshotUtil.JSON_TERMINUS, m_terminus);
    return obj;
}
java
/** Aggregates per-table size estimates for every table in the catalog. */
public static DatabaseSizes getCatalogSizes(Database dbCatalog, boolean isXDCR) {
    DatabaseSizes sizes = new DatabaseSizes();
    for (Table table : dbCatalog.getTables()) {
        sizes.addTable(getTableSize(table, isXDCR));
    }
    return sizes;
}
java
/**
 * Command-line entry point: writes a tar archive to sa[0]. With no further
 * arguments, archives stdin under the entry name "stdin"; otherwise archives
 * each named file. Prints usage and exits 0 when no arguments are given.
 */
static public void main(String[] sa) throws IOException, TarMalformatException {
    if (sa.length < 1) {
        System.out.println(RB.singleton.getString(RB.TARGENERATOR_SYNTAX, DbBackup.class.getName()));
        System.exit(0);
    }
    TarGenerator tar = new TarGenerator(new File(sa[0]), true, null);
    if (sa.length == 1) {
        // no input files: read the single entry from stdin
        tar.queueEntry("stdin", System.in, 10240);
    } else {
        for (int i = 1; i < sa.length; i++) {
            tar.queueEntry(new File(sa[i]));
        }
    }
    tar.write();
}
java
/**
 * Reads an entire file into a byte array.
 *
 * Fixes two defects in the previous version: (1) a single read() call may
 * legally return fewer bytes than requested, silently truncating the result
 * — we now loop until the buffer is full; (2) the stream leaked if
 * getChannel().size() threw — try-with-resources now guarantees closure.
 * An empty file still raises IOException, as before.
 *
 * @throws IOException when the file is empty, truncated mid-read, or unreadable
 */
public static byte[] fileToBytes(File path) throws IOException {
    try (FileInputStream fin = new FileInputStream(path)) {
        byte[] buffer = new byte[(int) fin.getChannel().size()];
        if (buffer.length == 0) {
            throw new IOException("File " + path.getAbsolutePath() + " is empty");
        }
        int offset = 0;
        while (offset < buffer.length) {
            int n = fin.read(buffer, offset, buffer.length - offset);
            if (n == -1) {
                throw new IOException("File " + path.getAbsolutePath()
                        + " ended after " + offset + " of " + buffer.length + " bytes");
            }
            offset += n;
        }
        return buffer;
    }
}
java
/**
 * System procedure entry point for a nibble delete: delegates to
 * nibbleDeleteCommon with the final flag fixed to true.
 *
 * @param chunksize maximum number of rows to delete in this invocation
 * @return result table produced by nibbleDeleteCommon
 */
public VoltTable run(SystemProcedureExecutionContext ctx, String tableName, String columnName, String compStr, VoltTable parameter, long chunksize) { return nibbleDeleteCommon(ctx, tableName, columnName, compStr, parameter, chunksize, true); }
java
/**
 * Returns the next available chunk of bytes, or null when none remains.
 * Rethrows any converter failure first. While converters are still active
 * this blocks on the queue (take); once they have all finished it drains
 * without blocking (poll). The available-bytes counter is decremented for
 * whatever is handed out.
 *
 * @throws IOException on a recorded converter error or on interruption
 */
public byte[] read() throws IOException { if (m_exception.get() != null) { throw m_exception.get(); } byte bytes[] = null; if (m_activeConverters.get() == 0) { // producers are done: drain whatever is left without blocking
bytes = m_available.poll(); } else { try { // producers still running: wait for the next chunk
bytes = m_available.take(); } catch (InterruptedException e) { throw new IOException(e); } } if (bytes != null) { // account for the bytes leaving the buffer
m_availableBytes.addAndGet(-1 * bytes.length); } return bytes; }
java
/**
 * Compiles a catalog jar from one or more DDL files. Returns false (after
 * logging) when no DDL paths are given or any file cannot be opened.
 */
public boolean compileFromDDL(final String jarOutputPath, final String... ddlFilePaths) {
    if (ddlFilePaths.length == 0) {
        compilerLog.error("At least one DDL file is required.");
        return false;
    }
    final List<VoltCompilerReader> readers;
    try {
        readers = DDLPathsToReaderList(ddlFilePaths);
    } catch (VoltCompilerException e) {
        compilerLog.error("Unable to open DDL file.", e);
        return false;
    }
    return compileInternalToFile(jarOutputPath, null, null, readers, null);
}
java
/**
 * Compiles a catalog jar directly from a DDL string by spilling it to a
 * temporary file and reusing the path-based entry point.
 *
 * @param ddl     DDL text to compile
 * @param jarPath output jar path
 * @return true on successful compilation
 */
public boolean compileDDLString(String ddl, String jarPath) {
    // Persist the DDL so compileFromDDL can treat it like any other schema file.
    final File ddlFile = VoltProjectBuilder.writeStringToTempFile(ddl);
    ddlFile.deleteOnExit();
    return compileFromDDL(jarPath, ddlFile.getPath());
}
java
/**
 * Compiles a catalog jar containing no user schema, seeding the in-memory jar
 * with a placeholder DDL file.
 *
 * @param jarOutputPath path of the jar to produce
 * @return true on success, false if the placeholder DDL could not be added
 */
public boolean compileEmptyCatalog(final String jarOutputPath) {
    // Use a special DDL reader to provide the contents.
    List<VoltCompilerReader> ddlReaderList = new ArrayList<>(1);
    ddlReaderList.add(new VoltCompilerStringReader("ddl.sql", m_emptyDDLComment));
    // Seed it with the DDL so that a version upgrade hack in compileInternalToFile()
    // doesn't try to get the DDL file from the path.
    InMemoryJarfile jarFile = new InMemoryJarfile();
    try {
        ddlReaderList.get(0).putInJar(jarFile, "ddl.sql");
    } catch (IOException e) {
        compilerLog.error("Failed to add DDL file to empty in-memory jar.");
        return false;
    }
    return compileInternalToFile(jarOutputPath, null, null, ddlReaderList, jarFile);
}
java
private void debugVerifyCatalog(InMemoryJarfile origJarFile, Catalog origCatalog) { final VoltCompiler autoGenCompiler = new VoltCompiler(m_isXDCR); // Make the new compiler use the original jarfile's classloader so it can // pull in the class files for procedures and imports autoGenCompiler.m_classLoader = origJarFile.getLoader(); List<VoltCompilerReader> autogenReaderList = new ArrayList<>(1); autogenReaderList.add(new VoltCompilerJarFileReader(origJarFile, AUTOGEN_DDL_FILE_NAME)); InMemoryJarfile autoGenJarOutput = new InMemoryJarfile(); autoGenCompiler.m_currentFilename = AUTOGEN_DDL_FILE_NAME; // This call is purposely replicated in retryFailedCatalogRebuildUnderDebug, // where it provides an opportunity to set a breakpoint on a do-over when this // mainline call produces a flawed catalog that fails the catalog diff. // Keep the two calls in synch to allow debugging under the same exact conditions. Catalog autoGenCatalog = autoGenCompiler.compileCatalogInternal(null, null, autogenReaderList, autoGenJarOutput); if (autoGenCatalog == null) { Log.info("Did not verify catalog because it could not be compiled."); return; } FilteredCatalogDiffEngine diffEng = new FilteredCatalogDiffEngine(origCatalog, autoGenCatalog, false); String diffCmds = diffEng.commands(); if (diffCmds != null && !diffCmds.equals("")) { // This retry is disabled by default to avoid confusing the unwary developer // with a "pointless" replay of an apparently flawed catalog rebuild. // Enable it via this flag to provide a chance to set an early breakpoint // that is only triggered in hopeless cases. if (RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG) { autoGenCatalog = replayFailedCatalogRebuildUnderDebug( autoGenCompiler, autogenReaderList, autoGenJarOutput); } // Re-run a failed diff more verbosely as a pre-crash test diagnostic. 
diffEng = new FilteredCatalogDiffEngine(origCatalog, autoGenCatalog, true); diffCmds = diffEng.commands(); String crashAdvice = "Catalog Verification from Generated DDL failed! " + "VoltDB dev: Consider" + (RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG ? "" : " setting VoltCompiler.RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG = true and") + " setting a breakpoint in VoltCompiler.replayFailedCatalogRebuildUnderDebug" + " to debug a replay of the faulty catalog rebuild roundtrip. "; VoltDB.crashLocalVoltDB(crashAdvice + "The offending diffcmds were: " + diffCmds); } else { Log.info("Catalog verification completed successfuly."); } }
java
private Catalog replayFailedCatalogRebuildUnderDebug( VoltCompiler autoGenCompiler, List<VoltCompilerReader> autogenReaderList, InMemoryJarfile autoGenJarOutput) { // Be sure to set RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG = true to enable // this last ditch retry before crashing. // BREAKPOINT HERE! // Then step IN to debug the failed rebuild -- or, just as likely, the canonical ddl. // Or step OVER to debug just the catalog diff process, retried with verbose output -- // maybe it's just being too sensitive to immaterial changes? Catalog autoGenCatalog = autoGenCompiler.compileCatalogInternal(null, null, autogenReaderList, autoGenJarOutput); return autoGenCatalog; }
java
/**
 * Builds one UTF-8 text blob per statement of every procedure in the catalog
 * database, keyed "&lt;ProcName&gt;_&lt;StmtName&gt;.txt", containing the SQL,
 * cost, and decoded explain plan.
 *
 * NOTE(review): the {@code catalog} parameter is never used — the method reads
 * the m_catalog field instead.  Confirm whether callers always pass m_catalog.
 *
 * @return map of file name to explain-plan file contents
 */
HashMap<String, byte[]> getExplainPlans(Catalog catalog) {
    HashMap<String, byte[]> retval = new HashMap<>();
    Database db = getCatalogDatabase(m_catalog);
    assert(db != null);
    for (Procedure proc : db.getProcedures()) {
        for (Statement stmt : proc.getStatements()) {
            String s = "SQL: " + stmt.getSqltext() + "\n";
            s += "COST: " + Integer.toString(stmt.getCost()) + "\n";
            s += "PLAN:\n\n";
            // The stored plan is hex-encoded; decode it back to readable text.
            s += Encoder.hexDecodeToString(stmt.getExplainplan()) + "\n";
            byte[] b = s.getBytes(Constants.UTF8ENCODING);
            retval.put(proc.getTypeName() + "_" + stmt.getTypeName() + ".txt", b);
        }
    }
    return retval;
}
java
/**
 * Core catalog compilation: initializes a fresh single-cluster catalog,
 * compiles the database node from the given DDL readers, and stamps the
 * transaction-epoch.  Returns null on compilation failure.
 *
 * @param cannonicalDDLIfAny   previously generated canonical DDL, or null
 * @param previousCatalogIfAny prior catalog (its "database" is handed to the
 *                             database compile step), or null
 * @param ddlReaderList        DDL sources to compile
 * @param jarOutput            jar receiving compiled artifacts
 * @return the compiled catalog, or null if compilation raised a compiler error
 */
private Catalog compileCatalogInternal(
        final VoltCompilerReader cannonicalDDLIfAny,
        final Catalog previousCatalogIfAny,
        final List<VoltCompilerReader> ddlReaderList,
        final InMemoryJarfile jarOutput) {
    m_catalog = new Catalog();
    // Initialize the catalog for one cluster
    m_catalog.execute("add / clusters cluster");
    m_catalog.getClusters().get("cluster").setSecurityenabled(false);
    // shutdown and make a new hsqldb
    try {
        Database previousDBIfAny = null;
        if (previousCatalogIfAny != null) {
            previousDBIfAny = previousCatalogIfAny.getClusters().get("cluster").getDatabases().get("database");
        }
        compileDatabaseNode(cannonicalDDLIfAny, previousDBIfAny, ddlReaderList, jarOutput);
    } catch (final VoltCompilerException e) {
        // Compile errors are reported through the compiler's own channels;
        // null tells the caller the catalog is unusable.
        return null;
    }
    assert(m_catalog != null);
    // add epoch info to catalog
    final int epoch = (int)(TransactionIdManager.getEpoch() / 1000);
    m_catalog.getClusters().get("cluster").setLocalepoch(epoch);
    return m_catalog;
}
java
/**
 * Loads each class named in m_addedClasses and adds it to the output jar,
 * skipping classes already present (the jar is pre-populated during automatic
 * catalog version upgrade).  Afterwards m_addedClasses is narrowed to the
 * classes actually added.
 *
 * @param jarOutput jar receiving the class files
 * @throws VoltCompilerException if a named class cannot be loaded or added
 */
private void addExtraClasses(final InMemoryJarfile jarOutput) throws VoltCompilerException {
    List<String> addedClasses = new ArrayList<>();
    for (String className : m_addedClasses) {
        /*
         * Only add the class if it isn't already in the output jar.
         * The jar will be pre-populated when performing an automatic
         * catalog version upgrade.
         */
        if (!jarOutput.containsKey(className)) {
            try {
                Class<?> clz = Class.forName(className, true, m_classLoader);
                if (addClassToJar(jarOutput, clz)) {
                    addedClasses.add(className);
                }
            } catch (Exception e) {
                // NOTE(review): the original exception is discarded here —
                // confirm VoltCompilerException offers no cause constructor.
                String msg = "Class %s could not be loaded/found/added to the jar.";
                msg = String.format(msg, className);
                throw new VoltCompilerException(msg);
            }
            // reset the added classes to the actual added classes
        }
    }
    m_addedClasses = addedClasses.toArray(new String[0]);
}
java
/**
 * Hands the captured diagnostic detail to the caller and clears the capture
 * buffer, so each capture is harvested exactly once.
 *
 * @return the captured lines, or null if nothing was captured (or already harvested)
 */
public List<String> harvestCapturedDetail() {
    final List<String> detail = m_capturedDiagnosticDetail;
    // Subsequent calls return null until capture is re-armed.
    m_capturedDiagnosticDetail = null;
    return detail;
}
java
String getKeyPrefix(StatementPartitioning partitioning, DeterminismMode detMode, String joinOrder) { // no caching for inferred yet if (partitioning.isInferred()) { return null; } String joinOrderPrefix = "#"; if (joinOrder != null) { joinOrderPrefix += joinOrder; } boolean partitioned = partitioning.wasSpecifiedAsSingle(); return joinOrderPrefix + String.valueOf(detMode.toChar()) + (partitioned ? "P#" : "R#"); }
java
Statement getCachedStatement(String keyPrefix, String sql) { String key = keyPrefix + sql; Statement candidate = m_previousCatalogStmts.get(key); if (candidate == null) { ++m_stmtCacheMisses; return null; } // check that no underlying tables have been modified since the proc had been compiled String[] tablesTouched = candidate.getTablesread().split(","); for (String tableName : tablesTouched) { if (isDirtyTable(tableName)) { ++m_stmtCacheMisses; return null; } } tablesTouched = candidate.getTablesupdated().split(","); for (String tableName : tablesTouched) { if (isDirtyTable(tableName)) { ++m_stmtCacheMisses; return null; } } ++m_stmtCacheHits; // easy debugging stmt //printStmtCacheStats(); return candidate; }
java
/**
 * Records a (value1, value2) pair in the underlying map builder.
 * (Presumably the pair is a hash-range start/end — confirm against
 * HashRangeExpression's range semantics.)
 *
 * @return this builder, for chaining
 */
public HashRangeExpressionBuilder put(Integer value1, Integer value2) {
    m_builder.put(value1, value2);
    return this;
}
java
/**
 * Finalizes the accumulated ranges into a HashRangeExpression bound to the
 * given hash column.
 *
 * @param hashColumnIndex index of the column whose hash the ranges apply to
 * @return the constructed predicate expression
 */
public HashRangeExpression build(Integer hashColumnIndex) {
    final HashRangeExpression expr = new HashRangeExpression();
    expr.setRanges(m_builder.build());
    expr.setHashColumnIndex(hashColumnIndex);
    return expr;
}
java
/**
 * Removes and returns the head transaction when the queue state is UNBLOCKED;
 * returns null while the queue is blocked.  The state is refreshed before the
 * check so the decision reflects the latest safety/ordering information.
 */
@Override
public OrderableTransaction poll() {
    OrderableTransaction retval = null;
    updateQueueState();
    if (m_state == QueueState.UNBLOCKED) {
        // Peek first, then remove: both refer to the same head element.
        retval = super.peek();
        super.poll();
        // not BLOCKED_EMPTY
        assert(retval != null);
    }
    return retval;
}
java
@Override public boolean add(OrderableTransaction txnState) { if (m_initiatorData.containsKey(txnState.initiatorHSId) == false) { return false; } boolean retval = super.add(txnState); // update the queue state if (retval) updateQueueState(); return retval; }
java
/**
 * Records a txn/heartbeat observation from an initiator, advances that
 * initiator's last-seen and last-safe txn ids monotonically, and recomputes
 * the newest candidate transaction (the minimum last-seen across all tracked
 * initiators).
 *
 * @param initiatorHSId              the observing initiator's HSId
 * @param txnId                      txn id carried by the message (non-zero)
 * @param lastSafeTxnIdFromInitiator initiator's latest safe txn id
 * @return the initiator's last-seen txn id, or DUMMY_LAST_SEEN_TXN_ID if the
 *         initiator is no longer tracked (already failed)
 */
public long noteTransactionRecievedAndReturnLastSeen(long initiatorHSId, long txnId, long lastSafeTxnIdFromInitiator)
{
    // System.out.printf("Site %d got heartbeat message from initiator %d with txnid/safeid: %d/%d\n",
    //         m_siteId, initiatorSiteId, txnId, lastSafeTxnIdFromInitiator);

    // this doesn't exclude dummy txnid but is also a sanity check
    assert(txnId != 0);

    // Drop old data from already-failed initiators.
    if (m_initiatorData.containsKey(initiatorHSId) == false) {
        //hostLog.info("Dropping txn " + txnId + " data from failed initiatorSiteId: " + initiatorSiteId);
        return DtxnConstants.DUMMY_LAST_SEEN_TXN_ID;
    }

    // update the latest transaction for the specified initiator
    // (only ever move forward — stale messages must not regress these marks)
    LastInitiatorData lid = m_initiatorData.get(initiatorHSId);
    if (lid.m_lastSeenTxnId < txnId)
        lid.m_lastSeenTxnId = txnId;
    if (lid.m_lastSafeTxnId < lastSafeTxnIdFromInitiator)
        lid.m_lastSafeTxnId = lastSafeTxnIdFromInitiator;

    /*
     * Why aren't we asserting that the txnId is > then the last seen/last safe
     * It seems like this should be guaranteed by TCP ordering and we want to
     * know if it isn't!
     */

    // find the minimum value across all latest transactions
    long min = Long.MAX_VALUE;
    for (LastInitiatorData l : m_initiatorData.values())
        if (l.m_lastSeenTxnId < min) min = l.m_lastSeenTxnId;

    // This transaction is the guaranteed minimum
    // but is not yet necessarily 2PC'd to every site.
    m_newestCandidateTransaction = min;

    // this will update the state of the queue if needed
    updateQueueState();

    // return the last seen id for the originating initiator
    return lid.m_lastSeenTxnId;
}
java
/**
 * Handles an initiator failure: first recomputes the safe-transaction minimum
 * as if the dead initiator had advanced to Long.MAX_VALUE (so it no longer
 * constrains the minimum), then removes its tracking entry.
 *
 * @param initiatorId HSId of the failed initiator; must currently be tracked
 */
public void gotFaultForInitiator(long initiatorId) {
    // calculate the next minimum transaction w/o our dead friend
    noteTransactionRecievedAndReturnLastSeen(initiatorId, Long.MAX_VALUE, DtxnConstants.DUMMY_LAST_SEEN_TXN_ID);
    // remove initiator from minimum. txnid scoreboard
    LastInitiatorData remove = m_initiatorData.remove(initiatorId);
    assert(remove != null);
}
java
/**
 * Registers a tracking entry for the initiator if one does not exist yet.
 *
 * @param initiatorId HSId of the initiator to track
 * @return 1 when a new entry was created, 0 when the initiator was already known
 */
public int ensureInitiatorIsKnown(long initiatorId) {
    if (m_initiatorData.get(initiatorId) != null) {
        return 0; // already tracked
    }
    m_initiatorData.put(initiatorId, new LastInitiatorData());
    return 1;
}
java
/**
 * Reports the newest safe transaction id recorded for an initiator.
 *
 * @param initiatorId initiator HSId
 * @return the last-safe txn id, or null if the initiator is not tracked
 */
public Long getNewestSafeTransactionForInitiator(Long initiatorId) {
    final LastInitiatorData lid = m_initiatorData.get(initiatorId);
    return (lid == null) ? null : lid.m_lastSafeTxnId;
}
java
/**
 * Decides whether it is safe to start recovery and, if so, which txn id to
 * recover at.  Returns null when not yet safe: either some initiator has
 * never been heard from (still at the dummy sentinel), or the queue is
 * blocked awaiting ordering/safety information.
 *
 * @return recovery txn id (head txn, or the newest candidate when the queue
 *         is empty), or null if recovery cannot start yet
 */
public Long safeToRecover() {
    // Require at least one real message from every tracked initiator.
    boolean safe = true;
    for (LastInitiatorData data : m_initiatorData.values()) {
        final long lastSeenTxnId = data.m_lastSeenTxnId;
        if (lastSeenTxnId == DtxnConstants.DUMMY_LAST_SEEN_TXN_ID) {
            safe = false;
        }
    }
    if (!safe) {
        return null;
    }
    OrderableTransaction next = peek();
    if (next == null) {
        // no work - have heard from all initiators. use a heartbeat
        if (m_state == QueueState.BLOCKED_EMPTY) {
            return m_newestCandidateTransaction;
        }
        // waiting for some txn to be 2pc to this site.
        else if (m_state == QueueState.BLOCKED_SAFETY) {
            return null;
        }
        else if (m_state == QueueState.BLOCKED_ORDERING){
            return null;
        }
        // An empty queue in any other state is a bug; log and refuse recovery.
        m_recoveryLog.error("Unexpected RPQ state " + m_state + " when attempting to start recovery at " +
                " the source site. Consider killing the recovering node and trying again");
        return null; // unreachable
    }
    else {
        // bingo - have a real transaction to return as the recovery point
        return next.txnId;
    }
}
java
/**
 * Logs the request's user out by removing the cached authentication result
 * and invalidating the HTTP session.  A no-op when sessions are disabled or
 * the request has no session.
 */
public void unauthenticate(HttpServletRequest request) {
    if (HTTP_DONT_USE_SESSION) {
        return;
    }
    // getSession(false): do not create a session just to destroy it.
    final HttpSession session = request.getSession(false);
    if (session == null) {
        return;
    }
    session.removeAttribute(AUTH_USER_SESSION_KEY);
    session.invalidate();
}
java
public AuthenticationResult authenticate(HttpServletRequest request) { HttpSession session = null; AuthenticationResult authResult = null; if (!HTTP_DONT_USE_SESSION && !m_dontUseSession) { try { session = request.getSession(); if (session != null) { if (session.isNew()) { session.setMaxInactiveInterval(MAX_SESSION_INACTIVITY_SECONDS); } authResult = (AuthenticationResult )session.getAttribute(AUTH_USER_SESSION_KEY); } } catch (Exception ex) { //Use no session mode meaning whatever VMC sends as hashed password is used to authenticate. session = null; m_rate_limited_log.log(EstTime.currentTimeMillis(), Level.ERROR, ex, "Failed to get or create HTTP Session. authenticating user explicitely."); } } if (authResult == null) { authResult = getAuthenticationResult(request); if (!authResult.isAuthenticated()) { if (session != null) { session.removeAttribute(AUTH_USER_SESSION_KEY); } m_rate_limited_log.log("JSON interface exception: " + authResult.m_message, EstTime.currentTimeMillis()); } else { if (session != null) { //Cache the authResult in session so we dont authenticate again. session.setAttribute(AUTH_USER_SESSION_KEY, authResult); } } } return authResult; }
java
/**
 * Resolves a SQL function token to a FunctionSQL instance, checking the
 * regular function map first and the value function map second.  FUNC_VALUE
 * additionally requires a current domain in the compile context, which
 * becomes the function's data type.
 *
 * @return the function, or null if the token is unknown (or FUNC_VALUE with
 *         no current domain)
 */
public static FunctionSQL newSQLFunction(String token, CompileContext context) {
    // Try the regular map, then fall back to the value-function map.
    int id = regularFuncMap.get(token, -1);
    if (id == -1) {
        id = valueFuncMap.get(token, -1);
        if (id == -1) {
            return null; // unknown token
        }
    }
    final FunctionSQL function = new FunctionSQL(id);
    if (id != FUNC_VALUE) {
        return function;
    }
    // FUNC_VALUE is only meaningful inside a domain definition.
    if (context.currentDomain == null) {
        return null;
    }
    function.dataType = context.currentDomain;
    return function;
}
java
/** Applies a transaction to the in-memory tree; pure delegation to dataTree. */
public ProcessTxnResult processTxn(TxnHeader hdr, Record txn) {
    return dataTree.processTxn(hdr, txn);
}
java
/**
 * Returns the Stat of the node at {@code path}; pure delegation to dataTree.
 *
 * @throws KeeperException.NoNodeException if the node does not exist
 */
public Stat statNode(String path, ServerCnxn serverCnxn) throws KeeperException.NoNodeException {
    return dataTree.statNode(path, serverCnxn);
}
java
/**
 * Returns the node's data, filling {@code stat} and registering the optional
 * watcher; pure delegation to dataTree.
 *
 * @throws KeeperException.NoNodeException if the node does not exist
 */
public byte[] getData(String path, Stat stat, Watcher watcher) throws KeeperException.NoNodeException {
    return dataTree.getData(path, stat, watcher);
}
java
/**
 * Re-registers data/exists/child watches for a reconnecting client; pure
 * delegation to dataTree.  (relativeZxid presumably lets the tree fire
 * watches for changes the client missed — confirm in DataTree.setWatches.)
 */
public void setWatches(long relativeZxid, List<String> dataWatches, List<String> existWatches, List<String> childWatches, Watcher watcher) {
    dataTree.setWatches(relativeZxid, dataWatches, existWatches, childWatches, watcher);
}
java
/**
 * Returns the node's ACL list, filling {@code stat}; pure delegation to dataTree.
 *
 * @throws NoNodeException if the node does not exist
 */
public List<ACL> getACL(String path, Stat stat) throws NoNodeException {
    return dataTree.getACL(path, stat);
}
java
/**
 * Returns the node's children, filling {@code stat} and registering the
 * optional watcher; pure delegation to dataTree.
 *
 * @throws KeeperException.NoNodeException if the node does not exist
 */
public List<String> getChildren(String path, Stat stat, Watcher watcher) throws KeeperException.NoNodeException {
    return dataTree.getChildren(path, stat, watcher);
}
java
/**
 * Blocking take: returns the next task immediately if one is queued,
 * otherwise records a starvation interval and spin-waits for work.
 * Queue latency is fed to m_queueDepthTracker in both paths.
 *
 * @return the next task (never null)
 * @throws InterruptedException if interrupted while waiting for work
 */
public SiteTasker take() throws InterruptedException {
    SiteTasker task = m_tasks.poll();
    if (task == null) {
        // Empty queue: start measuring time spent idle with no work.
        m_starvationTracker.beginStarvation();
    } else {
        m_queueDepthTracker.pollUpdate(task.getQueueOfferTime());
        return task;
    }
    try {
        task = CoreUtils.queueSpinTake(m_tasks);
        // task is never null
        m_queueDepthTracker.pollUpdate(task.getQueueOfferTime());
        return task;
    } finally {
        // Close the starvation interval whether the spin take succeeded or threw.
        m_starvationTracker.endStarvation();
    }
}
java
/**
 * Non-blocking poll: returns the next queued task or null, recording queue
 * latency for any task handed out.
 */
public SiteTasker poll() {
    final SiteTasker next = m_tasks.poll();
    if (next == null) {
        return null;
    }
    m_queueDepthTracker.pollUpdate(next.getQueueOfferTime());
    return next;
}
java
/**
 * Opens a buffered writer on the file using the given charset.  Existing
 * content is truncated (FileOutputStream default).  The caller owns closing
 * the returned writer.
 *
 * @throws FileNotFoundException if the file cannot be opened for writing
 */
public static BufferedWriter newWriter(File file, Charset charset) throws FileNotFoundException {
    checkNotNull(file);
    checkNotNull(charset);
    return new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), charset));
}
java
/**
 * Writes the character sequence to the file in the given charset; the
 * {@code append} flag selects append vs truncate via modes().
 *
 * @throws IOException on any write failure
 */
private static void write(CharSequence from, File to, Charset charset, boolean append) throws IOException {
    asCharSink(to, charset, modes(append)).write(from);
}
java
/**
 * Copies all characters of the file (decoded with {@code charset}) into the
 * appendable; pure delegation to the CharSource view of the file.
 *
 * @throws IOException on any read/write failure
 */
public static void copy(File from, Charset charset, Appendable to) throws IOException {
    asCharSource(from, charset).copyTo(to);
}
java
/**
 * Tests whether two files contain the same bytes.  Identical or equal File
 * objects short-circuit to true; differing non-zero lengths short-circuit to
 * false; everything else falls back to a byte-for-byte comparison.
 *
 * @throws IOException on any read failure
 */
public static boolean equal(File file1, File file2) throws IOException {
    checkNotNull(file1);
    checkNotNull(file2);
    if (file1 == file2 || file1.equals(file2)) {
        return true;
    }
    /*
     * Some operating systems may return zero as the length for files denoting
     * system-dependent entities such as devices or pipes, so a zero length is
     * inconclusive — only differing *non-zero* lengths prove inequality.
     */
    final long len1 = file1.length();
    final long len2 = file2.length();
    final boolean lengthsConclusive = len1 != 0 && len2 != 0;
    if (lengthsConclusive && len1 != len2) {
        return false;
    }
    return asByteSource(file1).contentEquals(asByteSource(file2));
}
java