code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Serializes the given bind values to CSV and uploads them to the bind stage.
 * Does nothing if this uploader has already been closed.
 *
 * @param bindValues map of 1-based column index (as string) to binding DTO
 * @throws BindException if serialization or the PUT upload fails
 */
public void upload(Map<String, ParameterBindingDTO> bindValues) throws BindException {
  if (closed) {
    return; // already closed; nothing to upload
  }
  serializeBinds(bindValues);
  putBinds();
}
java
/**
 * Converts the bind map into local CSV files ready for upload: extracts the
 * per-column data, transposes it to rows, and writes the rows out.
 *
 * @param bindValues map of 1-based column index (as string) to binding DTO
 * @throws BindException if the bind map is malformed or a file write fails
 */
private void serializeBinds(Map<String, ParameterBindingDTO> bindValues) throws BindException {
  // column-major data extracted from the bind map
  final List<ColumnTypeDataPair> columnData = getColumnValues(bindValues);
  // transpose to row-major records and serialize them to disk
  writeRowsToCSV(buildRows(columnData));
}
java
private List<ColumnTypeDataPair> getColumnValues(Map<String, ParameterBindingDTO> bindValues) throws BindException { List<ColumnTypeDataPair> columns = new ArrayList<>(bindValues.size()); for (int i = 1; i <= bindValues.size(); i++) { // bindValues should have n entries with string keys 1 ... n and list values String key = Integer.toString(i); if (!bindValues.containsKey(key)) { throw new BindException( String.format("Bind map with %d columns should contain key \"%d\"", bindValues.size(), i), BindException.Type.SERIALIZATION); } ParameterBindingDTO value = bindValues.get(key); try { String type = value.getType(); List<String> list = (List<String>) value.getValue(); List<String> convertedList = new ArrayList<>(list.size()); if ("TIMESTAMP_LTZ".equals(type) || "TIMESTAMP_NTZ".equals(type)) { for (String e : list) { convertedList.add(synchronizedTimestampFormat(e)); } } else if ("DATE".equals(type)) { for (String e : list) { convertedList.add(synchronizedDateFormat(e)); } } else { convertedList = list; } columns.add(i - 1, new ColumnTypeDataPair(type, convertedList)); } catch (ClassCastException ex) { throw new BindException("Value in binding DTO could not be cast to a list", BindException.Type.SERIALIZATION); } } return columns; }
java
private List<String[]> buildRows(List<ColumnTypeDataPair> columns) throws BindException { List<String[]> rows = new ArrayList<>(); int numColumns = columns.size(); // columns should have binds if (columns.get(0).data.isEmpty()) { throw new BindException("No binds found in first column", BindException.Type.SERIALIZATION); } int numRows = columns.get(0).data.size(); // every column should have the same number of binds for (int i = 0; i < numColumns; i++) { int iNumRows = columns.get(i).data.size(); if (columns.get(i).data.size() != numRows) { throw new BindException( String.format("Column %d has a different number of binds (%d) than column 1 (%d)", i, iNumRows, numRows), BindException.Type.SERIALIZATION); } } for (int rowIdx = 0; rowIdx < numRows; rowIdx++) { String[] row = new String[numColumns]; for (int colIdx = 0; colIdx < numColumns; colIdx++) { row[colIdx] = columns.get(colIdx).data.get(rowIdx); } rows.add(row); } return rows; }
java
/**
 * Writes the CSV records to one or more gzip-compressed files, starting a new
 * file whenever the current one reaches {@code fileSize} bytes (of
 * uncompressed CSV data).
 *
 * @param rows row-major CSV records to serialize
 * @throws BindException if any file write fails
 */
private void writeRowsToCSV(List<String[]> rows) throws BindException {
  int numBytes;
  int rowNum = 0;
  int fileCount = 0;
  while (rowNum < rows.size()) {
    // file names are numbered 1, 2, 3, ...
    File file = getFile(++fileCount);
    try (OutputStream out = openFile(file)) {
      // until we reach the last row or the file is too big, write to the file
      numBytes = 0;
      while (numBytes < fileSize && rowNum < rows.size()) {
        byte[] csv = createCSVRecord(rows.get(rowNum));
        numBytes += csv.length;
        out.write(csv);
        rowNum++;
      }
    } catch (IOException ex) {
      throw new BindException(
          String.format("Exception encountered while writing to file: %s", ex.getMessage()),
          BindException.Type.SERIALIZATION);
    }
  }
}
java
/**
 * Opens a gzip-compressed output stream over the given file.
 *
 * @param file destination file; created/truncated on open
 * @return a stream that gzip-compresses everything written to it
 * @throws BindException if the file or compressor cannot be created
 */
private OutputStream openFile(File file) throws BindException {
  try {
    FileOutputStream rawOut = new FileOutputStream(file);
    return new GZIPOutputStream(rawOut);
  } catch (IOException ex) {
    throw new BindException(
        String.format("Failed to create output file %s: %s", file.toString(), ex.getMessage()),
        BindException.Type.SERIALIZATION);
  }
}
java
/**
 * Encodes one row as a UTF-8 CSV record: comma-separated escaped fields
 * terminated by a newline.
 *
 * @param data field values for one row
 * @return the UTF-8 bytes of the CSV line, including the trailing '\n'
 */
private byte[] createCSVRecord(String[] data) {
  StringBuilder record = new StringBuilder(1024);
  String separator = "";
  for (String field : data) {
    record.append(separator).append(SnowflakeType.escapeForCSV(field));
    separator = ","; // comma before every field after the first
  }
  record.append('\n');
  return record.toString().getBytes(UTF_8);
}
java
/**
 * Builds the PUT statement for uploading the bind files to the stage.
 *
 * @param bindDir local directory holding the serialized bind files
 * @param stagePath destination path on the stage
 * @return the formatted PUT statement, with backslashes doubled so Windows
 *         paths survive SQL string parsing
 */
private String getPutStmt(String bindDir, String stagePath) {
  String stmt = String.format(PUT_STMT, bindDir, File.separator, stagePath);
  // escape backslashes for the SQL parser (each '\' becomes '\\')
  return stmt.replaceAll("\\\\", "\\\\\\\\");
}
java
private void putBinds() throws BindException { createStageIfNeeded(); String putStatement = getPutStmt(bindDir.toString(), stagePath); for (int i = 0; i < PUT_RETRY_COUNT; i++) { try { SFStatement statement = new SFStatement(session); SFBaseResultSet putResult = statement.execute(putStatement, null, null); putResult.next(); // metadata is 0-based, result set is 1-based int column = putResult.getMetaData().getColumnIndex( SnowflakeFileTransferAgent.UploadColumns.status.name()) + 1; String status = putResult.getString(column); if (SnowflakeFileTransferAgent.ResultStatus.UPLOADED.name().equals(status)) { return; // success! } logger.debug("PUT statement failed. The response had status %s.", status); } catch (SFException | SQLException ex) { logger.debug("Exception encountered during PUT operation. ", ex); } } // if we haven't returned (on success), throw exception throw new BindException("Failed to PUT files to stage.", BindException.Type.UPLOAD); }
java
/**
 * Creates the temporary stage used for array binds if this session doesn't
 * already have one. Uses double-checked locking on the session so only one
 * thread issues the CREATE STAGE statement.
 *
 * @throws BindException if the stage cannot be created; in that case the
 *                       array-bind threshold is zeroed so the optimization is
 *                       disabled and the failure is not retried on every bind
 */
private void createStageIfNeeded() throws BindException {
  // fast path: stage already exists, no locking needed
  if (session.getArrayBindStage() != null) {
    return;
  }
  synchronized (session) {
    // another thread may have created the session by the time we enter this block
    if (session.getArrayBindStage() == null) {
      try {
        SFStatement statement = new SFStatement(session);
        statement.execute(CREATE_STAGE_STMT, null, null);
        session.setArrayBindStage(STAGE_NAME);
      } catch (SFException | SQLException ex) {
        // to avoid repeated failures to create stage, disable array bind stage
        // optimization if we fail to create stage for some reason
        session.setArrayBindStageThreshold(0);
        throw new BindException(
            String.format("Failed to create temporary stage for array binds. %s",
                ex.getMessage()),
            BindException.Type.UPLOAD);
      }
    }
  }
}
java
/**
 * Returns the total number of bound values across all columns of an array
 * bind, or 0 if the map does not represent an array bind.
 *
 * @param bindValues map of column key to binding DTO (may be null)
 * @return columns * rows for an array bind; 0 otherwise
 */
public static int arrayBindValueCount(Map<String, ParameterBindingDTO> bindValues) {
  if (!isArrayBind(bindValues)) {
    return 0;
  }
  // all columns hold the same number of rows, so sampling one is enough
  ParameterBindingDTO sample = bindValues.values().iterator().next();
  List<String> sampleValues = (List<String>) sample.getValue();
  return bindValues.size() * sampleValues.size();
}
java
/**
 * Reports whether the given bind map represents an array bind, i.e. a
 * non-empty map whose values hold lists rather than scalars.
 *
 * @param bindValues map of column key to binding DTO (may be null)
 * @return true if this is an array bind
 */
public static boolean isArrayBind(Map<String, ParameterBindingDTO> bindValues) {
  if (bindValues == null || bindValues.isEmpty()) {
    return false;
  }
  // sample one entry: all entries are the same shape
  ParameterBindingDTO sample = bindValues.values().iterator().next();
  return sample.getValue() instanceof List;
}
java
public static StorageObjectSummary createFromS3ObjectSummary(S3ObjectSummary objSummary) { return new StorageObjectSummary( objSummary.getBucketName(), objSummary.getKey(), // S3 ETag is not always MD5, but since this code path is only // used in skip duplicate files in PUT command, It's not // critical to guarantee that it's MD5 objSummary.getETag(), objSummary.getSize() ); }
java
/**
 * Builds a StorageObjectSummary from an Azure blob listing entry.
 *
 * @param listBlobItem the Azure listing entry; must reference a CloudBlob
 * @return the provider-neutral summary (container, key, md5, size)
 * @throws StorageProviderException if the blob's properties cannot be read
 */
public static StorageObjectSummary createFromAzureListBlobItem(ListBlobItem listBlobItem)
    throws StorageProviderException {
  String location, key, md5;
  long size;
  // Retrieve the BLOB properties we need for the summary. Azure Storage
  // stores metadata inside each BLOB, so listBlobItem points us at the
  // underlying BLOB and we read the properties from it. The Storage Client
  // can fail during this process, hence the get calls are wrapped in
  // try/catch to translate possible exceptions.
  try {
    location = listBlobItem.getContainer().getName();
    CloudBlob cloudBlob = (CloudBlob) listBlobItem;
    key = cloudBlob.getName();
    BlobProperties blobProperties = cloudBlob.getProperties();
    // the content-md5 property is not always the actual md5 of the file, but
    // here it is only used to skip files on PUT, hence that is acceptable
    md5 = convertBase64ToHex(blobProperties.getContentMD5());
    size = blobProperties.getLength();
  } catch (URISyntaxException | StorageException ex) {
    // This should only happen if we somehow got here with an invalid URI
    // (which should never happen) or the Storage service reported an error.
    // Unlike S3, Azure fetches metadata from the BLOB itself, and it's a
    // lazy operation.
    throw new StorageProviderException(ex);
  }
  return new StorageObjectSummary(location, key, md5, size);
}
java
/**
 * Reports whether this session uses the default Snowflake authenticator:
 * either nothing is configured (no authenticator and no key-pair), or the
 * authenticator is explicitly set to SNOWFLAKE.
 *
 * @return true if the Snowflake internal authenticator applies
 */
private boolean isSnowflakeAuthenticator() {
  String authenticator =
      (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR);
  PrivateKey privateKey =
      (PrivateKey) connectionPropertiesMap.get(SFSessionProperty.PRIVATE_KEY);
  // nothing configured at all -> default to the Snowflake authenticator
  if (authenticator == null && privateKey == null) {
    return true;
  }
  return ClientAuthnDTO.AuthenticatorType.SNOWFLAKE.name().equalsIgnoreCase(authenticator);
}
java
/**
 * Reports whether this session is configured to authenticate via an
 * external browser (SSO).
 *
 * @return true if the EXTERNALBROWSER authenticator is configured
 */
boolean isExternalbrowserAuthenticator() {
  String configured =
      (String) connectionPropertiesMap.get(SFSessionProperty.AUTHENTICATOR);
  String externalBrowser = ClientAuthnDTO.AuthenticatorType.EXTERNALBROWSER.name();
  return externalBrowser.equalsIgnoreCase(configured);
}
java
/**
 * Renews the session using the stored master token.
 * <p>
 * Skips the renewal when another thread already refreshed the token, i.e.
 * when the current session token no longer matches {@code prevSessionToken}.
 *
 * @param prevSessionToken the token the caller held when it decided to renew;
 *                         used to detect a concurrent renewal
 * @throws SFException if building or sending the renewal request fails
 * @throws SnowflakeSQLException if the server rejects the renewal
 */
synchronized void renewSession(String prevSessionToken)
    throws SFException, SnowflakeSQLException {
  if (sessionToken != null && !sessionToken.equals(prevSessionToken)) {
    logger.debug("not renew session because session token has not been updated.");
    return;
  }
  SessionUtil.LoginInput loginInput = new SessionUtil.LoginInput();
  loginInput.setServerUrl(
      (String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL))
      .setSessionToken(sessionToken)
      .setMasterToken(masterToken)
      .setIdToken(idToken)
      .setLoginTimeout(loginTimeout)
      .setDatabaseName(this.getDatabase())
      .setSchemaName(this.getSchema())
      .setRole(this.getRole())
      .setWarehouse(this.getWarehouse());
  SessionUtil.LoginOutput loginOutput = SessionUtil.renewSession(loginInput);
  // if the token request hit an issue, re-sync the current objects
  // (role/warehouse/database/schema) with the server before adopting tokens
  if (loginOutput.isUpdatedByTokenRequestIssue()) {
    setCurrentObjects(loginInput, loginOutput);
  }
  sessionToken = loginOutput.getSessionToken();
  masterToken = loginOutput.getMasterToken();
}
java
/**
 * Registers this session with the heartbeat background thread so its tokens
 * are kept alive. No-op (with a debug log) when heartbeats are disabled or
 * there is no master token yet.
 */
protected void startHeartbeatForThisSession() {
  if (enableHeartbeat && !Strings.isNullOrEmpty(masterToken)) {
    // parameterized logging for consistency with the rest of the file and to
    // avoid eager string concatenation when debug is disabled
    logger.debug("start heartbeat, master token validity: {}",
        masterTokenValidityInSeconds);
    HeartbeatBackground.getInstance().addSession(this,
        masterTokenValidityInSeconds, this.heartbeatFrequency);
  } else {
    logger.debug("heartbeat not enabled for the session");
  }
}
java
/**
 * Unregisters this session from the heartbeat background thread. No-op
 * (with a debug log) when heartbeats were never enabled for this session.
 */
protected void stopHeartbeatForThisSession() {
  // only sessions that were eligible for heartbeats were ever registered
  if (!enableHeartbeat || Strings.isNullOrEmpty(masterToken)) {
    logger.debug("heartbeat not enabled for the session");
    return;
  }
  logger.debug("stop heartbeat");
  HeartbeatBackground.getInstance().removeSession(this);
}
java
/**
 * Sends one heartbeat request to the server to keep the session alive.
 * <p>
 * If the server reports session expiration, renews the session (guarded by
 * the token snapshot so a concurrent renewal is not repeated) and retries
 * the heartbeat. Any other server error is raised; unexpected failures are
 * wrapped into an incident and rethrown as SFException.
 *
 * @throws SFException on unexpected internal failure
 * @throws SQLException if the server returns an error response
 */
protected void heartbeat() throws SFException, SQLException {
  logger.debug(" public void heartbeat()");
  if (isClosed) {
    return;
  }
  HttpPost postRequest = null;
  String requestId = UUID.randomUUID().toString();
  boolean retry = false;
  // the loop for retrying if it runs into session expiration
  do {
    try {
      URIBuilder uriBuilder;
      uriBuilder = new URIBuilder(
          (String) connectionPropertiesMap.get(SFSessionProperty.SERVER_URL));
      uriBuilder.addParameter(SFSession.SF_QUERY_REQUEST_ID, requestId);
      uriBuilder.setPath(SF_PATH_SESSION_HEARTBEAT);
      postRequest = new HttpPost(uriBuilder.build());
      // remember the session token in case it expires we need to renew
      // the session only when no other thread has renewed it
      String prevSessionToken = sessionToken;
      postRequest.setHeader(SF_HEADER_AUTHORIZATION,
          SF_HEADER_SNOWFLAKE_AUTHTYPE + " "
              + SF_HEADER_TOKEN_TAG + "=\"" + prevSessionToken + "\"");
      logger.debug("Executing heartbeat request: {}", postRequest.toString());
      // the following will retry transient network issues
      String theResponse = HttpUtil.executeRequest(postRequest,
          SF_HEARTBEAT_TIMEOUT, 0, null);
      JsonNode rootNode;
      logger.debug("connection heartbeat response: {}", theResponse);
      rootNode = mapper.readTree(theResponse);
      // check the response to see if it is session expiration response
      if (rootNode != null
          && (Constants.SESSION_EXPIRED_GS_CODE == rootNode.path("code").asInt())) {
        logger.debug("renew session and retry");
        this.renewSession(prevSessionToken);
        retry = true;
        continue;
      }
      // raises SQLException if the response carries any other error
      SnowflakeUtil.checkErrorAndThrowException(rootNode);
      // success
      retry = false;
    } catch (Throwable ex) {
      // for snowflake exception, just rethrow it
      if (ex instanceof SnowflakeSQLException) {
        throw (SnowflakeSQLException) ex;
      }
      logger.error("unexpected exception", ex);
      // anything else: record an incident, then surface as SFException
      throw (SFException) IncidentUtil.generateIncidentV2WithException(
          this,
          new SFException(ErrorCode.INTERNAL_ERROR,
              IncidentUtil.oneLiner("unexpected exception", ex)),
          null,
          requestId);
    }
  } while (retry);
}
java
/**
 * Re-establishes the session's current objects (role, warehouse, database,
 * schema) on the server after a token refresh, then copies the resulting
 * state back into {@code loginOutput} so the caller sees the refreshed
 * context.
 *
 * @param loginInput the login request carrying the desired current objects
 * @param loginOutput the login response to update with the session's state
 */
void setCurrentObjects(
    SessionUtil.LoginInput loginInput, SessionUtil.LoginOutput loginOutput) {
  // adopt the new token first so the USE commands below run on it
  this.sessionToken = loginOutput.sessionToken; // used to run the commands.
  runInternalCommand(
      "USE ROLE IDENTIFIER(?)", loginInput.getRole());
  runInternalCommand(
      "USE WAREHOUSE IDENTIFIER(?)", loginInput.getWarehouse());
  runInternalCommand(
      "USE DATABASE IDENTIFIER(?)", loginInput.getDatabaseName());
  runInternalCommand(
      "USE SCHEMA IDENTIFIER(?)", loginInput.getSchemaName());
  // This ensures the session returns the current objects and refresh
  // the local cache.
  SFBaseResultSet result = runInternalCommand("SELECT ?", "1");
  // refresh the current objects
  loginOutput.setSessionDatabase(this.database);
  loginOutput.setSessionSchema(this.schema);
  loginOutput.setSessionWarehouse(this.warehouse);
  loginOutput.setSessionRole(this.role);
  loginOutput.setIdToken(loginInput.getIdToken());
  // no common parameter is updated.
  if (result != null) {
    loginOutput.setCommonParams(result.parameters);
  }
}
java
private void executeImmediate(String stmtText) throws SQLException { // execute the statement and auto-close it as well try (final Statement statement = this.createStatement()) { statement.execute(stmtText); } }
java
/**
 * Creates a forward-only, read-only Statement and tracks it so it can be
 * closed when this connection closes.
 *
 * @return the new Statement
 * @throws SQLException if the connection is already closed
 */
@Override
public Statement createStatement() throws SQLException {
  raiseSQLExceptionIfConnectionIsClosed();
  Statement newStmt =
      createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
  // remember the statement so the connection can close it on shutdown
  openStatements.add(newStmt);
  return newStmt;
}
java
/**
 * Sets the transaction isolation level. Only TRANSACTION_NONE and
 * TRANSACTION_READ_COMMITTED are supported.
 *
 * @param level a java.sql.Connection isolation constant
 * @throws SQLException if the connection is closed
 * @throws SQLFeatureNotSupportedException for any unsupported level
 */
@Override
public void setTransactionIsolation(int level) throws SQLException {
  logger.debug(
      "void setTransactionIsolation(int level), level = {}", level);
  raiseSQLExceptionIfConnectionIsClosed();
  switch (level) {
    case Connection.TRANSACTION_NONE:
    case Connection.TRANSACTION_READ_COMMITTED:
      this.transactionIsolation = level;
      break;
    default:
      throw new SQLFeatureNotSupportedException(
          "Transaction Isolation " + level + " not supported.",
          FEATURE_UNSUPPORTED.getSqlState(),
          FEATURE_UNSUPPORTED.getMessageCode());
  }
}
java
/**
 * Downloads a single staged file as an InputStream by issuing a GET command
 * with a placeholder local path (no file is written to disk).
 *
 * @param stageName stage holding the file; "@" is prepended if missing
 * @param sourceFileName path of the file within the stage; a leading "/" is
 *                       stripped
 * @param decompress if true, wrap the stream in a GZIPInputStream
 * @return a stream over the (optionally decompressed) file contents
 * @throws SQLException if arguments are empty, the transfer fails, or the
 *                      gzip wrapper cannot be created
 */
public InputStream downloadStream(String stageName, String sourceFileName,
                                  boolean decompress) throws SQLException {
  logger.debug("download data to stream: stageName={}"
      + ", sourceFileName={}", stageName, sourceFileName);
  if (Strings.isNullOrEmpty(stageName)) {
    throw new SnowflakeSQLException(
        SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(),
        "stage name is null or empty");
  }
  if (Strings.isNullOrEmpty(sourceFileName)) {
    throw new SnowflakeSQLException(
        SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(),
        "source file name is null or empty");
  }
  SnowflakeStatementV1 stmt = new SnowflakeStatementV1(
      this,
      ResultSet.TYPE_FORWARD_ONLY,
      ResultSet.CONCUR_READ_ONLY,
      ResultSet.CLOSE_CURSORS_AT_COMMIT);
  // assemble: get @<stage>/<file> file:///tmp/ ...
  StringBuilder getCommand = new StringBuilder();
  getCommand.append("get ");
  if (!stageName.startsWith("@")) {
    getCommand.append("@");
  }
  getCommand.append(stageName);
  getCommand.append("/");
  if (sourceFileName.startsWith("/")) {
    sourceFileName = sourceFileName.substring(1);
  }
  getCommand.append(sourceFileName);
  //this is a fake path, used to form Get query and retrieve stage info,
  //no file will be downloaded to this location
  getCommand.append(" file:///tmp/ /*jdbc download stream*/");
  SnowflakeFileTransferAgent transferAgent =
      new SnowflakeFileTransferAgent(getCommand.toString(), sfSession,
          stmt.getSfStatement());
  InputStream stream = transferAgent.downloadStream(sourceFileName);
  if (decompress) {
    try {
      return new GZIPInputStream(stream);
    } catch (IOException ex) {
      throw new SnowflakeSQLException(
          SqlState.INTERNAL_ERROR,
          ErrorCode.INTERNAL_ERROR.getMessageCode(),
          ex.getMessage());
    }
  } else {
    return stream;
  }
}
java
/**
 * Wraps the given stream so reads are decrypted on the fly.
 * <p>
 * The per-file key (keyBase64) is itself encrypted with the query-stage
 * master key from {@code encMat}; it is unwrapped first, then used with the
 * supplied IV to decrypt the data stream.
 *
 * @param inputStream the encrypted source stream
 * @param keyBase64 base64-encoded, master-key-encrypted file key
 * @param ivBase64 base64-encoded initialization vector
 * @param encMat holds the base64-encoded query-stage master key
 * @return a stream yielding the decrypted bytes
 */
public static InputStream decryptStream(InputStream inputStream,
                                        String keyBase64,
                                        String ivBase64,
                                        RemoteStoreFileEncryptionMaterial encMat)
    throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidKeyException,
           BadPaddingException, IllegalBlockSizeException,
           InvalidAlgorithmParameterException {
  byte[] decodedKey = Base64.decode(encMat.getQueryStageMasterKey());
  byte[] keyBytes = Base64.decode(keyBase64);
  byte[] ivBytes = Base64.decode(ivBase64);
  // unwrap the per-file key with the query-stage master key
  SecretKey queryStageMasterKey =
      new SecretKeySpec(decodedKey, 0, decodedKey.length, AES);
  Cipher keyCipher = Cipher.getInstance(KEY_CIPHER);
  keyCipher.init(Cipher.DECRYPT_MODE, queryStageMasterKey);
  byte[] fileKeyBytes = keyCipher.doFinal(keyBytes);
  // BUGFIX: size the file key by its own length, not the master key's
  // (decodedKey.length). The two are usually equal, but if they ever differ
  // the old code would truncate the file key or throw out-of-bounds.
  SecretKey fileKey = new SecretKeySpec(fileKeyBytes, 0, fileKeyBytes.length, AES);
  Cipher dataCipher = Cipher.getInstance(FILE_CIPHER);
  IvParameterSpec iv = new IvParameterSpec(ivBytes);
  dataCipher.init(Cipher.DECRYPT_MODE, fileKey, iv);
  return new CipherInputStream(inputStream, dataCipher);
}
java
synchronized void startFlusher() { // Create a new scheduled executor service with a threadfactory that // creates daemonized threads; this way if the user doesn't exit nicely // the JVM Runtime won't hang flusher = Executors.newScheduledThreadPool(1, new ThreadFactory() { @Override public Thread newThread(Runnable r) { Thread t = Executors.defaultThreadFactory().newThread(r); t.setDaemon(true); return t; } }); flusher.scheduleWithFixedDelay(new QueueFlusher(), 0, flushPeriodMs, TimeUnit.MILLISECONDS); }
java
/**
 * Dumps the in-memory log buffer to a (by default gzip-compressed) file named
 * after the given identifier, clearing the buffer in the process.
 *
 * @param identifier string used in the dump file name (e.g. an incident id);
 *                   if null, a generated dump-file id is used
 */
public void dumpLogBuffer(String identifier) {
  final ArrayList<LogRecord> logBufferCopy;
  final PrintWriter logDumper;
  final OutputStream outStream;
  Formatter formatter = this.getFormatter();
  // Check if compression of dump file is enabled
  boolean disableCompression =
      System.getProperty(DISABLE_DUMP_COMPR_PROP) != null;
  // If no identifying factor (eg, an incident id) was provided, get one
  if (identifier == null) {
    identifier = EventUtil.getDumpFileId();
  }
  // Do some sanity checking to make sure we're not flooding the user's
  // disk with dump files
  cleanupSfDumps(true);
  String logDumpPath = logDumpPathPrefix + "/" + LOG_DUMP_FILE_NAME
      + identifier + LOG_DUMP_FILE_EXT;
  if (!disableCompression) {
    logDumpPath += LOG_DUMP_COMP_EXT;
  }
  logger.debug("EventHandler dumping log buffer to {}", logDumpPath);
  // Copy logBuffer because this is potentially long running.
  synchronized (this) {
    logBufferCopy = new ArrayList<>(logBuffer);
    logBuffer.clear();
  }
  File outputFile = new File(logDumpPath);
  /*
   * Because log files could potentially be very large, we should never open
   * them in append mode. It's rare that this should happen anyways...
   */
  try {
    // If the dump path doesn't already exist, create it.
    if (outputFile.getParentFile() != null) {
      outputFile.getParentFile().mkdirs();
    }
    outStream = disableCompression
        ? new FileOutputStream(logDumpPath, false)
        : new GZIPOutputStream(new FileOutputStream(logDumpPath, false));
    logDumper = new PrintWriter(outStream, true);
  } catch (IOException exc) {
    // Not much to do here, can't dump logs so exit out.
    logger.debug("Log dump failed, exception: {}", exc.getMessage());
    return;
  }
  // Iterate over log entries, format them, then dump them.
  // NOTE(review): if formatting/writing throws a runtime exception, the
  // dumper is never closed — confirm whether that is acceptable here.
  for (LogRecord entry : logBufferCopy) {
    logDumper.write(formatter != null
        ? formatter.format(entry)
        : entry.getMessage());
  }
  // Clean up (PrintWriter swallows I/O errors; flush before close)
  logDumper.flush();
  logDumper.close();
}
java
/**
 * Removes expired dump files and bounds the dump directory's total size and
 * file count.
 *
 * @param deleteOldest if true and the file-count cap is reached, delete one
 *                     file to make room for a new dump
 */
protected void cleanupSfDumps(boolean deleteOldest) {
  // Check what the maximum number of dumpfiles and the max allowable
  // aggregate dump file size is.
  int maxDumpFiles = System.getProperty(MAX_NUM_DUMP_FILES_PROP) != null
      ? Integer.valueOf(System.getProperty(MAX_NUM_DUMP_FILES_PROP))
      : DEFAULT_MAX_DUMP_FILES;
  int maxDumpDirSizeMB = System.getProperty(MAX_SIZE_DUMPS_MB_PROP) != null
      ? Integer.valueOf(System.getProperty(MAX_SIZE_DUMPS_MB_PROP))
      : DEFAULT_MAX_DUMPDIR_SIZE_MB;
  File dumpDir = new File(logDumpPathPrefix);
  long dirSizeBytes = 0;
  // listFiles() is null if the directory doesn't exist or isn't readable
  if (dumpDir.listFiles() == null) {
    return;
  }
  // Keep a sorted list of files by size as we go in case we need to
  // delete some. The comparator deliberately never returns 0 so that
  // distinct files of equal size are all retained by the TreeSet.
  TreeSet<File> fileList = new TreeSet<>(new Comparator<File>() {
    @Override
    public int compare(File a, File b) {
      return a.length() < b.length() ? -1 : 1;
    }
  });
  // Loop over files in this directory and get rid of old ones
  // while accumulating the total size. Non-dump files are skipped; expired
  // dump files are deleted in the condition itself (short-circuit &&).
  for (File file : dumpDir.listFiles()) {
    if ((!file.getName().startsWith(LOG_DUMP_FILE_NAME)
         && !file.getName().startsWith(IncidentUtil.INC_DUMP_FILE_NAME))
        || (System.currentTimeMillis() - file.lastModified() > FILE_EXPN_TIME_MS
            && file.delete())) {
      continue;
    }
    dirSizeBytes += file.length();
    fileList.add(file);
  }
  // If we're exceeding our max allotted disk usage, cut some stuff out;
  // else if we need to make space for a new dump delete the oldest.
  if (dirSizeBytes >= ((long) maxDumpDirSizeMB << 20)) {
    // While we take up more than half the allotted disk usage, keep
    // deleting, smallest files first (<< 19 is half of << 20).
    for (File file : fileList) {
      if (dirSizeBytes < ((long) maxDumpDirSizeMB << 19)) {
        break;
      }
      long victimSize = file.length();
      if (file.delete()) {
        dirSizeBytes -= victimSize;
      }
    }
  } else if (deleteOldest && fileList.size() >= maxDumpFiles) {
    // NOTE(review): fileList is ordered by size, so first() is the smallest
    // file, not the oldest as the comment above suggests — confirm intent.
    fileList.first().delete();
  }
}
java
/**
 * Decides whether an incident with the given signature should be suppressed.
 * <p>
 * After INCIDENT_THROTTLE_LIMIT_PER_HR occurrences, the signature is
 * throttled for THROTTLE_DURATION_HRS hours; the throttle is lifted lazily
 * the next time the signature is seen after that window.
 *
 * @param signature de-duplication key for the incident
 * @return true if this occurrence should be throttled (not reported)
 */
private synchronized boolean needsToThrottle(String signature) {
  AtomicInteger sigCount;
  // Are we already throttling this signature?
  if (throttledIncidents.containsKey(signature)) {
    // Lazily check if it's time to unthrottle
    if (throttledIncidents.get(signature).plusHours(THROTTLE_DURATION_HRS).
        compareTo(DateTime.now()) <= 0) {
      // Start counting the # of times we've seen this again & stop throttling.
      throttledIncidents.remove(signature);
      incidentCounter.put(signature, new AtomicInteger(1));
      return false;
    }
    return true;
  }
  sigCount = incidentCounter.get(signature);
  if (sigCount == null) {
    // If there isn't an entry to track this signature, make one.
    incidentCounter.put(signature, sigCount = new AtomicInteger(0));
  } else if (sigCount.get() + 1 >= INCIDENT_THROTTLE_LIMIT_PER_HR) {
    // We've hit the limit so throttle.
    incidentCounter.remove(signature);
    throttledIncidents.put(signature, DateTime.now());
    return true;
  }
  sigCount.incrementAndGet();
  return false;
}
java
/**
 * Starts the loader: validates configuration, initializes formats and
 * queues, and — on the first start call only — optionally begins a
 * transaction, truncates the target table, and runs the "before" SQL.
 * Configuration or SQL failures abort the loader rather than throwing.
 */
@Override
public void start() {
  LOGGER.debug("Start Loading");
  // validate parameters
  validateParameters();
  if (_op == null) {
    this.abort(new ConnectionError("Loader started with no operation"));
    return;
  }
  initDateFormats();
  initQueues();
  if (_is_first_start_call) {
    // is this the first start call?
    try {
      if (_startTransaction) {
        LOGGER.debug("Begin Transaction");
        // NOTE(review): the Statement created here (and below) is never
        // closed — confirm whether the connection cleans these up.
        _processConn.createStatement().execute("begin transaction");
      } else {
        LOGGER.debug("No Transaction started");
      }
    } catch (SQLException ex) {
      abort(new Loader.ConnectionError("Failed to start Transaction",
          Utils.getCause(ex)));
    }
    if (_truncate) {
      truncateTargetTable();
    }
    try {
      if (_before != null) {
        LOGGER.debug("Running Execute Before SQL");
        _processConn.createStatement().execute(_before);
      }
    } catch (SQLException ex) {
      abort(new Loader.ConnectionError(
          String.format("Execute Before SQL failed to run: %s", _before),
          Utils.getCause(ex)));
    }
  }
}
java
/**
 * Drains the loader's queues and waits for the worker stages to finish.
 * <p>
 * Signals the data-loading thread to stop with an empty-array sentinel,
 * joins it (interrupting after 10s), queues the final stage, and joins the
 * put/process workers. If the loader was aborted earlier, rethrows the
 * recorded abort cause.
 */
private void flushQueues() {
  // Terminate data loading thread.
  LOGGER.debug("Flush Queues");
  try {
    // empty byte[] is the termination sentinel for the loading thread
    _queueData.put(new byte[0]);
    _thread.join(10000);
    if (_thread.isAlive()) {
      _thread.interrupt();
    }
  } catch (Exception ex) {
    String msg = "Failed to join StreamLoader queue: " + ex.getMessage();
    LOGGER.error(msg, ex);
    throw new DataError(msg, Utils.getCause(ex));
  }
  // Put last stage on queue
  terminate();
  // wait for the processing to finish
  _put.join();
  _process.join();
  if (_aborted.get()) {
    // Loader was aborted due to an exception; it was rolled back at that
    // time, so just surface the original cause here.
    throw _abortCause;
  }
}
java
@Override public void resetOperation(Operation op) { LOGGER.debug("Reset Loader"); if (op.equals(_op)) { //no-op return; } LOGGER.debug("Operation is changing from {} to {}", _op, op); _op = op; if (_stage != null) { try { queuePut(_stage); } catch (InterruptedException ex) { LOGGER.error(_stage.getId(), ex); } } _stage = new BufferStage(this, _op, _csvFileBucketSize, _csvFileSize); }
java
/**
 * Points this cache at a different backing file, updating the derived
 * directory and base-name fields to match.
 *
 * @param newCacheFile the file to use as the cache from now on
 */
void overrideCacheFile(File newCacheFile) {
  this.cacheFile = newCacheFile;
  // derive directory and base name from the new location
  File parentDir = newCacheFile.getParentFile();
  this.cacheDir = parentDir;
  this.baseCacheFileName = newCacheFile.getName();
}
java
/**
 * Reads the cache file and returns its JSON contents.
 * <p>
 * Returns null — never throws — when there is no cache file configured, the
 * lock file check fails, the file doesn't exist, or reading/parsing fails.
 *
 * @return the parsed JSON tree, or null if the cache is unavailable
 */
JsonNode readCacheFile() {
  if (cacheFile == null || !this.checkCacheLockFile()) {
    // no cache or the cache is not valid.
    return null;
  }
  try {
    if (!cacheFile.exists()) {
      LOGGER.debug(
          "Cache file doesn't exists. File: {}", cacheFile);
      return null;
    }
    // read with an explicit encoding; reader auto-closes
    try (Reader reader = new InputStreamReader(
        new FileInputStream(cacheFile), DEFAULT_FILE_ENCODING)) {
      return OBJECT_MAPPER.readTree(reader);
    }
  } catch (IOException ex) {
    // best-effort cache: a read failure just means no cached data
    LOGGER.debug(
        "Failed to read the cache file. No worry. File: {}, Err: {}",
        cacheFile, ex);
  }
  return null;
}
java
private boolean tryLockCacheFile() { int cnt = 0; boolean locked = false; while (cnt < 100 && !(locked = lockCacheFile())) { try { Thread.sleep(100); } catch (InterruptedException ex) { // doesn't matter } ++cnt; } if (!locked) { LOGGER.debug("Failed to lock the cache file."); } return locked; }
java
private void verifyLocalFilePath(String localFilePathFromGS) throws SnowflakeSQLException { if (command == null) { logger.error("null command"); return; } if (command.indexOf(FILE_PROTOCOL) < 0) { logger.error( "file:// prefix not found in command: {}", command); return; } int localFilePathBeginIdx = command.indexOf(FILE_PROTOCOL) + FILE_PROTOCOL.length(); boolean isLocalFilePathQuoted = (localFilePathBeginIdx > FILE_PROTOCOL.length()) && (command.charAt(localFilePathBeginIdx - 1 - FILE_PROTOCOL.length()) == '\''); // the ending index is exclusive int localFilePathEndIdx = 0; String localFilePath = ""; if (isLocalFilePathQuoted) { // look for the matching quote localFilePathEndIdx = command.indexOf("'", localFilePathBeginIdx); if (localFilePathEndIdx > localFilePathBeginIdx) { localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx); } // unescape backslashes to match the file name from GS localFilePath = localFilePath.replaceAll("\\\\\\\\", "\\\\"); } else { // look for the first space or new line or semi colon List<Integer> indexList = new ArrayList<>(); char[] delimiterChars = {' ', '\n', ';'}; for (int i = 0; i < delimiterChars.length; i++) { int charIndex = command.indexOf(delimiterChars[i], localFilePathBeginIdx); if (charIndex != -1) { indexList.add(charIndex); } } localFilePathEndIdx = indexList.isEmpty() ? -1 : Collections.min(indexList); if (localFilePathEndIdx > localFilePathBeginIdx) { localFilePath = command.substring(localFilePathBeginIdx, localFilePathEndIdx); } else if (localFilePathEndIdx == -1) { localFilePath = command.substring(localFilePathBeginIdx); } } if (!localFilePath.isEmpty() && !localFilePath.equals(localFilePathFromGS)) { throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR, ErrorCode.INTERNAL_ERROR.getMessageCode(), "Unexpected local file path from GS. 
From GS: " + localFilePathFromGS + ", expected: " + localFilePath); } else if (localFilePath.isEmpty()) { logger.debug( "fail to parse local file path from command: {}", command); } else { logger.trace( "local file path from GS matches local parsing: {}", localFilePath); } }
java
/**
 * Uploads the single source stream to the stage on a one-thread executor and
 * waits for completion. Only valid for UPLOAD commands; a DOWNLOAD command
 * reaching here is an internal error. The executor is always torn down in
 * the finally block.
 *
 * @throws SnowflakeSQLException on internal error or if the wait is
 *                               interrupted
 */
private void uploadStream() throws SnowflakeSQLException {
  try {
    threadExecutor = SnowflakeUtil.createDefaultExecutorService(
        "sf-stream-upload-worker-", 1);
    RemoteStoreFileEncryptionMaterial encMat = encryptionMaterial.get(0);
    if (commandType == CommandType.UPLOAD) {
      // local-FS stages need no remote storage client
      threadExecutor.submit(getUploadFileCallable(
          stageInfo, SRC_FILE_NAME_FOR_STREAM,
          fileMetadataMap.get(SRC_FILE_NAME_FOR_STREAM),
          (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
              ? null
              : storageFactory.createClient(stageInfo, parallel, encMat),
          connection, command,
          sourceStream, true, parallel, null, encMat));
    } else if (commandType == CommandType.DOWNLOAD) {
      throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
          ErrorCode.INTERNAL_ERROR.getMessageCode());
    }
    threadExecutor.shutdown();
    try {
      // wait for all threads to complete without timeout
      threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException ex) {
      throw new SnowflakeSQLException(SqlState.QUERY_CANCELED,
          ErrorCode.INTERRUPTED.getMessageCode());
    }
    logger.debug("Done with uploading from a stream");
  } finally {
    if (threadExecutor != null) {
      threadExecutor.shutdownNow();
      threadExecutor = null;
    }
  }
}
java
/**
 * Opens a stream over a single staged file in a remote stage.
 *
 * @param fileName the file's name within the stage
 * @return a stream of the file's contents
 * @throws SnowflakeSQLException if the stage is a local filesystem stage
 *                               (streaming is only supported for remote
 *                               stages) or the download fails
 */
InputStream downloadStream(String fileName) throws SnowflakeSQLException {
  if (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS) {
    logger.error("downloadStream function doesn't support local file system");
    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(),
        "downloadStream function only supported in remote stages");
  }
  remoteLocation remoteLocation = extractLocationAndPath(stageInfo.getLocation());
  // prepend the stage's path prefix (if any) to the file name
  String stageFilePath = fileName;
  if (!remoteLocation.path.isEmpty()) {
    stageFilePath = SnowflakeUtil.concatFilePathNames(remoteLocation.path,
        fileName, "/");
  }
  // per-file encryption material keyed by source file name
  RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(fileName);
  return storageFactory.createClient(stageInfo, parallel, encMat)
      .downloadToStream(connection, command, parallel, remoteLocation.location,
          stageFilePath, stageInfo.getRegion());
}
java
/**
 * Downloads every source file to the local location on a one-thread executor
 * and waits for completion. Files whose result status is already decided are
 * skipped. The executor is always torn down in the finally block.
 *
 * @throws SnowflakeSQLException if the wait is interrupted
 */
private void downloadFiles() throws SnowflakeSQLException {
  try {
    threadExecutor = SnowflakeUtil.createDefaultExecutorService(
        "sf-file-download-worker-", 1);
    for (String srcFile : sourceFiles) {
      FileMetadata fileMetadata = fileMetadataMap.get(srcFile);
      // Check if the result status is already set so that we don't need to
      // upload it
      if (fileMetadata.resultStatus != ResultStatus.UNKNOWN) {
        logger.debug("Skipping {}, status: {}, details: {}",
            srcFile, fileMetadata.resultStatus,
            fileMetadata.errorDetails);
        continue;
      }
      RemoteStoreFileEncryptionMaterial encMat = srcFileToEncMat.get(srcFile);
      // local-FS stages need no remote storage client
      threadExecutor.submit(getDownloadFileCallable(
          stageInfo, srcFile, localLocation, fileMetadataMap,
          (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
              ? null
              : storageFactory.createClient(stageInfo, parallel, encMat),
          connection, command, parallel, encMat));
      logger.debug("submitted download job for: {}", srcFile);
    }
    threadExecutor.shutdown();
    try {
      // wait for all threads to complete without timeout
      threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException ex) {
      throw new SnowflakeSQLException(SqlState.QUERY_CANCELED,
          ErrorCode.INTERRUPTED.getMessageCode());
    }
    logger.debug("Done with downloading");
  } finally {
    if (threadExecutor != null) {
      threadExecutor.shutdownNow();
      threadExecutor = null;
    }
  }
}
java
/**
 * Uploads the given files to the stage, skipping files whose status is already
 * decided, using a thread pool of {@code parallel} workers and blocking until
 * every submitted upload completes.
 *
 * @param fileList source file paths to upload
 * @param parallel number of files to upload concurrently
 * @throws SnowflakeSQLException if the wait is interrupted or an upload fails
 */
private void uploadFiles(Set<String> fileList, int parallel)
    throws SnowflakeSQLException
{
  try
  {
    threadExecutor = SnowflakeUtil.createDefaultExecutorService(
        "sf-file-upload-worker-", parallel);

    for (String srcFile : fileList)
    {
      FileMetadata fileMetadata = fileMetadataMap.get(srcFile);

      // Check if the result status is already set so that we don't need to
      // upload it
      if (fileMetadata.resultStatus != ResultStatus.UNKNOWN)
      {
        logger.debug("Skipping {}, status: {}, details: {}",
                     srcFile, fileMetadata.resultStatus,
                     fileMetadata.errorDetails);
        continue;
      }

      /**
       * For small files, we upload files in parallel, so we don't
       * want the remote store uploader to upload parts in parallel for each file.
       * For large files, we upload them in serial, and we want remote store uploader
       * to upload parts in parallel for each file. This is the reason
       * for the parallel value.
       */
      File srcFileObj = new File(srcFile);

      threadExecutor.submit(getUploadFileCallable(
          stageInfo,
          srcFile,
          fileMetadata,
          (stageInfo.getStageType() == StageInfo.StageType.LOCAL_FS)
          ? null
          : storageFactory.createClient(stageInfo, parallel,
                                        encryptionMaterial.get(0)),
          connection,
          command,
          null,
          false,
          // file-level parallelism > 1 means per-part parallelism of 1, and
          // vice versa (see comment above)
          (parallel > 1 ? 1 : this.parallel),
          srcFileObj,
          encryptionMaterial.get(0)));

      logger.debug("submitted copy job for: {}", srcFile);
    }

    // shut down the thread executor
    threadExecutor.shutdown();

    try
    {
      // wait for all threads to complete without timeout
      threadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    }
    catch (InterruptedException ex)
    {
      throw new SnowflakeSQLException(SqlState.QUERY_CANCELED,
                                      ErrorCode.INTERRUPTED.getMessageCode());
    }
    logger.debug("Done with uploading");
  }
  finally
  {
    // shut down the thread pool in any case
    if (threadExecutor != null)
    {
      threadExecutor.shutdownNow();
      threadExecutor = null;
    }
  }
}
java
static public Set<String> expandFileNames(String[] filePathList) throws SnowflakeSQLException { Set<String> result = new HashSet<String>(); // a location to file pattern map so that we only need to list the // same directory once when they appear in multiple times. Map<String, List<String>> locationToFilePatterns; locationToFilePatterns = new HashMap<String, List<String>>(); String cwd = System.getProperty("user.dir"); for (String path : filePathList) { // replace ~ with user home path = path.replace("~", System.getProperty("user.home")); // user may also specify files relative to current directory // add the current path if that is the case if (!(new File(path)).isAbsolute()) { logger.debug("Adding current working dir to relative file path."); path = cwd + localFSFileSep + path; } // check if the path contains any wildcards if (!path.contains("*") && !path.contains("?") && !(path.contains("[") && path.contains("]"))) { /* this file path doesn't have any wildcard, so we don't need to * expand it */ result.add(path); } else { // get the directory path int lastFileSepIndex = path.lastIndexOf(localFSFileSep); // SNOW-15203: if we don't find a default file sep, try "/" if it is not // the default file sep. 
if (lastFileSepIndex < 0 && !"/".equals(localFSFileSep)) { lastFileSepIndex = path.lastIndexOf("/"); } String loc = path.substring(0, lastFileSepIndex + 1); String filePattern = path.substring(lastFileSepIndex + 1); List<String> filePatterns = locationToFilePatterns.get(loc); if (filePatterns == null) { filePatterns = new ArrayList<String>(); locationToFilePatterns.put(loc, filePatterns); } filePatterns.add(filePattern); } } // For each location, list files and match against the patterns for (Map.Entry<String, List<String>> entry : locationToFilePatterns.entrySet()) { try { java.io.File dir = new java.io.File(entry.getKey()); logger.debug("Listing files under: {} with patterns: {}", entry.getKey(), entry.getValue().toString()); // The following currently ignore sub directories for (Object file : FileUtils.listFiles(dir, new WildcardFileFilter(entry.getValue()), null)) { result.add(((java.io.File) file).getCanonicalPath()); } } catch (Exception ex) { throw new SnowflakeSQLException(ex, SqlState.DATA_EXCEPTION, ErrorCode.FAIL_LIST_FILES.getMessageCode(), "Exception: " + ex.getMessage() + ", Dir=" + entry.getKey() + ", Patterns=" + entry.getValue().toString()); } } logger.debug("Expanded file paths: "); for (String filePath : result) { logger.debug("file: {}", filePath); } return result; }
java
/**
 * Maps a MIME type string (e.g. "application/gzip") to the matching
 * FileCompressionType by its subtype, or null when it cannot be resolved.
 *
 * @param mimeTypeStr MIME type string, may be null
 * @return matching compression type, or null
 * @throws MimeTypeParseException if the string is not a valid MIME type
 */
private FileCompressionType mimeTypeToCompressionType(String mimeTypeStr)
    throws MimeTypeParseException
{
  if (mimeTypeStr == null)
  {
    return null;
  }

  MimeType parsed = new MimeType(mimeTypeStr);
  String subType = parsed.getSubType();
  if (subType == null)
  {
    return null;
  }

  // lookup is keyed on the lower-cased subtype, e.g. "gzip"
  return FileCompressionType.lookupByMimeSubType(subType.toLowerCase());
}
java
/**
 * Guesses the MIME type of a file from its extension by scanning the known
 * compression types.
 *
 * @param srcFile file name or path
 * @return "type/subtype" for the first compression type whose extension
 *         matches, or null when none does
 */
private String getMimeTypeFromFileExtension(String srcFile)
{
  String lowered = srcFile.toLowerCase();

  for (FileCompressionType candidate : FileCompressionType.values())
  {
    if (lowered.endsWith(candidate.fileExtension))
    {
      // the primary subtype is the first entry in the subtype list
      return candidate.mimeType + "/" + candidate.mimeSubTypes.get(0);
    }
  }

  return null;
}
java
static public remoteLocation extractLocationAndPath(String stageLocationPath) { String location = stageLocationPath; String path = ""; // split stage location as location name and path if (stageLocationPath.contains("/")) { location = stageLocationPath.substring(0, stageLocationPath.indexOf("/")); path = stageLocationPath.substring(stageLocationPath.indexOf("/") + 1); } return new remoteLocation(location, path); }
java
/**
 * Describes the fixed-view result columns for this PUT/GET command, choosing
 * the facade class by command direction and whether the encryption column is
 * shown.
 *
 * @return column metadata for the status rows
 * @throws Exception if reflection over the facade class fails
 */
@Override
public List<SnowflakeColumnMetadata> describeColumns() throws Exception
{
  Class<?> facadeClass;
  if (commandType == CommandType.UPLOAD)
  {
    facadeClass = showEncryptionParameter
                  ? UploadCommandEncryptionFacade.class
                  : UploadCommandFacade.class;
  }
  else
  {
    facadeClass = showEncryptionParameter
                  ? DownloadCommandEncryptionFacade.class
                  : DownloadCommandFacade.class;
  }
  return SnowflakeUtil.describeFixedViewColumns(facadeClass);
}
java
/**
 * Builds one status row (facade object) per transferred file from
 * {@code fileMetadataMap}, then optionally sorts the rows by source file name
 * when the connection's "sort" property is set.
 */
private void populateStatusRows()
{
  for (Map.Entry<String, FileMetadata> entry : fileMetadataMap.entrySet())
  {
    FileMetadata fileMetadata = entry.getValue();

    if (commandType == CommandType.UPLOAD)
    {
      // Upload rows carry source/destination names, sizes and compression;
      // the encryption facade adds the isEncrypted column.
      statusRows.add(showEncryptionParameter
                     ? new UploadCommandEncryptionFacade(
          fileMetadata.srcFileName,
          fileMetadata.destFileName,
          fileMetadata.resultStatus.name(),
          fileMetadata.errorDetails,
          fileMetadata.srcFileSize,
          fileMetadata.destFileSize,
          (fileMetadata.srcCompressionType == null) ?
          "NONE" : fileMetadata.srcCompressionType.name(),
          (fileMetadata.destCompressionType == null) ?
          "NONE" : fileMetadata.destCompressionType.name(),
          fileMetadata.isEncrypted)
                     : new UploadCommandFacade(
          fileMetadata.srcFileName,
          fileMetadata.destFileName,
          fileMetadata.resultStatus.name(),
          fileMetadata.errorDetails,
          fileMetadata.srcFileSize,
          fileMetadata.destFileSize,
          (fileMetadata.srcCompressionType == null) ?
          "NONE" : fileMetadata.srcCompressionType.name(),
          (fileMetadata.destCompressionType == null) ?
          "NONE" : fileMetadata.destCompressionType.name()));
    }
    else if (commandType == CommandType.DOWNLOAD)
    {
      // Download rows strip a leading '/' from the stage file name.
      statusRows.add(showEncryptionParameter
                     ? new DownloadCommandEncryptionFacade(
          fileMetadata.srcFileName.startsWith("/") ?
          fileMetadata.srcFileName.substring(1) :
          fileMetadata.srcFileName,
          fileMetadata.resultStatus.name(),
          fileMetadata.errorDetails,
          fileMetadata.destFileSize,
          fileMetadata.isEncrypted)
                     : new DownloadCommandFacade(
          fileMetadata.srcFileName.startsWith("/") ?
          fileMetadata.srcFileName.substring(1) :
          fileMetadata.srcFileName,
          fileMetadata.resultStatus.name(),
          fileMetadata.errorDetails,
          fileMetadata.destFileSize));
    }
  }

  /* we sort the result if the connection is in sorting mode */
  Object sortProperty = null;

  sortProperty = connection.getSFSessionProperty("sort");

  boolean sortResult = sortProperty != null && (Boolean) sortProperty;

  if (sortResult)
  {
    Comparator comparator =
        (commandType == CommandType.UPLOAD) ?
        new Comparator<Object>()
        {
          public int compare(Object a, Object b)
          {
            String srcFileNameA = ((UploadCommandFacade) a).srcFile;
            String srcFileNameB = ((UploadCommandFacade) b).srcFile;

            return srcFileNameA.compareTo(srcFileNameB);
          }
        } :
        new Comparator<Object>()
        {
          public int compare(Object a, Object b)
          {
            String srcFileNameA = ((DownloadCommandFacade) a).file;
            String srcFileNameB = ((DownloadCommandFacade) b).file;

            return srcFileNameA.compareTo(srcFileNameB);
          }
        };

    // sort the rows by source file names
    Collections.sort(statusRows, comparator);
  }
}
java
@Override public void flush() { ObjectMapper mapper = ObjectMapperFactory.getObjectMapper(); String dtoDump; URI incidentURI; try { dtoDump = mapper.writeValueAsString(new IncidentV2DTO(this)); } catch (JsonProcessingException ex) { logger.error("Incident registration failed, could not map " + "incident report to json string. Exception: {}", ex.getMessage()); return; } // Sanity check... Preconditions.checkNotNull(dtoDump); try { URIBuilder uriBuilder = new URIBuilder(this.serverUrl); uriBuilder.setPath(SF_PATH_CREATE_INCIDENT_V2); incidentURI = uriBuilder.build(); } catch (URISyntaxException ex) { logger.error("Incident registration failed, " + "URI could not be built. Exception: {}", ex.getMessage()); return; } HttpPost postRequest = new HttpPost(incidentURI); postRequest.setHeader(SFSession.SF_HEADER_AUTHORIZATION, SFSession.SF_HEADER_SNOWFLAKE_AUTHTYPE + " " + SFSession.SF_HEADER_TOKEN_TAG + "=\"" + this.sessionToken + "\""); // Compress the payload. ByteArrayEntity input = null; try { ByteArrayOutputStream baos = new ByteArrayOutputStream(); GZIPOutputStream gzos = new GZIPOutputStream(baos); byte[] bytes = dtoDump.getBytes(StandardCharsets.UTF_8); gzos.write(bytes); gzos.finish(); input = new ByteArrayEntity(baos.toByteArray()); input.setContentType("application/json"); } catch (IOException exc) { logger.debug("Incident registration failed, could not compress" + " payload. Exception: {}", exc.getMessage()); } postRequest.setEntity(input); postRequest.addHeader("content-encoding", "gzip"); try { String response = HttpUtil.executeRequest(postRequest, 1000, 0, null); logger.debug("Incident registration was successful. Response: '{}'", response); } catch (Exception ex) { // No much we can do here besides complain. logger.error( "Incident registration request failed, exception: {}", ex.getMessage()); } }
java
private static String[] decideCipherSuites() { String sysCipherSuites = System.getProperty("https.cipherSuites"); String[] cipherSuites = sysCipherSuites != null ? sysCipherSuites.split(",") : // use jdk default cipher suites ((SSLServerSocketFactory) SSLServerSocketFactory.getDefault()) .getDefaultCipherSuites(); // cipher suites need to be picked up in code explicitly for jdk 1.7 // https://stackoverflow.com/questions/44378970/ if (logger.isTraceEnabled()) { logger.trace("Cipher suites used: {}", Arrays.toString(cipherSuites)); } return cipherSuites; }
java
/**
 * Creates a Telemetry instance from a JDBC connection.
 *
 * @param conn      a connection that must unwrap to SnowflakeConnectionV1
 * @param flushSize number of buffered logs that triggers a flush
 * @return the telemetry instance, or null when the connection is not a
 *         Snowflake connection
 */
public static Telemetry createTelemetry(Connection conn, int flushSize)
{
  try
  {
    SnowflakeConnectionV1 sfConn = conn.unwrap(SnowflakeConnectionV1.class);
    return createTelemetry(sfConn.getSfSession(), flushSize);
  }
  catch (SQLException ex)
  {
    logger.debug("input connection is not a SnowflakeConnection");
    return null;
  }
}
java
public void addLogToBatch(TelemetryData log) throws IOException { if (isClosed) { throw new IOException("Telemetry connector is closed"); } if (!isTelemetryEnabled()) { return; // if disable, do nothing } synchronized (locker) { this.logBatch.add(log); } if (this.logBatch.size() >= this.forceFlushSize) { this.sendBatch(); } }
java
/**
 * Best-effort variant of {@link #addLogToBatch}: any IOException is logged
 * at debug level and swallowed (telemetry must never break the caller).
 *
 * @param log the record to buffer
 */
public void tryAddLogToBatch(TelemetryData log)
{
  try
  {
    addLogToBatch(log);
  }
  catch (IOException ex)
  {
    logger.debug("Exception encountered while sending metrics to telemetry endpoint.", ex);
  }
}
java
/**
 * Flushes any buffered telemetry and marks this connector closed. The
 * connector is marked closed even when the final flush fails.
 *
 * @throws IOException if the connector is already closed
 */
public void close() throws IOException
{
  if (isClosed)
  {
    throw new IOException("Telemetry connector is closed");
  }

  try
  {
    // flush whatever is still buffered before shutting down
    this.sendBatch();
  }
  catch (IOException e)
  {
    logger.error("Send logs failed on closing", e);
  }
  finally
  {
    this.isClosed = true;
  }
}
java
public boolean sendBatch() throws IOException { if (isClosed) { throw new IOException("Telemetry connector is closed"); } if (!isTelemetryEnabled()) { return false; } LinkedList<TelemetryData> tmpList; synchronized (locker) { tmpList = this.logBatch; this.logBatch = new LinkedList<>(); } if (session.isClosed()) { throw new UnexpectedException("Session is closed when sending log"); } if (!tmpList.isEmpty()) { //session shared with JDBC String sessionToken = this.session.getSessionToken(); HttpPost post = new HttpPost(this.telemetryUrl); post.setEntity(new StringEntity(logsToString(tmpList))); post.setHeader("Content-type", "application/json"); post.setHeader("Authorization", "Snowflake Token=\"" + sessionToken + "\""); String response = null; try { response = HttpUtil.executeRequest(post, 1000, 0, null); } catch (SnowflakeSQLException e) { disableTelemetry(); // when got error like 404 or bad request, disable telemetry in this telemetry instance logger.error( "Telemetry request failed, " + "response: {}, exception: {}", response, e.getMessage()); return false; } } return true; }
java
/**
 * Wraps a list of telemetry records into a JSON object of the form
 * {"logs": [record, ...]}.
 *
 * @param telemetryData records to serialize
 * @return the wrapping JSON object
 */
static ObjectNode logsToJson(LinkedList<TelemetryData> telemetryData)
{
  ArrayNode entries = mapper.createArrayNode();
  for (TelemetryData datum : telemetryData)
  {
    entries.add(datum.toJson());
  }

  ObjectNode root = mapper.createObjectNode();
  root.set("logs", entries);
  return root;
}
java
/**
 * Executes a query on this statement.
 *
 * @param sql the SQL text to run
 * @return the query's result set
 * @throws SQLException if the statement is closed or execution fails
 */
@Override
public ResultSet executeQuery(String sql) throws SQLException
{
  raiseSQLExceptionIfStatementIsClosed();
  // plain Statement path: no bind parameters
  return executeQueryInternal(sql, null);
}
java
/**
 * Executes a query with optional bind parameters and installs the result as
 * this statement's current result set. A previous open result set, if any,
 * is remembered in {@code openResultSets} (not closed here).
 *
 * @param sql               the SQL text to run
 * @param parameterBindings bind values keyed by 1-based position, or null
 * @return the new current result set
 * @throws SQLException wrapping any SFException from the core layer
 */
ResultSet executeQueryInternal(
    String sql,
    Map<String, ParameterBindingDTO> parameterBindings)
    throws SQLException
{
  SFBaseResultSet sfResultSet;
  try
  {
    sfResultSet = sfStatement.execute(sql, parameterBindings,
                                      SFStatement.CallingMethod.EXECUTE_QUERY);
    sfResultSet.setSession(this.connection.getSfSession());
  }
  catch (SFException ex)
  {
    // translate core exception into a JDBC one, preserving its cause
    throw new SnowflakeSQLException(ex.getCause(), ex.getSqlState(),
                                    ex.getVendorCode(), ex.getParams());
  }

  // keep a handle to the superseded result set so it can be closed later
  if (resultSet != null)
  {
    openResultSets.add(resultSet);
  }
  resultSet = new SnowflakeResultSetV1(sfResultSet, this);
  return getResultSet();
}
java
/**
 * Forwards a session/statement property to the underlying SFStatement.
 * No-op when there is no underlying statement.
 *
 * @param name  property name
 * @param value property value
 * @throws SnowflakeSQLException wrapping any SFException from the core layer
 */
void setParameter(String name, Object value) throws Exception
{
  logger.debug("public void setParameter");

  if (this.sfStatement == null)
  {
    return;
  }

  try
  {
    this.sfStatement.addProperty(name, value);
  }
  catch (SFException ex)
  {
    throw new SnowflakeSQLException(ex);
  }
}
java
/**
 * Builds a fixed-size daemon thread pool for chunk downloads whose threads
 * are named "&lt;prefix&gt;1", "&lt;prefix&gt;2", … and log uncaught exceptions.
 *
 * @param threadNamePrefix prefix for worker thread names
 * @param parallel         pool size
 * @return the configured executor
 */
private static ThreadPoolExecutor createChunkDownloaderExecutorService(
    final String threadNamePrefix,
    final int parallel)
{
  ThreadFactory threadFactory = new ThreadFactory()
  {
    // BUGFIX: newThread() can be invoked concurrently by the pool; an
    // unsynchronized "int threadCount++" could mint duplicate names.
    private final java.util.concurrent.atomic.AtomicInteger threadCount =
        new java.util.concurrent.atomic.AtomicInteger(1);

    public Thread newThread(final Runnable r)
    {
      final Thread thread = new Thread(r);
      thread.setName(threadNamePrefix + threadCount.getAndIncrement());

      thread.setUncaughtExceptionHandler(
          new Thread.UncaughtExceptionHandler()
          {
            public void uncaughtException(Thread t, Throwable e)
            {
              logger.error(
                  "uncaughtException in thread: " + t + " {}", e);
            }
          });

      // daemon threads so a hung download can't prevent JVM exit
      thread.setDaemon(true);

      return thread;
    }
  };
  return (ThreadPoolExecutor) Executors.newFixedThreadPool(parallel,
                                                           threadFactory);
}
java
/**
 * Submits chunk-download jobs until either the prefetch-slot window is full
 * or the memory budget is exhausted. When memory is tight and nothing is in
 * flight, backs off with jittered exponential waits rather than busy-looping.
 *
 * @throws SnowflakeSQLException when the back-off sleep is interrupted
 */
private void startNextDownloaders() throws SnowflakeSQLException
{
  long waitingTime = BASE_WAITING_MS;

  // submit the chunks to be downloaded up to the prefetch slot capacity
  // and limited by memory
  while (nextChunkToDownload - nextChunkToConsume < prefetchSlots &&
         nextChunkToDownload < chunks.size())
  {
    // check if memory limit allows more prefetching
    final SnowflakeResultChunk nextChunk = chunks.get(nextChunkToDownload);
    final long neededChunkMemory = nextChunk.computeNeededChunkMemory();

    // each time only one thread can enter this block
    synchronized (currentMemoryUsage)
    {
      // make sure memoryLimit > neededChunkMemory; otherwise, the thread hangs
      if (neededChunkMemory > memoryLimit)
      {
        if (logger.isDebugEnabled())
        {
          logger.debug("{}: reset memoryLimit from {} MB to current chunk size {} MB",
                       Thread.currentThread().getName(),
                       memoryLimit / 1024 / 1024,
                       neededChunkMemory / 1024 / 1024);
        }
        memoryLimit = neededChunkMemory;
      }

      // no memory allocate when memory is not enough for prefetch
      if (currentMemoryUsage + neededChunkMemory > memoryLimit &&
          nextChunkToDownload - nextChunkToConsume > 0)
      {
        break;
      }

      // only allocate memory when the future usage is less than the limit
      if (currentMemoryUsage + neededChunkMemory <= memoryLimit)
      {
        // reuse buffers from previously consumed chunks where possible
        nextChunk.tryReuse(chunkDataCache);

        currentMemoryUsage += neededChunkMemory;

        if (logger.isDebugEnabled())
        {
          logger.debug("{}: currentMemoryUsage in MB: {}, nextChunkToDownload: {}, nextChunkToConsume: {}, " +
                       "newReservedMemory in B: {} ",
                       Thread.currentThread().getName(),
                       currentMemoryUsage / 1024 / 1024,
                       nextChunkToDownload,
                       nextChunkToConsume,
                       neededChunkMemory);
        }

        logger.debug("submit chunk #{} for downloading, url={}",
                     this.nextChunkToDownload, nextChunk.getUrl());

        executor.submit(getDownloadChunkCallable(this,
                                                 nextChunk,
                                                 qrmk,
                                                 nextChunkToDownload,
                                                 chunkHeadersMap,
                                                 networkTimeoutInMilli,
                                                 useJsonParserV2));

        // increment next chunk to download
        nextChunkToDownload++;

        // make sure reset waiting time
        waitingTime = BASE_WAITING_MS;

        // go to next chunk
        continue;
      }
    }

    // waiting when nextChunkToDownload is equal to nextChunkToConsume but reach memory limit
    try
    {
      // exponential back-off capped at MAX_WAITING_MS, plus random jitter
      waitingTime *= WAITING_SECS_MULTIPLIER;
      waitingTime = waitingTime > MAX_WAITING_MS ? MAX_WAITING_MS : waitingTime;
      long jitter = ThreadLocalRandom.current().nextLong(0, waitingTime / WAITING_JITTER_RATIO);
      waitingTime += jitter;
      if (logger.isDebugEnabled())
      {
        logger.debug("{} waiting for {}s: currentMemoryUsage in MB: {}, neededChunkMemory in MB: {}, " +
                     "nextChunkToDownload: {}, nextChunkToConsume: {} ",
                     Thread.currentThread().getName(),
                     waitingTime / 1000.0,
                     currentMemoryUsage / 1024 / 1024,
                     neededChunkMemory / 1024 / 1024,
                     nextChunkToDownload,
                     nextChunkToConsume);
      }
      Thread.sleep(waitingTime);
    }
    catch (InterruptedException ie)
    {
      throw new SnowflakeSQLException(
          SqlState.INTERNAL_ERROR,
          ErrorCode.INTERNAL_ERROR.getMessageCode(),
          "Waiting SnowflakeChunkDownloader has been interrupted.");
    }
  }

  // clear the cache, we can't download more at the moment
  // so we won't need them in the near future
  chunkDataCache.clear();
}
java
/**
 * Releases the reserved memory accounting for every chunk. No-op when there
 * are no chunks.
 */
public void releaseAllChunkMemoryUsage()
{
  if (chunks == null || chunks.size() == 0)
  {
    return;
  }

  int total = chunks.size();
  for (int idx = 0; idx < total; idx++)
  {
    releaseCurrentMemoryUsage(idx, chunks.get(idx).computeNeededChunkMemory());
  }
}
java
/**
 * Logs a diagnostic dump (memory counters, timings, chunk bookkeeping) plus
 * remediation suggestions when the downloader hits an OutOfMemoryError.
 */
private void logOutOfMemoryError()
{
  // BUGFIX: the original message ran suggestion 2 and 3 together with no
  // separator; "; or \n" added after the doc link.
  logger.error("Dump some crucial information below:\n" +
               "Total milliseconds waiting for chunks: {},\n" +
               "Total memory used: {}, Max heap size: {}, total download time: {} millisec,\n" +
               "total parsing time: {} milliseconds, total chunks: {},\n" +
               "currentMemoryUsage in Byte: {}, currentMemoryLimit in Bytes: {} \n" +
               "nextChunkToDownload: {}, nextChunkToConsume: {}\n" +
               "Several suggestions to try to resolve the OOM issue:\n" +
               "1. increase the JVM heap size if you have more space; or \n" +
               "2. use CLIENT_MEMORY_LIMIT to reduce the memory usage by the JDBC driver " +
               "(https://docs.snowflake.net/manuals/sql-reference/parameters.html#client-memory-limit)" +
               "; or \n" +
               "3. please make sure 2 * CLIENT_PREFETCH_THREADS * CLIENT_RESULT_CHUNK_SIZE < CLIENT_MEMORY_LIMIT. " +
               "If not, please reduce CLIENT_PREFETCH_THREADS and CLIENT_RESULT_CHUNK_SIZE too.",
               numberMillisWaitingForChunks,
               Runtime.getRuntime().totalMemory(),
               Runtime.getRuntime().maxMemory(),
               totalMillisDownloadingChunks.get(),
               totalMillisParsingChunks.get(),
               chunks.size(),
               currentMemoryUsage,
               memoryLimit,
               nextChunkToDownload,
               nextChunkToConsume);
}
java
/**
 * Shuts the downloader down once: logs summary metrics, kills the executor,
 * drops chunk references, and clears the data cache.
 *
 * @return a Metrics snapshot on the first call; null on subsequent calls
 */
public Metrics terminate()
{
  if (terminated)
  {
    return null;
  }

  logger.debug("Total milliseconds waiting for chunks: {}, " +
               "Total memory used: {}, total download time: {} millisec, " +
               "total parsing time: {} milliseconds, total chunks: {}",
               numberMillisWaitingForChunks,
               Runtime.getRuntime().totalMemory(),
               totalMillisDownloadingChunks.get(),
               totalMillisParsingChunks.get(),
               chunks.size());

  if (executor != null)
  {
    executor.shutdownNow();
    executor = null;
  }

  // release references so the chunk buffers can be collected
  chunks = null;
  chunkDataCache.clear();
  terminated = true;

  return new Metrics();
}
java
/**
 * Masks any AWS secret values detected in a SQL statement so they are safe
 * to log.
 *
 * @param sql the SQL text to scrub
 * @return the SQL with each detected secret range masked
 */
public static String maskAWSSecret(String sql)
{
  String masked = sql;
  for (SecretDetector.SecretRange range : SecretDetector.getAWSSecretPos(masked))
  {
    masked = maskText(masked, range.beginPos, range.endPos);
  }
  return masked;
}
java
/**
 * Rejects null or empty SQL text before execution.
 *
 * @param sql the SQL text to validate
 * @throws SnowflakeSQLException when sql is null or empty
 */
private void sanityCheckQuery(String sql) throws SQLException
{
  boolean missing = (sql == null) || sql.isEmpty();
  if (missing)
  {
    throw new SnowflakeSQLException(SqlState.SQL_STATEMENT_NOT_YET_COMPLETE,
                                    ErrorCode.INVALID_SQL.getMessageCode(),
                                    sql);
  }
}
java
/**
 * Core query dispatch: PUT/GET statements are handled locally by the file
 * transfer agent; everything else goes to the server.
 *
 * @param sql               the SQL text
 * @param parametersBinding bind values, or null
 * @param describeOnly      when true, only describe the statement
 * @param caller            which public API invoked this, or null
 * @return the result set from either the file transfer or the server
 * @throws SQLException on validation or execution failure
 * @throws SFException  on core-layer failure
 */
private SFBaseResultSet executeQuery(
    String sql,
    Map<String, ParameterBindingDTO> parametersBinding,
    boolean describeOnly,
    CallingMethod caller)
    throws SQLException, SFException
{
  sanityCheckQuery(sql);

  String trimmedSql = sql.trim();

  // snowflake specific client side commands
  if (isFileTransfer(trimmedSql))
  {
    // PUT/GET command
    logger.debug("Executing file transfer locally: {}", sql);

    return executeFileTransfer(sql);
  }

  // NOTE: It is intentional two describeOnly parameters are specified.
  return executeQueryInternal(
      sql,
      parametersBinding,
      describeOnly,
      describeOnly, // internal query if describeOnly is true
      caller
  );
}
java
/**
 * Describes a statement without executing it, recording the describe job's
 * query id for later use.
 *
 * @param sql the SQL text to describe
 * @return metadata (columns, statement type, bind count, array-bind support)
 * @throws SFException  on core-layer failure
 * @throws SQLException on validation failure
 */
public SFStatementMetaData describe(String sql) throws SFException, SQLException
{
  SFBaseResultSet described = executeQuery(sql, null, true, null);

  describeJobUUID = described.getQueryId();

  return new SFStatementMetaData(described.getMetaData(),
                                 described.getStatementType(),
                                 described.getNumberOfBinds(),
                                 described.isArrayBindSupported());
}
java
/**
 * Schedules a one-shot task that cancels this statement after the configured
 * query timeout elapses.
 *
 * @param executor the scheduler to run the cancellation on
 */
private void setTimeBomb(ScheduledExecutorService executor)
{
  final SFStatement target = this;

  executor.schedule(new Callable<Void>()
  {
    @Override
    public Void call() throws SQLException
    {
      try
      {
        target.cancel();
      }
      catch (SFException ex)
      {
        // surface cancellation failure as a JDBC exception
        throw new SnowflakeSQLException(ex, ex.getSqlState(),
                                        ex.getVendorCode(), ex.getParams());
      }
      return null;
    }
  }, this.queryTimeout, TimeUnit.SECONDS);
}
java
/**
 * Sends a cancel request for the currently running statement to the server,
 * then clears the sequence/request identifiers so the statement is not
 * aborted a second time.
 *
 * @param sql       the SQL text of the statement being canceled
 * @param mediaType media type for the cancel request
 * @throws SnowflakeSQLException on transport failure
 * @throws SFException if this statement is already closed
 */
private void cancelHelper(String sql, String mediaType)
    throws SnowflakeSQLException, SFException
{
  // check closed state under the statement lock
  synchronized (this)
  {
    if (isClosed)
    {
      throw new SFException(ErrorCode.INTERNAL_ERROR,
                            "statement already closed");
    }
  }

  StmtUtil.StmtInput stmtInput = new StmtUtil.StmtInput();
  stmtInput.setServerUrl(session.getServerUrl())
      .setSql(sql)
      .setMediaType(mediaType)
      .setRequestId(requestId)
      .setSessionToken(session.getSessionToken())
      .setServiceName(session.getServiceName());

  // network call happens outside the lock
  StmtUtil.cancel(stmtInput);

  synchronized (this)
  {
    /*
     * done with the remote execution of the query. set sequenceId to -1
     * and request id to null so that we don't try to abort it again upon
     * canceling.
     */
    this.sequenceId = -1;
    this.requestId = null;
  }
}
java
/**
 * Advances to the next child result of a multi-statement execution, per
 * {@link java.sql.Statement#getMoreResults(int)}.
 *
 * @param current one of CLOSE_CURRENT_RESULT, CLOSE_ALL_RESULTS,
 *                KEEP_CURRENT_RESULT
 * @return true if the next result is a result set
 * @throws SQLException wrapping any core-layer failure
 */
public boolean getMoreResults(int current) throws SQLException
{
  // clean up current result, if exists
  if (resultSet != null &&
      (current == Statement.CLOSE_CURRENT_RESULT ||
       current == Statement.CLOSE_ALL_RESULTS))
  {
    resultSet.close();
  }

  // NOTE(review): the field is nulled even for KEEP_CURRENT_RESULT, which
  // drops this statement's reference to a still-open result set — confirm
  // whether callers are expected to retain their own handle in that case.
  resultSet = null;

  // verify if more results exist
  if (childResults == null || childResults.isEmpty())
  {
    return false;
  }

  // fetch next result using the query id
  SFChildResult nextResult = childResults.remove(0);
  try
  {
    JsonNode result = StmtUtil.getQueryResultJSON(
        nextResult.getId(), session);
    Object sortProperty = session.getSFSessionProperty("sort");
    boolean sortResult = sortProperty != null && (Boolean) sortProperty;
    resultSet = new SFResultSet(result, this, sortResult);
    // override statement type so we can treat the result set like a result of
    // the original statement called (and not the result scan)
    resultSet.setStatementType(nextResult.getType());
    return nextResult.getType().isGenerateResultSet();
  }
  catch (SFException ex)
  {
    throw new SnowflakeSQLException(ex);
  }
}
java
/**
 * Returns true when this exception is actually an AWS
 * {@code AmazonServiceException} reporting HTTP 404 (not found).
 *
 * The up-casts to {@code Exception} are deliberate: this class is not in the
 * AmazonServiceException hierarchy, so a direct instanceof/cast on
 * {@code this} would not compile; widening to Exception first makes the
 * runtime check legal.
 */
public boolean isServiceException404()
{
  if ((Exception) this instanceof AmazonServiceException)
  {
    AmazonServiceException asEx =
        (AmazonServiceException) ((java.lang.Exception) this);
    return (asEx.getStatusCode() == HttpStatus.SC_NOT_FOUND);
  }
  return false;
}
java
/**
 * Formats a throwable as a single line: its toString() plus the top stack
 * frame (when one exists).
 *
 * @param thrown the throwable to summarize
 * @return e.g. "java.io.IOException: boom at com.Foo.bar(Foo.java:10)"; just
 *         the toString() when the stack trace is empty
 */
public static String oneLiner(Throwable thrown)
{
  StackTraceElement[] stack = thrown.getStackTrace();
  // BUGFIX: guard against an empty stack trace (possible for JIT-optimized
  // or hand-built throwables); previously the literal "null" was appended.
  String topOfStack = (stack.length > 0) ? " at " + stack[0] : "";
  return thrown.toString() + topOfStack;
}
java
/**
 * Writes a gzipped diagnostic dump for an incident: JVM/component metrics as
 * pretty-printed JSON followed by a full thread dump. Failures are logged and
 * swallowed; this is best-effort diagnostics.
 *
 * @param incidentId id used to build the dump file name
 */
public static void dumpVmMetrics(String incidentId)
{
  PrintWriter writer = null;
  try
  {
    String dumpFile = EventUtil.getDumpPathPrefix() + "/" +
                      INC_DUMP_FILE_NAME + incidentId + INC_DUMP_FILE_EXT;

    // gzip the whole dump; writer.close() in finally also closes this stream
    final OutputStream outStream =
        new GZIPOutputStream(new FileOutputStream(dumpFile));

    writer = new PrintWriter(outStream, true);

    final VirtualMachineMetrics vm = VirtualMachineMetrics.getInstance();

    writer.print("\n\n\n--------------------------- METRICS " +
                 "---------------------------\n\n");
    writer.flush();

    // keep the shared outStream open after the JSON generator closes
    JsonFactory jf = new JsonFactory();
    jf.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
    ObjectMapper mapper = new ObjectMapper(jf);
    mapper.registerModule(new JodaModule());
    mapper.setDateFormat(new ISO8601DateFormat());
    mapper.configure(SerializationFeature.INDENT_OUTPUT, true);

    MetricsServlet metrics = new MetricsServlet(Clock.defaultClock(),
                                                vm,
                                                Metrics.defaultRegistry(),
                                                jf,
                                                true);

    final JsonGenerator json = jf.createGenerator(outStream,
                                                  JsonEncoding.UTF8);
    json.useDefaultPrettyPrinter();

    json.writeStartObject();

    // JVM metrics
    writeVmMetrics(json, vm);

    // Components metrics
    metrics.writeRegularMetrics(json, // json generator
                                null, // class prefix
                                false); // include full samples

    json.writeEndObject();
    json.close();

    logger.debug("Creating full thread dump in dump file {}", dumpFile);

    // Thread dump next....
    writer.print("\n\n\n--------------------------- THREAD DUMP " +
                 "---------------------------\n\n");
    writer.flush();

    vm.threadDump(outStream);

    logger.debug("Dump file {} is created.", dumpFile);
  }
  catch (Exception exc)
  {
    logger.error(
        "Unable to write dump file, exception: {}", exc.getMessage());
  }
  finally
  {
    if (writer != null)
    {
      writer.close();
    }
  }
}
java
/**
 * Files an incident report for the given throwable and hands the same
 * throwable back so callers can rethrow it inline.
 *
 * @param session   session the incident is associated with
 * @param exc       the throwable to report and return
 * @param jobId     job id for the incident, may be null
 * @param requestId request id for the incident, may be null
 * @return the same {@code exc} instance
 */
public static Throwable generateIncidentV2WithException(SFSession session,
                                                        Throwable exc,
                                                        String jobId,
                                                        String requestId)
{
  Incident incident = new Incident(session, exc, jobId, requestId);
  incident.trigger();
  return exc;
}
java
public static String getUTCNow() { SimpleDateFormat dateFormatGmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); dateFormatGmt.setTimeZone(TimeZone.getTimeZone("GMT")); //Time in GMT return dateFormatGmt.format(new Date()); }
java
/**
 * Installs freshly issued stage credentials and rebuilds the Azure client
 * with them.
 *
 * @param stageCredentials new credential map for the stage
 * @throws SnowflakeSQLException if rebuilding the client fails
 */
@Override
public void renew(Map stageCredentials) throws SnowflakeSQLException
{
  stageInfo.setCredentials(stageCredentials);
  setupAzureClient(stageInfo, encMat);
}
java
/**
 * Fetches metadata (content length, content encoding, user-defined metadata)
 * for a single blob in Azure storage.
 *
 * @param remoteStorageLocation the Azure container name
 * @param prefix                the blob name/path within the container
 * @return the blob's metadata wrapped in an AzureObjectMetadata
 * @throws StorageProviderException on Azure storage errors or a bad URI
 */
@Override
public StorageObjectMetadata getObjectMetadata(String remoteStorageLocation,
                                               String prefix)
    throws StorageProviderException
{
  AzureObjectMetadata azureObjectMetadata = null;
  try
  {
    // Get a reference to the BLOB, to retrieve its metadata
    CloudBlobContainer container =
        azStorageClient.getContainerReference(remoteStorageLocation);
    CloudBlob blob = container.getBlockBlobReference(prefix);
    // populates both user metadata and system properties from the service
    blob.downloadAttributes();

    // Get the user-defined BLOB metadata
    Map<String, String> userDefinedMetadata = blob.getMetadata();

    // Get the BLOB system properties we care about
    BlobProperties properties = blob.getProperties();
    long contentLength = properties.getLength();
    String contentEncoding = properties.getContentEncoding();

    // Construct an Azure metadata object
    azureObjectMetadata = new AzureObjectMetadata(contentLength,
                                                  contentEncoding,
                                                  userDefinedMetadata);
  }
  catch (StorageException ex)
  {
    logger.debug("Failed to retrieve BLOB metadata: {} - {}",
                 ex.getErrorCode(),
                 ex.getExtendedErrorInformation());
    throw new StorageProviderException(ex);
  }
  catch (URISyntaxException ex)
  {
    logger.debug("Cannot retrieve BLOB properties, invalid URI: {}", ex);
    throw new StorageProviderException(ex);
  }

  return azureObjectMetadata;
}
java
/**
 * Downloads one staged file from Azure to a local path, decrypting it when
 * client-side encryption metadata is present, with retry on transient errors.
 *
 * @param connection            session used for error handling / token renewal
 * @param command               original GET command text
 * @param localLocation         local directory to download into
 * @param destFileName          destination file name
 * @param parallelism           ignored (see note below)
 * @param remoteStorageLocation Azure container name
 * @param stageFilePath         blob path within the container
 * @param stageRegion           stage region (unused by Azure path)
 * @throws SnowflakeSQLException when retries are exhausted or decryption fails
 */
@Override
public void download(SFSession connection,
                     String command,
                     String localLocation,
                     String destFileName,
                     int parallelism,
                     String remoteStorageLocation,
                     String stageFilePath,
                     String stageRegion)
    throws SnowflakeSQLException
{
  int retryCount = 0;
  do
  {
    try
    {
      String localFilePath = localLocation + localFileSep + destFileName;
      File localFile = new File(localFilePath);
      CloudBlobContainer container =
          azStorageClient.getContainerReference(remoteStorageLocation);
      CloudBlob blob = container.getBlockBlobReference(stageFilePath);

      // Note that Azure doesn't offer a multi-part parallel download library,
      // where the user has control of block size and parallelism
      // we rely on Azure to handle the download, hence the "parallelism" parameter is ignored
      // in the Azure implementation of the method
      blob.downloadToFile(localFilePath);

      // Pull object metadata from Azure
      blob.downloadAttributes();

      // Get the user-defined BLOB metadata
      Map<String, String> userDefinedMetadata = blob.getMetadata();
      AbstractMap.SimpleEntry<String, String> encryptionData =
          parseEncryptionData(userDefinedMetadata.get(AZ_ENCRYPTIONDATAPROP));

      String key = encryptionData.getKey();
      String iv = encryptionData.getValue();

      // client-side decryption applies only for key sizes <= 256 bits
      if (this.isEncrypting() && this.getEncryptionKeySize() <= 256)
      {
        if (key == null || iv == null)
        {
          throw new SnowflakeSQLException(
              SqlState.INTERNAL_ERROR,
              ErrorCode.INTERNAL_ERROR.getMessageCode(),
              "File metadata incomplete");
        }

        // Decrypt file
        try
        {
          EncryptionProvider.decrypt(localFile, key, iv, this.encMat);
        }
        catch (Exception ex)
        {
          logger.error("Error decrypting file", ex);
          throw ex;
        }
      }

      return;
    }
    catch (Exception ex)
    {
      logger.debug("Download unsuccessful {}", ex);
      // may sleep, renew the token, or rethrow depending on the exception
      handleAzureException(ex, ++retryCount, "download",
                           connection, command, this);
    }
  }
  while (retryCount <= getMaxRetries());

  throw new SnowflakeSQLException(
      SqlState.INTERNAL_ERROR,
      ErrorCode.INTERNAL_ERROR.getMessageCode(),
      "Unexpected: download unsuccessful without exception!");
}
java
private static void handleAzureException( Exception ex, int retryCount, String operation, SFSession connection, String command, SnowflakeAzureClient azClient) throws SnowflakeSQLException { // no need to retry if it is invalid key exception if (ex.getCause() instanceof InvalidKeyException) { // Most likely cause is that the unlimited strength policy files are not installed // Log the error and throw a message that explains the cause SnowflakeFileTransferAgent.throwJCEMissingError(operation, ex); } if (((StorageException) ex).getHttpStatusCode() == 403) { // A 403 indicates that the SAS token has expired, // we need to refresh the Azure client with the new token SnowflakeFileTransferAgent.renewExpiredToken(connection, command, azClient); } if (ex instanceof StorageException) { StorageException se = (StorageException) ex; // If we have exceeded the max number of retries, propagate the error if (retryCount > azClient.getMaxRetries()) { throw new SnowflakeSQLException(se, SqlState.SYSTEM_ERROR, ErrorCode.AZURE_SERVICE_ERROR.getMessageCode(), operation, se.getErrorCode(), se.getExtendedErrorInformation(), se.getHttpStatusCode(), se.getMessage()); } else { logger.debug("Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount); logger.debug("Stack trace: ", ex); // exponential backoff up to a limit int backoffInMillis = azClient.getRetryBackoffMin(); if (retryCount > 1) { backoffInMillis <<= (Math.min(retryCount - 1, azClient.getRetryBackoffMaxExponent())); } try { logger.debug("Sleep for {} milliseconds before retry", backoffInMillis); Thread.sleep(backoffInMillis); } catch (InterruptedException ex1) { // ignore } if (se.getHttpStatusCode() == 403) { // A 403 indicates that the SAS token has expired, // we need to refresh the Azure client with the new token SnowflakeFileTransferAgent.renewExpiredToken(connection, command, azClient); } } } else { if (ex instanceof InterruptedException || SnowflakeUtil.getRootCause(ex) instanceof 
SocketTimeoutException) { if (retryCount > azClient.getMaxRetries()) { throw new SnowflakeSQLException(ex, SqlState.SYSTEM_ERROR, ErrorCode.IO_ERROR.getMessageCode(), "Encountered exception during " + operation + ": " + ex.getMessage()); } else { logger.debug("Encountered exception ({}) during {}, retry count: {}", ex.getMessage(), operation, retryCount); } } else { throw new SnowflakeSQLException(ex, SqlState.SYSTEM_ERROR, ErrorCode.IO_ERROR.getMessageCode(), "Encountered exception during " + operation + ": " + ex.getMessage()); } } }
java
@Override public void addDigestMetadata(StorageObjectMetadata meta, String digest) { if (!SnowflakeUtil.isBlank(digest)) { // Azure doesn't allow hyphens in the name of a metadata field. meta.addUserMetadata("sfcdigest", digest); } }
java
private static long initMemoryLimit(final ResultOutput resultOutput) { // default setting long memoryLimit = SessionUtil.DEFAULT_CLIENT_MEMORY_LIMIT * 1024 * 1024; if (resultOutput.parameters.get(CLIENT_MEMORY_LIMIT) != null) { // use the settings from the customer memoryLimit = (int) resultOutput.parameters.get(CLIENT_MEMORY_LIMIT) * 1024L * 1024L; } long maxMemoryToUse = Runtime.getRuntime().maxMemory() * 8 / 10; if ((int) resultOutput.parameters.get(CLIENT_MEMORY_LIMIT) == SessionUtil.DEFAULT_CLIENT_MEMORY_LIMIT) { // if the memory limit is the default value and best effort memory is enabled // set the memory limit to 80% of the maximum as the best effort memoryLimit = Math.max(memoryLimit, maxMemoryToUse); } // always make sure memoryLimit <= 80% of the maximum memoryLimit = Math.min(memoryLimit, maxMemoryToUse); logger.debug("Set allowed memory usage to {} bytes", memoryLimit); return memoryLimit; }
java
static private Object effectiveParamValue( Map<String, Object> parameters, String paramName) { String upper = paramName.toUpperCase(); Object value = parameters.get(upper); if (value != null) { return value; } value = defaultParameters.get(upper); if (value != null) { return value; } logger.debug("Unknown Common Parameter: {}", paramName); return null; }
java
static private SnowflakeDateTimeFormat specializedFormatter( Map<String, Object> parameters, String id, String param, String defaultFormat) { String sqlFormat = SnowflakeDateTimeFormat.effectiveSpecializedTimestampFormat( (String) effectiveParamValue(parameters, param), defaultFormat); SnowflakeDateTimeFormat formatter = new SnowflakeDateTimeFormat(sqlFormat); if (logger.isDebugEnabled()) { logger.debug("sql {} format: {}, java {} format: {}", id, sqlFormat, id, formatter.toSimpleDateTimePattern()); } return formatter; }
java
static public Timestamp adjustTimestamp(Timestamp timestamp) { long milliToAdjust = ResultUtil.msDiffJulianToGregorian(timestamp); if (milliToAdjust != 0) { if (logger.isDebugEnabled()) { logger.debug("adjust timestamp by {} days", milliToAdjust / 86400000); } Timestamp newTimestamp = new Timestamp(timestamp.getTime() + milliToAdjust); newTimestamp.setNanos(timestamp.getNanos()); return newTimestamp; } else { return timestamp; } }
java
static public long msDiffJulianToGregorian(java.util.Date date) { // get the year of the date Calendar cal = Calendar.getInstance(); cal.setTime(date); int year = cal.get(Calendar.YEAR); int month = cal.get(Calendar.MONTH); int dayOfMonth = cal.get(Calendar.DAY_OF_MONTH); // if date is before 1582-10-05, apply the difference // by (H-(H/4)-2) where H is the hundreds digit of the year according to: // http://en.wikipedia.org/wiki/Gregorian_calendar if (date.getTime() < -12220156800000L) { // for dates on or before 02/28, use the previous year otherwise use // current year. // TODO: we need to revisit this since there is a potential issue using // the year/month/day from the calendar since that may not be the same // year/month/day as the original date (which is the problem we are // trying to solve here). if (month == 0 || (month == 1 && dayOfMonth <= 28)) { year = year - 1; } int hundreds = year / 100; int differenceInDays = hundreds - (hundreds / 4) - 2; return differenceInDays * 86400000; } else { return 0; } }
java
static public String getSFTimeAsString( SFTime sft, int scale, SnowflakeDateTimeFormat timeFormatter) { return timeFormatter.format(sft, scale); }
java
static public String getSFTimestampAsString( SFTimestamp sfTS, int columnType, int scale, SnowflakeDateTimeFormat timestampNTZFormatter, SnowflakeDateTimeFormat timestampLTZFormatter, SnowflakeDateTimeFormat timestampTZFormatter, SFSession session) throws SFException { // Derive the timestamp formatter to use SnowflakeDateTimeFormat formatter; if (columnType == Types.TIMESTAMP) { formatter = timestampNTZFormatter; } else if (columnType == SnowflakeUtil.EXTRA_TYPES_TIMESTAMP_LTZ) { formatter = timestampLTZFormatter; } else // TZ { formatter = timestampTZFormatter; } if (formatter == null) { throw (SFException) IncidentUtil.generateIncidentV2WithException( session, new SFException(ErrorCode.INTERNAL_ERROR, "missing timestamp formatter"), null, null); } try { Timestamp adjustedTimestamp = ResultUtil.adjustTimestamp(sfTS.getTimestamp()); return formatter.format( adjustedTimestamp, sfTS.getTimeZone(), scale); } catch (SFTimestamp.TimestampOperationNotAvailableException e) { // this timestamp doesn't fit into a Java timestamp, and therefore we // can't format it (for now). Just print it out as seconds since epoch. BigDecimal nanosSinceEpoch = sfTS.getNanosSinceEpoch(); BigDecimal secondsSinceEpoch = nanosSinceEpoch.scaleByPowerOfTen(-9); return secondsSinceEpoch.setScale(scale).toPlainString(); } }
java
static public String getDateAsString( Date date, SnowflakeDateTimeFormat dateFormatter) { return dateFormatter.format(date, timeZoneUTC); }
java
static public Date adjustDate(Date date) { long milliToAdjust = ResultUtil.msDiffJulianToGregorian(date); if (milliToAdjust != 0) { // add the difference to the new date return new Date(date.getTime() + milliToAdjust); } else { return date; } }
java
static public Date getDate(String str, TimeZone tz, SFSession session) throws SFException { try { long milliSecsSinceEpoch = Long.valueOf(str) * 86400000; SFTimestamp tsInUTC = SFTimestamp.fromDate(new Date(milliSecsSinceEpoch), 0, TimeZone.getTimeZone("UTC")); SFTimestamp tsInClientTZ = tsInUTC.moveToTimeZone(tz); if (logger.isDebugEnabled()) { logger.debug("getDate: tz offset={}", tsInClientTZ.getTimeZone().getOffset(tsInClientTZ.getTime())); } // return the date adjusted to the JVM default time zone Date preDate = new Date(tsInClientTZ.getTime()); // if date is on or before 1582-10-04, apply the difference // by (H-H/4-2) where H is the hundreds digit of the year according to: // http://en.wikipedia.org/wiki/Gregorian_calendar Date newDate = adjustDate(preDate); if (logger.isDebugEnabled()) { logger.debug("Adjust date from {} to {}", preDate.toString(), newDate.toString()); } return newDate; } catch (NumberFormatException ex) { throw (SFException) IncidentUtil.generateIncidentV2WithException( session, new SFException(ErrorCode.INTERNAL_ERROR, "Invalid date value: " + str), null, null); } }
java
static public int calculateUpdateCount(SFBaseResultSet resultSet) throws SFException, SQLException { int updateCount = 0; SFStatementType statementType = resultSet.getStatementType(); if (statementType.isDML()) { while (resultSet.next()) { if (statementType == SFStatementType.COPY) { SFResultSetMetaData resultSetMetaData = resultSet.getMetaData(); int columnIndex = resultSetMetaData.getColumnIndex("rows_loaded"); updateCount += columnIndex == -1 ? 0 : resultSet.getInt(columnIndex + 1); } else if (statementType == SFStatementType.INSERT || statementType == SFStatementType.UPDATE || statementType == SFStatementType.DELETE || statementType == SFStatementType.MERGE || statementType == SFStatementType.MULTI_INSERT) { int columnCount = resultSet.getMetaData().getColumnCount(); for (int i = 0; i < columnCount; i++) updateCount += resultSet.getLong(i + 1); // add up number of rows updated } else { updateCount = 0; } } } else { updateCount = statementType.isGenerateResultSet() ? -1 : 0; } return updateCount; }
java
public static int listSearchCaseInsensitive(List<String> source, String target) { for (int i = 0; i < source.size(); i++) { if (target.equalsIgnoreCase(source.get(i))) { return i; } } return -1; }
java
private static List<String> getResultIds(JsonNode result) { JsonNode resultIds = result.path("data").path("resultIds"); if (resultIds.isNull() || resultIds.isMissingNode() || resultIds.asText().isEmpty()) { return Collections.emptyList(); } return new ArrayList<>(Arrays.asList(resultIds.asText().split(","))); }
java
private static List<SFStatementType> getResultTypes(JsonNode result) { JsonNode resultTypes = result.path("data").path("resultTypes"); if (resultTypes.isNull() || resultTypes.isMissingNode() || resultTypes.asText().isEmpty()) { return Collections.emptyList(); } String[] typeStrs = resultTypes.asText().split(","); List<SFStatementType> res = new ArrayList<>(); for (String typeStr : typeStrs) { long typeId = Long.valueOf(typeStr); res.add(SFStatementType.lookUpTypeById(typeId)); } return res; }
java
public static List<SFChildResult> getChildResults(SFSession session, String requestId, JsonNode result) throws SFException { List<String> ids = getResultIds(result); List<SFStatementType> types = getResultTypes(result); if (ids.size() != types.size()) { throw (SFException) IncidentUtil.generateIncidentV2WithException( session, new SFException(ErrorCode.CHILD_RESULT_IDS_AND_TYPES_DIFFERENT_SIZES, ids.size(), types.size()), null, requestId); } List<SFChildResult> res = new ArrayList<>(); for (int i = 0; i < ids.size(); i++) { res.add(new SFChildResult(ids.get(i), types.get(i))); } return res; }
java
private synchronized void openFile() { try { String fName = _directory.getAbsolutePath() + File.separatorChar + StreamLoader.FILE_PREFIX + _stamp + _fileCount; if (_loader._compressDataBeforePut) { fName += StreamLoader.FILE_SUFFIX; } LOGGER.debug("openFile: {}", fName); OutputStream fileStream = new FileOutputStream(fName); if (_loader._compressDataBeforePut) { OutputStream gzipOutputStream = new GZIPOutputStream( fileStream, 64 * 1024, true) { { def.setLevel((int) _loader._compressLevel); } }; _outstream = new BufferedOutputStream(gzipOutputStream); } else { _outstream = new BufferedOutputStream(fileStream); } _file = new File(fName); _fileCount++; } catch (IOException ex) { _loader.abort(new Loader.ConnectionError(Utils.getCause(ex))); } }
java
boolean stageData(final byte[] line) throws IOException { if (this._rowCount % 10000 == 0) { LOGGER.debug( "rowCount: {}, currentSize: {}", this._rowCount, _currentSize); } _outstream.write(line); _currentSize += line.length; _outstream.write(newLineBytes); this._rowCount++; if (_loader._testRemoteBadCSV) { // inject garbage for a negative test case // The file will be uploaded to the stage, but COPY command will // fail and raise LoaderError _outstream.write(new byte[]{(byte) 0x01, (byte) 0x02}); _outstream.write(newLineBytes); this._rowCount++; } if (_currentSize >= this._csvFileSize) { LOGGER.debug("name: {}, currentSize: {}, Threshold: {}," + " fileCount: {}, fileBucketSize: {}", _file.getAbsolutePath(), _currentSize, this._csvFileSize, _fileCount, this._csvFileBucketSize); _outstream.flush(); _outstream.close(); _outstream = null; FileUploader fu = new FileUploader(_loader, _location, _file); fu.upload(); _uploaders.add(fu); openFile(); _currentSize = 0; } return _fileCount > this._csvFileBucketSize; }
java
void completeUploading() throws IOException { LOGGER.debug("name: {}, currentSize: {}, Threshold: {}," + " fileCount: {}, fileBucketSize: {}", _file.getAbsolutePath(), _currentSize, this._csvFileSize, _fileCount, this._csvFileBucketSize); _outstream.flush(); _outstream.close(); //last file if (_currentSize > 0) { FileUploader fu = new FileUploader(_loader, _location, _file); fu.upload(); _uploaders.add(fu); } else { // delete empty file _file.delete(); } for (FileUploader fu : _uploaders) { // Finish all files being uploaded fu.join(); } // Delete the directory once we are done (for easier tracking // of what is going on) _directory.deleteOnExit(); if (this._rowCount == 0) { setState(State.EMPTY); } }
java
private static String escapeFileSeparatorChar(String fname) { if (File.separatorChar == '\\') { return fname.replaceAll(File.separator + File.separator, "_"); } else { return fname.replaceAll(File.separator, "_"); } }
java
@CheckForNull private Object executeSyncMethod(Method method, Object[] args) throws ApplicationException { if (preparingForShutdown.get()) { throw new JoynrIllegalStateException("Preparing for shutdown. Only stateless methods can be called."); } return executeMethodWithCaller(method, args, new ConnectorCaller() { @Override public Object call(Method method, Object[] args) throws ApplicationException { return connector.executeSyncMethod(method, args); } }); }
java