focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException { try { final ResourceCreationRepresentationArrayInner resourceCreationRepresentation = new ResourceCreationRepresentationArrayInner(); final String path = StringUtils.removeStart(folder.getAbsolute(), String.valueOf(Path.DELIMITER)); resourceCreationRepresentation.setPath(path); resourceCreationRepresentation.setResourceType(ResourceCreationRepresentationArrayInner.ResourceTypeEnum.CONTAINER); final EueApiClient client = new EueApiClient(session); final ResourceCreationResponseEntries resourceCreationResponseEntries = new PostChildrenForAliasApi(client).resourceAliasAliasChildrenPost( EueResourceIdProvider.ROOT, Collections.singletonList(resourceCreationRepresentation), null, null, null, null, null); if(!resourceCreationResponseEntries.containsKey(path)) { throw new NotfoundException(folder.getAbsolute()); } final ResourceCreationResponseEntry resourceCreationResponseEntry = resourceCreationResponseEntries.get(path); switch(resourceCreationResponseEntry.getStatusCode()) { case HttpStatus.SC_OK: // Already exists throw new ConflictException(folder.getAbsolute()); case HttpStatus.SC_CREATED: final String resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(resourceCreationResponseEntry.getHeaders().getLocation()); fileid.cache(folder, resourceId); return folder; default: log.warn(String.format("Failure %s creating folder %s", resourceCreationResponseEntry, folder)); final ResourceCreationResponseEntryEntity entity = resourceCreationResponseEntry.getEntity(); if(null == entity) { throw new EueExceptionMappingService().map(new ApiException(resourceCreationResponseEntry.getReason(), null, resourceCreationResponseEntry.getStatusCode(), client.getResponseHeaders())); } throw new EueExceptionMappingService().map(new ApiException(resourceCreationResponseEntry.getEntity().getError(), null, resourceCreationResponseEntry.getStatusCode(), 
client.getResponseHeaders())); } } catch(ApiException e) { throw new EueExceptionMappingService().map("Cannot create folder {0}", e, folder); } }
// Verifies that the backend treats folder names case-insensitively: creating
// "Name" and then "name" must raise ConflictException (expected by the
// annotation; fail() guards against a silent second success). The finally
// block always removes the first folder to leave the account clean.
@Test(expected = ConflictException.class) public void testCaseSensitivity() throws Exception { final EueResourceIdProvider fileid = new EueResourceIdProvider(session); final String filename = new AlphanumericRandomStringService().random(); new EueDirectoryFeature(session, fileid).mkdir(new Path(StringUtils.capitalize(filename), EnumSet.of(Path.Type.directory)), new TransferStatus()); try { new EueDirectoryFeature(session, fileid).mkdir(new Path(StringUtils.lowerCase(filename), EnumSet.of(Path.Type.directory)), new TransferStatus()); fail(); } finally { new EueDeleteFeature(session, fileid).delete(Collections.singletonList(new Path(StringUtils.capitalize(filename), EnumSet.of(Path.Type.directory))), new DisabledLoginCallback(), new Delete.DisabledCallback()); } }
// This dialect does not support case-sensitive (mixed-case) identifiers;
// part of the DatabaseMetaData-style capability surface.
@Override public boolean supportsMixedCaseIdentifiers() { return false; }
// Pins the capability flag: mixed-case identifiers must be reported as unsupported.
@Test void assertSupportsMixedCaseIdentifiers() { assertFalse(metaData.supportsMixedCaseIdentifiers()); }
/**
 * Packs a three-part version into a single {@code int} laid out as
 * {@code 0x00MMmmpp}: major in bits 16-23, minor in bits 8-15, patch in bits 0-7.
 *
 * @param major major version part, 0-255
 * @param minor minor version part, 0-255
 * @param patch patch version part, 0-255
 * @return the packed version number
 * @throws IllegalArgumentException if any part is outside 0-255, or if all
 *         three parts are zero (0.0.0 is not a valid version)
 */
public static int compose(final int major, final int minor, final int patch) {
    requireByteRange(major, "major");
    requireByteRange(minor, "minor");
    requireByteRange(patch, "patch");
    if (major + minor + patch == 0) {
        throw new IllegalArgumentException("all parts cannot be zero");
    }
    return (major << 16) | (minor << 8) | patch;
}

// Validates that a single version part fits in an unsigned byte (0-255).
private static void requireByteRange(final int value, final String name) {
    if (value < 0 || value > 255) {
        throw new IllegalArgumentException(name + " must be 0-255: " + value);
    }
}
// A negative minor part is outside the valid 0-255 range and must be rejected.
@Test void shouldDetectNegativeMinor() { assertThrows(IllegalArgumentException.class, () -> SemanticVersion.compose(1, -1, 1)); }
// Delegates to the parameterized overload using the most recently recorded
// delay statistics snapshot.
@Override public BackupRequestsStrategyStats getStats() { return getStats(_lastDelayStats.get()); }
// With no recorded requests, every stat must read zero — and a second call to
// getStats() must still return the same all-zero snapshot (no state mutation
// from merely reading stats).
@Test public void testNoActivityStats() { TrackingBackupRequestsStrategy trackingStrategy = new TrackingBackupRequestsStrategy(new MockBackupRequestsStrategy(() -> Optional.of(10000000L), () -> true)); BackupRequestsStrategyStats stats = trackingStrategy.getStats(); assertNotNull(stats); assertEquals(stats.getAllowed(), 0); assertEquals(stats.getSuccessful(), 0); assertEquals(stats.getMinDelayNano(), 0); assertEquals(stats.getMaxDelayNano(), 0); assertEquals(stats.getAvgDelayNano(), 0); stats = trackingStrategy.getStats(); assertNotNull(stats); assertEquals(stats.getAllowed(), 0); assertEquals(stats.getSuccessful(), 0); assertEquals(stats.getMinDelayNano(), 0); assertEquals(stats.getMaxDelayNano(), 0); assertEquals(stats.getAvgDelayNano(), 0); }
// Closes the transaction batch, then the connection, and only then marks the
// writer closed — so `closed` stays false if either close step throws.
// NOTE(review): callers presumably must not reuse the writer after close; the
// flag ordering looks deliberate, confirm before reordering.
public void close() throws IOException, InterruptedException { closeTxnBatch(); closeConnection(); closed = true; }
// Smoke test: constructing a HiveWriter against the test endpoint and mapper
// must succeed, and close() must complete without throwing.
@Test public void testInstantiate() throws Exception { DelimitedRecordHiveMapper mapper = new MockedDelemiteredRecordHiveMapper() .withColumnFields(new Fields(colNames)) .withPartitionFields(new Fields(partNames)); HiveEndPoint endPoint = new HiveEndPoint(metaStoreURI, dbName, tblName, Arrays.asList(partitionVals)); TestingHiveWriter writer = new TestingHiveWriter(endPoint, 10, true, timeout , callTimeoutPool, mapper, ugi, false); writer.close(); }
/**
 * Declares a managed-memory use case at operator scope with the given weight.
 *
 * @param managedMemoryUseCase use case to declare; must be operator-scoped
 * @param weight relative weight; must be strictly positive
 * @return the weight previously registered for this use case, if any
 * @throws NullPointerException if the use case is null
 * @throws IllegalArgumentException if the scope or weight is invalid
 */
public Optional<Integer> declareManagedMemoryUseCaseAtOperatorScope(
        ManagedMemoryUseCase managedMemoryUseCase, int weight) {
    checkNotNull(managedMemoryUseCase);
    checkArgument(
            managedMemoryUseCase.scope == ManagedMemoryUseCase.Scope.OPERATOR,
            "Use case is not operator scope.");
    checkArgument(weight > 0, "Weights for operator scope use cases must be greater than 0.");
    // Map.put returns the previous mapping (or null), which we surface as an Optional.
    final Integer previousWeight =
            managedMemoryOperatorScopeUseCaseWeights.put(managedMemoryUseCase, weight);
    return Optional.ofNullable(previousWeight);
}
// A negative weight violates the weight > 0 precondition and must fail with
// IllegalArgumentException.
@Test void testDeclareManagedMemoryOperatorScopeUseCaseFailNegativeWeight() { assertThatThrownBy( () -> transformation.declareManagedMemoryUseCaseAtOperatorScope( ManagedMemoryUseCase.OPERATOR, -1)) .isInstanceOf(IllegalArgumentException.class); }
/**
 * Builds the JDBC connection URL for Azure SQL: a fixed encrypted base URL,
 * optional Always Encrypted key-vault parameters, and an authentication
 * suffix chosen by the configured JDBC auth method (none for SQL auth).
 *
 * @param hostname server host name
 * @param port server port
 * @param databaseName target database
 * @return the assembled jdbc:sqlserver URL
 */
@Override public String getURL( String hostname, String port, String databaseName ) {
  final StringBuilder url = new StringBuilder();
  url.append( "jdbc:sqlserver://" ).append( hostname ).append( ":" ).append( port )
    .append( ";database=" ).append( databaseName )
    .append( ";encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;" );
  // Always Encrypted: append column-encryption and key-vault client credentials.
  if ( getAttribute( IS_ALWAYS_ENCRYPTION_ENABLED, "" ).equals( "true" ) ) {
    url.append( "columnEncryptionSetting=Enabled;keyVaultProviderClientId=" )
      .append( getAttribute( CLIENT_ID, "" ) )
      .append( ";keyVaultProviderClientKey=" )
      .append( getAttribute( CLIENT_SECRET_KEY, "" ) )
      .append( ";" );
  }
  final String authMethod = getAttribute( JDBC_AUTH_METHOD, "" );
  if ( ACTIVE_DIRECTORY_PASSWORD.equals( authMethod ) ) {
    url.append( "authentication=ActiveDirectoryPassword;" );
  } else if ( ACTIVE_DIRECTORY_MFA.equals( authMethod ) ) {
    // MFA maps to the driver's interactive authentication mode.
    url.append( "authentication=ActiveDirectoryInteractive;" );
  } else if ( ACTIVE_DIRECTORY_INTEGRATED.equals( authMethod ) ) {
    url.append( "Authentication=ActiveDirectoryIntegrated;" );
  }
  // Any other auth method (e.g. SQL authentication) gets no extra suffix.
  return url.toString();
}
// With SQL authentication and Always Encrypted disabled, the URL must contain
// only the fixed encrypted base — no auth or column-encryption suffix.
@Test public void testGetUrlWithSqlAuth(){ dbMeta.setAccessType( DatabaseMeta.TYPE_ACCESS_NATIVE ); dbMeta.addAttribute( IS_ALWAYS_ENCRYPTION_ENABLED, "false" ); dbMeta.addAttribute( JDBC_AUTH_METHOD, SQL_AUTHENTICATION ); String expectedUrl = "jdbc:sqlserver://abc.database.windows.net:1433;database=AzureDB;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;"; String actualUrl = dbMeta.getURL( "abc.database.windows.net", "1433", "AzureDB" ); assertEquals( expectedUrl, actualUrl ); }
/**
 * Parses a layout location from its compact "id,type,latY,longX" form.
 *
 * @param s compact string with exactly four comma-separated fields
 * @return the parsed layout location
 * @throws IllegalArgumentException if the field count is wrong, the id is
 *         empty, or either coordinate is not a valid double
 */
public static LayoutLocation fromCompactString(String s) {
    final String[] fields = s.split(COMMA);
    if (fields.length != 4) {
        throw new IllegalArgumentException(E_BAD_COMPACT + s);
    }
    final String id = fields[0];
    if (Strings.isNullOrEmpty(id)) {
        throw new IllegalArgumentException(E_BAD_COMPACT + E_EMPTY_ID);
    }
    final double latOrY;
    final double longOrX;
    try {
        latOrY = Double.parseDouble(fields[2]);
        longOrX = Double.parseDouble(fields[3]);
    } catch (NumberFormatException nfe) {
        // Surface a uniform IllegalArgumentException for malformed coordinates.
        throw new IllegalArgumentException(E_BAD_COMPACT + E_BAD_DOUBLE);
    }
    return LayoutLocation.layoutLocation(id, fields[1], latOrY, longOrX);
}
// An empty id field (leading comma) must be rejected.
@Test(expected = IllegalArgumentException.class) public void badCompactNoId() { fromCompactString(",GEO,1,2"); }
// Materializes the agent's configuration: loads channels, sources and sinks,
// prunes channels with no connected components (also evicting them from the
// channel cache), and registers the surviving components on the result.
// InstantiationException is logged rather than propagated, yielding a
// partially-populated (possibly empty) configuration; the local maps are
// cleared in `finally` in every case. Returns an empty configuration when no
// agent configuration exists for this host.
public MaterializedConfiguration getConfiguration() { MaterializedConfiguration conf = new SimpleMaterializedConfiguration(); FlumeConfiguration fconfig = getFlumeConfiguration(); AgentConfiguration agentConf = fconfig.getConfigurationFor(getAgentName()); if (agentConf != null) { Map<String, ChannelComponent> channelComponentMap = Maps.newHashMap(); Map<String, SourceRunner> sourceRunnerMap = Maps.newHashMap(); Map<String, SinkRunner> sinkRunnerMap = Maps.newHashMap(); try { loadChannels(agentConf, channelComponentMap); loadSources(agentConf, channelComponentMap, sourceRunnerMap); loadSinks(agentConf, channelComponentMap, sinkRunnerMap); Set<String> channelNames = new HashSet<String>(channelComponentMap.keySet()); for (String channelName : channelNames) { ChannelComponent channelComponent = channelComponentMap.get(channelName); if (channelComponent.components.isEmpty()) { LOGGER.warn("Channel {} has no components connected" + " and has been removed.", channelName); channelComponentMap.remove(channelName); Map<String, Channel> nameChannelMap = channelCache.get(channelComponent.channel.getClass()); if (nameChannelMap != null) { nameChannelMap.remove(channelName); } } else { LOGGER.info("Channel {} connected to {}", channelName, channelComponent.components.toString()); conf.addChannel(channelName, channelComponent.channel); } } for (Map.Entry<String, SourceRunner> entry : sourceRunnerMap.entrySet()) { conf.addSourceRunner(entry.getKey(), entry.getValue()); } for (Map.Entry<String, SinkRunner> entry : sinkRunnerMap.entrySet()) { conf.addSinkRunner(entry.getKey(), entry.getValue()); } } catch (InstantiationException ex) { LOGGER.error("Failed to instantiate component", ex); } finally { channelComponentMap.clear(); sourceRunnerMap.clear(); sinkRunnerMap.clear(); } } else { LOGGER.warn("No configuration found for this host:{}", getAgentName()); } return conf; }
// A recyclable channel must be reused across two getConfiguration() calls:
// the same channel instance (assertSame) is returned both times.
@Test public void testReusableChannel() throws Exception { String agentName = "agent1"; Map<String, String> properties = getPropertiesForChannel(agentName, RecyclableChannel.class.getName()); MemoryConfigurationProvider provider = new MemoryConfigurationProvider(agentName, properties); MaterializedConfiguration config1 = provider.getConfiguration(); Channel channel1 = config1.getChannels().values().iterator().next(); assertTrue(channel1 instanceof RecyclableChannel); MaterializedConfiguration config2 = provider.getConfiguration(); Channel channel2 = config2.getChannels().values().iterator().next(); assertTrue(channel2 instanceof RecyclableChannel); assertSame(channel1, channel2); }
// Test-visible convenience overload: delegates to the full factory, defaulting
// filesToStage to an empty list when the option is unset.
@VisibleForTesting static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) { return createStreamExecutionEnvironment( options, MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()), options.getFlinkConfDir()); }
// An explicitly set shutdown-sources-after-idle value must survive environment
// creation even when checkpointing is enabled.
@Test public void shouldAcceptExplicitlySetIdleSourcesFlagWithCheckpointing() { // Checkpointing enable, still accept flag FlinkPipelineOptions options = getDefaultPipelineOptions(); options.setCheckpointingInterval(1000L); options.setShutdownSourcesAfterIdleMs(42L); FlinkExecutionEnvironments.createStreamExecutionEnvironment(options); assertThat(options.getShutdownSourcesAfterIdleMs(), is(42L)); }
// Fetches the artifacts map for a specific step instance by delegating to the
// generic field lookup with the ARTIFACTS field and the artifact deserializer.
public Map<String, Artifact> getStepInstanceArtifacts( String workflowId, long workflowInstanceId, long workflowRunId, String stepId, String stepAttempt) { return getStepInstanceFieldByIds( StepInstanceField.ARTIFACTS, workflowId, workflowInstanceId, workflowRunId, stepId, stepAttempt, this::getArtifacts); }
// Artifacts for attempt "1" are empty in the fixture, and querying "latest"
// must resolve to the same (empty) result.
@Test public void testGetStepInstanceArtifacts() { Map<String, Artifact> artifacts = stepDao.getStepInstanceArtifacts(TEST_WORKFLOW_ID, 1, 1, "job1", "1"); assertTrue(artifacts.isEmpty()); Map<String, Artifact> latest = stepDao.getStepInstanceArtifacts(TEST_WORKFLOW_ID, 1, 1, "job1", "latest"); assertEquals(artifacts, latest); }
// Looks up a database through the cache layer (loads on miss via the cache's loader).
public Database getDb(String dbName) { return get(databaseCache, dbName); }
// Known database "db1" resolves through the caching metastore; an unknown
// database must surface as a StarRocksConnectorException.
@Test public void testGetDb() { CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore( metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false); Database database = cachingHiveMetastore.getDb("db1"); Assert.assertEquals("db1", database.getFullName()); try { metastore.getDb("db2"); Assert.fail(); } catch (Exception e) { Assert.assertTrue(e instanceof StarRocksConnectorException); } }
/**
 * Resolves the class version for a portable object. A {@link VersionedPortable}
 * supplies its own version; any other portable falls back to the given default.
 *
 * @param portable portable whose version is resolved
 * @param defaultVersion version used for non-versioned portables
 * @return the resolved, non-negative version
 * @throws IllegalArgumentException if a versioned portable reports a negative version
 */
@SuppressWarnings("removal") public static int getPortableVersion(Portable portable, int defaultVersion) {
    if (!(portable instanceof VersionedPortable versionedPortable)) {
        return defaultVersion;
    }
    final int classVersion = versionedPortable.getClassVersion();
    if (classVersion < 0) {
        throw new IllegalArgumentException("Version cannot be negative!");
    }
    return classVersion;
}
// A versioned portable reporting a negative class version must be rejected.
@Test(expected = IllegalArgumentException.class) public void testGetPortableVersion_negativeVersion() { SerializationUtil.getPortableVersion(new DummyVersionedPortable(), 1); }
// Adds a required top-level column; a dotted name is rejected because it would
// be ambiguous with nested addressing (callers must use the parent overload).
// Delegates to the parent-aware overload with a null parent.
@Override public UpdateSchema addRequiredColumn(String name, Type type, String doc) { Preconditions.checkArgument( !name.contains("."), "Cannot add column with ambiguous name: %s, use addColumn(parent, name, type)", name); addRequiredColumn(null, name, type, doc); return this; }
// Adding a required column to an existing schema is an incompatible change by
// default (throws); with allowIncompatibleChanges() it succeeds and yields the
// expected two-column struct.
@Test public void testAddRequiredColumn() { Schema schema = new Schema(required(1, "id", Types.IntegerType.get())); Schema expected = new Schema( required(1, "id", Types.IntegerType.get()), required(2, "data", Types.StringType.get())); assertThatThrownBy( () -> new SchemaUpdate(schema, 1).addRequiredColumn("data", Types.StringType.get())) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Incompatible change: cannot add required column: data"); Schema result = new SchemaUpdate(schema, 1) .allowIncompatibleChanges() .addRequiredColumn("data", Types.StringType.get()) .apply(); assertThat(result.asStruct()).isEqualTo(expected.asStruct()); }
// Replays a single coordinator record into the in-memory state machines,
// dispatching on the record key's version: versions 0-1 are offset-commit
// records handled by the offset manager; versions 2-14 are group/share-group
// metadata and assignment records handled by the group manager. A null record
// value (tombstone) is passed through as null via Utils.messageOrNull. An
// unrecognized version is a hard error (IllegalStateException).
@Override public void replay( long offset, long producerId, short producerEpoch, CoordinatorRecord record ) throws RuntimeException { ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); switch (key.version()) { case 0: case 1: offsetMetadataManager.replay( offset, producerId, (OffsetCommitKey) key.message(), (OffsetCommitValue) Utils.messageOrNull(value) ); break; case 2: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) Utils.messageOrNull(value) ); break; case 3: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) Utils.messageOrNull(value) ); break; case 4: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 5: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 6: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 7: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 8: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; case 9: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 10: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 11: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) Utils.messageOrNull(value) ); break; case 12: 
groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 13: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 14: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; default: throw new IllegalStateException("Received an unknown record type " + key.version() + " in " + record); } }
// A version-3 record with a null value (tombstone) must be forwarded to the
// group metadata manager as (key, null).
@Test public void testReplayConsumerGroupMetadataWithNullValue() { GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class); OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class); CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); GroupCoordinatorShard coordinator = new GroupCoordinatorShard( new LogContext(), groupMetadataManager, offsetMetadataManager, Time.SYSTEM, new MockCoordinatorTimer<>(Time.SYSTEM), mock(GroupCoordinatorConfig.class), coordinatorMetrics, metricsShard ); ConsumerGroupMetadataKey key = new ConsumerGroupMetadataKey(); coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( new ApiMessageAndVersion(key, (short) 3), null )); verify(groupMetadataManager, times(1)).replay(key, null); }
// Merges sharded query results for a SELECT: a single result with no needed
// aggregation rewrite streams through unchanged; otherwise the column-label
// index map is computed from the first result, recorded on the statement
// context, and the merged result is built and then decorated (e.g. for
// pagination) before being returned.
@Override public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext, final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException { if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) { return new IteratorStreamMergedResult(queryResults); } Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0)); SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext; selectStatementContext.setIndexes(columnLabelIndexMap); MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database); return decorate(queryResults, selectStatementContext, mergedResult); }
// A SQLServer SELECT with a LIMIT segment must be wrapped in the
// top/row-number decorator, with an iterator-stream result underneath.
@Test void assertBuildIteratorStreamMergedResultWithSQLServerLimit() throws SQLException { final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "SQLServer")); ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class)); SQLServerSelectStatement selectStatement = (SQLServerSelectStatement) buildSelectStatement(new SQLServerSelectStatement()); selectStatement.setProjections(new ProjectionsSegment(0, 0)); selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralLimitValueSegment(0, 0, 1L), null)); SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(), selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList()); MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createSQLServerDatabase(), mock(ConnectionContext.class)); assertThat(actual, instanceOf(TopAndRowNumberDecoratorMergedResult.class)); assertThat(((TopAndRowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(IteratorStreamMergedResult.class)); }
/**
 * Renders this statistic as "(samples=…, sum=…, mean=…)" with the mean
 * formatted to four decimal places.
 */
@Override public String toString() {
    final StringBuilder text = new StringBuilder("(samples=");
    text.append(samples)
        .append(", sum=").append(sum)
        .append(", mean=").append(String.format("%.4f", mean()))
        .append(')');
    return text.toString();
}
// A negative sample count makes the statistic empty: it equals the canonical
// empty instance, its mean is zero, and the rendered string shows "0.0".
@Test public void testNegativeSamplesAreEmpty() throws Throwable { MeanStatistic stat = new MeanStatistic(-10, 1); Assertions.assertThat(stat) .describedAs("stat with negative samples") .matches(MeanStatistic::isEmpty, "is empty") .isEqualTo(empty) .extracting(MeanStatistic::mean) .isEqualTo(ZEROD); Assertions.assertThat(stat.toString()) .contains("0.0"); }
// Flushes one batch: spins on the rate limiter, yielding to the mailbox so
// other actions (e.g. completing in-flight requests) can run, and rebuilding
// the request info after each yield since state may have changed. An empty
// batch is a no-op. Otherwise the request is registered with the limiter and
// the in-flight counter is bumped BEFORE submission, so the result handler's
// bookkeeping stays balanced even if completion is immediate.
private void flush() throws InterruptedException { RequestInfo requestInfo = createRequestInfo(); while (rateLimitingStrategy.shouldBlock(requestInfo)) { mailboxExecutor.yield(); requestInfo = createRequestInfo(); } List<RequestEntryT> batch = createNextAvailableBatch(requestInfo); if (batch.isEmpty()) { return; } long requestTimestamp = System.currentTimeMillis(); rateLimitingStrategy.registerInFlightRequest(requestInfo); inFlightRequestsCount++; submitRequestEntries( batch, new AsyncSinkWriterResultHandler(requestTimestamp, batch, requestInfo)); }
// With max in-flight requests = 1 and the first write blocked, a concurrent
// flush(true) must block rather than return: thread `s` is expected to stay
// un-interrupted (i.e. still parked in flush) after 300ms, is then interrupted
// to unwind, the blocked write is released, and all three records must have
// been written in order.
@Test public void ifTheNumberOfUncompletedInFlightRequestsIsTooManyThenBlockInFlushMethod() throws Exception { CountDownLatch blockedWriteLatch = new CountDownLatch(1); CountDownLatch delayedStartLatch = new CountDownLatch(1); AsyncSinkWriterImpl sink = new AsyncSinkReleaseAndBlockWriterImpl( sinkInitContextAnyThreadMailbox, 1, blockedWriteLatch, delayedStartLatch, false); Thread t = new Thread( () -> { try { sink.writeAsNonMailboxThread("1"); sink.writeAsNonMailboxThread("2"); sink.writeAsNonMailboxThread("3"); } catch (IOException | InterruptedException e) { e.printStackTrace(); fail( "Auxiliary thread encountered an exception when writing to the sink", e); } }); t.start(); delayedStartLatch.await(); Thread s = new Thread( () -> { try { sink.flush(true); fail( "Sink did not block successfully and reached here when it shouldn't have."); } catch (InterruptedException ignored) { } }); Thread.sleep(300); assertThat(s.isInterrupted()).isFalse(); s.interrupt(); blockedWriteLatch.countDown(); t.join(); assertThat(res).isEqualTo(Arrays.asList(1, 2, 3)); }
// Computes the resulting attribute map after applying "+key" (add/update) and
// "-key" (delete) directives. In create mode only "+" is allowed and goes to
// `init`; otherwise "+" becomes an add or an update depending on whether the
// key already exists, and "-" requires the key to exist. Keys are validated
// and checked for duplicates (on their real, prefix-stripped form), each
// bucket is validated against the attribute definitions, and the final map is
// current attributes + init/add/update with deleted keys removed.
// NOTE(review): delete passes the directive's value through to validateAlter
// even though deletion ignores it — presumably intentional, confirm.
public static Map<String, String> alterCurrentAttributes(boolean create, Map<String, Attribute> all, ImmutableMap<String, String> currentAttributes, ImmutableMap<String, String> newAttributes) { Map<String, String> init = new HashMap<>(); Map<String, String> add = new HashMap<>(); Map<String, String> update = new HashMap<>(); Map<String, String> delete = new HashMap<>(); Set<String> keys = new HashSet<>(); for (Map.Entry<String, String> attribute : newAttributes.entrySet()) { String key = attribute.getKey(); String realKey = realKey(key); String value = attribute.getValue(); validate(realKey); duplicationCheck(keys, realKey); if (create) { if (key.startsWith("+")) { init.put(realKey, value); } else { throw new RuntimeException("only add attribute is supported while creating topic. key: " + realKey); } } else { if (key.startsWith("+")) { if (!currentAttributes.containsKey(realKey)) { add.put(realKey, value); } else { update.put(realKey, value); } } else if (key.startsWith("-")) { if (!currentAttributes.containsKey(realKey)) { throw new RuntimeException("attempt to delete a nonexistent key: " + realKey); } delete.put(realKey, value); } else { throw new RuntimeException("wrong format key: " + realKey); } } } validateAlter(all, init, true, false); validateAlter(all, add, false, false); validateAlter(all, update, false, false); validateAlter(all, delete, false, true); log.info("add: {}, update: {}, delete: {}", add, update, delete); HashMap<String, String> finalAttributes = new HashMap<>(currentAttributes); finalAttributes.putAll(init); finalAttributes.putAll(add); finalAttributes.putAll(update); for (String s : delete.keySet()) { finalAttributes.remove(s); } return finalAttributes; }
// In update mode a key without a "+"/"-" prefix is malformed and must throw.
@Test(expected = RuntimeException.class) public void alterCurrentAttributes_UpdateMode_WrongFormatKey_ShouldThrowException() { ImmutableMap<String, String> newAttributes = ImmutableMap.of("attr1", "+value1"); AttributeUtil.alterCurrentAttributes(false, allAttributes, currentAttributes, newAttributes); }
// Advances to the next file to read, either from a filename field in the
// incoming rows (dynamic mode) or from the configured file list. Returns
// false when input is exhausted or on error (after stopAll/setErrors), true
// when a file was opened and its content loaded. Missing files (when allowed)
// and empty files (when ignored) are skipped by recursing into openNextFile().
// On the first row the output/convert row metadata is initialized and the
// filename field index is cached. Optional metadata fields (short name, path,
// hidden, extension, mtime, URI, root URI) are populated only when configured.
boolean openNextFile() { try { if ( meta.getFileInFields() ) { data.readrow = getRow(); // Grab another row ... if ( data.readrow == null ) { // finished processing! if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FinishedProcessing" ) ); } return false; } if ( first ) { first = false; data.inputRowMeta = getInputRowMeta(); data.outputRowMeta = data.inputRowMeta.clone(); meta.getFields( data.outputRowMeta, getStepname(), null, null, this, repository, metaStore ); // Create convert meta-data objects that will contain Date & Number formatters // All non binary content is handled as a String. It would be converted to the target type after the processing. data.convertRowMeta = data.outputRowMeta.cloneToType( ValueMetaInterface.TYPE_STRING ); if ( meta.getFileInFields() ) { // Check is filename field is provided if ( Utils.isEmpty( meta.getDynamicFilenameField() ) ) { logError( BaseMessages.getString( PKG, "LoadFileInput.Log.NoField" ) ); throw new KettleException( BaseMessages.getString( PKG, "LoadFileInput.Log.NoField" ) ); } // cache the position of the field if ( data.indexOfFilenameField < 0 ) { data.indexOfFilenameField = data.inputRowMeta.indexOfValue( meta.getDynamicFilenameField() ); if ( data.indexOfFilenameField < 0 ) { // The field is unreachable ! logError( BaseMessages.getString( PKG, "LoadFileInput.Log.ErrorFindingField" ) + "[" + meta.getDynamicFilenameField() + "]" ); throw new KettleException( BaseMessages.getString( PKG, "LoadFileInput.Exception.CouldnotFindField", meta.getDynamicFilenameField() ) ); } } // Get the number of previous fields data.totalpreviousfields = data.inputRowMeta.size(); } } // end if first // get field value String Fieldvalue = data.inputRowMeta.getString( data.readrow, data.indexOfFilenameField ); if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.Stream", meta.getDynamicFilenameField(), Fieldvalue ) ); } try { // Source is a file. 
data.file = KettleVFS.getFileObject( Fieldvalue ); } catch ( Exception e ) { throw new KettleException( e ); } } else { if ( data.filenr >= data.files.nrOfFiles() ) { // finished processing! if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FinishedProcessing" ) ); } return false; } // Is this the last file? data.last_file = ( data.filenr == data.files.nrOfFiles() - 1 ); data.file = data.files.getFile( data.filenr ); } // Check if file exists if ( meta.isIgnoreMissingPath() && !data.file.exists() ) { logBasic( BaseMessages.getString( PKG, "LoadFileInput.Error.FileNotExists", "" + data.file.getName() ) ); return openNextFile(); } // Check if file is empty data.fileSize = data.file.getContent().getSize(); // Move file pointer ahead! data.filenr++; if ( meta.isIgnoreEmptyFile() && data.fileSize == 0 ) { logError( BaseMessages.getString( PKG, "LoadFileInput.Error.FileSizeZero", "" + data.file.getName() ) ); return openNextFile(); } else { if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.OpeningFile", data.file.toString() ) ); } data.filename = KettleVFS.getFilename( data.file ); // Add additional fields? 
if ( meta.getShortFileNameField() != null && meta.getShortFileNameField().length() > 0 ) { data.shortFilename = data.file.getName().getBaseName(); } if ( meta.getPathField() != null && meta.getPathField().length() > 0 ) { data.path = KettleVFS.getFilename( data.file.getParent() ); } if ( meta.isHiddenField() != null && meta.isHiddenField().length() > 0 ) { data.hidden = data.file.isHidden(); } if ( meta.getExtensionField() != null && meta.getExtensionField().length() > 0 ) { data.extension = data.file.getName().getExtension(); } if ( meta.getLastModificationDateField() != null && meta.getLastModificationDateField().length() > 0 ) { data.lastModificationDateTime = new Date( data.file.getContent().getLastModifiedTime() ); } if ( meta.getUriField() != null && meta.getUriField().length() > 0 ) { data.uriName = Const.optionallyDecodeUriString( data.file.getName().getURI() ); } if ( meta.getRootUriField() != null && meta.getRootUriField().length() > 0 ) { data.rootUriName = data.file.getName().getRootURI(); } // get File content getFileContent(); addFileToResultFilesName( data.file ); if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FileOpened", data.file.toString() ) ); } } } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "LoadFileInput.Log.UnableToOpenFile", "" + data.filenr, data.file .toString(), e.toString() ) ); stopAll(); setErrors( 1 ); return false; } return true; }
// With two input files and the ignore-empty-file default (false), openNextFile
// succeeds twice and then reports exhaustion on the third call.
@Test public void testOpenNextFile_01() { assertFalse( stepMetaInterface.isIgnoreEmptyFile() ); // ensure default value stepInputFiles.addFile( getFile( "input0.txt" ) ); stepInputFiles.addFile( getFile( "input1.txt" ) ); assertTrue( stepLoadFileInput.openNextFile() ); assertTrue( stepLoadFileInput.openNextFile() ); assertFalse( stepLoadFileInput.openNextFile() ); }
// Produces a canonical form of this range set for plan comparison: when
// removeSafeConstants is set, each range's low key is replaced by a synthetic
// ascending BIGINT marker (1, 2, 3, ...) and the range itself is canonicalized
// — constants are stripped only for point ranges (both bounds EXACTLY).
// When removeSafeConstants is false the set is returned unchanged.
@Override public ValueSet canonicalize(boolean removeSafeConstants) { if (!removeSafeConstants) { return this; } AtomicLong counter = new AtomicLong(0); return new SortedRangeSet( type, lowIndexedRanges.entrySet().stream() .collect(toMap( // Since map values contain all range information, we can mark all keys as 0, 1, 2... in ascending order. entry -> Marker.exactly(BIGINT, counter.incrementAndGet()), entry -> { boolean removeConstants = entry.getValue().getLow().getBound().equals(Marker.Bound.EXACTLY) && entry.getValue().getHigh().getBound().equals(Marker.Bound.EXACTLY); return entry.getValue().canonicalize(removeConstants); }, (e1, e2) -> { throw new IllegalStateException(format("Duplicate key %s", e1)); }, TreeMap::new))); }
// Exercises canonicalization equivalence: without constant removal, sets match
// only when identical; with constant removal, point-range constants are
// ignored (1,2 vs 3,4 compare equal) but range shapes, bound inclusivity,
// emptiness, and types still distinguish sets.
@Test public void testCanonicalize() throws Exception { assertSameSet(SortedRangeSet.all(BIGINT), SortedRangeSet.all(BIGINT), false); assertSameSet( SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.range(BIGINT, 5L, false, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.range(BIGINT, 5L, false, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), false); assertDifferentSet( SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.range(BIGINT, 5L, false, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.equal(BIGINT, 1L), Range.equal(BIGINT, 3L), Range.range(BIGINT, 5L, false, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), false); assertDifferentSet(SortedRangeSet.all(BIGINT), SortedRangeSet.none(BIGINT), false); assertDifferentSet(SortedRangeSet.all(BIGINT), SortedRangeSet.all(VARCHAR), false); assertSameSet( SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L), Range.range(BIGINT, 5L, false, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.equal(BIGINT, 3L), Range.equal(BIGINT, 4L), Range.range(BIGINT, 5L, false, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), true); assertSameSet(SortedRangeSet.all(BIGINT), SortedRangeSet.all(BIGINT), true); assertDifferentSet( SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.range(BIGINT, 0L, false, 2L, false), Range.range(BIGINT, 5L, false, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), SortedRangeSet.of( Range.lessThan(BIGINT, 0L), Range.range(BIGINT, 0L, false, 3L, false), Range.range(BIGINT, 5L, true, 9L, false), Range.greaterThanOrEqual(BIGINT, 11L)), true); assertDifferentSet(SortedRangeSet.all(BIGINT), SortedRangeSet.all(BOOLEAN), true); }
// Estimates the encoded size including the VarInt length prefix. When the
// value coder is a StructuredCoder we ask it directly for the value's size and
// add the prefix length; otherwise we fall back to the superclass, which
// encodes the value (prefix included) and counts bytes.
@Override protected long getEncodedElementByteSize(T value) throws Exception { if (valueCoder instanceof StructuredCoder) { // If valueCoder is a StructuredCoder then we can ask it directly for the encoded size of // the value, adding the number of bytes to represent the length. long valueSize = ((StructuredCoder<T>) valueCoder).getEncodedElementByteSize(value); return VarInt.getLength(valueSize) + valueSize; } // If value is not a StructuredCoder then fall back to the default StructuredCoder behavior // of encoding and counting the bytes. The encoding will include the length prefix. return super.getEncodedElementByteSize(value); }
// Verifies the coder reports the expected encoded size (5 bytes) for the first test value.
@Test public void testEncodedSize() throws Exception { assertEquals(5L, TEST_CODER.getEncodedElementByteSize(TEST_VALUES.get(0))); }
// Maps a Presto metric key (e.g. "TableScan.0.BlockedWaitForSplitTimes") to a Spark internal
// accumulator key of the form <SPARK_INTERNAL_ACCUMULATOR_PREFIX><PRESTO_NATIVE_OPERATOR_STATS_PREFIX><Operator><Stat>.
// Keys already containing the Spark prefix are truncated at the separator; keys with fewer than
// two dot-separated parts are unsupported and yield "".
// NOTE(review): when the rebuilt key contains '_', toCamelCase is applied to the ORIGINAL
// prestoKey rather than prestoNewKey — verify this is intentional (dots would survive camel-casing).
static String getSparkInternalAccumulatorKey(final String prestoKey) { if (prestoKey.contains(SPARK_INTERNAL_ACCUMULATOR_PREFIX)) { int index = prestoKey.indexOf(PRESTO_NATIVE_OPERATOR_STATS_SEP); return prestoKey.substring(index); } String[] prestoKeyParts = prestoKey.split("\\."); int prestoKeyPartsLength = prestoKeyParts.length; if (prestoKeyPartsLength < 2) { log.debug("Fail to build spark internal key for %s format not supported", prestoKey); return ""; } String prestoNewKey = String.format("%1$s%2$s", prestoKeyParts[0], prestoKeyParts[prestoKeyPartsLength - 1]); if (prestoNewKey.contains("_")) { prestoNewKey = CaseUtils.toCamelCase(prestoKey, false, '_'); } return String.format("%1$s%2$s%3$s", SPARK_INTERNAL_ACCUMULATOR_PREFIX, PRESTO_NATIVE_OPERATOR_STATS_PREFIX, prestoNewKey); }
// Verifies the happy path: operator name and stat name are joined under the Spark internal prefix.
@Test public void getSparkInternalAccumulatorKeyTest() { String expected = "internal.metrics.velox.TableScanBlockedWaitForSplitTimes"; String prestoKey = "TableScan.0.BlockedWaitForSplitTimes"; String actual = getSparkInternalAccumulatorKey(prestoKey); assertEquals(actual, expected); }
// Populates the generated model's GET_CREATED_KIEPMMLTARGETS method body with object-creation
// expressions for the given target fields, wiring them into the TO_RETURN list getter.
public static void populateGetCreatedKiePMMLTargetsMethod(final ClassOrInterfaceDeclaration modelTemplate, final List<TargetField> targetFields) { final MethodDeclaration methodDeclaration = modelTemplate.getMethodsByName(GET_CREATED_KIEPMMLTARGETS).get(0); final List<MethodCallExpr> kiePMMLTargetFieldsObjectCreations = getKiePMMLTargetFieldsObjectCreations(targetFields); populateListInListGetter(kiePMMLTargetFieldsObjectCreations, methodDeclaration, TO_RETURN); }
// Generates three random TargetFields, populates the template method, and compares the resulting
// AST against a templated expected source file (TEST_10_SOURCE) filled with the same field values.
@Test void populateGetCreatedKiePMMLTargetsMethod() throws IOException { Random random = new Random(); List<TargetField> kiePMMLTargets = IntStream.range(0, 3).mapToObj(i -> new TargetField(Collections.emptyList(), OP_TYPE.byName(getRandomOpType().value()), "Target-" + i, CAST_INTEGER.byName(getRandomCastInteger().value()), (double) random.nextInt(20), (double) random.nextInt(60) + 20, (double) random.nextInt(100) / 100, (double) random.nextInt(100) / 100 )).collect(Collectors.toList()); String opType0 = OP_TYPE.class.getCanonicalName() + "." + kiePMMLTargets.get(0).getOpType().toString(); String castInteger0 = CAST_INTEGER.class.getCanonicalName() + "." + kiePMMLTargets.get(0).getCastInteger().toString(); String opType1 = OP_TYPE.class.getCanonicalName() + "." + kiePMMLTargets.get(1).getOpType().toString(); String castInteger1 = CAST_INTEGER.class.getCanonicalName() + "." + kiePMMLTargets.get(1).getCastInteger().toString(); String opType2 = OP_TYPE.class.getCanonicalName() + "." + kiePMMLTargets.get(2).getOpType().toString(); String castInteger2 = CAST_INTEGER.class.getCanonicalName() + "." 
+ kiePMMLTargets.get(2).getCastInteger().toString(); org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.populateGetCreatedKiePMMLTargetsMethod(classOrInterfaceDeclaration, kiePMMLTargets); final MethodDeclaration retrieved = classOrInterfaceDeclaration.getMethodsByName(GET_CREATED_KIEPMMLTARGETS).get(0); String text = getFileContent(TEST_10_SOURCE); MethodDeclaration expected = JavaParserUtils.parseMethod(String.format(text, kiePMMLTargets.get(0).getName(), opType0, castInteger0, kiePMMLTargets.get(0).getMin(), kiePMMLTargets.get(0).getMax(), kiePMMLTargets.get(0).getRescaleConstant(), kiePMMLTargets.get(0).getRescaleFactor(), kiePMMLTargets.get(1).getName(), opType1, castInteger1, kiePMMLTargets.get(1).getMin(), kiePMMLTargets.get(1).getMax(), kiePMMLTargets.get(1).getRescaleConstant(), kiePMMLTargets.get(1).getRescaleFactor(), kiePMMLTargets.get(2).getName(), opType2, castInteger2, kiePMMLTargets.get(2).getMin(), kiePMMLTargets.get(2).getMax(), kiePMMLTargets.get(2).getRescaleConstant(), kiePMMLTargets.get(2).getRescaleFactor())); assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue(); }
/**
 * Resolves {@code path} against {@code baseDir} when it is relative; an absolute path is kept
 * as-is. The result is normalized (".."/"." segments collapsed) and returned as a {@link File}.
 */
protected static File resolvePath(File baseDir, String path) {
    Path candidate = Paths.get(path);
    // Relative inputs are anchored at the project base directory.
    Path resolved = candidate.isAbsolute() ? candidate : baseDir.toPath().resolve(path);
    return resolved.normalize().toFile();
}
// Verifies a relative path is resolved against the given base directory.
@Test public void shouldGetRelativeFile() { assertThat(ProjectReactorBuilder.resolvePath(getResource(this.getClass(), "/"), "shouldGetFile/foo.properties")) .isEqualTo(getResource(this.getClass(), "shouldGetFile/foo.properties")); }
// Splits the configured inline expression, evaluates each Groovy segment, and flattens the
// results; a null/empty expression yields an empty list.
@Override public List<String> splitAndEvaluate() { return Strings.isNullOrEmpty(inlineExpression) ? Collections.emptyList() : flatten(evaluate(GroovyUtils.split(handlePlaceHolder(inlineExpression)))); }
// Verifies a plain comma-separated expression is split and trimmed into its two table names.
@Test void assertEvaluateForSimpleString() { List<String> expected = TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", PropertiesBuilder.build( new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, " t_order_0, t_order_1 "))).splitAndEvaluate(); assertThat(expected.size(), is(2)); assertThat(expected, hasItems("t_order_0", "t_order_1")); }
// Delegates to the full delete overload, reading the "also delete large-object segments"
// flag from the host preferences.
@Override public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { this.delete(files, prompt, callback, new HostPreferences(session.getHost()).getBoolean("openstack.delete.largeobject.segments")); }
// Verifies deleting a non-existent object raises NotfoundException.
@Test(expected = NotfoundException.class) public void testDeleteNotFoundKey() throws Exception { final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("IAD"); final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new SwiftDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Returns the epoch-millisecond timestamp exactly one minute from now.
 */
public static long getNextMinute() {
    final long oneMinuteInMillis = 60L * 1000L;
    return Calendar.getInstance().getTimeInMillis() + oneMinuteInMillis;
}
// Verifies the returned timestamp lies in the (now, now + 61s) window.
@Test public void getNextMinute() { var nextMinute = DateUtils.getNextMinute(); assertTrue(Calendar.getInstance().getTimeInMillis() < nextMinute); assertTrue(nextMinute < Calendar.getInstance().getTimeInMillis() + 61 * 1000); }
// Starts the socket appender: validates port/host/queueSize configuration (accumulating errors),
// resolves the remote host, and only when everything is valid creates the event deque and
// connector and schedules the connect-and-dispatch loop before marking the appender started.
public void start() { if (isStarted()) return; int errorCount = 0; if (port <= 0) { errorCount++; addError("No port was configured for appender" + name + " For more information, please visit http://logback.qos.ch/codes.html#socket_no_port"); } if (remoteHost == null) { errorCount++; addError("No remote host was configured for appender" + name + " For more information, please visit http://logback.qos.ch/codes.html#socket_no_host"); } if (queueSize == 0) { addWarn("Queue size of zero is deprecated, use a size of one to indicate synchronous processing"); } if (queueSize < 0) { errorCount++; addError("Queue size must be greater than zero"); } if (errorCount == 0) { try { address = InetAddress.getByName(remoteHost); } catch (UnknownHostException ex) { addError("unknown host: " + remoteHost); errorCount++; } } if (errorCount == 0) { deque = queueFactory.newLinkedBlockingDeque(queueSize); peerId = "remote peer " + remoteHost + ":" + port + ": "; connector = createConnector(address, port, 0, reconnectionDelay.getMilliseconds()); task = getContext().getScheduledExecutorService().submit(new Runnable() { public void run() { connectSocketAndDispatchEvents(); } }); super.start(); } }
// Verifies an interrupt during connection results in a "shutting down" info message.
@Test public void addsInfoMessageWhenShuttingDownDueToInterrupt() throws Exception { // given doThrow(new InterruptedException()).when(socketConnector).call(); // when appender.start(); // then verify(appender, timeout(TIMEOUT)).addInfo(contains("shutting down")); }
// Seals the collector and returns the accumulated output future, atomically (under the monitor)
// replacing it with a fresh empty future so later callers observe an empty result.
@Override public CompletionStage<Collection<WindowedValue<OutT>>> finish() { /* * We can ignore the results here because its okay to call finish without invoking prepare. It will be a no-op * and an empty collection will be returned. */ collectorSealed.compareAndSet(false, true); synchronized (this) { final CompletionStage<Collection<WindowedValue<OutT>>> sealedOutputFuture = outputFuture; outputFuture = CompletableFuture.completedFuture(new ArrayList<>()); return sealedOutputFuture; } }
// Verifies finish() without a prior prepare() yields an empty result collection.
@Test public void testFinishWithoutPrepareReturnsEmptyCollection() { CompletionStage<Collection<WindowedValue<String>>> resultFuture = futureCollector.finish(); CompletionStage<Void> validationFuture = resultFuture.thenAccept( result -> { Assert.assertTrue("Expected the result to be empty", result.isEmpty()); }); validationFuture.toCompletableFuture().join(); }
// Looks up the view's backing search and delegates the parameter-support check to it;
// a missing search is a programming/state error and raises IllegalStateException.
@Override public Map<String, PluginMetadataSummary> test(ViewDTO view) { final Optional<Search> optionalSearch = searchDbService.get(view.searchId()); return optionalSearch.map(searchRequiresParameterSupport::test) .orElseThrow(() -> new IllegalStateException("Search " + view.searchId() + " for view " + view + " is missing.")); }
// Verifies a missing backing search raises IllegalStateException with a descriptive message.
@Test public void throwsExceptionIfSearchIsMissing() { when(searchDbService.get("searchId")).thenReturn(Optional.empty()); assertThatThrownBy(() -> this.requiresParameterSupport.test(view)) .isInstanceOf(IllegalStateException.class) .hasMessageStartingWith("Search searchId for view") .hasMessageEndingWith("is missing."); }
// Convenience wrapper: converts an arbitrary value to a List via the generic convert helper.
public static List<?> toList(Object value) { return convert(List.class, value); }
// Verifies a comma-separated string converts to element-typed lists (String and Integer).
@Test public void toListTest2() { final String str = "1,2"; final List<String> list2 = Convert.toList(String.class, str); assertEquals("1", list2.get(0)); assertEquals("2", list2.get(1)); final List<Integer> list3 = Convert.toList(Integer.class, str); assertEquals(1, list3.get(0).intValue()); assertEquals(2, list3.get(1).intValue()); }
// Orders hits by sort data when present, otherwise by descending relevance; ties are broken
// by comparing global ids so the ordering is total and stable.
@Override public int compareTo(LeanHit o) { int res = (sortData != null) ? compareData(sortData, o.sortData) : Double.compare(o.relevance, relevance); return (res != 0) ? res : compareData(gid, o.gid); }
// Verifies equal hits compare as 0 and gid-based ordering is transitive.
@Test void testOrderingByGid() { assertEquals(0, new LeanHit(gidA, 0, 0, 1).compareTo(new LeanHit(gidA, 0, 0, 1))); verifyTransitiveOrdering(new LeanHit(gidA, 0, 0, 1), new LeanHit(gidB, 0, 0, 1), new LeanHit(gidC, 0, 0, 1)); }
/**
 * Reorders the given migrations in place so copy and shift-up operations are prioritized,
 * then (at FINEST level) logs the resulting order.
 *
 * @param migrations migration list to prioritize; mutated in place
 */
void prioritizeCopiesAndShiftUps(List<MigrationInfo> migrations) {
    for (int i = 0; i < migrations.size(); i++) {
        prioritize(migrations, i);
    }
    if (logger.isFinestEnabled()) {
        StringBuilder s = new StringBuilder("Migration order after prioritization: [");
        int ix = 0;
        for (MigrationInfo migration : migrations) {
            s.append("\n\t").append(ix++).append("- ").append(migration).append(",");
        }
        // Strip the trailing comma only when something was appended; the previous
        // unconditional deleteCharAt chopped the opening '[' for an empty list.
        if (!migrations.isEmpty()) {
            s.deleteCharAt(s.length() - 1);
        }
        s.append("]");
        logger.finest(s.toString());
    }
}
// Verifies a copy is NOT prioritized ahead of a shift-down to a hotter replica index:
// the original migration order must be preserved after prioritization.
@Test public void testNoCopyPrioritizationAgainstShiftDownToHotterIndex() throws UnknownHostException { List<MigrationInfo> migrations = new ArrayList<>(); final MigrationInfo migration1 = new MigrationInfo(0, new PartitionReplica(new Address("localhost", 5701), uuids[0]), new PartitionReplica(new Address("localhost", 5702), uuids[1]), 0, 1, -1, 0); final MigrationInfo migration2 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5703), uuids[2]), -1, -1, -1, 2); migrations.add(migration1); migrations.add(migration2); migrationPlanner.prioritizeCopiesAndShiftUps(migrations); assertEquals(asList(migration1, migration2), migrations); }
/**
 * Ensures the given argument is not {@code null}.
 *
 * @param argument     value to check
 * @param errorMessage message used for the NullPointerException when the check fails
 * @return the argument itself, when non-null
 * @throws NullPointerException if {@code argument} is {@code null}
 */
@Nonnull
public static <T> T checkNotNull(T argument, String errorMessage) {
    if (argument == null) {
        throw new NullPointerException(errorMessage);
    }
    return argument;
}
// Verifies a null argument raises NullPointerException carrying the supplied message.
@Test public void checkNotNull_whenNull() { String msg = "Can't be null"; NullPointerException exception = assertThrows(NullPointerException.class, () -> Preconditions.checkNotNull(null, msg)); assertEquals(msg, exception.getMessage()); }
// Computes the remediation effort (debt) for an issue: external-engine issues keep their own
// effort; otherwise the rule's remediation function is applied (gap multiplier x gap, plus the
// base effort). Returns null when the rule defines no remediation function.
@CheckForNull public Duration calculate(DefaultIssue issue) { if (issue.isFromExternalRuleEngine()) { return issue.effort(); } Rule rule = ruleRepository.getByKey(issue.ruleKey()); DebtRemediationFunction fn = rule.getRemediationFunction(); if (fn != null) { verifyEffortToFix(issue, fn); Duration debt = Duration.create(0); String gapMultiplier = fn.gapMultiplier(); if (fn.type().usesGapMultiplier() && !Strings.isNullOrEmpty(gapMultiplier)) { int effortToFixValue = MoreObjects.firstNonNull(issue.gap(), 1).intValue(); // TODO convert to Duration directly in Rule#remediationFunction -> better performance + error handling debt = durations.decode(gapMultiplier).multiply(effortToFixValue); } String baseEffort = fn.baseEffort(); if (fn.type().usesBaseEffort() && !Strings.isNullOrEmpty(baseEffort)) { // TODO convert to Duration directly in Rule#remediationFunction -> better performance + error handling debt = debt.add(durations.decode(baseEffort)); } return debt; } return null; }
// Verifies that no remediation function on the rule means a null (absent) debt.
@Test public void no_debt_if_function_is_not_defined() { DefaultIssue issue = new DefaultIssue().setRuleKey(rule.getKey()); assertThat(underTest.calculate(issue)).isNull(); }
// Creates (or retrieves) the DEBUG-level task sensor reporting the task's total cache size in
// bytes, registering the value metric with the task-level tags.
public static Sensor totalCacheSizeBytesSensor(final String threadId, final String taskId, final StreamsMetricsImpl streamsMetrics) { final String name = CACHE_SIZE_BYTES_TOTAL; final Sensor sensor = streamsMetrics.taskLevelSensor(threadId, taskId, name, RecordingLevel.DEBUG); addValueMetricToSensor( sensor, TASK_LEVEL_GROUP, streamsMetrics.taskLevelTagMap(threadId, taskId), name, CACHE_SIZE_BYTES_TOTAL_DESCRIPTION ); return sensor; }
// Verifies the sensor is created with the expected name/level and the value metric is registered.
@Test public void shouldGetTotalCacheSizeInBytesSensor() { final String operation = "cache-size-bytes-total"; when(streamsMetrics.taskLevelSensor(THREAD_ID, TASK_ID, operation, RecordingLevel.DEBUG)) .thenReturn(expectedSensor); final String totalBytesDescription = "The total size in bytes of this task's cache."; when(streamsMetrics.taskLevelTagMap(THREAD_ID, TASK_ID)).thenReturn(tagMap); try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) { final Sensor sensor = TaskMetrics.totalCacheSizeBytesSensor(THREAD_ID, TASK_ID, streamsMetrics); streamsMetricsStaticMock.verify( () -> StreamsMetricsImpl.addValueMetricToSensor( expectedSensor, TASK_LEVEL_GROUP, tagMap, operation, totalBytesDescription ) ); assertThat(sensor, is(expectedSensor)); } }
public Optional<String> findShardingColumn(final String columnName, final String tableName) { return Optional.ofNullable(shardingTables.get(tableName)).flatMap(optional -> findShardingColumn(optional, columnName)); }
// Verifies the sharding column is found (case-insensitively for the table name) when the table
// has an explicit table sharding strategy.
@Test void assertFindShardingColumnForTableShardingStrategy() { ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration(); shardingRuleConfig.getTables().add(createTableRuleConfigWithTableStrategies()); shardingRuleConfig.getShardingAlgorithms().put("core_standard_fixture", new AlgorithmConfiguration("CORE.STANDARD.FIXTURE", new Properties())); Optional<String> actual = new ShardingRule(shardingRuleConfig, createDataSources(), mock(ComputeNodeInstanceContext.class)).findShardingColumn("column", "logic_Table"); assertTrue(actual.isPresent()); assertThat(actual.get(), is("column")); }
// Builds the Iceberg read SchemaTransform from the supplied configuration.
@Override protected SchemaTransform from(SchemaTransformConfiguration configuration) { return new IcebergReadSchemaTransform(configuration); }
// End-to-end scan test: creates an Iceberg table, appends three data files, then reads via the
// schema transform and asserts all snapshot rows come back (order-independent).
@Test public void testSimpleScan() throws Exception { String identifier = "default.table_" + Long.toString(UUID.randomUUID().hashCode(), 16); TableIdentifier tableId = TableIdentifier.parse(identifier); Table simpleTable = warehouse.createTable(tableId, TestFixtures.SCHEMA); final Schema schema = IcebergUtils.icebergSchemaToBeamSchema(TestFixtures.SCHEMA); simpleTable .newFastAppend() .appendFile( warehouse.writeRecords( "file1s1.parquet", simpleTable.schema(), TestFixtures.FILE1SNAPSHOT1)) .appendFile( warehouse.writeRecords( "file2s1.parquet", simpleTable.schema(), TestFixtures.FILE2SNAPSHOT1)) .appendFile( warehouse.writeRecords( "file3s1.parquet", simpleTable.schema(), TestFixtures.FILE3SNAPSHOT1)) .commit(); final List<Row> expectedRows = Stream.of( TestFixtures.FILE1SNAPSHOT1, TestFixtures.FILE2SNAPSHOT1, TestFixtures.FILE3SNAPSHOT1) .flatMap(List::stream) .map(record -> IcebergUtils.icebergRecordToBeamRow(schema, record)) .collect(Collectors.toList()); Map<String, String> properties = new HashMap<>(); properties.put("type", CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP); properties.put("warehouse", warehouse.location); SchemaTransformConfiguration readConfig = SchemaTransformConfiguration.builder() .setTable(identifier) .setCatalogName("name") .setCatalogProperties(properties) .build(); PCollection<Row> output = PCollectionRowTuple.empty(testPipeline) .apply(new IcebergReadSchemaTransformProvider().from(readConfig)) .get(OUTPUT_TAG); PAssert.that(output) .satisfies( (Iterable<Row> rows) -> { assertThat(rows, containsInAnyOrder(expectedRows.toArray())); return null; }); testPipeline.run(); }
// Sanity-checks snapshot state: every pending message's cluster session id must be sequential
// from logServiceSessionId, and nextServiceSessionId must equal logServiceSessionId + count + 1;
// any mismatch is a FATAL ClusterException with full diagnostic context.
void verify() { final MutableInteger messageCount = new MutableInteger(); final ExpandableRingBuffer.MessageConsumer messageConsumer = (buffer, offset, length, headOffset) -> { messageCount.increment(); final int headerOffset = offset + MessageHeaderDecoder.ENCODED_LENGTH; final int clusterSessionIdOffset = headerOffset + SessionMessageHeaderDecoder.clusterSessionIdEncodingOffset(); final long clusterSessionId = buffer.getLong( clusterSessionIdOffset, SessionMessageHeaderDecoder.BYTE_ORDER); if (clusterSessionId != (logServiceSessionId + messageCount.get())) { throw new ClusterException("snapshot has incorrect pending message:" + " serviceId=" + serviceId + " nextServiceSessionId=" + nextServiceSessionId + " logServiceSessionId=" + logServiceSessionId + " clusterSessionId=" + clusterSessionId + " pendingMessageIndex=" + messageCount.get(), AeronException.Category.FATAL); } return true; }; pendingMessages.forEach(messageConsumer, Integer.MAX_VALUE); if (nextServiceSessionId != (logServiceSessionId + messageCount.get() + 1)) { throw new ClusterException("snapshot has incorrect pending message state:" + " serviceId=" + serviceId + " nextServiceSessionId=" + nextServiceSessionId + " logServiceSessionId=" + logServiceSessionId + " pendingMessageCount=" + messageCount.get(), AeronException.Category.FATAL); } }
// Verifies an empty tracker survives a snapshot/load round-trip and passes verify().
@Test void snapshotEmpty() { final PendingServiceMessageTracker tracker = new PendingServiceMessageTracker( 0, counter, logPublisher, clusterClock); final Snapshot snapshot = takeSnapshot(tracker); final PendingServiceMessageTracker trackerLoaded = new PendingServiceMessageTracker( 0, counter, logPublisher, clusterClock); loadSnapshot(trackerLoaded, snapshot); trackerLoaded.verify(); }
// Returns the URI scheme of this resource id (e.g. "s3").
@Override public String getScheme() { return scheme; }
// Verifies the scheme is "s3" for both directory-style and bare-bucket URIs.
@Test public void testGetScheme() { // Tests s3 paths. assertEquals("s3", S3ResourceId.fromUri("s3://my_bucket/tmp dir/").getScheme()); // Tests bucket with no ending '/'. assertEquals("s3", S3ResourceId.fromUri("s3://my_bucket").getScheme()); }
// Builds the joined stream: non-foreign-key joins first validate that both sides have matching
// partition counts, then the joiner selected by the two sides' output types performs the join.
@Override public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) { if (!joinKey.isForeignKey()) { ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient()); } final JoinerFactory joinerFactory = new JoinerFactory( buildContext, this, buildContext.buildNodeContext(getId().toString())); return joinerFactory.getJoiner(left.getNodeOutputType(), right.getNodeOutputType()).join(); }
// Verifies a stream-stream LEFT join delegates to leftJoin with the windowed expression and formats.
@Test public void shouldPerformStreamToStreamLeftJoin() { // Given: setupStream(left, leftSchemaKStream); setupStream(right, rightSchemaKStream); final JoinNode joinNode = new JoinNode(nodeId, LEFT, joinKey, true, left, right, WITHIN_EXPRESSION, "KAFKA"); // When: joinNode.buildStream(planBuildContext); // Then: verify(leftSchemaKStream).leftJoin( rightSchemaKStream, SYNTH_KEY, WITHIN_EXPRESSION.get(), VALUE_FORMAT.getFormatInfo(), OTHER_FORMAT.getFormatInfo(), CONTEXT_STACKER ); }
// Sets multiple key/value pairs: inside a transaction/pipeline each SET is written individually;
// otherwise all SETs are batched into one CommandBatchService execution. Always returns true.
@Override public Boolean mSet(Map<byte[], byte[]> tuple) { if (isQueueing() || isPipelined()) { for (Entry<byte[], byte[]> entry: tuple.entrySet()) { write(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue()); } return true; } CommandBatchService es = new CommandBatchService(executorService); for (Entry<byte[], byte[]> entry: tuple.entrySet()) { es.writeAsync(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue()); } es.execute(); return true; }
// Verifies mSet stores all ten pairs and each is retrievable via get.
@Test public void testMSet() { Map<byte[], byte[]> map = new HashMap<>(); for (int i = 0; i < 10; i++) { map.put(("test" + i).getBytes(), ("test" + i*100).getBytes()); } connection.mSet(map); for (Map.Entry<byte[], byte[]> entry : map.entrySet()) { assertThat(connection.get(entry.getKey())).isEqualTo(entry.getValue()); } }
/**
 * Handles a job-exceptions REST request: reads the optional upper-limit and failure-label
 * query parameters (falling back to defaults when absent) and builds the exceptions response
 * from the execution graph.
 */
@Override
protected JobExceptionsInfoWithHistory handleRequest(
        HandlerRequest<EmptyRequestBody> request, ExecutionGraphInfo executionGraph) {
    // Optional cap on how many exceptions to report; default when the parameter is absent.
    final List<Integer> exceptionToReportMaxSizes =
            request.getQueryParameter(UpperLimitExceptionParameter.class);
    final int exceptionToReportMaxSize =
            exceptionToReportMaxSizes.isEmpty()
                    ? MAX_NUMBER_EXCEPTION_TO_REPORT
                    : exceptionToReportMaxSizes.get(0);
    // Optional failure-label filter; an empty parameter list means "no filtering".
    List<FailureLabelFilterParameter.FailureLabel> failureLabelFilter =
            request.getQueryParameter(FailureLabelFilterParameter.class);
    failureLabelFilter =
            failureLabelFilter.isEmpty() ? EMPTY_FAILURE_LABEL_FILTER : failureLabelFilter;
    return createJobExceptionsInfo(executionGraph, exceptionToReportMaxSize, failureLabelFilter);
}
// Verifies a single global failure is reported as the root cause with its timestamp, no
// truncation, no additional exceptions, and one matching history entry.
@Test void testOnlyRootCause() throws HandlerRequestException, ExecutionException, InterruptedException { final Throwable rootCause = new RuntimeException("root cause"); final long rootCauseTimestamp = System.currentTimeMillis(); final ExecutionGraphInfo executionGraphInfo = createExecutionGraphInfo(fromGlobalFailure(rootCause, rootCauseTimestamp)); final HandlerRequest<EmptyRequestBody> request = createRequest(executionGraphInfo.getJobId(), 10); final JobExceptionsInfoWithHistory response = testInstance.handleRequest(request, executionGraphInfo); assertThat(response.getRootException()) .isEqualTo(ExceptionUtils.stringifyException(rootCause)); assertThat(response.getRootTimestamp()).isEqualTo(rootCauseTimestamp); assertThat(response.isTruncated()).isFalse(); assertThat(response.getAllExceptions()).isEmpty(); assertThat(response.getExceptionHistory().getEntries()) .satisfies( matching( contains( historyContainsGlobalFailure( rootCause, rootCauseTimestamp)))); }
// Creates an exit-status tracker bound to the given runtime options.
public ExitStatus(Options options) { this.options = options; }
// Verifies two failed scenarios yield a non-zero (0x1) exit status.
@Test void with_failed_failed_scenarios() { createRuntime(); bus.send(testCaseFinishedWithStatus(Status.FAILED)); bus.send(testCaseFinishedWithStatus(Status.FAILED)); assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x1))); }
// Derives a human-readable step name "[inputs-outputs]" for an executable stage by locating the
// transforms consuming the stage's input PCollection and those producing its output PCollections.
public static String toStepName(ExecutableStage executableStage) { /* * Look for the first/input ParDo/DoFn in this executable stage by * matching ParDo/DoFn's input PCollection with executable stage's * input PCollection */ Set<PipelineNode.PTransformNode> inputs = executableStage.getTransforms().stream() .filter( transform -> transform .getTransform() .getInputsMap() .containsValue(executableStage.getInputPCollection().getId())) .collect(Collectors.toSet()); Set<String> outputIds = executableStage.getOutputPCollections().stream() .map(PipelineNode.PCollectionNode::getId) .collect(Collectors.toSet()); /* * Look for the last/output ParDo/DoFn in this executable stage by * matching ParDo/DoFn's output PCollection(s) with executable stage's * out PCollection(s) */ Set<PipelineNode.PTransformNode> outputs = executableStage.getTransforms().stream() .filter( transform -> CollectionUtils.containsAny( transform.getTransform().getOutputsMap().values(), outputIds)) .collect(Collectors.toSet()); return String.format("[%s-%s]", toStepName(inputs), toStepName(outputs)); }
// Verifies a stage with no outputs renders as "[<input>-]" (empty output side).
@Test public void testExecutableStageWithoutOutput() { pipeline.apply(Create.of(KV.of(1L, "1"))); assertEquals("[Create.Values-]", DoFnUtils.toStepName(getOnlyExecutableStage(pipeline))); }
// Tears down this invoker by destroying its backing directory (and thus its invokers).
@Override public void destroy() { directory.destroy(); }
// Verifies destroying the mergeable cluster invoker makes both grouped invokers unavailable.
@Test void testDestroy() { given(invocation.getMethodName()).willReturn("getMenu"); given(invocation.getParameterTypes()).willReturn(new Class<?>[] {}); given(invocation.getArguments()).willReturn(new Object[] {}); given(invocation.getObjectAttachments()).willReturn(new HashMap<>()); given(invocation.getInvoker()).willReturn(firstInvoker); given(firstInvoker.getUrl()).willReturn(url.addParameter(GROUP_KEY, "first")); given(firstInvoker.getInterface()).willReturn(MenuService.class); given(firstInvoker.invoke(invocation)).willReturn(new AppResponse()); given(secondInvoker.getUrl()).willReturn(url.addParameter(GROUP_KEY, "second")); given(secondInvoker.getInterface()).willReturn(MenuService.class); given(secondInvoker.invoke(invocation)).willReturn(new AppResponse()); given(directory.list(invocation)).willReturn(new ArrayList() { { add(firstInvoker); add(secondInvoker); } }); given(directory.getUrl()).willReturn(url); given(directory.getConsumerUrl()).willReturn(url); given(directory.getConsumerUrl()).willReturn(url); given(directory.getInterface()).willReturn(MenuService.class); mergeableClusterInvoker = new MergeableClusterInvoker<MenuService>(directory); mergeableClusterInvoker.destroy(); assertFalse(firstInvoker.isAvailable()); assertFalse(secondInvoker.isAvailable()); }
// Configures the APP process logging: resets the root context, propagates JUL level changes,
// wires console and rolling-file appenders, and applies the level config (hazelcast pinned to WARN).
public LoggerContext configure() { LoggerContext ctx = helper.getRootContext(); ctx.reset(); helper.enableJulChangePropagation(ctx); configureConsole(ctx); configureWithLogbackWritingToFile(ctx); helper.apply( LogLevelConfig.newBuilder(helper.getRootLoggerName()) .rootLevelFor(ProcessId.APP) .immutableLevel("com.hazelcast", Level.toLevel("WARN")) .build(), appSettings.getProps()); return ctx; }
// Verifies the root logger ends up with exactly two appenders: a formatted console appender
// and the sonar rolling-file appender.
@Test public void root_logger_only_writes_to_console_with_formatting_when_running_from_sonar_script() { LoggerContext ctx = underTest.configure(); Logger rootLogger = ctx.getLogger(ROOT_LOGGER_NAME); var consoleAppender = (ConsoleAppender<ILoggingEvent>) rootLogger.getAppender("APP_CONSOLE"); verifyAppFormattedLogEncoder(consoleAppender.getEncoder()); var rollingFileAppender = rootLogger.getAppender("file_sonar"); assertThat(rollingFileAppender).isNotNull(); assertThat(rootLogger.iteratorForAppenders()).toIterable().hasSize(2); }
// Decides whether this node is responsible for the given tag under distro partitioning:
// always true when distro is disabled or running standalone; false while the healthy server
// list is not yet populated; true when the local address is absent from the list (fail-open);
// otherwise true iff the tag's hash maps into the local address's index range.
public boolean responsible(String responsibleTag) { final List<String> servers = healthyList; if (!switchDomain.isDistroEnabled() || EnvUtil.getStandaloneMode()) { return true; } if (CollectionUtils.isEmpty(servers)) { // means distro config is not ready yet return false; } String localAddress = EnvUtil.getLocalAddress(); int index = servers.indexOf(localAddress); int lastIndex = servers.lastIndexOf(localAddress); if (lastIndex < 0 || index < 0) { return true; } int target = distroHash(responsibleTag) % servers.size(); return target >= index && target <= lastIndex; }
// Verifies the mapper claims responsibility for the fixture service name.
@Test void testResponsible() { assertTrue(distroMapper.responsible(serviceName)); }
// Confirms a meeting's schedule: loads meeting and attendee, checks that the attendee is the
// host, the meeting is not already confirmed, is locked, and the requested range is within the
// meeting's time/date bounds, then persists the confirmed slot and returns its response view.
@Transactional public MeetingConfirmResponse create(String uuid, long attendeeId, MeetingConfirmRequest request) { LocalDateTime startDateTime = request.toStartDateTime(); LocalDateTime endDateTime = request.toEndDateTime(); Meeting meeting = meetingRepository.findByUuid(uuid) .orElseThrow(() -> new MomoException(MeetingErrorCode.INVALID_UUID)); Attendee attendee = attendeeRepository.findByIdAndMeeting(attendeeId, meeting) .orElseThrow(() -> new MomoException(AttendeeErrorCode.INVALID_ATTENDEE)); validateHostPermission(attendee); validateNotAlreadyConfirmed(meeting); validateMeetingLocked(meeting); validateTimeRange(meeting, startDateTime, endDateTime); validateDateRange(meeting, startDateTime, endDateTime); ConfirmedMeeting confirmedMeeting = new ConfirmedMeeting(meeting, startDateTime, endDateTime); confirmedMeetingRepository.save(confirmedMeeting); return MeetingConfirmResponse.from(confirmedMeeting); }
// Verifies the host can confirm a locked meeting and the persisted range matches the request.
@DisplayName("주최자가 잠겨있는 약속의 일정을 확정한다.") @Test void confirmSchedule() { meetingConfirmService.create(meeting.getUuid(), attendee.getId(), validRequest); ConfirmedMeeting confirmedMeeting = confirmedMeetingRepository.findByMeeting(meeting).get(); LocalDateTime startDateTime = confirmedMeeting.getStartDateTime(); LocalDateTime endDateTime = confirmedMeeting.getEndDateTime(); assertAll( () -> assertThat(startDateTime).isEqualTo(validRequest.toStartDateTime()), () -> assertThat(endDateTime).isEqualTo(validRequest.toEndDateTime()) ); }
// Queries PostgreSQL for the character encoding of the current database.
public String getDefaultCharset(Connection connection) throws SQLException { return sqlExecutor.selectSingleString(connection, "select pg_encoding_to_char(encoding) from pg_database where datname = current_database()"); }
// Verifies the single-string query result is returned as the default charset.
@Test public void test_getDefaultCharset() throws SQLException { answerSelect("latin"); assertThat(underTest.getDefaultCharset(connection)).isEqualTo("latin"); }
// Looks up a catalogue-tree sort strategy by name, falling back to the default strategy for
// unknown names; throws BusException only when even the default is not registered.
public CatalogueTreeSortStrategy getStrategy(String strategyName) { CatalogueTreeSortStrategy catalogueTreeSortStrategy = Safes.of(catalogueTreeSortStrategyMap).get(strategyName); if (Objects.isNull(catalogueTreeSortStrategy)) { log.warn("Strategy {} is not defined. Use DefaultStrategy", strategyName); catalogueTreeSortStrategy = Safes.of(catalogueTreeSortStrategyMap).get(CatalogueSortConstant.STRATEGY_DEFAULT); } if (Objects.isNull(catalogueTreeSortStrategy)) { throw new BusException(StrUtil.format("Strategy {} is not defined.", strategyName)); } return catalogueTreeSortStrategy; }
// Verifies an unknown strategy name falls back to the default strategy.
@Test public void getStrategyIllegalTest() { when(catalogueTreeSortStrategyMap.get(anyString())).thenAnswer(invocationOnMock -> { String strategy = invocationOnMock.getArgument(0); return mockCatalogueTreeSortStrategyMap.get(strategy); }); CatalogueTreeSortStrategy strategy = catalogueTreeSortFactoryTest.getStrategy("xxx"); assertEquals(defaultStrategy, strategy); }
// Issues one DescribeLogDirs request per broker (querying all log directories) and returns a
// result keyed by broker id; an empty description map is treated as an error — pre-v3 responses
// had no error code, so NONE is interpreted as CLUSTER_AUTHORIZATION_FAILED in that case.
@Override public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) { final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size()); final long now = time.milliseconds(); for (final Integer brokerId : brokers) { KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>(); futures.put(brokerId, future); runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) { @Override public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) { // Query selected partitions in all log directories return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null)); } @Override public void handleResponse(AbstractResponse abstractResponse) { DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse; Map<String, LogDirDescription> descriptions = logDirDescriptions(response); if (!descriptions.isEmpty()) { future.complete(descriptions); } else { // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None Errors error = response.data().errorCode() == Errors.NONE.code() ? Errors.CLUSTER_AUTHORIZATION_FAILED : Errors.forCode(response.data().errorCode()); future.completeExceptionally(error.exception()); } } @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); } }, now); } return new DescribeLogDirsResult(new HashMap<>(futures)); }
// Verifies the deprecated values()/all() accessors of DescribeLogDirsResult still expose the
// per-broker log dir info (path, partition, error, lag, size).
@SuppressWarnings("deprecation") @Test public void testDescribeLogDirsDeprecated() throws ExecutionException, InterruptedException { Set<Integer> brokers = singleton(0); TopicPartition tp = new TopicPartition("topic", 12); String logDir = "/var/data/kafka"; Errors error = Errors.NONE; int offsetLag = 24; long partitionSize = 1234567890; try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponseFrom( prepareDescribeLogDirsResponse(error, logDir, tp, partitionSize, offsetLag), env.cluster().nodeById(0)); DescribeLogDirsResult result = env.adminClient().describeLogDirs(brokers); Map<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>> deprecatedValues = result.values(); assertEquals(brokers, deprecatedValues.keySet()); assertNotNull(deprecatedValues.get(0)); assertDescriptionContains(deprecatedValues.get(0).get(), logDir, tp, error, offsetLag, partitionSize); Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> deprecatedAll = result.all().get(); assertEquals(brokers, deprecatedAll.keySet()); assertDescriptionContains(deprecatedAll.get(0), logDir, tp, error, offsetLag, partitionSize); } }
// Collapses a queue of VPLS operations for one VPLS into the single equivalent operation,
// based only on the first and last queued operations (e.g. REMOVE..ADD => UPDATE,
// ADD..REMOVE => nothing). Returns null for an empty queue or when the operations cancel out.
protected static VplsOperation getOptimizedVplsOperation(Deque<VplsOperation> operations) { if (operations.isEmpty()) { return null; } // no need to optimize if the queue contains only one operation if (operations.size() == 1) { return operations.getFirst(); } final VplsOperation firstOperation = operations.peekFirst(); final VplsOperation lastOperation = operations.peekLast(); final VplsOperation.Operation firstOp = firstOperation.op(); final VplsOperation.Operation lastOp = lastOperation.op(); if (firstOp.equals(VplsOperation.Operation.REMOVE)) { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 1: both first and last operation are REMOVE; do remove return firstOperation; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 2: if first is REMOVE, and last is ADD; do update return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } else { // case 3: first is REMOVE, last is UPDATE; do update return lastOperation; } } else if (firstOp.equals(VplsOperation.Operation.ADD)) { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 4: first is ADD, last is REMOVE; nothing to do return null; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 5: both first and last are ADD, do add return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.ADD); } else { // case 6: first is ADD and last is update, do add return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.ADD); } } else { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 7: last is remove, do remove return lastOperation; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 8: do update only return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } else { // case 9: from UPDATE to UPDATE // only need last UPDATE operation return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } } }
// REMOVE followed by ADD for the same VPLS must optimize to a single UPDATE carrying the latest data.
@Test public void testOptimizeOperationsRToA() { Deque<VplsOperation> operations = new ArrayDeque<>(); VplsData vplsData = VplsData.of(VPLS1); vplsData.addInterfaces(ImmutableSet.of(V100H1)); VplsOperation vplsOperation = VplsOperation.of(vplsData, VplsOperation.Operation.REMOVE); operations.add(vplsOperation); vplsData = VplsData.of(VPLS1, EncapsulationType.VLAN); vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2)); vplsOperation = VplsOperation.of(vplsData, VplsOperation.Operation.ADD); operations.add(vplsOperation); vplsOperation = VplsOperationManager.getOptimizedVplsOperation(operations); assertEquals(VplsOperation.of(vplsData, VplsOperation.Operation.UPDATE), vplsOperation); }
// Maps an in-memory Measure to a persistable MeasureDto, wiring metric/component/analysis UUIDs,
// copying the quality-gate status when present, and converting the value/data fields via helpers.
public MeasureDto toMeasureDto(Measure measure, Metric metric, Component component) { MeasureDto out = new MeasureDto(); out.setMetricUuid(metric.getUuid()); out.setComponentUuid(component.getUuid()); out.setAnalysisUuid(analysisMetadataHolder.getUuid()); if (measure.hasQualityGateStatus()) { setAlert(out, measure.getQualityGateStatus()); } out.setValue(valueAsDouble(measure)); out.setData(data(measure)); return out; }
// LEVEL metrics carry their value in the data column (enum name), never in the numeric value column.
@Test public void toMeasureDto_maps_name_of_Level_to_data_and_has_no_value_for_LEVEL_metric() { MeasureDto trueMeasureDto = underTest.toMeasureDto(Measure.newMeasureBuilder().create(Measure.Level.OK), SOME_LEVEL_METRIC, SOME_COMPONENT); assertThat(trueMeasureDto.getValue()).isNull(); assertThat(trueMeasureDto.getData()).isEqualTo(Measure.Level.OK.name()); }
/**
 * Replaces the model at {@code index}. When the replacement has a different id,
 * observers are told the old item was removed and a new one inserted at the
 * same position; an identical id is treated as an in-place update needing no
 * notification here.
 *
 * @return the model previously stored at {@code index}
 */
@Override
public EpoxyModel<?> set(int index, EpoxyModel<?> element) {
  final EpoxyModel<?> replaced = super.set(index, element);
  final boolean idChanged = replaced.id() != element.id();
  if (idChanged) {
    notifyRemoval(index, 1);
    notifyInsertion(index, 1);
  }
  return replaced;
}
// Setting a model with a different id must surface as a removal+insertion pair to observers.
@Test public void testSet() { modelList.set(0, new TestModel()); verify(observer).onItemRangeRemoved(0, 1); verify(observer).onItemRangeInserted(0, 1); }
/**
 * Converts a day/hour/minute/second/millisecond breakdown into total
 * milliseconds, using exact arithmetic so any long overflow is surfaced as an
 * {@link IllegalArgumentException} rather than silently wrapping.
 */
public static long toMillis(long day, long hour, long minute, long second, long millis) {
    try {
        long total = addExact(multiplyExact(day, MILLIS_IN_DAY), millis);
        total = addExact(total, multiplyExact(hour, MILLIS_IN_HOUR));
        total = addExact(total, multiplyExact(minute, MILLIS_IN_MINUTE));
        return addExact(total, multiplyExact(second, MILLIS_IN_SECOND));
    } catch (ArithmeticException e) {
        // Overflow is a caller error (inputs too large), not an internal fault.
        throw new IllegalArgumentException(e);
    }
}
// Largest day count that fits in a long of millis must round-trip against TimeUnit.DAYS.
@Test public void textMaxDays() { long days = Long.MAX_VALUE / DAYS.toMillis(1); assertEquals(toMillis(days, 0, 0, 0, 0), DAYS.toMillis(days)); }
// Truth-style assertion: fails unless actual compares strictly less than other.
// Both actual and other are null-checked before comparing; compareTo >= 0 means "not less than".
@SuppressWarnings("unchecked") public final void isLessThan(@Nullable T other) { if (checkNotNull((Comparable<Object>) actual).compareTo(checkNotNull(other)) >= 0) { failWithActual("expected to be less than", other); } }
// 4 < 3 is false, so the assertion must fail and report the expected bound.
@Test public void isLessThan_failsGreater() { expectFailureWhenTestingThat(4).isLessThan(3); assertFailureValue("expected to be less than", "3"); }
public static boolean webSocketHostPathMatches(String hostPath, String targetPath) { boolean exactPathMatch = true; if (ObjectHelper.isEmpty(hostPath) || ObjectHelper.isEmpty(targetPath)) { // This scenario should not really be possible as the input args come from the vertx-websocket consumer / producer URI return false; } // Paths ending with '*' are Vert.x wildcard routes so match on the path prefix if (hostPath.endsWith("*")) { exactPathMatch = false; hostPath = hostPath.substring(0, hostPath.lastIndexOf('*')); } String normalizedHostPath = HttpUtils.normalizePath(hostPath + "/"); String normalizedTargetPath = HttpUtils.normalizePath(targetPath + "/"); String[] hostPathElements = normalizedHostPath.split("/"); String[] targetPathElements = normalizedTargetPath.split("/"); if (exactPathMatch && hostPathElements.length != targetPathElements.length) { return false; } if (exactPathMatch) { return normalizedHostPath.equals(normalizedTargetPath); } else { return normalizedTargetPath.startsWith(normalizedHostPath); } }
// Redundant trailing slashes on the target path must normalize away and still match.
@Test void webSocketHostWithTrailingMultipleSlashPathMatches() { String hostPath = "/foo/bar/cheese/wine"; String targetPath = "/foo/bar/cheese/wine//"; assertTrue(VertxWebsocketHelper.webSocketHostPathMatches(hostPath, targetPath)); }
// Convenience overload: reads the next page using the reader's configured row-number setting.
public Page getNextPage() throws IOException { return getNextPage(this.appendRowNumber); }
// With row-number appending disabled, a single-VARCHAR-column file must yield exactly one channel.
@Test public void testGetNextPage_withoutRowNumbers() throws Exception { List<Type> types = ImmutableList.of(VARCHAR); List<List<?>> values = ImmutableList.of(ImmutableList.of("a", "")); TempFile tempFile = new TempFile(); writeOrcColumnsPresto(tempFile.getFile(), ORC_12, NONE, Optional.empty(), types, values, NOOP_WRITER_STATS); OrcPredicate orcPredicate = createOrcPredicate(types, values, DWRF, false); Map<Integer, Type> includedColumns = IntStream.range(0, types.size()) .boxed() .collect(toImmutableMap(Function.identity(), types::get)); List<Integer> outputColumns = IntStream.range(0, types.size()) .boxed() .collect(toImmutableList()); OrcAggregatedMemoryContext systemMemoryUsage = new TestingHiveOrcAggregatedMemoryContext(); try (OrcSelectiveRecordReader recordReader = createCustomOrcSelectiveRecordReader( tempFile.getFile(), ORC_12.getOrcEncoding(), orcPredicate, types, 1, ImmutableMap.of(), ImmutableList.of(), ImmutableMap.of(), OrcTester.OrcReaderSettings.builder().build().getRequiredSubfields(), ImmutableMap.of(), ImmutableMap.of(), includedColumns, outputColumns, false, systemMemoryUsage, false)) { assertEquals(recordReader.getReaderPosition(), 0); assertEquals(recordReader.getFilePosition(), 0); Page page = recordReader.getNextPage(); // One VARCHAR column, no row number column assertEquals(1, page.getChannelCount()); Block block = page.getBlock(0); assertEquals(block.getPositionCount(), 1); } }
/**
 * Parses a {@code "host[:port]"} string into an {@link InetSocketAddress}.
 * When no ':' separator is present the whole string is taken as the host and
 * the port defaults to 0.
 */
public static InetSocketAddress toAddress(String address) {
    final int separator = address.indexOf(':');
    if (separator < 0) {
        // No explicit port supplied; fall back to port 0.
        return new InetSocketAddress(address, 0);
    }
    final String host = address.substring(0, separator);
    final int port = Integer.parseInt(address.substring(separator + 1));
    return new InetSocketAddress(host, port);
}
// Covers both host:port parsing and the port-less default of 0.
@Test void testToAddress() { InetSocketAddress address = NetUtils.toAddress("localhost:1234"); assertThat(address.getHostName(), equalTo("localhost")); assertThat(address.getPort(), equalTo(1234)); address = NetUtils.toAddress("localhost"); assertThat(address.getHostName(), equalTo("localhost")); assertThat(address.getPort(), equalTo(0)); }
// Builds the Spark Batch for this scan from the already-planned task groups and expected schema.
@Override public Batch toBatch() { return new SparkBatch( sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode()); }
// On an unpartitioned table, a years(ts) UDF predicate (and its negation) cannot prune, so all 10 partitions plan.
@Test public void testUnpartitionedYears() throws Exception { createUnpartitionedTable(spark, tableName); SparkScanBuilder builder = scanBuilder(); YearsFunction.TimestampToYearsFunction function = new YearsFunction.TimestampToYearsFunction(); UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts"))); Predicate predicate = new Predicate( "=", expressions( udf, intLit(timestampStrToYearOrdinal("2017-11-22T00:00:00.000000+00:00")))); pushFilters(builder, predicate); Batch scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(10); // NOT Equal builder = scanBuilder(); predicate = new Not(predicate); pushFilters(builder, predicate); scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(10); }
// Root XML node name under which this job listener's payload is serialized.
public String getRootNodeName() { return "job"; }
// Pins the serialized root node name to "job".
@Test public void testGetRootNodeName() throws Exception { assertEquals( "job", jobFileListener.getRootNodeName() ); }
/**
 * Normalizes a table cell value onto a single line: tabs and any line break
 * flavor (CRLF, bare CR, bare LF) become single spaces. A {@code null} value
 * is rendered as the literal string {@code "null"}.
 *
 * @param column the raw cell value, possibly {@code null}
 * @return the single-line representation, never {@code null}
 */
public static String normalizeColumn(String column) {
    if (column == null) {
        return "null";
    }
    // Replace CRLF before bare CR/LF so "\r\n" collapses to a single space.
    // Bare "\r" previously leaked through unnormalized; it is handled now.
    return column.replace("\r\n", " ")
            .replace("\r", " ")
            .replace("\n", " ")
            .replace("\t", " ");
}
// Tabs, LF, and CRLF each normalize to one space; null renders as the string "null".
@Test void testColumn() { assertEquals("hello world", TableDataUtils.normalizeColumn("hello\tworld")); assertEquals("hello world", TableDataUtils.normalizeColumn("hello\nworld")); assertEquals("hello world", TableDataUtils.normalizeColumn("hello\r\nworld")); assertEquals("hello world", TableDataUtils.normalizeColumn("hello\t\nworld")); assertEquals("null", TableDataUtils.normalizeColumn(null)); }
// Liquibase "status" command: reports pending changesets to the configured output stream.
// The "verbose" flag is optional in the namespace; absence is treated as false.
@Override @SuppressWarnings("UseOfSystemOutOrSystemErr") public void run(Namespace namespace, Liquibase liquibase) throws Exception { final Boolean verbose = namespace.getBoolean("verbose"); liquibase.reportStatus(verbose != null && verbose, getContext(namespace), new OutputStreamWriter(outputStream, StandardCharsets.UTF_8)); }
// Against an already-migrated H2 database the status output must be "<url> is up to date".
@Test void testRunOnMigratedDb() throws Exception { final URI existedDbPathUri = Objects.requireNonNull(getClass().getResource("/test-db.mv.db")).toURI(); final String existedDbPath = Paths.get(existedDbPathUri).toString(); final String existedDbUrl = "jdbc:h2:" + existedDbPath.substring(0, existedDbPath.length() - ".mv.db".length()); final TestMigrationConfiguration existedDbConf = MigrationTestSupport.createConfiguration(existedDbUrl); statusCommand.run(null, new Namespace(Collections.emptyMap()), existedDbConf); assertThat(baos.toString(UTF_8.name())).matches("\\S+ is up to date\\R"); }
// Scenario execution loop: runs each step in order (nextStepIndex() allows debug step-back / hook
// skips to alter the cursor), records step results, converts any crash into a fake failed step
// result, and — crucially — always runs afterRun() in the finally block so parallel-runner
// countdown latches are released even on failure. Aborts the whole suite on failure when configured.
@Override public void run() { try { // make sure we call afterRun() even on crashes // and operate countdown latches, else we may hang the parallel runner if (steps == null) { beforeRun(); } if (skipped) { return; } int count = steps.size(); int index = 0; while ((index = nextStepIndex()) < count) { currentStep = steps.get(index); execute(currentStep); if (currentStepResult != null) { // can be null if debug step-back or hook skip result.addStepResult(currentStepResult); } } } catch (Exception e) { if (currentStepResult != null) { result.addStepResult(currentStepResult); } logError("scenario [run] failed\n" + StringUtils.throwableToString(e)); currentStepResult = result.addFakeStepResult("scenario [run] failed", e); } finally { if (!skipped) { afterRun(); if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) { featureRuntime.suite.abort(); } } if (caller.isNone()) { logAppender.close(); // reclaim memory } } }
// karate.call() of a JS file from within JS must propagate the called file's result (2) back.
@Test void testCallSingleWithinJs() { run( "def res = karate.call('called3-caller1.js')" ); matchVar("res", "2"); }
// Maps an HTTP status code from a REST response to a corresponding exception.
// NOTE(review): despite the Exception return type, every branch THROWS — the method never
// returns normally. Callers rely on this (they use assertThrows); the return type only exists
// so call sites can write `throw toException(...)` if they wish. Unrecognized codes map to
// RuntimeException.
public static Exception toException(int code, String msg) throws Exception { if (code == Response.Status.NOT_FOUND.getStatusCode()) { throw new NotFoundException(msg); } else if (code == Response.Status.NOT_IMPLEMENTED.getStatusCode()) { throw new ClassNotFoundException(msg); } else if (code == Response.Status.BAD_REQUEST.getStatusCode()) { throw new InvalidRequestException(msg); } else if (code == Response.Status.CONFLICT.getStatusCode()) { throw new RequestConflictException(msg); } else { throw new RuntimeException(msg); } }
// HTTP 501 (NOT_IMPLEMENTED) must map to ClassNotFoundException.
@Test public void testToExceptionClassNotFoundException() { assertThrows(ClassNotFoundException.class, () -> RestExceptionMapper.toException(Response.Status.NOT_IMPLEMENTED.getStatusCode(), "Not Implemented")); }
// Emits a deprecation warning (log + analysis warning) when the legacy sonar.login /
// sonar.password properties are used; password takes precedence over login in the message,
// and .NET scanner users get an extra hint appended.
public void execute() { Optional<String> login = configuration.get(CoreProperties.LOGIN); Optional<String> password = configuration.get(CoreProperties.PASSWORD); String warningMessage = null; if (password.isPresent()) { warningMessage = PASSWORD_WARN_MESSAGE; } else if (login.isPresent()) { warningMessage = LOGIN_WARN_MESSAGE; } if (warningMessage != null) { if (isScannerDotNet()) { warningMessage += SCANNER_DOTNET_WARN_MESSAGE; } LOG.warn(warningMessage); analysisWarnings.addUnique(warningMessage); } }
// With both login and password set under the .NET scanner, the password warning (with the .NET suffix) wins.
@Test public void execute_whenUsingPasswordAndDotNetScanner_shouldAddWarning() { settings.setProperty(CoreProperties.LOGIN, "test"); settings.setProperty(CoreProperties.PASSWORD, "winner winner chicken dinner"); when(environmentInformation.getKey()).thenReturn("ScannerMSBuild"); underTest.execute(); verify(analysisWarnings, times(1)).addUnique(PASSWORD_WARN_MESSAGE + SCANNER_DOTNET_WARN_MESSAGE); Assertions.assertThat(logger.logs(Level.WARN)).contains(PASSWORD_WARN_MESSAGE + SCANNER_DOTNET_WARN_MESSAGE); }
// RPC entry point: unwraps the security handler and remote address from RpcInfo and delegates
// to the testable overload.
@Override public ACCESS3Response access(XDR xdr, RpcInfo info) { return access(xdr, getSecurityHandler(info), info.remoteAddress()); }
// NFS ACCESS: an unprivileged caller gets NFS3ERR_ACCES, a privileged caller gets NFS3_OK.
@Test(timeout = 60000) public void testAccess() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); int namenodeId = Nfs3Utils.getNamenodeId(config); FileHandle handle = new FileHandle(dirId, namenodeId); XDR xdr_req = new XDR(); ACCESS3Request req = new ACCESS3Request(handle); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. ACCESS3Response response2 = nfsd.access(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus()); }
// Convenience constructor: derives the ATR indicator from the middle band's series using the
// given ATR bar count, then delegates to the main (middle, atr, ratio) constructor.
public KeltnerChannelUpperIndicator(KeltnerChannelMiddleIndicator middle, double ratio, int barCountATR) { this(middle, new ATRIndicator(middle.getBarSeries(), barCountATR), ratio); }
// Pins the upper Keltner band (middle EMA-14 + 2*ATR-14) against known reference values for bars 13..29.
@Test public void keltnerChannelUpperIndicatorTest() { KeltnerChannelMiddleIndicator km = new KeltnerChannelMiddleIndicator(new ClosePriceIndicator(data), 14); KeltnerChannelUpperIndicator ku = new KeltnerChannelUpperIndicator(km, 2, 14); assertNumEquals(11971.9132, ku.getValue(13)); assertNumEquals(12002.3402, ku.getValue(14)); assertNumEquals(12024.4032, ku.getValue(15)); assertNumEquals(12040.3933, ku.getValue(16)); assertNumEquals(12052.8572, ku.getValue(17)); assertNumEquals(12067.9050, ku.getValue(18)); assertNumEquals(12099.5025, ku.getValue(19)); assertNumEquals(12110.5722, ku.getValue(20)); assertNumEquals(12130.8675, ku.getValue(21)); assertNumEquals(12147.7344, ku.getValue(22)); assertNumEquals(12175.5937, ku.getValue(23)); assertNumEquals(12208.1327, ku.getValue(24)); assertNumEquals(12233.9032, ku.getValue(25)); assertNumEquals(12256.9596, ku.getValue(26)); assertNumEquals(12285.9094, ku.getValue(27)); assertNumEquals(12301.1108, ku.getValue(28)); assertNumEquals(12313.2042, ku.getValue(29)); }
/**
 * Two definitions are equal when name, service class, order and scope all
 * match. All field comparisons are null-safe via {@link java.util.Objects#equals};
 * the previous implementation dereferenced serviceClass/order/scope directly
 * and would throw NPE if any of them were null.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    ExtensionDefinition<?> other = (ExtensionDefinition<?>) obj;
    return java.util.Objects.equals(name, other.name)
            && java.util.Objects.equals(serviceClass, other.serviceClass)
            && java.util.Objects.equals(order, other.order)
            && java.util.Objects.equals(scope, other.scope);
}
// Two definitions built from identical (name, order, scope, class) must be equal.
@Test public void testEquals() { ExtensionDefinition<ChineseHello> definition = new ExtensionDefinition<>("abc", 1, Scope.PROTOTYPE, ChineseHello.class); ExtensionDefinition<ChineseHello> definition2 = new ExtensionDefinition<>("abc", 1, Scope.PROTOTYPE, ChineseHello.class); Assertions.assertEquals(definition2, definition); }
// Converts an arbitrary value to a Struct, throwing DataException for inconvertible inputs.
// NOTE(review): the `schema` parameter is never used — the call always forwards the constant
// STRUCT_SELECTOR_SCHEMA. Confirm whether `schema` should be passed through instead; the
// current tests only exercise same-schema round trips so they would not catch a mix-up.
public static Struct convertToStruct(Schema schema, Object value) { return convertToStructInternal(STRUCT_SELECTOR_SCHEMA, value); }
// null and "" are not convertible to a struct (DataException); an actual Struct passes through.
@Test public void shouldConvertEmptyStruct() { Struct struct = new Struct(SchemaBuilder.struct().build()); assertThrows(DataException.class, () -> Values.convertToStruct(struct.schema(), null)); assertThrows(DataException.class, () -> Values.convertToStruct(struct.schema(), "")); Values.convertToStruct(struct.schema(), struct); }
// Default catalogue-tree sort strategy: recursively orders nodes by id in the requested direction.
@Override public List<Catalogue> sort(List<Catalogue> catalogueTree, SortTypeEnum sortTypeEnum) { log.debug("sort catalogue tree based on id. catalogueTree: {}, sortTypeEnum: {}", catalogueTree, sortTypeEnum); return recursionSortCatalogues(catalogueTree, sortTypeEnum); }
// Sorting an empty tree must return an empty list, not null.
@Test public void sortEmptyTest() { List<Catalogue> catalogueTree = Lists.newArrayList(); SortTypeEnum sortTypeEnum = SortTypeEnum.ASC; List<Catalogue> resultList = catalogueTreeSortDefaultStrategyTest.sort(catalogueTree, sortTypeEnum); assertEquals(Lists.newArrayList(), resultList); }
// True when the current cluster version is strictly below the given version (rolling-upgrade gate).
protected boolean isClusterVersionLessThan(Version version) { Version clusterVersion = getNodeEngine().getClusterService().getClusterVersion(); return clusterVersion.isLessThan(version); }
// The current cluster version must compare less than the next minor version.
@Test public void testClusterVersion_isLessThan_nextMinorVersion() { assertTrue(object.isClusterVersionLessThan(NEXT_MINOR_VERSION)); }
// Delegating SslContext hook: builds the handler via the wrapped context, then lets the
// subclass customize the freshly created handler/engine via initHandler().
@Override protected final SslHandler newHandler(ByteBufAllocator alloc, boolean startTls) { SslHandler handler = ctx.newHandler(alloc, startTls); initHandler(handler); return handler; }
// Both newHandler overloads must yield engines initialized with the expected protocols.
@Test public void testInitEngineOnNewSslHandler() throws Exception { SslContext delegating = newDelegatingSslContext(); SslHandler handler = delegating.newHandler(UnpooledByteBufAllocator.DEFAULT); assertArrayEquals(EXPECTED_PROTOCOLS, handler.engine().getEnabledProtocols()); handler = delegating.newHandler(UnpooledByteBufAllocator.DEFAULT, "localhost", 9090); assertArrayEquals(EXPECTED_PROTOCOLS, handler.engine().getEnabledProtocols()); }
// Publishes a batch of rows to the stream: takes the accepting semaphore, waits for subscribers,
// acquires one buffer permit per row for backpressure, then emits each row. On interrupt the
// error is logged and the interrupt status correctly restored; the semaphore is always released.
protected void acceptRows( List<T> rows ) { try { acceptingRowsSemaphore.acquire(); waitForSubscribers(); for ( T row : rows ) { streamStep.getSubtransExecutor().acquireBufferPermit(); streamStep.incrementLinesInput(); publishProcessor.onNext( row ); } } catch ( InterruptedException e ) { logChannel .logError( getString( PKG, "BlockingQueueStream.AcceptRowsInterrupt", Arrays.toString( rows.toArray() ) ) ); Thread.currentThread().interrupt(); } finally { acceptingRowsSemaphore.release(); } }
// An interrupt during semaphore acquisition must be logged and must still release the semaphore.
@Test @SuppressWarnings ( "unchecked" ) public void errorLoggedIfInterruptedInAcceptRows() throws InterruptedException { streamSource.acceptingRowsSemaphore = semaphore; streamSource.logChannel = logChannel; doThrow( new InterruptedException( "interrupt" ) ) .when( semaphore ).acquire(); streamSource.acceptRows( singletonList( "new row" ) ); verify( logChannel ).logError( any() ); verify( semaphore ).release(); }
// Convenience overload for endpoints with no request body and no message parameters:
// fills in the empty singleton instances and delegates to the full sendRequest variant.
public < M extends MessageHeaders<EmptyRequestBody, P, EmptyMessageParameters>, P extends ResponseBody> CompletableFuture<P> sendRequest(String targetAddress, int targetPort, M messageHeaders) throws IOException { return sendRequest( targetAddress, targetPort, messageHeaders, EmptyMessageParameters.getInstance(), EmptyRequestBody.getInstance()); }
// Requests issued after close() must fail fast with IllegalStateException("RestClient is already closed").
@Test void testCloseClientBeforeRequest() throws Exception { try (final RestClient restClient = new RestClient(new Configuration(), Executors.directExecutor())) { restClient.close(); // Intentionally close the client prior to the request CompletableFuture<?> future = restClient.sendRequest( unroutableIp, 80, new TestMessageHeaders(), EmptyMessageParameters.getInstance(), EmptyRequestBody.getInstance()); FlinkAssertions.assertThatFuture(future) .eventuallyFailsWith(ExecutionException.class) .withCauseInstanceOf(IllegalStateException.class) .extracting(Throwable::getCause, as(THROWABLE)) .hasMessage("RestClient is already closed"); } }
/**
 * Renders the range set as {@code [range1,range2,...]}. The string is cached
 * and only rebuilt when the set has been modified since the last render
 * (tracked by {@code updatedAfterCachedForToString}).
 */
@Override
public String toString() {
    if (updatedAfterCachedForToString) {
        StringBuilder builder = new StringBuilder();
        // AtomicBoolean is used only because the forEach lambda needs a
        // mutable "first element" flag, not for thread-safety.
        AtomicBoolean first = new AtomicBoolean(true);
        // The previous `if (toString != null)` guard around the opening
        // bracket was dead code: a freshly constructed StringBuilder is
        // never null, so "[" is always appended.
        builder.append("[");
        forEach((range) -> {
            if (!first.get()) {
                builder.append(",");
            }
            builder.append(range);
            first.set(false);
            return true;
        });
        builder.append("]");
        cachedToString = builder.toString();
        updatedAfterCachedForToString = false;
    }
    return cachedToString;
}
// toString must merge overlapping ranges and render disjoint ranges comma-separated inside brackets.
@Test public void testToString() { OpenLongPairRangeSet<LongPair> set = new OpenLongPairRangeSet<>(consumer); Range<LongPair> range = Range.openClosed(new LongPair(0, 97), new LongPair(0, 99)); set.add(range); assertEquals(set.toString(), "[(0:97..0:99]]"); range = Range.openClosed(new LongPair(0, 98), new LongPair(0, 105)); set.add(range); assertEquals(set.toString(), "[(0:97..0:105]]"); range = Range.openClosed(new LongPair(0, 5), new LongPair(0, 75)); set.add(range); assertEquals(set.toString(), "[(0:5..0:75],(0:97..0:105]]"); }
/**
 * Opens an interactive shell into a running container by delegating to the
 * Linux container runtime.
 *
 * @param ctx the execution context identifying the container
 * @return the stdin/stdout stream pair of the interactive session
 * @throws ContainerExecutionException when the runtime reports a zero exit
 *         code failure (see review note below)
 */
@Override
public IOStreamPair execContainer(ContainerExecContext ctx)
    throws ContainerExecutionException {
  IOStreamPair res;
  try {
    res = linuxContainerRuntime.execContainer(ctx);
  } catch (ContainerExecutionException e) {
    int retCode = e.getExitCode();
    // NOTE(review): this swallows every NON-zero exit code (returning an
    // empty stream pair) and only logs/throws when the exit code is 0,
    // which looks inverted — confirm the intended semantics with callers
    // before changing it; behavior is preserved here.
    if (retCode != 0) {
      return new IOStreamPair(null, null);
    }
    LOG.warn("Error in executing container interactive shell {} exit = {}",
        ctx, retCode, e);
    logOutput(e.getOutput());
    throw new ContainerExecutionException(
        "Error in executing container interactive shell "
            + ctx.getContainer().getContainerId().toString()
            + " exit = " + retCode);
  }
  return res;
}
// Smoke test: execContainer on the (mocked) executor is invoked exactly once with the built context.
@Test public void testExecContainer() throws Exception { Container container = mock(Container.class); LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class); ContainerExecContext.Builder builder = new ContainerExecContext.Builder(); builder.setUser("foo").setAppId("app1").setContainer(container); ContainerExecContext ctx = builder.build(); lce.execContainer(ctx); verify(lce, times(1)).execContainer(ctx); }
// Frees per-connection state: deregisters the session's connection id from the MySQL
// prepared-statement id generator.
@Override public void release(final ConnectionSession connectionSession) { MySQLStatementIdGenerator.getInstance().unregisterConnection(connectionSession.getConnectionId()); }
// release() must unregister exactly the session's connection id from the id generator.
@Test void assertRelease() { ConnectionSession connectionSession = mock(ConnectionSession.class); int connectionId = 1; when(connectionSession.getConnectionId()).thenReturn(connectionId); engine.release(connectionSession); verify(MySQLStatementIdGenerator.getInstance()).unregisterConnection(connectionId); }
// Finalizes the builder with the default DMN compiler implementation.
public DMNRuntimeBuilderConfigured buildConfiguration() { return buildConfigurationUsingCustomCompiler(DMNCompilerImpl::new); }
// Building a runtime from an empty resource list must still yield a non-null DMNRuntimeImpl.
@Test void buildFromConfiguration() { final DMNRuntimeImpl retrieved = (DMNRuntimeImpl) dmnRuntimeBuilder .buildConfiguration() .fromResources(Collections.emptyList()).getOrElseThrow(RuntimeException::new); assertThat(retrieved).isNotNull(); }
// Creates the daemon, min-priority worker thread used for push-event polling.
// NOTE(review): the name is the LITERAL string "PushEventPoll-%d" — the "%d" looks like a
// leftover format placeholder that is never substituted, but the unit test currently pins
// this exact literal, so confirm the intent before fixing.
static Thread createThread(Runnable r) { Thread thread = Executors.defaultThreadFactory().newThread(r); thread.setName("PushEventPoll-%d"); thread.setPriority(MIN_PRIORITY); thread.setDaemon(true); return thread; }
// Pins the created thread's priority (MIN), daemon flag (true) and literal name.
@Test public void create_executor() { PushEventPollExecutorServiceImpl underTest = new PushEventPollExecutorServiceImpl(); assertThat(underTest.createThread(() -> { })) .extracting(Thread::getPriority, Thread::isDaemon, Thread::getName) .containsExactly(Thread.MIN_PRIORITY, true, "PushEventPoll-%d"); }
// Extracts only the entries of a Hadoop Configuration whose keys are valid Alluxio property
// keys; all other Hadoop-specific keys are silently skipped.
public static Map<String, Object> getConfigurationFromHadoop( org.apache.hadoop.conf.Configuration hadoopConf) { Map<String, Object> alluxioConfProperties = new HashMap<>(); // Load any Alluxio configuration parameters in the Hadoop configuration. for (Map.Entry<String, String> entry : hadoopConf) { String propertyName = entry.getKey(); if (PropertyKey.isValid(propertyName)) { alluxioConfProperties.put(propertyName, entry.getValue()); } } return alluxioConfProperties; }
// Alluxio keys present in the Hadoop config must merge into Alluxio config (with RUNTIME source);
// unrelated Hadoop keys must be ignored.
@Test public void mergeHadoopConfiguration() { org.apache.hadoop.conf.Configuration hadoopConfig = new org.apache.hadoop.conf.Configuration(); hadoopConfig.set(PropertyKey.S3A_ACCESS_KEY.toString(), TEST_S3_ACCCES_KEY); hadoopConfig.set(PropertyKey.S3A_SECRET_KEY.toString(), TEST_S3_SECRET_KEY); hadoopConfig.set(TEST_ALLUXIO_PROPERTY, TEST_ALLUXIO_VALUE); hadoopConfig.setBoolean(PropertyKey.ZOOKEEPER_ENABLED.getName(), true); hadoopConfig.set(PropertyKey.ZOOKEEPER_ADDRESS.getName(), "host1:port1,host2:port2;host3:port3"); // This hadoop config will not be loaded into Alluxio configuration. hadoopConfig.set("hadoop.config.parameter", "hadoop config value"); mConf.merge( HadoopConfigurationUtils.getConfigurationFromHadoop(hadoopConfig), Source.RUNTIME); assertEquals(TEST_S3_ACCCES_KEY, mConf.get(PropertyKey.S3A_ACCESS_KEY)); assertEquals(TEST_S3_SECRET_KEY, mConf.get(PropertyKey.S3A_SECRET_KEY)); assertEquals(Source.RUNTIME, mConf.getSource(PropertyKey.S3A_ACCESS_KEY)); assertEquals(Source.RUNTIME, mConf.getSource(PropertyKey.S3A_SECRET_KEY)); assertTrue(mConf.getBoolean(PropertyKey.ZOOKEEPER_ENABLED)); assertEquals("host1:port1,host2:port2;host3:port3", mConf.get(PropertyKey.ZOOKEEPER_ADDRESS)); }
// Interceptor failure path: clears the thread-local request data/tag only when the context
// qualifies (per shouldHandle), so traffic-tag state does not leak across requests.
@Override public ExecuteContext onThrow(ExecuteContext context) { if (shouldHandle(context)) { ThreadLocalUtils.removeRequestData(); ThreadLocalUtils.removeRequestTag(); } return context; }
// Thread-local request data/tag must survive a null or CONFIGURED state and be cleared only on DISCONNECTING.
@Test public void testOnThrow() { ThreadLocalUtils.setRequestTag(new RequestTag(null)); ThreadLocalUtils.setRequestData(new RequestData(null, null, null)); // State is null interceptor.onThrow(context); Assert.assertNotNull(ThreadLocalUtils.getRequestTag()); Assert.assertNotNull(ThreadLocalUtils.getRequestData()); // The State is not DISCONNECTING arguments[1] = State.CONFIGURED; interceptor.onThrow(context); Assert.assertNotNull(ThreadLocalUtils.getRequestTag()); Assert.assertNotNull(ThreadLocalUtils.getRequestData()); // The State is DISCONNECTING arguments[1] = State.DISCONNECTING; interceptor.onThrow(context); Assert.assertNull(ThreadLocalUtils.getRequestTag()); Assert.assertNull(ThreadLocalUtils.getRequestData()); }
// Computes how often each node could be removed, then greedily searches for the worst-case
// sequence of host losses that first makes some tenant allocation infeasible.
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() { Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(); return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved); }
// Four scenarios, each starving hosts of exactly one resource (vcpu, memory, disk, disk speed),
// must each produce a failure path whose allocation failures all share that single cause.
@Test public void testNodeResourceFailurePaths() { { CapacityCheckerTester tester = new CapacityCheckerTester(); tester.createNodes(1, 10, 10, new NodeResources(1, 100, 1000, 1), 100, 10, new NodeResources(0, 100, 1000, 1), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking cpu cores.", failureReasons.singularReasonFailures().insufficientVcpu(), failureReasons.size()); } else { fail(); } } { CapacityCheckerTester tester = new CapacityCheckerTester(); tester.createNodes(1, 10, 10, new NodeResources(10, 1, 1000, 1), 100, 10, new NodeResources(10, 0, 1000, 1), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking memory.", failureReasons.singularReasonFailures().insufficientMemoryGb(), failureReasons.size()); } else { fail(); } } { CapacityCheckerTester tester = new CapacityCheckerTester(); tester.createNodes(1, 10, 10, new NodeResources(10, 100, 10, 1), 100, 10, new NodeResources(10, 100, 0, 1), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking disk space.", failureReasons.singularReasonFailures().insufficientDiskGb(), failureReasons.size()); } else { fail(); } } { CapacityCheckerTester tester = new CapacityCheckerTester(); int emptyHostsWithSlowDisk = 10; tester.createNodes(1, 10, List.of(new 
NodeResources(1, 10, 100, 1)), 10, new NodeResources(0, 0, 0, 0), 100, 10, new NodeResources(10, 1000, 10000, 1, NodeResources.DiskSpeed.slow), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All empty hosts should be invalid due to having incompatible disk speed.", failureReasons.singularReasonFailures().incompatibleDiskSpeed(), emptyHostsWithSlowDisk); } else { fail(); } } }
public ArrayList<AnalysisResult<T>> getOutliers(Track<T> track) { // the stream is wonky due to the raw type, probably could be improved return track.points().stream() .map(point -> analyzePoint(point, track)) .filter(analysisResult -> analysisResult.isOutlier()) .collect(toCollection(ArrayList::new)); }
// A known NOP "mode C swap" track must yield exactly the one expected vertical outlier hit.
@Test public void testMissingAltitude_aka_modeCSwap_1() { /* * Some NOP data contains "mode C swaps". Mode C swaps occur when one aircraft (say at 20k * feet) flies directly over top of another aircraft (at say 10k feet). When this occurs the * NOP system can get confused and drop altitude measurements. */ Track<NopHit> testTrack1 = createTrackFromResource(VerticalOutlierDetector.class, "AltitudeOutlier_1.txt"); Collection<AnalysisResult<NopHit>> outliers = (new VerticalOutlierDetector<NopHit>()).getOutliers(testTrack1); confirmExactlyTheseOutliers( outliers, "[RH],AGW,RDG,09/20/2017,17:28:02.096,,,,2525,000,425,252,040.49450,-075.76505,110,,10.66,5.09,,,,RDG,,,,,???,,,,,4221,???,,00,,,1,,0,,90.31,88.64,{RH}" ); }
@Override
public synchronized void close() throws IOException {
    // The provider is expected to be non-null here, but guard defensively
    // before closing to avoid an NPE (e.g. if construction partially failed).
    if (iamCredentialsProvider != null) {
        iamCredentialsProvider.close();
    }
}
// close() on a freshly constructed provider must not throw.
@Test public void testIAMInstanceCredentialsProviderClose() throws Throwable { new IAMInstanceCredentialsProvider().close(); }
// Wraps the source dictionary's item map in an unmodifiable view so all mutators
// inherited from COSDictionary throw UnsupportedOperationException.
UnmodifiableCOSDictionary(COSDictionary dict) { super(); items = Collections.unmodifiableMap(dict.items); }
// Every mutator on the unmodifiable view (clear, removeItem, addAll, setFlag, setNeedToBeUpdated)
// must throw UnsupportedOperationException.
@Test void testUnmodifiableCOSDictionary() { COSDictionary unmodifiableCOSDictionary = new COSDictionary().asUnmodifiableDictionary(); try { unmodifiableCOSDictionary.clear(); fail("An UnsupportedOperationException should have been thrown"); } catch(UnsupportedOperationException exception) { // nothing to do } try { unmodifiableCOSDictionary.removeItem(COSName.A); fail("An UnsupportedOperationException should have been thrown"); } catch(UnsupportedOperationException exception) { // nothing to do } COSDictionary cosDictionary = new COSDictionary(); try { unmodifiableCOSDictionary.addAll(cosDictionary); fail("An UnsupportedOperationException should have been thrown"); } catch(UnsupportedOperationException exception) { // nothing to do } try { unmodifiableCOSDictionary.setFlag(COSName.A, 0, true); fail("An UnsupportedOperationException should have been thrown"); } catch(UnsupportedOperationException exception) { // nothing to do } try { unmodifiableCOSDictionary.setNeedToBeUpdated(true); fail("An UnsupportedOperationException should have been thrown"); } catch(UnsupportedOperationException exception) { // nothing to do } }
// Resolves program arguments from either the deprecated single-string form (programArgs,
// tokenized) or the list form (programArgsList), each falling back to its query parameter.
// Specifying both is rejected with HTTP 400 to avoid ambiguity; the list form wins when
// only it is present.
private static <R extends JarRequestBody, M extends MessageParameters> List<String> getProgramArgs(HandlerRequest<R> request, Logger log) throws RestHandlerException { JarRequestBody requestBody = request.getRequestBody(); @SuppressWarnings("deprecation") List<String> programArgs = tokenizeArguments( fromRequestBodyOrQueryParameter( emptyToNull(requestBody.getProgramArguments()), () -> getQueryParameter(request, ProgramArgsQueryParameter.class), null, log)); List<String> programArgsList = fromRequestBodyOrQueryParameter( requestBody.getProgramArgumentsList(), () -> request.getQueryParameter(ProgramArgQueryParameter.class), null, log); if (!programArgsList.isEmpty()) { if (!programArgs.isEmpty()) { throw new RestHandlerException( "Confusing request: programArgs and programArgsList are specified, please, use only programArgsList", HttpResponseStatus.BAD_REQUEST); } return programArgsList; } else { return programArgs; } }
// An empty request body must yield the documented defaults: null entry class/job id, no args,
// and the configured default parallelism.
@Test void testFromRequestDefaults() throws Exception { final JarRunMessageParameters parameters = getDummyMessageParameters(); final HandlerRequest<JarPlanRequestBody> request = HandlerRequest.create(new JarPlanRequestBody(), parameters); final JarHandlerUtils.JarHandlerContext jarHandlerContext = JarHandlerUtils.JarHandlerContext.fromRequest(request, tempDir, LOG); assertThat(jarHandlerContext.getEntryClass()).isNull(); assertThat(jarHandlerContext.getProgramArgs()).isEmpty(); assertThat(jarHandlerContext.getParallelism()) .isEqualTo(CoreOptions.DEFAULT_PARALLELISM.defaultValue()); assertThat(jarHandlerContext.getJobId()).isNull(); }
/**
 * Creates a BigQuery destination connection profile in Datastream and records
 * its id for later cleanup.
 *
 * @param connectionProfileId the id (and display name) of the profile to create
 * @return the created {@link ConnectionProfile}
 * @throws DatastreamResourceManagerException if creation fails or the calling
 *         thread is interrupted while waiting (interrupt status is restored)
 */
public synchronized ConnectionProfile createBQDestinationConnectionProfile(
    String connectionProfileId) {
  LOG.info(
      "Creating BQ Destination Connection Profile {} in project {}.",
      connectionProfileId,
      projectId);
  try {
    ConnectionProfile.Builder connectionProfileBuilder =
        ConnectionProfile.newBuilder()
            .setDisplayName(connectionProfileId)
            .setStaticServiceIpConnectivity(StaticServiceIpConnectivity.getDefaultInstance())
            .setBigqueryProfile(BigQueryProfile.newBuilder());
    CreateConnectionProfileRequest request =
        CreateConnectionProfileRequest.newBuilder()
            .setParent(LocationName.of(projectId, location).toString())
            .setConnectionProfile(connectionProfileBuilder)
            .setConnectionProfileId(connectionProfileId)
            .build();
    ConnectionProfile reference = datastreamClient.createConnectionProfileAsync(request).get();
    // Track the id so the manager can delete the profile during teardown.
    createdConnectionProfileIds.add(connectionProfileId);
    LOG.info(
        "Successfully created BQ Destination Connection Profile {} in project {}.",
        connectionProfileId,
        projectId);
    return reference;
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers up the stack can observe it;
    // the previous multi-catch silently swallowed the interrupt.
    Thread.currentThread().interrupt();
    throw new DatastreamResourceManagerException(
        "Failed to create BQ destination connection profile. ", e);
  } catch (ExecutionException e) {
    throw new DatastreamResourceManagerException(
        "Failed to create BQ destination connection profile. ", e);
  }
}
@Test
public void testCreateBQDestinationConnectionInterruptedExceptionShouldFail()
    throws ExecutionException, InterruptedException {
  // Simulate the blocking get() on the async create being interrupted.
  when(datastreamClient
          .createConnectionProfileAsync(any(CreateConnectionProfileRequest.class))
          .get())
      .thenThrow(InterruptedException.class);

  DatastreamResourceManagerException thrown =
      assertThrows(
          DatastreamResourceManagerException.class,
          () -> testManager.createBQDestinationConnectionProfile(CONNECTION_PROFILE_ID));

  assertThat(thrown)
      .hasMessageThat()
      .contains("Failed to create BQ destination connection profile.");
}
/**
 * Completes the given {@link SendRequest} in place: selects inputs from the wallet's spendable
 * outputs to cover the requested outputs plus fee, optionally adds a change output, shuffles
 * outputs, signs the inputs (unless disabled on the request), and marks the request completed.
 *
 * <p>Acquires the wallet lock for the whole operation.
 *
 * @param req the send request to complete; must not already be completed
 * @throws InsufficientMoneyException if the wallet cannot cover the outputs plus fee
 * @throws CompletionException on other completion failures (dusty send, multiple OP_RETURNs,
 *         could-not-adjust-downwards when emptying, oversized transaction)
 */
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
    lock.lock();
    try {
        checkArgument(!req.completed, () -> "given SendRequest has already been completed");
        log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB",
                req.tx.getOutputs().size(), req.tx.getOutputSum().toFriendlyString(),
                req.feePerKb.toFriendlyString());
        // Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
        // with the actual outputs that'll be used to gather the required amount of value. In this way, users
        // can customize coin selection policies. The call below will ignore immature coinbases and outputs
        // we don't have the keys for.
        List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);

        // Connect (add a value amount) unconnected inputs
        List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
        req.tx.clearInputs();
        inputs.forEach(req.tx::addInput);

        // Warn if there are remaining unconnected inputs whose value we do not know
        // TODO: Consider throwing if there are inputs that we don't have a value for
        if (req.tx.getInputs().stream()
                .map(TransactionInput::getValue)
                .anyMatch(Objects::isNull))
            log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");

        // If any inputs have already been added, we don't need to get their value from wallet
        Coin totalInput = req.tx.getInputSum();
        // Calculate the amount of value we need to import.
        Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);

        // Enforce the OP_RETURN limit
        if (req.tx.getOutputs().stream()
                .filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
                .count() > 1) // Only 1 OP_RETURN per transaction allowed.
            throw new MultipleOpReturnRequested();

        // Check for dusty sends
        if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
            if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
                throw new DustySendRequested();
        }

        // Filter out candidates that are already included in the transaction inputs
        List<TransactionOutput> candidates = prelimCandidates.stream()
                .filter(output -> alreadyIncluded(req.tx.getInputs(), output))
                .collect(StreamUtils.toUnmodifiableList());

        CoinSelection bestCoinSelection;
        TransactionOutput bestChangeOutput = null;
        List<Coin> updatedOutputValues = null;
        if (!req.emptyWallet) {
            // This can throw InsufficientMoneyException.
            FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
            bestCoinSelection = feeCalculation.bestCoinSelection;
            bestChangeOutput = feeCalculation.bestChangeOutput;
            updatedOutputValues = feeCalculation.updatedOutputValues;
        } else {
            // We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
            // of the total value we can currently spend as determined by the selector, and then subtracting the fee.
            checkState(req.tx.getOutputs().size() == 1, () -> "empty wallet TX must have a single output only");
            CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
            bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
            candidates = null; // Selector took ownership and might have changed candidates. Don't access again.
            req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
            log.info(" emptying {}", bestCoinSelection.totalValue().toFriendlyString());
        }

        // Add the chosen inputs to the transaction.
        bestCoinSelection.outputs()
                .forEach(req.tx::addInput);

        if (req.emptyWallet) {
            // Deduct the fee from the single output; fails if that would make it dust.
            if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
                throw new CouldNotAdjustDownwards();
        }

        // Fee calculation may have rebalanced output values (e.g. fee subtracted from outputs).
        if (updatedOutputValues != null) {
            for (int i = 0; i < updatedOutputValues.size(); i++) {
                req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
            }
        }

        if (bestChangeOutput != null) {
            req.tx.addOutput(bestChangeOutput);
            log.info(" with {} change", bestChangeOutput.getValue().toFriendlyString());
        }

        // Now shuffle the outputs to obfuscate which is the change.
        if (req.shuffleOutputs)
            req.tx.shuffleOutputs();

        // Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
        if (req.signInputs)
            signTransaction(req);

        // Check size.
        final int size = req.tx.messageSize();
        if (size > Transaction.MAX_STANDARD_TX_SIZE)
            throw new ExceededMaxTransactionSize();

        // Label the transaction as being self created. We can use this later to spend its change output even before
        // the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
        // point - the user isn't interested in a confidence transition they made themselves.
        getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
        // Label the transaction as being a user requested payment. This can be used to render GUI wallet
        // transaction lists more appropriately, especially when the wallet starts to generate transactions itself
        // for internal purposes.
        req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
        // Record the exchange rate that was valid when the transaction was completed.
        req.tx.setExchangeRate(req.exchangeRate);
        req.tx.setMemo(req.memo);
        req.completed = true;
        log.info(" completed: {}", req.tx);
    } finally {
        lock.unlock();
    }
}
@Test(expected = Wallet.DustySendRequested.class) public void sendDustTest() throws InsufficientMoneyException { // Tests sending dust, should throw DustySendRequested. Transaction tx = new Transaction(); Coin dustThreshold = new TransactionOutput(null, Coin.COIN, OTHER_ADDRESS).getMinNonDustValue(); tx.addOutput(dustThreshold.subtract(SATOSHI), OTHER_ADDRESS); SendRequest request = SendRequest.forTx(tx); request.ensureMinRequiredFee = true; wallet.completeTx(request); }
/**
 * Compares two table schemas field-by-field and, when they differ, returns the merged
 * (updated) schema; returns {@link Optional#empty()} when they are equivalent.
 *
 * @param oldSchema the schema currently in use
 * @param newSchema the candidate schema
 * @return the updated schema, or empty if no update is needed
 */
public static Optional<TableSchema> getUpdatedSchema(
    TableSchema oldSchema, TableSchema newSchema) {
  Result fieldDiff = getUpdatedSchema(oldSchema.getFieldsList(), newSchema.getFieldsList());
  if (fieldDiff.isEquivalent()) {
    // Nothing changed — no schema update required.
    return Optional.empty();
  }
  return fieldDiff
      .getFields()
      .map(fields -> TableSchema.newBuilder().addAllFields(fields).build());
}
@Test
public void testNonEquivalentSchema() {
  // Two schemas that agree on their top-level fields but whose nested STRUCTs
  // disagree on the type of "c" (STRING vs INT64) must be detected as different.
  TableSchema schema1 = schemaWithNestedStruct(flatSchema(TableFieldSchema.Type.STRING));
  TableSchema schema2 = schemaWithNestedStruct(flatSchema(TableFieldSchema.Type.INT64));
  assertTrue(TableSchemaUpdateUtils.getUpdatedSchema(schema1, schema2).isPresent());
}

// Builds a flat schema with STRING fields "a" and "b" plus a "c" field of the given type.
private static TableSchema flatSchema(TableFieldSchema.Type typeOfC) {
  return TableSchema.newBuilder()
      .addFields(
          TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING))
      .addFields(
          TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING))
      .addFields(TableFieldSchema.newBuilder().setName("c").setType(typeOfC))
      .build();
}

// Wraps the given base fields under a "nested" STRUCT alongside STRING fields a/b/c.
private static TableSchema schemaWithNestedStruct(TableSchema base) {
  return TableSchema.newBuilder()
      .addFields(
          TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING))
      .addFields(
          TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING))
      .addFields(
          TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING))
      .addFields(
          TableFieldSchema.newBuilder()
              .setName("nested")
              .setType(TableFieldSchema.Type.STRUCT)
              .addAllFields(base.getFieldsList()))
      .build();
}
/**
 * Asynchronously queries a broker at the given address for messages matching the request
 * header (topic/key), optionally by unique key.
 *
 * <p>On SUCCESS the response body is decoded as a message batch and filtered by
 * topic/key/unique-key before completing the future; QUERY_NOT_FOUND completes with an
 * empty list; any other code completes the future exceptionally with an
 * {@link MQClientException}.
 *
 * <p>NOTE(review): if the underlying {@code remotingClient.invoke} future completes
 * exceptionally (e.g. transport failure), that failure is not propagated to the returned
 * future here — confirm it is handled upstream or the caller's future may never complete.
 *
 * @param address        broker address to query
 * @param uniqueKeyFlag  whether the key in the header is a unique message key
 * @param decompressBody whether to decompress message bodies while decoding
 * @param requestHeader  query parameters (topic, key, time range, etc.)
 * @param timeoutMillis  remoting invoke timeout in milliseconds
 * @return a future completing with the matching messages (possibly empty)
 */
@Override
public CompletableFuture<List<MessageExt>> queryMessage(String address, boolean uniqueKeyFlag, boolean decompressBody,
        QueryMessageRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<List<MessageExt>> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_MESSAGE, requestHeader);
    // Tell the broker whether this is a unique-key query.
    request.addExtField(MixAll.UNIQUE_MSG_QUERY_FLAG, String.valueOf(uniqueKeyFlag));
    remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
        if (response.getCode() == ResponseCode.SUCCESS) {
            List<MessageExt> wrappers =
                    MessageDecoder.decodesBatch(ByteBuffer.wrap(response.getBody()), true, decompressBody, true);
            future.complete(filterMessages(wrappers, requestHeader.getTopic(), requestHeader.getKey(), uniqueKeyFlag));
        } else if (response.getCode() == ResponseCode.QUERY_NOT_FOUND) {
            // No matching messages is a normal outcome: complete with an empty list.
            List<MessageExt> wrappers = new ArrayList<>();
            future.complete(wrappers);
        } else {
            log.warn("queryMessage getResponseCommand failed, {} {}, header={}",
                    response.getCode(), response.getRemark(), requestHeader);
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    });
    return future;
}
@Test
public void assertQueryMessageWithError() {
    // Make the broker return an error response; the returned future must fail
    // with an MQClientException carrying the response code and remark.
    setResponseError();
    QueryMessageRequestHeader header = mock(QueryMessageRequestHeader.class);

    CompletableFuture<List<MessageExt>> queryFuture =
            mqClientAdminImpl.queryMessage(defaultBrokerAddr, false, false, header, defaultTimeout);

    Throwable cause = assertThrows(ExecutionException.class, queryFuture::get).getCause();
    assertTrue(cause instanceof MQClientException);
    MQClientException clientException = (MQClientException) cause;
    assertEquals(ResponseCode.SYSTEM_ERROR, clientException.getResponseCode());
    assertTrue(clientException.getMessage().contains("CODE: 1 DESC: null"));
}
/**
 * Collects the value of the named field from every element of the given collection.
 *
 * <p>Delegates to the three-argument overload with the boolean flag set to {@code false}
 * (presumably "do not ignore null values" — confirm against that overload's contract).
 *
 * @param collection elements to read the field from
 * @param fieldName  name of the field whose values are collected
 * @return the field values, in iteration order
 */
public static List<Object> getFieldValues(Iterable<?> collection, final String fieldName) {
    return getFieldValues(collection, fieldName, false);
}
@Test
public void getFieldValuesTest() {
    // Field values must come back in the iteration order of the source collection.
    final Dict first = Dict.create().set("id", 12).set("name", "张三").set("age", 23);
    final Dict second = Dict.create().set("age", 13).set("id", 15).set("name", "李四");

    final List<Object> names = CollUtil.getFieldValues(CollUtil.newArrayList(first, second), "name");

    assertEquals("张三", names.get(0));
    assertEquals("李四", names.get(1));
}
Config targetConfig(Config sourceConfig, boolean incremental) { // If using incrementalAlterConfigs API, sync the default property with either SET or DELETE action determined by ConfigPropertyFilter::shouldReplicateSourceDefault later. // If not using incrementalAlterConfigs API, sync the default property only if ConfigPropertyFilter::shouldReplicateSourceDefault returns true. // If ConfigPropertyFilter::shouldReplicateConfigProperty returns false, do not sync the property at all. List<ConfigEntry> entries = sourceConfig.entries().stream() .filter(x -> incremental || (x.isDefault() && shouldReplicateSourceDefault(x.name())) || !x.isDefault()) .filter(x -> !x.isReadOnly() && !x.isSensitive()) .filter(x -> x.source() != ConfigEntry.ConfigSource.STATIC_BROKER_CONFIG) .filter(x -> shouldReplicateTopicConfigurationProperty(x.name())) .collect(Collectors.toList()); return new Config(entries); }
@Test
public void testConfigPropertyFiltering() {
    MirrorSourceConnector connector = new MirrorSourceConnector(
            new SourceAndTarget("source", "target"),
            new DefaultReplicationPolicy(), x -> true, new DefaultConfigPropertyFilter());

    // One plain property, one default-sourced property, one property on the default
    // exclusion list of DefaultConfigPropertyFilter.
    List<ConfigEntry> sourceEntries = new ArrayList<>();
    sourceEntries.add(new ConfigEntry("name-1", "value-1"));
    sourceEntries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG,
            false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, ""));
    sourceEntries.add(new ConfigEntry("min.insync.replicas", "2"));

    Config targetConfig = connector.targetConfig(new Config(sourceEntries), true);

    assertTrue(containsEntry(targetConfig, "name-1"), "should replicate properties");
    assertTrue(containsEntry(targetConfig, "name-2"), "should include default properties");
    assertFalse(containsEntry(targetConfig, "min.insync.replicas"),
            "should not replicate excluded properties");
}

// True when the config contains an entry with the given name.
private static boolean containsEntry(Config config, String name) {
    return config.entries().stream().anyMatch(entry -> entry.name().equals(name));
}
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR if (splittee == null || splitChar == null) { return new String[0]; } final String EMPTY_ELEMENT = ""; int spot; final int splitLength = splitChar.length(); final String adjacentSplit = splitChar + splitChar; final int adjacentSplitLength = adjacentSplit.length(); if (truncate) { while ((spot = splittee.indexOf(adjacentSplit)) != -1) { splittee = splittee.substring(0, spot + splitLength) + splittee.substring(spot + adjacentSplitLength, splittee.length()); } if (splittee.startsWith(splitChar)) { splittee = splittee.substring(splitLength); } if (splittee.endsWith(splitChar)) { // Remove trailing splitter splittee = splittee.substring(0, splittee.length() - splitLength); } } List<String> returns = new ArrayList<>(); final int length = splittee.length(); // This is the new length int start = 0; spot = 0; while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) { if (spot > 0) { returns.add(splittee.substring(start, spot)); } else { returns.add(EMPTY_ELEMENT); } start = spot + splitLength; } if (start < length) { returns.add(splittee.substring(start)); } else if (spot == length - splitLength) {// Found splitChar at end of line returns.add(EMPTY_ELEMENT); } return returns.toArray(new String[returns.size()]); }
@Test public void testSplitSSSEmptyDelimiter() { String in = "a,bc,,"; // Empty delimiter assertThat(JOrphanUtils.split(in, "", "?"), CoreMatchers.equalTo(new String[]{in})); }
/**
 * Computes the tax due on the given amount as {@code TAX_PERCENTAGE} percent of it.
 *
 * @param amount the taxable amount
 * @return the tax portion of the amount
 */
@Override
public double calculate(double amount) {
    // Same IEEE result as amount * TAX_PERCENTAGE / 100.0 (multiplication is commutative).
    return (TAX_PERCENTAGE * amount) / 100.0;
}
@Test
void testTaxCalculation() {
    // Domestic tax is 20%, so 100.0 yields exactly 20.0.
    target = new DomesticTaxCalculator();
    Assertions.assertEquals(target.calculate(100.0), 20.0);
}
/**
 * Returns the decimal string form of the current value (as read via {@code lvVal()}).
 */
@Override
public String toString() {
    // String.valueOf(long) delegates to Long.toString — identical output.
    return String.valueOf(lvVal());
}
@Test
public void testToString() {
    // toString must render the stored value in plain decimal form.
    PaddedAtomicLong value = new PaddedAtomicLong(10);
    assertEquals("10", value.toString());
}
/**
 * Sets the write timeout to apply to outgoing requests.
 *
 * <p>NOTE(review): the unit is not visible here — likely milliseconds; confirm against the
 * transport code that reads {@code writeTimeout}.
 *
 * @param writeTimeout the new write timeout value
 */
public void setWriteTimeout(int writeTimeout) {
    this.writeTimeout = writeTimeout;
}
@Test
public void testBasicOperation() throws IOException {
    // Stub a successful HTTP round trip (200 OK).
    when(mockLowLevelRequest.execute()).thenReturn(mockLowLevelResponse);
    when(mockLowLevelResponse.getStatusCode()).thenReturn(200);

    Storage.Buckets.Get getRequest = storage.buckets().get("test");
    HttpResponse httpResponse = getRequest.executeUnparsed();
    assertNotNull(httpResponse);

    // The client must have installed its interceptor, headers and timeouts on the request.
    verify(mockHttpResponseInterceptor).interceptResponse(any(HttpResponse.class));
    verify(mockLowLevelRequest, atLeastOnce()).addHeader(anyString(), anyString());
    verify(mockLowLevelRequest).setTimeout(anyInt(), anyInt());
    verify(mockLowLevelRequest).setWriteTimeout(anyInt());
    verify(mockLowLevelRequest).execute();
    verify(mockLowLevelResponse, atLeastOnce()).getStatusCode();

    expectedLogs.verifyNotLogged("Request failed");
}
/**
 * Returns the configuration errors recorded on this object.
 *
 * @return the backing {@link ConfigErrors} instance (not a defensive copy)
 */
@Override
public ConfigErrors errors() {
    return errors;
}
@Test
public void shouldValidateCorrectPipelineLabelWithoutTruncationSyntax() {
    // A well-formed label template (no truncation syntax) must produce no errors
    // on the LABEL_TEMPLATE attribute.
    PipelineConfig config = createAndValidatePipelineLabel("pipeline-${COUNT}-${git}-454");
    assertThat(config.errors().on(PipelineConfig.LABEL_TEMPLATE), is(nullValue()));
}