focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Starts a workflow instance according to the given run strategy inside a single
 * retryable transaction, assigning it the next instance id for its workflow.
 *
 * @param instance    the workflow instance to start (mutated by completeInstanceInit)
 * @param runStrategy the run strategy governing how the instance is started
 * @return number of rows affected; 0 when the instance uuid was already started (duplicate)
 */
public int startWithRunStrategy(
    @NotNull WorkflowInstance instance, @NotNull RunStrategy runStrategy) {
  return withMetricLogError(
      () ->
          withRetryableTransaction(
              conn -> {
                // Candidate id: one past the latest persisted instance id for this workflow.
                final long nextInstanceId =
                    getLatestInstanceId(conn, instance.getWorkflowId()) + 1;
                if (isDuplicated(conn, instance)) {
                  return 0; // idempotent: the same uuid has already been started
                }
                completeInstanceInit(conn, nextInstanceId, instance);
                int res;
                if (instance.getStatus().isTerminal()) {
                  // Save it directly and send a terminate event
                  res = addTerminatedInstance(conn, instance);
                } else {
                  switch (runStrategy.getRule()) {
                    case SEQUENTIAL:
                    case PARALLEL:
                    case STRICT_SEQUENTIAL:
                      res = insertInstance(conn, instance, true, null);
                      break;
                    case FIRST_ONLY:
                      res = startFirstOnlyInstance(conn, instance);
                      break;
                    case LAST_ONLY:
                      res = startLastOnlyInstance(conn, instance);
                      break;
                    default:
                      throw new MaestroInternalError(
                          "When start, run strategy [%s] is not supported.", runStrategy);
                  }
                }
                // Only advance the latest-id watermark when this run actually took the new id
                // (completeInstanceInit presumably keeps an existing id for restarts — confirm).
                if (instance.getWorkflowInstanceId() == nextInstanceId) {
                  updateLatestInstanceId(conn, instance.getWorkflowId(), nextInstanceId);
                }
                return res;
              }),
      "startWithRunStrategy",
      "Failed to start a workflow [{}][{}] with run strategy [{}]",
      instance.getWorkflowId(),
      instance.getWorkflowUuid(),
      runStrategy);
}
// Starting an instance carrying id 0 must assign the next instance id (2 here)
// and reset its run id to 1, while preserving the caller-supplied uuid.
@Test
public void testStartWithRunStrategyForStartWithRunId() {
  wfi.setWorkflowInstanceId(0L);
  wfi.setWorkflowRunId(2L);
  wfi.setWorkflowUuid("test-uuid");
  int res = runStrategyDao.startWithRunStrategy(wfi, Defaults.DEFAULT_RUN_STRATEGY);
  assertEquals(1, res);
  assertEquals(2, wfi.getWorkflowInstanceId());
  assertEquals(1, wfi.getWorkflowRunId()); // reset to 1
  assertEquals("test-uuid", wfi.getWorkflowUuid());
  WorkflowInstance latestRun =
      dao.getLatestWorkflowInstanceRun(wfi.getWorkflowId(), wfi.getWorkflowInstanceId());
  assertEquals(2, latestRun.getWorkflowInstanceId());
  assertEquals("test-uuid", latestRun.getWorkflowUuid());
  verifyPublish(1, 0, 0, 0, 0);
  // Cleanup: remove the instance persisted by this test.
  MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 2);
}
/**
 * Returns an iterable view of the float array whose elements are compared using
 * the exact-equality correspondence (EXACT_EQUALITY_CORRESPONDENCE).
 */
public FloatArrayAsIterable usingExactEquality() {
  return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
// Exact equality must distinguish -0.0f from 0.0f, so contains(0.0f) fails here.
@Test
public void usingExactEquality_contains_failureWithNegativeZero() {
  expectFailureWhenTestingThat(array(1.0f, -0.0f, 3.0f)).usingExactEquality().contains(0.0f);
  assertFailureKeys("value of", "expected to contain", "testing whether", "but was");
  assertFailureValue("expected to contain", Float.toString(0.0f));
}
/**
 * Maps a numeric status code to its TriRpcStatus by first resolving it to the
 * Code enum, then delegating to the Code overload.
 */
public static TriRpcStatus fromCode(int code) {
  return fromCode(Code.fromCode(code));
}
// The Code passed to fromCode must be preserved on the resulting status.
@Test
void testFromCode() {
  Assertions.assertEquals(Code.UNKNOWN, TriRpcStatus.fromCode(Code.UNKNOWN).code);
}
/**
 * Activates the given work for its sharded key.
 *
 * Returns EXECUTE when the key had no queued work (caller should run it now),
 * DUPLICATE when the exact work id is already queued, STALE when the queue already
 * holds a newer work token for the same cache token, and QUEUED when the work was
 * appended behind currently active work. While scanning, stale non-active entries
 * with an older work token for the same cache token are evicted.
 */
synchronized ActivateWorkResult activateWorkForKey(ExecutableWork executableWork) {
  ShardedKey shardedKey = executableWork.work().getShardedKey();
  Deque<ExecutableWork> workQueue = activeWork.getOrDefault(shardedKey, new ArrayDeque<>());
  // This key does not have any work queued up on it. Create one, insert Work, and mark the work
  // to be executed.
  if (!activeWork.containsKey(shardedKey) || workQueue.isEmpty()) {
    workQueue.addLast(executableWork);
    activeWork.put(shardedKey, workQueue);
    incrementActiveWorkBudget(executableWork.work());
    return ActivateWorkResult.EXECUTE;
  }
  // Check to see if we have this work token queued.
  Iterator<ExecutableWork> workIterator = workQueue.iterator();
  while (workIterator.hasNext()) {
    ExecutableWork queuedWork = workIterator.next();
    if (queuedWork.id().equals(executableWork.id())) {
      return ActivateWorkResult.DUPLICATE;
    }
    if (queuedWork.id().cacheToken() == executableWork.id().cacheToken()) {
      if (executableWork.id().workToken() > queuedWork.id().workToken()) {
        // Check to see if the queuedWork is active. We only want to remove it if it is NOT
        // currently active.
        if (!queuedWork.equals(workQueue.peek())) {
          workIterator.remove();
          decrementActiveWorkBudget(queuedWork.work());
        }
        // Continue here to possibly remove more non-active stale work that is queued.
      } else {
        return ActivateWorkResult.STALE;
      }
    }
  }
  // Queue the work for later processing.
  workQueue.addLast(executableWork);
  incrementActiveWorkBudget(executableWork.work());
  return ActivateWorkResult.QUEUED;
}
// Activating work for a key with nothing queued should yield EXECUTE.
@Test
public void testActivateWorkForKey_EXECUTE_unknownKey() {
  ActivateWorkResult activateWorkResult =
      activeWorkState.activateWorkForKey(
          createWork(createWorkItem(1L, 1L, shardedKey("someKey", 1L))));
  assertEquals(ActivateWorkResult.EXECUTE, activateWorkResult);
}
/**
 * Returns an iterable view of the float array whose elements are compared using
 * the given tolerance-based correspondence.
 */
public FloatArrayAsIterable usingTolerance(double tolerance) {
  return new FloatArrayAsIterable(tolerance(tolerance), iterableSubject());
}
// Contents match within tolerance but order differs, so inOrder() must fail
// with the "order was wrong" failure keys.
@Test
public void usingTolerance_containsExactly_primitiveFloatArray_inOrder_failure() {
  expectFailureWhenTestingThat(array(1.1f, TOLERABLE_2POINT2, 3.3f))
      .usingTolerance(DEFAULT_TOLERANCE)
      .containsExactly(array(2.2f, 1.1f, 3.3f))
      .inOrder();
  assertFailureKeys(
      "value of", "contents match, but order was wrong", "expected", "testing whether",
      "but was");
  assertFailureValue("expected", lenientFormat("[%s, %s, %s]", 2.2f, 1.1f, 3.3f));
}
public static Properties parseArguments(String[] args) { Properties props = argumentsToProperties(args); // complete with only the system properties that start with "sonar." for (Map.Entry<Object, Object> entry : System.getProperties().entrySet()) { String key = entry.getKey().toString(); if (key.startsWith("sonar.")) { props.setProperty(key, entry.getValue().toString()); } } return props; }
// System properties prefixed with "sonar." must be merged into the parsed args,
// while unprefixed system properties are ignored.
@Test
public void parseArguments() {
  System.setProperty("CommandLineParserTest.unused", "unused");
  System.setProperty("sonar.CommandLineParserTest.used", "used");
  Properties p = CommandLineParser.parseArguments(new String[] {"-Dsonar.foo=bar"});
  // test environment can already declare some system properties prefixed by "sonar."
  // so we can't test the exact number "2"
  assertThat(p.size()).isGreaterThanOrEqualTo(2);
  assertThat(p.getProperty("sonar.foo")).isEqualTo("bar");
  assertThat(p.getProperty("sonar.CommandLineParserTest.used")).isEqualTo("used");
}
/**
 * Jackson creator: parses a human-readable size string, defaulting the unit to
 * bytes when none is specified in the string.
 */
@JsonCreator
public static DataSize parse(CharSequence size) {
  return parse(size, DataSizeUnit.BYTES);
}
// An unparsable size string must raise IllegalArgumentException naming the input.
@Test
void unableParseWrongDataSizeFormat() {
  assertThatIllegalArgumentException()
      .isThrownBy(() -> DataSize.parse("1 mega byte"))
      .withMessage("Invalid size: 1 mega byte");
}
/**
 * Meters a log event: the aggregate meter is always marked, plus the per-level
 * meter matching the event's standard level. Levels outside the handled set
 * only count toward the aggregate.
 */
@Override
public void append(LogEvent event) {
  all.mark();
  switch (event.getLevel().getStandardLevel()) {
    case TRACE:
      trace.mark();
      break;
    case DEBUG:
      debug.mark();
      break;
    case INFO:
      info.mark();
      break;
    case WARN:
      warn.mark();
      break;
    case ERROR:
      error.mark();
      break;
    case FATAL:
      fatal.mark();
      break;
    default:
      break;
  }
}
// A TRACE event must tick both the aggregate meter and the trace meter exactly once.
@Test
public void metersTraceEvents() {
  when(event.getLevel()).thenReturn(Level.TRACE);
  appender.append(event);
  assertThat(registry.meter(METRIC_NAME_PREFIX + ".all").getCount())
      .isEqualTo(1);
  assertThat(registry.meter(METRIC_NAME_PREFIX + ".trace").getCount())
      .isEqualTo(1);
}
/**
 * Returns the cash-flow value at the given bar index. No bounds handling here;
 * an out-of-range index propagates the backing list's exception.
 */
@Override
public Num getValue(int index) {
  return values.get(index);
}
// Cash flow over mixed buy/sell trades: verifies the ratio at every bar,
// including the negative value produced by the final short position.
@Test
public void cashFlowWithSellAndBuyTrades() {
  BarSeries sampleBarSeries = new MockBarSeries(numFunction, 2, 1, 3, 5, 6, 3, 20);
  TradingRecord tradingRecord = new BaseTradingRecord(
      Trade.buyAt(0, sampleBarSeries), Trade.sellAt(1, sampleBarSeries),
      Trade.buyAt(3, sampleBarSeries), Trade.sellAt(4, sampleBarSeries),
      Trade.sellAt(5, sampleBarSeries), Trade.buyAt(6, sampleBarSeries));
  CashFlow cashFlow = new CashFlow(sampleBarSeries, tradingRecord);
  assertNumEquals(1, cashFlow.getValue(0));
  assertNumEquals("0.5", cashFlow.getValue(1));
  assertNumEquals("0.5", cashFlow.getValue(2));
  assertNumEquals("0.5", cashFlow.getValue(3));
  assertNumEquals("0.6", cashFlow.getValue(4));
  assertNumEquals("0.6", cashFlow.getValue(5));
  assertNumEquals("-2.8", cashFlow.getValue(6));
}
/**
 * Serializes the skaffold files template to a JSON string.
 *
 * Fixed: the previous implementation wrote UTF-8 bytes via Jackson into a
 * ByteArrayOutputStream and then decoded them with {@code toString()}, which uses
 * the platform-default charset — garbling non-ASCII content on non-UTF-8 JVMs.
 * {@code writeValueAsString} produces the String directly with no byte round-trip.
 *
 * @return the JSON representation of the template
 * @throws IOException if serialization fails (JsonProcessingException is a subtype)
 */
public String getJsonString() throws IOException {
  return new ObjectMapper().writeValueAsString(skaffoldFilesTemplate);
}
// Accumulated build/input/ignore paths must serialize to the expected JSON document.
@Test
public void testGetJsonString() throws IOException {
  SkaffoldFilesOutput skaffoldFilesOutput = new SkaffoldFilesOutput();
  skaffoldFilesOutput.addBuild(Paths.get("buildFile1"));
  skaffoldFilesOutput.addBuild(Paths.get("buildFile2"));
  skaffoldFilesOutput.addInput(Paths.get("input1"));
  skaffoldFilesOutput.addInput(Paths.get("input2"));
  skaffoldFilesOutput.addIgnore(Paths.get("ignore1"));
  skaffoldFilesOutput.addIgnore(Paths.get("ignore2"));
  Assert.assertEquals(TEST_JSON, skaffoldFilesOutput.getJsonString());
}
/**
 * Releases all locks held by this multi-lock by delegating to unlockInner.
 */
@Override
public void unlock() {
  unlockInner(locks);
}
// RedLock must still be acquirable after one of the backing Redis instances is
// stopped: a competing thread holds the lock briefly, then the main thread
// acquires and releases it against the degraded quorum.
@Test
public void testConnectionFailed() throws IOException, InterruptedException {
  RedisProcess redis1 = redisTestMultilockInstance();
  RedisProcess redis2 = redisTestMultilockInstance();
  RedissonClient client1 = createClient(redis1.getRedisServerAddressAndPort());
  RedissonClient client2 = createClient(redis2.getRedisServerAddressAndPort());
  RLock lock1 = client1.getLock("lock1");
  RLock lock2 = client1.getLock("lock2");
  // Kill the second instance before lock3 is used, simulating a node failure.
  assertThat(redis2.stop()).isEqualTo(0);
  RLock lock3 = client2.getLock("lock3");
  Thread t = new Thread() {
    public void run() {
      RedissonMultiLock lock = new RedissonRedLock(lock1, lock2, lock3);
      lock.lock();
      try {
        Thread.sleep(3000);
      } catch (InterruptedException e) {
      }
      lock.unlock();
    };
  };
  t.start();
  t.join(1000);
  // Main thread must eventually acquire the same red lock despite the dead node.
  RedissonMultiLock lock = new RedissonRedLock(lock1, lock2, lock3);
  lock.lock();
  lock.unlock();
  client1.shutdown();
  client2.shutdown();
  assertThat(redis1.stop()).isEqualTo(0);
}
/**
 * Dispatches a task event to the state machine under the write lock and logs any
 * resulting state transition. An invalid transition is translated into an internal
 * error on the task rather than being propagated to the dispatcher.
 *
 * Fixed: the lock is now acquired BEFORE the try block. Previously lock() was the
 * first statement inside try — if it ever threw, the finally block would call
 * unlock() on a lock that was never held (IllegalMonitorStateException masking
 * the original failure). This is the standard Lock usage idiom.
 *
 * @param event the task event to process
 */
@Override
public void handle(TaskEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing " + event.getTaskID() + " of type " + event.getType());
  }
  writeLock.lock();
  try {
    TaskStateInternal oldState = getInternalState();
    try {
      stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitionException e) {
      LOG.error("Can't handle this event at current state for " + this.taskId, e);
      // Record the failure on the task instead of crashing the dispatcher.
      internalError(event.getType());
    }
    if (oldState != getInternalState()) {
      LOG.info(taskId + " Task Transitioned from " + oldState + " to "
          + getInternalState());
    }
  } finally {
    writeLock.unlock();
  }
}
// After the speculative attempt wins, a retroactive fetch-failure on it must
// put the task back into SCHEDULED state and spawn a third attempt.
@Test
public void testSpeculativeMapFailedFetchFailure() {
  // Setup a scenario where speculative task wins, first attempt succeeds
  mockTask = createMockTask(TaskType.MAP);
  runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_FAILED);
  assertEquals(2, taskAttempts.size());
  // speculative attempt retroactively fails from fetch failures
  mockTask.handle(new TaskTAttemptFailedEvent(
      taskAttempts.get(1).getAttemptId()));
  assertTaskScheduledState();
  assertEquals(3, taskAttempts.size());
}
List<Token> tokenize() throws ScanException { List<Token> tokenList = new ArrayList<Token>(); StringBuilder buf = new StringBuilder(); while (pointer < patternLength) { char c = pattern.charAt(pointer); pointer++; switch (state) { case LITERAL_STATE: handleLiteralState(c, tokenList, buf); break; case START_STATE: handleStartState(c, tokenList, buf); break; case DEFAULT_VAL_STATE: handleDefaultValueState(c, tokenList, buf); default: } } // EOS switch (state) { case LITERAL_STATE: addLiteralToken(tokenList, buf); break; case DEFAULT_VAL_STATE: // trailing colon. see also LOGBACK-1140 buf.append(CoreConstants.COLON_CHAR); addLiteralToken(tokenList, buf); break; case START_STATE: // trailing $. see also LOGBACK-1149 buf.append(CoreConstants.DOLLAR); addLiteralToken(tokenList, buf); break; } return tokenList; }
// Mixed literal/variable input must tokenize into literal, start, name,
// curly-right, literal — in that order.
@Test
public void mix() throws ScanException {
  String input = "a${b}c";
  Tokenizer tokenizer = new Tokenizer(input);
  List<Token> tokenList = tokenizer.tokenize();
  witnessList.add(new Token(Token.Type.LITERAL, "a"));
  witnessList.add(Token.START_TOKEN);
  witnessList.add(new Token(Token.Type.LITERAL, "b"));
  witnessList.add(Token.CURLY_RIGHT_TOKEN);
  witnessList.add(new Token(Token.Type.LITERAL, "c"));
  assertEquals(witnessList, tokenList);
}
/**
 * Retrieves the config value for key/group, bounding the lookup by the given
 * timeout (units presumably milliseconds — confirm against execute()'s contract).
 */
@Override
public final String getConfig(String key, String group, long timeout)
    throws IllegalStateException {
  return execute(() -> doGetConfig(key, group), timeout);
}
// Null key/group lookups should return null for both overloads.
@Test
void testGetConfig() {
  assertNull(configuration.getConfig(null, null));
  assertNull(configuration.getConfig(null, null, 200));
}
/**
 * Registers a remote service implementation with a single worker
 * (delegates to the overload with workers = 1).
 */
@Override
public <T> void register(Class<T> remoteInterface, T object) {
  register(remoteInterface, object, 1);
}
// Each overload of methodOverload must dispatch to the matching remote signature.
@Test
public void testMethodOverload() {
  RedissonClient r1 = createInstance();
  r1.getRemoteService().register(RemoteInterface.class, new RemoteImpl());
  RedissonClient r2 = createInstance();
  RemoteInterface ri = r2.getRemoteService().get(RemoteInterface.class);
  assertThat(ri.methodOverload()).isEqualTo("methodOverload()");
  assertThat(ri.methodOverload(1l)).isEqualTo("methodOverload(Long lng)");
  assertThat(ri.methodOverload("")).isEqualTo("methodOverload(String str)");
  assertThat(ri.methodOverload("", 1l)).isEqualTo("methodOverload(String str, Long lng)");
  r1.shutdown();
  r2.shutdown();
}
/**
 * Intentional no-op: this implementation deliberately does not start anything
 * on lifecycle start.
 */
@Override
public void start() {
  // no-op
}
// Starting the sandbox must not touch the underlying KafkaStreams instance.
@Test
public void shouldNotStartKafkaStreamsOnStart() {
  // When:
  sandbox.start();
  // Then:
  verifyNoMoreInteractions(kafkaStreams);
}
/**
 * Serializes a Connect record to JSON bytes. A fully-null record (no schema, no
 * value) passes through as null; otherwise the value is converted to JSON — with
 * a schema envelope when schemas are enabled — and serialized for the topic.
 *
 * @throws DataException if the JSON serializer fails
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
  if (schema == null && value == null) {
    return null;
  }
  final JsonNode jsonValue;
  if (config.schemasEnabled()) {
    jsonValue = convertToJsonWithEnvelope(schema, value);
  } else {
    jsonValue = convertToJsonWithoutEnvelope(schema, value);
  }
  try {
    return serializer.serialize(topic, jsonValue);
  } catch (SerializationException e) {
    throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
  }
}
// A Time logical value (4h past midnight UTC) must serialize with the Time
// schema envelope and a millisecond-count int payload.
@Test
public void timeToJson() {
  GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
  calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
  calendar.add(Calendar.MILLISECOND, 14400000);
  java.util.Date date = calendar.getTime();
  JsonNode converted = parse(converter.fromConnectData(TOPIC, Time.SCHEMA, date));
  validateEnvelope(converted);
  assertEquals(
      parse("{ \"type\": \"int32\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1 }"),
      converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
  JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
  assertTrue(payload.isInt());
  assertEquals(14400000, payload.longValue());
}
/**
 * Delegates schema deletion to the underlying service. The {@code force} flag is
 * passed through unchanged (its semantics are defined by the service contract).
 */
@Override
public CompletableFuture<SchemaVersion> deleteSchema(String schemaId, String user, boolean force) {
  return service.deleteSchema(schemaId, user, force);
}
// The wrapper must return the exact future from the underlying service and call
// it exactly once with unmodified arguments.
@Test
public void testDeleteSchema() {
  String schemaId = "test-schema-id";
  String user = "test-user";
  CompletableFuture<SchemaVersion> deleteFuture = new CompletableFuture<>();
  when(underlyingService.deleteSchema(eq(schemaId), eq(user), eq(false)))
      .thenReturn(deleteFuture);
  assertSame(deleteFuture, service.deleteSchema(schemaId, user, false));
  verify(underlyingService, times(1)).deleteSchema(eq(schemaId), eq(user), eq(false));
}
/**
 * Fetches the JWK set from the URL advertised by the discovery document, wrapping
 * I/O failures in a RemoteKeySourceException that names the discovery URL.
 *
 * NOTE(review): the refreshEvaluator and currentTime parameters are not forwarded
 * to the underlying source (null and 0 are passed instead) — confirm intentional.
 */
@Override
public JWKSet getJWKSet(JWKSetCacheRefreshEvaluator refreshEvaluator, long currentTime, T context)
    throws KeySourceException {
  var jwksUrl = discoverJwksUrl();
  try (var jwkSetSource = new URLBasedJWKSetSource<>(jwksUrl, new HttpRetriever(httpClient))) {
    return jwkSetSource.getJWKSet(null, 0, context);
  } catch (IOException e) {
    throw new RemoteKeySourceException(
        "failed to fetch jwks from discovery document '%s'".formatted(discoveryUrl), e);
  }
}
// An unreachable discovery URL must surface as RemoteKeySourceException.
@Test
void getJWKSet_badDiscoveryUrl() {
  var discoveryUrl = URI.create("http://badURi");
  var sut = new DiscoveryJwkSetSource<>(HttpClient.newHttpClient(), discoveryUrl);
  assertThrows(RemoteKeySourceException.class, () -> sut.getJWKSet(null, 0, null));
}
/**
 * Builds a PlantUML diagram model from raw diagram lines: comments are stripped,
 * components are parsed first, then dependencies are resolved against them.
 */
private PlantUmlDiagram createDiagram(List<String> rawDiagramLines) {
  List<String> lines = filterOutComments(rawDiagramLines);
  PlantUmlComponents plantUmlComponents = new PlantUmlComponents(parseComponents(lines));
  return new PlantUmlDiagram.Builder(plantUmlComponents)
      .withDependencies(parseDependencies(plantUmlComponents, lines))
      .build();
}
// Two identical component declarations must collapse to a single component.
@Test
public void parses_two_identical_components_no_dependency() {
  PlantUmlDiagram diagram = createDiagram(TestDiagram.in(temporaryFolder)
      .component("someName").withAlias("someAlias").withStereoTypes("someStereotype")
      .component("someName").withAlias("someAlias").withStereoTypes("someStereotype")
      .write());
  assertThat(diagram.getAllComponents()).containsOnly(getComponentWithName("someName", diagram));
}
/**
 * Returns whether the given table type supports partition-change-tracking (PCT)
 * refresh. Unsupported table types trivially do not.
 */
public static boolean isSupportPCTRefresh(Table.TableType tableType) {
  return isSupported(tableType)
      && TRAITS_TABLE.get(tableType).get().isSupportPCTRefresh();
}
// Each traits implementation reports its PCT-refresh support, and the static
// table-type lookup must agree with the expected supported set for every type.
@Test
public void testisSupportPCTRefresh() {
  Assert.assertTrue(new OlapPartitionTraits().isSupportPCTRefresh());
  Assert.assertTrue(new HivePartitionTraits().isSupportPCTRefresh());
  Assert.assertTrue(new IcebergPartitionTraits().isSupportPCTRefresh());
  Assert.assertTrue(new PaimonPartitionTraits().isSupportPCTRefresh());
  Assert.assertTrue(new JDBCPartitionTraits().isSupportPCTRefresh());
  Assert.assertFalse(new HudiPartitionTraits().isSupportPCTRefresh());
  Assert.assertFalse(new OdpsPartitionTraits().isSupportPCTRefresh());
  Assert.assertFalse(new KuduPartitionTraits().isSupportPCTRefresh());
  Assert.assertFalse(new DeltaLakePartitionTraits().isSupportPCTRefresh());
  final Set<Table.TableType> supportedTableTypes = ImmutableSet.of(
      Table.TableType.OLAP,
      Table.TableType.MATERIALIZED_VIEW,
      Table.TableType.CLOUD_NATIVE,
      Table.TableType.CLOUD_NATIVE_MATERIALIZED_VIEW,
      Table.TableType.HIVE,
      Table.TableType.ICEBERG,
      Table.TableType.PAIMON,
      Table.TableType.JDBC
  );
  // Exhaustive sweep over all table types against the expected set.
  for (Table.TableType tableType : Table.TableType.values()) {
    Assert.assertEquals(supportedTableTypes.contains(tableType),
        ConnectorPartitionTraits.isSupportPCTRefresh(tableType));
  }
}
/**
 * CLI entry point: validates presence of and loads the migration config file,
 * then delegates to the parameterized overload with the real ksql client factory,
 * migrations directory, and system clock.
 *
 * @return 0 on success, 1 on a missing or unloadable config file
 */
@Override
protected int command() {
  if (!validateConfigFilePresent()) {
    return 1;
  }
  final MigrationConfig config;
  try {
    config = MigrationConfig.load(getConfigFile());
  } catch (KsqlException | MigrationException e) {
    LOGGER.error(e.getMessage());
    return 1;
  }
  return command(
      config,
      MigrationsUtil::getKsqlClient,
      getMigrationsDir(getConfigFile(), config),
      Clock.systemDefaultZone()
  );
}
// Migrating to version 3 must apply the CREATE CONNECTOR IF NOT EXISTS migration
// (ifNotExists=true on the client call) and close the client afterwards, in order.
@Test
public void shouldApplyCreateConnectorIfNotExistsStatement() throws Exception {
  // Given:
  command = PARSER.parse("-v", "3");
  createMigrationFile(1, NAME, migrationsDir, COMMAND);
  createMigrationFile(3, NAME, migrationsDir, CREATE_CONNECTOR_IF_NOT_EXISTS);
  givenCurrentMigrationVersion("1");
  givenAppliedMigration(1, NAME, MigrationState.MIGRATED);
  // When:
  final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir,
      Clock.fixed(Instant.ofEpochMilli(1000), ZoneId.systemDefault()));
  // Then:
  assertThat(result, is(0));
  final InOrder inOrder = inOrder(ksqlClient);
  verifyMigratedVersion(inOrder, 3, "1", MigrationState.MIGRATED,
      () -> inOrder.verify(ksqlClient).createConnector("`WOOF`", false, CONNECTOR_PROPERTIES, true));
  inOrder.verify(ksqlClient).close();
  inOrder.verifyNoMoreInteractions();
}
/**
 * Returns up to {@code maxMessagesToRead} positions pending redelivery,
 * materialized as Position objects via PositionFactory.
 */
public NavigableSet<Position> getMessagesToReplayNow(int maxMessagesToRead) {
  return messagesToRedeliver.items(maxMessagesToRead, PositionFactory::create);
}
// Replay order: with out-of-order delivery allowed entries are only grouped by
// ledger; otherwise the returned set is fully sorted by (ledger, entry).
@Test(dataProvider = "allowOutOfOrderDelivery", timeOut = 10000)
public void testGetMessagesToReplayNow(boolean allowOutOfOrderDelivery) throws Exception {
  MessageRedeliveryController controller = new MessageRedeliveryController(allowOutOfOrderDelivery);
  controller.add(2, 2);
  controller.add(1, 3);
  controller.add(3, 1);
  controller.add(2, 1);
  controller.add(3, 2);
  controller.add(1, 2);
  controller.add(1, 1);
  if (allowOutOfOrderDelivery) {
    // The entries are sorted by ledger ID but not by entry ID
    Position[] actual1 = controller.getMessagesToReplayNow(3).toArray(new Position[3]);
    Position[] expected1 = { PositionFactory.create(1, 1), PositionFactory.create(1, 2),
        PositionFactory.create(1, 3) };
    assertEqualsNoOrder(actual1, expected1);
  } else {
    // The entries are completely sorted
    Set<Position> actual2 = controller.getMessagesToReplayNow(6);
    Set<Position> expected2 = new TreeSet<>();
    expected2.add(PositionFactory.create(1, 1));
    expected2.add(PositionFactory.create(1, 2));
    expected2.add(PositionFactory.create(1, 3));
    expected2.add(PositionFactory.create(2, 1));
    expected2.add(PositionFactory.create(2, 2));
    expected2.add(PositionFactory.create(3, 1));
    assertEquals(actual2, expected2);
  }
}
/**
 * Asserts that the subject contains none of the given elements. The two required
 * parameters force at least two exclusions at the call site; all arguments are
 * accumulated and delegated to containsNoneIn.
 */
public final void containsNoneOf(
    @Nullable Object firstExcluded,
    @Nullable Object secondExcluded,
    @Nullable Object @Nullable ... restOfExcluded) {
  containsNoneIn(accumulate(firstExcluded, secondExcluded, restOfExcluded));
}
// Failure message must annotate the empty string as such in all three value lines.
@Test
public void iterableContainsNoneOfFailureWithEmptyString() {
  expectFailureWhenTestingThat(asList("")).containsNoneOf("", null);
  assertFailureKeys("expected not to contain any of", "but contained", "full contents");
  assertFailureValue("expected not to contain any of", "[\"\" (empty String), null]");
  assertFailureValue("but contained", "[\"\" (empty String)]");
  assertFailureValue("full contents", "[]");
}
/**
 * Removes and returns the most recently added in-flight request for {@code node},
 * decrementing the global in-flight counter.
 *
 * NOTE(review): when there are no in-flight requests for the node, requestQueue(node)
 * presumably throws (the counter is only decremented after a successful poll, and the
 * test suite expects IllegalStateException) — confirm against requestQueue's contract.
 */
public NetworkClient.InFlightRequest completeNext(String node) {
  NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollLast();
  inFlightRequestCount.decrementAndGet();
  return inFlightRequest;
}
// Completing with no in-flight requests must fail fast with IllegalStateException.
@Test
public void testCompleteNextThrowsIfNoInflights() {
  assertThrows(IllegalStateException.class, () -> inFlightRequests.completeNext(dest));
}
private int addValueMeta( RowMetaInterface rowMeta, String fieldName ) { ValueMetaInterface valueMeta = new ValueMetaString( fieldName ); valueMeta.setOrigin( getStepname() ); // add if doesn't exist int index = -1; if ( !rowMeta.exists( valueMeta ) ) { index = rowMeta.size(); rowMeta.addValueMeta( valueMeta ); } else { index = rowMeta.indexOfValue( fieldName ); } return index; }
// PDI-2607: line-wrapped CSV input (nrWraps=1) without headers must be unwrapped
// into complete two-column rows.
@Test
public void readWrappedInputWithoutHeaders() throws Exception {
  final String content = new StringBuilder()
      .append( "r1c1" ).append( '\n' ).append( ";r1c2\n" )
      .append( "r2c1" ).append( '\n' ).append( ";r2c2" )
      .toString();
  final String virtualFile = createVirtualFile( "pdi-2607.txt", content );
  TextFileInputMeta meta = new TextFileInputMeta();
  meta.setLineWrapped( true );
  meta.setNrWraps( 1 );
  meta.setInputFields( new TextFileInputField[] { field( "col1" ), field( "col2" ) } );
  meta.setFileCompression( "None" );
  meta.setFileType( "CSV" );
  meta.setHeader( false );
  meta.setNrHeaderLines( -1 );
  meta.setFooter( false );
  meta.setNrFooterLines( -1 );
  TextFileInputData data = new TextFileInputData();
  data.setFiles( new FileInputList() );
  data.getFiles().addFile( KettleVFS.getFileObject( virtualFile ) );
  data.outputRowMeta = new RowMeta();
  data.outputRowMeta.addValueMeta( new ValueMetaString( "col1" ) );
  data.outputRowMeta.addValueMeta( new ValueMetaString( "col2" ) );
  data.dataErrorLineHandler = Mockito.mock( FileErrorHandler.class );
  data.fileFormatType = TextFileInputMeta.FILE_FORMAT_UNIX;
  data.separator = ";";
  data.filterProcessor = new TextFileFilterProcessor( new TextFileFilter[ 0 ] );
  data.filePlayList = new FilePlayListAll();
  TextFileInput input = StepMockUtil.getStep( TextFileInput.class, TextFileInputMeta.class, "test" );
  List<Object[]> output = TransTestingUtil.execute( input, meta, data, 2, false );
  TransTestingUtil.assertResult( new Object[] { "r1c1", "r1c2" }, output.get( 0 ) );
  TransTestingUtil.assertResult( new Object[] { "r2c1", "r2c2" }, output.get( 1 ) );
  deleteVfsFile( virtualFile );
}
/**
 * Cached lookup of the latest extension version among {@code versions}.
 * Delegates with a third argument of {@code false} (its meaning is defined by the
 * three-argument overload — presumably a pre-release filter; confirm).
 */
@Cacheable(value = CACHE_LATEST_EXTENSION_VERSION, keyGenerator = GENERATOR_LATEST_EXTENSION_VERSION)
public ExtensionVersion getLatest(List<ExtensionVersion> versions, boolean groupedByTargetPlatform) {
  return getLatest(versions, groupedByTargetPlatform, false);
}
// With equal version strings, target-platform ordering decides the latest
// (linux-x64 wins over web and win32-arm64 here).
@Test
public void testGetLatestTargetPlatformSort() {
  var version = "1.0.0";
  var web = new ExtensionVersion();
  web.setTargetPlatform(TargetPlatform.NAME_WEB);
  web.setVersion(version);
  var linux = new ExtensionVersion();
  linux.setTargetPlatform(TargetPlatform.NAME_LINUX_X64);
  linux.setVersion(version);
  var windows = new ExtensionVersion();
  windows.setTargetPlatform(TargetPlatform.NAME_WIN32_ARM64);
  windows.setVersion(version);
  var latest = versions.getLatest(List.of(windows, linux, web), false);
  assertEquals(linux, latest);
}
/**
 * Identity pass-through: returns the ability unchanged (this default toggle
 * performs no renaming or filtering).
 */
@Override
public Ability processAbility(Ability ab) {
  return ab;
}
// The default toggle must return the very same Ability reference it was given.
@Test
public void passedSameAbilityRefOnProcess() {
  Ability random = DefaultBot.getDefaultBuilder()
      .name("randomsomethingrandom").build();
  toggle = new DefaultToggle();
  defaultBot = new DefaultBot(null, EMPTY, db, toggle);
  defaultBot.onRegister();
  assertSame(random, toggle.processAbility(random), "Toggle returned a different ability");
}
/**
 * Looks up a property by key, falling back to its camelCase form and then its
 * dash-separated form, so both naming styles resolve to the same value.
 */
@Override
public String getProperty(String key) {
  String value = super.getProperty(key);
  if (value != null) {
    return value;
  }
  value = super.getProperty(StringHelper.dashToCamelCase(key));
  if (value != null) {
    return value;
  }
  return super.getProperty(StringHelper.camelCaseToDash(key));
}
// Loaded properties must preserve insertion order, and lookups must succeed under
// both dash-separated and camelCase key spellings.
@Test
public void testOrderedLoad() throws Exception {
  Properties prop = new CamelCaseOrderedProperties();
  prop.load(CamelCaseOrderedPropertiesTest.class.getResourceAsStream("/application.properties"));
  assertEquals(4, prop.size());
  Iterator it = prop.keySet().iterator();
  assertEquals("hello", it.next());
  assertEquals("camel.component.seda.concurrent-consumers", it.next());
  assertEquals("camel.component.seda.queueSize", it.next());
  assertEquals("camel.component.direct.timeout", it.next());
  // should be ordered values
  it = prop.values().iterator();
  assertEquals("World", it.next());
  assertEquals("2", it.next());
  assertEquals("500", it.next());
  assertEquals("1234", it.next());
  assertEquals("World", prop.getProperty("hello", "MyDefault"));
  assertEquals("2", prop.getProperty("camel.component.seda.concurrent-consumers", "MyDefault"));
  assertEquals("2", prop.getProperty("camel.component.seda.concurrentConsumers", "MyDefault"));
  assertEquals("500", prop.getProperty("camel.component.seda.queue-size", "MyDefault"));
  assertEquals("500", prop.getProperty("camel.component.seda.queueSize", "MyDefault"));
  assertEquals("1234", prop.getProperty("camel.component.direct.timeout", "MyDefault"));
}
/**
 * Parses an mbox stream: splits it on "From " record dividers, collects each
 * message's header lines (merging folded continuation lines), and hands every
 * message body to the embedded document extractor as a message/rfc822 sub-document.
 * Messages larger than MAIL_MAX_SIZE are truncated at that boundary.
 */
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
    ParseContext context) throws IOException, TikaException, SAXException {
  EmbeddedDocumentExtractor extractor =
      EmbeddedDocumentUtil.getEmbeddedDocumentExtractor(context);
  // The stream is read as windows-1252; message bytes are re-encoded with the
  // same charset so the embedded parser sees the original byte content.
  String charsetName = "windows-1252";
  metadata.set(Metadata.CONTENT_TYPE, MBOX_MIME_TYPE);
  metadata.set(Metadata.CONTENT_ENCODING, charsetName);
  XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
  xhtml.startDocument();
  InputStreamReader isr = new InputStreamReader(stream, charsetName);
  try (BufferedReader reader = new BufferedReader(isr)) {
    // NOTE(review): an empty stream leaves curLine == null and the startsWith call
    // below would NPE — confirm callers never pass an empty stream.
    String curLine = reader.readLine();
    int mailItem = 0;
    do {
      if (curLine.startsWith(MBOX_RECORD_DIVIDER)) {
        Metadata mailMetadata = new Metadata();
        // Header lines for this message, with folded continuations merged.
        Queue<String> multiline = new LinkedList<>();
        mailMetadata.add(EMAIL_FROMLINE_METADATA,
            curLine.substring(MBOX_RECORD_DIVIDER.length()));
        mailMetadata.set(Metadata.CONTENT_TYPE, "message/rfc822");
        mailMetadata
            .set(TikaCoreProperties.CONTENT_TYPE_PARSER_OVERRIDE, "message/rfc822");
        curLine = reader.readLine();
        if (curLine == null) {
          break; // divider at EOF: no message content follows
        }
        UnsynchronizedByteArrayOutputStream message =
            UnsynchronizedByteArrayOutputStream.builder().setBufferSize(100000).get();
        do {
          if (curLine.startsWith(" ") || curLine.startsWith("\t")) {
            // NOTE(review): poll() removes the OLDEST queued line, not the most
            // recent one — a continuation line may be appended to the wrong header
            // once several headers are queued; verify intended behavior.
            String latestLine = multiline.poll();
            latestLine += " " + curLine.trim();
            multiline.add(latestLine);
          } else {
            multiline.add(curLine);
          }
          // Raw message bytes (with LF terminators) for the embedded parser.
          message.write(curLine.getBytes(charsetName));
          message.write(0x0A);
          curLine = reader.readLine();
        } while (curLine != null && !curLine.startsWith(MBOX_RECORD_DIVIDER)
            && message.size() < MAIL_MAX_SIZE);
        for (String item : multiline) {
          saveHeaderInMetadata(mailMetadata, item);
        }
        InputStream messageStream = message.toInputStream();
        message = null; // release the buffer reference while the sub-parse runs
        if (extractor.shouldParseEmbedded(mailMetadata)) {
          extractor.parseEmbedded(messageStream, xhtml, mailMetadata, true);
        }
        if (tracking) {
          getTrackingMetadata().put(mailItem++, mailMetadata);
        }
      } else {
        curLine = reader.readLine();
      }
    } while (curLine != null && !Thread.currentThread().isInterrupted());
  }
  xhtml.endDocument();
}
// Quoted text in an mbox message body must survive extraction verbatim.
@Test
public void testQuoted() throws Exception {
  ContentHandler handler = new BodyContentHandler();
  Metadata metadata = new Metadata();
  try (InputStream stream = getResourceAsStream("/test-documents/quoted.mbox")) {
    mboxParser.parse(stream, handler, metadata, recursingContext);
  }
  assertContains("Test content", handler.toString());
  assertContains("> quoted stuff", handler.toString());
}
/**
 * Runs the configured aggregation (or a plain filter search when no series are
 * configured) for the given timerange and emits the resulting events, then records
 * the processed timerange in the state service for dependent processors.
 *
 * @throws EventProcessorPreconditionException when required messages are not yet
 *         indexed, when search parameters fail to expand, or on a search backend error
 */
@Override
public void createEvents(EventFactory eventFactory, EventProcessorParameters processorParameters,
    EventConsumer<List<EventWithContext>> eventsConsumer) throws EventProcessorException {
  final AggregationEventProcessorParameters parameters =
      (AggregationEventProcessorParameters) processorParameters;
  // TODO: We have to take the Elasticsearch index.refresh_interval into account here!
  if (!dependencyCheck.hasMessagesIndexedUpTo(parameters.timerange())) {
    final String msg = String.format(Locale.ROOT,
        "Couldn't run aggregation <%s/%s> for timerange <%s to %s> because required messages haven't been indexed, yet.",
        eventDefinition.title(), eventDefinition.id(), parameters.timerange().getFrom(),
        parameters.timerange().getTo());
    throw new EventProcessorPreconditionException(msg, eventDefinition);
  }
  LOG.debug("Creating events for config={} parameters={}", config, parameters);
  // The absence of a series indicates that the user doesn't want to do an aggregation but create events from
  // a simple search query. (one message -> one event)
  try {
    if (config.series().isEmpty()) {
      filterSearch(eventFactory, parameters, eventsConsumer);
    } else {
      aggregatedSearch(eventFactory, parameters, eventsConsumer);
    }
  } catch (SearchException e) {
    if (e.error() instanceof ParameterExpansionError) {
      final String msg = String.format(Locale.ROOT,
          "Couldn't run aggregation <%s/%s> because parameters failed to expand: %s",
          eventDefinition.title(), eventDefinition.id(), e.error().description());
      LOG.error(msg);
      throw new EventProcessorPreconditionException(msg, eventDefinition, e);
    }
    // NOTE(review): SearchExceptions with any other error type are swallowed here
    // and the state below is still advanced — confirm this is intentional.
  } catch (ElasticsearchException e) {
    final String msg = String.format(Locale.ROOT,
        "Couldn't run aggregation <%s/%s> because of search error: %s",
        eventDefinition.title(), eventDefinition.id(), e.getMessage());
    LOG.error(msg);
    throw new EventProcessorPreconditionException(msg, eventDefinition, e);
  }
  // Update the state for this processor! This state will be used for dependency checks between event processors.
  stateService.setState(eventDefinition.id(), parameters.timerange().getFrom(),
      parameters.timerange().getTo());
}
/*
 * Verifies createEvents() in both dependency-check outcomes: when the required messages ARE
 * indexed, no exception is raised, state is recorded and the scroll query runs; when they are
 * NOT indexed, an EventProcessorPreconditionException is raised and neither state service nor
 * search is touched.
 */
@Test public void createEventsWithoutRequiredMessagesBeingIndexed() throws Exception { final DateTime now = DateTime.now(DateTimeZone.UTC); final AbsoluteRange timerange = AbsoluteRange.create(now.minusHours(1), now.plusHours(1)); final AggregationEventProcessorConfig config = AggregationEventProcessorConfig.builder() .query(QUERY_STRING) .streams(ImmutableSet.of()) .groupBy(ImmutableList.of()) .series(ImmutableList.of()) .conditions(null) .searchWithinMs(30000) .executeEveryMs(30000) .build(); final EventDefinitionDto eventDefinitionDto = buildEventDefinitionDto(ImmutableSet.of(), ImmutableList.of(), null, emptyList()); final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder() .timerange(timerange) .build(); final AggregationEventProcessor eventProcessor = new AggregationEventProcessor(eventDefinitionDto, searchFactory, eventProcessorDependencyCheck, stateService, moreSearch, eventStreamService, messages, notificationService, permittedStreams, Set.of(), messageFactory); // If the dependency check returns true, there should be no exception raised and the state service should be called when(eventProcessorDependencyCheck.hasMessagesIndexedUpTo(timerange)).thenReturn(true); assertThatCode(() -> eventProcessor.createEvents(eventFactory, parameters, (events) -> {})).doesNotThrowAnyException(); verify(stateService, times(1)).setState("dto-id-1", timerange.from(), timerange.to()); verify(moreSearch, times(1)).scrollQuery( eq(config.query()), eq(ImmutableSet.of("stream-3", "stream-2", "stream-1", "000000000000000000000001")), eq(emptyList()), eq(config.queryParameters()), eq(parameters.timerange()), eq(parameters.batchSize()), any(MoreSearch.ScrollCallback.class) ); reset(stateService, moreSearch, searchFactory); // Rest mocks so we can verify it again // If the dependency check returns false, a precondition exception should be raised and the state service not be called 
when(eventProcessorDependencyCheck.hasMessagesIndexedUpTo(timerange)).thenReturn(false); assertThatCode(() -> eventProcessor.createEvents(eventFactory, parameters, (events) -> {})) .hasMessageContaining(eventDefinitionDto.title()) .hasMessageContaining(eventDefinitionDto.id()) .hasMessageContaining(timerange.from().toString()) .hasMessageContaining(timerange.to().toString()) .isInstanceOf(EventProcessorPreconditionException.class); verify(stateService, never()).setState(any(String.class), any(DateTime.class), any(DateTime.class)); verify(searchFactory, never()).create(any(), any(), any(), any(), any()); verify(moreSearch, never()).scrollQuery( eq(config.query()), eq(config.streams()), eq(config.filters()), eq(config.queryParameters()), eq(parameters.timerange()), eq(parameters.batchSize()), any(MoreSearch.ScrollCallback.class) ); }
@Override public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle) { return ((ExampleColumnHandle) columnHandle).getColumnMetadata(); }
@Test public void getColumnMetadata() { assertEquals(metadata.getColumnMetadata(SESSION, NUMBERS_TABLE_HANDLE, new ExampleColumnHandle(CONNECTOR_ID, "text", createUnboundedVarcharType(), 0)), new ColumnMetadata("text", createUnboundedVarcharType())); // example connector assumes that the table handle and column handle are // properly formed, so it will return a metadata object for any // ExampleTableHandle and ExampleColumnHandle passed in. This is on because // it is not possible for the Presto Metadata system to create the handles // directly. }
public static String[] extractPathComponentsFromUriTemplate(String uriTemplate) { final String normalizedUriTemplate = NORMALIZED_URI_PATTERN.matcher(uriTemplate).replaceAll(""); final UriTemplate template = new UriTemplate(normalizedUriTemplate); final String uri = template.createURI(_EMPTY_STRING_ARRAY); return URI_SEPARATOR_PATTERN.split(uri); }
@Test public void testExtractionWithTemplateVariables() { final String[] components1 = URIParamUtils.extractPathComponentsFromUriTemplate("foo"); Assert.assertEquals(components1.length, 1); Assert.assertEquals(components1[0], "foo"); final String[] components2 = URIParamUtils.extractPathComponentsFromUriTemplate("foo/{keys}/bar"); Assert.assertEquals(components2.length, 2); Assert.assertEquals(components2[0], "foo"); Assert.assertEquals(components2[1], "bar"); final String[] components3 = URIParamUtils.extractPathComponentsFromUriTemplate("foo/{keys1}/bar/{keys2}/baz"); Assert.assertEquals(components3.length, 3); Assert.assertEquals(components3[0], "foo"); Assert.assertEquals(components3[1], "bar"); Assert.assertEquals(components3[2], "baz"); final String[] components4 = URIParamUtils.extractPathComponentsFromUriTemplate("foo/{keys1}/{keys2}/bar"); Assert.assertEquals(components4.length, 2); Assert.assertEquals(components4[0], "foo"); Assert.assertEquals(components4[1], "bar"); }
@Override public ResultSet getPseudoColumns(final String catalog, final String schemaPattern, final String tableNamePattern, final String columnNamePattern) throws SQLException { return createDatabaseMetaDataResultSet( getDatabaseMetaData().getPseudoColumns(getActualCatalog(catalog), getActualSchema(schemaPattern), getActualTableNamePattern(tableNamePattern), columnNamePattern)); }
@Test void assertGetPseudoColumns() throws SQLException { when(databaseMetaData.getPseudoColumns("test", null, null, null)).thenReturn(resultSet); assertThat(shardingSphereDatabaseMetaData.getPseudoColumns("test", null, null, null), instanceOf(DatabaseMetaDataResultSet.class)); }
public static <T> T instantiate( final String className, final Class<T> targetType, final ClassLoader classLoader) throws FlinkException { final Class<? extends T> clazz; try { clazz = Class.forName(className, false, classLoader).asSubclass(targetType); } catch (ClassNotFoundException e) { throw new FlinkException( String.format( "Could not instantiate class '%s' of type '%s'. Please make sure that this class is on your class path.", className, targetType.getName()), e); } return instantiate(clazz); }
@Test void testInstantiationOfStringValueAndCastToValue() { Object stringValue = InstantiationUtil.instantiate(StringValue.class, Value.class); assertThat(stringValue).isNotNull(); }
public boolean isStable() { return this.oldConf.isEmpty(); }
@Test public void testIsStable() { ConfigurationEntry entry = TestUtils.getConfEntry("localhost:8081,localhost:8082,localhost:8083", "localhost:8080,localhost:8081,localhost:8082"); assertFalse(entry.isStable()); assertEquals(4, entry.listPeers().size()); assertTrue(entry.contains(new PeerId("localhost", 8080))); assertTrue(entry.contains(new PeerId("localhost", 8081))); assertTrue(entry.contains(new PeerId("localhost", 8082))); assertTrue(entry.contains(new PeerId("localhost", 8083))); }
public static NacosNamingServiceWrapper createNamingService(URL connectionURL) { boolean check = connectionURL.getParameter(NACOS_CHECK_KEY, true); int retryTimes = connectionURL.getPositiveParameter(NACOS_RETRY_KEY, 10); int sleepMsBetweenRetries = connectionURL.getPositiveParameter(NACOS_RETRY_WAIT_KEY, 10); NacosConnectionManager nacosConnectionManager = new NacosConnectionManager(connectionURL, check, retryTimes, sleepMsBetweenRetries); return new NacosNamingServiceWrapper(nacosConnectionManager, retryTimes, sleepMsBetweenRetries); }
@Test void testDisable() { try (MockedStatic<NacosFactory> nacosFactoryMockedStatic = Mockito.mockStatic(NacosFactory.class)) { NamingService mock = new MockNamingService() { @Override public String getServerStatus() { return DOWN; } }; nacosFactoryMockedStatic .when(() -> NacosFactory.createNamingService((Properties) any())) .thenReturn(mock); URL url = URL.valueOf("nacos://127.0.0.1:8848") .addParameter("nacos.retry", 5) .addParameter("nacos.retry-wait", 10) .addParameter("nacos.check", "false"); try { NacosNamingServiceUtils.createNamingService(url); } catch (Throwable t) { Assertions.fail(t); } } }
protected SetFile() {}
@Test public void testSetFile() throws Exception { FileSystem fs = FileSystem.getLocal(conf); try { RandomDatum[] data = generate(10000); writeTest(fs, data, FILE, CompressionType.NONE); readTest(fs, data, FILE); writeTest(fs, data, FILE, CompressionType.BLOCK); readTest(fs, data, FILE); } finally { fs.close(); } }
public ActionResult apply(Agent agent, Map<String, String> request) { log.debug("Writing content to file {}", request.get("filename")); String filename = request.get("filename"); if (filename == null || filename.isEmpty()) { return ActionResult.builder() .status(ActionResult.Status.FAILURE) .summary("The filename parameter is missing or has an empty value.") .error("The filename parameter is missing or has an empty value.") .build(); } Document document = workspace.addDocument(agent.getId(), filename, request.get("body").getBytes(), new HashMap<>()); return ActionResult.builder() .status(ActionResult.Status.SUCCESS) .summary(String.format("The file %s has been written to the Agent workspace.", request.get("filename"))) .result(String.format("The file %s has been written to the Agent workspace.", request.get("filename"))) .documents(Arrays.asList(document)) .build(); }
@Test void testApplyWithMissingFilename() { String agentId = "agent1"; String fileContent = "This is a test file."; Map<String, String> request = new HashMap<>(); request.put("body", fileContent); when(agent.getId()).thenReturn(agentId); ActionResult result = writeFileAction.apply(agent, request); assertEquals(ActionResult.Status.FAILURE, result.getStatus()); verify(workspace, never()).addDocument(anyString(), anyString(), any(), any()); }
public static Optional<String> maybeCreateProcessingLogTopic( final KafkaTopicClient topicClient, final ProcessingLogConfig config, final KsqlConfig ksqlConfig) { if (!config.getBoolean(ProcessingLogConfig.TOPIC_AUTO_CREATE)) { return Optional.empty(); } final String topicName = getTopicName(config, ksqlConfig); final int nPartitions = config.getInt(ProcessingLogConfig.TOPIC_PARTITIONS); final short nReplicas = config.getShort(ProcessingLogConfig.TOPIC_REPLICATION_FACTOR); try { topicClient.createTopic(topicName, nPartitions, nReplicas); } catch (final KafkaTopicExistsException e) { if (e.getPartitionOrReplicaMismatch()) { LOGGER.warn(String.format("Log topic %s already exists", topicName), e); } else { LOGGER.info(String.format("Log topic %s already exists", topicName), e); } } return Optional.of(topicName); }
@Test public void shouldCreateProcessingLogTopicWithCorrectDefaultName() { // Given: final ProcessingLogConfig config = new ProcessingLogConfig( ImmutableMap.of( ProcessingLogConfig.TOPIC_AUTO_CREATE, true, ProcessingLogConfig.TOPIC_PARTITIONS, PARTITIONS, ProcessingLogConfig.TOPIC_REPLICATION_FACTOR, REPLICAS ) ); // When: final Optional<String> createdTopic = ProcessingLogServerUtils.maybeCreateProcessingLogTopic( mockTopicClient, config, ksqlConfig); // Then: assertThat(createdTopic.isPresent(), is(true)); assertThat(createdTopic.get(), equalTo(DEFAULT_TOPIC)); verify(mockTopicClient).createTopic(DEFAULT_TOPIC, PARTITIONS, REPLICAS); }
@Override public synchronized void createSchema(ConnectorSession session, String schemaName, Map<String, Object> properties) { if (schemas.contains(schemaName)) { throw new PrestoException(ALREADY_EXISTS, format("Schema [%s] already exists", schemaName)); } schemas.add(schemaName); }
@Test public void testCreateSchema() { assertEquals(metadata.listSchemaNames(SESSION), ImmutableList.of("default")); metadata.createSchema(SESSION, "test", ImmutableMap.of()); assertEquals(metadata.listSchemaNames(SESSION), ImmutableList.of("default", "test")); }
@Override public V put(final K key, final V value) { final Entry<K, V>[] table = this.table; final int hash = key.hashCode(); final int index = HashUtil.indexFor(hash, table.length, mask); for (Entry<K, V> e = table[index]; e != null; e = e.hashNext) { final K entryKey; if ((entryKey = e.key) == key || entryKey.equals(key)) { moveToTop(e); return e.setValue(value); } } final Entry<K, V> e = new Entry<>(key, value); e.hashNext = table[index]; table[index] = e; final Entry<K, V> top = this.top; e.next = top; if (top != null) { top.previous = e; } else { back = e; } this.top = e; _size += 1; if (removeEldestEntry(back)) { remove(eldestKey()); } else if (_size > capacity) { rehash(HashUtil.nextCapacity(capacity)); } return null; }
@Test public void forEachProcedure() { final LinkedHashMap<Integer, String> tested = new LinkedHashMap<>(); for (int i = 0; i < 100000; ++i) { tested.put(i, Integer.toString(i)); } final int[] ii = {0}; tested.forEachKey(object -> { ii[0]++; return true; }); tested.forEachValue(object -> { ii[0]++; return true; }); Assert.assertEquals(tested.size() * 2, ii[0]); ii[0] = 0; tested.forEachKey(object -> { ii[0]++; return object > 99500; }); tested.forEachValue(object -> { ii[0]++; return true; }); Assert.assertEquals(tested.size() + 500, ii[0]); }
public static Collection<InstanceInfo> selectAll(Applications applications) { List<InstanceInfo> all = new ArrayList<>(); for (Application a : applications.getRegisteredApplications()) { all.addAll(a.getInstances()); } return all; }
@Test public void testSelectAllIfNotNullReturnAllInstances() { Application application = createSingleInstanceApp("foo", "foo", InstanceInfo.ActionType.ADDED); Applications applications = createApplications(application); applications.addApplication(application); Assert.assertEquals(new ArrayList<>(Arrays.asList( application.getByInstanceId("foo"), application.getByInstanceId("foo"))), EurekaEntityFunctions.selectAll(applications)); }
@Override public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException { ctx.tellNext(msg, checkMatches(msg) ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE); }
@Test void givenTypeCircleAndConfigWithCircleDefined_whenOnMsg_thenTrue() throws TbNodeException { // GIVEN var config = new TbGpsGeofencingFilterNodeConfiguration().defaultConfiguration(); config.setFetchPerimeterInfoFromMessageMetadata(false); config.setPerimeterType(PerimeterType.CIRCLE); config.setCenterLatitude(CIRCLE_CENTER.getLatitude()); config.setCenterLongitude(CIRCLE_CENTER.getLongitude()); config.setRange(CIRCLE_RANGE); config.setRangeUnit(RangeUnit.KILOMETER); node.init(ctx, new TbNodeConfiguration(JacksonUtil.valueToTree(config))); DeviceId deviceId = new DeviceId(UUID.randomUUID()); TbMsg msg = getTbMsg(deviceId, TbMsgMetaData.EMPTY, POINT_INSIDE_CIRCLE.getLatitude(), POINT_INSIDE_CIRCLE.getLongitude()); // WHEN node.onMsg(ctx, msg); // THEN ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class); verify(ctx, times(1)).tellNext(newMsgCaptor.capture(), eq(TbNodeConnectionType.TRUE)); verify(ctx, never()).tellFailure(any(), any()); TbMsg newMsg = newMsgCaptor.getValue(); assertThat(newMsg).isNotNull(); assertThat(newMsg).isSameAs(msg); }
private void resolveNativeEntityGrokPattern(EntityDescriptor entityDescriptor, InputWithExtractors inputWithExtractors, MutableGraph<EntityDescriptor> mutableGraph) { inputWithExtractors.extractors().stream() .filter(e -> e.getType().equals(Extractor.Type.GROK)) .map(e -> (String) e.getExtractorConfig().get(GrokExtractor.CONFIG_GROK_PATTERN)) .map(GrokPatternService::extractPatternNames) .flatMap(Collection::stream) .forEach(patternName -> { grokPatternService.loadByName(patternName).ifPresent(depPattern -> { final EntityDescriptor depEntityDescriptor = EntityDescriptor.create( depPattern.id(), ModelTypes.GROK_PATTERN_V1); mutableGraph.putEdge(entityDescriptor, depEntityDescriptor); }); }); }
@Test @MongoDBFixtures("InputFacadeTest.json") public void resolveNativeEntityGrokPattern() throws NotFoundException { final Input input = inputService.find("5ae2ebbeef27464477f0fd8b"); EntityDescriptor entityDescriptor = EntityDescriptor.create(ModelId.of(input.getId()), ModelTypes.INPUT_V1); EntityDescriptor expectedDescriptor = EntityDescriptor.create(ModelId.of("1"), ModelTypes.GROK_PATTERN_V1); Graph<EntityDescriptor> graph = facade.resolveNativeEntity(entityDescriptor); assertThat(graph.nodes()).contains(expectedDescriptor); }
@VisibleForTesting void validateDictTypeUnique(Long id, String type) { if (StrUtil.isEmpty(type)) { return; } DictTypeDO dictType = dictTypeMapper.selectByType(type); if (dictType == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的字典类型 if (id == null) { throw exception(DICT_TYPE_TYPE_DUPLICATE); } if (!dictType.getId().equals(id)) { throw exception(DICT_TYPE_TYPE_DUPLICATE); } }
@Test public void testValidateDictTypeUnique_success() { // 调用,成功 dictTypeService.validateDictTypeUnique(randomLongId(), randomString()); }
public Set<String> assembleAllWatchKeys(String appId, String clusterName, String namespace, String dataCenter) { Multimap<String, String> watchedKeysMap = assembleAllWatchKeys(appId, clusterName, Sets.newHashSet(namespace), dataCenter); return Sets.newHashSet(watchedKeysMap.get(namespace)); }
@Test public void testAssembleAllWatchKeysWithOneNamespaceAndSomeDC() throws Exception { Set<String> watchKeys = watchKeysUtil.assembleAllWatchKeys(someAppId, someDC, someNamespace, someDC); Set<String> clusters = Sets.newHashSet(defaultCluster, someDC); assertEquals(clusters.size(), watchKeys.size()); assertWatchKeys(someAppId, clusters, someNamespace, watchKeys); }
public static CommonsConfigurationTimeLimiterConfiguration of(final Configuration configuration) throws ConfigParseException { CommonsConfigurationTimeLimiterConfiguration obj = new CommonsConfigurationTimeLimiterConfiguration(); try { obj.getConfigs().putAll(obj.getProperties(configuration.subset(TIME_LIMITER_CONFIGS_PREFIX))); obj.getInstances().putAll(obj.getProperties(configuration.subset(TIME_LIMITER_INSTANCES_PREFIX))); return obj; } catch (Exception ex) { throw new ConfigParseException("Error creating timelimiter configuration", ex); } }
@Test public void testFromPropertiesFile() throws ConfigurationException { Configuration config = CommonsConfigurationUtil.getConfiguration(PropertiesConfiguration.class, TestConstants.RESILIENCE_CONFIG_PROPERTIES_FILE_NAME); CommonsConfigurationTimeLimiterConfiguration timeLimiterConfiguration = CommonsConfigurationTimeLimiterConfiguration.of(config); assertConfigs(timeLimiterConfiguration.getConfigs()); assertInstances(timeLimiterConfiguration.getInstances()); }
public long getPaneIntervalInMs() { return paneIntervalInMs; }
@Test void testGetPaneIntervalInMs() { assertEquals(intervalInMs / paneCount, window.getPaneIntervalInMs()); }
public static List<ClientMessage> getFragments(int maxFrameSize, ClientMessage clientMessage) { if (clientMessage.getFrameLength() <= maxFrameSize) { return Collections.singletonList(clientMessage); } long fragmentId = FRAGMENT_ID_SEQUENCE.next(); LinkedList<ClientMessage> fragments = new LinkedList<>(); ClientMessage.ForwardFrameIterator iterator = clientMessage.frameIterator(); ReadState state = ReadState.BEGINNING; int length = 0; ClientMessage fragment = null; while (iterator.hasNext()) { ClientMessage.Frame frame = iterator.peekNext(); int frameSize = frame.getSize(); length += frameSize; if (frameSize > maxFrameSize) { iterator.next(); if (state == ReadState.MIDDLE) { fragments.add(fragment); } fragment = createFragment(fragmentId); fragment.add(frame.copy()); fragments.add(fragment); state = ReadState.BEGINNING; length = 0; } else if (length <= maxFrameSize) { iterator.next(); if (state == ReadState.BEGINNING) { fragment = createFragment(fragmentId); } fragment.add(frame.copy()); state = ReadState.MIDDLE; } else { assert state == ReadState.MIDDLE; fragments.add(fragment); state = ReadState.BEGINNING; length = 0; } } if (state == ReadState.MIDDLE) { fragments.add(fragment); } fragments.getFirst().getStartFrame().flags |= BEGIN_FRAGMENT_FLAG; fragments.getLast().getStartFrame().flags |= END_FRAGMENT_FLAG; return fragments; }
@Test public void testGetSubFrames() { List<ClientMessage> fragments = getFragments(128, clientMessage); ClientMessage.ForwardFrameIterator originalIterator = clientMessage.frameIterator(); assertEquals(19, fragments.size()); assertFragments(fragments, originalIterator); }
public void isIn(@Nullable Iterable<?> iterable) { checkNotNull(iterable); if (!contains(iterable, actual)) { failWithActual("expected any of", iterable); } }
@Test public void isInNullFailure() { expectFailure.whenTesting().that((String) null).isIn(oneShotIterable("a", "b", "c")); }
public Optional<PinotQueryGeneratorResult> generate(PlanNode plan, ConnectorSession session) { try { PinotQueryGeneratorContext context = requireNonNull(plan.accept( new PinotQueryPlanVisitor(session), new PinotQueryGeneratorContext()), "Resulting context is null"); return Optional.of(new PinotQueryGeneratorResult(context.toQuery(pinotConfig, session), context)); } catch (PinotException e) { log.debug(e, "Possibly benign error when pushing plan into scan node %s", plan); return Optional.empty(); } }
@Test public void testApproxDistinctWithInvalidParameters() { PlanNode justScan = buildPlan(planBuilder -> tableScan(planBuilder, pinotTable, regionId, secondsSinceEpoch, city, fare)); PlanNode approxPlanNode = buildPlan(planBuilder -> planBuilder.aggregation(aggBuilder -> aggBuilder.source(justScan).singleGroupingSet(variable("city")).addAggregation(planBuilder.variable("agg"), getRowExpression("approx_distinct(fare, 0)", defaultSessionHolder)))); Optional<PinotQueryGenerator.PinotQueryGeneratorResult> generatedQuery = new PinotQueryGenerator(pinotConfig, functionAndTypeManager, functionAndTypeManager, standardFunctionResolution) .generate(approxPlanNode, defaultSessionHolder.getConnectorSession()); assertFalse(generatedQuery.isPresent()); approxPlanNode = buildPlan(planBuilder -> planBuilder.aggregation(aggBuilder -> aggBuilder.source(justScan).singleGroupingSet(variable("city")).addAggregation(planBuilder.variable("agg"), getRowExpression("approx_distinct(fare, 0.004)", defaultSessionHolder)))); generatedQuery = new PinotQueryGenerator(pinotConfig, functionAndTypeManager, functionAndTypeManager, standardFunctionResolution) .generate(approxPlanNode, defaultSessionHolder.getConnectorSession()); assertFalse(generatedQuery.isPresent()); approxPlanNode = buildPlan(planBuilder -> planBuilder.aggregation(aggBuilder -> aggBuilder.source(justScan).singleGroupingSet(variable("city")).addAggregation(planBuilder.variable("agg"), getRowExpression("approx_distinct(fare, 1)", defaultSessionHolder)))); generatedQuery = new PinotQueryGenerator(pinotConfig, functionAndTypeManager, functionAndTypeManager, standardFunctionResolution) .generate(approxPlanNode, defaultSessionHolder.getConnectorSession()); assertFalse(generatedQuery.isPresent()); }
@CheckForNull @Override public Set<Path> branchChangedFiles(String targetBranchName, Path rootBaseDir) { return Optional.ofNullable((branchChangedFilesWithFileMovementDetection(targetBranchName, rootBaseDir))) .map(GitScmProvider::extractAbsoluteFilePaths) .orElse(null); }
@Test public void branchChangedFiles_should_return_null_if_repo_exactref_is_null() throws IOException { Repository repository = mock(Repository.class); RefDatabase refDatabase = mock(RefDatabase.class); when(repository.getRefDatabase()).thenReturn(refDatabase); when(refDatabase.findRef(BRANCH_NAME)).thenReturn(null); GitScmProvider provider = new GitScmProvider(mockCommand(), analysisWarnings, gitIgnoreCommand, system2, documentationLinkGenerator) { @Override Repository buildRepo(Path basedir) { return repository; } }; assertThat(provider.branchChangedFiles(BRANCH_NAME, worktree)).isNull(); String refNotFound = "Could not find ref 'branch' in refs/heads, refs/remotes, refs/remotes/upstream or refs/remotes/origin"; LogAndArguments warnLog = logs.getLogs(WARN).get(0); assertThat(warnLog.getRawMsg()).isEqualTo(refNotFound); String warning = refNotFound + ". You may see unexpected issues and changes. Please make sure to fetch this ref before pull request analysis" + " and refer to <a href=\"" + TEST_DOC_LINK + "\" rel=\"noopener noreferrer\" target=\"_blank\">the documentation</a>."; verify(analysisWarnings).addUnique(warning); }
@Override public void onStreamRequest(StreamRequest req, RequestContext requestContext, Map<String, String> wireAttrs, NextFilter<StreamRequest, StreamResponse> nextFilter) { disruptRequest(req, requestContext, wireAttrs, nextFilter); }
@Test public void testStreamLatencyDisrupt() throws Exception { final RequestContext requestContext = new RequestContext(); requestContext.putLocalAttr(DISRUPT_CONTEXT_KEY, DisruptContexts.delay(REQUEST_LATENCY)); final DisruptFilter filter = new DisruptFilter(_scheduler, _executor, REQUEST_TIMEOUT, _clock); final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean success = new AtomicBoolean(false); final NextFilter<StreamRequest, StreamResponse> next = new NextFilter<StreamRequest, StreamResponse>() { @Override public void onRequest(StreamRequest restRequest, RequestContext requestContext, Map<String, String> wireAttrs) { success.set(true); latch.countDown(); } @Override public void onResponse(StreamResponse restResponse, RequestContext requestContext, Map<String, String> wireAttrs) { latch.countDown(); } @Override public void onError(Throwable ex, RequestContext requestContext, Map<String, String> wireAttrs) { latch.countDown(); } }; filter.onStreamRequest(new StreamRequestBuilder(new URI(URI)).build(EntityStreams.emptyStream()), requestContext, Collections.emptyMap(), next); Assert.assertTrue(latch.await(TEST_TIMEOUT, TimeUnit.MILLISECONDS), "Missing NextFilter invocation"); Assert.assertTrue(success.get(), "Unexpected method invocation"); }
@SuppressWarnings("unchecked") @Udf public <T> List<T> union( @UdfParameter(description = "First array of values") final List<T> left, @UdfParameter(description = "Second array of values") final List<T> right) { if (left == null || right == null) { return null; } final Set<T> combined = Sets.newLinkedHashSet(left); combined.addAll(right); return (List<T>) Arrays.asList(combined.toArray()); }
@Test public void shouldReturnNullForNullLeftInput() { final List<String> input1 = Arrays.asList("foo"); final List<String> result = udf.union(input1, null); assertThat(result, is(nullValue())); }
/*
 * Advances minLogicOffset so that it points at the first consume-queue entry whose
 * commit-log offset is >= minCommitLogOffset. Fast paths: (1) the queue is already
 * fully deprecated (minLogicOffset at or past max offset); (2) the very last record
 * is older than minCommitLogOffset, in which case minLogicOffset is pushed to a
 * future slot past all current data. Otherwise a binary search inside the first
 * mapped file (starting from the previous minLogicOffset) locates the boundary
 * entry, and the extended consume queue (if enabled) is truncated accordingly.
 * NOTE: the inline comments below are garbled by line-collapsing in this extract;
 * the code tokens are preserved verbatim.
 */
@Override public void correctMinOffset(long minCommitLogOffset) { // Check if the consume queue is the state of deprecation. if (minLogicOffset >= mappedFileQueue.getMaxOffset()) { log.info("ConsumeQueue[Topic={}, queue-id={}] contains no valid entries", topic, queueId); return; } // Check whether the consume queue maps no valid data at all. This check may cost 1 IO operation. // The rationale is that consume queue always preserves the last file. In case there are many deprecated topics, // This check would save a lot of efforts. MappedFile lastMappedFile = this.mappedFileQueue.getLastMappedFile(); if (null == lastMappedFile) { return; } SelectMappedBufferResult lastRecord = null; try { int maxReadablePosition = lastMappedFile.getReadPosition(); lastRecord = lastMappedFile.selectMappedBuffer(maxReadablePosition - ConsumeQueue.CQ_STORE_UNIT_SIZE, ConsumeQueue.CQ_STORE_UNIT_SIZE); if (null != lastRecord) { ByteBuffer buffer = lastRecord.getByteBuffer(); long commitLogOffset = buffer.getLong(); if (commitLogOffset < minCommitLogOffset) { // Keep the largest known consume offset, even if this consume-queue contains no valid entries at // all. Let minLogicOffset point to a future slot. this.minLogicOffset = lastMappedFile.getFileFromOffset() + maxReadablePosition; log.info("ConsumeQueue[topic={}, queue-id={}] contains no valid entries. Min-offset is assigned as: {}.", topic, queueId, getMinOffsetInQueue()); return; } } } finally { if (null != lastRecord) { lastRecord.release(); } } MappedFile mappedFile = this.mappedFileQueue.getFirstMappedFile(); long minExtAddr = 1; if (mappedFile != null) { // Search from previous min logical offset. 
Typically, a consume queue file segment contains 300,000 entries // searching from previous position saves significant amount of comparisons and IOs boolean intact = true; // Assume previous value is still valid long start = this.minLogicOffset - mappedFile.getFileFromOffset(); if (start < 0) { intact = false; start = 0; } if (start > mappedFile.getReadPosition()) { log.error("[Bug][InconsistentState] ConsumeQueue file {} should have been deleted", mappedFile.getFileName()); return; } SelectMappedBufferResult result = mappedFile.selectMappedBuffer((int) start); if (result == null) { log.warn("[Bug] Failed to scan consume queue entries from file on correcting min offset: {}", mappedFile.getFileName()); return; } try { // No valid consume entries if (result.getSize() == 0) { log.debug("ConsumeQueue[topic={}, queue-id={}] contains no valid entries", topic, queueId); return; } ByteBuffer buffer = result.getByteBuffer().slice(); // Verify whether the previous value is still valid or not before conducting binary search long commitLogOffset = buffer.getLong(); if (intact && commitLogOffset >= minCommitLogOffset) { log.info("Abort correction as previous min-offset points to {}, which is greater than {}", commitLogOffset, minCommitLogOffset); return; } // Binary search between range [previous_min_logic_offset, first_file_from_offset + file_size) // Note the consume-queue deletion procedure ensures the last entry points to somewhere valid. 
int low = 0; int high = result.getSize() - ConsumeQueue.CQ_STORE_UNIT_SIZE; while (true) { if (high - low <= ConsumeQueue.CQ_STORE_UNIT_SIZE) { break; } int mid = (low + high) / 2 / ConsumeQueue.CQ_STORE_UNIT_SIZE * ConsumeQueue.CQ_STORE_UNIT_SIZE; buffer.position(mid); commitLogOffset = buffer.getLong(); if (commitLogOffset > minCommitLogOffset) { high = mid; } else if (commitLogOffset == minCommitLogOffset) { low = mid; high = mid; break; } else { low = mid; } } // Examine the last one or two entries for (int i = low; i <= high; i += ConsumeQueue.CQ_STORE_UNIT_SIZE) { buffer.position(i); long offsetPy = buffer.getLong(); buffer.position(i + 12); long tagsCode = buffer.getLong(); if (offsetPy >= minCommitLogOffset) { this.minLogicOffset = mappedFile.getFileFromOffset() + start + i; log.info("Compute logical min offset: {}, topic: {}, queueId: {}", this.getMinOffsetInQueue(), this.topic, this.queueId); // This maybe not take effect, when not every consume queue has an extended file. if (isExtAddr(tagsCode)) { minExtAddr = tagsCode; } break; } } } catch (Exception e) { log.error("Exception thrown when correctMinOffset", e); } finally { result.release(); } } if (isExtReadEnable()) { this.consumeQueueExt.truncateByMinAddress(minExtAddr); } }
@Test public void testCorrectMinOffset() { String topic = "T1"; int queueId = 0; MessageStoreConfig storeConfig = new MessageStoreConfig(); File tmpDir = new File(System.getProperty("java.io.tmpdir"), "test_correct_min_offset"); tmpDir.deleteOnExit(); storeConfig.setStorePathRootDir(tmpDir.getAbsolutePath()); storeConfig.setEnableConsumeQueueExt(false); DefaultMessageStore messageStore = Mockito.mock(DefaultMessageStore.class); Mockito.when(messageStore.getMessageStoreConfig()).thenReturn(storeConfig); RunningFlags runningFlags = new RunningFlags(); Mockito.when(messageStore.getRunningFlags()).thenReturn(runningFlags); StoreCheckpoint storeCheckpoint = Mockito.mock(StoreCheckpoint.class); Mockito.when(messageStore.getStoreCheckpoint()).thenReturn(storeCheckpoint); ConsumeQueue consumeQueue = new ConsumeQueue(topic, queueId, storeConfig.getStorePathRootDir(), storeConfig.getMappedFileSizeConsumeQueue(), messageStore); int max = 10000; int messageSize = 100; for (int i = 0; i < max; ++i) { DispatchRequest dispatchRequest = new DispatchRequest(topic, queueId, messageSize * i, messageSize, 0, 0, i, null, null, 0, 0, null); consumeQueue.putMessagePositionInfoWrapper(dispatchRequest); } consumeQueue.setMinLogicOffset(0L); consumeQueue.correctMinOffset(0L); Assert.assertEquals(0, consumeQueue.getMinOffsetInQueue()); consumeQueue.setMinLogicOffset(100); consumeQueue.correctMinOffset(2000); Assert.assertEquals(20, consumeQueue.getMinOffsetInQueue()); consumeQueue.setMinLogicOffset((max - 1) * ConsumeQueue.CQ_STORE_UNIT_SIZE); consumeQueue.correctMinOffset(max * messageSize); Assert.assertEquals(max * ConsumeQueue.CQ_STORE_UNIT_SIZE, consumeQueue.getMinLogicOffset()); consumeQueue.setMinLogicOffset(max * ConsumeQueue.CQ_STORE_UNIT_SIZE); consumeQueue.correctMinOffset(max * messageSize); Assert.assertEquals(max * ConsumeQueue.CQ_STORE_UNIT_SIZE, consumeQueue.getMinLogicOffset()); consumeQueue.destroy(); }
/**
 * Builds the span name for a Mongo command: the command name, suffixed with the
 * collection name (separated by a single space) when a collection is present.
 */
static String getSpanName(String commandName, @Nullable String collectionName) {
    return collectionName == null ? commandName : commandName + " " + collectionName;
}
@Test
void getSpanName_presentCollectionName() {
    // With a collection present the span name is "<command> <collection>".
    String spanName = getSpanName("foo", "bar");
    assertThat(spanName).isEqualTo("foo bar");
}
@SqlNullable @Description("splits a string by a delimiter and returns the specified field (counting from one)") @ScalarFunction @LiteralParameters({"x", "y"}) @SqlType("varchar(x)") public static Slice splitPart(@SqlType("varchar(x)") Slice string, @SqlType("varchar(y)") Slice delimiter, @SqlType(StandardTypes.BIGINT) long index) { checkCondition(index > 0, INVALID_FUNCTION_ARGUMENT, "Index must be greater than zero"); // Empty delimiter? Then every character will be a split if (delimiter.length() == 0) { int startCodePoint = toIntExact(index); int indexStart = offsetOfCodePoint(string, startCodePoint - 1); if (indexStart < 0) { // index too big return null; } int length = lengthOfCodePoint(string, indexStart); if (indexStart + length > string.length()) { throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Invalid UTF-8 encoding"); } return string.slice(indexStart, length); } int matchCount = 0; int previousIndex = 0; while (previousIndex < string.length()) { int matchIndex = string.indexOf(delimiter, previousIndex); // No match if (matchIndex < 0) { break; } // Reached the requested part? if (++matchCount == index) { return string.slice(previousIndex, matchIndex - previousIndex); } // Continue searching after the delimiter previousIndex = matchIndex + delimiter.length(); } if (matchCount == index - 1) { // returns last section of the split return string.slice(previousIndex, string.length() - previousIndex); } // index is too big, null is returned return null; }
/**
 * Exercises SPLIT_PART across delimiters of varying length (including the empty
 * delimiter, which splits per code point), out-of-range indexes (SQL NULL),
 * overlapping delimiters, non-ASCII input, and invalid arguments.
 */
@Test
public void testSplitPart()
{
    assertFunction("SPLIT_PART('abc-@-def-@-ghi', '-@-', 1)", createVarcharType(15), "abc");
    assertFunction("SPLIT_PART('abc-@-def-@-ghi', '-@-', 2)", createVarcharType(15), "def");
    assertFunction("SPLIT_PART('abc-@-def-@-ghi', '-@-', 3)", createVarcharType(15), "ghi");
    // Index past the last part yields NULL.
    assertFunction("SPLIT_PART('abc-@-def-@-ghi', '-@-', 4)", createVarcharType(15), null);
    assertFunction("SPLIT_PART('abc-@-def-@-ghi', '-@-', 99)", createVarcharType(15), null);
    // Delimiter equal to the whole string produces two empty parts.
    assertFunction("SPLIT_PART('abc', 'abc', 1)", createVarcharType(3), "");
    assertFunction("SPLIT_PART('abc', 'abc', 2)", createVarcharType(3), "");
    assertFunction("SPLIT_PART('abc', 'abc', 3)", createVarcharType(3), null);
    assertFunction("SPLIT_PART('abc', '-@-', 1)", createVarcharType(3), "abc");
    assertFunction("SPLIT_PART('abc', '-@-', 2)", createVarcharType(3), null);
    assertFunction("SPLIT_PART('', 'abc', 1)", createVarcharType(0), "");
    assertFunction("SPLIT_PART('', '', 1)", createVarcharType(0), null);
    // Empty delimiter splits into individual code points.
    assertFunction("SPLIT_PART('abc', '', 1)", createVarcharType(3), "a");
    assertFunction("SPLIT_PART('abc', '', 2)", createVarcharType(3), "b");
    assertFunction("SPLIT_PART('abc', '', 3)", createVarcharType(3), "c");
    assertFunction("SPLIT_PART('abc', '', 4)", createVarcharType(3), null);
    assertFunction("SPLIT_PART('abc', '', 99)", createVarcharType(3), null);
    assertFunction("SPLIT_PART('abc', 'abcd', 1)", createVarcharType(3), "abc");
    assertFunction("SPLIT_PART('abc', 'abcd', 2)", createVarcharType(3), null);
    assertFunction("SPLIT_PART('abc--@--def', '-@-', 1)", createVarcharType(11), "abc-");
    assertFunction("SPLIT_PART('abc--@--def', '-@-', 2)", createVarcharType(11), "-def");
    assertFunction("SPLIT_PART('abc-@-@-@-def', '-@-', 1)", createVarcharType(13), "abc");
    assertFunction("SPLIT_PART('abc-@-@-@-def', '-@-', 2)", createVarcharType(13), "@");
    assertFunction("SPLIT_PART('abc-@-@-@-def', '-@-', 3)", createVarcharType(13), "def");
    assertFunction("SPLIT_PART(' ', ' ', 1)", createVarcharType(1), "");
    assertFunction("SPLIT_PART('abcdddddef', 'dd', 1)", createVarcharType(10), "abc");
    assertFunction("SPLIT_PART('abcdddddef', 'dd', 2)", createVarcharType(10), "");
    assertFunction("SPLIT_PART('abcdddddef', 'dd', 3)", createVarcharType(10), "def");
    assertFunction("SPLIT_PART('a/b/c', '/', 4)", createVarcharType(5), null);
    // A trailing delimiter yields a final empty part.
    assertFunction("SPLIT_PART('a/b/c/', '/', 4)", createVarcharType(6), "");
    //
    // Test SPLIT_PART for non-ASCII
    assertFunction("SPLIT_PART('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', ',', 1)", createVarcharType(7), "\u4FE1\u5FF5");
    assertFunction("SPLIT_PART('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', ',', 2)", createVarcharType(7), "\u7231");
    assertFunction("SPLIT_PART('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', ',', 3)", createVarcharType(7), "\u5E0C\u671B");
    assertFunction("SPLIT_PART('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', ',', 4)", createVarcharType(7), null);
    assertFunction("SPLIT_PART('\u8B49\u8BC1\u8A3C', '\u8BC1', 1)", createVarcharType(3), "\u8B49");
    assertFunction("SPLIT_PART('\u8B49\u8BC1\u8A3C', '\u8BC1', 2)", createVarcharType(3), "\u8A3C");
    assertFunction("SPLIT_PART('\u8B49\u8BC1\u8A3C', '\u8BC1', 3)", createVarcharType(3), null);
    // Index must be a positive 1-based field number.
    assertInvalidFunction("SPLIT_PART('abc', '', 0)", "Index must be greater than zero");
    assertInvalidFunction("SPLIT_PART('abc', '', -1)", "Index must be greater than zero");
    assertInvalidFunction("SPLIT_PART(utf8(from_hex('CE')), '', 1)", "Invalid UTF-8 encoding");
}
/**
 * Builds the {@link SubflowExecutionResult} for a terminated subflow execution.
 * Returns empty while the execution is still running. Outputs declared on the
 * subflow take precedence over this task's own outputs; rendering failures are
 * reported as a FAILED result (task run state WARNING when allowFailure is set).
 */
@Override
public Optional<SubflowExecutionResult> createSubflowExecutionResult(
    RunContext runContext,
    TaskRun taskRun,
    io.kestra.core.models.flows.Flow flow,
    Execution execution
) {
    // we only create a worker task result when the execution is terminated
    if (!taskRun.getState().isTerminated()) {
        return Optional.empty();
    }

    // Plugin-level switch for exposing outputs; defaults to true when unset.
    boolean isOutputsAllowed = runContext
        .<Boolean>pluginConfiguration(PLUGIN_FLOW_OUTPUTS_ENABLED)
        .orElse(true);

    final Output.OutputBuilder builder = Output.builder()
        .executionId(execution.getId())
        .state(execution.getState().getCurrent());

    // Prefer the outputs declared on the subflow itself; otherwise fall back to
    // this task's outputs when the plugin configuration allows them (else null).
    final Map<String, Object> subflowOutputs = Optional
        .ofNullable(flow.getOutputs())
        .map(outputs -> outputs
            .stream()
            .collect(Collectors.toMap(
                io.kestra.core.models.flows.Output::getId,
                io.kestra.core.models.flows.Output::getValue)
            )
        )
        .orElseGet(() -> isOutputsAllowed ? this.getOutputs() : null);

    if (subflowOutputs != null) {
        try {
            Map<String, Object> outputs = runContext.render(subflowOutputs);
            // Workaround: resolve FlowInputOutput from the application context so
            // declared outputs can be coerced to their declared types.
            FlowInputOutput flowInputOutput = ((DefaultRunContext)runContext).getApplicationContext().getBean(FlowInputOutput.class);
            if (flow.getOutputs() != null && flowInputOutput != null) {
                outputs = flowInputOutput.typedOutputs(flow, execution, outputs);
            }
            builder.outputs(outputs);
        } catch (Exception e) {
            // Rendering/typing failed: record the failure on the task run.
            runContext.logger().warn("Failed to extract outputs with the error: '{}'", e.getLocalizedMessage(), e);
            var state = this.isAllowFailure() ? State.Type.WARNING : State.Type.FAILED;
            taskRun = taskRun
                .withState(state)
                .withAttempts(Collections.singletonList(TaskRunAttempt.builder().state(new State().withState(state)).build()))
                .withOutputs(builder.build().toMap());

            // NOTE(review): the result state is always FAILED here even when the task
            // run is WARNING (allowFailure) — looks intentional, but worth confirming.
            return Optional.of(SubflowExecutionResult.builder()
                .executionId(execution.getId())
                .state(State.Type.FAILED)
                .parentTaskRun(taskRun)
                .build());
        }
    }

    taskRun = taskRun.withOutputs(builder.build().toMap());
    // Derive the final task-run state from the subflow execution outcome.
    State.Type finalState = ExecutableUtils.guessState(execution, this.transmitFailed, this.isAllowFailure());
    if (taskRun.getState().getCurrent() != finalState) {
        taskRun = taskRun.withState(finalState);
    }

    return Optional.of(ExecutableUtils.subflowExecutionResult(taskRun, execution));
}
@SuppressWarnings("deprecation") @Test void shouldReturnOutputsForSubflowOutputsEnabled() throws IllegalVariableEvaluationException { // Given Mockito.when(applicationContext.getProperty(Subflow.PLUGIN_FLOW_OUTPUTS_ENABLED, Boolean.class)) .thenReturn(Optional.of(true)); Map<String, Object> outputs = Map.of("key", "value"); Mockito.when(runContext.render(Mockito.anyMap())).thenReturn(outputs); Subflow subflow = Subflow.builder() .outputs(outputs) .build(); // When Optional<SubflowExecutionResult> result = subflow.createSubflowExecutionResult( runContext, TaskRun.builder().state(DEFAULT_SUCCESS_STATE).build(), Flow.builder().build(), Execution.builder().id(EXECUTION_ID).state(DEFAULT_SUCCESS_STATE).build() ); // Then assertTrue(result.isPresent()); Map<String, Object> expected = Subflow.Output.builder() .executionId(EXECUTION_ID) .state(DEFAULT_SUCCESS_STATE.getCurrent()) .outputs(outputs) .build() .toMap(); assertThat(result.get().getParentTaskRun().getOutputs(), is(expected)); assertThat(result.get().getParentTaskRun().getAttempts().get(0).getState().getHistories(), Matchers.contains( hasProperty("state", is(State.Type.CREATED)), hasProperty("state", is(State.Type.RUNNING)), hasProperty("state", is(State.Type.SUCCESS)) )); }
/**
 * Checks whether {@code date} falls within {@code [beginDate, endDate]}.
 * Delegates to the five-argument overload with both boundaries inclusive.
 *
 * @param date      the date to test
 * @param beginDate start of the range (inclusive)
 * @param endDate   end of the range (inclusive)
 * @return true if the date is inside the range
 */
public static boolean isIn(TemporalAccessor date, TemporalAccessor beginDate, TemporalAccessor endDate) {
    return isIn(date, beginDate, endDate, true, true);
}
@Test
public void isInTest(){
    // Boundaries are inclusive by default, so a date equal to the range start is "in".
    final String date = "2022-04-19 00:00:00";
    final String rangeStart = "2022-04-19 00:00:00";
    final String rangeEnd = "2022-04-19 23:59:59";

    final boolean in = TemporalAccessorUtil.isIn(
            LocalDateTimeUtil.parse(date, DatePattern.NORM_DATETIME_FORMATTER),
            LocalDateTimeUtil.parse(rangeStart, DatePattern.NORM_DATETIME_FORMATTER),
            LocalDateTimeUtil.parse(rangeEnd, DatePattern.NORM_DATETIME_FORMATTER));
    assertTrue(in);
}
/**
 * Matches every (column, value) pair in {@code columnPairsMap} against this
 * row's values; when all pairs match, returns the value of {@code outputColumn}
 * (possibly empty if that column is absent/null), otherwise empty.
 * Regex matching is used only when the row flags {@code regexField} as true.
 */
public Optional<Object> evaluate(final Map<String, Object> columnPairsMap, final String outputColumn, final String regexField) {
    final boolean useRegex = regexField != null
            && columnValues.containsKey(regexField)
            && (boolean) columnValues.get(regexField);
    for (Map.Entry<String, Object> columnPair : columnPairsMap.entrySet()) {
        final Object rowValue = columnValues.get(columnPair.getKey());
        final boolean matched = useRegex
                ? isRegexMatching(rowValue.toString(), (String) columnPair.getValue())
                : isMatching(rowValue, columnPair.getValue());
        if (!matched) {
            // First mismatch decides: no output for this row.
            return Optional.empty();
        }
    }
    return Optional.ofNullable(columnValues.get(outputColumn));
}
@Test
void evaluateKeyFoundMatchingOutputColumnFound() {
    KiePMMLRow row = new KiePMMLRow(COLUMN_VALUES);
    // "KEY-1" matches, so the value stored under "KEY-0" is returned.
    Optional<Object> result = row.evaluate(Collections.singletonMap("KEY-1", 1), "KEY-0", null);
    assertThat(result).isPresent();
    assertThat(result.get()).isEqualTo(COLUMN_VALUES.get("KEY-0"));
}
/**
 * Creates a checkpoint via the parent implementation, then synchronizes with
 * the remote stream so local and remote repositories stay aligned.
 *
 * @return the revision produced by the parent checkpoint
 * @throws IOException if the underlying checkpoint fails
 */
@Override
public Revision checkpoint(String noteId, String notePath, String commitMessage, AuthenticationInfo subject)
    throws IOException {
  Revision revision = super.checkpoint(noteId, notePath, commitMessage, subject);
  // Pull/push against the remote after the local commit is recorded.
  updateRemoteStream();
  return revision;
}
@Test /** * Test the case when the check-pointing (add new files and commit) it also pulls the latest changes from the * remote repository */ void pullChangesFromRemoteRepositoryOnCheckpointing() throws GitAPIException, IOException { // Create a new commit in the remote repository RevCommit secondCommitRevision = remoteGit.commit().setMessage("Second commit from remote repository").call(); // Add a new paragraph to the local repository addParagraphToNotebook(); // Commit and push the changes to remote repository NotebookRepoWithVersionControl.Revision thirdCommitRevision = gitHubNotebookRepo.checkpoint( TEST_NOTE_ID, TEST_NOTE_PATH, "Third commit from local repository", null); // Check all the commits as seen from the local repository. The commits are ordered chronologically. The last // commit is the first in the commit logs. Iterator<RevCommit> revisions = gitHubNotebookRepo.getGit().log().all().call().iterator(); revisions.next(); // The Merge `master` commit after pushing to the remote repository assert(thirdCommitRevision.id.equals(revisions.next().getName())); // The local commit after adding the paragraph // The second commit done on the remote repository assert(secondCommitRevision.getName().equals(revisions.next().getName())); // The first commit done on the remote repository assert(firstCommitRevision.getName().equals(revisions.next().getName())); }
public static boolean areKerberosCredentialsValid( UserGroupInformation ugi, boolean useTicketCache) { Preconditions.checkState(isKerberosSecurityEnabled(ugi)); // note: UGI::hasKerberosCredentials inaccurately reports false // for logins based on a keytab (fixed in Hadoop 2.6.1, see HADOOP-10786), // so we check only in ticket cache scenario. if (useTicketCache && !ugi.hasKerberosCredentials()) { if (hasHDFSDelegationToken(ugi)) { LOG.warn( "Hadoop security is enabled but current login user does not have Kerberos credentials, " + "use delegation token instead. Flink application will terminate after token expires."); return true; } else { LOG.error( "Hadoop security is enabled, but current login user has neither Kerberos credentials " + "nor delegation tokens!"); return false; } } return true; }
@Test
public void testShouldReturnTrueWhenDelegationTokenIsPresent() {
    UserGroupInformation.setConfiguration(
            getHadoopConfigWithAuthMethod(AuthenticationMethod.KERBEROS));
    UserGroupInformation tokenOnlyUser = createTestUser(AuthenticationMethod.KERBEROS);
    tokenOnlyUser.addToken(getHDFSDelegationToken());
    // Precondition: the user really has no Kerberos credentials.
    assumeFalse(tokenOnlyUser.hasKerberosCredentials());

    // A delegation token is accepted in place of Kerberos credentials.
    boolean valid = HadoopUtils.areKerberosCredentialsValid(tokenOnlyUser, true);

    assertTrue(valid);
}
/**
 * Returns the node partition this allocator targets; exposed for tests only.
 */
@VisibleForTesting
String getTargetNodePartition() {
  return targetNodePartition;
}
/**
 * Validates SchedulingRequest handling: accepted requests (with/without a node
 * partition, with an application-id target) and rejected ones (missing sizing,
 * missing target tags, non-GUARANTEED execution type). Also checks the node
 * partition the allocator derives from each request.
 */
@Test
public void testSchedulingRequestValidation() {
  // Valid
  assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
      .allocationRequestId(10L).priority(Priority.newInstance(1))
      .placementConstraintExpression(PlacementConstraints
          .targetNotIn(PlacementConstraints.NODE,
              PlacementConstraints.PlacementTargets
                  .allocationTag("mapper", "reducer"),
              PlacementConstraints.PlacementTargets.nodePartition(""))
          .build()).resourceSizing(
          ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
      .build());
  Assert.assertEquals("", allocator.getTargetNodePartition());

  // Valid (with partition)
  assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
      .allocationRequestId(10L).priority(Priority.newInstance(1))
      .placementConstraintExpression(PlacementConstraints
          .targetNotIn(PlacementConstraints.NODE,
              PlacementConstraints.PlacementTargets
                  .allocationTag("mapper", "reducer"),
              PlacementConstraints.PlacementTargets.nodePartition("x"))
          .build()).resourceSizing(
          ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
      .build());
  Assert.assertEquals("x", allocator.getTargetNodePartition());

  // Valid (without specifying node partition)
  assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
      .allocationRequestId(10L).priority(Priority.newInstance(1))
      .placementConstraintExpression(PlacementConstraints
          .targetNotIn(PlacementConstraints.NODE,
              PlacementConstraints.PlacementTargets
                  .allocationTag("mapper", "reducer")).build())
      .resourceSizing(
          ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
      .build());
  // Node partition is unspecified, use the default node label expression y
  Assert.assertEquals("y", allocator.getTargetNodePartition());

  // Valid (with application Id target)
  assertValidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
      .allocationRequestId(10L).priority(Priority.newInstance(1))
      .placementConstraintExpression(PlacementConstraints
          .targetNotIn(PlacementConstraints.NODE,
              PlacementConstraints.PlacementTargets
                  .allocationTag("mapper", "reducer")).build())
      .resourceSizing(
          ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
      .build());
  // Allocation tags should not include application Id
  Assert.assertEquals("y", allocator.getTargetNodePartition());

  // Invalid (without sizing)
  assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
      .allocationRequestId(10L).priority(Priority.newInstance(1))
      .placementConstraintExpression(PlacementConstraints
          .targetNotIn(PlacementConstraints.NODE,
              PlacementConstraints.PlacementTargets
                  .allocationTag("mapper", "reducer")).build())
      .build(), true);

  // Invalid (without target tags)
  assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
      ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
      .allocationRequestId(10L).priority(Priority.newInstance(1))
      .placementConstraintExpression(PlacementConstraints
          .targetNotIn(PlacementConstraints.NODE).build())
      .build(), true);

  // Invalid (not GUARANTEED)
  assertInvalidSchedulingRequest(SchedulingRequest.newBuilder().executionType(
      ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC))
      .allocationRequestId(10L).priority(Priority.newInstance(1))
      .placementConstraintExpression(PlacementConstraints
          .targetNotIn(PlacementConstraints.NODE,
              PlacementConstraints.PlacementTargets
                  .allocationTag("mapper", "reducer"),
              PlacementConstraints.PlacementTargets.nodePartition(""))
          .build()).resourceSizing(
          ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
      .build(), true);
}
/**
 * Returns the list of this data type's child data types (e.g. the element type
 * of a container type).
 */
public abstract List<DataType> getChildren();
@Test
void testArrayInternalElementConversion() {
    // Bridging ARRAY to ArrayData should bridge its element type to StringData too.
    DataType bridgedArrayType = ARRAY(STRING()).bridgedTo(ArrayData.class);
    assertThat(bridgedArrayType)
            .getChildren()
            .containsExactly(STRING().bridgedTo(StringData.class));
}
/**
 * Returns all OpenStack nodes known to the backing node store.
 */
@Override
public Set<OpenstackNode> nodes() {
    return osNodeStore.nodes();
}
/**
 * Verifies node lookup by type: the fixture registers two compute nodes
 * (COMPUTE_2, COMPUTE_3) and one gateway node (GATEWAY_1).
 */
@Test
public void testGetNodesByType() {
    assertEquals(ERR_SIZE, 2, target.nodes(COMPUTE).size());
    assertTrue(ERR_NOT_FOUND, target.nodes(COMPUTE).contains(COMPUTE_2));
    assertTrue(ERR_NOT_FOUND, target.nodes(COMPUTE).contains(COMPUTE_3));
    assertEquals(ERR_SIZE, 1, target.nodes(GATEWAY).size());
    assertTrue(ERR_NOT_FOUND, target.nodes(GATEWAY).contains(GATEWAY_1));
}
/**
 * Records a metric sample by delegating to the wrapped gauge metric family.
 *
 * @param labelValues label values identifying the series
 * @param value       the sample value to record
 */
@Override
public void addMetric(final List<String> labelValues, final double value) {
    gaugeMetricFamily.addMetric(labelValues, value);
}
@Test
void assertCreate() throws ReflectiveOperationException {
    MetricConfiguration config = new MetricConfiguration(
            "foo_gauge_metric_family", MetricCollectorType.GAUGE_METRIC_FAMILY, "foo_help", Collections.emptyList(), Collections.emptyMap());
    PrometheusMetricsGaugeMetricFamilyCollector collector = new PrometheusMetricsGaugeMetricFamilyCollector(config);
    collector.addMetric(Collections.emptyList(), 1D);
    // The collector must hold a Prometheus GaugeMetricFamily internally.
    Object gaugeMetricFamily = Plugins.getMemberAccessor()
            .get(PrometheusMetricsGaugeMetricFamilyCollector.class.getDeclaredField("gaugeMetricFamily"), collector);
    assertThat(gaugeMetricFamily, instanceOf(GaugeMetricFamily.class));
}
@Override public void add(long item, long count) { if (count < 0) { // Negative values are not implemented in the regular version, and do not // play nicely with this algorithm anyway throw new IllegalArgumentException("Negative increments not implemented"); } int[] buckets = new int[depth]; for (int i = 0; i < depth; ++i) { buckets[i] = hash(item, i); } long min = table[0][buckets[0]]; for (int i = 1; i < depth; ++i) { min = Math.min(min, table[i][buckets[i]]); } for (int i = 0; i < depth; ++i) { long newVal = Math.max(table[i][buckets[i]], min + count); table[i][buckets[i]] = newVal; } size += count; }
/**
 * Statistical accuracy check (fixed seed, hence deterministic): feeds 10M
 * random items into both the conservative-add sketch and a plain count-min
 * sketch, then verifies the configured (epsilon, confidence) error bound holds
 * and reports how often the conservative variant beats the baseline.
 */
@Test
public void testAccuracy() {
    int seed = 7364181;
    Random r = new Random(seed);
    int numItems = 10000000;
    int maxScale = 15000;
    double epsOfTotalCount = 0.00075;
    double errorRange = epsOfTotalCount;
    double confidence = 0.99;
    int[] actualFreq = new int[maxScale];
    IFrequency sketch = new ConservativeAddSketch(epsOfTotalCount, confidence, seed);
    IFrequency baseSketch = new CountMinSketch(epsOfTotalCount, confidence, seed);
    // Both sketches see the identical stream; exact counts kept for comparison.
    for (int i = 0; i < numItems; i++) {
        int x = r.nextInt(maxScale);
        sketch.add(x, 1);
        baseSketch.add(x, 1);
        actualFreq[x]++;
    }
    int numErrors = 0;
    int usedNumbers = 0;
    int betterNumbers = 0;
    long totalDelta = 0;
    // Tolerated absolute error per item, derived from epsilon.
    int okayError = (int) (numItems * errorRange) + 1;
    long totalError = 0;
    for (int i = 0; i < actualFreq.length; i++) {
        if (actualFreq[i] > 0) {
            usedNumbers++;
        } else {
            continue;
        }
        // Sketch estimates only over-count, so error is non-negative.
        long error = sketch.estimateCount(i) - actualFreq[i];
        totalError += error;
        if (error > okayError) {
            numErrors++;
        }
        // Positive delta means the conservative sketch is tighter than count-min.
        long delta = baseSketch.estimateCount(i) - sketch.estimateCount(i);
        if (delta > 0) {
            totalDelta += delta;
            betterNumbers++;
        }
    }
    double pCorrect = 1 - 1.0 * numErrors / usedNumbers;
    System.out.println("Confidence : " + pCorrect + " Errors : " + numErrors + " Error margin : " + okayError);
    System.out.println("Total error : " + totalError + " Average error : " + totalError / usedNumbers);
    System.out.println("Beat base for : " + 100 * betterNumbers / usedNumbers + " percent of values"
        + " with a total delta of " + totalDelta);
    assertTrue("Confidence not reached: required " + confidence + ", reached " + pCorrect,
        pCorrect > confidence);
}
/**
 * Generates all event-related members for one ABI event definition: the static
 * Event field, the nested {@code <Name>EventResponse} class, and four accessor
 * methods (from transaction receipt, from log, and two Flowable variants).
 *
 * @param functionDefinition the ABI event definition
 * @param classBuilder       the contract wrapper class being assembled
 * @return the generated event accessor methods
 * @throws ClassNotFoundException if a Solidity type cannot be mapped to a Java type
 */
List<MethodSpec> buildEventFunctions(
        AbiDefinition functionDefinition, TypeSpec.Builder classBuilder)
        throws ClassNotFoundException {
    String functionName = functionDefinition.getName();
    List<AbiDefinition.NamedType> inputs = functionDefinition.getInputs();
    String responseClassName = Strings.capitaliseFirstLetter(functionName) + "EventResponse";

    // Partition event inputs into indexed (topics) and non-indexed (data) parameters.
    List<NamedTypeName> parameters = new ArrayList<>();
    List<NamedTypeName> indexedParameters = new ArrayList<>();
    List<NamedTypeName> nonIndexedParameters = new ArrayList<>();

    for (AbiDefinition.NamedType namedType : inputs) {
        final TypeName typeName;
        if (namedType.getType().equals("tuple")) {
            // Struct parameter: use the previously generated struct class.
            typeName = structClassNameMap.get(namedType.structIdentifier());
        } else if (namedType.getType().startsWith("tuple")
                && namedType.getType().contains("[")) {
            // Array-of-structs parameter.
            typeName = buildStructArrayTypeName(namedType, false);
        } else {
            typeName = buildTypeName(namedType.getType(), useJavaPrimitiveTypes);
        }
        NamedTypeName parameter = new NamedTypeName(namedType, typeName);
        if (namedType.isIndexed()) {
            indexedParameters.add(parameter);
        } else {
            nonIndexedParameters.add(parameter);
        }
        parameters.add(parameter);
    }

    // Static EVENT definition field on the wrapper class.
    classBuilder.addField(createEventDefinition(functionName, parameters));

    // Nested response type carrying the decoded event values.
    classBuilder.addType(
            buildEventResponseObject(
                    responseClassName, indexedParameters, nonIndexedParameters));

    List<MethodSpec> methods = new ArrayList<>();
    methods.add(
            buildEventTransactionReceiptFunction(
                    responseClassName, functionName, indexedParameters, nonIndexedParameters));

    methods.add(
            buildEventLogFunction(
                    responseClassName, functionName, indexedParameters, nonIndexedParameters));

    methods.add(
            buildEventFlowableFunction(
                    responseClassName, functionName, indexedParameters, nonIndexedParameters));
    methods.add(buildDefaultEventFlowableFunction(responseClassName, functionName));

    return methods;
}
/**
 * Generates the event wrapper for an event with a single {@code uint256[]}
 * input and compares the emitted JavaPoet source against the expected text.
 * NOTE(review): the expected string's internal indentation below is reproduced
 * as rendered in review; confirm exact whitespace against the repository copy.
 */
@Test
public void testBuildEventWithNativeList() throws Exception {
    NamedType array = new NamedType("array", "uint256[]");
    AbiDefinition functionDefinition = new AbiDefinition(
        false, Arrays.asList(array), "Transfer", new ArrayList<>(), "event", false);
    TypeSpec.Builder builder = TypeSpec.classBuilder("testClass");
    builder.addMethods(
        solidityFunctionWrapper.buildEventFunctions(functionDefinition, builder));
    String expected = "class testClass {\n"
        + " public static final org.web3j.abi.datatypes.Event TRANSFER_EVENT = new org.web3j.abi.datatypes.Event(\"Transfer\", \n"
        + " java.util.Arrays.<org.web3j.abi.TypeReference<?>>asList(new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.DynamicArray<org.web3j.abi.datatypes.generated.Uint256>>() {}));\n ;\n\n"
        + " public static java.util.List<TransferEventResponse> getTransferEvents(\n"
        + " org.web3j.protocol.core.methods.response.TransactionReceipt transactionReceipt) {\n"
        + " java.util.List<org.web3j.tx.Contract.EventValuesWithLog> valueList = staticExtractEventParametersWithLog(TRANSFER_EVENT, transactionReceipt);\n"
        + " java.util.ArrayList<TransferEventResponse> responses = new java.util.ArrayList<TransferEventResponse>(valueList.size());\n"
        + " for (org.web3j.tx.Contract.EventValuesWithLog eventValues : valueList) {\n"
        + " TransferEventResponse typedResponse = new TransferEventResponse();\n"
        + " typedResponse.log = eventValues.getLog();\n"
        + " typedResponse.array = (java.util.List<java.math.BigInteger>) ((org.web3j.abi.datatypes.Array) eventValues.getNonIndexedValues().get(0)).getNativeValueCopy();\n"
        + " responses.add(typedResponse);\n"
        + " }\n"
        + " return responses;\n"
        + " }\n"
        + "\n"
        + " public static TransferEventResponse getTransferEventFromLog(\n"
        + " org.web3j.protocol.core.methods.response.Log log) {\n"
        + " org.web3j.tx.Contract.EventValuesWithLog eventValues = staticExtractEventParametersWithLog(TRANSFER_EVENT, log);\n"
        + " TransferEventResponse typedResponse = new TransferEventResponse();\n"
        + " typedResponse.log = log;\n"
        + " typedResponse.array = (java.util.List<java.math.BigInteger>) ((org.web3j.abi.datatypes.Array) eventValues.getNonIndexedValues().get(0)).getNativeValueCopy();\n"
        + " return typedResponse;\n"
        + " }\n"
        + "\n"
        + " public io.reactivex.Flowable<TransferEventResponse> transferEventFlowable(\n"
        + " org.web3j.protocol.core.methods.request.EthFilter filter) {\n"
        + " return web3j.ethLogFlowable(filter).map(log -> getTransferEventFromLog(log));\n"
        + " }\n"
        + "\n"
        + " public io.reactivex.Flowable<TransferEventResponse> transferEventFlowable(\n"
        + " org.web3j.protocol.core.DefaultBlockParameter startBlock,\n"
        + " org.web3j.protocol.core.DefaultBlockParameter endBlock) {\n"
        + " org.web3j.protocol.core.methods.request.EthFilter filter = new org.web3j.protocol.core.methods.request.EthFilter(startBlock, endBlock, getContractAddress());\n"
        + " filter.addSingleTopic(org.web3j.abi.EventEncoder.encode(TRANSFER_EVENT));\n"
        + " return transferEventFlowable(filter);\n"
        + " }\n"
        + "\n"
        + " public static class TransferEventResponse extends org.web3j.protocol.core.methods.response.BaseEventResponse {\n"
        + " public java.util.List<java.math.BigInteger> array;\n"
        + " }\n"
        + "}\n";
    assertEquals((expected), builder.build().toString());
}
/**
 * Creates a database admin executor for the given statement context by
 * delegating to the wrapped creator; empty when no admin handling applies.
 */
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
    return delegated.create(sqlStatementContext);
}
@Test
void assertCreateOtherExecutor() {
    OpenGaussAdminExecutorCreator creator = new OpenGaussAdminExecutorCreator();
    SQLStatementContext sqlStatementContext = mock(SQLStatementContext.class,
            withSettings().extraInterfaces(TableAvailable.class).defaultAnswer(RETURNS_DEEP_STUBS));
    when(((TableAvailable) sqlStatementContext).getTablesContext().getTableNames()).thenReturn(Collections.emptyList());
    // A statement over no recognized tables yields no admin executor from either overload.
    Optional<DatabaseAdminExecutor> created = creator.create(sqlStatementContext);
    assertThat(created, is(Optional.empty()));
    Optional<DatabaseAdminExecutor> createdWithSql = creator.create(sqlStatementContext, "", "", Collections.emptyList());
    assertThat(createdWithSql, is(Optional.empty()));
}
/**
 * Returns the window interval in milliseconds.
 */
public long getIntervalInMs() {
    return intervalInMs;
}
/**
 * The getter must echo the interval the fixture window was constructed with.
 */
@Test
void testGetIntervalInMs() {
    assertEquals(intervalInMs, window.getIntervalInMs());
}
/**
 * Returns a copy of {@code properties} with every entry whose key is in
 * {@code keysToRemove} filtered out; the input is never modified.
 *
 * @param properties the properties to filter, must not be {@code null}
 * @return a new {@link Properties} containing only the retained entries
 * @throws IllegalArgumentException if {@code properties} is {@code null}
 */
public Properties apply(final Properties properties) {
    if (properties == null) {
        // Keep IllegalArgumentException (not requireNonNull's NPE) so existing
        // callers catching IAE keep working.
        throw new IllegalArgumentException("properties must not be null");
    }
    // No special case for empty input needed: the loop simply copies nothing,
    // which is what the removed `isEmpty()` branch returned anyway.
    final Properties filtered = new Properties();
    for (Map.Entry<Object, Object> entry : properties.entrySet()) {
        if (!keysToRemove.contains(entry.getKey())) {
            filtered.put(entry.getKey(), entry.getValue());
        }
    }
    return filtered;
}
@Test public void emptyInputResultsInEmptyOutput() { // Given Properties emptyProperties = new Properties(); Filter f = new Filter(); // When Properties filtered = f.apply(emptyProperties); // Then assertEquals(0, filtered.size()); }
/**
 * Returns the Unix groups the given user belongs to.
 *
 * @param userName the user to resolve groups for
 * @return a mutable copy of the user's group names (empty when none)
 * @throws IOException if the underlying group lookup fails
 */
@Override
public List<String> getGroups(String userName) throws IOException {
  // Use the diamond operator: the raw `new ArrayList(...)` defeated generic
  // type checking and produced an unchecked-conversion warning.
  return new ArrayList<>(getUnixGroups(userName));
}
@Test
public void testGetGroupsNonexistentUser() throws Exception {
    TestGroupUserNotExist groupMapping = new TestGroupUserNotExist();
    // An unknown user must resolve to no groups rather than an error.
    List<String> resolvedGroups = groupMapping.getGroups("foobarusernotexist");
    assertTrue(resolvedGroups.isEmpty());
}
/**
 * Sets the action string of this permission.
 *
 * @param action the action value to store
 */
public void setAction(String action) {
    this.action = action;
}
@Test
void testSetAction() {
    Permission permission = new Permission();
    // The getter must echo back exactly what the setter stored.
    String readAction = ActionTypes.READ.toString();
    permission.setAction(readAction);
    assertEquals(readAction, permission.getAction());
}
/**
 * Derives the aggregated workflow-instance status from the per-step aggregated
 * views and stores it on {@code aggregated}.
 *
 * <p>Fast path: if the current run is non-terminal, a fresh run, or terminal but
 * not SUCCEEDED, its own status is taken as-is. Otherwise (a SUCCEEDED restart
 * run) the step statuses are folded with priority FAILED &gt; TIMED_OUT &gt;
 * STOPPED; all-SUCCEEDED yields SUCCEEDED, and any NOT_CREATED step falls back
 * to the previous run's aggregated status.
 */
@VisibleForTesting
static void computeAndSetAggregatedInstanceStatus(
    WorkflowInstance currentInstance, WorkflowInstanceAggregatedInfo aggregated) {
  if (!currentInstance.getStatus().isTerminal()
      || currentInstance.isFreshRun()
      || !currentInstance.getStatus().equals(WorkflowInstance.Status.SUCCEEDED)) {
    aggregated.setWorkflowInstanceStatus(currentInstance.getStatus());
    return;
  }

  // Tracks whether every step finished successfully (no CREATED placeholders).
  boolean succeeded = true;
  for (StepAggregatedView stepAggregated : aggregated.getStepAggregatedViews().values()) {
    WorkflowInstance.Status workflowStatus =
        STEP_INSTANCE_STATUS_TO_WORKFLOW_INSTANCE_STATUS.get(stepAggregated.getStatus());
    switch (workflowStatus) {
      case FAILED:
        // if any step is failed overall status is failed
        aggregated.setWorkflowInstanceStatus(WorkflowInstance.Status.FAILED);
        return;
      case TIMED_OUT:
        // if any are timed out, we want to keep going to search for failed steps
        aggregated.setWorkflowInstanceStatus(WorkflowInstance.Status.TIMED_OUT);
        break;
      case STOPPED:
        // prioritize timed out status before stopped
        if (aggregated.getWorkflowInstanceStatus() != WorkflowInstance.Status.TIMED_OUT) {
          aggregated.setWorkflowInstanceStatus(WorkflowInstance.Status.STOPPED);
        }
        break;
      case SUCCEEDED:
        break;
      case CREATED:
        // there are steps in NOT_CREATED STATUS and the workflow instance cannot be SUCCEEDED.
        succeeded = false;
        break;
      default:
        // should never reach here with IN_PROGRESS status;
        throw new MaestroInternalError(
            "State %s is not expected during aggregated status computation for"
                + " workflow_id = %s ; workflow_instance_id = %s ; workflow_run_id = %s",
            workflowStatus,
            currentInstance.getWorkflowId(),
            currentInstance.getWorkflowInstanceId(),
            currentInstance.getWorkflowRunId());
    }
  }

  // No FAILED/TIMED_OUT/STOPPED seen: either fully SUCCEEDED, or fall back to
  // the previous run's aggregated status when some steps were never created.
  if (aggregated.getWorkflowInstanceStatus() == null) {
    if (succeeded) {
      aggregated.setWorkflowInstanceStatus(WorkflowInstance.Status.SUCCEEDED);
    } else {
      aggregated.setWorkflowInstanceStatus(
          currentInstance.getAggregatedInfo().getWorkflowInstanceStatus());
    }
  }
}
/**
 * Drives computeAndSetAggregatedInstanceStatus through representative step
 * combinations: all-succeeded, any-failed, all/partially not-created (falls
 * back to the previous run's aggregated status carried on the run fixture),
 * failed+stopped priority, in-progress, and stopped-only.
 */
@Test
public void testStatusAggregation() {
  WorkflowInstance run2 = getGenericWorkflowInstance(
      2, WorkflowInstance.Status.SUCCEEDED, RunPolicy.START_CUSTOMIZED_RUN,
      RestartPolicy.RESTART_FROM_INCOMPLETE);
  WorkflowInstance run3 = getGenericWorkflowInstance(
      3, WorkflowInstance.Status.IN_PROGRESS, RunPolicy.START_CUSTOMIZED_RUN,
      RestartPolicy.RESTART_FROM_INCOMPLETE);
  WorkflowInstance run4 = getGenericWorkflowInstance(
      4, WorkflowInstance.Status.STOPPED, RunPolicy.START_CUSTOMIZED_RUN,
      RestartPolicy.RESTART_FROM_INCOMPLETE);
  WorkflowInstance run5 = getGenericWorkflowInstance(
      5, WorkflowInstance.Status.FAILED, RunPolicy.START_CUSTOMIZED_RUN,
      RestartPolicy.RESTART_FROM_INCOMPLETE);

  // All steps succeeded on a SUCCEEDED restart run -> SUCCEEDED.
  WorkflowInstanceAggregatedInfo aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run2, aggregated);
  assertEquals(WorkflowInstance.Status.SUCCEEDED, aggregated.getWorkflowInstanceStatus());

  // Any fatally failed step on a FAILED run -> FAILED.
  aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.FATALLY_FAILED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run5, aggregated);
  assertEquals(WorkflowInstance.Status.FAILED, aggregated.getWorkflowInstanceStatus());

  // Non-terminal run (IN_PROGRESS) keeps its own status regardless of steps.
  aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run3, aggregated);
  assertEquals(WorkflowInstance.Status.IN_PROGRESS, aggregated.getWorkflowInstanceStatus());

  aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run3, aggregated);
  assertEquals(WorkflowInstance.Status.IN_PROGRESS, aggregated.getWorkflowInstanceStatus());

  // Internally failed step on a FAILED run -> FAILED.
  aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.INTERNALLY_FAILED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run5, aggregated);
  assertEquals(WorkflowInstance.Status.FAILED, aggregated.getWorkflowInstanceStatus());

  // Failure outranks stopped steps.
  aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.INTERNALLY_FAILED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.STOPPED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.STOPPED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run5, aggregated);
  assertEquals(WorkflowInstance.Status.FAILED, aggregated.getWorkflowInstanceStatus());

  // A still-starting step on an IN_PROGRESS run -> IN_PROGRESS.
  aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.STARTING, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.STOPPED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run3, aggregated);
  assertEquals(WorkflowInstance.Status.IN_PROGRESS, aggregated.getWorkflowInstanceStatus());

  // Stopped step on a STOPPED run -> STOPPED.
  aggregated = new WorkflowInstanceAggregatedInfo();
  aggregated.getStepAggregatedViews()
      .put("step1", generateStepAggregated(StepInstance.Status.STOPPED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step2", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  aggregated.getStepAggregatedViews()
      .put("step3", generateStepAggregated(StepInstance.Status.NOT_CREATED, 11L, 12L));
  AggregatedViewHelper.computeAndSetAggregatedInstanceStatus(run4, aggregated);
  assertEquals(WorkflowInstance.Status.STOPPED, aggregated.getWorkflowInstanceStatus());
}
@Operation(summary = "Gets the status of ongoing database migrations, if any", description = "Return the detailed status of ongoing database migrations"
  + " including starting date. If no migration is ongoing or needed it is still possible to call this endpoint and receive appropriate information.")
@GetMapping
public DatabaseMigrationsResponse getStatus() {
  // A readable database version is a hard prerequisite for reporting anything.
  checkState(databaseVersion.getVersion().isPresent(), NO_CONNECTION_TO_DB);

  DatabaseVersion.Status versionStatus = databaseVersion.getStatus();
  boolean noMigrationNeeded = versionStatus == DatabaseVersion.Status.UP_TO_DATE
    || versionStatus == DatabaseVersion.Status.REQUIRES_DOWNGRADE;
  if (noMigrationNeeded) {
    return new DatabaseMigrationsResponse(databaseMigrationState);
  }
  if (!database.getDialect().supportsMigration()) {
    return new DatabaseMigrationsResponse(DatabaseMigrationState.Status.STATUS_NOT_SUPPORTED);
  }
  // A migration is required and supported: report the live migration state.
  return switch (databaseMigrationState.getStatus()) {
    case RUNNING, FAILED, SUCCEEDED -> new DatabaseMigrationsResponse(databaseMigrationState);
    case NONE -> new DatabaseMigrationsResponse(DatabaseMigrationState.Status.MIGRATION_REQUIRED);
    default -> throw new IllegalArgumentException(UNSUPPORTED_DATABASE_MIGRATION_STATUS);
  };
}
@Test
void getStatus_whenMigrationFailedWithError_IncludeErrorInResponse() throws Exception {
  // A failed migration on a fresh install must surface the failure cause to the caller.
  when(databaseVersion.getStatus()).thenReturn(DatabaseVersion.Status.FRESH_INSTALL);
  when(dialect.supportsMigration()).thenReturn(true);
  when(migrationState.getStatus()).thenReturn(FAILED);
  when(migrationState.getStartedAt()).thenReturn(Optional.of(SOME_DATE));
  when(migrationState.getError()).thenReturn(Optional.of(new UnsupportedOperationException("error message")));

  // The endpoint still answers 200 and embeds the error message in the payload.
  mockMvc.perform(get(DATABASE_MIGRATIONS_ENDPOINT))
    .andExpectAll(
      status().isOk(),
      content().json("{\"status\":\"MIGRATION_FAILED\",\"message\":\"error message\"}"));
}
/**
 * Replaces the predicate negated by this node.
 *
 * @param operand the non-null predicate to negate
 * @return this, for call chaining
 * @throws NullPointerException if {@code operand} is null
 */
public Negation setOperand(Predicate operand) {
    this.operand = Objects.requireNonNull(operand, "operand");
    return this;
}
@Test
void requireThatEqualsIsImplemented() {
    Negation lhs = new Negation(SimplePredicates.newString("foo"));

    // Reflexive, and never equal to an unrelated type.
    assertEquals(lhs, lhs);
    assertNotEquals(lhs, new Object());

    // Different operands compare unequal; switching to the same operand restores equality.
    Negation rhs = new Negation(SimplePredicates.newString("bar"));
    assertNotEquals(lhs, rhs);
    rhs.setOperand(SimplePredicates.newString("foo"));
    assertEquals(lhs, rhs);
}
/**
 * Generates a random (version 4) UUID.
 *
 * @return the UUID rendered in its canonical 36-character string form
 */
@Udf
public String uuid() {
    final java.util.UUID generated = java.util.UUID.randomUUID();
    return generated.toString();
}
@Test
public void nullValueShouldReturnNullValue() {
    // A null input must propagate as a null output rather than throw.
    final String uuid = udf.uuid(null);
    assertThat(uuid, is(nullValue()));
}
/**
 * Cuts the given rectangle out of the source image file and writes the result
 * to the destination file.
 *
 * @param srcImgFile  source image file to read
 * @param destImgFile destination file receiving the cropped image
 * @param rectangle   region to keep, in source-image coordinates
 */
public static void cut(File srcImgFile, File destImgFile, Rectangle rectangle) {
    BufferedImage source = null;
    try {
        source = read(srcImgFile);
        cut(source, destImgFile, rectangle);
    } finally {
        // Release the decoded image even if reading or cutting failed.
        flush(source);
    }
}
@Test
@Disabled
public void cutTest() {
    // Manual check: crop a 400x240 region starting at the top-left corner.
    ImgUtil.cut(
            FileUtil.file("d:/test/hutool.png"),
            FileUtil.file("d:/test/result.png"),
            new Rectangle(0, 0, 400, 240));
}
@VisibleForTesting public Map<String, String> getOptionsMap() { return mMountOptions.stream() .collect(ImmutableMap.toImmutableMap( Pair::getFirst, Pair::getSecond, // merge function: use value of the last occurrence if the key occurs multiple times (oldValue, newValue) -> newValue )); }
@Test
public void parseKeyOnly() throws Exception {
    MountCliOptions mountOptions = new MountCliOptions();
    JCommander jCommander = JCommander.newBuilder()
        .addObject(mountOptions)
        .build();

    // Options given without "=value" should parse to empty-string values.
    jCommander.parse("-o", "key1", "-o", "key2");

    Map<String, String> optionsMap = mountOptions.getOptionsMap();
    assertEquals(Optional.of(""), Optional.ofNullable(optionsMap.get("key1")));
    assertEquals(Optional.of(""), Optional.ofNullable(optionsMap.get("key2")));
}
/**
 * No-op scheduler tick.
 *
 * @return always {@code false}, signalling that no work was performed
 */
@Override
public boolean tick() {
    return false;
}
@Test
public void test_tick() {
    NopScheduler scheduler = new NopScheduler();
    // The no-op scheduler never reports that work was done.
    assertFalse(scheduler.tick());
}
/**
 * Inverse of the Cauchy CDF for a given probability, median and scale (gamma).
 *
 * @param median location parameter of the distribution
 * @param scale  scale parameter (gamma); must be strictly positive
 * @param p      probability, restricted to [0, 1]
 * @return the quantile of the Cauchy distribution at probability {@code p}
 */
@Description("Inverse of Cauchy cdf for a given probability, median, and scale (gamma)")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double inverseCauchyCdf(
        @SqlType(StandardTypes.DOUBLE) double median,
        @SqlType(StandardTypes.DOUBLE) double scale,
        @SqlType(StandardTypes.DOUBLE) double p)
{
    checkCondition(p >= 0 && p <= 1, INVALID_FUNCTION_ARGUMENT, "inverseCauchyCdf Function: p must be in the interval [0, 1]");
    checkCondition(scale > 0, INVALID_FUNCTION_ARGUMENT, "inverseCauchyCdf Function: scale must be greater than 0");
    CauchyDistribution cauchy =
            new CauchyDistribution(null, median, scale, CauchyDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
    return cauchy.inverseCumulativeProbability(p);
}
@Test
public void testInverseCauchyCdf() {
    // p = 0.5 maps back onto the median itself.
    assertFunction("inverse_cauchy_cdf(0.0, 1.0, 0.5)", DOUBLE, 0.0);
    assertFunction("inverse_cauchy_cdf(5.0, 2.0, 0.25)", DOUBLE, 3.0);
    assertFunction("round(inverse_cauchy_cdf(2.5, 1.0, 0.65), 2)", DOUBLE, 3.01);
    assertFunction("round(inverse_cauchy_cdf(5.0, 1.0, 0.15), 2)", DOUBLE, 3.04);
    // A non-positive scale must be rejected.
    assertInvalidFunction("inverse_cauchy_cdf(0.0, -1.0, 0.0)", "inverseCauchyCdf Function: scale must be greater than 0");
}
CompletableFuture<CreatePayPalOneTimePaymentMutation.CreatePayPalOneTimePayment> createPayPalOneTimePayment( final BigDecimal amount, final String currency, final String returnUrl, final String cancelUrl, final String locale) { final CreatePayPalOneTimePaymentInput input = buildCreatePayPalOneTimePaymentInput(amount, currency, returnUrl, cancelUrl, locale); final CreatePayPalOneTimePaymentMutation mutation = new CreatePayPalOneTimePaymentMutation(input); final HttpRequest request = buildRequest(mutation); return httpClient.sendAsync(request, HttpResponse.BodyHandlers.ofString()) .thenApply(httpResponse -> { // IntelliJ users: type parameters error “no instance of type variable exists so that Data conforms to Data” // is not accurate; this might be fixed in Kotlin 1.8: https://youtrack.jetbrains.com/issue/KTIJ-21905/ final CreatePayPalOneTimePaymentMutation.Data data = assertSuccessAndExtractData(httpResponse, mutation); return data.createPayPalOneTimePayment; }); }
@Test
void createPayPalOneTimePaymentGraphQlError() {
    // HTTP 200 carrying a GraphQL-level error must still fail the future.
    final HttpResponse<Object> response = mock(HttpResponse.class);
    when(httpClient.sendAsync(any(), any()))
        .thenReturn(CompletableFuture.completedFuture(response));
    when(response.body())
        .thenReturn(createErrorResponse("createPayPalOneTimePayment", "12345"));
    when(response.statusCode())
        .thenReturn(200);

    final CompletableFuture<CreatePayPalOneTimePaymentMutation.CreatePayPalOneTimePayment> future =
        braintreeGraphqlClient.createPayPalOneTimePayment(
            BigDecimal.ONE, CURRENCY, RETURN_URL, CANCEL_URL, LOCALE);

    // The failure surfaces as a ServiceUnavailableException wrapped in ExecutionException.
    assertTimeoutPreemptively(Duration.ofSeconds(3), () -> {
      final ExecutionException e = assertThrows(ExecutionException.class, future::get);
      assertTrue(e.getCause() instanceof ServiceUnavailableException);
    });
}
/**
 * Creates a new-issues notification backed by the given assignee lookup map.
 *
 * @param assigneesByUuid assignee users indexed by UUID; validated before use
 * @return the notification, wired with a details supplier over the map
 */
public NewIssuesNotification newNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
  verifyAssigneesByUuid(assigneesByUuid);
  DetailsSupplierImpl detailsSupplier = new DetailsSupplierImpl(assigneesByUuid);
  return new NewIssuesNotification(detailsSupplier);
}
@Test
public void newNewIssuesNotification_DetailsSupplier_getUserNameByUuid_returns_empty_if_user_has_null_name() {
    UserDto user = UserTesting.newUserDto().setLogin("user_noname").setName(null);
    NewIssuesNotification notification = this.underTest.newNewIssuesNotification(ImmutableMap.of(user.getUuid(), user));

    DetailsSupplier detailsSupplier = readDetailsSupplier(notification);

    // A user without a name resolves to an empty Optional rather than a null name.
    assertThat(detailsSupplier.getUserNameByUuid(user.getUuid())).isEmpty();
}
@Override public void updateGroup(MemberGroupUpdateReqVO updateReqVO) { // 校验存在 validateGroupExists(updateReqVO.getId()); // 更新 MemberGroupDO updateObj = MemberGroupConvert.INSTANCE.convert(updateReqVO); memberGroupMapper.updateById(updateObj); }
@Test public void testUpdateGroup_success() { // mock 数据 MemberGroupDO dbGroup = randomPojo(MemberGroupDO.class); groupMapper.insert(dbGroup);// @Sql: 先插入出一条存在的数据 // 准备参数 MemberGroupUpdateReqVO reqVO = randomPojo(MemberGroupUpdateReqVO.class, o -> { o.setId(dbGroup.getId()); // 设置更新的 ID o.setStatus(randomCommonStatus()); }); // 调用 groupService.updateGroup(reqVO); // 校验是否更新正确 MemberGroupDO group = groupMapper.selectById(reqVO.getId()); // 获取最新的 assertPojoEquals(reqVO, group); }
/**
 * @return whether this identity provider is enabled, as driven by settings
 */
@Override
public boolean isEnabled() {
    return settings.isEnabled();
}
@Test
public void is_enabled() {
    enableBitbucketAuthentication(true);
    assertThat(underTest.isEnabled()).isTrue();

    // Flipping the property off must disable the provider.
    settings.setProperty("sonar.auth.bitbucket.enabled", false);
    assertThat(underTest.isEnabled()).isFalse();
}
static List<Integer> getTargetTpcPorts(List<Integer> tpcPorts, ClientTpcConfig tpcConfig) { List<Integer> targetTpcPorts; int tpcConnectionCount = tpcConfig.getConnectionCount(); if (tpcConnectionCount == 0 || tpcConnectionCount >= tpcPorts.size()) { // zero means connect to all. targetTpcPorts = tpcPorts; } else { // we make a copy of the tpc ports because items are removed. List<Integer> tpcPortsCopy = new LinkedList<>(tpcPorts); targetTpcPorts = new ArrayList<>(tpcConnectionCount); ThreadLocalRandom threadLocalRandom = ThreadLocalRandom.current(); for (int k = 0; k < tpcConnectionCount; k++) { int index = threadLocalRandom.nextInt(tpcPortsCopy.size()); targetTpcPorts.add(tpcPortsCopy.remove(index)); } } return targetTpcPorts; }
@Test
public void testGetTargetTpcPorts_whenConnectToSubset() {
    ClientTpcConfig config = new ClientTpcConfig();
    config.setConnectionCount(2);

    List<Integer> tpcPorts = asList(1, 2, 3);
    List<Integer> result = getTargetTpcPorts(tpcPorts, config);

    // Exactly two ports are picked, every one drawn from the input list.
    assertEquals(2, result.size());
    assertTrue(tpcPorts.containsAll(result));
}
/**
 * Recursively deletes the given directory and all of its contents, delegating
 * to the two-argument overload with its boolean flag disabled (see that
 * overload for the flag's exact semantics).
 *
 * @param dir directory to delete recursively
 * @return true when the deletion fully succeeded
 */
public static boolean fullyDelete(final File dir) {
    return fullyDelete(dir, false);
}
@Test(timeout = 30000)
public void testFullyDelete() throws IOException {
    boolean deleted = FileUtil.fullyDelete(del);

    Assert.assertTrue(deleted);
    // The directory must be gone while the temp dir stays intact.
    Verify.notExists(del);
    validateTmpDir();
}
@CanDistro @PostMapping @TpsControl(pointName = "NamingInstanceRegister", name = "HttpNamingInstanceRegister") @Secured(action = ActionTypes.WRITE) public Result<String> register(InstanceForm instanceForm) throws NacosException { // check param instanceForm.validate(); checkWeight(instanceForm.getWeight()); // build instance Instance instance = buildInstance(instanceForm); instanceServiceV2.registerInstance(instanceForm.getNamespaceId(), buildCompositeServiceName(instanceForm), instance); NotifyCenter.publishEvent( new RegisterInstanceTraceEvent(System.currentTimeMillis(), NamingRequestUtil.getSourceIp(), false, instanceForm.getNamespaceId(), instanceForm.getGroupName(), instanceForm.getServiceName(), instance.getIp(), instance.getPort())); return Result.success("ok"); }
@Test
void registerInstance() throws Exception {
    // Build a fully-populated registration form.
    InstanceForm instanceForm = new InstanceForm();
    instanceForm.setNamespaceId(TEST_NAMESPACE);
    instanceForm.setGroupName("DEFAULT_GROUP");
    instanceForm.setServiceName("test-service");
    instanceForm.setIp(TEST_IP);
    instanceForm.setClusterName(TEST_CLUSTER_NAME);
    instanceForm.setPort(9999);
    instanceForm.setHealthy(true);
    instanceForm.setWeight(1.0);
    instanceForm.setEnabled(true);
    instanceForm.setMetadata(TEST_METADATA);
    instanceForm.setEphemeral(true);

    Result<String> result = instanceControllerV2.register(instanceForm);

    // The service is invoked with the composite name and the call reports success.
    verify(instanceServiceV2).registerInstance(eq(TEST_NAMESPACE), eq(TEST_SERVICE_NAME), any());
    assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
    assertEquals("ok", result.getData());
}
/**
 * Sets the number of connections shared among consumers.
 *
 * @param shareConnections connection count to share; may be null to unset
 * @return this builder, for chaining
 */
public ConsumerBuilder shareConnections(Integer shareConnections) {
    this.shareconnections = shareConnections;
    return getThis();
}
@Test
void shareConnections() {
    ConsumerBuilder builder = ConsumerBuilder.newBuilder();
    builder.shareConnections(300);

    // The built config must carry the configured share-connection count.
    Assertions.assertEquals(300, builder.build().getShareconnections());
}
/**
 * Merges a PackageDescr into this composite, rejecting descriptors that
 * belong to a different namespace.
 *
 * @param resource     origin of the descriptor
 * @param packageDescr descriptor to merge; its namespace must match this one
 * @throws RuntimeException when the namespaces differ
 */
public void addPackageDescr(Resource resource, PackageDescr packageDescr) {
    if (getNamespace().equals(packageDescr.getNamespace())) {
        internalAdd(resource, packageDescr);
        return;
    }
    throw new RuntimeException("Composing PackageDescr ("
            + packageDescr.getName()
            + ") in different namespaces (namespace=" + getNamespace()
            + " packageDescr=" + packageDescr.getNamespace() + ")");
}
@Test
public void addPackageDescrSamePkgUUID() {
    String pkgUUID = generateUUID();
    PackageDescr first = new PackageDescr(NAMESPACE);
    first.setPreferredPkgUUID(pkgUUID);

    compositePackageDescr.addPackageDescr(new ByteArrayResource(), first);
    assertThat(compositePackageDescr.getPreferredPkgUUID().isPresent()).isTrue();
    assertThat(compositePackageDescr.getPreferredPkgUUID().get()).isEqualTo(pkgUUID);

    // Adding a second descriptor carrying the same preferred UUID keeps it unchanged.
    PackageDescr second = new PackageDescr(NAMESPACE);
    second.setPreferredPkgUUID(pkgUUID);
    compositePackageDescr.addPackageDescr(new ByteArrayResource(), second);
    assertThat(compositePackageDescr.getPreferredPkgUUID().get()).isEqualTo(pkgUUID);
}
/**
 * Returns a copy of the array with every occurrence of the given value removed.
 *
 * @param array  array of values; may be null
 * @param victim value to remove; null elements are matched as well
 * @return a new list without {@code victim}, or null when {@code array} is null
 */
@Udf
public <T> List<T> remove(
    @UdfParameter(description = "Array of values") final List<T> array,
    @UdfParameter(description = "Value to remove") final T victim) {
  return array == null
      ? null
      : array.stream()
          .filter(element -> !Objects.equals(victim, element))
          .collect(Collectors.toList());
}
@Test
public void shouldRemoveIntegers() {
    final List<Integer> source = Arrays.asList(1, 2, 3, 2, 1);
    final Integer victim = 2;

    final List<Integer> result = udf.remove(source, victim);

    // Every occurrence of 2 is dropped; the order of the rest is preserved.
    assertThat(result, contains(1, 3, 1));
}
/**
 * Creates a {@code Match} transform whose default configuration disallows
 * empty matches ({@code EmptyMatchTreatment.DISALLOW}).
 *
 * @return a new Match builder result with the default configuration applied
 */
public static Match match() {
  MatchConfiguration configuration = MatchConfiguration.create(EmptyMatchTreatment.DISALLOW);
  return new AutoValue_FileIO_Match.Builder()
      .setConfiguration(configuration)
      .build();
}
@Test
@Category(NeedsRunner.class)
public void testMatchDisallowEmptyNonWildcard() throws IOException {
    // A non-wildcard pattern with no matches must fail even under ALLOW_IF_WILDCARD.
    p.apply(
        FileIO.match()
            .filepattern(tmpFolder.getRoot().getAbsolutePath() + "/blah")
            .withEmptyMatchTreatment(EmptyMatchTreatment.ALLOW_IF_WILDCARD));

    thrown.expectCause(isA(FileNotFoundException.class));
    p.run();
}
/**
 * Returns the snapshots strictly after {@code since}, in iteration order.
 *
 * <p>A null {@code since} means "from the beginning", i.e. every snapshot is
 * returned. If {@code since} is given but never encountered in the iterable,
 * an empty result is returned instead of a partial one.
 *
 * @param snapshots snapshots in commit order
 * @param since     exclusive starting point, or null for all snapshots
 * @return contexts for the snapshots newer than {@code since}
 */
@VisibleForTesting
Iterable<SnapshotContext> getSnapshots(Iterable<Snapshot> snapshots, SnapshotContext since) {
  List<SnapshotContext> newerSnapshots = Lists.newArrayList();
  boolean sinceReached = Objects.isNull(since);
  for (Snapshot snapshot : snapshots) {
    if (sinceReached) {
      newerSnapshots.add(toSnapshotContext(snapshot));
    } else if (snapshot.snapshotId() == since.getSnapshotId()) {
      // The matching snapshot itself is excluded; collection starts after it.
      sinceReached = true;
    }
  }
  return sinceReached ? newerSnapshots : Collections.emptyList();
}
@Test
public void testGetSnapshotContextsReturnsAllSnapshotsWhenGivenSnapshotIsNull() {
    HiveIcebergStorageHandler storageHandler = new HiveIcebergStorageHandler();

    // A null "since" snapshot means the full history is returned.
    Iterable<SnapshotContext> result = storageHandler.getSnapshots(asList(appendSnapshot, deleteSnapshot), null);
    List<SnapshotContext> resultList = IterableUtils.toList(result);
    assertThat(resultList.size(), is(2));

    // First entry mirrors the append snapshot.
    assertThat(resultList.get(0).getSnapshotId(), is(appendSnapshot.snapshotId()));
    assertThat(resultList.get(0).getOperation(), is(SnapshotContext.WriteOperationType.APPEND));
    assertThat(resultList.get(0).getAddedRowCount(), is(12L));
    assertThat(resultList.get(0).getDeletedRowCount(), is(0L));

    // Second entry mirrors the delete snapshot.
    assertThat(resultList.get(1).getSnapshotId(), is(deleteSnapshot.snapshotId()));
    assertThat(resultList.get(1).getOperation(), is(SnapshotContext.WriteOperationType.DELETE));
    assertThat(resultList.get(1).getAddedRowCount(), is(0L));
    assertThat(resultList.get(1).getDeletedRowCount(), is(3L));
}
/**
 * Tells whether the current JVM is Java 9 or newer.
 *
 * <p>Pre-9 runtimes report versions of the form "1.x"; from Java 9 on the
 * version string starts with the major version number directly.
 *
 * @return true when running on Java 9+
 */
public static boolean isRunningWithPostJava8() {
    String javaVersion = getCurrentRuntimeJavaVersion();
    boolean legacyVersionScheme = javaVersion.startsWith("1.");
    return !legacyVersionScheme;
}
@Test
public void verifyPostJava8() {
    // Only meaningful on Java 9+; skip on legacy "1.x" runtimes.
    assumeFalse("Test is for Java 9+ only", System.getProperty("java.version").startsWith("1."));
    assertTrue("isRunningWithPostJava8() should return true on Java 9 and above",
        JavaUtils.isRunningWithPostJava8());
}
/**
 * Parses a lenient boolean string.
 *
 * <p>Accepts (case-insensitively) "true"/"false", "1"/"0" and "yes"/"no".
 * A null input is treated as false.
 *
 * @param s candidate boolean string, possibly null
 * @return the parsed boolean value
 * @throws IllegalArgumentException when the string matches none of the accepted forms
 */
static boolean parseBoolean(String s) {
    if (s == null) {
        return false;
    }
    if ("true".equalsIgnoreCase(s) || "1".equalsIgnoreCase(s) || "yes".equalsIgnoreCase(s)) {
        return true;
    }
    if ("false".equalsIgnoreCase(s) || "0".equalsIgnoreCase(s) || "no".equalsIgnoreCase(s)) {
        return false;
    }
    throw new IllegalArgumentException(
            String.format("'%s' Was not a valid boolean value. Please use either 'true' or 'false'.", s));
}
@Test
void null_is_false() {
    // Absent input defaults to false rather than throwing.
    assertThat(BooleanString.parseBoolean(null), is(false));
}
/**
 * Builds the full store name for an extension: the scheme's store-name prefix
 * followed by "/" and the extension's name.
 *
 * @param scheme extension scheme providing the prefix
 * @param name   extension name
 * @return the store name, e.g. {@code <prefix>/<name>}
 */
public static String buildStoreName(Scheme scheme, String name) {
    String prefix = buildStoreNamePrefix(scheme);
    return prefix + "/" + name;
}
@Test
void buildStoreName() {
    // Grouped scheme: the prefix carries the group segment.
    var storeName = ExtensionStoreUtil.buildStoreName(scheme, "fake-name");
    assertEquals("/registry/fake.halo.run/fakes/fake-name", storeName);

    // Groupless scheme: the group segment is omitted from the prefix.
    storeName = ExtensionStoreUtil.buildStoreName(grouplessScheme, "fake-name");
    assertEquals("/registry/fakes/fake-name", storeName);
}
/**
 * Computes bytecode statistics for the given class by reading its own
 * {@code .class} resource from the classpath.
 *
 * @param cls class whose bytecode should be analyzed
 * @return statistics derived from the class file bytes
 * @throws RuntimeException wrapping any {@link IOException} raised while reading
 * @throws NullPointerException if the class resource cannot be located
 */
public static CodeStats getClassStats(Class<?> cls) {
    String resourceName = ReflectionUtils.getClassNameWithoutPackage(cls) + ".class";
    try (InputStream stream = Objects.requireNonNull(cls.getResourceAsStream(resourceName), resourceName)) {
        // readAllBytes() reads until EOF. The previous available()/read() pair
        // could under-read: available() is only an estimate and a single read()
        // is not guaranteed to fill the buffer.
        byte[] bytecodes = stream.readAllBytes();
        return getClassStats(bytecodes);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
@Test public void testGetClassStats() { CompileUnit unit = new CompileUnit( "demo.pkg1", "A", ("" + "package demo.pkg1;\n" + "public class A {\n" + " public static String hello() { return \"HELLO\"; }\n" + "}")); // Since janino is not called frequently, we test only 50 times. byte[] bytecodes = JaninoUtils.toBytecode(Thread.currentThread().getContextClassLoader(), unit) .values() .iterator() .next(); JaninoUtils.CodeStats classStats = JaninoUtils.getClassStats(bytecodes); System.out.println(classStats); Assert.assertTrue(classStats.methodsSize.containsKey("hello")); }
/**
 * Creates a fresh cursor over this structure's 8-byte keys, positioned before
 * the first slot.
 *
 * @return a new cursor instance
 */
@Override
public HashSlotCursor8byteKey cursor() {
    return new Cursor();
}
@Test
public void testCursor_withManyValues() {
    final int keyCount = 1000;
    for (int i = 1; i <= keyCount; i++) {
        insert((long) i);
    }

    // Walk the cursor, verifying each slot and ticking off every key visited.
    boolean[] seen = new boolean[keyCount];
    HashSlotCursor8byteKey cursor = hsa.cursor();
    while (cursor.advance()) {
        long key = cursor.key();
        verifyValue(key, cursor.valueAddress());
        seen[((int) key) - 1] = true;
    }

    // Every inserted key must have been visited by the cursor.
    for (int i = 0; i < keyCount; i++) {
        assertTrue("Failed to encounter key " + i, seen[i]);
    }
}
/**
 * Serves metric values for the component addressed by the request.
 *
 * <p>Triggers a metric fetch, then either lists the available metric names
 * (when no filter is given) or returns the values of the requested metrics.
 */
@Override
protected final CompletableFuture<MetricCollectionResponseBody> handleRequest(
        @Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway)
        throws RestHandlerException {
    metricFetcher.update();

    final MetricStore.ComponentMetricStore componentMetricStore =
            getComponentMetricStore(request, metricFetcher.getMetricStore());
    if (componentMetricStore == null || componentMetricStore.metrics == null) {
        // Unknown component or no metrics collected yet: answer with an empty list.
        return CompletableFuture.completedFuture(
                new MetricCollectionResponseBody(Collections.emptyList()));
    }

    final Set<String> requestedMetrics =
            new HashSet<>(request.getQueryParameter(MetricsFilterParameter.class));
    if (requestedMetrics.isEmpty()) {
        // No filter: expose the names of all available metrics.
        return CompletableFuture.completedFuture(
                new MetricCollectionResponseBody(getAvailableMetrics(componentMetricStore)));
    }
    final List<Metric> metrics = getRequestedMetrics(componentMetricStore, requestedMetrics);
    return CompletableFuture.completedFuture(new MetricCollectionResponseBody(metrics));
}
@Test
void testGetMetrics() throws Exception {
    // Ask the handler for one specific metric by name.
    final CompletableFuture<MetricCollectionResponseBody> completableFuture =
            testMetricsHandler.handleRequest(
                    HandlerRequest.resolveParametersAndCreate(
                            EmptyRequestBody.getInstance(),
                            new TestMessageParameters(),
                            Collections.emptyMap(),
                            Collections.singletonMap(
                                    METRICS_FILTER_QUERY_PARAM,
                                    Collections.singletonList(TEST_METRIC_NAME)),
                            Collections.emptyList()),
                    mockDispatcherGateway);

    assertThat(completableFuture).isDone();

    // Exactly the requested metric comes back, carrying its current value.
    final MetricCollectionResponseBody responseBody = completableFuture.get();
    assertThat(responseBody.getMetrics()).hasSize(1);

    final Metric metric = responseBody.getMetrics().iterator().next();
    assertThat(metric.getId()).isEqualTo(TEST_METRIC_NAME);
    assertThat(metric.getValue()).isEqualTo(Integer.toString(TEST_METRIC_VALUE));
}