focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Records a successful response on the given upstream: accumulates the elapsed
 * milliseconds since {@code beginTime} and bumps the success counter.
 */
private void successResponseTrigger(final Upstream upstream) {
    // Elapsed wall-clock time for this request, accumulated across calls.
    final long elapsedMillis = System.currentTimeMillis() - beginTime;
    upstream.getSucceededElapsed().addAndGet(elapsedMillis);
    upstream.getSucceeded().incrementAndGet();
}
/**
 * Verifies that the private successResponseTrigger(Upstream) increments the
 * upstream's success counter. beginTime is forced to 0 via reflection so the
 * elapsed-time accumulation is deterministic.
 */
@Test
public void successResponseTriggerTest() throws Exception {
    dividePlugin = DividePlugin.class.newInstance();
    // Force the private beginTime field so the method can compute an elapsed time.
    Field field = DividePlugin.class.getDeclaredField("beginTime");
    field.setAccessible(true);
    field.set(dividePlugin, 0L);
    // The method under test is private, so it is invoked reflectively.
    Method method = DividePlugin.class.getDeclaredMethod("successResponseTrigger", Upstream.class);
    method.setAccessible(true);
    Upstream upstream = Upstream.builder()
        .url("upstream")
        .build();
    method.invoke(dividePlugin, upstream);
    assertEquals(1, upstream.getSucceeded().get());
}
/**
 * Runs one preemption editing pass: refreshes configuration if needed, then
 * walks the queue hierarchy against a cloned snapshot of the cluster resource
 * and preempts or kills containers as required. Synchronized so passes never
 * overlap.
 */
@Override
public synchronized void editSchedule() {
    updateConfigIfNeeded();
    final long passStartTime = clock.getTime();
    final CSQueue rootQueue = scheduler.getRootQueue();
    // Work on a clone so the live cluster resource object is never mutated.
    final Resource clusterSnapshot = Resources.clone(scheduler.getClusterResource());
    containerBasedPreemptOrKill(rootQueue, clusterSnapshot);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Total time used=" + (clock.getTime() - passStartTime) + " ms.");
    }
}
/**
 * Verifies that disabling preemption on a parent queue (queueA) is inherited
 * by all of its children: after disabling, none of the apps under queueA's
 * subtree (or elsewhere) receive preemption requests.
 */
@Test
public void testPerQueueDisablePreemptionInheritParent() {
    int[][] qData = new int[][] {
        //  /     A                      E
        //        B     C     D          F     G     H
        {1000,  500,  200,  200,  100,  500,  200,  200,  100 },  // abs (guar)
        {1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000 },  // maxCap
        {1000,  700,    0,  350,  350,  300,    0,  200,  100 },  // used
        { 200,    0,    0,    0,    0,  200,  200,    0,    0 },  // pending
        {   0,    0,    0,    0,    0,    0,    0,    0,    0 },  // reserved
        //      appA  appB              appC  appD  appE
        {   5,    2,    0,    1,    1,    3,    1,    1,    1 },  // apps
        {  -1,   -1,    1,    1,    1,   -1,    1,    1,    1 },  // req granularity
        {   2,    3,    0,    0,    0,    3,    0,    0,    0 },  // subqueues
    };
    ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
    policy.editSchedule();
    // With all queues preemptable, resources should be taken from queueC(appA)
    // and queueD(appB). Resources taken more from queueD(appB) than
    // queueC(appA) because it's over its capacity by a larger percentage.
    verify(mDisp, times(17)).handle(argThat(new IsPreemptionRequestFor(appA)));
    verify(mDisp, times(183)).handle(argThat(new IsPreemptionRequestFor(appB)));
    // Turn off preemption for queueA and its children. queueF(appC)'s request
    // should starve.
    setup(); // Call setup() to reset mDisp
    conf.setPreemptionDisabled(QUEUE_A, true);
    ProportionalCapacityPreemptionPolicy policy2 = buildPolicy(qData);
    policy2.editSchedule();
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); // queueC
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); // queueD
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appD))); // queueG
    verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appE))); // queueH
}
/**
 * Routes the request by HTTP method and translates each known domain exception
 * into its dedicated HTTP error response. The catch order is significant:
 * specific exception types must precede the final catch-all {@code Exception},
 * which is the only clause that logs (unexpected failures).
 */
@Override
public HttpResponse handle(HttpRequest request) {
    log.log(Level.FINE, () -> request.getMethod() + " " + request.getUri().toString());
    try {
        return switch (request.getMethod()) {
            case POST -> handlePOST(request);
            case GET -> handleGET(request);
            case PUT -> handlePUT(request);
            case DELETE -> handleDELETE(request);
            default -> createErrorResponse(request.getMethod());
        };
    } catch (NotFoundException | com.yahoo.vespa.config.server.NotFoundException e) {
        return HttpErrorResponse.notFoundError(getMessage(e, request));
    } catch (ActivationConflictException e) {
        return HttpErrorResponse.conflictWhenActivating(getMessage(e, request));
    } catch (InvalidApplicationException e) {
        return HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
    } catch (IllegalArgumentException | UnsupportedOperationException e) {
        return HttpErrorResponse.badRequest(getMessage(e, request));
    } catch (NodeAllocationException e) {
        // Retryable allocation failures get their own error; permanent ones are
        // reported as an invalid application package.
        return e.retryable()
                ? HttpErrorResponse.nodeAllocationFailure(getMessage(e, request))
                : HttpErrorResponse.invalidApplicationPackage(getMessage(e, request));
    } catch (InternalServerException e) {
        return HttpErrorResponse.internalServerError(getMessage(e, request));
    } catch (UnknownVespaVersionException e) {
        return HttpErrorResponse.unknownVespaVersion(getMessage(e, request));
    } catch (RequestTimeoutException e) {
        return HttpErrorResponse.requestTimeout(getMessage(e, request));
    } catch (ApplicationLockException e) {
        return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
    } catch (ParentHostUnavailableException e) {
        return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
    } catch (CertificateNotReadyException e) {
        return HttpErrorResponse.certificateNotReady(getMessage(e, request));
    } catch (ConfigNotConvergedException e) {
        return HttpErrorResponse.configNotConverged(getMessage(e, request));
    } catch (LoadBalancerServiceException e) {
        return HttpErrorResponse.loadBalancerNotReady(getMessage(e, request));
    } catch (ReindexingStatusException e) {
        return HttpErrorResponse.reindexingStatusUnavailable(getMessage(e, request));
    } catch (PreconditionFailedException e) {
        return HttpErrorResponse.preconditionFailed(getMessage(e, request));
    } catch (QuotaExceededException e) {
        return HttpErrorResponse.quotaExceeded(getMessage(e, request));
    } catch (Exception e) {
        log.log(Level.WARNING, "Unexpected exception handling a config server request", e);
        return HttpErrorResponse.internalServerError(getMessage(e, request));
    }
}
/**
 * An InvalidApplicationException thrown by the handler must map to HTTP 400
 * with error-code INVALID_APPLICATION_PACKAGE and the exception message in the
 * rendered JSON body.
 */
@Test
public void testResponse() throws IOException {
    final String message = "failed";
    HttpHandler httpHandler = new HttpTestHandler(new InvalidApplicationException(message));
    HttpResponse response = httpHandler.handle(HttpRequest.createTestRequest("foo", com.yahoo.jdisc.http.HttpRequest.Method.GET));
    assertEquals(Response.Status.BAD_REQUEST, response.getStatus());
    // Render the response body and inspect the JSON error payload.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    response.render(baos);
    Slime data = SlimeUtils.jsonToSlime(baos.toByteArray());
    assertEquals(HttpErrorResponse.ErrorCode.INVALID_APPLICATION_PACKAGE.name(), data.get().field("error-code").asString());
    assertEquals(message, data.get().field("message").asString());
}
static double estimatePixelCount(final Image image, final double widthOverHeight) { if (image.getHeight() == HEIGHT_UNKNOWN) { if (image.getWidth() == WIDTH_UNKNOWN) { // images whose size is completely unknown will be in their own subgroups, so // any one of them will do, hence returning the same value for all of them return 0; } else { return image.getWidth() * image.getWidth() / widthOverHeight; } } else if (image.getWidth() == WIDTH_UNKNOWN) { return image.getHeight() * image.getHeight() * widthOverHeight; } else { return image.getHeight() * image.getWidth(); } }
/**
 * When both dimensions are known the estimate is exactly width*height and the
 * aspect-ratio hint is ignored (hence identical results for ratios 1.0/12.0).
 */
@Test
public void testEstimatePixelCountAllKnown() {
    assertEquals(20000.0, estimatePixelCount(img(100, 200), 1.0), 0.0);
    assertEquals(20000.0, estimatePixelCount(img(100, 200), 12.0), 0.0);
    assertEquals(100.0, estimatePixelCount(img(100, 1), 12.0), 0.0);
    assertEquals(100.0, estimatePixelCount(img(1, 100), 0.5), 0.0);
}
/**
 * Parses raw FTP LIST/STAT reply lines into an AttributedList of paths under
 * the given directory. Handles lenient skipping of the directory's own entry
 * (STAT quirks), symlink target resolution, size/owner/group/permission and
 * timestamp attributes. Throws FTPInvalidListException when no line parsed.
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<Path>();
    // At least one entry successfully parsed
    boolean success = false;
    // Call hook for those implementors which need to perform some action upon the list after it has been created
    // from the server stream, but before any clients see the list
    parser.preParse(replies);
    for(String line : replies) {
        final FTPFile f = parser.parseFTPEntry(line);
        if(null == f) {
            continue;
        }
        final String name = f.getName();
        if(!success) {
            if(lenient) {
                // Workaround for #2410. STAT only returns ls of directory itself
                // Workaround for #2434. STAT of symbolic link directory only lists the directory itself.
                if(directory.getName().equals(name)) {
                    log.warn(String.format("Skip %s matching parent directory name", f.getName()));
                    continue;
                }
                if(name.contains(String.valueOf(Path.DELIMITER))) {
                    if(!name.startsWith(directory.getAbsolute() + Path.DELIMITER)) {
                        // Workaround for #2434.
                        log.warn(String.format("Skip %s with delimiter in name", name));
                        continue;
                    }
                }
            }
        }
        success = true;
        // Never surface the current/parent directory entries.
        if(name.equals(".") || name.equals("..")) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Skip %s", f.getName()));
            }
            continue;
        }
        final Path parsed = new Path(directory, PathNormalizer.name(name),
            f.getType() == FTPFile.DIRECTORY_TYPE ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file));
        switch(f.getType()) {
            case FTPFile.SYMBOLIC_LINK_TYPE:
                parsed.setType(EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Symbolic link target may be an absolute or relative path
                final String target = f.getLink();
                if(StringUtils.isBlank(target)) {
                    // No target known: demote the entry back to a plain file.
                    log.warn(String.format("Missing symbolic link target for %s", parsed));
                    final EnumSet<Path.Type> type = parsed.getType();
                    type.remove(Path.Type.symboliclink);
                }
                else if(StringUtils.startsWith(target, String.valueOf(Path.DELIMITER))) {
                    parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                }
                else if(StringUtils.equals("..", target)) {
                    parsed.setSymlinkTarget(directory);
                }
                else if(StringUtils.equals(".", target)) {
                    parsed.setSymlinkTarget(parsed);
                }
                else {
                    parsed.setSymlinkTarget(new Path(directory, target, EnumSet.of(Path.Type.file)));
                }
                break;
        }
        if(parsed.isFile()) {
            parsed.attributes().setSize(f.getSize());
        }
        parsed.attributes().setOwner(f.getUser());
        parsed.attributes().setGroup(f.getGroup());
        // Translate the FTPFile permission bits into user/group/other actions.
        Permission.Action u = Permission.Action.none;
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.READ_PERMISSION)) {
            u = u.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.WRITE_PERMISSION)) {
            u = u.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            u = u.or(Permission.Action.execute);
        }
        Permission.Action g = Permission.Action.none;
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.READ_PERMISSION)) {
            g = g.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.WRITE_PERMISSION)) {
            g = g.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            g = g.or(Permission.Action.execute);
        }
        Permission.Action o = Permission.Action.none;
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.READ_PERMISSION)) {
            o = o.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.WRITE_PERMISSION)) {
            o = o.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            o = o.or(Permission.Action.execute);
        }
        final Permission permission = new Permission(u, g, o);
        if(f instanceof FTPExtendedFile) {
            permission.setSetuid(((FTPExtendedFile) f).isSetuid());
            permission.setSetgid(((FTPExtendedFile) f).isSetgid());
            permission.setSticky(((FTPExtendedFile) f).isSticky());
        }
        if(!Permission.EMPTY.equals(permission)) {
            parsed.attributes().setPermission(permission);
        }
        final Calendar timestamp = f.getTimestamp();
        if(timestamp != null) {
            parsed.attributes().setModificationDate(timestamp.getTimeInMillis());
        }
        children.add(parsed);
    }
    if(!success) {
        throw new FTPInvalidListException(children);
    }
    return children;
}
/**
 * MD-1766: STAT listings whose 'l'-typed entries carry no "->" target must
 * still parse as plain entries — no symlink target, not flagged as symlinks.
 * The commented lines show what the full server output (with targets) looked like.
 */
@Test
public void testParseMD1766() throws Exception {
    final List<String> lines = new FTPStatListService(null, null).parse(
        211, new String[]{
            "lrwxrwxrwx 1 sss 7 Nov 2 2015 bin",
            "lrwxrwxrwx 1 sss 6 Nov 2 2015 home1",
            "lrwxrwxrwx 1 sss 15 Nov 2 2015 vvvdev"
            // "lrwxrwxrwx 1 USER SSS 7 Nov 02 2015 bin -> script/",
            // "lrwxrwxrwx 1 USER SSS 6 Nov 02 2015 home1 -> /home1",
            // "lrwxrwxrwx 1 USER SSS 15 Nov 02 2015 vvvdev -> /fff/dev/vvvdev"
        });
    final AttributedList<Path> list = new FTPListResponseReader(new FTPParserSelector().getParser("UNIX Type: L8 Version: BSD-44"), true)
        .read(new Path("/", EnumSet.of(Path.Type.directory)), lines);
    assertEquals(3, list.size());
    assertNull(list.get(0).getSymlinkTarget());
    assertFalse(list.get(0).isSymbolicLink());
    assertNull(list.get(1).getSymlinkTarget());
    assertFalse(list.get(1).isSymbolicLink());
    assertNull(list.get(2).getSymlinkTarget());
    assertFalse(list.get(2).isSymbolicLink());
}
/**
 * Throws IncompatibleWindowException unless {@code other} is compatible with
 * this window fn (same size and offset for FixedWindows).
 */
@Override
public void verifyCompatibility(WindowFn<?, ?> other) throws IncompatibleWindowException {
    if (this.isCompatible(other)) {
        return;
    }
    final String reason = String.format(
        "Only %s objects with the same size and offset are compatible.",
        FixedWindows.class.getSimpleName());
    throw new IncompatibleWindowException(other, reason);
}
/**
 * Same-sized windows are compatible (first call must not throw); the
 * thrown.expect rule must be armed BEFORE the incompatible second call.
 */
@Test
public void testVerifyCompatibility() throws IncompatibleWindowException {
    FixedWindows.of(Duration.millis(10)).verifyCompatibility(FixedWindows.of(Duration.millis(10)));
    thrown.expect(IncompatibleWindowException.class);
    FixedWindows.of(Duration.millis(10)).verifyCompatibility(FixedWindows.of(Duration.millis(20)));
}
/**
 * Compiles a native SQL fragment into a SpEL-backed evaluator. JDBC-style '?'
 * placeholders are rewritten into named variables _arg0, _arg1, ... which are
 * bound from the positional args at evaluation time. Evaluation failures are
 * logged and counted; after 1024 consecutive failures the evaluator short-
 * circuits to null (circuit breaker). Parse failures fall back to spelError.
 *
 * @param sql native SQL fragment, possibly containing '?' placeholders
 * @return an evaluator of (mapping, positional args, row values) -> value
 */
@Override
protected Function3<EntityColumnMapping, Object[], Map<String, Object>, Object> compile(String sql) {
    // Rewrite each '?' into a distinct SpEL variable name: _arg0, _arg1, ...
    StringBuilder builder = new StringBuilder(sql.length());
    int argIndex = 0;
    for (int i = 0; i < sql.length(); i++) {
        char c = sql.charAt(i);
        if (c == '?') {
            builder.append("_arg").append(argIndex++);
        } else {
            builder.append(c);
        }
    }
    try {
        SpelExpressionParser parser = new SpelExpressionParser();
        Expression expression = parser.parseExpression(builder.toString());
        AtomicLong errorCount = new AtomicLong();
        return (mapping, args, object) -> {
            // Circuit breaker: stop evaluating after repeated failures.
            if (errorCount.get() > 1024) {
                return null;
            }
            object = createArguments(mapping, object);
            if (args != null && args.length != 0) {
                int index = 0;
                for (Object parameter : args) {
                    // BUGFIX: index was never incremented, so every positional
                    // argument overwrote "_arg0" and "_arg1"+ were never bound.
                    object.put("_arg" + index++, parameter);
                }
            }
            StandardEvaluationContext context = SHARED_CONTEXT.get();
            try {
                context.setRootObject(object);
                Object val = expression.getValue(context);
                // A successful evaluation resets the failure counter.
                errorCount.set(0);
                return val;
            } catch (Throwable err) {
                log.warn("invoke native sql [{}] value error", sql, err);
                errorCount.incrementAndGet();
            } finally {
                // Shared context: always clear the root object after use.
                context.setRootObject(null);
            }
            return null;
        };
    } catch (Throwable error) {
        return spelError(sql, error);
    }
}
/**
 * Snake-case column references resolve against camelCase properties, and a
 * single '?' placeholder binds to _arg0: 10 + 2 == 12 in both cases.
 */
@Test
void testSnake() {
    SpelSqlExpressionInvoker invoker = new SpelSqlExpressionInvoker();
    EntityColumnMapping mapping = Mockito.mock(EntityColumnMapping.class);
    {
        Function3<EntityColumnMapping, Object[], Map<String, Object>, Object> func = invoker.compile("count_value + ?");
        assertEquals(12, func.apply(mapping, new Object[]{2}, Collections.singletonMap("countValue", 10)));
    }
    {
        // Column name starting with '_' resolves through the mapping lookup.
        Mockito.when(mapping.getPropertyByColumnName("_count_v"))
            .thenReturn(java.util.Optional.of("countValue"));
        Function3<EntityColumnMapping, Object[], Map<String, Object>, Object> func = invoker.compile("_count_v + ?");
        assertEquals(12, func.apply(mapping, new Object[]{2}, Collections.singletonMap("countValue", 10)));
    }
}
/** Discards the cached query context so the next statement starts fresh. */
public void clearQueryContext() {
    queryContext = null;
}
/** clearQueryContext() must null out a previously-set query context. */
@Test
void assertClearQueryContext() {
    connectionSession.setQueryContext(mock(QueryContext.class));
    assertNotNull(connectionSession.getQueryContext());
    connectionSession.clearQueryContext();
    assertNull(connectionSession.getQueryContext());
}
/**
 * Executes an INSERT VALUES statement: resolves the target data source,
 * validates the column list, builds a Kafka record and produces it. Kafka
 * authorization failures are normalized into KsqlExceptions with descriptive
 * root causes so all authorization errors read consistently.
 */
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final InsertValues insertValues = statement.getStatement();
    final MetaStore metaStore = executionContext.getMetaStore();
    final KsqlConfig config = statement.getSessionConfig().getConfig(true);
    final DataSource dataSource = getDataSource(config, metaStore, insertValues);
    validateInsert(insertValues.getColumns(), dataSource);
    final ProducerRecord<byte[], byte[]> record = buildRecord(statement, metaStore, dataSource, serviceContext);
    try {
        producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
    } catch (final TopicAuthorizationException e) {
        // TopicAuthorizationException does not give much detailed information about why it failed,
        // except which topics are denied. Here we just add the ACL to make the error message
        // consistent with other authorization error messages.
        final Exception rootCause = new KsqlTopicAuthorizationException(
            AclOperation.WRITE,
            e.unauthorizedTopics()
        );
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
    } catch (final ClusterAuthorizationException e) {
        // ClusterAuthorizationException is thrown when using idempotent producers
        // and either a topic write permission or a cluster-level idempotent write
        // permission (only applicable for broker versions no later than 2.8) is
        // missing. In this case, we include additional context to help the user
        // distinguish this type of failure from other permissions exceptions
        // such as the ones thrown above when TopicAuthorizationException is caught.
        throw new KsqlException(
            createInsertFailedExceptionMessage(insertValues),
            createClusterAuthorizationExceptionRootCause(dataSource)
        );
    } catch (final KafkaException e) {
        if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
            // The error message thrown when an idempotent producer is missing permissions
            // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
            // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
            // ksqlDB handles these two the same way, accordingly.
            // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
            throw new KsqlException(
                createInsertFailedExceptionMessage(insertValues),
                createClusterAuthorizationExceptionRootCause(dataSource)
            );
        } else {
            throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
        }
    } catch (final Exception e) {
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
}
/**
 * INSERT with schema inference against a registry schema carrying custom
 * metadata: the key and value must be serialized and produced to the topic.
 */
@Test
public void shouldSupportInsertIntoWithSchemaInferenceMatchAndCustomMetadata() throws Exception {
    // Given:
    when(srClient.getLatestSchemaMetadata(Mockito.any()))
        .thenReturn(new SchemaMetadata(1, 1, ""));
    when(srClient.getSchemaById(1))
        .thenReturn(new AvroSchema(AVRO_RAW_ONE_KEY_SCHEMA_WITH_CUSTOM_METADATA));
    givenDataSourceWithSchema(
        TOPIC_NAME,
        SCHEMA,
        SerdeFeatures.of(SerdeFeature.SCHEMA_INFERENCE, SerdeFeature.WRAP_SINGLES),
        SerdeFeatures.of(),
        FormatInfo.of(FormatFactory.AVRO.name()),
        FormatInfo.of(FormatFactory.AVRO.name()),
        false,
        false);
    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        ImmutableList.of(K0, COL0),
        ImmutableList.of(
            new StringLiteral("foo"),
            new StringLiteral("bar"))
    );
    // When:
    executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
    // Then:
    verify(keySerializer).serialize(TOPIC_NAME, genericKey("foo"));
    verify(valueSerializer).serialize(TOPIC_NAME, genericRow("bar", null));
    verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
}
/**
 * Returns the void serde for the NONE format. Validates user-supplied format
 * properties and serde features first, and rejects any schema that declares
 * columns, since this format carries no data.
 */
@Override
public Serde<List<?>> getSerde(
        final PersistenceSchema schema,
        final Map<String, String> formatProperties,
        final KsqlConfig ksqlConfig,
        final Supplier<SchemaRegistryClient> srClientFactory,
        final boolean isKey) {
    FormatProperties.validateProperties(name(), formatProperties, getSupportedProperties());
    SerdeUtils.throwOnUnsupportedFeatures(schema.features(), supportedFeatures());
    // A void serde cannot represent any columns.
    if (!schema.columns().isEmpty()) {
        final String detail = "The '" + NAME
            + "' format can only be used when no columns are defined. Got: " + schema.columns();
        throw new KsqlException(detail);
    }
    return new KsqlVoidSerde<>();
}
/** UNWRAP_SINGLES is not a supported serde feature here; getSerde must reject it. */
@Test(expected = IllegalArgumentException.class)
public void shouldThrowOnUnsupportedFeatures() {
    // Given:
    when(schema.features()).thenReturn(SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES));
    // When:
    format.getSerde(schema, formatProps, ksqlConfig, srClientFactory, false);
}
/**
 * Kicks off an async offset reset for any partitions awaiting one; does
 * nothing when no partition needs a reset.
 */
public void resetPositionsIfNeeded() {
    final Map<TopicPartition, Long> resetTimestamps =
        offsetFetcherUtils.getOffsetResetTimestamp();
    // Avoid issuing an empty ListOffsets request.
    if (!resetTimestamps.isEmpty()) {
        resetPositionsAsync(resetTimestamps);
    }
}
/**
 * A ListOffsets response carrying a newer leader epoch (2 > 1) must complete
 * the offset reset and also trigger a metadata update recording that epoch.
 */
@Test
public void testListOffsetUpdateEpoch() {
    buildFetcher();
    // Set up metadata with leaderEpoch=1
    subscriptions.assignFromUser(singleton(tp0));
    MetadataResponse metadataWithLeaderEpochs = RequestTestUtils.metadataUpdateWithIds("kafka-cluster", 1,
        Collections.emptyMap(), singletonMap(topicName, 4), tp -> 1, topicIds);
    client.updateMetadata(metadataWithLeaderEpochs);
    // Reset offsets to trigger ListOffsets call
    subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);
    // Now we see a ListOffsets with leaderEpoch=2 epoch, we trigger a metadata update
    client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, 1),
        listOffsetResponse(tp0, Errors.NONE, 1L, 5L, 2));
    offsetFetcher.resetPositionsIfNeeded();
    consumerClient.pollNoWakeup();
    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    assertTrue(metadata.updateRequested());
    assertOptional(metadata.lastSeenLeaderEpoch(tp0), epoch -> assertEquals((long) epoch, 2));
}
/**
 * Returns the 1-based position of {@code str} among {@code args} (first match
 * wins), or 0 when absent or when either input is null. Mirrors SQL FIELD().
 */
@Udf
public int field(
    @UdfParameter final String str,
    @UdfParameter final String... args
) {
    if (str == null || args == null) {
        return 0;
    }
    int position = 1;
    for (final String candidate : args) {
        if (str.equals(candidate)) {
            return position;
        }
        position++;
    }
    return 0;
}
/**
 * Calling the varargs UDF with no candidates passes an empty array (not
 * null), so the loop never matches and the position is 0.
 */
@Test
public void shouldNotFindIfNoArgs() {
    // When:
    final int pos = field.field("missing");
    // Then:
    assertThat(pos, equalTo(0));
}
/**
 * Forwards the flushed record downstream with its timestamp replaced by the
 * session window end, temporarily switching the context's current node and
 * always restoring the previous node afterwards.
 */
@Override
public void apply(final Record<Windowed<KOut>, Change<VOut>> record) {
    @SuppressWarnings("rawtypes")
    final ProcessorNode previousNode = context.currentNode();
    context.setCurrentNode(myNode);
    try {
        // The window end becomes the record timestamp on forward.
        final long windowEnd = record.key().window().end();
        context.forward(record.withTimestamp(windowEnd));
    } finally {
        context.setCurrentNode(previousNode);
    }
}
/**
 * The listener must forward the record with its timestamp replaced by the
 * window end (42L in becomes 73L out), and restore the current node (null)
 * twice: once to set, once in the finally.
 */
@Test
public void shouldForwardKeyNewValueOldValueAndTimestamp() {
    @SuppressWarnings("unchecked")
    final InternalProcessorContext<Windowed<String>, Change<String>> context = mock(InternalProcessorContext.class);
    doNothing().when(context).forward(
        new Record<>(
            new Windowed<>("key", new SessionWindow(21L, 73L)),
            new Change<>("newValue", "oldValue"),
            73L));
    new SessionCacheFlushListener<>(context).apply(
        new Record<>(
            new Windowed<>("key", new SessionWindow(21L, 73L)),
            new Change<>("newValue", "oldValue"),
            42L));
    verify(context, times(2)).setCurrentNode(null);
}
/** Returns a binary combine fn that sums {@code Integer} values. */
public static Combine.BinaryCombineIntegerFn ofIntegers() {
    return new SumIntegerFn();
}
/** 1 + 2 + 3 + 4 == 10 through the integer sum combiner. */
@Test
public void testSumIntegerFn() {
    testCombineFn(Sum.ofIntegers(), Lists.newArrayList(1, 2, 3, 4), 10);
}
/**
 * Registers schemas for CREATE AS SELECT / CREATE SOURCE statements, then
 * strips the schema id from the session config. KsqlStatementExceptions pass
 * through untouched; other KsqlExceptions are wrapped with the masked
 * statement text for safe error reporting.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
    try {
        if (statement.getStatement() instanceof CreateAsSelect) {
            registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
        } else if (statement.getStatement() instanceof CreateSource) {
            registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
        }
    } catch (final KsqlStatementException e) {
        // Already carries statement context; rethrow as-is.
        throw e;
    } catch (final KsqlException e) {
        throw new KsqlStatementException(
            ErrorMessageUtil.buildErrorMessage(e),
            statement.getMaskedStatementText(),
            e.getCause());
    }
    // Remove schema id from SessionConfig
    return stripSchemaIdConfig(statement);
}
/**
 * KEY_SCHEMA_ID resolving to an AVRO schema while the statement declares
 * key_format='PROTOBUF' must fail with a format/schema-type mismatch.
 */
@Test
public void shouldThrowInconsistentKeySchemaTypeExceptionWithOverrideSchema() {
    // Given:
    final SchemaAndId schemaAndId = SchemaAndId.schemaAndId(SCHEMA.value(), AVRO_SCHEMA, 1);
    givenStatement("CREATE STREAM source (id int key, f1 varchar) "
        + "WITH ("
        + "kafka_topic='expectedName', "
        + "key_format='PROTOBUF', "
        + "value_format='JSON', "
        + "key_schema_id=1, "
        + "partitions=1"
        + ");", Pair.of(schemaAndId, null));
    // When:
    final Exception e = assertThrows(
        KsqlStatementException.class,
        () -> injector.inject(statement)
    );
    // Then:
    assertThat(e.getMessage(), containsString("Format and fetched schema type using "
        + "KEY_SCHEMA_ID 1 are different. Format: [PROTOBUF], Fetched schema type: [AVRO]."));
}
@Override public UUID generateId() { long counterValue = counter.incrementAndGet(); if (counterValue == MAX_COUNTER_VALUE) { throw new CucumberException( "Out of " + IncrementingUuidGenerator.class.getSimpleName() + " capacity. Please generate using a new instance or use another " + UuidGenerator.class.getSimpleName() + "implementation."); } long leastSigBits = counterValue | 0x8000000000000000L; // set variant return new UUID(msb, leastSigBits); }
/**
 * Generators created under different classloader ids must produce different
 * UUIDs even after the epoch-time component is stripped out.
 */
@Test
void different_classloaderId_leads_to_different_uuid_when_ignoring_epoch_time() {
    // Given the two generators have different classloaderIds
    UuidGenerator generator1 = getUuidGeneratorFromOtherClassloader(1);
    UuidGenerator generator2 = getUuidGeneratorFromOtherClassloader(2);
    // When the UUIDs are generated
    UUID uuid1 = generator1.generateId();
    UUID uuid2 = generator2.generateId();
    // Then the UUIDs differ (comment fixed: it previously claimed "the same")
    assertNotEquals(removeEpochTime(uuid1), removeEpochTime(uuid2));
}
/**
 * FEEL count(list): returns the number of elements in the list as a
 * BigDecimal, or an InvalidParametersEvent error when the list is null.
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "list" ) List list) {
    if (list == null) {
        return FEELFnResult.ofError(
            new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    final BigDecimal count = BigDecimal.valueOf(list.size());
    return FEELFnResult.ofResult(count);
}
/** A null list must produce an InvalidParametersEvent error, not an NPE. */
@Test
void invokeParamListNull() {
    FunctionTestUtil.assertResultError(countFunction.invoke((List) null), InvalidParametersEvent.class);
}
/**
 * Returns the p-quantile (inverse CDF) of the exponential distribution:
 * -ln(1 - p) / lambda. p == 1 yields +Infinity.
 *
 * @throws IllegalArgumentException if p is outside [0, 1]
 */
@Override
public double quantile(double p) {
    if (p < 0.0 || p > 1.0) {
        throw new IllegalArgumentException("Invalid p: " + p);
    }
    // Closed-form inverse of F(x) = 1 - e^(-lambda * x).
    final double survival = 1 - p;
    return -Math.log(survival) / lambda;
}
/** Quantiles of Exp(lambda = 2): expected values are -ln(1 - p) / 2. */
@Test
public void testQuantile() {
    System.out.println("quantile");
    ExponentialDistribution instance = new ExponentialDistribution(2.0);
    instance.rand();
    assertEquals(0.05268026, instance.quantile(0.1), 1E-7);
    assertEquals(0.1783375, instance.quantile(0.3), 1E-7);
    assertEquals(0.3465736, instance.quantile(0.5), 1E-7);
    assertEquals(0.6019864, instance.quantile(0.7), 1E-7);
}
/**
 * Generates one HTML table per config group declared on the class via
 * {@code @ConfigGroups}, plus a trailing table (group == null) for options not
 * claimed by any group. Without the annotation, all documented options land in
 * a single null-group table. Returns an empty list when nothing is documented.
 */
@VisibleForTesting
static List<Tuple2<ConfigGroup, String>> generateTablesForClass(
        Class<?> optionsClass, Collection<OptionWithMetaInfo> optionWithMetaInfos) {
    ConfigGroups configGroups = optionsClass.getAnnotation(ConfigGroups.class);
    List<OptionWithMetaInfo> allOptions = selectOptionsToDocument(optionWithMetaInfos);
    if (allOptions.isEmpty()) {
        return Collections.emptyList();
    }
    List<Tuple2<ConfigGroup, String>> tables;
    if (configGroups != null) {
        tables = new ArrayList<>(configGroups.groups().length + 1);
        Tree tree = new Tree(configGroups.groups(), allOptions);
        for (ConfigGroup group : configGroups.groups()) {
            List<OptionWithMetaInfo> configOptions = tree.findConfigOptions(group);
            if (!configOptions.isEmpty()) {
                sortOptions(configOptions);
                tables.add(Tuple2.of(group, toHtmlTable(configOptions)));
            }
        }
        // Options not matched by any explicit group go into the default (null) table.
        List<OptionWithMetaInfo> configOptions = tree.getDefaultOptions();
        if (!configOptions.isEmpty()) {
            sortOptions(configOptions);
            tables.add(Tuple2.of(null, toHtmlTable(configOptions)));
        }
    } else {
        sortOptions(allOptions);
        tables = Collections.singletonList(Tuple2.of(null, toHtmlTable(allOptions)));
    }
    return tables;
}
/** Golden-string check of the generated HTML options table for TestConfigGroup. */
@Test
void testCreatingDescription() {
    final String expectedTable =
        "<table class=\"configuration table table-bordered\">\n"
            + " <thead>\n"
            + " <tr>\n"
            + " <th class=\"text-left\" style=\"width: 20%\">Key</th>\n"
            + " <th class=\"text-left\" style=\"width: 15%\">Default</th>\n"
            + " <th class=\"text-left\" style=\"width: 10%\">Type</th>\n"
            + " <th class=\"text-left\" style=\"width: 55%\">Description</th>\n"
            + " </tr>\n"
            + " </thead>\n"
            + " <tbody>\n"
            + " <tr>\n"
            + " <td><h5>first.option.a</h5></td>\n"
            + " <td style=\"word-wrap: break-word;\">2</td>\n"
            + " <td>Integer</td>\n"
            + " <td>This is example description for the first option.</td>\n"
            + " </tr>\n"
            + " <tr>\n"
            + " <td><h5>second.option.a</h5></td>\n"
            + " <td style=\"word-wrap: break-word;\">(none)</td>\n"
            + " <td>String</td>\n"
            + " <td>This is long example description for the second option.</td>\n"
            + " </tr>\n"
            + " </tbody>\n"
            + "</table>\n";
    // Only the first generated table (index 0) is checked; .f1 is the HTML string.
    final String htmlTable =
        ConfigOptionsDocGenerator.generateTablesForClass(
                TestConfigGroup.class,
                ConfigurationOptionLocator.extractConfigOptions(TestConfigGroup.class))
            .get(0)
            .f1;
    assertThat(htmlTable).isEqualTo(expectedTable);
}
/** Parses a JSON document into events using the default BasicEventFactory. */
public static Event[] fromJson(final String json) throws IOException {
    return fromJson(json, BasicEventFactory.INSTANCE);
}
/** Blank input yields an empty event array rather than a parse error. */
@Test
public void testFromJsonWithBlankString() throws Exception {
    Event[] events = Event.fromJson(" ");
    assertEquals(0, events.length);
}
/**
 * Serves the Prometheus metrics snapshot as text/plain on GET; any other HTTP
 * method is answered with 405 and an empty body.
 */
@Override
public void handleRequest(HttpServerExchange httpServerExchange) {
    final boolean isGet =
        httpServerExchange.getRequestMethod().equals(HttpString.tryFromString("GET"));
    if (isGet) {
        httpServerExchange.setStatusCode(HTTP_OK);
        httpServerExchange.getResponseHeaders().put(Headers.CONTENT_TYPE, "text/plain");
        // Scrape the registry lazily, only for requests we actually serve.
        var metricsContents = registry.scrape();
        httpServerExchange.getResponseSender().send(metricsContents);
    } else {
        httpServerExchange.setStatusCode(HTTP_METHOD_NOT_ALLOWED);
        httpServerExchange.getResponseSender().send("");
    }
}
/** A non-GET request (POST) must be answered with 405 Method Not Allowed. */
@Test
void methodNotAllowed() {
    // given: a mocked POST exchange
    var httpServerExchange = mock(HttpServerExchange.class);
    var sender = mock(Sender.class);
    when(httpServerExchange.getResponseSender()).thenReturn(sender);
    when(httpServerExchange.getRequestMethod()).thenReturn(HttpString.tryFromString("POST"));
    // given: an endpoint backed by a fresh registry
    var prometheusMeterRegistry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);
    var sut = new MetricsEndpoint(prometheusMeterRegistry);
    // when
    sut.handleRequest(httpServerExchange);
    // then
    verify(httpServerExchange).setStatusCode(Status.METHOD_NOT_ALLOWED.getStatusCode());
}
/** Delegates size determination to the SizeDeterminer, which invokes the callback when the size is known. */
@Override
public final void getSize(@NonNull SizeReadyCallback cb) {
    sizeDeterminer.getSize(cb);
}
/**
 * With concrete LayoutParams dimensions the size is already known, so the
 * callback must fire synchronously during getSize().
 */
@Test
public void testSizeCallbackIsCalledSynchronouslyIfLayoutParamsConcreteSizeSet() {
    int dimens = 444;
    LayoutParams layoutParams = new FrameLayout.LayoutParams(dimens, dimens);
    view.setLayoutParams(layoutParams);
    view.requestLayout();
    target.getSize(cb);
    verify(cb).onSizeReady(eq(dimens), eq(dimens));
}
public MessageType convert(Descriptors.Descriptor descriptor) { // Remember classes seen with depths to avoid cycles. int depth = 0; ImmutableSetMultimap<String, Integer> seen = ImmutableSetMultimap.of(descriptor.getFullName(), depth); LOG.trace("convert:\n{}", descriptor.toProto()); MessageType messageType = convertFields(Types.buildMessage(), descriptor.getFields(), seen, depth) .named(descriptor.getFullName()); LOG.debug("Converter info:\n{}\n was converted to:\n{}", descriptor.toProto(), messageType); return messageType; }
/**
 * Checks schema fanout as the recursion depth limit grows for wide, binary
 * and struct message trees: wide grows linearly, the other two follow
 * size' = 2*size + c recurrences.
 */
@Test
public void testDeepRecursion() {
    // The general idea is to test the fanout of the schema.
    // TODO: figure out closed forms of the binary tree and struct series.
    long expectedBinaryTreeSize = 4;
    long expectedStructSize = 7;
    for (int i = 0; i < 10; ++i) {
        MessageType deepSchema = new ProtoSchemaConverter(true, i, false).convert(Trees.WideTree.class);
        // 3, 5, 7, 9, 11, 13, 15, 17, 19, 21
        assertEquals(2 * i + 3, deepSchema.getPaths().size());
        deepSchema = new ProtoSchemaConverter(true, i, false).convert(Trees.BinaryTree.class);
        // 4, 10, 22, 46, 94, 190, 382, 766, 1534, 3070
        assertEquals(expectedBinaryTreeSize, deepSchema.getPaths().size());
        expectedBinaryTreeSize = 2 * expectedBinaryTreeSize + 2;
        deepSchema = new ProtoSchemaConverter(true, i, false).convert(Struct.class);
        // 7, 18, 40, 84, 172, 348, 700, 1404, 2812, 5628
        assertEquals(expectedStructSize, deepSchema.getPaths().size());
        expectedStructSize = 2 * expectedStructSize + 4;
    }
}
/** Returns true when the array has at least one element; the exact negation of isEmpty. */
public static boolean isNotEmpty(final Object[] array) {
    return !isEmpty(array);
}
/** null and zero-length arrays are "empty"; a one-element array is not. */
@Test
void isNotEmpty() {
    assertFalse(ArrayUtils.isNotEmpty(null));
    assertFalse(ArrayUtils.isNotEmpty(new Object[0]));
    assertTrue(ArrayUtils.isNotEmpty(new Object[] {"abc"}));
}
/** Package-private accessor for the feature control manager (used by tests). */
FeatureControlManager featureControl() {
    return featureControl;
}
/**
 * In 3.4 the ZK migration state was written as raw enum value 1
 * (PRE_MIGRATION); 3.5+ redefines that value as MIGRATION. A controller
 * replaying a 3.4 log must therefore report MIGRATION and not be in
 * pre-migration mode.
 */
@Test
public void testUpgradeMigrationStateFrom34() throws Exception {
    try (LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv.Builder(1).build()) {
        // In 3.4, we only wrote a PRE_MIGRATION to the log. In that software version, we defined this
        // as enum value 1. In 3.5+ software, this enum value is redefined as MIGRATION
        BootstrapMetadata bootstrapMetadata = BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_4_IV0, "test");
        List<ApiMessageAndVersion> initialRecords = new ArrayList<>(bootstrapMetadata.records());
        initialRecords.add(ZkMigrationState.of((byte) 1).toRecord());
        logEnv.appendInitialRecords(initialRecords);
        try (
            QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv.Builder(logEnv).
                setControllerBuilderInitializer(controllerBuilder ->
                    controllerBuilder.setZkMigrationEnabled(true)
                ).
                setBootstrapMetadata(bootstrapMetadata).
                build()
        ) {
            QuorumController active = controlEnv.activeController();
            assertEquals(active.featureControl().zkMigrationState(), ZkMigrationState.MIGRATION);
            assertFalse(active.featureControl().inPreMigrationMode());
        }
        testToImages(logEnv.allRecords());
    }
}
// JDBC metadata: ALTER TABLE ... ADD COLUMN is reported as unsupported.
@Override public boolean supportsAlterTableWithAddColumn() { return false; }
// Test: metadata reports ALTER TABLE ADD COLUMN as unsupported.
@Test void assertSupportsAlterTableWithAddColumn() { assertFalse(metaData.supportsAlterTableWithAddColumn()); }
/**
 * Creates a new, empty {@link Builder}.
 *
 * @param <P> the parameter type evaluated by the sampler's matchers
 * @return a fresh builder with no rules
 */
public static <P> Builder<P> newBuilder() {
    return new Builder<>();
}
// Test: putAllRules copies a base sampler's rules; subsequent putRule calls
// override entries with the same matcher and may add new ones.
@Test void putAllRules() { Matcher<Void> one = v -> false; Matcher<Void> two = v -> true; Matcher<Void> three = v -> Boolean.FALSE; Matcher<Void> four = v -> Boolean.TRUE; ParameterizedSampler<Void> base = ParameterizedSampler.<Void>newBuilder() .putRule(one, Sampler.ALWAYS_SAMPLE) .putRule(two, Sampler.NEVER_SAMPLE) .putRule(three, Sampler.ALWAYS_SAMPLE) .build(); ParameterizedSampler<Void> extended = ParameterizedSampler.<Void>newBuilder() .putAllRules(base) .putRule(one, Sampler.NEVER_SAMPLE) .putRule(four, Sampler.ALWAYS_SAMPLE) .build(); assertThat(extended).usingRecursiveComparison() .isEqualTo(ParameterizedSampler.<Void>newBuilder() .putRule(one, Sampler.NEVER_SAMPLE) .putRule(two, Sampler.NEVER_SAMPLE) .putRule(three, Sampler.ALWAYS_SAMPLE) .putRule(four, Sampler.ALWAYS_SAMPLE) .build() ); }
/**
 * Triggers the upgrade / extra-properties check, but only when the agent was
 * started via the wrapping launcher (i.e. a JVM relaunch is possible at all).
 *
 * @throws IOException if the underlying check fails
 */
public void checkForUpgradeAndExtraProperties() throws IOException {
    if (!upgradesEnabled()) {
        LOGGER.debug("[Agent Upgrade] Skipping check as there is no wrapping launcher to relaunch the agent JVM...");
        return;
    }
    // Compare the current MD5s of agent, launcher, plugins and TFS impl.
    checkForUpgradeAndExtraProperties(systemEnvironment.getAgentMd5(),
            systemEnvironment.getGivenAgentLauncherMd5(),
            systemEnvironment.getAgentPluginsMd5(),
            systemEnvironment.getTfsImplMd5());
}
// Test: when all downloaded artifact MD5s are unchanged, the agent JVM is not exited.
@Test void checkForUpgradeShouldNotKillAgentIfAllDownloadsAreCompatible() throws Exception { setupForNoChangesToMD5(); agentUpgradeService.checkForUpgradeAndExtraProperties(); verify(jvmExitter, never()).jvmExit(anyString(), anyString(), anyString()); }
// Map values reuse the codec's single encoder instance.
@Override public Encoder getMapValueEncoder() { return encoder; }
// Test: map-value encoder serializes a plain string as quoted JSON ("foo" -> "\"foo\"").
@Test public void shouldSerializeTheStringCorrectly() throws Exception { assertThat(mapCodec.getMapValueEncoder().encode("foo").toString(CharsetUtil.UTF_8)) .isEqualTo("\"foo\""); }
/**
 * Collects the WHERE segments appearing inside the statement's subqueries,
 * including WHERE conditions contributed by joins within those subqueries.
 *
 * @param selectStatement select statement to inspect
 * @return WHERE segments of every subquery, in discovery order
 */
public static Collection<WhereSegment> getSubqueryWhereSegments(final SelectStatement selectStatement) {
    Collection<WhereSegment> whereSegments = new LinkedList<>();
    for (SubquerySegment subquery : SubqueryExtractUtils.getSubquerySegments(selectStatement)) {
        SelectStatement subSelect = subquery.getSelect();
        subSelect.getWhere().ifPresent(whereSegments::add);
        whereSegments.addAll(getJoinWhereSegments(subSelect));
    }
    return whereSegments;
}
// Test: a join condition inside a subquery's FROM is surfaced as a WHERE segment.
@Test void assertGetWhereSegmentsFromSubQueryJoin() { JoinTableSegment joinTableSegment = new JoinTableSegment(); joinTableSegment.setLeft(new SimpleTableSegment(new TableNameSegment(37, 39, new IdentifierValue("t_order")))); joinTableSegment.setRight(new SimpleTableSegment(new TableNameSegment(54, 56, new IdentifierValue("t_order_item")))); joinTableSegment.setJoinType("INNER"); joinTableSegment.setCondition(new BinaryOperationExpression(63, 83, new ColumnSegment(63, 71, new IdentifierValue("order_id")), new ColumnSegment(75, 83, new IdentifierValue("order_id")), "=", "oi.order_id = o.order_id")); SelectStatement subQuerySelectStatement = mock(SelectStatement.class); when(subQuerySelectStatement.getFrom()).thenReturn(Optional.of(joinTableSegment)); SelectStatement selectStatement = mock(SelectStatement.class); when(selectStatement.getFrom()).thenReturn(Optional.of(new SubqueryTableSegment(0, 0, new SubquerySegment(20, 84, subQuerySelectStatement, "")))); Collection<WhereSegment> subqueryWhereSegments = WhereExtractUtils.getSubqueryWhereSegments(selectStatement); WhereSegment actual = subqueryWhereSegments.iterator().next(); assertThat(actual.getExpr(), is(joinTableSegment.getCondition())); }
// Purges executions (and optionally logs/metrics/storage) matching the rendered
// namespace/flow/date filters, after verifying this flow may act on the target
// namespace (or on all namespaces when none was configured).
@Override
public PurgeExecutions.Output run(RunContext runContext) throws Exception {
    ExecutionService executionService = ((DefaultRunContext)runContext).getApplicationContext().getBean(ExecutionService.class);
    FlowService flowService = ((DefaultRunContext)runContext).getApplicationContext().getBean(FlowService.class);
    // validate that this namespace is authorized on the target namespace / all namespaces
    var flowInfo = runContext.flowInfo();
    if (namespace == null){
        flowService.checkAllowedAllNamespaces(flowInfo.tenantId(), flowInfo.tenantId(), flowInfo.namespace());
    } else if (!runContext.render(namespace).equals(flowInfo.namespace())) {
        flowService.checkAllowedNamespace(flowInfo.tenantId(), runContext.render(namespace), flowInfo.tenantId(), flowInfo.namespace());
    }
    // startDate is optional (null => unbounded), endDate is required.
    ExecutionService.PurgeResult purgeResult = executionService.purge(
        purgeExecution,
        purgeLog,
        purgeMetric,
        purgeStorage,
        flowInfo.tenantId(),
        runContext.render(namespace),
        runContext.render(flowId),
        startDate != null ? ZonedDateTime.parse(runContext.render(startDate)) : null,
        ZonedDateTime.parse(runContext.render(endDate)),
        states
    );
    return Output.builder()
        .executionsCount(purgeResult.getExecutionsCount())
        .logsCount(purgeResult.getLogsCount())
        .storagesCount(purgeResult.getStoragesCount())
        .metricsCount(purgeResult.getMetricsCount())
        .build();
}
// Test: purging a finished execution by namespace/flow removes exactly one execution.
@Test
void run() throws Exception {
    // create an execution to delete
    String namespace = "run.namespace";
    String flowId = "run-flow-id";
    var execution = Execution.builder()
        .id(IdUtils.create())
        .namespace(namespace)
        .flowId(flowId)
        .state(new State().withState(State.Type.SUCCESS))
        .build();
    executionRepository.save(execution);
    var purge = PurgeExecutions.builder()
        .flowId(flowId)
        .namespace(namespace)
        .endDate(ZonedDateTime.now().plusMinutes(1).format(DateTimeFormatter.ISO_ZONED_DATE_TIME))
        .build();
    var runContext = runContextFactory.of(Map.of("flow", Map.of("namespace", namespace, "id", flowId)));
    var output = purge.run(runContext);
    assertThat(output.getExecutionsCount(), is(1));
}
/**
 * Removes the given group's providers from the group resolved by name.
 * An empty incoming group is a no-op; the write lock guards the mutation.
 *
 * @param providerGroup providers to remove; ignored when empty
 */
@Override
public void removeProvider(ProviderGroup providerGroup) {
    if (ProviderHelper.isEmpty(providerGroup)) {
        return;
    }
    wLock.lock();
    try {
        ProviderGroup target = getProviderGroup(providerGroup.getName());
        target.removeAll(providerGroup.getProviderInfos());
    } finally {
        wLock.unlock();
    }
}
// Test: removing an empty group is a no-op; successive removals shrink the resolved
// group (default group for non-direct names, direct group otherwise) down to zero.
@Test public void removeProvider() throws Exception { SingleGroupAddressHolder addressHolder = new SingleGroupAddressHolder(null); addressHolder.addProvider(new ProviderGroup("xxx", Arrays.asList( ProviderHelper.toProviderInfo("127.0.0.1:12200"), ProviderHelper.toProviderInfo("127.0.0.1:12201"), ProviderHelper.toProviderInfo("127.0.0.1:12202")))); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DEFAULT_GROUP).size() == 3); addressHolder.removeProvider(new ProviderGroup("xxx", new ArrayList<ProviderInfo>())); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DEFAULT_GROUP).size() == 3); addressHolder.removeProvider(new ProviderGroup("xxx", Collections.singletonList( ProviderHelper.toProviderInfo("127.0.0.1:12200")))); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DEFAULT_GROUP).size() == 2); addressHolder.removeProvider(new ProviderGroup("xxx", Arrays.asList( ProviderHelper.toProviderInfo("127.0.0.1:12201"), ProviderHelper.toProviderInfo("127.0.0.1:12202")))); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DEFAULT_GROUP).size() == 0); addressHolder.addProvider(new ProviderGroup(ADDRESS_DIRECT_GROUP, Arrays.asList( ProviderHelper.toProviderInfo("127.0.0.1:12200"), ProviderHelper.toProviderInfo("127.0.0.1:12201"), ProviderHelper.toProviderInfo("127.0.0.1:12202")))); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DIRECT_GROUP).size() == 3); addressHolder.removeProvider(new ProviderGroup(ADDRESS_DIRECT_GROUP, new ArrayList<ProviderInfo>())); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DIRECT_GROUP).size() == 3); addressHolder.removeProvider(new ProviderGroup(ADDRESS_DIRECT_GROUP, Collections.singletonList( ProviderHelper.toProviderInfo("127.0.0.1:12200")))); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DIRECT_GROUP).size() == 2); addressHolder.removeProvider(new ProviderGroup(ADDRESS_DIRECT_GROUP, Arrays.asList( ProviderHelper.toProviderInfo("127.0.0.1:12201"),
ProviderHelper.toProviderInfo("127.0.0.1:12202")))); Assert.assertTrue(addressHolder.getProviderGroup(ADDRESS_DIRECT_GROUP).size() == 0); }
// Default initialization hook: always reports success; overridable by subclasses.
protected boolean init() { return true; }
// Test: default init() reports success.
@Test public void testInit() { assertEquals( true, avroInput.init() ); }
/**
 * Combines a predicate built from the given exception classes with an optional
 * caller-supplied predicate: when both exist the result matches if either does;
 * when only one exists it is returned alone; when neither exists the result is empty.
 *
 * @param exceptionPredicate optional additional predicate; may be {@code null}
 * @param exceptions exception classes to match (subclasses included, per tests)
 * @return the combined predicate, or empty when nothing was supplied
 */
@SafeVarargs
public static <P> Optional<Predicate<Throwable>> createExceptionsPredicate(
    Predicate<Throwable> exceptionPredicate, Class<? extends Throwable>... exceptions) {
    Optional<Predicate<Throwable>> fromClasses = PredicateCreator.createExceptionsPredicate(exceptions);
    if (fromClasses.isPresent()) {
        Predicate<Throwable> classPredicate = fromClasses.get();
        return Optional.of(exceptionPredicate == null ? classPredicate : classPredicate.or(exceptionPredicate));
    }
    return Optional.ofNullable(exceptionPredicate);
}
// Test: class-based predicate matches listed exception types and their subclasses
// (IllegalArgumentException via RuntimeException) but rejects unrelated types.
@Test public void buildRecordExceptionsPredicate() { Predicate<Throwable> predicate = PredicateCreator .createExceptionsPredicate(RuntimeException.class, IOException.class) .orElseThrow(); then(predicate.test(new RuntimeException())).isTrue(); then(predicate.test(new IllegalArgumentException())).isTrue(); then(predicate.test(new Throwable())).isFalse(); then(predicate.test(new Exception())).isFalse(); then(predicate.test(new IOException())).isTrue(); }
/**
 * Requests that the given step's dependencies be bypassed for a workflow
 * instance, recording the acting user for the audit timeline.
 *
 * @param instance workflow instance containing the step
 * @param stepId   id of the step whose dependencies should be bypassed
 * @param user     user taking the action
 * @param blocking when {@code true}, wait (with timeout) for the bypass to take effect
 * @return the action response, immediate or after the wait
 */
public StepInstanceActionResponse bypassStepDependencies(
    WorkflowInstance instance, String stepId, User user, boolean blocking) {
    // Validation must happen before the action is persisted.
    validateStepId(instance, stepId, Actions.StepInstanceAction.BYPASS_STEP_DEPENDENCIES);
    StepInstance stepInstance = getStepInstanceAndValidateBypassStepDependencyConditions(instance, stepId);
    StepAction action = StepAction.createBypassStepDependencies(stepInstance, user);
    saveAction(stepInstance, action);
    return blocking
        ? waitBypassStepDependenciesResponseWithTimeout(stepInstance, action)
        : createActionResponseFrom(stepInstance, null, action.toTimelineEvent());
}
// Test: non-blocking bypass on a step waiting for signals returns an immediate
// response carrying ids, attempt, and the audit timeline message.
@Test public void testBypassSignalDependencies() { stepInstance.getRuntimeState().setStatus(StepInstance.Status.WAITING_FOR_SIGNALS); stepInstance.getStepRetry().setRetryable(false); ((TypedStep) stepInstance.getDefinition()).setFailureMode(FailureMode.FAIL_AFTER_RUNNING); stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true); StepInstanceActionResponse response = actionDao.bypassStepDependencies(instance, "job1", user, false); Assert.assertEquals("sample-dag-test-3", response.getWorkflowId()); Assert.assertEquals(1, response.getWorkflowInstanceId()); Assert.assertEquals(1, response.getWorkflowRunId()); Assert.assertEquals("job1", response.getStepId()); Assert.assertEquals(1L, response.getStepAttemptId().longValue()); Assert.assertEquals( "User [tester] take action [BYPASS_STEP_DEPENDENCIES] on the step", response.getTimelineEvent().getMessage()); }
// Returns this request's API method name (the PATH constant, e.g. "setMyCommands").
@Override public String getMethod() { return PATH; }
// Test: builder without an explicit scope still yields method name "setMyCommands"
// and validates without throwing.
@Test public void testSetMyCommandsWithEmptyScope() { SetMyCommands setMyCommands = SetMyCommands .builder() .command(BotCommand.builder().command("test").description("Test description").build()) .languageCode("en") .build(); assertEquals("setMyCommands", setMyCommands.getMethod()); assertDoesNotThrow(setMyCommands::validate); }
/**
 * Removes a trailing SQL terminator ({@code SQL_END}) from the statement, if present.
 *
 * @param sql SQL text to trim; must not be {@code null}
 * @return the SQL without its trailing terminator, or the input unchanged
 */
public static String trimSemicolon(final String sql) {
    // Use the terminator's actual length rather than a hard-coded 1 so this
    // stays correct if SQL_END ever changes.
    return sql.endsWith(SQL_END) ? sql.substring(0, sql.length() - SQL_END.length()) : sql;
}
// Test: a trailing semicolon is stripped; input without one is returned unchanged.
@Test void assertTrimSemiColon() { assertThat(SQLUtils.trimSemicolon("SHOW DATABASES;"), is("SHOW DATABASES")); assertThat(SQLUtils.trimSemicolon("SHOW DATABASES"), is("SHOW DATABASES")); }
// Plugin execution order is driven by the KEY_AUTH code from PluginEnum.
@Override public int getOrder() { return PluginEnum.KEY_AUTH.getCode(); }
// Test: plugin order equals the KEY_AUTH enum code.
@Test public void testGetOrder() { assertEquals(PluginEnum.KEY_AUTH.getCode(), keyAuthPlugin.getOrder()); }
// Factory for the AsIterable transform (iterable-shaped view; used as a side
// input view in tests).
public static <T> AsIterable<T> asIterable() { return new AsIterable<>(); }
// Test (ValidatesRunner): an empty iterable side input exposes an iterator with
// hasNext()==false inside the DoFn; one element is emitted to prove execution.
@Test
@Category(ValidatesRunner.class)
public void testEmptyIterableSideInput() throws Exception {
    final PCollectionView<Iterable<Integer>> view =
        pipeline.apply("CreateEmptyView", Create.empty(VarIntCoder.of())).apply(View.asIterable());
    PCollection<Integer> results =
        pipeline
            .apply("Create1", Create.of(1))
            .apply(
                "OutputSideInputs",
                ParDo.of(
                    new DoFn<Integer, Integer>() {
                      @ProcessElement
                      public void processElement(ProcessContext c) {
                        assertFalse(c.sideInput(view).iterator().hasNext());
                        c.output(1);
                      }
                    })
                .withSideInputs(view));
    // Pass at least one value through to guarantee that DoFn executes.
    PAssert.that(results).containsInAnyOrder(1);
    pipeline.run();
}
/**
 * Compacts runs of equal adjacent values within {@code arr[0..end)} in place,
 * keeping the first element of each run, and returns the new logical length.
 * Elements past the returned length are left untouched.
 *
 * @param arr array to compact in place
 * @param end exclusive end of the valid range to process
 * @return number of elements in the compacted prefix (0 when {@code end <= 0})
 */
public static int removeConsecutiveDuplicates(int[] arr, int end) {
    // Fix: an empty range previously reported length 1; report 0 instead.
    if (end <= 0) {
        return 0;
    }
    int write = 0;
    for (int read = 1; read < end; ++read) {
        if (arr[read] != arr[write]) {
            arr[++write] = arr[read];
        }
    }
    return write + 1;
}
// Test: compaction returns the new logical length; only the returned prefix is
// meaningful, the array's tail is left untouched.
@Test
public void removeConsecutiveDuplicates() {
    int[] arr = new int[]{3, 3, 4, 2, 1, -3, -3, 9, 3, 6, 6, 7, 7};
    assertEquals(9, ArrayUtil.removeConsecutiveDuplicates(arr, arr.length));
    // note that only the first 9 elements should be considered the 'valid' range
    assertEquals(IntArrayList.from(3, 4, 2, 1, -3, 9, 3, 6, 7, 6, 6, 7, 7), IntArrayList.from(arr));
    int[] brr = new int[]{4, 4, 3, 5, 3};
    assertEquals(2, ArrayUtil.removeConsecutiveDuplicates(brr, 3));
    assertEquals(IntArrayList.from(4, 3, 3, 5, 3), IntArrayList.from(brr));
}
/**
 * Returns the variable names visible in the given space, tagging deprecated
 * variables with the deprecated suffix and sorting them to the end; all other
 * names are sorted alphabetically.
 *
 * @param space variable space to list
 * @return sorted variable names, deprecated ones suffixed and last
 */
public static final String[] getVariableNames( VariableSpace space ) {
  String[] names = space.listVariables();
  // Tag every deprecated variable with the deprecated suffix.
  for ( int i = 0; i < names.length; i++ ) {
    for ( String deprecated : Const.DEPRECATED_VARIABLES ) {
      if ( names[i].equals( deprecated ) ) {
        names[i] = names[i] + Const.getDeprecatedPrefix();
        break;
      }
    }
  }
  // Alphabetical order, but suffixed (deprecated) names always sort last and
  // compare equal among themselves.
  Arrays.sort( names, new Comparator<String>() {
    public int compare( String var1, String var2 ) {
      boolean dep1 = var1.endsWith( Const.getDeprecatedPrefix() );
      boolean dep2 = var2.endsWith( Const.getDeprecatedPrefix() );
      if ( dep1 != dep2 ) {
        return dep1 ? 1 : -1;
      }
      return dep1 ? 0 : var1.compareTo( var2 );
    }
  } );
  return names;
}
// Test: a deprecated variable name gets the deprecated suffix appended and is
// ordered after the regular (alphabetical) names.
@Test public void testGetVariableNames() { Assert.assertTrue( Const.DEPRECATED_VARIABLES.length > 0 ); String deprecatedVariableName = Const.DEPRECATED_VARIABLES[0]; String deprecatedPrefix = Const.getDeprecatedPrefix(); String[] variableNames = new String[] { "test_variable1", "test_variable2", deprecatedVariableName }; String[] expectedVariables = new String[] { "test_variable1", "test_variable2", deprecatedVariableName + deprecatedPrefix }; VariableSpace variableSpace = mock( VariableSpace.class ); doReturn( variableNames ).when( variableSpace ).listVariables(); Assert.assertArrayEquals( expectedVariables, ControlSpaceKeyAdapter.getVariableNames( variableSpace ) ); }
// Monitoring tick: refresh the Elasticsearch health status, then file-system metrics.
@Override public void run() { updateElasticSearchHealthStatus(); updateFileSystemMetrics(); }
// Test: a RED cluster health response sets the ES metric to red exactly once.
@Test public void when_elasticsearch_down_status_is_updated_to_red() { ClusterHealthResponse clusterHealthResponse = new ClusterHealthResponse(); clusterHealthResponse.setStatus(ClusterHealthStatus.RED); when(esClient.clusterHealth(any())).thenReturn(clusterHealthResponse); underTest.run(); verify(serverMonitoringMetrics, times(1)).setElasticSearchStatusToRed(); verifyNoMoreInteractions(serverMonitoringMetrics); }
// True when a client with the given id is currently registered in the map.
@Override public boolean contains(String clientId) { return clients.containsKey(clientId); }
// Test: manager knows its own and synced client ids but not an unused one.
@Test void testContainsEphemeralIpPortId() { assertTrue(ephemeralIpPortClientManager.contains(ephemeralIpPortId)); assertTrue(ephemeralIpPortClientManager.contains(syncedClientId)); String unUsedClientId = "127.0.0.1:8888#true"; assertFalse(ephemeralIpPortClientManager.contains(unUsedClientId)); }
/**
 * Decodes one WAL chunk: the buffer's remaining bytes are read as UTF-8 text,
 * parsed with or without transaction framing (per {@code decodeWithTX}), and
 * the resulting event is stamped with the given log sequence number.
 *
 * @param data              buffer positioned at the payload to decode
 * @param logSequenceNumber LSN to attach to the decoded event
 * @return the decoded event
 */
@Override
public AbstractWALEvent decode(final ByteBuffer data, final BaseLogSequenceNumber logSequenceNumber) {
    byte[] payload = new byte[data.remaining()];
    data.get(payload);
    String text = new String(payload, StandardCharsets.UTF_8);
    AbstractWALEvent event = decodeWithTX ? decodeDataWithTX(text) : decodeDataIgnoreTX(text);
    event.setLogSequenceNumber(logSequenceNumber);
    return event;
}
// Test: a SQLException from TimestampUtils while decoding a time column surfaces
// as a DecodingException.
@Test void assertDecodeTime() throws SQLException { MppTableData tableData = new MppTableData(); tableData.setTableName("public.test"); tableData.setOpType("INSERT"); tableData.setColumnsName(new String[]{"data"}); tableData.setColumnsType(new String[]{"time without time zone"}); tableData.setColumnsVal(new String[]{"'1 2 3'"}); TimestampUtils timestampUtils = mock(TimestampUtils.class); when(timestampUtils.toTime(null, "1 2 3")).thenThrow(new SQLException("")); ByteBuffer data = ByteBuffer.wrap(JsonUtils.toJsonString(tableData).getBytes()); assertThrows(DecodingException.class, () -> new MppdbDecodingPlugin(new OpenGaussTimestampUtils(timestampUtils), true, false).decode(data, logSequenceNumber)); }
/**
 * Handles the ETH_ACCOUNTS operation: requests the node's accounts and puts the
 * resulting list on the message body when the response carries no error.
 *
 * @param message in-flight message to populate
 * @throws IOException if the RPC call fails
 */
@InvokeOnHeader(Web3jConstants.ETH_ACCOUNTS)
void ethAccounts(Message message) throws IOException {
    Request<?, EthAccounts> request = web3j.ethAccounts();
    setRequestId(message, request);
    EthAccounts response = request.send();
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        // Fix: the body was previously set unconditionally before the error
        // check and then again here; set it once, only on success.
        message.setBody(response.getAccounts());
    }
}
// Test: an empty accounts response produces an empty list body on the exchange.
@Test public void ethAccountsTest() throws Exception { EthAccounts response = Mockito.mock(EthAccounts.class); Mockito.when(mockWeb3j.ethAccounts()).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.getAccounts()).thenReturn(Collections.emptyList()); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_ACCOUNTS); template.send(exchange); List body = exchange.getIn().getBody(List.class); assertTrue(body.isEmpty()); }
// Visitor dispatch: COS boolean objects are handled via visitFromBoolean.
@Override public void accept(ICOSVisitor visitor) throws IOException { visitor.visitFromBoolean(this); }
// Test: COSWriter serializes COS booleans as their toString() bytes (true/false).
@Override @Test void testAccept() { ByteArrayOutputStream outStream = new ByteArrayOutputStream(); COSWriter visitor = new COSWriter(outStream); int index = 0; try { cosBooleanTrue.accept(visitor); testByteArrays(String.valueOf(cosBooleanTrue).getBytes(StandardCharsets.ISO_8859_1), outStream.toByteArray()); outStream.reset(); cosBooleanFalse.accept(visitor); testByteArrays(String.valueOf(cosBooleanFalse).getBytes(StandardCharsets.ISO_8859_1), outStream.toByteArray()); outStream.reset(); } catch (Exception e) { fail("Failed to write " + index + " exception: " + e.getMessage()); } }
// Returns the fixed capacity chosen at construction time.
public int capacity() { return capacity; }
// Test: ring buffer reports the configured CAPACITY.
@Test void shouldCalculateCapacityForBuffer() { assertThat(ringBuffer.capacity(), is(CAPACITY)); }
// Dissects an archive control-protocol request into a human-readable string:
// writes the log header, decodes the SBE message header, then selects the
// event-specific request decoder and appends its fields to the builder.
// Unknown event codes append ": unknown command".
@SuppressWarnings("MethodLength") static void dissectControlRequest( final ArchiveEventCode eventCode, final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder); HEADER_DECODER.wrap(buffer, offset + encodedLength); encodedLength += MessageHeaderDecoder.ENCODED_LENGTH; switch (eventCode) { case CMD_IN_CONNECT: CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendConnect(builder); break; case CMD_IN_CLOSE_SESSION: CLOSE_SESSION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendCloseSession(builder); break; case CMD_IN_START_RECORDING: START_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording(builder); break; case CMD_IN_STOP_RECORDING: STOP_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecording(builder); break; case CMD_IN_REPLAY: REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplay(builder); break; case CMD_IN_STOP_REPLAY: STOP_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplay(builder); break; case CMD_IN_LIST_RECORDINGS: LIST_RECORDINGS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordings(builder); break; case CMD_IN_LIST_RECORDINGS_FOR_URI: LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingsForUri(builder); break; case CMD_IN_LIST_RECORDING: LIST_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength,
HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecording(builder); break; case CMD_IN_EXTEND_RECORDING: EXTEND_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording(builder); break; case CMD_IN_RECORDING_POSITION: RECORDING_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendRecordingPosition(builder); break; case CMD_IN_TRUNCATE_RECORDING: TRUNCATE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTruncateRecording(builder); break; case CMD_IN_STOP_RECORDING_SUBSCRIPTION: STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingSubscription(builder); break; case CMD_IN_STOP_POSITION: STOP_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopPosition(builder); break; case CMD_IN_FIND_LAST_MATCHING_RECORD: FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendFindLastMatchingRecord(builder); break; case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS: LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingSubscriptions(builder); break; case CMD_IN_START_BOUNDED_REPLAY: BOUNDED_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartBoundedReplay(builder); break; case CMD_IN_STOP_ALL_REPLAYS: STOP_ALL_REPLAYS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopAllReplays(builder); break; case CMD_IN_REPLICATE:
REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate(builder); break; case CMD_IN_STOP_REPLICATION: STOP_REPLICATION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplication(builder); break; case CMD_IN_START_POSITION: START_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartPosition(builder); break; case CMD_IN_DETACH_SEGMENTS: DETACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDetachSegments(builder); break; case CMD_IN_DELETE_DETACHED_SEGMENTS: DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDeleteDetachedSegments(builder); break; case CMD_IN_PURGE_SEGMENTS: PURGE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeSegments(builder); break; case CMD_IN_ATTACH_SEGMENTS: ATTACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAttachSegments(builder); break; case CMD_IN_MIGRATE_SEGMENTS: MIGRATE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendMigrateSegments(builder); break; case CMD_IN_AUTH_CONNECT: AUTH_CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAuthConnect(builder); break; case CMD_IN_KEEP_ALIVE: KEEP_ALIVE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendKeepAlive(builder); break; case CMD_IN_TAGGED_REPLICATE: TAGGED_REPLICATE_REQUEST_DECODER.wrap( buffer, offset +
encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTaggedReplicate(builder); break; case CMD_IN_START_RECORDING2: START_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording2(builder); break; case CMD_IN_EXTEND_RECORDING2: EXTEND_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording2(builder); break; case CMD_IN_STOP_RECORDING_BY_IDENTITY: STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingByIdentity(builder); break; case CMD_IN_PURGE_RECORDING: PURGE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeRecording(builder); break; case CMD_IN_REPLICATE2: REPLICATE_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate2(builder); break; case CMD_IN_REQUEST_REPLAY_TOKEN: REPLAY_TOKEN_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplayToken(builder); break; default: builder.append(": unknown command"); } }
// Test: CMD_IN_START_POSITION dissection renders header fields plus
// controlSessionId, correlationId and recordingId in order.
@Test void controlRequestStartPosition() { internalEncodeLogHeader(buffer, 0, 1000, 1000, () -> 500_000_000L); final StartPositionRequestEncoder requestEncoder = new StartPositionRequestEncoder(); requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder) .controlSessionId(3) .correlationId(16) .recordingId(1); dissectControlRequest(CMD_IN_START_POSITION, buffer, 0, builder); assertEquals("[0.500000000] " + CONTEXT + ": " + CMD_IN_START_POSITION.name() + " [1000/1000]:" + " controlSessionId=3" + " correlationId=16" + " recordingId=1", builder.toString()); }
/**
 * Logs the statement's SQL when slow-query logging is enabled and the elapsed
 * time since {@code startTimeNanos} exceeds the configured threshold.
 *
 * @param statement      executed statement; its {@code toString()} supplies the SQL
 * @param startTimeNanos start timestamp (nanoTime-based); must be positive
 * @param context        session context forwarded to the internal logger
 * @throws IllegalArgumentException if {@code startTimeNanos} is not positive
 */
public void logSlowQuery(final Statement statement, final long startTimeNanos, final JdbcSessionContext context) {
	if ( logSlowQuery < 1 ) {
		return;
	}
	if ( startTimeNanos <= 0 ) {
		throw new IllegalArgumentException( "startTimeNanos [" + startTimeNanos + "] should be greater than 0" );
	}
	final long elapsedMillis = elapsedFrom( startTimeNanos );
	if ( elapsedMillis > logSlowQuery ) {
		// toString() (which may render the SQL) is only invoked for slow queries.
		logSlowQueryInternal( context, elapsedMillis, statement.toString() );
	}
}
// Test: when the elapsed time exceeds the threshold, the statement's toString()
// is invoked exactly once to obtain the SQL for logging.
@Test public void testLogSlowQueryFromStatementWhenLoggingEnabled() { long logSlowQueryThresholdMillis = 300L; SqlStatementLogger sqlStatementLogger = new SqlStatementLogger( false, false, false, logSlowQueryThresholdMillis ); AtomicInteger callCounterToString = new AtomicInteger(); Statement statement = mockStatementForCountingToString( callCounterToString ); long startTimeNanos = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos( logSlowQueryThresholdMillis + 1 ); sqlStatementLogger.logSlowQuery( statement, startTimeNanos, null ); assertEquals( 1, callCounterToString.get() ); }
// Fixed display label for this authenticator factory.
@Override public String getDisplayType() { return "Corporate EDS Login"; }
// Test: factory display type is the fixed label "Corporate EDS Login".
@Test
public void testGetDisplayType() {
    //GIVEN
    CorporateEdsLoginAuthenticatorFactory factory = new CorporateEdsLoginAuthenticatorFactory();
    //WHEN
    String type = factory.getDisplayType();
    //THEN
    assertEquals("The display type should be Corporate EDS Login", "Corporate EDS Login", type);
}
/**
 * Decides whether an issue counts as "new" for new-code computations.
 *
 * <p>Pull requests treat every issue as new. Otherwise a new-code period must
 * exist: a dated period compares the issue's creation date, while a
 * reference-branch period checks whether the issue touches changed lines.
 *
 * @param component component the issue is located on
 * @param issue     issue to classify
 * @return {@code true} if the issue falls in the new-code scope
 */
public boolean isNew(Component component, DefaultIssue issue) {
    if (analysisMetadataHolder.isPullRequest()) {
        return true;
    }
    if (!periodHolder.hasPeriod()) {
        return false;
    }
    if (periodHolder.hasPeriodDate()) {
        return periodHolder.getPeriod().isOnPeriod(issue.creationDate());
    }
    // Period exists but has no date: only the reference-branch strategy applies.
    return isOnBranchUsingReferenceBranch() && hasAtLeastOneLocationOnChangedLines(component, issue);
}
// Test: a NUMBER_OF_DAYS period without a resolved date classifies nothing as new.
@Test public void isNew_returns_false_if_period_without_date() { periodHolder.setPeriod(new Period(NewCodePeriodType.NUMBER_OF_DAYS.name(), "10", null)); assertThat(newIssueClassifier.isNew(mock(Component.class), mock(DefaultIssue.class))).isFalse(); }
/**
 * Fetches and decodes the server's {@code /v1/info} description.
 *
 * @param server base URI of the server
 * @return the decoded {@link ServerInfo}
 * @throws ClientException  if the server URL cannot be parsed
 * @throws RuntimeException if the request fails or yields no value
 */
public ServerInfo getServerInfo(URI server) {
    HttpUrl base = HttpUrl.get(server);
    if (base == null) {
        throw new ClientException("Invalid server URL: " + server);
    }
    HttpUrl infoUrl = base.newBuilder().encodedPath("/v1/info").build();
    Request request = new Request.Builder().url(infoUrl).build();
    JsonResponse<ServerInfo> response = JsonResponse.execute(SERVER_INFO_CODEC, httpClient, request);
    if (response.hasValue()) {
        return response.getValue();
    }
    throw new RuntimeException(format("Request to %s failed: %s [Error: %s]", server, response, response.getResponseBody()));
}
// Test: a mocked /v1/info JSON response round-trips environment and uptime,
// issuing exactly one GET to the /v1/info path.
@Test public void testGetServerInfo() throws Exception { ServerInfo expected = new ServerInfo(UNKNOWN, "test", true, false, Optional.of(Duration.valueOf("2m"))); server.enqueue(new MockResponse() .addHeader(CONTENT_TYPE, "application/json") .setBody(SERVER_INFO_CODEC.toJson(expected))); QueryExecutor executor = new QueryExecutor(new OkHttpClient()); ServerInfo actual = executor.getServerInfo(server.url("/v1/info").uri()); assertEquals(actual.getEnvironment(), "test"); assertEquals(actual.getUptime(), Optional.of(Duration.valueOf("2m"))); assertEquals(server.getRequestCount(), 1); assertEquals(server.takeRequest().getPath(), "/v1/info"); }
// For local blobstore + nimbus HA: checks whether the current nimbus already
// holds the latest version of the blob identified by key; if not, downloads the
// updated blob from a nimbus that does and records this nimbus's state in
// ZooKeeper. No-ops when the key has no ZK state or was deleted concurrently.
public static void updateKeyForBlobStore(Map<String, Object> conf, BlobStore blobStore, CuratorFramework zkClient, String key, NimbusInfo nimbusDetails) {
    try {
        // Most of clojure tests currently try to access the blobs using getBlob. Since, updateKeyForBlobStore
        // checks for updating the correct version of the blob as a part of nimbus ha before performing any
        // operation on it, there is a necessity to stub several test cases to ignore this method. It is a valid
        // trade off to return if nimbusDetails which include the details of the current nimbus host port data are
        // not initialized as a part of the test. Moreover, this applies to only local blobstore when used along with
        // nimbus ha.
        if (nimbusDetails == null) {
            return;
        }
        boolean isListContainsCurrentNimbusInfo = false;
        List<String> stateInfo;
        if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + "/" + key) == null) {
            return;
        }
        stateInfo = zkClient.getChildren().forPath(BLOBSTORE_SUBTREE + "/" + key);
        if (stateInfo == null || stateInfo.isEmpty()) {
            return;
        }
        LOG.debug("StateInfo for update {}", stateInfo);
        Set<NimbusInfo> nimbusInfoList = getNimbodesWithLatestSequenceNumberOfBlob(zkClient, key);
        for (NimbusInfo nimbusInfo : nimbusInfoList) {
            if (nimbusInfo.getHost().equals(nimbusDetails.getHost())) {
                isListContainsCurrentNimbusInfo = true;
                break;
            }
        }
        if (!isListContainsCurrentNimbusInfo && downloadUpdatedBlob(conf, blobStore, key, nimbusInfoList)) {
            LOG.debug("Updating state inside zookeeper for an update");
            createStateInZookeeper(conf, key, nimbusDetails);
        }
    } catch (KeeperException.NoNodeException | KeyNotFoundException e) {
        //race condition with a delete
        return;
    } catch (Exception exp) {
        throw new RuntimeException(exp);
    }
}
// Test: when the current nimbus host already appears in the latest-sequence set,
// no download or config access occurs.
@Test public void testUpdateKeyForBlobStore_hostsMatch() { zkClientBuilder.withExists(BLOBSTORE_KEY, true); zkClientBuilder.withGetChildren(BLOBSTORE_KEY, "localhost:1111-1"); when(nimbusDetails.getHost()).thenReturn("localhost"); BlobStoreUtils.updateKeyForBlobStore(conf, blobStore, zkClientBuilder.build(), KEY, nimbusDetails); zkClientBuilder.verifyExists(true); zkClientBuilder.verifyGetChildren(2); verify(nimbusDetails).getHost(); verify(conf, never()).get(anyString()); }
/**
 * Builds a {@link WorkflowRuntimeOverview} for a workflow run from the real task map:
 * per-step runtime states, an aggregated step-status summary, and a rollup overview.
 * FOREACH and SUBWORKFLOW steps contribute their own pre-aggregated rollups when
 * available; every other step counts as a single leaf under its current status.
 */
public static WorkflowRuntimeOverview computeOverview(
    ObjectMapper objectMapper,
    WorkflowSummary summary,
    WorkflowRollupOverview rollupBase,
    Map<String, Task> realTaskMap) {
  // Step id -> runtime state decoded from each task's output data.
  Map<String, StepRuntimeState> states =
      realTaskMap.entrySet().stream()
          .collect(
              Collectors.toMap(
                  Map.Entry::getKey,
                  e ->
                      StepHelper.retrieveStepRuntimeState(
                          e.getValue().getOutputData(), objectMapper)));
  EnumMap<StepInstance.Status, WorkflowStepStatusSummary> stepStatusMap =
      toStepStatusMap(summary, states);
  WorkflowRollupOverview rollupOverview =
      realTaskMap.values().stream()
          // Only tasks that carry a runtime summary participate in the rollup.
          .filter(t -> t.getOutputData().containsKey(Constants.STEP_RUNTIME_SUMMARY_FIELD))
          .map(
              t -> {
                StepRuntimeSummary stepSummary =
                    StepHelper.retrieveRuntimeSummary(objectMapper, t.getOutputData());
                switch (stepSummary.getType()) {
                  case FOREACH:
                    // A foreach step past its first checkpoint already carries an overall rollup.
                    if (stepSummary.getArtifacts().containsKey(Artifact.Type.FOREACH.key())) {
                      ForeachArtifact artifact =
                          stepSummary.getArtifacts().get(Artifact.Type.FOREACH.key()).asForeach();
                      if (artifact.getForeachOverview() != null
                          && artifact.getForeachOverview().getCheckpoint() > 0) {
                        return artifact.getForeachOverview().getOverallRollup();
                      }
                    }
                    break;
                  case SUBWORKFLOW:
                    // A subworkflow contributes its nested rollup when present.
                    if (stepSummary.getArtifacts().containsKey(Artifact.Type.SUBWORKFLOW.key())) {
                      SubworkflowArtifact artifact =
                          stepSummary
                              .getArtifacts()
                              .get(Artifact.Type.SUBWORKFLOW.key())
                              .asSubworkflow();
                      if (artifact.getSubworkflowOverview() != null) {
                        return artifact.getSubworkflowOverview().getRollupOverview();
                      }
                    }
                    break;
                  case TEMPLATE:
                  default:
                    break;
                }
                // Plain step: count it as a single leaf under its current status.
                StepInstance.Status status = StepHelper.retrieveStepStatus(t.getOutputData());
                WorkflowRollupOverview.CountReference ref =
                    new WorkflowRollupOverview.CountReference();
                ref.setCnt(1);
                if (status.isOverview()) {
                  // Attach a reference back to this workflow run / step attempt.
                  ref.setRef(
                      Collections.singletonMap(
                          RollupAggregationHelper.getReference(
                              summary.getWorkflowId(), summary.getWorkflowRunId()),
                          Collections.singletonList(
                              RollupAggregationHelper.getReference(
                                  summary.getWorkflowInstanceId(),
                                  stepSummary.getStepId(),
                                  stepSummary.getStepAttemptId()))));
                }
                return WorkflowRollupOverview.of(1L, Collections.singletonMap(status, ref));
              })
          .reduce(new WorkflowRollupOverview(), WorkflowRollupOverview::aggregate);
  // Fold the caller-supplied base rollup into the computed one.
  rollupOverview.aggregate(rollupBase);
  return WorkflowRuntimeOverview.of(summary.getTotalStepCount(), stepStatusMap, rollupOverview);
}
@Test
public void testComputeOverview() throws Exception {
    WorkflowSummary workflowSummary =
        loadObject("fixtures/parameters/sample-wf-summary-params.json", WorkflowSummary.class);
    // One maestro task whose runtime summary reports a succeeded NOOP step.
    Task t = new Task();
    t.setTaskType(Constants.MAESTRO_TASK_NAME);
    t.setSeq(1);
    Map<String, Object> summary = new HashMap<>();
    summary.put("runtime_state", Collections.singletonMap("status", "SUCCEEDED"));
    summary.put("type", "NOOP");
    t.setOutputData(Collections.singletonMap(Constants.STEP_RUNTIME_SUMMARY_FIELD, summary));

    WorkflowRuntimeOverview overview =
        TaskHelper.computeOverview(
            MAPPER, workflowSummary, new WorkflowRollupOverview(), Collections.singletonMap("job1", t));

    // Total step count (presumably 4 per the fixture) is taken from the summary,
    // and the single real task lands in the SUCCEEDED bucket.
    Assert.assertEquals(4, overview.getTotalStepCount());
    Assert.assertEquals(
        singletonEnumMap(
            StepInstance.Status.SUCCEEDED,
            WorkflowStepStatusSummary.of(0L).addStep(Arrays.asList(2L, null, null))),
        overview.getStepOverview());

    // The rollup counts exactly one leaf under SUCCEEDED.
    WorkflowRollupOverview expected = new WorkflowRollupOverview();
    expected.setTotalLeafCount(1L);
    WorkflowRollupOverview.CountReference ref = new WorkflowRollupOverview.CountReference();
    ref.setCnt(1);
    expected.setOverview(singletonEnumMap(StepInstance.Status.SUCCEEDED, ref));
    Assert.assertEquals(expected, overview.getRollupOverview());
}
/**
 * Instantiates the plugin described by the given option, wrapping any
 * instantiation I/O or URI failure in a {@link CucumberException}.
 */
Plugin create(Options.Plugin plugin) {
    try {
        return instantiate(plugin.pluginString(), plugin.pluginClass(), plugin.argument());
    } catch (IOException e) {
        throw new CucumberException(e);
    } catch (URISyntaxException e) {
        throw new CucumberException(e);
    }
}
@Test
void fails_to_instantiates_html_plugin_with_dir_arg() {
    // The html plugin needs a writable file argument; a directory path must be rejected.
    PluginOption option = parse("html:" + tmp.toAbsolutePath());
    assertThrows(IllegalArgumentException.class, () -> fc.create(option));
}
/**
 * Extracts the service interface class from a Dubbo proxy bean by reflecting through
 * proxy -> "handler" -> "invoker" -> nested "invoker" and invoking getInterface().
 * Returns null when the bean is null or its class name is not a Dubbo proxy name.
 */
public static Class<?> getAssistInterface(Object proxyBean)
        throws NoSuchFieldException, SecurityException, IllegalArgumentException,
        IllegalAccessException, NoSuchMethodException, InvocationTargetException {
    if (proxyBean == null) {
        return null;
    }
    if (!isDubboProxyName(proxyBean.getClass().getName())) {
        return null;
    }
    // NOTE(review): the field names below are tied to Dubbo's internal proxy/invoker
    // layout — verify they still hold when upgrading Dubbo.
    Field handlerField = proxyBean.getClass().getDeclaredField("handler");
    handlerField.setAccessible(true);
    Object invokerInvocationHandler = handlerField.get(proxyBean);
    Field invokerField = invokerInvocationHandler.getClass().getDeclaredField("invoker");
    invokerField.setAccessible(true);
    Object invoker = invokerField.get(invokerInvocationHandler);
    Field failoverClusterInvokerField = invoker.getClass().getDeclaredField("invoker");
    failoverClusterInvokerField.setAccessible(true);
    Object failoverClusterInvoker = failoverClusterInvokerField.get(invoker);
    return (Class<?>)ReflectionUtil.invokeMethod(failoverClusterInvoker, "getInterface");
}
@Test
public void testGetAssistInterfaceForNull()
        throws NoSuchFieldException, InvocationTargetException, IllegalAccessException, NoSuchMethodException {
    // A null bean is not a proxy; the helper must return null rather than throw.
    assertNull(DubboUtil.getAssistInterface(null));
}
/**
 * Marshals the message body to Avro binary using the AvroSchema stored in the
 * CONTENT_SCHEMA exchange property. If a content class can be resolved the body is
 * marshalled as that POJO type, otherwise it is converted to a JsonNode first.
 * Sets the resulting content-type and schema headers on the message.
 */
@Override
public void transform(Message message, DataType fromType, DataType toType) {
    AvroSchema schema = message.getExchange().getProperty(SchemaHelper.CONTENT_SCHEMA, AvroSchema.class);

    if (schema == null) {
        throw new CamelExecutionException("Missing proper avro schema for data type processing", message.getExchange());
    }

    try {
        byte[] marshalled;

        String contentClass = SchemaHelper.resolveContentClass(message.getExchange(), null);
        if (contentClass != null) {
            // POJO payload: marshal with the resolved target class.
            Class<?> contentType = message.getExchange().getContext().getClassResolver().resolveMandatoryClass(contentClass);
            marshalled = Avro.mapper().writer().forType(contentType).with(schema)
                    .writeValueAsBytes(message.getBody());
        } else {
            // Generic payload: convert the body to JSON first, then marshal.
            marshalled = Avro.mapper().writer().forType(JsonNode.class).with(schema)
                    .writeValueAsBytes(getBodyAsJsonNode(message, schema));
        }

        message.setBody(marshalled);
        message.setHeader(Exchange.CONTENT_TYPE, MimeType.AVRO_BINARY.type());
        message.setHeader(SchemaHelper.CONTENT_SCHEMA, schema.getAvroSchema().getFullName());
    } catch (InvalidPayloadException | IOException | ClassNotFoundException e) {
        throw new CamelExecutionException("Failed to apply Avro binary data type on exchange", message.getExchange(), e);
    }
}
@Test
void shouldHandlePojo() throws Exception {
    Exchange exchange = new DefaultExchange(camelContext);

    // Attach the Person schema and a POJO body to the exchange.
    AvroSchema avroSchema =
            Avro.mapper().schemaFrom(AvroBinaryDataTypeTransformerTest.class.getResourceAsStream("Person.avsc"));
    exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, avroSchema);
    exchange.getMessage().setBody(new Person("Mickey", 20));

    transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);

    // The Avro binary body must decode back to the original person record.
    JSONAssert.assertEquals("""
            {"name":"Mickey","age":20}
            """,
            Json.mapper().writeValueAsString(
                    Avro.mapper().reader().with(avroSchema).readTree(exchange.getMessage().getBody(byte[].class))),
            true);
}
/**
 * Fetches the latest file slices within the given commit range, trying the
 * preferred view first and falling back to the secondary view on failure.
 */
@Override
public Stream<FileSlice> getLatestFileSliceInRange(List<String> commitsToReturn) {
    return execute(
            commitsToReturn,
            commits -> preferredView.getLatestFileSliceInRange(commits),
            commits -> getSecondaryView().getLatestFileSliceInRange(commits));
}
@Test
public void testGetLatestFileSliceInRange() {
    Stream<FileSlice> actual;
    Stream<FileSlice> expected = testFileSliceStream;
    List<String> commitsToReturn = Collections.singletonList("/table2");

    // Primary succeeds: the secondary view must never be created.
    when(primary.getLatestFileSliceInRange(commitsToReturn)).thenReturn(testFileSliceStream);
    actual = fsView.getLatestFileSliceInRange(commitsToReturn);
    assertEquals(expected, actual);
    verify(secondaryViewSupplier, never()).get();
    resetMocks();

    // Primary throws: the call falls back to the secondary view.
    when(secondaryViewSupplier.get()).thenReturn(secondary);
    when(primary.getLatestFileSliceInRange(commitsToReturn)).thenThrow(new RuntimeException());
    when(secondary.getLatestFileSliceInRange(commitsToReturn)).thenReturn(testFileSliceStream);
    actual = fsView.getLatestFileSliceInRange(commitsToReturn);
    assertEquals(expected, actual);
    resetMocks();

    // After failing over once, subsequent calls go straight to the secondary.
    when(secondary.getLatestFileSliceInRange(commitsToReturn)).thenReturn(testFileSliceStream);
    actual = fsView.getLatestFileSliceInRange(commitsToReturn);
    assertEquals(expected, actual);
    resetMocks();

    // A secondary failure propagates to the caller.
    when(secondary.getLatestFileSliceInRange(commitsToReturn)).thenThrow(new RuntimeException());
    assertThrows(RuntimeException.class, () -> {
        fsView.getLatestFileSliceInRange(commitsToReturn);
    });
}
/**
 * Looks up a Maven artifact in a Nexus repository by its SHA1 hash via the
 * {@code identify/sha1} REST endpoint, parsing the XML response with a
 * hardened document builder.
 *
 * @throws IllegalArgumentException when {@code sha1} is not a 40-char hex string
 * @throws FileNotFoundException when Nexus returns 404 (artifact unknown)
 * @throws IOException on connection failures, unexpected status codes, or malformed XML
 */
@Override
public MavenArtifact searchSha1(String sha1) throws IOException {
    if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
        throw new IllegalArgumentException("Invalid SHA1 format");
    }

    final URL url = new URL(rootURL, String.format("identify/sha1/%s", sha1.toLowerCase()));

    LOGGER.debug("Searching Nexus url {}", url);

    // Determine if we need to use a proxy. The rules:
    // 1) If the proxy is set, AND the setting is set to true, use the proxy
    // 2) Otherwise, don't use the proxy (either the proxy isn't configured,
    //    or proxy is specifically set to false)
    final HttpURLConnection conn;
    final URLConnectionFactory factory = new URLConnectionFactory(settings);
    conn = factory.createHttpURLConnection(url, useProxy);
    conn.setDoOutput(true);
    final String authHeader = buildHttpAuthHeaderValue();
    if (!authHeader.isEmpty()) {
        conn.addRequestProperty("Authorization", authHeader);
    }

    // JSON would be more elegant, but there's not currently a dependency
    // on JSON, so don't want to add one just for this
    conn.addRequestProperty("Accept", "application/xml");
    conn.connect();

    // NOTE(review): the connection is never explicitly disconnected; presumably
    // relying on keep-alive/GC — confirm this is intentional.
    switch (conn.getResponseCode()) {
        case 200:
            try {
                // Secure (XXE-safe) parser for the Nexus XML payload.
                final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder();
                final Document doc = builder.parse(conn.getInputStream());
                final XPath xpath = XPathFactory.newInstance().newXPath();
                final String groupId = xpath
                        .evaluate(
                                "/org.sonatype.nexus.rest.model.NexusArtifact/groupId",
                                doc);
                final String artifactId = xpath.evaluate(
                        "/org.sonatype.nexus.rest.model.NexusArtifact/artifactId",
                        doc);
                final String version = xpath
                        .evaluate(
                                "/org.sonatype.nexus.rest.model.NexusArtifact/version",
                                doc);
                final String link = xpath
                        .evaluate(
                                "/org.sonatype.nexus.rest.model.NexusArtifact/artifactLink",
                                doc);
                final String pomLink = xpath
                        .evaluate(
                                "/org.sonatype.nexus.rest.model.NexusArtifact/pomLink",
                                doc);
                final MavenArtifact ma = new MavenArtifact(groupId, artifactId, version);
                // Artifact/POM links are optional in the response.
                if (link != null && !link.isEmpty()) {
                    ma.setArtifactUrl(link);
                }
                if (pomLink != null && !pomLink.isEmpty()) {
                    ma.setPomUrl(pomLink);
                }
                return ma;
            } catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) {
                // Anything else is jacked-up XML stuff that we really can't recover
                // from well
                throw new IOException(e.getMessage(), e);
            }
        case 404:
            throw new FileNotFoundException("Artifact not found in Nexus");
        default:
            LOGGER.debug("Could not connect to Nexus received response code: {} {}",
                    conn.getResponseCode(), conn.getResponseMessage());
            throw new IOException("Could not connect to Nexus");
    }
}
@Test
@Ignore // presumably requires a live Nexus instance / network access — confirm before enabling
public void testValidSha1() throws Exception {
    // SHA1 of maven-compiler-plugin 3.1 — the lookup must resolve all coordinates.
    MavenArtifact ma = searcher.searchSha1("9977a8d04e75609cf01badc4eb6a9c7198c4c5ea");
    assertEquals("Incorrect group", "org.apache.maven.plugins", ma.getGroupId());
    assertEquals("Incorrect artifact", "maven-compiler-plugin", ma.getArtifactId());
    assertEquals("Incorrect version", "3.1", ma.getVersion());
    assertNotNull("URL Should not be null", ma.getArtifactUrl());
}
// Reads the int field at {@code fieldOffset} from {@code obj} via Unsafe,
// bypassing access checks. Caller must ensure the field is an int on obj's class.
public int getInt(Object obj) {
    return Platform.UNSAFE.getInt(obj, fieldOffset);
}
@Test
public void testRepeatedFields() {
    // Presumably B extends A and shadows f1 with value 2 — the accessor must
    // resolve the field declared on the class it was constructed with, not a
    // same-named field from elsewhere in the hierarchy. TODO confirm A/B fixtures.
    assertEquals(new UnsafeFieldAccessor(A.class, "f1").getInt(new A()), 1);
    assertEquals(new UnsafeFieldAccessor(A.class, "f2").getInt(new A()), 2);
    assertEquals(new UnsafeFieldAccessor(B.class, "f1").getInt(new B()), 2);
    assertEquals(new UnsafeFieldAccessor(B.class, "f2").getInt(new B()), 2);
}
/**
 * Renames/moves a file using the Storegate v4.2 move endpoint in overwrite mode,
 * then refreshes the file-id cache and returns the renamed path carrying the
 * original file's attributes.
 */
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        final StoregateApiClient client = session.getClient();
        final MoveFileRequest move = new MoveFileRequest()
                .name(renamed.getName())
                .parentID(fileid.getFileId(renamed.getParent()))
                .mode(1); // Overwrite
        final HttpEntityEnclosingRequestBase request;
        request = new HttpPost(String.format("%s/v4.2/files/%s/move", client.getBasePath(), fileid.getFileId(file)));
        if(status.getLockId() != null) {
            // Propagate the lock token so a locked file can still be moved.
            request.addHeader("X-Lock-Id", status.getLockId().toString());
        }
        request.setEntity(new StringEntity(new JSON().getContext(move.getClass()).writeValueAsString(move),
                ContentType.create("application/json", StandardCharsets.UTF_8.name())));
        request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
        final HttpResponse response = client.getClient().execute(request);
        try {
            switch(response.getStatusLine().getStatusCode()) {
                case HttpStatus.SC_NO_CONTENT:
                    // Success: carry the old attributes over and refresh the id cache.
                    final PathAttributes attr = new PathAttributes(file.attributes());
                    fileid.cache(file, null);
                    fileid.cache(renamed, attr.getFileId());
                    return renamed.withAttributes(attr);
                default:
                    throw new StoregateExceptionMappingService(fileid).map("Cannot rename {0}",
                            new ApiException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
            }
        }
        finally {
            // Always release the response entity so the connection can be reused.
            EntityUtils.consume(response.getEntity());
        }
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
@Test
public void testMoveOverride() throws Exception {
    // Given: a folder containing a source file and an already-existing target file.
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
            new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
                    EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final String filename = new AlphanumericRandomStringService().random();
    final Path test = new StoregateTouchFeature(session, nodeid)
            .touch(new Path(room, filename, EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path target = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new StoregateTouchFeature(session, nodeid).touch(target, new TransferStatus());

    // When: moving with exists(true), the target must be overwritten.
    new StoregateMoveFeature(session, nodeid).move(test, target, new TransferStatus().exists(true),
            new Delete.DisabledCallback(), new DisabledConnectionCallback());

    // Then: the source is gone and the target exists.
    assertFalse(new DefaultFindFeature(session).find(new Path(room, filename, EnumSet.of(Path.Type.file))));
    assertTrue(new DefaultFindFeature(session).find(target));

    // Cleanup.
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Builds a ForIteration over a numeric (BigDecimal) or date (LocalDate) range.
 * When the start endpoint is neither, a FEEL ERROR event is emitted and
 * EndpointOfRangeOfDifferentTypeException is thrown.
 */
public static ForIteration getForIteration(EvaluationContext ctx, String name, Object start, Object end) {
    validateValues(ctx, start, end);
    if (start instanceof BigDecimal bigDecimal) {
        // NOTE(review): the unchecked cast of `end` assumes validateValues already
        // guarantees both endpoints share a type — confirm.
        return new ForIteration(name, bigDecimal, (BigDecimal) end);
    }
    if (start instanceof LocalDate localDate) {
        return new ForIteration(name, localDate, (LocalDate) end);
    }
    ctx.notifyEvt(() -> new ASTEventBase(FEELEvent.Severity.ERROR,
            Msg.createMessage(Msg.VALUE_X_NOT_A_VALID_ENDPOINT_FOR_RANGE_BECAUSE_NOT_A_NUMBER_NOT_A_DATE, start),
            null));
    throw new EndpointOfRangeOfDifferentTypeException();
}
@Test
void getForIterationNotValidTest() {
    // Non-number/non-date endpoints: expect a not-valid-type failure plus one FEEL event.
    try {
        getForIteration(ctx, "iteration", "NOT", "VALID");
    } catch (Exception e) {
        assertTrue(e instanceof EndpointOfRangeNotValidTypeException);
        final ArgumentCaptor<FEELEvent> captor = ArgumentCaptor.forClass(FEELEvent.class);
        verify(listener, times(1)).onEvent(captor.capture());
        reset(listener);
    }
    // Mismatched endpoint types (number/date) in either order: different-type failure.
    try {
        getForIteration(ctx, "iteration", BigDecimal.valueOf(1), LocalDate.of(2021, 1, 1));
    } catch (Exception e) {
        assertTrue(e instanceof EndpointOfRangeOfDifferentTypeException);
        final ArgumentCaptor<FEELEvent> captor = ArgumentCaptor.forClass(FEELEvent.class);
        verify(listener, times(1)).onEvent(captor.capture());
        reset(listener);
    }
    try {
        getForIteration(ctx, "iteration", LocalDate.of(2021, 1, 1), BigDecimal.valueOf(1));
    } catch (Exception e) {
        assertTrue(e instanceof EndpointOfRangeOfDifferentTypeException);
        final ArgumentCaptor<FEELEvent> captor = ArgumentCaptor.forClass(FEELEvent.class);
        verify(listener, times(1)).onEvent(captor.capture());
        reset(listener);
    }
}
/**
 * Applies the given {@link OpenAPISpecFilter} to an OpenAPI definition and returns a
 * filtered copy: paths, operations, webhooks, tags and component schemas are filtered,
 * and unreferenced definitions are optionally removed.
 *
 * Fixes:
 *  - webhook path items were previously fetched from {@code getPaths()} while iterating
 *    {@code getWebhooks()} keys, so webhook filtering operated on the wrong (typically
 *    null) items;
 *  - the tag list was cloned from the unfiltered {@code openAPI} instead of the
 *    filtered result, undoing any tag filtering done by {@code filterOpenAPI}.
 */
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
    OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
    if (filteredOpenAPI == null) {
        return filteredOpenAPI;
    }

    OpenAPI clone = new OpenAPI();
    clone.info(filteredOpenAPI.getInfo());
    clone.openapi(filteredOpenAPI.getOpenapi());
    clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
    clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
    clone.setExtensions(filteredOpenAPI.getExtensions());
    clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
    clone.setSecurity(filteredOpenAPI.getSecurity());
    clone.setServers(filteredOpenAPI.getServers());
    // Copy the FILTERED tag list (previously cloned from the unfiltered spec).
    clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(filteredOpenAPI.getTags()));

    final Set<String> allowedTags = new HashSet<>();
    final Set<String> filteredTags = new HashSet<>();
    Paths clonedPaths = new Paths();
    if (filteredOpenAPI.getPaths() != null) {
        for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
            PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                // Drop path items whose operations were all filtered out.
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clonedPaths.addPathItem(resourcePath, clonedPathItem);
                }
            }
        }
        clone.paths(clonedPaths);
    }

    // Remove tags that only ever appeared on filtered-out operations.
    filteredTags.removeAll(allowedTags);
    final List<Tag> tags = clone.getTags();
    if (tags != null && !filteredTags.isEmpty()) {
        tags.removeIf(tag -> filteredTags.contains(tag.getName()));
        if (clone.getTags().isEmpty()) {
            clone.setTags(null);
        }
    }

    if (filteredOpenAPI.getWebhooks() != null) {
        for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
            // Fixed: fetch the webhook item from getWebhooks(), not getPaths().
            PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter, filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clone.addWebhooks(resourcePath, clonedPathItem);
                }
            }
        }
    }

    if (filteredOpenAPI.getComponents() != null) {
        clone.components(new Components());
        // Schemas are filtered; the remaining component maps are carried over as-is.
        clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
        clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
        clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
        clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
        clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
        clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
        clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
        clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
        clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
        clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
        clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
    }

    if (filter.isRemovingUnreferencedDefinitions()) {
        clone = removeBrokenReferenceDefinitions(clone);
    }

    return clone;
}
@Test(description = "it should filter away internal model properties")
public void filterAwayInternalModelProperties() throws IOException {
    final OpenAPI openAPI = getOpenAPI(RESOURCE_PATH);
    final InternalModelPropertiesRemoverFilter filter = new InternalModelPropertiesRemoverFilter();
    final OpenAPI filtered = new SpecFilter().filter(openAPI, filter, null, null, null);
    // No schema property starting with '_' (the internal marker) may survive the filter.
    for (Map.Entry<String, Schema> entry : filtered.getComponents().getSchemas().entrySet()) {
        for (String propName : (Set<String>) entry.getValue().getProperties().keySet()) {
            assertFalse(propName.startsWith("_"));
        }
    }
}
/**
 * Converts a fetched result-set value into the requested target type, dispatching
 * on the value's runtime type (temporals, numbers, dates, byte arrays) and on
 * special target types (URL, boolean, String) before falling back to a plain cast.
 *
 * @throws SQLFeatureNotSupportedException when the target type is null or the
 *         conversion is not supported
 */
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException {
    ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null"));
    if (null == value) {
        // Nulls are mapped to the target type's null representation.
        return convertNullValue(convertType);
    }
    if (value.getClass() == convertType) {
        return value;
    }
    if (value instanceof LocalDateTime) {
        return convertLocalDateTimeValue((LocalDateTime) value, convertType);
    }
    if (value instanceof Timestamp) {
        return convertTimestampValue((Timestamp) value, convertType);
    }
    if (URL.class.equals(convertType)) {
        return convertURL(value);
    }
    if (value instanceof Number) {
        return convertNumberValue(value, convertType);
    }
    if (value instanceof Date) {
        return convertDateValue((Date) value, convertType);
    }
    if (value instanceof byte[]) {
        return convertByteArrayValue((byte[]) value, convertType);
    }
    if (boolean.class.equals(convertType)) {
        return convertBooleanValue(value);
    }
    if (String.class.equals(convertType)) {
        return value.toString();
    }
    try {
        // Last resort: a direct cast to the requested reference type.
        return convertType.cast(value);
    } catch (final ClassCastException ignored) {
        throw new SQLFeatureNotSupportedException("getObject with type");
    }
}
@Test
void assertConvertDateValueError() {
    // Converting a java.util.Date to a primitive int is unsupported and must fail.
    assertThrows(UnsupportedDataTypeConversionException.class, () -> ResultSetUtils.convertValue(new Date(), int.class));
}
/**
 * Returns the current KsqlConfig. Intentionally exposes the internal reference
 * (hence the suppressed EI_EXPOSE_REP) — presumably KsqlConfig is treated as
 * effectively immutable by callers; confirm before mutating.
 */
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP")
public KsqlConfig getKsqlConfig() {
    return ksqlConfig;
}
@Test
public void shouldIgnoreRecordsWithDifferentKeyWithinPoll() {
    // Given: one record under an unrelated key followed by the real config record.
    addPollResult(
        "foo",
        "val".getBytes(StandardCharsets.UTF_8),
        KafkaConfigStore.CONFIG_MSG_KEY,
        serializer.serialize("", savedProperties)
    );
    expectRead(consumerBefore);

    // When:
    getKsqlConfig();

    // Then: the log is drained once and nothing beyond the expected reads occurs.
    verifyDrainLog(consumerBefore, 1);
    inOrder.verifyNoMoreInteractions();
}
public Optional<UserDto> authenticate(HttpRequest request) { return extractCredentialsFromHeader(request) .flatMap(credentials -> Optional.ofNullable(authenticate(credentials, request))); }
@Test
public void fail_to_authenticate_when_no_login() {
    // An Authorization header whose Basic payload has an empty login part must be rejected.
    when(request.getHeader(AUTHORIZATION_HEADER)).thenReturn("Basic " + toBase64(":" + A_PASSWORD));

    assertThatThrownBy(() -> underTest.authenticate(request))
        .isInstanceOf(AuthenticationException.class)
        .hasFieldOrPropertyWithValue("source", Source.local(BASIC));

    // No authentication event is emitted for the malformed attempt.
    verifyNoInteractions(authenticationEvent);
}
/**
 * Applies the computed per-column forward-index/dictionary operations
 * (enable/disable forward index, enable/disable dictionary, compression change),
 * validating the expected index state after each mutation.
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter) throws Exception {
    Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
    if (columnOperationsMap.isEmpty()) {
        return;
    }

    for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
        String column = entry.getKey();
        List<Operation> operations = entry.getValue();
        for (Operation operation : operations) {
            switch (operation) {
                case DISABLE_FORWARD_INDEX:
                    // Deletion of the forward index will be handled outside the index handler to ensure that other index
                    // handlers that need the forward index to construct their own indexes will have it available.
                    _tmpForwardIndexColumns.add(column);
                    break;
                case ENABLE_FORWARD_INDEX:
                    // Rebuild the forward index, then sanity-check the dictionary state
                    // matches the column's dictionary/raw configuration.
                    ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
                    if (columnMetadata.hasDictionary()) {
                        if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
                            throw new IllegalStateException(String.format(
                                "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
                        }
                    } else {
                        if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
                            throw new IllegalStateException(
                                String.format("Dictionary should not exist after rebuilding forward index for raw column: %s", column));
                        }
                    }
                    break;
                case DISABLE_DICTIONARY:
                    // If the forward index is also disabled for this column, only the
                    // dictionary is removed; otherwise convert to a raw forward index.
                    Set<String> newForwardIndexDisabledColumns =
                        FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(), _fieldIndexConfigs);
                    if (newForwardIndexDisabledColumns.contains(column)) {
                        removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
                        if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
                            throw new IllegalStateException(
                                String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
                        }
                    } else {
                        disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
                    }
                    break;
                case ENABLE_DICTIONARY:
                    createDictBasedForwardIndex(column, segmentWriter);
                    if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
                        throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
                    }
                    break;
                case CHANGE_INDEX_COMPRESSION_TYPE:
                    rewriteForwardIndexForCompressionChange(column, segmentWriter);
                    break;
                default:
                    throw new IllegalStateException("Unsupported operation for column " + column);
            }
        }
    }
}
/**
 * For each raw (no-dictionary) column not already forward-index-disabled, disables
 * its forward index (without an inverted index) and verifies the rebuilt segment's
 * index map and column metadata.
 */
@Test
public void testDisableForwardIndexForInvertedIndexDisabledColumns()
    throws Exception {
    Set<String> forwardIndexDisabledColumns = new HashSet<>(SV_FORWARD_INDEX_DISABLED_COLUMNS);
    forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_COLUMNS);
    forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS);
    forwardIndexDisabledColumns.addAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
    forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
    forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
    Set<String> noDictColumnsToRemove = new HashSet<>();
    noDictColumnsToRemove.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
    for (String column : _noDictionaryColumns) {
        if (FORWARD_INDEX_DISABLED_RAW_COLUMNS.contains(column) || RAW_SORTED_INDEX_COLUMNS.contains(column)) {
            // Forward index already disabled for these columns, skip them
            continue;
        }
        // Fresh directory/writer per column so each iteration starts from disk state.
        SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
        SegmentDirectory segmentLocalFSDirectory =
            new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
        SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
        IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
        forwardIndexDisabledColumns.add(column);
        indexLoadingConfig.setForwardIndexDisabledColumns(forwardIndexDisabledColumns);
        noDictColumnsToRemove.add(column);
        indexLoadingConfig.removeNoDictionaryColumns(noDictColumnsToRemove);
        // Inverted indexes cover the disabled columns EXCEPT the raw ones and the
        // column under test — this exercises the no-inverted-index path.
        Set<String> invertedIndexColumns = new HashSet<>(forwardIndexDisabledColumns);
        invertedIndexColumns.removeAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
        invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
        invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
        invertedIndexColumns.remove(column);
        indexLoadingConfig.setInvertedIndexColumns(invertedIndexColumns);
        ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
        fwdIndexHandler.updateIndices(writer);
        fwdIndexHandler.postUpdateIndicesCleanup(writer);
        // Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close()
        segmentLocalFSDirectory.close();
        validateIndexMap(column, true, true);
        validateIndexesForForwardIndexDisabledColumns(column);
        // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change.
        ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
        FieldSpec.DataType dataType = metadata.getDataType();
        int dictionaryElementSize = 0;
        if (dataType == FieldSpec.DataType.STRING || dataType == FieldSpec.DataType.BYTES) {
            // This value is based on the rows in createTestData().
            dictionaryElementSize = 7;
        } else if (dataType == FieldSpec.DataType.BIG_DECIMAL) {
            dictionaryElementSize = 4;
        }
        validateMetadataProperties(column, true, dictionaryElementSize, metadata.getCardinality(),
            metadata.getTotalDocs(), dataType, metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(),
            metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(),
            metadata.getMinValue(), metadata.getMaxValue(), false);
    }
}
DateRange getRange(String dateRangeString) throws ParseException { if (dateRangeString == null || dateRangeString.isEmpty()) return null; String[] dateArr = dateRangeString.split("-"); if (dateArr.length > 2 || dateArr.length < 1) return null; // throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed"); ParsedCalendar from = parseDateString(dateArr[0]); ParsedCalendar to; if (dateArr.length == 2) to = parseDateString(dateArr[1]); else // faster and safe? // to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone()); to = parseDateString(dateArr[0]); try { return new DateRange(from, to); } catch (IllegalArgumentException ex) { return null; } }
@Test
public void testParseSimpleDateRangeWithoutYearAndDay() throws ParseException {
    // A month-only range "Jul-Aug" includes all of July and August, excluding
    // the surrounding months.
    DateRange dateRange = dateRangeParser.getRange("Jul-Aug");
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.JUNE, 9)));
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.JULY, 10)));
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 12)));
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 14)));
}
/**
 * Starts building a for-each style task pipeline over the given items.
 *
 * @throws NullPointerException if {@code items} is null
 */
public static <I> Builder<I> foreach(Iterable<I> items) {
    // Fail fast on null so the error surfaces at construction time.
    Iterable<I> checkedItems = requireNonNull(items, "items");
    return new Builder<>(checkedItems);
}
@Test
public void testFailSlowCallRevertSuppressed() throws Throwable {
    // Run the failing task with exception suppression and a reverter attached;
    // the run must still be reported as failed.
    assertFailed(builder()
        .suppressExceptions()
        .revertWith(reverter)
        .onFailure(failures),
        failingTask);
    failingTask.assertInvokedAtLeast("success", FAILPOINT);
    // all committed were reverted
    // identify which task failed from the set
    int failing = failures.getItem().id;
    items.stream()
        .filter(i -> i.id != failing)
        .filter(i -> i.committed)
        .forEach(Item::assertReverted);
    // all reverted items are committed
    items.stream().filter(i -> i.reverted)
        .forEach(Item::assertCommitted);
    // only one failure was triggered
    failures.assertInvoked("failure event", 1);
}
/**
 * Polls the command topic once and executes any new, compatible commands.
 * A cluster-terminate command short-circuits the batch; command-topic deletion
 * is detected when an empty poll coincides with a missing topic.
 */
void fetchAndRunCommands() {
    lastPollTime.set(clock.instant());
    final List<QueuedCommand> commands = commandStore.getNewCommands(NEW_CMDS_TIMEOUT);
    if (commands.isEmpty()) {
        // Empty poll + missing topic => the command topic was deleted.
        if (!commandTopicExists.get()) {
            commandTopicDeleted = true;
        }
        return;
    }

    final List<QueuedCommand> compatibleCommands = checkForIncompatibleCommands(commands);

    final Optional<QueuedCommand> terminateCmd =
        findTerminateCommand(compatibleCommands, commandDeserializer);
    if (terminateCmd.isPresent()) {
        // Terminate takes precedence over everything else in the batch.
        terminateCluster(terminateCmd.get().getAndDeserializeCommand(commandDeserializer));
        return;
    }

    LOG.debug("Found {} new writes to command topic", compatibleCommands.size());
    for (final QueuedCommand command : compatibleCommands) {
        if (closed) {
            // Stop executing once the runner has been closed.
            return;
        }

        executeStatement(command);
    }
}
@Test
public void shouldRetryOnException() {
    // Given: the second command fails twice before succeeding on the third attempt.
    givenQueuedCommands(queuedCommand1, queuedCommand2);
    doThrow(new RuntimeException())
        .doThrow(new RuntimeException())
        .doNothing().when(statementExecutor).handleStatement(queuedCommand2);

    // When:
    commandRunner.fetchAndRunCommands();

    // Then: command1 executes once; command2 is retried until it succeeds.
    final InOrder inOrder = inOrder(statementExecutor);
    inOrder.verify(statementExecutor, times(1)).handleStatement(queuedCommand1);
    inOrder.verify(statementExecutor, times(3)).handleStatement(queuedCommand2);
}
/**
 * Runs the given migration set under a distributed lock scoped to the set's id,
 * so concurrent migrations of the same set are serialized across processes.
 */
public CompletionStage<Void> migrate(MigrationSet set) {
    // One lock node per migration-set id.
    String setLockPath = ZKPaths.makePath(lockPath, set.id());
    InterProcessLock mutex = new InterProcessSemaphoreMutex(client.unwrap(), setLockPath);
    // Acquire the lock asynchronously, then run the migrations while holding it.
    return lockAsync(mutex, lockMax.toMillis(), TimeUnit.MILLISECONDS, executor)
            .thenCompose(ignored -> runMigrationInLock(mutex, set));
}
@Test
public void testBasic() {
    // Three migrations evolve the stored model step-by-step up to v3.
    Migration m1 = () -> Arrays.asList(v1opA, v1opB);
    Migration m2 = () -> Collections.singletonList(v2op);
    Migration m3 = () -> Collections.singletonList(v3op);
    MigrationSet migrationSet = MigrationSet.build("1", Arrays.asList(m1, m2, m3));
    complete(manager.migrate(migrationSet));
    // The migrated data must read back cleanly through the v3 model spec.
    ModeledFramework<ModelV3> v3Client = ModeledFramework.wrap(client, v3Spec);
    complete(v3Client.read(), (m, e) -> {
        assertEquals(m.getAge(), 30);
        assertEquals(m.getFirstName(), "One");
        assertEquals(m.getLastName(), "Two");
    });

    int count = manager.debugCount.get();
    complete(manager.migrate(migrationSet));
    assertEquals(manager.debugCount.get(), count);   // second call should do nothing
}
/**
 * Registers the committable summary for the summary's subtask. Updating an existing
 * entry for the same subtask/checkpoint is not supported and fails fast.
 */
void upsertSummary(CommittableSummary<CommT> summary) {
    // Checkpoint id is optional on the summary; store null when absent.
    final Long checkpointId =
            summary.getCheckpointId().isPresent()
                    ? summary.getCheckpointId().getAsLong()
                    : null;
    final SubtaskCommittableManager<CommT> fresh =
            new SubtaskCommittableManager<>(
                    summary.getNumberOfCommittables(), subtaskId, checkpointId, metricGroup);
    if (subtasksCommittableManagers.putIfAbsent(summary.getSubtaskId(), fresh) != null) {
        throw new UnsupportedOperationException(
                "Currently it is not supported to update the CommittableSummary for a checkpoint coming from the same subtask. Please check the status of FLINK-25920");
    }
}
// Verifies that registering a second summary for the same subtask fails with
// UnsupportedOperationException referencing FLINK-25920.
@Test void testUpdateCommittableSummary() { final CheckpointCommittableManagerImpl<Integer> checkpointCommittables = new CheckpointCommittableManagerImpl<>(1, 1, 1L, METRIC_GROUP); checkpointCommittables.upsertSummary(new CommittableSummary<>(1, 1, 1L, 1, 0, 0)); assertThatThrownBy( () -> checkpointCommittables.upsertSummary( new CommittableSummary<>(1, 1, 1L, 2, 0, 0))) .isInstanceOf(UnsupportedOperationException.class) .hasMessageContaining("FLINK-25920"); }
/** Task entry point: delegates each scheduled execution to {@code doHealthCheck()}. */
@Override public void run() { doHealthCheck(); }
// With instance expiration enabled, running the beat-check task must remove the
// injected (expired) instance so the client has no published instances left.
@Test void testRunHealthyInstanceWithExpire() { injectInstance(true, 0); when(globalConfig.isExpireInstance()).thenReturn(true); beatCheckTask.run(); assertTrue(client.getAllInstancePublishInfo().isEmpty()); }
DateRange getRange(String dateRangeString) throws ParseException { if (dateRangeString == null || dateRangeString.isEmpty()) return null; String[] dateArr = dateRangeString.split("-"); if (dateArr.length > 2 || dateArr.length < 1) return null; // throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed"); ParsedCalendar from = parseDateString(dateArr[0]); ParsedCalendar to; if (dateArr.length == 2) to = parseDateString(dateArr[1]); else // faster and safe? // to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone()); to = parseDateString(dateArr[0]); try { return new DateRange(from, to); } catch (IllegalArgumentException ex) { return null; } }
// Verifies inclusive boundary handling of a cross-year range: the day before the start
// and the day after the end are out of range, the start and end dates themselves are in.
@Test public void testParseReverseDateRange() { DateRange dateRange = dateRangeParser.getRange("2014 Aug 14-2015 Mar 10"); assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 13))); assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 14))); assertTrue(dateRange.isInRange(getCalendar(2015, Calendar.MARCH, 10))); assertFalse(dateRange.isInRange(getCalendar(2015, Calendar.MARCH, 11))); }
/**
 * Runs a sniff pass over the reader for every candidate delimiter and returns the
 * results sorted. The reader is rewound (mark/reset) between candidates, so it is
 * wrapped in a BufferedReader when it does not support marking.
 */
List<CSVResult> sniff(Reader reader) throws IOException {
    // mark/reset is mandatory for rewinding between candidate delimiters.
    final Reader markable = reader.markSupported() ? reader : new BufferedReader(reader);
    final List<CSVResult> results = new ArrayList<>();
    for (final char candidate : delimiters) {
        markable.mark(markLimit);
        try {
            results.add(new Snifflet(candidate).sniff(markable));
        } finally {
            // Always rewind so the next candidate sees the same input.
            markable.reset();
        }
    }
    Collections.sort(results);
    return results;
}
// Whitespace before or after a quote must not break delimiter detection: both fixtures
// should yield one result per candidate delimiter with ',' ranked best.
@Test public void testAllowWhiteSpacesAroundAQuote() throws Exception { List<CSVResult> results = sniff(DELIMITERS, ALLOW_SPACES_BEFORE_QUOTE, StandardCharsets.UTF_8); assertEquals(4, results.size()); assertEquals(Character.valueOf(','), results.get(0).getDelimiter()); results = sniff(DELIMITERS, ALLOW_SPACES_AFTER_QUOTE, StandardCharsets.UTF_8); assertEquals(4, results.size()); assertEquals(Character.valueOf(','), results.get(0).getDelimiter()); }
private DataSource createDataSource( SourceDef sourceDef, StreamExecutionEnvironment env, Configuration pipelineConfig) { // Search the data source factory DataSourceFactory sourceFactory = FactoryDiscoveryUtils.getFactoryByIdentifier( sourceDef.getType(), DataSourceFactory.class); // Add source JAR to environment FactoryDiscoveryUtils.getJarPathByIdentifier(sourceFactory) .ifPresent(jar -> FlinkEnvironmentUtils.addJar(env, jar)); DataSource dataSource = sourceFactory.createDataSource( new FactoryHelper.DefaultContext( sourceDef.getConfig(), pipelineConfig, Thread.currentThread().getContextClassLoader())); return dataSource; }
// Resolves the factory for a SourceDef and creates a data source from it, asserting the
// concrete test implementation is produced and the 'host' option is propagated.
@Test void testCreateDataSourceFromSourceDef() { SourceDef sourceDef = new SourceDef( "data-source-factory-1", "source-database", Configuration.fromMap( ImmutableMap.<String, String>builder() .put("host", "0.0.0.0") .build())); DataSourceFactory sourceFactory = FactoryDiscoveryUtils.getFactoryByIdentifier( sourceDef.getType(), DataSourceFactory.class); DataSource dataSource = sourceFactory.createDataSource( new FactoryHelper.DefaultContext( sourceDef.getConfig(), new Configuration(), Thread.currentThread().getContextClassLoader())); Assertions.assertTrue(dataSource instanceof DataSourceFactory1.TestDataSource); Assertions.assertEquals( "0.0.0.0", ((DataSourceFactory1.TestDataSource) dataSource).getHost()); }
/**
 * PathFilter that accepts a file only when it is among the latest Hudi base files of
 * its partition, or when the path does not belong to a Hudi table at all. Decisions
 * are memoized per parent folder in hoodiePathCache (accepted latest files) and
 * nonHoodiePathCache (folders known not to be Hudi). Files under the .hoodie metadata
 * folder are always rejected. Honors the TIMESTAMP_AS_OF config by building a
 * time-travel file-system view limited to completed instants at or before that time.
 * NOTE(review): assumes {@code path} is a file — its immediate parent is treated as
 * the partition folder.
 */
@Override public boolean accept(Path path) { if (engineContext == null) { this.engineContext = new HoodieLocalEngineContext(this.conf); } if (LOG.isDebugEnabled()) { LOG.debug("Checking acceptance for path " + path); } Path folder = null; try { if (storage == null) { storage = new HoodieHadoopStorage(convertToStoragePath(path), conf); }
// Assumes path is a file
folder = path.getParent(); // get the immediate parent.
// Try to use the caches.
if (nonHoodiePathCache.contains(folder.toString())) { if (LOG.isDebugEnabled()) { LOG.debug("Accepting non-hoodie path from cache: " + path); } return true; } if (hoodiePathCache.containsKey(folder.toString())) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("%s Hoodie path checked against cache, accept => %s \n", path, hoodiePathCache.get(folder.toString()).contains(path))); } return hoodiePathCache.get(folder.toString()).contains(path); }
// Skip all files that are descendants of .hoodie in its path.
String filePath = path.toString(); if (filePath.contains("/" + HoodieTableMetaClient.METAFOLDER_NAME + "/") || filePath.endsWith("/" + HoodieTableMetaClient.METAFOLDER_NAME)) { if (LOG.isDebugEnabled()) { LOG.debug(String.format("Skipping Hoodie Metadata file %s \n", filePath)); } return false; }
// Perform actual checking.
Path baseDir; StoragePath storagePath = convertToStoragePath(folder); if (HoodiePartitionMetadata.hasPartitionMetadata(storage, storagePath)) { HoodiePartitionMetadata metadata = new HoodiePartitionMetadata(storage, storagePath); metadata.readFromFS(); baseDir = HoodieHiveUtils.getNthParent(folder, metadata.getPartitionDepth()); } else { baseDir = safeGetParentsParent(folder); } if (baseDir != null) {
// Check whether baseDir in nonHoodiePathCache
if (nonHoodiePathCache.contains(baseDir.toString())) { if (LOG.isDebugEnabled()) { LOG.debug("Accepting non-hoodie path from cache: " + path); } return true; } HoodieTableFileSystemView fsView = null; try { HoodieTableMetaClient metaClient = metaClientCache.get(baseDir.toString()); if (null == metaClient) { metaClient = HoodieTableMetaClient.builder() .setConf(storage.getConf().newInstance()).setBasePath(baseDir.toString()) .setLoadActiveTimelineOnLoad(true).build(); metaClientCache.put(baseDir.toString(), metaClient); } final Configuration conf = getConf(); final String timestampAsOf = conf.get(TIMESTAMP_AS_OF.key()); if (nonEmpty(timestampAsOf)) { validateTimestampAsOf(metaClient, timestampAsOf);
// Build FileSystemViewManager with specified time, it's necessary to set this config when you may
// access old version files. For example, in spark side, using "hoodie.datasource.read.paths"
// which contains old version files, if not specify this value, these files will be filtered.
fsView = FileSystemViewManager.createInMemoryFileSystemViewWithTimeline(engineContext, metaClient, HoodieInputFormatUtils.buildMetadataConfig(conf), metaClient.getActiveTimeline().filterCompletedInstants().findInstantsBeforeOrEquals(timestampAsOf)); } else { fsView = FileSystemViewManager.createInMemoryFileSystemView(engineContext, metaClient, HoodieInputFormatUtils.buildMetadataConfig(conf)); } String partition = HadoopFSUtils.getRelativePartitionPath(new Path(metaClient.getBasePath().toString()), folder); List<HoodieBaseFile> latestFiles = fsView.getLatestBaseFiles(partition).collect(Collectors.toList());
// populate the cache
if (!hoodiePathCache.containsKey(folder.toString())) { hoodiePathCache.put(folder.toString(), new HashSet<>()); } LOG.info("Based on hoodie metadata from base path: " + baseDir.toString() + ", caching " + latestFiles.size() + " files under " + folder); for (HoodieBaseFile lfile : latestFiles) { hoodiePathCache.get(folder.toString()).add(new Path(lfile.getPath())); }
// accept the path, if its among the latest files.
if (LOG.isDebugEnabled()) { LOG.debug(String.format("%s checked after cache population, accept => %s \n", path, hoodiePathCache.get(folder.toString()).contains(path))); } return hoodiePathCache.get(folder.toString()).contains(path); } catch (TableNotFoundException e) {
// Non-hoodie path, accept it.
if (LOG.isDebugEnabled()) { LOG.debug(String.format("(1) Caching non-hoodie path under %s with basePath %s \n", folder.toString(), baseDir.toString())); } nonHoodiePathCache.add(folder.toString()); nonHoodiePathCache.add(baseDir.toString()); return true; } finally { if (fsView != null) { fsView.close(); } } } else {
// files is at < 3 level depth in FS tree, can't be hoodie dataset
if (LOG.isDebugEnabled()) { LOG.debug(String.format("(2) Caching non-hoodie path under %s \n", folder.toString())); } nonHoodiePathCache.add(folder.toString()); return true; } } catch (Exception e) { String msg = "Error checking path :" + path + ", under folder: " + folder; LOG.error(msg, e); throw new HoodieException(msg, e); } }
// Paths outside any Hudi table must be accepted, and both the folder and the file's
// parent end up in the non-Hoodie cache (size 2 after the two accept() calls).
@Test public void testNonHoodiePaths() throws IOException { java.nio.file.Path path1 = Paths.get(basePath, "nonhoodiefolder"); Files.createDirectories(path1); assertTrue(pathFilter.accept(new Path(path1.toUri()))); java.nio.file.Path path2 = Paths.get(basePath, "nonhoodiefolder/somefile"); Files.createFile(path2); assertTrue(pathFilter.accept(new Path(path2.toUri()))); assertEquals(2, pathFilter.nonHoodiePathCache.size(), "NonHoodiePathCache size should be 2"); }
/** Returns the precomputed result metadata (data type and single/multi-value flag). */
@Override public TransformResultMetadata getResultMetadata() { return _resultMetadata; }
// Exercises array_element_at_double with a random 1-based index: result must be a
// single-value DOUBLE, yielding the element at the index or the null placeholder when
// the row's array is too short.
@Test public void testArrayElementAtDouble() { Random rand = new Random(); int index = rand.nextInt(MAX_NUM_MULTI_VALUES); ExpressionContext expression = RequestContextUtils.getExpression( String.format("array_element_at_double(%s, %d)", DOUBLE_MV_COLUMN, index + 1)); TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap); assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper); assertEquals(transformFunction.getResultMetadata().getDataType(), DataType.DOUBLE); assertTrue(transformFunction.getResultMetadata().isSingleValue()); double[] expectedValues = new double[NUM_ROWS]; for (int i = 0; i < NUM_ROWS; i++) { expectedValues[i] = _doubleMVValues[i].length > index ? _doubleMVValues[i][index] : NullValuePlaceHolder.DOUBLE; } testTransformFunction(transformFunction, expectedValues); }
/**
 * Converts a Java object into its Ruby equivalent. Null maps to Ruby nil; otherwise a
 * converter registered for the object's exact class is used when available, falling
 * back to {@code fallbackConvert} for unregistered classes.
 */
public static IRubyObject deep(final Ruby runtime, final Object input) {
    if (input == null) {
        return runtime.getNil();
    }
    final Class<?> cls = input.getClass();
    final Rubyfier.Converter converter = CONVERTER_MAP.get(cls);
    // No registered converter: defer to the slower fallback path.
    return converter == null
            ? fallbackConvert(runtime, input, cls)
            : converter.convert(runtime, input);
}
// A Java BigInteger must convert to a RubyBignum carrying the same numeric value.
@Test public void testDeepWithBigInteger() { Object result = Rubyfier.deep(RubyUtil.RUBY, new BigInteger("1")); assertEquals(RubyBignum.class, result.getClass()); assertEquals(1L, ((RubyBignum)result).getLongValue()); }
/**
 * Executes the pipeline on the direct runner. Options are round-tripped through JSON
 * (failing with IllegalArgumentException if not serializable), the pipeline is
 * rewritten and traversed to build the direct graph and keyed-PValue set, display
 * data is validated, and an ExecutorServiceParallelExecutor is started over the
 * graph. When blockOnRun is set, waits for completion and rethrows user-code
 * failures as PipelineExecutionException. Metrics support is enabled for the
 * duration of the run and switched off again in the finally block.
 */
@Override public DirectPipelineResult run(Pipeline pipeline) { try { options = MAPPER .readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class) .as(DirectOptions.class); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } performRewrites(pipeline); MetricsEnvironment.setMetricsSupported(true); try { DirectGraphVisitor graphVisitor = new DirectGraphVisitor(); pipeline.traverseTopologically(graphVisitor); @SuppressWarnings("rawtypes") KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create(); pipeline.traverseTopologically(keyedPValueVisitor); DisplayDataValidator.validatePipeline(pipeline); DisplayDataValidator.validateOptions(options); ExecutorService metricsPool = Executors.newCachedThreadPool( new ThreadFactoryBuilder() .setThreadFactory(MoreExecutors.platformThreadFactory()) .setDaemon(false) // otherwise you say you want to leak, please don't!
.setNameFormat("direct-metrics-counter-committer") .build()); DirectGraph graph = graphVisitor.getGraph(); EvaluationContext context = EvaluationContext.create( clockSupplier.get(), Enforcement.bundleFactoryFor(enabledEnforcements, graph), graph, keyedPValueVisitor.getKeyedPValues(), metricsPool); TransformEvaluatorRegistry registry = TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options); PipelineExecutor executor = ExecutorServiceParallelExecutor.create( options.getTargetParallelism(), registry, Enforcement.defaultModelEnforcements(enabledEnforcements), context, metricsPool); executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options)); DirectPipelineResult result = new DirectPipelineResult(executor, context); if (options.isBlockOnRun()) { try { result.waitUntilFinish(); } catch (UserCodeException userException) { throw new PipelineExecutionException(userException.getCause()); } catch (Throwable t) { if (t instanceof RuntimeException) { throw (RuntimeException) t; } throw new
RuntimeException(t); } } return result; } finally { MetricsEnvironment.setMetricsSupported(false); } }
// End-to-end word count on the direct runner: counts per element, formats them as
// "word: count" strings, and asserts the exact expected multiset of outputs.
@Test public void wordCountShouldSucceed() throws Throwable { Pipeline p = getPipeline(); PCollection<KV<String, Long>> counts = p.apply(Create.of("foo", "bar", "foo", "baz", "bar", "foo")) .apply( MapElements.via( new SimpleFunction<String, String>() { @Override public String apply(String input) { return input; } })) .apply(Count.perElement()); PCollection<String> countStrs = counts.apply( MapElements.via( new SimpleFunction<KV<String, Long>, String>() { @Override public String apply(KV<String, Long> input) { return String.format("%s: %s", input.getKey(), input.getValue()); } })); PAssert.that(countStrs).containsInAnyOrder("baz: 1", "bar: 2", "foo: 3"); DirectPipelineResult result = (DirectPipelineResult) p.run(); result.waitUntilFinish(); }
/**
 * Computes the event list describing the difference between the before/after cluster
 * states in {@code params}: per-node diff events and a whole-cluster diff event (both
 * computed against baseline params) plus diffs of the derived bucket-space states.
 * NOTE(review): createBaselineParams(params) is invoked twice — presumably a pure
 * factory; confirm before hoisting into a single local.
 */
public static List<Event> computeEventDiff(final Params params) { final List<Event> events = new ArrayList<>(); emitPerNodeDiffEvents(createBaselineParams(params), events); emitWholeClusterDiffEvent(createBaselineParams(params), events); emitDerivedBucketSpaceStatesDiffEvents(params, events); return events; }
// When a resource exhaustion (cpu_brake_fluid on node 2) disappears between two
// feed-blocked states, exactly one node event must be emitted describing its removal.
@Test void removed_exhaustion_in_feed_block_resource_set_emits_node_event() { final EventFixture fixture = EventFixture.createForNodes(3) .clusterStateBefore("distributor:3 storage:3") .feedBlockBefore(ClusterStateBundle.FeedBlock.blockedWith( "we're closed", setOf(exhaustion(1, "oil"), exhaustion(2, "cpu_brake_fluid")))) .clusterStateAfter("distributor:3 storage:3") .feedBlockAfter(ClusterStateBundle.FeedBlock.blockedWith( "we're still closed", setOf(exhaustion(1, "oil")))); final List<Event> events = fixture.computeEventDiff(); assertThat(events.size(), equalTo(1)); assertThat(events, hasItem(allOf( eventForNode(storageNode(2)), nodeEventWithDescription("Removed resource exhaustion: cpu_brake_fluid on node 2 [unknown hostname] (<= 70.0%)"), nodeEventForBaseline()))); }
/**
 * Maps a SeaTunnel column definition back to a MySQL type definition (native type,
 * column type string, data type, length/precision/scale). Decimal precision and scale
 * are clamped into MySQL's supported ranges with a warning; BYTES/STRING columns are
 * sized into VARBINARY/VARCHAR or the BLOB/TEXT family based on column length;
 * TIME/DATETIME fractional-second scales are clamped and only emitted for MySQL
 * versions newer than 5.5. Unsupported SQL types raise a convert-to-connector-type
 * error.
 */
@Override public BasicTypeDefine<MysqlType> reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.<MysqlType>builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case NULL: builder.nativeType(MysqlType.NULL); builder.columnType(MYSQL_NULL); builder.dataType(MYSQL_NULL); break; case BOOLEAN: builder.nativeType(MysqlType.BOOLEAN); builder.columnType(String.format("%s(%s)", MYSQL_TINYINT, 1)); builder.dataType(MYSQL_TINYINT); builder.length(1L); break; case TINYINT: builder.nativeType(MysqlType.TINYINT); builder.columnType(MYSQL_TINYINT); builder.dataType(MYSQL_TINYINT); break; case SMALLINT: builder.nativeType(MysqlType.SMALLINT); builder.columnType(MYSQL_SMALLINT); builder.dataType(MYSQL_SMALLINT); break; case INT: builder.nativeType(MysqlType.INT); builder.columnType(MYSQL_INT); builder.dataType(MYSQL_INT); break; case BIGINT: builder.nativeType(MysqlType.BIGINT); builder.columnType(MYSQL_BIGINT); builder.dataType(MYSQL_BIGINT); break; case FLOAT: builder.nativeType(MysqlType.FLOAT); builder.columnType(MYSQL_FLOAT); builder.dataType(MYSQL_FLOAT); break; case DOUBLE: builder.nativeType(MysqlType.DOUBLE); builder.columnType(MYSQL_DOUBLE); builder.dataType(MYSQL_DOUBLE); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.nativeType(MysqlType.DECIMAL); builder.columnType(String.format("%s(%s,%s)", MYSQL_DECIMAL, precision, scale)); builder.dataType(MYSQL_DECIMAL); builder.precision(precision); builder.scale(scale); break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.nativeType(MysqlType.VARBINARY); builder.columnType( String.format("%s(%s)", MYSQL_VARBINARY, MAX_VARBINARY_LENGTH / 2)); builder.dataType(MYSQL_VARBINARY); } else if (column.getColumnLength() < MAX_VARBINARY_LENGTH) { builder.nativeType(MysqlType.VARBINARY); builder.columnType( String.format("%s(%s)", MYSQL_VARBINARY, column.getColumnLength())); builder.dataType(MYSQL_VARBINARY); } else if (column.getColumnLength() < POWER_2_24) { builder.nativeType(MysqlType.MEDIUMBLOB); builder.columnType(MYSQL_MEDIUMBLOB); builder.dataType(MYSQL_MEDIUMBLOB); } else { builder.nativeType(MysqlType.LONGBLOB); builder.columnType(MYSQL_LONGBLOB); builder.dataType(MYSQL_LONGBLOB); } break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.nativeType(MysqlType.LONGTEXT); builder.columnType(MYSQL_LONGTEXT); builder.dataType(MYSQL_LONGTEXT); }
else if (column.getColumnLength() < POWER_2_8) { builder.nativeType(MysqlType.VARCHAR); builder.columnType( String.format("%s(%s)", MYSQL_VARCHAR, column.getColumnLength())); builder.dataType(MYSQL_VARCHAR); } else if (column.getColumnLength() < POWER_2_16) { builder.nativeType(MysqlType.TEXT); builder.columnType(MYSQL_TEXT); builder.dataType(MYSQL_TEXT); } else if (column.getColumnLength() < POWER_2_24) { builder.nativeType(MysqlType.MEDIUMTEXT); builder.columnType(MYSQL_MEDIUMTEXT); builder.dataType(MYSQL_MEDIUMTEXT); } else { builder.nativeType(MysqlType.LONGTEXT); builder.columnType(MYSQL_LONGTEXT); builder.dataType(MYSQL_LONGTEXT); } break; case DATE: builder.nativeType(MysqlType.DATE); builder.columnType(MYSQL_DATE); builder.dataType(MYSQL_DATE); break; case TIME: builder.nativeType(MysqlType.TIME); builder.dataType(MYSQL_TIME); if (version.isAtOrBefore(MySqlVersion.V_5_5)) { builder.columnType(MYSQL_TIME); } else if (column.getScale() != null && column.getScale() > 0) { int timeScale = column.getScale(); if (timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to time({})", column.getName(), column.getScale(), MAX_SCALE, timeScale); } builder.columnType(String.format("%s(%s)", MYSQL_TIME, timeScale)); builder.scale(timeScale); } else { builder.columnType(MYSQL_TIME); } break; case TIMESTAMP: builder.nativeType(MysqlType.DATETIME); builder.dataType(MYSQL_DATETIME); if (version.isAtOrBefore(MySqlVersion.V_5_5)) { builder.columnType(MYSQL_DATETIME); } else if (column.getScale() != null && column.getScale() > 0) { int timestampScale = column.getScale(); if (timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(),
MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType(String.format("%s(%s)", MYSQL_DATETIME, timestampScale)); builder.scale(timestampScale); } else { builder.columnType(MYSQL_DATETIME); } break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.MYSQL, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
// A SeaTunnel BYTE column must reconvert to MySQL TINYINT for native, column and data
// types, preserving the column name.
@Test public void testReconvertByte() { Column column = PhysicalColumn.builder().name("test").dataType(BasicType.BYTE_TYPE).build(); BasicTypeDefine<MysqlType> typeDefine = MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(MysqlType.TINYINT, typeDefine.getNativeType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_TINYINT, typeDefine.getColumnType()); Assertions.assertEquals(MySqlTypeConverter.MYSQL_TINYINT, typeDefine.getDataType()); }
/**
 * Renders a human-readable description of an ES search request, appending the target
 * indices when the request names any.
 */
static String computeDetailsAsString(SearchRequest searchRequest) {
    final StringBuilder details = new StringBuilder();
    details.append(String.format("ES search request '%s'", searchRequest));
    if (searchRequest.indices().length > 0) {
        // Only mention indices when the request actually targets some.
        details.append(String.format(ON_INDICES_MESSAGE, Arrays.toString(searchRequest.indices())));
    }
    return details.toString();
}
// Verifies the indices-stats formatting overload of computeDetailsAsString for a
// varargs list of index names.
@Test public void should_format_IndicesStats() { assertThat(EsRequestDetails.computeDetailsAsString("index-1", "index-2")) .isEqualTo("ES indices stats request on indices 'index-1,index-2'"); }
/**
 * Returns true when the meter's id name equals the given metric name.
 * Invoked on the meter's own name so a null {@code metricName} yields false rather
 * than throwing.
 */
protected static boolean isMatchingMetricName(Meter meter, String metricName) {
    final String actualName = meter.getId().getName();
    return actualName.equals(metricName);
}
// The fixture meter is expected to be named "testMetric", so the match must return true.
@Test void matchingMetricNameReturnsTrue() { assertTrue(MetricsUtils.isMatchingMetricName(meter, "testMetric")); }
/**
 * Orders virtual files by name, ignoring case.
 * NOTE(review): case-insensitive ordering may be inconsistent with equals — confirm no
 * caller relies on compareTo == 0 implying equality.
 */
@Override public final int compareTo(VirtualFile o) { return getName().compareToIgnoreCase(o.getName()); }
// Two virtual files with identical names (same parent, same child name) must compare
// as equal (compareTo == 0).
@Test public void testCompareTo_Same() throws IOException { String parentFolder = "parentFolder"; File parentFile = tmp.newFolder(parentFolder); String child1 = "child1"; File childFile1 = new File(parentFile, child1); VirtualFile vf1 = new VirtualFileMinimalImplementation(childFile1); String child2 = child1; File childFile2 = new File(parentFile, child2); VirtualFile vf2 = new VirtualFileMinimalImplementation(childFile2); assertThat(vf1.compareTo(vf2), is(0)); }
/**
 * URL-encodes the values of the given parameters and joins them as key=value pairs.
 * Keys are appended verbatim (not encoded); entries with empty values are skipped.
 *
 * @param params   parameters to encode; may be {@code null}
 * @param encoding charset name passed to {@link java.net.URLEncoder#encode}
 * @return the encoded query string, or {@code null} when params is null or empty
 * @throws UnsupportedEncodingException if the encoding is not supported
 */
// NOTE(review): the returned string keeps a trailing '&' after the last pair — confirm
// callers depend on this before changing it.
public static String encodingParams(Map<String, String> params, String encoding) throws UnsupportedEncodingException { StringBuilder sb = new StringBuilder(); if (null == params || params.isEmpty()) { return null; } for (Map.Entry<String, String> entry : params.entrySet()) { if (StringUtils.isEmpty(entry.getValue())) { continue; } sb.append(entry.getKey()).append('='); sb.append(URLEncoder.encode(entry.getValue(), encoding)); sb.append('&'); } return sb.toString(); }
// Exercises the List-based overload of encodingParams: alternating key/value entries,
// empty values skipped, and values URL-encoded (e.g. '=' and Chinese characters).
@Test void testEncodingParamsList() throws UnsupportedEncodingException { List<String> params = new LinkedList<>(); params.add("a"); params.add(""); params.add("b"); params.add("x"); params.add("uriChar"); params.add("="); params.add("chinese"); params.add("测试"); assertEquals("a=&b=x&uriChar=%3D&chinese=%E6%B5%8B%E8%AF%95", HttpUtils.encodingParams(params, "UTF-8")); }
/**
 * Builds the unique identifier key: the serviceInterface:version:group:side prefix
 * joined with any extra params via {@code KeyTypeEnum.UNIQUE_KEY}.
 */
protected String getIdentifierKey(String... params) { String prefix = KeyTypeEnum.UNIQUE_KEY.build(serviceInterface, version, group, side); return KeyTypeEnum.UNIQUE_KEY.build(prefix, params); }
// The identifier key must be the colon-joined interface:version:group:side prefix
// followed by the extra param.
@Test void getIdentifierKey() { String identifierKey = baseServiceMetadataIdentifier.getIdentifierKey("appName"); Assertions.assertEquals(identifierKey, "BaseServiceMetadataIdentifierTest:1.0.0:test:provider:appName"); }
/**
 * Registers a plugin entry in the JBang plugin config. Missing command/description/
 * firstVersion are resolved from the known plugin types; the command defaults to the
 * plugin name and firstVersion falls back to the explicit version. When only
 * groupId/artifactId are given, a Maven GAV is derived (version defaulting to the
 * current Camel catalog version). The entry is stored under the plugin name and the
 * config saved.
 *
 * @return always 0 (success)
 */
@Override public Integer doCall() throws Exception {
    JsonObject pluginConfig = loadConfig();
    JsonObject plugins = pluginConfig.getMap("plugins");
    Optional<PluginType> camelPlugin = PluginType.findByName(name);
    if (camelPlugin.isPresent()) { if (command == null) { command = camelPlugin.get().getCommand(); } if (description == null) { description = camelPlugin.get().getDescription(); } if (firstVersion == null) { firstVersion = camelPlugin.get().getFirstVersion(); } }
    if (command == null) {
        // use plugin name as command
        command = name;
    }
    if (firstVersion == null) {
        // fallback to version specified
        firstVersion = version;
    }
    JsonObject plugin = new JsonObject();
    plugin.put("name", name);
    plugin.put("command", command);
    if (firstVersion != null) { plugin.put("firstVersion", firstVersion); }
    plugin.put("description", description != null ? description : "Plugin %s called with command %s".formatted(name, command));
    if (gav == null && (groupId != null && artifactId != null)) { if (version == null) { CamelCatalog catalog = new DefaultCamelCatalog(); version = catalog.getCatalogVersion(); } gav = "%s:%s:%s".formatted(groupId, artifactId, version); }
    if (gav != null) { plugin.put("dependency", gav); }
    plugins.put(name, plugin);
    saveConfig(pluginConfig);
    return 0;
}
// Adding a plugin with an explicit GAV must store it as the 'dependency' field of the
// persisted plugin entry, with no console output.
@Test public void shouldUseMavenGAV() throws Exception { PluginAdd command = new PluginAdd(new CamelJBangMain().withPrinter(printer)); command.name = "foo-plugin"; command.command = "foo"; command.gav = "org.apache.camel:foo-plugin:1.0.0"; command.doCall(); Assertions.assertEquals("", printer.getOutput()); Assertions.assertEquals("{\"plugins\":{\"foo-plugin\":{\"name\":\"foo-plugin\",\"command\":\"foo\"," + "\"description\":\"Plugin foo-plugin called with command foo\",\"dependency\":\"org.apache.camel:foo-plugin:1.0.0\"}}}", PluginHelper.getOrCreatePluginConfig().toJson()); }
/**
 * Executes LIST CONNECTORS: fetches all connector names from the Connect cluster,
 * describes each one, and returns those whose type matches the statement's scope.
 * Connectors that cannot be described are reported as warnings (and, for scope ALL,
 * still listed with UNKNOWN type). A failure to list connectors at all is surfaced as
 * a KsqlRestException carrying the Connect HTTP status.
 */
@SuppressWarnings("OptionalGetWithoutIsPresent")
public static StatementExecutorResponse execute(
    final ConfiguredStatement<ListConnectors> configuredStatement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext ksqlExecutionContext,
    final ServiceContext serviceContext
) {
  final ConnectClient connectClient = serviceContext.getConnectClient();
  // Fix: reuse the client fetched above instead of asking the service context again.
  final ConnectResponse<List<String>> connectors = connectClient.connectors();
  if (connectors.error().isPresent()) {
    final String errorMsg = "Failed to list connectors: " + connectors.error().get();
    throw new KsqlRestException(EndpointResponse.create()
        .status(connectors.httpCode())
        .entity(new KsqlErrorMessage(Errors.toErrorCode(connectors.httpCode()), errorMsg))
        .build()
    );
  }
  final List<SimpleConnectorInfo> infos = new ArrayList<>();
  final List<KsqlWarning> warnings = new ArrayList<>();
  final Scope scope = configuredStatement.getStatement().getScope();
  for (final String name : connectors.datum().get()) {
    final ConnectResponse<ConnectorInfo> response = connectClient.describe(name);
    if (response.datum().filter(i -> inScope(i.type(), scope)).isPresent()) {
      final ConnectResponse<ConnectorStateInfo> status = connectClient.status(name);
      infos.add(fromConnectorInfoResponse(name, response, status));
    } else if (response.error().isPresent()) {
      // Describe failed: for scope ALL, still surface the connector with UNKNOWN type.
      if (scope == Scope.ALL) {
        infos.add(new SimpleConnectorInfo(name, ConnectorType.UNKNOWN, null, null));
      }
      warnings.add(
          new KsqlWarning(
              String.format(
                  "Could not describe connector %s: %s",
                  name, response.error().get())));
    }
  }
  return StatementExecutorResponse.handled(Optional.of(
      new ConnectorList(
          configuredStatement.getMaskedStatementText(),
          warnings,
          infos)
  ));
}
// A connector whose status reports no running tasks must be listed with a
// "WARNING (0/2 tasks RUNNING)" state rather than dropped or errored.
@Test public void shouldLabelConnectorsWithNoRunningTasksAsWarning() {
    // Given:
    when(connectClient.status("connector")) .thenReturn(ConnectResponse.success(STATUS_WARNING, HttpStatus.SC_OK)); when(connectClient.connectors()) .thenReturn(ConnectResponse.success(ImmutableList.of("connector"), HttpStatus.SC_OK)); final KsqlConfig ksqlConfig = new KsqlConfig(ImmutableMap.of()); final ConfiguredStatement<ListConnectors> statement = ConfiguredStatement .of(PreparedStatement.of("", new ListConnectors(Optional.empty(), Scope.ALL)), SessionConfig.of(ksqlConfig, ImmutableMap.of()));
    // When:
    final Optional<KsqlEntity> entity = ListConnectorsExecutor .execute(statement, mock(SessionProperties.class), engine, serviceContext).getEntity();
    // Then:
    assertThat("expected response!", entity.isPresent()); final ConnectorList connectorList = (ConnectorList) entity.get(); assertThat(connectorList, is(new ConnectorList( "", ImmutableList.of(), ImmutableList.of( new SimpleConnectorInfo("connector", ConnectorType.SOURCE, CONNECTOR_CLASS, "WARNING (0/2 tasks RUNNING)") ) )));
}
/** Delegates {@code comparator()} to the underlying sorted set. */
@Override public Comparator<? super E> comparator() { return underlying().comparator(); }
// Delegation check: the wrapper's comparator() must forward to the underlying
// TreePSet's comparator and return its value unchanged.
@Test public void testDelegationOfComparator() { TreePSet<Integer> testSet = TreePSet.from(Arrays.asList(3, 4, 5)); new PCollectionsTreeSetWrapperDelegationChecker<>() .defineMockConfigurationForFunctionInvocation(TreePSet::comparator, testSet.comparator()) .defineWrapperFunctionInvocationAndMockReturnValueTransformation(PCollectionsImmutableNavigableSet::comparator, identity()) .doFunctionDelegationCheck(); }
/** Returns the precomputed value stored at the given index. */
@Override public Num getValue(int index) { return values.get(index); }
// Two long positions with an idle gap between them: cash flow stays flat at 1 before
// the first buy, doubles on the first sell, holds at 2 through the idle bars, and
// doubles again to 4 on the second round trip.
@Test public void cashFlowValueWithTwoPositionsAndLongTimeWithoutTrades() { BarSeries sampleBarSeries = new MockBarSeries(numFunction, 1d, 2d, 4d, 8d, 16d, 32d); TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(1, sampleBarSeries), Trade.sellAt(2, sampleBarSeries), Trade.buyAt(4, sampleBarSeries), Trade.sellAt(5, sampleBarSeries)); CashFlow cashFlow = new CashFlow(sampleBarSeries, tradingRecord); assertNumEquals(1, cashFlow.getValue(0)); assertNumEquals(1, cashFlow.getValue(1)); assertNumEquals(2, cashFlow.getValue(2)); assertNumEquals(2, cashFlow.getValue(3)); assertNumEquals(2, cashFlow.getValue(4)); assertNumEquals(4, cashFlow.getValue(5)); }
/**
 * Starts reconfiguration for the given node type/address, writing progress to
 * System.out/System.err; returns the result code from startReconfigurationUtil.
 */
int startReconfiguration(final String nodeType, final String address) throws IOException, InterruptedException { return startReconfigurationUtil(nodeType, address, System.out, System.err); }
// Starts a NameNode reconfiguration with one valid property change (heartbeat interval
// 3 -> 6) and one unknown key, waits for it to finish, then asserts the new value took
// effect in both conf and block manager and that the status output reports the change.
@Test(timeout = 30000) public void testNameNodeGetReconfigurationStatus() throws IOException, InterruptedException, TimeoutException { ReconfigurationUtil ru = mock(ReconfigurationUtil.class); namenode.setReconfigurationUtil(ru); final String address = namenode.getHostAndPort(); List<ReconfigurationUtil.PropertyChange> changes = new ArrayList<>(); changes.add(new ReconfigurationUtil.PropertyChange( DFS_HEARTBEAT_INTERVAL_KEY, String.valueOf(6), namenode.getConf().get(DFS_HEARTBEAT_INTERVAL_KEY))); changes.add(new ReconfigurationUtil.PropertyChange( "randomKey", "new123", "old456")); when(ru.parseChangedProperties(any(Configuration.class), any(Configuration.class))).thenReturn(changes); assertThat(admin.startReconfiguration("namenode", address), is(0)); final List<String> outs = Lists.newArrayList(); final List<String> errs = Lists.newArrayList(); awaitReconfigurationFinished("namenode", address, outs, errs);
// verify change
assertEquals( DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", 6, namenode .getConf() .getLong(DFS_HEARTBEAT_INTERVAL_KEY, DFS_HEARTBEAT_INTERVAL_DEFAULT)); assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value", 6, namenode .getNamesystem() .getBlockManager() .getDatanodeManager() .getHeartbeatInterval()); int offset = 1; assertThat(outs.get(offset), containsString("SUCCESS: Changed property " + DFS_HEARTBEAT_INTERVAL_KEY)); assertThat(outs.get(offset + 1), is(allOf(containsString("From:"), containsString("3")))); assertThat(outs.get(offset + 2), is(allOf(containsString("To:"), containsString("6")))); }
/**
 * Applies an ALTER TABLE statement to an Iceberg table, then refreshes the
 * local table cache so subsequent reads see the new metadata.
 *
 * @param context the connect context of the session issuing the statement
 * @param stmt    the ALTER TABLE statement to apply
 * @throws UserException if the alteration fails
 */
@Override
public void alterTable(ConnectContext context, AlterTableStmt stmt) throws UserException {
    String dbName = stmt.getDbName();
    String tableName = stmt.getTableName();
    org.apache.iceberg.Table table = icebergCatalog.getTable(dbName, tableName);

    if (table == null) {
        throw new StarRocksConnectorException(
                "Failed to load iceberg table: " + stmt.getTbl().toString());
    }

    IcebergAlterTableExecutor executor = new IcebergAlterTableExecutor(stmt, table, icebergCatalog);
    executor.execute();

    // Serialize cache maintenance so concurrent alters don't interleave
    // remove/refresh on the same table entry.
    synchronized (this) {
        tables.remove(TableIdentifier.of(dbName, tableName));
        try {
            icebergCatalog.refreshTable(dbName, tableName, jobPlanningExecutor);
        } catch (Exception exception) {
            // FIX: pass the exception to the logger so the failure cause and
            // stack trace are not silently discarded.
            LOG.error("Failed to refresh caching iceberg table.", exception);
            // Fall back to invalidating the stale cache entry.
            icebergCatalog.invalidateCache(new CachingIcebergCatalog.IcebergTableName(dbName, tableName));
        }
        asyncRefreshOthersFeMetadataCache(dbName, tableName);
    }
}
// Exercises IcebergMetadata.alterTable across the supported clause types.
// NOTE: a single mutable `clauses` list is reused (cleared and refilled)
// between scenarios, and it is captured by the assertThrows lambdas below,
// so the statement order in this test is significant.
@Test
public void testAlterTable(@Mocked IcebergHiveCatalog icebergHiveCatalog) throws UserException {
    IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog,
            Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
    TableName tableName = new TableName("db", "tbl");
    // Add a single nullable column, then a batch of two nullable columns.
    ColumnDef c1 = new ColumnDef("col1", TypeDef.create(PrimitiveType.INT), true);
    AddColumnClause addColumnClause = new AddColumnClause(c1, null, null, new HashMap<>());
    ColumnDef c2 = new ColumnDef("col2", TypeDef.create(PrimitiveType.BIGINT), true);
    ColumnDef c3 = new ColumnDef("col3", TypeDef.create(PrimitiveType.VARCHAR), true);
    List<ColumnDef> cols = new ArrayList<>();
    cols.add(c2);
    cols.add(c3);
    AddColumnsClause addColumnsClause = new AddColumnsClause(cols, null, new HashMap<>());
    List<AlterClause> clauses = Lists.newArrayList();
    clauses.add(addColumnClause);
    clauses.add(addColumnsClause);
    AlterTableStmt stmt = new AlterTableStmt(tableName, clauses);
    metadata.alterTable(new ConnectContext(), stmt);
    clauses.clear();
    // must be default null: a NOT NULL new column is rejected with DdlException.
    ColumnDef c4 = new ColumnDef("col4", TypeDef.create(PrimitiveType.INT), false);
    AddColumnClause addC4 = new AddColumnClause(c4, null, null, new HashMap<>());
    clauses.add(addC4);
    AlterTableStmt stmtC4 = new AlterTableStmt(tableName, clauses);
    Assert.assertThrows(DdlException.class, () -> metadata.alterTable(new ConnectContext(), stmtC4));
    clauses.clear();
    // drop/rename/modify column in one statement.
    DropColumnClause dropColumnClause = new DropColumnClause("col1", null, new HashMap<>());
    ColumnRenameClause columnRenameClause = new ColumnRenameClause("col2", "col22");
    ColumnDef newCol = new ColumnDef("col1", TypeDef.create(PrimitiveType.BIGINT), true);
    Map<String, String> properties = new HashMap<>();
    ModifyColumnClause modifyColumnClause = new ModifyColumnClause(newCol, ColumnPosition.FIRST, null, properties);
    clauses.add(dropColumnClause);
    clauses.add(columnRenameClause);
    clauses.add(modifyColumnClause);
    metadata.alterTable(new ConnectContext(), new AlterTableStmt(tableName, clauses));
    // rename table
    clauses.clear();
    TableRenameClause tableRenameClause = new TableRenameClause("newTbl");
    clauses.add(tableRenameClause);
    metadata.alterTable(new ConnectContext(), new AlterTableStmt(tableName, clauses));
    // modify table properties/comment (format, location, compression, ORC batch size).
    clauses.clear();
    Map<String, String> newProperties = new HashMap<>();
    newProperties.put(FILE_FORMAT, "orc");
    newProperties.put(LOCATION_PROPERTY, "new location");
    newProperties.put(COMPRESSION_CODEC, "gzip");
    newProperties.put(TableProperties.ORC_BATCH_SIZE, "10240");
    ModifyTablePropertiesClause modifyTablePropertiesClause = new ModifyTablePropertiesClause(newProperties);
    AlterTableCommentClause alterTableCommentClause = new AlterTableCommentClause("new comment", NodePosition.ZERO);
    clauses.add(modifyTablePropertiesClause);
    clauses.add(alterTableCommentClause);
    metadata.alterTable(new ConnectContext(), new AlterTableStmt(tableName, clauses));
    // modify empty properties: an empty property map is rejected.
    clauses.clear();
    Map<String, String> emptyProperties = new HashMap<>();
    ModifyTablePropertiesClause emptyPropertiesClause = new ModifyTablePropertiesClause(emptyProperties);
    clauses.add(emptyPropertiesClause);
    Assert.assertThrows(DdlException.class,
            () -> metadata.alterTable(new ConnectContext(), new AlterTableStmt(tableName, clauses)));
    // modify unsupported properties: an unknown compression codec is rejected.
    clauses.clear();
    Map<String, String> invalidProperties = new HashMap<>();
    invalidProperties.put(FILE_FORMAT, "parquet");
    invalidProperties.put(COMPRESSION_CODEC, "zzz");
    ModifyTablePropertiesClause invalidCompressionClause = new ModifyTablePropertiesClause(invalidProperties);
    clauses.add(invalidCompressionClause);
    Assert.assertThrows(DdlException.class,
            () -> metadata.alterTable(new ConnectContext(), new AlterTableStmt(tableName, clauses)));
}
/**
 * Reads one line of text from the reader using the given file-format number,
 * first guessing the encoding type from the reader's declared encoding.
 */
public static final String getLine( LogChannelInterface log, InputStreamReader reader, int formatNr,
  StringBuilder line ) throws KettleFileException {
  // Derive the encoding type once, then delegate to the full implementation.
  final EncodingType encodingType = EncodingType.guessEncodingType( reader.getEncoding() );
  return getLine( log, reader, encodingType, formatNr, line );
}
@Test( timeout = 100 )
public void test_PDI695() throws KettleFileException, UnsupportedEncodingException {
  // PDI-695: the first line must be read identically regardless of the
  // terminator style used by the file (CRLF, LF, or bare CR).
  final String expected = "col1\tcol2\tcol3";
  final String[] inputs = {
    "col1\tcol2\tcol3\r\ndata1\tdata2\tdata3\r\n", // DOS / Windows
    "col1\tcol2\tcol3\ndata1\tdata2\tdata3\n",     // Unix
    "col1\tcol2\tcol3\rdata1\tdata2\tdata3\r",     // classic Mac OS
  };
  for ( String input : inputs ) {
    assertEquals( expected, TextFileInput.getLine( null, getInputStreamReader( input ),
      TextFileInputMeta.FILE_FORMAT_UNIX, new StringBuilder( 1000 ) ) );
  }
}
/**
 * Adds a source node that consumes the named topics, using the builder's
 * defaults for the offset reset policy, timestamp extractor and
 * key/value deserializers.
 *
 * @param name   unique name of the source node
 * @param topics the topics this source subscribes to
 * @return this {@code Topology}, to allow call chaining
 */
public synchronized Topology addSource(final String name, final String... topics) {
    // The leading nulls select the defaults for each optional setting.
    internalTopologyBuilder.addSource(null, name, null, null, null, topics);
    return this;
}
@Test
public void shouldNotAllowNullNameWhenAddingSourceWithPattern() {
    // A pattern-based source still requires a non-null node name.
    final Pattern anyTopic = Pattern.compile(".*");
    assertThrows(NullPointerException.class, () -> topology.addSource(null, anyTopic));
}
/** Performs the move action by logging it. */
public void move() {
    // The exact message text is observable behavior (tests assert on it).
    LOGGER.info("move");
}
@Test
void testMove() {
    final var item = new BallItem();
    final var twinThread = mock(BallThread.class);
    item.setTwin(twinThread);

    item.move();

    // Exactly one "move" entry is logged...
    assertTrue(appender.logContains("move"));
    assertEquals(1, appender.getLogSize());
    // ...and the twin thread is never touched.
    verifyNoMoreInteractions(twinThread);
}
/**
 * Submits the task to the delegate executor, marking the submission meter
 * and wrapping the task so its execution is instrumented.
 */
@Override
public Future<?> submit(Runnable runnable) {
    submitted.mark();
    final Runnable instrumented = new InstrumentedRunnable(runnable);
    return delegate.submit(instrumented);
}
// Verifies the metrics an InstrumentedExecutorService exposes for a
// ThreadPoolExecutor. NOTE: the submitted Runnable asserts counter values
// while it is running, so the ordering of submit/assert statements and the
// values asserted inside the lambda are timing-sensitive by design.
@Test
@SuppressWarnings("unchecked")
public void reportsTasksInformationForThreadPoolExecutor() throws Exception {
    // Pool of 4 core / 16 max threads with a bounded queue of 32 slots.
    executor = new ThreadPoolExecutor(4, 16, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(32));
    instrumentedExecutorService = new InstrumentedExecutorService(executor, registry, "tp");
    submitted = registry.meter("tp.submitted");
    running = registry.counter("tp.running");
    completed = registry.meter("tp.completed");
    duration = registry.timer("tp.duration");
    idle = registry.timer("tp.idle");
    // Gauges registered by InstrumentedExecutorService for the pool state.
    final Gauge<Integer> poolSize = (Gauge<Integer>) registry.getGauges().get("tp.pool.size");
    final Gauge<Integer> poolCoreSize = (Gauge<Integer>) registry.getGauges().get("tp.pool.core");
    final Gauge<Integer> poolMaxSize = (Gauge<Integer>) registry.getGauges().get("tp.pool.max");
    final Gauge<Integer> tasksActive = (Gauge<Integer>) registry.getGauges().get("tp.tasks.active");
    final Gauge<Long> tasksCompleted = (Gauge<Long>) registry.getGauges().get("tp.tasks.completed");
    final Gauge<Integer> tasksQueued = (Gauge<Integer>) registry.getGauges().get("tp.tasks.queued");
    final Gauge<Integer> tasksCapacityRemaining = (Gauge<Integer>) registry.getGauges().get("tp.tasks.capacity");
    // Before any submission, every metric reflects an idle, empty pool.
    assertThat(submitted.getCount()).isEqualTo(0);
    assertThat(running.getCount()).isEqualTo(0);
    assertThat(completed.getCount()).isEqualTo(0);
    assertThat(duration.getCount()).isEqualTo(0);
    assertThat(idle.getCount()).isEqualTo(0);
    assertThat(poolSize.getValue()).isEqualTo(0);
    assertThat(poolCoreSize.getValue()).isEqualTo(4);
    assertThat(poolMaxSize.getValue()).isEqualTo(16);
    assertThat(tasksActive.getValue()).isEqualTo(0);
    assertThat(tasksCompleted.getValue()).isEqualTo(0L);
    assertThat(tasksQueued.getValue()).isEqualTo(0);
    assertThat(tasksCapacityRemaining.getValue()).isEqualTo(32);
    // This Runnable asserts the metric state observed WHILE it is executing:
    // one submission, one running task, nothing completed yet.
    Runnable runnable = () -> {
        assertThat(submitted.getCount()).isEqualTo(1);
        assertThat(running.getCount()).isEqualTo(1);
        assertThat(completed.getCount()).isEqualTo(0);
        assertThat(duration.getCount()).isEqualTo(0);
        assertThat(idle.getCount()).isEqualTo(1);
        assertThat(tasksActive.getValue()).isEqualTo(1);
        assertThat(tasksQueued.getValue()).isEqualTo(0);
    };
    Future<?> theFuture = instrumentedExecutorService.submit(runnable);
    assertThat(theFuture).succeedsWithin(Duration.ofSeconds(5L));
    // After completion: submission/completion counted once, durations recorded,
    // and the pool has spun up a single worker thread.
    assertThat(submitted.getCount()).isEqualTo(1);
    assertThat(running.getCount()).isEqualTo(0);
    assertThat(completed.getCount()).isEqualTo(1);
    assertThat(duration.getCount()).isEqualTo(1);
    assertThat(duration.getSnapshot().size()).isEqualTo(1);
    assertThat(idle.getCount()).isEqualTo(1);
    assertThat(idle.getSnapshot().size()).isEqualTo(1);
    assertThat(poolSize.getValue()).isEqualTo(1);
}
/**
 * Routes the incoming message through a per-originator queue (created lazily
 * on first use) and attempts to process it asynchronously.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    // One SemaphoreWithTbMsgQueue per originator, keyed by the originator id.
    SemaphoreWithTbMsgQueue queue =
            locks.computeIfAbsent(msg.getOriginator(), originator -> new SemaphoreWithTbMsgQueue(originator));
    queue.addToQueueAndTryProcess(msg, ctx, this::processMsgAsync);
}
@Test
public void test_sqrt_5_to_timeseries_and_metadata_and_data() {
    // SQRT node writing the result (3 decimals) to time series, message
    // metadata and message body.
    var sqrtNode = initNode(TbRuleNodeMathFunctionType.SQRT,
            new TbMathResult(TbMathArgumentType.TIME_SERIES, "result", 3, true, true, DataConstants.SERVER_SCOPE),
            new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "a")
    );

    TbMsg inMsg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, TbMsgMetaData.EMPTY,
            JacksonUtil.newObjectNode().put("a", 5).toString());

    when(telemetryService.saveAndNotify(any(), any(), any(TsKvEntry.class)))
            .thenReturn(Futures.immediateFuture(null));

    sqrtNode.onMsg(ctx, inMsg);

    // The node must tell success exactly once and persist one telemetry entry.
    ArgumentCaptor<TbMsg> outCaptor = ArgumentCaptor.forClass(TbMsg.class);
    verify(ctx, timeout(TIMEOUT)).tellSuccess(outCaptor.capture());
    verify(telemetryService, times(1)).saveAndNotify(any(), any(), any(TsKvEntry.class));

    TbMsg outMsg = outCaptor.getValue();
    assertNotNull(outMsg);
    assertNotNull(outMsg.getData());

    // sqrt(5) rounded to 3 decimals = 2.236, present in both body and metadata.
    var metaValue = outMsg.getMetaData().getValue("result");
    var bodyJson = JacksonUtil.toJsonNode(outMsg.getData());
    assertTrue(bodyJson.has("result"));
    assertEquals(2.236, bodyJson.get("result").asDouble(), 0.0);
    assertNotNull(metaValue);
    assertEquals("2.236", metaValue);
}
/**
 * Decodes a raw beat message whose payload is expected to be a JSON object.
 *
 * @param rawMessage the raw message to decode
 * @return the parsed {@link Message}, or {@code null} if the payload could
 *         not be parsed as JSON
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final byte[] payload = rawMessage.getPayload();
    final Map<String, Object> event;
    try {
        event = objectMapper.readValue(payload, TypeReferences.MAP_STRING_OBJECT);
    } catch (IOException e) {
        // FIX: pass the exception as the last argument so SLF4J logs the
        // stack trace instead of silently discarding the cause.
        LOG.error("Couldn't decode raw message {}", rawMessage, e);
        return null;
    }

    return parseEvent(event);
}
@Test
public void decodeMessagesHandlesGenericBeatWithCloudEC2() throws Exception {
    final Message decoded = codec.decode(messageFromJson("generic-with-cloud-ec2.json"));

    assertThat(decoded).isNotNull();

    // Core message fields derived from the generic beat payload.
    assertThat(decoded.getMessage()).isEqualTo("null");
    assertThat(decoded.getSource()).isEqualTo("unknown");
    assertThat(decoded.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
    assertThat(decoded.getField("facility")).isEqualTo("genericbeat");
    assertThat(decoded.getField("beat_foo")).isEqualTo("bar");

    // EC2 cloud metadata must be flattened into beat_meta_cloud_* fields.
    assertThat(decoded.getField("beat_meta_cloud_provider")).isEqualTo("ec2");
    assertThat(decoded.getField("beat_meta_cloud_machine_type")).isEqualTo("t2.medium");
    assertThat(decoded.getField("beat_meta_cloud_instance_id")).isEqualTo("i-4e123456");
    assertThat(decoded.getField("beat_meta_cloud_region")).isEqualTo("us-east-1");
    assertThat(decoded.getField("beat_meta_cloud_availability_zone")).isEqualTo("us-east-1c");
}
/**
 * Executes the given statement after passing it through the configured
 * injectors, reverting any injector side effects if execution fails.
 *
 * @param statement        the statement to execute
 * @param executionContext the execution context to run against
 * @param securityContext  the security context of the caller
 * @return the executor response
 * @throws KsqlServerException if the command runner has reported a problem
 */
public StatementExecutorResponse execute(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext executionContext,
    final KsqlSecurityContext securityContext
) {
  final String commandRunnerWarningString = commandRunnerWarning.get();
  // Refuse to handle statements while the command runner is degraded.
  // FIX: idiomatic empty check instead of equals("").
  if (!commandRunnerWarningString.isEmpty()) {
    throw new KsqlServerException("Failed to handle Ksql Statement."
        + System.lineSeparator()
        + commandRunnerWarningString);
  }
  final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap(
      injectorFactory.apply(executionContext, securityContext.getServiceContext()));
  final ConfiguredStatementWithSideEffects<?> injectedWithSideEffects =
      injector.injectWithSideEffects(statement);
  try {
    return executeInjected(
        injectedWithSideEffects.getStatement(),
        statement,
        executionContext,
        securityContext);
  } catch (Exception e) {
    // Roll back whatever the injectors created before re-throwing.
    injector.revertSideEffects(injectedWithSideEffects);
    throw e;
  }
}
@Test public void shouldThrowExceptionWhenInsertIntoReadOnlyTopic() { // Given final PreparedStatement<Statement> preparedStatement = PreparedStatement.of("", new InsertInto(SourceName.of("s1"), mock(Query.class))); final ConfiguredStatement<Statement> configured = ConfiguredStatement.of(preparedStatement, SessionConfig.of(KSQL_CONFIG, ImmutableMap.of()) ); final DataSource dataSource = mock(DataSource.class); doReturn(dataSource).when(metaStore).getSource(SourceName.of("s1")); when(dataSource.getKafkaTopicName()).thenReturn("_confluent-ksql-default__command-topic"); // When: final Exception e = assertThrows( KsqlException.class, () -> distributor.execute(configured, executionContext, mock(KsqlSecurityContext.class)) ); // Then: assertThat(e.getMessage(), containsString( "Cannot insert into read-only topic: " + "_confluent-ksql-default__command-topic")); }