focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Registers a task's state stores with the state manager.
 * Sequence is order-critical: acquire the task's state-directory lock, snapshot
 * whether the directory is empty, register the stores, then load checkpointed
 * offsets. No-op for stateless topologies.
 *
 * @throws LockException if the state directory lock cannot be acquired
 */
static void registerStateStores(final Logger log, final String logPrefix, final ProcessorTopology topology, final ProcessorStateManager stateMgr, final StateDirectory stateDirectory, final InternalProcessorContext processorContext) {
    // Stateless task: nothing to lock or register.
    if (topology.stateStores().isEmpty()) {
        return;
    }
    final TaskId id = stateMgr.taskId();
    if (!stateDirectory.lock(id)) {
        throw new LockException(String.format("%sFailed to lock the state directory for task %s", logPrefix, id));
    }
    log.debug("Acquired state directory lock");
    // Snapshot emptiness BEFORE registration, which may create store directories.
    final boolean storeDirsEmpty = stateDirectory.directoryForTaskIsEmpty(id);
    stateMgr.registerStateStores(topology.stateStores(), processorContext);
    log.debug("Registered state stores");
    // We should only load checkpoint AFTER the corresponding state directory lock has been acquired and
    // the state stores have been registered; we should not try to load at the state manager construction time.
    // See https://issues.apache.org/jira/browse/KAFKA-8574
    stateMgr.initializeStoreOffsetsFromCheckpoint(storeDirsEmpty);
    log.debug("Initialized state stores");
}
/** A stateless topology must make registerStateStores a no-op (no lock, no registration). */
@Test
public void testRegisterStateStoreWhenTopologyEmpty() {
    when(topology.stateStores()).thenReturn(emptyList());
    // Must return before touching stateDirectory/stateManager.
    StateManagerUtil.registerStateStores(logger, "logPrefix:", topology, stateManager, stateDirectory, processorContext);
}
// Opens the destination resource for binary writing and wraps it in an IsmSinkWriter.
@Override
public SinkWriter<WindowedValue<IsmRecord<V>>> writer() throws IOException {
    return new IsmSinkWriter(FileSystems.create(resourceId, MimeTypes.BINARY));
}
/** Writes a record whose key encodes to zero bytes and whose value spans two blocks. */
@Test
public void testWriteEmptyKeyWithValueLargerThanBlockSize() throws Throwable {
    IsmSink<byte[]> sink = new IsmSink<>(
        FileSystems.matchNewResource(tmpFolder.newFile().getPath(), false),
        IsmRecordCoder.of(
            1, // We hash using only the window
            0, // There are no metadata records
            // We specifically use a coder that encodes to 0 bytes.
            ImmutableList.<Coder<?>>of(VoidCoder.of()),
            ByteArrayCoder.of()),
        BLOOM_FILTER_SIZE_LIMIT);
    SinkWriter<WindowedValue<IsmRecord<byte[]>>> sinkWriter = sink.writer();
    // Value is twice the block size; close() must flush without error.
    sinkWriter.add(
        new ValueInEmptyWindows<>(
            IsmRecord.of(
                Arrays.asList(new Object[] {null}), new byte[IsmSink.BLOCK_SIZE_BYTES * 2])));
    sinkWriter.close();
}
boolean isLocalRoute() { PathSegment firstPathSegment = null; // Find the first Path Segment by ignoring the AS_CONFED_* segments for (PathSegment pathSegment : asPath.getPathSegments()) { if ((pathSegment.getType() == BgpConstants.Update.AsPath.AS_SET) || (pathSegment.getType() == BgpConstants.Update.AsPath.AS_SEQUENCE)) { firstPathSegment = pathSegment; break; } } if (firstPathSegment == null) { return true; // Local route: no path segments } // If the first path segment is AS_SET, the route is considered local if (firstPathSegment.getType() == BgpConstants.Update.AsPath.AS_SET) { return true; } return false; // The route is not local }
/** Covers isLocalRoute(): non-local route, AS_SET-first path, and an empty AS Path. */
@Test
public void testIsLocalRoute() {
    //
    // Test non-local route
    //
    BgpRouteEntry bgpRouteEntry = generateBgpRouteEntry();
    assertThat(bgpRouteEntry.isLocalRoute(), is(false));
    //
    // Test local route with AS Path that begins with AS_SET
    //
    Ip4Prefix prefix = Ip4Prefix.valueOf("1.2.3.0/24");
    Ip4Address nextHop = Ip4Address.valueOf("5.6.7.8");
    byte origin = BgpConstants.Update.Origin.IGP;
    // Setup the AS Path
    ArrayList<BgpRouteEntry.PathSegment> pathSegments = new ArrayList<>();
    byte pathSegmentType1 = (byte) BgpConstants.Update.AsPath.AS_SET;
    ArrayList<Long> segmentAsNumbers1 = new ArrayList<>();
    segmentAsNumbers1.add(1L);
    segmentAsNumbers1.add(2L);
    segmentAsNumbers1.add(3L);
    BgpRouteEntry.PathSegment pathSegment1 = new BgpRouteEntry.PathSegment(pathSegmentType1, segmentAsNumbers1);
    pathSegments.add(pathSegment1);
    //
    byte pathSegmentType2 = (byte) BgpConstants.Update.AsPath.AS_SEQUENCE;
    ArrayList<Long> segmentAsNumbers2 = new ArrayList<>();
    segmentAsNumbers2.add(4L);
    segmentAsNumbers2.add(5L);
    segmentAsNumbers2.add(6L);
    BgpRouteEntry.PathSegment pathSegment2 = new BgpRouteEntry.PathSegment(pathSegmentType2, segmentAsNumbers2);
    pathSegments.add(pathSegment2);
    //
    BgpRouteEntry.AsPath asPath = new BgpRouteEntry.AsPath(pathSegments);
    //
    long localPref = 100;
    long multiExitDisc = 20;
    //
    bgpRouteEntry = new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath, localPref);
    bgpRouteEntry.setMultiExitDisc(multiExitDisc);
    // First regular segment is AS_SET -> route is local
    assertThat(bgpRouteEntry.isLocalRoute(), is(true));
    //
    // Test local route with empty AS Path
    //
    pathSegments = new ArrayList<>();
    asPath = new BgpRouteEntry.AsPath(pathSegments);
    bgpRouteEntry = new BgpRouteEntry(bgpSession, prefix, nextHop, origin, asPath, localPref);
    bgpRouteEntry.setMultiExitDisc(multiExitDisc);
    assertThat(bgpRouteEntry.isLocalRoute(), is(true));
}
// Delegates branch renaming to the snapshot-reference update operation; returns this for chaining.
@Override
public ManageSnapshots renameBranch(String name, String newName) {
    updateSnapshotReferencesOperation().renameBranch(name, newName);
    return this;
}
/** Renaming a branch moves the ref: old name disappears, new name points at the same snapshot. */
@TestTemplate
public void testRenameBranch() {
    table.newAppend().appendFile(FILE_A).commit();
    table.newAppend().appendFile(FILE_A).commit();
    long snapshotId = table.currentSnapshot().snapshotId();
    // Test creating and renaming independently
    table.manageSnapshots().createBranch("branch1", snapshotId).commit();
    table.manageSnapshots().renameBranch("branch1", "branch2").commit();
    TableMetadata updated = table.ops().refresh();
    assertThat(updated.ref("branch1")).isNull();
    assertThat(SnapshotRef.branchBuilder(snapshotId).build()).isEqualTo(updated.ref("branch2"));
    // Create + rename chained within a single commit
    table
        .manageSnapshots()
        .createBranch("branch3", snapshotId)
        .renameBranch("branch3", "branch4")
        .commit();
    updated = table.ops().refresh();
    assertThat(updated.ref("branch3")).isNull();
    assertThat(SnapshotRef.branchBuilder(snapshotId).build()).isEqualTo(updated.ref("branch4"));
}
/**
 * Casts a boxed Integer to a BigDecimal of the given precision/scale.
 * Null propagates as null; otherwise defers to the long overload.
 */
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
    return value == null ? null : cast(value.longValue(), precision, scale);
}
/** A double too negative for DECIMAL(2,1) must raise "Numeric field overflow". */
@Test
public void shouldNotCastDoubleTooNegative() {
    // When:
    final Exception e = assertThrows(
        ArithmeticException.class,
        () -> cast(-10.0, 2, 1) // -10.0 does not fit precision 2, scale 1
    );
    // Then:
    assertThat(e.getMessage(), containsString("Numeric field overflow"));
}
// Appends "VALUES (?,?,...)" with one placeholder per column of the target table.
void appendValuesClause(StringBuilder sb) {
    sb.append("VALUES ");
    appendValues(sb, jdbcTable.dbFieldNames().size());
}
/** A two-column table must yield a "VALUES (?,?)" clause. */
@Test
void appendValuesClause() {
    H2UpsertQueryBuilder builder = new H2UpsertQueryBuilder(table, dialect);
    StringBuilder sb = new StringBuilder();
    builder.appendValuesClause(sb);
    String valueClause = sb.toString();
    assertThat(valueClause).isEqualTo("VALUES (?,?)");
}
// Returns true when a schema has been registered under the given path.
public boolean hasSchemaEntry(String schemaPath) {
    return schemaEntries.containsKey(schemaPath);
}
/** hasSchemaEntry() is true only for paths previously registered via putSchemaEntry(). */
@Test
void hasSchemaEntry() {
    SchemaMap schemaMap = new SchemaMap();
    schemaMap.putSchemaEntry("path1", "schema1");
    assertTrue(schemaMap.hasSchemaEntry("path1"));
    assertFalse(schemaMap.hasSchemaEntry("path2"));
}
/**
 * Creates a SlidingWindows with the given time difference and grace period,
 * validating both durations via the shared millisecond-check helpers.
 *
 * @throws IllegalArgumentException if either duration is invalid
 */
public static SlidingWindows ofTimeDifferenceAndGrace(final Duration timeDifference, final Duration afterWindowEnd) throws IllegalArgumentException {
    final long timeDifferenceMs = validateMillisecondDuration(
        timeDifference, prepareMillisCheckFailMsgPrefix(timeDifference, "timeDifference"));
    final long graceMs = validateMillisecondDuration(
        afterWindowEnd, prepareMillisCheckFailMsgPrefix(afterWindowEnd, "afterWindowEnd"));
    return new SlidingWindows(timeDifferenceMs, graceMs);
}
/**
 * Windows with different time differences must not be equal.
 * NOTE(review): the random ranges [1,10) and [21,41) never overlap, so the two
 * values are guaranteed to differ; still, random test data hurts reproducibility.
 */
@Test
public void equalsAndHashcodeShouldNotBeEqualForDifferentTimeDifference() {
    final long grace = 1L + (long) (Math.random() * (10L - 1L));
    final long timeDifferenceOne = 1L + (long) (Math.random() * (10L - 1L));
    final long timeDifferenceTwo = 21L + (long) (Math.random() * (41L - 21L));
    verifyInEquality(
        SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(timeDifferenceOne), ofMillis(grace)),
        SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(timeDifferenceTwo), ofMillis(grace))
    );
}
/**
 * Resolves a resource by consulting each source (application / plugin / dependencies)
 * in the order dictated by the class-loading strategy for this name.
 * Returns the first hit, or null when no source has the resource.
 */
@Override
public URL getResource(String name) {
    ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
    log.trace("Received request to load resource '{}'", name);
    for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
        URL url = null;
        switch (classLoadingSource) {
            case APPLICATION:
                url = super.getResource(name);
                break;
            case PLUGIN:
                url = findResource(name);
                break;
            case DEPENDENCIES:
                url = findResourceFromDependencies(name);
                break;
        }
        if (url != null) {
            log.trace("Found resource '{}' in {} classpath", name, classLoadingSource);
            return url;
        } else {
            log.trace("Couldn't find resource '{}' in {}", name, classLoadingSource);
        }
    }
    return null;
}
/** With a parent-first strategy, a resource present in both parent and plugin must come from the parent. */
@Test
void parentFirstGetResourceExistsInBothParentAndPlugin() throws URISyntaxException, IOException {
    URL resource = parentFirstPluginClassLoader.getResource("META-INF/file-in-both-parent-and-plugin");
    assertFirstLine("parent", resource);
}
// Formats a Connect schema as SQL type text; optionally strips the top-level STRUCT wrapper
// when rendering a column list.
@Override
public String format(final Schema schema) {
    final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
    return options.contains(Option.AS_COLUMN_LIST) ? stripTopLevelStruct(converted) : converted;
}
/** MAP schemas format as MAP<key, value>; STRICT mode appends NOT NULL at every level. */
@Test
public void shouldFormatMap() {
    // Given:
    final Schema schema = SchemaBuilder
        .map(Schema.STRING_SCHEMA, Schema.FLOAT64_SCHEMA)
        .build();
    // Then:
    assertThat(DEFAULT.format(schema), is("MAP<VARCHAR, DOUBLE>"));
    assertThat(STRICT.format(schema), is("MAP<VARCHAR NOT NULL, DOUBLE NOT NULL> NOT NULL"));
}
/**
 * Registers a changed metric, but only for throughput metrics in the
 * topic-level group; everything else is ignored.
 */
@Override
public void metricChange(final KafkaMetric metric) {
    // Positive guard (De Morgan of the original early-return condition);
    // short-circuit keeps the group check from running for non-throughput names.
    if (THROUGHPUT_METRIC_NAMES.contains(metric.metricName().name())
            && StreamsMetricsImpl.TOPIC_LEVEL_GROUP.equals(metric.metricName().group())) {
        addMetric(metric, getQueryId(metric), getTopic(metric));
    }
}
/** Shared-runtime thread names: the query id must be parsed from the task-id tag, hyphens included. */
@Test
public void shouldExtractQueryIdWithHyphenInSharedRuntimes() {
    // Given:
    final Map<String, String> sharedRuntimeQueryTags = ImmutableMap.of(
        "logical_cluster_id", "lksqlc-12345",
        "query-id", "CSAS_TEST_COPY-STREAM_1_23",
        "member", "_confluent_blahblah_query-1-blahblah",
        "topic", TOPIC_NAME
    );
    // When: a consumption metric tagged with a shared-runtime thread/task id arrives
    listener.metricChange(mockMetric(
        BYTES_CONSUMED_TOTAL,
        2D,
        ImmutableMap.of(
            "thread-id", "_confluent_blahblah_query-1-blahblah",
            "task-id", "CSAS_TEST_COPY-STREAM_1_23__" + TASK_ID_1,
            "processor-node-id", PROCESSOR_NODE_ID,
            "topic", TOPIC_NAME))
    );
    // Then: the metric is registered under the parsed query id and reports its value
    Measurable bytesConsumed = verifyAndGetMetric(BYTES_CONSUMED_TOTAL, sharedRuntimeQueryTags);
    Object bytesConsumedValue = bytesConsumed.measure(new MetricConfig().tags(sharedRuntimeQueryTags), 0L);
    assertThat(bytesConsumedValue, equalTo(2D));
}
// Returns a new GsonBuilder seeded with this instance's configuration.
public GsonBuilder newBuilder() {
    return new GsonBuilder(this);
}
/** newBuilder() must copy factory lists, so registering on the clone leaves the original untouched. */
@Test
public void testClonedTypeAdapterFactoryListsAreIndependent() {
    Gson original = new Gson(
        CUSTOM_EXCLUDER,
        CUSTOM_FIELD_NAMING_STRATEGY,
        new HashMap<>(),
        true,
        false,
        true,
        false,
        FormattingStyle.PRETTY,
        Strictness.LENIENT,
        false,
        true,
        LongSerializationPolicy.DEFAULT,
        null,
        DateFormat.DEFAULT,
        DateFormat.DEFAULT,
        new ArrayList<>(),
        new ArrayList<>(),
        new ArrayList<>(),
        CUSTOM_OBJECT_TO_NUMBER_STRATEGY,
        CUSTOM_NUMBER_TO_NUMBER_STRATEGY,
        Collections.emptyList());
    Gson clone = original.newBuilder().registerTypeAdapter(int.class, new TestTypeAdapter()).create();
    // Exactly one extra factory on the clone; the original's list size is read afterwards, unchanged.
    assertThat(clone.factories).hasSize(original.factories.size() + 1);
}
// Delegates interpretation to the shared helper, bound to this interpreter's session.
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
    return helper.interpret(session, st, context);
}
/** DESCRIBE KEYSPACE must render the expected HTML (compared after whitespace normalization). */
@Test
void should_describe_keyspace() {
    // Given
    String query = "DESCRIBE KEYSPACE live_data;";
    final String expected = reformatHtml(
        readTestResource("/scalate/DescribeKeyspace_live_data.html"));
    // When
    final InterpreterResult actual = interpreter.interpret(query, intrContext);
    // Then
    assertEquals(Code.SUCCESS, actual.code());
    assertEquals(expected, reformatHtml(actual.message().get(0).getData()));
}
// Asks the macOS file manager for the user-visible display name of this path.
@Override
public String getDisplayName() {
    return NSFileManager.defaultManager().displayNameAtPath(this.getName());
}
/**
 * On macOS, Finder displays a ':' in an on-disk filename as '/',
 * so "f:a" is expected to have display name "f/a".
 */
@Test
public void testDisplayName() {
    assertEquals("f/a", new FinderLocal(System.getProperty("java.io.tmpdir"), "f:a").getDisplayName());
}
static void setHttp2Authority(String authority, Http2Headers out) { // The authority MUST NOT include the deprecated "userinfo" subcomponent if (authority != null) { if (authority.isEmpty()) { out.authority(EMPTY_STRING); } else { int start = authority.indexOf('@') + 1; int length = authority.length() - start; if (length == 0) { throw new IllegalArgumentException("authority: " + authority); } out.authority(new AsciiString(authority, start, length)); } } }
/** Null authority -> no :authority pseudo-header (RFC 9113 §8.3.1); empty authority is rejected. */
@Test
public void setHttp2AuthorityNullOrEmpty() {
    Http2Headers headers = new DefaultHttp2Headers();
    HttpConversionUtil.setHttp2Authority(null, headers);
    assertNull(headers.authority());
    // https://datatracker.ietf.org/doc/html/rfc9113#section-8.3.1
    // Clients that generate HTTP/2 requests directly MUST use the ":authority" pseudo-header
    // field to convey authority information, unless there is no authority information to convey
    // (in which case it MUST NOT generate ":authority").
    // An intermediary that forwards a request over HTTP/2 MUST construct an ":authority" pseudo-header
    // field using the authority information from the control data of the original request, unless the
    // original request's target URI does not contain authority information
    // (in which case it MUST NOT generate ":authority").
    // NOTE(review): setHttp2Authority("") itself sets an empty value; the Http2Exception here
    // presumably comes from DefaultHttp2Headers validation -- confirm against the headers impl.
    assertThrows(Http2Exception.class, new Executable() {
        @Override
        public void execute() {
            HttpConversionUtil.setHttp2Authority("", new DefaultHttp2Headers());
        }
    });
}
/**
 * Bean-validates the arguments of a service method invocation.
 * Validation groups: Default, the service interface, an optional per-method
 * marker class, plus any groups declared via @MethodValidated.
 *
 * @throws ConstraintViolationException when any violation is found
 */
@Override
public void validate(String methodName, Class<?>[] parameterTypes, Object[] arguments) throws Exception {
    List<Class<?>> groups = new ArrayList<>();
    // Optional generated marker class named after the method
    Class<?> methodClass = methodClass(methodName);
    if (methodClass != null) {
        groups.add(methodClass);
    }
    Method method = clazz.getMethod(methodName, parameterTypes);
    Class<?>[] methodClasses;
    if (method.isAnnotationPresent(MethodValidated.class)) {
        methodClasses = method.getAnnotation(MethodValidated.class).value();
        groups.addAll(Arrays.asList(methodClasses));
    }
    // add into default group
    groups.add(0, Default.class);
    groups.add(1, clazz);
    // convert list to array
    Class<?>[] classGroups = groups.toArray(new Class[0]);
    Set<ConstraintViolation<?>> violations = new HashSet<>();
    // Synthetic bean wrapping the parameters so parameter-level constraints can be validated
    Object parameterBean = getMethodParameterBean(clazz, method, arguments);
    if (parameterBean != null) {
        violations.addAll(validator.validate(parameterBean, classGroups));
    }
    // Also validate each argument object graph
    for (Object arg : arguments) {
        validate(violations, arg, classGroups);
    }
    if (!violations.isEmpty()) {
        logger.info("Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations);
        throw new ConstraintViolationException(
            "Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations, violations);
    }
}
/** A nested parameter with a null required field must produce exactly one violation on "name". */
@Test
void testItWithNestedParameterValidationWithNullNestedParams() {
    URL url = URL.valueOf("test://test:11/org.apache.dubbo.validation.support.jvalidation.mock.JValidatorTestTarget");
    JValidator jValidator = new JValidator(url);
    try {
        JValidatorTestTarget.BaseParam<JValidatorTestTarget.Param> param = new JValidatorTestTarget.BaseParam<>();
        param.setBody(new JValidatorTestTarget.Param()); // Param's "name" left null on purpose
        jValidator.validate(
            "someMethod7", new Class<?>[] {JValidatorTestTarget.BaseParam.class}, new Object[] {param});
        Assertions.fail();
    } catch (Exception e) {
        assertThat(e, instanceOf(ConstraintViolationException.class));
        ConstraintViolationException e1 = (ConstraintViolationException) e;
        assertThat(e1.getConstraintViolations().size(), is(1));
        assertThat(e1.getMessage(), containsString("name must not be null"));
    }
}
/**
 * Enables or disables a compute node instance.
 * Validates the transition first, then persists the new instance state.
 * Fix: evaluate the requested state once instead of re-comparing the raw
 * "DISABLE" string in both the branch condition and the ternary.
 */
@Override
public void executeUpdate(final SetComputeNodeStateStatement sqlStatement, final ContextManager contextManager) {
    final boolean disable = "DISABLE".equals(sqlStatement.getState());
    if (disable) {
        checkDisablingIsValid(contextManager, sqlStatement.getInstanceId());
    } else {
        checkEnablingIsValid(contextManager, sqlStatement.getInstanceId());
    }
    // CIRCUIT_BREAK == disabled, OK == enabled
    contextManager.getPersistServiceFacade().getComputeNodePersistService()
        .updateComputeNodeState(sqlStatement.getInstanceId(), disable ? InstanceState.CIRCUIT_BREAK : InstanceState.OK);
}
/** Disabling an instance that is already in CIRCUIT_BREAK must be rejected. */
@Test
void assertExecuteUpdateWithAlreadyDisableInstance() {
    ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
    when(contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId()).thenReturn("currentInstance");
    when(contextManager.getComputeNodeInstanceContext().getComputeNodeInstanceById("instanceID").isPresent()).thenReturn(true);
    // Target instance is already CIRCUIT_BREAK -> the disabling check should fail
    when(contextManager.getComputeNodeInstanceContext().getComputeNodeInstanceById("instanceID").get().getState().getCurrentState()).thenReturn(InstanceState.CIRCUIT_BREAK);
    assertThrows(UnsupportedSQLOperationException.class, () -> executor.executeUpdate(new SetComputeNodeStateStatement("DISABLE", "instanceID"), contextManager));
}
// Converts this stream to a table using the stream's own key/value serdes and an auto-generated name.
@Override
public KTable<K, V> toTable() {
    return toTable(NamedInternal.empty(), Materialized.with(keySerde, valueSerde));
}
/** toTable(Materialized.as(store)) must materialize the store and keep the latest value per key. */
@Test
public void shouldMaterializeKTableFromKStream() {
    final Consumed<String, String> consumed = Consumed.with(Serdes.String(), Serdes.String());
    final StreamsBuilder builder = new StreamsBuilder();
    final String storeName = "store";
    final String input = "input";
    builder.stream(input, consumed)
        .toTable(Materialized.as(Stores.inMemoryKeyValueStore(storeName)));
    final Topology topology = builder.build();
    final String topologyDescription = topology.describe().toString();
    // The KSTREAM-TOTABLE processor must be connected to the named store
    assertThat(
        topologyDescription,
        equalTo("Topologies:\n" +
            " Sub-topology: 0\n" +
            " Source: KSTREAM-SOURCE-0000000000 (topics: [input])\n" +
            " --> KSTREAM-TOTABLE-0000000001\n" +
            " Processor: KSTREAM-TOTABLE-0000000001 (stores: [store])\n" +
            " --> none\n" +
            " <-- KSTREAM-SOURCE-0000000000\n\n")
    );
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
        final TestInputTopic<String, String> inputTopic = driver.createInputTopic(input, new StringSerializer(), new StringSerializer());
        final KeyValueStore<String, String> store = driver.getKeyValueStore(storeName);
        inputTopic.pipeInput("A", "01");
        inputTopic.pipeInput("B", "02");
        inputTopic.pipeInput("A", "03"); // later value for "A" overwrites "01"
        final Map<String, String> expectedStore = mkMap(mkEntry("A", "03"), mkEntry("B", "02"));
        assertThat(asMap(store), is(expectedStore));
    }
}
// Builds the CDC channel pipeline: varint32 length framing + protobuf codec,
// then the business inbound handler. Handler order is significant.
@Override
protected void initChannel(final SocketChannel channel) {
    channel.pipeline().addLast(new ProtobufVarint32FrameDecoder());
    channel.pipeline().addLast(new ProtobufDecoder(CDCRequest.getDefaultInstance()));
    channel.pipeline().addLast(new ProtobufVarint32LengthFieldPrepender());
    channel.pipeline().addLast(new ProtobufEncoder());
    channel.pipeline().addLast(new CDCChannelInboundHandler());
}
/** initChannel must install all five handlers on the channel pipeline. */
@Test
void assertInitChannel() {
    SocketChannel channel = mock(SocketChannel.class);
    ChannelPipeline pipeline = mock(ChannelPipeline.class);
    when(channel.pipeline()).thenReturn(pipeline);
    CDCServerHandlerInitializer initializer = new CDCServerHandlerInitializer();
    initializer.initChannel(channel);
    verify(pipeline).addLast(any(ProtobufVarint32FrameDecoder.class));
    verify(pipeline).addLast(any(ProtobufDecoder.class));
    verify(pipeline).addLast(any(ProtobufVarint32LengthFieldPrepender.class));
    verify(pipeline).addLast(any(ProtobufEncoder.class));
    verify(pipeline).addLast(any(CDCChannelInboundHandler.class));
}
/**
 * Shepard (inverse distance weighting) interpolation at point x.
 * Returns the sample value exactly when x coincides with a sample point.
 *
 * @throws IllegalArgumentException if x has the wrong dimensionality
 */
public double interpolate(double... x) {
    if (x.length != this.x[0].length) {
        throw new IllegalArgumentException(String.format("Invalid input vector size: %d, expected: %d", x.length, this.x[0].length));
    }
    double weight = 0.0, sum = 0.0;
    for (int i = 0; i < this.x.length; i++) {
        double r = MathEx.squaredDistance(x, this.x[i]);
        if (r == 0.0) {
            return y[i]; // exact hit on a sample point
        }
        // Inverse distance weight: w = 1/d^p = r^(-p/2), since r is the SQUARED distance.
        // The previous Math.pow(r, p/2) grew with distance, weighting FAR samples
        // more than near ones -- the opposite of Shepard's method.
        double w = Math.pow(r, -p / 2);
        weight += w;
        sum += w * y[i];
    }
    return sum / weight;
}
/** 2-D Shepard interpolation: exact at the sample points, average at the symmetric midpoint. */
@Test
public void testInterpolate2D() {
    System.out.println("interpolate 2d");
    double[] x1 = {0, 1};
    double[] x2 = {0, 1};
    double[] y = {0, 1};
    ShepardInterpolation2D instance = new ShepardInterpolation2D(x1, x2, y);
    assertEquals(0, instance.interpolate(0, 0), 1E-7);
    assertEquals(1, instance.interpolate(1, 1), 1E-7);
    // Equidistant from both samples -> mean of the two values, for any power parameter
    assertEquals(0.5, instance.interpolate(0.5, 0.5), 1E-7);
}
public static String getResourceFileAsText(String name) { try { String lineEnd = System.getProperty("line.separator"); // $NON-NLS-1$ InputStream is = JMeterUtils.class.getClassLoader().getResourceAsStream(name); if (is != null) { try (Reader in = new InputStreamReader(is, StandardCharsets.UTF_8); BufferedReader fileReader = new BufferedReader(in)) { return fileReader.lines() .collect(Collectors.joining(lineEnd, "", lineEnd)); } } else { return ""; // $NON-NLS-1$ } } catch (IOException e) { return ""; // $NON-NLS-1$ } }
/**
 * A missing resource resolves to the empty string rather than throwing.
 * NOTE(review): "Mising"/"not_existant" are typos, but renaming would change
 * the test's public identity, so they are left as-is.
 */
@Test
public void testGetResourceFileAsTextWithMisingResource() throws Exception{
    assertEquals("", JMeterUtils.getResourceFileAsText("not_existant_resourcefile.txt"));
}
/**
 * Registers a @Recurring-annotated method with the job scheduler.
 * Exactly one of cron/interval must be provided; the DISABLED marker value
 * deletes an existing job (requires an id) instead of scheduling one.
 *
 * @throws IllegalStateException    if the method has unsupported parameters
 * @throws IllegalArgumentException if neither or both of cron/interval are set
 */
public void schedule(ExecutableMethod<?, ?> method) {
    // Only zero-arg methods or a single JobContext parameter are supported
    if (hasParametersOutsideOfJobContext(method.getTargetMethod())) {
        throw new IllegalStateException("Methods annotated with " + Recurring.class.getName() + " can only have zero parameters or a single parameter of type JobContext.");
    }
    String id = getId(method);
    String cron = getCron(method);
    String interval = getInterval(method);
    if (StringUtils.isNullOrEmpty(cron) && StringUtils.isNullOrEmpty(interval))
        throw new IllegalArgumentException("Either cron or interval attribute is required.");
    if (isNotNullOrEmpty(cron) && isNotNullOrEmpty(interval))
        throw new IllegalArgumentException("Both cron and interval attribute provided. Only one is allowed.");
    if (Recurring.RECURRING_JOB_DISABLED.equals(cron) || Recurring.RECURRING_JOB_DISABLED.equals(interval)) {
        // Disabled marker: delete the existing recurring job; without an id we can only warn
        if (id == null) {
            LOGGER.warn("You are trying to disable a recurring job using placeholders but did not define an id.");
        } else {
            jobScheduler.deleteRecurringJob(id);
        }
    } else {
        JobDetails jobDetails = getJobDetails(method);
        ZoneId zoneId = getZoneId(method);
        if (isNotNullOrEmpty(cron)) {
            jobScheduler.scheduleRecurrently(id, jobDetails, CronExpression.create(cron), zoneId);
        } else {
            jobScheduler.scheduleRecurrently(id, jobDetails, new Interval(interval), zoneId);
        }
    }
}
/** An interval-only @Recurring method is registered with the scheduler, carrying the JobContext arg. */
@Test
void beansWithMethodsAnnotatedWithRecurringIntervalAnnotationWillAutomaticallyBeRegistered() {
    final ExecutableMethod executableMethod = mock(ExecutableMethod.class);
    final Method method = getRequiredMethod(MyServiceWithRecurringIntervalJobUsingJobContext.class, "myRecurringMethod", JobContext.class);
    when(executableMethod.getTargetMethod()).thenReturn(method);
    when(executableMethod.stringValue(Recurring.class, "id")).thenReturn(Optional.of("my-recurring-job"));
    when(executableMethod.stringValue(Recurring.class, "cron")).thenReturn(Optional.empty());
    when(executableMethod.stringValue(Recurring.class, "interval")).thenReturn(Optional.of("PT10M"));
    when(executableMethod.stringValue(Recurring.class, "zoneId")).thenReturn(Optional.empty());
    jobRunrRecurringJobScheduler.schedule(executableMethod);
    // No zoneId configured -> the system default zone must be used
    verify(jobScheduler).scheduleRecurrently(eq("my-recurring-job"), jobDetailsArgumentCaptor.capture(), eq(new Interval("PT10M")), eq(ZoneId.systemDefault()));
    final JobDetails actualJobDetails = jobDetailsArgumentCaptor.getValue();
    assertThat(actualJobDetails)
        .isCacheable()
        .hasClassName(MyServiceWithRecurringIntervalJobUsingJobContext.class.getName())
        .hasMethodName("myRecurringMethod")
        .hasJobContextArg();
}
/**
 * Maps any FlagrException to an HTTP 500 response whose body carries the
 * structured error payload built from the exception.
 */
@ExceptionHandler(FlagrException.class)
public ResponseEntity<FlagrErrorResponse> handleFlagrException(FlagrException ex) {
    return ResponseEntity
        .status(HttpResponseStatus.INTERNAL_SERVER_ERROR.getStatusCode())
        .body(new FlagrErrorResponse(ex));
}
/** FlagrException maps to HTTP 500 with status and message reflected in the body. */
@Test
void handleFlagrException() {
    final ResponseEntity<FlagrErrorResponse> result = flagrExceptionHandler.handleFlagrException(new FlagrException("flagr exception"));
    assertThat(result.getStatusCodeValue()).isEqualTo(500);
    assertThat(result.getBody())
        .extracting(FlagrErrorResponse::getStatus, FlagrErrorResponse::getMessage)
        .containsExactly(500, "flagr exception");
}
// Permits responding before LastContent only for client read-timeouts;
// overridable so subclasses can widen the policy.
protected boolean shouldAllowPreemptiveResponse(Channel channel) {
    // If the request timed-out while being read, then there won't have been any LastContent, but thats ok because
    // the connection will have to be discarded anyway.
    StatusCategory status = StatusCategoryUtils.getStatusCategory(ClientRequestReceiver.getRequestFromChannel(channel));
    return status == ZuulStatusCategory.FAILURE_CLIENT_TIMEOUT;
}
/** Subclasses can widen the preemptive-response policy by overriding shouldAllowPreemptiveResponse. */
@Test
void allowExtensionForPremptingResponse() {
    final ZuulStatusCategory customStatus = ZuulStatusCategory.SUCCESS_LOCAL_NO_ROUTE;
    // Override accepts a custom status instead of the default client-timeout-only rule
    final ClientResponseWriter responseWriter = new ClientResponseWriter(new BasicRequestCompleteHandler()) {
        @Override
        protected boolean shouldAllowPreemptiveResponse(Channel channel) {
            StatusCategory status = StatusCategoryUtils.getStatusCategory(ClientRequestReceiver.getRequestFromChannel(channel));
            return status == customStatus;
        }
    };
    final EmbeddedChannel channel = new EmbeddedChannel();
    final SessionContext context = new SessionContext();
    StatusCategoryUtils.setStatusCategory(context, customStatus);
    final HttpRequestMessage request = new HttpRequestBuilder(context).withDefaults();
    channel.attr(ClientRequestReceiver.ATTR_ZUUL_REQ).set(request);
    assertThat(responseWriter.shouldAllowPreemptiveResponse(channel)).isTrue();
}
/**
 * Exports selected fields from a collection, honoring the subject's permissions:
 * subjects with full read access are limited directly in MongoDB, others are
 * permission-filtered in memory before the limit is applied.
 */
public List<Document> export(final String collectionName, final List<String> exportedFieldNames, final int limit, final Bson dbFilter, final List<Sort> sorts, final Subject subject) {
    final MongoCollection<Document> collection = mongoConnection.getMongoDatabase().getCollection(collectionName);
    // Unlimited cursor; the limit is applied differently depending on permissions below
    final FindIterable<Document> resultsWithoutLimit = collection.find(Objects.requireNonNullElse(dbFilter, Filters.empty()))
        .projection(Projections.fields(Projections.include(exportedFieldNames)))
        .sort(toMongoDbSort(sorts));
    final var userCanReadAllEntities = permissionsUtils.hasAllPermission(subject) || permissionsUtils.hasReadPermissionForWholeCollection(subject, collectionName);
    final var checkPermission = permissionsUtils.createPermissionCheck(subject, collectionName);
    final var documents = userCanReadAllEntities
        ? getFromMongo(resultsWithoutLimit, limit)
        : getWithInMemoryPermissionCheck(resultsWithoutLimit, limit, checkPermission);
    return documents.collect(Collectors.toList());
}
/** The projection keeps only the requested field (plus _id) for every exported document. */
@Test
void testExportUsesProjectionCorrectly() {
    insertTestData();
    simulateAdminUser();
    final List<Document> exportedDocuments = toTest.export(TEST_COLLECTION_NAME, List.of("name"), 10, Filters.empty(), List.of(), subject);
    assertThat(exportedDocuments)
        .isNotNull()
        .hasSize(3)
        .containsExactlyInAnyOrder(
            new Document(Map.of("_id", "0000000000000000000000a5", "name", "John")),
            new Document(Map.of("_id", "0000000000000000000000b6", "name", "Jerry")),
            new Document(Map.of("_id", "0000000000000000000000c7", "name", "Judith"))
        );
}
/**
 * Returns true when the given meta represents a repository job or transformation.
 * Fix: fetch the repository element type once (the original called
 * getRepositoryElementType() twice); equals(null) already yields false, so the
 * explicit null-type check is redundant.
 */
public static boolean isJobOrTransformation( EngineMetaInterface engineMetaInterface ) {
  if ( engineMetaInterface == null ) {
    return false;
  }
  RepositoryObjectType objectType = engineMetaInterface.getRepositoryElementType();
  return RepositoryObjectType.TRANSFORMATION.equals( objectType ) || RepositoryObjectType.JOB.equals( objectType );
}
/** A TransMeta instance must be recognized as a transformation. */
@Test
public void isJobOrTransformation_withTransformation() {
    TransMeta transfromataionInstance = new TransMeta(); // NOTE(review): variable name typo kept as-is
    assertTrue( EngineMetaUtils.isJobOrTransformation( transfromataionInstance ) );
}
// Reads the param-check toggles from the environment, with defaults: enabled=true, checker="default".
@Override
protected void getConfigFromEnv() {
    paramCheckEnabled = EnvUtil.getProperty("nacos.core.param.check.enabled", Boolean.class, true);
    activeParamChecker = EnvUtil.getProperty("nacos.core.param.check.checker", String.class, "default");
}
/** Environment properties must override the defaults when the config object is constructed. */
@Test
void getConfigFromEnv() throws ReflectiveOperationException {
    MockEnvironment environment = new MockEnvironment();
    EnvUtil.setEnvironment(environment);
    environment.setProperty("nacos.core.param.check.enabled", String.valueOf(false));
    environment.setProperty("nacos.core.param.check.checker", "default");
    // Constructor is non-public -> instantiate reflectively so it re-reads the mock environment
    Constructor<ServerParamCheckConfig> declaredConstructor = ServerParamCheckConfig.class.getDeclaredConstructor();
    declaredConstructor.setAccessible(true);
    ServerParamCheckConfig paramCheckConfig = declaredConstructor.newInstance();
    assertFalse(paramCheckConfig.isParamCheckEnabled());
    assertEquals("default", paramCheckConfig.getActiveParamChecker());
}
// Sets the prefix under which this component's metrics are registered.
public void setPrefix(String prefix) {
    this.prefix = prefix;
}
/** A custom prefix must apply to all registered metric names, and stop() must unregister them all. */
@Test
public void customMetricsPrefix() throws Exception {
    iqtp.setPrefix(PREFIX);
    iqtp.start();
    assertThat(metricRegistry.getNames())
        .overridingErrorMessage("Custom metrics prefix doesn't match")
        .allSatisfy(name -> assertThat(name).startsWith(PREFIX));
    iqtp.stop();
    assertThat(metricRegistry.getMetrics())
        .overridingErrorMessage("The default metrics prefix was changed")
        .isEmpty();
}
/**
 * Parses an S3 Access Point ARN; region, account id and resource are all required.
 *
 * @throws IllegalArgumentException when the ARN is malformed or missing properties
 */
@Nonnull
public static ArnResource accessPointFromArn(String arn) throws IllegalArgumentException {
    Arn parsed = Arn.fromString(arn);
    if (!parsed.region().isPresent() || !parsed.accountId().isPresent() || parsed.resourceAsString().isEmpty()) {
        throw new IllegalArgumentException(
            String.format("Access Point Arn %s has an invalid format or missing properties", arn));
    }
    String resourceName = parsed.resource().resource();
    return new ArnResource(resourceName, parsed.accountId().get(), parsed.region().get(), parsed.partition(), arn);
}
/** Malformed ARN strings must be rejected with IllegalArgumentException. */
@Test
public void invalidARNsMustThrow() throws Exception {
    describe("Using an invalid ARN format must throw when initializing an ArnResource.");
    intercept(IllegalArgumentException.class, () ->
        ArnResource.accessPointFromArn("invalid:arn:resource"));
}
// Delegates EntryProcessor invocation to the wrapped JCache instance.
@Override
public <T> T invoke(K key, EntryProcessor<K, V, T> entryProcessor, Object... arguments) throws EntryProcessorException {
    return cache.invoke(key, entryProcessor, arguments);
}
/** invoke() applies the entry processor to the targeted key only. */
@Test
public void testInvoke() {
    cache.put(23, "value-23");
    cache.put(42, "value-42");
    String result = adapter.invoke(23, new ICacheReplaceEntryProcessor(), "value", "newValue");
    assertEquals("newValue-23", result);
    assertEquals("newValue-23", cache.get(23));
    // Untouched key keeps its original value
    assertEquals("value-42", cache.get(42));
}
// Factory for an element-wise natural-log transform.
public static SimpleTransform log() {
    return new SimpleTransform(Operation.log);
}
/** The log transform must match Math::log element-wise. */
@Test
public void testLog() {
    TransformationMap t = new TransformationMap(Collections.singletonList(SimpleTransform.log()),new HashMap<>());
    testSimple(t,Math::log);
}
/**
 * Ensures a dict-data record with the given id exists.
 * A null id means "nothing to validate" and passes silently.
 *
 * @throws the DICT_DATA_NOT_EXISTS service exception when no record is found
 */
@VisibleForTesting
public void validateDictDataExists(Long id) {
    if (id == null) {
        return;
    }
    if (dictDataMapper.selectById(id) == null) {
        throw exception(DICT_DATA_NOT_EXISTS);
    }
}
/** Validating a random non-existent id must raise DICT_DATA_NOT_EXISTS. */
@Test
public void testValidateDictDataExists_notExists() {
    assertServiceException(() -> dictDataService.validateDictDataExists(randomLongId()), DICT_DATA_NOT_EXISTS);
}
/**
 * Returns the array payload node registered under the given name,
 * lazily validating, creating and caching it on first access.
 */
public Array getArray(String name) {
    Array existing = arrayMap.get(name);
    if (existing != null) {
        return existing;
    }
    // First access: validate against the config definition before materializing.
    validateArray(name);
    Array created = new Array(configDefinition, name);
    arrayMap.put(name, created);
    return created;
}
/** getArray lazily creates an array node on first access. */
@Test
public void require_that_arrays_are_created() {
    ConfigPayloadBuilder builder = new ConfigPayloadBuilder();
    ConfigPayloadBuilder.Array array = builder.getArray("foo");
    assertNotNull(array);
}
// Convenience overload: transform configs without a connector name
// (so no TTL-based restart can be scheduled for a specific connector).
public Map<String, String> transform(Map<String, String> configs) {
    return transform(null, configs);
}
/** A config value with a TTL must resolve AND schedule a connector restart at TTL expiry. */
@Test
public void testReplaceVariableWithTTLAndScheduleRestart() {
    // Setup
    when(worker.herder()).thenReturn(herder);
    when(herder.restartConnector(eq(1L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId);
    // Execution
    Map<String, String> result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithTTL}"));
    // Assertions
    assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY));
    verify(herder).restartConnector(eq(1L), eq(MY_CONNECTOR), notNull());
}
/**
 * Resolves a dependency to a local file URL via the properties file,
 * falling back to the delegate resolver when the dependency is not listed.
 * Exactly one file per dependency is supported.
 */
@Override
public URL getLocalArtifactUrl(DependencyJar dependency) {
    String pathStr = properties.getProperty(dependency.getShortName());
    if (pathStr == null) {
        // Not listed here: defer to the delegate, or give up.
        if (delegate != null) {
            return delegate.getLocalArtifactUrl(dependency);
        }
        throw new RuntimeException("no artifacts found for " + dependency);
    }
    if (pathStr.indexOf(File.pathSeparatorChar) != -1) {
        throw new IllegalArgumentException(
            "didn't expect multiple files for " + dependency + ": " + pathStr);
    }
    try {
        // Relative entries resolve against the properties file's directory.
        return baseDir.resolve(Paths.get(pathStr)).toUri().toURL();
    } catch (MalformedURLException e) {
        throw new RuntimeException(e);
    }
}
// Verifies that an absolute path in the properties file resolves to the matching
// file URL on both Windows (drive-letter) and Unix-style paths.
@Test
public void whenAbsolutePathIsProvidedInProperties_shouldReturnFileUrl() throws Exception {
    String absolutePath = cColonBackslash ? "c:\\tmp\\file.jar" : "/tmp/file.jar";
    DependencyResolver resolver = new PropertiesDependencyResolver(
            propsFile("com.group:example:1.3", new File(absolutePath).getAbsoluteFile()), mock);
    URL url = resolver.getLocalArtifactUrl(exampleDep);
    if (cColonBackslash) {
        assertThat(url).isEqualTo(Paths.get("c:\\tmp\\file.jar").toUri().toURL());
    } else {
        assertThat(url).isEqualTo(Paths.get("/tmp/file.jar").toUri().toURL());
    }
}
/**
 * Picks a target uniformly at random from the available names.
 * The group name is unused by this strategy.
 *
 * @param groupName logical group name (ignored)
 * @param availableTargetNames candidate target names; assumed non-empty — TODO confirm caller contract
 * @return one of the available target names, chosen at random
 */
@Override
public String getTargetName(final String groupName, final List<String> availableTargetNames) {
    final int randomIndex = ThreadLocalRandom.current().nextInt(availableTargetNames.size());
    return availableTargetNames.get(randomIndex);
}
// Verifies that the RANDOM load-balance algorithm always returns one of the supplied targets.
@Test
void assertGetAvailableTargetNameWithDefaultStrategy() {
    LoadBalanceAlgorithm loadBalanceAlgorithm = TypedSPILoader.getService(LoadBalanceAlgorithm.class, "RANDOM", new Properties());
    String availableTargetNames1 = "test_read_ds_1";
    String availableTargetNames2 = "test_read_ds_2";
    List<String> availableTargetNames = Arrays.asList(availableTargetNames1, availableTargetNames2);
    assertRandomLoadBalance(availableTargetNames, loadBalanceAlgorithm);
    // Random selection must stay within the candidate set on repeated calls.
    assertTrue(availableTargetNames.contains(loadBalanceAlgorithm.getTargetName("ds", availableTargetNames)));
    assertTrue(availableTargetNames.contains(loadBalanceAlgorithm.getTargetName("ds", availableTargetNames)));
}
/**
 * An analysis snapshot may be purged only when it is neither the last
 * analysis nor carries any events.
 *
 * @param snapshot the candidate analysis
 * @return {@code true} when the snapshot is safe to delete
 */
@VisibleForTesting
static boolean isDeletable(PurgeableAnalysisDto snapshot) {
    // De Morgan form of: !last && !hasEvents
    return !(snapshot.isLast() || snapshot.hasEvents());
}
// Verifies deletability: plain snapshots are deletable; last snapshots and
// snapshots with events are not.
@Test
void test_isDeletable() {
    assertThat(KeepOneFilter.isDeletable(DbCleanerTestUtils.createAnalysisWithDate("u1", "2011-05-01"))).isTrue();
    assertThat(KeepOneFilter.isDeletable(DbCleanerTestUtils.createAnalysisWithDate("u1", "2011-05-01").setLast(true))).isFalse();
    assertThat(KeepOneFilter.isDeletable(DbCleanerTestUtils.createAnalysisWithDate("u1", "2011-05-01").setHasEvents(true))).isFalse();
}
/**
 * Appends a single key/value pair to this builder's parameter map.
 *
 * @param key parameter name
 * @param value parameter value
 * @return this builder, for call chaining
 */
public ProtocolBuilder appendParameter(String key, String value) {
    this.parameters = appendParameter(parameters, key, value);
    return getThis();
}
// Verifies that chained appendParameter calls accumulate all keys in the built parameters.
@Test
void appendParameter() {
    ProtocolBuilder builder = new ProtocolBuilder();
    builder.appendParameter("default.num", "one").appendParameter("num", "ONE");
    Map<String, String> parameters = builder.build().getParameters();
    Assertions.assertTrue(parameters.containsKey("default.num"));
    Assertions.assertEquals("ONE", parameters.get("num"));
}
/**
 * Matches the given specs relative to the current working directory;
 * delegates to the two-argument overload with the resolved base directory.
 *
 * @param specs file specs/globs to match
 * @return match results in the same order as {@code specs}
 * @throws IOException if matching fails
 */
@Override
protected List<MatchResult> match(List<String> specs) throws IOException {
    return match(new File(".").getAbsolutePath(), specs);
}
// Verifies that a spec using a "file:/" URI prefix matches exactly the named
// file and not its siblings with a shared prefix ("aa", "ab").
@Test
public void testMatchWithFileSlashPrefix() throws Exception {
    List<String> expected = ImmutableList.of(temporaryFolder.newFile("a").toString());
    temporaryFolder.newFile("aa");
    temporaryFolder.newFile("ab");

    String file = "file:/" + temporaryFolder.getRoot().toPath().resolve("a").toString();
    List<MatchResult> results = localFileSystem.match(ImmutableList.of(file));
    assertThat(
        toFilenames(results),
        containsInAnyOrder(expected.toArray(new String[expected.size()])));
}
/**
 * Forcefully shuts down the executor, waiting up to {@code timeout};
 * delegates to the three-argument overload with interruption enabled.
 *
 * @param executor the executor to shut down
 * @param timeout maximum time to wait for termination
 * @return {@code true} if the executor terminated within the timeout
 */
public static boolean shutdownExecutorForcefully(ExecutorService executor, Duration timeout) {
    return shutdownExecutorForcefully(executor, timeout, true);
}
// Verifies that a forceful shutdown returns false when the executor never
// terminates, and that shutdownNow() was attempted exactly once.
@Test
void testShutdownExecutorForcefullyReachesTimeout() {
    MockExecutorService executor = new MockExecutorService(5);
    executor.timeoutAfterNumForcefulShutdown(clock, 1);
    assertThat(
            ComponentClosingUtils.shutdownExecutorForcefully(
                    executor, Duration.ofDays(1), false))
            .isFalse();
    assertThat(executor.forcefullyShutdownCount).isOne();
}
/**
 * If the given path points at a zip file, expands it into a sibling directory
 * named after the file (extension stripped) and returns that directory;
 * otherwise returns the path unchanged. The archive is re-extracted only when
 * the directory is missing or older than the zip file.
 *
 * @param filePath path that may reference a plugin zip
 * @return the expanded plugin directory, or {@code filePath} when it is not a zip
 * @throws IOException if modification times cannot be read or extraction fails
 */
public static Path expandIfZip(Path filePath) throws IOException {
    if (!isZipFile(filePath)) {
        return filePath;
    }

    FileTime pluginZipDate = Files.getLastModifiedTime(filePath);
    String fileName = filePath.getFileName().toString();
    // Derive the destination directory name by stripping the ".zip" extension.
    String directoryName = fileName.substring(0, fileName.lastIndexOf("."));
    Path pluginDirectory = filePath.resolveSibling(directoryName);

    // Only extract when needed: directory absent, or the zip is newer than the directory.
    // NOTE(review): a newer zip triggers re-extraction on top of the existing directory
    // without clearing stale files first — confirm this is intended.
    if (!Files.exists(pluginDirectory) || pluginZipDate.compareTo(Files.getLastModifiedTime(pluginDirectory)) > 0) {
        // expand '.zip' file
        Unzip unzip = new Unzip();
        unzip.setSource(filePath.toFile());
        unzip.setDestination(pluginDirectory.toFile());
        unzip.extract();
        log.info("Expanded plugin zip '{}' in '{}'", filePath.getFileName(), pluginDirectory.getFileName());
    }

    return pluginDirectory;
}
// Verifies that a plugin zip containing only the descriptor expands to the expected
// directory and that plugin.properties is present after extraction.
@Test
public void expandIfZipForZipWithOnlyModuleDescriptor() throws Exception {
    PluginZip pluginZip = new PluginZip.Builder(pluginsPath.resolve("my-plugin-1.2.3.zip"), "myPlugin")
        .pluginVersion("1.2.3")
        .build();

    Path unzipped = FileUtils.expandIfZip(pluginZip.path());
    assertEquals(pluginZip.unzippedPath(), unzipped);
    assertTrue(Files.exists(unzipped.resolve("plugin.properties")));
}
/**
 * Merges an int value into the stat identified by {@code key}.
 * LONG-typed keys are widened and routed to the long overload. A merged
 * result of zero removes the entry instead of storing it.
 *
 * @param key the stat key
 * @param value the int value to merge
 * @return this map, for chaining
 */
public StatMap<K> merge(K key, int value) {
    // LONG stats are stored as longs; widen and delegate.
    if (key.getType() == Type.LONG) {
        merge(key, (long) value);
        return this;
    }
    int merged = key.merge(getInt(key), value);
    if (merged == 0) {
        // Zero means "no information": drop the entry to keep the map sparse.
        _map.remove(key);
    } else {
        _map.put(key, merged);
    }
    return this;
}
// Round-trips a StatMap populated with one value of every supported stat type
// through serialization and deserialization.
@Test
public void encodeDecodeAll() throws IOException {
    StatMap<MyStats> statMap = new StatMap<>(MyStats.class);
    for (MyStats stat : MyStats.values()) {
        switch (stat.getType()) {
            case BOOLEAN:
                statMap.merge(stat, true);
                break;
            case INT:
                statMap.merge(stat, 1);
                break;
            case LONG:
                statMap.merge(stat, 1L);
                break;
            case STRING:
                statMap.merge(stat, "foo");
                break;
            default:
                throw new IllegalStateException();
        }
    }
    testSerializeDeserialize(statMap);
}
/**
 * Initializes the batch-source executor: records config/context, derives the
 * intermediate topic name, creates a single-threaded discovery executor,
 * then parses batch-source configs, initializes the source, and starts it.
 * The call order matters: configs must be parsed before initialization,
 * and the source is only started last.
 *
 * @param config raw connector configuration
 * @param sourceContext runtime context (tenant/namespace/source name)
 * @throws Exception if config parsing, initialization, or startup fails
 */
@Override
public void open(Map<String, Object> config, SourceContext sourceContext) throws Exception {
    this.config = config;
    this.sourceContext = sourceContext;
    // The intermediate topic is derived from the fully-qualified source identity.
    this.intermediateTopicName = SourceConfigUtils.computeBatchSourceIntermediateTopicName(sourceContext.getTenant(),
            sourceContext.getNamespace(), sourceContext.getSourceName()).toString();
    // Dedicated single thread for discovery, named after the fully-qualified source.
    this.discoveryThread = Executors.newSingleThreadExecutor(
            new DefaultThreadFactory(
                    String.format("%s-batch-source-discovery",
                            FunctionCommon.getFullyQualifiedName(
                                    sourceContext.getTenant(), sourceContext.getNamespace(), sourceContext.getSourceName()))));
    this.getBatchSourceConfigs(config);
    this.initializeBatchSource();
    this.start();
}
// Verifies that configuring a discovery-triggerer class which does not implement
// BatchSourceTriggerer is rejected with IllegalArgumentException on open().
@Test(expectedExceptions = IllegalArgumentException.class,
        expectedExceptionsMessageRegExp = "BatchSourceTriggerer does not implement the correct interface")
public void testWithoutRightTriggerer() throws Exception {
    testBatchConfig.setDiscoveryTriggererClassName(TestBatchSource.class.getName());
    config.put(BatchSourceConfig.BATCHSOURCE_CONFIG_KEY, new Gson().toJson(testBatchConfig));
    batchSourceExecutor.open(config, context);
}
/**
 * Creates the share-fetch context for a request, creating, updating, or tearing
 * down the member's share session as dictated by the request epoch.
 *
 * <p>Behavior by epoch:
 * <ul>
 *   <li>FINAL_EPOCH — the session is removed; no fetch data (beyond ack-only,
 *       maxBytes==0 partitions) is allowed; returns a {@code FinalContext}.</li>
 *   <li>INITIAL_EPOCH — any stale session is removed and a fresh one created;
 *       acknowledgement data is rejected; returns a full {@code ShareSessionContext}.</li>
 *   <li>otherwise — the existing session is updated in place (epoch advanced)
 *       and a subsequent {@code ShareSessionContext} is returned.</li>
 * </ul>
 *
 * @param groupId share group id
 * @param shareFetchData requested partitions with per-partition fetch data
 * @param toForget partitions the member wants removed from the session
 * @param reqMetadata member id + epoch of the request
 * @param isAcknowledgeDataPresent whether the request carries acknowledgements
 * @return the context used to build the fetch response
 */
public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData,
                                    List<TopicIdPartition> toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) {
    ShareFetchContext context;
    // TopicPartition with maxBytes as 0 should not be added in the cachedPartitions
    // (such entries carry acknowledgements only, not fetch requests).
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchDataWithMaxBytes = new HashMap<>();
    shareFetchData.forEach((tp, sharePartitionData) -> {
        if (sharePartitionData.maxBytes > 0) shareFetchDataWithMaxBytes.put(tp, sharePartitionData);
    });
    // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a
    // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases.
    if (reqMetadata.isFull()) {
        ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
        if (reqMetadata.epoch() == ShareRequestMetadata.FINAL_EPOCH) {
            // If the epoch is FINAL_EPOCH, don't try to create a new session.
            // A final request must not carry real fetch data.
            if (!shareFetchDataWithMaxBytes.isEmpty()) {
                throw Errors.INVALID_REQUEST.exception();
            }
            if (cache.remove(key) == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            } else {
                log.debug("Removed share session with key " + key);
            }
            context = new FinalContext();
        } else {
            // INITIAL_EPOCH: acknowledgements make no sense before a session exists.
            if (isAcknowledgeDataPresent) {
                log.error("Acknowledge data present in Initial Fetch Request for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.INVALID_REQUEST.exception();
            }
            // Drop any stale session left over from a previous incarnation of this member.
            if (cache.remove(key) != null) {
                log.debug("Removed share session with key {}", key);
            }
            ImplicitLinkedHashCollection<CachedSharePartition> cachedSharePartitions = new
                    ImplicitLinkedHashCollection<>(shareFetchDataWithMaxBytes.size());
            shareFetchDataWithMaxBytes.forEach((topicIdPartition, reqData) ->
                    cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, reqData, false)));
            ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(),
                    time.milliseconds(), cachedSharePartitions);
            if (responseShareSessionKey == null) {
                log.error("Could not create a share session for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            context = new ShareSessionContext(reqMetadata, shareFetchDataWithMaxBytes);
            log.debug("Created a new ShareSessionContext with key {} isSubsequent {} returning {}. A new share " +
                    "session will be started.", responseShareSessionKey, false,
                    partitionsToLogString(shareFetchDataWithMaxBytes.keySet()));
        }
    } else {
        // We update the already existing share session.
        // Synchronize on the cache so session lookup, update, and epoch bump are atomic.
        synchronized (cache) {
            ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
            ShareSession shareSession = cache.get(key);
            if (shareSession == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            // The request epoch must match the session's current epoch exactly.
            if (shareSession.epoch != reqMetadata.epoch()) {
                log.debug("Share session error for {}: expected epoch {}, but got {} instead", key,
                        shareSession.epoch, reqMetadata.epoch());
                throw Errors.INVALID_SHARE_SESSION_EPOCH.exception();
            }
            Map<ShareSession.ModifiedTopicIdPartitionType, List<TopicIdPartition>> modifiedTopicIdPartitions = shareSession.update(
                    shareFetchDataWithMaxBytes, toForget);
            cache.touch(shareSession, time.milliseconds());
            shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch);
            log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " +
                            "added {}, updated {}, removed {}", shareSession.key(), shareSession.epoch,
                    partitionsToLogString(modifiedTopicIdPartitions.get(
                            ShareSession.ModifiedTopicIdPartitionType.ADDED)),
                    partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.UPDATED)),
                    partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.REMOVED))
            );
            context = new ShareSessionContext(reqMetadata, shareSession);
        }
    }
    return context;
}
// Verifies that a FINAL_EPOCH request carrying only ack-style partitions
// (maxBytes == 0) is accepted and yields a FinalContext.
@Test
public void testNewContextReturnsFinalContextWithRequestData() {
    Time time = new MockTime();
    ShareSessionCache cache = new ShareSessionCache(10, 1000);
    SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
            .withCache(cache).withTime(time).build();
    Uuid tpId0 = Uuid.randomUuid();
    Uuid tpId1 = Uuid.randomUuid();
    TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0));
    TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1));

    String groupId = "grp";
    Uuid memberId = Uuid.randomUuid();

    // Create a new share session with an initial share fetch request
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> reqData1 = new LinkedHashMap<>();
    reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES));
    reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES));

    ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH);
    ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false);
    assertEquals(ShareSessionContext.class, context1.getClass());
    assertFalse(((ShareSessionContext) context1).isSubsequent());

    ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH);

    // shareFetchData is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements.
    // New context should be created successfully
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> reqData3 = Collections.singletonMap(
            new TopicIdPartition(tpId1, new TopicPartition("foo", 0)),
            new ShareFetchRequest.SharePartitionData(tpId1, 0));
    ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true);
    assertEquals(FinalContext.class, context2.getClass());
}
/**
 * Returns the schema provider attached to this batch. When a batch payload is
 * present, a provider is mandatory and its absence is an error; otherwise a
 * {@code NullSchemaProvider} singleton stands in for "no schema".
 *
 * @return the configured provider, or the null provider when none is set
 * @throws HoodieException if a batch is present but no provider was configured
 */
public SchemaProvider getSchemaProvider() {
    if (schemaProvider != null) {
        return schemaProvider;
    }
    // No provider configured: only acceptable when there is no batch payload.
    if (batch.isPresent()) {
        throw new HoodieException("Please provide a valid schema provider class!");
    }
    return NullSchemaProvider.getInstance();
}
// Verifies that an empty batch with no provider yields the NullSchemaProvider fallback.
@Test
public void getSchemaProviderShouldReturnNullSchemaProvider() {
    final InputBatch<String> inputBatch = new InputBatch<>(Option.empty(), null, null);
    SchemaProvider schemaProvider = inputBatch.getSchemaProvider();
    assertTrue(schemaProvider instanceof InputBatch.NullSchemaProvider);
}
/**
 * Renders an event as a human-readable string. Schema changes use their own
 * {@code toString()}; data changes are expanded field-by-field via the supplied
 * getters; any other event kind renders as {@code "Event{}"}.
 *
 * @param event the event to render
 * @param fieldGetters getters used to extract before/after record fields
 * @return the string representation of the event
 */
public static String convertEventToStr(Event event, List<RecordData.FieldGetter> fieldGetters) {
    if (event instanceof SchemaChangeEvent) {
        return event.toString();
    }
    if (!(event instanceof DataChangeEvent)) {
        // Unknown event kinds fall back to a generic marker.
        return "Event{}";
    }
    DataChangeEvent dataChangeEvent = (DataChangeEvent) event;
    return "DataChangeEvent{"
            + "tableId="
            + dataChangeEvent.tableId()
            + ", before="
            + getFields(fieldGetters, dataChangeEvent.before())
            + ", after="
            + getFields(fieldGetters, dataChangeEvent.after())
            + ", op="
            + dataChangeEvent.op()
            + ", meta="
            + dataChangeEvent.describeMeta()
            + '}';
}
// Verifies the string rendering of schema-change, insert, delete, and update events.
@Test
public void testConvertEventToStr() {
    Schema schema =
            Schema.newBuilder()
                    .physicalColumn("col1", DataTypes.STRING())
                    .physicalColumn("col2", DataTypes.STRING())
                    .primaryKey("col1")
                    .build();
    TableId tableId = TableId.parse("default.default.table1");
    BinaryRecordDataGenerator generator =
            new BinaryRecordDataGenerator(RowType.of(DataTypes.STRING(), DataTypes.STRING()));
    List<RecordData.FieldGetter> fieldGetters = SchemaUtils.createFieldGetters(schema);

    // Schema-change events render via their own toString().
    Assert.assertEquals(
            "CreateTableEvent{tableId=default.default.table1, schema=columns={`col1` STRING,`col2` STRING}, primaryKeys=col1, options=()}",
            ValuesDataSinkHelper.convertEventToStr(
                    new CreateTableEvent(tableId, schema), fieldGetters));

    // Insert: empty "before", populated "after".
    DataChangeEvent insertEvent =
            DataChangeEvent.insertEvent(
                    tableId,
                    generator.generate(
                            new Object[] {
                                BinaryStringData.fromString("1"), BinaryStringData.fromString("1")
                            }));
    Assert.assertEquals(
            "DataChangeEvent{tableId=default.default.table1, before=[], after=[1, 1], op=INSERT, meta=()}",
            ValuesDataSinkHelper.convertEventToStr(insertEvent, fieldGetters));

    // Delete: populated "before", empty "after".
    DataChangeEvent deleteEvent =
            DataChangeEvent.deleteEvent(
                    tableId,
                    generator.generate(
                            new Object[] {
                                BinaryStringData.fromString("1"), BinaryStringData.fromString("1")
                            }));
    Assert.assertEquals(
            "DataChangeEvent{tableId=default.default.table1, before=[1, 1], after=[], op=DELETE, meta=()}",
            ValuesDataSinkHelper.convertEventToStr(deleteEvent, fieldGetters));

    // Update: both "before" and "after" populated.
    DataChangeEvent updateEvent =
            DataChangeEvent.updateEvent(
                    tableId,
                    generator.generate(
                            new Object[] {
                                BinaryStringData.fromString("1"), BinaryStringData.fromString("1")
                            }),
                    generator.generate(
                            new Object[] {
                                BinaryStringData.fromString("1"), BinaryStringData.fromString("x")
                            }));
    Assert.assertEquals(
            "DataChangeEvent{tableId=default.default.table1, before=[1, 1], after=[1, x], op=UPDATE, meta=()}",
            ValuesDataSinkHelper.convertEventToStr(updateEvent, fieldGetters));
}
/**
 * Wraps a {@link PrintStream} in a UTF-8 {@link PrintWriter}, so output encoding
 * does not depend on the platform default charset.
 *
 * @param printStream the underlying stream
 * @return a UTF-8 writer over the stream
 */
static PrintWriter createPrintWriter(PrintStream printStream) {
    return new PrintWriter(new OutputStreamWriter(printStream, StandardCharsets.UTF_8));
}
// Verifies that a faulty log4j2 configuration surfaces the ConfigurationException
// on the command's error stream instead of being swallowed.
@Test
@SetSystemProperty(key = "hazelcast.logging.type", value = "log4j2")
@SetSystemProperty(key = "log4j2.configurationFile", value = "faulty-log.properties")
void test_log4j2_exception() throws Exception {
    try (ByteArrayOutputStream outputStreamCaptor = new ByteArrayOutputStream();
         PrintStream errorPrintStream = new PrintStream(outputStreamCaptor)) {
        CommandLine cmd = new CommandLine(new HazelcastServerCommandLine())
                .setOut(createPrintWriter(System.out)).setErr(createPrintWriter(errorPrintStream))
                .setTrimQuotes(true).setExecutionExceptionHandler(new ExceptionHandler());
        cmd.execute("start");
        String string = outputStreamCaptor.toString(StandardCharsets.UTF_8);
        assertThat(string).contains("org.apache.logging.log4j.core.config.ConfigurationException: "
                + "No type attribute provided for Layout on Appender STDOUT");
    }
}
/**
 * Checks for a pending crash report left by a previous run. When one exists,
 * archives it line-by-line to an acknowledged-report file, deletes the pending
 * file, posts a notification carrying the report, and returns {@code true}.
 * Returns {@code false} when there is no pending report or archiving fails.
 *
 * @return {@code true} if a crash report was found and processed
 */
public boolean performCrashDetectingFlow() {
    final File newCrashFile = new File(mApp.getFilesDir(), NEW_CRASH_FILENAME);
    if (newCrashFile.isFile()) {
        String ackReportFilename = getAckReportFilename();
        StringBuilder header = new StringBuilder();
        StringBuilder report = new StringBuilder();
        // NOTE(review): Charset.forName("UTF-8") could be StandardCharsets.UTF_8 — same behavior.
        try (BufferedReader reader =
                     new BufferedReader(
                             new InputStreamReader(
                                     mApp.openFileInput(NEW_CRASH_FILENAME), Charset.forName("UTF-8")))) {
            try (BufferedWriter writer =
                         new BufferedWriter(
                                 new OutputStreamWriter(
                                         mApp.openFileOutput(ackReportFilename, Context.MODE_PRIVATE),
                                         Charset.forName("UTF-8")))) {
                Logger.i(TAG, "Archiving crash report to %s.", ackReportFilename);
                Logger.d(TAG, "Crash report:");
                String line;
                boolean stillInHeader = true;
                // Copy every line to the archive while also accumulating the full report
                // and (until the break marker) the header section.
                while (null != (line = reader.readLine())) {
                    writer.write(line);
                    writer.newLine();
                    report.append(line).append(NEW_LINE);
                    if (line.equals(HEADER_BREAK_LINE)) stillInHeader = false;
                    // The break line itself is excluded from the header.
                    if (stillInHeader) header.append(line).append(NEW_LINE);
                    Logger.d(TAG, "err: %s", line);
                }
            }
        } catch (Exception e) {
            // Best-effort flow: on any archiving failure report "not handled".
            Logger.e(TAG, "Failed to write crash report to archive!");
            return false;
        }
        // Deletion failure is logged but does not abort the flow.
        if (!newCrashFile.delete()) {
            Logger.e(TAG, "Failed to delete crash log! %s", newCrashFile.getAbsolutePath());
        }
        sendNotification(
                header.toString(), report.toString(), new File(mApp.getFilesDir(), ackReportFilename));
        return true;
    }
    return false;
}
// Verifies that with no pending crash file the flow reports false, creates no
// archive directory, and posts no notification.
@Test
public void testDoesNotCreateArchivedReportIfNotCrashed() {
    Context app = ApplicationProvider.getApplicationContext();
    var notificationDriver = Mockito.mock(NotificationDriver.class);
    TestableChewbaccaUncaughtExceptionHandler underTest =
            new TestableChewbaccaUncaughtExceptionHandler(app, null, notificationDriver);
    Assert.assertFalse(underTest.performCrashDetectingFlow());
    Assert.assertFalse(new File(app.getFilesDir(), "crashes").exists());
    Mockito.verify(notificationDriver, Mockito.never()).notify(Mockito.any(), Mockito.anyBoolean());
}
/**
 * Parses the entity of an AS2 HTTP message in place, replacing the raw entity
 * with a typed {@code MimeEntity}. No-ops when the message has no entity, the
 * entity is already parsed, or no Content-Type header is present.
 *
 * @param message the HTTP message whose entity should be parsed
 * @throws HttpException if the entity content cannot be parsed
 */
public static void parseAS2MessageEntity(HttpMessage message) throws HttpException {
    if (EntityUtils.hasEntity(message)) {
        HttpEntity entity = ObjectHelper.notNull(EntityUtils.getMessageEntity(message), "message entity");
        if (entity instanceof MimeEntity) {
            // already parsed
            return;
        }
        try {
            // Determine Content Type of Message
            String contentTypeStr = HttpMessageUtils.getHeaderValue(message, AS2Header.CONTENT_TYPE);
            if (contentTypeStr == null) {
                // contentTypeStr can be null when dispositionNotificationTo isn't set
                return;
            }
            doParseAS2MessageEntity(message, contentTypeStr, entity);
        } catch (HttpException e) {
            // Preserve HTTP-level failures as-is.
            throw e;
        } catch (Exception e) {
            throw new HttpException("Failed to parse entity content", e);
        }
    }
}
// Verifies that a multipart MDN report entity on an HTTP response is parsed into
// a DispositionNotificationMultipartReportEntity.
@Test
public void parseMessageDispositionNotificationReportMessageTest() throws Exception {
    HttpResponse response = new BasicClassicHttpResponse(
            HttpStatus.SC_OK, EnglishReasonPhraseCatalog.INSTANCE.getReason(HttpStatus.SC_OK, null));
    response.setVersion(new ProtocolVersion("HTTP", 1, 1));
    HttpMessageUtils.setHeaderValue(response, AS2Header.CONTENT_TRANSFER_ENCODING,
            DISPOSITION_NOTIFICATION_CONTENT_TRANSFER_ENCODING);
    InputStream is = new ByteArrayInputStream(
            DISPOSITION_NOTIFICATION_REPORT_CONTENT.getBytes(DISPOSITION_NOTIFICATION_REPORT_CONTENT_CHARSET_NAME));
    BasicHttpEntity entity = new BasicHttpEntity(is, ContentType.parse(REPORT_CONTENT_TYPE_VALUE));
    EntityUtils.setMessageEntity(response, entity);

    EntityParser.parseAS2MessageEntity(response);
    HttpEntity parsedEntity = EntityUtils.getMessageEntity(response);
    assertNotNull(parsedEntity, "Unexpected Null message disposition notification report entity");
    assertTrue(parsedEntity instanceof DispositionNotificationMultipartReportEntity,
            "Unexpected type for message disposition notification report entity");
}
/**
 * Builds the continuation trigger: repeats the ACTUAL continuation until the
 * UNTIL continuation fires.
 *
 * @param continuationTriggers continuations of the sub-triggers, indexed by ACTUAL/UNTIL
 * @return the combined continuation trigger
 */
@Override
protected Trigger getContinuationTrigger(List<Trigger> continuationTriggers) {
    // Use OrFinallyTrigger instead of AfterFirst because the continuation of ACTUAL
    // may not be a OnceTrigger.
    return Repeatedly.forever(
        new OrFinallyTrigger(
            continuationTriggers.get(ACTUAL),
            (Trigger.OnceTrigger) continuationTriggers.get(UNTIL)));
}
// Verifies that the continuation of A.orFinally(B) is Repeatedly.forever of the
// sub-triggers' continuations combined with orFinally, in both orderings.
@Test
public void testContinuation() throws Exception {
    OnceTrigger triggerA = AfterProcessingTime.pastFirstElementInPane();
    OnceTrigger triggerB = AfterWatermark.pastEndOfWindow();
    Trigger aOrFinallyB = triggerA.orFinally(triggerB);
    Trigger bOrFinallyA = triggerB.orFinally(triggerA);
    assertEquals(
        Repeatedly.forever(
            triggerA.getContinuationTrigger().orFinally(triggerB.getContinuationTrigger())),
        aOrFinallyB.getContinuationTrigger());
    assertEquals(
        Repeatedly.forever(
            triggerB.getContinuationTrigger().orFinally(triggerA.getContinuationTrigger())),
        bOrFinallyA.getContinuationTrigger());
}
/**
 * Diffs the edited config text against the existing items of a namespace and
 * produces the create/update/delete change sets. Lines are classified as
 * comment, blank, or normal key=value items; comment/blank lines are matched
 * by line number, normal items by key. Leftover old items not present in the
 * new text are deleted.
 *
 * @param namespaceId id of the namespace being edited
 * @param configText the full new config text, one item per line
 * @param baseItems the currently stored items to diff against
 * @return the computed change sets
 * @throws BadRequestException if the new text contains duplicate keys
 */
@Override
public ItemChangeSets resolve(long namespaceId, String configText, List<ItemDTO> baseItems) {
    // Index the existing items two ways: by line number (for comment/blank lines)
    // and by key (for normal key=value lines).
    Map<Integer, ItemDTO> oldLineNumMapItem = BeanUtils.mapByKey("lineNum", baseItems);
    Map<String, ItemDTO> oldKeyMapItem = BeanUtils.mapByKey("key", baseItems);

    //remove comment and blank item map.
    oldKeyMapItem.remove("");

    String[] newItems = configText.split(ITEM_SEPARATOR);
    Set<String> repeatKeys = new HashSet<>();
    if (isHasRepeatKey(newItems, repeatKeys)) {
        throw new BadRequestException("Config text has repeated keys: %s, please check your input.", repeatKeys);
    }

    ItemChangeSets changeSets = new ItemChangeSets();
    Map<Integer, String> newLineNumMapItem = new HashMap<>();//use for delete blank and comment item
    int lineCounter = 1;
    for (String newItem : newItems) {
        newItem = newItem.trim();
        newLineNumMapItem.put(lineCounter, newItem);
        ItemDTO oldItemByLine = oldLineNumMapItem.get(lineCounter);

        //comment item
        if (isCommentItem(newItem)) {
            handleCommentLine(namespaceId, oldItemByLine, newItem, lineCounter, changeSets);

            //blank item
        } else if (isBlankItem(newItem)) {
            handleBlankLine(namespaceId, oldItemByLine, lineCounter, changeSets);

            //normal item
        } else {
            handleNormalLine(namespaceId, oldKeyMapItem, newItem, lineCounter, changeSets);
        }

        lineCounter++;
    }

    // Anything left in the old maps was not re-stated in the new text: delete it.
    deleteCommentAndBlankItem(oldLineNumMapItem, newLineNumMapItem, changeSets);
    deleteNormalKVItem(oldKeyMapItem, changeSets);

    return changeSets;
}
// Verifies that reducing a 3-key base to a single "a=b" line yields two deletions.
@Test
public void testDeleteItem() {
    ItemChangeSets changeSets = resolver.resolve(1, "a=b", mockBaseItemHas3Key());
    Assert.assertEquals(2, changeSets.getDeleteItems().size());
}
@SqlNullable
@Description("Returns the vertex of a linestring at the specified index (indices started with 1) ")
@ScalarFunction("ST_PointN")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stPointN(@SqlType(GEOMETRY_TYPE_NAME) Slice input, @SqlType(INTEGER) long index)
{
    // Only LINE_STRING geometries are valid for ST_PointN.
    Geometry geometry = deserialize(input);
    validateType("ST_PointN", geometry, EnumSet.of(LINE_STRING));
    LineString line = (LineString) geometry;

    // 1-based indexing; anything outside [1, numPoints] yields SQL NULL.
    boolean outOfRange = index < 1 || index > line.getNumPoints();
    return outOfRange ? null : serialize(line.getPointN(toIntExact(index) - 1));
}
// Verifies ST_PointN: valid indices return the vertex, out-of-range indices return
// null, and non-linestring geometries are rejected.
@Test
public void testSTPointN() {
    assertPointN("LINESTRING(1 2, 3 4, 5 6, 7 8)", 1, "POINT (1 2)");
    assertPointN("LINESTRING(1 2, 3 4, 5 6, 7 8)", 3, "POINT (5 6)");
    assertPointN("LINESTRING(1 2, 3 4, 5 6, 7 8)", 10, null);
    assertPointN("LINESTRING(1 2, 3 4, 5 6, 7 8)", 0, null);
    assertPointN("LINESTRING(1 2, 3 4, 5 6, 7 8)", -1, null);

    assertInvalidPointN("POINT (1 2)", "POINT");
    assertInvalidPointN("MULTIPOINT (1 1, 2 2)", "MULTI_POINT");
    assertInvalidPointN("MULTILINESTRING ((1 1, 2 2), (3 3, 4 4))", "MULTI_LINE_STRING");
    assertInvalidPointN("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))", "POLYGON");
    assertInvalidPointN("MULTIPOLYGON (((1 1, 1 4, 4 4, 4 1, 1 1)), ((1 1, 1 4, 4 4, 4 1, 1 1)))", "MULTI_POLYGON");
    assertInvalidPointN("GEOMETRYCOLLECTION(POINT(4 6),LINESTRING(4 6, 7 10))", "GEOMETRY_COLLECTION");
}
/**
 * Returns the set of virtual links leading INTO the given device, i.e. links
 * whose destination connect point is on that device.
 *
 * <p>BUG FIX: the previous implementation filtered on {@code link.src()},
 * which selects egress links; ingress links must match on {@code link.dst()}
 * (per the {@code LinkService#getDeviceIngressLinks} contract).
 *
 * @param deviceId identifier of the device of interest
 * @return set of ingress links for the device
 * @throws NullPointerException if {@code deviceId} is null
 */
@Override
public Set<Link> getDeviceIngressLinks(DeviceId deviceId) {
    checkNotNull(deviceId, DEVICE_NULL);
    return manager.getVirtualLinks(this.networkId())
            .stream()
            // Ingress links terminate at the device, so match on the destination end.
            .filter(link -> (deviceId.equals(link.dst().elementId())))
            .collect(Collectors.toSet());
}
// Verifies that querying device ingress links with a null device id throws NPE.
@Test(expected = NullPointerException.class)
public void testGetDeviceIngressLinksByNullId() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    LinkService linkService = manager.get(virtualNetwork.id(), LinkService.class);

    // test the getDeviceIngressLinks() method with a null device identifier.
    linkService.getDeviceIngressLinks(null);
}
/**
 * Looks up session-window rows for a key on one partition, constrained by the
 * given window-start and window-end bounds. Any failure talking to the state
 * store is wrapped in a {@link MaterializationException}.
 *
 * @param key the row key
 * @param partition the partition to query
 * @param windowStart acceptable range of session start times
 * @param windowEnd acceptable range of session end times
 * @param position optional position bound (unused here — TODO confirm)
 * @return an iterator-based query result over the matching windowed rows
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStart,
    final Range<Instant> windowEnd,
    final Optional<Position> position
) {
    try {
        final ReadOnlySessionStore<GenericKey, GenericRow> store = stateStore
            .store(QueryableStoreTypes.sessionStore(), partition);
        return KsMaterializedQueryResult.rowIterator(
            findSession(store, key, windowStart, windowEnd).iterator());
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
// Verifies that a session ending exactly at an open upper end-bound is excluded.
@Test
public void shouldIgnoreSessionsThatEndAtUpperBoundIfUpperBoundOpen() {
    // Given:
    final Range<Instant> endBounds = Range.closedOpen(
        LOWER_INSTANT,
        UPPER_INSTANT
    );

    givenSingleSession(UPPER_INSTANT.minusMillis(1), UPPER_INSTANT);

    // When:
    final Iterator<WindowedRow> rowIterator =
        table.get(A_KEY, PARTITION, Range.all(), endBounds).rowIterator;

    // Then:
    assertThat(rowIterator.hasNext(), is(false));
}
/**
 * Returns whether the given resource manager is the current leader.
 * Must be called while holding {@code lock}; compares by identity so only the
 * exact leader instance qualifies, and only while this service is running.
 */
@GuardedBy("lock")
private boolean isLeader(ResourceManager<?> resourceManager) {
    return running && this.leaderResourceManager == resourceManager;
}
// Verifies that granting leadership starts a new RM with the session id and that
// the leader session is confirmed with the very same id.
@Test
void grantLeadership_startRmAndConfirmLeaderSession() throws Exception {
    final UUID leaderSessionId = UUID.randomUUID();
    final CompletableFuture<UUID> startRmFuture = new CompletableFuture<>();
    rmFactoryBuilder.setInitializeConsumer(startRmFuture::complete);

    createAndStartResourceManager();

    // grant leadership
    final CompletableFuture<LeaderInformation> confirmedLeaderInformation =
            leaderElection.isLeader(leaderSessionId);

    // should start new RM and confirm leader session
    assertThatFuture(startRmFuture).eventuallySucceeds().isSameAs(leaderSessionId);
    assertThat(confirmedLeaderInformation.get().getLeaderSessionID()).isSameAs(leaderSessionId);
}
/**
 * Registers a partition with the current transaction, if transactional.
 * No-op for non-transactional producers and for partitions already added or
 * pending. The checks are order-sensitive: pending errors and pending state
 * transitions are surfaced before any state validation.
 *
 * @param topicPartition the partition being produced to
 * @throws IllegalStateException if called before initTransactions completes
 *         or outside an active transaction
 */
public synchronized void maybeAddPartition(TopicPartition topicPartition) {
    maybeFailWithError();
    throwIfPendingState("send");

    if (isTransactional()) {
        if (!hasProducerId()) {
            throw new IllegalStateException("Cannot add partition " + topicPartition +
                " to transaction before completing a call to initTransactions");
        } else if (currentState != State.IN_TRANSACTION) {
            throw new IllegalStateException("Cannot add partition " + topicPartition +
                " to transaction while in state  " + currentState);
        } else if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) {
            // Already tracked; nothing to do.
            return;
        } else {
            log.debug("Begin adding new partition {} to transaction", topicPartition);
            txnPartitionMap.getOrCreate(topicPartition);
            newPartitionsInTransaction.add(topicPartition);
        }
    }
}
// Verifies that adding a partition before initTransactions fails with IllegalStateException.
@Test
public void testNotReadyForSendBeforeInitTransactions() {
    assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0));
}
/**
 * Applies a sequence of semicolon-separated SQL statements (read from the file
 * configured via {@code TRANSFORMER_SQL_FILE}) to the input dataset. The input
 * is registered under a unique temp view referenced by {@code SRC_PATTERN};
 * the result of the LAST non-empty statement is returned. The temp view is
 * always dropped, even on failure.
 *
 * @param jsc Spark context used to obtain the Hadoop configuration
 * @param sparkSession session used to run the SQL
 * @param rowDataset input rows, exposed to the SQL as the temp view
 * @param props must contain the transformer SQL file path
 * @return the dataset produced by the final SQL statement (null if the file
 *         contains no non-empty statements — TODO confirm callers handle this)
 * @throws HoodieTransformExecutionException if the SQL file cannot be read
 */
@Override
public Dataset<Row> apply(
    final JavaSparkContext jsc,
    final SparkSession sparkSession,
    final Dataset<Row> rowDataset,
    final TypedProperties props) {
    final String sqlFile = getStringWithAltKeys(props, SqlTransformerConfig.TRANSFORMER_SQL_FILE);
    final FileSystem fs = HadoopFSUtils.getFs(sqlFile, jsc.hadoopConfiguration(), true);
    // tmp table name doesn't like dashes
    final String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_"));
    LOG.info("Registering tmp table: {}", tmpTable);
    rowDataset.createOrReplaceTempView(tmpTable);

    try (final Scanner scanner = new Scanner(fs.open(new Path(sqlFile)), "UTF-8")) {
        Dataset<Row> rows = null;
        // each sql statement is separated with semicolon hence set that as delimiter.
        scanner.useDelimiter(";");
        LOG.info("SQL Query for transformation:");
        while (scanner.hasNext()) {
            String sqlStr = scanner.next();
            // Substitute the source placeholder with the registered temp view name.
            sqlStr = sqlStr.replaceAll(SRC_PATTERN, tmpTable).trim();
            if (!sqlStr.isEmpty()) {
                LOG.info(sqlStr);
                // overwrite the same dataset object until the last statement then return.
                rows = sparkSession.sql(sqlStr);
            }
        }
        return rows;
    } catch (final IOException ioe) {
        throw new HoodieTransformExecutionException("Error reading transformer SQL file.", ioe);
    } finally {
        // Always clean up the temp view registered above.
        sparkSession.catalog().dropTempView(tmpTable);
    }
}
// Verifies that a SQL file producing an empty result yields a dataset equal to
// the expected empty-dataset fixture.
@Test
public void testSqlFileBasedTransformerEmptyDataset() throws IOException {
    UtilitiesTestBase.Helpers.copyToDFS(
        "streamer-config/sql-file-transformer-empty.sql",
        UtilitiesTestBase.storage,
        UtilitiesTestBase.basePath + "/sql-file-transformer-empty.sql");

    // Test if the SQL file based transformer works as expected for the empty SQL statements.
    props.setProperty(
        "hoodie.streamer.transformer.sql.file",
        UtilitiesTestBase.basePath + "/sql-file-transformer-empty.sql");
    Dataset<Row> emptyRow = sqlFileTransformer.apply(jsc, sparkSession, inputDatasetRows, props);
    String[] actualRows = emptyRow.as(Encoders.STRING()).collectAsList().toArray(new String[0]);
    String[] expectedRows = emptyDatasetRow.collectAsList().toArray(new String[0]);
    assertArrayEquals(expectedRows, actualRows);
}
/**
 * Loads the value for a key via a parameterized SQL query. Returns {@code null}
 * when no row matches; fails when multiple rows match. With exactly two columns
 * and single-column-as-value enabled, the non-key column itself is the value;
 * otherwise the full row is converted to a GenericRecord.
 *
 * @param key the map key to look up
 * @return the loaded value, or {@code null} if absent
 * @throws IllegalStateException if more than one row matches the key
 */
@Override
public V load(K key) {
    // Block until the mapping/queries have been initialized successfully.
    awaitSuccessfulInit();

    try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
        Iterator<SqlRow> it = queryResult.iterator();

        V value = null;
        if (it.hasNext()) {
            SqlRow sqlRow = it.next();
            if (it.hasNext()) {
                throw new IllegalStateException("multiple matching rows for a key " + key);
            }
            // If there is a single column as the value, return that column as the value
            if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
                value = sqlRow.getObject(1);
            } else {
                //noinspection unchecked
                value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
            }
        }
        return value;
    }
}
// Verifies that load() with a custom id column returns a GenericRecord exposing
// both the id and name columns.
@Test
public void givenRowAndIdColumn_whenLoad_thenReturnGenericRecord() {
    ObjectSpec spec = objectProvider.createObject(mapName, true);
    objectProvider.insertItems(spec, 1);

    Properties properties = new Properties();
    properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
    properties.setProperty(ID_COLUMN_PROPERTY, "person-id");
    mapLoader = createMapLoader(properties, hz);

    GenericRecord genericRecord = mapLoader.load(0);
    assertThat(genericRecord.getInt32("person-id")).isZero();
    assertThat(genericRecord.getString("name")).isEqualTo("name-0");
}
/**
 * Filter entry point: delegates to the side-aware overload, passing whether
 * the current invocation side is the provider side.
 *
 * @param invoker the downstream invoker
 * @param invocation the current invocation
 * @return the invocation result
 * @throws RpcException if the downstream invocation fails
 */
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    return invoke(invoker, invocation, PROVIDER.equals(MetricsSupport.getSide(invocation)));
}
// Verifies that publishing a metrics "after" event for a result that later carries
// an exception does not itself throw.
@Test
void testThrowable() {
    invocation.setTargetServiceUniqueName(INTERFACE_NAME);
    invocation.setMethodName(METHOD_NAME);
    invocation.setParameterTypes(new Class[] {String.class});
    given(invoker.invoke(invocation)).willReturn(new AppResponse("success"));

    Result result = filter.invoke(invoker, invocation);
    result.setException(new RuntimeException("failed"));
    Object eventObj = invocation.get(METRIC_FILTER_EVENT);
    if (eventObj != null) {
        Assertions.assertDoesNotThrow(() -> MetricsEventBus.after((RequestEvent) eventObj, result));
    }
}
public static CronExpression create(String expression) { if (expression.isEmpty()) { throw new InvalidCronExpressionException("empty expression"); } String[] fields = expression.trim().toLowerCase().split("\\s+"); int count = fields.length; if (count > 6 || count < 5) { throw new InvalidCronExpressionException( "crontab expression should have 6 fields for (seconds resolution) or 5 fields for (minutes resolution)"); } CronExpression cronExpression = new CronExpression(); cronExpression.hasSecondsField = count == 6; String token; int index = 0; if (cronExpression.hasSecondsField) { token = fields[index++]; cronExpression.seconds = CronExpression.SECONDS_FIELD_PARSER.parse(token); } else { cronExpression.seconds = new BitSet(1); cronExpression.seconds.set(0); } token = fields[index++]; cronExpression.minutes = CronExpression.MINUTES_FIELD_PARSER.parse(token); token = fields[index++]; cronExpression.hours = CronExpression.HOURS_FIELD_PARSER.parse(token); token = fields[index++]; String daysToken = token; cronExpression.days = CronExpression.DAYS_FIELD_PARSER.parse(token); cronExpression.isLastDayOfMonth = token.equals("l"); boolean daysStartWithAsterisk = token.startsWith("*"); token = fields[index++]; cronExpression.months = CronExpression.MONTHS_FIELD_PARSER.parse(token); token = fields[index++]; cronExpression.daysOfWeek = CronExpression.DAY_OF_WEEK_FIELD_PARSER.parse(token); boolean daysOfWeekStartAsterisk = token.startsWith("*"); if (token.length() == 2 && token.endsWith("l")) { if (cronExpression.isLastDayOfMonth) { throw new InvalidCronExpressionException("You can only specify the last day of month week in either the DAY field or in the DAY_OF_WEEK field, not both."); } if (!daysToken.equalsIgnoreCase("*")) { throw new InvalidCronExpressionException("when last days of month is specified. 
the day of the month must be \"*\""); } // this flag will be used later duing finding the next schedule as some months have less than 31 days cronExpression.isSpecificLastDayOfMonth = true; } cronExpression.daysOf5Weeks = generateDaysOf5Weeks(cronExpression.daysOfWeek); cronExpression.daysAndDaysOfWeekRelation = (daysStartWithAsterisk || daysOfWeekStartAsterisk) ? DaysAndDaysOfWeekRelation.INTERSECT : DaysAndDaysOfWeekRelation.UNION; if (!cronExpression.canScheduleActuallyOccur()) throw new InvalidCronExpressionException("Cron expression not valid. The specified months do not have the day 30th or the day 31st"); cronExpression.expression = expression.trim(); return cronExpression; }
// Verifies that specifying "last day" in both the DAY field ("l") and the
// DAY_OF_WEEK field ("5L") is rejected with a descriptive error.
@Test
void invalidCronExpressionThrowsExceptionIfBothLastDayOfMonth() {
  assertThatThrownBy(() -> CronExpression.create("0 0 0 l * 5L"))
      .isInstanceOf(InvalidCronExpressionException.class)
      .hasMessage("You can only specify the last day of month week in either the DAY field or in the DAY_OF_WEEK field, not both.");
}
// Intentional no-op. Calling this forces the enclosing class (and its static
// initialization) to load eagerly. NOTE(review): purpose inferred from the
// class-loading-deadlock test that calls it first — confirm against the
// surrounding class's documentation.
@Restricted(NoExternalUse.class)
public static void initialized() {}
// Regression test: loading built-in permalinks on one thread while constructing a
// PeepholePermalink subclass on another must not deadlock on class initialization.
@Test
public void classLoadingDeadlock() throws Exception {
  // Force PeepholePermalink to initialize before the racing threads start.
  PeepholePermalink.initialized();
  Thread t = new Thread(() -> {
    assertThat("successfully loaded permalinks",
        PermalinkProjectAction.Permalink.BUILTIN.stream().map(PermalinkProjectAction.Permalink::getId).collect(Collectors.toSet()),
        containsInAnyOrder("lastBuild", "lastStableBuild", "lastSuccessfulBuild", "lastFailedBuild",
            "lastUnstableBuild", "lastUnsuccessfulBuild", "lastCompletedBuild"));
  });
  t.start();
  // Concurrently trigger subclass construction on the main thread; the method
  // bodies are never invoked, only class loading matters here.
  new PeepholePermalink() {
    @Override
    public boolean apply(Run<?, ?> run) {
      throw new UnsupportedOperationException();
    }
    @Override
    public String getDisplayName() {
      throw new UnsupportedOperationException();
    }
    @Override
    public String getId() {
      throw new UnsupportedOperationException();
    }
  };
  // If the join hangs, the test times out — indicating a deadlock.
  t.join();
}
/**
 * Detaches this entry from the thread-local context, restoring its parent as the
 * context's current entry.
 *
 * <p>No-op for {@code NullContext} or when the context reference is absent.
 *
 * @throws IllegalStateException if this entry is not the context's current entry,
 *     which indicates corrupted async-entry nesting (e.g. cleaned twice)
 */
void cleanCurrentEntryInLocal() {
  if (context instanceof NullContext) {
    return;
  }
  Context originalContext = context;
  if (originalContext != null) {
    Entry curEntry = originalContext.getCurEntry();
    if (curEntry == this) {
      // Pop this entry: parent becomes current, and the parent's child link is cleared.
      Entry parent = this.parent;
      originalContext.setCurEntry(parent);
      if (parent != null) {
        ((CtEntry) parent).child = null;
      }
    } else {
      // Entry nesting is broken; report both the expected and actual current entry.
      String curEntryName = curEntry == null ? "none"
          : curEntry.resourceWrapper.getName() + "@" + curEntry.hashCode();
      String msg = String.format("Bad async context state, expected entry: %s, but actual: %s",
          getResourceWrapper().getName() + "@" + hashCode(), curEntryName);
      throw new IllegalStateException(msg);
    }
  }
}
// Verifies that cleaning the same async entry twice corrupts the context state and
// the second call throws IllegalStateException.
@Test(expected = IllegalStateException.class)
public void testCleanCurrentEntryInLocalError() {
  final String contextName = "abc";
  try {
    ContextUtil.enter(contextName);
    Context curContext = ContextUtil.getContext();
    AsyncEntry entry = new AsyncEntry(
        new StringResourceWrapper("testCleanCurrentEntryInLocal", EntryType.OUT), null, curContext);
    entry.cleanCurrentEntryInLocal();
    // Second cleanup: the entry is no longer current, so this must throw.
    entry.cleanCurrentEntryInLocal();
  } finally {
    // Always reset the thread-local context so other tests are unaffected.
    ContextTestUtil.cleanUpContext();
  }
}
/**
 * Verifies and stores an X.509 certificate.
 *
 * <p>The very first CSCA certificate stored for a document type is only checked for
 * a valid self-signature (trust must be flagged manually afterwards); subsequent
 * certificates are verified against the existing chain. When adding expired
 * certificates is allowed, verification is performed as of the certificate's own
 * notAfter date instead of now.
 *
 * @param cert the certificate to add
 * @return the persisted database entity
 * @throws ClientException if the certificate cannot be encoded or fails verification
 */
public Certificate add(X509Certificate cert) {
  final Certificate db;
  try {
    db = Certificate.from(cert);
  } catch (CertificateEncodingException e) {
    logger.error("Encoding error in certificate", e);
    throw new ClientException("Encoding error in certificate", e);
  }
  try {
    // Special case for first CSCA certificate for this document type
    if (repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) {
      // Self-signature check only; the trusted flag must be set by an operator.
      cert.verify(cert.getPublicKey());
      logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType());
    } else {
      // Passing the notAfter date effectively skips the "is it expired now" check.
      verify(cert, allowAddingExpired ? cert.getNotAfter() : null);
    }
  } catch (GeneralSecurityException | VerificationException e) {
    logger.error(
        String.format("Could not verify certificate of %s issued by %s",
            cert.getSubjectX500Principal(), cert.getIssuerX500Principal()
        ), e
    );
    throw new ClientException("Could not verify certificate", e);
  }
  return repository.saveAndFlush(db);
}
// Verifies that an expired certificate chained to an existing trusted root can be
// added when the allowAddingExpired flag is enabled, and is stored untrusted.
@Test
public void shouldAllowToAddCertificateIfTrustedByExistingEvenIfExpiredIfAllowed()
    throws CertificateException, IOException {
  // Seed the chain: trusted root plus untrusted intermediate.
  certificateRepo.saveAndFlush(loadCertificate("test/root.crt", true));
  certificateRepo.saveAndFlush(loadCertificate("test/intermediate.crt", false));
  final X509Certificate cert = readCertificate("test/expired.crt");
  // Flip the service flag that permits adding expired certificates.
  ReflectionTestUtils.setField(service, "allowAddingExpired", true);
  final Certificate dbCert = service.add(cert);
  assertEquals(X509Factory.toCanonical(cert.getSubjectX500Principal()), dbCert.getSubject());
  // Newly added non-first certificates must never be auto-trusted.
  assertEquals(false, dbCert.isTrusted());
}
// Releases resources by delegating to the wrapped (inner) instance.
@Override
public void close() {
  inner.close();
}
// Verifies that closing the wrapping serializer propagates close() to the inner one.
@Test
public void shouldCloseInnerSerializerOnClose() {
  // When:
  serializer.close();
  // Then:
  verify(innerSerializer).close();
}
/**
 * Computes an order-independent hash code as the sum of the hash codes of the
 * stored values, including the MISSING_VALUE sentinel when it is a member of the set.
 */
@DoNotSub public int hashCode()
{
    @DoNotSub int sum = 0;
    for (final int element : values)
    {
        if (element != MISSING_VALUE)
        {
            sum += Integer.hashCode(element);
        }
    }
    return containsMissingValue ? sum + Integer.hashCode(MISSING_VALUE) : sum;
}
// Verifies the hashCode contract: two sets containing the same elements (even with
// different capacities) must produce equal hash codes.
@Test
void setsWithTheSameValuesHaveTheSameHashcode() {
  final IntHashSet other = new IntHashSet(100);
  addTwoElements(testSet);
  addTwoElements(other);
  assertEquals(testSet.hashCode(), other.hashCode());
}
/**
 * Looks up an execution vertex of this pipelined region by its id.
 *
 * @throws IllegalArgumentException if the vertex is not part of this region
 */
@Override
public DefaultExecutionVertex getVertex(final ExecutionVertexID vertexId) {
    final DefaultExecutionVertex vertex = executionVertices.get(vertexId);
    if (vertex != null) {
        return vertex;
    }
    throw new IllegalArgumentException(
            String.format("Execution vertex %s not found in pipelined region", vertexId));
}
// Verifies that asking an empty pipelined region for an unknown vertex id fails with
// an IllegalArgumentException naming the missing vertex.
@Test
void gettingUnknownVertexThrowsException() {
  final Map<IntermediateResultPartitionID, DefaultResultPartition> resultPartitionById =
      Collections.emptyMap();
  final DefaultSchedulingPipelinedRegion pipelinedRegion =
      new DefaultSchedulingPipelinedRegion(Collections.emptySet(), resultPartitionById::get);
  final ExecutionVertexID unknownVertexId = new ExecutionVertexID(new JobVertexID(), 0);
  assertThatThrownBy(() -> pipelinedRegion.getVertex(unknownVertexId))
      .withFailMessage("Expected exception not thrown")
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessageContaining(unknownVertexId + " not found");
}
/**
 * Restores an Avro serializer from this snapshot. The snapshot schema is always the
 * reader schema; the captured runtime schema (when present) is used as the writer
 * schema, otherwise the snapshot schema serves both roles.
 */
@Override
public TypeSerializer<T> restoreSerializer() {
    checkNotNull(runtimeType);
    checkNotNull(schema);
    final SerializableAvroSchema readerSchema = new SerializableAvroSchema(schema);
    final SerializableAvroSchema writerSchema = runtimeSchema != null
            ? new SerializableAvroSchema(runtimeSchema)
            : new SerializableAvroSchema(schema);
    return new AvroSerializer<>(runtimeType, writerSchema, readerSchema);
}
// Round-trip test: a record serialized with the original AvroSerializer must be
// readable by a serializer restored from that serializer's snapshot.
@Test
void recordSerializedShouldBeDeserializeWithTheResortedSerializer() throws IOException {
  // user is an avro generated test object.
  final User user = TestDataGenerator.generateRandomUser(new Random());
  final AvroSerializer<User> originalSerializer = new AvroSerializer<>(User.class);
  //
  // first serialize the record
  //
  ByteBuffer serializedUser = serialize(originalSerializer, user);
  //
  // then restore a serializer from the snapshot
  //
  TypeSerializer<User> restoredSerializer =
      originalSerializer.snapshotConfiguration().restoreSerializer();
  //
  // now deserialize the user with the restored serializer.
  //
  User restoredUser = deserialize(restoredSerializer, serializedUser);
  assertThat(restoredUser).isEqualTo(user);
}
/**
 * Sends a single in-app notification to a user based on a notify template.
 *
 * @param userId         recipient user id
 * @param userType       recipient user type
 * @param templateCode   code of the notify template to use
 * @param templateParams parameters substituted into the template content
 * @return the created message id, or {@code null} when the template is disabled
 */
@Override
public Long sendSingleNotify(Long userId, Integer userType, String templateCode,
    Map<String, Object> templateParams) {
  // Validate the template
  NotifyTemplateDO template = validateNotifyTemplate(templateCode);
  if (Objects.equals(template.getStatus(), CommonStatusEnum.DISABLE.getStatus())) {
    // Disabled templates are skipped silently (logged) rather than treated as errors.
    log.info("[sendSingleNotify][模版({})已经关闭,无法给用户({}/{})发送]", templateCode, userId, userType);
    return null;
  }
  // Validate the parameters
  validateTemplateParams(template, templateParams);
  // Render the content and create the in-app notification message
  String content = notifyTemplateService.formatNotifyTemplateContent(template.getContent(), templateParams);
  return notifyMessageService.createNotifyMessage(userId, userType, template, content, templateParams);
}
// Verifies that a disabled template short-circuits sendSingleNotify: it returns null
// and neither formats content nor creates a message.
@Test
public void testSendSingleMail_successWhenSmsTemplateDisable() {
  // Prepare parameters
  Long userId = randomLongId();
  Integer userType = randomEle(UserTypeEnum.values()).getValue();
  String templateCode = randomString();
  Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
      .put("op", "login").build();
  // Mock NotifyTemplateService to return a DISABLED template
  NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class, o -> {
    o.setStatus(CommonStatusEnum.DISABLE.getStatus());
    o.setContent("验证码为{code}, 操作为{op}");
    o.setParams(Lists.newArrayList("code", "op"));
  });
  when(notifyTemplateService.getNotifyTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
  // Call
  Long resultMessageId = notifySendService.sendSingleNotify(userId, userType, templateCode, templateParams);
  // Assert: no message id, and downstream services were never touched
  assertNull(resultMessageId);
  verify(notifyTemplateService, never()).formatNotifyTemplateContent(anyString(), anyMap());
  verify(notifyMessageService, never()).createNotifyMessage(anyLong(), anyInt(), any(), anyString(), anyMap());
}
/**
 * Decides whether a statement can be executed as a scalable push query (push v2).
 *
 * <p>Requires: push v2 enabled, a plain (non-join) query over a single source with
 * exactly one upstream writer, EMIT CHANGES refinement, "latest" offset reset, and
 * none of the unsupported features (pull, group by, window, having, partition by,
 * ROWPARTITION/ROWOFFSET pseudo-columns).
 */
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
    final Statement statement,
    final KsqlExecutionContext ksqlEngine,
    final KsqlConfig ksqlConfig,
    final Map<String, Object> overrides
) {
  if (!isPushV2Enabled(ksqlConfig, overrides)) {
    return false;
  }
  if (! (statement instanceof Query)) {
    return false;
  }
  final Query query = (Query) statement;
  final SourceFinder sourceFinder = new SourceFinder();
  sourceFinder.process(query.getFrom(), null);
  // It will be present if it's not a join, which we don't handle
  if (!sourceFinder.getSourceName().isPresent()) {
    return false;
  }
  // Find all of the writers to this particular source.
  final SourceName sourceName = sourceFinder.getSourceName().get();
  final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
  // See if the config or override have set the stream to be "latest"
  final boolean isLatest = isLatest(ksqlConfig, overrides);
  // Cannot be a pull query, i.e. must be a push
  return !query.isPullQuery()
      // Group by is not supported
      && !query.getGroupBy().isPresent()
      // Windowing is not supported
      && !query.getWindow().isPresent()
      // Having clause is not supported
      && !query.getHaving().isPresent()
      // Partition by is not supported
      && !query.getPartitionBy().isPresent()
      // There must be an EMIT CHANGES clause
      && (query.getRefinement().isPresent()
          && query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
      // Must be reading from "latest"
      && isLatest
      // We only handle a single sink source at the moment from a CTAS/CSAS
      && upstreamQueries.size() == 1
      // ROWPARTITION and ROWOFFSET are not currently supported in SPQs
      && !containsDisallowedColumns(query);
}
// Verifies that a query which would otherwise qualify as a scalable push query is
// rejected when it contains a GROUP BY clause.
@Test
public void isScalablePushQuery_false_hasGroupBy() {
  try (MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
    // When: set up a fully-qualifying SPQ, then add a group-by to disqualify it
    expectIsSPQ(ColumnName.of("foo"), columnExtractor);
    when(query.getGroupBy())
        .thenReturn(
            Optional.of(new GroupBy(Optional.empty(), ImmutableList.of(new IntegerLiteral(1)))));
    // Then:
    assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig,
        ImmutableMap.of(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")), equalTo(false));
  }
}
/**
 * Signs {@code data} with {@code key} using the RAM signing scheme.
 * When either argument is blank (or null), the data is returned unchanged.
 */
@Override
public String sign(String data, String key) {
    // Guard clause: nothing to sign without both a non-blank payload and key.
    if (StringUtils.isBlank(key) || StringUtils.isBlank(data)) {
        return data;
    }
    return RamSignAdapter.getRamSign(data, key);
}
// Verifies that signing a null payload returns the payload unchanged (null) instead
// of attempting to compute a signature.
@Test
public void testGetRamSignNull() {
  String data = null;
  String key = "exampleEncryptKey";
  DefaultAuthSigner signer = new DefaultAuthSigner();
  String sign = signer.sign(data, key);
  Assertions.assertNull(sign);
}
/**
 * Returns the property registered under {@code key}, or {@code null} when no such
 * property has been defined.
 */
@CheckForNull
public Object getProperty(String key) {
  return properties.get(key);
}
// Verifies that every field-creation helper on TypeMapping registers a non-empty
// mapping on the index, and that explicitly-typed fields keep their declared type.
@Test
@UseDataProvider("indexAndTypeMappings")
public void define_fields(NewIndex<?> newIndex, TypeMapping typeMapping) {
  typeMapping.setField("foo_field", ImmutableMap.of("type", "keyword"));
  typeMapping.createBooleanField("boolean_field");
  typeMapping.createByteField("byte_field");
  typeMapping.createDateTimeField("dt_field");
  typeMapping.createDoubleField("double_field");
  typeMapping.createIntegerField("int_field");
  typeMapping.createLongField("long_field");
  typeMapping.createShortField("short_field");
  typeMapping.createUuidPathField("uuid_path_field");
  assertThat(newIndex.getProperty("foo_field")).isInstanceOf(Map.class);
  assertThat((Map) newIndex.getProperty("foo_field")).containsEntry("type", "keyword");
  assertThat((Map) newIndex.getProperty("byte_field")).isNotEmpty();
  assertThat((Map) newIndex.getProperty("double_field")).isNotEmpty();
  assertThat((Map) newIndex.getProperty("dt_field")).isNotEmpty();
  assertThat((Map) newIndex.getProperty("int_field")).containsEntry("type", "integer");
  assertThat((Map) newIndex.getProperty("long_field")).isNotEmpty();
  assertThat((Map) newIndex.getProperty("short_field")).isNotEmpty();
  assertThat((Map) newIndex.getProperty("uuid_path_field")).isNotEmpty();
  // Undefined fields must resolve to null rather than an empty mapping.
  assertThat((Map) newIndex.getProperty("unknown")).isNull();
}
/**
 * Fails the assertion if the subject (which must not be null) is empty.
 */
public final void isNotEmpty() {
  if (checkNotNull(actual).isEmpty()) {
    // Report without the actual value: "empty" needs no elaboration.
    failWithoutActual(simpleFact("expected not to be empty"));
  }
}
// Verifies that isNotEmpty() on an empty multimap fails with exactly the
// "expected not to be empty" fact and no actual-value fact.
@Test
public void multimapIsNotEmptyWithFailure() {
  ImmutableMultimap<Integer, Integer> multimap = ImmutableMultimap.of();
  expectFailureWhenTestingThat(multimap).isNotEmpty();
  assertFailureKeys("expected not to be empty");
}
/**
 * Begins a lake-compaction transaction for the given partition and returns its id.
 *
 * <p>The label embeds db/table/partition ids plus the current timestamp so each
 * compaction attempt gets a unique label. The transaction timeout is taken from
 * {@code Config.lake_compaction_default_timeout_second}, independent of the stream
 * load timeout.
 */
protected long beginTransaction(PartitionIdentifier partition)
    throws RunningTxnExceedException, AnalysisException, LabelAlreadyUsedException,
        DuplicatedRequestException {
  long dbId = partition.getDbId();
  long tableId = partition.getTableId();
  long partitionId = partition.getPartitionId();
  long currentTs = System.currentTimeMillis();
  TransactionState.LoadJobSourceType loadJobSourceType =
      TransactionState.LoadJobSourceType.LAKE_COMPACTION;
  TransactionState.TxnSourceType txnSourceType = TransactionState.TxnSourceType.FE;
  TransactionState.TxnCoordinator coordinator =
      new TransactionState.TxnCoordinator(txnSourceType, HOST_NAME);
  // Unique per attempt thanks to the timestamp component.
  String label = String.format("COMPACTION_%d-%d-%d-%d", dbId, tableId, partitionId, currentTs);
  WarehouseManager manager = GlobalStateMgr.getCurrentState().getWarehouseMgr();
  Warehouse warehouse = manager.getCompactionWarehouse();
  return transactionMgr.beginTransaction(dbId, Lists.newArrayList(tableId), label, coordinator,
      loadJobSourceType, Config.lake_compaction_default_timeout_second, warehouse.getId());
}
// Verifies that a stream-load timeout smaller than the lake-compaction default does
// not affect compaction transactions: beginTransaction still succeeds and returns
// the id supplied by the (mocked) transaction manager.
@Test
public void testBeginTransactionSucceedWithSmallerStreamLoadTimeout() {
  long dbId = 9000L;
  long transactionId = 12345L;
  GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().addDatabaseTransactionMgr(dbId);
  // Stub the database transaction manager to hand back a fixed transaction id.
  new Expectations() {
    {
      try {
        dbTransactionMgr.beginTransaction(
            (List<Long>) any, anyString, (TUniqueId) any,
            (TransactionState.TxnCoordinator) any,
            (TransactionState.LoadJobSourceType) any,
            anyLong, anyLong, anyLong
        );
      } catch (Exception e) {
        // skip
      }
      result = transactionId;
    }
  };
  // Stub the warehouse manager so compaction always resolves to the default warehouse
  // backed by a single local compute node.
  new MockUp<WarehouseManager>() {
    @Mock
    public Warehouse getWarehouse(String warehouseName) {
      return new DefaultWarehouse(WarehouseManager.DEFAULT_WAREHOUSE_ID,
          WarehouseManager.DEFAULT_WAREHOUSE_NAME);
    }
    @Mock
    public Warehouse getWarehouse(long warehouseId) {
      return new DefaultWarehouse(WarehouseManager.DEFAULT_WAREHOUSE_ID,
          WarehouseManager.DEFAULT_WAREHOUSE_NAME);
    }
    @Mock
    public List<Long> getAllComputeNodeIds(long warehouseId) {
      return Lists.newArrayList(1L);
    }
    @Mock
    public Long getComputeNodeId(String warehouseName, LakeTablet tablet) {
      return 1L;
    }
    @Mock
    public Long getComputeNodeId(Long warehouseId, LakeTablet tablet) {
      return 1L;
    }
    @Mock
    public ComputeNode getAllComputeNodeIdsAssignToTablet(Long warehouseId, LakeTablet tablet) {
      return new ComputeNode(1L, "127.0.0.1", 9030);
    }
    @Mock
    public ComputeNode getAllComputeNodeIdsAssignToTablet(String warehouseName, LakeTablet tablet) {
      return null;
    }
    @Mock
    public ImmutableMap<Long, ComputeNode> getComputeNodesFromWarehouse(long warehouseId) {
      return ImmutableMap.of(1L, new ComputeNode(1L, "127.0.0.1", 9030));
    }
  };
  // default value
  Config.lake_compaction_default_timeout_second = 86400;
  // value smaller than `lake_compaction_default_timeout_second`
  // expect not affect lake compaction's transaction operation
  Config.max_stream_load_timeout_second = 64800;
  CompactionMgr compactionManager = new CompactionMgr();
  CompactionScheduler compactionScheduler = new CompactionScheduler(compactionManager,
      GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(),
      GlobalStateMgr.getCurrentState().getGlobalTransactionMgr(), GlobalStateMgr.getCurrentState(), "");
  PartitionIdentifier partitionIdentifier = new PartitionIdentifier(dbId, 2, 3);
  try {
    assertEquals(transactionId, compactionScheduler.beginTransaction(partitionIdentifier));
  } catch (Exception e) {
    Assert.fail("Transaction failed for lake compaction");
  }
}
/** Sets the outer-join variant (e.g. LEFT, RIGHT, or FULL) this operator performs. */
public void setOuterJoinType(OuterJoinType outerJoinType) {
  this.outerJoinType = outerJoinType;
}
// Verifies a FULL outer join where every key on each side has a match: the result is
// exactly the pairwise-joined records, with no null-padded rows.
@Test
void testFullOuterJoinWithFullMatchingKeys() throws Exception {
  final List<String> leftInput = Arrays.asList("foo", "bar", "foobar");
  final List<String> rightInput = Arrays.asList("bar", "foobar", "foo");
  baseOperator.setOuterJoinType(OuterJoinOperatorBase.OuterJoinType.FULL);
  List<String> expected = Arrays.asList("bar,bar", "foo,foo", "foobar,foobar");
  testOuterJoin(leftInput, rightInput, expected);
}
/**
 * Registers a ticket that fires {@code handler(args)} after {@code delay}.
 *
 * @param delay   time until the ticket fires; must be strictly positive
 * @param handler callback to invoke; when {@code null}, no ticket is created and
 *                {@code null} is returned
 * @param args    arguments passed through to the handler
 * @return the registered ticket, or {@code null} if {@code handler} was null
 * @throws IllegalArgumentException if {@code delay <= 0}
 */
public Ticket add(long delay, TimerHandler handler, Object... args) {
  if (handler == null) {
    return null;
  }
  Utils.checkArgument(delay > 0, "Delay of a ticket has to be strictly greater than 0");
  final Ticket ticket = new Ticket(this, now(), delay, handler, args);
  insert(ticket);
  return ticket;
}
// Verifies cancel() idempotence semantics: the first cancel succeeds (true) and a
// second cancel on the same ticket reports failure (false).
@Test
public void testCancelTwice() {
  ZTicket.Ticket ticket = tickets.add(10, handler);
  assertThat(ticket, notNullValue());
  boolean rc = ticket.cancel();
  assertThat(rc, is(true));
  rc = ticket.cancel();
  assertThat(rc, is(false));
}
/**
 * Finalization callback: reconciles the internal (conductor) workflow status with the
 * Maestro workflow-instance status, cancels any still-running tasks, and persists the
 * derived terminal state.
 *
 * <p>Skips work for de-dup failures (instance never really started) and for instances
 * that are missing or already terminal. Non-retryable errors during reconciliation
 * force the instance to FAILED with the error details attached.
 */
@Override
public void onWorkflowFinalized(Workflow workflow) {
  WorkflowSummary summary = StepHelper.retrieveWorkflowSummary(objectMapper, workflow.getInput());
  WorkflowRuntimeSummary runtimeSummary = retrieveWorkflowRuntimeSummary(workflow);
  String reason = workflow.getReasonForIncompletion();
  LOG.info(
      "Workflow {} with execution_id [{}] is finalized with internal state [{}] and reason [{}]",
      summary.getIdentity(),
      workflow.getWorkflowId(),
      workflow.getStatus(),
      reason);
  metrics.counter(
      MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
      getClass(),
      TYPE_TAG,
      "onWorkflowFinalized",
      MetricConstants.STATUS_TAG,
      workflow.getStatus().name());
  // De-dup failure: the workflow never actually started, so there is nothing to finalize.
  if (reason != null
      && workflow.getStatus() == Workflow.WorkflowStatus.FAILED
      && reason.startsWith(MaestroStartTask.DEDUP_FAILURE_PREFIX)) {
    LOG.info(
        "Workflow {} with execution_id [{}] has not actually started, thus skip onWorkflowFinalized.",
        summary.getIdentity(),
        workflow.getWorkflowId());
    return; // special case doing nothing
  }
  WorkflowInstance.Status instanceStatus =
      instanceDao.getWorkflowInstanceStatus(
          summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId());
  // Nothing to do if the instance is gone, or both sides already agree on a terminal state.
  if (instanceStatus == null
      || (instanceStatus.isTerminal() && workflow.getStatus().isTerminal())) {
    LOG.info(
        "Workflow {} with execution_id [{}] does not exist or already "
            + "in a terminal state [{}] with internal state [{}], thus skip onWorkflowFinalized.",
        summary.getIdentity(),
        workflow.getWorkflowId(),
        instanceStatus,
        workflow.getStatus());
    return;
  }
  Map<String, Task> realTaskMap = TaskHelper.getUserDefinedRealTaskMap(workflow);
  // cancel internally failed tasks
  realTaskMap.values().stream()
      .filter(task -> !StepHelper.retrieveStepStatus(task.getOutputData()).isTerminal())
      .forEach(task -> maestroTask.cancel(workflow, task, null));
  WorkflowRuntimeOverview overview =
      TaskHelper.computeOverview(
          objectMapper, summary, runtimeSummary.getRollupBase(), realTaskMap);
  try {
    validateAndUpdateOverview(overview, summary);
    // Map the internal workflow status onto the instance's terminal status.
    switch (workflow.getStatus()) {
      case TERMINATED: // stopped due to stop request
        if (reason != null && reason.startsWith(FAILURE_REASON_PREFIX)) {
          update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
        } else {
          update(workflow, WorkflowInstance.Status.STOPPED, summary, overview);
        }
        break;
      case TIMED_OUT:
        update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
        break;
      default: // other status (FAILED, COMPLETED, PAUSED, RUNNING) to be handled here.
        // Derive the actual outcome from task-level progress.
        Optional<Task.Status> done =
            TaskHelper.checkProgress(realTaskMap, summary, overview, true);
        switch (done.orElse(Task.Status.IN_PROGRESS)) {
          /**
           * This is a special status to indicate that the workflow has succeeded. Check {@link
           * TaskHelper#checkProgress} for more details.
           */
          case FAILED_WITH_TERMINAL_ERROR:
            WorkflowInstance.Status nextStatus =
                AggregatedViewHelper.deriveAggregatedStatus(
                    instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview);
            if (!nextStatus.isTerminal()) {
              throw new MaestroInternalError(
                  "Invalid status: [%s], expecting a terminal one", nextStatus);
            }
            update(workflow, nextStatus, summary, overview);
            break;
          case FAILED:
          case CANCELED: // due to step failure
            update(workflow, WorkflowInstance.Status.FAILED, summary, overview);
            break;
          case TIMED_OUT:
            update(workflow, WorkflowInstance.Status.TIMED_OUT, summary, overview);
            break;
          // all other status are invalid
          default:
            metrics.counter(
                MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
                getClass(),
                TYPE_TAG,
                "invalidStatusOnWorkflowFinalized");
            throw new MaestroInternalError(
                "Invalid status [%s] onWorkflowFinalized", workflow.getStatus());
        }
        break;
    }
  } catch (MaestroInternalError | IllegalArgumentException e) {
    // non-retryable error and still fail the instance
    LOG.warn("onWorkflowFinalized is failed with a non-retryable error", e);
    metrics.counter(
        MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
        getClass(),
        TYPE_TAG,
        "nonRetryableErrorOnWorkflowFinalized");
    update(
        workflow,
        WorkflowInstance.Status.FAILED,
        summary,
        overview,
        Details.create(
            e.getMessage(), "onWorkflowFinalized is failed with non-retryable error."));
  }
}
// Verifies that finalizing a TERMINATED workflow (with an in-progress instance and no
// failure-prefixed reason) maps to instance status STOPPED, bumps the callback
// counter, and publishes the change event.
@Test
public void testWorkflowFinalizedTerminated() {
  when(workflow.getStatus()).thenReturn(Workflow.WorkflowStatus.TERMINATED);
  when(instanceDao.getWorkflowInstanceStatus(eq("test-workflow-id"), anyLong(), anyLong()))
      .thenReturn(WorkflowInstance.Status.IN_PROGRESS);
  statusListener.onWorkflowFinalized(workflow);
  Assert.assertEquals(
      1L,
      metricRepo
          .getCounter(
              MetricConstants.WORKFLOW_STATUS_LISTENER_CALL_BACK_METRIC,
              MaestroWorkflowStatusListener.class,
              "type",
              "onWorkflowFinalized",
              "status",
              "TERMINATED")
          .count());
  verify(instanceDao, times(1))
      .updateWorkflowInstance(
          any(), any(), any(), eq(WorkflowInstance.Status.STOPPED), anyLong());
  verify(publisher, times(1)).publishOrThrow(any(), any());
}
/**
 * Resolves the concrete component class for a property using implicit rules, in
 * priority order: the default-component registry, then a default-class annotation on
 * the relevant setter/adder method, then the method's concrete parameter type.
 *
 * @return the resolved class, or {@code null} when no rule applies
 */
public Class<?> getClassNameViaImplicitRules(String name, AggregationType aggregationType,
    DefaultNestedComponentRegistry registry) {
  // Registry lookup wins over everything else.
  Class<?> registryResult = registry.findDefaultComponentType(obj.getClass(), name);
  if (registryResult != null) {
    return registryResult;
  }
  // find the relevant method for the given property name and aggregationType
  Method relevantMethod = getRelevantMethod(name, aggregationType);
  if (relevantMethod == null) {
    return null;
  }
  Class<?> byAnnotation = getDefaultClassNameByAnnonation(name, relevantMethod);
  if (byAnnotation != null) {
    return byAnnotation;
  }
  // Fall back to the method's own (concrete) parameter type.
  return getByConcreteType(name, relevantMethod);
}
// Verifies that the "window" complex-property-collection resolves to Window.class via
// the implicit rules (concrete parameter type of the adder method).
@Test
public void testgetComplexPropertyColleClassNameViaImplicitRules() {
  Class<?> compClass = setter.getClassNameViaImplicitRules("window",
      AggregationType.AS_COMPLEX_PROPERTY_COLLECTION, defaultComponentRegistry);
  assertEquals(Window.class, compClass);
}
/**
 * Stops child services in reverse order of their start.
 *
 * <p>All eligible services are stopped even when some fail; the first exception
 * raised during the sweep is rethrown at the end as a {@link ServiceStateException}.
 *
 * @param numOfServicesStarted    number of services that were (attempted to be) started
 * @param stopOnlyStartedServices when true, only STARTED services are stopped;
 *                                when false, INITED services are stopped as well
 */
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
  // stop in reverse order of start
  Exception firstException = null;
  List<Service> services = getServices();
  for (int i = numOfServicesStarted - 1; i >= 0; i--) {
    Service service = services.get(i);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stopping service #" + i + ": " + service);
    }
    STATE state = service.getServiceState();
    // depending on the stop policy
    if (state == STATE.STARTED
        || (!stopOnlyStartedServices && state == STATE.INITED)) {
      // stopQuietly logs and returns the failure instead of throwing.
      Exception ex = ServiceOperations.stopQuietly(LOG, service);
      if (ex != null && firstException == null) {
        firstException = ex;
      }
    }
  }
  // after stopping all services, rethrow the first exception raised
  if (firstException != null) {
    throw ServiceStateException.convert(firstException);
  }
}
// Verifies that a child added to a CompositeService before init() follows the
// parent's full lifecycle: INITED, STARTED, then STOPPED.
@Test(timeout = 10000)
public void testAddUninitedChildBeforeInit() throws Throwable {
  CompositeService parent = new CompositeService("parent");
  BreakableService child = new BreakableService();
  AddSiblingService.addChildToService(parent, child);
  parent.init(new Configuration());
  assertInState(STATE.INITED, child);
  parent.start();
  assertInState(STATE.STARTED, child);
  parent.stop();
  assertInState(STATE.STOPPED, child);
}
/**
 * Parses a size property value, honoring an optional k/K, m/M, or g/G binary suffix.
 *
 * @param propertyName  name used in error messages
 * @param propertyValue value such as "4096", "64k", or "1G"
 * @return the size in bytes
 * @throws NumberFormatException if the suffix is unknown or the scaled value would
 *     overflow a long
 */
public static long parseSize(final String propertyName, final String propertyValue)
{
    final int suffixIndex = propertyValue.length() - 1;
    final char suffix = propertyValue.charAt(suffixIndex);
    if (Character.isDigit(suffix))
    {
        return Long.parseLong(propertyValue); // plain number, no unit suffix
    }
    final long value = AsciiEncoding.parseLongAscii(propertyValue, 0, suffixIndex);
    final long maxValue;
    final long multiplier;
    switch (suffix)
    {
        case 'k':
        case 'K':
            maxValue = MAX_K_VALUE;
            multiplier = 1024L;
            break;
        case 'm':
        case 'M':
            maxValue = MAX_M_VALUE;
            multiplier = 1024L * 1024;
            break;
        case 'g':
        case 'G':
            maxValue = MAX_G_VALUE;
            multiplier = 1024L * 1024 * 1024;
            break;
        default:
            throw new NumberFormatException(
                propertyName + ": " + propertyValue + " should end with: k, m, or g.");
    }
    if (value > maxValue)
    {
        throw new NumberFormatException(propertyName + " would overflow a long: " + propertyValue);
    }
    return value * multiplier;
}
// Verifies parseSize for plain numbers and for every supported binary suffix in both
// lower and upper case (k/K, m/M, g/G).
@Test
void shouldParseSizesWithSuffix() {
  assertEquals(1L, parseSize("", "1"));
  assertEquals(1024L, parseSize("", "1k"));
  assertEquals(1024L, parseSize("", "1K"));
  assertEquals(1024L * 1024L, parseSize("", "1m"));
  assertEquals(1024L * 1024L, parseSize("", "1M"));
  assertEquals(1024L * 1024L * 1024L, parseSize("", "1g"));
  assertEquals(1024L * 1024L * 1024L, parseSize("", "1G"));
}
/**
 * Applies pending forward-index/dictionary operations to each column of the segment.
 *
 * <p>Per column, operations may disable or rebuild the forward index, add or remove
 * the dictionary, or rewrite the forward index for a compression-type change. After
 * mutating operations, invariants (dictionary presence matching the column's
 * dictionary mode, forward-index existence) are re-checked and violations surface as
 * {@link IllegalStateException}.
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
  if (columnOperationsMap.isEmpty()) {
    return;
  }
  for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
    String column = entry.getKey();
    List<Operation> operations = entry.getValue();
    for (Operation operation : operations) {
      switch (operation) {
        case DISABLE_FORWARD_INDEX:
          // Deletion of the forward index will be handled outside the index handler to ensure that other index
          // handlers that need the forward index to construct their own indexes will have it available.
          _tmpForwardIndexColumns.add(column);
          break;
        case ENABLE_FORWARD_INDEX:
          ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
          // Sanity check: rebuilding the forward index must not change the
          // column's dictionary mode.
          if (columnMetadata.hasDictionary()) {
            if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(String.format(
                  "Dictionary should still exist after rebuilding forward index for dictionary column: %s",
                  column));
            }
          } else {
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                      column));
            }
          }
          break;
        case DISABLE_DICTIONARY:
          Set<String> newForwardIndexDisabledColumns =
              FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(),
                  StandardIndexes.forward(), _fieldIndexConfigs);
          // Columns with a disabled forward index need dedicated cleanup; others
          // get a raw forward index rebuilt in place of the dictionary.
          if (newForwardIndexDisabledColumns.contains(column)) {
            removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after disabling dictionary for column: %s",
                      column));
            }
          } else {
            disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
          }
          break;
        case ENABLE_DICTIONARY:
          createDictBasedForwardIndex(column, segmentWriter);
          if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
            throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
          }
          break;
        case CHANGE_INDEX_COMPRESSION_TYPE:
          rewriteForwardIndexForCompressionChange(column, segmentWriter);
          break;
        default:
          throw new IllegalStateException("Unsupported operation for column " + column);
      }
    }
  }
}
// For each raw (no-dictionary) column not already forward-index-disabled, disables its forward
// index via ForwardIndexHandler and verifies the resulting index map, the derived indexes,
// and that column metadata is unchanged apart from hasDictionary/dictionaryElementSize.
@Test
public void testDisableForwardIndexForSingleRawColumn()
    throws Exception {
  Set<String> forwardIndexDisabledColumns = new HashSet<>(SV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS);
  forwardIndexDisabledColumns.addAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
  for (String column : _noDictionaryColumns) {
    if (FORWARD_INDEX_DISABLED_RAW_COLUMNS.contains(column) || RAW_SORTED_INDEX_COLUMNS.contains(column)) {
      // Forward index already disabled for these columns, skip them
      continue;
    }
    // Reload the segment fresh for each column under test.
    SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
    SegmentDirectory segmentLocalFSDirectory =
        new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
    SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
    // NOTE: the accumulating set keeps columns from previous iterations — presumably
    // intentional, since each one has already been disabled; verify if tests are reordered.
    forwardIndexDisabledColumns.add(column);
    indexLoadingConfig.setForwardIndexDisabledColumns(forwardIndexDisabledColumns);
    indexLoadingConfig.removeNoDictionaryColumns(forwardIndexDisabledColumns);
    indexLoadingConfig.addNoDictionaryColumns(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
    // Inverted index applies to all forward-index-disabled columns except the raw ones and
    // the two columns explicitly configured without an inverted index.
    Set<String> invertedIndexColumns = new HashSet<>(forwardIndexDisabledColumns);
    invertedIndexColumns.removeAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
    invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
    invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
    indexLoadingConfig.setInvertedIndexColumns(invertedIndexColumns);
    ForwardIndexHandler fwdIndexHandler =
        new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
    fwdIndexHandler.updateIndices(writer);
    fwdIndexHandler.postUpdateIndicesCleanup(writer);
    // Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close()
    segmentLocalFSDirectory.close();
    validateIndexMap(column, true, true);
    validateIndexesForForwardIndexDisabledColumns(column);
    // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change.
    int dictionaryElementSize = 0;
    ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
    FieldSpec.DataType dataType = metadata.getDataType();
    if (dataType == FieldSpec.DataType.STRING || dataType == FieldSpec.DataType.BYTES) {
      // This value is based on the rows in createTestData().
      dictionaryElementSize = 7;
    } else if (dataType == FieldSpec.DataType.BIG_DECIMAL) {
      dictionaryElementSize = 4;
    }
    validateMetadataProperties(column, true, dictionaryElementSize, metadata.getCardinality(),
        metadata.getTotalDocs(), dataType, metadata.getFieldType(), metadata.isSorted(),
        metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(),
        metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(), false);
  }
}
@PostConstruct public void init() { // blockRequestHandlerOptional has low priority blockRequestHandlerOptional.ifPresent(GatewayCallbackManager::setBlockHandler); initAppType(); initFallback(); }
@Test public void testInitWithFallbackMsgResponse() { FallbackProperties fallbackProperties = mock(FallbackProperties.class); when(gatewayProperties.getFallback()).thenReturn(fallbackProperties); when(fallbackProperties.getMode()).thenReturn(ConfigConstants.FALLBACK_MSG_RESPONSE); when(fallbackProperties.getResponseStatus()).thenReturn(200); when(fallbackProperties.getContentType()).thenReturn(MediaType.APPLICATION_JSON.toString()); when(fallbackProperties.getResponseBody()).thenReturn("test"); config.init(); Mono<ServerResponse> responseMono = GatewayCallbackManager.getBlockHandler() .handleRequest(mock(ServerWebExchange.class), null); Assert.assertEquals(200, Objects.requireNonNull(responseMono.block()).statusCode().value()); }
public static SqlPrimitiveType of(final String typeName) { switch (typeName.toUpperCase()) { case INT: return SqlPrimitiveType.of(SqlBaseType.INTEGER); case VARCHAR: return SqlPrimitiveType.of(SqlBaseType.STRING); default: try { final SqlBaseType sqlType = SqlBaseType.valueOf(typeName.toUpperCase()); return SqlPrimitiveType.of(sqlType); } catch (final IllegalArgumentException e) { throw new SchemaException("Unknown primitive type: " + typeName, e); } } }
@Test public void shouldThrowOnArrayType() { // When: final Exception e = assertThrows( SchemaException.class, () -> SqlPrimitiveType.of(SqlBaseType.ARRAY) ); // Then: assertThat(e.getMessage(), containsString("Invalid primitive type: ARRAY")); }
public String toBaseMessageIdString(Object messageId) { if (messageId == null) { return null; } else if (messageId instanceof String) { String stringId = (String) messageId; // If the given string has a type encoding prefix, // we need to escape it as an encoded string (even if // the existing encoding prefix was also for string) if (hasTypeEncodingPrefix(stringId)) { return AMQP_STRING_PREFIX + stringId; } else { return stringId; } } else if (messageId instanceof UUID) { return AMQP_UUID_PREFIX + messageId.toString(); } else if (messageId instanceof UnsignedLong) { return AMQP_ULONG_PREFIX + messageId.toString(); } else if (messageId instanceof Binary) { ByteBuffer dup = ((Binary) messageId).asByteBuffer(); byte[] bytes = new byte[dup.remaining()]; dup.get(bytes); String hex = convertBinaryToHexString(bytes); return AMQP_BINARY_PREFIX + hex; } else { throw new IllegalArgumentException("Unsupported type provided: " + messageId.getClass()); } }
@Test public void testToBaseMessageIdStringWithString() { String stringMessageId = "myIdString"; String baseMessageIdString = messageIdHelper.toBaseMessageIdString(stringMessageId); assertNotNull("null string should not have been returned", baseMessageIdString); assertEquals("expected base id string was not returned", stringMessageId, baseMessageIdString); }
public static boolean isPlateNumber(CharSequence value) { return isMatchRegex(PLATE_NUMBER, value); }
@Test public void isPlateNumberTest() { assertTrue(Validator.isPlateNumber("粤BA03205")); assertTrue(Validator.isPlateNumber("闽20401领")); }
public FEELFnResult<List> invoke(@ParameterName( "list" ) List list, @ParameterName( "position" ) BigDecimal position, @ParameterName( "newItem" ) Object newItem) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } if ( position == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be null")); } if ( position.intValue() == 0 ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be zero (parameter 'position' is 1-based)")); } if ( position.abs().intValue() > list.size() ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "inconsistent with 'list' size")); } // spec requires us to return a new list final List<Object> result = new ArrayList<>( list ); if( position.intValue() > 0 ) { result.add( position.intValue() - 1, newItem ); } else { result.add( list.size() + position.intValue(), newItem ); } return FEELFnResult.ofResult( result ); }
@Test void invokeInsertIntoEmptyList() { // According to spec, inserting into empty list shouldn't be possible. For inserting into empty list, user // should use append() function. FunctionTestUtil.assertResultError(insertBeforeFunction.invoke(Collections.emptyList(), BigDecimal.ONE, null) , InvalidParametersEvent.class); }
public static <T> Iterator<T> iterator(Class<T> expectedType, String factoryId, ClassLoader classLoader) throws Exception { Iterator<Class<T>> classIterator = classIterator(expectedType, factoryId, classLoader); return new NewInstanceIterator<>(classIterator); }
@Test public void loadServicesFromInMemoryClassLoader() throws Exception { Class<ServiceLoaderTestInterface> type = ServiceLoaderTestInterface.class; String factoryId = "com.hazelcast.InMemoryFileForTesting"; ClassLoader parent = this.getClass().getClassLoader(); // Handles META-INF/services/com.hazelcast.CustomServiceLoaderTestInterface ClassLoader given = new CustomUrlStreamHandlerClassloader(parent); Set<ServiceLoaderTestInterface> implementations = new HashSet<>(); Iterator<ServiceLoaderTestInterface> iterator = ServiceLoader.iterator(type, factoryId, given); while (iterator.hasNext()) { implementations.add(iterator.next()); } assertEquals(1, implementations.size()); }
public <T> void resolve(T resolvable) { ParamResolver resolver = this; if (ParamScope.class.isAssignableFrom(resolvable.getClass())) { ParamScope newScope = (ParamScope) resolvable; resolver = newScope.applyOver(resolver); } resolveStringLeaves(resolvable, resolver); resolveNonStringLeaves(resolvable, resolver); resolveNodes(resolvable, resolver); }
@Test public void shouldResolveConfigValue() { PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant"); pipelineConfig.setLabelTemplate("2.1-${COUNT}-#{foo}-bar-#{bar}"); StageConfig stageConfig = pipelineConfig.get(0); stageConfig.updateApproval(new Approval(new AuthConfig(new AdminUser(new CaseInsensitiveString("#{foo}")), new AdminUser(new CaseInsensitiveString("#{bar}"))))); new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(pipelineConfig); assertThat(pipelineConfig.getLabelTemplate(), is("2.1-${COUNT}-pavan-bar-jj")); assertThat(stageConfig.getApproval().getAuthConfig(), is(new AuthConfig(new AdminUser(new CaseInsensitiveString("pavan")), new AdminUser(new CaseInsensitiveString("jj"))))); }
@Override public void freeMeterId(DeviceId deviceId, MeterId meterId) { MeterTableKey meterTableKey = MeterTableKey.key(deviceId, MeterScope.globalScope()); freeMeterId(meterTableKey, meterId); }
@Test public void testFreeIdInUserMode() { initMeterStore(true); meterStore.freeMeterId(did1, mid1); MeterTableKey globalKey = MeterTableKey.key(did1, MeterScope.globalScope()); assertNotNull(meterStore.availableMeterIds.get(globalKey)); assertTrue(meterStore.availableMeterIds.get(globalKey).isEmpty()); }
@Override public void and(String... bitSetNames) { get(andAsync(bitSetNames)); }
@Test public void testAnd() { RBitSet bs1 = redisson.getBitSet("testbitset1"); bs1.set(3, 5); assertThat(bs1.cardinality()).isEqualTo(2); assertThat(bs1.size()).isEqualTo(8); RBitSet bs2 = redisson.getBitSet("testbitset2"); bs2.set(4); bs2.set(10); bs1.and(bs2.getName()); assertThat(bs1.get(3)).isFalse(); assertThat(bs1.get(4)).isTrue(); assertThat(bs1.get(5)).isFalse(); assertThat(bs2.get(10)).isTrue(); assertThat(bs1.cardinality()).isEqualTo(1); assertThat(bs1.size()).isEqualTo(16); }
@Override public FileSystemKind getKind() { return FileSystemKind.FILE_SYSTEM; }
@Test void testKind() { final FileSystem fs = FileSystem.getLocalFileSystem(); assertThat(fs.getKind()).isEqualTo(FileSystemKind.FILE_SYSTEM); }
public static <T> NavigableSet<Point<T>> fastKNearestPoints(SortedSet<Point<T>> points, Instant time, int k) { checkNotNull(points, "The input SortedSet of Points cannot be null"); checkNotNull(time, "The input time cannot be null"); checkArgument(k >= 0, "k (" + k + ") must be non-negative"); if (k >= points.size()) { return newTreeSet(points); } Point<T> stub = points.first(); Point<T> searchPoint = Point.builder(stub).time(time).latLong(0.0, 0.0).build(); //create two iterators, one goes up from the searchPoint, one goes down from the searchPoint NavigableSet<Point<T>> headSet = ((NavigableSet<Point<T>>) points).headSet(searchPoint, true); NavigableSet<Point<T>> tailSet = ((NavigableSet<Point<T>>) points).tailSet(searchPoint, false); Iterator<Point<T>> headIter = headSet.descendingIterator(); Iterator<Point<T>> tailIter = tailSet.iterator(); TreeSet<Point<T>> results = newTreeSet(); Point<T> up = (headIter.hasNext()) ? headIter.next() : null; Point<T> down = (tailIter.hasNext()) ? tailIter.next() : null; while (results.size() < k) { //add an element from the "down set" when we are out of elements in the "up set" if (up == null) { results.add(down); down = tailIter.next(); continue; } //add an element from the "up set" when we are out of elements in the "down set" if (down == null) { results.add(up); up = headIter.next(); continue; } //add the nearest point when we can choose between the "up set" and the "down set" Duration upDistance = Duration.between(up.time(), time); Duration downDistance = Duration.between(time, down.time()); if (theDuration(upDistance).isLessThanOrEqualTo(downDistance)) { results.add(up); up = (headIter.hasNext()) ? headIter.next() : null; } else { results.add(down); down = (tailIter.hasNext()) ? tailIter.next() : null; } } return results; }
@Test public void testFastKNearestPoints_5() { //Searching for a "time" that is used in the points dataset works NavigableSet<Point<String>> knn = fastKNearestPoints(points, EPOCH.plusSeconds(5), 3); assertEquals(3, knn.size()); Point one = knn.pollFirst(); Point two = knn.pollFirst(); Point three = knn.pollFirst(); //note: the neighbors are in time order, not "closest to search time" order assertTrue(one == p7); assertTrue(two == p8); assertTrue(three == p9); }
public long[] decode(String hash) { if (hash.isEmpty()) { return new long[0]; } String validChars = this.alphabet + this.guards + this.seps; for (int i = 0; i < hash.length(); i++) { if (validChars.indexOf(hash.charAt(i)) == -1) { return new long[0]; } } return this._decode(hash, this.alphabet); }
@Test public void test_issue45() throws Exception { Hashids hashids = new Hashids("this is my salt"); long[] numbers = hashids.decode("()"); Assert.assertEquals(numbers.length, 0); numbers = hashids.decode("[]"); Assert.assertEquals(numbers.length, 0); numbers = hashids.decode("недействительный"); Assert.assertEquals(numbers.length, 0); numbers = hashids.decode("無效"); Assert.assertEquals(numbers.length, 0); }
public static BiMap<String, Integer> createOutputMap(Iterable<String> localOutputs) { ImmutableBiMap.Builder<String, Integer> builder = ImmutableBiMap.builder(); int outputIndex = 0; // sort localOutputs for stable indexing for (String tag : Sets.newTreeSet(localOutputs)) { builder.put(tag, outputIndex); outputIndex++; } return builder.build(); }
@Test public void testOutputMapCreation() { List<String> outputs = Arrays.asList("output1", "output2", "output3"); BiMap<String, Integer> outputMap = PipelineTranslatorUtils.createOutputMap(outputs); Map<Object, Object> expected = ImmutableMap.builder().put("output1", 0).put("output2", 1).put("output3", 2).build(); assertThat(outputMap, is(expected)); }
public static FromMatchesFilter create(Jid address) { return new FromMatchesFilter(address, address != null ? address.hasNoResource() : false) ; }
@Test public void autoCompareMatchingEntityFullJid() { FromMatchesFilter filter = FromMatchesFilter.create(FULL_JID1_R1); Stanza packet = StanzaBuilder.buildMessage().build(); packet.setFrom(FULL_JID1_R1); assertTrue(filter.accept(packet)); packet.setFrom(BASE_JID1); assertFalse(filter.accept(packet)); packet.setFrom(FULL_JID1_R2); assertFalse(filter.accept(packet)); packet.setFrom(BASE_JID2); assertFalse(filter.accept(packet)); packet.setFrom(FULL_JID2); assertFalse(filter.accept(packet)); packet.setFrom(BASE_JID3); assertFalse(filter.accept(packet)); }
public static CompressionType getCompressionType() { return getCompressionType(System.getenv(VESPA_CONFIG_PROTOCOL_COMPRESSION), System.getProperty(VESPA_CONFIG_PROTOCOL_COMPRESSION)); }
@Test public void testCompressionType() { assertThat(JRTConfigRequestFactory.getCompressionType("", ""), is(CompressionType.LZ4)); assertThat(JRTConfigRequestFactory.getCompressionType("UNCOMPRESSED", ""), is(CompressionType.UNCOMPRESSED)); assertThat(JRTConfigRequestFactory.getCompressionType("", "UNCOMPRESSED"), is(CompressionType.UNCOMPRESSED)); assertThat(JRTConfigRequestFactory.getCompressionType("UNCOMPRESSED", "UNCOMPRESSED"), is(CompressionType.UNCOMPRESSED)); assertThat(JRTConfigRequestFactory.getCompressionType("", ""), is(CompressionType.LZ4)); assertThat(JRTConfigRequestFactory.getCompressionType("LZ4", ""), is(CompressionType.LZ4)); assertThat(JRTConfigRequestFactory.getCompressionType("", "LZ4"), is(CompressionType.LZ4)); assertThat(JRTConfigRequestFactory.getCompressionType("LZ4", "LZ4"), is(CompressionType.LZ4)); assertThat(JRTConfigRequestFactory.getCompressionType("UNCOMPRESSED", "LZ4"), is(CompressionType.UNCOMPRESSED)); assertThat(JRTConfigRequestFactory.getCompressionType("LZ4", "UNCOMPRESSED"), is(CompressionType.LZ4)); }
@Override public String getDestination(Exchange exchange, Endpoint endpoint) { String destination = super.getDestination(exchange, endpoint); if (destination.startsWith(QUEUE_PREFIX)) { destination = destination.substring(QUEUE_PREFIX.length()); } return destination; }
@Test public void testGetDestination() { Endpoint endpoint = Mockito.mock(Endpoint.class); Mockito.when(endpoint.getEndpointUri()).thenReturn("stomp:queue:test"); AbstractMessagingSpanDecorator decorator = new StompSpanDecorator(); assertEquals("test", decorator.getDestination(null, endpoint)); }
public int findActualTableIndex(final String dataSourceName, final String actualTableName) { return dataNodeIndexMap.getOrDefault(new DataNode(dataSourceName, actualTableName), -1); }
@Test void assertNotFindActualTableIndex() { ShardingTable actual = new ShardingTable(new ShardingTableRuleConfiguration("LOGIC_TABLE", "ds${0..1}.table_${0..2}"), Arrays.asList("ds0", "ds1"), null); assertThat(actual.findActualTableIndex("ds2", "table_2"), is(-1)); }
public static long getSnapshotIdFromVersion(org.apache.iceberg.Table table, ConnectorTableVersion version) { switch (version.getPointerType()) { case TEMPORAL: return getSnapshotIdFromTemporalVersion(table, version.getConstantOperator()); case VERSION: return getTargetSnapshotIdFromVersion(table, version.getConstantOperator()); case UNKNOWN: default: throw new StarRocksConnectorException("Unknown version type %s", version.getPointerType()); } }
// Covers the failure modes of snapshot resolution: unparsable/ancient temporal versions,
// a VERSION pointer naming a non-existent reference, and an unsupported constant type.
@Test
public void testGetSnapshotIdFromVersion() {
    // Temporal version given as a date-only string: not a valid timestamp.
    ConstantOperator constantOperator = new ConstantOperator("2023-01-01", VARCHAR);
    ConnectorTableVersion tableVersion = new ConnectorTableVersion(PointerType.TEMPORAL, constantOperator);
    ConnectorTableVersion finalTableVersion = tableVersion;
    ExceptionChecker.expectThrowsWithMsg(StarRocksConnectorException.class,
            "Invalid temporal version",
            () -> IcebergMetadata.getSnapshotIdFromVersion(mockedNativeTableB, finalTableVersion));

    // Temporal version with a DATE-typed constant is also rejected.
    constantOperator = new ConstantOperator(LocalDateTime.now(), DATE);
    tableVersion = new ConnectorTableVersion(PointerType.TEMPORAL, constantOperator);
    ConnectorTableVersion finalTableVersion1 = tableVersion;
    ExceptionChecker.expectThrowsWithMsg(StarRocksConnectorException.class,
            "Invalid temporal version",
            () -> IcebergMetadata.getSnapshotIdFromVersion(mockedNativeTableB, finalTableVersion1));

    // A timestamp predating any snapshot of the table.
    constantOperator = new ConstantOperator("2000-01-01 00:00:00", VARCHAR);
    tableVersion = new ConnectorTableVersion(PointerType.TEMPORAL, constantOperator);
    ConnectorTableVersion finalTableVersion2 = tableVersion;
    ExceptionChecker.expectThrowsWithMsg(StarRocksConnectorException.class,
            "Invalid temporal version",
            () -> IcebergMetadata.getSnapshotIdFromVersion(mockedNativeTableB, finalTableVersion2));

    // VERSION pointer naming a reference that does not exist on the table.
    constantOperator = new ConstantOperator("not_exist", VARCHAR);
    tableVersion = new ConnectorTableVersion(PointerType.VERSION, constantOperator);
    ConnectorTableVersion finalTableVersion3 = tableVersion;
    ExceptionChecker.expectThrowsWithMsg(StarRocksConnectorException.class,
            "Cannot find snapshot with reference name",
            () -> IcebergMetadata.getSnapshotIdFromVersion(mockedNativeTableB, finalTableVersion3));

    // VERSION pointer with an unsupported constant type (INT).
    constantOperator = new ConstantOperator(123, INT);
    tableVersion = new ConnectorTableVersion(PointerType.VERSION, constantOperator);
    ConnectorTableVersion finalTableVersion4 = tableVersion;
    ExceptionChecker.expectThrowsWithMsg(StarRocksConnectorException.class,
            "Unsupported type for table version",
            () -> IcebergMetadata.getSnapshotIdFromVersion(mockedNativeTableB, finalTableVersion4));
}
public static String capitalize(final String str) { int strLen; if (str == null || (strLen = str.length()) == 0) { return str; } final int firstCodepoint = str.codePointAt(0); final int newCodePoint = Character.toTitleCase(firstCodepoint); if (firstCodepoint == newCodePoint) { // already capitalized return str; } final int[] newCodePoints = new int[strLen]; // cannot be longer than the char array int outOffset = 0; newCodePoints[outOffset++] = newCodePoint; // copy the first codepoint for (int inOffset = Character.charCount(firstCodepoint); inOffset < strLen; ) { final int codepoint = str.codePointAt(inOffset); newCodePoints[outOffset++] = codepoint; // copy the remaining ones inOffset += Character.charCount(codepoint); } return new String(newCodePoints, 0, outOffset); }
@Test public void testCapitalize() { assertEquals(StringUtils.capitalize("abc"), "Abc"); assertEquals(StringUtils.uncapitalize("Abc"), "abc"); }
public <T> Map<String, T> lookupByType(Class<T> type) { return registry.findByTypeWithName(type); }
@Test public void testRouteWithSpringProcessor() throws Exception { doTestMain("classpath:org/apache/camel/main/xml/spring-camel1.xml", (main, camelContext) -> { try { MockEndpoint endpoint = camelContext.getEndpoint("mock:finish", MockEndpoint.class); endpoint.expectedBodiesReceived("Hello World (2147483647)"); main.getCamelTemplate().sendBody("direct:start", "I'm World"); endpoint.assertIsSatisfied(); assertTrue(camelContext.getUuidGenerator() instanceof ShortUuidGenerator); Bean1 bean1 = main.lookupByType(Bean1.class).get("bean1"); Bean2 bean2 = bean1.getBean(); assertSame(bean1, bean2.getBean()); } catch (Exception e) { fail(e.getMessage()); } }); }