focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Test-only factory: builds a JobGraph from the given StreamGraph using the current thread's
// context class loader and a same-thread executor (Runnable::run).
// NOTE(review): the meaning of the null constructor argument is not visible here — confirm
// against StreamingJobGraphGenerator's constructor before relying on it.
@VisibleForTesting public static JobGraph createJobGraph(StreamGraph streamGraph) { return new StreamingJobGraphGenerator( Thread.currentThread().getContextClassLoader(), streamGraph, null, Runnable::run) .createJobGraph(); }
// Verifies that an explicit setParallelism() on the sink marks parallelism as "configured" on that
// stream node only, and that chaining the three operators into one JobVertex propagates the
// configured flag to the resulting vertex.
@Test void testChainNodeSetParallelism() { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.fromSequence(1L, 3L).map(value -> value).print().setParallelism(env.getParallelism()); StreamGraph streamGraph = env.getStreamGraph(); // check the streamGraph parallelism configured final List<StreamNode> streamNodes = streamGraph.getStreamNodes().stream() .sorted(Comparator.comparingInt(StreamNode::getId)) .collect(Collectors.toList()); assertThat(streamNodes.get(0).isParallelismConfigured()).isFalse(); assertThat(streamNodes.get(1).isParallelismConfigured()).isFalse(); assertThat(streamNodes.get(2).isParallelismConfigured()).isTrue(); // check the jobGraph parallelism configured JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph); List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources(); assertThat(jobGraph.getNumberOfVertices()).isEqualTo(1); assertThat(vertices.get(0).isParallelismConfigured()).isTrue(); }
// Splits this gap-encoded ordinal reader into numSplits readers (numSplits must be a power of 2).
// Ordinal o is routed to shard (o & (numSplits-1)) and stored there as o >> log2(numSplits), so
// (shardOrdinal * numSplits + shardIndex) reconstructs the original ordinal. Each shard is
// re-gap-encoded as varint deltas; empty shards become EMPTY_READER.
// NOTE(review): nextElement() is called twice per iteration (loop condition and add) — this is only
// correct if nextElement() is a side-effect-free peek; confirm against the reader's API.
public GapEncodedVariableLengthIntegerReader[] split(int numSplits) { if (numSplits<=0 || !((numSplits&(numSplits-1))==0)) { throw new IllegalStateException("Split should only be called with powers of 2, it was called with " + numSplits); } final int toMask = numSplits - 1; final int toOrdinalShift = 31 - Integer.numberOfLeadingZeros(numSplits); GapEncodedVariableLengthIntegerReader[] to = new GapEncodedVariableLengthIntegerReader[numSplits]; List<Integer> ordinals = new ArrayList<>(); reset(); while(nextElement() != Integer.MAX_VALUE) { ordinals.add(nextElement()); advance(); } ByteDataArray[] splitOrdinals = new ByteDataArray[numSplits]; int previousSplitOrdinal[] = new int[numSplits]; for (int ordinal : ordinals) { int toIndex = ordinal & toMask; int toOrdinal = ordinal >> toOrdinalShift; if (splitOrdinals[toIndex] == null) { splitOrdinals[toIndex] = new ByteDataArray(WastefulRecycler.DEFAULT_INSTANCE); } VarInt.writeVInt(splitOrdinals[toIndex], toOrdinal - previousSplitOrdinal[toIndex]); previousSplitOrdinal[toIndex] = toOrdinal; } for(int i=0;i<numSplits;i++) { if (splitOrdinals[i] == null) { to[i] = EMPTY_READER; } else { to[i] = new GapEncodedVariableLengthIntegerReader(splitOrdinals[i].getUnderlyingArray(), (int) splitOrdinals[i].length()); } } return to; }
// Exercises split() for power-of-2 shard counts: value routing/shifting for 2 and 256 shards,
// EMPTY_READER for unpopulated shards, splitting an empty reader, and rejection of 0 and
// non-power-of-2 counts.
@Test public void testSplit() { GapEncodedVariableLengthIntegerReader reader = reader(1, 10, 100, 105, 107, 200); GapEncodedVariableLengthIntegerReader[] splitBy2 = reader.split(2); assertEquals(2, splitBy2.length); assertValues(splitBy2[0], 5, 50, 100); // (split[i]*numSplits + i) is the original ordinal assertValues(splitBy2[1], 0, 52, 53); GapEncodedVariableLengthIntegerReader[] splitBy256 = reader.split(256); assertEquals(256, splitBy256.length); assertValues(splitBy256[1], 0); assertValues(splitBy256[200], 0); assertEquals(EMPTY_READER, splitBy256[0]); assertEquals(EMPTY_READER, splitBy256[255]); GapEncodedVariableLengthIntegerReader[] splitBy2Empty = EMPTY_READER.split(2); assertEquals(2, splitBy2Empty.length); assertEquals(EMPTY_READER, splitBy2Empty[0]); assertEquals(EMPTY_READER, splitBy2Empty[1]); assertIllegalStateException(() ->reader.split(0)); assertIllegalStateException(() -> reader.split(3)); }
public static void run(String[] args, Properties properties) { if (args.length == 0) { System.err.println("You must specify a classname to launch"); } // Scan the arguments looking for -s, --server-root=, -P, --properties= String root = null; String propertyFile = null; for (int i = 0; i < args.length; i++) { if ("-s".equals(args[i]) && i < args.length - 1) { root = args[i + 1]; break; } else if (args[i].startsWith("--server-root=")) { root = args[i].substring(args[i].indexOf('=') + 1); break; } else if ("-P".equals(args[i]) && i < args.length - 1) { propertyFile = args[i + 1]; break; } else if (args[i].startsWith("--properties=")) { propertyFile = args[i].substring(args[i].indexOf('=') + 1); break; } } if (propertyFile != null) { try (Reader r = Files.newBufferedReader(Paths.get(propertyFile))) { Properties loaded = new Properties(); loaded.load(r); loaded.forEach(properties::putIfAbsent); } catch (IOException e) { throw new IllegalArgumentException(e); } } String home = properties.getProperty(INFINISPAN_SERVER_HOME_PATH, properties.getProperty("user.dir")); ClassLoader bootClassLoader = Loader.class.getClassLoader(); ClassLoader serverClassLoader = classLoaderFromPath(Paths.get(home, "lib"), bootClassLoader); if (root == null) { root = properties.getProperty(INFINISPAN_SERVER_ROOT_PATH, Paths.get(home, DEFAULT_SERVER_ROOT_DIR).toString()); } String lib = properties.getProperty(INFINISPAN_SERVER_LIB_PATH); if (lib != null) { for (String item : lib.split(File.pathSeparator)) { serverClassLoader = classLoaderFromPath(Paths.get(item), serverClassLoader); } } else { serverClassLoader = classLoaderFromPath(Paths.get(root, "lib"), serverClassLoader); } Thread.currentThread().setContextClassLoader(serverClassLoader); try { Class<?> mainClass = serverClassLoader.loadClass(args[0]); Method mainMethod = mainClass.getMethod("main", String[].class); String[] mainArgs = new String[args.length - 1]; System.arraycopy(args, 1, mainArgs, 0, mainArgs.length); 
mainMethod.invoke(null, (Object) mainArgs); } catch (Exception e) { System.err.println(e.getMessage()); e.printStackTrace(System.err); } }
// Verifies that Loader.run honors INFINISPAN_SERVER_LIB_PATH by launching a class that is only
// resolvable through the three configured lib directories.
@Test public void testLoaderViaSystemProperty() { Properties properties = new Properties(); properties.put(Loader.INFINISPAN_SERVER_LIB_PATH, String.join(File.pathSeparator, lib1.toString(), lib2.toString(), lib3.toString())); properties.put("user.dir", System.getProperty("user.dir")); Loader.run(new String[]{LoaderTest.class.getName()}, properties); }
// Creates all given internal topics via the admin client, retrying any that failed until every
// topic is created or the retry deadline (now + retryTimeoutMs) is exceeded; per-topic broker
// configs are derived from the streams-side topic configuration.
// NOTE(review): loop-exit on deadline happens inside processCreateTopicResults/maybeSleep — not
// visible here; confirm those helpers enforce the deadline.
public void setup(final Map<String, InternalTopicConfig> topicConfigs) { log.info("Starting to setup internal topics {}.", topicConfigs.keySet()); final long now = time.milliseconds(); final long deadline = now + retryTimeoutMs; final Map<String, Map<String, String>> streamsSideTopicConfigs = topicConfigs.values().stream() .collect(Collectors.toMap( InternalTopicConfig::name, topicConfig -> topicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention) )); final Set<String> createdTopics = new HashSet<>(); final Set<String> topicStillToCreate = new HashSet<>(topicConfigs.keySet()); while (!topicStillToCreate.isEmpty()) { final Set<NewTopic> newTopics = topicStillToCreate.stream() .map(topicName -> new NewTopic( topicName, topicConfigs.get(topicName).numberOfPartitions(), Optional.of(replicationFactor) ).configs(streamsSideTopicConfigs.get(topicName)) ).collect(Collectors.toSet()); log.info("Going to create internal topics: " + newTopics); final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics); processCreateTopicResults(createTopicsResult, topicStillToCreate, createdTopics, deadline); maybeSleep(Collections.singletonList(topicStillToCreate), deadline, "created"); } log.info("Completed setup of internal topics {}.", topicConfigs.keySet()); }
// When topic creation fails with an unexpected exception mid-setup, setup() must throw
// StreamsException and delete the topics it already created (topic1) as cleanup.
@Test public void shouldCleanUpWhenUnexpectedExceptionIsThrownDuringSetup() { final AdminClient admin = mock(AdminClient.class); final StreamsConfig streamsConfig = new StreamsConfig(config); final MockTime time = new MockTime( (Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 3 ); final InternalTopicManager topicManager = new InternalTopicManager(time, admin, streamsConfig); final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 1); final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(topic2, 1); setupCleanUpScenario(admin, streamsConfig, internalTopicConfig1, internalTopicConfig2); final KafkaFutureImpl<Void> deleteTopicSuccessfulFuture = new KafkaFutureImpl<>(); deleteTopicSuccessfulFuture.complete(null); when(admin.deleteTopics(mkSet(topic1))) .thenAnswer(answer -> new MockDeleteTopicsResult(mkMap(mkEntry(topic1, deleteTopicSuccessfulFuture)))); assertThrows( StreamsException.class, () -> topicManager.setup(mkMap( mkEntry(topic1, internalTopicConfig1), mkEntry(topic2, internalTopicConfig2) )) ); }
// Moves the group to the given state after validating the transition is legal
// (assertValidTransition throws otherwise), remembers the previous state, stamps the
// transition time, and notifies the metrics sink of the state change.
public void transitionTo(ClassicGroupState groupState) { assertValidTransition(groupState); previousState = state; state = groupState; currentStateTimestamp = Optional.of(time.milliseconds()); metrics.onClassicGroupStateTransition(previousState, state); }
// An Empty group must not jump straight to CompletingRebalance: transitionTo must reject it.
@Test public void testEmptyToAwaitingRebalanceIllegalTransition() { assertThrows(IllegalStateException.class, () -> group.transitionTo(COMPLETING_REBALANCE)); }
// CDI producer for the JobScheduler bean. When the scheduler is disabled it returns null;
// NOTE(review): with @DefaultBean a null producer result presumably means "no bean available" —
// confirm against the Quarkus @DefaultBean semantics.
@Produces @DefaultBean @Singleton public JobScheduler jobScheduler(StorageProvider storageProvider) { if (jobRunrBuildTimeConfiguration.jobScheduler().enabled()) { final JobDetailsGenerator jobDetailsGenerator = newInstance(jobRunrRuntimeConfiguration.jobScheduler().jobDetailsGenerator().orElse(CachingJobDetailsGenerator.class.getName())); return new JobScheduler(storageProvider, jobDetailsGenerator, emptyList()); } return null; }
// With the scheduler enabled in build-time configuration, the producer must return a non-null bean.
@Test void jobSchedulerIsSetupWhenConfigured() { when(jobSchedulerBuildTimeConfiguration.enabled()).thenReturn(true); assertThat(jobRunrProducer.jobScheduler(storageProvider)).isNotNull(); }
public static long getNextScheduledTime(final String cronEntry, long currentTime) throws MessageFormatException { long result = 0; if (cronEntry == null || cronEntry.length() == 0) { return result; } // Handle the once per minute case "* * * * *" // starting the next event at the top of the minute. if (cronEntry.equals("* * * * *")) { result = currentTime + 60 * 1000; result = result / 60000 * 60000; return result; } List<String> list = tokenize(cronEntry); List<CronEntry> entries = buildCronEntries(list); Calendar working = Calendar.getInstance(); working.setTimeInMillis(currentTime); working.set(Calendar.SECOND, 0); CronEntry minutes = entries.get(MINUTES); CronEntry hours = entries.get(HOURS); CronEntry dayOfMonth = entries.get(DAY_OF_MONTH); CronEntry month = entries.get(MONTH); CronEntry dayOfWeek = entries.get(DAY_OF_WEEK); // Start at the top of the next minute, cron is only guaranteed to be // run on the minute. int timeToNextMinute = 60 - working.get(Calendar.SECOND); working.add(Calendar.SECOND, timeToNextMinute); // If its already to late in the day this will roll us over to tomorrow // so we'll need to check again when done updating month and day. int currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } int currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } // We can roll into the next month here which might violate the cron setting // rules so we check once then recheck again after applying the month settings. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Start by checking if we are in the right month, if not then calculations // need to start from the beginning of the month to ensure that we don't end // up on the wrong day. 
(Can happen when DAY_OF_WEEK is set and current time // is ahead of the day of the week to execute on). doUpdateCurrentMonth(working, month); // Now Check day of week and day of month together since they can be specified // together in one entry, if both "day of month" and "day of week" are restricted // (not "*"), then either the "day of month" field (3) or the "day of week" field // (5) must match the current day or the Calenday must be advanced. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Now we can chose the correct hour and minute of the day in question. currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } result = working.getTimeInMillis(); if (result <= currentTime) { throw new ArithmeticException("Unable to compute next scheduled exection time."); } return result; }
// From Mon 15 Nov 2010 09:15:30, the entry "* * 16 * *" must fire at midnight on the 16th
// (seconds/minutes/hour all zero, same month and year).
@Test public void testgetNextTimeDays() throws MessageFormatException { // using an absolute date so that result will be absolute - Monday 15 Nov 2010 Calendar current = Calendar.getInstance(); current.set(2010, Calendar.NOVEMBER, 15, 9, 15, 30); LOG.debug("start:" + current.getTime()); String test = "* * 16 * *"; long next = CronParser.getNextScheduledTime(test, current.getTimeInMillis()); Calendar result = Calendar.getInstance(); result.setTimeInMillis(next); LOG.debug("next:" + result.getTime()); assertEquals(0,result.get(Calendar.SECOND)); assertEquals(0,result.get(Calendar.MINUTE)); assertEquals(0,result.get(Calendar.HOUR)); assertEquals(16,result.get(Calendar.DAY_OF_MONTH)); assertEquals(Calendar.NOVEMBER,result.get(Calendar.MONTH)); assertEquals(2010,result.get(Calendar.YEAR)); }
/**
 * Builds the backend query filter string for a stream, optionally AND-ing a user query.
 * Returns "streams:<id>"; when a non-blank, non-"*" query is given, appends
 * " AND (<query>)".
 *
 * @param streamId the stream id; must be non-null and non-blank after trimming
 * @param query    optional user query; blank or "*" is ignored
 * @throws IllegalArgumentException if streamId is null or blank
 */
protected String buildQueryFilter(String streamId, String query) {
    checkArgument(streamId != null, "streamId parameter cannot be null");
    final String id = streamId.trim();
    checkArgument(!id.isEmpty(), "streamId parameter cannot be empty");
    final StringBuilder filter = new StringBuilder("streams:").append(id);
    if (query != null) {
        final String userQuery = query.trim();
        // "*" matches everything, so appending it would be a no-op filter.
        if (!userQuery.isEmpty() && !"*".equals(userQuery)) {
            filter.append(" AND (").append(userQuery).append(")");
        }
    }
    return filter.toString();
}
// Covers buildQueryFilter: null/empty streamId rejection, trimming, ignoring blank and "*"
// queries, and wrapping real queries (including nested parentheses) in " AND (...)".
@Test public void testQueryFilterBuilder() { final AbstractAlertCondition condition = (AbstractAlertCondition) getDummyAlertCondition(ImmutableMap.of()); assertThatThrownBy(() -> condition.buildQueryFilter(null, null)) .hasMessageContaining("streamId") .hasMessageContaining("be null"); assertThatThrownBy(() -> condition.buildQueryFilter("", null)) .hasMessageContaining("streamId") .hasMessageContaining("be empty"); assertThat(condition.buildQueryFilter(" abc123 ", null)) .isEqualTo("streams:abc123"); assertThat(condition.buildQueryFilter("abc123", "")) .isEqualTo("streams:abc123"); assertThat(condition.buildQueryFilter("abc123", "*")) .isEqualTo("streams:abc123"); assertThat(condition.buildQueryFilter("abc123", " * ")) .isEqualTo("streams:abc123"); assertThat(condition.buildQueryFilter("abc123", " hello:world foo:\"bar baz\" ")) .isEqualTo("streams:abc123 AND (hello:world foo:\"bar baz\")"); assertThat(condition.buildQueryFilter("abc123", "hello:world AND foo:\"bar baz\"")) .isEqualTo("streams:abc123 AND (hello:world AND foo:\"bar baz\")"); assertThat(condition.buildQueryFilter("abc123", "hello:world AND (foo:\"bar baz\" OR foo:yolo)")) .isEqualTo("streams:abc123 AND (hello:world AND (foo:\"bar baz\" OR foo:yolo))"); }
// Routes an operator event: AcknowledgeCheckpointEvent re-opens the subtask's gateway (releasing
// events blocked for that checkpoint) and is consumed here; all other events are forwarded to the
// wrapped coordinator. Must run on the main thread (asserted).
public void handleEventFromOperator(int subtask, int attemptNumber, OperatorEvent event) throws Exception { mainThreadExecutor.assertRunningInMainThread(); if (event instanceof AcknowledgeCheckpointEvent) { subtaskGatewayMap .get(subtask) .openGatewayAndUnmarkCheckpoint( ((AcknowledgeCheckpointEvent) event).getCheckpointID()); return; } coordinator.handleEventFromOperator(subtask, attemptNumber, event); }
// An event sent while a checkpoint is in flight is buffered; the AcknowledgeCheckpointEvent for
// that checkpoint must re-open the gateway so the buffered event is delivered to the subtask.
@Test void acknowledgeCheckpointEventReleasesBlockedEvents() throws Exception { final EventReceivingTasks tasks = EventReceivingTasks.createForRunningTasks(); final OperatorCoordinatorHolder holder = createCoordinatorHolder(tasks, TestingOperatorCoordinator::new); triggerAndCompleteCheckpoint(holder, 1111L); getCoordinator(holder).getSubtaskGateway(0).sendEvent(new TestOperatorEvent(1337)); holder.handleEventFromOperator(0, 0, new AcknowledgeCheckpointEvent(1111L)); assertThat(tasks.getSentEventsForSubtask(0)).containsExactly(new TestOperatorEvent(1337)); }
// Decodes a NAME_RESOLUTION_RESOLVE log record and appends a human-readable form to the builder.
// Wire layout read here, in order: log header, 1-byte re-resolution flag, 8-byte little-endian
// duration, resolver name (ASCII), resolved name (ASCII), then the inet address.
// NOTE(review): the extra SIZE_OF_INT after each getStringAscii presumably skips the string's
// length prefix — confirm against the Agrona getStringAscii encoding.
static void dissectResolve( final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int absoluteOffset = offset; absoluteOffset += dissectLogHeader(CONTEXT, NAME_RESOLUTION_RESOLVE, buffer, absoluteOffset, builder); final boolean isReResolution = 1 == buffer.getByte(absoluteOffset); absoluteOffset += SIZE_OF_BYTE; final long durationNs = buffer.getLong(absoluteOffset, LITTLE_ENDIAN); absoluteOffset += SIZE_OF_LONG; builder.append(": resolver="); absoluteOffset += buffer.getStringAscii(absoluteOffset, builder); absoluteOffset += SIZE_OF_INT; builder.append(" durationNs=").append(durationNs); builder.append(" name="); absoluteOffset += buffer.getStringAscii(absoluteOffset, builder); absoluteOffset += SIZE_OF_INT; builder.append(" isReResolution=").append(isReResolution); builder.append(" address="); dissectInetAddress(buffer, absoluteOffset, builder); }
// Round-trips an encoded resolve event whose resolver and host names exceed MAX_HOST_NAME_LENGTH,
// checking that dissectResolve renders the truncated ("...") names, duration, flag and address.
@Test void dissectResolveWithReallyLongNames() throws UnknownHostException { final String longString = "testResolver.this.is.a.really.long.string.to.force.truncation.0000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "00000000000000000000000000000000000000000000000000000000000000000000000000000000"; final String expected = "DRIVER: NAME_RESOLUTION_RESOLVE [537/537]: resolver=testResolver." + "this.is.a.really.long.string.to.force.truncation.0000000000000000000000000000000000000000000000000000000" + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "00000000000000000000000000000000... " + "durationNs=555 " + "name=testResolver.this.is.a.really.long.string.to.force.truncation.0000000000000000000000000000000000000" + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + "00000000000000000000000000000000000000000000000000... " + "isReResolution=false " + "address=127.0.0.1"; final InetAddress address = InetAddress.getByName("127.0.0.1"); final int length = SIZE_OF_BOOLEAN + SIZE_OF_LONG + trailingStringLength(longString, MAX_HOST_NAME_LENGTH) + trailingStringLength(longString, MAX_HOST_NAME_LENGTH) + inetAddressLength(address); DriverEventEncoder.encodeResolve(buffer, 0, length, length, longString, 555, longString, false, address); final StringBuilder builder = new StringBuilder(); DriverEventDissector.dissectResolve(buffer, 0, builder); assertThat(builder.toString(), endsWith(expected)); }
// Called when this coordinator sheds its groups. Consumer/share groups just log; classic groups
// transition to DEAD and any members blocked on join/sync futures are completed with
// NOT_COORDINATOR so clients re-discover the new coordinator.
// NOTE(review): the CLASSIC branch logs "Unloading" while CONSUMER/SHARE log "Unloaded" —
// possibly an intentional tense difference, but worth confirming.
public void onUnloaded() { groups.values().forEach(group -> { switch (group.type()) { case CONSUMER: ConsumerGroup consumerGroup = (ConsumerGroup) group; log.info("[GroupId={}] Unloaded group metadata for group epoch {}.", consumerGroup.groupId(), consumerGroup.groupEpoch()); break; case CLASSIC: ClassicGroup classicGroup = (ClassicGroup) group; log.info("[GroupId={}] Unloading group metadata for generation {}.", classicGroup.groupId(), classicGroup.generationId()); classicGroup.transitionTo(DEAD); switch (classicGroup.previousState()) { case EMPTY: case DEAD: break; case PREPARING_REBALANCE: classicGroup.allMembers().forEach(member -> { classicGroup.completeJoinFuture(member, new JoinGroupResponseData() .setMemberId(member.memberId()) .setErrorCode(NOT_COORDINATOR.code())); }); break; case COMPLETING_REBALANCE: case STABLE: classicGroup.allMembers().forEach(member -> { classicGroup.completeSyncFuture(member, new SyncGroupResponseData() .setErrorCode(NOT_COORDINATOR.code())); }); } break; case SHARE: ShareGroup shareGroup = (ShareGroup) group; log.info("[GroupId={}] Unloaded group metadata for group epoch {}.", shareGroup.groupId(), shareGroup.groupEpoch()); break; default: log.warn("onUnloaded group with an unknown group type {}.", group.type()); break; } }); }
// Drives a classic group into COMPLETING_REBALANCE with two members awaiting sync, then calls
// onUnloaded(): the group must go DEAD and both pending sync futures must complete with
// NOT_COORDINATOR and an empty assignment.
@Test public void testClassicGroupOnUnloadedCompletingRebalance() throws Exception { GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .build(); ClassicGroup group = context.createClassicGroup("group-id"); // Set up a group in with a leader, follower, and a pending member. // Have the pending member join the group and both the pending member // and the follower sync. We should have 2 members awaiting sync. GroupMetadataManagerTestContext.PendingMemberGroupResult pendingGroupResult = context.setupGroupWithPendingMember(group); String pendingMemberId = pendingGroupResult.pendingMemberResponse.memberId(); // Compete join group for the pending member JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder() .withGroupId("group-id") .withMemberId(pendingMemberId) .withDefaultProtocolTypeAndProtocols() .build(); GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request); assertTrue(joinResult.records.isEmpty()); assertTrue(joinResult.joinFuture.isDone()); assertEquals(Errors.NONE.code(), joinResult.joinFuture.get().errorCode()); assertEquals(3, group.allMembers().size()); assertEquals(0, group.numPendingJoinMembers()); // Follower and pending send SyncGroup request. // Follower and pending member should be awaiting sync while the leader is pending sync. 
GroupMetadataManagerTestContext.SyncResult followerSyncResult = context.sendClassicGroupSync( new GroupMetadataManagerTestContext.SyncGroupRequestBuilder() .withGroupId("group-id") .withMemberId(pendingGroupResult.followerId) .withGenerationId(joinResult.joinFuture.get().generationId()) .build()); GroupMetadataManagerTestContext.SyncResult pendingMemberSyncResult = context.sendClassicGroupSync( new GroupMetadataManagerTestContext.SyncGroupRequestBuilder() .withGroupId("group-id") .withMemberId(pendingMemberId) .withGenerationId(joinResult.joinFuture.get().generationId()) .build()); assertFalse(followerSyncResult.syncFuture.isDone()); assertFalse(pendingMemberSyncResult.syncFuture.isDone()); assertTrue(group.isInState(COMPLETING_REBALANCE)); context.onUnloaded(); assertTrue(group.isInState(DEAD)); assertTrue(followerSyncResult.syncFuture.isDone()); assertTrue(pendingMemberSyncResult.syncFuture.isDone()); assertEquals(new SyncGroupResponseData() .setAssignment(EMPTY_ASSIGNMENT) .setErrorCode(NOT_COORDINATOR.code()), followerSyncResult.syncFuture.get()); assertEquals(new SyncGroupResponseData() .setAssignment(EMPTY_ASSIGNMENT) .setErrorCode(NOT_COORDINATOR.code()), pendingMemberSyncResult.syncFuture.get()); }
String buildDefaultMessage(EventNotificationContext ctx, SlackEventNotificationConfig config) { String title = buildMessageTitle(ctx); // Build custom message String audience = ""; if (config.notifyChannel() || config.notifyHere()) { audience = config.notifyChannel() ? "@channel " : "@here "; } String description = ctx.eventDefinition().map(EventDefinitionDto::description).orElse(""); return String.format(Locale.ROOT, "%s*Alert %s* triggered:\n> %s \n", audience, title, description); }
// Checks the default message is non-empty, carries the @channel mention, and pins the exact
// rendered UTF-8 byte length (95) for the fixture configuration.
@Test public void buildDefaultMessage() { String message = slackEventNotification.buildDefaultMessage(eventNotificationContext, slackEventNotificationConfig); assertThat(message).isNotBlank(); assertThat(message).isNotEmpty(); assertThat(message).isNotNull(); assertThat(message).contains("@channel"); assertThat(message.getBytes(StandardCharsets.UTF_8).length).isEqualTo(95); }
@Override public int compareTo(PluginDesc<?> other) { int nameComp = name.compareTo(other.name); int versionComp = encodedVersion.compareTo(other.encodedVersion); // isolated plugins appear after classpath plugins when they have identical versions. int isolatedComp = Boolean.compare(other.loader instanceof PluginClassLoader, loader instanceof PluginClassLoader); // choose an arbitrary order between different locations and types int loaderComp = location.compareTo(other.location); int typeComp = type.compareTo(other.type); return nameComp != 0 ? nameComp : versionComp != 0 ? versionComp : isolatedComp != 0 ? isolatedComp : loaderComp != 0 ? loaderComp : typeComp; }
// Exercises PluginDesc.compareTo across its tie-breaker chain: version ordering, null/snapshot
// versions, classpath-vs-isolated loaders with equal versions, distinct plugin-path loaders,
// and same class registered under two plugin types.
@SuppressWarnings("rawtypes") @Test public void testPluginDescComparison() { PluginDesc<SinkConnector> connectorDescPluginPath = new PluginDesc<>( SinkConnector.class, regularVersion, PluginType.SINK, pluginLoader ); PluginDesc<SinkConnector> connectorDescClasspath = new PluginDesc<>( SinkConnector.class, newerVersion, PluginType.SINK, systemLoader ); assertNewer(connectorDescPluginPath, connectorDescClasspath); PluginDesc<Converter> converterDescPluginPath = new PluginDesc<>( Converter.class, noVersion, PluginType.CONVERTER, pluginLoader ); PluginDesc<Converter> converterDescClasspath = new PluginDesc<>( Converter.class, snapshotVersion, PluginType.CONVERTER, systemLoader ); assertNewer(converterDescPluginPath, converterDescClasspath); PluginDesc<Transformation> transformDescPluginPath = new PluginDesc<>( Transformation.class, null, PluginType.TRANSFORMATION, pluginLoader ); PluginDesc<Transformation> transformDescClasspath = new PluginDesc<>( Transformation.class, regularVersion, PluginType.TRANSFORMATION, systemLoader ); assertNewer(transformDescPluginPath, transformDescClasspath); PluginDesc<Predicate> predicateDescPluginPath = new PluginDesc<>( Predicate.class, regularVersion, PluginType.PREDICATE, pluginLoader ); PluginDesc<Predicate> predicateDescClasspath = new PluginDesc<>( Predicate.class, regularVersion, PluginType.PREDICATE, systemLoader ); assertNewer(predicateDescPluginPath, predicateDescClasspath); PluginDesc<ConfigProvider> configProviderDescPluginPath = new PluginDesc<>( FileConfigProvider.class, regularVersion, PluginType.CONFIGPROVIDER, pluginLoader ); PluginDesc<ConfigProvider> configProviderDescOtherPluginLoader = new PluginDesc<>( FileConfigProvider.class, regularVersion, PluginType.CONFIGPROVIDER, otherPluginLoader ); assertTrue(configProviderDescPluginPath.compareTo(configProviderDescOtherPluginLoader) != 0, "Different plugin loaders should have an ordering"); PluginDesc<Converter> jsonConverterPlugin = new PluginDesc<>( JsonConverter.class, 
regularVersion, PluginType.CONVERTER, systemLoader ); PluginDesc<HeaderConverter> jsonHeaderConverterPlugin = new PluginDesc<>( JsonConverter.class, regularVersion, PluginType.HEADER_CONVERTER, systemLoader ); assertNewer(jsonConverterPlugin, jsonHeaderConverterPlugin); }
/**
 * Returns the protocol list, prepending "TLSv1.3" when it is enabled.
 *
 * @param enableTls13      whether TLSv1.3 should be offered first
 * @param defaultProtocols the base protocol list (returned unchanged when TLSv1.3 is disabled)
 * @return a new array with "TLSv1.3" at index 0 followed by the defaults, or the defaults as-is
 */
static String[] maybeAddTls13(boolean enableTls13, String... defaultProtocols) {
    if (!enableTls13) {
        return defaultProtocols;
    }
    final String[] withTls13 = new String[defaultProtocols.length + 1];
    withTls13[0] = "TLSv1.3";
    System.arraycopy(defaultProtocols, 0, withTls13, 1, defaultProtocols.length);
    return withTls13;
}
// With TLSv1.3 disabled, the default protocol list must pass through unchanged.
@Test void disableTls13() { String[] protos = ClientSslContextFactory.maybeAddTls13(false, "TLSv1.2"); assertEquals(Arrays.asList("TLSv1.2"), Arrays.asList(protos)); }
/**
 * Two keys are equal iff their packed object-number/generation values match.
 * Null and foreign types compare unequal (instanceof is null-safe).
 */
@Override
public boolean equals(Object obj) {
    if (!(obj instanceof COSObjectKey)) {
        return false;
    }
    return ((COSObjectKey) obj).numberAndGeneration == numberAndGeneration;
}
// Equal (number, generation) pairs compare equal; differing object numbers do not.
@Test void testEquals() { assertEquals(new COSObjectKey(100, 0), new COSObjectKey(100, 0)); assertNotEquals(new COSObjectKey(100, 0), new COSObjectKey(101, 0)); }
// Serves one page of a daemon log file as HTML. Security: fileName must resolve to a single path
// component directly under daemonLogRoot (normalize + startsWith + name-count check) to block
// traversal into worker logs or elsewhere; otherwise 404. Page length is capped at 10 MiB and
// defaults to the configured bytes-per-page; start defaults to the file tail. Non-text files get
// a placeholder message, and a grep parameter switches to line-filtered output without pager links.
// NOTE(review): daemonLogRoot.toFile().listFiles() can return null on I/O error, which would NPE
// in Arrays.stream — confirm callers guarantee the directory is listable.
public Response daemonLogPage(String fileName, Integer start, Integer length, String grep, String user) throws IOException, InvalidRequestException { Path file = daemonLogRoot.resolve(fileName).toAbsolutePath().normalize(); if (!file.startsWith(daemonLogRoot) || Paths.get(fileName).getNameCount() != 1) { //Prevent fileName from pathing into worker logs, or outside daemon log root return LogviewerResponseBuilder.buildResponsePageNotFound(); } if (file.toFile().exists()) { // all types of files included List<File> logFiles = Arrays.stream(daemonLogRoot.toFile().listFiles()) .filter(File::isFile) .collect(toList()); List<String> reorderedFilesStr = logFiles.stream() .map(File::getName) .filter(fName -> !StringUtils.equals(fileName, fName)) .collect(toList()); reorderedFilesStr.add(fileName); length = length != null ? Math.min(10485760, length) : LogviewerConstant.DEFAULT_BYTES_PER_PAGE; final boolean isZipFile = file.getFileName().toString().endsWith(".gz"); long fileLength = getFileLength(file.toFile(), isZipFile); if (start == null) { start = Long.valueOf(fileLength - length).intValue(); } String logString = isTxtFile(fileName) ? escapeHtml(pageFile(file.toString(), isZipFile, fileLength, start, length)) : escapeHtml("This is a binary file and cannot display! 
You may download the full file."); List<DomContent> bodyContents = new ArrayList<>(); if (StringUtils.isNotEmpty(grep)) { String matchedString = String.join("\n", Arrays.stream(logString.split("\n")) .filter(str -> str.contains(grep)).collect(toList())); bodyContents.add(pre(matchedString).withId("logContent")); } else { DomContent pagerData = null; if (isTxtFile(fileName)) { pagerData = pagerLinks(fileName, start, length, Long.valueOf(fileLength).intValue(), "daemonlog"); } bodyContents.add(searchFileForm(fileName, "yes")); // list all daemon logs bodyContents.add(logFileSelectionForm(reorderedFilesStr, fileName, "daemonlog")); if (pagerData != null) { bodyContents.add(pagerData); } bodyContents.add(daemonDownloadLink(fileName)); bodyContents.add(pre(logString).withClass("logContent")); if (pagerData != null) { bodyContents.add(pagerData); } } String content = logTemplate(bodyContents, fileName, user).render(); return LogviewerResponseBuilder.buildSuccessHtmlResponse(content); } else { return LogviewerResponseBuilder.buildResponsePageNotFound(); } }
// Path-traversal guard: requesting a worker-log path through the daemon log endpoint must 404.
@Test public void testDaemonLogPagePathIntoWorkerLogs() throws Exception { try (TmpPath rootPath = new TmpPath()) { LogviewerLogPageHandler handler = createHandlerForTraversalTests(rootPath.getFile().toPath()); final Response returned = handler.daemonLogPage("workers-artifacts/topoA/worker.log", 0, 100, null, "user"); Utils.forceDelete(rootPath.toString()); //Should not show files outside log root. assertThat(returned.getStatus(), is(Response.Status.NOT_FOUND.getStatusCode())); } }
@Nonnull public <K, V> KafkaProducer<K, V> getProducer(@Nullable String transactionalId) { if (getConfig().isShared()) { if (transactionalId != null) { throw new IllegalArgumentException("Cannot use transactions with shared " + "KafkaProducer for DataConnection" + getConfig().getName()); } retain(); //noinspection unchecked return (KafkaProducer<K, V>) producerSupplier.get(); } else { if (transactionalId != null) { @SuppressWarnings({"rawtypes", "unchecked"}) Map<String, Object> castProperties = (Map) getConfig().getProperties(); Map<String, Object> copy = new HashMap<>(castProperties); copy.put("transactional.id", transactionalId); return new KafkaProducer<>(copy); } else { return new KafkaProducer<>(getConfig().getProperties()); } } }
// Requesting a shared producer with extra properties must be rejected, since per-call properties
// cannot be applied to the pooled instance.
@Test public void shared_producer_should_not_be_created_with_additional_props() { kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport); Properties properties = new Properties(); properties.setProperty("A", "B"); assertThatThrownBy(() -> kafkaDataConnection.getProducer(null, properties)) .isInstanceOf(HazelcastException.class) .hasMessageContaining("For shared Kafka producer, please provide all serialization options"); kafkaDataConnection.release(); }
// Convenience overload: extracts the MapFunction's output type from its generic signature,
// delegating to the full variant with no function name (null) and non-lenient error handling (false).
@PublicEvolving public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes( MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) { return getMapReturnTypes(mapInterface, inType, null, false); }
// An anonymous subclass of IdentityMapper<Boolean> must still resolve its return type to the
// BOOLEAN basic type via the input type information.
@SuppressWarnings({"rawtypes", "unchecked"}) @Test void testFunctionDependingOnInputAsSuperclass() { IdentityMapper<Boolean> function = new IdentityMapper<Boolean>() { private static final long serialVersionUID = 1L; }; TypeInformation<?> ti = TypeExtractor.getMapReturnTypes(function, Types.BOOLEAN); assertThat(ti.isBasicType()).isTrue(); assertThat(ti).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO); }
// Accessor for the device id this request targets.
public DeviceId deviceId() { return deviceId; }
// equals/hashCode contract: APPLY requests on the same device are one equality group, RELEASE
// requests on another device form a second; verified with Guava's EqualsTester.
@Test public void testEquality() { DeviceId deviceId1 = DeviceId.deviceId("of:0001"); DeviceId deviceId2 = DeviceId.deviceId("of:0002"); long apply = 2; ImmutableSet<LabelResource> releaseCollection = ImmutableSet .copyOf(Collections.emptySet()); LabelResourceRequest h1 = new LabelResourceRequest( deviceId1, LabelResourceRequest.Type.APPLY, apply, null); LabelResourceRequest h2 = new LabelResourceRequest( deviceId1, LabelResourceRequest.Type.APPLY, apply, null); LabelResourceRequest h3 = new LabelResourceRequest( deviceId2, LabelResourceRequest.Type.RELEASE, 0, releaseCollection); LabelResourceRequest h4 = new LabelResourceRequest( deviceId2, LabelResourceRequest.Type.RELEASE, 0, releaseCollection); new EqualsTester().addEqualityGroup(h1, h2).addEqualityGroup(h3, h4) .testEquals(); }
/**
 * Validates that a row-id partition component is supplied whenever the projected columns
 * include the synthetic row-id column.
 *
 * @param columns                 columns projected by the scan
 * @param rowIdPartitionComponent partition component identifying rows, if any
 * @throws IllegalArgumentException if row IDs are requested but the component is absent
 */
public static void checkRowIDPartitionComponent(List<HiveColumnHandle> columns, Optional<byte[]> rowIdPartitionComponent)
{
    // Method reference replaces the equivalent explicit lambda.
    boolean supplyRowIDs = columns.stream().anyMatch(HiveColumnHandle::isRowIdColumnHandle);
    if (supplyRowIDs) {
        checkArgument(rowIdPartitionComponent.isPresent(), "rowIDPartitionComponent required when supplying row IDs");
    }
}
// When no row-id column is projected, an absent partition component must be accepted
// (the method simply returns without throwing).
@Test
public void testCheckRowIDPartitionComponent_noRowID() {
    HiveColumnHandle handle = HiveColumnHandle.pathColumnHandle();
    List<HiveColumnHandle> columns = ImmutableList.of(handle);
    checkRowIDPartitionComponent(columns, Optional.empty());
}
/**
 * Takes an element from the underlying queue, then returns its accounted memory to the limiter.
 *
 * NOTE(review): if {@code releaseInterruptibly} throws {@link InterruptedException} here, the
 * element has already been removed from the queue and is never returned to the caller —
 * confirm the limiter's semantics make this acceptable.
 *
 * @return the dequeued element
 * @throws InterruptedException if interrupted while waiting or while releasing memory
 */
@Override
public E take() throws InterruptedException {
    final E e = super.take();
    memoryLimiter.releaseInterruptibly(e);
    return e;
}
// take() on an empty queue must block until an element is put, then return it.
// NOTE(review): the fixed Thread.sleep(2000) only makes it *likely* that the worker is
// blocked in take() before the put — timing-based synchronization can be flaky/slow.
@Test
void testTake() throws InterruptedException, ExecutionException {
    MemoryLimitedLinkedBlockingQueue<Runnable> queue = new MemoryLimitedLinkedBlockingQueue<>(instrumentation);
    ExecutorService executorService = Executors.newFixedThreadPool(1);
    Future<Runnable> takeResult = executorService.submit(queue::take);
    Thread.sleep(2000);
    queue.put(() -> {
    });
    takeResult.get();
    assertEquals(0, queue.size());
}
/** Commits the current consumed offsets asynchronously with no completion callback. */
@Override
public void commitAsync() {
    commitAsync(null);
}
// When the async commit fails, configured consumer interceptors must NOT receive an
// onCommit callback — neither before nor after the commit callback is invoked.
@Test
public void testNoInterceptorCommitAsyncFailed() {
    Properties props = requiredConsumerConfigAndGroupId("test-id");
    props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName());
    props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    consumer = newConsumer(props);
    assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get());
    // Arrange the background commit event to complete exceptionally.
    completeCommitAsyncApplicationEventExceptionally(new KafkaException("Test exception"));

    consumer.commitAsync(mockTopicPartitionOffset(), new MockCommitCallback());
    assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());

    forceCommitCallbackInvocation();
    assertEquals(0, MockConsumerInterceptor.ON_COMMIT_COUNT.get());
}
/**
 * Creates a non-relocated index with the given name.
 * Name validation presumably happens in the Index constructor — the tests show an
 * IllegalArgumentException for names other than lower-case letters or "_all".
 */
public static Index simple(String name) {
    return new Index(name, false);
}
// "_all" is the single underscore-containing name accepted; any other name with an
// underscore (here the bare "_") must be rejected with a descriptive message.
@Test
public void simple_index_name_can_not_contain_underscore_except__all_keyword() {
    // doesn't fail
    Index.simple("_all");

    assertThatThrownBy(() -> Index.simple("_"))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("Index name must be lower-case letters or '_all': _");
}
/** Returns the fully-qualified JDBC driver class for Amazon Redshift. */
@Override
public String getDriverClass() {
    return "com.amazon.redshift.jdbc.Driver";
}
// The database metadata must report the official Amazon Redshift JDBC driver class.
@Test
public void testGetDriverClass() throws Exception {
    final String expectedDriver = "com.amazon.redshift.jdbc.Driver";
    assertEquals( expectedDriver, dbMeta.getDriverClass() );
}
/**
 * Writes a single mutation by delegating to the batched {@code write(Iterable)} overload.
 *
 * @param tableRecord the mutation to apply
 * @throws IllegalStateException propagated from the batched write — the tests show this
 *         happens when writing before the database/DDL has been set up
 */
public synchronized void write(Mutation tableRecord) throws IllegalStateException {
    write(ImmutableList.of(tableRecord));
}
// Writing mutations before the DDL statement has been executed must fail fast with an
// IllegalStateException instead of attempting the writes.
@Test
public void testWriteMultipleRecordsShouldThrowExceptionWhenCalledBeforeExecuteDdlStatement() {
    // arrange
    ImmutableList<Mutation> testMutations =
        ImmutableList.of(
            Mutation.newInsertOrUpdateBuilder("SingerId")
                .set("SingerId")
                .to(1)
                .set("FirstName")
                .to("Marc")
                .set("LastName")
                .to("Richards")
                .build(),
            Mutation.newInsertOrUpdateBuilder("SingerId")
                .set("SingerId")
                .to(2)
                .set("FirstName")
                .to("Catalina")
                .set("LastName")
                .to("Smith")
                .build());

    // act & assert
    assertThrows(IllegalStateException.class, () -> testManager.write(testMutations));
}
/**
 * Acknowledges a batch of messages for the given consumer group and topic.
 * <p>
 * Handles that are already expired are answered immediately with
 * {@code EXPIRED_HANDLE_PROXY_EXCEPTION} and never sent to a broker; the remaining handles
 * are grouped by broker name so each broker receives a single batched call. The returned
 * future completes with one {@link BatchAckResult} per input handle (order follows the
 * expired-first, then per-broker grouping).
 *
 * @param ctx               proxy call context
 * @param handleMessageList receipt handles with their message ids
 * @param consumerGroup     consumer group acking the messages
 * @param topic             topic of the messages
 * @param timeoutMillis     per-broker call timeout
 * @return future of one result per input handle, delivered on {@code this.executor}
 */
public CompletableFuture<List<BatchAckResult>> batchAckMessage(
    ProxyContext ctx,
    List<ReceiptHandleMessage> handleMessageList,
    String consumerGroup,
    String topic,
    long timeoutMillis
) {
    CompletableFuture<List<BatchAckResult>> future = new CompletableFuture<>();
    try {
        List<BatchAckResult> batchAckResultList = new ArrayList<>(handleMessageList.size());
        Map<String, List<ReceiptHandleMessage>> brokerHandleListMap = new HashMap<>();

        // Answer expired handles immediately; group live handles by broker name.
        for (ReceiptHandleMessage handleMessage : handleMessageList) {
            if (handleMessage.getReceiptHandle().isExpired()) {
                batchAckResultList.add(new BatchAckResult(handleMessage, EXPIRED_HANDLE_PROXY_EXCEPTION));
                continue;
            }
            List<ReceiptHandleMessage> brokerHandleList = brokerHandleListMap.computeIfAbsent(
                handleMessage.getReceiptHandle().getBrokerName(), key -> new ArrayList<>());
            brokerHandleList.add(handleMessage);
        }

        // All handles expired: nothing to send, complete with the expired results.
        if (brokerHandleListMap.isEmpty()) {
            return FutureUtils.addExecutor(CompletableFuture.completedFuture(batchAckResultList), this.executor);
        }

        Set<Map.Entry<String, List<ReceiptHandleMessage>>> brokerHandleListMapEntrySet = brokerHandleListMap.entrySet();
        CompletableFuture<List<BatchAckResult>>[] futures = new CompletableFuture[brokerHandleListMapEntrySet.size()];
        int futureIndex = 0;
        for (Map.Entry<String, List<ReceiptHandleMessage>> entry : brokerHandleListMapEntrySet) {
            futures[futureIndex++] = processBrokerHandle(ctx, consumerGroup, topic, entry.getValue(), timeoutMillis);
        }

        CompletableFuture.allOf(futures).whenComplete((val, throwable) -> {
            if (throwable != null) {
                future.completeExceptionally(throwable);
                // FIX: stop here. The original fell through after completing the future
                // exceptionally, called join() on the (failed) per-broker futures — which
                // throws inside the callback — and then attempted a second completion.
                return;
            }
            // allOf succeeded, so every per-broker future is done; join() cannot block.
            for (CompletableFuture<List<BatchAckResult>> resultFuture : futures) {
                batchAckResultList.addAll(resultFuture.join());
            }
            future.complete(batchAckResultList);
        });
    } catch (Throwable t) {
        future.completeExceptionally(t);
    }
    return FutureUtils.addExecutor(future, this.executor);
}
// When every handle in the batch is already expired, no broker call may be made and each
// handle must be answered with a proxy exception instead of an ack result.
@Test
public void testBatchAckExpireMessage() throws Throwable {
    String brokerName1 = "brokerName1";
    List<ReceiptHandleMessage> receiptHandleMessageList = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        // bornTime 10s in the past with a 3s invisible period => handle is expired.
        MessageExt expireMessage = createMessageExt(TOPIC, "", 0, 3000, System.currentTimeMillis() - 10000,
            0, 0, 0, i, brokerName1);
        ReceiptHandle expireHandle = create(expireMessage);
        receiptHandleMessageList.add(new ReceiptHandleMessage(expireHandle, expireMessage.getMsgId()));
    }
    List<BatchAckResult> batchAckResultList = this.consumerProcessor.batchAckMessage(createContext(), receiptHandleMessageList, CONSUMER_GROUP, TOPIC, 3000).get();

    verify(this.messageService, never()).batchAckMessage(any(), anyList(), anyString(), anyString(), anyLong());
    assertEquals(receiptHandleMessageList.size(), batchAckResultList.size());
    for (BatchAckResult batchAckResult : batchAckResultList) {
        assertNull(batchAckResult.getAckResult());
        assertNotNull(batchAckResult.getProxyException());
        assertNotNull(batchAckResult.getReceiptHandleMessage());
    }
}
/** Fills the server config with the base settings, then lets the provider apply its own. */
@Override
public void getConfig(StorServerConfig.Builder builder) {
    super.getConfig(builder);
    provider.getConfig(builder);
}
// maximum_nodes_per_merge defaults to 16 and is overridable via
// <tuning><merges max-nodes-per-merge=.../></tuning> in the content cluster definition.
@Test
void testMaxMergesPerNode() {
    StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
    DistributorCluster dcluster = parse("<content id=\"storage\">\n" +
            "  <redundancy>3</redundancy>" +
            "  <documents/>" +
            "  <group>" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
            "  </group>" +
            "</content>");
    ((ContentCluster) dcluster.getParent()).getConfig(builder);
    StorDistributormanagerConfig conf = new StorDistributormanagerConfig(builder);
    assertEquals(16, conf.maximum_nodes_per_merge());

    builder = new StorDistributormanagerConfig.Builder();
    dcluster = parse("<content id=\"storage\">\n" +
            "  <redundancy>3</redundancy>" +
            "  <documents/>" +
            "  <tuning>\n" +
            "    <merges max-nodes-per-merge=\"4\"/>\n" +
            "  </tuning>\n" +
            "  <group>" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
            "  </group>" +
            "</content>");
    ((ContentCluster) dcluster.getParent()).getConfig(builder);
    conf = new StorDistributormanagerConfig(builder);
    assertEquals(4, conf.maximum_nodes_per_merge());
}
/**
 * Stores the task configurations for an existing connector and notifies the update
 * listener (when one is registered) with the affected task ids.
 *
 * @throws IllegalArgumentException if no connector with the given name exists
 */
@Override
public synchronized void putTaskConfigs(String connector, List<Map<String, String>> configs) {
    final ConnectorState connectorState = connectors.get(connector);
    if (connectorState == null) {
        throw new IllegalArgumentException("Cannot put tasks for non-existing connector");
    }
    final Map<ConnectorTaskId, Map<String, String>> newTaskConfigs = taskConfigListAsMap(connector, configs);
    connectorState.taskConfigs = newTaskConfigs;
    connectorState.applyConfig();
    if (updateListener != null) {
        updateListener.onTaskConfigUpdate(newTaskConfigs.keySet());
    }
}
// Task configs can only be written for an existing connector; once written, the snapshot
// exposes them and the listener is notified of both the connector and task updates.
@Test
public void testPutTaskConfigs() {
    // Can't write task configs for non-existent connector
    assertThrows(IllegalArgumentException.class,
        () -> configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1))));

    configStore.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null);
    configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1)));

    ClusterConfigState configState = configStore.snapshot();
    ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_IDS.get(0), 0);
    assertEquals(1, configState.taskCount(CONNECTOR_IDS.get(0)));
    assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(taskId));

    verify(configUpdateListener).onConnectorConfigUpdate(eq(CONNECTOR_IDS.get(0)));
    verify(configUpdateListener).onTaskConfigUpdate(eq(Collections.singleton(taskId)));
}
/**
 * Opens an upload stream for the given file.
 * <p>
 * When the transfer status carries no pre-negotiated upload URL, a simple-upload resource is
 * either updated (file exists) or freshly created; otherwise the URL and resource id from the
 * status are reused. The actual transfer happens lazily in the delayed-entity callable: a PUT
 * for chunked (segmented) uploads from the large-upload service, a POST otherwise.
 */
@Override
public HttpResponseOutputStream<Chunk> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final String uploadUri;
    final String resourceId;
    if(null == status.getUrl()) {
        if(status.isExists()) {
            // Existing file: reuse its resource id and request an update-upload URI.
            resourceId = fileid.getFileId(file);
            uploadUri = EueUploadHelper.updateResource(session, resourceId, status, UploadType.SIMPLE).getUploadURI();
        }
        else {
            // New file: create the resource under the parent and take id + URI from the response.
            final ResourceCreationResponseEntry uploadResourceCreationResponseEntry = EueUploadHelper
                    .createResource(session, fileid.getFileId(file.getParent()), file.getName(), status, UploadType.SIMPLE);
            resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(uploadResourceCreationResponseEntry.getHeaders().getLocation());
            uploadUri = uploadResourceCreationResponseEntry.getEntity().getUploadURI();
        }
    }
    else {
        uploadUri = status.getUrl();
        resourceId = status.getParameters().get(RESOURCE_ID);
    }
    final HttpResponseOutputStream<Chunk> stream = this.write(file, status, new DelayedHttpEntityCallable<Chunk>(file) {
        @Override
        public Chunk call(final HttpEntity entity) throws BackgroundException {
            try {
                final HttpResponse response;
                // NOTE(review): parameters are appended with '&' — assumes uploadUri already
                // contains a query string; confirm against the upload service contract.
                final StringBuilder uploadUriWithParameters = new StringBuilder(uploadUri);
                if(!Checksum.NONE.equals(status.getChecksum())) {
                    uploadUriWithParameters.append(String.format("&x_cdash64=%s",
                            new ChunkListSHA256ChecksumCompute().compute(status.getLength(), Hex.decodeHex(status.getChecksum().hash))));
                }
                if(status.getLength() != -1) {
                    uploadUriWithParameters.append(String.format("&x_size=%d", status.getLength()));
                }
                if(status.isSegment()) {
                    // Chunked upload from large upload service; offset derived from part number.
                    uploadUriWithParameters.append(String.format("&x_offset=%d",
                            new HostPreferences(session.getHost()).getLong("eue.upload.multipart.size") * (status.getPart() - 1)));
                    final HttpPut request = new HttpPut(uploadUriWithParameters.toString());
                    request.setEntity(entity);
                    response = session.getClient().execute(request);
                }
                else {
                    final HttpPost request = new HttpPost(uploadUriWithParameters.toString());
                    request.setEntity(entity);
                    request.setHeader(HttpHeaders.CONTENT_TYPE, MimeTypeService.DEFAULT_CONTENT_TYPE);
                    response = session.getClient().execute(request);
                }
                try {
                    if(response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
                        return new Chunk(resourceId, status.getPart(), status.getLength(), status.getChecksum());
                    }
                    // Buffer the entity so the error mapper can read it after consume below.
                    EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
                    throw new EueExceptionMappingService().map(response);
                }
                finally {
                    EntityUtils.consume(response.getEntity());
                }
            }
            catch(HttpResponseException e) {
                throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
            }
            catch(IOException e) {
                throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
            }
            catch(DecoderException e) {
                throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e);
            }
        }

        @Override
        public long getContentLength() {
            return status.getLength();
        }
    });
    // Remember the resource id so later lookups for this path avoid a server round-trip.
    fileid.cache(file, resourceId);
    return stream;
}
// End-to-end write test: upload a new file (checking checksum, size, timestamps and content
// round-trip), then overwrite it and verify the revision/etag change and the container's
// modification date is bumped both times.
@Test
public void testWrite() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final EueWriteFeature feature = new EueWriteFeature(session, fileid);
    final Path container = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
    long containerModification = new EueAttributesFinderFeature(session, fileid).find(container).getModificationDate();
    final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    String resourceId;
    // Initial upload of a new file.
    {
        final byte[] content = RandomUtils.nextBytes(8235);
        final long modified = System.currentTimeMillis();
        final long created = 1695161463630L;
        final TransferStatus status = new TransferStatus().withLength(content.length).withModified(modified).withCreated(created);
        final Checksum checksum = feature.checksum(file, status).compute(new ByteArrayInputStream(content), new TransferStatus().withLength(content.length));
        status.withChecksum(checksum);
        final HttpResponseOutputStream<EueWriteFeature.Chunk> out = feature.write(file, status, new DisabledConnectionCallback());
        final ByteArrayInputStream in = new ByteArrayInputStream(content);
        final TransferStatus progress = new TransferStatus();
        final BytecountStreamListener count = new BytecountStreamListener();
        new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out);
        assertEquals(content.length, count.getSent());
        in.close();
        out.close();
        assertEquals(checksum, out.getStatus().getChecksum());
        // Writing into the folder must update the folder's modification date.
        assertNotEquals(containerModification, new EueAttributesFinderFeature(session, fileid).find(container).getModificationDate());
        resourceId = status.getResponse().getFileId();
        assertTrue(new EueFindFeature(session, fileid).find(file));
        final PathAttributes attributes = new EueAttributesFinderFeature(session, fileid).find(file);
        assertEquals(content.length, attributes.getSize());
        assertEquals(modified, attributes.getModificationDate());
        assertEquals(created, attributes.getCreationDate());
        // Read back and compare byte-for-byte.
        final byte[] compare = new byte[content.length];
        final InputStream stream = new EueReadFeature(session, fileid).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
        IOUtils.readFully(stream, compare);
        stream.close();
        assertArrayEquals(content, compare);
    }
    containerModification = new EueAttributesFinderFeature(session, fileid).find(container).getModificationDate();
    // Override
    {
        final PathAttributes previous = new EueAttributesFinderFeature(session, fileid).find(file);
        final byte[] content = RandomUtils.nextBytes(6231);
        final long ts = System.currentTimeMillis();
        final TransferStatus status = new TransferStatus().withLength(content.length).withModified(ts);
        final Checksum checksum = feature.checksum(file, status).compute(new ByteArrayInputStream(content), new TransferStatus().withLength(content.length));
        status.withChecksum(checksum).exists(true);
        final HttpResponseOutputStream<EueWriteFeature.Chunk> out = feature.write(file, status, new DisabledConnectionCallback());
        final ByteArrayInputStream in = new ByteArrayInputStream(content);
        final TransferStatus progress = new TransferStatus();
        final BytecountStreamListener count = new BytecountStreamListener();
        new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out);
        assertEquals(content.length, count.getSent());
        in.close();
        out.close();
        assertEquals(checksum, out.getStatus().getChecksum());
        assertTrue(new EueFindFeature(session, fileid).find(file));
        assertNotEquals(containerModification, new EueAttributesFinderFeature(session, fileid).find(container).getModificationDate());
        // Overwriting must produce a new revision with different etag/attributes.
        final PathAttributes attributes = new EueAttributesFinderFeature(session, fileid).find(file);
        assertNotEquals(previous, attributes);
        assertNotEquals(previous.getETag(), attributes.getETag());
        assertNotEquals(previous.getRevision(), attributes.getRevision());
        assertEquals(ts, attributes.getModificationDate());
        assertEquals(content.length, attributes.getSize());
        final byte[] compare = new byte[content.length];
        final InputStream stream = new EueReadFeature(session, fileid).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
        IOUtils.readFully(stream, compare);
        stream.close();
        assertArrayEquals(content, compare);
    }
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Adds the connection settings for one MirrorMaker 2 cluster — alias, bootstrap servers,
 * TLS and client authentication — to the connector configuration, each key prefixed with
 * {@code configPrefix}. Secrets are referenced through the Kafka file config provider
 * ({@code ${file:...}}) rather than embedded directly.
 */
static void addClusterToMirrorMaker2ConnectorConfig(Map<String, Object> config, KafkaMirrorMaker2ClusterSpec cluster, String configPrefix) {
    config.put(configPrefix + "alias", cluster.getAlias());
    config.put(configPrefix + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());

    // TLS configuration may already decide the base security protocol (PLAINTEXT/SSL);
    // SASL-based authentication below overrides it with SASL_SSL or SASL_PLAINTEXT.
    String securityProtocol = addTLSConfigToMirrorMaker2ConnectorConfig(config, cluster, configPrefix);

    if (cluster.getAuthentication() != null) {
        if (cluster.getAuthentication() instanceof KafkaClientAuthenticationTls) {
            // mTLS: keystore mounted per-cluster-alias, password via the file provider.
            config.put(configPrefix + SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12");
            config.put(configPrefix + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, STORE_LOCATION_ROOT + cluster.getAlias() + KEYSTORE_SUFFIX);
            config.put(configPrefix + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "${file:" + CONNECTORS_CONFIG_FILE + ":" + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG + "}");
        } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationPlain plainAuthentication) {
            securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
            config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "PLAIN");
            config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.plain.PlainLoginModule", Map.of("username", plainAuthentication.getUsername(), "password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}")));
        } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationScram scramAuthentication) {
            securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
            // SHA-256 vs SHA-512 chosen by the concrete SCRAM subtype.
            config.put(configPrefix + SaslConfigs.SASL_MECHANISM, scramAuthentication instanceof KafkaClientAuthenticationScramSha256 ? "SCRAM-SHA-256" : "SCRAM-SHA-512");
            config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.scram.ScramLoginModule", Map.of("username", scramAuthentication.getUsername(), "password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}")));
        } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationOAuth oauthAuthentication) {
            securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
            config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "OAUTHBEARER");
            config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, oauthJaasConfig(cluster, oauthAuthentication));
            config.put(configPrefix + SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler");
        }
    }

    // Security protocol
    config.put(configPrefix + AdminClientConfig.SECURITY_PROTOCOL_CONFIG, securityProtocol);

    // User-supplied per-cluster config (prefixed) and raw additional properties last,
    // so they can override the generated entries.
    config.putAll(cluster.getConfig().entrySet().stream()
            .collect(Collectors.toMap(entry -> configPrefix + entry.getKey(), Map.Entry::getValue)));
    config.putAll(cluster.getAdditionalProperties());
}
// SCRAM-SHA-512 auth without TLS must yield SASL_PLAINTEXT, the SCRAM-SHA-512 mechanism,
// and a JAAS config whose password goes through the file config provider.
@Test
public void testAddClusterToMirrorMaker2ConnectorConfigWithScram() {
    Map<String, Object> config = new HashMap<>();
    KafkaMirrorMaker2ClusterSpec cluster = new KafkaMirrorMaker2ClusterSpecBuilder()
            .withAlias("sourceClusterAlias")
            .withBootstrapServers("sourceClusterAlias.sourceNamespace.svc:9092")
            .withNewKafkaClientAuthenticationScramSha512()
                .withUsername("shaza")
                .withNewPasswordSecret()
                .withPassword("pa55word")
                .endPasswordSecret()
            .endKafkaClientAuthenticationScramSha512()
            .build();

    KafkaMirrorMaker2Connectors.addClusterToMirrorMaker2ConnectorConfig(config, cluster, PREFIX);

    // JAAS config is checked structurally; remove it before comparing the remaining map.
    String jaasConfig = (String) config.remove("prefix.sasl.jaas.config");
    AppConfigurationEntry configEntry = AuthenticationUtilsTest.parseJaasConfig(jaasConfig);
    assertThat(configEntry.getLoginModuleName(), is("org.apache.kafka.common.security.scram.ScramLoginModule"));
    assertThat(configEntry.getOptions(),
            is(Map.of("username", "shaza",
                    "password", "${file:/tmp/strimzi-mirrormaker2-connector.properties:sourceClusterAlias.sasl.password}")));

    assertThat(new TreeMap<>(config), is(new TreeMap<>(Map.of("prefix.alias", "sourceClusterAlias",
            "prefix.security.protocol", "SASL_PLAINTEXT",
            "prefix.sasl.mechanism", "SCRAM-SHA-512",
            "prefix.bootstrap.servers", "sourceClusterAlias.sourceNamespace.svc:9092"))));
}
/**
 * Returns the raw entries this machine list was built from, as configured.
 * Exposed for tests only.
 */
@VisibleForTesting
public Collection<String> getCollection() {
    return entries;
}
@Test
public void testGetCollection() {
    // create MachineList with a mix of ip addresses, hostnames and ip ranges
    MachineList ml = new MachineList(HOSTNAME_IP_CIDR_LIST, new TestAddressFactory());

    Collection<String> col = ml.getCollection();
    // test getCollection returns the full collection, with every configured entry present
    assertEquals(7,ml.getCollection().size());
    for (String item:StringUtils.getTrimmedStringCollection(HOSTNAME_IP_CIDR_LIST)) {
        assertTrue(col.contains(item));
    }
}
/**
 * Builds the standard cleaning pipeline for NOP radar tracks. Cleaners run in the listed
 * order; earlier steps remove bad points/tracks so later smoothing sees cleaner data.
 */
public static DataCleaner<Track<NopHit>> coreSmoothing() {
    return CompositeCleaner.of(
        //removes error-prone synthetic "assumed" points from Nop data
        new CoastedPointRemover<>(),
        //remove both points if any two sequential points are within 500 Milliseconds
        new HighFrequencyPointRemover<>(Duration.ofMillis(500)),
        //remove tracks with just a handful of points
        new SmallTrackRemover(9),
        /*
         * ensure any two sequential points have at least 4 seconds between them (by removing
         * only the trailing points)
         */
        new TimeDownSampler<>(Duration.ofMillis(4_000)),
        //removes near-stationary Tracks produces by "radar mirages" off of skyscrapers and such
        new RemoveLowVariabilityTracks<>(),
        //removes near-duplicate points when a track is stationary.
        new DistanceDownSampler<>(),
        //forces 000 altitudes to null
        new ZeroAltitudeToNull<>(),
        //correct missing altitude values
        new FillMissingAltitudes<>(),
        //correct the altitude values for outlying Points
        new VerticalOutlierDetector<>(),
        //remove points with inconsistent LatLong values
        new LateralOutlierDetector<>(),
        //remove radar noise using polynomial fitting
        new TrackFilter<>()
    );
}
// Regression test for bug 292: tracks that used to make the smoothing pipeline produce
// negative speeds must now clean without throwing.
// NOTE(review): method name typo "Nevative" — left as-is since renaming the test changes
// its reported identifier.
@Test
public void bug292_generatingNevativeSpeeds() {
    for (String exampleFile : exampleFiles()) {
        Track<NopHit> rawTrackDataThatGeneratesNegativeSpeeds = createTrackFromResource(
            TrackSmoothing.class,
            exampleFile
        );
        DataCleaner<Track<NopHit>> cleaner = coreSmoothing();
        assertDoesNotThrow(
            () -> cleaner.clean(rawTrackDataThatGeneratesNegativeSpeeds)
        );
    }
}
/**
 * Serializes a {@link LeftOrRightValue} as a one-byte side marker (1 = left, 0 = right)
 * followed by the raw bytes of whichever value is present. Returns {@code null} for a
 * {@code null} input or when the inner serializer yields {@code null}.
 */
@Override
public byte[] serialize(final String topic, final LeftOrRightValue<V1, V2> data) {
    if (data == null) {
        return null;
    }
    final boolean isLeft = data.getLeftValue() != null;
    final byte[] payload = isLeft
        ? leftSerializer.serialize(topic, data.getLeftValue())
        : rightSerializer.serialize(topic, data.getRightValue());
    if (payload == null) {
        return null;
    }
    final ByteBuffer buffer = ByteBuffer.allocate(1 + payload.length);
    buffer.put(isLeft ? (byte) 1 : (byte) 0);
    buffer.put(payload);
    return buffer.array();
}
// Round-trip a right-sided Integer value through the serde and verify it deserializes
// back to an equal LeftOrRightValue.
@Test
public void shouldSerializeIntegerValue() {
    final int value = 5;
    final LeftOrRightValue<String, Integer> leftOrRightValue = LeftOrRightValue.makeRightValue(value);

    final byte[] serialized =
        STRING_OR_INTEGER_SERDE.serializer().serialize(TOPIC, leftOrRightValue);

    assertThat(serialized, is(notNullValue()));

    final LeftOrRightValue<String, Integer> deserialized =
        STRING_OR_INTEGER_SERDE.deserializer().deserialize(TOPIC, serialized);

    assertThat(deserialized, is(leftOrRightValue));
}
/**
 * Computes input splits for a JDBC-backed table.
 * <p>
 * One split is returned when splitting is disabled or only one partition is requested.
 * With a partition column, the [lowerBound, upperBound] range (bounds fetched from the
 * database when not configured) is cut into intervals; without one, splits are sized by
 * row counts, distributing the remainder over the first splits.
 *
 * @throws IOException wrapping any failure, including an unknown or non-primitive
 *         partition column or unresolvable bounds
 */
@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    try {
        String partitionColumn = job.get(Constants.JDBC_PARTITION_COLUMN);
        int numPartitions = job.getInt(Constants.JDBC_NUM_PARTITIONS, -1);
        String lowerBound = job.get(Constants.JDBC_LOW_BOUND);
        String upperBound = job.get(Constants.JDBC_UPPER_BOUND);

        InputSplit[] splits;

        if (!job.getBoolean(Constants.JDBC_SPLIT_QUERY, true) || numPartitions <= 1) {
            // We will not split this query if:
            // 1. hive.sql.query.split is set to false (either manually or automatically by calcite
            // 2. numPartitions == 1
            splits = new InputSplit[1];
            splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
            LOGGER.info("Creating 1 input split " + splits[0]);
            return splits;
        }

        dbAccessor = DatabaseAccessorFactory.getAccessor(job);
        Path[] tablePaths = FileInputFormat.getInputPaths(job);

        // We will split this query into n splits
        LOGGER.debug("Creating {} input splits", numPartitions);

        if (partitionColumn != null) {
            List<String> columnNames = dbAccessor.getColumnNames(job);
            if (!columnNames.contains(partitionColumn)) {
                throw new IOException("Cannot find partitionColumn:" + partitionColumn + " in " + columnNames);
            }
            List<TypeInfo> hiveColumnTypesList = dbAccessor.getColumnTypes(job);
            TypeInfo typeInfo = hiveColumnTypesList.get(columnNames.indexOf(partitionColumn));
            if (!(typeInfo instanceof PrimitiveTypeInfo)) {
                throw new IOException(partitionColumn + " is a complex type, only primitive type can be a partition column");
            }
            // Ask the database for whichever bound(s) the user did not configure.
            if (lowerBound == null || upperBound == null) {
                Pair<String, String> boundary = dbAccessor.getBounds(job, partitionColumn, lowerBound == null,
                        upperBound == null);
                if (lowerBound == null) {
                    lowerBound = boundary.getLeft();
                }
                if (upperBound == null) {
                    upperBound = boundary.getRight();
                }
            }
            if (lowerBound == null) {
                throw new IOException("lowerBound of " + partitionColumn + " cannot be null");
            }
            if (upperBound == null) {
                throw new IOException("upperBound of " + partitionColumn + " cannot be null");
            }
            IntervalSplitter intervalSplitter = IntervalSplitterFactory.newIntervalSpitter(typeInfo);
            List<MutablePair<String, String>> intervals = intervalSplitter.getIntervals(lowerBound, upperBound,
                    numPartitions, typeInfo);
            if (intervals.size()<=1) {
                // Range too narrow to split further: fall back to a single split.
                LOGGER.debug("Creating 1 input splits");
                splits = new InputSplit[1];
                splits[0] = new JdbcInputSplit(FileInputFormat.getInputPaths(job)[0]);
                return splits;
            }
            // Open the outer interval ends so boundary rows are never missed.
            intervals.get(0).setLeft(null);
            intervals.get(intervals.size()-1).setRight(null);
            splits = new InputSplit[intervals.size()];
            for (int i = 0; i < intervals.size(); i++) {
                splits[i] = new JdbcInputSplit(partitionColumn, intervals.get(i).getLeft(), intervals.get(i).getRight(), tablePaths[0]);
            }
        } else {
            // No partition column: split by row offsets.
            int numRecords = dbAccessor.getTotalNumberOfRecords(job);

            if (numRecords < numPartitions) {
                numPartitions = numRecords;
            }

            int numRecordsPerSplit = numRecords / numPartitions;
            int numSplitsWithExtraRecords = numRecords % numPartitions;

            LOGGER.debug("Num records = {}", numRecords);
            splits = new InputSplit[numPartitions];

            int offset = 0;
            for (int i = 0; i < numPartitions; i++) {
                int numRecordsInThisSplit = numRecordsPerSplit;
                if (i < numSplitsWithExtraRecords) {
                    numRecordsInThisSplit++;
                }

                splits[i] = new JdbcInputSplit(numRecordsInThisSplit, offset, tablePaths[0]);
                offset += numRecordsInThisSplit;
            }
        }

        // NOTE(review): the accessor field is dropped after use — presumably to avoid caching
        // a per-job accessor on this InputFormat instance; confirm intent.
        dbAccessor = null;
        LOGGER.info("Num input splits created {}", splits.length);
        for (InputSplit split : splits) {
            LOGGER.info("split:" + split.toString());
        }
        return splits;
    } catch (Exception e) {
        LOGGER.error("Error while splitting input data.", e);
        throw new IOException(e);
    }
}
// A [1,2] integer range cannot be cut into 5 intervals, so the format must fall back to a
// single split with no partition column attached.
@Test
public void testIntervalSplit_NoSplit() throws HiveJdbcDatabaseAccessException, IOException {
    JdbcInputFormat f = new JdbcInputFormat();
    when(mockDatabaseAccessor.getColumnNames(any(Configuration.class))).thenReturn(Lists.newArrayList("a"));
    List<TypeInfo> columnTypes = Collections.singletonList(TypeInfoFactory.intTypeInfo);
    when(mockDatabaseAccessor.getColumnTypes(any(Configuration.class))).thenReturn(columnTypes);

    JobConf conf = new JobConf();
    conf.set("mapred.input.dir", "/temp");
    conf.set("hive.sql.partitionColumn", "a");
    conf.set("hive.sql.numPartitions", "5");
    conf.set("hive.sql.lowerBound", "1");
    conf.set("hive.sql.upperBound", "2");
    InputSplit[] splits = f.getSplits(conf, -1);

    assertThat(splits, is(notNullValue()));
    assertThat(splits.length, is(1));
    assertNull(((JdbcInputSplit)splits[0]).getPartitionColumn());
}
/**
 * Computes the next fire time (epoch millis) at or after {@code currentTime} for a
 * five-field cron entry (minute, hour, day-of-month, month, day-of-week).
 * Returns 0 for a null/empty entry. Always lands on a whole minute.
 *
 * @throws MessageFormatException if the cron entry cannot be parsed
 * @throws ArithmeticException if no future time could be computed
 */
public static long getNextScheduledTime(final String cronEntry, long currentTime) throws MessageFormatException {

    long result = 0;

    if (cronEntry == null || cronEntry.length() == 0) {
        return result;
    }

    // Handle the once per minute case "* * * * *"
    // starting the next event at the top of the minute.
    if (cronEntry.equals("* * * * *")) {
        result = currentTime + 60 * 1000;
        // Truncate to the whole minute.
        result = result / 60000 * 60000;
        return result;
    }

    List<String> list = tokenize(cronEntry);
    List<CronEntry> entries = buildCronEntries(list);

    Calendar working = Calendar.getInstance();
    working.setTimeInMillis(currentTime);
    working.set(Calendar.SECOND, 0);

    CronEntry minutes = entries.get(MINUTES);
    CronEntry hours = entries.get(HOURS);
    CronEntry dayOfMonth = entries.get(DAY_OF_MONTH);
    CronEntry month = entries.get(MONTH);
    CronEntry dayOfWeek = entries.get(DAY_OF_WEEK);

    // Start at the top of the next minute, cron is only guaranteed to be
    // run on the minute.
    // NOTE(review): SECOND was just zeroed above, so this always adds a full 60 seconds —
    // i.e. the candidate is always the next whole minute; confirm that is the intent.
    int timeToNextMinute = 60 - working.get(Calendar.SECOND);
    working.add(Calendar.SECOND, timeToNextMinute);

    // If it's already too late in the day this will roll us over to tomorrow,
    // so we'll need to check again when done updating month and day.
    int currentMinutes = working.get(Calendar.MINUTE);
    if (!isCurrent(minutes, currentMinutes)) {
        int nextMinutes = getNext(minutes, currentMinutes, working);
        working.add(Calendar.MINUTE, nextMinutes);
    }

    int currentHours = working.get(Calendar.HOUR_OF_DAY);
    if (!isCurrent(hours, currentHours)) {
        int nextHour = getNext(hours, currentHours, working);
        working.add(Calendar.HOUR_OF_DAY, nextHour);
    }

    // We can roll into the next month here which might violate the cron setting
    // rules so we check once then recheck again after applying the month settings.
    doUpdateCurrentDay(working, dayOfMonth, dayOfWeek);

    // Start by checking if we are in the right month, if not then calculations
    // need to start from the beginning of the month to ensure that we don't end
    // up on the wrong day.  (Can happen when DAY_OF_WEEK is set and current time
    // is ahead of the day of the week to execute on).
    doUpdateCurrentMonth(working, month);

    // Now Check day of week and day of month together since they can be specified
    // together in one entry, if both "day of month" and "day of week" are restricted
    // (not "*"), then either the "day of month" field (3) or the "day of week" field
    // (5) must match the current day or the Calenday must be advanced.
    doUpdateCurrentDay(working, dayOfMonth, dayOfWeek);

    // Now we can chose the correct hour and minute of the day in question.
    currentHours = working.get(Calendar.HOUR_OF_DAY);
    if (!isCurrent(hours, currentHours)) {
        int nextHour = getNext(hours, currentHours, working);
        working.add(Calendar.HOUR_OF_DAY, nextHour);
    }

    currentMinutes = working.get(Calendar.MINUTE);
    if (!isCurrent(minutes, currentMinutes)) {
        int nextMinutes = getNext(minutes, currentMinutes, working);
        working.add(Calendar.MINUTE, nextMinutes);
    }

    result = working.getTimeInMillis();

    // Sanity check: the computed time must be strictly in the future.
    // (The exception message contains a pre-existing typo, "exection"; it is a runtime
    // string so it is left unchanged here.)
    if (result <= currentTime) {
        throw new ArithmeticException("Unable to compute next scheduled exection time.");
    }

    return result;
}
// From Monday 15 Nov 2010 09:15:30, "* * * 12 *" (any minute/hour/day in December) must
// fire at midnight on 1 Dec 2010.
@Test
public void testgetNextTimeMonth() throws MessageFormatException {
    // using an absolute date so that result will be absolute - Monday 15 Nov 2010
    Calendar current = Calendar.getInstance();
    current.set(2010, Calendar.NOVEMBER, 15, 9, 15, 30);
    LOG.debug("start:" + current.getTime());
    String test = "* * * 12 *";
    long next = CronParser.getNextScheduledTime(test, current.getTimeInMillis());

    Calendar result = Calendar.getInstance();
    result.setTimeInMillis(next);
    LOG.debug("next:" + result.getTime());

    assertEquals(0,result.get(Calendar.SECOND));
    assertEquals(0,result.get(Calendar.MINUTE));
    assertEquals(0,result.get(Calendar.HOUR_OF_DAY));
    assertEquals(1,result.get(Calendar.DAY_OF_MONTH));
    assertEquals(Calendar.DECEMBER,result.get(Calendar.MONTH));
    assertEquals(2010,result.get(Calendar.YEAR));
}
/**
 * Disables the port number input when a server instance name has been entered,
 * and re-enables it when the instance field is null or empty.
 */
public void disablePortIfInstancePopulated() {
    String serverInstance = serverInstanceBox.getValue();
    // Single call replaces the original if/else that set the flag on both branches;
    // isEmpty() is the idiomatic form of length() > 0.
    portNumberBox.setDisabled( serverInstance != null && !serverInstance.isEmpty() );
}
// With a null instance name the port box must stay enabled; the first call (with whatever
// the mock returns by default) only exercises the other branch.
@Test
public void testDisablePortIfInstancePopulated() throws Exception {
    dataHandler.getControls();
    dataHandler.disablePortIfInstancePopulated();
    // Because portNumberBox is a mock, the setDisabled() will not persist, so the above call is for branch coverage
    when( serverInstanceBox.getValue() ).thenReturn( null );
    dataHandler.disablePortIfInstancePopulated();
    assertFalse( dataHandler.portNumberBox.isDisabled() );
}
public static AopAuthorizeDefinition from(Class<?> targetClass, Method method) { AopAuthorizeDefinitionParser parser = new AopAuthorizeDefinitionParser(targetClass, method); return parser.parse(); }
@Test @SneakyThrows public void testCustomAnn() { AopAuthorizeDefinition definition = DefaultBasicAuthorizeDefinition.from(TestController.class, TestController.class.getMethod("test")); ResourceDefinition resource = definition.getResources() .getResource("test").orElseThrow(NullPointerException::new); Assert.assertNotNull(resource); Assert.assertTrue(resource.hasAction(Arrays.asList("add"))); Assert.assertTrue(resource.getAction("add") .map(act->act.getDataAccess().getType("user_own_data")) .isPresent()); }
@CanIgnoreReturnValue public final Ordered containsAtLeast( @Nullable Object firstExpected, @Nullable Object secondExpected, @Nullable Object @Nullable ... restOfExpected) { return containsAtLeastElementsIn(accumulate(firstExpected, secondExpected, restOfExpected)); }
@Test public void iterableContainsAtLeastInOrderWithFailureWithActualOrder() { expectFailureWhenTestingThat(asList(1, 2, null, 3, 4)).containsAtLeast(null, 1, 3).inOrder(); assertFailureKeys( "required elements were all found, but order was wrong", "expected order for required elements", "but order was", "full contents"); assertFailureValue("expected order for required elements", "[null, 1, 3]"); assertFailureValue("but order was", "[1, null, 3]"); assertFailureValue("full contents", "[1, 2, null, 3, 4]"); }
public Table getTable(String dbName, String tableName) { org.apache.hadoop.hive.metastore.api.Table table = client.getTable(dbName, tableName); StorageDescriptor sd = table.getSd(); if (sd == null) { throw new StarRocksConnectorException("Table is missing storage descriptor"); } if (HiveMetastoreApiConverter.isHudiTable(table.getSd().getInputFormat())) { return HiveMetastoreApiConverter.toHudiTable(table, catalogName); } else if (HiveMetastoreApiConverter.isKuduTable(table.getSd().getInputFormat())) { return HiveMetastoreApiConverter.toKuduTable(table, catalogName); } else { validateHiveTableType(table.getTableType()); if (AcidUtils.isFullAcidTable(table)) { throw new StarRocksConnectorException(String.format( "%s.%s is a hive transactional table(full acid), sr didn't support it yet", dbName, tableName)); } if (table.getTableType().equalsIgnoreCase("VIRTUAL_VIEW")) { return HiveMetastoreApiConverter.toHiveView(table, catalogName); } else { return HiveMetastoreApiConverter.toHiveTable(table, catalogName); } } }
@Test public void testGetTable() { HiveMetaClient client = new MockedHiveMetaClient(); HiveMetastore metastore = new HiveMetastore(client, "hive_catalog", MetastoreType.HMS); com.starrocks.catalog.Table table = metastore.getTable("db1", "tbl1"); HiveTable hiveTable = (HiveTable) table; Assert.assertEquals("db1", hiveTable.getDbName()); Assert.assertEquals("tbl1", hiveTable.getTableName()); Assert.assertEquals(Lists.newArrayList("col1"), hiveTable.getPartitionColumnNames()); Assert.assertEquals(Lists.newArrayList("col2"), hiveTable.getDataColumnNames()); Assert.assertEquals("hdfs://127.0.0.1:10000/hive", hiveTable.getTableLocation()); Assert.assertEquals(ScalarType.INT, hiveTable.getPartitionColumns().get(0).getType()); Assert.assertEquals(ScalarType.INT, hiveTable.getBaseSchema().get(0).getType()); Assert.assertEquals("hive_catalog", hiveTable.getCatalogName()); }
@Override public R apply(R record) { final Matcher matcher = regex.matcher(record.topic()); if (matcher.matches()) { final String topic = matcher.replaceFirst(replacement); log.trace("Rerouting from topic '{}' to new topic '{}'", record.topic(), topic); return record.newRecord(topic, record.kafkaPartition(), record.keySchema(), record.key(), record.valueSchema(), record.value(), record.timestamp()); } else { log.trace("Not rerouting topic '{}' as it does not match the configured regex", record.topic()); } return record; }
@Test public void addSuffix() { assertEquals("orig-suffix", apply("(.*)", "$1-suffix", "orig")); }
@Override public void processElement(StreamRecord<IN> element) throws Exception { if (userFunction.filter(element.getValue())) { output.collect(element); } }
@Test void testOpenClose() throws Exception { StreamFilter<String> operator = new StreamFilter<String>(new TestOpenCloseFilterFunction()); OneInputStreamOperatorTestHarness<String, String> testHarness = new OneInputStreamOperatorTestHarness<String, String>(operator); long initialTime = 0L; testHarness.open(); testHarness.processElement(new StreamRecord<String>("fooHello", initialTime)); testHarness.processElement(new StreamRecord<String>("bar", initialTime)); testHarness.close(); assertThat(TestOpenCloseFilterFunction.closeCalled) .as("RichFunction methods where not called.") .isTrue(); assertThat(testHarness.getOutput()).as("Output contains no elements.").isNotEmpty(); }
public static CA readCA(final String pemFileContent, final String keyPassword) throws CACreationException { try (var bundleReader = new StringReader(pemFileContent)) { PEMParser pemParser = new PEMParser(bundleReader); JcaPEMKeyConverter converter = new JcaPEMKeyConverter().setProvider("BC"); var certificates = new ArrayList<Certificate>(); PrivateKey privateKey = null; var pemObjects = readPemObjects(pemParser); for (var pemObject : pemObjects) { if (pemObject instanceof X509Certificate cert) { certificates.add(cert); } else if (pemObject instanceof X509CertificateHolder cert) { certificates.add(new JcaX509CertificateConverter().getCertificate(cert)); } else if (pemObject instanceof PKCS8EncryptedPrivateKeyInfo encryptedPrivateKey) { if (keyPassword == null || keyPassword.isBlank()) { throw new CACreationException("Private key is encrypted, but no password was supplied!"); } var decryptorBuilder = new JceOpenSSLPKCS8DecryptorProviderBuilder().setProvider("BC"); var keyDecryptorBuilder = decryptorBuilder.build(keyPassword.toCharArray()); var privateKeyInfo = encryptedPrivateKey.decryptPrivateKeyInfo(keyDecryptorBuilder); privateKey = converter.getPrivateKey(privateKeyInfo); } else if (pemObject instanceof PrivateKeyInfo privateKeyInfo) { privateKey = converter.getPrivateKey(privateKeyInfo); } } if (privateKey == null) { throw new CACreationException("No private key supplied in CA bundle!"); } if (certificates.isEmpty()) { throw new CACreationException("No certificate supplied in CA bundle!"); } return new CA(certificates, privateKey); } catch (PKCSException e) { throw new CACreationException("Error while decrypting private key. Wrong password?", e); } catch (CertificateException | IOException | OperatorCreationException e) { throw new CACreationException("Failed to parse CA bundle: ", e); } }
@Test void throwsExceptionIfCertificatesAreMissing() throws Exception { assertThatThrownBy(() -> PemCaReader.readCA(UNENCRYPTED_KEY, null)) .isInstanceOf(CACreationException.class) .hasMessage("No certificate supplied in CA bundle!"); }
public ServerHealthState trump(ServerHealthState otherServerHealthState) { int result = healthStateLevel.compareTo(otherServerHealthState.healthStateLevel); return result > 0 ? this : otherServerHealthState; }
@Test public void shouldTrumpSuccessIfCurrentIsWarning() { assertThat(SUCCESS_SERVER_HEALTH_STATE.trump(WARNING_SERVER_HEALTH_STATE), is(WARNING_SERVER_HEALTH_STATE)); }
public static String[] parseUri(String uri) { return doParseUri(uri, false); }
@Test public void testParseInvalid() { assertNull(CamelURIParser.parseUri("doesnotexists")); assertNull(CamelURIParser.parseUri("doesnotexists:")); assertNull(CamelURIParser.parseUri("doesnotexists/foo")); assertNull(CamelURIParser.parseUri("doesnotexists?")); }
@Deprecated @Restricted(DoNotUse.class) public static String resolve(ConfigurationContext context, String toInterpolate) { return context.getSecretSourceResolver().resolve(toInterpolate); }
@Test public void resolve_emptyDefault() { assertThat(resolve("${FOO:-}"), equalTo("")); }
public Map<Integer, Slice> toKeyMap(List<OrcType> types, List<HiveColumnHandle> physicalColumnHandles) { Map<String, Integer> columnIndexMap = physicalColumnHandles.stream() .collect(toImmutableMap(HiveColumnHandle::getName, HiveColumnHandle::getHiveColumnIndex)); return toKeyMap(types, columnIndexMap); }
@Test public void testWholeTable() { DwrfEncryptionMetadata dwrfEncryptionMetadata = new DwrfEncryptionMetadata( ImmutableMap.of(TABLE_IDENTIFIER, "abcd".getBytes()), ImmutableMap.of(), "test_algo", "test_provider"); List<HiveColumnHandle> columnHandleList = ImmutableList.of( new HiveColumnHandle("c1", HIVE_INT, TypeSignature.parseTypeSignature(BIGINT), 0, HiveColumnHandle.ColumnType.REGULAR, Optional.empty(), Optional.empty()), new HiveColumnHandle("c2", HIVE_INT, TypeSignature.parseTypeSignature(BIGINT), 2, HiveColumnHandle.ColumnType.REGULAR, Optional.empty(), Optional.empty())); List<OrcType> orcTypes = ImmutableList.of( new OrcType(OrcType.OrcTypeKind.INT, ImmutableList.of(), ImmutableList.of(), Optional.empty(), Optional.empty(), Optional.empty()), new OrcType(OrcType.OrcTypeKind.INT, ImmutableList.of(), ImmutableList.of(), Optional.empty(), Optional.empty(), Optional.empty())); Map<Integer, Slice> actualKeyMap = dwrfEncryptionMetadata.toKeyMap(orcTypes, columnHandleList); Map<Integer, Slice> expectedKeyMap = ImmutableMap.of(0, Slices.wrappedBuffer("abcd".getBytes())); assertEquals(actualKeyMap, expectedKeyMap); }
public static < EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT extends MutableState<EventTypeT, ResultTypeT>> OrderedEventProcessor<EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT> create( OrderedProcessingHandler<EventTypeT, EventKeyTypeT, StateTypeT, ResultTypeT> handler) { return new AutoValue_OrderedEventProcessor<>(handler); }
@Test public void testLargeBufferedOutputInTimer() throws CannotProvideCoderException { int maxResultsPerOutput = 100; // Array of sequences starting with 2 and the last element - 1. // Output will be buffered until the last event arrives long[] sequences = new long[maxResultsPerOutput * 3]; for (int i = 0; i < sequences.length - 1; i++) { sequences[i] = i + 2L; } sequences[sequences.length - 1] = 1; List<Event> events = new ArrayList<>(sequences.length); Collection<KV<String, String>> expectedOutput = new ArrayList<>(sequences.length); Collection<KV<String, OrderedProcessingStatus>> expectedStatuses = new ArrayList<>(sequences.length + 10); StringBuilder output = new StringBuilder(); String outputPerElement = "."; String key = "id-1"; int bufferedEventCount = 0; for (long sequence : sequences) { ++bufferedEventCount; events.add(Event.create(sequence, key, outputPerElement)); output.append(outputPerElement); expectedOutput.add(KV.of(key, output.toString())); if (bufferedEventCount < sequences.length) { // Last event will result in a batch of events being produced. That's why it's excluded // here. 
expectedStatuses.add( KV.of( key, OrderedProcessingStatus.create( null, bufferedEventCount, 2L, sequence, bufferedEventCount, 0L, 0, false))); } } // Statuses produced by the batched processing for (int i = maxResultsPerOutput; i < sequences.length; i += maxResultsPerOutput) { long lastOutputSequence = i; expectedStatuses.add( KV.of( key, OrderedProcessingStatus.create( lastOutputSequence, sequences.length - lastOutputSequence, lastOutputSequence + 1, (long) sequences.length, sequences.length, lastOutputSequence, 0, false))); } // -- Final status - indicates that everything has been fully processed expectedStatuses.add( KV.of( key, OrderedProcessingStatus.create( (long) sequences.length, 0, null, null, sequences.length, sequences.length, 0, false))); testProcessing( events.toArray(new Event[events.size()]), expectedStatuses, expectedOutput, EMISSION_FREQUENCY_ON_EVERY_ELEMENT, 1L /* This dataset assumes 1 as the starting sequence */, maxResultsPerOutput, PRODUCE_STATUS_ON_EVERY_EVENT); }
@Override public KsMaterializedQueryResult<WindowedRow> get( final GenericKey key, final int partition, final Range<Instant> windowStart, final Range<Instant> windowEnd, final Optional<Position> position ) { try { final ReadOnlySessionStore<GenericKey, GenericRow> store = stateStore .store(QueryableStoreTypes.sessionStore(), partition); return KsMaterializedQueryResult.rowIterator( findSession(store, key, windowStart, windowEnd).iterator()); } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
@Test public void shouldIgnoreSessionsThatEndAtLowerBoundIfLowerBoundOpen() { // Given: final Range<Instant> endBounds = Range.openClosed( LOWER_INSTANT, UPPER_INSTANT ); givenSingleSession(LOWER_INSTANT.minusMillis(1), LOWER_INSTANT); // When: final Iterator<WindowedRow> rowIterator = table.get(A_KEY, PARTITION, Range.all(), endBounds).rowIterator; // Then: assertThat(rowIterator.hasNext(), is(false)); }
@Override public AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options) { Map<ClientQuotaEntity, KafkaFutureImpl<Void>> futures = new HashMap<>(entries.size()); for (ClientQuotaAlteration entry : entries) { futures.put(entry.entity(), new KafkaFutureImpl<>()); } final long now = time.milliseconds(); runnable.call(new Call("alterClientQuotas", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @Override AlterClientQuotasRequest.Builder createRequest(int timeoutMs) { return new AlterClientQuotasRequest.Builder(entries, options.validateOnly()); } @Override void handleResponse(AbstractResponse abstractResponse) { AlterClientQuotasResponse response = (AlterClientQuotasResponse) abstractResponse; response.complete(futures); } @Override void handleFailure(Throwable throwable) { completeAllExceptionally(futures.values(), throwable); } }, now); return new AlterClientQuotasResult(Collections.unmodifiableMap(futures)); }
@Test public void testAlterClientQuotas() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); ClientQuotaEntity goodEntity = newClientQuotaEntity(ClientQuotaEntity.USER, "user-1"); ClientQuotaEntity unauthorizedEntity = newClientQuotaEntity(ClientQuotaEntity.USER, "user-0"); ClientQuotaEntity invalidEntity = newClientQuotaEntity("", "user-0"); Map<ClientQuotaEntity, ApiError> responseData = new HashMap<>(2); responseData.put(goodEntity, new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, "Authorization failed")); responseData.put(unauthorizedEntity, new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, "Authorization failed")); responseData.put(invalidEntity, new ApiError(Errors.INVALID_REQUEST, "Invalid quota entity")); env.kafkaClient().prepareResponse(AlterClientQuotasResponse.fromQuotaEntities(responseData, 0)); List<ClientQuotaAlteration> entries = new ArrayList<>(3); entries.add(new ClientQuotaAlteration(goodEntity, singleton(new ClientQuotaAlteration.Op("consumer_byte_rate", 10000.0)))); entries.add(new ClientQuotaAlteration(unauthorizedEntity, singleton(new ClientQuotaAlteration.Op("producer_byte_rate", 10000.0)))); entries.add(new ClientQuotaAlteration(invalidEntity, singleton(new ClientQuotaAlteration.Op("producer_byte_rate", 100.0)))); AlterClientQuotasResult result = env.adminClient().alterClientQuotas(entries); result.values().get(goodEntity); TestUtils.assertFutureError(result.values().get(unauthorizedEntity), ClusterAuthorizationException.class); TestUtils.assertFutureError(result.values().get(invalidEntity), InvalidRequestException.class); // ensure immutable assertThrows(UnsupportedOperationException.class, () -> result.values().put(newClientQuotaEntity(ClientQuotaEntity.USER, "user-3"), null)); } }
public LionConfig load(String source) { try (Reader r = new InputStreamReader(getClass().getResourceAsStream(source), UTF_8)) { lines = CharStreams.readLines(r); } catch (IOException e) { throw new IllegalArgumentException("Failed to read: " + source, e); } stripCommentsAndWhitespace(); parse(); processAliases(); processFroms(); return this; }
@Test public void basic() { title("basic"); cfg = cfg().load(CARD_GAME_1); print(cfg); verifyStats("CardGame1", 1, 3); }
public ServiceInfo getPushData(Service service) { ServiceInfo result = emptyServiceInfo(service); if (!ServiceManager.getInstance().containSingleton(service)) { return result; } Service singleton = ServiceManager.getInstance().getSingleton(service); result.setHosts(getAllInstancesFromIndex(singleton)); serviceDataIndexes.put(singleton, result); return result; }
@Test void testGetPushData() { ServiceInfo pushData = serviceStorage.getPushData(SERVICE); Mockito.verify(switchDomain).getDefaultPushCacheMillis(); assertNotNull(pushData); }
public static BundleCounter bundleProcessingThreadCounter(String shortId, MetricName name) { return new BundleProcessingThreadCounter(shortId, name); }
@Test public void testAccurateBundleCounterUsingMultipleThreads() throws Exception { BundleCounter bundleCounter = Metrics.bundleProcessingThreadCounter(TEST_ID, TEST_NAME); List<ByteString> values = testAccurateBundleMetricUsingMultipleThreads(bundleCounter, () -> bundleCounter.inc()); assertTrue(values.size() >= 10); List<Long> sortedValues = new ArrayList<>(); for (ByteString value : values) { sortedValues.add(MonitoringInfoEncodings.decodeInt64Counter(value)); } Collections.sort(sortedValues); List<ByteString> sortedEncodedValues = new ArrayList<>(); for (Long value : sortedValues) { sortedEncodedValues.add(MonitoringInfoEncodings.encodeInt64Counter(value)); } assertThat(values, contains(sortedEncodedValues.toArray())); }
@Operation( summary = "Monitor the given search keys in the key transparency log", description = """ Enforced unauthenticated endpoint. Return proofs proving that the log tree has been constructed correctly in later entries for each of the given search keys . """ ) @ApiResponse(responseCode = "200", description = "All search keys exist in the log", useReturnTypeSchema = true) @ApiResponse(responseCode = "404", description = "At least one search key lookup did not find the key") @ApiResponse(responseCode = "413", description = "Ratelimited") @ApiResponse(responseCode = "422", description = "Invalid request format") @POST @Path("/monitor") @RateLimitedByIp(RateLimiters.For.KEY_TRANSPARENCY_MONITOR_PER_IP) @Produces(MediaType.APPLICATION_JSON) public KeyTransparencyMonitorResponse monitor( @ReadOnly @Auth final Optional<AuthenticatedDevice> authenticatedAccount, @NotNull @Valid final KeyTransparencyMonitorRequest request) { // Disallow clients from making authenticated requests to this endpoint requireNotAuthenticated(authenticatedAccount); try { final List<MonitorKey> monitorKeys = new ArrayList<>(List.of( createMonitorKey(getFullSearchKeyByteString(ACI_PREFIX, request.aci().toCompactByteArray()), request.aciPositions()) )); request.usernameHash().ifPresent(usernameHash -> monitorKeys.add(createMonitorKey(getFullSearchKeyByteString(USERNAME_PREFIX, usernameHash), request.usernameHashPositions().get())) ); request.e164().ifPresent(e164 -> monitorKeys.add( createMonitorKey(getFullSearchKeyByteString(E164_PREFIX, e164.getBytes(StandardCharsets.UTF_8)), request.e164Positions().get())) ); return new KeyTransparencyMonitorResponse(keyTransparencyServiceClient.monitor( monitorKeys, request.lastNonDistinguishedTreeHeadSize(), request.lastDistinguishedTreeHeadSize(), KEY_TRANSPARENCY_RPC_TIMEOUT).join()); } catch (final CancellationException exception) { LOGGER.error("Unexpected cancellation from key transparency service", exception); throw new 
ServerErrorException(Response.Status.SERVICE_UNAVAILABLE, exception); } catch (final CompletionException exception) { handleKeyTransparencyServiceError(exception); } // This is unreachable return null; }
@Test void monitorRatelimited() { MockUtils.updateRateLimiterResponseToFail( rateLimiters, RateLimiters.For.KEY_TRANSPARENCY_MONITOR_PER_IP, "127.0.0.1", Duration.ofMinutes(10), true); final Invocation.Builder request = resources.getJerseyTest() .target("/v1/key-transparency/monitor") .request(); try (Response response = request.post( Entity.json(createMonitorRequestJson(ACI, List.of(3L), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty())))) { assertEquals(429, response.getStatus()); verify(keyTransparencyServiceClient, never()).monitor(any(), any(), any(), any()); } }
public static Node build(final List<JoinInfo> joins) { Node root = null; for (final JoinInfo join : joins) { if (root == null) { root = new Leaf(join.getLeftSource()); } if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) { throw new KsqlException("Cannot perform circular join - both " + join.getRightSource() + " and " + join.getLeftJoinExpression() + " are already included in the current join tree: " + root.debugString(0)); } else if (root.containsSource(join.getLeftSource())) { root = new Join(root, new Leaf(join.getRightSource()), join); } else if (root.containsSource(join.getRightSource())) { root = new Join(root, new Leaf(join.getLeftSource()), join.flip()); } else { throw new KsqlException( "Cannot build JOIN tree; neither source in the join is the FROM source or included " + "in a previous JOIN: " + join + ". The current join tree is " + root.debugString(0) ); } } return root; }
@Test public void shouldComputeEmptyEquivalenceSetForOuterJoins() { // Given: when(j1.getLeftSource()).thenReturn(a); when(j1.getRightSource()).thenReturn(b); when(j1.getType()).thenReturn(JoinType.OUTER); final List<JoinInfo> joins = ImmutableList.of(j1); // When: final Node root = JoinTree.build(joins); // Then: assertThat(root.joinEquivalenceSet(), is(empty())); }
@Override public ByteBuf getBytes(int index, byte[] dst) { getBytes(index, dst, 0, dst.length); return this; }
@Test public void testGetBytesAfterRelease3() { final ByteBuf buffer = buffer(); try { assertThrows(IllegalReferenceCountException.class, new Executable() { @Override public void execute() { releasedBuffer().getBytes(0, buffer, 0, 1); } }); } finally { buffer.release(); } }
@Override public String toString(@Nullable String root, Iterable<String> names) { StringBuilder builder = new StringBuilder(); if (root != null) { builder.append(root); } joiner().appendTo(builder, names); return builder.toString(); }
@Test public void testWindows_toUri_unc() { URI fileUri = PathType.windows() .toUri(fileSystemUri, "\\\\host\\share\\", ImmutableList.of("foo", "bar"), false); assertThat(fileUri.toString()).isEqualTo("jimfs://foo//host/share/foo/bar"); assertThat(fileUri.getPath()).isEqualTo("//host/share/foo/bar"); URI rootUri = PathType.windows() .toUri(fileSystemUri, "\\\\host\\share\\", ImmutableList.<String>of(), true); assertThat(rootUri.toString()).isEqualTo("jimfs://foo//host/share/"); assertThat(rootUri.getPath()).isEqualTo("//host/share/"); }
@Override protected void advanceToEndOfEventTime() throws Exception { for (Output<StreamRecord<?>> sourceOutput : operatorChain.getChainedSourceOutputs()) { sourceOutput.emitWatermark(Watermark.MAX_WATERMARK); } }
@TestTemplate void testAdvanceToEndOfEventTime() throws Exception { try (StreamTaskMailboxTestHarness<String> testHarness = buildWatermarkTestHarness(2, false)) { testHarness.processElement(Watermark.MAX_WATERMARK, 0, 0); testHarness.processElement(Watermark.MAX_WATERMARK, 0, 1); testHarness.getStreamTask().advanceToEndOfEventTime(); testHarness.processElement(Watermark.MAX_WATERMARK, 1, 0); assertThat(testHarness.getOutput()).doesNotContain(Watermark.MAX_WATERMARK); testHarness.processElement(Watermark.MAX_WATERMARK, 1, 1); assertThat(testHarness.getOutput()).containsExactly(Watermark.MAX_WATERMARK); } }
public static DateTime truncate(Date date, DateField dateField) { return new DateTime(truncate(calendar(date), dateField)); }
@Test public void truncateTest() { final String dateStr2 = "2020-02-29 12:59:34"; final Date date2 = DateUtil.parse(dateStr2); final DateTime dateTime = DateUtil.truncate(date2, DateField.MINUTE); assertEquals("2020-02-29 12:59:00", dateTime.toString()); }
@JsonProperty public void setValidationQueryTimeout(@Nullable Duration validationQueryTimeout) { this.validationQueryTimeout = validationQueryTimeout; }
@Test void testValidationQueryTimeoutIsSet() throws Exception { factory.setValidationQueryTimeout(Duration.seconds(3)); try (Connection connection = dataSource().getConnection()) { try (PreparedStatement statement = connection.prepareStatement("select 1")) { assertThat(statement.getQueryTimeout()).isEqualTo(3); } } }
@Override public KinesisReaderCheckpoint generate(SimplifiedKinesisClient kinesis) throws TransientKinesisException { List<Shard> streamShards = kinesis.listShardsAtPoint(streamName, startingPoint); LOG.info( "Creating a checkpoint with following shards {} at {}", streamShards, startingPoint.getTimestamp()); return new KinesisReaderCheckpoint( streamShards.stream() .map(shard -> new ShardCheckpoint(streamName, shard.getShardId(), startingPoint)) .collect(Collectors.toList())); }
@Test public void shouldMapAllShardsToCheckpoints() throws Exception { when(shard1.getShardId()).thenReturn("shard-01"); when(shard2.getShardId()).thenReturn("shard-02"); when(shard3.getShardId()).thenReturn("shard-03"); List<Shard> shards = ImmutableList.of(shard1, shard2, shard3); String streamName = "stream"; StartingPoint startingPoint = new StartingPoint(InitialPositionInStream.LATEST); when(kinesisClient.listShardsAtPoint(streamName, startingPoint)).thenReturn(shards); DynamicCheckpointGenerator underTest = new DynamicCheckpointGenerator(streamName, startingPoint); KinesisReaderCheckpoint checkpoint = underTest.generate(kinesisClient); assertThat(checkpoint).hasSize(3); }
public Optional<KsMaterialization> create( final String stateStoreName, final KafkaStreams kafkaStreams, final Topology topology, final LogicalSchema schema, final Serializer<GenericKey> keySerializer, final Optional<WindowInfo> windowInfo, final Map<String, ?> streamsProperties, final KsqlConfig ksqlConfig, final String applicationId, final String queryId ) { final Object appServer = streamsProperties.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (appServer == null) { return Optional.empty(); } final URL localHost = buildLocalHost(appServer); final KsLocator locator = locatorFactory.create( stateStoreName, kafkaStreams, topology, keySerializer, localHost, ksqlConfig.getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED), queryId ); final KsStateStore stateStore = storeFactory.create( stateStoreName, kafkaStreams, schema, ksqlConfig, queryId ); final KsMaterialization materialization = materializationFactory.create( windowInfo, locator, stateStore ); return Optional.of(materialization); }
@Test public void shouldReturnMaterialization() { // When: final Optional<KsMaterialization> result = factory .create(STORE_NAME, kafkaStreams, topology, SCHEMA, keySerializer, Optional.empty(), streamsProperties, ksqlConfig, APPLICATION_ID, any()); // Then: assertThat(result, is(Optional.of(materialization))); }
public Importer getCompatibleImporter(TransferExtension extension, DataVertical jobType) { Importer<?, ?> importer = getImporterOrNull(extension, jobType); if (importer != null) { return importer; } switch (jobType) { case MEDIA: importer = getMediaImporter(extension); break; case PHOTOS: importer = getPhotosImporter(extension); break; case VIDEOS: importer = getVideosImporter(extension); break; } if (importer == null) { return extension.getImporter(jobType); } return importer; }
@Test public void shouldConstructPhotoAndVideoImportersFromMedia() { TransferExtension ext = mock(TransferExtension.class); when(ext.getImporter(eq(MEDIA))).thenReturn(mock(Importer.class)); assertThat(compatibilityProvider.getCompatibleImporter(ext, PHOTOS)) .isInstanceOf(AnyToAnyImporter.class); assertThat(compatibilityProvider.getCompatibleImporter(ext, VIDEOS)) .isInstanceOf(AnyToAnyImporter.class); }
public static String getPartitionNameFromPartitionType(MetadataPartitionType partitionType, HoodieTableMetaClient metaClient, String indexName) { if (MetadataPartitionType.FUNCTIONAL_INDEX.equals(partitionType)) { checkArgument(metaClient.getIndexMetadata().isPresent(), "Index definition is not present"); return metaClient.getIndexMetadata().get().getIndexDefinitions().get(indexName).getIndexName(); } return partitionType.getPartitionPath(); }
@Test public void testExceptionForMissingFunctionalIndexMetadata() { MetadataPartitionType partitionType = MetadataPartitionType.FUNCTIONAL_INDEX; HoodieTableMetaClient metaClient = mock(HoodieTableMetaClient.class); when(metaClient.getIndexMetadata()).thenReturn(Option.empty()); assertThrows(IllegalArgumentException.class, () -> HoodieIndexUtils.getPartitionNameFromPartitionType(partitionType, metaClient, "testIndex")); }
@Override public void close() { close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); }
@Test public void testFailOnClosedConsumer() { consumer = newConsumer(); completeShareAcknowledgeOnCloseApplicationEventSuccessfully(); completeShareUnsubscribeApplicationEventSuccessfully(); consumer.close(); final IllegalStateException res = assertThrows(IllegalStateException.class, consumer::subscription); assertEquals("This consumer has already been closed.", res.getMessage()); }
public B callbacks(Integer callbacks) { this.callbacks = callbacks; return getThis(); }
@Test void callbacks() { InterfaceBuilder builder = new InterfaceBuilder(); builder.callbacks(2); Assertions.assertEquals(2, builder.build().getCallbacks().intValue()); }
@VisibleForTesting static DeterminismEnvelope<ResourceID> getTaskManagerResourceID( Configuration config, String rpcAddress, int rpcPort) { final String metadata = config.get(TaskManagerOptionsInternal.TASK_MANAGER_RESOURCE_ID_METADATA, ""); return config.getOptional(TaskManagerOptions.TASK_MANAGER_RESOURCE_ID) .map( value -> DeterminismEnvelope.deterministicValue( new ResourceID(value, metadata))) .orElseGet( FunctionUtils.uncheckedSupplier( () -> { final String hostName = InetAddress.getLocalHost().getHostName(); final String value = StringUtils.isNullOrWhitespaceOnly(rpcAddress) ? hostName + "-" + new AbstractID() .toString() .substring(0, 6) : rpcAddress + ":" + rpcPort + "-" + new AbstractID() .toString() .substring(0, 6); return DeterminismEnvelope.nondeterministicValue( new ResourceID(value, metadata)); })); }
@Test void testGenerateTaskManagerResourceIDWithoutMetaData() throws Exception { final Configuration configuration = createConfiguration(); final String resourceID = "test"; configuration.set(TaskManagerOptions.TASK_MANAGER_RESOURCE_ID, resourceID); final ResourceID taskManagerResourceID = TaskManagerRunner.getTaskManagerResourceID(configuration, "", -1).unwrap(); assertThat(taskManagerResourceID.getMetadata()).isEmpty(); assertThat(taskManagerResourceID.getStringWithMetadata()).isEqualTo("test"); }
@Override public boolean hasNext() { if (currentIterator == null || !currentIterator.hasNext()) { while (listIterator.hasNext()) { Iterator<T> iterator = listIterator.next(); currentIterator = iterator; if (iterator.hasNext()) { if (limit == 0) { return true; } else { return limit >= counter + 1; } } } return false; } if (currentIterator.hasNext()) { if (limit == 0) { return true; } else { return limit >= counter + 1; } } return false; }
@Test public void testHasNextWithEmpty() { List<Integer> emptyList = new ArrayList<Integer>(); CompositeIterable<Integer> compositeIterable = new CompositeIterable<Integer>( emptyList); assertThat(compositeIterable.iterator().hasNext()).isFalse(); }
@Override public void preflight(Path file) throws BackgroundException { assumeRole(file, WRITEPERMISSION); }
@Test public void testPreflightFileAccessDeniedCustomProps() throws Exception { final Path file = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); file.setAttributes(file.attributes().withAcl(new Acl(new Acl.CanonicalUser(), READPERMISSION))); assertThrows(AccessDeniedException.class, () -> new CteraWriteFeature(session).preflight(file)); }
// Wraps the input in the given ANSI element via Spring's AnsiOutput. Note that
// AnsiOutput only emits escape codes when ANSI output is enabled; otherwise the
// input is returned unchanged.
protected String toAnsiString(String in, AnsiElement element) {
    return AnsiOutput.toString(element, in);
}
// With ANSI output disabled (the default in tests), the element is ignored and
// the plain input string is returned.
@Test
void testToAnsiString() {
    CRLFLogConverter cut = new CRLFLogConverter();
    AnsiElement ansiElement = AnsiColor.RED;
    String result = cut.toAnsiString("input", ansiElement);
    assertThat(result).isEqualTo("input");
}
// Convenience overload: creates or retrieves the named rate limiter using the
// registry's default configuration.
@Override
public RateLimiter rateLimiter(final String name) {
    return rateLimiter(name, getDefaultConfig());
}
// A null limiter name must be rejected with a NullPointerException carrying the
// standard "name must not be null" message, even when a custom config is given.
@Test
public void rateLimiterNewWithNullNameAndNonDefaultConfig() throws Exception {
    exception.expect(NullPointerException.class);
    exception.expectMessage(NAME_MUST_NOT_BE_NULL);
    RateLimiterRegistry registry = new InMemoryRateLimiterRegistry(config);
    registry.rateLimiter(null, config);
}
// Fetches the given URL via HTTP GET. Redirect handling is driven by the fetcher
// config (maxRedirects <= 0 disables redirects entirely). Per-request headers can
// be overridden through an HttpFetcherConfig in the ParseContext.
@Override
public InputStream fetch(String fetchKey, Metadata metadata, ParseContext parseContext)
        throws IOException, TikaException {
    HttpFetcherConfig additionalHttpFetcherConfig = getAdditionalHttpFetcherConfig(parseContext);
    HttpGet get = new HttpGet(fetchKey);
    RequestConfig requestConfig = RequestConfig
            .custom()
            .setMaxRedirects(httpFetcherConfig.getMaxRedirects())
            .setRedirectsEnabled(httpFetcherConfig.getMaxRedirects() > 0)
            .build();
    get.setConfig(requestConfig);
    // Static headers from metadata first, then per-request overrides on top.
    setHttpRequestHeaders(metadata, get);
    putAdditionalHeadersOnRequest(additionalHttpFetcherConfig, get);
    return execute(get, metadata, httpClient, true);
}
// A 4xx response must surface as an IOException while still recording the HTTP
// status code and target URL in the metadata.
@Test
public void test4xxResponse() throws Exception {
    // Setup client to respond with 403
    mockClientResponse(buildMockResponse(HttpStatus.SC_FORBIDDEN, null));
    final Metadata meta = new Metadata();
    assertThrows(IOException.class, () -> httpFetcher.fetch(TEST_URL, meta, new ParseContext()));
    // Meta still populated
    assertEquals("403", meta.get("http-header:status-code"));
    assertEquals(TEST_URL, meta.get("http-connection:target-url"));
}
// Loads a previously generated object codec class for {@code cls}, or generates
// one on the fly using an ObjectCodecBuilder bound to the given Fury instance.
public static <T> Class<? extends Serializer<T>> loadOrGenObjectCodecClass(
    Class<T> cls, Fury fury) {
  // Fail fast on a missing Fury instance before any codegen work starts.
  Preconditions.checkNotNull(fury);
  return loadOrGenCodecClass(cls, fury, new ObjectCodecBuilder(cls, fury));
}
// Round-trips a BeanA instance through a generated serializer obtained from the
// class resolver, verifying the generated codec class is instantiable and correct.
@SuppressWarnings("unchecked")
@Test
public void loadOrGenObjectCodecClass() throws Exception {
    Fury fury =
            Fury.builder()
                    .withLanguage(Language.JAVA)
                    .withRefTracking(false)
                    .requireClassRegistration(false)
                    .build();
    Class<?> seqCodecClass = fury.getClassResolver().getSerializerClass(BeanA.class);
    // Generated serializers expose a (Fury, Class) constructor by convention.
    Generated.GeneratedSerializer serializer =
            seqCodecClass
                    .asSubclass(Generated.GeneratedSerializer.class)
                    .getConstructor(Fury.class, Class.class)
                    .newInstance(fury, BeanA.class);
    MemoryBuffer buffer = MemoryUtils.buffer(32);
    BeanA beanA = BeanA.createBeanA(2);
    serializer.write(buffer, beanA);
    byte[] bytes = buffer.getBytes(0, buffer.writerIndex());
    Object obj = serializer.read(MemoryUtils.wrap(bytes));
    assertEquals(obj, beanA);
}
// Rewrites the detail message of every throwable in the cause chain of
// {@code root}. For each throwable the mapper is consulted; a null result
// leaves that throwable's message untouched. A null mapper is a no-op.
public static void updateDetailMessage(
        @Nullable Throwable root,
        @Nullable Function<Throwable, String> throwableToMessage) {
    if (throwableToMessage == null) {
        return;
    }
    // Walk the cause chain from the root downwards.
    for (Throwable current = root; current != null; current = current.getCause()) {
        final String newMessage = throwableToMessage.apply(current);
        if (newMessage != null) {
            updateDetailMessageOfThrowable(current, newMessage);
        }
    }
}
// A mapper that always returns null must leave every message in the cause chain
// unchanged.
@Test
void testUpdateDetailMessageWithoutRelevantThrowable() {
    Throwable originalThrowable =
            new IllegalStateException(
                    "root message", new IllegalArgumentException("cause message"));
    ExceptionUtils.updateDetailMessage(originalThrowable, t -> null);
    assertThat(originalThrowable.getMessage()).isEqualTo("root message");
    assertThat(originalThrowable.getCause().getMessage()).isEqualTo("cause message");
}
// Identifies this delta as an insertion in the diff model.
@Override
public TYPE getType() {
    return Delta.TYPE.INSERT;
}
// An InsertDelta must report TYPE.INSERT regardless of its chunks' contents.
@Test
void testGetType() {
    // given
    Chunk<String> chunk = new Chunk<>(1, EMPTY_LIST);
    Delta<String> delta = new InsertDelta<>(chunk, chunk);
    // when
    Delta.TYPE type = delta.getType();
    // then
    assertThat(type).isEqualTo(Delta.TYPE.INSERT);
}
// Returns the last element (file or directory name) of the given path string.
// Fix: Path.getFileName() returns null for root paths such as "/" (see the
// java.nio.file.Path contract), which made the original throw an NPE; such
// inputs now yield an empty string instead.
public String getLastElementOfPath(String path) {
    final java.nio.file.Path fileName = Paths.get(path).getFileName();
    return fileName == null ? "" : fileName.toString();
}
// The final segment of "<temp-dir>/FOOBAR" must be FOOBAR.
@Test
public void testGetLastElementOfPath() {
    Path path = Paths.get(temporaryDirectory.getPath(), FOOBAR);
    String result = fileUtil.getLastElementOfPath(path.toString());
    assertEquals(FOOBAR, result);
}
// Collects the display data registered by the given component (and anything it
// transitively includes) into an immutable DisplayData instance. The component
// must be non-null; population errors propagate from builder.include.
public static DisplayData from(HasDisplayData component) {
    checkNotNull(component, "component argument cannot be null");
    InternalBuilder builder = new InternalBuilder();
    builder.include(Path.root(), component);
    return builder.build();
}
// When a component throws during populateDisplayData, the wrapping exception
// must name the component class and keep the original exception as its cause.
@Test
public void testExceptionMessage() {
    final RuntimeException cause = new RuntimeException("oh noes!");
    HasDisplayData component =
            new HasDisplayData() {
                @Override
                public void populateDisplayData(DisplayData.Builder builder) {
                    throw cause;
                }
            };
    thrown.expectMessage(component.getClass().getName());
    thrown.expectCause(is(cause));
    DisplayData.from(component);
}
// Dispatches the batch to all suitable handlers, then acknowledges any failed
// messages that demand explicit acknowledgement. Handler errors are logged and
// do not prevent the remaining handlers from running.
// Fix: the caught exception is now passed as the trailing logger argument so
// SLF4J records the stack trace; previously only the handler name was logged
// and the exception was silently dropped.
private void handle(FailureBatch failureBatch) {
    suitableHandlers(failureBatch)
            .forEach(handler -> {
                try {
                    handler.handle(failureBatch);
                } catch (Exception e) {
                    logger.error("Error occurred while handling failures by {}",
                            handler.getClass().getName(), e);
                }
            });

    // Only Message instances can be acknowledged; other failed-message types are skipped.
    final List<Message> requiresAcknowledgement = failureBatch.getFailures().stream()
            .filter(Failure::requiresAcknowledgement)
            .map(Failure::failedMessage)
            .filter(Message.class::isInstance)
            .map(Message.class::cast)
            .collect(Collectors.toList());

    if (!requiresAcknowledgement.isEmpty()) {
        acknowledger.acknowledge(requiresAcknowledgement);
    }
}
// If neither a custom nor the fallback handler is suitable for the batch, the
// service must drain the queue without invoking any handler.
@Test
public void run_whenNoSuitableCustomHandlerAndNoSuitableFallbackHandlerFound_thenNoHandlingDone() throws Exception {
    // given
    final FailureBatch indexingFailureBatch = indexingFailureBatch(createIndexingFailure());
    final FailureHandler customFailureHandler = enabledFailureHandler();
    final FailureHandler fallbackFailureHandler = enabledFailureHandler();
    final FailureHandlingService underTest = new FailureHandlingService(fallbackFailureHandler,
            ImmutableSet.of(customFailureHandler),
            failureSubmissionQueue,
            configuration,
            acknowledger);
    underTest.startAsync();
    underTest.awaitRunning();
    //when
    failureSubmissionQueue.submitBlocking(indexingFailureBatch);
    // Wait until the background service has consumed the submitted batch.
    Awaitility.waitAtMost(Durations.ONE_SECOND)
            .until(() -> failureSubmissionQueue.queueSize() == 0);
    //then
    verify(customFailureHandler, times(0)).handle(any());
    verify(fallbackFailureHandler, times(0)).handle(any());
}
// Human-readable representation: delegates to format(), which renders the
// duration according to the configured level and precision.
@Override
public String toString() {
    return format();
}
// A 1000 ms difference formatted at second precision must render as "1秒".
@Test
public void formatBetweenTest() {
    long betweenMs = DateUtil.betweenMs(DateUtil.parse("2018-07-16 11:23:19"), DateUtil.parse("2018-07-16 11:23:20"));
    BetweenFormatter formater = new BetweenFormatter(betweenMs, Level.SECOND, 1);
    assertEquals(formater.toString(), "1秒");
}
// Builds the storage path for a package's data: the package's REST path
// ("type/tenant/namespace/name/version") followed by the storage's data path
// suffix (may be empty).
protected String packagePath(PackageName packageName) {
    return packageName.toRestPath() + storage.dataPath();
}
// Verifies metadata/data path construction both with the default storage (empty
// data path) and with a stubbed storage whose dataPath() returns "/tmp".
@Test
public void testPackagePath() {
    PackagesManagementImpl impl = (PackagesManagementImpl) packagesManagement;
    PackageName pn = PackageName.get("function://public/default/test@v1");
    String metaPath = impl.metadataPath(pn);
    Assert.assertEquals(metaPath, "function/public/default/test/v1/meta");
    String dataPath = impl.packagePath(pn);
    Assert.assertEquals(dataPath, "function/public/default/test/v1");
    // Re-initialize with a storage stub that only provides a custom dataPath().
    impl.initialize(new PackagesStorage() {
        @Override
        public void initialize() {

        }

        @Override
        public CompletableFuture<Void> writeAsync(String path, InputStream inputStream) {
            return null;
        }

        @Override
        public CompletableFuture<Void> readAsync(String path, OutputStream outputStream) {
            return null;
        }

        @Override
        public CompletableFuture<Void> deleteAsync(String path) {
            return null;
        }

        @Override
        public CompletableFuture<List<String>> listAsync(String path) {
            return null;
        }

        @Override
        public CompletableFuture<Boolean> existAsync(String path) {
            return null;
        }

        @Override
        public CompletableFuture<Void> closeAsync() {
            return null;
        }

        @Override
        public String dataPath() {
            return "/tmp";
        }
    });
    metaPath = impl.metadataPath(pn);
    Assert.assertEquals(metaPath, "function/public/default/test/v1/meta");
    // The custom dataPath() suffix must now be appended to the package path.
    dataPath = impl.packagePath(pn);
    Assert.assertEquals(dataPath, "function/public/default/test/v1/tmp");
}
// Loads table/view/index/constraint metadata for every schema visible on the
// data source's connection and assembles one SchemaMetaData per schema.
// Schemas with no entries fall back to empty multimaps/collections.
@Override
public Collection<SchemaMetaData> load(final MetaDataLoaderMaterial material) throws SQLException {
    try (Connection connection = material.getDataSource().getConnection()) {
        Collection<String> schemaNames = SchemaMetaDataLoader.loadSchemaNames(connection, TypedSPILoader.getService(DatabaseType.class, "PostgreSQL"));
        // Bulk-load per-schema maps up front to avoid per-table round trips.
        Map<String, Multimap<String, IndexMetaData>> schemaIndexMetaDataMap = loadIndexMetaDataMap(connection, schemaNames);
        Map<String, Multimap<String, ColumnMetaData>> schemaColumnMetaDataMap = loadColumnMetaDataMap(connection, material.getActualTableNames(), schemaNames);
        Map<String, Multimap<String, ConstraintMetaData>> schemaConstraintMetaDataMap = loadConstraintMetaDataMap(connection, schemaNames);
        Map<String, Collection<String>> schemaViewNames = loadViewNames(connection, schemaNames, material.getActualTableNames());
        Collection<SchemaMetaData> result = new LinkedList<>();
        for (String each : schemaNames) {
            Multimap<String, IndexMetaData> tableIndexMetaDataMap = schemaIndexMetaDataMap.getOrDefault(each, LinkedHashMultimap.create());
            Multimap<String, ColumnMetaData> tableColumnMetaDataMap = schemaColumnMetaDataMap.getOrDefault(each, LinkedHashMultimap.create());
            Multimap<String, ConstraintMetaData> tableConstraintMetaDataMap = schemaConstraintMetaDataMap.getOrDefault(each, LinkedHashMultimap.create());
            Collection<String> viewNames = schemaViewNames.getOrDefault(each, Collections.emptySet());
            result.add(new SchemaMetaData(each, createTableMetaDataList(tableIndexMetaDataMap, tableColumnMetaDataMap, tableConstraintMetaDataMap, viewNames)));
        }
        return result;
    }
}
// Stubs every metadata query (schemas, tables, primary keys, indexes, advanced
// indexes, constraints, role grants) and asserts the loader assembles the
// expected table metadata for a single table "tbl".
@Test
void assertLoadWithTables() throws SQLException {
    DataSource dataSource = mockDataSource();
    ResultSet schemaResultSet = mockSchemaMetaDataResultSet();
    when(dataSource.getConnection().getMetaData().getSchemas()).thenReturn(schemaResultSet);
    ResultSet tableResultSet = mockTableMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(TABLE_META_DATA_SQL_WITH_TABLES).executeQuery()).thenReturn(tableResultSet);
    ResultSet primaryKeyResultSet = mockPrimaryKeyMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(PRIMARY_KEY_META_DATA_SQL).executeQuery()).thenReturn(primaryKeyResultSet);
    ResultSet indexResultSet = mockIndexMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(BASIC_INDEX_META_DATA_SQL).executeQuery()).thenReturn(indexResultSet);
    ResultSet advanceIndexResultSet = mockAdvanceIndexMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(ADVANCE_INDEX_META_DATA_SQL).executeQuery()).thenReturn(advanceIndexResultSet);
    ResultSet constraintResultSet = mockConstraintMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(BASIC_CONSTRAINT_META_DATA_SQL).executeQuery()).thenReturn(constraintResultSet);
    ResultSet roleTableGrantsResultSet = mockRoleTableGrantsResultSet();
    // The grants query is parameterized, so match by prefix only.
    when(dataSource.getConnection().prepareStatement(startsWith(LOAD_ALL_ROLE_TABLE_GRANTS_SQL)).executeQuery()).thenReturn(roleTableGrantsResultSet);
    assertTableMetaDataMap(getDialectTableMetaDataLoader().load(new MetaDataLoaderMaterial(Collections.singletonList("tbl"), dataSource, new PostgreSQLDatabaseType(), "sharding_db")));
}
// Sets the directory for the near-cache preloader store and returns this config
// for fluent chaining. Rejects null eagerly so misconfiguration fails fast.
public NearCachePreloaderConfig setDirectory(String directory) {
    checkNotNull(directory, "directory cannot be null!");
    this.directory = directory;
    return this;
}
// Round-trip: the directory set on the config must be returned by the getter.
@Test
public void setDirectory() {
    config.setDirectory("myParentDirectory");

    assertEquals("myParentDirectory", config.getDirectory());
}
// Convenience overload: executes the DDL command with restoreInProgress = false.
public DdlCommandResult execute(
    final String sql,
    final DdlCommand ddlCommand,
    final boolean withQuery,
    final Set<SourceName> withQuerySources
) {
  return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
// During a restore, dropping a stream must succeed even when other streams
// declare a constraint on it (restoreInProgress = true bypasses the check).
@Test
public void shouldDropStreamIfConstraintExistsAndRestoreIsInProgress() {
    // Given:
    final CreateStreamCommand stream1 = buildCreateStream(SourceName.of("s1"), SCHEMA, false, false);
    final CreateStreamCommand stream2 = buildCreateStream(SourceName.of("s2"), SCHEMA, false, false);
    final CreateStreamCommand stream3 = buildCreateStream(SourceName.of("s3"), SCHEMA, false, false);
    cmdExec.execute(SQL_TEXT, stream1, true, Collections.emptySet());
    // s2 and s3 both depend on s1, which would normally block the drop.
    cmdExec.execute(SQL_TEXT, stream2, true, Collections.singleton(SourceName.of("s1")));
    cmdExec.execute(SQL_TEXT, stream3, true, Collections.singleton(SourceName.of("s1")));

    // When:
    final DropSourceCommand dropStream = buildDropSourceCommand(SourceName.of("s1"));
    final DdlCommandResult result = cmdExec.execute(SQL_TEXT, dropStream, false, Collections.emptySet(), true);

    // Then
    assertThat(result.isSuccess(), is(true));
    assertThat(
        result.getMessage(),
        equalTo(String.format("Source %s (topic: %s) was dropped.", STREAM_NAME, TOPIC_NAME))
    );
}
// Starts watching for stop requests only when the stop command is enabled in
// the application settings; otherwise this is a silent no-op.
@Override
public void startWatching() {
    if (settings.getProps().valueAsBoolean(ENABLE_STOP_COMMAND.getKey())) {
        super.startWatching();
    }
}
// Interrupting the watcher thread must terminate it promptly.
@Test
public void stop_watching_commands_if_thread_is_interrupted() {
    TestAppSettings appSettings = new TestAppSettings();
    StopRequestWatcherImpl underTest = new StopRequestWatcherImpl(appSettings, scheduler, commands);

    underTest.startWatching();
    underTest.interrupt();

    await().until(() -> !underTest.isAlive());
    assertThat(underTest.isAlive()).isFalse();
}
// Prepares fetch requests for all fetchable partitions and dispatches them,
// routing responses through the success/failure handlers.
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
            prepareFetchRequests(),
            this::handleFetchSuccess,
            this::handleFetchFailure
    );
}
// In READ_COMMITTED mode, records from an aborted transaction must be skipped
// entirely while the consumer position still advances past them.
@Test
public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long currentOffset = 0;

    // Two transactional records followed by an abort marker for producer 1.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));

    currentOffset += abortTransaction(buffer, 1L, currentOffset);

    buffer.flip();

    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
            new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));

    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();

    // Ensure that we don't return any of the aborted records, but yet advance the consumer position.
    assertFalse(fetchedRecords.containsKey(tp0));
    assertEquals(currentOffset, subscriptions.position(tp0).offset);
}
// Determines the default max parallelism for adaptive batch scheduling:
// the explicitly configured option wins; otherwise the execution config's
// parallelism is used, falling back to the option's default when the execution
// config itself has no explicit parallelism.
static int getDefaultMaxParallelism(
        Configuration configuration, ExecutionConfig executionConfig) {
    return configuration
            .getOptional(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_MAX_PARALLELISM)
            .orElseGet(
                    () -> {
                        final int parallelism = executionConfig.getParallelism();
                        return parallelism == ExecutionConfig.PARALLELISM_DEFAULT
                                ? BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_MAX_PARALLELISM
                                        .defaultValue()
                                : parallelism;
                    });
}
// With no configured max parallelism, the execution config's explicit
// parallelism (5) must be used as the default.
@Test
void testMaxParallelismFallsBackToExecutionConfigDefaultParallelism() {
    Configuration configuration = new Configuration();
    ExecutionConfig executionConfig = new ExecutionConfig();
    executionConfig.setParallelism(5);
    assertThat(
                    AdaptiveBatchSchedulerFactory.getDefaultMaxParallelism(
                            configuration, executionConfig))
            .isEqualTo(5);
}
// Builds a SchemaKStream for the given data source, choosing the builder by
// source type (stream vs. table) and windowing of the key format.
public static SchemaKStream<?> buildSource(
    final PlanBuildContext buildContext,
    final DataSource dataSource,
    final QueryContext.Stacker contextStacker
) {
  // Windowed sources need window-aware key handling, hence separate builders.
  final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
  switch (dataSource.getDataSourceType()) {
    case KSTREAM:
      if (windowed) {
        return buildWindowedStream(buildContext, dataSource, contextStacker);
      }
      return buildStream(buildContext, dataSource, contextStacker);

    case KTABLE:
      if (windowed) {
        return buildWindowedTable(buildContext, dataSource, contextStacker);
      }
      return buildTable(buildContext, dataSource, contextStacker);

    default:
      throw new UnsupportedOperationException(
          "Source type:" + dataSource.getDataSourceType());
  }
}
// A windowed table source built without an old query must use the current
// pseudo-column version and produce a valid schema.
@Test
public void shouldCreateWindowedTableSourceWithNewPseudoColumnVersionIfNoOldQuery() {
    // Given:
    givenWindowedTable();

    // When:
    final SchemaKStream<?> result = SchemaKSourceFactory.buildSource(
        buildContext,
        dataSource,
        contextStacker
    );

    // Then:
    assertThat(((WindowedTableSource) result.getSourceStep()).getPseudoColumnVersion(),
        equalTo(CURRENT_PSEUDOCOLUMN_VERSION_NUMBER));
    assertValidSchema(result);
}
// Projects a record written with {@code source} onto {@code target} after a
// compatibility check. An optional-to-required projection requires the target
// to supply a default value, which is used when the record is null.
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
    checkMaybeCompatible(source, target);
    final boolean optionalToRequired = source.isOptional() && !target.isOptional();
    if (!optionalToRequired) {
        // Same optionality direction: null simply projects to null.
        return record == null ? null : projectRequiredSchema(source, record, target);
    }
    if (target.defaultValue() == null) {
        throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
    }
    // Missing value falls back to the target's default.
    return record == null ? target.defaultValue() : projectRequiredSchema(source, record, target);
}
// Projecting a struct lacking an optional field must yield null for that field.
@Test
public void testProjectMissingOptionalStructField() {
    final Schema source = SchemaBuilder.struct().build();
    final Schema target = SchemaBuilder.struct().field("id", SchemaBuilder.OPTIONAL_INT64_SCHEMA).build();
    assertNull(((Struct) SchemaProjector.project(source, new Struct(source), target)).getInt64("id"));
}
// Looks up the failover-cached ServiceInfo for the grouped service name and
// clusters, using the same key format as the failover store.
public ServiceInfo getFailoverServiceInfo(final String serviceName, final String groupName, final String clusters) {
    // Key format: "<group>@@<service>[@@<clusters>]".
    final String grouped = NamingUtils.getGroupedName(serviceName, groupName);
    return failoverReactor.getService(ServiceInfo.getKey(grouped, clusters));
}
// The holder must compose the key "a@@b@@c" from (service=b, group=a, clusters=c)
// and return the ServiceInfo the failover reactor provides for it.
@Test
void testGetFailoverServiceInfo() throws IllegalAccessException, NoSuchFieldException, NacosException {
    FailoverReactor mock = injectMockFailoverReactor();
    ServiceInfo serviceInfo = new ServiceInfo("a@@b@@c");
    when(mock.getService("a@@b@@c")).thenReturn(serviceInfo);
    assertEquals(serviceInfo, holder.getFailoverServiceInfo("b", "a", "c"));
}
// Serializes this Neighbor Advertisement: a 4-byte flags word (R/S/O in the top
// three bits, remaining bits reserved as zero), the 16-byte IPv6 target address,
// then any encoded NDP options.
@Override
public byte[] serialize() {
    byte[] optionsData = null;
    if (this.options.hasOptions()) {
        optionsData = this.options.serialize();
    }

    int optionsLength = 0;
    if (optionsData != null) {
        optionsLength = optionsData.length;
    }

    final byte[] data = new byte[HEADER_LENGTH + optionsLength];
    final ByteBuffer bb = ByteBuffer.wrap(data);

    // Pack R (bit 31), S (bit 30), O (bit 29); each flag is masked to one bit.
    bb.putInt((this.routerFlag & 0x1) << 31 | (this.solicitedFlag & 0x1) << 30 | (this.overrideFlag & 0x1) << 29);
    bb.put(this.targetAddress, 0, Ip6Address.BYTE_LENGTH);
    if (optionsData != null) {
        bb.put(optionsData);
    }

    return data;
}
// An NA with all three flags set, a target address, and a target-link-layer
// option must serialize to the expected reference byte array.
@Test
public void testSerialize() {
    NeighborAdvertisement na = new NeighborAdvertisement();
    na.setRouterFlag((byte) 1);
    na.setSolicitedFlag((byte) 1);
    na.setOverrideFlag((byte) 1);
    na.setTargetAddress(TARGET_ADDRESS);
    na.addOption(NeighborDiscoveryOptions.TYPE_TARGET_LL_ADDRESS, MAC_ADDRESS.toBytes());

    assertArrayEquals(na.serialize(), bytePacket);
}
@Override void toHtml() throws IOException { writeLinks(); writeln("<br/>"); final String title; if (path.isEmpty()) { title = getString("Arbre_JNDI"); } else { title = getFormattedString("Arbre_JNDI_pour_contexte", htmlEncode(path)); } writeTitle("jndi.png", title); writeTable(); }
// Exercises the HTML rendering under several simulated application servers.
@Test
public void testToHtml() throws IOException, NamingException {
    doToHtmlWithServerName("Mock");
    doToHtmlWithServerName("GlassFish");
    doToHtmlWithServerName("WebLogic");
}
// Builds a MongoClientURI from the configured connection string, applying the
// configured maximum connections-per-host on top of the raw URI.
public MongoClientURI getMongoClientURI() {
    final MongoClientOptions.Builder optionsBuilder =
            MongoClientOptions.builder().connectionsPerHost(getMaxConnections());
    return new MongoClientURI(uri, optionsBuilder);
}
// A syntactically valid multi-host mongodb:// URI must pass validation and
// round-trip unchanged through getMongoClientURI().
@Test
public void validateSucceedsIfUriIsValid() throws Exception {
    MongoDbConfiguration configuration = new MongoDbConfiguration();
    final Map<String, String> properties = singletonMap(
            "mongodb_uri", "mongodb://example.com:1234,127.0.0.1:5678/TEST"
    );
    new JadConfig(new InMemoryRepository(properties), configuration).process();

    assertEquals("mongodb://example.com:1234,127.0.0.1:5678/TEST", configuration.getMongoClientURI().toString());
}
// Builds a MessageQueueView for the topic from the local broker's topic config.
// NOTE(review): looks up the config without a null check — presumably callers
// guarantee the topic exists; a missing topic would NPE in toTopicRouteData.
@Override
public MessageQueueView getCurrentMessageQueueView(ProxyContext ctx, String topic) throws Exception {
    TopicConfig topicConfig = this.brokerController.getTopicConfigManager().getTopicConfigTable().get(topic);
    return new MessageQueueView(topic, toTopicRouteData(topicConfig), null);
}
// A topic with 3 read / 2 write queues must yield selectors of matching sizes,
// one acting broker each, and consistent broker address/name selection.
@Test
public void testGetCurrentMessageQueueView() throws Throwable {
    ProxyContext ctx = ProxyContext.create();
    this.topicConfigTable.put(TOPIC, new TopicConfig(TOPIC, 3, 2, PermName.PERM_WRITE | PermName.PERM_READ));

    MessageQueueView messageQueueView = this.topicRouteService.getCurrentMessageQueueView(ctx, TOPIC);
    assertEquals(3, messageQueueView.getReadSelector().getQueues().size());
    assertEquals(2, messageQueueView.getWriteSelector().getQueues().size());
    assertEquals(1, messageQueueView.getReadSelector().getBrokerActingQueues().size());
    assertEquals(1, messageQueueView.getWriteSelector().getBrokerActingQueues().size());
    assertEquals(LOCAL_ADDR, messageQueueView.getReadSelector().selectOne(true).getBrokerAddr());
    assertEquals(LOCAL_BROKER_NAME, messageQueueView.getReadSelector().selectOne(true).getBrokerName());
    assertEquals(messageQueueView.getReadSelector().selectOne(true), messageQueueView.getWriteSelector().selectOne(true));
}
// Convenience overload: aggregates with default (null) key/value serdes via
// Materialized.with, delegating to the fully-specified variant.
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer,
                                        final Merger<? super K, V> sessionMerger) {
    return aggregate(initializer, sessionMerger, Materialized.with(null, null));
}
// Session-window cogroup aggregation: records within the 500 ms inactivity gap
// merge into one session; a gap (599 -> 607 for k1 is within a new session
// relative to 0) starts fresh sessions, producing the expected update sequence.
@Test
public void sessionWindowAggregate2Test() {
    final KTable<Windowed<String>, String> customers = groupedStream.cogroup(MockAggregator.TOSTRING_ADDER)
        .windowedBy(SessionWindows.with(ofMillis(500)))
        .aggregate(MockInitializer.STRING_INIT, sessionMerger, Materialized.with(Serdes.String(), Serdes.String()));
    customers.toStream().to(OUTPUT);

    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> testInputTopic = driver.createInputTopic(
            TOPIC, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<Windowed<String>, String> testOutputTopic = driver.createOutputTopic(
            OUTPUT, new SessionWindowedDeserializer<>(new StringDeserializer()), new StringDeserializer());

        testInputTopic.pipeInput("k1", "A", 0);
        testInputTopic.pipeInput("k1", "A", 0);
        testInputTopic.pipeInput("k2", "B", 599);
        testInputTopic.pipeInput("k1", "B", 607);

        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+A", 0);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+0+A+A", 0);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+B", 599);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+B", 607);
    }
}
// Two-phase iterator step. In the read phase the next element comes from the
// in-memory buffer; in the write phase it is pulled from the backing input and
// spilled into the buffer. An element that no longer fits is parked in
// leftOverElement for the next block; an exhausted input sets noMoreBlocks.
// A previously peeked element (nextElement) short-circuits everything.
@Override
public boolean hasNext() {
    try {
        if (this.nextElement == null) {
            if (this.readPhase) {
                // read phase, get next element from buffer
                T tmp = getNextRecord(this.reuseElement);
                if (tmp != null) {
                    this.nextElement = tmp;
                    return true;
                } else {
                    return false;
                }
            } else {
                if (this.input.hasNext()) {
                    final T next = this.input.next();
                    if (writeNextRecord(next)) {
                        this.nextElement = next;
                        return true;
                    } else {
                        // Buffer full: keep the element for the next block.
                        this.leftOverElement = next;
                        return false;
                    }
                } else {
                    this.noMoreBlocks = true;
                    return false;
                }
            }
        } else {
            return true;
        }
    } catch (IOException ioex) {
        throw new RuntimeException(
                "Error (de)serializing record in block resettable iterator.", ioex);
    }
}
// Walks a 12-segment block-resettable iterator over NUM_VALUES sequential ints,
// resetting each block five times and checking every pass replays the same
// [lower, upper) range, until all blocks are consumed.
@Test
void testTwelveFoldBufferedBlockResettableIterator() throws Exception {
    final AbstractInvokable memOwner = new DummyInvokable();
    // create the resettable Iterator
    final ReusingBlockResettableIterator<Record> iterator =
            new ReusingBlockResettableIterator<Record>(
                    this.memman, this.reader, this.serializer, 12, memOwner);
    // open the iterator
    iterator.open();

    // now test walking through the iterator
    int lower = 0;
    int upper = 0;
    do {
        lower = upper;
        upper = lower;
        // find the upper bound
        while (iterator.hasNext()) {
            Record target = iterator.next();
            int val = target.getField(0, IntValue.class).getValue();
            assertThat(val).isEqualTo(upper++);
        }
        // now reset the buffer a few times
        for (int i = 0; i < 5; ++i) {
            iterator.reset();
            int count = 0;
            while (iterator.hasNext()) {
                Record target = iterator.next();
                int val = target.getField(0, IntValue.class).getValue();
                assertThat(val).isEqualTo(lower + (count++));
            }
            // Every replay must see exactly the block's element count.
            assertThat(count).isEqualTo(upper - lower);
        }
    } while (iterator.nextBlock());
    assertThat(upper).isEqualTo(NUM_VALUES);

    // close the iterator
    iterator.close();
}
// Intentional no-op: this implementation ignores GPS location updates
// (presumably a disabled/empty variant of the SDK API — confirm against the
// enclosing class's purpose).
@Override
public void setGPSLocation(double latitude, double longitude) {

}
// Smoke test: calling setGPSLocation must not throw, even with out-of-range
// latitude values (the no-op implementation ignores them).
@Test
public void testSetGPSLocation() {
    List<SensorsDataAPI.AutoTrackEventType> types = new ArrayList<>();
    types.add(SensorsDataAPI.AutoTrackEventType.APP_START);
    types.add(SensorsDataAPI.AutoTrackEventType.APP_END);
    mSensorsAPI.setGPSLocation(1000.0, 45.5);
}
public CustomToggle config(Properties properties) { for (String key : properties.stringPropertyNames()) { String value = properties.getProperty(key); key = key.toLowerCase(); // compare with legal configuration names for (Property p: Property.values()) { if (key.equals(p.key())) { String ability = key.split("\\.")[1]; if (key.contains("enabled") && value.equalsIgnoreCase("false")) { this.turnOff(ability); }else if (key.contains("toggle")) { this.toggle(ability, value); } } } } return this; }
// Loading a properties file that disables the CLAIM ability must prevent the
// bot from registering it.
@Test
public void canTurnOffAbilitiesThroughProperties() {
    Properties properties = new Properties();

    try {
        properties.load(Files.newInputStream(Paths.get(filename)));
        toggle = new CustomToggle().config(properties);
    } catch (IOException e) {
        // Best-effort load; a missing file leaves the default toggle in place.
        System.out.println("No such file");
    }

    customBot = new DefaultBot(null, EMPTY, db, toggle);
    customBot.onRegister();

    assertFalse(customBot.getAbilities().containsKey(DefaultAbilities.CLAIM));
}
// Returns the subscribers of every service in the namespace whose service name
// and group name each *contain* the corresponding patterns parsed from the
// grouped serviceName (substring match, not exact).
@Override
public Collection<Subscriber> getFuzzySubscribers(String namespaceId, String serviceName) {
    Collection<Subscriber> result = new HashSet<>();
    Stream<Service> serviceStream = getServiceStream();
    String serviceNamePattern = NamingUtils.getServiceName(serviceName);
    String groupNamePattern = NamingUtils.getGroupName(serviceName);
    serviceStream.filter(service -> service.getNamespace().equals(namespaceId) && service.getName()
            .contains(serviceNamePattern) && service.getGroup().contains(groupNamePattern))
            .forEach(service -> result.addAll(getSubscribers(service)));
    return result;
}
// A fuzzy lookup by the service's own grouped name must return its two
// registered subscribers.
@Test
void testGetFuzzySubscribersByString() {
    Collection<Subscriber> actual = subscriberService.getFuzzySubscribers(service.getNamespace(),
            service.getGroupedServiceName());
    assertEquals(2, actual.size());
}
// Adds the given amount to the wrapped OffsetTime and re-wraps the result in a
// new zone-aware instance via getNewZoneOffset.
@Override
public Temporal plus(long amountToAdd, TemporalUnit unit) {
    return getNewZoneOffset(offsetTime.plus(amountToAdd, unit));
}
// Adding 3 hours must produce a ZoneTime equal to the same addition applied to
// the underlying OffsetTime.
@Test
void plusLong() {
    ZoneTime expected = new ZoneTime(offsetTime.plus(3, ChronoUnit.HOURS), zoneId, false);
    assertEquals(expected, zoneTime.plus(3, ChronoUnit.HOURS));
}
// Extracts the OUT type of a OneInputStreamProcessFunction via Flink's unary
// operator return-type extraction. The index arrays point at the generic
// parameter positions of the function interface.
public static <IN, OUT> TypeInformation<OUT> getOutputTypeForOneInputProcessFunction(
        OneInputStreamProcessFunction<IN, OUT> processFunction,
        TypeInformation<IN> inTypeInformation) {
    return TypeExtractor.getUnaryOperatorReturnType(
            processFunction,
            OneInputStreamProcessFunction.class,
            0,
            1,
            new int[] {1, 0},
            inTypeInformation,
            Utils.getCallLocationName(),
            true);
}
// An anonymous function mapping Integer -> Long must have its output type
// extracted as Types.LONG.
@Test
void testGetOneInputOutputType() {
    TypeInformation<Long> outputType =
            StreamUtils.getOutputTypeForOneInputProcessFunction(
                    new OneInputStreamProcessFunction<Integer, Long>() {
                        @Override
                        public void processRecord(
                                Integer record, Collector<Long> output, PartitionedContext ctx)
                                throws Exception {
                            // ignore
                        }
                    },
                    Types.INT);
    assertThat(outputType).isEqualTo(Types.LONG);
}
// Creates a database through the Hive metastore ops after rejecting duplicates.
@Override
public void createDb(String dbName, Map<String, String> properties) throws AlreadyExistsException {
    if (dbExists(dbName)) {
        throw new AlreadyExistsException("Database Already Exists");
    }
    hmsOps.createDb(dbName, properties);
}
// Covers createDb failure modes: duplicate name, invalid location URI,
// unrecognized property — and a successful create with empty properties.
@Test
public void createDbTest() throws AlreadyExistsException {
    ExceptionChecker.expectThrowsWithMsg(AlreadyExistsException.class,
            "Database Already Exists",
            () -> hiveMetadata.createDb("db1", new HashMap<>()));

    Map<String, String> conf = new HashMap<>();
    conf.put("location", "abs://xxx/zzz");
    ExceptionChecker.expectThrowsWithMsg(StarRocksConnectorException.class,
            "Invalid location URI: abs://xxx/zzz",
            () -> hiveMetadata.createDb("db3", conf));

    conf.clear();
    conf.put("not_support_prop", "xxx");
    ExceptionChecker.expectThrowsWithMsg(IllegalArgumentException.class,
            "Unrecognized property: not_support_prop",
            () -> hiveMetadata.createDb("db3", conf));

    conf.clear();
    hiveMetadata.createDb("db4", conf);
}