focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public boolean persistent() { return store.persistent(); }
@Test public void shouldReturnPersistentForTimestampedStore() { givenWrapperWithTimestampedStore(); // test "persistent = true" when(timestampedStore.persistent()).thenReturn(true); assertThat(wrapper.persistent(), equalTo(true)); // test "persistent = false" when(timestampedStore.persistent()).thenReturn(false); assertThat(wrapper.persistent(), equalTo(false)); }
public List<Chapter> getChapters() { return chapters; }
@Test public void testRealFileAuphonic() throws IOException, ID3ReaderException { CountingInputStream inputStream = new CountingInputStream(getClass().getClassLoader() .getResource("auphonic.mp3").openStream()); ChapterReader reader = new ChapterReader(inputStream); reader.readInputStream(); List<Chapter> chapters = reader.getChapters(); assertEquals(4, chapters.size()); assertEquals(0, chapters.get(0).getStart()); assertEquals(3000, chapters.get(1).getStart()); assertEquals(6000, chapters.get(2).getStart()); assertEquals(9000, chapters.get(3).getStart()); assertEquals("Chapter 1 - ❤️😊", chapters.get(0).getTitle()); assertEquals("Chapter 2 - ßöÄ", chapters.get(1).getTitle()); assertEquals("Chapter 3 - 爱", chapters.get(2).getTitle()); assertEquals("Chapter 4", chapters.get(3).getTitle()); assertEquals("https://example.com", chapters.get(0).getLink()); assertEquals("https://example.com", chapters.get(1).getLink()); assertEquals("https://example.com", chapters.get(2).getLink()); assertEquals("https://example.com", chapters.get(3).getLink()); assertEquals(EmbeddedChapterImage.makeUrl(765, 308), chapters.get(0).getImageUrl()); assertEquals(EmbeddedChapterImage.makeUrl(1271, 308), chapters.get(1).getImageUrl()); assertEquals(EmbeddedChapterImage.makeUrl(1771, 308), chapters.get(2).getImageUrl()); assertEquals(EmbeddedChapterImage.makeUrl(2259, 308), chapters.get(3).getImageUrl()); }
public static Builder builder(String testId) { return new Builder(testId); }
@Test public void testCreateResourceManagerBuilderReturnsKafkaResourceManager() throws IOException { assertThat( KafkaResourceManager.builder(TEST_ID) .useStaticContainer() .setHost(HOST) .setPort(KAFKA_PORT) .build()) .isInstanceOf(KafkaResourceManager.class); }
static String generateDatabaseName(String baseString) { return generateResourceId( baseString, ILLEGAL_DATABASE_NAME_CHARS, REPLACE_DATABASE_NAME_CHAR, MAX_DATABASE_NAME_LENGTH, TIME_FORMAT); }
@Test public void testGenerateDatabaseNameShouldReplaceDoubleQuotes() { String testBaseString = "Test\"DB\"Name"; String actual = generateDatabaseName(testBaseString); assertThat(actual).matches("test-db-name-\\d{8}-\\d{6}-\\d{6}"); }
public static DistCpOptions parse(String[] args) throws IllegalArgumentException { CommandLineParser parser = new CustomParser(); CommandLine command; try { command = parser.parse(cliOptions, args, true); } catch (ParseException e) { throw new IllegalArgumentException("Unable to parse arguments. " + Arrays.toString(args), e); } DistCpOptions.Builder builder = parseSourceAndTargetPaths(command); builder .withAtomicCommit( command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch())) .withSyncFolder( command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch())) .withDeleteMissing( command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch())) .withIgnoreFailures( command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch())) .withOverwrite( command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch())) .withAppend( command.hasOption(DistCpOptionSwitch.APPEND.getSwitch())) .withSkipCRC( command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch())) .withBlocking( !command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch())) .withVerboseLog( command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch())) .withDirectWrite( command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch())) .withUseIterator( command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch())) .withUpdateRoot( command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch())); if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) { String[] snapshots = getVals(command, DistCpOptionSwitch.DIFF.getSwitch()); checkSnapshotsArgs(snapshots); builder.withUseDiff(snapshots[0], snapshots[1]); } if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) { String[] snapshots = getVals(command, DistCpOptionSwitch.RDIFF.getSwitch()); checkSnapshotsArgs(snapshots); builder.withUseRdiff(snapshots[0], snapshots[1]); } if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) { builder.withFiltersFile( getVal(command, DistCpOptionSwitch.FILTERS.getSwitch())); } if 
(command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) { builder.withLogPath( new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch()))); } if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) { final String workPath = getVal(command, DistCpOptionSwitch.WORK_PATH.getSwitch()); if (workPath != null && !workPath.isEmpty()) { builder.withAtomicWorkPath(new Path(workPath)); } } if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) { builder.withTrackMissing( new Path(getVal( command, DistCpOptionSwitch.TRACK_MISSING.getSwitch()))); } if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) { try { final Float mapBandwidth = Float.parseFloat( getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch())); builder.withMapBandwidth(mapBandwidth); } catch (NumberFormatException e) { throw new IllegalArgumentException("Bandwidth specified is invalid: " + getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e); } } if (command.hasOption( DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) { try { final Integer numThreads = Integer.parseInt(getVal(command, DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())); builder.withNumListstatusThreads(numThreads); } catch (NumberFormatException e) { throw new IllegalArgumentException( "Number of liststatus threads is invalid: " + getVal(command, DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e); } } if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) { try { final Integer maps = Integer.parseInt( getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch())); builder.maxMaps(maps); } catch (NumberFormatException e) { throw new IllegalArgumentException("Number of maps is invalid: " + getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e); } } if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) { builder.withCopyStrategy( getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch())); } if 
(command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) { builder.preserve( getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())); } if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) { LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" + " option. Ignoring."); } if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) { LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" + " option. Ignoring."); } if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) { final String chunkSizeStr = getVal(command, DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch().trim()); try { int csize = Integer.parseInt(chunkSizeStr); csize = csize > 0 ? csize : 0; LOG.info("Set distcp blocksPerChunk to " + csize); builder.withBlocksPerChunk(csize); } catch (NumberFormatException e) { throw new IllegalArgumentException("blocksPerChunk is invalid: " + chunkSizeStr, e); } } if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) { final String copyBufferSizeStr = getVal(command, DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch().trim()); try { int copyBufferSize = Integer.parseInt(copyBufferSizeStr); builder.withCopyBufferSize(copyBufferSize); } catch (NumberFormatException e) { throw new IllegalArgumentException("copyBufferSize is invalid: " + copyBufferSizeStr, e); } } return builder.build(); }
@Test public void testParseDeleteMissing() { DistCpOptions options = OptionsParser.parse(new String[] { "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldDeleteMissing()); options = OptionsParser.parse(new String[] { "-update", "-delete", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldSyncFolder()); Assert.assertTrue(options.shouldDeleteMissing()); options = OptionsParser.parse(new String[] { "-overwrite", "-delete", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldOverwrite()); Assert.assertTrue(options.shouldDeleteMissing()); try { OptionsParser.parse(new String[] { "-atomic", "-delete", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); Assert.fail("Atomic and delete folders were allowed"); } catch (IllegalArgumentException ignore) { } }
@Override public void build(final DefaultGoPublisher publisher, final EnvironmentVariableContext environmentVariableContext, TaskExtension taskExtension, ArtifactExtension artifactExtension, PluginRequestProcessorRegistry pluginRequestProcessorRegistry, Charset consoleLogCharset) { ExecutionResult executionResult = null; try { executionResult = taskExtension.execute(pluginId, (task, pluginDescriptor) -> executeTask(task, publisher, environmentVariableContext, consoleLogCharset)); } catch (Exception e) { logException(publisher, e); } finally { JobConsoleLoggerInternal.unsetContext(); } if (executionResult == null) { logError(publisher, "ExecutionResult cannot be null. Please return a success or a failure response."); } else if (!executionResult.isSuccessful()) { logError(publisher, executionResult.getMessagesForDisplay()); } }
@Test public void shouldRegisterTaskConfigDuringExecutionAndUnregisterOnSuccessfulCompletion() { final PluggableTaskBuilder builder = spy(new PluggableTaskBuilder(runIfConfigs, cancelBuilder, pluggableTask, "", "")); taskExtension = mock(TaskExtension.class); when(taskExtension.execute(eq(TEST_PLUGIN_ID), any())).thenReturn(ExecutionResult.success("yay")); builder.build(goPublisher, variableContext, taskExtension, null, null, UTF_8); assertThat((TaskExecutionContext) ReflectionUtil.getStaticField(JobConsoleLogger.class, "context")).isNull(); }
public static void retainMatching(Collection<String> values, String... patterns) { retainMatching(values, Arrays.asList(patterns)); }
@Test public void testRetainMatchingWithMatchingPattern() throws Exception { Collection<String> values = stringToList("A"); StringCollectionUtil.retainMatching(values, "A"); assertTrue(values.contains("A")); }
public abstract void filter(Metadata metadata) throws TikaException;
@Test public void testConfigIncludeFilter() throws Exception { TikaConfig config = getConfig("TIKA-3137-include.xml"); Metadata metadata = new Metadata(); metadata.set("title", "title"); metadata.set("author", "author"); metadata.set("content", "content"); config.getMetadataFilter().filter(metadata); assertEquals(2, metadata.size()); assertEquals("title", metadata.get("title")); assertEquals("author", metadata.get("author")); }
public int tryClaim(final int msgTypeId, final int length) { checkTypeId(msgTypeId); checkMsgLength(length); final AtomicBuffer buffer = this.buffer; final int recordLength = length + HEADER_LENGTH; final int recordIndex = claimCapacity(buffer, recordLength); if (INSUFFICIENT_CAPACITY == recordIndex) { return recordIndex; } buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength); MemoryAccess.releaseFence(); buffer.putInt(typeOffset(recordIndex), msgTypeId); return encodedMsgOffset(recordIndex); }
@Test void tryClaimReturnsInsufficientCapacityHead() { final int length = 10; final long headPosition = 0; final long tailPosition = CAPACITY - 10; when(buffer.getLongVolatile(HEAD_COUNTER_CACHE_INDEX)).thenReturn(headPosition); when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(headPosition); when(buffer.getLongVolatile(TAIL_COUNTER_INDEX)).thenReturn(tailPosition); final int index = ringBuffer.tryClaim(MSG_TYPE_ID, length); assertEquals(INSUFFICIENT_CAPACITY, index); final InOrder inOrder = inOrder(buffer); inOrder.verify(buffer).getLongVolatile(HEAD_COUNTER_CACHE_INDEX); inOrder.verify(buffer).getLongVolatile(TAIL_COUNTER_INDEX); inOrder.verify(buffer).getLongVolatile(HEAD_COUNTER_INDEX); inOrder.verifyNoMoreInteractions(); }
@SafeVarargs public static <T> Stream<T> of(T... array) { Assert.notNull(array, "Array must be not null!"); return Stream.of(array); }
@Test public void streamTestOrdinaryIterator() { final ArrayList<Integer> arrayList = CollUtil.newArrayList(1, 2, 3); assertArrayEquals(new Integer[]{1, 2, 3}, StreamUtil.of(arrayList.iterator()).toArray()); final HashSet<Integer> hashSet = CollUtil.newHashSet(1, 2, 3); assertEquals(hashSet, StreamUtil.of(hashSet.iterator()).collect(Collectors.toSet())); }
@VisibleForTesting ImmutableList<EventWithContext> eventsFromAggregationResult(EventFactory eventFactory, AggregationEventProcessorParameters parameters, AggregationResult result) throws EventProcessorException { final ImmutableList.Builder<EventWithContext> eventsWithContext = ImmutableList.builder(); final Set<String> sourceStreams = eventStreamService.buildEventSourceStreams(getStreams(parameters), result.sourceStreams()); for (final AggregationKeyResult keyResult : result.keyResults()) { if (!satisfiesConditions(keyResult)) { LOG.debug("Skipping result <{}> because the conditions <{}> don't match", keyResult, config.conditions()); continue; } final String keyString = String.join("|", keyResult.key()); final String eventMessage = createEventMessageString(keyString, keyResult); // Extract event time and range from the key result or use query time range as fallback. // These can be different, e.g. during catch up processing. final DateTime eventTime = keyResult.timestamp().orElse(result.effectiveTimerange().to()); final Event event = eventFactory.createEvent(eventDefinition, eventTime, eventMessage); // The keyResult timestamp is set to the end of the range event.setTimerangeStart(keyResult.timestamp().map(t -> t.minus(config.searchWithinMs())).orElse(parameters.timerange().getFrom())); event.setTimerangeEnd(keyResult.timestamp().orElse(parameters.timerange().getTo())); event.setReplayInfo(EventReplayInfo.builder() .timerangeStart(event.getTimerangeStart()) .timerangeEnd(event.getTimerangeEnd()) .query(config.query()) .streams(sourceStreams) .filters(config.filters()) .build()); sourceStreams.forEach(event::addSourceStream); final Map<String, Object> fields = new HashMap<>(); // Each group value will be a separate field in the message to make it usable as event fields. 
// // Example result: // groupBy=["application_name", "username"] // result-key=["sshd", "jane"] // // Message fields: // application_name=sshd // username=jane for (int i = 0; i < config.groupBy().size(); i++) { try { fields.put(config.groupBy().get(i), keyResult.key().get(i)); } catch (IndexOutOfBoundsException e) { throw new EventProcessorException( "Couldn't create events for: " + eventDefinition.title() + " (possibly due to non-existing grouping fields)", false, eventDefinition.id(), eventDefinition, e); } } // Group By fields need to be saved on the event so they are available to the subsequent notification events event.setGroupByFields(fields.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString()))); // The field name for the series value is composed of the series function and field. We don't take the // series ID into account because it would be very hard to use for the user. That means a series with // the same function and field but different ID would overwrite a previous one. // This shouldn't be a problem though, because the same function and field will always compute the same // value. // // Examples: // aggregation_value_count_source=42 // aggregation_value_card_anonid=23 for (AggregationSeriesValue seriesValue : keyResult.seriesValues()) { final String function = seriesValue.series().type().toLowerCase(Locale.ROOT); final Optional<String> field = fieldFromSeries(seriesValue.series()); final String fieldName = field.map(f -> String.format(Locale.ROOT, "aggregation_value_%s_%s", function, f)) .orElseGet(() -> String.format(Locale.ROOT, "aggregation_value_%s", function)); fields.put(fieldName, seriesValue.value()); } // This is the concatenated key value fields.put("aggregation_key", keyString); // TODO: Can we find a useful source value? 
final Message message = messageFactory.createMessage(eventMessage, "", result.effectiveTimerange().to()); message.addFields(fields); // Ask any event query modifier for its state and collect it into the event modifier state final Map<String, Object> eventModifierState = eventQueryModifiers.stream() .flatMap(modifier -> modifier.eventModifierData(result.additionalResults()).entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); LOG.debug("Creating event {}/{} - {} {} ({})", eventDefinition.title(), eventDefinition.id(), keyResult.key(), seriesString(keyResult), fields); eventsWithContext.add(EventWithContext.builder() .event(event) .messageContext(message) .eventModifierState(eventModifierState) .build()); } return eventsWithContext.build(); }
@Test public void testEventsFromAggregationResultWithEmptyResultUsesEventDefinitionStreamAsSourceStreams() throws EventProcessorException { final DateTime now = DateTime.now(DateTimeZone.UTC); final AbsoluteRange timerange = AbsoluteRange.create(now.minusHours(1), now.minusHours(1).plusMillis(SEARCH_WINDOW_MS)); // We expect to get the end of the aggregation timerange as event time final TestEvent event1 = new TestEvent(timerange.to()); final TestEvent event2 = new TestEvent(timerange.to()); when(eventFactory.createEvent(any(EventDefinition.class), any(DateTime.class), anyString())) .thenReturn(event1) // first invocation return value .thenReturn(event2); // second invocation return value final EventDefinitionDto eventDefinitionDto = buildEventDefinitionDto(ImmutableSet.of("stream-2"), ImmutableList.of(), null, emptyList()); final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder() .timerange(timerange) .build(); final AggregationEventProcessor eventProcessor = new AggregationEventProcessor(eventDefinitionDto, searchFactory, eventProcessorDependencyCheck, stateService, moreSearch, eventStreamService, messages, notificationService, permittedStreams, Set.of(), messageFactory); final AggregationResult result = buildAggregationResult(timerange, timerange.to(), ImmutableList.of("one", "two")); final ImmutableList<EventWithContext> eventsWithContext = eventProcessor.eventsFromAggregationResult(eventFactory, parameters, result); assertThat(eventsWithContext).hasSize(1); assertThat(eventsWithContext.get(0)).satisfies(eventWithContext -> { final Event event = eventWithContext.event(); assertThat(event.getId()).isEqualTo(event1.getId()); assertThat(event.getMessage()).isEqualTo(event1.getMessage()); assertThat(event.getEventTimestamp()).isEqualTo(timerange.to()); assertThat(event.getTimerangeStart()).isEqualTo(timerange.from()); assertThat(event.getTimerangeEnd()).isEqualTo(timerange.to()); // Must contain the stream from the event 
definition because there is none in the result assertThat(event.getSourceStreams()).containsOnly("stream-2"); final Message message = eventWithContext.messageContext().orElse(null); assertThat(message).isNotNull(); assertThat(message.getField("group_field_one")).isEqualTo("one"); assertThat(message.getField("group_field_two")).isEqualTo("two"); assertThat(message.getField("aggregation_key")).isEqualTo("one|two"); assertThat(message.getField("aggregation_value_count")).isEqualTo(0.0d); }); }
public static void addConfigsToProperties( Properties props, Map<String, String> commonConf, Map<String, String> clientConf) { for (Map.Entry<String, String> commonEntry : commonConf.entrySet()) { props.setProperty(commonEntry.getKey(), commonEntry.getValue()); } for (Map.Entry<String, String> entry : clientConf.entrySet()) { props.setProperty(entry.getKey(), entry.getValue()); } }
@Test public void testCommonConfigOverwritesDefaultProps() { Properties props = new Properties(); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put(ProducerConfig.ACKS_CONFIG, "all"); Properties resultProps = new Properties(); resultProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); resultProps.put(ProducerConfig.ACKS_CONFIG, "1"); resultProps.put(ProducerConfig.LINGER_MS_CONFIG, "1000"); WorkerUtils.addConfigsToProperties( props, Collections.singletonMap(ProducerConfig.ACKS_CONFIG, "1"), Collections.singletonMap(ProducerConfig.LINGER_MS_CONFIG, "1000")); assertEquals(resultProps, props); }
@Override public void analyzeImpl(Analyzer analyzer) throws AnalysisException { desc = analyzer.registerVirtualColumnRef(super.getColumnName(), type, tupleDescriptor); numDistinctValues = desc.getStats().getNumDistinctValues(); }
@Test public void analyzeImpl() { try { virtualSlot.analyzeImpl(analyzer); } catch (Exception e) { Assert.fail("analyze throw exception"); } }
public static void main(String[] args) { var context = new ClassPathXmlApplicationContext("applicationContext.xml"); var repository = context.getBean(PersonRepository.class); var peter = new Person("Peter", "Sagan", 17); var nasta = new Person("Nasta", "Kuzminova", 25); var john = new Person("John", "lawrence", 35); var terry = new Person("Terry", "Law", 36); // Add new Person records repository.save(peter); repository.save(nasta); repository.save(john); repository.save(terry); // Count Person records LOGGER.info("Count Person records: {}", repository.count()); // Print all records var persons = (List<Person>) repository.findAll(); persons.stream().map(Person::toString).forEach(LOGGER::info); // Update Person nasta.setName("Barbora"); nasta.setSurname("Spotakova"); repository.save(nasta); repository.findById(2L).ifPresent(p -> LOGGER.info("Find by id 2: {}", p)); // Remove record from Person repository.deleteById(2L); // count records LOGGER.info("Count Person records: {}", repository.count()); // find by name repository .findOne(new PersonSpecifications.NameEqualSpec("John")) .ifPresent(p -> LOGGER.info("Find by John is {}", p)); // find by age persons = repository.findAll(new PersonSpecifications.AgeBetweenSpec(20, 40)); LOGGER.info("Find Person with age between 20,40: "); persons.stream().map(Person::toString).forEach(LOGGER::info); repository.deleteAll(); context.close(); }
@Test void shouldExecuteWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
public static Optional<CheckpointStorage> fromConfig( ReadableConfig config, ClassLoader classLoader, @Nullable Logger logger) throws IllegalStateException, DynamicCodeLoadingException { Preconditions.checkNotNull(config, "config"); Preconditions.checkNotNull(classLoader, "classLoader"); final String storageName = config.get(CheckpointingOptions.CHECKPOINT_STORAGE); if (storageName == null) { if (logger != null) { logger.debug( "The configuration {} has not be set in the current" + " sessions config.yaml. Falling back to a default CheckpointStorage" + " type. Users are strongly encouraged explicitly set this configuration" + " so they understand how their applications are checkpointing" + " snapshots for fault-tolerance.", CheckpointingOptions.CHECKPOINT_STORAGE.key()); } return Optional.empty(); } switch (storageName.toLowerCase()) { case JOB_MANAGER_STORAGE_NAME: return Optional.of(createJobManagerCheckpointStorage(config, classLoader, logger)); case FILE_SYSTEM_STORAGE_NAME: return Optional.of(createFileSystemCheckpointStorage(config, classLoader, logger)); default: if (logger != null) { logger.info("Loading state backend via factory '{}'", storageName); } CheckpointStorageFactory<?> factory; try { @SuppressWarnings("rawtypes") Class<? extends CheckpointStorageFactory> clazz = Class.forName(storageName, false, classLoader) .asSubclass(CheckpointStorageFactory.class); factory = clazz.newInstance(); } catch (ClassNotFoundException e) { throw new DynamicCodeLoadingException( "Cannot find configured state backend factory class: " + storageName, e); } catch (ClassCastException | InstantiationException | IllegalAccessException e) { throw new DynamicCodeLoadingException( "The class configured under '" + CheckpointingOptions.CHECKPOINT_STORAGE.key() + "' is not a valid checkpoint storage factory (" + storageName + ')', e); } return Optional.of(factory.createFromConfig(config, classLoader)); } }
@Test void testLoadJobManagerStorageNoParameters() throws Exception { // we configure with the explicit string (rather than // AbstractStateBackend#X_STATE_BACKEND_NAME) // to guard against config-breaking changes of the name final Configuration config = new Configuration(); config.set(CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager"); CheckpointStorage storage = CheckpointStorageLoader.fromConfig(config, cl, null).get(); assertThat(storage).isInstanceOf(JobManagerCheckpointStorage.class); }
@Override public DataNodeDto resetNode(String nodeId) throws NodeNotFoundException { final DataNodeDto node = nodeService.byNodeId(nodeId); if (node.getDataNodeStatus() != DataNodeStatus.REMOVED) { throw new IllegalArgumentException("Only previously removed data nodes can rejoin the cluster."); } DataNodeLifecycleEvent e = DataNodeLifecycleEvent.create(node.getNodeId(), DataNodeLifecycleTrigger.RESET); clusterEventBus.post(e); return node; }
@Test public void resetNodeFailsWhenNodeNotRemoved() throws NodeNotFoundException { final String testNodeId = "node"; nodeService.registerServer(buildTestNode(testNodeId, DataNodeStatus.AVAILABLE)); Exception e = assertThrows(IllegalArgumentException.class, () -> { classUnderTest.resetNode(testNodeId); }); assertEquals("Only previously removed data nodes can rejoin the cluster.", e.getMessage()); verifyNoMoreInteractions(clusterEventBus); }
public double[][] test(DataFrame data) { DataFrame x = formula.x(data); int n = x.nrow(); int ntrees = models.length; double[][] prediction = new double[ntrees][n]; for (int j = 0; j < n; j++) { Tuple xj = x.get(j); double base = 0; for (int i = 0; i < ntrees; i++) { base = base + models[i].tree.predict(xj); prediction[i][j] = base / (i+1); } } return prediction; }
@Test public void testAutoMPG() { test("autoMPG", AutoMPG.formula, AutoMPG.data, 3.5588); }
@Override public KeyValueStore<K, V> build() { return new MeteredKeyValueStore<>( maybeWrapCaching(maybeWrapLogging(storeSupplier.get())), storeSupplier.metricsScope(), time, keySerde, valueSerde); }
@Test public void shouldHaveCachingAndChangeLoggingWhenBothEnabled() { setUp(); final KeyValueStore<String, String> store = builder .withLoggingEnabled(Collections.emptyMap()) .withCachingEnabled() .build(); final WrappedStateStore caching = (WrappedStateStore) ((WrappedStateStore) store).wrapped(); final WrappedStateStore changeLogging = (WrappedStateStore) caching.wrapped(); assertThat(store, instanceOf(MeteredKeyValueStore.class)); assertThat(caching, instanceOf(CachingKeyValueStore.class)); assertThat(changeLogging, instanceOf(ChangeLoggingKeyValueBytesStore.class)); assertThat(changeLogging.wrapped(), CoreMatchers.equalTo(inner)); }
public Value parse(String json) { return this.delegate.parse(json); }
@Test public void testExponentialInteger2() throws Exception { final JsonParser parser = new JsonParser(); final Value msgpackValue = parser.parse("123e2"); assertTrue(msgpackValue.getValueType().isNumberType()); // TODO: Consider this needs to be an integer? // See: https://github.com/embulk/embulk/issues/775 assertTrue(msgpackValue.getValueType().isFloatType()); assertFalse(msgpackValue.getValueType().isIntegerType()); assertFalse(msgpackValue.getValueType().isStringType()); assertEquals(12300.0, msgpackValue.asFloatValue().toDouble(), 0.000000001); // Not sure this |toString| is to be tested... assertEquals("12300.0", msgpackValue.asFloatValue().toString()); }
@SuppressWarnings("unchecked") @Udf public <T> List<T> union( @UdfParameter(description = "First array of values") final List<T> left, @UdfParameter(description = "Second array of values") final List<T> right) { if (left == null || right == null) { return null; } final Set<T> combined = Sets.newLinkedHashSet(left); combined.addAll(right); return (List<T>) Arrays.asList(combined.toArray()); }
@Test public void shouldReturnDistinctValues() { final List<String> input1 = Arrays.asList("foo", "foo", "bar"); final List<String> input2 = Arrays.asList("baz", "foo"); final List<String> result = udf.union(input1, input2); assertThat(result, contains("foo", "bar", "baz")); }
@Override public boolean trySetCapacity(int capacity) { return get(trySetCapacityAsync(capacity)); }
@Test public void testAddFullQueueError() { RBoundedBlockingQueue<Integer> queue1 = redisson.getBoundedBlockingQueue("bounded-queue:testAddFullQueueError"); assertThat(queue1.trySetCapacity(1)).isTrue(); assertThat(queue1.add(1)).isTrue(); try { queue1.add(2); } catch (RedisException e) { assertThat(e.getCause()).isInstanceOf(IllegalStateException.class); } }
public static String buildURIFromPattern(String pattern, List<Parameter> parameters) { if (parameters != null) { // Browse parameters and choose between template or query one. for (Parameter parameter : parameters) { String wadlTemplate = "{" + parameter.getName() + "}"; String swaggerTemplate = "/:" + parameter.getName(); if (pattern.contains(wadlTemplate)) { // It's a template parameter. pattern = pattern.replace(wadlTemplate, encodePath(parameter.getValue())); } else if (pattern.contains(swaggerTemplate)) { // It's a template parameter. pattern = pattern.replace(":" + parameter.getName(), encodePath(parameter.getValue())); } else { // It's a query parameter, ensure we have started delimiting them. if (!pattern.contains("?")) { pattern += "?"; } if (pattern.contains("=")) { pattern += "&"; } pattern += parameter.getName() + "=" + encodeValue(parameter.getValue()); } } } return pattern; }
@Test void testBuildURIFromPatternWithEncoding() { // Prepare a bunch of parameters. Parameter nameParam = new Parameter(); nameParam.setName("name"); nameParam.setValue("Eclair Cafe"); Parameter descriptionParam = new Parameter(); descriptionParam.setName("description"); descriptionParam.setValue("My desc"); List<Parameter> parameters = new ArrayList<>(); parameters.add(nameParam); parameters.add(descriptionParam); // Test with old wadl like template format. String pattern = "http://localhost:8080/pastry/{name}"; String uri = URIBuilder.buildURIFromPattern(pattern, parameters); assertTrue("http://localhost:8080/pastry/Eclair%20Cafe?description=My+desc".equals(uri)); // Test with new swagger like template format. pattern = "http://localhost:8080/pastry/:name"; uri = URIBuilder.buildURIFromPattern(pattern, parameters); assertTrue("http://localhost:8080/pastry/Eclair%20Cafe?description=My+desc".equals(uri)); }
@Override public void stopPlugins() { throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute stopPlugins!"); }
@Test public void stopPlugins() { assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.stopPlugins()); }
public static <T> LengthPrefixCoder<T> of(Coder<T> valueCoder) { checkNotNull(valueCoder, "Coder not expected to be null"); return new LengthPrefixCoder<>(valueCoder); }
@Test public void testCoderIsSerializableWithWellKnownCoderType() throws Exception { CoderProperties.coderSerializable(LengthPrefixCoder.of(GlobalWindow.Coder.INSTANCE)); }
public static URL getResourceUrl(String resource) throws IOException { if (resource.startsWith(CLASSPATH_PREFIX)) { String path = resource.substring(CLASSPATH_PREFIX.length()); ClassLoader classLoader = ResourceUtils.class.getClassLoader(); URL url = (classLoader != null ? classLoader.getResource(path) : ClassLoader.getSystemResource(path)); if (url == null) { throw new FileNotFoundException("Resource [" + resource + "] does not exist"); } return url; } try { return new URL(resource); } catch (MalformedURLException ex) { return new File(resource).toURI().toURL(); } }
@Test
void testGetResourceUrlForFile() throws IOException {
    // A file:// URL pointing at an existing temp file must resolve to a non-null URL.
    File tempFile = File.createTempFile("test", ".txt");
    try {
        URL resourceUrl = ResourceUtils.getResourceUrl("file://" + tempFile.getPath());
        assertNotNull(resourceUrl);
    } finally {
        tempFile.deleteOnExit();
    }
}
/**
 * Creates a working directory rooted in the configured temporary directory.
 *
 * @return a new {@link LocalWorkingDir} backed by {@code getTmpDir()}
 */
public WorkingDir createWorkingDirectory() {
    return new LocalWorkingDir(getTmpDir());
}
@Test void shouldCreateWorkingDirGivenKestraTmpDir() { // Given WorkingDir workingDirectory = workingDirFactory.createWorkingDirectory(); // When Path path = workingDirectory.path(); // Then assertThat(path.toFile().getAbsolutePath().startsWith("/tmp/sub/dir/tmp/"), is(true)); }
/**
 * Parses the message payload as an {@link Alarm}, reloads the alarm's latest state from the
 * alarm service, and routes the message to the TRUE/FALSE relation depending on whether the
 * current alarm status matches one of the statuses configured for this node.
 *
 * @throws TbNodeException if the payload cannot be parsed as an alarm
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
    try {
        Alarm alarm = JacksonUtil.fromString(msg.getData(), Alarm.class);
        Objects.requireNonNull(alarm, "alarm is null");
        // Re-fetch the alarm: the payload may carry a stale status.
        ListenableFuture<Alarm> latest = ctx.getAlarmService().findAlarmByIdAsync(ctx.getTenantId(), alarm.getId());
        Futures.addCallback(latest, new FutureCallback<>() {
            @Override
            public void onSuccess(@Nullable Alarm result) {
                if (result == null) {
                    // The alarm referenced by the message no longer exists.
                    ctx.tellFailure(msg, new TbNodeException("No such alarm found."));
                    return;
                }
                boolean isPresent = config.getAlarmStatusList().stream()
                        .anyMatch(alarmStatus -> result.getStatus() == alarmStatus);
                ctx.tellNext(msg, isPresent ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
            }

            @Override
            public void onFailure(Throwable t) {
                ctx.tellFailure(msg, t);
            }
        }, ctx.getDbCallbackExecutor());
    } catch (Exception e) {
        // Malformed payloads are expected occasionally -> debug level; anything else is an error.
        if (e instanceof IllegalArgumentException || e instanceof NullPointerException) {
            log.debug("[{}][{}] Failed to parse alarm: [{}] error [{}]", ctx.getTenantId(), ctx.getRuleChainName(), msg.getData(), e.getMessage());
        } else {
            log.error("[{}][{}] Failed to parse alarm: [{}]", ctx.getTenantId(), ctx.getRuleChainName(), msg.getData(), e);
        }
        throw new TbNodeException(e);
    }
}
@Test
void givenUnparseableAlarm_whenOnMsg_then_Failure() {
    // Payload that cannot be deserialized into an Alarm.
    String data = "{\"Number\":1113718,\"id\":8.1}";
    TbMsg tbMsg = getTbMsg(data);
    willReturn("Default Rule Chain").given(ctx).getRuleChainName();

    // The node must wrap the parse error into a TbNodeException.
    assertThatThrownBy(() -> node.onMsg(ctx, tbMsg))
            .as("onMsg")
            .isInstanceOf(TbNodeException.class)
            .hasCauseInstanceOf(IllegalArgumentException.class)
            .hasMessage("java.lang.IllegalArgumentException: The given string value cannot be transformed to Json object: {\"Number\":1113718,\"id\":8.1}");
}
/**
 * Schedules {@code task} to run once {@code thread} dies.
 *
 * @throws NullPointerException     if {@code thread} or {@code task} is null
 * @throws IllegalArgumentException if {@code thread} is not alive
 */
public static void watch(Thread thread, Runnable task) {
    ObjectUtil.checkNotNull(thread, "thread");
    ObjectUtil.checkNotNull(task, "task");
    if (thread.isAlive()) {
        schedule(thread, task, true);
    } else {
        // Watching a dead thread would never fire the task.
        throw new IllegalArgumentException("thread must be alive.");
    }
}
/**
 * Verifies that the watcher rejects non-started threads and that the watch task runs only
 * after the watched thread terminates.
 */
@Test
@Timeout(value = 10000, unit = TimeUnit.MILLISECONDS)
public void testWatch() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    // A thread that sleeps until interrupted.
    final Thread t = new Thread() {
        @Override
        public void run() {
            for (;;) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ignore) {
                    break;
                }
            }
        }
    };
    // Counts down only when the task fires after the thread has died.
    final Runnable task = new Runnable() {
        @Override
        public void run() {
            if (!t.isAlive()) {
                latch.countDown();
            }
        }
    };

    // Watching a thread that has not been started must be rejected.
    try {
        ThreadDeathWatcher.watch(t, task);
        fail("must reject to watch a non-alive thread.");
    } catch (IllegalArgumentException e) {
        // expected
    }

    t.start();
    ThreadDeathWatcher.watch(t, task);

    // As long as the thread is alive, the task should not run.
    assertThat(latch.await(750, TimeUnit.MILLISECONDS), is(false));

    // Interrupt the thread to terminate it.
    t.interrupt();

    // The task must be run on termination.
    latch.await();
}
/**
 * Static factory for a {@link MappingAllocator} distributing queue ids over brokers.
 *
 * @param idToBroker                  existing queue-id to broker assignment
 * @param brokerNumMap                current number of queues per broker
 * @param brokerNumMapBeforeRemapping queue counts before remapping; callers may pass null
 *                                    (see tests) — semantics handled by MappingAllocator
 */
public static MappingAllocator buildMappingAllocator(Map<Integer, String> idToBroker, Map<String, Integer> brokerNumMap,
        Map<String, Integer> brokerNumMapBeforeRemapping) {
    return new MappingAllocator(idToBroker, brokerNumMap, brokerNumMapBeforeRemapping);
}
/**
 * Verifies the mapping allocator distributes queues evenly (within one) across brokers and
 * that the allocation is stable across repeated runs.
 */
@Test
public void testAllocator() {
    //stability
    for (int i = 0; i < 10; i++) {
        int num = 3;
        Map<String, Integer> brokerNumMap = buildBrokerNumMap(num);
        TopicQueueMappingUtils.MappingAllocator allocator = TopicQueueMappingUtils.buildMappingAllocator(new HashMap<>(), brokerNumMap, null);
        // Doubling the queue count must give every broker exactly 2 queues.
        allocator.upToNum(num * 2);
        for (Map.Entry<String, Integer> entry: allocator.getBrokerNumMap().entrySet()) {
            Assert.assertEquals(2L, entry.getValue().longValue());
        }
        Assert.assertEquals(num * 2, allocator.getIdToBroker().size());
        testIdToBroker(allocator.idToBroker, allocator.getBrokerNumMap());
        // An uneven total (3n - 1) must leave each broker with either 2 or 3 queues.
        allocator.upToNum(num * 3 - 1);
        for (Map.Entry<String, Integer> entry: allocator.getBrokerNumMap().entrySet()) {
            Assert.assertTrue(entry.getValue() >= 2);
            Assert.assertTrue(entry.getValue() <= 3);
        }
        Assert.assertEquals(num * 3 - 1, allocator.getIdToBroker().size());
        testIdToBroker(allocator.idToBroker, allocator.getBrokerNumMap());
    }
}
@VisibleForTesting void checkNoPendingTasks(DbSession dbSession, EntityDto entityDto) { //This check likely can be removed when we remove the column 'private' from components table in SONAR-20126. checkState(countPendingTask(dbSession, entityDto.getKey()) == 0, "Component visibility can't be changed as long as it has background task(s) pending or in progress"); }
@Test
void checkNoPendingTasks_whenEntityFoundAndNoTaskInQueue_doesNotThrow() {
    // Given an entity with no queued background tasks...
    EntityDto entity = mockEntityDto();
    when(dbClient.entityDao().selectByKey(dbSession, entity.getKey())).thenReturn(Optional.of(entity));

    // ...the pending-tasks check must complete without throwing.
    visibilityService.checkNoPendingTasks(dbSession, entity);
}
/**
 * Builds one result row per storage unit to be shown, combining live connection properties
 * with the configured data-source pool properties.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowStorageUnitsStatement sqlStatement, final ContextManager contextManager) {
    Collection<LocalDataQueryResultRow> result = new LinkedList<>();
    for (Entry<String, StorageUnit> entry : getToBeShownStorageUnits(sqlStatement).entrySet()) {
        ConnectionProperties connectionProps = entry.getValue().getConnectionProperties();
        DataSourcePoolProperties dataSourcePoolProps = getDataSourcePoolProperties(entry.getValue());
        // Standard pool properties are resolved through their synonyms (e.g. maxPoolSize).
        Map<String, Object> poolProps = dataSourcePoolProps.getPoolPropertySynonyms().getStandardProperties();
        Map<String, Object> customProps = getCustomProperties(dataSourcePoolProps.getCustomProperties().getProperties(), connectionProps.getQueryProperties());
        // Column order must match the SHOW STORAGE UNITS result-set definition.
        result.add(new LocalDataQueryResultRow(entry.getKey(),
                entry.getValue().getStorageType().getType(), connectionProps.getHostname(), connectionProps.getPort(), connectionProps.getCatalog(),
                getStandardProperty(poolProps, "connectionTimeoutMilliseconds"),
                getStandardProperty(poolProps, "idleTimeoutMilliseconds"),
                getStandardProperty(poolProps, "maxLifetimeMilliseconds"),
                getStandardProperty(poolProps, "maxPoolSize"),
                getStandardProperty(poolProps, "minPoolSize"),
                getStandardProperty(poolProps, "readOnly"),
                customProps));
    }
    return result;
}
/**
 * SHOW STORAGE UNITS must also list units that are not used by any rule (here: ds_2).
 */
@Test
void assertGetRowsWithUnusedStorageUnits() {
    RuleMetaData metaData = mockUnusedStorageUnitsRuleMetaData();
    when(database.getRuleMetaData()).thenReturn(metaData);
    Collection<LocalDataQueryResultRow> actual = executor.getRows(new ShowStorageUnitsStatement(mock(DatabaseSegment.class), null, 0), mock(ContextManager.class));
    assertThat(actual.size(), is(1));
    LocalDataQueryResultRow row = actual.iterator().next();
    // Cells in order: name, type, host, port, db, conn/idle/lifetime timeouts,
    // max/min pool size, read-only flag, custom properties (JSON).
    assertThat(row.getCell(1), is("ds_2"));
    assertThat(row.getCell(2), is("MySQL"));
    assertThat(row.getCell(3), is("localhost"));
    assertThat(row.getCell(4), is("3307"));
    assertThat(row.getCell(5), is("ds_2"));
    assertThat(row.getCell(6), is(""));
    assertThat(row.getCell(7), is(""));
    assertThat(row.getCell(8), is(""));
    assertThat(row.getCell(9), is("100"));
    assertThat(row.getCell(10), is("10"));
    assertThat(row.getCell(11), is(""));
    assertThat(row.getCell(12), is("{\"openedConnections\":[],\"closed\":false}"));
}
/**
 * Evaluates a quality-gate condition against a measure.
 * <p>Returns an OK result (with a null value) when the measure cannot be parsed.
 *
 * @throws IllegalArgumentException if the condition's metric type is not supported
 */
public EvaluationResult evaluate(Condition condition, Measure measure) {
    checkArgument(SUPPORTED_METRIC_TYPE.contains(condition.getMetric().getType()),
        "Conditions on MetricType %s are not supported", condition.getMetric().getType());

    final Comparable parsedValue = parseMeasure(measure);
    if (parsedValue == null) {
        return new EvaluationResult(Measure.Level.OK, null);
    }
    return evaluateCondition(condition, parsedValue)
        .orElseGet(() -> new EvaluationResult(Measure.Level.OK, parsedValue));
}
@Test
public void testErrorLevel() {
    Metric floatMetric = createMetric(FLOAT);
    Measure measure = newMeasureBuilder().create(10.2d, 1, null);

    // 10.2 < 10.3 holds -> condition violated -> ERROR; 10.2 < 10.1 does not -> OK.
    assertThat(underTest.evaluate(createCondition(floatMetric, LESS_THAN, "10.3"), measure)).hasLevel(ERROR);
    assertThat(underTest.evaluate(createCondition(floatMetric, LESS_THAN, "10.1"), measure)).hasLevel(OK);
    assertThat(underTest.evaluate(new Condition(floatMetric, LESS_THAN.getDbValue(), "10.3"), measure)).hasLevel(Measure.Level.ERROR);
}
/**
 * Converts a successful GraphHopper response into a navigation-style JSON document with
 * "routes", "waypoints", "code" and "uuid" top-level fields.
 *
 * @throws IllegalStateException if the response contains errors; use
 *         NavigateResponseConverter#convertFromGHResponseError for those
 */
public static ObjectNode convertFromGHResponse(GHResponse ghResponse, TranslationMap translationMap, Locale locale, DistanceConfig distanceConfig) {
    ObjectNode json = JsonNodeFactory.instance.objectNode();
    if (ghResponse.hasErrors())
        throw new IllegalStateException(
                "If the response has errors, you should use the method NavigateResponseConverter#convertFromGHResponseError");
    PointList waypoints = ghResponse.getBest().getWaypoints();
    final ArrayNode routesJson = json.putArray("routes");
    List<ResponsePath> paths = ghResponse.getAll();
    // One route object per alternative path.
    for (int i = 0; i < paths.size(); i++) {
        ResponsePath path = paths.get(i);
        ObjectNode pathJson = routesJson.addObject();
        putRouteInformation(pathJson, path, i, translationMap, locale, distanceConfig);
    }
    final ArrayNode waypointsJson = json.putArray("waypoints");
    for (int i = 0; i < waypoints.size(); i++) {
        ObjectNode waypointJson = waypointsJson.addObject();
        // TODO get names
        waypointJson.put("name", "");
        putLocation(waypoints.getLat(i), waypoints.getLon(i), waypointJson);
    }
    json.put("code", "Ok");
    // TODO: Maybe we need a different format... uuid: "cji4ja4f8004o6xrsta8w4p4h"
    json.put("uuid", UUID.randomUUID().toString().replaceAll("-", ""));
    return json;
}
/**
 * Checks the "intersections" emitted with the intersection path detail: the first intersection
 * matches the first snapped waypoint and has no incoming edge; later intersections carry
 * in/out edge indices.
 */
@Test
public void intersectionTest() {
    GHResponse rsp = hopper.route(new GHRequest(42.554851, 1.536198, 42.510071, 1.548128).setProfile(profile)
            .setPathDetails(Collections.singletonList("intersection")));
    ObjectNode json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.ENGLISH, distanceConfig);
    JsonNode steps = json.get("routes").get(0).get("legs").get(0).get("steps");
    JsonNode step = steps.get(0);
    JsonNode intersection = step.get("intersections").get(0);
    // The very first intersection has no incoming edge.
    assertFalse(intersection.has("in"));
    assertEquals(1, intersection.get("out").asInt());
    JsonNode location = intersection.get("location");
    // The first intersection to be equal to the first snapped waypoint
    assertEquals(rsp.getBest().getWaypoints().get(0).lon, location.get(0).asDouble(), .000001);
    assertEquals(rsp.getBest().getWaypoints().get(0).lat, location.get(1).asDouble(), .000001);
    step = steps.get(4);
    intersection = step.get("intersections").get(3);
    assertEquals(2, intersection.get("in").asInt());
    assertEquals(0, intersection.get("out").asInt());
    location = intersection.get("location");
    // Locations are [lon, lat] pairs.
    assertEquals(1.534679, location.get(0).asDouble(), .000001);
    assertEquals(42.556444, location.get(1).asDouble(), .000001);
}
/**
 * Builds the {@link CreateTableCommand} for a CTAS output node.
 * <p>When the sink is windowed and an emit strategy is given, the window info is rebuilt so
 * that it carries the strategy's output refinement.
 */
public CreateTableCommand createTableCommand(
    final KsqlStructuredDataOutputNode outputNode,
    final Optional<RefinementInfo> emitStrategy
) {
    Optional<WindowInfo> windowInfo = outputNode.getKsqlTopic().getKeyFormat().getWindowInfo();

    if (emitStrategy.isPresent()) {
        // No-op when the key format is not windowed (Optional.map on empty).
        windowInfo = windowInfo.map(info -> WindowInfo.of(
            info.getType(),
            info.getSize(),
            Optional.of(emitStrategy.get().getOutputRefinement())
        ));
    }

    return new CreateTableCommand(
        outputNode.getSinkName().get(),
        outputNode.getSchema(),
        outputNode.getTimestampColumn(),
        outputNode.getKsqlTopic().getKafkaTopicName(),
        Formats.from(outputNode.getKsqlTopic()),
        windowInfo,
        Optional.of(outputNode.getOrReplace()),
        Optional.of(false)
    );
}
@Test public void shouldCreateCommandForCreateSourceTable() { // Given: final CreateTable ddlStatement = new CreateTable(SOME_NAME, TableElements.of( tableElement("COL1", new Type(BIGINT), PRIMARY_KEY_CONSTRAINT), tableElement("COL2", new Type(SqlTypes.STRING))), false, true, withProperties, true); // When: final CreateTableCommand result = createSourceFactory .createTableCommand(ddlStatement, ksqlConfig); // Then: assertThat(result.getSourceName(), is(SOME_NAME)); assertThat(result.getTopicName(), is(TOPIC_NAME)); assertThat(result.getIsSource(), is(true)); }
/**
 * @return all enabled identity providers, sorted by name
 */
public List<IdentityProvider> getAllEnabledAndSorted() {
    final Comparator<IdentityProvider> byName = Comparator.comparing(TO_NAME);
    return providersByKey.values().stream()
        .filter(IS_ENABLED_FILTER)
        .sorted(byName)
        .toList();
}
@Test
public void return_nothing_when_no_identity_provider() {
    // A repository built without providers must expose an empty list.
    IdentityProviderRepository repository = new IdentityProviderRepository(null);

    assertThat(repository.getAllEnabledAndSorted()).isEmpty();
}
@Override public Decision decideActionWithGlobalInfo(HsSpillingInfoProvider spillingInfoProvider) { Decision.Builder builder = Decision.builder(); // Save the cost of lock, if pool size is changed between checkSpill and checkRelease, pool // size checker will handle this inconsistency. int poolSize = spillingInfoProvider.getPoolSize(); checkSpill(spillingInfoProvider, poolSize, builder); checkRelease(spillingInfoProvider, poolSize, builder); return builder.build(); }
/**
 * Exercises the global spill/release decision: with the spill-trigger ratio reached, the
 * not-yet-spilled buffers must be selected for spilling and the already-spilled ones for
 * release, per subpartition.
 */
@Test
void testDecideActionWithGlobalInfo() {
    final int subpartition1 = 0;
    final int subpartition2 = 1;

    List<BufferIndexAndChannel> subpartitionBuffers1 =
            createBufferIndexAndChannelsList(subpartition1, 1, 2, 3, 4, 5);
    List<BufferIndexAndChannel> subpartitionBuffers2 =
            createBufferIndexAndChannelsList(subpartition2, 1, 2, 3, 4, 5);
    TestingSpillingInfoProvider spillInfoProvider =
            TestingSpillingInfoProvider.builder()
                    .setGetNumSubpartitionsSupplier(() -> NUM_SUBPARTITIONS)
                    .addSubpartitionBuffers(subpartition1, subpartitionBuffers1)
                    .addSubpartitionBuffers(subpartition2, subpartitionBuffers2)
                    .addSpillBuffers(subpartition1, Arrays.asList(0, 1, 2, 3))
                    .addSpillBuffers(subpartition2, Arrays.asList(1, 2, 3))
                    // Exactly at the spilling-trigger threshold.
                    .setGetNumTotalUnSpillBuffersSupplier(
                            () -> (int) (10 * NUM_BUFFERS_TRIGGER_SPILLING_RATIO))
                    .setGetNumTotalRequestedBuffersSupplier(() -> 10)
                    .setGetPoolSizeSupplier(() -> 10)
                    .build();

    Decision decision = spillStrategy.decideActionWithGlobalInfo(spillInfoProvider);

    // all not spilled buffers need to spill.
    Map<Integer, List<BufferIndexAndChannel>> expectedSpillBuffers = new HashMap<>();
    expectedSpillBuffers.put(subpartition1, subpartitionBuffers1.subList(4, 5));
    expectedSpillBuffers.put(
            subpartition2, new ArrayList<>(subpartitionBuffers2.subList(0, 1)));
    expectedSpillBuffers.get(subpartition2).addAll(subpartitionBuffers2.subList(4, 5));
    assertThat(decision.getBufferToSpill()).isEqualTo(expectedSpillBuffers);

    // Already-spilled (but unreleased) buffers must be released.
    Map<Integer, List<BufferIndexAndChannel>> expectedReleaseBuffers = new HashMap<>();
    expectedReleaseBuffers.put(
            subpartition1, new ArrayList<>(subpartitionBuffers1.subList(0, 3)));
    expectedReleaseBuffers.put(
            subpartition2, new ArrayList<>(subpartitionBuffers2.subList(1, 4)));
    assertThat(decision.getBufferToRelease()).isEqualTo(expectedReleaseBuffers);
}
/**
 * Returns whether {@code obj} is an array with a primitive component type
 * (boolean[], byte[], short[], char[], int[], long[], float[] or double[]).
 * {@code null}, scalars, and object arrays all yield {@code false}.
 */
public static boolean isPrimitiveArray(final Object obj) {
    if (obj == null) {
        return false;
    }
    final Class<?> type = obj.getClass();
    // Equivalent to the eight instanceof checks over the primitive array types.
    return type.isArray() && type.getComponentType().isPrimitive();
}
@SuppressWarnings("ConstantValue") @Test void isPrimitiveArray() { Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new int[]{})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new int[]{1})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new int[]{1, 2})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new long[]{1})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new double[]{1})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new byte[]{1})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new char[]{'c'})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new float[]{1})); Assertions.assertTrue(DataTypeUtil.isPrimitiveArray(new short[]{1})); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(null)); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(1)); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(1L)); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(1.0)); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray('c')); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(1f)); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray("test")); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(new Object())); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(new Object[]{})); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(new Object[]{new Object()})); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(new Object[]{new Object(), new Object()})); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(new Object[]{null})); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(new Object[]{null, new Object()})); Assertions.assertFalse(DataTypeUtil.isPrimitiveArray(new String[]{"test"})); }
/**
 * Encodes a logging event as a single-line JSON object. Each "with*" flag controls whether
 * the corresponding member is emitted; the line ends with the configured JSON line separator.
 *
 * @param event the event to encode
 * @return UTF-8 bytes of the JSON line
 */
@Override
public byte[] encode(ILoggingEvent event) {
    // Events carrying a throwable need substantially more room in the builder.
    final int initialCapacity = event.getThrowableProxy() == null ? DEFAULT_SIZE : DEFAULT_SIZE_WITH_THROWABLE;
    StringBuilder sb = new StringBuilder(initialCapacity);
    sb.append(OPEN_OBJ);
    if (withSequenceNumber) {
        appenderMemberWithLongValue(sb, SEQUENCE_NUMBER_ATTR_NAME, event.getSequenceNumber());
        sb.append(VALUE_SEPARATOR);
    }
    if (withTimestamp) {
        appenderMemberWithLongValue(sb, TIMESTAMP_ATTR_NAME, event.getTimeStamp());
        sb.append(VALUE_SEPARATOR);
    }
    if (withNanoseconds) {
        appenderMemberWithLongValue(sb, NANOSECONDS_ATTR_NAME, event.getNanoseconds());
        sb.append(VALUE_SEPARATOR);
    }
    if (withLevel) {
        // Level may be null; emit the NULL_STR placeholder in that case.
        String levelStr = event.getLevel() != null ? event.getLevel().levelStr : NULL_STR;
        appenderMember(sb, LEVEL_ATTR_NAME, levelStr);
        sb.append(VALUE_SEPARATOR);
    }
    if (withThreadName) {
        appenderMember(sb, THREAD_NAME_ATTR_NAME, jsonEscape(event.getThreadName()));
        sb.append(VALUE_SEPARATOR);
    }
    if (withLoggerName) {
        appenderMember(sb, LOGGER_ATTR_NAME, event.getLoggerName());
        sb.append(VALUE_SEPARATOR);
    }
    if (withContext) {
        appendLoggerContext(sb, event.getLoggerContextVO());
        sb.append(VALUE_SEPARATOR);
    }
    // NOTE: unlike the members above, these helpers are not followed by an explicit
    // VALUE_SEPARATOR here — presumably they append their own; confirm in the helpers.
    if (withMarkers)
        appendMarkers(sb, event);
    if (withMDC)
        appendMDC(sb, event);
    if (withKVPList)
        appendKeyValuePairs(sb, event);
    if (withMessage) {
        appenderMember(sb, MESSAGE_ATTR_NAME, jsonEscape(event.getMessage()));
        sb.append(VALUE_SEPARATOR);
    }
    if (withFormattedMessage) {
        appenderMember(sb, FORMATTED_MESSAGE_ATTR_NAME, jsonEscape(event.getFormattedMessage()));
        sb.append(VALUE_SEPARATOR);
    }
    if (withArguments)
        appendArgumentArray(sb, event);
    // The throwable is the last member, so no trailing separator is needed.
    if (withThrowable)
        appendThrowableProxy(sb, THROWABLE_ATTR_NAME, event.getThrowableProxy());
    sb.append(CLOSE_OBJ);
    sb.append(CoreConstants.JSON_LINE_SEPARATOR);
    return sb.toString().getBytes(UTF_8_CHARSET);
}
@Test void withThrowableHavingCause() throws JsonProcessingException { Throwable cause = new IllegalStateException("test cause"); Throwable t = new RuntimeException("test", cause); LoggingEvent event = new LoggingEvent("in withThrowableHavingCause test", logger, Level.WARN, "hello kvp", t, null); byte[] resultBytes = jsonEncoder.encode(event); String resultString = new String(resultBytes, StandardCharsets.UTF_8); //System.out.println(resultString); JsonLoggingEvent resultEvent = stringToLoggingEventMapper.mapStringToLoggingEvent(resultString); compareEvents(event, resultEvent); }
/**
 * Reconstructs a {@link MapConfig} from the decoded client-message parameters. Nullable
 * holder parameters are applied only when present; listener and query-cache holders are
 * adapted back to their config counterparts via the serialization service.
 */
@Override
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:MethodLength"})
protected IdentifiedDataSerializable getConfig() {
    MapConfig config = new MapConfig(parameters.name);
    config.setAsyncBackupCount(parameters.asyncBackupCount);
    config.setBackupCount(parameters.backupCount);
    config.setCacheDeserializedValues(CacheDeserializedValues.valueOf(parameters.cacheDeserializedValues));
    if (parameters.listenerConfigs != null && !parameters.listenerConfigs.isEmpty()) {
        config.setEntryListenerConfigs(
                (List<EntryListenerConfig>) adaptListenerConfigs(parameters.listenerConfigs, parameters.userCodeNamespace));
    }
    if (parameters.merkleTreeConfig != null) {
        config.setMerkleTreeConfig(parameters.merkleTreeConfig);
    }
    if (parameters.eventJournalConfig != null) {
        config.setEventJournalConfig(parameters.eventJournalConfig);
    }
    if (parameters.hotRestartConfig != null) {
        config.setHotRestartConfig(parameters.hotRestartConfig);
    }
    config.setInMemoryFormat(InMemoryFormat.valueOf(parameters.inMemoryFormat));
    config.setAttributeConfigs(parameters.attributeConfigs);
    config.setReadBackupData(parameters.readBackupData);
    config.setStatisticsEnabled(parameters.statisticsEnabled);
    config.setPerEntryStatsEnabled(parameters.perEntryStatsEnabled);
    config.setIndexConfigs(parameters.indexConfigs);
    if (parameters.mapStoreConfig != null) {
        // Holder -> config conversion needs the serialization service for stored implementations.
        config.setMapStoreConfig(parameters.mapStoreConfig.asMapStoreConfig(serializationService, parameters.userCodeNamespace));
    }
    config.setTimeToLiveSeconds(parameters.timeToLiveSeconds);
    config.setMaxIdleSeconds(parameters.maxIdleSeconds);
    if (parameters.evictionConfig != null) {
        config.setEvictionConfig(parameters.evictionConfig.asEvictionConfig(serializationService));
    }
    if (parameters.mergePolicy != null) {
        config.setMergePolicyConfig(mergePolicyConfig(parameters.mergePolicy, parameters.mergeBatchSize));
    }
    if (parameters.nearCacheConfig != null) {
        config.setNearCacheConfig(parameters.nearCacheConfig.asNearCacheConfig(serializationService));
    }
    config.setPartitioningStrategyConfig(getPartitioningStrategyConfig());
    if (parameters.partitionLostListenerConfigs != null && !parameters.partitionLostListenerConfigs.isEmpty()) {
        config.setPartitionLostListenerConfigs(
                (List<MapPartitionLostListenerConfig>) adaptListenerConfigs(parameters.partitionLostListenerConfigs, parameters.userCodeNamespace));
    }
    config.setSplitBrainProtectionName(parameters.splitBrainProtectionName);
    if (parameters.queryCacheConfigs != null && !parameters.queryCacheConfigs.isEmpty()) {
        List<QueryCacheConfig> queryCacheConfigs = new ArrayList<>(parameters.queryCacheConfigs.size());
        for (QueryCacheConfigHolder holder : parameters.queryCacheConfigs) {
            queryCacheConfigs.add(holder.asQueryCacheConfig(serializationService, parameters.userCodeNamespace));
        }
        config.setQueryCacheConfigs(queryCacheConfigs);
    }
    config.setWanReplicationRef(parameters.wanReplicationRef);
    config.setMetadataPolicy(MetadataPolicy.getById(parameters.metadataPolicy));
    // NOTE(review): the *Exists flags presumably distinguish fields omitted by older client
    // protocol versions from explicitly-sent values — confirm against the codec definition.
    if (parameters.isDataPersistenceConfigExists) {
        config.setDataPersistenceConfig(parameters.dataPersistenceConfig);
    }
    if (parameters.isTieredStoreConfigExists) {
        config.setTieredStoreConfig(parameters.tieredStoreConfig);
    }
    if (parameters.isPartitioningAttributeConfigsExists) {
        config.setPartitioningAttributeConfigs(parameters.partitioningAttributeConfigs);
    }
    if (parameters.isUserCodeNamespaceExists) {
        config.setUserCodeNamespace(parameters.userCodeNamespace);
    }
    return config;
}
/**
 * Regression test: encoding an add-map-config request with null for every nullable field must
 * still decode into a MapConfig equal to the default-constructed one (no NPEs on the member).
 */
@Test
public void doNotThrowException_whenNullValuesProvidedForNullableFields() {
    MapConfig mapConfig = new MapConfig("my-map");
    // Nullable parameters (listeners, indexes, map store, near cache, WAN ref, etc.) are
    // deliberately passed as null; positions must match the codec's parameter order.
    ClientMessage addMapConfigClientMessage = DynamicConfigAddMapConfigCodec.encodeRequest(
            mapConfig.getName(),
            mapConfig.getBackupCount(),
            mapConfig.getAsyncBackupCount(),
            mapConfig.getTimeToLiveSeconds(),
            mapConfig.getMaxIdleSeconds(),
            null,
            mapConfig.isReadBackupData(),
            mapConfig.getCacheDeserializedValues().name(),
            mapConfig.getMergePolicyConfig().getPolicy(),
            mapConfig.getMergePolicyConfig().getBatchSize(),
            mapConfig.getInMemoryFormat().name(),
            null,
            null,
            mapConfig.isStatisticsEnabled(),
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            mapConfig.getMetadataPolicy().getId(),
            mapConfig.isPerEntryStatsEnabled(),
            mapConfig.getDataPersistenceConfig(),
            mapConfig.getTieredStoreConfig(),
            null,
            mapConfig.getUserCodeNamespace()
    );
    AddMapConfigMessageTask addMapConfigMessageTask = createMessageTask(addMapConfigClientMessage);
    addMapConfigMessageTask.run();
    // The round-tripped config must equal the original defaults.
    MapConfig transmittedMapConfig = (MapConfig) addMapConfigMessageTask.getConfig();
    assertEquals(mapConfig, transmittedMapConfig);
}
/**
 * Launches the agent described by {@code descriptor}, with launcher-specific logging
 * configured for the duration of the launch.
 *
 * @return the launch result code from {@code doLaunch}
 */
@Override
public int launch(AgentLaunchDescriptor descriptor) {
    final LogConfigurator logging = new LogConfigurator("agent-launcher-logback.xml");
    return logging.runWithLogger(() -> doLaunch(descriptor));
}
@Test
public void shouldPassLauncherVersionToAgent() throws IOException {
    // Capture every launcher version handed to the agent process.
    final List<String> capturedVersions = new ArrayList<>();
    final AgentLauncher agentLauncher = new AgentLauncherImpl((launcherVersion, launcherMd5, urlConstructor, environmentVariables, context) -> {
        capturedVersions.add(launcherVersion);
        return 0;
    });
    TEST_AGENT_LAUNCHER.copyTo(AGENT_LAUNCHER_JAR);

    agentLauncher.launch(launchDescriptor());

    // Exactly one launch, carrying the current full GoCD version.
    assertThat(capturedVersions.size(), is(1));
    assertThat(capturedVersions.get(0), is(CurrentGoCDVersion.getInstance().fullVersion()));
}
/**
 * Truncates {@code message} to MAX_EXCEPTION_MESSAGE_SIZE characters, replacing the last
 * three characters of the allowance with "..." when truncation occurs.
 */
protected String cutMessageIfNeeded(final String message) {
    if (message.length() <= MAX_EXCEPTION_MESSAGE_SIZE) {
        return message;
    }
    return message.substring(0, MAX_EXCEPTION_MESSAGE_SIZE - 3) + "...";
}
@Test
public void cutMessageIfNeeded() throws MessageFormatException {
    // A message longer than the limit must be truncated and end with an ellipsis.
    char[] chars = new char[2056];
    Arrays.fill(chars, '1');

    String truncated = (new WireFormatInfoMarshaller()).cutMessageIfNeeded(String.valueOf(chars));

    assertEquals("Expected length " + MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH, truncated.length());
    assertTrue("Expected message tail ...", truncated.endsWith("..."));
}
/**
 * Executes the scenario's steps in order. beforeRun()/afterRun() are kept balanced even on
 * crashes so the parallel runner's countdown latches are never left hanging.
 */
@Override
public void run() {
    try {
        // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() controls progression (it may not be strictly sequential).
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Preserve the last real step result before recording the synthetic failure.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
/**
 * issue #1913: continueOnStepFailure with a keyword list must keep running after a failing
 * 'eval'; also covers the side issue that multi-line DSL keywords were not supported.
 */
@Test
void testContinueOnStepFailure8() {
    // issue #1913
    // side issue found - dsl keywords with multi line was not supported
    fail = true;
    run(
        "def var = 'foo'",
        "configure continueOnStepFailure = { enabled: true, continueAfter: true, keywords: [ 'eval' ] }",
        "match var == 'foo'",
        "eval",
        "\"\"\"",
        "if(true == true) { syntax error within JS line }",
        "\"\"\"",
        "match var == 'foo'",
        "configure continueOnStepFailure = { enabled: false }",
        "match var == 'foo'"
    );
    // The recorded failing step must be the 'eval' step.
    assertEquals("eval", sr.result.getFailedStep().getStep().getText());
}
/**
 * Loads a mail template by its id.
 *
 * @param id the template id
 * @return the matching template; may be {@code null} when no such row exists
 */
@Override
public MailTemplateDO getMailTemplate(Long id) {
    return mailTemplateMapper.selectById(id);
}
@Test public void testGetMailTemplate() { // mock 数据 MailTemplateDO dbMailTemplate = randomPojo(MailTemplateDO.class); mailTemplateMapper.insert(dbMailTemplate); // 准备参数 Long id = dbMailTemplate.getId(); // 调用 MailTemplateDO mailTemplate = mailTemplateService.getMailTemplate(id); // 断言 assertPojoEquals(dbMailTemplate, mailTemplate); }
/**
 * Fetches committed offsets for the given partitions, mapping partitions without a committed
 * offset to 0. Returns an empty map without touching the consumer when no partitions are given.
 *
 * @throws TimeoutException if the committed-offsets request times out
 * @throws StreamsException if the request fails for any other Kafka reason
 */
public static Map<TopicPartition, Long> fetchCommittedOffsets(final Set<TopicPartition> partitions,
                                                              final Consumer<byte[], byte[]> consumer) {
    if (partitions.isEmpty()) {
        return Collections.emptyMap();
    }

    final Map<TopicPartition, Long> committedOffsets;
    try {
        // those which do not have a committed offset would default to 0
        committedOffsets = consumer.committed(partitions).entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue() == null ? 0L : e.getValue().offset()));
    } catch (final TimeoutException timeoutException) {
        LOG.warn("The committed offsets request timed out, try increasing the consumer client's default.api.timeout.ms", timeoutException);
        throw timeoutException;
    } catch (final KafkaException fatal) {
        LOG.warn("The committed offsets request failed.", fatal);
        // NOTE(review): the message says "end offsets" but this fetches committed offsets —
        // looks like a copy/paste slip; confirm before changing (tests may match the text).
        throw new StreamsException(String.format("Failed to retrieve end offsets for %s", partitions), fatal);
    }
    return committedOffsets;
}
@Test
public void fetchCommittedOffsetsShouldReturnEmptyMapIfPartitionsAreEmpty() {
    // No partitions -> the result must be empty (and the consumer never queried).
    @SuppressWarnings("unchecked")
    final Consumer<byte[], byte[]> mockConsumer = mock(Consumer.class);

    assertTrue(fetchCommittedOffsets(emptySet(), mockConsumer).isEmpty());
}
/**
 * Static factory creating a {@link JAXBCoder} for the given class.
 *
 * @param jaxbClass the class to encode/decode
 */
public static <T> JAXBCoder<T> of(Class<T> jaxbClass) {
    return new JAXBCoder<>(jaxbClass);
}
@Test
public void testEncodeDecodeAfterClone() throws Exception {
    // A coder must still round-trip values after being serialized and deserialized itself.
    JAXBCoder<TestType> clonedCoder = SerializableUtils.clone(JAXBCoder.of(TestType.class));

    byte[] bytes = CoderUtils.encodeToByteArray(clonedCoder, new TestType("abc", 9999));

    assertEquals(new TestType("abc", 9999), CoderUtils.decodeFromByteArray(clonedCoder, bytes));
}
/**
 * Decodes and validates a SAML LogoutRequest from the HTTP request, resolves the matching
 * SAML session (by NameID, falling back to the first SessionIndex), enriches the model with
 * session and metadata details, verifies issuer and signature, and terminates the session.
 *
 * @throws SamlParseException      if the message cannot be decoded or the decoder cannot be initialized
 * @throws SamlSessionException    if no SAML session matches the request's identifier
 * @throws SamlValidationException if the issuer does not match the session's connection entity id
 */
public LogoutRequestModel parseLogoutRequest(HttpServletRequest request) throws SamlValidationException, SamlParseException, SamlSessionException, DienstencatalogusException {
    final LogoutRequestModel logoutRequestModel = new LogoutRequestModel();
    try {
        final BaseHttpServletRequestXMLMessageDecoder decoder = decodeRequest(request);
        var logoutRequest = (LogoutRequest) decoder.getMessageContext().getMessage();
        final SAMLBindingContext bindingContext = decoder.getMessageContext().getSubcontext(SAMLBindingContext.class);
        logoutRequestModel.setLogoutRequest(logoutRequest);
        logoutRequestModel.setRequest(request);
        validateRequest(logoutRequestModel);
        // Session lookup key: NameID when present, otherwise the first session index.
        var id = logoutRequest.getNameID() != null ? logoutRequest.getNameID().getValue() : logoutRequest.getSessionIndexes().get(0).getValue();
        var samlSession = samlSessionRepository.findById(id)
                .orElseThrow(() -> new SamlSessionException("LogoutRequest no saml session found for nameID: " + id));
        logoutRequestModel.setConnectionEntityId(samlSession.getConnectionEntityId());
        logoutRequestModel.setServiceEntityId(samlSession.getServiceEntityId());
        logoutRequestModel.setServiceUuid(samlSession.getServiceUuid());
        logoutRequestModel.setRelayState(bindingContext.getRelayState());
        logoutRequestModel.setEntranceSession(samlSession.getProtocolType().equals(ProtocolType.SAML_COMBICONNECT));
        dcMetadataService.resolveDcMetadata(logoutRequestModel);
        // The issuer in the message must match the connection the session was created for.
        if (!logoutRequestModel.getConnectionEntityId().equals(logoutRequestModel.getLogoutRequest().getIssuer().getValue())) {
            throw new SamlValidationException("Issuer not equal to connectorEntityId");
        }
        verifySignature(logoutRequestModel, logoutRequestModel.getLogoutRequest().getSignature());
        logout(samlSession);
        if (logger.isDebugEnabled())
            OpenSAMLUtils.logSAMLObject((LogoutRequest) decoder.getMessageContext().getMessage());
    } catch (MessageDecodingException e) {
        throw new SamlParseException("Authentication deflate decode exception", e);
    } catch (ComponentInitializationException e) {
        throw new SamlParseException("Authentication deflate initialization exception", e);
    }
    return logoutRequestModel;
}
@Test public void parseLogoutRequestNoIssueInstant() { httpRequestMock.setParameter("SAMLRequest", "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"); Exception exception = assertThrows(SamlValidationException.class, () -> logoutService.parseLogoutRequest(httpRequestMock)); assertEquals("LogoutRequest validation error", exception.getMessage()); }
@Override
public void stop() {
    // On shutdown, release the matched-result cache held by the MatchManager singleton.
    MatchManager.INSTANCE.getMatchedCache().release();
}
@Test
public void test() {
    // Smoke test: the flow-control init service must start and stop without throwing.
    final FlowControlInitServiceImpl service = new FlowControlInitServiceImpl();
    service.start();
    service.stop();
}
public WithJsonPath(JsonPath jsonPath, Matcher<T> resultMatcher) {
    // Matcher applied to whatever value the JSON path evaluates to.
    this.resultMatcher = resultMatcher;
    // Compiled JSON path evaluated against the examined document.
    this.jsonPath = jsonPath;
}
@Test
public void shouldNotMatchNonExistingJsonPath() {
    // Paths that resolve to no node must not match, whether the missing segment
    // is at the root, an out-of-bounds array index, or a missing leaf.
    assertThat(BOOKS_JSON, not(withJsonPath(compile("$.not_there"))));
    assertThat(BOOKS_JSON, not(withJsonPath(compile("$.store.book[5].title"))));
    assertThat(BOOKS_JSON, not(withJsonPath(compile("$.store.book[1].not_there"))));
}
/**
 * Generates a salt string for use with bcrypt hashing, using the "$2a$" version prefix.
 *
 * @param log_rounds the binary logarithm of the number of hashing rounds;
 *                   must lie within [MIN_LOG_ROUNDS, MAX_LOG_ROUNDS]
 * @param random     the source of randomness for the salt bytes
 * @return the encoded salt, e.g. {@code "$2a$10$..."}
 * @throws IllegalArgumentException if {@code log_rounds} is out of range
 */
public static String gensalt(int log_rounds, SecureRandom random) {
    if (log_rounds < MIN_LOG_ROUNDS || log_rounds > MAX_LOG_ROUNDS) {
        throw new IllegalArgumentException("Bad number of rounds");
    }
    StringBuilder rs = new StringBuilder();
    // Idiomatic Java array declaration (was C-style "byte rnd[]").
    byte[] rnd = new byte[BCRYPT_SALT_LEN];
    random.nextBytes(rnd);
    rs.append("$2a$");
    if (log_rounds < 10) {
        // Zero-pad single-digit round counts so the salt prefix is fixed-width.
        rs.append("0");
    }
    rs.append(log_rounds);
    rs.append("$");
    encode_base64(rnd, rnd.length, rs);
    return rs.toString();
}
@Test
public void testGensaltTooManyRounds() throws IllegalArgumentException {
    // 32 exceeds the maximum supported log_rounds, so gensalt must reject it.
    thrown.expect(IllegalArgumentException.class);
    BCrypt.gensalt(32);
}
/**
 * Enables JVM-wide DNS caching when the SLS configuration asks for it.
 * A TTL of -1 means "cache forever" for both successful and failed lookups.
 */
static void enableDNSCaching(Configuration conf) {
    final boolean cachingRequested = conf.getBoolean(
        SLSConfiguration.DNS_CACHING_ENABLED,
        SLSConfiguration.DNS_CACHING_ENABLED_DEFAULT);
    if (!cachingRequested) {
        return;
    }
    Security.setProperty(NETWORK_CACHE_TTL, "-1");
    Security.setProperty(NETWORK_NEGATIVE_CACHE_TTL, "-1");
}
@Test
public void testEnableCaching() {
    // Security properties are JVM-global; capture the defaults so they can be
    // restored in the finally block and not leak into other tests.
    String networkCacheDefault = Security.getProperty(SLSRunner.NETWORK_CACHE_TTL);
    String networkNegativeCacheDefault = Security.getProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL);
    try {
        Configuration conf = new Configuration(false);
        // check when dns caching is disabled: properties must be left untouched
        conf.setBoolean(SLSConfiguration.DNS_CACHING_ENABLED, false);
        SLSRunner.enableDNSCaching(conf);
        assertEquals(networkCacheDefault, Security.getProperty(SLSRunner.NETWORK_CACHE_TTL));
        assertEquals(networkNegativeCacheDefault, Security.getProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL));
        // check when dns caching is enabled: both TTLs must become "-1" (cache forever)
        conf.setBoolean(SLSConfiguration.DNS_CACHING_ENABLED, true);
        SLSRunner.enableDNSCaching(conf);
        assertEquals("-1", Security.getProperty(SLSRunner.NETWORK_CACHE_TTL));
        assertEquals("-1", Security.getProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL));
    } finally {
        // set security settings back to default
        Security.setProperty(SLSRunner.NETWORK_CACHE_TTL, String.valueOf(networkCacheDefault));
        Security.setProperty(SLSRunner.NETWORK_NEGATIVE_CACHE_TTL, String.valueOf(networkNegativeCacheDefault));
    }
}
@Override
public void onLeaderInformationChange(String componentId, LeaderInformation leaderInformation) {
    // Serialize with the rest of the leader-event handling; the confirmed leader
    // information is read under the same lock so the notification sees a
    // consistent snapshot for this component.
    synchronized (lock) {
        notifyLeaderInformationChangeInternal(
            componentId,
            leaderInformation,
            confirmedLeaderInformation.forComponentIdOrEmpty(componentId));
    }
}
@Test
void testGrantDoesNotBlockNotifyLeaderInformationChange() throws Exception {
    // A pending leadership grant must not block the handling of a
    // leader-information change event for the same component.
    testLeaderEventDoesNotBlockLeaderInformationChangeEventHandling(
        (listener, componentId, storedLeaderInformation) -> {
            listener.onLeaderInformationChange(
                componentId, storedLeaderInformation.forComponentIdOrEmpty(componentId));
        });
}
/**
 * Returns a snapshot of every known feature flag as a name-to-value map.
 */
@Override
public Map<String, String> getAll() {
    return flags.values()
        .stream()
        .collect(Collectors.toMap(f -> f.name, f -> f.value));
}
@Test
void testOverrideOrder() throws IOException {
    // Precedence under test, lowest to highest:
    // defaults < custom properties < system properties < environment variables.
    // Each FEATURE_n is defined in the n lowest-precedence sources, so the
    // resolved value identifies which source won.
    Map<String, String> defaultProps = Map.of(
        FEATURE_1, DEFAULT_PROPERTY_VALUE,
        FEATURE_2, DEFAULT_PROPERTY_VALUE,
        FEATURE_3, DEFAULT_PROPERTY_VALUE,
        FEATURE_4, DEFAULT_PROPERTY_VALUE);
    Map<String, String> customProps = Map.of(
        FEATURE_2, CUSTOM_PROPERTY_VALUE,
        FEATURE_3, CUSTOM_PROPERTY_VALUE,
        FEATURE_4, CUSTOM_PROPERTY_VALUE);
    Map<String, String> systemProps = Map.of(
        PREFIX_SYSTEM_PROPERTY + FEATURE_3, SYSTEM_PROPERTY_VALUE,
        PREFIX_SYSTEM_PROPERTY + FEATURE_4, SYSTEM_PROPERTY_VALUE);
    Map<String, String> envVars = Map.of(
        PREFIX_ENVIRONMENT_VARIABLE + FEATURE_4, ENVIRONMENT_VARIABLE_VALUE);
    FeatureFlags flags = create(defaultProps, customProps, systemProps, envVars);
    // Each feature must resolve to the value from the highest-precedence source defining it.
    assertThat(flags.getAll()).isEqualTo(Map.of(
        FEATURE_1, DEFAULT_PROPERTY_VALUE,
        FEATURE_2, CUSTOM_PROPERTY_VALUE,
        FEATURE_3, SYSTEM_PROPERTY_VALUE,
        FEATURE_4, ENVIRONMENT_VARIABLE_VALUE
    ));
}
@Override
public void configure(Map<String, ?> configs) {
    // Let the base sampler consume its common settings first.
    super.configure(configs);
    // Then wire the Prometheus-specific pieces from the same config map.
    configureSamplingInterval(configs);
    configurePrometheusAdapter(configs);
    configureQueryMap(configs);
}
@Test
public void testGetSamplesWithCustomSamplingInterval() throws Exception {
    Map<String, Object> config = new HashMap<>();
    config.put(PROMETHEUS_SERVER_ENDPOINT_CONFIG, "http://kafka-cluster-1.org:9090");
    // An explicit resolution step must override the adapter's sampling interval.
    config.put(PROMETHEUS_QUERY_RESOLUTION_STEP_MS_CONFIG, "5000");
    addCapacityConfig(config);
    _prometheusMetricSampler.configure(config);
    assertEquals(5000, _prometheusMetricSampler._prometheusAdapter.samplingIntervalMs());
}
@Udf(description = "Converts a string representation of a time in the given format"
    + " into the TIME value.")
public Time parseTime(
    @UdfParameter(
        description = "The string representation of a time.") final String formattedTime,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // SQL-style null propagation: a null input or pattern yields a null result.
  // Fixed: use the short-circuiting || instead of the bitwise | operator.
  if (formattedTime == null || formatPattern == null) {
    return null;
  }
  try {
    final TemporalAccessor ta = formatters.get(formatPattern).parse(formattedTime);
    // A TIME value must not carry date information; reject patterns that parsed
    // any date-based field. (Thrown inside the try, so — as before — it is
    // caught below and re-wrapped with the parse context.)
    final boolean hasDateField = Arrays.stream(ChronoField.values())
        .filter(ChronoField::isDateBased)
        .anyMatch(ta::isSupported);
    if (hasDateField) {
      throw new KsqlFunctionException("Time format contains date field.");
    }
    return new Time(TimeUnit.NANOSECONDS.toMillis(LocalTime.from(ta).toNanoOfDay()));
  } catch (ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to parse time '" + formattedTime
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
@Test
public void shouldHandleNullDate() {
    // When: a null input string must propagate as a SQL null, not throw.
    final Time result = udf.parseTime(null, "HHmmss");
    // Then:
    assertThat(result, is(nullValue()));
}
@Override // TODO(yimin) integrate this method with load() method public void cacheData(String ufsPath, long length, long pos, boolean isAsync) throws IOException { List<CompletableFuture<Void>> futures = new ArrayList<>(); // TODO(yimin) To implement the sync data caching. alluxio.grpc.FileInfo fi = getGrpcFileInfo(ufsPath, -1); String fileId = new AlluxioURI(ufsPath).hash(); for (long i = pos / mPageSize; i <= Math.min(pos + length, fi.getLength()) / mPageSize; ++i) { PageId pageId = new PageId(fileId, i); // TODO(yimin) As an optimization, data does not need to load on a page basis. // Can implement a bulk load mechanism and load a couple of pages at the same time, // to improve the performance. if (mCacheManager.hasPageUnsafe(pageId)) { continue; } long loadPos = i * mPageSize; long loadLength = Math.min(mPageSize, fi.getLength() - loadPos); if (loadLength == 0) { continue; } if (!mLoadingPages.addIfAbsent(pageId)) { continue; } futures.add(CompletableFuture.runAsync(() -> { try { if (mCacheManager.hasPageUnsafe(pageId)) { return; } LOG.debug("Preloading {} pos: {} length: {} started", ufsPath, loadPos, loadLength); loadPages(ufsPath, Collections.singletonList(pageId), fi.getLength()); LOG.debug("Preloading {} pos: {} length: {} finished", ufsPath, loadPos, loadLength); } catch (Exception e) { LOG.info("Preloading failed for {} page: {}", ufsPath, pageId, e); } finally { mLoadingPages.remove(pageId); } }, mCacheDataExecutor)); if (!isAsync) { try { CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get(); } catch (Exception e) { throw new RuntimeException(e); } } } }
@Test
public void testCacheDataNotPageAligned() throws Exception {
    int numPages = 10;
    // One byte short of a page boundary: the final page is a partial page.
    long length = mPageSize * numPages - 1;
    String ufsPath = mTestFolder.newFile("test").getAbsolutePath();
    byte[] buffer = BufferUtils.getIncreasingByteArray((int) length);
    BufferUtils.writeBufferToFile(ufsPath, buffer);
    // Synchronous caching (isAsync = false) so all pages exist before asserting.
    mWorker.cacheData(ufsPath, length, 0, false);
    List<PageId> cachedPages = mCacheManager.getCachedPageIdsByFileId(new AlluxioURI(ufsPath).hash(), length);
    assertEquals(numPages, cachedPages.size());
    int start = 0;
    for (PageId pageId : cachedPages) {
        // The last page only holds the remainder; all others are full-sized.
        long size = numPages == pageId.getPageIndex() + 1 ? length % mPageSize : mPageSize;
        byte[] buff = new byte[(int) size];
        mCacheManager.get(pageId, (int) size, buff, 0);
        assertTrue(BufferUtils.equalIncreasingByteArray(start, (int) size, buff));
        start += mPageSize;
    }
}
/**
 * Creates a SequenceFile writer, selecting the writer implementation from the
 * compression option (explicit, or the configured default).
 */
public static Writer createWriter(Configuration conf, Writer.Option... opts) throws IOException {
    Writer.CompressionOption compressionOption = Options.getOption(Writer.CompressionOption.class, opts);
    CompressionType kind;
    if (compressionOption != null) {
        kind = compressionOption.getValue();
    } else {
        // No explicit compression option: use the configured default and prepend
        // it so downstream writer constructors see an explicit option.
        kind = getDefaultCompressionType(conf);
        opts = Options.prependOptions(opts, Writer.compression(kind));
    }
    switch (kind) {
        // Unknown compression kinds deliberately fall through to NONE
        // (uncompressed writer).
        default:
        case NONE:
            return new Writer(conf, opts);
        case RECORD:
            return new RecordCompressWriter(conf, opts);
        case BLOCK:
            return new BlockCompressWriter(conf, opts);
    }
}
@SuppressWarnings("deprecation")
@Test
public void testCreateWriterOnExistingFile() throws IOException {
    // Creating a writer over a file that already exists must succeed
    // (exercises the overwrite path of the deprecated createWriter overload).
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path name = new Path(new Path(GenericTestUtils.getTempPath("createWriterOnExistingFile")), "file");
    fs.create(name);
    SequenceFile.createWriter(fs, conf, name, RandomDatum.class, RandomDatum.class, 512, (short) 1, 4096, false, CompressionType.NONE, null, new Metadata());
}
/**
 * Returns the standard deviation of the given samples: the square root of the variance.
 */
public static double sd(int[] array) {
    double variance = var(array);
    return sqrt(variance);
}
@Test
public void testSd() {
    System.out.println("sd");
    // NOTE(review): this exercises the double[] overload of MathEx.sd, not the
    // int[] overload — confirm the int[] overload is covered elsewhere.
    double[] data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    assertEquals(2.73861, MathEx.sd(data), 1E-5);
}
@Override
public Block toBlock(Type desiredType) {
    // This column only materializes as BIGINT; any other requested type is a caller bug.
    checkArgument(BIGINT.equals(desiredType), "type doesn't match: %s", desiredType);
    int numberOfRecords = numberOfRecords();
    return new LongArrayBlock(
        numberOfRecords,
        Optional.ofNullable(nulls),
        // A null longs array means every value is null; substitute zeros so the
        // block still has one slot per record.
        longs == null ? new long[numberOfRecords] : longs);
}
@Test
public void testReadBlockAllNullsOption2() {
    // All-null column represented with an explicit zero-filled values array
    // (the other option being a null values array).
    PrestoThriftBlock columnsData = longColumn(
        new boolean[] {true, true, true, true, true, true, true},
        new long[] {0, 0, 0, 0, 0, 0, 0});
    Block actual = columnsData.toBlock(BIGINT);
    assertBlockEquals(actual, list(null, null, null, null, null, null, null));
}
/**
 * Cancels the job with the given id, auditing the attempt.
 *
 * @param jobId the id of the job to cancel
 * @throws JobDoesNotExistException if neither the plan tracker nor the workflow
 *         tracker knows the job
 */
public void cancel(long jobId) throws JobDoesNotExistException {
    try (JobMasterAuditContext auditContext = createAuditContext("cancel")) {
        auditContext.setJobId(jobId);
        PlanCoordinator planCoordinator = mPlanTracker.getCoordinator(jobId);
        if (planCoordinator == null) {
            // Not a plan job: try the workflow tracker before giving up.
            // NOTE(review): this success path returns without calling
            // auditContext.setSucceeded(true) — confirm that is intended.
            if (!mWorkflowTracker.cancel(jobId)) {
                throw new JobDoesNotExistException(jobId);
            }
            return;
        }
        planCoordinator.cancel();
        auditContext.setSucceeded(true);
    }
}
@Test
public void cancelNonExistingJob() {
    // Cancelling an unknown job id must fail with JobDoesNotExistException
    // carrying the standard message for that id.
    try {
        mJobMaster.cancel(1);
        Assert.fail("cannot cancel non-existing job");
    } catch (JobDoesNotExistException e) {
        Assert.assertEquals(ExceptionMessage.JOB_DOES_NOT_EXIST.getMessage(1), e.getMessage());
    }
}
/**
 * Two-argument convenience overload: generates the inclusive series from
 * {@code start} to {@code end} with a step of +1 (ascending) or -1 (descending).
 */
@Udf
public List<Long> generateSeriesLong(
    @UdfParameter(description = "The beginning of the series") final long start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
  // Fixed: compare start and end directly instead of testing the sign of
  // (end - start), which overflows for ranges wider than Long.MAX_VALUE and
  // would pick the wrong step direction.
  return generateSeriesLong(start, end, end > start ? 1 : -1);
}
@Test
public void shouldComputeLongRange() {
    // An ascending two-argument call defaults to step +1, inclusive of the end.
    final List<Long> range = rangeUdf.generateSeriesLong(0, 9);
    assertThat(range, hasSize(10));
    long val = 0;
    for (final Long i : range) {
        assertThat(val++, is(i));
    }
}
/**
 * Builds execution units from a SQL rewrite result, dispatching on its concrete
 * type: a generic result is expanded across data sources, while a routed result
 * already carries its own route units.
 */
public static Collection<ExecutionUnit> build(final ShardingSphereDatabase database,
                                              final SQLRewriteResult sqlRewriteResult,
                                              final SQLStatementContext sqlStatementContext) {
    if (sqlRewriteResult instanceof GenericSQLRewriteResult) {
        return build(database, (GenericSQLRewriteResult) sqlRewriteResult, sqlStatementContext);
    }
    return build((RouteSQLRewriteResult) sqlRewriteResult);
}
@Test
void assertBuildGenericSQLRewriteResult() {
    // A generic rewrite result must yield exactly one execution unit, targeting
    // the first instance data source with the rewritten SQL and its parameters.
    String sql = "sql";
    GenericSQLRewriteResult genericSQLRewriteResult = new GenericSQLRewriteResult(new SQLRewriteUnit(sql, Collections.singletonList("foo_param")));
    ResourceMetaData resourceMetaData = mock(ResourceMetaData.class);
    String firstDataSourceName = "firstDataSourceName";
    when(resourceMetaData.getAllInstanceDataSourceNames()).thenReturn(Arrays.asList(firstDataSourceName, "lastDataSourceName"));
    RuleMetaData ruleMetaData = new RuleMetaData(Collections.emptyList());
    ShardingSphereDatabase database = new ShardingSphereDatabase(DefaultDatabase.LOGIC_NAME, mock(DatabaseType.class), resourceMetaData, ruleMetaData, buildDatabase());
    Collection<ExecutionUnit> actual = ExecutionContextBuilder.build(database, genericSQLRewriteResult, mock(SQLStatementContext.class));
    Collection<ExecutionUnit> expected = Collections.singletonList(new ExecutionUnit(firstDataSourceName, new SQLUnit(sql, Collections.singletonList("foo_param"))));
    assertThat(actual, is(expected));
}
/**
 * Applies a single-input Beam PTransform to a Flink DataSet.
 *
 * <p>Convenience wrapper around the multi-input internal variant: the DataSet is
 * registered under the key {@code "input"}, the transform is applied, and the
 * lone result is pulled back out under {@code "output"}.
 */
public <InputT, OutputT, CollectionT extends PCollection<? extends InputT>> DataSet<OutputT> applyBeamPTransform(
    DataSet<InputT> input, PTransform<CollectionT, PCollection<OutputT>> transform) {
  return (DataSet)
      getNonNull(
          applyBeamPTransformInternal(
              ImmutableMap.of("input", input),
              // Unwraps the single named input back into the transform's expected type.
              (pipeline, map) -> (CollectionT) getNonNull(map, "input"),
              (output) -> ImmutableMap.of("output", output),
              transform,
              input.getExecutionEnvironment()),
          "output");
}
@Test
public void testApplyCompositeTransform() throws Exception {
    // A composite PTransform (two chained prefixing transforms) must be expanded
    // and applied as a whole by the adapter.
    ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
    DataSet<String> input = env.fromCollection(ImmutableList.of("a", "b", "c"));
    DataSet<String> result =
        new BeamFlinkDataSetAdapter()
            .applyBeamPTransform(
                input,
                new PTransform<PCollection<String>, PCollection<String>>() {
                    @Override
                    public PCollection<String> expand(PCollection<String> input) {
                        return input.apply(withPrefix("x")).apply(withPrefix("y"));
                    }
                });
    assertThat(result.collect(), containsInAnyOrder("yxa", "yxb", "yxc"));
}
/**
 * Builds the application-wide {@link RetryRegistry} bean: creates the registry
 * from the configured default/shared configs, attaches event consumers, and
 * eagerly initializes the configured retry instances.
 */
@Bean
public RetryRegistry retryRegistry(RetryConfigurationProperties retryConfigurationProperties,
                                   EventConsumerRegistry<RetryEvent> retryEventConsumerRegistry,
                                   RegistryEventConsumer<Retry> retryRegistryEventConsumer,
                                   @Qualifier("compositeRetryCustomizer") CompositeCustomizer<RetryConfigCustomizer> compositeRetryCustomizer) {
    RetryRegistry retryRegistry = createRetryRegistry(retryConfigurationProperties, retryRegistryEventConsumer, compositeRetryCustomizer);
    registerEventConsumer(retryRegistry, retryEventConsumerRegistry, retryConfigurationProperties);
    initRetryRegistry(retryConfigurationProperties, compositeRetryCustomizer, retryRegistry);
    return retryRegistry;
}
@Test
public void testCreateRetryRegistryWithSharedConfigs() {
    // Base configs: a "default" config and a named "sharedConfig".
    InstanceProperties defaultProperties = new InstanceProperties();
    defaultProperties.setMaxAttempts(3);
    defaultProperties.setWaitDuration(Duration.ofMillis(100L));
    InstanceProperties sharedProperties = new InstanceProperties();
    sharedProperties.setMaxAttempts(2);
    sharedProperties.setWaitDuration(Duration.ofMillis(100L));
    // Instances inheriting from a base config but overriding the wait duration.
    InstanceProperties backendWithDefaultConfig = new InstanceProperties();
    backendWithDefaultConfig.setBaseConfig("default");
    backendWithDefaultConfig.setWaitDuration(Duration.ofMillis(200L));
    InstanceProperties backendWithSharedConfig = new InstanceProperties();
    backendWithSharedConfig.setBaseConfig("sharedConfig");
    backendWithSharedConfig.setWaitDuration(Duration.ofMillis(300L));
    RetryConfigurationProperties retryConfigurationProperties = new RetryConfigurationProperties();
    retryConfigurationProperties.getConfigs().put("default", defaultProperties);
    retryConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties);
    retryConfigurationProperties.getInstances().put("backendWithDefaultConfig", backendWithDefaultConfig);
    retryConfigurationProperties.getInstances().put("backendWithSharedConfig", backendWithSharedConfig);
    RetryConfiguration retryConfiguration = new RetryConfiguration();
    DefaultEventConsumerRegistry<RetryEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
    RetryRegistry retryRegistry = retryConfiguration.retryRegistry(retryConfigurationProperties, eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), compositeRetryCustomizerTest());
    assertThat(retryRegistry.getAllRetries().size()).isEqualTo(2);
    // Should get default config and overwrite max attempt and wait time
    Retry retry1 = retryRegistry.retry("backendWithDefaultConfig");
    assertThat(retry1).isNotNull();
    assertThat(retry1.getRetryConfig().getMaxAttempts()).isEqualTo(3);
    assertThat(retry1.getRetryConfig().getIntervalBiFunction().apply(1, null)).isEqualTo(200L);
    // Should get shared config and overwrite wait time
    Retry retry2 = retryRegistry.retry("backendWithSharedConfig");
    assertThat(retry2).isNotNull();
    assertThat(retry2.getRetryConfig().getMaxAttempts()).isEqualTo(2);
    assertThat(retry2.getRetryConfig().getIntervalBiFunction().apply(1, null)).isEqualTo(300L);
    // Unknown backend should get default config of Registry
    Retry retry3 = retryRegistry.retry("unknownBackend");
    assertThat(retry3).isNotNull();
    assertThat(retry3.getRetryConfig().getMaxAttempts()).isEqualTo(3);
    assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3);
}
@Override
public boolean delete() throws FileSystemException {
    // Delegate to the resolved target file object.
    return requireResolvedFileObject().delete();
}
@Test
public void testDelegatesDeleteWithFileSelector() throws FileSystemException {
    // The wrapper must forward the selector to the resolved file object exactly
    // once (this exercises the delete(FileSelector) overload, not delete()).
    FileSelector fileSelector = mock( FileSelector.class );
    fileObject.delete( fileSelector );
    verify( resolvedFileObject, times( 1 ) ).delete( fileSelector );
}
/**
 * Tells whether the given project has any notification subscriber for the
 * requested notification types.
 *
 * <p>Collects the dispatcher keys of every handler whose notification class is
 * one of the requested types (handlers without metadata are skipped), then asks
 * the properties DAO whether any subscriber exists for those dispatchers.
 */
public boolean hasProjectSubscribersForTypes(String projectUuid, Set<Class<? extends Notification>> notificationTypes) {
    Set<String> dispatcherKeys = handlers.stream()
        .filter(handler -> notificationTypes.stream().anyMatch(notificationType -> handler.getNotificationClass() == notificationType))
        .map(NotificationHandler::getMetadata)
        .filter(Optional::isPresent)
        .map(Optional::get)
        .map(NotificationDispatcherMetadata::getDispatcherKey)
        .collect(Collectors.toSet());
    return dbClient.propertiesDao().hasProjectNotificationSubscribersForDispatchers(projectUuid, dispatcherKeys);
}
@Test
public void hasProjectSubscribersForType_checks_property_for_each_dispatcher_key_supporting_Notification_type() {
    // Two handlers support Notification1 (each with its own dispatcher key);
    // the Notification2 handler has no metadata and must be ignored.
    String dispatcherKey1A = randomAlphabetic(5);
    String dispatcherKey1B = randomAlphabetic(6);
    String projectUuid = randomAlphabetic(7);
    NotificationHandler<Notification1> handler1A = getMockOfNotificationHandlerForType(Notification1.class);
    when(handler1A.getMetadata()).thenReturn(Optional.of(NotificationDispatcherMetadata.create(dispatcherKey1A)));
    NotificationHandler<Notification1> handler1B = getMockOfNotificationHandlerForType(Notification1.class);
    when(handler1B.getMetadata()).thenReturn(Optional.of(NotificationDispatcherMetadata.create(dispatcherKey1B)));
    NotificationHandler<Notification2> handler2 = getMockOfNotificationHandlerForType(Notification2.class);
    when(handler2.getMetadata()).thenReturn(Optional.empty());
    boolean expected = true;
    when(propertiesDao.hasProjectNotificationSubscribersForDispatchers(projectUuid, ImmutableSet.of(dispatcherKey1A, dispatcherKey1B)))
        .thenReturn(expected);
    NotificationService underTest = new NotificationService(dbClient, new NotificationHandler[]{handler1A, handler1B, handler2});
    // Querying for Notification1 alone uses both dispatcher keys.
    boolean flag = underTest.hasProjectSubscribersForTypes(projectUuid, ImmutableSet.of(Notification1.class));
    verify(propertiesDao).hasProjectNotificationSubscribersForDispatchers(projectUuid, ImmutableSet.of(dispatcherKey1A, dispatcherKey1B));
    verifyNoMoreInteractions(propertiesDao);
    assertThat(flag).isEqualTo(expected);
    // Adding Notification2 changes nothing: its handler exposes no dispatcher key.
    flag = underTest.hasProjectSubscribersForTypes(projectUuid, ImmutableSet.of(Notification1.class, Notification2.class));
    verify(propertiesDao, times(2)).hasProjectNotificationSubscribersForDispatchers(projectUuid, ImmutableSet.of(dispatcherKey1A, dispatcherKey1B));
    verifyNoMoreInteractions(propertiesDao);
    assertThat(flag).isEqualTo(expected);
}
/**
 * Concatenates two string arrays into a new array.
 *
 * <p>When either input is empty, the other array is returned as-is (same
 * instance, no copy).
 *
 * @param array1 the leading elements of the result
 * @param array2 the trailing elements of the result
 * @return a new array holding array1's elements followed by array2's, or one of
 *         the inputs when the other is empty
 */
public static String[] concat(String[] array1, String[] array2) {
    if (array1.length == 0) {
        return array2;
    }
    if (array2.length == 0) {
        return array1;
    }
    final String[] merged = new String[array1.length + array2.length];
    System.arraycopy(array1, 0, merged, 0, array1.length);
    System.arraycopy(array2, 0, merged, array1.length, array2.length);
    return merged;
}
@Test
void concatWithEmptyArray() {
    // When one side is empty the other array must be returned as-is
    // (same instance, no defensive copy).
    String[] emptyArray = new String[] {};
    String[] nonEmptyArray = new String[] {"some value"};
    assertThat(ArrayUtils.concat(emptyArray, nonEmptyArray)).isSameAs(nonEmptyArray);
    assertThat(ArrayUtils.concat(nonEmptyArray, emptyArray)).isSameAs(nonEmptyArray);
}
public void deleteObservers() {
    // Drop every registered observer; subsequent notifications reach no one.
    obs.clear();
}
@Test
void testDeleteObservers() {
    observable.addObserver(observer);
    observable.deleteObservers();
    // Fixed expectation: deleteObservers() clears the observer list, so the
    // count afterwards must be 0 (the previous expected value of 1 contradicted
    // the implementation, which calls obs.clear()).
    assertEquals(0, observable.countObservers());
}
/**
 * Verifies this request carries everything needed to be sent: a path and an
 * HTTP operation. The path is checked first, so a request missing both reports
 * the missing path.
 *
 * @throws IllegalStateException if the path or the HTTP operation is unset
 */
public void verifyComplete() {
    if (path == null) {
        throw new IllegalStateException("HTTP requests must have a path set. Use '/' for top level");
    }
    if (httpOperation == null) {
        throw new IllegalStateException("HTTP requests must have an HTTP method defined");
    }
}
@Test
void testVerifyComplete() {
    // To be a complete request, an HTTP request must include:
    // - A path
    // - The HTTP operation type
    try {
        new HttpRequest().setPath("/foo").verifyComplete();
        fail();
    } catch (IllegalStateException e) {
        // expected: HTTP operation is missing
    }
    try {
        new HttpRequest().setHttpOperation(HttpRequest.HttpOp.GET).verifyComplete();
        fail();
    } catch (IllegalStateException e) {
        // expected: path is missing
    }
    // Both present: must not throw.
    new HttpRequest().setHttpOperation(HttpRequest.HttpOp.GET).setPath("/bar").verifyComplete();
}
/**
 * Parses the given raw definition into an instance of the requested class,
 * resolving the target type via {@code type(cls)}.
 *
 * @param input the raw textual definition to parse
 * @param cls   the model class to deserialize into
 * @return the parsed instance
 */
public <T> T parse(String input, Class<T> cls) {
    return readFlow(input, cls, type(cls));
}
@Test
void validation() {
    // The invalid flow definition must fail bean validation.
    assertThrows(ConstraintViolationException.class, () -> {
        modelValidator.validate(this.parse("flows/invalids/invalid.yaml"));
    });
    // NOTE(review): the second call only throws if parse() itself validates;
    // otherwise the assertion inside the catch never runs — confirm intended.
    try {
        this.parse("flows/invalids/invalid.yaml");
    } catch (ConstraintViolationException e) {
        assertThat(e.getConstraintViolations().size(), is(4));
    }
}
/**
 * Validates that every dependency matches some allowed rule, and (optionally)
 * that every rule matches some dependency.
 *
 * @param dependencies    the resolved artifacts to check
 * @param allowedRules    the rule patterns dependencies may match
 * @param failOnUnmatched if true, also fail when a rule matched no dependency
 * @throws EnforcerRuleException listing unmatched dependencies and/or unused rules
 */
static void validateDependencies(Set<Artifact> dependencies, Set<String> allowedRules, boolean failOnUnmatched)
        throws EnforcerRuleException {
    // Sorted so the error output is deterministic.
    SortedSet<Artifact> unmatchedArtifacts = new TreeSet<>();
    // Rules that matched at least one dependency.
    Set<String> matchedRules = new HashSet<>();
    for (Artifact dependency : dependencies) {
        boolean matches = false;
        for (String rule : allowedRules) {
            if (matches(dependency, rule)){
                matchedRules.add(rule);
                matches = true;
                break;
            }
        }
        if (!matches) {
            unmatchedArtifacts.add(dependency);
        }
    }
    SortedSet<String> unmatchedRules = new TreeSet<>(allowedRules);
    unmatchedRules.removeAll(matchedRules);
    // Fail when any dependency is unmatched, or — if requested — when any rule is unused.
    if (!unmatchedArtifacts.isEmpty() || (failOnUnmatched && !unmatchedRules.isEmpty())) {
        StringBuilder errorMessage = new StringBuilder("Vespa dependency enforcer failed:\n");
        if (!unmatchedArtifacts.isEmpty()) {
            errorMessage.append("Dependencies not matching any rule:\n");
            unmatchedArtifacts.forEach(a -> errorMessage.append(" - ").append(a.toString()).append('\n'));
        }
        if (failOnUnmatched && !unmatchedRules.isEmpty()) {
            errorMessage.append("Rules not matching any dependency:\n");
            unmatchedRules.forEach(p -> errorMessage.append(" - ").append(p).append('\n'));
        }
        throw new EnforcerRuleException(errorMessage.toString());
    }
}
@Test
void fails_on_version_mismatch() {
    // Same coordinates but a different version: the dependency must be reported
    // as unmatched AND the rule as unused, in the exact message format.
    Set<Artifact> dependencies = Set.of(
        artifact("com.yahoo.vespa", "testutils", "8.0.0", "test"));
    Set<String> rules = Set.of(
        "com.yahoo.vespa:testutils:jar:7.0.0:test");
    EnforcerRuleException exception = assertThrows(
        EnforcerRuleException.class,
        () -> EnforceDependencies.validateDependencies(dependencies, rules, true));
    String expectedErrorMessage = """
            Vespa dependency enforcer failed:
            Dependencies not matching any rule:
             - com.yahoo.vespa:testutils:jar:8.0.0:test
            Rules not matching any dependency:
             - com.yahoo.vespa:testutils:jar:7.0.0:test
            """;
    assertEquals(expectedErrorMessage, exception.getMessage());
}
/**
 * Authorizes table creation: only the owner of the target schema may create
 * tables in it; everyone else is denied.
 */
@Override
public void checkCanCreateTable(ConnectorTransactionHandle transaction, ConnectorIdentity identity, AccessControlContext context, SchemaTableName tableName) {
    if (isDatabaseOwner(identity, tableName.getSchemaName())) {
        return;
    }
    denyCreateTable(tableName.toString());
}
@Test
public void testSchemaRules() throws IOException {
    // Per the rules in schema.json: admin may create in "test" but is denied in
    // "secret"; bob may create only in "bob".
    ConnectorAccessControl accessControl = createAccessControl("schema.json");
    accessControl.checkCanCreateTable(TRANSACTION_HANDLE, user("admin"), CONTEXT, new SchemaTableName("test", "test"));
    accessControl.checkCanCreateTable(TRANSACTION_HANDLE, user("bob"), CONTEXT, new SchemaTableName("bob", "test"));
    assertDenied(() -> accessControl.checkCanCreateTable(TRANSACTION_HANDLE, user("bob"), CONTEXT, new SchemaTableName("test", "test")));
    assertDenied(() -> accessControl.checkCanCreateTable(TRANSACTION_HANDLE, user("admin"), CONTEXT, new SchemaTableName("secret", "test")));
}
public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) throws JournalInconsistentException { short opCode = journal.getOpCode(); if (opCode != OperationType.OP_SAVE_NEXTID && opCode != OperationType.OP_TIMESTAMP && opCode != OperationType.OP_TIMESTAMP_V2) { LOG.debug("replay journal op code: {}", opCode); } try { switch (opCode) { case OperationType.OP_SAVE_NEXTID: { String idString = journal.getData().toString(); long id = Long.parseLong(idString); globalStateMgr.setNextId(id + 1); break; } case OperationType.OP_SAVE_TRANSACTION_ID_V2: { TransactionIdInfo idInfo = (TransactionIdInfo) journal.getData(); GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().getTransactionIDGenerator() .initTransactionId(idInfo.getTxnId() + 1); break; } case OperationType.OP_SAVE_AUTO_INCREMENT_ID: case OperationType.OP_DELETE_AUTO_INCREMENT_ID: { AutoIncrementInfo info = (AutoIncrementInfo) journal.getData(); LocalMetastore metastore = globalStateMgr.getLocalMetastore(); if (opCode == OperationType.OP_SAVE_AUTO_INCREMENT_ID) { metastore.replayAutoIncrementId(info); } else if (opCode == OperationType.OP_DELETE_AUTO_INCREMENT_ID) { metastore.replayDeleteAutoIncrementId(info); } break; } case OperationType.OP_CREATE_DB_V2: { CreateDbInfo db = (CreateDbInfo) journal.getData(); LocalMetastore metastore = (LocalMetastore) globalStateMgr.getMetadata(); metastore.replayCreateDb(db); break; } case OperationType.OP_DROP_DB: { DropDbInfo dropDbInfo = (DropDbInfo) journal.getData(); LocalMetastore metastore = (LocalMetastore) globalStateMgr.getMetadata(); metastore.replayDropDb(dropDbInfo.getDbName(), dropDbInfo.isForceDrop()); break; } case OperationType.OP_ALTER_DB: case OperationType.OP_ALTER_DB_V2: { DatabaseInfo dbInfo = (DatabaseInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayAlterDatabaseQuota(dbInfo); break; } case OperationType.OP_ERASE_DB: { Text dbId = (Text) journal.getData(); 
globalStateMgr.getLocalMetastore().replayEraseDatabase(Long.parseLong(dbId.toString())); break; } case OperationType.OP_RECOVER_DB: case OperationType.OP_RECOVER_DB_V2: { RecoverInfo info = (RecoverInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRecoverDatabase(info); break; } case OperationType.OP_RENAME_DB: case OperationType.OP_RENAME_DB_V2: { DatabaseInfo dbInfo = (DatabaseInfo) journal.getData(); String dbName = dbInfo.getDbName(); LOG.info("Begin to unprotect rename db {}", dbName); globalStateMgr.getLocalMetastore().replayRenameDatabase(dbName, dbInfo.getNewDbName()); break; } case OperationType.OP_CREATE_TABLE_V2: { CreateTableInfo info = (CreateTableInfo) journal.getData(); if (info.getTable().isMaterializedView()) { LOG.info("Begin to unprotect create materialized view. db = " + info.getDbName() + " create materialized view = " + info.getTable().getId() + " tableName = " + info.getTable().getName()); } else { LOG.info("Begin to unprotect create table. db = " + info.getDbName() + " table = " + info.getTable().getId()); } globalStateMgr.getLocalMetastore().replayCreateTable(info); break; } case OperationType.OP_DROP_TABLE: case OperationType.OP_DROP_TABLE_V2: { DropInfo info = (DropInfo) journal.getData(); Database db = globalStateMgr.getDb(info.getDbId()); if (db == null) { LOG.warn("failed to get db[{}]", info.getDbId()); break; } LOG.info("Begin to unprotect drop table. db = " + db.getOriginName() + " table = " + info.getTableId()); globalStateMgr.getLocalMetastore().replayDropTable(db, info.getTableId(), info.isForceDrop()); break; } case OperationType.OP_ADD_PARTITION_V2: { PartitionPersistInfoV2 info = (PartitionPersistInfoV2) journal.getData(); LOG.info("Begin to unprotect add partition. 
db = " + info.getDbId() + " table = " + info.getTableId() + " partitionName = " + info.getPartition().getName()); globalStateMgr.getLocalMetastore().replayAddPartition(info); break; } case OperationType.OP_ADD_PARTITIONS_V2: { AddPartitionsInfoV2 infos = (AddPartitionsInfoV2) journal.getData(); for (PartitionPersistInfoV2 info : infos.getAddPartitionInfos()) { globalStateMgr.getLocalMetastore().replayAddPartition(info); } break; } case OperationType.OP_ADD_SUB_PARTITIONS_V2: { AddSubPartitionsInfoV2 infos = (AddSubPartitionsInfoV2) journal.getData(); for (PhysicalPartitionPersistInfoV2 info : infos.getAddSubPartitionInfos()) { globalStateMgr.getLocalMetastore().replayAddSubPartition(info); } break; } case OperationType.OP_DROP_PARTITION: { DropPartitionInfo info = (DropPartitionInfo) journal.getData(); LOG.info("Begin to unprotect drop partition. db = " + info.getDbId() + " table = " + info.getTableId() + " partitionName = " + info.getPartitionName()); globalStateMgr.getLocalMetastore().replayDropPartition(info); break; } case OperationType.OP_DROP_PARTITIONS: { DropPartitionsInfo info = (DropPartitionsInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayDropPartitions(info); break; } case OperationType.OP_MODIFY_PARTITION: case OperationType.OP_MODIFY_PARTITION_V2: { ModifyPartitionInfo info = (ModifyPartitionInfo) journal.getData(); LOG.info("Begin to unprotect modify partition. 
db = " + info.getDbId() + " table = " + info.getTableId() + " partitionId = " + info.getPartitionId()); globalStateMgr.getAlterJobMgr().replayModifyPartition(info); break; } case OperationType.OP_BATCH_MODIFY_PARTITION: { BatchModifyPartitionsInfo info = (BatchModifyPartitionsInfo) journal.getData(); for (ModifyPartitionInfo modifyPartitionInfo : info.getModifyPartitionInfos()) { globalStateMgr.getAlterJobMgr().replayModifyPartition(modifyPartitionInfo); } break; } case OperationType.OP_ERASE_TABLE: { Text tableId = (Text) journal.getData(); globalStateMgr.getLocalMetastore().replayEraseTable(Long.parseLong(tableId.toString())); break; } case OperationType.OP_ERASE_MULTI_TABLES: { MultiEraseTableInfo multiEraseTableInfo = (MultiEraseTableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayEraseMultiTables(multiEraseTableInfo); break; } case OperationType.OP_DISABLE_TABLE_RECOVERY: { DisableTableRecoveryInfo disableTableRecoveryInfo = (DisableTableRecoveryInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayDisableTableRecovery(disableTableRecoveryInfo); break; } case OperationType.OP_DISABLE_PARTITION_RECOVERY: { DisablePartitionRecoveryInfo disableRecoveryInfo = (DisablePartitionRecoveryInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayDisablePartitionRecovery(disableRecoveryInfo); break; } case OperationType.OP_ERASE_PARTITION: { Text partitionId = (Text) journal.getData(); globalStateMgr.getLocalMetastore().replayErasePartition(Long.parseLong(partitionId.toString())); break; } case OperationType.OP_RECOVER_TABLE: case OperationType.OP_RECOVER_TABLE_V2: { RecoverInfo info = (RecoverInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRecoverTable(info); break; } case OperationType.OP_RECOVER_PARTITION: case OperationType.OP_RECOVER_PARTITION_V2: { RecoverInfo info = (RecoverInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRecoverPartition(info); break; } case OperationType.OP_RENAME_TABLE: 
case OperationType.OP_RENAME_TABLE_V2: { TableInfo info = (TableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRenameTable(info); break; } case OperationType.OP_CHANGE_MATERIALIZED_VIEW_REFRESH_SCHEME: { ChangeMaterializedViewRefreshSchemeLog log = (ChangeMaterializedViewRefreshSchemeLog) journal.getData(); globalStateMgr.getAlterJobMgr().replayChangeMaterializedViewRefreshScheme(log); break; } case OperationType.OP_ALTER_MATERIALIZED_VIEW_PROPERTIES: { ModifyTablePropertyOperationLog log = (ModifyTablePropertyOperationLog) journal.getData(); globalStateMgr.getAlterJobMgr().replayAlterMaterializedViewProperties(opCode, log); break; } case OperationType.OP_ALTER_MATERIALIZED_VIEW_STATUS: { AlterMaterializedViewStatusLog log = (AlterMaterializedViewStatusLog) journal.getData(); globalStateMgr.getAlterJobMgr().replayAlterMaterializedViewStatus(log); break; } case OperationType.OP_ALTER_MATERIALIZED_VIEW_BASE_TABLE_INFOS: { AlterMaterializedViewBaseTableInfosLog log = (AlterMaterializedViewBaseTableInfosLog) journal.getData(); globalStateMgr.getAlterJobMgr().replayAlterMaterializedViewBaseTableInfos(log); break; } case OperationType.OP_RENAME_MATERIALIZED_VIEW: { RenameMaterializedViewLog log = (RenameMaterializedViewLog) journal.getData(); globalStateMgr.getAlterJobMgr().replayRenameMaterializedView(log); break; } case OperationType.OP_MODIFY_VIEW_DEF: { AlterViewInfo info = (AlterViewInfo) journal.getData(); globalStateMgr.getAlterJobMgr().alterView(info); break; } case OperationType.OP_RENAME_PARTITION: case OperationType.OP_RENAME_PARTITION_V2: { TableInfo info = (TableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRenamePartition(info); break; } case OperationType.OP_RENAME_COLUMN_V2: { ColumnRenameInfo info = (ColumnRenameInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRenameColumn(info); break; } case OperationType.OP_BACKUP_JOB: case OperationType.OP_BACKUP_JOB_V2: { BackupJob job = (BackupJob) 
journal.getData(); globalStateMgr.getBackupHandler().replayAddJob(job); break; } case OperationType.OP_RESTORE_JOB: case OperationType.OP_RESTORE_JOB_V2: { RestoreJob job = (RestoreJob) journal.getData(); job.setGlobalStateMgr(globalStateMgr); globalStateMgr.getBackupHandler().replayAddJob(job); break; } case OperationType.OP_DROP_ROLLUP: case OperationType.OP_DROP_ROLLUP_V2: { DropInfo info = (DropInfo) journal.getData(); globalStateMgr.getRollupHandler().replayDropRollup(info, globalStateMgr); break; } case OperationType.OP_BATCH_DROP_ROLLUP: { BatchDropInfo batchDropInfo = (BatchDropInfo) journal.getData(); for (long indexId : batchDropInfo.getIndexIdSet()) { globalStateMgr.getRollupHandler().replayDropRollup( new DropInfo(batchDropInfo.getDbId(), batchDropInfo.getTableId(), indexId, false), globalStateMgr); } break; } case OperationType.OP_FINISH_CONSISTENCY_CHECK: case OperationType.OP_FINISH_CONSISTENCY_CHECK_V2: { ConsistencyCheckInfo info = (ConsistencyCheckInfo) journal.getData(); globalStateMgr.getConsistencyChecker().replayFinishConsistencyCheck(info, globalStateMgr); break; } case OperationType.OP_CLEAR_ROLLUP_INFO: { // Nothing to do break; } case OperationType.OP_RENAME_ROLLUP: case OperationType.OP_RENAME_ROLLUP_V2: { TableInfo info = (TableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRenameRollup(info); break; } case OperationType.OP_EXPORT_CREATE: case OperationType.OP_EXPORT_CREATE_V2: { ExportJob job = (ExportJob) journal.getData(); ExportMgr exportMgr = globalStateMgr.getExportMgr(); exportMgr.replayCreateExportJob(job); break; } case OperationType.OP_EXPORT_UPDATE_STATE: ExportJob.StateTransfer op = (ExportJob.StateTransfer) journal.getData(); ExportMgr exportMgr = globalStateMgr.getExportMgr(); exportMgr.replayUpdateJobState(op.getJobId(), op.getState()); break; case OperationType.OP_EXPORT_UPDATE_INFO_V2: case OperationType.OP_EXPORT_UPDATE_INFO: ExportJob.ExportUpdateInfo exportUpdateInfo = (ExportJob.ExportUpdateInfo) 
journal.getData(); globalStateMgr.getExportMgr().replayUpdateJobInfo(exportUpdateInfo); break; case OperationType.OP_FINISH_DELETE: { DeleteInfo info = (DeleteInfo) journal.getData(); DeleteMgr deleteHandler = globalStateMgr.getDeleteMgr(); deleteHandler.replayDelete(info, globalStateMgr); break; } case OperationType.OP_FINISH_MULTI_DELETE: { MultiDeleteInfo info = (MultiDeleteInfo) journal.getData(); DeleteMgr deleteHandler = globalStateMgr.getDeleteMgr(); deleteHandler.replayMultiDelete(info, globalStateMgr); break; } case OperationType.OP_ADD_REPLICA: case OperationType.OP_ADD_REPLICA_V2: { ReplicaPersistInfo info = (ReplicaPersistInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayAddReplica(info); break; } case OperationType.OP_UPDATE_REPLICA: case OperationType.OP_UPDATE_REPLICA_V2: { ReplicaPersistInfo info = (ReplicaPersistInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayUpdateReplica(info); break; } case OperationType.OP_DELETE_REPLICA: case OperationType.OP_DELETE_REPLICA_V2: { ReplicaPersistInfo info = (ReplicaPersistInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayDeleteReplica(info); break; } case OperationType.OP_BATCH_DELETE_REPLICA: { BatchDeleteReplicaInfo info = (BatchDeleteReplicaInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayBatchDeleteReplica(info); break; } case OperationType.OP_ADD_COMPUTE_NODE: { ComputeNode computeNode = (ComputeNode) journal.getData(); GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().replayAddComputeNode(computeNode); break; } case OperationType.OP_DROP_COMPUTE_NODE: { DropComputeNodeLog dropComputeNodeLog = (DropComputeNodeLog) journal.getData(); GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo() .replayDropComputeNode(dropComputeNodeLog.getComputeNodeId()); break; } case OperationType.OP_ADD_BACKEND: case OperationType.OP_ADD_BACKEND_V2: { Backend be = (Backend) journal.getData(); 
GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().replayAddBackend(be); break; } case OperationType.OP_DROP_BACKEND: case OperationType.OP_DROP_BACKEND_V2: { Backend be = (Backend) journal.getData(); GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().replayDropBackend(be); break; } case OperationType.OP_BACKEND_STATE_CHANGE: case OperationType.OP_BACKEND_STATE_CHANGE_V2: { Backend be = (Backend) journal.getData(); GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().updateInMemoryStateBackend(be); break; } case OperationType.OP_ADD_FIRST_FRONTEND: case OperationType.OP_ADD_FIRST_FRONTEND_V2: case OperationType.OP_ADD_FRONTEND: case OperationType.OP_ADD_FRONTEND_V2: { Frontend fe = (Frontend) journal.getData(); globalStateMgr.getNodeMgr().replayAddFrontend(fe); break; } case OperationType.OP_REMOVE_FRONTEND: case OperationType.OP_REMOVE_FRONTEND_V2: { Frontend fe = (Frontend) journal.getData(); globalStateMgr.getNodeMgr().replayDropFrontend(fe); if (fe.getNodeName().equals(GlobalStateMgr.getCurrentState().getNodeMgr().getNodeName())) { throw new JournalInconsistentException("current fe " + fe + " is removed. 
will exit"); } break; } case OperationType.OP_UPDATE_FRONTEND: case OperationType.OP_UPDATE_FRONTEND_V2: { Frontend fe = (Frontend) journal.getData(); globalStateMgr.getNodeMgr().replayUpdateFrontend(fe); break; } case OperationType.OP_TIMESTAMP: case OperationType.OP_TIMESTAMP_V2: { Timestamp stamp = (Timestamp) journal.getData(); globalStateMgr.setSynchronizedTime(stamp.getTimestamp()); break; } case OperationType.OP_LEADER_INFO_CHANGE: case OperationType.OP_LEADER_INFO_CHANGE_V2: { LeaderInfo info = (LeaderInfo) journal.getData(); globalStateMgr.setLeader(info); break; } //compatible with old community meta, newly added log using OP_META_VERSION_V2 case OperationType.OP_META_VERSION: { break; } case OperationType.OP_META_VERSION_V2: { MetaVersion metaVersion = (MetaVersion) journal.getData(); if (!MetaVersion.isCompatible(metaVersion.getStarRocksVersion(), FeConstants.STARROCKS_META_VERSION)) { throw new JournalInconsistentException("Not compatible with meta version " + metaVersion.getStarRocksVersion() + ", current version is " + FeConstants.STARROCKS_META_VERSION); } MetaContext.get().setStarRocksMetaVersion(metaVersion.getStarRocksVersion()); break; } case OperationType.OP_CREATE_CLUSTER: { // ignore break; } case OperationType.OP_ADD_BROKER: case OperationType.OP_ADD_BROKER_V2: { final BrokerMgr.ModifyBrokerInfo param = (BrokerMgr.ModifyBrokerInfo) journal.getData(); globalStateMgr.getBrokerMgr().replayAddBrokers(param.brokerName, param.brokerAddresses); break; } case OperationType.OP_DROP_BROKER: case OperationType.OP_DROP_BROKER_V2: { final BrokerMgr.ModifyBrokerInfo param = (BrokerMgr.ModifyBrokerInfo) journal.getData(); globalStateMgr.getBrokerMgr().replayDropBrokers(param.brokerName, param.brokerAddresses); break; } case OperationType.OP_DROP_ALL_BROKER: { final String param = journal.getData().toString(); globalStateMgr.getBrokerMgr().replayDropAllBroker(param); break; } case OperationType.OP_SET_LOAD_ERROR_HUB: { final LoadErrorHub.Param param = 
(LoadErrorHub.Param) journal.getData(); globalStateMgr.getLoadInstance().setLoadErrorHubInfo(param); break; } case OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS: { final BackendIdsUpdateInfo info = (BackendIdsUpdateInfo) journal.getData(); globalStateMgr.replayUpdateClusterAndBackends(info); break; } case OperationType.OP_UPSERT_TRANSACTION_STATE_V2: { final TransactionState state = (TransactionState) journal.getData(); GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().replayUpsertTransactionState(state); LOG.debug("opcode: {}, tid: {}", opCode, state.getTransactionId()); break; } case OperationType.OP_UPSERT_TRANSACTION_STATE_BATCH: { final TransactionStateBatch stateBatch = (TransactionStateBatch) journal.getData(); GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().replayUpsertTransactionStateBatch(stateBatch); LOG.debug("opcode: {}, txn ids: {}", opCode, stateBatch.getTxnIds()); break; } case OperationType.OP_CREATE_REPOSITORY: case OperationType.OP_CREATE_REPOSITORY_V2: { Repository repository = (Repository) journal.getData(); globalStateMgr.getBackupHandler().getRepoMgr().addAndInitRepoIfNotExist(repository, true); break; } case OperationType.OP_DROP_REPOSITORY: { String repoName = ((Text) journal.getData()).toString(); globalStateMgr.getBackupHandler().getRepoMgr().removeRepo(repoName, true); break; } case OperationType.OP_TRUNCATE_TABLE: { TruncateTableInfo info = (TruncateTableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayTruncateTable(info); break; } case OperationType.OP_COLOCATE_ADD_TABLE: case OperationType.OP_COLOCATE_ADD_TABLE_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayAddTableToGroup(info); break; } case OperationType.OP_COLOCATE_REMOVE_TABLE: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayRemoveTable(info); break; } case 
OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ: case OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayAddBackendsPerBucketSeq(info); break; } case OperationType.OP_COLOCATE_MARK_UNSTABLE: case OperationType.OP_COLOCATE_MARK_UNSTABLE_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayMarkGroupUnstable(info); break; } case OperationType.OP_COLOCATE_MARK_STABLE: case OperationType.OP_COLOCATE_MARK_STABLE_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayMarkGroupStable(info); break; } case OperationType.OP_MODIFY_TABLE_COLOCATE: case OperationType.OP_MODIFY_TABLE_COLOCATE_V2: { final TablePropertyInfo info = (TablePropertyInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayModifyTableColocate(info); break; } case OperationType.OP_HEARTBEAT_V2: case OperationType.OP_HEARTBEAT: { final HbPackage hbPackage = (HbPackage) journal.getData(); GlobalStateMgr.getCurrentState().getHeartbeatMgr().replayHearbeat(hbPackage); break; } case OperationType.OP_ADD_FUNCTION: case OperationType.OP_ADD_FUNCTION_V2: { final Function function = (Function) journal.getData(); if (function.getFunctionName().isGlobalFunction()) { GlobalStateMgr.getCurrentState().getGlobalFunctionMgr().replayAddFunction(function); } else { Database.replayCreateFunctionLog(function); } break; } case OperationType.OP_DROP_FUNCTION: case OperationType.OP_DROP_FUNCTION_V2: { FunctionSearchDesc function = (FunctionSearchDesc) journal.getData(); if (function.getName().isGlobalFunction()) { GlobalStateMgr.getCurrentState().getGlobalFunctionMgr().replayDropFunction(function); } else { Database.replayDropFunctionLog(function); } break; } case OperationType.OP_BACKEND_TABLETS_INFO: case OperationType.OP_BACKEND_TABLETS_INFO_V2: { 
BackendTabletsInfo backendTabletsInfo = (BackendTabletsInfo) journal.getData(); GlobalStateMgr.getCurrentState().getLocalMetastore().replayBackendTabletsInfo(backendTabletsInfo); break; } case OperationType.OP_CREATE_ROUTINE_LOAD_JOB_V2: case OperationType.OP_CREATE_ROUTINE_LOAD_JOB: { RoutineLoadJob routineLoadJob = (RoutineLoadJob) journal.getData(); GlobalStateMgr.getCurrentState().getRoutineLoadMgr().replayCreateRoutineLoadJob(routineLoadJob); break; } case OperationType.OP_CHANGE_ROUTINE_LOAD_JOB_V2: case OperationType.OP_CHANGE_ROUTINE_LOAD_JOB: { RoutineLoadOperation operation = (RoutineLoadOperation) journal.getData(); GlobalStateMgr.getCurrentState().getRoutineLoadMgr().replayChangeRoutineLoadJob(operation); break; } case OperationType.OP_REMOVE_ROUTINE_LOAD_JOB: { RoutineLoadOperation operation = (RoutineLoadOperation) journal.getData(); globalStateMgr.getRoutineLoadMgr().replayRemoveOldRoutineLoad(operation); break; } case OperationType.OP_CREATE_STREAM_LOAD_TASK: case OperationType.OP_CREATE_STREAM_LOAD_TASK_V2: { StreamLoadTask streamLoadTask = (StreamLoadTask) journal.getData(); globalStateMgr.getStreamLoadMgr().replayCreateLoadTask(streamLoadTask); break; } case OperationType.OP_CREATE_LOAD_JOB_V2: case OperationType.OP_CREATE_LOAD_JOB: { com.starrocks.load.loadv2.LoadJob loadJob = (com.starrocks.load.loadv2.LoadJob) journal.getData(); globalStateMgr.getLoadMgr().replayCreateLoadJob(loadJob); break; } case OperationType.OP_END_LOAD_JOB_V2: case OperationType.OP_END_LOAD_JOB: { LoadJobFinalOperation operation = (LoadJobFinalOperation) journal.getData(); globalStateMgr.getLoadMgr().replayEndLoadJob(operation); break; } case OperationType.OP_UPDATE_LOAD_JOB: { LoadJobStateUpdateInfo info = (LoadJobStateUpdateInfo) journal.getData(); globalStateMgr.getLoadMgr().replayUpdateLoadJobStateInfo(info); break; } case OperationType.OP_CREATE_RESOURCE: { final Resource resource = (Resource) journal.getData(); 
globalStateMgr.getResourceMgr().replayCreateResource(resource); break; } case OperationType.OP_DROP_RESOURCE: { final DropResourceOperationLog operationLog = (DropResourceOperationLog) journal.getData(); globalStateMgr.getResourceMgr().replayDropResource(operationLog); break; } case OperationType.OP_RESOURCE_GROUP: { final ResourceGroupOpEntry entry = (ResourceGroupOpEntry) journal.getData(); globalStateMgr.getResourceGroupMgr().replayResourceGroupOp(entry); break; } case OperationType.OP_CREATE_TASK: { final Task task = (Task) journal.getData(); globalStateMgr.getTaskManager().replayCreateTask(task); break; } case OperationType.OP_DROP_TASKS: { DropTasksLog dropTasksLog = (DropTasksLog) journal.getData(); globalStateMgr.getTaskManager().replayDropTasks(dropTasksLog.getTaskIdList()); break; } case OperationType.OP_ALTER_TASK: { final Task task = (Task) journal.getData(); globalStateMgr.getTaskManager().replayAlterTask(task); break; } case OperationType.OP_CREATE_TASK_RUN: { final TaskRunStatus status = (TaskRunStatus) journal.getData(); globalStateMgr.getTaskManager().replayCreateTaskRun(status); break; } case OperationType.OP_UPDATE_TASK_RUN: { final TaskRunStatusChange statusChange = (TaskRunStatusChange) journal.getData(); globalStateMgr.getTaskManager().replayUpdateTaskRun(statusChange); break; } case OperationType.OP_DROP_TASK_RUNS: { DropTaskRunsLog dropTaskRunsLog = (DropTaskRunsLog) journal.getData(); globalStateMgr.getTaskManager().replayDropTaskRuns(dropTaskRunsLog.getQueryIdList()); break; } case OperationType.OP_UPDATE_TASK_RUN_STATE: { TaskRunPeriodStatusChange taskRunPeriodStatusChange = (TaskRunPeriodStatusChange) journal.getData(); globalStateMgr.getTaskManager().replayAlterRunningTaskRunProgress( taskRunPeriodStatusChange.getTaskRunProgressMap()); break; } case OperationType.OP_ARCHIVE_TASK_RUNS: { ArchiveTaskRunsLog log = (ArchiveTaskRunsLog) journal.getData(); globalStateMgr.getTaskManager().replayArchiveTaskRuns(log); break; } case 
OperationType.OP_CREATE_SMALL_FILE: case OperationType.OP_CREATE_SMALL_FILE_V2: { SmallFile smallFile = (SmallFile) journal.getData(); globalStateMgr.getSmallFileMgr().replayCreateFile(smallFile); break; } case OperationType.OP_DROP_SMALL_FILE: case OperationType.OP_DROP_SMALL_FILE_V2: { SmallFile smallFile = (SmallFile) journal.getData(); globalStateMgr.getSmallFileMgr().replayRemoveFile(smallFile); break; } case OperationType.OP_ALTER_JOB_V2: { AlterJobV2 alterJob = (AlterJobV2) journal.getData(); switch (alterJob.getType()) { case ROLLUP: globalStateMgr.getRollupHandler().replayAlterJobV2(alterJob); break; case SCHEMA_CHANGE: case OPTIMIZE: globalStateMgr.getSchemaChangeHandler().replayAlterJobV2(alterJob); break; default: break; } break; } case OperationType.OP_BATCH_ADD_ROLLUP: case OperationType.OP_BATCH_ADD_ROLLUP_V2: { BatchAlterJobPersistInfo batchAlterJobV2 = (BatchAlterJobPersistInfo) journal.getData(); for (AlterJobV2 alterJobV2 : batchAlterJobV2.getAlterJobV2List()) { globalStateMgr.getRollupHandler().replayAlterJobV2(alterJobV2); } break; } case OperationType.OP_MODIFY_DISTRIBUTION_TYPE: case OperationType.OP_MODIFY_DISTRIBUTION_TYPE_V2: { TableInfo tableInfo = (TableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayConvertDistributionType(tableInfo); break; } case OperationType.OP_DYNAMIC_PARTITION: case OperationType.OP_MODIFY_IN_MEMORY: case OperationType.OP_SET_FORBIDDEN_GLOBAL_DICT: case OperationType.OP_MODIFY_REPLICATION_NUM: case OperationType.OP_MODIFY_WRITE_QUORUM: case OperationType.OP_MODIFY_REPLICATED_STORAGE: case OperationType.OP_MODIFY_BUCKET_SIZE: case OperationType.OP_MODIFY_MUTABLE_BUCKET_NUM: case OperationType.OP_MODIFY_BINLOG_AVAILABLE_VERSION: case OperationType.OP_MODIFY_BINLOG_CONFIG: case OperationType.OP_MODIFY_ENABLE_PERSISTENT_INDEX: case OperationType.OP_MODIFY_PRIMARY_INDEX_CACHE_EXPIRE_SEC: case OperationType.OP_ALTER_TABLE_PROPERTIES: case OperationType.OP_MODIFY_TABLE_CONSTRAINT_PROPERTY: { 
ModifyTablePropertyOperationLog modifyTablePropertyOperationLog = (ModifyTablePropertyOperationLog) journal.getData(); globalStateMgr.getLocalMetastore().replayModifyTableProperty(opCode, modifyTablePropertyOperationLog); break; } case OperationType.OP_REPLACE_TEMP_PARTITION: { ReplacePartitionOperationLog replaceTempPartitionLog = (ReplacePartitionOperationLog) journal.getData(); globalStateMgr.getLocalMetastore().replayReplaceTempPartition(replaceTempPartitionLog); break; } case OperationType.OP_INSTALL_PLUGIN: { PluginInfo pluginInfo = (PluginInfo) journal.getData(); try { globalStateMgr.getPluginMgr().replayLoadDynamicPlugin(pluginInfo); } catch (Exception e) { LOG.warn("replay install plugin failed.", e); } break; } case OperationType.OP_UNINSTALL_PLUGIN: { PluginInfo pluginInfo = (PluginInfo) journal.getData(); try { globalStateMgr.getPluginMgr().uninstallPlugin(pluginInfo.getName()); } catch (Exception e) { LOG.warn("replay uninstall plugin failed.", e); } break; } case OperationType.OP_SET_REPLICA_STATUS: { SetReplicaStatusOperationLog log = (SetReplicaStatusOperationLog) journal.getData(); globalStateMgr.getLocalMetastore().replaySetReplicaStatus(log); break; } case OperationType.OP_REMOVE_ALTER_JOB_V2: { RemoveAlterJobV2OperationLog log = (RemoveAlterJobV2OperationLog) journal.getData(); switch (log.getType()) { case ROLLUP: globalStateMgr.getRollupHandler().replayRemoveAlterJobV2(log); break; case SCHEMA_CHANGE: globalStateMgr.getSchemaChangeHandler().replayRemoveAlterJobV2(log); break; case OPTIMIZE: globalStateMgr.getSchemaChangeHandler().replayRemoveAlterJobV2(log); break; default: break; } break; } case OperationType.OP_ALTER_ROUTINE_LOAD_JOB: { AlterRoutineLoadJobOperationLog log = (AlterRoutineLoadJobOperationLog) journal.getData(); globalStateMgr.getRoutineLoadMgr().replayAlterRoutineLoadJob(log); break; } case OperationType.OP_ALTER_LOAD_JOB: { AlterLoadJobOperationLog log = (AlterLoadJobOperationLog) journal.getData(); 
globalStateMgr.getLoadMgr().replayAlterLoadJob(log); break; } case OperationType.OP_GLOBAL_VARIABLE_V2: { GlobalVarPersistInfo info = (GlobalVarPersistInfo) journal.getData(); VariableMgr.replayGlobalVariableV2(info); break; } case OperationType.OP_SWAP_TABLE: { SwapTableOperationLog log = (SwapTableOperationLog) journal.getData(); globalStateMgr.getAlterJobMgr().replaySwapTable(log); break; } case OperationType.OP_ADD_ANALYZER_JOB: { NativeAnalyzeJob nativeAnalyzeJob = (NativeAnalyzeJob) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddAnalyzeJob(nativeAnalyzeJob); break; } case OperationType.OP_REMOVE_ANALYZER_JOB: { NativeAnalyzeJob nativeAnalyzeJob = (NativeAnalyzeJob) journal.getData(); globalStateMgr.getAnalyzeMgr().replayRemoveAnalyzeJob(nativeAnalyzeJob); break; } case OperationType.OP_ADD_ANALYZE_STATUS: { NativeAnalyzeStatus analyzeStatus = (NativeAnalyzeStatus) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddAnalyzeStatus(analyzeStatus); break; } case OperationType.OP_REMOVE_ANALYZE_STATUS: { NativeAnalyzeStatus analyzeStatus = (NativeAnalyzeStatus) journal.getData(); globalStateMgr.getAnalyzeMgr().replayRemoveAnalyzeStatus(analyzeStatus); break; } case OperationType.OP_ADD_EXTERNAL_ANALYZE_STATUS: { ExternalAnalyzeStatus analyzeStatus = (ExternalAnalyzeStatus) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddAnalyzeStatus(analyzeStatus); break; } case OperationType.OP_REMOVE_EXTERNAL_ANALYZE_STATUS: { ExternalAnalyzeStatus analyzeStatus = (ExternalAnalyzeStatus) journal.getData(); globalStateMgr.getAnalyzeMgr().replayRemoveAnalyzeStatus(analyzeStatus); break; } case OperationType.OP_ADD_EXTERNAL_ANALYZER_JOB: { ExternalAnalyzeJob externalAnalyzeJob = (ExternalAnalyzeJob) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddAnalyzeJob(externalAnalyzeJob); break; } case OperationType.OP_REMOVE_EXTERNAL_ANALYZER_JOB: { ExternalAnalyzeJob externalAnalyzeJob = (ExternalAnalyzeJob) journal.getData(); 
globalStateMgr.getAnalyzeMgr().replayRemoveAnalyzeJob(externalAnalyzeJob); break; } case OperationType.OP_ADD_BASIC_STATS_META: { BasicStatsMeta basicStatsMeta = (BasicStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddBasicStatsMeta(basicStatsMeta); // The follower replays the stats meta log, indicating that the master has re-completed // statistic, and the follower's should refresh cache here. // We don't need to refresh statistics when checkpointing if (!GlobalStateMgr.isCheckpointThread()) { globalStateMgr.getAnalyzeMgr().refreshBasicStatisticsCache(basicStatsMeta.getDbId(), basicStatsMeta.getTableId(), basicStatsMeta.getColumns(), true); } break; } case OperationType.OP_REMOVE_BASIC_STATS_META: { BasicStatsMeta basicStatsMeta = (BasicStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayRemoveBasicStatsMeta(basicStatsMeta); break; } case OperationType.OP_ADD_HISTOGRAM_STATS_META: { HistogramStatsMeta histogramStatsMeta = (HistogramStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddHistogramStatsMeta(histogramStatsMeta); // The follower replays the stats meta log, indicating that the master has re-completed // statistic, and the follower's should expire cache here. 
// We don't need to refresh statistics when checkpointing if (!GlobalStateMgr.isCheckpointThread()) { globalStateMgr.getAnalyzeMgr().refreshHistogramStatisticsCache( histogramStatsMeta.getDbId(), histogramStatsMeta.getTableId(), Lists.newArrayList(histogramStatsMeta.getColumn()), true); } break; } case OperationType.OP_REMOVE_HISTOGRAM_STATS_META: { HistogramStatsMeta histogramStatsMeta = (HistogramStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayRemoveHistogramStatsMeta(histogramStatsMeta); break; } case OperationType.OP_ADD_EXTERNAL_BASIC_STATS_META: { ExternalBasicStatsMeta basicStatsMeta = (ExternalBasicStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddExternalBasicStatsMeta(basicStatsMeta); // The follower replays the stats meta log, indicating that the master has re-completed // statistic, and the follower's should refresh cache here. // We don't need to refresh statistics when checkpointing if (!GlobalStateMgr.isCheckpointThread()) { globalStateMgr.getAnalyzeMgr().refreshConnectorTableBasicStatisticsCache( basicStatsMeta.getCatalogName(), basicStatsMeta.getDbName(), basicStatsMeta.getTableName(), basicStatsMeta.getColumns(), true); } break; } case OperationType.OP_REMOVE_EXTERNAL_BASIC_STATS_META: { ExternalBasicStatsMeta basicStatsMeta = (ExternalBasicStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayRemoveExternalBasicStatsMeta(basicStatsMeta); break; } case OperationType.OP_ADD_EXTERNAL_HISTOGRAM_STATS_META: { ExternalHistogramStatsMeta histogramStatsMeta = (ExternalHistogramStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayAddExternalHistogramStatsMeta(histogramStatsMeta); // The follower replays the stats meta log, indicating that the master has re-completed // statistic, and the follower's should expire cache here. 
// We don't need to refresh statistics when checkpointing if (!GlobalStateMgr.isCheckpointThread()) { globalStateMgr.getAnalyzeMgr().refreshConnectorTableHistogramStatisticsCache( histogramStatsMeta.getCatalogName(), histogramStatsMeta.getDbName(), histogramStatsMeta.getTableName(), Lists.newArrayList(histogramStatsMeta.getColumn()), true); } break; } case OperationType.OP_REMOVE_EXTERNAL_HISTOGRAM_STATS_META: { ExternalHistogramStatsMeta histogramStatsMeta = (ExternalHistogramStatsMeta) journal.getData(); globalStateMgr.getAnalyzeMgr().replayRemoveExternalHistogramStatsMeta(histogramStatsMeta); break; } case OperationType.OP_MODIFY_HIVE_TABLE_COLUMN: { ModifyTableColumnOperationLog modifyTableColumnOperationLog = (ModifyTableColumnOperationLog) journal.getData(); globalStateMgr.getLocalMetastore().replayModifyHiveTableColumn(opCode, modifyTableColumnOperationLog); break; } case OperationType.OP_CREATE_CATALOG: { Catalog catalog = (Catalog) journal.getData(); globalStateMgr.getCatalogMgr().replayCreateCatalog(catalog); break; } case OperationType.OP_DROP_CATALOG: { DropCatalogLog dropCatalogLog = (DropCatalogLog) journal.getData(); globalStateMgr.getCatalogMgr().replayDropCatalog(dropCatalogLog); break; } case OperationType.OP_ALTER_CATALOG: AlterCatalogLog alterCatalogLog = (AlterCatalogLog) journal.getData(); globalStateMgr.getCatalogMgr().replayAlterCatalog(alterCatalogLog); break; case OperationType.OP_CREATE_INSERT_OVERWRITE: { CreateInsertOverwriteJobLog jobInfo = (CreateInsertOverwriteJobLog) journal.getData(); globalStateMgr.getInsertOverwriteJobMgr().replayCreateInsertOverwrite(jobInfo); break; } case OperationType.OP_INSERT_OVERWRITE_STATE_CHANGE: { InsertOverwriteStateChangeInfo stateChangeInfo = (InsertOverwriteStateChangeInfo) journal.getData(); globalStateMgr.getInsertOverwriteJobMgr().replayInsertOverwriteStateChange(stateChangeInfo); break; } case OperationType.OP_ADD_UNUSED_SHARD: case OperationType.OP_DELETE_UNUSED_SHARD: // Deprecated: Nothing to 
do break; case OperationType.OP_STARMGR: { StarMgrJournal j = (StarMgrJournal) journal.getData(); StarMgrServer.getCurrentState().getStarMgr().replay(j.getJournal()); break; } case OperationType.OP_CREATE_USER_V2: { CreateUserInfo info = (CreateUserInfo) journal.getData(); globalStateMgr.getAuthenticationMgr().replayCreateUser( info.getUserIdentity(), info.getAuthenticationInfo(), info.getUserProperty(), info.getUserPrivilegeCollection(), info.getPluginId(), info.getPluginVersion()); break; } case OperationType.OP_UPDATE_USER_PRIVILEGE_V2: { UserPrivilegeCollectionInfo info = (UserPrivilegeCollectionInfo) journal.getData(); globalStateMgr.getAuthorizationMgr().replayUpdateUserPrivilegeCollection( info.getUserIdentity(), info.getPrivilegeCollection(), info.getPluginId(), info.getPluginVersion()); break; } case OperationType.OP_ALTER_USER_V2: { AlterUserInfo info = (AlterUserInfo) journal.getData(); globalStateMgr.getAuthenticationMgr().replayAlterUser( info.getUserIdentity(), info.getAuthenticationInfo(), info.getProperties()); break; } case OperationType.OP_UPDATE_USER_PROP_V2: case OperationType.OP_UPDATE_USER_PROP_V3: { UserPropertyInfo info = (UserPropertyInfo) journal.getData(); globalStateMgr.getAuthenticationMgr().replayUpdateUserProperty(info); break; } case OperationType.OP_DROP_USER_V2: case OperationType.OP_DROP_USER_V3: { UserIdentity userIdentity = (UserIdentity) journal.getData(); globalStateMgr.getAuthenticationMgr().replayDropUser(userIdentity); break; } case OperationType.OP_UPDATE_ROLE_PRIVILEGE_V2: { RolePrivilegeCollectionInfo info = (RolePrivilegeCollectionInfo) journal.getData(); globalStateMgr.getAuthorizationMgr().replayUpdateRolePrivilegeCollection(info); break; } case OperationType.OP_DROP_ROLE_V2: { RolePrivilegeCollectionInfo info = (RolePrivilegeCollectionInfo) journal.getData(); globalStateMgr.getAuthorizationMgr().replayDropRole(info); break; } case OperationType.OP_AUTH_UPGRADE_V2: { // for compatibility reason, just ignore the auth 
upgrade log break; } case OperationType.OP_MV_JOB_STATE: { MVMaintenanceJob job = (MVMaintenanceJob) journal.getData(); GlobalStateMgr.getCurrentState().getMaterializedViewMgr().replay(job); break; } case OperationType.OP_MV_EPOCH_UPDATE: { MVEpoch epoch = (MVEpoch) journal.getData(); GlobalStateMgr.getCurrentState().getMaterializedViewMgr().replayEpoch(epoch); break; } case OperationType.OP_MODIFY_TABLE_ADD_OR_DROP_COLUMNS: { final TableAddOrDropColumnsInfo info = (TableAddOrDropColumnsInfo) journal.getData(); globalStateMgr.getSchemaChangeHandler().replayModifyTableAddOrDrop(info); break; } case OperationType.OP_SET_DEFAULT_STORAGE_VOLUME: { SetDefaultStorageVolumeLog log = (SetDefaultStorageVolumeLog) journal.getData(); globalStateMgr.getStorageVolumeMgr().replaySetDefaultStorageVolume(log); break; } case OperationType.OP_CREATE_STORAGE_VOLUME: { StorageVolume sv = (StorageVolume) journal.getData(); globalStateMgr.getStorageVolumeMgr().replayCreateStorageVolume(sv); break; } case OperationType.OP_UPDATE_STORAGE_VOLUME: { StorageVolume sv = (StorageVolume) journal.getData(); globalStateMgr.getStorageVolumeMgr().replayUpdateStorageVolume(sv); break; } case OperationType.OP_DROP_STORAGE_VOLUME: { DropStorageVolumeLog log = (DropStorageVolumeLog) journal.getData(); globalStateMgr.getStorageVolumeMgr().replayDropStorageVolume(log); break; } case OperationType.OP_PIPE: { PipeOpEntry opEntry = (PipeOpEntry) journal.getData(); globalStateMgr.getPipeManager().getRepo().replay(opEntry); break; } case OperationType.OP_CREATE_DICTIONARY: { Dictionary dictionary = (Dictionary) journal.getData(); globalStateMgr.getDictionaryMgr().replayCreateDictionary(dictionary); break; } case OperationType.OP_DROP_DICTIONARY: { DropDictionaryInfo dropInfo = (DropDictionaryInfo) journal.getData(); globalStateMgr.getDictionaryMgr().replayDropDictionary(dropInfo.getDictionaryName()); break; } case OperationType.OP_MODIFY_DICTIONARY_MGR: { DictionaryMgrInfo modifyInfo = (DictionaryMgrInfo) 
journal.getData(); globalStateMgr.getDictionaryMgr().replayModifyDictionaryMgr(modifyInfo); break; } case OperationType.OP_DECOMMISSION_DISK: { DecommissionDiskInfo info = (DecommissionDiskInfo) journal.getData(); globalStateMgr.getNodeMgr().getClusterInfo().replayDecommissionDisks(info); break; } case OperationType.OP_CANCEL_DECOMMISSION_DISK: { CancelDecommissionDiskInfo info = (CancelDecommissionDiskInfo) journal.getData(); globalStateMgr.getNodeMgr().getClusterInfo().replayCancelDecommissionDisks(info); break; } case OperationType.OP_DISABLE_DISK: { DisableDiskInfo info = (DisableDiskInfo) journal.getData(); globalStateMgr.getNodeMgr().getClusterInfo().replayDisableDisks(info); break; } case OperationType.OP_CANCEL_DISABLE_DISK: { CancelDisableDiskInfo info = (CancelDisableDiskInfo) journal.getData(); globalStateMgr.getNodeMgr().getClusterInfo().replayCancelDisableDisks(info); break; } case OperationType.OP_REPLICATION_JOB: { ReplicationJobLog replicationJobLog = (ReplicationJobLog) journal.getData(); globalStateMgr.getReplicationMgr().replayReplicationJob(replicationJobLog.getReplicationJob()); break; } case OperationType.OP_RECOVER_PARTITION_VERSION: { PartitionVersionRecoveryInfo info = (PartitionVersionRecoveryInfo) journal.getData(); GlobalStateMgr.getCurrentState().getMetaRecoveryDaemon().recoverPartitionVersion(info); break; } default: { if (Config.ignore_unknown_log_id) { LOG.warn("UNKNOWN Operation Type {}", opCode); } else { throw new IOException("UNKNOWN Operation Type " + opCode); } } } } catch (Exception e) { JournalInconsistentException exception = new JournalInconsistentException(opCode, "failed to load journal type " + opCode); exception.initCause(e); throw exception; } }
// Verifies that loadJournal wraps an internal failure (here an NPE caused by null
// journal data) into a JournalInconsistentException carrying the original op code.
@Test
public void testLoadJournalException(@Mocked GlobalStateMgr globalStateMgr) {
    JournalEntity journal = new JournalEntity();
    journal.setOpCode(OperationType.OP_SAVE_NEXTID);
    // set data to null, and it will throw NPE in loadJournal()
    journal.setData(null);
    EditLog editLog = new EditLog(null);
    new Expectations() {
        {
            globalStateMgr.getEditLog();
            result = editLog;
        }
    };
    try {
        GlobalStateMgr.getCurrentState().getEditLog().loadJournal(GlobalStateMgr.getCurrentState(), journal);
    } catch (JournalInconsistentException e) {
        Assert.assertEquals(OperationType.OP_SAVE_NEXTID, e.getOpCode());
    }
}
/**
 * Counts all rows in the beta config-info table.
 *
 * @return the number of beta config entries
 * @throws IllegalArgumentException if the count query yields no result
 */
@Override
public int configInfoBetaCount() {
    ConfigInfoBetaMapper mapper = mapperManager.findMapper(
            dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_BETA);
    String countSql = mapper.count(null);
    Integer count = databaseOperate.queryOne(countSql, Integer.class);
    if (count == null) {
        // A null result means the count query produced no row at all.
        throw new IllegalArgumentException("configInfoBetaCount error");
    }
    return count;
}
// configInfoBetaCount should surface the integer produced by the underlying count query.
@Test void testConfigInfoBetaCount() { Mockito.when(databaseOperate.queryOne(anyString(), eq(Integer.class))).thenReturn(101); int returnCount = embeddedConfigInfoBetaPersistService.configInfoBetaCount(); assertEquals(101, returnCount); }
/**
 * Atomically increments this counter by one.
 *
 * @return the counter value after the increment
 */
@Override
public long inc() {
    // The field updater performs a lock-free atomic increment on this instance.
    final long updated = COUNTER.incrementAndGet(this);
    return updated;
}
// A single inc() must bring a fresh counter from 0 to 1.
@Test public void inc() { counter.inc(); assertEquals(1, counter.get()); }
/**
 * Parses a raw User-Agent header into a structured {@code UserAgent}.
 *
 * @param userAgentString the raw User-Agent header value
 * @return the parsed user-agent description
 */
public static UserAgent parse(String userAgentString) {
    // All parsing logic lives in UserAgentParser; this method is a convenience facade.
    final UserAgent parsed = UserAgentParser.parse(userAgentString);
    return parsed;
}
// Parses a Taobao in-app UA string and checks browser, version, engine, OS and mobile detection.
@Test public void parseTaobaoTest() { final String uaString = "Mozilla/5.0 (Linux; U; Android 4.4.4; zh-cn; MI 2C Build/KTU84P) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36 AliApp(TB/4.9.2) WindVane/5.2.2 TBANDROID/700342@taobao_android_4.9.2 720X1280"; final UserAgent ua = UserAgentUtil.parse(uaString); assertEquals("Taobao", ua.getBrowser().toString()); assertEquals("4.9.2", ua.getVersion()); assertEquals("Webkit", ua.getEngine().toString()); assertEquals("537.36", ua.getEngineVersion()); assertEquals("Android", ua.getOs().toString()); assertEquals("4.4.4", ua.getOsVersion()); assertEquals("Android", ua.getPlatform().toString()); assertTrue(ua.isMobile()); }
/**
 * Rewrites the given expression by running it through the configured rewriter.
 *
 * @param expression the expression to rewrite
 * @param context    caller-supplied context passed through to the rewriter
 * @param <T>        the static type of the expression
 * @return the rewritten expression, assumed to keep the same static type
 */
@SuppressWarnings("unchecked")
public <T extends Expression> T rewrite(final T expression, final C context) {
    final Expression rewritten = rewriter.process(expression, context);
    // The rewriter contract is to return the same expression subtype it was given.
    return (T) rewritten;
}
// Rewriting IS NOT NULL must rewrite the inner value while keeping the predicate
// node type and source location intact.
@Test
public void shouldRewriteIsNotNullPredicate() {
    // Given:
    final IsNotNullPredicate parsed = parseExpression("col0 IS NOT NULL");
    when(processor.apply(parsed.getValue(), context)).thenReturn(expr1);
    // When:
    final Expression rewritten = expressionRewriter.rewrite(parsed, context);
    // Then:
    assertThat(rewritten, equalTo(new IsNotNullPredicate(parsed.getLocation(), expr1)));
}
/**
 * Resolves the cluster node responsible for the given key.
 *
 * @param key the raw key bytes
 * @return the node that owns the key's hash slot
 */
@Override
public RedisClusterNode clusterGetNodeForKey(byte[] key) {
    // Map the key to its hash slot first, then look up the slot's owner.
    final int keySlot = executorService.getConnectionManager().calcSlot(key);
    return clusterGetNodeForSlot(keySlot);
}
// clusterGetNodeForKey should resolve some owning node for an arbitrary key.
@Test public void testClusterGetNodeForKey() { RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes()); assertThat(node).isNotNull(); }
/**
 * Merges a source table schema with schema parts derived from a CREATE TABLE ... LIKE clause.
 *
 * @param mergingStrategies     per-feature merging strategies (columns, watermarks, ...)
 * @param sourceSchema          schema of the referenced base table
 * @param derivedColumns        column definitions declared on the derived table
 * @param derivedWatermarkSpecs watermark specifications declared on the derived table
 * @param derivedPrimaryKey     primary key declared on the derived table, may be null
 * @return the merged schema
 */
public Schema mergeTables(
        Map<FeatureOption, MergingStrategy> mergingStrategies,
        Schema sourceSchema,
        List<SqlNode> derivedColumns,
        List<SqlWatermark> derivedWatermarkSpecs,
        SqlTableConstraint derivedPrimaryKey) {
    final SchemaBuilder builder =
            new SchemaBuilder(
                    mergingStrategies,
                    sourceSchema,
                    (FlinkTypeFactory) validator.getTypeFactory(),
                    dataTypeFactory,
                    validator,
                    escapeExpression);
    // Order matters: columns must be appended before watermarks and the primary
    // key, because the latter two reference columns by name.
    builder.appendDerivedColumns(mergingStrategies, derivedColumns);
    builder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
    builder.appendDerivedPrimaryKey(derivedPrimaryKey);
    return builder.build();
}
// Declaring a watermark on a column that already carries one in the base table must
// fail with a descriptive ValidationException.
@Test void mergeIncludingWatermarksFailsOnDuplicate() { Schema sourceSchema = Schema.newBuilder() .column("one", DataTypes.INT()) .column("timestamp", DataTypes.TIMESTAMP()) .watermark("timestamp", "timestamp - INTERVAL '5' SECOND") .build(); List<SqlWatermark> derivedWatermarkSpecs = Collections.singletonList( new SqlWatermark( SqlParserPos.ZERO, identifier("timestamp"), boundedStrategy("timestamp", "10"))); assertThatThrownBy( () -> util.mergeTables( getDefaultMergingStrategies(), sourceSchema, Collections.emptyList(), derivedWatermarkSpecs, null)) .isInstanceOf(ValidationException.class) .hasMessage( "There already exists a watermark spec for column 'timestamp' in the " + "base table. You might want to specify EXCLUDING WATERMARKS " + "or OVERWRITING WATERMARKS."); }
/**
 * Creates a mutable working copy of this tree.
 *
 * @return a mutable tree; the immutable root loggable is marked as expired
 *         because the copy supersedes it
 */
@Override
@NotNull
public BTreeMutable getMutableCopy() {
    final BTreeMutable mutableCopy = new BTreeMutable(this);
    mutableCopy.addExpiredLoggable(rootLoggable);
    return mutableCopy;
}
// Exercises put/delete round-trips across save/reload cycles: re-putting identical
// pairs expires only the old root, and deleting everything leaves an empty tree
// that stays empty after persisting and reopening.
@Test public void testPutAndDelete() { tm = new BTreeEmpty(log, createTestSplittingPolicy(), true, 1).getMutableCopy(); for (int i = 0; i < 100; i++) { getTreeMutable().put(kv(i, "v" + i)); } long rootAddress = saveTree(); tm = new BTree(log, getTreeMutable().getBalancePolicy(), rootAddress, true, 1).getMutableCopy(); checkTree(getTreeMutable(), 100).run(); for (int i = 0; i < 100; i++) { getTreeMutable().put(kv(i, "v" + i)); } Assert.assertEquals(1L, (long) tm.getExpiredLoggables().getSize()); for (int i = 0; i < 100; i++) { final INode ln = kv(i, "v" + i); getTreeMutable().delete(ln.getKey(), ln.getValue()); } Assert.assertEquals(0, tm.getSize()); assertMatchesIterator(tm, Collections.emptyList()); rootAddress = saveTree(); reopen(); t = new BTree(log, rootAddress, true, 1); assertMatchesIterator(tm, Collections.emptyList()); }
/**
 * Decorates both the pod spec and the main container with this decorator's mounts.
 *
 * @param flinkPod the pod to decorate
 * @return a new pod with the mounts applied; the input pod is left untouched
 */
@Override
public FlinkPod decorateFlinkPod(FlinkPod flinkPod) {
    // Decorate the two halves independently, then assemble an immutable copy.
    final Pod decoratedPod = decoratePod(flinkPod.getPodWithoutMainContainer());
    final Container decoratedContainer = decorateMainContainer(flinkPod.getMainContainer());
    return new FlinkPod.Builder(flinkPod)
            .withPod(decoratedPod)
            .withMainContainer(decoratedContainer)
            .build();
}
// The decorator must leave the input pod untouched and return a copy carrying the
// secret volume on the pod and the mount on the main container.
@Test void testWhetherPodOrContainerIsDecorated() { final FlinkPod resultFlinkPod = mountSecretsDecorator.decorateFlinkPod(baseFlinkPod); assertThat( VolumeTestUtils.podHasVolume( baseFlinkPod.getPodWithoutMainContainer(), SECRET_NAME + "-volume")) .isFalse(); assertThat( VolumeTestUtils.podHasVolume( resultFlinkPod.getPodWithoutMainContainer(), SECRET_NAME + "-volume")) .isTrue(); assertThat( VolumeTestUtils.containerHasVolume( baseFlinkPod.getMainContainer(), SECRET_NAME + "-volume", SECRET_MOUNT_PATH)) .isFalse(); assertThat( VolumeTestUtils.containerHasVolume( resultFlinkPod.getMainContainer(), SECRET_NAME + "-volume", SECRET_MOUNT_PATH)) .isTrue(); }
/**
 * Fetches the resource at {@code url} as binary content, without conditional
 * request validators (no last-modified / etag headers are sent).
 *
 * @param url the URL to fetch
 * @return the HTTP result
 * @throws IOException          on network failure
 * @throws NotModifiedException if the server reports the content is unchanged
 */
public HttpResult getBinary(String url) throws IOException, NotModifiedException {
    // Delegate to the full variant with both cache validators absent.
    return getBinary(url, null, null);
}
// A non-routable address should trip the configured 500 ms connect timeout.
@Test
void connectTimeout() {
    Mockito.when(config.httpClient().connectTimeout()).thenReturn(Duration.ofMillis(500));
    this.getter = new HttpGetter(config, Mockito.mock(CommaFeedVersion.class), Mockito.mock(MetricRegistry.class));
    // try to connect to a non-routable address
    // https://stackoverflow.com/a/904609
    Assertions.assertThrows(ConnectTimeoutException.class, () -> getter.getBinary("http://10.255.255.1"));
}
/**
 * Implements next_day(date, dow_string): the first date strictly after {@code date}
 * that falls on the requested day of week.
 *
 * <p>Accepts the full name, the three-letter and the two-letter abbreviation of each
 * day (e.g. "Sunday", "Sun", "Su"). The original implementation duplicated the date
 * arithmetic across seven branches with per-branch magic constants; this version maps
 * the name to its ISO day-of-week number once and applies a single formula.
 *
 * @param date the starting datetime
 * @param dow  the requested day of week as a string constant
 * @return the resulting date, always 1..7 days after the input (never the input itself)
 * @throws IllegalArgumentException if {@code dow} is not a recognized day name
 */
@ConstantFunction(name = "next_day", argTypes = {DATETIME, VARCHAR}, returnType = DATE, isMonotonic = true)
public static ConstantOperator nextDay(ConstantOperator date, ConstantOperator dow) {
    // ISO-8601 numbering, matching DayOfWeek.getValue(): Monday = 1 ... Sunday = 7.
    final int targetDow;
    switch (dow.getVarchar()) {
        case "Monday": case "Mon": case "Mo":
            targetDow = 1;
            break;
        case "Tuesday": case "Tue": case "Tu":
            targetDow = 2;
            break;
        case "Wednesday": case "Wed": case "We":
            targetDow = 3;
            break;
        case "Thursday": case "Thu": case "Th":
            targetDow = 4;
            break;
        case "Friday": case "Fri": case "Fr":
            targetDow = 5;
            break;
        case "Saturday": case "Sat": case "Sa":
            targetDow = 6;
            break;
        case "Sunday": case "Sun": case "Su":
            targetDow = 7;
            break;
        default:
            throw new IllegalArgumentException(dow + " not supported in next_day dow_string");
    }
    int dateDowValue = date.getDate().getDayOfWeek().getValue();
    // (6 + target - current) % 7 + 1 is always in 1..7, so the result is strictly
    // after the input date; e.g. asking for the same weekday jumps a full week.
    long daysAhead = (6L + targetDow - dateDowValue) % 7 + 1;
    return ConstantOperator.createDateOrNull(date.getDate().plusDays(daysAhead));
}
// next_day: the Sunday after Mon 2015-03-23 is 2015-03-29; unknown day names are rejected.
@Test public void nextDay() { assertEquals("2015-03-29T09:23:55", ScalarOperatorFunctions.nextDay(O_DT_20150323_092355, ConstantOperator.createVarchar("Sunday")).getDate().toString()); Assert.assertThrows("undefine_dow not supported in next_day dow_string", IllegalArgumentException.class, () -> ScalarOperatorFunctions.nextDay(O_DT_20150323_092355, ConstantOperator.createVarchar("undefine_dow")) .getVarchar()); }
/**
 * Finds a method by name, ignoring case, with the given parameter types.
 *
 * @param clazz      the class to search
 * @param methodName the method name (matched case-insensitively)
 * @param paramTypes the expected parameter types
 * @return the matching method, as resolved by {@code getMethod}
 * @throws SecurityException if reflective access is denied
 */
public static Method getMethodIgnoreCase(Class<?> clazz, String methodName, Class<?>... paramTypes) throws SecurityException {
    // Same lookup as getMethod, with the ignore-case flag switched on.
    return getMethod(clazz, true, methodName, paramTypes);
}
// Method lookup must succeed regardless of the case used in the requested name.
@Test public void getMethodIgnoreCaseTest() { Method method = ReflectUtil.getMethodIgnoreCase(ExamInfoDict.class, "getId"); assertEquals("getId", method.getName()); assertEquals(0, method.getParameterTypes().length); method = ReflectUtil.getMethodIgnoreCase(ExamInfoDict.class, "GetId"); assertEquals("getId", method.getName()); assertEquals(0, method.getParameterTypes().length); method = ReflectUtil.getMethodIgnoreCase(ExamInfoDict.class, "setanswerIs", Integer.class); assertEquals("setAnswerIs", method.getName()); assertEquals(1, method.getParameterTypes().length); }
/**
 * Determines which of two .NET dependencies is the "main" one when they describe
 * the same package (same non-null name and version, both in the DOTNET ecosystem).
 *
 * @param dependency1 the first dependency
 * @param dependency2 the second dependency
 * @return the non-virtual dependency of a matching pair, or null if the pair does not match
 */
protected Dependency getMainDotnetDependency(Dependency dependency1, Dependency dependency2) {
    if (dependency1.getName() == null || dependency1.getVersion() == null
            || dependency2.getName() == null || dependency2.getVersion() == null) {
        return null;
    }
    if (!Ecosystem.DOTNET.equals(dependency1.getEcosystem())
            || !Ecosystem.DOTNET.equals(dependency2.getEcosystem())) {
        return null;
    }
    if (!dependency1.getName().equals(dependency2.getName())
            || !dependency1.getVersion().equals(dependency2.getVersion())) {
        return null;
    }
    // Prefer the dependency backed by a real file over a virtual placeholder.
    return dependency1.isVirtual() ? dependency2 : dependency1;
}
// Only same-name/same-version DOTNET pairs merge, and the non-virtual dependency
// wins regardless of argument order.
@Test public void testGetMainDotnetDependency() { Dependency dependency1 = new Dependency(BaseTest.getResourceAsFile(this, "log4net.dll")); dependency1.setEcosystem(AssemblyAnalyzer.DEPENDENCY_ECOSYSTEM); dependency1.setName("log4net"); dependency1.setVersion("1.2.13"); Dependency dependency2 = new Dependency(true); dependency2.setEcosystem(NugetconfAnalyzer.DEPENDENCY_ECOSYSTEM); dependency2.setName("test"); dependency2.setVersion("1.2.13"); DependencyMergingAnalyzer instance = new DependencyMergingAnalyzer(); Dependency expResult = null; Dependency result = instance.getMainDotnetDependency(dependency1, dependency2); assertEquals(expResult, result); dependency2.setName("log4net"); expResult = dependency1; result = instance.getMainDotnetDependency(dependency1, dependency2); assertEquals(expResult, result); result = instance.getMainDotnetDependency(dependency2, dependency1); assertEquals(expResult, result); }
/**
 * Creates a transformer that applies the given retry policy to a reactive stream.
 *
 * @param retry the retry policy to apply
 * @param <T>   the stream element type
 * @return a new transformer bound to {@code retry}
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    final RetryTransformer<T> transformer = new RetryTransformer<>(retry);
    return transformer;
}
// Each failing Flowable subscription retries up to the configured attempts
// (2 subscriptions x 3 calls = 6) and still surfaces the original error; the
// retry metrics count one failed-with-retry call per subscription.
@Test public void returnOnCompleteUsingFlowable() throws InterruptedException { RetryConfig config = retryConfig(); Retry retry = Retry.of("testName", config); RetryTransformer<Object> retryTransformer = RetryTransformer.of(retry); given(helloWorldService.returnHelloWorld()) .willThrow(new HelloWorldException()); Flowable.fromCallable(helloWorldService::returnHelloWorld) .compose(retryTransformer) .test() .await() .assertError(HelloWorldException.class) .assertNotComplete(); Flowable.fromCallable(helloWorldService::returnHelloWorld) .compose(retryTransformer) .test() .await() .assertError(HelloWorldException.class) .assertNotComplete(); then(helloWorldService).should(times(6)).returnHelloWorld(); Retry.Metrics metrics = retry.getMetrics(); assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(2); assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero(); }
/**
 * Converts a protobuf message into an Avro record with the given schema.
 *
 * @param schema  the target Avro schema
 * @param message the protobuf message to convert
 * @return the converted Avro record
 */
public static GenericRecord convertToAvro(Schema schema, Message message) {
    // Conversion details are encapsulated in AvroSupport; this is a stable facade.
    return AvroSupport.convert(schema, message);
}
// A default (empty) protobuf message must convert to the Avro record of schema defaults.
@Test public void noFieldsSet_defaultOptions() throws IOException { Sample sample = Sample.newBuilder().build(); Schema.Parser parser = new Schema.Parser(); Schema convertedSchema = parser.parse(getClass().getClassLoader().getResourceAsStream("schema-provider/proto/sample_schema_defaults.avsc")); GenericRecord actual = serializeAndDeserializeAvro(ProtoConversionUtil.convertToAvro(convertedSchema, sample), convertedSchema); Assertions.assertEquals(createDefaultOutput(convertedSchema), actual); }
/**
 * Coerces {@code left} and {@code right} to comparable types for a binary comparison.
 * Returns the (possibly replaced) sides plus a flag telling the caller whether the
 * right side must be emitted as a static field (the date/time parsing branches).
 * Same classes and unification expressions pass through untouched; incompatible
 * types fail fast with a CoercedExpressionException.
 *
 * NOTE(review): the if/else chain below is order-sensitive — e.g. the literal-number
 * branch must run before the narrowing branch — so branches must not be reordered.
 *
 * @throws CoercedExpressionException if the two types cannot be coerced
 */
public CoercedExpressionResult coerce() { final Class<?> leftClass = left.getRawClass(); final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass); final Class<?> rightClass = right.getRawClass(); final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass); boolean sameClass = leftClass == rightClass; boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression; if (sameClass || isUnificationExpression) { return new CoercedExpressionResult(left, right); } if (!canCoerce()) { throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass)); } if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) { CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression()); return new CoercedExpressionResult( new TypedExpression(castExpression, double.class, left.getType()), right, false); } final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass ); final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass); boolean rightAsStaticField = false; final Expression rightExpression = right.getExpression(); final TypedExpression coercedRight; if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) { final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass); coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType); coercedRight.setType( leftClass ); } else if (shouldCoerceBToString(left, right)) { coercedRight = coerceToString(right); } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) { coercedRight = castToClass(leftClass); } else if (leftClass == long.class && rightClass == int.class) { coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression())); } else if (leftClass == Date.class && rightClass == String.class) { coercedRight = coerceToDate(right); rightAsStaticField = true; } else if (leftClass == LocalDate.class && rightClass == String.class) { coercedRight = coerceToLocalDate(right); rightAsStaticField = true; } else if (leftClass == LocalDateTime.class && rightClass == String.class) { coercedRight = coerceToLocalDateTime(right); rightAsStaticField = true; } else if (shouldCoerceBToMap()) { coercedRight = castToClass(toNonPrimitiveType(leftClass)); } else if (isBoolean(leftClass) && !isBoolean(rightClass)) { coercedRight = coerceBoolean(right); } else { coercedRight = right; } final TypedExpression coercedLeft; if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) { coercedLeft = coerceToString(left); } else { coercedLeft = left; } return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField); }
// Comparing an int against the string literal "50" coerces the right side to the int literal 50.
@Test public void stringToInt2() { final TypedExpression left = expr(THIS_PLACEHOLDER + ".getAge()", int.class); final TypedExpression right = expr("\"50\"", String.class); final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce(); assertThat(coerce.getCoercedLeft()).isEqualTo(expr(THIS_PLACEHOLDER + ".getAge()", int.class)); assertThat(coerce.getCoercedRight()).isEqualTo(expr("50", int.class)); }
/**
 * Adds each item's display name to the search index, keyed by its search URL.
 *
 * @param sib   the index builder to populate
 * @param items the items to index
 */
void addDisplayNamesToSearchIndex(SearchIndexBuilder sib, Collection<TopLevelItem> items) {
    for (final TopLevelItem candidate : items) {
        // Guard the String.format work so it only runs when FINE logging is active.
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine(String.format("Adding url=%s,displayName=%s", candidate.getSearchUrl(), candidate.getDisplayName()));
        }
        sib.add(candidate.getSearchUrl(), candidate.getDisplayName());
    }
}
// Index two mocked items and verify each can be retrieved from the built index by
// display name, with its search URL preserved.
@Test
public void testAddDisplayNamesToSearchIndex() {
    final String url1 = "url1";
    final String displayName1 = "displayName1";
    final String url2 = "url2";
    final String displayName2 = "displayName2";
    SearchIndexBuilder sib = new SearchIndexBuilder();
    // mock the items to be indexed
    TopLevelItem item1 = Mockito.mock(TopLevelItem.class);
    Mockito.when(item1.getSearchUrl()).thenReturn(url1);
    Mockito.when(item1.getDisplayName()).thenReturn(displayName1);
    TopLevelItem item2 = Mockito.mock(TopLevelItem.class);
    Mockito.when(item2.getSearchUrl()).thenReturn(url2);
    Mockito.when(item2.getDisplayName()).thenReturn(displayName2);
    Collection<TopLevelItem> items = new ArrayList<>();
    items.add(item1);
    items.add(item2);
    // mock the view class except for the addDisplayNamesToSearchIndex() call as that
    // is what we are testing
    View view = Mockito.mock(View.class);
    Mockito.doCallRealMethod().when(view).addDisplayNamesToSearchIndex(sib, items);
    // now make the actual call to index items
    view.addDisplayNamesToSearchIndex(sib, items);
    // make and index with sib
    SearchIndex index = sib.make();
    // now make sure we can fetch item1 from the index
    List<SearchItem> result = new ArrayList<>();
    index.find(displayName1, result);
    assertEquals(1, result.size());
    SearchItem actual = result.get(0);
    assertEquals(actual.getSearchName(), item1.getDisplayName());
    assertEquals(actual.getSearchUrl(), item1.getSearchUrl());
    // clear the result array for the next search result to test
    result.clear();
    // make sure we can fetch item 2 from the index
    index.find(displayName2, result);
    assertEquals(1, result.size());
    actual = result.get(0);
    assertEquals(actual.getSearchName(), item2.getDisplayName());
    assertEquals(actual.getSearchUrl(), item2.getSearchUrl());
}
/**
 * Builds a result describing an authentication exchange that is not yet finished.
 *
 * @return an unfinished result carrying no username, database or extra state
 */
public static AuthenticationResult continued() {
    // All identity fields are intentionally absent; only "finished = false" matters here.
    final AuthenticationResult pending = new AuthenticationResult(null, null, null, false);
    return pending;
}
// continued() yields an unfinished result with no username or database.
@Test void assertContinuedWithEmpty() { AuthenticationResult actual = AuthenticationResultBuilder.continued(); assertNull(actual.getUsername()); assertNull(actual.getDatabase()); assertFalse(actual.isFinished()); }
@Override public void deleteTenant(Long id) { // 校验存在 validateUpdateTenant(id); // 删除 tenantMapper.deleteById(id); }
// Deleting the built-in system tenant must be rejected with TENANT_CAN_NOT_UPDATE_SYSTEM.
@Test
public void testDeleteTenant_system() {
    // mock data
    TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(PACKAGE_ID_SYSTEM));
    tenantMapper.insert(dbTenant);// @Sql: insert an existing row first
    // prepare arguments
    Long id = dbTenant.getId();
    // invoke and assert the expected service exception
    assertServiceException(() -> tenantService.deleteTenant(id), TENANT_CAN_NOT_UPDATE_SYSTEM);
}
/**
 * Rejects any requested property override that the server has marked immutable.
 *
 * @param properties the property overrides requested by the client
 * @throws KsqlException if any requested property is on the immutable list;
 *                       all offending properties are reported together
 */
public void validateAll(final Map<String, Object> properties) {
    final Set<String> denied = Sets.intersection(immutableProps, properties.keySet());
    if (denied.isEmpty()) {
        return;
    }
    throw new KsqlException(String.format("One or more properties overrides set locally are "
        + "prohibited by the KSQL server (use UNSET to reset their default value): %s",
        denied));
}
// All denied properties must be reported together in a single exception message.
@Test
public void shouldThrowOnDenyListedProperty() {
    // When:
    final KsqlException e = assertThrows(
        KsqlException.class,
        () -> validator.validateAll(ImmutableMap.of(
            "immutable-property-1", "v1",
            "anything", "v2",
            "immutable-property-2", "v3"
        ))
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "One or more properties overrides set locally are prohibited by the KSQL server "
            + "(use UNSET to reset their default value): "
            + "[immutable-property-1, immutable-property-2]"
    ));
}
/**
 * Returns a voter set with the given voter removed, if removal is possible.
 *
 * <p>Removal fails (empty result) when the voter id is unknown, when the replica
 * key does not exactly match the registered voter, or when it is the last
 * remaining voter in the set.
 *
 * @param voterKey id and directory id of the voter to remove
 * @return the shrunken voter set, or empty if removal is not possible
 */
public Optional<VoterSet> removeVoter(ReplicaKey voterKey) {
    final VoterNode registered = voters.get(voterKey.id());
    if (registered == null
            || !Objects.equals(registered.voterKey(), voterKey)
            || voters.size() <= 1) {
        return Optional.empty();
    }
    // Copy-on-write: the receiver stays immutable and usable by concurrent readers.
    final HashMap<Integer, VoterNode> remaining = new HashMap<>(voters);
    remaining.remove(voterKey.id());
    return Optional.of(new VoterSet(remaining));
}
// Removing an unknown id or a mismatched directory id yields empty; removing a
// registered voter yields the set without it.
@Test void testRemoveVoter() { Map<Integer, VoterSet.VoterNode> aVoterMap = voterMap(IntStream.of(1, 2, 3), true); VoterSet voterSet = VoterSet.fromMap(new HashMap<>(aVoterMap)); assertEquals(Optional.empty(), voterSet.removeVoter(ReplicaKey.of(4, ReplicaKey.NO_DIRECTORY_ID))); assertEquals(Optional.empty(), voterSet.removeVoter(ReplicaKey.of(4, Uuid.randomUuid()))); VoterSet.VoterNode voter3 = aVoterMap.remove(3); assertEquals( Optional.of(VoterSet.fromMap(new HashMap<>(aVoterMap))), voterSet.removeVoter(voter3.voterKey()) ); }
/**
 * Loads the metrics and logging config maps concurrently and combines the results.
 *
 * @param reconciliation      reconciliation marker used for logging
 * @param configMapOperations operator for reading config maps
 * @param logging             logging model (may reference an external config map)
 * @param metrics             metrics model (may reference an external config map)
 * @return future completing with both config maps (either may be null)
 */
public static Future<MetricsAndLogging> metricsAndLogging(Reconciliation reconciliation, ConfigMapOperator configMapOperations, LoggingModel logging, MetricsModel metrics) {
    // Join order defines the result indices: 0 = metrics map, 1 = logging map.
    return Future
            .join(metricsConfigMap(reconciliation, configMapOperations, metrics),
                    loggingConfigMap(reconciliation, configMapOperations, logging))
            .map(joined -> new MetricsAndLogging(joined.resultAt(0), joined.resultAt(1)));
}
// Both externally-referenced config maps are fetched (one getAsync each) and exposed
// on the combined MetricsAndLogging result.
@Test public void testMetricsAndExternalLogging(VertxTestContext context) { LoggingModel logging = new LoggingModel(new KafkaConnectSpecBuilder().withLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelector("log4j.properties", "logging-cm", false)).endValueFrom().build()).build(), "KafkaConnectCluster", false, true); MetricsModel metrics = new MetricsModel(new KafkaConnectSpecBuilder().withMetricsConfig(new JmxPrometheusExporterMetricsBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelector("metrics.yaml", "metrics-cm", false)).endValueFrom().build()).build()); ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class); when(mockCmOps.getAsync(any(), eq("logging-cm"))).thenReturn(Future.succeededFuture(new ConfigMapBuilder().withNewMetadata().withName("logging-cm").endMetadata().withData(Map.of()).build())); when(mockCmOps.getAsync(any(), eq("metrics-cm"))).thenReturn(Future.succeededFuture(new ConfigMapBuilder().withNewMetadata().withName("metrics-cm").endMetadata().withData(Map.of()).build())); Checkpoint async = context.checkpoint(); MetricsAndLoggingUtils.metricsAndLogging(Reconciliation.DUMMY_RECONCILIATION, mockCmOps, logging, metrics) .onComplete(context.succeeding(v -> context.verify(() -> { assertThat(v.loggingCm(), is(notNullValue())); assertThat(v.loggingCm().getMetadata().getName(), is("logging-cm")); assertThat(v.metricsCm(), is(notNullValue())); assertThat(v.metricsCm().getMetadata().getName(), is("metrics-cm")); verify(mockCmOps, times(2)).getAsync(any(), any()); async.flag(); }))); }
/**
 * Renames a table, possibly moving it to a different (existing) schema.
 *
 * @param session      the connector session
 * @param tableHandle  handle of the table to rename
 * @param newTableName fully-qualified new table name
 */
@Override
public synchronized void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, SchemaTableName newTableName) {
    // The target schema must already exist and the target name must be free.
    checkSchemaExists(newTableName.getSchemaName());
    checkTableNotExists(newTableName);
    MemoryTableHandle handle = (MemoryTableHandle) tableHandle;
    MemoryTableHandle renamed = new MemoryTableHandle(
            handle.getConnectorId(),
            newTableName.getSchemaName(),
            newTableName.getTableName(),
            handle.getTableId(),
            handle.getColumnHandles());
    // Re-key the name index, then swap the handle stored under the (unchanged) table id.
    tableIds.remove(handle.toSchemaTableName());
    tableIds.put(newTableName, handle.getTableId());
    tables.remove(handle.getTableId());
    tables.put(handle.getTableId(), renamed);
}
// Renaming into a missing schema fails; renaming within a schema and across existing
// schemas moves the table listing accordingly.
@Test
public void testRenameTable() {
    SchemaTableName tableName = new SchemaTableName("test_schema", "test_table_to_be_renamed");
    metadata.createSchema(SESSION, "test_schema", ImmutableMap.of());
    ConnectorOutputTableHandle table = metadata.beginCreateTable(
        SESSION,
        new ConnectorTableMetadata(tableName, ImmutableList.of(), ImmutableMap.of()),
        Optional.empty());
    metadata.finishCreateTable(SESSION, table, ImmutableList.of(), ImmutableList.of());
    // rename table to schema which does not exist
    SchemaTableName invalidSchemaTableName = new SchemaTableName("test_schema_not_exist", "test_table_renamed");
    ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, tableName);
    Throwable throwable = expectThrows(SchemaNotFoundException.class,
        () -> metadata.renameTable(SESSION, tableHandle, invalidSchemaTableName));
    assertTrue(throwable.getMessage().equals("Schema test_schema_not_exist not found"));
    // rename table to same schema
    SchemaTableName sameSchemaTableName = new SchemaTableName("test_schema", "test_renamed");
    metadata.renameTable(SESSION, metadata.getTableHandle(SESSION, tableName), sameSchemaTableName);
    assertEquals(metadata.listTables(SESSION, "test_schema"), ImmutableList.of(sameSchemaTableName));
    // rename table to different schema
    metadata.createSchema(SESSION, "test_different_schema", ImmutableMap.of());
    SchemaTableName differentSchemaTableName = new SchemaTableName("test_different_schema", "test_renamed");
    metadata.renameTable(SESSION, metadata.getTableHandle(SESSION, sameSchemaTableName), differentSchemaTableName);
    assertEquals(metadata.listTables(SESSION, "test_schema"), ImmutableList.of());
    assertEquals(metadata.listTables(SESSION, "test_different_schema"), ImmutableList.of(differentSchemaTableName));
}
/**
 * Resolves a location string to a URL.
 *
 * <p>A location without any URL scheme is looked up on the classpath. A location with the
 * explicit "classpath:" scheme is looked up on the classpath after the scheme (and an
 * optional leading slash) is stripped. Any other scheme is handed to {@link java.net.URL}.
 *
 * @param location the resource location; must not be null
 * @return the resolved URL, never null
 * @throws MalformedURLException if the classpath path is empty or the URL is malformed
 * @throws FileNotFoundException if the resource cannot be found
 */
public static URL urlForResource(String location) throws MalformedURLException, FileNotFoundException {
    if (location == null) {
        throw new NullPointerException("location is required");
    }

    final URL result;
    if (!location.matches(SCHEME_PATTERN)) {
        // No scheme at all: treat the whole string as a classpath resource.
        result = Loader.getResourceBySelfClassLoader(location);
    } else if (location.startsWith(CLASSPATH_SCHEME)) {
        // Explicit classpath scheme: strip scheme and an optional leading slash.
        String path = location.substring(CLASSPATH_SCHEME.length());
        if (path.startsWith("/")) {
            path = path.substring(1);
        }
        if (path.isEmpty()) {
            throw new MalformedURLException("path is required");
        }
        result = Loader.getResourceBySelfClassLoader(path);
    } else {
        // Some other scheme (file:, http:, ...): defer to java.net.URL.
        result = new URL(location);
    }

    if (result == null) {
        throw new FileNotFoundException(location);
    }
    return result;
}
/** A "classpath:/..." location with a leading slash must resolve just like one without it. */
@Test
public void testExplicitClasspathUrlWithLeadingSlash() throws Exception {
    final String location = LocationUtil.CLASSPATH_SCHEME + "/" + TEST_CLASSPATH_RESOURCE;
    validateResource(LocationUtil.urlForResource(location));
}
/**
 * Writes a snapshot record for the given cluster session to the publication.
 *
 * <p>When the encoded record fits within a single frame it is encoded directly into
 * claimed publication space; otherwise it is encoded into the scratch buffer and
 * offered, letting the publication fragment it.
 *
 * @param session the session to snapshot
 */
void snapshotSession(final ClusterSession session)
{
    final String responseChannel = session.responseChannel();
    final int length = MessageHeaderEncoder.ENCODED_LENGTH + ClusterSessionEncoder.BLOCK_LENGTH +
        ClusterSessionEncoder.responseChannelHeaderLength() + responseChannel.length();

    if (length > publication.maxPayloadLength())
    {
        // Too large for a single claimed frame: encode into the scratch buffer and offer.
        final int offset = 0;
        encodeSession(session, responseChannel, offerBuffer, offset);
        offer(offerBuffer, offset, length);
        return;
    }

    // Fits in one frame: claim space in the publication and encode in place.
    idleStrategy.reset();
    long result;
    while ((result = publication.tryClaim(length, bufferClaim)) <= 0)
    {
        // Back-pressured or transient error: validate the code and idle before retrying.
        checkResultAndIdle(result);
    }
    encodeSession(session, responseChannel, bufferClaim.buffer(), bufferClaim.offset());
    bufferClaim.commit();
}
/**
 * When the encoded session record exceeds the publication's max payload length the snapshot
 * taker must fall back to {@code offer(...)} (which can fragment) instead of {@code tryClaim},
 * idling between back-pressured attempts, and the record finally written must round-trip all
 * session fields through the decoder.
 */
@Test
void snapshotSessionShouldUseOfferIfDataDoesNotFitIntoMaxPayloadSize()
{
    final String responseChannel = "aeron:ipc?alias=very very very long string|mtu=4444";
    // Mirror the length computation used by snapshotSession itself.
    final int length = MessageHeaderEncoder.ENCODED_LENGTH + ClusterSessionEncoder.BLOCK_LENGTH +
        ClusterSessionEncoder.responseChannelHeaderLength() + responseChannel.length();
    final ClusterSession clusterSession = new ClusterSession(42, 4, responseChannel);
    clusterSession.loadSnapshotState(
        -1, 76, 98, CloseReason.TIMEOUT);
    // Force the record to be one byte larger than a single frame allows.
    when(publication.maxPayloadLength()).thenReturn(length - 1);
    // First two offers fail (back pressure, admin action), third succeeds via mockOffer().
    when(publication.offer(any(), eq(0), eq(length)))
        .thenReturn(BACK_PRESSURED, ADMIN_ACTION)
        .thenAnswer(mockOffer());

    snapshotTaker.snapshotSession(clusterSession);

    // Exact interleaving matters: an idle must follow each failed offer, none after success.
    final InOrder inOrder = inOrder(idleStrategy, publication);
    inOrder.verify(publication).maxPayloadLength();
    inOrder.verify(idleStrategy).reset();
    inOrder.verify(publication).offer(any(), anyInt(), anyInt());
    inOrder.verify(idleStrategy).idle();
    inOrder.verify(publication).offer(any(), anyInt(), anyInt());
    inOrder.verify(idleStrategy).idle();
    inOrder.verify(publication).offer(any(), anyInt(), anyInt());
    inOrder.verifyNoMoreInteractions();

    // Decode what was written and confirm every field survived the round trip.
    clusterSessionDecoder.wrapAndApplyHeader(buffer, 0, messageHeaderDecoder);
    assertEquals(clusterSession.id(), clusterSessionDecoder.clusterSessionId());
    assertEquals(clusterSession.correlationId(), clusterSessionDecoder.correlationId());
    assertEquals(clusterSession.openedLogPosition(), clusterSessionDecoder.openedLogPosition());
    assertEquals(Aeron.NULL_VALUE, clusterSessionDecoder.timeOfLastActivity());
    assertEquals(clusterSession.closeReason(), clusterSessionDecoder.closeReason());
    assertEquals(clusterSession.responseStreamId(), clusterSessionDecoder.responseStreamId());
    assertEquals(responseChannel, clusterSessionDecoder.responseChannel());
}
/**
 * Rebuilds the segment with purgeable records removed and modifiable records updated.
 *
 * <p>Performs two passes over the segment via {@code PurgeRecordReader}: a counting pass to
 * decide whether any rebuild is needed, then (after a rewind) a generation pass driven by
 * {@code SegmentIndexCreationDriverImpl}.
 *
 * @return the rebuilt segment directory under the working dir, or {@code null} when no record
 *         was purged or modified (so the original segment can be kept as-is)
 * @throws Exception if reading the segment or building the new one fails
 */
public File purgeSegment()
    throws Exception {
  SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(_indexDir);
  String segmentName = segmentMetadata.getName();
  String tableNameWithType = _tableConfig.getTableName();
  LOGGER.info("Start purging table: {}, segment: {}", tableNameWithType, segmentName);

  try (PurgeRecordReader purgeRecordReader = new PurgeRecordReader()) {
    // Make a first pass through the data to see if records need to be purged or modified.
    // The reader updates _numRecordsPurged/_numRecordsModified as a side effect of iteration.
    while (purgeRecordReader.hasNext()) {
      purgeRecordReader.next();
    }
    if (_numRecordsModified == 0 && _numRecordsPurged == 0) {
      // Returns null if no records to be modified or purged
      return null;
    }

    SegmentGeneratorConfig config = new SegmentGeneratorConfig(_tableConfig, _schema);
    config.setOutDir(_workingDir.getPath());
    config.setSegmentName(segmentName);

    // Keep index creation time the same as original segment because both segments use the same raw data.
    // This way, for REFRESH case, when new segment gets pushed to controller, we can use index creation time to
    // identify if the new pushed segment has newer data than the existing one.
    config.setCreationTime(String.valueOf(segmentMetadata.getIndexCreationTime()));

    // The time column type info is not stored in the segment metadata.
    // Keep segment start/end time to properly handle time column type other than EPOCH (e.g.SIMPLE_FORMAT).
    if (segmentMetadata.getTimeInterval() != null) {
      config.setTimeColumnName(_tableConfig.getValidationConfig().getTimeColumnName());
      config.setStartTime(Long.toString(segmentMetadata.getStartTime()));
      config.setEndTime(Long.toString(segmentMetadata.getEndTime()));
      config.setSegmentTimeUnit(segmentMetadata.getTimeUnit());
    }

    // Second pass: rewind the reader and let the driver rebuild the segment from it.
    SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
    purgeRecordReader.rewind();
    driver.init(config, purgeRecordReader);
    driver.build();
  }

  LOGGER.info("Finish purging table: {}, segment: {}, purged {} records, modified {} records", tableNameWithType,
      segmentName, _numRecordsPurged, _numRecordsModified);
  return new File(_workingDir, segmentName);
}
/**
 * End-to-end check of {@code SegmentPurger}: records with d1 = 0 are purged, records with
 * d2 = 0 are rewritten to d2 = Integer.MAX_VALUE, metadata (crc changes, creation time kept)
 * is correct, and the original per-column index configuration survives the rebuild.
 */
@Test
public void testPurgeSegment()
    throws Exception {
  // Purge records with d1 = 0
  SegmentPurger.RecordPurger recordPurger = row -> row.getValue(D1).equals(0);
  // Modify records with d2 = 0 to d2 = Integer.MAX_VALUE
  SegmentPurger.RecordModifier recordModifier = row -> {
    if (row.getValue(D2).equals(0)) {
      row.putValue(D2, Integer.MAX_VALUE);
      return true;
    } else {
      return false;
    }
  };

  SegmentPurger segmentPurger =
      new SegmentPurger(_originalIndexDir, PURGED_SEGMENT_DIR, _tableConfig, _schema, recordPurger, recordModifier);
  File purgedIndexDir = segmentPurger.purgeSegment();

  // Check the purge/modify counter in segment purger
  assertEquals(segmentPurger.getNumRecordsPurged(), _expectedNumRecordsPurged);
  assertEquals(segmentPurger.getNumRecordsModified(), _expectedNumRecordsModified);

  // Check crc and index creation time; crc must differ (data changed) while creation time
  // must be carried over from the original segment (REFRESH-freshness contract).
  SegmentMetadataImpl purgedSegmentMetadata = new SegmentMetadataImpl(purgedIndexDir);
  SegmentMetadataImpl originalSegmentMetadata = new SegmentMetadataImpl(_originalIndexDir);
  assertNotEquals(purgedSegmentMetadata.getCrc(), originalSegmentMetadata.getCrc());
  assertEquals(purgedSegmentMetadata.getIndexCreationTime(), originalSegmentMetadata.getIndexCreationTime());

  try (PinotSegmentRecordReader pinotSegmentRecordReader = new PinotSegmentRecordReader(purgedIndexDir)) {
    int numRecordsRemaining = 0;
    int numRecordsModified = 0;
    GenericRow row = new GenericRow();
    while (pinotSegmentRecordReader.hasNext()) {
      row = pinotSegmentRecordReader.next(row);
      // Purged segment should not have any record with d1 = 0 or d2 = 0
      assertNotEquals(row.getValue(D1), 0);
      assertNotEquals(row.getValue(D2), 0);
      numRecordsRemaining++;
      if (row.getValue(D2).equals(Integer.MAX_VALUE)) {
        numRecordsModified++;
      }
    }
    assertEquals(numRecordsRemaining, NUM_ROWS - _expectedNumRecordsPurged);
    assertEquals(numRecordsModified, _expectedNumRecordsModified);
  }

  // Check inverted index: D1 had one originally, D2 did not — the rebuild must preserve that.
  Map<String, Object> props = new HashMap<>();
  props.put(IndexLoadingConfig.READ_MODE_KEY, ReadMode.mmap.toString());
  try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
      .load(purgedIndexDir.toURI(),
          new SegmentDirectoryLoaderContext.Builder().setTableConfig(_tableConfig)
              .setSegmentName(purgedSegmentMetadata.getName()).setSegmentDirectoryConfigs(new PinotConfiguration(props))
              .build());
      SegmentDirectory.Reader reader = segmentDirectory.createReader()) {
    assertTrue(reader.hasIndexFor(D1, StandardIndexes.inverted()));
    assertFalse(reader.hasIndexFor(D2, StandardIndexes.inverted()));
  }
}
public static String generateDatabaseId(String baseString) { checkArgument(baseString.length() != 0, "baseString cannot be empty!"); String databaseId = generateResourceId( baseString, ILLEGAL_DATABASE_CHARS, REPLACE_DATABASE_CHAR, MAX_DATABASE_ID_LENGTH, DATABASE_TIME_FORMAT); // replace hyphen with underscore, so there's no need for backticks String trimmed = CharMatcher.is('_').trimTrailingFrom(databaseId); checkArgument( trimmed.length() > 0, "Database id is empty after removing illegal characters and trailing underscores"); // if first char is not a letter, replace with a padding letter, so it doesn't // violate spanner's database naming rules char padding = generatePadding(); if (!Character.isLetter(trimmed.charAt(0))) { trimmed = padding + trimmed.substring(1); } return trimmed; }
/** A digit as the last character must survive id generation (only the FIRST char is padded). */
@Test
public void testGenerateDatabaseIdShouldNotReplaceDigitLastCharWithLetter() {
  final String base = "db_0";
  final String generated = generateDatabaseId(base);
  // Expected shape: original base, then a timestamp suffix of digits.
  assertThat(generated).matches("db_0_\\d{8}_\\d{6}_\\d{6}");
}
/**
 * Responds with HTTP 400 and puts the (optional) reason into the page title.
 *
 * @param s reason text to append after the "Bad request: " prefix; may be null
 */
void badRequest(String s) {
  setStatus(HttpServletResponse.SC_BAD_REQUEST);
  final String prefix = "Bad request: ";
  // Append the reason only when one was supplied.
  setTitle(s == null ? prefix : join(prefix, s));
}
/** badRequest(message) must set the 400 status and title exactly as the expectations describe. */
@Test
public void testBadRequest() {
  final String reason = "test string";
  appController.badRequest(reason);
  verifyExpectations(reason);
}
/**
 * Builds a legacy-generation config where every compatibility-breaking setting takes the
 * value resolved from {@code props}, while all other settings keep this config's values.
 *
 * @param props original (legacy) properties to source breaking-config values from
 * @return a new legacy-generation {@code KsqlConfig} with the merged settings
 */
public KsqlConfig overrideBreakingConfigsWithOriginalValues(final Map<String, ?> props) {
  // Interpret the supplied properties as a legacy-generation config so lookups resolve
  // defaults the same way the original deployment did.
  final KsqlConfig originalConfig = new KsqlConfig(ConfigGeneration.LEGACY, props);

  // Overwrite each compatibility-breaking config with the legacy-resolved value.
  final Map<String, Object> mergedProperties = new HashMap<>(originals());
  for (final CompatibilityBreakingConfigDef configDef : COMPATIBLY_BREAKING_CONFIG_DEFS) {
    final String name = configDef.getName();
    mergedProperties.put(name, originalConfig.get(name));
  }

  // Same treatment for the compatibility-breaking streams configs.
  final Map<String, ConfigValue> mergedStreamConfigProps = new HashMap<>(this.ksqlStreamConfigProps);
  for (final CompatibilityBreakingStreamsConfig streamsConfig : COMPATIBILITY_BREAKING_STREAMS_CONFIGS) {
    final String name = streamsConfig.getName();
    mergedStreamConfigProps.put(name, originalConfig.ksqlStreamConfigProps.get(name));
  }

  return new KsqlConfig(ConfigGeneration.LEGACY, mergedProperties, mergedStreamConfigProps);
}
/**
 * A config that is NOT compatibility-breaking (KSQL_ENABLE_UDFS) must keep the CURRENT
 * config's value even when the legacy properties specify something different.
 */
@Test
public void shouldUseCurrentValueForCompatibilityInsensitiveConfigs() {
  final Map<String, String> legacyProps =
      Collections.singletonMap(KsqlConfig.KSQL_ENABLE_UDFS, "false");
  final KsqlConfig current =
      new KsqlConfig(Collections.singletonMap(KsqlConfig.KSQL_ENABLE_UDFS, true));

  final KsqlConfig merged = current.overrideBreakingConfigsWithOriginalValues(legacyProps);

  // Current value (true) wins over the legacy "false".
  assertThat(merged.getBoolean(KsqlConfig.KSQL_ENABLE_UDFS), is(true));
}