focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/** Returns the user-supplied topic-level settings for the offset storage topic. */
public Map<String, Object> offsetStorageTopicSettings() {
    return topicSettings(OFFSET_STORAGE_PREFIX);
}
@Test
public void shouldAllowSettingOffsetTopicSettings() {
    // Arbitrary topic-level settings, supplied under the offset-storage prefix.
    Map<String, String> expectedTopicSettings = new HashMap<>();
    expectedTopicSettings.put("foo", "foo value");
    expectedTopicSettings.put("bar", "bar value");
    expectedTopicSettings.put("baz.bim", "100");

    Map<String, String> workerProps = configs();
    for (Map.Entry<String, String> setting : expectedTopicSettings.entrySet()) {
        workerProps.put(DistributedConfig.OFFSET_STORAGE_PREFIX + setting.getKey(), setting.getValue());
    }

    DistributedConfig config = new DistributedConfig(workerProps);
    // The prefixed entries must round-trip back out with the prefix stripped.
    assertEquals(expectedTopicSettings, config.offsetStorageTopicSettings());
}
/** Resolves {@code sArg} to a Level, falling back to {@code DEBUG} when it cannot be resolved. */
public static Level toLevel(String sArg) {
    return toLevel(sArg, Level.DEBUG);
}
@Test
public void withSpacePrefix() {
    // Leading/trailing whitespace must not prevent level resolution.
    assertEquals(Level.INFO, Level.toLevel(" INFO "));
}
/** Creates a UriTemplate for {@code template} that encodes values using {@code charset}. */
public static UriTemplate create(String template, Charset charset) {
    return new UriTemplate(template, true, charset);
}
@Test
void pathStyleExpansionEncodesReservedCharacters() {
    UriTemplate uriTemplate = UriTemplate.create("{;half}", Util.UTF_8);
    String expanded = uriTemplate.expand(Collections.singletonMap("half", "50%"));
    // The percent sign must be pct-encoded in path-style (';') expansion.
    assertThat(expanded).isEqualToIgnoringCase(";half=50%25");
}
/** Links {@code name} to {@code file} in this directory, rejecting reserved names. */
public void link(Name name, File file) {
    Name validatedName = checkNotReserved(name, "link");
    DirectoryEntry entry = new DirectoryEntry(this, validatedName, file);
    put(entry);
    // Inform the file of its new directory entry (e.g. for link counting).
    file.linked(entry);
}
@Test
public void testLink_parentAndSelfNameFails() {
    // Both "." and ".." are reserved and must be rejected by link().
    for (String reserved : new String[] {".", ".."}) {
        try {
            dir.link(Name.simple(reserved), createDirectory(2));
            fail();
        } catch (IllegalArgumentException expected) {
        }
    }
}
/**
 * Validates that {@code mdbClass} is a compliant MDB implementation and returns the
 * collected compliance issues (empty when the class is fully valid).
 */
public static Collection<MdbValidityStatus> assertEjbClassValidity(final ClassInfo mdbClass) {
    final Collection<MdbValidityStatus> issues = new ArrayList<>(MdbValidityStatus.values().length);
    final String className = mdbClass.name().toString();
    verifyModifiers(className, mdbClass.flags(), issues);
    for (MethodInfo method : mdbClass.methods()) {
        final String methodName = method.name();
        if ("onMessage".equals(methodName)) {
            verifyOnMessageMethod(className, method.flags(), issues);
        }
        if ("finalize".equals(methodName)) {
            // An MDB must not declare finalize(): log and record the violation.
            EjbLogger.DEPLOYMENT_LOGGER.mdbCantHaveFinalizeMethod(className);
            issues.add(MdbValidityStatus.MDB_SHOULD_NOT_HAVE_FINALIZE_METHOD);
        }
    }
    return issues;
}
@Test
public void mdbWithFinalOnMessageMethod() {
    ClassInfo classInfo = buildClassInfoForClass(InvalidMdbOnMessageCantBeFinal.class.getName());
    Collection<MdbValidityStatus> issues = assertEjbClassValidity(classInfo);
    // A final onMessage() must be reported as a compliance violation.
    assertTrue(issues.contains(MdbValidityStatus.MDB_ON_MESSAGE_METHOD_CANT_BE_FINAL));
}
/**
 * Rejects deletion unless the group is in the EMPTY state.
 *
 * @throws ApiException the NON_EMPTY_GROUP error when the group still has members.
 */
@Override
public void validateDeleteGroup() throws ApiException {
    if (state() == ConsumerGroupState.EMPTY) {
        return;
    }
    throw Errors.NON_EMPTY_GROUP.exception();
}
/** Verifies delete validation across group states: only an EMPTY group may be deleted. */
@Test
public void testValidateDeleteGroup() {
    ConsumerGroup consumerGroup = createConsumerGroup("foo");
    // A freshly created group has no members: EMPTY, hence deletable.
    assertEquals(ConsumerGroup.ConsumerGroupState.EMPTY, consumerGroup.state());
    assertDoesNotThrow(consumerGroup::validateDeleteGroup);

    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
        .setMemberEpoch(1)
        .setPreviousMemberEpoch(0)
        .build();
    consumerGroup.updateMember(member1);
    // Adding a member transitions the group to RECONCILING; deletion must now fail.
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, consumerGroup.state());
    assertThrows(GroupNotEmptyException.class, consumerGroup::validateDeleteGroup);

    consumerGroup.setGroupEpoch(1);
    // Bumping the group epoch transitions to ASSIGNING; still not deletable.
    assertEquals(ConsumerGroup.ConsumerGroupState.ASSIGNING, consumerGroup.state());
    assertThrows(GroupNotEmptyException.class, consumerGroup::validateDeleteGroup);

    consumerGroup.setTargetAssignmentEpoch(1);
    // Matching the target assignment epoch yields STABLE; still not deletable.
    assertEquals(ConsumerGroup.ConsumerGroupState.STABLE, consumerGroup.state());
    assertThrows(GroupNotEmptyException.class, consumerGroup::validateDeleteGroup);
}
/** Normalizes the ask's capability into [minimumResource, maximumResource] in place. */
@VisibleForTesting
public static void normalizeRequest(
    ResourceRequest ask,
    ResourceCalculator resourceCalculator,
    Resource minimumResource,
    Resource maximumResource) {
  Resource normalized =
      getNormalizedResource(
          ask.getCapability(),
          resourceCalculator,
          minimumResource,
          maximumResource,
          minimumResource);
  ask.setCapability(normalized);
}
/** Verifies asks are rounded up to multiples of minMemory and clamped to maxMemory. */
@Test(timeout = 30000)
public void testNormalizeRequest() {
    ResourceCalculator resourceCalculator = new DefaultResourceCalculator();

    final int minMemory = 1024;
    final int maxMemory = 8192;
    Resource minResource = Resources.createResource(minMemory, 0);
    Resource maxResource = Resources.createResource(maxMemory, 0);

    ResourceRequest ask = new ResourceRequestPBImpl();

    // case negative memory
    ask.setCapability(Resources.createResource(-1024));
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(minMemory, ask.getCapability().getMemorySize());

    // case zero memory
    ask.setCapability(Resources.createResource(0));
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(minMemory, ask.getCapability().getMemorySize());

    // case memory is a multiple of minMemory
    ask.setCapability(Resources.createResource(2 * minMemory));
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(2 * minMemory, ask.getCapability().getMemorySize());

    // case memory is not a multiple of minMemory
    ask.setCapability(Resources.createResource(minMemory + 10));
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(2 * minMemory, ask.getCapability().getMemorySize());

    // case memory is equal to max allowed
    ask.setCapability(Resources.createResource(maxMemory));
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(maxMemory, ask.getCapability().getMemorySize());

    // case memory is just less than max
    ask.setCapability(Resources.createResource(maxMemory - 10));
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(maxMemory, ask.getCapability().getMemorySize());

    // max is not a multiple of min
    maxResource = Resources.createResource(maxMemory - 10, 0);
    ask.setCapability(Resources.createResource(maxMemory - 100));
    // multiple of minMemory > maxMemory, then reduce to maxMemory
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(maxResource.getMemorySize(), ask.getCapability().getMemorySize());

    // ask is more than max
    maxResource = Resources.createResource(maxMemory, 0);
    ask.setCapability(Resources.createResource(maxMemory + 100));
    SchedulerUtils.normalizeRequest(ask, resourceCalculator, minResource, maxResource);
    assertEquals(maxResource.getMemorySize(), ask.getCapability().getMemorySize());
}
/** Queries one page of configs changed since {@code startTime} with id above {@code lastMaxId}. */
@Override
public List<ConfigInfoStateWrapper> findChangeConfig(final Timestamp startTime, long lastMaxId, final int pageSize) {
    // Resolve the mapper matching the backing datasource dialect.
    ConfigInfoMapper configInfoMapper =
            mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO);
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.START_TIME, startTime);
    context.putWhereParameter(FieldConstant.PAGE_SIZE, pageSize);
    context.putWhereParameter(FieldConstant.LAST_MAX_ID, lastMaxId);
    MapperResult mapperResult = configInfoMapper.findChangeConfig(context);
    return databaseOperate.queryMany(
            mapperResult.getSql(), mapperResult.getParamList().toArray(), CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER);
}
@Test void testFindChangeConfig() { //mock page list List<ConfigInfoStateWrapper> result = new ArrayList<>(); result.add(createMockConfigInfoStateWrapper(0)); result.add(createMockConfigInfoStateWrapper(1)); result.add(createMockConfigInfoStateWrapper(2)); Timestamp startTime = new Timestamp(System.currentTimeMillis() - 1000L); long lastMaxId = 10000L; int pageSize = 30; when(databaseOperate.queryMany(anyString(), eq(new Object[] {startTime, lastMaxId, pageSize}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(result); List<ConfigInfoStateWrapper> configInfo4List = embeddedConfigInfoPersistService.findChangeConfig(startTime, lastMaxId, pageSize); assertEquals(result.size(), configInfo4List.size()); }
/**
 * Reads the serialized missing-partitions map (partition range -> instant it went missing)
 * stored on the DetectNewPartition row. Returns an empty map when the row or cell is absent
 * or the stored bytes cannot be deserialized.
 */
public HashMap<ByteStringRange, Instant> readDetectNewPartitionMissingPartitions() {
    @Nonnull HashMap<ByteStringRange, Instant> missingPartitions = new HashMap<>();
    // Restrict the read to the newest cell of the missing-partitions column only.
    Filter missingPartitionsFilter =
        FILTERS
            .chain()
            .filter(FILTERS.family().exactMatch(MetadataTableAdminDao.CF_MISSING_PARTITIONS))
            .filter(FILTERS.qualifier().exactMatch(MetadataTableAdminDao.QUALIFIER_DEFAULT))
            .filter(FILTERS.limit().cellsPerColumn(1));
    Row row = dataClient.readRow(tableId, getFullDetectNewPartition(), missingPartitionsFilter);
    if (row == null
        || row.getCells(
                MetadataTableAdminDao.CF_MISSING_PARTITIONS, MetadataTableAdminDao.QUALIFIER_DEFAULT)
            .isEmpty()) {
        // No DetectNewPartition row / no stored value: nothing is missing.
        return missingPartitions;
    }
    ByteString serializedMissingPartition =
        row.getCells(
                MetadataTableAdminDao.CF_MISSING_PARTITIONS, MetadataTableAdminDao.QUALIFIER_DEFAULT)
            .get(0)
            .getValue();
    try {
        missingPartitions = SerializationUtils.deserialize(serializedMissingPartition.toByteArray());
    } catch (SerializationException | NullPointerException exception) {
        // Corrupt or unreadable payload: log and fall back to an empty map instead of failing.
        LOG.warn("Failed to deserialize missingPartitions: {}", exception.toString());
    }
    return missingPartitions;
}
@Test
public void readMissingPartitionsWithoutDNPRow() {
    // Without a DetectNewPartition row, the DAO must return an empty map.
    HashMap<ByteStringRange, Instant> expected = new HashMap<>();
    HashMap<ByteStringRange, Instant> actual =
        metadataTableDao.readDetectNewPartitionMissingPartitions();
    assertEquals(expected, actual);
}
/**
 * Maps a bucket id onto a writer partition: a plain modulo when there are at most as many
 * partitions as buckets, otherwise buckets are spread over the surplus writers.
 */
@Override
public int partition(Integer bucketId, int numPartitions) {
  Preconditions.checkNotNull(bucketId, BUCKET_NULL_MESSAGE);
  Preconditions.checkArgument(bucketId >= 0, BUCKET_LESS_THAN_LOWER_BOUND_MESSAGE, bucketId);
  Preconditions.checkArgument(
      bucketId < maxNumBuckets, BUCKET_GREATER_THAN_UPPER_BOUND_MESSAGE, bucketId, maxNumBuckets);
  return numPartitions <= maxNumBuckets
      ? bucketId % numPartitions
      : getPartitionWithMoreWritersThanBuckets(bucketId, numPartitions);
}
@Test
public void testPartitionerBucketIdNullFail() {
    PartitionSpec spec = TableSchemaType.ONE_BUCKET.getPartitionSpec(DEFAULT_NUM_BUCKETS);
    BucketPartitioner partitioner = new BucketPartitioner(spec);
    // A null bucket id must be rejected with the dedicated message.
    assertThatExceptionOfType(RuntimeException.class)
        .isThrownBy(() -> partitioner.partition(null, DEFAULT_NUM_BUCKETS))
        .withMessage(BUCKET_NULL_MESSAGE);
}
/** Builds the scenario object by filling the initial instance with the mapped parameters. */
@Override
protected Object createObject(ValueWrapper<Object> initialInstance, String className,
        Map<List<String>, Object> params, ClassLoader classLoader) {
    return fillBean(initialInstance, className, params, classLoader);
}
@Test
public void createObjectDirectMappingSimpleTypeNull() {
    // A direct mapping whose value is null must produce a null object.
    Map<List<String>, Object> directParams = new HashMap<>();
    directParams.put(List.of(), null);
    ValueWrapper<Object> initialInstance = runnerHelper.getDirectMapping(directParams);
    Object created = runnerHelper.createObject(
            initialInstance, String.class.getCanonicalName(), directParams, getClass().getClassLoader());
    assertThat(created).isNull();
}
/**
 * Ensures that all given internal topics exist with their expected number of partitions,
 * creating missing ones and retrying until the retry deadline elapses.
 *
 * @param topics internal topic name -> its configuration
 * @return names of topics this call actually created
 * @throws TimeoutException if not all topics could be made ready before the deadline
 * @throws StreamsException on fatal topic-creation errors
 * @throws IllegalStateException if interrupted while waiting on topic creation
 */
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
    // we will do the validation / topic-creation in a loop, until we have confirmed all topics
    // have existed with the expected number of partitions, or some create topic returns fatal errors.
    log.debug("Starting to validate internal topics {} in partition assignor.", topics);

    long currentWallClockMs = time.milliseconds();
    final long deadlineMs = currentWallClockMs + retryTimeoutMs;

    Set<String> topicsNotReady = new HashSet<>(topics.keySet());
    final Set<String> newlyCreatedTopics = new HashSet<>();

    while (!topicsNotReady.isEmpty()) {
        final Set<String> tempUnknownTopics = new HashSet<>();
        topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
        newlyCreatedTopics.addAll(topicsNotReady);

        if (!topicsNotReady.isEmpty()) {
            final Set<NewTopic> newTopics = new HashSet<>();

            for (final String topicName : topicsNotReady) {
                if (tempUnknownTopics.contains(topicName)) {
                    // for the tempUnknownTopics, don't create topic for them;
                    // we'll check again later if remaining retries > 0
                    continue;
                }
                final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
                final Map<String, String> topicConfig =
                    internalTopicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention);

                log.debug("Going to create topic {} with {} partitions and config {}.",
                    internalTopicConfig.name(),
                    internalTopicConfig.numberOfPartitions(),
                    topicConfig);

                newTopics.add(
                    new NewTopic(
                        internalTopicConfig.name(),
                        internalTopicConfig.numberOfPartitions(),
                        Optional.of(replicationFactor))
                        .configs(topicConfig));
            }

            // it's possible that although some topics are not ready yet because they
            // are temporarily not available, not that they do not exist; in this case
            // the new topics to create may be empty and hence we can skip here
            if (!newTopics.isEmpty()) {
                final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);

                for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult
                        : createTopicsResult.values().entrySet()) {
                    final String topicName = createTopicResult.getKey();
                    try {
                        createTopicResult.getValue().get();
                        topicsNotReady.remove(topicName);
                    } catch (final InterruptedException fatalException) {
                        // this should not happen; if it ever happens it indicate a bug
                        Thread.currentThread().interrupt();
                        log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
                        throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
                    } catch (final ExecutionException executionException) {
                        final Throwable cause = executionException.getCause();
                        if (cause instanceof TopicExistsException) {
                            // This topic didn't exist earlier or its leader not known before;
                            // just retain it for next round of validation.
                            log.info(
                                "Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n" +
                                "Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n" +
                                "Error message was: {}", topicName, retryBackOffMs, cause.toString());
                        } else {
                            log.error("Unexpected error during topic creation for {}.\n" +
                                "Error message was: {}", topicName, cause.toString());

                            if (cause instanceof UnsupportedVersionException) {
                                final String errorMessage = cause.getMessage();
                                // Broker rejected default replication factor: fatal, surface to the user.
                                if (errorMessage != null &&
                                    errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {

                                    throw new StreamsException(String.format(
                                        "Could not create topic %s, because brokers don't support configuration replication.factor=-1."
                                            + " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
                                        topicName)
                                    );
                                }
                            } else if (cause instanceof TimeoutException) {
                                // Creation timed out: logged here, retried in the next loop round.
                                log.error("Creating topic {} timed out.\n" +
                                    "Error message was: {}", topicName, cause.toString());
                            } else {
                                throw new StreamsException(
                                    String.format("Could not create topic %s.", topicName),
                                    cause
                                );
                            }
                        }
                    }
                }
            }
        }

        if (!topicsNotReady.isEmpty()) {
            currentWallClockMs = time.milliseconds();

            if (currentWallClockMs >= deadlineMs) {
                final String timeoutError = String.format("Could not create topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
                log.error(timeoutError);
                throw new TimeoutException(timeoutError);
            }

            log.info(
                "Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
                topicsNotReady,
                retryBackOffMs,
                deadlineMs - currentWallClockMs
            );

            Utils.sleep(retryBackOffMs);
        }
    }

    log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);

    return newlyCreatedTopics;
}
/**
 * Validation should succeed when describeTopics first fails with LeaderNotAvailable and
 * then succeeds on retry.
 */
@Test
public void shouldCompleteValidateWhenTopicLeaderNotAvailableAndThenDescribeSuccess() {
    final AdminClient admin = mock(AdminClient.class);
    final InternalTopicManager topicManager = new InternalTopicManager(
        time,
        admin,
        new StreamsConfig(config)
    );
    final TopicPartitionInfo partitionInfo =
        new TopicPartitionInfo(0, broker1, Collections.singletonList(broker1), Collections.singletonList(broker1));

    final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>();
    topicDescriptionFailFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!"));
    final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessFuture = new KafkaFutureImpl<>();
    topicDescriptionSuccessFuture.complete(
        new TopicDescription(topic1, false, Collections.singletonList(partitionInfo), Collections.emptySet())
    );

    // BUG FIX: the original code stubbed describeTopics twice with separate when(...) calls;
    // in Mockito a second stubbing on the same matcher REPLACES the first, so the failure
    // path was never exercised. Chain the answers on a single stubbing instead so the first
    // call fails and the second succeeds.
    when(admin.describeTopics(Collections.singleton(topic1)))
        .thenAnswer(answer -> new MockDescribeTopicsResult(
            Collections.singletonMap(topic1, topicDescriptionFailFuture)))
        .thenAnswer(answer -> new MockDescribeTopicsResult(
            Collections.singletonMap(topic1, topicDescriptionSuccessFuture)));

    final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
    internalTopicConfig.setNumberOfPartitions(1);
    topicManager.makeReady(Collections.singletonMap(topic1, internalTopicConfig));
}
public static ByteRange parse(final String byteRange, final int resourceLength) { final String asciiString = new String(byteRange.getBytes(), StandardCharsets.US_ASCII); // missing separator if (!byteRange.contains("-")) { final int start = Integer.parseInt(asciiString); return new ByteRange(start, resourceLength - 1); } // negative range if (byteRange.indexOf("-") == 0) { final int start = Integer.parseInt(asciiString); return new ByteRange(resourceLength + start, resourceLength - 1); } final List<String> parts = Arrays.stream(asciiString.split("-", -1)) .map(String::trim) .filter(s -> !s.isEmpty()) .collect(Collectors.toList()); final int start = Integer.parseInt(parts.get(0)); if (parts.size() == 2) { int end = Integer.parseInt(parts.get(1)); if (end > resourceLength) { end = resourceLength - 1; } return new ByteRange(start, end); } else { return new ByteRange(start, resourceLength - 1); } }
@Test
void nonASCIIDisallowed() {
    // Khmer digits are numeric in Unicode but must be rejected as range bounds.
    assertThatExceptionOfType(NumberFormatException.class)
        .isThrownBy(() -> ByteRange.parse("០-០", RESOURCE_LENGTH));
}
/**
 * Undoes a removed row's contribution to the running aggregate: non-aggregate columns are
 * copied from the input row, aggregate columns have the row's value "undone".
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public GenericRow apply(
    final GenericKey k,
    final GenericRow rowValue,
    final GenericRow aggRowValue
) {
  final GenericRow undone = GenericRow.fromList(aggRowValue.values());

  // Non-aggregate columns come straight from the input row.
  for (int col = 0; col < nonAggColumnCount; col++) {
    undone.set(col, rowValue.get(col));
  }

  // Each aggregate column: extract this row's argument and undo it from the aggregate.
  for (int col = nonAggColumnCount; col < columnCount; col++) {
    final TableAggregationFunction func = aggregateFunctions.get(col - nonAggColumnCount);
    final Object argument = getCurrentValue(
        rowValue,
        func.getArgIndicesInValue(),
        func::convertToInput
    );
    undone.set(col, func.undo(argument, undone.get(col)));
  }
  return undone;
}
@Test public void shouldApplyUndoableAggregateFunctions() { // Given: final GenericRow value = genericRow(1, 2L); final GenericRow aggRow = genericRow(1, 2L, 3); // When: final GenericRow resultRow = aggregator.apply(key, value, aggRow); // Then: assertThat(resultRow, equalTo(genericRow(1, 2L, "func1-undone"))); }
@Override public void collectSizeStats(StateObjectSizeStatsCollector collector) { // TODO: for now this ignores that only some key groups might be accessed when reading the // state, so this only reports the upper bound. We could introduce // #collectSizeStats(StateObjectSizeStatsCollector, KeyGroupRange) in KeyedStateHandle // that computes which groups where actually touched and computes the size, depending on // the exact state handle type, from either the offsets (e.g. here) or for the full size // (e.g. remote incremental) when we restore from managed/raw keyed state. stateHandle.collectSizeStats(collector); }
/** Verifies that stats for an in-memory handle report the payload size as LOCAL_MEMORY. */
@Test
void testCollectSizeStats() {
    final KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(0, 7);
    final byte[] data = new byte[5];
    final ByteStreamStateHandle innerHandle = new ByteStreamStateHandle("name", data);
    KeyGroupsStateHandle handle = new KeyGroupsStateHandle(offsets, innerHandle);
    StateObject.StateObjectSizeStatsCollector statsCollector =
            StateObject.StateObjectSizeStatsCollector.create();
    handle.collectSizeStats(statsCollector);
    // Build the expected map explicitly instead of using double-brace initialization:
    // the anonymous HashMap subclass created by "new HashMap<>() {{ ... }}" retains a
    // reference to the enclosing test instance and is a well-known anti-pattern.
    final HashMap<StateObject.StateObjectLocation, Long> expected = new HashMap<>();
    expected.put(StateObject.StateObjectLocation.LOCAL_MEMORY, (long) data.length);
    Assertions.assertEquals(expected, statsCollector.getStats());
}
/** Routes the event to every subscriber registered for its concrete class. */
@Override
public void onEvent(Event event) {
    Set<NacosTraceSubscriber> subscribers = interestedEvents.get(event.getClass());
    if (subscribers == null) {
        // Nobody registered interest in this event type.
        return;
    }
    TraceEvent traceEvent = (TraceEvent) event;
    for (NacosTraceSubscriber subscriber : subscribers) {
        // Dispatch on the subscriber's executor when configured, inline otherwise.
        if (subscriber.executor() != null) {
            subscriber.executor().execute(() -> onEvent0(subscriber, traceEvent));
        } else {
            onEvent0(subscriber, traceEvent);
        }
    }
}
/**
 * Dispatches each trace event type through the combined subscriber and verifies that only
 * the subscriber interested in that event type receives it.
 */
@Test
void testOnEvent() {
    // Test RegisterInstanceTraceEvent.
    RegisterInstanceTraceEvent registerInstanceTraceEvent =
            new RegisterInstanceTraceEvent(1L, "", true, "", "", "", "", 1);
    // A subscriber throwing from onEvent must not break dispatch to (or counting for) others.
    doThrow(new RuntimeException("test")).when(mockInstanceSubscriber).onEvent(registerInstanceTraceEvent);
    combinedTraceSubscriber.onEvent(registerInstanceTraceEvent);
    verify(mockInstanceSubscriber, times(1)).onEvent(registerInstanceTraceEvent);
    verify(mockServiceSubscriber, never()).onEvent(registerInstanceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(registerInstanceTraceEvent);
    // Test DeregisterInstanceTraceEvent.
    DeregisterInstanceTraceEvent deregisterInstanceTraceEvent =
            new DeregisterInstanceTraceEvent(1L, "", true, DeregisterInstanceReason.REQUEST, "", "", "", "", 1);
    combinedTraceSubscriber.onEvent(deregisterInstanceTraceEvent);
    verify(mockInstanceSubscriber, times(1)).onEvent(deregisterInstanceTraceEvent);
    verify(mockServiceSubscriber, never()).onEvent(deregisterInstanceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(deregisterInstanceTraceEvent);
    // Test UpdateInstanceTraceEvent.
    UpdateInstanceTraceEvent updateInstanceTraceEvent =
            new UpdateInstanceTraceEvent(1L, "", "", "", "", "", 123, null);
    combinedTraceSubscriber.onEvent(updateInstanceTraceEvent);
    verify(mockInstanceSubscriber, times(1)).onEvent(updateInstanceTraceEvent);
    verify(mockServiceSubscriber, never()).onEvent(updateInstanceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(updateInstanceTraceEvent);
    // Test RegisterServiceTraceEvent.
    RegisterServiceTraceEvent registerServiceTraceEvent = new RegisterServiceTraceEvent(1L, "", "", "");
    combinedTraceSubscriber.onEvent(registerServiceTraceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(registerServiceTraceEvent);
    verify(mockServiceSubscriber, times(1)).onEvent(registerServiceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(registerServiceTraceEvent);
    // Test DeregisterServiceTraceEvent.
    DeregisterServiceTraceEvent deregisterServiceTraceEvent = new DeregisterServiceTraceEvent(1L, "", "", "");
    combinedTraceSubscriber.onEvent(deregisterServiceTraceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(deregisterServiceTraceEvent);
    verify(mockServiceSubscriber, times(1)).onEvent(deregisterServiceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(deregisterServiceTraceEvent);
    // Test SubscribeServiceTraceEvent.
    SubscribeServiceTraceEvent subscribeServiceTraceEvent = new SubscribeServiceTraceEvent(1L, "", "", "", "");
    combinedTraceSubscriber.onEvent(subscribeServiceTraceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(subscribeServiceTraceEvent);
    verify(mockServiceSubscriber, times(1)).onEvent(subscribeServiceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(subscribeServiceTraceEvent);
    // Test UnsubscribeServiceTraceEvent.
    UnsubscribeServiceTraceEvent unsubscribeServiceTraceEvent = new UnsubscribeServiceTraceEvent(1L, "", "", "", "");
    combinedTraceSubscriber.onEvent(unsubscribeServiceTraceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(unsubscribeServiceTraceEvent);
    verify(mockServiceSubscriber, times(1)).onEvent(unsubscribeServiceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(unsubscribeServiceTraceEvent);
    // Test UpdateServiceTraceEvent.
    UpdateServiceTraceEvent updateServiceTraceEvent = new UpdateServiceTraceEvent(1L, "", "", "", null);
    combinedTraceSubscriber.onEvent(updateServiceTraceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(updateServiceTraceEvent);
    verify(mockServiceSubscriber, times(1)).onEvent(updateServiceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(updateServiceTraceEvent);
    // Test PushServiceTraceEvent.
    PushServiceTraceEvent pushServiceTraceEvent = new PushServiceTraceEvent(1L, 1L, 1L, 1L, "", "", "", "", 1);
    combinedTraceSubscriber.onEvent(pushServiceTraceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(pushServiceTraceEvent);
    verify(mockServiceSubscriber, times(1)).onEvent(pushServiceTraceEvent);
    verify(mockOtherSubscriber, never()).onEvent(pushServiceTraceEvent);
    // Test HealthStateChangeTraceEvent.
    HealthStateChangeTraceEvent healthStateChangeTraceEvent =
            new HealthStateChangeTraceEvent(1L, "", "", "", "", 8867, true, "");
    combinedTraceSubscriber.onEvent(healthStateChangeTraceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(healthStateChangeTraceEvent);
    verify(mockServiceSubscriber, never()).onEvent(healthStateChangeTraceEvent);
    verify(mockOtherSubscriber, times(1)).onEvent(healthStateChangeTraceEvent);
    // Test TraceEvent: the base type has no registered subscriber at all.
    TraceEvent traceEvent = new TraceEvent("", 1L, "", "", "");
    combinedTraceSubscriber.onEvent(traceEvent);
    verify(mockInstanceSubscriber, never()).onEvent(traceEvent);
    verify(mockServiceSubscriber, never()).onEvent(traceEvent);
    verify(mockOtherSubscriber, never()).onEvent(traceEvent);
}
/** Sends the record without a completion callback. */
@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record) {
    return send(record, null);
}
@Test
void should_add_b3_headers_to_records() {
    tracingProducer.send(producerRecord);
    // Collect every header key from all records recorded by the mock producer.
    List<String> headerKeys = new ArrayList<>();
    mockProducer.history().forEach(record ->
        Arrays.stream(record.headers().toArray()).map(Header::key).forEach(headerKeys::add));
    // Tracing must have injected exactly the single-header b3 format.
    assertThat(headerKeys).containsOnly("b3");
}
/**
 * Writes the entrypoint classpath and main class into arg files under the project cache
 * directory and adds them to the container as a file-entries layer rooted at the app root.
 *
 * @param rawConfiguration plugin configuration used to resolve the app root
 * @param projectProperties project info providing the default cache directory
 * @param jibContainerBuilder builder that receives the new layer
 * @param classpath classpath string to persist in the classpath arg file
 * @param mainClass main class name to persist in the main-class arg file
 * @throws IOException if writing either arg file fails
 * @throws InvalidAppRootException if the configured app root is invalid (from getAppRootChecked)
 */
@VisibleForTesting
static void addJvmArgFilesLayer(
    RawConfiguration rawConfiguration,
    ProjectProperties projectProperties,
    JibContainerBuilder jibContainerBuilder,
    String classpath,
    String mainClass)
    throws IOException, InvalidAppRootException {
  Path projectCache = projectProperties.getDefaultCacheDirectory();
  Path classpathFile = projectCache.resolve(JIB_CLASSPATH_FILE);
  Path mainClassFile = projectCache.resolve(JIB_MAIN_CLASS_FILE);

  // It's perfectly fine to always generate a new temp file or rewrite an existing file. However,
  // fixing the source file path and preserving the file timestamp prevents polluting the Jib
  // layer cache space by not creating new cache selectors every time. (Note, however, creating
  // new selectors does not affect correctness at all.)
  writeFileConservatively(classpathFile, classpath);
  writeFileConservatively(mainClassFile, mainClass);

  AbsoluteUnixPath appRoot = getAppRootChecked(rawConfiguration, projectProperties);
  jibContainerBuilder.addFileEntriesLayer(
      FileEntriesLayer.builder()
          .setName(LayerType.JVM_ARG_FILES.getName())
          .addEntry(classpathFile, appRoot.resolve(JIB_CLASSPATH_FILE))
          .addEntry(mainClassFile, appRoot.resolve(JIB_MAIN_CLASS_FILE))
          .build());
}
/** Verifies the jvm arg files are written verbatim to the cache and layered into the container. */
@Test
public void testAddJvmArgFilesLayer() throws IOException, InvalidAppRootException {
  String classpath = "/extra:/app/classes:/app/libs/dep.jar";
  String mainClass = "com.example.Main";
  PluginConfigurationProcessor.addJvmArgFilesLayer(
      rawConfiguration, projectProperties, jibContainerBuilder, classpath, mainClass);

  // The two arg files must be written byte-for-byte under the app cache directory.
  Path classpathFile = appCacheDirectory.resolve("jib-classpath-file");
  Path mainClassFile = appCacheDirectory.resolve("jib-main-class-file");
  String classpathRead = new String(Files.readAllBytes(classpathFile), StandardCharsets.UTF_8);
  String mainClassRead = new String(Files.readAllBytes(mainClassFile), StandardCharsets.UTF_8);
  assertThat(classpathRead).isEqualTo(classpath);
  assertThat(mainClassRead).isEqualTo(mainClass);

  // The "jvm arg files" layer must contain exactly those two files, extracted under /app.
  List<FileEntry> layerEntries =
      getLayerEntries(jibContainerBuilder.toContainerBuildPlan(), "jvm arg files");
  assertThat(layerEntries)
      .comparingElementsUsing(SOURCE_FILE_OF)
      .containsExactly(
          appCacheDirectory.resolve("jib-classpath-file"),
          appCacheDirectory.resolve("jib-main-class-file"));
  assertThat(layerEntries)
      .comparingElementsUsing(EXTRACTION_PATH_OF)
      .containsExactly("/app/jib-classpath-file", "/app/jib-main-class-file");
}
/** Pass-through: delegates the invocation to the wrapped invoker unchanged. */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    return invoker.invoke(invocation);
}
@Test
void testInvokeWithoutTimeout() {
    int timeout = 3000;
    // Mock an invoker whose URL carries a timeout and which returns a fixed response.
    Invoker invoker = Mockito.mock(Invoker.class);
    when(invoker.invoke(any(Invocation.class))).thenReturn(new AppResponse("result"));
    URL url = URL.valueOf(
            "test://test:11/test?accesslog=true&group=dubbo&version=1.1&timeout=" + timeout);
    when(invoker.getUrl()).thenReturn(url);

    Invocation invocation = Mockito.mock(Invocation.class);
    when(invocation.getMethodName()).thenReturn("testInvokeWithoutTimeout");

    // The filter must pass the call through and return the invoker's result unchanged.
    Result result = timeoutFilter.invoke(invoker, invocation);
    Assertions.assertEquals("result", result.getValue());
}
/**
 * Parses the schema JSON contained in the given file.
 *
 * @param file a file holding a JSON schema definition
 * @return the parsed schema
 * @throws IOException if the file cannot be read
 * @deprecated use {@code new Schema.Parser().parse(file)} instead
 */
@Deprecated
public static Schema parse(File file) throws IOException {
    return new Parser().parse(file);
}
/** Round-trips a parsed Schema through Java serialization and checks equality. */
@Test
void serialization() throws IOException, ClassNotFoundException {
    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(buffer);
            InputStream schemaJson = getClass().getResourceAsStream("/SchemaBuilder.avsc")) {
        Schema original = new Schema.Parser().parse(schemaJson);
        out.writeObject(original);
        try (ByteArrayInputStream inputBytes = new ByteArrayInputStream(buffer.toByteArray());
                ObjectInputStream in = new ObjectInputStream(inputBytes)) {
            Schema restored = (Schema) in.readObject();
            assertEquals(original, restored);
        }
    }
}
/** Creates an Ip6Address from the given raw byte-array value. */
public static Ip6Address valueOf(byte[] value) {
    return new Ip6Address(value);
}
/**
 * Exercises equals/hashCode for IPv6 addresses: each group contains equal instances, and
 * instances from different groups must be unequal.
 */
@Test
public void testEqualityIPv6() {
    new EqualsTester()
        .addEqualityGroup(
            Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:8888"),
            Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:8888"))
        .addEqualityGroup(
            Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:888a"),
            Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:888a"))
        // The all-zeros and all-ones addresses cover the value-range boundaries.
        .addEqualityGroup(
            Ip6Address.valueOf("::"),
            Ip6Address.valueOf("::"))
        .addEqualityGroup(
            Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"),
            Ip6Address.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"))
        .testEquals();
}
// Resolves compatibility between this (new) POJO serializer snapshot and a
// previous one. The checks run from cheapest/most-fatal to most permissive:
// wrong snapshot type / different POJO class / lossy snapshot maps are
// incompatible; otherwise field and registered-subclass compatibility decide
// between migration, reconfiguration, and as-is compatibility.
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    if (!(oldSerializerSnapshot instanceof PojoSerializerSnapshot)) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    PojoSerializerSnapshot<T> previousPojoSerializerSnapshot =
            (PojoSerializerSnapshot<T>) oldSerializerSnapshot;
    final Class<T> previousPojoClass =
            previousPojoSerializerSnapshot.snapshotData.getPojoClass();
    final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
            previousPojoSerializerSnapshot.snapshotData.getFieldSerializerSnapshots();
    final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
            registeredSubclassSerializerSnapshots =
                    previousPojoSerializerSnapshot.snapshotData
                            .getRegisteredSubclassSerializerSnapshots();
    final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
            nonRegisteredSubclassSerializerSnapshots =
                    previousPojoSerializerSnapshot.snapshotData
                            .getNonRegisteredSubclassSerializerSnapshots();
    // A different target class can never be compatible.
    if (previousPojoClass != snapshotData.getPojoClass()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    // Absent keys/values mean classes or snapshots could not be restored — fatal.
    if (registeredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    if (nonRegisteredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    final IntermediateCompatibilityResult<T> preExistingFieldSerializersCompatibility =
            getCompatibilityOfPreExistingFields(fieldSerializerSnapshots);
    if (preExistingFieldSerializersCompatibility.isIncompatible()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    final IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility =
            getCompatibilityOfPreExistingRegisteredSubclasses(
                    registeredSubclassSerializerSnapshots);
    if (preExistingRegistrationsCompatibility.isIncompatible()) {
        return TypeSerializerSchemaCompatibility.incompatible();
    }
    // Migration takes precedence over reconfiguration; as-is is the fallback.
    if (newPojoSerializerIsCompatibleAfterMigration(
            preExistingFieldSerializersCompatibility,
            preExistingRegistrationsCompatibility,
            fieldSerializerSnapshots)) {
        return TypeSerializerSchemaCompatibility.compatibleAfterMigration();
    }
    if (newPojoSerializerIsCompatibleWithReconfiguredSerializer(
            preExistingFieldSerializersCompatibility,
            preExistingRegistrationsCompatibility,
            registeredSubclassSerializerSnapshots,
            nonRegisteredSubclassSerializerSnapshots)) {
        return TypeSerializerSchemaCompatibility.compatibleWithReconfiguredSerializer(
                constructReconfiguredPojoSerializer(
                        preExistingFieldSerializersCompatibility,
                        registeredSubclassSerializerSnapshots,
                        preExistingRegistrationsCompatibility,
                        nonRegisteredSubclassSerializerSnapshots));
    }
    return TypeSerializerSchemaCompatibility.compatibleAsIs();
}
// A snapshot with a removed field vs. one with new fields should resolve to
// "compatible after migration" (state must be migrated, not read as-is).
@Test
void testResolveSchemaCompatibilityWithNewAndRemovedFields() {
    final PojoSerializerSnapshot<TestPojo> oldSnapshot =
            buildTestSnapshot(Collections.singletonList(mockRemovedField(ID_FIELD)));
    final PojoSerializerSnapshot<TestPojo> newSnapshot =
            buildTestSnapshot(Arrays.asList(NAME_FIELD, HEIGHT_FIELD));
    final TypeSerializerSchemaCompatibility<TestPojo> resultCompatibility =
            newSnapshot.resolveSchemaCompatibility(oldSnapshot);
    assertThat(resultCompatibility.isCompatibleAfterMigration()).isTrue();
}
/**
 * Looks up the permissions of a role, paged.
 *
 * @param role the role name to filter by
 * @param pageNo 1-based page number
 * @param pageSize page size
 * @return the persisted page, or an empty {@link Page} when the store returns null
 */
public Page<PermissionInfo> getPermissionsFromDatabase(String role, int pageNo, int pageSize) {
    // Normalize a null result from the persistence layer to an empty page so
    // callers never need a null check.
    final Page<PermissionInfo> page = permissionPersistService.getPermissions(role, pageNo, pageSize);
    return page == null ? new Page<>() : page;
}
// With no permission rows persisted, the lookup must yield an empty (not null)
// page with a zero total count.
@Test
void getPermissionsFromDatabase() {
    Page<PermissionInfo> permissionsFromDatabase =
            nacosRoleService.getPermissionsFromDatabase("role-admin", 1, Integer.MAX_VALUE);
    assertEquals(0, permissionsFromDatabase.getTotalCount());
}
/** Marks the game as running and starts the game loop on a background thread. */
public void run() {
    LOGGER.info("Start game.");
    isRunning = true;
    // Fire-and-forget: the loop runs until isRunning is cleared elsewhere.
    new Thread(this::gameLoop).start();
}
// run() must flip the running flag immediately (before the loop thread does work).
@Test
void testRun() {
    world.run();
    assertTrue(world.isRunning);
}
public static long getNextScheduledTime(final String cronEntry, long currentTime) throws MessageFormatException { long result = 0; if (cronEntry == null || cronEntry.length() == 0) { return result; } // Handle the once per minute case "* * * * *" // starting the next event at the top of the minute. if (cronEntry.equals("* * * * *")) { result = currentTime + 60 * 1000; result = result / 60000 * 60000; return result; } List<String> list = tokenize(cronEntry); List<CronEntry> entries = buildCronEntries(list); Calendar working = Calendar.getInstance(); working.setTimeInMillis(currentTime); working.set(Calendar.SECOND, 0); CronEntry minutes = entries.get(MINUTES); CronEntry hours = entries.get(HOURS); CronEntry dayOfMonth = entries.get(DAY_OF_MONTH); CronEntry month = entries.get(MONTH); CronEntry dayOfWeek = entries.get(DAY_OF_WEEK); // Start at the top of the next minute, cron is only guaranteed to be // run on the minute. int timeToNextMinute = 60 - working.get(Calendar.SECOND); working.add(Calendar.SECOND, timeToNextMinute); // If its already to late in the day this will roll us over to tomorrow // so we'll need to check again when done updating month and day. int currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } int currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } // We can roll into the next month here which might violate the cron setting // rules so we check once then recheck again after applying the month settings. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Start by checking if we are in the right month, if not then calculations // need to start from the beginning of the month to ensure that we don't end // up on the wrong day. 
(Can happen when DAY_OF_WEEK is set and current time // is ahead of the day of the week to execute on). doUpdateCurrentMonth(working, month); // Now Check day of week and day of month together since they can be specified // together in one entry, if both "day of month" and "day of week" are restricted // (not "*"), then either the "day of month" field (3) or the "day of week" field // (5) must match the current day or the Calenday must be advanced. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Now we can chose the correct hour and minute of the day in question. currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } result = working.getTimeInMillis(); if (result <= currentTime) { throw new ArithmeticException("Unable to compute next scheduled exection time."); } return result; }
// "* * 1 * *" from 15 Dec 2010 must fire at midnight on the 1st of the next
// month, i.e. 1 Jan 2011 00:00.
@Test
public void testgetStartNextMonth() throws MessageFormatException {
    // using an absolute date so that result will be absolute - Wednesday 15 Dec 2010
    Calendar current = Calendar.getInstance();
    current.set(2010, Calendar.DECEMBER, 15, 9, 15, 30);
    LOG.debug("start:" + current.getTime());
    String test = "* * 1 * *";
    long next = CronParser.getNextScheduledTime(test, current.getTimeInMillis());
    Calendar result = Calendar.getInstance();
    result.setTimeInMillis(next);
    LOG.debug("next:" + result.getTime());
    // Fires exactly at midnight of the first day of January 2011.
    assertEquals(0, result.get(Calendar.SECOND));
    assertEquals(0, result.get(Calendar.MINUTE));
    assertEquals(0, result.get(Calendar.HOUR_OF_DAY));
    assertEquals(1, result.get(Calendar.DAY_OF_MONTH));
    assertEquals(Calendar.JANUARY, result.get(Calendar.MONTH));
    assertEquals(2011, result.get(Calendar.YEAR));
}
/**
 * Returns a deserializer that parses a LACP Collector TLV body.
 * The body carries a single 16-bit collector-max-delay field; the length check
 * excludes the TLV header, which the caller has already consumed.
 */
public static Deserializer<LacpCollectorTlv> deserializer() {
    return (data, offset, length) -> {
        checkInput(data, offset, length, LENGTH - HEADER_LENGTH);
        LacpCollectorTlv lacpCollectorTlv = new LacpCollectorTlv();
        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
        lacpCollectorTlv.setCollectorMaxDelay(bb.getShort());
        return lacpCollectorTlv;
    };
}
// Parses the fixture bytes and checks the collector-max-delay field round-trips.
@Test
public void deserializer() throws Exception {
    LacpCollectorTlv lacpCollectorTlv =
            LacpCollectorTlv.deserializer().deserialize(data, 0, data.length);
    assertEquals(COLLECTOR_MAX_DELAY, lacpCollectorTlv.getCollectorMaxDelay());
}
// Registers a cache-flush listener on the wrapped store, translating raw
// byte keys/values back into typed Windowed keys and a Change of deserialized
// values before invoking the caller's listener. Returns false when the wrapped
// store is not caching (no listener installed).
@SuppressWarnings("unchecked")
@Override
public boolean setFlushListener(final CacheFlushListener<Windowed<K>, V> listener,
                                final boolean sendOldValues) {
    final WindowStore<Bytes, byte[]> wrapped = wrapped();
    if (wrapped instanceof CachedStateStore) {
        return ((CachedStateStore<byte[], byte[]>) wrapped).setFlushListener(
            record -> listener.apply(
                // Rebuild the typed windowed key from the serialized store key.
                record.withKey(WindowKeySchema.fromStoreKey(record.key(), windowSizeMs, serdes.keyDeserializer(), serdes.topic()))
                    .withValue(new Change<>(
                        // Null bytes mean "no value" — keep null rather than deserializing.
                        record.value().newValue != null ? serdes.valueFrom(record.value().newValue) : null,
                        record.value().oldValue != null ? serdes.valueFrom(record.value().oldValue) : null,
                        record.value().isLatest
                    ))
            ),
            sendOldValues);
    }
    return false;
}
// A non-caching wrapped store cannot accept a flush listener; the call must
// report false rather than fail.
@Test
public void shouldNotSetFlushListenerOnWrappedNoneCachingStore() {
    assertFalse(store.setFlushListener(null, false));
}
/**
 * Streams the vertices of {@code g} in depth-first order, starting the
 * traversal from the graph's root vertices.
 */
public static Stream<Vertex> depthFirst(Graph g) {
    return depthFirst(g.getRoots());
}
// A DFS over the three-node fixture graph must visit each vertex exactly once.
@Test
public void testDFSBasic() {
    DepthFirst.depthFirst(g).forEach(v -> visitCount.incrementAndGet());
    assertEquals("It should visit each node once", visitCount.get(), 3);
}
/**
 * Convenience overload: compiles {@code regex} and delegates to the
 * {@link java.util.regex.Pattern}-based factory.
 *
 * @param regex the pattern to match in each element
 * @param replacement the replacement for every match
 */
public static ReplaceAll replaceAll(String regex, String replacement) {
    return replaceAll(Pattern.compile(regex), replacement);
}
// Elements matching [xyz] get every occurrence replaced; non-matching
// elements pass through unchanged.
@Test
@Category(NeedsRunner.class)
public void testReplaceAllMixed() {
    PCollection<String> output =
            p.apply(Create.of("abc", "xj", "yj", "zj", "def")).apply(Regex.replaceAll("[xyz]", "new"));
    PAssert.that(output).containsInAnyOrder("abc", "newj", "newj", "newj", "def");
    p.run();
}
// Routes the request to a Spring Cloud service instance: resolves the
// serviceId from the cached selector handle, load-balances an upstream for
// the caller's IP, rewrites the target domain, sets the per-rule timeout,
// and continues the plugin chain. Error responses are written when the
// serviceId is missing or no upstream can be chosen.
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain,
                               final SelectorData selector, final RuleData rule) {
    if (Objects.isNull(rule)) {
        return Mono.empty();
    }
    final ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT);
    assert shenyuContext != null;
    final SpringCloudSelectorHandle springCloudSelectorHandle =
            SpringCloudPluginDataHandler.SELECTOR_CACHED.get().obtainHandle(selector.getId());
    final SpringCloudRuleHandle ruleHandle = buildRuleHandle(rule);
    String serviceId = springCloudSelectorHandle.getServiceId();
    // A selector without a serviceId is a configuration error — short-circuit.
    if (StringUtils.isBlank(serviceId)) {
        Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.CANNOT_CONFIG_SPRINGCLOUD_SERVICEID);
        return WebFluxResultUtils.result(exchange, error);
    }
    // The caller IP participates in load-balancer choice (e.g. ip-hash strategies).
    final String ip = Objects.requireNonNull(exchange.getRequest().getRemoteAddress()).getAddress().getHostAddress();
    final Upstream upstream = serviceChooser.choose(serviceId, selector.getId(), ip, ruleHandle.getLoadBalance());
    if (Objects.isNull(upstream)) {
        Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SPRINGCLOUD_SERVICEID_IS_ERROR);
        return WebFluxResultUtils.result(exchange, error);
    }
    final String domain = upstream.buildDomain();
    setDomain(URI.create(domain + shenyuContext.getRealUrl()), exchange);
    //set time out.
    exchange.getAttributes().put(Constants.HTTP_TIME_OUT, ruleHandle.getTimeout());
    return chain.execute(exchange);
}
// With a serviceId that resolves to no usable upstream, doExecute must
// complete by writing an error result rather than erroring the Mono.
@Test
public void testSpringCloudPluginErrorServiceId() {
    SpringCloudSelectorHandle springCloudSelectorHandle = new SpringCloudSelectorHandle();
    springCloudSelectorHandle.setServiceId("springcloud");
    List<DivideUpstream> divideUpstreams = Stream.of(3, 4, 5)
            .map(weight -> DivideUpstream.builder()
                    .upstreamUrl("divide-upstream-" + weight)
                    .build())
            .collect(Collectors.toList());
    springCloudSelectorHandle.setDivideUpstreams(divideUpstreams);
    final SelectorData selectorData = SelectorData.builder()
            .id("springcloud")
            .handle(GsonUtils.getInstance().toJson(springCloudSelectorHandle))
            .build();
    final RuleData rule = RuleData.builder()
            .id("springcloud")
            .selectorId("springcloud")
            .handle("{\"path\":\"service/\"}")
            .build();
    // Pre-populate the plugin caches the way the data handler would.
    SpringCloudPluginDataHandler.SELECTOR_CACHED.get().cachedHandle(selectorData.getId(), springCloudSelectorHandle);
    SpringCloudRuleHandle springCloudRuleHandle = GsonUtils.getGson().fromJson(rule.getHandle(), SpringCloudRuleHandle.class);
    SpringCloudPluginDataHandler.RULE_CACHED.get().cachedHandle(CacheKeyUtils.INST.getKey(rule), springCloudRuleHandle);
    Mono<Void> execute = springCloudPlugin.doExecute(exchange, chain, selectorData, rule);
    StepVerifier.create(execute).expectSubscription().verifyComplete();
}
/**
 * Extracts the database name from a metadata node path such as
 * {@code /metadata/foo_db}; empty when the path does not match.
 *
 * NOTE(review): the Pattern is recompiled on every call — hoisting it to a
 * static final field would avoid that, but requires touching the enclosing
 * class. Confirm call frequency before optimizing.
 */
public static Optional<String> getDatabaseName(final String path) {
    Pattern pattern = Pattern.compile(getMetaDataNode() + "/([\\w\\-]+)$", Pattern.CASE_INSENSITIVE);
    Matcher matcher = pattern.matcher(path);
    return matcher.find() ? Optional.of(matcher.group(1)) : Optional.empty();
}
// A well-formed metadata path must yield the trailing database name segment.
@Test
void assertGetDatabaseName() {
    Optional<String> actual = DatabaseMetaDataNode.getDatabaseName("/metadata/foo_db");
    assertTrue(actual.isPresent());
    assertThat(actual.get(), is("foo_db"));
}
// Merges sharded query results for a SELECT. A single result that needs no
// aggregation rewrite streams straight through; otherwise column labels are
// indexed into the statement context, the core merged result is built, and
// decorators (e.g. pagination) are applied on top.
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) {
        return new IteratorStreamMergedResult(queryResults);
    }
    Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
    SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
    selectStatementContext.setIndexes(columnLabelIndexMap);
    MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
    return decorate(queryResults, selectStatementContext, mergedResult);
}
// A GROUP BY on the first column with index ordering should produce a
// streaming group-by merged result for MySQL.
@Test
void assertBuildGroupByMemoryMergedResult() throws SQLException {
    final ShardingDQLResultMerger resultMerger =
            new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL"));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class));
    MySQLSelectStatement selectStatement = (MySQLSelectStatement) buildSelectStatement(new MySQLSelectStatement());
    selectStatement.setGroupBy(new GroupBySegment(0, 0,
            Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    SelectStatementContext selectStatementContext = new SelectStatementContext(
            createShardingSphereMetaData(database), Collections.emptyList(), selectStatement,
            DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    assertThat(resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(),
            mock(ConnectionContext.class)), instanceOf(GroupByStreamMergedResult.class));
}
/**
 * Looks up the preset partitioning helper for the given type descriptor,
 * or null when the type has no preset helper.
 */
static <T> @Nullable JdbcReadWithPartitionsHelper<T> getPartitionsHelper(TypeDescriptor<T> type) {
    // This cast is unchecked, thus this is a small type-checking risk. We just need
    // to make sure that all preset helpers in `JdbcUtil.PRESET_HELPERS` are matched
    // in type from their Key and their Value.
    return (JdbcReadWithPartitionsHelper<T>) PRESET_HELPERS.get(type.getRawType());
}
// Degenerate range (lower == upper) must produce exactly one half-open range.
@Test
public void testLongPartitioningWithSingleKey() {
    JdbcReadWithPartitionsHelper<Long> helper = JdbcUtil.getPartitionsHelper(TypeDescriptors.longs());
    List<KV<Long, Long>> expectedRanges = Lists.newArrayList(KV.of(12L, 13L));
    List<KV<Long, Long>> ranges = Lists.newArrayList(helper.calculateRanges(12L, 12L, 10L));
    // It is not possible to generate any more than one range, because the lower and upper range are
    // exactly the same.
    // The range goes from the current Long element to ONE ELEMENT AFTER.
    // Because the query's filter statement is : WHERE column >= lowerBound AND column < upperBound.
    assertEquals(1, ranges.size());
    assertArrayEquals(expectedRanges.toArray(), ranges.toArray());
}
/**
 * Convenience overload: builds a projected schema with no always-included
 * fields, delegating to the three-argument variant.
 */
public static DataSchema buildSchemaByProjection(DataSchema schema, DataMap maskMap) {
    return buildSchemaByProjection(schema, maskMap, Collections.emptyList());
}
// Data-driven check: the projected schema contains exactly the fields the
// mask selects and drops the rest.
@Test(dataProvider = "provideBuildSchemaByProjectionData")
public void testBuildSchemaByProjection(DataMap projectionMask, String[] expectedIncludedFields, String[] expectedExcludedFields) {
    DataSchema schema = DataTemplateUtil.getSchema(RecordTemplateWithPrimitiveKey.class);
    RecordDataSchema validatingSchema = (RecordDataSchema) buildSchemaByProjection(schema, projectionMask);
    for (String fieldName : expectedIncludedFields) {
        Assert.assertTrue(validatingSchema.contains(fieldName));
    }
    for (String fieldName : expectedExcludedFields) {
        Assert.assertFalse(validatingSchema.contains(fieldName));
    }
}
@VisibleForTesting void filterAppsByAggregatedStatus() throws IOException, YarnException { YarnClient client = YarnClient.createYarnClient(); try { client.init(getConf()); client.start(); for (Iterator<AppInfo> it = eligibleApplications .iterator(); it.hasNext();) { AppInfo app = it.next(); try { ApplicationReport report = client.getApplicationReport( ApplicationId.fromString(app.getAppId())); LogAggregationStatus aggStatus = report.getLogAggregationStatus(); if (aggStatus.equals(LogAggregationStatus.RUNNING) || aggStatus.equals(LogAggregationStatus.RUNNING_WITH_FAILURE) || aggStatus.equals(LogAggregationStatus.NOT_START) || aggStatus.equals(LogAggregationStatus.DISABLED) || aggStatus.equals(LogAggregationStatus.FAILED)) { if (verbose) { LOG.info("Skipping " + app.getAppId() + " due to aggregation status being " + aggStatus); } it.remove(); } else { if (verbose) { LOG.info(app.getAppId() + " has aggregation status " + aggStatus); } app.setFinishTime(report.getFinishTime()); } } catch (ApplicationNotFoundException e) { // Assume the aggregation has finished if (verbose) { LOG.info(app.getAppId() + " not in the ResourceManager"); } } } } finally { if (client != null) { client.stop(); } } }
// End-to-end check against a mini YARN cluster: apps with in-progress,
// disabled, or failed aggregation (1,2,3,5,6) are filtered out; apps with
// SUCCEEDED or TIME_OUT aggregation (4,7) are kept, as is app 8, which the
// RM does not know about (assumed finished).
@Test(timeout = 30000)
public void testFilterAppsByAggregatedStatus() throws Exception {
    try (MiniYARNCluster yarnCluster =
            new MiniYARNCluster(TestHadoopArchiveLogs.class.getSimpleName(), 1, 1, 1, 1)) {
        Configuration conf = new Configuration();
        conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
        yarnCluster.init(conf);
        yarnCluster.start();
        conf = yarnCluster.getConfig();
        RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
        // One app per LogAggregationStatus of interest.
        RMAppImpl appImpl1 = (RMAppImpl) createRMApp(1, conf, rmContext, LogAggregationStatus.DISABLED);
        RMAppImpl appImpl2 = (RMAppImpl) createRMApp(2, conf, rmContext, LogAggregationStatus.FAILED);
        RMAppImpl appImpl3 = (RMAppImpl) createRMApp(3, conf, rmContext, LogAggregationStatus.NOT_START);
        RMAppImpl appImpl4 = (RMAppImpl) createRMApp(4, conf, rmContext, LogAggregationStatus.SUCCEEDED);
        RMAppImpl appImpl5 = (RMAppImpl) createRMApp(5, conf, rmContext, LogAggregationStatus.RUNNING);
        RMAppImpl appImpl6 = (RMAppImpl) createRMApp(6, conf, rmContext, LogAggregationStatus.RUNNING_WITH_FAILURE);
        RMAppImpl appImpl7 = (RMAppImpl) createRMApp(7, conf, rmContext, LogAggregationStatus.TIME_OUT);
        RMAppImpl appImpl8 = (RMAppImpl) createRMApp(8, conf, rmContext, LogAggregationStatus.SUCCEEDED);
        rmContext.getRMApps().put(appImpl1.getApplicationId(), appImpl1);
        rmContext.getRMApps().put(appImpl2.getApplicationId(), appImpl2);
        rmContext.getRMApps().put(appImpl3.getApplicationId(), appImpl3);
        rmContext.getRMApps().put(appImpl4.getApplicationId(), appImpl4);
        rmContext.getRMApps().put(appImpl5.getApplicationId(), appImpl5);
        rmContext.getRMApps().put(appImpl6.getApplicationId(), appImpl6);
        rmContext.getRMApps().put(appImpl7.getApplicationId(), appImpl7);
        // appImpl8 is not in the RM
        HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
        Assert.assertEquals(0, hal.eligibleApplications.size());
        hal.eligibleApplications.add(
            new HadoopArchiveLogs.AppInfo(appImpl1.getApplicationId().toString(), USER));
        hal.eligibleApplications.add(
            new HadoopArchiveLogs.AppInfo(appImpl2.getApplicationId().toString(), USER));
        hal.eligibleApplications.add(
            new HadoopArchiveLogs.AppInfo(appImpl3.getApplicationId().toString(), USER));
        HadoopArchiveLogs.AppInfo app4 =
            new HadoopArchiveLogs.AppInfo(appImpl4.getApplicationId().toString(), USER);
        hal.eligibleApplications.add(app4);
        hal.eligibleApplications.add(
            new HadoopArchiveLogs.AppInfo(appImpl5.getApplicationId().toString(), USER));
        hal.eligibleApplications.add(
            new HadoopArchiveLogs.AppInfo(appImpl6.getApplicationId().toString(), USER));
        HadoopArchiveLogs.AppInfo app7 =
            new HadoopArchiveLogs.AppInfo(appImpl7.getApplicationId().toString(), USER);
        hal.eligibleApplications.add(app7);
        HadoopArchiveLogs.AppInfo app8 =
            new HadoopArchiveLogs.AppInfo(appImpl8.getApplicationId().toString(), USER);
        hal.eligibleApplications.add(app8);
        Assert.assertEquals(8, hal.eligibleApplications.size());
        hal.filterAppsByAggregatedStatus();
        // Only the SUCCEEDED, TIME_OUT, and RM-unknown apps survive.
        Assert.assertEquals(3, hal.eligibleApplications.size());
        Assert.assertTrue(hal.eligibleApplications.contains(app4));
        Assert.assertTrue(hal.eligibleApplications.contains(app7));
        Assert.assertTrue(hal.eligibleApplications.contains(app8));
    }
}
/**
 * Builds a {@code Proxy} from a URI of the form
 * {@code [scheme:]//[user[:password]@]host:port}.
 * Empty user or password segments are mapped to null.
 *
 * @param uri the proxy URI to parse
 * @return the populated proxy descriptor
 */
public static Proxy create(final URI uri) {
    Proxy proxy = new Proxy(uri.getHost(), uri.getPort(), uri.getScheme());
    String userInfo = uri.getUserInfo();
    if (userInfo != null) {
        // Split on the FIRST ':' only (limit 2), so a password that itself
        // contains ':' characters is preserved intact. The original
        // split(":") truncated such passwords at their first colon.
        String[] up = userInfo.split(":", 2);
        proxy.username = up[0].isEmpty() ? null : up[0];
        if (up.length > 1) {
            proxy.password = up[1].isEmpty() ? null : up[1];
        }
    }
    return proxy;
}
// Covers every userinfo shape: absent, scheme-only, user+password, user-only,
// and password-only (leading ':'), checking each field is parsed or nulled.
@Test
void testCreate() {
    // No userinfo, no scheme.
    Proxy proxy = Proxy.create(URI.create("//127.0.0.1:8080"));
    assertNull(proxy.getScheme());
    assertNull(proxy.getUsername());
    assertNull(proxy.getPassword());
    assertEquals("127.0.0.1", proxy.getHost());
    assertEquals(8080, proxy.getPort());
    // Scheme present.
    proxy = Proxy.create(URI.create("http://127.0.0.1:8080"));
    assertEquals("http", proxy.getScheme());
    assertNull(proxy.getUsername());
    assertNull(proxy.getPassword());
    assertEquals("127.0.0.1", proxy.getHost());
    assertEquals(8080, proxy.getPort());
    // Username and password.
    proxy = Proxy.create(URI.create("//username:password@127.0.0.1:8080"));
    assertNull(proxy.getScheme());
    assertEquals("username", proxy.getUsername());
    assertEquals("password", proxy.getPassword());
    assertEquals("127.0.0.1", proxy.getHost());
    assertEquals(8080, proxy.getPort());
    // Username only.
    proxy = Proxy.create(URI.create("//username@127.0.0.1:8080"));
    assertNull(proxy.getScheme());
    assertEquals("username", proxy.getUsername());
    assertNull(proxy.getPassword());
    assertEquals("127.0.0.1", proxy.getHost());
    assertEquals(8080, proxy.getPort());
    // Password only (empty username maps to null).
    proxy = Proxy.create(URI.create("//:password@127.0.0.1:8080"));
    assertNull(proxy.getScheme());
    assertNull(proxy.getUsername());
    assertEquals("password", proxy.getPassword());
    assertEquals("127.0.0.1", proxy.getHost());
    assertEquals(8080, proxy.getPort());
}
/** Returns the precomputed standard deviation of the sample. */
@Override
public double sd() {
    return sd;
}
// Checks sd() against a precomputed reference value within 1e-6.
@Test
public void testSd() {
    System.out.println("sd");
    KernelDensity instance = new KernelDensity(x);
    double expResult = 3.066752;
    double result = instance.sd();
    assertEquals(expResult, result, 1E-6);
}
/**
 * Returns events for the default "search" query.
 * Synchronized — presumably to serialize access to the underlying Splunk
 * client; confirm against the overloaded getEvents(String).
 */
public synchronized List<SplunkEvent> getEvents() {
    return getEvents("search");
}
// A job that never finishes must surface the timeout exception to the caller
// rather than being swallowed.
@Test
public void testGetEventsShouldThrowErrorWhenServiceClientFailsToExecuteRequest() {
    Job mockJob = clientFactory.getServiceClient(any(ServiceArgs.class)).getJobs().create(anyString());
    doThrow(ConditionTimeoutException.class).when(mockJob).isDone();
    assertThrows(ConditionTimeoutException.class, () -> testManager.getEvents(QUERY));
}
// Appends the low 16 bits of c as a single char to the internal buffer,
// per the Writer.write(int) contract.
@Override
public void write(int c) {
    mBuffer.append((char) c);
}
// offset + length exceeding the array bounds must raise IndexOutOfBoundsException.
@Test
void testWriteCharWithWrongCombineLength() throws IOException {
    Assertions.assertThrows(IndexOutOfBoundsException.class, () -> {
        UnsafeStringWriter writer = new UnsafeStringWriter();
        char[] chars = new char[1];
        writer.write(chars, 1, 1);
    });
}
/**
 * Produces the Tuple2 type information for this input format, deriving the
 * component types from the configured key and value classes.
 */
@Override
public TypeInformation<Tuple2<K, V>> getProducedType() {
    return new TupleTypeInfo<>(
            TypeExtractor.createTypeInfo(keyClass),
            TypeExtractor.createTypeInfo(valueClass));
}
// The produced type of a (Void, Long) Hadoop input format must be the
// matching Flink TupleTypeInfo.
@Test
void checkTypeInformation() throws Exception {
    HadoopInputFormat<Void, Long> hadoopInputFormat =
            new HadoopInputFormat<>(
                    new DummyVoidKeyInputFormat<Long>(), Void.class, Long.class, new JobConf());
    TypeInformation<Tuple2<Void, Long>> tupleType = hadoopInputFormat.getProducedType();
    TypeInformation<Tuple2<Void, Long>> expectedType =
            new TupleTypeInfo<>(BasicTypeInfo.VOID_TYPE_INFO, BasicTypeInfo.LONG_TYPE_INFO);
    assertThat(tupleType.isTupleType()).isTrue();
    assertThat(tupleType).isEqualTo(expectedType);
}
// Returns the boolean payload after asserting the measure's type is BOOLEAN
// (checkValueType raises on mismatch).
@Override
public boolean getBooleanValue() {
    checkValueType(BOOLEAN);
    return measure.getBooleanValue();
}
// A measure built with true must report true through getBooleanValue().
@Test
public void get_boolean_value() {
    MeasureImpl measure = new MeasureImpl(Measure.newMeasureBuilder().create(true));
    assertThat(measure.getBooleanValue()).isTrue();
}
/**
 * Returns the k points temporally nearest to {@code time}, by merging two
 * iterators that walk outward from the search time — one descending through
 * earlier points, one ascending through later points. Assumes the set is
 * ordered primarily by time (the synthetic search point relies on this —
 * TODO confirm against Point's comparator).
 *
 * @param points the time-ordered points to search
 * @param time the query instant
 * @param k the number of neighbors to return (non-negative); when k >= size,
 *          a copy of the whole set is returned
 */
public static <T> NavigableSet<Point<T>> fastKNearestPoints(SortedSet<Point<T>> points, Instant time, int k) {
    checkNotNull(points, "The input SortedSet of Points cannot be null");
    checkNotNull(time, "The input time cannot be null");
    checkArgument(k >= 0, "k (" + k + ") must be non-negative");
    if (k >= points.size()) {
        return newTreeSet(points);
    }
    // Build a synthetic probe point at the query time (position fields are dummies).
    Point<T> stub = points.first();
    Point<T> searchPoint = Point.builder(stub).time(time).latLong(0.0, 0.0).build();
    //create two iterators, one goes up from the searchPoint, one goes down from the searchPoint
    NavigableSet<Point<T>> headSet = ((NavigableSet<Point<T>>) points).headSet(searchPoint, true);
    NavigableSet<Point<T>> tailSet = ((NavigableSet<Point<T>>) points).tailSet(searchPoint, false);
    Iterator<Point<T>> headIter = headSet.descendingIterator();
    Iterator<Point<T>> tailIter = tailSet.iterator();
    TreeSet<Point<T>> results = newTreeSet();
    Point<T> up = (headIter.hasNext()) ? headIter.next() : null;
    Point<T> down = (tailIter.hasNext()) ? tailIter.next() : null;
    while (results.size() < k) {
        //add an element from the "down set" when we are out of elements in the "up set"
        if (up == null) {
            results.add(down);
            down = tailIter.next();
            continue;
        }
        //add an element from the "up set" when we are out of elements in the "down set"
        if (down == null) {
            results.add(up);
            up = headIter.next();
            continue;
        }
        //add the nearest point when we can choose between the "up set" and the "down set"
        Duration upDistance = Duration.between(up.time(), time);
        Duration downDistance = Duration.between(time, down.time());
        // Ties go to the earlier ("up") point.
        if (theDuration(upDistance).isLessThanOrEqualTo(downDistance)) {
            results.add(up);
            up = (headIter.hasNext()) ? headIter.next() : null;
        } else {
            results.add(down);
            down = (tailIter.hasNext()) ? tailIter.next() : null;
        }
    }
    return results;
}
// Requesting two neighbors of EPOCH must return two distinct points, both
// timestamped at the query time.
@Test
public void testFastKNearestPoints_2() {
    NavigableSet<Point<String>> knn = fastKNearestPoints(points, EPOCH, 2);
    assertEquals(2, knn.size());
    Point one = knn.pollFirst();
    Point two = knn.pollFirst();
    assertFalse(one == two, "This objects are different");
    assertEquals(one.time(), EPOCH, "Both match the search time");
    assertEquals(two.time(), EPOCH, "Both match the search time");
}
/**
 * Wraps {@code t} in a {@code UserCodeException}, returning it unchanged when
 * it already is one (no double-wrapping).
 */
public static UserCodeException wrap(Throwable t) {
    return (t instanceof UserCodeException)
            ? (UserCodeException) t
            : new UserCodeException(t);
}
// Wrapping an already-wrapped exception must return the same instance.
@Test
public void existingUserCodeExceptionsNotWrapped() {
    UserCodeException existing = UserCodeException.wrap(new IOException());
    UserCodeException wrapped = UserCodeException.wrap(existing);
    assertEquals(existing, wrapped);
}
// Translates an OffsetCommitResponse into per-partition results for the
// single group this handler manages. Partition-level errors can mark the
// group for coordinator re-lookup (unmap) or retry; only a fully clean
// response completes.
// NOTE(review): the loop uses the handler's `groupId` field, not the
// `groupIds` parameter — valid only because this handler serves one group
// (validateKeys enforces that); confirm if multi-group support is added.
@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> handleResponse(
    Node coordinator,
    Set<CoordinatorKey> groupIds,
    AbstractResponse abstractResponse
) {
    validateKeys(groupIds);
    final OffsetCommitResponse response = (OffsetCommitResponse) abstractResponse;
    final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
    final Set<CoordinatorKey> groupsToRetry = new HashSet<>();
    final Map<TopicPartition, Errors> partitionResults = new HashMap<>();
    for (OffsetCommitResponseTopic topic : response.data().topics()) {
        for (OffsetCommitResponsePartition partition : topic.partitions()) {
            TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
            Errors error = Errors.forCode(partition.errorCode());
            if (error != Errors.NONE) {
                // handleError decides whether this error is fatal for the
                // partition, requires unmap, or requires retry.
                handleError(
                    groupId,
                    topicPartition,
                    error,
                    partitionResults,
                    groupsToUnmap,
                    groupsToRetry
                );
            } else {
                partitionResults.put(topicPartition, error);
            }
        }
    }
    if (groupsToUnmap.isEmpty() && groupsToRetry.isEmpty()) {
        return ApiResult.completed(groupId, partitionResults);
    } else {
        // An unmapped (possibly empty) result leaves the group pending so the
        // driver re-discovers the coordinator / retries.
        return ApiResult.unmapped(new ArrayList<>(groupsToUnmap));
    }
}
// A response with only NONE errors must complete with the per-partition map.
@Test
public void testHandleSuccessfulResponse() {
    AlterConsumerGroupOffsetsHandler handler =
            new AlterConsumerGroupOffsetsHandler(groupId, partitions, logContext);
    Map<TopicPartition, Errors> responseData = Collections.singletonMap(t0p0, Errors.NONE);
    OffsetCommitResponse response = new OffsetCommitResponse(0, responseData);
    ApiResult<CoordinatorKey, Map<TopicPartition, Errors>> result =
            handler.handleResponse(node, singleton(CoordinatorKey.byGroupId(groupId)), response);
    assertCompleted(result, responseData);
}
/**
 * Builds the ShareGroupDescribe response view of this group as of the given
 * committed offset, reading every versioned field (epoch, state, assignment
 * epoch, members) at that offset for a consistent snapshot.
 *
 * @param committedOffset the snapshot offset to read versioned state at
 * @param defaultAssignor assignor name to report
 * @param topicsImage topic metadata used to render each member
 */
public ShareGroupDescribeResponseData.DescribedGroup asDescribedGroup(
    long committedOffset,
    String defaultAssignor,
    TopicsImage topicsImage
) {
    ShareGroupDescribeResponseData.DescribedGroup describedGroup =
        new ShareGroupDescribeResponseData.DescribedGroup()
            .setGroupId(groupId)
            .setAssignorName(defaultAssignor)
            .setGroupEpoch(groupEpoch.get(committedOffset))
            .setGroupState(state.get(committedOffset).toString())
            .setAssignmentEpoch(targetAssignmentEpoch.get(committedOffset));
    members.entrySet(committedOffset).forEach(
        entry -> describedGroup.members().add(
            entry.getValue().asShareGroupDescribeMember(
                topicsImage
            )
        )
    );
    return describedGroup;
}
// Snapshot 0 is empty; after adding two members and taking snapshot 1,
// asDescribedGroup(1, ...) must report a STABLE group with both members.
@Test
public void testAsDescribedGroup() {
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    ShareGroup shareGroup = new ShareGroup(snapshotRegistry, "group-id-1");
    snapshotRegistry.idempotentCreateSnapshot(0);
    assertEquals(ShareGroupState.EMPTY.toString(), shareGroup.stateAsString(0));
    shareGroup.updateMember(new ShareGroupMember.Builder("member1")
            .setSubscribedTopicNames(Collections.singletonList("foo"))
            .build());
    shareGroup.updateMember(new ShareGroupMember.Builder("member2")
            .build());
    snapshotRegistry.idempotentCreateSnapshot(1);
    ShareGroupDescribeResponseData.DescribedGroup expected =
            new ShareGroupDescribeResponseData.DescribedGroup()
                    .setGroupId("group-id-1")
                    .setGroupState(ShareGroupState.STABLE.toString())
                    .setGroupEpoch(0)
                    .setAssignmentEpoch(0)
                    .setAssignorName("assignorName")
                    .setMembers(Arrays.asList(
                            new ShareGroupDescribeResponseData.Member()
                                    .setMemberId("member1")
                                    .setSubscribedTopicNames(Collections.singletonList("foo")),
                            new ShareGroupDescribeResponseData.Member().setMemberId("member2")
                    ));
    ShareGroupDescribeResponseData.DescribedGroup actual =
            shareGroup.asDescribedGroup(1, "assignorName",
                    new MetadataImageBuilder().build().topics());
    assertEquals(expected, actual);
}
/**
 * Gross return of this position: zero while the position is still open,
 * otherwise computed from the exit trade's price per asset.
 */
public Num getGrossReturn() {
    return isOpened() ? zero() : getGrossReturn(exit.getPricePerAsset());
}
// When trade prices are NaN, the series-based overload must fall back to bar
// close prices: 105/100 = 1.05.
@Test
public void testGetGrossReturnForLongPositionsUsingBarCloseOnNaN() {
    MockBarSeries series = new MockBarSeries(DoubleNum::valueOf, 100, 105);
    Position position = new Position(new Trade(0, TradeType.BUY, NaN, NaN),
            new Trade(1, TradeType.SELL, NaN, NaN));
    assertNumEquals(DoubleNum.valueOf(1.05), position.getGrossReturn(series));
}
// FUSE mkdir entry point: wraps mkdirInternal with the common call helper,
// which handles logging/metrics and maps exceptions to errno-style codes.
@Override
public int mkdir(String path, long mode) {
    return AlluxioFuseUtils.call(LOG, () -> mkdirInternal(path, mode),
        FuseConstants.FUSE_MKDIR, "path=%s,mode=%o,", path, mode);
}
// A 256-character directory name must be rejected with ENAMETOOLONG.
@Test
public void mkDirWithLengthLimit() {
    long mode = 0755L;
    String c256 = String.join("", Collections.nCopies(16, "0123456789ABCDEF"));
    assertEquals(-ErrorCodes.ENAMETOOLONG(), mFuseFs.mkdir("/foo/" + c256, mode));
}
/**
 * Unwraps this object as the requested interface, per the JDBC Wrapper
 * contract; throws when this object does not implement the interface.
 */
@SuppressWarnings("unchecked")
@Override
public final <T> T unwrap(final Class<T> iface) throws SQLException {
    // Guard clause: reject unsupported interfaces up front.
    if (!isWrapperFor(iface)) {
        throw new SQLFeatureNotSupportedException(
                String.format("`%s` cannot be unwrapped as `%s`", getClass().getName(), iface.getName()));
    }
    return (T) this;
}
// Unwrapping as an unrelated type (String) must raise SQLException.
@Test void assertUnwrapFailure() {
    assertThrows(SQLException.class, () -> wrapperAdapter.unwrap(String.class));
}
/**
 * Returns the value of the given column as a plain Object.
 *
 * @param columnIndex 1-based column index
 * @throws SQLException if the merged result set cannot provide the value
 */
@Override public Object getObject(final int columnIndex) throws SQLException {
    // Delegates to the merged result set with Object as the target type.
    return mergeResultSet.getValue(columnIndex, Object.class);
}
// getObject(int, Class) must work for both the primitive short and the
// boxed Short target types.
@Test void assertGetObjectWithShort() throws SQLException {
    short result = (short) 0;
    when(mergeResultSet.getValue(1, short.class)).thenReturn(result);
    assertThat(shardingSphereResultSet.getObject(1, short.class), is(result));
    when(mergeResultSet.getValue(1, Short.class)).thenReturn(result);
    assertThat(shardingSphereResultSet.getObject(1, Short.class), is(result));
}
/**
 * Validates and persists a brand-new OAuth client registration.
 *
 * <p>Rejects entities that already have an ID or whose redirect URIs are
 * blacklisted, fills in a random client ID when absent (a secret is never
 * generated implicitly, since public clients have none), enforces several
 * consistency rules, timestamps the entity, saves it and resets the stats
 * cache.
 *
 * @param client the new client; must not have an ID yet
 * @return the persisted entity
 * @throws IllegalArgumentException if the client already has an ID, a
 *         redirect URI is blacklisted, or a consistency check fails
 */
@Override public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) {
    if (client.getId() != null) {
        // if it's not null, it's already been saved, this is an error
        throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId());
    }
    if (client.getRegisteredRedirectUri() != null) {
        for (String uri : client.getRegisteredRedirectUri()) {
            if (blacklistedSiteService.isBlacklisted(uri)) {
                throw new IllegalArgumentException("Client URI is blacklisted: " + uri);
            }
        }
    }
    // assign a random clientid if it's empty
    // NOTE: don't assign a random client secret without asking, since public clients have no secret
    if (Strings.isNullOrEmpty(client.getClientId())) {
        client = generateClientId(client);
    }
    // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa
    ensureRefreshTokenConsistency(client);
    // make sure we don't have both a JWKS and a JWKS URI
    ensureKeyConsistency(client);
    // check consistency when using HEART mode
    checkHeartMode(client);
    // timestamp this to right now
    client.setCreatedAt(new Date());
    // check the sector URI
    checkSectorIdentifierUri(client);
    ensureNoReservedScopes(client);
    ClientDetailsEntity c = clientRepository.saveClient(client);
    statsService.resetCache();
    return c;
}
// In HEART mode a client mixing redirect URI classes (localhost, https,
// custom scheme) must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void heartMode_multipleRedirectClass() {
    Mockito.when(config.isHeartMode()).thenReturn(true);
    ClientDetailsEntity client = new ClientDetailsEntity();
    Set<String> grantTypes = new LinkedHashSet<>();
    grantTypes.add("authorization_code");
    grantTypes.add("refresh_token");
    client.setGrantTypes(grantTypes);
    client.setTokenEndpointAuthMethod(AuthMethod.PRIVATE_KEY);
    client.setRedirectUris(Sets.newHashSet("http://localhost/", "https://foo.bar", "foo://bar"));
    client.setJwksUri("https://foo.bar/jwks");
    service.saveNewClient(client);
}
/**
 * Waits up to {@code duration} for the delegate pipeline to finish, then
 * captures the terminal metrics/state and runs the cancel callback.
 *
 * @param duration maximum time to wait
 * @return the state reported by the delegate
 */
@Override public State waitUntilFinish(Duration duration) {
    State state = delegate.waitUntilFinish(duration);
    // Snapshot metrics and state before tearing down via the cancel hook.
    this.terminalMetrics = delegate.metrics();
    this.terminalState = state;
    this.cancel.run();
    return state;
}
// The wrapper must surface the delegate's terminal state unchanged when
// waiting with an explicit duration.
@Test public void givenPipelineRunWithDuration_waitUntilFinish_reportsTerminalState() {
    PipelineResult delegate = mock(PipelineResult.class);
    when(delegate.waitUntilFinish(Duration.millis(3000L)))
        .thenReturn(PipelineResult.State.CANCELLED);
    PrismPipelineResult underTest = new PrismPipelineResult(delegate, exec::stop);
    assertThat(underTest.waitUntilFinish(Duration.millis(3000L)))
        .isEqualTo(PipelineResult.State.CANCELLED);
}
/**
 * Returns the owning connection, narrowed to ConnectionProxy.
 * The cast is safe only because this statement proxy is always created
 * from a ConnectionProxy.
 */
@Override public ConnectionProxy getConnectionProxy() {
    return (ConnectionProxy) super.getConnectionProxy();
}
// The statement proxy must expose a non-null ConnectionProxy.
@Test public void testGetConnectionProxy() {
    Assertions.assertNotNull(statementProxy.getConnectionProxy());
}
/**
 * Reports whether the per-node "may have merges pending in global space"
 * flags differ from the previously recorded snapshot.
 *
 * @return false until all distributors have reported; otherwise true iff
 *         some node is new since the previous snapshot or its flag flipped
 */
public boolean statsHaveChanged() {
    // Incomplete data: never report a change until every distributor reported.
    if (!aggregatedStats.hasUpdatesFromAllDistributors()) {
        return false;
    }
    for (ContentNodeStats nodeStats : aggregatedStats.getStats()) {
        int nodeIndex = nodeStats.getNodeIndex();
        boolean current = mayHaveMergesPendingInGlobalSpace(nodeIndex);
        Boolean previous = prevMayHaveMergesPendingInGlobalSpace(nodeIndex);
        // Unknown node in the previous snapshot, or a flipped flag => changed.
        if (previous == null || previous != current) {
            return true;
        }
    }
    return false;
}
// A node present now but absent from the previous stats snapshot counts
// as a change even when it has zero buckets pending.
@Test void stats_have_changed_if_buckets_pending_node_not_found_in_previous_stats() {
    Fixture f = Fixture.fromStats(stats().bucketsPending(0));
    assertTrue(f.statsHaveChanged());
}
// Records a successful server ping on the health holder, refreshing the
// last-contact timestamp.
void pingSuccess() {
    agentHealthHolder.pingSuccess();
}
// Contact is considered lost only after more than one ping interval has
// elapsed since the last successful ping.
@Test void remembersLastPingTime() {
    // initial time
    Date now = new Date(42);
    clock.setTime(now);
    agentController.pingSuccess();
    assertThat(agentHealthHolder.hasLostContact()).isFalse();
    clock.addMillis(pingInterval);
    assertThat(agentHealthHolder.hasLostContact()).isFalse();
    // A second interval without a ping crosses the lost-contact threshold.
    clock.addMillis(pingInterval);
    assertThat(agentHealthHolder.hasLostContact()).isTrue();
}
/**
 * Parses a JSON object string into a JSONObject.
 *
 * @param jsonStr JSON text representing an object
 * @return the parsed JSONObject
 */
public static JSONObject parseObj(String jsonStr) {
    return new JSONObject(jsonStr);
}
// A string value parsed from JSON must round-trip unchanged via getStr.
@Test public void getStrTest() {
    final String html = "{\"name\":\"Something must have been changed since you leave\"}";
    final JSONObject jsonObject = JSONUtil.parseObj(html);
    assertEquals("Something must have been changed since you leave", jsonObject.getStr("name"));
}
/**
 * Returns this converter's priority.
 * Uses the MIN_PRIORITY constant, i.e. the lowest precedence in the chain.
 */
@Override public int getPriority() {
    return MIN_PRIORITY;
}
// NOTE(review): assumes the converter's MIN_PRIORITY constant equals
// Integer.MAX_VALUE (largest value = lowest precedence) — confirm against
// the constant's declaration.
@Test void testGetPriority() {
    assertEquals(Integer.MAX_VALUE, converter.getPriority());
}
/**
 * Computes a timeout that scales linearly with the number of clients
 * (one second per client), clamped to [MIN_TIMEOUT, MAX_TIMEOUT].
 *
 * @param clients number of clients to wait for
 * @return the clamped timeout
 */
static Duration timeout(int clients) {
    long seconds = Long.max(MIN_TIMEOUT.toSeconds(), clients);
    Duration scaled = Duration.ofSeconds(seconds);
    // Clamp at the upper bound.
    return scaled.compareTo(MAX_TIMEOUT) > 0 ? MAX_TIMEOUT : scaled;
}
// Verifies lower clamp, linear region, and upper clamp of timeout().
@Test public void test_timeout_calculation() {
    assertEquals(MIN_TIMEOUT, timeout(1));
    assertEquals(MIN_TIMEOUT, timeout(20));
    // These values must be updated if the calculation in the timeout method itself is changed.
    assertEquals(Duration.ofSeconds(100), timeout(100));
    assertEquals(Duration.ofSeconds(200), timeout(200));
    assertEquals(MAX_TIMEOUT, timeout(240));
}
/**
 * Convenience overload: issues an HTTP request without session key or
 * request-signing algorithm (both passed as null to the full overload).
 *
 * @param url             target URL
 * @param method          HTTP method name
 * @param headers         request headers
 * @param requestBodyData body payload, serialized by the full overload
 * @param responseFormat  expected response type
 * @return the deserialized response
 */
public <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData, TypeReference<T> responseFormat) {
    return httpRequest(url, method, headers, requestBodyData, responseFormat, null, null);
}
// An HTTP status the client does not expect (304) must surface as an
// internal-server-error ConnectRestException.
@Test public void testUnexpectedHttpResponseCausesInternalServerError() throws Exception {
    int statusCode = Response.Status.NOT_MODIFIED.getStatusCode();
    Request req = mock(Request.class);
    ContentResponse resp = mock(ContentResponse.class);
    setupHttpClient(statusCode, req, resp);
    ConnectRestException e = assertThrows(ConnectRestException.class, () -> httpRequest(
        httpClient, MOCK_URL, TEST_METHOD, TEST_TYPE, TEST_SIGNATURE_ALGORITHM
    ));
    assertIsInternalServerError(e);
}
/**
 * Always returns null — there is never a float value to remove.
 * NOTE(review): this looks like an empty/no-op headers implementation;
 * confirm against the enclosing class before relying on it.
 */
@Override public Float getFloatAndRemove(K name) {
    return null;
}
// With no stored value, the defaulting overload must return the supplied
// fallback (1) rather than null.
@Test public void testGetFloatAndRemoveDefault() {
    assertEquals(1, HEADERS.getFloatAndRemove("name1", 1), 0);
}
/**
 * Builds usage statistics for one client.
 *
 * @param clientId the client to look up
 * @return a ClientStat whose approved-site count is the number of approved
 *         sites registered for that client
 */
@Override public ClientStat getCountForClientId(String clientId) {
    ClientStat stat = new ClientStat();
    stat.setApprovedSiteCount(approvedSiteService.getByClientId(clientId).size());
    return stat;
}
// Approved-site counts per client, including a client with zero sites.
@Test public void countForClientId() {
    // stats for ap1..ap4
    assertThat(service.getCountForClientId(clientId1).getApprovedSiteCount(), is(2));
    assertThat(service.getCountForClientId(clientId2).getApprovedSiteCount(), is(1));
    assertThat(service.getCountForClientId(clientId3).getApprovedSiteCount(), is(1));
    assertThat(service.getCountForClientId(clientId4).getApprovedSiteCount(), is(0));
}
/**
 * Merges the desired status into the KafkaRebalance resource status.
 *
 * <p>If {@code desiredStatus} is null the current status is kept. Otherwise
 * the state condition is (re)computed — an exception, when supplied, wins
 * and forces the NotReady state — and all other pre-existing conditions are
 * carried over alongside the new one.
 *
 * @param kafkaRebalance the custom resource being reconciled
 * @param desiredStatus  the status to apply, or null to keep the current one
 * @param e              optional error that overrides the state condition
 * @return the resulting status
 * @throws IllegalArgumentException if both {@code e} and the desired state
 *         condition are null
 */
private KafkaRebalanceStatus updateStatus(KafkaRebalance kafkaRebalance, KafkaRebalanceStatus desiredStatus, Throwable e) {
    // Leave the current status when the desired state is null
    if (desiredStatus != null) {
        Condition cond = KafkaRebalanceUtils.rebalanceStateCondition(desiredStatus);
        // Conditions other than the state condition are preserved as-is.
        List<Condition> previous = Collections.emptyList();
        if (desiredStatus.getConditions() != null) {
            previous = desiredStatus.getConditions().stream().filter(condition -> condition != cond).collect(Collectors.toList());
        }
        // If a throwable is supplied, it is set in the status with priority
        if (e != null) {
            StatusUtils.setStatusConditionAndObservedGeneration(kafkaRebalance, desiredStatus, KafkaRebalanceState.NotReady.toString(), e);
            desiredStatus.setConditions(Stream.concat(desiredStatus.getConditions().stream(), previous.stream()).collect(Collectors.toList()));
        } else if (cond != null) {
            StatusUtils.setStatusConditionAndObservedGeneration(kafkaRebalance, desiredStatus, cond);
            desiredStatus.setConditions(Stream.concat(desiredStatus.getConditions().stream(), previous.stream()).collect(Collectors.toList()));
        } else {
            throw new IllegalArgumentException("Status related exception and the Status condition's type cannot both be null");
        }
        return desiredStatus;
    }
    return kafkaRebalance.getStatus();
}
// A KafkaRebalance against a Kafka cluster without a cruiseControl section
// must move the resource from New to NotReady with InvalidResourceException.
@Test public void testCruiseControlDisabled(VertxTestContext context) {
    // build a Kafka cluster without the cruiseControl definition
    Kafka kafka = new KafkaBuilder(KAFKA)
        .editSpec()
            .withCruiseControl(null)
        .endSpec()
        .withNewStatus()
            .withObservedGeneration(1L)
            .withConditions(new ConditionBuilder()
                .withType("Ready")
                .withStatus("True")
                .build())
        .endStatus()
        .build();
    KafkaRebalance kr = createKafkaRebalance(namespace, CLUSTER_NAME, RESOURCE_NAME, EMPTY_KAFKA_REBALANCE_SPEC, false);
    Crds.kafkaRebalanceOperation(client).inNamespace(namespace).resource(kr).create();
    Crds.kafkaOperation(client).inNamespace(namespace).resource(kafka).create();
    Crds.kafkaOperation(client).inNamespace(namespace).resource(kafka).updateStatus();
    Checkpoint checkpoint = context.checkpoint();
    krao.reconcile(new Reconciliation("test-trigger", KafkaRebalance.RESOURCE_KIND, namespace, RESOURCE_NAME))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // the resource moved from New to NotReady due to the error
            assertState(context, client, namespace, RESOURCE_NAME,
                KafkaRebalanceState.NotReady, InvalidResourceException.class,
                "Kafka resource lacks 'cruiseControl' declaration");
            checkpoint.flag();
        })));
}
/**
 * Builds a URI from a base URL plus optional query parameters.
 *
 * @param url   the base URL
 * @param query query parameters; ignored when null or empty
 * @return the assembled URI
 * @throws URISyntaxException if the resulting string is not a valid URI
 */
public static URI buildUri(String url, Query query) throws URISyntaxException {
    boolean hasParams = query != null && !query.isEmpty();
    return new URI(hasParams ? url + "?" + query.toQueryUrl() : url);
}
// Both a null Query and an empty Query must leave the URL untouched
// (no trailing "?").
@Test void testBuildUriForEmptyQuery() throws URISyntaxException {
    URI actual = HttpUtils.buildUri("www.aliyun.com", null);
    assertEquals("www.aliyun.com", actual.toString());
    actual = HttpUtils.buildUri("www.aliyun.com", new Query());
    assertEquals("www.aliyun.com", actual.toString());
}
/**
 * Creates a handler addressing a task by member UUID (partition id fixed
 * to -1, i.e. not partition-owned).
 *
 * @param uuid          owning member's UUID
 * @param schedulerName name of the scheduler
 * @param taskName      name of the task
 * @return the handler
 */
public static ScheduledTaskHandler of(UUID uuid, String schedulerName, String taskName) {
    return new ScheduledTaskHandlerImpl(uuid, -1, schedulerName, taskName);
}
// Handlers parsed from URNs that differ only in the task-name suffix must
// not compare equal.
@Test public void of_equalityDifferentTasks() {
    String urnA = "urn:hzScheduledTaskHandler:39ffc539-a356-444c-bec7-6f644462c208-1SchedulerTask";
    String urnB = "urn:hzScheduledTaskHandler:39ffc539-a356-444c-bec7-6f644462c208-1SchedulerTask2";
    assertNotEquals(ScheduledTaskHandler.of(urnA), ScheduledTaskHandler.of(urnB));
}
/** Returns the error code carried by the underlying data record. */
public int errorCode() {
    return data.errorCode();
}
// Accessing fields added in newer protocol versions (errorCode) on a
// SubscriptionInfo encoded at an older version (8) must not throw.
@Test public void shouldNotErrorAccessingFutureVars() {
    final SubscriptionInfo info = new SubscriptionInfo(8, LATEST_SUPPORTED_VERSION, PID_1,
        "localhost:80", TASK_OFFSET_SUMS, IGNORED_UNIQUE_FIELD, IGNORED_ERROR_CODE, EMPTY_CLIENT_TAGS);
    try {
        info.errorCode();
    } catch (final Exception e) {
        fail("should not error");
    }
}
/**
 * Deletes the given files via the Dropbox delete-batch API.
 *
 * <p>Files are grouped by containing volume; one asynchronous batch job is
 * launched per volume and its status is polled on a dedicated scheduler
 * (interval from {@code dropbox.delete.poll.interval.ms}) until it
 * completes or fails. The main thread blocks on a latch, also propagating
 * any failure of the polling future itself. PATH_LOOKUP failures map to
 * {@link NotfoundException}; other entry failures to
 * {@link InteroperabilityException}. The scheduler is always shut down.
 *
 * @param files    files to delete with their transfer status
 * @param prompt   password callback (unused here)
 * @param callback notified once per file before the batch is submitted
 * @throws BackgroundException on Dropbox errors or failed batch entries
 */
@Override public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { final CountDownLatch signal = new CountDownLatch(1); final AtomicReference<BackgroundException> failure = new AtomicReference<>(); final ScheduledThreadPool scheduler = new ScheduledThreadPool(new LoggingUncaughtExceptionHandler() { @Override public void uncaughtException(final Thread t, final Throwable e) { super.uncaughtException(t, e); failure.set(new BackgroundException(e)); signal.countDown(); } }, "deletebatch"); try { final Map<Path, List<String>> containers = new HashMap<>(); for(Path f : files.keySet()) { final Path container = containerService.getContainer(f); if(containers.containsKey(container)) { containers.get(container).add(containerService.getKey(f)); } else { final List<String> keys = new ArrayList<>(); keys.add(containerService.getKey(f)); containers.put(container, keys); } callback.delete(f); } for(Path container : containers.keySet()) { final DbxUserFilesRequests requests = new DbxUserFilesRequests(session.getClient(container)); final DeleteBatchLaunch job = requests.deleteBatch(containers.get(container).stream().map(DeleteArg::new).collect(Collectors.toList())); final ScheduledFuture<?> f = scheduler.repeat(() -> { try { // Poll status final DeleteBatchJobStatus status = requests.deleteBatchCheck(job.getAsyncJobIdValue()); if(status.isComplete()) { final List<DeleteBatchResultEntry> entries = status.getCompleteValue().getEntries(); for(DeleteBatchResultEntry entry : entries) { if(entry.isFailure()) { switch(entry.getFailureValue().tag()) { case PATH_LOOKUP: failure.set(new NotfoundException(entry.getFailureValue().toString())); break; default: failure.set(new InteroperabilityException()); } } } signal.countDown(); } if(status.isFailed()) { signal.countDown(); } } catch(DbxException e) { failure.set(new DropboxExceptionMappingService().map(e)); signal.countDown(); } }, new
HostPreferences(session.getHost()).getLong("dropbox.delete.poll.interval.ms"), TimeUnit.MILLISECONDS); while(!Uninterruptibles.awaitUninterruptibly(signal, Duration.ofSeconds(1))) { try { if(f.isDone()) { Uninterruptibles.getUninterruptibly(f); } } catch(ExecutionException e) { for(Throwable cause : ExceptionUtils.getThrowableList(e)) { Throwables.throwIfInstanceOf(cause, BackgroundException.class); } throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e)); } } if(null != failure.get()) { throw failure.get(); } } } catch(DbxException e) { throw new DropboxExceptionMappingService().map(e); } finally { scheduler.shutdown(); } }
// Deleting a folder through the batch feature must also remove the files
// it contains.
@Test public void testDeleteDirectory() throws Exception {
    final Path folder = new DropboxDirectoryFeature(session).mkdir(
        new Path(new DefaultHomeFinderService(session).find(),
            new AlphanumericRandomStringService().random(),
            EnumSet.of(Path.Type.volume, Path.Type.directory)), new TransferStatus());
    final Path file1 = new DropboxTouchFeature(session).touch(
        new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)),
        new TransferStatus());
    final Path file2 = new DropboxTouchFeature(session).touch(
        new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)),
        new TransferStatus());
    new DropboxBatchDeleteFeature(session).delete(Collections.singletonList(folder),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new DropboxFindFeature(session).find(file1));
    assertFalse(new DropboxFindFeature(session).find(file2));
    assertFalse(new DropboxFindFeature(session).find(folder));
}
/**
 * Null-key-tolerant computeIfAbsent: a null key short-circuits to null
 * instead of throwing, otherwise the superclass behavior applies.
 *
 * @param key             the lookup key, may be null
 * @param mappingFunction computes a value for an absent key
 * @return the existing or computed value, or null for a null key
 */
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
    return key == null ? null : super.computeIfAbsent(key, mappingFunction);
}
// Null keys yield null; an existing key keeps its stored value and the
// mapping function's result is ignored.
@Test public void testComputeIfAbsent() {
    Assert.assertEquals(null, map.computeIfAbsent(null, key -> ""));
    Assert.assertEquals(VALUE, map.computeIfAbsent(KEY, key -> ""));
    Assert.assertEquals(VALUE, map.get(KEY));
}
/**
 * Handles one unary gRPC request for the Nacos server.
 *
 * <p>Pipeline, each stage short-circuiting with an error Payload and a
 * metrics record: (1) reject while the server is still starting; (2) answer
 * ServerCheckRequest directly with the connection id; (3) resolve a
 * RequestHandler for the payload type; (4) verify the connection id is
 * registered; (5) parse the payload and require it to be a Request;
 * (6) build RequestMeta from the connection, refresh its active time,
 * dispatch to the handler, and reply — OVER_THRESHOLD responses are delayed
 * by one second on the control scheduler. Request tracing happens on both
 * inbound and outbound payloads; the request context is always cleared.
 *
 * @param grpcRequest      the inbound payload
 * @param responseObserver observer used to emit exactly one response
 */
@Override public void request(Payload grpcRequest, StreamObserver<Payload> responseObserver) { traceIfNecessary(grpcRequest, true); String type = grpcRequest.getMetadata().getType(); long startTime = System.nanoTime(); //server is on starting. if (!ApplicationUtils.isStarted()) { Payload payloadResponse = GrpcUtils.convert( ErrorResponse.build(NacosException.INVALID_SERVER_STATUS, "Server is starting,please try later.")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.INVALID_SERVER_STATUS, null, null, System.nanoTime() - startTime); return; } // server check. if (ServerCheckRequest.class.getSimpleName().equals(type)) { Payload serverCheckResponseP = GrpcUtils.convert(new ServerCheckResponse(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get(), true)); traceIfNecessary(serverCheckResponseP, false); responseObserver.onNext(serverCheckResponseP); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, true, 0, null, null, System.nanoTime() - startTime); return; } RequestHandler requestHandler = requestHandlerRegistry.getByRequestType(type); //no handler found. if (requestHandler == null) { Loggers.REMOTE_DIGEST.warn(String.format("[%s] No handler for request type : %s :", "grpc", type)); Payload payloadResponse = GrpcUtils .convert(ErrorResponse.build(NacosException.NO_HANDLER, "RequestHandler Not Found")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.NO_HANDLER, null, null, System.nanoTime() - startTime); return; } //check connection status.
String connectionId = GrpcServerConstants.CONTEXT_KEY_CONN_ID.get(); boolean requestValid = connectionManager.checkValid(connectionId); if (!requestValid) { Loggers.REMOTE_DIGEST .warn("[{}] Invalid connection Id ,connection [{}] is un registered ,", "grpc", connectionId); Payload payloadResponse = GrpcUtils .convert(ErrorResponse.build(NacosException.UN_REGISTER, "Connection is unregistered.")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.UN_REGISTER, null, null, System.nanoTime() - startTime); return; } Object parseObj = null; try { parseObj = GrpcUtils.parse(grpcRequest); } catch (Exception e) { Loggers.REMOTE_DIGEST .warn("[{}] Invalid request receive from connection [{}] ,error={}", "grpc", connectionId, e); Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(NacosException.BAD_GATEWAY, e.getMessage())); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.BAD_GATEWAY, e.getClass().getSimpleName(), null, System.nanoTime() - startTime); return; } if (parseObj == null) { Loggers.REMOTE_DIGEST.warn("[{}] Invalid request receive ,parse request is null", connectionId); Payload payloadResponse = GrpcUtils .convert(ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime); return; } if (!(parseObj instanceof Request)) { Loggers.REMOTE_DIGEST .warn("[{}] Invalid request receive ,parsed payload is not a request,parseObj={}", connectionId, parseObj); Payload payloadResponse = GrpcUtils .convert(
ErrorResponse.build(NacosException.BAD_GATEWAY, "Invalid request")); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); MetricsMonitor.recordGrpcRequestEvent(type, false, NacosException.BAD_GATEWAY, null, null, System.nanoTime() - startTime); return; } Request request = (Request) parseObj; try { Connection connection = connectionManager.getConnection(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get()); RequestMeta requestMeta = new RequestMeta(); requestMeta.setClientIp(connection.getMetaInfo().getClientIp()); requestMeta.setConnectionId(GrpcServerConstants.CONTEXT_KEY_CONN_ID.get()); requestMeta.setClientVersion(connection.getMetaInfo().getVersion()); requestMeta.setLabels(connection.getMetaInfo().getLabels()); requestMeta.setAbilityTable(connection.getAbilityTable()); connectionManager.refreshActiveTime(requestMeta.getConnectionId()); prepareRequestContext(request, requestMeta, connection); Response response = requestHandler.handleRequest(request, requestMeta); Payload payloadResponse = GrpcUtils.convert(response); traceIfNecessary(payloadResponse, false); if (response.getErrorCode() == NacosException.OVER_THRESHOLD) { RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(() -> { traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); }, 1000L, TimeUnit.MILLISECONDS); } else { traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted(); } MetricsMonitor.recordGrpcRequestEvent(type, response.isSuccess(), response.getErrorCode(), null, request.getModule(), System.nanoTime() - startTime); } catch (Throwable e) { Loggers.REMOTE_DIGEST .error("[{}] Fail to handle request from connection [{}] ,error message :{}", "grpc", connectionId, e); Payload payloadResponse = GrpcUtils.convert(ErrorResponse.build(e)); traceIfNecessary(payloadResponse, false); responseObserver.onNext(payloadResponse); responseObserver.onCompleted();
MetricsMonitor.recordGrpcRequestEvent(type, false, ResponseCode.FAIL.getCode(), e.getClass().getSimpleName(), request.getModule(), System.nanoTime() - startTime); } finally { RequestContextHolder.removeContext(); } }
// A ServerCheckRequest must be answered directly with a ServerCheckResponse
// while the server reports started.
@Test void testServerCheckRequest() {
    ApplicationUtils.setStarted(true);
    RequestMeta metadata = new RequestMeta();
    metadata.setClientIp("127.0.0.1");
    metadata.setConnectionId(connectId);
    ServerCheckRequest serverCheckRequest = new ServerCheckRequest();
    serverCheckRequest.setRequestId(requestId);
    Payload request = GrpcUtils.convert(serverCheckRequest, metadata);
    StreamObserver<Payload> streamObserver = new StreamObserver<Payload>() {
        @Override public void onNext(Payload payload) {
            System.out.println("Receive data from server: " + payload);
            Object res = GrpcUtils.parse(payload);
            assertTrue(res instanceof ServerCheckResponse);
        }
        @Override public void onError(Throwable throwable) {
            fail(throwable.getMessage());
        }
        @Override public void onCompleted() {
            System.out.println("complete");
        }
    };
    streamStub.request(request, streamObserver);
    // Restore the flag so other tests are unaffected.
    ApplicationUtils.setStarted(false);
}
/**
 * Returns a retriever that tries each well-known credential helper whose
 * registry suffix matches the image's registry (e.g. *.gcr.io ->
 * docker-credential-gcr).
 *
 * <p>Helper-not-found / unhandled-URL problems are logged and the next
 * candidate is tried; only I/O errors abort with
 * {@link CredentialRetrievalException}. Returns empty when no helper
 * matches or succeeds.
 */
public CredentialRetriever wellKnownCredentialHelpers() {
    return () -> {
        for (Map.Entry<String, String> entry : WELL_KNOWN_CREDENTIAL_HELPERS.entrySet()) {
            try {
                String registrySuffix = entry.getKey();
                if (imageReference.getRegistry().endsWith(registrySuffix)) {
                    String credentialHelper = entry.getValue();
                    return Optional.of(retrieveFromDockerCredentialHelper(Paths.get(credentialHelper)));
                }
            } catch (CredentialHelperNotFoundException | CredentialHelperUnhandledServerUrlException ex) {
                if (ex.getMessage() != null) {
                    // Warns the user that the specified (or inferred) credential helper cannot be used.
                    logger.accept(LogEvent.info(ex.getMessage()));
                    if (ex.getCause() != null && ex.getCause().getMessage() != null) {
                        logger.accept(LogEvent.info("  Caused by: " + ex.getCause().getMessage()));
                    }
                }
            } catch (IOException ex) {
                throw new CredentialRetrievalException(ex);
            }
        }
        return Optional.empty();
    };
}
// A *.gcr.io registry must resolve through docker-credential-gcr and log
// which helper was chosen.
@Test public void testWellKnownCredentialHelpers() throws CredentialRetrievalException {
    CredentialRetrieverFactory credentialRetrieverFactory =
        createCredentialRetrieverFactory("something.gcr.io", "repo");
    Assert.assertEquals(
        Optional.of(FAKE_CREDENTIALS),
        credentialRetrieverFactory.wellKnownCredentialHelpers().retrieve());
    Mockito.verify(mockDockerCredentialHelperFactory)
        .create("something.gcr.io", Paths.get("docker-credential-gcr"), Collections.emptyMap());
    Mockito.verify(mockLogger)
        .accept(
            LogEvent.lifecycle(
                "Using credential helper docker-credential-gcr for something.gcr.io/repo"));
}
/**
 * Checks that the directory can be listed before attempting it.
 *
 * <p>An EMPTY ACL means attributes were never initialized — the check is
 * skipped with a warning rather than failing. Otherwise the canonical
 * user's ACL must contain CANLISTCHILDREN.
 *
 * @param directory the directory about to be listed
 * @throws AccessDeniedException if the ACL lacks the list-children role
 */
@Override public void preflight(final Path directory) throws BackgroundException {
    final Acl acl = directory.attributes().getAcl();
    if(Acl.EMPTY == acl) {
        // Missing initialization
        log.warn(String.format("Unknown ACLs on %s", directory));
        return;
    }
    if(!acl.get(new Acl.CanonicalUser()).contains(CANLISTCHILDREN)) {
        if(log.isWarnEnabled()) {
            log.warn(String.format("ACL %s for %s does not include %s", acl, directory, CANLISTCHILDREN));
        }
        throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot download {0}", "Error"), directory.getName())).withFile(directory);
    }
}
// A Documents folder whose ACL includes CANLISTCHILDREN must pass
// preflight without throwing.
@Test public void testListChildrenDocuments() throws Exception {
    final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
    final Path folder = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents/",
        EnumSet.of(Path.Type.directory, Path.Type.volume));
    final PathAttributes attributes = new DeepboxAttributesFinderFeature(session, nodeid).find(folder);
    assertTrue(new BoxRestControllerApi(session.getClient()).getBox(ORG4, ORG4_BOX1).getBoxPolicy().isCanAddFilesRoot());
    assertTrue(attributes.getAcl().get(new Acl.CanonicalUser()).contains(CANLISTCHILDREN));
    // assert no fail
    new DeepboxListService(session, nodeid).preflight(folder.withAttributes(attributes));
}
/**
 * Lists shared folders, following the result cursor until exhausted.
 *
 * <p>NOTE(review): the first page comes from {@code listFolders()} but
 * continuation pages use {@code listMountableFoldersContinue(...)} — verify
 * this pairing is intentional (vs. {@code listFoldersContinue}) against the
 * Dropbox SDK.
 *
 * @param directory the (virtual) directory being listed
 * @param listener  progress listener notified per page
 * @return the accumulated children
 * @throws BackgroundException on mapped Dropbox errors
 */
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    try {
        final AttributedList<Path> children = new AttributedList<>();
        ListFoldersResult listFoldersResult;
        this.parse(directory, listener, children,
            listFoldersResult = new DbxUserSharingRequests(session.getClient()).listFolders());
        // A non-null cursor signals more pages to fetch.
        while(listFoldersResult.getCursor() != null) {
            this.parse(directory, listener, children,
                listFoldersResult = new DbxUserSharingRequests(session.getClient())
                    .listMountableFoldersContinue(listFoldersResult.getCursor()));
        }
        return children;
    }
    catch(DbxException e) {
        throw new DropboxExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
}
// Listing the shared-folders root must return a materialized (non-sentinel)
// list.
@Test public void testList() throws Exception {
    final AttributedList<Path> list = new DropboxSharedFoldersListService(session).list(
        new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)),
        new DisabledListProgressListener());
    assertNotSame(AttributedList.emptyList(), list);
}
/** Constant-folds bitxor(INT, INT): bitwise XOR of the two int operands. */
@ConstantFunction(name = "bitxor", argTypes = {INT, INT}, returnType = INT)
public static ConstantOperator bitxorInt(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createInt(first.getInt() ^ second.getInt());
}
// x XOR x is always 0.
@Test public void bitxorInt() {
    assertEquals(0, ScalarOperatorFunctions.bitxorInt(O_INT_10, O_INT_10).getInt());
}
/**
 * Parses the framework-uploader command line into this object's fields.
 *
 * <p>Builds the option set, handles -h/-help by printing usage, then reads
 * input/whitelist/blacklist (with defaults), replication counts, timeout
 * and the nosymlink flag. The target path is combined with the target
 * filesystem (-fs, falling back to {@code fs.defaultFS}) unless it is
 * already a full hdfs:// or file:// URI. Leftover arguments are rejected.
 *
 * @param args raw command-line arguments
 * @return true if parsing succeeded and execution should proceed; false if
 *         help was requested or validation failed
 * @throws IOException if the generic option parser fails
 */
@VisibleForTesting boolean parseArguments(String[] args) throws IOException { Options opts = new Options(); opts.addOption(Option.builder("h").build()); opts.addOption(Option.builder("help").build()); opts.addOption(Option.builder("input") .desc("Input class path. Defaults to the default classpath.") .hasArg().build()); opts.addOption(Option.builder("whitelist") .desc( "Regex specifying the full path of jars to include in the" + " framework tarball. Default is a hardcoded set of jars" + " considered necessary to include") .hasArg().build()); opts.addOption(Option.builder("blacklist") .desc( "Regex specifying the full path of jars to exclude in the" + " framework tarball. Default is a hardcoded set of jars" + " considered unnecessary to include") .hasArg().build()); opts.addOption(Option.builder("fs") .desc( "Target file system to upload to." + " Example: hdfs://foo.com:8020") .hasArg().build()); opts.addOption(Option.builder("target") .desc( "Target file to upload to with a reference name." + " Example: /usr/mr-framework.tar.gz#mr-framework") .hasArg().build()); opts.addOption(Option.builder("initialReplication") .desc( "Desired initial replication count. Default 3.") .hasArg().build()); opts.addOption(Option.builder("finalReplication") .desc( "Desired final replication count. Default 10.") .hasArg().build()); opts.addOption(Option.builder("acceptableReplication") .desc( "Desired acceptable replication count. Default 9.") .hasArg().build()); opts.addOption(Option.builder("timeout") .desc( "Desired timeout for the acceptable" + " replication in seconds. Default 10") .hasArg().build()); opts.addOption(Option.builder("nosymlink") .desc("Ignore symlinks into the same directory") .build()); GenericOptionsParser parser = new GenericOptionsParser(opts, args); if (parser.getCommandLine().hasOption("help") || parser.getCommandLine().hasOption("h")) { printHelp(opts); return false; } input = parser.getCommandLine().getOptionValue( "input", System.getProperty("java.class.path")); whitelist = parser.getCommandLine().getOptionValue( "whitelist", DefaultJars.DEFAULT_MR_JARS); blacklist = parser.getCommandLine().getOptionValue( "blacklist", DefaultJars.DEFAULT_EXCLUDED_MR_JARS); initialReplication = Short.parseShort(parser.getCommandLine().getOptionValue( "initialReplication", "3")); finalReplication = Short.parseShort(parser.getCommandLine().getOptionValue( "finalReplication", "10")); acceptableReplication = Short.parseShort( parser.getCommandLine().getOptionValue( "acceptableReplication", "9")); timeout = Integer.parseInt( parser.getCommandLine().getOptionValue("timeout", "10")); if (parser.getCommandLine().hasOption("nosymlink")) { ignoreSymlink = true; } String fs = parser.getCommandLine() .getOptionValue("fs", null); String path = parser.getCommandLine().getOptionValue("target", "/usr/lib/mr-framework.tar.gz#mr-framework"); boolean isFullPath = path.startsWith("hdfs://") || path.startsWith("file://"); if (fs == null) { fs = conf.getTrimmed(FS_DEFAULT_NAME_KEY); if (fs == null && !isFullPath) { LOG.error("No filesystem specified in either fs or target."); printHelp(opts); return false; } else { LOG.info(String.format( "Target file system not specified. Using default %s", fs)); } } if (path.isEmpty()) { LOG.error("Target directory not specified"); printHelp(opts); return false; } StringBuilder absolutePath = new StringBuilder(); if (!isFullPath) { absolutePath.append(fs); absolutePath.append(path.startsWith("/") ?
"" : "/"); } absolutePath.append(path); target = absolutePath.toString(); if (parser.getRemainingArgs().length > 0) { LOG.warn("Unexpected parameters"); printHelp(opts); return false; } return true; }
// With no arguments and a default fs from configuration, parsing must
// succeed and produce the default absolute target.
@Test void testNoFilesystem() throws IOException {
    FrameworkUploader uploader = new FrameworkUploader();
    boolean success = uploader.parseArguments(new String[]{});
    assertTrue(success, "Expected to parse arguments");
    assertEquals(
        "file:////usr/lib/mr-framework.tar.gz#mr-framework", uploader.target, "Expected");
}
/**
 * Implements the consumerProgress admin command.
 *
 * <p>With -g: prints per-queue broker/consumer offsets, diff, inflight and
 * last-consume time for that group (optionally restricted to -t topic;
 * optionally with client IP allocation when -s true), plus TPS and totals.
 * Without -g: scans all retry topics to enumerate consumer groups and
 * prints a one-line summary (count, version, type, model, TPS, diff) per
 * group, tolerating per-group lookup failures. The admin client is always
 * shut down.
 *
 * @param commandLine parsed command line (-n, -g, -t, -s)
 * @param options     the option definitions (unused beyond parsing)
 * @param rpcHook     optional RPC hook for the admin client
 * @throws SubCommandException wrapping any failure
 */
@Override public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException { DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook); defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis())); if (commandLine.hasOption('n')) { defaultMQAdminExt.setNamesrvAddr(commandLine.getOptionValue('n').trim()); } try { defaultMQAdminExt.start(); boolean showClientIP = commandLine.hasOption('s') && "true".equalsIgnoreCase(commandLine.getOptionValue('s')); if (commandLine.hasOption('g')) { String consumerGroup = commandLine.getOptionValue('g').trim(); String topicName = commandLine.hasOption('t') ? commandLine.getOptionValue('t').trim() : null; ConsumeStats consumeStats; if (topicName == null) { consumeStats = defaultMQAdminExt.examineConsumeStats(consumerGroup); } else { consumeStats = defaultMQAdminExt.examineConsumeStats(consumerGroup, topicName); } List<MessageQueue> mqList = new LinkedList<>(consumeStats.getOffsetTable().keySet()); Collections.sort(mqList); Map<MessageQueue, String> messageQueueAllocationResult = null; if (showClientIP) { messageQueueAllocationResult = getMessageQueueAllocationResult(defaultMQAdminExt, consumerGroup); } if (showClientIP) { System.out.printf("%-64s  %-32s  %-4s  %-20s  %-20s  %-20s  %-20s  %-20s%s%n", "#Topic", "#Broker Name", "#QID", "#Broker Offset", "#Consumer Offset", "#Client IP", "#Diff", "#Inflight", "#LastTime"); } else { System.out.printf("%-64s  %-32s  %-4s  %-20s  %-20s  %-20s  %-20s%s%n", "#Topic", "#Broker Name", "#QID", "#Broker Offset", "#Consumer Offset", "#Diff", "#Inflight", "#LastTime"); } long diffTotal = 0L; long inflightTotal = 0L; for (MessageQueue mq : mqList) { OffsetWrapper offsetWrapper = consumeStats.getOffsetTable().get(mq); long diff = offsetWrapper.getBrokerOffset() - offsetWrapper.getConsumerOffset(); long inflight = offsetWrapper.getPullOffset() - offsetWrapper.getConsumerOffset(); diffTotal += diff; inflightTotal += inflight; String lastTime = "";
try { if (offsetWrapper.getLastTimestamp() == 0) { lastTime = "N/A"; } else { lastTime = UtilAll.formatDate(new Date(offsetWrapper.getLastTimestamp()), UtilAll.YYYY_MM_DD_HH_MM_SS); } } catch (Exception e) { // ignore } String clientIP = null; if (showClientIP) { clientIP = messageQueueAllocationResult.get(mq); } if (showClientIP) { System.out.printf("%-64s  %-32s  %-4d  %-20d  %-20d  %-20s  %-20d  %-20d %s%n", UtilAll.frontStringAtLeast(mq.getTopic(), 64), UtilAll.frontStringAtLeast(mq.getBrokerName(), 32), mq.getQueueId(), offsetWrapper.getBrokerOffset(), offsetWrapper.getConsumerOffset(), null != clientIP ? clientIP : "N/A", diff, inflight, lastTime ); } else { System.out.printf("%-64s  %-32s  %-4d  %-20d  %-20d  %-20d  %-20d %s%n", UtilAll.frontStringAtLeast(mq.getTopic(), 64), UtilAll.frontStringAtLeast(mq.getBrokerName(), 32), mq.getQueueId(), offsetWrapper.getBrokerOffset(), offsetWrapper.getConsumerOffset(), diff, inflight, lastTime ); } } System.out.printf("%n"); System.out.printf("Consume TPS: %.2f%n", consumeStats.getConsumeTps()); System.out.printf("Consume Diff Total: %d%n", diffTotal); System.out.printf("Consume Inflight Total: %d%n", inflightTotal); } else { System.out.printf("%-64s  %-6s  %-24s %-5s  %-14s  %-7s  %s%n", "#Group", "#Count", "#Version", "#Type", "#Model", "#TPS", "#Diff Total" ); TopicList topicList = defaultMQAdminExt.fetchAllTopicList(); for (String topic : topicList.getTopicList()) { if (topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) { String consumerGroup = KeyBuilder.parseGroup(topic); try { ConsumeStats consumeStats = null; try { consumeStats = defaultMQAdminExt.examineConsumeStats(consumerGroup); } catch (Exception e) { log.warn("examineConsumeStats exception, " + consumerGroup, e); } ConsumerConnection cc = null; try { cc = defaultMQAdminExt.examineConsumerConnectionInfo(consumerGroup); } catch (Exception e) { log.warn("examineConsumerConnectionInfo exception, " + consumerGroup, e); } GroupConsumeInfo groupConsumeInfo = new
GroupConsumeInfo(); groupConsumeInfo.setGroup(consumerGroup); if (consumeStats != null) { groupConsumeInfo.setConsumeTps((int) consumeStats.getConsumeTps()); groupConsumeInfo.setDiffTotal(consumeStats.computeTotalDiff()); } if (cc != null) { groupConsumeInfo.setCount(cc.getConnectionSet().size()); groupConsumeInfo.setMessageModel(cc.getMessageModel()); groupConsumeInfo.setConsumeType(cc.getConsumeType()); groupConsumeInfo.setVersion(cc.computeMinVersion()); } System.out.printf("%-64s  %-6d  %-24s %-5s  %-14s  %-7d  %d%n", UtilAll.frontStringAtLeast(groupConsumeInfo.getGroup(), 64), groupConsumeInfo.getCount(), groupConsumeInfo.getCount() > 0 ? groupConsumeInfo.versionDesc() : "OFFLINE", groupConsumeInfo.consumeTypeDesc(), groupConsumeInfo.messageModelDesc(), groupConsumeInfo.getConsumeTps(), groupConsumeInfo.getDiffTotal() ); } catch (Exception e) { log.warn("examineConsumeStats or examineConsumerConnectionInfo exception, " + consumerGroup, e); } } } } } catch (Exception e) { throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e); } finally { defaultMQAdminExt.shutdown(); } }
// Smoke test for the `mqadmin consumerProgress` command: builds the command
// line (-g consumer group, -n name server address) and runs execute() against
// the mocked name server. @Ignore'd by default because it requires the
// nameServerMocker environment to be up.
@Ignore
@Test
public void testExecute() throws SubCommandException {
    ConsumerProgressSubCommand cmd = new ConsumerProgressSubCommand();
    Options options = ServerUtil.buildCommandlineOptions(new Options());
    // -g: group to inspect; -n: address of the local mock name server
    String[] subargs = new String[] {"-g default-group", String.format("-n localhost:%d", nameServerMocker.listenPort())};
    final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new DefaultParser());
    cmd.execute(commandLine, options, null);
}
/**
 * Convenience overload of the batched record iterator: starts from the
 * beginning (no cursor) for entries matching {@code value}.
 *
 * @param value      index key value to look up (must not be null)
 * @param descending iterate in descending key order when {@code true}
 * @return iterator over matching index key entries
 */
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
    return getSqlRecordIteratorBatch(value, descending, null);
}
@Test
public void getSqlRecordIteratorBatchCursorLeftIncludeRightIncludedAscending() {
    // Ascending scan over range [0, 1], both bounds inclusive, batch size 3;
    // expected visiting order of the stored entries.
    performCursorTest(3, List.of(0, 3, 6, 1, 4, 7),
            cursor -> store.getSqlRecordIteratorBatch(0, true, 1, true, false, cursor));
}
/**
 * Reports whether the file is executable by delegating to the resolved
 * backing file object.
 *
 * @throws FileSystemException if the underlying check fails
 */
@Override
public boolean isExecutable() throws FileSystemException {
    final boolean canExecute = resolvedFileObject.isExecutable();
    return canExecute;
}
// Verifies that isExecutable() is a pure delegation to the resolved file
// object: both the true and false answers are forwarded unchanged, and the
// delegate is consulted exactly once per call (times(2) for two calls).
@Test
public void testDelegatesIsExecutable() throws FileSystemException {
    when( resolvedFileObject.isExecutable() ).thenReturn( true );
    assertTrue( fileObject.isExecutable() );
    when( resolvedFileObject.isExecutable() ).thenReturn( false );
    assertFalse( fileObject.isExecutable() );
    verify( resolvedFileObject, times( 2 ) ).isExecutable();
}
/**
 * Validates that a required string property is present and non-blank.
 * Replaces the Guava {@code Preconditions} calls with the standard library
 * ({@link Objects#requireNonNull}); exception types and messages are unchanged.
 *
 * @param value        the property value to validate; null is rejected
 * @param propertyName the property name used in error messages
 * @throws NullPointerException     if {@code value} is null
 * @throws IllegalArgumentException if {@code value} is empty or whitespace-only
 */
public static void checkNotNullAndNotEmpty(String value, String propertyName) {
    Objects.requireNonNull(value, "Property '" + propertyName + "' cannot be null");
    if (value.trim().isEmpty()) {
        throw new IllegalArgumentException(
            "Property '" + propertyName + "' cannot be an empty string");
    }
}
// Exercises the *collection* overload of checkNotNullAndNotEmpty (defined
// elsewhere in the class): an empty list must fail with an
// IllegalArgumentException carrying the "empty collection" message.
@Test
public void testCheckNotEmpty_collectionFailEmpty() {
    try {
        Validator.checkNotNullAndNotEmpty(ImmutableList.of(), "test");
        Assert.fail();
    } catch (IllegalArgumentException iae) {
        Assert.assertEquals("Property 'test' cannot be an empty collection", iae.getMessage());
    }
}
/**
 * Writes the platform line separator to the underlying writer.
 *
 * @throws UncheckedIOException if the underlying writer fails; this is the
 *         idiomatic unchecked wrapper for I/O errors and preserves the cause.
 *         It is a {@link RuntimeException} subtype, so existing callers that
 *         caught RuntimeException are unaffected.
 */
public void println() {
    try {
        out.write(System.lineSeparator());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
// Checks that a bare println() emits only the line separator and that
// println(String) appends the separator after the text; the stream is closed
// before asserting on the captured bytes.
@Test
void println() {
    out.println();
    out.println("Hello ");
    out.close();
    assertThat(bytes, bytes(equalTo(System.lineSeparator() + "Hello " + System.lineSeparator())));
}
/**
 * Creates a time-windowed view of this grouped stream using the supplied
 * window specification, carrying over this stream's serdes and topology state.
 */
@Override
public <W extends Window> TimeWindowedKStream<K, V> windowedBy(final Windows<W> windows) {
    final TimeWindowedKStream<K, V> windowedStream = new TimeWindowedKStreamImpl<>(
        windows,
        builder,
        subTopologySourceNodes,
        name,
        keySerde,
        valueSerde,
        aggregateBuilder,
        graphNode
    );
    return windowedStream;
}
@Test
public void shouldNotHaveNullWindowsOnWindowedAggregate() {
    // A null window specification must be rejected eagerly.
    final Windows<?> nullWindows = null;
    assertThrows(NullPointerException.class, () -> groupedStream.windowedBy(nullWindows));
}
/**
 * Serializes the SCRAM-SHA256 authentication request in openGauss wire order:
 * auth type, stored-method, salt, nonce, then version-dependent trailers.
 * The byte order here is a protocol contract — do not reorder.
 */
@Override
protected void write(final PostgreSQLPacketPayload payload) {
    // Authentication request type (the companion test expects value 10).
    payload.writeInt4(AUTH_REQ_SHA256);
    // Password stored method (the companion test expects value 2).
    payload.writeInt4(PASSWORD_STORED_METHOD_SHA256);
    // NOTE(review): getBytes() uses the platform default charset — confirm
    // salt/nonce are ASCII-only hex so this is charset-independent.
    payload.writeBytes(authHexData.getSalt().getBytes());
    payload.writeBytes(authHexData.getNonce().getBytes());
    // Protocols older than 350 additionally expect the server signature.
    if (version < OpenGaussProtocolVersion.PROTOCOL_350.getVersion()) {
        payload.writeBytes(serverSignature.getBytes());
    }
    // Protocol 351 (only) additionally expects the server iteration count.
    if (OpenGaussProtocolVersion.PROTOCOL_351.getVersion() == version) {
        payload.writeInt4(serverIteration);
    }
}
// Protocol 351: the packet must write auth type 10, stored-method 2, salt and
// nonce, and the iteration count — but NOT the server signature (only two
// writeBytes calls in total, hence times(2)).
@Test
void assertWriteProtocol351Packet() {
    PostgreSQLPacketPayload payload = mock(PostgreSQLPacketPayload.class);
    OpenGaussAuthenticationSCRAMSha256Packet packet = new OpenGaussAuthenticationSCRAMSha256Packet(OpenGaussProtocolVersion.PROTOCOL_351.getVersion(), 10000, authHexData, "");
    packet.write(payload);
    verify(payload).writeInt4(10);
    verify(payload).writeInt4(2);
    verify(payload).writeBytes(authHexData.getSalt().getBytes());
    verify(payload).writeBytes(authHexData.getNonce().getBytes());
    verify(payload).writeInt4(10000);
    verify(payload, times(2)).writeBytes(any());
}
/**
 * Removes the fact mapping at {@code index}: first clears its associated
 * data, then drops the mapping from the descriptor.
 */
public void removeFactMappingByIndex(int index) {
    final FactMapping factMappingToRemove = scesimModelDescriptor.getFactMappingByIndex(index);
    clearDatas(factMappingToRemove);
    scesimModelDescriptor.removeFactMappingByIndex(index);
}
@Test
public void removeFactMappingByIndex() {
    final int indexToRemove = 2;
    final FactMapping factMappingByIndex = model.scesimModelDescriptor.getFactMappingByIndex(indexToRemove);
    model.removeFactMappingByIndex(indexToRemove);
    // Data must be cleared exactly once, and the mapping must be gone.
    verify(model, times(1)).clearDatas(eq(factMappingByIndex));
    assertThat(model.scesimModelDescriptor.getFactMappings()).hasSize(FACT_MAPPINGS - 1).doesNotContain(factMappingByIndex);
}
/**
 * Opens an {@code ExcelReader} on the first sheet (index 0) of the given
 * workbook file.
 *
 * @param bookFilePath path to the workbook file
 * @return reader positioned at sheet 0
 */
public static ExcelReader getReader(String bookFilePath) {
    final int firstSheetIndex = 0;
    return getReader(bookFilePath, firstSheetIndex);
}
// Reads a fixture workbook by file path and sheet name ("12") and checks a
// cell value; relies on the classpath/working-dir fixture "aaa.xlsx".
@Test
public void getReaderByBookFilePathAndSheetNameTest() {
    final ExcelReader reader = ExcelUtil.getReader("aaa.xlsx", "12");
    final List<Map<String, Object>> list = reader.readAll();
    reader.close();
    // Numeric cells are read as Long by default.
    assertEquals(1L, list.get(1).get("鞋码"));
}
/**
 * Lists the children of the given directory on the local filesystem.
 *
 * @param directory directory to list
 * @param listener  notified incrementally as entries are collected
 * @return attributed list of child paths (files and directories)
 * @throws BackgroundException if the directory does not exist or reading fails
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final AttributedList<ch.cyberduck.core.Path> paths = new AttributedList<>();
    // Map the abstract path onto a java.nio path on the local filesystem.
    final java.nio.file.Path p = session.toPath(directory);
    if(!Files.exists(p)) {
        throw new LocalExceptionMappingService().map("Listing directory {0} failed", new NoSuchFileException(directory.getAbsolute()), directory);
    }
    try (DirectoryStream<java.nio.file.Path> stream = Files.newDirectoryStream(p)) {
        for(java.nio.file.Path n : stream) {
            // Defensive: skip paths without a file name component.
            if(null == n.getFileName()) {
                continue;
            }
            try {
                final PathAttributes attributes = feature.toAttributes(n);
                final EnumSet<Path.Type> type = EnumSet.noneOf(Path.Type.class);
                if(Files.isDirectory(n)) {
                    type.add(Path.Type.directory);
                }
                else {
                    type.add(Path.Type.file);
                }
                final Path file = new Path(directory, n.getFileName().toString(), type, attributes);
                // post() appears to filter/augment entries (e.g. symlink targets,
                // per testListSymlink) — only accepted entries are added. TODO confirm.
                if(this.post(n, file)) {
                    paths.add(file);
                    listener.chunk(directory, paths);
                }
            }
            catch(IOException e) {
                // Best-effort: skip entries whose attributes cannot be read
                // instead of failing the whole listing.
                log.warn(String.format("Failure reading attributes for %s", n));
            }
        }
    }
    catch(IOException ex) {
        throw new LocalExceptionMappingService().map("Listing directory {0} failed", ex, directory);
    }
    return paths;
}
// Integration test (POSIX only): creates a file plus one relative and one
// absolute symlink to it, then verifies that listing resolves each symlink's
// target exactly as it was created (relative stays relative, absolute stays
// absolute). Cleans up all three entries at the end.
@Test
public void testListSymlink() throws Exception {
    final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
    // Symlinks require a POSIX filesystem; skip elsewhere.
    assumeTrue(session.isPosixFilesystem());
    assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()));
    assertTrue(session.isConnected());
    assertNotNull(session.getClient());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path home = new LocalHomeFinderFeature().find();
    final Path file = new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final Path symlinkRelative = new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink));
    final Path symlinkAbsolute = new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink));
    new LocalTouchFeature(session).touch(file, new TransferStatus());
    // Relative link points at the bare file name; absolute link at the full path.
    new LocalSymlinkFeature(session).symlink(symlinkRelative, file.getName());
    new LocalSymlinkFeature(session).symlink(symlinkAbsolute, file.getAbsolute());
    final AttributedList<Path> list = new LocalListService(session).list(home, new DisabledListProgressListener());
    assertTrue(list.contains(file));
    assertTrue(list.contains(symlinkRelative));
    // The relative link's target must not have been resolved to an absolute path.
    assertFalse(list.get(symlinkRelative).getSymlinkTarget().getAbsolute().endsWith(file.getAbsolute()));
    assertTrue(list.get(symlinkRelative).getSymlinkTarget().getAbsolute().endsWith(file.getName()));
    assertTrue(list.contains(symlinkAbsolute));
    assertTrue(list.get(symlinkAbsolute).getSymlinkTarget().getAbsolute().endsWith(file.getAbsolute()));
    new LocalDeleteFeature(session).delete(Arrays.asList(file, symlinkAbsolute, symlinkRelative), new DisabledLoginCallback(), new Delete.DisabledCallback());
    session.close();
}
/**
 * Translates the parsed command line into a Flink {@link Configuration}.
 * <p>
 * Both the executor (-e) and target (-t) options map onto
 * {@code DeploymentOptions.TARGET}; the target option is applied second, so
 * it wins when both are supplied. Dynamic {@code -D} properties are then
 * layered on, and the configuration directory is recorded.
 */
@Override
public Configuration toConfiguration(final CommandLine commandLine) {
    final Configuration resultConfiguration = new Configuration();
    final String executorName = commandLine.getOptionValue(executorOption.getOpt());
    if (executorName != null) {
        resultConfiguration.set(DeploymentOptions.TARGET, executorName);
    }
    // Intentionally overrides the executor value when -t is also given.
    final String targetName = commandLine.getOptionValue(targetOption.getOpt());
    if (targetName != null) {
        resultConfiguration.set(DeploymentOptions.TARGET, targetName);
    }
    DynamicPropertiesUtil.encodeDynamicProperties(commandLine, resultConfiguration);
    resultConfiguration.set(DeploymentOptionsInternal.CONF_DIR, configurationDir);
    return resultConfiguration;
}
// Verifies layering: values from the pre-loaded configuration survive unless
// a -D dynamic property overrides them, -e sets the deployment target, and
// list-typed dynamic properties are decoded (";"-separated) correctly.
@Test
void testWithPreexistingConfigurationInConstructor() throws CliArgsException {
    final Configuration loadedConfig = new Configuration();
    loadedConfig.set(CoreOptions.DEFAULT_PARALLELISM, 2);
    loadedConfig.set(DeploymentOptions.ATTACHED, false);
    final ConfigOption<List<Integer>> listOption = key("test.list").intType().asList().noDefaultValue();
    final List<Integer> listValue = Arrays.asList(41, 42, 23);
    final String encodedListValue = listValue.stream().map(Object::toString).collect(Collectors.joining(";"));
    final String[] args = {
        "-e", "test-executor",
        "-D" + listOption.key() + "=" + encodedListValue,
        "-D" + CoreOptions.DEFAULT_PARALLELISM.key() + "=5"
    };
    final GenericCLI cliUnderTest = new GenericCLI(loadedConfig, tmp.toAbsolutePath().toString());
    final CommandLine commandLine = CliFrontendParser.parse(testOptions, args, true);
    final Configuration configuration = cliUnderTest.toConfiguration(commandLine);
    assertThat(configuration.get(DeploymentOptions.TARGET)).isEqualTo("test-executor");
    // -D wins over the pre-loaded parallelism of 2.
    assertThat(configuration.get(CoreOptions.DEFAULT_PARALLELISM)).isEqualTo(5);
    // Pre-loaded value not touched by the command line survives.
    assertThat(configuration.get(DeploymentOptions.ATTACHED)).isFalse();
    assertThat(configuration.get(listOption)).isEqualTo(listValue);
}
/**
 * Total number of bytes this transfer will write: the header bytes plus the
 * accumulated size of all message buffers.
 *
 * @return total byte count of header and message payload
 */
@Override
public long count() {
    // Widen to long BEFORE adding: the original int addition could overflow
    // when header + payload together exceed Integer.MAX_VALUE bytes, even
    // though the method's return type is long.
    return (long) byteBufferHeader.limit() + this.getMessageResult.getBufferTotalSize();
}
@Test
public void ManyMessageTransferCountTest() {
    ByteBuffer header = ByteBuffer.allocate(20);
    header.putInt(20);
    // An empty GetMessageResult contributes 0 bytes, so count() is just the
    // header buffer's limit (20).
    ManyMessageTransfer transfer = new ManyMessageTransfer(header, new GetMessageResult());
    Assert.assertEquals(transfer.count(), 20);
}
/**
 * Converts an Iceberg {@code Schema} into a Pig {@code ResourceSchema} by
 * translating each top-level column.
 *
 * @throws IOException if the field conversion fails
 */
public static ResourceSchema convert(Schema icebergSchema) throws IOException {
    final ResourceSchema pigSchema = new ResourceSchema();
    pigSchema.setFields(convertFields(icebergSchema.columns()));
    return pigSchema;
}
// Smoke test: converting a schema with a map-of-list-of-long column must not
// throw. The converted result is intentionally discarded — the assertion is
// simply that SchemaUtil.convert completes without an exception.
@Test
public void testLongInBag() throws IOException {
    Schema icebergSchema = new Schema(
        optional(
            1,
            "nested_list",
            MapType.ofOptional(
                2, 3,
                StringType.get(),
                ListType.ofRequired(5, LongType.get()))));
    SchemaUtil.convert(icebergSchema);
}
/**
 * Integer variant of the cosine UDF: widens the argument to a Double and
 * delegates to the Double overload; null propagates to a null result.
 */
@Udf(description = "Returns the cosine of an INT value")
public Double cos(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the cosine of."
    ) final Integer value
) {
    if (value == null) {
        return cos((Double) null);
    }
    return cos(value.doubleValue());
}
@Test
public void shouldHandleLessThanNegative2Pi() {
    // Shared tolerance for the floating-point comparisons.
    final double error = 0.000000000000001;
    assertThat(udf.cos(-9.1), closeTo(-0.9477216021311119, error));
    assertThat(udf.cos(-6.3), closeTo(0.9998586363834151, error));
    // Int and long overloads must agree with the double result.
    assertThat(udf.cos(-7), closeTo(0.7539022543433046, error));
    assertThat(udf.cos(-7L), closeTo(0.7539022543433046, error));
}
/**
 * Propagates the outcome of {@code future} to every promise supplied at
 * construction. Exactly one branch applies: success (forwarding the result),
 * cancellation, or failure (forwarding the cause). Notification failures are
 * logged only when {@code logNotifyFailure} was enabled.
 */
@Override
public void operationComplete(F future) throws Exception {
    // Passing null suppresses logging inside the notification utilities.
    InternalLogger internalLogger = logNotifyFailure ? logger : null;
    if (future.isSuccess()) {
        // Safe: get() cannot block or throw here since the future is complete.
        V result = future.get();
        for (Promise<? super V> p: promises) {
            PromiseNotificationUtil.trySuccess(p, result, internalLogger);
        }
    } else if (future.isCancelled()) {
        for (Promise<? super V> p: promises) {
            PromiseNotificationUtil.tryCancel(p, internalLogger);
        }
    } else {
        Throwable cause = future.cause();
        for (Promise<? super V> p: promises) {
            PromiseNotificationUtil.tryFailure(p, cause, internalLogger);
        }
    }
}
// A successful source future must fan out trySuccess(result) to every
// registered promise exactly once.
@Test
public void testListenerSuccess() throws Exception {
    @SuppressWarnings("unchecked")
    Promise<Void> p1 = mock(Promise.class);
    @SuppressWarnings("unchecked")
    Promise<Void> p2 = mock(Promise.class);
    @SuppressWarnings("unchecked")
    PromiseNotifier<Void, Future<Void>> notifier = new PromiseNotifier<Void, Future<Void>>(p1, p2);
    @SuppressWarnings("unchecked")
    Future<Void> future = mock(Future.class);
    when(future.isSuccess()).thenReturn(true);
    when(future.get()).thenReturn(null);
    when(p1.trySuccess(null)).thenReturn(true);
    when(p2.trySuccess(null)).thenReturn(true);
    notifier.operationComplete(future);
    verify(p1).trySuccess(null);
    verify(p2).trySuccess(null);
}
/**
 * Creates a comparator backed by the single given {@code ExtensionDirector}.
 * A mutable list is used so further directors can be added later —
 * presumably via another member; confirm against the rest of the class.
 */
public ActivateComparator(ExtensionDirector extensionDirector) {
    extensionDirectors = new ArrayList<>();
    extensionDirectors.add(extensionDirector);
}
@Test
void testActivateComparator() {
    Filter1 f1 = new Filter1();
    Filter2 f2 = new Filter2();
    Filter3 f3 = new Filter3();
    Filter4 f4 = new Filter4();
    List<Class<?>> filterClasses = new ArrayList<>();
    filterClasses.add(f1.getClass());
    filterClasses.add(f2.getClass());
    filterClasses.add(f3.getClass());
    filterClasses.add(f4.getClass());
    // List.sort is equivalent to Collections.sort(list, comparator).
    filterClasses.sort(activateComparator);
    Assertions.assertEquals(f4.getClass(), filterClasses.get(0));
    Assertions.assertEquals(f3.getClass(), filterClasses.get(1));
    Assertions.assertEquals(f2.getClass(), filterClasses.get(2));
    Assertions.assertEquals(f1.getClass(), filterClasses.get(3));
}
/**
 * Converts fair-scheduler (FS) site-level settings into the equivalent
 * capacity-scheduler (CS) settings inside {@code yarnSiteConfig}.
 * The ordering of the policy-list mutations matters: each addMonitorPolicy
 * call appends to the current RM_SCHEDULER_MONITOR_POLICIES value.
 *
 * @param conf                 source FS configuration
 * @param yarnSiteConfig       destination yarn-site configuration (mutated)
 * @param drfUsed              whether FS used DRF (dominant resource fairness)
 * @param enableAsyncScheduler force-enable CS async scheduling
 * @param userPercentage       true for percentage mode (skips auto-deletion setup)
 * @param preemptionMode       controls policy handling when preemption is off
 */
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf, Configuration yarnSiteConfig, boolean drfUsed, boolean enableAsyncScheduler, boolean userPercentage, FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
    // FS continuous scheduling maps onto CS asynchronous scheduling.
    if (conf.getBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
        yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
        int interval = conf.getInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
        yarnSiteConfig.setInt(PREFIX + "schedule-asynchronously.scheduling-interval-ms", interval);
    }
    // This should be always true to trigger cs auto refresh queue.
    yarnSiteConfig.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
    if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION, FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
        preemptionEnabled = true;
        // Preemption on: install the proportional preemption policy and carry
        // over the FS kill-wait / starvation-check timing knobs.
        String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.class.getCanonicalName(), yarnSiteConfig);
        yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, policies);
        int waitTimeBeforeKill = conf.getInt(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
        yarnSiteConfig.setInt(CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, waitTimeBeforeKill);
        long waitBeforeNextStarvationCheck = conf.getLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS, FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
        yarnSiteConfig.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, waitBeforeNextStarvationCheck);
    } else {
        // Preemption off: NO_POLICY mode clears the monitor policy list.
        if (preemptionMode == FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
            yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
        }
    }
    // For auto created queue's auto deletion.
    if (!userPercentage) {
        String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.class.getCanonicalName(), yarnSiteConfig);
        yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, policies);
        // Set the expired for deletion interval to 10s, consistent with fs.
        yarnSiteConfig.setInt(CapacitySchedulerConfiguration.AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
    }
    if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
        yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
    } else {
        yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
    }
    // Make auto cs conf refresh enabled.
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, addMonitorPolicy(QueueConfigurationAutoRefreshPolicy.class.getCanonicalName(), yarnSiteConfig));
    // For the remaining knobs, only propagate values the user actually
    // changed away from the FS defaults.
    int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN, FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
    if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
        yarnSiteConfig.setInt(CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT, maxAssign);
    }
    float localityThresholdNode = conf.getFloat(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE, FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
    if (localityThresholdNode != FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
        yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY, localityThresholdNode);
    }
    float localityThresholdRack = conf.getFloat(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
    if (localityThresholdRack != FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
        yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY, localityThresholdRack);
    }
    if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT, FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
        sizeBasedWeight = true;
    }
    if (drfUsed) {
        // DRF in FS maps to the dominant resource calculator in CS.
        yarnSiteConfig.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, DominantResourceCalculator.class.getCanonicalName());
    }
    if (enableAsyncScheduler) {
        yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
    }
}
@Test public void testSiteQueueAutoDeletionConversionWithWeightMode() { converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, false, false, null); assertTrue(yarnConvertedConfig.get(YarnConfiguration. RM_SCHEDULER_ENABLE_MONITORS), true); assertTrue("Scheduling Policies contain auto deletion policy", yarnConvertedConfig. get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES) .contains(DELETION_POLICY_CLASS)); // Test when policy has existed. yarnConvertedConfig. set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "testPolicy"); converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, false, false, null); assertTrue("Scheduling Policies contain auto deletion policy", yarnConvertedConfig. get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES) .contains(DELETION_POLICY_CLASS)); assertEquals("Auto deletion policy expired time should be 10s", 10, yarnConvertedConfig. getLong(CapacitySchedulerConfiguration. AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, CapacitySchedulerConfiguration. DEFAULT_AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME)); }
/**
 * Applies this template's entries to the given child queue path, delegating
 * to the three-argument overload with {@code false} for the trailing flag —
 * presumably "not a leaf/wildcard-only mode"; confirm the flag's meaning
 * against the overload's declaration.
 */
public void setTemplateEntriesForChild(CapacitySchedulerConfiguration conf, QueuePath childQueuePath) {
    setTemplateEntriesForChild(conf, childQueuePath, false);
}
@Test
public void testNonWildCardTemplate() {
    // A template keyed on an explicit (non-wildcard) parent must apply to its child.
    conf.set(getTemplateKey(TEST_QUEUE_AB, "capacity"), "6w");
    AutoCreatedQueueTemplate template = new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB);
    template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC);
    final float childWeight = conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC);
    Assert.assertEquals("weight is not set", 6f, childWeight, 10e-6);
}
/**
 * Prepares fetch requests for all fetchable partitions and hands them to the
 * shared polling logic, wiring in this class's success/failure handlers.
 *
 * @param currentTimeMs current time in milliseconds (unused directly here;
 *                      kept for the PollResult contract)
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure
    );
}
// When a partition has no topic ID (Uuid.ZERO_UUID), the fetcher must fall
// back to the pre-topic-ID fetch protocol (request version 12) and still
// deliver records with correct offsets.
@Test
public void testFetchWithNoTopicId() {
    // Should work and default to using old request type.
    buildFetcher();
    TopicIdPartition noId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("noId", 0));
    assignFromUser(noId.topicPartition());
    subscriptions.seek(noId.topicPartition(), 0);
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    // Fetch should use request version 12
    client.prepareResponse(
        fetchRequestMatcher((short) 12, noId, 0, Optional.of(validLeaderEpoch)),
        fullFetchResponse(noId, records, Errors.NONE, 100L, 0)
    );
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
    assertTrue(partitionRecords.containsKey(noId.topicPartition()));
    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(noId.topicPartition());
    assertEquals(3, records.size());
    // The subscription position advances past the fetched batch.
    assertEquals(4L, subscriptions.position(noId.topicPartition()).offset); // this is the next fetching position
    // Record offsets are consecutive starting at 1.
    long offset = 1;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        assertEquals(offset, record.offset());
        offset += 1;
    }
}
/**
 * Resolves the KieRuntimeService for the given input, consulting the
 * second-level cache first, then the first-level cache, then the runtime
 * context. A successful lookup is stored back into the second-level cache;
 * a miss is logged as a warning.
 */
static Optional<KieRuntimeService> getKieRuntimeServiceLocal(EfestoRuntimeContext context, EfestoInput input) {
    // Fast path: second-level cache hit.
    KieRuntimeService fromSecondLevel = getKieRuntimeServiceFromSecondLevelCache(input);
    if (fromSecondLevel != null) {
        return Optional.of(fromSecondLevel);
    }
    Optional<KieRuntimeService> found = getKieRuntimeServiceFromFirstLevelCache(context, input);
    if (!found.isPresent()) {
        found = getKieRuntimeServiceFromEfestoRuntimeContextLocal(context, input);
    }
    if (found.isPresent()) {
        secondLevelCache.put(input.getSecondLevelCacheKey(), found.get());
    } else {
        logger.warn("Cannot find KieRuntimeService for {}", input.getModelLocalUriId());
    }
    return found;
}
// An input whose model URI matches no registered runtime service must yield
// an empty (but non-null) Optional.
@Test
void getKieRuntimeServiceLocalNotPresent() {
    EfestoInput efestoInput = new EfestoInput() {
        @Override
        public ModelLocalUriId getModelLocalUriId() {
            // Deliberately unknown model path.
            return new ModelLocalUriId(LocalUri.parse("/not-existing/notexisting"));
        }
        @Override
        public Object getInputData() {
            return null;
        }
    };
    Optional<KieRuntimeService> retrieved = RuntimeManagerUtils.getKieRuntimeServiceLocal(context, efestoInput);
    assertThat(retrieved).isNotNull().isNotPresent();
}
/**
 * Scans for resources at the given URI. URIs with the classpath scheme are
 * resolved through the classpath scanner; all other schemes (file, jar, ...)
 * go through the generic URI resource lookup.
 */
public List<R> scanForResourcesUri(URI classpathResourceUri) {
    requireNonNull(classpathResourceUri, "classpathResourceUri must not be null");
    final boolean isClasspathUri = CLASSPATH_SCHEME.equals(classpathResourceUri.getScheme());
    if (isClasspathUri) {
        return scanForClasspathResource(resourceName(classpathResourceUri), NULL_FILTER);
    }
    return findResourcesForUri(classpathResourceUri, DEFAULT_PACKAGE_NAME, NULL_FILTER, createUriResource());
}
// Spring-Boot style nested-jar URI: the "!/BOOT-INF/classes!/..." form must be
// normalized and the resource inside the nested classes directory discovered.
@Test
void scanForResourcesNestedJarUriUnPackaged() {
    URI jarFileUri = new File("src/test/resources/io/cucumber/core/resource/test/spring-resource.jar").toURI();
    URI resourceUri = URI
            .create("jar:file://" + jarFileUri.getSchemeSpecificPart() + "!/BOOT-INF/classes!/com/example/");
    List<URI> resources = resourceScanner.scanForResourcesUri(resourceUri);
    assertThat(resources, containsInAnyOrder(
        URI.create(
            "jar:file://" + jarFileUri.getSchemeSpecificPart() + "!/BOOT-INF/classes/com/example/resource.txt")));
}
/**
 * Advances {@code nextFlush} past {@code now} by a whole number of roll
 * intervals, preserving the original random offset within the interval.
 *
 * @param now the reference time the next flush must be strictly after
 */
@VisibleForTesting
protected void updateFlushTime(Date now) {
    // In non-initial rounds, add an integer number of intervals to the last
    // flush until a time in the future is achieved, thus preserving the
    // original random offset.
    // NOTE(review): the long computation is narrowed to int before
    // Calendar.add(int); an extremely large gap between now and nextFlush
    // could overflow — confirm the gap is bounded in practice.
    int millis = (int) (((now.getTime() - nextFlush.getTimeInMillis()) / rollIntervalMillis + 1) * rollIntervalMillis);
    nextFlush.add(Calendar.MILLISECOND, millis);
}
// Exercises updateFlushTime with a 1s roll interval:
//  1) now == nextFlush      -> next flush exactly 1s later,
//  2) now 10ms past         -> next flush 990ms later (offset preserved),
//  3) now 2.01s past        -> still lands 990ms after now (whole intervals skipped).
@Test
public void testUpdateRollTime() {
    RollingFileSystemSink rfsSink = new RollingFileSystemSink(1000, 0);
    // Fixed reference point: start of 2016, all sub-day fields zeroed.
    Calendar calendar = Calendar.getInstance();
    calendar.set(Calendar.MILLISECOND, 0);
    calendar.set(Calendar.SECOND, 0);
    calendar.set(Calendar.MINUTE, 0);
    calendar.set(Calendar.HOUR, 0);
    calendar.set(Calendar.DAY_OF_YEAR, 1);
    calendar.set(Calendar.YEAR, 2016);
    rfsSink.nextFlush = Calendar.getInstance();
    rfsSink.nextFlush.setTime(calendar.getTime());
    rfsSink.updateFlushTime(calendar.getTime());
    assertEquals("The next roll time should have been 1 second in the future",
        calendar.getTimeInMillis() + 1000, rfsSink.nextFlush.getTimeInMillis());
    rfsSink.nextFlush.setTime(calendar.getTime());
    calendar.add(Calendar.MILLISECOND, 10);
    rfsSink.updateFlushTime(calendar.getTime());
    assertEquals("The next roll time should have been 990 ms in the future",
        calendar.getTimeInMillis() + 990, rfsSink.nextFlush.getTimeInMillis());
    rfsSink.nextFlush.setTime(calendar.getTime());
    calendar.add(Calendar.SECOND, 2);
    calendar.add(Calendar.MILLISECOND, 10);
    rfsSink.updateFlushTime(calendar.getTime());
    assertEquals("The next roll time should have been 990 ms in the future",
        calendar.getTimeInMillis() + 990, rfsSink.nextFlush.getTimeInMillis());
}
public static Map<String, String> removeKeySuffix(Map<String, String> keyValues, int suffixLength) { // use O(n) algorithm Map<String, String> map = new HashMap<>(); for(Map.Entry<String, String> entry : keyValues.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); String newKey = key.substring(0, key.length() - suffixLength); map.put(newKey, value); } return map; }
@Test
public void testRemoveKeySuffix() {
    Map<String, String> map = new HashMap<>();
    map.put("abc_meta", "none");
    map.put("234_meta", "none");
    map.put("888_meta", "none");
    Map<String, String> afterFilter = KeyValueUtils.removeKeySuffix(map, "_meta".length());
    // Every resulting key must have the "_meta" suffix fully stripped.
    for (String key : afterFilter.keySet()) {
        assertFalse(key.endsWith("_meta"));
        assertFalse(key.contains("_meta"));
    }
}
/**
 * Refreshes the public IDs for the given extension and its namespace from
 * upstream, skipping built-in extensions entirely. Extension IDs are updated
 * before namespace IDs; each batch is persisted only when non-empty.
 */
public void update(String namespaceName, String extensionName) throws InterruptedException {
    if (BuiltInExtensionUtil.isBuiltIn(namespaceName)) {
        LOGGER.debug("SKIP BUILT-IN EXTENSION {}", NamingUtil.toExtensionId(namespaceName, extensionName));
        return;
    }
    var extension = repositories.findPublicId(namespaceName, extensionName);
    var extensionIdsById = new HashMap<Long, String>();
    updateExtensionPublicId(extension, extensionIdsById, false);
    if (!extensionIdsById.isEmpty()) {
        repositories.updateExtensionPublicIds(extensionIdsById);
    }
    var namespaceIdsById = new HashMap<Long, String>();
    updateNamespacePublicId(extension, namespaceIdsById, false);
    if (!namespaceIdsById.isEmpty()) {
        repositories.updateNamespacePublicIds(namespaceIdsById);
    }
}
// Builds a 4-link chain of extensions/namespaces where each upstream public
// ID is already held by the next entity (1 -> 2 -> 3 -> 4). Updating the
// first must recursively reassign IDs down the whole chain, producing a
// single batch update of all four extension IDs and all four namespace IDs.
@Test
public void testUpdateDuplicateRecursive() throws InterruptedException {
    // Link 1: the extension being updated.
    var namespaceName1 = "foo";
    var namespacePublicId1 = UUID.randomUUID().toString();
    var extensionName1 = "bar";
    var extensionPublicId1 = UUID.randomUUID().toString();
    var namespace1 = new Namespace();
    namespace1.setId(1L);
    namespace1.setName(namespaceName1);
    var extension1 = new Extension();
    extension1.setId(2L);
    extension1.setName(extensionName1);
    extension1.setNamespace(namespace1);
    // Link 2: currently owns link 1's upstream public IDs.
    var namespaceName2 = "baz";
    var namespacePublicId2 = UUID.randomUUID().toString();
    var extensionName2 = "foobar";
    var extensionPublicId2 = UUID.randomUUID().toString();
    var namespace2 = new Namespace();
    namespace2.setId(3L);
    namespace2.setName(namespaceName2);
    namespace2.setPublicId(namespacePublicId1);
    var extension2 = new Extension();
    extension2.setId(4L);
    extension2.setName(extensionName2);
    extension2.setPublicId(extensionPublicId1);
    extension2.setNamespace(namespace2);
    // Link 3: currently owns link 2's upstream public IDs.
    var namespaceName3 = "baz2";
    var namespacePublicId3 = UUID.randomUUID().toString();
    var extensionName3 = "foobar2";
    var extensionPublicId3 = UUID.randomUUID().toString();
    var namespace3 = new Namespace();
    namespace3.setId(5L);
    namespace3.setName(namespaceName3);
    namespace3.setPublicId(namespacePublicId2);
    var extension3 = new Extension();
    extension3.setId(6L);
    extension3.setName(extensionName3);
    extension3.setPublicId(extensionPublicId2);
    extension3.setNamespace(namespace3);
    // Link 4: end of the chain; its new upstream IDs collide with nothing.
    var namespaceName4 = "baz3";
    var namespacePublicId4 = UUID.randomUUID().toString();
    var extensionName4 = "foobar3";
    var extensionPublicId4 = UUID.randomUUID().toString();
    var namespace4 = new Namespace();
    namespace4.setId(7L);
    namespace4.setName(namespaceName4);
    namespace4.setPublicId(namespacePublicId3);
    var extension4 = new Extension();
    extension4.setId(8L);
    extension4.setName(extensionName4);
    extension4.setPublicId(extensionPublicId3);
    extension4.setNamespace(namespace4);
    // Wire up repository lookups so each reassigned ID resolves to the next link.
    Mockito.when(repositories.findPublicId(namespaceName1, extensionName1)).thenReturn(extension1);
    Mockito.when(repositories.findPublicId(extensionPublicId1)).thenReturn(extension2);
    Mockito.when(repositories.findNamespacePublicId(namespacePublicId1)).thenReturn(extension2);
    Mockito.when(repositories.findPublicId(extensionPublicId2)).thenReturn(extension3);
    Mockito.when(repositories.findNamespacePublicId(namespacePublicId2)).thenReturn(extension3);
    Mockito.when(repositories.findPublicId(extensionPublicId3)).thenReturn(extension4);
    Mockito.when(repositories.findNamespacePublicId(namespacePublicId3)).thenReturn(extension4);
    // Upstream returns the "new" IDs for each link.
    Mockito.when(idService.getUpstreamPublicIds(extension1)).thenReturn(new PublicIds(namespacePublicId1, extensionPublicId1));
    Mockito.when(idService.getUpstreamPublicIds(extension2)).thenReturn(new PublicIds(namespacePublicId2, extensionPublicId2));
    Mockito.when(idService.getUpstreamPublicIds(extension3)).thenReturn(new PublicIds(namespacePublicId3, extensionPublicId3));
    Mockito.when(idService.getUpstreamPublicIds(extension4)).thenReturn(new PublicIds(namespacePublicId4, extensionPublicId4));
    updateService.update(namespaceName1, extensionName1);
    // All four extension IDs must be updated in one batch...
    Mockito.verify(repositories).updateExtensionPublicIds(Mockito.argThat((Map<Long, String> map) -> {
        return map.size() == 4
            && map.get(extension1.getId()).equals(extensionPublicId1)
            && map.get(extension2.getId()).equals(extensionPublicId2)
            && map.get(extension3.getId()).equals(extensionPublicId3)
            && map.get(extension4.getId()).equals(extensionPublicId4);
    }));
    // ...and likewise all four namespace IDs.
    Mockito.verify(repositories).updateNamespacePublicIds(Mockito.argThat((Map<Long, String> map) -> {
        return map.size() == 4
            && map.get(namespace1.getId()).equals(namespacePublicId1)
            && map.get(namespace2.getId()).equals(namespacePublicId2)
            && map.get(namespace3.getId()).equals(namespacePublicId3)
            && map.get(namespace4.getId()).equals(namespacePublicId4);
    }));
}
/**
 * Describes the requested config resources, batching them by the broker each
 * resource must be fetched from. Resources with no specific broker (null key)
 * are sent to the least loaded broker (or active controller). One request is
 * issued per target node; per-resource futures are completed individually
 * from the response.
 */
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    // Partition the requested config resources based on which broker they must be sent to with the
    // null broker being used for config resources which can be obtained from any broker
    final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> nodeFutures = new HashMap<>(configResources.size());
    for (ConfigResource resource : configResources) {
        Integer broker = nodeFor(resource);
        nodeFutures.compute(broker, (key, value) -> {
            if (value == null) {
                value = new HashMap<>();
            }
            value.put(resource, new KafkaFutureImpl<>());
            return value;
        });
    }
    final long now = time.milliseconds();
    // One DescribeConfigs call per target node, covering all of its resources.
    for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : nodeFutures.entrySet()) {
        final Integer node = entry.getKey();
        Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
            node != null ? new ConstantNodeIdProvider(node, true) : new LeastLoadedBrokerOrActiveKController()) {
            @Override
            DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
                // ConfigurationKeys == null requests ALL keys of each resource.
                return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
                    .setResources(unified.keySet().stream()
                        .map(config -> new DescribeConfigsRequestData.DescribeConfigsResource()
                            .setResourceName(config.name())
                            .setResourceType(config.type().id())
                            .setConfigurationKeys(null))
                        .collect(Collectors.toList()))
                    .setIncludeSynonyms(options.includeSynonyms())
                    .setIncludeDocumentation(options.includeDocumentation()));
            }
            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
                    KafkaFutureImpl<Config> future = unified.get(configResource);
                    if (future == null) {
                        // Broker answered for a resource we never asked about — log, don't fail.
                        if (node != null) {
                            log.warn("The config {} in the response from node {} is not in the request", configResource, node);
                        } else {
                            log.warn("The config {} in the response from the least loaded broker is not in the request", configResource);
                        }
                    } else {
                        if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
                            future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode())
                                .exception(describeConfigsResult.errorMessage()));
                        } else {
                            future.complete(describeConfigResult(describeConfigsResult));
                        }
                    }
                }
                // Any resource the broker silently omitted gets a failure, never a hang.
                completeUnrealizedFutures(
                    unified.entrySet().stream(),
                    configResource -> "The node response did not contain a result for config resource " + configResource);
            }
            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unified.values(), throwable);
            }
        }, now);
    }
    // Flatten the per-node future maps into a single resource -> future map.
    return new DescribeConfigsResult(
        nodeFutures.entrySet()
            .stream()
            .flatMap(x -> x.getValue().entrySet().stream())
            .collect(Collectors.toMap(
                Map.Entry::getKey,
                Map.Entry::getValue,
                (oldValue, newValue) -> {
                    // Duplicate keys should not occur, throw an exception to signal this issue
                    throw new IllegalStateException(String.format("Duplicate key for values: %s and %s", oldValue, newValue));
                },
                HashMap::new
            ))
    );
}
@Test
public void testDescribeBrokerAndLogConfigs() throws Exception {
    // Verifies that describeConfigs() can fetch a BROKER resource and a
    // BROKER_LOGGER resource for the same node in a single call, and that
    // both returned futures complete successfully.
    ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
    ConfigResource brokerLogger = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Stub a successful (empty-config) response for both resources from node 0.
        env.kafkaClient().prepareResponseFrom(
            new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(
                successfulEmptyResult(broker),
                successfulEmptyResult(brokerLogger)))),
            env.cluster().nodeById(0));
        Map<ConfigResource, KafkaFuture<Config>> futures =
            env.adminClient().describeConfigs(asList(broker, brokerLogger)).values();
        assertEquals(new HashSet<>(asList(broker, brokerLogger)), futures.keySet());
        // Both lookups must complete without error.
        futures.get(broker).get();
        futures.get(brokerLogger).get();
    }
}

// Builds a NONE-error DescribeConfigsResult with no config entries for the given resource.
private static DescribeConfigsResponseData.DescribeConfigsResult successfulEmptyResult(ConfigResource resource) {
    return new DescribeConfigsResponseData.DescribeConfigsResult()
        .setResourceName(resource.name())
        .setResourceType(resource.type().id())
        .setErrorCode(Errors.NONE.code())
        .setConfigs(emptyList());
}
/**
 * Converts a 32-bit integer address and a prefix length into an IP prefix.
 *
 * @param address the IP address packed into an {@code int} (presumably IPv4 —
 *                see {@link IpAddress#valueOf(int)} for the exact interpretation)
 * @param prefixLength the prefix length in bits
 * @return the corresponding IP prefix
 */
public static IpPrefix valueOf(int address, int prefixLength) {
    IpAddress ipAddress = IpAddress.valueOf(address);
    return new IpPrefix(ipAddress, prefixLength);
}
@Test(expected = NullPointerException.class)
public void testInvalidValueOfNullAddress() {
    // Passing a null IpAddress must trigger a NullPointerException.
    final IpAddress nullAddress = null;
    IpPrefix.valueOf(nullAddress, 24);
}
@Override
public boolean register(String name, ProcNodeInterface node) {
    // Dynamic registration is not supported by this directory; the call is a
    // no-op that always reports failure to the caller.
    return false;
}
@Test
public void testRegister() {
    // DbsProcDir does not support dynamic registration, so register() must
    // always report failure.
    DbsProcDir dir = new DbsProcDir(globalStateMgr);
    Assert.assertFalse(dir.register("db1", new BaseProcDir()));
}
@Override
public void close() throws IOException {
    // Intentionally a no-op: users should not be able to actually close the
    // stream themselves — it is closed by the system.
    // TODO if we want to support async writes, this call could trigger a callback to the
    // snapshot context that a handle is available.
}
@Test
void testCloseNotPropagated() throws Exception {
    // Closing the keyed checkpoint stream must NOT close the wrapped delegate:
    // the system, not the user, owns the underlying stream's lifecycle.
    KeyedStateCheckpointOutputStream outer = createStream(new KeyGroupRange(0, 0));
    TestMemoryCheckpointOutputStream inner =
        (TestMemoryCheckpointOutputStream) outer.getDelegate();
    outer.close();
    assertThat(inner.isClosed()).isFalse();
}