focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Resolves the request tag for a servlet-style call by matching request headers
 * against the keys configured for this handler. Path, method name and query
 * parameters are accepted for interface compatibility but not consulted here.
 */
@Override
public Map<String, List<String>> getRequestTag(String path, String methodName, Map<String, List<String>> headers, Map<String, String[]> parameters, Keys keys) {
    final Set<String> keysToMatch = keys.getMatchKeys();
    return getRequestTag(headers, keysToMatch);
}
// Verifies getRequestTag copies only the headers whose names appear in matchKeys,
// and returns an empty map when both headers and matchKeys are absent.
@Test public void testGetRequestTag() { // Normal Map<String, List<String>> headers = new HashMap<>(); Set<String> matchKeys = new HashSet<>(); matchKeys.add("bar"); matchKeys.add("foo"); headers.put("bar", Collections.singletonList("bar1")); headers.put("foo", Collections.singletonList("foo1")); Map<String, List<String>> requestTag = handler.getRequestTag("", "", headers, null, new Keys(matchKeys, null)); Assert.assertNotNull(requestTag); Assert.assertEquals(2, requestTag.size()); Assert.assertEquals("bar1", requestTag.get("bar").get(0)); Assert.assertEquals("foo1", requestTag.get("foo").get(0)); // Test matchKeys as empty requestTag = handler.getRequestTag("", "", null, null, new Keys(null, null)); Assert.assertEquals(Collections.emptyMap(), requestTag); }
// Accessor for the wrapped ServletStream; returns the backing field as-is (no copy).
@Override public ServletStream stream() { return stream; }
// Verifies stream().output() exposes the exact output object the fixture was built with.
@Test public void test_output() { assertThat(underTest.stream().output()).isEqualTo(output); }
/**
 * Writes the low 32 bits of {@code data} to {@code out} in little-endian order
 * (least-significant byte first). Bits above bit 31 are ignored.
 */
public static void writeUnsignedIntLittleEndian(long data, ByteArrayOutputStream out) {
    for (int shift = 0; shift <= 24; shift += 8) {
        // OutputStream.write(int) keeps only the low 8 bits, so the byte cast
        // selects exactly the byte at this position.
        out.write((byte) (data >>> shift));
    }
}
// 50_332_648 == 0x030003E8, so the little-endian byte order is E8 (-24), 03, 00, 03.
@Test public void testWriteUnsignedIntLittleEndian() { ByteArrayOutputStream out = new ByteArrayOutputStream(); ByteHelper.writeUnsignedIntLittleEndian(50_332_648L, out); Assert.assertArrayEquals(new byte[] {-24, 3, 0, 3}, out.toByteArray()); }
// Validates the extension context: for every business, checks that its used abilities
// have no duplicate codes or priorities (the business's own code/priority seed the
// duplicate sets, so an ability may not reuse the business code either), and collects
// every extension interface implemented by businesses and their abilities. Finally
// verifies the default ability implements every collected extension interface.
// NOTE(review): abilityManager.getAbility is presumably what raises "ability X not
// found" for unknown codes (see the companion test) — confirm in AbilityManager.
@Override public void validateContext() throws ExtensionException { List<Class<?>> allExtensionList = new ArrayList<>(); for (IBusiness<T> business : businessManager.listAllBusinesses()) { if (business.usedAbilities() == null) { continue; } allExtensionList.addAll(business.implementsExtensions()); Set<String> codeSet = new HashSet<>(); Set<Integer> prioritySet = new HashSet<>(); codeSet.add(business.code()); prioritySet.add(business.priority()); for (UsedAbility usedAbility : business.usedAbilities()) { IAbility<T> ability = abilityManager.getAbility(usedAbility.code()); allExtensionList.addAll(ability.implementsExtensions()); if (codeSet.contains(usedAbility.code())) { throw new ExtensionException("business " + business.code() + " used ability code " + usedAbility.code() + " duplicate"); } if (prioritySet.contains(usedAbility.priority())) { throw new ExtensionException("business " + business.code() + " used ability priority " + usedAbility.priority() + " duplicate"); } codeSet.add(usedAbility.code()); prioritySet.add(usedAbility.priority()); } } if (!allExtensionList.isEmpty()) { IAbility<T> defaultAbility = abilityManager.getAbility(BaseDefaultAbility.DEFAULT_CODE); List<String> notImplemented = new ArrayList<>(); for (Class<?> clazz : allExtensionList) { if (!clazz.isAssignableFrom(defaultAbility.getClass())) { notImplemented.add(clazz.getName()); } } if (!notImplemented.isEmpty()) { throw new ExtensionException("default ability should implements all extension interface, but current default ability not implements extension " + Arrays.toString(notImplemented.stream().distinct().toArray())); } } }
// Walks validateContext through each failure mode in turn — unknown ability code,
// duplicate used-ability code, duplicate used-ability priority, default ability not
// implementing every extension — and finishes with a fully valid registration.
@Test public void testValidateContext() throws Exception { ExtensionException e; DefaultExtContext<Object> context = new DefaultExtContext<>(false, false); context.validateContext(); context.registerBusiness(new BusinessUnknownAbilityCode()); e = assertThrows(ExtensionException.class, context::validateContext); assertEquals("ability Unknown not found", e.getMessage()); context = new DefaultExtContext<>(false, false); context.registerAbility(new AbilityM()); context.registerBusiness(new BusinessUsedAbilityCodeDuplicate()); e = assertThrows(ExtensionException.class, context::validateContext); assertEquals("business BusinessUsedAbilityCodeDuplicate used ability code AbilityM duplicate", e.getMessage()); context = new DefaultExtContext<>(false, false); context.registerAbility(new AbilityM()); context.registerAbility(new AbilityN()); context.registerBusiness(new BusinessUsedAbilityPriorityDuplicate()); e = assertThrows(ExtensionException.class, context::validateContext); assertEquals("business BusinessUsedAbilityPriorityDuplicate used ability priority 200 duplicate", e.getMessage()); context = new DefaultExtContext<>(false, false); context.registerAbility(new AbilityM()); context.registerAbility(new AbilityN()); context.registerAbility(new ExtDefaultAbilityInvalid()); context.registerBusiness(new BusinessUsedAbilityPriority()); e = assertThrows(ExtensionException.class, context::validateContext); assertEquals("default ability should implements all extension interface, but current default ability not implements extension [io.github.xiaoshicae.extension.core.ExtA]", e.getMessage()); context = new DefaultExtContext<>(false, false); context.registerAbility(new AbilityM()); context.registerAbility(new AbilityN()); context.registerAbility(new ExtDefaultAbility()); context.registerBusiness(new BusinessUsedAbilityPriority()); }
// Locates the hosts serving each partition of the state store for a pull query.
// Single simple key (and not a range scan) -> key-specific metadata lookup;
// otherwise all partitions are fetched (temporary workaround for issue #7174,
// which disabled per-key metadata for multi-key lookups). Each partition's active
// and standby hosts are then ordered/filtered by the routing filter.
// NOTE(review): the MaterializationException message below reads "...view the
// status of your by issuing <DESCRIBE foo>." — the sentence is garbled (likely
// meant "view the status of your query by issuing DESCRIBE <query>"); left
// unchanged here since callers/tests may match on the exact text.
@Override public List<KsqlPartitionLocation> locate( final List<KsqlKey> keys, final RoutingOptions routingOptions, final RoutingFilterFactory routingFilterFactory, final boolean isRangeScan ) { if (isRangeScan && keys.isEmpty()) { throw new IllegalStateException("Query is range scan but found no range keys."); } final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder(); final Set<Integer> filterPartitions = routingOptions.getPartitions(); final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty() : Optional.of(Sets.newHashSet(keys)); // Depending on whether this is a key-based lookup, determine which metadata method to use. // If we don't have keys, find the metadata for all partitions since we'll run the query for // all partitions of the state store rather than a particular one. //For issue #7174. Temporarily turn off metadata finding for a partition with keys //if there are more than one key. final List<PartitionMetadata> metadata; if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) { metadata = getMetadataForKeys(keys, filterPartitions); } else { metadata = getMetadataForAllPartitions(filterPartitions, keySet); } if (metadata.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Cannot determine which host contains the required partitions to serve the pull query. \n" + "The underlying persistent query may be restarting (e.g. as a result of " + "ALTER SYSTEM) view the status of your by issuing <DESCRIBE foo>."); LOG.debug(materializationException.getMessage()); throw materializationException; } // Go through the metadata and group them by partition. 
for (PartitionMetadata partitionMetadata : metadata) { LOG.debug("Handling pull query for partition {} of state store {}.", partitionMetadata.getPartition(), storeName); final HostInfo activeHost = partitionMetadata.getActiveHost(); final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts(); final int partition = partitionMetadata.getPartition(); final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys(); LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition); // For a given partition, find the ordered, filtered list of hosts to consider final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory, activeHost, standByHosts, partition); partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts)); } return partitionLocations.build(); }
// Ignored pending issue #7174: locate() currently routes multi-key lookups through
// getMetadataForAllPartitions instead of grouping keys per partition as asserted here.
@Ignore @Test //For issue #7174. Temporarily ignore this test. It will call getMetadataForAllPartitions(). //Formerly it called getMetadataForKeys(). public void shouldGroupKeysByLocation() { // Given: getActiveStandbyMetadata(SOME_KEY, 0, ACTIVE_HOST_INFO, STANDBY_HOST_INFO1); getActiveStandbyMetadata(SOME_KEY1, 1, STANDBY_HOST_INFO1, ACTIVE_HOST_INFO); getActiveStandbyMetadata(SOME_KEY2, 0, ACTIVE_HOST_INFO, STANDBY_HOST_INFO1); getActiveStandbyMetadata(SOME_KEY3, 2, ACTIVE_HOST_INFO, STANDBY_HOST_INFO1); // When: final List<KsqlPartitionLocation> result = locator.locate( ImmutableList.of(KEY, KEY1, KEY2, KEY3), routingOptions, routingFilterFactoryStandby, false); // Then: assertThat(result.size(), is(3)); assertThat(result.get(0).getKeys().get(), contains(KEY, KEY2)); List<KsqlNode> nodeList = result.get(0).getNodes(); assertThat(nodeList.size(), is(2)); assertThat(nodeList.get(0), is(activeNode)); assertThat(nodeList.get(1), is(standByNode1)); assertThat(result.get(1).getKeys().get(), contains(KEY1)); nodeList = result.get(1).getNodes(); assertThat(nodeList.size(), is(2)); assertThat(nodeList.get(0), is(standByNode1)); assertThat(nodeList.get(1), is(activeNode)); assertThat(result.get(2).getKeys().get(), contains(KEY3)); nodeList = result.get(2).getNodes(); assertThat(nodeList.size(), is(2)); assertThat(nodeList.get(0), is(activeNode)); assertThat(nodeList.get(1), is(standByNode1)); }
/**
 * Reads {@code templateFile} as text using the JVM default charset and renders it
 * via the string-based overload.
 *
 * @throws IOException if the template file cannot be read
 */
public String render(File templateFile) throws IOException {
    final String templateContents = FileUtils.readFileToString(templateFile, Charset.defaultCharset());
    return render(templateContents);
}
// A template {% if %} block whose condition holds should render only its body;
// the surrounding tag lines are stripped from the output.
@Test void testRenderWithStrip() { // given K8sSpecTemplate template = new K8sSpecTemplate(); template.put("test", "test"); // when String spec = template.render( " {% if test == \"test\" %}\n" + " After commit\n" + " {% endif %}\n"); // then assertEquals(" After commit\n", spec); }
// Materializes the lazily parsed Accept-header media types into a List,
// preserving the order produced by parseAcceptTypeStream.
public static List<String> parseAcceptType(final String header) { return parseAcceptTypeStream(header).collect(Collectors.toList()); }
@Test(dataProvider = "sampleValidAcceptHeaders") public void testParseAcceptTypes(String header, List<String> supportedTypes) { Assert.assertEquals(MIMEParse.parseAcceptType(header), supportedTypes); }
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"}) public static boolean isScalablePushQuery( final Statement statement, final KsqlExecutionContext ksqlEngine, final KsqlConfig ksqlConfig, final Map<String, Object> overrides ) { if (!isPushV2Enabled(ksqlConfig, overrides)) { return false; } if (! (statement instanceof Query)) { return false; } final Query query = (Query) statement; final SourceFinder sourceFinder = new SourceFinder(); sourceFinder.process(query.getFrom(), null); // It will be present if it's not a join, which we don't handle if (!sourceFinder.getSourceName().isPresent()) { return false; } // Find all of the writers to this particular source. final SourceName sourceName = sourceFinder.getSourceName().get(); final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName); // See if the config or override have set the stream to be "latest" final boolean isLatest = isLatest(ksqlConfig, overrides); // Cannot be a pull query, i.e. must be a push return !query.isPullQuery() // Group by is not supported && !query.getGroupBy().isPresent() // Windowing is not supported && !query.getWindow().isPresent() // Having clause is not supported && !query.getHaving().isPresent() // Partition by is not supported && !query.getPartitionBy().isPresent() // There must be an EMIT CHANGES clause && (query.getRefinement().isPresent() && query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES) // Must be reading from "latest" && isLatest // We only handle a single sink source at the moment from a CTAS/CSAS && upstreamQueries.size() == 1 // ROWPARTITION and ROWOFFSET are not currently supported in SPQs && !containsDisallowedColumns(query); }
// With KSQL_QUERY_PUSH_V2_ENABLED=false, isScalablePushQuery must short-circuit
// to false even for an otherwise-qualifying query with auto.offset.reset=latest.
@Test public void isScalablePushQuery_false_configDisabled() { try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) { // When: expectIsSPQ(ColumnName.of("foo"), columnExtractor); when(ksqlConfig.getBoolean(KsqlConfig.KSQL_QUERY_PUSH_V2_ENABLED)).thenReturn(false); // Then: assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig, ImmutableMap.of(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")), equalTo(false)); } }
/**
 * Returns a copy of {@code uri} whose {@code queryParam} entry is replaced with
 * {@code values}. All other parameters are carried over from {@code parameters}
 * and the full set is re-emitted in sorted order for the given protocol version.
 */
public static URI replaceQueryParam(URI uri, String queryParam, DataComplex values, DataMap parameters, ProtocolVersion version) {
    // Build the merged parameter map first: existing params, then the override.
    final DataMap mergedParams = new DataMap();
    mergedParams.putAll(parameters);
    mergedParams.put(queryParam, values);

    final UriBuilder builder = UriBuilder.fromPath(uri.getPath());
    URIParamUtils.addSortedParams(builder, mergedParams, version);
    return builder.build();
}
// Replaces the "criteria" parameter with a single-criterion list and checks the
// rebuilt query string under both protocol v1 (indexed bracket keys) and v2
// (List(...) syntax), while other params stay intact and sorted.
@Test public void replaceQueryParam() { DataMap queryParams = new DataMap(); queryParams.put("bq", "batch_finder"); queryParams.put("page", "1"); queryParams.put("count", "10"); DataMap criteria1 = new DataMap(); criteria1.put("criteria1_fieldA", "valueA"); criteria1.put("criteria1_fieldB", "valueB"); criteria1.put("criteria1_fieldC", "valueC"); DataMap criteria2 = new DataMap(); criteria2.put("criteria2_fieldA", "valueA"); criteria2.put("criteria2_fieldB", "valueB"); criteria2.put("criteria2_fieldC", "valueC"); DataList paramList = new DataList(); paramList.add(criteria1); paramList.add(criteria2); queryParams.put("criteria", paramList); UriBuilder uriBuilder = new UriBuilder(); URIParamUtils.addSortedParams(uriBuilder, queryParams); URI uri = uriBuilder.build(); DataList newParamList = new DataList(); newParamList.add(criteria1); URI replacedURIV1 = URIParamUtils.replaceQueryParam(uri, "criteria", newParamList, queryParams, AllProtocolVersions.RESTLI_PROTOCOL_1_0_0.getProtocolVersion()); URI replacedURIV2 = URIParamUtils.replaceQueryParam(uri, "criteria", newParamList, queryParams, AllProtocolVersions.RESTLI_PROTOCOL_2_0_0.getProtocolVersion()); String expectedURI = "bq=batch_finder&count=10&criteria=List((criteria1_fieldA:valueA,criteria1_fieldB:valueB," + "criteria1_fieldC:valueC),(criteria2_fieldA:valueA,criteria2_fieldB:valueB,criteria2_fieldC:valueC))&page=1"; String expectedNewURIV2 = "bq=batch_finder&count=10&criteria=List((criteria1_fieldA:valueA,criteria1_fieldB:valueB," + "criteria1_fieldC:valueC))&page=1"; String expectedNewURIV1 = "bq=batch_finder&count=10&criteria[0].criteria1_fieldA=valueA" + "&criteria[0].criteria1_fieldB=valueB&criteria[0].criteria1_fieldC=valueC&page=1"; Assert.assertEquals(uri.getQuery(), expectedURI); Assert.assertEquals(replacedURIV2.getQuery(), expectedNewURIV2); Assert.assertEquals(replacedURIV1.getQuery(), expectedNewURIV1); }
/**
 * Intercepts Executor#query. MyBatis invokes plugins with either the 4-argument
 * or the 6-argument query signature; for the short form the bound SQL and cache
 * key are derived here before delegating to the full 6-argument overload.
 */
@Override
public Object intercept(final Invocation invocation) throws Throwable {
    final Object[] args = invocation.getArgs();
    final MappedStatement mappedStatement = (MappedStatement) args[0];
    final Object parameterObject = args[1];
    final RowBounds bounds = (RowBounds) args[2];
    final ResultHandler<?> handler = (ResultHandler<?>) args[3];
    final Executor targetExecutor = (Executor) invocation.getTarget();

    final CacheKey key;
    final BoundSql sql;
    if (args.length == 4) {
        // Short signature: compute the bound SQL and cache key ourselves.
        sql = mappedStatement.getBoundSql(parameterObject);
        key = targetExecutor.createCacheKey(mappedStatement, parameterObject, bounds, sql);
    } else {
        // Full signature: both were already supplied by the caller.
        key = (CacheKey) args[4];
        sql = (BoundSql) args[5];
    }
    return targetExecutor.query(mappedStatement, parameterObject, bounds, handler, key, sql);
}
// Exercises both interceptor paths: the 4-arg invocation (cache key / bound SQL
// derived inside intercept) and the 6-arg invocation (both supplied by MyBatis).
@Test public void interceptTest() throws SQLException { final OpenGaussSQLQueryInterceptor openGaussSQLQueryInterceptor = new OpenGaussSQLQueryInterceptor(); final Invocation invocation = mock(Invocation.class); Object[] args = new Object[4]; args[0] = mock(MappedStatement.class); args[1] = mock(Object.class); args[2] = mock(RowBounds.class); args[3] = mock(ResultHandler.class); when(invocation.getArgs()).thenReturn(args); final Executor executor = mock(Executor.class); when(invocation.getTarget()).thenReturn(executor); when(executor.createCacheKey(any(), any(), any(), any())).thenReturn(mock(CacheKey.class)); when(executor.query(any(), any(), any(), any(), any(), any())).thenReturn(new ArrayList<>()); Assertions.assertDoesNotThrow(() -> openGaussSQLQueryInterceptor.intercept(invocation)); args = new Object[6]; args[0] = mock(MappedStatement.class); args[1] = mock(Object.class); args[2] = mock(RowBounds.class); args[3] = mock(ResultHandler.class); args[4] = mock(CacheKey.class); args[5] = mock(BoundSql.class); when(invocation.getArgs()).thenReturn(args); Assertions.assertDoesNotThrow(() -> openGaussSQLQueryInterceptor.intercept(invocation)); }
// Blocking put with a per-entry TTL: delegates to putAsync and waits for the
// result — presumably the previous value mapped to the key; confirm against the
// interface's contract.
@Override public V put(K key, V value, Duration ttl) { return get(putAsync(key, value, ttl)); }
// Overwriting an entry must reset its native TTL: the value survives past the
// first 1s deadline (refreshed at t≈800ms) and expires ~1s after the overwrite.
// NOTE(review): timing-based with 200ms margins — potentially flaky on slow CI.
@Test public void testExpireOverwrite() throws InterruptedException, ExecutionException { RMapCacheNative<String, Integer> map = redisson.getMapCacheNative("simple"); map.put("123", 3, Duration.ofSeconds(1)); Thread.sleep(800); map.put("123", 3, Duration.ofSeconds(1)); Thread.sleep(800); Assertions.assertEquals(3, (int)map.get("123")); Thread.sleep(200); Assertions.assertFalse(map.containsKey("123")); map.destroy(); }
/**
 * Returns the first non-null element of {@code inputs}.
 *
 * @throws HadoopIllegalArgumentException if every element is null
 */
static <T> T findFirstValidInput(T[] inputs) {
    for (int i = 0; i < inputs.length; i++) {
        final T candidate = inputs[i];
        if (candidate != null) {
            return candidate;
        }
    }
    throw new HadoopIllegalArgumentException(
        "Invalid inputs are found, all being null");
}
// findFirstValidInput must skip the leading nulls and return the element at
// index 8, the only populated slot.
@Test public void testFindFirstValidInput() { byte[][] inputs = new byte[numInputs][]; inputs[8] = ByteBuffer.allocate(4).putInt(1234).array(); byte[] firstValidInput = CoderUtil.findFirstValidInput(inputs); assertEquals(firstValidInput, inputs[8]); }
/**
 * Collects the distinct config tag names of every registered {@code Task}
 * implementer, in registry iteration order. For each task class the
 * {@code AttributeAwareConfigTag} value is considered first, then the
 * {@code ConfigTag} value; duplicates are skipped.
 */
public static List<String> allTasks(ConfigElementImplementationRegistry registry) {
    final List<String> taskTags = new ArrayList<>();
    for (Class<? extends Task> taskClass : registry.implementersOf(Task.class)) {
        final AttributeAwareConfigTag attributeAware = taskClass.getAnnotation(AttributeAwareConfigTag.class);
        if (attributeAware != null && !taskTags.contains(attributeAware.value())) {
            taskTags.add(attributeAware.value());
        }
        final ConfigTag configTag = taskClass.getAnnotation(ConfigTag.class);
        if (configTag != null && !taskTags.contains(configTag.value())) {
            taskTags.add(configTag.value());
        }
    }
    return taskTags;
}
// Six registered task implementers yield five distinct tags — presumably two of
// them share the "fetchartifact" tag; confirm against the task classes.
@Test public void shouldGetAllTasks() { ConfigElementImplementationRegistry registry = new ConfigElementImplementationRegistry(); registry.registerImplementer(Task.class, AntTask.class, ExecTask.class, NantTask.class, RakeTask.class, FetchTask.class, FetchPluggableArtifactTask.class); List<String> tasks = ConfigUtil.allTasks(registry); assertThat(tasks.size(), is(5)); assertThat(tasks, hasItem("ant")); assertThat(tasks, hasItem("exec")); assertThat(tasks, hasItem("nant")); assertThat(tasks, hasItem("rake")); assertThat(tasks, hasItem("fetchartifact")); }
@SuppressWarnings("WeakerAccess") public Map<String, Object> getMainConsumerConfigs(final String groupId, final String clientId, final int threadIdx) { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); // Get main consumer override configs final Map<String, Object> mainConsumerProps = originalsWithPrefix(MAIN_CONSUMER_PREFIX); consumerProps.putAll(mainConsumerProps); // this is a hack to work around StreamsConfig constructor inside StreamsPartitionAssignor to avoid casting consumerProps.put(APPLICATION_ID_CONFIG, groupId); // add group id, client id with stream client id prefix, and group instance id consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId); final String groupInstanceId = (String) consumerProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG); // Suffix each thread consumer with thread.id to enforce uniqueness of group.instance.id. if (groupInstanceId != null) { consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId + "-" + threadIdx); } // add configs required for stream partition assignor consumerProps.put(UPGRADE_FROM_CONFIG, getString(UPGRADE_FROM_CONFIG)); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ACCEPTABLE_RECOVERY_LAG_CONFIG, getLong(ACCEPTABLE_RECOVERY_LAG_CONFIG)); consumerProps.put(MAX_WARMUP_REPLICAS_CONFIG, getInt(MAX_WARMUP_REPLICAS_CONFIG)); consumerProps.put(PROBING_REBALANCE_INTERVAL_MS_CONFIG, getLong(PROBING_REBALANCE_INTERVAL_MS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamsPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); 
consumerProps.put(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, getString(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, getList(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG)); consumerProps.put(TASK_ASSIGNOR_CLASS_CONFIG, getString(TASK_ASSIGNOR_CLASS_CONFIG)); // disable auto topic creation consumerProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false"); // verify that producer batch config is no larger than segment size, then add topic configs required for creating topics final Map<String, Object> topicProps = originalsWithPrefix(TOPIC_PREFIX, false); final Map<String, Object> producerProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (topicProps.containsKey(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)) && producerProps.containsKey(ProducerConfig.BATCH_SIZE_CONFIG)) { final int segmentSize = Integer.parseInt(topicProps.get(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)).toString()); final int batchSize = Integer.parseInt(producerProps.get(ProducerConfig.BATCH_SIZE_CONFIG).toString()); if (segmentSize < batchSize) { throw new IllegalArgumentException(String.format("Specified topic segment size %d is is smaller than the configured producer batch size %d, this will cause produced batch not able to be appended to the topic", segmentSize, batchSize)); } } consumerProps.putAll(topicProps); return consumerProps; }
// The main consumer config must carry internal.leave.group.on.close=false.
// NOTE(review): getMainConsumerConfigs itself never sets this key, so it
// presumably comes from getCommonConsumerConfigs — confirm there.
@Test public void shouldSetInternalLeaveGroupOnCloseConfigToFalseInConsumer() { final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx); assertThat(consumerConfigs.get("internal.leave.group.on.close"), is(false)); }
// Provisions a data stream end to end: updates the index template (timestamp
// field + mappings), creates the stream, applies the ISM policy, and sets the
// replica count. The template is updated first — presumably so the new stream
// picks it up on creation; confirm against the adapter's semantics.
@Override public void createDataStream(String dataStreamName, String timestampField, Map<String, Map<String, String>> mappings, Policy ismPolicy) { updateDataStreamTemplate(dataStreamName, timestampField, mappings); dataStreamAdapter.createDataStream(dataStreamName); dataStreamAdapter.applyIsmPolicy(dataStreamName, ismPolicy); dataStreamAdapter.setNumberOfReplicas(dataStreamName, replicas); }
@SuppressWarnings("unchecked") @Test public void fieldTypesCreated() { final Map<String, Map<String, String>> mappings = new HashMap<>(); String customField = "field1"; mappings.put(customField, Map.of("type", "keyword")); String streamName = "teststream"; dataStreamService.createDataStream(streamName, "ts", mappings, mock(Policy.class)); ArgumentCaptor<Template> templateCaptor = ArgumentCaptor.forClass(Template.class); verify(dataStreamAdapter).ensureDataStreamTemplate(anyString(), templateCaptor.capture(), anyString()); Map<String, Object> fieldMappings = (Map<String, Object>) templateCaptor.getValue().mappings().get("properties"); Map<String, String> timestampMapping = (Map<String, String>) fieldMappings.get(customField); assertThat(timestampMapping).isNotNull(); assertThat(timestampMapping.get("type")).isEqualTo("keyword"); ArgumentCaptor<IndexFieldTypesDTO> fieldTypes = ArgumentCaptor.forClass(IndexFieldTypesDTO.class); verify(indexFieldTypesService).upsert(fieldTypes.capture()); IndexFieldTypesDTO fieldTypesDto = fieldTypes.getValue(); assertThat(fieldTypesDto.indexName()).isEqualTo(streamName); assertThat(fieldTypesDto.indexSetId()).isEqualTo(Stream.DATASTREAM_PREFIX + streamName); assertThat(fieldTypesDto.fields()).hasSize(2); }
// Copies all bytes from the input stream to the output stream using the default
// BUFFER_SIZE, returning whatever the buffered overload returns — presumably
// the number of bytes copied; confirm against the three-arg overload.
public static long write(InputStream is, OutputStream os) throws IOException { return write(is, os, BUFFER_SIZE); }
// Exercises a writer/text overload of IOUtils.write (distinct from the stream
// overload above) and asserts the returned count equals the source text length.
@Test void testWrite3() throws Exception { assertThat((int) IOUtils.write(writer, TEXT), equalTo(TEXT.length())); }
// Starts the worker task: wires the (lazily supplied) error reporters into the
// retry/tolerance operator, runs subclass initialization and start, then
// notifies the status listener that the task is up — in that order.
void doStart() { retryWithToleranceOperator.reporters(errorReportersSupplier.get()); initializeAndStart(); statusListener.onStartup(id); }
// doStart must fetch the error reporters from the supplier and hand that exact
// list to retryWithToleranceOperator.reporters(...).
@Test public void testErrorReportersConfigured() { ConnectorTaskId taskId = new ConnectorTaskId("foo", 0); WorkerTask<Object, SourceRecord> workerTask = new TestWorkerTask(taskId, statusListener, TargetState.STARTED, loader, metrics, errorHandlingMetrics, retryWithToleranceOperator, transformationChain, errorReportersSupplier, Time.SYSTEM, statusBackingStore); List<ErrorReporter<Object>> errorReporters = new ArrayList<>(); when(errorReportersSupplier.get()).thenReturn(errorReporters); workerTask.doStart(); verify(retryWithToleranceOperator).reporters(errorReporters); }
// Router-side refreshQueues: rejects a null request, then fans the call out to
// the active subclusters (or the single subcluster named in the request) via the
// concurrent invoker. Any non-empty response set counts as success and records
// latency; every failure path bumps the failed-retrieved counter before
// throwing. Note logAndThrowException presumably always throws, so the null
// check does not fall through — confirm in RouterServerUtil.
@Override public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) throws StandbyException, YarnException, IOException { // parameter verification. if (request == null) { routerMetrics.incrRefreshQueuesFailedRetrieved(); RouterServerUtil.logAndThrowException("Missing RefreshQueues request.", null); } // call refreshQueues of activeSubClusters. try { long startTime = clock.getTime(); RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod( new Class[] {RefreshQueuesRequest.class}, new Object[] {request}); String subClusterId = request.getSubClusterId(); Collection<RefreshQueuesResponse> refreshQueueResps = remoteMethod.invokeConcurrent(this, RefreshQueuesResponse.class, subClusterId); // If we get the return result from refreshQueueResps, // it means that the call has been successful, // and the RefreshQueuesResponse method can be reconstructed and returned. if (CollectionUtils.isNotEmpty(refreshQueueResps)) { long stopTime = clock.getTime(); routerMetrics.succeededRefreshQueuesRetrieved(stopTime - startTime); return RefreshQueuesResponse.newInstance(); } } catch (YarnException e) { routerMetrics.incrRefreshQueuesFailedRetrieved(); RouterServerUtil.logAndThrowException(e, "Unable to refreshQueue due to exception. " + e.getMessage()); } routerMetrics.incrRefreshQueuesFailedRetrieved(); throw new YarnException("Unable to refreshQueue."); }
// refreshQueues against the existing subcluster SC-1 succeeds; a non-existent
// subcluster id must surface a YarnException naming the inactive subcluster.
@Test public void testSC1RefreshQueues() throws Exception { // We will test 2 cases: // case 1, test the existing subCluster (SC-1). // case 2, test the non-exist subCluster. String existSubCluster = "SC-1"; RefreshQueuesRequest request = RefreshQueuesRequest.newInstance(existSubCluster); interceptor.refreshQueues(request); String notExistsSubCluster = "SC-NON"; RefreshQueuesRequest request1 = RefreshQueuesRequest.newInstance(notExistsSubCluster); LambdaTestUtils.intercept(YarnException.class, "subClusterId = SC-NON is not an active subCluster.", () -> interceptor.refreshQueues(request1)); }
// Pre-update hook for ALTER READWRITE_SPLITTING RULE: delegates all validation
// of the statement against the current rule configuration to the checker, which
// raises on invalid alterations.
@Override public void checkBeforeUpdate(final AlterReadwriteSplittingRuleStatement sqlStatement) { ReadwriteSplittingRuleStatementChecker.checkAlteration(database, sqlStatement.getRules(), rule.getConfiguration()); }
// Altering a rule to reuse an actual data source already held by another rule
// must raise DuplicateReadwriteSplittingActualDataSourceException.
@Test void assertCheckSQLStatementWithDuplicateReadResourceNamesInStatement() { ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getResourceMetaData()).thenReturn(resourceMetaData); ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class); when(rule.getConfiguration()).thenReturn(createCurrentRuleConfigurationWithMultipleRules()); executor.setRule(rule); assertThrows(DuplicateReadwriteSplittingActualDataSourceException.class, () -> executor.checkBeforeUpdate(createSQLStatementWithDuplicateReadResourceNames("readwrite_ds_0", "readwrite_ds_1", "TEST"))); }
// Maintains spare host capacity: no-ops while the node repo is unhealthy, skips
// dynamically provisioned zones (capacity can be provisioned on demand), reports
// and retires overcommitted hosts, then simulates the worst-case single-host
// loss. If that simulation leaves zero spare capacity it attempts a mitigation
// move; spare capacity is reported optimistically (+1) while a mitigation step
// is in flight to avoid premature alerting. Returns 1.0 on success, 0.0 when
// mitigation failed.
@Override protected double maintain() { if ( ! nodeRepository().nodes().isWorking()) return 0.0; // Don't need to maintain spare capacity in dynamically provisioned zones; can provision more on demand. if (nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0; NodeList allNodes = nodeRepository().nodes().list(); CapacityChecker capacityChecker = new CapacityChecker(allNodes); List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts(); metric.set(ConfigServerMetrics.OVERCOMMITTED_HOSTS.baseName(), overcommittedHosts.size(), null); retireOvercommitedHosts(allNodes, overcommittedHosts); boolean success = true; Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1; if (spareHostCapacity == 0) { List<Move> mitigation = findMitigation(failurePath.get()); if (execute(mitigation, failurePath.get())) { // We succeeded or are in the process of taking a step to mitigate. // Report with the assumption this will eventually succeed to avoid alerting before we're stuck spareHostCapacity++; } else { success = false; } } metric.set(ConfigServerMetrics.SPARE_HOST_CAPACITY.baseName(), spareHostCapacity, null); } return success ? 1.0 : 0.0; }
// A multi-step mitigation scenario: the maintainer takes only the first move in
// one run — one deployment activation, one node retired, and spare host
// capacity reported as 1 (optimistic +1 while the step is in flight).
@Test public void testMultipleMovesAreNeeded() { // Moving application id 1 and 2 to the same nodes frees up spares for application 0 // so that it can be moved from size 12 to size 10 hosts, clearing up spare room for the size 12 application var tester = new SpareCapacityMaintainerTester(); tester.addHosts(4, new NodeResources(12, 120, 1200, 1.2)); tester.addHosts(4, new NodeResources(10, 100, 1000, 1)); tester.addNodes(0, 2, new NodeResources(10, 100, 1000, 1.0), 0); tester.addNodes(1, 2, new NodeResources(12, 120, 1200, 1.2), 2); tester.addNodes(2, 2, new NodeResources(5, 50, 500, 0.5), 4); tester.addNodes(3, 2, new NodeResources(5, 50, 500, 0.5), 6); tester.maintainer.maintain(); assertEquals(1, tester.deployer.activations); assertEquals(1, tester.nodeRepository.nodes().list().retired().size()); assertEquals(1, tester.metric.values.get("spareHostCapacity")); }
static String formatRequestBody(String scope) throws IOException { try { StringBuilder requestParameters = new StringBuilder(); requestParameters.append("grant_type=client_credentials"); if (scope != null && !scope.trim().isEmpty()) { scope = scope.trim(); String encodedScope = URLEncoder.encode(scope, StandardCharsets.UTF_8.name()); requestParameters.append("&scope=").append(encodedScope); } return requestParameters.toString(); } catch (UnsupportedEncodingException e) { // The world has gone crazy! throw new IOException(String.format("Encoding %s not supported", StandardCharsets.UTF_8.name())); } }
// A plain scope yields the grant_type body plus the URL-encoded scope parameter.
@Test public void testFormatRequestBody() throws IOException { String expected = "grant_type=client_credentials&scope=scope"; String actual = HttpAccessTokenRetriever.formatRequestBody("scope"); assertEquals(expected, actual); }
/**
 * Parses {@code str} as a signed decimal integer.
 *
 * @param str the text to parse; may be null
 * @param def the fallback returned when {@code str} is null or not a valid int
 * @return the parsed value, or {@code def} for null/unparseable input
 */
public static int toInt( String str, int def ) {
  if ( str == null ) {
    return def;
  }
  try {
    return Integer.parseInt( str );
  } catch ( NumberFormatException e ) {
    // Catch only the parse failure; any other exception here would hide a real bug.
    return def;
  }
}
// toInt must fall back to the default for trailing-garbage, empty and null input.
@Test public void testToInt() { assertEquals( 123, Const.toInt( "123", -12 ) ); assertEquals( -12, Const.toInt( "123f", -12 ) ); assertEquals( -12, Const.toInt( "", -12 ) ); assertEquals( -12, Const.toInt( null, -12 ) ); }
// S3 login: selects the client credentials (anonymous -> none, valid session
// tokens -> AWSSessionCredentials, otherwise basic access/secret key), skips
// verification if a previous authentication already succeeded, and otherwise
// verifies connectivity by resolving the home path's bucket region. A resolved
// non-unknown region is logged (the host-property write is commented out);
// AccessDenied/Interoperability failures fall back to a plain home lookup.
@Override public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { final Credentials credentials = authentication.get(); if(credentials.isAnonymousLogin()) { if(log.isDebugEnabled()) { log.debug(String.format("Connect with no credentials to %s", host)); } client.setProviderCredentials(null); } else { if(credentials.getTokens().validate()) { if(log.isDebugEnabled()) { log.debug(String.format("Connect with session credentials to %s", host)); } client.setProviderCredentials(new AWSSessionCredentials( credentials.getTokens().getAccessKeyId(), credentials.getTokens().getSecretAccessKey(), credentials.getTokens().getSessionToken())); } else { if(log.isDebugEnabled()) { log.debug(String.format("Connect with basic credentials to %s", host)); } client.setProviderCredentials(new AWSCredentials(credentials.getUsername(), credentials.getPassword())); } } if(host.getCredentials().isPassed()) { log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this)); return; } try { final Path home = new DelegatingHomeFeature(new DefaultPathHomeFeature(host)).find(); final Location.Name location = new S3LocationFeature(S3Session.this, regions).getLocation(home); if(log.isDebugEnabled()) { log.debug(String.format("Retrieved region %s", location)); } if(!Location.unknown.equals(location)) { if(log.isDebugEnabled()) { log.debug(String.format("Set default region to %s determined from %s", location, home)); } // host.setProperty("s3.location", location.getIdentifier()); } } catch(AccessDeniedException | InteroperabilityException e) { log.warn(String.format("Failure %s querying region", e)); final Path home = new DefaultHomeFinderService(this).find(); if(log.isDebugEnabled()) { log.debug(String.format("Retrieved %s", home)); } } }
// Connecting with hard-coded (long-expired) temporary session tokens must open the
// transport successfully but fail the login step with ExpiredTokenException.
@Test
public void testConnectSessionTokenStatic() throws Exception {
    final S3Protocol protocol = new S3Protocol();
    final Host host = new Host(protocol, protocol.getDefaultHostname(), new Credentials()
            .withTokens(new TemporaryAccessTokens(
                    "ASIA5RMYTHDIR37CTCXI",
                    "TsnhChH4FlBt7hql2KnzrwNizmktJnO8YzDQwFqx",
                    "FQoDYXdzEN3//////////wEaDLAz85HLZTQ7zu6/OSKrAfwLewUMHKaswh5sXv50BgMwbeKfCoMATjagvM+KV9++z0I6rItmMectuYoEGCOcnWHKZxtvpZAGcjlvgEDPw1KRYu16riUnd2Yo3doskqAoH0dlL2nH0eoj0d81H5e6IjdlGCm1E3K3zQPFLfMbvn1tdDQR1HV8o9eslmxo54hWMY2M14EpZhcXQMlns0mfYLYHLEVvgpz/8xYjR0yKDxJlXSATEpXtowHtqSi8tL7aBQ==",
                    -1L
            )));
    final S3Session session = new S3Session(host);
    // open() only establishes the connection; credential validation happens in login()
    assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()));
    assertTrue(session.isConnected());
    assertNotNull(session.getClient());
    assertThrows(ExpiredTokenException.class, () -> session.login(new DisabledLoginCallback(), new DisabledCancelCallback()));
}
/**
 * Returns the index of the first element accepted by {@code matcher}, or -1 when
 * the collection is empty/null or nothing matches. A null matcher accepts the
 * first element (index 0), mirroring the original contract.
 */
public static <T> int indexOf(Collection<T> collection, Matcher<T> matcher) {
    if (isNotEmpty(collection)) {
        int position = 0;
        for (final T element : collection) {
            if (matcher == null || matcher.match(element)) {
                return position;
            }
            position++;
        }
    }
    return -1;
}
// indexOf must return the position of the FIRST match ('c' first appears at index 2).
@Test
public void indexOfTest() {
    final ArrayList<String> list = CollUtil.newArrayList("a", "b", "c", "c", "a", "b", "d");
    final int i = CollUtil.indexOf(list, (str) -> str.charAt(0) == 'c');
    assertEquals(2, i);
}
/**
 * Matches the "concurrent state change" where the storage provider advanced the job
 * exactly one version past the local copy and moved it out of PROCESSING while the
 * local copy still thinks it is PROCESSING.
 * <p>
 * The change is only allowed when no local thread is still executing the job;
 * otherwise the local processing must finish first.
 */
@Override
public boolean matches(Job localJob, Job storageProviderJob) {
    if (storageProviderJob.getVersion() == localJob.getVersion() + 1
            && localJob.hasState(PROCESSING) && !storageProviderJob.hasState(PROCESSING)) {
        // null thread => nothing is actively processing the job locally anymore
        return jobSteward.getThreadProcessingJob(localJob) == null;
    }
    return false;
}
// Even with the right version bump and state transition, the matcher must reject the
// change while a thread is still processing the job locally.
@Test
void ifJobIsHavingConcurrentStateChangeAndIsStillProcessingItWillNotMatch() {
    final Job localJob = aJobInProgress().withVersion(2).build();
    final Job storageProviderJob = aCopyOf(localJob).withVersion(3).withState(new SucceededState(ofMillis(10), ofMillis(6))).build();
    // Simulate an active processing thread for the local job
    final Thread jobThread = mock(Thread.class);
    lenient().when(jobSteward.getThreadProcessingJob(localJob)).thenReturn(jobThread);
    boolean matchesAllowedStateChange = allowedStateChange.matches(localJob, storageProviderJob);
    assertThat(matchesAllowedStateChange).isFalse();
}
/**
 * Parses a resource configuration value, treating missing bounds as unlimited.
 *
 * @param value the textual resource configuration to parse
 * @return the parsed {@link ConfigurableResource}
 * @throws AllocationConfigurationException if the value cannot be parsed
 */
public static ConfigurableResource parseResourceConfigValue(String value)
    throws AllocationConfigurationException {
  // Long.MAX_VALUE acts as the "no limit" default for the two-argument overload
  return parseResourceConfigValue(value, Long.MAX_VALUE);
}
// Negative percentages must be rejected even with extra whitespace around '=' and ','.
@Test
public void testParseNewStyleResourceWithPercentagesMemoryNegativeWithMoreSpaces()
    throws Exception {
  expectNegativePercentageNewStyle();
  parseResourceConfigValue("vcores = 75%, memory-mb = -40%");
}
/**
 * Returns the distinct partition keys of an Iceberg table that survive predicate
 * pruning, by planning file-scan tasks (cached per predicate/version key) and
 * collecting each task's partition values.
 *
 * @param table     the Iceberg table (must be an {@link IcebergTable})
 * @param predicate pushed-down predicate used for file planning; may be null
 * @param limit     planning limit forwarded to the scan-task trigger
 * @param version   snapshot range; an empty end version yields no partitions
 */
@Override
public List<PartitionKey> getPrunedPartitions(Table table, ScalarOperator predicate, long limit,
                                              TableVersionRange version) {
    IcebergTable icebergTable = (IcebergTable) table;
    String dbName = icebergTable.getRemoteDbName();
    String tableName = icebergTable.getRemoteTableName();
    // No snapshot to read from: nothing to prune
    if (version.end().isEmpty()) {
        return new ArrayList<>();
    }

    PredicateSearchKey key = PredicateSearchKey.of(dbName, tableName, version.end().get(), predicate);
    // Ensures splitTasks is populated for this (table, snapshot, predicate) key
    triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit);

    List<PartitionKey> partitionKeys = new ArrayList<>();
    List<FileScanTask> icebergSplitTasks = splitTasks.get(key);
    if (icebergSplitTasks == null) {
        throw new StarRocksConnectorException("Missing iceberg split task for table:[{}.{}]. predicate:[{}]",
                dbName, tableName, predicate);
    }

    // Deduplicate partitions across file-scan tasks by their value tuples
    Set<List<String>> scannedPartitions = new HashSet<>();
    PartitionSpec spec = icebergTable.getNativeTable().spec();
    List<Column> partitionColumns = icebergTable.getPartitionColumnsIncludeTransformed();
    boolean existPartitionTransformedEvolution = ((IcebergTable) table).hasPartitionTransformedEvolution();
    for (FileScanTask fileScanTask : icebergSplitTasks) {
        org.apache.iceberg.PartitionData partitionData = (org.apache.iceberg.PartitionData) fileScanTask.file().partition();
        List<String> values = PartitionUtil.getIcebergPartitionValues(
                spec, partitionData, existPartitionTransformedEvolution);

        if (values.size() != partitionColumns.size()) {
            // ban partition evolution and non-identify column.
            continue;
        }
        if (scannedPartitions.contains(values)) {
            continue;
        } else {
            scannedPartitions.add(values);
        }
        try {
            // Derive the StarRocks type for each partition field. Non-identity
            // transforms use the transform's result type (dates mapped to int).
            List<com.starrocks.catalog.Type> srTypes = new ArrayList<>();
            for (PartitionField partitionField : spec.fields()) {
                if (partitionField.transform().isVoid()) {
                    continue;
                }
                if (!partitionField.transform().isIdentity()) {
                    Type sourceType = spec.schema().findType(partitionField.sourceId());
                    Type resultType = partitionField.transform().getResultType(sourceType);
                    if (resultType == Types.DateType.get()) {
                        resultType = Types.IntegerType.get();
                    }
                    srTypes.add(fromIcebergType(resultType));
                    continue;
                }
                srTypes.add(icebergTable.getColumn(icebergTable.getPartitionSourceName(spec.schema(),
                        partitionField)).getType());
            }
            // With transformed partition evolution, the column list already carries
            // the effective types; override the field-derived ones.
            if (existPartitionTransformedEvolution) {
                srTypes = partitionColumns.stream()
                        .map(Column::getType)
                        .collect(Collectors.toList());
            }
            partitionKeys.add(createPartitionKeyWithType(values, srTypes, table.getType()));
        } catch (Exception e) {
            LOG.error("create partition key failed.", e);
            throw new StarRocksConnectorException(e.getMessage());
        }
    }
    return partitionKeys;
}
// Pruning with a null predicate: one data file yields one IcebergPartitionKey; a
// second appended file in a different partition yields two distinct keys.
@Test
public void testPartitionPrune() {
    IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG);
    List<Column> columns = Lists.newArrayList(new Column("id", INT), new Column("data", STRING));
    IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog,
            Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
    mockedNativeTableA.newFastAppend().appendFile(FILE_A).commit();
    IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "db_name",
            "table_name", "", columns, mockedNativeTableA, Maps.newHashMap());
    Map<ColumnRefOperator, Column> colRefToColumnMetaMap = new HashMap<ColumnRefOperator, Column>();
    ColumnRefOperator columnRefOperator1 = new ColumnRefOperator(3, Type.INT, "id", true);
    ColumnRefOperator columnRefOperator2 = new ColumnRefOperator(4, Type.STRING, "data", true);
    colRefToColumnMetaMap.put(columnRefOperator1, new Column("id", Type.INT));
    colRefToColumnMetaMap.put(columnRefOperator2, new Column("data", Type.STRING));
    new ConnectContext().setThreadLocalInfo();
    TableVersionRange version = TableVersionRange.withEnd(Optional.of(
            mockedNativeTableA.currentSnapshot().snapshotId()));
    List<PartitionKey> partitionKeys = metadata.getPrunedPartitions(icebergTable, null, 1, version);
    Assert.assertEquals(1, partitionKeys.size());
    Assert.assertTrue(partitionKeys.get(0) instanceof IcebergPartitionKey);
    IcebergPartitionKey partitionKey = (IcebergPartitionKey) partitionKeys.get(0);
    Assert.assertEquals("types: [INT]; keys: [0]; ", partitionKey.toString());
    // Append a file in a second partition and re-resolve against the new snapshot
    mockedNativeTableA.newFastAppend().appendFile(FILE_A_2).commit();
    mockedNativeTableA.refresh();
    icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "db_name",
            "table_name", "", columns, mockedNativeTableA, Maps.newHashMap());
    TableVersionRange versionRange = TableVersionRange.withEnd(Optional.of(
            mockedNativeTableA.currentSnapshot().snapshotId()));
    partitionKeys = metadata.getPrunedPartitions(icebergTable, null, 100, versionRange);
    Assert.assertEquals(2, partitionKeys.size());
}
/**
 * Registers an authentication consumer factory, keyed by its consumed type within
 * the map for its (return type, create-arg type) pair.
 *
 * @throws AuthenticationFactoryException if the consumed type is neither an
 *         interface nor an {@link AuthenticationProvider} subtype
 */
public <ReturnType, CreateArgType, ConsumedType> void registerConsumerFactory(
    AuthenticationConsumerFactory<ReturnType, CreateArgType, ConsumedType> factory )
  throws AuthenticationFactoryException {
  // Consumed type must be an interface or an AuthenticationProvider implementation
  if ( !factory.getConsumedType().isInterface()
      && !AuthenticationProvider.class.isAssignableFrom( factory.getConsumedType() ) ) {
    throw new AuthenticationFactoryException( BaseMessages.getString( PKG,
        "AuthenticationManager.ConsumedTypeError", factory ) );
  }

  Map<Class<?>, AuthenticationConsumerFactory<?, ?, ?>> createTypeMap =
      getRelevantConsumerFactoryMap( factory.getReturnType(), factory.getCreateArgType() );
  // Guard concurrent registrations into the same (return, create-arg) map
  synchronized ( createTypeMap ) {
    createTypeMap.put( factory.getConsumedType(), factory );
  }
}
// Registering a factory and performing authentication must route the provider to the
// consumer produced by that factory.
@SuppressWarnings( { "rawtypes", "unchecked" } )
@Test
public void testRegisterConsumerFactory() throws AuthenticationConsumptionException, AuthenticationFactoryException {
  AuthenticationConsumer<Object, KerberosAuthenticationProvider> authConsumer = mock( AuthenticationConsumer.class );
  AuthenticationConsumerFactory<Object, AuthenticationConsumer, KerberosAuthenticationProvider> factory =
      mock( AuthenticationConsumerFactory.class );
  when( factory.getReturnType() ).thenReturn( Object.class );
  when( factory.getCreateArgType() ).thenReturn( AuthenticationConsumer.class );
  when( factory.getConsumedType() ).thenReturn( KerberosAuthenticationProvider.class );
  when( factory.create( authConsumer ) ).thenReturn( authConsumer );
  KerberosAuthenticationProvider kerberosAuthenticationProvider =
      new KerberosAuthenticationProvider( "kerb", "kerb", true, "pass", true, "none" );
  manager.registerAuthenticationProvider( kerberosAuthenticationProvider );
  manager.registerConsumerFactory( factory );
  // Performing must hand the registered provider to the consumer exactly once
  manager.getAuthenticationPerformer( Object.class, AuthenticationConsumer.class,
      kerberosAuthenticationProvider.getId() ).perform( authConsumer );
  verify( authConsumer ).consume( kerberosAuthenticationProvider );
}
/**
 * Returns a new page whose columns expose only the given retained positions
 * (a window of {@code length} entries starting at {@code offset}).
 * The underlying block data is shared, not copied.
 */
public Page getPositions(int[] retainedPositions, int offset, int length)
{
    requireNonNull(retainedPositions, "retainedPositions is null");

    // Apply the same position selection to every column block
    Block[] selectedBlocks = new Block[blocks.length];
    int column = 0;
    for (Block block : blocks) {
        selectedBlocks[column++] = block.getPositions(retainedPositions, offset, length);
    }
    return wrapBlocksWithoutCopy(length, selectedBlocks);
}
// Selecting positions [1..6) of {0,1,1,1,2,5,5} must yield values 1,1,1,2,5 in every column.
@Test
public void testGetPositions() {
    int entries = 10;
    BlockBuilder blockBuilder = BIGINT.createBlockBuilder(null, entries);
    for (int i = 0; i < entries; i++) {
        BIGINT.writeLong(blockBuilder, i);
    }
    Block block = blockBuilder.build();
    // offset=1, length=5 selects indices 1..5 of the retained-positions array
    Page page = new Page(block, block, block).getPositions(new int[] {0, 1, 1, 1, 2, 5, 5}, 1, 5);
    assertEquals(page.getPositionCount(), 5);
    for (int i = 0; i < 3; i++) {
        assertEquals(page.getBlock(i).getLong(0), 1);
        assertEquals(page.getBlock(i).getLong(1), 1);
        assertEquals(page.getBlock(i).getLong(2), 1);
        assertEquals(page.getBlock(i).getLong(3), 2);
        assertEquals(page.getBlock(i).getLong(4), 5);
    }
}
/**
 * Validates that {@code value} conforms to {@code schema}, with no field name
 * attached to any error (delegates to the name-aware overload with null).
 */
public static void validateValue(Schema schema, Object value) {
    validateValue(null, schema, value);
}
// A long value must not validate against an INT32 schema, even though it is numeric.
@Test
public void testValidateValueMismatchInt32() {
    assertThrows(DataException.class,
            () -> ConnectSchema.validateValue(Schema.INT32_SCHEMA, (long) 1));
}
/**
 * Generates the inclusive series from {@code start} to {@code end} with a unit
 * step, ascending when {@code end >= start} and descending otherwise.
 *
 * @param start the beginning of the series
 * @param end   the end of the series (inclusive)
 * @return the generated series
 */
@Udf
public List<Long> generateSeriesLong(
    @UdfParameter(description = "The beginning of the series") final long start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
  // Compare directly rather than testing the sign of (end - start): that
  // subtraction can overflow for extreme long inputs and pick the wrong
  // direction. For end == start the step sign is irrelevant — the inclusive
  // series is just [start] either way.
  return generateSeriesLong(start, end, end >= start ? 1 : -1);
}
// A zero step can never reach the end of the range, so the UDF must reject it.
@Test
public void shouldThrowOnStepZeroLong() {
    // When:
    final Exception e = assertThrows(
        KsqlFunctionException.class,
        () -> rangeUdf.generateSeriesLong(0L, 10L, 0)
    );

    // Then:
    assertThat(e.getMessage(), containsString(
        "GENERATE_SERIES step cannot be zero"));
}
// Binds the "session-property-manager.config-file" configuration property.
// Fluent setter: returns this for chaining.
@Config("session-property-manager.config-file")
public FileSessionPropertyManagerConfig setConfigFile(File configFile) {
    this.configFile = configFile;
    return this;
}
// The config-file property string must map onto setConfigFile with an equivalent File.
@Test
public void testExplicitPropertyMappings() {
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("session-property-manager.config-file", "/test.json")
            .build();

    FileSessionPropertyManagerConfig expected = new FileSessionPropertyManagerConfig()
            .setConfigFile(new File("/test.json"));

    assertFullMapping(properties, expected);
}
/**
 * Computes the Kafka version change for this reconciliation: reads the version
 * from the controller, inspects the pods, detects the from/to versions and
 * finally prepares the version-change decision.
 *
 * @return a future completing with the prepared {@code KafkaVersionChange}
 */
public Future<KafkaVersionChange> reconcile()    {
    return getVersionFromController()
            .compose(i -> getPods())
            .compose(this::detectToAndFromVersions)
            .compose(i -> prepareVersionChange());
}
// When the CR pins only the Kafka version and the cluster already runs it, the
// version-change creator must produce a no-op (from == to == default version).
@Test
public void testNoopWithKafkaVersionOnly(VertxTestContext context) {
    String kafkaVersion = VERSIONS.defaultVersion().version();
    String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion();
    String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion();

    VersionChangeCreator vcc = mockVersionChangeCreator(
            mockKafka(kafkaVersion, null, null),
            mockNewCluster(
                    null,
                    mockSps(kafkaVersion),
                    mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)
            )
    );

    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(c.from(), is(VERSIONS.defaultVersion()));
        assertThat(c.to(), is(VERSIONS.defaultVersion()));
        assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion()));
        assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion()));
        assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
        async.flag();
    })));
}
/**
 * Validates this user's email address: at most 255 characters and a
 * syntactically valid email, in that order.
 *
 * @throws ValidationException if either check fails
 */
public void validateEmail() throws ValidationException {
    final String email = getEmail();
    // Length first, so oversized input fails before format parsing
    validate(Validator.lengthValidator(255), email);
    validate(Validator.emailValidator(), email);
}
// A well-formed email shorter than 255 characters must pass validation without throwing.
@Test
void shouldValidateEmailLesserThan255() throws Exception {
    user = new User("UserName", new String[]{"Jez,Pavan"}, "user@mail.com", true);
    user.validateEmail();
}
@Override public void addPath(String word, int outputSymbol) { MutableState state = getStartState(); if (state == null) { throw new IllegalStateException("Start state cannot be null"); } List<MutableArc> arcs = state.getArcs(); boolean isFound = false; for (MutableArc arc : arcs) { if (arc.getNextState().getLabel() == word.charAt(0)) { state = arc.getNextState(); isFound = true; break; } } int foundPos = -1; if (isFound) { Pair<MutableState, Integer> pair = findPointOfDiversion(state, word); if (pair == null) { // Word already exists return; } foundPos = pair.getRight(); state = pair.getLeft(); } for (int i = foundPos + 1; i < word.length(); i++) { MutableState nextState = new MutableState(); nextState.setLabel(word.charAt(i)); int currentOutputSymbol = -1; if (i == word.length() - 1) { currentOutputSymbol = outputSymbol; } MutableArc mutableArc = new MutableArc(currentOutputSymbol, nextState); state.addArc(mutableArc); state = nextState; } state.setIsTerminal(true); }
// "hello.*123" matches only "hello-world123"; "hello.*" matches both hello-* words.
@Test
public void testRegexMatcherMatchAny() {
    MutableFST fst = new MutableFSTImpl();
    fst.addPath("hello-world", 12);
    fst.addPath("hello-world123", 21);
    fst.addPath("still", 123);

    RoaringBitmapWriter<MutableRoaringBitmap> writer = RoaringBitmapWriter.bufferWriter().get();
    RealTimeRegexpMatcher.regexMatch("hello.*123", fst, writer::add);
    Assert.assertEquals(writer.get().getCardinality(), 1);

    // Reset the writer before the second pattern so cardinalities don't accumulate
    writer.reset();
    RealTimeRegexpMatcher.regexMatch("hello.*", fst, writer::add);
    Assert.assertEquals(writer.get().getCardinality(), 2);
}
/**
 * Maps a group lineage DTO to its API representation by converting each lineage
 * item into a {@link DataEntityLineageStream}.
 */
public DataEntityGroupLineageList mapGroupLineageDto(final DataEntityGroupLineageDto dataEntityGroupLineageDto) {
    return new DataEntityGroupLineageList()
        .items(dataEntityGroupLineageDto.lineageItems()
            .stream()
            .map(this::mapStream)
            .toList());
}
// The mapped result must carry the same node ids, group ids and edge endpoints as the DTO.
@Test
void mapGroupLineageDto() {
    final var dto = new DataEntityGroupLineageDto(List.of(generateStreamDto()));
    final var result = mapper.mapGroupLineageDto(dto);
    // Node ids survive the mapping in order
    assertThat(
        result.getItems().stream().flatMap(item -> item.getNodes().stream().map(DataEntityLineageNode::getId))
            .collect(
                Collectors.toList())).isEqualTo(dto.lineageItems().stream()
        .flatMap(item -> item.nodes().stream().map(node -> node.entity().getDataEntity().getId())).collect(
            Collectors.toList()));
    // Every node references the (single) group id from the DTO
    assertThat(result.getItems().stream()
        .flatMap(item -> item.getNodes().stream().flatMap(node -> node.getGroupIdList().stream())).distinct()
        .collect(
            Collectors.toList())).isEqualTo(List.of(
        dto.lineageItems().stream().flatMap(item -> item.groups().stream()).findFirst().orElseThrow()
            .getDataEntity().getId()));
    // Edge source ids must point at existing node ids
    assertThat(result.getItems().stream().flatMap(item -> item.getEdges().stream()).findFirst()
        .map(DataEntityLineageEdge::getSourceId).orElseThrow()).isIn(
        result.getItems().stream().flatMap(item -> item.getNodes().stream().map(DataEntityLineageNode::getId))
            .collect(
                Collectors.toList()));
}
/**
 * Replays a single coordinator record into the in-memory state, dispatching on the
 * record key's version: versions 0-1 are offset-commit records handled by the
 * offset metadata manager; versions 2-14 are group records handled by the group
 * metadata manager. A null record value denotes a tombstone (replayed as null).
 *
 * @throws IllegalStateException if the key version is not a known record type
 */
@Override
public void replay(
    long offset,
    long producerId,
    short producerEpoch,
    CoordinatorRecord record
) throws RuntimeException {
    ApiMessageAndVersion key = record.key();
    ApiMessageAndVersion value = record.value();

    switch (key.version()) {
        case 0:
        case 1:
            // Offset commits also need the record offset and transactional producer info
            offsetMetadataManager.replay(
                offset,
                producerId,
                (OffsetCommitKey) key.message(),
                (OffsetCommitValue) Utils.messageOrNull(value)
            );
            break;

        case 2:
            groupMetadataManager.replay(
                (GroupMetadataKey) key.message(),
                (GroupMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 3:
            groupMetadataManager.replay(
                (ConsumerGroupMetadataKey) key.message(),
                (ConsumerGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 4:
            groupMetadataManager.replay(
                (ConsumerGroupPartitionMetadataKey) key.message(),
                (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 5:
            groupMetadataManager.replay(
                (ConsumerGroupMemberMetadataKey) key.message(),
                (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 6:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMetadataKey) key.message(),
                (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 7:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMemberKey) key.message(),
                (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;

        case 8:
            groupMetadataManager.replay(
                (ConsumerGroupCurrentMemberAssignmentKey) key.message(),
                (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;

        case 9:
            groupMetadataManager.replay(
                (ShareGroupPartitionMetadataKey) key.message(),
                (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 10:
            groupMetadataManager.replay(
                (ShareGroupMemberMetadataKey) key.message(),
                (ShareGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 11:
            groupMetadataManager.replay(
                (ShareGroupMetadataKey) key.message(),
                (ShareGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 12:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMetadataKey) key.message(),
                (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 13:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMemberKey) key.message(),
                (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;

        case 14:
            groupMetadataManager.replay(
                (ShareGroupCurrentMemberAssignmentKey) key.message(),
                (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;

        default:
            throw new IllegalStateException("Received an unknown record type " + key.version()
                + " in " + record);
    }
}
// A version-7 record with a null value (tombstone) must be replayed as (key, null).
@Test
public void testReplayConsumerGroupTargetAssignmentMemberKeyWithNullValue() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );

    ConsumerGroupTargetAssignmentMemberKey key = new ConsumerGroupTargetAssignmentMemberKey();

    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord(
        new ApiMessageAndVersion(key, (short) 7),
        null
    ));

    verify(groupMetadataManager, times(1)).replay(key, null);
}
/**
 * Reads the named file under {@code path} and returns its content, or null when
 * the file does not exist.
 */
public static String readFile(String path, String fileName) {
    final File target = openFile(path, fileName);
    // Absent files are not an error here — callers get null instead
    return target.exists() ? readFile(target) : null;
}
// Reading an existing file by (directory, name) must return non-null content.
@Test
void testReadFileWithPath() {
    assertNotNull(DiskUtils.readFile(testFile.getParent(), testFile.getName()));
}
/**
 * Finds a database in the list by name, comparing case-insensitively and
 * ignoring surrounding whitespace on both the stored and searched names.
 *
 * @param databases list of shared objects expected to contain DatabaseMeta entries
 * @param dbname    the database name to look up
 * @return the first matching {@link DatabaseMeta}, or null when the list or
 *         name is null or no entry matches
 */
public static final DatabaseMeta findDatabase( List<? extends SharedObjectInterface> databases, String dbname ) {
  if ( databases == null || dbname == null ) {
    return null;
  }

  // Hoist the loop-invariant trim of the search key out of the loop and use the
  // enhanced for-statement instead of index-based access.
  final String needle = dbname.trim();
  for ( SharedObjectInterface sharedObject : databases ) {
    DatabaseMeta ci = (DatabaseMeta) sharedObject;
    if ( ci.getName().trim().equalsIgnoreCase( needle ) ) {
      return ci;
    }
  }
  return null;
}
// Lookup must succeed regardless of leading/trailing whitespace in either the stored
// name or the searched name.
@Test
public void testfindDatabase() throws KettleDatabaseException {
    List<DatabaseMeta> databases = new ArrayList<DatabaseMeta>();
    databases.add( new DatabaseMeta( " 1", "Infobright", "JDBC", null, "stub:stub", null, null, null ) );
    databases.add( new DatabaseMeta( " 1 ", "Infobright", "JDBC", null, "stub:stub", null, null, null ) );
    databases.add( new DatabaseMeta( "1 ", "Infobright", "JDBC", null, "stub:stub", null, null, null ) );
    Assert.assertNotNull( DatabaseMeta.findDatabase( databases, "1" ) );
    Assert.assertNotNull( DatabaseMeta.findDatabase( databases, "1 " ) );
    Assert.assertNotNull( DatabaseMeta.findDatabase( databases, " 1" ) );
    Assert.assertNotNull( DatabaseMeta.findDatabase( databases, " 1 " ) );
}
/**
 * Returns whether the given admin has administrative rights.
 * Everyone is an admin when security is disabled or no admins are configured;
 * otherwise membership is checked against the configured admins and roles.
 */
public boolean isAdmin(Admin admin) {
    if (!isSecurityEnabled()) {
        return true;
    }
    if (noAdminsConfigured()) {
        return true;
    }
    return adminsConfig.isAdmin(admin, rolesConfig.memberRoles(admin));
}
// A role listed in the admins section must be recognized as an admin.
@Test
public void shouldValidateRoleAsAdmin() throws Exception {
    SecurityConfig security = security(passwordFileAuthConfig(), admins(role("role2")));
    assertThat(security.isAdmin(new AdminRole(new CaseInsensitiveString("role2"))), is(true));
}
/**
 * Appends a human-readable dissection of a RECORDING_SIGNAL event to the builder:
 * the common log header followed by the decoded signal fields.
 * <p>
 * NOTE(review): uses shared static decoders (HEADER_DECODER,
 * RECORDING_SIGNAL_EVENT_DECODER) — presumably single-threaded use; confirm.
 */
static void dissectRecordingSignal(final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) {
    // Dissect the generic log header first; it returns how many bytes it consumed
    int encodedLength = dissectLogHeader(CONTEXT, RECORDING_SIGNAL, buffer, offset, builder);

    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;

    RECORDING_SIGNAL_EVENT_DECODER.wrap(
        buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());

    builder.append(": controlSessionId=").append(RECORDING_SIGNAL_EVENT_DECODER.controlSessionId())
        .append(" correlationId=").append(RECORDING_SIGNAL_EVENT_DECODER.correlationId())
        .append(" recordingId=").append(RECORDING_SIGNAL_EVENT_DECODER.recordingId())
        .append(" subscriptionId=").append(RECORDING_SIGNAL_EVENT_DECODER.subscriptionId())
        .append(" position=").append(RECORDING_SIGNAL_EVENT_DECODER.position())
        .append(" signal=").append(RECORDING_SIGNAL_EVENT_DECODER.signal());
}
// Encodes a RecordingSignalEvent and checks the dissected text reproduces every field.
@Test
void recordingSignal() {
    internalEncodeLogHeader(buffer, 0, 88, 99, () -> 2_250_000_000L);
    final RecordingSignalEventEncoder encoder = new RecordingSignalEventEncoder();
    encoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(49)
        .correlationId(-100)
        .recordingId(42)
        .subscriptionId(15)
        .position(234723197419023749L)
        .signal(RecordingSignal.DELETE);

    dissectRecordingSignal(buffer, 0, builder);

    assertEquals("[2.250000000] " + CONTEXT + ": " + RECORDING_SIGNAL.name() + " [88/99]: " +
        "controlSessionId=49" +
        " correlationId=-100" +
        " recordingId=42" +
        " subscriptionId=15" +
        " position=234723197419023749" +
        " signal=DELETE",
        builder.toString());
}
/**
 * Batch-deletes metadata entries by id.
 *
 * @param ids non-empty list of non-blank metadata ids
 * @return success result carrying the number of deleted rows
 */
@PostMapping("/batchDeleted")
@RequiresPermissions("system:meta:delete")
public ShenyuAdminResult batchDeleted(@RequestBody @NotEmpty final List<@NotBlank String> ids) {
    Integer deleteCount = metaDataService.delete(ids);
    return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, deleteCount);
}
// Posting two ids must report DELETE_SUCCESS with the service's delete count (2).
@Test
public void testBatchDeleted() throws Exception {
    final List<String> ids = new ArrayList<>(2);
    ids.add("1");
    ids.add("2");
    given(this.metaDataService.delete(ids)).willReturn(2);
    this.mockMvc.perform(MockMvcRequestBuilders.post("/meta-data/batchDeleted")
            .contentType(MediaType.APPLICATION_JSON)
            .content(GsonUtils.getInstance().toJson(ids)))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DELETE_SUCCESS)))
            .andExpect(jsonPath("$.data", is(2)))
            .andReturn();
}
/**
 * Two map events are equal when their map name, event type, key, new value and
 * old value are all equal (null-safe).
 */
@Override
public boolean equals(Object o) {
    if (!(o instanceof MapEvent)) {
        return false;
    }

    // Use an unbounded wildcard instead of a raw type: the raw (MapEvent) cast
    // disables generic type checking and produces an unchecked warning.
    MapEvent<?, ?> that = (MapEvent<?, ?>) o;
    return Objects.equals(this.name, that.name) &&
            Objects.equals(this.type, that.type) &&
            Objects.equals(this.key, that.key) &&
            Objects.equals(this.newValue, that.newValue) &&
            Objects.equals(this.oldValue, that.oldValue);
}
// equals/hashCode contract: each stats object equals itself and differs from the others.
@Test
public void testEquals() {
    new EqualsTester()
            .addEqualityGroup(stats1, stats1)
            .addEqualityGroup(stats2)
            .addEqualityGroup(stats3)
            .testEquals();
}
/**
 * Closes the state manager: unregisters changelogs (when the state updater is
 * disabled) and closes every registered store. All stores are attempted even if
 * some fail; the first failure is rethrown after the loop (StreamsException is
 * propagated as-is, anything else is wrapped in ProcessorStateException).
 */
@Override
public void close() throws ProcessorStateException {
    log.debug("Closing its state manager and all the registered state stores: {}", stores);

    if (!stateUpdaterEnabled) {
        changelogReader.unregister(getAllChangelogTopicPartitions());
    }

    RuntimeException firstException = null;
    // attempting to close the stores, just in case they
    // are not closed by a ProcessorNode yet
    if (!stores.isEmpty()) {
        for (final Map.Entry<String, StateStoreMetadata> entry : stores.entrySet()) {
            final StateStore store = entry.getValue().stateStore;
            log.trace("Closing store {}", store.name());
            try {
                store.close();
            } catch (final RuntimeException exception) {
                if (firstException == null) {
                    // do NOT wrap the error if it is actually caused by Streams itself
                    if (exception instanceof StreamsException)
                        firstException = exception;
                    else
                        firstException = new ProcessorStateException(
                            format("%sFailed to close state store %s", logPrefix, store.name()), exception);
                }
                log.error("Failed to close state store {}: ", store.name(), exception);
            }
        }
        stores.clear();
    }

    // Surface the first error only after every store had a chance to close
    if (firstException != null) {
        throw firstException;
    }
}
// close() on a manager with no registered stores must complete without throwing.
@Test
public void shouldBeAbleToCloseWithoutRegisteringAnyStores() {
    final ProcessorStateManager stateMgr = getStateManager(Task.TaskType.ACTIVE, true);

    stateMgr.close();
}
// Returns the summary statistics from the cache; computation is performed by the
// cache's loader, not here.
@Override
public Map<String, Integer> getSummaryStats() {
    return summaryCache.get();
}
// With no approved sites, all summary counters must be zero.
@Test
public void calculateSummaryStats_empty() {
    Mockito.when(approvedSiteService.getAll()).thenReturn(new HashSet<ApprovedSite>());

    Map<String, Integer> stats = service.getSummaryStats();

    assertThat(stats.get("approvalCount"), is(0));
    assertThat(stats.get("userCount"), is(0));
    assertThat(stats.get("clientCount"), is(0));
}
/**
 * Creates a verification mode asserting a stub was hit at most {@code count} times.
 *
 * @param count upper bound on matching requests; must be positive
 * @return the at-most verification mode
 */
public static VerificationMode atMost(final int count) {
    // A zero or negative upper bound is meaningless for "at most"
    checkArgument(count > 0, "Times count must be greater than zero");
    return new AtMostVerification(count);
}
// Two actual hits against an atMost(1) expectation must raise VerificationException.
@Test
public void should_fail_to_verify_at_most_expected_request_while_expectation_can_not_be_met() throws Exception {
    final HttpServer server = httpServer(port(), hit);
    server.get(by(uri("/foo"))).response("bar");

    running(server, () -> {
        assertThat(helper.get(remoteUrl("/foo")), is("bar"));
        assertThat(helper.get(remoteUrl("/foo")), is("bar"));
    });

    assertThrows(VerificationException.class, () -> hit.verify(by(uri("/foo")), atMost(1)));
}
/**
 * Formats an expression with default options (no identifier escaping — the
 * predicate never requests quoting).
 */
public static String formatExpression(final Expression expression) {
  return formatExpression(expression, FormatOptions.of(s -> false));
}
// Doubles are rendered in scientific notation: 2.0 formats as "2E0".
@Test
public void shouldFormatDoubleLiteralWithSmallScale() {
    assertThat(ExpressionFormatter.formatExpression(new DoubleLiteral(2.0)), equalTo("2E0"));
}
/**
 * Returns the value for display, masking it as "****" when the variable is
 * secure or references secret params so secrets never reach the UI.
 */
public String getDisplayValue() {
    return (isSecure() || hasSecretParams()) ? "****" : getValue();
}
// A secure environment variable must display as "****", never its real value.
@Test
void shouldMaskValueIfSecure() {
    EnvironmentVariableConfig secureEnvironmentVariable =
            new EnvironmentVariableConfig(goCipher, "plain_key", "plain_value", true);
    assertThat(secureEnvironmentVariable.getDisplayValue()).isEqualTo("****");
}
/**
 * Deletes a host by id.
 *
 * @param clusterId cluster route segment (not used for the deletion itself)
 * @param id        id of the host to delete
 * @return success response carrying whether the deletion happened
 */
@Operation(summary = "delete", description = "Delete a host")
@DeleteMapping("/{id}")
public ResponseEntity<Boolean> delete(@PathVariable Long clusterId, @PathVariable Long id) {
    return ResponseEntity.success(hostService.delete(id));
}
// Deleting an existing host must yield a success response with data == true.
@Test
void deleteReturnsSuccess() {
    Long clusterId = 1L;
    Long hostId = 1L;
    when(hostService.delete(hostId)).thenReturn(true);

    ResponseEntity<Boolean> response = hostController.delete(clusterId, hostId);

    assertTrue(response.isSuccess());
    assertTrue(response.getData());
}
/**
 * Restarts per the run request. For non-fresh runs whose policy is not
 * RESTART_FROM_SPECIFIC, the request is first rewritten to restart from the
 * inline root. When the recursive restart reports DELEGATED, the restart is
 * performed directly (optionally blocking); otherwise the response is returned.
 */
public RunResponse restart(RunRequest runRequest, boolean blocking) {
    if (!runRequest.isFreshRun() && runRequest.getCurrentPolicy() != RunPolicy.RESTART_FROM_SPECIFIC) {
        updateRunRequestForRestartFromInlineRoot(runRequest);
    }
    RunResponse runResponse = actionHandler.restartRecursively(runRequest);
    if (runResponse.getStatus() == RunResponse.Status.DELEGATED) {
        // Delegated means this layer must carry out the restart itself
        return restartDirectly(runResponse, runRequest, blocking);
    }
    return runResponse;
}
// Restart-from-inline-root must be rejected for: a non-terminal root instance, a
// non-terminal step, and a restart path whose size is not 1.
@Test
public void testInvalidRestartFromInlineRoot() {
    when(instanceDao.getWorkflowInstance(
            "sample-minimal-wf", 1, Constants.LATEST_INSTANCE_RUN, true))
        .thenReturn(instance);
    when(instance.getStatus()).thenReturn(WorkflowInstance.Status.IN_PROGRESS);
    when(instance.getInitiator()).thenReturn(new ForeachInitiator());
    RunRequest runRequest =
        RunRequest.builder()
            .requester(user)
            .currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
            .restartConfig(
                RestartConfig.builder()
                    .addRestartNode("sample-minimal-wf2", 1, "job1")
                    .addRestartNode("sample-minimal-wf", 1, "job1")
                    .build())
            .build();

    // Case 1: root instance still IN_PROGRESS
    AssertHelper.assertThrows(
        "Cannot restart from inline root for non-terminal root",
        IllegalArgumentException.class,
        "instance [null] is in non-terminal state [IN_PROGRESS]",
        () -> stepActionHandler.restart(runRequest, true));

    // Case 2: root terminal, but the targeted step is still RUNNING
    when(instance.getStatus()).thenReturn(WorkflowInstance.Status.FAILED);
    WorkflowInstanceAggregatedInfo aggregatedInfo = mock(WorkflowInstanceAggregatedInfo.class);
    when(instance.getAggregatedInfo()).thenReturn(aggregatedInfo);
    StepAggregatedView aggregatedView = mock(StepAggregatedView.class);
    when(aggregatedInfo.getStepAggregatedViews()).thenReturn(singletonMap("job1", aggregatedView));
    when(aggregatedView.getStatus()).thenReturn(StepInstance.Status.RUNNING);
    AssertHelper.assertThrows(
        "Cannot restart from inline root for non-terminal step",
        IllegalArgumentException.class,
        "step null[job1] is in non-terminal state [RUNNING]",
        () -> stepActionHandler.restart(runRequest, true));

    // Case 3: step terminal, but the restart path contains two nodes instead of one
    when(aggregatedView.getStatus()).thenReturn(StepInstance.Status.FATALLY_FAILED);
    AssertHelper.assertThrows(
        "Cannot restart from inline root for invalid restart path",
        IllegalArgumentException.class,
        "restart-path size is not 1",
        () -> stepActionHandler.restart(runRequest, true));
}
/**
 * Registers a stream serializer class for the given type.
 *
 * @return this config, for chaining
 * @throws IllegalArgumentException if a serializer is already registered for the class
 */
@Nonnull
@EvolvingApi
public <T, S extends StreamSerializer<T>> JobConfig registerSerializer(
        @Nonnull Class<T> clazz,
        @Nonnull Class<S> serializerClass
) {
    // The config is immutable once the job is submitted
    throwIfLocked();
    Preconditions.checkFalse(serializerConfigs.containsKey(clazz.getName()),
            "Serializer for " + clazz + " already registered");
    serializerConfigs.put(clazz.getName(), serializerClass.getName());
    return this;
}
// Registering a second serializer for the same class must fail with the duplicate message.
@Test
public void when_registerSerializerTwice_then_fails() {
    // Given
    JobConfig config = new JobConfig();
    config.registerSerializer(Object.class, ObjectSerializer.class);

    // When
    // Then
    Assert.assertThrows("Serializer for class java.lang.Object already registered",
            IllegalArgumentException.class,
            () -> config.registerSerializer(Object.class, ObjectSerializer.class));
}
/**
 * Wraps a JDBC connection in a monitoring proxy, unless monitoring is disabled
 * (in which case the connection is returned as-is).
 */
public Connection createConnectionProxy(Connection connection) {
    assert connection != null;
    // even when the SQL counter is not displayed we still create a proxy of the
    // connection to get the USED_CONNECTION_COUNT and ACTIVE_CONNECTION_COUNT
    // charts (see issue 160)
    if (isMonitoringDisabled()) {
        return connection;
    }
    final ConnectionInvocationHandler invocationHandler = new ConnectionInvocationHandler(
            connection);
    final Connection result = createProxy(connection, invocationHandler);
    // createProxy may return the original instance (already proxied); only
    // initialize the handler when a new proxy was actually created
    if (result != connection) {
        invocationHandler.init();
    }
    return result;
}
@Test
public void testCreateConnectionProxy() throws SQLException, IllegalAccessException {
    // Exercises createConnectionProxy: connection counting, re-proxying,
    // the disabled-monitoring path, and the raw createProxy helper.
    DriverManager.registerDriver(driver);
    final int usedConnectionCount = JdbcWrapper.getUsedConnectionCount();
    // requires the dependency on the H2 database
    Connection connection = DriverManager.getConnection(H2_DATABASE_URL);
    assertEquals("getUsedConnectionCount1", usedConnectionCount,
            JdbcWrapper.getUsedConnectionCount());
    try {
        jdbcWrapper.rewrapConnection(connection);
        connection = jdbcWrapper.createConnectionProxy(connection);
        assertEquals("getUsedConnectionCount1", usedConnectionCount + 1,
                JdbcWrapper.getUsedConnectionCount());
        assertNotNull("createConnectionProxy", connection);
        assertEquals(EQUALS, connection, connection);
        connection.hashCode();
        final int activeConnectionCount = JdbcWrapper.getActiveConnectionCount();
        connection.prepareStatement("select 1").close();
        connection.prepareCall("select 2").close();
        assertEquals("getActiveConnectionCount", activeConnectionCount,
                JdbcWrapper.getActiveConnectionCount());
        connection.rollback();
        // proxying still happens even when the sql counter is not displayed
        jdbcWrapper.getSqlCounter().setDisplayed(false);
        connection = jdbcWrapper.createConnectionProxy(connection);
        assertEquals("getUsedConnectionCount2", usedConnectionCount + 1,
                JdbcWrapper.getUsedConnectionCount());
        jdbcWrapper.getSqlCounter().setDisplayed(true);
        // with monitoring disabled, the connection count must not change
        Utils.setProperty(Parameter.DISABLED, "true");
        try {
            connection = jdbcWrapper.createConnectionProxy(connection);
            assertEquals("getUsedConnectionCount3", usedConnectionCount + 1,
                    JdbcWrapper.getUsedConnectionCount());
        } finally {
            Utils.setProperty(Parameter.DISABLED, "false");
        }
        // getConnectionInformationsList may return an empty list
        // if the JdbcWrapper class was initialized while system-actions-enabled=false
        // or no-database=true, which is the case given the order of the tests in the ant script
        assertNotNull("getConnectionInformationsList",
                JdbcWrapper.getConnectionInformationsList());
    } finally {
        connection.close();
    }
    assertEquals("getUsedConnectionCount4", usedConnectionCount,
            JdbcWrapper.getUsedConnectionCount());
    connection = DriverManager.getConnection(H2_DATABASE_URL + "?driver=org.h2.Driver");
    try {
        assertEquals("getUsedConnectionCount1", usedConnectionCount + 1,
                JdbcWrapper.getUsedConnectionCount());
    } finally {
        connection.close();
    }
    // proxying an already-proxied connection returns an equal connection
    assertEquals("proxy of proxy", connection, jdbcWrapper.createConnectionProxy(connection));
    final InvocationHandler dummy = (proxy, method, args) -> null;
    final List<Class<?>> interfaces = List.of(new Class<?>[] { Connection.class });
    connection = DriverManager.getConnection(H2_DATABASE_URL);
    try {
        assertNotNull("createProxy", JdbcWrapper.createProxy(connection, dummy, interfaces));
    } finally {
        connection.close();
    }
    JdbcWrapper.getActiveThreadCount();
}
/** Delegates the configuration-schema lookup to the wrapped codec. */
@Override
public Collection<PluginConfigSpec<?>> configSchema() {
    return codec.configSchema();
}
@Test
public void delegatesConfigSchema() {
    // The delegator must forward configSchema() to the wrapped codec exactly once.
    final JavaCodecDelegator codecDelegator = constructCodecDelegator();
    codecDelegator.configSchema();
    Mockito.verify(codec, Mockito.times(1)).configSchema();
}
/**
 * Builds the list of {@link MappingRule} objects from the given description.
 * For each rule: mandatory parameters are validated, a matcher and an action
 * are created, and the action's fallback is configured from the rule.
 *
 * @param rules the parsed mapping-rules description
 * @return the constructed mapping rules, in description order
 */
@VisibleForTesting
List<MappingRule> getMappingRules(MappingRulesDescription rules) {
    final List<MappingRule> result = new ArrayList<>();
    for (Rule ruleDescription : rules.getRules()) {
        checkMandatoryParameters(ruleDescription);
        MappingRuleMatcher matcher = createMatcher(ruleDescription);
        MappingRuleAction action = createAction(ruleDescription);
        setFallbackToAction(ruleDescription, action);
        result.add(new MappingRule(matcher, action));
    }
    return result;
}
@Test
public void testSetDefaultRuleWithMissingQueue() {
    // SET_DEFAULT_QUEUE policy without a defined default queue must be rejected.
    rule.setPolicy(Policy.SET_DEFAULT_QUEUE);
    expected.expect(IllegalArgumentException.class);
    expected.expectMessage("default queue is undefined");
    ruleCreator.getMappingRules(description);
}
/**
 * Unregisters the broker with the given id via an UnregisterBroker request
 * sent to the least-loaded node.
 *
 * @param brokerId the id of the broker to unregister
 * @param options  request options (timeout)
 * @return a result whose future completes when the broker is unregistered
 */
@Override
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
    final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call("unregisterBroker",
            calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        UnregisterBrokerRequest.Builder createRequest(int timeoutMs) {
            UnregisterBrokerRequestData data =
                    new UnregisterBrokerRequestData().setBrokerId(brokerId);
            return new UnregisterBrokerRequest.Builder(data);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final UnregisterBrokerResponse response = (UnregisterBrokerResponse) abstractResponse;
            Errors error = Errors.forCode(response.data().errorCode());
            switch (error) {
                case NONE:
                    future.complete(null);
                    break;
                case REQUEST_TIMED_OUT:
                    // rethrown so the call machinery can retry a timed-out request
                    throw error.exception();
                default:
                    log.error("Unregister broker request for broker ID {} failed: {}",
                            brokerId, error.message());
                    future.completeExceptionally(error.exception());
                    break;
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new UnregisterBrokerResult(future);
}
@Test
public void testUnregisterBrokerSuccess() throws InterruptedException, ExecutionException {
    // A NONE error code in the response must complete the result future normally.
    int nodeId = 1;
    try (final AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(
                NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0));
        env.kafkaClient().prepareResponse(prepareUnregisterBrokerResponse(Errors.NONE, 0));
        UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId);
        // Validate response
        assertNotNull(result.all());
        result.all().get();
    }
}
/**
 * Generates a new job id, runs the given job configuration under it, and
 * returns that id. Synchronized so id generation and submission are atomic.
 *
 * @param jobConfig the configuration of the job to run
 * @return the newly assigned job id
 * @throws JobDoesNotExistException  propagated from the underlying run
 * @throws ResourceExhaustedException propagated from the underlying run
 */
public synchronized long run(JobConfig jobConfig)
        throws JobDoesNotExistException, ResourceExhaustedException {
    long jobId = getNewJobId();
    run(jobConfig, jobId);
    return jobId;
}
@Test
public void run() throws Exception {
    // Submitting up to the job-capacity limit must succeed, and list() must
    // report the jobs in submission order.
    try (MockedStatic<PlanCoordinator> mockStaticPlanCoordinator = mockPlanCoordinator()) {
        TestPlanConfig jobConfig = new TestPlanConfig("/test");
        List<Long> jobIdList = new ArrayList<>();
        for (long i = 0; i < TEST_JOB_MASTER_JOB_CAPACITY; i++) {
            jobIdList.add(mJobMaster.run(jobConfig));
        }
        final List<Long> list = mJobMaster.list(ListAllPOptions.getDefaultInstance());
        Assert.assertEquals(jobIdList, list);
        Assert.assertEquals(TEST_JOB_MASTER_JOB_CAPACITY,
                mJobMaster.list(ListAllPOptions.getDefaultInstance()).size());
    }
}
/**
 * Sorts the given array using the default direction (ascending).
 *
 * @param input the array to sort
 * @return the sorted array, as produced by arraySortWithDirection
 */
@Udf
public <T extends Comparable<? super T>> List<T> arraySortDefault(@UdfParameter(
        description = "The array to sort") final List<T> input) {
    return arraySortWithDirection(input, "ASC");
}
@Test
public void shouldSortDecimals() {
    // Default sort on BigDecimal values must be ascending.
    final List<BigDecimal> input = Arrays.asList(
            BigDecimal.valueOf(1.2), BigDecimal.valueOf(1.3), BigDecimal.valueOf(-1.2));
    final List<BigDecimal> output = udf.arraySortDefault(input);
    assertThat(output, contains(
            BigDecimal.valueOf(-1.2), BigDecimal.valueOf(1.2), BigDecimal.valueOf(1.3)));
}
/**
 * Returns the cached monitor for the given URL, creating it asynchronously
 * on first request. While creation is in flight (or just submitted) this
 * method returns {@code null}; callers are expected to retry.
 *
 * @param url the monitor URL (path and interface key are normalized here)
 * @return the cached monitor, or {@code null} when creation is still pending
 */
@Override
public Monitor getMonitor(URL url) {
    url = url.setPath(MonitorService.class.getName())
            .addParameter(INTERFACE_KEY, MonitorService.class.getName());
    String key = url.toServiceStringWithoutResolving();
    // fast path: already created, or creation already in flight
    Monitor monitor = MONITORS.get(key);
    Future<Monitor> future = FUTURES.get(key);
    if (monitor != null || future != null) {
        return monitor;
    }
    LOCK.lock();
    try {
        // re-check under the lock (double-checked pattern over the two maps)
        monitor = MONITORS.get(key);
        future = FUTURES.get(key);
        if (monitor != null || future != null) {
            return monitor;
        }
        final URL monitorUrl = url;
        // create asynchronously; creation failures are logged, not rethrown
        future = EXECUTOR.submit(() -> {
            try {
                Monitor m = createMonitor(monitorUrl);
                MONITORS.put(key, m);
                FUTURES.remove(key);
                return m;
            } catch (Throwable e) {
                logger.warn(
                        COMMON_MONITOR_EXCEPTION,
                        "",
                        "",
                        "Create monitor failed, monitor data will not be collected until you fix this problem. monitorUrl: "
                                + monitorUrl,
                        e);
                return null;
            }
        });
        FUTURES.put(key, future);
        // creation pending: signal the caller to retry later
        return null;
    } finally {
        // unlock
        LOCK.unlock();
    }
}
@Test
void testMonitorFactoryCache() throws Exception {
    // Two lookups for the same URL must yield the same cached monitor.
    // getMonitor returns null while async creation is pending, hence the
    // sleep-and-retry below.
    URL url = URL.valueOf("dubbo://" + NetUtils.getLocalAddress().getHostAddress() + ":2233");
    Monitor monitor1 = monitorFactory.getMonitor(url);
    Monitor monitor2 = monitorFactory.getMonitor(url);
    if (monitor1 == null || monitor2 == null) {
        Thread.sleep(2000);
        monitor1 = monitorFactory.getMonitor(url);
        monitor2 = monitorFactory.getMonitor(url);
    }
    Assertions.assertEquals(monitor1, monitor2);
}
/**
 * Casts an {@link Integer} to a decimal with the given precision and scale,
 * delegating to the long overload. A null input yields a null result.
 *
 * @param value     the value to cast, possibly null
 * @param precision target decimal precision
 * @param scale     target decimal scale
 * @return the decimal value, or null when the input is null
 */
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
    return value == null
            ? null
            : cast(value.longValue(), precision, scale);
}
@Test
public void shouldCastNullDouble() {
    // When: casting a null Double
    final BigDecimal decimal = DecimalUtil.cast((Double) null, 2, 1);

    // Then: the result must be null, not an exception
    assertThat(decimal, is(nullValue()));
}
/**
 * Writes an attribute after filtering non-XML characters out of its value;
 * prefix, namespace and local name are passed through unchanged.
 *
 * @throws XMLStreamException propagated from the underlying writer
 */
@Override
public void writeAttribute(String prefix, String namespaceURI, String localName, String value)
        throws XMLStreamException {
    String filteredValue = nonXmlCharFilterer.filter(value);
    writer.writeAttribute(prefix, namespaceURI, localName, filteredValue);
}
@Test
public void testWriteAttribute4Args() throws XMLStreamException {
    // Only the value must be filtered; the other three arguments pass through.
    filteringXmlStreamWriter.writeAttribute("prefix", "namespaceURI", "localName", "value");
    verify(xmlStreamWriterMock)
            .writeAttribute("prefix", "namespaceURI", "localName", "filteredValue");
}
/**
 * Renders a command and its parameters for logging. For the AUTH command the
 * parameters are masked so the password never reaches the logs.
 *
 * @param command the Redis command
 * @param params  the command parameters
 * @return the log-safe textual representation
 */
public static String toString(RedisCommand<?> command, Object... params) {
    final String paramsText = RedisCommands.AUTH.equals(command)
            ? "(password masked)"
            : LogHelper.toString(params);
    return "command: " + command + ", params: " + paramsText;
}
@Test
public void toStringWithSmallCollections() {
    // Single-element collections of each element type must render fully,
    // without truncation.
    List<String> strings = Collections.nCopies(1, "0");
    List<Integer> ints = Collections.nCopies(1, 1);
    List<Long> longs = Collections.nCopies(1, 2L);
    List<Double> doubles = Collections.nCopies(1, 3.1D);
    List<Float> floats = Collections.nCopies(1, 4.2F);
    List<Byte> bytes = Collections.nCopies(1, (byte) 5);
    List<Character> chars = Collections.nCopies(1, '6');
    assertThat(LogHelper.toString(strings)).isEqualTo("[0]");
    assertThat(LogHelper.toString(ints)).isEqualTo("[1]");
    assertThat(LogHelper.toString(longs)).isEqualTo("[2]");
    assertThat(LogHelper.toString(doubles)).isEqualTo("[3.1]");
    assertThat(LogHelper.toString(floats)).isEqualTo("[4.2]");
    assertThat(LogHelper.toString(bytes)).isEqualTo("[5]");
    assertThat(LogHelper.toString(chars)).isEqualTo("[6]");
}
/**
 * Returns the SCM info for a component. Only FILE components carry SCM info;
 * for any other component type an empty Optional is returned. Results are
 * cached per component.
 *
 * @param component the component to look up; must not be null
 * @return the SCM info, or empty for non-FILE components
 */
@Override
public Optional<ScmInfo> getScmInfo(Component component) {
    requireNonNull(component, "Component cannot be null");
    if (component.getType() == Component.Type.FILE) {
        return scmInfoCache.computeIfAbsent(component, this::getScmInfoForComponent);
    }
    return Optional.empty();
}
@Test
@UseDataProvider("allTypeComponentButFile")
public void do_not_query_db_nor_report_if_component_type_is_not_FILE(Component component) {
    // For every non-FILE component type, getScmInfo must return empty
    // without touching the report reader or the DB loader.
    BatchReportReader batchReportReader = mock(BatchReportReader.class);
    ScmInfoRepositoryImpl underTest = new ScmInfoRepositoryImpl(
            batchReportReader, analysisMetadata, dbLoader, diff, fileStatuses);
    assertThat(underTest.getScmInfo(component)).isEmpty();
    verifyNoInteractions(batchReportReader, dbLoader);
}
/**
 * Builds a WebClient-backed Eureka HTTP client targeting the given endpoint.
 *
 * @param endpoint the Eureka endpoint whose service URL is used as base URL
 * @return a new WebClientEurekaHttpClient for the endpoint
 */
@Override
public EurekaHttpClient newClient(EurekaEndpoint endpoint) {
    // we want a copy to modify. Don't change the original
    WebClient.Builder builder = this.builderSupplier.get().clone();
    setUrl(builder, endpoint.getServiceUrl());
    setCodecs(builder);
    // map 4xx responses via the dedicated exchange filter
    builder.filter(http4XxErrorExchangeFilterFunction());
    return new WebClientEurekaHttpClient(builder.build());
}
@Test
void testWithoutUserInfo() {
    // Creating a client for a URL without user-info credentials must not throw.
    transportClientFatory.newClient(new DefaultEndpoint("http://localhost:8761"));
}
/**
 * Returns the absolute value of a BIGINT. Long.MIN_VALUE is rejected because
 * its absolute value is not representable as a long.
 */
@Description("absolute value")
@ScalarFunction
@SqlType(StandardTypes.BIGINT)
public static long abs(@SqlType(StandardTypes.BIGINT) long num) {
    checkCondition(num != Long.MIN_VALUE, NUMERIC_VALUE_OUT_OF_RANGE,
            "Value -9223372036854775808 is out of range for abs(bigint)");
    return Math.abs(num);
}
@Test
public void testAbs() {
    // abs() across every numeric SQL type: positive, negative, NULL, and the
    // non-representable minimum values that must raise NUMERIC_VALUE_OUT_OF_RANGE.
    assertFunction("abs(TINYINT'123')", TINYINT, (byte) 123);
    assertFunction("abs(TINYINT'-123')", TINYINT, (byte) 123);
    assertFunction("abs(CAST(NULL AS TINYINT))", TINYINT, null);
    assertFunction("abs(SMALLINT'123')", SMALLINT, (short) 123);
    assertFunction("abs(SMALLINT'-123')", SMALLINT, (short) 123);
    assertFunction("abs(CAST(NULL AS SMALLINT))", SMALLINT, null);
    assertFunction("abs(123)", INTEGER, 123);
    assertFunction("abs(-123)", INTEGER, 123);
    assertFunction("abs(CAST(NULL AS INTEGER))", INTEGER, null);
    assertFunction("abs(BIGINT '123')", BIGINT, 123L);
    assertFunction("abs(BIGINT '-123')", BIGINT, 123L);
    assertFunction("abs(12300000000)", BIGINT, 12300000000L);
    assertFunction("abs(-12300000000)", BIGINT, 12300000000L);
    assertFunction("abs(CAST(NULL AS BIGINT))", BIGINT, null);
    assertFunction("abs(123.0E0)", DOUBLE, 123.0);
    assertFunction("abs(-123.0E0)", DOUBLE, 123.0);
    assertFunction("abs(123.45E0)", DOUBLE, 123.45);
    assertFunction("abs(-123.45E0)", DOUBLE, 123.45);
    assertFunction("abs(CAST(NULL AS DOUBLE))", DOUBLE, null);
    assertFunction("abs(REAL '-754.1985')", REAL, 754.1985f);
    // MIN_VALUE of each fixed-width type has no positive counterpart
    assertInvalidFunction("abs(TINYINT'" + Byte.MIN_VALUE + "')", NUMERIC_VALUE_OUT_OF_RANGE);
    assertInvalidFunction("abs(SMALLINT'" + Short.MIN_VALUE + "')", NUMERIC_VALUE_OUT_OF_RANGE);
    assertInvalidFunction("abs(INTEGER'" + Integer.MIN_VALUE + "')", NUMERIC_VALUE_OUT_OF_RANGE);
    assertInvalidFunction("abs(-9223372036854775807 - if(rand() < 10, 1, 1))", NUMERIC_VALUE_OUT_OF_RANGE);
    assertFunction("abs(DECIMAL '123.45')", createDecimalType(5, 2), SqlDecimal.of("12345", 5, 2));
    assertFunction("abs(DECIMAL '-123.45')", createDecimalType(5, 2), SqlDecimal.of("12345", 5, 2));
    assertFunction("abs(DECIMAL '1234567890123456.78')", createDecimalType(18, 2), SqlDecimal.of("123456789012345678", 18, 2));
    assertFunction("abs(DECIMAL '-1234567890123456.78')", createDecimalType(18, 2), SqlDecimal.of("123456789012345678", 18, 2));
    assertFunction("abs(DECIMAL '12345678901234560.78')", createDecimalType(19, 2), SqlDecimal.of("1234567890123456078", 18, 2));
    assertFunction("abs(DECIMAL '-12345678901234560.78')", createDecimalType(19, 2), SqlDecimal.of("1234567890123456078", 18, 2));
    assertFunction("abs(CAST(NULL AS DECIMAL(1,0)))", createDecimalType(1, 0), null);
}
/**
 * Returns, without removing, the head of the queue. If nothing is directly
 * available, the put-stack is drained once and the peek is retried.
 *
 * @return the head element, or {@code null} when the queue is empty
 */
@Override
public E peek() {
    E next = peekNext();
    if (next == null && drainPutStack()) {
        next = peekNext();
    }
    return next;
}
@Test
public void when_peekEmpty_then_Null() {
    // peek() on an empty queue must return null, not throw.
    assertNull(queue.peek());
}
/** Fails if the map under test does not contain the given key. */
public final void containsKey(@Nullable Object key) {
    // Delegates to the keySet() so failure messages read "map.keySet()".
    check("keySet()").that(checkNotNull(actual).keySet()).contains(key);
}
@Test
public void containsKey_failsWithSameToString() {
    // Looking up Integer 1 in a map keyed by Long 1L and String "1" must fail,
    // and the failure message must point out the same-toString near-misses.
    expectFailureWhenTestingThat(ImmutableMap.of(1L, "value1", 2L, "value2", "1", "value3"))
        .containsKey(1);
    assertFailureKeys(
        "value of",
        "expected to contain",
        "an instance of",
        "but did not",
        "though it did contain",
        "full contents",
        "map was");
    assertFailureValue("value of", "map.keySet()");
    assertFailureValue("expected to contain", "1");
}
/**
 * Two DelegationTokenData instances are equal iff the other object is exactly
 * a DelegationTokenData (subclasses are never equal) and both wrap equal
 * tokenInformation values.
 */
@Override
public boolean equals(Object o) {
    // Identity short-circuit: standard equals() pattern, avoids the field
    // comparison when comparing an object with itself.
    if (this == o) {
        return true;
    }
    // Exact-class check, preserving the original's subclass-hostile semantics;
    // also rejects null.
    if (o == null || !o.getClass().equals(DelegationTokenData.class)) {
        return false;
    }
    DelegationTokenData other = (DelegationTokenData) o;
    return tokenInformation.equals(other.tokenInformation);
}
@Test
public void testEquals() {
    // equals() must be symmetric for unequal instances and reflexive for each.
    assertNotEquals(DELEGATIONTOKENDATA.get(0), DELEGATIONTOKENDATA.get(1));
    assertNotEquals(DELEGATIONTOKENDATA.get(1), DELEGATIONTOKENDATA.get(0));
    assertNotEquals(DELEGATIONTOKENDATA.get(0), DELEGATIONTOKENDATA.get(2));
    assertNotEquals(DELEGATIONTOKENDATA.get(2), DELEGATIONTOKENDATA.get(0));
    assertEquals(DELEGATIONTOKENDATA.get(0), DELEGATIONTOKENDATA.get(0));
    assertEquals(DELEGATIONTOKENDATA.get(1), DELEGATIONTOKENDATA.get(1));
    assertEquals(DELEGATIONTOKENDATA.get(2), DELEGATIONTOKENDATA.get(2));
}
/**
 * Validates a connector configuration, delegating to the three-argument
 * overload with the final flag set to {@code true}.
 */
@Override
public void validateConnectorConfig(Map<String, String> connectorProps, Callback<ConfigInfos> callback) {
    validateConnectorConfig(connectorProps, callback, true);
}
@Test
public void testConfigValidationNullConfig() {
    // A null config value must surface as exactly one validation error
    // attributed to the offending key.
    AbstractHerder herder = createConfigValidationHerder(
            SampleSourceConnector.class, noneConnectorClientConfigOverridePolicy);
    Map<String, String> config = new HashMap<>();
    config.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, SampleSourceConnector.class.getName());
    config.put("name", "somename");
    config.put("required", "value");
    config.put("testKey", null);
    final ConfigInfos configInfos = herder.validateConnectorConfig(config, s -> null, false);
    assertEquals(1, configInfos.errorCount());
    assertErrorForKey(configInfos, "testKey");
    verifyValidationIsolation();
}
/**
 * Executes a web-service request. Forbidden in medium-test mode; a 401/403
 * response is converted into a failure, and authentication warnings in the
 * response are surfaced.
 *
 * @param request the WS request to execute
 * @return the WS response
 */
public WsResponse call(WsRequest request) {
    checkState(!globalMode.isMediumTest(), "No WS call should be made in medium test mode");
    WsResponse response = target.wsConnector().call(request);
    failIfUnauthorized(response);
    checkAuthenticationWarnings(response);
    return response;
}
@Test
public void call_whenMissingCredentials_shouldFailWithMsg() {
    // A 401 response without credentials must fail with an actionable message
    // pointing at the token/login properties.
    WsRequest request = newRequest();
    server.stubFor(get(urlEqualTo(URL_ENDPOINT))
        .willReturn(aResponse()
            .withStatus(401)
            .withBody("Missing authentication")));
    DefaultScannerWsClient client = new DefaultScannerWsClient(wsClient, false,
        new GlobalAnalysisMode(new ScannerProperties(Collections.emptyMap())), analysisWarnings);
    assertThatThrownBy(() -> client.call(request))
        .isInstanceOf(MessageException.class)
        .hasMessage("Not authorized. Analyzing this project requires authentication. Please check the user token in the property 'sonar.token' " +
            "or the credentials in the properties 'sonar.login' and 'sonar.password'.");
}
/**
 * Resolves the Assertion Consumer Service URL for the authentication request.
 * Precedence: (1) an explicit ACS URL in the AuthnRequest, (2) the metadata
 * endpoint selected by the AuthnRequest's ACS index, (3) the single metadata
 * endpoint when only one exists, (4) the endpoint flagged as default.
 *
 * @throws SamlValidationException when metadata has no ACS endpoints, the
 *         index is out of bounds, or no default endpoint exists
 */
public void resolveAssertionConsumerService(AuthenticationRequest authenticationRequest)
        throws SamlValidationException {
    // set URL if set in authnRequest
    final String authnAcsURL = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceURL();
    if (authnAcsURL != null) {
        authenticationRequest.setAssertionConsumerURL(authnAcsURL);
        return;
    }
    // search url from metadata endpoints
    final Integer authnAcsIdx = authenticationRequest.getAuthnRequest().getAssertionConsumerServiceIndex();
    List<Endpoint> endpoints = authenticationRequest.getConnectionEntity().getRoleDescriptors()
            .get(0).getEndpoints(AssertionConsumerService.DEFAULT_ELEMENT_NAME);
    if (endpoints.isEmpty()) {
        throw new SamlValidationException("Authentication: Assertion Consumer Service not found in metadata");
    }
    if (authnAcsIdx != null && endpoints.size() <= authnAcsIdx) {
        throw new SamlValidationException("Authentication: Assertion Consumer Index is out of bounds");
    }
    // TODO: check if this statement is correct
    // NOTE(review): a single endpoint is used even when an index was supplied;
    // the bounds check above already guarantees the index would be 0 here.
    if (endpoints.size() == 1) {
        authenticationRequest.setAssertionConsumerURL(endpoints.get(0).getLocation());
        return;
    }
    if (authnAcsIdx == null) {
        // no index given: fall back to the endpoint marked isDefault in metadata
        AssertionConsumerService defaultAcs = endpoints.stream()
                .filter(e -> e instanceof AssertionConsumerService)
                .map(acs -> (AssertionConsumerService) acs)
                .filter(IndexedEndpoint::isDefault)
                .findAny()
                .orElse(null);
        if (defaultAcs == null) {
            throw new SamlValidationException("Authentication: There is no default AssertionConsumerService");
        }
        authenticationRequest.setAssertionConsumerURL(defaultAcs.getLocation());
        return;
    }
    authenticationRequest.setAssertionConsumerURL(endpoints.get(authnAcsIdx).getLocation());
}
@Test
void resolveAcsUrlWithAcsUrlAndIndex() throws SamlValidationException {
    // When both an explicit ACS URL and an index are present, the URL wins.
    AuthnRequest authnRequest = OpenSAMLUtils.buildSAMLObject(AuthnRequest.class);
    authnRequest.setAssertionConsumerServiceURL(URL_ASSERTION_CONSUMER_SERVICE);
    authnRequest.setAssertionConsumerServiceIndex(1);
    AuthenticationRequest authenticationRequest = new AuthenticationRequest();
    authenticationRequest.setAuthnRequest(authnRequest);
    assertionConsumerServiceUrlService.resolveAssertionConsumerService(authenticationRequest);
    assertEquals(URL_ASSERTION_CONSUMER_SERVICE, authenticationRequest.getAssertionConsumerURL());
}
/**
 * Maps a SeaTunnel column definition back to a Redshift column type.
 * Out-of-range precisions/scales/lengths are clamped to Redshift limits with
 * a warning; MAP/ARRAY/ROW (and over-long strings) fall back to SUPER;
 * unknown types delegate to the parent converter and re-wrap failures as a
 * connector-type conversion error.
 *
 * @param column the SeaTunnel column to convert
 * @return the Redshift type definition for the column
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(REDSHIFT_BOOLEAN);
            builder.dataType(REDSHIFT_BOOLEAN);
            break;
        case TINYINT:
        case SMALLINT:
            // Redshift has no TINYINT; both map to SMALLINT
            builder.columnType(REDSHIFT_SMALLINT);
            builder.dataType(REDSHIFT_SMALLINT);
            break;
        case INT:
            builder.columnType(REDSHIFT_INTEGER);
            builder.dataType(REDSHIFT_INTEGER);
            break;
        case BIGINT:
            builder.columnType(REDSHIFT_BIGINT);
            builder.dataType(REDSHIFT_BIGINT);
            break;
        case FLOAT:
            builder.columnType(REDSHIFT_REAL);
            builder.dataType(REDSHIFT_REAL);
            break;
        case DOUBLE:
            builder.columnType(REDSHIFT_DOUBLE_PRECISION);
            builder.dataType(REDSHIFT_DOUBLE_PRECISION);
            break;
        case DECIMAL:
            // clamp precision/scale into Redshift's supported ranges
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                // shrink scale proportionally so precision fits the maximum
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%d,%d)", REDSHIFT_NUMERIC, precision, scale));
            builder.dataType(REDSHIFT_NUMERIC);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case STRING:
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                // unknown length: use the widest VARCHAR
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_CHARACTER_VARYING, MAX_CHARACTER_VARYING_LENGTH));
                builder.dataType(REDSHIFT_CHARACTER_VARYING);
                builder.length((long) MAX_CHARACTER_VARYING_LENGTH);
            } else if (column.getColumnLength() <= MAX_CHARACTER_VARYING_LENGTH) {
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_CHARACTER_VARYING, column.getColumnLength()));
                builder.dataType(REDSHIFT_CHARACTER_VARYING);
                builder.length(column.getColumnLength());
            } else {
                // too long for VARCHAR: fall back to SUPER
                log.warn(
                        "The length of string column {} is {}, which exceeds the maximum length of {}, "
                                + "the length will be set to {}",
                        column.getName(),
                        column.getColumnLength(),
                        MAX_SUPER_LENGTH,
                        MAX_SUPER_LENGTH);
                builder.columnType(REDSHIFT_SUPER);
                builder.dataType(REDSHIFT_SUPER);
            }
            break;
        case BYTES:
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_BINARY_VARYING, MAX_BINARY_VARYING_LENGTH));
                builder.dataType(REDSHIFT_BINARY_VARYING);
            } else if (column.getColumnLength() <= MAX_BINARY_VARYING_LENGTH) {
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_BINARY_VARYING, column.getColumnLength()));
                builder.dataType(REDSHIFT_BINARY_VARYING);
                builder.length(column.getColumnLength());
            } else {
                // over-long binary is clamped to the maximum VARBYTE length
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_BINARY_VARYING, MAX_BINARY_VARYING_LENGTH));
                builder.dataType(REDSHIFT_BINARY_VARYING);
                log.warn(
                        "The length of binary column {} is {}, which exceeds the maximum length of {}, "
                                + "the length will be set to {}",
                        column.getName(),
                        column.getColumnLength(),
                        MAX_BINARY_VARYING_LENGTH,
                        MAX_BINARY_VARYING_LENGTH);
            }
            break;
        case TIME:
            Integer timeScale = column.getScale();
            if (timeScale != null && timeScale > MAX_TIME_SCALE) {
                timeScale = MAX_TIME_SCALE;
                log.warn(
                        "The time column {} type time({}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to time({})",
                        column.getName(),
                        column.getScale(),
                        MAX_SCALE,
                        timeScale);
            }
            builder.columnType(REDSHIFT_TIME);
            builder.dataType(REDSHIFT_TIME);
            builder.scale(timeScale);
            break;
        case TIMESTAMP:
            Integer timestampScale = column.getScale();
            if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) {
                timestampScale = MAX_TIMESTAMP_SCALE;
                log.warn(
                        "The timestamp column {} type timestamp({}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to timestamp({})",
                        column.getName(),
                        column.getScale(),
                        MAX_TIMESTAMP_SCALE,
                        timestampScale);
            }
            builder.columnType(REDSHIFT_TIMESTAMP);
            builder.dataType(REDSHIFT_TIMESTAMP);
            builder.scale(timestampScale);
            break;
        case MAP:
        case ARRAY:
        case ROW:
            // complex types are stored as semi-structured SUPER
            builder.columnType(REDSHIFT_SUPER);
            builder.dataType(REDSHIFT_SUPER);
            break;
        default:
            try {
                return super.reconvert(column);
            } catch (SeaTunnelRuntimeException e) {
                throw CommonError.convertToConnectorTypeError(
                        DatabaseIdentifier.REDSHIFT,
                        column.getDataType().getSqlType().name(),
                        column.getName());
            }
    }
    return builder.build();
}
@Test
public void testReconvertBoolean() {
    // BOOLEAN must map to Redshift BOOLEAN while preserving name, nullability,
    // default value and comment.
    Column column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(BasicType.BOOLEAN_TYPE)
                    .nullable(true)
                    .defaultValue(true)
                    .comment("test")
                    .build();
    BasicTypeDefine typeDefine = RedshiftTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(RedshiftTypeConverter.REDSHIFT_BOOLEAN, typeDefine.getColumnType());
    Assertions.assertEquals(RedshiftTypeConverter.REDSHIFT_BOOLEAN, typeDefine.getDataType());
    Assertions.assertEquals(column.isNullable(), typeDefine.isNullable());
    Assertions.assertEquals(column.getDefaultValue(), typeDefine.getDefaultValue());
    Assertions.assertEquals(column.getComment(), typeDefine.getComment());
}
/**
 * Computes where to place the key-preview popup: the key's horizontal center
 * offset by the window position, lowered by the preview background's bottom
 * padding, and — for the EXTEND animation — by the key's height as well.
 *
 * @param key          the pressed key
 * @param theme        preview theme (background padding, animation type)
 * @param windowOffset x/y offset of the hosting window
 * @return the popup anchor point
 */
@Override
public Point calculatePositionForPreview(
        Keyboard.Key key, PreviewPopupTheme theme, int[] windowOffset) {
    Point point = new Point(key.x + windowOffset[0], key.y + windowOffset[1]);
    Rect padding = new Rect();
    theme.getPreviewKeyBackground().getPadding(padding);
    point.offset((key.width / 2), padding.bottom);
    if (theme.getPreviewAnimationType() == PreviewPopupTheme.ANIMATION_STYLE_EXTEND) {
        // taking it down a bit to the edge of the origin key
        point.offset(0, key.height);
    }
    return point;
}
@Test
public void testCalculatePositionForPreviewWithExtendAnimation() throws Exception {
    // With the EXTEND animation the preview is additionally lowered by the
    // key height; x is still the key's horizontal center plus the offset.
    mTheme.setPreviewAnimationType(PreviewPopupTheme.ANIMATION_STYLE_EXTEND);
    int[] offsets = new int[] {50, 60};
    Point result = mUnderTest.calculatePositionForPreview(mTestKey, mTheme, offsets);
    Assert.assertEquals(mTestKey.x + mTestKey.width / 2 + offsets[0], result.x);
    Assert.assertEquals(mTestKey.y + mTestKey.height + offsets[1], result.y);
}
/**
 * Fits a SimpleImputer on the given columns with both tuning parameters set
 * to 0.5 (delegates to the four-argument overload; presumably the lower and
 * upper quantile bounds — confirm against that overload's documentation).
 */
public static SimpleImputer fit(DataFrame data, String... columns) {
    return fit(data, 0.5, 0.5, columns);
}
@Test
public void testUSArrests() throws Exception {
    // Smoke test: fitting on the USArrests dataset must not throw.
    System.out.println(USArrests.data);
    SimpleImputer imputer = SimpleImputer.fit(USArrests.data);
    System.out.println(imputer);
}
/** Task entry point: performs a single health-check pass. */
@Override
public void run() {
    doHealthCheck();
}
@Test
void testRunHealthyInstanceWithTimeoutFromInstance() throws InterruptedException {
    // An instance carrying its own 800 ms heart-beat timeout must be marked
    // unhealthy (but not removed) after >800 ms without a beat.
    injectInstance(true, System.currentTimeMillis()).getExtendDatum()
            .put(PreservedMetadataKeys.HEART_BEAT_TIMEOUT, 800);
    when(globalConfig.isExpireInstance()).thenReturn(true);
    TimeUnit.SECONDS.sleep(1);
    beatCheckTask.run();
    assertFalse(client.getAllInstancePublishInfo().isEmpty());
    assertFalse(client.getInstancePublishInfo(
            Service.newService(NAMESPACE, GROUP_NAME, SERVICE_NAME)).isHealthy());
}
/**
 * Returns whether this bundle is "similar" to another: baseline states match
 * (ignoring init progress), feed-block state and reason match, distribution
 * configs are exactly equal, and all shared bucket-space states match.
 *
 * @param other the bundle to compare against
 * @return true when the bundles are considered similar
 */
public boolean similarTo(ClusterStateBundle other) {
    if (!baselineState.getClusterState().similarToIgnoringInitProgress(other.baselineState.getClusterState())) {
        return false;
    }
    if (clusterFeedIsBlocked() != other.clusterFeedIsBlocked()) {
        return false;
    }
    // only compare feed-block details when both sides are actually blocked
    if (clusterFeedIsBlocked() && !feedBlock.similarTo(other.feedBlock)) {
        return false;
    }
    // Distribution configs must match exactly for bundles to be similar.
    // It may be the case that they are both null, in which case they are also considered equal.
    if (!Objects.equals(distributionConfig, other.distributionConfig)) {
        return false;
    }
    // FIXME we currently treat mismatching bucket space sets as unchanged to avoid breaking some tests
    return derivedBucketSpaceStates.entrySet().stream()
            .allMatch(entry -> other.derivedBucketSpaceStates.getOrDefault(entry.getKey(), entry.getValue())
                    .getClusterState().similarToIgnoringInitProgress(entry.getValue().getClusterState()));
}
@Test
void similarity_test_considers_distribution_config() {
    // Bundles with equal distribution configs are similar; any config
    // difference (node count or topology) breaks similarity.
    var bundle5Nodes = createTestBundleWithDistributionConfig(DistributionBuilder.configForFlatCluster(5));
    var bundle5Nodes2 = createTestBundleWithDistributionConfig(DistributionBuilder.configForFlatCluster(5));
    var bundle6Nodes = createTestBundleWithDistributionConfig(DistributionBuilder.configForFlatCluster(6));
    var bundle2x3Grouped = createTestBundleWithDistributionConfig(DistributionBuilder.configForHierarchicCluster(
            DistributionBuilder.withGroups(2).eachWithNodeCount(3)));
    assertTrue(bundle5Nodes.similarTo(bundle5Nodes));
    assertTrue(bundle5Nodes.similarTo(bundle5Nodes2));
    assertTrue(bundle5Nodes2.similarTo(bundle5Nodes));
    assertFalse(bundle5Nodes.similarTo(bundle6Nodes));
    assertFalse(bundle6Nodes.similarTo(bundle2x3Grouped));
}
/**
 * Registers the Consul data-changed initializer bean, unless one is already
 * defined elsewhere in the context.
 */
@Bean
@ConditionalOnMissingBean(ConsulDataChangedInit.class)
public DataChangedInit consulDataChangedInit(final ConsulClient consulClient) {
    return new ConsulDataChangedInit(consulClient);
}
@Test
public void testConsulDataInit() {
    // The factory method must produce a non-null initializer for any client.
    ConsulSyncConfiguration consulListener = new ConsulSyncConfiguration();
    ConsulClient consulClient = mock(ConsulClient.class);
    assertNotNull(consulListener.consulDataChangedInit(consulClient));
}
/**
 * Parses a FEEL range literal (e.g. {@code "[1..10)"}) into a {@link Range}.
 * The first character selects the start boundary ('(' or ']' -> open,
 * '[' -> closed), the last selects the end boundary (')' or '[' -> open,
 * ']' -> closed), and the two endpoint literals are separated by "..".
 * Every malformed input yields an error result rather than an exception.
 *
 * @param from the range literal to parse
 * @return the parsed range, or an InvalidParametersEvent error result
 */
public FEELFnResult<Range> invoke(@ParameterName("from") String from) {
    if (from == null || from.isEmpty() || from.isBlank()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "cannot be null"));
    }
    // start boundary: '(' and ']' both denote an open start, '[' a closed one
    Range.RangeBoundary startBoundary;
    if (from.startsWith("(") || from.startsWith("]")) {
        startBoundary = RangeBoundary.OPEN;
    } else if (from.startsWith("[")) {
        startBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not start with a valid character"));
    }
    // end boundary: ')' and '[' both denote an open end, ']' a closed one
    Range.RangeBoundary endBoundary;
    if (from.endsWith(")") || from.endsWith("[")) {
        endBoundary = RangeBoundary.OPEN;
    } else if (from.endsWith("]")) {
        endBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not end with a valid character"));
    }
    // exactly two endpoint literals separated by ".."
    String[] split = from.split("\\.\\.");
    if (split.length != 2) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not include two literals separated by `..` two dots characters"));
    }
    // strip the boundary characters from each endpoint
    String leftString = split[0].substring(1);
    String rightString = split[1].substring(0, split[1].length() - 1);
    if ((leftString.isEmpty() || leftString.isBlank()) && (rightString.isEmpty() || rightString.isBlank())) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "at least one endpoint must not be null"));
    }
    // parse each endpoint as a FEEL expression and validate the node kind
    BaseNode leftNode = parse(leftString);
    if (!nodeIsAllowed(leftNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a recognised valid literal"));
    }
    BaseNode rightNode = parse(rightString);
    if (!nodeIsAllowed(rightNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a recognised valid literal"));
    }
    // evaluate the endpoints and validate the resulting value types
    Object left = leftNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(left)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a valid value " + left.getClass()));
    }
    Object right = rightNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a valid value " + right.getClass()));
    }
    // both endpoints must be of equivalent types to form a comparable range
    if (!nodesReturnsSameType(left, right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "endpoints must be of equivalent types"));
    }
    return FEELFnResult.ofResult(new RangeImpl(startBoundary, (Comparable) left, (Comparable) right, endBoundary));
}
@Test void evaluateWithValidFunctionInvocationNode() { Object[][] data = validFunctionInvocationNodeData(); Arrays.stream(data).forEach(objects -> { String expression = String.format("[%1$s..%1$s]", objects[0]); FEELFnResult<Range> retrieved = rangeFunction.invoke(expression); assertThat(retrieved.isRight()) .withFailMessage(() -> String.format("Expected 'retrieved.isRight()' from, %s", expression)) .isTrue(); }); }
public void go() { KieServices ks = KieServices.Factory.get(); KieContainer kContainer = ks.getKieClasspathContainer(); KieSession ksession = kContainer.newKieSession(); List<String> list = new ArrayList<>(); ksession.setGlobal( "list", list ); ksession.fireAllRules(); System.out.println(list); list.clear(); ksession.insert("Debbie"); ksession.fireAllRules(); System.out.println( list ); }
@Test public void testGo() { KieServices ks = KieServices.Factory.get(); KieContainer kContainer = ks.getKieClasspathContainer(); KieSession ksession = kContainer.newKieSession(); List<String> list = new ArrayList<String>(); ksession.setGlobal( "list", list ); ksession.fireAllRules(); assertEquals( 2, list.size() ); assertTrue( list.contains( "car" ) ); assertTrue( list.contains( "ball" ) ); list.clear(); ksession.insert("Debbie"); ksession.fireAllRules(); ksession.fireAllRules(); assertEquals( 1, list.size() ); assertTrue( list.contains( "doll" ) ); }
public abstract boolean compare(A actual, E expected);
@Test public void testTransforming_both_compare_nullInputValues() { try { HYPHENS_MATCH_COLONS.compare(null, "abcde:fghij"); fail("Expected NullPointerException to be thrown but wasn't"); } catch (NullPointerException expected) { } try { HYPHENS_MATCH_COLONS.compare("mailing-list", null); fail("Expected NullPointerException to be thrown but wasn't"); } catch (NullPointerException expected) { } }
@Override public AuthenticationResult authenticate(final ChannelHandlerContext context, final PacketPayload payload) { if (SSL_REQUEST_PAYLOAD_LENGTH == payload.getByteBuf().markReaderIndex().readInt() && SSL_REQUEST_CODE == payload.getByteBuf().readInt()) { if (ProxySSLContext.getInstance().isSSLEnabled()) { SslHandler sslHandler = new SslHandler(ProxySSLContext.getInstance().newSSLEngine(context.alloc()), true); context.pipeline().addFirst(SslHandler.class.getSimpleName(), sslHandler); context.writeAndFlush(new PostgreSQLSSLWillingPacket()); } else { context.writeAndFlush(new PostgreSQLSSLUnwillingPacket()); } return AuthenticationResultBuilder.continued(); } payload.getByteBuf().resetReaderIndex(); AuthorityRule rule = ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getGlobalRuleMetaData().getSingleRule(AuthorityRule.class); return startupMessageReceived ? processPasswordMessage(context, (PostgreSQLPacketPayload) payload, rule) : processStartupMessage(context, (PostgreSQLPacketPayload) payload, rule); }
@Test void assertSSLWilling() { ByteBuf byteBuf = createByteBuf(8, 8); byteBuf.writeInt(8); byteBuf.writeInt(80877103); PacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8); ChannelHandlerContext context = mock(ChannelHandlerContext.class, RETURNS_DEEP_STUBS); when(ProxySSLContext.getInstance().isSSLEnabled()).thenReturn(true); AuthenticationResult actual = new OpenGaussAuthenticationEngine().authenticate(context, payload); verify(context).writeAndFlush(any(PostgreSQLSSLWillingPacket.class)); verify(context.pipeline()).addFirst(eq(SslHandler.class.getSimpleName()), any(SslHandler.class)); assertFalse(actual.isFinished()); }
public static String getS3EncryptionContext(String bucket, Configuration conf) throws IOException { // look up the per-bucket value of the encryption context String encryptionContext = S3AUtils.lookupBucketSecret(bucket, conf, S3_ENCRYPTION_CONTEXT); if (encryptionContext == null) { // look up the global value of the encryption context encryptionContext = S3AUtils.lookupPassword(null, conf, S3_ENCRYPTION_CONTEXT); } if (encryptionContext == null) { // no encryption context, return "" return ""; } return encryptionContext; }
@Test public void testGetS3EncryptionContextNoSet() throws IOException { Configuration configuration = new Configuration(false); final String result = S3AEncryption.getS3EncryptionContext("bucket1", configuration); Assert.assertEquals("", result); }
/**
 * Guesses a podcast's release schedule (daily, weekly, biweekly, monthly,
 * four-weekly, fixed weekdays, specific days, or unknown) from past release
 * dates, and predicts the next expected release.
 *
 * @param releaseDates chronologically ordered release dates; only the most
 *                     recent MAX_DATA_POINTS entries are considered
 * @return a Guess with the detected schedule, the relevant days (where
 *         applicable), and the predicted next release date (null if fewer
 *         than two data points are available)
 */
public static Guess performGuess(List<Date> releaseDates) {
    if (releaseDates.size() <= 1) {
        return new Guess(Schedule.UNKNOWN, null, null);
    } else if (releaseDates.size() > MAX_DATA_POINTS) {
        // Only the most recent releases matter; old data may reflect an older schedule.
        releaseDates = releaseDates.subList(releaseDates.size() - MAX_DATA_POINTS, releaseDates.size());
    }
    Stats stats = getStats(releaseDates);

    // Tolerances scale with the number of data points.
    final int maxTotalWrongDays = Math.max(1, releaseDates.size() / 5);
    final int maxSingleDayOff = releaseDates.size() / 10;

    // Start the prediction from the last release, snapped to the median release time of day.
    GregorianCalendar last = new GregorianCalendar();
    last.setTime(releaseDates.get(releaseDates.size() - 1));
    last.set(Calendar.HOUR_OF_DAY, (int) stats.medianHour);
    last.set(Calendar.MINUTE, (int) ((stats.medianHour - Math.floor(stats.medianHour)) * 60));
    last.set(Calendar.SECOND, 0);
    last.set(Calendar.MILLISECOND, 0);

    if (Math.abs(stats.medianDistance - ONE_DAY) < 2 * ONE_HOUR
            && stats.avgDeltaToMedianDistance < 2 * ONE_HOUR) {
        // Median gap of ~1 day with small deviation: daily schedule.
        addTime(last, ONE_DAY);
        return new Guess(Schedule.DAILY, Arrays.asList(Calendar.MONDAY, Calendar.TUESDAY,
                Calendar.WEDNESDAY, Calendar.THURSDAY, Calendar.FRIDAY,
                Calendar.SATURDAY, Calendar.SUNDAY), last.getTime());
    } else if (Math.abs(stats.medianDistance - ONE_WEEK) < ONE_DAY
            && stats.avgDeltaToMedianDistance < 2 * ONE_DAY) {
        // Just using last.set(Calendar.DAY_OF_WEEK) could skip a week
        // when the last release is delayed over week boundaries
        addTime(last, 3 * ONE_DAY);
        do {
            addTime(last, ONE_DAY);
        } while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek);
        return new Guess(Schedule.WEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime());
    } else if (Math.abs(stats.medianDistance - 2 * ONE_WEEK) < ONE_DAY
            && stats.avgDeltaToMedianDistance < 2 * ONE_DAY) {
        // Just using last.set(Calendar.DAY_OF_WEEK) could skip a week
        // when the last release is delayed over week boundaries
        addTime(last, 10 * ONE_DAY);
        do {
            addTime(last, ONE_DAY);
        } while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek);
        return new Guess(Schedule.BIWEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime());
    } else if (Math.abs(stats.medianDistance - ONE_MONTH) < 5 * ONE_DAY
            && stats.avgDeltaToMedianDistance < 5 * ONE_DAY) {
        // Roughly monthly gap: distinguish same-day-of-month (MONTHLY) from
        // every-4-weeks-same-weekday (FOURWEEKLY).
        if (stats.daysOfMonth[stats.mostOftenDayOfMonth] >= releaseDates.size() - maxTotalWrongDays) {
            // Just using last.set(Calendar.DAY_OF_MONTH) could skip a week
            // when the last release is delayed over week boundaries
            addTime(last, 2 * ONE_WEEK);
            do {
                addTime(last, ONE_DAY);
            } while (last.get(Calendar.DAY_OF_MONTH) != stats.mostOftenDayOfMonth);
            return new Guess(Schedule.MONTHLY, null, last.getTime());
        }
        addTime(last, 3 * ONE_WEEK + 3 * ONE_DAY);
        do {
            addTime(last, ONE_DAY);
        } while (last.get(Calendar.DAY_OF_WEEK) != stats.mostOftenDayOfWeek);
        return new Guess(Schedule.FOURWEEKLY, List.of(stats.mostOftenDayOfWeek), last.getTime());
    }

    // Find release days
    List<Integer> largeDays = new ArrayList<>();
    for (int i = Calendar.SUNDAY; i <= Calendar.SATURDAY; i++) {
        if (stats.daysOfWeek[i] > maxSingleDayOff) {
            largeDays.add(i);
        }
    }

    // Ensure that all release days are used similarly often
    int averageDays = releaseDates.size() / largeDays.size();
    boolean matchesAverageDays = true;
    for (int day : largeDays) {
        if (stats.daysOfWeek[day] < averageDays - maxSingleDayOff) {
            matchesAverageDays = false;
            break;
        }
    }

    if (matchesAverageDays && stats.medianDistance < ONE_WEEK) {
        // Fixed daily release schedule (eg Mo, Thu, Fri)
        addUntil(last, largeDays);
        if (largeDays.size() == 5 && largeDays.containsAll(Arrays.asList(
                Calendar.MONDAY, Calendar.TUESDAY, Calendar.WEDNESDAY,
                Calendar.THURSDAY, Calendar.FRIDAY))) {
            return new Guess(Schedule.WEEKDAYS, largeDays, last.getTime());
        }
        return new Guess(Schedule.SPECIFIC_DAYS, largeDays, last.getTime());
    } else if (largeDays.size() == 1) {
        // Probably still weekly with more exceptions than others
        addUntil(last, largeDays);
        return new Guess(Schedule.WEEKLY, largeDays, last.getTime());
    }

    // No recognizable pattern: predict a bit more than half the median gap out.
    addTime(last, (long) (0.6f * stats.medianDistance));
    return new Guess(Schedule.UNKNOWN, null, last.getTime());
}
@Test public void testWeekly() { ArrayList<Date> releaseDates = new ArrayList<>(); releaseDates.add(makeDate("2024-01-07 16:30")); // Sunday releaseDates.add(makeDate("2024-01-14 16:25")); releaseDates.add(makeDate("2024-01-21 14:25")); releaseDates.add(makeDate("2024-01-28 16:15")); // Next week ReleaseScheduleGuesser.Guess guess = performGuess(releaseDates); assertEquals(ReleaseScheduleGuesser.Schedule.WEEKLY, guess.schedule); assertClose(makeDate("2024-02-04 16:30"), guess.nextExpectedDate, 2 * ONE_HOUR); // One-off early release releaseDates.add(makeDate("2024-02-02 16:35")); guess = performGuess(releaseDates); assertEquals(ReleaseScheduleGuesser.Schedule.WEEKLY, guess.schedule); assertClose(makeDate("2024-02-11 16:30"), guess.nextExpectedDate, 2 * ONE_HOUR); // One-off late release releaseDates.add(makeDate("2024-02-13 16:35")); guess = performGuess(releaseDates); assertEquals(ReleaseScheduleGuesser.Schedule.WEEKLY, guess.schedule); assertClose(makeDate("2024-02-18 16:30"), guess.nextExpectedDate, 2 * ONE_HOUR); }
@Override public ClientDetailsEntity updateClient(ClientDetailsEntity oldClient, ClientDetailsEntity newClient) throws IllegalArgumentException { if (oldClient != null && newClient != null) { for (String uri : newClient.getRegisteredRedirectUri()) { if (blacklistedSiteService.isBlacklisted(uri)) { throw new IllegalArgumentException("Client URI is blacklisted: " + uri); } } // if the client is flagged to allow for refresh tokens, make sure it's got the right scope ensureRefreshTokenConsistency(newClient); // make sure we don't have both a JWKS and a JWKS URI ensureKeyConsistency(newClient); // check consistency when using HEART mode checkHeartMode(newClient); // check the sector URI checkSectorIdentifierUri(newClient); // make sure a client doesn't get any special system scopes ensureNoReservedScopes(newClient); return clientRepository.updateClient(oldClient.getId(), newClient); } throw new IllegalArgumentException("Neither old client or new client can be null!"); }
@Test public void updateClient_yesOfflineAccess() { ClientDetailsEntity oldClient = new ClientDetailsEntity(); ClientDetailsEntity client = new ClientDetailsEntity(); Set<String> grantTypes = new HashSet<>(); grantTypes.add("refresh_token"); client.setGrantTypes(grantTypes); client = service.updateClient(oldClient, client); Mockito.verify(scopeService, Mockito.atLeastOnce()).removeReservedScopes(Matchers.anySet()); assertThat(client.getScope().contains(SystemScopeService.OFFLINE_ACCESS), is(equalTo(true))); }
/**
 * Dispatches game/spam chat messages to per-item charge trackers. Each branch
 * matches one message shape (a check, use, activate, or break message for a
 * chargeable item) and updates the stored charge count accordingly; break
 * messages may additionally fire a notification.
 *
 * NOTE: branch order matters — break messages are matched before check/use
 * messages for the same item so the charge counter is resynchronized first.
 */
@Subscribe
public void onChatMessage(ChatMessage event) {
    if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM) {
        String message = Text.removeTags(event.getMessage());
        // One matcher per recognized message pattern; only the first matching
        // branch below runs.
        Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
        Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
        Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
        Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
        Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
        Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
        Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
        Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
        Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
        Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
        Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
        Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
        Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
        Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
        Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
        Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
        Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);

        if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE)) {
            notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
        } else if (dodgyBreakMatcher.find()) {
            // Breaking resets the counter to a fresh necklace's charge count.
            notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
            updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
        } else if (dodgyCheckMatcher.find()) {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
        } else if (dodgyProtectMatcher.find()) {
            updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
        } else if (amuletOfChemistryCheckMatcher.find()) {
            updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
        } else if (amuletOfChemistryUsedMatcher.find()) {
            // The game spells out a single remaining charge as the word "one".
            final String match = amuletOfChemistryUsedMatcher.group(1);
            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }
            updateAmuletOfChemistryCharges(charges);
        } else if (amuletOfChemistryBreakMatcher.find()) {
            notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
            updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
        } else if (amuletOfBountyCheckMatcher.find()) {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
        } else if (amuletOfBountyUsedMatcher.find()) {
            updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
        } else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT)) {
            updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
        } else if (message.contains(BINDING_BREAK_TEXT)) {
            notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);

            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
        } else if (bindingNecklaceUsedMatcher.find()) {
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
            // NOTE(review): equipment is dereferenced without a null check here,
            // unlike the ring-of-forging branch below — TODO confirm this cannot
            // be null when a binding-necklace "used" message arrives.
            if (equipment.contains(ItemID.BINDING_NECKLACE)) {
                updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
            }
        } else if (bindingNecklaceCheckMatcher.find()) {
            final String match = bindingNecklaceCheckMatcher.group(1);

            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }
            updateBindingNecklaceCharges(charges);
        } else if (ringOfForgingCheckMatcher.find()) {
            final String match = ringOfForgingCheckMatcher.group(1);

            int charges = 1;
            if (!match.equals("one")) {
                charges = Integer.parseInt(match);
            }
            updateRingOfForgingCharges(charges);
        } else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY)) {
            final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);

            // Determine if the player smelted with a Ring of Forging equipped.
            if (equipment == null) {
                return;
            }
            // NOTE(review): inventory may be null when only the Varrock platebody
            // message matched — TODO confirm inventory.count cannot NPE here.
            if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1)) {
                int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
                updateRingOfForgingCharges(charges);
            }
        } else if (message.equals(RING_OF_FORGING_BREAK_TEXT)) {
            notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");

            // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
            updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
        } else if (chronicleAddMatcher.find()) {
            final String match = chronicleAddMatcher.group(1);
            if (match.equals("one")) {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
            } else {
                setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
            }
        } else if (chronicleUseAndCheckMatcher.find()) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
        } else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
        } else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
        } else if (message.equals(CHRONICLE_FULL_TEXT)) {
            setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
        } else if (slaughterActivateMatcher.find()) {
            // A null capture group means the bracelet broke (no count in the message).
            final String found = slaughterActivateMatcher.group(1);
            if (found == null) {
                updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
            } else {
                updateBraceletOfSlaughterCharges(Integer.parseInt(found));
            }
        } else if (slaughterCheckMatcher.find()) {
            updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
        } else if (expeditiousActivateMatcher.find()) {
            final String found = expeditiousActivateMatcher.group(1);
            if (found == null) {
                updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
                notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
            } else {
                updateExpeditiousBraceletCharges(Integer.parseInt(found));
            }
        } else if (expeditiousCheckMatcher.find()) {
            updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
        } else if (bloodEssenceCheckMatcher.find()) {
            updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
        } else if (bloodEssenceExtractMatcher.find()) {
            updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
        } else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT)) {
            updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
        } else if (braceletOfClayCheckMatcher.find()) {
            updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
        } else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN)) {
            final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);

            // Determine if the player mined with a Bracelet of Clay equipped.
            if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY)) {
                final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);

                // Charge is not used if only 1 inventory slot is available when mining in Prifddinas
                boolean ignore = inventory != null
                        && inventory.count() == 27
                        && message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);

                if (!ignore) {
                    int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
                    updateBraceletOfClayCharges(charges);
                }
            }
        } else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT)) {
            notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
            updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
        }
    }
}
@Test public void testDodgyBreak() { ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", BREAK, "", 0); itemChargePlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_DODGY_NECKLACE, 10); }
public static ByteBuffer serializeRecordToFlatBuffer(Record<GenericObject> record) { DEFAULT_FB_BUILDER.clear(); return serializeRecordToFlatBuffer(DEFAULT_FB_BUILDER, record); }
// Round-trips records of increasing payload size through the FlatBuffer
// serializer and verifies payload, properties, and (when enabled) the full
// encryption context: keys, key metadata, batch/compression info, param, algo.
@Test(dataProvider = "encryption")
public void testFbSerialization(boolean isEncryption) {
    final String[] keyNames = {"key1", "key2"};
    final String param = "param";
    final String algo = "algo";
    int batchSize = 10;
    int compressionMsgSize = 10;

    // Exercise several payload sizes, including an empty payload at k == 0.
    for (int k = 0; k < 5; k++) {
        String payloadString = RandomStringUtils.random(142342 * k, String.valueOf(System.currentTimeMillis()));
        final String key1Value = payloadString + "test1";
        final String key2Value = payloadString + "test2";
        final byte[][] keyValues = {key1Value.getBytes(), key2Value.getBytes()};
        byte[] data = payloadString.getBytes();
        Map<String, String> properties = Maps.newHashMap();
        properties.put("prop1", payloadString);
        Map<String, String> metadata1 = Maps.newHashMap();
        metadata1.put("version", "v1");
        metadata1.put("ckms", "cmks-1");
        Map<String, String> metadata2 = Maps.newHashMap();
        metadata2.put("version", "v2");
        metadata2.put("ckms", "cmks-2");

        Record<GenericObject> record = createRecord(data, algo, keyNames, keyValues, param.getBytes(),
                metadata1, metadata2, batchSize, compressionMsgSize, properties, isEncryption);
        ByteBuffer flatBuffer = Utils.serializeRecordToFlatBuffer(record);

        // Deserialize and verify the payload survived the round trip.
        Message kinesisJsonResponse = Message.getRootAsMessage(flatBuffer);
        byte[] fbPayloadBytes = new byte[kinesisJsonResponse.payloadLength()];
        kinesisJsonResponse.payloadAsByteBuffer().get(fbPayloadBytes);
        assertEquals(data, fbPayloadBytes);

        if (isEncryption) {
            // Verify every field of the serialized encryption context.
            org.apache.pulsar.io.kinesis.fbs.EncryptionCtx encryptionCtxDeser = kinesisJsonResponse.encryptionCtx();
            byte compressionType = encryptionCtxDeser.compressionType();
            int fbBatchSize = encryptionCtxDeser.batchSize();
            boolean isBathcMessage = encryptionCtxDeser.isBatchMessage();
            int fbCompressionMsgSize = encryptionCtxDeser.uncompressedMessageSize();
            int totalKeys = encryptionCtxDeser.keysLength();

            // Rebuild the key->value and key->metadata maps from the FlatBuffer.
            Map<String, Map<String, String>> fbKeyMetadataResult = Maps.newHashMap();
            Map<String, byte[]> fbKeyValueResult = Maps.newHashMap();
            for (int i = 0; i < encryptionCtxDeser.keysLength(); i++) {
                org.apache.pulsar.io.kinesis.fbs.EncryptionKey encryptionKey = encryptionCtxDeser.keys(i);
                String keyName = encryptionKey.key();
                byte[] keyValueBytes = new byte[encryptionKey.valueLength()];
                encryptionKey.valueAsByteBuffer().get(keyValueBytes);
                fbKeyValueResult.put(keyName, keyValueBytes);
                Map<String, String> fbMetadata = Maps.newHashMap();
                for (int j = 0; j < encryptionKey.metadataLength(); j++) {
                    KeyValue encMtdata = encryptionKey.metadata(j);
                    fbMetadata.put(encMtdata.key(), encMtdata.value());
                }
                fbKeyMetadataResult.put(keyName, fbMetadata);
            }
            byte[] paramBytes = new byte[encryptionCtxDeser.paramLength()];
            encryptionCtxDeser.paramAsByteBuffer().get(paramBytes);

            assertEquals(totalKeys, 2);
            assertEquals(batchSize, fbBatchSize);
            assertTrue(isBathcMessage);
            assertEquals(compressionMsgSize, fbCompressionMsgSize);
            assertEquals(keyValues[0], fbKeyValueResult.get(keyNames[0]));
            assertEquals(keyValues[1], fbKeyValueResult.get(keyNames[1]));
            assertEquals(metadata1, fbKeyMetadataResult.get(keyNames[0]));
            assertEquals(metadata2, fbKeyMetadataResult.get(keyNames[1]));
            assertEquals(compressionType, org.apache.pulsar.io.kinesis.fbs.CompressionType.LZ4);
            assertEquals(param.getBytes(), paramBytes);
            assertEquals(algo, encryptionCtxDeser.algo());
        }

        // Properties must round-trip regardless of encryption.
        Map<String, String> fbproperties = Maps.newHashMap();
        for (int i = 0; i < kinesisJsonResponse.propertiesLength(); i++) {
            KeyValue property = kinesisJsonResponse.properties(i);
            fbproperties.put(property.key(), property.value());
        }
        assertEquals(properties, fbproperties);
    }
}
public <T> boolean parse(Handler<T> handler, T target, CharSequence input) { if (input == null) throw new NullPointerException("input == null"); return parse(handler, target, input, 0, input.length()); }
@Test void parse_valuesAreRequired() { for (String missingValue : Arrays.asList("k1", "k1 ", "k1=v1,k2", "k1 ,k2=v1")) { assertThatThrownBy(() -> entrySplitter.parse(parseIntoMap, map, missingValue)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Invalid input: missing key value separator '='"); } assertThat(map.isEmpty()); }
@Override public Set<Name> getLocations() { if(StringUtils.isNotBlank(session.getHost().getRegion())) { final S3Region region = new S3Region(session.getHost().getRegion()); if(log.isDebugEnabled()) { log.debug(String.format("Return single region %s set in bookmark", region)); } return Collections.singleton(region); } if(StringUtils.isNotEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) { if(log.isDebugEnabled()) { log.debug(String.format("Return empty set for hostname %s", session.getHost())); } // Connected to single bucket return Collections.emptySet(); } if(!S3Session.isAwsHostname(session.getHost().getHostname(), false)) { if(new S3Protocol().getRegions().equals(session.getHost().getProtocol().getRegions())) { // Return empty set for unknown provider if(log.isDebugEnabled()) { log.debug(String.format("Return empty set for unknown provider %s", session.getHost())); } return Collections.emptySet(); } } return session.getHost().getProtocol().getRegions(); }
@Test public void testVirtualhost() { assertTrue(new S3LocationFeature(virtualhost, virtualhost.getClient().getRegionEndpointCache()).getLocations().isEmpty()); }
public static String stringEmptyAndThenExecute(String source, Callable<String> callable) { if (StringUtils.isEmpty(source)) { try { return callable.call(); } catch (Exception e) { LogUtils.NAMING_LOGGER.error("string empty and then execute cause an exception.", e); } } return source == null ? null : source.trim(); }
@Test void testStringEmptyAndThenExecuteFail() { String word = ""; final String expect = "call"; String actual = TemplateUtils.stringEmptyAndThenExecute(word, () -> expect); assertEquals(expect, actual); }
@Override public void assign(Object obj) { if (obj instanceof URI) { obj = obj.toString(); } super.assign(obj); }
@Test public void requireThatURICanBeAssigned() { UriFieldValue value = new UriFieldValue(); String uri = "http://user:pass@localhost:69/path#fragment?query"; value.assign(URI.create(uri)); assertEquals(uri, value.getWrappedValue()); }
public void heartbeat(boolean successful, Instant lastHeartbeatAttempt) { if (successful) { heartbeatLastSuccess = lastHeartbeatAttempt; heartbeatSuccessesSinceLastFailure++; heartbeatFailuresSinceLastSuccess = 0; } else { heartbeatLastFailure = lastHeartbeatAttempt; heartbeatSuccessesSinceLastFailure = 0; heartbeatFailuresSinceLastSuccess++; } }
@Test void success_resets_failing() { HeartbeatState state = new HeartbeatState( clock, clock.now(), new HeartbeatConfig(Duration.ofMinutes(1), 4, Duration.ofMinutes(4))); assertOk(state); clock.tick(Duration.ofSeconds(30)); state.heartbeat(false, clock.now()); assertFailing(state, 1, 0.125); clock.tick(Duration.ofSeconds(60)); state.heartbeat(false, clock.now()); assertFailing(state, 2, 0.375); clock.tick(Duration.ofSeconds(60)); state.heartbeat(true, clock.now()); assertOk(state); }
@Override public Map<String, String> apply(Object obj) { Object instanceInfo = ReflectUtils.invokeWithNoneParameter(obj, "getInstance"); return SpringRouterUtils.getMetadata(ReflectUtils.invokeWithNoneParameter(instanceInfo, "getPayload")); }
@Test public void testApply() throws Exception { Map<String, String> map = Collections.singletonMap("foo", "bar"); ZookeeperInstance instance = new ZookeeperInstance("id", "name", map); ServiceInstanceBuilder<ZookeeperInstance> builder = ServiceInstance.builder(); ServiceInstance<ZookeeperInstance> serviceInstance = builder.address("localhost").port(80).name("name").payload(instance).build(); ZookeeperServer server = new ZookeeperServer(serviceInstance); ZookeeperMetadataMapper mapper = new ZookeeperMetadataMapper(); Map<String, String> metadata = mapper.apply(server); Assert.assertNotNull(metadata); Assert.assertEquals("bar", metadata.get("foo")); }
@Override public void setParameters(Collection<CompoundVariable> parameters) throws InvalidVariableException { checkParameterCount(parameters, MIN_PARAMETER_COUNT, MAX_PARAMETER_COUNT); values = parameters.toArray(new CompoundVariable[parameters.size()]); }
@Test public void testIsPropDefinedError() { Assertions.assertThrows( InvalidVariableException.class, () -> isPropDefined.setParameters(params)); }
static List<MappingField> resolveFields(Schema schema) { Map<String, MappingField> fields = new LinkedHashMap<>(); for (Schema.Field schemaField : schema.getFields()) { String name = schemaField.name(); // SQL types are nullable by default and NOT NULL is currently unsupported. Schema.Type schemaFieldType = unwrapNullableType(schemaField.schema()).getType(); QueryDataType type = AVRO_TO_SQL.getOrDefault(schemaFieldType, OBJECT); MappingField field = new MappingField(name, type); fields.putIfAbsent(field.name(), field); } return new ArrayList<>(fields.values()); }
@Test public void test_resolveNullableFields() { // given Schema schema = SchemaBuilder.record("name") .fields() .name("boolean").type().nullable().booleanType().noDefault() .name("int").type().nullable().intType().noDefault() .name("long").type().nullable().longType().noDefault() .name("float").type().nullable().floatType().noDefault() .name("double").type().nullable().doubleType().noDefault() .name("string").type().nullable().stringType().noDefault() .name("object").type().nullable().record("object").fields().endRecord().noDefault() .endRecord(); // when List<MappingField> fields = AvroResolver.resolveFields(schema); // then assertIterableEquals(fields, new MappingField("boolean", QueryDataType.BOOLEAN), new MappingField("int", QueryDataType.INT), new MappingField("long", QueryDataType.BIGINT), new MappingField("float", QueryDataType.REAL), new MappingField("double", QueryDataType.DOUBLE), new MappingField("string", QueryDataType.VARCHAR), new MappingField("object", QueryDataType.OBJECT)); }
public Optional<BoolQueryBuilder> getTopAggregationFilter(TopAggregationDefinition<?> topAggregation) { checkArgument(topAggregations.contains(topAggregation), "topAggregation must have been declared in constructor"); return toBoolQuery( postFilters, (e, v) -> !topAggregation.isSticky() || !topAggregation.getFilterScope().intersect(e.getFilterScope())); }
@Test public void getTopAggregationFilters_return_empty_when_no_declared_sticky_topAggregation() { AllFilters allFilters = randomNonEmptyAllFilters(); Set<TopAggregationDefinition<?>> atLeastOneNonStickyTopAggs = randomNonEmptyTopAggregations(() -> false); RequestFiltersComputer underTest = new RequestFiltersComputer(allFilters, atLeastOneNonStickyTopAggs); atLeastOneNonStickyTopAggs.forEach(topAgg -> assertThat(underTest.getTopAggregationFilter(topAgg)).isEmpty()); }
@Override public Object convert(String value) { if (value == null || value.isEmpty()) { return value; } return value.toUpperCase(Locale.ENGLISH); }
@Test public void testConvert() throws Exception { Converter c = new UppercaseConverter(new HashMap<String, Object>()); assertNull(c.convert(null)); assertEquals("", c.convert("")); assertEquals("FOOBAR", c.convert("foobar")); assertEquals("FOO BAR", c.convert("foo BAR")); assertEquals("FOOBAR", c.convert("FooBar")); assertEquals("FOOBAR ", c.convert("foobar ")); assertEquals(" FOOBAR", c.convert(" foobar")); assertEquals("FOOBAR", c.convert("FOOBAR")); }
@Override protected Optional<ErrorResponse> filter(DiscFilterRequest req) { var certs = req.getClientCertificateChain(); log.fine(() -> "Certificate chain contains %d elements".formatted(certs.size())); if (certs.isEmpty()) { log.fine("Missing client certificate"); return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized")); } if (legacyMode) { log.fine("Legacy mode validation complete"); ClientPrincipal.attachToRequest(req, Set.of(), Set.of(READ, WRITE)); return Optional.empty(); } var permission = Permission.getRequiredPermission(req).orElse(null); if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden")); var clientCert = certs.get(0); var clientIds = new TreeSet<String>(); var permissions = new TreeSet<Permission>(); for (Client c : allowedClients) { if (!c.permissions().contains(permission)) continue; if (!c.certificates().contains(clientCert)) continue; clientIds.add(c.id()); permissions.addAll(c.permissions()); } if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden")); ClientPrincipal.attachToRequest(req, clientIds, permissions); return Optional.empty(); }
@Test
void accepts_any_trusted_client_certificate_in_legacy_mode() {
  var request = FilterTestUtils.newRequestBuilder().withClientCertificate(LEGACY_CLIENT_CERT).build();
  var handler = new MockResponseHandler();

  newFilterWithLegacyMode().filter(request, handler);

  // Legacy mode must let the request through (no error response) and grant
  // full READ/WRITE access without binding it to any particular client id.
  assertNull(handler.getResponse());
  var expectedPrincipal = new ClientPrincipal(Set.of(), Set.of(READ, WRITE));
  assertEquals(expectedPrincipal, request.getUserPrincipal());
}
/**
 * Evaluates the placeholders of {@code consumerPath} against {@code path},
 * storing each resolved placeholder directly into the supplied headers map.
 */
public static void evalPlaceholders(Map<String, Object> headersMap, String path, String consumerPath) {
  // Delegate to the BiConsumer-based overload with the map as the sink.
  evalPlaceholders((key, value) -> headersMap.put(key, value), path, consumerPath);
}
@Test
@DisplayName("Test that the placeholders throw OutOfBoundsException if the sizes differ")
void testEvalPlaceholdersOutOfBound() {
  Map<String, Object> headers = new HashMap<>();
  String requestPath = "/some/url";
  String consumerPath = "/some/url/{key}";

  // The consumer path has one more segment than the request path, so resolving
  // the {key} placeholder must fail with an ArrayIndexOutOfBoundsException.
  assertThrows(ArrayIndexOutOfBoundsException.class,
      () -> HttpHelper.evalPlaceholders(headers, requestPath, consumerPath),
      "The sizes of the URLs differ and it should throw an exception");
}
/**
 * Returns the collector used to accumulate validation failures for this context.
 */
@Override
public FailureCollector getFailureCollector() {
  return failureCollector;
}
@Test
public void getFailureCollector() {
  // Arrange: a fresh sink context must always carry a usable failure collector.
  // (Javadoc-style /** */ comments inside a method body are stray doc comments;
  // plain // comments are the correct form for inline arrange/act/assert notes.)
  BatchContextImpl context = new BatchSinkContextImpl();

  // Act
  FailureCollector collector = context.getFailureCollector();

  // Assert: with no failures recorded, the collector reports an empty failure list
  // instead of throwing.
  ValidationException validationException = collector.getOrThrowException();
  assertEquals(0, validationException.getFailures().size());
}
/**
 * Deletes the given local resources.
 *
 * <p>Best-effort per entry: a file that is already missing is logged and skipped
 * rather than failing the whole batch.
 *
 * @throws IOException if a deletion fails for any reason other than the file
 *     already being absent.
 */
@Override
protected void delete(Collection<LocalResourceId> resourceIds) throws IOException {
  for (LocalResourceId resourceId : resourceIds) {
    deleteIgnoringMissing(resourceId);
  }
}

/** Deletes a single resource, tolerating (and logging) an already-missing file. */
private void deleteIgnoringMissing(LocalResourceId resourceId) throws IOException {
  try {
    Files.delete(resourceId.getPath());
  } catch (NoSuchFileException e) {
    LOG.info(
        "Ignoring failed deletion of file {} which already does not exist.", resourceId, e);
  }
}
@Test
public void testDelete() throws Exception {
  File first = temporaryFolder.newFile("file1");
  File second = temporaryFolder.newFile("file2");
  File untouched = temporaryFolder.newFile("other-file");

  // Delete only the first two files; the third must survive.
  localFileSystem.delete(
      toLocalResourceIds(
          Lists.newArrayList(first.toPath(), second.toPath()), false /* isDirectory */));

  assertFalse(first.exists());
  assertFalse(second.exists());
  assertTrue(untouched.exists());
}
/**
 * Loads the configuration resource read from the given stream into {@code conf}.
 *
 * @param conf configuration to populate.
 * @param is stream to read the configuration resource from.
 * @throws IOException if the stream cannot be read.
 */
public static void load(Configuration conf, InputStream is) throws IOException {
  conf.addResource(is);
}
@Test
public void testCompactFormatProperty() throws IOException {
  final String testfile = "test-compact-format-property.xml";
  Configuration conf = new Configuration(false);
  assertEquals(0, conf.size());
  // try-with-resources: the original leaked the stream returned by openStream(),
  // which was never closed.
  try (var is =
      Thread.currentThread().getContextClassLoader().getResource(testfile).openStream()) {
    ConfigurationUtils.load(conf, is);
  }
  // The compact-format file defines exactly two properties.
  assertEquals(2, conf.size());
  assertEquals("val1", conf.get("key.1"));
  assertEquals("val2", conf.get("key.2"));
}