focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
protected static List<String> getParameters(String path) { List<String> parameters = null; int startIndex = path.indexOf('{'); while (startIndex != -1) { int endIndex = path.indexOf('}', startIndex); if (endIndex != -1) { if (parameters == null) { parameters = new ArrayList<>(); } parameters.add(path.substring(startIndex + 1, endIndex)); startIndex = path.indexOf('{', endIndex); } else { // Break out of loop as no valid end token startIndex = -1; } } return parameters == null ? Collections.emptyList() : parameters; }
// A URI with no '{...}' tokens must yield an empty parameter list.
@Test
public void testGetParametersNone() {
    final String uri = "rest://put:/persons/hello/world?routeId=route4";
    assertTrue(RestSpanDecorator.getParameters(uri).isEmpty());
}
// Completes the asynchronous update check and, if a newer CLI version was found,
// logs a yellow (ANSI \u001B[33m) LIFECYCLE message pointing at the GitHub release,
// changelog, and the privacy doc explaining how to disable the check. No-op when
// the check found nothing newer.
static void finishUpdateChecker( ConsoleLogger logger, Future<Optional<String>> updateCheckFuture) { UpdateChecker.finishUpdateCheck(updateCheckFuture) .ifPresent( latestVersion -> { String cliReleaseUrl = ProjectInfo.GITHUB_URL + "/releases/tag/v" + latestVersion + "-cli"; String changelogUrl = ProjectInfo.GITHUB_URL + "/blob/master/jib-cli/CHANGELOG.md"; String privacyUrl = ProjectInfo.GITHUB_URL + "/blob/master/docs/privacy.md"; String message = String.format( "\n\u001B[33mA new version of Jib CLI (%s) is available (currently using %s). Download the latest" + " Jib CLI version from %s\n%s\u001B[0m\n\nPlease see %s for info on disabling this update check.\n", latestVersion, VersionInfo.getVersionSimple(), cliReleaseUrl, changelogUrl, privacyUrl); logger.log(LogEvent.Level.LIFECYCLE, message); }); }
// Verifies that a resolved update-check future for version 2.0.0 produces a
// LIFECYCLE log containing the new version, the current version, and the release URL.
@Test public void testFinishUpdateChecker_correctMessageLogged() { Future<Optional<String>> updateCheckFuture = Futures.immediateFuture(Optional.of("2.0.0")); JibCli.finishUpdateChecker(logger, updateCheckFuture); verify(logger) .log( eq(LogEvent.Level.LIFECYCLE), contains( "A new version of Jib CLI (2.0.0) is available (currently using " + VersionInfo.getVersionSimple() + "). Download the latest Jib CLI version from " + ProjectInfo.GITHUB_URL + "/releases/tag/v2.0.0-cli")); }
/**
 * Hash over the same fields, in the same order, as the equals contract.
 * Objects.hash uses the identical 31-based accumulation with an initial value
 * of 1, and autoboxing the boolean/long yields the same element hashes as
 * Boolean.hashCode/Long.hashCode, so the computed value is unchanged.
 */
@Override
public int hashCode() {
    return Objects.hash(
            username,
            getPasswordValue(),
            getSocketAddress().get(),
            getNonProxyHostsValue(),
            httpHeaders.get(),
            getType(),
            connectTimeoutMillis);
}
// Proxies differing only in their auth header must be unequal and must hash differently.
@Test
void differentAuthHeaders() {
    final Object withFirstHeader = createHeaderProxy(ADDRESS_1, HEADER_1);
    final Object withSecondHeader = createHeaderProxy(ADDRESS_1, HEADER_2);
    assertThat(withFirstHeader).isNotEqualTo(withSecondHeader);
    assertThat(withFirstHeader.hashCode()).isNotEqualTo(withSecondHeader.hashCode());
}
/**
 * Returns the SKIP LOCKED clause for this dialect, or an empty string when the
 * database version does not support it (callers simply append the result).
 */
@Override
public String selectForUpdateSkipLocked() {
    if (supportsSelectForUpdateSkipLocked) {
        return " FOR UPDATE SKIP LOCKED";
    }
    return "";
}
// MariaDB 10.6+ supports SKIP LOCKED, so the dialect must emit the clause.
@Test
void mariaDB106DoesSupportSelectForUpdateSkipLocked() {
    final MariaDbDialect dialect = new MariaDbDialect("MariaDB", "10.6");
    assertThat(dialect.selectForUpdateSkipLocked()).isEqualTo(" FOR UPDATE SKIP LOCKED");
}
// Purges CDN-cached content for each given path: a whole-container purge when the
// path is itself a container, otherwise a per-object purge keyed by container name
// and object key. Swift and IO failures are mapped to BackgroundException with a
// "Cannot write CDN configuration" message. The 'method' and 'prompt' parameters
// are unused by this Swift implementation.
@Override public void invalidate(final Path container, final Distribution.Method method, final List<Path> files, final LoginCallback prompt) throws BackgroundException { try { for(Path file : files) { if(containerService.isContainer(file)) { session.getClient().purgeCDNContainer(regionService.lookup(containerService.getContainer(file)), container.getName(), null); } else { session.getClient().purgeCDNObject(regionService.lookup(containerService.getContainer(file)), container.getName(), containerService.getKey(file), null); } } } catch(GenericException e) { throw new SwiftExceptionMappingService().map("Cannot write CDN configuration", e); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Cannot write CDN configuration", e); } }
// Smoke test against a live session: purging a single (random, nonexistent) file in
// the test container must complete without throwing.
@Test public void testInvalidateFile() throws Exception { final SwiftDistributionPurgeFeature feature = new SwiftDistributionPurgeFeature(session); final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.volume, Path.Type.directory)); container.attributes().setRegion("IAD"); feature.invalidate(container, Distribution.DOWNLOAD, Collections.singletonList(new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file))), new DisabledLoginCallback()); }
// Windowed stream-stream join overload taking a key-less ValueJoiner: adapts it to
// the key-aware form and delegates to the ValueJoinerWithKey overload, which holds
// the actual join logic and null-argument validation.
@Override public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream, final ValueJoiner<? super V, ? super VO, ? extends VR> joiner, final JoinWindows windows) { return join(otherStream, toValueJoinerWithKey(joiner), windows); }
// A null ValueJoinerWithKey on a stream-table join must be rejected eagerly with
// an NPE carrying the exact message "joiner can't be null".
@Test public void shouldNotAllowNullValueJoinerWithKeyOnTableJoinWithJoiner() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.join(testTable, (ValueJoinerWithKey<? super String, ? super String, ? super String, ?>) null, Joined.as("name"))); assertThat(exception.getMessage(), equalTo("joiner can't be null")); }
/**
 * Reports whether the given prefix length is a valid IPv4 mask bit, i.e. one
 * registered in the MaskBit lookup table.
 */
public static boolean isMaskBitValid(int maskBit) {
    final Object registered = MaskBit.get(maskBit);
    return registered != null;
}
// /32 is the maximum valid IPv4 prefix length and must be accepted.
@Test
public void isMaskBitValidTest() {
    assertTrue(Ipv4Util.isMaskBitValid(32));
}
// Masks sensitive literals in a KSQL statement by parsing it and rewriting via the
// Visitor. Catching StackOverflowError alongside Exception is deliberate: deeply
// nested queries can blow the parser stack, and in every failure mode we must still
// return a masked string (fallbackMasking) rather than leak the raw query.
public static String getMaskedStatement(final String query) { try { final ParseTree tree = DefaultKsqlParser.getParseTree(query); return new Visitor().visit(tree); } catch (final Exception | StackOverflowError e) { return fallbackMasking(query); } }
// Mixed input: a valid CREATE CONNECTOR followed by a CREATE STREAM with a typo
// ("WIT" instead of "WITH"). Masking must still replace every string property value
// with '[string]' in both statements, proving the fallback path masks unparseable text.
@Test public void shouldMaskMixedValidInvalidStatements() { // Given: // Typo in "WITH" => "WIT" final String query = "CREATE SOURCE CONNECTOR test_connector WITH (" + " \"connector.class\" = 'PostgresSource', \n" + " 'connection.url' = 'jdbc:postgresql://localhost:5432/my.db',\n" + " `mode`='bulk',\n" + " \"topic.prefix\"='jdbc-',\n" + " \"table.whitelist\"='users',\n" + " \"key\"='username');\n" + "CREATE STREAM `stream` (id varchar) WIT ('format' = 'avro', \"kafka_topic\" = 'test_topic', partitions=3);"; // When final String maskedQuery = QueryMask.getMaskedStatement(query); // Then final String expected = "CREATE SOURCE CONNECTOR test_connector WITH ( \"connector.class\" = 'PostgresSource', \n " + "'connection.url'='[string]',\n " + "`mode`='[string]',\n " + "\"topic.prefix\"='[string]',\n " + "\"table.whitelist\"='[string]',\n " + "\"key\"='[string]');\n" + "CREATE STREAM `stream` (id varchar) WIT ('format'='[string]', \"kafka_topic\"='[string]', partitions=3);"; assertThat(maskedQuery, is(expected)); }
/**
 * Parses a numeric literal into the narrowest reasonable Number:
 * fractional/exponent literals become Double (or BigDecimal when the double
 * overflows to infinity); integral literals become Integer when they fit,
 * otherwise Long.
 *
 * @param string numeric literal (must be a valid BigDecimal)
 * @return Integer, Long, Double, or BigDecimal
 * @throws NumberFormatException if the literal is not a valid number
 * @throws ArithmeticException if an integral literal does not fit in a long
 */
public static Number toNumber(String string) {
    BigDecimal bigDecimal = new BigDecimal(string);
    // A decimal point or exponent marker means the literal is fractional.
    // Plain indexOf replaces the former Apache Commons StringUtils.containsAny
    // call with an equivalent stdlib check.
    boolean fractional =
        string.indexOf('.') >= 0 || string.indexOf('e') >= 0 || string.indexOf('E') >= 0;
    if (fractional) {
        double d = bigDecimal.doubleValue();
        if (Double.isFinite(d)) {
            return d;
        }
        // Too large/small for double: keep arbitrary precision.
        return bigDecimal;
    }
    long l = bigDecimal.longValueExact();
    int i = (int) l;
    if (i == l) {
        return i; // fits in int; no redundant re-cast needed
    }
    return l;
}
// Exercises each narrowing branch: small integral -> Integer, large integral -> Long,
// fractional -> Double, and a literal overflowing double -> BigDecimal.
@Test public void testToNumber() { Number n1 = PdlParseUtils.toNumber("1"); assertEquals(n1.getClass(), Integer.class); assertEquals(n1.intValue(), 1); Number n10000000000 = PdlParseUtils.toNumber("10000000000"); assertEquals(n10000000000.getClass(), Long.class); assertEquals(n10000000000.longValue(), 10000000000L); Number n1_0 = PdlParseUtils.toNumber("1.0"); assertEquals(n1_0.getClass(), Double.class); assertEquals(n1_0.doubleValue(), 1.0d, 0.001d); Number n1_0e10 = PdlParseUtils.toNumber("1234567.1e1000"); assertEquals(n1_0e10.getClass(), BigDecimal.class); }
public static String getClassName(Schema schema) { String namespace = schema.getNamespace(); String name = schema.getName(); if (namespace == null || "".equals(namespace)) return name; String dot = namespace.endsWith("$") ? "" : "."; // back-compatibly handle $ return mangle(namespace) + dot + mangleTypeIdentifier(name); }
// A generated type whose name collides with a reserved word is mangled with a '$'
// suffix; getClassName must preserve that mangling.
@Test void testCanGetClassOfMangledType() { assertEquals("org.apache.avro.specific.int$", SpecificData.getClassName(int$.getClassSchema())); }
/**
 * Resolves the quality gate in effect for the project: the project-specific gate
 * when one is configured, otherwise (lazily) the instance default.
 */
@Override
public QualityGate findEffectiveQualityGate(Project project) {
    return findQualityGate(project)
        .orElseGet(() -> findDefaultQualityGate());
}
// With no project gate and no resolvable default, resolution must fail with
// IllegalStateException rather than return null.
@Test public void findDefaultQualityGate_by_property_not_found() { assertThatThrownBy(() -> underTest.findEffectiveQualityGate(mock(Project.class))).isInstanceOf(IllegalStateException.class); }
// Precise (equality) sharding by modulo: rejects null sharding values, reduces the
// value via cutShardingValue, takes it mod sharding-count (BigInteger, so arbitrary
// numeric width is safe), and matches the resulting suffix against the available
// target names. Returns null when no target matches the suffix.
@Override public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) { ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new); String shardingResultSuffix = getShardingResultSuffix(cutShardingValue(shardingValue.getValue()).mod(new BigInteger(String.valueOf(shardingCount))).toString()); return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, shardingResultSuffix, shardingValue.getDataNodeInfo()).orElse(null); }
// Range sharding over [1, 2] with mod-16 must route to exactly the two tables whose
// suffixes are 1 and 2 (note: exercises the range overload, not the precise one above).
@Test void assertRangeDoShardingWithPartTargets() { ModShardingAlgorithm algorithm = (ModShardingAlgorithm) TypedSPILoader.getService(ShardingAlgorithm.class, "MOD", PropertiesBuilder.build(new Property("sharding-count", "16"))); Collection<String> actual = algorithm.doSharding(createAvailableTargetNames(), new RangeShardingValue<>("t_order", "order_id", DATA_NODE_INFO, Range.closed(1L, 2L))); assertThat(actual.size(), is(2)); assertTrue(actual.contains("t_order_1")); assertTrue(actual.contains("t_order_2")); }
public Matcher parse(String xpath) { if (xpath.equals("/text()")) { return TextMatcher.INSTANCE; } else if (xpath.equals("/node()")) { return NodeMatcher.INSTANCE; } else if (xpath.equals("/descendant::node()") || xpath.equals("/descendant:node()")) { // for compatibility return new CompositeMatcher(TextMatcher.INSTANCE, new ChildMatcher(new SubtreeMatcher(NodeMatcher.INSTANCE))); } else if (xpath.equals("/@*")) { return AttributeMatcher.INSTANCE; } else if (xpath.length() == 0) { return ElementMatcher.INSTANCE; } else if (xpath.startsWith("/@")) { String name = xpath.substring(2); String prefix = null; int colon = name.indexOf(':'); if (colon != -1) { prefix = name.substring(0, colon); name = name.substring(colon + 1); } if (prefixes.containsKey(prefix)) { return new NamedAttributeMatcher(prefixes.get(prefix), name); } else { return Matcher.FAIL; } } else if (xpath.startsWith("/*")) { return new ChildMatcher(parse(xpath.substring(2))); } else if (xpath.startsWith("///")) { return Matcher.FAIL; } else if (xpath.startsWith("//")) { return new SubtreeMatcher(parse(xpath.substring(1))); } else if (xpath.startsWith("/")) { int slash = xpath.indexOf('/', 1); if (slash == -1) { slash = xpath.length(); } String name = xpath.substring(1, slash); String prefix = null; int colon = name.indexOf(':'); if (colon != -1) { prefix = name.substring(0, colon); name = name.substring(colon + 1); } if (prefixes.containsKey(prefix)) { return new NamedElementMatcher(prefixes.get(prefix), name, parse(xpath.substring(slash))); } else { return Matcher.FAIL; } } else { return Matcher.FAIL; } }
// A prefixed element step matches only a descend into the mapped namespace + exact
// local name; wrong namespace or wrong name fails, and the matched child is an
// element-only match (no text/attribute matches).
@Test public void testPrefixedElement() { Matcher matcher = parser.parse("/prefix:name"); assertFalse(matcher.matchesText()); assertFalse(matcher.matchesElement()); assertFalse(matcher.matchesAttribute(null, "name")); assertFalse(matcher.matchesAttribute(NS, "name")); assertFalse(matcher.matchesAttribute(NS, "eman")); assertEquals(Matcher.FAIL, matcher.descend(null, "name")); assertEquals(Matcher.FAIL, matcher.descend(NS, "enam")); matcher = matcher.descend(NS, "name"); assertFalse(matcher.matchesText()); assertTrue(matcher.matchesElement()); assertFalse(matcher.matchesAttribute(null, "name")); assertFalse(matcher.matchesAttribute(NS, "name")); assertFalse(matcher.matchesAttribute(NS, "eman")); }
/**
 * Copies one HAProxy TLV into the attribute builder under a key of the form
 * {prefix}{two-hex-digit type}. Non-ASCII payloads are silently skipped.
 */
protected void handleHAProxyTLV(HAProxyTLV tlv, Attributes.Builder builder) {
    final byte[] value = ByteBufUtil.getBytes(tlv.content());
    if (!BinaryUtil.isAscii(value)) {
        // Only ASCII-clean TLV values are exposed as attributes.
        return;
    }
    final String keyName = HAProxyConstants.PROXY_PROTOCOL_TLV_PREFIX
        + String.format("%02x", tlv.typeByteValue());
    final Attributes.Key<String> key = AttributeKeys.valueOf(keyName);
    builder.set(key, new String(value, CharsetUtil.UTF_8));
}
// Smoke test: an ASCII TLV payload (type 0xE1) is handled without throwing.
// NOTE(review): no assertion on the builder contents -- consider verifying the
// pp-e1 attribute value is "xxxx".
@Test public void handleHAProxyTLV() { ByteBuf content = Unpooled.buffer(); content.writeBytes("xxxx".getBytes(StandardCharsets.UTF_8)); HAProxyTLV haProxyTLV = new HAProxyTLV((byte) 0xE1, content); negotiator.handleHAProxyTLV(haProxyTLV, Attributes.newBuilder()); }
/**
 * Resolves the Unix groups for the given user.
 *
 * @param userName user to look up
 * @return a fresh, mutable copy of the user's Unix groups
 * @throws IOException if group resolution fails
 */
@Override
public List<String> getGroups(String userName) throws IOException {
    // Diamond operator fixes the raw-type ArrayList construction; behavior is
    // unchanged (still a defensive mutable copy).
    return new ArrayList<>(getUnixGroups(userName));
}
// The resolvable-group mapping must return exactly the three stubbed groups.
@Test public void testGetGroupsResolvable() throws Exception { TestGroupResolvable mapping = new TestGroupResolvable(); List<String> groups = mapping.getGroups("user"); assertTrue(groups.size() == 3); assertTrue(groups.contains("abc")); assertTrue(groups.contains("def")); assertTrue(groups.contains("hij")); }
// Accessor for this vertex's configured local parallelism. Per the test below, the
// default (unset) value is -1 -- TODO confirm the sentinel against the setter's docs.
public int getLocalParallelism() { return localParallelism; }
// A freshly constructed vertex reports the -1 sentinel, meaning "use default parallelism".
@Test public void when_constructed_then_hasDefaultParallelism() { // When v = new Vertex("v", NoopP::new); // Then assertEquals(-1, v.getLocalParallelism()); }
// Returns the current JVM's OS process id, delegating to the lazily-initialized
// Pid singleton. Throws UtilException when the pid cannot be determined.
public static int getPid() throws UtilException { return Pid.INSTANCE.get(); }
// A real OS process id is always strictly positive.
@Test
public void getPidTest() {
    final int pid = RuntimeUtil.getPid();
    assertTrue(pid > 0);
}
// Fetches a field from the workflow instance on a worker thread with a hard timeout,
// wrapping every failure (timeout, execution, interruption) in MaestroInternalError.
// A null result is normalized to "" because the param layer disallows nulls.
// NOTE(review): the broad catch also swallows InterruptedException without
// re-interrupting the thread -- confirm whether the interrupt flag should be restored.
Object getFromInstance(String fieldName) { try { Object ret = executor .submit(() -> fromInstance(fieldName)) .get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS); // As param does not allow null, using empty space indicates unset for now. return ret == null ? "" : ret; } catch (Exception e) { throw new MaestroInternalError( e, "getFromInstance throws an exception for fieldName=[%s]", fieldName); } }
// Every invalid access path must surface as MaestroInternalError with the
// fieldName embedded: unknown fields, workflow-only params requested from a
// non-workflow context, and initiator fields under a subworkflow initiator.
@Test public void testInvalidGetFromInstance() { when(instanceWrapper.isWorkflowParam()).thenReturn(false); AssertHelper.assertThrows( "Invalid field", MaestroInternalError.class, "getFromInstance throws an exception for fieldName=[invalid_field]", () -> paramExtension.getFromInstance("invalid_field")); AssertHelper.assertThrows( "Invalid call for non-workflow param", MaestroInternalError.class, "getFromInstance throws an exception for fieldName=[owner]", () -> paramExtension.getFromInstance(Constants.WORKFLOW_OWNER_PARAM)); AssertHelper.assertThrows( "Invalid call for non-workflow param", MaestroInternalError.class, "getFromInstance throws an exception for fieldName=[FIRST_TIME_TRIGGER_TIMEZONE]", () -> paramExtension.getFromInstance(Constants.FIRST_TIME_TRIGGER_TIMEZONE_PARAM)); when(instanceWrapper.getInitiator()).thenReturn(new SubworkflowInitiator()); AssertHelper.assertThrows( "Invalid call for non-workflow param", MaestroInternalError.class, "getFromInstance throws an exception for fieldName=[INITIATOR_RUNNER_NAME]", () -> paramExtension.getFromInstance(Constants.INITIATOR_RUNNER_NAME)); }
/**
 * Validates that NATIVE in-memory-format Near Caches on enterprise members have
 * native memory enabled. Community edition and non-NATIVE formats need no check.
 *
 * @throws InvalidConfigurationException when NATIVE is requested without an
 *     enabled native memory configuration
 */
static void checkNearCacheNativeMemoryConfig(InMemoryFormat inMemoryFormat,
                                             NativeMemoryConfig nativeMemoryConfig,
                                             boolean isEnterprise) {
    final boolean needsNativeMemory = isEnterprise && inMemoryFormat == NATIVE;
    final boolean nativeMemoryEnabled = nativeMemoryConfig != null && nativeMemoryConfig.isEnabled();
    if (needsNativeMemory && !nativeMemoryEnabled) {
        throw new InvalidConfigurationException(
            "Enable native memory config to use NATIVE in-memory-format for Near Cache");
    }
}
// BINARY format never requires native memory, even on enterprise; must not throw.
@Test
public void checkNearCacheNativeMemoryConfig_shouldNotNeedNativeMemoryConfig_BINARY_onEE() {
    checkNearCacheNativeMemoryConfig(BINARY, null, true);
}
/**
 * URI-encodes the absolute path for WebDAV. Directories get a trailing slash,
 * except the root, whose encoded form is already "/".
 */
public String encode(final Path file) {
    final String encoded = URIEncoder.encode(file.getAbsolute());
    if (file.isDirectory() && !file.isRoot()) {
        return String.format("%s/", encoded);
    }
    return encoded;
}
// Covers root (no duplicate slash), directory (trailing slash), plain file, and
// percent-encoding of spaces.
@Test public void testEncode() { assertEquals("/", new DAVPathEncoder().encode(new Path("/", EnumSet.of(Path.Type.directory)))); assertEquals("/dav/", new DAVPathEncoder().encode(new Path("/dav", EnumSet.of(Path.Type.directory)))); assertEquals("/dav", new DAVPathEncoder().encode(new Path("/dav", EnumSet.of(Path.Type.file)))); assertEquals("/dav/file%20path", new DAVPathEncoder().encode(new Path("/dav/file path", EnumSet.of(Path.Type.file)))); }
// Configures the builder with the stock AWS proxy integration: async init wrapper,
// AWS proxy request reader/response writer, proxy security context, default
// exception handler, and the AwsProxyRequest/Response type pair. The unchecked
// casts pin the builder's generic RequestType/ResponseType to the AWS proxy types;
// callers choosing this preset are expected to use exactly those types.
public Builder defaultProxy() { initializationWrapper(new AsyncInitializationWrapper()) .requestReader((RequestReader<RequestType, ContainerRequestType>) new AwsProxyHttpServletRequestReader()) .responseWriter((ResponseWriter<AwsHttpServletResponse, ResponseType>) new AwsProxyHttpServletResponseWriter()) .securityContextWriter((SecurityContextWriter<RequestType>) new AwsProxySecurityContextWriter()) .exceptionHandler(defaultExceptionHandler()) .requestTypeClass((Class<RequestType>) AwsProxyRequest.class) .responseTypeClass((Class<ResponseType>) AwsProxyResponse.class); return self(); }
// Every field set by defaultProxy() must hold the expected stock implementation,
// and chained setters after the preset (name) must still apply.
@Test void defaultProxy_setsValuesCorrectly() { TestBuilder test = new TestBuilder().defaultProxy().name("test"); assertNotNull(test.initializationWrapper); assertTrue(test.exceptionHandler instanceof AwsProxyExceptionHandler); assertTrue(test.requestReader instanceof AwsProxyHttpServletRequestReader); assertTrue(test.responseWriter instanceof AwsProxyHttpServletResponseWriter); assertTrue(test.securityContextWriter instanceof AwsProxySecurityContextWriter); assertSame(AwsProxyRequest.class, test.requestTypeClass); assertSame(AwsProxyResponse.class, test.responseTypeClass); assertEquals("test", test.name); }
/**
 * Collects the split segments into a list, optionally trimming each segment.
 * Delegates to the mapping overload of toList.
 */
public List<String> toList(boolean trim) {
    if (trim) {
        return toList((str) -> StrUtil.trim(str));
    }
    return toList((str) -> str);
}
// An empty separator is rejected at SplitIter/StrFinder construction time with
// IllegalArgumentException; the statements after the constructor are unreachable.
@Test public void splitByEmptyTest(){ assertThrows(IllegalArgumentException.class, () -> { String text = "aa,bb,cc"; SplitIter splitIter = new SplitIter(text, new StrFinder("", false), 3, false ); final List<String> strings = splitIter.toList(false); assertEquals(1, strings.size()); }); }
// Spring bean factory for the TimeLimiterRegistry: builds the registry from the
// configuration properties (applying instance customizers), attaches the event
// consumer registry, then eagerly creates the configured time limiter instances.
@Bean public TimeLimiterRegistry timeLimiterRegistry( TimeLimiterConfigurationProperties timeLimiterConfigurationProperties, EventConsumerRegistry<TimeLimiterEvent> timeLimiterEventConsumerRegistry, RegistryEventConsumer<TimeLimiter> timeLimiterRegistryEventConsumer, @Qualifier("compositeTimeLimiterCustomizer") CompositeCustomizer<TimeLimiterConfigCustomizer> compositeTimeLimiterCustomizer) { TimeLimiterRegistry timeLimiterRegistry = createTimeLimiterRegistry(timeLimiterConfigurationProperties, timeLimiterRegistryEventConsumer, compositeTimeLimiterCustomizer); registerEventConsumer(timeLimiterRegistry, timeLimiterEventConsumerRegistry, timeLimiterConfigurationProperties); initTimeLimiterRegistry(timeLimiterRegistry, timeLimiterConfigurationProperties, compositeTimeLimiterCustomizer); return timeLimiterRegistry; }
// Two configured instances must come back from the registry with their own timeout
// durations, the aspect order property must round-trip, and one event consumer must
// be registered per instance.
@Test public void testTimeLimiterRegistry() { // Given io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties instanceProperties1 = new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties(); instanceProperties1.setTimeoutDuration(Duration.ofSeconds(3)); io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties instanceProperties2 = new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties(); instanceProperties2.setTimeoutDuration(Duration.ofSeconds(2)); TimeLimiterConfigurationProperties timeLimiterConfigurationProperties = new TimeLimiterConfigurationProperties(); timeLimiterConfigurationProperties.getInstances().put("backend1", instanceProperties1); timeLimiterConfigurationProperties.getInstances().put("backend2", instanceProperties2); timeLimiterConfigurationProperties.setTimeLimiterAspectOrder(200); TimeLimiterConfiguration timeLimiterConfiguration = new TimeLimiterConfiguration(); DefaultEventConsumerRegistry<TimeLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>(); // When TimeLimiterRegistry timeLimiterRegistry = timeLimiterConfiguration.timeLimiterRegistry(timeLimiterConfigurationProperties, eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), compositeTimeLimiterCustomizerTestInstance()); // Then assertThat(timeLimiterConfigurationProperties.getTimeLimiterAspectOrder()).isEqualTo(200); assertThat(timeLimiterRegistry.getAllTimeLimiters().size()).isEqualTo(2); TimeLimiter timeLimiter1 = timeLimiterRegistry.timeLimiter("backend1"); assertThat(timeLimiter1).isNotNull(); assertThat(timeLimiter1.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(3)); TimeLimiter timeLimiter2 = timeLimiterRegistry.timeLimiter("backend2"); 
assertThat(timeLimiter2).isNotNull(); assertThat(timeLimiter2.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(2)); assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2); }
/**
 * Camel type converter: wraps a non-empty string in a Cosmos DB PartitionKey.
 * Null or empty input converts to null rather than an empty key.
 */
@Converter
public static PartitionKey toPartitionKey(final String partitionKeyAsString) {
    return ObjectHelper.isNotEmpty(partitionKeyAsString)
        ? new PartitionKey(partitionKeyAsString)
        : null;
}
// Non-empty input produces a key embedding the string; empty input converts to null.
@Test void testStringToPartitionKey() { final String inputStr = "test-key"; final PartitionKey output = CosmosDbTypeConverter.toPartitionKey(inputStr); assertTrue(output.toString().contains(inputStr)); // in empty case assertNull(CosmosDbTypeConverter.toPartitionKey("")); }
// Convenience overload: iterates all metadata-extractor directories and delegates
// to the Iterator-based handle().
protected void handle(com.drew.metadata.Metadata metadataExtractor) throws MetadataException { handle(metadataExtractor.getDirectories().iterator()); }
// A supporting handler must be consulted (supports) and invoked (handle) exactly
// once for the single directory produced by the mocked iterator.
@SuppressWarnings({"rawtypes", "unchecked"}) @Test public void testHandleDirectories() throws MetadataException { Metadata metadata = Mockito.mock(Metadata.class); ImageMetadataExtractor.DirectoryHandler handler1 = Mockito.mock(ImageMetadataExtractor.DirectoryHandler.class); ImageMetadataExtractor e = new ImageMetadataExtractor(metadata, handler1); Directory directory = new JpegCommentDirectory(); Iterator directories = Mockito.mock(Iterator.class); Mockito.when(directories.hasNext()).thenReturn(true, false); Mockito.when(directories.next()).thenReturn(directory); Mockito.when(handler1.supports(JpegCommentDirectory.class)).thenReturn(true); e.handle(directories); Mockito.verify(handler1).supports(JpegCommentDirectory.class); Mockito.verify(handler1).handle(directory, metadata); }
// Converts a decoded Avro value to a JDBC-friendly Java object. Primitives pass
// through; ENUM/STRING are stringified (the value may be org.apache.avro.util.Utf8);
// UNION recurses into the first non-NULL member schema (the loop's unconditional
// return means only that first member is tried). ARRAY/BYTES/FIXED/RECORD/MAP and
// anything else are rejected with UnsupportedOperationException; null input is null.
@VisibleForTesting static Object convertAvroField(Object avroValue, Schema schema) { if (avroValue == null) { return null; } switch (schema.getType()) { case NULL: case INT: case LONG: case DOUBLE: case FLOAT: case BOOLEAN: return avroValue; case ENUM: case STRING: return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8 case UNION: for (Schema s : schema.getTypes()) { if (s.getType() == Schema.Type.NULL) { continue; } return convertAvroField(avroValue, s); } throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type"); case ARRAY: case BYTES: case FIXED: case RECORD: case MAP: default: throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType() + " for value field schema " + schema.getName()); } }
// ARRAY schemas are unsupported and must raise UnsupportedOperationException.
@Test(expectedExceptions = UnsupportedOperationException.class, expectedExceptionsMessageRegExp = "Unsupported avro schema type.*") public void testNotSupportedAvroTypesArray() { BaseJdbcAutoSchemaSink.convertAvroField(new Object(), createFieldAndGetSchema((builder) -> builder.name("field").type().array().items().stringType().noDefault())); }
// Admin API entry point for listing partition offsets: translates each OffsetSpec to
// its wire-level query value, hands the per-partition lookups to the driver-based
// ListOffsetsHandler (which handles leader routing and retries), and exposes the
// combined future as a ListOffsetsResult.
@Override public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options) { AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future = ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet()); Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue()))); ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext); invokeDriver(handler, future, options.timeoutMs); return new ListOffsetsResult(future.all()); }
// Scenario: the first listOffsets round fails with retriable leader errors
// (NOT_LEADER_OR_FOLLOWER, LEADER_NOT_AVAILABLE); the client must refresh metadata,
// discover the new per-partition leaders (node1, node2), re-issue the requests to
// those leaders, and surface the successful offsets/epochs/timestamps.
@Test public void testListOffsetsWithMultiplePartitionsLeaderChange() throws Exception { Node node0 = new Node(0, "localhost", 8120); Node node1 = new Node(1, "localhost", 8121); Node node2 = new Node(2, "localhost", 8122); List<Node> nodes = asList(node0, node1, node2); final PartitionInfo oldPInfo1 = new PartitionInfo("foo", 0, node0, new Node[]{node0, node1, node2}, new Node[]{node0, node1, node2}); final PartitionInfo oldPnfo2 = new PartitionInfo("foo", 1, node0, new Node[]{node0, node1, node2}, new Node[]{node0, node1, node2}); List<PartitionInfo> oldPInfos = asList(oldPInfo1, oldPnfo2); final Cluster oldCluster = new Cluster("mockClusterId", nodes, oldPInfos, Collections.emptySet(), Collections.emptySet(), node0); final TopicPartition tp0 = new TopicPartition("foo", 0); final TopicPartition tp1 = new TopicPartition("foo", 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(oldCluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareMetadataResponse(oldCluster, Errors.NONE)); ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543); ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.LEADER_NOT_AVAILABLE, -2L, 123L, 456); ListOffsetsResponseData responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) .setTopics(asList(t0, t1)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0); final PartitionInfo newPInfo1 = new PartitionInfo("foo", 0, node1, new Node[]{node0, node1, node2}, new Node[]{node0, node1, node2}); final PartitionInfo newPInfo2 = new PartitionInfo("foo", 1, node2, new Node[]{node0, node1, node2}, new Node[]{node0, node1, node2}); List<PartitionInfo> newPInfos = asList(newPInfo1, newPInfo2); final Cluster newCluster = new Cluster("mockClusterId", nodes, newPInfos, Collections.emptySet(), Collections.emptySet(), 
node0); env.kafkaClient().prepareResponse(prepareMetadataResponse(newCluster, Errors.NONE)); t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) .setTopics(singletonList(t0)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1); t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -2L, 123L, 456); responseData = new ListOffsetsResponseData() .setThrottleTimeMs(0) .setTopics(singletonList(t1)); env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node2); Map<TopicPartition, OffsetSpec> partitions = new HashMap<>(); partitions.put(tp0, OffsetSpec.latest()); partitions.put(tp1, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get(); assertFalse(offsets.isEmpty()); assertEquals(345L, offsets.get(tp0).offset()); assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue()); assertEquals(-1L, offsets.get(tp0).timestamp()); assertEquals(123L, offsets.get(tp1).offset()); assertEquals(456, offsets.get(tp1).leaderEpoch().get().intValue()); assertEquals(-2L, offsets.get(tp1).timestamp()); } }
// Attempts to take 'permits' from the rate limiter: atomically advances the limiter
// state (with back-off) under the configured timeout, then blocks for the computed
// nanosToWait if a wait is required. Publishes a success/failure acquisition event
// before returning whether the permission was granted.
@Override public boolean acquirePermission(final int permits) { long timeoutInNanos = state.get().config.getTimeoutDuration().toNanos(); State modifiedState = updateStateWithBackOff(permits, timeoutInNanos); boolean result = waitForPermissionIfNecessary(timeoutInNanos, modifiedState.nanosToWait); publishRateLimiterAcquisitionEvent(result, permits); return result; }
// With a zero timeout and one permit per cycle: the first acquire in each cycle
// succeeds and exhausts the budget, the second fails immediately; advancing the
// mock clock by a full cycle refreshes the permits and repeats the pattern.
@Test public void acquireAndRefreshWithEventPublishing() throws Exception { setup(Duration.ZERO); setTimeOnNanos(CYCLE_IN_NANOS); boolean permission = rateLimiter.acquirePermission(); then(permission).isTrue(); then(metrics.getAvailablePermissions()).isZero(); then(metrics.getNanosToWait()).isEqualTo(CYCLE_IN_NANOS); boolean secondPermission = rateLimiter.acquirePermission(); then(secondPermission).isFalse(); then(metrics.getAvailablePermissions()).isZero(); then(metrics.getNanosToWait()).isEqualTo(CYCLE_IN_NANOS); setTimeOnNanos(CYCLE_IN_NANOS * 2); boolean thirdPermission = rateLimiter.acquirePermission(); then(thirdPermission).isTrue(); then(metrics.getAvailablePermissions()).isZero(); then(metrics.getNanosToWait()).isEqualTo(CYCLE_IN_NANOS); boolean fourthPermission = rateLimiter.acquirePermission(); then(fourthPermission).isFalse(); then(metrics.getAvailablePermissions()).isZero(); then(metrics.getNanosToWait()).isEqualTo(CYCLE_IN_NANOS); }
// Rewrites the parse tree into its anonymized SQL text; thin wrapper over build().
public String anonymize(final ParseTree tree) { return build(tree); }
// Source identifiers are replaced with generic placeholders (S1 -> source1).
@Test
public void shouldAnonymizeSelectStatementCorrectly() {
    final String anonymized = anon.anonymize("SELECT * FROM S1;");
    Assert.assertEquals("SELECT * FROM source1;", anonymized);
}
/**
 * Dissects an encoded archive control-protocol request into human-readable text.
 * <p>
 * Wire layout: event log header, then the SBE message header, then the request body.
 * Each case wraps the flyweight decoder matching {@code eventCode} over the body
 * (using the block length/version read from the message header) and appends a
 * textual rendering of its fields to {@code builder}.
 *
 * @param eventCode archive event code identifying which control request was logged.
 * @param buffer    buffer containing the encoded event.
 * @param offset    offset in {@code buffer} at which the event begins.
 * @param builder   destination for the dissected, human-readable text.
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    // The log header dissector returns how many bytes it consumed.
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);

    // The SBE message header supplies blockLength()/version() for the body decoders below.
    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;

    // Every case follows the same pattern: wrap the request-specific decoder, then
    // append that request's fields via the matching append* helper.
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;

        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;

        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;

        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;

        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;

        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;

        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;

        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;

        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;

        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;

        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;

        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;

        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;

        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;

        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;

        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;

        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;

        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;

        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;

        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;

        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;

        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;

        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;

        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;

        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;

        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;

        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;

        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;

        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;

        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;

        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;

        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;

        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;

        default:
            // Unrecognized event codes are still rendered rather than dropped.
            builder.append(": unknown command");
    }
}
@Test
void controlRequestAttachSegments() {
    // Encode the log header (timestamp 0.5s) followed by an attach-segments request body.
    internalEncodeLogHeader(buffer, 0, 1000, 1000, () -> 500_000_000L);
    final AttachSegmentsRequestEncoder requestEncoder = new AttachSegmentsRequestEncoder();
    requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(30)
        .correlationId(560)
        .recordingId(50);

    dissectControlRequest(CMD_IN_ATTACH_SEGMENTS, buffer, 0, builder);

    // The dissected text must carry the header plus every request field.
    final String expected = "[0.500000000] " + CONTEXT + ": " + CMD_IN_ATTACH_SEGMENTS.name() +
        " [1000/1000]: controlSessionId=30 correlationId=560 recordingId=50";
    assertEquals(expected, builder.toString());
}
@Override
public byte[] fetch(final Bytes key, final long timestamp) {
    // Pure pass-through to the wrapped store; this layer adds no caching or translation.
    final byte[] value = wrapped().fetch(key, timestamp);
    return value;
}
@Test
public void shouldDelegateToUnderlyingStoreWhenFetching() {
    // A ranged fetch through the wrapper must be forwarded to the inner store
    // with the Instant bounds translated to their raw epoch-millis form.
    store.fetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10));
    verify(inner).fetch(bytesKey, 0, 10);
}
/**
 * Converts a Flink {@link TableSchema} into an Iceberg {@link Schema}, carrying
 * the primary key (if any) over as identifier fields with freshly assigned IDs.
 */
@Deprecated
public static Schema convert(TableSchema schema) {
    final LogicalType logicalType = schema.toRowDataType().getLogicalType();
    Preconditions.checkArgument(
        logicalType instanceof RowType, "Schema logical type should be row type.");

    final RowType rowType = (RowType) logicalType;
    final Type converted = rowType.accept(new FlinkTypeToType(rowType));
    final Schema icebergSchema = new Schema(converted.asStructType().fields());

    // No primary key: the structural conversion is the final result.
    if (!schema.getPrimaryKey().isPresent()) {
        return icebergSchema;
    }
    return freshIdentifierFieldIds(icebergSchema, schema.getPrimaryKey().get().getColumns());
}
@Test
public void testConvertFlinkSchemaBaseOnIcebergSchema() {
    // Iceberg side: pre-assigned field IDs with "int" as the identifier field.
    Schema icebergSchema =
        new Schema(
            Lists.newArrayList(
                Types.NestedField.required(101, "int", Types.IntegerType.get()),
                Types.NestedField.optional(102, "string", Types.StringType.get())),
            Sets.newHashSet(101));

    // Flink side: same columns with a primary key on "int".
    TableSchema tableSchema =
        TableSchema.builder()
            .field("int", DataTypes.INT().notNull())
            .field("string", DataTypes.STRING().nullable())
            .primaryKey("int")
            .build();

    // Converting against the base schema must preserve its IDs and identifier fields.
    Schema converted = FlinkSchemaUtil.convert(icebergSchema, tableSchema);
    assertThat(converted.asStruct()).isEqualTo(icebergSchema.asStruct());
    assertThat(converted.identifierFieldIds()).containsExactly(101);
}
/**
 * Assembles the introspection response claims for an access token.
 * <p>
 * The result always carries {@code active=true} plus client/token metadata. UMA
 * permission tokens report their permissions; otherwise the response reports the
 * scopes as the intersection of the token's scopes and the caller's authorized scopes.
 *
 * @param accessToken the token being introspected.
 * @param userInfo    resolved user info for the token's subject; may be null for
 *                    client-only tokens.
 * @param authScopes  scopes the introspecting caller is authorized to see.
 * @return an ordered map of introspection claims.
 */
@Override
public Map<String, Object> assembleFrom(OAuth2AccessTokenEntity accessToken, UserInfo userInfo, Set<String> authScopes) {

    Map<String, Object> result = newLinkedHashMap();
    OAuth2Authentication authentication = accessToken.getAuthenticationHolder().getAuthentication();

    // This assembler is only invoked for valid tokens, hence unconditionally active.
    result.put(ACTIVE, true);

    if (accessToken.getPermissions() != null && !accessToken.getPermissions().isEmpty()) {
        // UMA-style token: expose per-resource-set permissions instead of a scope string.
        Set<Object> permissions = Sets.newHashSet();

        for (Permission perm : accessToken.getPermissions()) {
            Map<String, Object> o = newLinkedHashMap();
            o.put("resource_set_id", perm.getResourceSet().getId().toString());
            // Copy to detach the claim value from the persistent entity's collection.
            Set<String> scopes = Sets.newHashSet(perm.getScopes());
            o.put("scopes", scopes);
            permissions.add(o);
        }

        result.put("permissions", permissions);
    } else {
        // Only report scopes the caller is itself authorized for.
        Set<String> scopes = Sets.intersection(authScopes, accessToken.getScope());
        result.put(SCOPE, Joiner.on(SCOPE_SEPARATOR).join(scopes));
    }

    if (accessToken.getExpiration() != null) {
        try {
            // Both the formatted timestamp and the epoch-seconds variant are emitted.
            result.put(EXPIRES_AT, dateFormat.valueToString(accessToken.getExpiration()));
            result.put(EXP, accessToken.getExpiration().getTime() / 1000L);
        } catch (ParseException e) {
            // Best-effort: a formatting failure drops the expiry claims but not the response.
            logger.error("Parse exception in token introspection", e);
        }
    }

    if (userInfo != null) {
        // if we have a UserInfo, use that for the subject
        result.put(SUB, userInfo.getSub());
    } else {
        // otherwise, use the authentication's name (the client for client-only tokens)
        result.put(SUB, authentication.getName());
    }

    if (authentication.getUserAuthentication() != null) {
        // user_id is only present when a real end-user authentication backs the token
        result.put(USER_ID, authentication.getUserAuthentication().getName());
    }

    result.put(CLIENT_ID, authentication.getOAuth2Request().getClientId());
    result.put(TOKEN_TYPE, accessToken.getTokenType());

    return result;
}
@Test public void shouldAssembleExpectedResultForRefreshTokenWithoutUserAuthentication() throws ParseException { // given OAuth2RefreshTokenEntity refreshToken = refreshToken(null, oauth2Authentication(oauth2Request("clientId", scopes("foo", "bar")), null)); Set<String> authScopes = scopes("foo", "bar", "baz"); // when Map<String, Object> result = assembler.assembleFrom(refreshToken, null, authScopes); // then `user_id` should not be present Map<String, Object> expected = new ImmutableMap.Builder<String, Object>() .put("sub", "clientId") .put("scope", "bar foo") .put("active", Boolean.TRUE) .put("client_id", "clientId") .build(); assertThat(result, is(equalTo(expected))); }
public static Table resolveCalciteTable(SchemaPlus schemaPlus, List<String> tablePath) { Schema subSchema = schemaPlus; // subSchema.getSubschema() for all except last for (int i = 0; i < tablePath.size() - 1; i++) { subSchema = subSchema.getSubSchema(tablePath.get(i)); if (subSchema == null) { throw new IllegalStateException( String.format( "While resolving table path %s, no sub-schema found for component %s (\"%s\")", tablePath, i, tablePath.get(i))); } } // for the final one call getTable() return subSchema.getTable(Iterables.getLast(tablePath)); }
@Test
public void testResolveFlat() {
    // A single-component path resolves directly against the root schema.
    String name = "fake_table";
    when(mockSchemaPlus.getTable(name)).thenReturn(mockTable);

    Table resolved = TableResolution.resolveCalciteTable(mockSchemaPlus, ImmutableList.of(name));

    assertThat(resolved, Matchers.is(mockTable));
}
/**
 * SQL {@code strrpos(string, substring)}: returns the 1-based index of the LAST
 * occurrence of {@code substring} within {@code string}, or 0 when not found.
 * Delegates to the instance-aware variant with instance 1, i.e. the first match
 * counting from the end of the string.
 */
@Description("returns index of last occurrence of a substring (or 0 if not found)")
@ScalarFunction("strrpos")
@LiteralParameters({"x", "y"})
@SqlType(StandardTypes.BIGINT)
public static long stringReversePosition(@SqlType("varchar(x)") Slice string, @SqlType("varchar(y)") Slice substring)
{
    return stringPositionFromEnd(string, substring, 1);
}
@Test
public void testStringReversePosition() {
    // Basic last-occurrence behavior; 0 signals "not found".
    assertFunction("STRRPOS('high', 'ig')", BIGINT, 2L);
    assertFunction("STRRPOS('high', 'igx')", BIGINT, 0L);
    assertFunction("STRRPOS('Quadratically', 'a')", BIGINT, 10L);
    assertFunction("STRRPOS('foobar', 'foobar')", BIGINT, 1L);
    assertFunction("STRRPOS('foobar', 'obar')", BIGINT, 3L);
    assertFunction("STRRPOS('zoo!', '!')", BIGINT, 4L);

    // Empty substring matches at position 1, even on an empty string.
    assertFunction("STRRPOS('x', '')", BIGINT, 1L);
    assertFunction("STRRPOS('', '')", BIGINT, 1L);

    // Positions are counted in characters, not bytes (multi-byte CJK input).
    assertFunction("STRRPOS('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', '\u7231')", BIGINT, 2L);
    assertFunction("STRRPOS('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', '\u5E0C\u671B')", BIGINT, 3L);
    assertFunction("STRRPOS('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', 'nice')", BIGINT, 0L);

    // NULL in either argument propagates NULL.
    assertFunction("STRRPOS(NULL, '')", BIGINT, null);
    assertFunction("STRRPOS('', NULL)", BIGINT, null);
    assertFunction("STRRPOS(NULL, NULL)", BIGINT, null);

    // Three-argument form: Nth occurrence counting from the end.
    assertFunction("STRRPOS('abc/xyz/foo/bar', '/')", BIGINT, 12L);
    assertFunction("STRRPOS('abc/xyz/foo/bar', '/', 1)", BIGINT, 12L);
    assertFunction("STRRPOS('abc/xyz/foo/bar', '/', 2)", BIGINT, 8L);
    assertFunction("STRRPOS('abc/xyz/foo/bar', '/', 3)", BIGINT, 4L);
    assertFunction("STRRPOS('abc/xyz/foo/bar', '/', 4)", BIGINT, 0L);
    assertFunction("STRRPOS('highhigh', 'ig', 1)", BIGINT, 6L);
    assertFunction("STRRPOS('highhigh', 'ig', 2)", BIGINT, 2L);
    assertFunction("STRRPOS('foobarfoo', 'fb', 1)", BIGINT, 0L);
    assertFunction("STRRPOS('foobarfoo', 'oo', 1)", BIGINT, 8L);

    // Assert invalid instance argument (must be strictly positive).
    assertInvalidFunction("STRRPOS('abc/xyz/foo/bar', '/', 0)", "'instance' must be a positive number.");
    assertInvalidFunction("STRRPOS('', '', 0)", "'instance' must be a positive number.");
    assertInvalidFunction("STRRPOS('foobarfoo', 'obar', -1)", "'instance' must be a positive number.");
}
/**
 * Applies the computed per-column forward-index/dictionary operations to the segment.
 * <p>
 * Operations are executed in the order {@code computeOperations} produced them;
 * each case also validates the post-condition it is responsible for and fails fast
 * on an inconsistent index state.
 *
 * @param segmentWriter writer over the segment directory being mutated.
 * @throws Exception if an index rewrite fails or a post-condition check trips.
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
  if (columnOperationsMap.isEmpty()) {
    return;
  }

  for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
    String column = entry.getKey();
    List<Operation> operations = entry.getValue();
    for (Operation operation : operations) {
      switch (operation) {
        case DISABLE_FORWARD_INDEX:
          // Deletion of the forward index will be handled outside the index handler to ensure that other index
          // handlers that need the forward index to construct their own indexes will have it available.
          _tmpForwardIndexColumns.add(column);
          break;
        case ENABLE_FORWARD_INDEX:
          // Rebuild the forward index, then verify dictionary presence matches the column's encoding.
          ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
          if (columnMetadata.hasDictionary()) {
            if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(String.format(
                  "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
            }
          } else {
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                      column));
            }
          }
          break;
        case DISABLE_DICTIONARY:
          // Columns whose forward index is also being disabled take a different removal path.
          Set<String> newForwardIndexDisabledColumns =
              FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
                  _fieldIndexConfigs);
          if (newForwardIndexDisabledColumns.contains(column)) {
            removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
            }
          } else {
            disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
          }
          break;
        case ENABLE_DICTIONARY:
          // Build a dictionary-encoded forward index and verify it materialized.
          createDictBasedForwardIndex(column, segmentWriter);
          if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
            throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
          }
          break;
        case CHANGE_INDEX_COMPRESSION_TYPE:
          // Encoding is unchanged; only the raw forward index compression codec is rewritten.
          rewriteForwardIndexForCompressionChange(column, segmentWriter);
          break;
        default:
          throw new IllegalStateException("Unsupported operation for column " + column);
      }
    }
  }
}
@Test
public void testEnableForwardIndexInRawModeForSingleForwardIndexDisabledColumn()
    throws Exception {
  // Start with EVERY forward-index-disabled column, then re-enable them one at a
  // time (in raw/no-dictionary mode) and validate the rebuilt index and metadata.
  Set<String> forwardIndexDisabledColumns = new HashSet<>(SV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS);
  forwardIndexDisabledColumns.addAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
  List<String> allForwardIndexDisabledColumns = new ArrayList<>(SV_FORWARD_INDEX_DISABLED_COLUMNS);
  allForwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_COLUMNS);
  for (String column : allForwardIndexDisabledColumns) {
    SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
    SegmentDirectory segmentLocalFSDirectory =
        new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
    SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
    // NOTE: the column is removed from the shared set and never re-added, so each
    // iteration shrinks the disabled set cumulatively.
    forwardIndexDisabledColumns.remove(column);
    indexLoadingConfig.setForwardIndexDisabledColumns(forwardIndexDisabledColumns);
    Set<String> invertedIndexColumns = new HashSet<>(forwardIndexDisabledColumns);
    invertedIndexColumns.removeAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
    invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
    invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
    indexLoadingConfig.setInvertedIndexColumns(invertedIndexColumns);
    // Raw mode: the re-enabled column must come back without a dictionary.
    indexLoadingConfig.addNoDictionaryColumns(column);
    ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
    fwdIndexHandler.updateIndices(writer);
    fwdIndexHandler.postUpdateIndicesCleanup(writer);

    // Tear down before validation, because columns.psf and index-map cleanup
    // happen at segmentDirectory.close().
    segmentLocalFSDirectory.close();

    ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
    validateIndexMap(column, false, false);
    validateForwardIndex(column, CompressionCodec.LZ4, metadata.isSorted());

    // In column metadata, nothing should change.
    validateMetadataProperties(column, false, 0, metadata.getCardinality(), metadata.getTotalDocs(),
        metadata.getDataType(), metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(),
        metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(),
        metadata.getMinValue(), metadata.getMaxValue(), false);
  }
}
/**
 * Static factory for a {@link RetryTransformer} bound to the given {@link Retry}.
 *
 * @param retry the retry instance whose policy the transformer will apply
 * @param <T>   type of the items emitted by the decorated reactive source
 * @return a transformer applying {@code retry} to composed sources
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
@Test
public void returnOnCompleteUsingObservable() throws InterruptedException {
    RetryConfig config = retryConfig();
    Retry retry = Retry.of("testName", config);
    RetryTransformer<Object> retryTransformer = RetryTransformer.of(retry);
    // The service always fails, so every subscription exhausts its retry attempts.
    given(helloWorldService.returnHelloWorld())
        .willThrow(new HelloWorldException());

    Observable.fromCallable(helloWorldService::returnHelloWorld)
        .compose(retryTransformer)
        .test()
        .await()
        .assertError(HelloWorldException.class)
        .assertNotComplete()
        .assertSubscribed();

    // Second subscription: the same transformer must retry independently again.
    Observable.fromCallable(helloWorldService::returnHelloWorld)
        .compose(retryTransformer)
        .test()
        .await()
        .assertError(HelloWorldException.class)
        .assertNotComplete()
        .assertSubscribed();

    // 6 total calls => 3 attempts per subscription (presumably 1 initial + 2
    // retries from retryConfig(); confirm against the helper's max-attempts).
    then(helloWorldService).should(times(6)).returnHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();

    // Each subscription counts once as a failed call that had retry attempts.
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(2);
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
}
/**
 * Applies a schema change event to the global in-memory table catalog.
 * <p>
 * Create is idempotent (an existing table is left untouched), drop removes the
 * table, truncate is a no-op for unknown tables, and all other change kinds
 * require the table to already exist.
 *
 * @param event the schema change event to apply.
 * @throws NullPointerException if a structural change targets a non-existent table.
 */
public static void applySchemaChangeEvent(SchemaChangeEvent event) {
    // Cache the table id once and use it consistently for every lookup below.
    TableId tableId = event.tableId();
    if (event instanceof CreateTableEvent) {
        // Idempotent create: only register the table on first sight.
        if (!globalTables.containsKey(tableId)) {
            globalTables.put(
                    tableId, new ValuesTable(tableId, ((CreateTableEvent) event).getSchema()));
        }
    } else if (event instanceof DropTableEvent) {
        globalTables.remove(tableId);
    } else if (event instanceof TruncateTableEvent) {
        // Single lookup instead of containsKey + get; truncate on a missing table is a no-op.
        ValuesTable table = globalTables.get(tableId);
        if (table != null) {
            table.applyTruncateTableEvent((TruncateTableEvent) event);
        }
    } else {
        // Remaining change kinds (add/drop/rename/alter column, ...) need an existing table.
        ValuesTable table = globalTables.get(tableId);
        Preconditions.checkNotNull(table, tableId + " is not existed");
        table.applySchemaChangeEvent(event);
    }
}
@Test
public void testApplySchemaChangeEvent() throws SchemaEvolveException {
    // 1. Add col3 and expect it appended after the existing columns.
    AddColumnEvent.ColumnWithPosition columnWithPosition =
            new AddColumnEvent.ColumnWithPosition(
                    Column.physicalColumn("col3", new CharType()));
    AddColumnEvent addColumnEvent =
            new AddColumnEvent(table1, Collections.singletonList(columnWithPosition));
    metadataApplier.applySchemaChange(addColumnEvent);
    Schema schema =
            Schema.newBuilder()
                    .physicalColumn("col1", new CharType())
                    .physicalColumn("col2", new CharType())
                    .physicalColumn("col3", new CharType())
                    .primaryKey("col1")
                    .build();
    Assert.assertEquals(schema, metadataAccessor.getTableSchema(table1));

    // 2. Rename col2/col3; column order and types must be preserved.
    Map<String, String> nameMapping = new HashMap<>();
    nameMapping.put("col2", "newCol2");
    nameMapping.put("col3", "newCol3");
    RenameColumnEvent renameColumnEvent = new RenameColumnEvent(table1, nameMapping);
    metadataApplier.applySchemaChange(renameColumnEvent);
    schema =
            Schema.newBuilder()
                    .physicalColumn("col1", new CharType())
                    .physicalColumn("newCol2", new CharType())
                    .physicalColumn("newCol3", new CharType())
                    .primaryKey("col1")
                    .build();
    Assert.assertEquals(schema, metadataAccessor.getTableSchema(table1));

    // 3. Drop newCol2; only the remaining columns survive.
    DropColumnEvent dropColumnEvent =
            new DropColumnEvent(table1, Collections.singletonList("newCol2"));
    metadataApplier.applySchemaChange(dropColumnEvent);
    schema =
            Schema.newBuilder()
                    .physicalColumn("col1", new CharType())
                    .physicalColumn("newCol3", new CharType())
                    .primaryKey("col1")
                    .build();
    Assert.assertEquals(schema, metadataAccessor.getTableSchema(table1));

    // 4. Alter newCol3 from CHAR to VARCHAR.
    Map<String, DataType> typeMapping = new HashMap<>();
    typeMapping.put("newCol3", new VarCharType());
    AlterColumnTypeEvent alterColumnTypeEvent = new AlterColumnTypeEvent(table1, typeMapping);
    metadataApplier.applySchemaChange(alterColumnTypeEvent);
    schema =
            Schema.newBuilder()
                    .physicalColumn("col1", new CharType())
                    .physicalColumn("newCol3", new VarCharType())
                    .primaryKey("col1")
                    .build();
    Assert.assertEquals(schema, metadataAccessor.getTableSchema(table1));
}
/**
 * Imports the given classes, delegating to the collection-based overload.
 *
 * @param classes the classes to import
 * @return the imported {@link JavaClasses}
 */
@PublicAPI(usage = ACCESS)
public JavaClasses importClasses(Class<?>... classes) {
    return importClasses(Arrays.asList(classes));
}
@Test
public void imports_enclosing_constructor_of_anonymous_class() throws ClassNotFoundException {
    // Local class whose constructor instantiates an anonymous Serializable —
    // the compiler emits that anonymous type as "<enclosing>$1".
    @SuppressWarnings("unused")
    class ClassCreatingAnonymousClassInConstructor {
        ClassCreatingAnonymousClassInConstructor() {
            new Serializable() {
            };
        }
    }
    String anonymousClassName = ClassCreatingAnonymousClassInConstructor.class.getName() + "$1";
    JavaClasses classes = new ClassFileImporter().importClasses(
            ClassCreatingAnonymousClassInConstructor.class, Class.forName(anonymousClassName)
    );

    JavaClass enclosingClass = classes.get(ClassCreatingAnonymousClassInConstructor.class);
    JavaClass anonymousClass = classes.get(anonymousClassName);

    // The anonymous class must point back at the constructor that created it; the
    // local class's constructor takes the enclosing test class as synthetic parameter.
    assertThat(anonymousClass.getEnclosingCodeUnit()).contains(enclosingClass.getConstructor(getClass()));
    assertThat(anonymousClass.getEnclosingClass()).contains(enclosingClass);
}
@Override
public T computeIfAbsent(String key, Function<String, T> supplier) {
    // Atomic per-key update: keep an entry that is still valid, otherwise
    // (missing or too close to expiry) recompute it from the supplier.
    return cache.compute(
        key,
        (cacheKey, existing) ->
            isValidLongEnough(existing) ? existing : supplier.apply(key));
}
@Test
void expires() {
    var clock = new MockClock();
    var ttl = Duration.ofSeconds(10);
    var sut = new InMemoryCacheImpl<CacheEntry>(clock, ttl);
    var now = Instant.parse("2024-01-01T13:11:00.000Z");
    clock.set(now);
    // do NOT use lambda, Mockito can not handle it
    var source =
        spy(
            new Function<String, CacheEntry>() {
              @Override
              public CacheEntry apply(String k) {
                // Each loaded entry carries its own expiry 20s in the future.
                return CacheEntry.of(k, clock.instant().plusSeconds(20));
              }
            });
    var key = "1";
    var N = 10;

    // when: the clock jumps 30s between lookups, so every cached entry has
    // already expired by the time the next lookup happens.
    for (int i = 0; i < N; i++) {
      var r = sut.computeIfAbsent(key, source);
      assertEquals(key, r.value());
      clock.advanceSeconds(30);
    }

    // then: the loader must have been invoked on every single lookup.
    verify(source, times(N)).apply(key);
}
/**
 * Decides whether the installed SonarQube version is still considered "active".
 * <p>
 * Rules (patch versions are ignored throughout):
 * current LTA is always active; the past LTA stays active for 6 months after the
 * initial release of the current LTA; any other version is active when it is at
 * least the previous (non-patch) release.
 *
 * @param updateCenter source of the LTA versions and the full release history.
 * @return true when the installed version is active per the rules above.
 * @throws IllegalStateException when the current LTA's initial release date is missing.
 */
public boolean evaluateIfActiveVersion(UpdateCenter updateCenter) {
    Version installedVersion = Version.create(sonarQubeVersion.get().toString());

    // Running the current LTA line: always active.
    if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getLtaVersion().getVersion()) == 0) {
      return true;
    }

    SortedSet<Release> allReleases = updateCenter.getSonar().getAllReleases();
    if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getPastLtaVersion().getVersion()) == 0) {
      // Past LTA: active only within the grace window after the current LTA's first release.
      Release initialLtaRelease = findInitialVersionOfMajorRelease(allReleases,
          updateCenter.getSonar().getLtaVersion().getVersion());
      Date initialLtaReleaseDate = initialLtaRelease.getDate();
      if (initialLtaReleaseDate == null) {
        throw new IllegalStateException("Initial Major release date is missing in releases");
      }

      // date of the latest major release should be within 6 months
      Calendar c = Calendar.getInstance();
      c.setTime(new Date(system2.now()));
      c.add(Calendar.MONTH, -6);
      return initialLtaReleaseDate.after(c.getTime());
    } else {
      // Otherwise active when at or beyond the previous (patch-ignored) release.
      return compareWithoutPatchVersion(installedVersion,
          findPreviousReleaseIgnoringPatch(allReleases).getVersion()) >= 0;
    }
}
@Test
void evaluateIfActiveVersion_whenInstalledVersionIsSnapshot_shouldReturnVersionIsActive() {
    // A -SNAPSHOT qualifier on the installed version must not break the
    // patch-ignoring comparison against the release history.
    when(sonarQubeVersion.get()).thenReturn(parse("10.11-SNAPSHOT"));
    when(updateCenter.getSonar().getAllReleases()).thenReturn(getReleases());

    assertThat(underTest.evaluateIfActiveVersion(updateCenter)).isTrue();
}
/**
 * Handles an AM allocate heartbeat: authorizes the caller from its AMRMToken,
 * routes the request through the per-application interceptor pipeline, and
 * records success/failure counts plus latency metrics.
 *
 * @param request the AM's allocate request.
 * @return the pipeline's allocate response.
 * @throws YarnException on authorization or scheduling failures.
 * @throws IOException   on transport-level failures.
 */
@Override
public AllocateResponse allocate(AllocateRequest request)
    throws YarnException, IOException {
  this.metrics.incrAllocateCount();
  long startTime = clock.getTime();
  try {
    // The AMRMToken on the current UGI both authorizes the call and selects
    // the application's interceptor chain.
    AMRMTokenIdentifier amrmTokenIdentifier =
        YarnServerSecurityUtils.authorizeRequest();
    RequestInterceptorChainWrapper pipeline =
        getInterceptorChain(amrmTokenIdentifier);
    AllocateResponse allocateResponse =
        pipeline.getRootInterceptor().allocate(request);

    // Presumably rolls the AMRMToken forward if the response carries a new one
    // — see updateAMRMTokens for the exact contract.
    updateAMRMTokens(amrmTokenIdentifier, pipeline, allocateResponse);

    long endTime = clock.getTime();
    this.metrics.succeededAllocateRequests(endTime - startTime);
    LOG.info("Allocate processing finished in {} ms for application {}.",
        endTime - startTime, pipeline.getApplicationAttemptId());
    return allocateResponse;
  } catch (Throwable t) {
    // Count the failure, then propagate unchanged.
    this.metrics.incrFailedAllocateRequests();
    throw t;
  }
}
@Test public void testAllocateRequestWithoutRegistering() { try { // Try to allocate an application master without registering. allocate(1); Assert .fail("The request to allocate application master should have failed"); } catch (Throwable ex) { // This is expected. So nothing required here. LOG.info("AllocateRequest failed as expected because AM was not registered"); } }
/**
 * Returns whether a Hazelcast instance must be started on this node: only when
 * clustering is enabled AND the node is an application node.
 * <p>
 * Fix: compare the {@code NodeType} enum with {@code ==} instead of
 * {@code equals} — identity comparison is idiomatic for enums and stays
 * null-safe should {@code toNodeType} ever return null.
 *
 * @param appSettings settings of the current process.
 * @return true when Hazelcast should be started.
 */
public static boolean shouldStartHazelcast(AppSettings appSettings) {
    return isClusterEnabled(appSettings.getProps())
        && toNodeType(appSettings.getProps()) == NodeType.APPLICATION;
}
@Test
public void shouldStartHazelcast_should_return_false_when_cluster_not_enabled() {
    // Fresh default settings carry no cluster-enabled flag at all.
    TestAppSettings defaultSettings = new TestAppSettings();

    assertThat(ClusterSettings.shouldStartHazelcast(defaultSettings)).isFalse();
}
/**
 * Static factory building the documentation model for a plugin class.
 *
 * @param jsonSchemaGenerator generator used to derive the class's JSON schema.
 * @param plugin              registered plugin the class belongs to.
 * @param cls                 the concrete plugin class to document.
 * @param baseCls             the plugin base type; may be null when not applicable.
 * @param <T>                 the plugin base type.
 * @return the documentation model.
 */
public static <T> ClassPluginDocumentation<T> of(JsonSchemaGenerator jsonSchemaGenerator, RegisteredPlugin plugin, Class<? extends T> cls, Class<T> baseCls) {
    // Trailing null: presumably the optional schema/defaults argument of the
    // private constructor — confirm against its declaration.
    return new ClassPluginDocumentation<>(jsonSchemaGenerator, plugin, cls, baseCls, null);
}
@Test
void taskRunner() throws URISyntaxException {
    Helpers.runApplicationContext(throwConsumer((applicationContext) -> {
        JsonSchemaGenerator jsonSchemaGenerator = applicationContext.getBean(JsonSchemaGenerator.class);
        PluginScanner pluginScanner = new PluginScanner(ClassPluginDocumentationTest.class.getClassLoader());
        RegisteredPlugin scan = pluginScanner.scan();

        // Document the Process task runner without a base class (null baseCls).
        ClassPluginDocumentation<? extends TaskRunner> doc =
            ClassPluginDocumentation.of(jsonSchemaGenerator, scan, Process.class, null);

        // Process exposes no configurable properties, so the schema maps stay empty
        // while the class name and title are still populated.
        assertThat((Map<?, ?>) doc.getPropertiesSchema().get("properties"), anEmptyMap());
        assertThat(doc.getCls(), is("io.kestra.plugin.core.runner.Process"));
        assertThat(doc.getPropertiesSchema().get("title"), is("Task runner that executes a task as a subprocess on the Kestra host."));
        assertThat(doc.getDefs(), anEmptyMap());
    }));
}
/**
 * Parses a JSON string into its {@link MetadataUpdate}, dispatching on the
 * embedded "action" discriminator via the JsonNode-based overload.
 *
 * @param json JSON representation of a metadata update.
 * @return the parsed update.
 */
public static MetadataUpdate fromJson(String json) {
    return JsonUtil.parse(json, MetadataUpdateParser::fromJson);
}
@Test
public void testRemoveSnapshotRefFromJson() {
    // JSON with the remove-snapshot-ref action must parse into the matching update.
    final String json = "{\"action\":\"remove-snapshot-ref\",\"ref-name\":\"snapshot-ref\"}";
    final MetadataUpdate expected = new MetadataUpdate.RemoveSnapshotRef("snapshot-ref");

    assertEquals(MetadataUpdateParser.REMOVE_SNAPSHOT_REF, expected, MetadataUpdateParser.fromJson(json));
}
/**
 * Creates a client stub for the invoker's service interface, resolved through
 * the {@link StubSuppliers} registry keyed by the URL's service-interface name.
 *
 * @param invoker invoker carrying the target service URL.
 * @param <T>     service interface type.
 * @return the stub proxy bound to {@code invoker}.
 * @throws RpcException if stub creation fails.
 */
@Override
public <T> T getProxy(Invoker<T> invoker) throws RpcException {
    return StubSuppliers.createStub(invoker.getUrl().getServiceInterface(), invoker);
}
@Test
void getProxy() {
    // Wire a mock invoker whose URL reports a fixed service-interface name.
    Invoker<?> mockInvoker = Mockito.mock(Invoker.class);
    URL mockUrl = Mockito.mock(URL.class);
    when(mockInvoker.getUrl()).thenReturn(mockUrl);
    String serviceName = "SERV_PROX";
    when(mockUrl.getServiceInterface()).thenReturn(serviceName);

    // Register a supplier that hands back the invoker itself as the "stub".
    StubSuppliers.addSupplier(serviceName, i -> mockInvoker);

    // Both getProxy overloads must resolve the registered stub.
    Assertions.assertEquals(mockInvoker, factory.getProxy(mockInvoker));
    Assertions.assertEquals(mockInvoker, factory.getProxy(mockInvoker, false));
}
/**
 * Creates a new product from the validated payload.
 * <p>
 * On success returns 201 Created with a Location header pointing at the new
 * product and the created entity as the body. Validation errors are surfaced
 * as a {@link BindException}.
 *
 * @param payload              validated title/details of the product to create.
 * @param bindingResult        validation outcome for {@code payload}.
 * @param uriComponentsBuilder builder used to form the Location URI.
 * @return 201 Created response carrying the new product.
 * @throws BindException when the payload failed validation.
 */
@PostMapping
@Operation(
        security = @SecurityRequirement(name = "keycloak"),
        requestBody = @io.swagger.v3.oas.annotations.parameters.RequestBody(
                content = @Content(
                        mediaType = MediaType.APPLICATION_JSON_VALUE,
                        schema = @Schema(
                                type = "object",
                                properties = {
                                        @StringToClassMapItem(key = "title", value = String.class),
                                        @StringToClassMapItem(key = "details", value = String.class)
                                }
                        )
                )
        ),
        responses = {
                @ApiResponse(
                        responseCode = "201",
                        headers = @Header(name = "Content-Type", description = "Тип данных"),
                        content = {
                                @Content(
                                        mediaType = MediaType.APPLICATION_JSON_VALUE,
                                        schema = @Schema(
                                                type = "object",
                                                properties = {
                                                        @StringToClassMapItem(key = "id", value = Integer.class),
                                                        @StringToClassMapItem(key = "title", value = String.class),
                                                        @StringToClassMapItem(key = "details", value = String.class)
                                                }
                                        )
                                )
                        }
                )
        })
public ResponseEntity<?> createProduct(@Valid @RequestBody NewProductPayload payload,
                                       BindingResult bindingResult,
                                       UriComponentsBuilder uriComponentsBuilder)
        throws BindException {
    if (bindingResult.hasErrors()) {
        // Re-throw the original BindException when possible to keep its context;
        // otherwise wrap the raw BindingResult.
        if (bindingResult instanceof BindException exception) {
            throw exception;
        } else {
            throw new BindException(bindingResult);
        }
    } else {
        Product product = this.productService.createProduct(payload.title(), payload.details());
        // Location points at the canonical single-product resource of the new entity.
        return ResponseEntity
                .created(uriComponentsBuilder
                        .replacePath("/catalogue-api/products/{productId}")
                        .build(Map.of("productId", product.getId())))
                .body(product);
    }
}
// NOTE(review): the method name says "ReturnsNoContent" but the assertions expect
// 201 CREATED with a body — presumably the name is stale; consider renaming.
@Test
void createProduct_RequestIsValid_ReturnsNoContent() throws BindException {
    // given: a valid payload and an error-free binding result
    var payload = new NewProductPayload("Новое название", "Новое описание");
    var bindingResult = new MapBindingResult(Map.of(), "payload");
    var uriComponentsBuilder = UriComponentsBuilder.fromUriString("http://localhost");
    doReturn(new Product(1, "Новое название", "Новое описание"))
            .when(this.productService).createProduct("Новое название", "Новое описание");

    // when
    var result = this.controller.createProduct(payload, bindingResult, uriComponentsBuilder);

    // then: 201 Created, Location of the new product, and the entity as body
    assertNotNull(result);
    assertEquals(HttpStatus.CREATED, result.getStatusCode());
    assertEquals(URI.create("http://localhost/catalogue-api/products/1"),
            result.getHeaders().getLocation());
    assertEquals(new Product(1, "Новое название", "Новое описание"), result.getBody());

    verify(this.productService).createProduct("Новое название", "Новое описание");
    verifyNoMoreInteractions(this.productService);
}
/**
 * OAuth2 token endpoint: validates the grant type and client credentials, then
 * issues an access token via the grant strategy that matches the grant type.
 *
 * @return the newly granted access token wrapped in a CommonResult
 */
@PostMapping("/token")
@PermitAll
@Operation(summary = "获得访问令牌", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【获取】调用")
@Parameters({
        @Parameter(name = "grant_type", required = true, description = "授权类型", example = "code"),
        @Parameter(name = "code", description = "授权范围", example = "userinfo.read"),
        @Parameter(name = "redirect_uri", description = "重定向 URI", example = "https://www.iocoder.cn"),
        @Parameter(name = "state", description = "状态", example = "1"),
        @Parameter(name = "username", example = "tudou"),
        @Parameter(name = "password", example = "cai"), // multiple values are separated by spaces
        @Parameter(name = "scope", example = "user_info"),
        @Parameter(name = "refresh_token", example = "123424233"),
})
public CommonResult<OAuth2OpenAccessTokenRespVO> postAccessToken(HttpServletRequest request,
                                                                 @RequestParam("grant_type") String grantType,
                                                                 @RequestParam(value = "code", required = false) String code, // authorization-code mode
                                                                 @RequestParam(value = "redirect_uri", required = false) String redirectUri, // authorization-code mode
                                                                 @RequestParam(value = "state", required = false) String state, // authorization-code mode
                                                                 @RequestParam(value = "username", required = false) String username, // password mode
                                                                 @RequestParam(value = "password", required = false) String password, // password mode
                                                                 @RequestParam(value = "scope", required = false) String scope, // password mode
                                                                 @RequestParam(value = "refresh_token", required = false) String refreshToken) { // refresh mode
    List<String> scopes = OAuth2Utils.buildScopes(scope);
    // 1.1 Validate the grant type
    OAuth2GrantTypeEnum grantTypeEnum = OAuth2GrantTypeEnum.getByGrantType(grantType);
    if (grantTypeEnum == null) {
        throw exception0(BAD_REQUEST.getCode(), StrUtil.format("未知授权类型({})", grantType));
    }
    if (grantTypeEnum == OAuth2GrantTypeEnum.IMPLICIT) {
        throw exception0(BAD_REQUEST.getCode(), "Token 接口不支持 implicit 授权模式");
    }

    // 1.2 Validate the client (credentials come from the HTTP Basic authorization header)
    String[] clientIdAndSecret = obtainBasicAuthorization(request);
    OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientIdAndSecret[0],
            clientIdAndSecret[1], grantType, scopes, redirectUri);

    // 2. Obtain the access token according to the grant mode
    OAuth2AccessTokenDO accessTokenDO;
    switch (grantTypeEnum) {
        case AUTHORIZATION_CODE:
            accessTokenDO = oauth2GrantService.grantAuthorizationCodeForAccessToken(
                    client.getClientId(), code, redirectUri, state);
            break;
        case PASSWORD:
            accessTokenDO = oauth2GrantService.grantPassword(
                    username, password, client.getClientId(), scopes);
            break;
        case CLIENT_CREDENTIALS:
            accessTokenDO = oauth2GrantService.grantClientCredentials(client.getClientId(), scopes);
            break;
        case REFRESH_TOKEN:
            accessTokenDO = oauth2GrantService.grantRefreshToken(refreshToken, client.getClientId());
            break;
        default:
            throw new IllegalArgumentException("未知授权类型:" + grantType);
    }
    Assert.notNull(accessTokenDO, "访问令牌不能为空"); // defensive check
    return success(OAuth2OpenConvert.INSTANCE.convert(accessTokenDO));
}
// Verifies the refresh_token grant: with a valid client and refresh token, the
// endpoint returns the granted access token and a sensible expires-in value.
@Test
public void testPostAccessToken_refreshToken() {
    // prepare parameters
    String granType = OAuth2GrantTypeEnum.REFRESH_TOKEN.getGrantType();
    String refreshToken = randomString();
    String password = randomString();
    HttpServletRequest request = mockRequest("test_client_id", "test_client_secret");
    // mock (client)
    OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId("test_client_id");
    when(oauth2ClientService.validOAuthClientFromCache(eq("test_client_id"), eq("test_client_secret"),
            eq(granType), eq(Lists.newArrayList()), isNull())).thenReturn(client);
    // mock (access token)
    OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class)
            .setExpiresTime(LocalDateTimeUtil.offset(LocalDateTime.now(), 30000L, ChronoUnit.MILLIS));
    when(oauth2GrantService.grantRefreshToken(eq(refreshToken), eq("test_client_id")))
            .thenReturn(accessTokenDO);

    // invoke
    CommonResult<OAuth2OpenAccessTokenRespVO> result = oauth2OpenController.postAccessToken(request,
            granType, null, null, null, null, password, null, refreshToken);
    // assert
    assertEquals(0, result.getCode());
    assertPojoEquals(accessTokenDO, result.getData());
    // a few milliseconds may elapse during the call, so accept 29 or 30 seconds
    assertTrue(ObjectUtils.equalsAny(result.getData().getExpiresIn(), 29L, 30L));
}
@VisibleForTesting static Object convertAvroField(Object avroValue, Schema schema) { if (avroValue == null) { return null; } switch (schema.getType()) { case NULL: case INT: case LONG: case DOUBLE: case FLOAT: case BOOLEAN: return avroValue; case ENUM: case STRING: return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8 case UNION: for (Schema s : schema.getTypes()) { if (s.getType() == Schema.Type.NULL) { continue; } return convertAvroField(avroValue, s); } throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type"); case ARRAY: case BYTES: case FIXED: case RECORD: case MAP: default: throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType() + " for value field schema " + schema.getName()); } }
// Both java.lang.String and Avro's Utf8 inputs must convert to a plain String.
@Test
public void testConvertAvroString() {
    Object converted = BaseJdbcAutoSchemaSink.convertAvroField("mystring",
            createFieldAndGetSchema((builder) ->
                    builder.name("field").type().stringType().noDefault()));
    Assert.assertEquals(converted, "mystring");

    converted = BaseJdbcAutoSchemaSink.convertAvroField(new Utf8("mystring"),
            createFieldAndGetSchema((builder) ->
                    builder.name("field").type().stringType().noDefault()));
    Assert.assertEquals(converted, "mystring");
}
/**
 * Builds key managers for TLS client authentication from a certificate and its
 * private key, backed by an in-memory key store with an empty password.
 *
 * @param cert       client certificate presented during the TLS handshake
 * @param privateKey private key matching {@code cert}
 * @return key managers wrapping the single "client-auth" entry
 * @throws IllegalArgumentException if the key store cannot be initialized
 */
public static KeyManager[] keyManagerOf(X509Certificate cert, PrivateKey privateKey) {
    final char[] emptyPassword = new char[0];
    try {
        KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
        keyStore.load(null); // create a fresh, empty in-memory store
        keyStore.setKeyEntry("client-auth", privateKey, emptyPassword,
                new java.security.cert.Certificate[] {cert});
        KeyManagerFactory factory = KeyManagerFactory.getInstance("PKIX");
        factory.init(keyStore, emptyPassword);
        return factory.getKeyManagers();
    } catch (IOException | GeneralSecurityException e) {
        throw new IllegalArgumentException("failed to initialize client certificate store", e);
    }
}
// Smoke test: key managers built from a generated signing key must expose that key's certificate.
@Test
void keyManager() throws Exception {
    var key = generateSigningKey(URI.create(ISSUER));

    // when
    var kms = TlsContext.keyManagerOf(key.getParsedX509CertChain().get(0), key.toPrivateKey());

    // then
    assertContainsCert(kms, key);
}
/**
 * Returns the web application hosted by this component.
 * Marked {@code @Private}: intended for internal/test use only.
 */
@Private
public WebApp getWebapp() {
    return this.webApp;
}
// Verifies the Router's CORS support end-to-end:
// with example.com whitelisted, a request from example.com gets the CORS
// headers while a request from example.org gets none.
@Test
public void testRouterSupportCrossOrigin() throws ServletException, IOException {

    // We design test cases like this
    // We start the Router and enable the Router to support Cross-origin.
    // In the configuration, we allow example.com to access.
    // 1. We simulate example.com and get the correct response
    // 2. We simulate example.org and cannot get a response

    // Initialize RouterWeb's CrossOrigin capability
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.ROUTER_WEBAPP_ENABLE_CORS_FILTER, true);
    conf.set("hadoop.http.filter.initializers", HttpCrossOriginFilterInitializer.class.getName());
    conf.set(HttpCrossOriginFilterInitializer.PREFIX + CrossOriginFilter.ALLOWED_ORIGINS, "example.com");
    conf.set(HttpCrossOriginFilterInitializer.PREFIX + CrossOriginFilter.ALLOWED_HEADERS,
        "X-Requested-With,Accept");
    conf.set(HttpCrossOriginFilterInitializer.PREFIX + CrossOriginFilter.ALLOWED_METHODS, "GET,POST");

    // Start the router
    Router router = new Router();
    router.init(conf);
    router.start();
    router.getServices();

    // Get assigned to Filter.
    // The name of the filter is "Cross Origin Filter",
    // which is specified in HttpCrossOriginFilterInitializer.
    WebApp webApp = router.getWebapp();
    HttpServer2 httpServer2 = webApp.getHttpServer();
    WebAppContext webAppContext = httpServer2.getWebAppContext();
    ServletHandler servletHandler = webAppContext.getServletHandler();
    FilterHolder holder = servletHandler.getFilter("Cross Origin Filter");
    CrossOriginFilter filter = (CrossOriginFilter) holder.getFilter();

    // 1. Simulate [example.com] for access
    HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
    Mockito.when(mockReq.getHeader("Origin")).thenReturn("example.com");
    Mockito.when(mockReq.getHeader("Access-Control-Request-Method")).thenReturn("GET");
    Mockito.when(mockReq.getHeader("Access-Control-Request-Headers"))
        .thenReturn("X-Requested-With");

    // Objects to verify interactions based on request
    HttpServletResponseForRouterTest mockRes = new HttpServletResponseForRouterTest();
    FilterChain mockChain = Mockito.mock(FilterChain.class);

    // Object under test
    filter.doFilter(mockReq, mockRes, mockChain);

    // Why is 5, because when Filter passes,
    // CrossOriginFilter will set 5 values to Map
    Assert.assertEquals(5, mockRes.getHeaders().size());
    String allowResult = mockRes.getHeader("Access-Control-Allow-Credentials");
    Assert.assertEquals("true", allowResult);

    // 2. Simulate [example.org] for access
    HttpServletRequest mockReq2 = Mockito.mock(HttpServletRequest.class);
    Mockito.when(mockReq2.getHeader("Origin")).thenReturn("example.org");
    Mockito.when(mockReq2.getHeader("Access-Control-Request-Method")).thenReturn("GET");
    Mockito.when(mockReq2.getHeader("Access-Control-Request-Headers"))
        .thenReturn("X-Requested-With");

    // Objects to verify interactions based on request
    HttpServletResponseForRouterTest mockRes2 = new HttpServletResponseForRouterTest();
    FilterChain mockChain2 = Mockito.mock(FilterChain.class);

    // Object under test
    filter.doFilter(mockReq2, mockRes2, mockChain2);

    // Why is 0, because when the Filter fails,
    // CrossOriginFilter will not set any value
    Assert.assertEquals(0, mockRes2.getHeaders().size());

    router.stop();
}
/**
 * Human-readable representation of this resource configuration,
 * e.g. {@code ResourceConfig{url=..., id='...', resourceType=...}}.
 */
@Override
public String toString() {
    StringBuilder text = new StringBuilder("ResourceConfig{");
    text.append("url=").append(url);
    text.append(", id='").append(id).append('\'');
    text.append(", resourceType=").append(resourceType);
    text.append('}');
    return text.toString();
}
// Adding a jar by a path that does not exist must fail fast with a JetException
// naming the offending path.
@Test
public void when_addNonexistentJarWithPath_then_throwsException() {
    // Given
    String path = Paths.get("/i/do/not/exist").toString();

    // Then
    expectedException.expect(JetException.class);
    expectedException.expectMessage("Not an existing, readable file: " + path);

    // When
    config.addJar(path);
}
/**
 * Null-safe comparison of two values honoring order direction, nulls ordering
 * and (for strings) case sensitivity.
 *
 * <p>Nulls are ordered according to {@code nullsOrderType}; case-insensitive
 * string comparison is delegated to {@code compareToCaseInsensitiveString}.
 *
 * @param thisValue      left operand, may be null
 * @param otherValue     right operand, may be null
 * @param orderDirection ASC or DESC
 * @param nullsOrderType whether nulls sort first or last
 * @param caseSensitive  whether string comparison is case sensitive
 * @return negative, zero or positive per the {@link Comparable} contract
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public static int compareTo(final Comparable thisValue, final Comparable otherValue,
                            final OrderDirection orderDirection, final NullsOrderType nullsOrderType,
                            final boolean caseSensitive) {
    if (null == thisValue && null == otherValue) {
        return 0;
    }
    if (null == thisValue) {
        return NullsOrderType.FIRST == nullsOrderType ? -1 : 1;
    }
    if (null == otherValue) {
        return NullsOrderType.FIRST == nullsOrderType ? 1 : -1;
    }
    if (!caseSensitive && thisValue instanceof String && otherValue instanceof String) {
        return compareToCaseInsensitiveString((String) thisValue, (String) otherValue, orderDirection);
    }
    // Reverse by swapping operands instead of negating the result:
    // -compareTo(...) overflows when the comparison returns Integer.MIN_VALUE.
    return OrderDirection.ASC == orderDirection
            ? thisValue.compareTo(otherValue)
            : otherValue.compareTo(thisValue);
}
// DESC direction reverses the natural order: 1 sorts after 2, so the result is positive.
@Test
void assertCompareToWhenDesc() {
    assertThat(CompareUtils.compareTo(1, 2, OrderDirection.DESC, NullsOrderType.FIRST, caseSensitive),
            is(1));
}
/**
 * Parses a raw User-Agent header into a structured {@link UserAgent}.
 * Thin convenience wrapper around {@link UserAgentParser#parse}.
 *
 * @param userAgentString raw User-Agent header value
 * @return the parsed user agent
 */
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
// An iPhone Safari UA string must yield browser, engine, OS, platform,
// their versions, and the mobile flag.
@Test
public void parseMobileTest() {
    final String uaStr = "User-Agent:Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5";
    final UserAgent ua = UserAgentUtil.parse(uaStr);
    assertEquals("Safari", ua.getBrowser().toString());
    assertEquals("5.0.2", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("533.17.9", ua.getEngineVersion());
    assertEquals("iPhone", ua.getOs().toString());
    assertEquals("4_3_3", ua.getOsVersion());
    assertEquals("iPhone", ua.getPlatform().toString());
    assertTrue(ua.isMobile());
}
/**
 * Rolls the write index over to a new target: creates the next numbered index,
 * waits for it to become healthy, registers its index range, then re-points the
 * write alias from the old target (if any) to the new one. The old target is
 * made read-only and its range recalculated. No-op for non-writable index sets.
 */
@Override
public void cycle() {
    if (!getConfig().isWritable()) {
        LOG.debug("Not cycling non-writable index set <{}> ({})", getConfig().id(), getConfig().title());
        return;
    }

    int oldTargetNumber;
    try {
        oldTargetNumber = getNewestIndexNumber();
    } catch (NoTargetIndexException ex) {
        // No index exists yet: -1 signals "first cycle", handled below.
        oldTargetNumber = -1;
    }
    final int newTargetNumber = oldTargetNumber + 1;

    final String newTarget = buildIndexName(newTargetNumber);
    final String oldTarget = buildIndexName(oldTargetNumber);

    if (oldTargetNumber == -1) {
        LOG.info("Cycling from <none> to <{}>.", newTarget);
    } else {
        LOG.info("Cycling from <{}> to <{}>.", oldTarget, newTarget);
    }

    // Create new index.
    LOG.info("Creating target index <{}>.", newTarget);
    if (!indices.create(newTarget, this)) {
        throw new RuntimeException("Could not create new target index <" + newTarget + ">.");
    }

    LOG.info("Waiting for allocation of index <{}>.", newTarget);
    final HealthStatus healthStatus = indices.waitForRecovery(newTarget);
    checkIfHealthy(healthStatus, (status) -> new RuntimeException(
            "New target index did not become healthy (target index: <" + newTarget + ">)"));
    LOG.debug("Health status of index <{}>: {}", newTarget, healthStatus);

    addDeflectorIndexRange(newTarget);
    LOG.info("Index <{}> has been successfully allocated.", newTarget);

    // Point deflector to new index.
    final String indexAlias = getWriteIndexAlias();
    LOG.info("Pointing index alias <{}> to new index <{}>.", indexAlias, newTarget);

    final Activity activity = new Activity(IndexSet.class);
    if (oldTargetNumber == -1) {
        // Only pointing, not cycling.
        pointTo(newTarget);
        activity.setMessage("Cycled index alias <" + indexAlias + "> from <none> to <" + newTarget + ">.");
    } else {
        // Re-pointing from existing old index to the new one.
        LOG.debug("Switching over index alias <{}>.", indexAlias);
        pointTo(newTarget, oldTarget);
        setIndexReadOnlyAndCalculateRange(oldTarget);
        activity.setMessage("Cycled index alias <" + indexAlias + "> from <" + oldTarget + "> to <" + newTarget + ">.");
    }

    LOG.info("Successfully pointed index alias <{}> to index <{}>.", indexAlias, newTarget);

    activityWriter.write(activity);
    auditEventSender.success(AuditActor.system(nodeId), ES_WRITE_INDEX_UPDATE,
            ImmutableMap.of("indexName", newTarget));
}
// With no pre-existing indices, cycling must create "<prefix>_0" and point the
// deflector alias at it.
@Test
public void cyclePointsIndexAliasToInitialTarget() {
    final String indexName = config.indexPrefix() + "_0";
    final Map<String, Set<String>> indexNameAliases = ImmutableMap.of();

    when(indices.getIndexNamesAndAliases(anyString())).thenReturn(indexNameAliases);
    when(indices.create(indexName, mongoIndexSet)).thenReturn(true);
    when(indices.waitForRecovery(indexName)).thenReturn(HealthStatus.Green);
    final MongoIndexSet mongoIndexSet = createIndexSet(config);

    mongoIndexSet.cycle();

    verify(indices, times(1)).cycleAlias("graylog_deflector", indexName);
}
/**
 * Queries audit logs by operator name, optionally bounded by a time window,
 * with page/size pagination. Access is guarded by the audit-log query authorizer.
 *
 * @param opName    operator name to filter by
 * @param page      zero-based page index
 * @param size      page size
 * @param startDate optional inclusive lower bound of the time window
 * @param endDate   optional inclusive upper bound of the time window
 * @return the matching audit-log DTOs
 */
@GetMapping("/logs/opName")
@PreAuthorize(value = "@apolloAuditLogQueryApiPreAuthorizer.hasQueryPermission()")
public List<ApolloAuditLogDTO> findAllAuditLogsByOpNameAndTime(@RequestParam String opName,
    @RequestParam int page,
    @RequestParam int size,
    @RequestParam(value = "startDate", required = false)
    @DateTimeFormat(pattern = "yyyy-MM-dd HH:mm:ss.S") Date startDate,
    @RequestParam(value = "endDate", required = false)
    @DateTimeFormat(pattern = "yyyy-MM-dd HH:mm:ss.S") Date endDate) {
    // Pure delegation to the audit-log query API.
    return api.queryLogsByOpName(opName, startDate, endDate, page, size);
}
// End-to-end MVC test: the endpoint forwards opName, window and pagination to
// the API and returns the DTO list as a JSON array.
// NOTE(review): new Date(2023, Calendar.OCTOBER, 15) uses the deprecated
// year-offset constructor (actual year 3923) — harmless here since the same
// Date objects are used for both stubbing and formatting, but worth confirming.
@Test
public void testFindAllAuditLogsByOpNameAndTime() throws Exception {
    final String opName = "query-op-name";
    final Date startDate = new Date(2023, Calendar.OCTOBER, 15);
    final Date endDate = new Date(2023, Calendar.OCTOBER, 16);
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S");

    {
        List<ApolloAuditLogDTO> mockLogDTOList = MockBeanFactory.mockAuditLogDTOListByLength(size);
        mockLogDTOList.forEach(e -> {
            e.setOpName(opName);
        });
        Mockito.when(
            api.queryLogsByOpName(Mockito.eq(opName), Mockito.eq(startDate), Mockito.eq(endDate),
                Mockito.eq(page), Mockito.eq(size))).thenReturn(mockLogDTOList);
    }

    mockMvc.perform(MockMvcRequestBuilders.get("/apollo/audit/logs/opName").param("opName", opName)
            .param("startDate", sdf.format(startDate))
            .param("endDate", sdf.format(endDate))
            .param("page", String.valueOf(page))
            .param("size", String.valueOf(size)))
        .andExpect(MockMvcResultMatchers.status().isOk())
        .andExpect(MockMvcResultMatchers.jsonPath("$").isArray())
        .andExpect(MockMvcResultMatchers.jsonPath("$.length()").value(size))
        .andExpect(MockMvcResultMatchers.jsonPath("$.[0].opName").value(opName));

    Mockito.verify(api, Mockito.times(1))
        .queryLogsByOpName(Mockito.eq(opName), Mockito.eq(startDate), Mockito.eq(endDate),
            Mockito.eq(page), Mockito.eq(size));
}
/**
 * Proxy entry point: routes Object methods (equals/hashCode/toString) to this
 * handler, rejects methods outside the dispatch map, and forwards everything
 * else to the registered per-method handler.
 *
 * @throws UnsupportedOperationException for methods with no registered handler
 */
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    final String methodName = method.getName();
    if ("equals".equals(methodName)) {
        try {
            // Compare handler-to-handler so two proxies with the same handler are equal.
            Object otherHandler = (args.length > 0 && args[0] != null)
                    ? Proxy.getInvocationHandler(args[0])
                    : null;
            return equals(otherHandler);
        } catch (IllegalArgumentException e) {
            // args[0] was not a proxy instance.
            return false;
        }
    }
    if ("hashCode".equals(methodName)) {
        return hashCode();
    }
    if ("toString".equals(methodName)) {
        return toString();
    }
    if (!dispatch.containsKey(method)) {
        throw new UnsupportedOperationException(
                String.format("Method \"%s\" should not be called", methodName));
    }
    return this.invoke(method, this.dispatch.get(method), args);
}
// The reactor handler must be lazy: invoking returns a Mono without touching the
// method handler; the handler runs only once the Mono is subscribed.
@SuppressWarnings("unchecked")
@Test
void invokeOnSubscribeReactor() throws Throwable {
    given(this.methodHandler.invoke(any())).willReturn("Result");
    ReactorInvocationHandler handler = new ReactorInvocationHandler(this.target,
            Collections.singletonMap(method, this.methodHandler),
            Schedulers.boundedElastic());

    Object result = handler.invoke(method, this.methodHandler, new Object[] {});
    assertThat(result).isInstanceOf(Mono.class);
    verifyNoInteractions(this.methodHandler);

    /* subscribe and execute the method */
    StepVerifier.create((Mono) result)
            .expectNext("Result")
            .expectComplete()
            .verify();

    verify(this.methodHandler, times(1)).invoke(any());
}
/**
 * Returns all values across every partition spec as a single unmodifiable
 * collection (a snapshot; later map mutations are not reflected).
 */
@Override
public Collection<V> values() {
    List<V> collected = Lists.newArrayList();
    // Flatten the per-spec maps into one list, preserving iteration order.
    partitionMaps.values().forEach(partitionMap -> collected.addAll(partitionMap.values()));
    return Collections.unmodifiableCollection(collected);
}
// values() must surface entries inserted under different partition specs.
@Test
public void testValues() {
    PartitionMap<Integer> map = PartitionMap.create(SPECS);

    map.put(BY_DATA_SPEC.specId(), Row.of("aaa"), 1);
    map.put(BY_DATA_CATEGORY_BUCKET_SPEC.specId(), Row.of("aaa", 2), 2);
    map.put(BY_DATA_SPEC.specId(), Row.of("bbb"), 3);

    assertThat(map.values()).containsAll(ImmutableList.of(1, 2, 3));
}
/**
 * Decodes a bigint from an 8-byte big-endian two's-complement varbinary.
 *
 * @param slice exactly 8 bytes holding the big-endian value
 * @return the decoded long
 * @throws PrestoException if the input is not exactly 8 bytes
 */
@Description("decode bigint value from a 64-bit 2's complement big endian varbinary")
@ScalarFunction("from_big_endian_64")
@SqlType(StandardTypes.BIGINT)
public static long fromBigEndian64(@SqlType(StandardTypes.VARBINARY) Slice slice) {
    int length = slice.length();
    if (length != Long.BYTES) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT,
                "expected 8-byte input, but got instead: " + length);
    }
    // getLong reads little-endian, so swap bytes to interpret the big-endian payload.
    return Long.reverseBytes(slice.getLong(0));
}
// Covers zero, one, the extremes of the long range, and rejection of inputs
// that are not exactly 8 bytes.
@Test
public void testFromBigEndian64() {
    assertFunction("from_big_endian_64(from_hex('0000000000000000'))", BIGINT, 0L);
    assertFunction("from_big_endian_64(from_hex('0000000000000001'))", BIGINT, 1L);
    assertFunction("from_big_endian_64(from_hex('7FFFFFFFFFFFFFFF'))", BIGINT, 9223372036854775807L);
    assertFunction("from_big_endian_64(from_hex('8000000000000001'))", BIGINT, -9223372036854775807L);
    assertInvalidFunction("from_big_endian_64(from_hex(''))", INVALID_FUNCTION_ARGUMENT);
    assertInvalidFunction("from_big_endian_64(from_hex('1111'))", INVALID_FUNCTION_ARGUMENT);
    assertInvalidFunction("from_big_endian_64(from_hex('000000000000000011'))", INVALID_FUNCTION_ARGUMENT);
}
/**
 * Validates a single range-partition definition and registers it under the
 * given partition id, together with its per-partition properties.
 *
 * @param schema      table columns keyed by column id, used to validate the range bounds
 * @param desc        the partition description being added
 * @param partitionId id the new partition is registered under
 * @param isTemp      whether this is a temporary partition
 * @return the accepted key range of the new partition
 * @throws DdlException if the range is invalid (e.g. lower bound above upper bound)
 */
public Range<PartitionKey> handleNewSinglePartitionDesc(Map<ColumnId, Column> schema,
                                                        SingleRangePartitionDesc desc, long partitionId,
                                                        boolean isTemp) throws DdlException {
    Range<PartitionKey> range;
    try {
        range = checkAndCreateRange(schema, desc, isTemp);
        setRangeInternal(partitionId, isTemp, range);
    } catch (IllegalArgumentException e) {
        // Range.closedOpen may throw this if (lower > upper)
        throw new DdlException("Invalid key range: " + e.getMessage());
    }
    // Record per-partition properties only after the range was accepted.
    idToDataProperty.put(partitionId, desc.getPartitionDataProperty());
    idToReplicationNum.put(partitionId, desc.getReplicationNum());
    idToInMemory.put(partitionId, desc.isInMemory());
    idToStorageCacheInfo.put(partitionId, desc.getDataCacheInfo());
    return range;
}
// A partition key with fewer values than partition columns plus no upper bound
// must be rejected during analysis with an AnalysisException.
@Test(expected = AnalysisException.class)
public void testFixedRange3() throws DdlException, AnalysisException {
    //add columns
    int columns = 2;
    Column k1 = new Column("k1", new ScalarType(PrimitiveType.INT), true, null, "", "");
    Column k2 = new Column("k2", new ScalarType(PrimitiveType.BIGINT), true, null, "", "");
    partitionColumns.add(k1);
    partitionColumns.add(k2);

    //add RangePartitionDescs
    PartitionKeyDesc p1 = new PartitionKeyDesc(
            Lists.newArrayList(new PartitionValue("20190101"), new PartitionValue("200")),
            new ArrayList<>());

    singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", p1, null));

    partitionInfo = new RangePartitionInfo(partitionColumns);
    for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) {
        singleRangePartitionDesc.analyze(columns, null);
        partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(partitionColumns),
                singleRangePartitionDesc, 20000L, false);
    }
}
/**
 * Evaluates an IO operation, swallowing any exception: the failure is logged at
 * debug level and an empty Optional is returned instead of propagating.
 *
 * @param action    name of the action, used for logging/diagnostics
 * @param path      path the action applies to, used for diagnostics
 * @param operation the operation to evaluate
 * @return the operation's result, or empty if it threw
 */
public static <T> Optional<T> quietlyEval(String action,
                                          String path,
                                          CallableRaisingIOE<T> operation) {
    try {
        return Optional.of(once(action, path, operation));
    } catch (Exception e) {
        // Deliberately best-effort: record the failure but do not raise it.
        LOG.debug("Action {} failed", action, e);
        return Optional.empty();
    }
}
// quietlyEval must absorb the thrown exception rather than propagate it;
// reaching the end of the test without an exception is the assertion.
@Test
public void testQuietlyVoid() {
    quietlyEval("", "", () -> {
        throw HADOOP_CONNECTION_TIMEOUT_EX;
    });
}
@Override public List<RemoteInstance> queryRemoteNodes() { List<RemoteInstance> remoteInstances = new ArrayList<>(); try { HealthClient healthClient = client.healthClient(); // Discover only "passing" nodes List<ServiceHealth> nodes = healthClient.getHealthyServiceInstances(serviceName).getResponse(); if (CollectionUtils.isNotEmpty(nodes)) { nodes.forEach(node -> { if (!Strings.isNullOrEmpty(node.getService().getAddress())) { Address address = new Address(node.getService().getAddress(), node.getService().getPort(), false); if (address.equals(selfAddress)) { address.setSelf(true); } remoteInstances.add(new RemoteInstance(address)); } }); } ClusterHealthStatus healthStatus = OAPNodeChecker.isHealth(remoteInstances); if (healthStatus.isHealth()) { this.healthChecker.health(); } else { this.healthChecker.unHealth(healthStatus.getReason()); } } catch (Throwable e) { healthChecker.unHealth(e); throw new ServiceQueryException(e.getMessage()); } if (log.isDebugEnabled()) { remoteInstances.forEach(instance -> log.debug("Cosule cluster instance: {}", instance)); } return remoteInstances; }
// Both a null and an empty Consul response must yield an empty instance list
// (the two consecutive stubbed returns cover each case in turn).
@Test
@SuppressWarnings("unchecked")
public void queryRemoteNodesWithNonOrEmpty() {
    when(consulResponse.getResponse()).thenReturn(null, Collections.emptyList());
    assertEquals(0, coordinator.queryRemoteNodes().size());
    assertEquals(0, coordinator.queryRemoteNodes().size());
}
/**
 * Returns whether this set contains the given value, delegating to the
 * backing collection's membership test.
 *
 * @param value value to test for membership
 * @return true if present
 */
public boolean contains(final T value) {
    return values.contains(value);
}
// An empty compatible set must report false for every membership query.
@Test
public void shouldReturnCorrectForContainsWithEmptySet() {
    // Given:
    CompatibleSet<CompatibleInteger> compatibleSet = new CompatibleSet<>(
        ImmutableSet.<CompatibleInteger>of());
    // When: Then:
    assertThat(compatibleSet.contains(one), is(false));
    assertThat(compatibleSet.contains(two), is(false));
    assertThat(compatibleSet.contains(three), is(false));
    assertThat(compatibleSet.contains(four), is(false));
}
/**
 * Verifies a data group against the digest recorded in the security object:
 * recomputes the digest of {@code dg} with the configured algorithm and compares
 * it to the stored value for the given data-group number.
 *
 * @param number data-group number whose digest is checked
 * @param dg     raw data-group bytes to verify
 * @throws CryptoException       if no digest is stored for that data group
 * @throws VerificationException if the computed digest does not match
 */
public void verify(int number, byte[] dg) {
    final byte[] compare = digests.get(number);
    if (compare == null) {
        throw new CryptoException("Could not find digest of data group " + number);
    }
    final byte[] calculated = DigestUtils.digest(algorithm).digest(dg);
    if (!CryptoUtils.compare(compare, calculated, 0)) {
        throw new VerificationException("Digest of data group " + number
                + " is not equal to security object");
    }
}
// Corrupting a single byte of DG14 must make digest verification fail with a
// VerificationException naming the data group.
@Test
public void invalidPcaRvigDg14ShouldThrowException() throws Exception {
    thrown.expect(VerificationException.class);
    thrown.expectMessage("Digest of data group 14 is not equal to security object");

    final byte[] dg14 = createPcaRvigDg14();
    dg14[0]++; // flip one byte to invalidate the digest
    final LdsSecurityObject ldsSecurityObject = mapper.read(
        readFromCms("pca-rvig"), LdsSecurityObject.class);
    ldsSecurityObject.verify(14, dg14);
}
/**
 * Builds the OpenID Connect authorization-code request URL for the given server
 * and client configuration, including standard parameters, any extra options,
 * and an optional login hint.
 *
 * @param serverConfig OP configuration supplying the authorization endpoint
 * @param clientConfig client id and scopes to request
 * @param redirectUri  callback the OP should redirect to
 * @param nonce        replay-protection nonce
 * @param state        CSRF-protection state value
 * @param options      extra query parameters appended verbatim
 * @param loginHint    optional login hint; omitted when null/empty
 * @return the fully assembled authorization URL
 * @throws AuthenticationServiceException if the endpoint URI is malformed
 */
@Override
public String buildAuthRequestUrl(ServerConfiguration serverConfig, RegisteredClient clientConfig,
        String redirectUri, String nonce, String state, Map<String, String> options, String loginHint) {
    try {
        URIBuilder uriBuilder = new URIBuilder(serverConfig.getAuthorizationEndpointUri());
        uriBuilder.addParameter("response_type", "code");
        uriBuilder.addParameter("client_id", clientConfig.getClientId());
        // Scopes are space-delimited per the OAuth2 spec.
        uriBuilder.addParameter("scope", Joiner.on(" ").join(clientConfig.getScope()));
        uriBuilder.addParameter("redirect_uri", redirectUri);
        uriBuilder.addParameter("nonce", nonce);
        uriBuilder.addParameter("state", state);
        // Optional parameters:
        for (Entry<String, String> option : options.entrySet()) {
            uriBuilder.addParameter(option.getKey(), option.getValue());
        }
        // if there's a login hint, send it
        if (!Strings.isNullOrEmpty(loginHint)) {
            uriBuilder.addParameter("login_hint", loginHint);
        }
        return uriBuilder.build().toString();
    } catch (URISyntaxException e) {
        throw new AuthenticationServiceException("Malformed Authorization Endpoint Uri", e);
    }
}
// The built URL must contain all standard OIDC parameters, form-encoded, plus
// the extra "foo=bar" option, and no login_hint when none is given.
@Test
public void buildAuthRequestUrl() {
    String expectedUrl = "https://server.example.com/authorize?"
            + "response_type=code"
            + "&client_id=s6BhdRkqt3"
            + "&scope=openid+profile" // plus sign used for space per application/x-www-form-encoded standard
            + "&redirect_uri=https%3A%2F%2Fclient.example.org%2F"
            + "&nonce=34fasf3ds"
            + "&state=af0ifjsldkj"
            + "&foo=bar";
    Map<String, String> options = ImmutableMap.of("foo", "bar");

    String actualUrl = urlBuilder.buildAuthRequestUrl(serverConfig, clientConfig,
            "https://client.example.org/", "34fasf3ds", "af0ifjsldkj", options, null);
    assertThat(actualUrl, equalTo(expectedUrl));
}
/**
 * Searches the graph for a cycle using a colored depth-first traversal.
 *
 * @return the vertices of the first cycle found, or an empty list if the graph
 *         is acyclic
 * @throws IllegalStateException if the traversal reports a cycle but the cycle
 *         state is missing or empty (internal invariant violation)
 */
public List<T> findCycle() {
    resetState();
    for (T vertex : graph.getVertices()) {
        // WHITE = not yet visited; start a DFS from every unvisited vertex.
        if (colors.get(vertex) == WHITE) {
            if (visitDepthFirst(vertex, new ArrayList<>(List.of(vertex)))) {
                if (cycle == null) throw new IllegalStateException("Null cycle - this should never happen");
                if (cycle.isEmpty()) throw new IllegalStateException("Empty cycle - this should never happen");
                log.log(FINE, () -> "Cycle detected: " + cycle);
                return cycle;
            }
        }
    }
    return new ArrayList<>();
}
// A self-loop (A -> A) is the smallest possible cycle and must be detected.
@Test
void graph_with_self_referencing_vertex_returns_cycle() {
    var graph = new Graph<Vertices>();
    graph.edge(A, A);
    var cycleFinder = new CycleFinder<>(graph);
    assertTrue(cycleFinder.findCycle().containsAll(List.of(A, A)));
}
/**
 * Maps a SeaTunnel column definition back to the corresponding Redshift column
 * type, clamping out-of-range precision/scale/length values to Redshift's
 * limits (with a warning) and falling back to SUPER for complex/overlong types.
 *
 * <p>Fix: the TIME branch previously logged {@code MAX_SCALE} as the limit in
 * its truncation warning although the value is clamped to
 * {@code MAX_TIME_SCALE}; the log argument now matches the actual limit.
 *
 * @param column the SeaTunnel column to convert
 * @return the Redshift type definition for the column
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(REDSHIFT_BOOLEAN);
            builder.dataType(REDSHIFT_BOOLEAN);
            break;
        case TINYINT:
        case SMALLINT:
            builder.columnType(REDSHIFT_SMALLINT);
            builder.dataType(REDSHIFT_SMALLINT);
            break;
        case INT:
            builder.columnType(REDSHIFT_INTEGER);
            builder.dataType(REDSHIFT_INTEGER);
            break;
        case BIGINT:
            builder.columnType(REDSHIFT_BIGINT);
            builder.dataType(REDSHIFT_BIGINT);
            break;
        case FLOAT:
            builder.columnType(REDSHIFT_REAL);
            builder.dataType(REDSHIFT_REAL);
            break;
        case DOUBLE:
            builder.columnType(REDSHIFT_DOUBLE_PRECISION);
            builder.dataType(REDSHIFT_DOUBLE_PRECISION);
            break;
        case DECIMAL:
            // Clamp precision/scale into Redshift's supported numeric range.
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                // Shrink scale by the amount precision overflows, never below 0.
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%d,%d)", REDSHIFT_NUMERIC, precision, scale));
            builder.dataType(REDSHIFT_NUMERIC);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case STRING:
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_CHARACTER_VARYING, MAX_CHARACTER_VARYING_LENGTH));
                builder.dataType(REDSHIFT_CHARACTER_VARYING);
                builder.length((long) MAX_CHARACTER_VARYING_LENGTH);
            } else if (column.getColumnLength() <= MAX_CHARACTER_VARYING_LENGTH) {
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_CHARACTER_VARYING, column.getColumnLength()));
                builder.dataType(REDSHIFT_CHARACTER_VARYING);
                builder.length(column.getColumnLength());
            } else {
                // Too long for VARCHAR: fall back to the semi-structured SUPER type.
                log.warn(
                        "The length of string column {} is {}, which exceeds the maximum length of {}, "
                                + "the length will be set to {}",
                        column.getName(),
                        column.getColumnLength(),
                        MAX_SUPER_LENGTH,
                        MAX_SUPER_LENGTH);
                builder.columnType(REDSHIFT_SUPER);
                builder.dataType(REDSHIFT_SUPER);
            }
            break;
        case BYTES:
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_BINARY_VARYING, MAX_BINARY_VARYING_LENGTH));
                builder.dataType(REDSHIFT_BINARY_VARYING);
            } else if (column.getColumnLength() <= MAX_BINARY_VARYING_LENGTH) {
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_BINARY_VARYING, column.getColumnLength()));
                builder.dataType(REDSHIFT_BINARY_VARYING);
                builder.length(column.getColumnLength());
            } else {
                // Oversized binary: clamp to the maximum VARBYTE length.
                builder.columnType(
                        String.format(
                                "%s(%d)", REDSHIFT_BINARY_VARYING, MAX_BINARY_VARYING_LENGTH));
                builder.dataType(REDSHIFT_BINARY_VARYING);
                log.warn(
                        "The length of binary column {} is {}, which exceeds the maximum length of {}, "
                                + "the length will be set to {}",
                        column.getName(),
                        column.getColumnLength(),
                        MAX_BINARY_VARYING_LENGTH,
                        MAX_BINARY_VARYING_LENGTH);
            }
            break;
        case TIME:
            Integer timeScale = column.getScale();
            if (timeScale != null && timeScale > MAX_TIME_SCALE) {
                timeScale = MAX_TIME_SCALE;
                log.warn(
                        "The time column {} type time({}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to time({})",
                        column.getName(),
                        column.getScale(),
                        MAX_TIME_SCALE, // fixed: previously logged MAX_SCALE, not the actual limit
                        timeScale);
            }
            builder.columnType(REDSHIFT_TIME);
            builder.dataType(REDSHIFT_TIME);
            builder.scale(timeScale);
            break;
        case TIMESTAMP:
            Integer timestampScale = column.getScale();
            if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) {
                timestampScale = MAX_TIMESTAMP_SCALE;
                log.warn(
                        "The timestamp column {} type timestamp({}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to timestamp({})",
                        column.getName(),
                        column.getScale(),
                        MAX_TIMESTAMP_SCALE,
                        timestampScale);
            }
            builder.columnType(REDSHIFT_TIMESTAMP);
            builder.dataType(REDSHIFT_TIMESTAMP);
            builder.scale(timestampScale);
            break;
        case MAP:
        case ARRAY:
        case ROW:
            // Complex types are stored as semi-structured SUPER values.
            builder.columnType(REDSHIFT_SUPER);
            builder.dataType(REDSHIFT_SUPER);
            break;
        default:
            try {
                return super.reconvert(column);
            } catch (SeaTunnelRuntimeException e) {
                throw CommonError.convertToConnectorTypeError(
                        DatabaseIdentifier.REDSHIFT,
                        column.getDataType().getSqlType().name(),
                        column.getName());
            }
    }
    return builder.build();
}
// A TIME column with scale above the Redshift maximum must be clamped to MAX_TIME_SCALE.
@Test
public void testReconvertTime() {
    Column column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(LocalTimeType.LOCAL_TIME_TYPE)
                    .scale(9)
                    .build();

    BasicTypeDefine typeDefine = RedshiftTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(RedshiftTypeConverter.REDSHIFT_TIME, typeDefine.getColumnType());
    Assertions.assertEquals(RedshiftTypeConverter.REDSHIFT_TIME, typeDefine.getDataType());
    Assertions.assertEquals(RedshiftTypeConverter.MAX_TIME_SCALE, typeDefine.getScale());
}
/**
 * Returns the cached value, reloading it when the cache is empty or the
 * refresh interval has elapsed. Safe for concurrent callers: the reload is
 * published with a CAS so racing threads converge on a single fresh value.
 */
@Override
public T getValue() {
    T cached = this.value.get();
    // Fast path: still fresh and present — serve the cached value as-is.
    if (!shouldLoad() && cached != null) {
        return cached;
    }
    T reloaded = loadValue();
    // Publish via CAS; if another thread won the race, hand back its value
    // instead of ours so all callers observe a single winner.
    if (this.value.compareAndSet(cached, reloaded)) {
        return reloaded;
    }
    return this.value.get();
}
@Test
public void multipleThreadAccessReturnsConsistentResults() throws Exception {
    // Hammer the cached gauge from THREAD_COUNT threads for RUNNING_TIME_MILLIS:
    // every observed value must be non-null and monotonically non-decreasing
    // (a decrease would mean a thread saw a stale, already-superseded value).
    List<Future<Boolean>> futures = new ArrayList<>(THREAD_COUNT);
    for (int i = 0; i < THREAD_COUNT; i++) {
        Future<Boolean> future = executor.submit(() -> {
            long startTime = System.currentTimeMillis();
            int lastValue = 0;
            do {
                Integer newValue = shortTimeoutGauge.getValue();
                if (newValue == null) {
                    LOGGER.warn("Cached gauge returned null value");
                    return false;
                }
                if (newValue < lastValue) {
                    LOGGER.error("Cached gauge returned stale value, last: {}, new: {}", lastValue, newValue);
                    return false;
                }
                lastValue = newValue;
            } while (System.currentTimeMillis() - startTime <= RUNNING_TIME_MILLIS);
            return true;
        });
        futures.add(future);
    }
    // Join all workers; any false result means an inconsistency was observed.
    for (int i = 0; i < futures.size(); i++) {
        assertTrue("Future " + i + " failed", futures.get(i).get());
    }
    executor.shutdown();
}
/**
 * Serializes a Connect Boolean value into bytes for the given topic.
 * A null schema is accepted (schemaless data); any other non-BOOLEAN schema
 * is rejected. A value of the wrong runtime type is reported as a DataException.
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (schema != null && schema.type() != Type.BOOLEAN)
        throw new DataException("Invalid schema type for BooleanConverter: " + schema.type().toString());
    try {
        // A null value survives the cast and is handled by the serializer
        // (which returns null for null input).
        return serializer.serialize(topic, (Boolean) value);
    } catch (ClassCastException e) {
        // value is necessarily non-null here, so value.getClass() is safe.
        throw new DataException("BooleanConverter is not compatible with objects of type " + value.getClass());
    }
}
@Test
public void testFromConnectNullValue() {
    // A null payload with a valid BOOLEAN schema must serialize to null bytes.
    assertNull(converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, null));
}
// Intentionally a no-op: this implementation does not clear event timers —
// presumably the disabled/stub variant of the SDK API. TODO(review): confirm
// against the enabled implementation that timers are cleared there.
@Override
public void clearTrackTimer() {

}
@Test
public void clearTrackTimer() {
    // After clearTrackTimer(), ending a previously started timer must not emit
    // any event — the callback fails the test if anything is tracked.
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.trackTimerStart("TestTimerEvent");
    mSensorsAPI.clearTrackTimer();
    mSensorsAPI.trackTimerEnd("TestTimerEvent");
}
@Udf(description = "When filtering an array, "
    + "the function provided must have a boolean result. "
    + "The function is applied to each value in the array "
    + "and a filtered array is returned."
)
public <T> List<T> filterArray(
    @UdfParameter(description = "The array") final List<T> array,
    @UdfParameter(description = "The lambda function") final Function<T, Boolean> function
) {
    // Null in, null out — for either the array or the predicate function.
    if (array == null) {
        return null;
    }
    if (function == null) {
        return null;
    }
    // Keep only the elements for which the lambda yields true. Note that a
    // null Boolean result (or a null element the lambda rejects) surfaces as
    // an exception, matching the eager unboxing in the filter step.
    return array.stream()
        .filter(element -> function.apply(element))
        .collect(Collectors.toList());
}
@Test
public void shouldThrowErrorOnNullArrayInput() {
    // A null *element* (as opposed to a null array) reaches the lambda, which
    // blows up with an NPE — the UDF must propagate it rather than swallow it.
    assertThrows(NullPointerException.class, () -> udf.filterArray(Collections.singletonList(null), function1()));
    assertThrows(NullPointerException.class, () -> udf.filterArray(Collections.singletonList(null), function2()));
}
/**
 * Parses and evaluates a SEL expression against the supplied variables.
 *
 * @param expr    the expression text (length-checked before parsing)
 * @param varsMap variables visible to the evaluator
 * @param ext     optional extension hooks for the evaluator
 * @return the evaluation result
 * @throws Exception on parse or evaluation failure
 */
public SelType evaluate(String expr, Map<String, Object> varsMap, Extension ext) throws Exception {
    checkExprLength(expr);
    // NOTE(review): getBytes() uses the platform default charset — confirm the
    // parser expects that rather than an explicit UTF-8.
    selParser.ReInit(new ByteArrayInputStream(expr.getBytes()));
    ASTExecute n = selParser.Execute();
    try {
        selEvaluator.resetWithInput(varsMap, ext);
        return (SelType) n.jjtAccept(selEvaluator, null);
    } finally {
        // Always clear evaluator state so a failed run cannot leak into the next.
        selEvaluator.clearState();
    }
}
// An over-length expression must be rejected by checkExprLength before parsing.
@Test(expected = IllegalArgumentException.class)
public void testEvaluateExpressionTooLong() throws Exception {
    t1.evaluate("x.IsInvalidExpression();", new HashMap<>(), null);
}
/**
 * Merges this table's statistics with another table's.
 *
 * <p>If either side has an unknown row count (negative), the merged result is
 * {@link TableStats#UNKNOWN}; otherwise row counts are summed and the column
 * statistics are merged per column.
 *
 * @param other         statistics to merge in
 * @param partitionKeys partition key columns, or null; forwarded to the
 *                      column-stats merge
 * @return the merged statistics, or UNKNOWN if either input is unknown
 */
public TableStats merge(TableStats other, @Nullable Set<String> partitionKeys) {
    if (this.rowCount < 0 || other.rowCount < 0) {
        return TableStats.UNKNOWN;
    }
    // Both row counts are non-negative after the guard above, so the previous
    // re-check of ">= 0" was dead code; the sum is always well-defined here.
    long rowCount = this.rowCount + other.rowCount;
    return new TableStats(rowCount, mergeColumnStates(other, partitionKeys));
}
@Test
void testMergeColumnStatsUnknown() {
    // Merging propagates "unknown" (null) per-field: whenever either side has a
    // null field, the merged ColumnStats has null for that field; known numeric
    // fields are combined (e.g. max values summed to 10 here).
    ColumnStats columnStats0 = new ColumnStats(4L, 5L, 2D, 3, 15, 2);
    ColumnStats columnStats1 = new ColumnStats(4L, null, 2D, 3, 15, 2);
    ColumnStats columnStats2 = new ColumnStats(4L, 5L, 2D, null, 15, 2);
    ColumnStats columnStats3 = new ColumnStats(null, 5L, 2D, 3, 15, 2);
    ColumnStats columnStats4 = new ColumnStats(4L, 5L, 2D, 3, null, 2);
    ColumnStats columnStats5 = new ColumnStats(4L, 5L, 2D, 3, 15, null);
    ColumnStats columnStats6 = new ColumnStats(4L, 5L, null, 3, 15, 2);
    assertThat(columnStats0.merge(columnStats1, false))
            .isEqualTo(new ColumnStats(4L, null, 2D, 3, 15, 2));
    assertThat(columnStats0.merge(columnStats2, false))
            .isEqualTo(new ColumnStats(4L, 10L, 2D, null, 15, 2));
    assertThat(columnStats0.merge(columnStats3, false))
            .isEqualTo(new ColumnStats(null, 10L, 2D, 3, 15, 2));
    assertThat(columnStats0.merge(columnStats4, false))
            .isEqualTo(new ColumnStats(4L, 10L, 2D, 3, null, 2));
    assertThat(columnStats0.merge(columnStats5, false))
            .isEqualTo(new ColumnStats(4L, 10L, 2D, 3, 15, null));
    assertThat(columnStats0.merge(columnStats6, false))
            .isEqualTo(new ColumnStats(4L, 10L, null, 3, 15, 2));
    assertThat(columnStats6.merge(columnStats6, false))
            .isEqualTo(new ColumnStats(4L, 10L, null, 3, 15, 2));

    // Test column stats merge while partition key is true.
    assertThat(columnStats0.merge(columnStats1, true))
            .isEqualTo(new ColumnStats(8L, null, 2D, 3, 15, 2));
    assertThat(columnStats0.merge(columnStats3, true))
            .isEqualTo(new ColumnStats(null, 10L, 2D, 3, 15, 2));
}
/**
 * Fluent setter for the pet id.
 *
 * @param petId the pet id to set (may be null)
 * @return this order, for call chaining
 */
public Order petId(Long petId) {
    this.petId = petId;
    return this;
}
// Generated placeholder test for the petId accessor — not yet implemented.
@Test
public void petIdTest() {
    // TODO: test petId
}
/**
 * Deserializes a sequence of identifier-tagged ASN.1 objects into a new
 * instance of {@code type}. Each nested sequence starts with an identifier
 * that selects the group of fields to populate; unknown identifiers are only
 * tolerated when the entity is marked partial.
 */
@Override
public Object deserialize(Asn1ObjectInputStream in, Class<? extends Object> type, Asn1ObjectMapper mapper) {
    final Asn1Entity entity = type.getAnnotation(Asn1Entity.class);
    final Object instance = ObjectUtils.newInstance(type);
    // Identifier -> fields still awaiting data; entries are removed as consumed.
    final Map<String, List<Asn1Field>> fields = fieldsMap(mapper.getFields(type));
    while (!in.atEnd()) {
        try (final Asn1ObjectInputStream seq = in.next()) {
            if (seq.tagNo != getNestedTagNo(type)) {
                throw new Asn1Exception("Expected tag %x, got %x", getNestedTagNo(type), seq.tagNo);
            }
            final String id = readIdentifier(seq);
            final List<Asn1Field> fieldsOfId = fields.remove(id);
            if (fieldsOfId == null) {
                // Unknown identifier: skip it for partial entities, else fail.
                if (!entity.partial())
                    throw new Asn1Exception("Found unknown identifier " + id);
                seq.advanceToEnd();
                continue;
            }
            final FieldSequence fieldsSeq = new FieldSequence(false, fieldsOfId);
            readFields(mapper, seq, fieldsSeq, instance);
            // Consume the remaining tagged objects of this sequence, mapping
            // each tag to its declared field and setting the decoded value.
            while (!seq.atEnd()) {
                try (final Asn1ObjectInputStream obj = seq.next()) {
                    final Asn1Field field = fieldsSeq.get(obj.tagNo);
                    final Object attr = mapper.readValue(obj, field.converter(), field.type());
                    ObjectUtils.setProperty(field.pd, instance, attr);
                }
            }
            if (!fieldsSeq.done()) {
                throw new Asn1Exception("At end of data, but still non optional fields");
            }
        }
    }
    return instance;
}
@Test
public void shouldDeserialize() {
    // Two identified sequences (OIDs 83 and 84) carrying INTEGER payloads must
    // decode into a Set(1, 2, 3).
    assertEquals(new Set(1, 2, 3), deserialize(
        new SetOfIdentifiedConverter(),
        Set.class,
        new byte[] {
            0x30, 6, 0x06, 1, 83, 0x02, 1, 3,
            0x30, 9, 0x06, 1, 84, 0x02, 1, 1, 0x02, 1, 2
        }
    ));
}
/**
 * Performs an HTTP GET against the current Nacos server, failing over to the
 * next server in the list on connect/timeout errors. Retries until either the
 * read-timeout deadline passes or the retry budget across full list rotations
 * is exhausted.
 *
 * @throws ConnectException when no server responds within the retry budget
 */
@Override
public HttpRestResult<String> httpGet(String path, Map<String, String> headers,
        Map<String, String> paramValues, String encode, long readTimeoutMs) throws Exception {
    final long endTime = System.currentTimeMillis() + readTimeoutMs;
    String currentServerAddr = serverListMgr.getCurrentServerAddr();
    int maxRetry = this.maxRetry;
    HttpClientConfig httpConfig = HttpClientConfig.builder()
            .setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue())
            .setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(100)).build();
    do {
        try {
            Header newHeaders = Header.newInstance();
            if (headers != null) {
                newHeaders.addAll(headers);
            }
            Query query = Query.newInstance().initParams(paramValues);
            HttpRestResult<String> result = nacosRestTemplate.get(getUrl(currentServerAddr, path), httpConfig,
                    newHeaders, query, String.class);
            if (isFail(result)) {
                LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}",
                        serverListMgr.getCurrentServerAddr(), result.getCode());
            } else {
                // Update the currently available server addr
                serverListMgr.updateCurrentServerAddr(currentServerAddr);
                return result;
            }
        } catch (ConnectException connectException) {
            // Connection-level failures are retryable: log and fall through to failover.
            LOGGER.error("[NACOS ConnectException httpGet] currentServerAddr:{}, err : {}",
                    serverListMgr.getCurrentServerAddr(), connectException.getMessage());
        } catch (SocketTimeoutException socketTimeoutException) {
            LOGGER.error("[NACOS SocketTimeoutException httpGet] currentServerAddr:{}, err : {}",
                    serverListMgr.getCurrentServerAddr(), socketTimeoutException.getMessage());
        } catch (Exception ex) {
            // Anything else is unexpected — propagate to the caller.
            LOGGER.error("[NACOS Exception httpGet] currentServerAddr: " + serverListMgr.getCurrentServerAddr(), ex);
            throw ex;
        }
        // Failover: advance to the next server; once the list is exhausted,
        // spend one retry and rewind to the head of the list.
        if (serverListMgr.getIterator().hasNext()) {
            currentServerAddr = serverListMgr.getIterator().next();
        } else {
            maxRetry--;
            if (maxRetry < 0) {
                throw new ConnectException(
                        "[NACOS HTTP-GET] The maximum number of tolerable server reconnection errors has been reached");
            }
            serverListMgr.refreshCurrentServerAddr();
        }
    } while (System.currentTimeMillis() <= endTime);
    LOGGER.error("no available server");
    throw new ConnectException("no available server");
}
@Test
void testHttpGetSuccess() throws Exception {
    // A 200 response from the first server must be returned directly, with no failover.
    when(nacosRestTemplate.<String>get(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class), any(Header.class),
            any(Query.class), eq(String.class))).thenReturn(mockResult);
    when(mockResult.getCode()).thenReturn(HttpURLConnection.HTTP_OK);
    HttpRestResult<String> actual = serverHttpAgent.httpGet("/test", Collections.emptyMap(), Collections.emptyMap(), "UTF-8", 1000);
    assertEquals(mockResult, actual);
}
/**
 * Parses runtime options from the given property map.
 * Delegates to the function-based overload using the map's lookup as the source.
 */
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
    return parse(properties::get);
}
@Test
void should_parse_execution_order() {
    // "reverse" must map to the reverse-lexical-URI pickle order.
    properties.put(Constants.EXECUTION_ORDER_PROPERTY_NAME, "reverse");
    RuntimeOptions options = cucumberPropertiesParser.parse(properties).build();
    assertThat(options.getPickleOrder(), equalTo(StandardPickleOrders.reverseLexicalUriOrder()));
}
/**
 * Records how long an event spent in the controller event queue.
 *
 * @param durationMs queue residency time in milliseconds
 */
public void updateEventQueueTime(long durationMs) {
    eventQueueTimeUpdater.accept(durationMs);
}
@Test
public void testUpdateEventQueueTime() {
    // One update of 1000ms must surface in the EventQueueTimeMs histogram.
    MetricsRegistry registry = new MetricsRegistry();
    MockTime time = new MockTime();
    try (QuorumControllerMetrics metrics = new QuorumControllerMetrics(Optional.of(registry), time, false)) {
        metrics.updateEventQueueTime(1000);
        assertMetricHistogram(registry, metricName("ControllerEventManager", "EventQueueTimeMs"), 1, 1000);
    } finally {
        registry.shutdown();
    }
}
/**
 * Deletes the given files via the Drive batch API, splitting the requests into
 * bounded partitions so a single oversized batch does not trigger an HTTP 413.
 * The first queued failure of each partition is rethrown after the batch runs.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    // Must split otherwise 413 Request Entity Too Large is returned
    for(List<Path> partition : new Partition<>(new ArrayList<>(files.keySet()),
            new HostPreferences(session.getHost()).getInteger("googledrive.delete.multiple.partition"))) {
        final BatchRequest batch = session.getClient().batch();
        // Collected by the per-file callbacks while the batch executes.
        final List<BackgroundException> failures = new CopyOnWriteArrayList<>();
        for(Path file : partition) {
            try {
                this.queue(file, batch, callback, failures);
            }
            catch(IOException e) {
                throw new DriveExceptionMappingService(fileid).map("Cannot delete {0}", e, file);
            }
        }
        if(!partition.isEmpty()) {
            try {
                batch.execute();
            }
            catch(IOException e) {
                throw new DriveExceptionMappingService(fileid).map(e);
            }
            // Propagate the first recorded failure, if any.
            for(BackgroundException e : failures) {
                throw e;
            }
        }
    }
}
@Test
public void testDeleteMultipleFiles() throws Exception {
    // Create a folder with two files, batch-delete the files (folder survives),
    // then batch-delete the folder itself.
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path folder = new DriveDirectoryFeature(session, fileid).mkdir(new Path(DriveHomeFinderService.MYDRIVE_FOLDER,
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path file1 = new DriveTouchFeature(session, fileid).touch(new Path(folder,
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path file2 = new DriveTouchFeature(session, fileid).touch(new Path(folder,
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new DriveFindFeature(session, fileid).find(file1));
    assertTrue(new DriveFindFeature(session, fileid).find(file2));
    new DriveBatchDeleteFeature(session, fileid).delete(Arrays.asList(file1, file2), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse((new DriveFindFeature(session, fileid).find(file1, new DisabledListProgressListener())));
    assertFalse((new DriveFindFeature(session, fileid).find(file2, new DisabledListProgressListener())));
    assertTrue(new DriveFindFeature(session, fileid).find(folder, new DisabledListProgressListener()));
    new DriveBatchDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse((new DriveFindFeature(session, fileid).find(folder, new DisabledListProgressListener())));
}
/**
 * Dispatches the pending batch in {@code toSend} to the producer.
 * Returns true when the whole batch was handed off (toSend is then cleared);
 * returns false on a retriable send failure, leaving the unprocessed tail in
 * {@code toSend} so the caller can retry from where it stopped.
 */
boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    // counter is only null for an empty batch, in which case the loop body never runs.
    final SourceRecordWriteCounter counter =
            toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord);
        final SourceRecord record = transformationChain.apply(context, preTransformRecord);
        final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record);
        if (producerRecord == null || context.failed()) {
            // Dropped by a transform or by conversion failure — count and move on.
            counter.skipRecord();
            recordDropped(preTransformRecord);
            processed++;
            continue;
        }
        log.trace("{} Appending record to the topic {} with key {}, value {}",
                this, record.topic(), record.key(), record.value());
        Optional<SubmittedRecords.SubmittedRecord> submittedRecord = prepareToSendRecord(preTransformRecord, producerRecord);
        try {
            final String topic = producerRecord.topic();
            maybeCreateTopic(topic);
            producer.send(
                    producerRecord,
                    (recordMetadata, e) -> {
                        // Async completion: ack/skip based on the send outcome.
                        if (e != null) {
                            if (producerClosed) {
                                log.trace("{} failed to send record to {}; this is expected as the producer has already been closed",
                                        AbstractWorkerSourceTask.this, topic, e);
                            } else {
                                log.error("{} failed to send record to {}: ",
                                        AbstractWorkerSourceTask.this, topic, e);
                            }
                            log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord);
                            producerSendFailed(context, false, producerRecord, preTransformRecord, e);
                            if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) {
                                // Tolerated failure: ack so offsets can still advance.
                                counter.skipRecord();
                                submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                            }
                        } else {
                            counter.completeRecord();
                            log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
                                    AbstractWorkerSourceTask.this,
                                    recordMetadata.topic(), recordMetadata.partition(),
                                    recordMetadata.offset());
                            recordSent(preTransformRecord, producerRecord, recordMetadata);
                            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                            if (topicTrackingEnabled) {
                                recordActiveTopic(producerRecord.topic());
                            }
                        }
                    });
            // Note that this will cause retries to take place within a transaction
        } catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ",
                    this, producerRecord.topic(), producerRecord.partition(), e);
            // Keep the unprocessed tail for the next attempt and undo the submission.
            toSend = toSend.subList(processed, toSend.size());
            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop);
            counter.retryRemaining();
            return false;
        } catch (ConnectException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ",
                    this, producerRecord.topic(), producerRecord.partition(), e);
            log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e);
            throw e;
        } catch (KafkaException e) {
            // Synchronous, non-retriable producer failure — delegate to the handler.
            producerSendFailed(context, true, producerRecord, preTransformRecord, e);
        }
        processed++;
        recordDispatched(preTransformRecord);
    }
    toSend = null;
    batchDispatched();
    return true;
}
@Test
public void testSendRecordsTopicCreateRetriesMidway() {
    // Topic auto-creation throws a RetriableException on the second record:
    // sendRecords must stop there, keep the tail (record3), and succeed on retry.
    createWorkerTask();

    // Differentiate only by Kafka partition so we can reuse conversion expectations
    SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    SourceRecord record3 = new SourceRecord(PARTITION, OFFSET, OTHER_TOPIC, 3, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);

    expectPreliminaryCalls(TOPIC);
    expectPreliminaryCalls(OTHER_TOPIC);

    when(admin.describeTopics(anyString())).thenReturn(Collections.emptyMap());
    when(admin.createOrFindTopics(any(NewTopic.class)))
            .thenReturn(createdTopic(TOPIC))
            .thenThrow(new RetriableException(new TimeoutException("timeout")))
            .thenReturn(createdTopic(OTHER_TOPIC));

    // Try to send 3, make first pass, second fail. Should save last record
    workerTask.toSend = Arrays.asList(record1, record2, record3);
    workerTask.sendRecords();
    assertEquals(Collections.singletonList(record3), workerTask.toSend);
    verifyTopicCreation(2, TOPIC, OTHER_TOPIC); // Second call to createOrFindTopics will throw

    // Next they all succeed
    workerTask.sendRecords();
    assertNull(workerTask.toSend);
    verifyTopicCreation(3, TOPIC, OTHER_TOPIC, OTHER_TOPIC);
}
/**
 * Returns the period backing this range, or null when the range is custom
 * (not derived from a period).
 */
public Period getPeriod() {
    return period;
}
@Test
public void testGetPeriod() {
    // A period-based range exposes its period; a custom range has none.
    assertNotNull("getPeriod", periodRange.getPeriod());
    assertNull("getPeriod", customRange.getPeriod());
}
/**
 * Applies the computed per-column forward-index/dictionary operations to the
 * segment. Each operation is validated after execution (e.g. a dictionary must
 * exist exactly when the rebuilt column is dictionary-encoded).
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
  if (columnOperationsMap.isEmpty()) {
    return;
  }

  for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
    String column = entry.getKey();
    List<Operation> operations = entry.getValue();
    for (Operation operation : operations) {
      switch (operation) {
        case DISABLE_FORWARD_INDEX:
          // Deletion of the forward index will be handled outside the index handler to ensure that other index
          // handlers that need the forward index to construct their own indexes will have it available.
          _tmpForwardIndexColumns.add(column);
          break;
        case ENABLE_FORWARD_INDEX:
          ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
          // Sanity check: dictionary presence must match the column's encoding.
          if (columnMetadata.hasDictionary()) {
            if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(String.format(
                  "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
            }
          } else {
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                      column));
            }
          }
          break;
        case DISABLE_DICTIONARY:
          // If the forward index is also being disabled for this column, only
          // remove the dictionary; otherwise rebuild a raw forward index.
          Set<String> newForwardIndexDisabledColumns =
              FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
                  _fieldIndexConfigs);
          if (newForwardIndexDisabledColumns.contains(column)) {
            removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
            }
          } else {
            disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
          }
          break;
        case ENABLE_DICTIONARY:
          createDictBasedForwardIndex(column, segmentWriter);
          if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
            throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
          }
          break;
        case CHANGE_INDEX_COMPRESSION_TYPE:
          rewriteForwardIndexForCompressionChange(column, segmentWriter);
          break;
        default:
          throw new IllegalStateException("Unsupported operation for column " + column);
      }
    }
  }
}
@Test
public void testEnableForwardIndexInRawModeForMVForwardIndexDisabledColumnWithDuplicates()
    throws Exception {
  // Re-enable the forward index (raw/no-dictionary) for a randomly chosen MV
  // column that had it disabled; duplicates are lost on regeneration, so
  // metadata validation tolerates changed entry counts.
  SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
  SegmentDirectory segmentLocalFSDirectory =
      new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
  SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
  IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
  Random rand = new Random();

  // Remove from forward index list but keep the inverted index enabled
  String column = MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS.get(
      rand.nextInt(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS.size()));
  indexLoadingConfig.removeForwardIndexDisabledColumns(column);
  indexLoadingConfig.removeInvertedIndexColumns(column);
  indexLoadingConfig.addNoDictionaryColumns(column);
  ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
  fwdIndexHandler.updateIndices(writer);
  fwdIndexHandler.postUpdateIndicesCleanup(writer);

  // Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close()
  segmentLocalFSDirectory.close();

  // Column validation.
  ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
  validateIndexMap(column, false, false);
  validateForwardIndex(column, CompressionCodec.LZ4, metadata.isSorted());
  // In column metadata, some values can change since MV columns with duplicates lose the duplicates on forward index
  // regeneration.
  validateMetadataProperties(column, false, 0, metadata.getCardinality(), metadata.getTotalDocs(),
      metadata.getDataType(), metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(),
      metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(),
      metadata.getMinValue(), metadata.getMaxValue(), true);
}
/**
 * Returns the compaction history plus a record for every currently running
 * compaction, sorted by start timestamp descending (newest first).
 */
@NotNull
List<CompactionRecord> getHistory() {
    List<CompactionRecord> list = new ArrayList<>();
    history.forEach(list::add);
    for (CompactionJob job : getRunningCompactions().values()) {
        list.add(CompactionRecord.build(job));
    }
    // Replaces the hand-rolled anonymous Comparator with the equivalent
    // stdlib comparator: descending by start timestamp.
    list.sort(Comparator.comparingLong(CompactionRecord::getStartTs).reversed());
    return list;
}
@Test
public void testGetHistory() {
    // Two running compactions (no finished history) must both appear in the
    // history list, ordered newest-first by start timestamp.
    CompactionMgr compactionManager = new CompactionMgr();
    CompactionScheduler compactionScheduler =
            new CompactionScheduler(compactionManager, GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(),
                    GlobalStateMgr.getCurrentState().getGlobalTransactionMgr(), GlobalStateMgr.getCurrentState(), "");

    new MockUp<CompactionScheduler>() {
        @Mock
        public ConcurrentHashMap<PartitionIdentifier, CompactionJob> getRunningCompactions() {
            ConcurrentHashMap<PartitionIdentifier, CompactionJob> r = new ConcurrentHashMap<>();
            Database db = new Database();
            Table table = new LakeTable();
            PartitionIdentifier partitionIdentifier1 = new PartitionIdentifier(1, 2, 3);
            PartitionIdentifier partitionIdentifier2 = new PartitionIdentifier(1, 2, 4);
            PhysicalPartition partition1 = new Partition(123, "aaa", null, null);
            PhysicalPartition partition2 = new Partition(124, "bbb", null, null);
            CompactionJob job1 = new CompactionJob(db, table, partition1, 100, false);
            try {
                // Ensure job2 starts measurably later than job1 for the ordering check.
                Thread.sleep(10);
            } catch (InterruptedException e) {
            }
            CompactionJob job2 = new CompactionJob(db, table, partition2, 101, false);
            r.put(partitionIdentifier1, job1);
            r.put(partitionIdentifier2, job2);
            return r;
        }
    };

    List<CompactionRecord> list = compactionScheduler.getHistory();
    Assert.assertEquals(2, list.size());
    Assert.assertTrue(list.get(0).getStartTs() >= list.get(1).getStartTs());
}
/**
 * Converts an application id to its string form.
 *
 * @deprecated use {@link ApplicationId#toString()} directly instead.
 */
@Public
@Deprecated
public static String toString(ApplicationId appId) {
    return appId.toString();
}
// The deprecated ContainerId overload must tolerate null and return null.
@Test
@SuppressWarnings("deprecation")
void testContainerIdNull() throws URISyntaxException {
    assertNull(ConverterUtils.toString((ContainerId) null));
}
/**
 * Builds the display representation of this task's configuration. When the
 * plugin's preference is known, properties follow the plugin-declared order and
 * display names (unconfigured definitions are skipped); otherwise the raw
 * configuration is shown as-is. Secure values render masked via getDisplayValue().
 */
@Override
public List<TaskProperty> getPropertiesForDisplay() {
    ArrayList<TaskProperty> taskProperties = new ArrayList<>();
    if (PluggableTaskConfigStore.store().hasPreferenceFor(pluginConfiguration.getId())) {
        TaskPreference preference = taskPreference();
        List<? extends Property> propertyDefinitions = preference.getConfig().list();
        for (Property propertyDefinition : propertyDefinitions) {
            ConfigurationProperty configuredProperty = configuration.getProperty(propertyDefinition.getKey());
            // Only definitions the user actually configured are displayed.
            if (configuredProperty == null) continue;
            taskProperties.add(new TaskProperty(propertyDefinition.getOption(Property.DISPLAY_NAME),
                    configuredProperty.getDisplayValue(), configuredProperty.getConfigKeyName()));
        }
        return taskProperties;
    }

    // No plugin preference available: fall back to the raw configuration order.
    for (ConfigurationProperty property : configuration) {
        taskProperties.add(new TaskProperty(property.getConfigKeyName(), property.getDisplayValue()));
    }

    return taskProperties;
}
@Test
public void shouldPopulatePropertiesForDisplay() throws Exception {
    // Three configured properties must all be displayed, with the secure one masked.
    Configuration configuration = new Configuration(
            ConfigurationPropertyMother.create("KEY1", false, "value1"),
            ConfigurationPropertyMother.create("Key2", false, "value2"),
            ConfigurationPropertyMother.create("key3", true, "encryptedValue1"));

    PluggableTask task = new PluggableTask(new PluginConfiguration("abc.def", "1"), configuration);
    List<TaskProperty> propertiesForDisplay = task.getPropertiesForDisplay();
    assertThat(propertiesForDisplay.size(), is(3));
    assertProperty(propertiesForDisplay.get(0), "KEY1", "value1", "key1");
    assertProperty(propertiesForDisplay.get(1), "Key2", "value2", "key2");
    assertProperty(propertiesForDisplay.get(2), "key3", "****", "key3");
}
/**
 * The rule is satisfied at {@code index} when the first indicator's value is
 * strictly greater than the second's. The outcome is traced before returning.
 */
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    final boolean firstAboveSecond = first.getValue(index).isGreaterThan(second.getValue(index));
    traceIsSatisfied(index, firstAboveSecond);
    return firstAboveSecond;
}
@Test
public void isSatisfied() {
    // The rule flips with the fixture's indicator crossover pattern:
    // true while the first series is above the second, false otherwise.
    assertTrue(rule.isSatisfied(0));
    assertTrue(rule.isSatisfied(1));
    assertTrue(rule.isSatisfied(2));
    assertFalse(rule.isSatisfied(3));
    assertFalse(rule.isSatisfied(4));
    assertFalse(rule.isSatisfied(5));
    assertFalse(rule.isSatisfied(6));
    assertTrue(rule.isSatisfied(7));
}
/**
 * Lists offsets for the given partitions according to their OffsetSpec
 * (earliest/latest/timestamp). The per-partition specs are converted to raw
 * offset queries and driven through the admin API handler machinery.
 */
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                     ListOffsetsOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
        ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
    Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue())));
    ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new ListOffsetsResult(future.all());
}
@Test
public void testListOffsetsNonRetriableErrors() throws Exception {
    // TOPIC_AUTHORIZATION_FAILED is non-retriable: listOffsets must fail the
    // future with TopicAuthorizationException instead of retrying.
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[]{node0, node1}, new Node[]{node0, node1}));
    final Cluster cluster = new Cluster(
            "mockClusterId",
            nodes,
            pInfos,
            Collections.emptySet(),
            Collections.emptySet(),
            node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));

        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.TOPIC_AUTHORIZATION_FAILED, -1L, -1L, -1);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
                .setThrottleTimeMs(0)
                .setTopics(singletonList(t0));
        env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData));

        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);

        TestUtils.assertFutureError(result.all(), TopicAuthorizationException.class);
    }
}
/**
 * Populates a Jib container builder from this Gradle project's build output.
 * WAR projects are exploded and containerized as web apps; JAR projects are
 * containerized either exploded (classes + resources + split dependency
 * layers) or as the packaged JAR, depending on the containerizing mode.
 */
@Override
public JibContainerBuilder createJibContainerBuilder(
    JavaContainerBuilder javaContainerBuilder, ContainerizingMode containerizingMode) {
  try {
    // Artifacts produced by sibling subprojects — layered separately from
    // external dependencies so rebuilds invalidate fewer layers.
    FileCollection projectDependencies =
        project.files(
            project.getConfigurations().getByName(configurationName).getResolvedConfiguration()
                .getResolvedArtifacts().stream()
                .filter(
                    artifact ->
                        artifact.getId().getComponentIdentifier()
                            instanceof ProjectComponentIdentifier)
                .map(ResolvedArtifact::getFile)
                .collect(Collectors.toList()));

    if (isWarProject()) {
      String warFilePath = getWarFilePath();
      log(LogEvent.info("WAR project identified, creating WAR image from: " + warFilePath));
      Path explodedWarPath = tempDirectoryProvider.newDirectory();
      ZipUtil.unzip(Paths.get(warFilePath), explodedWarPath);
      return JavaContainerBuilderHelper.fromExplodedWar(
          javaContainerBuilder,
          explodedWarPath,
          projectDependencies.getFiles().stream().map(File::getName).collect(Collectors.toSet()));
    }

    SourceSet mainSourceSet = getMainSourceSet();
    FileCollection classesOutputDirectories =
        mainSourceSet.getOutput().getClassesDirs().filter(File::exists);
    Path resourcesOutputDirectory = mainSourceSet.getOutput().getResourcesDir().toPath();
    FileCollection allFiles =
        project.getConfigurations().getByName(configurationName).filter(File::exists);

    // Everything on the runtime classpath that is not build output or a
    // sibling project artifact; SNAPSHOTs get their own (more volatile) layer.
    FileCollection nonProjectDependencies =
        allFiles
            .minus(classesOutputDirectories)
            .minus(projectDependencies)
            .filter(file -> !file.toPath().equals(resourcesOutputDirectory));
    FileCollection snapshotDependencies =
        nonProjectDependencies.filter(file -> file.getName().contains("SNAPSHOT"));
    FileCollection dependencies = nonProjectDependencies.minus(snapshotDependencies);

    // Adds dependency files
    javaContainerBuilder
        .addDependencies(
            dependencies.getFiles().stream().map(File::toPath).collect(Collectors.toList()))
        .addSnapshotDependencies(
            snapshotDependencies.getFiles().stream()
                .map(File::toPath)
                .collect(Collectors.toList()))
        .addProjectDependencies(
            projectDependencies.getFiles().stream()
                .map(File::toPath)
                .collect(Collectors.toList()));

    switch (containerizingMode) {
      case EXPLODED:
        // Adds resource files
        if (Files.exists(resourcesOutputDirectory)) {
          javaContainerBuilder.addResources(resourcesOutputDirectory);
        }

        // Adds class files
        for (File classesOutputDirectory : classesOutputDirectories) {
          javaContainerBuilder.addClasses(classesOutputDirectory.toPath());
        }
        if (classesOutputDirectories.isEmpty()) {
          log(LogEvent.warn("No classes files were found - did you compile your project?"));
        }
        break;

      case PACKAGED:
        // Add a JAR
        Jar jarTask = (Jar) project.getTasks().findByName("jar");
        Path jarPath = jarTask.getArchiveFile().get().getAsFile().toPath();
        log(LogEvent.debug("Using JAR: " + jarPath));
        javaContainerBuilder.addToClasspath(jarPath);
        break;

      default:
        throw new IllegalStateException("unknown containerizing mode: " + containerizingMode);
    }
    return javaContainerBuilder.toContainerBuilder();

  } catch (IOException ex) {
    throw new GradleException("Obtaining project build output files failed", ex);
  }
}
@Test
public void testCreateContainerBuilder_noErrorIfWebInfClassesDoesNotExist()
    throws IOException, InvalidImageReferenceException {
  // A WAR with WEB-INF/lib but no WEB-INF/classes must still containerize.
  temporaryFolder.newFolder("WEB-INF", "lib");
  setUpWarProject(temporaryFolder.getRoot().toPath());
  assertThat(
          gradleProjectProperties.createJibContainerBuilder(
              JavaContainerBuilder.from("ignored"), ContainerizingMode.EXPLODED))
      .isNotNull();
}
/**
 * Returns the table cells as rows of strings.
 * NOTE(review): this hands out the internal {@code raw} list directly —
 * callers rely on it being unmodifiable; confirm it is wrapped immutably
 * at construction.
 */
public List<List<String>> cells() {
    return raw;
}
// The cell list exposed by cells() must reject structural modification.
@Test
void cells_row_is_immutable() {
    assertThrows(UnsupportedOperationException.class, () -> createSimpleTable().cells().remove(0));
}
/**
 * Builds the namespaces resource path: {@code v1/<prefix>/namespaces}.
 * NOTE(review): with no prefix the result is {@code v1/namespaces}, which
 * implies the SLASH joiner skips null/absent segments — confirm.
 */
public String namespaces() {
    return SLASH.join("v1", prefix, "namespaces");
}
@Test
public void testNamespaces() {
    // With a prefix the segment is inserted; without one it is omitted entirely.
    assertThat(withPrefix.namespaces()).isEqualTo("v1/ws/catalog/namespaces");
    assertThat(withoutPrefix.namespaces()).isEqualTo("v1/namespaces");
}
/**
 * Static factory for a mutable boxed {@code long} reference.
 *
 * @param value the initial value
 * @return a new LongRef holding {@code value}
 */
public static LongRef ofLong(long value) {
    return new LongRef(value);
}
@Test
public void testLongRef() {
    // The public value field supports ordinary post/pre increment semantics.
    PrimitiveRef.LongRef ref = PrimitiveRef.ofLong(5L);
    assertEquals(5L, ref.value++);
    assertEquals(6L, ref.value);
    assertEquals(7L, ++ref.value);
    assertEquals(7L, ref.value);
}
/**
 * Reloads the proxy-user (superuser) group configuration from a freshly constructed,
 * server-side {@link Configuration} (i.e. from the default resources on the classpath).
 */
public static void refreshSuperUserGroupsConfiguration() {
  //load server side configuration;
  refreshSuperUserGroupsConfiguration(new Configuration());
}
// Verifies that a wildcard ("*") superuser-group configuration authorizes impersonation of
// users in ANY group — but only from the configured proxy IP. Both a user in GROUP_NAMES and
// one in OTHER_GROUP_NAMES must be authorized from the good IP and rejected from a bad IP.
@Test
public void testWildcardGroup() {
  Configuration conf = new Configuration();
  conf.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER_NAME),
      "*");
  conf.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(REAL_USER_NAME),
      PROXY_IP);
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

  // First try proxying a group that's allowed
  UserGroupInformation realUserUgi = UserGroupInformation
      .createRemoteUser(REAL_USER_NAME);
  UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
      PROXY_USER_NAME, realUserUgi, GROUP_NAMES);

  // From good IP
  assertAuthorized(proxyUserUgi, "1.2.3.4");
  // From bad IP
  assertNotAuthorized(proxyUserUgi, "1.2.3.5");

  // Now try proxying a different group (just to make sure we aren't getting spill over
  // from the other test case!)
  realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
  proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
      PROXY_USER_NAME, realUserUgi, OTHER_GROUP_NAMES);

  // From good IP
  assertAuthorized(proxyUserUgi, "1.2.3.4");
  // From bad IP
  assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
/**
 * UDF entry point: extracts the path component from the given URL string.
 *
 * @param input the URL to parse
 * @return the path component, as produced by the shared {@code UrlParser} helper
 */
@Udf
public String extractPath(
    @UdfParameter(description = "a valid URL") final String input) {
  // Delegate parsing to the shared helper, selecting the path component of the parsed URI.
  return UrlParser.extract(input, uri -> uri.getPath());
}
// Verifies that a syntactically invalid URL (unescaped '[' in the path) is rejected with a
// KsqlException whose message names the offending input.
@Test
public void shouldThrowExceptionForMalformedURL() {
  // When:
  final KsqlException e = assertThrows(
      KsqlException.class,
      () -> extractUdf.extractPath("http://257.1/bogus/[url")
  );

  // Then:
  assertThat(e.getMessage(),
      containsString("URL input has invalid syntax: http://257.1/bogus/[url"));
}
/**
 * Returns the sampled value at the given quantile of this snapshot.
 *
 * <p>Performs a binary search of the precomputed {@code quantiles} thresholds and returns
 * the sample from the matching bucket, clamping to the first/last sample at the extremes.
 *
 * @param quantile the requested quantile, in {@code [0..1]}
 * @return the value at the quantile, or {@code 0.0} if the snapshot is empty
 * @throws IllegalArgumentException if {@code quantile} is NaN or outside {@code [0..1]}
 */
@Override
public double getValue(double quantile) {
  if (quantile < 0.0 || quantile > 1.0 || Double.isNaN(quantile)) {
    throw new IllegalArgumentException(quantile + " is not in [0..1]");
  }

  if (values.length == 0) {
    return 0.0;
  }

  // Locate the bucket whose threshold covers the requested quantile.
  int index = Arrays.binarySearch(quantiles, quantile);
  if (index < 0) {
    // No exact match: binarySearch returns -(insertionPoint) - 1, so step back one bucket.
    index = -index - 2;
  }

  // Clamp to the first/last sample at the extremes.
  if (index < 1) {
    return values[0];
  }
  if (index >= values.length) {
    return values[values.length - 1];
  }
  return values[index];
}
// Verifies that a quantile greater than 1.0 is rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void disallowsQuantileOverOne() {
  snapshot.getValue(1.5);
}
/**
 * {@inheritDoc}
 *
 * <p>Default implementation: a plain slice with its reference count incremented, i.e.
 * equivalent to {@code slice().retain()}.
 */
@Override
public ByteBuf retainedSlice() {
  return slice().retain();
}
// Verifies that taking a retained slice of an already-released buffer fails with
// IllegalReferenceCountException instead of handing out a view of freed memory.
@Test
public void testRetainedSliceAfterRelease2() {
  assertThrows(IllegalReferenceCountException.class, new Executable() {
    @Override
    public void execute() {
      releasedBuffer().retainedSlice(0, 1);
    }
  });
}
/**
 * Returns the changeset recorded for the given 1-based line number.
 *
 * @param lineNumber the 1-based line to look up
 * @return the changeset for that line
 * @throws IllegalArgumentException if no changeset exists for that line
 */
@Override
public Changeset getChangesetForLine(int lineNumber) {
  if (hasChangesetForLine(lineNumber)) {
    // Callers use 1-based line numbers; the backing array is 0-based.
    return lineChangesets[lineNumber - 1];
  }
  throw new IllegalArgumentException("There's no changeset on line " + lineNumber);
}
// Verifies that requesting a line beyond the changeset range (line 5 of a 4-line ScmInfo)
// throws IllegalArgumentException with a message naming the offending line.
@Test
public void fail_with_IAE_when_line_is_bigger_than_changetset_size() {
  assertThatThrownBy(() -> {
    ScmInfo scmInfo = createScmInfoWithTwoChangestOnFourLines();
    scmInfo.getChangesetForLine(5);
  })
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("There's no changeset on line 5");
}
/**
 * Removes the slot with the given allocation id from the tracker and returns its resources
 * to the owning task manager's registration.
 *
 * <p>Both the task manager registration and the slot entry must already exist; either
 * missing is a programming error and fails the precondition checks.
 */
private void freeSlot(InstanceID instanceId, AllocationID allocationId) {
  final FineGrainedTaskManagerRegistration taskManager =
      Preconditions.checkNotNull(taskManagerRegistrations.get(instanceId));
  // Drop the tracker-level slot entry; it must be present for a free to be valid.
  Preconditions.checkNotNull(slots.remove(allocationId));
  LOG.debug("Free allocated slot with allocationId {}.", allocationId);
  taskManager.freeSlot(allocationId);
}
// Verifies that freeing both a PENDING and an ALLOCATED slot (via a FREE status update)
// removes the slot from the tracker and returns its resources to the task manager, so the
// available resource climbs back to the full total after both frees.
@Test
void testFreeSlot() {
  final FineGrainedTaskManagerTracker taskManagerTracker =
      new FineGrainedTaskManagerTracker();
  final ResourceProfile totalResource = ResourceProfile.fromResources(10, 1000);
  final AllocationID allocationId1 = new AllocationID();
  final AllocationID allocationId2 = new AllocationID();
  final JobID jobId = new JobID();
  taskManagerTracker.addTaskManager(TASK_EXECUTOR_CONNECTION, totalResource, totalResource);
  taskManagerTracker.notifySlotStatus(
      allocationId1,
      jobId,
      TASK_EXECUTOR_CONNECTION.getInstanceID(),
      ResourceProfile.fromResources(3, 200),
      SlotState.PENDING);
  taskManagerTracker.notifySlotStatus(
      allocationId2,
      jobId,
      TASK_EXECUTOR_CONNECTION.getInstanceID(),
      ResourceProfile.fromResources(2, 300),
      SlotState.ALLOCATED);

  // Free pending slot
  taskManagerTracker.notifySlotStatus(
      allocationId1,
      jobId,
      TASK_EXECUTOR_CONNECTION.getInstanceID(),
      ResourceProfile.fromResources(3, 200),
      SlotState.FREE);
  assertThat(taskManagerTracker.getAllocatedOrPendingSlot(allocationId1)).isNotPresent();
  assertThat(
          taskManagerTracker.getRegisteredTaskManager(
              TASK_EXECUTOR_CONNECTION.getInstanceID()))
      .hasValueSatisfying(
          taskManagerInfo ->
              assertThat(taskManagerInfo.getAvailableResource())
                  .isEqualTo(ResourceProfile.fromResources(8, 700)));

  // Free allocated slot
  taskManagerTracker.notifySlotStatus(
      allocationId2,
      jobId,
      TASK_EXECUTOR_CONNECTION.getInstanceID(),
      ResourceProfile.fromResources(2, 300),
      SlotState.FREE);
  assertThat(taskManagerTracker.getAllocatedOrPendingSlot(allocationId2)).isNotPresent();
  assertThat(
          taskManagerTracker.getRegisteredTaskManager(
              TASK_EXECUTOR_CONNECTION.getInstanceID()))
      .hasValueSatisfying(
          taskManagerInfo ->
              assertThat(taskManagerInfo.getAvailableResource())
                  .isEqualTo(totalResource));
}
/**
 * Reconciles the JMX secret of a cluster that supports JMX.
 *
 * <p>Fetches the current secret, asks the cluster model for the desired secret based on it,
 * and then takes one of three actions:
 * <ul>
 *   <li>desired secret non-null: create/patch the secret to the desired state</li>
 *   <li>desired null but a secret currently exists: delete it (reconcile with null)</li>
 *   <li>both null: nothing to do</li>
 * </ul>
 *
 * @return a Future that completes when the secret has been reconciled
 */
public static Future<Void> reconcileJmxSecret(Reconciliation reconciliation, SecretOperator secretOperator, SupportsJmx cluster) {
  return secretOperator.getAsync(reconciliation.namespace(), cluster.jmx().secretName())
      .compose(currentJmxSecret -> {
        // The model decides the desired state, possibly reusing credentials from the
        // current secret (e.g. to avoid regenerating an existing password).
        Secret desiredJmxSecret = cluster.jmx().jmxSecret(currentJmxSecret);

        if (desiredJmxSecret != null) {
          // Desired secret is not null => should be updated
          return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), desiredJmxSecret)
              .map((Void) null);
        } else if (currentJmxSecret != null) {
          // Desired secret is null but current is not => we should delete the secret
          return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), null)
              .map((Void) null);
        } else {
          // Both current and desired secret are null => nothing to do
          return Future.succeededFuture();
        }
      });
}
// Verifies that when JMX auth is enabled and a JMX secret already exists, reconciliation
// patches (not recreates) the secret, preserving the existing username/password data and
// carrying the expected metadata (name, namespace, owner reference, labels).
@Test
public void testEnabledJmxWithAuthWithExistingSecret(VertxTestContext context) {
  KafkaClusterSpec spec = new KafkaClusterSpecBuilder()
      .withNewJmxOptions()
          .withNewKafkaJmxAuthenticationPassword()
          .endKafkaJmxAuthenticationPassword()
      .endJmxOptions()
      .build();

  JmxModel jmx = new JmxModel(NAMESPACE, NAME, LABELS, OWNER_REFERENCE, spec);

  SecretOperator mockSecretOps = mock(SecretOperator.class);
  when(mockSecretOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(EXISTING_JMX_SECRET));
  ArgumentCaptor<Secret> secretCaptor = ArgumentCaptor.forClass(Secret.class);
  when(mockSecretOps.reconcile(any(), any(), any(), secretCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.patched(i.getArgument(3))));

  Checkpoint async = context.checkpoint();
  ReconcilerUtils.reconcileJmxSecret(Reconciliation.DUMMY_RECONCILIATION, mockSecretOps, new MockJmxCluster(jmx))
      .onComplete(context.succeeding(v -> context.verify(() -> {
        verify(mockSecretOps, times(1)).reconcile(eq(Reconciliation.DUMMY_RECONCILIATION), eq(NAMESPACE), eq(NAME), any());

        Secret secret = secretCaptor.getValue();
        assertThat(secret, is(notNullValue()));
        assertThat(secret.getMetadata().getName(), is(NAME));
        assertThat(secret.getMetadata().getNamespace(), is(NAMESPACE));
        assertThat(secret.getMetadata().getOwnerReferences(), is(List.of(OWNER_REFERENCE)));
        assertThat(secret.getMetadata().getLabels(), is(LABELS.toMap()));
        assertThat(secret.getMetadata().getAnnotations(), is(nullValue()));
        // Existing credentials are preserved rather than regenerated.
        assertThat(secret.getData().size(), is(2));
        assertThat(secret.getData().get("jmx-username"), is("username"));
        assertThat(secret.getData().get("jmx-password"), is("password"));

        async.flag();
      })));
}
/**
 * Static factory: builds a Jetty server-side {@link SslContextFactory.Server} backed by the
 * given Pulsar SSL factory. Pure delegation to the {@code JettySslContextFactory.Server}
 * constructor with the same arguments.
 */
public static SslContextFactory.Server createSslContextFactory(String sslProviderString,
                                                               PulsarSslFactory pulsarSslFactory,
                                                               boolean requireTrustedClientCertOnConnect,
                                                               Set<String> ciphers,
                                                               Set<String> protocols) {
  return new JettySslContextFactory.Server(sslProviderString, pulsarSslFactory,
      requireTrustedClientCertOnConnect, ciphers, protocols);
}
// End-to-end TLS handshake test: starts a Jetty server with an SslContextFactory created by
// JettySslContextFactory (client cert required), then performs an HTTPS GET with a client
// trusting the same CA. Success is the request completing without a handshake exception.
@Test
public void testJettyTlsServerTls() throws Exception {
  @Cleanup("stop")
  Server server = new Server();
  List<ServerConnector> connectors = new ArrayList<>();
  PulsarSslConfiguration sslConfiguration = PulsarSslConfiguration.builder()
      .tlsTrustCertsFilePath(Resources.getResource("ssl/my-ca/ca.pem").getPath())
      .tlsCertificateFilePath(Resources.getResource("ssl/my-ca/server-ca.pem").getPath())
      .tlsKeyFilePath(Resources.getResource("ssl/my-ca/server-key.pem").getPath())
      .allowInsecureConnection(false)
      .requireTrustedClientCertOnConnect(true)
      .tlsEnabledWithKeystore(false)
      .isHttps(true)
      .build();
  PulsarSslFactory sslFactory = new DefaultPulsarSslFactory();
  sslFactory.initialize(sslConfiguration);
  sslFactory.createInternalSslContext();
  SslContextFactory factory = JettySslContextFactory.createSslContextFactory(null,
      sslFactory, true, null, null);
  // Port 0 => ephemeral port, read back via connector.getLocalPort() below.
  ServerConnector connector = new ServerConnector(server, factory);
  connector.setPort(0);
  connectors.add(connector);
  server.setConnectors(connectors.toArray(new ServerConnector[0]));
  server.start();
  // client connect
  HttpClientBuilder httpClientBuilder = HttpClients.custom();
  RegistryBuilder<ConnectionSocketFactory> registryBuilder = RegistryBuilder.create();
  registryBuilder.register("https", new SSLConnectionSocketFactory(getClientSslContext(),
      new NoopHostnameVerifier()));
  PoolingHttpClientConnectionManager cm =
      new PoolingHttpClientConnectionManager(registryBuilder.build());
  httpClientBuilder.setConnectionManager(cm);
  @Cleanup
  CloseableHttpClient httpClient = httpClientBuilder.build();
  HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort());
  httpClient.execute(httpGet);
}
/**
 * UDF entry point: joins the arguments after the first using the first argument as the
 * separator, skipping null values.
 *
 * @param inputs the separator followed by the values to join; at least two arguments
 * @return the joined string, or {@code null} if the separator is {@code null}
 * @throws KsqlFunctionException if fewer than two arguments are supplied
 */
@Udf
public String concatWS(
    @UdfParameter(description = "Separator string and values to join") final String... inputs) {
  if (inputs == null || inputs.length < 2) {
    throw new KsqlFunctionException("Function Concat_WS expects at least two input arguments.");
  }

  // A null separator makes the whole result null, regardless of the values.
  final String separator = inputs[0];
  if (separator == null) {
    return null;
  }

  // Join the remaining arguments, skipping nulls; an all-null tail yields "".
  final StringBuilder joined = new StringBuilder();
  boolean first = true;
  for (int i = 1; i < inputs.length; i++) {
    final String value = inputs[i];
    if (value == null) {
      continue;
    }
    if (!first) {
      joined.append(separator);
    }
    joined.append(value);
    first = false;
  }
  return joined.toString();
}
// Verifies that a null separator yields a null result for both the String and the bytes
// (ByteBuffer) overloads of concat_ws.
@Test
public void shouldReturnNullForNullSeparator() {
  assertThat(udf.concatWS(null, "foo", "bar"), is(nullValue()));
  assertThat(
      udf.concatWS(null, ByteBuffer.wrap(new byte[] {1}), ByteBuffer.wrap(new byte[] {2})),
      is(nullValue()));
}