Dataset column metadata:
- focal_method: string, lengths ranging from 13 to 60.9k characters
- test_case: string, lengths ranging from 25 to 109k characters
/**
 * One-time cluster-config migration: converts the legacy AWS plugin
 * configuration (with a plaintext secret key) into the new format.
 * A MigrationCompleted marker ensures this runs at most once per cluster.
 */
@Override
public void upgrade() {
    // Guard: skip entirely if a previous run already recorded completion.
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        LOG.debug("Migration already completed.");
        return;
    }
    // NOTE(review): the legacy config is read under CLUSTER_CONFIG_TYPE rather
    // than its own class — presumably old and new formats share that key; confirm.
    final LegacyAWSPluginConfiguration legacyConfiguration = clusterConfigService.get(
            CLUSTER_CONFIG_TYPE,
            LegacyAWSPluginConfiguration.class
    );
    // Only migrate when a legacy config with a non-empty secret key exists.
    if (legacyConfiguration != null && !Strings.isNullOrEmpty(legacyConfiguration.secretKey())) {
        final AWSPluginConfiguration migratedPluginConfiguration =
                AWSPluginConfiguration.fromLegacyConfig(legacyConfiguration, systemConfiguration);
        clusterConfigService.write(CLUSTER_CONFIG_TYPE, migratedPluginConfiguration);
    }
    // Record completion even when there was nothing to migrate.
    clusterConfigService.write(MigrationCompleted.create());
}
// Verifies the migration encrypts a legacy plaintext AWS secret key and that
// the written ciphertext round-trips (decrypts) with the system password secret.
@Test
public void encryptsSecretKeyIfPresent() {
    mockExistingConfig(V20200505121200_EncryptAWSSecretKey.LegacyAWSPluginConfiguration.create(
            true, "lookupRegions", "something", "verySecretKey", true
    ));
    when(configuration.getPasswordSecret()).thenReturn("systemSecret1234");
    this.migration.upgrade();
    // Capture exactly what was written back under the plugin's config key.
    final ArgumentCaptor<V20200505121200_EncryptAWSSecretKey.AWSPluginConfiguration> writtenConfigCaptor =
            ArgumentCaptor.forClass(
                    V20200505121200_EncryptAWSSecretKey.AWSPluginConfiguration.class
            );
    verify(clusterConfigService, times(1)).write(eq(PLUGIN_CONFIG_CLASS_NAME), writtenConfigCaptor.capture());
    // The completion marker must also be written exactly once.
    verify(clusterConfigService, times(1)).write(any(V20200505121200_EncryptAWSSecretKey.MigrationCompleted.class));
    final V20200505121200_EncryptAWSSecretKey.AWSPluginConfiguration writtenConfig = writtenConfigCaptor.getValue();
    // Round trip: decrypting with the system secret must recover the original key.
    assertThat(AESTools.decrypt(writtenConfig.encryptedSecretKey(), "systemSecret1234", writtenConfig.secretKeySalt()))
            .isEqualTo("verySecretKey");
}
/**
 * Encodes the input integers into a Z-order (bit-interleaved) byte array
 * address, interleaving bits from most-significant downward.
 * When negative values are allowed, one leading sign bit per input is written
 * first (1 for non-negative) so numeric ordering is preserved in the address.
 * Assumes totalBitLength/encodingBits/maxBitLength were set up consistently
 * by the constructor — TODO confirm against the enclosing class.
 */
public byte[] encodeToByteArray(List<Integer> input) {
    checkEncodeInputValidity(input);
    // Find address byte length by rounding up (totalBitLength / 8)
    byte[] address = new byte[(totalBitLength + 7) >> 3];
    if (!positiveIntegersOnly) {
        // Modify sign bits to preserve ordering between positive and negative integers
        int bitIndex = totalBitLength - 1;
        for (int value : input) {
            // Sign bit is 1 for non-negative values so they sort after negatives.
            byte signBit = (value < 0) ? (byte) 0 : 1;
            address[bitIndex >> 3] |= signBit << (bitIndex & 7);
            bitIndex--;
        }
    }
    // Start writing payload bits just below the sign bits (if any were written).
    int bitIndex = positiveIntegersOnly ? totalBitLength - 1 : totalBitLength - encodingBits.size() - 1;
    // Interweave input bits into address from the most significant bit to preserve data locality
    for (int bitsProcessed = 0; bitsProcessed < maxBitLength; bitsProcessed++) {
        for (int index = 0; index < input.size(); index++) {
            // Inputs with fewer encoding bits stop contributing early.
            if (bitsProcessed >= encodingBits.get(index)) {
                continue;
            }
            int bitPosition = encodingBits.get(index) - bitsProcessed - 1;
            byte maskedBit = (byte) ((input.get(index) >> bitPosition) & 1);
            address[bitIndex >> 3] |= maskedBit << (bitIndex & 7);
            bitIndex--;
        }
    }
    return address;
}
// Exceeding MAX_INPUT_DIMENSIONS by one must be rejected with an
// IllegalArgumentException carrying the documented message.
@Test
public void testZOrderTooManyIntegers() {
    Random rand = new Random();
    int listLength = ZOrder.MAX_INPUT_DIMENSIONS + 1;
    List<Integer> intColumns = new ArrayList<>(listLength);
    List<Integer> bitPositions = new ArrayList<>(listLength);
    for (int i = 0; i < listLength; i++) {
        int value = rand.nextInt(Integer.MAX_VALUE);
        intColumns.add(value);
        // Bit width needed for each value: highest set bit position + 1.
        bitPositions.add(getHighestSetBitPosition(value) + 1);
    }
    ZOrder zOrder = new ZOrder(bitPositions, true);
    try {
        zOrder.encodeToByteArray(intColumns);
        fail(format("Expected test to fail: z-ordering does not support more than %d integers.",
                ZOrder.MAX_INPUT_DIMENSIONS));
    } catch (IllegalArgumentException e) {
        // Pin the exact user-facing message so it is not changed accidentally.
        String expectedMessage = format("Current Z-Ordering implementation does not support more than %d input numbers.",
                ZOrder.MAX_INPUT_DIMENSIONS);
        assertEquals(e.getMessage(), expectedMessage,
                format("Expected exception message '%s' to match '%s'", e.getMessage(), expectedMessage));
    }
}
/**
 * Returns true when this filter's configured event covers the given event AND
 * the filter applies to the pipeline/stage named by the identifier.
 */
public boolean matchStage(StageConfigIdentifier stageIdentifier, StageEvent event) {
    // Guard clause: an event outside this filter's configured event never matches.
    if (!this.event.include(event)) {
        return false;
    }
    return appliesTo(stageIdentifier.getPipelineName(), stageIdentifier.getStageName());
}
// A filter configured for pipeline "xyz" must not match a stage of pipeline
// "cruise", even when stage name and event are identical.
@Test
void shouldNotMatchStageWithDifferentPipeline() {
    NotificationFilter filter = new NotificationFilter("xyz", "dev", StageEvent.All, false);
    assertThat(filter.matchStage(new StageConfigIdentifier("cruise", "dev"), StageEvent.All)).isFalse();
}
/**
 * Hash over (name, ns, value), kept consistent with equals.
 * The seed/multiplier pair (811, 67) are the arbitrary odd constants required
 * by HashCodeBuilder; changing them changes every computed hash.
 */
@Override
public int hashCode() {
    return new HashCodeBuilder(811, 67)
            .append(name)
            .append(ns)
            .append(value)
            .toHashCode();
}
// Equal fixtures hash equally; the pairwise-different fixtures are chosen so
// their hashes differ. NOTE(review): hashCode() does not guarantee inequality
// in general — this relies on the specific fixture values not colliding.
@Test
public void testXAttrHashCode() {
    assertEquals(XATTR.hashCode(), XATTR1.hashCode());
    assertNotEquals(XATTR1.hashCode(), XATTR2.hashCode());
    assertNotEquals(XATTR2.hashCode(), XATTR3.hashCode());
    assertNotEquals(XATTR3.hashCode(), XATTR4.hashCode());
    assertNotEquals(XATTR4.hashCode(), XATTR5.hashCode());
}
/**
 * Converts an identifier (camelCase, CONSTANT_CASE, etc.) to lower_underscore
 * form by joining its lowercase terms with underscores.
 *
 * @param identifierName the identifier to convert
 * @return the lower_underscore representation
 */
public static String convertToLowerUnderscore(String identifierName) {
    // String.join is the idiomatic, allocation-lighter equivalent of streaming
    // the terms through Collectors.joining("_").
    return String.join("_", splitToLowercaseTerms(identifierName));
}
// An underscore-only identifier is preserved unchanged by the conversion.
@Test
public void convertToLowerUnderscore_givesSingleUnderscore_fromSingleUnderscore() {
    String identifierName = "_";
    String lowerUnderscore = NamingConventions.convertToLowerUnderscore(identifierName);
    assertThat(lowerUnderscore).isEqualTo("_");
}
/**
 * Lazily builds the runnable for the current step and caches it, so repeated
 * calls return the same instance until the step advances. May return null when
 * there is no current step.
 */
@Override
public Runnable get() {
    if (currentRunnable != null) {
        return currentRunnable;
    }
    if (currentStep != null) {
        currentRunnable = createRunnable(currentStep, state);
    }
    return currentRunnable;
}
// Two consecutive get() calls on a StepSupplier must return the same cached
// runnable (the supplier is lazy but memoizing).
@Test
public void step_supplier_get_returns_same_step() throws Exception {
    HazelcastInstance node = createHazelcastInstance(getConfig());
    Data data = Accessors.getSerializationService(node).toData("data");
    // A minimal, fully-initialised map operation to back the supplier.
    MapOperation operation = new SetOperation("map", data, data);
    operation.setNodeEngine(Accessors.getNodeEngineImpl(node));
    operation.setPartitionId(1);
    operation.beforeRun();
    StepSupplier stepSupplier = new StepSupplier(operation);
    Runnable get1 = stepSupplier.get();
    Runnable get2 = stepSupplier.get();
    assertEquals(get1, get2);
}
/**
 * Parses a MediaType from its textual form (e.g. "application/json; charset=UTF-8").
 * Throws the container's missingMediaType error for null/empty input; further
 * syntax validation happens in parseSingleMediaType.
 */
@ProtoFactory
public static MediaType fromString(String tree) {
    if (tree == null || tree.isEmpty()) throw CONTAINER.missingMediaType();
    Matcher matcher = TREE_PATTERN.matcher(tree);
    // false: not parsing a comma-separated list — a single media type only.
    // NOTE(review): meaning of the boolean flag inferred from the call shape;
    // confirm against parseSingleMediaType's signature.
    return parseSingleMediaType(tree, matcher, false);
}
// Quoted parameter values keep their escape sequences verbatim, and an
// unterminated escape at end of input is rejected.
@Test
public void testQuotedParamsEscaping() {
    // Escaped quotes inside a quoted value are preserved as written.
    MediaType mediaType = MediaType.fromString("application/json; charset=\"\\\"UTF-8\\\"\"");
    assertMediaTypeWithParam(mediaType, "application", "json", "charset", "\"\\\"UTF-8\\\"\"");
    // Mixed escapes (\a, \", \\) survive round-tripping too.
    MediaType mediaType2 = MediaType.fromString("application/json; charset=\"\\a\\\"\\\\\"");
    assertMediaTypeWithParam(mediaType2, "application", "json", "charset", "\"\\a\\\"\\\\\"");
    // A dangling backslash-quote is a malformed value.
    Exceptions.expectException(EncodingException.class,
            () -> MediaType.fromString("application/json; charset=\"\\\""));
}
/**
 * Builds the registry's /v2/ API base URL from the configured server and
 * issues the initial request via the URL-taking overload.
 */
T call() throws IOException, RegistryException {
    String base = "https://" + registryEndpointRequestProperties.getServerUrl() + "/v2/";
    return call(registryEndpointProvider.getApiRoute(base));
}
// A negative HTTP timeout system property is passed through unchanged.
// NOTE(review): the system property is mutated and not restored here —
// presumably a fixture resets it; verify against the class's setup/teardown.
@Test
public void testHttpTimeout_negativeValue() throws IOException, RegistryException {
    ArgumentCaptor<Request> requestCaptor = ArgumentCaptor.forClass(Request.class);
    Mockito.when(mockHttpClient.call(Mockito.any(), Mockito.any(), requestCaptor.capture()))
            .thenReturn(mockResponse);
    System.setProperty(JibSystemProperties.HTTP_TIMEOUT, "-1");
    endpointCaller.call();
    // We let the negative value pass through:
    // https://github.com/GoogleContainerTools/jib/pull/656#discussion_r203562639
    Assert.assertEquals(-1, new RequestWrapper(requestCaptor.getValue()).getHttpTimeout());
}
/**
 * CLI entry point for the streaming job.
 * Returns 0 when usage was printed, 1 on argument errors, otherwise the
 * result of submitting and monitoring the job.
 */
@Override
public int run(String[] args) throws Exception {
    try {
        // Keep a defensive copy of the raw arguments for later reference.
        this.argv_ = Arrays.copyOf(args, args.length);
        init();
        preProcessArgs();
        parseArgv();
        if (printUsage) {
            printUsage(detailedUsage_);
            return 0;
        }
        postProcessArgs();
        setJobConf();
    } catch (IllegalArgumentException ex) {
        // Argument errors were already reported to the user during parsing;
        // keep the full stack trace in the debug log only.
        LOG.debug("Error in streaming job", ex);
        return 1;
    }
    return submitAndMonitorJob();
}
// Empty argument list is an error (exit code 1); the help/info flags print
// usage and exit successfully (0).
@Test
public void testOptions() throws Exception {
    StreamJob streamJob = new StreamJob();
    assertEquals(1, streamJob.run(new String[0]));
    assertEquals(0, streamJob.run(new String[] {"-help"}));
    assertEquals(0, streamJob.run(new String[] {"-info"}));
}
/**
 * Looks up a public field named {@code name} on {@code clazz} (including
 * inherited public fields, per Class#getField semantics).
 *
 * @param clazz the class to inspect
 * @param name  the field name
 * @return the field, or null when it does not exist, is not public/accessible,
 *         or reflection fails (e.g. SecurityException)
 */
public Field getField(Class<?> clazz, String name) {
    try {
        Field field = clazz.getField(name);
        // getField should only return public members, but keep the original
        // defensive accessibility check.
        if (!Modifier.isPublic(field.getModifiers()) && !field.isAccessible()) {
            return null;
        }
        return field;
    } catch (Exception e) {
        // NoSuchFieldException and any other reflective failure both map to
        // "no such field"; the two previous identical catch blocks are merged.
        return null;
    }
}
// Only public fields are resolvable; private/package/protected and inherited
// non-public fields all yield null. Field shadowing resolves to the subclass.
@Test
public void testGetField() {
    Cat cat = new Cat();
    // Private from super
    assertNull(rEngine.getField(cat, "age"));
    // Inexistent
    assertNull(rEngine.getField(cat, "age1"));
    // Field shadowing: the Cat declaration (int) wins over any superclass field.
    assertEquals(rEngine.getField(cat, "age2").getType(), int.class);
    assertEquals(rEngine.getField(Cat.class, "age2").getType(), int.class);
    assertEquals(rEngine.getField("p1.Cat", "age2").getType(), int.class);
    // Static field
    assertEquals(rEngine.getField(cat, "CONSTANT").getType(), String.class);
    // Package
    assertNull(rEngine.getField(cat, "age4"));
    // Protected
    assertNull(rEngine.getField(cat, "age5"));
}
public static void disableConsumption(KafkaConsumerWrapper kafkaConsumerWrapper, Set<String> prohibitionTopics) { Set<String> originalTopics = kafkaConsumerWrapper.getOriginalTopics(); // Not subscribed to any Topic, so no action is required if (originalTopics.size() == 0) { return; } Collection<TopicPartition> originalPartitions = kafkaConsumerWrapper.getOriginalPartitions(); KafkaConsumer<?, ?> kafkaConsumer = kafkaConsumerWrapper.getKafkaConsumer(); Collection<String> subtractTopics = CollectionUtils.subtract(originalTopics, prohibitionTopics); if (kafkaConsumerWrapper.isAssign()) { kafkaConsumer.assign(originalPartitions.stream().filter(obj -> subtractTopics.contains(obj.topic())) .collect(Collectors.toSet())); return; } kafkaConsumer.subscribe(subtractTopics); }
// Prohibiting a topic the consumer never subscribed to must re-subscribe with
// the original topic set unchanged.
@Test
public void testDisableConsumptionWithoutSameTopics() {
    KafkaConsumer<?, ?> mockConsumer = Mockito.mock(KafkaConsumer.class);
    KafkaConsumerWrapper kafkaConsumerWrapper = new KafkaConsumerWrapper(mockConsumer);
    HashSet<String> originalTopics = new HashSet<>();
    originalTopics.add("testTopic-1");
    originalTopics.add("testTopic-2");
    kafkaConsumerWrapper.setOriginalTopics(originalTopics);
    // Subscribe mode (not assign mode) so subscribe() is the expected call.
    kafkaConsumerWrapper.setAssign(false);
    Set<String> prohibitionTopics = new HashSet<>();
    prohibitionTopics.add("testTopic-3");
    KafkaConsumerController.disableConsumption(kafkaConsumerWrapper, prohibitionTopics);
    // NOTE(review): relies on the subtraction result comparing equal to this
    // list for Mockito argument matching — confirm collection equality holds.
    Mockito.verify(mockConsumer, Mockito.times(1)).subscribe(Arrays.asList("testTopic-1", "testTopic-2"));
}
/**
 * Reads the next byte from the stream.
 *
 * @return the byte read
 * @throws EOFException when the end of the stream has been reached
 */
@Override
public final byte readByte() throws EOFException {
    final int next = read();
    if (next < 0) {
        throw new EOFException();
    }
    return (byte) next;
}
// NOTE(review): this exercises the positional readByte(int) overload — the
// fixture stream is expected to hold the value 1 at position 1; confirm setup.
@Test
public void testReadBytePosition() throws Exception {
    int read = in.readByte(1);
    assertEquals(1, read);
}
/**
 * Resolves the property's current value: converts the stored string via the
 * registered converter, then falls back to the default when the result is
 * null/empty or outside the configured [minValue, maxValue] bounds.
 */
@SuppressWarnings("unchecked")
public T getValue() {
    final T value = (T) FROM_STRING.get(getConverterClass()).apply(JiveGlobals.getProperty(key), this);
    // Missing value, or an empty collection, both mean "fall back to default".
    if (value == null || (Collection.class.isAssignableFrom(value.getClass()) && ((Collection) value).isEmpty())) {
        return defaultValue;
    }
    // Range checks use raw Comparable — assumes T is Comparable whenever
    // minValue/maxValue are configured; TODO confirm builder enforces this.
    if (minValue != null && ((Comparable) minValue).compareTo(value) > 0) {
        LOGGER.warn("Configured value of {} is less than the minimum value of {} for the SystemProperty {} - will use default value of {} instead",
                value, minValue, key, defaultValue);
        return defaultValue;
    }
    if (maxValue != null && ((Comparable) maxValue).compareTo(value) < 0) {
        LOGGER.warn("Configured value of {} is more than the maximum value of {} for the SystemProperty {} - will use default value of {} instead",
                value, maxValue, key, defaultValue);
        return defaultValue;
    }
    return value;
}
// An unparseable JID string must resolve to null both before the property is
// set (no default configured) and after an invalid value is stored.
@Test
public void willReturnNullForInvalidJID() {
    final String jidString = "@test-domain";
    final String key = "an invalid jid property";
    final SystemProperty<JID> property = SystemProperty.Builder.ofType(JID.class)
            .setKey(key)
            .setDynamic(true)
            .build();
    assertThat(property.getValue(), is(nullValue()));
    JiveGlobals.setProperty(key, jidString);
    assertThat(property.getValue(), is(nullValue()));
}
/**
 * Removes every tracked namespace watcher, draining a snapshot of the entry
 * set so concurrent additions are not affected. Failures on individual
 * watchers are logged and do not stop the loop.
 */
void removeWatchers() {
    // Snapshot first: entries may be mutated concurrently while we iterate.
    List<NamespaceWatcher> localEntries = Lists.newArrayList(entries);
    while (localEntries.size() > 0) {
        NamespaceWatcher watcher = localEntries.remove(0);
        // Only remove watchers we actually still own (remove() returns false
        // if another thread already claimed it).
        if (entries.remove(watcher)) {
            try {
                log.debug("Removing watcher for path: " + watcher.getUnfixedPath());
                RemoveWatchesBuilderImpl builder = new RemoveWatchesBuilderImpl(client);
                builder.internalRemoval(watcher, watcher.getUnfixedPath());
            } catch (Exception e) {
                // Pass the exception to the logger so the stack trace is not
                // silently dropped (previously only the message was logged).
                log.error("Could not remove watcher for path: " + watcher.getUnfixedPath(), e);
            }
        }
    }
}
// One Watcher registered for two different kinds (exists + getData): after one
// kind fires, removeWatchers() must still clean up without error.
@Test
public void testSameWatcherDifferentKinds1Triggered() throws Exception {
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    try {
        client.start();
        WatcherRemovalFacade removerClient = (WatcherRemovalFacade) client.newWatcherRemoveCuratorFramework();
        final CountDownLatch latch = new CountDownLatch(1);
        Watcher watcher = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                latch.countDown();
            }
        };
        removerClient.create().creatingParentsIfNeeded().forPath("/a/b/c");
        // Same watcher instance registered via two different operations.
        removerClient.checkExists().usingWatcher(watcher).forPath("/a/b/c");
        removerClient.getData().usingWatcher(watcher).forPath("/a/b/c");
        // Trigger the data watch.
        removerClient.setData().forPath("/a/b/c", "new".getBytes());
        Timing timing = new Timing();
        assertTrue(timing.awaitLatch(latch));
        timing.sleepABit();
        removerClient.removeWatchers();
    } finally {
        TestCleanState.closeAndTestClean(client);
    }
}
// Accessor for the address-based rate-limit configuration.
public RateLimitSet getAddress() {
    return address;
}
// The address limit set loaded from the test fixture is expected to contain
// exactly 4 direct mappings — NOTE(review): tied to the fixture file; confirm.
@Test
public void testAddress() {
    LimitConfig.RateLimitSet limitAddress = limitConfig.getAddress();
    Assert.assertEquals(limitAddress.getDirectMaps().size(), 4);
}
/**
 * Writes {@code length} bytes of {@code src} starting at {@code srcIndex},
 * growing capacity as needed and advancing the writer index afterwards.
 * The setBytes-then-advance order matters: setBytes writes at the old index.
 */
@Override
public ByteBuf writeBytes(byte[] src, int srcIndex, int length) {
    ensureWritable(length);
    setBytes(writerIndex, src, srcIndex, length);
    writerIndex += length;
    return this;
}
// Multi-threaded readers over a buffer pre-filled with random bytes; the
// buffer is always released even when the shared helper throws.
@Test
public void testDuplicateBytesInArrayMultipleThreads() throws Exception {
    final byte[] bytes = new byte[8];
    random.nextBytes(bytes);
    final ByteBuf buffer = newBuffer(8);
    buffer.writeBytes(bytes);
    try {
        testBytesInArrayMultipleThreads(buffer, bytes, false);
    } finally {
        buffer.release();
    }
}
/**
 * Admin API: resolves the offset matching each partition's OffsetSpec via the
 * ListOffsets driver, returning one future per requested partition.
 */
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                     ListOffsetsOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
            ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
    // Translate each OffsetSpec into its wire-level timestamp value.
    Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue())));
    ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new ListOffsetsResult(future.all());
}
// On brokers without MAX_TIMESTAMP support (ListOffsets <= v6), the
// maxTimestamp query fails with UnsupportedVersionException and is NOT
// retried, while the latest-offset query for the other partition succeeds.
@Test
public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[]{node}, new Node[]{node}));
    pInfos.add(new PartitionInfo("foo", 1, node, new Node[]{node}, new Node[]{node}));
    final Cluster cluster = new Cluster(
            "mockClusterId",
            nodes,
            pInfos,
            Collections.emptySet(),
            Collections.emptySet(),
            node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        // Cap the broker at ListOffsets v6 — below MAX_TIMESTAMP support.
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(
                ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listoffsets response from broker 0
        env.kafkaClient().prepareUnsupportedVersionResponse(
                request -> request instanceof ListOffsetsRequest);
        ListOffsetsTopicResponse topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
                .setThrottleTimeMs(0)
                .setTopics(singletonList(topicResponse));
        env.kafkaClient().prepareResponseFrom(
                // ensure that no max timestamp requests are retried
                request -> request instanceof ListOffsetsRequest && ((ListOffsetsRequest) request).topics().stream()
                        .flatMap(t -> t.partitions().stream())
                        .noneMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP),
                new ListOffsetsResponse(responseData), node);
        ListOffsetsResult result = env.adminClient().listOffsets(new HashMap<TopicPartition, OffsetSpec>() {{
            put(tp0, OffsetSpec.maxTimestamp());
            put(tp1, OffsetSpec.latest());
        }});
        TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class);
        ListOffsetsResultInfo tp1Offset = result.partitionResult(tp1).get();
        assertEquals(345L, tp1Offset.offset());
        assertEquals(543, tp1Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp1Offset.timestamp());
    }
}
/**
 * Computes the size in bytes of a consumer record from its serialized key and
 * value sizes, topic name, and headers (delegated to recordSizeInBytes).
 */
public static long consumerRecordSizeInBytes(final ConsumerRecord<byte[], byte[]> record) {
    return recordSizeInBytes(
        record.serializedKeySize(),
        record.serializedValueSize(),
        record.topic(),
        record.headers()
    );
}
// A record with a null key (serialized key size 0) must be sized by the
// NULL_KEY_SIZE_IN_BYTES expectation.
@Test
public void shouldComputeSizeInBytesForConsumerRecordWithNullKey() {
    final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
        TOPIC,
        1,
        0,
        0L,
        TimestampType.CREATE_TIME,
        0,
        5,
        null,
        VALUE,
        HEADERS,
        Optional.empty()
    );
    assertThat(consumerRecordSizeInBytes(record), equalTo(NULL_KEY_SIZE_IN_BYTES));
}
/**
 * Assembles the analytics plugin info for a descriptor: its capabilities,
 * plugin-settings view, and icon image.
 */
@Override
public AnalyticsPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    final String pluginId = descriptor.id();
    // Keep the original lookup order: capabilities, settings view, then image.
    final Capabilities pluginCapabilities = capabilities(pluginId);
    final PluggableInstanceSettings settingsAndView = getPluginSettingsAndView(descriptor, extension);
    final Image icon = image(pluginId);
    return new AnalyticsPluginInfo(descriptor, icon, pluginCapabilities, settingsAndView);
}
// A plugin that throws when asked for its settings configuration must still
// produce a PluginInfo — with null plugin settings rather than a failure.
@Test
public void shouldContinueBuildingPluginInfoIfPluginSettingsIsNotProvidedByPlugin() {
    GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
    doThrow(new RuntimeException("foo")).when(extension).getPluginSettingsConfiguration("plugin1");
    AnalyticsPluginInfo pluginInfo = new AnalyticsPluginInfoBuilder(extension).pluginInfoFor(descriptor);
    assertThat(pluginInfo.getDescriptor(), is(descriptor));
    assertThat(pluginInfo.getExtensionName(), is("analytics"));
    assertNull(pluginInfo.getPluginSettings());
}
public Optional<Distance> verticalDistAtHorizontalClosureTime(Instant time) { Optional<Duration> timeUntilClosure = timeUntilHorizontalClosure(time); //not closing in the horizontal direction if (!timeUntilClosure.isPresent()) { return Optional.empty(); } Speed closureRate = verticalClosureRateAt(time); Distance startingSeparation = verticalSeparationAt(time); Distance distanceClosed = closureRate.times(timeUntilClosure.get()); return Optional.of(startingSeparation.minus(distanceClosed).abs()); }
// With vertical separation growing and horizontal separation shrinking, the
// projected vertical distance at closure time increases as expected.
@Test
public void testVerticalDistAtHorizontalClosureTime() {
    //increasing vertical separation...
    Distance[] verticalDistances = new Distance[]{
        Distance.ofFeet(200), Distance.ofFeet(300), Distance.ofFeet(500)
    };
    //decreasing horizontal separation...
    Distance[] horizontalDistances = new Distance[]{
        Distance.ofNauticalMiles(1), Distance.ofNauticalMiles(.5), Distance.ofNauticalMiles(.25)
    };
    SeparationTimeSeries instance = new SeparationTimeSeries(
        times(), verticalDistances, horizontalDistances
    );
    assertEquals(
        Distance.ofFeet(400), //200ft + 200ft in new "closure"
        instance.verticalDistAtHorizontalClosureTime(EPOCH.plusSeconds(0)).get()
    );
    assertEquals(
        Distance.ofFeet(220 + 180), //220ft + 180 in new "closure"
        instance.verticalDistAtHorizontalClosureTime(EPOCH.plusSeconds(1)).get()
    );
    assertEquals(
        Distance.ofFeet(300 + 400), //300 ft + 10 seconds @ 200ft every 5 sec
        instance.verticalDistAtHorizontalClosureTime(EPOCH.plusSeconds(5)).get()
    );
    assertEquals(
        Distance.ofFeet(380 + 320), //380 ft + 8 seconds @ 200ft every 5 sec
        instance.verticalDistAtHorizontalClosureTime(EPOCH.plusSeconds(7)).get()
    );
    assertEquals(
        Distance.ofFeet(500 + 200), //500ft + 5 seconds @ 200ft every 5 sec
        instance.verticalDistAtHorizontalClosureTime(EPOCH.plusSeconds(10)).get()
    );
}
/**
 * Convenience overload: sends the request to the given cluster member using
 * the default request timeout.
 *
 * @throws NacosException when the request fails or the member is unreachable
 */
public Response sendRequest(Member member, Request request) throws NacosException {
    return sendRequest(member, request, DEFAULT_REQUEST_TIME_OUT);
}
// Sending to the (unreachable) test member is expected to fail with error
// code -401; any other exception fails the test.
@Test
void testSendRequest() {
    try {
        // The return value is irrelevant here; only the error path is asserted,
        // so the previously unused local variable has been removed.
        clusterRpcClientProxy.sendRequest(member, new HealthCheckRequest());
    } catch (NacosException e) {
        assertEquals(-401, e.getErrCode());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
/**
 * Metered wrapper around the inner store's prefix scan: validates arguments,
 * delegates, and wraps the iterator so the prefix-scan sensor is recorded.
 *
 * @throws NullPointerException if prefix or prefixKeySerializer is null
 */
@Override
public <PS extends Serializer<P>, P> KeyValueIterator<K, V> prefixScan(final P prefix, final PS prefixKeySerializer) {
    Objects.requireNonNull(prefix, "prefix cannot be null");
    Objects.requireNonNull(prefixKeySerializer, "prefixKeySerializer cannot be null");
    final KeyValueIterator<K, V> innerIterator = wrapped().prefixScan(prefix, prefixKeySerializer);
    return new MeteredKeyValueIterator(innerIterator, prefixScanSensor);
}
// A null prefix must be rejected with NullPointerException before delegating.
@Test
public void shouldThrowNullPointerOnPrefixScanIfPrefixIsNull() {
    setUpWithoutContext();
    final StringSerializer stringSerializer = new StringSerializer();
    assertThrows(NullPointerException.class, () -> metered.prefixScan(null, stringSerializer));
}
/**
 * REST endpoint: creates a new connector from the request body and returns
 * 201 Created with a Location header pointing at the new connector.
 * The request may be forwarded to the leader when {@code forward} is set.
 */
@POST
@Operation(summary = "Create a new connector")
public Response createConnector(final @Parameter(hidden = true) @QueryParam("forward") Boolean forward,
                                final @Context HttpHeaders headers,
                                final CreateConnectorRequest createRequest) throws Throwable {
    // Trim leading and trailing whitespaces from the connector name, replace null with empty string
    // if no name element present to keep validation within validator (NonEmptyStringWithoutControlChars
    // allows null values)
    String name = createRequest.name() == null ? "" : createRequest.name().trim();
    Map<String, String> configs = createRequest.config();
    checkAndPutConnectorConfigName(name, configs);
    FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();
    herder.putConnectorConfig(name, configs, createRequest.initialTargetState(), false, cb);
    // Complete locally or forward to the leader, translating the created info.
    Herder.Created<ConnectorInfo> info = requestHandler.completeOrForwardRequest(cb, "/connectors", "POST", headers, createRequest,
            new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);
    URI location = UriBuilder.fromUri("/connectors").path(name).build();
    return Response.created(location).entity(info.result()).build();
}
// Happy path: the herder callback is completed with a created connector and
// the resource forwards the config unchanged (null initial target state).
@Test
public void testCreateConnector() throws Throwable {
    CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME,
            Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null);
    final ArgumentCaptor<Callback<Herder.Created<ConnectorInfo>>> cb = ArgumentCaptor.forClass(Callback.class);
    expectAndCallbackResult(cb, new Herder.Created<>(true,
            new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, CONNECTOR_TASK_NAMES, ConnectorType.SOURCE))
    ).when(herder).putConnectorConfig(eq(CONNECTOR_NAME), eq(body.config()), isNull(), eq(false), cb.capture());
    connectorsResource.createConnector(FORWARD, NULL_HEADERS, body);
}
/**
 * Substitutes $&lt;n&gt; parameter references in {@code format} with the
 * corresponding entries of {@code params}; literal text is carried over via
 * the pattern's group(1).
 * NOTE(review): literal text after the last pattern match appears to be
 * dropped (the loop exits without appending format.substring(start)) —
 * confirm PARAMETER_PATTERN always consumes trailing literals.
 *
 * @throws BadFormatString for an out-of-range or malformed parameter number
 */
static String replaceParameters(String format, String[] params) throws BadFormatString {
    Matcher match = PARAMETER_PATTERN.matcher(format);
    int start = 0;
    StringBuilder result = new StringBuilder();
    while (start < format.length() && match.find(start)) {
        // group(1): the literal text preceding the parameter reference.
        result.append(match.group(1));
        // group(3): the parameter number, or null when no $n was present.
        String paramNum = match.group(3);
        if (paramNum != null) {
            try {
                int num = Integer.parseInt(paramNum);
                if (num < 0 || num >= params.length) {
                    throw new BadFormatString("index " + num + " from " + format +
                            " is outside of the valid range 0 to " + (params.length - 1));
                }
                result.append(params[num]);
            } catch (NumberFormatException nfe) {
                // e.g. a number too large to parse as int.
                throw new BadFormatString("bad format in username mapping in " + paramNum, nfe);
            }
        }
        start = match.end();
    }
    return result.toString();
}
// Substitution happy paths (including surplus params and no recursion into
// substituted values) plus the two failure modes: out-of-range and malformed.
@Test
public void testReplaceParameters() throws BadFormatString {
    // positive test cases
    assertEquals(KerberosRule.replaceParameters("", new String[0]), "");
    assertEquals(KerberosRule.replaceParameters("hello", new String[0]), "hello");
    assertEquals(KerberosRule.replaceParameters("", new String[]{"too", "many", "parameters", "are", "ok"}), "");
    assertEquals(KerberosRule.replaceParameters("hello", new String[]{"too", "many", "parameters", "are", "ok"}), "hello");
    assertEquals(KerberosRule.replaceParameters("hello $0", new String[]{"too", "many", "parameters", "are", "ok"}), "hello too");
    // Substituted values are not re-scanned for $n references.
    assertEquals(KerberosRule.replaceParameters("hello $0", new String[]{"no recursion $1"}), "hello no recursion $1");
    // negative test cases
    assertThrows(
        BadFormatString.class,
        () -> KerberosRule.replaceParameters("$0", new String[]{}),
        "An out-of-bounds parameter number should trigger an exception!");
    assertThrows(
        BadFormatString.class,
        () -> KerberosRule.replaceParameters("hello $a", new String[]{"does not matter"}),
        "A malformed parameter name should trigger an exception!");
}
/**
 * Converts a SeaTunnel column back into a Kingbase type definition,
 * re-mapping generic conversion failures to a Kingbase-scoped connector error.
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    try {
        return super.reconvert(column);
    } catch (SeaTunnelRuntimeException e) {
        // NOTE(review): the original cause `e` is not chained into the new
        // exception — confirm convertToConnectorTypeError retains enough context.
        throw CommonError.convertToConnectorTypeError(
                DatabaseIdentifier.KINGBASE,
                column.getDataType().getSqlType().name(),
                column.getName());
    }
}
// LOCAL_DATE_TIME maps to timestamp; a scale within range is preserved and a
// scale above the maximum is clamped to 6.
@Test
public void testReconvertDatetime() {
    Column column = PhysicalColumn.builder()
            .name("test")
            .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE)
            .build();
    BasicTypeDefine typeDefine = KingbaseTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(KingbaseTypeConverter.PG_TIMESTAMP, typeDefine.getColumnType());
    Assertions.assertEquals(KingbaseTypeConverter.PG_TIMESTAMP, typeDefine.getDataType());
    // Scale 3 is within range: carried through into timestamp(3).
    column = PhysicalColumn.builder()
            .name("test")
            .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE)
            .scale(3)
            .build();
    typeDefine = KingbaseTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(
            String.format("%s(%s)", KingbaseTypeConverter.PG_TIMESTAMP, column.getScale()),
            typeDefine.getColumnType());
    Assertions.assertEquals(KingbaseTypeConverter.PG_TIMESTAMP, typeDefine.getDataType());
    Assertions.assertEquals(column.getScale(), typeDefine.getScale());
    // Scale 9 exceeds the maximum supported precision: clamped to 6.
    column = PhysicalColumn.builder()
            .name("test")
            .dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE)
            .scale(9)
            .build();
    typeDefine = KingbaseTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(
            String.format("%s(%s)", KingbaseTypeConverter.PG_TIMESTAMP, 6),
            typeDefine.getColumnType());
}
/**
 * Plans enumeration splits after refreshing table metadata: the first call
 * (no prior position) discovers the initial splits; subsequent calls discover
 * only the increment since the given position.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    table.refresh();
    return lastPosition == null
            ? discoverInitialSplits()
            : discoverIncrementalSplits(lastPosition);
}
// A start timestamp later than every existing snapshot must fail fast with a
// "Cannot find a snapshot after:" IllegalArgumentException.
@Test
public void testIncrementalFromSnapshotTimestampWithInvalidIds() throws Exception {
    appendTwoSnapshots();
    // 1s past the newest snapshot — no snapshot can satisfy this start point.
    long invalidSnapshotTimestampMs = snapshot2.timestampMillis() + 1000L;
    ScanContext scanContextWithInvalidSnapshotId = ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_TIMESTAMP)
            .startSnapshotTimestamp(invalidSnapshotTimestampMs)
            .build();
    ContinuousSplitPlannerImpl splitPlanner = new ContinuousSplitPlannerImpl(
            TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null);
    assertThatThrownBy(() -> splitPlanner.planSplits(null))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageStartingWith("Cannot find a snapshot after:");
}
/**
 * Computes a single split boundary for a bundle: hash every owned topic name,
 * sort the hashes, and return the midpoint between the two middle values so
 * each half receives roughly the same number of topics.
 * Completes with null when the bundle owns 0 or 1 topics (nothing to split).
 */
@Override
public CompletableFuture<List<Long>> getSplitBoundary(BundleSplitOption bundleSplitOption) {
    NamespaceService service = bundleSplitOption.getService();
    NamespaceBundle bundle = bundleSplitOption.getBundle();
    return service.getOwnedTopicListForNamespaceBundle(bundle).thenCompose(topics -> {
        // Fewer than two topics: a split cannot balance anything.
        if (topics == null || topics.size() <= 1) {
            return CompletableFuture.completedFuture(null);
        }
        List<Long> topicNameHashList = new ArrayList<>(topics.size());
        for (String topic : topics) {
            topicNameHashList.add(bundle.getNamespaceBundleFactory().getLongHashCode(topic));
        }
        Collections.sort(topicNameHashList);
        // Math.max guards the size==2 case where size/2 - 1 would be 0 anyway
        // but size could make the index negative for smaller lists.
        long splitStart = topicNameHashList.get(Math.max((topicNameHashList.size() / 2) - 1, 0));
        long splitEnd = topicNameHashList.get(topicNameHashList.size() / 2);
        // Overflow-safe midpoint between the two middle hashes.
        long splitMiddle = splitStart + (splitEnd - splitStart) / 2;
        return CompletableFuture.completedFuture(Collections.singletonList(splitMiddle));
    });
}
// Recomputes the expected midpoint independently (crc32 hashes, sorted, middle
// pair averaged) and checks the algorithm returns exactly that boundary.
@SuppressWarnings("UnstableApiUsage")
@Test
public void testAlgorithmReturnCorrectResult() {
    // -- algorithm
    TopicCountEquallyDivideBundleSplitAlgorithm algorithm = new TopicCountEquallyDivideBundleSplitAlgorithm();
    List<String> mockTopics = Lists.newArrayList("a", "b", "c");
    // -- calculate the mock result
    NamespaceService namespaceServiceForMockResult = mock(NamespaceService.class);
    NamespaceBundle namespaceBundleForMockResult = mock(NamespaceBundle.class);
    doReturn(CompletableFuture.completedFuture(mockTopics))
            .when(namespaceServiceForMockResult).getOwnedTopicListForNamespaceBundle(namespaceBundleForMockResult);
    List<Long> hashList = new ArrayList<>();
    NamespaceBundleFactory namespaceBundleFactoryForMockResult = mock(NamespaceBundleFactory.class);
    mockTopics.forEach((topic) -> {
        long hashValue = Hashing.crc32().hashString(topic, StandardCharsets.UTF_8).padToLong();
        doReturn(namespaceBundleFactoryForMockResult)
                .when(namespaceBundleForMockResult).getNamespaceBundleFactory();
        doReturn(hashValue)
                .when(namespaceBundleFactoryForMockResult).getLongHashCode(topic);
        hashList.add(hashValue);
    });
    Collections.sort(hashList);
    long splitStart = hashList.get(Math.max((hashList.size() / 2) - 1, 0));
    long splitEnd = hashList.get(hashList.size() / 2);
    long splitMiddleForMockResult = splitStart + (splitEnd - splitStart) / 2;
    // -- do test
    NamespaceService mockNamespaceService = mock(NamespaceService.class);
    NamespaceBundle mockNamespaceBundle = mock(NamespaceBundle.class);
    doReturn(CompletableFuture.completedFuture(mockTopics))
            .when(mockNamespaceService).getOwnedTopicListForNamespaceBundle(mockNamespaceBundle);
    NamespaceBundleFactory mockNamespaceBundleFactory = mock(NamespaceBundleFactory.class);
    mockTopics.forEach((topic) -> {
        doReturn(mockNamespaceBundleFactory)
                .when(mockNamespaceBundle).getNamespaceBundleFactory();
        long hashValue = Hashing.crc32().hashString(topic, StandardCharsets.UTF_8).padToLong();
        doReturn(hashValue)
                .when(mockNamespaceBundleFactory).getLongHashCode(topic);
    });
    assertEquals((long) algorithm.getSplitBoundary(new BundleSplitOption(mockNamespaceService, mockNamespaceBundle, null)).join().get(0),
            splitMiddleForMockResult);
}
/**
 * Deletes local state-store directories that do not belong to any running
 * persistent query. Queries on the shared runtime (bin-packed) store state in
 * per-query subdirectories named {@code __<queryId>__} under the application
 * id directory, so those are matched and cleaned individually.
 *
 * @param persistentQueries the queries whose state must be kept
 */
public void cleanupLeakedQueries(final List<PersistentQueryMetadata> persistentQueries) {
    // Build the set of state-store paths that must be kept alive.
    final Set<String> stateStoreNames = persistentQueries
        .stream()
        .flatMap(s -> {
            final List<String> doNotDelete = new ArrayList<>(
                Collections.singletonList(s.getQueryApplicationId()));
            if (s instanceof BinPackedPersistentQueryMetadataImpl) {
                // Shared-runtime queries nest state under <appId>/__<queryId>__
                doNotDelete.add(s.getQueryApplicationId() + "/__" + s.getQueryId().toString() + "__");
            }
            return doNotDelete.stream();
        })
        .collect(Collectors.toSet());
    final String[] stateDirFileNames = new File(stateDir).list();
    if (stateDirFileNames == null) {
        LOG.info("No state stores to clean up");
        return;
    }
    // BUGFIX: the previous pattern "__*__" is a regex that only matches runs
    // of underscores ('_' then '_'* then "__"); "__.*__" is required to match
    // per-query directories of the form __<queryId>__.
    final Set<String> allStateStores = Arrays.stream(stateDirFileNames)
        .flatMap(f -> {
            final String[] fileNames = new File(stateDir + "/" + f).list();
            if (null == fileNames) {
                return Stream.of(f);
            } else if (Arrays.stream(fileNames).anyMatch(t -> t.matches("__.*__"))) {
                return Arrays.stream(fileNames)
                    .filter(t -> t.matches("__.*__"))
                    .map(s -> f + "/" + s);
            } else {
                return Stream.of(f);
            }
        })
        .collect(Collectors.toSet());
    allStateStores.removeAll(stateStoreNames);
    allStateStores.forEach(storeName -> {
        // Split once instead of re-splitting the same string per argument.
        final String[] pathParts = storeName.split("/");
        final String[] idParts = storeName.split("__");
        queryCleanupService.addCleanupTask(
            new QueryCleanupService.QueryCleanupTask(
                serviceContext,
                pathParts[0],
                idParts.length > 1 ? Optional.of(idParts[1]) : Optional.empty(),
                false,
                stateDir,
                ksqlConfig.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG),
                ksqlConfig.getString(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG)));
    });
}
// Verifies that cleanupLeakedQueries deletes a state-store directory belonging to no running
// query, leaves the state dir itself intact, and logs a WARN explaining the deletion.
@Test
public void shouldDeleteExtraStateStores() {
    // Given: a root-logger appender to capture the warning, and a leaked store directory
    final TestAppender appender = new TestAppender();
    final Logger logger = Logger.getRootLogger();
    logger.addAppender(appender);
    File fakeStateStore = new File(tempFile.getAbsolutePath() + "/fakeStateStore");
    if (!fakeStateStore.exists()) {
        assertTrue(fakeStateStore.mkdirs());
    }

    // When: cleaning up with no live persistent queries
    cleanup.cleanupLeakedQueries(Collections.emptyList());
    awaitCleanupComplete();

    // Then: the leaked store is gone, the state dir remains, and a WARN was logged first
    assertFalse(fakeStateStore.exists());
    assertTrue(tempFile.exists());
    final List<LoggingEvent> log = appender.getLog();
    final LoggingEvent firstLogEntry = log.get(0);
    assertThat((String) firstLogEntry.getMessage(), is(
        "Deleted local state store for non-existing query fakeStateStore. "
            + "This is not expected and was likely due to a race condition when the query was dropped before."));
    assertThat(firstLogEntry.getLevel(), is(Level.WARN));
}
/**
 * Hex-encodes a non-negative quantity with a "0x" prefix and no leading zeros.
 *
 * @param value the quantity to encode; must not be negative
 * @return the "0x"-prefixed hexadecimal representation
 * @throws MessageEncodingException if {@code value} is negative
 */
public static String encodeQuantity(BigInteger value) {
    // Guard clause first: negative quantities have no defined encoding.
    if (value.signum() == -1) {
        throw new MessageEncodingException("Negative values are not supported");
    }
    return HEX_PREFIX + value.toString(16);
}
// Verifies hex-quantity encoding for zero, small, boundary (Long.MAX_VALUE) and
// 128-bit values. FIX: assertEquals takes (expected, actual); the original passed them
// reversed, which produces misleading failure messages.
@Test
public void testQuantityEncode() {
    assertEquals("0x0", Numeric.encodeQuantity(BigInteger.valueOf(0)));
    assertEquals("0x1", Numeric.encodeQuantity(BigInteger.valueOf(1)));
    assertEquals("0x400", Numeric.encodeQuantity(BigInteger.valueOf(1024)));
    assertEquals("0x7fffffffffffffff", Numeric.encodeQuantity(BigInteger.valueOf(Long.MAX_VALUE)));
    assertEquals(
        "0x99dc848b94efc27edfad28def049810f",
        Numeric.encodeQuantity(new BigInteger("204516877000845695339750056077105398031")));
}
/**
 * Returns the invocation sensor for a function, creating and registering its avg/max/count/rate
 * metrics on first use. A sensor that already has metrics is returned unchanged, so repeated
 * calls are idempotent.
 *
 * @param metrics             the metrics registry
 * @param sensorName          base name; each stat is registered as "&lt;sensorName&gt;-&lt;stat&gt;"
 * @param groupName           the metric group
 * @param functionDescription substituted into each stat's description template
 */
public static Sensor getInvocationSensor(
    final Metrics metrics,
    final String sensorName,
    final String groupName,
    final String functionDescription
) {
  final Sensor invocationSensor = metrics.sensor(sensorName);
  // Already populated: another caller registered the stats first.
  if (invocationSensor.hasMetrics()) {
    return invocationSensor;
  }
  invocationSensor.add(
      metrics.metricName(sensorName + "-avg", groupName, String.format(AVG_DESC, functionDescription)),
      new Avg());
  invocationSensor.add(
      metrics.metricName(sensorName + "-max", groupName, String.format(MAX_DESC, functionDescription)),
      new Max());
  invocationSensor.add(
      metrics.metricName(sensorName + "-count", groupName, String.format(COUNT_DESC, functionDescription)),
      new WindowedCount());
  invocationSensor.add(
      metrics.metricName(sensorName + "-rate", groupName, String.format(RATE_DESC, functionDescription)),
      new Rate(TimeUnit.SECONDS, new WindowedCount()));
  return invocationSensor;
}
// Verifies that getInvocationSensor registers a Max stat under the "<sensor>-max" metric name.
@Test
public void shouldRegisterMaxMetric() {
    // Given: the registry resolves the "-max" metric name to a known MetricName
    when(metrics.metricName(SENSOR_NAME + "-max", GROUP_NAME, description(MAX_DESC)))
        .thenReturn(specificMetricName);

    // When:
    FunctionMetrics
        .getInvocationSensor(metrics, SENSOR_NAME, GROUP_NAME, FUNC_NAME);

    // Then: a Max stat was added under that metric name
    verify(sensor).add(eq(specificMetricName), isA(Max.class));
}
/**
 * Parses the canonical string form of a range, e.g. {@code "[1,3]"}, {@code "(-5,5]"},
 * {@code "(,)"} or {@code "empty"}.
 *
 * @param str       the range literal; '[' / ']' mean inclusive bounds, '(' / ')' exclusive;
 *                  an empty or "infinity"-suffixed bound is unbounded on that side
 * @param converter parses a finite bound's text into {@code T}
 * @param clazz     the bound type, used for the empty range and the resulting Range
 * @throws IllegalArgumentException if no comma separates the two bounds
 */
public static <T extends Comparable<? super T>> Range<T> ofString(String str, Function<String, T> converter, Class<T> clazz) {
    if (str.equals(EMPTY)) {
        return emptyRange(clazz);
    }
    // Bound inclusivity is encoded as a bit mask built from the bracket characters.
    int mask = str.charAt(0) == '[' ? LOWER_INCLUSIVE : LOWER_EXCLUSIVE;
    mask |= str.charAt(str.length() - 1) == ']' ? UPPER_INCLUSIVE : UPPER_EXCLUSIVE;
    int delim = str.indexOf(',');
    if (delim == -1) {
        throw new IllegalArgumentException("Cannot find comma character");
    }
    // Bound substrings, without the surrounding bracket characters.
    String lowerStr = str.substring(1, delim);
    String upperStr = str.substring(delim + 1, str.length() - 1);
    // Empty or "infinity"-suffixed bounds mark that side as infinite.
    if (lowerStr.length() == 0 || lowerStr.endsWith(INFINITY)) {
        mask |= LOWER_INFINITE;
    }
    if (upperStr.length() == 0 || upperStr.endsWith(INFINITY)) {
        mask |= UPPER_INFINITE;
    }
    T lower = null;
    T upper = null;
    // Only finite bounds are converted; infinite ones stay null.
    if ((mask & LOWER_INFINITE) != LOWER_INFINITE) {
        lower = converter.apply(lowerStr);
    }
    if ((mask & UPPER_INFINITE) != UPPER_INFINITE) {
        upper = converter.apply(upperStr);
    }
    return new Range<>(lower, upper, mask, clazz);
}
// Exercises Range.ofString parsing: closed/open brackets, missing (infinite) bounds,
// the fully unbounded range, and containment between the universal and empty ranges.
@Test
public void ofStringTest() {
    // Closed range: both bounds present and inclusive.
    assertThat(integerRange("[1,3]").lower(), is(1));
    assertThat(integerRange("[1,3]").upper(), is(3));
    assertThat(integerRange("[1,3]").isUpperBoundClosed(), is(true));
    assertThat(integerRange("[1,3]").isLowerBoundClosed(), is(true));
    // Missing lower bound: unbounded below, closed above.
    assertThat(integerRange("[,3]").lower(), is(nullValue()));
    assertThat(integerRange("[,3]").upper(), is(3));
    assertThat(integerRange("[,3]").hasLowerBound(), is(false));
    assertThat(integerRange("[,3]").hasUpperBound(), is(true));
    assertThat(integerRange("[,3]").isUpperBoundClosed(), is(true));
    assertThat(integerRange("[,3]").isLowerBoundClosed(), is(false));
    // Both bounds missing: fully unbounded; neither side counts as closed.
    assertThat(integerRange("[,]").lower(), is(nullValue()));
    assertThat(integerRange("[,]").upper(), is(nullValue()));
    assertThat(integerRange("[,]").hasLowerBound(), is(false));
    assertThat(integerRange("[,]").hasUpperBound(), is(false));
    assertThat(integerRange("[,]").isUpperBoundClosed(), is(false));
    assertThat(integerRange("[,]").isLowerBoundClosed(), is(false));
    // Half-open range.
    assertThat(integerRange("(-5,5]").isUpperBoundClosed(), is(true));
    assertThat(integerRange("(-5,5]").isLowerBoundClosed(), is(false));
    // The universal range contains empty, but not vice versa.
    assertThat(integerRange("(,)").contains(integerRange("empty")), is(true));
    assertThat(integerRange("empty").contains(integerRange("(,)")), is(false));
}
/**
 * Enables or disables header validation on decode.
 *
 * @param validate whether headers should be validated
 * @return the owner object produced by {@code get()}, for builder-style chaining
 */
public T validateHeaders(boolean validate) {
    validateHeaders = validate;
    return get();
}
// Flipping validateHeaders must change only that option; every sibling decoder option
// must stay at its default.
@Test
void validateHeaders() {
    checkDefaultValidateHeaders(conf);
    conf.validateHeaders(false);
    assertThat(conf.validateHeaders()).as("validate headers").isFalse();
    // All other options remain untouched.
    checkDefaultMaxInitialLineLength(conf);
    checkDefaultMaxHeaderSize(conf);
    checkDefaultMaxChunkSize(conf);
    checkDefaultInitialBufferSize(conf);
    checkDefaultAllowDuplicateContentLengths(conf);
}
/**
 * Returns this plugin's ordering value — the code of the PARAM_MAPPING plugin enum entry.
 */
@Override
public int getOrder() {
    final int pluginOrder = PluginEnum.PARAM_MAPPING.getCode();
    return pluginOrder;
}
// The plugin's order must equal the PARAM_MAPPING enum code.
// FIX: assertEquals takes (expected, actual); the original passed them reversed,
// which produces misleading failure messages.
@Test
public void testGetOrder() {
    assertEquals(PluginEnum.PARAM_MAPPING.getCode(), this.paramMappingPlugin.getOrder());
}
/**
 * Associates {@code value} with {@code key}, returning the previously mapped value
 * or {@code null} if the key was absent.
 *
 * @throws NullPointerException if {@code key} is null, or if {@code value} is null
 *                              while null values are disallowed
 */
@CanIgnoreReturnValue
@Override
public V put(K key, V value) {
    // Null key is always rejected; null value only when the map disallows it.
    if (key == null) {
        throw new NullPointerException("key == null");
    }
    if (value == null && !allowNullValues) {
        throw new NullPointerException("value == null");
    }
    // find(key, true) locates the node, creating it if necessary.
    Node<K, V> node = find(key, true);
    V previous = node.value;
    node.value = value;
    return previous;
}
// Looking up a key of a non-Comparable type must simply report "not contained"
// rather than throwing from the tree's key comparison.
@Test
public void testContainsNonComparableKeyReturnsFalse() {
    LinkedTreeMap<String, String> map = new LinkedTreeMap<>();
    map.put("a", "android");
    assertThat(map).doesNotContainKey(new Object());
}
/**
 * Applies a default run configuration to the given execution configuration, dispatching to
 * the transformation- or job-specific configurator, then clears the engine-related variables
 * left over from any previous run configuration.
 */
@Override
public void execute( RunConfiguration runConfiguration, ExecutionConfiguration executionConfiguration,
                     AbstractMeta meta, VariableSpace variableSpace, Repository repository ) throws KettleException {
  DefaultRunConfiguration defaultRunConfiguration = (DefaultRunConfiguration) runConfiguration;
  if ( executionConfiguration instanceof TransExecutionConfiguration ) {
    configureTransExecution( (TransExecutionConfiguration) executionConfiguration, defaultRunConfiguration,
        variableSpace, meta, repository );
  }
  if ( executionConfiguration instanceof JobExecutionConfiguration ) {
    configureJobExecution( (JobExecutionConfiguration) executionConfiguration, defaultRunConfiguration,
        variableSpace, meta, repository );
  }
  // Reset every engine variable so stale values from another configuration cannot leak through.
  for ( String engineVariable : new String[] { "engine", "engine.remote", "engine.scheme", "engine.url" } ) {
    variableSpace.setVariable( engineVariable, null );
  }
}
// A non-local run configuration naming a remote server must configure the job execution
// to run remotely on the slave server resolved from the meta (and not locally).
@Test
public void testExecuteRemoteJob() throws Exception {
    // Given: a remote run configuration pointing at "Test Server"
    DefaultRunConfiguration defaultRunConfiguration = new DefaultRunConfiguration();
    defaultRunConfiguration.setName( "Default Configuration" );
    defaultRunConfiguration.setLocal( false );
    defaultRunConfiguration.setRemote( true );
    defaultRunConfiguration.setServer( "Test Server" );

    JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration();
    doReturn( slaveServer ).when( abstractMeta ).findSlaveServer( "Test Server" );

    // When:
    defaultRunConfigurationExecutor
        .execute( defaultRunConfiguration, jobExecutionConfiguration, abstractMeta, variableSpace, null );

    // Then: remote execution against the resolved slave server
    assertFalse( jobExecutionConfiguration.isExecutingLocally() );
    assertTrue( jobExecutionConfiguration.isExecutingRemotely() );
    assertEquals( jobExecutionConfiguration.getRemoteServer(), slaveServer );
}
/**
 * Reports whether the local job is an allowed concurrent state change relative to the
 * storage-provider copy: the base rule must hold, the local job must be exactly one
 * version behind, and the job must previously have been in a FAILED state.
 */
@Override
public boolean matches(Job localJob, Job storageProviderJob) {
    // Base interface rule first; bail out early if it does not apply.
    if (!AllowedConcurrentStateChange.super.matches(localJob, storageProviderJob)) {
        return false;
    }
    final boolean oneVersionBehind = localJob.getVersion() == storageProviderJob.getVersion() - 1;
    return oneVersionBehind && localJob.getLastJobStateOfType(FailedState.class).isPresent();
}
// A previously failed job that was re-scheduled (version n) while a stale local copy was
// concurrently enqueued (version n-1) must be recognized as an allowed concurrent
// state change.
@Test
void ifJobHasEnqueuedStateAndWasScheduledTooEarlyByJobZooKeeperItWillMatch() {
    // Given: the storage provider holds the scheduled job at version 5...
    final Job scheduledJob = aJob()
        .withFailedState()
        .withScheduledState()
        .withVersion(5)
        .build();
    // ...while the local copy was enqueued at version 4 (one behind).
    final Job enqueuedJob = aCopyOf(scheduledJob)
        .withEnqueuedState(Instant.now())
        .withVersion(4)
        .build();

    // When:
    boolean matchesAllowedStateChange = allowedStateChange.matches(enqueuedJob, scheduledJob);

    // Then:
    assertThat(matchesAllowedStateChange).isTrue();
}
/**
 * Requests a new YARN container matching the given TaskExecutor process spec.
 *
 * <p>The returned future completes with the worker node once a container is allocated.
 * If the spec cannot be mapped to a YARN priority/resource pair (typically because it
 * exceeds YARN's max container size), the future completes exceptionally. Cancelling the
 * future withdraws the pending container request and restores the slow heartbeat interval
 * once nothing is outstanding.
 */
@Override
public CompletableFuture<YarnWorkerNode> requestResource(
        TaskExecutorProcessSpec taskExecutorProcessSpec) {
    checkInitialized();

    final CompletableFuture<YarnWorkerNode> requestResourceFuture = new CompletableFuture<>();

    final Optional<TaskExecutorProcessSpecContainerResourcePriorityAdapter.PriorityAndResource>
            priorityAndResourceOpt =
                    taskExecutorProcessSpecContainerResourcePriorityAdapter
                            .getPriorityAndResource(taskExecutorProcessSpec);

    if (!priorityAndResourceOpt.isPresent()) {
        // Spec does not fit any YARN container profile: fail the request immediately.
        requestResourceFuture.completeExceptionally(
                new ResourceManagerException(
                        String.format(
                                "Could not compute the container Resource from the given TaskExecutorProcessSpec %s. "
                                        + "This usually indicates the requested resource is larger than Yarn's max container resource limit.",
                                taskExecutorProcessSpec)));
    } else {
        final Priority priority = priorityAndResourceOpt.get().getPriority();
        final Resource resource = priorityAndResourceOpt.get().getResource();

        // Cleanup hook: undo the bookkeeping for this request if the caller cancels it.
        FutureUtils.assertNoException(
                requestResourceFuture.handle(
                        (ignore, t) -> {
                            if (t == null) {
                                return null;
                            }
                            if (t instanceof CancellationException) {
                                // Remove this future from the per-spec pending queue...
                                final Queue<CompletableFuture<YarnWorkerNode>>
                                        pendingRequestResourceFutures =
                                                requestResourceFutures.getOrDefault(
                                                        taskExecutorProcessSpec,
                                                        new LinkedList<>());
                                Preconditions.checkState(
                                        pendingRequestResourceFutures.remove(
                                                requestResourceFuture));
                                log.info(
                                        "cancelling pending request with priority {}, remaining {} pending container requests.",
                                        priority,
                                        pendingRequestResourceFutures.size());
                                // ...and withdraw one matching AMRM container request; the
                                // consistency check covers the count before this removal.
                                int pendingRequestsSizeBeforeCancel =
                                        pendingRequestResourceFutures.size() + 1;
                                final Iterator<AMRMClient.ContainerRequest>
                                        pendingContainerRequestIterator =
                                                getPendingRequestsAndCheckConsistency(
                                                                priority,
                                                                resource,
                                                                pendingRequestsSizeBeforeCancel)
                                                        .iterator();
                                Preconditions.checkState(
                                        pendingContainerRequestIterator.hasNext());
                                final AMRMClient.ContainerRequest pendingRequest =
                                        pendingContainerRequestIterator.next();
                                removeContainerRequest(pendingRequest);
                                if (pendingRequestResourceFutures.isEmpty()) {
                                    requestResourceFutures.remove(taskExecutorProcessSpec);
                                }
                                // With nothing outstanding, fall back to the slow heartbeat.
                                if (getNumRequestedNotAllocatedWorkers() <= 0) {
                                    resourceManagerClient.setHeartbeatInterval(
                                            yarnHeartbeatIntervalMillis);
                                }
                            } else {
                                log.error("Error completing resource request.", t);
                                ExceptionUtils.rethrow(t);
                            }
                            return null;
                        }));

        addContainerRequest(resource, priority);

        // make sure we transmit the request fast and receive fast news of granted allocations
        resourceManagerClient.setHeartbeatInterval(containerRequestHeartbeatIntervalMillis);

        requestResourceFutures
                .computeIfAbsent(taskExecutorProcessSpec, ignore -> new LinkedList<>())
                .add(requestResourceFuture);

        log.info(
                "Requesting new TaskExecutor container with resource {}, priority {}.",
                taskExecutorProcessSpec,
                priority);
    }

    return requestResourceFuture;
}
// Cancelling a pending resource request must withdraw the corresponding YARN container
// request and must never start a container for it.
@Test
void testCancelRequestedResource() throws Exception {
    new Context() {
        {
            addContainerRequestFutures.add(new CompletableFuture<>());
            // Complete the tracking future whenever a container request is added.
            testingYarnAMRMClientAsyncBuilder.setAddContainerRequestConsumer(
                    (ignored1, ignored2) ->
                            addContainerRequestFutures
                                    .get(
                                            addContainerRequestFuturesNumCompleted
                                                    .getAndIncrement())
                                    .complete(null));
            runTest(
                    () -> {
                        // Request a resource, then immediately cancel the returned future.
                        runInMainThread(
                                () -> {
                                    CompletableFuture<YarnWorkerNode> requestFuture =
                                            getDriver()
                                                    .requestResource(
                                                            testingTaskExecutorProcessSpec);
                                    requestFuture.cancel(true);
                                });
                        // The request was added and then removed; no container was started.
                        verifyFutureCompleted(addContainerRequestFutures.get(0));
                        verifyFutureCompleted(removeContainerRequestFuture);
                        assertThat(startContainerAsyncFuture.isDone()).isFalse();
                    });
        }
    };
}
/**
 * SQL INDETERMINATE operator for INTEGER: a value is indeterminate iff it is SQL NULL.
 * The numeric value itself is irrelevant; only the null flag matters.
 */
@ScalarOperator(INDETERMINATE)
@SqlType(StandardTypes.BOOLEAN)
public static boolean indeterminate(@SqlType(StandardTypes.INTEGER) long value, @IsNull boolean isNull) {
    final boolean valueIsSqlNull = isNull;
    return valueIsSqlNull;
}
// INDETERMINATE on INTEGER is true only for NULL; any concrete value — positive, zero,
// negative, or produced by a cast — is determinate.
@Test
public void testIndeterminate() {
    assertOperator(INDETERMINATE, "cast(null as integer)", BOOLEAN, true);
    assertOperator(INDETERMINATE, "12", BOOLEAN, false);
    assertOperator(INDETERMINATE, "0", BOOLEAN, false);
    assertOperator(INDETERMINATE, "-23", BOOLEAN, false);
    assertOperator(INDETERMINATE, "cast(1.4 as integer)", BOOLEAN, false);
}
/**
 * Removes all entries from this shard's backing data store.
 */
public void clearData() {
    dataStore.clear();
}
// Seeds the shard's private dataStore field via reflection, then verifies that
// clearData() empties it.
@Test
void testClearData() {
    try {
        var dataMap = new HashMap<Integer, Data>();
        dataMap.put(1, data);
        // Inject the pre-populated map into the private field.
        var field = Shard.class.getDeclaredField("dataStore");
        field.setAccessible(true);
        field.set(shard, dataMap);
        shard.clearData();
        // Read the field back: it must be the same (now empty) map.
        dataMap = (HashMap<Integer, Data>) field.get(shard);
        assertEquals(0, dataMap.size());
    } catch (NoSuchFieldException | IllegalAccessException e) {
        fail("Fail to modify field access.");
    }
}
/**
 * Validates an expiration claim value.
 *
 * @param claimName  the claim's name, used in error messages
 * @param claimValue the claim's value
 * @return the validated value as a primitive long
 * @throws ValidateException if the value is null or negative
 */
public static long validateExpiration(String claimName, Long claimValue) throws ValidateException {
    if (claimValue == null) {
        throw new ValidateException(String.format("%s value must be non-null", claimName));
    }
    if (claimValue < 0) {
        throw new ValidateException(
            String.format("%s value must be non-negative; value given was \"%s\"", claimName, claimValue));
    }
    return claimValue;
}
// A null "exp" claim must be rejected with a ValidateException.
@Test
public void testValidateExpirationDisallowsNull() {
    assertThrows(ValidateException.class,
        () -> ClaimValidationUtils.validateExpiration("exp", null));
}
/**
 * Appends an initialization block to this class model.
 *
 * @param initializationBlock the block to add; must not be {@code null}
 * @throws NullPointerException if {@code initializationBlock} is {@code null}
 */
public void addInitializationBlock(InitializationBlock initializationBlock) {
    // Fail fast rather than storing a null that would surface much later.
    if (initializationBlock == null) {
        throw new NullPointerException("initializationBlock == null");
    }
    initializationBlocks.add(initializationBlock);
}
// Each added initialization block (static or instance) must grow the class's block list by one.
@Test
void testAddInitializationBlock() {
    InnerClass clazz = new InnerClass("com.foo.UserClass");
    assertEquals(0, clazz.getInitializationBlocks().size());
    clazz.addInitializationBlock(new InitializationBlock(false));
    assertEquals(1, clazz.getInitializationBlocks().size());
    clazz.addInitializationBlock(new InitializationBlock(true));
    assertEquals(2, clazz.getInitializationBlocks().size());
}
/**
 * Resolves the ECS cluster name: an explicitly configured name wins; otherwise, when
 * running inside ECS, the current cluster is read from the metadata API.
 *
 * @throws InvalidConfigurationException if no cluster is configured and we are not on ECS
 */
static String resolveCluster(AwsConfig awsConfig, AwsMetadataApi metadataApi, Environment environment) {
    final String configuredCluster = awsConfig.getCluster();
    if (!isNullOrEmptyAfterTrim(configuredCluster)) {
        return configuredCluster;
    }
    if (!environment.isRunningOnEcs()) {
        throw new InvalidConfigurationException("You must define 'cluster' property if not running inside ECS cluster");
    }
    // On ECS with no explicit cluster: fall back to the cluster we are running in.
    String cluster = metadataApi.clusterEcs();
    LOGGER.info("No ECS cluster defined, using current cluster: " + cluster);
    return cluster;
}
// An explicitly configured cluster name is returned as-is; neither the metadata API nor
// the environment is consulted, so both may safely be null here.
@Test
public void resolveClusterAwsConfig() {
    // given
    String cluster = "service-name";
    AwsConfig config = AwsConfig.builder().setCluster(cluster).build();

    // when
    String result = AwsClientConfigurator.resolveCluster(config, null, null);

    // then
    assertEquals(cluster, result);
}
/**
 * Returns where this keyboard extension attaches — one of the
 * {@code KeyboardExtension.KeyboardExtensionType} values (e.g. TYPE_BOTTOM).
 */
@KeyboardExtension.KeyboardExtensionType
public int getExtensionType() {
    return mExtensionType;
}
// The default enabled bottom-row extension must be the "regular with voice" add-on:
// known id, TYPE_BOTTOM extension type, and the expected keyboard XML resource.
@Test
public void testGetCurrentKeyboardExtensionBottomDefault() throws Exception {
    KeyboardExtension extension =
        AnyApplication.getBottomRowFactory(getApplicationContext()).getEnabledAddOn();
    Assert.assertNotNull(extension);
    Assert.assertEquals("09f8f280-dee2-11e0-9572-0800200c9a66", extension.getId());
    Assert.assertEquals(KeyboardExtension.TYPE_BOTTOM, extension.getExtensionType());
    Assert.assertEquals(R.xml.ext_kbd_bottom_row_regular_with_voice, extension.getKeyboardResId());
}
/**
 * Kicks off an asynchronous position reset for every partition that currently needs one;
 * a no-op when no partition requires resetting.
 */
public void resetPositionsIfNeeded() {
    final Map<TopicPartition, Long> resetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();
    if (!resetTimestamps.isEmpty()) {
        resetPositionsAsync(resetTimestamps);
    }
}
// A partition flagged for LATEST offset reset must, after the ListOffsets round trip,
// end up fetchable at the broker-reported end offset with the reset flag cleared.
@Test
public void testUpdateFetchPositionResetToLatestOffset() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);

    client.updateMetadata(initialUpdateResponse);
    // Broker answers the LATEST timestamp lookup with offset 5.
    client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP),
        listOffsetResponse(Errors.NONE, 1L, 5L));

    offsetFetcher.resetPositionsIfNeeded();
    consumerClient.pollNoWakeup();

    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    assertTrue(subscriptions.isFetchable(tp0));
    assertEquals(5, subscriptions.position(tp0).offset);
}
public static void checkNullOrNotEmpty(@Nullable String value, String propertyName) { if (value == null) { // pass return; } Preconditions.checkArgument( !value.trim().isEmpty(), "Property '" + propertyName + "' cannot be an empty string"); }
// A non-null, non-empty value must pass validation without throwing.
@Test
public void testCheckNullOrNotEmpty_valuePass() {
    Validator.checkNullOrNotEmpty("value", "test");
}
/**
 * Resets the sampler: a last-sample time of 0 means "never sampled", so the next
 * sample is taken immediately.
 */
public void reset() {
    lastSampleTimeMillis = 0;
}
// reset() must zero the last-sample timestamp regardless of its previous value.
@Test
public void testReset() throws Exception {
    sampler.lastSampleTimeMillis = 100L;
    sampler.reset();
    assertThat(sampler.lastSampleTimeMillis, equalTo(0L));
}
/**
 * Returns the number of thread-local values set on the current thread, or 0 when the
 * thread has no InternalThreadLocalMap yet (getIfSet never allocates one).
 */
public static int size() {
    final InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.getIfSet();
    return threadLocalMap == null ? 0 : threadLocalMap.size();
}
// size() must count every InternalThreadLocal set on the current thread; removeAll() at
// the end keeps the thread-local map from leaking into other tests.
@Test
void testSize() {
    final InternalThreadLocal<Integer> internalThreadLocal = new InternalThreadLocal<Integer>();
    internalThreadLocal.set(1);
    Assertions.assertEquals(1, InternalThreadLocal.size(), "size method is wrong!");
    final InternalThreadLocal<String> internalThreadLocalString = new InternalThreadLocal<String>();
    internalThreadLocalString.set("value");
    Assertions.assertEquals(2, InternalThreadLocal.size(), "size method is wrong!");
    InternalThreadLocal.removeAll();
}
/**
 * Returns the cached millisecond-precision date-time formatter
 * (parses values like {@code "1970-01-01 00:00:00.000"}).
 */
public static DateTimeFormatter getLongMillsFormatter() {
    return LONG_MILLS;
}
// The long-millis formatter must parse "yyyy-MM-dd HH:mm:ss.SSS"-shaped input; the
// TemporalAccessor's toString is used to pin the resolved instant.
@Test
void assertGetLongMillsFormatter() {
    assertThat(DateTimeFormatterFactory.getLongMillsFormatter().parse("1970-01-01 00:00:00.000").toString(),
        is("{},ISO resolved to 1970-01-01T00:00"));
}
/**
 * Looks up an environment-backed property: first by the name as given, then by its
 * upper-cased form (skipped when the key is already upper case).
 *
 * @return the environment value, or {@code null} if no variant of the key resolves
 */
@Override
String getProperty(String key) {
    String checkedKey = checkPropertyName(key);
    if (checkedKey == null) {
        // FIX: use locale-independent casing. The default toUpperCase() is
        // locale-sensitive (e.g. Turkish dotted/dotless 'i') and would corrupt
        // ASCII environment-variable lookups under such locales.
        final String upperCaseKey = key.toUpperCase(java.util.Locale.ROOT);
        if (!upperCaseKey.equals(key)) {
            checkedKey = checkPropertyName(upperCaseKey);
        }
    }
    if (checkedKey == null) {
        return null;
    }
    return env.get(checkedKey);
}
// An upper-case dotted key must resolve to the matching environment value
// (presumably checkPropertyName maps '.' to '_' for env lookup — confirm in source).
@Test
void testGetEnvForUpperCaseKeyWithDot() {
    assertEquals("value4", systemEnvPropertySource.getProperty("TEST.CASE.4"));
}
/**
 * Attempts to parse {@code value} as this column type's literal.
 *
 * @param value the raw string value
 * @return {@code true} when the string is a valid, in-range value for the type;
 *         {@code false} otherwise — implementations return {@code false} on
 *         overflow/underflow and other out-of-bound input (see the bound-check tests)
 */
public abstract boolean parse(String value);
// Exercises every numeric/string parser's bound checking: values inside a type's range
// parse successfully, values just past either bound are rejected, special floating
// values (Infinity, NaN) fail, and decimals/strings exceeding the declared
// precision/length fail.
@Test
public void testBoundCheck() {
    // tinyint
    TinyIntParser tinyIntParser = new TinyIntParser();
    // 1 normal
    String tinyint = "100";
    Assert.assertTrue(tinyIntParser.parse(tinyint));
    // 2 upper (max+1)
    String tinyintUpper = "128";
    Assert.assertFalse(tinyIntParser.parse(tinyintUpper));
    // 3 lower (min-1)
    String tinyintLower = "-129";
    Assert.assertFalse(tinyIntParser.parse(tinyintLower));

    // smallint
    SmallIntParser smallIntParser = new SmallIntParser();
    // 1 normal
    String smallint = "100";
    Assert.assertTrue(smallIntParser.parse(smallint));
    // 2 upper
    String smallintUpper = "32768";
    Assert.assertFalse(smallIntParser.parse(smallintUpper));
    // 3 lower
    String smallintLower = "-32769";
    Assert.assertFalse(smallIntParser.parse(smallintLower));

    // int
    IntParser intParser = new IntParser();
    // 1 normal
    String intValue = "100";
    Assert.assertTrue(intParser.parse(intValue));
    // 2 upper
    String intUpper = "2147483648";
    Assert.assertFalse(intParser.parse(intUpper));
    // 3 lower
    String intLower = "-2147483649";
    Assert.assertFalse(intParser.parse(intLower));

    // bigint
    BigIntParser bigIntParser = new BigIntParser();
    // 1 normal
    String bigint = "100";
    Assert.assertTrue(bigIntParser.parse(bigint));
    // 2 upper
    String bigintUpper = "9223372036854775808";
    Assert.assertFalse(bigIntParser.parse(bigintUpper));
    // 3 lower
    String bigintLower = "-9223372036854775809";
    Assert.assertFalse(bigIntParser.parse(bigintLower));

    // largeint (128-bit)
    LargeIntParser largeIntParser = new LargeIntParser();
    // 1 normal
    String largeint = "100";
    Assert.assertTrue(largeIntParser.parse(largeint));
    // 2 upper
    String largeintUpper = "170141183460469231731687303715884105728";
    Assert.assertFalse(largeIntParser.parse(largeintUpper));
    // 3 lower
    String largeintLower = "-170141183460469231731687303715884105729";
    Assert.assertFalse(largeIntParser.parse(largeintLower));

    // float
    FloatParser floatParser = new FloatParser();
    // normal
    String floatValue = "1.1";
    Assert.assertTrue(floatParser.parse(floatValue));
    // inf — non-finite values are rejected
    String inf = "Infinity";
    Assert.assertFalse(floatParser.parse(inf));
    // nan
    String nan = "NaN";
    // failed
    Assert.assertFalse(floatParser.parse(nan));

    // double — same finite-only rule as float
    DoubleParser doubleParser = new DoubleParser();
    // normal
    Assert.assertTrue(doubleParser.parse(floatValue));
    // inf
    Assert.assertFalse(doubleParser.parse(inf));
    // nan
    Assert.assertFalse(doubleParser.parse(nan));

    // decimal(5,3)
    EtlJobConfig.EtlColumn etlColumn = new EtlJobConfig.EtlColumn();
    etlColumn.precision = 5;
    etlColumn.scale = 3;
    DecimalParser decimalParser = new DecimalParser(etlColumn);
    // normal
    String decimalValue = "10.333";
    Assert.assertTrue(decimalParser.parse(decimalValue));
    // overflow — exceeds declared precision/scale
    String decimalOverflow = "1000.3333333333";
    Assert.assertFalse(decimalParser.parse(decimalOverflow));

    // string with max length 3
    EtlJobConfig.EtlColumn stringColumn = new EtlJobConfig.EtlColumn();
    stringColumn.stringLength = 3;
    StringParser stringParser = new StringParser(stringColumn);
    // normal
    String stringnormal = "a";
    Assert.assertTrue(stringParser.parse(stringnormal));
    // overflow — two CJK characters exceed the limit of 3 (presumably measured in
    // encoded bytes, not chars — confirm against StringParser)
    String stringoverflow = "中文";
    Assert.assertFalse(stringParser.parse(stringoverflow));
}
/**
 * Decodes an inbound MySQL command packet, wrapping the buffer in a payload that uses
 * the charset negotiated on this channel.
 */
@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf in, final List<Object> out) {
    // The charset was stored on the channel as an attribute during handshake.
    decodeCommandPacket(
            new MySQLPacketPayload(in, ctx.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get()), out);
}
// Decoding a server ERR packet must yield exactly one MySQLErrPacket in the output list.
@Test
void assertDecodeErrPacket() {
    MySQLCommandPacketDecoder commandPacketDecoder = new MySQLCommandPacketDecoder();
    List<Object> actual = new LinkedList<>();
    commandPacketDecoder.decode(channelHandlerContext, mockErrPacket(), actual);
    assertPacketByType(actual, MySQLErrPacket.class);
}
/**
 * Marks (or unmarks) this announcer's URI as excluded from load balancing by writing the
 * DO_NOT_LOAD_BALANCE uri-specific property to the service registry.
 * Synchronized so announcement operations on this instance are serialized.
 *
 * @param callback         invoked once the registry write completes
 * @param doNotLoadBalance {@code true} to exclude this URI from load balancing
 */
public synchronized void setDoNotLoadBalance(final Callback<None> callback, boolean doNotLoadBalance) {
    _server.addUriSpecificProperty(_cluster, "setDoNotLoadBalance", _uri, _partitionDataMap,
        PropertyKeys.DO_NOT_LOAD_BALANCE, doNotLoadBalance,
        getOperationCallback(callback, "setDoNotLoadBalance"));
    _log.info("setDoNotLoadBalance called for uri = {}.", _uri);
}
// Both the enable and disable cases must forward the boolean to the server as the
// DO_NOT_LOAD_BALANCE uri-specific property.
@Test
public void testSetDoNotLoadBalance() {
    _announcer.setDoNotLoadBalance(_callback, true);
    verify(_server).addUriSpecificProperty(any(), any(), any(), any(),
        eq(PropertyKeys.DO_NOT_LOAD_BALANCE), eq(true), any());

    _announcer.setDoNotLoadBalance(_callback, false);
    verify(_server).addUriSpecificProperty(any(), any(), any(), any(),
        eq(PropertyKeys.DO_NOT_LOAD_BALANCE), eq(false), any());
}
@Override public void configureAuthDataStatefulSet(V1StatefulSet statefulSet, Optional<FunctionAuthData> functionAuthData) { V1PodSpec podSpec = statefulSet.getSpec().getTemplate().getSpec(); // configure pod mount secret with auth token if (StringUtil.isNotBlank(brokerTrustCertsSecretName)) { podSpec.addVolumesItem(createTrustCertVolume()); } podSpec.addVolumesItem(createServiceAccountVolume()); podSpec.getContainers().forEach(this::addVolumeMountsToContainer); }
// Verifies the full volume/mount wiring: with a trust-cert secret configured, the pod gets
// a "ca-cert" secret volume (first) and a projected "service-account-token" volume with the
// configured expiration and audience, and the single container mounts both under /etc/auth.
@Test
public void testConfigureAuthDataStatefulSet() {
    HashMap<String, Object> config = new HashMap<>();
    config.put("brokerClientTrustCertsSecretName", "my-secret");
    config.put("serviceAccountTokenExpirationSeconds", "600");
    config.put("serviceAccountTokenAudience", "my-audience");
    KubernetesServiceAccountTokenAuthProvider provider = new KubernetesServiceAccountTokenAuthProvider();
    provider.initialize(null, null, (fd) -> "default", config);

    // Create a stateful set with a single container.
    V1StatefulSet statefulSet = new V1StatefulSet();
    statefulSet.setSpec(
        new V1StatefulSetSpec().template(
            new V1PodTemplateSpec().spec(
                new V1PodSpec().containers(
                    Collections.singletonList(new V1Container())))));

    provider.configureAuthDataStatefulSet(statefulSet, Optional.empty());

    // Volume 0: trust certs from the configured secret, projected as ca.crt.
    List<V1Volume> volumes = statefulSet.getSpec().getTemplate().getSpec().getVolumes();
    Assert.assertEquals(volumes.size(), 2);
    Assert.assertEquals(volumes.get(0).getName(), "ca-cert");
    Assert.assertEquals(volumes.get(0).getSecret().getSecretName(), "my-secret");
    Assert.assertEquals(volumes.get(0).getSecret().getItems().size(), 1);
    Assert.assertEquals(volumes.get(0).getSecret().getItems().get(0).getKey(), "ca.crt");
    Assert.assertEquals(volumes.get(0).getSecret().getItems().get(0).getPath(), "ca.crt");
    // Volume 1: projected service-account token honoring expiration and audience.
    Assert.assertEquals(volumes.get(1).getName(), "service-account-token");
    Assert.assertEquals(volumes.get(1).getProjected().getSources().size(), 1);
    V1ServiceAccountTokenProjection tokenProjection =
        volumes.get(1).getProjected().getSources().get(0).getServiceAccountToken();
    Assert.assertEquals(tokenProjection.getExpirationSeconds(), 600);
    Assert.assertEquals(tokenProjection.getAudience(), "my-audience");

    // The lone container mounts both volumes under /etc/auth.
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().size(), 1);
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().size(), 2);
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().get(0).getName(),
        "service-account-token");
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().get(0).getMountPath(),
        "/etc/auth");
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().get(1).getName(),
        "ca-cert");
    Assert.assertEquals(statefulSet.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts().get(1).getMountPath(),
        "/etc/auth");
}
@Override public void updateInstancePort(InstancePort instancePort) { checkNotNull(instancePort, ERR_NULL_INSTANCE_PORT); checkArgument(!Strings.isNullOrEmpty(instancePort.portId()), ERR_NULL_INSTANCE_PORT_ID); // in case OpenStack removes the port prior to OVS, we will not update // the instance port as it does not exist in the store if (instancePortStore.instancePort(instancePort.portId()) == null) { log.warn("Unable to update instance port {}, as it does not exist", instancePort.portId()); return; } instancePortStore.updateInstancePort(instancePort); log.info(String.format(MSG_INSTANCE_PORT, instancePort.portId(), MSG_UPDATED)); }
// Updating a port that was never registered must be a silent no-op (OpenStack may delete
// the port before OVS notices), not an exception — the test passes if nothing is thrown.
@Test
public void testUpdateUnregisteredInstancePort() {
    target.updateInstancePort(instancePort1);
}
/**
 * Builds the filtered table for a TableFilter step, delegating to the overload that
 * accepts a predicate factory, with {@code SqlPredicate::new} as the default factory.
 */
public static <K> KTableHolder<K> build(
    final KTableHolder<K> table,
    final TableFilter<K> step,
    final RuntimeBuildContext buildContext) {
  return build(table, step, buildContext, SqlPredicate::new);
}
// Filtering must not alter the table's schema: the built result reports the input schema.
@Test
public void shouldReturnCorrectSchema() {
    // When:
    final KTableHolder<Struct> result = step.build(planBuilder, planInfo);

    // Then:
    assertThat(result.getSchema(), is(schema));
}
/**
 * Initializes the naming module's URL context paths from client properties.
 * When PropertyKeyConst.CONTEXT_PATH is set (non-empty), rewrites the shared
 * UtilAndComs statics — webContext, nacosUrlBase ("/v1/ns") and nacosUrlInstance
 * ("/v1/ns/instance") — to live under the normalized context path.
 * NOTE(review): mutates global static state; presumably meant to run once during
 * client startup — confirm.
 */
public static void initWebRootContext(NacosClientProperties properties) {
    final String webContext = properties.getProperty(PropertyKeyConst.CONTEXT_PATH);
    // Only executes the rewrite when a non-empty context path was configured.
    TemplateUtils.stringNotEmptyAndThenExecute(webContext, () -> {
        UtilAndComs.webContext = ContextPathUtil.normalizeContextPath(webContext);
        UtilAndComs.nacosUrlBase = UtilAndComs.webContext + "/v1/ns";
        UtilAndComs.nacosUrlInstance = UtilAndComs.nacosUrlBase + "/instance";
    });
}
// Setting a context path must rewrite all three UtilAndComs URL statics beneath it.
@Test
void testInitWebRootContext() {
    String ctx = "/aaa";
    final NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive();
    properties.setProperty(PropertyKeyConst.CONTEXT_PATH, ctx);
    InitUtils.initWebRootContext(properties);
    assertEquals(ctx, UtilAndComs.webContext);
    assertEquals(ctx + "/v1/ns", UtilAndComs.nacosUrlBase);
    assertEquals(ctx + "/v1/ns/instance", UtilAndComs.nacosUrlInstance);
}
/**
 * Looks up a timeline entity by id and type, trying each candidate store in order and
 * returning the first hit; {@code null} when no store has the entity.
 *
 * @param entityId         the entity id
 * @param entityType       the entity type
 * @param fieldsToRetrieve which entity fields to populate
 * @throws IOException if a store read fails
 */
@Override
public TimelineEntity getEntity(String entityId, String entityType,
    EnumSet<Field> fieldsToRetrieve) throws IOException {
  LOG.debug("getEntity type={} id={}", entityType, entityId);
  List<EntityCacheItem> relatedCacheItems = new ArrayList<>();
  List<TimelineStore> stores = getTimelineStoresForRead(entityId, entityType,
      relatedCacheItems);
  for (TimelineStore store : stores) {
    // FIX: pass the store object itself instead of calling toString() eagerly —
    // SLF4J formats arguments lazily, only when debug logging is enabled.
    LOG.debug("Try timeline store {}:{} for the request", store.getName(), store);
    TimelineEntity e = store.getEntity(entityId, entityType, fieldsToRetrieve);
    if (e != null) {
      return e;
    }
  }
  LOG.debug("getEntity: Found nothing");
  return null;
}
// End-to-end plugin read: after writing entity log files for an active application, the
// store must resolve the entity through the plugin-provided group id. Cleans up the
// store and the user directory even on failure.
@Test
void testGetEntityPluginRead() throws Exception {
    EntityGroupFSTimelineStore store = null;
    ApplicationId appId = ApplicationId.fromString("application_1501509265053_0001");
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path userBase = new Path(testActiveDirPath, user);
    Path userAppRoot = new Path(userBase, appId.toString());
    Path attemotDirPath = new Path(userAppRoot, getAttemptDirName(appId));

    try {
        store = createAndStartTimelineStore(AppState.ACTIVE);
        // Entity log file named after the plugin's standard timeline group id.
        String logFileName = EntityGroupFSTimelineStore.ENTITY_LOG_PREFIX
            + EntityGroupPlugInForTest.getStandardTimelineGroupId(appId);
        createTestFiles(appId, attemotDirPath, logFileName);
        TimelineEntity entity = store.getEntity(entityNew.getEntityId(),
            entityNew.getEntityType(), EnumSet.allOf(Field.class));
        assertNotNull(entity);
        assertEquals(entityNew.getEntityId(), entity.getEntityId());
        assertEquals(entityNew.getEntityType(), entity.getEntityType());
    } finally {
        if (store != null) {
            store.stop();
        }
        fs.delete(userBase, true);
    }
}
/**
 * Associates {@code value} with {@code key}, returning the previous value or {@code null}.
 * A null key is supported and stored in the dedicated {@code nullEntry} slot outside the
 * hash table; non-null keys use chained hashing with head insertion, and the table is
 * rehashed once the size exceeds capacity.
 */
@Override
public V put(@Nullable final K key, final V value) {
    if (key == null) {
        // Null key lives in its own slot, never in the table.
        if (nullEntry == null) {
            _size += 1;
            nullEntry = new Entry<>(null, value);
            return null;
        }
        return nullEntry.setValue(value);
    }
    final Entry<K, V>[] table = this.table;
    final int hash = key.hashCode();
    final int index = HashUtil.indexFor(hash, table.length, mask);

    // Walk the bucket's collision chain; replace in place if the key already exists.
    for (Entry<K, V> e = table[index]; e != null; e = e.hashNext) {
        final K entryKey;
        if ((entryKey = e.key) == key || entryKey.equals(key)) {
            return e.setValue(value);
        }
    }

    // New key: prepend a fresh entry to the chain.
    final Entry<K, V> e = new Entry<>(key, value);
    e.hashNext = table[index];
    table[index] = e;
    _size += 1;

    // Grow the table once the load exceeds the configured capacity.
    if (_size > capacity) {
        rehash(HashUtil.nextCapacity(capacity));
    }
    return null;
}
// The copy constructor must take a snapshot: mutating the source afterwards must not
// be visible through the copy.
@Test
public void testCopyAndModify() {
    final HashMap<Integer, String> tested = new HashMap<>();
    tested.put(7, "a");
    tested.put(8, "b");
    HashMap<Integer, String> copy = new HashMap<>(tested);
    tested.put(7, "c");
    Assert.assertEquals("a", copy.get(7));
    Assert.assertEquals("b", copy.get(8));
    Assert.assertEquals(2, copy.size());
}
/**
 * Replaces the current producer with a fresh one from the client supplier.
 * Only legal under exactly-once-v2. The retiring producer's total blocked time —
 * including the time spent closing it — is accumulated so blocked-time metrics stay
 * continuous across the swap.
 *
 * @throws IllegalStateException if the processing mode is not EXACTLY_ONCE_V2
 */
public void resetProducer() {
    if (processingMode != EXACTLY_ONCE_V2) {
        throw new IllegalStateException("Expected eos-v2 to be enabled, but the processing mode was " + processingMode);
    }
    // Capture the old producer's blocked time before closing it.
    oldProducerTotalBlockedTime += totalBlockedTime(producer);
    final long start = time.nanoseconds();
    close();
    final long closeTime = time.nanoseconds() - start;
    // Closing time also counts as blocked time attributed to the old producer.
    oldProducerTotalBlockedTime += closeTime;
    producer = clientSupplier.getProducer(eosV2ProducerConfigs);
}
// Resetting the producer under eos-v2 must close the previous producer instance.
@Test
public void shouldCloseExistingProducerOnResetProducer() {
    eosBetaStreamsProducer.resetProducer();

    assertTrue(eosBetaMockProducer.closed());
}
/**
 * Resolves a reflective {@link Type} into a {@code JavaType}, wrapping any resolution
 * failure in an {@code InvalidDataTableTypeException} that names the offending type.
 */
static JavaType constructType(Type type) {
    try {
        return constructTypeInner(type);
    } catch (Exception e) {
        // Re-wrap with the type for a diagnosable message; the cause is preserved.
        throw new InvalidDataTableTypeException(type, e);
    }
}
// Resolving the same class twice must yield equal JavaType instances.
@Test
void object_should_equal_object() {
    JavaType javaType = TypeFactory.constructType(Object.class);
    JavaType other = TypeFactory.constructType(Object.class);
    assertThat(javaType, equalTo(other));
}
/**
 * Deserializes a "version" message from its wire format. Field order: clientVersion
 * (uint32), localServices (uint64), time (int64 epoch seconds), receiver address block,
 * sender address block (skipped), nonce (skipped), subVer (length-prefixed string),
 * bestHeight (uint32), and — for BLOOM_FILTER-capable peers only — the relay flag.
 *
 * @throws BufferUnderflowException if the payload is truncated
 * @throws ProtocolException if the client version is below the supported minimum
 */
public static VersionMessage read(ByteBuffer payload) throws BufferUnderflowException, ProtocolException {
    int clientVersion = (int) ByteUtils.readUint32(payload);
    check(clientVersion >= ProtocolVersion.MINIMUM.intValue(), ProtocolException::new);
    Services localServices = Services.read(payload);
    Instant time = Instant.ofEpochSecond(ByteUtils.readInt64(payload));
    // Receiver address block: services + 16-byte IP + big-endian port.
    Services receivingServices = Services.read(payload);
    InetAddress receivingInetAddress = PeerAddress.getByAddress(Buffers.readBytes(payload, 16));
    int receivingPort = ByteUtils.readUint16BE(payload);
    InetSocketAddress receivingAddr = new InetSocketAddress(receivingInetAddress, receivingPort);
    Buffers.skipBytes(payload, NETADDR_BYTES); // addr_from
    // uint64 localHostNonce (random data)
    // We don't care about the localhost nonce. It's used to detect connecting back to yourself in cases where
    // there are NATs and proxies in the way. However we don't listen for inbound connections so it's
    // irrelevant.
    Buffers.skipBytes(payload, 8);
    // string subVer (currently "")
    String subVer = Buffers.readLengthPrefixedString(payload);
    // int bestHeight (size of known block chain).
    long bestHeight = ByteUtils.readUint32(payload);
    // Pre-BLOOM_FILTER peers never send the relay byte; they implicitly relayed everything.
    boolean relayTxesBeforeFilter = clientVersion >= ProtocolVersion.BLOOM_FILTER.intValue()
            ? payload.get() != 0 : true;
    return new VersionMessage(clientVersion, localServices, time, receivingServices, receivingAddr,
            subVer, bestHeight, relayTxesBeforeFilter);
}
// Decodes a captured hex payload and checks relay flag, best height, and subVer
// are read correctly even for messages from older nodes that omit trailing data.
@Test public void decode_noRelay_bestHeight_subVer() { // Test that we can decode version messages which miss data which some old nodes may not include String hex = "7111010000000000000000003334a85500000000000000000000000000000000000000000000ffff7f000001479d000000000000000000000000000000000000ffff7f000001479d00000000000000000f2f626974636f696e6a3a302e31332f0004000000"; VersionMessage ver = VersionMessage.read(ByteBuffer.wrap(ByteUtils.parseHex(hex))); assertFalse(ver.relayTxesBeforeFilter); assertEquals(1024, ver.bestHeight); assertEquals("/bitcoinj:0.13/", ver.subVer); }
/**
 * Populates {@code bean}'s writable properties with values pulled from the
 * given provider, honoring the supplied copy options.
 *
 * @param bean          target bean to fill (returned as-is when provider is null)
 * @param valueProvider source of property values, keyed by property name
 * @param copyOptions   options controlling the copy (ignored nulls, mappings, ...)
 * @return the same bean instance, filled
 */
public static <T> T fillBean(T bean, ValueProvider<String> valueProvider, CopyOptions copyOptions) {
    // No source of values: hand the bean back untouched.
    if (valueProvider == null) {
        return bean;
    }
    return BeanCopier.create(valueProvider, bean, copyOptions).copy();
}
// Fills a Person from an inline ValueProvider and checks name/age were applied.
@Test public void fillBeanTest() { final Person person = BeanUtil.fillBean(new Person(), new ValueProvider<String>() { @Override public Object value(final String key, final Type valueType) { switch (key) { case "name": return "张三"; case "age": return 18; } return null; } @Override public boolean containsKey(final String key) { // 总是存在key return true; } }, CopyOptions.create()); assertEquals("张三", person.getName()); assertEquals(18, person.getAge()); }
// Registers a class for serialization by delegating to the class resolver.
@Override public void register(Class<?> cls) { classResolver.register(cls); }
// Round-trips a registered bean through Fury with and without reference tracking.
@Test(dataProvider = "referenceTrackingConfig") public void registerTest(boolean referenceTracking) { Fury fury = Fury.builder() .withLanguage(Language.JAVA) .withRefTracking(referenceTracking) .requireClassRegistration(false) .build(); fury.register(BeanA.class); BeanA beanA = BeanA.createBeanA(2); assertEquals(beanA, serDe(fury, beanA)); }
// Merges an updated SourceConfig into a clone of the existing one.
// Identity fields (tenant, namespace, name) and processing guarantees must
// match the existing config or an IllegalArgumentException is thrown; other
// fields only overwrite the clone when the update supplies a non-empty /
// non-null value. Switching between regular and batch sources is rejected,
// and batch-source updates are validated before being applied.
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) { SourceConfig mergedConfig = clone(existingConfig); if (!existingConfig.getTenant().equals(newConfig.getTenant())) { throw new IllegalArgumentException("Tenants differ"); } if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) { throw new IllegalArgumentException("Namespaces differ"); } if (!existingConfig.getName().equals(newConfig.getName())) { throw new IllegalArgumentException("Function Names differ"); } if (!StringUtils.isEmpty(newConfig.getClassName())) { mergedConfig.setClassName(newConfig.getClassName()); } if (!StringUtils.isEmpty(newConfig.getTopicName())) { mergedConfig.setTopicName(newConfig.getTopicName()); } if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) { mergedConfig.setSerdeClassName(newConfig.getSerdeClassName()); } if (!StringUtils.isEmpty(newConfig.getSchemaType())) { mergedConfig.setSchemaType(newConfig.getSchemaType()); } if (newConfig.getConfigs() != null) { mergedConfig.setConfigs(newConfig.getConfigs()); } if (newConfig.getSecrets() != null) { mergedConfig.setSecrets(newConfig.getSecrets()); } if (!StringUtils.isEmpty(newConfig.getLogTopic())) { mergedConfig.setLogTopic(newConfig.getLogTopic()); } if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees() .equals(existingConfig.getProcessingGuarantees())) { throw new IllegalArgumentException("Processing Guarantees cannot be altered"); } if (newConfig.getParallelism() != null) { mergedConfig.setParallelism(newConfig.getParallelism()); } if (newConfig.getResources() != null) { mergedConfig .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources())); } if (!StringUtils.isEmpty(newConfig.getArchive())) { mergedConfig.setArchive(newConfig.getArchive()); } if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) { mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags()); } if
(!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) { mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions()); } if (isBatchSource(existingConfig) != isBatchSource(newConfig)) { throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource"); } if (newConfig.getBatchSourceConfig() != null) { validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig()); mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig()); } if (newConfig.getProducerConfig() != null) { mergedConfig.setProducerConfig(newConfig.getProducerConfig()); } return mergedConfig; }
// Checks that a differing ProducerConfig is taken from the update and that
// no other field of the merged config changed.
@Test public void testMergeDifferentProducerConfig() { SourceConfig sourceConfig = createSourceConfig(); ProducerConfig producerConfig = new ProducerConfig(); producerConfig.setMaxPendingMessages(100); producerConfig.setMaxPendingMessagesAcrossPartitions(1000); producerConfig.setUseThreadLocalProducers(true); producerConfig.setBatchBuilder("DEFAULT"); producerConfig.setCompressionType(CompressionType.ZLIB); SourceConfig newSourceConfig = createUpdatedSourceConfig("producerConfig", producerConfig); SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig); assertEquals( mergedConfig.getProducerConfig(), producerConfig ); mergedConfig.setProducerConfig(sourceConfig.getProducerConfig()); assertEquals( new Gson().toJson(sourceConfig), new Gson().toJson(mergedConfig) ); }
// Advances a k-way ordered merge over per-result-set cursors kept in a
// priority queue. The very first next() only reports whether any rows exist
// (the queue head is already positioned); subsequent calls advance the
// previously-smallest cursor, re-queue it if it still has rows, and expose
// the new queue head as the current query result.
@Override public boolean next() throws SQLException { if (orderByValuesQueue.isEmpty()) { return false; } if (isFirstNext) { isFirstNext = false; return true; } OrderByValue firstOrderByValue = orderByValuesQueue.poll(); if (firstOrderByValue.next()) { orderByValuesQueue.offer(firstOrderByValue); } if (orderByValuesQueue.isEmpty()) { return false; } setCurrentQueryResult(orderByValuesQueue.peek().getQueryResult()); return true; }
// Merges three result sets (one empty) and asserts the globally ordered
// sequence 1, 2, 3 is produced before exhaustion.
@Test void assertNextForSomeResultSetsEmpty() throws SQLException { List<QueryResult> queryResults = Arrays.asList(mock(QueryResult.class), mock(QueryResult.class), mock(QueryResult.class)); for (int i = 0; i < 3; i++) { QueryResultMetaData metaData = mock(QueryResultMetaData.class); when(queryResults.get(i).getMetaData()).thenReturn(metaData); when(metaData.getColumnName(1)).thenReturn("col1"); when(metaData.getColumnName(2)).thenReturn("col2"); } ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL")); when(queryResults.get(0).next()).thenReturn(true, false); when(queryResults.get(0).getValue(1, Object.class)).thenReturn("2"); when(queryResults.get(2).next()).thenReturn(true, true, false); when(queryResults.get(2).getValue(1, Object.class)).thenReturn("1", "1", "3", "3"); MergedResult actual = resultMerger.merge(queryResults, selectStatementContext, createDatabase(), mock(ConnectionContext.class)); assertTrue(actual.next()); assertThat(actual.getValue(1, Object.class).toString(), is("1")); assertTrue(actual.next()); assertThat(actual.getValue(1, Object.class).toString(), is("2")); assertTrue(actual.next()); assertThat(actual.getValue(1, Object.class).toString(), is("3")); assertFalse(actual.next()); }
// Reports disk quota for the session's home directory file store:
// used = total - unallocated, available = unallocated. IO failures are
// translated to a local exception via the mapping service.
@Override public Space get() throws BackgroundException { final Path home = new DefaultHomeFinderService(session).find(); try { final FileStore store = Files.getFileStore(session.toPath(home)); return new Space(store.getTotalSpace() - store.getUnallocatedSpace(), store.getUnallocatedSpace()); } catch(IOException e) { throw new LocalExceptionMappingService().map("Failure to read attributes of {0}", e, home); } }
// Opens a local session and asserts the quota feature returns known values.
@Test public void get() throws Exception { final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname())); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final Quota.Space quota = new LocalQuotaFeature(session).get(); assertNotNull(quota.used); assertNotNull(quota.available); assertNotEquals(Quota.unknown, quota); }
/**
 * Two tokens are equal iff they belong to the exact same class and agree on
 * session id, host, and remote address. Uses getClass() (not instanceof) so
 * equality stays symmetric across subclasses.
 */
@Override public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    final SessionIdToken other = (SessionIdToken) o;
    return Objects.equals(sessionId, other.sessionId)
            && Objects.equals(host, other.host)
            && Objects.equals(remoteAddr, other.remoteAddr);
}
// Uses EqualsVerifier to exhaustively check the equals/hashCode contract.
@Test public void testEquals() throws Exception { EqualsVerifier.forClass(SessionIdToken.class).verify(); }
// Validates a mask rule configuration: first the declared algorithms, then
// that every table's columns reference registered algorithms.
@Override public void check(final String databaseName, final MaskRuleConfiguration ruleConfig, final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) { checkMaskAlgorithms(ruleConfig.getMaskAlgorithms()); checkTables(databaseName, ruleConfig.getTables(), ruleConfig.getMaskAlgorithms()); }
// Asserts that a configuration referencing an unregistered algorithm fails.
@SuppressWarnings("unchecked") @Test void assertInvalidCheck() { MaskRuleConfiguration ruleConfig = mockInvalidConfiguration(); assertThrows(UnregisteredAlgorithmException.class, () -> checker.check("test", ruleConfig, Collections.emptyMap(), Collections.emptyList())); }
/**
 * Strips every disallowed character from a connection name.
 * Null-safe: a null input is treated as the empty string.
 */
public String sanitizeConnectionName( String connectionName ) {
    final String safeName = Const.NVL( connectionName, "" );
    // Quote the invalid-character set so regex metacharacters are matched literally.
    final String invalidCharClass = "[" + Pattern.quote( CONNECTION_NAME_INVALID_CHARACTERS ) + "]";
    return safeName.replaceAll( invalidCharClass, "" );
}
// Ensures that sanitizing removes only invalid characters, keeping the full
// accepted character set intact.
@Test public void testSanitizeConnectionNamePreservesValidCharacters() { String sanitizedName = fileNameParser.sanitizeConnectionName( "connection" + CONNECTION_NAME_INVALID_CHARACTERS + ACCEPTED_CHARACTERS_FULL_SET + "name" ); for ( char c : ACCEPTED_CHARACTERS_FULL_SET.toCharArray() ) { assertTrue( sanitizedName.indexOf( c ) >= 0 ); } }
/**
 * Conditionally wraps a throwable. When {@code condition} holds, delegates to
 * {@code wrap(t)}; otherwise returns {@code t} itself if it is already a
 * RuntimeException, or wraps it in a plain RuntimeException.
 */
public static RuntimeException wrapIf(boolean condition, Throwable t) {
    if (condition) {
        return wrap(t);
    }
    return (t instanceof RuntimeException)
            ? (RuntimeException) t
            : new RuntimeException(t);
}
// With condition=false, a checked cause is wrapped in a plain RuntimeException
// (not the custom UserCodeException) and the cause is preserved.
@Test public void testWrapIfReturnsRuntimeExceptionWhenFalse() { IOException cause = new IOException(); RuntimeException wrapped = UserCodeException.wrapIf(false, cause); assertThat(wrapped, is(not(instanceOf(UserCodeException.class)))); assertEquals(cause, wrapped.getCause()); }
// Decides whether a user may view a log file. Rejects path-traversal
// sequences ("..<separator>") up front via Validate.isTrue, then authorizes
// when the user (mapped to a local name) is in the configured logs/nimbus
// user lists or the file's whitelist, or shares a group with the configured
// group lists.
// NOTE(review): the traversal guard only matches ".." followed by the
// platform separator — a trailing ".." or alternate separators may slip
// through; confirm callers normalize paths first.
public boolean isAuthorizedLogUser(String user, String fileName) { Validate.isTrue(!fileName.contains(".." + FileSystems.getDefault().getSeparator())); if (StringUtils.isEmpty(user) || StringUtils.isEmpty(fileName)) { return false; } LogUserGroupWhitelist whitelist = getLogUserGroupWhitelist(fileName); List<String> logsUsers = new ArrayList<>(); logsUsers.addAll(ObjectReader.getStrings(stormConf.get(DaemonConfig.LOGS_USERS))); logsUsers.addAll(ObjectReader.getStrings(stormConf.get(Config.NIMBUS_ADMINS))); if (whitelist != null) { logsUsers.addAll(whitelist.getUserWhitelist()); } List<String> logsGroups = new ArrayList<>(); logsGroups.addAll(ObjectReader.getStrings(stormConf.get(DaemonConfig.LOGS_GROUPS))); logsGroups.addAll(ObjectReader.getStrings(stormConf.get(Config.NIMBUS_ADMINS_GROUPS))); if (whitelist != null) { logsGroups.addAll(whitelist.getGroupWhitelist()); } String userName = principalToLocal.toLocal(user); Set<String> groups = getUserGroups(userName); return logsUsers.stream().anyMatch(u -> u.equals(userName)) || Sets.intersection(groups, new HashSet<>(logsGroups)).size() > 0; }
// A file name containing an upward traversal must raise IllegalArgumentException.
@Test public void testFailOnUpwardPathTraversal() { Map<String, Object> stormConf = Utils.readStormConfig(); Map<String, Object> conf = new HashMap<>(stormConf); ResourceAuthorizer authorizer = new ResourceAuthorizer(conf); Assertions.assertThrows(IllegalArgumentException.class, () -> authorizer.isAuthorizedLogUser("user", Paths.get("some/../path").toString())); }
// Wraps a byte array in a heap ByteBuf without copying; an empty array maps
// to the shared EMPTY_BUFFER singleton.
public static ByteBuf wrappedBuffer(byte[] array) { if (array.length == 0) { return EMPTY_BUFFER; } return new UnpooledHeapByteBuf(ALLOC, array, array.length); }
// Wraps a read-only direct NIO buffer and verifies its contents read back in order.
@Test public void wrappedReadOnlyDirectBuffer() { ByteBuffer buffer = ByteBuffer.allocateDirect(12); for (int i = 0; i < 12; i++) { buffer.put((byte) i); } buffer.flip(); ByteBuf wrapped = wrappedBuffer(buffer.asReadOnlyBuffer()); for (int i = 0; i < 12; i++) { assertEquals((byte) i, wrapped.readByte()); } wrapped.release(); }
// Supplies a fuzzy enum ParamConverter for enum parameter types; returns null
// for non-enums so other providers can handle them. Looks up the enum's
// fromString(String) method reflectively (under doPrivileged) and derives a
// parameter name from the annotations for error messages.
@Override @Nullable public <T> ParamConverter<T> getConverter(Class<T> rawType, @Nullable Type genericType, Annotation[] annotations) { if (!rawType.isEnum()) { return null; } @SuppressWarnings("unchecked") final Class<Enum<?>> type = (Class<Enum<?>>) rawType; final Enum<?>[] constants = type.getEnumConstants(); final String parameterName = getParameterNameFromAnnotations(annotations).orElse("Parameter"); final Method fromStringMethod = AccessController.doPrivileged(ReflectionHelper.getFromStringStringMethodPA(rawType)); return new FuzzyEnumParamConverter<>(rawType, fromStringMethod, constants, parameterName); }
// Non-enum types must yield no converter (null).
@Test void testNonEnum() { assertThat(paramConverterProvider.getConverter(Klass.class, null, new Annotation[] {})).isNull(); }
// Event-bus handler: releases the operation lock waiting on this process id
// once the local kill has completed.
@Subscribe public synchronized void completeToKillLocalProcess(final KillLocalProcessCompletedEvent event) { ProcessOperationLockRegistry.getInstance().notify(event.getProcessId()); }
// Publishes the completion event from another thread after ~50ms and checks
// the waiting side is released within a sane time window.
@Test void assertCompleteToKillLocalProcess() { String processId = "foo_id"; long startMillis = System.currentTimeMillis(); Executors.newFixedThreadPool(1).submit(() -> { Awaitility.await().pollDelay(50L, TimeUnit.MILLISECONDS).until(() -> true); subscriber.completeToKillLocalProcess(new KillLocalProcessCompletedEvent(processId)); }); waitUntilReleaseReady(processId); long currentMillis = System.currentTimeMillis(); assertThat(currentMillis, greaterThanOrEqualTo(startMillis + 50L)); assertThat(currentMillis, lessThanOrEqualTo(startMillis + 5000L)); }
// Vertex-elimination step of junction-tree construction: connects every pair
// of v's neighbors (per adjList) that is not already connected, updating both
// the persistent adjacency matrix and the working clone. Newly connected
// vertices are recorded in verticesToUpdate so their elimination weights can
// be recomputed. The inner loop starts at i+1 to skip self-edges and avoid
// double-processing pairs.
public void createClique(int v, boolean[][] clonedAdjMatrix, Set<Integer> verticesToUpdate, boolean[] adjList) { for ( int i = 0; i < adjList.length; i++ ) { if ( !adjList[i] ) { // not connected to this vertex continue; } getRelatedVerticesToUpdate(v, clonedAdjMatrix, verticesToUpdate, i); boolean needsConnection = false; for ( int j = i+1; j < adjList.length; j++ ) { // i + 1, so it doesn't check if a node is connected with itself if ( !adjList[j] || clonedAdjMatrix[i][j] ) { // edge already exists continue; } connect(adjacencyMatrix, i, j); connect(clonedAdjMatrix, i, j ); getRelatedVerticesToUpdate(v, clonedAdjMatrix, verticesToUpdate, j); needsConnection = true; } if ( needsConnection ) { verticesToUpdate.add( i ); } } }
// Eliminates vertex 1 with neighbors {2,3,4} and asserts the clique edges and
// update set were created.
@Test public void testCreateClique() { Graph<BayesVariable> graph = new BayesNetwork(); GraphNode dX0 = addNode(graph); GraphNode dX1 = addNode(graph); GraphNode dX2 = addNode(graph); GraphNode dX3 = addNode(graph); GraphNode dX4 = addNode(graph); GraphNode dX5 = addNode(graph); connectParentToChildren(dX1, dX2, dX3, dX4); JunctionTreeBuilder jtBuilder = new JunctionTreeBuilder( graph ); // do not moralize, as we want to test just the clique creation through elimination of the provided vertices Set<Integer> vertices = new HashSet<Integer>(); boolean[] adjList = new boolean[] { false, false, true, true, true, false }; boolean[][] clonedAdjMatrix = JunctionTreeBuilder.cloneAdjacencyMarix(jtBuilder.getAdjacencyMatrix()); jtBuilder.createClique(dX1.getId(), clonedAdjMatrix, vertices, adjList ); assertThat(vertices.size()).isEqualTo(3); assertThat(vertices.containsAll(Arrays.asList(2, 3, 4))).isTrue(); assertLinkedNode(jtBuilder, 1, 2, 3, 4); assertLinkedNode(jtBuilder, 2, 1, 3, 4); assertLinkedNode(jtBuilder, 3, 1, 2, 4); assertLinkedNode(jtBuilder, 4, 1, 2, 3); }
/**
 * Asserts that the given class is immutable, throwing an AssertionError whose
 * message contains the checker's expected description and the mismatch found.
 */
public static void assertThatClassIsImmutable(Class<?> clazz) {
    final ImmutableClassChecker checker = new ImmutableClassChecker();
    if (checker.isImmutableClass(clazz, false)) {
        return; // immutable: nothing to report
    }
    final Description expected = new StringDescription();
    final Description mismatch = new StringDescription();
    checker.describeTo(expected);
    checker.describeMismatch(mismatch);
    final String reason =
            "\n" + "Expected: is \"" + expected.toString() + "\"\n" +
            " but : was \"" + mismatch.toString() + "\"";
    throw new AssertionError(reason);
}
// A class with a setter must fail the immutability assertion with a message
// naming the setter.
@Test public void testClassWithSetter() throws Exception { boolean gotException = false; try { assertThatClassIsImmutable(ClassWithSetter.class); } catch (AssertionError assertion) { assertThat(assertion.getMessage(), containsString("a class with a setter named 'setX'")); gotException = true; } assertThat(gotException, is(true)); }
// Applies the wrapped function, retrying up to maxRetries times with a random
// delay when a throwable assignable to exceptionClass occurs. Any other
// throwable — or exhaustion of retries — is rethrown unchanged if unchecked,
// otherwise wrapped in RetriesExceededException.
@SuppressWarnings("squid:S1181") // Yes we really do want to catch Throwable @Override public V apply(U input) { int retryAttempts = 0; while (true) { try { return baseFunction.apply(input); } catch (Throwable t) { if (!exceptionClass.isAssignableFrom(t.getClass()) || retryAttempts == maxRetries) { Throwables.throwIfUnchecked(t); throw new RetriesExceededException(t); } Tools.randomDelay(maxDelayBetweenRetries); retryAttempts++; } } }
// A non-retryable failure must propagate immediately without retries.
@Test(expected = NonRetryableException.class) public void testFailureWithNonRetryableFailure() { new RetryingFunction<>(this::failCompletely, RetryableException.class, 2, 10).apply(null); }
/**
 * Reads a field's value from an instance via reflection.
 *
 * @param target    object to read from; a null target yields Optional.empty()
 * @param fieldName name of the field to read
 * @return the field's value, or empty when absent/unreadable
 */
public static Optional<Object> getFieldValue(Object target, String fieldName) {
    return target == null
            ? Optional.empty()
            : getFieldValueByClazz(target.getClass(), target, fieldName);
}
// Reads a static field by class name, and checks missing class/field inputs
// yield empty.
@Test public void getClazzFieldValue() { final Optional<Object> staticField = ReflectUtils.getFieldValue(TestReflect.class.getName(), null, "staticField"); Assert.assertTrue(staticField.isPresent()); Assert.assertEquals(staticField.get(), TestReflect.staticField); final Optional<Object> fieldValue = ReflectUtils.getFieldValue("com.test", null, null); Assert.assertFalse(fieldValue.isPresent()); }
// Fluent setter; rejects non-positive intervals via checkPositive.
public NearCachePreloaderConfig setStoreIntervalSeconds(int storeIntervalSeconds) { this.storeIntervalSeconds = checkPositive("storeIntervalSeconds", storeIntervalSeconds); return this; }
// Smoke test: a positive interval is accepted without throwing.
@Test public void setStoreIntervalSeconds() { config.setStoreIntervalSeconds(1); }
// Renders the full HTML report: header, core report body, then footer.
@Override void toHtml() throws IOException { writeHtmlHeader(); htmlCoreReport.toHtml(); writeHtmlFooter(); }
// Creates a JVM crash log (hs_err_pid*) in the working directory and checks
// the report still renders; the file is always cleaned up.
@Test public void testToHtmlWithHsErrPid() throws IOException { final File hsErrPidFile = new File("./hs_err_pid12345.log"); try { hsErrPidFile.createNewFile(); setUp(); final HtmlReport htmlReport = new HtmlReport(collector, null, javaInformationsList, Period.TOUT, writer); htmlReport.toHtml(); assertNotEmptyAndClear(writer); } finally { hsErrPidFile.delete(); } }
/**
 * Computes the djb2-style string hash: h = h * 31 + c, expressed as
 * (h << 5) - h + c, starting from 0. Matches the classic JavaScript
 * String.hashCode-style implementation; may overflow (wraps in int).
 *
 * @param str string to hash (empty string hashes to 0)
 * @return 32-bit hash value
 */
public static int hash(String str) {
    int result = 0;
    for (char c : str.toCharArray()) {
        result = c + ((result << 5) - result); // result * 31 + c, without multiply
    }
    return result;
}
// Pins the hash of a known input to its expected (overflow-wrapped) value.
@Test public void testHash() { int hash = Djb2.hash("l49_52"); assertEquals(-1153204821, hash); }
// Validates that every id refers to an existing, ENABLED user.
// Empty input is a no-op. Users are fetched in one batch and indexed by id;
// a missing user raises USER_NOT_EXISTS, a disabled one USER_IS_DISABLE
// (with the user's nickname in the message).
@Override public void validateUserList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得岗位信息 List<AdminUserDO> users = userMapper.selectBatchIds(ids); Map<Long, AdminUserDO> userMap = CollectionUtils.convertMap(users, AdminUserDO::getId); // 校验 ids.forEach(id -> { AdminUserDO user = userMap.get(id); if (user == null) { throw exception(USER_NOT_EXISTS); } if (!CommonStatusEnum.ENABLE.getStatus().equals(user.getStatus())) { throw exception(USER_IS_DISABLE, user.getNickname()); } }); }
// An unknown id must raise USER_NOT_EXISTS.
@Test public void testValidateUserList_notFound() { // 准备参数 List<Long> ids = singletonList(randomLongId()); // 调用, 并断言异常 assertServiceException(() -> userService.validateUserList(ids), USER_NOT_EXISTS); }
// Registers SOFA RPC gauges and counters on the given registry:
// - configured thread-pool core/max/queue sizes (from the ServerConfig ref),
// - live pool stats: active threads, idle threads (poolSize - active), and
//   current work-queue size (from the executor ref),
// - provider/consumer publication counters.
// All gauges read through AtomicReference holders and fall back to 0 when
// unset. The registry is finally recorded in `initialed`.
@Override public void bindTo(MeterRegistry registry) { Gauge.builder("sofa.threadpool.config.core", () -> Optional.of(serverConfig) .map(AtomicReference::get) .map(ServerConfig::getCoreThreads) .orElse(0)) .tags(common) .baseUnit(BaseUnits.THREADS) .register(registry); Gauge.builder("sofa.threadpool.config.max", () -> Optional.of(serverConfig) .map(AtomicReference::get) .map(ServerConfig::getMaxThreads) .orElse(0)) .tags(common) .baseUnit(BaseUnits.THREADS) .register(registry); Gauge.builder("sofa.threadpool.config.queue", () -> Optional.of(serverConfig) .map(AtomicReference::get) .map(ServerConfig::getQueues) .orElse(0)) .tags(common) .baseUnit(BaseUnits.TASKS) .register(registry); Gauge.builder("sofa.threadpool.active", () -> Optional.of(executor) .map(AtomicReference::get) .map(ThreadPoolExecutor::getActiveCount) .orElse(0)) .tags(common) .baseUnit(BaseUnits.THREADS) .register(registry); Gauge.builder("sofa.threadpool.idle", () -> Optional.of(executor) .map(AtomicReference::get) .map(e -> e.getPoolSize() - e.getActiveCount()) .orElse(0)) .tags(common) .baseUnit(BaseUnits.THREADS) .register(registry); Gauge.builder("sofa.threadpool.queue.size", () -> Optional.of(executor) .map(AtomicReference::get) .map(ThreadPoolExecutor::getQueue) .map(Collection::size) .orElse(0)) .tags(common) .baseUnit(BaseUnits.TASKS) .register(registry); provider = Counter.builder("sofa.provider") .tags(common) .register(registry); consumer = Counter.builder("sofa.consumer") .tags(common) .register(registry); initialed.set(registry); }
// Drives the metrics subscriber through the RPC event lifecycle via the
// package-private EventBus.handleEvent and asserts the registry ends up with
// the expected number of meters.
@Test public void testMicrometerMetrics() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { SimpleMeterRegistry registry = new SimpleMeterRegistry(); try (SofaRpcMetrics metrics = new SofaRpcMetrics()) { metrics.bindTo(registry); Method handleEvent = EventBus.class.getDeclaredMethod( "handleEvent", Subscriber.class, Event.class); handleEvent.setAccessible(true); SofaRequest request = buildRequest(); SofaResponse response = buildResponse(); RpcInternalContext.getContext() .setAttachment(RpcConstants.INTERNAL_KEY_CLIENT_ELAPSE, 100) .setAttachment(RpcConstants.INTERNAL_KEY_IMPL_ELAPSE, 10) .setAttachment(RpcConstants.INTERNAL_KEY_REQ_SIZE, 3) .setAttachment(RpcConstants.INTERNAL_KEY_RESP_SIZE, 4); handleEvent.invoke(EventBus.class, metrics, new ClientEndInvokeEvent(request, response, null)); handleEvent.invoke(EventBus.class, metrics, new ServerSendEvent(request, response, null)); ServerConfig serverConfig = new ServerConfig(); handleEvent.invoke(EventBus.class, metrics, new ServerStartedEvent(serverConfig, new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>()))); handleEvent.invoke(EventBus.class, metrics, new ServerStoppedEvent(serverConfig)); handleEvent.invoke(EventBus.class, metrics, new ProviderPubEvent(new ProviderConfig<>())); handleEvent.invoke(EventBus.class, metrics, new ConsumerSubEvent(new ConsumerConfig<>())); Assert.assertEquals(12, registry.getMeters().size()); } }
/**
 * Pre-invocation hook: when the target database is write-prohibited and any
 * statement in the batch is a write operation, blocks the call via
 * DatabaseController.disableDatabaseWriteOperation. Always returns the
 * (possibly mutated) context.
 */
@Override
protected ExecuteContext doBefore(ExecuteContext context) {
    final String database = getDataBaseInfo(context).getDatabaseName();
    if (DatabaseWriteProhibitionManager.getMySqlProhibitionDatabases().contains(database)) {
        List<String> sqlList = (List) context.getArguments()[PARAM_INDEX];
        for (String sql : sqlList) {
            if (SqlParserUtils.isWriteOperation(sql)) {
                // First write statement is enough to veto the whole batch.
                DatabaseController.disableDatabaseWriteOperation(database, context);
                break;
            }
        }
    }
    return context;
}
// Exercises the interceptor across the four switch/set combinations:
// disabled switch, disabled switch with listed database, enabled switch with
// listed database (blocked), enabled switch with no write SQL, and enabled
// switch with unlisted database.
@Test public void testDoBefore() throws Exception { // the database write prohibition switch is disabled globalConfig.setEnableMySqlWriteProhibition(false); context = ExecuteContext.forMemberMethod(protocolMock, methodMock, argument, null, null); interceptor.before(context); Assert.assertNull(context.getThrowableOut()); // The database write prohibition function is disabled. // The write prohibition database set contains the database that is blocked Set<String> databases = new HashSet<>(); databases.add("database-test"); globalConfig.setMySqlDatabases(databases); interceptor.before(context); Assert.assertNull(context.getThrowableOut()); // The database write prohibition switch is enabled, and the database set contains the database that is blocked globalConfig.setEnableMySqlWriteProhibition(true); context = ExecuteContext.forMemberMethod(protocolMock, methodMock, argument, null, null); interceptor.before(context); Assert.assertEquals("Database prohibit to write, database: database-test", context.getThrowableOut().getMessage()); //The database write prohibition switch is turned on, the sql does not write, // and the database set contains the blocked database sqlList = new ArrayList<>(); argument = new Object[]{null, null, sqlList}; context = ExecuteContext.forMemberMethod(protocolMock, methodMock, argument, null, null); interceptor.before(context); Assert.assertNull(context.getThrowableOut()); //The database write prohibition switch is enabled. The database set does not contain the database that is blocked sqlList.add("INSERT INTO table (name) VALUES ('test')"); globalConfig.setMySqlDatabases(new HashSet<>()); context = ExecuteContext.forMemberMethod(protocolMock, methodMock, argument, null, null); interceptor.before(context); Assert.assertNull(context.getThrowableOut()); }
// Formats an expression with default options (no identifiers escaped).
public static String formatExpression(final Expression expression) { return formatExpression(expression, FormatOptions.of(s -> false)); }
// COUNT('name') must format with the literal quoted.
@Test public void shouldFormatFunctionCallWithCount() { final FunctionCall functionCall = new FunctionCall(FunctionName.of("COUNT"), Collections.singletonList(new StringLiteral("name"))); assertThat(ExpressionFormatter.formatExpression(functionCall), equalTo("COUNT('name')")); }
// invokeAny is deliberately unsupported on this managed executor.
@Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException { throw new UnsupportedOperationException(); }
// Calling invokeAny must throw UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class) public void invokeAny() throws Exception { newManagedExecutorService().invokeAny(Collections.singleton(() -> null)); }
// Creates a namespace. A blank id gets a random UUID; an explicit id is
// trimmed and must match the id pattern, respect the max length, and be
// unique (tenant count check). The name must pass its own character-pattern
// check. All validation failures and NacosException are reported as `false`
// rather than an error response.
@PostMapping @Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE) public Boolean createNamespace(@RequestParam("customNamespaceId") String namespaceId, @RequestParam("namespaceName") String namespaceName, @RequestParam(value = "namespaceDesc", required = false) String namespaceDesc) { if (StringUtils.isBlank(namespaceId)) { namespaceId = UUID.randomUUID().toString(); } else { namespaceId = namespaceId.trim(); if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) { return false; } if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) { return false; } // check unique if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) { return false; } } // contains illegal chars if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) { return false; } try { return namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc); } catch (NacosException e) { return false; } }
// Each illegal character in the namespace name must cause rejection (false).
@Test void testEditNamespaceWithIllegalName() { assertFalse(namespaceController.createNamespace(null, "test@Name", "testDesc")); assertFalse(namespaceController.createNamespace(null, "test#Name", "testDesc")); assertFalse(namespaceController.createNamespace(null, "test$Name", "testDesc")); assertFalse(namespaceController.createNamespace(null, "test%Name", "testDesc")); assertFalse(namespaceController.createNamespace(null, "test^Name", "testDesc")); assertFalse(namespaceController.createNamespace(null, "test&Name", "testDesc")); assertFalse(namespaceController.createNamespace(null, "test*Name", "testDesc")); }
/**
 * Writes a "SlowOperations" diagnostics section rendering each slow-operation
 * DTO reported by the operation service; the section is emitted even when
 * there are no slow operations.
 */
@Override
public void run(DiagnosticsLogWriter writer) {
    // Fetch before opening the section, matching the original ordering of side effects.
    final List<SlowOperationDTO> slowOperations = operationService.getSlowOperationDTOs();
    writer.startSection("SlowOperations");
    for (SlowOperationDTO slowOperation : slowOperations) {
        render(writer, slowOperation);
    }
    writer.endSection();
}
// Triggers a slow entry processor on another thread and eventually expects
// the plugin's output to contain the slow-operation details.
@Test public void testRun() { spawn(() -> { hz.getMap("foo").executeOnKey("bar", new SlowEntryProcessor()); }); assertTrueEventually(() -> { plugin.run(logWriter); // TODO: can also include com.hazelcast.map.impl.operation.steps //assertContains(EntryOperation.class.getName()); assertContains("stackTrace"); assertContains("invocations=1"); assertContains("startedAt="); assertContains("duration(ms)="); assertContains("operationDetails="); }); }
// Returns the stored exception, or reconstructs it reflectively from its
// recorded type/message (and optional cause type/message). A reconstructed
// cause gets an empty stack trace. Constructor lookup failures surface as
// IllegalStateException carrying the type and message that could not be
// rebuilt.
public Exception getException() { if (exception != null) return exception; try { final Class<? extends Exception> exceptionClass = ReflectionUtils.toClass(getExceptionType()); if (getExceptionCauseType() != null) { final Class<? extends Exception> exceptionCauseClass = ReflectionUtils.toClass(getExceptionCauseType()); final Exception exceptionCause = getExceptionCauseMessage() != null ? ReflectionUtils.newInstanceCE(exceptionCauseClass, getExceptionCauseMessage()) : ReflectionUtils.newInstanceCE(exceptionCauseClass); exceptionCause.setStackTrace(new StackTraceElement[]{}); return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage(), exceptionCause) : ReflectionUtils.newInstanceCE(exceptionClass, exceptionCause); } else { return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage()) : ReflectionUtils.newInstanceCE(exceptionClass); } } catch (ReflectiveOperationException e) { throw new IllegalStateException("Could not reconstruct exception for class " + getExceptionType() + " and message " + getExceptionMessage(), e); } }
// With the cached exception cleared, reconstruction must rebuild the original
// JobNotFoundException type from the recorded metadata.
@Test void getExceptionForJobNotFoundException() { final FailedState failedState = new FailedState("JobRunr message", new JobNotFoundException("some message")); setInternalState(failedState, "exception", null); assertThat(failedState.getException()) .isInstanceOf(JobNotFoundException.class); }
// Plain setter for the exposure delay time (milliseconds — TODO confirm unit).
public void setDelayTime(long delayTime) { this.delayTime = delayTime; }
// Setter round-trip: value written is the value read back.
@Test public void setDelayTime() { SAExposureConfig saExposureConfig = new SAExposureConfig(1,1,true); saExposureConfig.setDelayTime(2); assertEquals(2, saExposureConfig.getDelayTime(), 0.2); }
// Formats the date-time; falls back to the JDK Date.toString style when
// useJdkToStringStyle is set, otherwise formats in this instance's time zone.
@Override public String toString() { if(useJdkToStringStyle){ return super.toString(); } return toString(this.timeZone); }
// toString() uses the normal datetime pattern; toString(pattern) reformats.
@Test public void toStringTest() { DateTime dateTime = new DateTime("2017-01-05 12:34:23", DatePattern.NORM_DATETIME_FORMAT); assertEquals("2017-01-05 12:34:23", dateTime.toString()); String dateStr = dateTime.toString("yyyy/MM/dd"); assertEquals("2017/01/05", dateStr); }
// Builds a JSON-based pluggable task for the plugin and runs the caller's
// action against it along with the plugin's descriptor.
public ExecutionResult execute(String pluginId, ActionWithReturn<Task, ExecutionResult> actionWithReturn) { JsonBasedPluggableTask task = new JsonBasedPluggableTask(pluginId, pluginRequestHelper, messageHandlerMap); return actionWithReturn.execute(task, pluginManager.getPluginDescriptorFor(pluginId)); }
// The action is invoked with a JSON-based task and the plugin descriptor, and
// its result is returned unchanged.
@Test public void shouldExecuteTheTask() { ActionWithReturn actionWithReturn = mock(ActionWithReturn.class); when(actionWithReturn.execute(any(JsonBasedPluggableTask.class), nullable(GoPluginDescriptor.class))).thenReturn(ExecutionResult.success("yay")); ExecutionResult executionResult = extension.execute(pluginId, actionWithReturn); verify(actionWithReturn).execute(any(JsonBasedPluggableTask.class), nullable(GoPluginDescriptor.class)); assertThat(executionResult.getMessagesForDisplay(), is("yay")); assertTrue(executionResult.isSuccessful()); }
// Fluent builder method: stages a data file for the append and returns this.
@Override public MergeAppend appendFile(DataFile file) { add(file); return this; }
// With min-count-to-merge=4, three fast appends keep separate manifests; the
// fourth (merging) append collapses them into one new manifest containing all
// four files with correct sequence numbers and statuses.
@TestTemplate public void testMinMergeCount() { // only merge when there are at least 4 manifests table.updateProperties().set("commit.manifest.min-count-to-merge", "4").commit(); assertThat(listManifestFiles()).isEmpty(); assertThat(readMetadata().lastSequenceNumber()).isEqualTo(0); Snapshot snap1 = commit(table, table.newFastAppend().appendFile(FILE_A), branch); long idFileA = snap1.snapshotId(); validateSnapshot(null, snap1, 1, FILE_A); Snapshot snap2 = commit(table, table.newFastAppend().appendFile(FILE_B), branch); long idFileB = snap2.snapshotId(); validateSnapshot(snap1, snap2, 2, FILE_B); assertThat(snap2.allManifests(table.io())).hasSize(2); Snapshot snap3 = commit(table, table.newAppend().appendFile(FILE_C), branch); long idFileC = snap3.snapshotId(); validateSnapshot(snap2, snap3, 3, FILE_C); TableMetadata base = readMetadata(); assertThat(latestSnapshot(table, branch).allManifests(table.io())).hasSize(3); Set<ManifestFile> unmerged = Sets.newHashSet(latestSnapshot(table, branch).allManifests(table.io())); Snapshot committed = commit(table, table.newAppend().appendFile(FILE_D), branch); V2Assert.assertEquals("Snapshot sequence number should be 4", 4, committed.sequenceNumber()); V2Assert.assertEquals( "Last sequence number should be 4", 4, readMetadata().lastSequenceNumber()); V1Assert.assertEquals( "Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber()); assertThat(committed.allManifests(table.io())).hasSize(1); ManifestFile newManifest = committed.allManifests(table.io()).get(0); assertThat(unmerged).doesNotContain(newManifest); long lastSnapshotId = committed.snapshotId(); validateManifest( newManifest, dataSeqs(4L, 3L, 2L, 1L), fileSeqs(4L, 3L, 2L, 1L), ids(lastSnapshotId, idFileC, idFileB, idFileA), files(FILE_D, FILE_C, FILE_B, FILE_A), statuses(Status.ADDED, Status.EXISTING, Status.EXISTING, Status.EXISTING)); }
/**
 * Entry point for executing this feature. If a custom processor is configured it takes
 * over the entire run; otherwise the before-feature hook is consulted, each remaining
 * scenario is processed (unless the hook vetoed the run), and the after-feature hook
 * is always invoked afterwards.
 */
@Override
public void run() {
    if (processor != null) {
        // A custom processor replaces the default hook/scenario flow entirely.
        processor.execute();
        return;
    }
    if (beforeHook()) {
        scenarios.forEachRemaining(this::processScenario);
    } else {
        logger.info("before-feature hook returned [false], aborting: {}", this);
    }
    afterFeature();
}
// Runs the js-read-3 feature file; the test passes if no scenario in it fails.
@Test
void testJsRead3() {
    run("jsread/js-read-3.feature");
}
/**
 * Creates a table schema from the given columns, watermark specifications, and
 * optional primary key constraint.
 *
 * @param columns        the table's columns; must not be null
 * @param watermarkSpecs watermark declarations; must not be null (may be empty)
 * @param primaryKey     the primary key constraint, or {@code null} if none
 * @throws NullPointerException if {@code columns} or {@code watermarkSpecs} is null
 */
private TableSchema(
        List<TableColumn> columns,
        List<WatermarkSpec> watermarkSpecs,
        @Nullable UniqueConstraint primaryKey) {
    // Include argument names in the NPE so a null input is easy to diagnose.
    this.columns = Preconditions.checkNotNull(columns, "Columns must not be null.");
    this.watermarkSpecs =
            Preconditions.checkNotNull(watermarkSpecs, "Watermark specs must not be null.");
    this.primaryKey = primaryKey;
}
/**
 * End-to-end check of TableSchema: builds a schema with physical, computed, and
 * metadata columns plus a watermark, then verifies its string rendering, the
 * field/column accessors (including out-of-range and nested-field lookups),
 * and copy()/equals()/hashCode() consistency.
 */
@Test
void testTableSchema() {
    TableSchema schema =
            TableSchema.builder()
                    .add(TableColumn.physical("f0", DataTypes.BIGINT()))
                    .add(
                            TableColumn.physical(
                                    "f1",
                                    DataTypes.ROW(
                                            DataTypes.FIELD("q1", DataTypes.STRING()),
                                            DataTypes.FIELD("q2", DataTypes.TIMESTAMP(3)))))
                    .add(TableColumn.physical("f2", DataTypes.STRING()))
                    .add(TableColumn.computed("f3", DataTypes.BIGINT(), "f0 + 1"))
                    .add(TableColumn.metadata("f4", DataTypes.BIGINT(), "other.key", true))
                    .watermark("f1.q2", WATERMARK_EXPRESSION, WATERMARK_DATATYPE)
                    .build();

    // test toString()
    String expected =
            "root\n"
                    + " |-- f0: BIGINT\n"
                    + " |-- f1: ROW<`q1` STRING, `q2` TIMESTAMP(3)>\n"
                    + " |-- f2: STRING\n"
                    + " |-- f3: BIGINT AS f0 + 1\n"
                    + " |-- f4: BIGINT METADATA FROM 'other.key' VIRTUAL\n"
                    + " |-- WATERMARK FOR f1.q2: TIMESTAMP(3) AS localtimestamp\n";
    assertThat(schema.toString()).isEqualTo(expected);

    // test getFieldNames and getFieldDataType
    assertThat(schema.getFieldName(2)).isEqualTo(Optional.of("f2"));
    assertThat(schema.getFieldDataType(3)).isEqualTo(Optional.of(DataTypes.BIGINT()));
    assertThat(schema.getTableColumn(3))
            .isEqualTo(Optional.of(TableColumn.computed("f3", DataTypes.BIGINT(), "f0 + 1")));
    assertThat(schema.getFieldDataType("f2")).isEqualTo(Optional.of(DataTypes.STRING()));
    assertThat(schema.getFieldDataType("f1").map(r -> r.getChildren().get(0)))
            .isEqualTo(Optional.of(DataTypes.STRING()));
    // Invalid indices/names and nested-field paths must yield empty rather than throw.
    assertThat(schema.getFieldName(5)).isNotPresent();
    assertThat(schema.getFieldType(-1)).isNotPresent();
    assertThat(schema.getFieldType("c")).isNotPresent();
    assertThat(schema.getFieldDataType("f1.q1")).isNotPresent();
    assertThat(schema.getFieldDataType("f1.q3")).isNotPresent();

    // test copy() and equals()
    assertThat(schema.copy()).isEqualTo(schema);
    assertThat(schema.copy().hashCode()).isEqualTo(schema.hashCode());
}
/**
 * Prepares traffic-control (tc) state for the given network device.
 * Creates a temp directory for tc rule files, records the device and bandwidth
 * settings, and then either reuses existing tc configuration (when NM recovery is
 * enabled and the current state matches) or wipes and re-initializes tc state.
 *
 * @param device            the network interface to shape; must not be null
 * @param rootBandwidthMbit total bandwidth of the device, in Mbit
 * @param yarnBandwidthMbit bandwidth reserved for YARN traffic, in Mbit
 * @throws ResourceHandlerException if device is null, hadoop.tmp.dir is unset,
 *         or the rules directory cannot be created
 */
public void bootstrap(String device, int rootBandwidthMbit, int yarnBandwidthMbit)
    throws ResourceHandlerException {
    if (device == null) {
        throw new ResourceHandlerException("device cannot be null!");
    }

    String tmpDirBase = conf.get("hadoop.tmp.dir");
    if (tmpDirBase == null) {
        throw new ResourceHandlerException("hadoop.tmp.dir not set!");
    }

    // tc rule files are staged under <hadoop.tmp.dir>/nm-tc-rules.
    tmpDirPath = tmpDirBase + "/nm-tc-rules";

    File tmpDir = new File(tmpDirPath);
    if (!(tmpDir.exists() || tmpDir.mkdirs())) {
        LOG.warn("Unable to create directory: " + tmpDirPath);
        throw new ResourceHandlerException("Unable to create directory: " + tmpDirPath);
    }

    this.device = device;
    this.rootBandwidthMbit = rootBandwidthMbit;
    this.yarnBandwidthMbit = yarnBandwidthMbit;
    // Default class gets the non-YARN remainder; if YARN claims everything (or more),
    // fall back to the full root bandwidth.
    defaultClassBandwidthMbit = (rootBandwidthMbit - yarnBandwidthMbit) <= 0 ?
        rootBandwidthMbit : (rootBandwidthMbit - yarnBandwidthMbit);

    boolean recoveryEnabled = conf.getBoolean(YarnConfiguration
        .NM_RECOVERY_ENABLED, YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED);
    String state = null;

    if (!recoveryEnabled) {
        LOG.info("NM recovery is not enabled. We'll wipe tc state before proceeding.");
    } else {
        //NM recovery enabled - run a state check
        state = readState();
        if (checkIfAlreadyBootstrapped(state)) {
            LOG.info("TC configuration is already in place. Not wiping state.");

            //We already have the list of existing container classes, if any
            //that were created after bootstrapping
            reacquireContainerClasses(state);
            return;
        } else {
            LOG.info("TC configuration is incomplete. Wiping tc state before proceeding");
        }
    }

    wipeState(); //start over in case previous bootstrap was incomplete
    initializeState();
}
@Test public void testInvalidBuilder() { conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false); TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock); try { trafficController .bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT); try { //Invalid op type for TC batch builder TrafficController.BatchBuilder invalidBuilder = trafficController. new BatchBuilder( PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP); Assert.fail("Invalid builder check failed!"); } catch (ResourceHandlerException e) { //expected } } catch (ResourceHandlerException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName()); } }
/**
 * Authenticates a device from basic credentials whose username encodes an account
 * identifier and device id (parsed by {@code getIdentifierAndDeviceId}) and whose
 * password is verified against the device's salted token hash.
 * On success, updates the device's last-seen state and, if the stored hash uses an
 * outdated version, re-hashes the password with the current scheme.
 * Every attempt — success or failure — is recorded in an authentication counter
 * metric tagged with the outcome (and failure reason, when applicable).
 *
 * @param basicCredentials the presented username ("{identifier}.{deviceId}") and password
 * @return the authenticated account/device pair, or empty if authentication failed
 */
@Override
public Optional<AuthenticatedDevice> authenticate(BasicCredentials basicCredentials) {
    boolean succeeded = false;
    String failureReason = null;

    try {
        final UUID accountUuid;
        final byte deviceId;
        {
            // Username format: "<uuid>" with an optional ".<deviceId>" suffix.
            final Pair<String, Byte> identifierAndDeviceId =
                getIdentifierAndDeviceId(basicCredentials.getUsername());

            accountUuid = UUID.fromString(identifierAndDeviceId.first());
            deviceId = identifierAndDeviceId.second();
        }

        Optional<Account> account = accountsManager.getByAccountIdentifier(accountUuid);

        if (account.isEmpty()) {
            failureReason = "noSuchAccount";
            return Optional.empty();
        }

        Optional<Device> device = account.get().getDevice(deviceId);

        if (device.isEmpty()) {
            failureReason = "noSuchDevice";
            return Optional.empty();
        }

        SaltedTokenHash deviceSaltedTokenHash = device.get().getAuthTokenHash();

        if (deviceSaltedTokenHash.verify(basicCredentials.getPassword())) {
            succeeded = true;
            Account authenticatedAccount = updateLastSeen(account.get(), device.get());
            // Opportunistically migrate legacy token hashes to the current version.
            if (deviceSaltedTokenHash.getVersion() != SaltedTokenHash.CURRENT_VERSION) {
                OLD_TOKEN_VERSION_COUNTER.increment();
                authenticatedAccount = accountsManager.updateDeviceAuthentication(
                    authenticatedAccount,
                    device.get(),
                    SaltedTokenHash.generateFor(basicCredentials.getPassword()));  // new credentials have current version
            }
            return Optional.of(new AuthenticatedDevice(authenticatedAccount, device.get()));
        } else {
            failureReason = "incorrectPassword";
            return Optional.empty();
        }
    } catch (IllegalArgumentException | InvalidAuthorizationHeaderException iae) {
        // Covers malformed UUIDs and malformed authorization headers alike.
        failureReason = "invalidHeader";
        return Optional.empty();
    } finally {
        // Always record the attempt outcome, tagged with the failure reason when present.
        Tags tags = Tags.of(
            AUTHENTICATION_SUCCEEDED_TAG_NAME, String.valueOf(succeeded));

        if (StringUtils.isNotBlank(failureReason)) {
            tags = tags.and(AUTHENTICATION_FAILURE_REASON_TAG_NAME, failureReason);
        }

        Metrics.counter(AUTHENTICATION_COUNTER_NAME, tags).increment();
    }
}
/**
 * Authenticating against a device id the account does not have must return empty,
 * even though the account itself exists and the password would otherwise verify.
 */
@Test
void testAuthenticateDeviceNotFound() {
    final UUID accountUuid = UUID.randomUUID();
    final byte knownDeviceId = 1;
    final String password = "12345";

    final Account account = mock(Account.class);
    final Device device = mock(Device.class);
    final SaltedTokenHash tokenHash = mock(SaltedTokenHash.class);

    clock.unpin();
    // Only knownDeviceId is registered on the account; any other id yields empty.
    when(accountsManager.getByAccountIdentifier(accountUuid)).thenReturn(Optional.of(account));
    when(account.getUuid()).thenReturn(accountUuid);
    when(account.getDevice(knownDeviceId)).thenReturn(Optional.of(device));
    when(device.getId()).thenReturn(knownDeviceId);
    when(device.getAuthTokenHash()).thenReturn(tokenHash);
    when(tokenHash.verify(password)).thenReturn(true);
    when(tokenHash.getVersion()).thenReturn(SaltedTokenHash.CURRENT_VERSION);

    final byte unknownDeviceId = (byte) (knownDeviceId + 1);
    final Optional<AuthenticatedDevice> maybeAuthenticatedAccount =
        accountAuthenticator.authenticate(
            new BasicCredentials(accountUuid + "." + unknownDeviceId, password));

    assertThat(maybeAuthenticatedAccount).isEmpty();
    verify(account).getDevice(unknownDeviceId);
}
/**
 * Fuzzes the request's GET parameters with the given payload, adding
 * {@code defaultParameter} when the request has no query parameters to fuzz.
 * No parameters are excluded from fuzzing.
 *
 * @param request          the request whose GET parameters are fuzzed
 * @param payload          the payload to inject
 * @param defaultParameter the parameter name to add when none exist
 * @return one fuzzed request per injected parameter
 */
public static ImmutableList<HttpRequest> fuzzGetParametersWithDefaultParameter(
    HttpRequest request, String payload, String defaultParameter) {
    Optional<String> fallbackParameter = Optional.of(defaultParameter);
    return fuzzGetParameters(request, payload, fallbackParameter, ImmutableSet.of());
}
/**
 * When the request has no GET parameters, fuzzing with a default parameter must
 * produce a request with that parameter set to the payload.
 */
@Test
public void fuzzGetParametersWithDefaultParameter_whenNoGetParameters_addsDefaultParameter() {
    HttpRequest expectedFuzzedRequest =
        HttpRequest.get("https://google.com?default=<payload>").withEmptyHeaders().build();

    ImmutableList<HttpRequest> fuzzedRequests =
        FuzzingUtils.fuzzGetParametersWithDefaultParameter(
            REQUEST_WITHOUT_GET_PARAMETERS, "<payload>", "default");

    assertThat(fuzzedRequests).contains(expectedFuzzedRequest);
}
/**
 * Maintains spare host capacity: reports overcommitted hosts (retiring them), then
 * checks whether losing the worst-case host could exceed remaining capacity and, if
 * spare capacity is exactly zero, attempts a mitigation by moving nodes.
 *
 * @return 1.0 on success (or when maintenance is not needed), 0.0 when the node repo
 *         is not working or a required mitigation could not be executed
 */
@Override
protected double maintain() {
    if ( ! nodeRepository().nodes().isWorking()) return 0.0;

    // Don't need to maintain spare capacity in dynamically provisioned zones; can provision more on demand.
    if (nodeRepository().zone().cloud().dynamicProvisioning()) return 1.0;

    NodeList allNodes = nodeRepository().nodes().list();
    CapacityChecker capacityChecker = new CapacityChecker(allNodes);

    // Report and retire hosts whose children exceed the host's capacity.
    List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts();
    metric.set(ConfigServerMetrics.OVERCOMMITTED_HOSTS.baseName(), overcommittedHosts.size(), null);
    retireOvercommitedHosts(allNodes, overcommittedHosts);

    boolean success = true;
    Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
    if (failurePath.isPresent()) {
        // Spare capacity = number of hosts that can be lost before placement fails, minus one.
        int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
        if (spareHostCapacity == 0) {
            List<Move> mitigation = findMitigation(failurePath.get());
            if (execute(mitigation, failurePath.get())) {
                // We succeeded or are in the process of taking a step to mitigate.
                // Report with the assumption this will eventually succeed to avoid alerting before we're stuck
                spareHostCapacity++;
            } else {
                success = false;
            }
        }
        metric.set(ConfigServerMetrics.SPARE_HOST_CAPACITY.baseName(), spareHostCapacity, null);
    }
    return success ? 1.0 : 0.0;
}
/**
 * With three hosts and not enough capacity to absorb the required moves, the
 * maintainer must take no action and report zero spare host capacity.
 */
@Test
public void testMultipleNodesMustMoveFromOneHostButInsufficientCapacity() {
    var tester = new SpareCapacityMaintainerTester();
    setupMultipleHosts(tester, 3);

    tester.maintainer.maintain();

    // No mitigation was possible: no deployments, no retirements, zero spare capacity.
    assertEquals(0, tester.deployer.activations);
    var retiredNodes = tester.nodeRepository.nodes().list().retired();
    assertEquals(0, retiredNodes.size());
    assertEquals(0, tester.metric.values.get("spareHostCapacity"));
}
/**
 * In hybrid plugin-discovery modes, compares the merged scan result against the
 * service-loader scan to find plugins lacking ServiceLoader manifests.
 * When none are missing, suggests switching to SERVICE_LOAD; when some are missing,
 * warns (HYBRID_WARN) or fails (HYBRID_FAIL) with the list of affected plugins.
 */
static void maybeReportHybridDiscoveryIssue(PluginDiscoveryMode discoveryMode,
                                            PluginScanResult serviceLoadingScanResult,
                                            PluginScanResult mergedResult) {
    // Plugins present in the merged scan but absent from the service-loader scan
    // have no ServiceLoader manifest.
    SortedSet<PluginDesc<?>> nonMigratedPlugins = new TreeSet<>();
    mergedResult.forEach(nonMigratedPlugins::add);
    serviceLoadingScanResult.forEach(nonMigratedPlugins::remove);

    boolean hybridMode = discoveryMode == PluginDiscoveryMode.HYBRID_WARN
            || discoveryMode == PluginDiscoveryMode.HYBRID_FAIL;

    if (nonMigratedPlugins.isEmpty()) {
        if (hybridMode) {
            log.warn("All plugins have ServiceLoader manifests, consider reconfiguring {}={}",
                    WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.SERVICE_LOAD);
        }
        return;
    }

    String pluginListing = nonMigratedPlugins.stream()
            .map(pluginDesc -> pluginDesc.location()
                    + "\t" + pluginDesc.className()
                    + "\t" + pluginDesc.type()
                    + "\t" + pluginDesc.version())
            .collect(Collectors.joining("\n", "[\n", "\n]"));
    String message = String.format(
            "One or more plugins are missing ServiceLoader manifests may not be usable with %s=%s: %s%n"
                    + "Read the documentation at %s for instructions on migrating your plugins "
                    + "to take advantage of the performance improvements of %s mode.",
            WorkerConfig.PLUGIN_DISCOVERY_CONFIG,
            PluginDiscoveryMode.SERVICE_LOAD,
            pluginListing,
            "https://kafka.apache.org/documentation.html#connect_plugindiscovery",
            PluginDiscoveryMode.SERVICE_LOAD
    );
    if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN) {
        log.warn("{} To silence this warning, set {}={} in the worker config.",
                message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.ONLY_SCAN);
    } else if (discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) {
        throw new ConnectException(String.format("%s To silence this error, set %s=%s in the worker config.",
                message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.HYBRID_WARN));
    }
}
@Test public void testHybridWarnNoPlugins() { try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Plugins.class)) { Plugins.maybeReportHybridDiscoveryIssue(PluginDiscoveryMode.HYBRID_WARN, empty, empty); assertTrue(logCaptureAppender.getEvents().stream().anyMatch(e -> e.getLevel().equals("WARN") // These log messages must contain the config name, it is referenced in the documentation. && e.getMessage().contains(WorkerConfig.PLUGIN_DISCOVERY_CONFIG) )); } }