focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public void define(Context context) {
    // Declares the authentication web-service controller and lets every
    // registered action contribute its own endpoint before sealing it.
    final NewController authenticationController = context.createController(AUTHENTICATION_CONTROLLER);
    authenticationController.setDescription("Handle authentication.");
    actions.forEach(wsAction -> wsAction.define(authenticationController));
    authenticationController.done();
}
// Verifies the "api/authentication" controller is registered with a non-empty
// description and exposes exactly one action named "foo" with a handler.
@Test public void define_ws() { WebService.Context context = new WebService.Context(); underTest.define(context); WebService.Controller controller = context.controller("api/authentication"); assertThat(controller).isNotNull(); assertThat(controller.description()).isNotEmpty(); assertThat(controller.actions()).hasSize(1); WebService.Action fooAction = controller.action("foo"); assertThat(fooAction).isNotNull(); assertThat(fooAction.handler()).isNotNull(); }
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
    // Encode the value as an 8-byte IEEE-754 double on the wire.
    // value.toString() keeps the original NPE behavior for null input.
    final double doubleValue = Double.parseDouble(value.toString());
    payload.getByteBuf().writeDouble(doubleValue);
}
// Verifies that writing 1D delegates to ByteBuf.writeDouble with the parsed value.
@Test void assertWrite() { new PostgreSQLDoubleBinaryProtocolValue().write(new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8), 1D); verify(byteBuf).writeDouble(1.0D); }
// Appends an upstream (parent workflow) node to the existing restart path and
// forces RESTART_FROM_SPECIFIC so the whole chain restarts along that path.
// Precondition: restartConfig with a non-empty restart path must already exist.
@SuppressFBWarnings("NP_NULL_ON_SOME_PATH") public void updateForUpstream(String workflowId, long instanceId, @Nullable String stepId) { Checks.checkTrue( restartConfig != null && !ObjectHelper.isCollectionEmptyOrNull(restartConfig.getRestartPath()), "Cannot update restart info in empty restart configuration for initiator [%s]", initiator); restartConfig .getRestartPath() .add(new RestartConfig.RestartNode(workflowId, instanceId, stepId)); // Use RESTART_FROM_SPECIFIC (same in updateForDownstreamIfNeeded) along the restart path this.currentPolicy = RunPolicy.RESTART_FROM_SPECIFIC; }
// Verifies updateForUpstream appends the parent node (identity + step) and
// switches the run policy to RESTART_FROM_SPECIFIC.
@Test public void testUpdateForUpstream() { RestartConfig config = RestartConfig.builder().addRestartNode("foo", 1, "bar").build(); RunRequest runRequest = RunRequest.builder() .initiator(new ManualInitiator()) .currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE) .restartConfig(config) .build(); runRequest.updateForUpstream("parent", 2, "sub-step"); Assert.assertEquals("[parent][2]", runRequest.getWorkflowIdentity()); Assert.assertEquals("sub-step", runRequest.getRestartStepId()); Assert.assertEquals(RunPolicy.RESTART_FROM_SPECIFIC, runRequest.getCurrentPolicy()); }
// Resolves a child path against this directory. Validation order is observable
// (IllegalStateException for non-directory before IllegalArgumentException for
// bad options), so the checks must not be reordered. Windows paths are handled
// by a dedicated helper because of drive letters and backslash separators.
@Override public LocalResourceId resolve(String other, ResolveOptions resolveOptions) { checkState(isDirectory(), "Expected the path is a directory, but had [%s].", pathString); checkArgument( resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE) || resolveOptions.equals(StandardResolveOptions.RESOLVE_DIRECTORY), "ResolveOptions: [%s] is not supported.", resolveOptions); checkArgument( !(resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE) && other.endsWith("/")), "The resolved file: [%s] should not end with '/'.", other); if (SystemUtils.IS_OS_WINDOWS) { return resolveLocalPathWindowsOS(other, resolveOptions); } else { return resolveLocalPath(other, resolveOptions); } }
// Unix-only: verifies resolve() normalizes "." and ".." segments and preserves
// "./", "../" and "~/" prefixes. Returns early (skips) on Windows.
@Test public void testResolveNormalizationInUnix() { if (SystemUtils.IS_OS_WINDOWS) { // Skip tests return; } // Tests normalization of "." and ".." // // Normalization is the implementation choice of LocalResourceId, // and it is not required by ResourceId.resolve(). assertEquals( toResourceIdentifier("file://home/bb"), toResourceIdentifier("file://root/../home/output/../") .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("..", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("bb", StandardResolveOptions.RESOLVE_FILE)); assertEquals( toResourceIdentifier("file://root/aa/bb"), toResourceIdentifier("file://root/./") .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve(".", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("bb", StandardResolveOptions.RESOLVE_FILE)); assertEquals( toResourceIdentifier("aa/bb"), toResourceIdentifier("a/../") .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve(".", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("bb", StandardResolveOptions.RESOLVE_FILE)); assertEquals( toResourceIdentifier("/aa/bb"), toResourceIdentifier("/a/../") .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve(".", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("bb", StandardResolveOptions.RESOLVE_FILE)); // Tests "./", "../", "~/". 
assertEquals( toResourceIdentifier("aa/bb"), toResourceIdentifier("./") .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve(".", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("bb", StandardResolveOptions.RESOLVE_FILE)); assertEquals( toResourceIdentifier("../aa/bb"), toResourceIdentifier("../") .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve(".", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("bb", StandardResolveOptions.RESOLVE_FILE)); assertEquals( toResourceIdentifier("~/aa/bb/"), toResourceIdentifier("~/") .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve(".", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("bb", StandardResolveOptions.RESOLVE_DIRECTORY)); }
public static byte[] getSecureMungedAddress() { byte[] address = null; try { address = getMacAddress(); } catch (SocketException se) { LOGGER.warn("Unable to get mac address, will use a dummy address", se); // address will be set below } if (!isValidAddress(address)) { LOGGER.warn("Unable to get a valid mac address, will use a dummy address"); address = constructDummyMulticastAddress(); } byte[] mungedBytes = new byte[BYTE_SIZE]; new SecureRandom().nextBytes(mungedBytes); for (int i = 0; i < BYTE_SIZE; ++i) { mungedBytes[i] ^= address[i]; } return mungedBytes; }
// Verifies the munged address is always 6 bytes (standard MAC length).
@Test public void getSecureMungedAddress() { byte[] address = MacAddressProvider.getSecureMungedAddress(); assertThat(address) .isNotEmpty() .hasSize(6); }
// Static factory: wraps a string key with its owning application id.
public static Key of(String key, ApplicationId appId) { return new StringKey(key, appId); }
// Verifies equals/hashCode: keys built from the same (key, appId) pair are
// equal, keys from different pairs are not.
@Test public void longKey() { Key longKey1 = Key.of(LONG_KEY_1, NetTestTools.APP_ID); Key copyOfLongKey1 = Key.of(LONG_KEY_1, NetTestTools.APP_ID); Key longKey2 = Key.of(LONG_KEY_2, NetTestTools.APP_ID); Key copyOfLongKey2 = Key.of(LONG_KEY_2, NetTestTools.APP_ID); Key longKey3 = Key.of(LONG_KEY_3, NetTestTools.APP_ID); new EqualsTester() .addEqualityGroup(longKey1, copyOfLongKey1) .addEqualityGroup(longKey2, copyOfLongKey2) .addEqualityGroup(longKey3) .testEquals(); }
public boolean canViewAndEditTemplate(CaseInsensitiveString username, List<Role> roles) {
    // True if the user (directly or through any of the given roles) may edit
    // at least one of the templates in this collection.
    boolean permitted = false;
    for (PipelineTemplateConfig template : this) {
        if (canUserEditTemplate(template, username, roles)) {
            permitted = true;
            break;
        }
    }
    return permitted;
}
// Verifies that a user whose role grants no template-admin rights cannot
// view/edit a template administered by a different user.
@Test public void shouldReturnFalseIfUserWithinARoleCannotViewAndEditTemplates() { CaseInsensitiveString templateAdmin = new CaseInsensitiveString("template-admin"); Role securityConfigRole = getSecurityConfigRole(templateAdmin); List<Role> roles = setupRoles(securityConfigRole); ArrayList<PipelineTemplateConfig> templateList = new ArrayList<>(); templateList.add(PipelineTemplateConfigMother.createTemplate("templateName", new Authorization(new AdminsConfig(new AdminUser(new CaseInsensitiveString("random-user")))), StageConfigMother.manualStage("stage-name"))); TemplatesConfig templates = new TemplatesConfig(templateList.toArray(new PipelineTemplateConfig[0])); assertThat(templates.canViewAndEditTemplate(templateAdmin, roles), is(false)); }
public void clear() {
    // Drop every query parameter and flag the query as empty again.
    isEmpty = true;
    params.clear();
}
// Verifies clear() empties a populated query and flips isEmpty back to true.
@Test void testClear() { Query query = Query.newInstance().addParam("key-1", "value-1").addParam("key-2", "value-2"); assertFalse(query.isEmpty()); assertEquals("value-1", query.getValue("key-1")); query.clear(); assertTrue(query.isEmpty()); }
// Registers a metric under the given name. Three cases:
//  - a child MetricRegistry: mirror its metrics into this registry under a
//    "childName.metricName" prefix via a listener (kept in sync on add/remove);
//  - a MetricSet: flatten and register each member;
//  - a plain Metric: insert atomically, rejecting duplicates with IAE.
// Restyling is avoided here: the listener wiring and putIfAbsent ordering are
// behavior-critical.
@SuppressWarnings("unchecked") public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException { if (metric == null) { throw new NullPointerException("metric == null"); } if (metric instanceof MetricRegistry) { final MetricRegistry childRegistry = (MetricRegistry) metric; final String childName = name; childRegistry.addListener(new MetricRegistryListener() { @Override public void onGaugeAdded(String name, Gauge<?> gauge) { register(name(childName, name), gauge); } @Override public void onGaugeRemoved(String name) { remove(name(childName, name)); } @Override public void onCounterAdded(String name, Counter counter) { register(name(childName, name), counter); } @Override public void onCounterRemoved(String name) { remove(name(childName, name)); } @Override public void onHistogramAdded(String name, Histogram histogram) { register(name(childName, name), histogram); } @Override public void onHistogramRemoved(String name) { remove(name(childName, name)); } @Override public void onMeterAdded(String name, Meter meter) { register(name(childName, name), meter); } @Override public void onMeterRemoved(String name) { remove(name(childName, name)); } @Override public void onTimerAdded(String name, Timer timer) { register(name(childName, name), timer); } @Override public void onTimerRemoved(String name) { remove(name(childName, name)); } }); } else if (metric instanceof MetricSet) { registerAll(name, (MetricSet) metric); } else { final Metric existing = metrics.putIfAbsent(name, metric); if (existing == null) { onMetricAdded(name, metric); } else { throw new IllegalArgumentException("A metric named " + name + " already exists"); } } return metric; }
// Verifies register() returns the counter and notifies listeners of the add.
@Test public void registeringACounterTriggersANotification() { assertThat(registry.register("thing", counter)) .isEqualTo(counter); verify(listener).onCounterAdded("thing", counter); }
// Intentional no-op implementation of the interface method; view properties
// are ignored by this variant.
@Override public void setViewProperties(View view, JSONObject properties) { }
// Smoke test: calling the no-op setViewProperties must not throw.
// The tag assertion is commented out because the no-op stores nothing.
@Test public void setViewProperties() { View view = new View(mApplication); mSensorsAPI.setViewProperties(view, new JSONObject()); // Object tag = view.getTag(R.id.sensors_analytics_tag_view_properties); // Assert.assertNull(tag); }
// Maps Sentinel exceptions to HTTP statuses + Shenyu error payloads.
// NOTE: the instanceof order is significant — Degrade/Flow exceptions are
// checked before the more general BlockException branch; unknown throwables
// propagate unchanged via Mono.error.
@Override public Mono<Void> withoutFallback(final ServerWebExchange exchange, final Throwable throwable) { Object error; if (throwable instanceof DegradeException) { exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR); error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SERVICE_RESULT_ERROR); } else if (throwable instanceof FlowException) { exchange.getResponse().setStatusCode(HttpStatus.TOO_MANY_REQUESTS); error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.TOO_MANY_REQUESTS); } else if (throwable instanceof BlockException) { exchange.getResponse().setStatusCode(HttpStatus.TOO_MANY_REQUESTS); error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SENTINEL_BLOCK_ERROR); } else if (throwable instanceof SentinelPlugin.SentinelFallbackException) { return exchange.getAttribute(Constants.RESPONSE_MONO); } else { return Mono.error(throwable); } return WebFluxResultUtils.result(exchange, error); }
// Verifies a non-Sentinel throwable is propagated as a Mono error.
@Test public void testRuntimeException() { StepVerifier.create(fallbackHandler.withoutFallback(exchange, new RuntimeException())).expectSubscription().verifyError(); }
public synchronized void initialize(Configuration config, NvidiaBinaryHelper nvidiaHelper) throws YarnException { setConf(config); this.nvidiaBinaryHelper = nvidiaHelper; if (isAutoDiscoveryEnabled()) { numOfErrorExecutionSinceLastSucceed = 0; lookUpAutoDiscoveryBinary(config); // Try to discover GPU information once and print try { LOG.info("Trying to discover GPU information ..."); GpuDeviceInformation info = getGpuDeviceInformation(); LOG.info("Discovered GPU information: " + info.toString()); } catch (YarnException e) { String msg = "Failed to discover GPU information from system, exception message:" + e.getMessage() + " continue..."; LOG.warn(msg); } } }
// Verifies initialize() rejects a configured binary that is not nvidia-smi.
@Test public void testBinaryIsNotNvidiaSmi() throws YarnException { exception.expect(YarnException.class); exception.expectMessage(String.format( "It should point to an %s binary, which is now %s", "nvidia-smi", "badfile")); Configuration conf = new Configuration(false); setupFakeBinary(conf, "badfile", true); GpuDiscoverer plugin = new GpuDiscoverer(); plugin.initialize(conf, binaryHelper); }
public UiTopoLayout region(Region region) {
    // The root layout implicitly spans the whole network and may not be
    // bound to a region (even a null one).
    if (!isRoot()) {
        this.region = region;
        return this;
    }
    throw new IllegalArgumentException(E_ROOT_REGION);
}
// Verifies setting any region (non-null or null) on the root layout throws
// IllegalArgumentException with the expected message.
@Test public void setRegionOnRoot() { mkRootLayout(); try { layout.region(REGION); fail(AM_NOEX); } catch (IllegalArgumentException e) { assertEquals(AM_WREXMSG, E_ROOT_REGION, e.getMessage()); } try { layout.region(null); fail(AM_NOEX); } catch (IllegalArgumentException e) { assertEquals(AM_WREXMSG, E_ROOT_REGION, e.getMessage()); } }
// Deprecated overload: delegates to the Named variant with a generated
// processor name so old callers keep working.
@Override @Deprecated public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier, final String... stateStoreNames) { process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames); }
// Verifies process() rejects a null supplier with a descriptive NPE.
@Test public void shouldNotAllowNullProcessSupplierOnProcess() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.process((ProcessorSupplier<? super String, ? super String, Void, Void>) null)); assertThat(exception.getMessage(), equalTo("processorSupplier can't be null")); }
public static Set<String> parseDeployOutput(File buildResult) throws IOException {
    // try-with-resources: Files.lines holds an open file handle that must be
    // released; parsing is delegated to the Stream-based overload.
    try (Stream<String> buildLines = Files.lines(buildResult.toPath())) {
        return parseDeployOutput(buildLines);
    }
}
// Verifies a maven-deploy-plugin log line yields the deployed module name.
@Test void testParseDeployOutputDetectsDeployment() { assertThat( DeployParser.parseDeployOutput( Stream.of( "[INFO] --- maven-deploy-plugin:2.8.2:deploy (default-deploy) @ flink-parent ---", "[INFO] "))) .containsExactly("flink-parent"); }
@TargetApi(8)
public String getExternalFilesDirectoryPath() {
    // Without an attached context there is no files dir; return "".
    if (this.context == null) {
        return "";
    }
    return absPath(this.context.getExternalFilesDir(null));
}
// Verifies a non-empty path ending in the expected directory is returned.
@Test public void getExternalFilesDirectoryPathIsNotEmpty() { assertThat(contextUtil.getExternalFilesDirectoryPath(), endsWith("/external-files")); }
/**
 * Sets the header row of the table.
 *
 * <p>A defensive copy of the varargs array is stored so later mutation of the
 * caller's array cannot silently change the table header (the original code
 * kept a reference to the caller-supplied array).
 *
 * @param headerColumnNames the column titles, in display order; may be null
 */
public void addHeader(String... headerColumnNames) {
    _headerColumnNames = headerColumnNames == null ? null : headerColumnNames.clone();
}
// TestNG: verifies the rendered table goes from empty to non-empty after a
// header is added.
@Test(priority = 2) public void testAddHeader() { // Run the test String initalTable = _textTableUnderTest.toString(); _textTableUnderTest.addHeader("header1"); String finalTable = _textTableUnderTest.toString(); // Verify the results assertEquals("", initalTable); assertNotEquals("", finalTable); }
// REST endpoint (GET /previous): returns the config history entry preceding
// the given id; secured with READ permission, tenant defaults to "".
@GetMapping(value = "/previous") @Secured(action = ActionTypes.READ, signType = SignType.CONFIG) public ConfigHistoryInfo getPreviousConfigHistoryInfo(@RequestParam("dataId") String dataId, @RequestParam("group") String group, @RequestParam(value = "tenant", required = false, defaultValue = StringUtils.EMPTY) String tenant, @RequestParam("id") Long id) throws AccessException { return historyService.getPreviousConfigHistoryInfo(dataId, group, tenant, id); }
// MockMvc test: verifies the /previous endpoint deserializes the stubbed
// history entry (dataId, group, content round-trip).
@Test void testGetPreviousConfigHistoryInfo() throws Exception { ConfigHistoryInfo configHistoryInfo = new ConfigHistoryInfo(); configHistoryInfo.setDataId("test"); configHistoryInfo.setGroup("test"); configHistoryInfo.setContent("test"); configHistoryInfo.setTenant(""); configHistoryInfo.setCreatedTime(new Timestamp(new Date().getTime())); configHistoryInfo.setLastModifiedTime(new Timestamp(new Date().getTime())); when(historyService.getPreviousConfigHistoryInfo("test", "test", "", 1L)).thenReturn(configHistoryInfo); MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(Constants.HISTORY_CONTROLLER_PATH + "/previous") .param("dataId", "test").param("group", "test").param("tenant", "").param("id", "1"); String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString(); ConfigHistoryInfo resConfigHistoryInfo = JacksonUtils.toObj(actualValue, ConfigHistoryInfo.class); assertEquals(configHistoryInfo.getDataId(), resConfigHistoryInfo.getDataId()); assertEquals(configHistoryInfo.getGroup(), resConfigHistoryInfo.getGroup()); assertEquals(configHistoryInfo.getContent(), resConfigHistoryInfo.getContent()); }
public static String getAppName() {
    // Resolution order: explicit project name, then server-home heuristic,
    // then the default placeholder.
    final String byProjectName = getAppNameByProjectName();
    if (byProjectName != null) {
        return byProjectName;
    }
    final String byServerHome = getAppNameByServerHome();
    return byServerHome != null ? byServerHome : DEFAULT_APP_NAME;
}
// Verifies the fallback app name is "unknown" when no hints are available.
@Test void testGetAppNameByDefault() { String appName = AppNameUtils.getAppName(); assertEquals("unknown", appName); }
// Builder setter: records the max accepted connections and returns this for chaining.
public ProtocolBuilder accepts(Integer accepts) { this.accepts = accepts; return getThis(); }
// Verifies the builder carries the accepts value into the built config.
@Test void accepts() { ProtocolBuilder builder = new ProtocolBuilder(); builder.accepts(35); Assertions.assertEquals(35, builder.build().getAccepts()); }
// Builds Labels from a resource's metadata labels, dropping any key that
// matches the Strimzi-internal exclusion pattern; a null label map is passed
// through to additionalLabels() unchanged.
public static Labels fromResource(HasMetadata resource) { Map<String, String> additionalLabels = resource.getMetadata().getLabels(); if (additionalLabels != null) { additionalLabels = additionalLabels .entrySet() .stream() .filter(entryset -> !STRIMZI_LABELS_EXCLUSION_PATTERN.matcher(entryset.getKey()).matches()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); } return additionalLabels(additionalLabels); }
// Verifies a non-excluded strimzi.io-domain label is rejected with IAE.
@Test public void testFromResourceWithInvalidStrimziLabelsSecret() { Map<String, String> labelsMap = new HashMap<>(); labelsMap.put(Labels.STRIMZI_DOMAIN + "something", "value"); Secret secret = new SecretBuilder().withNewMetadata().withLabels(labelsMap).endMetadata().build(); assertThrows(IllegalArgumentException.class, () -> Labels.fromResource(secret)); }
// Writes a MySQL binary-protocol datetime. The leading length byte encodes
// which parts follow: 4 = date only, 7 = date+time, 11 = date+time+micros —
// trailing all-zero components are omitted per the protocol. Restyling is
// avoided: the exact write order defines the wire format.
@Override public void write(final MySQLPacketPayload payload, final Object value) { LocalDateTime dateTime = value instanceof LocalDateTime ? (LocalDateTime) value : new Timestamp(((Date) value).getTime()).toLocalDateTime(); int year = dateTime.getYear(); int month = dateTime.getMonthValue(); int dayOfMonth = dateTime.getDayOfMonth(); int hours = dateTime.getHour(); int minutes = dateTime.getMinute(); int seconds = dateTime.getSecond(); int nanos = dateTime.getNano(); boolean isTimeAbsent = 0 == hours && 0 == minutes && 0 == seconds; boolean isNanosAbsent = 0 == nanos; if (isTimeAbsent && isNanosAbsent) { payload.writeInt1(4); writeDate(payload, year, month, dayOfMonth); return; } if (isNanosAbsent) { payload.writeInt1(7); writeDate(payload, year, month, dayOfMonth); writeTime(payload, hours, minutes, seconds); return; } payload.writeInt1(11); writeDate(payload, year, month, dayOfMonth); writeTime(payload, hours, minutes, seconds); writeNanos(payload, nanos); }
// Verifies a midnight timestamp is written in the 4-byte (date-only) form.
@Test void assertWriteWithFourBytes() { MySQLDateBinaryProtocolValue actual = new MySQLDateBinaryProtocolValue(); actual.write(payload, Timestamp.valueOf("1970-01-14 0:0:0")); verify(payload).writeInt1(4); verify(payload).writeInt2(1970); verify(payload).writeInt1(1); verify(payload).writeInt1(14); }
// Static factory: a Globally transform with the default HLL++ precision.
public static <T> Globally<T> globally() { return new AutoValue_ApproximateCountDistinct_Globally.Builder<T>() .setPrecision(HllCount.DEFAULT_PRECISION) .build(); }
// Pipeline test: approximate distinct count over KV elements mapped through a
// hash-based extractor matches the expected estimate.
@Test @Category(NeedsRunner.class) public void testObjectTypesGlobal() { PCollection<Long> approxResultInteger = p.apply( "Int", Create.of( INTS1.stream().map(x -> KV.of(x, KV.of(x, x))).collect(Collectors.toList()))) .apply( "IntHLL", ApproximateCountDistinct.<KV<Integer, KV<Integer, Integer>>>globally() .via((KV<Integer, KV<Integer, Integer>> x) -> (long) x.getValue().hashCode())); PAssert.thatSingleton(approxResultInteger).isEqualTo(INTS1_ESTIMATE); p.run(); }
// Database-backed extension search (fallback when no search engine is used):
// loads all active extensions, applies namespace-exclusion / target-platform /
// category / free-text filters, sorts by the requested comparator
// (relevance | timestamp | rating | downloadCount), reverses for "desc",
// pages via offset+size, and wraps the ids into SearchHits. Restyling is
// avoided: filter order and the relevance-vs-plain entry mapping are
// behavior-critical.
@Transactional @Cacheable(CACHE_DATABASE_SEARCH) @CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true) public SearchHits<ExtensionSearch> search(ISearchService.Options options) { // grab all extensions var matchingExtensions = repositories.findAllActiveExtensions(); // no extensions in the database if (matchingExtensions.isEmpty()) { return new SearchHitsImpl<>(0,TotalHitsRelation.OFF, 0f, null, null, Collections.emptyList(), null, null); } // exlude namespaces if(options.namespacesToExclude != null) { for(var namespaceToExclude : options.namespacesToExclude) { matchingExtensions = matchingExtensions.filter(extension -> !extension.getNamespace().getName().equals(namespaceToExclude)); } } // filter target platform if(TargetPlatform.isValid(options.targetPlatform)) { matchingExtensions = matchingExtensions.filter(extension -> extension.getVersions().stream().anyMatch(ev -> ev.getTargetPlatform().equals(options.targetPlatform))); } // filter category if (options.category != null) { matchingExtensions = matchingExtensions.filter(extension -> { var latest = repositories.findLatestVersion(extension, null, false, true); return latest.getCategories().stream().anyMatch(category -> category.equalsIgnoreCase(options.category)); }); } // filter text if (options.queryString != null) { matchingExtensions = matchingExtensions.filter(extension -> { var latest = repositories.findLatestVersion(extension, null, false, true); return extension.getName().toLowerCase().contains(options.queryString.toLowerCase()) || extension.getNamespace().getName().contains(options.queryString.toLowerCase()) || (latest.getDescription() != null && latest.getDescription() .toLowerCase().contains(options.queryString.toLowerCase())) || (latest.getDisplayName() != null && latest.getDisplayName() .toLowerCase().contains(options.queryString.toLowerCase())); }); } // need to perform the sortBy () // 'relevance' | 'timestamp' | 'rating' | 'downloadCount'; Stream<ExtensionSearch> searchEntries; 
if("relevance".equals(options.sortBy) || "rating".equals(options.sortBy)) { var searchStats = new SearchStats(repositories); searchEntries = matchingExtensions.stream().map(extension -> relevanceService.toSearchEntry(extension, searchStats)); } else { searchEntries = matchingExtensions.stream().map(extension -> { var latest = repositories.findLatestVersion(extension, null, false, true); var targetPlatforms = repositories.findExtensionTargetPlatforms(extension); return extension.toSearch(latest, targetPlatforms); }); } var comparators = new HashMap<>(Map.of( "relevance", new RelevanceComparator(), "timestamp", new TimestampComparator(), "rating", new RatingComparator(), "downloadCount", new DownloadedCountComparator() )); var comparator = comparators.get(options.sortBy); if(comparator != null) { searchEntries = searchEntries.sorted(comparator); } var sortedExtensions = searchEntries.collect(Collectors.toList()); // need to do sortOrder // 'asc' | 'desc'; if ("desc".equals(options.sortOrder)) { // reverse the order Collections.reverse(sortedExtensions); } // Paging var totalHits = sortedExtensions.size(); var endIndex = Math.min(sortedExtensions.size(), options.requestedOffset + options.requestedSize); var startIndex = Math.min(endIndex, options.requestedOffset); sortedExtensions = sortedExtensions.subList(startIndex, endIndex); List<SearchHit<ExtensionSearch>> searchHits; if (sortedExtensions.isEmpty()) { searchHits = Collections.emptyList(); } else { // client is interested only in the extension IDs searchHits = sortedExtensions.stream().map(extensionSearch -> new SearchHit<>(null, null, null, 0.0f, null, null, null, null, null, null, extensionSearch)).collect(Collectors.toList()); } return new SearchHitsImpl<>(totalHits, TotalHitsRelation.OFF, 0f, null, null, searchHits, null, null); }
// Verifies a query string matching the publisher namespace ("redhat") returns
// only that publisher's three extensions, in stable order.
@Test public void testQueryStringPublisherName() { var ext1 = mockExtension("yaml", 3.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages")); var ext2 = mockExtension("java", 4.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages")); var ext3 = mockExtension("openshift", 4.0, 100, 0, "redhat", List.of("Snippets", "Other")); var ext4 = mockExtension("foo", 4.0, 100, 0, "bar", List.of("Other")); Mockito.when(repositories.findAllActiveExtensions()).thenReturn(Streamable.of(List.of(ext1, ext2, ext3, ext4))); var searchOptions = new ISearchService.Options("redhat", null, TargetPlatform.NAME_UNIVERSAL, 50, 0, null, null, false); var result = search.search(searchOptions); // namespace finding assertThat(result.getTotalHits()).isEqualTo(3); // Check it found the correct extension var hits = result.getSearchHits(); assertThat(getIdFromExtensionHits(hits, 0)).isEqualTo(getIdFromExtensionName("yaml")); assertThat(getIdFromExtensionHits(hits, 1)).isEqualTo(getIdFromExtensionName("java")); assertThat(getIdFromExtensionHits(hits, 2)).isEqualTo(getIdFromExtensionName("openshift")); }
@Udf(description = "Subtracts a duration from a timestamp")
public Timestamp timestampSub(
    @UdfParameter(description = "A unit of time, for example DAY or HOUR") final TimeUnit unit,
    @UdfParameter(description = "An integer number of intervals to subtract") final Integer interval,
    @UdfParameter(description = "A TIMESTAMP value.") final Timestamp timestamp) {
    // SQL semantics: any NULL argument yields NULL.
    final boolean anyArgumentNull = unit == null || interval == null || timestamp == null;
    if (anyArgumentNull) {
        return null;
    }
    final long millisToSubtract = unit.toMillis(interval);
    return new Timestamp(timestamp.getTime() - millisToSubtract);
}
// Verifies subtracting a negative interval moves the timestamp forward:
// 100ms - (-300ms) = 400ms.
@Test public void subtractNegativeValueFromTimestamp() { // When: final Timestamp result = udf.timestampSub(TimeUnit.MILLISECONDS, -300, new Timestamp(100)); // Then: final Timestamp expectedResult = new Timestamp(400); assertThat(result, is(expectedResult)); }
// Builds the joined stream: partition counts are only validated for
// co-partitioned (non-foreign-key) joins, then the joiner matching the
// left/right node output types performs the join.
@Override public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) { if (!joinKey.isForeignKey()) { ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient()); } final JoinerFactory joinerFactory = new JoinerFactory( buildContext, this, buildContext.buildNodeContext(getId().toString())); return joinerFactory.getJoiner(left.getNodeOutputType(), right.getNodeOutputType()).join(); }
// Verifies a stream-to-table FULL OUTER join is rejected with KsqlException.
@Test public void shouldNotAllowStreamToTableOuterJoin() { // Given: setupStream(left, leftSchemaKStream); setupTable(right, rightSchemaKTable); final JoinNode joinNode = new JoinNode(nodeId, OUTER, joinKey, true, left, right, empty(), "KAFKA"); // When: final Exception e = assertThrows( KsqlException.class, () -> joinNode.buildStream(planBuildContext) ); // Then: assertThat(e.getMessage(), containsString( "Invalid join type encountered: [FULL] OUTER JOIN")); }
// Resets a consumer group's offsets for one topic to explicit per-partition
// targets. Targets are first clamped into the [earliest, latest] bounds
// fetched from the broker before being committed.
public Mono<Void> resetToOffsets( KafkaCluster cluster, String group, String topic, Map<Integer, Long> targetOffsets) { Preconditions.checkNotNull(targetOffsets); var partitionOffsets = targetOffsets.entrySet().stream() .collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue)); return checkGroupCondition(cluster, group).flatMap( ac -> ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.earliest(), true) .flatMap(earliest -> ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.latest(), true) .map(latest -> editOffsetsBounds(partitionOffsets, earliest, latest)) .flatMap(offsetsToCommit -> resetOffsets(ac, group, offsetsToCommit))) ); }
// Verifies in-range per-partition targets are committed verbatim.
@Test void resetToOffsets() { sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10)); var expectedOffsets = Map.of(0, 5L, 1, 5L, 2, 5L); offsetsResetService.resetToOffsets(cluster, groupId, topic, expectedOffsets).block(); assertOffsets(expectedOffsets); }
// Creates a keyless root bundle (no producing PCollection, empty structural key).
@Override public <T> UncommittedBundle<T> createRootBundle() { return UncommittedImmutableListBundle.create(null, StructuralKey.empty()); }
// Verifies adding an element stamped at TIMESTAMP_MAX_VALUE is rejected with
// an IllegalArgumentException mentioning the offending timestamp.
@Test public void addElementsAtEndOfTimeThrows() { Instant timestamp = BoundedWindow.TIMESTAMP_MAX_VALUE; WindowedValue<Integer> value = WindowedValue.timestampedValueInGlobalWindow(1, timestamp); UncommittedBundle<Integer> bundle = bundleFactory.createRootBundle(); thrown.expect(IllegalArgumentException.class); thrown.expectMessage(timestamp.toString()); bundle.add(value); }
public static <T> void registerBean(String beanName, T bean) {
    // Wire dependencies into the instance first, then expose it in the
    // Spring context as a singleton under the given name.
    final ConfigurableListableBeanFactory beanFactory = getConfigurableBeanFactory();
    beanFactory.autowireBean(bean);
    beanFactory.registerSingleton(beanName, bean);
}
// Verifies a manually registered singleton is retrievable by name with its
// state intact.
@Test public void registerBeanTest() { Demo2 registerBean = new Demo2(); registerBean.setId(123); registerBean.setName("222"); SpringUtil.registerBean("registerBean", registerBean); Demo2 registerBean2 = SpringUtil.getBean("registerBean"); assertEquals(123, registerBean2.getId()); assertEquals("222", registerBean2.getName()); }
// Convenience overload: extracts target fields from the schema's mining fields.
static List<MiningField> getMiningTargetFields(final MiningSchema miningSchema) { return getMiningTargetFields(miningSchema.getMiningFields()); }
// Verifies the sample PMML model yields exactly one target mining field
// ("car_location" with usage type "target").
@Test void getMiningTargetFieldsFromMiningSchema() throws Exception { final InputStream inputStream = getFileInputStream(NO_MODELNAME_SAMPLE_NAME); final PMML toPopulate = org.jpmml.model.PMMLUtil.unmarshal(inputStream); final Model model = toPopulate.getModels().get(0); List<MiningField> retrieved = KiePMMLUtil.getMiningTargetFields(model.getMiningSchema()); assertThat(retrieved).isNotNull(); assertThat(retrieved).hasSize(1); MiningField targetField = retrieved.get(0); assertThat(targetField.getName()).isEqualTo("car_location"); assertThat(targetField.getUsageType().value()).isEqualTo("target"); }
// Convenience overload: applies template entries without the leaf-queue flag.
public void setTemplateEntriesForChild(CapacitySchedulerConfiguration conf, QueuePath childQueuePath) { setTemplateEntriesForChild(conf, childQueuePath, false); }
// Verifies a wildcard template deeper than the configured max auto-created
// queue depth is ignored (weight stays at the -1 default).
@Test public void testIgnoredTemplateWithLimitedAutoCreatedQueueDepth() { conf.set(getTemplateKey(TEST_QUEUE_TWO_LEVEL_WILDCARDS, "capacity"), "5w"); conf.setMaximumAutoCreatedQueueDepth(TEST_QUEUE_AB, 1); new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB) .setTemplateEntriesForChild(conf, TEST_QUEUE_ABC); Assert.assertEquals("weight is set incorrectly", -1f, conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6); }
// Adds all map entries (in the map's iteration order) and returns this for chaining.
public OrderedProperties addMapPairs(Map<String, String> additionalPairs) { addIterablePairs(additionalPairs.entrySet()); return this; }
// Verifies map entries append in insertion order and an existing key
// ("first") has its value overwritten without changing its position.
@Test public void addMapPairs() { Map<String, String> additional = new LinkedHashMap<>(); additional.put("fifth", "5"); additional.put("first", "6"); OrderedProperties pairs = createTestKeyValues().addMapPairs(additional); assertKeyOrder(pairs, "first", "second", "third", "FOURTH", "fifth"); assertValueOrder(pairs, "6", "2", "3", "4", "5"); }
// Spring bean factory: resolves the global working directory (relative paths
// anchored at the Sonar user home), best-effort cleans stale temp folders
// (cleanup failure is only logged), and returns a fresh temp folder.
@Bean("GlobalTempFolder") public TempFolder provide(ScannerProperties scannerProps, SonarUserHome userHome) { var workingPathName = StringUtils.defaultIfBlank(scannerProps.property(CoreProperties.GLOBAL_WORKING_DIRECTORY), CoreProperties.GLOBAL_WORKING_DIRECTORY_DEFAULT_VALUE); var workingPath = Paths.get(workingPathName); if (!workingPath.isAbsolute()) { var home = userHome.getPath(); workingPath = home.resolve(workingPath).normalize(); } try { cleanTempFolders(workingPath); } catch (IOException e) { LOG.error(String.format("failed to clean global working directory: %s", workingPath), e); } var tempDir = createTempFolder(workingPath); return new DefaultTempFolder(tempDir.toFile(), true); }
// Non-Windows only: verifies temp files land under the symlinked home path
// itself (the link is not resolved to its target).
@Test void homeIsSymbolicLink(@TempDir Path realSonarHome, @TempDir Path symlink) throws IOException { assumeTrue(!System2.INSTANCE.isOsWindows()); symlink.toFile().delete(); Files.createSymbolicLink(symlink, realSonarHome); when(sonarUserHome.getPath()).thenReturn(symlink); ScannerProperties globalProperties = new ScannerProperties(Map.of()); TempFolder tempFolder = underTest.provide(globalProperties, sonarUserHome); File newFile = tempFolder.newFile(); assertThat(newFile.getParentFile().getParentFile().toPath().toAbsolutePath()).isEqualTo(symlink); assertThat(newFile.getParentFile().getName()).startsWith(".sonartmp_"); }
// Refreshes cached partition data. A wrapped CachingHiveMetastore delegates;
// otherwise partitions and their stats are reloaded into the local caches,
// and (when name-list caching is on) the partition-keys cache entry for the
// table of the first name is rebuilt with all partition values.
// NOTE(review): assumes all names in the list belong to the same table —
// only the first name's table has its key cache refreshed.
public synchronized void refreshPartition(List<HivePartitionName> partitionNames) { if (metastore instanceof CachingHiveMetastore) { metastore.refreshPartition(partitionNames); } else { Map<HivePartitionName, Partition> updatedPartitions = loadPartitionsByNames(partitionNames); partitionCache.putAll(updatedPartitions); Map<HivePartitionName, HivePartitionStats> updatePartitionStats = loadPartitionsStatistics(partitionNames); partitionStatsCache.putAll(updatePartitionStats); if (enableListNameCache && !partitionNames.isEmpty()) { HivePartitionName firstName = partitionNames.get(0); DatabaseTableName databaseTableName = DatabaseTableName.of(firstName.getDatabaseName(), firstName.getTableName()); // refresh partitionKeysCache with all partition values HivePartitionValue hivePartitionValue = HivePartitionValue.of( databaseTableName, HivePartitionValue.ALL_PARTITION_VALUES); partitionKeysCache.put(hivePartitionValue, loadPartitionKeys(hivePartitionValue)); } } }
// Smoke test: refreshPartition on a CachingHiveMetastore completes without error.
@Test public void testRefreshPartition() { CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore( metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, true); List<HivePartitionName> partitionNames = Lists.newArrayList( HivePartitionName.of("db1", "table1", "col1=1"), HivePartitionName.of("db1", "table1", "col1=2")); cachingHiveMetastore.refreshPartition(partitionNames); }
/**
 * Builds the bag of descriptive URLs for a file in S3: virtual-host/plain HTTP URLs,
 * pre-signed URLs (for non-anonymous logins), the {@code s3://} provider URI and any
 * matching CloudFront distribution URLs.
 */
@Override
public DescriptiveUrlBag toUrl(final Path file) {
    final DescriptiveUrlBag list = new DescriptiveUrlBag();
    if(new HostPreferences(session.getHost()).getBoolean("s3.bucket.virtualhost.disable")) {
        // Virtual-host style disabled: fall back to generic host-based URLs only.
        list.addAll(new DefaultUrlProvider(session.getHost()).toUrl(file));
    }
    else {
        list.add(this.toUrl(file, session.getHost().getProtocol().getScheme(), session.getHost().getPort()));
        list.add(this.toUrl(file, Scheme.http, 80));
        if(StringUtils.isNotBlank(session.getHost().getWebURL())) {
            // Only include when custom domain is configured
            list.addAll(new HostWebUrlProvider(session.getHost()).toUrl(file));
        }
    }
    if(file.isFile()) {
        if(!session.getHost().getCredentials().isAnonymousLogin()) {
            // X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less
            // than 604800 seconds
            // In one hour
            list.add(this.toSignedUrl(file, (int) TimeUnit.HOURS.toSeconds(1)));
            // Default signed URL expiring in 24 hours.
            list.add(this.toSignedUrl(file, (int) TimeUnit.SECONDS.toSeconds(
                new HostPreferences(session.getHost()).getInteger("s3.url.expire.seconds"))));
            // 1 Week
            list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(7)));
            switch(session.getSignatureVersion()) {
                case AWS2:
                    // AWS2 signatures allow longer-lived URLs than the one-week SigV4 cap.
                    // 1 Month
                    list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(30)));
                    // 1 Year
                    list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(365)));
                    break;
                case AWS4HMACSHA256:
                    break;
            }
        }
    }
    // AWS services require specifying an Amazon S3 bucket using S3://bucket
    list.add(new DescriptiveUrl(URI.create(String.format("s3://%s%s",
        containerService.getContainer(file).getName(),
        file.isRoot() ? Path.DELIMITER : containerService.isContainer(file) ?
            Path.DELIMITER : String.format("/%s", URIEncoder.encode(containerService.getKey(file))))),
        DescriptiveUrl.Type.provider,
        MessageFormat.format(LocaleFactory.localizedString("{0} URL"), "S3")));
    // Filter by matching container name
    final Optional<Set<Distribution>> filtered = distributions.entrySet().stream().filter(entry ->
            new SimplePathPredicate(containerService.getContainer(file)).test(entry.getKey()))
        .map(Map.Entry::getValue).findFirst();
    if(filtered.isPresent()) {
        // Add CloudFront distributions
        for(Distribution distribution : filtered.get()) {
            list.addAll(new DistributionUrlProvider(distribution).toUrl(file));
        }
    }
    return list;
}
// With AWS2 signing and a non-anonymous login, exactly 5 signed URLs are produced
// (1h, default expiry, 1 week, 1 month, 1 year).
@Test
public void testToHttpURL() throws Exception {
    final S3Session session = new S3Session(new Host(new S3Protocol() {
        @Override
        public String getAuthorization() {
            // Force AWS2 signature version so the longer-lived signed URLs are included.
            return S3Protocol.AuthenticationHeaderSignatureVersion.AWS2.name();
        }
    }, new S3Protocol().getDefaultHostname())) {
        @Override
        public RequestEntityRestStorageService getClient() {
            try {
                return this.connect(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
            }
            catch(BackgroundException e) {
                fail();
                throw new RuntimeException(e);
            }
        }
    };
    Path p = new Path("/bucket/f/key f", EnumSet.of(Path.Type.file));
    assertEquals(5, new S3UrlProvider(session, Collections.emptyMap(), new DisabledPasswordStore() {
        @Override
        public String findLoginPassword(final Host bookmark) {
            return "k";
        }
    }).toUrl(p).filter(DescriptiveUrl.Type.signed).size());
}
/**
 * Returns the shared immutable empty set, typed for the caller.
 *
 * <p>The unchecked cast is safe because the set is empty and read-only,
 * so no element of type {@code K} can ever be observed or inserted.
 */
@SuppressWarnings("unchecked")
public static <K> CowSet<K> emptySet() {
    return (CowSet<K>) EMPTY_SET;
}
// The shared empty set must be empty, read-only, and reject mutation.
@Test
public void testEmptySet() {
    final CowSet<String> set = CowUtil.emptySet();
    Assert.assertTrue(set.isEmpty());
    Assert.assertTrue(set.isReadOnly());
    try {
        mutateCollection(set);
        Assert.fail("Should have thrown UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {
        // Expected case
    }
    // The same singleton works regardless of the requested element type.
    Assert.assertTrue(CowUtil.<Object>emptySet().isEmpty());
}
@Override public void updateArticle(ArticleUpdateReqVO updateReqVO) { // 校验存在 validateArticleExists(updateReqVO.getId()); // 校验分类存在 validateArticleCategoryExists(updateReqVO.getCategoryId()); // 更新 ArticleDO updateObj = ArticleConvert.INSTANCE.convert(updateReqVO); articleMapper.updateById(updateObj); }
// Happy path: updating an existing article persists all fields from the request VO.
@Test
public void testUpdateArticle_success() {
    // mock 数据 (mock data)
    ArticleDO dbArticle = randomPojo(ArticleDO.class);
    articleMapper.insert(dbArticle);// @Sql: insert an existing row first
    // 准备参数 (prepare parameters)
    ArticleUpdateReqVO reqVO = randomPojo(ArticleUpdateReqVO.class, o -> {
        o.setId(dbArticle.getId()); // set the ID to update
    });
    // 调用 (invoke)
    articleService.updateArticle(reqVO);
    // 校验是否更新正确 (verify the update took effect)
    ArticleDO article = articleMapper.selectById(reqVO.getId()); // fetch the latest row
    assertPojoEquals(reqVO, article);
}
/**
 * Returns the path of the given module's base directory relative to the root
 * module's base directory, or {@code null} when the module does not live under
 * the root.
 */
public String relativePathToRoot(DefaultInputModule module) {
    return PathResolver.relativize(root.getBaseDir(), module.getBaseDir()).orElse(null);
}
// relativePathToRoot: "" for root, relative dir for nested modules,
// null for a module whose base dir is outside the root.
@Test
public void testRelativePathToRoot() throws IOException {
    File rootBaseDir = temp.newFolder();
    File mod1BaseDir = new File(rootBaseDir, "mod1");
    File mod2BaseDir = new File(rootBaseDir, "mod2");
    FileUtils.forceMkdir(mod1BaseDir);
    FileUtils.forceMkdir(mod2BaseDir);
    DefaultInputModule root = new DefaultInputModule(ProjectDefinition.create().setKey("root")
        .setBaseDir(rootBaseDir).setWorkDir(rootBaseDir));
    DefaultInputModule mod1 = new DefaultInputModule(ProjectDefinition.create().setKey("mod1")
        .setBaseDir(mod1BaseDir).setWorkDir(temp.newFolder()));
    DefaultInputModule mod2 = new DefaultInputModule(ProjectDefinition.create().setKey("mod2")
        .setBaseDir(mod2BaseDir).setWorkDir(temp.newFolder()));
    // NOTE(review): mod3 reuses the key "mod2" — presumably intentional since only
    // the base dir matters here, but looks like a copy-paste; confirm.
    DefaultInputModule mod3 = new DefaultInputModule(ProjectDefinition.create().setKey("mod2")
        .setBaseDir(temp.newFolder()).setWorkDir(temp.newFolder()));
    Map<DefaultInputModule, DefaultInputModule> parents = new HashMap<>();
    parents.put(mod1, root);
    parents.put(mod2, mod1);
    parents.put(mod3, mod1);
    moduleHierarchy = new DefaultInputModuleHierarchy(root, parents);
    assertThat(moduleHierarchy.relativePathToRoot(root)).isEmpty();
    assertThat(moduleHierarchy.relativePathToRoot(mod1)).isEqualTo("mod1");
    assertThat(moduleHierarchy.relativePathToRoot(mod2)).isEqualTo("mod2");
    // mod3's base dir is outside the root base dir, so no relative path exists.
    assertThat(moduleHierarchy.relativePathToRoot(mod3)).isNull();
}
/**
 * Registers a gRPC consumer with the messaging processor and returns the channel
 * created for it.
 *
 * <p>Registration always uses CLUSTERING message model and CONSUME_FROM_LAST_OFFSET.
 *
 * @param ctx                   proxy context carrying client id, language and version
 * @param consumerGroup         consumer group to register under
 * @param clientType            gRPC client type, mapped to a consume type
 * @param subscriptionEntryList subscriptions converted to a SubscriptionData set
 * @param updateSubscription    whether to overwrite the stored subscription set
 * @return the channel associated with this client id
 */
protected GrpcClientChannel registerConsumer(ProxyContext ctx, String consumerGroup, ClientType clientType, List<SubscriptionEntry> subscriptionEntryList, boolean updateSubscription) {
    String clientId = ctx.getClientID();
    LanguageCode languageCode = LanguageCode.valueOf(ctx.getLanguage());
    GrpcClientChannel channel = this.grpcChannelManager.createChannel(ctx, clientId);
    ClientChannelInfo clientChannelInfo = new ClientChannelInfo(channel, clientId, languageCode, parseClientVersion(ctx.getClientVersion()));
    this.messagingProcessor.registerConsumer(
        ctx,
        consumerGroup,
        clientChannelInfo,
        this.buildConsumeType(clientType),
        MessageModel.CLUSTERING,
        ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET,
        this.buildSubscriptionDataSet(subscriptionEntryList),
        updateSubscription
    );
    return channel;
}
// A consumer heartbeat must register the consumer with the correct channel info
// and a TAG-type subscription with substring "tag".
@Test
public void testConsumerHeartbeat() throws Throwable {
    ProxyContext context = createContext();
    this.sendConsumerTelemetry(context);

    // Capture the registration arguments instead of stubbing behavior.
    ArgumentCaptor<Set<SubscriptionData>> subscriptionDatasArgumentCaptor = ArgumentCaptor.forClass(Set.class);
    ArgumentCaptor<ClientChannelInfo> channelInfoArgumentCaptor = ArgumentCaptor.forClass(ClientChannelInfo.class);
    doNothing().when(this.messagingProcessor).registerConsumer(any(), anyString(),
        channelInfoArgumentCaptor.capture(),
        any(), any(), any(),
        subscriptionDatasArgumentCaptor.capture(),
        anyBoolean()
    );

    HeartbeatResponse response = this.sendConsumerHeartbeat(context);
    assertEquals(Code.OK, response.getStatus().getCode());

    ClientChannelInfo clientChannelInfo = channelInfoArgumentCaptor.getValue();
    assertClientChannelInfo(clientChannelInfo, CONSUMER_GROUP);

    SubscriptionData data = subscriptionDatasArgumentCaptor.getValue().stream().findAny().get();
    assertEquals("TAG", data.getExpressionType());
    assertEquals("tag", data.getSubString());
}
/**
 * Returns the value at the given 1-based column index as an {@link Object},
 * delegating to the merged result set.
 */
@Override
public Object getObject(final int columnIndex) throws SQLException {
    return mergeResultSet.getValue(columnIndex, Object.class);
}
// getObject(int, LocalDateTime.class) must convert the underlying Timestamp
// back to the original LocalDateTime.
@Test
void assertGetObjectWithLocalDateTime() throws SQLException {
    LocalDateTime result = LocalDateTime.now();
    when(mergeResultSet.getValue(1, Timestamp.class)).thenReturn(Timestamp.valueOf(result));
    assertThat(shardingSphereResultSet.getObject(1, LocalDateTime.class), is(result));
}
/**
 * Formats a millisecond duration as a compact human-readable string, e.g.
 * {@code "2h5m3s"}. Zero-valued hour/minute components are omitted; the seconds
 * component is always present when everything else is zero (so 0 ms renders as
 * {@code "0s"}). Sub-second remainders are truncated.
 *
 * @param periodMs the duration in milliseconds
 * @return the formatted duration string
 */
public static String durationString(long periodMs) {
    Duration remaining = Duration.ofMillis(periodMs);
    final StringBuilder result = new StringBuilder();
    final long hours = remaining.toHours();
    if (hours > 0) {
        result.append(hours).append("h");
        remaining = remaining.minusHours(hours);
    }
    final long minutes = remaining.toMinutes();
    if (minutes > 0) {
        result.append(minutes).append("m");
        remaining = remaining.minusMinutes(minutes);
    }
    final long seconds = remaining.getSeconds();
    // Always emit seconds when nothing else was emitted, so the result is never empty.
    if (seconds != 0 || result.length() == 0) {
        result.append(seconds).append("s");
    }
    return result.toString();
}
// Exercises durationString: component omission, sub-second truncation (61200 -> "1m1s"),
// and mixed hour/minute/second combinations.
@Test
public void testDurationString() {
    assertEquals("1m", durationString(60000));
    assertEquals("1m1s", durationString(61000));
    assertEquals("1m1s", durationString(61200));
    assertEquals("5s", durationString(5000));
    assertEquals("2h", durationString(7200000));
    assertEquals("2h1s", durationString(7201000));
    assertEquals("2h5m3s", durationString(7503000));
}
/**
 * Computes the next intermediate assignment for a single segment while rebalancing
 * from {@code currentInstanceStateMap} towards {@code targetInstanceStateMap},
 * keeping at least {@code minAvailableReplicas} replicas available at every step.
 *
 * <p>Results are memoized in {@code assignmentMap} keyed on (current, target) instance
 * sets so segments with the same transition land on the same mirror servers.
 * {@code numSegmentsToOffloadMap} is used to prefer instances with the least work to shed.
 * With {@code lowDiskMode}, new instances are only added once no extra current
 * instances remain to drop, so drops happen before adds.
 */
@VisibleForTesting
static SingleSegmentAssignment getNextSingleSegmentAssignment(Map<String, String> currentInstanceStateMap,
    Map<String, String> targetInstanceStateMap, int minAvailableReplicas, boolean lowDiskMode,
    Map<String, Integer> numSegmentsToOffloadMap, Map<Pair<Set<String>, Set<String>>, Set<String>> assignmentMap) {
  Map<String, String> nextInstanceStateMap = new TreeMap<>();

  // Assign the segment the same way as other segments if the current and target instances are the same. We need this
  // to guarantee the mirror servers for replica-group based routing strategies.
  Set<String> currentInstances = currentInstanceStateMap.keySet();
  Set<String> targetInstances = targetInstanceStateMap.keySet();
  Pair<Set<String>, Set<String>> assignmentKey = Pair.of(currentInstances, targetInstances);
  Set<String> instancesToAssign = assignmentMap.get(assignmentKey);
  if (instancesToAssign != null) {
    Set<String> availableInstances = new TreeSet<>();
    for (String instanceName : instancesToAssign) {
      String currentInstanceState = currentInstanceStateMap.get(instanceName);
      String targetInstanceState = targetInstanceStateMap.get(instanceName);
      if (currentInstanceState != null) {
        // Instance already hosts the segment, so it counts as available.
        availableInstances.add(instanceName);
        // Use target instance state if available in case the state changes
        nextInstanceStateMap.put(instanceName, targetInstanceState != null ?
            targetInstanceState : currentInstanceState);
      } else {
        nextInstanceStateMap.put(instanceName, targetInstanceState);
      }
    }
    return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
  }

  // Add all the common instances
  // Use target instance state in case the state changes
  for (Map.Entry<String, String> entry : targetInstanceStateMap.entrySet()) {
    String instanceName = entry.getKey();
    if (currentInstanceStateMap.containsKey(instanceName)) {
      nextInstanceStateMap.put(instanceName, entry.getValue());
    }
  }

  // Add current instances until the min available replicas achieved
  int numInstancesToKeep = minAvailableReplicas - nextInstanceStateMap.size();
  if (numInstancesToKeep > 0) {
    // Sort instances by number of segments to offload, and keep the ones with the least segments to offload
    List<Triple<String, String, Integer>> instancesInfo =
        getSortedInstancesOnNumSegmentsToOffload(currentInstanceStateMap, nextInstanceStateMap,
            numSegmentsToOffloadMap);
    numInstancesToKeep = Integer.min(numInstancesToKeep, instancesInfo.size());
    for (int i = 0; i < numInstancesToKeep; i++) {
      Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
      nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
    }
  }
  // Everything assigned so far already hosts the segment, hence is available.
  Set<String> availableInstances = new TreeSet<>(nextInstanceStateMap.keySet());

  // After achieving the min available replicas, when low disk mode is enabled, only add new instances when all
  // current instances exist in the next assignment.
  // We want to first drop the extra instances as one step, then add the target instances as another step to avoid the
  // case where segments are first added to the instance before other segments are dropped from the instance, which
  // might cause server running out of disk. Note that even if segment addition and drop happen in the same step,
  // there is no guarantee that server process the segment drop before the segment addition.
  if (!lowDiskMode || currentInstanceStateMap.size() == nextInstanceStateMap.size()) {
    int numInstancesToAdd = targetInstanceStateMap.size() - nextInstanceStateMap.size();
    if (numInstancesToAdd > 0) {
      // Sort instances by number of segments to offload, and add the ones with the least segments to offload
      List<Triple<String, String, Integer>> instancesInfo =
          getSortedInstancesOnNumSegmentsToOffload(targetInstanceStateMap, nextInstanceStateMap,
              numSegmentsToOffloadMap);
      for (int i = 0; i < numInstancesToAdd; i++) {
        Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
        nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
      }
    }
  }

  // Memoize so later segments with the same (current, target) transition reuse this assignment.
  assignmentMap.put(assignmentKey, nextInstanceStateMap.keySet());
  return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
}
// Exercises getNextSingleSegmentAssignment with minAvailableReplicas = 2 across
// decreasing overlap between current and target instances, replica count changes,
// and the single-replica edge case.
@Test
public void testTwoMinAvailableReplicas() {
  // With 3 common instances, first assignment should be the same as target assignment
  Map<String, String> currentInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3", "host4"), ONLINE);
  Map<String, String> targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3", "host5"), ONLINE);
  TableRebalancer.SingleSegmentAssignment assignment =
      getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2", "host3")));

  // With 2 common instances, first assignment should be the same as target assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));

  // With 1 common instance, first assignment should have 2 common instances with current assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host5", "host6", "host7"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6"), ONLINE));
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
  // Second assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host5", "host6")));

  // Without common instance, first assignment should have 2 common instances with current assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6", "host7", "host8"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6"), ONLINE));
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
  // Second assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6")));

  // With increasing number of replicas, first assignment should have 1 common instances with current assignment
  targetInstanceStateMap = SegmentAssignmentUtils.getInstanceStateMap(
      Arrays.asList("host5", "host6", "host7", "host8", "host9"), ONLINE);
  // [host1, host2, host5, host6, host7]
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5", "host6", "host7"), ONLINE));
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
  // Second assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances,
      new TreeSet<>(Arrays.asList("host5", "host6", "host7")));

  // With decreasing number of replicas, first assignment should have 2 common instances with current assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6", "host7"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host5"), ONLINE));
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));
  // Second assignment should have 2 common instances with first assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host5", "host6"), ONLINE));
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host5")));
  // Third assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6")));

  // With increasing from 1 replica, first assignment should have 1 common instances with current assignment
  // NOTE: This is the best we can do because we don't have 2 replicas available
  currentInstanceStateMap = SegmentAssignmentUtils.getInstanceStateMap(Collections.singletonList("host1"), ONLINE);
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host2", "host3", "host4"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE));
  assertEquals(assignment._availableInstances, Collections.singleton("host1"));
  // Second assignment should make the assignment the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 2, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host2", "host3")));
}
/**
 * Returns the detail of a single action button by its id.
 *
 * @param actionId id of the action to look up
 * @return the action wrapped in a success result (payload may be null if not found)
 */
@ApiOperation("查看功能按钮详情")
@GetMapping("/{actionId}")
public ApiResult<BaseAction> detail(@PathVariable Long actionId){
    return ApiResult.success(baseActionService.getById(actionId));
}
// NOTE(review): empty test body — this asserts nothing and always passes.
// TODO: either implement a real assertion against the detail endpoint or remove it.
@Test
void detail() {
}
/**
 * Adds a number of intervals of the given unit to a TIME value, wrapping around
 * midnight (LocalTime arithmetic), and returns the shifted TIME.
 * Returns null when any argument is null.
 */
@Udf(description = "Adds a duration to a time")
public Time timeAdd(
    @UdfParameter(description = "A unit of time, for example SECOND or HOUR") final TimeUnit unit,
    @UdfParameter(description = "An integer number of intervals to add") final Integer interval,
    @UdfParameter(description = "A TIME value.") final Time time
) {
  if (unit == null || interval == null || time == null) {
    return null;
  }
  // Work in nanoseconds-of-day; LocalTime arithmetic wraps around midnight,
  // so adding whole days leaves the time-of-day unchanged.
  final long startNanos = TimeUnit.MILLISECONDS.toNanos(time.getTime());
  final LocalTime shifted = LocalTime.ofNanoOfDay(startNanos).plusNanos(unit.toNanos(interval));
  return new Time(TimeUnit.NANOSECONDS.toMillis(shifted.toNanoOfDay()));
}
// timeAdd must shift within the day and wrap around midnight:
// whole-day offsets leave the time-of-day unchanged.
@Test
public void shouldAddToTime() {
    // When:
    assertThat(udf.timeAdd(TimeUnit.MILLISECONDS, 50, new Time(1000)).getTime(), is(1050L));
    assertThat(udf.timeAdd(TimeUnit.DAYS, 2, new Time(1000)).getTime(), is(1000L));
    assertThat(udf.timeAdd(TimeUnit.DAYS, -2, new Time(1000)).getTime(), is(1000L));
    // 1 minute minus 1 minute wraps to midnight (0).
    assertThat(udf.timeAdd(TimeUnit.MINUTES, -1, new Time(60000)).getTime(), is(0L));
}
/**
 * Returns {@code true} if the given string is non-null and non-empty.
 */
public static boolean isset(String s) {
    return s != null && !s.isEmpty();
}
// isset(String): false for null and empty, true for any non-empty string.
@Test
public void testIssetString() {
    Assert.assertFalse(TempletonUtils.isset((String)null));
    Assert.assertFalse(TempletonUtils.isset(""));
    Assert.assertTrue(TempletonUtils.isset("hello"));
}
/**
 * Resolves a Hessian deserializer for the given type name.
 *
 * <p>JDK built-in types (including filtered array types) are delegated to the parent
 * factory. Custom {@link Throwable} subclasses get a Java deserializer so they come
 * back as real Throwables rather than GenericObjects. Everything else gets a cached
 * generic deserializer ({@code java.lang.Class} is special-cased).
 *
 * @param type fully qualified class name (array types start with {@code [})
 * @throws HessianProtocolException if the parent factory fails to resolve the type
 */
@Override
public Deserializer getDeserializer(String type) throws HessianProtocolException {
    // 如果类型在过滤列表, 说明是jdk自带类, 直接委托父类处理
    // (Type is in the filter list -> JDK built-in class, delegate to the parent.)
    if (StringUtils.isEmpty(type) || ClassFilter.filterExcludeClass(type)) {
        return super.getDeserializer(type);
    }
    // 如果是数组类型, 且在name过滤列表, 说明jdk类, 直接委托父类处理
    // (Array type whose component is a JDK class -> delegate to the parent.)
    if (type.charAt(0) == ARRAY_PREFIX && ClassFilter.arrayFilter(type)) {
        return super.getDeserializer(type);
    }
    // 查看是否已经包含反序列化器 (check the cache first)
    Deserializer deserializer = DESERIALIZER_MAP.get(type);
    if (deserializer != null) {
        return deserializer;
    }
    // 自定义Throwable采用JavaDeserializer,反序列化成Throwable而不是GenericObject
    // (Custom Throwables use a JavaDeserializer so they deserialize as Throwable.)
    deserializer = getDeserializerForCustomThrowable(type);
    if (deserializer != null) {
        DESERIALIZER_MAP.putIfAbsent(type, deserializer);
        return deserializer;
    }
    // 新建反序列化器, 如果是java.lang.Class使用GenericClassDeserializer,否则使用GenericDeserializer
    // (Create a new deserializer: GenericClassDeserializer for java.lang.Class, otherwise GenericDeserializer.)
    if (ClassFilter.CLASS_NAME.equals(type)) {
        deserializer = GenericClassDeserializer.getInstance();
    } else {
        deserializer = new GenericDeserializer(type);
    }
    // putIfAbsent tolerates a concurrent writer; the cached instance wins either way.
    DESERIALIZER_MAP.putIfAbsent(type, deserializer);
    return deserializer;
}
// Round-trips a custom Error through the generic factory: the resolved deserializer
// is a GenericDeserializer and the payload comes back as a GenericObject preserving
// the detailMessage field.
@Test
public void testCustomThrowableDeserializer() throws Exception {
    GenericSingleClassLoaderSofaSerializerFactory factory = new GenericSingleClassLoaderSofaSerializerFactory();
    ByteArrayOutputStream bsOut = new ByteArrayOutputStream();
    Hessian2Output hessian2Output = new Hessian2Output(bsOut);
    hessian2Output.setSerializerFactory(factory);
    MockError mockError = new MockError("MockError");
    hessian2Output.writeObject(mockError);
    hessian2Output.flush();

    Deserializer genericDeserializer = factory.getDeserializer(MockError.class.getName());
    Assert.assertTrue(genericDeserializer instanceof GenericDeserializer);

    ByteArrayInputStream bsIn = new ByteArrayInputStream(bsOut.toByteArray());
    Hessian2Input hessian2Input = new Hessian2Input(bsIn);
    hessian2Input.setSerializerFactory(factory);
    Object result = hessian2Input.readObject();
    Assert.assertTrue(result instanceof GenericObject);
    Assert.assertEquals("MockError", ((GenericObject) result).getField("detailMessage"));
}
/**
 * FEEL now(): returns the current date-time with time zone as a successful result.
 */
public FEELFnResult<TemporalAccessor> invoke() {
    return FEELFnResult.ofResult( ZonedDateTime.now() );
}
// now() must succeed and produce a ZonedDateTime; the actual value is not compared
// because the clock advances between calls.
@Test
void invoke() {
    // The current time that we need to compare will almost never be the same as another one we get for
    // comparison purposes,
    // because there is some execution between them, so the comparison assertion doesn't make sense.
    // Note: We cannot guarantee any part of the date to be the same. E.g. in case when the test is executed
    // at the exact moment when the year is flipped to the next one, we cannot guarantee the year will be the same.
    final FEELFnResult<TemporalAccessor> nowResult = nowFunction.invoke();
    assertThat(nowResult.isRight()).isTrue();
    final TemporalAccessor result = nowResult.cata(left -> null, right -> right);
    assertThat(result).isNotNull();
    assertThat(result).isInstanceOfAny(ZonedDateTime.class);
}
/**
 * Returns whether a retry has been observed (backed by an atomic flag).
 */
public boolean retried() {
    return retried.get();
}
// A freshly constructed detector has seen no retries.
@Test
public void testNoMetrics() {
    RetryDetector detector = new RetryDetector();
    assertThat(detector.retried()).as("Should default to false").isFalse();
}
/**
 * Returns the cached savepoint operation result for the given key, or a future
 * failed with {@link UnknownOperationKeyException} when the key is unknown.
 *
 * @param operationKey key identifying the asynchronous savepoint operation
 * @return completed future with the cached result, or an exceptionally completed future
 */
public CompletableFuture<OperationResult<String>> getSavepointStatus(
        AsynchronousJobOperationKey operationKey) {
    return savepointTriggerCache
            .get(operationKey)
            .map(CompletableFuture::completedFuture)
            // orElseGet (lazy) instead of orElse: avoid constructing the exception and
            // its stack trace on every lookup that actually hits the cache.
            .orElseGet(
                    () ->
                            FutureUtils.completedExceptionally(
                                    new UnknownOperationKeyException(operationKey)));
}
// An unknown operation key must yield a future failed with UnknownOperationKeyException.
@Test
public void getStatusFailsIfKeyUnknown() throws InterruptedException {
    CompletableFuture<OperationResult<String>> statusFuture =
            handler.getSavepointStatus(operationKey);

    assertThat(statusFuture, futureFailedWith(UnknownOperationKeyException.class));
}
/**
 * Validates every declared field of the given config object, then the config
 * object itself, against the supplied validation annotation class.
 *
 * @param config          the configuration instance to validate
 * @param annotationClass the validation annotation type to apply
 * @throws RuntimeException if reflective field access fails unexpectedly
 */
public static void validateConfig(Object config, Class annotationClass) {
    for (Field field : config.getClass().getDeclaredFields()) {
        // Private fields included: make them readable before reading.
        field.setAccessible(true);
        final Object fieldValue;
        try {
            fieldValue = field.get(config);
        } catch (IllegalAccessException e) {
            // Should not happen after setAccessible(true); surface as unchecked.
            throw new RuntimeException(e);
        }
        validateField(field, fieldValue, annotationClass);
    }
    // Class-level validation runs after all fields pass.
    validateClass(config, annotationClass);
}
// A well-formed config must pass validation without throwing.
@Test
public void testGoodConfig() {
    TestConfig testConfig = createGoodConfig();
    ConfigValidation.validateConfig(testConfig);
}
/**
 * Pure delegation: forwards content production to the wrapped producer.
 */
@Override
public void produceContent(ContentEncoder encoder, IOControl ioControl) throws IOException {
    httpAsyncRequestProducer.produceContent(encoder, ioControl);
}
// The decorator must forward produceContent to its delegate exactly once,
// passing the arguments through untouched.
@Test
public void produceContent() throws IOException {
    final HttpAsyncRequestProducer delegate = Mockito.mock(HttpAsyncRequestProducer.class);
    final HttpAsyncRequestProducerDecorator decorator = new HttpAsyncRequestProducerDecorator(
        delegate, null, null);
    decorator.produceContent(null, null);
    Mockito.verify(delegate, Mockito.times(1)).produceContent(null, null);
}
/**
 * Signs the given data with the given key using the RAM signing algorithm.
 * When either argument is blank the data is returned unmodified.
 *
 * @param data the payload to sign
 * @param key  the signing key
 * @return the signature, or {@code data} itself when signing is not possible
 */
@Override
public String sign(String data, String key) {
    // Guard clause: nothing to sign with (or nothing to sign) — pass the data through.
    if (StringUtils.isBlank(key) || StringUtils.isBlank(data)) {
        return data;
    }
    return RamSignAdapter.getRamSign(data, key);
}
// The RAM signature for a fixed data/key pair must match the known-good value.
@Test
public void testGetRamSignNotNull() {
    String data = "testGroup,127.0.0.1,1702564471650";
    String key = "exampleEncryptKey";
    String expectedSign = "6g9nMk6BRLFxl7bf5ZfWaEZvGdho3JBmwvx5rqgSUCE=";
    DefaultAuthSigner signer = new DefaultAuthSigner();
    String sign = signer.sign(data, key);
    Assertions.assertEquals(expectedSign, sign);
}
/**
 * Static factory: creates a Zstd-compressing coder wrapping {@code innerCoder},
 * using the given compression dictionary and level.
 */
public static <T> ZstdCoder<T> of(Coder<T> innerCoder, byte[] dict, int level) {
    return new ZstdCoder<>(innerCoder, dict, level);
}
@Test
public void testEncodingNotBuffered() throws Exception {
    // This test ensures that the coder does not buffer any data from the inner stream.
    // This is not of much importance today, since the coder relies on direct compression and uses
    // ByteArrayCoder to encode the resulting byte[], but this may change if the coder switches to
    // stream based compression in which case the stream must not buffer input from the inner
    // stream.
    for (String value : TEST_VALUES) {
        // KV pairing forces the coder to encode alongside a second component, which would
        // expose any buffering of the first component's bytes.
        CoderProperties.coderDecodeEncodeEqual(
            KvCoder.of(TEST_CODER, StringUtf8Coder.of()), KV.of(value, value));
    }
}
/**
 * Removes the client registered for the given address and closes it.
 * No-op when no client is registered for the address.
 *
 * @param address the remote address whose client should be closed
 * @throws OperationException if closing the client fails with an I/O error
 */
public static void closeClient(InetSocketAddress address) {
    final Client removed = CLIENT_MAP.remove(address);
    if (removed == null) {
        return;
    }
    try {
        removed.close();
    } catch (IOException e) {
        // Wrap the checked I/O failure in the module's unchecked exception type.
        throw new OperationException(e);
    }
}
// Closing via ClientSupport for an address, then closing the RPC client directly,
// must both complete without throwing.
@Test
public void closeTest() throws IOException {
    // NOTE(review): this address is created unresolved while closeClient is called with a
    // resolved one — presumably exercising map-key equality across the two forms; confirm.
    InetSocketAddress address = InetSocketAddress.createUnresolved(addressStr, port.getPort());
    ChannelPoolHandler channelPoolHandler = new ClientPoolHandler(new ClientTakeHandler());
    ClientConnection clientConnection = new SimpleClientConnection(address, channelPoolHandler);
    RPCClient rpcClient = new RPCClient(clientConnection);
    ClientSupport.closeClient(new InetSocketAddress(addressStr, port.getPort()));
    rpcClient.close();
}
/**
 * Parses a header value using the default value separator and qualifier separator.
 * Convenience overload of {@code parseHeaderValue(String, String, String)}.
 */
protected List<HeaderValue> parseHeaderValue(String headerValue) {
    return parseHeaderValue(headerValue, HEADER_VALUE_SEPARATOR, HEADER_QUALIFIER_SEPARATOR);
}
// A complex Accept header with four comma-separated media ranges parses into
// exactly four header values.
@Test
void headers_parseHeaderValue_complexAccept() {
    AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(complexAcceptHeader, mockContext, null, config);
    List<AwsHttpServletRequest.HeaderValue> values = request.parseHeaderValue(request.getHeader(HttpHeaders.ACCEPT), ",", ";");

    assertEquals(4, values.size());
}
/**
 * Returns this keyboard's identifier (the value supplied at construction).
 */
@NonNull
@Override
public String getKeyboardId() {
    return mKeyboardId;
}
// The keyboard id returned must be the one passed to the constructor,
// and must not fall back to the add-on's id.
@Test
public void testKeyboardIdPassed() {
    GenericKeyboard keyboard =
        new GenericKeyboard(
            mDefaultAddOn,
            mContext,
            R.xml.symbols,
            R.xml.symbols,
            "test",
            "test",
            Keyboard.KEYBOARD_ROW_MODE_NORMAL);
    Assert.assertEquals("test", keyboard.getKeyboardId());
    Assert.assertNotEquals(keyboard.getKeyboardId(), mDefaultAddOn.getId());
}
/**
 * Reports whether the SNMP device with the given id is reachable.
 * Returns {@code false} (with a warning) when the id maps to no known device.
 */
@Override
public boolean isReachable(DeviceId deviceId) {
    SnmpDevice snmpDevice = controller.getDevice(deviceId);
    if (snmpDevice == null) {
        // Parameterized logging: defers message formatting to the logger instead of
        // eagerly concatenating strings on every miss.
        log.warn("BAD REQUEST: the requested device id: {} is not associated to any SNMP Device",
                 deviceId);
        return false;
    }
    return snmpDevice.isReachable();
}
// Adding a new device via a config event must register it with the controller
// and leave it reachable.
@Test
public void addDeviceNew() {
    assertTrue("Event should be relevant", provider.cfgLister.isRelevant(deviceAddedNewEvent));
    provider.cfgLister.event(deviceAddedNewEvent);
    AbstractProjectableModel.setDriverService(null, new MockDriverService());
    //FIXME this needs sleep
    // Registration is asynchronous; poll until the device shows up (or time out).
    assertAfter(DELAY, TEST_DURATION, () ->
            assertNotNull("Device should be added to controller", controller.getDevice(deviceId)));
    assertTrue("Device should be reachable", provider.isReachable(deviceId));
}
/**
 * Substitutes variable references in {@code val} using the given property container.
 * Convenience overload with no secondary container.
 *
 * @throws ScanException if the value contains malformed variable syntax
 */
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
    return substVars(val, pc1, null);
}
// Nested substitution with a ":-" default at the inner level must resolve
// through two levels of indirection (regression test for LOGBCK-943).
@Test
public void testSubstVarsTwoLevelsWithDefault() throws ScanException {
    // Example input taken from LOGBCK-943 bug report
    context.putProperty("APP_NAME", "LOGBACK");
    context.putProperty("ARCHIVE_SUFFIX", "archive.log");
    // logfilepath.default is undefined, so the ":-logs" default applies.
    context.putProperty("LOG_HOME", "${logfilepath.default:-logs}");
    context.putProperty("ARCHIVE_PATH", "${LOG_HOME}/archive/${APP_NAME}");
    String result = OptionHelper.substVars("${ARCHIVE_PATH}_trace_${ARCHIVE_SUFFIX}", context);
    assertEquals("logs/archive/LOGBACK_trace_archive.log", result);
}
/**
 * Formats an expression with default options (no identifier escaping).
 * Convenience overload of {@code formatExpression(Expression, FormatOptions)}.
 */
public static String formatExpression(final Expression expression) {
    return formatExpression(expression, FormatOptions.of(s -> false));
}
// STRUCT formatting must quote only field names the FormatOptions predicate
// flags as reserved ("foo" here), leaving other names bare.
@Test
public void shouldFormatStructExpression() {
    assertThat(ExpressionFormatter.formatExpression(new CreateStructExpression(
            ImmutableList.of(
                new Field("foo", new StringLiteral("abc")),
                new Field("bar", new SubscriptExpression(new UnqualifiedColumnReferenceExp(ColumnName.of("abc")), new IntegerLiteral(1))))
        ), FormatOptions.of(exp -> exp.equals("foo"))),
        equalTo("STRUCT(`foo`:='abc', bar:=abc[1])"));
}
/**
 * Rewrites the given URL against the configured site URL (non-secure variant).
 * Convenience overload of {@code siteUrlFor(String, boolean)}.
 *
 * @throws URISyntaxException if the given URL is not a valid URI
 */
public String siteUrlFor(String givenUrl) throws URISyntaxException {
    return siteUrlFor(givenUrl, false);
}
// The host portion of the given URL is replaced by the configured site URL,
// while the path is preserved.
@Test
public void shouldGenerateSiteUrlForGivenUrl() throws URISyntaxException {
    ServerSiteUrlConfig url = new SiteUrl("http://someurl.com");
    assertThat(url.siteUrlFor("http://test.host/foo/bar"), is("http://someurl.com/foo/bar"));
}
/**
 * Identifies this decorator as handling the Dubbo RPC type.
 */
@Override
public String rpcType() {
    return RpcTypeEnum.DUBBO.getName();
}
// The decorator must report the Dubbo RPC type name.
// NOTE(review): this uses the Java `assert` keyword, which is a no-op unless the JVM
// runs with -ea — prefer the test framework's assertion API; confirm and fix.
@Test
public void rpcType() {
    assert RpcTypeEnum.DUBBO.getName().equals(dubboShenyuContextDecorator.rpcType());
}
/**
 * Discovers Hazelcast members via the AWS API.
 *
 * <p>Each discovered (private, public) address pair is expanded over the configured
 * port range into {@link SimpleDiscoveryNode}s. On any failure the member starts
 * standalone: an empty list is returned and the cause is logged — well-known causes
 * (missing credentials, missing ec2:DescribeInstances permission) are warned only once.
 */
@Override
public Iterable<DiscoveryNode> discoverNodes() {
    try {
        Map<String, String> addresses = awsClient.getAddresses();
        logResult(addresses);

        List<DiscoveryNode> result = new ArrayList<>();
        for (Map.Entry<String, String> entry : addresses.entrySet()) {
            // Map entries are private-IP -> public-IP; expand each over the port range.
            for (int port = portRange.getFromPort(); port <= portRange.getToPort(); port++) {
                Address privateAddress = new Address(entry.getKey(), port);
                Address publicAddress = new Address(entry.getValue(), port);
                result.add(new SimpleDiscoveryNode(privateAddress, publicAddress));
            }
        }

        return result;
    } catch (NoCredentialsException e) {
        if (!isKnownExceptionAlreadyLogged) {
            LOGGER.warning("No AWS credentials found! Starting standalone. To use Hazelcast AWS discovery, configure"
                + " properties (access-key, secret-key) or assign the required IAM Role to your EC2 instance");
            LOGGER.finest(e);
            isKnownExceptionAlreadyLogged = true;
        }
    } catch (RestClientException e) {
        if (e.getHttpErrorCode() == HTTP_FORBIDDEN) {
            if (!isKnownExceptionAlreadyLogged) {
                LOGGER.warning("AWS IAM Role Policy missing 'ec2:DescribeInstances' Action! Starting standalone.");
                isKnownExceptionAlreadyLogged = true;
            }
            LOGGER.finest(e);
        } else {
            LOGGER.warning("Cannot discover nodes. Starting standalone.", e);
        }
    } catch (Exception e) {
        // Deliberate catch-all: discovery failure must never prevent member startup.
        LOGGER.warning("Cannot discover nodes. Starting standalone.", e);
    }
    return Collections.emptyList();
}
// An empty address map from the AWS client must yield an empty node list,
// not null and not an exception.
@Test public void discoverNodesEmpty() { // given given(awsClient.getAddresses()).willReturn(Collections.emptyMap()); // when Iterable<DiscoveryNode> result = awsDiscoveryStrategy.discoverNodes(); // then assertEquals(emptyList(), result); }
/**
 * Creates a new Jackson {@link ObjectMapper} pre-configured with this
 * class's default settings.
 *
 * @return a freshly constructed and configured mapper
 */
public static ObjectMapper newObjectMapper() {
    return configure(new ObjectMapper());
}
// Passing null where a custom JsonFactory is accepted must still yield a mapper
// with a usable (default) factory.
@Test void objectMapperCanHandleNullInsteadOfCustomJsonFactory() { ObjectMapper mapper = Jackson.newObjectMapper(null); assertThat(mapper.getFactory()).isNotNull(); }
/**
 * Writes the given protocol as IDL, splitting the protocol's name into a
 * namespace and simple name. Namespace derivation from the full name:
 * no dot (lastDotPos &lt; 0) &rarr; fall back to the protocol's own namespace;
 * dot at a later position &rarr; everything before the last dot;
 * leading dot (lastDotPos == 0) &rarr; namespace is explicitly null.
 * The simple name is always the substring after the last dot.
 *
 * @param writer   destination for the IDL output
 * @param protocol the protocol to render
 * @throws IOException if writing fails
 */
public static void writeIdlProtocol(Writer writer, Protocol protocol) throws IOException { final String protocolFullName = protocol.getName(); final int lastDotPos = protocolFullName.lastIndexOf("."); final String protocolNameSpace; if (lastDotPos < 0) { protocolNameSpace = protocol.getNamespace(); } else if (lastDotPos > 0) { protocolNameSpace = protocolFullName.substring(0, lastDotPos); } else { protocolNameSpace = null; } writeIdlProtocol(writer, protocol, protocolNameSpace, protocolFullName.substring(lastDotPos + 1), protocol.getTypes(), protocol.getMessages().values()); }
// An unnamed (primitive) schema such as plain STRING cannot be written as an
// IDL protocol and must be rejected with AvroRuntimeException.
@Test public void cannotWriteProtocolWithUnnamedTypes() { assertThrows(AvroRuntimeException.class, () -> IdlUtils.writeIdlProtocol(new StringWriter(), Schema.create(Schema.Type.STRING))); }
/**
 * Parses a raw HTTP header value into its components using the default
 * value separator and qualifier (key=value) separator.
 *
 * @param headerValue the raw header string, e.g. "application/xml; charset=utf-8"
 * @return the parsed list of header values and their qualifiers
 */
protected List<HeaderValue> parseHeaderValue(String headerValue) { return parseHeaderValue(headerValue, HEADER_VALUE_SEPARATOR, HEADER_QUALIFIER_SEPARATOR); }
// A multi-part Content-Type header must parse into an ordered list: first the
// bare media type (no key), then the charset qualifier as a key/value pair.
// Also exercises case-insensitive header lookup via getHeader("content-type").
@Test void headers_parseHeaderValue_multiValue() { AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(contentTypeRequest, mockContext, null, config); // I'm also using this to double-check that I can get a header ignoring case List<AwsHttpServletRequest.HeaderValue> values = request.parseHeaderValue(request.getHeader("content-type")); assertEquals(2, values.size()); assertEquals("application/xml", values.get(0).getValue()); assertNull(values.get(0).getKey()); assertEquals("charset", values.get(1).getKey()); assertEquals("utf-8", values.get(1).getValue()); }
/**
 * Converts an Avro record schema into the equivalent Parquet message type.
 *
 * @param avroSchema the Avro schema; must be of type RECORD
 * @return a Parquet MessageType named after the record's full name
 * @throws IllegalArgumentException if the schema is not a record
 */
public MessageType convert(Schema avroSchema) {
    if (avroSchema.getType() != Schema.Type.RECORD) {
        throw new IllegalArgumentException("Avro schema must be a record.");
    }
    String messageName = avroSchema.getFullName();
    return new MessageType(messageName, convertFields(avroSchema.getFields(), ""));
}
// Round-trips localTimestampMillis (Avro) <-> TIMESTAMP(MILLIS, isAdjustedToUTC=false)
// on int64 (Parquet), then asserts the converter rejects TIMESTAMP_MILLIS on
// every other primitive type (FIXED_LEN_BYTE_ARRAY needs an explicit length).
@Test public void testLocalTimestampMillisType() throws Exception { Schema date = LogicalTypes.localTimestampMillis().addToSchema(Schema.create(LONG)); Schema expected = Schema.createRecord( "myrecord", null, null, false, Arrays.asList(new Schema.Field("timestamp", date, null, null))); testRoundTripConversion( expected, "message myrecord {\n" + " required int64 timestamp (TIMESTAMP(MILLIS,false));\n" + "}\n"); for (PrimitiveTypeName primitive : new PrimitiveTypeName[] {INT32, INT96, FLOAT, DOUBLE, BOOLEAN, BINARY, FIXED_LEN_BYTE_ARRAY}) { final PrimitiveType type; if (primitive == FIXED_LEN_BYTE_ARRAY) { type = new PrimitiveType(REQUIRED, primitive, 12, "test", TIMESTAMP_MILLIS); } else { type = new PrimitiveType(REQUIRED, primitive, "test", TIMESTAMP_MILLIS); } assertThrows( "Should not allow TIMESTAMP_MILLIS with " + primitive, IllegalArgumentException.class, () -> new AvroSchemaConverter().convert(message(type))); } }
public void setIssueComponent(DefaultIssue issue, String newComponentUuid, String newComponentKey, Date updateDate) { if (!Objects.equals(newComponentUuid, issue.componentUuid())) { issue.setComponentUuid(newComponentUuid); issue.setUpdateDate(updateDate); issue.setChanged(true); } // other fields (such as module, modulePath, componentKey) are read-only and set/reset for consistency only issue.setComponentKey(newComponentKey); }
// When the component UUID is unchanged, the issue must stay untouched:
// no changed flag, no update date, no notification.
@Test void setIssueComponent_has_no_effect_if_component_uuid_is_not_changed() { String componentKey = "key"; String componentUuid = "uuid"; issue.setComponentUuid(componentUuid); issue.setComponentKey(componentKey); underTest.setIssueComponent(issue, componentUuid, componentKey, context.date()); assertThat(issue.componentUuid()).isEqualTo(componentUuid); assertThat(issue.componentKey()).isEqualTo(componentKey); assertThat(issue.isChanged()).isFalse(); assertThat(issue.updateDate()).isNull(); assertThat(issue.mustSendNotifications()).isFalse(); }
/**
 * Spring bean factory method exposing the Motan metadata handler.
 *
 * @return a new {@link MotanMetaDataHandler}
 */
@Bean public MetaDataHandler motanMetaDataHandler() { return new MotanMetaDataHandler(); }
// The application context must expose a bean named "motanMetaDataHandler"
// of type MetaDataHandler.
@Test public void testMotanMetaDataHandler() { applicationContextRunner.run(context -> { MetaDataHandler handler = context.getBean("motanMetaDataHandler", MetaDataHandler.class); assertNotNull(handler); } ); }
/**
 * Returns the replacements in ascending source-position order.
 * NOTE(review): this iterates {@code replacements.descendingMap()}; that
 * yields ascending order only if the underlying map is sorted with a
 * descending comparator — confirm against the field's declaration, which is
 * not visible here.
 */
public ImmutableSet<Replacement> ascending() { return ImmutableSet.copyOf(replacements.descendingMap().values()); }
// Regardless of insertion order, ascending() must return replacements sorted
// by their source ranges ([0,0) before [0,1)).
@Test public void ascending() { assertThat( Iterables.transform( new Replacements() .add(Replacement.create(0, 0, "hello")) .add(Replacement.create(0, 1, "hello")) .ascending(), AS_RANGES)) .containsExactly(Range.closedOpen(0, 0), Range.closedOpen(0, 1)) .inOrder(); assertThat( Iterables.transform( new Replacements() .add(Replacement.create(0, 1, "hello")) .add(Replacement.create(0, 0, "hello")) .ascending(), AS_RANGES)) .containsExactly(Range.closedOpen(0, 0), Range.closedOpen(0, 1)) .inOrder(); }
/**
 * Creates a read-only Inspector over Slime binary data without materializing
 * a full Slime tree: decodes the symbol table, builds a positional index of
 * the value structure, and wraps both in a lazy BinaryView rooted at offset 0.
 *
 * @param data the Slime binary-format bytes
 * @return an Inspector over the encoded root value
 * @throws IllegalArgumentException if the input is malformed
 */
public static Inspector inspect(byte[] data) { var input = new BufferedInput(data); var names = new SymbolTable(); BinaryDecoder.decodeSymbolTable(input, names); var index = new DecodeIndex(input.getBacking().length, input.getPosition()); buildIndex(input, index, 0, 0); if (input.failed()) { throw new IllegalArgumentException("bad input: " + input.getErrorMessage()); } return new BinaryView(input.getBacking(), names, index.getBacking(), 0); }
// For every generated Slime shape, the lazy BinaryView over the encoded bytes
// must be indistinguishable from the eagerly-decoded structure; any exception
// fails the test with the offending case recorded in ctx.
@Test public void testBinaryViewShapesParity() { for (int i = 0; i < numShapes; ++i) { var slime = makeSlime(i); ctx = "case " + i + ": '" + slime.toString() + "'"; byte[] data = BinaryFormat.encode(slime); try { checkParity(slime.get(), BinaryView.inspect(data)); } catch (Exception e) { fail(ctx + ", got exception: " + e); } } }
/**
 * Checks whether the supplied string is a valid IP entity.
 * A null value is considered valid (it denotes the default entity);
 * otherwise validity means {@link InetAddress#getByName} accepts it.
 *
 * @param ip the textual address, or null
 * @return true if null or resolvable, false otherwise
 */
static boolean isValidIpEntity(String ip) {
    if (ip != null) {
        try {
            InetAddress.getByName(ip);
        } catch (UnknownHostException ignored) {
            // not a parseable or resolvable address
            return false;
        }
    }
    return true;
}
// The IPv4 loopback literal must be accepted as a valid IP entity.
@Test public void testIsValidIpEntityWithLocalhost() { assertTrue(ClientQuotaControlManager.isValidIpEntity("127.0.0.1")); }
/**
 * Groups partitions into PartitionSpec objects by shared storage-descriptor
 * properties to reduce payload size.
 * Grouping keys: UNSET_KEY when the SD was not projected; a fresh SD key when
 * the partition has skewed columns or a bucket count differing from the first
 * partition's (so they are never merged); a table-path key when the partition
 * lives under the table directory (stored with a relative path); otherwise an
 * SD key. Partitions located outside the table directory are collected into a
 * single PartitionListComposingSpec for backward compatibility; all other
 * groups become shared-SD specs.
 *
 * @param table      the owning table (its SD location anchors relative paths)
 * @param partitions the partitions to group
 * @return one PartitionSpec per SD group, plus at most one list-composing spec
 */
public static List<PartitionSpec> getPartitionspecsGroupedByStorageDescriptor(Table table, Collection<Partition> partitions) { final String tablePath = table.getSd().getLocation(); ImmutableListMultimap<StorageDescriptorKey, Partition> partitionsWithinTableDirectory = Multimaps.index(partitions, input -> { // if sd is not in the list of projected fields, all the partitions // can be just grouped in PartitionSpec object if (input.getSd() == null) { return StorageDescriptorKey.UNSET_KEY; } // if sd has skewed columns we better not group partition, since different partitions // could have different skewed info like skewed location if (input.getSd().getSkewedInfo() != null && input.getSd().getSkewedInfo().getSkewedColNames() != null && !input.getSd().getSkewedInfo().getSkewedColNames().isEmpty()) { return new StorageDescriptorKey(input.getSd()); } // if partitions don't have the same number of buckets we can not group their SD, // this could lead to incorrect number of buckets if (input.getSd().getNumBuckets() != partitions.iterator().next().getSd().getNumBuckets()) { return new StorageDescriptorKey(input.getSd()); } // if the partition is within table, use the tableSDKey to group it with other partitions // within the table directory if (input.getSd().getLocation() != null && input.getSd().getLocation() .startsWith(tablePath)) { return new StorageDescriptorKey(tablePath, input.getSd()); } // if partitions are located outside table location we treat them as non-standard // and do not perform any grouping // if the location is not set partitions are grouped according to the rest of the SD fields return new StorageDescriptorKey(input.getSd()); }); List<PartitionSpec> partSpecs = new ArrayList<>(); // Classify partitions based on shared SD properties. 
Map<StorageDescriptorKey, List<PartitionWithoutSD>> sdToPartList = new HashMap<>(); // we don't expect partitions to exist outside directory in most cases List<Partition> partitionsOutsideTableDir = new ArrayList<>(0); for (StorageDescriptorKey key : partitionsWithinTableDirectory.keySet()) { boolean isUnsetKey = key.equals(StorageDescriptorKey.UNSET_KEY); // group the partitions together when // case I : sd is not set because it was not in the requested fields // case II : when sd.location is not set because it was not in the requested fields // case III : when sd.location is set and it is located within table directory if (isUnsetKey || key.baseLocation == null || key.baseLocation.equals(tablePath)) { for (Partition partition : partitionsWithinTableDirectory.get(key)) { PartitionWithoutSD partitionWithoutSD = new PartitionWithoutSD(); partitionWithoutSD.setValues(partition.getValues()); partitionWithoutSD.setCreateTime(partition.getCreateTime()); partitionWithoutSD.setLastAccessTime(partition.getLastAccessTime()); partitionWithoutSD.setRelativePath( (isUnsetKey || !partition.getSd().isSetLocation()) ? null : partition.getSd() .getLocation().substring(tablePath.length())); partitionWithoutSD.setParameters(partition.getParameters()); if (!sdToPartList.containsKey(key)) { sdToPartList.put(key, new ArrayList<>()); } sdToPartList.get(key).add(partitionWithoutSD); } } else { // Lump all partitions outside the tablePath into one PartSpec. 
// if non-standard partitions need not be deDuped create PartitionListComposingSpec // this will be used mostly for keeping backwards compatibility with some HMS APIs which use // PartitionListComposingSpec for non-standard partitions located outside table partitionsOutsideTableDir.addAll(partitionsWithinTableDirectory.get(key)); } } // create sharedSDPartSpec for all the groupings for (Map.Entry<StorageDescriptorKey, List<PartitionWithoutSD>> entry : sdToPartList .entrySet()) { partSpecs.add(getSharedSDPartSpec(table, entry.getKey(), entry.getValue())); } if (!partitionsOutsideTableDir.isEmpty()) { PartitionSpec partListSpec = new PartitionSpec(); partListSpec.setCatName(table.getCatName()); partListSpec.setDbName(table.getDbName()); partListSpec.setTableName(table.getTableName()); partListSpec.setPartitionList(new PartitionListComposingSpec(partitionsOutsideTableDir)); partSpecs.add(partListSpec); } return partSpecs; }
// A partition with an unset SD must be grouped under the UNSET key: one spec
// with a null root path, a null relative path, and the partition values intact.
@Test public void testGetPartitionspecsGroupedBySDNullSD() throws MetaException { // Create database and table Table tbl = new TableBuilder() .setDbName(DB_NAME) .setTableName(TABLE_NAME) .addCol("id", "int") .setLocation("/foo") .build(null); Partition p1 = new PartitionBuilder() .setDbName("DB_NAME") .setTableName(TABLE_NAME) .addCol("a", "int") .addValue("val1") .setInputFormat("foo") .build(null); // Set SD to null p1.unsetSd(); assertThat(p1.getSd(), is((StorageDescriptor)null)); List<PartitionSpec> result = MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1)); assertThat(result.size(), is(1)); PartitionSpec ps = result.get(0); assertThat(ps.getRootPath(), is((String)null)); List<PartitionWithoutSD> partitions = ps.getSharedSDPartitionSpec().getPartitions(); assertThat(partitions.size(), is(1)); PartitionWithoutSD partition = partitions.get(0); assertThat(partition.getRelativePath(), is((String)null)); assertThat(partition.getValues(), is(Collections.singletonList("val1"))); }
/**
 * Attempts to split the remaining work at the given fraction.
 * The split point is cur + max(1, (to - cur) * fractionOfRemainder), computed
 * in BigDecimal (DECIMAL128) to avoid long overflow/precision loss; "cur" is
 * the last attempted offset, or from-1 if nothing was attempted yet. Returns
 * null when the computed split would fall at or beyond the range end (nothing
 * left to hand off); otherwise shrinks this tracker's range to [from, split)
 * and returns the primary/residual pair.
 */
@Override public SplitResult<OffsetRange> trySplit(double fractionOfRemainder) { // Convert to BigDecimal in computation to prevent overflow, which may result in loss of // precision. BigDecimal cur = (lastAttemptedOffset == null) ? BigDecimal.valueOf(range.getFrom()).subtract(BigDecimal.ONE, MathContext.DECIMAL128) : BigDecimal.valueOf(lastAttemptedOffset); // split = cur + max(1, (range.getTo() - cur) * fractionOfRemainder) BigDecimal splitPos = cur.add( BigDecimal.valueOf(range.getTo()) .subtract(cur, MathContext.DECIMAL128) .multiply(BigDecimal.valueOf(fractionOfRemainder), MathContext.DECIMAL128) .max(BigDecimal.ONE), MathContext.DECIMAL128); long split = splitPos.longValue(); if (split >= range.getTo()) { return null; } OffsetRange res = new OffsetRange(split, range.getTo()); this.range = new OffsetRange(range.getFrom(), split); return SplitResult.of(range, res); }
// An empty range [100, 100) has nothing to split off at any fraction.
@Test public void testTrySplitAtEmptyRange() throws Exception { OffsetRangeTracker tracker = new OffsetRangeTracker(new OffsetRange(100, 100)); assertNull(tracker.trySplit(0)); assertNull(tracker.trySplit(0.1)); assertNull(tracker.trySplit(1)); }
/**
 * Deletes files, directories and buckets from Backblaze B2 in two passes.
 * Pass 1 (non-containers): in-progress large uploads have their parts removed;
 * directories have their placeholder (.bzEmpty) version deleted, tolerating a
 * missing placeholder or an API failure; files are either hidden with a hide
 * marker (when versioning is enabled and no specific version is targeted,
 * ignoring "already_hidden") or deleted by version id. The cached version id
 * is cleared after each deletion. Pass 2 deletes the (now empty) buckets
 * themselves. B2 and I/O failures are mapped to backend exceptions; the
 * callback is notified before each deletion.
 */
@Override public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { for(Path file : files.keySet()) { if(containerService.isContainer(file)) { continue; } callback.delete(file); if(file.getType().contains(Path.Type.upload)) { new B2LargeUploadPartService(session, fileid).delete(file.attributes().getVersionId()); } else { if(file.isDirectory()) { // Delete /.bzEmpty if any final String placeholder; try { placeholder = fileid.getVersionId(file); } catch(NotfoundException e) { log.warn(String.format("Ignore failure %s deleting placeholder file for %s", e, file)); continue; } if(null == placeholder) { continue; } try { session.getClient().deleteFileVersion(containerService.getKey(file), placeholder); } catch(B2ApiException e) { log.warn(String.format("Ignore failure %s deleting placeholder file for %s", e.getMessage(), file)); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Cannot delete {0}", e, file); } } else if(file.isFile()) { try { if(!versioning.isEnabled() || null == file.attributes().getVersionId()) { // Add hide marker if(log.isDebugEnabled()) { log.debug(String.format("Add hide marker %s of %s", file.attributes().getVersionId(), file)); } try { session.getClient().hideFile(fileid.getVersionId(containerService.getContainer(file)), containerService.getKey(file)); } catch(B2ApiException e) { if("already_hidden".equalsIgnoreCase(e.getCode())) { log.warn(String.format("Ignore failure %s hiding file %s already hidden", e.getMessage(), file)); } else { throw e; } } } else { // Delete specific version if(log.isDebugEnabled()) { log.debug(String.format("Delete version %s of %s", file.attributes().getVersionId(), file)); } session.getClient().deleteFileVersion(containerService.getKey(file), file.attributes().getVersionId()); } } catch(B2ApiException e) { throw new B2ExceptionMappingService(fileid).map("Cannot delete {0}", e, file); } catch(IOException e) { 
throw new DefaultIOExceptionMappingService().map("Cannot delete {0}", e, file); } } fileid.cache(file, null); } } for(Path file : files.keySet()) { try { if(containerService.isContainer(file)) { callback.delete(file); // Finally delete bucket itself session.getClient().deleteBucket(fileid.getVersionId(file)); } } catch(B2ApiException e) { throw new B2ExceptionMappingService(fileid).map("Cannot delete {0}", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Cannot delete {0}", e, file); } } }
// Deleting a bucket together with a file inside it must remove both; the
// two-pass delete handles the ordering (file first, then the bucket).
@Test public void testDelete() throws Exception { final B2VersionIdProvider fileid = new B2VersionIdProvider(session); final Path bucket = new B2DirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path file = new Path(bucket, String.format("%s %s", new AlphanumericRandomStringService().random(), "1"), EnumSet.of(Path.Type.file)); new B2TouchFeature(session, fileid).touch(file, new TransferStatus()); new B2DeleteFeature(session, fileid).delete(Arrays.asList(bucket, file), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new B2FindFeature(session, fileid).find(file)); assertFalse(new B2FindFeature(session, fileid).find(bucket)); }
/**
 * Fans out an add-node-labels request to every active sub-cluster in the
 * federation. Validates that the request and its label list are non-empty
 * (IllegalArgumentException otherwise, with audit log and failure metric),
 * invokes "addToClusterNodeLabels" concurrently against all active
 * sub-clusters with a cloned request, and returns an OK response whose body
 * concatenates per-sub-cluster results (e.g. "SubCluster-0:SUCCESS,...").
 * NotFound/Yarn errors are rethrown as IOException; any other fall-through
 * ends in a RuntimeException. Success/failure is always audited and metered.
 */
@Override public Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels, HttpServletRequest hsr) throws Exception { if (newNodeLabels == null) { routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN, TARGET_WEB_SERVICE, "Parameter error, the newNodeLabels is null."); throw new IllegalArgumentException("Parameter error, the newNodeLabels is null."); } List<NodeLabelInfo> nodeLabelInfos = newNodeLabels.getNodeLabelsInfo(); if (CollectionUtils.isEmpty(nodeLabelInfos)) { routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN, TARGET_WEB_SERVICE, "Parameter error, the nodeLabelsInfo is null or empty."); throw new IllegalArgumentException("Parameter error, the nodeLabelsInfo is null or empty."); } try { long startTime = clock.getTime(); Collection<SubClusterInfo> subClustersActives = federationFacade.getActiveSubClusters(); final HttpServletRequest hsrCopy = clone(hsr); Class[] argsClasses = new Class[]{NodeLabelsInfo.class, HttpServletRequest.class}; Object[] args = new Object[]{newNodeLabels, hsrCopy}; ClientMethod remoteMethod = new ClientMethod("addToClusterNodeLabels", argsClasses, args); Map<SubClusterInfo, Response> responseInfoMap = invokeConcurrent(subClustersActives, remoteMethod, Response.class); StringBuilder buffer = new StringBuilder(); // SubCluster-0:SUCCESS,SubCluster-1:SUCCESS responseInfoMap.forEach((subClusterInfo, response) -> buildAppendMsg(subClusterInfo, buffer, response)); long stopTime = clock.getTime(); RouterAuditLogger.logSuccess(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, TARGET_WEB_SERVICE); routerMetrics.succeededAddToClusterNodeLabelsRetrieved((stopTime - startTime)); return Response.status(Status.OK).entity(buffer.toString()).build(); } catch (NotFoundException e) { routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved(); 
RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); RouterServerUtil.logAndThrowIOException("get all active sub cluster(s) error.", e); } catch (YarnException e) { routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); RouterServerUtil.logAndThrowIOException("addToClusterNodeLabels with yarn error.", e); } routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), ADD_TO_CLUSTER_NODELABELS, UNKNOWN, TARGET_WEB_SERVICE, "addToClusterNodeLabels Failed."); throw new RuntimeException("addToClusterNodeLabels Failed."); }
// Error paths: a null request and an empty label list must fail argument
// validation; a well-formed request against a failing backend must surface
// as YarnRuntimeException.
@Test public void testAddToClusterNodeLabelsError() throws Exception { // the newNodeLabels is null LambdaTestUtils.intercept(IllegalArgumentException.class, "Parameter error, the newNodeLabels is null.", () -> interceptor.addToClusterNodeLabels(null, null)); // the nodeLabelsInfo is null NodeLabelsInfo nodeLabelsInfo = new NodeLabelsInfo(); LambdaTestUtils.intercept(IllegalArgumentException.class, "Parameter error, the nodeLabelsInfo is null or empty.", () -> interceptor.addToClusterNodeLabels(nodeLabelsInfo, null)); // error nodeLabelsInfo NodeLabelsInfo nodeLabelsInfo1 = new NodeLabelsInfo(); NodeLabelInfo nodeLabelInfo1 = new NodeLabelInfo("A", true); nodeLabelsInfo1.getNodeLabelsInfo().add(nodeLabelInfo1); LambdaTestUtils.intercept(YarnRuntimeException.class, "addToClusterNodeLabels Error", () -> interceptor.addToClusterNodeLabels(nodeLabelsInfo1, null)); }
/**
 * Returns an unmodifiable view of the runtime metrics, keyed by metric name.
 * Callers must not mutate the returned map; writes go through dedicated
 * accessors on this class.
 */
@JsonValue @ThriftField(1) public Map<String, RuntimeMetric> getMetrics() { return Collections.unmodifiableMap(metrics); }
// Mutating the map returned by getMetrics() must be rejected — it is an
// unmodifiable view of the internal metrics.
@Test(expectedExceptions = UnsupportedOperationException.class) public void testReturnUnmodifiedMetrics() { RuntimeStats stats = new RuntimeStats(); stats.getMetrics().put(TEST_METRIC_NAME_1, new RuntimeMetric(TEST_METRIC_NAME_1, NONE)); }
/**
 * Builds a TCC proxy handler for the target bean, or returns null when the
 * bean exposes no two-phase-annotated methods (so it needs no proxying).
 * When eligible, the bean is first registered as a TCC resource and then
 * wrapped with the TCC action interceptor.
 *
 * @param target     the candidate bean
 * @param objectName the bean's registration name
 * @return an interceptor handler, or null if no TCC methods were found
 */
@Override public ProxyInvocationHandler parserInterfaceToProxy(Object target, String objectName) { // eliminate the bean without two phase annotation. Set<String> methodsToProxy = this.tccProxyTargetMethod(target); if (methodsToProxy.isEmpty()) { return null; } // register resource and enhance with interceptor DefaultResourceRegisterParser.get().registerResource(target, objectName); return new TccActionInterceptorHandler(target, methodsToProxy); }
// Nested TCC with REQUIRES_NEW propagation: committing the outer global
// transaction must commit both the outer (nest) action and the inner action
// started in its own branch; rollback is issued on any failure.
@Test public void testNestTcc_required_new_should_both_commit() throws Exception { //given RootContext.unbind(); DefaultResourceManager.get(); DefaultResourceManager.mockResourceManager(BranchType.TCC, resourceManager); TransactionManagerHolder.set(transactionManager); TccActionImpl tccAction = new TccActionImpl(); TccAction tccActionProxy = ProxyUtil.createProxy(tccAction); Assertions.assertNotNull(tccActionProxy); NestTccActionImpl nestTccAction = new NestTccActionImpl(); nestTccAction.setTccAction(tccActionProxy); //when ProxyInvocationHandler proxyInvocationHandler = DefaultInterfaceParser.get().parserInterfaceToProxy(nestTccAction, nestTccAction.getClass().getName()); //then Assertions.assertNotNull(proxyInvocationHandler); //when NestTccAction nestTccActionProxy = ProxyUtil.createProxy(nestTccAction); //then Assertions.assertNotNull(nestTccActionProxy); // transaction commit test GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate(); try { tx.begin(60000, "testBiz"); boolean result = nestTccActionProxy.prepareNestRequiredNew(null, 2); Assertions.assertTrue(result); if (result) { tx.commit(); } else { tx.rollback(); } } catch (Exception exx) { tx.rollback(); throw exx; } Assertions.assertTrue(nestTccAction.isCommit()); Assertions.assertTrue(tccAction.isCommit()); }
/**
 * Builds a rate-limiter configuration from a Commons Configuration source,
 * reading the "configs" and "instances" property subsets under their
 * respective prefixes.
 *
 * @param configuration the source configuration
 * @return the populated rate-limiter configuration
 * @throws ConfigParseException wrapping any failure while reading properties
 */
public static CommonsConfigurationRateLimiterConfiguration of(final Configuration configuration) throws ConfigParseException { CommonsConfigurationRateLimiterConfiguration obj = new CommonsConfigurationRateLimiterConfiguration(); try { obj.getConfigs().putAll(obj.getProperties(configuration.subset(RATE_LIMITER_CONFIGS_PREFIX))); obj.getInstances().putAll(obj.getProperties(configuration.subset(RATE_LIMITER_INSTANCES_PREFIX))); return obj; } catch (Exception ex) { throw new ConfigParseException("Error creating ratelimiter configuration", ex); } }
// Loads the resilience YAML fixture and checks both the shared configs and
// the per-instance definitions are parsed as expected.
@Test public void testFromYamlFile() throws ConfigurationException { Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class, TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME); CommonsConfigurationRateLimiterConfiguration bulkHeadConfiguration = CommonsConfigurationRateLimiterConfiguration.of(config); assertConfigs(bulkHeadConfiguration.getConfigs()); assertInstances(bulkHeadConfiguration.getInstances()); }
/**
 * Spring bean producing the BulkheadRegistry: creates the registry from the
 * configuration properties (applying customizers and registry event
 * consumers), wires the event-consumer registry, then eagerly instantiates
 * one bulkhead per configured instance so they exist at startup.
 *
 * @return the fully initialized bulkhead registry
 */
@Bean public BulkheadRegistry bulkheadRegistry( BulkheadConfigurationProperties bulkheadConfigurationProperties, EventConsumerRegistry<BulkheadEvent> bulkheadEventConsumerRegistry, RegistryEventConsumer<Bulkhead> bulkheadRegistryEventConsumer, @Qualifier("compositeBulkheadCustomizer") CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer) { BulkheadRegistry bulkheadRegistry = createBulkheadRegistry(bulkheadConfigurationProperties, bulkheadRegistryEventConsumer, compositeBulkheadCustomizer); registerEventConsumer(bulkheadRegistry, bulkheadEventConsumerRegistry, bulkheadConfigurationProperties); bulkheadConfigurationProperties.getInstances().forEach((name, properties) -> bulkheadRegistry .bulkhead(name, bulkheadConfigurationProperties .createBulkheadConfig(properties, compositeBulkheadCustomizer, name))); return bulkheadRegistry; }
// Two configured instances must yield a registry with two bulkheads whose
// maxConcurrentCalls match their properties, plus one event consumer each.
@Test public void testBulkHeadRegistry() { //Given io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties instanceProperties1 = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties(); instanceProperties1.setMaxConcurrentCalls(3); assertThat(instanceProperties1.getEventConsumerBufferSize()).isNull(); io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties instanceProperties2 = new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties(); instanceProperties2.setMaxConcurrentCalls(2); assertThat(instanceProperties2.getEventConsumerBufferSize()).isNull(); BulkheadConfigurationProperties bulkheadConfigurationProperties = new BulkheadConfigurationProperties(); bulkheadConfigurationProperties.getInstances().put("backend1", instanceProperties1); bulkheadConfigurationProperties.getInstances().put("backend2", instanceProperties2); BulkheadConfiguration bulkheadConfiguration = new BulkheadConfiguration(); DefaultEventConsumerRegistry<BulkheadEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>(); //When BulkheadRegistry bulkheadRegistry = bulkheadConfiguration .bulkheadRegistry(bulkheadConfigurationProperties, eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), new CompositeCustomizer<>( Collections.emptyList())); //Then assertThat(bulkheadRegistry.getAllBulkheads().size()).isEqualTo(2); Bulkhead bulkhead1 = bulkheadRegistry.bulkhead("backend1"); assertThat(bulkhead1).isNotNull(); assertThat(bulkhead1.getBulkheadConfig().getMaxConcurrentCalls()).isEqualTo(3); Bulkhead bulkhead2 = bulkheadRegistry.bulkhead("backend2"); assertThat(bulkhead2).isNotNull(); assertThat(bulkhead2.getBulkheadConfig().getMaxConcurrentCalls()).isEqualTo(2); assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2); }
/**
 * Stops the first {@code numOfServicesStarted} child services in reverse
 * start order. A child is stopped if it is STARTED, or also if merely INITED
 * when {@code stopOnlyStartedServices} is false. Each stop is attempted
 * quietly so one failure does not prevent stopping the rest; the first
 * exception encountered is remembered and rethrown (wrapped as a
 * ServiceStateException) only after all children have been processed.
 *
 * @param numOfServicesStarted    how many leading children were started
 * @param stopOnlyStartedServices if true, skip children that are only INITED
 */
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) { // stop in reverse order of start Exception firstException = null; List<Service> services = getServices(); for (int i = numOfServicesStarted - 1; i >= 0; i--) { Service service = services.get(i); if (LOG.isDebugEnabled()) { LOG.debug("Stopping service #" + i + ": " + service); } STATE state = service.getServiceState(); //depending on the stop police if (state == STATE.STARTED || (!stopOnlyStartedServices && state == STATE.INITED)) { Exception ex = ServiceOperations.stopQuietly(LOG, service); if (ex != null && firstException == null) { firstException = ex; } } } //after stopping all services, rethrow the first exception raised if (firstException != null) { throw ServiceStateException.convert(firstException); } }
// A child added to an already-stopped parent must keep its own state (INITED)
// rather than being stopped retroactively.
@Test(timeout = 10000) public void testAddInitedChildInStop() throws Throwable { CompositeService parent = new CompositeService("parent"); BreakableService child = new BreakableService(); child.init(new Configuration()); parent.init(new Configuration()); parent.start(); parent.stop(); AddSiblingService.addChildToService(parent, child); assertInState(STATE.INITED, child); }
/**
 * No teardown work is required for this resource handler; returns null
 * (no privileged operations to run). Callers of this interface must
 * tolerate a null result.
 */
@Override public List<PrivilegedOperation> teardown() throws ResourceHandlerException { return null; }
// teardown() is documented to perform no work and return null.
@Test public void testTeardown() throws Exception { Assert.assertNull(cGroupsBlkioResourceHandlerImpl.teardown()); }
/**
 * Validates a source value against the ETL column definition.
 * Decimal types: the value must fall within the parser's min/max bounds
 * (null values pass). Char/varchar types: the UTF-8 byte length must not
 * exceed the declared string length. Any other column type passes through
 * as valid. Violations are logged with row context and return false.
 * NOTE(review): columnType.toUpperCase() uses the default locale — under a
 * Turkish locale "decimal" would not uppercase to "DECIMAL"; consider
 * toUpperCase(Locale.ROOT) — confirm before changing.
 *
 * @param srcValue     the parsed value (may be null)
 * @param etlColumn    the target column definition
 * @param columnParser parser carrying type bounds for the column
 * @param row          the source row, used only for log context
 * @return true if the value is acceptable for the column, false otherwise
 */
public boolean validateData(Object srcValue, EtlJobConfig.EtlColumn etlColumn, ColumnParser columnParser, Row row) { switch (etlColumn.columnType.toUpperCase()) { case "DECIMALV2": case "DECIMAL32": case "DECIMAL64": case "DECIMAL128": // TODO(wb): support decimal round; see be DecimalV2Value::round DecimalParser decimalParser = (DecimalParser) columnParser; BigDecimal srcBigDecimal = (BigDecimal) srcValue; if (srcValue != null && (decimalParser.getMaxValue().compareTo(srcBigDecimal) < 0 || decimalParser.getMinValue().compareTo(srcBigDecimal) > 0)) { LOG.warn(String.format( "decimal value is not valid for defination, column=%s, value=%s,precision=%s,scale=%s", etlColumn.columnName, srcValue.toString(), srcBigDecimal.precision(), srcBigDecimal.scale())); return false; } break; case "CHAR": case "VARCHAR": // TODO(wb) padding char type int strSize = 0; if (srcValue != null && (strSize = srcValue.toString().getBytes(StandardCharsets.UTF_8).length) > etlColumn.stringLength) { LOG.warn(String.format( "the length of input is too long than schema. column_name:%s," + "input_str[%s],schema length:%s,actual length:%s", etlColumn.columnName, row.toString(), etlColumn.stringLength, strSize)); return false; } break; } return true; }
// Decimal bounds (precision 3 / scale 2 => [-9.99, 9.99]) and UTF-8 byte
// length for varchar (a CJK char is 3 bytes, so "中a" exceeds length 3).
@Test public void testValidateData() { SparkDpp sparkDpp = new SparkDpp(); // decimal EtlJobConfig.EtlColumn etlColumn = new EtlJobConfig.EtlColumn(); etlColumn.columnType = "DECIMALV2"; etlColumn.precision = 3; etlColumn.scale = 2; DecimalParser decimalParser = new DecimalParser(etlColumn); // test max/min Assert.assertTrue(decimalParser.getMaxValue().toString().equals("9.99")); Assert.assertTrue(decimalParser.getMinValue().toString().equals("-9.99")); // normal BigDecimal bigDecimal = new BigDecimal("1.21"); Assert.assertTrue(sparkDpp.validateData(bigDecimal, etlColumn, decimalParser, RowFactory.create(bigDecimal))); // failed BigDecimal bigDecimalFailed = new BigDecimal("10"); Assert.assertFalse( sparkDpp.validateData(bigDecimalFailed, etlColumn, decimalParser, RowFactory.create(bigDecimalFailed))); // string EtlJobConfig.EtlColumn stringColumn = new EtlJobConfig.EtlColumn(); stringColumn.stringLength = 3; stringColumn.columnType = "VARCHAR"; StringParser stringParser = new StringParser(stringColumn); // normal String normalString = "a1"; Assert.assertTrue( sparkDpp.validateData(normalString, stringColumn, stringParser, RowFactory.create(normalString))); // cn normal String normalStringCN = "中"; Assert.assertTrue( sparkDpp.validateData(normalStringCN, stringColumn, stringParser, RowFactory.create(normalStringCN))); // cn failed String failedStringCN = "中a"; Assert.assertFalse( sparkDpp.validateData(failedStringCN, stringColumn, stringParser, RowFactory.create(failedStringCN))); }
/**
 * Stores the state in the backing storage and records a locked entry for it
 * in the Kubernetes ConfigMap. Discard policy: the stored state is discarded
 * whenever the ConfigMap update did not definitively succeed — EXCEPT when
 * the failure is a PossibleInconsistentStateException, where it is unknown
 * whether the metadata reached the ConfigMap, so the data must be kept. An
 * AlreadyExistException (duplicate key) is unwrapped and rethrown as-is.
 *
 * @param key   entry key within the ConfigMap
 * @param state the state to persist
 * @return the handle for the stored state
 * @throws PossibleInconsistentStateException if the write outcome is unknown
 */
@Override public RetrievableStateHandle<T> addAndLock(String key, T state) throws PossibleInconsistentStateException, Exception { checkNotNull(key, "Key in ConfigMap."); checkNotNull(state, "State."); final RetrievableStateHandle<T> storeHandle = storage.store(state); final byte[] serializedStoreHandle = serializeOrDiscard(new StateHandleWithDeleteMarker<>(storeHandle)); // initialize flag to serve the failure case boolean discardState = true; try { // a successful operation will result in the state not being discarded discardState = !updateConfigMap( cm -> { try { return addEntry(cm, key, serializedStoreHandle); } catch (Exception e) { throw new CompletionException(e); } }) .get(); return storeHandle; } catch (Exception ex) { final Optional<PossibleInconsistentStateException> possibleInconsistentStateException = ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class); if (possibleInconsistentStateException.isPresent()) { // it's unclear whether the state handle metadata was written to the ConfigMap - // hence, we don't discard the data discardState = false; throw possibleInconsistentStateException.get(); } throw ExceptionUtils.findThrowable(ex, AlreadyExistException.class) .orElseThrow(() -> ex); } finally { if (discardState) { storeHandle.discardState(); } } }
// When the target ConfigMap does not exist, addAndLock must fail with a
// message naming the ConfigMap AND must discard the state it had already
// written to storage (exactly one store, exactly one discard).
@Test void testAddFailedWhenConfigMapNotExistAndDiscardState() throws Exception { new Context() { { runTest( () -> { final KubernetesStateHandleStore< TestingLongStateHandleHelper.LongStateHandle> store = new KubernetesStateHandleStore<>( flinkKubeClient, LEADER_CONFIGMAP_NAME, longStateStorage, filter, LOCK_IDENTITY); final String msg = String.format( "ConfigMap %s does not exist.", LEADER_CONFIGMAP_NAME); assertThatThrownBy( () -> store.addAndLock(key, state), "Exception should be thrown.") .satisfies(anyCauseMatches(msg)); assertThat(TestingLongStateHandleHelper.getGlobalStorageSize()) .isEqualTo(1); assertThat( TestingLongStateHandleHelper .getDiscardCallCountForStateHandleByIndex(0)) .isEqualTo(1); }); } }; }
/**
 * Loads the step instance "view" (the latest run/attempt representation) for
 * the given workflow instance and step via a retryable query with metric and
 * error logging.
 *
 * @param workflowId         the workflow id
 * @param workflowInstanceId the workflow instance id
 * @param stepId             the step id
 * @return the step instance view
 * @throws MaestroNotFoundException if no matching row exists (never created
 *     or already deleted)
 */
public StepInstance getStepInstanceView( String workflowId, long workflowInstanceId, String stepId) { StepInstance ret = withMetricLogError( () -> withRetryableQuery( GET_STEP_INSTANCE_VIEW_QUERY, stmt -> { int idx = 0; stmt.setString(++idx, workflowId); stmt.setLong(++idx, workflowInstanceId); stmt.setString(++idx, stepId); }, result -> { if (result.next()) { return maestroStepFromResult(result); } return null; }), "getStepInstanceView", "Failed to get the step instance view for [{}][{}][{}]", workflowId, workflowInstanceId, stepId); if (ret == null) { throw new MaestroNotFoundException( "step instance view for [%s][%s][%s] not found (either not created or deleted)", workflowId, workflowInstanceId, stepId); } return ret; }
/**
 * Inserts two attempts of the same step (finishing, then failed) and verifies that the view
 * query returns the latest run/attempt (run id 2, attempt id 2) with FINISHING runtime status.
 */
@Test public void testGetStepInstanceView() throws Exception { si = loadObject("fixtures/instances/sample-step-instance-finishing.json", StepInstance.class); stepDao.insertOrUpsertStepInstance(si, true); si = loadObject("fixtures/instances/sample-step-instance-failed.json", StepInstance.class); stepDao.insertOrUpsertStepInstance(si, true); StepInstance instance = stepDao.getStepInstanceView("sample-dag-test-3", 1L, "job1"); assertEquals(2L, instance.getWorkflowRunId()); assertEquals(2L, instance.getStepAttemptId()); assertEquals(StepInstance.Status.FINISHING, instance.getRuntimeState().getStatus()); }
/**
 * Convenience overload: registers (or returns the existing) operator metric group for
 * {@code operatorName}, deriving the operator id deterministically from this task's vertex id.
 */
public InternalOperatorMetricGroup getOrAddOperator(String operatorName) { return getOrAddOperator(OperatorID.fromJobVertexID(vertexId), operatorName); }
/**
 * Verifies that a 100-character operator name is truncated to
 * {@code TaskMetricGroup.METRICS_OPERATOR_NAME_MAX_LENGTH} in the stored scope component,
 * and that the stored value equals the original name's prefix of that length.
 */
@Test void testOperatorNameTruncation() throws Exception { Configuration cfg = new Configuration(); cfg.set(MetricOptions.SCOPE_NAMING_OPERATOR, ScopeFormat.SCOPE_OPERATOR_NAME); MetricRegistryImpl registry = new MetricRegistryImpl(MetricRegistryTestUtils.fromConfiguration(cfg)); TaskManagerMetricGroup tm = TaskManagerMetricGroup.createTaskManagerMetricGroup( registry, "host", new ResourceID("id")); TaskMetricGroup taskMetricGroup = tm.addJob(new JobID(), "jobname").addTask(createExecutionAttemptId(), "task"); String originalName = new String(new char[100]).replace("\0", "-"); InternalOperatorMetricGroup operatorMetricGroup = taskMetricGroup.getOrAddOperator(originalName); String storedName = operatorMetricGroup.getScopeComponents()[0]; assertThat(storedName.length()).isEqualTo(TaskMetricGroup.METRICS_OPERATOR_NAME_MAX_LENGTH); assertThat(originalName.substring(0, TaskMetricGroup.METRICS_OPERATOR_NAME_MAX_LENGTH)) .isEqualTo(storedName); registry.closeAsync().get(); }
/**
 * Reads all CSV rows from this reader's configured {@code reader}, without closing it
 * (the {@code false} argument).
 *
 * @return parsed CSV data
 * @throws IORuntimeException on I/O failure
 */
public CsvData read() throws IORuntimeException { return read(this.reader, false); }
/**
 * Reads a UTF-8 CSV resource and verifies quoted-field handling: an embedded comma
 * ("sss,sss"), the original line number, a non-ASCII header, and escaped double quotes.
 */
@Test public void readTest() { CsvReader reader = new CsvReader(); CsvData data = reader.read(ResourceUtil.getReader("test.csv", CharsetUtil.CHARSET_UTF_8)); assertEquals("sss,sss", data.getRow(0).get(0)); assertEquals(1, data.getRow(0).getOriginalLineNumber()); assertEquals("性别", data.getRow(0).get(2)); assertEquals("关注\"对象\"", data.getRow(0).get(3)); }
/**
 * Checks whether the given value is a well-formed URL.
 *
 * <p>Blank input is rejected. The value must both have a known protocol
 * (checked by {@link java.net.URL}) and satisfy generic URI syntax
 * (checked by {@code URL#toURI()}); {@code new URL(...)} alone only
 * validates the protocol and would accept strings containing illegal
 * characters such as spaces.
 *
 * @param value value to validate, may be {@code null}
 * @return {@code true} if the value is a syntactically valid URL
 */
public static boolean isUrl(CharSequence value) {
    if (StrUtil.isBlank(value)) {
        return false;
    }
    try {
        // toURI() enforces RFC 2396 syntax on top of the protocol check.
        new java.net.URL(StrUtil.str(value)).toURI();
    } catch (MalformedURLException | java.net.URISyntaxException e) {
        return false;
    }
    return true;
}
/**
 * Verifies that a real-world HTTPS URL with a query string containing colons matches both the
 * generic URL pattern and the HTTP-specific URL pattern.
 */
@Test public void isUrlTest() { final String content = "https://detail.tmall.com/item.htm?" + "id=639428931841&ali_refid=a3_430582_1006:1152464078:N:Sk5vwkMVsn5O6DcnvicELrFucL21A32m:0af8611e23c1d07697e"; assertTrue(Validator.isMatchRegex(Validator.URL, content)); assertTrue(Validator.isMatchRegex(Validator.URL_HTTP, content)); }
/**
 * Detects a Semaphore CI environment: the SEMAPHORE env variable must be the literal
 * string "true" and SEMAPHORE_PROJECT_ID must be set to a non-empty value.
 */
@Override
public boolean isDetected() {
    String semaphoreFlag = system.envVariable("SEMAPHORE");
    String projectId = system.envVariable("SEMAPHORE_PROJECT_ID");
    return "true".equals(semaphoreFlag) && isNotEmpty(projectId);
}
/**
 * Covers all four detection cases: both variables valid (detected); missing project id,
 * missing SEMAPHORE flag, or SEMAPHORE not exactly "true" (each not detected).
 */
@Test public void isDetected() { setEnvVariable("SEMAPHORE", "true"); setEnvVariable("SEMAPHORE_PROJECT_ID", "d782a107-aaf9-494d-8153-206b1a6bc8e9"); assertThat(underTest.isDetected()).isTrue(); setEnvVariable("SEMAPHORE", "true"); setEnvVariable("SEMAPHORE_PROJECT_ID", null); assertThat(underTest.isDetected()).isFalse(); setEnvVariable("SEMAPHORE", null); setEnvVariable("SEMAPHORE_PROJECT_ID", "d782a107-aaf9-494d-8153-206b1a6bc8e9"); assertThat(underTest.isDetected()).isFalse(); setEnvVariable("SEMAPHORE", "foo"); setEnvVariable("SEMAPHORE_PROJECT_ID", "d782a107-aaf9-494d-8153-206b1a6bc8e9"); assertThat(underTest.isDetected()).isFalse(); }
/**
 * Returns the parent of this path: all elements except the last one, keeping the same
 * delimiter. A path with zero or one element has an empty parent.
 *
 * @return a new {@code Path} containing this path's elements minus the last
 */
public Path getParentPath() {
    // Copy via subList instead of a manual index loop; the copy keeps the
    // new Path independent of this instance's element list.
    ArrayList<String> parentElements = elements.size() > 1
            ? new ArrayList<>(elements.subList(0, elements.size() - 1))
            : new ArrayList<>();
    return new Path(parentElements, delimiter);
}
/**
 * Verifies that the parent of a three-element path is "foo/bar" regardless of how the path
 * was constructed (absolute, relative, slash-delimited, appended), and that a single-element
 * path has an empty parent.
 */
@Test public void testGetParentPath() { assertEquals("foo/bar", getAbsolutePath().getParentPath().getRelative()); assertEquals("foo/bar", getRelativePath().getParentPath().getRelative()); assertEquals("foo/bar", getWithSlashes().getParentPath().getRelative()); assertEquals("foo/bar", getAppended().getParentPath().getRelative()); assertTrue(getOne().getParentPath().getRelative().isEmpty()); }
/**
 * Walks every annotation on every page of the document and collects the target URI of each
 * link annotation whose action is a URI action, then returns the accumulated URIs.
 *
 * @return the list of URIs found (via {@code getURIs()})
 * @throws IOException if reading page annotations fails
 */
public ArrayList<String> extractURIs() throws IOException {
    for (PDPage page : documentReader.getPages()) {
        for (PDAnnotation annotation : page.getAnnotations()) {
            // Only link annotations can carry a URI action.
            if (!(annotation instanceof PDAnnotationLink)) {
                continue;
            }
            PDAction action = ((PDAnnotationLink) annotation).getAction();
            if (action instanceof PDActionURI) {
                foundURIs.add(((PDActionURI) action).getURI());
            }
        }
    }
    return getURIs();
}
/**
 * Parses a fixture PDF containing a single link annotation and verifies exactly that one
 * URI is extracted.
 */
@Test public void test() throws IOException { byte[] data = IOUtils.resourceToByteArray("/org/archive/crawler/modules/extractor/PDFParserTest.pdf"); PDFParser parser = new PDFParser(data); ArrayList<String> uris = parser.extractURIs(); Assert.assertEquals(Collections.singletonList("https://example.com/link-annotation"), uris); }
/**
 * Returns the (possibly cached) parsed {@link DoFnSignature} for the given DoFn class;
 * parsing happens at most once per class via the cache.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) { return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature); }
/**
 * Verifies that signature parsing rejects a {@code @ProcessElement} method declaring two
 * parameters bound to the same {@code @TimerId}, with an error naming the duplicate id, the
 * method, and the offending parameter index — and without mentioning state.
 */
@Test public void testTimerParameterDuplicate() throws Exception { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("duplicate"); thrown.expectMessage("my-id"); thrown.expectMessage("myProcessElement"); thrown.expectMessage("index 2"); thrown.expectMessage(not(mentionsState())); DoFnSignatures.getSignature( new DoFn<KV<String, Integer>, Long>() { @TimerId("my-id") private final TimerSpec myfield = TimerSpecs.timer(TimeDomain.PROCESSING_TIME); @ProcessElement public void myProcessElement( ProcessContext context, @TimerId("my-id") Timer one, @TimerId("my-id") Timer two) {} @OnTimer("my-id") public void onWhatever() {} }.getClass()); }
/**
 * Builds a generic key/value record from explicit column names and value expressions.
 *
 * <p>Steps, in order: (1) if no column names are given, infer the implicit column list from the
 * schema; (2) require one expression per column; (3) validate every column exists in the schema
 * extended with pseudo columns and is not a disallowed insert target; (4) evaluate the
 * expressions into a column-to-value map; (5) for tables (KTABLE), require a value for every
 * primary key column; (6) take the row timestamp from ROWTIME if supplied, otherwise from the
 * injected clock; (7) split the values into key and value parts.
 *
 * @param columnNames target columns; empty means "use the schema's implicit columns"
 * @param expressions one value expression per target column
 * @param schema logical schema of the target source
 * @param dataSourceType KSTREAM or KTABLE (KTABLE enforces primary-key presence)
 * @return the assembled key, value, and timestamp
 * @throws KsqlException on column/expression count mismatch, unknown or disallowed columns,
 *     or missing primary-key values for tables
 */
public KsqlGenericRecord build( final List<ColumnName> columnNames, final List<Expression> expressions, final LogicalSchema schema, final DataSourceType dataSourceType ) { final List<ColumnName> columns = columnNames.isEmpty() ? implicitColumns(schema) : columnNames; if (columns.size() != expressions.size()) { throw new KsqlException( "Expected a value for each column." + " Expected Columns: " + columnNames + ". Got " + expressions); } final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema); for (ColumnName col : columns) { if (!schemaWithPseudoColumns.findColumn(col).isPresent()) { throw new KsqlException("Column name " + col + " does not exist."); } if (SystemColumns.isDisallowedForInsertValues(col)) { throw new KsqlException("Inserting into column " + col + " is not allowed."); } } final Map<ColumnName, Object> values = resolveValues( columns, expressions, schemaWithPseudoColumns, functionRegistry, config ); if (dataSourceType == DataSourceType.KTABLE) { final String noValue = schemaWithPseudoColumns.key().stream() .map(Column::name) .filter(colName -> !values.containsKey(colName)) .map(ColumnName::text) .collect(Collectors.joining(", ")); if (!noValue.isEmpty()) { throw new KsqlException("Value for primary key column(s) " + noValue + " is required for tables"); } } final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong()); final GenericKey key = buildKey(schema, values); final GenericRow value = buildValue(schema, values); return KsqlGenericRecord.of(key, value, ts); }
/**
 * With an empty column list (columns inferred from the schema), supplying fewer expressions
 * than inferred columns must fail with "Expected a value for each column".
 */
@Test public void shouldThrowOnColumnMismatchWhenInferred() { // Given: final LogicalSchema schema = LogicalSchema.builder() .keyColumn(KEY, SqlTypes.STRING) .valueColumn(COL0, SqlTypes.STRING) .valueColumn(COL1, SqlTypes.STRING) .build(); final List<ColumnName> names = ImmutableList.of(); final Expression exp = new StringLiteral("a"); // When: final KsqlException e = assertThrows(KsqlException.class, () -> recordFactory.build( names, ImmutableList.of(exp, exp), schema, DataSourceType.KSTREAM )); // Then: assertThat(e.getMessage(), containsString("Expected a value for each column")); }
/**
 * Extracts the database name segment from a statistics database path of the form
 * {@code <data-node-path>/<database>}.
 *
 * @param databasePath path to parse
 * @return database name, or empty if the path has no database segment
 */
public static Optional<String> getDatabaseNameByDatabasePath(final String databasePath) {
    Pattern pattern = Pattern.compile(getShardingSphereDataNodePath() + "/([\\w\\-]+)?", Pattern.CASE_INSENSITIVE);
    Matcher matcher = pattern.matcher(databasePath);
    // The name group is optional: for a path ending in a bare "/" the match
    // succeeds with group(1) == null, so Optional.of would throw an NPE here.
    // ofNullable maps that case to Optional.empty() instead.
    return matcher.find() ? Optional.ofNullable(matcher.group(1)) : Optional.empty();
}
/**
 * A path that ends at the data-node root (no trailing database segment) must yield an empty
 * Optional rather than a name.
 */
@Test void assertGetDatabaseNameByDatabasePathDbNameNotFoundScenario() { assertThat(ShardingSphereDataNode.getDatabaseNameByDatabasePath("/statistics/databases"), is(Optional.empty())); }
/**
 * Convenience overload: aggregates with the given initializer, adder, and subtractor under an
 * empty (auto-generated) name, delegating to the fully-parameterized variant.
 */
@Override public <VAgg> KTable<K, VAgg> aggregate(final Initializer<VAgg> initializer, final Aggregator<? super K, ? super V, VAgg> adder, final Aggregator<? super K, ? super V, VAgg> subtractor, final Materialized<K, VAgg, KeyValueStore<Bytes, byte[]>> materialized) { return aggregate(initializer, adder, subtractor, NamedInternal.empty(), materialized); }
/**
 * Verifies that a grouped-table aggregation materializes its result store "aggregate": the
 * plain key-value view holds the adder/remover concatenation per key, and the timestamped
 * view additionally carries the latest record timestamps (50 and 60).
 */
@Test public void shouldAggregateAndMaterializeResults() { builder .table( topic, Consumed.with(Serdes.String(), Serdes.String())) .groupBy( MockMapper.selectValueKeyValueMapper(), Grouped.with(Serdes.String(), Serdes.String())) .aggregate( MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, MockAggregator.TOSTRING_REMOVER, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("aggregate") .withValueSerde(Serdes.String()) .withKeySerde(Serdes.String())); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(topic, driver); { { final KeyValueStore<String, String> aggregate = driver.getKeyValueStore("aggregate"); assertThat(aggregate.get("1"), equalTo("0+1+1+1")); assertThat(aggregate.get("2"), equalTo("0+2+2")); } { final KeyValueStore<String, ValueAndTimestamp<String>> aggregate = driver.getTimestampedKeyValueStore("aggregate"); assertThat(aggregate.get("1"), equalTo(ValueAndTimestamp.make("0+1+1+1", 50L))); assertThat(aggregate.get("2"), equalTo(ValueAndTimestamp.make("0+2+2", 60L))); } } } }
/**
 * Lazily parses {@code schemaString} into an Avro {@link Schema} on first access and
 * memoizes it; later calls return the cached instance.
 *
 * @return the parsed source schema
 * @throws HoodieSchemaException if the schema string cannot be parsed
 */
@Override
public Schema getSourceSchema() {
    if (schema != null) {
        return schema;
    }
    try {
        schema = new Schema.Parser().parse(schemaString);
    } catch (Exception e) {
        throw new HoodieSchemaException("Failed to parse schema: " + schemaString, e);
    }
    return schema;
}
/**
 * Generates an Avro schema from the recursive {@code Parent} proto class with the default
 * recursion depth limit and compares it against the checked-in expected .avsc fixture.
 */
@Test public void validateRecursiveSchemaGeneration_defaultDepth() throws IOException { TypedProperties properties = new TypedProperties(); properties.setProperty(ProtoClassBasedSchemaProviderConfig.PROTO_SCHEMA_CLASS_NAME.key(), Parent.class.getName()); ProtoClassBasedSchemaProvider protoToAvroSchemaProvider = new ProtoClassBasedSchemaProvider(properties, null); Schema convertedSchema = protoToAvroSchemaProvider.getSourceSchema(); Schema.Parser parser = new Schema.Parser(); Schema expectedSchema = parser.parse(getClass().getClassLoader().getResourceAsStream("schema-provider/proto/parent_schema_recursive_default_limit.avsc")); Assertions.assertEquals(expectedSchema, convertedSchema); }
/**
 * Serializes the object to a compact (non-pretty-printed) JSON string by delegating to the
 * two-argument overload with pretty-printing disabled.
 */
public static <T> String serialize2Json(T object) { return serialize2Json(object, false); }
/**
 * Verifies that compact serialization yields the exact JSON string, and that the
 * pretty-printed variant is equivalent once all whitespace is stripped.
 */
@Test public void testSerialize2Json() { Map<String, String> sourceMap = new HashMap<>(); sourceMap.put("k1", "v1"); sourceMap.put("k2", "v2"); sourceMap.put("k3", "v3"); assertThat(JacksonUtils.serialize2Json(sourceMap)).isEqualTo("{\"k1\":\"v1\",\"k2\":\"v2\",\"k3\":\"v3\"}"); assertThat(StringUtils.trimAllWhitespace(JacksonUtils.serialize2Json(sourceMap, true))).isEqualTo("{\"k1\":\"v1\",\"k2\":\"v2\",\"k3\":\"v3\"}"); }
/**
 * Deletes all ZooKeeper-related Kubernetes resources as a sequential Future chain; each
 * deletion only starts after the previous one succeeded, and the chain fails fast on the
 * first error. Order: JMX secret, network policy, service account, client service, headless
 * service, certificate secret, logging/metrics ConfigMap, PodDisruptionBudget, StrimziPodSet,
 * and finally the persistent volume claims.
 */
public Future<Void> reconcile() { LOGGER.infoCr(reconciliation, "Deleting all the ZooKeeper related resources"); return jmxSecret() .compose(i -> deleteNetworkPolicy()) .compose(i -> deleteServiceAccount()) .compose(i -> deleteService()) .compose(i -> deleteHeadlessService()) .compose(i -> deleteCertificateSecret()) .compose(i -> deleteLoggingAndMetricsConfigMap()) .compose(i -> deletePodDisruptionBudget()) .compose(i -> deletePodSet()) .compose(i -> deletePersistentClaims()); }
/**
 * Full reconcile test for ZooKeeperEraser with deleteClaim=true: mocks every resource
 * operator's deleteAsync (and the PVC operator's reconcile/getAsync/listAsync) to succeed,
 * runs reconcile(), and verifies (a) each resource type is deleted the expected number of
 * times with the expected names, and (b) all three data PVCs are reconciled to null
 * (i.e. deleted) because the storage's delete-claim flag is true.
 */
@Test public void testZookeeperEraserReconcilePVCDeletionWithDeleteClaimTrue(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; ServiceOperator mockServiceOps = supplier.serviceOperations; NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; ConfigMapOperator mockCmOps = supplier.configMapOperations; StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator; SecretOperator mockSecretOps = supplier.secretOperations; PvcOperator mockPvcOps = supplier.pvcOperations; SharedEnvironmentProvider sharedEnvironmentProvider = supplier.sharedEnvironmentProvider; ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(RECONCILIATION, KAFKA, VERSIONS, sharedEnvironmentProvider); ArgumentCaptor<String> podSetDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockPodSetOps.deleteAsync(any(), anyString(), podSetDeletionCaptor.capture(), anyBoolean())).thenAnswer(i -> Future.succeededFuture()); ArgumentCaptor<String> secretDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockSecretOps.deleteAsync(any(), anyString(), secretDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); ArgumentCaptor<String> saDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockSaOps.deleteAsync(any(), anyString(), saDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); ArgumentCaptor<String> serviceDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockServiceOps.deleteAsync(any(), anyString(), serviceDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); ArgumentCaptor<String> netPolicyDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockNetPolicyOps.deleteAsync(any(), anyString(), netPolicyDeletionCaptor.capture(), 
anyBoolean())).thenReturn(Future.succeededFuture()); ArgumentCaptor<String> cmDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockCmOps.deleteAsync(any(), anyString(), cmDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); ArgumentCaptor<String> pdbDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockPdbOps.deleteAsync(any(), anyString(), pdbDeletionCaptor.capture(), anyBoolean())).thenReturn(Future.succeededFuture()); // Mock the PVC Operator Map<String, PersistentVolumeClaim> zkPvcs = createZooPvcs(NAMESPACE, zkCluster.getStorage(), zkCluster.nodes(), (replica, storageId) -> VolumeUtils.DATA_VOLUME_NAME + "-" + KafkaResources.zookeeperPodName(KAFKA.getMetadata().getName(), replica), deleteClaim(KAFKA.getSpec().getZookeeper().getStorage())); ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); ArgumentCaptor<String> pvcDeletionCaptor = ArgumentCaptor.forClass(String.class); when(mockPvcOps.reconcile(any(), anyString(), pvcDeletionCaptor.capture(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockPvcOps.getAsync(anyString(), ArgumentMatchers.startsWith("data-"))) .thenAnswer(invocation -> { String pvcName = invocation.getArgument(1); if (pvcName.contains(zkCluster.getComponentName())) { return Future.succeededFuture(zkPvcs.get(pvcName)); } return Future.succeededFuture(null); }); when(mockPvcOps.listAsync(anyString(), ArgumentMatchers.any(Labels.class))) .thenAnswer(invocation -> Future.succeededFuture(zkPvcs.values().stream().toList())); // test reconcile ZooKeeperEraser zkEraser = new ZooKeeperEraser( RECONCILIATION, supplier ); Checkpoint async = context.checkpoint(); zkEraser.reconcile() .onComplete(context.succeeding(v -> context.verify(() -> { verify(mockCmOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); verify(mockSaOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); verify(mockServiceOps, 
times(2)).deleteAsync(any(), any(), any(), anyBoolean()); verify(mockSecretOps, times(2)).deleteAsync(any(), any(), any(), anyBoolean()); verify(mockNetPolicyOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); verify(mockPodSetOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); verify(mockPdbOps, times(1)).deleteAsync(any(), any(), any(), anyBoolean()); assertThat(netPolicyDeletionCaptor.getAllValues(), is(List.of("my-cluster-network-policy-zookeeper"))); assertThat(serviceDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-client", "my-cluster-zookeeper-nodes"))); assertThat(saDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); assertThat(secretDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-jmx", "my-cluster-zookeeper-nodes"))); assertThat(podSetDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); assertThat(cmDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper-config"))); assertThat(pdbDeletionCaptor.getAllValues(), is(List.of("my-cluster-zookeeper"))); // Check PVCs verify(mockPvcOps, times(3)).getAsync(any(), any()); verify(mockPvcOps, times(1)).listAsync(any(), ArgumentMatchers.any(Labels.class)); verify(mockPvcOps, times(3)).reconcile(any(), any(), any(), any()); assertThat(pvcDeletionCaptor.getAllValues(), is(List.of("data-my-cluster-zookeeper-2", "data-my-cluster-zookeeper-0", "data-my-cluster-zookeeper-1"))); assertThat(pvcCaptor.getAllValues().size(), is(3)); assertThat(pvcCaptor.getAllValues().get(0), is(nullValue())); assertThat(pvcCaptor.getAllValues().get(1), is(nullValue())); assertThat(pvcCaptor.getAllValues().get(2), is(nullValue())); async.flag(); }))); }
/**
 * Resolves the single entity descriptor matching this configuration's identity provider
 * entity id, wrapping resolver failures in a {@link SAMLException}.
 */
@Override
public final XMLObject getEntityDescriptorElement() {
    try {
        var idpEntityId = determineIdentityProviderEntityId();
        var criteria = new CriteriaSet(new EntityIdCriterion(idpEntityId));
        return resolve().resolveSingle(criteria);
    } catch (final ResolverException e) {
        throw new SAMLException("Error initializing idpMetadataProvider", e);
    }
}
/**
 * Resolving against expired IdP metadata must yield a null entity descriptor rather than
 * throwing.
 */
@Test public void resolveExpiringMetadata() { var configuration = new SAML2Configuration(); configuration.setIdentityProviderMetadataResource(new ClassPathResource("expired-idp-metadata.xml")); metadataResolver = new SAML2IdentityProviderMetadataResolver(configuration); metadataResolver.init(); assertNull(metadataResolver.getEntityDescriptorElement()); }
/**
 * Returns a fresh, unresolved parameter set for subtask metrics requests; a new instance is
 * created per call so resolution state is never shared.
 */
@Override public SubtaskMetricsMessageParameters getUnresolvedMessageParameters() { return new SubtaskMetricsMessageParameters(); }
/**
 * Verifies the headers expose SubtaskMetricsMessageParameters as their unresolved
 * message-parameter type.
 */
@Test void testMessageParameters() { assertThat(subtaskMetricsHeaders.getUnresolvedMessageParameters()) .isInstanceOf(SubtaskMetricsMessageParameters.class); }
/**
 * Creates a MethodDeclaration with the given name whose formal parameters are built from the
 * provided name-to-type map (one Parameter per entry, in the map's iteration order).
 *
 * @param methodName name of the method to declare
 * @param parameterNameTypeMap parameter name mapped to its declared type
 * @return the method declaration with its parameter list populated
 */
public static MethodDeclaration getMethodDeclaration(final String methodName, final Map<String, ClassOrInterfaceType> parameterNameTypeMap) {
    MethodDeclaration toReturn = getMethodDeclaration(methodName);
    // These are the method's formal parameters (not generic type parameters).
    NodeList<Parameter> methodParameters = new NodeList<>();
    for (Map.Entry<String, ClassOrInterfaceType> entry : parameterNameTypeMap.entrySet()) {
        Parameter parameter = new Parameter();
        parameter.setName(entry.getKey());
        parameter.setType(entry.getValue());
        methodParameters.add(parameter);
    }
    toReturn.setParameters(methodParameters);
    return toReturn;
}
/**
 * Verifies method lookup on a class declaration: absent when the class is empty, absent when
 * only a differently-named method exists, present once the named method is added.
 */
@Test void getMethodDeclaration() { final String methodName = "METHOD_NAME"; final ClassOrInterfaceDeclaration classOrInterfaceDeclaration = new ClassOrInterfaceDeclaration(); assertThat(CommonCodegenUtils.getMethodDeclaration(classOrInterfaceDeclaration, methodName)).isNotPresent(); classOrInterfaceDeclaration.addMethod("NOT_METHOD"); assertThat(CommonCodegenUtils.getMethodDeclaration(classOrInterfaceDeclaration, methodName)).isNotPresent(); classOrInterfaceDeclaration.addMethod(methodName); assertThat(CommonCodegenUtils.getMethodDeclaration(classOrInterfaceDeclaration, methodName)).isPresent(); }