focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public void stop() {
    // Delegates to the boolean overload; the meaning of the hard-coded `true`
    // is defined by the sibling stop(boolean), not visible here — presumably
    // "clean up resources" (the paired test expects no onClose callback) — TODO confirm.
    stop(true);
}
@Test
public void shouldNotCallCloseCallbackOnStop() {
    // When: stopping the query
    query.stop();

    // Then: the close listener must never be notified
    verify(listener, times(0)).onClose(query);
}
/**
 * Computes a set of unload (and, when transfer mode is enabled, transfer) decisions that move
 * bundle load from the most loaded broker toward the least loaded one, until the cluster load
 * standard deviation meets the configured target or no transferable brokers remain.
 *
 * @param context                 load manager context (configuration, registries, load stores)
 * @param recentlyUnloadedBundles bundles to skip (bundle name -> unload timestamp)
 * @param recentlyUnloadedBrokers brokers to exclude from the load-stat update
 * @return the cached decision set (empty when unloading is skipped or fails)
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(),
                        TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        // Without the broker list no decision can be made; record the failure and bail out.
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);

        // Refresh min/max broker load statistics; a present skipReason aborts this cycle.
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }

        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;

        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Count consecutive cycles in which a shedding condition held; only shed once the
        // hit count exceeds the configured threshold (debounces transient load spikes).
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount,
                        conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }

        // Repeatedly pick the current max-loaded broker and try to shed half of its
        // weighted-load surplus over the min-loaded broker.
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers.");
                }
                break;
            }
            // `reason` is assigned exactly once per iteration and captured by the lambda below
            // (must stay effectively final).
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                // Cluster load meets the target and neither extreme broker is out of range.
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                            + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }

            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }

            // Target: offload half the weighted-load gap, converted to throughput units.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;

            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100, targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }

            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;

            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }

            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                // A single bundle cannot be shed without emptying the broker entirely.
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker,
                        maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }

            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent()
                    ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null;

            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }

            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                // Skip bundles this broker no longer owns (ownership may have moved already).
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                // Always leave at least one reported bundle on the max broker.
                if (remainingTopBundles <= 1) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                + " less than or equal to 1.", bundle, maxBroker));
                    }
                    break;
                }

                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            // Only accept a counter-move that keeps both projected throughputs
                            // below the max broker's current throughput.
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        // This bundle would overshoot the offload target and no swap rescues it.
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                    + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                    + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }

                Unload unload;
                if (transfer) {
                    if (swap) {
                        // Emit the min->max counter-transfers chosen above.
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                            // NOTE(review): trafficMarkedToGain is only accumulated when debugMode
                            // is enabled, so the shedding arithmetic differs between debug and
                            // non-debug runs — looks like a bug; confirm against upstream intent.
                            trafficMarkedToGain += minBrokerBundleSwapThroughput;
                        }
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                            + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                            + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB, trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Project the decided offload back onto the stats so the next loop iteration
                // reasons about the post-unload cluster state.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.",
                        maxBroker));
            }
        } // while end

        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }

        if (decisionCache.isEmpty()) {
            // Nothing decided this cycle: classify why for the skip counter.
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        // Catch-all so a single bad cycle never kills the shedding loop.
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
@Test
public void testSingleTopBundlesLoadData() {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = setupContext();
    // Given: top-bundles load data for five brokers (fixture shapes defined by
    // getTopBundlesLoad elsewhere in the test class).
    var topBundlesLoadDataStore = ctx.topBundleLoadDataStore();
    topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 1));
    topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 2));
    topBundlesLoadDataStore.pushAsync("broker3:8080", getTopBundlesLoad("my-tenant/my-namespaceC", 6));
    topBundlesLoadDataStore.pushAsync("broker4:8080", getTopBundlesLoad("my-tenant/my-namespaceD", 10));
    topBundlesLoadDataStore.pushAsync("broker5:8080", getTopBundlesLoad("my-tenant/my-namespaceE", 70));
    // When: shedding runs with nothing recently unloaded
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    // Then: no decisions are produced and the run is counted as Skip/NoBundles
    assertTrue(res.isEmpty());
    assertEquals(counter.getBreakdownCounters().get(Skip).get(NoBundles).get(), 1);
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);
}
/**
 * Assembles a {@link JibContainerBuilder} for containerizing a JAR.
 *
 * <p>The base image comes from the user-supplied {@code --from} option when present,
 * otherwise from a default derived from the artifact. A user-supplied entrypoint
 * overrides the computed one.
 *
 * @throws IOException if creating the file layers fails
 * @throws InvalidImageReferenceException if the base image reference cannot be parsed
 */
public static JibContainerBuilder toJibContainerBuilder(
    ArtifactProcessor processor,
    Jar jarOptions,
    CommonCliOptions commonCliOptions,
    CommonContainerConfigCliOptions commonContainerConfigCliOptions,
    ConsoleLogger logger)
    throws IOException, InvalidImageReferenceException {
  // Resolve the base image: explicit --from wins, otherwise an artifact-derived default.
  String baseImage =
      commonContainerConfigCliOptions.getFrom().orElseGet(() -> getDefaultBaseImage(processor));
  JibContainerBuilder builder =
      ContainerBuilders.create(baseImage, Collections.emptySet(), commonCliOptions, logger);

  // Compute layers before the entrypoint to preserve the processor call order.
  List<FileEntriesLayer> layers = processor.createLayers();
  List<String> entrypoint = commonContainerConfigCliOptions.getEntrypoint();
  if (entrypoint.isEmpty()) {
    entrypoint = processor.computeEntrypoint(jarOptions.getJvmFlags());
  }

  builder
      .setEntrypoint(entrypoint)
      .setFileEntriesLayers(layers)
      .setExposedPorts(commonContainerConfigCliOptions.getExposedPorts())
      .setVolumes(commonContainerConfigCliOptions.getVolumes())
      .setEnvironment(commonContainerConfigCliOptions.getEnvironment())
      .setLabels(commonContainerConfigCliOptions.getLabels())
      .setProgramArguments(commonContainerConfigCliOptions.getProgramArguments());
  // Optional settings are only applied when the user provided them.
  commonContainerConfigCliOptions.getUser().ifPresent(builder::setUser);
  commonContainerConfigCliOptions.getFormat().ifPresent(builder::setFormat);
  commonContainerConfigCliOptions.getCreationTime().ifPresent(builder::setCreationTime);
  return builder;
}
@Test
public void testToJibContainerBuilder_packagedStandard_basicInfo()
    throws IOException, InvalidImageReferenceException {
  // Given: a standard packaged-JAR processor for Java 8 producing one "jar" layer
  when(mockStandardPackagedProcessor.getJavaVersion()).thenReturn(8);
  FileEntriesLayer layer =
      FileEntriesLayer.builder()
          .setName("jar")
          .addEntry(
              Paths.get("path/to/standardJar.jar"), AbsoluteUnixPath.get("/app/standardJar.jar"))
          .build();
  when(mockStandardPackagedProcessor.createLayers()).thenReturn(Arrays.asList(layer));
  when(mockStandardPackagedProcessor.computeEntrypoint(anyList()))
      .thenReturn(ImmutableList.of("java", "-jar", "/app/standardJar.jar"));
  // No --from: the default base image for Java 8 must be selected
  when(mockCommonContainerConfigCliOptions.getFrom()).thenReturn(Optional.empty());

  // When:
  JibContainerBuilder containerBuilder =
      JarFiles.toJibContainerBuilder(
          mockStandardPackagedProcessor,
          mockJarCommand,
          mockCommonCliOptions,
          mockCommonContainerConfigCliOptions,
          mockLogger);
  ContainerBuildPlan buildPlan = containerBuilder.toContainerBuildPlan();

  // Then: defaults applied (temurin base image, amd64/linux, epoch time, Docker format)
  assertThat(buildPlan.getBaseImage()).isEqualTo("eclipse-temurin:8-jre");
  assertThat(buildPlan.getPlatforms()).isEqualTo(ImmutableSet.of(new Platform("amd64", "linux")));
  assertThat(buildPlan.getCreationTime()).isEqualTo(Instant.EPOCH);
  assertThat(buildPlan.getFormat()).isEqualTo(ImageFormat.Docker);
  assertThat(buildPlan.getEnvironment()).isEmpty();
  assertThat(buildPlan.getLabels()).isEmpty();
  assertThat(buildPlan.getVolumes()).isEmpty();
  assertThat(buildPlan.getExposedPorts()).isEmpty();
  assertThat(buildPlan.getUser()).isNull();
  assertThat(buildPlan.getWorkingDirectory()).isNull();
  // Entrypoint is the computed one since no custom entrypoint was supplied
  assertThat(buildPlan.getEntrypoint())
      .containsExactly("java", "-jar", "/app/standardJar.jar")
      .inOrder();
  assertThat(buildPlan.getLayers().get(0).getName()).isEqualTo("jar");
  assertThat(((FileEntriesLayer) buildPlan.getLayers().get(0)).getEntries())
      .isEqualTo(
          FileEntriesLayer.builder()
              .addEntry(
                  Paths.get("path/to/standardJar.jar"),
                  AbsoluteUnixPath.get("/app/standardJar.jar"))
              .build()
              .getEntries());
}
/**
 * Returns a GET rest-setting builder that matches the given resource id exactly.
 * The id is validated by checkId before being wrapped in an equality matcher.
 */
public static RestSettingBuilder get(final String id) {
    return get(eq(checkId(id)));
}
@Test
public void should_get_all_resources() throws Exception {
    // Given: two plain resources registered under /targets
    Plain resource1 = new Plain();
    resource1.code = 1;
    resource1.message = "hello";
    Plain resource2 = new Plain();
    resource2.code = 2;
    resource2.message = "world";
    server.resource("targets",
        get().response(json(ImmutableList.of(resource1, resource2)))
    );
    // When/Then: a GET on the collection URI returns both resources
    running(server, () -> {
        String uri = "/targets";
        List<Plain> plains = getResources(uri);
        assertThat(plains.size(), is(2));
    });
}
int calculatePrice(Integer basePrice, Integer percent, Integer fixedPrice) { // 1. 优先使用固定佣金 if (fixedPrice != null && fixedPrice > 0) { return ObjectUtil.defaultIfNull(fixedPrice, 0); } // 2. 根据比例计算佣金 if (basePrice != null && basePrice > 0 && percent != null && percent > 0) { return MoneyUtils.calculateRatePriceFloor(basePrice, Double.valueOf(percent)); } return 0; }
@Test
public void testCalculatePrice_equalsZero() {
    // Given: all inputs absent (null)
    Integer payPrice = null;
    Integer percent = null;
    Integer fixedPrice = null;
    // When: calculating the brokerage
    int brokerage = brokerageRecordService.calculatePrice(payPrice, percent, fixedPrice);
    // Then: no brokerage is due
    assertEquals(brokerage, 0);
}
/**
 * Parses raw FTP LIST/STAT reply lines into child paths of the given directory,
 * carrying over type, size, ownership, permissions, symlink target and timestamp.
 *
 * @param directory the directory the listing was requested for
 * @param replies   raw reply lines from the server
 * @return the parsed children
 * @throws FTPInvalidListException if not a single line parsed into a usable entry
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<Path>();
    // At least one entry successfully parsed
    boolean success = false;
    // Call hook for those implementors which need to perform some action upon the list after it has been created
    // from the server stream, but before any clients see the list
    parser.preParse(replies);
    for(String line : replies) {
        final FTPFile f = parser.parseFTPEntry(line);
        if(null == f) {
            // Unparseable line; ignore and keep scanning.
            continue;
        }
        final String name = f.getName();
        if(!success) {
            if(lenient) {
                // Workaround for #2410. STAT only returns ls of directory itself
                // Workaround for #2434. STAT of symbolic link directory only lists the directory itself.
                if(directory.getName().equals(name)) {
                    log.warn(String.format("Skip %s matching parent directory name", f.getName()));
                    continue;
                }
                if(name.contains(String.valueOf(Path.DELIMITER))) {
                    if(!name.startsWith(directory.getAbsolute() + Path.DELIMITER)) {
                        // Workaround for #2434.
                        log.warn(String.format("Skip %s with delimiter in name", name));
                        continue;
                    }
                }
            }
        }
        success = true;
        // Never surface the dot entries as children.
        if(name.equals(".") || name.equals("..")) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Skip %s", f.getName()));
            }
            continue;
        }
        final Path parsed = new Path(directory, PathNormalizer.name(name),
            f.getType() == FTPFile.DIRECTORY_TYPE ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file));
        switch(f.getType()) {
            case FTPFile.SYMBOLIC_LINK_TYPE:
                parsed.setType(EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Symbolic link target may be an absolute or relative path
                final String target = f.getLink();
                if(StringUtils.isBlank(target)) {
                    // No target: demote to a plain file rather than a dangling symlink.
                    log.warn(String.format("Missing symbolic link target for %s", parsed));
                    final EnumSet<Path.Type> type = parsed.getType();
                    type.remove(Path.Type.symboliclink);
                }
                else if(StringUtils.startsWith(target, String.valueOf(Path.DELIMITER))) {
                    // Absolute target path
                    parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                }
                else if(StringUtils.equals("..", target)) {
                    parsed.setSymlinkTarget(directory);
                }
                else if(StringUtils.equals(".", target)) {
                    parsed.setSymlinkTarget(parsed);
                }
                else {
                    // Relative target: resolve against the listed directory.
                    parsed.setSymlinkTarget(new Path(directory, target, EnumSet.of(Path.Type.file)));
                }
                break;
        }
        if(parsed.isFile()) {
            parsed.attributes().setSize(f.getSize());
        }
        parsed.attributes().setOwner(f.getUser());
        parsed.attributes().setGroup(f.getGroup());
        // Translate the parser's permission bits into owner/group/other action sets.
        Permission.Action u = Permission.Action.none;
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.READ_PERMISSION)) {
            u = u.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.WRITE_PERMISSION)) {
            u = u.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            u = u.or(Permission.Action.execute);
        }
        Permission.Action g = Permission.Action.none;
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.READ_PERMISSION)) {
            g = g.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.WRITE_PERMISSION)) {
            g = g.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            g = g.or(Permission.Action.execute);
        }
        Permission.Action o = Permission.Action.none;
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.READ_PERMISSION)) {
            o = o.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.WRITE_PERMISSION)) {
            o = o.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            o = o.or(Permission.Action.execute);
        }
        final Permission permission = new Permission(u, g, o);
        if(f instanceof FTPExtendedFile) {
            // Extended parser output also carries the special mode bits.
            permission.setSetuid(((FTPExtendedFile) f).isSetuid());
            permission.setSetgid(((FTPExtendedFile) f).isSetgid());
            permission.setSticky(((FTPExtendedFile) f).isSticky());
        }
        if(!Permission.EMPTY.equals(permission)) {
            parsed.attributes().setPermission(permission);
        }
        final Calendar timestamp = f.getTimestamp();
        if(timestamp != null) {
            parsed.attributes().setModificationDate(timestamp.getTimeInMillis());
        }
        children.add(parsed);
    }
    if(!success) {
        // Nothing usable parsed: signal an invalid listing (carrying what we have).
        throw new FTPInvalidListException(children);
    }
    return children;
}
// The only STAT reply is the directory's own symlink entry (absolute path), so no child
// entry is ever parsed successfully and the reader must throw FTPInvalidListException.
@Test(expected = FTPInvalidListException.class)
public void test3763() throws Exception {
    Path path = new Path("/www", EnumSet.of(Path.Type.directory));
    assertEquals("www", path.getName());
    assertEquals("/www", path.getAbsolute());
    final AttributedList<Path> list = new FTPListResponseReader(new FTPParserSelector().getParser("UNIX"), true)
        .read(path, Collections.singletonList(
            "lrwxrwxrwx 1 mk basicgrp 27 Sep 23 2004 /home/mk/www -> /www/basic/mk")
        );
}
/**
 * Creates the default factory turning a docker sub-command (e.g. ["build", ...]) into a
 * {@link ProcessBuilder} that invokes the given docker executable with the given extra
 * environment layered over the inherited one.
 */
@VisibleForTesting
static Function<List<String>, ProcessBuilder> defaultProcessBuilderFactory(
    String dockerExecutable, ImmutableMap<String, String> dockerEnvironment) {
  return dockerSubCommand -> {
    // Full command line: the executable followed by the sub-command tokens.
    List<String> command = new ArrayList<>(1 + dockerSubCommand.size());
    command.add(dockerExecutable);
    command.addAll(dockerSubCommand);

    ProcessBuilder builder = new ProcessBuilder(command);
    // Add the docker-specific variables on top of the inherited environment.
    builder.environment().putAll(dockerEnvironment);
    return builder;
  };
}
@Test
public void testDefaultProcessorBuilderFactory_customExecutable() {
  // Given/When: a factory for a custom executable with no extra environment
  ProcessBuilder processBuilder =
      CliDockerClient.defaultProcessBuilderFactory("docker-executable", ImmutableMap.of())
          .apply(Arrays.asList("sub", "command"));
  // Then: command line is [executable, sub-command...] and the environment is
  // the inherited one, untouched
  Assert.assertEquals(
      Arrays.asList("docker-executable", "sub", "command"), processBuilder.command());
  Assert.assertEquals(System.getenv(), processBuilder.environment());
}
/**
 * Scans the conversion pattern into a token list, dispatching each character to the
 * handler of the current lexer state, then flushes any state-dependent leftovers at
 * end-of-stream.
 *
 * @return the token list for the whole pattern
 * @throws ScanException if the pattern ends inside a format-modifier or option section
 */
List<Token> tokenize() throws ScanException {
    List<Token> tokenList = new ArrayList<Token>();
    StringBuffer buf = new StringBuffer();
    // Consume the pattern one character at a time; the per-state handlers mutate
    // `state`, `tokenList` and `buf` as side effects.
    while (pointer < patternLength) {
        char c = pattern.charAt(pointer);
        pointer++;
        switch (state) {
        case LITERAL_STATE:
            handleLiteralState(c, tokenList, buf);
            break;
        case FORMAT_MODIFIER_STATE:
            handleFormatModifierState(c, tokenList, buf);
            break;
        case OPTION_STATE:
            processOption(c, tokenList, buf);
            break;
        case KEYWORD_STATE:
            handleKeywordState(c, tokenList, buf);
            break;
        case RIGHT_PARENTHESIS_STATE:
            handleRightParenthesisState(c, tokenList, buf);
            break;
        default:
        }
    }
    // EOS: emit whatever the final state left buffered.
    switch (state) {
    case LITERAL_STATE:
        addValuedToken(Token.LITERAL, buf, tokenList);
        break;
    case KEYWORD_STATE:
        tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
        break;
    case RIGHT_PARENTHESIS_STATE:
        tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
        break;
    case FORMAT_MODIFIER_STATE:
    case OPTION_STATE:
        // A modifier/option section must be closed before the pattern ends.
        throw new ScanException("Unexpected end of pattern string");
    }
    return tokenList;
}
@Test
public void testEmptyP2() throws ScanException {
    // "%()" lexes to PERCENT, a bare composite keyword, and a right parenthesis
    List<Token> tl = new TokenStream("%()").tokenize();
    List<Token> witness = new ArrayList<Token>();
    witness.add(Token.PERCENT_TOKEN);
    witness.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
    witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
    assertEquals(witness, tl);
}
/**
 * Populates packaging data for every stack frame reachable from the given throwable
 * proxy: the proxy itself, each of its suppressed throwables, and the entire cause chain.
 */
public void calculate(IThrowableProxy tp) {
    // Walk the cause chain without mutating the parameter.
    for (IThrowableProxy current = tp; current != null; current = current.getCause()) {
        populateFrames(current.getStackTraceElementProxyArray());
        IThrowableProxy[] suppressedProxies = current.getSuppressed();
        if (suppressedProxies != null) {
            for (IThrowableProxy suppressedProxy : suppressedProxies) {
                populateFrames(suppressedProxy.getStackTraceElementProxyArray());
            }
        }
    }
}
@Test
public void nested() throws Exception {
    // A 3-level nested exception: packaging data must be computed for the whole cause chain
    Throwable t = TestHelper.makeNestedException(3);
    ThrowableProxy tp = new ThrowableProxy(t);
    PackagingDataCalculator pdc = tp.getPackagingDataCalculator();
    pdc.calculate(tp);
    verify(tp);
}
/**
 * Deletes the given files by first moving them to trash (precondition of the service's
 * delete) and then performing the delete, invalidating cached resource ids afterwards.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    try {
        // NOTE(review): `client` is never used in this method — verify whether the
        // constructor has required side effects before removing it.
        final EueApiClient client = new EueApiClient(session);
        // Move to trash first as precondition of delete
        this.delete(super.trash(files, prompt, callback));
        // Drop cached resource ids for all deleted files.
        for(Path f : files.keySet()) {
            fileid.cache(f, null);
        }
    }
    catch(ApiException e) {
        // NOTE(review): this loop throws on the first key only, so remaining files are
        // never reported — and when `files` is empty the exception is silently swallowed.
        // Presumably a single failure is meant to represent the whole batch — confirm.
        for(Path f : files.keySet()) {
            throw new EueExceptionMappingService().map("Cannot delete {0}", e, f);
        }
    }
}
// Deleting an already-deleted file must fail with NotfoundException carrying the
// service's detail message for the stale resource id.
@Test(expected = NotfoundException.class)
public void testDoubleDelete() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    // Given: a freshly created remote file
    final Path file = new EueTouchFeature(session, fileid)
        .touch(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final String resourceId = file.attributes().getFileId();
    // First delete succeeds
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(file),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
    try {
        // Second delete with the now-stale resource id must fail
        new EueDeleteFeature(session, fileid).delete(Collections.singletonList(
            file.withAttributes(new PathAttributes().withFileId(resourceId))),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
        fail();
    }
    catch(NotfoundException e) {
        assertEquals(String.format("Https://mc.gmx.net/restfs-1/fs/@1015156902205593160/resource/%s does not exist. Please contact your web hosting service provider for assistance.", resourceId), e.getDetail());
        throw e;
    }
}
/** Creates a KsqlTarget for the given server URI with no additional request properties. */
public KsqlTarget target(final URI server) {
    return target(server, Collections.emptyMap());
}
@Test
public void shouldSendKsqlRequest() {
    // Given:
    String ksql = "some ksql";
    Object expectedResponse = setupExpectedResponse();

    // When:
    KsqlTarget target = ksqlClient.target(serverUri);
    RestResponse<KsqlEntityList> resp =
        target.postKsqlRequest(ksql, Collections.emptyMap(), Optional.of(123L));

    // Then: POST /ksql with JSON accept header and the command sequence number 123
    assertThat(resp.get(), is(expectedResponse));
    assertThat(server.getHttpMethod(), is(HttpMethod.POST));
    assertThat(server.getPath(), is("/ksql"));
    assertThat(server.getHeaders().get("Accept"), is("application/json"));
    assertThat(getKsqlRequest(), is(new KsqlRequest(ksql, properties, Collections.emptyMap(), 123L)));
}
/**
 * Initializes the held quality gate. Write-once: a second call fails with
 * IllegalStateException; a null argument fails fast with NullPointerException.
 */
@Override
public void setQualityGate(QualityGate g) {
    // fail fast
    requireNonNull(g);
    checkState(qualityGate == null, "QualityGateHolder can be initialized only once");
    this.qualityGate = g;
}
@Test
public void setQualityGate_throws_ISE_if_called_twice() {
    // The holder is write-once: the second set must fail with IllegalStateException
    assertThatThrownBy(() -> {
        QualityGateHolderImpl holder = new QualityGateHolderImpl();
        holder.setQualityGate(QUALITY_GATE);
        holder.setQualityGate(QUALITY_GATE);
    })
        .isInstanceOf(IllegalStateException.class);
}
/**
 * Builds the probe-modify response payload listing each changed group key as
 * dataId WORD_SEPARATOR group [WORD_SEPARATOR tenant] LINE_SEPARATOR, then URL-encodes
 * the whole string as UTF-8.
 *
 * @param changedGroupKeys group keys whose MD5 changed; may be null
 * @return the URL-encoded payload, or "" when the input is null
 * @throws IOException declared by the signature; UTF-8 is always supported so the
 *                     encode call does not fail in practice
 */
public static String compareMd5ResultString(List<String> changedGroupKeys) throws IOException {
    if (null == changedGroupKeys) {
        return "";
    }
    StringBuilder sb = new StringBuilder();
    for (String groupKey : changedGroupKeys) {
        String[] dataIdGroupId = GroupKey.parseKey(groupKey);
        sb.append(dataIdGroupId[0]);
        sb.append(WORD_SEPARATOR);
        sb.append(dataIdGroupId[1]);
        // if have tenant, then set it
        // NOTE(review): the tenant is read from index 2 while the guard compares the
        // array length against SIZE_4 — confirm parseKey's array shape matches SIZE_4.
        if (dataIdGroupId.length == SIZE_4) {
            if (StringUtil.isNotBlank(dataIdGroupId[2])) {
                sb.append(WORD_SEPARATOR);
                sb.append(dataIdGroupId[2]);
            }
        }
        sb.append(LINE_SEPARATOR);
    }
    return URLEncoder.encode(sb.toString(), "UTF-8");
}
@Test
public void compareMd5ResultStringTest() {
    String key = null;
    try {
        // "DataId+Group" parses to dataId=DataId, group=Group (no tenant)
        key = Md5ConfigUtil.compareMd5ResultString(Lists.newArrayList("DataId+Group"));
    } catch (IOException ignored) {
        // UTF-8 encoding never fails; nothing to handle here.
    }
    // %02 / %01 are the URL-encoded WORD_SEPARATOR / LINE_SEPARATOR control characters
    Assert.isTrue(Objects.equals("DataId%02Group%01", key));
}
/**
 * Loads all trusted certificates from the repository, converting each stored raw
 * byte blob into an X509Certificate via X509Factory.
 */
@Override
protected Collection<X509Certificate> getTrusted() {
    return repository.findTrustedCertificates().stream().map(
        c -> X509Factory.toCertificate(c.getRaw())
    ).collect(Collectors.toList());
}
@Test
public void shouldNotLoadCertificateIfNoTrustedInDocumentType() throws Exception {
    // Given: one trusted (rdw) and one untrusted (npkd) certificate persisted
    final Certificate rdw = loadCertificate("rdw/01.cer", true);
    final Certificate npkd = loadCertificate("npkd/01.cer", false);
    certificateRepo.save(rdw);
    certificateRepo.save(npkd);
    certificateRepo.flush();
    // When:
    final Collection<X509Certificate> trusted = service.getTrusted();
    // Then: only the trusted certificate is returned, with the expected subject
    assertEquals(1, trusted.size());
    assertEquals(rdw.getSubject(), X509Factory.toCanonical(trusted.toArray(new X509Certificate[0])[0].getSubjectX500Principal()));
}
/**
 * Loads the CSV file into a fresh MutableDataset, treating the named column as the response.
 *
 * @param csvPath      path of the CSV file to load
 * @param responseName name of the response column
 * @return a mutable dataset backed by the loaded data source
 * @throws IOException if the file cannot be read (e.g. NoSuchFileException when absent)
 */
public MutableDataset<T> load(Path csvPath, String responseName) throws IOException {
    return new MutableDataset<>(loadDataSource(csvPath, responseName));
}
@Test
public void testFileDoesNotExist() {
    // Create then delete a temp file to obtain a path guaranteed not to exist
    Path tmp;
    try {
        tmp = Files.createTempFile("CSVLoaderTest_testFileDoesNotExist", "txt");
        Files.delete(tmp);
        assertFalse(Files.exists(tmp));
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    // Loading a missing file must surface NoSuchFileException
    CSVLoader<MockOutput> loader = new CSVLoader<>(new MockOutputFactory());
    assertThrows(NoSuchFileException.class, () -> loader.load(tmp, "RESPONSE"));
}
/** OSGi deactivation hook: detach listeners, stop async event handling and release state. */
@Deactivate
public void deactivate() {
    states.removeListener(statesListener);
    // NOTE(review): shutdown() only initiates termination; queued events may still run
    // briefly — confirm whether awaitTermination is required here.
    eventHandler.shutdown();
    violations.destroy();
    log.info("Stopped");
}
@Test
public void testDeactivate() {
    // NOTE(review): this only verifies that a locally created executor shuts down;
    // it never invokes deactivate() on the store — consider strengthening the test.
    eventHandler = newSingleThreadExecutor(groupedThreads("onos/security/store", "event-handler", log));
    eventHandler.shutdown();
    assertTrue(eventHandler.isShutdown());
}
/**
 * Returns the hits as a sequential stream backed by a PredicateSpliterator,
 * or an empty stream when there are no posting lists.
 */
public Stream<Hit> stream() {
    return nPostingLists == 0
            ? Stream.empty()
            : StreamSupport.stream(new PredicateSpliterator(), false);
}
@Test
void requireThatSingleStreamFiltersOnConstructedCompleteIntervals() {
    // Given: three documents with interval posting data; per the test name, only
    // documents whose constructed intervals are complete may become hits
    PredicateSearch search = createPredicateSearch(
        new byte[]{1, 1, 1},
        postingList(
            SubqueryBitmap.ALL_SUBQUERIES,
            entry(0, 0x000100ff),
            entry(1, 0x00010001, 0x000200ff),
            entry(2, 0x00010042)));
    // Then: documents 0 and 1 are hits; document 2 is filtered out
    assertEquals(List.of(new Hit(0), new Hit(1)).toString(), search.stream().toList().toString());
}
/**
 * Fetches every step instance for the given workflow run, mapped through
 * maestroStepFromResult; the null step-id filter selects all steps.
 */
public List<StepInstance> getAllStepInstances(
    String workflowId, long workflowInstanceId, long workflowRunId) {
    return getStepInstancesByIds(
        workflowId, workflowInstanceId, workflowRunId, null, this::maestroStepFromResult);
}
@Test
public void testGetAllStepInstances() {
    List<StepInstance> instances = stepDao.getAllStepInstances(TEST_WORKFLOW_ID, 1, 1);
    assertEquals(1, instances.size());
    StepInstance instance = instances.get(0);
    // Runtime state and signal dependencies of the single fixture step
    assertEquals(StepInstance.Status.RUNNING, instance.getRuntimeState().getStatus());
    assertFalse(instance.getSignalDependencies().isSatisfied());
    assertEquals(
        2,
        instance
            .getDefinition()
            .getOutputs()
            .get(StepOutputsDefinition.StepOutputType.SIGNAL)
            .asSignalOutputsDefinition()
            .getDefinitions()
            .size());
    assertTrue(instance.getArtifacts().isEmpty());
    assertTrue(instance.getTimeline().isEmpty());
    // Null out empty collections before the full-structure comparison with the fixture
    instance.setArtifacts(null);
    instance.setTimeline(null);
    Assertions.assertThat(instance).usingRecursiveComparison().isEqualTo(si);
}
/** Entry point for the study-metadata read transform; see ReadStudyMetadata for the expansion. */
public static ReadStudyMetadata readStudyMetadata() {
    return new ReadStudyMetadata();
}
@Test
public void test_Dicom_failedMetadataRead() {
    // An invalid web path must produce no read responses and exactly one failed read
    String badWebPath = "foo";
    DicomIO.ReadStudyMetadata.Result retrievedData;
    retrievedData = pipeline.apply(Create.of(badWebPath)).apply(DicomIO.readStudyMetadata());
    PAssert.that(retrievedData.getReadResponse()).empty();
    PAssert.that(retrievedData.getFailedReads())
        .satisfies(
            (errors) -> {
                Assert.assertTrue(errors.iterator().hasNext());
                return null;
            });
    pipeline.run();
}
/**
 * Resolves a SpEL expression for the given method invocation.
 * <p>
 * Resolution order: blank input is returned as-is; {@code ${...}} property placeholders
 * are delegated to the embedded value resolver (when available); method-style SpEL is
 * evaluated against a method-based context; bean-style SpEL is evaluated the same way
 * but with a bean resolver registered. Anything else is returned unchanged.
 *
 * @param method         the intercepted method (root object for evaluation)
 * @param arguments      the invocation arguments
 * @param spelExpression the raw expression; may be empty or a plain literal
 * @return the resolved value, or the original expression when no form matches
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }
    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        return stringValueResolver.resolveStringValue(spelExpression);
    }
    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        return evaluateSpel(method, arguments, spelExpression, false);
    }
    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        return evaluateSpel(method, arguments, spelExpression, true);
    }
    // Not a recognized SpEL form: treat the expression as a literal.
    return spelExpression;
}

/**
 * Evaluates {@code spelExpression} in a method-based context; when
 * {@code withBeanResolver} is set, {@code @beanName} references are resolvable too.
 */
private String evaluateSpel(Method method, Object[] arguments, String spelExpression,
        boolean withBeanResolver) {
    SpelRootObject rootObject = new SpelRootObject(method, arguments);
    MethodBasedEvaluationContext evaluationContext =
            new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
    if (withBeanResolver) {
        evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
    }
    Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
    return (String) evaluated;
}
// Resolves a simple string-literal SpEL expression and expects the unwrapped literal back.
@Test public void stringSpelTest() throws Exception { String testExpression = "#{'recover'}"; DefaultSpelResolverTest target = new DefaultSpelResolverTest(); Method testMethod = target.getClass().getMethod("testMethod", String.class); String result = sut.resolve(testMethod, new Object[]{}, testExpression); assertThat(result).isEqualTo("recover"); }
// Membership check against the page-id index; caller must hold the store lock (see @GuardedBy).
@Override @GuardedBy("getLock()") public boolean hasPage(PageId pageId) { return mPages.contains(INDEX_PAGE_ID, pageId); }
// hasPage() is false before addPage() and true after.
@Test public void hasPage() { Assert.assertFalse(mMetaStore.hasPage(mPage)); mMetaStore.addPage(mPage, mPageInfo); Assert.assertTrue(mMetaStore.hasPage(mPage)); }
/**
 * Narrows the set of possible hot-cold dig locations given a temperature reading at
 * {@code worldPoint} and an optional warmer/colder/same change relative to the last reading.
 * <p>
 * First eliminates locations entirely inside the minimum-distance square or entirely outside
 * the maximum-distance square (Chebyshev distance). Then, if a previous reading exists, uses
 * the temperature change to discard locations whose 2D distance ordering between the current
 * and previous points contradicts the reported change. Finally records the current point as
 * the new reference and returns the remaining candidates. NOTE(review): the SAME case has no
 * trailing break — it is the last switch case, so fall-through cannot occur.
 */
public Set<HotColdLocation> signal(@Nonnull final WorldPoint worldPoint, @Nonnull final HotColdTemperature temperature, @Nullable final HotColdTemperatureChange temperatureChange) { // when the strange device reads a temperature, that means that the center of the final dig location // is a range of squares away from the player's current location (Chebyshev AKA Chess-board distance) int maxSquaresAway = temperature.getMaxDistance(); int minSquaresAway = temperature.getMinDistance(); // maxDistanceArea encompasses all of the points that are within the max possible distance from the player final Rectangle maxDistanceArea = new Rectangle( worldPoint.getX() - maxSquaresAway, worldPoint.getY() - maxSquaresAway, 2 * maxSquaresAway + 1, 2 * maxSquaresAway + 1); // minDistanceArea encompasses all of the points that are within the min possible distance from the player final Rectangle minDistanceArea = new Rectangle( worldPoint.getX() - minSquaresAway, worldPoint.getY() - minSquaresAway, 2 * minSquaresAway + 1, 2 * minSquaresAway + 1); // eliminate from consideration dig spots that lie entirely within the min range or entirely outside of the max range possibleLocations.removeIf(entry -> minDistanceArea.contains(entry.getRect()) || !maxDistanceArea.intersects(entry.getRect())); // if a previous world point has been recorded, we can consider the warmer/colder result from the strange device if (lastWorldPoint != null && temperatureChange != null) { switch (temperatureChange) { case COLDER: // eliminate spots that are warmer or same temperature possibleLocations.removeIf(location -> { final WorldPoint locationPoint = location.getWorldPoint(); return locationPoint.distanceTo2D(worldPoint) <= locationPoint.distanceTo2D(lastWorldPoint); }); break; case WARMER: // eliminate spots that are colder or same temperature possibleLocations.removeIf(location -> { final WorldPoint locationPoint = location.getWorldPoint(); return locationPoint.distanceTo2D(worldPoint) >= 
locationPoint.distanceTo2D(lastWorldPoint); }); break; case SAME: // eliminate spots which are colder or warmer (as they would not yield a SAME temperature change) possibleLocations.removeIf(location -> { final WorldPoint locationPoint = location.getWorldPoint(); return locationPoint.distanceTo2D(worldPoint) != locationPoint.distanceTo2D(lastWorldPoint); }); } } lastWorldPoint = worldPoint; return getPossibleLocations(); }
// Two-signal narrowing: a WARM reading keeps both candidates, then a WARMER change while
// moving west eliminates the Al Kharid mine location, leaving only the cow field.
@Test public void testBeginnerCowFieldNarrowing() { // Start with Cow field north of Lumbridge and Northeast of Al Kharid mine locations remaining final Set<HotColdLocation> startingLocations = EnumSet.of( HotColdLocation.LUMBRIDGE_COW_FIELD, HotColdLocation.NORTHEAST_OF_AL_KHARID_MINE ); HotColdSolver solver = new HotColdSolver(startingLocations); assertEquals(startingLocations, solver.signal(new WorldPoint(3313, 3208, 0), HotColdTemperature.WARM, null)); assertEquals(Sets.immutableEnumSet(HotColdLocation.LUMBRIDGE_COW_FIELD), solver.signal(new WorldPoint(3312, 3208, 0), HotColdTemperature.WARM, HotColdTemperatureChange.WARMER)); }
// Convenience overload: subscribes with an empty (mutable) cluster list, delegating to the
// cluster-aware variant.
@Override public void subscribe(String serviceName, EventListener listener) throws NacosException { subscribe(serviceName, new ArrayList<>(), listener); }
// Subscribing with a group + cluster list must register a selector wrapper with the change
// notifier and forward the subscription to the naming proxy exactly once.
@Test void testSubscribe4() throws NacosException { //given String serviceName = "service1"; String groupName = "group1"; List<String> clusterList = Arrays.asList("cluster1", "cluster2"); EventListener listener = event -> { }; //when client.subscribe(serviceName, groupName, clusterList, listener); NamingSelectorWrapper wrapper = new NamingSelectorWrapper(serviceName, groupName, getUniqueClusterString(clusterList), NamingSelectorFactory.newClusterSelector(clusterList), listener); //then verify(changeNotifier, times(1)).registerListener(groupName, serviceName, wrapper); verify(proxy, times(1)).subscribe(serviceName, groupName, Constants.NULL); }
/**
 * Runs {@code task} under this ZooKeeper-backed lock, reentrantly: if the current plan
 * already holds the lock (tracked via PlanLocal), only the hold count is incremented;
 * otherwise the lock is acquired before {@code deadline}. The task's outcome is captured
 * with {@code toTry()} so the lock is always released, then the original result (value or
 * failure) is unwound to the caller.
 */
@Override public <T> Task<T> synchronize(Task<T> task, long deadline) { return PlanLocal.get(getPlanLocalKey(), LockInternal.class) .flatMap(lockInternal -> { if (lockInternal != null) { // we already acquire the lock, add count only. lockInternal._lockCount++; return Task.value(lockInternal._lockNode); } else { // try acquire. return acquire(deadline); } }) /* run the given task with toTry() */ .flatMap(unused -> task).toTry() /* release the lock and unwind the result */ .flatMap(result -> release().andThen(unwind(result))); }
// Nested synchronize() on the same lock must not deadlock and must yield the inner task's value.
@Test public void testReentrant() throws InterruptedException { final long deadline = System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(10, TimeUnit.SECONDS); final ZKLock lock = createZKLock(); Task<Integer> synchronizedTask = lock.synchronize(lock.synchronize(Task.value(1), deadline), deadline); runAndWait("synchronizedTask", synchronizedTask); Assert.assertEquals((int) synchronizedTask.get(), 1); }
// Validates the CREATE DATABASE statement, persists the new database via the metadata
// manager, and returns an update response header. check() raises on invalid/duplicate names.
@Override public ResponseHeader execute() throws SQLException { check(sqlStatement); ProxyContext.getInstance().getContextManager().getPersistServiceFacade().getMetaDataManagerPersistService().createDatabase(sqlStatement.getDatabaseName()); return new UpdateResponseHeader(sqlStatement); }
// Creating a database that already exists must raise DatabaseCreateExistsException.
@Test void assertExecuteCreateExistDatabase() { when(statement.getDatabaseName()).thenReturn("foo_db"); ContextManager contextManager = mockContextManager(); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); when(ProxyContext.getInstance().databaseExists("foo_db")).thenReturn(true); assertThrows(DatabaseCreateExistsException.class, () -> handler.execute()); }
// Initializes the metrics recorder for this task before delegating to the parent's
// open-existing logic (which performs segment discovery/migration).
@Override public void openExisting(final ProcessorContext context, final long streamTime) { metricsRecorder.init(ProcessorContextUtils.metricsImpl(context), context.taskId()); super.openExisting(context, streamTime); }
// Creates segment files using the legacy "name:id" naming, then verifies openExisting()
// migrates each of them to the new "name.id" format on disk.
@Test public void shouldUpdateSegmentFileNameFromOldColonFormatToNewFormat() throws Exception { final String storeDirectoryPath = stateDirectory.getAbsolutePath() + File.separator + storeName; final File storeDirectory = new File(storeDirectoryPath); //noinspection ResultOfMethodCallIgnored storeDirectory.mkdirs(); for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) { final File oldSegment = new File(storeDirectoryPath + File.separator + storeName + ":" + segmentId * (RETENTION_PERIOD / (NUM_SEGMENTS - 1))); //noinspection ResultOfMethodCallIgnored Files.createFile(oldSegment.toPath()); } segments.openExisting(context, -1L); for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) { final File newSegment = new File(storeDirectoryPath + File.separator + storeName + "." + segmentId * (RETENTION_PERIOD / (NUM_SEGMENTS - 1))); assertTrue(Files.exists(newSegment.toPath())); } }
/**
 * Verifies that the digest of data group {@code number} matches the digest recorded in the
 * security object. Throws CryptoException when no stored digest exists for that group, and
 * VerificationException when the computed digest differs.
 * NOTE(review): CryptoUtils.compare's third argument (0) is presumably an offset — confirm
 * against its definition.
 */
public void verify(int number, byte[] dg) { final byte[] compare = digests.get(number); if (compare == null) { throw new CryptoException("Could not find digest of data group " + number); } final byte[] calculated = DigestUtils.digest(algorithm).digest(dg); if (!CryptoUtils.compare(compare, calculated, 0)) { throw new VerificationException("Digest of data group " + number + " is not equal to security object"); } }
// Parses a real LDS security object from CMS and verifies data group 14 against fixture data.
@Test public void verifyPcaRvigDg14() throws Exception { final LdsSecurityObject ldsSecurityObject = mapper.read( readFromCms("pca-rvig"), LdsSecurityObject.class); ldsSecurityObject.verify(14, createPcaRvigDg14()); }
// Public entry point: smooths elevations of the whole point list (Ramer-style) using the
// first and last points as fixed anchors.
public static void smooth(PointList pointList, double maxElevationDelta) { internSmooth(pointList, 0, pointList.size() - 1, maxElevationDelta); }
// With tolerance 70 the 100m spike survives; raising tolerance to 75 interpolates the middle
// point to 25m (linear between the anchors). Point count never changes.
@Test public void smoothRamer() { PointList pl1 = new PointList(3, true); pl1.add(0, 0, 0); pl1.add(0.0005, 0.0005, 100); pl1.add(0.001, 0.001, 50); EdgeElevationSmoothingRamer.smooth(pl1, 70); assertEquals(3, pl1.size()); assertEquals(100, pl1.getEle(1), .1); EdgeElevationSmoothingRamer.smooth(pl1, 75); assertEquals(3, pl1.size()); assertEquals(25, pl1.getEle(1), .1); }
// Ordered shutdown: stop persistent queries first, then delete sink topics matching the given
// patterns, then the managed internal topics, and finally close the engine. Order matters —
// queries must stop before their sink topics are removed.
public void terminateCluster(final List<String> deleteTopicPatterns) { terminatePersistentQueries(); deleteSinkTopics(deleteTopicPatterns); deleteTopics(managedTopics); ksqlEngine.close(); }
// Only topics with a registered schema get a schema-registry cleanup; K_Foo has none and must
// be skipped without error.
@Test public void shouldNotCleanUpSchemaIfSchemaDoesNotExist() throws Exception { // Given: givenTopicsExistInKafka("K_Foo", "K_Bar"); givenSinkTopicsExistInMetastore(FormatFactory.AVRO, "K_Foo", "K_Bar"); givenSchemasForTopicsExistInSchemaRegistry("K_Bar"); // When: clusterTerminator.terminateCluster(ImmutableList.of("K_Foo", "K_Bar")); // Then: verifySchemaDeletedForTopics("K_Bar"); verifySchemaNotDeletedForTopic("K_Foo"); }
/**
 * Merges/rolls up the given input segments into new output segments.
 * <p>
 * Builds a SegmentProcessorConfig from the table config and schema (time handling,
 * partitioning, merge type, aggregations, segment config, progress observer), opens one
 * PinotSegmentRecordReader per input segment (without default-value null filling, to stay
 * consistent with other readers), runs the SegmentProcessorFramework, and always closes the
 * readers in the finally block. Returns one SegmentConversionResult per generated segment.
 */
@Override protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs, File workingDir) throws Exception { int numInputSegments = segmentDirs.size(); _eventObserver.notifyProgress(pinotTaskConfig, "Converting segments: " + numInputSegments); String taskType = pinotTaskConfig.getTaskType(); Map<String, String> configs = pinotTaskConfig.getConfigs(); LOGGER.info("Starting task: {} with configs: {}", taskType, configs); long startMillis = System.currentTimeMillis(); String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY); TableConfig tableConfig = getTableConfig(tableNameWithType); Schema schema = getSchema(tableNameWithType); SegmentProcessorConfig.Builder segmentProcessorConfigBuilder = new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema); // Time handler config segmentProcessorConfigBuilder .setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs)); // Partitioner config segmentProcessorConfigBuilder .setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs)); // Merge type segmentProcessorConfigBuilder.setMergeType(MergeTaskUtils.getMergeType(configs)); // Aggregation types segmentProcessorConfigBuilder.setAggregationTypes(MergeTaskUtils.getAggregationTypes(configs)); // Segment config segmentProcessorConfigBuilder.setSegmentConfig(MergeTaskUtils.getSegmentConfig(configs)); // Progress observer segmentProcessorConfigBuilder.setProgressObserver(p -> _eventObserver.notifyProgress(_pinotTaskConfig, p)); SegmentProcessorConfig segmentProcessorConfig = segmentProcessorConfigBuilder.build(); List<RecordReader> recordReaders = new ArrayList<>(numInputSegments); int count = 1; for (File segmentDir : segmentDirs) { _eventObserver.notifyProgress(_pinotTaskConfig, String.format("Creating RecordReader for: %s (%d out of %d)", segmentDir, count++, numInputSegments)); PinotSegmentRecordReader recordReader = new 
PinotSegmentRecordReader(); // NOTE: Do not fill null field with default value to be consistent with other record readers recordReader.init(segmentDir, null, null, true); recordReaders.add(recordReader); } List<File> outputSegmentDirs; try { _eventObserver.notifyProgress(_pinotTaskConfig, "Generating segments"); outputSegmentDirs = new SegmentProcessorFramework(recordReaders, segmentProcessorConfig, workingDir).process(); } finally { for (RecordReader recordReader : recordReaders) { recordReader.close(); } } long endMillis = System.currentTimeMillis(); LOGGER.info("Finished task: {} with configs: {}. Total time: {}ms", taskType, configs, (endMillis - startMillis)); List<SegmentConversionResult> results = new ArrayList<>(); for (File outputSegmentDir : outputSegmentDirs) { String outputSegmentName = outputSegmentDir.getName(); results.add(new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName) .setTableNameWithType(tableNameWithType).build()); } return results; }
// End-to-end daily merge: all input segments collapse into one merged segment whose row count
// equals the sum of the inputs.
@Test public void testConvert() throws Exception { MergeRollupTaskExecutor mergeRollupTaskExecutor = new MergeRollupTaskExecutor(new MinionConf()); mergeRollupTaskExecutor.setMinionEventObserver(new MinionProgressObserver()); Map<String, String> configs = new HashMap<>(); configs.put(MinionConstants.TABLE_NAME_KEY, "testTable_OFFLINE"); configs.put(MinionConstants.MergeRollupTask.MERGE_LEVEL_KEY, "daily"); PinotTaskConfig pinotTaskConfig = new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, configs); List<SegmentConversionResult> conversionResults = mergeRollupTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR); Assert.assertEquals(conversionResults.size(), 1); Assert.assertEquals(conversionResults.get(0).getSegmentName(), MERGED_SEGMENT_NAME); File mergedSegment = conversionResults.get(0).getFile(); SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(mergedSegment); Assert.assertEquals(segmentMetadata.getTotalDocs(), NUM_SEGMENTS * NUM_ROWS); }
// Replicated maps do not support queries, so this stat is deliberately unavailable.
@Override public long getIndexedQueryCount() { throw new UnsupportedOperationException("Queries on replicated maps are not supported."); }
// Accessing the indexed-query count on replicated-map stats must throw.
@Test(expected = UnsupportedOperationException.class) public void testIndexedQueryCount() { localReplicatedMapStats.getIndexedQueryCount(); }
@Override public Exchange add(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) throws OptimisticLockingException { if (!optimistic) { throw new UnsupportedOperationException(); } LOG.trace("Adding an Exchange with ID {} for key {} in an optimistic manner.", newExchange.getExchangeId(), key); if (oldExchange == null) { DefaultExchangeHolder newHolder = DefaultExchangeHolder.marshal(newExchange, true, allowSerializedHeaders); DefaultExchangeHolder oldHolder = cache.getAndPut(key, newHolder); if (oldHolder != null) { Exchange exchange = unmarshallExchange(camelContext, oldHolder); LOG.error( "Optimistic locking failed for exchange with key {}: IMap#putIfAbsend returned Exchange with ID {}, while it's expected no exchanges to be returned", key, exchange != null ? exchange.getExchangeId() : "<null>"); throw new OptimisticLockingException(); } } else { DefaultExchangeHolder oldHolder = DefaultExchangeHolder.marshal(oldExchange, true, allowSerializedHeaders); DefaultExchangeHolder newHolder = DefaultExchangeHolder.marshal(newExchange, true, allowSerializedHeaders); if (!cache.replace(key, oldHolder, newHolder)) { LOG.error( "Optimistic locking failed for exchange with key {}: IMap#replace returned no Exchanges, while it's expected to replace one", key); throw new OptimisticLockingException(); } } LOG.trace("Added an Exchange with ID {} for key {} in optimistic manner.", newExchange.getExchangeId(), key); return oldExchange; }
// A repository created in optimistic mode rejects the non-optimistic add(context, key, exchange)
// overload with UnsupportedOperationException; repo is always stopped in finally.
@Test public void optimisticRepoFailsForNonOptimisticAdd() throws Exception { JCacheAggregationRepository repo = createRepository(true); repo.start(); try { final CamelContext context = context(); Exchange ex = new DefaultExchange(context); assertThrows(UnsupportedOperationException.class, () -> repo.add(context, "myKey", ex)); } finally { repo.stop(); } }
/**
 * Renders this script chunk for debugging: the bare opcode name when there is no push
 * data, otherwise {@code PUSHNAME[hexdata]}. Never throws, even for chunks whose push
 * opcode carries no data.
 */
@Override
public String toString() {
    return data == null
            ? getOpCodeName(opcode)
            : String.format("%s[%s]", getPushDataName(opcode), ByteUtils.formatHex(data));
}
// Regression for bitcoinj#1860: toString() on a push chunk with null data must not throw and
// should fall back to the opcode name.
@Test public void testToStringOnInvalidScriptChunk() { // see https://github.com/bitcoinj/bitcoinj/issues/1860 // In summary: toString() throws when given an invalid ScriptChunk. // It should perhaps be impossible to even construct such a ScriptChunk, but // until that is the case, toString() should not throw. ScriptChunk pushWithoutData = new ScriptChunk(OP_PUSHDATA1, null); // the chunk is invalid, but at least we can determine its opcode assertEquals("PUSHDATA1", pushWithoutData.toString()); }
// Intentional no-op: this interceptor takes no configuration, but the interface requires the hook.
@Override public void setProperties(final Properties properties) { }
// The no-op setProperties must accept any Properties without throwing.
@Test public void setPropertiesTest() { final PostgreSqlUpdateInterceptor postgreSqlUpdateInterceptor = new PostgreSqlUpdateInterceptor(); Assertions.assertDoesNotThrow(() -> postgreSqlUpdateInterceptor.setProperties(mock(Properties.class))); }
/**
 * Deserializes a JSON object string into a {@code Map<String, String>}, converting every
 * key and value to its string form. Blank or null input yields an empty (mutable) map.
 *
 * @param jsonStr JSON object text, e.g. {@code {"k":"v"}}
 * @return a mutable map of stringified entries; never {@code null}
 * @throws RuntimeException wrapping the Jackson error when the input is not valid JSON
 */
public static Map<String, String> deserialize2Map(String jsonStr) {
    try {
        if (StringUtils.hasText(jsonStr)) {
            // Jackson's untyped Map.class read is inherently unchecked; values are
            // normalized to strings immediately below, so the raw read is safe here.
            @SuppressWarnings("unchecked")
            Map<String, Object> temp = OM.readValue(jsonStr, Map.class);
            // Presize to avoid rehashing for large payloads.
            Map<String, String> result = new HashMap<>(Math.max(16, temp.size() * 2));
            temp.forEach((key, value) -> result.put(String.valueOf(key), String.valueOf(value)));
            return result;
        }
        return new HashMap<>();
    } catch (JsonProcessingException e) {
        LOG.error(
            "Json to map failed. check if the format of the json string[{}] is correct.",
            jsonStr, e);
        throw new RuntimeException("Json to map failed.", e);
    }
}
// Happy-path: a three-entry JSON object round-trips into a String->String map.
@Test public void testDeserialize2Map() { String jsonStr = "{\"k1\":\"v1\",\"k2\":\"v2\",\"k3\":\"v3\"}"; Map<String, String> map = JacksonUtils.deserialize2Map(jsonStr); assertThat(map.size()).isEqualTo(3); assertThat(map.get("k1")).isEqualTo("v1"); assertThat(map.get("k2")).isEqualTo("v2"); assertThat(map.get("k3")).isEqualTo("v3"); }
// Convenience overload: validates headers with a fresh, empty RequestContext.
public static void validateRequestHeadersAndUpdateResourceContext(final Map<String, String> headers, final Set<String> customMimeTypesSupported, ServerResourceContext resourceContext) { validateRequestHeadersAndUpdateResourceContext(headers, customMimeTypesSupported, resourceContext, new RequestContext()); }
// A matching Accept header must be recorded as the response MIME type on the resource context.
@Test() public void testValidateRequestHeadersWithValidAcceptHeaderAndMatch() throws Exception { Map<String, String> headers = new HashMap<>(); headers.put("Accept", "application/json"); ServerResourceContext resourceContext = new ResourceContextImpl(); RestUtils.validateRequestHeadersAndUpdateResourceContext(headers, Collections.emptySet(), resourceContext); Assert.assertEquals(resourceContext.getResponseMimeType(), "application/json"); }
// Package-private convenience overload: builds the wrapper method with the default flag set
// to true (see the two-argument overload for its meaning).
MethodSpec buildFunction(AbiDefinition functionDefinition) throws ClassNotFoundException { return buildFunction(functionDefinition, true); }
// Golden-text test: a constant function taking uint8[] and returning address[] must generate
// exactly the expected RemoteFunctionCall<List> wrapper source.
@Test public void testBuildFunctionConstantDynamicArrayRawListReturn() throws Exception { AbiDefinition functionDefinition = new AbiDefinition( true, Arrays.asList(new NamedType("param", "uint8[]")), "functionName", Arrays.asList(new NamedType("result", "address[]")), "type", false); MethodSpec methodSpec = solidityFunctionWrapper.buildFunction(functionDefinition); String expected = "public org.web3j.protocol.core.RemoteFunctionCall<java.util.List> functionName(\n" + " java.util.List<java.math.BigInteger> param) {\n" + " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n" + " java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.DynamicArray<org.web3j.abi.datatypes.generated.Uint8>(\n" + " org.web3j.abi.datatypes.generated.Uint8.class,\n" + " org.web3j.abi.Utils.typeMap(param, org.web3j.abi.datatypes.generated.Uint8.class))), \n" + " java.util.Arrays.<org.web3j.abi.TypeReference<?>>asList(new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.DynamicArray<org.web3j.abi.datatypes.Address>>() {}));\n" + " return new org.web3j.protocol.core.RemoteFunctionCall<java.util.List>(function,\n" + " new java.util.concurrent.Callable<java.util.List>() {\n" + " @java.lang.Override\n" + " @java.lang.SuppressWarnings(\"unchecked\")\n" + " public java.util.List call() throws java.lang.Exception {\n" + " java.util.List<org.web3j.abi.datatypes.Type> result = (java.util.List<org.web3j.abi.datatypes.Type>) executeCallSingleValueReturn(function, java.util.List.class);\n" + " return convertToNative(result);\n" + " }\n" + " });\n" + "}\n"; assertEquals((expected), methodSpec.toString()); }
/**
 * Decodes an AMQP base ID string into its typed form based on its prefix:
 * UUID, UnsignedLong, String, or Binary (hex-encoded). A string without any
 * recognized prefix is passed through unchanged; null maps to null.
 *
 * @throws AmqpProtocolException if the prefixed payload cannot be parsed
 */
public Object toIdObject(String baseId) throws AmqpProtocolException { if (baseId == null) { return null; } try { if (hasAmqpUuidPrefix(baseId)) { String uuidString = strip(baseId, AMQP_UUID_PREFIX_LENGTH); return UUID.fromString(uuidString); } else if (hasAmqpUlongPrefix(baseId)) { String longString = strip(baseId, AMQP_ULONG_PREFIX_LENGTH); return UnsignedLong.valueOf(longString); } else if (hasAmqpStringPrefix(baseId)) { return strip(baseId, AMQP_STRING_PREFIX_LENGTH); } else if (hasAmqpBinaryPrefix(baseId)) { String hexString = strip(baseId, AMQP_BINARY_PREFIX_LENGTH); byte[] bytes = convertHexStringToBinary(hexString); return new Binary(bytes); } else { // We have a string without any type prefix, transmit it as-is. return baseId; } } catch (IllegalArgumentException e) { throw new AmqpProtocolException("Unable to convert ID value"); } }
// A plain string with no AMQP type prefix must be returned unchanged.
@Test public void testToIdObjectWithStringContainingNoEncodingPrefix() throws Exception { String stringId = "myStringId"; Object idObject = messageIdHelper.toIdObject(stringId); assertNotNull("null object should not have been returned", idObject); assertEquals("expected id object was not returned", stringId, idObject); }
/**
 * Flushes one batch of buffered request entries. Yields to the mailbox while the rate
 * limiter says to block (re-evaluating the request info each time, since yielded work may
 * change buffer state), then submits the next available batch asynchronously. Empty batches
 * are a no-op; otherwise the in-flight request is registered with the rate limiter and the
 * result handler completes the bookkeeping.
 */
private void flush() throws InterruptedException { RequestInfo requestInfo = createRequestInfo(); while (rateLimitingStrategy.shouldBlock(requestInfo)) { mailboxExecutor.yield(); requestInfo = createRequestInfo(); } List<RequestEntryT> batch = createNextAvailableBatch(requestInfo); if (batch.isEmpty()) { return; } long requestTimestamp = System.currentTimeMillis(); rateLimitingStrategy.registerInFlightRequest(requestInfo); inFlightRequestsCount++; submitRequestEntries( batch, new AsyncSinkWriterResultHandler(requestTimestamp, batch, requestInfo)); }
// Scenario test for at-least-once under retryable failures: failed entries are requeued at the
// FRONT of the buffer, batch size throttles down on failure and recovers on success, and a
// final flush(true) drains everything so no element is lost. See inline commentary for the
// step-by-step buffer/in-flight evolution.
@Test public void testRetryableErrorsDoNotViolateAtLeastOnceSemanticsDueToRequeueOfFailures() throws IOException, InterruptedException { AsyncSinkWriterImpl sink = new AsyncSinkWriterImplBuilder() .context(sinkInitContext) .maxBatchSize(3) .maxBatchSizeInBytes(10_000_000) .simulateFailures(true) .build(); writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "25", Arrays.asList(), Arrays.asList(25)); writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "55", Arrays.asList(), Arrays.asList(25, 55)); // 25, 55 persisted; 965 failed and inflight writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "965", Arrays.asList(25, 55), Arrays.asList()); writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "75", Arrays.asList(25, 55, 965, 75), Arrays.asList()); writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "95", Arrays.asList(25, 55, 965, 75), Arrays.asList(95)); /* * Writing 955 to the sink increases the buffer to size 3 containing [75, 95, 955]. This * triggers the outstanding in flight request with the failed 965 to be run, and 965 is * placed at the front of the queue. The failure throttles down {@code maxBatchSize} to 1. * buffer now should be [965, 75, 95, 955] * A new batch containing 965 is then sent, success causes {@code maxBatchSize} to go up * to 3 again. * next batch is then created of all requests, 75 and 95 are also persisted. * 955 is in flight after failure. */ writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "955", Arrays.asList(25, 55, 965, 75), Arrays.asList(95, 955)); writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "550", Arrays.asList(25, 55, 965, 75, 95), Arrays.asList()); /* * [550, 45] are attempted to be persisted */ writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "45", Arrays.asList(25, 55, 965, 75, 95, 955, 550), Arrays.asList(45)); /* * [550,45,35] triggers inflight request to be added, buffer should be [955,550,45,35] * batch size is reduced to 1. 
 * Next request would contain only [995] which is persisted, * success causes batch size to rise again to 3. next batch is now [550,45,35]. * All are persisted and batch size is 3. */ writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "35", Arrays.asList(25, 55, 965, 75, 95, 955, 550), Arrays.asList(45, 35)); /* ] should be in the bufferedRequestEntries * [ 550] should be in the inFlightRequest, ready to be added * [25, 55, 965, 75, 95, 995, 45, 35] should be downstream already */ writeXToSinkAssertDestinationIsInStateYAndBufferHasZ( sink, "535", Arrays.asList(25, 55, 965, 75, 95, 955, 550, 45, 35), Arrays.asList()); // Checkpoint occurs sink.flush(true); // Everything is saved assertThat(res).isEqualTo(Arrays.asList(25, 55, 965, 75, 95, 955, 550, 45, 35, 535)); assertThat(getWriterState(sink).getStateSize()).isEqualTo(0); }
// Translates queue entities into QueueUpdateMsg protos (tenant/queue UUIDs split into
// MSB/LSB longs) and broadcasts the same payload to rule-engine, core, and transport
// notification channels via doSendQueueNotifications.
@Override public void onQueuesUpdate(List<Queue> queues) { List<QueueUpdateMsg> queueUpdateMsgs = queues.stream() .map(queue -> QueueUpdateMsg.newBuilder() .setTenantIdMSB(queue.getTenantId().getId().getMostSignificantBits()) .setTenantIdLSB(queue.getTenantId().getId().getLeastSignificantBits()) .setQueueIdMSB(queue.getId().getId().getMostSignificantBits()) .setQueueIdLSB(queue.getId().getId().getLeastSignificantBits()) .setQueueName(queue.getName()) .setQueueTopic(queue.getTopic()) .setPartitions(queue.getPartitions()) .setDuplicateMsgToAllPartitions(queue.isDuplicateMsgToAllPartitions()) .build()) .collect(Collectors.toList()); ToRuleEngineNotificationMsg ruleEngineMsg = ToRuleEngineNotificationMsg.newBuilder().addAllQueueUpdateMsgs(queueUpdateMsgs).build(); ToCoreNotificationMsg coreMsg = ToCoreNotificationMsg.newBuilder().addAllQueueUpdateMsgs(queueUpdateMsgs).build(); ToTransportMsg transportMsg = ToTransportMsg.newBuilder().addAllQueueUpdateMsgs(queueUpdateMsgs).build(); doSendQueueNotifications(ruleEngineMsg, coreMsg, transportMsg); }
// With one monolith (rule-engine+core+transport) and one remote transport: the monolith gets a
// single rule-engine notification (core skipped — same node), the remote transport gets its
// own message, and no duplicate transport/core sends occur.
@Test public void testOnQueueChangeSingleMonolithAndSingleRemoteTransport() { when(partitionService.getAllServiceIds(ServiceType.TB_RULE_ENGINE)).thenReturn(Sets.newHashSet(MONOLITH)); when(partitionService.getAllServiceIds(ServiceType.TB_CORE)).thenReturn(Sets.newHashSet(MONOLITH)); when(partitionService.getAllServiceIds(ServiceType.TB_TRANSPORT)).thenReturn(Sets.newHashSet(MONOLITH, TRANSPORT)); TbQueueProducer<TbProtoQueueMsg<TransportProtos.ToRuleEngineNotificationMsg>> tbREQueueProducer = mock(TbQueueProducer.class); TbQueueProducer<TbProtoQueueMsg<TransportProtos.ToTransportMsg>> tbTransportQueueProducer = mock(TbQueueProducer.class); when(producerProvider.getRuleEngineNotificationsMsgProducer()).thenReturn(tbREQueueProducer); when(producerProvider.getTransportNotificationsMsgProducer()).thenReturn(tbTransportQueueProducer); clusterService.onQueuesUpdate(List.of(createTestQueue())); verify(topicService, times(1)).getNotificationsTopic(ServiceType.TB_RULE_ENGINE, MONOLITH); verify(topicService, times(1)).getNotificationsTopic(ServiceType.TB_TRANSPORT, TRANSPORT); verify(topicService, never()).getNotificationsTopic(eq(ServiceType.TB_CORE), any()); verify(tbREQueueProducer, times(1)) .send(eq(topicService.getNotificationsTopic(ServiceType.TB_RULE_ENGINE, MONOLITH)), any(TbProtoQueueMsg.class), isNull()); verify(tbTransportQueueProducer, times(1)) .send(eq(topicService.getNotificationsTopic(ServiceType.TB_TRANSPORT, TRANSPORT)), any(TbProtoQueueMsg.class), isNull()); verify(tbTransportQueueProducer, never()) .send(eq(topicService.getNotificationsTopic(ServiceType.TB_TRANSPORT, MONOLITH)), any(TbProtoQueueMsg.class), isNull()); verify(tbTransportQueueProducer, never()) .send(eq(topicService.getNotificationsTopic(ServiceType.TB_CORE, MONOLITH)), any(TbProtoQueueMsg.class), isNull()); verify(producerProvider, never()).getTbCoreNotificationsMsgProducer(); }
// Convenience overload: resolves input files with no additional variables (empty map).
public static Map<String, String> inputFiles(RunContext runContext, Object inputs) throws Exception { return FilesService.inputFiles(runContext, Collections.emptyMap(), inputs); }
// Templated file name and content are both rendered from the run context variables.
// NOTE(review): the map key literal "{(unknown)}" looks like a redaction/extraction artifact —
// given the assertion on "file.txt", the intended key is presumably "{{filename}}"; confirm
// against the original repository before relying on this text.
@Test void renderInputFile() throws Exception { RunContext runContext = runContextFactory.of(Map.of("filename", "file.txt", "content", "Hello World")); Map<String, String> content = FilesService.inputFiles(runContext, Map.of("{(unknown)}", "{{content}}")); assertThat(content.get("file.txt"), is("Hello World")); }
/**
 * Equality is defined solely by {@code filePath}: two flat files referring to the same
 * path are equal. Uses a strict class comparison (not {@code instanceof}) so symmetry
 * holds even in the presence of subclasses; must stay consistent with {@code hashCode()}.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    FlatMessageFile other = (FlatMessageFile) obj;
    return StringUtils.equals(filePath, other.filePath);
}
// Same topic+queue => equal (and equal hashCodes); different queue id => not equal.
// Files are shut down and destroyed afterwards to clean up on-disk state.
@Test public void testEquals() { String topic = "EqualsTest"; FlatMessageFile flatFile1 = new FlatMessageFile(flatFileFactory, topic, 0); FlatMessageFile flatFile2 = new FlatMessageFile(flatFileFactory, topic, 0); FlatMessageFile flatFile3 = new FlatMessageFile(flatFileFactory, topic, 1); Assert.assertEquals(flatFile1, flatFile2); Assert.assertEquals(flatFile1.hashCode(), flatFile2.hashCode()); Assert.assertNotEquals(flatFile1, flatFile3); flatFile1.shutdown(); flatFile2.shutdown(); flatFile3.shutdown(); flatFile1.destroy(); flatFile2.destroy(); flatFile3.destroy(); }
/**
 * Populates {@code localResources} with the job's distributed-cache entries: first cache
 * archives, then cache files, each configured with its URIs, timestamps, sizes, visibilities
 * and shared-cache upload policies from {@code conf}. Uses a single reusable
 * LocalResourceBuilder, resetting its type between the two passes.
 */
@SuppressWarnings("deprecation") public static void setupDistributedCache(Configuration conf, Map<String, LocalResource> localResources) throws IOException { LocalResourceBuilder lrb = new LocalResourceBuilder(); lrb.setConf(conf); // Cache archives lrb.setType(LocalResourceType.ARCHIVE); lrb.setUris(JobContextImpl.getCacheArchives(conf)); lrb.setTimestamps(JobContextImpl.getArchiveTimestamps(conf)); lrb.setSizes(getFileSizes(conf, MRJobConfig.CACHE_ARCHIVES_SIZES)); lrb.setVisibilities(DistributedCache.getArchiveVisibilities(conf)); lrb.setSharedCacheUploadPolicies( Job.getArchiveSharedCacheUploadPolicies(conf)); lrb.createLocalResources(localResources); // Cache files lrb.setType(LocalResourceType.FILE); lrb.setUris(JobContextImpl.getCacheFiles(conf)); lrb.setTimestamps(JobContextImpl.getFileTimestamps(conf)); lrb.setSizes(getFileSizes(conf, MRJobConfig.CACHE_FILES_SIZES)); lrb.setVisibilities(DistributedCache.getFileVisibilities(conf)); lrb.setSharedCacheUploadPolicies( Job.getFileSharedCacheUploadPolicies(conf)); lrb.createLocalResources(localResources); }
// An empty Configuration must yield no local resources.
@Test @Timeout(30000) public void testSetupDistributedCacheEmpty() throws IOException { Configuration conf = new Configuration(); Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); MRApps.setupDistributedCache(conf, localResources); assertTrue(localResources.isEmpty(), "Empty Config did not produce an empty list of resources"); }
/**
 * Deletes the given alarm on behalf of {@code user}: records an ALARM_DELETE entity-action
 * log entry first, then removes the alarm through the subscription service.
 *
 * @return the result of {@code deleteAlarm}
 */
@Override
public Boolean delete(Alarm alarm, User user) {
    final TenantId ownerTenantId = alarm.getTenantId();
    logEntityActionService.logEntityAction(
            ownerTenantId, alarm.getOriginator(), alarm, alarm.getCustomerId(),
            ActionType.ALARM_DELETE, user);
    return alarmSubscriptionService.deleteAlarm(ownerTenantId, alarm.getId());
}
// delete() must log exactly one ALARM_DELETE action and call deleteAlarm exactly once.
@Test public void testDelete() { service.delete(new Alarm(), new User()); verify(logEntityActionService, times(1)).logEntityAction(any(), any(), any(), any(), eq(ActionType.ALARM_DELETE), any()); verify(alarmSubscriptionService, times(1)).deleteAlarm(any(), any()); }
/**
 * Tokenizes English text. Optionally expands contractions first (won't/shan't/ain't,
 * n't-forms, two- and three-part contractions), then pads punctuation via the DELIMITERS
 * regex pipeline (order-sensitive), splits on whitespace, re-attaches a trailing period to a
 * known abbreviation that ended the sentence, and drops empty tokens.
 */
@Override public String[] split(String text) { if (splitContraction) { text = WONT_CONTRACTION.matcher(text).replaceAll("$1ill not"); text = SHANT_CONTRACTION.matcher(text).replaceAll("$1ll not"); text = AINT_CONTRACTION.matcher(text).replaceAll("$1m not"); for (Pattern regexp : NOT_CONTRACTIONS) { text = regexp.matcher(text).replaceAll("$1 not"); } for (Pattern regexp : CONTRACTIONS2) { text = regexp.matcher(text).replaceAll("$1 $2"); } for (Pattern regexp : CONTRACTIONS3) { text = regexp.matcher(text).replaceAll("$1 $2 $3"); } } text = DELIMITERS[0].matcher(text).replaceAll(" $1 "); text = DELIMITERS[1].matcher(text).replaceAll(" $1"); text = DELIMITERS[2].matcher(text).replaceAll(" $1"); text = DELIMITERS[3].matcher(text).replaceAll(" . "); text = DELIMITERS[4].matcher(text).replaceAll(" $1 "); String[] words = WHITESPACE.split(text); if (words.length > 1 && words[words.length-1].equals(".")) { if (EnglishAbbreviations.contains(words[words.length-2])) { words[words.length-2] = words[words.length-2] + "."; } } ArrayList<String> result = new ArrayList<>(); for (String token : words) { if (!token.isEmpty()) { result.add(token); } } return result.toArray(new String[0]); }
// Verifies contraction expansion of archaic forms ('tis, 'tisn't, 'twas)
// together with regular punctuation tokenization.
@Test
public void testTokenizeTis() {
    System.out.println("tokenize tis");
    String text = "'tis, 'tisn't, and 'twas were common in early modern English texts.";
    String[] expResult = {"'t", "is", ",", "'t", "is", "not", ",", "and", "'t", "was", "were", "common", "in", "early", "modern", "English", "texts", "."};
    SimpleTokenizer instance = new SimpleTokenizer(true);
    String[] result = instance.split(text);
    assertEquals(expResult.length, result.length);
    for (int i = 0; i < result.length; i++) {
        assertEquals(expResult[i], result[i]);
    }
}
// Removes the given master from Sentinel monitoring by issuing
// SENTINEL REMOVE <name> synchronously on the underlying connection.
@Override
public void remove(NamedNode master) {
    connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName());
}
// Smoke test: removing the first monitored master must not throw.
// NOTE(review): no assertion here — consider verifying the master list shrank.
@Test
public void testRemove() {
    Collection<RedisServer> masters = connection.masters();
    connection.remove(masters.iterator().next());
}
/**
 * Parses runtime options from a property map by delegating to the
 * function-based overload, using the map's {@code get} as the lookup.
 */
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
    return parse(properties::get);
}
// Verifies that enabling the publish property registers the
// PublishFormatter plugin as the first configured plugin.
@Test
void should_parse_plugin_publish_enabled() {
    properties.put(Constants.PLUGIN_PUBLISH_ENABLED_PROPERTY_NAME, "true");
    RuntimeOptions options = cucumberPropertiesParser
        .parse(properties)
        .enablePublishPlugin()
        .build();
    assertThat(options.plugins().get(0).pluginString(),
        equalTo("io.cucumber.core.plugin.PublishFormatter"));
}
// Writes one byte to the underlying UFS output stream and bumps the byte
// counter. synchronized keeps the write and the counter update atomic with
// respect to other writers.
@Override
public synchronized void write(int b) throws IOException {
    mUfsOutStream.write(b);
    mBytesWritten++;
}
// Writes CHUNK_SIZE increasing bytes to a UFS-backed file and verifies the
// bytes land in the UFS after the stream is closed.
@Test
public void writeIncreasingByteArray() throws IOException, AlluxioException {
    AlluxioURI ufsPath = getUfsPath();
    try (FileOutStream outStream = mFileSystem.createFile(ufsPath)) {
        outStream.write(BufferUtils.getIncreasingByteArray(CHUNK_SIZE));
    }
    verifyIncreasingBytesWritten(ufsPath, CHUNK_SIZE);
}
/**
 * Runs the query over this member's owned partitions, preferring an index
 * when available and falling back to a partition scan. Delegates to the
 * two-arg overload with the flag set to true (flag semantics are defined by
 * that overload); with the flag true the result is expected non-null.
 */
public Result runIndexOrPartitionScanQueryOnOwnedPartitions(Query query) {
    Result result = runIndexOrPartitionScanQueryOnOwnedPartitions(query, true);
    assert result != null;
    return result;
}
// Runs an ENTRY query over all partitions and verifies the single matching
// row plus the cached owned-partition id set on the result.
@Test
public void runFullQuery() {
    Predicate<Object, Object> predicate = Predicates.equal("this", value);
    Query query = Query.of()
        .mapName(map.getName())
        .predicate(predicate)
        .iterationType(IterationType.ENTRY)
        .partitionIdSet(SetUtil.allPartitionIds(instance.getPartitionService().getPartitions().size()))
        .build();
    QueryResult result = (QueryResult) queryRunner.runIndexOrPartitionScanQueryOnOwnedPartitions(query);
    assertEquals(1, result.getRows().size());
    assertEquals(map.get(key), toObject(result.getRows().iterator().next().getValue()));
    assertArrayEquals(result.getPartitionIds().toArray(),
        mapService.getMapServiceContext().getCachedOwnedPartitions().toArray());
}
/**
 * Executes a Telegram Bot API request synchronously by forwarding it to the
 * underlying API client and returning its typed response.
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
// Exercises SetGameScore twice: once by inline-message id, once by
// chat/message id with force + disableEditMessage, then checks the game payload.
@Test
public void setGameScore() {
    // Use the current epoch second as a monotonically increasing score.
    int res = (int) (System.currentTimeMillis() / 1000);
    BaseResponse response = bot.execute(new SetGameScore(chatId, res, "AgAAAPrwAQCj_Q4D2s-51_8jsuU"));
    assertTrue(response.isOk());
    SendResponse sendResponse = (SendResponse) bot.execute(
        new SetGameScore(chatId, res + 1, chatId, 8162).force(true).disableEditMessage(true));
    GameTest.check(sendResponse.message().game());
}
// Private constructor: Main is a utility class and must not be instantiated.
private Main() {
}
// Runs the benchmark CLI against a local Logstash build (path supplied via
// the logstash.benchmark.test.local.path system property) in a temp workdir.
@Test
public void runsAgainstLocal() throws Exception {
    final File pwd = temp.newFolder();
    Main.main(String.format(
        "--version=local:%s",
        System.getProperty("logstash.benchmark.test.local.path")
    ), String.format("--workdir=%s", pwd.getAbsolutePath()));
}
// Returns this task's availability zone as reported by the ECS metadata API.
@Override
public String getAvailabilityZone() {
    return awsMetadataApi.availabilityZoneEcs();
}
// Verifies the client simply passes through the AZ reported by the metadata API.
@Test
public void getAvailabilityZone() {
    // given
    String expectedResult = "us-east-1a";
    given(awsMetadataApi.availabilityZoneEcs()).willReturn(expectedResult);
    // when
    String result = awsEcsClient.getAvailabilityZone();
    // then
    assertEquals(expectedResult, result);
}
/**
 * Routes the incoming message to the TRUE relation when it matches the
 * configured geofence check, otherwise to the FALSE relation.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws TbNodeException {
    if (checkMatches(msg)) {
        ctx.tellNext(msg, TbNodeConnectionType.TRUE);
    } else {
        ctx.tellNext(msg, TbNodeConnectionType.FALSE);
    }
}
// With a statically configured rectangle polygon, a point inside the
// rectangle must be routed to the TRUE relation with the original message.
@Test
void givenTypePolygonAndConfigWithPolygonDefined_whenOnMsg_thenTrue() throws TbNodeException {
    // GIVEN
    var config = new TbGpsGeofencingFilterNodeConfiguration().defaultConfiguration();
    config.setFetchPerimeterInfoFromMessageMetadata(false);
    config.setPolygonsDefinition(GeoUtilTest.SIMPLE_RECT);
    node.init(ctx, new TbNodeConfiguration(JacksonUtil.valueToTree(config)));
    DeviceId deviceId = new DeviceId(UUID.randomUUID());
    TbMsg msg = getTbMsg(deviceId, TbMsgMetaData.EMPTY,
        GeoUtilTest.POINT_INSIDE_SIMPLE_RECT_CENTER.getLatitude(),
        GeoUtilTest.POINT_INSIDE_SIMPLE_RECT_CENTER.getLongitude());
    // WHEN
    node.onMsg(ctx, msg);
    // THEN
    ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    verify(ctx, times(1)).tellNext(newMsgCaptor.capture(), eq(TbNodeConnectionType.TRUE));
    verify(ctx, never()).tellFailure(any(), any());
    TbMsg newMsg = newMsgCaptor.getValue();
    assertThat(newMsg).isNotNull();
    // The node must forward the original message instance, not a copy.
    assertThat(newMsg).isSameAs(msg);
}
/**
 * Returns an Iterable that walks the overlay list in reverse order
 * (top-most overlay first), tolerating concurrent removals while the
 * reverse iterator is being created.
 */
@Override
public Iterable<Overlay> overlaysReversed() {
    return new Iterable<Overlay>() {
        /**
         * Obtains a ListIterator positioned at the end of the list, retrying
         * if a concurrent removal shrinks the list between size() and
         * listIterator() (see https://github.com/osmdroid/osmdroid/issues/1260).
         * @since 6.1.0
         */
        private ListIterator<Overlay> bulletProofReverseListIterator() {
            while (true) {
                try {
                    return mOverlayList.listIterator(mOverlayList.size());
                } catch (final IndexOutOfBoundsException e) {
                    // thread-concurrency fix - in case an item is removed in a very inappropriate time
                    // cf. https://github.com/osmdroid/osmdroid/issues/1260
                }
            }
        }

        @Override
        public Iterator<Overlay> iterator() {
            final ListIterator<Overlay> i = bulletProofReverseListIterator();
            // Adapt the backwards-walking ListIterator to a forward Iterator.
            return new Iterator<Overlay>() {
                @Override
                public boolean hasNext() {
                    return i.hasPrevious();
                }

                @Override
                public Overlay next() {
                    return i.previous();
                }

                @Override
                public void remove() {
                    i.remove();
                }
            };
        }
    };
}
// Drives the generic ListTester against DefaultOverlayManager's reversed
// iterable; the raw-iterator hooks are deliberately unsupported here.
@Test
public void testOverlaysReversed() {
    final ListTest<Overlay> list = new ListTest<Overlay>() {
        private final DefaultOverlayManager defaultOverlayManager = new DefaultOverlayManager(null);

        @Override
        public void add() {
            defaultOverlayManager.add(new Overlay() {
            });
        }

        @Override
        public void remove() {
            defaultOverlayManager.remove(0);
        }

        @Override
        public Iterable<Overlay> reverseOrder() {
            return defaultOverlayManager.overlaysReversed();
        }

        @Override
        protected ListIterator<Overlay> unprotectedReverseListIterator() {
            throw new IllegalArgumentException(); // not to be used here
        }

        @Override
        public ListIterator<Overlay> reverseIterator() {
            throw new IllegalArgumentException(); // not to be used here
        }
    };
    final ListTester<Overlay> tester = new ListTester<>();
    tester.test(list);
}
/**
 * Builds an InstructionResponse carrying the process-wide monitoring data.
 * If no process-wide MetricsContainerImpl is installed, the response's
 * monitoring-data map is left empty.
 *
 * @param request the incoming instruction request (unused beyond dispatch)
 * @return a response builder with harness monitoring infos populated
 */
public BeamFnApi.InstructionResponse.Builder harnessMonitoringInfos(
    BeamFnApi.InstructionRequest request) {
  BeamFnApi.HarnessMonitoringInfosResponse.Builder response =
      BeamFnApi.HarnessMonitoringInfosResponse.newBuilder();
  MetricsContainer container = MetricsEnvironment.getProcessWideContainer();
  // instanceof already evaluates to false for null, so the previous explicit
  // null check was redundant (JLS 15.20.2).
  if (container instanceof MetricsContainerImpl) {
    response.putAllMonitoringData(
        ((MetricsContainerImpl) container).getMonitoringData(this.metricsShortIds));
  }
  return BeamFnApi.InstructionResponse.newBuilder().setHarnessMonitoringInfos(response);
}
// Installs a process-wide metrics container, increments one labeled counter,
// and verifies the handler reports exactly one non-empty monitoring payload.
@Test
public void testReturnsProcessWideMonitoringInfos() {
    MetricsEnvironment.setProcessWideContainer(MetricsContainerImpl.createProcessWideContainer());
    HashMap<String, String> labels = new HashMap<String, String>();
    labels.put(MonitoringInfoConstants.Labels.SERVICE, "service");
    labels.put(MonitoringInfoConstants.Labels.METHOD, "method");
    labels.put(MonitoringInfoConstants.Labels.RESOURCE, "resource");
    labels.put(MonitoringInfoConstants.Labels.PTRANSFORM, "transform");
    labels.put(MonitoringInfoConstants.Labels.STATUS, "ok");
    MonitoringInfoMetricName name =
        MonitoringInfoMetricName.named(MonitoringInfoConstants.Urns.API_REQUEST_COUNT, labels);
    Counter counter = LabeledMetrics.counter(name, true);
    counter.inc(7);
    ShortIdMap metricsShortIds = new ShortIdMap();
    HarnessMonitoringInfosInstructionHandler testObject =
        new HarnessMonitoringInfosInstructionHandler(metricsShortIds);
    BeamFnApi.InstructionRequest.Builder builder = BeamFnApi.InstructionRequest.newBuilder();
    BeamFnApi.InstructionResponse.Builder responseBuilder =
        testObject.harnessMonitoringInfos(builder.build());
    BeamFnApi.InstructionResponse response = responseBuilder.build();
    assertEquals(1, response.getHarnessMonitoringInfos().getMonitoringDataMap().size());
    // Expect a payload to be set for "metric0".
    assertTrue(
        !response.getHarnessMonitoringInfos().getMonitoringDataMap().get("metric0").isEmpty());
}
/**
 * Drives MySQL connection-phase authentication. In the fast-path phase the
 * client's handshake response is processed first (returning early if more
 * round trips are required); in the method-mismatch phase the follow-up
 * auth packet is consumed. Once credentials are complete, login and
 * database authorization are checked against the AuthorityRule before an
 * OK packet is written.
 *
 * @throws AccessDeniedException if the user/host/password check fails
 * @throws DatabaseAccessDeniedException if the user may not use the requested database
 */
@Override
public AuthenticationResult authenticate(final ChannelHandlerContext context, final PacketPayload payload) {
    AuthorityRule rule = ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getGlobalRuleMetaData().getSingleRule(AuthorityRule.class);
    if (MySQLConnectionPhase.AUTH_PHASE_FAST_PATH == connectionPhase) {
        currentAuthResult = authenticatePhaseFastPath(context, payload, rule);
        // Not finished means the client must send another auth packet first.
        if (!currentAuthResult.isFinished()) {
            return currentAuthResult;
        }
    } else if (MySQLConnectionPhase.AUTHENTICATION_METHOD_MISMATCH == connectionPhase) {
        authenticateMismatchedMethod((MySQLPacketPayload) payload);
    }
    Grantee grantee = new Grantee(currentAuthResult.getUsername(), getHostAddress(context));
    // Third argument tells the exception whether a password was supplied.
    if (!login(rule, grantee, authResponse)) {
        throw new AccessDeniedException(currentAuthResult.getUsername(), grantee.getHostname(), 0 != authResponse.length);
    }
    if (!authorizeDatabase(rule, grantee, currentAuthResult.getDatabase())) {
        throw new DatabaseAccessDeniedException(currentAuthResult.getUsername(), grantee.getHostname(), currentAuthResult.getDatabase());
    }
    writeOKPacket(context);
    return AuthenticationResultBuilder.finished(grantee.getUsername(), grantee.getHostname(), currentAuthResult.getDatabase());
}
// Happy path: a known root@127.0.0.1 user in the fast-path phase results in
// an OK packet being flushed to the channel.
@Test
void assertAuthenticateSuccess() {
    setConnectionPhase(MySQLConnectionPhase.AUTH_PHASE_FAST_PATH);
    AuthorityRule rule = mock(AuthorityRule.class);
    when(rule.getAuthenticatorType(any())).thenReturn("");
    ShardingSphereUser user = new ShardingSphereUser("root", "", "127.0.0.1");
    when(rule.findUser(user.getGrantee())).thenReturn(Optional.of(user));
    ChannelHandlerContext context = mockChannelHandlerContext();
    ContextManager contextManager = mockContextManager(rule);
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    authenticationEngine.authenticate(context, getPayload("root", null, authResponse));
    verify(context).writeAndFlush(any(MySQLOKPacket.class));
}
public static boolean matchPathToPattern(String requestPath, String endpointPattern) { String[] pathPatternParts = endpointPattern.split("/"); String[] pathParts = requestPath.split("/"); boolean isMatch = true; for (int i = 0; i < pathPatternParts.length; i++) { String patternPart = pathPatternParts[i]; String urlPart = pathParts[i]; if (patternPart.startsWith("{") && patternPart.endsWith("}")) { continue; // Wildcard found, move to the next part } if (!patternPart.equals(urlPart)) { isMatch = false; // Part does not match, URLs do not match break; } } return isMatch; }
// Covers wildcard matching, prefix semantics (shorter pattern still matches
// a longer path), literal mismatches, and query-string-bearing segments.
@Test
public void testMatchPath() {
    String pattern = "/v1/pets/{petId}";
    String path = "/v1/pets/1";
    Assert.assertTrue(StringUtils.matchPathToPattern(path, pattern));
    pattern = "/v1/pets/{petId}/name";
    path = "/v1/pets/1/name";
    Assert.assertTrue(StringUtils.matchPathToPattern(path, pattern));
    // Shorter pattern against longer path: prefix match is expected to succeed.
    pattern = "/v1/pets/{petId}";
    Assert.assertTrue(StringUtils.matchPathToPattern(path, pattern));
    pattern = "/foo/bar";
    Assert.assertTrue(StringUtils.matchPathToPattern("/foo/bar", pattern));
    Assert.assertFalse(StringUtils.matchPathToPattern("/foo/bar?abc=123", pattern));
    pattern = "/gateway/dev/ph-l4j-files/file?version=1";
    Assert.assertFalse(StringUtils.matchPathToPattern("/dev-ph-l4j-files/file?version=1", pattern));
    pattern = "/gateway/dev/ph-l4j-files/file?version=1";
    Assert.assertTrue(StringUtils.matchPathToPattern("/gateway/dev/ph-l4j-files/file?version=1", pattern));
    pattern = "/gateway/dev/ph-l4j-files/file/05048267?version=1";
    Assert.assertFalse(StringUtils.matchPathToPattern("/gateway/dev/ph-l4j-files/file?version=1", pattern));
}
/**
 * Builds the EsClient bean. In cluster mode on a non-search node it connects
 * to the remote Elasticsearch hosts from configuration; otherwise it
 * connects to the local Elasticsearch using SEARCH_HOST/SEARCH_PORT.
 */
@Bean("EsClient")
public EsClient provide(Configuration config) {
    Settings.Builder esSettings = Settings.builder();

    // mandatory property defined by bootstrap process
    esSettings.put("cluster.name", config.get(CLUSTER_NAME.getKey()).get());

    boolean clusterEnabled = config.getBoolean(CLUSTER_ENABLED.getKey()).orElse(false);
    boolean searchNode = !clusterEnabled || SEARCH.equals(NodeType.parse(config.get(CLUSTER_NODE_TYPE.getKey()).orElse(null)));
    List<HttpHost> httpHosts;
    if (clusterEnabled && !searchNode) {
        httpHosts = getHttpHosts(config);
        LOGGER.info("Connected to remote Elasticsearch: [{}]", displayedAddresses(httpHosts));
    } else {
        // defaults provided in:
        // * in org.sonar.process.ProcessProperties.Property.SEARCH_HOST
        // * in org.sonar.process.ProcessProperties.Property.SEARCH_PORT
        HostAndPort host = HostAndPort.fromParts(config.get(SEARCH_HOST.getKey()).get(), config.getInt(SEARCH_PORT.getKey()).get());
        httpHosts = Collections.singletonList(toHttpHost(host, config));
        LOGGER.info("Connected to local Elasticsearch: [{}]", displayedAddresses(httpHosts));
    }

    // Credentials/keystore settings are optional and may be absent.
    return new EsClient(config.get(CLUSTER_SEARCH_PASSWORD.getKey()).orElse(null),
        config.get(CLUSTER_ES_HTTP_KEYSTORE.getKey()).orElse(null),
        config.get(CLUSTER_ES_HTTP_KEYSTORE_PASSWORD.getKey()).orElse(null),
        httpHosts.toArray(new HttpHost[0]));
}
// In cluster mode on an application node, the client must target both
// configured remote hosts and log the "remote" connection message.
@Test
public void connection_to_remote_es_nodes_when_cluster_mode_is_enabled_and_local_es_is_disabled() {
    settings.setProperty(CLUSTER_ENABLED.getKey(), true);
    settings.setProperty(CLUSTER_NODE_TYPE.getKey(), "application");
    settings.setProperty(CLUSTER_SEARCH_HOSTS.getKey(), format("%s:8080,%s:8081", localhostHostname, localhostHostname));
    EsClient client = underTest.provide(settings.asConfig());
    RestHighLevelClient nativeClient = client.nativeClient();
    assertThat(nativeClient.getLowLevelClient().getNodes()).hasSize(2);
    Node node = nativeClient.getLowLevelClient().getNodes().get(0);
    assertThat(node.getHost().getAddress().getHostName()).isEqualTo(localhostHostname);
    assertThat(node.getHost().getPort()).isEqualTo(8080);
    node = nativeClient.getLowLevelClient().getNodes().get(1);
    assertThat(node.getHost().getAddress().getHostName()).isEqualTo(localhostHostname);
    assertThat(node.getHost().getPort()).isEqualTo(8081);
    assertThat(logTester.logs(Level.INFO))
        .has(new Condition<>(s -> s.contains("Connected to remote Elasticsearch: [http://" + localhostHostname + ":8080, http://" + localhostHostname + ":8081]"), ""));
}
// Sends the Redis ECHO command and returns the server's verbatim reply,
// using the raw byte-array codec so no (de)serialization is applied.
@Override
public byte[] echo(byte[] message) {
    return read(null, ByteArrayCodec.INSTANCE, ECHO, message);
}
// ECHO must return the exact bytes that were sent.
@Test
public void testEcho() {
    assertThat(connection.echo("test".getBytes())).isEqualTo("test".getBytes());
}
// Returns the label map held by this request metadata.
// NOTE(review): exposes the internal map directly — callers can mutate it.
public Map<String, String> getLabels() {
    return labels;
}
// Verifies the request metadata exposes exactly the one label set up by the
// fixture (env=dev).
@Test
void testGetLabels() {
    Map<String, String> labels = requestMeta.getLabels();
    assertNotNull(labels);
    assertEquals(1, labels.size());
    assertEquals("dev", labels.get("env"));
}
/**
 * Decides whether feed should be blocked for the cluster. Returns null when
 * feed block is disabled or no node resource is exhausted; otherwise returns
 * a FeedBlock whose description lists up to three exhaustions (with a
 * "... and N more" suffix when truncated).
 */
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) {
    if (!feedBlockEnabled) {
        return null;
    }
    var nodeInfos = cluster.getNodeInfos();
    var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos);
    if (exhaustions.isEmpty()) {
        return null;
    }
    // Cap the human-readable description at three entries to keep it short.
    int maxDescriptions = 3;
    String description = exhaustions.stream()
            .limit(maxDescriptions)
            .map(NodeResourceExhaustion::toExhaustionAddedDescription)
            .collect(Collectors.joining(", "));
    if (exhaustions.size() > maxDescriptions) {
        description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions);
    }
    description = decoratedMessage(cluster, description);
    // FIXME we currently will trigger a cluster state recomputation even if the number of
    // exhaustions is greater than what is returned as part of the description. Though at
    // that point, cluster state recomputations will be the least of your worries...!
    return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions);
}
// A resource reported with an explicit name ("disk:a-fancy-disk") must show
// that name in the feed-block description.
@Test
void feed_block_description_can_contain_optional_name_component() {
    var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8)));
    var cf = createFixtureWithReportedUsages(
        forNode(1, usage("disk", "a-fancy-disk", 0.51), usage("memory", 0.79)),
        forNode(2, usage("disk", 0.4), usage("memory", 0.6)));
    var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster());
    assertNotNull(feedBlock);
    assertTrue(feedBlock.blockFeedInCluster());
    assertEquals(decorate(cf, "disk:a-fancy-disk on node 1 [storage.1.local] is 51.0% full (the configured limit is 50.0%)"),
        feedBlock.getDescription());
}
/**
 * Asserts that the subject equals at least one of the given values:
 * {@code first}, {@code second}, or any element of {@code rest}.
 * Delegates to {@code isIn} over the accumulated collection.
 */
public void isAnyOf(
    @Nullable Object first, @Nullable Object second, @Nullable Object @Nullable ... rest) {
  isIn(accumulate(first, second, rest));
}
// A subject matching none of the candidates must fail with the
// "expected any of [...] / but was" message structure.
@Test
public void isAnyOfFailure() {
    expectFailure.whenTesting().that("x").isAnyOf("a", "b", "c");
    assertFailureKeys("expected any of", "but was");
    assertFailureValue("expected any of", "[a, b, c]");
}
public static boolean shouldBackoff(long initialTimestamp, TimeUnit unitInitial, int failedAttempts, long defaultInterval, long maxBackoffInterval) { long initialTimestampInNano = unitInitial.toNanos(initialTimestamp); long currentTime = System.nanoTime(); long interval = defaultInterval; for (int i = 1; i < failedAttempts; i++) { interval = interval * 2; if (interval > maxBackoffInterval) { interval = maxBackoffInterval; break; } } // if the current time is less than the time at which next retry should occur, we should backoff return currentTime < (initialTimestampInNano + interval); }
// Exercises the 3-arg shouldBackoff overload (defined elsewhere): an epoch
// timestamp of 0 yields no backoff, the current nanoTime yields backoff.
@Test
public void shouldBackoffTest() {
    // gives false
    assertFalse(Backoff.shouldBackoff(0L, TimeUnit.NANOSECONDS, 0));
    long currentTimestamp = System.nanoTime();
    // gives true
    assertTrue(Backoff.shouldBackoff(currentTimestamp, TimeUnit.NANOSECONDS, 100));
}
/**
 * Creates the TableUpsertMetadataManager for an upsert-enabled table.
 * Table-level upsert config wins; server-level (instance) config supplies
 * defaults only where the table has not already enabled a feature. Falls
 * back to ConcurrentMapTableUpsertMetadataManager when no manager class is
 * configured at either level.
 *
 * @throws IllegalArgumentException if the table has no upsert config
 * @throws RuntimeException if the configured manager class cannot be instantiated
 */
public static TableUpsertMetadataManager create(TableConfig tableConfig,
    @Nullable PinotConfiguration instanceUpsertConfig) {
  String tableNameWithType = tableConfig.getTableName();
  UpsertConfig upsertConfig = tableConfig.getUpsertConfig();
  Preconditions.checkArgument(upsertConfig != null, "Must provide upsert config for table: %s", tableNameWithType);
  TableUpsertMetadataManager metadataManager;
  String metadataManagerClass = upsertConfig.getMetadataManagerClass();

  if (instanceUpsertConfig != null) {
    // Table-level class takes precedence over the server-wide default.
    if (metadataManagerClass == null) {
      metadataManagerClass = instanceUpsertConfig.getProperty(UPSERT_DEFAULT_METADATA_MANAGER_CLASS);
    }
    // Server level config honoured only when table level config is not set to true
    if (!upsertConfig.isEnableSnapshot()) {
      upsertConfig.setEnableSnapshot(
          Boolean.parseBoolean(instanceUpsertConfig.getProperty(UPSERT_DEFAULT_ENABLE_SNAPSHOT, "false")));
    }
    // Server level config honoured only when table level config is not set to true
    if (!upsertConfig.isEnablePreload()) {
      upsertConfig.setEnablePreload(
          Boolean.parseBoolean(instanceUpsertConfig.getProperty(UPSERT_DEFAULT_ENABLE_PRELOAD, "false")));
    }
    // Server level config honoured only when table level config is not set to true
    if (!upsertConfig.isAllowPartialUpsertConsumptionDuringCommit()) {
      upsertConfig.setAllowPartialUpsertConsumptionDuringCommit(Boolean.parseBoolean(
          instanceUpsertConfig.getProperty(UPSERT_DEFAULT_ALLOW_PARTIAL_UPSERT_CONSUMPTION_DURING_COMMIT, "false")));
    }
  }

  if (StringUtils.isNotEmpty(metadataManagerClass)) {
    LOGGER.info("Creating TableUpsertMetadataManager with class: {} for table: {}", metadataManagerClass,
        tableNameWithType);
    try {
      metadataManager = (TableUpsertMetadataManager) Class.forName(metadataManagerClass).newInstance();
    } catch (Exception e) {
      throw new RuntimeException(
          String.format("Caught exception while constructing TableUpsertMetadataManager with class: %s for table: %s",
              metadataManagerClass, tableNameWithType), e);
    }
  } else {
    LOGGER.info("Creating ConcurrentMapTableUpsertMetadataManager for table: {}", tableNameWithType);
    metadataManager = new ConcurrentMapTableUpsertMetadataManager();
  }
  return metadataManager;
}
// With consistent deleted-keys compaction enabled and no explicit manager
// class, the factory must still return a BaseTableUpsertMetadataManager.
@Test
public void testCreateForManagerClassWithConsistentDeletes() {
    UpsertConfig upsertConfig = new UpsertConfig(UpsertConfig.Mode.FULL);
    upsertConfig.setHashFunction(HashFunction.NONE);
    upsertConfig.setEnableDeletedKeysCompactionConsistency(true);
    _tableConfig =
        new TableConfigBuilder(TableType.REALTIME).setTableName(RAW_TABLE_NAME).setUpsertConfig(upsertConfig).build();
    TableUpsertMetadataManager tableUpsertMetadataManager =
        TableUpsertMetadataManagerFactory.create(_tableConfig, null);
    assertNotNull(tableUpsertMetadataManager);
    assertTrue(tableUpsertMetadataManager instanceof BaseTableUpsertMetadataManager);
}
/**
 * Builds the WebSocket opening-handshake HTTP request for this handshaker.
 * Implementations populate the version-specific upgrade headers; callers own
 * the returned request and must release it.
 */
protected abstract FullHttpRequest newHandshakeRequest();
// An Origin header supplied through custom headers must survive into the
// generated handshake request unchanged.
@Test
public void testSetOriginFromCustomHeaders() {
    HttpHeaders customHeaders = new DefaultHttpHeaders().set(getOriginHeaderName(), "http://example.com");
    WebSocketClientHandshaker handshaker =
        newHandshaker(URI.create("ws://server.example.com/chat"), null, customHeaders, false, true);
    FullHttpRequest request = handshaker.newHandshakeRequest();
    try {
        assertEquals("http://example.com", request.headers().get(getOriginHeaderName()));
    } finally {
        // The request is reference-counted and must be released by the test.
        request.release();
    }
}
/**
 * Computes per-subtask input info for an ALL_TO_ALL edge: every target
 * subtask consumes the full partition range [0, sourceCount-1] plus a
 * subpartition range derived from its index.
 */
static JobVertexInputInfo computeVertexInputInfoForAllToAll(
        int sourceCount,
        int targetCount,
        Function<Integer, Integer> numOfSubpartitionsRetriever,
        boolean isDynamicGraph,
        boolean isBroadcast) {
    final List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>();
    IndexRange partitionRange = new IndexRange(0, sourceCount - 1);
    for (int i = 0; i < targetCount; ++i) {
        // Only partition 0's subpartition count is queried — presumably all
        // partitions have the same count on an ALL_TO_ALL edge; TODO confirm.
        IndexRange subpartitionRange =
                computeConsumedSubpartitionRange(
                        i,
                        targetCount,
                        () -> numOfSubpartitionsRetriever.apply(0),
                        isDynamicGraph,
                        isBroadcast);
        executionVertexInputInfos.add(
                new ExecutionVertexInputInfo(i, partitionRange, subpartitionRange));
    }
    return new JobVertexInputInfo(executionVertexInputInfos);
}
// Dynamic graph: non-broadcast splits 10 subpartitions roughly evenly over
// 3 targets; broadcast gives every target the single subpartition [0, 0].
@Test
void testComputeVertexInputInfoForAllToAllWithDynamicGraph() {
    final JobVertexInputInfo nonBroadcast =
            computeVertexInputInfoForAllToAll(2, 3, ignored -> 10, true, false);
    assertThat(nonBroadcast.getExecutionVertexInputInfos())
            .containsExactlyInAnyOrder(
                    new ExecutionVertexInputInfo(0, new IndexRange(0, 1), new IndexRange(0, 2)),
                    new ExecutionVertexInputInfo(1, new IndexRange(0, 1), new IndexRange(3, 5)),
                    new ExecutionVertexInputInfo(
                            2, new IndexRange(0, 1), new IndexRange(6, 9)));
    final JobVertexInputInfo broadcast =
            computeVertexInputInfoForAllToAll(2, 3, ignored -> 1, true, true);
    assertThat(broadcast.getExecutionVertexInputInfos())
            .containsExactlyInAnyOrder(
                    new ExecutionVertexInputInfo(0, new IndexRange(0, 1), new IndexRange(0, 0)),
                    new ExecutionVertexInputInfo(1, new IndexRange(0, 1), new IndexRange(0, 0)),
                    new ExecutionVertexInputInfo(
                            2, new IndexRange(0, 1), new IndexRange(0, 0)));
}
// Builds a BigQuery-backed SQL table, deriving conversion options from the
// table's declared properties.
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
    return new BigQueryTable(table, getConversionOptions(table.getProperties()));
}
// A WRITE_EMPTY write-disposition property on the table must be reflected
// on the built BigQueryTable.
@Test
public void testSelectWriteDispositionMethodEmpty() {
    Table table = fakeTableWithProperties(
        "hello",
        "{ " + WRITE_DISPOSITION_PROPERTY + ": " + "\"" + WriteDisposition.WRITE_EMPTY.toString() + "\" }");
    BigQueryTable sqlTable = (BigQueryTable) provider.buildBeamSqlTable(table);
    assertEquals(WriteDisposition.WRITE_EMPTY, sqlTable.writeDisposition);
}
/**
 * Checks whether the given character sequence is valid Base64.
 * Null or sequences shorter than 2 characters are rejected outright, as is
 * any sequence containing multi-byte (non-single-byte) characters.
 */
public static boolean isBase64(CharSequence base64) {
    if (base64 == null || base64.length() < 2) {
        return false;
    }

    final byte[] bytes = StrUtil.utf8Bytes(base64);

    if (bytes.length != base64.length()) {
        // Byte length differing from char length means multi-byte characters
        // are present, so this cannot be Base64 — return false immediately.
        return false;
    }

    // Delegate the per-character validation to the byte[] overload.
    return isBase64(bytes);
}
// Any freshly encoded string must be recognized as Base64.
@Test
public void isBase64Test(){
    assertTrue(Base64.isBase64(Base64.encode(RandomUtil.randomString(1000))));
}
/**
 * Converts value to the given type via the global ConverterRegistry.
 * When quietly is true, any conversion exception is swallowed and
 * defaultValue is returned instead; otherwise the exception propagates.
 */
public static <T> T convertWithCheck(Type type, Object value, T defaultValue, boolean quietly) {
    final ConverterRegistry registry = ConverterRegistry.getInstance();
    try {
        return registry.convert(type, value, defaultValue);
    } catch (Exception e) {
        if (quietly) {
            return defaultValue;
        }
        throw e;
    }
}
// A NumberWithFormat wrapper must convert to its underlying Long value.
@Test
public void toLongFromNumberWithFormatTest() {
    final NumberWithFormat value = new NumberWithFormat(1678285713935L, null);
    final Long aLong = Convert.convertWithCheck(Long.class, value, null, false);
    assertEquals(new Long(1678285713935L), aLong);
}
/**
 * Resolves a deserializer for the given type name. JDK built-in types (and
 * arrays of them) are delegated to the parent factory; other types get a
 * cached generic deserializer, with special handling for custom Throwable
 * subclasses and java.lang.Class.
 */
@Override
public Deserializer getDeserializer(String type) throws HessianProtocolException {
    // Empty names and filtered JDK built-in types: delegate to the parent class.
    if (StringUtils.isEmpty(type) || ClassFilter.filterExcludeClass(type)) {
        return super.getDeserializer(type);
    }
    // Arrays of filtered JDK types are likewise delegated to the parent class.
    if (type.charAt(0) == ARRAY_PREFIX && ClassFilter.arrayFilter(type)) {
        return super.getDeserializer(type);
    }
    // Reuse a previously built deserializer if one is cached for this type.
    Deserializer deserializer = DESERIALIZER_MAP.get(type);
    if (deserializer != null) {
        return deserializer;
    }
    // Custom Throwable subclasses use JavaDeserializer so they come back as
    // real Throwables instead of GenericObject instances.
    deserializer = getDeserializerForCustomThrowable(type);
    if (deserializer != null) {
        DESERIALIZER_MAP.putIfAbsent(type, deserializer);
        return deserializer;
    }
    // Build a new deserializer: java.lang.Class uses the shared
    // GenericClassDeserializer, everything else a per-type GenericDeserializer.
    if (ClassFilter.CLASS_NAME.equals(type)) {
        deserializer = GenericClassDeserializer.getInstance();
    } else {
        deserializer = new GenericDeserializer(type);
    }
    DESERIALIZER_MAP.putIfAbsent(type, deserializer);
    return deserializer;
}
// TODO(review): empty test body — this currently verifies nothing. Either
// add assertions against getDeserializer's delegation/caching behavior or
// remove the test.
@Test
public void getDeserializer() throws Exception {
}
/**
 * Returns the hyperbolic tangent of an integer radian value by delegating
 * to the Double overload; a null input yields a null result.
 */
@Udf(description = "Returns the hyperbolic tangent of an INT value")
public Double tanh(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the hyperbolic tangent of."
    ) final Integer value
) {
  if (value == null) {
    return tanh((Double) null);
  }
  return tanh(value.doubleValue());
}
// tanh saturates toward 1 for inputs beyond 2*pi; check double, int, and
// long overloads against known values within floating-point tolerance.
@Test
public void shouldHandleMoreThanPositive2Pi() {
    assertThat(udf.tanh(9.1), closeTo(0.9999999750614947, 0.000000000000001));
    assertThat(udf.tanh(6.3), closeTo(0.9999932559922726, 0.000000000000001));
    assertThat(udf.tanh(7), closeTo(0.9999983369439447, 0.000000000000001));
    assertThat(udf.tanh(7L), closeTo(0.9999983369439447, 0.000000000000001));
}
/**
 * Wraps a plain Iterator as a CloseableIterator. Iterators that are
 * themselves AutoCloseable are rejected, since the wrapper's close() would
 * silently ignore their own close() and leak resources.
 *
 * @throws NullPointerException if iterator is null
 * @throws IllegalArgumentException if iterator is AutoCloseable
 */
public static <T> CloseableIterator<T> from(Iterator<T> iterator) {
    // early fail
    requireNonNull(iterator);
    checkArgument(!(iterator instanceof AutoCloseable),
        "This method does not support creating a CloseableIterator from an Iterator which is Closeable");
    return new RegularIteratorWrapper<>(iterator);
}
// Passing an AutoCloseable iterator must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void from_iterator_throws_IAE_if_arg_is_a_AutoCloseable() {
    CloseableIterator.from(new CloseableIt());
}
// Runs this computation step for the branch rooted at the component tree's root.
@Override
public void execute(ComputationStep.Context context) {
    executeForBranch(treeRootHolder.getRoot());
}
// An UPDATED quality profile must produce exactly one "updated" event whose
// data carries the from/to dates (shifted by one second) and the rule-change text.
@Test
public void changed_event_if_qp_has_been_updated() {
    QualityProfile qp1 = qp(QP_NAME_1, LANGUAGE_KEY_1, BEFORE_DATE);
    QualityProfile qp2 = qp(QP_NAME_1, LANGUAGE_KEY_1, AFTER_DATE);
    qProfileStatusRepository.register(qp2.getQpKey(), UPDATED);
    mockQualityProfileMeasures(treeRootHolder.getRoot(), arrayOf(qp1), arrayOf(qp2));
    Language language = mockLanguageInRepository(LANGUAGE_KEY_1);
    when(qualityProfileRuleChangeTextResolver.mapChangeToNumberOfRules(qp2, treeRootHolder.getRoot().getUuid())).thenReturn(CHANGE_TO_NUMBER_OF_RULES_MAP);
    underTest.execute(new TestComputationStepContext());
    verify(eventRepository).add(eventArgumentCaptor.capture());
    verifyNoMoreInteractions(eventRepository);
    verifyEvent(eventArgumentCaptor.getValue(),
        "\"" + qp2.getQpName() + "\" (" + language.getName() + ") updated with " + RULE_CHANGE_TEXT,
        "from=" + UtcDateUtils.formatDateTime(BEFORE_DATE_PLUS_1_SEC) + ";key=" + qp1.getQpKey()
            + ";languageKey=" + qp2.getLanguageKey()+ ";name=" + qp2.getQpName()
            + ";to=" + UtcDateUtils.formatDateTime(AFTER_DATE_PLUS_1_SEC),
        RULE_CHANGE_TEXT);
}
/**
 * Processes a WatermarkStatus (ACTIVE/IDLE) event arriving on the given
 * channel. Only state-changing toggles are acted on: active->idle may flush
 * or advance the output watermark and, if every subpartition is now idle,
 * emit an IDLE status; idle->active may re-align the subpartition and emit
 * an ACTIVE status if the valve as a whole was idle.
 */
public void inputWatermarkStatus(
        WatermarkStatus watermarkStatus, int channelIndex, DataOutput<?> output)
        throws Exception {
    // Shared input channel is only enabled in batch jobs, which do not have watermark status
    // events.
    Preconditions.checkState(!isInputChannelShared);
    SubpartitionStatus subpartitionStatus =
            subpartitionStatuses.get(channelIndex).get(subpartitionIndexes[channelIndex]);
    // It is supposed that WatermarkStatus will not appear in jobs where one input channel
    // consumes multiple subpartitions, so we do not need to map channelIndex into
    // subpartitionStatusIndex for now, like what is done on Watermarks.

    // only account for watermark status inputs that will result in a status change for the
    // subpartition
    if (watermarkStatus.isIdle() && subpartitionStatus.watermarkStatus.isActive()) {
        // handle active -> idle toggle for the subpartition
        subpartitionStatus.watermarkStatus = WatermarkStatus.IDLE;

        // the subpartition is now idle, therefore not aligned
        markWatermarkUnaligned(subpartitionStatus);

        // if all subpartitions of the valve are now idle, we need to output an idle stream
        // status from the valve (this also marks the valve as idle)
        if (!SubpartitionStatus.hasActiveSubpartitions(subpartitionStatuses)) {
            // now that all subpartitions are idle and no subpartitions will continue to
            // advance its watermark, we should "flush" all watermarks across all
            // subpartitions; effectively, this means emitting the max watermark across all
            // subpartitions as the new watermark. Also, since we already try to advance the
            // min watermark as subpartitions individually become IDLE, here we only need to
            // perform the flush if the watermark of the last active subpartition that just
            // became idle is the current min watermark.
            if (subpartitionStatus.watermark == lastOutputWatermark) {
                findAndOutputMaxWatermarkAcrossAllSubpartitions(output);
            }
            lastOutputWatermarkStatus = WatermarkStatus.IDLE;
            output.emitWatermarkStatus(lastOutputWatermarkStatus);
        } else if (subpartitionStatus.watermark == lastOutputWatermark) {
            // if the watermark of the subpartition that just became idle equals the last output
            // watermark (the previous overall min watermark), we may be able to find a new
            // min watermark from the remaining aligned subpartitions
            findAndOutputNewMinWatermarkAcrossAlignedSubpartitions(output);
        }
    } else if (watermarkStatus.isActive() && subpartitionStatus.watermarkStatus.isIdle()) {
        // handle idle -> active toggle for the subpartition
        subpartitionStatus.watermarkStatus = WatermarkStatus.ACTIVE;

        // if the last watermark of the subpartition, before it was marked idle, is still
        // larger than the overall last output watermark of the valve, then we can set the
        // subpartition to be aligned already.
        if (subpartitionStatus.watermark >= lastOutputWatermark) {
            markWatermarkAligned(subpartitionStatus);
        }

        // if the valve was previously marked to be idle, mark it as active and output an
        // active stream status because at least one of the subpartitions is now active
        if (lastOutputWatermarkStatus.isIdle()) {
            lastOutputWatermarkStatus = WatermarkStatus.ACTIVE;
            output.emitWatermarkStatus(lastOutputWatermarkStatus);
        }
    }
}
// The valve must emit IDLE only once all channels are idle, stay silent on
// redundant toggles, and emit ACTIVE as soon as any channel reactivates.
@Test
void testMultipleInputWatermarkStatusToggling() throws Exception {
    StatusWatermarkOutput valveOutput = new StatusWatermarkOutput();
    StatusWatermarkValve valve = new StatusWatermarkValve(2);
    // this also implicitly verifies that all input channels start as active
    valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 0, valveOutput);
    valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 1, valveOutput);
    assertThat(valveOutput.popLastSeenOutput()).isNull();
    valve.inputWatermarkStatus(WatermarkStatus.IDLE, 1, valveOutput);
    assertThat(valveOutput.popLastSeenOutput()).isNull();
    // now, all channels are IDLE
    valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, valveOutput);
    assertThat(valveOutput.popLastSeenOutput()).isEqualTo(WatermarkStatus.IDLE);
    valve.inputWatermarkStatus(WatermarkStatus.IDLE, 0, valveOutput);
    valve.inputWatermarkStatus(WatermarkStatus.IDLE, 1, valveOutput);
    assertThat(valveOutput.popLastSeenOutput()).isNull();
    // as soon as at least one input becomes active again, the ACTIVE marker should be forwarded
    valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 1, valveOutput);
    assertThat(valveOutput.popLastSeenOutput()).isEqualTo(WatermarkStatus.ACTIVE);
    valve.inputWatermarkStatus(WatermarkStatus.ACTIVE, 0, valveOutput);
    // already back to ACTIVE, should yield no output
    assertThat(valveOutput.popLastSeenOutput()).isNull();
}
// Fills missing max_speed values using country-dependent defaults, now that
// the urban/rural classification (unavailable to the earlier parsers) is
// known. An edge counts as "urban" for the lambda unless it is RURAL.
public void fillMaxSpeed(Graph graph, EncodingManager em) {
    // In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info,
    // but now we have and can fill the country-dependent max_speed value where missing.
    EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
    fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL);
}
@Test
public void testCityGermany() {
    // German CITY defaults: primary/residential fall back to 50 km/h when no
    // maxspeed tag is present; motorway keeps the unlimited sign speed; an
    // explicit maxspeed tag wins and is not flagged as estimated.
    ReaderWay way = new ReaderWay(0L);
    way.setTag("country", Country.DEU);
    way.setTag("highway", "primary");
    EdgeIteratorState edge = createEdge(way).set(urbanDensity, CITY);
    calc.fillMaxSpeed(graph, em);
    assertEquals(50, edge.get(maxSpeedEnc), 1);
    assertTrue(edge.get(maxSpeedEstEnc));

    way = new ReaderWay(0L);
    way.setTag("country", Country.DEU);
    way.setTag("highway", "motorway");
    edge = createEdge(way).set(urbanDensity, CITY);
    calc.fillMaxSpeed(graph, em);
    assertEquals(UNLIMITED_SIGN_SPEED, edge.get(maxSpeedEnc), 1);

    way = new ReaderWay(0L);
    way.setTag("country", Country.DEU);
    way.setTag("highway", "residential");
    edge = createEdge(way).set(urbanDensity, CITY);
    calc.fillMaxSpeed(graph, em);
    assertEquals(50, edge.get(maxSpeedEnc), 1);

    // explicit maxspeed tag: taken as-is, not estimated
    way = new ReaderWay(0L);
    way.setTag("country", Country.DEU);
    way.setTag("highway", "residential");
    way.setTag("maxspeed", "70");
    edge = createEdge(way);
    calc.fillMaxSpeed(graph, em);
    assertEquals(70, edge.get(maxSpeedEnc), 1);
    assertFalse(edge.get(maxSpeedEstEnc));
}
/**
 * Non-blocking read of the future's value.
 * <p>
 * Resolution order: an explicitly set {@code result} wins, then an already
 * deserialized cached value; otherwise the delegate future's current value is
 * resolved or deserialized on the fly. The value obtained from
 * {@code future.getNow} is deliberately not cached because it might be the
 * caller-supplied default.
 *
 * @param valueIfAbsent value handed to the delegate when it has not completed yet
 * @throws CompletionException wrapping any HazelcastSerializationException
 *         raised during deserialization
 */
@Override
public V getNow(V valueIfAbsent) {
    // if there is an explicit value set, we use that
    if (result != null) {
        return (V) result;
    }
    // if there already is a deserialized value set, use it.
    if (deserializedValue != VOID) {
        return (V) deserializedValue;
    }
    // otherwise, do not cache the value returned from future.getNow
    // because it might be the default valueIfAbsent
    Object value = future.getNow(valueIfAbsent);
    try {
        if (value instanceof ClientMessage) {
            return resolve(value);
        } else {
            return (value instanceof Data && deserializeResponse)
                ? serializationService.toObject(value) : (V) value;
        }
    } catch (HazelcastSerializationException exc) {
        throw new CompletionException(exc);
    }
}
@Test
public void getNow_whenDoneReturnValue() {
    // Once the wrapped invocation future completes, getNow must deserialize
    // and return the real response instead of the supplied default.
    invocationFuture.complete(response);

    assertTrue(delegatingFuture.isDone());
    assertEquals(DESERIALIZED_VALUE, delegatingFuture.getNow(DESERIALIZED_DEFAULT_VALUE));
}
/**
 * Resolves a {@link Storage} implementation from the scheme prefix of a Hive
 * storage-descriptor location. Any location without a known object-store
 * prefix (e.g. "hdfs://", plain paths) falls back to HDFS.
 *
 * @param hiveSdLocation storage-descriptor location, e.g. "s3://bucket/path"
 * @return the matching storage implementation, never null
 */
public static Storage getStorageType(String hiveSdLocation) {
    // Lowercase with Locale.ROOT so the prefix comparison is locale-independent
    // (the default-locale toLowerCase() is a well-known pitfall, e.g. Turkish
    // dotless-i for enum names containing 'I').
    if (hiveSdLocation.startsWith(StorageType.S3.name().toLowerCase(java.util.Locale.ROOT))) {
        return new S3Storage();
    } else if (hiveSdLocation.startsWith(StorageType.OSS.name().toLowerCase(java.util.Locale.ROOT))) {
        return new OSSStorage();
    } else if (hiveSdLocation.startsWith(StorageType.COS.name().toLowerCase(java.util.Locale.ROOT))) {
        return new COSStorage();
    } else {
        return new HDFSStorage(hiveSdLocation);
    }
}
@Test
void testStorageType() {
    // Every known location prefix in STORAGE_MAP must resolve to its expected
    // Storage implementation, and never to null.
    STORAGE_MAP
        .entrySet()
        .forEach(
            storageMapEntry -> {
                Class<? extends Storage> expectedStorageClass = storageMapEntry.getValue();
                Storage storage = StorageFactory.getStorageType(storageMapEntry.getKey());
                Assertions.assertNotNull(storage);
                Assertions.assertTrue(expectedStorageClass.isInstance(storage));
            });
}
/**
 * Computes the hex-encoded SHA-512 digest of the given string.
 * <p>
 * The string is encoded as UTF-8 explicitly; the previous {@code data.getBytes()}
 * used the platform default charset, which makes digests of non-ASCII input
 * differ between machines.
 *
 * @param data text to hash; must not be null
 * @return hex-encoded SHA-512 digest of the UTF-8 bytes of {@code data}
 */
public static String SHA512(String data) {
    return SHA512(data.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}
@Test
public void testSHA512() throws Exception {
    // Known-answer test: SHA-512("biezhi") must match the reference digest via
    // the String overload, the byte[] overload, and the raw-byte variant.
    String biezhiSHA512 = "cf3b5d0ed88f7945edf687d730b9b7d8e7817c5dcff1b1907c77a8bf6ae8d85fd8e1c7973ef5a6391df6cfb647f891c19ccf3a7f21ecdc7ca18322131aba5cc6";
    Assert.assertEquals(
        biezhiSHA512,
        EncryptKit.SHA512("biezhi")
    );
    Assert.assertEquals(
        biezhiSHA512,
        EncryptKit.SHA512("biezhi".getBytes())
    );
    TestCase.assertTrue(
        Arrays.equals(
            ConvertKit.hexString2Bytes(biezhiSHA512),
            EncryptKit.SHA512ToByte("biezhi".getBytes())
        )
    );
}
/**
 * Runs the plugin's core logic asynchronously on the plugin thread pool.
 * <p>
 * The returned future always completes normally with a
 * {@code PluginExecutionResult}: successful runs are wrapped via
 * {@code buildSucceededResult}, and any throwable (including the 1-hour
 * timeout) is converted into a failed result via {@code buildFailedResult}
 * rather than failing the future itself.
 */
@Override
public <T> ListenableFuture<PluginExecutionResult<T>> executeAsync(
    PluginExecutorConfig<T> executorConfig) {
  // Executes the core plugin logic within the thread pool.
  return FluentFuture.from(
          pluginExecutionThreadPool.submit(
              () -> {
                // Stopwatch starts only once the task is actually scheduled.
                executionStopwatch.start();
                return executorConfig.pluginExecutionLogic().call();
              }))
      // Terminate plugin if it runs over 1 hour.
      .withTimeout(Duration.ofHours(1), pluginExecutionThreadPool)
      // If execution succeeded, build successful execution result.
      .transform(resultData -> buildSucceededResult(resultData, executorConfig), directExecutor())
      // If execution failed, build failed execution result.
      .catching(
          Throwable.class,
          exception -> buildFailedResult(exception, executorConfig),
          directExecutor());
}
@Test
public void executeAsync_whenFailedWithPluginExecutionException_returnsFailedResult()
    throws ExecutionException, InterruptedException {
  // A PluginExecutionException thrown by the plugin logic must surface as a
  // *failed result* (not a failed future): message preserved, no cause,
  // stopwatch elapsed time recorded, and no result data.
  PluginExecutorConfig<String> executorConfig =
      PluginExecutorConfig.<String>builder()
          .setMatchedPlugin(FAKE_MATCHING_RESULT)
          .setPluginExecutionLogic(
              () -> {
                throw new PluginExecutionException("test exception");
              })
          .build();

  PluginExecutionResult<String> executionResult =
      new PluginExecutorImpl(PLUGIN_EXECUTION_THREAD_POOL, executionStopWatch)
          .executeAsync(executorConfig)
          .get();

  assertThat(executionResult.exception()).isPresent();
  assertThat(executionResult.exception().get()).hasCauseThat().isNull();
  assertThat(executionResult.exception().get()).hasMessageThat().contains("test exception");
  assertThat(executionResult.isSucceeded()).isFalse();
  assertThat(executionResult.executionStopwatch().elapsed()).isEqualTo(TICK_DURATION);
  assertThat(executionResult.resultData()).isEmpty();
}
@Override
public void accept(MetadataShellState state) {
    // Resolve relative globs against the shell's current working directory.
    final String fullGlob;
    if (glob.startsWith("/")) {
        fullGlob = glob;
    } else {
        fullGlob = state.workingDirectory() + "/" + glob;
    }
    List<String> components =
        CommandUtils.stripDotPathComponents(CommandUtils.splitPath(fullGlob));
    MetadataNode root = state.root();
    if (root == null) {
        throw new RuntimeException("Invalid null root");
    }
    // When nothing under the root matched the glob, signal "not found".
    boolean matchedAnything = accept(components, 0, root, new String[0]);
    if (!matchedAnything) {
        handler.accept(Optional.empty());
    }
}
@Test
public void testNotFoundGlob() {
    // A glob matching nothing must invoke the handler with Optional.empty().
    InfoConsumer consumer = new InfoConsumer();
    GlobVisitor visitor = new GlobVisitor("epsilon", consumer);
    visitor.accept(DATA);
    assertEquals(Optional.empty(), consumer.infos);
}
public void setUuids(Set<String> uuids) {
    Set<String> validated = requireNonNull(uuids, "Uuids cannot be null");
    // Write-once semantics: a second initialization is a programming error.
    checkState(this.uuids == null, "Uuids have already been initialized");
    // Defensive copy so later mutation of the caller's set cannot leak in.
    this.uuids = new HashSet<>(validated);
}
@Test
public void fail_with_ISE_when_setting_uuids_twice() {
    // setUuids is write-once: a second call must fail with IllegalStateException.
    assertThatThrownBy(() -> {
        sut.setUuids(newHashSet("ABCD"));
        sut.setUuids(newHashSet("EFGH"));
    })
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Uuids have already been initialized");
}
@Override
public void setViewActivity(View view, Activity activity) {
    // Intentional no-op: this implementation does not bind an Activity tag to
    // the view (presumably a disabled/empty variant of the API — TODO confirm
    // this is the intended behavior for this subclass).
}
@Test
public void setViewActivity() {
    // The no-op implementation must not attach an activity tag to the view.
    View view = new View(mApplication);
    mSensorsAPI.setViewActivity(view, new EmptyActivity());
    Object tag = view.getTag(R.id.sensors_analytics_tag_view_activity);
    Assert.assertNull(tag);
}
private RemotingCommand getBrokerHaStatus(ChannelHandlerContext ctx, RemotingCommand request) {
    // Report the HA runtime state of the message store, or SYSTEM_ERROR when
    // the store cannot provide it.
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    HARuntimeInfo runtimeInfo = this.brokerController.getMessageStore().getHARuntimeInfo();
    if (runtimeInfo == null) {
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark("Can not get HARuntimeInfo, may be duplicationEnable is true");
        return response;
    }
    response.setBody(runtimeInfo.encode());
    response.setCode(ResponseCode.SUCCESS);
    response.setRemark(null);
    return response;
}
@Test
public void testGetBrokerHaStatus() throws RemotingCommandException {
    // Without a usable message store the processor answers SYSTEM_ERROR; once
    // a store providing HA runtime info is wired in, it answers SUCCESS.
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_BROKER_HA_STATUS, null);
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);

    when(brokerController.getMessageStore()).thenReturn(messageStore);
    when(messageStore.getHARuntimeInfo()).thenReturn(new HARuntimeInfo());
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Maps a Java {@link Type} to its function parameter type using the default
 * Java-to-argument-type mapping ({@code JAVA_TO_ARG_TYPE}).
 */
public static ParamType getSchemaFromType(final Type type) {
    return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
@Test
public void shouldGetLongSchemaForLongClass() {
    // Boxed Long must map to the LONG parameter type.
    assertThat(
        UdfUtil.getSchemaFromType(Long.class),
        equalTo(ParamTypes.LONG)
    );
}
/**
 * Resolves the effective client-auth mode.
 * <p>
 * The newer string-valued {@code SSL_CLIENT_AUTHENTICATION_CONFIG} takes
 * precedence; the deprecated boolean {@code SSL_CLIENT_AUTH_CONFIG} is
 * honoured (with a deprecation warning) only when the new config was not
 * explicitly supplied by the user.
 */
public ClientAuth getClientAuth() {
    String clientAuth = getString(SSL_CLIENT_AUTHENTICATION_CONFIG);
    if (originals().containsKey(SSL_CLIENT_AUTH_CONFIG)) {
        if (originals().containsKey(SSL_CLIENT_AUTHENTICATION_CONFIG)) {
            // Both configs supplied: keep the new config's value, just warn.
            log.warn(
                "The {} configuration is deprecated. Since a value has been supplied for the {} "
                    + "configuration, that will be used instead",
                SSL_CLIENT_AUTH_CONFIG,
                SSL_CLIENT_AUTHENTICATION_CONFIG
            );
        } else {
            log.warn(
                "The configuration {} is deprecated and should be replaced with {}",
                SSL_CLIENT_AUTH_CONFIG,
                SSL_CLIENT_AUTHENTICATION_CONFIG
            );
            // Map the legacy boolean onto the new string values.
            clientAuth = getBoolean(SSL_CLIENT_AUTH_CONFIG)
                ? SSL_CLIENT_AUTHENTICATION_REQUIRED
                : SSL_CLIENT_AUTHENTICATION_NONE;
        }
    }
    return getClientAuth(clientAuth);
}
@Test
public void shouldUseClientAuthIfNoClientAuthenticationProvidedNone() {
    // Given: only the deprecated boolean client-auth config, set to false
    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
        .put(KsqlRestConfig.SSL_CLIENT_AUTH_CONFIG, false)
        .build());

    // When:
    final ClientAuth clientAuth = config.getClientAuth();

    // Then: the legacy `false` must map to ClientAuth.NONE
    assertThat(clientAuth, is(ClientAuth.NONE));
}
/**
 * Splits the dataset into {@code nsplits} train/test folds.
 * <p>
 * Fold sizes differ by at most one: the first {@code nsamples % nsplits}
 * folds receive one extra element. The returned iterator is lazy and
 * single-pass.
 *
 * @param dataset the dataset to split; must be non-empty and have at least
 *                {@code nsplits} elements
 * @param shuffle whether to shuffle indices (with this splitter's rng) first
 * @return an iterator over the train/test folds
 * @throws IllegalArgumentException if the dataset is empty or smaller than nsplits
 */
public Iterator<TrainTestFold<T>> split(Dataset<T> dataset, boolean shuffle) {
    int nsamples = dataset.size();
    if (nsamples == 0) {
        throw new IllegalArgumentException("empty input data");
    }
    if (nsplits > nsamples) {
        throw new IllegalArgumentException("cannot have nsplits > nsamples");
    }
    int[] indices;
    if (shuffle) {
        indices = Util.randperm(nsamples, rng);
    } else {
        indices = IntStream.range(0, nsamples).toArray();
    }

    // Base fold size, then spread the remainder over the first folds.
    int[] foldSizes = new int[nsplits];
    Arrays.fill(foldSizes, nsamples / nsplits);
    for (int i = 0; i < (nsamples % nsplits); i++) {
        foldSizes[i] += 1;
    }

    return new Iterator<TrainTestFold<T>>() {
        int foldPtr = 0;   // index of the next fold to emit
        int dataPtr = 0;   // start offset of that fold within indices

        @Override
        public boolean hasNext() {
            return foldPtr < foldSizes.length;
        }

        @Override
        public TrainTestFold<T> next() {
            // Honour the Iterator contract: exhausted iteration must raise
            // NoSuchElementException, not ArrayIndexOutOfBoundsException.
            if (!hasNext()) {
                throw new java.util.NoSuchElementException("no more folds");
            }
            int size = foldSizes[foldPtr];
            foldPtr++;
            int start = dataPtr;
            int stop = dataPtr + size;
            dataPtr = stop;
            // The current fold is the hold-out (test) set; all other indices train.
            int[] holdOut = Arrays.copyOfRange(indices, start, stop);
            int[] rest = new int[indices.length - holdOut.length];
            System.arraycopy(indices, 0, rest, 0, start);
            System.arraycopy(indices, stop, rest, start, nsamples - stop);
            return new TrainTestFold<>(
                new DatasetView<>(dataset, rest, "TrainFold(seed=" + seed + "," + foldPtr + " of " + nsplits + ")"),
                new DatasetView<>(dataset, holdOut, "TestFold(seed=" + seed + "," + foldPtr + " of " + nsplits + ")")
            );
        }
    };
}
@Test
public void testKFolderKDoesNotDivideN() {
    // 52 samples over 10 splits: the first 52 % 10 = 2 folds get one extra
    // test element; the remaining folds use the base sizes.
    int n = 52;
    int nsplits = 10;
    Dataset<MockOutput> data = getData(n);
    int expectTestSize = n / nsplits;
    int expectTrainSize = n - expectTestSize;
    KFoldSplitter<MockOutput> kf = new KFoldSplitter<>(nsplits, 3);
    Iterator<KFoldSplitter.TrainTestFold<MockOutput>> iter = kf.split(data, true);
    int ct = 0;
    while (ct < 2 && iter.hasNext()) {
        KFoldSplitter.TrainTestFold<MockOutput> fold = iter.next();
        assertEquals(expectTrainSize - 1, fold.train.size());
        assertEquals(expectTestSize + 1, fold.test.size());
        ct++;
    }
    while (iter.hasNext()) {
        KFoldSplitter.TrainTestFold<MockOutput> fold = iter.next();
        assertEquals(expectTrainSize, fold.train.size());
        assertEquals(expectTestSize, fold.test.size());
        ct++;
    }
    // exactly nsplits folds in total
    assertEquals(nsplits, ct);
}
/**
 * Assembles the scanner-report metadata (project key, server URL/version,
 * optional branch, dashboard and CE task URLs), publishes it to the CE task
 * report-data holder, and dumps it to the metadata file.
 *
 * @param taskId the Compute Engine task id for the submitted report
 */
void prepareAndDumpMetadata(String taskId) {
    // LinkedHashMap: the insertion order defines the dump file's line order.
    Map<String, String> metadata = new LinkedHashMap<>();

    metadata.put("projectKey", moduleHierarchy.root().key());
    metadata.put("serverUrl", server.getPublicRootUrl());
    metadata.put("serverVersion", server.getVersion());
    properties.branch().ifPresent(branch -> metadata.put("branch", branch));

    URL dashboardUrl = buildDashboardUrl(server.getPublicRootUrl(), moduleHierarchy.root().key());
    metadata.put("dashboardUrl", dashboardUrl.toExternalForm());

    // CE task URL: <publicRoot>/api/ce/task?id=<taskId>
    URL taskUrl = HttpUrl.parse(server.getPublicRootUrl()).newBuilder()
        .addPathSegment("api").addPathSegment("ce").addPathSegment("task")
        .addQueryParameter(ID, taskId)
        .build()
        .url();
    metadata.put("ceTaskId", taskId);
    metadata.put("ceTaskUrl", taskUrl.toExternalForm());

    ceTaskReportDataHolder.init(taskId, taskUrl.toExternalForm(), dashboardUrl.toExternalForm());
    dumpMetadata(metadata);
}
@Test
public void dump_public_url_if_defined_for_branches() throws IOException {
    // The metadata dump must use the configured public server URL and append
    // the branch name to the dashboard URL.
    when(server.getPublicRootUrl()).thenReturn("https://publicserver/sonarqube");
    when(branchConfiguration.branchType()).thenReturn(BRANCH);
    when(branchConfiguration.branchName()).thenReturn("branch-6.7");
    ReportPublisher underTest = new ReportPublisher(properties, wsClient, server, contextPublisher, moduleHierarchy, mode, mock(TempFolder.class),
        new ReportPublisherStep[0], branchConfiguration, reportMetadataHolder, analysisWarnings, javaArchitectureInformationProvider, fileStructure, ciConfiguration);

    underTest.prepareAndDumpMetadata("TASK-123");

    assertThat(readFileToString(properties.metadataFilePath().toFile(), StandardCharsets.UTF_8)).isEqualTo(
        "projectKey=org.sonarsource.sonarqube:sonarqube\n" +
        "serverUrl=https://publicserver/sonarqube\n" +
        "serverVersion=6.4\n" +
        "dashboardUrl=https://publicserver/sonarqube/dashboard?id=org.sonarsource.sonarqube%3Asonarqube&branch=branch-6.7\n" +
        "ceTaskId=TASK-123\n" +
        "ceTaskUrl=https://publicserver/sonarqube/api/ce/task?id=TASK-123\n");
}
/**
 * Builds the SQL Server UPDATE statement that restores the before-image of an
 * updated row: non-PK columns become "col = ?" assignments and the ordered
 * primary-key columns form the WHERE condition. Column lists are derived from
 * the first before-image row.
 *
 * @throws ShouldNeverHappenException when the undo log has no before-image rows
 */
@Override
protected String buildUndoSQL() {
    TableRecords beforeImage = sqlUndoLog.getBeforeImage();
    List<Row> beforeImageRows = beforeImage.getRows();
    if (CollectionUtils.isEmpty(beforeImageRows)) {
        // TODO
        throw new ShouldNeverHappenException("Invalid UNDO LOG");
    }
    Row row = beforeImageRows.get(0);
    List<Field> nonPkFields = row.nonPrimaryKeys();
    // update sql undo log before image all field come from table meta. need add escape.
    // see BaseTransactionalExecutor#buildTableRecords
    String updateColumns = nonPkFields.stream().map(
        field -> ColumnUtils.addEscape(field.getName(), JdbcConstants.SQLSERVER) + " = ?").collect(
        Collectors.joining(", "));
    List<String> pkNameList = getOrderedPkList(beforeImage, row, JdbcConstants.SQLSERVER).stream().map(Field::getName)
        .collect(Collectors.toList());
    String whereSql = SqlGenerateUtils.buildWhereConditionByPKs(pkNameList, JdbcConstants.SQLSERVER);
    return "UPDATE " + sqlUndoLog.getTableName() + " SET " + updateColumns + " WHERE " + whereSql;
}
@Test
public void buildUndoSQL() {
    // The generated statement must be an UPDATE that touches the row's columns
    // (id, age). Case-insensitive check via lowercasing.
    String sql = executor.buildUndoSQL().toLowerCase();
    Assertions.assertNotNull(sql);
    Assertions.assertTrue(sql.contains("update"));
    Assertions.assertTrue(sql.contains("id"));
    Assertions.assertTrue(sql.contains("age"));
}
/**
 * Returns the request path relative to the servlet context path.
 */
@Override
public String getPath() {
    var fullPath = request.getRequestURI();
    // it shouldn't be null, but in case it is, it's better to return empty string
    if (fullPath == null) {
        return Pac4jConstants.EMPTY_STRING;
    }
    // very strange use case: collapse a double leading slash to a single one
    if (fullPath.startsWith("//")) {
        fullPath = fullPath.substring(1);
    }
    val context = request.getContextPath();
    // this one shouldn't be null either, but in case it is, then let's consider it is empty
    if (context != null) {
        // strip the context prefix so only the in-app path remains
        return fullPath.substring(context.length());
    }
    return fullPath;
}
@Test
public void testGetPathFullpathContext() {
    // The context path must be stripped from the full request URI.
    when(request.getRequestURI()).thenReturn(CTX_PATH);
    when(request.getContextPath()).thenReturn(CTX);
    WebContext context = new JEEContext(request, response);
    assertEquals(PATH, context.getPath());
}
/**
 * Builds the full set of template bindings used to render the interpreter's
 * Kubernetes spec: pod/container identity, RPC endpoints, environment
 * variables and — for Spark — image, driver java options, UI URL/ingress and
 * pod resources. Interpreter properties override any computed value at the
 * very end.
 *
 * @param userName the interpreter user; may be null (stringified to "null")
 * @return the bindings to feed into the k8s template
 */
@VisibleForTesting
Properties getTemplateBindings(String userName) {
    Properties k8sProperties = new Properties();

    // k8s template properties
    k8sProperties.put("zeppelin.k8s.interpreter.user", String.valueOf(userName).trim());
    k8sProperties.put("zeppelin.k8s.interpreter.namespace", getInterpreterNamespace());
    k8sProperties.put("zeppelin.k8s.interpreter.pod.name", getPodName());
    k8sProperties.put("zeppelin.k8s.interpreter.serviceAccount", getServiceAccount());
    k8sProperties.put("zeppelin.k8s.interpreter.container.name", interpreterGroupName.toLowerCase());
    k8sProperties.put("zeppelin.k8s.interpreter.container.image", containerImage);
    k8sProperties.put("zeppelin.k8s.interpreter.group.id", getInterpreterGroupId());
    k8sProperties.put("zeppelin.k8s.interpreter.group.name", interpreterGroupName);
    k8sProperties.put("zeppelin.k8s.interpreter.setting.name", getInterpreterSettingName());
    k8sProperties.put("zeppelin.k8s.interpreter.localRepo", getLocalRepoDir());
    k8sProperties.put("zeppelin.k8s.interpreter.rpc.portRange", getInterpreterPortRange());
    k8sProperties.put("zeppelin.k8s.server.rpc.service", intpEventServerHost);
    k8sProperties.put("zeppelin.k8s.server.rpc.portRange", intpEventServerPort);

    String serverNamespace = K8sUtils.getCurrentK8sNamespace();
    String interpreterNamespace = getInterpreterNamespace();
    // Set the owner reference (zeppelin-server pod) for garbage collection when zeppelin server
    // and the zeppelin interpreter is in the same namespace (Kubernetes cannot specify an owner
    // in different namespace).
    if (ownerUID() != null && ownerName() != null
        && StringUtils.equals(serverNamespace, interpreterNamespace)) {
        k8sProperties.put("zeppelin.k8s.server.uid", ownerUID());
        k8sProperties.put("zeppelin.k8s.server.pod.name", ownerName());
    }

    Map<String, String> k8sEnv = new HashMap<>(getEnv());

    // environment variables (explicit env wins over the process environment,
    // which wins over the hard-coded default domain)
    k8sEnv.put(ENV_SERVICE_DOMAIN, getEnv().getOrDefault(ENV_SERVICE_DOMAIN,
        System.getenv(ENV_SERVICE_DOMAIN) == null
            ? "local.zeppelin-project.org" : System.getenv(ENV_SERVICE_DOMAIN)));
    k8sEnv.put(ENV_ZEPPELIN_HOME, getEnv().getOrDefault(ENV_ZEPPELIN_HOME,
        System.getenv(ENV_ZEPPELIN_HOME)));

    if (isSpark()) {
        int webUiPort = 4040;
        k8sProperties.put("zeppelin.k8s.spark.container.image", sparkImage);

        // There is already initial value following --driver-java-options added in interpreter.sh
        // so we need to pass spark.driver.defaultJavaOptions and spark.driver.extraJavaOptions
        // as SPARK_DRIVER_EXTRAJAVAOPTIONS_CONF env variable to build spark-submit command correctly.
        StringJoiner driverExtraJavaOpts = new StringJoiner(" ");
        if (properties.containsKey(SPARK_DRIVER_DEFAULTJAVAOPTS)) {
            driverExtraJavaOpts.add((String) properties.remove(SPARK_DRIVER_DEFAULTJAVAOPTS));
        }
        if (properties.containsKey(SPARK_DRIVER_EXTRAJAVAOPTS)) {
            driverExtraJavaOpts.add((String) properties.remove(SPARK_DRIVER_EXTRAJAVAOPTS));
        }
        if (driverExtraJavaOpts.length() > 0) {
            k8sEnv.put("SPARK_DRIVER_EXTRAJAVAOPTIONS_CONF", driverExtraJavaOpts.toString());
        }

        if (isSparkOnKubernetes(properties)) {
            addSparkK8sProperties();
            k8sEnv.put("ZEPPELIN_SPARK_CONF", prepareZeppelinSparkConf(userName));
        }
        k8sEnv.put("SPARK_HOME", getEnv().getOrDefault("SPARK_HOME", "/spark"));

        // configure interpreter property "zeppelin.spark.uiWebUrl" if not defined,
        // to enable spark ui through reverse proxy
        String webUrl = (String) properties.get("zeppelin.spark.uiWebUrl");
        if (StringUtils.isBlank(webUrl)) {
            webUrl = "//{{PORT}}-{{SERVICE_NAME}}.{{SERVICE_DOMAIN}}";
        }
        properties.put("zeppelin.spark.uiWebUrl", sparkUiWebUrlFromTemplate(
            webUrl,
            webUiPort,
            getPodName(),
            k8sEnv.get(ENV_SERVICE_DOMAIN)
        ));

        // configure interpreter property "zeppelin.k8s.spark.ingress.host" if not defined,
        // to enable spark ui through ingress
        String ingressHost = (String) properties.get("zeppelin.k8s.spark.ingress.host");
        if (StringUtils.isBlank(ingressHost)) {
            ingressHost = "{{PORT}}-{{SERVICE_NAME}}.{{SERVICE_DOMAIN}}";
        }
        properties.put("zeppelin.k8s.spark.ingress.host", sparkUiWebUrlFromTemplate(
            ingressHost,
            webUiPort,
            getPodName(),
            k8sEnv.get(ENV_SERVICE_DOMAIN)
        ));

        // Resources of Interpreter Pod
        if (properties.containsKey(SPARK_DRIVER_MEMORY)) {
            String memory;
            if (properties.containsKey(SPARK_DRIVER_MEMORY_OVERHEAD)) {
                memory = K8sUtils.calculateSparkMemory(properties.getProperty(SPARK_DRIVER_MEMORY),
                    properties.getProperty(SPARK_DRIVER_MEMORY_OVERHEAD));
            } else {
                memory = K8sUtils.calculateMemoryWithDefaultOverhead(properties.getProperty(SPARK_DRIVER_MEMORY));
            }
            k8sProperties.put("zeppelin.k8s.interpreter.memory", memory);
        }
        if (properties.containsKey(SPARK_DRIVER_CORES)) {
            k8sProperties.put("zeppelin.k8s.interpreter.cores", properties.getProperty(SPARK_DRIVER_CORES));
        }
    }
    k8sProperties.put("zeppelin.k8s.envs", k8sEnv);

    // interpreter properties overrides the values
    k8sProperties.putAll(Maps.fromProperties(properties));
    return k8sProperties;
}
@Test
void testSparkPodResourcesMemoryOverhead() {
    // With an explicit spark.driver.memoryOverhead the pod memory must be
    // driver memory + overhead (1g + 256m = 1280Mi), and cores pass through.
    // given
    Properties properties = new Properties();
    properties.put("spark.driver.memory", "1g");
    properties.put("spark.driver.memoryOverhead", "256m");
    properties.put("spark.driver.cores", "5");
    Map<String, String> envs = new HashMap<>();
    envs.put("SERVICE_DOMAIN", "mydomain");

    K8sRemoteInterpreterProcess intp = new K8sRemoteInterpreterProcess(
        client,
        "default",
        new File(".skip"),
        "interpreter-container:1.0",
        "shared_process",
        "spark",
        "myspark",
        properties,
        envs,
        "zeppelin.server.service",
        12320,
        false,
        "spark-container:1.0",
        10,
        10,
        false,
        false);

    // when
    Properties p = intp.getTemplateBindings(null);

    // then
    assertEquals("5", p.get("zeppelin.k8s.interpreter.cores"));
    assertEquals("1280Mi", p.get("zeppelin.k8s.interpreter.memory"));
}
@Override
public void deleteLevel(Long id) {
    // Validate that the level exists.
    validateLevelExists(id);
    // Validate that no users are still bound to this level.
    validateLevelHasUser(id);
    // Perform the deletion.
    memberLevelMapper.deleteById(id);
}
@Test
public void testDeleteLevel_success() {
    // mock data
    MemberLevelDO dbLevel = randomPojo(MemberLevelDO.class);
    memberlevelMapper.insert(dbLevel); // @Sql: insert an existing record first
    // prepare the parameter
    Long id = dbLevel.getId();

    // invoke
    levelService.deleteLevel(id);
    // verify the record no longer exists
    assertNull(memberlevelMapper.selectById(id));
}
public String doLayout(ILoggingEvent event) {
    // A layout that has not been started produces no output.
    return isStarted() ? writeLoopOnConverters(event) : CoreConstants.EMPTY_STRING;
}
@Test
public void prefixConverterSmoke() {
    // %prefix must prepend "logger=" to the wrapped converter's output.
    String pattern = "%prefix(%logger) %message";
    pl.setPattern(pattern);
    pl.start();
    String val = pl.doLayout(makeLoggingEvent("hello", null));
    assertEquals("logger=" + logger.getName() + " hello", val);
}
/**
 * Reads the source image, writes a scaled copy to the destination file, and
 * always releases the image buffer afterwards.
 *
 * @param srcImageFile  file to read the source image from
 * @param destImageFile file to write the scaled image to
 * @param scale         scaling factor passed to the image-based overload
 */
public static void scale(File srcImageFile, File destImageFile, float scale) {
    BufferedImage image = null;
    try {
        image = read(srcImageFile);
        scale(image, destImageFile, scale);
    } finally {
        // Release native buffer resources; image may still be null if read()
        // threw — assumes flush(null) is a safe no-op (TODO confirm).
        flush(image);
    }
}
@Test
@Disabled // manual smoke test: depends on local files under f:/test
public void scaleByWidthAndHeightTest() {
    ImgUtil.scale(FileUtil.file("f:/test/aaa.jpg"), FileUtil.file("f:/test/aaa_result.jpg"), 100, 400, Color.BLUE);
}
public void recordMetric(long time, String command, String user, long delta) {
    // Fold the delta into the (command, user) rolling window at the given time.
    getRollingWindow(command, user).incAt(time, delta);
}
@Test
public void testTotal() throws Exception {
    // Exercises rolling-window totals across bucket boundaries: values
    // accumulate within a window, and each op's window resets independently
    // once its own period elapses.
    Configuration config = new Configuration();
    config.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, 1);
    config.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, N_TOP_USERS);
    int period = 10;
    RollingWindowManager rollingWindowManager = new RollingWindowManager(config, period);

    long t = 0;
    rollingWindowManager.recordMetric(t, "op1", users[0], 3);
    checkValues(rollingWindowManager, t, "op1", 3, 3);
    // both should have a value.
    t = (long) (period * .5);
    rollingWindowManager.recordMetric(t, "op2", users[0], 4);
    checkValues(rollingWindowManager, t, "op1", 3, 7);
    checkValues(rollingWindowManager, t, "op2", 4, 7);
    // neither should reset.
    t = period - 1;
    checkValues(rollingWindowManager, t, "op1", 3, 7);
    checkValues(rollingWindowManager, t, "op2", 4, 7);
    // op1 should reset in its next period, but not op2.
    t = period;
    rollingWindowManager.recordMetric(10, "op1", users[0], 10);
    checkValues(rollingWindowManager, t, "op1", 10, 14);
    checkValues(rollingWindowManager, t, "op2", 4, 14);
    // neither should reset.
    t = (long) (period * 1.25);
    rollingWindowManager.recordMetric(t, "op2", users[0], 7);
    checkValues(rollingWindowManager, t, "op1", 10, 21);
    checkValues(rollingWindowManager, t, "op2", 11, 21);
    // op2 should reset.
    t = (long) (period * 1.5);
    rollingWindowManager.recordMetric(t, "op2", users[0], 13);
    checkValues(rollingWindowManager, t, "op1", 10, 23);
    checkValues(rollingWindowManager, t, "op2", 13, 23);
}
/**
 * Reads the versioning configuration of the bucket containing the given file,
 * caching the result per bucket. Access-denied, interoperability and
 * not-found failures degrade to an empty (disabled) configuration instead of
 * failing the caller; other service failures are mapped and rethrown.
 */
@Override
public VersioningConfiguration getConfiguration(final Path file) throws BackgroundException {
    final Path bucket = containerService.getContainer(file);
    if (cache.contains(bucket)) {
        return cache.get(bucket);
    }
    try {
        // For the root path there is no bucket name to query.
        final S3BucketVersioningStatus status
            = session.getClient().getBucketVersioningStatus(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName());
        if (null == status) {
            log.warn(String.format("Failure parsing versioning status for %s", bucket));
            return VersioningConfiguration.empty();
        }
        final VersioningConfiguration configuration = new VersioningConfiguration(status.isVersioningEnabled(),
            status.isMultiFactorAuthDeleteRequired());
        cache.put(bucket, configuration);
        return configuration;
    } catch (ServiceException e) {
        // Map the raw service failure, but swallow "expected" failure classes
        // into an empty configuration.
        try {
            throw new S3ExceptionMappingService().map("Cannot read container configuration", e);
        } catch (AccessDeniedException l) {
            log.warn(String.format("Missing permission to read versioning configuration for %s %s", bucket, e.getMessage()));
            return VersioningConfiguration.empty();
        } catch (InteroperabilityException | NotfoundException i) {
            log.warn(String.format("Not supported to read versioning configuration for %s %s", bucket, e.getMessage()));
            return VersioningConfiguration.empty();
        }
    }
}
@Test
public void testGetConfigurationEnabled() throws Exception {
    // The fixture bucket has versioning enabled; the feature must report it.
    final VersioningConfiguration configuration = new S3VersioningFeature(session, new S3AccessControlListFeature(session))
        .getConfiguration(new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)));
    assertNotNull(configuration);
    assertTrue(configuration.isEnabled());
}
/**
 * Drains packets from the source into {@code dst}.
 * <p>
 * Returns CLEAN when every pending packet has been fully written, DIRTY when
 * the current packet did not fit and must be resumed on the next call. The
 * partially written packet is carried over in {@code packet} across calls;
 * {@code dst} is always flipped before returning so it is ready to be read.
 */
@Override
public HandlerStatus onWrite() {
    compactOrClear(dst);
    try {
        for (; ; ) {
            if (packet == null) {
                packet = src.get();
                if (packet == null) {
                    // everything is processed, so we are done
                    return CLEAN;
                }
            }

            if (packetWriter.writeTo(packet, dst)) {
                // packet got written, lets see if another packet can be written
                packet = null;
            } else {
                // the packet didn't get written completely, so we are done.
                return DIRTY;
            }
        }
    } finally {
        dst.flip();
    }
}
@Test
public void whenNotEnoughSpace() {
    // A ~2000-byte packet cannot fit into a 1000-byte buffer, so the encoder
    // must report DIRTY (partial write, to be resumed later).
    final Packet packet = new Packet(serializationService.toBytes(new byte[2000]));

    ByteBuffer dst = ByteBuffer.allocate(1000);
    dst.flip();
    PacketSupplier src = new PacketSupplier();
    src.queue.add(packet);
    encoder.dst(dst);
    encoder.src(src);

    HandlerStatus result = encoder.onWrite();

    assertEquals(DIRTY, result);
}
protected void setUpJettyOptions( Node node ) {
    // Export every configured Jetty option as a JVM system property so the
    // embedded Jetty server picks it up.
    Map<String, String> jettyOptions = parseJettyOptions( node );
    if ( jettyOptions == null || jettyOptions.isEmpty() ) {
        return;
    }
    jettyOptions.forEach( System::setProperty );
}
@Test
public void testDoNotSetUpJettyOptionsAsSystemParameters_WhenNoOptionsNode() throws KettleXMLException {
    // Without an options node, no Jetty-related system properties may be set.
    Node configNode = getConfigNode( getConfigWithNoOptionsNode() );
    slServerConfig.setUpJettyOptions( configNode );
    assertFalse( "There should not be any jetty option but it is here: " + EXPECTED_ACCEPTORS_KEY, System
        .getProperties().containsKey( EXPECTED_ACCEPTORS_KEY ) );
    assertFalse( "There should not be any jetty option but it is here: " + EXPECTED_ACCEPT_QUEUE_SIZE_KEY, System
        .getProperties().containsKey( EXPECTED_ACCEPT_QUEUE_SIZE_KEY ) );
    assertFalse( "There should not be any jetty option but it is here: " + EXPECTED_LOW_RES_MAX_IDLE_TIME_KEY, System
        .getProperties().containsKey( EXPECTED_LOW_RES_MAX_IDLE_TIME_KEY ) );
}
/**
 * Checks that {@code value} is strictly greater than zero, delegating to the
 * {@code long} overload and narrowing the result back to int (safe: an int
 * value round-trips through long unchanged).
 *
 * @param value the value to validate
 * @param name  name used in the failure message
 * @return the validated value
 */
public static int gt0(int value, String name) {
    return (int) gt0((long) value, name);
}
@Test(expected = IllegalArgumentException.class)
public void checkGTZeroLessThanZero() {
    // Negative values must be rejected by gt0.
    Check.gt0(-1, "test");
}
// Not instantiable: construct via the static factory instead
// (callers use Impulse.create()).
private Impulse() {}
@Test
@Category({ValidatesRunner.class, UsesImpulse.class})
public void testImpulse() {
    // A single impulse element expanded via FlatMap must yield exactly 1,2,3.
    PCollection<Integer> result =
        p.apply(Impulse.create())
            .apply(
                FlatMapElements.into(TypeDescriptors.integers())
                    .via(impulse -> Arrays.asList(1, 2, 3)));
    PAssert.that(result).containsInAnyOrder(1, 2, 3);
    p.run().waitUntilFinish();
}
/**
 * Closes with an effectively unbounded timeout ({@code Long.MAX_VALUE}); the
 * boolean flag forwarded to {@code close(long, boolean)} is {@code false}
 * (its exact semantics are defined by that overload).
 */
public void close() {
    close(Long.MAX_VALUE, false);
}
@Test
public void shouldThrowOnNegativeTimeoutForClose() throws Exception {
    // close(Duration) must reject a negative timeout with IllegalArgumentException.
    prepareStreams();
    prepareStreamThread(streamThreadOne, 1);
    prepareStreamThread(streamThreadTwo, 2);
    prepareTerminableThread(streamThreadOne);
    try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
        assertThrows(IllegalArgumentException.class, () -> streams.close(Duration.ofMillis(-1L)));
    }
}