Dataset columns:
  focal_method — Java method under test; string lengths range from 13 to 60.9k characters.
  test_case   — corresponding unit test; string lengths range from 25 to 109k characters.
public TriRpcStatus withCause(Throwable cause) {
    // Copy-on-write: build a new status carrying the given cause while
    // leaving this instance's code and description untouched.
    final TriRpcStatus copy = new TriRpcStatus(this.code, cause, this.description);
    return copy;
}
@Test
void withCause() {
    // Attaching a cause must not mutate the original status instance.
    TriRpcStatus origin = TriRpcStatus.NOT_FOUND;
    Throwable boom = new IllegalStateException("test");
    TriRpcStatus derived = origin.withCause(boom);
    Assertions.assertNull(origin.cause);
    Assertions.assertTrue(derived.cause.getMessage().contains("test"));
}
public boolean isFound() {
    // Plain accessor for the 'found' flag.
    return this.found;
}
@Test
public void testCalcAverageSpeedDetails() {
    // The path 1->5 should be decomposed into four average-speed segments.
    Weighting weighting = new SpeedWeighting(carAvSpeedEnc);
    Path p = new Dijkstra(pathDetailGraph, weighting, TraversalMode.NODE_BASED).calcPath(1, 5);
    assertTrue(p.isFound());
    Map<String, List<PathDetail>> details = PathDetailsFromEdges.calcDetails(p, carManager, weighting, List.of(AVERAGE_SPEED), new PathDetailsBuilderFactory(), 0, pathDetailGraph);
    assertEquals(1, details.size());
    List<PathDetail> averageSpeedDetails = details.get(AVERAGE_SPEED);
    assertEquals(4, averageSpeedDetails.size());
    // Expected per-segment speed values (tolerance 1e-3).
    assertEquals(162.2, (double) averageSpeedDetails.get(0).getValue(), 1.e-3);
    assertEquals(327.3, (double) averageSpeedDetails.get(1).getValue(), 1.e-3);
    assertEquals(36.0, (double) averageSpeedDetails.get(2).getValue(), 1.e-3);
    assertEquals(162.2, (double) averageSpeedDetails.get(3).getValue(), 1.e-3);
    // Segment boundaries cover point indices 0..4 contiguously.
    assertEquals(0, averageSpeedDetails.get(0).getFirst());
    assertEquals(1, averageSpeedDetails.get(1).getFirst());
    assertEquals(2, averageSpeedDetails.get(2).getFirst());
    assertEquals(3, averageSpeedDetails.get(3).getFirst());
    assertEquals(4, averageSpeedDetails.get(3).getLast());
}
@Override
public AppResponse process(Flow flow, ActivateAppRequest body) {
    // Verifies the masked pincode and the app's identity, then either
    // finishes a combined account+app registration (pending letter-based
    // activation) or activates the app directly via the activation flow.
    String decodedPin = ChallengeService.decodeMaskedPin(appSession.getIv(), appAuthenticator.getSymmetricKey(), body.getMaskedPincode());
    // The decoded pin must be exactly five digits.
    if ((decodedPin == null || !Pattern.compile("\\d{5}").matcher(decodedPin).matches())) {
        return flow.setFailedStateAndReturnNOK(appSession);
    } else if (!appAuthenticator.getUserAppId().equals(body.getUserAppId())){
        // App-id mismatch: audit-log (code 754) and fail the flow.
        digidClient.remoteLog("754", Map.of(lowerUnderscore(ACCOUNT_ID) ,appAuthenticator.getAccountId()));
        return flow.setFailedStateAndReturnNOK(appSession);
    }
    appAuthenticator.setMaskedPin(decodedPin);
    appAuthenticator.setLastSignInAt(ZonedDateTime.now());
    // Kill switch: when the digid app switch is disabled, log (824) and abort.
    if (!switchService.digidAppSwitchEnabled() ) {
        digidClient.remoteLog("824", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId()));
        throw new SwitchDisabledException();
    }
    if (flow instanceof RequestAccountAndAppFlow || flow instanceof ActivateAppWithPasswordLetterFlow) {
        // Registration path: the backend finishes the registration and, while
        // still PENDING, returns an activation code plus a validity period.
        Map<String, String> result = digidClient.finishRegistration(appSession.getRegistrationId(), appSession.getAccountId(), flow.getName());
        if (result.get(lowerUnderscore(STATUS)).equals("PENDING") && result.get(lowerUnderscore(ACTIVATION_CODE)) != null && result.get(lowerUnderscore(GELDIGHEIDSTERMIJN)) != null) {
            appAuthenticator.setStatus("pending");
            appAuthenticator.setActivationCode(result.get(lowerUnderscore(ACTIVATION_CODE)));
            appAuthenticator.setGeldigheidstermijn(result.get(lowerUnderscore(GELDIGHEIDSTERMIJN)));
            appAuthenticator.setRequestedAt(ZonedDateTime.now());
            return new StatusResponse("PENDING");
        } else {
            return new NokResponse();
        }
    } else {
        // Any other flow is assumed to be an ActivationFlow: activate now.
        return ((ActivationFlow) flow).activateApp(appAuthenticator, appSession);
    }
}
@Test
void processPendingRequestAccountAndAppFlow(){
    // When the backend reports PENDING together with an activation code and
    // validity period, process() should return a StatusResponse and move the
    // authenticator's status to "pending".
    Map<String, String> finishRegistrationResponse = Map.of(lowerUnderscore(STATUS), "PENDING", lowerUnderscore(ACTIVATION_CODE), "abcd", lowerUnderscore(GELDIGHEIDSTERMIJN), "20");
    when(switchService.digidAppSwitchEnabled()).thenReturn(true);
    when(digidClientMock.finishRegistration(TEST_REGISTRATION_ID, TEST_ACCOUNT_ID, RequestAccountAndAppFlow.NAME)).thenReturn(finishRegistrationResponse);
    AppResponse appResponse = pincodeSet.process(mockedRequestFlow, mockedActivateAppRequest);
    assertTrue(appResponse instanceof StatusResponse);
    assertEquals("pending", mockedAppAuthenticator.getStatus());
}
@Override
public byte[] getBytes(final int columnIndex) throws SQLException {
    // Fetch the raw value from the merged result set, then normalize it
    // through the shared conversion utility before casting back.
    final Object raw = mergeResultSet.getValue(columnIndex, byte[].class);
    return (byte[]) ResultSetUtils.convertValue(raw, byte[].class);
}
@Test
void assertGetBytesWithColumnLabel() throws SQLException {
    // Column labels resolve to indexes internally, so stub index 1.
    byte[] stubbed = {(byte) 1};
    when(mergeResultSet.getValue(1, byte[].class)).thenReturn(stubbed);
    assertThat(shardingSphereResultSet.getBytes("label"), is(new byte[]{(byte) 1}));
}
public synchronized TopologyDescription describe() {
    // Synchronized so a concurrent structural change cannot be observed
    // mid-description; the internal builder does the real work.
    final TopologyDescription description = internalTopologyBuilder.describe();
    return description;
}
@Test
public void timeWindowZeroArgCountShouldPreserveTopologyStructure() {
    // A windowed count over a keyed stream should produce exactly one
    // sub-topology (source -> aggregate) backed by a persistent store.
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.of(ofMillis(1)))
        .count();
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    // The description string is an exact-match contract; do not reformat it.
    assertEquals(
        "Topologies:\n" +
        "   Sub-topology: 0\n" +
        "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
        "      --> KSTREAM-AGGREGATE-0000000002\n" +
        "    Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
        "      --> none\n" +
        "      <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    // The windowed count's state store must be persistent by default.
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
public String convert(Object o) {
    // Walk the linked list of token converters, concatenating each
    // converter's contribution for the given object.
    final StringBuilder result = new StringBuilder();
    for (Converter<Object> c = headTokenConverter; c != null; c = c.getNext()) {
        result.append(c.convert(o));
    }
    return result.toString();
}
@Test
public void date() {
    // 2003-05-20 17:55 (Calendar months are 0-based, so 4 == May).
    Calendar cal = Calendar.getInstance();
    cal.set(2003, 4, 20, 17, 55);
    // %d token at the start of the pattern, with time, and at the end.
    FileNamePattern pp = new FileNamePattern("foo%d{yyyy.MM.dd}", context);
    assertEquals("foo2003.05.20", pp.convert(cal.getTime()));
    pp = new FileNamePattern("foo%d{yyyy.MM.dd HH:mm}", context);
    assertEquals("foo2003.05.20 17:55", pp.convert(cal.getTime()));
    pp = new FileNamePattern("%d{yyyy.MM.dd HH:mm} foo", context);
    assertEquals("2003.05.20 17:55 foo", pp.convert(cal.getTime()));
}
public static boolean matchIpExpression(String pattern, String address) throws UnknownHostException { if (address == null) { return false; } String host = address; int port = 0; // only works for ipv4 address with 'ip:port' format if (address.endsWith(":")) { String[] hostPort = address.split(":"); host = hostPort[0]; port = StringUtils.parseInteger(hostPort[1]); } // if the pattern is subnet format, it will not be allowed to config port param in pattern. if (pattern.contains("/")) { CIDRUtils utils = new CIDRUtils(pattern); return utils.isInRange(host); } return matchIpRange(pattern, host, port); }
@Test
void testMatchIpMatch() throws UnknownHostException {
    // Wildcard pattern and CIDR subnet pattern should both match hosts
    // inside their respective ranges.
    assertTrue(NetUtils.matchIpExpression("192.168.1.*", "192.168.1.63", 90));
    assertTrue(NetUtils.matchIpExpression("192.168.1.192/26", "192.168.1.199", 90));
}
public Configuration parse() {
    // A builder consumes its XML stream exactly once; reject reuse up front.
    if (this.parsed) {
        throw new BuilderException("Each XMLConfigBuilder can only be used once.");
    }
    this.parsed = true;
    // Evaluate the document root and populate the Configuration from it.
    parseConfiguration(parser.evalNode("/configuration"));
    return this.configuration;
}
@Test
void parse() throws IOException {
    // Parse the MyBatis-Plus XML configuration from the classpath and verify
    // the result is a MybatisConfiguration with the expected mapped statement.
    ResourceLoader loader = new DefaultResourceLoader();
    Resource resource = loader.getResource("classpath:/MybatisXMLConfigBuilderTest.xml");
    MybatisXMLConfigBuilder builder = new MybatisXMLConfigBuilder(resource.getInputStream(), null);
    Configuration configuration = builder.parse();
    assertThat(configuration).isInstanceOf(MybatisConfiguration.class);
    assertThat(configuration.getMappedStatement("com.baomidou.mybatisplus.core.MybatisXMLConfigBuilderTest$EntityMapper.selectCount"))
        .isNotNull();
}
@Override
public void verify(String value) {
    // Parse eagerly; a non-numeric value surfaces as NumberFormatException.
    final long parsed = Long.parseLong(value);
    final boolean inRange = parsed >= min && parsed <= max;
    if (!inRange) {
        throw new RuntimeException(format("value is not in range(%d, %d)", min, max));
    }
}
@Test
public void verify_ValueLessThanMin_ThrowsRuntimeException() {
    // A value below the configured minimum (0) must be rejected with the
    // range message.
    RuntimeException thrown =
        assertThrows(RuntimeException.class, () -> longRangeAttribute.verify("-1"));
    assertEquals("value is not in range(0, 100)", thrown.getMessage());
}
public static boolean createFile(final Path filePath) {
    // Best-effort creation of the file and any missing parent directories.
    // Returns false rather than throwing on any failure, including a path
    // that has no parent component at all.
    try {
        final Path parent = filePath.getParent();
        if (parent == null) {
            return false;
        }
        if (Files.notExists(parent)) {
            Files.createDirectories(parent);
        }
        if (Files.notExists(filePath)) {
            Files.createFile(filePath);
        }
        return true;
    } catch (final Exception ignored) {
        // Deliberate best-effort contract: swallow and report failure.
        return false;
    }
}
@Test
void testCreateFileRealDir() {
    // Creating a file under an already-existing directory should succeed.
    Path historyFile = Paths.get(realFolder.toFile().getPath(), "history.file");
    CliUtils.createFile(historyFile);
    assertThat(Files.exists(historyFile)).isTrue();
}
@GetMapping("/by-namespace-and-releases-not-in")
public List<InstanceDTO> getByReleasesNotIn(@RequestParam("appId") String appId,
                                            @RequestParam("clusterName") String clusterName,
                                            @RequestParam("namespaceName") String namespaceName,
                                            @RequestParam("releaseIds") String releaseIds) {
    // Finds the instances in the given namespace whose active release key is
    // NOT among the given release ids, returning each instance together with
    // the (configuration-stripped) release it actually holds.
    Set<Long> releaseIdSet = RELEASES_SPLITTER.splitToList(releaseIds).stream().map(Long::parseLong)
        .collect(Collectors.toSet());
    List<Release> releases = releaseService.findByReleaseIds(releaseIdSet);
    if (CollectionUtils.isEmpty(releases)) {
        throw NotFoundException.releaseNotFound(releaseIds);
    }
    Set<String> releaseKeys = releases.stream().map(Release::getReleaseKey).collect(Collectors
        .toSet());
    List<InstanceConfig> instanceConfigs = instanceService
        .findInstanceConfigsByNamespaceWithReleaseKeysNotIn(appId, clusterName, namespaceName,
            releaseKeys);
    // Group configs per instance and collect the "other" release keys in use.
    Multimap<Long, InstanceConfig> instanceConfigMap = HashMultimap.create();
    Set<String> otherReleaseKeys = Sets.newHashSet();
    for (InstanceConfig instanceConfig : instanceConfigs) {
        instanceConfigMap.put(instanceConfig.getInstanceId(), instanceConfig);
        otherReleaseKeys.add(instanceConfig.getReleaseKey());
    }
    List<Instance> instances = instanceService.findInstancesByIds(instanceConfigMap.keySet());
    if (CollectionUtils.isEmpty(instances)) {
        return Collections.emptyList();
    }
    List<InstanceDTO> instanceDTOs = BeanUtils.batchTransform(InstanceDTO.class, instances);
    // Resolve the releases those instances actually use, keyed by release key.
    List<Release> otherReleases = releaseService.findByReleaseKeys(otherReleaseKeys);
    Map<String, ReleaseDTO> releaseMap = Maps.newHashMap();
    for (Release release : otherReleases) {
        //unset configurations to save space
        release.setConfigurations(null);
        ReleaseDTO releaseDTO = BeanUtils.transform(ReleaseDTO.class, release);
        releaseMap.put(release.getReleaseKey(), releaseDTO);
    }
    // Attach each instance's configs (release + delivery/modification times).
    for (InstanceDTO instanceDTO : instanceDTOs) {
        Collection<InstanceConfig> configs = instanceConfigMap.get(instanceDTO.getId());
        List<InstanceConfigDTO> configDTOs = configs.stream().map(instanceConfig -> {
            InstanceConfigDTO instanceConfigDTO = new InstanceConfigDTO();
            instanceConfigDTO.setRelease(releaseMap.get(instanceConfig.getReleaseKey()));
            instanceConfigDTO.setReleaseDeliveryTime(instanceConfig.getReleaseDeliveryTime());
            instanceConfigDTO.setDataChangeLastModifiedTime(instanceConfig
                .getDataChangeLastModifiedTime());
            return instanceConfigDTO;
        }).collect(Collectors.toList());
        instanceDTO.setConfigs(configDTOs);
    }
    return instanceDTOs;
}
@Test
public void testGetByReleasesNotIn() throws Exception {
    // End-to-end (mocked-service) check of getByReleasesNotIn: two instances
    // hold releases outside the queried set; the controller must return both
    // with the correct release keys and delivery times attached.
    String someConfigAppId = "someConfigAppId";
    String someConfigClusterName = "someConfigClusterName";
    String someConfigNamespaceName = "someConfigNamespaceName";
    long someReleaseId = 1;
    long anotherReleaseId = 2;
    String releaseIds = Joiner.on(",").join(someReleaseId, anotherReleaseId);
    Date someReleaseDeliveryTime = new Date();
    Date anotherReleaseDeliveryTime = new Date();
    // The releases being excluded.
    Release someRelease = mock(Release.class);
    Release anotherRelease = mock(Release.class);
    String someReleaseKey = "someReleaseKey";
    String anotherReleaseKey = "anotherReleaseKey";
    when(someRelease.getReleaseKey()).thenReturn(someReleaseKey);
    when(anotherRelease.getReleaseKey()).thenReturn(anotherReleaseKey);
    when(releaseService.findByReleaseIds(Sets.newHashSet(someReleaseId, anotherReleaseId)))
        .thenReturn(Lists.newArrayList(someRelease, anotherRelease));
    long someInstanceId = 1;
    long anotherInstanceId = 2;
    // The "other" release keys the instances actually hold.
    String someInstanceConfigReleaseKey = "someInstanceConfigReleaseKey";
    String anotherInstanceConfigReleaseKey = "anotherInstanceConfigReleaseKey";
    InstanceConfig someInstanceConfig = mock(InstanceConfig.class);
    InstanceConfig anotherInstanceConfig = mock(InstanceConfig.class);
    when(someInstanceConfig.getInstanceId()).thenReturn(someInstanceId);
    when(anotherInstanceConfig.getInstanceId()).thenReturn(anotherInstanceId);
    when(someInstanceConfig.getReleaseKey()).thenReturn(someInstanceConfigReleaseKey);
    when(anotherInstanceConfig.getReleaseKey()).thenReturn(anotherInstanceConfigReleaseKey);
    when(someInstanceConfig.getReleaseDeliveryTime()).thenReturn(someReleaseDeliveryTime);
    when(anotherInstanceConfig.getReleaseDeliveryTime()).thenReturn(anotherReleaseDeliveryTime);
    when(instanceService.findInstanceConfigsByNamespaceWithReleaseKeysNotIn(someConfigAppId,
        someConfigClusterName, someConfigNamespaceName, Sets.newHashSet(someReleaseKey,
            anotherReleaseKey))).thenReturn(Lists.newArrayList(someInstanceConfig, anotherInstanceConfig));
    String someInstanceAppId = "someInstanceAppId";
    String someInstanceClusterName = "someInstanceClusterName";
    String someInstanceNamespaceName = "someInstanceNamespaceName";
    String someIp = "someIp";
    String anotherIp = "anotherIp";
    Instance someInstance = assembleInstance(someInstanceId, someInstanceAppId,
        someInstanceClusterName, someInstanceNamespaceName, someIp);
    Instance anotherInstance = assembleInstance(anotherInstanceId, someInstanceAppId,
        someInstanceClusterName, someInstanceNamespaceName, anotherIp);
    when(instanceService.findInstancesByIds(Sets.newHashSet(someInstanceId, anotherInstanceId)))
        .thenReturn(Lists.newArrayList(someInstance, anotherInstance));
    Release someInstanceConfigRelease = new Release();
    someInstanceConfigRelease.setReleaseKey(someInstanceConfigReleaseKey);
    Release anotherInstanceConfigRelease = new Release();
    anotherInstanceConfigRelease.setReleaseKey(anotherInstanceConfigReleaseKey);
    when(releaseService.findByReleaseKeys(Sets.newHashSet(someInstanceConfigReleaseKey,
        anotherInstanceConfigReleaseKey))).thenReturn(Lists.newArrayList(someInstanceConfigRelease,
        anotherInstanceConfigRelease));
    List<InstanceDTO> result = instanceConfigController.getByReleasesNotIn(someConfigAppId,
        someConfigClusterName, someConfigNamespaceName, releaseIds);
    assertEquals(2, result.size());
    // Match result DTOs back to the stubbed instances by id (order unspecified).
    InstanceDTO someInstanceDto = null;
    InstanceDTO anotherInstanceDto = null;
    for (InstanceDTO instanceDTO : result) {
        if (instanceDTO.getId() == someInstanceId) {
            someInstanceDto = instanceDTO;
        } else if (instanceDTO.getId() == anotherInstanceId) {
            anotherInstanceDto = instanceDTO;
        }
    }
    verifyInstance(someInstance, someInstanceDto);
    verifyInstance(anotherInstance, anotherInstanceDto);
    assertEquals(someInstanceConfigReleaseKey, someInstanceDto.getConfigs().get(0).getRelease()
        .getReleaseKey());
    assertEquals(anotherInstanceConfigReleaseKey, anotherInstanceDto.getConfigs().get(0)
        .getRelease()
        .getReleaseKey());
    assertEquals(someReleaseDeliveryTime, someInstanceDto.getConfigs().get(0).getReleaseDeliveryTime());
    assertEquals(anotherReleaseDeliveryTime, anotherInstanceDto.getConfigs().get(0)
        .getReleaseDeliveryTime());
}
public PathAttributes deserialize(final T serialized) {
    // Reconstructs a PathAttributes object key-by-key from its serialized
    // dictionary form. Absent keys leave the corresponding attribute at its
    // default; numeric fields are stored as strings and parsed here.
    final Deserializer<T> dict = factory.create(serialized);
    final PathAttributes attributes = new PathAttributes();
    final String sizeObj = dict.stringForKey("Size");
    if(sizeObj != null) {
        attributes.setSize(Long.parseLong(sizeObj));
    }
    // Quota is stored as the remaining amount; pair it with the parsed size.
    final String quotaObj = dict.stringForKey("Quota");
    if(quotaObj != null) {
        attributes.setQuota(new Quota.Space(attributes.getSize(), Long.parseLong(quotaObj)));
    }
    final String modifiedObj = dict.stringForKey("Modified");
    if(modifiedObj != null) {
        attributes.setModificationDate(Long.parseLong(modifiedObj));
    }
    final String createdObj = dict.stringForKey("Created");
    if(createdObj != null) {
        attributes.setCreationDate(Long.parseLong(createdObj));
    }
    final String revisionObj = dict.stringForKey("Revision");
    if(revisionObj != null) {
        attributes.setRevision(Long.parseLong(revisionObj));
    }
    final String etagObj = dict.stringForKey("ETag");
    if(etagObj != null) {
        attributes.setETag(etagObj);
    }
    final Object permissionObj = dict.objectForKey("Permission");
    if(permissionObj != null) {
        attributes.setPermission(new PermissionDictionary<>().deserialize(permissionObj));
    }
    attributes.setOwner(dict.stringForKey("Owner"));
    attributes.setGroup(dict.stringForKey("Group"));
    final Object aclObj = dict.objectForKey("Acl");
    if(aclObj != null) {
        attributes.setAcl(new AclDictionary<>().deserialize(aclObj));
    }
    // "Link" may be serialized either as a map (Url + Type) or, in the legacy
    // form, as a bare URL string defaulting to the http type.
    if(dict.mapForKey("Link") != null) {
        final Map<String, String> link = dict.mapForKey("Link");
        attributes.setLink(new DescriptiveUrl(URI.create(link.get("Url")), DescriptiveUrl.Type.valueOf(link.get("Type"))));
    }
    else {
        final String linkObj = dict.stringForKey("Link");
        if(linkObj != null) {
            attributes.setLink(new DescriptiveUrl(URI.create(linkObj), DescriptiveUrl.Type.http));
        }
    }
    // "Checksum" likewise: structured map (Algorithm/Hash/Base64) or a plain
    // string handed to Checksum.parse.
    if(dict.mapForKey("Checksum") != null) {
        final Map<String, String> checksum = dict.mapForKey("Checksum");
        attributes.setChecksum(new Checksum(HashAlgorithm.valueOf(checksum.get("Algorithm")), checksum.get("Hash"),
            checksum.get("Base64")));
    }
    else {
        attributes.setChecksum(Checksum.parse(dict.stringForKey("Checksum")));
    }
    attributes.setVersionId(dict.stringForKey("Version"));
    attributes.setFileId(dict.stringForKey("File Id"));
    attributes.setDisplayname(dict.stringForKey("Display Name"));
    attributes.setLockId(dict.stringForKey("Lock Id"));
    final String duplicateObj = dict.stringForKey("Duplicate");
    if(duplicateObj != null) {
        attributes.setDuplicate(Boolean.parseBoolean(duplicateObj));
    }
    final String hiddenObj = dict.stringForKey("Hidden");
    if(hiddenObj != null) {
        attributes.setHidden(Boolean.parseBoolean(hiddenObj));
    }
    // Metadata is intentionally not round-tripped; reset to empty.
    attributes.setMetadata(Collections.emptyMap());
    attributes.setRegion(dict.stringForKey("Region"));
    attributes.setStorageClass(dict.stringForKey("Storage Class"));
    final T vaultObj = dict.objectForKey("Vault");
    if(vaultObj != null) {
        attributes.setVault(new PathDictionary<>(factory).deserialize(vaultObj));
    }
    final Map<String, String> customObj = dict.mapForKey("Custom");
    if(customObj != null) {
        attributes.setCustom(customObj);
    }
    final String verdictObj = dict.stringForKey("Verdict");
    if(verdictObj != null) {
        attributes.setVerdict(PathAttributes.Verdict.valueOf(verdictObj));
    }
    return attributes;
}
@Test
public void testSerializeHashCode() {
    // A serialize/deserialize round trip must preserve both equality and
    // hash code of the attributes.
    final PathAttributes attributes = new PathAttributes();
    attributes.setPermission(new Permission(644));
    attributes.setDuplicate(true);
    attributes.setVersionId("v-1");
    attributes.setFileId("myUniqueId");
    attributes.setDisplayname("myShinyNameOnDisplay");
    attributes.setModificationDate(System.currentTimeMillis());
    final PathAttributes restored =
        new PathAttributesDictionary<>().deserialize(attributes.serialize(SerializerFactory.get()));
    assertEquals(attributes, restored);
    assertEquals(attributes.hashCode(),
        new PathAttributesDictionary<>().deserialize(attributes.serialize(SerializerFactory.get())).hashCode());
}
@Override
public void close() {
    // Stop accepting new display tasks; already-submitted work drains.
    displayResultExecutorService.shutdown();
}
@Test
void testFailedBatchResult() {
    // A changelog result that throws during retrieval should surface the
    // SqlExecutionException through displayResults, and close() must still
    // propagate to the underlying result.
    final Configuration testConfig = new Configuration();
    testConfig.set(EXECUTION_RESULT_MODE, ResultMode.TABLEAU);
    testConfig.set(RUNTIME_MODE, RuntimeExecutionMode.BATCH);
    ResultDescriptor resultDescriptor = new ResultDescriptor(CliClientTestUtils.createTestClient(schema), testConfig);
    TestChangelogResult changelogResult = new TestChangelogResult(
        () -> {
            throw new SqlExecutionException("query failed");
        });
    CliTableauResultView view = new CliTableauResultView(
        terminal, resultDescriptor, changelogResult, System.currentTimeMillis());
    assertThatThrownBy(view::displayResults)
        .satisfies(anyCauseMatches(SqlExecutionException.class, "query failed"));
    view.close();
    assertThat(changelogResult.closed).isTrue();
}
public static L3ModificationInstruction modArpTha(MacAddress addr) {
    // Builds an ARP target-hardware-address rewrite instruction.
    checkNotNull(addr, "Dst l3 ARP address cannot be null");
    final L3SubType subtype = L3SubType.ARP_THA;
    return new ModArpEthInstruction(subtype, addr);
}
@Test
public void testModArpThaMethod() {
    // modArpTha should yield an L3MODIFICATION instruction of subtype
    // ARP_THA carrying the given MAC address.
    final Instruction instruction = Instructions.modArpTha(mac1);
    final L3ModificationInstruction.ModArpEthInstruction converted =
        checkAndConvert(instruction,
                        Instruction.Type.L3MODIFICATION,
                        L3ModificationInstruction.ModArpEthInstruction.class);
    assertThat(converted.subtype(), is(L3ModificationInstruction.L3SubType.ARP_THA));
    assertThat(converted.mac(), is(mac1));
}
@Udf
public Integer length(@UdfParameter final String jsonArray) {
    // SQL NULL in -> SQL NULL out.
    if (jsonArray == null) {
        return null;
    }
    final JsonNode parsed = UdfJsonMapper.parseJson(jsonArray);
    // Unparsable input and non-array JSON (scalars, objects) also map to
    // NULL rather than an error.
    final boolean isArray = !parsed.isMissingNode() && parsed.isArray();
    return isArray ? parsed.size() : null;
}
@Test public void shouldReturnNullForNumber() { // When: final Integer result = udf.length("123"); // Then: assertNull(result); }
@Override
public double sd() {
    // Standard deviation scales linearly with the distribution's scale
    // parameter; PI_SQRT3 presumably holds the constant factor pi/sqrt(3)
    // (consistent with the unit test's expectation) — confirm against its
    // definition.
    return PI_SQRT3 * scale;
}
@Test
public void testSd() {
    // sd() of Logistic(location=2, scale=1) is pi/sqrt(3) ~ 1.8138.
    // Removed leftover debug output (System.out.println) and a discarded
    // instance.rand() call that only perturbed RNG state without being
    // asserted on.
    LogisticDistribution instance = new LogisticDistribution(2.0, 1.0);
    assertEquals(Math.PI / Math.sqrt(3), instance.sd(), 1E-7);
}
/**
 * Normalizes a BigDecimal[], double[], or List value into a BigDecimal array.
 * Throws MaestroInternalError for unsupported types or unparsable numbers.
 */
public static BigDecimal[] toDecimalArray(String name, Object value) {
    try {
        if (value instanceof BigDecimal[]) {
            // Already the target type; return as-is.
            return (BigDecimal[]) value;
        }
        if (value instanceof double[]) {
            // Go through the decimal string form to keep the shortest
            // round-trippable representation of each double.
            double[] doubles = (double[]) value;
            BigDecimal[] converted = new BigDecimal[doubles.length];
            for (int i = 0; i < doubles.length; i++) {
                converted[i] = new BigDecimal(String.valueOf(doubles[i]));
            }
            return converted;
        }
        if (value instanceof List) {
            List<?> items = (List<?>) value;
            BigDecimal[] converted = new BigDecimal[items.size()];
            int i = 0;
            for (Object item : items) {
                converted[i++] = new BigDecimal(String.valueOf(item));
            }
            return converted;
        }
        throw new MaestroInternalError(
            "Cannot cast value [%s] into a BigDecimal array for param [%s]",
            toTruncateString(value), name);
    } catch (NumberFormatException nfe) {
        throw new MaestroInternalError(
            nfe, "Invalid number format for value: %s for param [%s]", toTruncateString(value), name);
    }
}
@Test
public void testDecimalArrayToDecimalArray() {
    // A BigDecimal[] input is passed through unchanged (identity conversion).
    BigDecimal[] input = {new BigDecimal("1.2"), new BigDecimal("3.4"), new BigDecimal("5.6")};
    assertArrayEquals(input, ParamHelper.toDecimalArray("foo", input));
}
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
    // For every broker above the configured overload threshold, selects the
    // highest-throughput bundles (not recently unloaded) until enough traffic
    // is marked to bring the broker back under threshold plus a margin.
    selectedBundlesCache.clear();
    final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    // Check every broker and select
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        // Weighted max over CPU / direct memory / bandwidth in / bandwidth out.
        final double currentUsage = localData.getMaxResourceUsageWithWeight(
            conf.getLoadBalancerCPUResourceWeight(),
            conf.getLoadBalancerDirectMemoryResourceWeight(),
            conf.getLoadBalancerBandwidthInResourceWeight(),
            conf.getLoadBalancerBandwidthOutResourceWeight());
        if (currentUsage < overloadThreshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Broker is not overloaded, ignoring at this point ({})", broker,
                    localData.printResourceUsage());
            }
            return;
        }
        // We want to offload enough traffic such that this broker will go below the overload threshold
        // Also, add a small margin so that this broker won't be very close to the threshold edge.
        double percentOfTrafficToOffload = currentUsage - overloadThreshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
        log.info(
            "Attempting to shed load on {}, which has resource usage {}% above threshold {}%"
                + " -- Offloading at least {} MByte/s of traffic ({})",
            broker, 100 * currentUsage, 100 * overloadThreshold, minimumThroughputToOffload / 1024 / 1024,
            localData.printResourceUsage());
        MutableDouble trafficMarkedToOffload = new MutableDouble(0);
        MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false);
        if (localData.getBundles().size() > 1) {
            // Sort bundles by throughput, then pick the biggest N which combined
            // make up for at least the minimum throughput to offload
            loadData.getBundleDataForLoadShedding().entrySet().stream()
                .filter(e -> localData.getBundles().contains(e.getKey()))
                .map((e) -> {
                    // Map to throughput value
                    // Consider short-term byte rate to address system resource burden
                    String bundle = e.getKey();
                    BundleData bundleData = e.getValue();
                    TimeAverageMessageData shortTermData = bundleData.getShortTermData();
                    double throughput = shortTermData.getMsgThroughputIn() + shortTermData
                        .getMsgThroughputOut();
                    return Pair.of(bundle, throughput);
                }).filter(e -> {
                    // Only consider bundles that were not already unloaded recently
                    return !recentlyUnloadedBundles.containsKey(e.getLeft());
                }).sorted((e1, e2) -> {
                    // Sort by throughput in reverse order
                    return Double.compare(e2.getRight(), e1.getRight());
                }).forEach(e -> {
                    // Keep selecting until the target throughput is reached; the
                    // flag guarantees at least one bundle is always picked.
                    if (trafficMarkedToOffload.doubleValue() < minimumThroughputToOffload
                            || atLeastOneBundleSelected.isFalse()) {
                        selectedBundlesCache.put(broker, e.getLeft());
                        trafficMarkedToOffload.add(e.getRight());
                        atLeastOneBundleSelected.setTrue();
                    }
                });
        } else if (localData.getBundles().size() == 1) {
            // A single bundle cannot be split off this broker by shedding.
            log.warn(
                "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                    + "No Load Shedding will be done on this broker",
                localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });
    return selectedBundlesCache;
}
@Test
public void testBrokerWithMultipleBundles() {
    // An overloaded broker with 10 bundles should shed only its own
    // highest-throughput bundles (bundle-10, then bundle-9), never bundles
    // belonging to another broker.
    int numBundles = 10;
    LoadData loadData = new LoadData();
    LocalBrokerData broker1 = new LocalBrokerData();
    // 999/1000 bandwidth usage puts broker-1 over the overload threshold.
    broker1.setBandwidthIn(new ResourceUsage(999, 1000));
    broker1.setBandwidthOut(new ResourceUsage(999, 1000));
    LocalBrokerData anotherBroker = new LocalBrokerData();
    String anotherBrokerName = "another-broker";
    double brokerThroghput = 0;
    for (int i = 1; i <= numBundles; i++) {
        broker1.getBundles().add("bundle-" + i);
        BundleData bundle = new BundleData();
        TimeAverageMessageData db = new TimeAverageMessageData();
        // Throughput grows with i, so bundle-10 is the heaviest.
        double throughput = i * 1024 * 1024;
        db.setMsgThroughputIn(throughput);
        db.setMsgThroughputOut(throughput);
        bundle.setShortTermData(db);
        loadData.getBundleData().put("bundle-" + i, bundle);
        // This bundle should not be selected for `broker1` since it is belong to another broker.
        String anotherBundleName = anotherBrokerName + "-bundle-" + (numBundles + i);
        loadData.getBundleData().put(anotherBundleName, bundle);
        anotherBroker.getBundles().add(anotherBundleName);
        brokerThroghput += throughput;
    }
    broker1.setMsgThroughputIn(brokerThroghput);
    broker1.setMsgThroughputOut(brokerThroghput);
    loadData.getBrokerData().put("broker-1", new BrokerData(broker1));
    loadData.getBrokerData().put(anotherBrokerName, new BrokerData(anotherBroker));
    Multimap<String, String> bundlesToUnload = os.findBundlesForUnloading(loadData, conf);
    assertFalse(bundlesToUnload.isEmpty());
    assertEquals(bundlesToUnload.get("broker-1"), List.of("bundle-10", "bundle-9"));
}
public PipelineColumnMetaData getColumnMetaData(final int columnIndex) {
    // JDBC-style 1-based column index: resolve the column name first, then
    // delegate to the name-based overload.
    return getColumnMetaData(columnNames.get(columnIndex - 1));
}
@Test
void assertIsPrimaryKey() {
    // Column indexes are 1-based; the first column is expected to be unique.
    PipelineColumnMetaData firstColumn = pipelineTableMetaData.getColumnMetaData(1);
    assertTrue(firstColumn.isUniqueKey());
}
// Negation of isEmpty; a null collection counts as empty, so null -> false.
public static boolean isNotEmpty(Collection coll) { return !CollectionUtils.isEmpty(coll); }
@Test
void testIsNotEmpty() {
    // Non-empty collection -> true; empty list and null both count as empty.
    assertTrue(CollectionUtils.isNotEmpty(Collections.singletonList("target")));
    assertFalse(CollectionUtils.isNotEmpty(Collections.emptyList()));
    assertFalse(CollectionUtils.isNotEmpty(null));
}
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
    // Convenience factory: build the SCC finder and run its recursive search.
    final EdgeBasedTarjanSCC tarjan =
        new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents);
    return tarjan.findComponentsRecursive();
}
@Test
public void smallGraph() {
    // 3<-0->2-1
    // Directed edges 0->2 and 0->3 are one-way; 2-1 is bidirectional, so the
    // only multi-edge SCC is the 2<->1 pair (edge-keys 4 and 5).
    g.edge(0, 2).setDistance(1).set(speedEnc, 10, 0); // edge-keys 0,1
    g.edge(0, 3).setDistance(1).set(speedEnc, 10, 0); // edge-keys 2,3
    g.edge(2, 1).setDistance(1).set(speedEnc, 10, 10); // edge-keys 4,5
    ConnectedComponents result = EdgeBasedTarjanSCC.findComponentsRecursive(g, fwdAccessFilter, false);
    assertEquals(6, result.getEdgeKeys());
    // One two-edge component plus four single-edge components.
    assertEquals(5, result.getTotalComponents());
    assertEquals(1, result.getComponents().size());
    assertEquals(result.getComponents().get(0), result.getBiggestComponent());
    assertEquals(IntArrayList.from(5, 4), result.getComponents().get(0));
    assertEquals(4, result.getSingleEdgeComponents().cardinality());
    for (IntCursor c : IntArrayList.from(0, 1, 2, 3))
        assertTrue(result.getSingleEdgeComponents().get(c.value));
}
@Override
@TpsControl(pointName = "ConfigPublish")
@Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG)
@ExtractorManager.Extractor(rpcExtractor = ConfigRequestParamExtractor.class)
public ConfigPublishResponse handle(ConfigPublishRequest request, RequestMeta meta) throws NacosException {
    // Publishes a configuration, dispatching to one of three persistence
    // paths — normal, tag, or beta (betaIps set) — each with an optional CAS
    // variant when a casMd5 is supplied. On success a change event is
    // published and a persistence trace is logged; any failure is converted
    // into a fail response rather than propagated.
    try {
        String dataId = request.getDataId();
        String group = request.getGroup();
        String content = request.getContent();
        final String tenant = request.getTenant();
        final String srcIp = meta.getClientIp();
        final String requestIpApp = request.getAdditionParam("requestIpApp");
        final String tag = request.getAdditionParam("tag");
        final String appName = request.getAdditionParam("appName");
        final String type = request.getAdditionParam("type");
        final String srcUser = request.getAdditionParam("src_user");
        final String encryptedDataKey = request.getAdditionParam("encryptedDataKey");
        // check tenant
        ParamUtils.checkParam(dataId, group, "datumId", content);
        ParamUtils.checkParam(tag);
        // Collect optional advance-info fields, skipping nulls.
        Map<String, Object> configAdvanceInfo = new HashMap<>(10);
        MapUtil.putIfValNoNull(configAdvanceInfo, "config_tags", request.getAdditionParam("config_tags"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "desc", request.getAdditionParam("desc"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "use", request.getAdditionParam("use"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "effect", request.getAdditionParam("effect"));
        MapUtil.putIfValNoNull(configAdvanceInfo, "type", type);
        MapUtil.putIfValNoNull(configAdvanceInfo, "schema", request.getAdditionParam("schema"));
        ParamUtils.checkParam(configAdvanceInfo);
        // Aggregate data ids may not be published as single configs.
        if (AggrWhitelist.isAggrDataId(dataId)) {
            Loggers.REMOTE_DIGEST.warn("[aggr-conflict] {} attempt to publish single data, {}, {}", srcIp, dataId, group);
            throw new NacosException(NacosException.NO_RIGHT, "dataId:" + dataId + " is aggr");
        }
        ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
        // casMd5 doubles as the CAS expected-value marker below.
        configInfo.setMd5(request.getCasMd5());
        configInfo.setType(type);
        configInfo.setEncryptedDataKey(encryptedDataKey);
        String betaIps = request.getAdditionParam("betaIps");
        ConfigOperateResult configOperateResult = null;
        String persistEvent = ConfigTraceService.PERSISTENCE_EVENT;
        if (StringUtils.isBlank(betaIps)) {
            if (StringUtils.isBlank(tag)) {
                // Normal publish (CAS when casMd5 present).
                if (StringUtils.isNotBlank(request.getCasMd5())) {
                    configOperateResult = configInfoPersistService.insertOrUpdateCas(srcIp, srcUser, configInfo,
                        configAdvanceInfo);
                    if (!configOperateResult.isSuccess()) {
                        return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
                            "Cas publish fail,server md5 may have changed.");
                    }
                } else {
                    configOperateResult = configInfoPersistService.insertOrUpdate(srcIp, srcUser, configInfo,
                        configAdvanceInfo);
                }
                ConfigChangePublisher.notifyConfigChange(new ConfigDataChangeEvent(false, dataId, group, tenant,
                    configOperateResult.getLastModified()));
            } else {
                // Tag publish (CAS when casMd5 present).
                if (StringUtils.isNotBlank(request.getCasMd5())) {
                    configOperateResult = configInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp,
                        srcUser);
                    if (!configOperateResult.isSuccess()) {
                        return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
                            "Cas publish tag config fail,server md5 may have changed.");
                    }
                } else {
                    configOperateResult = configInfoTagPersistService.insertOrUpdateTag(configInfo, tag, srcIp,
                        srcUser);
                }
                persistEvent = ConfigTraceService.PERSISTENCE_EVENT_TAG + "-" + tag;
                ConfigChangePublisher.notifyConfigChange(
                    new ConfigDataChangeEvent(false, dataId, group, tenant, tag,
                        configOperateResult.getLastModified()));
            }
        } else {
            // beta publish
            if (StringUtils.isNotBlank(request.getCasMd5())) {
                configOperateResult = configInfoBetaPersistService.insertOrUpdateBetaCas(configInfo, betaIps, srcIp,
                    srcUser);
                if (!configOperateResult.isSuccess()) {
                    return ConfigPublishResponse.buildFailResponse(ResponseCode.FAIL.getCode(),
                        "Cas publish beta config fail,server md5 may have changed.");
                }
            } else {
                configOperateResult = configInfoBetaPersistService.insertOrUpdateBeta(configInfo, betaIps, srcIp,
                    srcUser);
            }
            persistEvent = ConfigTraceService.PERSISTENCE_EVENT_BETA;
            // Note the 'true' beta flag on the change event.
            ConfigChangePublisher.notifyConfigChange(
                new ConfigDataChangeEvent(true, dataId, group, tenant, configOperateResult.getLastModified()));
        }
        ConfigTraceService.logPersistenceEvent(dataId, group, tenant, requestIpApp,
            configOperateResult.getLastModified(), srcIp, persistEvent, ConfigTraceService.PERSISTENCE_TYPE_PUB,
            content);
        return ConfigPublishResponse.buildSuccessResponse();
    } catch (Exception e) {
        Loggers.REMOTE_DIGEST.error("[ConfigPublishRequestHandler] publish config error ,request ={}", request, e);
        return ConfigPublishResponse.buildFailResponse(
            (e instanceof NacosException) ? ((NacosException) e).getErrCode() : ResponseCode.FAIL.getCode(),
            e.getMessage());
    }
}
@Test
void testPublishAggrCheckFail() throws NacosException, InterruptedException {
    // Publishing a dataId that is on the aggregation whitelist must be rejected
    // and must NOT fire a ConfigDataChangeEvent.
    RequestMeta requestMeta = new RequestMeta();
    String clientIp = "127.0.0.1";
    requestMeta.setClientIp(clientIp);
    String dataId = "testPublishAggrCheckFail";
    String group = "group";
    String tenant = "tenant";
    String content = "content";
    ConfigPublishRequest configPublishRequest = new ConfigPublishRequest();
    configPublishRequest.setDataId(dataId);
    configPublishRequest.setGroup(group);
    configPublishRequest.setTenant(tenant);
    configPublishRequest.setContent(content);
    // Force the aggregation-whitelist check to report this dataId as aggregated.
    when(AggrWhitelist.isAggrDataId(eq(dataId))).thenReturn(Boolean.TRUE);
    // Capture any change event published for this dataId; expected to stay null.
    AtomicReference<ConfigDataChangeEvent> reference = new AtomicReference<>();
    NotifyCenter.registerSubscriber(new Subscriber() {
        @Override
        public void onEvent(Event event) {
            ConfigDataChangeEvent event1 = (ConfigDataChangeEvent) event;
            if (event1.dataId.equals(dataId)) {
                reference.set((ConfigDataChangeEvent) event);
            }
        }

        @Override
        public Class<? extends Event> subscribeType() {
            return ConfigDataChangeEvent.class;
        }
    });
    ConfigPublishResponse response = configPublishRequestHandler.handle(configPublishRequest, requestMeta);
    assertEquals(ResponseCode.FAIL.getCode(), response.getResultCode());
    assertTrue(response.getMessage().contains("is aggr"));
    // Give the async notify center time to deliver any (unexpected) event.
    Thread.sleep(500L);
    assertTrue(reference.get() == null);
}
@VisibleForTesting
static String getProjectCacheDirectoryFromProject(Path path) {
    // Derives a stable cache-directory name for a project by hashing the
    // project's *canonical* path, so different spellings of the same directory
    // (e.g. "a/b/.." vs "a") map to the same cache directory.
    try {
        byte[] hashedBytes = MessageDigest.getInstance("SHA-256")
                .digest(path.toFile().getCanonicalPath()
                        .getBytes(java.nio.charset.StandardCharsets.UTF_8));
        // Hex-encode the digest (two chars per byte).
        StringBuilder stringBuilder = new StringBuilder(2 * hashedBytes.length);
        for (byte b : hashedBytes) {
            stringBuilder.append(String.format("%02x", b));
        }
        return stringBuilder.toString();
    } catch (IOException | SecurityException ex) {
        throw new RuntimeException(
                "Unable to create cache directory for project path: "
                        + path
                        + " - you can try to configure --project-cache manually",
                ex);
    } catch (NoSuchAlgorithmException ex) {
        // Fix: preserve the original exception as the cause instead of dropping it.
        throw new RuntimeException(
                "SHA-256 algorithm implementation not found - might be a broken JVM", ex);
    }
}
@Test
public void testGetProjectCacheDirectoryFromProject_sameFileDifferentPaths() throws IOException {
    // Two different Path spellings of the same directory must hash to the same
    // cache-directory name, because hashing is done on the canonical path.
    temporaryFolder.newFolder("ignored");
    Path path = temporaryFolder.getRoot().toPath();
    // "<root>/ignored/.." resolves to <root> but is a textually different path.
    Path indirectPath = temporaryFolder.getRoot().toPath().resolve("ignored").resolve("..");
    assertThat(path).isNotEqualTo(indirectPath); // plain Path equality should NOT hold
    assertThat(Files.isSameFile(path, indirectPath)).isTrue(); // filesystem identity holds
    assertThat(CacheDirectories.getProjectCacheDirectoryFromProject(path))
        .isEqualTo(
            CacheDirectories.getProjectCacheDirectoryFromProject(
                indirectPath)); // the canonical-path hash must agree
}
@Override
public EntityStatementJWS establishIdpTrust(URI issuer) {
    // The federation master's statement about the issuer establishes trust in
    // the IdP's JWKS and issuer URL.
    var trustedFederationStatement = fetchTrustedFederationStatement(issuer);
    // The federation statement alone is not the full entity statement: fetch
    // the entity configuration directly from the issuer, validated against the
    // JWKS we just learned to trust.
    return fetchTrustedEntityConfiguration(issuer, trustedFederationStatement.body().jwks());
}
@Test
void establishTrust_badFedmasterConfigSignature() {
    // A fedmaster entity configuration signed with the wrong key must make
    // trust establishment fail with a signature error.
    var client = new FederationMasterClientImpl(FEDERATION_MASTER, federationApiClient, clock);
    var issuer = URI.create("https://idp-tk.example.com");
    var fedmasterKeypair = ECKeyGenerator.example();
    var unrelatedKeypair = ECKeyGenerator.generate();
    // Configuration claims the fedmaster key but is signed by an unrelated key.
    var fedmasterEntityConfigurationJws = badSignatureFedmasterConfiguration(fedmasterKeypair, unrelatedKeypair);
    when(federationApiClient.fetchEntityConfiguration(FEDERATION_MASTER))
        .thenReturn(fedmasterEntityConfigurationJws);
    // when
    var e = assertThrows(FederationException.class, () -> client.establishIdpTrust(issuer));
    // then
    assertEquals(
        "entity statement of 'https://fedmaster.example.com' has a bad signature", e.getMessage());
}
@Override
public ResourceAllocationResult tryFulfillRequirements(
        Map<JobID, Collection<ResourceRequirement>> missingResources,
        TaskManagerResourceInfoProvider taskManagerResourceInfoProvider,
        BlockedTaskManagerChecker blockedTaskManagerChecker) {
    // Attempts to satisfy each job's missing resource requirements, first from
    // registered (non-blocked) task managers, then from pending ones; records
    // any additional pending task managers to allocate in the result builder.
    final ResourceAllocationResult.Builder resultBuilder = ResourceAllocationResult.builder();
    final List<InternalResourceInfo> registeredResources =
            getAvailableResources(
                    taskManagerResourceInfoProvider, resultBuilder, blockedTaskManagerChecker);
    final List<InternalResourceInfo> pendingResources =
            getPendingResources(taskManagerResourceInfoProvider, resultBuilder);
    // Total of everything currently available (registered + pending); grown
    // below as new pending task managers are created for unfulfilled jobs.
    ResourceProfile totalCurrentResources =
            Stream.concat(registeredResources.stream(), pendingResources.stream())
                    .map(internalResourceInfo -> internalResourceInfo.totalProfile)
                    .reduce(ResourceProfile.ZERO, ResourceProfile::merge);
    for (Map.Entry<JobID, Collection<ResourceRequirement>> resourceRequirements :
            missingResources.entrySet()) {
        final JobID jobId = resourceRequirements.getKey();
        // First pass: registered resources only.
        final Collection<ResourceRequirement> unfulfilledJobRequirements =
                tryFulfillRequirementsForJobWithResources(
                        jobId, resourceRequirements.getValue(), registeredResources);
        if (!unfulfilledJobRequirements.isEmpty()) {
            // Second pass: pending resources; merge in whatever new pending
            // capacity had to be created for this job.
            totalCurrentResources =
                    totalCurrentResources.merge(
                            tryFulfillRequirementsForJobWithPendingResources(
                                    jobId,
                                    unfulfilledJobRequirements,
                                    pendingResources,
                                    resultBuilder));
        }
    }
    // Unlike tryFulfillRequirementsForJobWithPendingResources, which updates
    // pendingResources to the latest state after a new PendingTaskManager is
    // created, tryFulFillRequiredResources does not update pendingResources
    // even after new PendingTaskManagers are created, because pendingResources
    // is no longer needed afterward.
    tryFulFillRequiredResources(
            registeredResources, pendingResources, totalCurrentResources, resultBuilder);
    return resultBuilder.build();
}
@Test
void testExcessPendingResourcesCouldReleaseEvenly() {
    // With two pending TMs (2 slots each) and only 2 required slots, the evenly
    // strategy should reuse pending capacity on a single pending TM rather than
    // allocating anything new.
    final JobID jobId = new JobID();
    final List<ResourceRequirement> requirements = new ArrayList<>();
    final TaskManagerResourceInfoProvider taskManagerResourceInfoProvider =
            TestingTaskManagerResourceInfoProvider.newBuilder()
                    .setPendingTaskManagersSupplier(
                            () ->
                                    Arrays.asList(
                                            new PendingTaskManager(
                                                    DEFAULT_SLOT_RESOURCE.multiply(2), 2),
                                            new PendingTaskManager(
                                                    DEFAULT_SLOT_RESOURCE.multiply(2), 2)))
                    .build();
    requirements.add(ResourceRequirement.create(ResourceProfile.UNKNOWN, 2));
    final ResourceAllocationResult result =
            EVENLY_STRATEGY.tryFulfillRequirements(
                    Collections.singletonMap(jobId, requirements),
                    taskManagerResourceInfoProvider,
                    resourceID -> false); // no task manager is blocked
    assertThat(result.getUnfulfillableJobs()).isEmpty();
    assertThat(result.getPendingTaskManagersToAllocate()).isEmpty();
    assertThat(result.getAllocationsOnPendingResources()).hasSize(1);
}
@Override
public void execute(SensorContext context) {
    // Run the analysis once per supported language, each against its own
    // rule repository.
    analyse(context, Xoo.KEY, XooRulesDefinition.XOO_REPOSITORY);
    analyse(context, Xoo2.KEY, XooRulesDefinition.XOO2_REPOSITORY);
}
@Test
public void testRule() throws IOException {
    // A 9-line xoo file; the sensor is expected to raise one issue per line
    // (plus one more), each flagged as quick-fixable.
    DefaultInputFile inputFile = new TestInputFileBuilder("foo", "src/Foo.xoo")
        .setLanguage(Xoo.KEY)
        .initMetadata("a\nb\nc\nd\ne\nf\ng\nh\ni\n")
        .build();
    SensorContextTester context = SensorContextTester.create(temp.newFolder());
    context.fileSystem().add(inputFile);
    sensor.execute(context);
    assertThat(context.allIssues()).hasSize(10); // One issue per line
    for (Issue issue : context.allIssues()) {
        assertThat(issue.isQuickFixAvailable()).isTrue();
    }
}
/**
 * Fetches the state of all tasks of the given type from the controller.
 *
 * @param taskType the minion task type, e.g. "SegmentGenerationAndPushTask"
 * @return a map of task name to task state
 * @throws IOException on transport failure
 * @throws HttpException when the controller responds with an error status (>= 400)
 */
public Map<String, String> getTasksStates(String taskType)
    throws IOException, HttpException {
  HttpGet httpGet = createHttpGetRequest(MinionRequestURLBuilder.baseUrl(_controllerUrl).forTasksStates(taskType));
  try (CloseableHttpResponse response = HTTP_CLIENT.execute(httpGet)) {
    int statusCode = response.getCode();
    // Fix: read the body with an explicit charset. The single-argument
    // IOUtils.toString overload is deprecated and uses the platform default
    // encoding; the controller's JSON responses are UTF-8.
    final String responseString =
        IOUtils.toString(response.getEntity().getContent(), java.nio.charset.StandardCharsets.UTF_8);
    if (statusCode >= 400) {
      throw new HttpException(
          String.format("Unable to get tasks states map. Error code %d, Error message: %s", statusCode,
              responseString));
    }
    return JsonUtils.stringToObject(responseString, TYPEREF_MAP_STRING_STRING);
  }
}
@Test
public void testTasksStates() throws IOException, HttpException {
    // Stub controller endpoint returning one task in IN_PROGRESS; the client
    // must parse the JSON map and expose the state by task name.
    HttpServer httpServer = startServer(14203, "/tasks/SegmentGenerationAndPushTask/taskstates",
        createHandler(200, "{\"Task_SegmentGenerationAndPushTask_1607470525615\":\"IN_PROGRESS\"}", 0));
    MinionClient minionClient = new MinionClient("http://localhost:14203", null);
    Assert.assertEquals(minionClient.getTasksStates("SegmentGenerationAndPushTask")
        .get("Task_SegmentGenerationAndPushTask_1607470525615"), "IN_PROGRESS");
    httpServer.stop(0);
}
/**
 * Maps each key to its partition and ranks up to {@code limitHostPerPartition}
 * hosts per partition using the service's hash ring.
 *
 * @param serviceUri            d2 URI of the service
 * @param keys                  keys to partition; keys no partition accepts end up unmapped
 * @param limitHostPerPartition maximum number of hosts returned per partition (must be > 0)
 * @param hash                  seed used to position the ring iterator
 * @throws ServiceUnavailableException when no load balancer strategy exists for the service
 */
@Override
public <K> HostToKeyMapper<K> getPartitionInformation(URI serviceUri, Collection<K> keys,
    int limitHostPerPartition, int hash)
    throws ServiceUnavailableException {
  if (limitHostPerPartition <= 0) {
    throw new IllegalArgumentException("limitHostPartition cannot be 0 or less");
  }
  ServiceProperties service = listenToServiceAndCluster(serviceUri);
  String serviceName = service.getServiceName();
  String clusterName = service.getClusterName();
  ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
  LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
  UriProperties uris = uriItem.getProperty();
  List<LoadBalancerState.SchemeStrategyPair> orderedStrategies =
      _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
  // partitionId -> number of hosts still missing to reach limitHostPerPartition
  Map<Integer, Integer> partitionWithoutEnoughHost = new HashMap<>();
  if (!orderedStrategies.isEmpty()) {
    // get the partitionId -> keys mapping
    final PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
    int maxPartitionId = accessor.getMaxPartitionId();
    List<K> unmappedKeys = new ArrayList<>();
    Map<Integer, Set<K>> partitionSet = getPartitionSet(keys, accessor, unmappedKeys);
    // get the partitionId -> host URIs list
    Map<Integer, KeysAndHosts<K>> partitionDataMap = new HashMap<>();
    for (Integer partitionId : partitionSet.keySet()) {
      // Try strategies in priority order; stop at the first one that yields hosts.
      for (LoadBalancerState.SchemeStrategyPair pair : orderedStrategies) {
        TrackerClientSubsetItem subsetItem = getPotentialClients(serviceName, service,
            cluster, uris, pair.getScheme(), partitionId, uriItem.getVersion());
        Map<URI, TrackerClient> trackerClients = subsetItem.getWeightedSubset();
        int size = Math.min(trackerClients.size(), limitHostPerPartition);
        List<URI> rankedUri = new ArrayList<>(size);
        Ring<URI> ring = pair.getStrategy().getRing(uriItem.getVersion(), partitionId,
            trackerClients, subsetItem.shouldForceUpdate());
        Iterator<URI> iterator = ring.getIterator(hash);
        // Walk the ring, collecting distinct hosts up to the requested size.
        while (iterator.hasNext() && rankedUri.size() < size) {
          URI uri = iterator.next();
          if (!rankedUri.contains(uri)) {
            rankedUri.add(uri);
          }
        }
        // Record the shortfall when fewer hosts than requested are available.
        if (rankedUri.size() < limitHostPerPartition) {
          partitionWithoutEnoughHost.put(partitionId, limitHostPerPartition - rankedUri.size());
        }
        KeysAndHosts<K> keysAndHosts =
            new KeysAndHosts<>(partitionSet.get(partitionId), rankedUri);
        partitionDataMap.put(partitionId, keysAndHosts);
        if (!rankedUri.isEmpty()) {
          // don't go to the next strategy if there are already hosts in the current one
          break;
        }
      }
    }
    return new HostToKeyMapper<>(unmappedKeys, partitionDataMap, limitHostPerPartition,
        maxPartitionId + 1, partitionWithoutEnoughHost);
  } else {
    // NOTE(review): message appears to be missing a separator before
    // "Server Schemes" — the two string literals are concatenated directly.
    throw new ServiceUnavailableException(serviceName,
        "PEGA_1009. Unable to find a load balancer strategy" + "Server Schemes: [" + String
            .join(", ", service.getPrioritizedSchemes()) + ']');
  }
}
@Test
public void testGetPartitionInfoOrdering() throws Exception {
    String serviceName = "articles";
    String clusterName = "cluster";
    String path = "path";
    String strategyName = "degrader";
    // setup 3 partitions. Partition 1 and Partition 2 both have server1 - server3.
    // Partition 3 only has server1.
    Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<>();
    final URI server1 = new URI("http://foo1.com");
    Map<Integer, PartitionData> server1Data = new HashMap<>();
    server1Data.put(1, new PartitionData(1.0));
    server1Data.put(2, new PartitionData(1.0));
    server1Data.put(3, new PartitionData(1.0));
    partitionDescriptions.put(server1, server1Data);
    final URI server2 = new URI("http://foo2.com");
    Map<Integer, PartitionData> server2Data = new HashMap<>();
    server2Data.put(1, new PartitionData(1.0));
    server2Data.put(2, new PartitionData(1.0));
    partitionDescriptions.put(server2, server2Data);
    final URI server3 = new URI("http://foo3.com");
    Map<Integer, PartitionData> server3Data = new HashMap<>();
    server3Data.put(1, new PartitionData(1.0));
    server3Data.put(2, new PartitionData(1.0));
    partitionDescriptions.put(server3, server3Data);
    // setup strategy which involves tweaking the hash ring to get partitionId -> URI host
    List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<>();
    LoadBalancerStrategy strategy = new TestLoadBalancerStrategy(partitionDescriptions);
    orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair(PropertyKeys.HTTP_SCHEME, strategy));
    // setup the partition accessor which can only map keys from 1 - 3.
    PartitionAccessor accessor = new TestPartitionAccessor();
    URI serviceURI = new URI("d2://" + serviceName);
    SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(
        clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies,
        accessor
    ), _d2Executor);
    // Key 123 is outside the accessor's range and must come back unmapped.
    List<Integer> keys = new ArrayList<>();
    keys.add(1);
    keys.add(2);
    keys.add(3);
    keys.add(123);
    HostToKeyMapper<Integer> result = balancer.getPartitionInformation(serviceURI, keys, 3, 123);
    Assert.assertEquals(result.getLimitHostPerPartition(), 3);
    Assert.assertEquals(1, result.getUnmappedKeys().size());
    Assert.assertEquals(123, (int) result.getUnmappedKeys().iterator().next().getKey());
    // partition 0 should be null
    Assert.assertNull(result.getPartitionInfoMap().get(0));
    // results for partition 1 should contain server1, server2 and server3
    KeysAndHosts<Integer> keysAndHosts1 = result.getPartitionInfoMap().get(1);
    assertEquals(keysAndHosts1.getKeys().size(), 1);
    assertEquals((int) keysAndHosts1.getKeys().iterator().next(), 1);
    List<URI> ordering1 = keysAndHosts1.getHosts();
    // results for partition 2 should be the same as partition1.
    KeysAndHosts<Integer> keysAndHosts2 = result.getPartitionInfoMap().get(2);
    assertEquals(keysAndHosts2.getKeys().size(), 1);
    assertEquals((int) keysAndHosts2.getKeys().iterator().next(), 2);
    List<URI> ordering2 = keysAndHosts2.getHosts();
    // for partition 3
    KeysAndHosts<Integer> keysAndHosts3 = result.getPartitionInfoMap().get(3);
    assertEquals(keysAndHosts3.getKeys().size(), 1);
    assertEquals((int) keysAndHosts3.getKeys().iterator().next(), 3);
    List<URI> ordering3 = keysAndHosts3.getHosts();
    // Just compare the size and contents of the list, not the ordering.
    assertEquals(ordering1.size(), 3);
    List<URI> allServers = new ArrayList<>();
    allServers.add(server1);
    allServers.add(server2);
    allServers.add(server3);
    Assert.assertTrue(ordering1.containsAll(allServers));
    Assert.assertTrue(ordering2.containsAll(allServers));
    Assert.assertEquals(ordering1, ordering2);
    Assert.assertEquals(ordering3.get(0), server1);
    // Partition 3 has only 1 of the 3 requested hosts, so 2 are missing.
    Assert.assertTrue(result.getPartitionsWithoutEnoughHosts().containsKey(3));
    Assert.assertEquals((int) result.getPartitionsWithoutEnoughHosts().get(3), 2);
}
@Override
public ExportResult<MediaContainerResource> export(
    UUID jobId, TokensAndUrlAuthData authData, Optional<ExportInformation> exportInformation)
    throws UploadErrorException, FailedToListAlbumsException, InvalidTokenException,
        PermissionDeniedException, IOException, FailedToListMediaItemsException {
  if (!exportInformation.isPresent()) {
    // First call of the export: make a list of photos contained in albums so
    // they are not exported twice later on, then start with the albums.
    populateContainedMediaList(jobId, authData);
    return exportAlbums(authData, Optional.empty(), jobId);
  } else if (exportInformation.get().getContainerResource() instanceof PhotosContainerResource) {
    // if ExportInformation is a photos container, this is a request to only export the contents
    // in that container instead of the whole user library
    return exportPhotosContainer(
        (PhotosContainerResource) exportInformation.get().getContainerResource(), authData, jobId);
  } else if (exportInformation.get().getContainerResource() instanceof MediaContainerResource) {
    // if ExportInformation is a media container, this is a request to only export the contents
    // in that container instead of the whole user library (this is to support backwards
    // compatibility with the GooglePhotosExporter)
    return exportMediaContainer(
        (MediaContainerResource) exportInformation.get().getContainerResource(), authData, jobId);
  }
  /*
   * Use the export information to determine whether this export call should export albums or
   * media items.
   *
   * Albums are exported if and only if the export information doesn't hold an album already,
   * and the pagination token begins with the album prefix. There must be a pagination token for
   * album export since this isn't the first export operation performed (if it was, there
   * wouldn't be any export information at all).
   *
   * Otherwise, media is exported. In that case there may or may not be pagination information,
   * and there may or may not be album information. If there is no container resource, we are
   * exporting albumless media and a pagination token must be present. The beginning step of
   * exporting albumless media is indicated by a pagination token containing only
   * MEDIA_TOKEN_PREFIX with no token attached, to differentiate this case from the very first
   * step of export (no export information at all).
   */
  StringPaginationToken paginationToken =
      (StringPaginationToken) exportInformation.get().getPaginationData();
  IdOnlyContainerResource idOnlyContainerResource =
      (IdOnlyContainerResource) exportInformation.get().getContainerResource();
  boolean containerResourcePresent = idOnlyContainerResource != null;
  boolean paginationDataPresent = paginationToken != null;
  if (!containerResourcePresent
      && paginationDataPresent
      && paginationToken.getToken().startsWith(ALBUM_TOKEN_PREFIX)) {
    // we're still listing out all of the albums since we have album pagination data
    return exportAlbums(authData, Optional.of(paginationToken), jobId);
  } else {
    return exportMedia(
        authData,
        Optional.ofNullable(idOnlyContainerResource),
        Optional.ofNullable(paginationToken),
        jobId);
  }
}
@Test
public void testExportPhotosContainer_photosRetrying()
    throws IOException, InvalidTokenException, PermissionDeniedException, UploadErrorException,
        FailedToListAlbumsException, FailedToListMediaItemsException {
  // Two of six photos fail to resolve; the retrying exporter must skip the
  // failures, export the other four, and record exactly two errors.
  String photoIdToFail1 = "photo3";
  String photoIdToFail2 = "photo5";
  ImmutableList<PhotoAlbum> albums = ImmutableList.of();
  ImmutableList<PhotoModel> photos = ImmutableList.of(
      setUpSinglePhotoModel("", "photo1"),
      setUpSinglePhotoModel("", "photo2"),
      setUpSinglePhotoModel("", photoIdToFail1),
      setUpSinglePhotoModel("", "photo4"),
      setUpSinglePhotoModel("", photoIdToFail2),
      setUpSinglePhotoModel("", "photo6")
  );
  PhotosContainerResource container = new PhotosContainerResource(albums, photos);
  ExportInformation exportInfo = new ExportInformation(null, container);
  MediaMetadata photoMediaMetadata = new MediaMetadata();
  photoMediaMetadata.setPhoto(new Photo());
  // For the photo_id_to_fail photos, throw an exception.
  when(photosInterface.getMediaItem(photoIdToFail1)).thenThrow(IOException.class);
  when(photosInterface.getMediaItem(photoIdToFail2)).thenThrow(IOException.class);
  // For all other photos, return a media item.
  for (PhotoModel photoModel : photos) {
    if (photoModel.getDataId().equals(photoIdToFail1)
        || photoModel.getDataId().equals(photoIdToFail2)) {
      continue;
    }
    when(photosInterface.getMediaItem(photoModel.getDataId())).thenReturn(
        setUpSingleMediaItem(photoModel.getDataId(), photoModel.getDataId(), photoMediaMetadata)
    );
  }
  ExportResult<MediaContainerResource> result = retryingGoogleMediaExporter.export(
      uuid, authData, Optional.of(exportInfo)
  );
  // Exported photos must be exactly the input list minus the two failures.
  assertThat(
      result.getExportedData().getPhotos().stream().map(x -> x.getDataId()).collect(Collectors.toList())
  ).isEqualTo(
      photos.stream().map(
          x -> x.getDataId()
      ).filter(
          dataId -> !(dataId.equals(photoIdToFail1) || dataId.equals(photoIdToFail2))
      ).collect(
          Collectors.toList()
      )
  );
  assertThat(result.getExportedData().getPhotos().size()).isEqualTo(photos.size() - 2);
  assertThat(retryingExecutor.getErrors().size()).isEqualTo(2);
  assertThat(retryingExecutor.getErrors().stream().findFirst().toString().contains("IOException")).isTrue();
}
/**
 * Renders the given AST node as SQL text.
 *
 * @param root the AST node to format
 * @return the formatted SQL with trailing newlines removed
 */
public static String formatSql(final AstNode root) {
    final StringBuilder sql = new StringBuilder();
    final Formatter formatter = new Formatter(sql);
    formatter.process(root, 0);
    // The formatter leaves trailing newlines; strip them from the result.
    return StringUtils.stripEnd(sql.toString(), "\n");
}
@Test
public void shouldFormatInnerJoin() {
    // An inner join with a WITHIN clause should format as
    // "<left> INNER JOIN <right> WITHIN <window> ON <criteria>".
    final Join join = new Join(leftAlias, ImmutableList.of(new JoinedSource(
        Optional.empty(),
        rightAlias,
        JoinedSource.Type.INNER,
        criteria,
        Optional.of(new WithinExpression(10, TimeUnit.SECONDS)))));
    final String expected = "`left` L\nINNER JOIN `right` R WITHIN 10 SECONDS ON "
        + "(('left.col0' = 'right.col0'))";
    assertEquals(expected, SqlFormatter.formatSql(join));
}
@Override
public void onChange(List<JobRunrMetadata> metadataList) {
    // Only react when the set of affected servers changed (first call, or a
    // different number of servers than last time).
    if (this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList == null
            || this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList.size() != metadataList.size()) {
        // Clear any stale problem before (possibly) re-adding it with fresh metadata.
        problems.removeProblemsOfType(PollIntervalInSecondsTimeBoxIsTooSmallProblem.PROBLEM_TYPE);
        // Suppress the problem while a CPU-allocation irregularity is reported,
        // as that condition already explains the timing issue.
        if (!metadataList.isEmpty() && !problems.containsProblemOfType(CpuAllocationIrregularityProblem.PROBLEM_TYPE)) {
            problems.addProblem(new PollIntervalInSecondsTimeBoxIsTooSmallProblem(metadataList));
        }
        this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList = metadataList;
    }
}
@Test
void ifNoChangesOnPollIntervalInSecondsTimeBoxIsTooSmallThenNoProblemsCreated() {
    // An empty metadata list on the very first notification must not touch the
    // problems registry at all.
    pollIntervalInSecondsTimeBoxIsTooSmallProblemHandler.onChange(emptyList());
    verifyNoInteractions(problems);
}
/**
 * Collector indexing stream elements into an {@link ImmutableListMultimap}
 * keyed by {@code keyFunction}, with the elements themselves as values.
 * Delegates to the two-argument overload with an identity value function.
 */
public static <K, E> Collector<E, ImmutableListMultimap.Builder<K, E>, ImmutableListMultimap<K, E>> index(Function<? super E, K> keyFunction) {
    return index(keyFunction, Function.identity());
}
@Test
public void index_empty_stream_returns_empty_map() {
    // Both overloads of index() must produce an empty multimap for an empty stream.
    assertThat(Stream.<MyObj>empty().collect(index(MyObj::getId)).size()).isZero();
    assertThat(Stream.<MyObj>empty().collect(index(MyObj::getId, MyObj::getText)).size()).isZero();
}
public static Set<HostInfo> getRemoteHosts( final List<PersistentQueryMetadata> currentQueries, final KsqlHostInfo localHost ) { return currentQueries.stream() // required filter else QueryMetadata.getAllMetadata() throws .filter(q -> q.getState().isRunningOrRebalancing()) .map(QueryMetadata::getAllStreamsHostMetadata) .filter(Objects::nonNull) .flatMap(Collection::stream) .map(StreamsMetadata::hostInfo) .filter(hostInfo -> !(hostInfo.host().equals(localHost.host()) && hostInfo.port() == (localHost.port()))) .collect(Collectors.toSet()); }
@Test
public void shouldFilterQueryMetadataByState() {
    // When: one running and one not-running query
    final Set<HostInfo> info = DiscoverRemoteHostsUtil.getRemoteHosts(
        ImmutableList.of(runningQuery, notRunningQuery),
        THIS_HOST_INFO
    );
    // Then: only the running query contributes hosts, and the non-running
    // query's metadata is never even requested.
    assertThat(info, contains(OTHER_HOST_INFO));
    verify(notRunningQuery, never()).getAllStreamsHostMetadata();
}
/**
 * Adapts any {@link StateRestoreCallback} to the record-batching interface.
 *
 * <p>Already-batching record callbacks are returned as-is; key/value batching
 * callbacks get the records unwrapped into {@code KeyValue}s; plain callbacks
 * are invoked once per record.
 *
 * @throws NullPointerException if {@code restoreCallback} is null
 */
public static RecordBatchingStateRestoreCallback adapt(final StateRestoreCallback restoreCallback) {
    Objects.requireNonNull(restoreCallback, "stateRestoreCallback must not be null");
    if (restoreCallback instanceof RecordBatchingStateRestoreCallback) {
        // Already the target type; no wrapping needed.
        return (RecordBatchingStateRestoreCallback) restoreCallback;
    } else if (restoreCallback instanceof BatchingStateRestoreCallback) {
        // Convert each batch of consumer records into key/value pairs.
        return records -> {
            final List<KeyValue<byte[], byte[]>> keyValues = new ArrayList<>();
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                keyValues.add(new KeyValue<>(record.key(), record.value()));
            }
            ((BatchingStateRestoreCallback) restoreCallback).restoreAll(keyValues);
        };
    } else {
        // Plain callback: deliver records one at a time.
        return records -> {
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                restoreCallback.restore(record.key(), record.value());
            }
        };
    }
}
@Test
public void shouldThrowOnRestoreAll() {
    // The default restoreAll of a plain StateRestoreCallback mock is expected
    // to be unsupported after adaptation.
    assertThrows(UnsupportedOperationException.class, () -> adapt(mock(StateRestoreCallback.class)).restoreAll(null));
}
/**
 * Main bootstrap loop: repeatedly creates a launcher, runs it, and relaunches
 * until an irrecoverable error occurs or looping is disabled. Exits the JVM
 * with the last launcher return code.
 *
 * @param shouldLoop       whether to keep relaunching after the launcher returns
 * @param bootstrapperArgs connection/configuration arguments passed to the launcher
 */
public void go(boolean shouldLoop, AgentBootstrapperArgs bootstrapperArgs) {
    loop = shouldLoop;
    launcherThread = Thread.currentThread();
    validate();
    cleanupTempFiles();
    int returnValue = 0;
    DefaultAgentLaunchDescriptorImpl descriptor = new DefaultAgentLaunchDescriptorImpl(bootstrapperArgs, this);
    do {
        // Remember the current context classloader so it can be restored after
        // the launcher (loaded in its own classloader) has run.
        ClassLoader tccl = launcherThread.getContextClassLoader();
        try (AgentLauncherCreator agentLauncherCreator = getLauncherCreator()) {
            AgentLauncher launcher = agentLauncherCreator.createLauncher();
            LOG.info("Attempting create and start launcher...");
            setContextClassLoader(launcher.getClass().getClassLoader());
            returnValue = launcher.launch(descriptor);
            resetContextClassLoader(tccl);
            LOG.info("Launcher returned with code {}(0x{})", returnValue, Integer.toHexString(returnValue).toUpperCase());
            if (returnValue == AgentLauncher.IRRECOVERABLE_ERROR) {
                // Fatal: stop relaunching.
                loop = false;
            }
        } catch (Exception e) {
            LOG.error("Error starting launcher", e);
        } finally {
            // Restore the classloader (idempotent if already reset above) and
            // encourage collection of the launcher's classloader.
            resetContextClassLoader(tccl);
            forceGCToPreventOOM();
        }
        // Immediately restart if launcher isn't up to date; otherwise wait
        // before relaunching.
        if (returnValue != AgentLauncher.NOT_UP_TO_DATE) {
            waitForRelaunchTime();
        }
    } while (loop);
    LOG.info("Agent Bootstrapper stopped");
    jvmExit(returnValue);
}
@Test
@Timeout(10)
public void shouldNotRelaunchAgentLauncherWhenItReturnsAnIrrecoverableCode() {
    // A launcher that returns IRRECOVERABLE_ERROR must break the relaunch loop
    // (hence the timeout) and close its creator.
    final boolean[] destroyCalled = new boolean[1];
    final AgentBootstrapper bootstrapper = new AgentBootstrapper() {
        @Override
        AgentLauncherCreator getLauncherCreator() {
            return new AgentLauncherCreator() {
                @Override
                public AgentLauncher createLauncher() {
                    return descriptor -> AgentLauncher.IRRECOVERABLE_ERROR;
                }

                @Override
                public void close() {
                    destroyCalled[0] = true;
                }
            };
        }
    };
    // Stub jvmExit so the test JVM is not terminated.
    final AgentBootstrapper spyBootstrapper = stubJVMExit(bootstrapper);
    try {
        spyBootstrapper.go(true, new AgentBootstrapperArgs().setServerUrl(new URL("http://" + "ghost-name" + ":" + 3518 + "/go")).setRootCertFile(null).setSslVerificationMode(AgentBootstrapperArgs.SslMode.NONE));
    } catch (Exception e) {
        fail("should not have propagated exception thrown while invoking the launcher");
    }
    assertThat(destroyCalled[0], is(true));
}
/**
 * Dispatches a discovery response to the handler for its resource type.
 * Throws {@link AssertionError} for unhandled enum constants so new resource
 * types cannot be silently ignored.
 */
@VisibleForTesting
void handleResponse(DiscoveryResponseData response) {
    ResourceType resourceType = response.getResourceType();
    switch (resourceType) {
        case NODE:
            handleD2NodeResponse(response);
            break;
        case D2_URI_MAP:
            handleD2URIMapResponse(response);
            break;
        case D2_URI:
            handleD2URICollectionResponse(response);
            break;
        default:
            throw new AssertionError("Missing case in enum switch: " + resourceType);
    }
}
@Test
public void testHandleD2NodeUpdateWithEmptyResponse() {
    // An empty NODE response must still be acknowledged exactly once.
    XdsClientImplFixture fixture = new XdsClientImplFixture();
    fixture._xdsClientImpl.handleResponse(DISCOVERY_RESPONSE_WITH_EMPTY_NODE_RESPONSE);
    fixture.verifyAckSent(1);
}
/**
 * Imports the external issue report, choosing the importer that matches the
 * report's schema version.
 */
public void execute() {
    // Reports that declare rules follow the current schema; reports without a
    // rules section use the legacy (deprecated) format.
    final boolean legacyFormat = report.rules == null;
    if (legacyFormat) {
        importDeprecatedFormat();
    } else {
        importNewFormat();
    }
}
@Test
public void execute_whenNewFormatWithZeroIssues() {
    // A new-format report (rules present) with zero issues must import nothing
    // and still log the summary line.
    ExternalIssueReport report = new ExternalIssueReport();
    ExternalIssueReport.Rule rule = createRule();
    report.issues = new ExternalIssueReport.Issue[0];
    report.rules = new ExternalIssueReport.Rule[]{rule};
    ExternalIssueImporter underTest = new ExternalIssueImporter(this.context, report);
    underTest.execute();
    assertThat(context.allExternalIssues()).isEmpty();
    assertThat(context.allIssues()).isEmpty();
    assertThat(logs.logs(Level.INFO)).contains("Imported 0 issues in 0 files");
}
/**
 * Marks a block for removal, or cancels a pending removal.
 *
 * @param add     true to mark {@code blockId} for removal, false to un-mark it
 * @param blockId the block to (un)mark
 */
public void updateToRemovedBlock(boolean add, long blockId) {
    // Un-marking always clears any pending removal; marking only applies to
    // blocks this worker actually holds.
    if (!add) {
        mToRemoveBlocks.remove(blockId);
    } else if (mBlocks.contains(blockId)) {
        mToRemoveBlocks.add(blockId);
    }
}
@Test
public void updateToRemovedBlock() {
    // remove a non-existing block: must be a no-op
    mInfo.updateToRemovedBlock(true, 10L);
    assertTrue(mInfo.getToRemoveBlocks().isEmpty());
    // remove block 1: it is held, so it lands in the to-remove set
    mInfo.updateToRemovedBlock(true, 1L);
    assertTrue(mInfo.getToRemoveBlocks().contains(1L));
    // cancel the removal
    mInfo.updateToRemovedBlock(false, 1L);
    assertTrue(mInfo.getToRemoveBlocks().isEmpty());
    // actually remove 1 for real: removing it from worker meta clears the mark
    mInfo.updateToRemovedBlock(true, 1L);
    mInfo.removeBlockFromWorkerMeta(1L);
    assertTrue(mInfo.getToRemoveBlocks().isEmpty());
}
/**
 * Drains all accumulated metrics into a consistent snapshot.
 *
 * @return the collected statistics entries
 * @throws IllegalStateException if the background reporter is active, since a
 *         consistent snapshot cannot be taken concurrently with it
 */
List<StatisticsEntry> takeStatistics() {
    if (reporterEnabled) {
        throw new IllegalStateException("Cannot take consistent snapshot while reporter is enabled");
    }
    final List<StatisticsEntry> snapshot = new ArrayList<>();
    consume((metric, value) -> snapshot.add(new StatisticsEntry(metric, value)));
    return snapshot;
}
@Test
void retrieving_statistics_resets_the_counters() {
    // Two requests, then a snapshot: counter shows 2 and is reset by the read.
    testRequest("http", 200, "GET");
    testRequest("http", 200, "GET");
    var stats = collector.takeStatistics();
    assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 2L);
    // One more request after the reset: the next snapshot must report 1, not 3.
    testRequest("http", 200, "GET");
    stats = collector.takeStatistics();
    assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L);
}
@Override public String getAuthorizeUrl(Integer socialType, Integer userType, String redirectUri) { // 获得对应的 AuthRequest 实现 AuthRequest authRequest = buildAuthRequest(socialType, userType); // 生成跳转地址 String authorizeUri = authRequest.authorize(AuthStateUtils.createState()); return HttpUtils.replaceUrlQuery(authorizeUri, "redirect_uri", redirectUri); }
@Test
public void testGetAuthorizeUrl() {
    try (MockedStatic<AuthStateUtils> authStateUtilsMock = mockStatic(AuthStateUtils.class)) {
        // prepare parameters
        Integer socialType = SocialTypeEnum.WECHAT_MP.getType();
        Integer userType = randomPojo(UserTypeEnum.class).getValue();
        String redirectUri = "sss";
        // mock the AuthRequest implementation lookup
        AuthRequest authRequest = mock(AuthRequest.class);
        when(authRequestFactory.get(eq("WECHAT_MP"))).thenReturn(authRequest);
        // mock state creation and the provider's authorize URL
        authStateUtilsMock.when(AuthStateUtils::createState).thenReturn("aoteman");
        when(authRequest.authorize(eq("aoteman"))).thenReturn("https://www.iocoder.cn?redirect_uri=yyy");
        // call
        String url = socialClientService.getAuthorizeUrl(socialType, userType, redirectUri);
        // assert: the redirect_uri query parameter is replaced with ours
        assertEquals("https://www.iocoder.cn?redirect_uri=sss", url);
    }
}
/**
 * Invokes the service call with the default retry policy for the service.
 * Delegates to the overload that takes an explicit retry configuration.
 */
@Override
public Optional<Object> invoke(Function<InvokerContext, Object> invokeFunc, Function<Throwable, Object> exFunc,
        String serviceName) {
    return invoke(invokeFunc, exFunc, serviceName, getRetry(null));
}
@Test
public void invokeWithNoInstances() {
    // With no service instances, the invoker yields null and the result is empty.
    Object exResult = new Object();
    final Function<InvokerContext, Object> invokerFunc = invokerContext -> null;
    final Function<Throwable, Object> exFunc = ex -> exResult;
    Optional<Object> invoke = retryService.invoke(invokerFunc, exFunc, serviceName);
    Assert.assertFalse(invoke.isPresent());
}
/**
 * Computes the sum of each column of the matrix.
 *
 * @param matrix a rectangular matrix; rows are expected to be of equal length
 * @return an array of column sums (as {@code long} to avoid int overflow);
 *         empty for a matrix with no rows
 */
public static long[] colSums(int[][] matrix) {
    // Fix: a zero-row matrix previously failed on matrix[0]; it has no
    // columns, so return an empty result instead.
    if (matrix.length == 0) {
        return new long[0];
    }
    long[] x = new long[matrix[0].length];
    for (int[] row : matrix) {
        for (int j = 0; j < x.length; j++) {
            x[j] += row[j];
        }
    }
    return x;
}
@Test
public void testColSums() {
    System.out.println("colSums");
    // 3x3 input; expected values are the per-column sums of A.
    double[][] A = {
        {0.7220180, 0.07121225, 0.6881997},
        {-0.2648886, -0.89044952, 0.3700456},
        {-0.6391588, 0.44947578, 0.6240573}
    };
    double[] r = {-0.1820294, -0.3697615, 1.6823026};
    double[] result = MathEx.colSums(A);
    // Compare element-wise with a tolerance for floating-point rounding.
    for (int i = 0; i < r.length; i++) {
        assertEquals(result[i], r[i], 1E-7);
    }
}
/**
 * Renders the text, honoring inline {@code <col=RRGGBB>} color tags: each tag
 * switches the color for the text that follows it.
 *
 * @param graphics target graphics context
 * @return the rendered text's total width and the font height
 */
@Override
public Dimension render(Graphics2D graphics) {
    // Temporarily switch to the component font, restoring the original at the end.
    Font originalFont = null;
    if (font != null) {
        originalFont = graphics.getFont();
        graphics.setFont(font);
    }
    final FontMetrics fontMetrics = graphics.getFontMetrics();
    Matcher matcher = COL_TAG_PATTERN.matcher(text);
    Color textColor = color; // current drawing color; updated by each tag
    int idx = 0;             // start of the not-yet-rendered text
    int width = 0;           // accumulated pixel width of rendered segments
    while (matcher.find()) {
        // Note: this local intentionally shadows the component's color field.
        String color = matcher.group(1);
        // Render the segment preceding the tag in the current color.
        String s = text.substring(idx, matcher.start());
        idx = matcher.end();
        renderText(graphics, textColor, position.x + width, position.y, s);
        width += fontMetrics.stringWidth(s);
        // The tag's color applies to the text that follows.
        textColor = Color.decode("#" + color);
    }
    {
        // Render the remainder after the last tag (or the whole text if no tags).
        String s = text.substring(idx);
        renderText(graphics, textColor, position.x + width, position.y, s);
        width += fontMetrics.stringWidth(s);
    }
    int height = fontMetrics.getHeight();
    if (originalFont != null) {
        graphics.setFont(originalFont);
    }
    return new Dimension(width, height);
}
@Test
public void testRender2() {
    // A leading color tag yields an empty first segment plus the tagged text,
    // hence two drawString calls, with the color switched to blue.
    TextComponent textComponent = new TextComponent();
    textComponent.setText("<col=0000ff>test");
    textComponent.render(graphics);
    verify(graphics, times(2)).drawString(eq("test"), anyInt(), anyInt());
    verify(graphics).setColor(Color.BLUE);
}
public static void registerApplicationNodeInCollectServer(String applicationName, URL collectServerUrl, URL applicationNodeUrl) { if (collectServerUrl == null || applicationNodeUrl == null) { throw new IllegalArgumentException( "collectServerUrl and applicationNodeUrl must not be null"); } final String appName; if (applicationName == null) { appName = Parameters.getCurrentApplication(); } else { appName = applicationName; } final URL registerUrl; try { registerUrl = new URL(collectServerUrl.toExternalForm() + "?appName=" + URLEncoder.encode(appName, StandardCharsets.UTF_8) + "&appUrls=" // "UTF-8" as said in javadoc + URLEncoder.encode(applicationNodeUrl.toExternalForm(), StandardCharsets.UTF_8) + "&action=registerNode"); unregisterApplicationNodeInCollectServerUrl = new URL( registerUrl.toExternalForm().replace("registerNode", "unregisterNode")); } catch (final IOException e) { // can't happen if urls are ok throw new IllegalArgumentException(e); } // this is an asynchronous call because if this method is called when the webapp is starting, // the webapp can not respond to the collect server for the first collect of data final Thread thread = new Thread("javamelody registerApplicationNodeInCollectServer") { @Override public void run() { try { Thread.sleep(10000); } catch (final InterruptedException e) { throw new IllegalStateException(e); } try { new LabradorRetriever(registerUrl).post(null); LOG.info("application node added to the collect server"); } catch (final IOException e) { LOG.warn("Unable to register application's node in the collect server ( " + e + ')', e); } } }; thread.setDaemon(true); thread.start(); }
@Test
public void testRegisterApplicationNodeInCollectServer() throws MalformedURLException {
    // Nominal calls: with and without an explicit application name.
    MonitoringFilter.registerApplicationNodeInCollectServer(null,
            new URL("http://localhost:8080"), new URL("http://localhost:8081"));
    MonitoringFilter.registerApplicationNodeInCollectServer("test",
            new URL("http://localhost:8080"), new URL("http://localhost:8081"));
    // A null collect server URL must be rejected. The AssertionError ensures
    // this test fails if no exception is thrown (previously it passed silently).
    try {
        MonitoringFilter.registerApplicationNodeInCollectServer(null, null,
                new URL("http://localhost:8081"));
        throw new AssertionError("expected IllegalArgumentException for null collectServerUrl");
    } catch (final IllegalArgumentException e) {
        assertNotNull("e", e);
    }
    // A null application node URL must be rejected as well.
    try {
        MonitoringFilter.registerApplicationNodeInCollectServer(null,
                new URL("http://localhost:8080"), null);
        throw new AssertionError("expected IllegalArgumentException for null applicationNodeUrl");
    } catch (final IllegalArgumentException e) {
        assertNotNull("e", e);
    }
}
/**
 * Convenience overload: assembles a default webhook bot from the given
 * callbacks and delegates to the main registration entry point.
 */
public void registerBot(
        String botPath,
        Function<Update, BotApiMethod<?>> updateHandler,
        Runnable setWebhook,
        Runnable deleteWebhook
) throws TelegramApiException {
    DefaultTelegramWebhookBot bot = DefaultTelegramWebhookBot.builder()
            .botPath(botPath)
            .updateHandler(updateHandler)
            .setWebhook(setWebhook)
            .deleteWebhook(deleteWebhook)
            .build();
    registerBot(bot);
}
@Test
public void testWhenUpdateIsReceivedOnWebhookUpdateReceivedIsCalledOnCorrectBot() throws TelegramApiException, IOException {
    // Two bots registered on distinct paths; only the one matching the
    // request path must receive the update.
    application.registerBot(telegramWebhookBot);
    TestTelegramWebhookBot telegramWebhookBot2 = new TestTelegramWebhookBot("/test2");
    application.registerBot(telegramWebhookBot2);
    Request request = new Request.Builder()
            .url("http://127.0.0.1:" + webhookOptions.getPort() + "/test")
            .headers(Headers.of(headers))
            .post(RequestBody.create(objectMapper.writeValueAsString(update), MediaType.parse("application/json")))
            .build();
    // Close the response to avoid leaking the underlying OkHttp connection.
    httpClient.newCall(request).execute().close();
    assertNotNull(telegramWebhookBot.updateReceived);
    assertEquals(update.getUpdateId(), telegramWebhookBot.updateReceived.getUpdateId());
    assertNull(telegramWebhookBot2.updateReceived);
}
/**
 * Converts {@code value} into an instance of the requested {@code type} by
 * delegating to the shared {@link Autoboxer}; a thin static facade.
 */
public static <T> T autobox(Object value, Class<T> type) {
    final T converted = Autoboxer.autobox(value, type);
    return converted;
}
@Test
void testAutoboxClob() throws SQLException {
    // A Clob should be unwrapped into its full character content.
    String content = "the result";
    Clob clob = Mockito.mock(Clob.class);
    Mockito.when(clob.length()).thenReturn((long) content.length());
    Mockito.when(clob.getSubString(1, content.length())).thenReturn(content);

    assertThat(ReflectionUtils.autobox(clob, String.class)).isEqualTo(content);
}
/**
 * Evicts all cached state for the given selector: its upstream list and
 * its default-rule handle.
 */
@Override
public void removeSelector(final SelectorData selectorData) {
    final String selectorId = selectorData.getId();
    UpstreamCacheManager.getInstance().removeByKey(selectorId);
    CACHED_HANDLE.get().removeHandle(CacheKeyUtils.INST.getKey(selectorId, Constants.DEFAULT_RULE));
}
@Test
public void testRemoveSelector() throws NoSuchFieldException, IllegalAccessException {
    // Seed the cache, then peek at its private map via reflection to verify removal.
    UpstreamCacheManager cacheManager = UpstreamCacheManager.getInstance();
    cacheManager.submit("1", upstreamList);

    Field upstreamMapField = cacheManager.getClass().getDeclaredField("UPSTREAM_MAP");
    upstreamMapField.setAccessible(true);
    @SuppressWarnings("unchecked")
    Map<String, List<Upstream>> upstreamMap =
            (Map<String, List<Upstream>>) upstreamMapField.get(cacheManager);
    Assertions.assertNotEquals(upstreamMap.get("1"), null);

    webSocketPluginDataHandler.removeSelector(selectorData);
    Assertions.assertNull(upstreamMap.get("1"));
}
/** Returns this instance's name, as stored in the {@code name} field. */
@Override
public String getName() {
    return name;
}
@Test
void shouldReturnRightName() {
    // The controller under test is expected to report its fixture name.
    String expectedName = "fake-controller";
    assertEquals(expectedName, controller.getName());
}
/**
 * Returns a bounded preview of a stored file produced by an execution.
 *
 * <p>The row count defaults to the configured initial preview size and is
 * capped at the configured maximum; the charset defaults to UTF-8.
 *
 * @throws IllegalArgumentException if {@code encoding} is not a valid charset name
 */
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/{executionId}/file/preview")
@Operation(tags = {"Executions"}, summary = "Get file preview for an execution")
public HttpResponse<?> filePreview(
    @Parameter(description = "The execution id") @PathVariable String executionId,
    @Parameter(description = "The internal storage uri") @QueryValue URI path,
    @Parameter(description = "The max row returns") @QueryValue @Nullable Integer maxRows,
    @Parameter(description = "The file encoding as Java charset name. Defaults to UTF-8", example = "ISO-8859-1") @QueryValue(defaultValue = "UTF-8") String encoding
) throws IOException {
    // Reject paths that do not belong to this execution.
    this.validateFile(executionId, path, "/api/v1/executions/{executionId}/file?path=" + path);

    String extension = FilenameUtils.getExtension(path.toString());

    // Resolve the requested charset, translating bad names into a client error.
    Optional<Charset> charset;
    try {
        charset = Optional.ofNullable(encoding).map(Charset::forName);
    } catch (IllegalCharsetNameException | UnsupportedCharsetException e) {
        throw new IllegalArgumentException("Unable to preview using encoding '" + encoding + "'");
    }

    // Default to the configured initial row count; never exceed the configured maximum.
    int rowLimit = maxRows == null ? this.initialPreviewRows : Math.min(maxRows, this.maxPreviewRows);

    try (InputStream fileStream = storageInterface.get(tenantService.resolveTenant(), path)) {
        FileRender fileRender = FileRenderBuilder.of(extension, fileStream, charset, rowLimit);
        return HttpResponse.ok(fileRender);
    }
}
@Test
void filePreview() throws TimeoutException {
    // Run the "inputs" flow with the default (UTF-8) file input and preview it.
    Execution defaultExecution = runnerUtils.runOne(null, TESTS_FLOW_NS, "inputs", null, (flow, execution1) -> flowIO.typedInputs(flow, execution1, inputs));
    assertThat(defaultExecution.getTaskRunList(), hasSize(13));

    String defaultPath = (String) defaultExecution.getInputs().get("file");
    String defaultFile = client.toBlocking().retrieve(
        GET("/api/v1/executions/" + defaultExecution.getId() + "/file/preview?path=" + defaultPath),
        String.class
    );
    assertThat(defaultFile, containsString("hello"));

    // Same flow, but with a Latin-1 (ISO-8859-1) encoded file input.
    Map<String, Object> latin1FileInputs = ImmutableMap.<String, Object>builder()
        .put("failed", "NO")
        .put("string", "myString")
        .put("enum", "ENUM_VALUE")
        .put("int", "42")
        .put("float", "42.42")
        .put("instant", "2019-10-06T18:27:49Z")
        .put("file", Objects.requireNonNull(ExecutionControllerTest.class.getClassLoader().getResource("data/iso88591.txt")).getPath())
        .put("secret", "secret")
        .put("array", "[1, 2, 3]")
        .put("json", "{}")
        .build();
    Execution latin1Execution = runnerUtils.runOne(null, TESTS_FLOW_NS, "inputs", null, (flow, execution1) -> flowIO.typedInputs(flow, execution1, latin1FileInputs));
    assertThat(latin1Execution.getTaskRunList(), hasSize(13));

    // Previewing with an explicit ISO-8859-1 encoding must decode non-ASCII text.
    String latin1Path = (String) latin1Execution.getInputs().get("file");
    String latin1File = client.toBlocking().retrieve(
        GET("/api/v1/executions/" + latin1Execution.getId() + "/file/preview?path=" + latin1Path + "&encoding=ISO-8859-1"),
        String.class
    );
    assertThat(latin1File, containsString("Düsseldorf"));

    // An unknown charset name must be rejected with 422 and a helpful message.
    HttpClientResponseException e = assertThrows(HttpClientResponseException.class, () -> client.toBlocking().retrieve(
        GET("/api/v1/executions/" + latin1Execution.getId() + "/file/preview?path=" + latin1Path + "&encoding=foo"),
        String.class
    ));
    assertThat(e.getStatus(), is(HttpStatus.UNPROCESSABLE_ENTITY));
    assertThat(e.getMessage(), containsString("using encoding 'foo'"));
}
/**
 * Deletes made by others are never visible for any result set type.
 * Always returns {@code false}; the {@code type} argument is ignored.
 */
@Override
public boolean othersDeletesAreVisible(final int type) {
    return false;
}
@Test
void assertOthersDeletesAreVisible() {
    // Visibility of others' deletes is hard-coded to false regardless of type.
    final int anyResultSetType = 0;
    assertFalse(metaData.othersDeletesAreVisible(anyResultSetType));
}
/**
 * Returns a copy of the given array; the input is left unmodified.
 *
 * @param array the array to copy; must not be null
 * @return a new array with the same contents
 * @throws NullPointerException if {@code array} is null (same as before)
 */
public static int[] clone(int[] array) {
    // int[].clone() performs the identical element copy as the previous
    // new-array + System.arraycopy dance, with less ceremony.
    return array.clone();
}
@Test
public void testClone() {
    // Cloning must preserve contents for multi-element, empty, and singleton arrays.
    int[][] inputs = {{3, 2, 1}, {}, {2}};
    for (int[] input : inputs) {
        assertArrayEquals(input, Replicas.clone(input));
    }
}
/**
 * Writes the given character range, first passing it through the
 * non-XML-character filterer, then delegating to the wrapped writer.
 *
 * <p>NOTE(review): the filterer appears to sanitize the buffer in place
 * before it is written — confirm against {@code NonXmlCharFilterer}.
 */
@Override
public void writeCharacters(char[] text, int start, int len) throws XMLStreamException {
    nonXmlCharFilterer.filter(text, start, len);
    writer.writeCharacters(text, start, len);
}
@Test
public void testWriteCharacters3Args() throws XMLStreamException {
    // The same buffer instance, offset and length must reach the delegate.
    char[] chars = {'a', 'b', 'c'};
    filteringXmlStreamWriter.writeCharacters(chars, 2, 3);
    verify(xmlStreamWriterMock).writeCharacters(same(chars), eq(2), eq(3));
}
/**
 * Creates a read transform with defaults: a match-all query and a batch
 * size of 1000 documents.
 */
public static Read read() {
    // 1000 for batch size is good enough in many cases,
    // ex: if document size is large, around 10KB, the response's size will be around 10MB
    // if document size is small, around 1KB, the response's size will be around 1MB
    return new AutoValue_SolrIO_Read.Builder().setBatchSize(1000).setQuery("*:*").build();
}
@Test
public void testRead() throws Exception {
    // Insert a known number of documents, read them all back with a
    // non-default batch size, and verify the count.
    SolrIOTestUtils.insertTestDocuments(SOLR_COLLECTION, NUM_DOCS, solrClient);

    PCollection<SolrDocument> output =
        pipeline.apply(
            SolrIO.read()
                .withConnectionConfiguration(connectionConfiguration)
                .from(SOLR_COLLECTION)
                .withBatchSize(101));

    PAssert.thatSingleton(output.apply("Count", Count.globally())).isEqualTo(NUM_DOCS);
    pipeline.run();
}
/**
 * Intercepts generic invocations ($invoke / $invokeAsync) and materializes
 * their (methodName, parameterTypes[], arguments[]) triple into a concrete
 * typed invocation on the target interface.
 *
 * <p>Arguments are deserialized according to the "generic" attachment:
 * default/raw-return (POJO realize), gson, native-java (guarded by a
 * configuration switch), bean descriptors, or protobuf-json. Non-generic
 * invocations pass through untouched.
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation inv) throws RpcException {
    // Only rewrite calls that look like a generic invocation and whose target
    // is NOT itself a GenericService implementation.
    if ((inv.getMethodName().equals($INVOKE) || inv.getMethodName().equals($INVOKE_ASYNC))
            && inv.getArguments() != null
            && inv.getArguments().length == 3
            && !GenericService.class.isAssignableFrom(invoker.getInterface())) {
        String name = ((String) inv.getArguments()[0]).trim();
        String[] types = (String[]) inv.getArguments()[1];
        Object[] args = (Object[]) inv.getArguments()[2];
        try {
            Method method = findMethodByMethodSignature(invoker.getInterface(), name, types, inv.getServiceModel());
            Class<?>[] params = method.getParameterTypes();
            // Null args/types mean "no-arg call": substitute empty arrays of the right size.
            if (args == null) {
                args = new Object[params.length];
            }
            if (types == null) {
                types = new String[params.length];
            }
            if (args.length != types.length) {
                throw new RpcException("GenericFilter#invoke args.length != types.length, please check your " + "params");
            }
            // Serialization mode: attachment first, then the RPC context.
            String generic = inv.getAttachment(GENERIC_KEY);
            if (StringUtils.isBlank(generic)) {
                generic = getGenericValueFromRpcContext();
            }
            if (StringUtils.isEmpty(generic)
                    || ProtocolUtils.isDefaultGenericSerialization(generic)
                    || ProtocolUtils.isGenericReturnRawResult(generic)) {
                // Default mode: realize maps/lists back into POJOs.
                try {
                    args = PojoUtils.realize(args, params, method.getGenericParameterTypes());
                } catch (Exception e) {
                    logger.error(
                            LoggerCodeConstants.PROTOCOL_ERROR_DESERIALIZE,
                            "",
                            "",
                            "Deserialize generic invocation failed. ServiceKey: " + inv.getTargetServiceUniqueName(),
                            e);
                    throw new RpcException(e);
                }
            } else if (ProtocolUtils.isGsonGenericSerialization(generic)) {
                args = getGsonGenericArgs(args, method.getGenericParameterTypes());
            } else if (ProtocolUtils.isJavaGenericSerialization(generic)) {
                // Native Java deserialization is a known attack surface; it must be
                // explicitly enabled by configuration.
                Configuration configuration = ApplicationModel.ofNullable(applicationModel)
                        .modelEnvironment()
                        .getConfiguration();
                if (!configuration.getBoolean(CommonConstants.ENABLE_NATIVE_JAVA_GENERIC_SERIALIZE, false)) {
                    String notice = "Trigger the safety barrier! "
                            + "Native Java Serializer is not allowed by default."
                            + "This means currently maybe being attacking by others. "
                            + "If you are sure this is a mistake, "
                            + "please set `" + CommonConstants.ENABLE_NATIVE_JAVA_GENERIC_SERIALIZE
                            + "` enable in configuration! "
                            + "Before doing so, please make sure you have configure JEP290 to prevent serialization attack.";
                    logger.error(CONFIG_FILTER_VALIDATION_EXCEPTION, "", "", notice);
                    throw new RpcException(new IllegalStateException(notice));
                }
                // Each argument must arrive as a serialized byte[].
                for (int i = 0; i < args.length; i++) {
                    if (byte[].class == args[i].getClass()) {
                        try (UnsafeByteArrayInputStream is = new UnsafeByteArrayInputStream((byte[]) args[i])) {
                            args[i] = applicationModel
                                    .getExtensionLoader(Serialization.class)
                                    .getExtension(GENERIC_SERIALIZATION_NATIVE_JAVA)
                                    .deserialize(null, is)
                                    .readObject();
                        } catch (Exception e) {
                            throw new RpcException("Deserialize argument [" + (i + 1) + "] failed.", e);
                        }
                    } else {
                        throw new RpcException("Generic serialization [" + GENERIC_SERIALIZATION_NATIVE_JAVA
                                + "] only support message type " + byte[].class + " and your message type is "
                                + args[i].getClass());
                    }
                }
            } else if (ProtocolUtils.isBeanGenericSerialization(generic)) {
                // Bean mode: arguments are JavaBeanDescriptor trees.
                for (int i = 0; i < args.length; i++) {
                    if (args[i] != null) {
                        if (args[i] instanceof JavaBeanDescriptor) {
                            args[i] = JavaBeanSerializeUtil.deserialize((JavaBeanDescriptor) args[i]);
                        } else {
                            throw new RpcException("Generic serialization [" + GENERIC_SERIALIZATION_BEAN
                                    + "] only support message type " + JavaBeanDescriptor.class.getName()
                                    + " and your message type is " + args[i].getClass().getName());
                        }
                    }
                }
            } else if (ProtocolUtils.isProtobufGenericSerialization(generic)) {
                // as proto3 only accept one protobuf parameter
                if (args.length == 1 && args[0] instanceof String) {
                    try (UnsafeByteArrayInputStream is =
                            new UnsafeByteArrayInputStream(((String) args[0]).getBytes())) {
                        args[0] = applicationModel
                                .getExtensionLoader(Serialization.class)
                                .getExtension(GENERIC_SERIALIZATION_PROTOBUF)
                                .deserialize(null, is)
                                .readObject(method.getParameterTypes()[0]);
                    } catch (Exception e) {
                        throw new RpcException("Deserialize argument failed.", e);
                    }
                } else {
                    // NOTE(review): message reads "...and type is<Class>" — missing
                    // space after "is"; kept byte-identical here.
                    throw new RpcException("Generic serialization [" + GENERIC_SERIALIZATION_PROTOBUF
                            + "] only support one " + String.class.getName()
                            + " argument and your message size is " + args.length + " and type is"
                            + args[0].getClass().getName());
                }
            }
            // Re-dispatch as a concrete, typed invocation on the resolved method.
            RpcInvocation rpcInvocation = new RpcInvocation(
                    inv.getTargetServiceUniqueName(),
                    invoker.getUrl().getServiceModel(),
                    method.getName(),
                    invoker.getInterface().getName(),
                    invoker.getUrl().getProtocolServiceKey(),
                    method.getParameterTypes(),
                    args,
                    inv.getObjectAttachments(),
                    inv.getInvoker(),
                    inv.getAttributes(),
                    inv instanceof RpcInvocation ? ((RpcInvocation) inv).getInvokeMode() : null);
            return invoker.invoke(rpcInvocation);
        } catch (NoSuchMethodException | ClassNotFoundException e) {
            throw new RpcException(e.getMessage(), e);
        }
    }
    // Not a generic call: pass through unchanged.
    return invoker.invoke(inv);
}
@Test void testInvokeWithJavaException() throws Exception { // temporary enable native java generic serialize System.setProperty(ENABLE_NATIVE_JAVA_GENERIC_SERIALIZE, "true"); Assertions.assertThrows(RpcException.class, () -> { Method genericInvoke = GenericService.class.getMethods()[0]; Map<String, Object> person = new HashMap<String, Object>(); person.put("name", "dubbo"); person.put("age", 10); RpcInvocation invocation = new RpcInvocation( $INVOKE, GenericService.class.getName(), "", genericInvoke.getParameterTypes(), new Object[] { "getPerson", new String[] {Person.class.getCanonicalName()}, new Object[] {person} }); invocation.setAttachment(GENERIC_KEY, GENERIC_SERIALIZATION_NATIVE_JAVA); URL url = URL.valueOf("test://test:11/org.apache.dubbo.rpc.support.DemoService?" + "accesslog=true&group=dubbo&version=1.1"); Invoker invoker = Mockito.mock(Invoker.class); when(invoker.invoke(any(Invocation.class))).thenReturn(new AppResponse(new Person("person", 10))); when(invoker.getUrl()).thenReturn(url); when(invoker.getInterface()).thenReturn(DemoService.class); genericFilter.invoke(invoker, invocation); }); System.clearProperty(ENABLE_NATIVE_JAVA_GENERIC_SERIALIZE); }
/**
 * Renders a chat message as plain text, prefixing user and assistant turns
 * with their respective prompt markers; system content passes through as-is.
 *
 * @throws IllegalArgumentException for tool messages (unsupported) or an
 *     unrecognized message type
 */
protected String messageToString(Message message) {
    switch (message.getMessageType()) {
        case SYSTEM:
            return message.getContent();
        case USER:
            return humanPrompt + message.getContent();
        case ASSISTANT:
            return assistantPrompt + message.getContent();
        case TOOL:
            // Tool execution is not part of this prompt format.
            throw new IllegalArgumentException(TOOL_EXECUTION_NOT_SUPPORTED_FOR_WAI_MODELS);
        default:
            throw new IllegalArgumentException("Unknown message type: " + message.getMessageType());
    }
}
@Test
public void testSingleAssistantMessage() {
    // The converter under test evidently uses an empty assistant prefix,
    // so the content should pass through unchanged.
    Message message = new AssistantMessage("Assistant message");
    Assert.assertEquals("Assistant message", converter.messageToString(message));
}
/**
 * Checks whether {@code className} equals the canonical name of
 * {@link java.lang.Enum}. Null-safe: a null argument yields {@code false}.
 */
public static boolean isEnumCanonicalName(String className) {
    final String enumCanonicalName = Enum.class.getCanonicalName();
    return enumCanonicalName.equals(className);
}
@Test
public void isEnumCanonicalName() {
    // Only the canonical name matches; the simple name must not.
    String canonical = Enum.class.getCanonicalName();
    String simple = Enum.class.getSimpleName();
    assertThat(ScenarioSimulationSharedUtils.isEnumCanonicalName(canonical)).isTrue();
    assertThat(ScenarioSimulationSharedUtils.isEnumCanonicalName(simple)).isFalse();
}
/** Returns the function's registered name, the {@code FUNCTION_NAME} constant. */
@Override
public String getName() {
    return FUNCTION_NAME;
}
@Test
public void testPowerNullColumn() {
    // power(col, 0) over a nullable int column: non-null rows yield 1
    // (anything to the power 0), null rows go into the null bitmap.
    ExpressionContext expression = RequestContextUtils.getExpression(String.format("power(%s,%s)", INT_SV_NULL_COLUMN, 0));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof PowerTransformFunction);
    Assert.assertEquals(transformFunction.getName(), TransformFunctionType.POWER.getName());
    double[] expectedValues = new double[NUM_ROWS];
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    for (int i = 0; i < NUM_ROWS; i++) {
        if (isNullRow(i)) {
            // Null rows are tracked in the bitmap, not in the value array.
            roaringBitmap.add(i);
        } else {
            expectedValues[i] = 1;
        }
    }
    testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
/**
 * Tests whether the given VFS connection details are usable.
 *
 * <p>Checks, in order: the details carry a name (needed to form a temporary
 * PVFS URI), the provider-level connectivity test passes, and — unless root
 * paths are unsupported or explicitly ignored by {@code options} — the
 * resolved root path exists and is a folder.
 *
 * @param manager the connection manager
 * @param details the connection details to test (need not be saved yet)
 * @param options test options; null means defaults
 * @return true if the connection is usable
 * @throws KettleException on provider resolution or file access errors
 */
public <T extends VFSConnectionDetails> boolean test(
  @NonNull ConnectionManager manager,
  @NonNull T details,
  @Nullable VFSConnectionTestOptions options )
  throws KettleException {

  if ( options == null ) {
    options = new VFSConnectionTestOptions();
  }

  // The specified connection details may not exist saved in the meta-store,
  // but still needs to have a non-empty name in it, to be able to form a temporary PVFS URI.
  if ( StringUtils.isEmpty( details.getName() ) ) {
    return false;
  }

  VFSConnectionProvider<T> provider = getExistingProvider( manager, details );
  // Basic connectivity must pass before any root-path validation.
  if ( !provider.test( details ) ) {
    return false;
  }

  if ( !details.isRootPathSupported() || options.isRootPathIgnored() ) {
    return true;
  }

  String resolvedRootPath;
  try {
    resolvedRootPath = getResolvedRootPath( details );
  } catch ( KettleException e ) {
    // Invalid root path.
    return false;
  }

  // No root path configured: acceptable only when a root path is optional.
  if ( resolvedRootPath == null ) {
    return !details.isRootPathRequired();
  }

  // Ensure that root path exists and is a folder.
  return isFolder( getConnectionRootProviderFileObject( manager, provider, details ) );
}
@Test
public void testTestReturnsFalseWhenRootPathFileDoesNotExist() throws Exception {
    // A connection whose resolved root path does not exist must fail the test.
    when( connectionRootProviderFileObject.exists() ).thenReturn( false );

    boolean result = vfsConnectionManagerHelper.test(
        connectionManager, vfsConnectionDetails, getTestOptionsCheckRootPath() );

    assertFalse( result );
}
/** Serializes this value as a small JSON object with the single "edition" field. */
@Override
public String toString() {
    StringBuilder json = new StringBuilder("{");
    addField(json, "\"edition\": ", this.edition, true);
    endString(json);
    return json.toString();
}
@Test
void toStringHasEdition() {
    // The JSON-ish serialization must expose the "edition" field name.
    LicenseNewValue newValue = new LicenseNewValue("Developer");
    String serialized = newValue.toString();
    assertThat(serialized).contains("edition");
}
/**
 * Maps every column of the schema to a {@link FieldInfo}.
 *
 * @throws IllegalArgumentException if the schema has no columns
 */
public static List<FieldInfo> buildSourceSchemaEntity(final LogicalSchema schema) {
    // Fail fast on a column-less schema rather than after mapping.
    if (schema.columns().isEmpty()) {
        throw new IllegalArgumentException("Root schema should contain columns: " + schema);
    }
    return schema.columns().stream()
        .map(EntityUtil::toFieldInfo)
        .collect(Collectors.toList());
}
@Test public void shouldBuildCorrectMapField() { // Given: final LogicalSchema schema = LogicalSchema.builder() .valueColumn(ColumnName.of("field"), SqlTypes.map(SqlTypes.BIGINT, SqlTypes.INTEGER)) .build(); // When: final List<FieldInfo> fields = EntityUtil.buildSourceSchemaEntity(schema); // Then: assertThat(fields, hasSize(1)); assertThat(fields.get(0).getName(), equalTo("field")); assertThat(fields.get(0).getSchema().getTypeName(), equalTo("MAP")); assertThat(fields.get(0).getSchema().getFields(), equalTo(Optional.empty())); assertThat(fields.get(0).getSchema().getMemberSchema().get().getTypeName(), equalTo("INTEGER")); }
@Override public ImportResult importItem( UUID jobId, IdempotentImportExecutor idempotentExecutor, TokenSecretAuthData authData, MediaContainerResource data) throws Exception { // Make the data smugmug compatible data.transmogrify(transmogrificationConfig); try { SmugMugInterface smugMugInterface = getOrCreateSmugMugInterface(authData); for (MediaAlbum album : data.getAlbums()) { idempotentExecutor.executeAndSwallowIOExceptions( album.getId(), album.getName(), () -> importSingleAlbum(jobId, album, smugMugInterface)); } for (PhotoModel photo : data.getPhotos()) { idempotentExecutor.executeAndSwallowIOExceptions( photo.getIdempotentId(), photo.getTitle(), () -> importSinglePhoto(jobId, idempotentExecutor, photo, smugMugInterface)); } for (VideoModel video : data.getVideos()) { idempotentExecutor.executeAndSwallowIOExceptions( video.getIdempotentId(), video.getName(), () -> importSingleVideo(jobId, idempotentExecutor, video, smugMugInterface)); } } catch (IOException e) { monitor.severe(() -> "Error importing", e); return new ImportResult(e); } return ImportResult.OK; }
@Test
public void importStoresAlbumInJobStore() throws Exception {
    // setup test objects: one album with 3 photos and 3 videos — enough to
    // force the importer to spill into an overflow album.
    UUID jobId = UUID.randomUUID();
    MediaAlbum mediaAlbum1 = new MediaAlbum("albumId1", "albumName1", "albumDescription1");
    PhotoModel photoModel1 = new PhotoModel("PHOTO_TITLE", "FETCHABLE_URL", "PHOTO_DESCRIPTION", "MEDIA_TYPE", "photoId1", mediaAlbum1.getId(), false);
    PhotoModel photoModel2 = new PhotoModel("PHOTO_TITLE", "FETCHABLE_URL", "PHOTO_DESCRIPTION", "MEDIA_TYPE", "photoId2", mediaAlbum1.getId(), false);
    PhotoModel photoModel3 = new PhotoModel("PHOTO_TITLE", "FETCHABLE_URL", "PHOTO_DESCRIPTION", "MEDIA_TYPE", "photoId3", mediaAlbum1.getId(), false);
    VideoModel videoModel1 = new VideoModel("VIDEO_TITLE", "FETCHABLE_URL", "VIDEO_DESCRIPTION", "MEDIA_TYPE", "videoId1", mediaAlbum1.getId(), false, null);
    VideoModel videoModel2 = new VideoModel("VIDEO_TITLE", "FETCHABLE_URL", "VIDEO_DESCRIPTION", "MEDIA_TYPE", "videoId2", mediaAlbum1.getId(), false, null);
    VideoModel videoModel3 = new VideoModel("VIDEO_TITLE", "FETCHABLE_URL", "VIDEO_DESCRIPTION", "MEDIA_TYPE", "videoId3", mediaAlbum1.getId(), false, null);
    MediaContainerResource mediaContainerResource1 = new MediaContainerResource(Collections.singletonList(mediaAlbum1), ImmutableList.of(), ImmutableList.of());
    MediaContainerResource mediaContainerResource2 = new MediaContainerResource(ImmutableList.of(), ImmutableList.of(photoModel1, photoModel2, photoModel3), ImmutableList.of(videoModel1, videoModel2, videoModel3));
    // The importer is expected to create the primary album plus an overflow
    // album named "<name> (1)".
    SmugMugAlbum smugMugAlbum1 = new SmugMugAlbum("date", mediaAlbum1.getDescription(), mediaAlbum1.getName(), "privacy", "albumUri1", "urlname", "weburi");
    String overflowAlbumName = smugMugAlbum1.getName() + " (1)";
    SmugMugAlbum smugMugAlbum2 = new SmugMugAlbum("date", mediaAlbum1.getDescription(), overflowAlbumName, "privacy", "albumUri2", "urlname", "weburi");
    SmugMugAlbumResponse mockAlbumResponse1 = new SmugMugAlbumResponse(smugMugAlbum1.getUri(), "Locator", "LocatorType", smugMugAlbum1);
    SmugMugAlbumResponse mockAlbumResponse2 = new SmugMugAlbumResponse(smugMugAlbum2.getUri(), "Locator", "LocatorType", smugMugAlbum2);
    when(smugMugInterface.createAlbum(eq(smugMugAlbum1.getName()))).thenReturn(mockAlbumResponse1);
    when(smugMugInterface.createAlbum(eq(smugMugAlbum2.getName()))).thenReturn(mockAlbumResponse2);
    SmugMugImageUploadResponse smugMugUploadImageResponse = new SmugMugImageUploadResponse("imageUri", "albumImageUri", new ImageInfo("imageUri", "albumImageUri", "statusImageReplaceUri", "url"));
    when(smugMugInterface.uploadImage(any(), any(), any())).thenReturn(smugMugUploadImageResponse);
    when(smugMugInterface.uploadVideo(any(), any(), any())).thenReturn(smugMugUploadImageResponse);
    when(smugMugInterface.getImageAsStream(any())).thenReturn(bufferedInputStream);
    // Run test: albums first, then the photos/videos, as the real runner would.
    SmugMugMediaImporter importer = new SmugMugMediaImporter(smugMugInterface, config, jobStore, new AppCredentials("key", "secret"), mock(ObjectMapper.class), monitor);
    ImportResult result = importer.importItem(jobId, EXECUTOR, new TokenSecretAuthData("token", "secret"), mediaContainerResource1);
    result = importer.importItem(jobId, EXECUTOR, new TokenSecretAuthData("token", "secret"), mediaContainerResource2);
    // Verify: both album names were created and every photo URL was fetched.
    ArgumentCaptor<String> photoUrlsCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> albumNamesCaptor = ArgumentCaptor.forClass(String.class);
    verify(smugMugInterface, atLeastOnce()).createAlbum(albumNamesCaptor.capture());
    verify(smugMugInterface, atLeastOnce()).getImageAsStream(photoUrlsCaptor.capture());
    List<String> capturedAlbumNames = albumNamesCaptor.getAllValues();
    assertTrue(capturedAlbumNames.contains(smugMugAlbum1.getName()));
    assertTrue(capturedAlbumNames.contains(smugMugAlbum2.getName()));
    List<String> capturedPhotoUrls = photoUrlsCaptor.getAllValues();
    assertTrue(capturedPhotoUrls.contains(photoModel1.getFetchableUrl()));
    assertTrue(capturedPhotoUrls.contains(photoModel2.getFetchableUrl()));
    assertTrue(capturedPhotoUrls.contains(photoModel3.getFetchableUrl()));
    // The idempotent executor caches album id -> album uri for both albums.
    String overflowAlbumId = mediaAlbum1.getId() + "-overflow-1";
    assertThat((String) EXECUTOR.getCachedValue(mediaAlbum1.getId())).isEqualTo(smugMugAlbum1.getUri());
    assertThat((String) EXECUTOR.getCachedValue(overflowAlbumId)).isEqualTo(smugMugAlbum2.getUri());
    // Temp data in the job store tracks per-album item counts and the overflow chain.
    SmugMugPhotoTempData tempData1 = new SmugMugPhotoTempData(mediaAlbum1.getId(), smugMugAlbum1.getName(), smugMugAlbum1.getDescription(), smugMugAlbum1.getUri(), 4, overflowAlbumId);
    SmugMugPhotoTempData tempData2 = new SmugMugPhotoTempData(overflowAlbumId, smugMugAlbum2.getName(), smugMugAlbum2.getDescription(), smugMugAlbum2.getUri(), 2, null);
    assertThat(jobStore.findData(jobId, String.format(TEMP_DATA_FORMAT, mediaAlbum1.getId()), SmugMugPhotoTempData.class).toString()).isEqualTo(tempData1.toString());
    assertThat(jobStore.findData(jobId, String.format(TEMP_DATA_FORMAT, overflowAlbumId), SmugMugPhotoTempData.class).toString()).isEqualTo(tempData2.toString());
}
/**
 * Describes append support for an upload target: appending is possible
 * exactly when the target already exists.
 */
@Override
public Write.Append append(final Path file, final TransferStatus status) throws BackgroundException {
    final boolean targetExists = status.isExists();
    return new Write.Append(targetExists).withStatus(status);
}
@Test
public void testAppend() throws Exception {
    // Open and log into a local session against the default hostname.
    final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    // Create a fresh file in the home directory; an existing file must be appendable.
    final Path workdir = new LocalHomeFinderFeature().find();
    final Path test = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new LocalTouchFeature(session).touch(test, new TransferStatus());
    assertTrue(new LocalUploadFeature(session).append(test, new TransferStatus().exists(true).withLength(0L).withRemote(new LocalAttributesFinderFeature(session).find(test))).append);
    // Clean up the temporary file.
    new LocalDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Verifies that the catalog's Hive metastore URI, when both it and the Spark
 * session's environment URI are set, are identical.
 *
 * @throws IllegalArgumentException when the two URIs disagree
 */
private void validateHmsUri(String catalogHmsUri) {
    // Nothing to validate when the catalog does not define its own HMS URI.
    if (catalogHmsUri == null) {
        return;
    }

    Configuration conf = SparkSession.active().sessionState().newHadoopConf();
    String envHmsUri = conf.get(HiveConf.ConfVars.METASTOREURIS.varname, null);
    // Likewise when the session environment has no HMS URI configured.
    if (envHmsUri == null) {
        return;
    }

    Preconditions.checkArgument(
        catalogHmsUri.equals(envHmsUri),
        "Inconsistent Hive metastore URIs: %s (Spark session) != %s (spark_catalog)",
        envHmsUri,
        catalogHmsUri);
}
@Test
public void testValidateHmsUri() {
    // HMS uris match: the session catalog initializes and exposes "default".
    assertThat(spark.sessionState().catalogManager().v2SessionCatalog().defaultNamespace()[0])
        .isEqualTo("default");

    // HMS uris doesn't match: initialization must fail with the exact mismatch message.
    spark.sessionState().catalogManager().reset();
    String catalogHmsUri = "RandomString";
    spark.conf().set(envHmsUriKey, hmsUri);
    spark.conf().set(catalogHmsUriKey, catalogHmsUri);
    assertThatThrownBy(() -> spark.sessionState().catalogManager().v2SessionCatalog())
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage(
            String.format(
                "Inconsistent Hive metastore URIs: %s (Spark session) != %s (spark_catalog)",
                hmsUri, catalogHmsUri));

    // no env HMS uri, only catalog HMS uri: validation is skipped.
    spark.sessionState().catalogManager().reset();
    spark.conf().set(catalogHmsUriKey, hmsUri);
    spark.conf().unset(envHmsUriKey);
    assertThat(spark.sessionState().catalogManager().v2SessionCatalog().defaultNamespace()[0])
        .isEqualTo("default");

    // no catalog HMS uri, only env HMS uri: validation is skipped too.
    spark.sessionState().catalogManager().reset();
    spark.conf().set(envHmsUriKey, hmsUri);
    spark.conf().unset(catalogHmsUriKey);
    assertThat(spark.sessionState().catalogManager().v2SessionCatalog().defaultNamespace()[0])
        .isEqualTo("default");
}
/**
 * Fetches the value at {@code columnIndex}, dispatching to the type-specific
 * JDBC accessor matching the requested {@code type}; any type not explicitly
 * handled falls through to {@link java.sql.ResultSet#getObject(int)}.
 *
 * @param columnIndex 1-based column index
 * @param type the desired Java type of the value
 * @throws SQLException propagated from the underlying result set
 */
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
    if (boolean.class == type) {
        return resultSet.getBoolean(columnIndex);
    }
    if (byte.class == type) {
        return resultSet.getByte(columnIndex);
    }
    if (short.class == type) {
        return resultSet.getShort(columnIndex);
    }
    if (int.class == type) {
        return resultSet.getInt(columnIndex);
    }
    if (long.class == type) {
        return resultSet.getLong(columnIndex);
    }
    if (float.class == type) {
        return resultSet.getFloat(columnIndex);
    }
    if (double.class == type) {
        return resultSet.getDouble(columnIndex);
    }
    if (String.class == type) {
        return resultSet.getString(columnIndex);
    }
    if (BigDecimal.class == type) {
        return resultSet.getBigDecimal(columnIndex);
    }
    if (byte[].class == type) {
        return resultSet.getBytes(columnIndex);
    }
    if (Date.class == type) {
        return resultSet.getDate(columnIndex);
    }
    if (Time.class == type) {
        return resultSet.getTime(columnIndex);
    }
    if (Timestamp.class == type) {
        return resultSet.getTimestamp(columnIndex);
    }
    if (Blob.class == type) {
        return resultSet.getBlob(columnIndex);
    }
    if (Clob.class == type) {
        return resultSet.getClob(columnIndex);
    }
    if (Array.class == type) {
        return resultSet.getArray(columnIndex);
    }
    // Fallback for any other requested type.
    return resultSet.getObject(columnIndex);
}
@Test
void assertGetValueByBytes() throws SQLException {
    // byte[] columns must be fetched via getBytes rather than getObject.
    byte[] expected = {(byte) 1};
    ResultSet resultSet = mock(ResultSet.class);
    when(resultSet.getBytes(1)).thenReturn(expected);
    assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, byte[].class), is(expected));
}
/**
 * Builds the {@code RateLimiterRegistry} bean: creates the registry, wires
 * up event consumers, then eagerly instantiates one rate limiter per
 * configured instance.
 */
@Bean
public RateLimiterRegistry rateLimiterRegistry(
    RateLimiterConfigurationProperties rateLimiterProperties,
    EventConsumerRegistry<RateLimiterEvent> rateLimiterEventsConsumerRegistry,
    RegistryEventConsumer<RateLimiter> rateLimiterRegistryEventConsumer,
    @Qualifier("compositeRateLimiterCustomizer")
    CompositeCustomizer<RateLimiterConfigCustomizer> compositeRateLimiterCustomizer) {
    RateLimiterRegistry registry = createRateLimiterRegistry(
        rateLimiterProperties, rateLimiterRegistryEventConsumer, compositeRateLimiterCustomizer);
    registerEventConsumer(registry, rateLimiterEventsConsumerRegistry, rateLimiterProperties);
    rateLimiterProperties.getInstances().forEach((name, instanceProperties) ->
        registry.rateLimiter(
            name,
            rateLimiterProperties.createRateLimiterConfig(
                instanceProperties, compositeRateLimiterCustomizer, name)));
    return registry;
}
/**
 * Two configured instances must be eagerly registered with their per-instance
 * limits, the aspect order must be retained, and one event consumer must be
 * subscribed per instance that opted in.
 */
@Test
public void testRateLimiterRegistry() {
    io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties1 =
        new io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties();
    instanceProperties1.setLimitForPeriod(2);
    instanceProperties1.setSubscribeForEvents(true);
    io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties2 =
        new io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties();
    instanceProperties2.setLimitForPeriod(4);
    instanceProperties2.setSubscribeForEvents(true);
    RateLimiterConfigurationProperties rateLimiterConfigurationProperties = new RateLimiterConfigurationProperties();
    rateLimiterConfigurationProperties.getInstances().put("backend1", instanceProperties1);
    rateLimiterConfigurationProperties.getInstances().put("backend2", instanceProperties2);
    rateLimiterConfigurationProperties.setRateLimiterAspectOrder(300);
    RateLimiterConfiguration rateLimiterConfiguration = new RateLimiterConfiguration();
    DefaultEventConsumerRegistry<RateLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
    RateLimiterRegistry rateLimiterRegistry = rateLimiterConfiguration
        .rateLimiterRegistry(rateLimiterConfigurationProperties, eventConsumerRegistry,
            new CompositeRegistryEventConsumer<>(emptyList()), compositeRateLimiterCustomizerTest());
    assertThat(rateLimiterConfigurationProperties.getRateLimiterAspectOrder()).isEqualTo(300);
    assertThat(rateLimiterRegistry.getAllRateLimiters().size()).isEqualTo(2);
    RateLimiter rateLimiter = rateLimiterRegistry.rateLimiter("backend1");
    assertThat(rateLimiter).isNotNull();
    assertThat(rateLimiter.getRateLimiterConfig().getLimitForPeriod()).isEqualTo(2);
    RateLimiter rateLimiter2 = rateLimiterRegistry.rateLimiter("backend2");
    assertThat(rateLimiter2).isNotNull();
    assertThat(rateLimiter2.getRateLimiterConfig().getLimitForPeriod()).isEqualTo(4);
    assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2);
}
/**
 * Sends a single in-app notification to an admin-side user by delegating to the
 * generic single-notify path with the ADMIN user type.
 *
 * @return the id of the persisted notification message
 */
@Override
public Long sendSingleNotifyToAdmin(Long userId, String templateCode, Map<String, Object> templateParams) {
    return sendSingleNotify(userId, UserTypeEnum.ADMIN.getValue(), templateCode, templateParams);
}
/**
 * sendSingleNotifyToAdmin should resolve the template from cache, format its
 * content with the params, persist the message for the ADMIN user type, and
 * return the new message id.
 */
@Test
public void testSendSingleNotifyToAdmin() {
    // Prepare parameters
    Long userId = randomLongId();
    String templateCode = randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
        .put("op", "login").build();
    // Mock the NotifyTemplateService methods
    NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(notifyTemplateService.getNotifyTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String content = randomString();
    when(notifyTemplateService.formatNotifyTemplateContent(eq(template.getContent()), eq(templateParams)))
        .thenReturn(content);
    // Mock the NotifyMessageService methods
    Long messageId = randomLongId();
    when(notifyMessageService.createNotifyMessage(eq(userId), eq(UserTypeEnum.ADMIN.getValue()),
        eq(template), eq(content), eq(templateParams))).thenReturn(messageId);
    // Invoke
    Long resultMessageId = notifySendService.sendSingleNotifyToAdmin(userId, templateCode, templateParams);
    // Assert
    assertEquals(messageId, resultMessageId);
}
/**
 * FEEL ends-with: reports whether {@code string} ends with {@code match}.
 * Either argument being null yields an InvalidParametersEvent error result
 * instead of a boolean.
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "string" ) String string, @ParameterName( "match" ) String match) {
    if (string == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
    }
    if (match == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "match", "cannot be null"));
    }
    final boolean endsWithMatch = string.endsWith(match);
    return FEELFnResult.ofResult(endsWithMatch);
}
/**
 * Every null-argument combination must produce an InvalidParametersEvent error
 * result rather than a boolean or an exception.
 */
@Test
void invokeParamsNull() {
    FunctionTestUtil.assertResultError(endsWithFunction.invoke((String) null, null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(endsWithFunction.invoke(null, "test"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(endsWithFunction.invoke("test", null), InvalidParametersEvent.class);
}
/**
 * Validates that a String property is present and has non-whitespace content.
 *
 * @param value the property value to validate; may be null (which fails)
 * @param propertyName name used in the failure message
 * @throws NullPointerException if {@code value} is null
 * @throws IllegalArgumentException if {@code value} is empty or whitespace-only
 */
public static void checkNotNullAndNotEmpty(@Nullable String value, String propertyName) {
    final String nullMessage = "Property '" + propertyName + "' cannot be null";
    Preconditions.checkNotNull(value, nullMessage);
    // trim() keeps the original (non-Unicode-aware) whitespace semantics.
    final boolean hasContent = !value.trim().isEmpty();
    final String emptyMessage = "Property '" + propertyName + "' cannot be an empty string";
    Preconditions.checkArgument(hasContent, emptyMessage);
}
/**
 * A null collection must be rejected with an NPE whose message carries the
 * property name. Note: this exercises the Collection overload, not the String
 * overload shown above.
 */
@Test
public void testCheckNotEmpty_collectionFailNull() {
    try {
        Validator.checkNotNullAndNotEmpty((Collection<?>) null, "test");
        Assert.fail();
    } catch (NullPointerException npe) {
        Assert.assertEquals("Property 'test' cannot be null", npe.getMessage());
    }
}
/**
 * Returns the storage that holds replica {@code index} of this block.
 * The triplets array packs three slots per replica; slot 0 of each triple
 * is the DatanodeStorageInfo, hence the {@code index * 3} addressing.
 */
DatanodeStorageInfo getStorageInfo(int index) {
    assert this.triplets != null : "BlockInfo is not initialized";
    assert index >= 0 && index * 3 < triplets.length : "Index is out of bound";
    return (DatanodeStorageInfo)triplets[index * 3];
}
/**
 * Adding a block that already lives on another storage of the same datanode
 * should move (replace) it rather than report ADDED, and the block's storage
 * reference should afterwards point at the new storage.
 */
@Test
public void testReplaceStorage() throws Exception {
    // Create two dummy storages on the same datanode.
    final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo(
        "storageID1", "127.0.0.1");
    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(
        storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
    final int NUM_BLOCKS = 10;
    BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];
    // Create a few dummy blocks and add them to the first storage.
    for (int i = 0; i < NUM_BLOCKS; ++i) {
        blockInfos[i] = new BlockInfoContiguous((short) 3);
        storage1.addBlock(blockInfos[i]);
    }
    // Try to move one of the blocks to a different storage.
    boolean added = storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
    Assert.assertThat(added, is(false));
    Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
}
/**
 * Reacts to the local player performing the wine-making animation graphic:
 * when the ferment-timer option is enabled, resets the existing ferment timer
 * infobox if present, otherwise creates a new one. Graphics on other actors
 * are ignored.
 */
@Subscribe
public void onGraphicChanged(GraphicChanged graphicChanged) {
    Player player = client.getLocalPlayer();
    if (graphicChanged.getActor() != player) {
        return;
    }
    if (player.getGraphic() == GraphicID.WINE_MAKE && config.fermentTimer()) {
        Optional<FermentTimer> fermentTimerOpt = infoBoxManager.getInfoBoxes().stream()
            .filter(FermentTimer.class::isInstance)
            .map(FermentTimer.class::cast)
            .findAny();
        if (fermentTimerOpt.isPresent()) {
            // Making more wine while one is fermenting restarts the countdown.
            FermentTimer fermentTimer = fermentTimerOpt.get();
            fermentTimer.reset();
        } else {
            FermentTimer fermentTimer = new FermentTimer(itemManager.getImage(ItemID.JUG_OF_WINE), this);
            infoBoxManager.addInfoBox(fermentTimer);
        }
    }
}
/**
 * The wine-making graphic on the local player, with the timer enabled and no
 * existing infobox, must add a new FermentTimer infobox.
 */
@Test
public void testOnGraphicChanged() {
    Player player = mock(Player.class);
    when(player.getGraphic()).thenReturn(GraphicID.WINE_MAKE);
    when(config.fermentTimer()).thenReturn(true);
    when(client.getLocalPlayer()).thenReturn(player);
    GraphicChanged graphicChanged = new GraphicChanged();
    graphicChanged.setActor(player);
    cookingPlugin.onGraphicChanged(graphicChanged);
    verify(infoBoxManager).addInfoBox(any(FermentTimer.class));
}
/**
 * Returns the fully-qualified class name of the Microsoft SQL Server JDBC
 * driver used to open connections for this database type.
 */
@Override
public String getDriverClass() {
    final String driverClassName = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
    return driverClassName;
}
/**
 * The driver class must be the Microsoft SQL Server JDBC driver.
 */
@Test
public void testGetDriverClass() throws Exception {
    assertEquals( "com.microsoft.sqlserver.jdbc.SQLServerDriver", dbMeta.getDriverClass() );
}
/**
 * Returns true iff the service is still running and the given resource manager
 * is the one currently holding leadership. Reference equality is intentional:
 * leadership is tied to the exact RM instance. Caller must hold {@code lock}.
 */
@GuardedBy("lock")
private boolean isLeader(ResourceManager<?> resourceManager) {
    if (!running) {
        return false;
    }
    return this.leaderResourceManager == resourceManager;
}
/**
 * Granting leadership after the service has been closed must neither start a
 * new resource manager nor confirm the leadership.
 */
@Test
void grantLeadership_stopped_doesNotStartNewRm() throws Exception {
    final CompletableFuture<UUID> startRmFuture = new CompletableFuture<>();
    rmFactoryBuilder.setInitializeConsumer(startRmFuture::complete);
    createAndStartResourceManager();
    resourceManagerService.close();
    // grant leadership
    final CompletableFuture<LeaderInformation> confirmedLeaderInformation =
        leaderElection.isLeader(UUID.randomUUID());
    // service stopped, should not start new RM
    assertNotComplete(startRmFuture);
    assertNotComplete(confirmedLeaderInformation);
}
/**
 * Returns the bounding rectangle of the input geometry expanded by
 * {@code distance} on all four sides. NaN and negative distances are rejected
 * with INVALID_FUNCTION_ARGUMENT; an empty envelope stays an empty polygon.
 */
@Description("Returns the bounding rectangle of a Geometry expanded by distance.")
@ScalarFunction("expand_envelope")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice expandEnvelope(@SqlType(GEOMETRY_TYPE_NAME) Slice input, @SqlType(DOUBLE) double distance) {
    if (isNaN(distance)) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "expand_envelope: distance is NaN");
    }
    if (distance < 0) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("expand_envelope: distance %s is negative", distance));
    }
    Envelope envelope = deserializeEnvelope(input);
    if (envelope.isEmpty()) {
        return EMPTY_POLYGON;
    }
    return EsriGeometrySerde.serialize(new Envelope(
        envelope.getXMin() - distance,
        envelope.getYMin() - distance,
        envelope.getXMax() + distance,
        envelope.getYMax() + distance));
}
/**
 * Covers empty inputs, points, linestrings, geometry collections, infinite
 * distance, and the NaN/negative error paths of expand_envelope.
 */
@Test
public void testExpandEnvelope() {
    assertFunction("ST_IsEmpty(expand_envelope(ST_GeometryFromText('POINT EMPTY'), 1))", BOOLEAN, true);
    assertFunction("ST_IsEmpty(expand_envelope(ST_GeometryFromText('POLYGON EMPTY'), 1))", BOOLEAN, true);
    assertFunction("ST_AsText(expand_envelope(ST_Envelope(ST_Point(1, 10)), 3))", VARCHAR, "POLYGON ((-2 7, -2 13, 4 13, 4 7, -2 7))");
    assertFunction("ST_AsText(expand_envelope(ST_Point(1, 10), 3))", VARCHAR, "POLYGON ((-2 7, -2 13, 4 13, 4 7, -2 7))");
    assertFunction("ST_AsText(expand_envelope(ST_GeometryFromText('LINESTRING (1 10, 3 15)'), 2))", VARCHAR, "POLYGON ((-1 8, -1 17, 5 17, 5 8, -1 8))");
    assertFunction("ST_AsText(expand_envelope(ST_GeometryFromText('GEOMETRYCOLLECTION (POINT (5 1), LINESTRING (3 4, 4 4))'), 1))", VARCHAR, "POLYGON ((2 0, 2 5, 6 5, 6 0, 2 0))");
    // JTS has an envelope expanded by infinity to be empty, which is weird.
    // PostGIS returns an infinite envelope, which is a tricky concept.
    // We'll leave it like this until it becomes a problem.
    assertFunction("ST_AsText(expand_envelope(ST_Point(0, 0), infinity()))", VARCHAR, "POLYGON EMPTY");
    assertInvalidFunction("ST_AsText(expand_envelope(ST_Point(0, 0), nan()))", "expand_envelope: distance is NaN");
    assertInvalidFunction("ST_AsText(expand_envelope(ST_Point(0, 0), -1))", "expand_envelope: distance -1.0 is negative");
    assertInvalidFunction("ST_AsText(expand_envelope(ST_Point(0, 0), -infinity()))", "expand_envelope: distance -Infinity is negative");
}
/**
 * Creates an AvroSchema from the given definition. When both a custom schema
 * reader and writer are supplied they are used directly; otherwise the schema
 * is built reflectively, resolving classes via the explicitly configured class
 * loader or, failing that, the POJO's own class loader (may remain null).
 */
public static <T> AvroSchema<T> of(SchemaDefinition<T> schemaDefinition) {
    if (schemaDefinition.getSchemaReaderOpt().isPresent() && schemaDefinition.getSchemaWriterOpt().isPresent()) {
        return new AvroSchema<>(schemaDefinition.getSchemaReaderOpt().get(),
            schemaDefinition.getSchemaWriterOpt().get(),
            parseSchemaInfo(schemaDefinition, SchemaType.AVRO));
    }
    ClassLoader pojoClassLoader = null;
    if (schemaDefinition.getClassLoader() != null) {
        // Explicit class loader takes precedence over the POJO's loader.
        pojoClassLoader = schemaDefinition.getClassLoader();
    } else if (schemaDefinition.getPojo() != null) {
        pojoClassLoader = schemaDefinition.getPojo().getClassLoader();
    }
    return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader);
}
/**
 * getNativeSchema must expose the very same underlying Avro Schema instance
 * that the AvroSchema wrapper holds.
 */
@Test
public void testGetNativeSchema() throws SchemaValidationException {
    AvroSchema<StructWithAnnotations> schema2 = AvroSchema.of(StructWithAnnotations.class);
    org.apache.avro.Schema avroSchema2 = (Schema) schema2.getNativeSchema().get();
    assertSame(schema2.schema, avroSchema2);
}
/**
 * Verifies the signature and registered claims of an already-decoded JWT
 * against the given public key and its advertised algorithm name. Every
 * failure mode increments a specific failure metric and is rethrown as an
 * AuthenticationException so callers see a single exception type.
 *
 * @param publicKey RSA or EC public key matching {@code publicKeyAlg}
 * @param publicKeyAlg JWS algorithm name (RS256/384/512, ES256/384/512)
 * @param jwt the decoded token to verify
 * @return the verified token
 * @throws AuthenticationException on any unsupported algorithm, key/alg
 *         mismatch, expired token, bad signature, or invalid claim
 */
DecodedJWT verifyJWT(PublicKey publicKey, String publicKeyAlg, DecodedJWT jwt) throws AuthenticationException {
    if (publicKeyAlg == null) {
        incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
        throw new AuthenticationException("PublicKey algorithm cannot be null");
    }
    Algorithm alg;
    try {
        // The casts narrow the key to RSA vs EC; a wrong key type surfaces as
        // ClassCastException, handled below as an algorithm mismatch.
        switch (publicKeyAlg) {
            case ALG_RS256:
                alg = Algorithm.RSA256((RSAPublicKey) publicKey, null);
                break;
            case ALG_RS384:
                alg = Algorithm.RSA384((RSAPublicKey) publicKey, null);
                break;
            case ALG_RS512:
                alg = Algorithm.RSA512((RSAPublicKey) publicKey, null);
                break;
            case ALG_ES256:
                alg = Algorithm.ECDSA256((ECPublicKey) publicKey, null);
                break;
            case ALG_ES384:
                alg = Algorithm.ECDSA384((ECPublicKey) publicKey, null);
                break;
            case ALG_ES512:
                alg = Algorithm.ECDSA512((ECPublicKey) publicKey, null);
                break;
            default:
                incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
                throw new AuthenticationException("Unsupported algorithm: " + publicKeyAlg);
        }
    } catch (ClassCastException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
        throw new AuthenticationException("Expected PublicKey alg [" + publicKeyAlg + "] does match actual alg.");
    }
    // We verify issuer when retrieving the PublicKey, so it is not verified here.
    // The claim presence requirements are based on https://openid.net/specs/openid-connect-basic-1_0.html#IDToken
    Verification verifierBuilder = JWT.require(alg)
        .acceptLeeway(acceptedTimeLeewaySeconds)
        .withAnyOfAudience(allowedAudiences)
        .withClaimPresence(RegisteredClaims.ISSUED_AT)
        .withClaimPresence(RegisteredClaims.EXPIRES_AT)
        .withClaimPresence(RegisteredClaims.NOT_BEFORE)
        .withClaimPresence(RegisteredClaims.SUBJECT);
    if (isRoleClaimNotSubject) {
        // When the role comes from a custom claim, that claim must be present too.
        verifierBuilder = verifierBuilder.withClaimPresence(roleClaim);
    }
    JWTVerifier verifier = verifierBuilder.build();
    try {
        return verifier.verify(jwt);
    } catch (TokenExpiredException e) {
        incrementFailureMetric(AuthenticationExceptionCode.EXPIRED_JWT);
        throw new AuthenticationException("JWT expired: " + e.getMessage());
    } catch (SignatureVerificationException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT_SIGNATURE);
        throw new AuthenticationException("JWT signature verification exception: " + e.getMessage());
    } catch (InvalidClaimException e) {
        incrementFailureMetric(AuthenticationExceptionCode.INVALID_JWT_CLAIM);
        throw new AuthenticationException("JWT contains invalid claim: " + e.getMessage());
    } catch (AlgorithmMismatchException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
        throw new AuthenticationException("JWT algorithm does not match Public Key algorithm: " + e.getMessage());
    } catch (JWTDecodeException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
        throw new AuthenticationException("Error while decoding JWT: " + e.getMessage());
    } catch (JWTVerificationException | IllegalArgumentException e) {
        incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT);
        throw new AuthenticationException("JWT verification failed: " + e.getMessage());
    }
}
/**
 * A token whose not-before (nbf) claim lies in the future must be rejected
 * with an AuthenticationException.
 */
@Test
public void ensureFutureNBFFails() throws Exception {
    KeyPair keyPair = Keys.keyPairFor(SignatureAlgorithm.RS256);
    DefaultJwtBuilder defaultJwtBuilder = new DefaultJwtBuilder();
    addValidMandatoryClaims(defaultJwtBuilder, basicProviderAudience);
    // Override the exp set in the above method
    defaultJwtBuilder.setNotBefore(Date.from(Instant.now().plusSeconds(60)));
    defaultJwtBuilder.signWith(keyPair.getPrivate());
    DecodedJWT jwt = JWT.decode(defaultJwtBuilder.compact());
    Assert.assertThrows(AuthenticationException.class,
        () -> basicProvider.verifyJWT(keyPair.getPublic(), SignatureAlgorithm.RS256.getValue(), jwt));
}
/**
 * Handles the subscribe control action. Subscription details are taken from a
 * DynamicRouterControlMessage body when present, otherwise from the message
 * headers. The resulting filter id replaces the message body, and the callback
 * is completed synchronously.
 */
@InvokeOnHeader(CONTROL_ACTION_SUBSCRIBE)
public void performSubscribe(final Message message, AsyncCallback callback) {
    final boolean bodyIsControlMessage = message.getBody() instanceof DynamicRouterControlMessage;
    final String filterId = bodyIsControlMessage
            ? subscribeFromMessage(dynamicRouterControlService, message, false)
            : subscribeFromHeaders(dynamicRouterControlService, message, false);
    message.setBody(filterId);
    callback.done(false);
}
/**
 * A control message in the body carrying a predicate bean name must be routed
 * to subscribeWithPredicateBean with all of its fields.
 */
@Test
void performSubscribeActionWithMessageInBodyAndPredicateBean() {
    String subscribeChannel = "testChannel";
    DynamicRouterControlMessage subMsg = DynamicRouterControlMessage.Builder.newBuilder()
        .subscribeChannel(subscribeChannel)
        .subscriptionId("testId")
        .destinationUri("mock://test")
        .priority(10)
        .predicateBean("testPredicate")
        .build();
    when(message.getBody()).thenReturn(subMsg);
    when(message.getBody(DynamicRouterControlMessage.class)).thenReturn(subMsg);
    Mockito.doNothing().when(callback).done(false);
    producer.performSubscribe(message, callback);
    Mockito.verify(controlService, Mockito.times(1))
        .subscribeWithPredicateBean(
            subscribeChannel, "testId", "mock://test", 10, "testPredicate", false);
}
/**
 * Returns a Flowable that, on the first downstream request of n items,
 * registers a topic listener, replays up to n messages, then removes the
 * listener and completes.
 *
 * <p>Bug fix: the original called {@code p.doOnCancel(...)} inside
 * {@code whenComplete} and discarded the returned Flowable, so the cancel
 * hook never attached and the Redis listener leaked when the subscriber
 * cancelled early. The listener id is now stored in a holder and the
 * cancel hook is attached to the Flowable that is actually returned.
 *
 * @param type message payload class
 * @return a cold Flowable of up to n messages, where n is the first request
 */
public <M> Flowable<M> getMessages(Class<M> type) {
    ReplayProcessor<M> p = ReplayProcessor.create();
    // Holds the registered listener id; Long.MIN_VALUE means "not registered yet".
    // AtomicLong is used (rather than AtomicReference) to avoid new imports.
    AtomicLong listenerId = new AtomicLong(Long.MIN_VALUE);
    return p.doOnRequest(new LongConsumer() {
        @Override
        public void accept(long n) throws Exception {
            AtomicLong counter = new AtomicLong(n);
            RFuture<Integer> t = topic.addListenerAsync(type, new MessageListener<M>() {
                @Override
                public void onMessage(CharSequence channel, M msg) {
                    p.onNext(msg);
                    if (counter.decrementAndGet() == 0) {
                        // Requested amount delivered: detach and complete.
                        topic.removeListenerAsync(this);
                        p.onComplete();
                    }
                }
            });
            t.whenComplete((id, e) -> {
                if (e != null) {
                    p.onError(e);
                    return;
                }
                listenerId.set(id);
            });
        }
    }).doOnCancel(new Action() {
        @Override
        public void run() throws Exception {
            long registered = listenerId.get();
            if (registered != Long.MIN_VALUE) {
                // Downstream cancelled: release the Redis listener.
                topic.removeListenerAsync((int) registered);
            }
        }
    });
}
/**
 * A subscriber requesting 10 items must receive exactly the first 10 published
 * messages even though 15 are published; the stream completes after the 10th.
 */
@Test
public void testLong() throws InterruptedException {
    RTopicRx topic = redisson.getTopic("test");
    Flowable<String> messages = topic.getMessages(String.class);
    List<String> list = new ArrayList<>();
    messages.subscribe(new Subscriber<String>() {
        @Override
        public void onSubscribe(Subscription s) {
            s.request(10);
        }
        @Override
        public void onNext(String t) {
            list.add(t);
        }
        @Override
        public void onError(Throwable t) {
        }
        @Override
        public void onComplete() {
        }
    });
    for (int i = 0; i < 15; i++) {
        sync(topic.publish("" + i));
    }
    Awaitility.waitAtMost(Duration.ofSeconds(10)).until(() -> list.equals(Arrays.asList("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")));
}
/**
 * Intercepts the {@code @Extension} annotation during ASM class visiting and
 * harvests its "ordinal", "plugins" and "points" attributes into
 * {@code extensionInfo}. All other annotations fall through to the default
 * visitor unchanged.
 */
@Override
public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) {
    //if (!descriptor.equals("Lorg/pf4j/Extension;")) {
    if (!Type.getType(descriptor).getClassName().equals(Extension.class.getName())) {
        return super.visitAnnotation(descriptor, visible);
    }
    return new AnnotationVisitor(ASM_VERSION) {
        @Override
        public AnnotationVisitor visitArray(final String name) {
            if ("ordinal".equals(name) || "plugins".equals(name) || "points".equals(name)) {
                return new AnnotationVisitor(ASM_VERSION, super.visitArray(name)) {
                    @Override
                    public void visit(String key, Object value) {
                        log.debug("Load annotation attribute {} = {} ({})", name, value, value.getClass().getName());
                        if ("ordinal".equals(name)) {
                            extensionInfo.ordinal = Integer.parseInt(value.toString());
                        } else if ("plugins".equals(name)) {
                            // Plugin ids may arrive as a single String, a String[]
                            // or some other object; normalize all to strings.
                            if (value instanceof String) {
                                log.debug("Found plugin {}", value);
                                extensionInfo.plugins.add((String) value);
                            } else if (value instanceof String[]) {
                                log.debug("Found plugins {}", Arrays.toString((String[]) value));
                                extensionInfo.plugins.addAll(Arrays.asList((String[]) value));
                            } else {
                                log.debug("Found plugin {}", value.toString());
                                extensionInfo.plugins.add(value.toString());
                            }
                        } else {
                            // "points" entries are ASM Type objects for classes.
                            String pointClassName = ((Type) value).getClassName();
                            log.debug("Found point " + pointClassName);
                            extensionInfo.points.add(pointClassName);
                        }
                        super.visit(key, value);
                    }
                };
            }
            return super.visitArray(name);
        }
    };
}
/**
 * Visiting an "ordinal" array entry on the @Extension annotation must parse
 * the value into ExtensionInfo.ordinal.
 */
@Test
void visitArrayShouldHandleOrdinalAttribute() {
    ExtensionInfo extensionInfo = new ExtensionInfo("org.pf4j.asm.ExtensionInfo");
    ClassVisitor extensionVisitor = new ExtensionVisitor(extensionInfo);
    AnnotationVisitor annotationVisitor = extensionVisitor.visitAnnotation("Lorg/pf4j/Extension;", true);
    AnnotationVisitor arrayVisitor = annotationVisitor.visitArray("ordinal");
    arrayVisitor.visit("key", 1);
    assertEquals(1, extensionInfo.getOrdinal());
}
/**
 * Applies worker logging options on top of an already-initialized logging
 * setup: resolves the default log level (deprecated DataflowWorkerLoggingOptions
 * win over SdkHarnessOptions when explicitly set), applies per-logger
 * overrides, and optionally re-routes System.out/err through the JUL handler
 * at configured levels. Warns once if any deprecated option was used.
 *
 * @throws RuntimeException if called before initialize()
 */
public static synchronized void configure(DataflowWorkerLoggingOptions options) {
    if (!initialized) {
        throw new RuntimeException("configure() called before initialize()");
    }
    // For compatibility reason, we do not call SdkHarnessOptions.getConfiguredLoggerFromOptions
    // to config the logging for legacy worker, instead replicate the config steps used for
    // DataflowWorkerLoggingOptions for default log level and log level overrides.
    SdkHarnessOptions harnessOptions = options.as(SdkHarnessOptions.class);
    boolean usedDeprecated = false;
    // default value for both DefaultSdkHarnessLogLevel and DefaultWorkerLogLevel are INFO
    Level overrideLevel = getJulLevel(harnessOptions.getDefaultSdkHarnessLogLevel());
    if (options.getDefaultWorkerLogLevel() != null && options.getDefaultWorkerLogLevel() != INFO) {
        overrideLevel = getJulLevel(options.getDefaultWorkerLogLevel());
        usedDeprecated = true;
    }
    LogManager.getLogManager().getLogger(ROOT_LOGGER_NAME).setLevel(overrideLevel);
    // Deprecated per-logger overrides take precedence over harness overrides;
    // configured loggers are retained so they are not garbage collected.
    if (options.getWorkerLogLevelOverrides() != null) {
        for (Map.Entry<String, DataflowWorkerLoggingOptions.Level> loggerOverride :
            options.getWorkerLogLevelOverrides().entrySet()) {
            Logger logger = Logger.getLogger(loggerOverride.getKey());
            logger.setLevel(getJulLevel(loggerOverride.getValue()));
            configuredLoggers.add(logger);
        }
        usedDeprecated = true;
    } else if (harnessOptions.getSdkHarnessLogLevelOverrides() != null) {
        for (Map.Entry<String, SdkHarnessOptions.LogLevel> loggerOverride :
            harnessOptions.getSdkHarnessLogLevelOverrides().entrySet()) {
            Logger logger = Logger.getLogger(loggerOverride.getKey());
            logger.setLevel(getJulLevel(loggerOverride.getValue()));
            configuredLoggers.add(logger);
        }
    }
    // If the options specify a level for messages logged to System.out/err, we need to reconfigure
    // the corresponding stream adapter.
    if (options.getWorkerSystemOutMessageLevel() != null) {
        System.out.close();
        System.setOut(
            JulHandlerPrintStreamAdapterFactory.create(
                loggingHandler,
                SYSTEM_OUT_LOG_NAME,
                getJulLevel(options.getWorkerSystemOutMessageLevel()),
                Charset.defaultCharset()));
    }
    if (options.getWorkerSystemErrMessageLevel() != null) {
        System.err.close();
        System.setErr(
            JulHandlerPrintStreamAdapterFactory.create(
                loggingHandler,
                SYSTEM_ERR_LOG_NAME,
                getJulLevel(options.getWorkerSystemErrMessageLevel()),
                Charset.defaultCharset()));
    }
    if (usedDeprecated) {
        LOG.warn(
            "Deprecated DataflowWorkerLoggingOptions are used for log level settings."
                + "Consider using options defined in SdkHarnessOptions for forward compatibility.");
    }
}
/**
 * Setting the System.out message level must route println output through the
 * logging handler at the configured WARN level.
 */
@Test
public void testSystemOutLevelOverrides() throws IOException {
    DataflowWorkerLoggingOptions options =
        PipelineOptionsFactory.as(DataflowWorkerLoggingOptions.class);
    options.setWorkerSystemOutMessageLevel(DataflowWorkerLoggingOptions.Level.WARN);
    DataflowWorkerLoggingInitializer.configure(options.as(DataflowWorkerLoggingOptions.class));
    System.out.println("foobar");
    verifyLogOutput("WARN");
}
/**
 * Creates (if necessary) and returns an output file named
 * {@code baseFileName.yyyy-MM-dd_HH-mm-ss} under {@code path}, creating the
 * directory first.
 *
 * @param path target directory (created if missing)
 * @param baseFileName file name prefix; a timestamp suffix is appended
 * @return the existing or newly created file
 * @throws IOException if the file cannot be created
 */
protected File getOutputFile(final String path, final String baseFileName) throws IOException {
    makeDir(path);
    // The SimpleDateFormat instance is method-local, so thread safety is not a concern.
    final String timestamp = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date());
    final File outputFile = Paths.get(path, baseFileName + "." + timestamp).toFile();
    final boolean alreadyExists = outputFile.exists();
    if (!alreadyExists && !outputFile.createNewFile()) {
        throw new IOException("Fail to create file: " + outputFile);
    }
    return outputFile;
}
/**
 * An empty path must still yield a created file (relative to the working
 * directory); clean it up afterwards.
 */
@Test
public void testGetOutputFileWithEmptyPath() throws IOException {
    final File f = getOutputFile("", "test1.log");
    assertTrue(f.exists());
    FileUtils.forceDelete(f);
}
/**
 * Loads the section keyed by {@code configKey} from the configured YAML file.
 * Returns null when no file path is configured, the file cannot be read, the
 * file is empty, or the key is absent.
 *
 * <p>Bug fix: the caught exception is now passed to the logger so the failure
 * cause is no longer silently dropped (SLF4J treats a trailing Throwable
 * argument as the stack trace to log).
 *
 * @param configKey top-level key to extract from the YAML document
 * @return the nested map for the key, or null on any failure
 */
@Override
public Map<String, Object> load(String configKey) {
    if (targetFilePath != null) {
        try {
            Map<String, Object> raw = (Map<String, Object>) Utils.readYamlFile(targetFilePath);
            if (raw != null) {
                return (Map<String, Object>) raw.get(configKey);
            }
        } catch (Exception e) {
            // Include the exception so the root cause appears in the log.
            LOG.error("Failed to load from file {}", targetFilePath, e);
        }
    }
    return null;
}
/**
 * A non-existent target file must make load() return null rather than throw.
 */
@Test
public void testFileNotThere() {
    Config conf = new Config();
    conf.put(DaemonConfig.SCHEDULER_CONFIG_LOADER_URI, FILE_SCHEME_PREFIX + "/file/not/exist/");
    FileConfigLoader testLoader = new FileConfigLoader(conf);
    Map<String, Object> result = testLoader.load(DaemonConfig.MULTITENANT_SCHEDULER_USER_POOLS);
    assertNull(result, "Unexpectedly returned a map");
}
@Udf(description = "Splits a string into an array of substrings based on a delimiter.") public List<String> split( @UdfParameter( description = "The string to be split. If NULL, then function returns NULL.") final String string, @UdfParameter( description = "The delimiter to split a string by. If NULL, then function returns NULL.") final String delimiter) { if (string == null || delimiter == null) { return null; } // Java split() accepts regular expressions as a delimiter, but the behavior of this UDF split() // is to accept only literal strings. This method uses Guava Splitter instead, which does not // accept any regex pattern. This is to avoid a confusion to users when splitting by regex // special characters, such as '.' and '|'. try { // Guava Splitter does not accept empty delimiters. Use the Java split() method instead. if (delimiter.isEmpty()) { return Arrays.asList(EMPTY_DELIMITER.split(string)); } else { return Splitter.on(delimiter).splitToList(string); } } catch (final Exception e) { throw new KsqlFunctionException( String.format("Invalid delimiter '%s' in the split() function.", delimiter), e); } }
/**
 * Adjacent delimiters (including leading/trailing ones) must produce empty
 * string elements rather than being collapsed.
 */
@Test
public void shouldSplitAndAddEmptySpacesIfDelimiterStringIsFoundInContiguousPositions() {
    assertThat(splitUdf.split("A||A", "|"), contains("A", "", "A"));
    assertThat(splitUdf.split("z||A||z", "|"), contains("z", "", "A", "", "z"));
    assertThat(splitUdf.split("||A||A", "|"), contains("", "", "A", "", "A"));
    assertThat(splitUdf.split("A||A||", "|"), contains("A", "", "A", "", ""));
}
/**
 * Resolves the OAuth2 identity provider addressed by the callback path and,
 * when one is found, delegates callback handling to it. When resolution fails,
 * the resolver itself has already written the response, so nothing further
 * is done here.
 */
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) {
    final IdentityProvider provider = resolveProviderOrHandleResponse(request, response, CALLBACK_PATH);
    if (provider == null) {
        return;
    }
    handleProvider(request, response, provider);
}
/**
 * A callback URI matching a registered OAuth2 provider must invoke that
 * provider's callback and record a login-success authentication event.
 */
@Test
public void do_filter_on_auth2_identity_provider() {
    when(request.getRequestURI()).thenReturn("/oauth2/callback/" + OAUTH2_PROVIDER_KEY);
    identityProviderRepository.addIdentityProvider(oAuth2IdentityProvider);
    when(threadLocalUserSession.hasSession()).thenReturn(true);
    when(threadLocalUserSession.getLogin()).thenReturn(LOGIN);
    underTest.doFilter(request, response, chain);
    assertCallbackCalled(oAuth2IdentityProvider);
    verify(authenticationEvent).loginSuccess(request, LOGIN, Source.oauth2(oAuth2IdentityProvider));
}
/**
 * Intentional no-op: this implementation discards super properties entirely.
 * NOTE(review): presumably this is the disabled/empty variant of the SDK API —
 * confirm against the enabled implementation before relying on it.
 */
@Override
public void registerSuperProperties(JSONObject superProperties) {
}
/**
 * Registering a super property on the disabled API must leave the stored
 * super properties empty.
 */
@Test
public void registerSuperProperties() {
    JSONObject jsonObject = new JSONObject();
    try {
        jsonObject.put("super", "super");
    } catch (JSONException e) {
        e.printStackTrace();
    }
    mSensorsAPI.registerSuperProperties(jsonObject);
    Assert.assertEquals(0, mSensorsAPI.getSuperProperties().length());
}
/**
 * Returns true if the code point is punctuation: any Unicode general category
 * "P" (punctuation) or "S" (symbol), plus a handful of ASCII symbol characters
 * listed explicitly. NOTE(review): this appears to track CommonMark's
 * punctuation definition (the test below cites the spec) — confirm against the
 * target spec version before changing the character set.
 */
public static boolean isPunctuationCodePoint(int codePoint) {
    switch (Character.getType(codePoint)) {
        // General category "P" (punctuation)
        case Character.DASH_PUNCTUATION:
        case Character.START_PUNCTUATION:
        case Character.END_PUNCTUATION:
        case Character.CONNECTOR_PUNCTUATION:
        case Character.OTHER_PUNCTUATION:
        case Character.INITIAL_QUOTE_PUNCTUATION:
        case Character.FINAL_QUOTE_PUNCTUATION:
        // General category "S" (symbol)
        case Character.MATH_SYMBOL:
        case Character.CURRENCY_SYMBOL:
        case Character.MODIFIER_SYMBOL:
        case Character.OTHER_SYMBOL:
            return true;
        default:
            // ASCII symbols not covered by the categories above.
            switch (codePoint) {
                case '$':
                case '+':
                case '<':
                case '=':
                case '>':
                case '^':
                case '`':
                case '|':
                case '~':
                    return true;
                default:
                    return false;
            }
    }
}
/**
 * Every ASCII punctuation character from the CommonMark spec must be
 * classified as punctuation.
 */
@Test
public void isPunctuation() {
    // From https://spec.commonmark.org/0.29/#ascii-punctuation-character
    char[] chars = {
        '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', // (U+0021–2F)
        ':', ';', '<', '=', '>', '?', '@', // (U+003A–0040)
        '[', '\\', ']', '^', '_', '`', // (U+005B–0060)
        '{', '|', '}', '~' // (U+007B–007E)
    };
    for (char c : chars) {
        assertTrue("Expected to be punctuation: " + c, Characters.isPunctuationCodePoint(c));
    }
}
/**
 * Maps a protobuf NamenodeRoleProto onto the internal NamenodeRole enum.
 * Returns null for any value the switch does not cover (e.g. if the proto
 * enum gains new values) — callers should treat null as "unknown role".
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
        case NAMENODE:
            return NamenodeRole.NAMENODE;
        case BACKUP:
            return NamenodeRole.BACKUP;
        case CHECKPOINT:
            return NamenodeRole.CHECKPOINT;
    }
    return null;
}
/**
 * Round-trips a RecoveringBlock through its protobuf form and checks that the
 * block, location count, and every location survive the conversion.
 *
 * <p>Bug fix: the comparison loop previously compared {@code dnInfo[0]} with
 * {@code dnInfo1[0]} on every iteration, so only the first location was ever
 * verified; it now uses the loop index.
 */
@Test
public void testConvertRecoveringBlock() {
    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
    RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
    RecoveringBlockProto bProto = PBHelper.convert(b);
    RecoveringBlock b1 = PBHelper.convert(bProto);
    assertEquals(b.getBlock(), b1.getBlock());
    DatanodeInfo[] dnInfo1 = b1.getLocations();
    assertEquals(dnInfo.length, dnInfo1.length);
    for (int i = 0; i < dnInfo.length; i++) {
        // Compare each corresponding element, not element 0 repeatedly.
        compare(dnInfo[i], dnInfo1[i]);
    }
}
/**
 * Reports whether this diff detected a change within the spec template.
 *
 * @return true if the spec template differs between the compared resources
 */
public boolean changesSpecTemplate() {
    return this.changesSpecTemplate;
}
/**
 * Differences confined to volume defaultMode inside spec.template must be
 * ignored by the diff (not reported as a template change).
 */
@Test
public void testSpecVolumesIgnored() {
    StatefulSet ss1 = new StatefulSetBuilder()
        .withNewMetadata()
            .withNamespace("test")
            .withName("foo")
        .endMetadata()
        .withNewSpec().
            withNewTemplate()
                .withNewSpec()
                    .addToVolumes(0, new VolumeBuilder()
                        .withConfigMap(new ConfigMapVolumeSourceBuilder().withDefaultMode(1).build())
                        .build())
                .endSpec()
            .endTemplate()
        .endSpec()
        .build();
    StatefulSet ss2 = new StatefulSetBuilder()
        .withNewMetadata()
            .withNamespace("test")
            .withName("foo")
        .endMetadata()
        .withNewSpec()
            .withNewTemplate()
                .withNewSpec()
                    .addToVolumes(0, new VolumeBuilder()
                        .withConfigMap(new ConfigMapVolumeSourceBuilder().withDefaultMode(2).build())
                        .build())
                .endSpec()
            .endTemplate()
        .endSpec()
        .build();
    assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesSpecTemplate(), is(false));
}
/**
 * Creates the committer for the given output path by first resolving the
 * committer factory from the task configuration, then asking it for a
 * committer bound to the path and task context.
 *
 * @param outputPath destination of the job output (may be null for some factories)
 * @param context task attempt whose configuration selects the factory
 * @return the committer instance
 * @throws IOException if the factory cannot create the committer
 */
public static PathOutputCommitter createCommitter(Path outputPath, TaskAttemptContext context) throws IOException {
    return getCommitterFactory(outputPath, context.getConfiguration())
        .createOutputCommitter(outputPath, context);
}
/**
 * A null output path must be tolerated: the created committer reports null
 * output and work paths instead of failing.
 */
@Test
public void testCommitterNullOutputPath() throws Throwable {
    // bind http to schema
    Configuration conf = newBondedConfiguration();
    // then ask committers for a null path
    FileOutputCommitter committer = createCommitter(
        FileOutputCommitterFactory.class,
        FileOutputCommitter.class,
        null, conf);
    assertNull(committer.getOutputPath());
    assertNull(committer.getWorkPath());
}
/**
 * Removes every value stored under {@code key} and returns them, by executing
 * the async removal and blocking on its result.
 *
 * @param key the multimap key to clear
 * @return the values that were removed (empty when the key was absent)
 */
@Override
public List<V> removeAll(Object key) {
    final List<V> removedValues = (List<V>) get(removeAllAsync(key));
    return removedValues;
}
/**
 * removeAll must return the removed values (duplicates included), leave the
 * multimap empty for that key, and return an empty list on a second call.
 */
@Test
public void testRemoveAll() {
    RListMultimap<String, String> map = redisson.getListMultimap("test1");
    map.put("0", "1");
    map.put("0", "1");
    map.put("0", "2");
    map.put("0", "3");
    RList<String> set = map.get("0");
    // Removing absent values is a no-op.
    set.removeAll(Arrays.asList("4", "5"));
    assertThat(map.size()).isEqualTo(4);
    set.removeAll(Arrays.asList("3"));
    assertThat(map.size()).isEqualTo(3);
    List<String> values = map.removeAll("0");
    assertThat(values).containsExactly("1", "1", "2");
    assertThat(map.size()).isZero();
    List<String> values2 = map.removeAll("0");
    assertThat(values2).isEmpty();
}
/**
 * Best-effort setter for a JMS string property: failures are logged and
 * swallowed (after rethrowing fatal errors) rather than propagated, so that
 * tracing instrumentation never breaks message delivery.
 */
static void setStringProperty(Message message, String name, String value) {
    try {
        message.setStringProperty(name, value);
    } catch (Throwable t) {
        // Rethrow VM-fatal errors; anything else is logged and ignored.
        propagateIfFatal(t);
        log(t, "error setting property {0} on message {1}", name, message);
    }
}
/**
 * The property must be stored on the message and readable back as a string.
 */
@Test
void setStringProperty() throws Exception {
    MessageProperties.setStringProperty(message, "b3", "1");
    assertThat(message.getObjectProperty("b3"))
        .isEqualTo("1");
}
/**
 * Expands a filepattern containing a single "@N" shard specification into N
 * concrete names of the form "-0000i-of-0000N" (shard width derived from N).
 * A pattern with no shard spec is returned unchanged; more than one spec is
 * rejected with IllegalArgumentException.
 */
public static Iterable<String> expandAtNFilepattern(String filepattern) {
    ImmutableList.Builder<String> builder = ImmutableList.builder();
    Matcher match = AT_N_SPEC.matcher(filepattern);
    if (!match.find()) {
        builder.add(filepattern);
    } else {
        int numShards = Integer.parseInt(match.group("N"));
        String formatString = "-%0" + getShardWidth(numShards, filepattern) + "d-of-%05d";
        for (int i = 0; i < numShards; ++i) {
            builder.add(
                AT_N_SPEC.matcher(filepattern).replaceAll(String.format(formatString, i, numShards)));
        }
        // A second match means the pattern had more than one @N spec.
        if (match.find()) {
            throw new IllegalArgumentException(
                "More than one @N wildcard found in filepattern: " + filepattern);
        }
    }
    return builder.build();
}
/**
 * A "@" not followed by a number (e.g. "@google") is not a shard spec and the
 * pattern must pass through unchanged.
 */
@Test
public void testExpandAtNFilepatternNoPattern() throws Exception {
    assertThat(
        Filepatterns.expandAtNFilepattern("gs://bucket/object@google.ism"),
        contains("gs://bucket/object@google.ism"));
}
@VisibleForTesting @SuppressWarnings("nullness") // ok to have nullable elements on stream static String renderName(String prefix, MetricResult<?> metricResult) { MetricKey key = metricResult.getKey(); MetricName name = key.metricName(); String step = key.stepName(); return Streams.concat( Stream.of(prefix), Stream.of(stripSuffix(normalizePart(step))), Stream.of(name.getNamespace(), name.getName()).map(SparkBeamMetric::normalizePart)) .filter(not(Strings::isNullOrEmpty)) .collect(Collectors.joining(".")); }
@Test
public void testRenderNameWithPrefix() {
    MetricKey metricKey =
        MetricKey.create("myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()"));
    MetricResult<Object> metricResult = MetricResult.create(metricKey, 123, 456);

    // Illegal characters in every part are replaced with underscores and the prefix is prepended.
    String renderedName = SparkBeamMetric.renderName("prefix", metricResult);

    assertThat(
        "Metric name was not rendered correctly",
        renderedName,
        equalTo("prefix.myStep_one_two_three.myNameSpace__.myName__"));
}
/**
 * Builds a {@link PatchRequest} from the modifications recorded so far.
 *
 * @return a patch request wrapping the generated patch tree's underlying data map
 */
public PatchRequest<T> generatePatchRequest() {
    return PatchRequest.createFromPatchDocument(generatePatchTree().getDataMap());
}
@Test
public void testPatchGenerateAndPatchRequestRecorderGenerateIdenticalPatches()
    throws CloneNotSupportedException {
    // Build two records that differ only in id and message.
    TestRecord t1 = new TestRecord();
    TestRecord t2 = new TestRecord(t1.data().copy());
    t2.setId(1L);
    t2.setMessage("Foo Bar Baz");

    // Path 1: diff the two records directly.
    PatchRequest<TestRecord> patchFromGenerator = PatchGenerator.diff(t1, t2);

    // Path 2: replay the same mutations through the recording proxy.
    PatchRequestRecorder<TestRecord> patchRecorder = new PatchRequestRecorder<>(TestRecord.class);
    patchRecorder.getRecordingProxy().setId(1L).setMessage("Foo Bar Baz");
    PatchRequest<TestRecord> patchFromRecorder = patchRecorder.generatePatchRequest();

    // Both paths must produce the same patch document.
    Assert.assertEquals(patchFromRecorder.getPatchDocument(), patchFromGenerator.getPatchDocument());
}
/**
 * Opens a writer that streams Ism records into the sink's resource.
 *
 * @return a writer over a freshly created binary channel for {@code resourceId};
 *     the caller owns the writer and is responsible for closing it
 * @throws IOException if the underlying channel cannot be created
 */
@Override
public SinkWriter<WindowedValue<IsmRecord<V>>> writer() throws IOException {
    return new IsmSinkWriter(FileSystems.create(resourceId, MimeTypes.BINARY));
}
@Test
public void testWriteKeyWhichIsProperPrefixOfPreviousSecondaryKeyIsError() throws Throwable {
    IsmSink<byte[]> sink =
        new IsmSink<>(
            FileSystems.matchNewResource(tmpFolder.newFile().getPath(), false),
            CODER,
            BLOOM_FILTER_SIZE_LIMIT);
    SinkWriter<WindowedValue<IsmRecord<byte[]>>> sinkWriter = sink.writer();

    // First write: secondary key {0x00, 0x00}.
    sinkWriter.add(
        new ValueInEmptyWindows<>(
            IsmRecord.of(ImmutableList.of(EMPTY, new byte[] {0x00, 0x00}), EMPTY)));

    // Second write uses {0x00}, a proper prefix of the previous secondary key,
    // which violates the sink's strictly-increasing key-order requirement.
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage("expects keys to be written in strictly increasing order");
    sinkWriter.add(
        new ValueInEmptyWindows<>(IsmRecord.of(ImmutableList.of(EMPTY, new byte[] {0x00}), EMPTY)));
}
/**
 * Two instances are equal iff their {@code sources} collections are equal;
 * equality is delegated entirely to that collection.
 */
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (!(o instanceof ConfigSourceSet)) {
        return false;
    }
    ConfigSourceSet other = (ConfigSourceSet) o;
    return sources.equals(other.sources);
}
@Test
public void testEquals() {
    // Empty vs. empty and empty vs. non-empty.
    assertEquals(new ConfigSourceSet(), new ConfigSourceSet());
    assertNotEquals(new ConfigSourceSet(), new ConfigSourceSet(new String[]{"a"}));

    // Same single source; whitespace and case differences are normalized away,
    // and duplicates collapse (as these assertions demonstrate).
    assertEquals(new ConfigSourceSet(new String[]{"a"}), new ConfigSourceSet(new String[]{"a"}));
    assertEquals(new ConfigSourceSet(new String[]{"a"}), new ConfigSourceSet(new String[]{" A "}));
    assertEquals(new ConfigSourceSet(new String[]{"a"}), new ConfigSourceSet(new String[]{"A", "a"}));
    assertEquals(new ConfigSourceSet(new String[]{"A"}), new ConfigSourceSet(new String[]{"a", " a "}));

    // Different sources or different cardinality are not equal.
    assertNotEquals(new ConfigSourceSet(new String[]{"a"}), new ConfigSourceSet(new String[]{"b"}));
    assertNotEquals(new ConfigSourceSet(new String[]{"a"}), new ConfigSourceSet(new String[]{"a", "b"}));

    // Order of sources does not matter.
    assertEquals(new ConfigSourceSet(new String[]{"a", "b"}), new ConfigSourceSet(new String[]{"a", "b"}));
    assertEquals(new ConfigSourceSet(new String[]{"b", "a"}), new ConfigSourceSet(new String[]{"a", "b"}));
    assertEquals(new ConfigSourceSet(new String[]{"A", " b"}), new ConfigSourceSet(new String[]{"a ", "B"}));
    assertEquals(new ConfigSourceSet(new String[]{"b", "a", "c"}), new ConfigSourceSet(new String[]{"a", "b", "c"}));
    assertNotEquals(new ConfigSourceSet(new String[]{"a", "b"}), new ConfigSourceSet(new String[]{"b", "c"}));

    // Comparison against an unrelated type is false, not an exception.
    assertNotEquals("foo", new ConfigSourceSet());
}
/**
 * Creates a type-application node for {@code type} with the given type arguments.
 * The argument list is defensively snapshotted into an immutable copy.
 */
public static UTypeApply create(UExpression type, List<UExpression> typeArguments) {
    ImmutableList<UExpression> frozenArguments = ImmutableList.copyOf(typeArguments);
    return new AutoValue_UTypeApply(type, frozenArguments);
}
@Test
public void equality() {
    // Three groups: differing in the applied type, and in the type argument.
    // Members within a group must be equal; members across groups must not.
    new EqualsTester()
        .addEqualityGroup(
            UTypeApply.create(
                UClassIdent.create("java.util.List"), UClassIdent.create("java.lang.String")))
        .addEqualityGroup(
            UTypeApply.create(
                UClassIdent.create("java.util.Set"), UClassIdent.create("java.lang.String")))
        .addEqualityGroup(
            UTypeApply.create(
                UClassIdent.create("java.util.List"), UClassIdent.create("java.lang.Integer")))
        .testEquals();
}
/**
 * Computes Total Daily Energy Expenditure: the stored BMR scaled by the activity
 * level's multiplier, rounded half-down to two decimal places.
 *
 * @param activeLevel the activity level; {@code null} yields zero
 */
public BigDecimal calculateTDEE(ActiveLevel activeLevel) {
    if (activeLevel == null) {
        return BigDecimal.valueOf(0);
    }
    // getMultiplayer is the project's (misspelled) accessor for the activity multiplier.
    BigDecimal activityFactor = BigDecimal.valueOf(activeLevel.getMultiplayer());
    return activityFactor.multiply(BMR).setScale(2, RoundingMode.HALF_DOWN);
}
@Test
void calculateTDEE_MODERATELY_ACTIVE() {
    // TDEE for the MODERATELY level, rounded to two decimals.
    BigDecimal tdee = bmrCalculator.calculate(attributes).calculateTDEE(ActiveLevel.MODERATELY);

    assertEquals(new BigDecimal("3165.87"), tdee);
}
/**
 * Handles a request to report this instance's local processes: persists the
 * process list (when non-empty) and then removes the trigger node for the task.
 * Events addressed to other instances are ignored.
 */
@Subscribe
public void reportLocalProcesses(final ReportLocalProcessesEvent event) {
    String localInstanceId =
        contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId();
    if (!event.getInstanceId().equals(localInstanceId)) {
        return;
    }
    Collection<Process> localProcesses = ProcessRegistry.getInstance().listAll();
    if (!localProcesses.isEmpty()) {
        contextManager.getPersistServiceFacade().getRepository().persist(
                ProcessNode.getProcessListInstancePath(event.getTaskId(), event.getInstanceId()),
                YamlEngine.marshal(swapper.swapToYamlConfiguration(localProcesses)));
    }
    // Always clear the trigger node, even when there was nothing to persist.
    contextManager.getPersistServiceFacade().getRepository().delete(
            ComputeNode.getProcessTriggerInstanceNodePath(event.getInstanceId(), event.getTaskId()));
}
@Test
void assertReportLocalProcesses() {
    // Register a single idle=false, interrupted=false process with zeroed counters.
    Process process = mock(Process.class);
    String processId = "foo_id";
    when(process.getId()).thenReturn(processId);
    when(process.isInterrupted()).thenReturn(false);
    when(process.isIdle()).thenReturn(false);
    when(process.getCompletedUnitCount()).thenReturn(new AtomicInteger(0));
    when(process.getTotalUnitCount()).thenReturn(new AtomicInteger(0));
    ProcessRegistry.getInstance().add(process);

    // Fire the event addressed to this instance so the subscriber does not ignore it.
    String instanceId = contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId();
    subscriber.reportLocalProcesses(new ReportLocalProcessesEvent(instanceId, processId));

    // The process list must be persisted as YAML under the execution node path...
    verify(repository).persist("/execution_nodes/foo_id/" + instanceId,
            "processes:" + System.lineSeparator()
                    + "- completedUnitCount: 0" + System.lineSeparator()
                    + "  id: foo_id" + System.lineSeparator()
                    + "  idle: false" + System.lineSeparator()
                    + "  interrupted: false" + System.lineSeparator()
                    + "  startMillis: 0" + System.lineSeparator()
                    + "  totalUnitCount: 0" + System.lineSeparator());
    // ...and the trigger node must be deleted afterwards.
    verify(repository).delete("/nodes/compute_nodes/show_process_list_trigger/" + instanceId + ":foo_id");
}
/**
 * Asks the given royalty to receive compliments; a pure delegation with no
 * other side effects.
 *
 * @param r the royalty to compliment
 */
public void giveCompliments(Royalty r) {
    r.receiveCompliments();
}
@Test
void testGiveCompliments() {
    final var mockedRoyalty = mock(Royalty.class);

    new Servant("test").giveCompliments(mockedRoyalty);

    // Exactly one compliment, and nothing else, must reach the royalty.
    verify(mockedRoyalty).receiveCompliments();
    verifyNoMoreInteractions(mockedRoyalty);
}
/**
 * Returns the index for the given key definition, mapping each value to the
 * list of items stored under it; delegates directly to the underlying tree.
 *
 * @param keyDefinition the key definition to look up
 */
public MultiMap<Value, T, List<T>> get(final KeyDefinition keyDefinition) {
    return tree.get(keyDefinition);
}
@Test
void testFindByAge() throws Exception {
    // Fetch the index keyed by "age".
    final MultiMap<Value, Person, List<Person>> age = map.get(KeyDefinition.newKeyDefinition()
            .withId("age")
            .build());

    // The index holds exactly the two age values present in the fixture data,
    // and looking up 20 returns the matching persons.
    assertThat(age.keySet()).extracting(x -> x.getComparable()).containsExactly(20, 30);
    assertThat(age.get(new Value(20))).contains(toni, eder);
}