Dataset columns: focal_method (string lengths 13 to 60.9k), test_case (string lengths 25 to 109k).
/**
 * Reads a single byte from the buffered stream.
 *
 * @return the next byte as an unsigned int in [0, 255], or -1 at end of stream
 * @throws IOException if refilling the buffer fails
 */
@Override
public int read() throws IOException {
    // Refill the internal buffer when exhausted; a failed refill means end of stream.
    if (ensureDataInBuffer()) {
        // Mask with BYTE_MASK so the byte is returned as an unsigned value.
        return buf[position++] & BYTE_MASK;
    }
    return -1;
}
@Test public void readBufByBuf() throws Exception { // given byte[] buf = new byte[mockInput.length / 2]; int streamPos = 0; // when - then for (int count; (count = in.read(buf)) != -1; ) { for (int i = 0; i < count; i++) { assertEquals(mockInput[streamPos++], buf[i]); } } assertEquals(mockInput.length, streamPos); }
/**
 * Smooths the elevation of all pillar nodes in {@code geometry} with a moving average
 * over a window of at most {@code maxWindowSize} meters along the edge.
 * The first and last points (tower nodes) are never modified. The average is computed
 * as the area under the piecewise-linear elevation profile divided by the window length.
 *
 * @param geometry      point list to smooth in place; must contain elevation data
 * @param maxWindowSize maximum averaging window length in meters (centered on each pillar node)
 */
public static void smooth(PointList geometry, double maxWindowSize) {
    if (geometry.size() <= 2) {
        // geometry consists only of tower nodes, there are no pillar nodes to be smoothed in between
        return;
    }

    // calculate the distance between all points once here to avoid repeated calculation.
    // for n nodes there are always n-1 edges
    double[] distances = new double[geometry.size() - 1];
    for (int i = 0; i <= geometry.size() - 2; i++) {
        distances[i] = DistancePlaneProjection.DIST_PLANE.calcDist(
                geometry.getLat(i), geometry.getLon(i), geometry.getLat(i + 1), geometry.getLon(i + 1)
        );
    }

    // map that will collect all smoothed elevation values, size is less by 2
    // because elevation of start and end point (tower nodes) won't be touched
    // (capacity is scaled by 4/3 to account for the hash map load factor)
    IntDoubleHashMap averagedElevations = new IntDoubleHashMap((geometry.size() - 1) * 4 / 3);

    // iterate over every pillar node to smooth its elevation
    // first and last points are left out as they are tower nodes
    for (int i = 1; i <= geometry.size() - 2; i++) {
        // first, determine the average window which could be smaller when close to pillar nodes
        double searchDistance = maxWindowSize / 2.0;

        // walk backwards to measure how much distance is available before the start tower node
        double searchDistanceBack = 0.0;
        for (int j = i - 1; j >= 0; j--) {
            searchDistanceBack += distances[j];
            if (searchDistanceBack > searchDistance) {
                break;
            }
        }
        // update search distance if pillar node is close to START tower node
        searchDistance = Math.min(searchDistance, searchDistanceBack);

        // walk forwards to measure how much distance is available before the end tower node
        double searchDistanceForward = 0.0;
        for (int j = i; j < geometry.size() - 1; j++) {
            searchDistanceForward += distances[j];
            if (searchDistanceForward > searchDistance) {
                break;
            }
        }
        // update search distance if pillar node is close to END tower node
        searchDistance = Math.min(searchDistance, searchDistanceForward);

        if (searchDistance <= 0.0) {
            // there is nothing to smooth. this is an edge case where pillar nodes share exactly the same location
            // as a tower node.
            // by doing so we avoid (at least theoretically) a division by zero later in the function call
            continue;
        }

        // area under elevation curve
        double elevationArea = 0.0;

        // first going again backwards, accumulating the trapezoid area of each segment;
        // the last (partial) segment is linearly interpolated at the window boundary
        double distanceBack = 0.0;
        for (int j = i - 1; j >= 0; j--) {
            double dist = distances[j];
            double searchDistLeft = searchDistance - distanceBack;
            distanceBack += dist;
            if (searchDistLeft < dist) {
                // node lies outside averaging window
                double elevationDelta = geometry.getEle(j) - geometry.getEle(j + 1);
                double elevationAtSearchDistance = geometry.getEle(j + 1) + searchDistLeft / dist * elevationDelta;
                elevationArea += searchDistLeft * (geometry.getEle(j + 1) + elevationAtSearchDistance) / 2.0;
                break;
            } else {
                elevationArea += dist * (geometry.getEle(j + 1) + geometry.getEle(j)) / 2.0;
            }
        }

        // now going forward, same trapezoid accumulation in the other direction
        double distanceForward = 0.0;
        for (int j = i; j < geometry.size() - 1; j++) {
            double dist = distances[j];
            double searchDistLeft = searchDistance - distanceForward;
            distanceForward += dist;
            if (searchDistLeft < dist) {
                double elevationDelta = geometry.getEle(j + 1) - geometry.getEle(j);
                double elevationAtSearchDistance = geometry.getEle(j) + searchDistLeft / dist * elevationDelta;
                elevationArea += searchDistLeft * (geometry.getEle(j) + elevationAtSearchDistance) / 2.0;
                break;
            } else {
                elevationArea += dist * (geometry.getEle(j + 1) + geometry.getEle(j)) / 2.0;
            }
        }

        // area divided by the symmetric window length gives the average elevation
        double elevationAverage = elevationArea / (searchDistance * 2);
        averagedElevations.put(i, elevationAverage);
    }

    // after all pillar nodes got an averaged elevation, elevations are overwritten
    averagedElevations.forEach((Consumer<IntDoubleCursor>) c -> geometry.setElevation(c.key, c.value));
}
@Test
public void testDuplicatesTower() {
    // Points duplicated at the start share a tower node location; the resulting
    // zero-length edges must not yield NaN elevations after smoothing.
    final PointList points = new PointList(5, true);
    points.add(0.0, 0.0, 1.0);
    points.add(0.0, 0.0, 1.0);
    points.add(0.0, 0.0, 2.0);
    points.add(1.0, 1.0, 3.0);
    points.add(2.0, 2.0, 4.0);
    EdgeElevationSmoothingMovingAverage.smooth(points, 150.0);
    assertEquals(5, points.size());
    // tower node elevations remain untouched
    assertEquals(1.0, points.getEle(0), 0.000001);
    assertEquals(4.0, points.getEle(4), 0.000001);
    // all pillar node elevations must be defined numbers
    for (int i = 1; i <= 3; i++) {
        assertFalse(Double.isNaN(points.getEle(i)));
    }
}
/**
 * Enriches SHA2 nodes via the type-specific overload; any other node is returned unchanged.
 *
 * @param node the node to enrich, never null
 * @return the enriched node, or the input node if it is not a SHA2 instance
 */
@NotNull
@Override
public INode enrich(@NotNull INode node) {
    return node instanceof SHA2 sha2 ? enrich(sha2) : node;
}
@Test
void blockSize() {
    final DetectionLocation location =
            new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "Jca");
    final SHA2 sha256 = new SHA2(256, location);
    this.logBefore(sha256);
    // enrich and verify the block size child was attached
    final INode enriched = new SHA2Enricher().enrich(sha256);
    this.logAfter(enriched);
    assertThat(enriched.hasChildOfType(BlockSize.class)).isPresent();
    assertThat(enriched.hasChildOfType(BlockSize.class).get().asString()).isEqualTo("512");
}
/**
 * Decodes a float value of the given format from the underlying byte array.
 * Special byte patterns are mapped to +Inf, NaN and -Inf before delegating to
 * {@code bytesToFloat}; the patterns match the IEEE-11073 SFLOAT/FLOAT reserved
 * values (NaN, NRes, +/-INFINITY) — assumed, TODO confirm against the spec.
 *
 * @param formatType one of the FORMAT_* float format constants
 * @param offset     index of the first byte of the value, little-endian
 * @return the decoded value, or null if the format is unknown or the value
 *         would extend past the end of the data
 */
@Nullable
public Float getFloatValue(@FloatFormat final int formatType,
                           @IntRange(from = 0) final int offset) {
    // Bounds check: the full encoded value must fit inside the data array.
    if ((offset + getTypeLen(formatType)) > size()) return null;

    switch (formatType) {
        case FORMAT_SFLOAT -> {
            // 16-bit SFLOAT reserved mantissa values 0x07FE/0x07FF/0x0800/0x0801/0x0802
            if (mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFE)
                return Float.POSITIVE_INFINITY;
            if ((mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFF)
                    || (mValue[offset + 1] == 0x08 && mValue[offset] == 0x00)
                    || (mValue[offset + 1] == 0x08 && mValue[offset] == 0x01))
                return Float.NaN;
            if (mValue[offset + 1] == 0x08 && mValue[offset] == 0x02)
                return Float.NEGATIVE_INFINITY;

            return bytesToFloat(mValue[offset], mValue[offset + 1]);
        }
        case FORMAT_FLOAT -> {
            // 32-bit FLOAT reserved values all have exponent byte 0x00
            if (mValue[offset + 3] == 0x00) {
                if (mValue[offset + 2] == 0x7F && mValue[offset + 1] == (byte) 0xFF) {
                    if (mValue[offset] == (byte) 0xFE)
                        return Float.POSITIVE_INFINITY;
                    if (mValue[offset] == (byte) 0xFF)
                        return Float.NaN;
                } else if (mValue[offset + 2] == (byte) 0x80 && mValue[offset + 1] == 0x00) {
                    if (mValue[offset] == 0x00 || mValue[offset] == 0x01)
                        return Float.NaN;
                    if (mValue[offset] == 0x02)
                        return Float.NEGATIVE_INFINITY;
                }
            }

            return bytesToFloat(mValue[offset], mValue[offset + 1],
                    mValue[offset + 2], mValue[offset + 3]);
        }
    }
    // Unknown format type.
    return null;
}
@Test
public void setValue_SFLOAT_roundUp_500() {
    // 1000500 is not exactly representable as a 16-bit SFLOAT; the mantissa
    // should round up so that reading the value back yields 1001000.
    final MutableData data = new MutableData(new byte[2]);
    data.setValue(1000500f, Data.FORMAT_SFLOAT, 0);
    assertEquals(1001000f, data.getFloatValue(Data.FORMAT_SFLOAT, 0), 0.00);
}
/**
 * Returns the cache registered under {@code name}, creating a local-only cache
 * (not shared across the cluster) if none exists yet.
 *
 * @param name unique cache name
 * @return the existing or newly created (and wrapped) cache
 */
@SuppressWarnings("unchecked")
public static synchronized <T extends Cache> T createLocalCache(String name) {
    // Reuse a previously registered cache with this name, if any.
    final T existing = (T) caches.get(name);
    if (existing != null) {
        return existing;
    }
    final T created = (T) localCacheFactoryStrategy.createCache(name);
    // Record that this cache must never be clustered.
    localOnly.add(name);
    log.info("Created local-only cache [" + localCacheFactoryClass + "] for " + name);
    return wrapCache(created, name);
}
@Test public void testLocalCacheCreation() throws Exception { // Setup test fixture. // Execute system under test. final Cache result = CacheFactory.createLocalCache("unittest-localcache-creation"); // Verify results. assertNotNull(result); }
@Override public GenericRow transform(GenericRow record) { for (Map.Entry<String, FunctionEvaluator> entry : _expressionEvaluators.entrySet()) { String column = entry.getKey(); FunctionEvaluator transformFunctionEvaluator = entry.getValue(); Object existingValue = record.getValue(column); if (existingValue == null) { try { // Skip transformation if column value already exists // NOTE: column value might already exist for OFFLINE data, // For backward compatibility, The only exception here is that we will override nested field like array, // collection or map since they were not included in the record transformation before. record.putValue(column, transformFunctionEvaluator.evaluate(record)); } catch (Exception e) { if (!_continueOnError) { throw new RuntimeException("Caught exception while evaluation transform function for column: " + column, e); } else { LOGGER.debug("Caught exception while evaluation transform function for column: {}", column, e); record.putValue(GenericRow.INCOMPLETE_RECORD_KEY, true); } } } else if (existingValue.getClass().isArray() || existingValue instanceof Collections || existingValue instanceof Map) { try { Object transformedValue = transformFunctionEvaluator.evaluate(record); // For backward compatibility, The only exception here is that we will override nested field like array, // collection or map since they were not included in the record transformation before. if (!isTypeCompatible(existingValue, transformedValue)) { record.putValue(column, transformedValue); } } catch (Exception e) { LOGGER.debug("Caught exception while evaluation transform function for column: {}", column, e); } } } return record; }
@Test public void testTransformFunctionContinueOnError() { Schema pinotSchema = new Schema(); DimensionFieldSpec dimensionFieldSpec = new DimensionFieldSpec("x", FieldSpec.DataType.INT, true); pinotSchema.addField(dimensionFieldSpec); List<TransformConfig> transformConfigs = Collections.singletonList( new TransformConfig("y", "plus(x, 10)")); IngestionConfig ingestionConfig = new IngestionConfig(); ingestionConfig.setTransformConfigs(transformConfigs); ingestionConfig.setContinueOnError(true); TableConfig tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName("testTransformFunctionWithWrongInput") .setIngestionConfig(ingestionConfig) .build(); ExpressionTransformer expressionTransformer = new ExpressionTransformer(tableConfig, pinotSchema); // Valid case: x is int, y is int GenericRow genericRow = new GenericRow(); genericRow.putValue("x", 10); expressionTransformer.transform(genericRow); Assert.assertEquals(genericRow.getValue("y"), 20.0); // Invalid case: x is string, y is int genericRow = new GenericRow(); genericRow.putValue("x", "abcd"); expressionTransformer.transform(genericRow); Assert.assertEquals(genericRow.getValue("y"), null); // Invalid case: x is null, y is int genericRow = new GenericRow(); genericRow.putValue("x", null); expressionTransformer.transform(genericRow); Assert.assertEquals(genericRow.getValue("y"), null); }
/**
 * Parses a {@link SnapshotRef} from its JSON representation.
 *
 * @param json non-null, non-empty JSON string
 * @return the parsed snapshot ref
 * @throws IllegalArgumentException if {@code json} is null or empty
 */
public static SnapshotRef fromJson(String json) {
    final boolean hasContent = json != null && !json.isEmpty();
    Preconditions.checkArgument(hasContent, "Cannot parse snapshot ref from invalid JSON: %s", json);
    return JsonUtil.parse(json, SnapshotRefParser::fromJson);
}
@Test
public void testTagFromJsonDefault() {
    // JSON carrying only snapshot-id and type must deserialize to the builder defaults.
    SnapshotRef expected = SnapshotRef.tagBuilder(1L).build();
    assertThat(SnapshotRefParser.fromJson("{\"snapshot-id\":1,\"type\":\"tag\"}"))
        .as("Should be able to deserialize default tag")
        .isEqualTo(expected);
}
/**
 * Marks the given user's notify messages as read via the mapper.
 *
 * @param userId   id of the user
 * @param userType type of the user (admin/member)
 * @return number of rows updated by the mapper
 */
@Override
public int updateAllNotifyMessageRead(Long userId, Integer userType) {
    final int updatedRows = notifyMessageMapper.updateListRead(userId, userType);
    return updatedRows;
}
@Test
public void testUpdateAllNotifyMessageRead() {
    // mock data
    NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> {
        // this row will be matched by the query below
        o.setUserId(1L);
        o.setUserType(UserTypeEnum.ADMIN.getValue());
        o.setReadStatus(false);
        o.setReadTime(null);
        o.setTemplateParams(randomTemplateParams());
    });
    notifyMessageMapper.insert(dbNotifyMessage);
    // test userId mismatch
    notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L)));
    // test userType mismatch
    notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
    // test readStatus mismatch (already read)
    notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(true)));
    // prepare parameters
    Long userId = 1L;
    Integer userType = UserTypeEnum.ADMIN.getValue();
    // invoke
    int updateCount = notifyMessageService.updateAllNotifyMessageRead(userId, userType);
    // assert: only the single matching row was updated and marked read with a read time
    assertEquals(1, updateCount);
    NotifyMessageDO notifyMessage = notifyMessageMapper.selectById(dbNotifyMessage.getId());
    assertTrue(notifyMessage.getReadStatus());
    assertNotNull(notifyMessage.getReadTime());
}
public List<CircuitBreakerProto.CircuitBreakerRule> getServiceCircuitBreakerRule(String namespace, String sourceService, String dstService) { LOG.debug("Get service circuit breaker rules with namespace:{} and sourceService:{} and dstService:{}.", namespace, sourceService, dstService); List<CircuitBreakerProto.CircuitBreakerRule> rules = new ArrayList<>(); // get source service circuit breaker rules. ServiceRule sourceServiceRule = getServiceRule(namespace, sourceService, ServiceEventKey.EventType.CIRCUIT_BREAKING); if (sourceServiceRule != null) { Object rule = sourceServiceRule.getRule(); if (rule instanceof CircuitBreakerProto.CircuitBreaker) { rules.addAll(((CircuitBreakerProto.CircuitBreaker) rule).getRulesList()); } } // get peer service circuit breaker rules. ServiceRule dstServiceRule = getServiceRule(namespace, dstService, ServiceEventKey.EventType.CIRCUIT_BREAKING); if (dstServiceRule != null) { Object rule = dstServiceRule.getRule(); if (rule instanceof CircuitBreakerProto.CircuitBreaker) { rules.addAll(((CircuitBreakerProto.CircuitBreaker) rule).getRulesList()); } } return rules; }
@Test
public void testGetServiceCircuitBreakerRule() {
    final String testNamespace = "testNamespace";
    final String testSourceService = "testSourceService";
    final String testDstService = "testDstService";
    // One circuit breaker rule configured for the source service only.
    CircuitBreakerProto.CircuitBreaker circuitBreaker = CircuitBreakerProto.CircuitBreaker.newBuilder()
            .addRules(CircuitBreakerProto.CircuitBreakerRule.newBuilder().build())
            .build();
    ServiceRuleByProto serviceRule = new ServiceRuleByProto(circuitBreaker, "111", false,
            ServiceEventKey.EventType.CIRCUIT_BREAKING);
    ServiceRuleResponse serviceRuleResponse = new ServiceRuleResponse(serviceRule);
    // source: stub returns the configured rule
    when(consumerAPI.getServiceRule(
            argThat(request -> request != null && testNamespace.equals(request.getNamespace())
                    && testSourceService.equals(request.getService())
                    && ServiceEventKey.EventType.CIRCUIT_BREAKING.equals(request.getRuleType()))
    )).thenReturn(serviceRuleResponse);
    ServiceRuleResponse emptyRuleResponse = new ServiceRuleResponse(null);
    // destination: stub returns an empty response
    when(consumerAPI.getServiceRule(
            argThat(request -> request != null && testNamespace.equals(request.getNamespace())
                    && testDstService.equals(request.getService())
                    && ServiceEventKey.EventType.CIRCUIT_BREAKING.equals(request.getRuleType()))
    )).thenReturn(emptyRuleResponse);
    ServiceRuleManager serviceRuleManager = new ServiceRuleManager(sdkContext, consumerAPI);
    List<CircuitBreakerProto.CircuitBreakerRule> serviceCircuitBreakerRule =
            serviceRuleManager.getServiceCircuitBreakerRule(testNamespace, testSourceService, testDstService);
    // only the source service contributes a rule
    assertThat(serviceCircuitBreakerRule).hasSize(1);
}
/**
 * Writes metadata for every selected file using the session's Metadata feature.
 *
 * @param session connected session providing the Metadata feature
 * @return always Boolean.TRUE on success
 * @throws ConnectionCanceledException when the worker is cancelled between files
 */
@Override
public Boolean run(final Session<?> session) throws BackgroundException {
    final Metadata feature = session.getFeature(Metadata.class);
    if(log.isDebugEnabled()) {
        log.debug(String.format("Run with feature %s", feature));
    }
    for(Path f : files) {
        // Bail out promptly if the user cancelled the operation.
        if(this.isCanceled()) {
            throw new ConnectionCanceledException();
        }
        this.write(session, feature, f);
    }
    return true;
}
@Test
public void testRunDifferent() throws Exception {
    // File "a" has keys equal/different/unique; file "b" only equal/different.
    final PathAttributes attributesA = new PathAttributes();
    {
        final Map<String, String> map = new HashMap<>();
        map.put("equal", "equal");
        map.put("different", "diff1");
        map.put("unique", "unique");
        attributesA.setMetadata(map);
    }
    final PathAttributes attributesB = new PathAttributes();
    {
        final Map<String, String> map = new HashMap<>();
        map.put("equal", "equal");
        map.put("different", "diff2");
        attributesB.setMetadata(map);
    }
    final List<Path> files = Arrays.asList(
            new Path("a", EnumSet.of(Path.Type.file), attributesA),
            new Path("b", EnumSet.of(Path.Type.file), attributesB));
    // Update: "equal" changed for all files; null values mean "keep the file's own value".
    final Map<String, String> updated = new HashMap<>();
    updated.put("equal", "equal-changed");
    updated.put("unique", null);
    updated.put("different", null);
    WriteMetadataWorker worker = new WriteMetadataWorker(files, updated, false, new DisabledProgressListener()) {
        @Override
        public void cleanup(final Boolean map) {
            // cleanup must not be invoked by run()
            fail();
        }
    };
    final AtomicBoolean call = new AtomicBoolean();
    worker.run(new NullSession(new Host(new TestProtocol())) {
        @Override
        @SuppressWarnings("unchecked")
        public <T> T _getFeature(final Class<T> type) {
            if(type == Metadata.class) {
                // Stub Metadata feature that asserts the merged metadata per file.
                return (T) new Metadata() {
                    @Override
                    public Map<String, String> getDefault(final Local local) {
                        return Collections.emptyMap();
                    }

                    @Override
                    public Map<String, String> getMetadata(final Path file) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public void setMetadata(final Path file, final TransferStatus status) {
                        // Changed key is applied to every file.
                        assertTrue(status.getMetadata().containsKey("equal"));
                        assertTrue(status.getMetadata().containsKey("different"));
                        assertEquals("equal-changed", status.getMetadata().get("equal"));
                        switch(file.getName()) {
                            case "a":
                                // Per-file values survive when the update value is null.
                                assertTrue(status.getMetadata().containsKey("unique"));
                                assertEquals("diff1", status.getMetadata().get("different"));
                                assertEquals("unique", status.getMetadata().get("unique"));
                                break;
                            case "b":
                                assertFalse(status.getMetadata().containsKey("unique"));
                                assertEquals("diff2", status.getMetadata().get("different"));
                                break;
                            default:
                                fail();
                                break;
                        }
                        call.set(true);
                    }
                };
            }
            return super._getFeature(type);
        }
    });
    // The stubbed setMetadata must have been reached at least once.
    assertTrue(call.get());
}
/**
 * Creates a key with the given material by delegating to the next KMS provider
 * in the load-balancing rotation.
 *
 * @param name     key name
 * @param material key material
 * @param options  creation options
 * @return the created key version
 * @throws IOException if the delegated provider call fails
 */
@Override
public KeyVersion createKey(final String name, final byte[] material, final Options options)
    throws IOException {
    final ProviderCallable<KeyVersion> op = new ProviderCallable<KeyVersion>() {
        @Override
        public KeyVersion call(KMSClientProvider provider) throws IOException {
            return provider.createKey(name, material, options);
        }
    };
    // nextIdx() picks the provider round-robin; false = do not retry on failure here.
    return doOp(op, nextIdx(), false);
}
@Test
public void testLoadBalancing() throws Exception {
    Configuration conf = new Configuration();
    // Three mocked providers, each tagging created keys with its own name.
    final String[] names = {"p1", "p2", "p3"};
    final String[] versions = {"v1", "v2", "v3"};
    KMSClientProvider[] providers = new KMSClientProvider[names.length];
    for (int i = 0; i < names.length; i++) {
        providers[i] = mock(KMSClientProvider.class);
        when(providers[i].createKey(Mockito.anyString(), Mockito.any(Options.class)))
            .thenReturn(new KMSClientProvider.KMSKeyVersion(names[i], versions[i], new byte[0]));
    }
    KeyProvider kp = new LoadBalancingKMSClientProvider(providers, 0, conf);
    // Requests must rotate round-robin: p1, p2, p3, then back to p1.
    assertEquals("p1", kp.createKey("test1", new Options(conf)).getName());
    assertEquals("p2", kp.createKey("test2", new Options(conf)).getName());
    assertEquals("p3", kp.createKey("test3", new Options(conf)).getName());
    assertEquals("p1", kp.createKey("test4", new Options(conf)).getName());
}
/**
 * Writes a single byte to the underlying UFS output stream.
 * Synchronized so the stream write and the byte counter stay consistent.
 *
 * @param b the byte to write (low 8 bits)
 * @throws IOException if the underlying stream write fails
 */
@Override
public synchronized void write(int b) throws IOException {
    // Write first; the counter is only advanced on success, so a thrown
    // IOException leaves mBytesWritten reflecting the bytes actually written.
    mUfsOutStream.write(b);
    mBytesWritten++;
}
@Test
public void writeOverflowOffLen() throws IOException, AlluxioException {
    final AlluxioURI ufsPath = getUfsPath();
    try (FileOutStream out = mFileSystem.createFile(ufsPath)) {
        // offset 5 plus length CHUNK_SIZE + 5 overruns the buffer, so the write must be rejected.
        final byte[] data = BufferUtils.getIncreasingByteArray(CHUNK_SIZE);
        assertThrows(IllegalArgumentException.class, () -> out.write(data, 5, CHUNK_SIZE + 5));
    }
}
/**
 * Reads the XML configuration rooted at {@code path} into a list of Yang elements.
 *
 * @param cfg  hierarchical configuration to read from
 * @param path path of the desired subtree
 * @return immutable list of collected elements
 * @throws ItemNotFoundException-style failure via nullIsNotFound if the path is absent
 */
public List<YangElement> readXmlConfiguration(HierarchicalConfiguration cfg, String path) {
    // Resolve the configuration key first; fails fast if the path does not exist.
    final String key = nullIsNotFound(findPath(cfg, path), "Configuration does not contain desired path");
    final List<YangElement> collected = new ArrayList<>();
    getElements(cfg.configurationsAt(key), collected, key, cfg, path, key);
    return ImmutableList.copyOf(collected);
}
@Test
public void testReadNestedXmlConfiguration() throws ConfigurationException {
    testCreateConfig.load(getClass().getResourceAsStream("/testYangConfig.xml"));
    // Two controller entries are expected, each flattened into dotted keys.
    YangElement first = new YangElement("controllers",
            ImmutableMap.of("controller.id", "tcp:1.1.1.1:1", "controller.ip-address", "1.1.1.1"));
    YangElement second = new YangElement("controllers",
            ImmutableMap.of("controller.id", "tcp:2.2.2.2:2", "controller.ip-address", "2.2.2.2"));
    List<YangElement> actual = utils.readXmlConfiguration(testCreateConfig, "controllers");
    assertEquals("Wrong elements collected", ImmutableList.of(first, second), actual);
}
/**
 * Parses the given command line arguments into a runtime options builder.
 *
 * @param args command line arguments
 * @return builder populated from the arguments
 */
public RuntimeOptionsBuilder parse(String... args) {
    // Delegate to the List-based overload; Arrays.asList is a zero-copy view of the array.
    return parse(Arrays.asList(args));
}
@Test
void ensure_default_snippet_type_is_underscore() {
    // Build defaults from the command line parser, then layer properties on top.
    RuntimeOptions defaults = parser.parse().build();
    RuntimeOptions options = new CucumberPropertiesParser().parse(properties).build(defaults);
    // With nothing configured the snippet type must default to UNDERSCORE.
    assertThat(options.getSnippetType(), is(SnippetType.UNDERSCORE));
}
/**
 * Renders this component change as a JSON-like string of its fields.
 * Boolean fields are rendered unquoted; missing booleans become empty strings.
 */
@Override
public String toString() {
    final StringBuilder builder = new StringBuilder("{");
    addField(builder, "\"componentUuid\": ", this.componentUuid, true);
    addField(builder, "\"componentKey\": ", this.componentKey, true);
    addField(builder, "\"componentName\": ", this.componentName, true);
    // qualifier is mapped to its human-readable form
    addField(builder, "\"qualifier\": ", getQualifier(qualifier), true);
    addField(builder, "\"description\": ", this.description, true);
    addField(builder, "\"path\": ", this.path, true);
    addField(builder, "\"isPrivate\": ", Objects.toString(this.isPrivate, ""), false);
    addField(builder, "\"isEnabled\": ", Objects.toString(this.isEnabled, ""), false);
    endString(builder);
    return builder.toString();
}
@Test
void toString_addsProjectQualifier() {
    // "TRK" is the internal qualifier for projects and must render as "project".
    ComponentNewValue value = new ComponentNewValue("uuid", "name", "key", true, "path", "TRK");
    assertThat(value.toString())
        .contains("componentUuid")
        .contains("\"qualifier\": \"project\"");
}
/**
 * Adds a product to the authenticated user's favourites.
 * The user id is taken from the JWT subject claim; responds 201 Created with
 * a Location header pointing at the new favourite product resource.
 */
@PostMapping
public Mono<ResponseEntity<FavouriteProduct>> addProductToFavourites(
        Mono<JwtAuthenticationToken> authenticationTokenMono,
        @Valid @RequestBody Mono<NewFavouriteProductPayload> payloadMono,
        UriComponentsBuilder uriComponentsBuilder
) {
    return Mono.zip(authenticationTokenMono, payloadMono)
            .flatMap(tuple -> {
                JwtAuthenticationToken token = tuple.getT1();
                NewFavouriteProductPayload payload = tuple.getT2();
                return this.favouriteProductsService
                        .addProductToFavourites(payload.productId(), token.getToken().getSubject());
            })
            .map(favouriteProduct -> {
                // Build the Location URI of the created resource.
                URI location = uriComponentsBuilder
                        .replacePath("feedback-api/favourite-products/{id}")
                        .build(favouriteProduct.getId());
                return ResponseEntity.created(location).body(favouriteProduct);
            });
}
@Test
void addProductToFavourites_ReturnsCreatedFavouriteProduct() {
    // given: the service returns the stored favourite product for this user/product pair
    doReturn(Mono.just(new FavouriteProduct(UUID.fromString("fe87eef6-cbd7-11ee-aeb6-275dac91de02"), 1,
            "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c")))
            .when(this.favouriteProductsService)
            .addProductToFavourites(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
    // when: invoke the controller with a JWT whose subject is the user id
    StepVerifier.create(this.controller.addProductToFavourites(
            Mono.just(new JwtAuthenticationToken(Jwt.withTokenValue("e30.e30")
                    .headers(headers -> headers.put("foo", "bar"))
                    .claim("sub", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c").build())),
            Mono.just(new NewFavouriteProductPayload(1)),
            UriComponentsBuilder.fromUriString("http://localhost")))
            // then: a 201 response with a Location header for the new favourite is emitted
            .expectNext(ResponseEntity
                    .created(URI.create("http://localhost/feedback-api/favourite-products/fe87eef6-cbd7-11ee-aeb6-275dac91de02"))
                    .body(new FavouriteProduct(UUID.fromString("fe87eef6-cbd7-11ee-aeb6-275dac91de02"), 1,
                            "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c")))
            .verifyComplete();
    // the service must have been called exactly once with these arguments
    verify(this.favouriteProductsService).addProductToFavourites(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c");
    verifyNoMoreInteractions(this.favouriteProductsService);
}
/**
 * Finds the previous ("oude waarde") BSN (Dutch citizen service number) in the
 * given category list, looking in the identification-numbers old-value category.
 *
 * @param categorieList categories to search
 * @return the old BSN value, or whatever findValue returns when absent
 *         (presumably null — TODO confirm against findValue's contract)
 */
public static String findBsnOudeWaarde(List<Container> categorieList){ return findValue(categorieList, CATEGORIE_IDENTIFICATIENUMMERS_OUDE_WAARDE, ELEMENT_BURGERSERVICENUMMER); }
@Test
public void testFindOldBsn(){
    // The full category fixture carries "burgerservicenummer_oud" as the old BSN value.
    assertThat(CategorieUtil.findBsnOudeWaarde(createFullCategories()), is("burgerservicenummer_oud"));
}
/**
 * Mounts the given cgroup controllers under the specified hierarchy by running
 * a privileged MOUNT_CGROUPS operation.
 *
 * @param cgroupKVs "controller=path" pairs to mount
 * @param hierarchy cgroup hierarchy name
 * @throws IOException if the privileged operation fails; the exit code and
 *                     operation output are included in the message
 */
public void mountCgroups(List<String> cgroupKVs, String hierarchy)
    throws IOException {
  try {
    PrivilegedOperation mountCGroupsOp = new PrivilegedOperation(
        PrivilegedOperation.OperationType.MOUNT_CGROUPS, hierarchy);
    mountCGroupsOp.appendArgs(cgroupKVs);
    // Removed unused local: `Configuration conf = super.getConf();` was
    // assigned but never referenced in this method.
    PrivilegedOperationExecutor privilegedOperationExecutor =
        getPrivilegedOperationExecutor();
    privilegedOperationExecutor.executePrivilegedOperation(mountCGroupsOp, false);
  } catch (PrivilegedOperationException e) {
    int exitCode = e.getExitCode();
    LOG.warn("Exception in LinuxContainerExecutor mountCgroups ", e);
    throw new IOException("Problem mounting cgroups " + cgroupKVs
        + "; exit code = " + exitCode + " and output: " + e.getOutput(), e);
  }
}
@Test
public void testCGroups() throws Exception {
    // Skip unless the environment is set up for privileged cgroup tests.
    Assume.assumeTrue(shouldRun());
    String cgroupsMount = System.getProperty("cgroups.mount");
    Assume.assumeTrue((cgroupsMount != null) && !cgroupsMount.isEmpty());
    assertTrue("Cgroups mount point does not exist", new File(
        cgroupsMount).exists());
    List<String> cgroupKVs = new ArrayList<>();
    String hierarchy = "hadoop-yarn";
    String[] controllers = { "cpu", "net_cls" };
    // Build controller=path pairs and verify each controller directory exists.
    for (String controller : controllers) {
        cgroupKVs.add(controller + "=" + cgroupsMount + "/" + controller);
        assertTrue(new File(cgroupsMount, controller).exists());
    }
    try {
        exec.mountCgroups(cgroupKVs, hierarchy);
        // After mounting, each controller must expose a tasks file and the hierarchy dir.
        for (String controller : controllers) {
            assertTrue(controller + " cgroup not mounted", new File(
                cgroupsMount + "/" + controller + "/tasks").exists());
            assertTrue(controller + " cgroup hierarchy not created",
                new File(cgroupsMount + "/" + controller + "/" + hierarchy).exists());
            assertTrue(controller + " cgroup hierarchy created incorrectly",
                new File(cgroupsMount + "/" + controller + "/" + hierarchy + "/tasks").exists());
        }
    } catch (IOException ie) {
        // fail() throws, so the rethrow below is unreachable; kept byte-identical.
        fail("Couldn't mount cgroups " + ie.toString());
        throw ie;
    }
}
/**
 * Dumps all config info from persistence to the local cache/disk.
 * On startup a multi-threaded executor is used and pages include content;
 * otherwise a single-threaded executor is used and content is fetched lazily
 * only for entries whose md5 or lastModified changed.
 *
 * @param task must be a DumpAllTask
 * @return true when the dump completed, false for an invalid task type
 */
@Override
public boolean process(NacosTask task) {
    if (!(task instanceof DumpAllTask)) {
        DEFAULT_LOG.error("[all-dump-error] ,invalid task type,DumpAllProcessor should process DumpAllTask type.");
        return false;
    }
    DumpAllTask dumpAllTask = (DumpAllTask) task;

    long currentMaxId = configInfoPersistService.findConfigMaxId();
    long lastMaxId = 0;
    ThreadPoolExecutor executorService = null;
    if (dumpAllTask.isStartUp()) {
        // Startup: dump in parallel; bounded queue with CallerRunsPolicy for back pressure.
        executorService = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
                Runtime.getRuntime().availableProcessors(), 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(PropertyUtil.getAllDumpPageSize() * 2),
                r -> new Thread(r, "dump all executor"), new ThreadPoolExecutor.CallerRunsPolicy());
    } else {
        // Periodic check: single worker, hand-off queue (submitter runs the task when busy).
        executorService = new ThreadPoolExecutor(1, 1, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(),
                r -> new Thread(r, "dump all executor"), new ThreadPoolExecutor.CallerRunsPolicy());
    }
    DEFAULT_LOG.info("start dump all config-info...");

    // Page through all config rows ordered by id until the max id is reached.
    while (lastMaxId < currentMaxId) {
        long start = System.currentTimeMillis();
        Page<ConfigInfoWrapper> page = configInfoPersistService.findAllConfigInfoFragment(lastMaxId,
                PropertyUtil.getAllDumpPageSize(), dumpAllTask.isStartUp());
        long dbTimeStamp = System.currentTimeMillis();
        if (page == null || page.getPageItems() == null || page.getPageItems().isEmpty()) {
            break;
        }

        for (ConfigInfoWrapper cf : page.getPageItems()) {
            lastMaxId = Math.max(cf.getId(), lastMaxId);
            //if not start up, page query will not return content, check md5 and lastModified first ,if changed ,get single content info to dump.
            if (!dumpAllTask.isStartUp()) {
                final String groupKey = GroupKey2.getKey(cf.getDataId(), cf.getGroup(), cf.getTenant());
                boolean newLastModified = cf.getLastModified() > ConfigCacheService.getLastModifiedTs(groupKey);
                //check md5 & update local disk cache.
                String localContentMd5 = ConfigCacheService.getContentMd5(groupKey);
                boolean md5Update = !localContentMd5.equals(cf.getMd5());
                if (newLastModified || md5Update) {
                    LogUtil.DUMP_LOG.info("[dump-all] find change config {}, {}, md5={}", groupKey,
                            cf.getLastModified(), cf.getMd5());
                    // Fetch the full row (with content) only for changed entries.
                    cf = configInfoPersistService.findConfigInfo(cf.getDataId(), cf.getGroup(), cf.getTenant());
                } else {
                    continue;
                }
            }

            // Row may have been deleted between the page query and the single fetch.
            if (cf == null) {
                continue;
            }
            // Refresh in-memory metadata for the well-known meta config entries.
            if (cf.getDataId().equals(AggrWhitelist.AGGRIDS_METADATA)) {
                AggrWhitelist.load(cf.getContent());
            }
            if (cf.getDataId().equals(ClientIpWhiteList.CLIENT_IP_WHITELIST_METADATA)) {
                ClientIpWhiteList.load(cf.getContent());
            }
            if (cf.getDataId().equals(SwitchService.SWITCH_META_DATA_ID)) {
                SwitchService.load(cf.getContent());
            }

            // Capture fields in finals for the async dump task.
            final String content = cf.getContent();
            final String dataId = cf.getDataId();
            final String group = cf.getGroup();
            final String tenant = cf.getTenant();
            final long lastModified = cf.getLastModified();
            final String type = cf.getType();
            final String encryptedDataKey = cf.getEncryptedDataKey();
            executorService.execute(() -> {
                final String md5Utf8 = MD5Utils.md5Hex(content, ENCODE_UTF8);
                boolean result = ConfigCacheService.dumpWithMd5(dataId, group, tenant, content, md5Utf8,
                        lastModified, type, encryptedDataKey);
                if (result) {
                    LogUtil.DUMP_LOG.info("[dump-all-ok] {}, {}, length={},md5UTF8={}",
                            GroupKey2.getKey(dataId, group), lastModified, content.length(), md5Utf8);
                } else {
                    LogUtil.DUMP_LOG.info("[dump-all-error] {}", GroupKey2.getKey(dataId, group));
                }
            });
        }

        long diskStamp = System.currentTimeMillis();
        DEFAULT_LOG.info("[all-dump] submit all task for {} / {}, dbTime={},diskTime={}", lastMaxId, currentMaxId,
                (dbTimeStamp - start), (diskStamp - dbTimeStamp));
    }
    //wait all task are finished and then shutdown executor.
    // NOTE(review): the broad catch also swallows InterruptedException from
    // Thread.sleep without re-interrupting the thread — consider restoring
    // the interrupt flag; left unchanged here.
    try {
        int unfinishedTaskCount = 0;
        while ((unfinishedTaskCount = executorService.getQueue().size() + executorService.getActiveCount()) > 0) {
            DEFAULT_LOG.info("[all-dump] wait {} dump tasks to be finished", unfinishedTaskCount);
            Thread.sleep(1000L);
        }
        executorService.shutdown();
    } catch (Exception e) {
        DEFAULT_LOG.error("[all-dump] wait dump tasks to be finished error", e);
    }
    DEFAULT_LOG.info("success to dump all config-info。");
    return true;
}
@Test
void testDumpAllOnCheckAll() throws Exception {
    // Two configs share the same DB lastModified timestamp.
    ConfigInfoWrapper configInfoWrapper1 = createNewConfig(1);
    ConfigInfoWrapper configInfoWrapper2 = createNewConfig(2);
    long timestamp = System.currentTimeMillis();
    configInfoWrapper1.setLastModified(timestamp);
    configInfoWrapper2.setLastModified(timestamp);

    // Single page containing both configs; non-startup fragment query is stubbed.
    Page<ConfigInfoWrapper> page = new Page<>();
    page.setTotalCount(2);
    page.setPagesAvailable(2);
    page.setPageNumber(1);
    List<ConfigInfoWrapper> list = Arrays.asList(configInfoWrapper1, configInfoWrapper2);
    page.setPageItems(list);
    Mockito.when(configInfoPersistService.findConfigMaxId()).thenReturn(2L);
    Mockito.when(configInfoPersistService.findAllConfigInfoFragment(0, PropertyUtil.getAllDumpPageSize(), false))
            .thenReturn(page);

    // Single-row fetches return fresh content (what the dump would pick up on change).
    ConfigInfoWrapper configInfoWrapperSingle1 = new ConfigInfoWrapper();
    BeanUtils.copyProperties(configInfoWrapper1, configInfoWrapperSingle1);
    configInfoWrapperSingle1.setContent("content123456");
    Mockito.when(configInfoPersistService.findConfigInfo(configInfoWrapper1.getDataId(),
            configInfoWrapper1.getGroup(), configInfoWrapper1.getTenant())).thenReturn(configInfoWrapperSingle1);
    ConfigInfoWrapper configInfoWrapperSingle2 = new ConfigInfoWrapper();
    BeanUtils.copyProperties(configInfoWrapper2, configInfoWrapperSingle2);
    configInfoWrapperSingle2.setContent("content123456222");
    Mockito.when(configInfoPersistService.findConfigInfo(configInfoWrapper2.getDataId(),
            configInfoWrapper2.getGroup(), configInfoWrapper2.getTenant())).thenReturn(configInfoWrapperSingle2);

    // For config 1, assign a latter time, to make sure that it would not be updated.
    // For config 2, assign an earlier time, to make sure that it would be updated.
    String md51 = MD5Utils.md5Hex(configInfoWrapper1.getContent(), "UTF-8");
    String md52 = MD5Utils.md5Hex(configInfoWrapper2.getContent(), "UTF-8");
    long latterTimestamp = timestamp + 999;
    long earlierTimestamp = timestamp - 999;
    String encryptedDataKey = "testEncryptedDataKey";
    ConfigCacheService.dumpWithMd5(configInfoWrapper1.getDataId(), configInfoWrapper1.getGroup(),
            configInfoWrapper1.getTenant(), configInfoWrapper1.getContent(), md51, latterTimestamp, "json",
            encryptedDataKey);
    ConfigCacheService.dumpWithMd5(configInfoWrapper2.getDataId(), configInfoWrapper2.getGroup(),
            configInfoWrapper2.getTenant(), configInfoWrapper2.getContent(), md52, earlierTimestamp, "json",
            encryptedDataKey);

    // Run the non-startup (check-all) dump.
    DumpAllTask dumpAllTask = new DumpAllTask(false);
    boolean process = dumpAllProcessor.process(dumpAllTask);
    assertTrue(process);

    //Check cache
    CacheItem contentCache1 = ConfigCacheService.getContentCache(
            GroupKey2.getKey(configInfoWrapper1.getDataId(), configInfoWrapper1.getGroup(),
                    configInfoWrapper1.getTenant()));
    // check if config1 is not updated
    assertEquals(md51, contentCache1.getConfigCache().getMd5Utf8());
    assertEquals(latterTimestamp, contentCache1.getConfigCache().getLastModifiedTs());
    //check disk
    String contentFromDisk1 = ConfigDiskServiceFactory.getInstance()
            .getContent(configInfoWrapper1.getDataId(), configInfoWrapper1.getGroup(),
                    configInfoWrapper1.getTenant());
    assertEquals(configInfoWrapper1.getContent(), contentFromDisk1);

    //Check cache
    CacheItem contentCache2 = ConfigCacheService.getContentCache(
            GroupKey2.getKey(configInfoWrapper2.getDataId(), configInfoWrapper2.getGroup(),
                    configInfoWrapper2.getTenant()));
    // check if config2 is updated
    assertEquals(MD5Utils.md5Hex(configInfoWrapperSingle2.getContent(), "UTF-8"),
            contentCache2.getConfigCache().getMd5Utf8());
    assertEquals(configInfoWrapper2.getLastModified(), contentCache2.getConfigCache().getLastModifiedTs());
    //check disk
    String contentFromDisk2 = ConfigDiskServiceFactory.getInstance()
            .getContent(configInfoWrapper2.getDataId(), configInfoWrapper2.getGroup(),
                    configInfoWrapper2.getTenant());
    assertEquals(configInfoWrapperSingle2.getContent(), contentFromDisk2);
}
/**
 * Converts a byte array into an IPv6 address.
 *
 * @param value the byte-array value to convert (presumably exactly 16 bytes;
 *              validation is delegated to the {@code Ip6Address} constructor —
 *              TODO confirm it rejects wrong lengths)
 * @return the IPv6 address built from the given bytes
 */
public static Ip6Address valueOf(byte[] value) {
    return new Ip6Address(value);
}
/**
 * Verifies {@code Ip6Address.compareTo(...)}: equal addresses compare as 0, and
 * differences in both the low and high 64 bits order addresses correctly.
 */
@Test
public void testComparisonIPv6() {
    // Each row: {base, equal-to-base, less-than-base, greater-than-base}.
    String[][] quadruples = {
        {"1111:2222:3333:4444:5555:6666:7777:8888",
         "1111:2222:3333:4444:5555:6666:7777:8888",
         "1111:2222:3333:4444:5555:6666:7777:8887",
         "1111:2222:3333:4444:5555:6666:7777:8889"},
        {"ffff:2222:3333:4444:5555:6666:7777:8888",
         "ffff:2222:3333:4444:5555:6666:7777:8888",
         "ffff:2222:3333:4444:5555:6666:7777:8887",
         "ffff:2222:3333:4444:5555:6666:7777:8889"},
        {"ffff:2222:3333:4444:5555:6666:7777:8888",
         "ffff:2222:3333:4444:5555:6666:7777:8888",
         "ffff:2222:3333:4443:5555:6666:7777:8888",
         "ffff:2222:3333:4445:5555:6666:7777:8888"}
    };
    for (String[] quadruple : quadruples) {
        Ip6Address base = Ip6Address.valueOf(quadruple[0]);
        assertTrue(base.compareTo(Ip6Address.valueOf(quadruple[1])) == 0);
        assertTrue(base.compareTo(Ip6Address.valueOf(quadruple[2])) > 0);
        assertTrue(base.compareTo(Ip6Address.valueOf(quadruple[3])) < 0);
    }
}
/**
 * Streams query results to the client as a JSON array: opening bracket, header,
 * then one row per poll until the query completes, the limit is reached, or the
 * connection closes. Periodically writes a bare newline to detect a client that
 * disconnected without notice. Always closes the writer in the finally block.
 */
@Override
public void write(final OutputStream out) {
  // CHECKSTYLE_RULES.ON: CyclomaticComplexity
  try {
    out.write("[".getBytes(StandardCharsets.UTF_8));
    write(out, buildHeader());
    final BlockingRowQueue rowQueue = queryMetadata.getRowQueue();
    // Main streaming loop: exit on disconnect, query stop, limit, or completion.
    while (!connectionClosed && queryMetadata.isRunning() && !limitReached && !complete) {
      final KeyValueMetadata<List<?>, GenericRow> row = rowQueue.poll(
          disconnectCheckInterval,
          TimeUnit.MILLISECONDS
      );
      if (row != null) {
        write(out, buildRow(row));
      } else {
        // If no new rows have been written, the user may have terminated the connection without
        // us knowing. Check by trying to write a single newline.
        out.write("\n".getBytes(StandardCharsets.UTF_8));
        out.flush();
      }
      // Surface any asynchronous query errors to the client.
      drainAndThrowOnError(out);
    }
    if (connectionClosed) {
      return;
    }
    // Flush any rows still buffered in the queue before the final message.
    drain(out);
    if (limitReached) {
      objectMapper.writeValue(out, StreamedRow.finalMessage("Limit Reached"));
    } else if (complete) {
      objectMapper.writeValue(out, StreamedRow.finalMessage("Query Completed"));
    }
    out.write("]\n".getBytes(StandardCharsets.UTF_8));
    out.flush();
  } catch (final EOFException exception) {
    // The user has terminated the connection; we can stop writing
    log.warn("Query terminated due to exception:" + exception.toString());
  } catch (final InterruptedException exception) {
    // The most likely cause of this is the server shutting down. Should just try to close
    // gracefully, without writing any more to the connection stream.
    log.warn("Interrupted while writing to connection stream");
  } catch (final Exception exception) {
    log.error("Exception occurred while writing to connection stream: ", exception);
    outputException(out, exception);
  } finally {
    close();
  }
}
@Test public void shouldHandleTableRows() { // Given: when(queryMetadata.getResultType()).thenReturn(ResultType.TABLE); doAnswer(tableRows("key1", "Row1", "key2", null, "key3", "Row3")) .when(rowQueue).drainTo(any()); createWriter(); forceWriterToNotBlock(); // When: writer.write(out); // Then: final List<String> lines = getOutput(out); assertThat(lines, hasItems( containsString("{\"row\":{\"columns\":[\"Row1\"]}}"), containsString("{\"row\":{\"columns\":[null],\"tombstone\":true}}"), containsString("{\"row\":{\"columns\":[\"Row3\"]}}") )); }
/**
 * Right-pads {@code input} with copies of {@code padding} until it is exactly
 * {@code targetLen} characters long; inputs longer than the target are truncated.
 * Returns null for a null input, a null/empty padding, or a null/negative target.
 */
@Udf
public String rpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  // Invalid arguments yield null rather than throwing.
  if (input == null
      || padding == null || padding.isEmpty()
      || targetLen == null || targetLen < 0) {
    return null;
  }

  final StringBuilder builder = new StringBuilder(targetLen + padding.length());
  builder.append(input);
  // Append whole copies of the padding until the target length is reached or passed.
  while (builder.length() < targetLen) {
    builder.append(padding);
  }
  // Trim any overshoot from the last padding copy (and truncate an over-long input).
  builder.setLength(targetLen);
  return builder.toString();
}
/** An empty padding string can never extend the input, so the UDF must return null. */
@Test
public void shouldReturnNullForEmptyPaddingString() {
  assertThat(udf.rpad("foo", 4, ""), is(nullValue()));
}
/**
 * The token is generated only for INSERT statements that carry an
 * ON DUPLICATE KEY UPDATE clause.
 */
@Override
public boolean isGenerateSQLToken(final SQLStatementContext sqlStatementContext) {
    if (!(sqlStatementContext instanceof InsertStatementContext)) {
        return false;
    }
    final InsertStatementContext insertStatementContext = (InsertStatementContext) sqlStatementContext;
    return insertStatementContext.getSqlStatement().getOnDuplicateKeyColumns().isPresent();
}
/** A statement context without ON DUPLICATE KEY columns must not generate the token. */
@Test
void assertIsNotGenerateSQLTokenWithoutOnDuplicateKeyColumns() {
    // Deep stubs make getOnDuplicateKeyColumns() return an empty Optional by default.
    InsertStatementContext insertStatementContext = mock(InsertStatementContext.class, RETURNS_DEEP_STUBS);
    assertFalse(generator.isGenerateSQLToken(insertStatementContext));
}
/**
 * Selects bundles to unload from brokers whose resource usage exceeds the historical
 * average by more than the configured threshold. For each overloaded broker, at least
 * enough throughput is offloaded to bring it back toward the average; brokers below
 * the minimum throughput threshold are skipped. Falls back to lower-boundary shedding
 * when nothing was selected and that feature is enabled.
 *
 * @param loadData cluster load information, including per-broker data and recently
 *                 unloaded bundles
 * @param conf     service configuration supplying thresholds and feature flags
 * @return the (reused, synchronized-access) cache of broker -> bundle selections
 */
@Override
public synchronized Multimap<String, String> findBundlesForUnloading(final LoadData loadData,
                                                                     final ServiceConfiguration conf) {
    selectedBundlesCache.clear();
    // Threshold is configured as a percentage; convert to a 0..1 fraction.
    final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB;
    final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf);
    if (avgUsage == 0) {
        // No usable usage data; nothing can be compared against the average.
        log.warn("average max resource usage is 0");
        return selectedBundlesCache;
    }
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0);
        if (currentUsage < avgUsage + threshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is not overloaded, ignoring at this point ({})", broker,
                        localData.printResourceUsage());
            }
            return;
        }
        // Offload the usage excess over (average + threshold), plus a small margin.
        double percentOfTrafficToOffload =
                currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
        if (minimumThroughputToOffload < minThroughputThreshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is planning to shed throughput {} MByte/s less than "
                                + "minimumThroughputThreshold {} MByte/s, skipping bundle unload ({})",
                        broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB,
                        localData.printResourceUsage());
            }
            return;
        }
        log.info(
                "Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%"
                        + " > {}% + {}% -- Offloading at least {} MByte/s of traffic,"
                        + " left throughput {} MByte/s ({})",
                broker, 100 * currentUsage, 100 * avgUsage, 100 * threshold, minimumThroughputToOffload / MB,
                (brokerCurrentThroughput - minimumThroughputToOffload) / MB, localData.printResourceUsage());
        if (localData.getBundles().size() > 1) {
            // Normal case: pick individual bundles until the target throughput is covered.
            filterAndSelectBundle(loadData, recentlyUnloadedBundles, broker, localData,
                    minimumThroughputToOffload);
        } else if (localData.getBundles().size() == 1) {
            // A single bundle cannot be split off its broker by shedding.
            log.warn(
                    "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                            + "No Load Shedding will be done on this broker",
                    localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });
    if (selectedBundlesCache.isEmpty() && conf.isLowerBoundarySheddingEnabled()) {
        tryLowerBoundaryShedding(loadData, conf);
    }
    return selectedBundlesCache;
}
/**
 * With all brokers evenly loaded at 80%, neither threshold shedding nor
 * lower-boundary shedding should select any bundle to unload.
 */
@Test
public void testLowerBoundarySheddingNoBrokerToOffload() {
    int numBundles = 10;
    int brokerNum = 11;
    LoadData loadData = new LoadData();
    double throughput = 80 * 1024 * 1024;
    //Load of all Brokers are 80%, and no Broker needs to offload.
    for (int i = 0; i < brokerNum; i++) {
        LocalBrokerData broker = new LocalBrokerData();
        for (int j = 0; j < numBundles; j++) {
            // NOTE(review): both a generic "bundle-j" name and the broker-specific
            // name below are added to this broker — confirm the generic add is
            // intentional and not a copy-paste leftover.
            broker.getBundles().add("bundle-" + j);
            BundleData bundle = new BundleData();
            TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData();
            timeAverageMessageData.setMsgThroughputIn(throughput);
            timeAverageMessageData.setMsgThroughputOut(throughput);
            bundle.setShortTermData(timeAverageMessageData);
            String broker2BundleName = "broker-" + i + "-bundle-" + j;
            loadData.getBundleData().put(broker2BundleName, bundle);
            broker.getBundles().add(broker2BundleName);
        }
        // 80% bandwidth usage in both directions.
        broker.setBandwidthIn(new ResourceUsage(80, 100));
        broker.setBandwidthOut(new ResourceUsage(80, 100));
        broker.setMsgThroughputIn(throughput);
        broker.setMsgThroughputOut(throughput);
        loadData.getBrokerData().put("broker-" + i, new BrokerData(broker));
    }
    // NOTE(review): a fresh shedder is used for the first call but the shared
    // 'thresholdShedder' field for the second — confirm the inconsistency is harmless.
    ThresholdShedder shedder = new ThresholdShedder();
    Multimap<String, String> bundlesToUnload = shedder.findBundlesForUnloading(loadData, conf);
    assertTrue(bundlesToUnload.isEmpty());
    conf.setLowerBoundarySheddingEnabled(true);
    bundlesToUnload = thresholdShedder.findBundlesForUnloading(loadData, conf);
    assertTrue(bundlesToUnload.isEmpty());
}
/**
 * Runs {@code git blame} on the given file and returns one {@link BlameLine} per line.
 * Returns an empty list when the file has uncommitted changes (git cannot attribute
 * those lines to a revision).
 *
 * @param baseDir  repository root used both as working directory and git dir
 * @param fileName path of the file to blame, relative to {@code baseDir}
 * @throws Exception if the git process fails for any reason other than uncommitted lines
 */
public List<BlameLine> blame(Path baseDir, String fileName) throws Exception {
  BlameOutputProcessor outputProcessor = new BlameOutputProcessor();
  try {
    // Argument order matters: flags configure the git dir before the blame subcommand.
    // Each produced output line is streamed into the processor via the method reference.
    this.processWrapperFactory.create(
        baseDir,
        outputProcessor::process,
        gitCommand,
        GIT_DIR_FLAG,
        String.format(GIT_DIR_ARGUMENT, baseDir),
        GIT_DIR_FORCE_FLAG,
        baseDir.toString(),
        BLAME_COMMAND,
        BLAME_LINE_PORCELAIN_FLAG,
        IGNORE_WHITESPACES,
        FILENAME_SEPARATOR_FLAG,
        fileName)
        .execute();
  } catch (UncommittedLineException e) {
    // Best-effort: a file with uncommitted lines is simply reported as unblamable.
    LOG.debug("Unable to blame file '{}' - it has uncommitted changes", fileName);
    return emptyList();
  }
  return outputProcessor.getBlameLines();
}
/**
 * Blame output splitting must treat only '\n' as a line terminator: content containing
 * '\r' and '\t' but no newline is a single line.
 */
@Test
public void should_read_lines_only_based_on_new_line() throws Exception {
  Path baseDir = createNewTempFolder().toPath();
  String fileName = "file.txt";
  // First write content with carriage returns and tabs but no '\n'.
  createFile(fileName, "test1\rtest2\r\ttest3", baseDir);
  Git git = createRepository(baseDir);
  createFile(fileName, "line", baseDir);
  commit(git, fileName);

  List<BlameLine> blameLines = blameCommand.blame(baseDir, fileName);

  assertThat(blameLines).hasSize(1);
}
/**
 * Returns the transformer held by this definition, used to convert an argument
 * object into a requested target type.
 */
@Override
public ParameterByTypeTransformer parameterByTypeTransformer() {
    return transformer;
}
/**
 * A definition built from a (Object, Type) method must dispatch transformation
 * requests to that method.
 */
@Test
void can_transform_object_to_type() throws Throwable {
    Method method = JavaDefaultParameterTransformerDefinitionTest.class.getMethod(
        "transform_object_to_type", Object.class, Type.class);
    JavaDefaultParameterTransformerDefinition definition =
        new JavaDefaultParameterTransformerDefinition(method, lookup);

    Object result = definition.parameterByTypeTransformer().transform("something", String.class);

    // The stub method returns its own name, proving it was the one invoked.
    assertThat((String) result, is("transform_object_to_type"));
}
/**
 * A delivery is successful when an HTTP response was received with a 2xx status.
 * A missing status (e.g. the request never completed) counts as failure.
 */
@Override
public boolean isSuccess() {
    if (httpStatus == null) {
        return false;
    }
    final int status = httpStatus;
    return status >= 200 && status < 300;
}
/** A delivery that failed with an I/O error (no HTTP status) is not a success. */
@Test
public void isSuccess_returns_false_if_failed_to_send_http_request() {
  IOException error = new IOException("Fail to connect");
  WebhookDelivery delivery = newBuilderTemplate().setError(error).build();

  assertThat(delivery.isSuccess()).isFalse();
}
/**
 * Resolves the geographic area for the given IP address by first mapping the IP
 * to an area id and then looking the area up via {@code AreaUtils}.
 *
 * @param ip the IP address to resolve, e.g. "120.202.4.50"
 * @return the matching area
 */
public static Area getArea(String ip) {
    return AreaUtils.getArea(getAreaId(ip));
}
@Test public void testGetArea_string() { // 120.202.4.0|120.202.4.255|420600 Area area = IPUtils.getArea("120.202.4.50"); assertEquals("襄阳市", area.getName()); }
/**
 * Equality is defined by name, path and provider only; the entity type is not
 * compared (two entries with the same path and provider occupy the same
 * physical location).
 */
@Override
public boolean equals( Object object ) {
    // Identity short-circuit first.
    if ( this == object ) {
        return true;
    }
    // Also rejects null, since null is never an instance of Element.
    if ( !( object instanceof Element ) ) {
        return false;
    }
    Element that = (Element) object;
    return Objects.equals( this.name, that.name )
        && Objects.equals( this.path, that.path )
        && Objects.equals( this.provider, that.provider );
}
/**
 * Element equality depends on name, path and provider but deliberately ignores the
 * entity type; null fields must not cause exceptions.
 */
@Test
public void testEquals() {
    // Same name/path/provider -> equal.
    Element element2 = new Element( NAME, TYPE, PATH, LOCAL_PROVIDER );
    assertEquals( element1, element2 );
    // Different name and path -> not equal.
    element2 = new Element( "diffname", TYPE, "/tmp/diffname", LOCAL_PROVIDER );
    assertNotEquals( element1, element2 );
    // Different provider -> not equal.
    element2 = new Element( NAME, TYPE, PATH, "diffProvider" );
    assertNotEquals( element1, element2 );
    element2 = new Element( NAME, EntityType.REPOSITORY_FILE, PATH, LOCAL_PROVIDER );
    // Changing the file type does not effect equals because in a map, if the path and provider are the same then
    // the files would live in the same physical space.
    assertEquals( element1, element2 );
    // future proofing for unexpected null values
    assertNotEquals( new Element( null, null, null, null ), element2 );
}
/**
 * Decides whether two node values are type-compatible: equal values (including
 * both null) and single-null pairs are compatible, otherwise the classes must be
 * identical or related by assignability in either direction.
 */
protected boolean nodesReturnsSameType(Object leftObject, Object rightObject) {
    // Equal values (or both null) trivially share a type.
    if (Objects.equals(leftObject, rightObject)) {
        return true;
    }
    // A single null side is treated as compatible with anything.
    if (leftObject == null || rightObject == null) {
        return true;
    }
    Class<?> leftClass = leftObject.getClass();
    Class<?> rightClass = rightObject.getClass();
    return leftClass.equals(rightClass)
        || leftClass.isAssignableFrom(rightClass)
        || rightClass.isAssignableFrom(leftClass);
}
/** A String and an Integer are unrelated types, so the check must fail. */
@Test
void nodesReturnsSameType_False() {
    boolean sameType = rangeFunction.nodesReturnsSameType("1", 1);
    assertThat(sameType).withFailMessage("\"1\" - 1").isFalse();
}
/**
 * Whether this checkpoint may be subsumed by a newer checkpoint.
 *
 * @return true for regular checkpoints; false for savepoints, which are never subsumed
 */
public boolean canBeSubsumed() {
    // Only the savepoint property protects a checkpoint from subsumption.
    // (The previous comment mentioned "forced" checkpoints, but the code checks
    // isSavepoint(), not a forced flag.)
    return !props.isSavepoint();
}
/**
 * Savepoints (forced or not) are never subsumable, and aborting one with
 * CHECKPOINT_SUBSUMED must fail.
 */
@Test
void testCanBeSubsumed() throws Exception {
    // A forced savepoint can never be subsumed.
    CheckpointProperties forced =
            new CheckpointProperties(
                    true,
                    SavepointType.savepoint(SavepointFormatType.CANONICAL),
                    false,
                    false,
                    false,
                    false,
                    false,
                    false);
    final PendingCheckpoint pending = createPendingCheckpoint(forced);
    assertThat(pending.canBeSubsumed()).isFalse();

    assertThatThrownBy(
                    () -> abort(pending, CheckpointFailureReason.CHECKPOINT_SUBSUMED),
                    "Did not throw expected Exception")
            .isInstanceOf(IllegalStateException.class);

    // Even a non-forced checkpoint cannot be subsumed here, because it is still a
    // savepoint and canBeSubsumed() checks the savepoint property.
    CheckpointProperties subsumed =
            new CheckpointProperties(
                    false,
                    SavepointType.savepoint(SavepointFormatType.CANONICAL),
                    false,
                    false,
                    false,
                    false,
                    false,
                    false);
    assertThat(createPendingCheckpoint(subsumed).canBeSubsumed()).isFalse();
}
/**
 * Returns the value mapped to {@code key}, computing and storing one from the two
 * parameters when absent.
 * <p>
 * The plain {@code get} fast path avoids the lock contention of
 * {@code ConcurrentHashMap.computeIfAbsent} when the key is already present
 * (JDK-8161372); the slow path delegates to {@link Map#computeIfAbsent} so that,
 * for concurrent maps, the compute-and-store is atomic. The previous
 * get-then-{@code put} sequence could invoke the mapping function concurrently and
 * overwrite a value inserted by another thread.
 *
 * @param target          map to read from and write to; must not be null
 * @param key             lookup key; must not be null
 * @param mappingFunction function producing the value from the two params; must not be null
 * @param param1          first argument passed to the mapping function; must not be null
 * @param param2          second argument passed to the mapping function; must not be null
 * @return the existing or newly computed value (null only if the mapping function
 *         returns null, in which case nothing is stored)
 * @throws NullPointerException if any argument is null
 */
public static <K, C, V, T> V computeIfAbsent(Map<K, V> target, K key, BiFunction<C, T, V> mappingFunction, C param1,
        T param2) {
    Objects.requireNonNull(target, "target");
    Objects.requireNonNull(key, "key");
    Objects.requireNonNull(mappingFunction, "mappingFunction");
    Objects.requireNonNull(param1, "param1");
    Objects.requireNonNull(param2, "param2");
    V val = target.get(key);
    if (val != null) {
        return val;
    }
    return target.computeIfAbsent(key, k -> mappingFunction.apply(param1, param2));
}
/**
 * Passing a null key must raise a NullPointerException whose message names the
 * offending parameter ("key").
 */
@Test
public void computeIfAbsentNotExistKeyTest() {
    Map<String, Object> map = new HashMap<>();
    map.put("abc", "123");
    BiFunction<String, String, Object> mappingFunction = (a, b) -> a + b;
    // NOTE(review): this test passes silently if no exception is thrown and also if a
    // non-NPE exception is thrown — consider failing explicitly after the call and
    // asserting the exception type instead of filtering in the catch block.
    try {
        MapUtil.computeIfAbsent(map, null, mappingFunction, "param1", "param2");
    } catch (Exception e) {
        if (e instanceof NullPointerException) {
            Assert.isTrue(Objects.equals("key", e.getMessage()));
        }
    }
}
/**
 * Callback invoked when a class loader is added to the module; refreshes the
 * serialization-security configuration using resources visible to that loader.
 * The scope model parameter is unused here — the refresh depends only on the loader.
 */
@Override
public void onAddClassLoader(ModuleModel scopeModel, ClassLoader classLoader) {
    refreshClassLoader(classLoader);
}
/**
 * When the block-all system property is set, the serialize-security check status
 * must be STRICT after configuration.
 */
@Test
void testStatus5() {
    FrameworkModel frameworkModel = new FrameworkModel();
    ApplicationModel applicationModel = frameworkModel.newApplication();
    ModuleModel moduleModel = applicationModel.newModule();
    // Property must be set BEFORE the configurator runs, since it is read during refresh.
    System.setProperty(CommonConstants.CLASS_DESERIALIZE_BLOCK_ALL, "true");
    SerializeSecurityManager ssm = frameworkModel.getBeanFactory().getBean(SerializeSecurityManager.class);
    SerializeSecurityConfigurator serializeSecurityConfigurator = new SerializeSecurityConfigurator(moduleModel);
    serializeSecurityConfigurator.onAddClassLoader(
            moduleModel, Thread.currentThread().getContextClassLoader());
    Assertions.assertEquals(SerializeCheckStatus.STRICT, ssm.getCheckStatus());
    // NOTE(review): cleanup is not in a finally block — a failing assertion above
    // leaks the system property into subsequent tests.
    System.clearProperty(CommonConstants.CLASS_DESERIALIZE_BLOCK_ALL);
    frameworkModel.destroy();
}
/**
 * Builds the effective configuration for a Kubernetes session: the base
 * configuration overlaid with everything derived from the CLI arguments, with the
 * deployment target pinned to the Kubernetes session executor.
 *
 * @param args raw command-line arguments
 * @throws CliArgsException if the arguments cannot be parsed
 */
Configuration getEffectiveConfiguration(String[] args) throws CliArgsException {
    final CommandLine commandLine = cli.parseCommandLineOptions(args, true);

    final Configuration configuration = new Configuration(baseConfiguration);
    configuration.addAll(cli.toConfiguration(commandLine));
    configuration.set(DeploymentOptions.TARGET, KubernetesSessionClusterExecutor.NAME);

    return configuration;
}
/**
 * Memory and slot settings from the input configuration must propagate through the
 * effective configuration into the derived ClusterSpecification.
 */
@Test
void testConfigurationClusterSpecification() throws Exception {
    final Configuration configuration = new Configuration();
    final int jobManagerMemory = 1337;
    configuration.set(
            JobManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.ofMebiBytes(jobManagerMemory));
    final int taskManagerMemory = 7331;
    configuration.set(
            TaskManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.ofMebiBytes(taskManagerMemory));
    final int slotsPerTaskManager = 42;
    configuration.set(TaskManagerOptions.NUM_TASK_SLOTS, slotsPerTaskManager);
    // Only the executor flag is passed on the command line; everything else comes
    // from the base configuration.
    final String[] args = {"-e", KubernetesSessionClusterExecutor.NAME};

    final KubernetesSessionCli cli =
            new KubernetesSessionCli(configuration, confDirPath.toAbsolutePath().toString());
    Configuration executorConfig = cli.getEffectiveConfiguration(args);
    ClusterClientFactory<String> clientFactory = getClusterClientFactory(executorConfig);
    ClusterSpecification clusterSpecification =
            clientFactory.getClusterSpecification(executorConfig);

    assertThat(clusterSpecification.getMasterMemoryMB()).isEqualTo(jobManagerMemory);
    assertThat(clusterSpecification.getTaskManagerMemoryMB()).isEqualTo(taskManagerMemory);
    assertThat(clusterSpecification.getSlotsPerTaskManager()).isEqualTo(slotsPerTaskManager);
}
/**
 * Parses a formatted timestamp string using the GMT time zone; delegates to the
 * three-argument overload with zone id "GMT".
 *
 * @param formattedTimestamp the string representation of a date
 * @param formatPattern      a java.time.format.DateTimeFormatter-style pattern
 * @return the parsed TIMESTAMP value
 */
@Udf(description = "Converts a string representation of a date in the given format"
    + " into the TIMESTAMP value."
    + " Single quotes in the timestamp format can be escaped with '',"
    + " for example: 'yyyy-MM-dd''T''HH:mm:ssX'.")
public Timestamp parseTimestamp(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedTimestamp,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  return parseTimestamp(formattedTimestamp, formatPattern, ZoneId.of("GMT").getId());
}
@Test public void shouldThrowOnEmptyString() { // When: final KsqlFunctionException e = assertThrows( KsqlFunctionException.class, () -> udf.parseTimestamp("", "yyyy-MM-dd'T'HH:mm:ss.SSS") ); // Then: assertThat(e.getMessage(), containsString("Text '' could not be parsed at index 0")); }
/**
 * Constant-folds addition of two decimal constants; registered for DECIMALV2 and
 * all DECIMAL V3 widths (32/64/128), each returning its own width.
 * NOTE(review): precision/overflow handling is delegated to createDecimalConstant —
 * confirm it clamps or rejects results exceeding the declared precision.
 */
@ConstantFunction.List(list = {
        @ConstantFunction(name = "add", argTypes = {DECIMALV2, DECIMALV2}, returnType = DECIMALV2),
        @ConstantFunction(name = "add", argTypes = {DECIMAL32, DECIMAL32}, returnType = DECIMAL32),
        @ConstantFunction(name = "add", argTypes = {DECIMAL64, DECIMAL64}, returnType = DECIMAL64),
        @ConstantFunction(name = "add", argTypes = {DECIMAL128, DECIMAL128}, returnType = DECIMAL128)
})
public static ConstantOperator addDecimal(ConstantOperator first, ConstantOperator second) {
    return createDecimalConstant(first.getDecimal().add(second.getDecimal()));
}
/**
 * 100 + 100 must fold to 200 for every supported decimal width, and the V3 widths
 * must report a DecimalV3 result type.
 */
@Test
public void addDecimal() {
    // DECIMALV2
    assertEquals("200",
            ScalarOperatorFunctions.addDecimal(O_DECIMAL_100, O_DECIMAL_100).getDecimal().toPlainString());
    // DECIMAL32 (precision 7, scale 2)
    assertEquals("200",
            ScalarOperatorFunctions.addDecimal(O_DECIMAL32P7S2_100, O_DECIMAL32P7S2_100).getDecimal()
                    .toPlainString());
    assertTrue(
            ScalarOperatorFunctions.addDecimal(O_DECIMAL32P7S2_100, O_DECIMAL32P7S2_100).getType().isDecimalV3());
    // DECIMAL32 (precision 9, scale 0)
    assertEquals("200",
            ScalarOperatorFunctions.addDecimal(O_DECIMAL32P9S0_100, O_DECIMAL32P9S0_100).getDecimal()
                    .toPlainString());
    assertTrue(
            ScalarOperatorFunctions.addDecimal(O_DECIMAL32P9S0_100, O_DECIMAL32P9S0_100).getType().isDecimalV3());
    // DECIMAL64 (precision 15, scale 10)
    assertEquals("200",
            ScalarOperatorFunctions.addDecimal(O_DECIMAL64P15S10_100, O_DECIMAL64P15S10_100).getDecimal()
                    .toPlainString());
    assertTrue(ScalarOperatorFunctions.addDecimal(O_DECIMAL64P15S10_100, O_DECIMAL64P15S10_100).getType()
            .isDecimalV3());
    // DECIMAL64 (precision 18, scale 15)
    assertEquals("200",
            ScalarOperatorFunctions.addDecimal(O_DECIMAL64P18S15_100, O_DECIMAL64P18S15_100).getDecimal()
                    .toPlainString());
    assertTrue(ScalarOperatorFunctions.addDecimal(O_DECIMAL64P18S15_100, O_DECIMAL64P18S15_100).getType()
            .isDecimalV3());
    // DECIMAL128 (precision 30, scale 2)
    assertEquals("200",
            ScalarOperatorFunctions.addDecimal(O_DECIMAL128P30S2_100, O_DECIMAL128P30S2_100).getDecimal()
                    .toPlainString());
    assertTrue(ScalarOperatorFunctions.addDecimal(O_DECIMAL128P30S2_100, O_DECIMAL128P30S2_100).getType()
            .isDecimalV3());
    // DECIMAL128 (precision 38, scale 20)
    assertEquals("200",
            ScalarOperatorFunctions.addDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getDecimal()
                    .toPlainString());
    assertTrue(ScalarOperatorFunctions.addDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getType()
            .isDecimalV3());
}
/**
 * Populates the author and (when unassigned) assignee of an issue from SCM blame data.
 * Does nothing when the author is already set — presumably by an earlier step; TODO
 * confirm against the pipeline. Authors longer than the storage limit are logged and
 * skipped; when no assignee can be derived, the configured default assignee is used.
 */
@Override
public void onIssue(Component component, DefaultIssue issue) {
    if (issue.authorLogin() != null) {
        return;
    }
    // Lazily load changesets for the file containing the issue.
    loadScmChangesets(component);
    Optional<String> scmAuthor = guessScmAuthor(issue, component);

    if (scmAuthor.isPresent()) {
        if (scmAuthor.get().length() <= IssueDto.AUTHOR_MAX_SIZE) {
            issueUpdater.setNewAuthor(issue, scmAuthor.get(), changeContext);
        } else {
            // Author column has a fixed size; over-long accounts are dropped, not truncated.
            LOGGER.debug("SCM account '{}' is too long to be stored as issue author", scmAuthor.get());
        }
    }

    if (issue.assignee() == null) {
        // Map the SCM account to a user id, falling back to the default assignee.
        UserIdDto userId =
                scmAuthor.map(scmAccountToUser::getNullable).orElse(defaultAssignee.loadDefaultAssigneeUserId());
        issueUpdater.setNewAssignee(issue, userId, changeContext);
    }
}
/** The author of the changeset covering the issue line becomes the assignee. */
@Test
void assign_new_issue_to_author_of_change() {
    // SCM account "john" maps to user u123, and "john" authored the only changeset.
    addScmUser("john", buildUserId("u123", "john"));
    setSingleChangeset("john", 123456789L, "rev-1");
    DefaultIssue newIssue = newIssueOnLines(1);

    underTest.onIssue(FILE, newIssue);

    assertThat(newIssue.assignee()).isEqualTo("u123");
    assertThat(newIssue.assigneeLogin()).isEqualTo("john");
}
/**
 * Validates a module's NOTICE file against the dependencies it actually bundles.
 * Reported problems, grouped by severity:
 * <ul>
 *   <li>CRITICAL: empty NOTICE, duplicate declarations, or bundled-but-unlisted deps</li>
 *   <li>TOLERATED: wrong first line, or listed-but-not-bundled deps</li>
 *   <li>SUPPRESSED: excess deps in modules explicitly allowed to over-declare</li>
 * </ul>
 *
 * @param modulesWithShadedDependencies bundled (shaded) dependencies per module
 * @param moduleName                    the module whose NOTICE file is checked
 * @param noticeContents               parsed NOTICE contents, or null when empty
 * @return problems keyed by severity; empty map when the NOTICE file is correct
 */
@VisibleForTesting
static Map<Severity, List<String>> checkNoticeFile(
        Map<String, Set<Dependency>> modulesWithShadedDependencies,
        String moduleName,
        @Nullable NoticeContents noticeContents) {

    final Map<Severity, List<String>> problemsBySeverity = new HashMap<>();

    if (noticeContents == null) {
        addProblem(problemsBySeverity, Severity.CRITICAL, "The NOTICE file was empty.");
    } else {
        // first line must be the module name.
        if (!noticeContents.getNoticeModuleName().equals(moduleName)) {
            addProblem(
                    problemsBySeverity,
                    Severity.TOLERATED,
                    String.format(
                            "First line does not start with module name. firstLine=%s",
                            noticeContents.getNoticeModuleName()));
        }

        // collect all declared dependencies from NOTICE file
        Set<Dependency> declaredDependencies = new HashSet<>();
        for (Dependency declaredDependency : noticeContents.getDeclaredDependencies()) {
            // Set.add returning false means the dependency was already declared.
            if (!declaredDependencies.add(declaredDependency)) {
                addProblem(
                        problemsBySeverity,
                        Severity.CRITICAL,
                        String.format("Dependency %s is declared twice.", declaredDependency));
            }
        }

        // find all dependencies missing from NOTICE file
        Collection<Dependency> expectedDependencies =
                modulesWithShadedDependencies.getOrDefault(moduleName, Collections.emptySet())
                        .stream()
                        .filter(
                                dependency ->
                                        !dependency.getGroupId().equals("org.apache.flink"))
                        .collect(Collectors.toList());

        for (Dependency expectedDependency : expectedDependencies) {
            if (!declaredDependencies.contains(expectedDependency)) {
                addProblem(
                        problemsBySeverity,
                        Severity.CRITICAL,
                        String.format("Dependency %s is not listed.", expectedDependency));
            }
        }

        boolean moduleDefinesExcessDependencies =
                MODULES_DEFINING_EXCESS_DEPENDENCIES.contains(moduleName);

        // find all dependencies defined in NOTICE file, which were not expected
        for (Dependency declaredDependency : declaredDependencies) {
            if (!expectedDependencies.contains(declaredDependency)) {
                // Allow-listed modules get their excess declarations suppressed.
                final Severity severity =
                        moduleDefinesExcessDependencies
                                ? Severity.SUPPRESSED
                                : Severity.TOLERATED;
                addProblem(
                        problemsBySeverity,
                        severity,
                        String.format(
                                "Dependency %s is not bundled, but listed.", declaredDependency));
            }
        }
    }

    return problemsBySeverity;
}
/** Declaring the same dependency twice in a NOTICE file is a CRITICAL problem. */
@Test
void testCheckNoticeFileRejectsDuplicateLine() {
    final String moduleName = "test";

    // The module bundles the dependency once, but the NOTICE file lists it twice.
    final Map<String, Set<Dependency>> bundleDependencies = new HashMap<>();
    bundleDependencies.put(moduleName, Collections.singleton(Dependency.create("a", "b", "c", null)));
    final NoticeContents noticeContents =
            new NoticeContents(
                    moduleName,
                    Arrays.asList(
                            Dependency.create("a", "b", "c", null),
                            Dependency.create("a", "b", "c", null)));

    assertThat(NoticeFileChecker.checkNoticeFile(bundleDependencies, moduleName, noticeContents))
            .containsOnlyKeys(NoticeFileChecker.Severity.CRITICAL);
}
/**
 * Not supported: replicated maps do not track set-operation latency.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public long getMaxSetLatency() {
    throw new UnsupportedOperationException("Set operation on replicated maps is not supported.");
}
// getMaxSetLatency is unsupported for replicated-map stats and must always throw.
@Test(expected = UnsupportedOperationException.class)
public void testGetMaxSetLatency() {
    localReplicatedMapStats.getMaxSetLatency();
}
ClassicGroup getOrMaybeCreateClassicGroup( String groupId, boolean createIfNotExists ) throws GroupIdNotFoundException { Group group = groups.get(groupId); if (group == null && !createIfNotExists) { throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId)); } if (group == null) { ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics); groups.put(groupId, classicGroup); metrics.onClassicGroupStateTransition(null, classicGroup.currentState()); return classicGroup; } else { if (group.type() == CLASSIC) { return (ClassicGroup) group; } else { // We don't support upgrading/downgrading between protocols at the moment so // we throw an exception if a group exists with the wrong type. throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.", groupId)); } } }
/**
 * A follower's SyncGroup blocks until the leader syncs with the assignment; once the
 * leader's sync is persisted, both leader and follower receive their assignments and
 * the group reaches STABLE.
 */
@Test
public void testSyncGroupLeaderAfterFollower() throws Exception {
    // To get a group of two members:
    // 1. join and sync with a single member (because we can't immediately join with two members)
    // 2. join and sync with the first member and a new member
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .build();
    JoinGroupResponseData leaderJoinResponse =
        context.joinClassicGroupAsDynamicMemberAndCompleteRebalance("group-id");
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);

    JoinGroupRequestData joinRequest = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
        .withGroupId("group-id")
        .withMemberId(UNKNOWN_MEMBER_ID)
        .withDefaultProtocolTypeAndProtocols()
        .withRebalanceTimeoutMs(10000)
        .withSessionTimeoutMs(5000)
        .build();

    // The new (follower) member's join stays pending until the leader rejoins.
    GroupMetadataManagerTestContext.JoinResult followerJoinResult = context.sendClassicGroupJoin(joinRequest);

    assertTrue(followerJoinResult.records.isEmpty());
    assertFalse(followerJoinResult.joinFuture.isDone());

    GroupMetadataManagerTestContext.JoinResult leaderJoinResult =
        context.sendClassicGroupJoin(joinRequest.setMemberId(leaderJoinResponse.memberId()));

    // Both joins complete with the same generation and the same elected leader.
    assertTrue(leaderJoinResult.records.isEmpty());
    assertTrue(leaderJoinResult.joinFuture.isDone());
    assertTrue(followerJoinResult.joinFuture.isDone());
    assertEquals(Errors.NONE.code(), leaderJoinResult.joinFuture.get().errorCode());
    assertEquals(Errors.NONE.code(), followerJoinResult.joinFuture.get().errorCode());
    assertEquals(leaderJoinResult.joinFuture.get().generationId(),
        followerJoinResult.joinFuture.get().generationId());
    assertEquals(leaderJoinResponse.memberId(), leaderJoinResult.joinFuture.get().leader());
    assertEquals(leaderJoinResponse.memberId(), followerJoinResult.joinFuture.get().leader());
    assertTrue(group.isInState(COMPLETING_REBALANCE));

    int nextGenerationId = leaderJoinResult.joinFuture.get().generationId();
    String followerId = followerJoinResult.joinFuture.get().memberId();
    byte[] leaderAssignment = new byte[]{0};
    byte[] followerAssignment = new byte[]{1};

    // Sync group with follower to get new assignment.
    SyncGroupRequestData syncRequest = new GroupMetadataManagerTestContext.SyncGroupRequestBuilder()
        .withGroupId("group-id")
        .withMemberId(leaderJoinResponse.memberId())
        .withGenerationId(leaderJoinResponse.generationId())
        .build();

    // Follower sync is parked until the leader provides the group assignment.
    GroupMetadataManagerTestContext.SyncResult followerSyncResult = context.sendClassicGroupSync(
        syncRequest
            .setMemberId(followerId)
            .setGenerationId(nextGenerationId)
    );

    assertTrue(followerSyncResult.records.isEmpty());
    assertFalse(followerSyncResult.syncFuture.isDone());

    // Sync group with leader to get new assignment.
    List<SyncGroupRequestAssignment> assignment = new ArrayList<>();
    assignment.add(new SyncGroupRequestAssignment()
        .setMemberId(leaderJoinResponse.memberId())
        .setAssignment(leaderAssignment)
    );
    assignment.add(new SyncGroupRequestAssignment()
        .setMemberId(followerId)
        .setAssignment(followerAssignment)
    );

    GroupMetadataManagerTestContext.SyncResult syncResult = context.sendClassicGroupSync(
        syncRequest
            .setMemberId(leaderJoinResponse.memberId())
            .setGenerationId(nextGenerationId)
            .setAssignments(assignment)
    );

    // Simulate a successful write to log. This will update the group assignment with the new assignment.
    syncResult.appendFuture.complete(null);

    Map<String, byte[]> updatedAssignment = assignment.stream().collect(Collectors.toMap(
        SyncGroupRequestAssignment::memberId, SyncGroupRequestAssignment::assignment
    ));

    assertEquals(
        Collections.singletonList(
            GroupCoordinatorRecordHelpers.newGroupMetadataRecord(group, updatedAssignment,
                MetadataVersion.latestTesting())),
        syncResult.records
    );
    assertTrue(syncResult.syncFuture.isDone());
    assertEquals(Errors.NONE.code(), syncResult.syncFuture.get().errorCode());
    // NOTE(review): assertEquals on byte[] compares references, not contents — this
    // passes only if the same array instance is propagated; assertArrayEquals would
    // be more robust.
    assertEquals(leaderAssignment, syncResult.syncFuture.get().assignment());

    // Follower sync group should also be completed.
    assertEquals(Errors.NONE.code(), followerSyncResult.syncFuture.get().errorCode());
    assertEquals(followerAssignment, followerSyncResult.syncFuture.get().assignment());
    assertTrue(group.isInState(STABLE));
}
/**
 * Checks that the subject contains at least the given elements (null-friendly).
 * The returned {@code Ordered} allows additionally asserting that the elements
 * appear in the given order. Delegates to {@code containsAtLeastElementsIn} after
 * accumulating the varargs into a single collection.
 */
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
    @Nullable Object firstExpected,
    @Nullable Object secondExpected,
    @Nullable Object @Nullable ... restOfExpected) {
  return containsAtLeastElementsIn(accumulate(firstExpected, secondExpected, restOfExpected));
}
/** null participates in in-order containsAtLeast matching like any other element. */
@Test
public void iterableContainsAtLeastInOrderWithNull() {
  List<Integer> exactMatch = asList(3, null, 5);
  List<Integer> withExtraElement = asList(3, null, 7, 5);
  assertThat(exactMatch).containsAtLeast(3, null, 5).inOrder();
  assertThat(withExtraElement).containsAtLeast(3, null, 5).inOrder();
}
/**
 * Invoked when rolling back a global transaction fails: logs the failure and
 * schedules a delayed re-check of the transaction status.
 */
@Override
public void onRollbackFailure(GlobalTransaction tx, Throwable originalException) {
    LOGGER.warn("Failed to rollback transaction[" + tx.getXid() + "]", originalException);
    // After SCHEDULE_INTERVAL_SECONDS, verify whether the transaction eventually
    // reached the Rollbacked state.
    TIMER.newTimeout(new CheckTimerTask(tx, GlobalStatus.Rollbacked), SCHEDULE_INTERVAL_SECONDS, TimeUnit.SECONDS);
}
/**
 * onRollbackFailure must schedule exactly one re-check task on the shared TIMER,
 * and the task must drain once the transaction reports the Rollbacked status.
 */
@Test
void onRollbackFailure() throws Exception {
    RootContext.bind(DEFAULT_XID);
    GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
    FailureHandler failureHandler = new DefaultFailureHandlerImpl();
    failureHandler.onRollbackFailure(tx, new MyRuntimeException("").getCause());
    // get timer (private static field, accessed reflectively)
    Class<?> c = Class.forName("org.apache.seata.tm.api.DefaultFailureHandlerImpl");
    Field field = c.getDeclaredField("TIMER");
    field.setAccessible(true);
    HashedWheelTimer timer = (HashedWheelTimer) field.get(failureHandler);
    // assert timer pendingCount: first time is 1
    Long pendingTimeout = timer.pendingTimeouts();
    Assertions.assertEquals(pendingTimeout, 1L);
    //set globalStatus
    globalStatus = GlobalStatus.Rollbacked;
    // NOTE(review): a fixed 25s sleep makes this test very slow and timing-sensitive;
    // consider polling with a timeout (e.g. Awaitility) instead.
    Thread.sleep(25 * 1000L);
    pendingTimeout = timer.pendingTimeouts();
    LOGGER.info("pendingTimeout {}", pendingTimeout);
    //all timer is done
    Assertions.assertEquals(pendingTimeout, 0L);
}
/**
 * Checks whether the given value is a fixed-line telephone number, as defined by
 * the {@code PatternPool.TEL} regular expression (e.g. "010-12345678").
 *
 * @param value the value to validate
 * @return true if the value matches the fixed-line telephone pattern
 */
public static boolean isTel(CharSequence value) {
    return Validator.isMatchRegex(PatternPool.TEL, value);
}
/**
 * Area-code-dash-number strings are valid fixed-line numbers; space separators,
 * letters, dots, and mobile numbers are rejected.
 */
@Test
public void testTel() {
    // Valid: area code, dash, subscriber number.
    final List<String> validTels = new ArrayList<>();
    validTels.add("010-12345678");
    validTels.add("020-9999999");
    validTels.add("0755-7654321");

    // Invalid: space separator, leading letter, embedded dot, mobile number.
    final List<String> invalidTels = new ArrayList<>();
    invalidTels.add("010 12345678");
    invalidTels.add("A20-9999999");
    invalidTels.add("0755-7654.321");
    invalidTels.add("13619887123");

    for (final String tel : validTels) {
        assertTrue(PhoneUtil.isTel(tel));
    }
    for (final String tel : invalidTels) {
        assertFalse(PhoneUtil.isTel(tel));
    }
}
/**
 * Redirects a logged-in system administrator (other than the 'admin' account itself)
 * to the change-admin-password page while the built-in admin account still uses its
 * default credentials. The redirect can be disabled via configuration; it is enabled
 * by default.
 */
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException {
    boolean forceRedirect = config
        .getBoolean(SONAR_FORCE_REDIRECT_DEFAULT_ADMIN_CREDENTIALS)
        .orElse(true);
    if (forceRedirect
        && userSession.hasSession()
        && userSession.isLoggedIn()
        && userSession.isSystemAdministrator()
        && !"admin".equals(userSession.getLogin())
        && defaultAdminCredentialsVerifier.hasDefaultCredentialUser()) {
        // NOTE(review): the filter chain still runs after the redirect is sent —
        // confirm this is intentional and that downstream filters tolerate a
        // committed response (an early return here may be missing).
        redirectTo(response, request.getContextPath() + CHANGE_ADMIN_PASSWORD_PATH);
    }
    chain.doFilter(request, response);
}
@Test public void do_not_redirect_if_not_a_system_administrator() throws Exception { when(session.isSystemAdministrator()).thenReturn(false); underTest.doFilter(request, response, chain); verify(response, never()).sendRedirect(any()); }
String resolve() throws IOException { String from = String.format("%s/v%s/%s.zip", GITHUB_DOWNLOAD_PREFIX, getSDKVersion(), buildFileName()); if (!Strings.isNullOrEmpty(options.getPrismLocation())) { checkArgument( !options.getPrismLocation().startsWith(GITHUB_TAG_PREFIX), "Provided --prismLocation URL is not an Apache Beam Github " + "Release page URL or download URL: ", from); from = options.getPrismLocation(); } String fromFileName = getNameWithoutExtension(from); Path to = Paths.get(userHome(), PRISM_BIN_PATH, fromFileName); if (Files.exists(to)) { return to.toString(); } createDirectoryIfNeeded(to); if (from.startsWith("http")) { String result = resolve(new URL(from), to); checkState(Files.exists(to), "Resolved location does not exist: %s", result); return result; } String result = resolve(Paths.get(from), to); checkState(Files.exists(to), "Resolved location does not exist: %s", result); return result; }
@Test public void givenFilePrismLocationOption_thenResolves() throws IOException { assertThat(Files.exists(DESTINATION_DIRECTORY)).isFalse(); PrismPipelineOptions options = options(); options.setPrismLocation(getLocalPrismBuildOrIgnoreTest()); PrismLocator underTest = new PrismLocator(options); String got = underTest.resolve(); assertThat(got).contains(DESTINATION_DIRECTORY.toString()); Path gotPath = Paths.get(got); assertThat(Files.exists(gotPath)).isTrue(); }
public int getWikiPrice(ItemPrice itemPrice) { final int wikiPrice = itemPrice.getWikiPrice(); final int jagPrice = itemPrice.getPrice(); if (wikiPrice <= 0) { return jagPrice; } if (wikiPrice <= lowPriceThreshold) { return wikiPrice; } return wikiPrice < jagPrice * activePriceThreshold ? wikiPrice : jagPrice; }
@Test public void testGetWikiPrice() { ItemPrice itemPrice = new ItemPrice(); itemPrice.setId(ItemID.YEW_SEED); itemPrice.setName("Yew seed"); itemPrice.setPrice(47_975); itemPrice.setWikiPrice(50_754); assertEquals(itemPrice.getWikiPrice(), itemManager.getWikiPrice(itemPrice)); itemPrice.setWikiPrice(300_000); // outside of 5x range assertEquals(itemPrice.getPrice(), itemManager.getWikiPrice(itemPrice)); }
public static List<TypedExpression> coerceCorrectConstructorArguments( final Class<?> type, List<TypedExpression> arguments, List<Integer> emptyCollectionArgumentsIndexes) { Objects.requireNonNull(type, "Type parameter cannot be null as the method searches constructors from that class!"); Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead."); Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead."); if (emptyCollectionArgumentsIndexes.size() > arguments.size()) { throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. " + "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")"); } // Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it. final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments); Constructor<?> constructor = resolveConstructor(type, coercedArgumentsTypesList); if (constructor != null) { return coercedArgumentsTypesList; } else { // This needs to go through all possible combinations. 
final int indexesListSize = emptyCollectionArgumentsIndexes.size(); for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) { for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) { switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex)); constructor = resolveConstructor(type, coercedArgumentsTypesList); if (constructor != null) { return coercedArgumentsTypesList; } switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex)); } switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes)); } // No constructor found, return the original arguments. return arguments; } }
@Test public void coerceCorrectConstructorArgumentsCoerceMap() { final List<TypedExpression> arguments = List.of( new ListExprT(new ListCreationLiteralExpression(null, NodeList.nodeList())), new ListExprT(new ListCreationLiteralExpression(null, NodeList.nodeList()))); final List<Class<?>> expectedArgumentClasses = List.of(ListExprT.class, MapExprT.class); final List<TypedExpression> coercedArguments = MethodResolutionUtils.coerceCorrectConstructorArguments( Person.class, arguments, List.of(1)); Assertions.assertThat(getTypedExpressionsClasses(coercedArguments)) .containsExactlyElementsOf(expectedArgumentClasses); }
@SuppressWarnings("unchecked") public <T extends Expression> T rewrite(final T expression, final C context) { return (T) rewriter.process(expression, context); }
@Test public void shouldRewriteStructExpression() { // Given: final CreateStructExpression parsed = parseExpression("STRUCT(FOO := 'foo', BAR := col4[1])"); final Expression fooVal = parsed.getFields().stream().filter(f -> f.getName().equals("FOO")).findFirst().get().getValue(); final Expression barVal = parsed.getFields().stream().filter(f -> f.getName().equals("BAR")).findFirst().get().getValue(); when(processor.apply(fooVal, context)).thenReturn(expr1); when(processor.apply(barVal, context)).thenReturn(expr2); // When: final Expression rewritten = expressionRewriter.rewrite(parsed, context); // Then: assertThat(rewritten, equalTo(new CreateStructExpression(ImmutableList.of(new Field("FOO", expr1), new Field("BAR", expr2))))); }
public static List<Path> pluginUrls(Path topPath) throws IOException { boolean containsClassFiles = false; Set<Path> archives = new TreeSet<>(); LinkedList<DirectoryEntry> dfs = new LinkedList<>(); Set<Path> visited = new HashSet<>(); if (isArchive(topPath)) { return Collections.singletonList(topPath); } DirectoryStream<Path> topListing = Files.newDirectoryStream( topPath, PLUGIN_PATH_FILTER ); dfs.push(new DirectoryEntry(topListing)); visited.add(topPath); try { while (!dfs.isEmpty()) { Iterator<Path> neighbors = dfs.peek().iterator; if (!neighbors.hasNext()) { dfs.pop().stream.close(); continue; } Path adjacent = neighbors.next(); if (Files.isSymbolicLink(adjacent)) { try { Path symlink = Files.readSymbolicLink(adjacent); // if symlink is absolute resolve() returns the absolute symlink itself Path parent = adjacent.getParent(); if (parent == null) { continue; } Path absolute = parent.resolve(symlink).toRealPath(); if (Files.exists(absolute)) { adjacent = absolute; } else { continue; } } catch (IOException e) { // See https://issues.apache.org/jira/browse/KAFKA-6288 for a reported // failure. Such a failure at this stage is not easily reproducible and // therefore an exception is caught and ignored after issuing a // warning. This allows class scanning to continue for non-broken plugins. log.warn( "Resolving symbolic link '{}' failed. Ignoring this path.", adjacent, e ); continue; } } if (!visited.contains(adjacent)) { visited.add(adjacent); if (isArchive(adjacent)) { archives.add(adjacent); } else if (isClassFile(adjacent)) { containsClassFiles = true; } else { DirectoryStream<Path> listing = Files.newDirectoryStream( adjacent, PLUGIN_PATH_FILTER ); dfs.push(new DirectoryEntry(listing)); } } } } finally { while (!dfs.isEmpty()) { dfs.pop().stream.close(); } } if (containsClassFiles) { if (archives.isEmpty()) { return Collections.singletonList(topPath); } log.warn("Plugin path contains both java archives and class files. 
Returning only the" + " archives"); } return Arrays.asList(archives.toArray(new Path[0])); }
@Test public void testOrderOfPluginUrlsWithJars() throws Exception { createBasicDirectoryLayout(); // Here this method is just used to create the files. The result is not used. createBasicExpectedUrls(); List<Path> actual = PluginUtils.pluginUrls(pluginPath); // 'simple-transform.jar' is created first. In many cases, without sorting within the // PluginUtils, this jar will be placed before 'another-transform.jar'. However this is // not guaranteed because a DirectoryStream does not maintain a certain order in its // results. Besides this test case, sorted order in every call to assertUrls below. int i = Arrays.toString(actual.toArray()).indexOf("another-transform.jar"); int j = Arrays.toString(actual.toArray()).indexOf("simple-transform.jar"); assertTrue(i < j); }
public static String truncateContent(String content) { if (content == null) { return ""; } else if (content.length() <= SHOW_CONTENT_SIZE) { return content; } else { return content.substring(0, SHOW_CONTENT_SIZE) + "..."; } }
@Test void testTruncateLongContent() { char[] arr = new char[101]; Arrays.fill(arr, 'a'); String content = new String(arr); String actual = ContentUtils.truncateContent(content); assertEquals(content.substring(0, 100) + "...", actual); }
boolean rescaleNeeded() { return (initialScores_low > finalScores_low && initialScores_high >= initialScores_low && finalScores_high >= finalScores_low); }
@Test void increasingScoresNoNeed() { var adjuster = new RangeAdjuster(); rerank(adjuster, 1.0, 2.0); rerank(adjuster, 3.0, 4.0); rerank(adjuster, 2.0, 3.0); assertFalse(adjuster.rescaleNeeded()); }
@Override public void callback(CallbackContext context) { try (OAuth20Service scribe = scribeFactory.newScribe(gitLabSettings, context.getCallbackUrl(), scribeApi)) { onCallback(context, scribe); } catch (IOException | ExecutionException e) { throw new IllegalStateException(e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IllegalStateException(e); } }
@Test public void onCallback_withGroupSyncAndAllowedGroupsNotMatching_shouldThrow() { when(gitLabSettings.syncUserGroups()).thenReturn(true); when(gitLabSettings.allowedGroups()).thenReturn(Set.of("path2")); mockGsonUser(); mockGitlabGroups(); assertThatExceptionOfType(UnauthorizedException.class) .isThrownBy(() -> gitLabIdentityProvider.callback(callbackContext)) .withMessage("You are not allowed to authenticate"); }
@Override public Collection<RedisServer> masters() { List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS); return toRedisServersList(masters); }
@Test public void testMasters() { Collection<RedisServer> masters = connection.masters(); assertThat(masters).hasSize(1); }
public static long write(InputStream is, OutputStream os) throws IOException { return write(is, os, BUFFER_SIZE); }
@Test void testWrite1() throws Exception { assertThat((int) IOUtils.write(is, os, 16), equalTo(TEXT.length())); }
public static Collection<MdbValidityStatus> assertEjbClassValidity(final ClassInfo mdbClass) { Collection<MdbValidityStatus> mdbComplianceIssueList = new ArrayList<>(MdbValidityStatus.values().length); final String className = mdbClass.name().toString(); verifyModifiers(className, mdbClass.flags(), mdbComplianceIssueList); for (MethodInfo method : mdbClass.methods()) { if ("onMessage".equals(method.name())) { verifyOnMessageMethod(className, method.flags(), mdbComplianceIssueList); } if ("finalize".equals(method.name())) { EjbLogger.DEPLOYMENT_LOGGER.mdbCantHaveFinalizeMethod(className); mdbComplianceIssueList.add(MdbValidityStatus.MDB_SHOULD_NOT_HAVE_FINALIZE_METHOD); } } return mdbComplianceIssueList; }
@Test public void mdbWithFinalizeMethod() { assertTrue(assertEjbClassValidity(buildClassInfoForClass(InvalidMdbWithFinalizeMethod.class.getName())).contains( MdbValidityStatus.MDB_SHOULD_NOT_HAVE_FINALIZE_METHOD)); }
public synchronized void xaCommit(String xid, long branchId, String applicationData) throws XAException { XAXid xaXid = XAXidBuilder.build(xid, branchId); xaResource.commit(xaXid, false); releaseIfNecessary(); }
@Test public void testXACommit() throws Throwable { Connection connection = Mockito.mock(Connection.class); Mockito.when(connection.getAutoCommit()).thenReturn(true); XAResource xaResource = Mockito.mock(XAResource.class); XAConnection xaConnection = Mockito.mock(XAConnection.class); Mockito.when(xaConnection.getXAResource()).thenReturn(xaResource); BaseDataSourceResource<ConnectionProxyXA> baseDataSourceResource = Mockito.mock(BaseDataSourceResource.class); String xid = "xxx"; ConnectionProxyXA connectionProxyXA = new ConnectionProxyXA(connection, xaConnection, baseDataSourceResource, xid); connectionProxyXA.init(); connectionProxyXA.xaCommit("xxx", 123L, null); Mockito.verify(xaResource).commit(any(Xid.class), any(Boolean.class)); Mockito.verify(xaResource, times(0)).rollback(any(Xid.class)); }
@Nullable private static JobID getJobId(CommandLine commandLine) throws FlinkParseException { String jobId = commandLine.getOptionValue(JOB_ID_OPTION.getOpt()); if (jobId == null) { return null; } try { return JobID.fromHexString(jobId); } catch (IllegalArgumentException e) { throw createFlinkParseException(JOB_ID_OPTION, e); } }
@Test void testSetJobIdManually() throws FlinkParseException { final JobID jobId = new JobID(); final String[] args = { "--configDir", confDirPath, "--job-classname", "foobar", "--job-id", jobId.toString() }; final StandaloneApplicationClusterConfiguration standaloneApplicationClusterConfiguration = commandLineParser.parse(args); assertThat(standaloneApplicationClusterConfiguration.getJobId()).isEqualTo(jobId); }
public String encode(long... numbers) { if (numbers.length == 0) { return ""; } for (final long number : numbers) { if (number < 0) { return ""; } if (number > MAX_NUMBER) { throw new IllegalArgumentException("number can not be greater than " + MAX_NUMBER + "L"); } } return this._encode(numbers); }
@Test public void test_randomness_for_incrementing() { Hashids a; a = new Hashids("this is my salt"); Assert.assertEquals(a.encode(1L), "NV"); Assert.assertEquals(a.encode(2L), "6m"); Assert.assertEquals(a.encode(3L), "yD"); Assert.assertEquals(a.encode(4L), "2l"); Assert.assertEquals(a.encode(5L), "rD"); }
@Override @SuppressWarnings("ProtoFieldNullComparison") public List<IncomingMessage> pull( long requestTimeMsSinceEpoch, SubscriptionPath subscription, int batchSize, boolean returnImmediately) throws IOException { PullRequest request = new PullRequest().setReturnImmediately(returnImmediately).setMaxMessages(batchSize); PullResponse response = pubsub.projects().subscriptions().pull(subscription.getPath(), request).execute(); if (response.getReceivedMessages() == null || response.getReceivedMessages().isEmpty()) { return ImmutableList.of(); } List<IncomingMessage> incomingMessages = new ArrayList<>(response.getReceivedMessages().size()); for (ReceivedMessage message : response.getReceivedMessages()) { PubsubMessage pubsubMessage = message.getMessage(); Map<String, String> attributes; if (pubsubMessage.getAttributes() != null) { attributes = pubsubMessage.getAttributes(); } else { attributes = new HashMap<>(); } // Payload. byte[] elementBytes = pubsubMessage.getData() == null ? null : pubsubMessage.decodeData(); if (elementBytes == null) { elementBytes = new byte[0]; } // Timestamp. long timestampMsSinceEpoch; if (Strings.isNullOrEmpty(timestampAttribute)) { timestampMsSinceEpoch = parseTimestampAsMsSinceEpoch(message.getMessage().getPublishTime()); } else { timestampMsSinceEpoch = extractTimestampAttribute(timestampAttribute, attributes); } // Ack id. String ackId = message.getAckId(); checkState(!Strings.isNullOrEmpty(ackId)); // Record id, if any. @Nullable String recordId = null; if (idAttribute != null) { recordId = attributes.get(idAttribute); } if (Strings.isNullOrEmpty(recordId)) { // Fall back to the Pubsub provided message id. 
recordId = pubsubMessage.getMessageId(); } com.google.pubsub.v1.PubsubMessage.Builder protoMessage = com.google.pubsub.v1.PubsubMessage.newBuilder(); protoMessage.setData(ByteString.copyFrom(elementBytes)); protoMessage.putAllAttributes(attributes); // PubsubMessage uses `null` to represent no ordering key where we want a default of "". if (pubsubMessage.getOrderingKey() != null) { protoMessage.setOrderingKey(pubsubMessage.getOrderingKey()); } else { protoMessage.setOrderingKey(""); } incomingMessages.add( IncomingMessage.of( protoMessage.build(), timestampMsSinceEpoch, requestTimeMsSinceEpoch, ackId, recordId)); } return incomingMessages; }
@Test public void pullOneMessage() throws IOException { String expectedSubscription = SUBSCRIPTION.getPath(); PullRequest expectedRequest = new PullRequest().setReturnImmediately(true).setMaxMessages(10); PubsubMessage expectedPubsubMessage = new PubsubMessage() .setMessageId(MESSAGE_ID) .encodeData(DATA.getBytes(StandardCharsets.UTF_8)) .setPublishTime(String.valueOf(PUB_TIME)) .setAttributes( ImmutableMap.of( TIMESTAMP_ATTRIBUTE, String.valueOf(MESSAGE_TIME), ID_ATTRIBUTE, RECORD_ID)) .setOrderingKey(ORDERING_KEY); ReceivedMessage expectedReceivedMessage = new ReceivedMessage().setMessage(expectedPubsubMessage).setAckId(ACK_ID); PullResponse expectedResponse = new PullResponse().setReceivedMessages(ImmutableList.of(expectedReceivedMessage)); when((Object) mockPubsub .projects() .subscriptions() .pull(expectedSubscription, expectedRequest) .execute()) .thenReturn(expectedResponse); List<IncomingMessage> acutalMessages = client.pull(REQ_TIME, SUBSCRIPTION, 10, true); assertEquals(1, acutalMessages.size()); IncomingMessage actualMessage = acutalMessages.get(0); assertEquals(ACK_ID, actualMessage.ackId()); assertEquals(DATA, actualMessage.message().getData().toStringUtf8()); assertEquals(RECORD_ID, actualMessage.recordId()); assertEquals(REQ_TIME, actualMessage.requestTimeMsSinceEpoch()); assertEquals(MESSAGE_TIME, actualMessage.timestampMsSinceEpoch()); assertEquals(ORDERING_KEY, actualMessage.message().getOrderingKey()); }
@ScalarOperator(ADD) @SqlType(StandardTypes.DOUBLE) public static double add(@SqlType(StandardTypes.DOUBLE) double left, @SqlType(StandardTypes.DOUBLE) double right) { return left + right; }
@Test public void testAdd() { assertFunction("37.7E0 + 37.7E0", DOUBLE, 37.7 + 37.7); assertFunction("37.7E0 + 17.1E0", DOUBLE, 37.7 + 17.1); assertFunction("17.1E0 + 37.7E0", DOUBLE, 17.1 + 37.7); assertFunction("17.1E0 + 17.1E0", DOUBLE, 17.1 + 17.1); }
@Override public boolean isMarshallable(Object o) { return o instanceof Serializable; }
@Test public void testDate() throws Exception { JavaSerializationMarshaller marshaller = new JavaSerializationMarshaller(); isMarshallable(marshaller, Instant.now()); }
public static String toHexStringNoPrefixZeroPadded(BigInteger value, int size) { return toHexStringZeroPadded(value, size, false); }
@Test public void testToHexStringZeroPaddedTooLargs() { assertThrows( UnsupportedOperationException.class, () -> Numeric.toHexStringNoPrefixZeroPadded(BigInteger.valueOf(-1), 5)); }
public static ErrorResponse fromJson(int code, String json) { return JsonUtil.parse(json, node -> OAuthErrorResponseParser.fromJson(code, node)); }
@Test public void testOAuthErrorResponseFromJsonMissingError() { String description = "Credentials given were invalid"; String uri = "http://iceberg.apache.org"; String json = String.format("{\"error_description\":\"%s\",\"error_uri\":\"%s\"}", description, uri); assertThatThrownBy(() -> OAuthErrorResponseParser.fromJson(400, json)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse missing string: error"); }
@Override public Class<? extends DiscoveryStrategy> getDiscoveryStrategyType() { return HazelcastKubernetesDiscoveryStrategy.class; }
@Test public void checkDiscoveryStrategyType() { HazelcastKubernetesDiscoveryStrategyFactory factory = new HazelcastKubernetesDiscoveryStrategyFactory(); Class<? extends DiscoveryStrategy> strategyType = factory.getDiscoveryStrategyType(); assertEquals(HazelcastKubernetesDiscoveryStrategy.class.getCanonicalName(), strategyType.getCanonicalName()); }
public static ResourceModel processResource(final Class<?> resourceClass) { return processResource(resourceClass, null); }
@Test(expectedExceptions = ResourceConfigException.class) public void failsOnInvalidFinderMethodNonEntityReturnType() { @RestLiCollection(name = "finderWithInvalidNonEntityReturnType") class LocalClass extends CollectionResourceTemplate<Long, EmptyRecord> { @Finder("finderWithInvalidNonEntityReturnType") public List<Long> finderWithInvalidNonEntityReturnType(@QueryParam("arg") long arg) { return Collections.emptyList(); } } RestLiAnnotationReader.processResource(LocalClass.class); Assert.fail("#validateFinderMethod should fail throwing a ResourceConfigException"); }
public CaseInsensitiveStringComparison ignoringCase() { return new CaseInsensitiveStringComparison(); }
@Test public void stringInequalityIgnoringCaseWithNullExpectedString() { assertThat("abc").ignoringCase().isNotEqualTo(null); }
@Override public void checkSubjectAccess( final KsqlSecurityContext securityContext, final String subjectName, final AclOperation operation ) { authorizationProvider.checkPrivileges( securityContext, AuthObjectType.SUBJECT, subjectName, ImmutableList.of(operation) ); }
@Test public void shouldCheckSubjectTopicPrivilegesOnProvidedAccessValidator() { // When accessValidator.checkSubjectAccess(securityContext, "subject1", AclOperation.READ); // Then verify(authorizationProvider, times(1)) .checkPrivileges( securityContext, AuthObjectType.SUBJECT, "subject1", ImmutableList.of(AclOperation.READ)); }
ComponentDto createBranchComponent(DbSession dbSession, ComponentKey componentKey, ComponentDto mainComponentDto, BranchDto mainComponentBranchDto) { checkState(delegate != null, "Current edition does not support branch feature"); return delegate.createBranchComponent(dbSession, componentKey, mainComponentDto, mainComponentBranchDto); }
@Test public void createBranchComponent_fails_with_ISE_if_delegate_is_null() { DbSession dbSession = mock(DbSession.class); ComponentKey componentKey = mock(ComponentKey.class); ComponentDto mainComponentDto = new ComponentDto(); BranchDto mainComponentBranchDto = new BranchDto(); assertThatThrownBy(() -> underTestNoBranch.createBranchComponent(dbSession, componentKey, mainComponentDto, mainComponentBranchDto)) .isInstanceOf(IllegalStateException.class) .hasMessage("Current edition does not support branch feature"); }
@Override public void process(final Exchange exchange) throws Exception { component.getRoutingProcessor(configuration.getChannel()) .process(exchange); }
@Test void testProcessSynchronous() throws Exception { when(component.getRoutingProcessor("testChannel")).thenReturn(processor); doNothing().when(processor).process(exchange); when(configuration.getChannel()).thenReturn("testChannel"); assertDoesNotThrow(() -> producer.process(exchange)); }
public Map<String, String> penRequestAllowed(PenRequest request) throws PenRequestException, SharedServiceClientException { final List<PenRequestStatus> result = repository.findByBsnAndDocTypeAndSequenceNo(request.getBsn(), request.getDocType(), request.getSequenceNo()); checkIfTooSoonOrTooOften(result); return statusOK; }
@Test public void penRequest23HoursAfterLastRequestThrowsDWS1Exception() throws PenRequestException, SharedServiceClientException{ // create a previous penRequest with a RequestDateTime status.setRequestDatetime(TEST_TIME.minusHours(23)); mockStatusList.add(status); // return arraylist with one dummy penrequest Mockito.when(mockRepository.findByBsnAndDocTypeAndSequenceNo(request.getBsn(), request.getDocType(), request.getSequenceNo())).thenReturn(mockStatusList); Exception exception = assertThrows(PenRequestException.class, () -> { service.penRequestAllowed(request); }); assertEquals("DWS1", exception.getMessage()); }
@Override public void cancel() { isRunning = false; // we need to close the socket as well, because the Thread.interrupt() function will // not wake the thread in the socketStream.read() method when blocked. Socket theSocket = this.currentSocket; if (theSocket != null) { IOUtils.closeSocket(theSocket); } }
@Test void testSocketSourceOutputAcrossRetries() throws Exception { ServerSocket server = new ServerSocket(0); Socket channel = null; try { SocketTextStreamFunction source = new SocketTextStreamFunction(LOCALHOST, server.getLocalPort(), "\n", 10, 100); SocketSourceThread runner = new SocketSourceThread(source, "test1", "check1", "check2"); runner.start(); // first connection: nothing channel = NetUtils.acceptWithoutTimeout(server); channel.close(); // second connection: first string channel = NetUtils.acceptWithoutTimeout(server); OutputStreamWriter writer = new OutputStreamWriter(channel.getOutputStream()); writer.write("te"); writer.close(); channel.close(); // third connection: nothing channel = NetUtils.acceptWithoutTimeout(server); channel.close(); // forth connection: second string channel = NetUtils.acceptWithoutTimeout(server); writer = new OutputStreamWriter(channel.getOutputStream()); writer.write("st1\n"); writer.write("check1\n"); writer.write("check2\n"); writer.flush(); runner.waitForNumElements(2); runner.cancel(); runner.waitUntilDone(); } finally { if (channel != null) { IOUtils.closeQuietly(channel); } IOUtils.closeQuietly(server); } }
public Set<Cookie> decode(String header) { Set<Cookie> cookies = new TreeSet<Cookie>(); decode(cookies, header); return cookies; }
@Test public void testDecodingMultipleCookies() { String c1 = "myCookie=myValue;"; String c2 = "myCookie2=myValue2;"; String c3 = "myCookie3=myValue3;"; Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(c1 + c2 + c3); assertEquals(3, cookies.size()); Iterator<Cookie> it = cookies.iterator(); Cookie cookie = it.next(); assertNotNull(cookie); assertEquals("myValue", cookie.value()); cookie = it.next(); assertNotNull(cookie); assertEquals("myValue2", cookie.value()); cookie = it.next(); assertNotNull(cookie); assertEquals("myValue3", cookie.value()); }
@Override public void validateUserList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得岗位信息 List<AdminUserDO> users = userMapper.selectBatchIds(ids); Map<Long, AdminUserDO> userMap = CollectionUtils.convertMap(users, AdminUserDO::getId); // 校验 ids.forEach(id -> { AdminUserDO user = userMap.get(id); if (user == null) { throw exception(USER_NOT_EXISTS); } if (!CommonStatusEnum.ENABLE.getStatus().equals(user.getStatus())) { throw exception(USER_IS_DISABLE, user.getNickname()); } }); }
@Test public void testValidateUserList_notEnable() { // mock 数据 AdminUserDO userDO = randomAdminUserDO().setStatus(CommonStatusEnum.DISABLE.getStatus()); userMapper.insert(userDO); // 准备参数 List<Long> ids = singletonList(userDO.getId()); // 调用, 并断言异常 assertServiceException(() -> userService.validateUserList(ids), USER_IS_DISABLE, userDO.getNickname()); }
public Optional<Search> getForUser(String id, SearchUser searchUser) { final Optional<Search> search = dbService.get(id); search.ifPresent(s -> checkPermission(searchUser, s)); return search; }
@Test public void loadsSearchIfUserIsOwner() { final String userName = "boeser-willi"; final Search search = mockSearchWithOwner(userName); final SearchUser searchUser = mock(SearchUser.class); when(searchUser.owns(search)).thenReturn(true); final Optional<Search> result = sut.getForUser(search.id(), searchUser); assertThat(result).isEqualTo(Optional.of(search)); }
@Override public ResourceCleaner createGlobalResourceCleaner( ComponentMainThreadExecutor mainThreadExecutor) { return DefaultResourceCleaner.forGloballyCleanableResources( mainThreadExecutor, cleanupExecutor, retryStrategy) .withPrioritizedCleanup( JOB_MANAGER_RUNNER_REGISTRY_LABEL, ofLocalResource(jobManagerRunnerRegistry)) .withRegularCleanup(JOB_GRAPH_STORE_LABEL, jobGraphWriter) .withRegularCleanup(BLOB_SERVER_LABEL, blobServer) .withRegularCleanup(HA_SERVICES_LABEL, highAvailabilityServices) .withRegularCleanup( JOB_MANAGER_METRIC_GROUP_LABEL, ofLocalResource(jobManagerMetricGroup)) .build(); }
@Test public void testGlobalResourceCleaning() throws ExecutionException, InterruptedException, TimeoutException { assertCleanupNotTriggered(); final CompletableFuture<Void> cleanupResultFuture = testInstance .createGlobalResourceCleaner( ComponentMainThreadExecutorServiceAdapter.forMainThread()) .cleanupAsync(JOB_ID); assertWaitingForPrioritizedCleanupToFinish(); assertThat(cleanupResultFuture).isNotCompleted(); // makes the prioritized JobManagerRunner cleanup result terminate so that other cleanups // are triggered jobManagerRunnerRegistryLocalCleanupResultFuture.complete(null); assertThat(jobManagerRunnerRegistryLocalCleanupFuture).isCompleted(); assertThat(jobGraphWriterLocalCleanupFuture).isNotDone(); assertThat(jobGraphWriterGlobalCleanupFuture).isCompleted(); assertThat(blobServer.getLocalCleanupFuture()).isNotDone(); assertThat(blobServer.getGlobalCleanupFuture()).isCompleted(); assertThat(highAvailabilityServicesGlobalCleanupFuture).isCompleted(); assertJobMetricGroupCleanedUp(); assertThat(cleanupResultFuture).isCompleted(); }
@Nullable @Override public JobGraph recoverJobGraph(JobID jobId) throws Exception { checkNotNull(jobId, "Job ID"); LOG.debug("Recovering job graph {} from {}.", jobId, jobGraphStateHandleStore); final String name = jobGraphStoreUtil.jobIDToName(jobId); synchronized (lock) { verifyIsRunning(); boolean success = false; RetrievableStateHandle<JobGraph> jobGraphRetrievableStateHandle; try { try { jobGraphRetrievableStateHandle = jobGraphStateHandleStore.getAndLock(name); } catch (StateHandleStore.NotExistException ignored) { success = true; return null; } catch (Exception e) { throw new FlinkException( "Could not retrieve the submitted job graph state handle " + "for " + name + " from the submitted job graph store.", e); } JobGraph jobGraph; try { jobGraph = jobGraphRetrievableStateHandle.retrieveState(); } catch (ClassNotFoundException cnfe) { throw new FlinkException( "Could not retrieve submitted JobGraph from state handle under " + name + ". This indicates that you are trying to recover from state written by an " + "older Flink version which is not compatible. Try cleaning the state handle store.", cnfe); } catch (IOException ioe) { throw new FlinkException( "Could not retrieve submitted JobGraph from state handle under " + name + ". This indicates that the retrieved state handle is broken. Try cleaning the state handle " + "store.", ioe); } addedJobGraphs.add(jobGraph.getJobID()); LOG.info("Recovered {}.", jobGraph); success = true; return jobGraph; } finally { if (!success) { jobGraphStateHandleStore.release(name); } } } }
@Test public void testRecoverJobGraphFailedShouldReleaseHandle() throws Exception { final CompletableFuture<String> releaseFuture = new CompletableFuture<>(); final FlinkException testException = new FlinkException("Test exception."); final TestingStateHandleStore<JobGraph> stateHandleStore = builder.setGetFunction( ignore -> { throw testException; }) .setReleaseConsumer(releaseFuture::complete) .build(); final JobGraphStore jobGraphStore = createAndStartJobGraphStore(stateHandleStore); try { jobGraphStore.recoverJobGraph(testingJobGraph.getJobID()); fail( "recoverJobGraph should fail when there is exception in getting the state handle."); } catch (Exception ex) { assertThat(ex, FlinkMatchers.containsCause(testException)); String actual = releaseFuture.get(timeout, TimeUnit.MILLISECONDS); assertThat(actual, is(testingJobGraph.getJobID().toString())); } }
@Override public List<AdminUserDO> getUserListByNickname(String nickname) { return userMapper.selectListByNickname(nickname); }
@Test public void testGetUserListByNickname() { // mock 数据 AdminUserDO user = randomAdminUserDO(o -> o.setNickname("芋头")); userMapper.insert(user); // 测试 nickname 不匹配 userMapper.insert(randomAdminUserDO(o -> o.setNickname("源码"))); // 准备参数 String nickname = "芋"; // 调用 List<AdminUserDO> result = userService.getUserListByNickname(nickname); // 断言 assertEquals(1, result.size()); assertEquals(user, result.get(0)); }
@Override
public BackgroundException map(final SMBRuntimeException failure) {
    // Maps SMB client failures onto the application's BackgroundException
    // hierarchy. API-level failures are dispatched by NTSTATUS code; other
    // failures are matched by walking their cause chain.
    if(failure instanceof SMBApiException) {
        final StringBuilder buffer = new StringBuilder();
        final SMBApiException e = (SMBApiException) failure;
        // NTSTATUS: localized status name plus the raw hex code for support logs.
        final NtStatus status = e.getStatus();
        this.append(buffer, String.format("%s (0x%08x)", LocaleFactory.localizedString(status.name(), "SMB"), e.getStatusCode()));
        switch(status) {
            case STATUS_BAD_NETWORK_NAME:
            case STATUS_NOT_FOUND:
            case STATUS_OBJECT_NAME_NOT_FOUND:
            case STATUS_OBJECT_PATH_NOT_FOUND:
            case STATUS_PATH_NOT_COVERED:
                return new NotfoundException(buffer.toString(), failure.getCause());
            case STATUS_NOT_IMPLEMENTED:
            case STATUS_NOT_SUPPORTED:
                return new UnsupportedException(buffer.toString(), failure.getCause());
            case STATUS_ACCESS_DENIED:
                return new AccessDeniedException(buffer.toString(), failure.getCause());
            case STATUS_OBJECT_NAME_COLLISION:
                return new ConflictException(buffer.toString(), failure.getCause());
            case STATUS_FILE_LOCK_CONFLICT:
            case STATUS_LOCK_NOT_GRANTED:
            case STATUS_SHARING_VIOLATION:
                return new LockedException(buffer.toString(), failure.getCause());
            case STATUS_LOGON_FAILURE:
            case STATUS_PASSWORD_EXPIRED:
            case STATUS_ACCOUNT_DISABLED:
            case STATUS_LOGON_TYPE_NOT_GRANTED:
                return new LoginFailureException(buffer.toString(), failure.getCause());
            case STATUS_DISK_FULL:
                return new QuotaException(buffer.toString(), failure.getCause());
            case STATUS_IO_TIMEOUT:
                return new ConnectionTimeoutException(buffer.toString(), failure.getCause());
            case STATUS_CONNECTION_DISCONNECTED:
            case STATUS_CONNECTION_RESET:
                return new ConnectionRefusedException(buffer.toString(), failure.getCause());
            default:
                // Unrecognized status codes are reported as interoperability failures.
                return new InteroperabilityException(buffer.toString(), failure.getCause());
        }
    }
    // Non-API failure: look through the cause chain for transport or timeout causes.
    for(Throwable cause : ExceptionUtils.getThrowableList(failure)) {
        if(cause instanceof TransportException) {
            return new SMBTransportExceptionMappingService().map((TransportException) cause);
        }
        if(cause instanceof TimeoutException) {
            return new ConnectionTimeoutException(cause.getMessage(), cause);
        }
    }
    // NOTE(review): depending on the commons-lang version, getRootCause may
    // return null for a throwable without a cause — confirm root is non-null here.
    final Throwable root = ExceptionUtils.getRootCause(failure);
    return new InteroperabilityException(root.getMessage(), failure);
}
@Test
public void map() {
    // A plain SMBRuntimeException falls through to the generic
    // interoperability failure with the standard support hint.
    assertEquals("Interoperability failure", new SMBExceptionMappingService().map(new SMBRuntimeException("")).getMessage());
    assertEquals("Please contact your web hosting service provider for assistance.", new SMBExceptionMappingService().map(new SMBRuntimeException("")).getDetail());
    // An SMBApiException's detail carries the localized NTSTATUS name plus hex code.
    assertEquals("STATUS_OBJECT_NAME_NOT_FOUND (0xc0000034). Please contact your web hosting service provider for assistance.", new SMBExceptionMappingService().map(new SMBApiException(3221225524L, SMB2MessageCommandCode.SMB2_CREATE, "Create failed for \\\\localhost\\user\\Dk9I5nTZ", null)).getDetail());
}
public Rule<FilterNode> filterNodeRule() {
    // Factory for the FilterNode variant of the pull-up-expression-in-lambda rule.
    return new PullUpExpressionInLambdaFilterNodeRule();
}
@Test
public void testNoValidFilter() {
    // The rule must NOT fire for this filter: the lambda body depends on the
    // lambda parameters (k, v), so no subexpression can be pulled up.
    tester().assertThat(new PullUpExpressionInLambdaRules(getFunctionManager()).filterNodeRule())
            .setSystemProperty(PULL_EXPRESSION_FROM_LAMBDA_ENABLED, "true")
            .on(p -> {
                p.variable("idmap", new MapType(BIGINT, BIGINT, KEY_BLOCK_EQUALS, KEY_BLOCK_HASH_CODE));
                return p.filter(
                        p.rowExpression("cardinality(map_filter(idmap, (k, v) -> array_position(array_sort(array[v, k]), k) <= 200)) > 0"),
                        p.values(p.variable("idmap", new MapType(BIGINT, BIGINT, KEY_BLOCK_EQUALS, KEY_BLOCK_HASH_CODE))));
            }).doesNotFire();
}
List<Set<UiNode>> splitByLayer(List<String> layerTags, Set<? extends UiNode> nodes) {
    // Buckets the given nodes by their layer tag, returning one set per tag
    // in the same order as layerTags. The default tag must come last so that
    // nodes with an unknown layer have a catch-all bucket.
    if (!layerTags.get(layerTags.size() - 1).equals(LAYER_DEFAULT)) {
        throw new IllegalArgumentException(E_DEF_NOT_LAST);
    }
    // The list preserves tag ordering for the caller; the map provides
    // tag -> bucket lookup while distributing nodes.
    List<Set<UiNode>> result = new ArrayList<>(layerTags.size());
    Map<String, Set<UiNode>> bucketByTag = new HashMap<>(layerTags.size());
    for (String tag : layerTags) {
        Set<UiNode> bucket = new HashSet<>();
        result.add(bucket);
        bucketByTag.put(tag, bucket);
    }
    // Nodes whose layer is not a known tag land in the default bucket.
    for (UiNode node : nodes) {
        String tag = node.layer();
        String target = layerTags.contains(tag) ? tag : LAYER_DEFAULT;
        bucketByTag.get(target).add(node);
    }
    return result;
}
@Test
public void oneLayer() {
    title("oneLayer()");
    // With only the default tag configured, every node must land in the
    // single (default) bucket.
    List<Set<UiNode>> result = t2.splitByLayer(DEF_TAG_ONLY, NODES);
    print(result);
    assertEquals("wrong split size", 1, result.size());
    Set<UiNode> def = result.get(0);
    assertEquals("def bad size", 6, def.size());
    assertEquals("missing node D", true, def.contains(NODE_D));
    assertEquals("missing node F", true, def.contains(NODE_F));
    assertEquals("missing node A", true, def.contains(NODE_A));
    assertEquals("missing node C", true, def.contains(NODE_C));
    assertEquals("missing node B", true, def.contains(NODE_B));
    assertEquals("missing node E", true, def.contains(NODE_E));
}
public void writeInfinityDecreasing() {
    // Appends the encoded "infinity" marker used for decreasing-order keys.
    writeTrailingBytes(INFINITY_ENCODED_DECREASING);
}
@Test
public void testWriteInfinityDecreasing() {
    OrderedCode orderedCode = new OrderedCode();
    // Reading infinity from an empty code must fail.
    try {
        orderedCode.readInfinityDecreasing();
        fail("Expected IllegalArgumentException.");
    } catch (IllegalArgumentException e) {
        // expected
    }
    // After writing the marker, exactly one read succeeds...
    orderedCode.writeInfinityDecreasing();
    assertTrue(orderedCode.readInfinityDecreasing());
    // ...and a second read fails because the marker was consumed.
    try {
        orderedCode.readInfinityDecreasing();
        fail("Expected IllegalArgumentException.");
    } catch (IllegalArgumentException e) {
        // expected
    }
}
@Override public Collection<String> allClientId() { // client id is unique in the application // use set to replace array list // it will improve the performance Collection<String> clientIds = new HashSet<>(clients.size()); clientIds.addAll(clients.keySet()); return clientIds; }
@Test
void testAllClientId() {
    // The manager was seeded with exactly one client (see test setup);
    // allClientId must report exactly that id.
    Collection<String> allClientIds = persistentIpPortClientManager.allClientId();
    assertEquals(1, allClientIds.size());
    assertTrue(allClientIds.contains(clientId));
}
public static MemorySegment wrapCopy(byte[] bytes, int start, int end) throws IllegalArgumentException {
    // Copies bytes[start, end) into a freshly allocated unpooled segment.
    // Validate the full range up front so callers get a clear
    // IllegalArgumentException instead of an ArrayIndexOutOfBoundsException
    // from the copy below (previously a negative start slipped past the checks).
    checkArgument(start >= 0);
    checkArgument(end >= start);
    checkArgument(end <= bytes.length);
    MemorySegment copy = allocateUnpooledSegment(end - start);
    copy.put(0, bytes, start, copy.size());
    return copy;
}
@Test
void testWrapCopyWrongStart() {
    // start (10) > end (3) is an invalid range and must be rejected with
    // IllegalArgumentException before any copying happens.
    assertThatExceptionOfType(IllegalArgumentException.class)
            .isThrownBy(() -> MemorySegmentFactory.wrapCopy(new byte[] {1, 2, 3}, 10, 3));
}
public NativeReader.Progress getProgress() {
    // Returns the most recently published progress snapshot without blocking
    // the reader thread that updates it.
    return progress.get();
}
@Test
public void testGetProgress() throws Exception {
    // Drives a read loop step by step and checks that getProgress() can be
    // queried at any time — even while the reader is blocked — and reports a
    // record index no more than one behind the current position.
    MockReaderIterator iterator = new MockReaderIterator(0, 5);
    MockOutputReceiver receiver = new MockOutputReceiver();
    ManualScheduler scheduler = new ManualScheduler();
    final ReadOperation readOperation = ReadOperation.forTest(new MockReader(iterator), receiver, scheduler.getExecutor(), context);
    Thread thread = runReadLoopInThread(readOperation);
    for (int i = 0; i < 5; ++i) {
        // Reader currently blocked in start()/advance().
        // Ensure that getProgress() doesn't block while the reader advances.
        ApproximateReportedProgress progress = readerProgressToCloudProgress(readOperation.getProgress());
        Long observedIndex = (progress == null) ? null : progress.getPosition().getRecordIndex().longValue();
        // Before the first record there is no progress at all; afterwards the
        // observed index may lag the loop counter by at most one.
        assertTrue(
                "Actual: " + observedIndex + " instead of " + i,
                (i == 0 && progress == null) || i == observedIndex || i == observedIndex + 1);
        iterator.offerNext(i);
        // Now the reader is not blocked (instead the receiver is blocked): progress can be
        // updated. Wait for it to be updated.
        scheduler.runOnce();
        receiver.unblockProcess();
    }
    thread.join();
}
public FEELFnResult<Boolean> invoke(@ParameterName( "range" ) Range range, @ParameterName( "point" ) Comparable point) {
    // FEEL function: true when the point coincides with the range's CLOSED
    // high endpoint. Null parameters and incomparable types are reported as
    // invalid-parameter events rather than thrown.
    if ( point == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
    }
    if ( range == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
    }
    try {
        boolean result = ( range.getHighBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getHighEndPoint() ) == 0 );
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
    }
}
@Test
void invokeParamsCantBeCompared() {
    // A String-endpoint range compared against a Range point cannot be
    // compared; the function must report an InvalidParametersEvent error.
    FunctionTestUtil.assertResultError( finishedByFunction.invoke(
            new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
            new RangeImpl( Range.RangeBoundary.CLOSED, 1, 2, Range.RangeBoundary.CLOSED ) ),
            InvalidParametersEvent.class );
}
public static org.apache.hadoop.mapred.JobID fromYarn(JobId id) {
    // Converts a YARN JobId to the classic mapred JobID: the identifier part
    // is derived from the application's cluster timestamp, the numeric id is
    // carried over unchanged.
    String identifier = fromClusterTimeStamp(id.getAppId().getClusterTimestamp());
    return new org.apache.hadoop.mapred.JobID(identifier, id.getId());
}
@Test
public void testFromYarnApplicationReport() {
    // Converts a mocked YARN ApplicationReport to a JobStatus and verifies
    // every mapped field, including the resource-usage numbers.
    ApplicationId mockAppId = mock(ApplicationId.class);
    when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
    when(mockAppId.getId()).thenReturn(6789);
    ApplicationReport mockReport = mock(ApplicationReport.class);
    when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
    when(mockReport.getApplicationId()).thenReturn(mockAppId);
    when(mockReport.getYarnApplicationState()).thenReturn(YarnApplicationState.KILLED);
    when(mockReport.getUser()).thenReturn("dummy-user");
    when(mockReport.getQueue()).thenReturn("dummy-queue");
    when(mockReport.getPriority()).thenReturn(Priority.newInstance(4));
    String jobFile = "dummy-path/job.xml";
    // First pass: the conversion must tolerate a report WITHOUT a usage report.
    try {
        JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
    } catch (NullPointerException npe) {
        fail("Type converstion from YARN fails for jobs without " + "ApplicationUsageReport");
    }
    // Second pass: attach a usage report and verify the mapped resource numbers.
    ApplicationResourceUsageReport appUsageRpt = Records.newRecord(ApplicationResourceUsageReport.class);
    Resource r = Records.newRecord(Resource.class);
    r.setMemorySize(2048);
    appUsageRpt.setNeededResources(r);
    appUsageRpt.setNumReservedContainers(1);
    appUsageRpt.setNumUsedContainers(3);
    appUsageRpt.setReservedResources(r);
    appUsageRpt.setUsedResources(r);
    when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
    JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
    assertNotNull(status, "fromYarn returned null status");
    assertEquals("dummy-path/job.xml", status.getJobFile(), "jobFile set incorrectly");
    assertEquals("dummy-queue", status.getQueue(), "queue set incorrectly");
    assertEquals("dummy-tracking-url", status.getTrackingUrl(), "trackingUrl set incorrectly");
    assertEquals("dummy-user", status.getUsername(), "user set incorrectly");
    assertEquals("dummy-tracking-url", status.getSchedulingInfo(), "schedulingInfo set incorrectly");
    assertEquals(6789, status.getJobID().getId(), "jobId set incorrectly");
    assertEquals(JobStatus.State.KILLED, status.getState(), "state set incorrectly");
    assertEquals(2048, status.getNeededMem(), "needed mem info set incorrectly");
    assertEquals(1, status.getNumReservedSlots(), "num rsvd slots info set incorrectly");
    assertEquals(3, status.getNumUsedSlots(), "num used slots info set incorrectly");
    assertEquals(2048, status.getReservedMem(), "rsvd mem info set incorrectly");
    assertEquals(2048, status.getUsedMem(), "used mem info set incorrectly");
    assertEquals(JobPriority.HIGH, status.getPriority(), "priority set incorrectly");
}
KubernetesApiProvider buildKubernetesApiUrlProvider() {
    // Probes the EndpointSlices API with a real GET; if the probe succeeds,
    // the cluster supports it and we prefer it for endpoint discovery.
    try {
        String endpointSlicesUrlString = String.format("%s/apis/discovery.k8s.io/v1/namespaces/%s/endpointslices", kubernetesMaster, namespace);
        callGet(endpointSlicesUrlString);
        LOGGER.finest("Using EndpointSlices API to discover endpoints.");
    } catch (Exception e) {
        // Any probe failure (older cluster, RBAC, 404, ...) falls back to the
        // legacy Endpoints API — deliberately broad catch.
        LOGGER.finest("EndpointSlices are not available, using Endpoints API to discover endpoints.");
        return new KubernetesApiEndpointProvider();
    }
    return new KubernetesApiEndpointSlicesProvider();
}
@Test
public void buildKubernetesApiUrlProviderReturnsEndpointSlicesProvider() throws JsonProcessingException {
    // When the EndpointSlices endpoint answers, the slices-based provider
    // must be selected.
    stub(String.format("/apis/discovery.k8s.io/v1/namespaces/%s/endpointslices", NAMESPACE),
            endpointSliceList(Collections.singletonList(443), "34.68.96.71"));
    assertThat(kubernetesClient.buildKubernetesApiUrlProvider()).isInstanceOf(KubernetesApiEndpointSlicesProvider.class);
}
public String nextNonCliCommand() {
    // Keep reading lines, handling CLI-specific commands inline, until a
    // line arrives that is NOT a CLI command; return that line.
    String line = terminal.readLine();
    while (maybeHandleCliSpecificCommands(line)) {
        line = terminal.readLine();
    }
    return line;
}
@Test
public void shouldFailIfCommandNameIsQuoted() {
    // Given: a quoted command name must NOT be recognized as a CLI command;
    // the second supplied line terminates the read loop.
    when(lineSupplier.get())
        .thenReturn("'some' 'command' " + "Arg0" + WHITE_SPACE + "'Arg 1'")
        .thenReturn("not a CLI command;");
    // When:
    console.nextNonCliCommand();
    // Then: the CLI command handler must never have been invoked.
    verify(cliCommand, never()).execute(any(), any());
}
public TaskInfo abort() {
    // Transition the task's state machine to aborted, then report the
    // resulting task info to the caller.
    taskStateMachine.abort();
    return getTaskInfo();
}
@Test
public void testAbort() throws Exception {
    // Starts a task, then aborts its output buffer and verifies the task
    // transitions RUNNING -> FINISHED.
    SqlTask sqlTask = createInitialTask();
    TaskInfo taskInfo = sqlTask.updateTask(TEST_SESSION,
            Optional.of(PLAN_FRAGMENT),
            ImmutableList.of(new TaskSource(TABLE_SCAN_NODE_ID, ImmutableSet.of(SPLIT), true)),
            createInitialEmptyOutputBuffers(PARTITIONED).withBuffer(OUT, 0).withNoMoreBufferIds(),
            Optional.of(new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty())));
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.RUNNING);
    taskInfo = sqlTask.getTaskInfo();
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.RUNNING);
    // Abort the only output buffer; the task should finish.
    sqlTask.abortTaskResults(OUT);
    // Long-poll for the state change from RUNNING (bounded by 1 second).
    taskInfo = sqlTask.getTaskInfo(taskInfo.getTaskStatus().getState()).get(1, SECONDS);
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.FINISHED);
    taskInfo = sqlTask.getTaskInfo();
    assertEquals(taskInfo.getTaskStatus().getState(), TaskState.FINISHED);
}
@Override
protected String selectorHandler(final MetaDataRegisterDTO metaDataDTO) {
    // Builds the Spring Cloud selector handle JSON: the serviceId is the
    // registering application's name; remaining fields keep builder defaults.
    return GsonUtils.getInstance().toJson(SpringCloudSelectorHandle.builder().serviceId(metaDataDTO.getAppName()).build());
}
@Test
public void testSelectorHandler() {
    // The serialized handle must carry the app name as serviceId plus the
    // builder default gray=false.
    MetaDataRegisterDTO metaDataRegisterDTO = MetaDataRegisterDTO.builder().appName("testSelectorHandler").build();
    assertEquals("{\"serviceId\":\"testSelectorHandler\",\"gray\":false}",
            shenyuClientRegisterSpringCloudService.selectorHandler(metaDataRegisterDTO));
}
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    // Propagates the consumer application's name downstream as an RPC
    // attachment, then continues the filter chain. Note: an empty string is
    // non-null and is propagated as-is.
    String application = invoker.getUrl().getParameter(CommonConstants.APPLICATION_KEY);
    if (application != null) {
        RpcContext.getContext().setAttachment(DubboUtils.SENTINEL_DUBBO_APPLICATION_KEY, application);
    }
    return invoker.invoke(invocation);
}
@Test
public void testInvokeNullApplicationKey() {
    // URL declares "application=" (empty value, not absent): the empty string
    // must still be propagated as the attachment value.
    Invoker invoker = mock(Invoker.class);
    Invocation invocation = mock(Invocation.class);
    URL url = URL.valueOf("test://test:111/test?application=");
    when(invoker.getUrl()).thenReturn(url);
    filter.invoke(invoker, invocation);
    verify(invoker).invoke(invocation);
    String application = RpcContext.getContext().getAttachment(DubboUtils.SENTINEL_DUBBO_APPLICATION_KEY);
    assertEquals(application, "");
}
@Udf
public String encodeParam(
        @UdfParameter(description = "the value to encode") final String input) {
    // SQL null semantics: null in, null out.
    if (input == null) {
        return null;
    }
    // Escape for use as an application/x-www-form-urlencoded parameter value.
    return UrlEscapers.urlFormParameterEscaper().escape(input);
}
@Test
public void shouldReturnSpecialCharsIntact() {
    // Form-parameter escaping leaves the URL-safe characters . - * _ untouched.
    assertThat(".-*_ should all pass through without being encoded",
            encodeUdf.encodeParam("foo.-*_bar"), equalTo("foo.-*_bar"));
}
public String output() {
    // Drains the buffered bytes as a UTF-8 string and closes the underlying
    // stream and channel. Empty buffers return "" without closing anything.
    if (isEmpty()) {
        return "";
    }
    try {
        // Flip the closed flag exactly once. NOTE(review): the CAS result is
        // deliberately ignored — if output() can be called twice or race with
        // a writer, confirm a second call cannot observe a closed stream.
        isClosed.compareAndSet(false, true);
        return new String(stream.toByteArray(), StandardCharsets.UTF_8);
    } catch (Exception e) {
        // Best-effort: report the failure in-band rather than throwing.
        LOG.error("Write failed: ", e);
        return "Write failed: " + e.getMessage();
    } finally {
        // Cleanup failures are logged but never propagated to the caller.
        try {
            stream.close();
        } catch (IOException e) {
            LOG.error("Close stream error: ", e);
        }
        try {
            channel.close();
        } catch (IOException e) {
            LOG.error("Close channel error: ", e);
        }
    }
}
@Test
public void testOutput() {
    // Writes the fixture buffer and checks output() returns its UTF-8 text.
    writer.write(byteBuffer.asReadOnlyBuffer());
    String res = writer.output();
    // JUnit 5 Assertions.assertEquals takes (expected, actual); the original
    // had the arguments swapped, which yields a misleading failure message.
    Assertions.assertEquals("hello, shenyu", res);
}
@VisibleForTesting
static ResourceProfile fromResourceSpec(ResourceSpec resourceSpec) {
    // Convenience overload: delegates with MemorySize.ZERO for the additional
    // memory argument of the two-arg variant.
    return fromResourceSpec(resourceSpec, MemorySize.ZERO);
}
@Test
void testFromSpecWithSerializationCopy() throws Exception {
    // A serialization round-trip of ResourceSpec.UNKNOWN must convert to the
    // same profile as the original singleton (conversion must not rely on
    // object identity).
    final ResourceSpec copiedSpec = CommonTestUtils.createCopySerializable(ResourceSpec.UNKNOWN);
    final ResourceProfile profile = ResourceProfile.fromResourceSpec(copiedSpec);
    assertThat(profile).isEqualTo(ResourceProfile.fromResourceSpec(ResourceSpec.UNKNOWN));
}
public static void addTransferRateMetric(final DataNodeMetrics metrics, final long read, final long durationInNS) {
    // Records the read throughput (bytes/second) computed from the bytes read
    // and the elapsed time in nanoseconds.
    metrics.addReadTransferRate(getTransferRateInBytesPerSecond(read, durationInNS));
}
@Test
public void testAddTransferRateMetricForZeroNSTransferDuration() {
    // A zero-nanosecond duration must not divide by zero; the recorded rate
    // is expected to be the capped value 999,999,999 bytes/second.
    DataNodeMetrics mockMetrics = mock(DataNodeMetrics.class);
    DFSUtil.addTransferRateMetric(mockMetrics, 1L, 0);
    verify(mockMetrics).addReadTransferRate(999_999_999L);
}
public ByteBuffer fetchOnePacket() throws IOException {
    // Reads one logical MySQL packet, reassembling it from multiple physical
    // packets when the payload reaches MAX_PHYSICAL_PACKET_LENGTH. Returns
    // null when the remote closed the channel or sent a truncated packet;
    // throws IOException on a sequence-id mismatch.
    int readLen;
    ByteBuffer result = defaultBuffer;
    result.clear();
    while (true) {
        // Each physical packet starts with a fixed-size header (length + seq id).
        headerByteBuffer.clear();
        readLen = readAll(headerByteBuffer);
        if (readLen != PACKET_HEADER_LEN) {
            // remote has close this channel
            LOG.info("Receive packet header failed, " + "remote {} may close the channel.", remoteHostPortString);
            return null;
        }
        // Sequence ids must be consecutive; a mismatch means a corrupted stream.
        if (packetId() != sequenceId) {
            LOG.warn("receive packet sequence id[" + packetId() + "] want to get[" + sequenceId + "]");
            throw new IOException("Bad packet sequence.");
        }
        int packetLen = packetLen();
        if ((result.capacity() - result.position()) < packetLen) {
            // byte buffer is not enough, new one packet
            ByteBuffer tmp;
            if (packetLen < MAX_PHYSICAL_PACKET_LENGTH) {
                // last packet, enough to this packet is OK.
                tmp = ByteBuffer.allocate(packetLen + result.position());
            } else {
                // already have packet, to allocate two packet.
                tmp = ByteBuffer.allocate(2 * packetLen + result.position());
            }
            // Carry over what was already assembled before growing.
            tmp.put(result.array(), 0, result.position());
            result = tmp;
        }
        // read one physical packet
        // before read, set limit to make read only one packet
        result.limit(result.position() + packetLen);
        readLen = readAll(result);
        if (readLen != packetLen) {
            LOG.warn("Length of received packet content(" + readLen
                    + ") is not equal with length in head.(" + packetLen + ")");
            return null;
        }
        accSequenceId();
        // A physical packet shorter than the maximum marks the end of the
        // logical packet; a max-length packet means more fragments follow.
        if (packetLen != MAX_PHYSICAL_PACKET_LENGTH) {
            result.flip();
            break;
        }
    }
    return result;
}
@Test(expected = IOException.class)
public void testBadSeq() throws IOException {
    // Simulates a continued (max-length) packet followed by a second packet
    // whose sequence id is wrong; fetchOnePacket must throw IOException.
    // mock: the channel read is scripted by readIdx across four calls.
    new Expectations() {
        {
            channel.read((ByteBuffer) any);
            minTimes = 0;
            result = new Delegate() {
                int fakeRead(ByteBuffer buffer) {
                    int maxLen = MysqlChannel.MAX_PHYSICAL_PACKET_LENGTH;
                    MysqlSerializer serializer = MysqlSerializer.newInstance();
                    if (readIdx == 0) {
                        // packet header: max-length payload, correct seq id
                        readIdx++;
                        serializer.writeInt3(maxLen);
                        serializer.writeInt1(packetId++);
                        buffer.put(serializer.toArray());
                        return 4;
                    } else if (readIdx == 1) {
                        // first packet body: fill with deterministic bytes
                        readIdx++;
                        int readLen = buffer.remaining();
                        byte[] buf = new byte[readLen];
                        for (int i = 0; i < readLen; ++i) {
                            buf[i] = (byte) ('a' + (i % 26));
                        }
                        buffer.put(buf);
                        return readLen;
                    } else if (readIdx == 2) {
                        // packet header of the continuation packet
                        readIdx++;
                        serializer.writeInt3(10);
                        // NOTE: Bad packet seq
                        serializer.writeInt1(0);
                        buffer.put(serializer.toArray());
                        return 4;
                    } else if (readIdx == 3) {
                        readIdx++;
                        byte[] buf = new byte[buffer.remaining()];
                        for (int i = 0; i < buffer.remaining(); ++i) {
                            buf[i] = (byte) ('a' + (i % 26));
                        }
                        buffer.put(buf);
                        return buffer.remaining();
                    }
                    return 0;
                }
            };
        }
    };
    MysqlChannel channel1 = new MysqlChannel(channel);
    // Expected to throw IOException ("Bad packet sequence.") on the second header.
    ByteBuffer buf = channel1.fetchOnePacket();
}
@Override public void validateRoleList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得角色信息 List<RoleDO> roles = roleMapper.selectBatchIds(ids); Map<Long, RoleDO> roleMap = convertMap(roles, RoleDO::getId); // 校验 ids.forEach(id -> { RoleDO role = roleMap.get(id); if (role == null) { throw exception(ROLE_NOT_EXISTS); } if (!CommonStatusEnum.ENABLE.getStatus().equals(role.getStatus())) { throw exception(ROLE_IS_DISABLE, role.getName()); } }); }
@Test
public void testValidateRoleList_success() {
    // mock data: one enabled role
    RoleDO roleDO = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    roleMapper.insert(roleDO);
    // prepare parameters
    List<Long> ids = singletonList(roleDO.getId());
    // call — success means no exception is thrown, so no assertion is needed
    roleService.validateRoleList(ids);
}
public CompletableFuture<JobClient> submitJob(
        JobGraph jobGraph, ClassLoader userCodeClassloader) throws Exception {
    // Spins up a dedicated MiniCluster sized for the job, submits the graph,
    // waits for job initialization, and returns a JobClient that shuts the
    // cluster down when the job finishes. Any failure along the chain also
    // shuts the cluster down so nothing leaks.
    MiniClusterConfiguration miniClusterConfig = getMiniClusterConfig(jobGraph.getMaximumParallelism());
    MiniCluster miniCluster = miniClusterFactory.apply(miniClusterConfig);
    miniCluster.start();
    return miniCluster
            .submitJob(jobGraph)
            .thenApplyAsync(
                    FunctionUtils.uncheckedFunction(
                            submissionResult -> {
                                // Block (async, off the submit thread) until the job
                                // has left the initialization phase.
                                org.apache.flink.client.ClientUtils
                                        .waitUntilJobInitializationFinished(
                                                () -> miniCluster
                                                        .getJobStatus(
                                                                submissionResult
                                                                        .getJobID())
                                                        .get(),
                                                () -> miniCluster
                                                        .requestJobResult(
                                                                submissionResult
                                                                        .getJobID())
                                                        .get(),
                                                userCodeClassloader);
                                return submissionResult;
                            }))
            .thenApply(
                    result ->
                            new MiniClusterJobClient(
                                    result.getJobID(),
                                    miniCluster,
                                    userCodeClassloader,
                                    MiniClusterJobClient.JobFinalizationBehavior
                                            .SHUTDOWN_CLUSTER))
            .whenComplete(
                    (ignored, throwable) -> {
                        if (throwable != null) {
                            // We failed to create the JobClient and must shutdown to ensure
                            // cleanup.
                            shutDownCluster(miniCluster);
                        }
                    })
            .thenApply(Function.identity());
}
@Test
void testMultipleExecutions() throws Exception {
    // The factory must support submitting a second job after the first job's
    // cluster has been shut down.
    PerJobMiniClusterFactory perJobMiniClusterFactory = initializeMiniCluster();
    {
        JobClient jobClient =
                perJobMiniClusterFactory
                        .submitJob(getNoopJobGraph(), ClassLoader.getSystemClassLoader())
                        .get();
        jobClient.getJobExecutionResult().get();
        // Cluster shuts itself down once the job completes.
        assertThatMiniClusterIsShutdown();
    }
    {
        JobClient jobClient =
                perJobMiniClusterFactory
                        .submitJob(getNoopJobGraph(), ClassLoader.getSystemClassLoader())
                        .get();
        jobClient.getJobExecutionResult().get();
        assertThatMiniClusterIsShutdown();
    }
}
public static boolean isParameterOptional(ParameterSchema param) {
    // A parameter is optional when it is explicitly flagged optional (a null
    // flag counts as "not optional") or when it declares a default value.
    Boolean flag = param.isOptional();
    if (flag != null && flag) {
        return true;
    }
    return param.hasDefault();
}
@Test(dataProvider = "params")
public void testIsParameterOptional(ParameterSchema parameterSchema, boolean expected) {
    // Data-driven check: each provided schema must map to its expected
    // optionality.
    Assert.assertEquals(RestLiToolsUtils.isParameterOptional(parameterSchema), expected);
}
public SearchSourceBuilder create(SearchesConfig config) {
    // Adapter overload: converts the SearchesConfig into a SearchCommand and
    // delegates to the command-based factory method.
    return create(SearchCommand.from(config));
}
@Test
void searchIncludesSize() {
    // The batch size from the chunk command must surface as the "size"
    // property of the generated Elasticsearch search source.
    final SearchSourceBuilder search = this.searchRequestFactory.create(ChunkCommand.builder()
            .indices(Collections.singleton("graylog_0"))
            .range(RANGE)
            .batchSize(BATCH_SIZE)
            .build());
    assertThat(search.toString()).contains("\"size\":42");
}