focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Resolves the appId that an incoming config-service request targets.
 *
 * <p>For /configs, /configfiles/json and /configfiles paths the appId is the path
 * segment immediately after the prefix; for notification paths it is read from the
 * "appId" request parameter.
 *
 * @param request the current servlet request
 * @return the extracted appId, or {@code null} when the path matches no known prefix
 */
public String extractAppIdFromRequest(HttpServletRequest request) {
    final String servletPath = request.getServletPath();
    if (StringUtils.startsWith(servletPath, URL_CONFIGS_PREFIX)) {
        return StringUtils.substringBetween(servletPath, URL_CONFIGS_PREFIX, URL_SEPARATOR);
    }
    // JSON prefix must be probed before the plain configfiles prefix because the
    // latter is a prefix of the former.
    if (StringUtils.startsWith(servletPath, URL_CONFIGFILES_JSON_PREFIX)) {
        return StringUtils.substringBetween(servletPath, URL_CONFIGFILES_JSON_PREFIX, URL_SEPARATOR);
    }
    if (StringUtils.startsWith(servletPath, URL_CONFIGFILES_PREFIX)) {
        return StringUtils.substringBetween(servletPath, URL_CONFIGFILES_PREFIX, URL_SEPARATOR);
    }
    if (StringUtils.startsWith(servletPath, URL_NOTIFICATIONS_PREFIX)) {
        return request.getParameter("appId");
    }
    return null;
}
// A /configfiles path yields the first segment after the prefix as the appId.
@Test public void testExtractAppIdFromRequest3() { when(request.getServletPath()).thenReturn("/configfiles/someAppId/default/application"); String appId = accessKeyUtil.extractAppIdFromRequest(request); assertThat(appId).isEqualTo("someAppId"); }
/**
 * Merges a detached object into the live-object store.
 *
 * @param detachedObject the plain object to persist
 * @return the attached (live) representation produced by {@code persist}
 */
@Override
public <T> T merge(T detachedObject) {
    // Fresh identity map per call: tracks objects already persisted during this
    // merge pass so cyclic references are not persisted twice.
    return persist(detachedObject, new HashMap<Object, Object>(), RCascadeType.MERGE);
}
// A merged live object must accept a null field write and a later non-null overwrite.
@Test public void testNullValue() { TestClass ti = new TestClass("3"); TestClass liveObject = redisson.getLiveObjectService().merge(ti); liveObject.setCode("test"); liveObject.setCode(null); assertThat(liveObject.getCode()).isNull(); liveObject.setCode("123"); assertThat(liveObject.getCode()).isEqualTo("123"); }
// Extracts the target appId: for /configs, /configfiles/json and /configfiles paths it is
// the path segment between the prefix and the next separator; for notification paths it is
// the "appId" request parameter. Returns null when no prefix matches.
// NOTE: the JSON prefix is checked before the plain configfiles prefix on purpose —
// the latter is a prefix of the former.
public String extractAppIdFromRequest(HttpServletRequest request) { String appId = null; String servletPath = request.getServletPath(); if (StringUtils.startsWith(servletPath, URL_CONFIGS_PREFIX)) { appId = StringUtils.substringBetween(servletPath, URL_CONFIGS_PREFIX, URL_SEPARATOR); } else if (StringUtils.startsWith(servletPath, URL_CONFIGFILES_JSON_PREFIX)) { appId = StringUtils.substringBetween(servletPath, URL_CONFIGFILES_JSON_PREFIX, URL_SEPARATOR); } else if (StringUtils.startsWith(servletPath, URL_CONFIGFILES_PREFIX)) { appId = StringUtils.substringBetween(servletPath, URL_CONFIGFILES_PREFIX, URL_SEPARATOR); } else if (StringUtils.startsWith(servletPath, URL_NOTIFICATIONS_PREFIX)) { appId = request.getParameter("appId"); } return appId; }
// A /configs path yields the first segment after the prefix as the appId.
@Test public void testExtractAppIdFromRequest1() { when(request.getServletPath()).thenReturn("/configs/someAppId/default/application"); String appId = accessKeyUtil.extractAppIdFromRequest(request); assertThat(appId).isEqualTo("someAppId"); }
// Multipart upload driver: slices the file into parts and submits each to a worker pool.
// When the server reports parallel parts are allowed, the part length is derived from the
// total size and the maximum part count (never below the configured partsize); otherwise
// the whole remaining range goes into a single part. All part futures are awaited, the
// upload is completed with the collected checksums, and the parent transfer status is
// marked complete with the resulting attributes. The pool is shut down in all cases.
// NOTE(review): "ref" carries the server-side upload handle between continueUpload calls —
// confirm it is stable across parts.
@Override public FileEntity upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency); try { // Full size of file final long size = status.getLength() + status.getOffset(); final List<Future<TransferStatus>> parts = new ArrayList<>(); long offset = 0; long remaining = status.getLength(); String ref = null; for(int partNumber = 1; remaining > 0; partNumber++) { final FileUploadPartEntity uploadPartEntity = this.continueUpload(file, ref, partNumber); final long length; if(uploadPartEntity.isParallelParts()) { length = Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining); } else { length = remaining; } parts.add(this.submit(pool, file, local, throttle, listener, status, uploadPartEntity.getUploadUri(), partNumber, offset, length, callback)); remaining -= length; offset += length; ref = uploadPartEntity.getRef(); } final List<TransferStatus> checksums = Interruptibles.awaitAll(parts); final FileEntity entity = this.completeUpload(file, ref, status, checksums); // Mark parent status as complete status.withResponse(new BrickAttributesFinderFeature(session).toAttributes(entity)).setComplete(); return entity; } finally { // Cancel future tasks pool.shutdown(false); } }
// Uploads one byte more than the 5 MiB part size to force a multi-part upload, then checks
// byte count, completion flag, response attributes, remote size, and round-tripped content.
@Test public void testUploadMultipleParts() throws Exception { // 5L * 1024L * 1024L final BrickUploadFeature feature = new BrickUploadFeature(session, new BrickWriteFeature(session), 5 * 1024L * 1024L, 5); final Path root = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path test = new Path(root, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString()); final int length = 5242881; final byte[] content = RandomUtils.nextBytes(length); IOUtils.write(content, local.getOutputStream(false)); final TransferStatus status = new TransferStatus(); status.setLength(content.length); final BytecountStreamListener count = new BytecountStreamListener(); feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, new DisabledConnectionCallback()); assertEquals(content.length, count.getSent()); assertTrue(status.isComplete()); assertNotSame(PathAttributes.EMPTY, status.getResponse()); assertTrue(new BrickFindFeature(session).find(test)); assertEquals(content.length, new BrickAttributesFinderFeature(session).find(test).getSize()); final byte[] compare = new byte[length]; IOUtils.readFully(new BrickReadFeature(session).read(test, new TransferStatus().withLength(length), new DisabledConnectionCallback()), compare); assertArrayEquals(content, compare); new BrickDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); local.delete(); }
/**
 * Authenticates the given credentials and mints a token on success.
 *
 * @param username    login name; must be non-blank
 * @param rawPassword clear-text password; must be non-blank
 * @return a {@code NacosUser} carrying the canonical username and a fresh JWT
 * @throws AccessException when credentials are blank, unknown, or wrong
 */
@Override
public NacosUser authenticate(String username, String rawPassword) throws AccessException {
    // Same message for every failure mode so callers cannot distinguish
    // "no such user" from "bad password" or "blank input".
    if (StringUtils.isBlank(username) || StringUtils.isBlank(rawPassword)) {
        throw new AccessException("user not found!");
    }
    final NacosUserDetails details =
            (NacosUserDetails) userDetailsService.loadUserByUsername(username);
    final boolean credentialsOk =
            details != null && PasswordEncoderUtil.matches(rawPassword, details.getPassword());
    if (!credentialsOk) {
        throw new AccessException("user not found!");
    }
    return new NacosUser(details.getUsername(), jwtTokenManager.createToken(username));
}
// A null password must be rejected with AccessException before any user lookup happens.
@Test void testAuthenticate2() { assertThrows(AccessException.class, () -> { abstractAuthenticationManager.authenticate("nacos", null); }); }
/**
 * Evaluates the JavaScript read from the given stream.
 *
 * @param is source of script text
 * @return the evaluation result
 */
public JsValue eval(InputStream is) {
    // Materialize the stream as text, then delegate to the string-based evaluator.
    String source = FileUtils.toString(is);
    return eval(source);
}
// A Java static method surfaced through Graal must appear as an executable function
// value whose underlying polyglot value is not a host object.
@Test void testJavaStaticMethod() { je.eval("var StaticPojo = Java.type('com.intuit.karate.graal.StaticPojo')"); JsValue sp = je.eval("StaticPojo.sayHello"); assertTrue(sp.isFunction()); Value ov = sp.getOriginal(); assertTrue(ov.canExecute()); assertFalse(ov.isHostObject()); }
/**
 * Serializes a Connect value (optionally wrapped in a schema envelope) to JSON bytes.
 *
 * @param topic  topic the data is bound for
 * @param schema Connect schema, may be null
 * @param value  Connect value, may be null
 * @return serialized bytes, or null when both schema and value are null
 * @throws DataException when JSON serialization fails
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (schema == null && value == null) {
        return null;
    }
    // Envelope (schema + payload) vs. bare payload is a converter-level config switch.
    final JsonNode jsonValue;
    if (config.schemasEnabled()) {
        jsonValue = convertToJsonWithEnvelope(schema, value);
    } else {
        jsonValue = convertToJsonWithoutEnvelope(schema, value);
    }
    try {
        return serializer.serialize(topic, jsonValue);
    } catch (SerializationException e) {
        throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
    }
}
// 4,000,000,000 ms past epoch must round-trip through the Timestamp logical type as an
// int64 payload inside the schema envelope.
@Test public void timestampToJson() { GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 2000000000); calendar.add(Calendar.MILLISECOND, 2000000000); java.util.Date date = calendar.getTime(); JsonNode converted = parse(converter.fromConnectData(TOPIC, Timestamp.SCHEMA, date)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int64\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1 }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME); assertTrue(payload.isLong()); assertEquals(4000000000L, payload.longValue()); }
// Stores the given curator client in the CURATOR_TL slot so subsequent code on the
// calling thread can reuse an externally managed client instead of building its own.
public static void setCurator(CuratorFramework curator) { CURATOR_TL.set(curator); }
// Two DelegationTokenManagers sharing one externally supplied curator must be able to
// init() concurrently: both calls race to create the shared ZK working namespace, and the
// namespace must exist afterwards.
@Test public void testMultipleInit() throws Exception { String connectString = zkServer.getConnectString(); RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3); Configuration conf = getSecretConf(connectString); CuratorFramework curatorFramework = CuratorFrameworkFactory.builder() .connectString(connectString) .retryPolicy(retryPolicy) .build(); curatorFramework.start(); ZKDelegationTokenSecretManager.setCurator(curatorFramework); DelegationTokenManager tm1 = new DelegationTokenManager(conf, new Text("foo")); DelegationTokenManager tm2 = new DelegationTokenManager(conf, new Text("bar")); // When the init method is called, // the ZKDelegationTokenSecretManager#startThread method will be called, // and the creatingParentContainersIfNeeded will be called to create the nameSpace. ExecutorService executorService = Executors.newFixedThreadPool(2); Callable<Boolean> tm1Callable = () -> { tm1.init(); return true; }; Callable<Boolean> tm2Callable = () -> { tm2.init(); return true; }; List<Future<Boolean>> futures = executorService.invokeAll( Arrays.asList(tm1Callable, tm2Callable)); for(Future<Boolean> future : futures) { Assert.assertTrue(future.get()); } executorService.shutdownNow(); Assert.assertTrue(executorService.awaitTermination(1, TimeUnit.SECONDS)); tm1.destroy(); tm2.destroy(); String workingPath = "/" + conf.get(ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH, ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT) + "/ZKDTSMRoot"; // Check if the created NameSpace exists. Stat stat = curatorFramework.checkExists().forPath(workingPath); Assert.assertNotNull(stat); curatorFramework.close(); }
/**
 * Returns the sample value associated with the given quantile.
 *
 * <p>Binary-searches the precomputed quantile boundaries; when the quantile falls
 * between boundaries, the value at the insertion point minus one is used, clamped to
 * the first/last sample.
 *
 * @param quantile a value in {@code [0..1]}
 * @return the corresponding sample value, or 0 when no samples exist
 * @throws IllegalArgumentException if {@code quantile} is NaN or outside [0..1]
 */
@Override
public double getValue(double quantile) {
    if (Double.isNaN(quantile) || quantile < 0.0 || quantile > 1.0) {
        throw new IllegalArgumentException(quantile + " is not in [0..1]");
    }
    if (values.length == 0) {
        return 0.0;
    }
    int idx = Arrays.binarySearch(quantiles, quantile);
    if (idx < 0) {
        // binarySearch returns -(insertionPoint) - 1; step back one slot.
        idx = -idx - 2;
    }
    if (idx < 1) {
        return values[0];
    }
    if (idx >= values.length) {
        return values[values.length - 1];
    }
    return values[idx];
}
// NaN is outside [0..1] and must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void disallowsNotANumberQuantile() { snapshot.getValue(Double.NaN); }
/**
 * Returns the {@link ConnectionGroup} registered for the given namespace, creating and
 * registering a new one when none exists yet.
 *
 * @param namespace non-blank namespace key
 * @return the existing or newly created group
 */
public static ConnectionGroup getOrCreateConnectionGroup(String namespace) {
    AssertUtil.assertNotBlank(namespace, "namespace should not be empty");
    return getOrCreateGroup(namespace);
}
// getOrCreateConnectionGroup must be idempotent: the very same group instance is returned
// on repeated calls, even after a connection has been added to the namespace.
@Test public void testGetOrCreateConnectionGroup() { String namespace = "test-namespace"; assertNull(ConnectionManager.getConnectionGroup(namespace)); ConnectionGroup group1 = ConnectionManager.getOrCreateConnectionGroup(namespace); assertNotNull(group1); // Put one connection. ConnectionManager.addConnection(namespace, "12.23.34.45:1997"); ConnectionGroup group2 = ConnectionManager.getOrCreateConnectionGroup(namespace); assertSame(group1, group2); }
// Convenience overload: delegates with the trailing boolean fixed to false
// (presumably non-strict validation — confirm against the 3-arg overload's javadoc).
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) { return validate(klass, options, false); }
// Options valid as SubOptions must still fail validation as SuperOptions: the
// super-interface requirement group applies only when validating the super interface.
@Test public void testSuperInterfaceGroupIsInAdditionToSubInterfaceGroupOnlyWhenValidatingSuperInterface() { SubOptions opts = PipelineOptionsFactory.as(SubOptions.class); opts.setFoo("Foo"); opts.setSuperclassObj("Hello world"); opts.setRunner(CrashingRunner.class); // Valid SubOptions, but invalid SuperOptions PipelineOptionsValidator.validate(SubOptions.class, opts); expectedException.expectMessage("sub"); expectedException.expectMessage("Missing required value"); expectedException.expectMessage("getBar"); PipelineOptionsValidator.validate(SuperOptions.class, opts); }
/**
 * Builds a {@code ModificationTimeProvider} from a configuration string.
 *
 * <p>The literal {@code "EPOCH_PLUS_SECOND"} yields a provider pinned to one second past
 * the epoch; any other value is parsed as an ISO date-time and yields a provider pinned
 * to that instant. Both providers ignore their path arguments.
 *
 * @param modificationTime either "EPOCH_PLUS_SECOND" or an ISO-8601 date-time string
 * @return a constant-instant provider
 * @throws InvalidFilesModificationTimeException when the string is not parseable
 */
@VisibleForTesting
static ModificationTimeProvider createModificationTimeProvider(String modificationTime)
    throws InvalidFilesModificationTimeException {
  try {
    if ("EPOCH_PLUS_SECOND".equals(modificationTime)) {
      Instant epochPlusSecond = Instant.ofEpochSecond(1);
      return (ignored1, ignored2) -> epochPlusSecond;
    }
    Instant timestamp = DateTimeFormatter.ISO_DATE_TIME.parse(modificationTime, Instant::from);
    return (ignored1, ignored2) -> timestamp;
  } catch (DateTimeParseException ex) {
    throw new InvalidFilesModificationTimeException(modificationTime, modificationTime, ex);
  }
}
// An ISO date-time with a zone offset must be normalized to the equivalent UTC instant.
@Test public void testCreateModificationTimeProvider_isoDateTimeValue() throws InvalidFilesModificationTimeException { ModificationTimeProvider timeProvider = PluginConfigurationProcessor.createModificationTimeProvider("2011-12-03T10:15:30+09:00"); Instant expected = DateTimeFormatter.ISO_DATE_TIME.parse("2011-12-03T01:15:30Z", Instant::from); assertThat(timeProvider.get(Paths.get("foo"), AbsoluteUnixPath.get("/bar"))) .isEqualTo(expected); }
// Adapter: ignores the input bundle and builds an evaluator from the applied transform
// alone. The raw-type cast bridges the wildcard generics of AppliedPTransform; the
// suppression is scoped to this method.
@SuppressWarnings({"unchecked", "rawtypes"}) @Override public @Nullable <InputT> TransformEvaluator<InputT> forApplication( AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) throws IOException { return createEvaluator((AppliedPTransform) application); }
// Feeds every initial input shard through a fresh evaluator and checks all source
// elements come out, each bundle holds the max watermark, and output bundle counts
// mirror input element counts.
@Test public void boundedSourceInMemoryTransformEvaluatorProducesElements() throws Exception { when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle()); UncommittedBundle<Long> outputBundle = bundleFactory.createBundle(longs); when(context.createBundle(longs)).thenReturn(outputBundle); Collection<CommittedBundle<?>> initialInputs = new BoundedReadEvaluatorFactory.InputProvider(context, options) .getInitialInputs(longsProducer, 1); List<WindowedValue<?>> outputs = new ArrayList<>(); for (CommittedBundle<?> shardBundle : initialInputs) { TransformEvaluator<?> evaluator = factory.forApplication(longsProducer, null); for (WindowedValue<?> shard : shardBundle.getElements()) { evaluator.processElement((WindowedValue) shard); } TransformResult<?> result = evaluator.finishBundle(); assertThat(result.getWatermarkHold(), equalTo(BoundedWindow.TIMESTAMP_MAX_VALUE)); assertThat( Iterables.size(result.getOutputBundles()), equalTo(Iterables.size(shardBundle.getElements()))); for (UncommittedBundle<?> output : result.getOutputBundles()) { CommittedBundle<?> committed = output.commit(BoundedWindow.TIMESTAMP_MAX_VALUE); for (WindowedValue<?> val : committed.getElements()) { outputs.add(val); } } } assertThat( outputs, containsInAnyOrder( gw(1L), gw(2L), gw(4L), gw(8L), gw(9L), gw(7L), gw(6L), gw(5L), gw(3L), gw(0L))); }
// Factory: wraps the component in a lazily-populated issue input (issues are read on demand).
public Input<DefaultIssue> create(Component component) { return new RawLazyInput(component); }
// When the issue filter rejects the file, a report issue that would otherwise match an
// active rule must not surface in the created input.
@Test void filter_excludes_issues_from_report() { RuleKey ruleKey = RuleKey.of("java", "S001"); markRuleAsActive(ruleKey); registerRule(ruleKey, "name"); when(issueFilter.accept(any(), eq(FILE))).thenReturn(false); ScannerReport.Issue reportIssue = ScannerReport.Issue.newBuilder() .setTextRange(newTextRange(2)) .setMsg("the message") .setRuleRepository(ruleKey.repository()) .setRuleKey(ruleKey.rule()) .setSeverity(Constants.Severity.BLOCKER) .setGap(3.14) .build(); reportReader.putIssues(FILE.getReportAttributes().getRef(), singletonList(reportIssue)); Input<DefaultIssue> input = underTest.create(FILE); Collection<DefaultIssue> issues = input.getIssues(); assertThat(issues).isEmpty(); }
// Convenience overload: delegates with an empty list for the third argument
// (presumably "no always-included fields" — confirm against the 3-arg overload).
public static DataSchema buildSchemaByProjection(DataSchema schema, DataMap maskMap) { return buildSchemaByProjection(schema, maskMap, Collections.emptyList()); }
// Projecting a field that does not exist in the schema must raise
// InvalidProjectionException with a message naming the missing field and the schema.
@Test public void testBuildSchemaByProjectionNonexistentFields() { RecordDataSchema schema = (RecordDataSchema) DataTemplateUtil.getSchema(RecordTemplateWithPrimitiveKey.class); DataMap projectionMask = buildProjectionMaskDataMap("id", "nonexistentFieldFooBar"); try { buildSchemaByProjection(schema, projectionMask); } catch (InvalidProjectionException e) { Assert.assertEquals(e.getMessage(), "Projected field \"nonexistentFieldFooBar\" not present in schema \"" + schema.getFullName() + "\""); return; } Assert.fail("Building schema by projection with nonexistent fields should throw an InvalidProjectionException"); }
// Executes the DeleteSuffixesRecursive file callable on the node that owns this path
// (remote-capable via act()).
public void deleteSuffixesRecursive() throws IOException, InterruptedException { act(new DeleteSuffixesRecursive()); }
// Regression for JENKINS-44909: a sibling workspace carrying the combinator suffix
// (and its contents) must be removed by deleteSuffixesRecursive().
@Test @Issue("JENKINS-44909") public void deleteSuffixesRecursive() throws Exception { File deleteSuffixesRecursiveFolder = temp.newFolder("deleteSuffixesRecursive"); FilePath filePath = new FilePath(deleteSuffixesRecursiveFolder); FilePath suffix = filePath.withSuffix(WorkspaceList.COMBINATOR + "suffixed"); FilePath textTempFile = suffix.createTextTempFile("tmp", null, "dummy", true); assertThat(textTempFile.exists(), is(true)); filePath.deleteSuffixesRecursive(); assertThat(textTempFile.exists(), is(false)); }
/**
 * Exposes the selector-handle converter factory as a Spring bean.
 *
 * @param converterList every {@code SelectorHandleConverter} discovered in the context
 * @return a factory keyed by plugin name
 */
@Bean
public SelectorHandleConverterFactor selectorHandleConverterFactor(final List<SelectorHandleConverter> converterList) {
    // NOTE: Collectors.toMap throws on duplicate keys, so plugin names are assumed
    // unique across converters — confirm if new converters are added.
    final Map<String, SelectorHandleConverter> byPluginName = converterList.stream()
            .collect(Collectors.toMap(SelectorHandleConverter::pluginName, Function.identity()));
    return new SelectorHandleConverterFactor(byPluginName);
}
// The factory must hand back the registered converter instance for its plugin name.
@Test public void testSelectorHandleConverterFactor() { List<SelectorHandleConverter> converterList = new ArrayList<>(); GrpcSelectorHandleConverter grpc = new GrpcSelectorHandleConverter(); converterList.add(grpc); SelectorHandleConverterFactor factor = shenyuAdminConfiguration.selectorHandleConverterFactor(converterList); assertEquals(grpc, factor.newInstance(PluginEnum.GRPC.getName())); }
/**
 * Computes which tracked blobs would need to be evicted to fit {@code size} additional
 * bytes under the cache limit.
 *
 * <p>Iterates the tracked entries in map order, accumulating candidates while the
 * projected total (after removing already-selected candidates) still exceeds the limit.
 * Nothing is actually deleted here.
 *
 * @param size number of bytes about to be added; must be non-negative
 * @return the blobs to delete, in selection order (possibly empty)
 */
public List<Tuple2<JobID, BlobKey>> checkLimit(long size) {
    checkArgument(size >= 0);
    synchronized (lock) {
        final List<Tuple2<JobID, BlobKey>> evictionCandidates = new ArrayList<>();
        long projectedTotal = total;
        for (Map.Entry<Tuple2<JobID, BlobKey>, Long> tracked : caches.entrySet()) {
            if (projectedTotal + size > sizeLimit) {
                evictionCandidates.add(tracked.getKey());
                projectedTotal -= tracked.getValue();
            }
        }
        return evictionCandidates;
    }
}
// A negative size violates the precondition and must raise IllegalArgumentException.
@Test void testCheckLimitForBlobWithNegativeSize() { assertThatThrownBy(() -> tracker.checkLimit(-1L)) .isInstanceOf(IllegalArgumentException.class); }
/**
 * Parses a string into a schema-and-value pair.
 *
 * <p>Null maps to the shared null sentinel, the empty string stays a plain string, and
 * anything else is handed to the value parser (non-embedded mode).
 *
 * @param value the text to parse, may be null
 * @return the inferred schema and value
 */
public static SchemaAndValue parseString(String value) {
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    if (value.isEmpty()) {
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    return new ValueParser(new Parser(value)).parse(false);
}
// "[]" must parse to an ARRAY schema with an empty list value.
@Test public void shouldParseEmptyArray() { SchemaAndValue schemaAndValue = Values.parseString("[]"); assertEquals(Type.ARRAY, schemaAndValue.schema().type()); assertEquals(Collections.emptyList(), schemaAndValue.value()); }
/**
 * Loads a stadium's seating-chart image together with all of its section infos.
 *
 * @param stadiumId id of the stadium to look up
 * @return the stadium's sections view
 */
@Override
public StadiumSections findAllByStadium(final Long stadiumId) {
    // Stadium lookup first: it is the existence check for the id.
    final Stadium stadium = stadiumReadUsecase.findById(stadiumId);
    final List<SectionInfo> sectionInfos = sectionRepository.findAllByStadium(stadiumId)
            .stream()
            .map(SectionInfo::from)
            .toList();
    return new StadiumSections(stadium.getSeatingChartImage(), sectionInfos);
}
// A nonexistent stadium id must surface StadiumNotFoundException from the stadium lookup.
@Test void findAllByStadium_는_존재하지_않는_경기장_요청에_예외를_반환한다() { // given final Long stadiumId = -999L; // when // then assertThatThrownBy(() -> sectionReadService.findAllByStadium(stadiumId)) .isInstanceOf(StadiumNotFoundException.class); }
// Reads the outline item stored under the LAST COS name (null when absent, per the
// nullLastChild test).
public PDOutlineItem getLastChild() { return getOutlineItem(COSName.LAST); }
// A root with no children must report a null last child.
@Test void nullLastChild() { assertNull(root.getLastChild()); }
/**
 * Generates an inclusive series from start to end with an implicit unit step.
 *
 * <p>The step direction is derived from the sign of {@code end - start} (the subtraction
 * form is kept deliberately so overflow behavior matches the original).
 */
@Udf
public List<Long> generateSeriesLong(
    @UdfParameter(description = "The beginning of the series") final long start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
  final long delta = end - start;
  return generateSeriesLong(start, end, delta > 0 ? 1 : -1);
}
// A 0..9 series with step 2 yields the five even values 0,2,4,6,8.
@Test public void shouldComputeIntRangeWithEvenStepLong() { final List<Long> range = rangeUdf.generateSeriesLong(0, 9, 2); assertThat(range, hasSize(5)); long index = 0; for (final long i : range) { assertThat(index, is(i)); index += 2; } }
// SNI consistency check for TLS requests: when both an indicated server name and a Host
// header are present, the Host (with any :port stripped) must match the SNI value;
// otherwise the request is failed with MISDIRECTED_REQUEST. Non-TLS requests, or requests
// missing either value, pass straight through to the next handler.
@Override public void handle(final RoutingContext routingContext) { if (routingContext.request().isSSL()) { final String indicatedServerName = routingContext.request().connection() .indicatedServerName(); final String requestHost = routingContext.request().host(); if (indicatedServerName != null && requestHost != null) { // sometimes the port is present in the host header, remove it final String requestHostNoPort = requestHost.replaceFirst(":\\d+", ""); if (!requestHostNoPort.equals(indicatedServerName)) { log.error(String.format( "Sni check failed, host header: %s, sni value %s", requestHostNoPort, indicatedServerName) ); routingContext.fail(MISDIRECTED_REQUEST.code(), new KsqlApiException("This request was incorrectly sent to this ksqlDB server", Errors.ERROR_CODE_MISDIRECTED_REQUEST)); return; } } } routingContext.next(); }
// With no SNI value on the connection, the handler must not fail the request and must
// continue the chain exactly once.
@Test public void shouldNotCheckIfSniNull() { // Given: when(httpConnection.indicatedServerName()).thenReturn(null); // When: sniHandler.handle(routingContext); // Then: verify(routingContext, never()).fail(anyInt(), any()); verify(routingContext, times(1)).next(); }
// Static factory for a fresh Builder instance.
public static Builder newBuilder() { return new Builder(); }
// A dropped sample must multiplicatively shrink the limit: 30 -> 27
// (factor 0.9, presumably the builder's default backoff ratio — confirm).
@Test public void decreaseOnDrops() { AIMDLimit limiter = AIMDLimit.newBuilder().initialLimit(30).build(); limiter.onSample(0, 0, 0, true); Assert.assertEquals(27, limiter.getLimit()); }
/**
 * Associates the value with the key, creating the node when absent.
 *
 * @param key   map key; must not be null
 * @param value new value; null only permitted when null values are allowed
 * @return the previous value for the key, or null if there was none
 * @throws NullPointerException on a null key, or a disallowed null value
 */
@CanIgnoreReturnValue
@Override
public V put(K key, V value) {
    if (key == null) {
        throw new NullPointerException("key == null");
    }
    if (value == null && !allowNullValues) {
        throw new NullPointerException("value == null");
    }
    // find(key, true) creates the node if it is not present yet.
    Node<K, V> node = find(key, true);
    V previous = node.value;
    node.value = value;
    return previous;
}
// put(null, v) must fail fast with an NPE carrying the exact "key == null" message.
@Test @SuppressWarnings("ModifiedButNotUsed") public void testPutNullKeyFails() { LinkedTreeMap<String, String> map = new LinkedTreeMap<>(); var e = assertThrows(NullPointerException.class, () -> map.put(null, "android")); assertThat(e).hasMessageThat().isEqualTo("key == null"); }
// Returns the original-case keys (the values of the lower->original lookup map).
// NOTE(review): this is a detached snapshot, not the live write-through view the
// java.util.Map#keySet contract describes — confirm no caller relies on mutation
// or iteration-ordering of the returned set.
@Override public Set<String> keySet() { return new HashSet<>(lowerKeyToOriginMap.values()); }
// keySet() must report the originally-cased keys regardless of the lower-cased storage.
@Test void keySet() { Set<String> keySet = new HashSet<>(); keySet.add("Key"); keySet.add("Key2"); Assertions.assertEquals(keySet, lowerCaseLinkHashMap.keySet()); }
// Maps a SeaTunnel Column back to a SAP HANA column definition. Each SQL type picks the
// matching HANA type; DECIMAL precision/scale and TIMESTAMP scale are clamped into HANA's
// valid ranges with a warning, STRING chooses NVARCHAR vs CLOB by length, and unsupported
// types raise a convert-to-connector-type error. The final columnType string gets the
// size suffix appended when the type needs one.
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(HANA_BOOLEAN); builder.dataType(HANA_BOOLEAN); builder.length(2L); break; case TINYINT: builder.columnType(HANA_TINYINT); builder.dataType(HANA_TINYINT); break; case SMALLINT: builder.columnType(HANA_SMALLINT); builder.dataType(HANA_SMALLINT); break; case INT: builder.columnType(HANA_INTEGER); builder.dataType(HANA_INTEGER); break; case BIGINT: builder.columnType(HANA_BIGINT); builder.dataType(HANA_BIGINT); break; case FLOAT: builder.columnType(HANA_REAL); builder.dataType(HANA_REAL); break; case DOUBLE: builder.columnType(HANA_DOUBLE); builder.dataType(HANA_DOUBLE); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), 
decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", HANA_DECIMAL, precision, scale)); builder.dataType(HANA_DECIMAL); builder.precision(precision); builder.scale(scale); break; case BYTES: builder.columnType(HANA_BLOB); builder.dataType(HANA_BLOB); break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= MAX_NVARCHAR_LENGTH) { builder.columnType(HANA_NVARCHAR); builder.dataType(HANA_NVARCHAR); builder.length( column.getColumnLength() == null ? MAX_NVARCHAR_LENGTH : column.getColumnLength()); } else { builder.columnType(HANA_CLOB); builder.dataType(HANA_CLOB); } break; case DATE: builder.columnType(HANA_DATE); builder.dataType(HANA_DATE); break; case TIME: builder.columnType(HANA_TIME); builder.dataType(HANA_TIME); break; case TIMESTAMP: if (column.getScale() == null || column.getScale() <= 0) { builder.columnType(HANA_SECONDDATE); builder.dataType(HANA_SECONDDATE); } else { int timestampScale = column.getScale(); if (column.getScale() > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType(HANA_TIMESTAMP); builder.dataType(HANA_TIMESTAMP); builder.scale(timestampScale); } break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.SAP_HANA, column.getDataType().getSqlType().name(), column.getName()); } BasicTypeDefine typeDefine = builder.build(); typeDefine.setColumnType( 
appendColumnSizeIfNeed( typeDefine.getColumnType(), typeDefine.getLength(), typeDefine.getScale())); return typeDefine; }
// A DOUBLE column must reconvert to the HANA DOUBLE type for both columnType and dataType.
@Test public void testReconvertDouble() { Column column = PhysicalColumn.builder().name("test").dataType(BasicType.DOUBLE_TYPE).build(); BasicTypeDefine typeDefine = SapHanaTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(SapHanaTypeConverter.HANA_DOUBLE, typeDefine.getColumnType()); Assertions.assertEquals(SapHanaTypeConverter.HANA_DOUBLE, typeDefine.getDataType()); }
// Builds and emits a single debug log entry for an HTTP response: records elapsed time on
// the response, logs status and headers (through an optional per-URI log modifier), and
// logs the body only for non-binary resource types. The StringBuilder append order defines
// the log format, so statement order here is significant.
public void logResponse(Config config, HttpRequest request, Response response) { long startTime = request.getStartTime(); long elapsedTime = request.getEndTime() - startTime; response.setResponseTime(elapsedTime); StringBuilder sb = new StringBuilder(); String uri = request.getUrl(); HttpLogModifier responseModifier = logModifier(config, uri); sb.append("response time in milliseconds: ").append(elapsedTime).append('\n'); sb.append(requestCount).append(" < ").append(response.getStatus()); logHeaders(requestCount, " < ", sb, responseModifier, response.getHeaders()); ResourceType rt = response.getResourceType(); if (rt == null || rt.isBinary()) { // don't log body } else { logBody(config, responseModifier, sb, uri, response.getBody(), false, rt); } sb.append('\n'); logger.debug("{}", sb); }
// A text/turtle response with an explicit charset is non-binary, so both its body and its
// Content-Type header must appear in the collected response log.
@Test void testResponseLoggingTurtleWithCharset() { setup("ttl", TURTLE_SAMPLE, "text/turtle; charset=UTF-8"); httpRequestBuilder.path("/ttl"); Response response = handle(); assertEquals(response.getBodyAsString(), TURTLE_SAMPLE); assertEquals(response.getContentType(), "text/turtle; charset=UTF-8"); httpLogger.logResponse(config, request, response); String logs = logAppender.collect(); assertTrue(logs.contains(TURTLE_SAMPLE)); assertTrue(logs.contains("Content-Type: text/turtle; charset=UTF-8")); }
// Resolves a (possibly nested or wildcard) field expression into flat field descriptors.
// "*" / "_" selects every flat field of every member type; otherwise the head of the
// expression (an index or a field name) is resolved, the flat offset of that member is
// accumulated, and any remaining tail is recursively resolved against composite types.
// A tail on an atomic type, or an unparseable expression, raises
// InvalidFieldReferenceException.
@Override public void getFlatFields( String fieldExpression, int offset, List<FlatFieldDescriptor> result) { Matcher matcher = PATTERN_NESTED_FIELDS_WILDCARD.matcher(fieldExpression); if (!matcher.matches()) { throw new InvalidFieldReferenceException( "Invalid tuple field reference \"" + fieldExpression + "\"."); } String field = matcher.group(0); if ((field.equals(ExpressionKeys.SELECT_ALL_CHAR)) || (field.equals(ExpressionKeys.SELECT_ALL_CHAR_SCALA))) { // handle select all int keyPosition = 0; for (TypeInformation<?> fType : types) { if (fType instanceof CompositeType) { CompositeType<?> cType = (CompositeType<?>) fType; cType.getFlatFields( ExpressionKeys.SELECT_ALL_CHAR, offset + keyPosition, result); keyPosition += cType.getTotalFields() - 1; } else { result.add(new FlatFieldDescriptor(offset + keyPosition, fType)); } keyPosition++; } } else { field = matcher.group(1); Matcher intFieldMatcher = PATTERN_INT_FIELD.matcher(field); int fieldIndex; if (intFieldMatcher.matches()) { // field expression is an integer fieldIndex = Integer.valueOf(field); } else { fieldIndex = this.getFieldIndex(field); } // fetch the field type will throw exception if the index is illegal TypeInformation<?> fieldType = this.getTypeAt(fieldIndex); // compute the offset, for (int i = 0; i < fieldIndex; i++) { offset += this.getTypeAt(i).getTotalFields(); } String tail = matcher.group(3); if (tail == null) { // expression hasn't nested field if (fieldType instanceof CompositeType) { ((CompositeType) fieldType).getFlatFields("*", offset, result); } else { result.add(new FlatFieldDescriptor(offset, fieldType)); } } else { // expression has nested field if (fieldType instanceof CompositeType) { ((CompositeType) fieldType).getFlatFields(tail, offset, result); } else { throw new InvalidFieldReferenceException( "Nested field expression \"" + tail + "\" not possible on atomic type " + fieldType + "."); } } } }
// "row.*" expands a nested row's two fields at flat offsets 1 and 2; a plain field name
// resolves to its single flat descriptor at the accumulated offset.
@Test void testGetFlatFields() { RowTypeInfo typeInfo1 = new RowTypeInfo(typeList, new String[] {"int", "row", "string"}); List<FlatFieldDescriptor> result = new ArrayList<>(); typeInfo1.getFlatFields("row.*", 0, result); assertThat(result).hasSize(2); assertThat(result.get(0).toString()) .isEqualTo(new FlatFieldDescriptor(1, BasicTypeInfo.SHORT_TYPE_INFO).toString()); assertThat(result.get(1).toString()) .isEqualTo(new FlatFieldDescriptor(2, BasicTypeInfo.BIG_DEC_TYPE_INFO).toString()); result.clear(); typeInfo1.getFlatFields("string", 0, result); assertThat(result).hasSize(1); assertThat(result.get(0).toString()) .isEqualTo(new FlatFieldDescriptor(3, BasicTypeInfo.STRING_TYPE_INFO).toString()); }
// Crude XML sanitizer: escapes '&' as &amp; first, then replaces every character matched
// by XML_ESCAPE_PATTERN (e.g. '<' and '>', per the tests) with '_' rather than escaping it.
static String simpleXMLEscape(String str) { // We could even use the 'more flexible' CDATA section but for now do the following: // The 'and' could be important sometimes but remove others return XML_ESCAPE_PATTERN.matcher(str.replace("&", "&amp;")).replaceAll("_"); }
// Regression for issue 572: angle brackets become underscores while '&' is XML-escaped.
@Test public void testXMLEscape_issue572() { assertEquals("_", GpxConversions.simpleXMLEscape("<")); assertEquals("_blup_", GpxConversions.simpleXMLEscape("<blup>")); assertEquals("a&amp;b", GpxConversions.simpleXMLEscape("a&b")); }
// Registers the listener, then ensures the timer that pushes storage-change updates is
// running (registration first so the new listener receives the next tick).
@Override public void addJobStorageOnChangeListener(StorageProviderChangeListener listener) { onChangeListeners.add(listener); startTimerToSendUpdates(); }
// A freshly registered change listener must eventually be notified with server states
// (awaits the update timer started by addJobStorageOnChangeListener).
@Test void backgroundJobServerStatusChangeListenersAreNotifiedOfBackgroundJobServers() { final BackgroundJobServerStatusChangeListenerForTest changeListener = new BackgroundJobServerStatusChangeListenerForTest(); storageProvider.addJobStorageOnChangeListener(changeListener); await() .untilAsserted(() -> assertThat(changeListener.changedServerStates).isNotNull()); }
/**
 * Validates a state invariant.
 *
 * @param expression the invariant that must hold
 * @throws IllegalStateException when {@code expression} is false
 */
public static void checkState(final boolean expression) {
    if (expression) {
        return;
    }
    throw new IllegalStateException();
}
// checkState(true) must be a silent no-op for every overload — even with null suppliers,
// null messages, null/ill-formed format strings, and wrong or missing format arguments.
@Test public void testCheckStateWithSuccess() throws Exception { // success Preconditions.checkState(true); // null supplier Preconditions.checkState(true, null); // null message Preconditions.checkState(true, (String) null); Preconditions.checkState(true, NON_NULL_STRING); // null in string format Preconditions.checkState(true, EXPECTED_ERROR_MSG_ARGS, null, null); // illegalformat Preconditions.checkState(true, EXPECTED_ERROR_MSG_ARGS, 1, 2); // ill-formated string supplier Preconditions.checkState(true, ()-> String.format("%d", NON_INT_STRING)); // null pattern to string formatter Preconditions.checkState(true, NULL_FORMATTER, null, 1); // null arguments to string formatter Preconditions.checkState(true, EXPECTED_ERROR_MSG_ARGS, null, null); // illegal format exception Preconditions.checkState(true, "message %d %d", NON_INT_STRING, 1); // insufficient arguments Preconditions.checkState(true, EXPECTED_ERROR_MSG_ARGS, NON_INT_STRING); // null format in string supplier Preconditions.checkState(true, () -> String.format(NULL_FORMATTER, NON_INT_STRING)); }
/**
 * Decodes a Base64-encoded string into bytes.
 * <p>
 * Leading/trailing characters outside the Base64 alphabet (per {@code IALPHABET})
 * are trimmed, and MIME-style CR/LF line separators at 76-character intervals are
 * skipped. Null or empty input yields an empty array.
 * <p>
 * NOTE(review): inputs whose length is not a multiple of 4 (missing padding) are
 * decoded by the trailing-bytes branch — presumably tolerating truncated input;
 * confirm this leniency is intended before tightening.
 */
public static byte[] decode(String input) {
    // Check special case
    if (input == null || input.equals("")) {
        return new byte[0];
    }
    char[] sArr = input.toCharArray();
    int sLen = sArr.length;
    if (sLen == 0) {
        return new byte[0];
    }
    int sIx = 0;
    // Start and end index after trimming.
    int eIx = sLen - 1;
    // Trim illegal chars from start
    while (sIx < eIx && IALPHABET[sArr[sIx]] < 0) {
        sIx++;
    }
    // Trim illegal chars from end
    while (eIx > 0 && IALPHABET[sArr[eIx]] < 0) {
        eIx--;
    }
    // get the padding count (=) (0, 1 or 2)
    // Count '=' at end.
    int pad = sArr[eIx] == '=' ? (sArr[eIx - 1] == '=' ? 2 : 1) : 0;
    // Content count including possible separators
    int cCnt = eIx - sIx + 1;
    // MIME wraps at 76 chars; each separator is CRLF, hence the << 1
    int sepCnt = sLen > 76 ? (sArr[76] == '\r' ? cCnt / 78 : 0) << 1 : 0;
    // The number of decoded bytes
    int len = ((cCnt - sepCnt) * 6 >> 3) - pad;
    // Preallocate byte[] of exact length
    byte[] dArr = new byte[len];
    // Decode all but the last 0 - 2 bytes.
    int d = 0;
    // named constants (instead of magic numbers) for the 3-byte group size
    // and the 8-bit shift step below
    int three = 3;
    int eight = 8;
    for (int cc = 0, eLen = (len / three) * three; d < eLen; ) {
        // Assemble three bytes into an int from four "valid" characters.
        int i = ctoi(sArr[sIx++]) << 18 | ctoi(sArr[sIx++]) << 12 | ctoi(sArr[sIx++]) << 6 | ctoi(sArr[sIx++]);
        // Add the bytes
        dArr[d++] = (byte) (i >> 16);
        dArr[d++] = (byte) (i >> 8);
        dArr[d++] = (byte) i;
        // If line separator, jump over it. (19 groups of 4 chars = 76 chars per line)
        if (sepCnt > 0 && ++cc == 19) {
            sIx += 2;
            cc = 0;
        }
    }
    if (d < len) {
        // Decode last 1-3 bytes (incl '=') into 1-3 bytes
        int i = 0;
        for (int j = 0; sIx <= eIx - pad; j++) {
            i |= ctoi(sArr[sIx++]) << (18 - j * 6);
        }
        for (int r = 16; d < len; r -= eight) {
            dArr[d++] = (byte) (i >> r);
        }
    }
    return dArr;
}
// A 70-char input (not a multiple of 4) must decode to the same bytes as the
// 69-char truncation — the lenient decoder ignores the dangling character.
@Test
void testNotStandardDecode() {
    String notStandardOrigin = "SecretKey012345678901234567890123456789012345678901234567890123456789";
    byte[] decodeNotStandardOrigin = Base64Decode.decode(notStandardOrigin);
    String truncationOrigin = "SecretKey01234567890123456789012345678901234567890123456789012345678";
    byte[] decodeTruncationOrigin = Base64Decode.decode(truncationOrigin);
    assertArrayEquals(decodeNotStandardOrigin, decodeTruncationOrigin);
}
/**
 * Determines the leader of the given set of ZooKeeper pods.
 * <p>
 * Trivial ensembles are resolved without network traffic: an empty set yields
 * {@code UNKNOWN_LEADER} and a singleton set yields its only pod (a one-member
 * ensemble is always its own leader). Otherwise the pods are queried over TLS
 * with backoff.
 *
 * @param reconciliation   reconciliation marker used for logging/tracing
 * @param pods             names of the ZooKeeper pods
 * @param coTlsPemIdentity cluster-operator TLS identity used for the connection
 * @return a future completing with the leader pod name (or UNKNOWN_LEADER)
 */
Future<String> findZookeeperLeader(Reconciliation reconciliation, Set<String> pods, TlsPemIdentity coTlsPemIdentity) {
    if (pods.isEmpty()) {
        return Future.succeededFuture(UNKNOWN_LEADER);
    } else if (pods.size() == 1) {
        return Future.succeededFuture(pods.iterator().next());
    }
    try {
        NetClientOptions netClientOptions = clientOptions(coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());
        return zookeeperLeaderWithBackoff(reconciliation, pods, netClientOptions);
    } catch (Throwable e) {
        // Surface synchronous failures as a failed future instead of throwing
        return Future.failedFuture(e);
    }
}
// A single-pod ensemble must report that pod as leader without contacting it.
@Test
public void test1PodClusterReturnsOnlyPodAsLeader(VertxTestContext context) {
    ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, this::backoff);
    Checkpoint a = context.checkpoint();
    int firstPodIndex = 0;
    finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, Set.of(createPodWithId(firstPodIndex)), DUMMY_IDENTITY)
        .onComplete(context.succeeding(leader -> {
            context.verify(() -> assertThat(leader, is("my-cluster-zookeeper-0")));
            a.flag();
        }));
}
@Override @CheckForNull public String message(Locale locale, String key, @Nullable String defaultValue, Object... parameters) { String bundleKey = propertyToBundles.get(key); String value = null; if (bundleKey != null) { try { ResourceBundle resourceBundle = ResourceBundle.getBundle(bundleKey, locale, classloader, control); value = resourceBundle.getString(key); } catch (MissingResourceException e1) { // ignore } } if (value == null) { value = defaultValue; } return formatMessage(value, parameters); }
// fr_CH must fall back to the plain French ("fr") translations for both
// built-in keys and language-pack keys.
@Test
public void get_french_label_if_swiss_country() {
    Locale swiss = new Locale("fr", "CH");
    assertThat(underTest.message(swiss, "checkstyle.rule1.name", null)).isEqualTo("Rule un");
    assertThat(underTest.message(swiss, "any", null)).isEqualTo("Tous");
    // language pack
    assertThat(underTest.message(swiss, "sqale.page", null)).isEqualTo("Titre de la page Sqale");
}
/**
 * Parses a {@code StaticDataTask} from its JSON representation.
 * <p>
 * Expects an object node carrying the task schema, the projected schema, the
 * metadata file (parsed against an unpartitioned spec) and an array of rows,
 * each deserialized as a struct of the task schema.
 *
 * @throws IllegalArgumentException if the node is null, not an object, or the
 *         rows field is not an array
 */
static StaticDataTask fromJson(JsonNode jsonNode) {
    Preconditions.checkArgument(jsonNode != null, "Invalid JSON node for data task: null");
    Preconditions.checkArgument(
        jsonNode.isObject(), "Invalid JSON node for data task: non-object (%s)", jsonNode);
    Schema schema = SchemaParser.fromJson(JsonUtil.get(SCHEMA, jsonNode));
    Schema projectedSchema = SchemaParser.fromJson(JsonUtil.get(PROJECTED_SCHEMA, jsonNode));
    DataFile metadataFile =
        (DataFile)
            ContentFileParser.fromJson(
                JsonUtil.get(METADATA_FILE, jsonNode), PartitionSpec.unpartitioned());
    JsonNode rowsArray = JsonUtil.get(ROWS, jsonNode);
    Preconditions.checkArgument(
        rowsArray.isArray(), "Invalid JSON node for rows: non-array (%s)", rowsArray);
    StructLike[] rows = new StructLike[rowsArray.size()];
    for (int i = 0; i < rowsArray.size(); ++i) {
        JsonNode rowNode = rowsArray.get(i);
        rows[i] = (StructLike) SingleValueParser.fromJson(schema.asStruct(), rowNode);
    }
    return new StaticDataTask(metadataFile, schema, projectedSchema, rows);
}
// Non-object JSON nodes (string, array) must be rejected with a descriptive error.
@Test
public void invalidJsonNode() throws Exception {
    String jsonStr = "{\"str\":\"1\", \"arr\":[]}";
    ObjectMapper mapper = new ObjectMapper();
    JsonNode rootNode = mapper.reader().readTree(jsonStr);
    assertThatThrownBy(() -> DataTaskParser.fromJson(rootNode.get("str")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageContaining("Invalid JSON node for data task: non-object ");
    assertThatThrownBy(() -> DataTaskParser.fromJson(rootNode.get("arr")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageContaining("Invalid JSON node for data task: non-object ");
}
/**
 * {@inheritDoc}
 * <p>
 * This implementation holds no discrete resources, so it always returns an
 * empty immutable set.
 */
@Override
public Set<DiscreteResource> values() {
    return ImmutableSet.of();
}
// The empty resource set implementation must report no values.
@Test
public void testValues() {
    assertThat(sut.values(), is(ImmutableSet.of()));
}
/**
 * Reads the value at {@code columnIndex} from the underlying JDBC result set,
 * dispatching to the type-specific getter matching the requested class and
 * falling back to {@link java.sql.ResultSet#getObject(int)} for anything else.
 * <p>
 * Note: primitive getters (getInt, getLong, ...) return the primitive zero for
 * SQL NULL; callers needing null-awareness should check wasNull().
 *
 * @throws SQLException if the underlying result set access fails
 */
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
    if (boolean.class == type) {
        return resultSet.getBoolean(columnIndex);
    }
    if (byte.class == type) {
        return resultSet.getByte(columnIndex);
    }
    if (short.class == type) {
        return resultSet.getShort(columnIndex);
    }
    if (int.class == type) {
        return resultSet.getInt(columnIndex);
    }
    if (long.class == type) {
        return resultSet.getLong(columnIndex);
    }
    if (float.class == type) {
        return resultSet.getFloat(columnIndex);
    }
    if (double.class == type) {
        return resultSet.getDouble(columnIndex);
    }
    if (String.class == type) {
        return resultSet.getString(columnIndex);
    }
    if (BigDecimal.class == type) {
        return resultSet.getBigDecimal(columnIndex);
    }
    if (byte[].class == type) {
        return resultSet.getBytes(columnIndex);
    }
    if (Date.class == type) {
        return resultSet.getDate(columnIndex);
    }
    if (Time.class == type) {
        return resultSet.getTime(columnIndex);
    }
    if (Timestamp.class == type) {
        return resultSet.getTimestamp(columnIndex);
    }
    if (Blob.class == type) {
        return resultSet.getBlob(columnIndex);
    }
    if (Clob.class == type) {
        return resultSet.getClob(columnIndex);
    }
    if (Array.class == type) {
        return resultSet.getArray(columnIndex);
    }
    return resultSet.getObject(columnIndex);
}
// Requesting String.class must route to ResultSet.getString().
@Test
void assertGetValueByString() throws SQLException {
    ResultSet resultSet = mock(ResultSet.class);
    when(resultSet.getString(1)).thenReturn("value");
    assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, String.class), is("value"));
}
/**
 * Computes the initial delay (in seconds) before the first run of a periodic task.
 * <p>
 * If {@code startTime} is still in the future the delay is simply the gap to it.
 * If it is already past, the delay is aligned to the next period boundary; an
 * extra second is added when {@code scheduleTime} carries a fractional second so
 * the task never fires slightly early.
 *
 * @param periodSeconds the task period in seconds
 * @param startTime     the configured first-run time
 * @param scheduleTime  the moment scheduling is being performed ("now")
 * @return seconds to wait before the first execution
 */
@VisibleForTesting
static long getInitialDelayTime(long periodSeconds, LocalDateTime startTime,
                                LocalDateTime scheduleTime) {
    Duration duration = Duration.between(scheduleTime, startTime);
    long initialDelay = duration.getSeconds();
    // if startTime < now, start scheduling from the next period
    if (initialDelay < 0) {
        // if schedule time is not a complete second, add extra 1 second to avoid less than expect scheduler time.
        // eg:
        // Register scheduler, task:mv-271809, initialDay:22, periodSeconds:60, startTime:2023-12-29T17:50,
        // scheduleTime:2024-01-30T15:27:37.342356010
        // Before:schedule at : Hour:MINUTE:59
        // After: schedule at : HOUR:MINUTE:00
        int extra = scheduleTime.getNano() > 0 ? 1 : 0;
        return ((initialDelay % periodSeconds) + periodSeconds + extra) % periodSeconds;
    } else {
        return initialDelay;
    }
}
// Past start times align to the next period boundary; the fractional-second
// schedule time adds one extra second (22 -> 23, 49 -> 50).
@Test
public void testGetInitialDelayTime2() throws Exception {
    Assert.assertEquals(23, TaskManager.getInitialDelayTime(60,
            parseLocalDateTime("2023-12-29 19:50:00"),
            LocalDateTime.parse("2024-01-30T15:27:37.342356010")));
    Assert.assertEquals(50, TaskManager.getInitialDelayTime(60,
            parseLocalDateTime("2023-12-29 19:50:00"),
            LocalDateTime.parse("2024-01-30T15:27:10.342356010")));
}
/**
 * Fills in default values for all known properties, then — only when cluster
 * mode is disabled — pins the search host/port defaults and resolves any
 * port configured as 0 to a concrete value.
 */
public void completeDefaults(Props props) {
    // init string properties
    for (Map.Entry<Object, Object> entry : defaults().entrySet()) {
        props.setDefault(entry.getKey().toString(), entry.getValue().toString());
    }
    boolean clusterEnabled = props.valueAsBoolean(CLUSTER_ENABLED.getKey(), false);
    if (!clusterEnabled) {
        // standalone mode: search runs locally on loopback
        props.setDefault(SEARCH_HOST.getKey(), InetAddress.getLoopbackAddress().getHostAddress());
        props.setDefault(SEARCH_PORT.getKey(), "9001");
        fixPortIfZero(props, Property.SEARCH_HOST.getKey(), SEARCH_PORT.getKey());
        fixEsTransportPortIfNull(props);
    }
}
// In cluster mode the search port must be left untouched even when set to 0
// (port fixing only applies to standalone mode).
@Test
public void completeDefaults_does_not_set_the_http_port_of_elasticsearch_if_value_is_zero_in_search_node_in_cluster() {
    Properties p = new Properties();
    p.setProperty("sonar.cluster.enabled", "true");
    p.setProperty("sonar.search.port", "0");
    Props props = new Props(p);
    processProperties.completeDefaults(props);
    assertThat(props.valueAsInt("sonar.search.port")).isZero();
}
/**
 * Estimates the statistics of the "join complement": the left-side rows that do
 * NOT match any right-side row (used e.g. for outer-join estimation).
 * <p>
 * Special cases: an empty right side means nothing matches (all left rows are
 * complement); no equi-criteria with a filter is unknown; no criteria and no
 * filter means everything matches (zero complement rows).
 */
@VisibleForTesting
PlanNodeStatsEstimate calculateJoinComplementStats(
        Optional<RowExpression> filter,
        List<EquiJoinClause> criteria,
        PlanNodeStatsEstimate leftStats,
        PlanNodeStatsEstimate rightStats) {
    if (rightStats.getOutputRowCount() == 0) {
        // no left side rows are matched
        return leftStats;
    }
    if (criteria.isEmpty()) {
        // TODO: account for non-equi conditions
        if (filter.isPresent()) {
            return PlanNodeStatsEstimate.unknown();
        }
        return normalizer.normalize(leftStats.mapOutputRowCount(rowCount -> 0.0));
    }
    // TODO: add support for non-equality conditions (e.g: <=, !=, >)
    int numberOfFilterClauses = filter.map(expression -> extractConjuncts(expression).size()).orElse(0);
    // Heuristics: select the most selective criteria for join complement clause.
    // Principals behind this heuristics is the same as in computeInnerJoinStats:
    // select "driving join clause" that reduces matched rows the most.
    return criteria.stream()
            .map(drivingClause -> calculateJoinComplementStats(leftStats, rightStats, drivingClause, criteria.size() - 1 + numberOfFilterClauses))
            .filter(estimate -> !estimate.isOutputRowCountUnknown())
            .max(comparingDouble(PlanNodeStatsEstimate::getOutputRowCount))
            .map(estimate -> normalizer.normalize(estimate))
            .orElse(PlanNodeStatsEstimate.unknown());
}
// Right-join complement: the unmatched right rows (here the NULL-key fraction)
// with the join column reduced to a single all-null value.
@Test
public void testRightJoinComplementStats() {
    PlanNodeStatsEstimate expected = NORMALIZER.normalize(
            planNodeStats(
                    RIGHT_ROWS_COUNT * RIGHT_JOIN_COLUMN_NULLS,
                    variableStatistics(RIGHT_JOIN_COLUMN, NaN, NaN, 1.0, 0),
                    RIGHT_OTHER_COLUMN_STATS));
    PlanNodeStatsEstimate actual = JOIN_STATS_RULE.calculateJoinComplementStats(
            Optional.empty(),
            ImmutableList.of(new EquiJoinClause(RIGHT_JOIN_COLUMN, LEFT_JOIN_COLUMN)),
            RIGHT_STATS,
            LEFT_STATS);
    assertEquals(actual, expected);
}
/**
 * Converts the event's marker list into a single space-separated string.
 * <p>
 * Returns {@code EMPTY} when the event carries no markers, and each marker's
 * own {@code toString()} (which may include its child markers) otherwise.
 */
public String convert(ILoggingEvent le) {
    List<Marker> markers = le.getMarkerList();
    if (markers == null || markers.isEmpty()) {
        return EMPTY;
    }
    int size = markers.size();
    if (size == 1) {
        // fast path: single marker, no separator handling needed
        return markers.get(0).toString();
    }
    // StringBuilder instead of the legacy synchronized StringBuffer
    StringBuilder buf = new StringBuilder(32);
    for (int i = 0; i < size; i++) {
        if (i != 0) {
            buf.append(' ');
        }
        Marker m = markers.get(i);
        buf.append(m.toString());
    }
    return buf.toString();
}
// A marker with several children renders via Marker.toString(), which lists
// the children in brackets.
@Test
public void testWithSeveralChildMarker() {
    Marker marker = markerFactory.getMarker("testParent");
    marker.add(markerFactory.getMarker("child1"));
    marker.add(markerFactory.getMarker("child2"));
    marker.add(markerFactory.getMarker("child3"));
    String result = converter.convert(createLoggingEvent(marker));
    assertEquals("testParent [ child1, child2, child3 ]", result);
}
/**
 * Copies the entry stored under {@code key} in the previous analysis cache
 * into this write cache.
 *
 * @throws IllegalArgumentException if the previous cache has no such key
 */
@Override
public void copyFromPrevious(String key) {
    if (!readCache.contains(key)) {
        throw new IllegalArgumentException(String.format("Previous cache doesn't contain key '%s'", key));
    }
    write(key, readCache.read(key));
}
// copyFromPrevious must fetch the value from the read cache and persist it
// into the write cache under the same key.
@Test
public void copyFromPrevious_reads_from_readCache() throws IOException {
    byte[] b = new byte[] {1};
    InputStream value = new ByteArrayInputStream(b);
    when(readCache.contains("key")).thenReturn(true);
    when(readCache.read("key")).thenReturn(value);
    writeCache.copyFromPrevious("key");
    assertThatCacheContains(Map.of("key", b));
}
/**
 * Returns the credential service instance for the default application;
 * equivalent to {@code getInstance(null)}.
 */
public static CredentialService getInstance() {
    return getInstance(null);
}
// When the project-name system property is set, the default instance must pick
// it up as its appName (inspected reflectively since the field is private).
@Test
void testGetInstance3() throws NoSuchFieldException, IllegalAccessException {
    System.setProperty(IdentifyConstants.PROJECT_NAME_PROPERTY, APP_NAME);
    CredentialService credentialService1 = CredentialService.getInstance();
    Field appNameField = credentialService1.getClass().getDeclaredField("appName");
    appNameField.setAccessible(true);
    String appName = (String) appNameField.get(credentialService1);
    assertEquals(APP_NAME, appName);
}
/**
 * Schedules an asynchronous check of {@code target}, throttled so that a
 * target is never checked while a check is already in flight nor sooner than
 * {@code minMsBetweenChecks} after its last completed check.
 * <p>
 * When {@code diskCheckTimeout > 0} the returned future is wrapped with a
 * timeout. The result is cached via the result-caching callback.
 *
 * @return the future of the scheduled check, or empty if throttled/in progress
 */
@Override
public synchronized Optional<ListenableFuture<V>> schedule(
        Checkable<K, V> target, K context) {
    if (checksInProgress.containsKey(target)) {
        // a check for this target is still running; don't pile up another
        return Optional.empty();
    }
    final LastCheckResult<V> result = completedChecks.get(target);
    if (result != null) {
        final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
        if (msSinceLastCheck < minMsBetweenChecks) {
            LOG.debug("Skipped checking {}. Time since last check {}ms " +
                    "is less than the min gap {}ms.",
                    target, msSinceLastCheck, minMsBetweenChecks);
            return Optional.empty();
        }
    }
    LOG.info("Scheduling a check for {}", target);
    final ListenableFuture<V> lfWithoutTimeout = executorService.submit(
            new Callable<V>() {
                @Override
                public V call() throws Exception {
                    return target.check(context);
                }
            });
    final ListenableFuture<V> lf;
    if (diskCheckTimeout > 0) {
        lf = TimeoutFuture
                .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS,
                        scheduledExecutorService);
    } else {
        lf = lfWithoutTimeout;
    }
    checksInProgress.put(target, lf);
    addResultCachingCallback(target, lf);
    return Optional.of(lf);
}
// A failed check result is cached too: a second schedule within the minimum
// gap must be throttled (empty) and not trigger another check.
@Test(timeout=60000)
public void testExceptionCaching() throws Exception {
    final ThrowingCheckable target1 = new ThrowingCheckable();
    final FakeTimer timer = new FakeTimer();
    ThrottledAsyncChecker<Boolean, Boolean> checker =
            new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP, 0, getExecutorService());
    assertTrue(checker.schedule(target1, true).isPresent());
    waitTestCheckableCheckCount(target1, 1L);
    assertFalse(checker.schedule(target1, true).isPresent());
    waitTestCheckableCheckCount(target1, 1L);
}
public boolean isSupported( String filePath ) { if ( filePath == null ) { return false; } boolean ret = false; try { connectionFileNameParser.parseUri( filePath ); ret = true; } catch ( FileSystemException e ) { // DO NOTHING } return ret; }
// Only pvfs:// URIs are supported; null, blanks, plain paths, Windows paths
// and foreign schemes are all rejected.
@Test
public void testIsSupported() throws Exception {
    assertFalse( vfsFileProvider.isSupported( null ) );
    assertFalse( vfsFileProvider.isSupported( "" ) );
    assertFalse( vfsFileProvider.isSupported( " " ) );
    assertFalse( vfsFileProvider.isSupported( "someGarbage" ) );
    assertFalse( vfsFileProvider.isSupported( "/someUser/someUnixFile" ) );
    assertFalse( vfsFileProvider.isSupported( "T:\\Users\\RandomSUser\\Documents\\someWindowsFile" ) );
    assertFalse( vfsFileProvider.isSupported( "//home/randomUser/randomFile.rpt" ) );
    assertFalse( vfsFileProvider.isSupported( "xyz://some/path" ) );
    assertTrue( vfsFileProvider.isSupported( "pvfs://someConnection/someFilePath" ) );
    assertTrue( vfsFileProvider.isSupported( "pvfs://Special Character name &#! <>/someFilePath" ) );
}
/**
 * Assigns the incoming row to its time window(s) and emits it.
 * <p>
 * In event time, rows with a null rowtime are dropped (counted in a metric);
 * in processing time the current processing time is used. The timestamp is
 * shifted to UTC millis before window assignment.
 */
@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
    RowData inputRow = element.getValue();
    long timestamp;
    if (windowAssigner.isEventTime()) {
        if (inputRow.isNullAt(rowtimeIndex)) {
            // null timestamp would be dropped
            numNullRowTimeRecordsDropped.inc();
            return;
        }
        // precision 3 = millisecond timestamps
        timestamp = inputRow.getTimestamp(rowtimeIndex, 3).getMillisecond();
    } else {
        timestamp = getProcessingTimeService().getCurrentProcessingTime();
    }
    timestamp = toUtcTimestampMills(timestamp, shiftTimeZone);
    Collection<TimeWindow> elementWindows = windowAssigner.assignWindows(inputRow, timestamp);
    collect(inputRow, elementWindows);
}
// Changelog rows (INSERT/UPDATE_BEFORE/UPDATE_AFTER/DELETE) all pass through
// window assignment with their RowKind preserved; late records are NOT dropped.
@Test
public void testConsumingChangelogRecords() throws Exception {
    final TumblingWindowAssigner assigner = TumblingWindowAssigner.of(Duration.ofSeconds(3));
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
            createTestHarness(assigner, shiftTimeZone);
    testHarness.setup(OUT_SERIALIZER);
    testHarness.open();
    // process elements
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.processElement(binaryRecord(RowKind.INSERT, "key1", 1, 20L));
    testHarness.processElement(binaryRecord(RowKind.UPDATE_BEFORE, "key1", 1, 30L));
    testHarness.processElement(binaryRecord(RowKind.UPDATE_AFTER, "key1", 1, 40L));
    testHarness.processWatermark(new Watermark(999));
    // append 3 fields: window_start, window_end, window_time
    expectedOutput.add(
            binaryRecord(
                    RowKind.INSERT, "key1", 1, 20L, localMills(0L), localMills(3000L), 2999L));
    expectedOutput.add(
            binaryRecord(
                    RowKind.UPDATE_BEFORE, "key1", 1, 30L, localMills(0L), localMills(3000L), 2999L));
    expectedOutput.add(
            binaryRecord(
                    RowKind.UPDATE_AFTER, "key1", 1, 40L, localMills(0L), localMills(3000L), 2999L));
    expectedOutput.add(new Watermark(999));
    ASSERTER.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.processWatermark(new Watermark(9999));
    expectedOutput.add(new Watermark(9999));
    ASSERTER.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput());
    // late records would not be dropped
    testHarness.processElement(binaryRecord(RowKind.DELETE, "key1", 1, 200L));
    expectedOutput.add(
            binaryRecord(
                    RowKind.DELETE, "key1", 1, 200L, localMills(0L), localMills(3000L), 2999L));
    ASSERTER.assertOutputEqualsSorted(
            "Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.close();
}
/**
 * Returns the TCP keep-alive setting from the client config,
 * defaulting to {@code false} when the property is unset.
 */
@Override
public boolean getTcpKeepAlive() {
    return clientConfig.getPropertyAsBoolean(TCP_KEEP_ALIVE, false);
}
// Setting the property in the client config must override the false default.
@Test
void testGetTcpKeepAliveOverride() {
    clientConfig.set(TCP_KEEP_ALIVE, true);
    assertTrue(connectionPoolConfig.getTcpKeepAlive());
}
/**
 * Closes this cache exactly once (idempotent via the atomic flag): shuts down
 * the owned Redisson instance if this cache created it, detaches from the
 * cache manager, deregisters all entry listeners and stops eviction.
 */
@Override
public void close() {
    if (isClosed()) {
        return;
    }
    // compareAndSet guarantees the shutdown sequence runs at most once
    // even under concurrent close() calls
    if (closed.compareAndSet(false, true)) {
        if (hasOwnRedisson) {
            redisson.shutdown();
        }
        cacheManager.closeCache(this);
        for (CacheEntryListenerConfiguration<K, V> config : listeners.keySet()) {
            deregisterCacheEntryListener(config);
        }
        redisson.getEvictionScheduler().remove(getRawName());
    }
}
// A cache created from a Redisson-backed configuration must close cleanly
// (no exception), including shutting down its owned Redisson instance.
@Test
public void testClose() throws IOException {
    URL configUrl = getClass().getResource("redisson-jcache.yaml");
    Config cfg = Config.fromYAML(configUrl);
    MutableConfiguration c = new MutableConfiguration();
    c.setStatisticsEnabled(true);
    Configuration<String, String> config = RedissonConfiguration.fromConfig(cfg, c);
    Cache<String, String> cache = Caching.getCachingProvider()
            .getCacheManager().createCache("test", config);
    cache.close();
}
/**
 * Returns the REST endpoint URL constant this message headers object targets.
 */
@Override
public String getTargetRestEndpointURL() {
    return URL;
}
// The endpoint URL must end with the yarn-cancel path segment.
@Test
void testURL() {
    assertThat(instance.getTargetRestEndpointURL()).endsWith("yarn-cancel");
}
/**
 * Builds (and memoizes) the query string from the given multi-valued
 * parameters, optionally URL-encoding keys and values with the given charset.
 * <p>
 * Returns null for null/empty parameters; returns the cached string on
 * subsequent calls regardless of the arguments passed (memoized on first use).
 *
 * @throws ServletException if the encoding charset is invalid
 */
protected String generateQueryString(MultiValuedTreeMap<String, String> parameters, boolean encode, String encodeCharset)
        throws ServletException {
    if (parameters == null || parameters.isEmpty()) {
        return null;
    }
    if (queryString != null) {
        return queryString;
    }
    StringBuilder queryStringBuilder = new StringBuilder();
    try {
        for (String key : parameters.keySet()) {
            for (String val : parameters.get(key)) {
                queryStringBuilder.append("&");
                if (encode) {
                    queryStringBuilder.append(URLEncoder.encode(key, encodeCharset));
                } else {
                    queryStringBuilder.append(key);
                }
                queryStringBuilder.append("=");
                if (val != null) {
                    if (encode) {
                        queryStringBuilder.append(URLEncoder.encode(val, encodeCharset));
                    } else {
                        queryStringBuilder.append(val);
                    }
                }
            }
        }
    } catch (UnsupportedEncodingException e) {
        throw new ServletException("Invalid charset passed for query string encoding", e);
    }
    queryString = queryStringBuilder.toString();
    queryString = queryString.substring(1); // remove the first & - faster to do it here than adding logic in the Lambda
    return queryString;
}
// Values needing encoding (JSON braces/quotes) must be percent-encoded, with
// parameters joined by '&' (order of map iteration is not asserted).
@Test
void queryStringWithEncodedParams_generateQueryString_validQuery() {
    AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(encodedQueryString, mockContext, null, config);
    String parsedString = null;
    try {
        parsedString = request.generateQueryString(request.getAwsProxyRequest().getMultiValueQueryStringParameters(),
                true, config.getUriEncoding());
    } catch (ServletException e) {
        e.printStackTrace();
        fail("Could not generate query string");
    }
    assertTrue(parsedString.contains("one=two"));
    assertTrue(parsedString.contains("json=%7B%22name%22%3A%22faisal%22%7D"));
    assertTrue(parsedString.contains("&") && parsedString.indexOf("&") > 0 && parsedString.indexOf("&") < parsedString.length());
}
/**
 * Serializes the given value to its JSON string representation
 * (delegates to the shared {@code toJson} helper).
 */
@Udf
public <T> String toJsonString(@UdfParameter final T input) {
    return toJson(input);
}
// Maps serialize to JSON objects with each entry as a key/value pair.
@Test
public void shouldSerializeMap() {
    // When:
    final Map<String, Integer> map = new HashMap<String, Integer>() {{
        put("c", 2);
        put("d", 4);
    }};
    final String result = udf.toJsonString(map);
    // Then:
    assertEquals("{\"c\":2,\"d\":4}", result);
}
/**
 * Collects the distinct actual data source names referenced by the route units,
 * pre-sizing the set to the route-unit count (load factor 1) to avoid rehashing.
 */
public Collection<String> getActualDataSourceNames() {
    return routeUnits.stream().map(each -> each.getDataSourceMapper().getActualName()).collect(Collectors.toCollection(() -> new HashSet<>(routeUnits.size(), 1L)));
}
// Single-route contexts expose one data source; multi-route contexts expose
// each distinct actual data source name.
@Test
void assertGetActualDataSourceNames() {
    Collection<String> actual = singleRouteContext.getActualDataSourceNames();
    assertThat(actual.size(), is(1));
    assertThat(actual.iterator().next(), is(DATASOURCE_NAME_0));
    actual = multiRouteContext.getActualDataSourceNames();
    assertThat(actual.size(), is(2));
    Iterator<String> iterator = actual.iterator();
    assertThat(iterator.next(), is(DATASOURCE_NAME_0));
    assertThat(iterator.next(), is(DATASOURCE_NAME_1));
}
/**
 * Builds a {@code !=} comparison expression, selecting the JVM comparison and
 * branch opcodes appropriate to the operands' primitive type.
 * <p>
 * int and reference types compare/branch in one instruction (IF_ICMPEQ /
 * IF_ACMPEQ); long/float/double first push a comparison result (LCMP / FCMPL /
 * DCMPL) then branch on zero. A null primitive type means a reference compare.
 *
 * @throws IllegalArgumentException if the operand types differ or are unsupported
 */
static BytecodeExpression notEqual(BytecodeExpression left, BytecodeExpression right) {
    requireNonNull(left, "left is null");
    requireNonNull(right, "right is null");
    checkArgument(left.getType().equals(right.getType()), "left and right must be the same type");
    OpCode comparisonInstruction;
    OpCode noMatchJumpInstruction;
    Class<?> type = left.getType().getPrimitiveType();
    if (type == int.class) {
        comparisonInstruction = null;
        noMatchJumpInstruction = IF_ICMPEQ;
    } else if (type == long.class) {
        comparisonInstruction = LCMP;
        noMatchJumpInstruction = IFEQ;
    } else if (type == float.class) {
        comparisonInstruction = FCMPL;
        noMatchJumpInstruction = IFEQ;
    } else if (type == double.class) {
        comparisonInstruction = DCMPL;
        noMatchJumpInstruction = IFEQ;
    } else if (type == null) {
        // non-primitive: reference identity comparison
        comparisonInstruction = null;
        noMatchJumpInstruction = IF_ACMPEQ;
    } else {
        throw new IllegalArgumentException("Not equal than does not support " + type);
    }
    return new ComparisonBytecodeExpression("!=", comparisonInstruction, noMatchJumpInstruction, left, right);
}
// Exercises != for every supported operand type, including IEEE-754 NaN
// semantics (NaN != anything, even itself) and reference comparison of strings.
@SuppressWarnings({"FloatingPointEquality", "ComparisonToNaN", "EqualsNaN", "EqualsWithItself"})
@Test
public void testNotEqual() throws Exception {
    assertBytecodeExpression(notEqual(constantInt(7), constantInt(3)), 7 != 3, "(7 != 3)");
    assertBytecodeExpression(notEqual(constantInt(7), constantInt(7)), 7 != 7, "(7 != 7)");
    assertBytecodeExpression(notEqual(constantLong(7L), constantLong(3L)), 7L != 3L, "(7L != 3L)");
    assertBytecodeExpression(notEqual(constantLong(7L), constantLong(7L)), 7L != 7L, "(7L != 7L)");
    assertBytecodeExpression(notEqual(constantFloat(7.7f), constantFloat(3.3f)), 7.7f != 3.3f, "(7.7f != 3.3f)");
    assertBytecodeExpression(notEqual(constantFloat(7.7f), constantFloat(7.7f)), 7.7f != 7.7f, "(7.7f != 7.7f)");
    assertBytecodeExpression(notEqual(constantFloat(Float.NaN), constantFloat(7.7f)), Float.NaN != 7.7f, "(NaNf != 7.7f)");
    assertBytecodeExpression(notEqual(constantFloat(Float.NaN), constantFloat(Float.NaN)), Float.NaN != Float.NaN, "(NaNf != NaNf)");
    assertBytecodeExpression(notEqual(constantDouble(7.7), constantDouble(3.3)), 7.7 != 3.3, "(7.7 != 3.3)");
    assertBytecodeExpression(notEqual(constantDouble(7.7), constantDouble(7.7)), 7.7 != 7.7, "(7.7 != 7.7)");
    assertBytecodeExpression(notEqual(constantDouble(Double.NaN), constantDouble(7.7)), Double.NaN != 7.7, "(NaN != 7.7)");
    assertBytecodeExpression(notEqual(constantDouble(7.7), constantDouble(Double.NaN)), 7.7 != Double.NaN, "(7.7 != NaN)");
    assertBytecodeExpression(notEqual(constantDouble(Double.NaN), constantDouble(Double.NaN)), Double.NaN != Double.NaN, "(NaN != NaN)");
    // the byte code is verifying with != but that breaks check style so we use
    assertBytecodeExpression(notEqual(constantString("foo"), constantString("bar")), !"foo".equals("bar"), "(\"foo\" != \"bar\")");
    assertBytecodeExpression(notEqual(constantString("foo"), constantString("foo")), !"foo".equals("foo"), "(\"foo\" != \"foo\")");
}
/**
 * Schedules an asynchronous check of {@code target}, throttled so that a
 * target is never checked while a check is already in flight nor sooner than
 * {@code minMsBetweenChecks} after its last completed check.
 * <p>
 * When {@code diskCheckTimeout > 0} the returned future is wrapped with a
 * timeout. The result is cached via the result-caching callback.
 *
 * @return the future of the scheduled check, or empty if throttled/in progress
 */
@Override
public synchronized Optional<ListenableFuture<V>> schedule(
        Checkable<K, V> target, K context) {
    if (checksInProgress.containsKey(target)) {
        // a check for this target is still running; don't pile up another
        return Optional.empty();
    }
    final LastCheckResult<V> result = completedChecks.get(target);
    if (result != null) {
        final long msSinceLastCheck = timer.monotonicNow() - result.completedAt;
        if (msSinceLastCheck < minMsBetweenChecks) {
            LOG.debug("Skipped checking {}. Time since last check {}ms " +
                    "is less than the min gap {}ms.",
                    target, msSinceLastCheck, minMsBetweenChecks);
            return Optional.empty();
        }
    }
    LOG.info("Scheduling a check for {}", target);
    final ListenableFuture<V> lfWithoutTimeout = executorService.submit(
            new Callable<V>() {
                @Override
                public V call() throws Exception {
                    return target.check(context);
                }
            });
    final ListenableFuture<V> lf;
    if (diskCheckTimeout > 0) {
        lf = TimeoutFuture
                .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS,
                        scheduledExecutorService);
    } else {
        lf = lfWithoutTimeout;
    }
    checksInProgress.put(target, lf);
    addResultCachingCallback(target, lf);
    return Optional.of(lf);
}
// An exception thrown by the check must surface through the returned future
// as an ExecutionException wrapping the original cause.
@Test(timeout=60000)
public void testExceptionIsPropagated() throws Exception {
    final ThrowingCheckable target = new ThrowingCheckable();
    final FakeTimer timer = new FakeTimer();
    ThrottledAsyncChecker<Boolean, Boolean> checker =
            new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP, 0, getExecutorService());
    final Optional<ListenableFuture<Boolean>> olf =
            checker.schedule(target, true);
    assertTrue(olf.isPresent());
    try {
        olf.get().get();
        fail("Failed to get expected ExecutionException");
    } catch(ExecutionException ee) {
        assertTrue(ee.getCause() instanceof DummyException);
    }
}
/**
 * Deserializes JSON bytes from Kafka into a Connect schema + value pair.
 * <p>
 * Null payloads (tombstones) map to {@code SchemaAndValue.NULL}. With schemas
 * enabled, the JSON must be the two-field envelope {schema, payload}; with
 * schemas disabled, the raw JSON is wrapped in a synthetic envelope with a
 * null schema before conversion.
 *
 * @throws DataException on serialization errors or malformed envelopes
 */
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
    JsonNode jsonValue;
    // This handles a tombstone message
    if (value == null) {
        return SchemaAndValue.NULL;
    }
    try {
        jsonValue = deserializer.deserialize(topic, value);
    } catch (SerializationException e) {
        throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
    }
    if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
        throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
                " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
    // The deserialized data should either be an envelope object containing the schema and the payload or the schema
    // was stripped during serialization and we need to fill in an all-encompassing schema.
    if (!config.schemasEnabled()) {
        ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
        envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
        envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
        jsonValue = envelope;
    }
    Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    return new SchemaAndValue(
            schema,
            convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
    );
}
// An optional Timestamp logical type with a null payload must deserialize to
// the optional schema and a null value.
@Test
public void timestampToConnectOptional() {
    Schema schema = Timestamp.builder().optional().schema();
    String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1, \"optional\": true }, \"payload\": null }";
    SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes());
    assertEquals(schema, schemaAndValue.schema());
    assertNull(schemaAndValue.value());
}
/**
 * Deserializes delimited (CSV) bytes into a list of column values.
 * <p>
 * Null bytes yield null; only the first CSV record is used. The column count
 * must match the configured parsers; empty fields become null, otherwise each
 * field is converted by its column's parser.
 *
 * @throws SerializationException on empty records, column-count mismatch or
 *         any parse failure
 */
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
    if (bytes == null) {
        return null;
    }
    try {
        final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
        final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
                .getRecords();
        if (csvRecords.isEmpty()) {
            throw new SerializationException("No fields in record");
        }
        final CSVRecord csvRecord = csvRecords.get(0);
        if (csvRecord == null || csvRecord.size() == 0) {
            throw new SerializationException("No fields in record.");
        }
        SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
        final List<Object> values = new ArrayList<>(parsers.size());
        final Iterator<Parser> pIt = parsers.iterator();
        for (int i = 0; i < csvRecord.size(); i++) {
            final String value = csvRecord.get(i);
            final Parser parser = pIt.next();
            // empty CSV field means SQL NULL
            final Object parsed = value == null || value.isEmpty()
                    ? null
                    : parser.parse(value);
            values.add(parsed);
        }
        return values;
    } catch (final Exception e) {
        throw new SerializationException("Error deserializing delimited", e);
    }
}
// A decimal column parses "01.12" into BigDecimal 1.12 (scale preserved).
@Test
public void shouldDeserializeDecimal() {
    // Given:
    final PersistenceSchema schema = persistenceSchema(
        column("cost", SqlTypes.decimal(4, 2))
    );
    final KsqlDelimitedDeserializer deserializer = createDeserializer(schema);
    final byte[] bytes = "01.12".getBytes(StandardCharsets.UTF_8);
    // When:
    final List<?> result = deserializer.deserialize("", bytes);
    // Then:
    assertThat(result, contains(new BigDecimal("1.12")));
}
/**
 * Runs the garbage collector the number of times requested in the task
 * parameters, reporting progress to the given writer.
 */
@Override
@SuppressWarnings("CallToSystemGC")
public void execute(Map<String, List<String>> parameters, PrintWriter output) {
    final int runs = parseRuns(parameters);
    for (int run = 0; run < runs; run++) {
        output.println("Running GC...");
        // flush so progress is visible before the (potentially slow) GC call
        output.flush();
        runtime.gc();
    }
    output.println("Done!");
}
// Without parameters the task defaults to exactly one GC run.
@Test
void runsOnceWithNoParameters() throws Exception {
    task.execute(Collections.emptyMap(), output);
    verify(runtime, times(1)).gc();
}
/**
 * Returns the number of whole weeks between two dates.
 *
 * @param beginDate the start date
 * @param endDate   the end date
 * @param isReset   whether to truncate both dates to the beginning of their day
 *                  before computing the difference
 * @return the week count between the two dates
 */
public static long betweenWeek(Date beginDate, Date endDate, boolean isReset) {
    Date start = beginDate;
    Date end = endDate;
    if (isReset) {
        start = beginOfDay(start);
        end = beginOfDay(end);
    }
    return between(start, end, DateUnit.WEEK);
}
// 2019-03-05 to 2019-10-05 spans 30 whole weeks (with day reset enabled).
@Test
public void betweenWeekTest() {
    final DateTime start = DateUtil.parse("2019-03-05");
    final DateTime end = DateUtil.parse("2019-10-05");
    final long weekCount = DateUtil.betweenWeek(start, end, true);
    assertEquals(30L, weekCount);
}
/**
 * Asynchronously asks the broker to change a popped message's invisible time.
 * <p>
 * On a SUCCESS response the callback receives an OK {@code AckResult} carrying
 * the new pop time and a rebuilt extra-info/receipt string; any other response
 * code maps to NO_EXIST. Decoding failures and transport failures are routed
 * to {@code ackCallback.onException}.
 */
public void changeInvisibleTimeAsync(
        final String brokerName,
        final String addr,
        final ChangeInvisibleTimeRequestHeader requestHeader,
        final long timeoutMillis,
        final AckCallback ackCallback
) throws RemotingException, MQBrokerException, InterruptedException {
    final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CHANGE_MESSAGE_INVISIBLETIME, requestHeader);
    this.remotingClient.invokeAsync(addr, request, timeoutMillis, new InvokeCallback() {
        @Override
        public void operationComplete(ResponseFuture responseFuture) {
            // no-op: success/failure are handled by the dedicated callbacks below
        }

        @Override
        public void operationSucceed(RemotingCommand response) {
            try {
                ChangeInvisibleTimeResponseHeader responseHeader = (ChangeInvisibleTimeResponseHeader) response.decodeCommandCustomHeader(ChangeInvisibleTimeResponseHeader.class);
                AckResult ackResult = new AckResult();
                if (ResponseCode.SUCCESS == response.getCode()) {
                    ackResult.setStatus(AckStatus.OK);
                    ackResult.setPopTime(responseHeader.getPopTime());
                    // rebuild the receipt handle so the caller can ack/change again later
                    ackResult.setExtraInfo(ExtraInfoUtil
                            .buildExtraInfo(requestHeader.getOffset(), responseHeader.getPopTime(), responseHeader.getInvisibleTime(),
                                    responseHeader.getReviveQid(), requestHeader.getTopic(), brokerName, requestHeader.getQueueId())
                            + MessageConst.KEY_SEPARATOR + requestHeader.getOffset());
                } else {
                    ackResult.setStatus(AckStatus.NO_EXIST);
                }
                ackCallback.onSuccess(ackResult);
            } catch (Exception e) {
                ackCallback.onException(e);
            }
        }

        @Override
        public void operationFail(Throwable throwable) {
            ackCallback.onException(throwable);
        }
    });
}
// Stubs the remoting client to immediately deliver a SUCCESS response, then asserts
// the AckCallback observes AckStatus.OK. A latch keeps the test deterministic.
@Test
public void testChangeInvisibleTimeAsync_Success() throws Exception {
    doAnswer((Answer<Void>) mock -> {
        // Argument 3 is the InvokeCallback registered by changeInvisibleTimeAsync.
        InvokeCallback callback = mock.getArgument(3);
        RemotingCommand request = mock.getArgument(1);
        ResponseFuture responseFuture = new ResponseFuture(null, request.getOpaque(), 3 * 1000, null, null);
        RemotingCommand response = RemotingCommand.createResponseCommand(ChangeInvisibleTimeResponseHeader.class);
        response.setOpaque(request.getOpaque());
        response.setCode(ResponseCode.SUCCESS);
        ChangeInvisibleTimeResponseHeader responseHeader = (ChangeInvisibleTimeResponseHeader) response.readCustomHeader();
        responseHeader.setPopTime(System.currentTimeMillis());
        responseHeader.setInvisibleTime(10 * 1000L);
        responseFuture.setResponseCommand(response);
        // Drive the success path directly.
        callback.operationSucceed(responseFuture.getResponseCommand());
        return null;
    }).when(remotingClient).invokeAsync(anyString(), any(RemotingCommand.class), anyLong(), any(InvokeCallback.class));
    final CountDownLatch done = new CountDownLatch(1);
    ChangeInvisibleTimeRequestHeader requestHeader = new ChangeInvisibleTimeRequestHeader();
    requestHeader.setTopic(topic);
    requestHeader.setQueueId(0);
    requestHeader.setOffset(0L);
    requestHeader.setInvisibleTime(10 * 1000L);
    mqClientAPI.changeInvisibleTimeAsync(brokerName, brokerAddr, requestHeader, 10 * 1000, new AckCallback() {
        @Override
        public void onSuccess(AckResult ackResult) {
            assertThat(ackResult.getStatus()).isEqualTo(AckStatus.OK);
            done.countDown();
        }

        @Override
        public void onException(Throwable e) {
            Assertions.fail("want no exception but got one", e);
            done.countDown();
        }
    });
    done.await();
}
/**
 * Validates user-supplied offsets for this connector. Tombstone (null) offsets are
 * always accepted so garbage entries can be cleaned up via the REST API; non-null
 * entries must have a non-null partition containing the expected source/target
 * cluster alias keys and a well-formed offset value.
 *
 * @param config  connector configuration (unused for validation here)
 * @param offsets map of source partition to source offset to validate
 * @return {@code true} if all offsets are acceptable
 * @throws ConnectException if a partition is null or malformed, or an offset value is invalid
 */
@Override
public boolean alterOffsets(Map<String, String> config, Map<Map<String, ?>, Map<String, ?>> offsets) {
    for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) {
        Map<String, ?> sourceOffset = offsetEntry.getValue();
        if (sourceOffset == null) {
            // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
            // want to prevent users from being able to clean it up using the REST API
            continue;
        }
        Map<String, ?> sourcePartition = offsetEntry.getKey();
        if (sourcePartition == null) {
            throw new ConnectException("Source partitions may not be null");
        }
        // Both alias keys must be present and well-formed strings.
        MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_ALIAS_KEY);
        MirrorUtils.validateSourcePartitionString(sourcePartition, TARGET_CLUSTER_ALIAS_KEY);
        MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true);
    }
    // We don't actually use these offsets in the task class, so no additional effort is required beyond just validating
    // the format of the user-supplied offsets
    return true;
}
// Exercises alterOffsets with a range of invalid offset values (non-numeric, null,
// negative, string-typed, out-of-int-range) and one valid value (0).
@Test
public void testAlterOffsetsOffsetValues() {
    MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector();
    // Helper: wraps a single offset value in a well-formed partition/offset map.
    Function<Object, Boolean> alterOffsets = offset -> connector.alterOffsets(null, Collections.singletonMap(
        sourcePartition("primary", "backup"),
        Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset)
    ));
    assertThrows(ConnectException.class, () -> alterOffsets.apply("nan"));
    assertThrows(ConnectException.class, () -> alterOffsets.apply(null));
    assertThrows(ConnectException.class, () -> alterOffsets.apply(new Object()));
    assertThrows(ConnectException.class, () -> alterOffsets.apply(3.14));
    assertThrows(ConnectException.class, () -> alterOffsets.apply(-420));
    assertThrows(ConnectException.class, () -> alterOffsets.apply("-420"));
    assertThrows(ConnectException.class, () -> alterOffsets.apply("10"));
    assertThrows(ConnectException.class, () -> alterOffsets.apply(10));
    assertThrows(ConnectException.class, () -> alterOffsets.apply(((long) Integer.MAX_VALUE) + 1));
    assertTrue(() -> alterOffsets.apply(0));
}
/**
 * Initializes the executable-stage operator: builds the stage from its payload,
 * wires user state, obtains the stage bundle factory from the shared context,
 * and installs progress/finalization/checkpoint handlers before delegating to
 * {@code super.open()} (which builds the wrapping DoFnRunner from these fields).
 * The initialization order matters: super.open() depends on everything set above it.
 */
@Override
public void open() throws Exception {
    executableStage = ExecutableStage.fromPayload(payload);
    hasSdfProcessFn = hasSDF(executableStage);
    initializeUserState(executableStage, getKeyedStateBackend(), pipelineOptions);
    // TODO: Wire this into the distributed cache and make it pluggable.
    // TODO: Do we really want this layer of indirection when accessing the stage bundle factory?
    // It's a little strange because this operator is responsible for the lifetime of the stage
    // bundle "factory" (manager?) but not the job or Flink bundle factories. How do we make
    // ownership of the higher level "factories" explicit? Do we care?
    stageContext = contextFactory.get(jobInfo);
    stageBundleFactory = stageContext.getStageBundleFactory(executableStage);
    stateRequestHandler = getStateRequestHandler(executableStage);
    // Forwards SDK-harness monitoring info into Flink metrics, when a container is configured.
    progressHandler = new BundleProgressHandler() {
        @Override
        public void onProgress(ProcessBundleProgressResponse progress) {
            if (flinkMetricContainer != null) {
                flinkMetricContainer.updateMetrics(stepName, progress.getMonitoringInfosList());
            }
        }

        @Override
        public void onCompleted(ProcessBundleResponse response) {
            if (flinkMetricContainer != null) {
                flinkMetricContainer.updateMetrics(stepName, response.getMonitoringInfosList());
            }
        }
    };
    finalizationHandler = BundleFinalizationHandlers.inMemoryFinalizer(stageBundleFactory.getInstructionRequestHandler());
    checkpointHandler = getBundleCheckpointHandler(hasSdfProcessFn);
    // Sentinel values: no event-time timers seen yet in either bundle.
    minEventTimeTimerTimestampInCurrentBundle = Long.MAX_VALUE;
    minEventTimeTimerTimestampInLastBundle = Long.MAX_VALUE;
    super.setPreBundleCallback(this::preBundleStartCallback);
    super.setBundleFinishedCallback(this::finishBundleCallback);
    // This will call {@code createWrappingDoFnRunner} which needs the above dependencies.
    super.open();
}
// Verifies that an exception thrown while closing the remote bundle (i.e. an SDK
// harness error) propagates out of the operator's close path as the test's cause.
@Test
public void sdkErrorsSurfaceOnClose() throws Exception {
    TupleTag<Integer> mainOutput = new TupleTag<>("main-output");
    DoFnOperator.MultiOutputOutputManagerFactory<Integer> outputManagerFactory =
        new DoFnOperator.MultiOutputOutputManagerFactory(
            mainOutput, VoidCoder.of(), new SerializablePipelineOptions(FlinkPipelineOptions.defaults()));
    ExecutableStageDoFnOperator<Integer, Integer> operator =
        getOperator(mainOutput, Collections.emptyList(), outputManagerFactory);
    OneInputStreamOperatorTestHarness<WindowedValue<Integer>, WindowedValue<Integer>> testHarness =
        new OneInputStreamOperatorTestHarness<>(operator);
    testHarness.open();
    @SuppressWarnings("unchecked")
    RemoteBundle bundle = Mockito.mock(RemoteBundle.class);
    when(stageBundleFactory.getBundle(any(), any(), any(), any(), any(), any())).thenReturn(bundle);
    @SuppressWarnings("unchecked")
    FnDataReceiver<WindowedValue<?>> receiver = Mockito.mock(FnDataReceiver.class);
    when(bundle.getInputReceivers()).thenReturn(ImmutableMap.of("input", receiver));
    // Simulate the SDK failing when the bundle is closed.
    Exception expected = new RuntimeException(new Exception());
    doThrow(expected).when(bundle).close();
    thrown.expectCause(is(expected));
    operator.processElement(new StreamRecord<>(WindowedValue.valueInGlobalWindow(0)));
    testHarness.close();
}
/**
 * Pages product comments for the app side, optionally filtered by visibility.
 *
 * @param pageVO  paging and filter criteria
 * @param visible visibility filter; passed straight through to the mapper
 * @return a page of matching comments
 */
@Override
public PageResult<ProductCommentDO> getCommentPage(AppCommentPageReqVO pageVO, Boolean visible) {
    // Pure delegation: all filtering and paging happens in the mapper's SQL.
    final PageResult<ProductCommentDO> page = productCommentMapper.selectPage(pageVO, visible);
    return page;
}
// End-to-end paging test: inserts one fully-matching comment plus one row per
// non-matching criterion, then checks totals for filtered, unfiltered, visible-only,
// and mediocre-score queries.
@Test
public void testGetCommentPage_success() {
    // Prepare a comment that matches every filter used below.
    ProductCommentDO productComment = randomPojo(ProductCommentDO.class, o -> {
        o.setUserNickname("王二狗");
        o.setSpuName("感冒药");
        o.setScores(ProductCommentScoresEnum.FOUR.getScores());
        o.setReplyStatus(Boolean.TRUE);
        o.setVisible(Boolean.TRUE);
        o.setId(generateId());
        o.setUserId(generateId());
        o.setAnonymous(Boolean.TRUE);
        o.setOrderId(generateId());
        o.setOrderItemId(generateId());
        o.setSpuId(generateId());
        o.setSkuId(generateId());
        o.setDescriptionScores(ProductCommentScoresEnum.FOUR.getScores());
        o.setBenefitScores(ProductCommentScoresEnum.FOUR.getScores());
        o.setContent("真好吃");
        o.setReplyUserId(generateId());
        o.setReplyContent("确实");
        o.setReplyTime(LocalDateTime.now());
        o.setCreateTime(LocalDateTime.now());
        o.setUpdateTime(LocalDateTime.now());
    });
    productCommentMapper.insert(productComment);
    Long orderId = productComment.getOrderId();
    Long spuId = productComment.getSpuId();
    // Row with a non-matching userNickname (and a ONE score, used by the mediocre-count checks).
    productCommentMapper.insert(cloneIgnoreId(productComment, o -> o.setUserNickname("王三").setScores(ProductCommentScoresEnum.ONE.getScores())));
    // Row with a non-matching orderId.
    productCommentMapper.insert(cloneIgnoreId(productComment, o -> o.setOrderId(generateId())));
    // Row with a non-matching spuId.
    productCommentMapper.insert(cloneIgnoreId(productComment, o -> o.setSpuId(generateId())));
    // Row with a non-matching spuName.
    productCommentMapper.insert(cloneIgnoreId(productComment, o -> o.setSpuName("感康")));
    // Row with a non-matching score.
    productCommentMapper.insert(cloneIgnoreId(productComment, o -> o.setScores(ProductCommentScoresEnum.ONE.getScores())));
    // Row with a non-matching reply status.
    productCommentMapper.insert(cloneIgnoreId(productComment, o -> o.setReplyStatus(Boolean.FALSE)));
    // Row that is not visible.
    productCommentMapper.insert(cloneIgnoreId(productComment, o -> o.setVisible(Boolean.FALSE)));
    // Invoke the admin-side page query with all filters set.
    ProductCommentPageReqVO productCommentPageReqVO = new ProductCommentPageReqVO();
    productCommentPageReqVO.setUserNickname("王二");
    productCommentPageReqVO.setOrderId(orderId);
    productCommentPageReqVO.setSpuId(spuId);
    productCommentPageReqVO.setSpuName("感冒药");
    productCommentPageReqVO.setScores(ProductCommentScoresEnum.FOUR.getScores());
    productCommentPageReqVO.setReplyStatus(Boolean.TRUE);
    PageResult<ProductCommentDO> commentPage = productCommentService.getCommentPage(productCommentPageReqVO);
    PageResult<ProductCommentRespVO> result = BeanUtils.toBean(productCommentMapper.selectPage(productCommentPageReqVO), ProductCommentRespVO.class);
    assertEquals(result.getTotal(), commentPage.getTotal());
    // Unfiltered query sees all 8 inserted rows.
    PageResult<ProductCommentDO> all = productCommentService.getCommentPage(new ProductCommentPageReqVO());
    assertEquals(8, all.getTotal());
    // App-side visible-only paging: 7 of the 8 rows are visible.
    PageResult<ProductCommentDO> result1 = productCommentService.getCommentPage(new AppCommentPageReqVO(), Boolean.TRUE);
    assertEquals(7, result1.getTotal());
    // App-side mediocre-score paging across all SPUs.
    PageResult<ProductCommentDO> result2 = productCommentService.getCommentPage(new AppCommentPageReqVO().setType(AppCommentPageReqVO.MEDIOCRE_COMMENT), Boolean.TRUE);
    assertEquals(2, result2.getTotal());
    // App-side mediocre-score paging restricted to the matching spuId.
    PageResult<ProductCommentDO> result3 = productCommentService.getCommentPage(new AppCommentPageReqVO().setSpuId(spuId).setType(AppCommentPageReqVO.MEDIOCRE_COMMENT), Boolean.TRUE);
    assertEquals(2, result3.getTotal());
    // Tab-count assertions are currently disabled; re-enable once getCommentStatistics is stable.
    //AppCommentStatisticsRespVO tabsCount = productCommentService.getCommentStatistics(spuId, Boolean.TRUE);
    //assertEquals(4, tabsCount.getGoodCount());
    //assertEquals(2, tabsCount.getMediocreCount());
    //assertEquals(0, tabsCount.getNegativeCount());
}
/**
 * Adds two floats with exact decimal semantics by routing both operands through
 * the String-based overload, avoiding binary floating-point rounding artifacts.
 *
 * @param v1 first addend
 * @param v2 second addend
 * @return the exact decimal sum as a double
 */
public static double add(float v1, float v2) {
    final String augend = Float.toString(v1);
    final String addend = Float.toString(v2);
    return add(augend, addend).doubleValue();
}
// 3.15f + 4.22 must equal exactly 7.37 — the String-routed addition avoids
// the rounding error a plain float/double sum would produce.
@Test
public void addTest() {
    final Float a = 3.15f;
    final Double b = 4.22;
    final double result = NumberUtil.add(a, b).doubleValue();
    assertEquals(7.37, result, 0);
}
/**
 * Conservative-add update: increments the count for {@code item} while inflating
 * counters as little as possible. Each row's bucket is raised only up to
 * {@code min + count}, where {@code min} is the current minimum estimate across
 * rows — this keeps estimates tighter than a plain count-min update.
 *
 * @param item  the hashed item to increment
 * @param count non-negative increment
 * @throws IllegalArgumentException if {@code count} is negative
 */
@Override
public void add(long item, long count) {
    if (count < 0) {
        // Negative values are not implemented in the regular version, and do not
        // play nicely with this algorithm anyway
        throw new IllegalArgumentException("Negative increments not implemented");
    }
    // One bucket index per hash row.
    int[] buckets = new int[depth];
    for (int i = 0; i < depth; ++i) {
        buckets[i] = hash(item, i);
    }
    // Current best (minimum) estimate for this item across all rows.
    long min = table[0][buckets[0]];
    for (int i = 1; i < depth; ++i) {
        min = Math.min(min, table[i][buckets[i]]);
    }
    // Raise each row's bucket only as far as min + count (never lower it).
    for (int i = 0; i < depth; ++i) {
        long newVal = Math.max(table[i][buckets[i]], min + count);
        table[i][buckets[i]] = newVal;
    }
    size += count;
}
// Accuracy test over one million random strings: the conservative-add sketch must
// stay within the configured error bound with the requested confidence, and its
// estimates are compared against a plain count-min sketch as a baseline.
@Test
public void testAccuracyStrings() {
    int seed = 7364181;
    Random r = new Random(seed);
    int numItems = 1000000;
    String[] xs = new String[numItems];
    int maxScale = 20;
    // Random strings of random length (0..maxScale-1).
    for (int i = 0; i < xs.length; i++) {
        int scale = r.nextInt(maxScale);
        xs[i] = RandomStringUtils.random(scale);
    }
    double epsOfTotalCount = 0.0001;
    double confidence = 0.99;
    ConservativeAddSketch sketch = new ConservativeAddSketch(epsOfTotalCount, confidence, seed);
    IFrequency baseSketch = new CountMinSketch(epsOfTotalCount, confidence, seed);
    for (String x : xs) {
        sketch.add(x, 1);
        baseSketch.add(x, 1);
    }
    // Exact frequencies for comparison.
    Map<String, Long> actualFreq = new HashMap<String, Long>(numItems / 10);
    for (String x : xs) {
        Long val = actualFreq.get(x);
        if (val == null) {
            actualFreq.put(x, 1L);
        } else {
            actualFreq.put(x, val + 1L);
        }
    }
    int numErrors = 0;
    int betterNumbers = 0;
    long totalDelta = 0;
    // Errors beyond eps * N are counted against the confidence bound.
    int okayError = (int) (numItems * epsOfTotalCount) + 1;
    long totalError = 0;
    for (Map.Entry<String, Long> entry : actualFreq.entrySet()) {
        String key = entry.getKey();
        Long value = entry.getValue();
        long error = sketch.estimateCount(key) - value;
        totalError += error;
        if (error > okayError) {
            numErrors++;
        }
        // Positive delta means the conservative sketch beat the baseline for this key.
        long delta = baseSketch.estimateCount(key) - sketch.estimateCount(key);
        if (delta > 0) {
            totalDelta += delta;
            betterNumbers++;
        }
    }
    long usedValues = actualFreq.size();
    double pCorrect = 1 - 1.0 * numErrors / usedValues;
    System.out.println("Confidence : " + pCorrect + " Errors : " + numErrors + " Error margin : " + okayError);
    System.out.println("Total error : " + totalError + " Average error : " + totalError / usedValues);
    System.out.println("Beat base for : " + 100 * betterNumbers / usedValues + " percent of values" + " with a total delta of " + totalDelta);
    assertTrue("Confidence not reached: required " + confidence + ", reached " + pCorrect, pCorrect > confidence);
}
/**
 * Collects all {@code dubbo.*} properties from the environment, resolving any
 * placeholders in their values.
 *
 * @param environment the Spring environment to extract properties from
 * @return an unmodifiable sorted map of dubbo property name to resolved value
 */
public static SortedMap<String, String> filterDubboProperties(ConfigurableEnvironment environment) {
    final SortedMap<String, String> result = new TreeMap<>();
    // Hoist the prefix so it is built once rather than per entry.
    final String dubboKeyPrefix = DUBBO_PREFIX + PROPERTY_NAME_SEPARATOR;
    extractProperties(environment).forEach((name, value) -> {
        // Null values are skipped; only dubbo-prefixed keys are kept.
        if (value != null && name.startsWith(dubboKeyPrefix)) {
            result.put(name, environment.resolvePlaceholders(value.toString()));
        }
    });
    return Collections.unmodifiableSortedMap(result);
}
// Only the two dubbo.* properties should survive filtering; "message" is dropped.
@Test
void testFilterDubboProperties() {
    MockEnvironment environment = new MockEnvironment();
    environment.setProperty("message", "Hello,World");
    environment.setProperty("dubbo.registry.address", "zookeeper://10.10.10.1:2181");
    environment.setProperty("dubbo.consumer.check", "false");
    SortedMap<String, String> dubboProperties = filterDubboProperties(environment);
    Assertions.assertEquals(2, dubboProperties.size());
    Assertions.assertEquals("zookeeper://10.10.10.1:2181", dubboProperties.get("dubbo.registry.address"));
    Assertions.assertEquals("false", dubboProperties.get("dubbo.consumer.check"));
}
/**
 * @return the deserialization target type for this handler's response body
 */
@Override
public Class<MetricCollectionResponseBody> getResponseClass() {
    return MetricCollectionResponseBody.class;
}
// The handler headers must advertise MetricCollectionResponseBody as the response type.
@Test
void testResponseClass() {
    assertThat(metricsHandlerHeaders.getResponseClass())
        .isEqualTo(MetricCollectionResponseBody.class);
}
/**
 * Fetches a user's info from the broker by username via a synchronous RPC.
 *
 * @param addr     broker address
 * @param username user to look up
 * @param millis   RPC timeout in milliseconds
 * @return the decoded {@link UserInfo} on a SUCCESS response
 * @throws MQBrokerException for any non-success response code (carries code and remark)
 */
public UserInfo getUser(String addr, String username, long millis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException {
    GetUserRequestHeader requestHeader = new GetUserRequestHeader(username);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_GET_USER, requestHeader);
    RemotingCommand response = this.remotingClient.invokeSync(addr, request, millis);
    // invokeSync is expected to return a non-null response or throw.
    assert response != null;
    switch (response.getCode()) {
        case ResponseCode.SUCCESS: {
            return RemotingSerializable.decode(response.getBody(), UserInfo.class);
        }
        default:
            break;
    }
    // Any non-success code falls through to a broker exception.
    throw new MQBrokerException(response.getCode(), response.getRemark());
}
// Stubs a successful invokeSync with a serialized UserInfo body and asserts
// every field is decoded through getUser.
@Test
public void assertGetUser() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    setResponseBody(createUserInfo());
    UserInfo actual = mqClientAPI.getUser(defaultBrokerAddr, "", defaultTimeout);
    assertNotNull(actual);
    assertEquals("username", actual.getUsername());
    assertEquals("password", actual.getPassword());
    assertEquals("userStatus", actual.getUserStatus());
    assertEquals("userType", actual.getUserType());
}
/**
 * Builds a {@link CSInitializer} for the given target type. First tries loading
 * the official initializer by class name; failing that, scans the PowerJob
 * package for CSInitializer subtypes and returns the first whose {@code type()}
 * matches (case-insensitive).
 *
 * @param targetType the initializer type identifier to resolve
 * @return a matching CSInitializer instance
 * @throws PowerJobException if no matching initializer can be found
 */
static CSInitializer build(String targetType) {
    CSInitializer officialCSInitializer = tryLoadCSInitializerByClassName(targetType);
    if (officialCSInitializer != null) {
        return officialCSInitializer;
    }
    log.info("[CSInitializerFactory] try load CSInitializerFactory by name failed, start to use Reflections!");
    // The Java SPI mechanism is too clumsy; keep the Reflections-based scan for now
    // (see the Reflections project docs for version compatibility notes).
    Reflections reflections = new Reflections(OmsConstant.PACKAGE);
    Set<Class<? extends CSInitializer>> cSInitializerClzSet = reflections.getSubTypesOf(CSInitializer.class);
    log.info("[CSInitializerFactory] scan subTypeOf CSInitializer: {}", cSInitializerClzSet);
    for (Class<? extends CSInitializer> clz : cSInitializerClzSet) {
        try {
            // Requires a public no-arg constructor on every candidate.
            CSInitializer csInitializer = clz.getDeclaredConstructor().newInstance();
            String type = csInitializer.type();
            log.info("[CSInitializerFactory] new instance for CSInitializer[{}] successfully, type={}, object: {}", clz, type, csInitializer);
            if (targetType.equalsIgnoreCase(type)) {
                return csInitializer;
            }
        } catch (Exception e) {
            log.error("[CSInitializerFactory] new instance for CSInitializer[{}] failed, maybe you should provide a non-parameter constructor", clz);
            ExceptionUtils.rethrow(e);
        }
    }
    throw new PowerJobException(String.format("can't load CSInitializer[%s], ensure your package name start with 'tech.powerjob' and import the dependencies!", targetType));
}
// An unknown initializer type must raise PowerJobException rather than return null.
@Test
void testNotFind() {
    Assertions.assertThrows(PowerJobException.class, () -> {
        CSInitializerFactory.build("omicron");
    });
}
/**
 * Assembles the final container image: copies base-image layers, history, and
 * passthrough configuration, then appends the application layers together with
 * synthesized history entries, and finally applies the user's container
 * configuration (entrypoint, args, env, ports, volumes, labels, user, workdir).
 *
 * @return the fully built {@link Image}
 * @throws LayerPropertyNotFoundException if a required layer property is missing
 */
@Override
public Image call() throws LayerPropertyNotFoundException {
    try (ProgressEventDispatcher ignored =
            progressEventDispatcherFactory.create("building image format", 1);
        TimerEventDispatcher ignored2 =
            new TimerEventDispatcher(buildContext.getEventHandlers(), DESCRIPTION)) {
        // Constructs the image.
        Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
        // Base image layers
        baseImageLayers.forEach(imageBuilder::addLayer);
        // Passthrough config and count non-empty history entries
        int nonEmptyLayerCount = 0;
        for (HistoryEntry historyObject : baseImage.getHistory()) {
            imageBuilder.addHistory(historyObject);
            if (!historyObject.hasCorrespondingLayer()) {
                nonEmptyLayerCount++;
            }
        }
        imageBuilder
            .setArchitecture(baseImage.getArchitecture())
            .setOs(baseImage.getOs())
            .addEnvironment(baseImage.getEnvironment())
            .addLabels(baseImage.getLabels())
            .setHealthCheck(baseImage.getHealthCheck())
            .addExposedPorts(baseImage.getExposedPorts())
            .addVolumes(baseImage.getVolumes())
            .setUser(baseImage.getUser())
            .setWorkingDirectory(baseImage.getWorkingDirectory());
        ContainerConfiguration containerConfiguration = buildContext.getContainerConfiguration();
        // Add history elements for non-empty layers that don't have one yet
        Instant layerCreationTime = containerConfiguration.getCreationTime();
        for (int count = 0; count < baseImageLayers.size() - nonEmptyLayerCount; count++) {
            imageBuilder.addHistory(
                HistoryEntry.builder()
                    .setCreationTimestamp(layerCreationTime)
                    .setComment("auto-generated by Jib")
                    .build());
        }
        // Add built layers/configuration
        for (PreparedLayer applicationLayer : applicationLayers) {
            imageBuilder
                .addLayer(applicationLayer)
                .addHistory(
                    HistoryEntry.builder()
                        .setCreationTimestamp(layerCreationTime)
                        .setAuthor("Jib")
                        .setCreatedBy(buildContext.getToolName() + ":" + buildContext.getToolVersion())
                        .setComment(applicationLayer.getName())
                        .build());
        }
        // Container configuration wins over base-image values where set.
        imageBuilder
            .addEnvironment(containerConfiguration.getEnvironmentMap())
            .setCreated(containerConfiguration.getCreationTime())
            .setEntrypoint(computeEntrypoint(baseImage, containerConfiguration))
            .setProgramArguments(computeProgramArguments(baseImage, containerConfiguration))
            .addExposedPorts(containerConfiguration.getExposedPorts())
            .addVolumes(containerConfiguration.getVolumes())
            .addLabels(containerConfiguration.getLabels());
        if (containerConfiguration.getUser() != null) {
            imageBuilder.setUser(containerConfiguration.getUser());
        }
        if (containerConfiguration.getWorkingDirectory() != null) {
            imageBuilder.setWorkingDirectory(containerConfiguration.getWorkingDirectory().toString());
        }
        // Gets the container configuration content descriptor.
        return imageBuilder.build();
    }
}
// With no entrypoint/args in the container configuration, the built image must
// inherit both from the base image.
@Test
public void test_inheritedEntrypointAndProgramArguments() {
    Mockito.when(mockContainerConfiguration.getEntrypoint()).thenReturn(null);
    Mockito.when(mockContainerConfiguration.getProgramArguments()).thenReturn(null);
    Image image =
        new BuildImageStep(
            mockBuildContext,
            mockProgressEventDispatcherFactory,
            baseImage,
            baseImageLayers,
            applicationLayers)
        .call();
    Assert.assertEquals(ImmutableList.of("baseImageEntrypoint"), image.getEntrypoint());
    Assert.assertEquals(ImmutableList.of("catalina.sh", "run"), image.getProgramArguments());
}
/**
 * @return the configured resolved address types, or {@code null} if unset
 */
@Nullable
public ResolvedAddressTypes resolvedAddressTypes() {
    return resolvedAddressTypes;
}
// The builder setter must reject a null argument with NullPointerException.
@Test
void resolvedAddressTypesBadValues() {
    assertThatExceptionOfType(NullPointerException.class)
        .isThrownBy(() -> builder.resolvedAddressTypes(null));
}
/**
 * Deserialization is intentionally unsupported for this type.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void readData(ObjectDataInput in) throws IOException {
    throw new UnsupportedOperationException();
}
// readData must always throw, regardless of the (here null) input.
@Test(expected = UnsupportedOperationException.class)
public void testReadData() throws Exception {
    localCacheWideEventData.readData(null);
}
/**
 * @return this recurring job's identifier
 */
@Override
public String getId() {
    return id;
}
// When no explicit id is given, the recurring job id falls back to the job's
// method signature — checked for static, ioc, and instance-method lambdas.
@Test
void ifNoIdGivenItUsesJobSignature() {
    TestService testService = new TestService();
    final RecurringJob recurringJob1 = aDefaultRecurringJob().withoutId().withJobDetails(() -> System.out.println("This is a test")).build();
    assertThat(recurringJob1.getId()).isEqualTo("java.lang.System.out.println(java.lang.String)");
    IocJobLambda<TestService> iocJobLambda = (x) -> x.doWork(3, 97693);
    final RecurringJob recurringJob2 = aDefaultRecurringJob().withoutId().withJobDetails(iocJobLambda).build();
    assertThat(recurringJob2.getId()).isEqualTo("org.jobrunr.stubs.TestService.doWork(java.lang.Integer,java.lang.Integer)");
    final RecurringJob recurringJob3 = aDefaultRecurringJob().withoutId().withJobDetails((JobLambda) testService::doWork).build();
    assertThat(recurringJob3.getId()).isEqualTo("org.jobrunr.stubs.TestService.doWork()");
}
/**
 * Merges scanner-related variables from the current process environment into
 * the given properties map (delegates to the two-argument overload).
 *
 * @param inputProperties the properties map to populate
 */
public static void processEnvVariables(Map<String, String> inputProperties) {
    final Map<String, String> systemEnv = System.getenv();
    processEnvVariables(inputProperties, systemEnv);
}
// The legacy SONARQUBE_SCANNER_PARAMS JSON blob must be expanded into individual entries.
@Test
void shouldProcessOldJsonEnvVariables() {
    var inputProperties = new HashMap<String, String>();
    EnvironmentConfig.processEnvVariables(inputProperties, Map.of("SONARQUBE_SCANNER_PARAMS", "{\"key1\":\"value1\", \"key2\":\"value2\"}"));
    assertThat(inputProperties).containsOnly(
        entry("key1", "value1"),
        entry("key2", "value2"));
}
/**
 * @return whether the given table exists in the given database, as reported by the metastore
 */
public boolean tableExists(String dbName, String tableName) {
    return metastore.tableExists(dbName, tableName);
}
// db1.tbl1 is present in the test metastore fixture.
@Test
public void testTableExists() {
    Assert.assertTrue(hmsOps.tableExists("db1", "tbl1"));
}
/**
 * Registers a listener for current-directory changes. Null listeners and
 * duplicate registrations are silently ignored.
 *
 * @param listener the listener to register, may be {@code null}
 */
public void addCurrentDirectoryChangedListener( CurrentDirectoryChangedListener listener ) {
    // Guard clause: nothing to do for null or already-registered listeners.
    if ( listener == null || currentDirectoryChangedListeners.contains( listener ) ) {
        return;
    }
    currentDirectoryChangedListeners.add( listener );
}
// Null registration is a no-op; a real listener is notified only on actual
// changes while registered, and never after removal.
@Test
public void testAddCurrentDirectoryChangedListener() {
    meta.fireNameChangedListeners( "a", "a" );
    meta.fireNameChangedListeners( "a", "b" );
    // Registering null must not throw and must not add anything.
    meta.addCurrentDirectoryChangedListener( null );
    meta.fireCurrentDirectoryChanged( "a", "b" );
    CurrentDirectoryChangedListener listener = mock( CurrentDirectoryChangedListener.class );
    meta.addCurrentDirectoryChangedListener( listener );
    meta.fireCurrentDirectoryChanged( "b", "a" );
    verify( listener, times( 1 ) ).directoryChanged( meta, "b", "a" );
    // Firing with identical old/new values produces no callback.
    meta.fireCurrentDirectoryChanged( "a", "a" );
    meta.removeCurrentDirectoryChangedListener( null );
    meta.removeCurrentDirectoryChangedListener( listener );
    meta.fireNameChangedListeners( "b", "a" );
    verifyNoMoreInteractions( listener );
}
/**
 * Converts Elasticsearch search hits into typed documents, preserving hit order.
 *
 * @param hits      the search hits to convert (may contain zero hits)
 * @param converter maps a hit's source map to a document instance
 * @param <D>       the document type
 * @return a mutable list of converted documents, empty if there are no hits
 */
public static <D extends BaseDoc> List<D> convertToDocs(SearchHits hits, Function<Map<String, Object>, D> converter) {
    SearchHit[] hitArray = hits.getHits();
    // Presize the list to the known hit count to avoid intermediate re-allocations.
    List<D> docs = new ArrayList<>(hitArray.length);
    for (SearchHit hit : hitArray) {
        docs.add(converter.apply(hit.getSourceAsMap()));
    }
    return docs;
}
// Zero hits must convert to an empty list, not null.
@Test
public void convertToDocs_empty() {
    SearchHits hits = new SearchHits(new SearchHit[] {}, new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0);
    List<BaseDoc> docs = EsUtils.convertToDocs(hits, IssueDoc::new);
    assertThat(docs).isEmpty();
}
/**
 * Creates the configured bucket if it does not already exist.
 * Checked MinIO exceptions are rethrown unchecked via {@code @SneakyThrows}.
 *
 * NOTE(review): the exists-then-create sequence is not atomic — a concurrent
 * creator could win the race between the two calls; confirm whether the MinIO
 * error in that case is acceptable here.
 */
@SneakyThrows
public void createBucket() {
    if (!isBucketExists()) {
        minioClient.makeBucket(MakeBucketArgs.builder().bucket(bucketName).build());
    }
}
// TODO(review): empty placeholder test — it passes vacuously. Add assertions
// (e.g. bucket exists after createBucket()) or remove it.
@Test
void createBucket() {
}
/**
 * Returns the absolute paths of files changed relative to the target branch,
 * or {@code null} when the change set cannot be computed (the movement-detection
 * helper returned {@code null}).
 *
 * @param targetBranchName branch to diff against
 * @param rootBaseDir      repository root used to resolve paths
 * @return absolute changed-file paths, or {@code null} if unavailable
 */
@CheckForNull
@Override
public Set<Path> branchChangedFiles(String targetBranchName, Path rootBaseDir) {
    // Null from the detection step propagates as null via orElse(null).
    return Optional.ofNullable((branchChangedFilesWithFileMovementDetection(targetBranchName, rootBaseDir)))
        .map(GitScmProvider::extractAbsoluteFilePaths)
        .orElse(null);
}
// On CircleCI the provider must diff against the remote target ref, not the local
// one: the local "master" is deliberately made identical to the analyzed branch,
// so a non-empty change set proves the remote ref was used.
@Test
public void branchChangedFiles_use_remote_target_ref_when_running_on_circle_ci() throws IOException, GitAPIException {
    when(system2.envVariable("CIRCLECI")).thenReturn("true");
    git.checkout().setName("b1").setCreateBranch(true).call();
    createAndCommitFile("file-b1");
    Path worktree2 = temp.newFolder().toPath();
    Git local = Git.cloneRepository()
        .setURI(worktree.toString())
        .setDirectory(worktree2.toFile())
        .call();
    // Make local master match analyzed branch, so if local ref is used then change files will be empty
    local.checkout().setCreateBranch(true).setName("master").setStartPoint("origin/b1").call();
    local.checkout().setName("b1").call();
    assertThat(newScmProvider().branchChangedFiles("master", worktree2))
        .containsOnly(worktree2.resolve("file-b1"));
    verifyNoInteractions(analysisWarnings);
}
/**
 * Decodes a truncate-log-entry cluster event from the buffer and appends a
 * human-readable rendition to {@code builder}. The wire layout is: log header,
 * eight little-endian longs (logLeadershipTermId, leadershipTermId,
 * candidateTermId, commitPosition, logPosition, appendPosition, oldPosition,
 * newPosition), an int memberId, then an ASCII state string.
 *
 * @param eventCode the cluster event code being dissected
 * @param buffer    the buffer holding the encoded event
 * @param offset    start of the encoded event within the buffer
 * @param builder   output for the decoded description
 */
static void dissectTruncateLogEntry(
    final ClusterEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder) {
    int absoluteOffset = offset;
    absoluteOffset += dissectLogHeader(CONTEXT, eventCode, buffer, absoluteOffset, builder);
    // Fields are read in wire order; each read advances the cursor.
    final long logLeadershipTermId = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long leadershipTermId = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long candidateTermId = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long commitPosition = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long logPosition = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long appendPosition = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long oldPosition = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final long newPosition = buffer.getLong(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_LONG;
    final int memberId = buffer.getInt(absoluteOffset, LITTLE_ENDIAN);
    absoluteOffset += SIZE_OF_INT;
    // Render memberId and state first, then the numeric fields in decode order.
    builder.append(": memberId=").append(memberId);
    builder.append(" state=");
    buffer.getStringAscii(absoluteOffset, builder, LITTLE_ENDIAN);
    builder.append(" logLeadershipTermId=").append(logLeadershipTermId);
    builder.append(" leadershipTermId=").append(leadershipTermId);
    builder.append(" candidateTermId=").append(candidateTermId);
    builder.append(" commitPosition=").append(commitPosition);
    builder.append(" logPosition=").append(logPosition);
    builder.append(" appendPosition=").append(appendPosition);
    builder.append(" oldPosition=").append(oldPosition);
    builder.append(" newPosition=").append(newPosition);
}
// Round-trip: encode a truncate-log entry field by field in wire order, then
// assert the dissector renders the exact expected string.
@Test
void dissectTruncateLogEntry() {
    final int offset = 10;
    int writeIndex = offset;
    writeIndex += internalEncodeLogHeader(buffer, offset, 100, 200, () -> 5_000_000_000L);
    buffer.putLong(writeIndex, 555, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putLong(writeIndex, 166, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putLong(writeIndex, 42, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putLong(writeIndex, 1024, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putLong(writeIndex, 998, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putLong(writeIndex, 1024, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putLong(writeIndex, 1200, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putLong(writeIndex, 800, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_LONG;
    buffer.putInt(writeIndex, 123, LITTLE_ENDIAN);
    writeIndex += SIZE_OF_INT;
    buffer.putStringAscii(writeIndex, "election state", LITTLE_ENDIAN);
    ClusterEventDissector.dissectTruncateLogEntry(TRUNCATE_LOG_ENTRY, buffer, offset, builder);
    assertEquals("[5.000000000] " + CONTEXT + ": " + TRUNCATE_LOG_ENTRY.name() + " [100/200]: memberId=123 " +
        "state=election state logLeadershipTermId=555 leadershipTermId=166 candidateTermId=42 " +
        "commitPosition=1024 logPosition=998 appendPosition=1024 oldPosition=1200 newPosition=800", builder.toString());
}
/**
 * Fails an application attempt by routing the request to the home sub-cluster
 * that owns the application. Validates the request, resolves the home
 * sub-cluster from the federation state store, forwards the call to that
 * cluster's RM, and records audit/metric entries for each outcome.
 *
 * NOTE(review): when the sub-cluster returns a null response, this method logs
 * an error but still records a success metric and returns null — confirm
 * whether that is intended or should count as a failure.
 *
 * @param request the fail-attempt request; must carry an attempt id with an application id
 * @return the sub-cluster's response (possibly null, see note above)
 * @throws YarnException if the request is malformed, the home sub-cluster is
 *         unknown, or the downstream call fails
 */
@Override
public FailApplicationAttemptResponse failApplicationAttempt(
    FailApplicationAttemptRequest request) throws YarnException, IOException {
    if (request == null || request.getApplicationAttemptId() == null
        || request.getApplicationAttemptId().getApplicationId() == null) {
        routerMetrics.incrFailAppAttemptFailedRetrieved();
        String msg = "Missing failApplicationAttempt request or applicationId " +
            "or applicationAttemptId information.";
        RouterAuditLogger.logFailure(user.getShortUserName(), FAIL_APPLICATIONATTEMPT, UNKNOWN,
            TARGET_CLIENT_RM_SERVICE, msg);
        RouterServerUtil.logAndThrowException(msg, null);
    }
    long startTime = clock.getTime();
    SubClusterId subClusterId = null;
    ApplicationAttemptId applicationAttemptId = request.getApplicationAttemptId();
    ApplicationId applicationId = applicationAttemptId.getApplicationId();
    try {
        // Resolve which sub-cluster owns this application.
        subClusterId = getApplicationHomeSubCluster(applicationId);
    } catch (YarnException e) {
        routerMetrics.incrFailAppAttemptFailedRetrieved();
        String msg = "ApplicationAttempt " + applicationAttemptId + " belongs to Application " +
            applicationId + " does not exist in FederationStateStore.";
        RouterAuditLogger.logFailure(user.getShortUserName(), FAIL_APPLICATIONATTEMPT, UNKNOWN,
            TARGET_CLIENT_RM_SERVICE, msg);
        RouterServerUtil.logAndThrowException(msg, e);
    }
    ApplicationClientProtocol clientRMProxy = getClientRMProxyForSubCluster(subClusterId);
    FailApplicationAttemptResponse response = null;
    try {
        // Forward the fail request to the owning sub-cluster's RM.
        response = clientRMProxy.failApplicationAttempt(request);
    } catch (Exception e) {
        routerMetrics.incrFailAppAttemptFailedRetrieved();
        String msg = "Unable to get the applicationAttempt report for " +
            applicationAttemptId + " to SubCluster " + subClusterId;
        RouterAuditLogger.logFailure(user.getShortUserName(), FAIL_APPLICATIONATTEMPT, UNKNOWN,
            TARGET_CLIENT_RM_SERVICE, msg);
        RouterServerUtil.logAndThrowException(msg, e);
    }
    if (response == null) {
        LOG.error("No response when attempting to retrieve the report of " +
            "the applicationAttempt {} to SubCluster {}.",
            request.getApplicationAttemptId(), subClusterId.getId());
    }
    long stopTime = clock.getTime();
    routerMetrics.succeededFailAppAttemptRetrieved(stopTime - startTime);
    RouterAuditLogger.logSuccess(user.getShortUserName(), FAIL_APPLICATIONATTEMPT,
        TARGET_CLIENT_RM_SERVICE, applicationId, subClusterId);
    return response;
}
// Covers both the null-request validation path and the full happy path:
// submit an app, wait for its attempt to schedule, then fail the attempt
// through the federation interceptor.
@Test
public void testFailApplicationAttempt() throws Exception {
    LOG.info("Test FederationClientInterceptor : Fail Application Attempt request.");
    // null request must be rejected with a descriptive YarnException.
    LambdaTestUtils.intercept(YarnException.class,
        "Missing failApplicationAttempt request " +
        "or applicationId or applicationAttemptId information.",
        () -> interceptor.failApplicationAttempt(null));
    // normal request
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    SubmitApplicationRequest request = mockSubmitApplicationRequest(appId);
    // Submit the application
    SubmitApplicationResponse response = interceptor.submitApplication(request);
    Assert.assertNotNull(response);
    Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId));
    SubClusterId subClusterId = interceptor.getApplicationHomeSubCluster(appId);
    Assert.assertNotNull(subClusterId);
    MockRM mockRM = interceptor.getMockRMs().get(subClusterId);
    mockRM.waitForState(appId, RMAppState.ACCEPTED);
    RMApp rmApp = mockRM.getRMContext().getRMApps().get(appId);
    mockRM.waitForState(rmApp.getCurrentAppAttempt().getAppAttemptId(),
        RMAppAttemptState.SCHEDULED);
    // Call GetApplicationAttempts
    GetApplicationAttemptsRequest attemptsRequest = GetApplicationAttemptsRequest.newInstance(appId);
    GetApplicationAttemptsResponse attemptsResponse = interceptor.getApplicationAttempts(attemptsRequest);
    Assert.assertNotNull(attemptsResponse);
    ApplicationAttemptId attemptId = attemptsResponse.getApplicationAttemptList().
        get(0).getApplicationAttemptId();
    FailApplicationAttemptRequest requestFailAppAttempt =
        FailApplicationAttemptRequest.newInstance(attemptId);
    FailApplicationAttemptResponse responseFailAppAttempt =
        interceptor.failApplicationAttempt(requestFailAppAttempt);
    Assert.assertNotNull(responseFailAppAttempt);
}
/**
 * Builds the gRPC query-client configuration from the Pinot connector config.
 * TLS key/trust-store settings are only propagated when a secure connection
 * is requested; otherwise plain text is enabled.
 */
@VisibleForTesting
static GrpcConfig extractGrpcQueryClientConfig(PinotConfig config)
{
    boolean secure = config.isUseSecureConnection();
    Map<String, Object> properties = new HashMap<>();
    properties.put(CONFIG_USE_PLAIN_TEXT, !secure);
    properties.put(CONFIG_MAX_INBOUND_MESSAGE_BYTES_SIZE, config.getStreamingServerGrpcMaxInboundMessageBytes());
    if (secure) {
        // Unset values are removed rather than stored as nulls.
        setOrRemoveProperty(properties, "tls.keystore.path", config.getGrpcTlsKeyStorePath());
        setOrRemoveProperty(properties, "tls.keystore.password", config.getGrpcTlsKeyStorePassword());
        setOrRemoveProperty(properties, "tls.keystore.type", config.getGrpcTlsKeyStoreType());
        setOrRemoveProperty(properties, "tls.truststore.path", config.getGrpcTlsTrustStorePath());
        setOrRemoveProperty(properties, "tls.truststore.password", config.getGrpcTlsTrustStorePassword());
        setOrRemoveProperty(properties, "tls.truststore.type", config.getGrpcTlsTrustStoreType());
    }
    return new GrpcConfig(properties);
}
// Verifies extractGrpcQueryClientConfig with a default PinotConfig: plain text
// is on, the inbound-message limit is the default, and no TLS store fields are
// populated (the TLS config object itself still exists).
@Test
public void testExtractTlsPropertyDefault() {
    PinotConfig config = new PinotConfig();
    GrpcConfig extracted = PinotPageSourceProvider.extractGrpcQueryClientConfig(config);
    assertEquals(extracted.getMaxInboundMessageSizeBytes(), DEFAULT_STREAMING_SERVER_GRPC_MAX_INBOUND_MESSAGE_BYTES);
    assertTrue(extracted.isUsePlainText());
    // TLS config object is always present; store fields stay unset by default.
    assertNotNull(extracted.getTlsConfig());
    assertNull(extracted.getTlsConfig().getKeyStorePath());
    assertNull(extracted.getTlsConfig().getKeyStorePassword());
    assertNull(extracted.getTlsConfig().getTrustStorePath());
    assertNull(extracted.getTlsConfig().getTrustStorePassword());
}
/**
 * Submits a DELETE operation for the metadata of the given service through the
 * consistency layer.
 */
public void deleteServiceMetadata(Service service) {
    MetadataOperation<ServiceMetadata> deleteOperation = buildMetadataOperation(service);
    // Serialize the operation payload before wrapping it in a raft write request.
    ByteString payload = ByteString.copyFrom(serializer.serialize(deleteOperation));
    WriteRequest writeRequest = WriteRequest.newBuilder()
            .setGroup(Constants.SERVICE_METADATA)
            .setOperation(DataOperation.DELETE.name())
            .setData(payload)
            .build();
    submitMetadataOperation(writeRequest);
}
// Verifies deleteServiceMetadata propagates a NacosRuntimeException when the
// metadata operation cannot be submitted.
// NOTE(review): the Mockito.verify calls are placed after the throwing call
// inside the assertThrows lambda, so they never execute — presumably dead
// code; consider moving them outside the lambda if they are meant to run.
@Test
void testDeleteServiceMetadata() {
    assertThrows(NacosRuntimeException.class, () -> {
        namingMetadataOperateService.deleteServiceMetadata(service);
        Mockito.verify(service).getNamespace();
        Mockito.verify(service).getGroup();
        Mockito.verify(service).getName();
    });
}
/**
 * Parses the given string into a SQL {@code Timestamp} by delegating to the
 * shared {@code PARSER}.
 *
 * @param str the textual timestamp to parse
 * @return the parsed timestamp
 * @throws KsqlException if the string cannot be parsed (see tests)
 */
public static Timestamp parseTimestamp(final String str) {
    return PARSER.parseToTimestamp(str);
}
// Malformed or incomplete timestamp strings must be rejected with KsqlException.
@Test
public void shouldNotParseTimestamp() {
    // Non-numeric input.
    assertThrows(KsqlException.class, () -> SqlTimeTypes.parseTimestamp("abc"));
    // Incomplete timestamp (missing seconds) is also rejected.
    assertThrows(KsqlException.class, () -> SqlTimeTypes.parseTimestamp("2019-03-17 03:00"));
}
/**
 * Looks up a property, falling back to regex-pattern keys when no exact match
 * exists. Pattern keys are tried in reverse lexicographic order so that more
 * specific patterns win over more generic ones.
 *
 * @param key the property key to resolve
 * @return the matched value, or {@code null} if neither an exact key nor any
 *     pattern key matches
 */
@Override
public String getProperty(String key) {
    // Exact match takes priority over any pattern.
    String value = super.getProperty(key);
    if (value != null) {
        return value;
    }
    // Sort the keys to solve the problem of matching priority.
    List<String> sortedKeyList = keySet().stream()
            .map(k -> (String) k)
            .sorted(Comparator.reverseOrder())
            .collect(Collectors.toList());
    // Find the first stored key whose regex matches the requested key.
    String keyPattern = sortedKeyList.stream()
            .filter(k -> {
                String matchingKey = k;
                // Keys starting with the wildcard marker are prefixed so they
                // form a valid regex before matching.
                if (matchingKey.startsWith(CommonConstants.ANY_VALUE)) {
                    matchingKey = CommonConstants.HIDE_KEY_PREFIX + matchingKey;
                }
                return Pattern.matches(matchingKey, key);
            })
            .findFirst()
            .orElse(null);
    return keyPattern == null ? null : super.getProperty(keyPattern);
}
// Verifies regex-based property resolution: more specific patterns win over
// generic ones, and wildcard patterns ("*.service") also resolve.
@Test
void testGetProperty() {
    RegexProperties regexProperties = new RegexProperties();
    regexProperties.setProperty("org.apache.dubbo.provider.*", "http://localhost:20880");
    regexProperties.setProperty("org.apache.dubbo.provider.config.*", "http://localhost:30880");
    regexProperties.setProperty("org.apache.dubbo.provider.config.demo", "http://localhost:40880");
    regexProperties.setProperty("org.apache.dubbo.consumer.*.demo", "http://localhost:50880");
    regexProperties.setProperty("*.service", "http://localhost:60880");
    // Generic provider pattern.
    Assertions.assertEquals(
            "http://localhost:20880", regexProperties.getProperty("org.apache.dubbo.provider.cluster"));
    // More specific config pattern wins.
    Assertions.assertEquals(
            "http://localhost:30880", regexProperties.getProperty("org.apache.dubbo.provider.config.cluster"));
    // Exact key wins over any pattern.
    Assertions.assertEquals(
            "http://localhost:40880", regexProperties.getProperty("org.apache.dubbo.provider.config.demo"));
    // Wildcard in the middle of the key.
    Assertions.assertEquals(
            "http://localhost:50880", regexProperties.getProperty("org.apache.dubbo.consumer.service.demo"));
    // Leading wildcard pattern.
    Assertions.assertEquals("http://localhost:60880", regexProperties.getProperty("org.apache.dubbo.service"));
}
/**
 * Derives the {@code Schema} for a protobuf message class by resolving its
 * descriptor and delegating to the descriptor-based overload.
 */
static Schema getSchema(Class<? extends Message> clazz) {
    return getSchema(ProtobufUtil.getDescriptorForClass(clazz));
}
// Verifies that a nested proto3 message translates to the expected schema.
@Test
public void testNestedSchema() {
    assertEquals(
        TestProtoSchemas.NESTED_SCHEMA,
        ProtoSchemaTranslator.getSchema(Proto3SchemaMessages.Nested.class));
}
/**
 * Renders the named Mustache template against the given scope, injecting a
 * locale-aware "trans" translation helper for the "i18n" bundle.
 *
 * @param name   template name passed to the Mustache factory
 * @param scope  caller-supplied variables (not mutated)
 * @param locale locale used by the translation helper
 * @return the rendered template as a string
 */
public String render(String name, Map<String, Object> scope, Locale locale) {
    // Copy the scope so the caller's map stays untouched.
    var enrichedScope = new HashMap<>(scope);
    enrichedScope.put("trans", new TranslateBundleFunction("i18n", locale));
    var compiled = mf.compile(name);
    var output = new StringWriter();
    compiled.execute(output, enrichedScope);
    return output.toString();
}
// Verifies render() compiles the named template and executes it; the factory
// and template are mocked, so only the interactions are asserted.
@Test
void render() {
    var mf = mock(MustacheFactory.class);
    var template = mock(Mustache.class);
    var sut = new TemplateRenderer(mf);
    var name = "mytemplate.mustache";
    var translateBundleFunction = new TranslateBundleFunction("i18n", Locale.US);
    var scope = new HashMap<String, Object>();
    scope.put("trans", translateBundleFunction);
    when(mf.compile(name)).thenReturn(template);
    // when
    var got = sut.render(name, scope, Locale.US);
    // then
    verify(mf).compile(name);
    verify(template).execute(any(), anyMap());
}
/**
 * Filters out enrichers that declare the same output key as another enricher.
 * When two or more enricher classes register an overlapping key, all of the
 * conflicting classes are dropped (none is kept) and a warning is logged for
 * each duplicated key.
 *
 * @param failureEnrichers the candidate enrichers
 * @return the enrichers whose output keys are unique across the set
 */
@VisibleForTesting
static Collection<FailureEnricher> filterInvalidEnrichers(
        final Set<FailureEnricher> failureEnrichers) {
    // Group enricher classes by each output key they declare.
    final Map<String, Set<Class<?>>> enrichersByKey = new HashMap<>();
    failureEnrichers.forEach(
            enricher ->
                    enricher.getOutputKeys()
                            .forEach(
                                    enricherKey ->
                                            enrichersByKey
                                                    .computeIfAbsent(
                                                            enricherKey,
                                                            ignored -> new HashSet<>())
                                                    .add(enricher.getClass())));
    final Set<Class<?>> invalidEnrichers =
            enrichersByKey.entrySet().stream()
                    .filter(entry -> entry.getValue().size() > 1)
                    .flatMap(
                            entry -> {
                                // Fixed log message: the original mixed a printf-style "%s"
                                // (never substituted by SLF4J) with "{}" and contained the
                                // typo "have have"; now both the duplicated key and the
                                // conflicting class names are actually logged.
                                LOG.warn(
                                        "Following enrichers have registered duplicate output key [{}] and will be ignored: {}.",
                                        entry.getKey(),
                                        entry.getValue().stream()
                                                .map(Class::getName)
                                                .collect(Collectors.joining(", ")));
                                return entry.getValue().stream();
                            })
                    .collect(Collectors.toSet());
    // Keep only enrichers whose class is not involved in any key conflict.
    return failureEnrichers.stream()
            .filter(enricher -> !invalidEnrichers.contains(enricher.getClass()))
            .collect(Collectors.toList());
}
// Two enrichers with an overlapping output key ("key2") must both be dropped;
// only the enricher with unique keys survives filtering.
@Test
public void testValidatedEnrichersWithInvalidEntries() {
    // create two enrichers with overlapping keys and a valid one -- must be different classes
    final FailureEnricher validEnricher = new TestEnricher("validKey");
    final FailureEnricher firstOverlapEnricher = new AnotherTestEnricher("key1", "key2");
    final FailureEnricher secondOverlapEnricher = new AndAnotherTestEnricher("key2", "key3");
    final Set<FailureEnricher> enrichers =
            new HashSet<FailureEnricher>() {
                {
                    add(validEnricher);
                    add(firstOverlapEnricher);
                    add(secondOverlapEnricher);
                }
            };
    final Collection<FailureEnricher> validatedEnrichers =
            FailureEnricherUtils.filterInvalidEnrichers(enrichers);
    // Only one enricher is valid
    assertThat(validatedEnrichers).hasSize(1);
}
/**
 * Initializes the tool and processes the namespace, mapping failures to exit
 * codes: IllegalArgumentException -> ILLEGAL_ARGUMENTS, IOException ->
 * IO_EXCEPTION. The dispatcher is always shut down, even on failure.
 */
private ExitStatus run() {
    try {
        init();
        return new Processor().processNamespace().getExitStatus();
    } catch (IllegalArgumentException e) {
        // NOTE(review): only printed to stdout (not logged), unlike the
        // IOException branch which does both — presumably intentional for a
        // CLI usage error; confirm before unifying.
        System.out.println(e + ". Exiting ...");
        return ExitStatus.ILLEGAL_ARGUMENTS;
    } catch (IOException e) {
        System.out.println(e + ". Exiting ...");
        LOG.error(e + ". Exiting ...");
        return ExitStatus.IO_EXCEPTION;
    } finally {
        // Always release the dispatcher's threads.
        dispatcher.shutdownNow();
    }
}
// Runs the Mover against a federated HA mini-cluster with two nameservices:
// one namespace is moved to ARCHIVE ("COLD"), the other to DISK ("HOT"), and
// both block placements are verified afterwards.
@Test(timeout = 300000)
public void testWithFederatedHACluster() throws Exception{
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    final MiniDFSCluster cluster = new MiniDFSCluster
        .Builder(conf)
        .storageTypes(new StorageType[]{StorageType.DISK, StorageType.ARCHIVE})
        .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
        .numDataNodes(4).build();
    DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
    try {
        Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
        Iterator<URI> iter = namenodes.iterator();
        URI nn1 = iter.next();
        URI nn2 = iter.next();
        // Activate one NN per nameservice (indices 0 and 2 in the topology).
        cluster.transitionToActive(0);
        cluster.transitionToActive(2);
        final String file = "/test/file";
        Path dir = new Path ("/test");
        final DistributedFileSystem dfs1 = (DistributedFileSystem) FileSystem
            .get(nn1, conf);
        final DistributedFileSystem dfs2 = (DistributedFileSystem) FileSystem
            .get(nn2, conf);
        setupStoragePoliciesAndPaths(dfs1, dfs2, dir, file);
        //Changing Storage Policies
        dfs1.setStoragePolicy(dir, "COLD");
        dfs2.setStoragePolicy(dir, "HOT");
        int rc = ToolRunner.run(conf, new Mover.Cli(),
            new String[] {"-p", nn1 + dir.toString(), nn2 + dir.toString()});
        Assert.assertEquals("Movement to DISK should be successful", 0, rc);
        // "COLD" namespace ends up on ARCHIVE, "HOT" namespace on DISK.
        waitForLocatedBlockWithArchiveStorageType(dfs1, file, 3);
        waitForLocatedBlockWithDiskStorageType(dfs2, file, 3);
    } finally {
        cluster.shutdown();
    }
}
/**
 * Looks up releases by their release keys.
 *
 * @param releaseKeys the keys to search for
 * @return all releases whose release key is contained in the given set
 */
public List<Release> findByReleaseKeys(Set<String> releaseKeys) {
    return releaseRepository.findByReleaseKeyIn(releaseKeys);
}
// findByReleaseKeys must pass the key set to the repository unchanged and
// return the repository's result as-is.
@Test
public void testFindByReleaseKeys() throws Exception {
    Release someRelease = mock(Release.class);
    Release anotherRelease = mock(Release.class);
    String someReleaseKey = "key1";
    String anotherReleaseKey = "key2";
    List<Release> someReleases = Lists.newArrayList(someRelease, anotherRelease);
    Set<String> someReleaseKeys = Sets.newHashSet(someReleaseKey, anotherReleaseKey);
    when(releaseRepository.findByReleaseKeyIn(someReleaseKeys)).thenReturn(someReleases);
    List<Release> result = releaseService.findByReleaseKeys(someReleaseKeys);
    assertEquals(someReleases, result);
}
/**
 * Removes a destination after verifying the calling subject is authorized to
 * perform the "remove" action on it; otherwise the authorization check throws.
 */
@Override
public void removeDestination(ConnectionContext context, ActiveMQDestination destination, long timeout) throws Exception {
    // Authorization happens before any delegation to the broker chain.
    final DestinationAction removeAction = new DestinationAction(context, destination, "remove");
    assertAuthorized(removeAction);
    super.removeDestination(context, destination, timeout);
}
// An unauthorized subject must not be able to remove a destination; the
// authorization filter is expected to throw UnauthorizedException.
@Test(expected=UnauthorizedException.class)
public void testRemoveDestinationNotAuthorized() throws Exception {
    String name = "myTopic";
    ActiveMQDestination dest = new ActiveMQTopic(name);
    Subject subject = new PermsSubject();
    ConnectionContext context = createContext(subject);
    filter.removeDestination(context, dest, 1000);
}
/**
 * Entry point of the function worker: parses CLI args, optionally prints help
 * or generated docs, loads the worker config and starts the worker.
 */
public static void main(String[] args) throws Exception {
    WorkerArguments workerArguments = new WorkerArguments();
    CommandLine commander = new CommandLine(workerArguments);
    commander.setCommandName("FunctionWorkerStarter");
    commander.parseArgs(args);
    if (workerArguments.help) {
        commander.usage(commander.getOut());
        return;
    }
    if (workerArguments.generateDocs) {
        // -g: emit generated CLI documentation instead of starting the worker.
        CmdGenerateDocs cmd = new CmdGenerateDocs("pulsar");
        cmd.addCommand("functions-worker", commander);
        cmd.run(null);
        return;
    }
    // Fall back to a default config when no file is given.
    WorkerConfig workerConfig;
    if (isBlank(workerArguments.configFile)) {
        workerConfig = new WorkerConfig();
    } else {
        workerConfig = WorkerConfig.load(workerArguments.configFile);
    }
    final Worker worker = new Worker(workerConfig);
    try {
        worker.start();
    } catch (Throwable th) {
        log.error("Encountered error in function worker.", th);
        worker.stop();
        // Force the JVM down; a half-started worker must not linger.
        ShutdownUtil.triggerImmediateForcefulShutdown();
    }
    // NOTE(review): the shutdown hook is registered after start(); if start()
    // threw, the process is already being forced down above — confirm the
    // intended ordering.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        log.info("Stopping function worker service...");
        worker.stop();
    }));
}
// Verifies that running main with "-g" prints generated docs containing every
// @Option name declared on WorkerArguments. stdout is captured and restored.
@Test
public void testMainGenerateDocs() throws Exception {
    PrintStream oldStream = System.out;
    try {
        ByteArrayOutputStream baoStream = new ByteArrayOutputStream();
        System.setOut(new PrintStream(baoStream));
        Class argumentsClass =
            Class.forName("org.apache.pulsar.functions.worker.FunctionWorkerStarter$WorkerArguments");
        FunctionWorkerStarter.main(new String[]{"-g"});
        String message = baoStream.toString();
        Field[] fields = argumentsClass.getDeclaredFields();
        for (Field field : fields) {
            boolean fieldHasAnno = field.isAnnotationPresent(Option.class);
            if (fieldHasAnno) {
                Option fieldAnno = field.getAnnotation(Option.class);
                String[] names = fieldAnno.names();
                String nameStr = Arrays.asList(names).toString();
                // Strip the surrounding "[" and "]" of List.toString().
                nameStr = nameStr.substring(1, nameStr.length() - 1);
                assertTrue(message.indexOf(nameStr) > 0);
            }
        }
    } finally {
        // Always restore the original stdout.
        System.setOut(oldStream);
    }
}
/**
 * Builds Vert.x PFX trust-store options from the given SSL properties.
 *
 * @param props the SSL configuration properties
 * @return PFX options when a trust-store location is configured, otherwise empty
 */
public static Optional<PfxOptions> getPfxTrustStoreOptions(final Map<String, String> props) {
    final String location = getTrustStoreLocation(props);
    final String password = getTrustStorePassword(props);
    // No location configured means no trust-store options at all.
    return Strings.isNullOrEmpty(location)
        ? Optional.empty()
        : Optional.of(buildPfxOptions(location, password));
}
// A trust-store location without a password must still yield PFX options,
// with an empty-string password.
@Test
public void shouldBuildTrustStorePfxOptionsWithPathOnly() {
    // When
    final Optional<PfxOptions> pfxOptions = VertxSslOptionsFactory.getPfxTrustStoreOptions(
        ImmutableMap.of(
            SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "path"
        )
    );
    // Then
    assertThat(pfxOptions.get().getPath(), is("path"));
    assertThat(pfxOptions.get().getPassword(), is(""));
}
/**
 * Converts a duration in milliseconds into a human-readable age label by
 * deriving each coarser time unit and delegating to the message selector.
 */
public static Result label(long durationInMillis) {
    // Each unit is derived from the previous, finer-grained one.
    double seconds = durationInMillis / 1000.0;
    double minutes = seconds / 60;
    double hours = minutes / 60;
    double days = hours / 24;
    double years = days / 365;
    return getMessage(seconds, minutes, hours, days, years);
}
// A duration of roughly one minute maps to the "duration.minute" message key
// with no numeric value.
@Test
public void age_in_minute() {
    DurationLabel.Result result = DurationLabel.label(now() - ago(MINUTE));
    assertThat(result.key()).isEqualTo("duration.minute");
    assertThat(result.value()).isNull();
}
/**
 * Grants a suspension request for the application: first verifies the
 * per-cluster policy, then moves storage nodes to MAINTENANCE via the Cluster
 * Controller, and finally marks all NO_REMARKS hosts as ALLOWED_TO_BE_DOWN.
 * The ordering (policy check -> storage nodes -> host status) is verified by
 * tests; do not reorder.
 *
 * @throws HostStateChangeDeniedException if the cluster policy denies the
 *     group going down
 */
@Override
public SuspensionReasons grantSuspensionRequest(OrchestratorContext context, ApplicationApi application)
        throws HostStateChangeDeniedException {
    var suspensionReasons = new SuspensionReasons();
    // Apply per-cluster policy
    for (ClusterApi cluster : application.getClusters()) {
        suspensionReasons.mergeWith(clusterPolicy.verifyGroupGoingDownIsFine(cluster));
    }
    // Ask Cluster Controller to set storage nodes in maintenance, unless the node is already allowed
    // to be down (or permanently down) in case they are guaranteed to be in maintenance already.
    for (StorageNode storageNode : application.getNoRemarksStorageNodesInGroupInClusterOrder()) {
        storageNode.setStorageNodeState(context, ClusterControllerNodeState.MAINTENANCE);
    }
    // Ensure all nodes in the group are marked as allowed to be down
    for (HostName hostName : application.getNodesInGroupWithStatus(HostStatus.NO_REMARKS)) {
        application.setHostState(context, hostName, HostStatus.ALLOWED_TO_BE_DOWN);
    }
    return suspensionReasons;
}
// Verifies grantSuspensionRequest performs its phases in strict order:
// cluster policy checks, then storage-node MAINTENANCE transitions, then
// host-status updates to ALLOWED_TO_BE_DOWN.
// NOTE(review): storageNode1.hostName() is stubbed twice (hostName1 then
// hostName3 — the second overrides the first) and storageNode3.hostName() is
// never stubbed; presumably hostName() is not consulted by the code under
// test — confirm before relying on these stubs.
@Test
public void testGrantSuspension() throws HostStateChangeDeniedException {
    final HostedVespaClusterPolicy clusterPolicy = mock(HostedVespaClusterPolicy.class);
    when(clusterPolicy.verifyGroupGoingDownIsFine(any())).thenReturn(SuspensionReasons.nothingNoteworthy());
    final HostedVespaPolicy policy =
        new HostedVespaPolicy(clusterPolicy, clientFactory, applicationApiFactory, flagSource);
    final ApplicationApi applicationApi = mock(ApplicationApi.class);
    when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("tenant:app:default"));
    ClusterApi clusterApi1 = mock(ClusterApi.class);
    ClusterApi clusterApi2 = mock(ClusterApi.class);
    ClusterApi clusterApi3 = mock(ClusterApi.class);
    List<ClusterApi> clusterApis = List.of(clusterApi1, clusterApi2, clusterApi3);
    when(applicationApi.getClusters()).thenReturn(clusterApis);
    StorageNode storageNode1 = mock(StorageNode.class);
    HostName hostName1 = new HostName("storage-1");
    when(storageNode1.hostName()).thenReturn(hostName1);
    HostName hostName2 = new HostName("host-2");
    StorageNode storageNode3 = mock(StorageNode.class);
    HostName hostName3 = new HostName("storage-3");
    when(storageNode1.hostName()).thenReturn(hostName3);
    List<StorageNode> upStorageNodes = List.of(storageNode1, storageNode3);
    when(applicationApi.getNoRemarksStorageNodesInGroupInClusterOrder()).thenReturn(upStorageNodes);
    // setHostState
    List<HostName> noRemarksHostNames = List.of(hostName1, hostName2, hostName3);
    when(applicationApi.getNodesInGroupWithStatus(HostStatus.NO_REMARKS)).thenReturn(noRemarksHostNames);
    InOrder order = inOrder(applicationApi, clusterPolicy, storageNode1, storageNode3);
    OrchestratorContext context = mock(OrchestratorContext.class);
    policy.grantSuspensionRequest(context, applicationApi);
    // Phase 1: policy verification per cluster.
    order.verify(applicationApi).getClusters();
    order.verify(clusterPolicy).verifyGroupGoingDownIsFine(clusterApi1);
    order.verify(clusterPolicy).verifyGroupGoingDownIsFine(clusterApi2);
    order.verify(clusterPolicy).verifyGroupGoingDownIsFine(clusterApi3);
    // Phase 2: storage nodes moved to MAINTENANCE.
    order.verify(applicationApi).getNoRemarksStorageNodesInGroupInClusterOrder();
    order.verify(storageNode1).setStorageNodeState(context, ClusterControllerNodeState.MAINTENANCE);
    order.verify(storageNode3).setStorageNodeState(context, ClusterControllerNodeState.MAINTENANCE);
    // Phase 3: all NO_REMARKS hosts marked ALLOWED_TO_BE_DOWN.
    order.verify(applicationApi).getNodesInGroupWithStatus(HostStatus.NO_REMARKS);
    order.verify(applicationApi).setHostState(context, hostName1, HostStatus.ALLOWED_TO_BE_DOWN);
    order.verify(applicationApi).setHostState(context, hostName2, HostStatus.ALLOWED_TO_BE_DOWN);
    order.verify(applicationApi).setHostState(context, hostName3, HostStatus.ALLOWED_TO_BE_DOWN);
    order.verifyNoMoreInteractions();
}
@Override public V remove() { if (isEmpty()) { return null; } if (count() == 1) { _count--; TreeNode<P, V> temporal = _root; _root = null; return temporal.get_value(); } //if has more than 1 element // Obtener el binario de la cantidad de nodos byte[] movimientos = convertToBinary(_count); // Encontrar el nodo a eliminar //Reccorer de acuerdo a los movimientos int index = 1; TreeNode<P, V> actual = _root; TreeNode<P, V> nodoToBeDeleted = null; while (index < movimientos.length) { if (index == movimientos.length - 1) { if (movimientos[index] == 0) { // Inserto en el hijo izquierdo nodoToBeDeleted = actual.get_left(); } else { //Inserto en el hijo derecho nodoToBeDeleted = actual.get_right(); } } else { if (movimientos[index] == 0) { // Inserto en el hijo izquierdo actual = actual.get_left(); } else { //Inserto en el hijo derecho actual = actual.get_right(); } } index++; } // Hacer Swap de la hoja con la raiz P tempPriority = nodoToBeDeleted.get_priority(); V tempValue = nodoToBeDeleted.get_value(); nodoToBeDeleted.set_priority(_root.get_priority()); nodoToBeDeleted.set_value(_root.get_value()); _root.set_priority(tempPriority); _root.set_value(tempValue); // Eliminar el nodo hoja tempPriority = nodoToBeDeleted.get_priority(); tempValue = nodoToBeDeleted.get_value(); TreeNode<P, V> parent = nodoToBeDeleted.get_parent(); if (parent.get_left() == nodoToBeDeleted) parent.set_left(null); else parent.set_right(null); // buscar el lugar de insercion actual = _root; while (actual != null) { boolean actualHasLeftChild = actual.get_left() != null; boolean actualHasRightChild = actual.get_right() != null; if (actualHasLeftChild && actualHasRightChild) { //Tiene a los 2 hijos //Si tiene a los dos hijos verifico quien es el mayor int result = _priorityComparator.compare(actual.get_left().get_priority(), actual.get_right().get_priority()); if (result == 0) { //Son iguales result = _priorityComparator.compare(actual.get_priority(), actual.get_left().get_priority()); if (result < 
0) { P tempPriority2 = actual.get_priority(); V tempValue2 = actual.get_value(); actual.set_priority(actual.get_left().get_priority()); actual.set_value(actual.get_left().get_value()); actual.get_left().set_priority(tempPriority2); actual.get_left().set_value(tempValue2); actual = actual.get_left(); } else { break; } } else if (result > 0){ //Hijo izquierdo mayor result = _priorityComparator.compare(actual.get_priority(), actual.get_left().get_priority()); if (result < 0) { P tempPriority2 = actual.get_priority(); V tempValue2 = actual.get_value(); actual.set_priority(actual.get_left().get_priority()); actual.set_value(actual.get_left().get_value()); actual.get_left().set_priority(tempPriority2); actual.get_left().set_value(tempValue2); actual = actual.get_left(); } else { break; } } else { result = _priorityComparator.compare(actual.get_priority(), actual.get_right().get_priority()); if (result < 0) { P tempPriority2 = actual.get_priority(); V tempValue2 = actual.get_value(); actual.set_priority(actual.get_right().get_priority()); actual.set_value(actual.get_right().get_value()); actual.get_right().set_priority(tempPriority2); actual.get_right().set_value(tempValue2); actual = actual.get_right(); } else { break; } } } else if (!actualHasLeftChild && !actualHasRightChild){ //No tiene hijos break; } else if (actualHasLeftChild){ //Solo tiene izquierdo int result = _priorityComparator.compare(actual.get_priority(), actual.get_left().get_priority()); if (result < 0) { P tempPriority2 = actual.get_priority(); V tempValue2 = actual.get_value(); actual.set_priority(actual.get_left().get_priority()); actual.set_value(actual.get_left().get_value()); actual.get_left().set_priority(tempPriority2); actual.get_left().set_value(tempValue2); } else { break; } } else { //Solo tiene derecho int result = _priorityComparator.compare(actual.get_priority(), actual.get_right().get_priority()); if (result < 0) { P tempPriority2 = actual.get_priority(); V tempValue2 = actual.get_value(); 
actual.set_priority(actual.get_right().get_priority()); actual.set_value(actual.get_right().get_value()); actual.get_right().set_priority(tempPriority2); actual.get_right().set_value(tempValue2); } else { break; } } } // reducir la cantidad _count--; return tempValue; }
// remove() on an empty heap must return null rather than throw.
@Test
public void testInsertAndRemoveWithEmptyHeap() {
    // Priority comparator for the heap under test.
    Comparator<Integer> comparator = new ComparadorNumeros<>();
    // Heap backed by the iterative binary tree implementation.
    HeapUsingIterativeBinaryTree<Integer, String> heap = new HeapUsingIterativeBinaryTree<>(comparator);
    // Removing from an empty heap yields null.
    assertNull(heap.remove());
}
/**
 * Reads the attributes of an S3 path. Handles four shapes of input: the root
 * (empty attributes), a bucket (region lookup), an in-progress multipart
 * upload (size from the upload offset), and a regular object (versioned HEAD,
 * delete-marker detection, latest-version determination, and common-prefix
 * fallback for directory placeholders).
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(containerService.isContainer(file)) {
        // Buckets only carry a region attribute.
        final PathAttributes attributes = new PathAttributes();
        if(log.isDebugEnabled()) {
            log.debug(String.format("Read location for bucket %s", file));
        }
        attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
        return attributes;
    }
    if(file.getType().contains(Path.Type.upload)) {
        // Pending multipart upload: report the bytes uploaded so far.
        final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
        if(append.append) {
            return new PathAttributes().withSize(append.offset);
        }
        throw new NotfoundException(file.getAbsolute());
    }
    try {
        PathAttributes attr;
        final Path bucket = containerService.getContainer(file);
        try {
            // HEAD for the specific version (or current if no version id set).
            attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
                file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
        }
        catch(ServiceException e) {
            switch(e.getResponseCode()) {
                case 405:
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Mark file %s as delete marker", file));
                    }
                    // Only DELETE method is allowed for delete markers
                    attr = new PathAttributes();
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
                    attr.setDuplicate(true);
                    return attr;
            }
            throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
        }
        if(StringUtils.isNotBlank(attr.getVersionId())) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
            }
            // Determine if latest version
            try {
                // HEAD without version id returns the latest version's id.
                final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
                if(null != latest) {
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Found later version %s for %s", latest, file));
                    }
                    // Duplicate if not latest version
                    attr.setDuplicate(!latest.equals(attr.getVersionId()));
                }
            }
            catch(ServiceException e) {
                final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
                if(failure instanceof NotfoundException) {
                    // Current version not found: the requested version is not the latest.
                    attr.setDuplicate(true);
                }
                else {
                    throw failure;
                }
            }
        }
        return attr;
    }
    catch(NotfoundException e) {
        if(file.isDirectory()) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return PathAttributes.EMPTY;
            }
            catch(NotfoundException n) {
                throw e;
            }
            // Found common prefix
            return PathAttributes.EMPTY;
        }
        throw e;
    }
}
// The root path must resolve to PathAttributes.EMPTY without any remote call.
@Test
public void testFindRoot() throws Exception {
    final S3AttributesFinderFeature f =
        new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session));
    assertEquals(PathAttributes.EMPTY, f.find(new Path("/", EnumSet.of(Path.Type.directory))));
}
/**
 * Runs all configured component visitors over the component tree and logs how
 * long each visitor took (duration collection is enabled in debug mode).
 */
@Override
public void execute(ComputationStep.Context context) {
    final VisitorsCrawler crawler = new VisitorsCrawler(visitors, LOGGER.isDebugEnabled());
    crawler.visit(treeRootHolder.getRoot());
    logVisitorExecutionDurations(visitors, crawler);
}
// A path-aware visitor must aggregate file measures up the tree: two files
// with value 1 yield 2 on their directory and on the root.
@Test
public void execute_with_path_aware_visitor() {
    ExecuteVisitorsStep underTest =
        new ExecuteVisitorsStep(treeRootHolder, singletonList(new TestPathAwareVisitor()));
    measureRepository.addRawMeasure(FILE_1_REF, NCLOC_KEY, newMeasureBuilder().create(1));
    measureRepository.addRawMeasure(FILE_2_REF, NCLOC_KEY, newMeasureBuilder().create(1));
    underTest.execute(new TestComputationStepContext());
    assertThat(measureRepository.getAddedRawMeasure(FILE_1_REF, TEST_METRIC_KEY).get().getIntValue()).isOne();
    assertThat(measureRepository.getAddedRawMeasure(FILE_2_REF, TEST_METRIC_KEY).get().getIntValue()).isOne();
    // Aggregated values on the directory and root.
    assertThat(measureRepository.getAddedRawMeasure(DIRECTORY_REF, TEST_METRIC_KEY).get().getIntValue()).isEqualTo(2);
    assertThat(measureRepository.getAddedRawMeasure(ROOT_REF, TEST_METRIC_KEY).get().getIntValue()).isEqualTo(2);
}
/**
 * Returns the partitions owned by the given member, or an empty list when the
 * partition table has not been initialized yet.
 */
@Override
public List<Integer> getMemberPartitionsIfAssigned(Address target) {
    return partitionStateManager.isInitialized()
            ? getMemberPartitions(target)
            : Collections.emptyList();
}
// Before partition assignment has happened, the query must return an empty
// list instead of triggering assignment.
@Test
public void test_getMemberPartitionsIfAssigned_whenNotInitialized() {
    List<Integer> partitions = partitionService.getMemberPartitionsIfAssigned(getAddress(instance));
    assertThat(partitions).isEmpty();
}
/**
 * Renders the qualified table as "schema.table", or just the table name when
 * no schema is set (the identifier's toString() returns null in that case).
 */
@Override
public String toString() {
    String schema = schemaName.toString();
    String table = tableName.toString();
    return schema == null ? table : String.join(".", schema, table);
}
// With a null schema identifier, toString() must render only the table name.
@Test
void assertToString() {
    CaseInsensitiveQualifiedTable actual =
        new CaseInsensitiveQualifiedTable(new CaseInsensitiveIdentifier(null), new CaseInsensitiveIdentifier("t_order"));
    assertThat(actual.toString(), is("t_order"));
}