focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Integer can never represent NaN; only floating-point wrappers (Float/Double) can,
// so this aggregator unconditionally reports false (including for null — never dereferenced).
@Override protected boolean isNan(Integer number) { // NaN never applies here because only types like Float and Double have NaN return false; }
// isNan is hard-coded to false for Integer aggregation: every input — negatives, zero,
// both int extremes, and even null — must report false.
@Test void testIsNan() { IntegerSummaryAggregator ag = new IntegerSummaryAggregator(); // always false for Integer assertThat(ag.isNan(-1)).isFalse(); assertThat(ag.isNan(0)).isFalse(); assertThat(ag.isNan(23)).isFalse(); assertThat(ag.isNan(Integer.MAX_VALUE)).isFalse(); assertThat(ag.isNan(Integer.MIN_VALUE)).isFalse(); assertThat(ag.isNan(null)).isFalse(); }
// Extracts the value bytes of the outer TLV in a raw ASN.1 encoding:
// skip the tag, read the length, then read exactly that many value bytes.
// The try-with-resources closes the stream even when parsing throws.
public static byte[] getValue(byte[] raw) { try (final Asn1InputStream is = new Asn1InputStream(raw)) { is.readTag(); return is.read(is.readLength()); } }
// An extended-length marker (0x82 promises two more length bytes) followed by only one
// byte must fail with Asn1Exception rather than reading past the buffer.
@Test public void shouldThrowExceptionIfExtendedLengthDoesNotExtend() { thrown.expect(Asn1Exception.class); Asn1Utils.getValue(new byte[] { 0x10, (byte) 0x82, 1 }); }
// GET endpoint returning the auth-path details for an auth id.
// Bean validation rejects blank ids and ids not present per AuthPathMapper.existedByAuthId;
// Shiro's @RequiresPermissions guards access.
@GetMapping("/detailPath") @RequiresPermissions("system:authen:editResourceDetails") public ShenyuAdminResult detailPath(@RequestParam("id") @Existed(message = "auth path not existed", providerMethodName = "existedByAuthId", provider = AuthPathMapper.class) @NotBlank final String authId) { return ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, appAuthService.detailPath(authId)); }
// Happy path for GET /appAuth/detailPath: the mocked service returns one AuthPathVO,
// and the JSON response must carry the success message plus the VO's path at data[0].
@Test public void testDetailPath() throws Exception { final AuthPathVO authPathVO = new AuthPathVO(); authPathVO.setId("0001"); authPathVO.setAppName("testApp"); authPathVO.setPath("/test"); given(this.appAuthService.detailPath("0001")).willReturn(Collections.singletonList(authPathVO)); this.mockMvc.perform(MockMvcRequestBuilders.get("/appAuth/detailPath") .param("id", "0001")) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS))) .andExpect(jsonPath("$.data[0].path", is(authPathVO.getPath()))) .andReturn(); }
// Builds pool properties from a data-source configuration: pairs the configured
// data-source class name with the property map derived by createProperties.
public static DataSourcePoolProperties create(final DataSourceConfiguration config) { return new DataSourcePoolProperties(config.getConnection().getDataSourceClassName(), createProperties(config)); }
// Delegates the detailed field assertions to assertParameter for an instance built
// from a fixture configuration.
@Test void assertCreateWithDataSourceConfiguration() { assertParameter(DataSourcePoolPropertiesCreator.create(createDataSourceConfiguration())); }
// Factory entry point for GCS under-file-systems.
// Version 2 is selected purely by config; version 1 additionally requires credentials.
// Checked IOException/ServiceException are rethrown unchecked via Throwables.propagate
// (NOTE(review): propagate is deprecated in recent Guava — consider throwIfUnchecked + wrap).
// Falls through to InvalidArgumentRuntimeException when neither path applies.
@Override public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) { Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:" + " URI path should not be null"); if (conf.getInt(PropertyKey.UNDERFS_GCS_VERSION) == GCS_VERSION_TWO) { try { return GCSV2UnderFileSystem.createInstance(new AlluxioURI(path), conf); } catch (IOException e) { LOG.error("Failed to create GCSV2UnderFileSystem.", e); throw Throwables.propagate(e); } } else { if (checkGCSCredentials(conf)) { try { return GCSUnderFileSystem.createInstance(new AlluxioURI(path), conf); } catch (ServiceException e) { LOG.error("Failed to create GCSUnderFileSystem.", e); throw Throwables.propagate(e); } } } String err = "GCS credentials or version not available, cannot create GCS Under File System."; throw new InvalidArgumentRuntimeException(err); }
/**
 * With GCS credentials unset, factory creation must fail with a RuntimeException
 * carrying the "credentials or version not available" message.
 *
 * <p>The older try/catch-and-inspect block duplicated exactly the assertThrows check
 * that follows it (a leftover from an assertThrows migration), so it has been removed.
 */
@Test
public void createInstanceWithoutCredentials() {
  Configuration.unset(PropertyKey.GCS_ACCESS_KEY);
  Configuration.unset(PropertyKey.GCS_SECRET_KEY);
  mAlluxioConf = Configuration.global();
  mConf = UnderFileSystemConfiguration.defaults(mAlluxioConf);

  Exception e = Assert.assertThrows(RuntimeException.class, () -> mFactory.create(mPath, mConf));
  Assert.assertTrue(e.getMessage().contains("GCS credentials or version not available, "
      + "cannot create GCS Under File System."));
}
/**
 * Snapshots the push-notification-relevant state of a device.
 *
 * <p>Returns {@code MISSING_DEVICE_STATE} unless both account and device are present.
 * Push-token classification gives APNs precedence over FCM; {@code null} when neither
 * token is set.
 */
@Override
public DeviceLastSeenState getState(@Nullable final Account account, @Nullable final Device device) {
  // Guard clause: no state can be derived without both an account and a device.
  if (account == null || device == null) {
    return DeviceLastSeenState.MISSING_DEVICE_STATE;
  }

  DeviceLastSeenState.PushTokenType pushTokenType = null;
  if (StringUtils.isNotBlank(device.getApnId())) {
    pushTokenType = DeviceLastSeenState.PushTokenType.APNS;
  } else if (StringUtils.isNotBlank(device.getGcmId())) {
    pushTokenType = DeviceLastSeenState.PushTokenType.FCM;
  }

  return new DeviceLastSeenState(true, device.getCreated(), hasPushToken(device),
      device.getLastSeen(), pushTokenType);
}
// Exercises getState for: null account+device, device-less account, APNs-token device,
// FCM-token device, and a token-less device — asserting the exact DeviceLastSeenState
// (including token type) in each case.
@Test void getState() { final IdleDevicePushNotificationExperiment experiment = getExperiment(); assertEquals(DeviceLastSeenState.MISSING_DEVICE_STATE, experiment.getState(null, null)); assertEquals(DeviceLastSeenState.MISSING_DEVICE_STATE, experiment.getState(mock(Account.class), null)); final long createdAtMillis = CURRENT_TIME.minus(Duration.ofDays(14)).toEpochMilli(); { final Device apnsDevice = mock(Device.class); when(apnsDevice.getApnId()).thenReturn("apns-token"); when(apnsDevice.getCreated()).thenReturn(createdAtMillis); when(apnsDevice.getLastSeen()).thenReturn(CURRENT_TIME.toEpochMilli()); assertEquals( new DeviceLastSeenState(true, createdAtMillis, true, CURRENT_TIME.toEpochMilli(), DeviceLastSeenState.PushTokenType.APNS), experiment.getState(mock(Account.class), apnsDevice)); } { final Device fcmDevice = mock(Device.class); when(fcmDevice.getGcmId()).thenReturn("fcm-token"); when(fcmDevice.getCreated()).thenReturn(createdAtMillis); when(fcmDevice.getLastSeen()).thenReturn(CURRENT_TIME.toEpochMilli()); assertEquals( new DeviceLastSeenState(true, createdAtMillis, true, CURRENT_TIME.toEpochMilli(), DeviceLastSeenState.PushTokenType.FCM), experiment.getState(mock(Account.class), fcmDevice)); } { final Device noTokenDevice = mock(Device.class); when(noTokenDevice.getCreated()).thenReturn(createdAtMillis); when(noTokenDevice.getLastSeen()).thenReturn(CURRENT_TIME.toEpochMilli()); assertEquals( new DeviceLastSeenState(true, createdAtMillis, false, CURRENT_TIME.toEpochMilli(), null), experiment.getState(mock(Account.class), noTokenDevice)); } }
/**
 * Validates that a value (already accepted by the superclass) is a string whose
 * length lies in the inclusive range [minLength, maxLength].
 *
 * <p>Length checking is only attempted when the base validation passed, so the
 * cast to String mirrors the superclass's contract. NOTE(review): presumably the
 * superclass guarantees the value is a String on success — confirm before reuse.
 */
@Override
public ValidationResult validate(Object value) {
  ValidationResult result = super.validate(value);
  if (result instanceof ValidationResult.ValidationPassed) {
    final int length = ((String) value).length();
    if (length < minLength || length > maxLength) {
      result = new ValidationResult.ValidationFailed(
          "Value is not between " + minLength + " and " + maxLength + " in length!");
    }
  }
  return result;
}
// Boundary check: a string exactly at maxLength (5 chars, limit 1..5) must pass.
@Test public void testValidateMaxLengthString() { assertThat(new LimitedStringValidator(1, 5).validate("12345")) .isInstanceOf(ValidationResult.ValidationPassed.class); }
// Static factory: wraps a ProcessFunction<T, Boolean> predicate in a Filter transform
// that keeps only elements for which the predicate returns true.
public static <T, PredicateT extends ProcessFunction<T, Boolean>> Filter<T> by( PredicateT predicate) { return new Filter<>(predicate); }
// Pipeline test: filtering 1..7 with an even-number ProcessFunction must yield
// exactly {2, 4, 6} (order-insensitive).
@Test @Category(NeedsRunner.class) public void testFilterByProcessFunction() { PCollection<Integer> output = p.apply(Create.of(1, 2, 3, 4, 5, 6, 7)).apply(Filter.by(new EvenProcessFn())); PAssert.that(output).containsInAnyOrder(2, 4, 6); p.run(); }
// Persists the provider's send outcome for an SMS log row: the success flag maps to
// SUCCESS/FAILURE status, the send time is stamped with the current local time, and
// the provider's code/message/request-id/serial-no are stored verbatim.
@Override public void updateSmsSendResult(Long id, Boolean success, String apiSendCode, String apiSendMsg, String apiRequestId, String apiSerialNo) { SmsSendStatusEnum sendStatus = success ? SmsSendStatusEnum.SUCCESS : SmsSendStatusEnum.FAILURE; smsLogMapper.updateById(SmsLogDO.builder().id(id) .sendStatus(sendStatus.getStatus()).sendTime(LocalDateTime.now()) .apiSendCode(apiSendCode).apiSendMsg(apiSendMsg) .apiRequestId(apiRequestId).apiSerialNo(apiSerialNo).build()); }
// Round-trips updateSmsSendResult through the mapper: inserts an IGNORE-status row,
// updates it with random result fields, then re-reads and checks status mapping,
// non-null send time, and every provider field.
@Test public void testUpdateSmsSendResult() { // mock 数据 SmsLogDO dbSmsLog = randomSmsLogDO( o -> o.setSendStatus(SmsSendStatusEnum.IGNORE.getStatus())); smsLogMapper.insert(dbSmsLog); // 准备参数 Long id = dbSmsLog.getId(); Boolean success = randomBoolean(); String apiSendCode = randomString(); String apiSendMsg = randomString(); String apiRequestId = randomString(); String apiSerialNo = randomString(); // 调用 smsLogService.updateSmsSendResult(id, success, apiSendCode, apiSendMsg, apiRequestId, apiSerialNo); // 断言 dbSmsLog = smsLogMapper.selectById(id); assertEquals(success ? SmsSendStatusEnum.SUCCESS.getStatus() : SmsSendStatusEnum.FAILURE.getStatus(), dbSmsLog.getSendStatus()); assertNotNull(dbSmsLog.getSendTime()); assertEquals(apiSendCode, dbSmsLog.getApiSendCode()); assertEquals(apiSendMsg, dbSmsLog.getApiSendMsg()); assertEquals(apiRequestId, dbSmsLog.getApiRequestId()); assertEquals(apiSerialNo, dbSmsLog.getApiSerialNo()); }
// Convenience overload: fetches the entire object by delegating with the (-1, -1)
// range sentinel. NOTE(review): parseContext is accepted but not forwarded to the
// ranged overload — confirm whether the delegate is expected to need it.
@Override public InputStream fetch(String fetchKey, Metadata metadata, ParseContext parseContext) throws TikaException, IOException { return fetch(fetchKey, -1, -1, metadata); }
// Integration-style test: loads the "s3" fetcher from tika-config-s3.xml and streams
// a fetched object into a local file (requires live S3 access / fixture config).
@Test public void testConfig() throws Exception { FetcherManager fetcherManager = FetcherManager.load( Paths.get(this.getClass().getResource("/tika-config-s3.xml").toURI())); Fetcher fetcher = fetcherManager.getFetcher("s3"); Metadata metadata = new Metadata(); try (InputStream is = fetcher.fetch(FETCH_STRING, metadata, new ParseContext())) { Files.copy(is, outputFile, StandardCopyOption.REPLACE_EXISTING); } }
/**
 * Returns a new {@code Tasks} collection containing every task in this collection
 * that is an instance of the given type (subclasses included, per isInstance).
 * The original collection is not modified.
 */
public Tasks findByType(Class<? extends Task> type) {
  final Tasks result = new Tasks();
  for (final Task task : this) {
    if (type.isInstance(task)) {
      result.add(task);
    }
  }
  return result;
}
// findByType with a type not present in the collection must return an empty Tasks.
@Test public void shouldReturnEmptyTasks() throws Exception { AntTask antTask1 = new AntTask(); FetchTask fetchArtifact = new FetchTask(); Tasks tasks = new Tasks(antTask1, fetchArtifact); Tasks finds = tasks.findByType(NantTask.class); assertThat(finds.size(), is(0)); }
/**
 * Assigns the issue to {@code user} (or unassigns it when {@code user} is null).
 *
 * @return {@code true} when the assignee actually changed (change log entry written,
 *     update date stamped, notifications flagged); {@code false} for a no-op.
 */
public boolean assign(DefaultIssue issue, @Nullable UserDto user, IssueChangeContext context) {
  // null UUID means "unassigned".
  String assigneeUuid = user != null ? user.getUuid() : null;
  if (!Objects.equals(assigneeUuid, issue.assignee())) {
    String newAssigneeName = user == null ? null : user.getName();
    issue.setFieldChange(context, ASSIGNEE, UNUSED, newAssigneeName);
    // Fix: reuse the UUID computed above instead of re-deriving it from the user
    // (the original duplicated the `user != null ? user.getUuid() : null` expression).
    issue.setAssigneeUuid(assigneeUuid);
    issue.setUpdateDate(context.date());
    issue.setChanged(true);
    issue.setSendNotifications(true);
    return true;
  }
  return false;
}
// Re-assigning the issue to its current assignee is a no-op: returns false, records
// no change, and flags no notifications.
@Test void not_change_assignee() { UserDto user = newUserDto().setLogin("morgan").setName("Morgan"); issue.setAssigneeUuid(user.getUuid()); boolean updated = underTest.assign(issue, user, context); assertThat(updated).isFalse(); assertThat(issue.currentChange()).isNull(); assertThat(issue.mustSendNotifications()).isFalse(); }
// Convenience overload: runs Sammon's mapping on the proximity matrix with default
// Properties (all hyper-parameters take their defaults).
public static SammonMapping of(double[][] proximity) { return of(proximity, new Properties()); }
// Runs Sammon's mapping on the Eurodist distance matrix and pins the stress value.
// NOTE(review): the `points` array is the expected 2-D embedding, but the per-point
// assertion is commented out (coordinates are only printed) — presumably because the
// embedding is reproducible only up to rotation/reflection; confirm before re-enabling.
@Test public void test() { System.out.println("Sammon's Mapping"); double[][] points = { {-1921.9111, -1830.4309}, {759.7598, -606.0879}, {-80.1989, 443.366}, {106.2067, 512.101}, {484.4129, 477.3046}, {-295.3324, 445.0549}, {-543.941, 1091.5882}, {7.4096, -269.6847}, {1942.8039, -727.8288}, {-626.7153, 721.5507}, {-185.8613, 658.2859}, {1916.6406, -83.5842}, {149.485, -217.709}, {1372.7065, -349.7255}, {285.2568, -514.7278}, {-273.1086, -426.4983}, {-569.246, -106.5333}, {161.4922, 261.1512}, {-698.6729, -1023.6605}, {-951.8776, 1716.5056}, {-1039.3089, -170.4371} }; SammonMapping sammon = SammonMapping.of(Eurodist.x); assertEquals(0.00941, sammon.stress, 1E-5); for (int i = 0; i < points.length; i++) { System.out.println(Arrays.toString(sammon.coordinates[i])); //assertArrayEquals(points[i], sammon.coordinates[i], 1E-4); } }
/**
 * Computes the row-count threshold at which the next consuming segment should flush,
 * aiming for segments close to the configured desired size in bytes.
 *
 * <p>Decision order:
 * <ol>
 *   <li>No committing-segment metadata (first segment of the partition): use the
 *       remembered rows-per-byte ratio if available, else the autotune initial rows.</li>
 *   <li>Committing size unavailable or segment was force-committed: carry over the
 *       previous threshold unchanged.</li>
 *   <li>Time limit was hit (fewer rows than threshold AND segment smaller than desired):
 *       bump the observed row count by a small multiplier.</li>
 *   <li>Otherwise size the target from how far the committing segment's size deviates
 *       from the [desired/2, desired*1.5] band.</li>
 * </ol>
 * Also maintains {@code _latestSegmentRowsToSizeRatio} as an exponentially weighted
 * moving average of rows-per-byte.
 */
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
    @Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
  long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
  if (desiredSegmentSizeBytes <= 0) {
    desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
  }
  // Acceptable size band around the desired segment size: [desired/2, desired*1.5].
  long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
  double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;

  if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null
    if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case
      long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
      targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
          _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
      return (int) targetSegmentNumRows;
    } else {
      final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, setting threshold for {} as {}",
          newSegmentName, autotuneInitialRows);
      return autotuneInitialRows;
    }
  }

  final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
  if (committingSegmentSizeBytes <= 0 // repair segment case
      || SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
      committingSegmentDescriptor.getStopReason())) {
    String reason = committingSegmentSizeBytes <= 0 //
        ? "Committing segment size is not available" //
        : "Committing segment is due to force-commit";
    final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
        reason, newSegmentName, targetNumRows);
    return targetNumRows;
  }

  final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
  final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
  final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}",
      newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold,
      committingSegmentSizeBytes);

  // Maintain an exponentially weighted moving average of rows-per-byte across segments.
  double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
  if (_latestSegmentRowsToSizeRatio > 0) {
    _latestSegmentRowsToSizeRatio =
        CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
  } else {
    _latestSegmentRowsToSizeRatio = currentRatio;
  }

  // If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
  // We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
  // to hit the row limit next time around.
  //
  // If the size of the committing segment is higher than the desired segment size, then the administrator has
  // set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
  // limit.
  //
  // TODO: add feature to adjust time threshold as well
  // If we set new threshold to be numRowsConsumed, we might keep oscillating back and forth between doubling limit
  // and time threshold being hit. If we set new threshold to be
  // committingSegmentZKMetadata.getSizeThresholdToFlushSegment(),
  // we might end up using a lot more memory than required for the segment. Using a minor bump strategy, until
  // we add feature to adjust time. We will only slightly bump the threshold based on numRowsConsumed.
  if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
    final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
    long currentNumRows = numRowsConsumed;
    StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
    if (timeThresholdMillis < timeConsumed) {
      // The administrator has reduced the time threshold. Adjust the
      // number of rows to match the average consumption rate on the partition.
      currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
      logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
          .append(". ");
    }
    long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
    targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
    logStringBuilder.append("Setting segment size for {} as {}");
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(), newSegmentName,
        targetSegmentNumRows);
    return (int) targetSegmentNumRows;
  }

  long targetSegmentNumRows;
  if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
    // Segment came in small: grow the row target by 50%.
    targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
  } else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
    // Segment came in large: halve the row target.
    targetSegmentNumRows = numRowsConsumed / 2;
  } else {
    // In band: derive the target from the desired size and the best available ratio.
    if (_latestSegmentRowsToSizeRatio > 0) {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
    } else {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
    }
  }
  targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "Committing segment size {}, current ratio {}, setting threshold for {} as {}",
      committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
  return (int) targetSegmentNumRows;
}
// Committing segment (5 MB-ish) exceeds 1.5x the desired size (3 MB-ish), so the new
// threshold must be half the consumed rows: 30000 / 2 = 15000.
@Test public void testSegmentSizeTooSmall() { SegmentFlushThresholdComputer computer = new SegmentFlushThresholdComputer(); StreamConfig streamConfig = mock(StreamConfig.class); when(streamConfig.getFlushThresholdSegmentSizeBytes()).thenReturn(300_0000L); CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class); when(committingSegmentDescriptor.getSegmentSizeBytes()).thenReturn(500_0000L); SegmentZKMetadata committingSegmentZKMetadata = mock(SegmentZKMetadata.class); when(committingSegmentZKMetadata.getTotalDocs()).thenReturn(30_000L); when(committingSegmentZKMetadata.getSizeThresholdToFlushSegment()).thenReturn(20_000); int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor, committingSegmentZKMetadata, "events3__0__0__20211222T1646Z"); // totalDocs / 2 // 30000 / 2 assertEquals(threshold, 15_000); }
// Routes SQL against single (non-sharded) tables. With exactly one storage unit the
// route is trivial and short-circuits; otherwise the statement is validated (when a
// validator exists for its type), the referenced single tables are resolved, and a
// route engine (when applicable) populates the RouteContext.
@Override public RouteContext createRouteContext(final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final SingleRule rule, final ConfigurationProperties props, final ConnectionContext connectionContext) { if (1 == database.getResourceMetaData().getStorageUnits().size()) { return createSingleDataSourceRouteContext(rule, database, queryContext); } RouteContext result = new RouteContext(); SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext(); SingleMetaDataValidatorFactory.newInstance(sqlStatementContext.getSqlStatement()).ifPresent(optional -> optional.validate(rule, sqlStatementContext, database)); Collection<QualifiedTable> singleTables = getSingleTables(database, rule, result, sqlStatementContext); SingleRouteEngineFactory.newInstance(singleTables, sqlStatementContext.getSqlStatement()).ifPresent(optional -> optional.route(result, rule)); return result; }
// Single-storage-unit shortcut with a readwrite-splitting source: the route unit must
// map logic "readwrite_ds" to the actual "write_ds" node and carry table mappers.
@Test void assertCreateRouteContextWithReadwriteSplittingDataSource() throws SQLException { SingleRule rule = new SingleRule(new SingleRuleConfiguration(), DefaultDatabase.LOGIC_NAME, new H2DatabaseType(), Collections.singletonMap("readwrite_ds", new MockedDataSource(mockConnection())), Collections.emptyList()); rule.getAttributes().getAttribute(DataNodeRuleAttribute.class).getAllDataNodes().put("t_order", Collections.singletonList(createDataNode("write_ds"))); ShardingSphereDatabase database = mockReadwriteSplittingDatabase(); RouteContext actual = new SingleSQLRouter().createRouteContext( createQueryContext(), mock(RuleMetaData.class), database, rule, new ConfigurationProperties(new Properties()), new ConnectionContext(Collections::emptySet)); assertThat(actual.getRouteUnits().size(), is(1)); RouteUnit routeUnit = actual.getRouteUnits().iterator().next(); assertThat(routeUnit.getDataSourceMapper().getLogicName(), is("readwrite_ds")); assertThat(routeUnit.getDataSourceMapper().getActualName(), is("write_ds")); assertFalse(routeUnit.getTableMappers().isEmpty()); }
// Convenience overload: requests a local ID batch using the current cluster clock time
// and this member's node id.
IdBatchAndWaitTime newIdBaseLocal(int batchSize) { return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize); }
// Requesting exactly the batch size that consumes the full allowed-future window must
// incur zero wait time (boundary of the future-allowance throttle).
@Test public void when_maximumAllowedFuture_then_noWaitTime() { IdBatchAndWaitTime result = gen.newIdBaseLocal(1516028439000L, 1234, (int) (IDS_PER_SECOND * DEFAULT_ALLOWED_FUTURE_MILLIS)); assertEquals(0, result.waitTimeMillis); }
// SQL scalar from_base(varchar, bigint) -> bigint: validates the radix, then parses the
// string as a long in that radix via Long.parseLong. Malformed digits (or values out of
// long range) surface as PrestoException(INVALID_FUNCTION_ARGUMENT) with the cause kept.
@Description("convert a string in the given base to a number") @ScalarFunction @LiteralParameters("x") @SqlType(StandardTypes.BIGINT) public static long fromBase(@SqlType("varchar(x)") Slice value, @SqlType(StandardTypes.BIGINT) long radix) { checkRadix(radix); try { return Long.parseLong(value.toStringUtf8(), (int) radix); } catch (NumberFormatException e) { throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("Not a valid base-%d number: %s", radix, value.toStringUtf8()), e); } }
/**
 * from_base coverage: valid parses across radices (including both long extremes),
 * null propagation for either argument, and error paths for a bad radix, an invalid
 * digit, and a long-overflowing value.
 *
 * <p>Fix: the original repeated the last three assertInvalidFunction calls verbatim;
 * the duplicates added no coverage and have been removed.
 */
@Test
public void testFromBase() {
  assertFunction("from_base('80000000', 16)", BIGINT, 2147483648L);
  assertFunction("from_base('11111111', 2)", BIGINT, 255L);
  assertFunction("from_base('-7fffffff', 16)", BIGINT, -2147483647L);
  assertFunction("from_base('9223372036854775807', 10)", BIGINT, 9223372036854775807L);
  assertFunction("from_base('-9223372036854775808', 10)", BIGINT, -9223372036854775808L);
  assertFunction("from_base(NULL, 10)", BIGINT, null);
  assertFunction("from_base('-9223372036854775808', NULL)", BIGINT, null);
  assertFunction("from_base(NULL, NULL)", BIGINT, null);
  assertInvalidFunction("from_base('Z', 37)", "Radix must be between 2 and 36");
  assertInvalidFunction("from_base('Z', 35)", "Not a valid base-35 number: Z");
  assertInvalidFunction("from_base('9223372036854775808', 10)", "Not a valid base-10 number: 9223372036854775808");
}
// Batch-deletes dashboard users by id. Double-guarded: Shiro permission annotation plus
// a runtime admin check (Assert.isTrue) so only the admin/root session may delete.
// Ids are deduplicated via HashSet before delegation.
@DeleteMapping("/batch") @RequiresPermissions("system:manager:delete") public ShenyuAdminResult deleteDashboardUser(@RequestBody @NotEmpty final List<@NotBlank String> ids) { // [mandatory] This function can only be used by the admin user Assert.isTrue(SessionUtil.isAdmin(), "This function can only be used by the admin(root) user"); return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, dashboardUserService.delete(new HashSet<>(ids))); }
// Admin-session batch delete: with an empty id list and a mocked service returning 0,
// the endpoint must respond with the delete-success message and data 0.
@Test public void deleteDashboardUser() throws Exception { final String url = "/dashboardUser/batch"; final List<String> ids = Lists.newArrayList(); SessionUtil.setLocalVisitor(UserInfo.builder().userId("1").userName("admin").build()); given(dashboardUserService.delete(any())).willReturn(0); mockMvc.perform(delete(url, ids) .content(GsonUtils.getInstance().toJson(ids)) .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()) .andDo(print()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DELETE_SUCCESS))) .andExpect(jsonPath("$.data", is(0))); }
/**
 * Records that {@code sourceField} of the given input (0 or 1) is forwarded to
 * {@code targetField}.
 *
 * @throws IndexOutOfBoundsException for any input other than 0 or 1
 * @throws InvalidSemanticAnnotationException when the target field was already
 *     registered for that input
 */
public void addForwardedField(int input, int sourceField, int targetField) {
  if (input != 0 && input != 1) {
    throw new IndexOutOfBoundsException();
  }
  // Input 0 and 1 each keep their own source-to-targets mapping.
  final Map<Integer, FieldSet> fieldMapping = (input == 0) ? this.fieldMapping1 : this.fieldMapping2;

  if (isTargetFieldPresent(targetField, fieldMapping)) {
    throw new InvalidSemanticAnnotationException(
        "Target field " + targetField + " was added twice to input " + input);
  }

  // Extend the existing target set for this source field, or start a new one.
  final FieldSet existingTargets = fieldMapping.get(sourceField);
  fieldMapping.put(sourceField,
      existingTargets == null ? new FieldSet(targetField) : existingTargets.addField(targetField));
}
// Registering the same target field (2) twice on input 1 — even from different source
// fields — must raise InvalidSemanticAnnotationException.
@Test void testAddForwardedFieldsTargetTwice2() { assertThatThrownBy( () -> { DualInputSemanticProperties sp = new DualInputSemanticProperties(); sp.addForwardedField(1, 0, 2); sp.addForwardedField(1, 1, 2); }) .isInstanceOf(SemanticProperties.InvalidSemanticAnnotationException.class); }
// Decompresses a SPDY header block: feed the buffer to the decompressor and drain it
// until no more output is produced, then verify the whole input was consumed.
@Override void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception { int len = setInput(headerBlock); int numBytes; do { numBytes = decompress(alloc, frame); } while (numBytes > 0); // z_stream has an internal 64-bit hold buffer // it is always capable of consuming the entire input, // so leftover input means the deflate stream ended early / the block is malformed if (decompressor.getRemaining() != 0) { // input not fully consumed: reject the header block throw INVALID_HEADER_BLOCK; } headerBlock.skipBytes(len); }
// A zlib stream declaring an unknown preset dictionary must make decode throw
// SpdyProtocolException; the buffer is released afterwards to avoid a leak.
@Test public void testHeaderBlockInvalidDictionary() throws Exception { final ByteBuf headerBlock = Unpooled.buffer(7); headerBlock.writeByte(0x78); headerBlock.writeByte(0x3f); headerBlock.writeByte(0x01); // Unknown dictionary headerBlock.writeByte(0x02); // Unknown dictionary headerBlock.writeByte(0x03); // Unknown dictionary headerBlock.writeByte(0x04); // Unknown dictionary headerBlock.writeByte(0); // Non-compressed block assertThrows(SpdyProtocolException.class, new Executable() { @Override public void execute() throws Throwable { decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); } }); headerBlock.release(); }
/**
 * Converts a SQL type to its decimal representation: decimals pass through unchanged,
 * while exact integer types (INTEGER, BIGINT) map to their canonical upcast decimals.
 *
 * @throws KsqlException for any other base type
 */
public static SqlDecimal toSqlDecimal(final SqlType type) {
  final SqlBaseType baseType = type.baseType();
  if (baseType == SqlBaseType.DECIMAL) {
    return (SqlDecimal) type;
  }
  if (baseType == SqlBaseType.INTEGER) {
    return SqlTypes.INT_UPCAST_TO_DECIMAL;
  }
  if (baseType == SqlBaseType.BIGINT) {
    return SqlTypes.BIGINT_UPCAST_TO_DECIMAL;
  }
  throw new KsqlException(
      "Cannot convert " + type.baseType() + " to " + SqlBaseType.DECIMAL + ".");
}
// A decimal input passes through toSqlDecimal unchanged (same precision and scale).
@Test public void shouldConvertDecimalToSqlDecimal() { // Given: final SqlDecimal given = SqlTypes.decimal(2, 2); // When: final SqlDecimal decimal = DecimalUtil.toSqlDecimal(given); // Then: assertThat(decimal, is(SqlTypes.decimal(2, 2))); }
// Evaluates a JavaScript expression in this engine's context and returns the raw
// GraalVM polyglot Value (no conversion to a Java type).
public Value evalForValue(String exp) { return context.eval(JS, exp); }
// Java.type(...) interop must yield a value that is both a meta object (a type) and a
// host (Java) object.
@Test void testJavaType() { Value v = je.evalForValue("Java.type('com.intuit.karate.graal.SimplePojo')"); assertTrue(v.isMetaObject()); assertTrue(v.isHostObject()); }
// Builds the elves' message as a composite: six Word objects (each assembled from
// individual letters) wrapped in a Sentence, spelling "Much wind pours from your mouth".
LetterComposite messageFromElves() { var words = List.of( new Word('M', 'u', 'c', 'h'), new Word('w', 'i', 'n', 'd'), new Word('p', 'o', 'u', 'r', 's'), new Word('f', 'r', 'o', 'm'), new Word('y', 'o', 'u', 'r'), new Word('m', 'o', 'u', 't', 'h') ); return new Sentence(words); }
// The elves' composite must print as the full sentence (period appended by the
// Sentence rendering, checked via the shared testMessage helper).
@Test void testMessageFromElves() { final var messenger = new Messenger(); testMessage( messenger.messageFromElves(), "Much wind pours from your mouth." ); }
// Value-schema variant: delegates to the shared getSchema with isKey = false.
@Override public SchemaResult getValueSchema( final Optional<String> topicName, final Optional<Integer> schemaId, final FormatInfo expectedFormat, final SerdeFeatures serdeFeatures ) { return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false); }
// When the schema registry reports not-found for an explicit schema id, the result
// must carry no schema, a populated failure reason, and the expected failure message.
@Test public void shouldReturnErrorFromGetValueWithIdSchemaIfNotFound() throws Exception { // Given: when(srClient.getSchemaBySubjectAndId(any(), anyInt())) .thenThrow(notFoundException()); // When: final SchemaResult result = supplier.getValueSchema(Optional.of(TOPIC_NAME), Optional.of(42), expectedFormat, SerdeFeatures.of()); // Then: assertThat(result.schemaAndId, is(Optional.empty())); assertThat(result.failureReason, is(not(Optional.empty()))); verifyFailureMessageForValue(result, Optional.of(42)); }
/**
 * Finds the master node currently serving the given hash slot.
 *
 * @return the first master whose slot range contains {@code slot}, or {@code null}
 *     when no master covers it
 */
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
  for (final RedisClusterNode node : clusterGetNodes()) {
    if (node.isMaster() && node.getSlotRange().contains(slot)) {
      return node;
    }
  }
  return null;
}
// Slots 1 and 16000 must resolve to different masters — assumes the test cluster's
// slot ranges split somewhere between those two slots.
@Test public void testClusterGetNodeForSlot() { RedisClusterNode node1 = connection.clusterGetNodeForSlot(1); RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000); assertThat(node1.getId()).isNotEqualTo(node2.getId()); }
// Builds an IPv6 prefix from a raw address byte array and a prefix length, delegating
// address parsing to Ip6Address.valueOf.
public static Ip6Prefix valueOf(byte[] address, int prefixLength) { return new Ip6Prefix(Ip6Address.valueOf(address), prefixLength); }
/**
 * A negative IPv6 prefix length (/-1) must raise IllegalArgumentException.
 *
 * <p>Fix: removed the unused local {@code ipPrefix} — the expected-exception
 * annotation only needs the call to be made, not its result.
 */
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfStringNegativePrefixLengthIPv6() {
  Ip6Prefix.valueOf("1111:2222:3333:4444:5555:6666:7777:8888/-1");
}
// Creates a dictionary type after enforcing uniqueness of both its name and its type
// key; deletedTime is set to the EMPTY sentinel (not null) so the unique index works.
// Returns the generated id.
@Override public Long createDictType(DictTypeSaveReqVO createReqVO) { // 校验字典类型的名字的唯一性 (validate name uniqueness) validateDictTypeNameUnique(null, createReqVO.getName()); // 校验字典类型的类型的唯一性 (validate type-key uniqueness) validateDictTypeUnique(null, createReqVO.getType()); // 插入字典类型 (insert the dict type) DictTypeDO dictType = BeanUtils.toBean(createReqVO, DictTypeDO.class); dictType.setDeletedTime(LocalDateTimeUtils.EMPTY); // sentinel instead of null for the unique index dictTypeMapper.insert(dictType); return dictType.getId(); }
// Happy path: a random request VO (id cleared so the DB assigns one) must produce a
// non-null id and a persisted row matching every request field except id.
@Test public void testCreateDictType_success() { // 准备参数 (prepare arguments) DictTypeSaveReqVO reqVO = randomPojo(DictTypeSaveReqVO.class, o -> o.setStatus(randomEle(CommonStatusEnum.values()).getStatus())) .setId(null); // 避免 id 被赋值 (avoid pre-set id) // 调用 (invoke) Long dictTypeId = dictTypeService.createDictType(reqVO); // 断言 (assert) assertNotNull(dictTypeId); // 校验记录的属性是否正确 (verify persisted fields) DictTypeDO dictType = dictTypeMapper.selectById(dictTypeId); assertPojoEquals(reqVO, dictType, "id"); }
// Constructor: forwards all collaborators unchanged to the base activity class —
// no additional state of its own.
public ChangeInvisibleDurationActivity(MessagingProcessor messagingProcessor, GrpcClientSettingsManager grpcClientSettingsManager, GrpcChannelManager grpcChannelManager) { super(messagingProcessor, grpcClientSettingsManager, grpcChannelManager); }
// Happy path for changeInvisibleDuration: the mocked processor returns an OK AckResult
// with a new handle; the response must be OK, carry the new handle, and the captured
// invisible time must equal the requested 3 seconds in milliseconds.
@Test public void testChangeInvisibleDurationActivity() throws Throwable { String newHandle = "newHandle"; ArgumentCaptor<Long> invisibleTimeArgumentCaptor = ArgumentCaptor.forClass(Long.class); AckResult ackResult = new AckResult(); ackResult.setExtraInfo(newHandle); ackResult.setStatus(AckStatus.OK); when(this.messagingProcessor.changeInvisibleTime( any(), any(), anyString(), anyString(), anyString(), invisibleTimeArgumentCaptor.capture() )).thenReturn(CompletableFuture.completedFuture(ackResult)); ChangeInvisibleDurationResponse response = this.changeInvisibleDurationActivity.changeInvisibleDuration( createContext(), ChangeInvisibleDurationRequest.newBuilder() .setInvisibleDuration(Durations.fromSeconds(3)) .setTopic(Resource.newBuilder().setName(TOPIC).build()) .setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build()) .setMessageId("msgId") .setReceiptHandle(buildReceiptHandle(TOPIC, System.currentTimeMillis(), 3000)) .build() ).get(); assertEquals(Code.OK, response.getStatus().getCode()); assertEquals(TimeUnit.SECONDS.toMillis(3), invisibleTimeArgumentCaptor.getValue().longValue()); assertEquals(newHandle, response.getReceiptHandle()); }
// Static factory for the double-summing combine function used by Sum transforms.
public static Combine.BinaryCombineDoubleFn ofDoubles() { return new SumDoubleFn(); }
// IEEE-754 semantics: summing any finite values with negative infinity must yield
// negative infinity.
@Test public void testSumDoubleFnNegativeInfinity() { testCombineFn( Sum.ofDoubles(), Lists.newArrayList(Double.NEGATIVE_INFINITY, 2.0, 3.0, 4.0), Double.NEGATIVE_INFINITY); }
// Sorts the collection by the named bean property via a PropertyComparator,
// returning the sorted result as a List.
public static <T> List<T> sortByProperty(Collection<T> collection, String property) { return sort(collection, new PropertyComparator<>(property)); }
// Sorting by the createTime property must order the beans by ascending date:
// 李四 (March) < 王五 (April) < 张三 (May).
@Test public void sortByPropertyTest() { final List<TestBean> list = CollUtil.newArrayList( new TestBean("张三", 12, DateUtil.parse("2018-05-01")), // new TestBean("李四", 13, DateUtil.parse("2018-03-01")), // new TestBean("王五", 12, DateUtil.parse("2018-04-01"))// ); CollUtil.sortByProperty(list, "createTime"); assertEquals("李四", list.get(0).getName()); assertEquals("王五", list.get(1).getName()); assertEquals("张三", list.get(2).getName()); }
// Reads the named ConfigMap from Kubernetes and, when it carries data, overrides the
// runtime factory's config with it. Best-effort: every failure is logged and swallowed
// so a missing/unreachable ConfigMap never breaks the caller.
static void fetchConfigMap(CoreV1Api coreClient, String changeConfigMap, String changeConfigMapNamespace, KubernetesRuntimeFactory kubernetesRuntimeFactory) { try { V1ConfigMap v1ConfigMap = coreClient.readNamespacedConfigMap(changeConfigMap, changeConfigMapNamespace, null); Map<String, String> data = v1ConfigMap.getData(); if (data != null) { overRideKubernetesConfig(data, kubernetesRuntimeFactory); } } catch (Exception e) { log.error("Error while trying to fetch configmap {} at namespace {}", changeConfigMap, changeConfigMapNamespace, e); } }
// Two-phase check: with an empty ConfigMap the factory is unchanged; after data is set
// (docker image + pull policy), a second fetch must apply both overrides.
@Test public void testDynamicConfigMapLoading() throws Exception { String changeConfigMap = "changeMap"; String changeConfigNamespace = "changeConfigNamespace"; KubernetesRuntimeFactory kubernetesRuntimeFactory = getKuberentesRuntimeFactory(); CoreV1Api coreV1Api = Mockito.mock(CoreV1Api.class); V1ConfigMap v1ConfigMap = new V1ConfigMap(); Mockito.doReturn(v1ConfigMap).when(coreV1Api).readNamespacedConfigMap(any(), any(), any()); KubernetesRuntimeFactory.fetchConfigMap(coreV1Api, changeConfigMap, changeConfigNamespace, kubernetesRuntimeFactory); Mockito.verify(coreV1Api, Mockito.times(1)).readNamespacedConfigMap(eq(changeConfigMap), eq(changeConfigNamespace), eq(null)); KubernetesRuntimeFactory expected = getKuberentesRuntimeFactory(); assertEquals(kubernetesRuntimeFactory, expected); HashMap<String, String> configs = new HashMap<>(); configs.put("pulsarDockerImageName", "test_dockerImage2"); configs.put("imagePullPolicy", "test_imagePullPolicy2"); v1ConfigMap.setData(configs); KubernetesRuntimeFactory.fetchConfigMap(coreV1Api, changeConfigMap, changeConfigNamespace, kubernetesRuntimeFactory); Mockito.verify(coreV1Api, Mockito.times(2)).readNamespacedConfigMap(eq(changeConfigMap), eq(changeConfigNamespace), eq(null)); assertEquals(kubernetesRuntimeFactory.getPulsarDockerImageName(), "test_dockerImage2"); assertEquals(kubernetesRuntimeFactory.getImagePullPolicy(), "test_imagePullPolicy2"); }
// Parses a partial XML document (a fragment, not a full config file) into an
// instance of the requested type by delegating to the stream-based overload.
// The fragment is decoded as UTF-8.
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
    return fromXmlPartial(toInputStream(partial, UTF_8), o);
}
// Verifies that the autoUpdate="false" attribute is honored for every material
// type (hg, git, svn, p4) when loading a partial XML config.
@Test
void shouldLoadMaterialWithAutoUpdate() throws Exception {
    MaterialConfig material = xmlLoader.fromXmlPartial("<hg url=\"file:///tmp/testSvnRepo/project1/trunk\" autoUpdate=\"false\"/>", HgMaterialConfig.class);
    assertThat(material.isAutoUpdate()).isFalse();
    material = xmlLoader.fromXmlPartial("<git url=\"file:///tmp/testSvnRepo/project1/trunk\" autoUpdate=\"false\"/>", GitMaterialConfig.class);
    assertThat(material.isAutoUpdate()).isFalse();
    material = xmlLoader.fromXmlPartial("<svn url=\"file:///tmp/testSvnRepo/project1/trunk\" autoUpdate=\"false\"/>", SvnMaterialConfig.class);
    assertThat(material.isAutoUpdate()).isFalse();
    material = xmlLoader.fromXmlPartial("<p4 port='localhost:1666' autoUpdate='false' ><view/></p4>", P4MaterialConfig.class);
    assertThat(material.isAutoUpdate()).isFalse();
}
/**
 * Adds two floats exactly by routing through the String-based overload, so that
 * BigDecimal parses the decimal rendering of each value instead of inheriting
 * binary floating-point error.
 */
public static double add(float v1, float v2) {
    final String augend = Float.toString(v1);
    final String addend = Float.toString(v2);
    return add(augend, addend).doubleValue();
}
// Verifies NumberUtil.add keeps full precision on large double operands
// (no rounding loss when adding 1000 to ~1.69e12).
@Test
public void addTest5() {
    final double add = NumberUtil.add(1686036549717D, 1000D);
    assertEquals(1686036550717D, add, 0);
}
// Fetches all outputs whose _id is contained in the given collection.
@Override
public Set<Output> loadByIds(Collection<String> ids) {
    final DBQuery.Query query = DBQuery.in(OutputImpl.FIELD_ID, ids);
    // Cursor is AutoCloseable; copy eagerly so the returned set outlives the cursor.
    try (org.mongojack.DBCursor<OutputImpl> dbCursor = coll.find(query)) {
        return ImmutableSet.copyOf((Iterable<? extends Output>) dbCursor);
    }
}
// Verifies loadByIds returns only outputs that exist in the fixture: unknown ids
// are silently dropped and an empty request yields an empty result.
@Test
@MongoDBFixtures("OutputServiceImplTest.json")
public void loadByIdsReturnsRequestedOutputs() {
    assertThat(outputService.loadByIds(ImmutableSet.of())).isEmpty();
    assertThat(outputService.loadByIds(ImmutableSet.of("54e300000000000000000000"))).isEmpty();
    assertThat(outputService.loadByIds(ImmutableSet.of("54e3deadbeefdeadbeef0001", "54e300000000000000000000"))).hasSize(1);
    assertThat(outputService.loadByIds(ImmutableSet.of("54e3deadbeefdeadbeef0001", "54e3deadbeefdeadbeef0002", "54e300000000000000000000"))).hasSize(2);
}
// Clears per-request thread-local state when the intercepted call throws, so
// request tag/data cannot leak into the next request handled by this thread.
@Override
public ExecuteContext onThrow(ExecuteContext context) {
    ThreadLocalUtils.removeRequestData();
    ThreadLocalUtils.removeRequestTag();
    return context;
}
// Verifies onThrow removes both the request tag and request data thread-locals.
@Test
public void testOnThrow() {
    ThreadLocalUtils.setRequestTag(new RequestTag(null));
    ThreadLocalUtils.setRequestData(new RequestData(null, null, null));
    interceptor.onThrow(context);
    Assert.assertNull(ThreadLocalUtils.getRequestTag());
    Assert.assertNull(ThreadLocalUtils.getRequestData());
}
// Returns the map name shared by all events in this batch, caching it after the
// first lookup; null when the batch is empty.
// NOTE(review): the lazy cache is unsynchronized — presumably the batch is
// confined to one thread; confirm with callers.
@Override
public String getMapName() {
    if (mapName != null) {
        return mapName;
    }
    if (events.isEmpty()) {
        return null;
    }
    // This batch should all relate to a single map
    QueryCacheEventData next = events.iterator().next();
    mapName = next.getMapName();
    return mapName;
}
// Verifies getMapName returns the name taken from the batch's events.
@Test
public void testGetMapName() {
    assertEquals("myMap", batchEventData.getMapName());
}
/**
 * Parses a GeoPoint from a spacer-separated string of two or three doubles:
 * "first{spacer}second" or "first{spacer}second{spacer}altitude".
 * Throws NumberFormatException / StringIndexOutOfBoundsException on malformed input,
 * exactly as Double.parseDouble / String.substring would.
 */
public static GeoPoint fromDoubleString(final String s, final char spacer) {
    final int first = s.indexOf(spacer);
    final int second = s.indexOf(spacer, first + 1);
    final double a = Double.parseDouble(s.substring(0, first));
    if (second == -1) {
        // Only two components present.
        final double b = Double.parseDouble(s.substring(first + 1));
        return new GeoPoint(a, b);
    }
    final double b = Double.parseDouble(s.substring(first + 1, second));
    final double altitude = Double.parseDouble(s.substring(second + 1));
    return new GeoPoint(a, b, altitude);
}
// Round-trip check: a two-component comma string parses back to the same GeoPoint.
@Test
public void test_toFromDoubleString_withoutAltitude() {
    final GeoPoint in = new GeoPoint(-117.123, 33.123);
    final GeoPoint out = GeoPoint.fromDoubleString("-117.123,33.123", ',');
    assertEquals("toFromString without altitude", in, out);
}
// Static factory for FeatureResolver; performs no caching or validation beyond
// what the constructor itself does.
static FeatureResolver create(
        ConfigurationParameters parameters,
        CucumberEngineDescriptor engineDescriptor,
        Predicate<String> packageFilter
) {
    final FeatureResolver resolver =
        new FeatureResolver(parameters, engineDescriptor, packageFilter);
    return resolver;
}
// Verifies the descriptor built for an example row of a scenario outline:
// display name, inherited tags, source position, type, unique id, and package.
@Test
void example() {
    TestDescriptor example = getExample();
    assertEquals("Example #1.1", example.getDisplayName());
    // Tags are inherited from feature and scenario outline in addition to the example's own.
    assertEquals(
        asSet(create("FeatureTag"), create("Example1Tag"), create("ScenarioOutlineTag")),
        example.getTags());
    assertEquals(of(from(featurePath, from(19, 7))), example.getSource());
    assertEquals(TEST, example.getType());
    assertEquals(
        id.append("feature", featureSegmentValue)
            .append("scenario", "11")
            .append("examples", "17")
            .append("example", "19"),
        example.getUniqueId());
    PickleDescriptor pickleDescriptor = (PickleDescriptor) example;
    assertEquals(Optional.of("io.cucumber.junit.platform.engine"), pickleDescriptor.getPackage());
}
// Pushes the current giant model to the view for rendering (MVC controller hook).
public void updateView() {
    this.view.displayGiant(giant);
}
// Verifies updateView forwards exactly one displayGiant(model) call to the view
// and performs no other interactions.
@Test
void testUpdateView() {
    final var model = mock(GiantModel.class);
    final var view = mock(GiantView.class);
    final var controller = new GiantController(model, view);
    verifyNoMoreInteractions(model, view);

    controller.updateView();
    verify(view).displayGiant(model);
    verifyNoMoreInteractions(model, view);
}
// Returns the cached processor count.
// NOTE(review): the static field appears to be overridable elsewhere (for tests);
// confirm against the rest of the class.
public static int get() {
    return currentAvailableProcessors;
}
// Without an override, get() must report the JVM's actual processor count.
@Test
public void getAvailableProcessors_withoutOverride() {
    int availableProcessors = Runtime.getRuntime().availableProcessors();
    assertEquals(availableProcessors, RuntimeAvailableProcessors.get());
}
/**
 * Standard equals contract: identical reference, exact-class match, then
 * delegate the field comparison to {@code isEqualTo}.
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (o == null) {
        return false;
    }
    if (getClass() != o.getClass()) {
        return false;
    }
    return isEqualTo((Version) o);
}
// Verifies Version equality: equal for same major/minor, unequal across versions,
// for UNKNOWN vs. concrete versions, and against unrelated types.
@Test
public void equals() throws Exception {
    assertEquals(Version.UNKNOWN, Version.UNKNOWN);
    assertEquals(Version.of(3, 0), Version.of(3, 0));
    assertFalse(Version.of(3, 0).equals(Version.of(4, 0)));
    assertFalse(Version.UNKNOWN.equals(Version.of(4, 0)));
    assertFalse(Version.UNKNOWN.equals(new Object()));
}
// Runs every step of the scenario in order. Structured so afterRun() and latch
// bookkeeping always execute, even when a step or the framework itself crashes —
// otherwise a parallel runner could hang waiting on this scenario.
@Override
public void run() {
    try {
        // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() drives iteration (supports debug step-back rewinding).
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Framework-level failure: keep what we have and add a synthetic failed step.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
// With continueOnStepFailure enabled, all failing match steps run; the reported
// failed step must be the last failure inside the continue window ("crawl").
@Test
void testContinueOnStepFailure4() {
    fail = true;
    run(
        "def var = 'foo'",
        "configure continueOnStepFailure = true",
        "match var == 'bar'",
        "match var == 'pub'",
        "match var == 'crawl'",
        "match var == 'foo'",
        "configure continueOnStepFailure = false",
        "match var == 'foo'"
    );
    assertEquals("match var == 'crawl'", sr.result.getFailedStep().getStep().getText());
}
/**
 * Returns the NamespaceName for the given tenant and namespace. The pair is
 * validated first, then resolved through the single-string factory.
 */
public static NamespaceName get(String tenant, String namespace) {
    validateNamespaceName(tenant, namespace);
    final String composite = tenant + '/' + namespace;
    return get(composite);
}
// A bare IP address is not a valid tenant/namespace string and must be rejected.
@Test(expectedExceptions = IllegalArgumentException.class)
public void namespace_loopBackAddress() {
    NamespaceName.get("0.0.0.0");
}
// Static factory: builds a ConfigRepoConfig from its material, plugin id and id.
// The fluent setters are declared on a base type, so one downcast on the chain
// result is required.
public static ConfigRepoConfig createConfigRepoConfig(MaterialConfig repo, String pluginId, String id) {
    final ConfigRepoConfig config = new ConfigRepoConfig();
    return (ConfigRepoConfig) config.setRepo(repo).setPluginId(pluginId).setId(id);
}
// Verifies ConfigRepoConfig.validateTree cascades validation into its material config.
@Test
public void validateTree_shouldValidateTheMaterialConfig() {
    CruiseConfig cruiseConfig = new BasicCruiseConfig();
    MaterialConfig materialConfig = mock(MaterialConfig.class);
    when(materialConfig.errors()).thenReturn(new ConfigErrors());
    ConfigRepoConfig configRepoConfig = ConfigRepoConfig.createConfigRepoConfig(materialConfig, "plug", "id");
    cruiseConfig.setConfigRepos(new ConfigReposConfig(configRepoConfig));
    ConfigSaveValidationContext validationContext = ConfigSaveValidationContext.forChain(cruiseConfig);

    configRepoConfig.validateTree(validationContext);

    verify(materialConfig).validateTree(validationContext);
}
// Lifecycle hook: installs the custom setting loader when this component starts.
@Override
public void start() {
    settings.setSettingLoader(loader);
}
// Verifies start() installs the loader on the settings object.
@Test
public void change_loader_at_startup() {
    underTest.start();
    verify(settings).setSettingLoader(loader);
}
// Derives the effective predicate of a plan node by visiting it with the
// predicate-extraction visitor.
public RowExpression extract(PlanNode node) {
    final Visitor visitor = new Visitor(domainTranslator, functionAndTypeManager);
    return node.accept(visitor, null);
}
// Verifies the effective predicate of a filter node drops non-deterministic
// conjuncts (rand()) and keeps the deterministic ones.
@Test
public void testFilter() {
    PlanNode node = filter(baseTableScan,
        and(
            greaterThan(AV, call(metadata.getFunctionAndTypeManager(), "rand", DOUBLE, ImmutableList.of())),
            lessThan(BV, bigintLiteral(10))));

    RowExpression effectivePredicate = effectivePredicateExtractor.extract(node);

    // Non-deterministic functions should be purged
    assertEquals(normalizeConjuncts(effectivePredicate), normalizeConjuncts(lessThan(BV, bigintLiteral(10))));
}
// Returns a defensive snapshot of the registered UDF factories; callers never
// see a live view of the registry. Synchronized to pair with registry mutation.
@Override
public synchronized List<UdfFactory> listFunctions() {
    final List<UdfFactory> snapshot = new ArrayList<>(udfs.values());
    return snapshot;
}
// A fresh internal registry must contain no built-in UDFs.
@Test
public void shouldHaveNoBuiltInUDFsRegistered() {
    final Collection<String> names = Collections2.transform(functionRegistry.listFunctions(), UdfFactory::getName);
    assertThat("One or more built-in Kudfs are registered in the InternalFunctionRegistry", names, hasSize(0));
}
// Lists Azure DevOps git repositories, optionally scoped to a project.
// Blank URL parts (e.g. a null projectName) are skipped when assembling the path.
public GsonAzureRepoList getRepos(String serverUrl, String token, @Nullable String projectName) {
    String url = Stream.of(getTrimmedUrl(serverUrl), projectName, "_apis/git/repositories?" + API_VERSION_3)
        .filter(StringUtils::isNotBlank)
        .collect(joining("/"));
    return doGet(token, url, r -> buildGson().fromJson(r.body().charStream(), GsonAzureRepoList.class));
}
// A 200 response with a non-JSON body must surface as the generic
// "unable to contact" IllegalArgumentException.
@Test
public void get_repos_non_json_payload() {
    enqueueResponse(200, NON_JSON_PAYLOAD);
    assertThatThrownBy(() -> underTest.getRepos(server.url("").toString(), "token", null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage(UNABLE_TO_CONTACT_AZURE);
}
// Convenience overload: expect a log entry matching the given level/tag/message
// with no associated throwable.
public void expectLogMessage(int level, String tag, Matcher<String> messageMatcher) {
    expectLog(level, tag, messageMatcher, null);
}
// Registering the same expectation twice requires two matching log lines;
// with only one logged message the rule must fail at teardown.
@Test
public void expectLogMessage_duplicateMatchers_areNotDeduplicated() {
    Log.e("Mytag", "message1");
    rule.expectLogMessage(Log.ERROR, "Mytag", Matchers.equalTo("message1"));
    rule.expectLogMessage(Log.ERROR, "Mytag", Matchers.equalTo("message1"));
    expectedException.expect(Matchers.isA(AssertionError.class));
}
// Tests an attribute value against this LIKE-style condition; null never matches.
// `length` (when != -1) pins the exact total string length for the StartsWith /
// EndsWith forms — NOTE(review): presumably this encodes patterns containing
// single-character wildcards (e.g. "ab_"); confirm with the parser.
@Override
public boolean match(String attributeValue) {
    if (attributeValue == null) {
        return false;
    }
    switch (type) {
        case Equals:
            return attributeValue.equals(value);
        case StartsWith:
            return (length == -1 || length == attributeValue.length()) && attributeValue.startsWith(value);
        case EndsWith:
            return (length == -1 || length == attributeValue.length()) && attributeValue.endsWith(value);
        case Contains:
            return attributeValue.contains(value);
        case Regexp:
            // Full-string match, not a substring find.
            return regexPattern.matcher(attributeValue).matches();
        default:
            throw new IllegalStateException("Unexpected type " + type);
    }
}
// A pattern without wildcards degenerates to exact string equality.
@Test
public void testDegeneratedEquals() {
    LikeCondition likeCondition = new LikeCondition("ab");
    assertTrue(likeCondition.match("ab"));
    assertFalse(likeCondition.match("ac"));
    assertFalse(likeCondition.match("XabY"));
}
/**
 * Returns a copy of this component config with every output stream's throughput
 * scaled by {@code v}; when there are no streams there is nothing to scale and
 * this instance is returned unchanged.
 */
public LoadCompConf scaleThroughput(double v) {
    if (streams == null) {
        return this;
    }
    final List<OutputStream> scaledStreams =
        streams.stream().map(stream -> stream.scaleThroughput(v)).collect(Collectors.toList());
    return new LoadCompConf(id, parallelism, scaledStreams, cpuLoad, memoryLoad, slp);
}
// Scaling throughput by 2x doubles the aggregate emit rate while leaving
// parallelism and identity untouched.
@Test
public void scaleThroughput() {
    LoadCompConf orig = new LoadCompConf.Builder()
        .withId("SOME_SPOUT")
        .withParallelism(1)
        .withStream(new OutputStream("default", new NormalDistStats(500.0, 100.0, 300.0, 600.0), false))
        .build();
    assertEquals(500.0, orig.getAllEmittedAggregate(), 0.001);
    LoadCompConf scaled = orig.scaleThroughput(2.0);
    //Parallelism is same
    assertEquals(1, scaled.parallelism);
    assertEquals("SOME_SPOUT", scaled.id);
    //But throughput is doubled
    assertEquals(1000.0, scaled.getAllEmittedAggregate(), 0.001);
}
// Computes the input splits for the job: lists the input files, then for each
// splittable file emits block-aligned splits of computeSplitSize(...) bytes
// (allowing up to SPLIT_SLOP slack on the final split). Non-splittable files
// become one split each; empty files get a split with no host locality.
public List<InputSplit> getSplits(JobContext job) throws IOException {
    StopWatch sw = new StopWatch().start();
    long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
    long maxSize = getMaxSplitSize(job);

    // generate splits
    List<InputSplit> splits = new ArrayList<InputSplit>();
    List<FileStatus> files = listStatus(job);

    // Directories are skipped only when not recursing and explicitly configured to ignore them.
    boolean ignoreDirs = !getInputDirRecursive(job)
        && job.getConfiguration().getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
    for (FileStatus file: files) {
      if (ignoreDirs && file.isDirectory()) {
        continue;
      }
      Path path = file.getPath();
      long length = file.getLen();
      if (length != 0) {
        BlockLocation[] blkLocations;
        // LocatedFileStatus already carries block locations; avoids an extra namenode RPC.
        if (file instanceof LocatedFileStatus) {
          blkLocations = ((LocatedFileStatus) file).getBlockLocations();
        } else {
          FileSystem fs = path.getFileSystem(job.getConfiguration());
          blkLocations = fs.getFileBlockLocations(file, 0, length);
        }
        if (isSplitable(job, path)) {
          long blockSize = file.getBlockSize();
          long splitSize = computeSplitSize(blockSize, minSize, maxSize);

          long bytesRemaining = length;
          // Cut full-size splits while more than SPLIT_SLOP of a split remains.
          while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
            int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
            splits.add(makeSplit(path, length-bytesRemaining, splitSize,
                blkLocations[blkIndex].getHosts(),
                blkLocations[blkIndex].getCachedHosts()));
            bytesRemaining -= splitSize;
          }

          // Remainder (<= SPLIT_SLOP * splitSize) becomes the last split.
          if (bytesRemaining != 0) {
            int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
            splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
                blkLocations[blkIndex].getHosts(),
                blkLocations[blkIndex].getCachedHosts()));
          }
        } else { // not splitable
          if (LOG.isDebugEnabled()) {
            // Log only if the file is big enough to be splitted
            if (length > Math.min(file.getBlockSize(), minSize)) {
              LOG.debug("File is not splittable so no parallelization "
                  + "is possible: " + file.getPath());
            }
          }
          splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
              blkLocations[0].getCachedHosts()));
        }
      } else {
        // Create empty hosts array for zero length files
        splits.add(makeSplit(path, 0, length, new String[0]));
      }
    }
    // Save the number of input files for metrics/loadgen
    job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
    sw.stop();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Total # of splits generated by getSplits: " + splits.size()
          + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
    }
    return splits;
}
// With recursive listing enabled (both the current and the deprecated config key),
// files nested in subdirectories must contribute to the splits.
@Test
public void testNumInputFilesRecursively() throws Exception {
    Configuration conf = getConfiguration();
    conf.set(FileInputFormat.INPUT_DIR_RECURSIVE, "true");
    conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
    Job job = Job.getInstance(conf);
    FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
    List<InputSplit> splits = fileInputFormat.getSplits(job);
    Assert.assertEquals("Input splits are not correct", 3, splits.size());
    verifySplits(Lists.newArrayList("test:/a1/a2/file2", "test:/a1/a2/file3", "test:/a1/file1"), splits);

    // Using the deprecated configuration
    conf = getConfiguration();
    conf.set("mapred.input.dir.recursive", "true");
    job = Job.getInstance(conf);
    splits = fileInputFormat.getSplits(job);
    verifySplits(Lists.newArrayList("test:/a1/a2/file2", "test:/a1/a2/file3", "test:/a1/file1"), splits);
}
// Looks up the JDBC DataSources to monitor. When the "datasources" parameter is
// unset, scans the usual JNDI roots of the various app servers; when set, looks
// up exactly the listed JNDI names. Returns an unmodifiable map keyed by JNDI name.
static Map<String, DataSource> getJndiDataSources() throws NamingException {
    final Map<String, DataSource> dataSources = new LinkedHashMap<>(2);
    final String datasourcesParameter = Parameter.DATASOURCES.getValue();
    if (datasourcesParameter == null) {
        dataSources.putAll(getJndiDataSourcesAt("java:comp/env/jdbc"));
        // for JBoss without jboss-env.xml or without a resource-ref in web.xml:
        dataSources.putAll(getJndiDataSourcesAt("java:/jdbc"));
        // for Java EE 6:
        // (see for example http://smokeandice.blogspot.com/2009/12/datasourcedefinition-hidden-gem-from.html)
        dataSources.putAll(getJndiDataSourcesAt("java:global/jdbc"));
        // for WebLogic 10 and WebSphere 7, see issue 68
        dataSources.putAll(getJndiDataSourcesAt("jdbc"));
    } else if (!datasourcesParameter.trim().isEmpty()) {
        final InitialContext initialContext = new InitialContext();
        for (final String datasource : datasourcesParameter.split(",")) {
            final String jndiName = datasource.trim();
            // here we do not prepend java:comp/env and assume it is either
            // unnecessary or already included in the configured value
            final DataSource dataSource = (DataSource) initialContext.lookup(jndiName);
            dataSources.put(jndiName, dataSource);
        }
        initialContext.close();
    }
    return Collections.unmodifiableMap(dataSources);
}
// Smoke test: exercises getJndiDataSources under the three parameter states
// (unset, empty, a concrete name) and restores the unset state afterwards.
// NOTE(review): this test has no assertions — it only verifies that no
// exception escapes; consider asserting on the returned map.
@Test
public void testGetJndiDataSources() {
    getJndiDataSources();
    setProperty(Parameter.DATASOURCES, "");
    getJndiDataSources();
    setProperty(Parameter.DATASOURCES, "testDataSource");
    getJndiDataSources();
    setProperty(Parameter.DATASOURCES, null);
}
/**
 * Maps a simulation type to the runner factory for it. Uses constant-first
 * equals so a null type falls through to the error path instead of throwing NPE.
 *
 * @throws IllegalArgumentException for null or unsupported types
 */
public static ScenarioRunnerProvider getSpecificRunnerProvider(Type type) {
    if (Type.RULE.equals(type)) {
        return RuleScenarioRunner::new;
    }
    if (Type.DMN.equals(type)) {
        return DMNScenarioRunner::new;
    }
    throw new IllegalArgumentException("Impossible to run simulation of type " + type);
}
// A null type must be rejected with IllegalArgumentException rather than NPE.
@Test(expected = IllegalArgumentException.class)
public void getSpecificRunnerProviderNullType() {
    settingsLocal.setType(null);
    AbstractScenarioRunner.getSpecificRunnerProvider(null);
}
// Returns a ByteBuffer view over the unread portion of this buffer,
// i.e. from the current reader index to the end.
public ByteBuffer sliceAsByteBuffer() {
    final int offset = readerIndex;
    final int remaining = size - offset;
    return sliceAsByteBuffer(offset, remaining);
}
// Verifies sliceAsByteBuffer for heap-array-backed, direct, and raw-address-backed
// buffers: content equality and, for off-heap buffers, the exact base address
// offset by the reader index.
@Test
public void testSliceAsByteBuffer() {
    byte[] data = new byte[10];
    new Random().nextBytes(data);
    {
        // Heap buffer wrapping a sub-range of the array.
        MemoryBuffer buffer = MemoryUtils.wrap(data, 5, 5);
        assertEquals(buffer.sliceAsByteBuffer(), ByteBuffer.wrap(data, 5, 5));
    }
    {
        // Direct buffer: slice must share the direct memory (same address + 5).
        ByteBuffer direct = ByteBuffer.allocateDirect(10);
        direct.put(data);
        direct.flip();
        direct.position(5);
        MemoryBuffer buffer = MemoryUtils.wrap(direct);
        assertEquals(buffer.sliceAsByteBuffer(), direct);
        Assert.assertEquals(
            ByteBufferUtil.getAddress(buffer.sliceAsByteBuffer()),
            ByteBufferUtil.getAddress(direct) + 5);
    }
    {
        // Manually allocated off-heap memory wrapped as a direct buffer.
        long address = 0;
        try {
            address = Platform.allocateMemory(10);
            ByteBuffer direct = ByteBufferUtil.wrapDirectBuffer(address, 10);
            direct.put(data);
            direct.flip();
            direct.position(5);
            MemoryBuffer buffer = MemoryUtils.wrap(direct);
            assertEquals(buffer.sliceAsByteBuffer(), direct);
            assertEquals(ByteBufferUtil.getAddress(buffer.sliceAsByteBuffer()), address + 5);
        } finally {
            Platform.freeMemory(address);
        }
    }
}
// Returns the shared singleton XStream instance configured for XML (de)serialization.
public static XmlXStream getInstance() {
    return s_instance;
}
// Round-trip: Applications without metadata must survive XML encode/decode unchanged.
@Test
public void testEncodingDecodingWithoutMetaData() throws Exception {
    Applications applications = InstanceInfoGenerator.newBuilder(10, 2).withMetaData(false).build().toApplications();

    XStream xstream = XmlXStream.getInstance();
    String xmlDocument = xstream.toXML(applications);

    Applications decodedApplications = (Applications) xstream.fromXML(xmlDocument);
    assertThat(EurekaEntityComparators.equal(decodedApplications, applications), is(true));
}
// Parses a natural-language date expression, anchoring relative phrases
// (e.g. "last week") to the current moment.
public Result parse(final String string) throws DateNotParsableException {
    final Date referenceDate = new Date();
    return this.parse(string, referenceDate);
}
// "last week" with a German locale must span Monday-to-Monday (ISO week start),
// anchored to the given reference date.
@Test
public void testParseLastWeekGermany() throws Exception {
    final NaturalDateParser naturalDateParser = new NaturalDateParser(Locale.GERMANY);
    DateTime reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 09:45:23");
    NaturalDateParser.Result result = naturalDateParser.parse("last week", reference.toDate());
    DateTime lastMonday = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("31.05.2021 00:00:00");
    DateTime nextMonday = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("07.06.2021 00:00:00");
    assertThat(result.getFrom()).as("should be equal to").isEqualTo(lastMonday);
    assertThat(result.getTo()).as("should be equal to").isEqualTo(nextMonday);
}
// Determines the candidate regions for this S3 host:
// - a region pinned in the bookmark wins and is returned alone;
// - a bucket encoded in the hostname means a single-bucket connection -> empty set;
// - a third-party endpoint that only has the generic S3 region list is treated
//   as an unknown provider -> empty set;
// - otherwise, the protocol's full region list.
@Override
public Set<Name> getLocations() {
    if(StringUtils.isNotBlank(session.getHost().getRegion())) {
        final S3Region region = new S3Region(session.getHost().getRegion());
        if(log.isDebugEnabled()) {
            log.debug(String.format("Return single region %s set in bookmark", region));
        }
        return Collections.singleton(region);
    }
    if(StringUtils.isNotEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Return empty set for hostname %s", session.getHost()));
        }
        // Connected to single bucket
        return Collections.emptySet();
    }
    if(!S3Session.isAwsHostname(session.getHost().getHostname(), false)) {
        if(new S3Protocol().getRegions().equals(session.getHost().getProtocol().getRegions())) {
            // Return empty set for unknown provider
            if(log.isDebugEnabled()) {
                log.debug(String.format("Return empty set for unknown provider %s", session.getHost()));
            }
            return Collections.emptySet();
        }
    }
    return session.getHost().getProtocol().getRegions();
}
// A provider profile with its own region list (Wasabi) must yield a non-empty
// location set that contains the profile's region.
@Test
public void testNonEmptyProfile() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new S3Protocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/Wasabi (us-central-1).cyberduckprofile"));
    final S3Session session = new S3Session(new Host(profile, profile.getDefaultHostname()));
    final S3LocationFeature feature = new S3LocationFeature(session);
    assertFalse(feature.getLocations().isEmpty());
    assertTrue(feature.getLocations().contains(new Location.Name("us-central-1")));
}
// Delegates temporal field access to the wrapped OffsetTime.
@Override
public long getLong(TemporalField field) {
    return offsetTime.getLong(field);
}
// For every ChronoField supported by the wrapped OffsetTime, the zone-aware
// wrapper must report the same getLong value.
@Test
void getLong() {
    Arrays.stream(ChronoField.values()).filter(offsetTime::isSupported)
        .forEach(field -> assertEquals(offsetTime.getLong(field), zoneTime.getLong(field)));
}
// Renders the topology with no indentation prefix.
@Override
public String toString() {
    return toString("");
}
// The topology's string form must list each source node with all of its topics.
@Test
public void shouldCreateStringWithMultipleSourcesAndTopics() {
    topology.addSource("source", "topic1", "topic2");
    topology.addSource("source2", "t", "t1", "t2");
    final ProcessorTopology processorTopology = topology.getInternalBuilder().buildTopology();
    final String result = processorTopology.toString();
    assertThat(result, containsString("source:\n\t\ttopics:\t\t[topic1, topic2]\n"));
    assertThat(result, containsString("source2:\n\t\ttopics:\t\t[t, t1, t2]\n"));
}
// Spawns one child process per configured command that declares a server
// command, forwarding port/log/callback settings as CLI flags. Commands with
// no server command, and processes that fail to start (empty Optional), are
// dropped from the returned list.
public ImmutableList<Process> runServerProcesses() {
    logger.atInfo().log("Starting language server processes (if any)...");
    return commands.stream()
        // Filter out commands that don't need server start up
        .filter(command -> !Strings.isNullOrEmpty(command.serverCommand()))
        .map(
            command ->
                runProcess(
                    CommandExecutorFactory.create(
                        command.serverCommand(),
                        getCommand("--port=", command.port()),
                        getCommand("--log_id=", command.logId()),
                        getCommand("--log_output=", command.outputDir()),
                        "--trust_all_ssl_cert=" + command.trustAllSslCert(),
                        getCommand("--timeout_seconds=", command.timeoutSeconds().getSeconds()),
                        getCommand("--callback_address=", command.callbackAddress()),
                        getCommand("--callback_port=", command.callbackPort()),
                        getCommand("--polling_uri=", command.pollingUri()))))
        .filter(Optional::isPresent)
        .map(Optional::get)
        .collect(toImmutableList());
}
// A single well-formed command with an existing binary must produce exactly one
// non-null process.
@Test
public void runServerProcess_whenPathExistsAndNormalPort_returnsValidProcessList() {
    ImmutableList<LanguageServerCommand> commands =
        ImmutableList.of(
            LanguageServerCommand.create(
                "/bin/sh",
                "",
                "34567",
                "34",
                "/output-here",
                false,
                Duration.ofSeconds(10),
                "157.34.0.2",
                8080,
                "157.34.0.2:8881",
                0));
    RemoteServerLoader loader =
        Guice.createInjector(new RemoteServerLoaderModule(commands))
            .getInstance(RemoteServerLoader.class);
    var processList = loader.runServerProcesses();
    assertThat(processList).hasSize(1);
    assertThat(processList.get(0)).isNotNull();
}
/**
 * Resolves compatibility against a previous snapshot: anything other than an
 * AvroSerializerSnapshot is incompatible; otherwise defer to Avro schema
 * resolution between the old and current schema.
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    if (oldSerializerSnapshot instanceof AvroSerializerSnapshot) {
        AvroSerializerSnapshot<?> previousSnapshot = (AvroSerializerSnapshot<?>) oldSerializerSnapshot;
        return resolveSchemaCompatibility(previousSnapshot.schema, schema);
    }
    return TypeSerializerSchemaCompatibility.incompatible();
}
// A snapshot whose serialized schema exceeds the 64 KiB short-string limit must
// still round-trip and resolve as compatible-as-is.
@Test
void aLargeSchemaAvroSnapshotIsCompatibleAfterARoundTrip() throws IOException {
    // construct the large schema up to a size of 65535 bytes.
    int thresholdSize = 65535;
    StringBuilder schemaField = new StringBuilder(thresholdSize);
    for (int i = 0; i <= thresholdSize; i++) {
        schemaField.append('a');
    }
    Schema largeSchema =
        SchemaBuilder.record("name")
            .namespace("org.apache.flink")
            .fields()
            .requiredString(schemaField.toString())
            .endRecord();
    AvroSerializer<GenericRecord> serializer = new AvroSerializer<>(GenericRecord.class, largeSchema);
    AvroSerializerSnapshot<GenericRecord> restored = roundTrip(serializer.snapshotConfiguration());
    assertThat(serializer.snapshotConfiguration().resolveSchemaCompatibility(restored))
        .is(isCompatibleAsIs());
}
// Creates a Position with no component offsets. Backed by a ConcurrentHashMap —
// presumably so later merges can happen safely across threads; confirm with callers.
public static Position emptyPosition() {
    return new Position(new ConcurrentHashMap<>());
}
// Position deliberately rejects hashCode, so it must also be unusable as a
// HashSet element or HashMap key.
@Test
public void shouldNotHash() {
    final Position position = Position.emptyPosition();
    assertThrows(UnsupportedOperationException.class, position::hashCode);

    // going overboard...
    final HashSet<Position> set = new HashSet<>();
    assertThrows(UnsupportedOperationException.class, () -> set.add(position));
    final HashMap<Position, Integer> map = new HashMap<>();
    assertThrows(UnsupportedOperationException.class, () -> map.put(position, 5));
}
// Returns the configured HTTP bind address, requiring bracketed IPv6 literals
// and falling back to the Graylog default port when none was specified.
public HostAndPort getHttpBindAddress() {
    final HostAndPort bracketed = httpBindAddress.requireBracketsForIPv6();
    return bracketed.withDefaultPort(GRAYLOG_DEFAULT_PORT);
}
// A bracketed IPv6 literal with an explicit port must parse into the matching HostAndPort.
@Test
public void testHttpBindAddressIsIPv6Address() throws RepositoryException, ValidationException {
    jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_bind_address", "[2001:db8::1]:9000")))
        .addConfigurationBean(configuration)
        .process();

    assertThat(configuration.getHttpBindAddress()).isEqualTo(HostAndPort.fromParts("[2001:db8::1]", 9000));
}
// Moves the logical read position after validating stream state and target;
// the actual data fetch is deferred until the next read.
public void seek(long pos) throws IOException {
    throwIfClosed();
    throwIfInvalidSeek(pos);
    nextReadPos = pos;
}
// Exercises seek on both the in-memory and the caching prefetch stream variants.
@Test
public void testSeek() throws Exception {
    S3ARemoteInputStream inputStream;
    inputStream = S3APrefetchFakes.createS3InMemoryInputStream(futurePool, "bucket", "key", 9);
    testSeekHelper(inputStream, 9, 9);

    inputStream =
        S3APrefetchFakes.createS3CachingInputStream(futurePool, "bucket", "key", 9, 5, 1);
    testSeekHelper(inputStream, 5, 9);
}
// Selects one healthy instance of the service with no cluster filtering
// (delegates with an empty cluster list).
@Override
public Instance selectOneHealthyInstance(String serviceName) throws NacosException {
    return selectOneHealthyInstance(serviceName, new ArrayList<>());
}
// Selecting with group and clusters must query the proxy once with the cluster
// list joined by commas and subscribe=false.
@Test
void testSelectOneHealthyInstance8() throws NacosException {
    //given
    Instance healthyInstance = new Instance();
    healthyInstance.setIp("1.1.1.1");
    healthyInstance.setPort(1000);
    List<Instance> hosts = new ArrayList<>();
    hosts.add(healthyInstance);
    ServiceInfo infoWithHealthyInstance = new ServiceInfo();
    infoWithHealthyInstance.setHosts(hosts);
    when(proxy.queryInstancesOfService(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(
        infoWithHealthyInstance);
    String serviceName = "service1";
    String groupName = "group1";
    List<String> clusterList = Arrays.asList("cluster1", "cluster2");
    //when
    client.selectOneHealthyInstance(serviceName, groupName, clusterList, false);
    //then
    verify(proxy, times(1)).queryInstancesOfService(serviceName, groupName, "cluster1,cluster2", false);
}
// Loads the search visible to this user and executes it synchronously.
// Throws NotFoundException when the id does not resolve for the user
// (missing or not permitted — indistinguishable by design).
public SearchJob executeSync(String searchId, SearchUser searchUser, ExecutionState executionState) {
    return searchDomain.getForUser(searchId, searchUser)
        .map(s -> executeSync(s, searchUser, executionState))
        .orElseThrow(() -> new NotFoundException("No search found with id <" + searchId + ">."));
}
// When the domain yields no search for the user, executeSync must throw
// NotFoundException with the id in the message.
@Test
public void throwsExceptionIfSearchIsNotFound() {
    final SearchUser searchUser = TestSearchUser.builder().build();
    when(searchDomain.getForUser(eq("search1"), eq(searchUser))).thenReturn(Optional.empty());

    assertThatExceptionOfType(NotFoundException.class)
        .isThrownBy(() -> this.searchExecutor.executeSync("search1", searchUser, ExecutionState.empty()))
        .withMessage("No search found with id <search1>.");
}
// Decodes an SPDY header block, buffering partial blocks across calls: with no
// pending data, decode directly and stash any unread remainder; otherwise append
// to the pending buffer, decode from it, and release it once fully consumed.
@Override
void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception {
    ObjectUtil.checkNotNull(headerBlock, "headerBlock");
    ObjectUtil.checkNotNull(frame, "frame");

    if (cumulation == null) {
        decodeHeaderBlock(headerBlock, frame);
        if (headerBlock.isReadable()) {
            // Partial header block left over: keep it for the next call.
            cumulation = alloc.buffer(headerBlock.readableBytes());
            cumulation.writeBytes(headerBlock);
        }
    } else {
        cumulation.writeBytes(headerBlock);
        decodeHeaderBlock(cumulation, frame);
        if (cumulation.isReadable()) {
            // Still incomplete: compact consumed bytes and keep accumulating.
            cumulation.discardReadBytes();
        } else {
            releaseBuffer();
        }
    }
}
// A negative value length must consume the block, mark the frame invalid,
// and produce no headers.
@Test
public void testNegativeValueLength() throws Exception {
    ByteBuf headerBlock = Unpooled.buffer(16);
    headerBlock.writeInt(1);
    headerBlock.writeInt(4);
    headerBlock.writeBytes(nameBytes);
    headerBlock.writeInt(-1);
    decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame);
    assertFalse(headerBlock.isReadable());
    assertTrue(frame.isInvalid());
    assertEquals(0, frame.headers().names().size());
    headerBlock.release();
}
/**
 * Releases this agent by closing the established netty connection writer, if any.
 * A null writer simply means no connection was ever established, so there is
 * nothing to close.
 */
void release() {
    if (nettyConnectionWriter != null) {
        // The null-guard above already guarantees non-null; the previous
        // redundant checkNotNull wrapper has been removed.
        nettyConnectionWriter.close(null);
    }
}
// release() must close the previously established connection writer.
@Test
void testRelease() {
    MemoryTierSubpartitionProducerAgent subpartitionProducerAgent = createSubpartitionProducerAgent();
    AtomicBoolean isClosed = new AtomicBoolean(false);
    TestingNettyConnectionWriter connectionWriter =
        new TestingNettyConnectionWriter.Builder()
            .setCloseFunction(
                throwable -> {
                    isClosed.set(true);
                    return null;
                })
            .build();
    subpartitionProducerAgent.connectionEstablished(connectionWriter);
    subpartitionProducerAgent.release();
    assertThat(isClosed).isTrue();
}
// Validates record batches and assigns offsets via the cheapest applicable path:
// when both source and target are uncompressed, records are fixed up in place
// (or format-converted if the magic differs); otherwise the compressed path
// re-validates and possibly re-compresses.
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
                                                         MetricsRecorder metricsRecorder,
                                                         BufferSupplier bufferSupplier) {
    if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
        // check the magic value
        if (!records.hasMatchingMagic(toMagic))
            return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
        else
            // Do in-place validation, offset assignment and maybe set timestamp
            return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
    } else
        return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
// Up-converting compressed V0 records to V1 must re-assign offsets starting at
// the supplied base offset and report conversion stats for all records.
@Test
public void testOffsetAssignmentAfterUpConversionV0ToV1Compressed() {
    Compression compression = Compression.gzip().build();
    MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V0, RecordBatch.NO_TIMESTAMP, compression);
    checkOffsets(records, 0);
    long offset = 1234567;
    LogValidator.ValidationResult validatedResults = new LogValidator(
        records,
        new TopicPartition("topic", 0),
        time,
        CompressionType.GZIP,
        compression,
        false,
        RecordBatch.MAGIC_VALUE_V1,
        TimestampType.LOG_APPEND_TIME,
        1000L,
        1000L,
        RecordBatch.NO_PARTITION_LEADER_EPOCH,
        AppendOrigin.CLIENT,
        MetadataVersion.latestTesting()
    ).validateMessagesAndAssignOffsets(
        PrimitiveRef.ofLong(offset),
        metricsRecorder,
        RequestLocal.withThreadConfinedCaching().bufferSupplier()
    );
    checkOffsets(validatedResults.validatedRecords, offset);
    verifyRecordValidationStats(
        validatedResults.recordValidationStats,
        3, // numConvertedRecords
        records,
        true // compressed
    );
}
// Configures a scalar target: resolves any secret/env placeholders in the
// scalar's string form, then converts the resolved string to the target type
// via Stapler's registered converter.
@NonNull
@Override
public Object configure(CNode config, ConfigurationContext context) throws ConfiguratorException {
    return Stapler.lookupConverter(target)
        .convert(
            target,
            context.getSecretSourceResolver()
                .resolve(config.asScalar().toString()));
}
// An undefined variable with a ":-" default must resolve to the default value,
// even when an unrelated env var is set.
@Test
public void _string_env_default() throws Exception {
    environment.set("NOT_THERE", "abc");
    Configurator c = registry.lookupOrFail(String.class);
    final Object value = c.configure(new Scalar("${ENV_FOR_TEST:-unsecured-token}"), context);
    assertEquals("unsecured-token", value);
}
/**
 * Wraps a collection, treating both null and empty collections as absent —
 * a one-call replacement for the null-check + isEmpty-check pattern.
 */
public static <T, R extends Collection<T>> Opt<R> ofEmptyAble(R value) {
    if (ObjectUtil.isEmpty(value)) {
        return empty();
    }
    return new Opt<>(value);
}
// ofEmptyAble must behave like ofNullable(...).filter(isNotEmpty) in one call.
@Test
public void ofEmptyAbleTest() {
    // Previously: typing CollectionUtil was painful — many similarly-prefixed
    // utility classes made code completion nearly useless.
    // And null/empty-collection checks are extremely common...
    List<String> past = Opt.ofNullable(Collections.<String>emptyList()).filter(CollectionUtil::isNotEmpty).orElseGet(() -> Collections.singletonList("hutool"));
    // Now a single ofEmptyAble call does it.
    List<String> hutool = Opt.ofEmptyAble(Collections.<String>emptyList()).orElseGet(() -> Collections.singletonList("hutool"));
    assertEquals(past, hutool);
    assertEquals(hutool, Collections.singletonList("hutool"));
}
@Nullable @SuppressWarnings("checkstyle:returncount") static Metadata resolve(InternalSerializationService ss, Object target, boolean key) { try { if (target instanceof Data) { Data data = (Data) target; if (data.isPortable()) { ClassDefinition classDefinition = ss.getPortableContext().lookupClassDefinition(data); return resolvePortable(classDefinition, key); } else if (data.isCompact()) { return resolveCompact(ss.extractSchemaFromData(data), key); } else if (data.isJson()) { return null; } else { return resolveJava(ss.toObject(data).getClass(), key); } } else if (target instanceof VersionedPortable) { VersionedPortable portable = (VersionedPortable) target; ClassDefinition classDefinition = ss.getPortableContext() .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), portable.getClassVersion()); return resolvePortable(classDefinition, key); } else if (target instanceof Portable) { Portable portable = (Portable) target; ClassDefinition classDefinition = ss.getPortableContext() .lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), 0); return resolvePortable(classDefinition, key); } else if (target instanceof PortableGenericRecord) { return resolvePortable(((PortableGenericRecord) target).getClassDefinition(), key); } else if (target instanceof CompactGenericRecord) { return resolveCompact(((CompactGenericRecord) target).getSchema(), key); } else if (ss.isCompactSerializable(target)) { Schema schema = ss.extractSchemaFromObject(target); return resolveCompact(schema, key); } else if (target instanceof HazelcastJsonValue) { return null; } else { return resolveJava(target.getClass(), key); } } catch (Exception e) { return null; } }
@Test public void test_portableRecord() { ClassDefinition classDefinition = new ClassDefinitionBuilder(PORTABLE_FACTORY_ID, PORTABLE_CLASS_ID, PORTABLE_CLASS_VERSION).build(); InternalSerializationService ss = new DefaultSerializationServiceBuilder().addClassDefinition(classDefinition).build(); Metadata metadata = SampleMetadataResolver.resolve(ss, new PortableGenericRecordBuilder(classDefinition).build(), key); assertThat(metadata.options()).containsExactly( entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, PORTABLE_FORMAT), entry(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID, String.valueOf(PORTABLE_FACTORY_ID)), entry(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID, String.valueOf(PORTABLE_CLASS_ID)), entry(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION, String.valueOf(PORTABLE_CLASS_VERSION)) ); metadata = SampleMetadataResolver.resolve(ss, ss.toData(new PortableGenericRecordBuilder(classDefinition).build()), key); assertThat(metadata.options()).containsExactly( entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, PORTABLE_FORMAT), entry(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID, String.valueOf(PORTABLE_FACTORY_ID)), entry(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID, String.valueOf(PORTABLE_CLASS_ID)), entry(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION, String.valueOf(PORTABLE_CLASS_VERSION)) ); }
public Future<KafkaVersionChange> reconcile() { return getPods() .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
@Test public void testNewClusterWithAllVersions(VertxTestContext context) { VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.defaultVersion().metadataVersion(), VERSIONS.defaultVersion().metadataVersion()), mockRos(List.of()) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.defaultVersion())); assertThat(c.to(), is(VERSIONS.defaultVersion())); assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion())); async.flag(); }))); }
public static ExtractionResult fromPredicate( Metadata metadata, Session session, Expression predicate, TypeProvider types) { return new Visitor(metadata, session, types).process(predicate, false); }
@Test public void testFromOrPredicate() { Expression originalPredicate = or( and(greaterThan(C_BIGINT, bigintLiteral(1L)), unprocessableExpression1(C_BIGINT)), and(lessThan(C_BIGINT, bigintLiteral(5L)), unprocessableExpression2(C_BIGINT))); ExtractionResult result = fromPredicate(originalPredicate); assertEquals(result.getRemainingExpression(), originalPredicate); assertEquals(result.getTupleDomain(), withColumnDomains(ImmutableMap.of(C_BIGINT, Domain.notNull(BIGINT)))); originalPredicate = or( and(equal(C_BIGINT, bigintLiteral(1L)), unprocessableExpression1(C_BIGINT)), and(equal(C_BIGINT, bigintLiteral(2L)), unprocessableExpression2(C_BIGINT))); result = fromPredicate(originalPredicate); assertEquals(result.getRemainingExpression(), originalPredicate); assertEquals(result.getTupleDomain(), withColumnDomains(ImmutableMap.of(C_BIGINT, Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L)), false)))); // Same unprocessableExpression means that we can do more extraction // If both sides are operating on the same single symbol originalPredicate = or( and(equal(C_BIGINT, bigintLiteral(1L)), unprocessableExpression1(C_BIGINT)), and(equal(C_BIGINT, bigintLiteral(2L)), unprocessableExpression1(C_BIGINT))); result = fromPredicate(originalPredicate); assertEquals(result.getRemainingExpression(), unprocessableExpression1(C_BIGINT)); assertEquals(result.getTupleDomain(), withColumnDomains(ImmutableMap.of(C_BIGINT, Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L)), false)))); // And not if they have different symbols assertUnsupportedPredicate(or( and(equal(C_BIGINT, bigintLiteral(1L)), unprocessableExpression1(C_BIGINT)), and(equal(C_DOUBLE, doubleLiteral(2.0)), unprocessableExpression1(C_BIGINT)))); // We can make another optimization if one side is the super set of the other side originalPredicate = or( and(greaterThan(C_BIGINT, bigintLiteral(1L)), greaterThan(C_DOUBLE, doubleLiteral(1.0)), 
unprocessableExpression1(C_BIGINT)), and(greaterThan(C_BIGINT, bigintLiteral(2L)), greaterThan(C_DOUBLE, doubleLiteral(2.0)), unprocessableExpression1(C_BIGINT))); result = fromPredicate(originalPredicate); assertEquals(result.getRemainingExpression(), unprocessableExpression1(C_BIGINT)); assertEquals(result.getTupleDomain(), withColumnDomains(ImmutableMap.of( C_BIGINT, Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 1L)), false), C_DOUBLE, Domain.create(ValueSet.ofRanges(Range.greaterThan(DOUBLE, 1.0)), false)))); // We can't make those inferences if the unprocessableExpressions are non-deterministic originalPredicate = or( and(equal(C_BIGINT, bigintLiteral(1L)), randPredicate(C_BIGINT, BIGINT)), and(equal(C_BIGINT, bigintLiteral(2L)), randPredicate(C_BIGINT, BIGINT))); result = fromPredicate(originalPredicate); assertEquals(result.getRemainingExpression(), originalPredicate); assertEquals(result.getTupleDomain(), withColumnDomains(ImmutableMap.of(C_BIGINT, Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L)), false)))); // Test complements originalPredicate = not(or( and(greaterThan(C_BIGINT, bigintLiteral(1L)), unprocessableExpression1(C_BIGINT)), and(lessThan(C_BIGINT, bigintLiteral(5L)), unprocessableExpression2(C_BIGINT)))); result = fromPredicate(originalPredicate); assertEquals(result.getRemainingExpression(), and( not(and(greaterThan(C_BIGINT, bigintLiteral(1L)), unprocessableExpression1(C_BIGINT))), not(and(lessThan(C_BIGINT, bigintLiteral(5L)), unprocessableExpression2(C_BIGINT))))); assertTrue(result.getTupleDomain().isAll()); originalPredicate = not(or( not(and(greaterThan(C_BIGINT, bigintLiteral(1L)), unprocessableExpression1(C_BIGINT))), not(and(lessThan(C_BIGINT, bigintLiteral(5L)), unprocessableExpression2(C_BIGINT))))); result = fromPredicate(originalPredicate); assertEquals(result.getRemainingExpression(), and(unprocessableExpression1(C_BIGINT), unprocessableExpression2(C_BIGINT))); 
assertEquals(result.getTupleDomain(), withColumnDomains(ImmutableMap.of(C_BIGINT, Domain.create(ValueSet.ofRanges(Range.range(BIGINT, 1L, false, 5L, false)), false)))); }
@Override public void copyParametersFrom( NamedParams aParam ) { if ( params != null && aParam != null ) { params.clear(); String[] keys = aParam.listParameters(); for ( int idx = 0; idx < keys.length; idx++ ) { String desc; try { desc = aParam.getParameterDescription( keys[idx] ); } catch ( UnknownParamException e ) { desc = ""; } String defValue; try { defValue = aParam.getParameterDefault( keys[idx] ); } catch ( UnknownParamException e ) { defValue = ""; } String value; try { value = aParam.getParameterValue( keys[idx] ); } catch ( UnknownParamException e ) { value = ""; } try { addParameterDefinition( keys[idx], defValue, desc ); } catch ( DuplicateParamException e ) { // Do nothing, just overwrite. } setParameterValue( keys[idx], value ); } } }
@Test public void testCopyParametersFromNullChecks() throws Exception { // Test null case namedParams.copyParametersFrom( null ); NamedParams namedParams2 = new NamedParamsDefault(); // Test internal params == null case ( (NamedParamsDefault) namedParams ).params = null; namedParams.copyParametersFrom( namedParams2 ); }
@Override public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment, InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) { int numPartitions = instancePartitions.getNumPartitions(); checkReplication(instancePartitions, _replication, _tableName); int partitionId; if (_partitionColumn == null || numPartitions == 1) { partitionId = 0; } else { // Uniformly spray the segment partitions over the instance partitions if (_tableConfig.getTableType() == TableType.OFFLINE) { partitionId = SegmentAssignmentUtils .getOfflineSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions; } else { partitionId = SegmentAssignmentUtils .getRealtimeSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions; } } return SegmentAssignmentUtils.assignSegmentWithReplicaGroup(currentAssignment, instancePartitions, partitionId); }
@Test public void testTableBalancedWithPartition() { Map<String, Map<String, String>> currentAssignment = new TreeMap<>(); for (String segmentName : SEGMENTS) { List<String> instancesAssigned = _segmentAssignmentWithPartition .assignSegment(segmentName, currentAssignment, _instancePartitionsMapWithPartition); currentAssignment .put(segmentName, SegmentAssignmentUtils.getInstanceStateMap(instancesAssigned, SegmentStateModel.ONLINE)); } assertEquals(currentAssignment.size(), NUM_SEGMENTS); // Each segment should have 3 replicas for (Map<String, String> instanceStateMap : currentAssignment.values()) { assertEquals(instanceStateMap.size(), NUM_REPLICAS); } int[] numSegmentsAssignedPerInstance = SegmentAssignmentUtils.getNumSegmentsAssignedPerInstance(currentAssignment, INSTANCES); int[] expectedNumSegmentsAssignedPerInstance = new int[NUM_INSTANCES]; int numSegmentsPerInstance = NUM_SEGMENTS * NUM_REPLICAS / NUM_INSTANCES; Arrays.fill(expectedNumSegmentsAssignedPerInstance, numSegmentsPerInstance); assertEquals(numSegmentsAssignedPerInstance, expectedNumSegmentsAssignedPerInstance); // Current assignment should already be balanced assertEquals( _segmentAssignmentWithPartition.rebalanceTable(currentAssignment, _instancePartitionsMapWithPartition, null, null, new RebalanceConfig()), currentAssignment); }
@CheckForNull public String getDescriptionAsHtml(RuleDto ruleDto) { if (ruleDto.getDescriptionFormat() == null) { return null; } Collection<RuleDescriptionSectionDto> ruleDescriptionSectionDtos = ruleDto.getRuleDescriptionSectionDtos(); return retrieveDescription(ruleDescriptionSectionDtos, Objects.requireNonNull(ruleDto.getDescriptionFormat())); }
@Test public void getMarkdownDescriptionAsHtml() { RuleDto rule = new RuleDto().setDescriptionFormat(MARKDOWN).addRuleDescriptionSectionDto(MARKDOWN_SECTION).setType(RuleType.BUG); String html = ruleDescriptionFormatter.getDescriptionAsHtml(rule); assertThat(html).isEqualTo("<strong>md</strong> <code>description</code>"); }
@Nullable public static <T> T getWithoutException(CompletableFuture<T> future) { if (isCompletedNormally(future)) { try { return future.get(); } catch (InterruptedException | ExecutionException ignored) { } } return null; }
@Test void testGetWithoutExceptionWithAnException() { final CompletableFuture<Integer> completableFuture = new CompletableFuture<>(); completableFuture.completeExceptionally(new RuntimeException("expected")); assertThat(FutureUtils.getWithoutException(completableFuture)).isNull(); }
@Override public Repositories listRepositories(String appUrl, AccessToken accessToken, String organization, @Nullable String query, int page, int pageSize) { checkPageArgs(page, pageSize); String searchQuery = "fork:true+org:" + organization; if (query != null) { searchQuery = query.replace(" ", "+") + "+" + searchQuery; } try { Repositories repositories = new Repositories(); GetResponse response = githubApplicationHttpClient.get(appUrl, accessToken, String.format("/search/repositories?q=%s&page=%s&per_page=%s", searchQuery, page, pageSize)); Optional<GsonRepositorySearch> gsonRepositories = response.getContent().map(content -> GSON.fromJson(content, GsonRepositorySearch.class)); if (!gsonRepositories.isPresent()) { return repositories; } repositories.setTotal(gsonRepositories.get().getTotalCount()); if (gsonRepositories.get().getItems() != null) { repositories.setRepositories(gsonRepositories.get().getItems().stream() .map(GsonGithubRepository::toRepository) .toList()); } return repositories; } catch (Exception e) { throw new IllegalStateException(format("Failed to list all repositories of '%s' accessible by user access token on '%s' using query '%s'", organization, appUrl, searchQuery), e); } }
@Test public void listRepositories_fail_if_pageIndex_out_of_bounds() { UserAccessToken token = new UserAccessToken("token"); assertThatThrownBy(() -> underTest.listRepositories(appUrl, token, "test", null, 0, 100)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("'page' must be larger than 0."); }
@Override public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException { if (!TbJsonPathNodeConfiguration.DEFAULT_JSON_PATH.equals(this.jsonPathValue)) { try { Object jsonPathData = jsonPath.read(msg.getData(), this.configurationJsonPath); ctx.tellSuccess(TbMsg.transformMsgData(msg, JacksonUtil.toString(jsonPathData))); } catch (PathNotFoundException e) { ctx.tellFailure(msg, e); } } else { ctx.tellSuccess(msg); } }
@Test void givenNoArrayMsg_whenOnMsg_thenTellFailure() throws Exception { String data = "{\"Attribute_1\":22.5,\"Attribute_5\":10.3}"; JsonNode dataNode = JacksonUtil.toJsonNode(data); TbMsg msg = getTbMsg(deviceId, dataNode.toString()); node.onMsg(ctx, msg); ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class); ArgumentCaptor<Exception> exceptionCaptor = ArgumentCaptor.forClass(Exception.class); verify(ctx, never()).tellSuccess(any()); verify(ctx, times(1)).tellFailure(newMsgCaptor.capture(), exceptionCaptor.capture()); assertThat(newMsgCaptor.getValue()).isSameAs(msg); assertThat(exceptionCaptor.getValue()).isInstanceOf(RuntimeException.class); }
public int getMaxIdleSeconds() { return maxIdleSeconds; }
@Test public void testGetMaxIdleSeconds() { assertEquals(MapConfig.DEFAULT_MAX_IDLE_SECONDS, new MapConfig().getMaxIdleSeconds()); }
public static Write write() { return Write.create(); }
@Test public void testWriteValidationFailsMissingOptionsAndInstanceAndProject() { BigtableIO.WriteWithResults write = BigtableIO.write().withTableId("table").withWriteResults(); thrown.expect(IllegalArgumentException.class); write.expand(null); }
@SuppressWarnings("unchecked") public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) { final BiFunction<Visitor<?, ?>, Schema, Object> handler = HANDLER.get(schema.type()); if (handler == null) { throw new UnsupportedOperationException("Unsupported schema type: " + schema.type()); } return (S) handler.apply(visitor, schema); }
@Test public void shouldThrowOnUnknownType() { // Given: final Type unknownType = mock(Type.class, "bob"); final Schema schema = mock(Schema.class); when(schema.type()).thenReturn(unknownType); // When: final UnsupportedOperationException e = assertThrows( UnsupportedOperationException.class, () -> SchemaWalker.visit(schema, visitor) ); // Then: assertThat(e.getMessage(), containsString("Unsupported schema type: bob")); }
Flux<Post> findAll() { return Flux.fromIterable(data.values()); }
@Test public void testGetAllPosts() { StepVerifier.create(posts.findAll()) .consumeNextWith(p -> assertEquals("post one", p.getTitle())) .consumeNextWith(p -> assertEquals("post two", p.getTitle())) .expectComplete() .verify(); }
@Override public Map<V, String> hash(V... members) { return get(hashAsync(members)); }
@Test public void testHashEmpty() { RGeo<String> geo = redisson.getGeo("test"); assertThat(geo.hash("Palermo", "Catania")).isEmpty(); }
@Override public void upgrade() { if (clusterConfigService.get(V20230531135500_MigrateRemoveObsoleteItemsFromGrantsCollection.MigrationCompleted.class) != null) { return; } final Set<String> names = new HashSet(); mongoConnection.getMongoDatabase().listCollectionNames().forEach(names::add); if (names.contains(DBGrantService.COLLECTION_NAME)) { var query = new BasicDBObject("target", Pattern.compile("^grn::::favorite:")); mongoConnection.getMongoDatabase().getCollection(DBGrantService.COLLECTION_NAME).deleteMany(query); query = new BasicDBObject("target", Pattern.compile("^grn::::last_opened:")); mongoConnection.getMongoDatabase().getCollection(DBGrantService.COLLECTION_NAME).deleteMany(query); } clusterConfigService.write(new MigrationCompleted()); }
@Test @MongoDBFixtures("V20230531135500_MigrateRemoveObsoleteItemsFromGrantsCollectionTests.json") void removingAllObsoleteEntries() { assertThat(this.collection.countDocuments()).isEqualTo(13); this.migration.upgrade(); assertThat(migrationCompleted()).isNotNull(); assertThat(this.collection.countDocuments()).isEqualTo(9); this.collection.find().forEach(d -> { assertThat(d.get("target").toString()).doesNotContain("favorite"); assertThat(d.get("target").toString()).doesNotContain("last_opened"); }); }
public long residentMemorySizeEstimate() { long size = 0; size += Long.BYTES; // value.context.timestamp size += Long.BYTES; // value.context.offset if (topic != null) { size += topic.toCharArray().length; } size += Integer.BYTES; // partition for (final Header header : headers) { size += header.key().toCharArray().length; final byte[] value = header.value(); if (value != null) { size += value.length; } } return size; }
@Test public void shouldEstimateNullValueInHeaderAsZero() { final Headers headers = new RecordHeaders(); headers.add("header-key", null); final ProcessorRecordContext context = new ProcessorRecordContext( 42L, 73L, 0, null, headers ); assertEquals(MIN_SIZE + 10L, context.residentMemorySizeEstimate()); }
public void addLongPollingClient(HttpServletRequest req, HttpServletResponse rsp, Map<String, String> clientMd5Map, int probeRequestSize) { String noHangUpFlag = req.getHeader(LongPollingService.LONG_POLLING_NO_HANG_UP_HEADER); long start = System.currentTimeMillis(); List<String> changedGroups = MD5Util.compareMd5(req, rsp, clientMd5Map); if (changedGroups.size() > 0) { generateResponse(req, rsp, changedGroups); LogUtil.CLIENT_LOG.info("{}|{}|{}|{}|{}|{}|{}", System.currentTimeMillis() - start, "instant", RequestUtil.getRemoteIp(req), "polling", clientMd5Map.size(), probeRequestSize, changedGroups.size()); return; } else if (noHangUpFlag != null && noHangUpFlag.equalsIgnoreCase(TRUE_STR)) { LogUtil.CLIENT_LOG.info("{}|{}|{}|{}|{}|{}|{}", System.currentTimeMillis() - start, "nohangup", RequestUtil.getRemoteIp(req), "polling", clientMd5Map.size(), probeRequestSize, changedGroups.size()); return; } // Must be called by http thread, or send response. final AsyncContext asyncContext = req.startAsync(); // AsyncContext.setTimeout() is incorrect, Control by oneself asyncContext.setTimeout(0L); String ip = RequestUtil.getRemoteIp(req); ConnectionCheckResponse connectionCheckResponse = checkLimit(req); if (!connectionCheckResponse.isSuccess()) { RpcScheduledExecutor.CONTROL_SCHEDULER.schedule( () -> generate503Response(asyncContext, rsp, connectionCheckResponse.getMessage()), 1000L + new Random().nextInt(2000), TimeUnit.MILLISECONDS); return; } String appName = req.getHeader(RequestUtil.CLIENT_APPNAME_HEADER); String tag = req.getHeader("Vipserver-Tag"); int delayTime = SwitchService.getSwitchInteger(SwitchService.FIXED_DELAY_TIME, 500); int minLongPoolingTimeout = SwitchService.getSwitchInteger("MIN_LONG_POOLING_TIMEOUT", 10000); // Add delay time for LoadBalance, and one response is returned 500 ms in advance to avoid client timeout. 
String requestLongPollingTimeOut = req.getHeader(LongPollingService.LONG_POLLING_HEADER); long timeout = Math.max(minLongPoolingTimeout, Long.parseLong(requestLongPollingTimeOut) - delayTime); ConfigExecutor.executeLongPolling( new ClientLongPolling(asyncContext, clientMd5Map, ip, probeRequestSize, timeout, appName, tag)); }
@Test void testAddLongPollingClientHasNotEqualsMd5() throws IOException { Map<String, String> clientMd5Map = new HashMap<>(); String group = "group"; String tenant = "tenat"; String dataIdEquals = "dataIdEquals0"; String dataIdNotEquals = "dataIdNotEquals0"; String groupKeyEquals = GroupKey.getKeyTenant(dataIdEquals, group, tenant); String groupKeyNotEquals = GroupKey.getKeyTenant(dataIdNotEquals, group, tenant); String md5Equals0 = MD5Utils.md5Hex("countEquals0", "UTF-8"); clientMd5Map.put(groupKeyEquals, md5Equals0); String md5NotEquals1 = MD5Utils.md5Hex("countNotEquals", "UTF-8"); clientMd5Map.put(groupKeyNotEquals, md5NotEquals1); HttpServletRequest httpServletRequest = Mockito.mock(HttpServletRequest.class); Mockito.when(httpServletRequest.getHeader(eq(LongPollingService.LONG_POLLING_NO_HANG_UP_HEADER))).thenReturn(null); String clientIp = "192.168.0.1"; Mockito.when(httpServletRequest.getHeader(eq("X-Forwarded-For"))).thenReturn(clientIp); configCacheServiceMockedStatic.when( () -> ConfigCacheService.isUptodate(eq(groupKeyNotEquals), eq(md5NotEquals1), eq(clientIp), eq(null))).thenReturn(false); configCacheServiceMockedStatic.when(() -> ConfigCacheService.isUptodate(eq(groupKeyEquals), eq(md5Equals0), eq(clientIp), eq(null))) .thenReturn(true); HttpServletResponse httpServletResponse = Mockito.mock(HttpServletResponse.class); PrintWriter printWriter = Mockito.mock(PrintWriter.class); Mockito.when(httpServletResponse.getWriter()).thenReturn(printWriter); int propSize = 3; longPollingService.addLongPollingClient(httpServletRequest, httpServletResponse, clientMd5Map, propSize); String responseString = MD5Util.compareMd5ResultString(Arrays.asList(groupKeyNotEquals)); //expect print not equals group Mockito.verify(printWriter, times(1)).println(eq(responseString)); Mockito.verify(httpServletResponse, times(1)).setStatus(eq(HttpServletResponse.SC_OK)); }
@Override public void saveAll(Iterable<MetricEntity> metrics) { if (metrics == null) { return; } readWriteLock.writeLock().lock(); try { metrics.forEach(this::save); } finally { readWriteLock.writeLock().unlock(); } }
@Test public void testSaveAll() { List<MetricEntity> entities = new ArrayList<>(10000); for (int i = 0; i < 10000; i++) { MetricEntity entry = new MetricEntity(); entry.setApp("testSaveAll"); entry.setResource("testResource" + i); entry.setTimestamp(new Date(System.currentTimeMillis())); entry.setPassQps(1L); entry.setExceptionQps(1L); entry.setBlockQps(0L); entry.setSuccessQps(1L); entities.add(entry); } inMemoryMetricsRepository.saveAll(entities); List<String> result = inMemoryMetricsRepository.listResourcesOfApp("testSaveAll"); Assert.assertTrue(result.size() == entities.size()); }
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) request; if ("GET".equals(httpRequest.getMethod())) { String acceptEncoding = httpRequest.getHeader(HttpHeaders.ACCEPT_ENCODING); if (acceptEncoding == null) { chain.doFilter(addGzipAcceptEncoding(httpRequest), response); return; } if (!acceptEncoding.contains("gzip")) { ((HttpServletResponse) response).setStatus(HttpServletResponse.SC_NOT_ACCEPTABLE); return; } } chain.doFilter(request, response); }
@Test public void testForceGzipOtherHeader() throws Exception { noneGzipRequest(); when(request.getHeaders("Test")).thenReturn(new Enumeration() { private int c = 0; @Override public boolean hasMoreElements() { return c == 0; } @Override public Object nextElement() { c++; return "ok"; } }); filter.doFilter(request, response, filterChain); String res = ""; Enumeration values = filteredRequest.getHeaders("Test"); while (values.hasMoreElements()) { res = res + values.nextElement() + "\n"; } assertEquals("Expected Test ok", "ok\n", res); }
protected RemotingCommand sendMessage(ChannelHandlerContext ctx, RemotingCommand request, ProxyContext context) throws Exception { SendMessageRequestHeader requestHeader = SendMessageRequestHeader.parseRequestHeader(request); String topic = requestHeader.getTopic(); Map<String, String> property = MessageDecoder.string2messageProperties(requestHeader.getProperties()); TopicMessageType messageType = TopicMessageType.parseFromMessageProperty(property); if (ConfigurationManager.getProxyConfig().isEnableTopicMessageTypeCheck()) { if (topicMessageTypeValidator != null) { // Do not check retry or dlq topic if (!NamespaceUtil.isRetryTopic(topic) && !NamespaceUtil.isDLQTopic(topic)) { TopicMessageType topicMessageType = messagingProcessor.getMetadataService().getTopicMessageType(context, topic); topicMessageTypeValidator.validate(topicMessageType, messageType); } } } if (!NamespaceUtil.isRetryTopic(topic) && !NamespaceUtil.isDLQTopic(topic)) { if (TopicMessageType.TRANSACTION.equals(messageType)) { messagingProcessor.addTransactionSubscription(context, requestHeader.getProducerGroup(), requestHeader.getTopic()); } } return request(ctx, request, context, Duration.ofSeconds(3).toMillis()); }
@Test public void testSendMessage() throws Exception { when(metadataServiceMock.getTopicMessageType(any(), eq(topic))).thenReturn(TopicMessageType.NORMAL); Message message = new Message(topic, "123".getBytes()); message.putUserProperty("a", "b"); SendMessageRequestHeader sendMessageRequestHeader = new SendMessageRequestHeader(); sendMessageRequestHeader.setTopic(topic); sendMessageRequestHeader.setProducerGroup(producerGroup); sendMessageRequestHeader.setDefaultTopic(""); sendMessageRequestHeader.setDefaultTopicQueueNums(0); sendMessageRequestHeader.setQueueId(0); sendMessageRequestHeader.setSysFlag(0); sendMessageRequestHeader.setBrokerName(brokerName); sendMessageRequestHeader.setProperties(MessageDecoder.messageProperties2String(message.getProperties())); RemotingCommand remotingCommand = RemotingCommand.createRequestCommand(RequestCode.SEND_MESSAGE, sendMessageRequestHeader); remotingCommand.setBody(message.getBody()); remotingCommand.makeCustomHeaderToNet(); RemotingCommand expectResponse = RemotingCommand.createResponseCommand(ResponseCode.SUCCESS, "success"); when(messagingProcessorMock.request(any(), eq(brokerName), eq(remotingCommand), anyLong())) .thenReturn(CompletableFuture.completedFuture(expectResponse)); RemotingCommand response = sendMessageActivity.processRequest0(ctx, remotingCommand, null); assertThat(response).isNull(); verify(ctx, times(1)).writeAndFlush(eq(expectResponse)); }
public static ThreeStepCrypticClue forText(String plainText, String text) { final String[] split = text.split("<br>\\s*<br>"); final List<Map.Entry<CrypticClue, Boolean>> steps = new ArrayList<>(split.length); for (String part : split) { boolean isDone = part.contains("<str>"); final String rawText = Text.sanitizeMultilineText(part); for (CrypticClue clue : CrypticClue.CLUES) { if (!rawText.equalsIgnoreCase(clue.getText())) { continue; } steps.add(new AbstractMap.SimpleEntry<>(clue, isDone)); break; } } if (steps.isEmpty() || steps.size() < 3) { return null; } return new ThreeStepCrypticClue(steps, plainText); }
@Test public void forTextEmptyString() { assertNull(ThreeStepCrypticClue.forText("", "")); }
@Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { return inject(statement, new TopicProperties.Builder()); }
@Test public void shouldHaveSuperUsefulErrorMessageIfCreateWithNoPartitions() { // Given: givenStatement("CREATE STREAM foo (FOO STRING) WITH (value_format='avro', kafka_topic='doesntexist');"); // When: final Exception e = assertThrows( KsqlException.class, () -> injector.inject(statement, builder) ); // Then: assertThat(e.getMessage(), containsString( "Topic 'doesntexist' does not exist. If you want to create a new topic for the " + "stream/table please re-run the statement providing the required 'PARTITIONS' " + "configuration in the WITH clause (and optionally 'REPLICAS'). For example: " + "CREATE STREAM FOO (FOO STRING) " + "WITH (KAFKA_TOPIC='doesntexist', PARTITIONS=2, VALUE_FORMAT='avro');")); }
public static Profiler createIfDebug(Logger logger) { if (logger.isDebugEnabled()) { return create(logger); } return NullProfiler.NULL_INSTANCE; }
@Test public void create_null_profiler_if_debug_level_is_disabled() { tester.setLevel(LoggerLevel.TRACE); Profiler profiler = Profiler.createIfDebug(LoggerFactory.getLogger("foo")); assertThat(profiler).isInstanceOf(DefaultProfiler.class); tester.setLevel(LoggerLevel.INFO); profiler = Profiler.createIfDebug(LoggerFactory.getLogger("foo")); assertThat(profiler).isInstanceOf(NullProfiler.class); }
@Override public void initialize(URI uri, Configuration conf) throws IOException { requireNonNull(uri, "uri is null"); requireNonNull(conf, "conf is null"); super.initialize(uri, conf); setConf(conf); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR)); HiveS3Config defaults = new HiveS3Config(); this.stagingDirectory = new File(conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString())); this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1; this.maxBackoffTime = Duration.valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString())); this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString())); int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries()); boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled()); Duration connectTimeout = Duration.valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString())); Duration socketTimeout = Duration.valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString())); int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections()); this.multiPartUploadMinFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes()); this.multiPartUploadMinPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes()); this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess()); this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS, defaults.isS3UseInstanceCredentials()); this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, defaults.isPinS3ClientToCurrentRegion()); this.s3IamRole = conf.get(S3_IAM_ROLE, defaults.getS3IamRole()); 
this.s3IamRoleSessionName = conf.get(S3_IAM_ROLE_SESSION_NAME, defaults.getS3IamRoleSessionName()); verify(!(useInstanceCredentials && conf.get(S3_IAM_ROLE) != null), "Invalid configuration: either use instance credentials or specify an iam role"); verify((pinS3ClientToCurrentRegion && conf.get(S3_ENDPOINT) == null) || !pinS3ClientToCurrentRegion, "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region"); this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled()); this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name())); this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId()); this.s3AclType = PrestoS3AclType.valueOf(conf.get(S3_ACL_TYPE, defaults.getS3AclType().name())); String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix()); this.skipGlacierObjects = conf.getBoolean(S3_SKIP_GLACIER_OBJECTS, defaults.isSkipGlacierObjects()); this.s3StorageClass = conf.getEnum(S3_STORAGE_CLASS, defaults.getS3StorageClass()); ClientConfiguration configuration = new ClientConfiguration() .withMaxErrorRetry(maxErrorRetries) .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP) .withConnectionTimeout(toIntExact(connectTimeout.toMillis())) .withSocketTimeout(toIntExact(socketTimeout.toMillis())) .withMaxConnections(maxConnections) .withUserAgentPrefix(userAgentPrefix) .withUserAgentSuffix(S3_USER_AGENT_SUFFIX); this.credentialsProvider = createAwsCredentialsProvider(uri, conf); this.s3 = createAmazonS3Client(conf, configuration); }
@Test public void testInstanceCredentialsEnabled() throws Exception { Configuration config = new Configuration(); // instance credentials are disabled by default try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) { fs.initialize(new URI("s3n://test-bucket/"), config); assertFalse(getAwsCredentialsProvider(fs).getClass().isInstance(InstanceProfileCredentialsProvider.class)); } }
/**
 * Renders this TCP header as a human-readable string listing every field
 * in wire order (ports, sequence numbers, offset, flags, window, checksum,
 * urgent pointer, and raw options).
 */
@Override
public String toString() {
    // String.valueOf produces the same decimal text as the per-type
    // Integer/Byte/Short.toString calls it replaces.
    return toStringHelper(getClass())
            .add("sourcePort", String.valueOf(sourcePort))
            .add("destinationPort", String.valueOf(destinationPort))
            .add("sequence", String.valueOf(sequence))
            .add("acknowledge", String.valueOf(acknowledge))
            .add("dataOffset", String.valueOf(dataOffset))
            .add("flags", String.valueOf(flags))
            .add("windowSize", String.valueOf(windowSize))
            .add("checksum", String.valueOf(checksum))
            .add("urgentPointer", String.valueOf(urgentPointer))
            .add("options", Arrays.toString(options))
            .toString();
}
/**
 * Deserializes a known TCP packet and checks that toString() reports every
 * header field with the expected value.
 */
@Test
public void testToStringTcp() throws Exception {
    TCP tcp = deserializer.deserialize(bytePacketTCP4, 0, bytePacketTCP4.length);
    String actual = tcp.toString();

    // Each fragment is "fieldName=decimalValue" as produced by TCP.toString().
    String[] expectedFragments = {
            "sourcePort=" + 0x50,
            "destinationPort=" + 0x60,
            "sequence=" + 0x10,
            "acknowledge=" + 0x20,
            "dataOffset=" + (byte) 0x5,
            "flags=" + (short) 0x2,
            "windowSize=" + (short) 0x1000,
            "checksum=" + (short) 0x1bae,
            "urgentPointer=" + (short) 0x1,
    };
    for (String fragment : expectedFragments) {
        assertTrue(StringUtils.contains(actual, fragment));
    }
}
List<OffsetRange> getBundleSizes(int desiredNumBundles, long start, long end) { List<OffsetRange> result = new ArrayList<>(); double[] relativeSizes = getRelativeBundleSizes(desiredNumBundles); // Generate offset ranges proportional to the relative sizes. double s = sum(relativeSizes); long startOffset = start; double sizeSoFar = 0; for (int i = 0; i < relativeSizes.length; ++i) { sizeSoFar += relativeSizes[i]; long endOffset = (i == relativeSizes.length - 1) ? end : (long) (start + sizeSoFar * (end - start) / s); if (startOffset != endOffset) { result.add(new OffsetRange(startOffset, endOffset)); } startOffset = endOffset; } return result; }
/**
 * With a constant size distribution every bundle must come out the same
 * width (2 records each here).
 */
@Test
public void bundlesShouldBeEvenForConstDistribution() {
    long expectedBundleSize = 2;
    options.bundleSizeDistribution = fromRealDistribution(new ConstantRealDistribution(2));
    splitter = new BundleSplitter(options);

    List<OffsetRange> bundleSizes = splitter.getBundleSizes(4, 0, options.numRecords);

    for (OffsetRange range : bundleSizes) {
        assertEquals(expectedBundleSize, range.getTo() - range.getFrom());
    }
}
/**
 * Maps a Camel header value onto a value that is legal as a JMS message
 * property, or {@code null} when the type cannot be carried as one.
 *
 * @param headerName  the header key (currently unused; kept for the API)
 * @param headerValue the candidate value
 * @return the value to set as a JMS property, possibly converted to a
 *         String, or {@code null} if the type is not supported
 */
protected Object getValidJMSHeaderValue(String headerName, Object headerValue) {
    // Order matters: String must be handled before the CharSequence arm
    // (String implements CharSequence), and BigInteger/BigDecimal before
    // the Number arm (they are Numbers, but JMS has no property type for
    // them, so they travel as text).
    if (headerValue instanceof String) {
        return headerValue;
    }
    if (headerValue instanceof BigInteger || headerValue instanceof BigDecimal) {
        return headerValue.toString();
    }
    if (headerValue instanceof Number) {
        return headerValue;
    }
    if (headerValue instanceof Character) {
        return headerValue;
    }
    if (headerValue instanceof CharSequence) {
        return headerValue.toString();
    }
    if (headerValue instanceof Boolean) {
        return headerValue;
    }
    if (headerValue instanceof Date) {
        // Dates go out as text, optionally normalized to ISO-8601 in UTC.
        if (this.endpoint.getConfiguration().isFormatDateHeadersToIso8601()) {
            return ZonedDateTime.ofInstant(((Date) headerValue).toInstant(), ZoneOffset.UTC).toString();
        }
        return headerValue.toString();
    }
    // Unsupported type: caller drops the header.
    return null;
}
/**
 * BigDecimal header values must be converted to their plain String form.
 */
@Test
public void testGetValidJmsHeaderValueWithBigDecimalShouldSucceed() {
    BigDecimal headerValue = new BigDecimal("123.45");

    Object converted = jmsBindingUnderTest.getValidJMSHeaderValue("foo", headerValue);

    assertEquals("123.45", converted);
}
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
@Test public void shouldIncludeTopicNameInException() { // Given: final KsqlJsonDeserializer<Long> deserializer = givenDeserializerForSchema(Schema.OPTIONAL_INT64_SCHEMA, Long.class); final byte[] bytes = "true".getBytes(StandardCharsets.UTF_8); // When: final Exception e = assertThrows( Exception.class, () -> deserializer.deserialize(SOME_TOPIC, bytes) ); // Then: assertThat(e.getMessage(), containsString( SOME_TOPIC)); }