focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
public <T> Future<Iterable<T>> multimapFetchSingleEntryFuture(
ByteString encodedKey, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
StateTag<ByteString> stateTag =
StateTag.<ByteString>of(Kind.MULTIMAP_SINGLE_ENTRY, encodedTag, stateFamily)
.toBuilder()
.setMultimapKey(encodedKey)
.build();
return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
} | @Test
public void testReadMultimapSingleEntryPaginated() throws Exception {
Future<Iterable<Integer>> future =
underTest.multimapFetchSingleEntryFuture(
STATE_MULTIMAP_KEY_1, STATE_KEY_1, STATE_FAMILY, INT_CODER);
Mockito.verifyNoMoreInteractions(mockWindmill);
Windmill.KeyedGetDataRequest.Builder expectedRequest1 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addMultimapsToFetch(
Windmill.TagMultimapFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.setFetchEntryNamesOnly(false)
.addEntriesToFetch(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_MULTIMAP_BYTES)
.build()));
Windmill.KeyedGetDataResponse.Builder response1 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagMultimaps(
Windmill.TagMultimapFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.addAllValues(Arrays.asList(intData(5), intData(6)))
.setContinuationPosition(500)));
Windmill.KeyedGetDataRequest.Builder expectedRequest2 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_CONTINUATION_KEY_BYTES)
.addMultimapsToFetch(
Windmill.TagMultimapFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.setFetchEntryNamesOnly(false)
.addEntriesToFetch(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.setFetchMaxBytes(WindmillStateReader.CONTINUATION_MAX_MULTIMAP_BYTES)
.setRequestPosition(500)
.build()));
Windmill.KeyedGetDataResponse.Builder response2 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagMultimaps(
Windmill.TagMultimapFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.addAllValues(Arrays.asList(intData(7), intData(8)))
.setContinuationPosition(800)
.setRequestPosition(500)));
Windmill.KeyedGetDataRequest.Builder expectedRequest3 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_CONTINUATION_KEY_BYTES)
.addMultimapsToFetch(
Windmill.TagMultimapFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.setFetchEntryNamesOnly(false)
.addEntriesToFetch(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.setFetchMaxBytes(WindmillStateReader.CONTINUATION_MAX_MULTIMAP_BYTES)
.setRequestPosition(800)
.build()));
Windmill.KeyedGetDataResponse.Builder response3 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagMultimaps(
Windmill.TagMultimapFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
Windmill.TagMultimapEntry.newBuilder()
.setEntryName(STATE_MULTIMAP_KEY_1)
.addAllValues(Arrays.asList(intData(9), intData(10)))
.setRequestPosition(800)));
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest1.build()))
.thenReturn(response1.build());
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest2.build()))
.thenReturn(response2.build());
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest3.build()))
.thenReturn(response3.build());
Iterable<Integer> results = future.get();
assertThat(results, Matchers.contains(5, 6, 7, 8, 9, 10));
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest1.build());
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest2.build());
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest3.build());
Mockito.verifyNoMoreInteractions(mockWindmill);
// NOTE: The future will still contain a reference to the underlying reader, thus not calling
// assertNoReader(future).
} |
public static Builder builder() {
return new Builder();
} | @Test
void testPrimaryKeyNameMustNotBeNull() {
assertThatThrownBy(
() ->
TableSchema.builder()
.field("f0", DataTypes.BIGINT())
.primaryKey(null, new String[] {"f0", "f2"})
.build())
.isInstanceOf(ValidationException.class)
.hasMessage("PRIMARY KEY's name can not be null or empty.");
} |
@Override
public int hashCode() {
// Note: DisjointPathPair with primary and secondary swapped
// must result in same hashCode
return hasBackup() ? primary.hashCode() + secondary.hashCode() :
Objects.hash(primary);
} | @Test
public void testSwappingPrimarySecondaryDoesntImpactHashCode() {
assertEquals(new DisjointPathPair<>(ABC, ADC).hashCode(),
new DisjointPathPair<>(ADC, ABC).hashCode());
} |
public static IRubyObject deep(final Ruby runtime, final Object input) {
if (input == null) {
return runtime.getNil();
}
final Class<?> cls = input.getClass();
final Rubyfier.Converter converter = CONVERTER_MAP.get(cls);
if (converter != null) {
return converter.convert(runtime, input);
}
return fallbackConvert(runtime, input, cls);
} | @Test
public void testDeepWithDouble() {
Object result = Rubyfier.deep(RubyUtil.RUBY, 1.0D);
assertEquals(RubyFloat.class, result.getClass());
assertEquals(1.0D, ((RubyFloat)result).getDoubleValue(), 0);
} |
private List<GrpcUpstream> buildGrpcUpstreamList(final List<URIRegisterDTO> uriList) {
return uriList.stream()
.map(dto -> CommonUpstreamUtils.buildDefaultGrpcUpstream(dto.getHost(), dto.getPort()))
.collect(Collectors.toCollection(CopyOnWriteArrayList::new));
} | @Test
public void testBuildGrpcUpstreamList() {
List<URIRegisterDTO> list = new ArrayList<>();
list.add(URIRegisterDTO.builder().appName("test1").rpcType(RpcTypeEnum.GRPC.getName()).host("localhost").port(8090).build());
list.add(URIRegisterDTO.builder().appName("test2").rpcType(RpcTypeEnum.GRPC.getName()).host("localhost").port(8091).build());
try {
Method testMethod = shenyuClientRegisterGrpcService.getClass().getDeclaredMethod("buildGrpcUpstreamList", List.class);
testMethod.setAccessible(true);
List<TarsUpstream> result = (List<TarsUpstream>) testMethod.invoke(shenyuClientRegisterGrpcService, list);
assertEquals(result.size(), 2);
} catch (Exception e) {
throw new ShenyuException(e.getCause());
}
} |
protected String copyRequestToDirectory( HttpServletRequest request, String directory ) throws KettleException {
try {
return copyRequestToDirectory( request.getInputStream(), directory );
} catch ( IOException ioe ) {
throw new KettleException( BaseMessages.getString( PKG, "RegisterPackageServlet.Exception.CopyRequest", directory ), ioe );
}
} | @Test
public void testCopyRequestToDirectory_Exception2() throws Exception {
try ( MockedStatic<KettleVFS> kettleVFSMockedStatic = mockStatic( KettleVFS.class ) ) {
expectedEx.expect( MockitoException.class );
InputStream inputStream = Mockito.mock( InputStream.class );
kettleVFSMockedStatic.when( () -> KettleVFS.getFileObject( anyString() ) ).thenThrow( IOException.class );
servlet.copyRequestToDirectory( inputStream, "/tmp/path" );
}
} |
@Override
@MethodNotAvailable
public <T> T invoke(K key, EntryProcessor<K, V, T> entryProcessor, Object... arguments) throws EntryProcessorException {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testInvoke() {
adapter.invoke(23, new ICacheReplaceEntryProcessor(), "value", "newValue");
} |
static Timestamp toTimestamp(final JsonNode object) {
if (object instanceof NumericNode) {
return new Timestamp(object.asLong());
}
if (object instanceof TextNode) {
try {
return new Timestamp(Long.parseLong(object.textValue()));
} catch (final NumberFormatException e) {
throw failedStringCoercionException(SqlBaseType.TIMESTAMP);
}
}
throw invalidConversionException(object, SqlBaseType.TIMESTAMP);
} | @Test
public void shouldConvertLongToTimestampCorrectly() {
final Timestamp d = JsonSerdeUtils.toTimestamp(JsonNodeFactory.instance.numberNode(100));
assertThat(d.getTime(), equalTo(100L));
} |
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
meta = (ReplaceStringMeta) smi;
data = (ReplaceStringData) sdi;
if ( super.init( smi, sdi ) ) {
return true;
}
return false;
} | @Test
public void testSynchronizeDifferentFieldsArraysLengths() {
ReplaceStringData data = new ReplaceStringData();
ReplaceString replaceString =
new ReplaceString( stepMockHelper.stepMeta, data, 0, stepMockHelper.transMeta, stepMockHelper.trans );
ReplaceStringMeta meta = new ReplaceStringMeta();
replaceString.init( meta, data );
meta.setFieldInStream( new String[] { "input1", "input2" } );
meta.setFieldOutStream( new String[] { "out" } );
meta.setUseRegEx( new boolean[] { true } );
meta.setCaseSensitive( new boolean[] { false } );
meta.setWholeWord( new boolean[] { true } );
meta.setReplaceString( new String[] { "string" } );
meta.setReplaceByString( new String[] { "string" } );
meta.setEmptyString( new boolean[] { true } );
meta.setFieldReplaceByString( new String[] { "string" } );
meta.afterInjectionSynchronization();
assertEquals( meta.getFieldInStream().length, meta.getFieldOutStream().length );
assertEquals( StringUtils.EMPTY, meta.getFieldOutStream()[ 1 ] );
assertEquals( meta.getFieldInStream().length, meta.getUseRegEx().length );
assertFalse( meta.getUseRegEx()[ 1 ] );
assertEquals( meta.getFieldInStream().length, meta.getCaseSensitive().length );
assertFalse( meta.getCaseSensitive()[ 1 ] );
assertEquals( meta.getFieldInStream().length, meta.getWholeWord().length );
assertFalse( meta.getWholeWord()[ 1 ] );
assertEquals( meta.getFieldInStream().length, meta.getReplaceString().length );
assertEquals( StringUtils.EMPTY, meta.getReplaceString()[ 1 ] );
assertEquals( meta.getFieldInStream().length, meta.getReplaceByString().length );
assertEquals( StringUtils.EMPTY, meta.getReplaceByString()[ 1 ] );
assertEquals( meta.getFieldInStream().length, meta.isSetEmptyString().length );
assertFalse( meta.isSetEmptyString()[ 1 ] );
assertEquals( meta.getFieldInStream().length, meta.getFieldReplaceByString().length );
assertEquals( StringUtils.EMPTY, meta.getFieldReplaceByString()[ 1 ] );
} |
@NotNull
@Override
public Response intercept(@NotNull Chain chain) throws IOException {
Request request = chain.request().newBuilder().removeHeader("Accept-Encoding").build();
Response response = chain.proceed(request);
if (response.headers("Content-Encoding").contains("gzip")) {
response.close();
}
return response;
} | @Test
public void intercept_whenGzipContentEncodingNotIncluded_shouldNotCloseTheResponse() throws IOException {
when(response.headers()).thenReturn(Headers.of("Custom-header", "not-gzip"));
underTest.intercept(chain);
verify(response, times(0)).close();
} |
public static String generateResourceId(
String baseString,
Pattern illegalChars,
String replaceChar,
int targetLength,
DateTimeFormatter timeFormat) {
// first, make sure the baseString, typically the test ID, is not empty
checkArgument(baseString.length() != 0, "baseString cannot be empty.");
// next, replace all illegal characters from given string with given replacement character
String illegalCharsRemoved =
illegalChars.matcher(baseString.toLowerCase()).replaceAll(replaceChar);
// finally, append the date/time and return the substring that does not exceed the length limit
LocalDateTime localDateTime = LocalDateTime.now(ZoneId.of(TIME_ZONE));
String timeAddOn = localDateTime.format(timeFormat);
return illegalCharsRemoved.subSequence(
0, min(targetLength - timeAddOn.length() - 1, illegalCharsRemoved.length()))
+ replaceChar
+ localDateTime.format(timeFormat);
} | @Test
public void testGenerateResourceIdShouldReplaceUnderscoreWithHyphen() {
String testBaseString = "test_inst";
String actual =
generateResourceId(
testBaseString,
ILLEGAL_INSTANCE_CHARS,
REPLACE_INSTANCE_CHAR,
MAX_INSTANCE_ID_LENGTH,
TIME_FORMAT);
assertThat(actual).matches("test-inst-\\d{8}-\\d{6}-\\d{6}");
} |
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
table.refresh();
if (lastPosition != null) {
return discoverIncrementalSplits(lastPosition);
} else {
return discoverInitialSplits();
}
} | @Test
public void testTableScanNoStats() throws Exception {
appendTwoSnapshots();
ScanContext scanContext =
ScanContext.builder()
.includeColumnStats(false)
.startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
.build();
ContinuousSplitPlannerImpl splitPlanner =
new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
assertThat(initialResult.splits()).hasSize(1);
IcebergSourceSplit split = Iterables.getOnlyElement(initialResult.splits());
assertThat(split.task().files()).hasSize(2);
verifyStatCount(split, 0);
IcebergEnumeratorPosition lastPosition = initialResult.toPosition();
for (int i = 0; i < 3; ++i) {
CycleResult result = verifyOneCycle(splitPlanner, lastPosition);
verifyStatCount(result.split, 0);
lastPosition = result.lastPosition;
}
} |
@Override
public PulsarAdmin getPulsarAdmin() {
if (exposePulsarAdminClientEnabled) {
return pulsarAdmin;
} else {
throw new IllegalStateException("PulsarAdmin is not enabled in function worker");
}
} | @Test
public void testGetPulsarAdmin() throws Exception {
assertEquals(context.getPulsarAdmin(), pulsarAdmin);
} |
@Override
public void execute(ComputationStep.Context context) {
new PathAwareCrawler<>(
FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
.buildFor(List.of(duplicationFormula)))
.visit(treeRootHolder.getRoot());
} | @Test
public void compute_new_duplicated_lines_density() {
setNewLines(FILE_1, FILE_2, FILE_4);
addDuplicatedBlock(FILE_1_REF, 2);
addDuplicatedBlock(FILE_3_REF, 10);
addDuplicatedBlock(FILE_4_REF, 12);
underTest.execute(new TestComputationStepContext());
assertRawMeasureValue(FILE_1_REF, NEW_DUPLICATED_LINES_DENSITY_KEY, 18.2d);
assertRawMeasureValue(FILE_2_REF, NEW_DUPLICATED_LINES_DENSITY_KEY, 0d);
assertNoRawMeasure(FILE_3_REF, NEW_DUPLICATED_LINES_DENSITY_KEY);
assertRawMeasureValue(FILE_4_REF, NEW_DUPLICATED_LINES_DENSITY_KEY, 100d);
assertRawMeasureValue(DIRECTORY_REF, NEW_DUPLICATED_LINES_DENSITY_KEY, 9.1d);
assertNoRawMeasure(DIRECTORY_2_REF, NEW_DUPLICATED_LINES_DENSITY_KEY);
assertRawMeasureValue(ROOT_REF, NEW_DUPLICATED_LINES_DENSITY_KEY, 39.4d);
} |
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldRangeQueryWithCorrectParams_fullTableScan() {
// Given:
when(kafkaStreams.query(any())).thenReturn(getIteratorResult());
// When:
table.get(PARTITION);
// Then:
verify(kafkaStreams).query(queryTypeCaptor.capture());
StateQueryRequest request = queryTypeCaptor.getValue();
assertThat(request.getQuery(), instanceOf(RangeQuery.class));
RangeQuery rangeQuery = (RangeQuery)request.getQuery();
assertThat(rangeQuery.getLowerBound(), is(Optional.empty()));
assertThat(rangeQuery.getUpperBound(), is(Optional.empty()));
} |
public static MaterializedDataPredicates getMaterializedDataPredicates(
SemiTransactionalHiveMetastore metastore,
MetastoreContext metastoreContext,
TypeManager typeManager,
Table table,
DateTimeZone timeZone)
{
List<Column> partitionColumns = table.getPartitionColumns();
for (Column partitionColumn : partitionColumns) {
HiveType hiveType = partitionColumn.getType();
if (!hiveType.isSupportedType()) {
throw new PrestoException(
NOT_SUPPORTED,
String.format("Unsupported Hive type %s found in partition keys of table %s.%s", hiveType, table.getDatabaseName(), table.getTableName()));
}
}
List<HiveColumnHandle> partitionKeyColumnHandles = getPartitionKeyColumnHandles(table);
Map<String, Type> partitionTypes = partitionKeyColumnHandles.stream()
.collect(toImmutableMap(HiveColumnHandle::getName, column -> typeManager.getType(column.getTypeSignature())));
List<PartitionNameWithVersion> partitionNames = metastore.getPartitionNames(metastoreContext, table.getDatabaseName(), table.getTableName())
.orElseThrow(() -> new TableNotFoundException(new SchemaTableName(table.getDatabaseName(), table.getTableName())));
ImmutableList.Builder<TupleDomain<String>> partitionNamesAndValues = ImmutableList.builder();
for (PartitionNameWithVersion partitionName : partitionNames) {
ImmutableMap.Builder<String, NullableValue> partitionNameAndValuesMap = ImmutableMap.builder();
Map<String, String> partitions = toPartitionNamesAndValues(partitionName.getPartitionName());
if (partitionColumns.size() != partitions.size()) {
throw new PrestoException(HIVE_INVALID_METADATA, String.format(
"Expected %d partition key values, but got %d", partitionColumns.size(), partitions.size()));
}
partitionTypes.forEach((name, type) -> {
String value = partitions.get(name);
if (value == null) {
throw new PrestoException(HIVE_INVALID_PARTITION_VALUE, String.format("partition key value cannot be null for field: %s", name));
}
partitionNameAndValuesMap.put(name, parsePartitionValue(name, value, type, timeZone));
});
TupleDomain<String> tupleDomain = TupleDomain.fromFixedValues(partitionNameAndValuesMap.build());
partitionNamesAndValues.add(tupleDomain);
}
return new MaterializedDataPredicates(partitionNamesAndValues.build(), partitionColumns.stream()
.map(Column::getName)
.collect(toImmutableList()));
} | @Test
public void testMaterializedDataPredicatesWithIntPartitionType()
{
TestingTypeManager typeManager = new TestingTypeManager();
TestingSemiTransactionalHiveMetastore testMetastore = TestingSemiTransactionalHiveMetastore.create();
List<String> keys = ImmutableList.of("ds", "code");
Column dsColumn = new Column("ds", HIVE_STRING, Optional.empty(), Optional.empty());
Column codeColumn = new Column("code", HIVE_INT, Optional.empty(), Optional.empty());
List<Column> partitionColumns = ImmutableList.of(dsColumn, codeColumn);
List<String> partitions = ImmutableList.of(
"ds=2021-01-01/code=1",
"ds=2021-01-01/code=2",
"ds=2021-01-02/code=1",
"ds=2021-01-02/code=2");
testMetastore.setPartitionNames(partitions);
ImmutableList.Builder<List<TestingPartitionResult>> partitionResults = ImmutableList.builder();
partitionResults.add(ImmutableList.of(
new TestingPartitionResult("ds", VARCHAR, "CAST('2021-01-01' AS varchar)"),
new TestingPartitionResult("code", INTEGER, "1")));
partitionResults.add(ImmutableList.of(
new TestingPartitionResult("ds", VARCHAR, "CAST('2021-01-01' AS varchar)"),
new TestingPartitionResult("code", INTEGER, "2")));
partitionResults.add(ImmutableList.of(
new TestingPartitionResult("ds", VARCHAR, "CAST('2021-01-02' AS varchar)"),
new TestingPartitionResult("code", INTEGER, "1")));
partitionResults.add(ImmutableList.of(
new TestingPartitionResult("ds", VARCHAR, "CAST('2021-01-02' AS varchar)"),
new TestingPartitionResult("code", INTEGER, "2")));
MaterializedDataPredicates materializedDataPredicates =
getMaterializedDataPredicates(testMetastore, metastoreContext, typeManager, getTable(partitionColumns), DateTimeZone.UTC);
comparePredicates(materializedDataPredicates, keys, partitionResults.build());
} |
public static BigDecimal[] toDecimalArray(String name, Object value) {
try {
if (value instanceof BigDecimal[]) {
return (BigDecimal[]) value;
} else if (value instanceof double[]) {
return Arrays.stream((double[]) value)
.mapToObj(val -> new BigDecimal(String.valueOf(val)))
.toArray(BigDecimal[]::new);
} else if (value instanceof List) {
return ((List<?>) value)
.stream().map(d -> new BigDecimal(String.valueOf(d))).toArray(BigDecimal[]::new);
} else {
throw new MaestroInternalError(
"Cannot cast value [%s] into a BigDecimal array for param [%s]",
toTruncateString(value), name);
}
} catch (NumberFormatException nfe) {
throw new MaestroInternalError(
nfe, "Invalid number format for value: %s for param [%s]", toTruncateString(value), name);
}
} | @Test
public void testListToDecimalArray() {
Object val = Arrays.asList(new BigDecimal("1.2"), "3.4", 5.6);
BigDecimal[] actual = ParamHelper.toDecimalArray("foo", val);
assertEquals(1.2, actual[0].doubleValue(), 0.00000000);
assertEquals(3.4, actual[1].doubleValue(), 0.00000000);
assertEquals(5.6, actual[2].doubleValue(), 0.00000000);
} |
@Override
public void sendSmsCode(SmsCodeSendReqDTO reqDTO) {
SmsSceneEnum sceneEnum = SmsSceneEnum.getCodeByScene(reqDTO.getScene());
Assert.notNull(sceneEnum, "验证码场景({}) 查找不到配置", reqDTO.getScene());
// 创建验证码
String code = createSmsCode(reqDTO.getMobile(), reqDTO.getScene(), reqDTO.getCreateIp());
// 发送验证码
smsSendService.sendSingleSms(reqDTO.getMobile(), null, null,
sceneEnum.getTemplateCode(), MapUtil.of("code", code));
} | @Test
public void sendSmsCode_success() {
// 准备参数
SmsCodeSendReqDTO reqDTO = randomPojo(SmsCodeSendReqDTO.class, o -> {
o.setMobile("15601691300");
o.setScene(SmsSceneEnum.MEMBER_LOGIN.getScene());
});
// mock 方法
SqlConstants.init(DbType.MYSQL);
// 调用
smsCodeService.sendSmsCode(reqDTO);
// 断言 code 验证码
SmsCodeDO smsCodeDO = smsCodeMapper.selectOne(null);
assertPojoEquals(reqDTO, smsCodeDO);
assertEquals("9999", smsCodeDO.getCode());
assertEquals(1, smsCodeDO.getTodayIndex());
assertFalse(smsCodeDO.getUsed());
// 断言调用
verify(smsSendService).sendSingleSms(eq(reqDTO.getMobile()), isNull(), isNull(),
eq("user-sms-login"), eq(MapUtil.of("code", "9999")));
} |
@Override
public Long createConfig(ConfigSaveReqVO createReqVO) {
// 校验参数配置 key 的唯一性
validateConfigKeyUnique(null, createReqVO.getKey());
// 插入参数配置
ConfigDO config = ConfigConvert.INSTANCE.convert(createReqVO);
config.setType(ConfigTypeEnum.CUSTOM.getType());
configMapper.insert(config);
return config.getId();
} | @Test
public void testCreateConfig_success() {
// 准备参数
ConfigSaveReqVO reqVO = randomPojo(ConfigSaveReqVO.class)
.setId(null); // 防止 id 被赋值,导致唯一性校验失败
// 调用
Long configId = configService.createConfig(reqVO);
// 断言
assertNotNull(configId);
// 校验记录的属性是否正确
ConfigDO config = configMapper.selectById(configId);
assertPojoEquals(reqVO, config, "id");
assertEquals(ConfigTypeEnum.CUSTOM.getType(), config.getType());
} |
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
Objects.requireNonNull(key, "key cannot be null");
return new MeteredWindowedKeyValueIterator<>(
wrapped().fetch(keyBytes(key)),
fetchSensor,
iteratorDurationSensor,
streamsMetrics,
serdes::keyFrom,
serdes::valueFrom,
time,
numOpenIterators,
openIterators);
} | @Test
public void shouldThrowNullPointerOnFetchIfKeyIsNull() {
setUpWithoutContext();
assertThrows(NullPointerException.class, () -> store.fetch(null));
} |
public boolean similarTo(ClusterStateBundle other) {
if (!baselineState.getClusterState().similarToIgnoringInitProgress(other.baselineState.getClusterState())) {
return false;
}
if (clusterFeedIsBlocked() != other.clusterFeedIsBlocked()) {
return false;
}
if (clusterFeedIsBlocked() && !feedBlock.similarTo(other.feedBlock)) {
return false;
}
// Distribution configs must match exactly for bundles to be similar.
// It may be the case that they are both null, in which case they are also considered equal.
if (!Objects.equals(distributionConfig, other.distributionConfig)) {
return false;
}
// FIXME we currently treat mismatching bucket space sets as unchanged to avoid breaking some tests
return derivedBucketSpaceStates.entrySet().stream()
.allMatch(entry -> other.derivedBucketSpaceStates.getOrDefault(entry.getKey(), entry.getValue())
.getClusterState().similarToIgnoringInitProgress(entry.getValue().getClusterState()));
} | @Test
void similarity_test_considers_cluster_feed_block_concrete_exhaustion_set() {
var blockingBundleNoSet = createTestBundleWithFeedBlock("foo");
var blockingBundleWithSet = createTestBundleWithFeedBlock("bar", setOf(exhaustion(1, "beer"), exhaustion(1, "wine")));
var blockingBundleWithOtherSet = createTestBundleWithFeedBlock("bar", setOf(exhaustion(1, "beer"), exhaustion(1, "soda")));
assertTrue(blockingBundleNoSet.similarTo(blockingBundleNoSet));
assertTrue(blockingBundleWithSet.similarTo(blockingBundleWithSet));
assertFalse(blockingBundleWithSet.similarTo(blockingBundleWithOtherSet));
assertFalse(blockingBundleNoSet.similarTo(blockingBundleWithSet));
assertFalse(blockingBundleNoSet.similarTo(blockingBundleWithOtherSet));
} |
@Override
public Distribution distribute(D2CanaryDistributionStrategy strategy)
{
switch (strategy.getStrategy()) {
case TARGET_HOSTS:
return distributeByTargetHosts(strategy);
case TARGET_APPLICATIONS:
return distributeByTargetApplications(strategy);
case PERCENTAGE:
return distributeByPercentage(strategy);
case DISABLED:
return Distribution.STABLE;
default:
_log.warn("Invalid distribution strategy type: " + strategy.getStrategy().name());
return Distribution.STABLE;
}
} | @Test(dataProvider = "getNormalCasesForPercentageStrategy")
public void testNormalCasesForPercentageStrategy(D2CanaryDistributionStrategy strategy, int hashResult,
CanaryDistributionProvider.Distribution expected)
{
CanaryDistributionProviderImplFixture fixture = new CanaryDistributionProviderImplFixture();
Assert.assertEquals(fixture.getSpiedImpl(hashResult).distribute(strategy), expected,
"Testing percentage strategy: " + strategy + ", with hash result: " + hashResult + ", should return: "
+ expected.name());
} |
protected CuratorFramework getZkClient() {
return this.zkClient;
} | @Test
public void testZooKeeperDataSourceSameZkClient() throws Exception {
TestingServer server = new TestingServer(21813);
server.start();
final String remoteAddress = server.getConnectString();
final String flowPath = "/sentinel-zk-ds-demo/flow-HK";
final String degradePath = "/sentinel-zk-ds-demo/degrade-HK";
ZookeeperDataSource<List<FlowRule>> flowRuleZkDataSource = new ZookeeperDataSource<>(remoteAddress, flowPath,
new Converter<String, List<FlowRule>>() {
@Override
public List<FlowRule> convert(String source) {
return JSON.parseObject(source, new TypeReference<List<FlowRule>>() {
});
}
});
ZookeeperDataSource<List<DegradeRule>> degradeRuleZkDataSource = new ZookeeperDataSource<>(remoteAddress, degradePath,
new Converter<String, List<DegradeRule>>() {
@Override
public List<DegradeRule> convert(String source) {
return JSON.parseObject(source, new TypeReference<List<DegradeRule>>() {
});
}
});
Assert.assertTrue(flowRuleZkDataSource.getZkClient() == degradeRuleZkDataSource.getZkClient());
final String groupId = "sentinel-zk-ds-demo";
final String flowDataId = "flow-HK";
final String degradeDataId = "degrade-HK";
final String scheme = "digest";
final String auth = "root:123456";
AuthInfo authInfo = new AuthInfo(scheme, auth.getBytes());
List<AuthInfo> authInfoList = Collections.singletonList(authInfo);
ZookeeperDataSource<List<FlowRule>> flowRuleZkAutoDataSource = new ZookeeperDataSource<List<FlowRule>>(remoteAddress,
authInfoList, groupId, flowDataId,
new Converter<String, List<FlowRule>>() {
@Override
public List<FlowRule> convert(String source) {
return JSON.parseObject(source, new TypeReference<List<FlowRule>>() {
});
}
});
ZookeeperDataSource<List<DegradeRule>> degradeRuleZkAutoDataSource = new ZookeeperDataSource<List<DegradeRule>>(remoteAddress,
authInfoList, groupId, degradeDataId,
new Converter<String, List<DegradeRule>>() {
@Override
public List<DegradeRule> convert(String source) {
return JSON.parseObject(source, new TypeReference<List<DegradeRule>>() {
});
}
});
Assert.assertTrue(flowRuleZkAutoDataSource.getZkClient() == degradeRuleZkAutoDataSource.getZkClient());
server.stop();
} |
@Override
@SuppressWarnings("unchecked")
public void run(DiagnosticsLogWriter writer) {
writer.startSection("SystemProperties");
keys.clear();
keys.addAll(System.getProperties().keySet());
keys.add(JVM_ARGS);
sort(keys);
for (Object key : keys) {
String keyString = (String) key;
if (isIgnored(keyString)) {
continue;
}
String value = getProperty(keyString);
writer.writeKeyValueEntry(keyString, value);
}
writer.endSection();
} | @Test
public void testRun() {
plugin.run(logWriter);
Properties systemProperties = System.getProperties();
// we check a few of the regular ones
assertContains("java.class.version=" + systemProperties.get("java.class.version"));
assertContains("java.class.path=" + systemProperties.get("java.class.path"));
// we want to make sure the hazelcast system properties are added
assertContains(FAKE_PROPERTY + "=" + FAKE_PROPERTY_VALUE);
// java.vm.args doesn't work under windows
// https://github.com/hazelcast/hazelcast/issues/11610
assertContains(SystemPropertiesPlugin.JVM_ARGS + "=");
// we don't want to have awt
assertNotContains("java.awt");
} |
void recordingStopped(final long recordingId, final long position, final long timestampMs)
{
final int recordingDescriptorOffset = recordingDescriptorOffset(recordingId);
final int offset = recordingDescriptorOffset + DESCRIPTOR_HEADER_LENGTH;
final long stopPosition = nativeOrder() == BYTE_ORDER ? position : Long.reverseBytes(position);
fieldAccessBuffer.putLong(offset + stopTimestampEncodingOffset(), timestampMs, BYTE_ORDER);
fieldAccessBuffer.putLongVolatile(offset + stopPositionEncodingOffset(), stopPosition);
updateChecksum(recordingDescriptorOffset);
forceWrites(catalogChannel);
} | @Test
void recordingStoppedShouldUpdateChecksum()
{
final Checksum checksum = crc32();
try (Catalog catalog = new Catalog(archiveDir, null, 0, CAPACITY, clock, checksum, segmentFileBuffer))
{
assertChecksum(catalog, recordingOneId, 160, 0, null);
catalog.recordingStopped(recordingOneId, 140, 231723682323L);
assertChecksum(catalog, recordingOneId, 160, 1656993099, checksum);
}
} |
@Bean("ModuleConfiguration")
public ModuleConfiguration provide(GlobalConfiguration globalConfig, DefaultInputModule module, GlobalServerSettings globalServerSettings,
ProjectServerSettings projectServerSettings) {
Map<String, String> settings = new LinkedHashMap<>();
settings.putAll(globalServerSettings.properties());
settings.putAll(projectServerSettings.properties());
addScannerSideProperties(settings, module.definition());
settings = sonarGlobalPropertiesFilter.enforceOnlyServerSideSonarGlobalPropertiesAreUsed(settings, globalServerSettings.properties());
return new ModuleConfiguration(globalConfig.getDefinitions(), globalConfig.getEncryption(), settings);
} | @Test
public void should_concatAllPropertiesForCallFilterAndApplyFilterChanges() {
when(globalServerSettings.properties()).thenReturn(GLOBAL_SERVER_PROPERTIES);
when(projectServerSettings.properties()).thenReturn(PROJECT_SERVER_PROPERTIES);
when(sonarGlobalPropertiesFilter.enforceOnlyServerSideSonarGlobalPropertiesAreUsed(ALL_PROPERTIES_MAP, GLOBAL_SERVER_PROPERTIES))
.thenReturn(PROPERTIES_AFTER_FILTERING);
ModuleConfiguration provide = provider.provide(globalConfiguration, defaultInputProject, globalServerSettings, projectServerSettings);
verify(sonarGlobalPropertiesFilter).enforceOnlyServerSideSonarGlobalPropertiesAreUsed(ALL_PROPERTIES_MAP, GLOBAL_SERVER_PROPERTIES);
assertThat(provide.getOriginalProperties()).containsExactlyEntriesOf(PROPERTIES_AFTER_FILTERING);
} |
@Override
public void removeConfigHistory(final Timestamp startTime, final int limitSize) {
HistoryConfigInfoMapper historyConfigInfoMapper = mapperManager.findMapper(
dataSourceService.getDataSourceType(), TableConstant.HIS_CONFIG_INFO);
MapperContext context = new MapperContext();
context.putWhereParameter(FieldConstant.START_TIME, startTime);
context.putWhereParameter(FieldConstant.LIMIT_SIZE, limitSize);
MapperResult mapperResult = historyConfigInfoMapper.removeConfigHistory(context);
PaginationHelper<ConfigInfo> helper = createPaginationHelper();
helper.updateLimit(mapperResult.getSql(), mapperResult.getParamList().toArray());
} | @Test
void testRemoveConfigHistory() {
Timestamp timestamp = new Timestamp(System.currentTimeMillis());
int pageSize = 1233;
embeddedHistoryConfigInfoPersistService.removeConfigHistory(timestamp, pageSize);
//verify delete by time and size invoked.
embeddedStorageContextHolderMockedStatic.verify(
() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(timestamp), eq(pageSize)), times(1));
} |
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
final String storeName = storeQueryParameters.storeName();
final QueryableStoreType<T> queryableStoreType = storeQueryParameters.queryableStoreType();
final List<T> globalStore = globalStoreProvider.stores(storeName, queryableStoreType);
if (!globalStore.isEmpty()) {
return queryableStoreType.create(globalStoreProvider, storeName);
}
return queryableStoreType.create(
new WrappingStoreProvider(storeProviders.values(), storeQueryParameters),
storeName
);
} | @Test
public void shouldThrowExceptionIfWindowStoreDoesntExist() {
assertThrows(InvalidStateStoreException.class, () -> storeProvider.getStore(
StoreQueryParameters.fromNameAndType("not-a-store", QueryableStoreTypes.windowStore())).fetch("1", System.currentTimeMillis()));
} |
public static Map<TopicPartition, ListOffsetsResultInfo> fetchEndOffsets(final Collection<TopicPartition> partitions,
final Admin adminClient) {
if (partitions.isEmpty()) {
return Collections.emptyMap();
}
return getEndOffsets(fetchEndOffsetsFuture(partitions, adminClient));
} | @Test
public void fetchEndOffsetsShouldRethrowExecutionExceptionAsStreamsException() throws Exception {
final Admin adminClient = mock(AdminClient.class);
final ListOffsetsResult result = mock(ListOffsetsResult.class);
@SuppressWarnings("unchecked")
final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = mock(KafkaFuture.class);
when(adminClient.listOffsets(any())).thenReturn(result);
when(result.all()).thenReturn(allFuture);
when(allFuture.get()).thenThrow(new ExecutionException(new RuntimeException()));
assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, adminClient));
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void unpinAllGeneralForumTopicMessages() {
BaseResponse response = bot.execute(new UnpinAllGeneralForumTopicMessages(forum));
assertTrue(response.isOk());
} |
@Override
public boolean syncData(DistroData data, String targetServer) {
if (isNoExistTarget(targetServer)) {
return true;
}
DistroDataRequest request = new DistroDataRequest(data, data.getType());
Member member = memberManager.find(targetServer);
if (checkTargetServerStatusUnhealthy(member)) {
Loggers.DISTRO
.warn("[DISTRO] Cancel distro sync caused by target server {} unhealthy, key: {}", targetServer,
data.getDistroKey());
return false;
}
try {
Response response = clusterRpcClientProxy.sendRequest(member, request);
return checkResponse(response);
} catch (NacosException e) {
Loggers.DISTRO.error("[DISTRO-FAILED] Sync distro data failed! key: {}", data.getDistroKey(), e);
}
return false;
} | @Test
void testSyncDataWithCallbackException() throws NacosException {
when(memberManager.hasMember(member.getAddress())).thenReturn(true);
when(memberManager.find(member.getAddress())).thenReturn(member);
member.setState(NodeState.UP);
when(clusterRpcClientProxy.isRunning(member)).thenReturn(true);
doThrow(new NacosException()).when(clusterRpcClientProxy).asyncRequest(eq(member), any(), any());
transportAgent.syncData(new DistroData(), member.getAddress(), distroCallback);
verify(distroCallback).onFailed(any(NacosException.class));
} |
public void handleMessagesRetrieved(final Account account, final Device device, final String userAgent) {
pushNotificationScheduler.cancelScheduledNotifications(account, device).whenComplete(logErrors());
} | @Test
void testHandleMessagesRetrieved() {
final UUID accountIdentifier = UUID.randomUUID();
final Account account = mock(Account.class);
final Device device = mock(Device.class);
final String userAgent = HttpHeaders.USER_AGENT;
when(account.getUuid()).thenReturn(accountIdentifier);
when(device.getId()).thenReturn(Device.PRIMARY_ID);
when(pushNotificationScheduler.cancelScheduledNotifications(account, device))
.thenReturn(CompletableFuture.completedFuture(null));
pushNotificationManager.handleMessagesRetrieved(account, device, userAgent);
verify(pushNotificationScheduler).cancelScheduledNotifications(account, device);
} |
@PUT
@Path("/{pluginName}/config/validate")
@Operation(summary = "Validate the provided configuration against the configuration definition for the specified pluginName")
public ConfigInfos validateConfigs(
final @PathParam("pluginName") String pluginName,
final Map<String, String> connectorConfig
) throws Throwable {
String includedConnType = connectorConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
if (includedConnType != null
&& !normalizedPluginName(includedConnType).endsWith(normalizedPluginName(pluginName))) {
throw new BadRequestException(
"Included connector type " + includedConnType + " does not match request type "
+ pluginName
);
}
// the validated configs don't need to be logged
FutureCallback<ConfigInfos> validationCallback = new FutureCallback<>();
herder.validateConnectorConfig(connectorConfig, validationCallback, false);
try {
return validationCallback.get(requestTimeout.timeoutMs(), TimeUnit.MILLISECONDS);
} catch (StagedTimeoutException e) {
Stage stage = e.stage();
String message;
if (stage.completed() != null) {
message = "Request timed out. The last operation the worker completed was "
+ stage.description() + ", which began at "
+ Instant.ofEpochMilli(stage.started()) + " and completed at "
+ Instant.ofEpochMilli(stage.completed());
} else {
message = "Request timed out. The worker is currently "
+ stage.description() + ", which began at "
+ Instant.ofEpochMilli(stage.started());
}
// This timeout is for the operation itself. None of the timeout error codes are relevant, so internal server
// error is the best option
throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), message);
} catch (TimeoutException e) {
// This timeout is for the operation itself. None of the timeout error codes are relevant, so internal server
// error is the best option
throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), "Request timed out");
} catch (InterruptedException e) {
throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), "Request interrupted");
}
} | @Test
public void testValidateConfigWithNonExistentName() {
// make a request to connector-plugins resource using a non-loaded connector with the same
// simple name but different package.
String customClassname = "com.custom.package."
+ ConnectorPluginsResourceTestConnector.class.getSimpleName();
assertThrows(BadRequestException.class, () -> connectorPluginsResource.validateConfigs(customClassname, PROPS));
} |
public V getOldValue() {
if (oldValue == null && serializationService != null) {
oldValue = serializationService.toObject(oldValueData);
}
return oldValue;
} | @Test
public void testGetOldValue_withDataValue() {
assertEquals("oldValue", dataEvent.getOldValue());
} |
public KVTable getKVListByNamespace(final String namespace, final long timeoutMillis)
throws RemotingException, MQClientException, InterruptedException {
GetKVListByNamespaceRequestHeader requestHeader = new GetKVListByNamespaceRequestHeader();
requestHeader.setNamespace(namespace);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_KVLIST_BY_NAMESPACE, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(null, request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return KVTable.decode(response.getBody(), KVTable.class);
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
} | @Test
public void assertGetKVListByNamespace() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
KVTable responseBody = new KVTable();
responseBody.getTable().put("key", "value");
setResponseBody(responseBody);
KVTable actual = mqClientAPI.getKVListByNamespace("", defaultTimeout);
assertNotNull(actual);
assertEquals(1, actual.getTable().size());
} |
@Override
public boolean next() {
if (rows.hasNext()) {
currentRow = rows.next();
return true;
}
return false;
} | @Test
void assertNext() {
LocalDataQueryResultRow row = new LocalDataQueryResultRow("value");
LocalDataMergedResult actual = new LocalDataMergedResult(Collections.singletonList(row));
assertTrue(actual.next());
assertFalse(actual.next());
} |
public void setUserArtifactBlobKey(String entryName, PermanentBlobKey blobKey)
throws IOException {
byte[] serializedBlobKey;
serializedBlobKey = InstantiationUtil.serializeObject(blobKey);
userArtifacts.computeIfPresent(
entryName,
(key, originalEntry) ->
new DistributedCache.DistributedCacheEntry(
originalEntry.filePath,
originalEntry.isExecutable,
serializedBlobKey,
originalEntry.isZipped));
} | @Test
public void testSetUserArtifactBlobKey() throws IOException, ClassNotFoundException {
JobGraph jb = JobGraphTestUtils.emptyJobGraph();
final DistributedCache.DistributedCacheEntry[] entries = {
new DistributedCache.DistributedCacheEntry("p1", true, true),
new DistributedCache.DistributedCacheEntry("p2", true, false),
new DistributedCache.DistributedCacheEntry("p3", false, true),
new DistributedCache.DistributedCacheEntry("p4", true, false),
};
for (DistributedCache.DistributedCacheEntry entry : entries) {
jb.addUserArtifact(entry.filePath, entry);
}
for (DistributedCache.DistributedCacheEntry entry : entries) {
PermanentBlobKey blobKey = new PermanentBlobKey();
jb.setUserArtifactBlobKey(entry.filePath, blobKey);
DistributedCache.DistributedCacheEntry jobGraphEntry =
jb.getUserArtifacts().get(entry.filePath);
assertNotNull(jobGraphEntry);
assertEquals(
blobKey,
InstantiationUtil.deserializeObject(
jobGraphEntry.blobKey, ClassLoader.getSystemClassLoader()));
assertEquals(entry.isExecutable, jobGraphEntry.isExecutable);
assertEquals(entry.isZipped, jobGraphEntry.isZipped);
assertEquals(entry.filePath, jobGraphEntry.filePath);
}
} |
@Override
public CompletableFuture<Void> closeAsync() {
return cleanupFuture;
} | @Test
void testCloseAsyncBeforeStart() {
final CheckpointResourcesCleanupRunner testInstance = new TestInstanceBuilder().build();
assertThat(testInstance.closeAsync()).isNotCompleted();
} |
@Override
public void aggregate(Iterable<Integer> hashValues) {
for (int hash : hashValues) {
aggregate(hash);
}
} | @Test
public void requireThatSketchBucketsAreCorrectForMultipleValues() {
NormalSketch sketch = new NormalSketch(10);
// Aggregate multiple values
sketch.aggregate(List.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
for (int i = 0; i < 10; i++) {
assertBucketEquals(sketch, i, 23);
}
// Check that the other values are zero.
for (int i = 10; i < 1024; i++) {
assertBucketEquals(sketch, i, 0);
}
} |
@Override
public boolean canDeserialize(String topic, Target type) {
return topic.equals(TOPIC);
} | @Test
void canOnlyDeserializeConsumerOffsetsTopic() {
var serde = new ConsumerOffsetsSerde();
assertThat(serde.canDeserialize(ConsumerOffsetsSerde.TOPIC, Serde.Target.KEY)).isTrue();
assertThat(serde.canDeserialize(ConsumerOffsetsSerde.TOPIC, Serde.Target.VALUE)).isTrue();
assertThat(serde.canDeserialize("anyOtherTopic", Serde.Target.KEY)).isFalse();
assertThat(serde.canDeserialize("anyOtherTopic", Serde.Target.VALUE)).isFalse();
} |
public static LoadingCache<String, SQLStatement> build(final DatabaseType databaseType, final CacheOption sqlStatementCacheOption,
final CacheOption parseTreeCacheOption) {
return Caffeine.newBuilder().softValues().initialCapacity(sqlStatementCacheOption.getInitialCapacity()).maximumSize(sqlStatementCacheOption.getMaximumSize())
.build(new SQLStatementCacheLoader(databaseType, parseTreeCacheOption));
} | @Test
void assertBuild() {
assertThat(SQLStatementCacheBuilder.build(TypedSPILoader.getService(DatabaseType.class, "MySQL"), new CacheOption(2000, 65535L), new CacheOption(128, 1024L)), isA(LoadingCache.class));
} |
@Override
public CompletableFuture<Void> getStateConsumedFuture() {
synchronized (requestLock) {
List<CompletableFuture<?>> futures = new ArrayList<>(numberOfInputChannels);
for (InputChannel inputChannel : inputChannels()) {
if (inputChannel instanceof RecoveredInputChannel) {
futures.add(((RecoveredInputChannel) inputChannel).getStateConsumedFuture());
}
}
return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
}
} | @Test
void testCheckpointsDeclinedUnlessStateConsumed() {
SingleInputGate gate = createInputGate(createNettyShuffleEnvironment());
checkState(!gate.getStateConsumedFuture().isDone());
assertThatThrownBy(
() ->
gate.checkpointStarted(
new CheckpointBarrier(
1L,
1L,
alignedNoTimeout(CHECKPOINT, getDefault()))))
.isInstanceOf(CheckpointException.class);
} |
public static boolean defineAccessorClass(Class<?> beanClass) {
ClassLoader classLoader = beanClass.getClassLoader();
if (classLoader == null) {
// Maybe return null if this class was loaded by the bootstrap class loader.
return false;
}
String qualifiedClassName = qualifiedAccessorClassName(beanClass);
try {
classLoader.loadClass(qualifiedClassName);
return true;
} catch (ClassNotFoundException ignored) {
Object lock;
synchronized (defineLock) {
if (defineAccessorStatus.containsKey(beanClass)) {
return defineAccessorStatus.get(beanClass);
} else {
lock = getDefineLock(beanClass);
}
}
synchronized (lock) {
if (defineAccessorStatus.containsKey(beanClass)) {
return defineAccessorStatus.get(beanClass);
}
long startTime = System.nanoTime();
String code = genCode(beanClass);
long durationMs = (System.nanoTime() - startTime) / 1000_000;
LOG.info("Generate code {} take {} ms", qualifiedClassName, durationMs);
String pkg = CodeGenerator.getPackage(beanClass);
CompileUnit compileUnit = new CompileUnit(pkg, accessorClassName(beanClass), code);
Map<String, byte[]> classByteCodes = JaninoUtils.toBytecode(classLoader, compileUnit);
boolean succeed =
ClassLoaderUtils.tryDefineClassesInClassLoader(
qualifiedClassName,
beanClass,
classLoader,
classByteCodes.values().iterator().next())
!= null;
defineAccessorStatus.put(beanClass, succeed);
if (!succeed) {
LOG.info("Define accessor {} in classloader {} failed.", qualifiedClassName, classLoader);
}
return succeed;
}
}
} | @Test
public void defineAccessorClass() throws Exception {
assertTrue(AccessorHelper.defineAccessorClass(A.class));
Class<?> accessorClass = AccessorHelper.getAccessorClass(A.class);
assertEquals(accessorClass.getClassLoader(), A.class.getClassLoader());
A a = new A();
a.f1 = "str";
a.f2 = "str";
Method f1 = accessorClass.getDeclaredMethod("f1", A.class);
Method f2 = accessorClass.getDeclaredMethod("f2", A.class);
assertEquals(f1.invoke(null, a), a.f1);
assertEquals(f2.invoke(null, a), a.f2);
assertTrue(AccessorHelper.defineAccessor(A.class.getDeclaredField("f1")));
assertTrue(AccessorHelper.defineAccessor(A.class.getDeclaredMethod("getF1")));
assertSame(AccessorHelper.getAccessorClass(A.class), accessorClass);
} |
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
Objects.requireNonNull(pred, "pred cannot be null");
Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new StatisticsFilter(columns));
} | @Test
public void testNotEqNonNull() {
assertFalse(canDrop(notEq(intColumn, 9), columnMetas));
assertFalse(canDrop(notEq(intColumn, 10), columnMetas));
assertFalse(canDrop(notEq(intColumn, 100), columnMetas));
assertFalse(canDrop(notEq(intColumn, 101), columnMetas));
IntStatistics allSevens = new IntStatistics();
allSevens.setMinMax(7, 7);
assertTrue(canDrop(
notEq(intColumn, 7),
Arrays.asList(getIntColumnMeta(allSevens, 177L), getDoubleColumnMeta(doubleStats, 177L))));
allSevens.setNumNulls(100L);
assertFalse(canDrop(
notEq(intColumn, 7),
Arrays.asList(getIntColumnMeta(allSevens, 177L), getDoubleColumnMeta(doubleStats, 177L))));
allSevens.setNumNulls(177L);
assertFalse(canDrop(
notEq(intColumn, 7),
Arrays.asList(getIntColumnMeta(allSevens, 177L), getDoubleColumnMeta(doubleStats, 177L))));
assertFalse(canDrop(notEq(missingColumn, fromString("any")), columnMetas));
assertFalse(canDrop(notEq(intColumn, 50), missingMinMaxColumnMetas));
assertFalse(canDrop(notEq(doubleColumn, 50.0), missingMinMaxColumnMetas));
} |
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
DecimalColumnStatsDataInspector columnStatsData = decimalInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DecimalColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DecimalColumnStatsMerger merger = new DecimalColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
if (newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation && aggregateData != null
&& aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDecimalStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
DecimalColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue()) < MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) {
aggregateData.setLowValue(aggregateData.getLowValue());
} else {
aggregateData.setLowValue(newData.getLowValue());
}
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) > MetaStoreServerUtils
.decimalToDouble(newData.getHighValue())) {
aggregateData.setHighValue(aggregateData.getHighValue());
} else {
aggregateData.setHighValue(newData.getHighValue());
}
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDecimalStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDecimalStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateMultiStatsWhenUnmergeableBitVectors() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
ColumnStatisticsData data1 = new ColStatsBuilder<>(Decimal.class).numNulls(1).numDVs(3)
.low(ONE).high(THREE).fmSketch(1, 2, 3).kll(1, 2, 3).build();
ColumnStatisticsData data2 = new ColStatsBuilder<>(Decimal.class).numNulls(2).numDVs(3)
.low(THREE).high(FIVE).hll(3, 4, 5).kll(3, 4, 5).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(Decimal.class).numNulls(3).numDVs(4)
.low(ONE).high(EIGHT).hll(1, 2, 6, 8).kll(1, 2, 6, 8).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
DecimalColumnStatsAggregator aggregator = new DecimalColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
// the aggregation does not update the bitvector, only numDVs is, it keeps the first bitvector;
// numDVs is set to the maximum among all stats when non-mergeable bitvectors are detected
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Decimal.class).numNulls(6).numDVs(4)
.low(ONE).high(EIGHT).fmSketch(1, 2, 3).kll(1, 2, 3, 3, 4, 5, 1, 2, 6, 8).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
// the use of the density function leads to a different estimation for numNDV
expectedStats = new ColStatsBuilder<>(Decimal.class).numNulls(6).numDVs(6)
.low(ONE).high(EIGHT).fmSketch(1, 2, 3).kll(1, 2, 3, 3, 4, 5, 1, 2, 6, 8).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = false;
double[] tunerValues = new double[] { 0, 0.5, 0.75, 1 };
long[] expectedDVs = new long[] { 4, 7, 8, 10 };
for (int i = 0; i < tunerValues.length; i++) {
aggregator.ndvTuner = tunerValues[i];
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
expectedStats = new ColStatsBuilder<>(Decimal.class).numNulls(6).numDVs(expectedDVs[i])
.low(ONE).high(EIGHT).fmSketch(1, 2, 3).kll(1, 2, 3, 3, 4, 5, 1, 2, 6, 8).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
}
} |
public synchronized <K, V> KStream<K, V> stream(final String topic) {
return stream(Collections.singleton(topic));
} | @Test
public void shouldThrowWhenSubscribedToATopicWithDifferentResetPolicies() {
builder.stream("topic", Consumed.with(AutoOffsetReset.EARLIEST));
builder.stream("topic", Consumed.with(AutoOffsetReset.LATEST));
assertThrows(TopologyException.class, builder::build);
} |
public Optional<LineRange[]> getRangesPerLine(Component component) {
int numLines = component.getFileAttributes().getLines();
Optional<CloseableIterator<LineSgnificantCode>> opt = reportReader.readComponentSignificantCode(component.getReportAttributes().getRef());
if (!opt.isPresent()) {
return Optional.empty();
}
try (CloseableIterator<LineSgnificantCode> significantCode = opt.get()) {
return Optional.of(toArray(significantCode, numLines));
}
} | @Test
public void translate_offset_for_each_line() {
Component component = createComponent(1);
List<ScannerReport.LineSgnificantCode> significantCode = new ArrayList<>();
significantCode.add(createLineSignificantCode(1, 1, 2));
reportReader.putSignificantCode(component.getReportAttributes().getRef(), significantCode);
assertThat(underTest.getRangesPerLine(component)).isNotEmpty();
LineRange[] lines = underTest.getRangesPerLine(component).get();
assertThat(lines).hasSize(1);
assertThat(lines[0].startOffset()).isOne();
assertThat(lines[0].endOffset()).isEqualTo(2);
} |
@Override
public GenericRow extract(Map<String, Object> from, GenericRow to) {
Set<String> clpEncodedFieldNames = _config.getFieldsForClpEncoding();
if (_extractAll) {
for (Map.Entry<String, Object> recordEntry : from.entrySet()) {
String recordKey = recordEntry.getKey();
Object recordValue = recordEntry.getValue();
if (clpEncodedFieldNames.contains(recordKey)) {
encodeFieldWithClp(recordKey, recordValue, to);
} else {
if (null != recordValue) {
recordValue = convert(recordValue);
}
to.putValue(recordKey, recordValue);
}
}
return to;
}
// Handle un-encoded fields
for (String fieldName : _fields) {
Object value = from.get(fieldName);
if (null != value) {
value = convert(value);
}
to.putValue(fieldName, value);
}
// Handle encoded fields
for (String fieldName : _config.getFieldsForClpEncoding()) {
Object value = from.get(fieldName);
encodeFieldWithClp(fieldName, value, to);
}
return to;
} | @Test
// Verifies that a CLP-encoding config containing empty field names (leading, trailing
// and doubled separators) is tolerated: valid fields are still encoded in both
// specific-field and extract-all modes.
public void testBadCLPEncodingConfig() {
Map<String, String> props = new HashMap<>();
Set<String> fieldsToRead = new HashSet<>();
// Add some fields for CLP encoding with some mistakenly empty field names
String separator = FIELDS_FOR_CLP_ENCODING_SEPARATOR;
props.put(FIELDS_FOR_CLP_ENCODING_CONFIG_KEY, separator + _MESSAGE_1_FIELD_NAME
+ separator + separator + _MESSAGE_2_FIELD_NAME + separator);
addCLPEncodedField(_MESSAGE_1_FIELD_NAME, fieldsToRead);
addCLPEncodedField(_MESSAGE_2_FIELD_NAME, fieldsToRead);
// Add some unencoded fields
fieldsToRead.add(_TIMESTAMP_FIELD_NAME);
GenericRow row;
// Test extracting specific fields
row = extract(props, fieldsToRead);
assertEquals(row.getValue(_TIMESTAMP_FIELD_NAME), _TIMESTAMP_FIELD_VALUE);
// _LEVEL_FIELD_NAME was not requested, so it must be absent.
assertNull(row.getValue(_LEVEL_FIELD_NAME));
validateClpEncodedField(row, _MESSAGE_1_FIELD_NAME, _MESSAGE_1_FIELD_VALUE);
validateClpEncodedField(row, _MESSAGE_2_FIELD_NAME, _MESSAGE_2_FIELD_VALUE);
// Test extracting all fields
row = extract(props, null);
assertEquals(row.getValue(_TIMESTAMP_FIELD_NAME), _TIMESTAMP_FIELD_VALUE);
assertEquals(row.getValue(_LEVEL_FIELD_NAME), _LEVEL_FIELD_VALUE);
validateClpEncodedField(row, _MESSAGE_1_FIELD_NAME, _MESSAGE_1_FIELD_VALUE);
validateClpEncodedField(row, _MESSAGE_2_FIELD_NAME, _MESSAGE_2_FIELD_VALUE);
} |
/**
 * Demonstrates the DTO pattern with two examples: a customer DTO exposed through
 * {@code CustomerResource} (list / delete / save) and a product DTO with separate
 * admin and customer views exposed through {@code ProductResource}.
 *
 * @param args command line arguments (unused)
 */
public static void main(String[] args) {
// Example 1: Customer DTO
var customerOne = new CustomerDto("1", "Kelly", "Brown");
var customerTwo = new CustomerDto("2", "Alfonso", "Bass");
var customers = new ArrayList<>(List.of(customerOne, customerTwo));
var customerResource = new CustomerResource(customers);
LOGGER.info("All customers:");
var allCustomers = customerResource.customers();
printCustomerDetails(allCustomers);
LOGGER.info("----------------------------------------------------------");
// FIX: SLF4J uses "{}" placeholders; the old "{1}" was printed literally and no id was passed.
LOGGER.info("Deleting customer with id {}", customerOne.id());
customerResource.delete(customerOne.id());
allCustomers = customerResource.customers();
printCustomerDetails(allCustomers);
LOGGER.info("----------------------------------------------------------");
// FIX: removed stray '}' that leaked into the log message.
LOGGER.info("Adding customer three");
var customerThree = new CustomerDto("3", "Lynda", "Blair");
customerResource.save(customerThree);
allCustomers = customerResource.customers();
printCustomerDetails(allCustomers);
// Example 2: Product DTO
Product tv = Product.builder().id(1L).name("TV").supplier("Sony").price(1000D).cost(1090D).build();
Product microwave =
Product.builder()
.id(2L)
.name("microwave")
.supplier("Delonghi")
.price(1000D)
.cost(1090D).build();
Product refrigerator =
Product.builder()
.id(3L)
.name("refrigerator")
.supplier("Botsch")
.price(1000D)
.cost(1090D).build();
Product airConditioner =
Product.builder()
.id(4L)
.name("airConditioner")
.supplier("LG")
.price(1000D)
.cost(1090D).build();
List<Product> products =
new ArrayList<>(Arrays.asList(tv, microwave, refrigerator, airConditioner));
ProductResource productResource = new ProductResource(products);
LOGGER.info(
"####### List of products including sensitive data just for admins: \n {}",
Arrays.toString(productResource.getAllProductsForAdmin().toArray()));
LOGGER.info(
"####### List of products for customers: \n {}",
Arrays.toString(productResource.getAllProductsForCustomer().toArray()));
LOGGER.info("####### Going to save Sony PS5 ...");
ProductDto.Request.Create createProductRequestDto =
new ProductDto.Request.Create()
.setName("PS5")
.setCost(1000D)
.setPrice(1220D)
.setSupplier("Sony");
productResource.save(createProductRequestDto);
LOGGER.info(
"####### List of products after adding PS5: {}",
Arrays.toString(productResource.products().toArray()));
} | @Test
// Smoke test: the demo application's entry point must run end-to-end without throwing.
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
// Builds one ShardRecordsIterator per checkpointed shard, sizes the shared records
// queue proportionally to the shard count, and starts reading. An empty checkpoint
// (all previously handled shards closed) results in a minimal 1-slot queue and no readers.
void start() throws TransientKinesisException {
ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder();
for (ShardCheckpoint checkpoint : initialCheckpoint) {
shardsMap.put(checkpoint.getShardId(), createShardIterator(kinesis, checkpoint));
}
shardIteratorsMap.set(shardsMap.build());
if (!shardIteratorsMap.get().isEmpty()) {
// Fall back to the default when no explicit per-shard capacity was configured.
int capacityPerShard =
read.getMaxCapacityPerShard() != null
? read.getMaxCapacityPerShard()
: DEFAULT_CAPACITY_PER_SHARD;
recordsQueue = new ArrayBlockingQueue<>(capacityPerShard * shardIteratorsMap.get().size());
String streamName = initialCheckpoint.getStreamName();
startReadingShards(shardIteratorsMap.get().values(), streamName);
} else {
// There are no shards to handle when restoring from an empty checkpoint. Empty checkpoints
// are generated when the last shard handled by this pool was closed
recordsQueue = new ArrayBlockingQueue<>(1);
}
} | @Test
// A shard that throws KinesisShardClosedException (and has no successor shards) must be
// read exactly once and abandoned, while other shards keep being polled.
public void shouldStopReadingShardAfterReceivingShardClosedException() throws Exception {
when(firstIterator.readNextBatch()).thenThrow(KinesisShardClosedException.class);
when(firstIterator.findSuccessiveShardRecordIterators()).thenReturn(Collections.emptyList());
shardReadersPool.start();
verify(firstIterator, timeout(TIMEOUT_IN_MILLIS).times(1)).readNextBatch();
verify(secondIterator, timeout(TIMEOUT_IN_MILLIS).atLeast(2)).readNextBatch();
} |
// Returns a read-only view of the examples backing this dataset; mutation attempts on
// the returned list throw UnsupportedOperationException. Note this is a live view,
// not a snapshot.
public List<Example<T>> getData() {
return Collections.unmodifiableList(data);
} | @Test
// The list returned by getData() must be unmodifiable: adding to it should throw.
public void testGetData() {
OutputFactory<MockOutput> outputFactory = new MockOutputFactory();
MutableDataset<MockOutput> a = new MutableDataset<>(new MockDataSourceProvenance(), outputFactory);
Assertions.assertThrows(UnsupportedOperationException.class, () -> a.getData().add(mkExample(outputFactory.generateOutput("1"), "a")), "Expected exception thrown as adding to unmodifiable list.");
} |
// Renders the parsed node tree back into a string, substituting variables via
// compileNode(). The fresh Stack tracks in-flight variable names for cycle detection
// during recursion. Throws ScanException on malformed variable references.
public String transform() throws ScanException {
StringBuilder stringBuilder = new StringBuilder();
compileNode(node, stringBuilder, new Stack<Node>());
return stringBuilder.toString();
} | @Test
// "a${k0}c" must transform to "av0c": the ${k0} variable resolves to "v0" from
// propertyContainer0 while surrounding literals are preserved.
public void literalVariableLiteral() throws ScanException {
String input = "a${k0}c";
Node node = makeNode(input);
NodeToStringTransformer nodeToStringTransformer = new NodeToStringTransformer(node, propertyContainer0);
Assertions.assertEquals("av0c", nodeToStringTransformer.transform());
} |
// Builds a <forwarded/> extension (XEP-0297, namespace urn:xmpp:forward:0) wrapping a
// copy of the given stanza element, optionally annotated with delay information.
public Forwarded(Element copy, Date delay, JID delayFrom) {
super("forwarded", "urn:xmpp:forward:0");
populate(copy, delay, delayFrom);
} | @Test
// Wrapping the same message twice must yield identical, correctly-namespaced XML —
// i.e. constructing a Forwarded does not mutate or consume the wrapped stanza.
public void testForwarded() {
Message message = new Message();
message.setType(Message.Type.chat);
message.setBody("Tests");
message.addExtension(new DataForm(DataForm.Type.submit));
Forwarded forwarded = new Forwarded(message);
Forwarded forwarded2 = new Forwarded(message);
String xml1 = forwarded.getElement().asXML();
String xml2 = forwarded2.getElement().asXML();
assertEquals("<forwarded xmlns=\"urn:xmpp:forward:0\"><message xmlns=\"jabber:client\" type=\"chat\"><body>Tests</body><x xmlns=\"jabber:x:data\" type=\"submit\"/></message></forwarded>", xml1);
assertEquals("<forwarded xmlns=\"urn:xmpp:forward:0\"><message xmlns=\"jabber:client\" type=\"chat\"><body>Tests</body><x xmlns=\"jabber:x:data\" type=\"submit\"/></message></forwarded>", xml2);
} |
// Sets (or, for an empty version, clears) the target infrastructure version for a node
// type. The whole read-modify-write runs under the infrastructure-versions lock so
// concurrent writers cannot clobber each other. Downgrades require 'force'.
// Throws IllegalArgumentException for illegal node types or unforced downgrades.
public void setTargetVersion(NodeType nodeType, Version newTargetVersion, boolean force) {
assertLegalNodeTypeForTargetVersion(nodeType);
try (Lock lock = db.lockInfrastructureVersions()) {
Map<NodeType, Version> infrastructureVersions = db.readInfrastructureVersions();
Version currentTargetVersion = Optional.ofNullable(infrastructureVersions.get(nodeType))
.orElse(Version.emptyVersion);
// Trying to set the version to the current version, skip
if (currentTargetVersion.equals(newTargetVersion)) return;
// If we don't force the set, we must set the new version to higher than the already set version
if (!force && currentTargetVersion.isAfter(newTargetVersion)) {
throw new IllegalArgumentException(String.format("Cannot downgrade version without setting 'force'. Current target version: %s, attempted to set target version: %s",
currentTargetVersion.toFullString(), newTargetVersion.toFullString()));
}
// An empty version acts as a "remove" marker rather than a value to store.
if (newTargetVersion.isEmpty()) {
infrastructureVersions.remove(nodeType);
logger.info("Removing target version for " + nodeType);
} else {
infrastructureVersions.put(nodeType, newTargetVersion);
logger.info("Setting target version for " + nodeType + " to " + newTargetVersion.toFullString());
}
db.writeInfrastructureVersions(infrastructureVersions);
}
} | @Test
// Target versions are only legal for certain node types: config nodes succeed, tenant
// nodes are rejected regardless of the 'force' flag (force only bypasses downgrade checks).
public void can_only_set_version_on_certain_node_types() {
// We can set version for config
infrastructureVersions.setTargetVersion(NodeType.config, version, false);
try {
infrastructureVersions.setTargetVersion(NodeType.tenant, version, false);
fail("Should not be able to set version for tenant nodes");
} catch (IllegalArgumentException ignored) { }
try {
// Using 'force' does not help, force only applies to version downgrade
infrastructureVersions.setTargetVersion(NodeType.tenant, version, true);
fail("Should not be able to set version for tenant nodes");
} catch (IllegalArgumentException ignored) { }
} |
@Override
// Resolves the configured plugins: the comma-separated plugin property is split,
// trimmed and parsed into Plugin instances (empty when the property is absent),
// then the publish plugin is appended if publishing is enabled.
public List<Plugin> plugins() {
List<Plugin> plugins = configurationParameters.get(PLUGIN_PROPERTY_NAME, s -> Arrays.stream(s.split(","))
.map(String::trim)
.map(PluginOption::parse)
.map(pluginOption -> (Plugin) pluginOption)
.collect(Collectors.toList()))
// orElseGet keeps the list mutable so the publish plugin can still be appended below.
.orElseGet(ArrayList::new);
getPublishPlugin()
.ifPresent(plugins::add);
return plugins;
} | @Test
// Explicitly disabling publishing must win over a configured publish token:
// no plugins should be resolved.
void getPluginNamesWithPublishDisabledAndPublishToken() {
ConfigurationParameters config = new MapConfigurationParameters(Map.of(
Constants.PLUGIN_PUBLISH_ENABLED_PROPERTY_NAME, "false",
Constants.PLUGIN_PUBLISH_TOKEN_PROPERTY_NAME, "some/token"));
assertThat(new CucumberEngineOptions(config).plugins().stream()
.map(Options.Plugin::pluginString)
.collect(toList()),
empty());
} |
// Convenience overload: reads with no message-count limit by delegating to
// read(handler, Integer.MAX_VALUE). Returns the number of messages consumed.
public int read(final MessageHandler handler)
{
return read(handler, Integer.MAX_VALUE);
} | @Test
// A record whose length field is still 0 (writer mid-publish) must not be delivered:
// the handler is never invoked, no memory is zeroed and the head counter stays put.
void shouldNotReadSingleMessagePartWayThroughWriting()
{
final long head = 0L;
final int headIndex = (int)head;
when(buffer.getLong(HEAD_COUNTER_INDEX)).thenReturn(head);
// Length 0 signals an in-progress write at the head position.
when(buffer.getIntVolatile(lengthOffset(headIndex))).thenReturn(0);
final MutableInteger times = new MutableInteger();
final MessageHandler handler = (msgTypeId, buffer, index, length) -> times.increment();
final int messagesRead = ringBuffer.read(handler);
assertThat(messagesRead, is(0));
assertThat(times.get(), is(0));
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer, times(1)).getIntVolatile(lengthOffset(headIndex));
inOrder.verify(buffer, times(0)).setMemory(anyInt(), anyInt(), anyByte());
inOrder.verify(buffer, times(0)).putLongOrdered(HEAD_COUNTER_INDEX, headIndex);
} |
/**
 * Returns {@code value} with {@code prefix} removed from its start when present.
 * Null-safe: if either argument is {@code null}, {@code value} is returned unchanged.
 *
 * @param value  the string to strip, may be null
 * @param prefix the prefix to remove, may be null
 * @return the stripped string, or {@code value} itself when nothing was stripped
 */
public static String stripPrefix(String value, String prefix) {
// Nothing to strip when either side is missing.
if (value == null || prefix == null) {
return value;
}
// substring(prefix.length()) yields "" when value equals prefix.
return value.startsWith(prefix) ? value.substring(prefix.length()) : value;
} | @Test
// Exhaustive edge cases for stripPrefix: nulls pass through, only a true leading
// prefix is removed, and longer-than-value prefixes leave the value untouched.
public void shouldStripPrefixes() {
assertThat(URISupport.stripPrefix(null, null)).isNull();
assertThat(URISupport.stripPrefix("", null)).isEmpty();
assertThat(URISupport.stripPrefix(null, "")).isNull();
assertThat(URISupport.stripPrefix("", "")).isEmpty();
assertThat(URISupport.stripPrefix("a", "b")).isEqualTo("a");
assertThat(URISupport.stripPrefix("a", "a")).isEmpty();
assertThat(URISupport.stripPrefix("ab", "b")).isEqualTo("ab");
assertThat(URISupport.stripPrefix("a", "ab")).isEqualTo("a");
} |
// Creates an empty counter set backed by the class-level groupFactory, which supplies
// the counter-group implementations to the superclass.
public Counters() {
super(groupFactory);
} | @Test
// Round-trips counters through their text form for three flavours: enum counters with
// a resource bundle, enum counters without one, and free-form string counters
// (including names containing the escape-sensitive characters {}()[]).
public void testCounters() throws IOException {
Enum[] keysWithResource = {TaskCounter.MAP_INPUT_RECORDS,
TaskCounter.MAP_OUTPUT_BYTES};
Enum[] keysWithoutResource = {myCounters.TEST1, myCounters.TEST2};
String[] groups = {"group1", "group2", "group{}()[]"};
String[] counters = {"counter1", "counter2", "counter{}()[]"};
try {
// I. Check enum counters that have resource bundler
testCounter(getEnumCounters(keysWithResource));
// II. Check enum counters that dont have resource bundler
testCounter(getEnumCounters(keysWithoutResource));
// III. Check string counters
testCounter(getEnumCounters(groups, counters));
} catch (ParseException pe) {
// Surface parse failures as IOException to match the method contract.
throw new IOException(pe);
}
} |
// Reads the ECS cluster name from the task metadata endpoint's "Cluster" field.
// NOTE(review): assumes the metadata always contains "Cluster" — a missing key would
// presumably NPE here; confirm against the ECS task metadata contract.
String clusterEcs() {
return getTaskMetadata().get("Cluster").asString();
} | @Test
// Stubbing the /task metadata endpoint must make clusterEcs() return the "Cluster" value.
public void clusterEcs() {
// given
//language=JSON
String response = """
{
"Cluster" : "hz-cluster",
"AvailabilityZone": "ca-central-1a"
}""";
stubFor(get(urlEqualTo("/task"))
.willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody(response)));
// when
String result = awsMetadataApi.clusterEcs();
// then
assertEquals("hz-cluster", result);
} |
@Override
// Handles the poll for an app-application result started at a request station.
// When the account already has the maximum number of apps and the caller did not ask
// to remove the oldest one, a TooManyAppsResponse describing the least-recent app is
// returned; otherwise the (possibly remapped) activation status is reported back.
// Any non-OK status invalidates this flow step.
public AppResponse process(Flow flow, RsPollAppApplicationResultRequest request) throws SharedServiceClientException {
checkSwitchesEnabled();
final String activationStatus = appSession.getActivationStatus();
final Long accountId = appSession.getAccountId();
final String userAppId = appSession.getUserAppId();
// FIX: constant-first comparison — a missing removeOldApp flag now reads as false
// instead of throwing NullPointerException.
final boolean removeOldApp = "true".equals(request.getRemoveOldApp());
String status;
int maxAppsPerUser = sharedServiceClient.getSSConfigInt("Maximum_aantal_DigiD_apps_eindgebruiker");
appSession.setRemoveOldApp(removeOldApp);
if (TOO_MANY_APPS.equals(activationStatus) && !removeOldApp) {
AppAuthenticator leastRecentApp = appAuthenticatorService.findLeastRecentApp(accountId);
return new TooManyAppsResponse("too_many_active", maxAppsPerUser, leastRecentApp.getDeviceName(),
leastRecentApp.getLastSignInOrActivatedAtOrCreatedAt().toLocalDate().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")));
}
// TOO_MANY_APPS plus consent to remove the old app is treated as success.
status = TOO_MANY_APPS.equals(activationStatus) && removeOldApp ? OK : activationStatus;
// FIX: constant-first comparison — activationStatus (and thus status) may be null.
if (!OK.equals(status)) {
setValid(false);
}
return new RsPollAppApplicationResultResponse(status, userAppId);
} | @Test
// When the session is in TOO_MANY_APPS but the request asks to remove the old app,
// process() must record the flag on the session and report status OK.
public void processRsPollAppApplicationResultOkRemoveOldAppTrueTest() throws SharedServiceClientException {
when(sharedServiceClient.getSSConfigInt("Maximum_aantal_DigiD_apps_eindgebruiker")).thenReturn(5);
when(switchService.digidAppSwitchEnabled()).thenReturn(true);
when(switchService.digidRequestStationEnabled()).thenReturn(true);
rsPollAppApplicationResult.setAppSession(createAppSession(ApplyForAppAtRequestStationFlow.NAME, State.RS_APP_APPLICATION_STARTED, TOO_MANY_APPS));
mockedRsPollAppApplicationResultRequest = new RsPollAppApplicationResultRequest();
mockedRsPollAppApplicationResultRequest.setActivationCode(APP_ACTIVATION_CODE);
mockedRsPollAppApplicationResultRequest.setRemoveOldApp("true");
AppResponse appResponse = rsPollAppApplicationResult.process(mockedApplyForAppAtRequestStationFlow, mockedRsPollAppApplicationResultRequest);
// FIX: assertTrue is the idiomatic form of assertEquals(true, ...) and gives a clearer failure.
assertTrue(rsPollAppApplicationResult.getAppSession().isRemoveOldApp());
assertEquals(TOO_MANY_APPS, rsPollAppApplicationResult.getAppSession().getActivationStatus());
assertTrue(appResponse instanceof RsPollAppApplicationResultResponse);
assertEquals(OK,((RsPollAppApplicationResultResponse) appResponse).getStatus());
assertEquals(USER_APP_ID,((RsPollAppApplicationResultResponse) appResponse).getUserAppId());
} |
// Sets (or overwrites) a map-store property and returns this config for fluent chaining.
public MapStoreConfig setProperty(String name, String value) {
properties.setProperty(name, value);
return this;
} | @Test
// setProperty must store the value retrievably and participate in equals().
public void setProperty() {
MapStoreConfig cfg = new MapStoreConfig().setProperty("a", "b");
assertEquals("b", cfg.getProperty("a"));
assertEquals(new MapStoreConfig().setProperty("a", "b"), cfg);
} |
// Parses "from-to" (or a single date, meaning from == to) into a DateRange.
// Returns null — rather than throwing — for blank input, a malformed split, or an
// invalid range, so callers can treat unparseable strings as "no restriction".
DateRange getRange(String dateRangeString) throws ParseException {
if (dateRangeString == null || dateRangeString.isEmpty())
return null;
String[] dateArr = dateRangeString.split("-");
// NOTE: split never yields length 0 here, so the < 1 guard is defensive only.
if (dateArr.length > 2 || dateArr.length < 1)
return null;
// throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed");
ParsedCalendar from = parseDateString(dateArr[0]);
ParsedCalendar to;
if (dateArr.length == 2)
to = parseDateString(dateArr[1]);
else
// faster and safe?
// to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone());
// Single date: re-parse it so 'to' is an independent object.
to = parseDateString(dateArr[0]);
try {
return new DateRange(from, to);
} catch (IllegalArgumentException ex) {
return null;
}
} | @Test
// A bare month ("Sep") must expand to the whole month: September 1-30 inclusive,
// excluding the adjacent days of August and October.
public void testParseSingleDateRangeWithoutYearAndDay() throws ParseException {
DateRange dateRange = dateRangeParser.getRange("Sep");
assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 31)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 1)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 30)));
assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.OCTOBER, 1)));
} |
@Override
// Base implementation: typed boolean lookup is not supported here, so always returns
// null — callers using getBoolean(name, default) then fall back to their default.
public Boolean getBoolean(K name) {
return null;
} | @Test
// Since getBoolean(name) returns null, the defaulting overload must return the default.
public void testGetBooleanDefault() {
assertTrue(HEADERS.getBoolean("name1", true));
} |
// Accessor for the underlying gRPC server call this request wraps.
public ServerCall<?, ?> call() {
return call;
} | @Test void call() {
// The accessor must expose the exact ServerCall instance, not a copy.
assertThat(request.call()).isSameAs(call);
} |
// Asks the SCM plugin to verify connectivity with the given SCM configuration.
// Request/response bodies are (de)serialized by the message handler registered for
// the resolved extension version.
public Result checkConnectionToSCM(String pluginId, final SCMPropertyConfiguration scmConfiguration) {
return pluginRequestHelper.submitRequest(pluginId, REQUEST_CHECK_SCM_CONNECTION, new DefaultPluginInteractionCallback<>() {
@Override
public String requestBody(String resolvedExtensionVersion) {
return messageHandlerMap.get(resolvedExtensionVersion).requestMessageForCheckConnectionToSCM(scmConfiguration);
}
@Override
public Result onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return messageHandlerMap.get(resolvedExtensionVersion).responseMessageForCheckConnectionToSCM(responseBody);
}
});
} | @Test
// An exception thrown by the plugin during interaction must propagate to the caller
// with its original message.
public void shouldHandleExceptionDuringPluginInteraction() throws Exception {
when(pluginManager.submitTo(eq(PLUGIN_ID), eq(SCM_EXTENSION), requestArgumentCaptor.capture())).thenThrow(new RuntimeException("exception-from-plugin"));
try {
scmExtension.checkConnectionToSCM(PLUGIN_ID, scmPropertyConfiguration);
// FIX: without this the test silently passed when no exception was thrown.
// AssertionError is an Error, so it escapes the catch (Exception) below.
throw new AssertionError("Expected exception-from-plugin to propagate");
} catch (Exception e) {
assertThat(e.getMessage(), is("exception-from-plugin"));
}
} |
// Sets the access-log destination/flag for the protocol and returns the builder
// (via getThis()) for fluent chaining.
public ProtocolBuilder accesslog(String accesslog) {
this.accesslog = accesslog;
return getThis();
} | @Test
// The accesslog value set on the builder must survive into the built config.
void accesslog() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.accesslog("accesslog");
Assertions.assertEquals("accesslog", builder.build().getAccesslog());
} |
@InterfaceAudience.Private
@VisibleForTesting
// Reclaims expired timeline log directories under dirpath: for each valid
// cluster-timestamp directory, app log dirs older than retainMillis are cleaned, and
// the timestamp directory itself is deleted once it holds no app dirs and has not
// been modified within the retention window. Entries that are not valid
// cluster-timestamp dirs are left untouched.
void cleanLogs(Path dirpath, long retainMillis)
throws IOException {
long now = Time.now();
RemoteIterator<FileStatus> iter = list(dirpath);
while (iter.hasNext()) {
FileStatus stat = iter.next();
if (isValidClusterTimeStampDir(stat)) {
Path clusterTimeStampPath = stat.getPath();
// Set by cleanAppLogDir when at least one app dir survives the sweep.
MutableBoolean appLogDirPresent = new MutableBoolean(false);
cleanAppLogDir(clusterTimeStampPath, retainMillis, appLogDirPresent);
if (appLogDirPresent.isFalse() &&
(now - stat.getModificationTime() > retainMillis)) {
deleteDir(clusterTimeStampPath);
}
}
}
} | @Test
// End-to-end sweep test for cleanLogs(): irrelevant files/dirs are never reclaimed;
// with a long retention nothing is deleted; after ageing, only untouched app dirs
// (appDirClean, appDirEmpty) are removed while dirs kept alive by recent file or
// directory activity survive, and the cleaned-dirs metric increments accordingly.
void testCleanLogs() throws Exception {
// Create test dirs and files
// Irrelevant file, should not be reclaimed
String appDirName = mainTestAppId.toString();
String attemptDirName = ApplicationAttemptId.appAttemptIdStrPrefix
+ appDirName + "_1";
Path irrelevantFilePath = new Path(
testDoneDirPath, "irrelevant.log");
FSDataOutputStream stream = fs.create(irrelevantFilePath);
stream.close();
// Irrelevant directory, should not be reclaimed
Path irrelevantDirPath = new Path(testDoneDirPath, "irrelevant");
fs.mkdirs(irrelevantDirPath);
Path doneAppHomeDir = new Path(new Path(new Path(testDoneDirPath,
Long.toString(mainTestAppId.getClusterTimestamp())), "0000"), "001");
// First application, untouched after creation
Path appDirClean = new Path(doneAppHomeDir, appDirName);
Path attemptDirClean = new Path(appDirClean, attemptDirName);
fs.mkdirs(attemptDirClean);
Path filePath = new Path(attemptDirClean, "test.log");
stream = fs.create(filePath);
stream.close();
// Second application, one file touched after creation
Path appDirHoldByFile = new Path(doneAppHomeDir, appDirName + "1");
Path attemptDirHoldByFile
= new Path(appDirHoldByFile, attemptDirName);
fs.mkdirs(attemptDirHoldByFile);
Path filePathHold = new Path(attemptDirHoldByFile, "test1.log");
stream = fs.create(filePathHold);
stream.close();
// Third application, one dir touched after creation
Path appDirHoldByDir = new Path(doneAppHomeDir, appDirName + "2");
Path attemptDirHoldByDir = new Path(appDirHoldByDir, attemptDirName);
fs.mkdirs(attemptDirHoldByDir);
Path dirPathHold = new Path(attemptDirHoldByDir, "hold");
fs.mkdirs(dirPathHold);
// Fourth application, empty dirs
Path appDirEmpty = new Path(doneAppHomeDir, appDirName + "3");
Path attemptDirEmpty = new Path(appDirEmpty, attemptDirName);
fs.mkdirs(attemptDirEmpty);
Path dirPathEmpty = new Path(attemptDirEmpty, "empty");
fs.mkdirs(dirPathEmpty);
// Should retain all logs after this run
MutableCounterLong dirsCleaned = store.metrics.getLogsDirsCleaned();
long before = dirsCleaned.value();
store.cleanLogs(testDoneDirPath, 10000);
assertTrue(fs.exists(irrelevantDirPath));
assertTrue(fs.exists(irrelevantFilePath));
assertTrue(fs.exists(filePath));
assertTrue(fs.exists(filePathHold));
assertTrue(fs.exists(dirPathHold));
assertTrue(fs.exists(dirPathEmpty));
// Make sure the created dir is old enough
Thread.sleep(2000);
// Touch the second application
stream = fs.append(filePathHold);
stream.writeBytes("append");
stream.close();
// Touch the third application by creating a new dir
fs.mkdirs(new Path(dirPathHold, "holdByMe"));
store.cleanLogs(testDoneDirPath, 1000);
// Verification after the second cleaner call
assertTrue(fs.exists(irrelevantDirPath));
assertTrue(fs.exists(irrelevantFilePath));
assertTrue(fs.exists(filePathHold));
assertTrue(fs.exists(dirPathHold));
assertTrue(fs.exists(doneAppHomeDir));
// appDirClean and appDirEmpty should be cleaned up
assertFalse(fs.exists(appDirClean));
assertFalse(fs.exists(appDirEmpty));
assertEquals(before + 2L, dirsCleaned.value());
} |
@SuppressWarnings("unused") // Part of required API.
// Executes an INSERT VALUES statement: resolves the target data source, validates the
// column list, serializes the row into a Kafka record and produces it. Broker
// authorization failures are remapped into KsqlExceptions with descriptive root
// causes; all other failures are wrapped in a generic insert-failed KsqlException.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
// Catch-all: preserve the original failure as the cause.
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
// A non-authorization producer failure must surface as a KsqlException whose cause
// carries the original error message.
public void shouldThrowOnOtherException() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
allAndPseudoColumnNames(SCHEMA),
ImmutableList.of(
new LongLiteral(1L),
new StringLiteral("str"),
new StringLiteral("str"),
new LongLiteral(2L))
);
doThrow(new RuntimeException("boom"))
.when(producer).send(any());
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
);
// Then:
assertThat(e.getCause(), (hasMessage(containsString("boom"))));
} |
// Converts an array of Spark predicates into a single Iceberg expression by AND-ing
// the individual conversions, starting from alwaysTrue(). Fails fast with
// IllegalArgumentException when any predicate has no Iceberg equivalent.
public static Expression convert(Predicate[] predicates) {
Expression expression = Expressions.alwaysTrue();
for (Predicate predicate : predicates) {
Expression converted = convert(predicate);
Preconditions.checkArgument(
converted != null, "Cannot convert Spark predicate to Iceberg expression: %s", predicate);
expression = Expressions.and(expression, converted);
}
return expression;
} | @Test
// A Spark '>' predicate on a DateType literal must convert to the Iceberg
// greaterThan expression over the same epoch-day value.
public void testDateFilterConversion() {
LocalDate localDate = LocalDate.parse("2018-10-18");
long epochDay = localDate.toEpochDay();
NamedReference namedReference = FieldReference.apply("x");
LiteralValue ts = new LiteralValue(epochDay, DataTypes.DateType);
org.apache.spark.sql.connector.expressions.Expression[] attrAndValue =
new org.apache.spark.sql.connector.expressions.Expression[] {namedReference, ts};
Predicate predicate = new Predicate(">", attrAndValue);
Expression dateExpression = SparkV2Filters.convert(predicate);
Expression rawExpression = Expressions.greaterThan("x", epochDay);
assertThat(dateExpression.toString())
.as("Generated date expression should be correct")
.isEqualTo(rawExpression.toString());
} |
// Renders one function parameter of a rule-builder step as rule-language syntax
// (" name : value"). Returns null when the step has no parameters, the parameter is
// unset, or its string value is empty. A leading '$' marks a variable reference
// (emitted unquoted, '$' stripped); other strings are emitted as escaped quoted
// literals; non-string values are emitted via their toString.
static final String addFunctionParameter(ParameterDescriptor descriptor, RuleBuilderStep step) {
final String parameterName = descriptor.name(); // parameter name needed by function
final Map<String, Object> parameters = step.parameters();
if (Objects.isNull(parameters)) {
return null;
}
final Object value = parameters.get(parameterName); // parameter value set by rule definition
String syntax = " " + parameterName + " : ";
if (value == null) {
return null;
} else if (value instanceof String valueString) {
if (StringUtils.isEmpty(valueString)) {
return null;
} else if (valueString.startsWith("$")) { // value set as variable
syntax += valueString.substring(1);
} else {
syntax += "\"" + StringEscapeUtils.escapeJava(valueString) + "\""; // value set as string
}
} else {
// Non-string values (numbers, booleans, ...) rely on their toString form.
syntax += value;
}
return syntax;
} | @Test
// A step whose parameter map lacks a value for the descriptor must render to null.
public void addFunctionParameterNull_WhenNoParameterValueIsSet() {
RuleBuilderStep step = mock(RuleBuilderStep.class);
Map<String, Object> params = new HashMap<>();
when(step.parameters()).thenReturn(params);
ParameterDescriptor descriptor = mock(ParameterDescriptor.class);
when(descriptor.optional()).thenReturn(true);
assertThat(ParserUtil.addFunctionParameter(descriptor, step)).isNull();
} |
// Returns the metastore of the currently connected repository, or null when either
// no repository locator is wired in or no repository connection exists.
@Override public IMetaStore getMetastore() {
// Guard clauses: bail out as soon as a link in the chain is missing.
if ( kettleRepositoryLocator == null ) {
return null;
}
Repository repository = kettleRepositoryLocator.getRepository();
return repository == null ? null : repository.getRepositoryMetaStore();
} | @Test
// With a locator that yields a repository, getMetastore() must return that
// repository's metastore instance.
public void testGetMetastoreSuccess() {
Repository repository = mock( Repository.class );
IMetaStore metaStore = mock( IMetaStore.class );
when( repository.getRepositoryMetaStore() ).thenReturn( metaStore );
when( kettleRepositoryLocator.getRepository() ).thenReturn( repository );
assertEquals( metaStore, repositoryMetastoreProvider.getMetastore() );
} |
@Override
// Fetches CONFIG GET results for the given cluster node and key pattern, converting
// the flat key/value reply list into Properties. Returns null when the node replies
// with no result.
public Properties getConfig(RedisClusterNode node, String pattern) {
RedisClient entry = getEntry(node);
RFuture<List<String>> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_GET, pattern);
// Block until the async CONFIG GET completes.
List<String> r = syncFuture(f);
if (r != null) {
return Converters.toProperties(r);
}
return null;
} | @Test
// CONFIG GET with pattern "*" against a master must return a substantial set of entries.
public void testGetConfig() {
testInCluster(connection -> {
RedisClusterNode master = getFirstMaster(connection);
Properties config = connection.getConfig(master, "*");
assertThat(config.size()).isGreaterThan(20);
});
} |
@VisibleForTesting
// Loads the attribute map for a job entry copy, preferring the dedicated
// job-entry-copy attributes group when present and falling back to the legacy
// node-level attributes otherwise (backwards compatibility with older repositories).
static void loadAttributesMap( DataNode copyNode, JobEntryCopy copy ) throws KettleException {
// And read the job entry copy group attributes map
DataNode groupsNode = copyNode.getNode( PROP_ATTRIBUTES_JOB_ENTRY_COPY );
if ( groupsNode != null ) {
AttributesMapUtil.loadAttributesMap( copyNode, copy, PROP_ATTRIBUTES_JOB_ENTRY_COPY );
} else {
AttributesMapUtil.loadAttributesMap( copyNode, copy );
}
} | @Test
// When the dedicated attributes group node exists, loadAttributesMap must route
// through the three-argument AttributesMapUtil overload exactly once.
public void loadAttributesMapWithoutAttributesJobEntryCopyTest() throws Exception {
DataNode dataNode = mock( DataNode.class );
JobEntryCopy jobEntryCopy = mock( JobEntryCopy.class );
when( dataNode.getNode( JobDelegate.PROP_ATTRIBUTES_JOB_ENTRY_COPY ) ).thenReturn( dataNode );
try ( MockedStatic<AttributesMapUtil> dummyAttributesMapUtil = mockStatic( AttributesMapUtil.class ) ) {
JobDelegate.loadAttributesMap( dataNode, jobEntryCopy );
dummyAttributesMapUtil.verify( () -> AttributesMapUtil.loadAttributesMap( any( DataNode.class ),
any( AttributesInterface.class ), any() ), VerificationModeFactory.times( 1 ) );
AttributesMapUtil.loadAttributesMap( dataNode, jobEntryCopy, JobDelegate.PROP_ATTRIBUTES_JOB_ENTRY_COPY );
}
} |
@Override
// Handles the broker's CloseConsumer command: removes the consumer from this
// connection's registry and triggers its reconnection logic, optionally redirecting
// it to the broker URL the command assigns. Unknown consumer ids are only logged.
protected void handleCloseConsumer(CommandCloseConsumer closeConsumer) {
final long consumerId = closeConsumer.getConsumerId();
log.info("[{}] Broker notification of closed consumer: {}, assignedBrokerUrl: {}, assignedBrokerUrlTls: {}",
remoteAddress, consumerId,
closeConsumer.hasAssignedBrokerServiceUrl() ? closeConsumer.getAssignedBrokerServiceUrl() : null,
closeConsumer.hasAssignedBrokerServiceUrlTls() ? closeConsumer.getAssignedBrokerServiceUrlTls() : null);
ConsumerImpl<?> consumer = consumers.remove(consumerId);
if (consumer != null) {
String brokerServiceUrl = getBrokerServiceUrl(closeConsumer, consumer);
Optional<URI> hostUri = parseUri(brokerServiceUrl,
closeConsumer.hasRequestId() ? closeConsumer.getRequestId() : null);
// When redirected to a specific broker, reconnect immediately (0ms delay).
Optional<Long> initialConnectionDelayMs = hostUri.map(__ -> 0L);
consumer.connectionClosed(this, initialConnectionDelayMs, hostUri);
} else {
log.warn("[{}] Consumer with id {} not found while closing consumer", remoteAddress, consumerId);
}
} | @Test
// CloseConsumer without an assigned broker URL must unregister the consumer and
// signal connectionClosed with no redirect URI and no reconnect delay override.
public void testHandleCloseConsumer() {
ThreadFactory threadFactory = new DefaultThreadFactory("testHandleCloseConsumer");
EventLoopGroup eventLoop = EventLoopUtil.newEventLoopGroup(1, false, threadFactory);
ClientConfigurationData conf = new ClientConfigurationData();
ClientCnx cnx = new ClientCnx(InstrumentProvider.NOOP, conf, eventLoop);
long consumerId = 1;
PulsarClientImpl pulsarClient = mock(PulsarClientImpl.class);
when(pulsarClient.getConfiguration()).thenReturn(conf);
ConsumerImpl consumer = mock(ConsumerImpl.class);
when(consumer.getClient()).thenReturn(pulsarClient);
cnx.registerConsumer(consumerId, consumer);
assertEquals(cnx.consumers.size(), 1);
CommandCloseConsumer closeConsumer = new CommandCloseConsumer().setConsumerId(consumerId).setRequestId(1);
cnx.handleCloseConsumer(closeConsumer);
assertEquals(cnx.consumers.size(), 0);
verify(consumer).connectionClosed(cnx, Optional.empty(), Optional.empty());
// Clean up the event loop threads created for this test.
eventLoop.shutdownGracefully();
} |
@Override
public void run() {
ExecutorService executor = null;
try {
// Disable OOM killer and set a limit.
// This has to be set first, so that we get notified about valid events.
// We will be notified about events even, if they happened before
// oom-listener started
setCGroupParameters();
// Start a listener process
ProcessBuilder oomListener = new ProcessBuilder();
oomListener.command(oomListenerPath, yarnCGroupPath);
synchronized (this) {
if (!stopped) {
process = oomListener.start();
} else {
resetCGroupParameters();
LOG.info("Listener stopped before starting");
return;
}
}
LOG.info(String.format("Listening on %s with %s",
yarnCGroupPath,
oomListenerPath));
// We need 1 thread for the error stream and a few others
// as a watchdog for the OOM killer
executor = Executors.newFixedThreadPool(2);
// Listen to any errors in the background. We do not expect this to
// be large in size, so it will fit into a string.
Future<String> errorListener =
executor.submit(() -> IOUtils.toString(process.getErrorStream(), StandardCharsets.UTF_8));
// We get Linux event increments (8 bytes) forwarded from the event stream
// The events cannot be split, so it is safe to read them as a whole
// There is no race condition with the cgroup
// running out of memory. If oom is 1 at startup
// oom_listener will send an initial notification
InputStream events = process.getInputStream();
byte[] event = new byte[8];
int read;
// This loop can be exited by terminating the process
// with stopListening()
while ((read = events.read(event)) == event.length) {
// An OOM event has occurred
resolveOOM(executor);
}
if (read != -1) {
// BUGFIX: typo "hander" -> "handler" in the log message.
LOG.warn(String.format("Characters returned from event handler: %d",
read));
}
// If the input stream is closed, we wait for exit or process terminated.
int exitCode = process.waitFor();
String error = errorListener.get();
process = null;
LOG.info(String.format("OOM listener exited %d %s", exitCode, error));
} catch (OOMNotResolvedException ex) {
// We could mark the node unhealthy but it shuts down the node anyways.
// Let's just bring down the node manager all containers are frozen.
throw new YarnRuntimeException("Could not resolve OOM", ex);
} catch (Exception ex) {
synchronized (this) {
if (!stopped) {
LOG.warn("OOM Listener exiting.", ex);
}
}
} finally {
// Make sure we do not leak the child process,
// especially if process.waitFor() did not finish.
if (process != null && process.isAlive()) {
process.destroyForcibly();
}
if (executor != null) {
// BUGFIX: shutdown() must be requested BEFORE awaitTermination().
// The original order waited the full timeout on a pool that was never
// asked to stop, and then shut it down without waiting at all.
executor.shutdown();
try {
executor.awaitTermination(6, TimeUnit.SECONDS);
} catch (InterruptedException e) {
LOG.warn("Exiting without processing all OOM events.");
// Restore the interrupt status so callers can observe it.
Thread.currentThread().interrupt();
}
}
resetCGroupParameters();
}
} | @Test(timeout = 20000)
// End-to-end check of the OOM listener: a script printing two "oomevent"
// tokens must cause the controller to invoke the OOM handler exactly twice.
public void testMultipleOOMEvents() throws Exception {
conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH,
script.getAbsolutePath());
try {
FileUtils.writeStringToFile(script,
"#!/bin/bash\nprintf oomevent;printf oomevent;\n", StandardCharsets.UTF_8, false);
assertTrue("Could not set executable",
script.setExecutable(true));
CGroupsHandler cgroups = mock(CGroupsHandler.class);
when(cgroups.getPathForCGroup(any(), any())).thenReturn("");
when(cgroups.getCGroupParam(any(), any(), any()))
.thenReturn("under_oom 0");
Runnable handler = mock(Runnable.class);
doNothing().when(handler).run();
CGroupElasticMemoryController controller =
new CGroupElasticMemoryController(
conf,
null,
cgroups,
true,
false,
10000,
handler
);
controller.run();
verify(handler, times(2)).run();
} finally {
// Always remove the temporary shell script, even on assertion failure.
assertTrue(String.format("Could not clean up script %s",
script.getAbsolutePath()), script.delete());
}
} |
/**
 * Reads all lines from the given file.
 *
 * @param file the file to read; may be null, missing or unreadable
 * @return the file's lines, or an empty array when the file cannot be read
 * @throws IOException if reading the file fails
 */
public static String[] readLines(File file) throws IOException {
if (file == null || !file.exists() || !file.canRead()) {
return new String[0];
}
// BUGFIX: close the stream deterministically instead of relying on the
// InputStream overload (or the GC) to release the file handle.
try (FileInputStream in = new FileInputStream(file)) {
return readLines(in);
}
} | @Test
// The shared single-line fixture stream must decode to exactly one line equal to TEXT.
void testReadLines() throws Exception {
String[] lines = IOUtils.readLines(is);
assertThat(lines.length, equalTo(1));
assertThat(lines[0], equalTo(TEXT));
} |
// Narrows Object.clone() to return AbstractHealthChecker; every concrete
// checker must supply its own copy implementation.
@Override
public abstract AbstractHealthChecker clone() throws CloneNotSupportedException; | @Test
// Cloning the None checker must yield another instance of the None class.
void testClone() throws CloneNotSupportedException {
AbstractHealthChecker none = new AbstractHealthChecker.None().clone();
assertEquals(AbstractHealthChecker.None.class, none.getClass());
} |
@Override
// Fetches values for the key in [timeFrom, timeTo] from the first underlying
// store that has data; empty iterators are closed and the search continues.
public WindowStoreIterator<V> backwardFetch(final K key,
final Instant timeFrom,
final Instant timeTo) throws IllegalArgumentException {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
try {
final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
if (!result.hasNext()) {
// Nothing in this store; release the iterator and keep looking.
result.close();
} else {
return result;
}
} catch (final InvalidStateStoreException e) {
// BUGFIX: keep the original exception as the cause instead of dropping it,
// so callers can diagnose why the store became unavailable.
throw new InvalidStateStoreException(
"State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.", e);
}
}
// No store had data for the key in the requested range.
return KeyValueIterators.emptyWindowStoreIterator();
} | @Test
// Backward fetch must return entries in reverse timestamp order (newest first).
public void shouldBackwardFetchValuesFromWindowStore() {
underlyingWindowStore.put("my-key", "my-value", 0L);
underlyingWindowStore.put("my-key", "my-later-value", 10L);
assertEquals(
asList(new KeyValue<>(10L, "my-later-value"), new KeyValue<>(0L, "my-value")),
StreamsTestUtils.toList(windowStore.backwardFetch("my-key", ofEpochMilli(0L), ofEpochMilli(25L)))
);
} |
@VisibleForTesting
// Polls the monitored object's metadata; when the ETag changed, downloads the
// new content and hands the stream to changeListener. The compareAndSet on
// lastETag ensures only one caller acts on a given ETag transition.
void refresh(final Consumer<InputStream> changeListener) {
try {
final HeadObjectResponse objectMetadata = s3Client.headObject(HeadObjectRequest.builder()
.bucket(s3Bucket)
.key(objectKey)
.build());
final String initialETag = lastETag.get();
final String refreshedETag = objectMetadata.eTag();
// Download only if the ETag changed AND we win the CAS race.
if (!StringUtils.equals(initialETag, refreshedETag) && lastETag.compareAndSet(initialETag, refreshedETag)) {
try (final ResponseInputStream<GetObjectResponse> response = getObject()) {
log.info("Object at s3://{}/{} has changed; new eTag is {} and object size is {} bytes",
s3Bucket, objectKey, response.response().eTag(), response.response().contentLength());
changeListener.accept(response);
}
}
} catch (final Exception e) {
// Best effort: a failed poll is logged and not propagated.
log.warn("Failed to refresh monitored object", e);
}
} | @Test
// Two consecutive refreshes while the ETag stays the same must invoke the
// change listener exactly once (the second poll sees an unchanged ETag).
void refresh() {
final S3Client s3Client = mock(S3Client.class);
final String bucket = "s3bucket";
final String objectKey = "greatest-smooth-jazz-hits-of-all-time.zip";
//noinspection unchecked
final Consumer<InputStream> listener = mock(Consumer.class);
final S3ObjectMonitor objectMonitor = new S3ObjectMonitor(
s3Client,
bucket,
objectKey,
16 * 1024 * 1024,
mock(ScheduledExecutorService.class),
Duration.ofMinutes(1));
final String uuid = UUID.randomUUID().toString();
when(s3Client.headObject(HeadObjectRequest.builder().bucket(bucket).key(objectKey).build())).thenReturn(
HeadObjectResponse.builder().eTag(uuid).build());
final ResponseInputStream<GetObjectResponse> ris = responseInputStreamFromString("abc", uuid);
when(s3Client.getObject(GetObjectRequest.builder().bucket(bucket).key(objectKey).build())).thenReturn(ris);
objectMonitor.refresh(listener);
objectMonitor.refresh(listener);
verify(listener).accept(ris);
} |
@Override
// Loads the migration config from the CLI-supplied file and delegates to the
// three-argument overload; returns 1 on any configuration failure.
protected int command() {
// Bail out early when no config file was supplied.
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig migrationConfig;
try {
migrationConfig = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(
migrationConfig,
MigrationsUtil::getKsqlClient,
getMigrationsDir(getConfigFile(), migrationConfig));
} | @Test
// Info command over a mix of migrated, errored and pending migrations must
// succeed and log both the current version and the full status table.
public void shouldPrintInfo() throws Exception {
// Given:
givenMigrations(
ImmutableList.of("1", "3"),
ImmutableList.of(MIGRATED, ERROR),
ImmutableList.of("N/A", "error reason"),
ImmutableList.of("4"));
// When:
final int result = command.command(config, cfg -> ksqlClient, migrationsDir);
// Then:
assertThat(result, is(0));
verify(logAppender, atLeastOnce()).doAppend(logCaptor.capture());
final List<String> logMessages = logCaptor.getAllValues().stream()
.map(LoggingEvent::getRenderedMessage)
.collect(Collectors.toList());
assertThat(logMessages, hasItem(containsString("Current migration version: 3")));
assertThat(logMessages, hasItem(containsString(
" Version | Name | State | Previous Version | Started On | Completed On | Error Reason \n" +
"------------------------------------------------------------------------------------------------\n" +
" 1 | some_name_1 | MIGRATED | <none> | N/A | N/A | N/A \n" +
" 3 | some_name_3 | ERROR | 1 | N/A | N/A | error reason \n" +
" 4 | some name 4 | PENDING | N/A | N/A | N/A | N/A \n" +
"------------------------------------------------------------------------------------------------"
)));
} |
@Override
// Kills containers one at a time until the kernel no longer reports the
// memory cgroup as under OOM.
public void run() {
try {
// Note: if the kernel reports the OOM state with a delay, this loop may
// kill more containers than strictly necessary.
while (cgroups.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL).contains(CGroupsHandler.UNDER_OOM)) {
if (!killContainer()) {
// This can happen, if SIGKILL did not clean up
// non-PGID or containers or containers launched by other users
// or if a process was put to the root YARN cgroup.
throw new YarnRuntimeException(
"Could not find any containers but CGroups " +
"reserved for containers ran out of memory. " +
"I am giving up");
}
}
} catch (ResourceHandlerException ex) {
LOG.warn("Could not fetch OOM status. " +
"This is expected at shutdown. Exiting.", ex);
}
} | @Test
// With two guaranteed containers within limits and the cgroup under OOM once,
// the handler must kill container c2 (pid 1235) exactly once and nothing else.
public void testNoOpportunisticContainerOverLimitOOM() throws Exception {
ConcurrentHashMap<ContainerId, Container> containers =
new ConcurrentHashMap<>();
Container c1 = createContainer(1, false, 1L, true);
containers.put(c1.getContainerId(), c1);
Container c2 = createContainer(2, false, 2L, true);
containers.put(c2.getContainerId(), c2);
ContainerExecutor ex = createContainerExecutor(containers);
Context context = mock(Context.class);
when(context.getContainers()).thenReturn(containers);
when(context.getContainerExecutor()).thenReturn(ex);
CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
// First poll reports under OOM, second reports resolved.
when(cGroupsHandler.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL))
.thenReturn("under_oom 1").thenReturn("under_oom 0");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PROCS_FILE))
.thenReturn("1234").thenReturn("");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PROCS_FILE))
.thenReturn("1235").thenReturn("");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
.thenReturn(getMB(9));
DefaultOOMHandler handler =
new DefaultOOMHandler(context, false) {
@Override
protected CGroupsHandler getCGroupsHandler() {
return cGroupsHandler;
}
};
handler.run();
verify(ex, times(1)).signalContainer(
new ContainerSignalContext.Builder()
.setPid("1235")
.setContainer(c2)
.setSignal(ContainerExecutor.Signal.KILL)
.build()
);
verify(ex, times(1)).signalContainer(any());
} |
@Override
// Filters conflicting version evidence on a dependency. When the dependency
// already has a version, non-hint evidence that disagrees with it is dropped.
// Otherwise, when at least two of the (file, pom/nexus/central, manifest)
// version sources agree, only the agreeing evidence is kept. Finally, if all
// remaining evidence parses to one version, that version is set on the
// dependency.
protected void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException {
final Set<Evidence> remove;
if (dependency.getVersion() != null) {
// Known version: remove non-hint evidence whose value disagrees.
remove = dependency.getEvidence(EvidenceType.VERSION).stream()
.filter(e -> !e.isFromHint() && !dependency.getVersion().equals(e.getValue()))
.collect(Collectors.toSet());
} else {
remove = new HashSet<>();
// Collect the last-seen version from each evidence source category.
String fileVersion = null;
String pomVersion = null;
String manifestVersion = null;
for (Evidence e : dependency.getEvidence(EvidenceType.VERSION)) {
if (FILE.equals(e.getSource()) && VERSION.equals(e.getName())) {
fileVersion = e.getValue();
} else if ((NEXUS.equals(e.getSource()) || CENTRAL.equals(e.getSource())
|| POM.equals(e.getSource())) && VERSION.equals(e.getName())) {
pomVersion = e.getValue();
} else if (MANIFEST.equals(e.getSource()) && IMPLEMENTATION_VERSION.equals(e.getName())) {
manifestVersion = e.getValue();
}
}
//ensure we have at least two not null
if (((fileVersion == null ? 0 : 1) + (pomVersion == null ? 0 : 1) + (manifestVersion == null ? 0 : 1)) > 1) {
final DependencyVersion dvFile = new DependencyVersion(fileVersion);
final DependencyVersion dvPom = new DependencyVersion(pomVersion);
final DependencyVersion dvManifest = new DependencyVersion(manifestVersion);
final boolean fileMatch = Objects.equals(dvFile, dvPom) || Objects.equals(dvFile, dvManifest);
final boolean manifestMatch = Objects.equals(dvManifest, dvPom) || Objects.equals(dvManifest, dvFile);
final boolean pomMatch = Objects.equals(dvPom, dvFile) || Objects.equals(dvPom, dvManifest);
if (fileMatch || manifestMatch || pomMatch) {
LOGGER.debug("filtering evidence from {}", dependency.getFileName());
// Keep only hint evidence and evidence from the matching sources.
for (Evidence e : dependency.getEvidence(EvidenceType.VERSION)) {
if (!e.isFromHint()
&& !(pomMatch && VERSION.equals(e.getName())
&& (NEXUS.equals(e.getSource()) || CENTRAL.equals(e.getSource()) || POM.equals(e.getSource())))
&& !(fileMatch && VERSION.equals(e.getName()) && FILE.equals(e.getSource()))
&& !(manifestMatch && MANIFEST.equals(e.getSource()) && IMPLEMENTATION_VERSION.equals(e.getName()))) {
remove.add(e);
}
}
}
}
}
remove.forEach((e) -> dependency.removeEvidence(EvidenceType.VERSION, e));
if (dependency.getVersion() == null) {
// Promote the surviving evidence to the dependency's version only when
// every piece of evidence parses to the same version.
final Set<Evidence> evidence = dependency.getEvidence(EvidenceType.VERSION);
final DependencyVersion version;
final Evidence e = evidence.stream().findFirst().orElse(null);
if (e != null) {
version = DependencyVersionUtil.parseVersion(e.getValue(), true);
if (version != null && evidence.stream()
.map(ev -> DependencyVersionUtil.parseVersion(ev.getValue(), true))
.allMatch(version::equals)) {
dependency.setVersion(version.toString());
}
}
}
} | @Test
// Exercises the evidence-filtering rules step by step: no agreement keeps all
// evidence; once file and pom versions agree, disagreeing evidence is removed;
// later matching nexus/central evidence is retained while mismatches persist.
public void testAnalyzeDependencyFilePom() throws Exception {
Dependency dependency = new Dependency();
dependency.addEvidence(EvidenceType.VERSION, "util", "version", "33.3", Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VERSION, "other", "version", "alpha", Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VERSION, "other", "Implementation-Version", "1.2.3", Confidence.HIGHEST);
VersionFilterAnalyzer instance = new VersionFilterAnalyzer();
instance.initialize(getSettings());
instance.analyzeDependency(dependency, null);
assertEquals(3, dependency.getEvidence(EvidenceType.VERSION).size());
dependency.addEvidence(EvidenceType.VERSION, "pom", "version", "1.2.3", Confidence.HIGHEST);
instance.analyzeDependency(dependency, null);
assertEquals(4, dependency.getEvidence(EvidenceType.VERSION).size());
dependency.addEvidence(EvidenceType.VERSION, "file", "version", "1.2.3", Confidence.HIGHEST);
instance.analyzeDependency(dependency, null);
assertEquals(2, dependency.getEvidence(EvidenceType.VERSION).size());
dependency.addEvidence(EvidenceType.VERSION, "nexus", "version", "1.2.3", Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VERSION, "other", "version", "alpha", Confidence.HIGHEST);
instance.analyzeDependency(dependency, null);
assertEquals(3, dependency.getEvidence(EvidenceType.VERSION).size());
dependency.addEvidence(EvidenceType.VERSION, "central", "version", "1.2.3", Confidence.HIGHEST);
dependency.addEvidence(EvidenceType.VERSION, "other", "version", "alpha", Confidence.HIGHEST);
instance.analyzeDependency(dependency, null);
assertEquals(4, dependency.getEvidence(EvidenceType.VERSION).size());
} |
// Splits a predicate into its column-equality, range and residual conjuncts.
public static PredicateSplit splitPredicate(ScalarOperator predicate) {
// Normalize first; a null result means there is nothing to split.
ScalarOperator normalized = filterPredicate(predicate);
if (normalized == null) {
return PredicateSplit.of(null, null, null);
}
PredicateExtractor extractor = new PredicateExtractor();
RangePredicate range =
normalized.accept(extractor, new PredicateExtractor.PredicateExtractorContext());
ScalarOperator equalityPart = Utils.compoundAnd(extractor.getColumnEqualityPredicates());
ScalarOperator residualPart = Utils.compoundAnd(extractor.getResidualPredicates());
ScalarOperator rangePart = null;
if (range != null) {
// Turn the extracted range representation back into a scalar conjunct.
rangePart = range.toScalarOperator();
} else if (extractor.getColumnEqualityPredicates().isEmpty() && extractor.getResidualPredicates().isEmpty()) {
// Nothing was extracted at all: treat the whole normalized predicate as residual.
residualPart = Utils.compoundAnd(residualPart, normalized);
}
return PredicateSplit.of(equalityPart, rangePart, residualPart);
} | @Test
// A null predicate yields an all-null split; a conjunction of an equality, a
// range comparison, and a call-based comparison is split into those 3 parts.
public void testSplitPredicate() {
ScalarOperator predicate = null;
PredicateSplit split = PredicateSplit.splitPredicate(predicate);
assertNotNull(split);
Assert.assertNull(split.getEqualPredicates());
Assert.assertNull(split.getRangePredicates());
Assert.assertNull(split.getResidualPredicates());
ColumnRefFactory columnRefFactory = new ColumnRefFactory();
ColumnRefOperator columnRef1 = columnRefFactory.create("col1", Type.INT, false);
ColumnRefOperator columnRef2 = columnRefFactory.create("col2", Type.INT, false);
BinaryPredicateOperator binaryPredicate = new BinaryPredicateOperator(
BinaryType.EQ, columnRef1, columnRef2);
BinaryPredicateOperator binaryPredicate2 = new BinaryPredicateOperator(
BinaryType.GE, columnRef1, ConstantOperator.createInt(1));
List<ScalarOperator> arguments = Lists.newArrayList();
arguments.add(columnRef1);
arguments.add(columnRef2);
CallOperator callOperator = new CallOperator(FunctionSet.SUM, Type.INT, arguments);
BinaryPredicateOperator binaryPredicate3 = new BinaryPredicateOperator(
BinaryType.GE, callOperator, ConstantOperator.createInt(1));
ScalarOperator andPredicate = Utils.compoundAnd(binaryPredicate, binaryPredicate2, binaryPredicate3);
PredicateSplit result = PredicateSplit.splitPredicate(andPredicate);
Assert.assertEquals(binaryPredicate, result.getEqualPredicates());
Assert.assertEquals(binaryPredicate2, result.getRangePredicates());
Assert.assertEquals(binaryPredicate3, result.getResidualPredicates());
} |
// Security is considered enabled once at least one auth config is present.
public boolean isSecurityEnabled() {
if (securityAuthConfigs == null) {
return false;
}
return !securityAuthConfigs.isEmpty();
} | @Test
// A server configured with a password-file auth config must report security enabled.
public void shouldSaySecurityEnabledIfPasswordFileSecurityEnabled() {
ServerConfig serverConfig = server(passwordFileAuthConfig(), admins());
assertTrue(serverConfig.isSecurityEnabled(), "Security should be enabled when password file config present");
} |
@Override
// Runs 'docker info' with JSON output and parses the result; a non-zero exit
// status is surfaced as an IOException carrying the process's stderr.
public DockerInfoDetails info() throws IOException, InterruptedException {
Process process = docker("info", "-f", "{{json .}}");
InputStream stdout = process.getInputStream();
if (process.waitFor() != 0) {
throw new IOException(
"'docker info' command failed with error: " + getStderrOutput(process));
}
return JsonTemplateMapper.readJson(stdout, DockerInfoDetails.class);
} | @Test
// info() must run "docker info -f {{json .}}" and parse OSType/Architecture
// from the command's JSON stdout.
public void testInfo() throws InterruptedException, IOException {
String dockerInfoJson = "{ \"OSType\": \"windows\"," + "\"Architecture\": \"arm64\"}";
DockerClient testDockerClient =
new CliDockerClient(
subcommand -> {
assertThat(subcommand).containsExactly("info", "-f", "{{json .}}");
return mockProcessBuilder;
});
// Simulates stdout.
Mockito.when(mockProcess.getInputStream())
.thenReturn(new ByteArrayInputStream(dockerInfoJson.getBytes()));
DockerInfoDetails infoDetails = testDockerClient.info();
assertThat(infoDetails.getArchitecture()).isEqualTo("arm64");
assertThat(infoDetails.getOsType()).isEqualTo("windows");
} |
// Pipeline: fetch the pods, derive the from/to Kafka versions from them, then
// compute the required version change.
public Future<KafkaVersionChange> reconcile() {
return getPods()
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
} | @Test
// A metadata version newer than the broker version being upgraded from must
// fail reconciliation with a KafkaUpgradeException and the expected message.
public void testUpgradeWithWrongCurrentMetadataVersion(VertxTestContext context) {
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(VERSIONS.defaultVersion().version(), "5.11-IV1", VERSIONS.defaultVersion().metadataVersion()),
mockRos(mockUniformPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version()))
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.failing(c -> context.verify(() -> {
assertThat(c, is(instanceOf(KafkaUpgradeException.class)));
assertThat(c.getMessage(), is("The current metadata version (5.11-IV1) has to be lower or equal to the Kafka broker version we upgrade from (" + VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version() + ")"));
async.flag();
})));
} |
@Override
// Deletes the given files/buckets from Google Cloud Storage. Buckets are
// deleted via the Buckets API; objects via the Objects API, including the
// generation number when versioning is enabled and a version id is present.
// Requester-pays buckets get the user project set on every request.
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
for(Path file : files.keySet()) {
try {
callback.delete(file);
if(containerService.isContainer(file)) {
// Deleting a whole bucket.
final Storage.Buckets.Delete request = session.getClient().buckets().delete(file.getName());
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
request.execute();
}
else {
final Storage.Objects.Delete request = session.getClient().objects().delete(containerService.getContainer(file).getName(), containerService.getKey(file));
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
final VersioningConfiguration versioning = null != session.getFeature(Versioning.class) ? session.getFeature(Versioning.class).getConfiguration(
containerService.getContainer(file)
) : VersioningConfiguration.empty();
if(versioning.isEnabled()) {
if(StringUtils.isNotBlank(file.attributes().getVersionId())) {
// You permanently delete versions of objects by including the generation number in the deletion request
request.setGeneration(Long.parseLong(file.attributes().getVersionId()));
}
}
request.execute();
}
}
catch(IOException e) {
final BackgroundException failure = new GoogleStorageExceptionMappingService().map("Cannot delete {0}", e, file);
if(file.isDirectory()) {
if(failure instanceof NotfoundException) {
// No placeholder file may exist but we just have a common prefix
continue;
}
}
throw failure;
}
}
} | @Test
// Deleting a versioned object without a version id adds a delete marker: the
// object becomes a duplicate, is no longer found, but the marker itself is
// still listed and readable.
public void testDeletedWithMarker() throws Exception {
final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path directory = new GoogleStorageDirectoryFeature(session).mkdir(new Path(container,
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path test = new GoogleStorageTouchFeature(session).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertNotNull(test.attributes().getVersionId());
assertNotEquals(PathAttributes.EMPTY, new GoogleStorageAttributesFinderFeature(session).find(test));
// Add delete marker
new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(new Path(test).withAttributes(PathAttributes.EMPTY)), new DisabledPasswordCallback(), new Delete.DisabledCallback());
assertTrue(new GoogleStorageAttributesFinderFeature(session).find(test).isDuplicate());
assertFalse(new GoogleStorageFindFeature(session).find(new Path(test).withAttributes(PathAttributes.EMPTY)));
assertFalse(new DefaultFindFeature(session).find(new Path(test).withAttributes(PathAttributes.EMPTY)));
// Test reading delete marker itself
final Path marker = new GoogleStorageObjectListService(session).list(directory, new DisabledListProgressListener()).find(new SimplePathPredicate(test));
assertTrue(marker.attributes().isDuplicate());
assertTrue(new GoogleStorageAttributesFinderFeature(session).find(marker).isDuplicate());
assertTrue(new DefaultAttributesFinderFeature(session).find(marker).isDuplicate());
assertTrue(new GoogleStorageFindFeature(session).find(marker));
new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(new Path(directory).withAttributes(PathAttributes.EMPTY)), new DisabledPasswordCallback(), new Delete.DisabledCallback());
} |
// Heuristic duplicate detection: identical non-empty item identifiers or
// stream URLs are conclusive; otherwise title, date, duration and mime type
// must all look similar. Ordering matters: the identifier check runs before
// the media null guard, so identifier-equal items without media still match.
public static boolean seemDuplicates(FeedItem item1, FeedItem item2) {
if (sameAndNotEmpty(item1.getItemIdentifier(), item2.getItemIdentifier())) {
return true;
}
FeedMedia media1 = item1.getMedia();
FeedMedia media2 = item2.getMedia();
if (media1 == null || media2 == null) {
// Without media there is nothing further to compare.
return false;
}
if (sameAndNotEmpty(media1.getStreamUrl(), media2.getStreamUrl())) {
return true;
}
return titlesLookSimilar(item1, item2)
&& datesLookSimilar(item1, item2)
&& durationsLookSimilar(media1, media2)
&& mimeTypeLooksSimilar(media1, media2);
} | @Test
// Items sharing the same non-empty identifier are duplicates even when every
// other attribute (title, URL, duration, mime type) differs.
public void testSameId() {
assertTrue(FeedItemDuplicateGuesser.seemDuplicates(
item("id", "Title1", "example.com/episode1", 0, 5 * MINUTES, "audio/*"),
item("id", "Title2", "example.com/episode2", 0, 20 * MINUTES, "video/*")));
} |
// Inventory is finished only when every shard has reported progress AND all
// inventory tasks in those progresses are completed.
public static boolean isInventoryFinished(final int jobShardingCount, final Collection<TransmissionJobItemProgress> jobItemProgresses) {
return isAllProgressesFilled(jobShardingCount, jobItemProgresses) && isAllInventoryTasksCompleted(jobItemProgresses);
} | @Test
// A progress whose inventory task is still at a placeholder position must not
// count as finished even when all shards reported progress.
void assertIsInventoryFinishedWhenNotAllInventoryTasksCompleted() {
JobItemInventoryTasksProgress inventoryTasksProgress = new JobItemInventoryTasksProgress(Collections.singletonMap("TEST", new InventoryTaskProgress(new IngestPlaceholderPosition())));
TransmissionJobItemProgress transmissionJobItemProgress = new TransmissionJobItemProgress();
transmissionJobItemProgress.setInventory(inventoryTasksProgress);
assertFalse(PipelineJobProgressDetector.isInventoryFinished(1, Collections.singleton(transmissionJobItemProgress)));
} |
// A route context is table-sharded once any table mapper rewrites the logic
// table name into a different actual name.
public boolean containsTableSharding() {
return routeUnits.stream()
.flatMap(each -> each.getTableMappers().stream())
.anyMatch(mapper -> !mapper.getActualName().equals(mapper.getLogicName()));
} | @Test
// A route context whose mappers all keep their logic names must report no table sharding.
void assertContainsTableShardingWhenNotContainsTableSharding() {
assertFalse(notContainsTableShardingRouteContext.containsTableSharding());
} |
/**
 * Creates an L2 modification instruction that sets the VLAN priority (PCP).
 *
 * @param vlanPcp new VLAN PCP value; must not be null
 * @return a ModVlanPcpInstruction carrying the given PCP
 */
public static L2ModificationInstruction modVlanPcp(Byte vlanPcp) {
checkNotNull(vlanPcp, "VLAN Pcp cannot be null");
return new L2ModificationInstruction.ModVlanPcpInstruction(vlanPcp);
} | @Test
// modVlanPcp must build an L2MODIFICATION/VLAN_PCP instruction carrying the given PCP.
public void testModVlanPcpMethod() {
final Instruction instruction = Instructions.modVlanPcp(vlanPcp1);
final L2ModificationInstruction.ModVlanPcpInstruction modEtherInstruction =
checkAndConvert(instruction,
Instruction.Type.L2MODIFICATION,
L2ModificationInstruction.ModVlanPcpInstruction.class);
assertThat(modEtherInstruction.vlanPcp(), is(equalTo(vlanPcp1)));
assertThat(modEtherInstruction.subtype(),
is(equalTo(L2ModificationInstruction.L2SubType.VLAN_PCP)));
} |
@Udf
// Truncating a Long is the identity: an integer has no fractional digits to drop.
public Long trunc(@UdfParameter final Long val) {
return val;
} | @Test
// The BigDecimal overload must truncate (never round) values ending in 5 that
// have no exact double representation, for both signs.
public void shouldHandleDoubleLiteralsEndingWith5ThatCannotBeRepresentedExactlyAsDoubles() {
assertThat(udf.trunc(new BigDecimal("265.335"), 2), is(new BigDecimal("265.330")));
assertThat(udf.trunc(new BigDecimal("-265.335"), 2), is(new BigDecimal("-265.330")));
assertThat(udf.trunc(new BigDecimal("265.365"), 2), is(new BigDecimal("265.360")));
assertThat(udf.trunc(new BigDecimal("-265.365"), 2), is(new BigDecimal("-265.360")));
} |
// Estimates the number of distinct values seen by this sketch.
public long cardinality()
{
if (isExact()) {
// Exact mode: the stored hash set is presumed complete, so its size is the answer.
return minhash.size();
}
// Estimate: measure the density of the stored minimum hashes and extrapolate
// over the whole 64-bit hash output range. Since 2^64 does not fit in a long,
// work with half of the range (Long.MAX_VALUE) and half of the density. The
// "-1" is the bias correction detailed in "On Synopses for Distinct-Value
// Estimation Under Multiset Operations" by Beyer et. al.
long occupiedRange = minhash.lastLongKey() - Long.MIN_VALUE;
double halfDensity = Long.divideUnsigned(occupiedRange, minhash.size() - 1) / 2D;
return (long) (HASH_OUTPUT_HALF_RANGE / halfDensity);
} | @Test
// Statistical check: across many random trials and bucket counts, the
// cardinality estimator's standard error must stay within 110% of the
// theoretical 1.04/sqrt(buckets) bound.
public void testCardinality()
throws Exception
{
int trials = 1000;
for (int indexBits = 4; indexBits <= 12; indexBits++) {
Map<Integer, StandardDeviation> errors = new HashMap<>();
int numberOfBuckets = 1 << indexBits;
int maxCardinality = numberOfBuckets * 2;
for (int trial = 0; trial < trials; trial++) {
KHyperLogLog khll = new KHyperLogLog();
for (int cardinality = 1; cardinality <= maxCardinality; cardinality++) {
khll.add(ThreadLocalRandom.current().nextLong(), 0L);
if (cardinality % (numberOfBuckets / 10) == 0) {
// only do this a few times, since computing the cardinality is currently not
// as cheap as it should be
double error = (khll.cardinality() - cardinality) * 1.0 / cardinality;
StandardDeviation stdev = errors.computeIfAbsent(cardinality, k -> new StandardDeviation());
stdev.increment(error);
}
}
}
double expectedStandardError = 1.04 / Math.sqrt(1 << indexBits);
for (Map.Entry<Integer, StandardDeviation> entry : errors.entrySet()) {
// Give an extra error margin. This is mostly a sanity check to catch egregious errors
double realStandardError = entry.getValue().getResult();
assertTrue(realStandardError <= expectedStandardError * 1.1,
String.format("Failed at p = %s, cardinality = %s. Expected std error = %s, actual = %s",
indexBits,
entry.getKey(),
expectedStandardError,
realStandardError));
}
}
} |
@Override
// Lists the plugins offered by the language server. Returns an empty list when
// the server is not healthy; wraps RPC failures in LanguageServerException.
public ImmutableList<PluginDefinition> getAllPlugins() {
try {
if (checkHealthWithBackoffs()) {
logger.atInfo().log("Getting language server plugins...");
var listPluginsResponse =
service
.listPluginsWithDeadline(ListPluginsRequest.getDefaultInstance(), DEFAULT_DEADLINE)
.get();
// Note: each plugin service client has a dedicated RemoteVulnDetectorImpl instance,
// so we can safely set this flag here.
this.wantCompactRunRequest = listPluginsResponse.getWantCompactRunRequest();
return ImmutableList.copyOf(listPluginsResponse.getPluginsList());
} else {
return ImmutableList.of();
}
} catch (InterruptedException e) {
// BUGFIX: restore the interrupt flag before wrapping, so callers further up
// the stack can still observe the interruption.
Thread.currentThread().interrupt();
throw new LanguageServerException("Failed to get response from language server.", e);
} catch (ExecutionException e) {
throw new LanguageServerException("Failed to get response from language server.", e);
}
} | @Test
// A server reporting NOT_SERVING health must yield an empty plugin list.
public void getAllPlugins_withNonServingServer_returnsEmptyList() throws Exception {
registerHealthCheckWithStatus(ServingStatus.NOT_SERVING);
assertThat(getNewRemoteVulnDetectorInstance().getAllPlugins()).isEmpty();
} |
// Accessor for the static debug-mode flag.
public static boolean isDebugMode() {
return debugMode;
} | @Test
// The accessor must mirror the backing static field exactly.
public void isDebugMode() throws Exception {
Assert.assertEquals(RpcRunningState.isDebugMode(), RpcRunningState.debugMode);
} |
/**
 * Strips symmetric outer parentheses from an expression, e.g. "((a + b*c))" -> "a + b*c".
 *
 * @param value expression text, possibly wrapped in parentheses
 * @return the expression without its outside parentheses
 */
public static String getExpressionWithoutOutsideParentheses(final String value) {
// getParenthesesOffset reports how many wrapping parentheses to strip from each side.
int parenthesesOffset = getParenthesesOffset(value);
return 0 == parenthesesOffset ? value : value.substring(parenthesesOffset, value.length() - parenthesesOffset);
} | @Test
// Double-wrapped expressions lose their outer parentheses; the empty string is unchanged.
void assertGetExpressionWithoutOutsideParentheses() {
assertThat(SQLUtils.getExpressionWithoutOutsideParentheses("((a + b*c))"), is("a + b*c"));
assertThat(SQLUtils.getExpressionWithoutOutsideParentheses(""), is(""));
} |
// Builds a per-KsqlConfig factory for the UDF: instantiates the function class,
// configures it (inside the extension security sandbox) when it implements
// Configurable, and wraps it in a metric-producing decorator when metrics are
// enabled.
private Function<KsqlConfig, Kudf> getUdfFactory(
final Method method,
final UdfDescription udfDescriptionAnnotation,
final String functionName,
final FunctionInvoker invoker,
final String sensorName
) {
return ksqlConfig -> {
final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
method.getDeclaringClass(), udfDescriptionAnnotation.name());
if (actualUdf instanceof Configurable) {
// Run user-supplied configure() under the restricted security context;
// the pop in finally guarantees the sandbox is exited even on failure.
ExtensionSecurityManager.INSTANCE.pushInUdf();
try {
((Configurable) actualUdf)
.configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
} finally {
ExtensionSecurityManager.INSTANCE.popOutUdf();
}
}
final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
return metrics.<Kudf>map(m -> new UdfMetricProducer(
m.getSensor(sensorName),
theUdf,
Time.SYSTEM
)).orElse(theUdf);
};
} | @Test
// Instantiating a Configurable UDF must pass it its function-scoped and global
// config properties, but never unrelated server settings like the service id.
public void shouldConfigureConfigurableUdfsOnInstantiation() {
// Given:
final KsqlConfig ksqlConfig = new KsqlConfig(ImmutableMap.of(
KsqlConfig.KSQL_SERVICE_ID_CONFIG, "should not be passed",
KSQL_FUNCTIONS_PROPERTY_PREFIX + "configurableudf.some.setting", "foo-bar",
KSQL_FUNCTIONS_PROPERTY_PREFIX + "_global_.expected-param", "expected-value"
));
final KsqlScalarFunction udf = FUNC_REG.getUdfFactory(FunctionName.of("ConfigurableUdf"))
.getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.INTEGER)));
// When:
udf.newInstance(ksqlConfig);
// Then:
assertThat(PASSED_CONFIG, is(notNullValue()));
assertThat(PASSED_CONFIG.keySet(), not(hasItem(KsqlConfig.KSQL_SERVICE_ID_CONFIG)));
assertThat(PASSED_CONFIG.get(KSQL_FUNCTIONS_PROPERTY_PREFIX + "configurableudf.some.setting"),
is("foo-bar"));
assertThat(PASSED_CONFIG.get(KSQL_FUNCTIONS_PROPERTY_PREFIX + "_global_.expected-param"),
is("expected-value"));
} |
// NOTE(review): lock-free ring-buffer recovery — statement order and the
// volatile/ordered buffer accesses below are load-bearing; do not reorder.
// Attempts to unblock the buffer when a producer appears to have stalled
// mid-publication by converting the stalled region at the consumer position
// into a padding record the consumer can skip.
public boolean unblock()
{
    final AtomicBuffer buffer = this.buffer;
    // Volatile reads of the consumer (head) and producer (tail) positions.
    final long headPosition = buffer.getLongVolatile(headPositionIndex);
    final long tailPosition = buffer.getLongVolatile(tailPositionIndex);
    if (headPosition == tailPosition)
    {
        // Nothing outstanding between head and tail: cannot be blocked.
        return false;
    }
    // capacity is a power of two, so masking maps positions to buffer offsets.
    final int mask = capacity - 1;
    final int consumerIndex = (int)(headPosition & mask);
    final int producerIndex = (int)(tailPosition & mask);
    boolean unblocked = false;
    // Length word at the consumer offset: negative => record claimed but not
    // committed; zero => no header written there at all yet.
    int length = buffer.getIntVolatile(consumerIndex);
    if (length < 0)
    {
        // Claimed-but-uncommitted record: overwrite with a padding record of the
        // same absolute length; the ordered length write publishes it.
        buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
        buffer.putIntOrdered(lengthOffset(consumerIndex), -length);
        unblocked = true;
    }
    else if (0 == length)
    {
        // go from (consumerIndex to producerIndex) or (consumerIndex to capacity)
        final int limit = producerIndex > consumerIndex ? producerIndex : capacity;
        int i = consumerIndex + ALIGNMENT;
        do
        {
            // read the top int of every long (looking for length aligned to 8=ALIGNMENT)
            length = buffer.getIntVolatile(i);
            if (0 != length)
            {
                // Found a later record; re-confirm the gap is still zeroed before
                // padding it out (a live producer may have caught up meanwhile).
                if (scanBackToConfirmStillZeroed(buffer, i, consumerIndex))
                {
                    buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
                    buffer.putIntOrdered(lengthOffset(consumerIndex), i - consumerIndex);
                    unblocked = true;
                }
                break;
            }
            i += ALIGNMENT;
        }
        while (i < limit);
    }
    return unblocked;
} | @Test
// Full buffer whose head record has no header written: the scan finds a later
// non-zero length and pads the zeroed gap at the consumer position.
void shouldUnblockWhenFullWithoutHeader()
{
    final int messageLength = ALIGNMENT * 4;
    when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn((long)messageLength);
    when(buffer.getLongVolatile(TAIL_COUNTER_INDEX)).thenReturn((long)messageLength + CAPACITY);
    // First non-zero length word encountered during the forward scan.
    when(buffer.getIntVolatile(messageLength * 2)).thenReturn(messageLength);
    assertTrue(ringBuffer.unblock());
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).putInt(typeOffset(messageLength), PADDING_MSG_TYPE_ID);
    inOrder.verify(buffer).putIntOrdered(lengthOffset(messageLength), messageLength);
} |
/**
 * Generates all codec artifacts for the IR: package info, type stubs, the
 * message-header stub, and a decoder/encoder pair per message.
 *
 * @throws IOException if writing any generated source fails
 */
public void generate() throws IOException
{
    packageNameByTypes.clear();
    generatePackageInfo();
    generateTypeStubs();
    generateMessageHeaderStub();
    for (final List<Token> tokens : ir.messages())
    {
        final Token messageToken = tokens.get(0);
        final List<Token> messageBody = getMessageBody(tokens);
        final boolean hasVarData = findSignal(messageBody, Signal.BEGIN_VAR_DATA) != -1;
        // Partition the message body into fields, groups, and var-data sections,
        // threading the token index through each collector.
        final List<Token> fields = new ArrayList<>();
        final List<Token> groups = new ArrayList<>();
        final List<Token> varData = new ArrayList<>();
        int index = collectFields(messageBody, 0, fields);
        index = collectGroups(messageBody, index, groups);
        collectVarData(messageBody, index, varData);
        // Decoder side, with its field-access-order (precedence) model.
        final String decoderClassName = formatClassName(decoderName(messageToken.name()));
        final FieldPrecedenceModel decoderPrecedenceModel = precedenceChecks.createDecoderModel(
            decoderClassName + "#CodecStates", tokens);
        generateDecoder(decoderClassName, messageToken, fields, groups, varData, hasVarData, decoderPrecedenceModel);
        // Encoder side, mirrored.
        final String encoderClassName = formatClassName(encoderName(messageToken.name()));
        final FieldPrecedenceModel encoderPrecedenceModel = precedenceChecks.createEncoderModel(
            encoderClassName + "#CodecStates", tokens);
        generateEncoder(encoderClassName, messageToken, fields, groups, varData, hasVarData, encoderPrecedenceModel);
    }
} | @Test
void shouldGenerateWithoutPrecedenceChecksByDefault() throws Exception
{
    // A default PrecedenceChecks context must not emit the checks flag field.
    final PrecedenceChecks.Context context = new PrecedenceChecks.Context();
    final PrecedenceChecks precedenceChecks = PrecedenceChecks.newInstance(context);
    generator(precedenceChecks).generate();
    final Field field = Arrays.stream(compileCarEncoder().getDeclaredFields())
        .filter(f -> f.getName().equals(context.precedenceChecksFlagName()))
        .findFirst()
        .orElse(null);
    assertNull(field);
} |
/**
 * Convenience overload: resolves placeholders in {@code doc} using the raw
 * values of the given row, delegating to the array-based variant.
 *
 * @param doc document whose placeholder tokens are to be substituted
 * @param evalContext evaluation context supplying dynamic parameter values
 * @param inputRow row whose values back the input references
 * @param externalNames external field names used for reference resolution
 * @param forRow whether the replacement is performed in row context
 * @return the document with placeholders replaced
 */
static Document replacePlaceholders(Document doc, ExpressionEvalContext evalContext, JetSqlRow inputRow,
        String[] externalNames, boolean forRow) {
    return replacePlaceholders(doc, evalContext, inputRow.getValues(), externalNames, forRow);
} | @Test
public void replaces_dynamic_param() {
    // given: nested documents referencing dynamic parameters 0 and 1
    Document embedded = new Document("test", "<!DynamicParameter(1)!>");
    Document doc = new Document("<!DynamicParameter(0)!>", embedded);
    List<Object> arguments = asList("jeden", "dwa");
    // when
    Document result = PlaceholderReplacer.replacePlaceholders(doc, evalContext(arguments), (Object[]) null,
            null, false);
    // then: both placeholders resolved from the argument list
    assertThat(result).isInstanceOf(Document.class);
    Document expected = new Document("jeden", new Document("test", "dwa"));
    assertThat(result).isEqualTo(expected);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.