focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
@Override
@Transactional(rollbackFor = Exception.class) // 添加事务,异常则回滚所有导入
public UserImportRespVO importUserList(List<UserImportExcelVO> importUsers, boolean isUpdateSupport) {
if (CollUtil.isEmpty(importUsers)) {
throw exception(USER_IMPORT_LIST_IS_EMPTY);
}
UserImportRespVO respVO = UserImportRespVO.builder().createUsernames(new ArrayList<>())
.updateUsernames(new ArrayList<>()).failureUsernames(new LinkedHashMap<>()).build();
importUsers.forEach(importUser -> {
// 校验,判断是否有不符合的原因
try {
validateUserForCreateOrUpdate(null, null, importUser.getMobile(), importUser.getEmail(),
importUser.getDeptId(), null);
} catch (ServiceException ex) {
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
// 判断如果不存在,在进行插入
AdminUserDO existUser = userMapper.selectByUsername(importUser.getUsername());
if (existUser == null) {
userMapper.insert(BeanUtils.toBean(importUser, AdminUserDO.class)
.setPassword(encodePassword(userInitPassword)).setPostIds(new HashSet<>())); // 设置默认密码及空岗位编号数组
respVO.getCreateUsernames().add(importUser.getUsername());
return;
}
// 如果存在,判断是否允许更新
if (!isUpdateSupport) {
respVO.getFailureUsernames().put(importUser.getUsername(), USER_USERNAME_EXISTS.getMsg());
return;
}
AdminUserDO updateUser = BeanUtils.toBean(importUser, AdminUserDO.class);
updateUser.setId(existUser.getId());
userMapper.updateById(updateUser);
respVO.getUpdateUsernames().add(importUser.getUsername());
});
return respVO;
} | @Test
public void testImportUserList_02() {
// 准备参数
UserImportExcelVO importUser = randomPojo(UserImportExcelVO.class, o -> {
o.setStatus(randomEle(CommonStatusEnum.values()).getStatus()); // 保证 status 的范围
o.setSex(randomEle(SexEnum.values()).getSex()); // 保证 sex 的范围
});
// mock deptService 的方法
DeptDO dept = randomPojo(DeptDO.class, o -> {
o.setId(importUser.getDeptId());
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
});
when(deptService.getDept(eq(dept.getId()))).thenReturn(dept);
// mock passwordEncoder 的方法
when(passwordEncoder.encode(eq("yudaoyuanma"))).thenReturn("java");
// 调用
UserImportRespVO respVO = userService.importUserList(newArrayList(importUser), true);
// 断言
assertEquals(1, respVO.getCreateUsernames().size());
AdminUserDO user = userMapper.selectByUsername(respVO.getCreateUsernames().get(0));
assertPojoEquals(importUser, user);
assertEquals("java", user.getPassword());
assertEquals(0, respVO.getUpdateUsernames().size());
assertEquals(0, respVO.getFailureUsernames().size());
} |
static String getETag(String output) {
return "W/" + hash(output.getBytes(UTF_8));
} | @Test
public void getETag_should_return_same_value_for_same_input() {
String input = randomAlphanumeric(200);
assertThat(ETagUtils.getETag(input)).isEqualTo(ETagUtils.getETag(input));
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldHandleRowTimeWithoutKey() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
ImmutableList.of(SystemColumns.ROWTIME_NAME, COL0, COL1),
ImmutableList.of(
new LongLiteral(1234L),
new StringLiteral("str"),
new LongLiteral(2L)
)
);
// When:
executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
// Then:
verify(keySerializer).serialize(TOPIC_NAME, genericKey((String) null));
verify(valueSerializer).serialize(TOPIC_NAME, genericRow("str", 2L));
verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1234L, KEY, VALUE));
} |
@Udf
public List<String> keys(@UdfParameter final String jsonObj) {
if (jsonObj == null) {
return null;
}
final JsonNode node = UdfJsonMapper.parseJson(jsonObj);
if (node.isMissingNode() || !node.isObject()) {
return null;
}
final List<String> ret = new ArrayList<>();
node.fieldNames().forEachRemaining(ret::add);
return ret;
} | @Test
public void shouldReturnNullForNumber() {
assertNull(udf.keys("123"));
} |
public String validate(final String xml) {
final Source source = new SAXSource(reader, new InputSource(IOUtils.toInputStream(xml, Charset.defaultCharset())));
return validate(source);
} | @Test
public void testInValidXML() throws Exception {
String payload = IOUtils.toString(ClassLoader.getSystemResourceAsStream("xml/article-2.xml"),
Charset.defaultCharset());
logger.info("Validating payload: {}", payload);
// validate
String result = getProcessor("sch/schematron-2.sch", null).validate(payload);
logger.info("Schematron Report: {}", result);
// should throw two assertions because of the missing chapters in the XML.
assertEquals("A chapter should have a title", Utils.evaluate("//svrl:failed-assert/svrl:text", result));
assertEquals("'chapter' element has more than one title present",
Utils.evaluate("//svrl:successful-report/svrl:text", result).trim());
} |
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
int version = currentVersion() + 1;
CommitStatus commitStatus = CommitStatus.FAILURE;
/* This method adds no fs scheme, and it persists in HTS that way. */
final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
HouseTable houseTable = HouseTable.builder().build();
try {
// Now that we have metadataLocation we stamp it in metadata property.
Map<String, String> properties = new HashMap<>(metadata.properties());
failIfRetryUpdate(properties);
String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
if (base == null) {
properties.put(getCanonicalFieldName("creationTime"), currentTsString);
}
properties.put(
getCanonicalFieldName("tableVersion"),
properties.getOrDefault(
getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);
String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
boolean isStageCreate =
Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
logPropertiesMap(properties);
TableMetadata updatedMetadata = metadata.replaceProperties(properties);
if (serializedSnapshotsToPut != null) {
List<Snapshot> snapshotsToPut =
SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
snapshotInspector.validateSnapshotsUpdate(
updatedMetadata, appendedSnapshots, deletedSnapshots);
Map<String, SnapshotRef> snapshotRefs =
serializedSnapshotRefs == null
? new HashMap<>()
: SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
updatedMetadata =
maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
}
final TableMetadata updatedMtDataRef = updatedMetadata;
metricsReporter.executeWithStats(
() ->
TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);
houseTable = houseTableMapper.toHouseTable(updatedMetadata);
if (!isStageCreate) {
houseTableRepository.save(houseTable);
} else {
/**
* Refresh current metadata for staged tables from newly created metadata file and disable
* "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
* TableMetadata)}
*/
refreshFromMetadataLocation(newMetadataLocation);
}
commitStatus = CommitStatus.SUCCESS;
} catch (InvalidIcebergSnapshotException e) {
throw new BadRequestException(e, e.getMessage());
} catch (CommitFailedException e) {
throw e;
} catch (HouseTableCallerException
| HouseTableNotFoundException
| HouseTableConcurrentUpdateException e) {
throw new CommitFailedException(e);
} catch (Throwable persistFailure) {
// Try to reconnect and determine the commit status for unknown exception
log.error(
"Encounter unexpected error while updating metadata.json for table:" + tableIdentifier,
persistFailure);
commitStatus = checkCommitStatus(newMetadataLocation, metadata);
switch (commitStatus) {
case SUCCESS:
log.debug("Calling doCommit succeeded");
break;
case FAILURE:
// logging error and exception-throwing co-existence is needed, given the exception
// handler in
// org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create swallow
// the
// nested exception information.
log.error("Exception details:", persistFailure);
throw new CommitFailedException(
persistFailure,
String.format(
"Persisting metadata file %s at version %s for table %s failed while persisting to house table",
newMetadataLocation, version, GSON.toJson(houseTable)));
case UNKNOWN:
throw new CommitStateUnknownException(persistFailure);
}
} finally {
switch (commitStatus) {
case FAILURE:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
break;
case UNKNOWN:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
break;
default:
break; /*should never happen, kept to silence SpotBugs*/
}
}
} | @Test
void testDoCommitAppendStageOnlySnapshotsExistingVersion() throws IOException {
List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
List<Snapshot> testWapSnapshots = IcebergTestUtil.getWapSnapshots().subList(0, 2);
// add 1 snapshot to the base metadata
TableMetadata base =
TableMetadata.buildFrom(BASE_TABLE_METADATA)
.setBranchSnapshot(testSnapshots.get(0), SnapshotRef.MAIN_BRANCH)
.build();
List<Snapshot> newSnapshots = new ArrayList<>();
newSnapshots.add(testSnapshots.get(0));
newSnapshots.addAll(testWapSnapshots);
Map<String, String> properties = new HashMap<>(base.properties());
try (MockedStatic<TableMetadataParser> ignoreWriteMock =
Mockito.mockStatic(TableMetadataParser.class)) {
// add staged snapshots to the new metadata
properties.put(
CatalogConstants.SNAPSHOTS_JSON_KEY, SnapshotsUtil.serializedSnapshots(newSnapshots));
properties.put(
CatalogConstants.SNAPSHOTS_REFS_KEY,
SnapshotsUtil.serializeMap(
IcebergTestUtil.obtainSnapshotRefsFromSnapshot(newSnapshots.get(0))));
properties.put(getCanonicalFieldName("tableLocation"), TEST_LOCATION);
TableMetadata metadata = base.replaceProperties(properties);
openHouseInternalTableOperations.doCommit(base, metadata);
Mockito.verify(mockHouseTableMapper).toHouseTable(tblMetadataCaptor.capture());
Map<String, String> updatedProperties = tblMetadataCaptor.getValue().properties();
// verify snapshots are staged but not appended
Assertions.assertEquals(
testWapSnapshots.stream()
.map(s -> Long.toString(s.snapshotId()))
.collect(Collectors.joining(",")),
updatedProperties.get(getCanonicalFieldName("staged_snapshots")));
Assertions.assertEquals(
null, updatedProperties.get(getCanonicalFieldName("appended_snapshots")));
Assertions.assertEquals(
null, updatedProperties.get(getCanonicalFieldName("cherry_picked_snapshots")));
Assertions.assertEquals(
null, updatedProperties.get(getCanonicalFieldName("deleted_snapshots")));
Mockito.verify(mockHouseTableRepository, Mockito.times(1)).save(Mockito.eq(mockHouseTable));
}
} |
static <T> T copy(T object, DataComplexTable alreadyCopied) throws CloneNotSupportedException
{
if (object == null)
{
return null;
}
else if (isComplex(object))
{
DataComplex src = (DataComplex) object;
@SuppressWarnings("unchecked")
T found = (T) alreadyCopied.get(src);
if (found != null)
{
return found;
}
else
{
DataComplex clone = src.clone();
alreadyCopied.put(src, clone);
if (clone instanceof DataMap)
{
((DataMap)clone).copyReferencedObjects(alreadyCopied);
}
else if (clone instanceof DataList)
{
((DataList)clone).copyReferencedObjects(alreadyCopied);
}
@SuppressWarnings("unchecked")
T converted = (T) clone;
return converted;
}
}
else if (isPrimitive(object))
{
return object;
}
else
{
throw new CloneNotSupportedException("Illegal value encountered: " + object);
}
} | @Test
public void testCopy() throws CloneNotSupportedException
{
boolean copyOnWrite = ! CheckedMap.class.isAssignableFrom(DataMap.class);
/* DataMap with only immutable types */
DataMap map1 = new DataMap(referenceMap1);
DataMap map2 = map1.copy();
DataMap map3 = map2.copy();
assertTrue(! copyOnWrite || map1.getUnderlying() == map2.getUnderlying());
assertTrue(! copyOnWrite || map1.getUnderlying() == map3.getUnderlying());
assertEquals(map1, map2);
assertEquals(map1, map3);
map2.put("2", "2");
assertTrue(! copyOnWrite || map1.getUnderlying() != map2.getUnderlying());
assertTrue(! copyOnWrite || map1.getUnderlying() == map3.getUnderlying());
assertFalse(map1.equals(map2));
assertEquals(map1, map3);
/* DataMap containing DataMap */
DataMap map2_1 = new DataMap(referenceMap1);
map2_1.put("2_1", "2_1");
map2.put("map2_1", map2_1);
assertTrue(map2.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
DataMap map4 = map2.copy();
assertTrue(! copyOnWrite || map4.getUnderlying() != map2.getUnderlying());
assertTrue(! copyOnWrite || map4.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertTrue(! copyOnWrite || map2.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertEquals(map4, map2);
DataMap map5 = map4.copy();
assertTrue(! copyOnWrite || map5.getUnderlying() != map2.getUnderlying());
assertTrue(! copyOnWrite || map5.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertTrue(! copyOnWrite || map2.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertEquals(map5, map4);
assertEquals(map5, map2);
map5.getDataMap("map2_1").put("x", "x");
assertTrue(! copyOnWrite || map5.getDataMap("map2_1").getUnderlying() != map2_1.getUnderlying());
assertTrue(! copyOnWrite || map4.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertFalse(map5.getDataMap("map2_1").equals(map2_1));
assertEquals(map4.getDataMap("map2_1"), map2_1);
/* DataList with only primitive types */
DataList list1 = new DataList(referenceList1);
DataList list2 = list1.copy();
DataList list3 = list2.copy();
assertTrue(! copyOnWrite || list1.getUnderlying() == list2.getUnderlying());
assertTrue(! copyOnWrite || list1.getUnderlying() == list3.getUnderlying());
assertEquals(list1, list2);
assertEquals(list1, list3);
list2.add("2");
assertTrue(! copyOnWrite || list1.getUnderlying() != list2.getUnderlying());
assertTrue(! copyOnWrite || list1.getUnderlying() == list3.getUnderlying());
assertFalse(list1.equals(list2));
assertEquals(list1, list3);
/* DataList containing DataList */
DataList list2_1 = new DataList(referenceList1);
list2_1.add("2_1");
list2.add(0, list2_1);
assertTrue(! copyOnWrite || list2.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertFalse(list2.equals(list1));
assertFalse(list2.equals(list3));
assertEquals(list1, list3);
DataList list4 = list2.copy();
assertTrue(! copyOnWrite || list4.getUnderlying() != list2.getUnderlying());
assertTrue(! copyOnWrite || list4.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || list2.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertEquals(list4, list2);
assertEquals(list4.getDataList(0), list2_1);
assertEquals(list2.getDataList(0), list2_1);
DataList list5 = list4.copy();
assertTrue(! copyOnWrite || list5.getUnderlying() != list2.getUnderlying());
assertTrue(! copyOnWrite || list5.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || list2.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertEquals(list5, list4);
assertEquals(list5.getDataList(0), list2_1);
assertEquals(list2.getDataList(0), list2_1);
/* DataMap containing DataList */
DataMap map6 = map1.copy();
assertTrue(! copyOnWrite || map6.getUnderlying() == map1.getUnderlying());
assertEquals(map6, map1);
map6.put("list2_1", list2_1);
assertTrue(! copyOnWrite || map6.getUnderlying() != map1.getUnderlying());
assertFalse(map6.equals(map1));
DataMap map7 = map6.copy();
assertTrue(! copyOnWrite || map7.getUnderlying() != map6.getUnderlying());
assertTrue(! copyOnWrite || map7.getDataList("list2_1").getUnderlying() == list2_1.getUnderlying());
assertEquals(map7, map6);
assertEquals(map7.getDataList("list2_1"), list2_1);
DataMap map8 = map6.copy();
assertTrue(! copyOnWrite || map8.getUnderlying() != map6.getUnderlying());
assertTrue(! copyOnWrite || map8.getDataList("list2_1").getUnderlying() == list2_1.getUnderlying());
assertEquals(map8, map6);
assertEquals(map8.getDataList("list2_1"), list2_1);
map7.getDataList("list2_1").remove(0);
assertTrue(! copyOnWrite || map7.getDataList("list2_1").getUnderlying() != list2_1.getUnderlying());
assertTrue(! copyOnWrite || map8.getDataList("list2_1").getUnderlying() == list2_1.getUnderlying());
assertFalse(map7.getDataList("list2_1").equals(list2_1));
assertEquals(map8.getDataList("list2_1"), list2_1);
/* DataList containing DataMap */
DataList list6 = list1.copy();
assertTrue(! copyOnWrite || list6.getUnderlying() == list1.getUnderlying());
assertEquals(list6, list1);
list6.add(0, map2_1);
assertTrue(! copyOnWrite || list6.getUnderlying() != list1.getUnderlying());
assertFalse(list6.equals(list1));
DataList list7 = list6.copy();
assertTrue(! copyOnWrite || list7.getUnderlying() != list6.getUnderlying());
assertTrue(! copyOnWrite || list7.getDataMap(0).getUnderlying() == map2_1.getUnderlying());
assertEquals(list7, list6);
assertEquals(list7.getDataMap(0), map2_1);
DataList list8 = list6.copy();
assertTrue(! copyOnWrite || list8.getUnderlying() != list6.getUnderlying());
assertTrue(! copyOnWrite || list8.getDataMap(0).getUnderlying() == map2_1.getUnderlying());
assertEquals(list8, list6);
assertEquals(list8.getDataMap(0), map2_1);
list7.getDataMap(0).remove(RM1_BOOLEAN_KEY);
assertTrue(! copyOnWrite || list7.getDataMap(0).getUnderlying() != map2_1.getUnderlying());
assertTrue(! copyOnWrite || list8.getDataMap(0).getUnderlying() == map2_1.getUnderlying());
assertFalse(list7.getDataMap(0).equals(map2_1));
assertEquals(list8.getDataMap(0), map2_1);
/* DataMap containing both DataList and DataMap */
DataMap map10 = map1.copy();
assertTrue(! copyOnWrite || map10.getUnderlying() == map1.getUnderlying());
assertEquals(map10, map1);
map10.put("map2_1", map2_1);
map10.put("list2_1", list2_1);
assertTrue(! copyOnWrite || map10.getUnderlying() != map1.getUnderlying());
assertFalse(map10.equals(map1));
DataMap map11 = map10.copy();
assertTrue(! copyOnWrite || map11.getUnderlying() != map10.getUnderlying());
assertTrue(! copyOnWrite || map11.getDataList("list2_1").getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || map11.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertEquals(map11, map10);
assertEquals(map11.getDataList("list2_1"), list2_1);
assertEquals(map11.getDataMap("map2_1"), map2_1);
DataMap map12 = map11.copy();
assertTrue(! copyOnWrite || map12.getUnderlying() != map11.getUnderlying());
assertTrue(! copyOnWrite || map12.getDataList("list2_1").getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || map12.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertEquals(map12, map10);
assertEquals(map12.getDataList("list2_1"), list2_1);
assertEquals(map12.getDataMap("map2_1"), map2_1);
map12.getDataList("list2_1").remove(1);
assertTrue(! copyOnWrite || map12.getDataList("list2_1").getUnderlying() != list2_1.getUnderlying());
assertTrue(! copyOnWrite || map11.getDataList("list2_1").getUnderlying() == list2_1.getUnderlying());
assertFalse(map12.getDataList("list2_1").equals(list2_1));
assertEquals(map11.getDataList("list2_1"), list2_1);
DataMap map13 = map10.copy();
assertTrue(! copyOnWrite || map13.getUnderlying() != map10.getUnderlying());
assertTrue(! copyOnWrite || map13.getDataList("list2_1").getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || map13.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertEquals(map13, map10);
assertEquals(map13.getDataList("list2_1"), list2_1);
assertEquals(map13.getDataMap("map2_1"), map2_1);
map13.getDataMap("map2_1").clear();
assertTrue(! copyOnWrite || map13.getDataMap("map2_1").getUnderlying() != map2_1.getUnderlying());
assertTrue(! copyOnWrite || map10.getDataMap("map2_1").getUnderlying() == map2_1.getUnderlying());
assertFalse(map13.getDataMap("map2_1").equals(map2_1));
assertEquals(map10.getDataMap("map2_1"), map2_1);
/* DataList containing both DataList and DataMap */
DataList list10 = list1.copy();
assertTrue(! copyOnWrite || list10.getUnderlying() == list1.getUnderlying());
assertEquals(list10, list1);
list10.add(0, list2_1);
list10.add(1, map2_1);
assertTrue(! copyOnWrite || list10.getUnderlying() != list1.getUnderlying());
assertFalse(list10.equals(list1));
DataList list11 = list10.copy();
assertTrue(! copyOnWrite || list11.getUnderlying() != list10.getUnderlying());
assertTrue(! copyOnWrite || list11.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || list11.getDataMap(1).getUnderlying() == map2_1.getUnderlying());
assertEquals(list11, list10);
assertEquals(list11.getDataList(0), list2_1);
assertEquals(list11.getDataMap(1), map2_1);
DataList list12 = list11.copy();
assertTrue(! copyOnWrite || list12.getUnderlying() != list11.getUnderlying());
assertTrue(! copyOnWrite || list12.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || list12.getDataMap(1).getUnderlying() == map2_1.getUnderlying());
assertEquals(list12, list11);
assertEquals(list12.getDataList(0), list2_1);
assertEquals(list12.getDataMap(1), map2_1);
list12.getDataList(0).set(2, "xxx");
assertTrue(! copyOnWrite || list12.getDataList(0).getUnderlying() != list2_1.getUnderlying());
assertTrue(! copyOnWrite || list11.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertFalse(list12.getDataList(0).equals(list2_1));
assertEquals(list11.getDataList(0), list2_1);
DataList list13 = list10.copy();
assertTrue(! copyOnWrite || list13.getUnderlying() != list10.getUnderlying());
assertTrue(! copyOnWrite || list13.getDataList(0).getUnderlying() == list2_1.getUnderlying());
assertTrue(! copyOnWrite || list13.getDataMap(1).getUnderlying() == map2_1.getUnderlying());
assertEquals(list13, list10);
assertEquals(list13.getDataList(0), list2_1);
assertEquals(list13.getDataMap(1), map2_1);
list13.getDataMap(1).put("x", "XX");
assertTrue(! copyOnWrite || list13.getDataMap(1).getUnderlying() != map2_1.getUnderlying());
assertTrue(! copyOnWrite || list10.getDataMap(1).getUnderlying() == map2_1.getUnderlying());
assertFalse(list13.getDataMap(1).equals(map2_1));
assertEquals(list10.getDataMap(1), map2_1);
/* Diamond shaped object graph */
{
DataMap a = new DataMap();
DataList b = new DataList();
DataList c = new DataList();
DataMap d = new DataMap();
a.put("b", b);
a.put("c", c);
b.add(d);
c.add(d);
DataMap aCopy = a.copy();
DataList bCopy = (DataList) aCopy.get("b");
DataList cCopy = (DataList) aCopy.get("c");
assertSame(bCopy.get(0), cCopy.get(0));
}
{
DataList a = new DataList();
DataMap b = new DataMap();
DataMap c = new DataMap();
DataList d = new DataList();
a.add(b);
a.add(c);
b.put("d", d);
c.put("d", d);
DataList aCopy = a.copy();
DataMap bCopy = (DataMap) aCopy.get(0);
DataMap cCopy = (DataMap) aCopy.get(1);
assertSame(bCopy.get("d"), cCopy.get("d"));
}
/* Circular object graph */
{
// DataList only
DataList a = new DataList();
a.disableChecker();
a.add(a);
DataList aCopy = a.copy();
assertSame(aCopy.get(0), aCopy);
DataList b = new DataList();
b.disableChecker();
a.add(b);
b.add(a);
aCopy = a.copy();
DataList bCopy = (DataList) aCopy.get(1);
assertSame(aCopy.get(0), aCopy);
assertSame(bCopy.get(0), aCopy);
}
/* Circular object graph */
{
// DataMap only
DataMap a = new DataMap();
a.disableChecker();
a.put("a", a);
DataMap aCopy = a.copy();
assertSame(aCopy.get("a"), aCopy);
DataMap b = new DataMap();
b.disableChecker();
a.put("b", b);
b.put("a", a);
aCopy = a.copy();
DataMap bCopy = (DataMap) aCopy.get("b");
assertSame(aCopy.get("a"), aCopy);
assertSame(bCopy.get("a"), aCopy);
}
} |
@Override
public int run(String[] args) throws Exception {
YarnConfiguration yarnConf =
getConf() == null ? new YarnConfiguration() : new YarnConfiguration(
getConf());
boolean isHAEnabled =
yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
YarnConfiguration.DEFAULT_RM_HA_ENABLED);
if (args.length < 1) {
printUsage("", isHAEnabled);
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = args[i++];
exitCode = 0;
if ("-help".equals(cmd)) {
if (i < args.length) {
printUsage(args[i], isHAEnabled);
} else {
printHelp("", isHAEnabled);
}
return exitCode;
}
if (USAGE.containsKey(cmd)) {
if (isHAEnabled) {
return super.run(args);
}
System.out.println("Cannot run " + cmd
+ " when ResourceManager HA is not enabled");
return -1;
}
//
// verify that we have enough command line parameters
//
String subClusterId = StringUtils.EMPTY;
if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
"-refreshNodesResources".equals(cmd) ||
"-refreshServiceAcl".equals(cmd) ||
"-refreshUserToGroupsMappings".equals(cmd) ||
"-refreshSuperUserGroupsConfiguration".equals(cmd) ||
"-refreshClusterMaxPriority".equals(cmd)) {
subClusterId = parseSubClusterId(args, isHAEnabled);
// If we enable Federation mode, the number of args may be either one or three.
// Example: -refreshQueues or -refreshQueues -subClusterId SC-1
if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) {
printUsage(cmd, isHAEnabled);
return exitCode;
} else if (!isYarnFederationEnabled(getConf()) && args.length != 1) {
// If Federation mode is not enabled, then the number of args can only be one.
// Example: -refreshQueues
printUsage(cmd, isHAEnabled);
return exitCode;
}
}
// If it is federation mode, we will print federation mode information
if (isYarnFederationEnabled(getConf())) {
System.out.println("Using YARN Federation mode.");
}
try {
if ("-refreshQueues".equals(cmd)) {
exitCode = refreshQueues(subClusterId);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
} else if ("-refreshNodesResources".equals(cmd)) {
exitCode = refreshNodesResources(subClusterId);
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings(subClusterId);
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration(subClusterId);
} else if ("-refreshAdminAcls".equals(cmd)) {
exitCode = refreshAdminAcls(subClusterId);
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcls(subClusterId);
} else if ("-refreshClusterMaxPriority".equals(cmd)) {
exitCode = refreshClusterMaxPriority(subClusterId);
} else if ("-getGroups".equals(cmd)) {
String[] usernames = Arrays.copyOfRange(args, i, args.length);
exitCode = getGroups(usernames);
} else if ("-updateNodeResource".equals(cmd)) {
exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId);
} else if ("-addToClusterNodeLabels".equals(cmd)) {
exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-removeFromClusterNodeLabels".equals(cmd)) {
exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-replaceLabelsOnNode".equals(cmd)) {
exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("", isHAEnabled);
}
} catch (IllegalArgumentException arge) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd, isHAEnabled);
} catch (RemoteException e) {
//
// This is a error returned by hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": "
+ content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
}
} catch (Exception e) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
}
if (null != localNodeLabelsManager) {
localNodeLabelsManager.stop();
}
return exitCode;
} | @Test
public void testRefreshNodesGracefulInvalidArgs() throws Exception {
// invalid graceful timeout parameter
String[] invalidArgs = {"-refreshNodes", "-ginvalid", "invalid", "-client"};
assertEquals(-1, rmAdminCLI.run(invalidArgs));
// invalid timeout
String[] invalidTimeoutArgs = {"-refreshNodes", "-g", "invalid", "-client"};
assertEquals(-1, rmAdminCLI.run(invalidTimeoutArgs));
// negative timeout
String[] negativeTimeoutArgs = {"-refreshNodes", "-g", "-1000", "-client"};
assertEquals(-1, rmAdminCLI.run(negativeTimeoutArgs));
// invalid tracking mode
String[] invalidTrackingArgs = {"-refreshNodes", "-g", "1", "-foo"};
assertEquals(-1, rmAdminCLI.run(invalidTrackingArgs));
} |
public static RestServerConfig forPublic(Integer rebalanceTimeoutMs, Map<?, ?> props) {
return new PublicConfig(rebalanceTimeoutMs, props);
} | @Test
public void testInvalidSslClientAuthConfig() {
Map<String, String> props = new HashMap<>();
props.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "abc");
ConfigException ce = assertThrows(ConfigException.class, () -> RestServerConfig.forPublic(null, props));
assertTrue(ce.getMessage().contains(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG));
} |
@Override
public void setAttemptCount(JobVertexID jobVertexId, int subtaskIndex, int attemptNumber) {
Preconditions.checkArgument(subtaskIndex >= 0);
Preconditions.checkArgument(attemptNumber >= 0);
final List<Integer> attemptCounts =
vertexSubtaskToAttemptCounts.computeIfAbsent(
jobVertexId, ignored -> new ArrayList<>(32));
while (subtaskIndex >= attemptCounts.size()) {
attemptCounts.add(0);
}
attemptCounts.set(subtaskIndex, attemptNumber);
} | @Test
void testSetAttemptCountRejectsNegativeAttemptCount() {
final DefaultVertexAttemptNumberStore vertexAttemptNumberStore =
new DefaultVertexAttemptNumberStore();
assertThatThrownBy(() -> vertexAttemptNumberStore.setAttemptCount(new JobVertexID(), 0, -1))
.isInstanceOf(IllegalArgumentException.class);
} |
@Override
public Object run() {
if (field != null) {
field.setAccessible(true);
}
return field;
} | @Test
public void run() throws NoSuchFieldException {
final Field testField = TestField.class.getDeclaredField("testField");
AccessController.doPrivileged(new FieldAccessAction(testField));
final Optional<Object> override = ReflectUtils.getFieldValue(testField, "override");
Assert.assertTrue(override.isPresent());
Assert.assertEquals(override.get(), true);
} |
@Override
public RecoverableFsDataOutputStream open(Path path) throws IOException {
LOGGER.trace("Opening output stream for path {}", path);
Preconditions.checkNotNull(path);
GSBlobIdentifier finalBlobIdentifier = BlobUtils.parseUri(path.toUri());
return new GSRecoverableFsDataOutputStream(storage, options, finalBlobIdentifier);
} | @Test(expected = IllegalArgumentException.class)
public void testOpenWithMissingObjectName() throws IOException {
Path path = new Path("gs://foo");
writer.open(path);
} |
public static String toUriAuthority(NetworkEndpoint networkEndpoint) {
return toHostAndPort(networkEndpoint).toString();
} | @Test
public void toUriString_withIpV4AndPortEndpoint_returnsIpAddressAndPort() {
NetworkEndpoint ipV4AndPortEndpoint =
NetworkEndpoint.newBuilder()
.setType(NetworkEndpoint.Type.IP_PORT)
.setPort(Port.newBuilder().setPortNumber(8888))
.setIpAddress(
IpAddress.newBuilder().setAddress("1.2.3.4").setAddressFamily(AddressFamily.IPV4))
.build();
assertThat(NetworkEndpointUtils.toUriAuthority(ipV4AndPortEndpoint)).isEqualTo("1.2.3.4:8888");
} |
@Override
public void upgrade() {
if (configService.get(MigrationCompleted.class) != null) {
LOG.debug("Migration already completed.");
return;
}
var previousMigration = Optional.ofNullable(configService.get(V20191219090834_AddSourcesPage.MigrationCompleted.class));
var previousInstallation = previousMigration.flatMap(this::previousInstallation);
var notPreviouslyInstalled = previousInstallation.isEmpty();
final ContentPack contentPack = readContentPack();
var contentPackShouldBeUninstalled = previousInstallation
.filter(this::userHasNotModifiedSourcesPage);
var notLocallyModified = contentPackShouldBeUninstalled.isPresent();
var previousDashboard = contentPackShouldBeUninstalled.flatMap(this::dashboardFromInstallation);
var pack = insertContentPack(contentPack)
.orElseThrow(() -> {
configService.write(MigrationCompleted.create(contentPack.id().toString(), false, false));
return new ContentPackException("Content pack " + contentPack.id() + " with this revision " + contentPack.revision() + " already found!");
});
contentPackShouldBeUninstalled.ifPresent(this::uninstallContentPack);
if (notPreviouslyInstalled || notLocallyModified) {
var newInstallation = installContentPack(pack);
assert (newInstallation != null);
previousDashboard.ifPresent(dashboard -> fixupNewDashboardId(dashboard, newInstallation));
} else {
notificationService.publishIfFirst(notificationService.buildNow()
.addType(Notification.Type.GENERIC)
.addSeverity(Notification.Severity.NORMAL)
.addDetail("title", "Updating Sources Dashboard")
.addDetail("description", """
While updating the Sources Dashboard, it was detected that the previous version was modified locally. To save these modifications from getting lost,
a new version of the content pack containing the Sources Dashboard was uploaded, but not installed.
If you want to use the new version of the dashboard, you can go to "System" -> "Content Packs" -> "Sources Page Dashboard" and install version 2.
In addition, you can either keep your current "Sources" dashboard (having two "Sources" dashboards) or uninstall version 1 of the content pack to remove it.
"""));
}
configService.write(MigrationCompleted.create(pack.id().toString(), notPreviouslyInstalled || notLocallyModified, contentPackShouldBeUninstalled.isPresent()));
}
private void fixupNewDashboardId(Document previousDashboard, ContentPackInstallation newInstallation) {
var newDashboard = dashboardFromInstallation(newInstallation);
var previousDashboardId = previousDashboard.getObjectId("_id");
newDashboard.ifPresent(dashboard -> {
var newDashboardId = dashboard.getObjectId("_id");
dashboard.append("_id", previousDashboardId);
views.deleteOne(Filters.eq("_id", newDashboardId));
views.insertOne(dashboard);
contentPackInstallations.updateOne(Filters.eq("_id", newInstallation.id()), Updates.set("entities.0.id", previousDashboardId.toHexString()));
});
}
private Optional<ContentPackInstallation> previousInstallation(V20191219090834_AddSourcesPage.MigrationCompleted previousMigration) {
return Optional.ofNullable(previousMigration.contentPackId())
.map(id -> contentPackInstallationPersistenceService.findByContentPackId(ModelId.of(id)))
.flatMap(installations -> installations.stream()
.filter(installation -> installation.contentPackRevision() == 1
&& installation.createdBy().equals("admin")
&& installation.comment().equals("Add Sources Page"))
.findFirst());
}
private Optional<Document> dashboardFromInstallation(ContentPackInstallation installation) {
return Optional.ofNullable(installation.entities())
.flatMap(entities -> entities.stream().findFirst())
.map(Identified::id)
.map(ModelId::id)
.flatMap(dashboardId -> Optional.ofNullable(views.find(Filters.eq("_id", new ObjectId(dashboardId))).first()));
}
private boolean userHasNotModifiedSourcesPage(ContentPackInstallation previousInstallation) {
var previousDashboard = dashboardFromInstallation(previousInstallation)
.flatMap(dashboard -> Optional.ofNullable(dashboard.getString("search_id")))
.flatMap(searchId -> Optional.ofNullable(searches.find(Filters.eq("_id", new ObjectId(searchId))).first()));
var userHasModifiedSourcesPage = previousDashboard
.map(dashboard -> dashboard.getDate("created_at"))
.map(createdAt -> !createdAt.equals(UNMODIFIED_SOURCES_SEARCH_DATE))
.orElse(false);
return !userHasModifiedSourcesPage;
}
private ContentPackInstallation installContentPack(ContentPack contentPack) {
return contentPackService.installContentPack(contentPack, Collections.emptyMap(), "Add Sources Page V2", "admin");
}
private Optional<ContentPack> insertContentPack(ContentPack contentPack) {
return this.contentPackPersistenceService.insert(contentPack);
}
private void uninstallContentPack(ContentPackInstallation contentPackInstallation) {
contentPackPersistenceService.findByIdAndRevision(contentPackInstallation.contentPackId(), contentPackInstallation.contentPackRevision())
.ifPresent(contentPack -> contentPackService.uninstallContentPack(contentPack, contentPackInstallation));
}
private ContentPack readContentPack() {
try {
final URL contentPackURL = V20230601104500_AddSourcesPageV2.class.getResource("V20230601104500_AddSourcesPage_V2_Content_Pack.json");
return this.objectMapper.readValue(contentPackURL, ContentPack.class);
} catch (IOException e) {
throw new RuntimeException("Unable to read content pack source in migration: ", e);
}
}
@JsonAutoDetect
@AutoValue
@WithBeanGetter
public static abstract class MigrationCompleted {
@JsonProperty("content_pack_id")
public abstract String contentPackId();
@JsonProperty("installed_content_pack")
public abstract boolean installedContentPack();
@JsonProperty("uninstalled_previous_revision")
public abstract boolean uninstalledPreviousRevision();
@JsonCreator
public static MigrationCompleted create(@JsonProperty("content_pack_id") final String contentPackId,
@JsonProperty("installed_content_pack") boolean installedContentPack,
@JsonProperty("uninstalled_previous_revision") boolean uninstalledPreviousRevision) {
return new AutoValue_V20230601104500_AddSourcesPageV2_MigrationCompleted(contentPackId, installedContentPack, uninstalledPreviousRevision);
}
}
} | @Test
@MongoDBFixtures({"V20230601104500_AddSourcesPageV2/previousMigration.json", "V20230601104500_AddSourcesPageV2/previousInstallationWithLocalModifications.json"})
void previousInstallationWithLocalModificationsIsKept() {
previousMigrationHasRun();
thisMigrationHasNotRun();
when(notificationService.buildNow()).thenReturn(new NotificationImpl().addTimestamp(Tools.nowUTC()));
this.migration.upgrade();
var migrationCompleted = expectMigrationCompleted();
assertThat(migrationCompleted.contentPackId()).isNotBlank();
assertThat(migrationCompleted.installedContentPack()).isFalse();
assertThat(migrationCompleted.uninstalledPreviousRevision()).isFalse();
var notification = expectNotificationPublished();
assertThat(notification.getType()).isEqualTo(Notification.Type.GENERIC);
assertThat(notification.getSeverity()).isEqualTo(Notification.Severity.NORMAL);
assertThat(notification.getDetail("title")).isEqualTo("Updating Sources Dashboard");
} |
@Override
public boolean equals(Object o) {
return o instanceof AddOn
&& TextUtils.equals(((AddOn) o).getId(), getId())
&& ((AddOn) o).getApiVersion() == getApiVersion();
} | @Test
public void testEquals() {
TestableAddOn addOn1 = new TestableAddOn("id1", "name", 8);
TestableAddOn addOn2 = new TestableAddOn("id2", "name", 8);
TestableAddOn addOn11 = new TestableAddOn("id1", "name111", 8);
TestableAddOn addOn1DifferentApiVersion = new TestableAddOn("id1", "name", 7);
Assert.assertEquals(addOn1, addOn11);
Assert.assertNotEquals(addOn1, addOn2);
Assert.assertEquals(addOn1.hashCode(), addOn11.hashCode());
Assert.assertNotEquals(addOn1.hashCode(), addOn2.hashCode());
Assert.assertNotEquals(addOn1, addOn1DifferentApiVersion);
Assert.assertNotEquals(new Object(), addOn1);
} |
@Override
public Optional<KsqlConstants.PersistentQueryType> getPersistentQueryType() {
if (!queryPlan.isPresent()) {
return Optional.empty();
}
// CREATE_AS and CREATE_SOURCE commands contain a DDL command and a Query plan.
if (ddlCommand.isPresent()) {
if (ddlCommand.get() instanceof CreateTableCommand
&& ((CreateTableCommand) ddlCommand.get()).getIsSource()) {
return Optional.of(KsqlConstants.PersistentQueryType.CREATE_SOURCE);
} else {
return Optional.of(KsqlConstants.PersistentQueryType.CREATE_AS);
}
} else {
// INSERT INTO persistent queries are the only queries types that exist without a
// DDL command linked to the plan.
return Optional.of(KsqlConstants.PersistentQueryType.INSERT);
}
} | @Test
public void shouldReturnCreateAsPersistentQueryTypeOnCreateTable() {
// Given:
final CreateTableCommand ddlCommand = Mockito.mock(CreateTableCommand.class);
when(ddlCommand.getIsSource()).thenReturn(false);
final KsqlPlanV1 plan = new KsqlPlanV1(
"stmt",
Optional.of(ddlCommand),
Optional.of(queryPlan1));
// When/Then:
assertThat(plan.getPersistentQueryType(),
is(Optional.of(KsqlConstants.PersistentQueryType.CREATE_AS)));
} |
public Span nextSpan(Message message) {
TraceContextOrSamplingFlags extracted =
extractAndClearTraceIdProperties(processorExtractor, message, message);
Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.
// When an upstream context was not present, lookup keys are unlikely added
if (extracted.context() == null && !result.isNoop()) {
// simplify code by re-using an existing MessagingRequest impl
tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
}
return result;
} | @Test void nextSpan_uses_current_context() {
Span child;
try (Scope scope = tracing.currentTraceContext().newScope(parent)) {
child = jmsTracing.nextSpan(message);
}
assertChildOf(child.context(), parent);
} |
static Set<String> getConfiguredSuperUsers(Map<String, ?> configs) {
Object configValue = configs.get(SUPER_USERS_CONFIG);
if (configValue == null) return Collections.emptySet();
String[] values = configValue.toString().split(";");
Set<String> result = new HashSet<>();
for (String value : values) {
String v = value.trim();
if (!v.isEmpty()) {
SecurityUtils.parseKafkaPrincipal(v);
result.add(v);
}
}
return result;
} | @Test
public void testGetConfiguredSuperUsers() {
assertEquals(Collections.emptySet(),
getConfiguredSuperUsers(Collections.emptyMap()));
assertEquals(Collections.emptySet(),
getConfiguredSuperUsers(Collections.singletonMap(SUPER_USERS_CONFIG, " ")));
assertEquals(new HashSet<>(asList("User:bob", "User:alice")),
getConfiguredSuperUsers(Collections.singletonMap(SUPER_USERS_CONFIG, "User:bob;User:alice ")));
assertEquals(new HashSet<>(asList("User:bob", "User:alice")),
getConfiguredSuperUsers(Collections.singletonMap(SUPER_USERS_CONFIG, "; User:bob ; User:alice ")));
assertEquals("expected a string in format principalType:principalName but got bob",
assertThrows(IllegalArgumentException.class, () -> getConfiguredSuperUsers(
Collections.singletonMap(SUPER_USERS_CONFIG, "bob;:alice"))).getMessage());
} |
public void deleteByGroupUuid(DbSession dbSession, String groupUuid) {
mapper(dbSession).deleteByGroupUuid(groupUuid);
} | @Test
void deleteFromGroupUuid_shouldNotFail_whenNoGroup() {
assertThatCode(() -> scimGroupDao.deleteByGroupUuid(db.getSession(), randomAlphanumeric(6))).doesNotThrowAnyException();
} |
@Override
public String doLayout(ILoggingEvent event) {
StringWriter output = new StringWriter();
try (JsonWriter json = new JsonWriter(output)) {
json.beginObject();
if (!"".equals(nodeName)) {
json.name("nodename").value(nodeName);
}
json.name("process").value(processKey);
for (Map.Entry<String, String> entry : event.getMDCPropertyMap().entrySet()) {
if (entry.getValue() != null && !exclusions.contains(entry.getKey())) {
json.name(entry.getKey()).value(entry.getValue());
}
}
json
.name("timestamp").value(DATE_FORMATTER.format(Instant.ofEpochMilli(event.getTimeStamp())))
.name("severity").value(event.getLevel().toString())
.name("logger").value(event.getLoggerName())
.name("message").value(NEWLINE_REGEXP.matcher(event.getFormattedMessage()).replaceAll("\r"));
IThrowableProxy tp = event.getThrowableProxy();
if (tp != null) {
json.name("stacktrace").beginArray();
int nbOfTabs = 0;
while (tp != null) {
printFirstLine(json, tp, nbOfTabs);
render(json, tp, nbOfTabs);
tp = tp.getCause();
nbOfTabs++;
}
json.endArray();
}
json.endObject();
} catch (Exception e) {
e.printStackTrace();
throw new IllegalStateException("BUG - fail to create JSON", e);
}
output.write(System.lineSeparator());
return output.toString();
} | @Test
public void test_log_with_suppressed_throwable() {
Exception exception = new Exception("BOOM");
exception.addSuppressed(new IllegalStateException("foo"));
LoggingEvent event = new LoggingEvent("org.foundation.Caller", (Logger) LoggerFactory.getLogger("the.logger"), Level.WARN, "the message", exception, new Object[0]);
String log = underTest.doLayout(event);
JsonLog json = new Gson().fromJson(log, JsonLog.class);
assertThat(json.stacktrace).hasSizeGreaterThan(5);
assertThat(json.stacktrace[0]).isEqualTo("java.lang.Exception: BOOM");
assertThat(json.stacktrace).contains("Suppressed: java.lang.IllegalStateException: foo");
} |
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
JsonObject json = JsonParser.parseString(msg.getData()).getAsJsonObject();
String tmp;
if (msg.getOriginator().getEntityType() != EntityType.DEVICE) {
ctx.tellFailure(msg, new RuntimeException("Message originator is not a device entity!"));
} else if (!json.has("method")) {
ctx.tellFailure(msg, new RuntimeException("Method is not present in the message!"));
} else if (!json.has("params")) {
ctx.tellFailure(msg, new RuntimeException("Params are not present in the message!"));
} else {
int requestId = json.has("requestId") ? json.get("requestId").getAsInt() : random.nextInt();
boolean restApiCall = msg.isTypeOf(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE);
tmp = msg.getMetaData().getValue("oneway");
boolean oneway = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp);
tmp = msg.getMetaData().getValue(DataConstants.PERSISTENT);
boolean persisted = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp);
tmp = msg.getMetaData().getValue("requestUUID");
UUID requestUUID = !StringUtils.isEmpty(tmp) ? UUID.fromString(tmp) : Uuids.timeBased();
tmp = msg.getMetaData().getValue("originServiceId");
String originServiceId = !StringUtils.isEmpty(tmp) ? tmp : null;
tmp = msg.getMetaData().getValue(DataConstants.EXPIRATION_TIME);
long expirationTime = !StringUtils.isEmpty(tmp) ? Long.parseLong(tmp) : (System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(config.getTimeoutInSeconds()));
tmp = msg.getMetaData().getValue(DataConstants.RETRIES);
Integer retries = !StringUtils.isEmpty(tmp) ? Integer.parseInt(tmp) : null;
String params = parseJsonData(json.get("params"));
String additionalInfo = parseJsonData(json.get(DataConstants.ADDITIONAL_INFO));
RuleEngineDeviceRpcRequest request = RuleEngineDeviceRpcRequest.builder()
.oneway(oneway)
.method(json.get("method").getAsString())
.body(params)
.tenantId(ctx.getTenantId())
.deviceId(new DeviceId(msg.getOriginator().getId()))
.requestId(requestId)
.requestUUID(requestUUID)
.originServiceId(originServiceId)
.expirationTime(expirationTime)
.retries(retries)
.restApiCall(restApiCall)
.persisted(persisted)
.additionalInfo(additionalInfo)
.build();
ctx.getRpcService().sendRpcRequestToDevice(request, ruleEngineDeviceRpcResponse -> {
if (ruleEngineDeviceRpcResponse.getError().isEmpty()) {
TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), ruleEngineDeviceRpcResponse.getResponse().orElse(TbMsg.EMPTY_JSON_OBJECT));
ctx.enqueueForTellNext(next, TbNodeConnectionType.SUCCESS);
} else {
TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), wrap("error", ruleEngineDeviceRpcResponse.getError().get().name()));
ctx.enqueueForTellFailure(next, ruleEngineDeviceRpcResponse.getError().get().name());
}
});
ctx.ack(msg);
}
} | @Test
public void givenRpcResponseWithoutError_whenOnMsg_thenSendsRpcRequest() {
TbMsg outMsg = TbMsg.newMsg(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE, DEVICE_ID, TbMsgMetaData.EMPTY, TbMsg.EMPTY_JSON_OBJECT);
given(ctxMock.getRpcService()).willReturn(rpcServiceMock);
given(ctxMock.getTenantId()).willReturn(TENANT_ID);
// TODO: replace deprecated method newMsg()
given(ctxMock.newMsg(any(), any(String.class), any(), any(), any(), any())).willReturn(outMsg);
willAnswer(invocation -> {
Consumer<RuleEngineDeviceRpcResponse> consumer = invocation.getArgument(1);
RuleEngineDeviceRpcResponse rpcResponseMock = mock(RuleEngineDeviceRpcResponse.class);
given(rpcResponseMock.getError()).willReturn(Optional.empty());
given(rpcResponseMock.getResponse()).willReturn(Optional.of(TbMsg.EMPTY_JSON_OBJECT));
consumer.accept(rpcResponseMock);
return null;
}).given(rpcServiceMock).sendRpcRequestToDevice(any(RuleEngineDeviceRpcRequest.class), any(Consumer.class));
TbMsg msg = TbMsg.newMsg(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE, DEVICE_ID, TbMsgMetaData.EMPTY, MSG_DATA);
node.onMsg(ctxMock, msg);
then(ctxMock).should().enqueueForTellNext(outMsg, TbNodeConnectionType.SUCCESS);
then(ctxMock).should().ack(msg);
} |
public static String getSelectQuery(@Nullable String table, @Nullable String query) {
if (table != null && query != null) {
throw new IllegalArgumentException("withTable() can not be used together with withQuery()");
} else if (table != null) {
return "SELECT * FROM " + SingleStoreUtil.escapeIdentifier(table);
} else if (query != null) {
return query;
} else {
throw new IllegalArgumentException("One of withTable() or withQuery() is required");
}
} | @Test
public void testGetSelectQueryNonNullQuery() {
assertEquals(
"SELECT * FROM table", SingleStoreUtil.getSelectQuery(null, "SELECT * FROM table"));
} |
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
Map<String, Long> recentlyUnloadedBundles,
Map<String, Long> recentlyUnloadedBrokers) {
final var conf = context.brokerConfiguration();
decisionCache.clear();
stats.clear();
Map<String, BrokerLookupData> availableBrokers;
try {
availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
.get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
} catch (ExecutionException | InterruptedException | TimeoutException e) {
counter.update(Failure, Unknown);
log.warn("Failed to fetch available brokers. Stop unloading.", e);
return decisionCache;
}
try {
final var loadStore = context.brokerLoadDataStore();
stats.setLoadDataStore(loadStore);
boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
var skipReason = stats.update(
context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
if (skipReason.isPresent()) {
if (debugMode) {
log.warn(CANNOT_CONTINUE_UNLOAD_MSG
+ " Skipped the load stat update. Reason:{}.",
skipReason.get());
}
counter.update(Skip, skipReason.get());
return decisionCache;
}
counter.updateLoadData(stats.avg, stats.std);
if (debugMode) {
log.info("brokers' load stats:{}", stats);
}
// skip metrics
int numOfBrokersWithEmptyLoadData = 0;
int numOfBrokersWithFewBundles = 0;
final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
boolean transfer = conf.isLoadBalancerTransferEnabled();
if (stats.std() > targetStd
|| isUnderLoaded(context, stats.peekMinBroker(), stats)
|| isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
unloadConditionHitCount++;
} else {
unloadConditionHitCount = 0;
}
if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
}
counter.update(Skip, HitCount);
return decisionCache;
}
while (true) {
if (!stats.hasTransferableBrokers()) {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ " Exhausted target transfer brokers.");
}
break;
}
UnloadDecision.Reason reason;
if (stats.std() > targetStd) {
reason = Overloaded;
} else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
reason = Underloaded;
if (debugMode) {
log.info(String.format("broker:%s is underloaded:%s although "
+ "load std:%.2f <= targetStd:%.2f. "
+ "Continuing unload for this underloaded broker.",
stats.peekMinBroker(),
context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
stats.std(), targetStd));
}
} else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
reason = Overloaded;
if (debugMode) {
log.info(String.format("broker:%s is overloaded:%s although "
+ "load std:%.2f <= targetStd:%.2f. "
+ "Continuing unload for this overloaded broker.",
stats.peekMaxBroker(),
context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
stats.std(), targetStd));
}
} else {
if (debugMode) {
log.info(CANNOT_CONTINUE_UNLOAD_MSG
+ "The overall cluster load meets the target, std:{} <= targetStd:{}."
+ "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
}
break;
}
String maxBroker = stats.pollMaxBroker();
String minBroker = stats.peekMinBroker();
Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
if (maxBrokerLoadData.isEmpty()) {
log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " MaxBrokerLoadData is empty.", maxBroker));
numOfBrokersWithEmptyLoadData++;
continue;
}
if (minBrokerLoadData.isEmpty()) {
log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
numOfBrokersWithEmptyLoadData++;
continue;
}
double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
double offload = (maxLoad - minLoad) / 2;
BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
+ brokerLoadData.getMsgThroughputOut();
double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
+ minBrokerLoadData.get().getMsgThroughputOut();
double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
if (debugMode) {
log.info(String.format(
"Attempting to shed load from broker:%s%s, which has the max resource "
+ "usage:%.2f%%, targetStd:%.2f,"
+ " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
maxBroker, transfer ? " to broker:" + minBroker : "",
maxLoad * 100,
targetStd,
offload * 100,
offloadThroughput / KB
));
}
double trafficMarkedToOffload = 0;
double trafficMarkedToGain = 0;
Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " TopBundlesLoadData is empty.", maxBroker));
numOfBrokersWithEmptyLoadData++;
continue;
}
var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
if (maxBrokerTopBundlesLoadData.size() == 1) {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " Sole namespace bundle:%s is overloading the broker. ",
maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
continue;
}
Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
var minBrokerTopBundlesLoadDataIter =
minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
null;
if (maxBrokerTopBundlesLoadData.isEmpty()) {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " Broker overloaded despite having no bundles", maxBroker));
continue;
}
int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
for (var e : maxBrokerTopBundlesLoadData) {
String bundle = e.bundleName();
if (channel != null && !channel.isOwner(bundle, maxBroker)) {
if (debugMode) {
log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " MaxBroker:%s is not the owner.", bundle, maxBroker));
}
continue;
}
if (recentlyUnloadedBundles.containsKey(bundle)) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " Bundle has been recently unloaded at ts:%d.",
bundle, recentlyUnloadedBundles.get(bundle)));
}
continue;
}
if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " This unload can't meet "
+ "affinity(isolation) or anti-affinity group policies.", bundle));
}
continue;
}
if (remainingTopBundles <= 1) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
+ " less than or equal to 1.",
bundle, maxBroker));
}
break;
}
var bundleData = e.stats();
double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
boolean swap = false;
List<Unload> minToMaxUnloads = new ArrayList<>();
double minBrokerBundleSwapThroughput = 0.0;
if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
// see if we can swap bundles from min to max broker to balance better.
if (transfer && minBrokerTopBundlesLoadDataIter != null) {
var maxBrokerNewThroughput =
maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
- maxBrokerBundleThroughput;
var minBrokerNewThroughput =
minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
+ maxBrokerBundleThroughput;
while (minBrokerTopBundlesLoadDataIter.hasNext()) {
var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
if (!isTransferable(context, availableBrokers,
minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
continue;
}
var minBrokerBundleThroughput =
minBrokerBundleData.stats().msgThroughputIn
+ minBrokerBundleData.stats().msgThroughputOut;
var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
if (maxBrokerNewThroughputTmp < maxBrokerThroughput
&& minBrokerNewThroughputTmp < maxBrokerThroughput) {
minToMaxUnloads.add(new Unload(minBroker,
minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
minBrokerNewThroughput = minBrokerNewThroughputTmp;
minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
if (minBrokerNewThroughput <= maxBrokerNewThroughput
&& maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
swap = true;
break;
}
}
}
}
if (!swap) {
if (debugMode) {
log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
+ " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
+ "greater than the target :%.2f KByte/s.",
bundle,
(trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
trafficMarkedToGain / KB,
(trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
offloadThroughput / KB));
}
break;
}
}
Unload unload;
if (transfer) {
if (swap) {
minToMaxUnloads.forEach(minToMaxUnload -> {
if (debugMode) {
log.info("Decided to gain bundle:{} from min broker:{}",
minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
}
var decision = new UnloadDecision();
decision.setUnload(minToMaxUnload);
decision.succeed(reason);
decisionCache.add(decision);
});
if (debugMode) {
log.info(String.format(
"Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
trafficMarkedToGain += minBrokerBundleSwapThroughput;
}
}
unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
} else {
unload = new Unload(maxBroker, bundle);
}
var decision = new UnloadDecision();
decision.setUnload(unload);
decision.succeed(reason);
decisionCache.add(decision);
trafficMarkedToOffload += maxBrokerBundleThroughput;
remainingTopBundles--;
if (debugMode) {
log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
+ " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
+ " Target:%.2f KByte/s.",
bundle, maxBrokerBundleThroughput / KB,
trafficMarkedToOffload / KB,
trafficMarkedToGain / KB,
(trafficMarkedToOffload - trafficMarkedToGain) / KB,
offloadThroughput / KB));
}
}
if (trafficMarkedToOffload > 0) {
var adjustedOffload =
(trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
stats.offload(maxLoad, minLoad, adjustedOffload);
if (debugMode) {
log.info(
String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
stats, maxLoad, minLoad, adjustedOffload));
}
} else {
numOfBrokersWithFewBundles++;
log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
+ " There is no bundle that can be unloaded in top bundles load data. "
+ "Consider splitting bundles owned by the broker "
+ "to make each bundle serve less traffic "
+ "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
+ " to report more bundles in the top bundles load data.", maxBroker));
}
} // while end
if (debugMode) {
log.info("decisionCache:{}", decisionCache);
}
if (decisionCache.isEmpty()) {
UnloadDecision.Reason reason;
if (numOfBrokersWithEmptyLoadData > 0) {
reason = NoLoadData;
} else if (numOfBrokersWithFewBundles > 0) {
reason = NoBundles;
} else {
reason = HitCount;
}
counter.update(Skip, reason);
} else {
unloadConditionHitCount = 0;
}
} catch (Throwable e) {
log.error("Failed to process unloading. ", e);
this.counter.update(Failure, Unknown);
}
return decisionCache;
} | @Test
// Verifies TransferShedder behavior for an underloaded-broker outlier: with the
// skewed-load fixture, the heaviest bundle on broker98 is expected to be
// transferred to the underloaded broker99, and the load avg/std counters match.
public void testUnderloadOutlier() {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = setupContextLoadSkewedUnderload(100);
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    var expected = new HashSet<UnloadDecision>();
    expected.add(new UnloadDecision(
            new Unload("broker98:8080", "my-tenant/my-namespace98/0x00000000_0x0FFFFFFF",
                    Optional.of("broker99:8080")), Success, Underloaded));
    // NOTE(review): argument order here is (actual, expected) — TestNG convention;
    // confirm the test framework before reordering.
    assertEquals(res, expected);
    assertEquals(counter.getLoadAvg(), 0.9704000000000005, 0.00001);
    assertEquals(counter.getLoadStd(), 0.09652895938523735, 0.00001);
} |
// Builds an absolute documentation URL by appending the given suffix to the
// configured base URL. A null suffix yields the bare base URL.
@Override
public String getDocumentationLink(@Nullable String suffix) {
    final String tail = (suffix == null) ? "" : suffix;
    return documentationBaseUrl + tail;
} | @Test
// Verifies that a base-URL property override replaces the default documentation
// host and is used as the prefix of links generated for a suffix.
public void getDocumentationLink_suffixProvided_withPropertyOverride() {
    String propertyValue = "https://new-url.sonarqube.org/";
    when(configuration.get(DOCUMENTATION_BASE_URL)).thenReturn(Optional.of(propertyValue));
    documentationLinkGenerator = new DefaultDocumentationLinkGenerator(sonarQubeVersion, configuration);
    String generatedLink = documentationLinkGenerator.getDocumentationLink(TEST_SUFFIX);
    assertThat(generatedLink).isEqualTo(propertyValue + "100.1000/documentation/analyzing-source-code/scm-integration/");
} |
// Converts a YAML rule configuration into repository tuples for persistence.
// Returns an empty collection when the class carries no @RepositoryTupleEntity;
// a single marshalled tuple for leaf entities; otherwise one group of tuples
// per declared field, resolved via the rule's node path.
public Collection<RepositoryTuple> swapToRepositoryTuples(final YamlRuleConfiguration yamlRuleConfig) {
    RepositoryTupleEntity tupleEntity = yamlRuleConfig.getClass().getAnnotation(RepositoryTupleEntity.class);
    if (null == tupleEntity) {
        return Collections.emptyList();
    }
    if (tupleEntity.leaf()) {
        // Leaf entities are marshalled whole under the annotated key.
        return Collections.singleton(new RepositoryTuple(tupleEntity.value(), YamlEngine.marshal(yamlRuleConfig)));
    }
    Collection<RepositoryTuple> result = new LinkedList<>();
    RuleNodePath ruleNodePath = TypedSPILoader.getService(RuleNodePathProvider.class, yamlRuleConfig.getRuleConfigurationType()).getRuleNodePath();
    for (Field each : getFields(yamlRuleConfig.getClass())) {
        // Temporarily widen accessibility for the reflective read, then restore it.
        boolean isAccessible = each.isAccessible();
        each.setAccessible(true);
        result.addAll(swapToRepositoryTuples(yamlRuleConfig, ruleNodePath, each));
        each.setAccessible(isAccessible);
    }
    return result;
} | @Test
// Verifies that a leaf YAML rule configuration marshals into exactly one tuple,
// keyed by the annotation value ("leaf") and carrying the YAML payload.
void assertSwapToRepositoryTuplesWithLeafYamlRuleConfiguration() {
    Collection<RepositoryTuple> actual = new RepositoryTupleSwapperEngine().swapToRepositoryTuples(new LeafYamlRuleConfiguration("foo"));
    assertThat(actual.size(), is(1));
    RepositoryTuple actualTuple = actual.iterator().next();
    assertThat(actualTuple.getKey(), is("leaf"));
    assertThat(actualTuple.getValue(), is("value: foo" + System.lineSeparator()));
} |
// Writes the JNDI bindings report into the PDF document. An empty path renders
// the root JNDI-tree heading; otherwise a heading for the given context path.
// DocumentException is translated into IOException.
//
// NOTE(review): document.close() is skipped when a DocumentException is caught
// and rethrown — confirm whether close() belongs in a finally block (closing a
// failed/unopened iText document may itself throw).
public void writeJndi(List<JndiBinding> jndiBindings, String path) throws IOException {
    try {
        document.open();
        if (path.isEmpty()) {
            addParagraph(getString("Arbre_JNDI"), "jndi.png");
        } else {
            addParagraph(getFormattedString("Arbre_JNDI_pour_contexte", path), "jndi.png");
        }
        new PdfJndiReport(jndiBindings, document).toPdf();
    } catch (final DocumentException e) {
        throw createIOException(e);
    }
    document.close();
} | @Test
// Exercises writeJndi twice: once with a mocked JNDI context containing mixed
// bindings (values, sub-contexts, null classname, and a NamingException on the
// last next()), and once with an empty binding list and empty path.
public void testWriteJndi() throws NamingException, IOException {
    final String contextPath = "comp/env/";
    final Context context = createNiceMock(Context.class);
    final NamingEnumeration<Binding> enumeration = createNiceMock(NamingEnumeration.class);
    expect(context.listBindings("java:" + contextPath)).andReturn(enumeration).anyTimes();
    expect(enumeration.hasMore()).andReturn(true).times(6);
    expect(enumeration.next()).andReturn(new Binding("test value", "test value")).once();
    expect(enumeration.next())
            .andReturn(new Binding("test context", createNiceMock(Context.class))).once();
    expect(enumeration.next()).andReturn(new Binding("", "test")).once();
    expect(enumeration.next())
            .andReturn(new Binding("java:/test context", createNiceMock(Context.class))).once();
    expect(enumeration.next()).andReturn(new Binding("test null classname", null, null)).once();
    expect(enumeration.next()).andThrow(new NamingException("test")).once();
    final ServletContext servletContext = createNiceMock(ServletContext.class);
    expect(servletContext.getServerInfo()).andReturn("Mock").anyTimes();
    replay(servletContext);
    Parameters.initialize(servletContext);
    replay(context);
    replay(enumeration);
    final List<JndiBinding> bindings = JndiBinding.listBindings(context, contextPath);
    final ByteArrayOutputStream output = new ByteArrayOutputStream();
    final PdfOtherReport pdfOtherReport = new PdfOtherReport(TEST_APP, output);
    pdfOtherReport.writeJndi(bindings, contextPath);
    assertNotEmptyAndClear(output);
    verify(context);
    verify(enumeration);
    verify(servletContext);
    // Second pass: empty bindings with the root (empty) path heading.
    final PdfOtherReport pdfOtherReport2 = new PdfOtherReport(TEST_APP, output);
    final List<JndiBinding> bindings2 = Collections.emptyList();
    pdfOtherReport2.writeJndi(bindings2, "");
    assertNotEmptyAndClear(output);
} |
// Dispatches an xDS discovery response to the handler for its resource type.
// The AssertionError default guards against a new ResourceType constant being
// added without a corresponding handler (switch on a null type throws NPE).
@VisibleForTesting
void handleResponse(DiscoveryResponseData response)
{
  ResourceType resourceType = response.getResourceType();
  switch (resourceType)
  {
    case NODE:
      handleD2NodeResponse(response);
      break;
    case D2_URI_MAP:
      handleD2URIMapResponse(response);
      break;
    case D2_URI:
      handleD2URICollectionResponse(response);
      break;
    default:
      throw new AssertionError("Missing case in enum switch: " + resourceType);
  }
} | @Test
// Verifies that an empty D2 URI-map discovery response is handled without error
// and is still acknowledged exactly once.
public void testHandleD2URIMapUpdateWithEmptyResponse()
{
  XdsClientImplFixture fixture = new XdsClientImplFixture();
  // Sanity check that the code handles empty responses
  fixture._xdsClientImpl.handleResponse(DISCOVERY_RESPONSE_WITH_EMPTY_URI_MAP_RESPONSE);
  fixture.verifyAckSent(1);
} |
// Human-readable summary of this cache configuration (selected fields only;
// not intended for parsing). Keep the literal format stable — callers/logs may
// depend on it.
@Override
public String toString() {
    return "CacheConfig{"
            + "name='" + name + '\''
            + ", managerPrefix='" + managerPrefix + '\''
            + ", inMemoryFormat=" + inMemoryFormat
            + ", backupCount=" + backupCount
            + ", hotRestart=" + hotRestartConfig
            + ", dataPersistenceConfig=" + dataPersistenceConfig
            + ", wanReplicationRef=" + wanReplicationRef
            + ", merkleTreeConfig=" + merkleTreeConfig
            + ", userCodeNamespace=" + userCodeNamespace
            + '}';
} | @Test
// Verifies that two CacheManagers created from distinct file-based config
// locations (distinct scope URIs) back two separate Hazelcast instances.
public void cacheManagerByLocationFileTest() throws URISyntaxException {
    URI uri = new URI("MY-SCOPE");
    String urlStr = configUrl1.toString();
    // Sanity check: the config URL must be a file: URL.
    assertEquals("file", urlStr.substring(0, 4));
    Properties properties = new Properties();
    properties.setProperty(HazelcastCachingProvider.HAZELCAST_CONFIG_LOCATION, urlStr);
    CacheManager cacheManager = Caching.getCachingProvider().getCacheManager(uri, null, properties);
    assertNotNull(cacheManager);
    URI uri2 = new URI("MY-SCOPE-OTHER");
    String urlStr2 = configUrl2.toString();
    assertEquals("file", urlStr2.substring(0, 4));
    Properties properties2 = new Properties();
    properties2.setProperty(HazelcastCachingProvider.HAZELCAST_CONFIG_LOCATION, urlStr2);
    CacheManager cacheManager2 = Caching.getCachingProvider().getCacheManager(uri2, null, properties2);
    assertNotNull(cacheManager2);
    assertEquals(2, Hazelcast.getAllHazelcastInstances().size());
} |
// Returns the number of elements visible at the given snapshot epoch.
// LATEST_EPOCH short-circuits to the live size. Otherwise the first snapshot
// (at or after the epoch) that recorded a delta for this table supplies the
// historical size; if no snapshot touched this table, the live size is correct.
int snapshottableSize(long epoch) {
    if (epoch == LATEST_EPOCH) {
        return baseSize();
    } else {
        Iterator<Snapshot> iterator = snapshotRegistry.iterator(epoch);
        while (iterator.hasNext()) {
            Snapshot snapshot = iterator.next();
            HashTier<T> tier = snapshot.getDelta(SnapshottableHashTable.this);
            if (tier != null) {
                // First recorded delta wins: it holds the size as of that epoch.
                return tier.size;
            }
        }
        return baseSize();
    }
} | @Test
// Verifies that a freshly created table reports size 0 at any epoch.
public void testEmptyTable() {
    SnapshotRegistry registry = new SnapshotRegistry(new LogContext());
    SnapshottableHashTable<TestElement> table =
        new SnapshottableHashTable<>(registry, 1);
    assertEquals(0, table.snapshottableSize(Long.MAX_VALUE));
} |
// Processes a NodeManager heartbeat: validates the node, detects duplicate or
// out-of-sync heartbeats, drives decommissioning, builds the response (interval,
// keys, collectors, resources, queuing limits), and forwards the node status,
// labels, and attributes to the ResourceManager. The steps below are strictly
// ordered — see the numbered comments.
@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
    throws YarnException, IOException {
  NodeStatus remoteNodeStatus = request.getNodeStatus();
  /**
   * Here is the node heartbeat sequence...
   * 1. Check if it's a valid (i.e. not excluded) node
   * 2. Check if it's a registered node
   * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
   * 4. Send healthStatus to RMNode
   * 5. Update node's labels if distributed Node Labels configuration is enabled
   */
  NodeId nodeId = remoteNodeStatus.getNodeId();
  // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
  // in decommissioning.
  if (!this.nodesListManager.isValidNode(nodeId.getHost())
      && !isNodeInDecommissioning(nodeId)) {
    String message =
        "Disallowed NodeManager nodeId: " + nodeId + " hostname: "
            + nodeId.getHost();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }
  // 2. Check if it's a registered node
  RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
  if (rmNode == null) {
    /* node does not exist */
    String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
        message);
  }
  // Send ping
  this.nmLivelinessMonitor.receivedPing(nodeId);
  this.decommissioningWatcher.update(rmNode, remoteNodeStatus);
  // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
  NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
  if (getNextResponseId(
      remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse
          .getResponseId()) {
    // Duplicate heartbeat: replay the last response instead of reprocessing.
    LOG.info("Received duplicate heartbeat from node "
        + rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId());
    return lastNodeHeartbeatResponse;
  } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse
      .getResponseId()) {
    String message =
        "Too far behind rm response id:"
            + lastNodeHeartbeatResponse.getResponseId() + " nm response id:"
            + remoteNodeStatus.getResponseId();
    LOG.info(message);
    // TODO: Just sending reboot is not enough. Think more.
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
        message);
  }
  // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
  if (rmNode.getState() == NodeState.DECOMMISSIONING &&
      decommissioningWatcher.checkReadyToBeDecommissioned(
          rmNode.getNodeID())) {
    String message = "DECOMMISSIONING " + nodeId +
        " is ready to be decommissioned";
    LOG.info(message);
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
    this.nmLivelinessMonitor.unregister(nodeId);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }
  if (timelineServiceV2Enabled) {
    // Check & update collectors info from request.
    updateAppCollectorsMap(request);
  }
  // Heartbeat response
  long newInterval = nextHeartBeatInterval;
  if (heartBeatIntervalScalingEnable) {
    // Interval adapts to node load within [min, max] using the two factors.
    newInterval = rmNode.calculateHeartBeatInterval(
        nextHeartBeatInterval, heartBeatIntervalMin,
        heartBeatIntervalMax, heartBeatIntervalSpeedupFactor,
        heartBeatIntervalSlowdownFactor);
  }
  NodeHeartbeatResponse nodeHeartBeatResponse =
      YarnServerBuilderUtils.newNodeHeartbeatResponse(
          getNextResponseId(lastNodeHeartbeatResponse.getResponseId()),
          NodeAction.NORMAL, null, null, null, null, newInterval);
  rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);
  populateKeys(request, nodeHeartBeatResponse);
  populateTokenSequenceNo(request, nodeHeartBeatResponse);
  if (timelineServiceV2Enabled) {
    // Return collectors' map that NM needs to know
    setAppCollectorsMapToResponse(rmNode.getRunningApps(),
        nodeHeartBeatResponse);
  }
  // 4. Send status to RMNode, saving the latest response.
  RMNodeStatusEvent nodeStatusEvent =
      new RMNodeStatusEvent(nodeId, remoteNodeStatus);
  if (request.getLogAggregationReportsForApps() != null
      && !request.getLogAggregationReportsForApps().isEmpty()) {
    nodeStatusEvent.setLogAggregationReportsForApps(request
        .getLogAggregationReportsForApps());
  }
  this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);
  // 5. Update node's labels to RM's NodeLabelManager.
  if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
    try {
      updateNodeLabelsFromNMReport(
          NodeLabelsUtils.convertToStringSet(request.getNodeLabels()),
          nodeId);
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
    }
  }
  // 6. check if node's capacity is load from dynamic-resources.xml
  // if so, send updated resource back to NM.
  String nid = nodeId.toString();
  Resource capability = loadNodeResourceFromDRConfiguration(nid);
  // sync back with new resource if not null.
  if (capability != null) {
    nodeHeartBeatResponse.setResource(capability);
  }
  // Check if we got an event (AdminService) that updated the resources
  if (rmNode.isUpdatedCapability()) {
    // AdminService-driven update takes precedence over DR-configured capacity.
    nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
    rmNode.resetUpdatedCapability();
  }
  // 7. Send Container Queuing Limits back to the Node. This will be used by
  // the node to truncate the number of Containers queued for execution.
  if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
    nodeHeartBeatResponse.setContainerQueuingLimit(
        this.rmContext.getNodeManagerQueueLimitCalculator()
            .createContainerQueuingLimit());
  }
  // 8. Get node's attributes and update node-to-attributes mapping
  // in RMNodeAttributeManager.
  if (request.getNodeAttributes() != null) {
    try {
      // update node attributes if necessary then update heartbeat response
      updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      String errorMsg =
          nodeHeartBeatResponse.getDiagnosticsMessage() == null ?
              ex.getMessage() :
              nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex
                  .getMessage();
      nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
    }
  }
  return nodeHeartBeatResponse;
} | @Test
// End-to-end graceful decommission: a node running app containers stays in
// DECOMMISSIONING until the app finishes, while an idle node is decommissioned
// immediately; the busy node is finally told to SHUTDOWN after the app ends.
public void testGracefulDecommissionWithApp() throws Exception {
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile
        .getAbsolutePath());
    writeToHostsFile("");
    rm = new MockRM(conf);
    rm.start();
    MockNM nm1 = rm.registerNode("host1:1234", 10240);
    MockNM nm2 = rm.registerNode("host2:5678", 20480);
    MockNM nm3 = rm.registerNode("host3:4433", 10240);
    NodeId id1 = nm1.getNodeId();
    NodeId id3 = nm3.getNodeId();
    rm.waitForState(id1, NodeState.RUNNING);
    rm.waitForState(id3, NodeState.RUNNING);
    // Create an app and launch two containers on host1.
    RMApp app = MockRMAppSubmitter.submitWithMemory(2000, rm);
    MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1);
    ApplicationAttemptId aaid = app.getCurrentAppAttempt().getAppAttemptId();
    nm1.nodeHeartbeat(aaid, 2, ContainerState.RUNNING);
    nm3.nodeHeartbeat(true);
    // Graceful decommission host1 and host3
    writeToHostsFile("host1", "host3");
    rm.getNodesListManager().refreshNodes(conf, true);
    rm.waitForState(id1, NodeState.DECOMMISSIONING);
    rm.waitForState(id3, NodeState.DECOMMISSIONING);
    // host1 should be DECOMMISSIONING due to running containers.
    // host3 should become DECOMMISSIONED.
    nm1.nodeHeartbeat(true);
    nm3.nodeHeartbeat(true);
    rm.waitForState(id1, NodeState.DECOMMISSIONING);
    rm.waitForState(id3, NodeState.DECOMMISSIONED);
    nm1.nodeHeartbeat(aaid, 2, ContainerState.RUNNING);
    // Complete containers on host1.
    // Since the app is still RUNNING, expect NodeAction.NORMAL.
    NodeHeartbeatResponse nodeHeartbeat1 =
        nm1.nodeHeartbeat(aaid, 2, ContainerState.COMPLETE);
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat1.getNodeAction());
    // Finish the app and verified DECOMMISSIONED.
    MockRM.finishAMAndVerifyAppState(app, rm, nm1, am);
    rm.waitForState(app.getApplicationId(), RMAppState.FINISHED);
    nodeHeartbeat1 = nm1.nodeHeartbeat(aaid, 2, ContainerState.COMPLETE);
    Assert.assertEquals(NodeAction.SHUTDOWN, nodeHeartbeat1.getNodeAction());
    rm.waitForState(id1, NodeState.DECOMMISSIONED);
} |
// Persists task configurations for a connector. The internal request must be
// properly signed; the write is queued onto the herder tick thread and only
// performed by the leader for a known connector — otherwise the callback
// receives NotLeaderException / NotFoundException.
@Override
public void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback, InternalRequestSignature requestSignature) {
    log.trace("Submitting put task configuration request {}", connName);
    // Reject unsigned/badly-signed internal requests up front (callback already notified).
    if (requestNotSignedProperly(requestSignature, callback)) {
        return;
    }
    addRequest(
        () -> {
            if (!isLeader())
                callback.onCompletion(new NotLeaderException("Only the leader may write task configurations.", leaderUrl()), null);
            else if (!configState.contains(connName))
                callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
            else {
                writeTaskConfigs(connName, configs);
                callback.onCompletion(null, null);
            }
            return null;
        },
        forwardErrorAndTickThreadStages(callback)
    );
} | @Test
// Verifies that putTaskConfigs while the worker is still starting fails the
// callback with a 503 ConnectRestException and performs no further work.
public void putTaskConfigsWorkerStillStarting() {
    when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2);
    InternalRequestSignature signature = mock(InternalRequestSignature.class);
    when(signature.keyAlgorithm()).thenReturn("HmacSHA256");
    Callback<Void> taskConfigCb = mock(Callback.class);
    herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, signature);
    ArgumentCaptor<Throwable> errorCapture = ArgumentCaptor.forClass(Throwable.class);
    verify(taskConfigCb).onCompletion(errorCapture.capture(), isNull());
    assertInstanceOf(ConnectRestException.class, errorCapture.getValue());
    assertEquals(SERVICE_UNAVAILABLE.getStatusCode(), ((ConnectRestException) errorCapture.getValue()).statusCode());
    verifyNoMoreInteractions(member, taskConfigCb);
} |
// Computes the intersection of several sorted range sets (each inner list is a
// set of [left,right] ranges sorted by start). Contiguous output ranges are
// merged. Returns empty when the input is null/empty or any range set is empty;
// returns the single input list unchanged when only one set is given.
// The core is a multi-pointer sweep: advance all pointers until every current
// range contains the running maximum head, emit the overlap, then advance the
// pointers whose range ends at the overlap's tail.
public static List<IntPair> intersectSortedRangeSets(List<List<IntPair>> sortedRangeSetList) {
    if (sortedRangeSetList == null || sortedRangeSetList.isEmpty()) {
        return Collections.emptyList();
    }
    if (sortedRangeSetList.size() == 1) {
        return sortedRangeSetList.get(0);
    }
    // if any list is empty return empty
    for (List<IntPair> rangeSet : sortedRangeSetList) {
        if (rangeSet.isEmpty()) {
            return Collections.emptyList();
        }
    }
    int[] currentRangeSetIndex = new int[sortedRangeSetList.size()];
    Arrays.fill(currentRangeSetIndex, 0);
    int maxHead = -1;
    int maxHeadIndex = -1;
    boolean reachedEnd = false;
    List<IntPair> result = new ArrayList<IntPair>();
    while (!reachedEnd) {
        // find max Head in the current pointers
        for (int i = 0; i < sortedRangeSetList.size(); i++) {
            int head = sortedRangeSetList.get(i).get(currentRangeSetIndex[i]).getLeft();
            if (head > maxHead) {
                maxHead = head;
                maxHeadIndex = i;
            }
        }
        // move all pointers forward such that range they point to contain maxHead
        // NOTE: j is deliberately reset to -1 below to restart the scan whenever a
        // larger head is discovered mid-pass; do not "simplify" this loop.
        int j = -1;
        while (j++ < sortedRangeSetList.size() - 1) {
            if (j == maxHeadIndex) {
                continue;
            }
            boolean found = false;
            while (!found && currentRangeSetIndex[j] < sortedRangeSetList.get(j).size()) {
                IntPair range = sortedRangeSetList.get(j).get(currentRangeSetIndex[j]);
                if (maxHead >= range.getLeft() && maxHead <= range.getRight()) {
                    found = true;
                    break;
                }
                if (range.getLeft() > maxHead) {
                    maxHead = range.getLeft();
                    maxHeadIndex = j;
                    j = -1;
                    break;
                }
                currentRangeSetIndex[j] = currentRangeSetIndex[j] + 1;
            }
            // new maxHead found
            if (j == -1) {
                continue;
            }
            if (!found) {
                reachedEnd = true;
                break;
            }
        }
        if (reachedEnd) {
            break;
        }
        // there is definitely some intersection possible here
        IntPair intPair = sortedRangeSetList.get(0).get(currentRangeSetIndex[0]);
        IntPair intersection = Pairs.intPair(intPair.getLeft(), intPair.getRight());
        for (int i = 1; i < sortedRangeSetList.size(); i++) {
            IntPair pair = sortedRangeSetList.get(i).get(currentRangeSetIndex[i]);
            int start = Math.max(intersection.getLeft(), pair.getLeft());
            int end = Math.min(intersection.getRight(), pair.getRight());
            intersection.setLeft(start);
            intersection.setRight(end);
        }
        if (!result.isEmpty()) {
            // if new range is contiguous merge it
            IntPair prevIntersection = result.get(result.size() - 1);
            if (intersection.getLeft() == prevIntersection.getRight() + 1) {
                prevIntersection.setRight(intersection.getRight());
            } else {
                result.add(intersection);
            }
        } else {
            result.add(intersection);
        }
        // move the pointers forward for rangesets where the currenttail == intersection.tail
        for (int i = 0; i < sortedRangeSetList.size(); i++) {
            IntPair pair = sortedRangeSetList.get(i).get(currentRangeSetIndex[i]);
            if (pair.getRight() == intersection.getRight()) {
                currentRangeSetIndex[i] = currentRangeSetIndex[i] + 1;
                if (currentRangeSetIndex[i] == sortedRangeSetList.get(i).size()) {
                    reachedEnd = true;
                    break;
                }
            }
        }
    }
    return result;
} | @Test
// Large-scale fixture: intersects three realistic 80+-range sets and compares
// against a precomputed expected intersection. Exercises merging of contiguous
// output ranges and repeated pointer restarts. Do not edit the literal data.
public void testComplex() {
    String rangeSet1 = "[[9148,10636], [18560,21885], [29475,32972], [34313,37642], [38008,47157], [50962,53911], "
        + "[59240,68238], [72458,83087], [92235,97593], [100690,103101], [111708,120102], [123212,124718], "
        + "[127544,134012], [134701,141314], [144966,146889], [147206,156680], [163842,168747], [175241,179737], "
        + "[180669,184662], [192538,200004], [207255,211226], [217529,219152], [221319,228532], [230549,236908], "
        + "[237353,240840], [245097,251894], [253228,257447], [263986,268166], [272168,276416], [282756,285555], "
        + "[286030,289848], [293220,303828], [308259,317810], [320830,330498], [337606,345534], [354205,361367], "
        + "[365751,375129], [379830,382548], [390661,399509], [409031,415694], [421748,428011], [436729,442978], "
        + "[443187,448760], [454285,464404], [469128,471735], [475965,478758], [483038,491060], [496477,499410], "
        + "[502719,507364], [511478,515427], [521615,523897], [524251,529600], [530904,536822], [541666,543826], "
        + "[551652,555367], [561244,565874], [573934,582151], [587804,593424], [596533,599490], [601884,605244], "
        + "[610479,618173], [627032,630079], [633582,643323], [648357,658921], [662083,664340], [666519,677174], "
        + "[681524,687223], [693032,696329], [700808,705461], [709573,713092], [722500,732846], [733115,741189], "
        + "[742183,743217], [748442,754700], [760482,768791], [769875,773877], [774153,775538], [778521,781333], "
        + "[781945,791595], [799389,809167], [811769,814445], [824160,831582], [838445,844533], [850597,858212], "
        + "[862638,867759], [877243,887468], [893193,895091], [902608,908295], [911058,915872], [916127,917590], "
        + "[922702,933633], [938082,946932], [953197,956096], [965980,970314], [976357,983182], [983378,991764]]";
    String rangeSet2 = "[[4615,7365], [9048,19300], [25607,33900], [37975,45734], [53195,58803], [64401,72246], "
        + "[76305,84289], [86575,96695], [104465,114232], [121799,124496], [132587,137518], [146406,152055], "
        + "[154808,159304], [165855,168693], [177387,184548], [192275,202089], [204700,215167], [218780,219934], "
        + "[220492,224530], [227195,231541], [233667,241692], [249043,251396], [258494,263095], [271187,272880], "
        + "[279871,287604], [295906,302319], [302575,309352], [318221,320436], [324492,326129], [326623,333708], "
        + "[340839,349361], [356638,361977], [368099,373667], [374773,377525], [378682,380033], [387254,396509], "
        + "[405096,411616], [421132,424029], [426427,435377], [442540,447244], [453501,459620], [462366,471360], "
        + "[473395,484316], [492462,502422], [503755,507454], [507551,510017], [511176,516561], [522063,525977], "
        + "[531770,537861], [540539,542996], [547329,557420], [560641,570186], [570284,578197], [583861,588606], "
        + "[591016,596724], [601714,610872], [614557,622940], [632723,634975], [636710,647340], [647937,657306], "
        + "[666519,671699], [673434,679252], [679505,687724], [695809,697606], [705905,710503], [719044,728326], "
        + "[735262,739796], [748048,753094], [762698,768074], [771762,781103], [786979,789938], [790140,794143], "
        + "[800910,806993], [807930,811850], [818716,827521], [828786,839104], [840596,850617], [851100,858980], "
        + "[863671,874042], [874432,880240], [889917,897380], [897599,904508], [910935,914564], [919538,927762], "
        + "[933690,942122], [951330,959747], [969266,978196], [984965,991648]]";
    String rangeSet3 = "[[3987,12147], [16890,26115], [34621,41095], [41476,44775], [50031,51695], [56539,60977], "
        + "[67558,76247], [83873,86817], [94900,102884], [111390,119073], [127792,138059], [145130,148633], "
        + "[155709,157847], [158118,161503], [164296,165881], [166123,169365], [178304,182866], [191004,193315], "
        + "[198648,199930], [204806,209054], [209177,213999], [214843,219858], [221497,223059], [228746,236624], "
        + "[241482,244679], [254433,255982], [257794,260287], [270207,277022], [278621,286999], [296719,303032], "
        + "[310590,318040], [323645,333283], [336166,341711], [347485,352255], [353348,358126], [362660,368061], "
        + "[376141,381147], [390178,393612], [399003,401626], [402609,412126], [419426,423852], [430009,432359], "
        + "[436647,442494], [446986,453904], [457694,461029], [466339,474088], [483026,486699], [495143,499556], "
        + "[506900,510220], [516325,521843], [523249,528070], [528549,532543], [533418,538998], [547895,549483], "
        + "[554460,555634], [562049,569098], [576463,584537], [588353,590300], [595284,600900], [609006,610452], "
        + "[618857,627533], [633186,637955], [642446,650849], [655326,662913], [663654,673388], [673914,678458], "
        + "[685951,690391], [699505,707103], [715822,718387], [725073,735513], [739306,741642], [750077,752683], "
        + "[759644,768461], [775885,778884], [783314,785772], [792568,802284], [806644,813017], [821962,828876], "
        + "[837176,847172], [854336,864364], [874180,881592], [888633,899508], [906913,907927], [908291,918558], "
        + "[927183,929930], [939548,949063], [951579,957887], [958917,960053], [968720,973220], [978731,989441]]";
    String expectedOutputRangeSet = "[[9148,10636], [18560,19300], [38008,41095], [41476,44775], [67558,68238], "
        + "[94900,96695], [111708,114232], [132587,134012], [134701,137518], [146406,146889], [147206,148633], "
        + "[155709,156680], [165855,165881], [166123,168693], [178304,179737], [180669,182866], [192538,193315], "
        + "[198648,199930], [207255,209054], [209177,211226], [218780,219152], [221497,223059], [230549,231541], "
        + "[233667,236624], [272168,272880], [282756,285555], [286030,286999], [296719,302319], [302575,303032], "
        + "[324492,326129], [326623,330498], [340839,341711], [356638,358126], [379830,380033], [390661,393612], "
        + "[409031,411616], [421748,423852], [446986,447244], [457694,459620], [469128,471360], [483038,484316], "
        + "[496477,499410], [506900,507364], [523249,523897], [524251,525977], [531770,532543], [533418,536822], "
        + "[554460,555367], [562049,565874], [576463,578197], [588353,588606], [596533,596724], [633582,634975], "
        + "[636710,637955], [642446,643323], [648357,650849], [655326,657306], [666519,671699], [673914,677174], "
        + "[685951,687223], [725073,728326], [735262,735513], [739306,739796], [750077,752683], [762698,768074], "
        + "[778521,778884], [800910,802284], [806644,806993], [807930,809167], [811769,811850], [824160,827521], "
        + "[828786,828876], [838445,839104], [840596,844533], [854336,858212], [863671,864364], [877243,880240], "
        + "[893193,895091], [911058,914564], [927183,927762], [939548,942122], [953197,956096], [969266,970314], "
        + "[984965,989441]]";
    List<List<IntPair>> sortedRangeSetList =
        Arrays.asList(constructRangeSet(rangeSet1), constructRangeSet(rangeSet2), constructRangeSet(rangeSet3));
    List<IntPair> expected = constructRangeSet(expectedOutputRangeSet);
    List<IntPair> actual = SortedRangeIntersection.intersectSortedRangeSets(sortedRangeSetList);
    Assert.assertEquals(actual, expected);
} |
// Validates that the given string is non-null and non-empty.
//
// @param str  the string to validate
// @param name the argument name used in the exception message
// @throws IllegalArgumentException if {@code str} is empty (null handling is
//         delegated to requireNotNull)
public static void requireNotEmpty(final String str, final String name) {
    requireNotNull(str, name);
    if (!str.isEmpty()) {
        return;
    }
    throw new IllegalArgumentException(name + " is an empty string");
} | @Test
// Verifies that a non-empty string passes validation without throwing.
public void testNotEmptyWithNotEmptyString() {
    // This should not throw
    ArgumentUtil.requireNotEmpty("not empty string", "foo");
} |
// Starts the consumer: creates an Azure Service Bus processor client wired to
// this consumer's message and error callbacks, then begins processing.
@Override
protected void doStart() throws Exception {
    super.doStart();
    LOG.debug("Creating connection to Azure ServiceBus");
    client = getEndpoint().getServiceBusClientFactory().createServiceBusProcessorClient(getConfiguration(),
            this::processMessage, this::processError);
    client.start();
} | @Test
// Verifies that a received Service Bus message is converted into a Camel
// Exchange (BinaryData body plus expected headers) and handed to the processor.
void consumerSubmitsExchangeToProcessor() throws Exception {
    try (ServiceBusConsumer consumer = new ServiceBusConsumer(endpoint, processor)) {
        consumer.doStart();
        verify(client).start();
        verify(clientFactory).createServiceBusProcessorClient(any(), any(), any());
        when(messageContext.getMessage()).thenReturn(message);
        configureMockMessage();
        // Drive the captured message callback as the Service Bus client would.
        processMessageCaptor.getValue().accept(messageContext);
        verify(processor).process(any(Exchange.class), any(AsyncCallback.class));
        Exchange exchange = exchangeCaptor.getValue();
        assertThat(exchange).isNotNull();
        Message inMessage = exchange.getIn();
        assertThat(inMessage).isNotNull();
        assertThat(inMessage.getBody()).isInstanceOf(BinaryData.class);
        assertThat(inMessage.getBody(BinaryData.class).toString()).isEqualTo(MESSAGE_BODY);
        assertThat(inMessage.getHeaders()).isEqualTo(createExpectedMessageHeaders());
    }
} |
// REST endpoint: updates an existing tag relation identified by the path id.
// The path id is bound onto the payload before delegating to the service; the
// response carries the number of rows updated.
@PutMapping("/id/{id}")
public ShenyuAdminResult updateTagRelation(@PathVariable("id") @Valid final String id,
                                           @Valid @RequestBody final TagRelationDTO tagRelationDTO) {
    tagRelationDTO.setId(id);
    return ShenyuAdminResult.success(ShenyuResultMessage.UPDATE_SUCCESS, tagRelationService.update(tagRelationDTO));
} | @Test
// Verifies the PUT /tag-relation/id/{id} endpoint returns HTTP 200 with the
// update-success message when the service reports one row updated.
public void testUpdateTagRelation() throws Exception {
    TagRelationDTO tagRelationDTO = buildTagRelationDTO();
    given(tagRelationService.update(any())).willReturn(1);
    this.mockMvc.perform(MockMvcRequestBuilders.put("/tag-relation/id/123")
            .contentType(MediaType.APPLICATION_JSON)
            .param("id", "123")
            .content(GsonUtils.getInstance().toJson(tagRelationDTO)))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.UPDATE_SUCCESS)))
            .andReturn();
} |
// Parses a comma-separated list of voter descriptions into a DynamicVoters set.
// Surrounding whitespace is ignored and blank segments (e.g. from trailing
// commas) are skipped; each segment is delegated to DynamicVoter.parse.
public static DynamicVoters parse(String input) {
    List<DynamicVoter> voters = new ArrayList<>();
    for (String voterString : input.trim().split(",")) {
        if (voterString.isEmpty()) {
            continue;
        }
        voters.add(DynamicVoter.parse(voterString));
    }
    return new DynamicVoters(voters);
} | @Test
// Verifies that parsing a voter list containing a duplicated node id (1) fails
// with the expected IllegalArgumentException message.
public void testParsingInvalidStringWithDuplicateNodeIds() {
    assertEquals("Node id 1 was specified more than once.",
        assertThrows(IllegalArgumentException.class,
            () -> DynamicVoters.parse(
                "0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," +
                "1@localhost:8030:aUARLskQTCW4qCZDtS_cwA," +
                "1@localhost:8040:2ggvsS4kQb-fSJ_-zC_Ang")).
                    getMessage());
} |
// Executes a CREATE MAPPING plan: registers the mapping in the catalog and
// returns a DML-style result whose update count is always zero (DDL).
SqlResult execute(CreateMappingPlan plan, SqlSecurityContext ssc) {
    final int ddlUpdateCount = 0;
    catalog.createMapping(plan.mapping(), plan.replace(), plan.ifNotExists(), ssc);
    return UpdateSqlResultImpl.createUpdateCountResult(ddlUpdateCount);
} | @Test
// Parameterized over the mutually exclusive replace/ifNotExists flags; verifies
// the plan delegates to the catalog and reports a zero update count.
@Parameters({
        "true, false",
        "false, true"
})
public void test_createMappingExecution(boolean replace, boolean ifNotExists) {
    // given
    Mapping mapping = mapping();
    CreateMappingPlan plan = new CreateMappingPlan(planKey(), mapping, replace, ifNotExists, planExecutor);
    // when
    SqlResult result = planExecutor.execute(plan, null);
    // then
    assertThat(result.updateCount()).isEqualTo(0);
    verify(catalog).createMapping(mapping, replace, ifNotExists, null);
} |
// FUSE rename callback: delegates to renameInternal via AlluxioFuseUtils.call,
// which records the FUSE_RENAME metric and maps failures to negative errno codes.
@Override
public int rename(String oldPath, String newPath, int flags) {
    return AlluxioFuseUtils.call(LOG, () -> renameInternal(oldPath, newPath, flags),
            FuseConstants.FUSE_RENAME, "oldPath=%s,newPath=%s,", oldPath, newPath);
} | @Test
// Verifies that renaming onto an existing destination surfaces as -EIO when the
// underlying file system throws FileAlreadyExistsException.
public void renameNewExist() throws Exception {
    AlluxioURI oldPath = BASE_EXPECTED_URI.join("/old");
    AlluxioURI newPath = BASE_EXPECTED_URI.join("/new");
    doThrow(new FileAlreadyExistsException("File /new already exists"))
        .when(mFileSystem).rename(oldPath, newPath);
    when(mFileSystem.getStatus(any(AlluxioURI.class))).thenReturn(mock(URIStatus.class));
    setUpOpenMock(oldPath);
    assertEquals(-ErrorCodes.EIO(), mFuseFs.rename("/old", "/new",
        AlluxioJniRenameUtils.NO_FLAGS));
} |
// Judges whether the given registry path refers to a data-sources node under
// the metadata node. Matching is case-insensitive and uses find() semantics.
// NOTE(review): the pattern is recompiled on every call — consider caching it
// if getMetaDataNode() is constant.
public static boolean isDataSourcesNode(final String path) {
    final String patternText = getMetaDataNode() + DATABASE_DATA_SOURCES_NODE + "?";
    final Pattern pattern = Pattern.compile(patternText, Pattern.CASE_INSENSITIVE);
    return pattern.matcher(path).find();
} | @Test
// Verifies that a concrete data-source path under a logic database matches.
void assertIsDataSourcesNode() {
    assertTrue(DataSourceMetaDataNode.isDataSourcesNode("/metadata/logic_db/data_sources/foo_ds"));
} |
// Prints the context's status list with zero indentation; see print(Context, int).
public static void print(Context context) {
    print(context, 0);
} | @Test
public void testWithException() {
Status s0 = new ErrorStatus("test0", this);
Status s1 = new InfoStatus("test1", this, new Exception("testEx"));
Status s11 = new InfoStatus("test11", this);
Status s12 = new InfoStatus("test12", this);
s1.add(s11);
s1.add(s12);
Status s2 = new InfoStatus("test2", this);
Status s21 = new InfoStatus("test21", this);
Status s211 = new WarnStatus("test211", this);
Status s22 = new InfoStatus("test22", this);
s2.add(s21);
s2.add(s22);
s21.add(s211);
Context context = new ContextBase();
context.getStatusManager().add(s0);
context.getStatusManager().add(s1);
context.getStatusManager().add(s2);
StatusPrinter.print(context);
String result = outputStream.toString();
assertTrue(result.contains("|-ERROR in "+this.getClass().getName()));
assertTrue(result.contains("+ INFO in "+this.getClass().getName()));
assertTrue(result.contains("ch.qos.logback.core.util.StatusPrinterTest.testWithException"));
} |
@Override
// Drops every transaction-topic subscription for the group by removing its
// entry from the cluster-data map; a no-op if the group is unknown.
public void unSubscribeAllTransactionTopic(ProxyContext ctx, String group) {
groupClusterData.remove(group);
} | @Test
// Subscribe then unsubscribe: the group cluster data must end up empty.
public void testUnSubscribeAllTransactionTopic() {
this.clusterTransactionService.addTransactionSubscription(ctx, GROUP, TOPIC);
this.clusterTransactionService.unSubscribeAllTransactionTopic(ctx, GROUP);
assertEquals(0, this.clusterTransactionService.getGroupClusterData().size());
}
@Override
// Looks up attributes for a path. Containers (top-level rooms) are resolved by
// case-insensitive name match against the session's root folders; all other
// paths go through the Files API. ApiException is mapped to a BackgroundException.
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
final PathContainerService service = new DefaultPathContainerService();
if(service.isContainer(file)) {
// Match either the root folder's path or its display name.
for(RootFolder r : session.roots()) {
if(StringUtils.equalsIgnoreCase(file.getName(), PathNormalizer.name(r.getPath()))
|| StringUtils.equalsIgnoreCase(file.getName(), PathNormalizer.name(r.getName()))) {
return this.toAttributes(r);
}
}
// No root folder matched the container name.
throw new NotfoundException(file.getAbsolute());
}
final FilesApi files = new FilesApi(session.getClient());
return this.toAttributes(files.filesGet_0(URIEncoder.encode(fileid.getPrefixedPath(file))));
}
catch(ApiException e) {
throw new StoregateExceptionMappingService(fileid).map("Failure to read attributes of {0}", e, file);
}
} | @Test
// Creates a directory and a file, then checks attribute lookup is
// case-insensitive, consistent with the list service, and reports the
// expected permissions/checksum/etag/file-id values.
public void testFind() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
assertTrue(room.attributes().getPermission().isExecutable());
final Path test = new StoregateTouchFeature(session, nodeid).touch(
new Path(room, String.format("%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus());
final PathAttributes attr = new StoregateAttributesFinderFeature(session, nodeid).find(test);
assertEquals(attr, new StoregateAttributesFinderFeature(session, nodeid).find(new Path(test.getParent(), StringUtils.upperCase(test.getName()), test.getType())));
assertEquals(attr, new StoregateAttributesFinderFeature(session, nodeid).find(new Path(test.getParent(), StringUtils.lowerCase(test.getName()), test.getType())));
assertNotEquals(0L, attr.getModificationDate());
assertEquals(Checksum.NONE, attr.getChecksum());
assertNull(attr.getETag());
assertNotNull(attr.getFileId());
assertFalse(attr.getPermission().isExecutable());
assertTrue(attr.getPermission().isReadable());
assertTrue(attr.getPermission().isWritable());
final Path list = new StoregateListService(session, nodeid).list(room, new DisabledListProgressListener())
.find(new DefaultPathPredicate(test));
assertEquals(attr, list.attributes());
new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
@Override
// Builds a closed-range sharding condition from a BETWEEN ... AND ... predicate.
// Literal or parameter-marker bounds are resolved first; a bound expressed as a
// now() expression falls back to the timestamp service. Returns empty when a
// bound cannot be resolved at all.
public Optional<ShardingConditionValue> generate(final BetweenExpression predicate, final Column column, final List<Object> params, final TimestampServiceRule timestampServiceRule) {
ConditionValue betweenConditionValue = new ConditionValue(predicate.getBetweenExpr(), params);
ConditionValue andConditionValue = new ConditionValue(predicate.getAndExpr(), params);
Optional<Comparable<?>> betweenValue = betweenConditionValue.getValue();
Optional<Comparable<?>> andValue = andConditionValue.getValue();
// Record which parameter markers (if any) supplied the bounds.
List<Integer> parameterMarkerIndexes = new ArrayList<>(2);
betweenConditionValue.getParameterMarkerIndex().ifPresent(parameterMarkerIndexes::add);
andConditionValue.getParameterMarkerIndex().ifPresent(parameterMarkerIndexes::add);
if (betweenValue.isPresent() && andValue.isPresent()) {
// safeClosed tolerates mixed numeric types in the two bounds.
return Optional.of(new RangeShardingConditionValue<>(column.getName(), column.getTableName(), SafeNumberOperationUtils.safeClosed(betweenValue.get(), andValue.get()),
parameterMarkerIndexes));
}
// Fall back to the service timestamp for now()-style bounds; the same
// timestamp is used for both bounds so the range stays consistent.
Timestamp timestamp = timestampServiceRule.getTimestamp();
if (!betweenValue.isPresent() && ExpressionConditionUtils.isNowExpression(predicate.getBetweenExpr())) {
betweenValue = Optional.of(timestamp);
}
if (!andValue.isPresent() && ExpressionConditionUtils.isNowExpression(predicate.getAndExpr())) {
andValue = Optional.of(timestamp);
}
if (!betweenValue.isPresent() || !andValue.isPresent()) {
return Optional.empty();
}
return Optional.of(new RangeShardingConditionValue<>(column.getName(), column.getTableName(), Range.closed(betweenValue.get(), andValue.get()), parameterMarkerIndexes));
} | @SuppressWarnings("unchecked")
@Test
// BETWEEN ? AND ? with params [1, 2] must yield a closed range [1, 2] and
// record both parameter-marker indexes.
void assertGenerateConditionValueWithParameter() {
ColumnSegment left = new ColumnSegment(0, 0, new IdentifierValue("id"));
ParameterMarkerExpressionSegment between = new ParameterMarkerExpressionSegment(0, 0, 0);
ParameterMarkerExpressionSegment and = new ParameterMarkerExpressionSegment(0, 0, 1);
BetweenExpression predicate = new BetweenExpression(0, 0, left, between, and, false);
Optional<ShardingConditionValue> actual = generator.generate(predicate, column, Arrays.asList(1, 2), timestampServiceRule);
assertTrue(actual.isPresent());
assertThat(actual.get(), instanceOf(RangeShardingConditionValue.class));
RangeShardingConditionValue<Integer> conditionValue = (RangeShardingConditionValue<Integer>) actual.get();
assertThat(conditionValue.getTableName(), is("tbl"));
assertThat(conditionValue.getColumnName(), is("id"));
assertThat(conditionValue.getValueRange(), is(Range.closed(1, 2)));
assertThat(conditionValue.getParameterMarkerIndexes(), is(Arrays.asList(0, 1)));
}
@Override
// Request filter: wraps the downstream chain with DB-session caching and
// thread-local settings, guaranteeing teardown via nested finally blocks even
// when the chain throws.
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain) throws IOException, ServletException {
HttpServletRequest request = (HttpServletRequest) servletRequest;
HttpServletResponse response = (HttpServletResponse) servletResponse;
DBSessions dbSessions = platform.getContainer().getComponentByType(DBSessions.class);
ThreadLocalSettings settings = platform.getContainer().getComponentByType(ThreadLocalSettings.class);
// Optional component: null when no user-session initializer is registered.
UserSessionInitializer userSessionInitializer = platform.getContainer().getOptionalComponentByType(UserSessionInitializer.class).orElse(null);
LOG.trace("{} serves {}", Thread.currentThread(), request.getRequestURI());
dbSessions.enableCaching();
try {
settings.load();
try {
doFilter(request, response, chain, userSessionInitializer);
} finally {
// Always unload thread-local settings before disabling session caching.
settings.unload();
}
} finally {
dbSessions.disableCaching();
}
} | @Test
// When the platform is not initialized the filter must simply pass the request
// through the chain without touching the user-session initializer.
public void does_nothing_when_not_initialized() throws Exception {
underTest.doFilter(request, response, chain);
verify(chain).doFilter(request, response);
verifyNoInteractions(userSessionInitializer);
}
// Converts a media container into a videos-only container: each media album is
// mapped to a video album, and the video list is carried over unchanged.
public static VideosContainerResource mediaToVideo(MediaContainerResource mediaContainer) {
return new VideosContainerResource(
mediaContainer
.getAlbums()
.stream()
.map(MediaAlbum::mediaToVideoAlbum)
.collect(Collectors.toList()),
mediaContainer.getVideos());
} | @Test
// A media container with one album and two videos must convert to the
// equivalent videos container (album translated, videos preserved).
public void verifyMediaToVideoContainer() {
List<MediaAlbum> mediaAlbums =
ImmutableList.of(new MediaAlbum("id1", "albumb1", "This:a fake album!"));
List<VideoAlbum> videoAlbums =
ImmutableList.of(new VideoAlbum("id1", "albumb1", "This:a fake album!"));
List<VideoModel> videos = ImmutableList.of(
new VideoModel(
"Vid1", "http://fake.com/1.mp4", "A vid", "mediatype", "p1", "id1", false, null),
new VideoModel(
"Vid3", "http://fake.com/2.mp4", "A vid", "mediatype", "p3", "id1", false, null));
MediaContainerResource data = new MediaContainerResource(mediaAlbums, null, videos);
VideosContainerResource expected = new VideosContainerResource(videoAlbums, videos);
VideosContainerResource actual = MediaContainerResource.mediaToVideo(data);
assertEquals(expected, actual);
}
// Removes every FastThreadLocal bound to the current thread, firing each
// variable's removal callback, then discards the thread's InternalThreadLocalMap.
public static void removeAll() {
InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.getIfSet();
if (threadLocalMap == null) {
// Nothing was ever set on this thread.
return;
}
try {
Object v = threadLocalMap.indexedVariable(VARIABLES_TO_REMOVE_INDEX);
if (v != null && v != InternalThreadLocalMap.UNSET) {
@SuppressWarnings("unchecked")
Set<FastThreadLocal<?>> variablesToRemove = (Set<FastThreadLocal<?>>) v;
// Snapshot to an array: tlv.remove() mutates the backing set, so
// iterating it directly would throw ConcurrentModificationException.
FastThreadLocal<?>[] variablesToRemoveArray =
variablesToRemove.toArray(new FastThreadLocal[0]);
for (FastThreadLocal<?> tlv: variablesToRemoveArray) {
tlv.remove(threadLocalMap);
}
}
} finally {
// Always drop the map itself, even if a removal callback threw.
InternalThreadLocalMap.remove();
}
} | @Test
@Timeout(value = 10000, unit = TimeUnit.MILLISECONDS)
// Setting a variable then calling removeAll must invoke onRemoval and leave
// the per-thread variable count at zero.
public void testRemoveAll() throws Exception {
final AtomicBoolean removed = new AtomicBoolean();
final FastThreadLocal<Boolean> var = new FastThreadLocal<Boolean>() {
@Override
protected void onRemoval(Boolean value) {
removed.set(true);
}
};
// Initialize a thread-local variable.
assertThat(var.get(), is(nullValue()));
assertThat(FastThreadLocal.size(), is(1));
// And then remove it.
FastThreadLocal.removeAll();
assertThat(removed.get(), is(true));
assertThat(FastThreadLocal.size(), is(0));
}
@Override
// Creates a Kafka topic (or validates only, per createOptions). Returns true
// when a topic was created, false when an equivalent topic already exists;
// throws when the existing topic's properties conflict or the call fails.
public boolean createTopic(
final String topic,
final int numPartitions,
final short replicationFactor,
final Map<String, ?> configs,
final CreateTopicsOptions createOptions
) {
final Optional<Long> retentionMs = KafkaTopicClient.getRetentionMs(configs);
if (isTopicExists(topic)) {
// Existing topic must match the requested partitions/replication/retention.
validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs);
return false;
}
// DEFAULT_REPLICAS is a sentinel meaning "use the cluster default".
final short resolvedReplicationFactor = replicationFactor == TopicProperties.DEFAULT_REPLICAS
? getDefaultClusterReplication()
: replicationFactor;
final NewTopic newTopic = new NewTopic(topic, numPartitions, resolvedReplicationFactor);
newTopic.configs(toStringConfigs(configs));
try {
LOG.info("Creating topic '{}' {}",
topic,
(createOptions.shouldValidateOnly()) ? "(ONLY VALIDATE)" : ""
);
ExecutorUtil.executeWithRetries(
() -> adminClient.get().createTopics(
Collections.singleton(newTopic),
createOptions
).all().get(),
ExecutorUtil.RetryBehaviour.ON_RETRYABLE);
return true;
} catch (final InterruptedException e) {
// Restore the interrupt flag before surfacing the failure.
Thread.currentThread().interrupt();
throw new KafkaResponseGetFailedException(
"Failed to guarantee existence of topic " + topic, e);
} catch (final TopicExistsException e) {
// if the topic already exists, it is most likely because another node just created it.
// ensure that it matches the partition count, replication factor, and retention
// before returning success
validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs);
return false;
} catch (final TopicAuthorizationException e) {
throw new KsqlTopicAuthorizationException(
AclOperation.CREATE, Collections.singleton(topic));
} catch (final Exception e) {
throw new KafkaResponseGetFailedException(
"Failed to guarantee existence of topic " + topic, e);
}
} | @Test
// If the topic already exists with matching details, no createTopics call may
// reach the admin client.
public void shouldNotCreateTopicIfItAlreadyExistsWithMatchingDetails() {
// Given:
givenTopicExists("someTopic", 3, 2);
givenTopicConfigs(
"someTopic",
overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "8640000000")
);
// When:
kafkaTopicClient.createTopic("someTopic", 3, (short) 2, configs);
// Then:
verify(adminClient, never()).createTopics(any(), any());
}
// Resolves the HTTP proxy to use: NO_PROXY when no proxy host is configured,
// otherwise an HTTP proxy pointing at the configured host and port.
Proxy getProxy() {
if (proxyHost == null) {
return Proxy.NO_PROXY;
}
return new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyHost, proxyPort));
} | @Test
// A client configured with a proxy must expose an InetSocketAddress carrying
// the configured host and port.
void testGetProxy() {
DatadogHttpClient client =
new DatadogHttpClient("anApiKey", "localhost", 123, DataCenter.US, false);
assertThat(client.getProxy().address()).isInstanceOf(InetSocketAddress.class);
InetSocketAddress proxyAddress = (InetSocketAddress) client.getProxy().address();
assertThat(proxyAddress.getPort()).isEqualTo(123);
assertThat(proxyAddress.getHostString()).isEqualTo("localhost");
}
// Parses a boolean expression over Predicate class names (e.g. "a&(b|!c)")
// into a composite Predicate using a shunting-yard-style evaluation with one
// operand stack and one operator stack.
public static Predicate parse(String expression)
{
final Stack<Predicate> predicateStack = new Stack<>();
final Stack<Character> operatorStack = new Stack<>();
// Strip whitespace, then tokenize so operators come back as their own tokens.
final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
// isTokenMode == false means "re-process the operator just popped during a
// precedence reduction" instead of consuming a new token.
boolean isTokenMode = true;
while (true)
{
final Character operator;
final String token;
if (isTokenMode)
{
if (tokenizer.hasMoreTokens())
{
token = tokenizer.nextToken();
}
else
{
break;
}
if (OPERATORS.contains(token))
{
operator = token.charAt(0);
}
else
{
operator = null;
}
}
else
{
operator = operatorStack.pop();
token = null;
}
isTokenMode = true;
if (operator == null)
{
// Operand token: instantiate the named Predicate reflectively.
try
{
predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
}
catch (ClassCastException e)
{
throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
else
{
if (operatorStack.empty() || operator == '(')
{
operatorStack.push(operator);
}
else if (operator == ')')
{
// Reduce until the matching '(' then discard it.
while (operatorStack.peek() != '(')
{
evaluate(predicateStack, operatorStack);
}
operatorStack.pop();
}
else
{
// Lower-precedence operator: reduce the top of the stack first,
// then re-examine this operator on the next pass.
if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek()))
{
evaluate(predicateStack, operatorStack);
isTokenMode = false;
}
operatorStack.push(operator);
}
}
}
// Apply any remaining operators.
while (!operatorStack.empty())
{
evaluate(predicateStack, operatorStack);
}
// A well-formed expression reduces to exactly one predicate.
if (predicateStack.size() > 1)
{
throw new RuntimeException("Invalid logical expression");
}
return predicateStack.pop();
} | @Test(expectedExceptions = RuntimeException.class)
// A class that does not implement Predicate must be rejected.
public void testNotPredicate()
{
PredicateExpressionParser.parse("com.linkedin.restli.tools.data.PredicateExpressionParser");
}
// Bridges a ParSeq Task to a ListenableFuture with two-way cancellation:
// cancelling the future cancels the task, and task completion/failure/
// cancellation is propagated forward to the future.
public static <T> ListenableFuture<T> toListenableFuture(Task<T> task) {
// Setup cancellation propagation from ListenableFuture -> Task.
SettableFuture<T> listenableFuture = new SettableFuture<T>() {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
// Only report success when both the future and the task cancel.
return super.cancel(mayInterruptIfRunning) && task.cancel(new CancellationException());
}
@Override
public boolean setException(Throwable ex) {
// A CancellationException set on the future also cancels a live task.
if (!task.isDone() && ex instanceof CancellationException) {
task.cancel((CancellationException) ex);
}
return super.setException(ex);
}
};
// Setup forward event propagation Task -> ListenableFuture.
task.addListener(promise -> {
if (!promise.isFailed()) {
listenableFuture.set(promise.get());
}
else {
// ParSeq cancellation maps to future cancellation; all other
// failures map to setException.
if (promise.getError() instanceof com.linkedin.parseq.CancellationException) {
listenableFuture.cancel(true);
} else {
listenableFuture.setException(promise.getError());
}
}
});
return listenableFuture;
} | @Test
// Exercises all four propagation paths: future->task cancel, success,
// failure, and task->future cancel.
public void testToListenableFuture() throws Exception {
Task<String> task;
final SettablePromise<String> p = Promises.settable();
task = Task.async("test", () -> p);
ListenableFuture<String> future = ListenableFutureUtil.toListenableFuture(task);
// Test cancel propagation from ListenableFuture to task
future.cancel(true);
runUntilComplete(task);
Assert.assertTrue(task.isDone());
Assert.assertTrue(task.isFailed());
Assert.assertEquals(task.getError().getCause().getClass(), CancellationException.class);
final SettablePromise<String> p1 = Promises.settable();
task = Task.async("test", () -> p1);
future = ListenableFutureUtil.toListenableFuture(task);
// Test successful completion of task.
p1.done("COMPLETED");
runUntilComplete(task);
Assert.assertTrue(future.isDone());
Assert.assertEquals(future.get(), "COMPLETED");
final SettablePromise<String> p2 = Promises.settable();
task = Task.async("test", () -> p2);
future = ListenableFutureUtil.toListenableFuture(task);
p2.fail(new RuntimeException("Test"));
runUntilComplete(task);
Assert.assertTrue(future.isDone());
Assert.assertTrue(future.isDone());
try {
future.get();
Assert.fail("ExecutionException not thrown");
} catch (ExecutionException e) {
Assert.assertEquals(e.getCause().getClass(), RuntimeException.class);
Assert.assertEquals(e.getCause().getMessage(), "Test");
}
final SettablePromise<String> p3 = Promises.settable();
task = Task.async("test", () -> p3);
future = ListenableFutureUtil.toListenableFuture(task);
// Test cancellation of task.
task.cancel(new RuntimeException("Cancelled"));
Assert.assertTrue(future.isDone());
Assert.assertTrue(future.isCancelled());
try {
future.get();
Assert.fail("Cancellation Exception not thrown");
} catch (CancellationException e) {
// Ignored since we expected a cancellation exception!
} catch (Throwable e) {
Assert.fail("Unexpected Exception thrown", e);
}
}
// Returns true when the string is a usable Java identifier: non-empty, not a
// reserved word, and matching the identifier pattern.
public static boolean isValidIdentifier(String str) {
// Cheap checks first; the regex only runs for non-empty, non-reserved input.
if (!notEmpty(str) || isReserved(str)) {
return false;
}
return VALID_JAVA_IDENTIFIER.matcher(str).matches();
} | @Test
// Identifiers starting with a digit or containing '-' must be rejected.
public void notValidIdentifiers() {
assertThat(isValidIdentifier("1cls")).isFalse();
assertThat(isValidIdentifier("-cls")).isFalse();
assertThat(isValidIdentifier("A-cls")).isFalse();
}
@Override
// Adds n bytes to the currently active side-input read counter; silently does
// nothing when no counter is active.
public void addBytesRead(long n) {
if (currentCounter != null) {
currentCounter.addValue(n);
}
} | @Test
// With a fully mocked execution context, addBytesRead must forward the byte
// count to the counter created by the counter factory.
public void testAddBytesReadUpdatesCounter() {
DataflowExecutionContext mockedExecutionContext = mock(DataflowExecutionContext.class);
DataflowOperationContext mockedOperationContext = mock(DataflowOperationContext.class);
final int siIndexId = 3;
ExecutionStateTracker mockedExecutionStateTracker = mock(ExecutionStateTracker.class);
when(mockedExecutionContext.getExecutionStateTracker()).thenReturn(mockedExecutionStateTracker);
Thread mockedThreadObject = mock(Thread.class);
when(mockedExecutionStateTracker.getTrackedThread()).thenReturn(mockedThreadObject);
DataflowExecutionState mockedExecutionState = mock(DataflowExecutionState.class);
when(mockedExecutionStateTracker.getCurrentState()).thenReturn(mockedExecutionState);
NameContext mockedNameContext = mock(NameContext.class);
when(mockedExecutionState.getStepName()).thenReturn(mockedNameContext);
when(mockedNameContext.originalName()).thenReturn("DummyName");
NameContext mockedDeclaringNameContext = mock(NameContext.class);
when(mockedOperationContext.nameContext()).thenReturn(mockedDeclaringNameContext);
when(mockedDeclaringNameContext.originalName()).thenReturn("DummyDeclaringName");
CounterFactory mockedCounterFactory = mock(CounterFactory.class);
when(mockedExecutionContext.getCounterFactory()).thenReturn(mockedCounterFactory);
Counter<Long, Long> mockedCounter = mock(Counter.class);
when(mockedCounterFactory.longSum(any())).thenReturn(mockedCounter);
when(mockedExecutionContext.getExecutionStateRegistry())
.thenReturn(mock(DataflowExecutionStateRegistry.class));
DataflowSideInputReadCounter testObject =
new DataflowSideInputReadCounter(mockedExecutionContext, mockedOperationContext, siIndexId);
testObject.addBytesRead(10l);
verify(mockedCounter).addValue(10l);
}
@VisibleForTesting
// Validates that the dictionary type exists and is enabled; throws a service
// exception (DICT_TYPE_NOT_EXISTS / DICT_TYPE_NOT_ENABLE) otherwise.
public void validateDictTypeExists(String type) {
DictTypeDO dictType = dictTypeService.getDictType(type);
if (dictType == null) {
throw exception(DICT_TYPE_NOT_EXISTS);
}
// An existing but disabled type is also rejected.
if (!CommonStatusEnum.ENABLE.getStatus().equals(dictType.getStatus())) {
throw exception(DICT_TYPE_NOT_ENABLE);
}
} | @Test
// An unknown type must raise DICT_TYPE_NOT_EXISTS.
public void testValidateDictTypeExists_notExists() {
assertServiceException(() -> dictDataService.validateDictTypeExists(randomString()), DICT_TYPE_NOT_EXISTS);
}
@Override
// Writes a 16-bit value in little-endian byte order at the writer index and
// advances it by 2, growing the buffer first if needed.
public ByteBuf writeShortLE(int value) {
ensureWritable0(2);
_setShortLE(writerIndex, value);
writerIndex += 2;
return this;
} | @Test
// Writing to a released buffer must fail with IllegalReferenceCountException.
public void testWriteShortLEAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().writeShortLE(1);
}
});
}
// Splits a slash-separated path into its non-empty components; leading,
// trailing and repeated '/' characters produce no empty entries.
public static List<String> split(String path) {
// String.split yields empty strings for consecutive/leading slashes; filter them out.
String[] pathelements = path.split("/");
List<String> dirs = new ArrayList<String>(pathelements.length);
for (String pathelement : pathelements) {
if (!pathelement.isEmpty()) {
dirs.add(pathelement);
}
}
return dirs;
} | @Test
// Covers leading, trailing and duplicated slashes plus element ordering.
public void testSplitting() throws Throwable {
assertEquals(1, split("/a").size());
assertEquals(0, split("/").size());
assertEquals(3, split("/a/b/c").size());
assertEquals(3, split("/a/b/c/").size());
assertEquals(3, split("a/b/c").size());
assertEquals(3, split("/a/b//c").size());
assertEquals(3, split("//a/b/c/").size());
List<String> split = split("//a/b/c/");
assertEquals("a", split.get(0));
assertEquals("b", split.get(1));
assertEquals("c", split.get(2));
}
// Flattens the task configuration into {key -> {"value": ..., "errors": ...}};
// the errors entry is only present when a property has validation errors.
public Map<String, Map<String, String>> configAsMap() {
Map<String, Map<String, String>> configMap = new HashMap<>();
for (ConfigurationProperty property : configuration) {
Map<String, String> mapValue = new HashMap<>();
mapValue.put(VALUE_KEY, property.getValue());
if (!property.errors().isEmpty()) {
mapValue.put(ERRORS_KEY, StringUtils.join(property.errors().getAll(), ", "));
}
configMap.put(property.getConfigKeyName(), mapValue);
}
return configMap;
} | @Test
// Plain and encrypted properties must both surface their (decrypted) values
// under the VALUE_KEY of the per-key map.
public void testConfigAsMap() throws Exception {
PluginConfiguration pluginConfiguration = new PluginConfiguration("test-plugin-id", "13.4");
GoCipher cipher = new GoCipher();
List<String> keys = List.of("Avengers 1", "Avengers 2", "Avengers 3", "Avengers 4");
List<String> values = List.of("Iron man", "Hulk", "Thor", "Captain America");
Configuration configuration = new Configuration(
new ConfigurationProperty(new ConfigurationKey(keys.get(0)), new ConfigurationValue(values.get(0))),
new ConfigurationProperty(new ConfigurationKey(keys.get(1)), new ConfigurationValue(values.get(1))),
new ConfigurationProperty(new ConfigurationKey(keys.get(2)), new ConfigurationValue(values.get(2))),
new ConfigurationProperty(new ConfigurationKey(keys.get(3)), null, new EncryptedConfigurationValue(cipher.encrypt(values.get(3))), cipher)
);
PluggableTask task = new PluggableTask(pluginConfiguration, configuration);
Map<String, Map<String, String>> configMap = task.configAsMap();
assertThat(configMap.keySet().size(), is(keys.size()));
assertThat(configMap.values().size(), is(values.size()));
assertThat(configMap.keySet().containsAll(keys), is(true));
for (int i = 0; i < keys.size(); i++) {
assertThat(configMap.get(keys.get(i)).get(PluggableTask.VALUE_KEY), is(values.get(i)));
}
}
@Override
// Rewrites route units whose logical data source belongs to a readwrite-
// splitting group: each such unit is replaced by one targeting the actual
// (read or write) data source chosen by the router. Units are swapped after
// iteration to avoid mutating the collection while iterating it.
public void decorateRouteContext(final RouteContext routeContext, final QueryContext queryContext, final ShardingSphereDatabase database,
final ReadwriteSplittingRule rule, final ConfigurationProperties props, final ConnectionContext connectionContext) {
Collection<RouteUnit> toBeRemoved = new LinkedList<>();
Collection<RouteUnit> toBeAdded = new LinkedList<>();
for (RouteUnit each : routeContext.getRouteUnits()) {
String logicDataSourceName = each.getDataSourceMapper().getActualName();
// Only units mapped to a configured readwrite-splitting group are rewritten.
rule.findDataSourceGroupRule(logicDataSourceName).ifPresent(optional -> {
toBeRemoved.add(each);
String actualDataSourceName = new ReadwriteSplittingDataSourceRouter(optional, connectionContext).route(queryContext.getSqlStatementContext(), queryContext.getHintValueContext());
toBeAdded.add(new RouteUnit(new RouteMapper(logicDataSourceName, actualDataSourceName), each.getTableMappers()));
});
}
routeContext.getRouteUnits().removeAll(toBeRemoved);
routeContext.getRouteUnits().addAll(toBeAdded);
} | @Test
// A SELECT ... with a lock segment must be routed to the primary (write)
// data source, while unrelated units stay untouched.
void assertDecorateRouteContextToPrimaryDataSourceWithLock() {
RouteContext actual = mockRouteContext();
MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class);
when(sqlStatementContext.getSqlStatement()).thenReturn(selectStatement);
when(selectStatement.getLock()).thenReturn(Optional.of(mock(LockSegment.class)));
QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
RuleMetaData ruleMetaData = new RuleMetaData(Collections.singleton(staticRule));
ShardingSphereDatabase database = new ShardingSphereDatabase(DefaultDatabase.LOGIC_NAME,
mock(DatabaseType.class), mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), ruleMetaData, Collections.emptyMap());
sqlRouter.decorateRouteContext(actual, queryContext, database, staticRule, new ConfigurationProperties(new Properties()), new ConnectionContext(Collections::emptySet));
Iterator<String> routedDataSourceNames = actual.getActualDataSourceNames().iterator();
assertThat(routedDataSourceNames.next(), is(NONE_READWRITE_SPLITTING_DATASOURCE_NAME));
assertThat(routedDataSourceNames.next(), is(WRITE_DATASOURCE));
}
@Override
// Reports a circuit-breaker resource stat for the enhanced call described by
// the plugin context. No-op when reporting is disabled. The context's
// throwable may legitimately be null (successful call), so the debug log must
// not dereference it unconditionally.
public void run(EnhancedPluginContext context) throws Throwable {
if (!this.reportProperties.isEnabled()) {
return;
}
EnhancedRequestContext request = context.getRequest();
ServiceInstance serviceInstance = Optional.ofNullable(context.getTargetServiceInstance()).orElse(new DefaultServiceInstance());
// Capture once: used both for the stat and for null-safe logging below.
Throwable throwable = context.getThrowable();
ResourceStat resourceStat = PolarisEnhancedPluginUtils.createInstanceResourceStat(
serviceInstance.getServiceId(),
serviceInstance.getHost(),
serviceInstance.getPort(),
request.getUrl(),
null,
context.getDelay(),
throwable
);
// Fix: the original called context.getThrowable().getMessage() directly,
// which throws NullPointerException when the reported call had no error.
LOG.debug("Will report CircuitBreaker ResourceStat of {}. Request=[{} {}]. Response=[{}]. Delay=[{}]ms.",
resourceStat.getRetStatus().name(), request.getHttpMethod().name(), request.getUrl().getPath(),
throwable == null ? null : throwable.getMessage(), context.getDelay());
circuitBreakAPI.report(resourceStat);
} | @Test
// Disabled reporter must not touch the context; enabled reporter must accept
// a fully populated context (including a throwable) without error.
public void testRun() throws Throwable {
EnhancedPluginContext context = mock(EnhancedPluginContext.class);
// test not report
exceptionCircuitBreakerReporter.run(context);
verify(context, times(0)).getRequest();
doReturn(true).when(reporterProperties).isEnabled();
EnhancedPluginContext pluginContext = new EnhancedPluginContext();
EnhancedRequestContext request = EnhancedRequestContext.builder()
.httpMethod(HttpMethod.GET)
.url(URI.create("http://0.0.0.0/"))
.httpHeaders(new HttpHeaders())
.build();
EnhancedResponseContext response = EnhancedResponseContext.builder()
.httpStatus(200)
.build();
DefaultServiceInstance serviceInstance = new DefaultServiceInstance();
serviceInstance.setServiceId(SERVICE_PROVIDER);
pluginContext.setRequest(request);
pluginContext.setResponse(response);
pluginContext.setTargetServiceInstance(serviceInstance, null);
pluginContext.setThrowable(new RuntimeException());
exceptionCircuitBreakerReporter.run(pluginContext);
exceptionCircuitBreakerReporter.getOrder();
exceptionCircuitBreakerReporter.getName();
exceptionCircuitBreakerReporter.getType();
}
// Refreshes the caches for one Hive partition after a metastore event:
// rebuilds its stats from the new common stats plus the cached column stats,
// invalidates stale partition-key entries for the table, and stores the fresh
// partition and stats. Synchronized to keep the three caches consistent.
public synchronized void refreshPartitionByEvent(HivePartitionName hivePartitionName,
HiveCommonStats commonStats,
Partition partition) {
// Reuse previously cached column stats; only the common stats changed.
Map<String, HiveColumnStats> columnStats = get(partitionStatsCache, hivePartitionName).getColumnStats();
HivePartitionStats updatedPartitionStats = createPartitionStats(commonStats, columnStats);
DatabaseTableName
databaseTableName = DatabaseTableName.of(hivePartitionName.getDatabaseName(), hivePartitionName.getTableName());
// Drop every cached partition-key entry belonging to this table.
partitionKeysCache.asMap().keySet().stream().filter(hivePartitionValue -> hivePartitionValue.getHiveTableName().
equals(databaseTableName)).forEach(partitionKeysCache::invalidate);
partitionCache.put(hivePartitionName, partition);
partitionStatsCache.put(hivePartitionName, updatedPartitionStats);
} | @Test
// Smoke test: a refresh against the caching metastore must complete without error.
public void testRefreshPartitionByEvent() {
CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
HiveCommonStats stats = new HiveCommonStats(10, 100);
HivePartitionName hivePartitionName = HivePartitionName.of("db1", "unpartitioned_table", "col1=1");
Partition partition = cachingHiveMetastore.getPartition(
"db1", "unpartitioned_table", Lists.newArrayList("col1"));
cachingHiveMetastore.refreshPartitionByEvent(hivePartitionName, stats, partition);
}
@Override
// Builds the processing-log message for a deserialization error: a struct
// tagged with the DESERIALIZATION_ERROR type id and the error payload derived
// from the supplied config.
public SchemaAndValue get(final ProcessingLogConfig config) {
final Struct struct = new Struct(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA)
.put(ProcessingLogMessageSchema.TYPE, MessageType.DESERIALIZATION_ERROR.getTypeId())
.put(ProcessingLogMessageSchema.DESERIALIZATION_ERROR, deserializationError(config));
return new SchemaAndValue(ProcessingLogMessageSchema.PROCESSING_LOG_SCHEMA, struct);
} | @Test
// An error flagged as key-side must set the target field to "key".
public void shouldBuildErrorWithKeyComponent() {
// Given:
final DeserializationError deserError = new DeserializationError(
error,
Optional.of(record),
"topic",
true
);
// When:
final SchemaAndValue msg = deserError.get(config);
// Then:
final Struct struct = (Struct) msg.value();
final Struct deserializationError = struct.getStruct(DESERIALIZATION_ERROR);
assertThat(
deserializationError.get(DESERIALIZATION_ERROR_FIELD_TARGET),
equalTo("key")
);
}
// Returns the value of a required property, rejecting a null Properties object
// and (via checkHasText) a missing or blank value.
public static String checkRequiredProperty(Properties properties, String key) {
if (properties == null) {
throw new IllegalArgumentException("Properties are required");
}
return checkHasText(properties.getProperty(key), "Property '" + key + "' is required");
} | @Test
// Passing null Properties must fail fast with the dedicated message.
public void test_checkRequiredProperty_when_null() {
Assertions.assertThatThrownBy(() -> checkRequiredProperty(null, "some-key"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Properties are required");
}
// Exposes the wrapped Result instance without copying.
@Override public Result unwrap() {
return result;
} | @Test void unwrap() {
// The exact same instance must be returned, not a copy.
assertThat(response.unwrap()).isSameAs(result);
}
@JsonIgnore
// Derives the completed-by timestamp parameter from whichever TCT field is
// set, checked in priority order: explicit timestamp, completed-by-hour
// (expression evaluated in the configured or workflow-cron timezone), then
// duration in minutes. Throws when none of the three is present.
public LongParamDefinition getCompletedByTsParam() {
if (completedByTs != null) {
return ParamDefinition.buildParamDefinition(PARAM_NAME, completedByTs);
}
if (completedByHour != null) {
// Without an explicit tz the expression references the workflow's cron
// timezone variable rather than a quoted literal.
String timeZone = tz == null ? "WORKFLOW_CRON_TIMEZONE" : String.format("'%s'", tz);
return LongParamDefinition.builder()
.name(PARAM_NAME)
.expression(String.format(COMPLETED_HOUR_TCT_TS, timeZone, completedByHour))
.build();
}
if (durationMinutes != null) {
return LongParamDefinition.builder()
.name(PARAM_NAME)
.expression(String.format(DURATION_MINUTES_TCT_TS, durationMinutes))
.build();
}
throw new MaestroInternalError(
"Invalid TCT definition, neither of time fields is set: %s", this);
} | @Test
// completedByHour=1 with tz=UTC must produce the hour-based expression with a
// quoted timezone literal.
public void testGetCompletedByTsParamWithCompletedByHour() {
Tct tct = new Tct();
tct.setCompletedByHour(1);
tct.setTz("UTC");
LongParamDefinition expected =
LongParamDefinition.builder()
.name("completed_by_ts")
.expression(
"tz_dateint_formatter = DateTimeFormat.forPattern('yyyyMMdd').withZone(DateTimeZone.forID('UTC'));"
+ "dt = tz_dateint_formatter.parseDateTime(TARGET_RUN_DATE).plusHours(1).minusSeconds(1);"
+ "return dt.getMillis();")
.build();
LongParamDefinition actual = tct.getCompletedByTsParam();
assertEquals(expected, actual);
}
// Entry point for the CdapIO write transform: returns an unconfigured builder
// instance to be customized via the with* methods.
public static <K, V> Write<K, V> write() {
return new AutoValue_CdapIO_Write.Builder<K, V>().build();
} | @Test
// Writing through a CDAP batch sink plugin must deliver every element exactly
// once and commit the output job.
public void testWriteWithCdapBatchSinkPlugin() throws IOException {
List<KV<String, String>> data = new ArrayList<>();
for (int i = 0; i < EmployeeInputFormat.NUM_OF_TEST_EMPLOYEE_RECORDS; i++) {
data.add(KV.of(String.valueOf(i), EmployeeInputFormat.EMPLOYEE_NAME_PREFIX + i));
}
PCollection<KV<String, String>> input = p.apply(Create.of(data));
EmployeeConfig pluginConfig =
new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build();
input.apply(
"Write",
CdapIO.<String, String>write()
.withCdapPlugin(
Plugin.createBatch(
EmployeeBatchSink.class,
EmployeeOutputFormat.class,
EmployeeOutputFormatProvider.class))
.withPluginConfig(pluginConfig)
.withKeyClass(String.class)
.withValueClass(String.class)
.withLocksDirPath(tmpFolder.getRoot().getAbsolutePath()));
p.run();
List<KV<String, String>> writtenOutput = EmployeeOutputFormat.getWrittenOutput();
assertEquals(data.size(), writtenOutput.size());
assertTrue(data.containsAll(writtenOutput));
assertTrue(writtenOutput.containsAll(data));
Mockito.verify(EmployeeOutputFormat.getOutputCommitter()).commitJob(Mockito.any());
}
// Exposes the per-endpoint readiness futures (returned map is the internal one).
public Map<Endpoint, CompletableFuture<Void>> futures() {
return futures;
} | @Test
public void testImmediateCompletion() {
// With no registered readiness stages, both endpoints complete immediately.
EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
build(Optional.empty(), INFO);
assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)),
readyFutures.futures().keySet());
assertComplete(readyFutures, EXTERNAL, INTERNAL);
} |
// Sets (or, when propertyValue is null, removes) a simple-typed property
// identified by its qualified name. A non-null value replaces any existing
// property with the same name so at most one instance exists afterwards.
private void setSpecifiedSimpleTypeProperty(Types type, String qualifiedName, Object propertyValue)
{
if (propertyValue == null)
{
// Search in properties to erase
for (AbstractField child : getContainer().getAllProperties())
{
if (child.getPropertyName().equals(qualifiedName))
{
getContainer().removeProperty(child);
return;
}
}
}
else
{
AbstractSimpleProperty specifiedTypeProperty;
try
{
// Instantiate via the schema's type mapping; any reflective failure is
// surfaced as an IllegalArgumentException with the original cause kept.
TypeMapping tm = getMetadata().getTypeMapping();
specifiedTypeProperty = tm.instanciateSimpleProperty(null, getPrefix(), qualifiedName, propertyValue,
type);
}
catch (Exception e)
{
throw new IllegalArgumentException(
"Failed to create property with the specified type given in parameters", e);
}
// attribute placement for simple property has been removed
// Search in properties to erase
for (AbstractField child : getAllProperties())
{
if (child.getPropertyName().equals(qualifiedName))
{
// Replace in place: remove the old field, then add the new one.
removeProperty(child);
addProperty(specifiedTypeProperty);
return;
}
}
// No existing property with that name: simply append the new one.
addProperty(specifiedTypeProperty);
}
} | @Test
void testSetSpecifiedSimpleTypeProperty() throws Exception
{
// Set, overwrite, then remove (via null) a simple text property.
String prop = "testprop";
String val = "value";
String val2 = "value2";
schem.setTextPropertyValueAsSimple(prop, val);
assertEquals(val, schem.getUnqualifiedTextPropertyValue(prop));
schem.setTextPropertyValueAsSimple(prop, val2);
assertEquals(val2, schem.getUnqualifiedTextPropertyValue(prop));
schem.setTextPropertyValueAsSimple(prop, null);
assertNull(schem.getUnqualifiedTextProperty(prop));
} |
// Renders the MBean tree into the PDF document; DocumentException is
// translated into an IOException for callers.
public void writeMBeans(List<MBeanNode> mbeans) throws IOException {
try {
document.open();
addParagraph(getString("MBeans"), "mbeans.png");
new PdfMBeansReport(mbeans, document).toPdf();
} catch (final DocumentException e) {
throw createIOException(e);
}
// NOTE(review): close() is skipped when an exception is thrown above —
// presumably intentional (matching sibling write* methods), but confirm
// the document is not leaked on the error path.
document.close();
} | @Test
public void testWriteMBeans() throws IOException, JMException {
// Writes MBean reports both for a plain node list and a name-keyed map,
// asserting non-empty PDF output each time.
final ByteArrayOutputStream output = new ByteArrayOutputStream();
final PdfOtherReport pdfOtherReport = new PdfOtherReport(TEST_APP, output);
final List<MBeanNode> allMBeanNodes = MBeans.getAllMBeanNodes();
pdfOtherReport.writeMBeans(allMBeanNodes);
assertNotEmptyAndClear(output);
final PdfOtherReport pdfOtherReport2 = new PdfOtherReport(TEST_APP, output);
pdfOtherReport2.writeMBeans(Collections.singletonMap("TEST_APP", allMBeanNodes));
assertNotEmptyAndClear(output);
} |
/**
 * Decodes the given Cookie request-header value into a set of cookies.
 * A TreeSet is used so the returned cookies are kept in sorted order.
 */
public Set<Cookie> decode(String header) {
Set<Cookie> decoded = new TreeSet<Cookie>();
decode(decoded, header);
return decoded;
} | @Test
public void testDecodingOldRFC2965Cookies() {
// Legacy RFC 2965 attributes ($Version, $Path) must be ignored;
// only the two real name/value pairs survive decoding.
String source = "$Version=\"1\"; " +
"Part_Number1=\"Riding_Rocket_0023\"; $Path=\"/acme/ammo\"; " +
"Part_Number2=\"Rocket_Launcher_0001\"; $Path=\"/acme\"";
Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(source);
Iterator<Cookie> it = cookies.iterator();
Cookie c;
c = it.next();
assertEquals("Part_Number1", c.name());
assertEquals("Riding_Rocket_0023", c.value());
c = it.next();
assertEquals("Part_Number2", c.name());
assertEquals("Rocket_Launcher_0001", c.value());
assertFalse(it.hasNext());
} |
// Parses a serialized JSON array of column names (e.g. ["c1", "c2"]) into a
// BigQuery Clustering object; rejects any non-array JSON input.
static Clustering clusteringFromJsonFields(String jsonStringClustering) {
JsonElement jsonClustering = JsonParser.parseString(jsonStringClustering);
checkArgument(
jsonClustering.isJsonArray(),
"Received an invalid Clustering json string: %s."
+ "Please provide a serialized json array like so: [\"column1\", \"column2\"]",
jsonStringClustering);
List<String> fields =
jsonClustering.getAsJsonArray().asList().stream()
.map(JsonElement::getAsString)
.collect(Collectors.toList());
return new Clustering().setFields(fields);
} | @Test
public void testClusteringJsonConversion() {
// Round-trip: a JSON array string parses to an equal Clustering object.
Clustering clustering =
new Clustering().setFields(Arrays.asList("column1", "column2", "column3"));
String jsonClusteringFields = "[\"column1\", \"column2\", \"column3\"]";
assertEquals(clustering, BigQueryHelpers.clusteringFromJsonFields(jsonClusteringFields));
} |
// Builder setter for the protocol's thread-pool size; returns this for chaining.
public ProtocolBuilder threads(Integer threads) {
this.threads = threads;
return getThis();
} | @Test
void threads() {
// The configured thread count must survive build().
ProtocolBuilder builder = new ProtocolBuilder();
builder.threads(20);
Assertions.assertEquals(20, builder.build().getThreads());
} |
// Deletes a tenant package after confirming it exists and is not in use.
@Override
public void deleteTenantPackage(Long id) {
// Validate that the package exists
validateTenantPackageExists(id);
// Validate that the package is not currently used by any tenant
validateTenantUsed(id);
// Perform the delete
tenantPackageMapper.deleteById(id);
} | @Test
public void testDeleteTenantPackage_notExists() {
// Prepare parameters: a random id that has no matching package row
Long id = randomLongId();
// Invoke and assert the expected service exception
assertServiceException(() -> tenantPackageService.deleteTenantPackage(id), TENANT_PACKAGE_NOT_EXISTS);
} |
// Varargs convenience overload; delegates to the List-based join.
public static String join(final String... levels) {
return join(Arrays.asList(levels));
} | @Test
public void shouldJoinCorrectly() {
// Logger name segments are joined with a '.' separator.
assertThat(ProcessingLoggerUtil.join("foo", "bar"), equalTo("foo.bar"));
} |
// Convenience overload: targets the server with no additional request properties.
public KsqlTarget target(final URI server) {
return target(server, Collections.emptyMap());
} | @Test
public void shouldHandleForbiddenOnGetRequests() {
// Given: the stub server answers every request with HTTP 403
server.setErrorCode(403);
// When: a server-info GET is issued
KsqlTarget target = ksqlClient.target(serverUri);
RestResponse<ServerInfo> response = target.getServerInfo();
// Then: the client reports an erroneous response with the forbidden message
assertThat(server.getHttpMethod(), is(HttpMethod.GET));
assertThat(response.isErroneous(), is(true));
assertThat(response.getErrorMessage().getMessage(),
is("You are forbidden from using this cluster."));
} |
/**
 * Returns client connection info for the given cluster node by issuing a
 * synchronous CLIENT LIST against that node and converting the raw reply.
 */
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
RedisClient entry = getEntry(node);
RFuture<List<String>> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
List<String> list = syncFuture(f);
// toArray(new String[0]) is the idiomatic form; pre-sizing the array is
// redundant and no faster on modern JVMs.
return CONVERTER.convert(list.toArray(new String[0]));
} | @Test
public void testGetClientList() {
// The first master of a running cluster should report more than 10 clients.
RedisClusterNode master = getFirstMaster();
List<RedisClientInfo> list = connection.getClientList(master);
assertThat(list.size()).isGreaterThan(10);
} |
// Resolves the effective HTTP status, compensating for containers that leave
// a default 200 in place after an uncaught exception.
@Override public int statusCode() {
int result = ServletRuntime.get().status(response);
if (caught != null && result == 200) { // We may have a potentially bad status due to defaults
// Servlet only seems to define one exception that has a built-in code. Logic in Jetty
// defaults the status to 500 otherwise.
if (caught instanceof UnavailableException) {
return ((UnavailableException) caught).isPermanent() ? 404 : 503;
}
return 500;
}
return result;
} | @Test void statusCode() {
// With no caught exception, the container-reported status is passed through.
when(response.getStatus()).thenReturn(200);
HttpServerResponse wrapper = HttpServletResponseWrapper.create(request, response, null);
assertThat(wrapper.statusCode()).isEqualTo(200);
} |
// Builds forwarded-field properties for a dual-input projection. For each
// projected field index, every flat (atomic) source position is mapped to a
// contiguous run of target positions; fields[i] == -1 forwards the whole input.
// NOTE(review): a non -1 fields[i] casts the input to TupleTypeInfo without a
// check — presumably callers guarantee tuple inputs in that case; confirm.
public static DualInputSemanticProperties createProjectionPropertiesDual(
int[] fields,
boolean[] isFromFirst,
TypeInformation<?> inType1,
TypeInformation<?> inType2) {
DualInputSemanticProperties dsp = new DualInputSemanticProperties();
// sourceOffsets1[i] = flat field offset where the i-th tuple field of input 1 starts.
int[] sourceOffsets1;
if (inType1 instanceof TupleTypeInfo<?>) {
sourceOffsets1 = new int[inType1.getArity()];
sourceOffsets1[0] = 0;
for (int i = 1; i < inType1.getArity(); i++) {
sourceOffsets1[i] =
((TupleTypeInfo<?>) inType1).getTypeAt(i - 1).getTotalFields()
+ sourceOffsets1[i - 1];
}
} else {
sourceOffsets1 = new int[] {0};
}
// Same flat-offset table for input 2.
int[] sourceOffsets2;
if (inType2 instanceof TupleTypeInfo<?>) {
sourceOffsets2 = new int[inType2.getArity()];
sourceOffsets2[0] = 0;
for (int i = 1; i < inType2.getArity(); i++) {
sourceOffsets2[i] =
((TupleTypeInfo<?>) inType2).getTypeAt(i - 1).getTotalFields()
+ sourceOffsets2[i - 1];
}
} else {
sourceOffsets2 = new int[] {0};
}
// Walk the projection, appending each field's flat positions to the output.
int targetOffset = 0;
for (int i = 0; i < fields.length; i++) {
int sourceOffset;
int numFieldsToCopy;
int input;
if (isFromFirst[i]) {
input = 0;
if (fields[i] == -1) {
// Forward the entire first input.
sourceOffset = 0;
numFieldsToCopy = inType1.getTotalFields();
} else {
sourceOffset = sourceOffsets1[fields[i]];
numFieldsToCopy =
((TupleTypeInfo<?>) inType1).getTypeAt(fields[i]).getTotalFields();
}
} else {
input = 1;
if (fields[i] == -1) {
// Forward the entire second input.
sourceOffset = 0;
numFieldsToCopy = inType2.getTotalFields();
} else {
sourceOffset = sourceOffsets2[fields[i]];
numFieldsToCopy =
((TupleTypeInfo<?>) inType2).getTypeAt(fields[i]).getTotalFields();
}
}
for (int j = 0; j < numFieldsToCopy; j++) {
dsp.addForwardedField(input, sourceOffset + j, targetOffset + j);
}
targetOffset += numFieldsToCopy;
}
return dsp;
} | @Test
void testDualProjectionProperties() {
// Simple projection mixing fields from both five-int tuple inputs.
int[] pMap = new int[] {4, 2, 0, 1, 3, 4};
boolean[] iMap = new boolean[] {true, true, false, true, false, false};
DualInputSemanticProperties sp =
SemanticPropUtil.createProjectionPropertiesDual(
pMap, iMap, fiveIntTupleType, fiveIntTupleType);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(1);
assertThat(sp.getForwardingTargetFields(1, 0)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(3);
assertThat(sp.getForwardingTargetFields(1, 3)).contains(4);
assertThat(sp.getForwardingTargetFields(1, 4)).contains(5);
// A repeated source field forwards to multiple targets.
pMap = new int[] {4, 2, 0, 4, 0, 1};
iMap = new boolean[] {true, true, false, true, false, false};
sp =
SemanticPropUtil.createProjectionPropertiesDual(
pMap, iMap, fiveIntTupleType, fiveIntTupleType);
assertThat(sp.getForwardingTargetFields(0, 4)).containsExactly(0, 3);
assertThat(sp.getForwardingTargetFields(1, 0)).containsExactly(4, 2);
assertThat(sp.getForwardingTargetFields(0, 2)).containsExactly(1);
assertThat(sp.getForwardingTargetFields(1, 1)).containsExactly(5);
// Nested tuple fields expand to their full flat field range.
pMap = new int[] {2, 1, 0, 1};
iMap = new boolean[] {false, false, true, true};
sp =
SemanticPropUtil.createProjectionPropertiesDual(
pMap, iMap, nestedTupleType, threeIntTupleType);
assertThat(sp.getForwardingTargetFields(1, 2)).contains(0);
assertThat(sp.getForwardingTargetFields(1, 1)).contains(1);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(3);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(4);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(5);
// Deeply nested tuples on the second input.
pMap = new int[] {1, 0, 0};
iMap = new boolean[] {false, false, true};
sp =
SemanticPropUtil.createProjectionPropertiesDual(
pMap, iMap, nestedTupleType, deepNestedTupleType);
assertThat(sp.getForwardingTargetFields(1, 1)).contains(0);
assertThat(sp.getForwardingTargetFields(1, 2)).contains(1);
assertThat(sp.getForwardingTargetFields(1, 3)).contains(2);
assertThat(sp.getForwardingTargetFields(1, 4)).contains(3);
assertThat(sp.getForwardingTargetFields(1, 5)).contains(4);
assertThat(sp.getForwardingTargetFields(1, 0)).contains(5);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(6);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(7);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(8);
// POJO-in-tuple fields also expand to multiple flat positions.
pMap = new int[] {4, 2, 1, 0};
iMap = new boolean[] {true, false, true, false};
sp =
SemanticPropUtil.createProjectionPropertiesDual(
pMap, iMap, fiveIntTupleType, pojoInTupleType);
assertThat(sp.getForwardingTargetFields(0, 4)).contains(0);
assertThat(sp.getForwardingTargetFields(1, 2)).contains(1);
assertThat(sp.getForwardingTargetFields(1, 3)).contains(2);
assertThat(sp.getForwardingTargetFields(1, 4)).contains(3);
assertThat(sp.getForwardingTargetFields(1, 5)).contains(4);
assertThat(sp.getForwardingTargetFields(0, 1)).contains(5);
assertThat(sp.getForwardingTargetFields(1, 0)).contains(6);
// -1 forwards a whole (non-tuple) input as one field.
pMap = new int[] {2, 3, -1, 0};
iMap = new boolean[] {true, true, false, true};
sp = SemanticPropUtil.createProjectionPropertiesDual(pMap, iMap, fiveIntTupleType, intType);
assertThat(sp.getForwardingTargetFields(0, 2)).contains(0);
assertThat(sp.getForwardingTargetFields(0, 3)).contains(1);
assertThat(sp.getForwardingTargetFields(1, 0)).contains(2);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(3);
// -1 on a nested POJO input forwards its entire flat field range.
pMap = new int[] {-1, -1};
iMap = new boolean[] {false, true};
sp = SemanticPropUtil.createProjectionPropertiesDual(pMap, iMap, intType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(1, 0)).contains(0);
assertThat(sp.getForwardingTargetFields(1, 1)).contains(1);
assertThat(sp.getForwardingTargetFields(1, 2)).contains(2);
assertThat(sp.getForwardingTargetFields(1, 3)).contains(3);
assertThat(sp.getForwardingTargetFields(1, 4)).contains(4);
assertThat(sp.getForwardingTargetFields(1, 5)).contains(5);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(6);
// Same shape with the inputs' roles swapped.
pMap = new int[] {-1, -1};
iMap = new boolean[] {true, false};
sp = SemanticPropUtil.createProjectionPropertiesDual(pMap, iMap, intType, nestedPojoType);
assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
assertThat(sp.getForwardingTargetFields(1, 0)).contains(1);
assertThat(sp.getForwardingTargetFields(1, 1)).contains(2);
assertThat(sp.getForwardingTargetFields(1, 2)).contains(3);
assertThat(sp.getForwardingTargetFields(1, 3)).contains(4);
assertThat(sp.getForwardingTargetFields(1, 4)).contains(5);
assertThat(sp.getForwardingTargetFields(1, 5)).contains(6);
} |
// Resolves ${...} references in the given config. System-property resolution
// is toggled by KORA_SYSTEM_PROPERTIES_RESOLVE_ENABLED (env) with the
// kora.system.properties.resolve.enabled system property as fallback
// (default true). Returns the same instance when nothing changed.
public static Config resolve(Config config) {
var resolveSystemProperty = System.getenv("KORA_SYSTEM_PROPERTIES_RESOLVE_ENABLED");
if (resolveSystemProperty == null) {
resolveSystemProperty = System.getProperty("kora.system.properties.resolve.enabled", "true");
}
var ctx = new ResolveContext(config, new ArrayDeque<>(), Boolean.parseBoolean(resolveSystemProperty));
var newRoot = resolve(ctx, config.root());
if (newRoot == config.root()) {
// Nothing was substituted; avoid allocating a new config.
return config;
}
return new SimpleConfig(config.origin(), newRoot);
} | @Test
void testMultipleValues() {
// One string with several reference forms: required, nullable (present and
// absent), and defaulted (present and absent) — plus trailing literal text.
var config = fromMap(Map.of(
"value", "value",
"reference", "value: ${value}, nullableValue1: ${?value}, nullableValue2: ${?value2}, valueWithDefault: ${value:default}, valueWithDefault: ${value2:default} leftover"
)).resolve();
assertThat(config.get("reference"))
.isInstanceOf(ConfigValue.StringValue.class)
.extracting("value", InstanceOfAssertFactories.STRING)
.isEqualTo("value: value, nullableValue1: value, nullableValue2: , valueWithDefault: value, valueWithDefault: default leftover");
} |
// Adds a reservation allocation to the plan. Validates user mapping and
// uniqueness, runs the sharing policy (skipped during recovery), persists to
// the RM state store when configured, and indexes the allocation by interval.
@Override
public boolean addReservation(ReservationAllocation reservation,
boolean isRecovering) throws PlanningException {
// Verify the allocation is memory based otherwise it is not supported
InMemoryReservationAllocation inMemReservation =
(InMemoryReservationAllocation) reservation;
if (inMemReservation.getUser() == null) {
String errMsg = "The specified Reservation with ID "
+ inMemReservation.getReservationId() + " is not mapped to any user";
LOG.error(errMsg);
throw new IllegalArgumentException(errMsg);
}
writeLock.lock();
try {
if (reservationTable.containsKey(inMemReservation.getReservationId())) {
String errMsg = "The specified Reservation with ID "
+ inMemReservation.getReservationId() + " already exists";
LOG.error(errMsg);
throw new IllegalArgumentException(errMsg);
}
// Validate if we can accept this reservation, throws exception if
// validation fails
if (!isRecovering) {
policy.validate(this, inMemReservation);
// we record here the time in which the allocation has been accepted
reservation.setAcceptanceTimestamp(clock.getTime());
if (rmStateStore != null) {
rmStateStore.storeNewReservation(
ReservationSystemUtil.buildStateProto(inMemReservation),
getQueueName(), inMemReservation.getReservationId().toString());
}
}
// Index the reservation under its [start, end) interval.
ReservationInterval searchInterval = new ReservationInterval(
inMemReservation.getStartTime(), inMemReservation.getEndTime());
Set<InMemoryReservationAllocation> reservations =
currentReservations.get(searchInterval);
if (reservations == null) {
reservations = new HashSet<InMemoryReservationAllocation>();
}
if (!reservations.add(inMemReservation)) {
LOG.error("Unable to add reservation: {} to plan.",
inMemReservation.getReservationId());
return false;
}
currentReservations.put(searchInterval, reservations);
reservationTable.put(inMemReservation.getReservationId(),
inMemReservation);
incrementAllocation(inMemReservation);
LOG.info("Successfully added reservation: {} to plan.",
inMemReservation.getReservationId());
return true;
} finally {
writeLock.unlock();
}
} | @Test
public void testAddReservation() {
// Adding a fresh reservation to an empty plan must succeed and be queryable.
Plan plan = new InMemoryPlan(queueMetrics, policy, agent, totalCapacity, 1L,
resCalc, minAlloc, maxAlloc, planName, replanner, true, context);
ReservationId reservationID =
ReservationSystemTestUtil.getNewReservationId();
int[] alloc = { 10, 10, 10, 10, 10, 10 };
int start = 100;
ReservationAllocation rAllocation =
createReservationAllocation(reservationID, start, alloc);
Assert.assertNull(plan.getReservationById(reservationID));
try {
plan.addReservation(rAllocation, false);
} catch (PlanningException e) {
Assert.fail(e.getMessage());
}
doAssertions(plan, rAllocation);
checkAllocation(plan, alloc, start, 0);
} |
// Loads a GoView project by primary key; returns null when not found.
@Override
public GoViewProjectDO getProject(Long id) {
return goViewProjectMapper.selectById(id);
} | @Test
public void testGetProject() {
// Mock data: insert an existing row first
GoViewProjectDO dbGoViewProject = randomPojo(GoViewProjectDO.class);
goViewProjectMapper.insert(dbGoViewProject);// insert one existing record up front
// Prepare parameters
Long id = dbGoViewProject.getId();
// Invoke
GoViewProjectDO goViewProject = goViewProjectService.getProject(id);
// Assert: the loaded entity matches the inserted one field by field
assertPojoEquals(dbGoViewProject, goViewProject);
} |
/**
 * Parses an attribute-modification string of the form
 * {@code +key1=value1,+key2=value2,-key3} into a key -> value map
 * (delete entries map to an empty string value).
 *
 * @throws RuntimeException on a malformed entry or a duplicated key
 */
public static Map<String, String> parseToMap(String attributesModification) {
if (Strings.isNullOrEmpty(attributesModification)) {
return new HashMap<>();
}
// format: +key1=value1,+key2=value2,-key3,+key4=value4
Map<String, String> attributes = new HashMap<>();
String[] kvs = attributesModification.split(ATTR_ARRAY_SEPARATOR_COMMA);
for (String kv : kvs) {
String key;
String value;
int equalIndex = kv.indexOf(ATTR_KEY_VALUE_EQUAL_SIGN);
if (equalIndex >= 0) {
// Split on the FIRST '=' only. The previous split()-based parsing threw
// ArrayIndexOutOfBoundsException for an empty value such as "+key=" and
// silently truncated values containing a second '='.
key = kv.substring(0, equalIndex);
value = kv.substring(equalIndex + ATTR_KEY_VALUE_EQUAL_SIGN.length());
if (!key.contains(ATTR_ADD_PLUS_SIGN)) {
throw new RuntimeException("add/alter attribute format is wrong: " + key);
}
} else {
key = kv;
value = "";
if (!key.contains(ATTR_DELETE_MINUS_SIGN)) {
throw new RuntimeException("delete attribute format is wrong: " + key);
}
}
String old = attributes.put(key, value);
if (old != null) {
throw new RuntimeException("key duplication: " + key);
}
}
return attributes;
} | @Test
public void testParseBetweenStringAndMapWithoutDistortion() {
// Round-trip: parsing a string, re-serializing it, and parsing again must
// yield an identical map for each modification pattern.
List<String> testCases = Arrays.asList("-a", "+a=b,+c=d,+z=z,+e=e", "+a=b,-d", "+a=b", "-a,-b");
for (String testCase : testCases) {
assertTrue(Maps.difference(AttributeParser.parseToMap(testCase), AttributeParser.parseToMap(parse(testCase))).areEqual());
}
} |
// Accepts a log entry if it meets the minimum level, buffers it, and flushes
// the buffer once BUFFER_THRESHOLD entries have been accepted in total.
public void collectLog(LogEntry logEntry) {
if (logEntry.getLevel() == null || minLogLevel == null) {
// Cannot compare against the threshold without both levels present.
LOGGER.warn("Log level or threshold level is null. Skipping.");
return;
}
if (logEntry.getLevel().compareTo(minLogLevel) < 0) {
LOGGER.debug("Log level below threshold. Skipping.");
return;
}
buffer.offer(logEntry);
// logCount counts accepted entries across flushes; reaching the threshold
// triggers a buffer flush.
if (logCount.incrementAndGet() >= BUFFER_THRESHOLD) {
flushBuffer();
}
} | @Test
void whenThreeInfoLogsAreCollected_thenCentralLogStoreShouldStoreAllOfThem() {
// Two entries stay buffered; the third reaches the threshold and flushes all.
logAggregator.collectLog(createLogEntry(LogLevel.INFO, "Sample log message 1"));
logAggregator.collectLog(createLogEntry(LogLevel.INFO, "Sample log message 2"));
verifyNoInteractionsWithCentralLogStore();
logAggregator.collectLog(createLogEntry(LogLevel.INFO, "Sample log message 3"));
verifyCentralLogStoreInvokedTimes(3);
} |
// Converts a dot.separated string to camelCase via the shared formatter.
public static String dotToCamel(String param) {
return formatCamel(param, DOT);
} | @Test
void dotToCamel() {
// null and blank inputs normalize to the empty string.
assertThat(StringFormatUtils.dotToCamel(null)).isEqualTo("");
assertThat(StringFormatUtils.dotToCamel(" ")).isEqualTo("");
assertThat(StringFormatUtils.dotToCamel("abc.def.gh")).isEqualTo("abcDefGh");
} |
// Incremental SPDY frame decoder. Consumes as many complete frames (or frame
// fragments, for DATA and header blocks) as the buffer holds, dispatching
// callbacks to the delegate. State carries over between calls so a frame may
// arrive split across multiple buffers; on error all remaining input for the
// bad frame is discarded.
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
// High bit of the first byte distinguishes control from data frames.
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
// FIN is only reported on the final chunk of the frame.
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
// Pass along whatever portion of the compressed block is available.
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
// Once in error, drop everything that remains in this buffer.
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
} | @Test
public void testInvalidSpdySettingsFrameNumSettings() throws Exception {
// SETTINGS frame whose declared num_settings (0) contradicts the frame
// length (2 entries): must raise a frame error and drain the buffer.
short type = 4;
byte flags = 0;
int numSettings = 2;
int length = 8 * numSettings + 4;
byte idFlags = 0;
int id = RANDOM.nextInt() & 0x00FFFFFF;
int value = RANDOM.nextInt();
ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
encodeControlFrameHeader(buf, type, flags, length);
buf.writeInt(0); // invalid num_settings
for (int i = 0; i < numSettings; i++) {
buf.writeByte(idFlags);
buf.writeMedium(id);
buf.writeInt(value);
}
decoder.decode(buf);
verify(delegate).readFrameError(anyString());
assertFalse(buf.isReadable());
buf.release();
} |
// Wraps the supplier-produced expression tree in a generated method, using the
// supplier's captured expressions as the cut points for parameter extraction.
public static Expression invokeGenerated(
CodegenContext ctx,
SerializableSupplier<Expression> groupExpressionsGenerator,
String methodPrefix) {
List<Expression> cutPoint =
ExpressionUtils.extractCapturedExpressions(groupExpressionsGenerator);
return invokeGenerated(
ctx, new HashSet<>(cutPoint), groupExpressionsGenerator.get(), methodPrefix, false);
} | @Test
public void testInvokeGenerated() throws Exception {
// Generates a class whose add() returns 1 + 2 through an extracted method,
// compiles it, and checks both the result and the extracted method's name.
CodegenContext ctx = new CodegenContext();
String clsName = "TestInvokeGenerated";
ctx.setClassName(clsName);
ctx.setPackage("test");
Expression expression =
ExpressionOptimizer.invokeGenerated(
ctx, () -> new Return(new Add(Literal.ofInt(1), Literal.ofInt(2))), "test");
Code.ExprCode methodCode = new Return(expression).genCode(ctx);
ctx.addMethod("add", methodCode.code(), int.class);
String classCode = ctx.genCode();
ClassLoader classLoader =
new CodeGenerator(getClass().getClassLoader())
.compile(new CompileUnit("test", clsName, classCode));
Class<?> generatedClass = classLoader.loadClass("test." + clsName);
{
Method test = generatedClass.getMethod("add");
test.setAccessible(true);
assertEquals(test.invoke(generatedClass.newInstance()), 3);
}
{
// janino generated an static method which take instance of class instead.
assertTrue(
Arrays.stream(generatedClass.getDeclaredMethods())
.anyMatch(m -> m.getName().startsWith("test")));
}
} |
// Convenience overload: parses semantic-property annotation strings into
// 'result' without skipping incompatible types (last flag false).
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
} | @Test
void testForwardedInvalidExpression() {
// "f0" cannot be forwarded from a non-tuple (basic int) input type.
String[] forwardedFields = {"f0"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
assertThatThrownBy(
() -> {
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, forwardedFields, null, null, intType, threeIntTupleType);
})
.isInstanceOf(InvalidSemanticAnnotationException.class);
} |
// LIME explanation for a columnar input with text/binarised fields: predicts
// with the inner model, rebuilds the example with simplified (tokenized) text
// features, samples perturbed neighbours, fits a sparse surrogate model, and
// evaluates it against the real model's predictions on the sample.
protected Pair<LIMEExplanation, List<Example<Regressor>>> explainWithSamples(Map<String, String> input) {
Optional<Example<Label>> optExample = generator.generateExample(input,false);
if (optExample.isPresent()) {
Example<Label> example = optExample.get();
if ((textDomain.size() == 0) && (binarisedCDFs.size() == 0)) {
// Short circuit if there are no text or binarised fields.
return explainWithSamples(example);
} else {
Prediction<Label> prediction = innerModel.predict(example);
// Build the input example with simplified text features
ArrayExample<Regressor> labelledExample = new ArrayExample<>(transformOutput(prediction));
// Add the tabular features
for (Feature f : example) {
if (tabularDomain.getID(f.getName()) != -1) {
labelledExample.add(f);
}
}
// Extract the tabular features into a SparseVector for later
SparseVector tabularVector = SparseVector.createSparseVector(labelledExample,tabularDomain,false);
// Tokenize the text fields, and generate the perturbed text representation
Map<String, String> exampleTextValues = new HashMap<>();
Map<String, List<Token>> exampleTextTokens = new HashMap<>();
for (Map.Entry<String,FieldProcessor> e : textFields.entrySet()) {
String value = input.get(e.getKey());
if (value != null) {
List<Token> tokens = tokenizerThreadLocal.get().tokenize(value);
for (int i = 0; i < tokens.size(); i++) {
// One binary feature per token occurrence, named by field/token/position.
labelledExample.add(nameFeature(e.getKey(),tokens.get(i).text,i),1.0);
}
exampleTextValues.put(e.getKey(),value);
exampleTextTokens.put(e.getKey(),tokens);
}
}
// Sample a dataset.
List<Example<Regressor>> sample = sampleData(tabularVector,exampleTextValues,exampleTextTokens);
// Generate a sparse model on the sampled data.
SparseModel<Regressor> model = trainExplainer(labelledExample, sample);
// Test the sparse model against the predictions of the real model.
List<Prediction<Regressor>> predictions = new ArrayList<>(model.predict(sample));
predictions.add(model.predict(labelledExample));
RegressionEvaluation evaluation = evaluator.evaluate(model,predictions,new SimpleDataSourceProvenance("LIMEColumnar sampled data",regressionFactory));
return new Pair<>(new LIMEExplanation(model, prediction, evaluation),sample);
}
} else {
throw new IllegalArgumentException("Label not found in input " + input.toString());
}
} | @Test
public void testBinarisedCategorical() throws URISyntaxException {
// Each perturbed sample must contain exactly one feature per binarised
// categorical column (A-D); text features may vary in count.
Pair<RowProcessor<Label>,Dataset<Label>> pair = generateBinarisedDataset();
RowProcessor<Label> rp = pair.getA();
Dataset<Label> dataset = pair.getB();
XGBoostClassificationTrainer trainer = new XGBoostClassificationTrainer(50);
Model<Label> model = trainer.train(dataset);
SparseTrainer<Regressor> sparseTrainer = new CARTJointRegressionTrainer(4,true);
LIMEColumnar lime = new LIMEColumnar(new SplittableRandom(1),model,sparseTrainer,1000,rp,tokenizer);
Map<String,String> testExample = new HashMap<>();
testExample.put("A","Small");
testExample.put("B","4.0");
testExample.put("C","4.0");
testExample.put("D","Red");
testExample.put("TextField","The full text field has more words in it than other fields.");
Pair<LIMEExplanation, List<Example<Regressor>>> explanation = lime.explainWithSamples(testExample);
for (Example<Regressor> e : explanation.getB()) {
int aCounter = 0;
int bCounter = 0;
int cCounter = 0;
int dCounter = 0;
int textCounter = 0;
for (Feature f : e) {
String featureName = f.getName();
if (featureName.startsWith("A")) {
aCounter++;
} else if (featureName.startsWith("B")) {
bCounter++;
} else if (featureName.startsWith("C")) {
cCounter++;
} else if (featureName.startsWith("D")) {
dCounter++;
} else if (featureName.startsWith("TextField")) {
textCounter++;
} else {
fail("Unknown feature with name " + featureName);
}
}
if (aCounter != 1) {
fail("Should only sample one A feature");
}
if (bCounter != 1) {
fail("Should only sample one B feature");
}
if (cCounter != 1) {
fail("Should only sample one C feature");
}
if (dCounter != 1) {
fail("Should only sample one D feature");
}
}
} |
// Rewrites every transform in the pipeline whose spec URN matches {@code urn},
// replacing it with the PTransform (plus any new components) produced by the
// supplied TransformReplacement. Transforms for which the replacement returns
// null are left untouched. The returned pipeline shares unmodified components
// with the original.
public static Pipeline updateTransform(
String urn, Pipeline originalPipeline, TransformReplacement compositeBuilder) {
Components.Builder resultComponents = originalPipeline.getComponents().toBuilder();
for (Map.Entry<String, PTransform> pt :
originalPipeline.getComponents().getTransformsMap().entrySet()) {
if (pt.getValue().getSpec() != null && urn.equals(pt.getValue().getSpec().getUrn())) {
MessageWithComponents updated =
compositeBuilder.getReplacement(pt.getKey(), originalPipeline.getComponents());
// A null replacement means "leave this occurrence alone".
if (updated == null) {
continue;
}
// The replacement must preserve the original transform's outputs so
// downstream consumers remain wired correctly.
checkArgument(
updated.getPtransform().getOutputsMap().equals(pt.getValue().getOutputsMap()),
"A %s must produce all of the outputs of the original %s",
TransformReplacement.class.getSimpleName(),
PTransform.class.getSimpleName());
// Drop the original subtransforms before merging in the replacement's
// components, then install the replacement under the original id.
removeSubtransforms(pt.getValue(), resultComponents);
resultComponents
.mergeFrom(updated.getComponents())
.putTransforms(pt.getKey(), updated.getPtransform());
}
}
return originalPipeline.toBuilder().setComponents(resultComponents).build();
} | @Test
// Verifies that updateTransform rewrites every transform matching the target
// URN ("second" and "third"), installs each replacement's subtransform, and
// leaves non-matching transforms and unrelated components untouched.
public void replacesMultiple() {
RunnerApi.Pipeline p =
Pipeline.newBuilder()
.addAllRootTransformIds(ImmutableList.of("first", "second"))
.setComponents(
Components.newBuilder()
.putTransforms(
"first",
PTransform.newBuilder()
.setSpec(FunctionSpec.newBuilder().setUrn("beam:first"))
.build())
.putTransforms(
"second",
PTransform.newBuilder()
.setSpec(FunctionSpec.newBuilder().setUrn("beam:repeated"))
.build())
.putTransforms(
"third",
PTransform.newBuilder()
.setSpec(FunctionSpec.newBuilder().setUrn("beam:repeated"))
.build())
.putPcollections(
"intermediatePc",
PCollection.newBuilder().setUniqueName("intermediate").build())
.putCoders(
"coder",
Coder.newBuilder().setSpec(FunctionSpec.getDefaultInstance()).build()))
.build();
ByteString newPayload = ByteString.copyFrom("foo-bar-baz".getBytes(StandardCharsets.UTF_8));
// The replacement gives each matching transform a new spec/payload and a
// per-transform "<id>_sub" subtransform registered in the new components.
Pipeline updated =
ProtoOverrides.updateTransform(
"beam:repeated",
p,
(transformId, existingComponents) -> {
String subtransform = String.format("%s_sub", transformId);
return MessageWithComponents.newBuilder()
.setPtransform(
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn("beam:repeated:replacement")
.setPayload(newPayload))
.addSubtransforms(subtransform))
.setComponents(
Components.newBuilder()
.putTransforms(
subtransform,
PTransform.newBuilder().setUniqueName(subtransform).build()))
.build();
});
PTransform updatedSecond = updated.getComponents().getTransformsOrThrow("second");
PTransform updatedThird = updated.getComponents().getTransformsOrThrow("third");
// Both matching transforms were actually rewritten...
assertThat(updatedSecond, not(equalTo(p.getComponents().getTransformsOrThrow("second"))));
assertThat(updatedThird, not(equalTo(p.getComponents().getTransformsOrThrow("third"))));
// ...each with its own subtransform and the shared replacement payload.
assertThat(updatedSecond.getSubtransformsList(), contains("second_sub"));
assertThat(updatedSecond.getSpec().getPayload(), equalTo(newPayload));
assertThat(updatedThird.getSubtransformsList(), contains("third_sub"));
assertThat(updatedThird.getSpec().getPayload(), equalTo(newPayload));
// The replacement components were merged into the result pipeline.
assertThat(updated.getComponents().getTransformsMap(), hasKey("second_sub"));
assertThat(updated.getComponents().getTransformsMap(), hasKey("third_sub"));
assertThat(
updated.getComponents().getTransformsOrThrow("second_sub").getUniqueName(),
equalTo("second_sub"));
assertThat(
updated.getComponents().getTransformsOrThrow("third_sub").getUniqueName(),
equalTo("third_sub"));
} |
// Looks up the metadata for a cached page and records the access with the
// page directory's evictor so eviction ordering reflects recency of use.
// Throws PageNotFoundException when the page is not present in the index.
@Override
@GuardedBy("getLock()")
public PageInfo getPageInfo(PageId pageId) throws PageNotFoundException {
if (!mPages.contains(INDEX_PAGE_ID, pageId)) {
throw new PageNotFoundException(String.format("Page %s could not be found", pageId));
}
// The contains()/get pair is not atomic on its own; it relies on the caller
// holding the lock named in @GuardedBy, so the page cannot vanish in between.
PageInfo pageInfo = mPages.getFirstByField(INDEX_PAGE_ID, pageId);
// Notify the evictor that this page was accessed (recency bookkeeping).
pageInfo.getLocalCacheDir().getEvictor().updateOnGet(pageId);
return pageInfo;
} | @Test
// A page added to the meta store is retrievable with identical metadata.
public void getPageInfo() throws Exception {
mMetaStore.addPage(mPage, mPageInfo);
assertEquals(mPageInfo, mMetaStore.getPageInfo(mPage));
} |
public String getSecurityCredentialsUrl() {
    // An explicitly configured URL always takes precedence.
    if (securityCredentialsUrl != null) {
        return securityCredentialsUrl;
    }
    // Otherwise derive the RAM metadata-service URL from the role name,
    // when one is configured; with neither set, the result is null.
    if (ramRoleName != null) {
        return RAM_SECURITY_CREDENTIALS_URL + ramRoleName;
    }
    return null;
} | @Test
// With neither an explicit URL nor a RAM role configured the getter returns
// null; an explicitly configured URL is then returned verbatim.
// NOTE(review): this mutates the shared StsConfig singleton — later tests
// will observe "localhost" unless the field is reset; verify test isolation.
void testGetSecurityCredentialsUrl() {
assertNull(StsConfig.getInstance().getSecurityCredentialsUrl());
String expect = "localhost";
StsConfig.getInstance().setSecurityCredentialsUrl(expect);
assertEquals(expect, StsConfig.getInstance().getSecurityCredentialsUrl());
} |
@Override
public void handlerPlugin(final PluginData pluginData) {
    // Ignore missing or disabled plugin definitions.
    if (Objects.isNull(pluginData) || !Boolean.TRUE.equals(pluginData.getEnabled())) {
        return;
    }
    // Parse the plugin's JSON config; bail out if it cannot be deserialised.
    final TarsRegisterConfig newConfig =
            GsonUtils.getInstance().fromJson(pluginData.getConfig(), TarsRegisterConfig.class);
    if (Objects.isNull(newConfig)) {
        return;
    }
    // (Re)initialise the application-config cache on first sight of a config
    // or whenever the config actually changed.
    final TarsRegisterConfig cached = Singleton.INST.get(TarsRegisterConfig.class);
    if (Objects.isNull(cached) || !newConfig.equals(cached)) {
        ApplicationConfigCache.getInstance().init(newConfig);
    }
    // Remember the latest config for subsequent change detection.
    Singleton.INST.single(TarsRegisterConfig.class, newConfig);
} | @Test
// Handling an enabled plugin caches the parsed thread-pool config in the
// singleton holder with all fields deserialised from the JSON payload.
public void testHandlerPlugin() {
final PluginData pluginData = new PluginData("id", "name", "{\"threadpool\":\"cached\",\"corethreads\":1,\"threads\":2,\"queues\":3}", "0", true, null);
tarsPluginDataHandlerUnderTest.handlerPlugin(pluginData);
assertTrue(pluginData.getName().endsWith("name"));
// The parsed config must now be available via the singleton cache.
TarsRegisterConfig config = Singleton.INST.get(TarsRegisterConfig.class);
Assertions.assertEquals(config.getThreadpool(), "cached");
Assertions.assertEquals(config.getCorethreads(), 1);
Assertions.assertEquals(config.getThreads(), 2);
Assertions.assertEquals(config.getQueues(), 3);
} |
public static String getName(DistributedObject distributedObject) {
    /*
     * Some distributed objects (e.g. `ICache`) carry a prefixed name. Both
     * `DistributedObject` and `javax.cache.Cache` declare the same
     * `String getName()` signature, but with different meanings: the JCache
     * spec (and backward compatibility) requires the pure cache name, while
     * the distributed-object side needs the fully qualified name — for a
     * cache that is the Hazelcast prefix (`/hz`), any URI/classloader prefix,
     * and the pure name combined. `PrefixedDistributedObject` exists to
     * expose that fully qualified name explicitly, so prefer it whenever the
     * object implements it.
     */
    return distributedObject instanceof PrefixedDistributedObject prefixed
            ? prefixed.getPrefixedName()
            : distributedObject.getName();
} | @Test
// For a PrefixedDistributedObject, getName must return the prefixed name via
// getPrefixedName() and must not call the plain getName() at all.
public void testGetName_withPrefixedDistributedObject() {
PrefixedDistributedObject distributedObject = mock(PrefixedDistributedObject.class);
when(distributedObject.getPrefixedName()).thenReturn("MockedPrefixedDistributedObject");
String name = DistributedObjectUtil.getName(distributedObject);
assertEquals("MockedPrefixedDistributedObject", name);
// Only the prefixed accessor may be touched.
verify(distributedObject).getPrefixedName();
verifyNoMoreInteractions(distributedObject);
} |
// Returns all menu records, delegating straight to the mapper's full scan.
@Override
public List<MenuDO> getMenuList() {
return menuMapper.selectList();
} | @Test
// Inserting two menus and listing them returns both records.
public void testGetMenuList_all() {
// mock the data
MenuDO menu100 = randomPojo(MenuDO.class);
menuMapper.insert(menu100);
MenuDO menu101 = randomPojo(MenuDO.class);
menuMapper.insert(menu101);
// prepare parameters (none needed)
// invoke
List<MenuDO> list = menuService.getMenuList();
// assert
assertEquals(2, list.size());
// NOTE(review): assumes selectList() preserves insertion order — verify the
// mapper's ordering guarantee, otherwise match by id instead of position.
assertPojoEquals(menu100, list.get(0));
assertPojoEquals(menu101, list.get(1));
} |
// Applies the new input-partition assignment to every task whose removal
// future did not fail, refreshing each task's source-topic mapping from the
// topology metadata, then hands the task back to the state updater.
private void updateInputPartitions(final Map<TaskId, CompletableFuture<StateUpdater.RemovedTaskResult>> futures,
final Map<TaskId, Set<TopicPartition>> newInputPartitions,
final Map<TaskId, RuntimeException> failedTasks) {
getNonFailedTasks(futures, failedTasks).forEach(task -> {
task.updateInputPartitions(
newInputPartitions.get(task.id()),
topologyMetadata.nodeToSourceTopics(task.id())
);
// Re-register so the state updater resumes work with the new partitions.
stateUpdater.add(task);
});
} | @Test
// A running standby task reassigned with different input partitions must be
// updated in place (updateInputPartitions called with the new partitions).
public void shouldUpdateExistingStandbyTaskIfStandbyIsReassignedWithDifferentInputPartitionWithoutStateUpdater() {
final StandbyTask standbyTask = standbyTask(taskId03, taskId03ChangelogPartitions)
.inState(State.RUNNING)
.withInputPartitions(taskId03Partitions).build();
updateExistingStandbyTaskIfStandbyIsReassignedWithoutStateUpdater(standbyTask, taskId04Partitions);
verify(standbyTask).updateInputPartitions(eq(taskId04Partitions), any());
} |
@Udf
public <T> List<T> slice(
    @UdfParameter(description = "the input array") final List<T> in,
    @UdfParameter(description = "start index") final Integer from,
    @UdfParameter(description = "end index") final Integer to) {
  // Null input propagates as null (SQL semantics).
  if (in == null) {
    return null;
  }
  // SQL arrays are 1-indexed and the end index is inclusive, so translate to
  // Java's 0-indexed, end-exclusive subList bounds. Null endpoints default to
  // the start/end of the list.
  final int start = from == null ? 0 : from - 1;
  final int end = to == null ? in.size() : to;
  // Validate explicitly instead of catching IndexOutOfBoundsException:
  // ArrayList.subList throws IllegalArgumentException (not IOOBE) when
  // start > end, which the original catch block let escape to the caller.
  // All invalid ranges now uniformly yield null.
  if (start < 0 || end > in.size() || start > end) {
    return null;
  }
  return in.subList(start, end);
} | @Test
// slice(list, 2, 2) with 1-indexed inclusive bounds yields the single middle
// element.
public void shouldOneElementSlice() {
// Given:
final List<String> list = Lists.newArrayList("a", "b", "c");
// When:
final List<String> slice = new Slice().slice(list, 2, 2);
// Then:
assertThat(slice, is(Lists.newArrayList("b")));
} |
String renderInstructionForDisplay(Instruction instr) {
    // Default: any instruction without special handling renders via its
    // natural string representation. Further special cases would slot in
    // ahead of this fallback.
    if (!(instr instanceof Instructions.ExtensionInstructionWrapper)) {
        return instr.toString();
    }
    // Extension instruction wrappers get a compact "TYPE:instruction" form.
    final Instructions.ExtensionInstructionWrapper wrapper =
            (Instructions.ExtensionInstructionWrapper) instr;
    return wrapper.type() + COLON + wrapper.extensionInstruction();
} | @Test
// The display renderer shortens an extension instruction's verbose toString
// (which includes the device id) to the compact TYPE:instruction form.
public void renderExtensionInstruction() {
title("renderExtensionInstruction");
ExtensionTreatment extn = new Ofdpa3SetMplsType((short) 32);
DeviceId devid = deviceId(DEV_OF_204);
instr = Instructions.extension(extn, devid);
string = instr.toString();
render = handler.renderInstructionForDisplay(instr);
print(string);
print(render);
assertEquals("unexpected toString", EXT_FULL_STR, string);
assertEquals("unexpected short string", EXT_NO_DPID, render);
} |
// Records a watermark value observed on the given input queue and returns the
// coalesced watermark to forward downstream. Based on the test below this
// appears to return Long.MIN_VALUE while no watermark can be forwarded yet —
// confirm against the implementing class.
public abstract long observeWm(int queueIndex, long wmValue); | @Test
public void when_i1HasWm_i2Idle_then_forwardedImmediately() {
// Queue 0 reports wm=100, but queue 1 is still unknown: nothing forwarded.
assertEquals(Long.MIN_VALUE, wc.observeWm(0, 100));
// Once queue 1 signals idle, queue 0's watermark is forwarded immediately.
assertEquals(100, wc.observeWm(1, IDLE_MESSAGE.timestamp()));
} |
// Resolves the named resource (via getResourceObj) and reads its entire
// content as a UTF-8 decoded string.
public static String readUtf8Str(String resource) {
return getResourceObj(resource).readUtf8Str();
} | @Test
// A FileResource exposes the underlying file's name and yields non-empty
// UTF-8 content for an existing file.
public void fileResourceTest(){
final FileResource resource = new FileResource(FileUtil.file("test.xml"));
assertEquals("test.xml", resource.getName());
assertTrue(StrUtil.isNotEmpty(resource.readUtf8Str()));
} |
// Releases this credential service: stops the background credential watcher
// if one was ever started, then logs the release for the owning application.
public void free() {
if (watcher != null) {
watcher.stop();
}
LOGGER.info("[{}] {} is freed", appName, this.getClass().getSimpleName());
} | @Test
// free() must stop the injected watcher exactly once. The watcher field is
// private, so it is injected via reflection around the singleton's __init__.
void testFree() throws NoSuchFieldException, IllegalAccessException {
CredentialService credentialService1 = CredentialService.getInstance();
CredentialWatcher mockWatcher = mock(CredentialWatcher.class);
Field watcherField = CredentialService.class.getDeclaredField("watcher");
watcherField.setAccessible(true);
watcherField.set(credentialService1, mockWatcher);
//when
credentialService1.free();
//then
verify(mockWatcher, times(1)).stop();
} |
// Delegates the writability check to the wrapped, resolved file object.
@Override
public boolean isWriteable() throws FileSystemException {
return resolvedFileObject.isWriteable();
} | @Test
// isWriteable() must reflect whatever the delegate reports, for both true
// and false, and must call the delegate exactly once per invocation.
public void testDelegatesIsWritable() throws FileSystemException {
when( resolvedFileObject.isWriteable() ).thenReturn( true );
assertTrue( fileObject.isWriteable() );
when( resolvedFileObject.isWriteable() ).thenReturn( false );
assertFalse( fileObject.isWriteable() );
verify( resolvedFileObject, times( 2 ) ).isWriteable();
} |
public static void checkState(boolean b) {
    // Fail-fast state assertion: nothing to do when the condition holds;
    // otherwise raise the message-less IllegalStateException, mirroring the
    // no-argument Guava-style precondition overload.
    if (b) {
        return;
    }
    throw new IllegalStateException();
} | @Test
// Exercises checkState's message formatting when the number of %s
// placeholders does not match the number of arguments: surplus arguments are
// appended as "[A,B,C]", and surplus placeholders are left verbatim.
// NOTE(review): the try blocks have no fail() after the checkState(false, ...)
// calls — if no exception were thrown the test would silently pass; confirm
// whether that is intentional.
public void testPreconditionsMalformedState(){
//No %s:
Preconditions.checkState(true, "This is malformed", "A", "B", "C");
try{
Preconditions.checkState(false, "This is malformed", "A", "B", "C");
} catch (IllegalStateException e){
assertEquals("This is malformed [A,B,C]", e.getMessage());
}
//More args than %s:
Preconditions.checkState(true, "This is %s malformed", "A", "B", "C");
try{
Preconditions.checkState(false, "This is %s malformed", "A", "B", "C");
} catch (IllegalStateException e){
assertEquals("This is A malformed [B,C]", e.getMessage());
}
//No args
Preconditions.checkState(true, "This is %s %s malformed");
try{
Preconditions.checkState(false, "This is %s %s malformed");
} catch (IllegalStateException e){
assertEquals("This is %s %s malformed", e.getMessage());
}
//More %s than args
Preconditions.checkState(true, "This is %s %s malformed", "A");
try{
Preconditions.checkState(false, "This is %s %s malformed", "A");
} catch (IllegalStateException e){
assertEquals("This is A %s malformed", e.getMessage());
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.