| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
public static synchronized @Nonnull Map<String, Object> loadYamlFile(File file)
throws Exception {
try (FileInputStream inputStream = new FileInputStream(file)) {
Map<String, Object> yamlResult =
(Map<String, Object>) loader.loadFromInputStream(inputStream);
return yamlResult == null ? new HashMap<>() : yamlResult;
} catch (FileNotFoundException e) {
LOG.error("Failed to find YAML file", e);
throw e;
} catch (IOException | YamlEngineException e) {
if (e instanceof MarkedYamlEngineException) {
YamlEngineException exception =
wrapExceptionToHiddenSensitiveData((MarkedYamlEngineException) e);
LOG.error("Failed to parse YAML configuration", exception);
throw exception;
} else {
throw e;
}
}
}
|
@Test
void testLoadYamlFile() throws Exception {
File confFile = new File(tmpDir, "test.yaml");
try (final PrintWriter pw = new PrintWriter(confFile)) {
pw.println("key1: value1");
pw.println("key2: ");
pw.println(" subKey1: value2");
pw.println("key3: [a, b, c]");
pw.println("key4: {k1: v1, k2: v2, k3: v3}");
pw.println("key5: '*'");
pw.println("key6: true");
pw.println("key7: 'true'");
} catch (FileNotFoundException e) {
throw new RuntimeException(e);
}
Map<String, Object> yamlData = YamlParserUtils.loadYamlFile(confFile);
assertThat(yamlData).isNotNull();
assertThat(yamlData).containsEntry("key1", "value1");
assertThat(((Map<?, ?>) yamlData.get("key2")).get("subKey1")).isEqualTo("value2");
assertThat(yamlData).containsEntry("key3", Arrays.asList("a", "b", "c"));
Map<String, String> map = new HashMap<>();
map.put("k1", "v1");
map.put("k2", "v2");
map.put("k3", "v3");
assertThat(yamlData).containsEntry("key4", map);
assertThat(yamlData).containsEntry("key5", "*");
assertThat((Boolean) yamlData.get("key6")).isTrue();
assertThat(yamlData).containsEntry("key7", "true");
}
|
public Future<KafkaCluster> prepareKafkaCluster(
Kafka kafkaCr,
List<KafkaNodePool> nodePools,
Map<String, Storage> oldStorage,
Map<String, List<String>> currentPods,
KafkaVersionChange versionChange,
KafkaStatus kafkaStatus,
boolean tryToFixProblems) {
return createKafkaCluster(kafkaCr, nodePools, oldStorage, currentPods, versionChange)
.compose(kafka -> brokerRemovalCheck(kafkaCr, kafka))
.compose(kafka -> {
if (checkFailed() && tryToFixProblems) {
// We have a failure, and should try to fix issues
// Once we fix it, we call this method again, but this time with tryToFixProblems set to false
return revertScaleDown(kafka, kafkaCr, nodePools)
.compose(kafkaAndNodePools -> revertRoleChange(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs()))
.compose(kafkaAndNodePools -> prepareKafkaCluster(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs(), oldStorage, currentPods, versionChange, kafkaStatus, false));
} else if (checkFailed()) {
// We have a failure, but we should not try to fix it
List<String> errors = new ArrayList<>();
if (scaleDownCheckFailed) {
errors.add("Cannot scale-down Kafka brokers " + kafka.removedNodes() + " because they have assigned partition-replicas.");
}
if (usedToBeBrokersCheckFailed) {
errors.add("Cannot remove the broker role from nodes " + kafka.usedToBeBrokerNodes() + " because they have assigned partition-replicas.");
}
return Future.failedFuture(new InvalidResourceException("Following errors were found when processing the Kafka custom resource: " + errors));
} else {
// If everything succeeded, we return the KafkaCluster object
// If any warning conditions exist from the reverted changes, we add them to the status
if (!warningConditions.isEmpty()) {
kafkaStatus.addConditions(warningConditions);
}
return Future.succeededFuture(kafka);
}
});
}
|
@Test
public void testNewClusterWithKRaft(VertxTestContext context) {
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
KafkaStatus kafkaStatus = new KafkaStatus();
KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier);
Checkpoint async = context.checkpoint();
creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS, POOL_A, POOL_B), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true)
.onComplete(context.succeeding(kc -> context.verify(() -> {
// Kafka cluster is created
assertThat(kc, is(notNullValue()));
assertThat(kc.nodes().size(), is(9));
assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2, 3, 4, 5, 6, 7, 8)));
assertThat(kc.removedNodes(), is(Set.of()));
// Check the status conditions
assertThat(kafkaStatus.getConditions(), is(nullValue()));
// No scale-down => scale-down check is not done
verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any());
async.flag();
})));
}
|
@Override
public CommandLineImpl parse(final List<String> originalArgs, final Logger logger) {
return CommandLineImpl.of(originalArgs, logger);
}
|
@Test
public void testVersion() throws Exception {
final CommandLineParserImpl parser = new CommandLineParserImpl();
final CommandLineImpl commandLine = parse(parser, "--version");
assertEquals(Command.NONE, commandLine.getCommand());
assertEquals("Embulk " + EmbulkVersion.VERSION + NEWLINE, commandLine.getStdOut());
assertEquals("", commandLine.getStdErr());
}
|
@PublicEvolving
public static <IN1, IN2, OUT> TypeInformation<OUT> getCoGroupReturnTypes(
CoGroupFunction<IN1, IN2, OUT> coGroupInterface,
TypeInformation<IN1> in1Type,
TypeInformation<IN2> in2Type) {
return getCoGroupReturnTypes(coGroupInterface, in1Type, in2Type, null, false);
}
|
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void testBasicArray() {
// use getCoGroupReturnTypes()
RichCoGroupFunction<?, ?, ?> function =
new RichCoGroupFunction<String[], String[], String[]>() {
private static final long serialVersionUID = 1L;
@Override
public void coGroup(
Iterable<String[]> first,
Iterable<String[]> second,
Collector<String[]> out)
throws Exception {
// nothing to do
}
};
TypeInformation<?> ti =
TypeExtractor.getCoGroupReturnTypes(
function,
(TypeInformation) TypeInformation.of(new TypeHint<String[]>() {}),
(TypeInformation) TypeInformation.of(new TypeHint<String[]>() {}));
assertThat(ti.isBasicType()).isFalse();
assertThat(ti.isTupleType()).isFalse();
// Due to a Java 6 bug the classification can be slightly wrong
assertThat(
ti instanceof BasicArrayTypeInfo<?, ?>
|| ti instanceof ObjectArrayTypeInfo<?, ?>)
.isTrue();
if (ti instanceof BasicArrayTypeInfo<?, ?>) {
assertThat(ti).isEqualTo(BasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO);
} else {
assertThat(((ObjectArrayTypeInfo<?, ?>) ti).getComponentInfo())
.isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
}
}
|
static void checkValidIndexName(String indexName) {
if (indexName.length() > MAX_INDEX_NAME_LENGTH) {
throw new IllegalArgumentException(
"Index name "
+ indexName
+ " cannot be longer than "
+ MAX_INDEX_NAME_LENGTH
+ " characters.");
}
Matcher matcher = ILLEGAL_INDEX_NAME_CHARS.matcher(indexName);
if (matcher.find()) {
throw new IllegalArgumentException(
"Index name "
+ indexName
+ " is not a valid name. Character \""
+ matcher.group()
+ "\" is not allowed.");
}
if (indexName.charAt(0) == '-' || indexName.charAt(0) == '_' || indexName.charAt(0) == '+') {
throw new IllegalArgumentException(
"Index name " + indexName + " can not start with -, _ or +.");
}
}
|
@Test
public void testCheckValidIndexNameThrowsErrorWhenNameContainsPoundSymbol() {
assertThrows(IllegalArgumentException.class, () -> checkValidIndexName("test#collection"));
}
|
@VisibleForTesting
void setTimeout(FTPClient client, Configuration conf) {
long timeout = conf.getLong(FS_FTP_TIMEOUT, DEFAULT_TIMEOUT);
client.setControlKeepAliveTimeout(timeout);
}
|
@Test
public void testFTPSetTimeout() {
Configuration conf = new Configuration();
FTPClient client = new FTPClient();
FTPFileSystem ftp = new FTPFileSystem();
ftp.setTimeout(client, conf);
assertEquals(client.getControlKeepAliveTimeout(),
FTPFileSystem.DEFAULT_TIMEOUT);
long timeout = 600;
conf.setLong(FTPFileSystem.FS_FTP_TIMEOUT, timeout);
ftp.setTimeout(client, conf);
assertEquals(client.getControlKeepAliveTimeout(), timeout);
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
// {"code":400,"message":"Bad Request","debugInfo":"Node ID must be positive.","errorCode":-80001}
final PathAttributes attributes = new PathAttributes();
if(session.userAccount().isUserInRole(SDSPermissionsFeature.ROOM_MANAGER_ROLE)) {
// We need to map user roles to ACLs in order to decide if creating a top-level room is allowed
final Acl acl = new Acl();
acl.addAll(new Acl.CanonicalUser(), SDSPermissionsFeature.CREATE_ROLE);
attributes.setAcl(acl);
}
return attributes;
}
// Throw failure if looking up file fails
final String id = nodeid.getVersionId(file);
try {
return this.findNode(file, id);
}
catch(NotfoundException e) {
if(log.isWarnEnabled()) {
log.warn(String.format("Previously cached node id %s no longer found for file %s", id, file));
}
// Try with reset cache after failure finding node id
return this.findNode(file, nodeid.getVersionId(file));
}
}
|
@Test(expected = NotfoundException.class)
public void testFindNotFound() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSAttributesFinderFeature f = new SDSAttributesFinderFeature(session, nodeid);
try {
f.find(test);
}
finally {
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
}
|
@Override
public ByteBuffer nioBuffer() {
return nioBuffer(readerIndex, readableBytes());
}
|
@Test
public void testNioBufferAfterRelease1() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().nioBuffer(0, 1);
}
});
}
|
@Udf
public <T> List<T> slice(
@UdfParameter(description = "the input array") final List<T> in,
@UdfParameter(description = "start index") final Integer from,
@UdfParameter(description = "end index") final Integer to) {
if (in == null) {
return null;
}
try {
// SQL systems are usually 1-indexed and are inclusive of end index
final int start = from == null ? 0 : from - 1;
final int end = to == null ? in.size() : to;
return in.subList(start, end);
} catch (final IndexOutOfBoundsException e) {
return null;
}
}
|
@Test
public void shouldReturnNullOnIndexError() {
// Given:
final List<String> list = Lists.newArrayList("a", "b", "c");
// When:
final List<String> slice = new Slice().slice(list, 2, 5);
// Then:
assertThat(slice, nullValue());
}
|
@Override
@SuppressWarnings("BanSerializableRead")
protected Object deserialize(byte[] data, ClassLoader classLoader) {
try (var bytes = new ByteArrayInputStream(data);
var input = newInputStream(bytes, classLoader)) {
return input.readObject();
} catch (IOException e) {
throw new CacheException("Failed to deserialize", e);
} catch (ClassNotFoundException e) {
throw new CacheException("Failed to resolve a deserialized class", e);
}
}
|
@Test(dataProvider = "copier")
public void deserializable_badData(JavaSerializationCopier copier) {
assertThrows(CacheException.class, () ->
copier.deserialize(new byte[0], Thread.currentThread().getContextClassLoader()));
}
|
@Override
public boolean isAllowedTaskMovement(final ClientState source, final ClientState destination) {
final Map<String, String> sourceClientTags = clientTagFunction.apply(source.processId(), source);
final Map<String, String> destinationClientTags = clientTagFunction.apply(destination.processId(), destination);
for (final Entry<String, String> sourceClientTagEntry : sourceClientTags.entrySet()) {
if (!sourceClientTagEntry.getValue().equals(destinationClientTags.get(sourceClientTagEntry.getKey()))) {
return false;
}
}
return true;
}
|
@Test
public void shouldPermitSingleTaskMoveWhenDifferentClientTagCountNotChange() {
final ClientState source = createClientStateWithCapacity(PID_1, 1, mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_1)));
final ClientState destination = createClientStateWithCapacity(PID_2, 1, mkMap(mkEntry(ZONE_TAG, ZONE_2), mkEntry(CLUSTER_TAG, CLUSTER_1)));
final ClientState clientState = createClientStateWithCapacity(PID_3, 1, mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_2)));
final Map<ProcessId, ClientState> clientStateMap = mkMap(
mkEntry(PID_1, source),
mkEntry(PID_2, destination),
mkEntry(PID_3, clientState)
);
final TaskId taskId = new TaskId(0, 0);
clientState.assignActive(taskId);
source.assignStandby(taskId);
assertTrue(standbyTaskAssignor.isAllowedTaskMovement(source, destination, taskId, clientStateMap));
}
|
public IssueQuery create(SearchRequest request) {
try (DbSession dbSession = dbClient.openSession(false)) {
final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone());
Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules());
Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet());
Collection<String> issueKeys = collectIssueKeys(dbSession, request);
if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) {
ruleUuids.add("non-existing-uuid");
}
IssueQuery.Builder builder = IssueQuery.builder()
.issueKeys(issueKeys)
.severities(request.getSeverities())
.cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories())
.impactSoftwareQualities(request.getImpactSoftwareQualities())
.impactSeverities(request.getImpactSeverities())
.statuses(request.getStatuses())
.resolutions(request.getResolutions())
.issueStatuses(request.getIssueStatuses())
.resolved(request.getResolved())
.prioritizedRule(request.getPrioritizedRule())
.rules(ruleDtos)
.ruleUuids(ruleUuids)
.assigneeUuids(request.getAssigneeUuids())
.authors(request.getAuthors())
.scopes(request.getScopes())
.languages(request.getLanguages())
.tags(request.getTags())
.types(request.getTypes())
.pciDss32(request.getPciDss32())
.pciDss40(request.getPciDss40())
.owaspAsvs40(request.getOwaspAsvs40())
.owaspAsvsLevel(request.getOwaspAsvsLevel())
.owaspTop10(request.getOwaspTop10())
.owaspTop10For2021(request.getOwaspTop10For2021())
.stigAsdR5V3(request.getStigAsdV5R3())
.casa(request.getCasa())
.sansTop25(request.getSansTop25())
.cwe(request.getCwe())
.sonarsourceSecurity(request.getSonarsourceSecurity())
.assigned(request.getAssigned())
.createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone))
.createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone))
.facetMode(request.getFacetMode())
.timeZone(timeZone)
.codeVariants(request.getCodeVariants());
List<ComponentDto> allComponents = new ArrayList<>();
boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents);
addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request);
setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone);
String sort = request.getSort();
if (!isNullOrEmpty(sort)) {
builder.sort(sort);
builder.asc(request.getAsc());
}
return builder.build();
}
}
|
@Test
public void create_with_rule_key_that_does_not_exist_in_the_db() {
db.users().insertUser(u -> u.setLogin("joanna"));
ComponentDto project = db.components().insertPrivateProject().getMainBranchComponent();
db.components().insertComponent(newFileDto(project));
newRule(RuleKey.of("findbugs", "NullReference"));
SearchRequest request = new SearchRequest()
.setRules(asList("unknown:key1", "unknown:key2"));
IssueQuery query = underTest.create(request);
assertThat(query.rules()).isEmpty();
assertThat(query.ruleUuids()).containsExactly("non-existing-uuid");
}
|
static String getUsernameFromConf(Configuration conf) {
String oldStyleUgi = conf.get(DEPRECATED_UGI_KEY);
if (oldStyleUgi != null) {
// We can't use the normal configuration deprecation mechanism here
// since we need to split out the username from the configured UGI.
LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " +
HADOOP_HTTP_STATIC_USER + ".");
String[] parts = oldStyleUgi.split(",");
return parts[0];
} else {
return conf.get(HADOOP_HTTP_STATIC_USER,
DEFAULT_HADOOP_HTTP_STATIC_USER);
}
}
|
@Test
public void testOldStyleConfiguration() {
Configuration conf = new Configuration();
conf.set("dfs.web.ugi", "joe,group1,group2");
assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
}
|
protected Long getRoleIdByNameNoLock(String name) throws PrivilegeException {
Long roleId = roleNameToId.get(name);
if (roleId == null) {
throw new PrivilegeException(String.format("Role %s doesn't exist!", name));
}
return roleId;
}
|
@Test
public void testSetRole() throws Exception {
GlobalVariable.setActivateAllRolesOnLogin(false);
AuthorizationMgr manager = ctx.getGlobalStateMgr().getAuthorizationMgr();
// create user
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
String.format("create user user_test_set_role"), ctx), ctx);
UserIdentity user = UserIdentity.createAnalyzedUserIdentWithIp("user_test_set_role", "%");
// create role0 ~ role3
// grant select on tblx to rolex
// grant role0, role1, role2 to user
long[] roleIds = new long[4];
for (int i = 0; i != 4; ++i) {
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
String.format("create role test_set_role_%d;", i), ctx), ctx);
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
"grant select on db.tbl" + i + " to role test_set_role_" + i, ctx), ctx);
if (i != 3) {
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
"grant test_set_role_" + i + " to user_test_set_role", ctx), ctx);
}
roleIds[i] = manager.getRoleIdByNameNoLock("test_set_role_" + i);
}
// default: user can select all tables
assertTableSelectOnTestWithoutSetRole(user, false, false, false, false);
// set one role
ctx.setCurrentUserIdentity(user);
new StmtExecutor(ctx, UtFrameUtils.parseStmtWithNewParser(
String.format("set role 'test_set_role_0'"), ctx)).execute();
Assert.assertEquals(new HashSet<>(Arrays.asList(roleIds[0])), ctx.getCurrentRoleIds());
assertTableSelectOnTestWithoutSetRole(user, true, false, false, false);
// set two of the other roles
setCurrentUserAndRoles(ctx, user);
new StmtExecutor(ctx, UtFrameUtils.parseStmtWithNewParser(
String.format("set role 'test_set_role_1', 'test_set_role_2'"), ctx)).execute();
Assert.assertEquals(new HashSet<>(Arrays.asList(roleIds[1], roleIds[2])), ctx.getCurrentRoleIds());
assertTableSelectOnTestWithoutSetRole(user, false, true, true, false);
// bad case: role not exists
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser("create role bad_role", ctx), ctx);
SetRoleStmt stmt = (SetRoleStmt) UtFrameUtils.parseStmtWithNewParser(
"set role 'test_set_role_1', 'test_set_role_2', 'bad_role'", ctx);
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser("drop role bad_role", ctx), ctx);
setCurrentUserAndRoles(ctx, user);
try {
SetRoleExecutor.execute(stmt, ctx);
Assert.fail();
} catch (UserException e) {
Assert.assertTrue(e.getMessage().contains("Cannot find role bad_role"));
}
try {
SetRoleExecutor.execute((SetRoleStmt) UtFrameUtils.parseStmtWithNewParser(
"set role 'test_set_role_1', 'test_set_role_3'", ctx), ctx);
Assert.fail();
} catch (UserException e) {
Assert.assertTrue(e.getMessage().contains("Role test_set_role_3 is not granted"));
}
try {
SetRoleExecutor.execute((SetRoleStmt) UtFrameUtils.parseStmtWithNewParser(
"set role all except 'test_set_role_1', 'test_set_role_3'", ctx), ctx);
Assert.fail();
} catch (UserException e) {
Assert.assertTrue(e.getMessage().contains("Role test_set_role_3 is not granted"));
}
// drop role1
setCurrentUserAndRoles(ctx, UserIdentity.ROOT);
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
"drop role test_set_role_1;", ctx), ctx);
ctx.setCurrentUserIdentity(user);
SetRoleExecutor.execute((SetRoleStmt) UtFrameUtils.parseStmtWithNewParser(
"set role all", ctx), ctx);
assertTableSelectOnTestWithoutSetRole(user, true, false, true, false);
setCurrentUserAndRoles(ctx, user);
new StmtExecutor(ctx, UtFrameUtils.parseStmtWithNewParser(
String.format("set role all"), ctx)).execute();
Assert.assertEquals(new HashSet<>(Arrays.asList(roleIds[0], roleIds[2])), ctx.getCurrentRoleIds());
assertTableSelectOnTestWithoutSetRole(user, true, false, true, false);
setCurrentUserAndRoles(ctx, user);
new StmtExecutor(ctx, UtFrameUtils.parseStmtWithNewParser(
String.format("set role all except 'test_set_role_2'"), ctx)).execute();
Assert.assertEquals(new HashSet<>(Arrays.asList(roleIds[0])), ctx.getCurrentRoleIds());
assertTableSelectOnTestWithoutSetRole(user, true, false, false, false);
// predecessors
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
"grant test_set_role_3 to role test_set_role_0;", ctx), ctx);
assertTableSelectOnTestWithoutSetRole(user, true, false, false, true);
GlobalVariable.setActivateAllRolesOnLogin(true);
}
|
public PrepareResult prepare(HostValidator hostValidator, DeployLogger logger, PrepareParams params,
Optional<ApplicationVersions> activeApplicationVersions, Instant now, File serverDbSessionDir,
ApplicationPackage applicationPackage, SessionZooKeeperClient sessionZooKeeperClient) {
ApplicationId applicationId = params.getApplicationId();
Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationVersions,
TenantRepository.getTenantPath(applicationId.tenant()),
serverDbSessionDir, applicationPackage, sessionZooKeeperClient,
onnxModelCost, endpointCertificateSecretStores);
preparation.preprocess();
try {
AllocatedHosts allocatedHosts = preparation.buildModels(now);
preparation.makeResult(allocatedHosts);
if ( ! params.isDryRun()) {
FileReference fileReference = preparation.triggerDistributionOfApplicationPackage();
preparation.writeStateZK(fileReference);
preparation.writeEndpointCertificateMetadataZK();
preparation.writeContainerEndpointsZK();
}
log.log(Level.FINE, () -> "time used " + params.getTimeoutBudget().timesUsed() + " : " + applicationId);
return preparation.result();
}
catch (IllegalArgumentException e) {
if (e instanceof InvalidApplicationException)
throw e;
throw new InvalidApplicationException("Invalid application package", e);
}
}
|
@Test(expected = InvalidApplicationException.class)
public void require_exception_for_overlapping_host() throws IOException {
FilesApplicationPackage app = getApplicationPackage(testApp);
HostRegistry hostValidator = new HostRegistry();
hostValidator.update(applicationId("foo"), List.of("mytesthost"));
preparer.prepare(hostValidator, new BaseDeployLogger(), new PrepareParams.Builder().applicationId(applicationId("default")).build(),
Optional.empty(), Instant.now(), app.getAppDir(), app, createSessionZooKeeperClient());
}
|
@Override
public void validate(final String name, final Object value) {
if (immutableProps.contains(name)) {
throw new IllegalArgumentException(String.format("Cannot override property '%s'", name));
}
final Consumer<Object> validator = HANDLERS.get(name);
if (validator != null) {
validator.accept(value);
}
}
|
@Test
public void shouldNotThrowOnOtherOffsetReset() {
validator.validate(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "caught-by-normal-mech");
}
|
@Override
public void updateGroupDescription(DeviceId deviceId,
GroupKey oldAppCookie,
UpdateType type,
GroupBuckets newBuckets,
GroupKey newAppCookie) {
// Check if group update to be done by a remote instance
if (mastershipService.getMasterFor(deviceId) != null &&
mastershipService.getLocalRole(deviceId) != MastershipRole.MASTER) {
log.debug("updateGroupDescription: Device {} local role is not MASTER",
deviceId);
if (mastershipService.getMasterFor(deviceId) == null) {
log.error("No Master for device {}..."
+ "Can not perform update group (appCookie {}) operation",
deviceId, newAppCookie);
//TODO: Send Group operation failure event
return;
}
GroupStoreMessage groupOp = GroupStoreMessage.
createGroupUpdateRequestMsg(deviceId,
oldAppCookie,
type,
newBuckets,
newAppCookie);
clusterCommunicator.unicast(groupOp,
GroupStoreMessageSubjects.REMOTE_GROUP_OP_REQUEST,
clusterMsgSerializer::serialize,
mastershipService.getMasterFor(deviceId)).whenComplete((result, error) -> {
if (error != null) {
log.warn("Failed to send request to master: {} to {}",
groupOp, mastershipService.getMasterFor(deviceId), error);
}
//TODO: Send Group operation failure event
});
return;
}
log.debug("updateGroupDescription for device {} is getting handled locally (appCookie {})",
deviceId, newAppCookie);
updateGroupDescriptionInternal(deviceId,
oldAppCookie,
type,
newBuckets,
newAppCookie);
}
|
@Test
public void testUpdateGroupDescription() {
GroupBuckets buckets = new GroupBuckets(ImmutableList.of(allGroupBucket2));
groupStore.deviceInitialAuditCompleted(deviceId1, true);
groupStore.storeGroupDescription(groupDescription1);
GroupKey newKey = new DefaultGroupKey("123".getBytes());
groupStore.updateGroupDescription(deviceId1,
groupKey1,
ADD,
buckets,
newKey);
Group group1 = groupStore.getGroup(deviceId1, groupId1);
assertThat(group1.appCookie(), is(newKey));
assertThat(group1.buckets().buckets(), hasSize(2));
buckets = new GroupBuckets(ImmutableList.of(allGroupBucket, allGroupBucket2));
groupStore.updateGroupDescription(deviceId1,
newKey,
ADD,
buckets,
newKey);
group1 = groupStore.getGroup(deviceId1, groupId1);
assertThat(group1.appCookie(), is(newKey));
assertThat(group1.buckets().buckets(), hasSize(2));
for (GroupBucket bucket : group1.buckets().buckets()) {
assertTrue(bucket.treatment().equals(treatment) ||
bucket.treatment().equals(treatment2));
}
buckets = new GroupBuckets(ImmutableList.of(allGroupBucket2));
groupStore.updateGroupDescription(deviceId1,
newKey,
SET,
buckets,
newKey);
group1 = groupStore.getGroup(deviceId1, groupId1);
assertThat(group1.appCookie(), is(newKey));
assertThat(group1.buckets().buckets(), hasSize(1));
GroupBucket onlyBucket = group1.buckets().buckets().iterator().next();
assertEquals(treatment2, onlyBucket.treatment());
}
|
@Override
public Num calculate(BarSeries series, Position position) {
return criterion.calculate(series, position).dividedBy(enterAndHoldCriterion.calculate(series, position));
}
|
@Test
public void calculateWithNoPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70);
AnalysisCriterion buyAndHold = getCriterion(new ReturnCriterion());
assertNumEquals(1 / 0.7, buyAndHold.calculate(series, new BaseTradingRecord()));
}
|
@Override
@CacheEvict(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE,
allEntries = true) // allEntries clears the whole cache, because the code field may change and a targeted eviction is hard
public void updateNotifyTemplate(NotifyTemplateSaveReqVO updateReqVO) {
// Validate that the template exists
validateNotifyTemplateExists(updateReqVO.getId());
// Validate that the in-app notification template code is not duplicated
validateNotifyTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode());
// Update
NotifyTemplateDO updateObj = BeanUtils.toBean(updateReqVO, NotifyTemplateDO.class);
updateObj.setParams(parseTemplateContentParams(updateObj.getContent()));
notifyTemplateMapper.updateById(updateObj);
}
|
@Test
public void testUpdateNotifyTemplate_success() {
// Mock data
NotifyTemplateDO dbNotifyTemplate = randomPojo(NotifyTemplateDO.class);
notifyTemplateMapper.insert(dbNotifyTemplate); // @Sql: insert an existing record first
// Prepare parameters
NotifyTemplateSaveReqVO reqVO = randomPojo(NotifyTemplateSaveReqVO.class, o -> {
o.setId(dbNotifyTemplate.getId()); // set the ID of the record to update
o.setStatus(randomCommonStatus());
});
// Invoke
notifyTemplateService.updateNotifyTemplate(reqVO);
// Verify the update is correct
NotifyTemplateDO notifyTemplate = notifyTemplateMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, notifyTemplate);
}
|
public static String prepareUrl(@NonNull String url) {
url = url.trim();
String lowerCaseUrl = url.toLowerCase(Locale.ROOT); // protocol names are case insensitive
if (lowerCaseUrl.startsWith("feed://")) {
Log.d(TAG, "Replacing feed:// with http://");
return prepareUrl(url.substring("feed://".length()));
} else if (lowerCaseUrl.startsWith("pcast://")) {
Log.d(TAG, "Removing pcast://");
return prepareUrl(url.substring("pcast://".length()));
} else if (lowerCaseUrl.startsWith("pcast:")) {
Log.d(TAG, "Removing pcast:");
return prepareUrl(url.substring("pcast:".length()));
} else if (lowerCaseUrl.startsWith("itpc")) {
Log.d(TAG, "Replacing itpc:// with http://");
return prepareUrl(url.substring("itpc://".length()));
} else if (lowerCaseUrl.startsWith(AP_SUBSCRIBE)) {
Log.d(TAG, "Removing antennapod-subscribe://");
return prepareUrl(url.substring(AP_SUBSCRIBE.length()));
} else if (lowerCaseUrl.contains(AP_SUBSCRIBE_DEEPLINK)) {
Log.d(TAG, "Removing " + AP_SUBSCRIBE_DEEPLINK);
String query = Uri.parse(url).getQueryParameter("url");
try {
return prepareUrl(URLDecoder.decode(query, "UTF-8"));
} catch (UnsupportedEncodingException e) {
return prepareUrl(query);
}
} else if (!(lowerCaseUrl.startsWith("http://") || lowerCaseUrl.startsWith("https://"))) {
Log.d(TAG, "Adding http:// at the beginning of the URL");
return "http://" + url;
} else {
return url;
}
}
|
@Test
public void testPcastProtocolNoScheme() {
final String in = "pcast://example.com";
final String out = UrlChecker.prepareUrl(in);
assertEquals("http://example.com", out);
}
|
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuffer buf = new StringBuffer();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case FORMAT_MODIFIER_STATE:
handleFormatModifierState(c, tokenList, buf);
break;
case OPTION_STATE:
processOption(c, tokenList, buf);
break;
case KEYWORD_STATE:
handleKeywordState(c, tokenList, buf);
break;
case RIGHT_PARENTHESIS_STATE:
handleRightParenthesisState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addValuedToken(Token.LITERAL, buf, tokenList);
break;
case KEYWORD_STATE:
tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
break;
case RIGHT_PARENTHESIS_STATE:
tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
break;
case FORMAT_MODIFIER_STATE:
case OPTION_STATE:
throw new ScanException("Unexpected end of pattern string");
}
return tokenList;
}
|
@Test
public void testSingleLiteral() throws ScanException {
List<Token> tl = new TokenStream("hello").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(new Token(Token.LITERAL, "hello"));
assertEquals(witness, tl);
}
|
public static Stream<ItemSet> apply(FPTree tree) {
FPGrowth growth = new FPGrowth(tree);
return StreamSupport.stream(growth.spliterator(), false);
}
|
@Test
public void testKosarak() {
System.out.println("kosarak");
FPTree tree = FPTree.of(1500, () -> ItemSetTestData.read("transaction/kosarak.dat"));
assertEquals(219725, FPGrowth.apply(tree).count());
}
|
@Override
public ItemChangeSets resolve(long namespaceId, String configText, List<ItemDTO> baseItems) {
Map<Integer, ItemDTO> oldLineNumMapItem = BeanUtils.mapByKey("lineNum", baseItems);
Map<String, ItemDTO> oldKeyMapItem = BeanUtils.mapByKey("key", baseItems);
// remove comment and blank items from the key map.
oldKeyMapItem.remove("");
String[] newItems = configText.split(ITEM_SEPARATOR);
Set<String> repeatKeys = new HashSet<>();
if (isHasRepeatKey(newItems, repeatKeys)) {
throw new BadRequestException("Config text has repeated keys: %s, please check your input.", repeatKeys);
}
ItemChangeSets changeSets = new ItemChangeSets();
Map<Integer, String> newLineNumMapItem = new HashMap<>(); // used to delete blank and comment items
int lineCounter = 1;
for (String newItem : newItems) {
newItem = newItem.trim();
newLineNumMapItem.put(lineCounter, newItem);
ItemDTO oldItemByLine = oldLineNumMapItem.get(lineCounter);
//comment item
if (isCommentItem(newItem)) {
handleCommentLine(namespaceId, oldItemByLine, newItem, lineCounter, changeSets);
//blank item
} else if (isBlankItem(newItem)) {
handleBlankLine(namespaceId, oldItemByLine, lineCounter, changeSets);
//normal item
} else {
handleNormalLine(namespaceId, oldKeyMapItem, newItem, lineCounter, changeSets);
}
lineCounter++;
}
deleteCommentAndBlankItem(oldLineNumMapItem, newLineNumMapItem, changeSets);
deleteNormalKVItem(oldKeyMapItem, changeSets);
return changeSets;
}
|
@Test
public void testUpdateCommentItem() {
ItemChangeSets changeSets = resolver.resolve(1, "#ww\n"
+ "a=b\n"
+"\n"
+ "b=c", mockBaseItemWith2Key1Comment1Blank());
Assert.assertEquals(1, changeSets.getDeleteItems().size());
Assert.assertEquals(0, changeSets.getUpdateItems().size());
Assert.assertEquals(1, changeSets.getCreateItems().size());
}
|
public PipelineTemplateConfig templateByName(CaseInsensitiveString foo) {
for (PipelineTemplateConfig templateConfig : this) {
if (templateConfig.name().equals(foo)) {
return templateConfig;
}
}
return null;
}
|
@Test
public void shouldReturnNullIfTemplateIsNotFound() {
PipelineTemplateConfig template1 = template("template1");
TemplatesConfig templates = new TemplatesConfig(template1);
assertThat(templates.templateByName(new CaseInsensitiveString("some_invalid_template")), is(nullValue()));
}
|
public synchronized long nextGtid() {
long timestamp = timeGen();
if (timestamp < lastTimestamp) {
timestamp = lastTimestamp;
}
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & MAX_SEQUENCE;
if (sequence == 0) {
timestamp += 1;
}
} else {
sequence = 0L;
}
if (timestamp - EPOCH >= (1L << 42)) {
throw new IllegalStateException("Timestamp overflow");
}
lastTimestamp = timestamp;
return ((timestamp - EPOCH) << TIMESTAMP_SHIFT) | (CLUSTER_ID << CLUSTER_ID_SHIFT) | sequence;
}
|
@Test
public void testNextGtidResetsSequenceOnNewMillisecond() throws InterruptedException {
long firstGtid = gtidGenerator.nextGtid();
Thread.sleep(1); // Ensure the next GTID is in a new millisecond
long nextGtid = gtidGenerator.nextGtid();
Assertions.assertTrue((nextGtid & GtidGenerator.MAX_SEQUENCE) == 0, "Sequence should reset on a new millisecond");
}
|
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldHandleMultipleAliasedJoinDataSources() {
// Given:
final SingleStatementContext stmt = givenQuery("SELECT * FROM TEST1 t1 "
+ "JOIN TEST2 t2 ON t1.col1 = t2.col1 "
+ "JOIN TEST3 t3 ON t1.col1 = t3.col1;");
// When:
final Query result = (Query) builder.buildStatement(stmt);
// Then:
assertThat(result.getFrom(), is(instanceOf(Join.class)));
assertThat((Join) result.getFrom(), hasLeft(new AliasedRelation(TEST1, SourceName.of("T1"))));
assertThat((Join) result.getFrom(), hasRights(
new AliasedRelation(TEST2, SourceName.of("T2")),
new AliasedRelation(TEST3, SourceName.of("T3"))
));
}
|
public int getUnknown_000c() {
return unknown_000c;
}
|
@Test
public void getUnknown_000c() {
assertEquals(TestParameters.VP_UNKNOWN_000C, chmItsfHeader.getUnknown_000c());
}
|
@Override
@Transactional(rollbackFor = Exception.class)
public void updateCodegen(CodegenUpdateReqVO updateReqVO) {
// Validate that the codegen table exists
if (codegenTableMapper.selectById(updateReqVO.getTable().getId()) == null) {
throw exception(CODEGEN_TABLE_NOT_EXISTS);
}
// Validate that the master table exists
if (Objects.equals(updateReqVO.getTable().getTemplateType(), CodegenTemplateTypeEnum.SUB.getType())) {
if (codegenTableMapper.selectById(updateReqVO.getTable().getMasterTableId()) == null) {
throw exception(CODEGEN_MASTER_TABLE_NOT_EXISTS, updateReqVO.getTable().getMasterTableId());
}
if (CollUtil.findOne(updateReqVO.getColumns(), // the column joining the master table does not exist
column -> column.getId().equals(updateReqVO.getTable().getSubJoinColumnId())) == null) {
throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, updateReqVO.getTable().getSubJoinColumnId());
}
}
// Update the table definition
CodegenTableDO updateTableObj = BeanUtils.toBean(updateReqVO.getTable(), CodegenTableDO.class);
codegenTableMapper.updateById(updateTableObj);
// Update the column definitions
List<CodegenColumnDO> updateColumnObjs = BeanUtils.toBean(updateReqVO.getColumns(), CodegenColumnDO.class);
updateColumnObjs.forEach(updateColumnObj -> codegenColumnMapper.updateById(updateColumnObj));
}
|
@Test
public void testUpdateCodegen_sub_masterNotExists() {
// Mock data
CodegenTableDO table = randomPojo(CodegenTableDO.class,
o -> o.setTemplateType(CodegenTemplateTypeEnum.SUB.getType())
.setScene(CodegenSceneEnum.ADMIN.getScene()));
codegenTableMapper.insert(table);
// Prepare parameters
CodegenUpdateReqVO updateReqVO = randomPojo(CodegenUpdateReqVO.class,
o -> o.getTable().setId(table.getId())
.setTemplateType(CodegenTemplateTypeEnum.SUB.getType()));
// Invoke and assert
assertServiceException(() -> codegenService.updateCodegen(updateReqVO),
CODEGEN_MASTER_TABLE_NOT_EXISTS, updateReqVO.getTable().getMasterTableId());
}
|
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) {
Objects.requireNonNull(storeName, "storeName cannot be null");
if (topologyMetadata.hasNamedTopologies()) {
throw new IllegalArgumentException("Cannot invoke the getAllMetadataForStore(storeName) method when"
+ " using named topologies, please use the overload that accepts"
+ " a topologyName parameter to identify the correct store");
}
if (!isInitialized()) {
return Collections.emptyList();
}
if (globalStores.contains(storeName)) {
return allMetadata;
}
final Collection<String> sourceTopics = topologyMetadata.sourceTopicsForStore(storeName, null);
if (sourceTopics.isEmpty()) {
return Collections.emptyList();
}
final ArrayList<StreamsMetadata> results = new ArrayList<>();
for (final StreamsMetadata metadata : allMetadata) {
if (metadata.stateStoreNames().contains(storeName) || metadata.standbyStateStoreNames().contains(storeName)) {
results.add(metadata);
}
}
return results;
}
|
@Test
public void shouldHaveGlobalStoreInAllMetadata() {
final Collection<StreamsMetadata> metadata = metadataState.getAllMetadataForStore(globalTable);
assertEquals(3, metadata.size());
for (final StreamsMetadata streamsMetadata : metadata) {
assertTrue(streamsMetadata.stateStoreNames().contains(globalTable));
}
}
|
@Override
public Long del(byte[]... keys) {
if (isQueueing() || isPipelined()) {
for (byte[] key: keys) {
write(key, LongCodec.INSTANCE, RedisCommands.DEL, key);
}
return null;
}
CommandBatchService es = new CommandBatchService(executorService);
for (byte[] key: keys) {
es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key);
}
BatchResult<Long> b = (BatchResult<Long>) es.execute();
return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum();
}
|
@Test
public void testDel() {
testInCluster(connection -> {
List<byte[]> keys = new ArrayList<>();
for (int i = 0; i < 10; i++) {
byte[] key = ("test" + i).getBytes();
keys.add(key);
connection.set(key, ("test" + i).getBytes());
}
assertThat(connection.del(keys.toArray(new byte[0][]))).isEqualTo(10);
});
}
|
@Override
public List<PartitionGroupMetadata> computePartitionGroupMetadata(String clientId, StreamConfig streamConfig,
List<PartitionGroupConsumptionStatus> partitionGroupConsumptionStatuses, int timeoutMillis)
throws IOException, TimeoutException {
List<PartitionGroupMetadata> newPartitionGroupMetadataList = new ArrayList<>();
Map<String, Shard> shardIdToShardMap = _kinesisConnectionHandler.getShards().stream()
.collect(Collectors.toMap(Shard::shardId, s -> s, (s1, s2) -> s1));
Set<String> shardsInCurrent = new HashSet<>();
Set<String> shardsEnded = new HashSet<>();
// TODO: Once we start supporting multiple shards in a PartitionGroup,
// we need to iterate over all shards to check if any of them have reached end
// Process existing shards. Add them to new list if still consuming from them
for (PartitionGroupConsumptionStatus currentPartitionGroupConsumptionStatus : partitionGroupConsumptionStatuses) {
KinesisPartitionGroupOffset kinesisStartCheckpoint =
(KinesisPartitionGroupOffset) currentPartitionGroupConsumptionStatus.getStartOffset();
String shardId = kinesisStartCheckpoint.getShardId();
shardsInCurrent.add(shardId);
Shard shard = shardIdToShardMap.get(shardId);
if (shard == null) { // Shard has expired
shardsEnded.add(shardId);
String lastConsumedSequenceID = kinesisStartCheckpoint.getSequenceNumber();
LOGGER.warn(
"Kinesis shard with id: {} has expired. Data has been consumed from the shard till sequence number: {}. "
+ "There can be potential data loss.", shardId, lastConsumedSequenceID);
continue;
}
StreamPartitionMsgOffset newStartOffset;
StreamPartitionMsgOffset currentEndOffset = currentPartitionGroupConsumptionStatus.getEndOffset();
if (currentEndOffset != null) { // Segment DONE (committing/committed)
String endingSequenceNumber = shard.sequenceNumberRange().endingSequenceNumber();
if (endingSequenceNumber != null) { // Shard has ended, check if we're also done consuming it
if (consumedEndOfShard(currentEndOffset, currentPartitionGroupConsumptionStatus)) {
shardsEnded.add(shardId);
continue; // Shard ended and we're done consuming it. Skip
}
}
newStartOffset = currentEndOffset;
} else { // Segment IN_PROGRESS
newStartOffset = currentPartitionGroupConsumptionStatus.getStartOffset();
}
newPartitionGroupMetadataList.add(
new PartitionGroupMetadata(currentPartitionGroupConsumptionStatus.getPartitionGroupId(), newStartOffset));
}
// Add brand new shards
for (Map.Entry<String, Shard> entry : shardIdToShardMap.entrySet()) {
// If shard was already in current list, skip
String newShardId = entry.getKey();
if (shardsInCurrent.contains(newShardId)) {
continue;
}
Shard newShard = entry.getValue();
String parentShardId = newShard.parentShardId();
// Add the new shard in the following 3 cases:
// 1. Root shards - Parent shardId will be null. Will find this case when creating new table.
// 2. Parent expired - Parent shardId will not be part of shardIdToShard map
// 3. Parent reached EOL and completely consumed.
if (parentShardId == null || !shardIdToShardMap.containsKey(parentShardId) || shardsEnded.contains(
parentShardId)) {
// TODO: Revisit this. Kinesis starts consuming AFTER the start sequence number, and we might miss the first
// message.
StreamPartitionMsgOffset newStartOffset =
new KinesisPartitionGroupOffset(newShardId, newShard.sequenceNumberRange().startingSequenceNumber());
int partitionGroupId = getPartitionGroupIdFromShardId(newShardId);
newPartitionGroupMetadataList.add(new PartitionGroupMetadata(partitionGroupId, newStartOffset));
}
}
return newPartitionGroupMetadataList;
}
|
@Test
public void getPartitionsGroupInfoEndOfShardTest()
throws Exception {
List<PartitionGroupConsumptionStatus> currentPartitionGroupMeta = new ArrayList<>();
KinesisPartitionGroupOffset kinesisPartitionGroupOffset = new KinesisPartitionGroupOffset("0", "1");
currentPartitionGroupMeta.add(
new PartitionGroupConsumptionStatus(0, 1, kinesisPartitionGroupOffset, kinesisPartitionGroupOffset,
"CONSUMING"));
ArgumentCaptor<StreamPartitionMsgOffset> checkpointArgs = ArgumentCaptor.forClass(StreamPartitionMsgOffset.class);
ArgumentCaptor<PartitionGroupConsumptionStatus> partitionGroupMetadataCapture =
ArgumentCaptor.forClass(PartitionGroupConsumptionStatus.class);
ArgumentCaptor<Integer> intArguments = ArgumentCaptor.forClass(Integer.class);
ArgumentCaptor<String> stringCapture = ArgumentCaptor.forClass(String.class);
Shard shard0 = Shard.builder().shardId(SHARD_ID_0).sequenceNumberRange(
SequenceNumberRange.builder().startingSequenceNumber("1").endingSequenceNumber("1").build()).build();
Shard shard1 = Shard.builder().shardId(SHARD_ID_1)
.sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
when(_kinesisConnectionHandler.getShards()).thenReturn(ImmutableList.of(shard0, shard1));
when(_streamConsumerFactory.createPartitionGroupConsumer(stringCapture.capture(),
partitionGroupMetadataCapture.capture())).thenReturn(_partitionGroupConsumer);
when(_partitionGroupConsumer.fetchMessages(checkpointArgs.capture(), intArguments.capture())).thenReturn(
new KinesisMessageBatch(new ArrayList<>(), kinesisPartitionGroupOffset, true));
List<PartitionGroupMetadata> result =
_kinesisStreamMetadataProvider.computePartitionGroupMetadata(CLIENT_ID, getStreamConfig(),
currentPartitionGroupMeta, TIMEOUT);
Assert.assertEquals(result.size(), 1);
Assert.assertEquals(result.get(0).getPartitionGroupId(), 1);
Assert.assertEquals(partitionGroupMetadataCapture.getValue().getSequenceNumber(), 1);
}
|
@Override
public List<String> splitAndEvaluate() {
return Strings.isNullOrEmpty(inlineExpression) ? Collections.emptyList() : flatten(evaluate(GroovyUtils.split(handlePlaceHolder(inlineExpression))));
}
|
@Test
void assertEvaluateForExpressionIsNull() {
InlineExpressionParser parser = TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", new Properties());
List<String> expected = parser.splitAndEvaluate();
assertThat(expected, is(Collections.<String>emptyList()));
}
|
@Override
protected void doStop() throws Exception {
shutdownReconnectService(reconnectService);
LOG.debug("Disconnecting from: {}...", getEndpoint().getConnectionString());
super.doStop();
closeSession();
LOG.info("Disconnected from: {}", getEndpoint().getConnectionString());
}
|
@Test
public void doStopShouldNotCloseTheSMPPSessionIfItIsNull() throws Exception {
when(endpoint.getConnectionString())
.thenReturn("smpp://smppclient@localhost:2775");
when(endpoint.isSingleton()).thenReturn(true);
producer.doStop();
}
|
public List<ScanFilterData> createScanFilterDataForBeaconParser(BeaconParser beaconParser, List<Identifier> identifiers) {
ArrayList<ScanFilterData> scanFilters = new ArrayList<ScanFilterData>();
long typeCode = beaconParser.getMatchingBeaconTypeCode();
int startOffset = beaconParser.getMatchingBeaconTypeCodeStartOffset();
int endOffset = beaconParser.getMatchingBeaconTypeCodeEndOffset();
byte[] typeCodeBytes = BeaconParser.longToByteArray(typeCode, endOffset-startOffset+1);
if (identifiers != null && identifiers.size() > 0 && identifiers.get(0) != null && beaconParser.getMatchingBeaconTypeCode() == 0x0215) {
// If the type code is 0x0215 (iBeacon), we also allow adding identifiers to the filter
for (int manufacturer : beaconParser.getHardwareAssistManufacturers()) {
ScanFilterData sfd = new ScanFilterData();
sfd.manufacturer = manufacturer;
int length = 18;
if (identifiers.size() == 2) {
length = 20;
}
if (identifiers.size() == 3) {
length = 22;
}
sfd.filter = new byte[length];
sfd.filter[0] = typeCodeBytes[0];
sfd.filter[1] = typeCodeBytes[1];
byte[] idBytes = identifiers.get(0).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+2] = idBytes[i];
}
if (identifiers.size() > 1 && identifiers.get(1) != null) {
idBytes = identifiers.get(1).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+18] = idBytes[i];
}
}
if (identifiers.size() > 2 && identifiers.get(2) != null) {
idBytes = identifiers.get(2).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+20] = idBytes[i];
}
}
sfd.mask = new byte[length];
for (int i = 0 ; i < length; i++) {
sfd.mask[i] = (byte) 0xff;
}
sfd.serviceUuid = null;
sfd.serviceUuid128Bit = new byte[0];
scanFilters.add(sfd);
return scanFilters;
}
}
for (int manufacturer : beaconParser.getHardwareAssistManufacturers()) {
ScanFilterData sfd = new ScanFilterData();
Long serviceUuid = beaconParser.getServiceUuid();
// Note: the -2 here is because we want the filter and mask to start after the
// two-byte manufacturer code, and the beacon parser expression is based on offsets
// from the start of the two byte code
int length = endOffset + 1 - 2;
byte[] filter = new byte[0];
byte[] mask = new byte[0];
if (length > 0) {
filter = new byte[length];
mask = new byte[length];
for (int layoutIndex = 2; layoutIndex <= endOffset; layoutIndex++) {
int filterIndex = layoutIndex-2;
if (layoutIndex < startOffset) {
filter[filterIndex] = 0;
mask[filterIndex] = 0;
} else {
filter[filterIndex] = typeCodeBytes[layoutIndex-startOffset];
mask[filterIndex] = (byte) 0xff;
}
}
}
sfd.manufacturer = manufacturer;
sfd.filter = filter;
sfd.mask = mask;
sfd.serviceUuid = serviceUuid;
sfd.serviceUuid128Bit = beaconParser.getServiceUuid128Bit();
scanFilters.add(sfd);
}
return scanFilters;
}
|
@Test
public void testGetAltBeaconScanFilter() throws Exception {
org.robolectric.shadows.ShadowLog.stream = System.err;
BeaconParser parser = new AltBeaconParser();
BeaconManager.setManifestCheckingDisabled(true); // no manifest available in robolectric
List<ScanFilterUtils.ScanFilterData> scanFilterDatas = new ScanFilterUtils().createScanFilterDataForBeaconParser(parser, null);
assertEquals("scanFilters should be of correct size", 1, scanFilterDatas.size());
ScanFilterUtils.ScanFilterData sfd = scanFilterDatas.get(0);
assertEquals("manufacturer should be right", 0x0118, sfd.manufacturer);
assertEquals("mask length should be right", 2, sfd.mask.length);
assertArrayEquals("mask should be right", new byte[] {(byte)0xff, (byte)0xff}, sfd.mask);
assertArrayEquals("filter should be right", new byte[] {(byte)0xbe, (byte)0xac}, sfd.filter);
}
|
public Boolean isRevisionSupported() {
if(!zConf.getBoolean(ConfVars.ZEPPELIN_NOTEBOOK_VERSIONED_MODE_ENABLE)) {
return false;
}
if (notebookRepo instanceof NotebookRepoSync) {
return ((NotebookRepoSync) notebookRepo).isRevisionSupportedInDefaultRepo();
} else if (notebookRepo instanceof NotebookRepoWithVersionControl) {
return true;
} else {
return false;
}
}
|
@Test
void testRevisionSupported() throws IOException {
NotebookRepo notebookRepo;
Notebook notebook;
notebookRepo = new DummyNotebookRepo();
notebook = new Notebook(zConf, mock(AuthorizationService.class), notebookRepo, new NoteManager(notebookRepo, zConf), interpreterFactory,
interpreterSettingManager, credentials, null);
assertFalse( notebook.isRevisionSupported(), "Revision is not supported in DummyNotebookRepo");
notebookRepo = new DummyNotebookRepoWithVersionControl();
notebook = new Notebook(zConf, mock(AuthorizationService.class), notebookRepo, new NoteManager(notebookRepo, zConf), interpreterFactory,
interpreterSettingManager, credentials, null);
assertTrue(notebook.isRevisionSupported(), "Revision is supported in DummyNotebookRepoWithVersionControl");
}
|
@Override
public DataSourceProvenance getProvenance() {
return new DemoLabelDataSourceProvenance(this);
}
|
@Test
public void testCheckerboard() {
// Check zero samples throws
assertThrows(PropertyException.class, () -> new CheckerboardDataSource(0, 1, 10, 0.0, 1.0));
// Check invalid numSquares throws
assertThrows(PropertyException.class, () -> new CheckerboardDataSource(200, 1, 1, 0.0, 1.0));
// Check invalid min & max
assertThrows(PropertyException.class, () -> new CheckerboardDataSource(200, 1, 10, 0.0, 0.0));
assertThrows(PropertyException.class, () -> new CheckerboardDataSource(200, 1, 10, 0.0, -1.0));
// Check valid parameters work
CheckerboardDataSource source = new CheckerboardDataSource(2000, 1, 10, -1.0, 1.0);
assertEquals(2000, source.examples.size());
Dataset<Label> dataset = new MutableDataset<>(source);
Map<String, Long> map = new HashMap<>();
dataset.getOutputInfo().outputCountsIterable().forEach((p) -> map.put(p.getA(), p.getB()));
assertEquals(996, map.get("X"));
assertEquals(1004, map.get("O"));
Helpers.testProvenanceMarshalling(source.getProvenance());
}
|
static String levelToString(String feature, short level) {
if (feature.equals(MetadataVersion.FEATURE_NAME)) {
try {
return MetadataVersion.fromFeatureLevel(level).version();
} catch (Throwable e) {
return "UNKNOWN " + level;
}
}
return String.valueOf(level);
}
|
@Test
public void testLevelToString() {
assertEquals("5", FeatureCommand.levelToString("foo.bar", (short) 5));
assertEquals("3.3-IV0",
FeatureCommand.levelToString(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()));
}
|
@VisibleForTesting
boolean skipUpdate(int newSize) {
if (newSize == lastBufferSize) {
return true;
}
// By the logic of this class, newSize cannot be less than the min or greater than the max
// buffer size, but when this method is considered on its own, values below the min or above
// the max should behave the same way as the min and max buffer sizes respectively.
if (newSize <= minBufferSize || newSize >= maxBufferSize) {
return false;
}
int delta = (int) (lastBufferSize * bufferDebloatThresholdFactor);
return Math.abs(newSize - lastBufferSize) < delta;
}
|
@Test
void testSkipUpdate() {
int maxBufferSize = 32768;
int minBufferSize = 256;
double threshold = 0.3;
BufferDebloater bufferDebloater =
testBufferDebloater()
.withDebloatTarget(1000)
.withBufferSize(minBufferSize, maxBufferSize)
// 30 % Threshold.
.withThresholdPercentages((int) (threshold * 100))
.getBufferDebloater();
int currentBufferSize = maxBufferSize / 2;
OptionalInt optionalInt = bufferDebloater.recalculateBufferSize(currentBufferSize, 1);
assertThat(optionalInt).isPresent().hasValue(currentBufferSize);
// It is true because less than threshold.
assertThat(bufferDebloater.skipUpdate(currentBufferSize)).isTrue();
assertThat(bufferDebloater.skipUpdate(currentBufferSize - 1)).isTrue();
assertThat(bufferDebloater.skipUpdate(currentBufferSize + 1)).isTrue();
assertThat(
bufferDebloater.skipUpdate(
currentBufferSize - (int) (currentBufferSize * threshold) + 1))
.isTrue();
assertThat(
bufferDebloater.skipUpdate(
currentBufferSize + (int) (currentBufferSize * threshold) - 1))
.isTrue();
// It is false because it reaches threshold.
assertThat(
bufferDebloater.skipUpdate(
currentBufferSize - (int) (currentBufferSize * threshold)))
.isFalse();
assertThat(
bufferDebloater.skipUpdate(
currentBufferSize + (int) (currentBufferSize * threshold)))
.isFalse();
assertThat(bufferDebloater.skipUpdate(minBufferSize + 1)).isFalse();
assertThat(bufferDebloater.skipUpdate(minBufferSize)).isFalse();
assertThat(bufferDebloater.skipUpdate(maxBufferSize - 1)).isFalse();
assertThat(bufferDebloater.skipUpdate(maxBufferSize)).isFalse();
// Beyond the min and max size is always false.
assertThat(bufferDebloater.skipUpdate(maxBufferSize + 1)).isFalse();
assertThat(bufferDebloater.skipUpdate(minBufferSize - 1)).isFalse();
}
|
@Override
public boolean isManagedIndex(String indexName) {
return isManagedIndex(findAllMongoIndexSets(), indexName);
}
|
@Test
public void isManagedIndexWithUnmanagedIndexReturnsFalse() {
final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class);
final List<IndexSetConfig> indexSetConfigs = Collections.singletonList(indexSetConfig);
final MongoIndexSet indexSet = mock(MongoIndexSet.class);
when(mongoIndexSetFactory.create(indexSetConfig)).thenReturn(indexSet);
when(indexSetService.findAll()).thenReturn(indexSetConfigs);
when(indexSet.isManagedIndex("index")).thenReturn(false);
assertThat(indexSetRegistry.isManagedIndex("index")).isFalse();
}
|
public static String toJson(Message message) {
StringWriter json = new StringWriter();
try (JsonWriter jsonWriter = JsonWriter.of(json)) {
write(message, jsonWriter);
}
return json.toString();
}
|
@Test
public void write_empty_map() {
TestMap.Builder builder = TestMap.newBuilder();
assertThat(toJson(builder.build())).isEqualTo("{\"stringMap\":{},\"nestedMap\":{}}");
}
|
public static ImmutableSet<HttpUrl> allSubPaths(String url) {
return allSubPaths(HttpUrl.parse(url));
}
|
@Test
public void allSubPaths_whenMultipleSubPathsWithTrailingSlash_returnsExpectedUrl() {
assertThat(allSubPaths("http://localhost/a/b/c/"))
.containsExactly(
HttpUrl.parse("http://localhost/"),
HttpUrl.parse("http://localhost/a/"),
HttpUrl.parse("http://localhost/a/b/"),
HttpUrl.parse("http://localhost/a/b/c/"));
}
|
@Override
public boolean mkdirs(final Path f) throws IOException {
checkNotNull(f, "path is null");
return mkdirsInternal(pathToFile(f));
}
|
@Test
void testConcurrentMkdirs() throws Exception {
final FileSystem fs = FileSystem.getLocalFileSystem();
final File root = TempDirUtils.newFolder(tempFolder);
final int directoryDepth = 10;
final int concurrentOperations = 10;
final Collection<File> targetDirectories =
createTargetDirectories(root, directoryDepth, concurrentOperations);
final ExecutorService executor = Executors.newFixedThreadPool(concurrentOperations);
final CyclicBarrier cyclicBarrier = new CyclicBarrier(concurrentOperations);
try {
final Collection<CompletableFuture<Void>> mkdirsFutures =
new ArrayList<>(concurrentOperations);
for (File targetDirectory : targetDirectories) {
final CompletableFuture<Void> mkdirsFuture =
CompletableFuture.runAsync(
() -> {
try {
cyclicBarrier.await();
assertThat(fs.mkdirs(Path.fromLocalFile(targetDirectory)))
.isEqualTo(true);
} catch (Exception e) {
throw new CompletionException(e);
}
},
executor);
mkdirsFutures.add(mkdirsFuture);
}
final CompletableFuture<Void> allFutures =
CompletableFuture.allOf(
mkdirsFutures.toArray(new CompletableFuture[concurrentOperations]));
allFutures.get();
} finally {
final long timeout = 10000L;
ExecutorUtils.gracefulShutdown(timeout, TimeUnit.MILLISECONDS, executor);
}
}
|
@Override
public DescriptiveUrlBag toUrl(final Path file) {
final DescriptiveUrlBag list = new DescriptiveUrlBag();
if(new HostPreferences(session.getHost()).getBoolean("s3.bucket.virtualhost.disable")) {
list.addAll(new DefaultUrlProvider(session.getHost()).toUrl(file));
}
else {
list.add(this.toUrl(file, session.getHost().getProtocol().getScheme(), session.getHost().getPort()));
list.add(this.toUrl(file, Scheme.http, 80));
if(StringUtils.isNotBlank(session.getHost().getWebURL())) {
// Only include when custom domain is configured
list.addAll(new HostWebUrlProvider(session.getHost()).toUrl(file));
}
}
if(file.isFile()) {
if(!session.getHost().getCredentials().isAnonymousLogin()) {
// X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less
// than 604800 seconds
// In one hour
list.add(this.toSignedUrl(file, (int) TimeUnit.HOURS.toSeconds(1)));
// Default signed URL expiring in 24 hours.
list.add(this.toSignedUrl(file, (int) TimeUnit.SECONDS.toSeconds(
new HostPreferences(session.getHost()).getInteger("s3.url.expire.seconds"))));
// 1 Week
list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(7)));
switch(session.getSignatureVersion()) {
case AWS2:
// 1 Month
list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(30)));
// 1 Year
list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(365)));
break;
case AWS4HMACSHA256:
break;
}
}
}
// AWS services require specifying an Amazon S3 bucket using S3://bucket
list.add(new DescriptiveUrl(URI.create(String.format("s3://%s%s",
containerService.getContainer(file).getName(),
file.isRoot() ? Path.DELIMITER : containerService.isContainer(file) ? Path.DELIMITER : String.format("/%s", URIEncoder.encode(containerService.getKey(file))))),
DescriptiveUrl.Type.provider,
MessageFormat.format(LocaleFactory.localizedString("{0} URL"), "S3")));
// Filter by matching container name
final Optional<Set<Distribution>> filtered = distributions.entrySet().stream().filter(entry ->
new SimplePathPredicate(containerService.getContainer(file)).test(entry.getKey()))
.map(Map.Entry::getValue).findFirst();
if(filtered.isPresent()) {
// Add CloudFront distributions
for(Distribution distribution : filtered.get()) {
list.addAll(new DistributionUrlProvider(distribution).toUrl(file));
}
}
return list;
}
|
@Test
public void testProviderUriRoot() {
final Iterator<DescriptiveUrl> provider = new S3UrlProvider(session, Collections.emptyMap()).toUrl(new Path("/test-eu-west-1-cyberduck",
EnumSet.of(Path.Type.directory))).filter(DescriptiveUrl.Type.provider).iterator();
assertEquals("s3://test-eu-west-1-cyberduck/", provider.next().getUrl());
}
|
public static <K, V> Cache<K, V> eternal() {
return forMaximumBytes(Long.MAX_VALUE);
}
|
@Test
public void testEternalCache() throws Exception {
testCache(Caches.eternal());
}
|
@SuppressWarnings("unchecked")
public <TCOSBase extends COSBase> TCOSBase cloneForNewDocument(TCOSBase base) throws IOException
{
if (base == null)
{
return null;
}
COSBase retval = clonedVersion.get(base);
if (retval != null)
{
// we are done, it has already been converted.
return (TCOSBase) retval;
}
if (clonedValues.contains(base))
{
// Don't clone a clone
return base;
}
retval = cloneCOSBaseForNewDocument(base);
clonedVersion.put(base, retval);
clonedValues.add(retval);
return (TCOSBase) retval;
}
|
@Test
void testClonePDFWithCosArrayStream() throws IOException
{
try (PDDocument srcDoc = new PDDocument();
PDDocument dstDoc = new PDDocument())
{
PDPage pdPage = new PDPage();
srcDoc.addPage(pdPage);
new PDPageContentStream(srcDoc, pdPage, AppendMode.APPEND, true).close();
new PDPageContentStream(srcDoc, pdPage, AppendMode.APPEND, true).close();
new PDFCloneUtility(dstDoc).cloneForNewDocument(pdPage.getCOSObject());
}
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void modResolution0() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/mod_resolution0.txt")),
CrashReportAnalyzer.Rule.MOD_RESOLUTION0);
// Assert that the MOD_RESOLUTION0 rule was actually matched for this crash report.
assertNotNull(result);
}
|
@Nullable
@Override
public AccessToken loadById(String id) {
try {
final DBObject dbObject = get(AccessTokenImpl.class, id);
if (dbObject != null) {
return fromDBObject(dbObject);
}
} catch (IllegalArgumentException e) {
// Happens when id is not a valid BSON ObjectId
LOG.debug("Couldn't load access token", e);
}
return null;
}
|
@Test
@MongoDBFixtures("accessTokensMultipleTokens.json")
public void testLoadById() {
assertThat(accessTokenService.loadById("54e3deadbeefdeadbeefaffe"))
.isNotNull()
.satisfies(token -> {
assertThat(token.getId()).isEqualTo("54e3deadbeefdeadbeefaffe");
assertThat(token.getName()).isEqualTo("web");
});
assertThat(accessTokenService.loadById("54f9deadbeefdeadbeefaffe"))
.isNotNull()
.satisfies(token -> {
assertThat(token.getId()).isEqualTo("54f9deadbeefdeadbeefaffe");
assertThat(token.getName()).isEqualTo("rest");
});
assertThat(accessTokenService.loadById("54f9deadbeefdeadbeef0000"))
.as("check that loading a non-existent token returns null")
.isNull();
}
|
public Class<?> getTargetClass() {
return targetClass;
}
|
@Test
void testConstructorWithTargetClass() {
NacosDeserializationException exception = new NacosDeserializationException(
NacosDeserializationExceptionTest.class);
assertEquals(Constants.Exception.DESERIALIZE_ERROR_CODE, exception.getErrCode());
assertEquals(String.format("errCode: 101, errMsg: Nacos deserialize for class [%s] failed. ",
NacosDeserializationExceptionTest.class.getName()), exception.getMessage());
assertEquals(NacosDeserializationExceptionTest.class, exception.getTargetClass());
}
|
@Override
public Object decode(Response response, Type type) throws IOException {
JsonAdapter<Object> jsonAdapter = moshi.adapter(type);
if (response.status() == 404 || response.status() == 204)
return Util.emptyValueOf(type);
if (response.body() == null)
return null;
try (BufferedSource source = Okio.buffer(Okio.source(response.body().asInputStream()))) {
if (source.exhausted()) {
return null; // empty body
}
return jsonAdapter.fromJson(source);
} catch (JsonDataException e) {
if (e.getCause() != null && e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
}
throw e;
}
}
|
@Test
void decodes() throws Exception {
class Zone extends LinkedHashMap<String, Object> {
Zone(String name) {
this(name, null);
}
Zone(String name, String id) {
put("name", name);
if (id != null) {
put("id", id);
}
}
private static final long serialVersionUID = 1L;
}
List<Zone> zones = new LinkedList<>();
zones.add(new Zone("denominator.io."));
zones.add(new Zone("denominator.io.", "ABCD"));
Response response = Response.builder()
.status(200)
.reason("OK")
.headers(Collections.emptyMap())
.request(Request.create(Request.HttpMethod.GET, "/api", Collections.emptyMap(), null,
Util.UTF_8))
.body(zonesJson, UTF_8)
.build();
assertThat(new MoshiDecoder().decode(response, List.class)).isEqualTo(zones);
}
|
public static ValueLabel formatBytes(long bytes) {
return new ValueLabel(bytes, BYTES_UNIT);
}
|
@Test
public void formatGigaBytes() {
vl = TopoUtils.formatBytes(4_000_000_000L);
assertEquals(AM_WM, TopoUtils.Magnitude.GIGA, vl.magnitude());
assertEquals(AM_WL, "3.73 GB", vl.toString());
}
|
public int getSegmentId() {
return segmentId;
}
|
@Test
void testGetSegmentId() {
int segmentId = 1;
NettyPayload nettyPayload = NettyPayload.newSegment(segmentId);
assertThat(nettyPayload.getSegmentId()).isEqualTo(segmentId);
assertThatThrownBy(() -> NettyPayload.newSegment(-1))
.isInstanceOf(IllegalStateException.class);
}
|
public String compile(final DataProvider dataProvider,
final String template) {
final InputStream templateStream = this.getClass().getResourceAsStream(template);
return compile(dataProvider,
templateStream);
}
|
@Test
public void testCompilerObjs() throws Exception {
Collection<Object> objs = new ArrayList<Object>();
final ObjectDataCompiler converter = new ObjectDataCompiler();
final InputStream templateStream =
this.getClass().getResourceAsStream("/templates/rule_template_1.drl");
for (String[] row : rows) {
OBJ obj = new OBJ(row);
objs.add(obj);
}
final String drl = converter.compile(objs,
templateStream);
assertThat(EXPECTED_RULES.toString()).isEqualToIgnoringWhitespace(drl);
}
|
public static TableElements parse(final String schema, final TypeRegistry typeRegistry) {
return new SchemaParser(typeRegistry).parse(schema);
}
|
@Test
public void shouldThrowOnInvalidSchema() {
// Given:
final String schema = "foo-bar INTEGER";
// Expect:
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> parser.parse(schema)
);
// Then:
assertThat(e.getMessage(), containsString("Error parsing schema \"foo-bar INTEGER\" at 1:4: extraneous input '-' "));
}
|
public PipelineConfigs findGroupByPipeline(CaseInsensitiveString pipelineName) {
for (PipelineConfigs group : this) {
if (group.hasPipeline(pipelineName)) {
return group;
}
}
return null;
}
|
@Test
public void shouldFindGroupByPipelineName() throws Exception {
PipelineConfig p1Config = createPipelineConfig("pipeline1", "stage1");
PipelineConfig p2Config = createPipelineConfig("pipeline2", "stage1");
PipelineConfig p3Config = createPipelineConfig("pipeline3", "stage1");
PipelineConfigs group1 = createGroup("group1", p1Config, p2Config);
PipelineConfigs group2 = createGroup("group2", p3Config);
PipelineGroups groups = new PipelineGroups(group1, group2);
assertThat(groups.findGroupByPipeline(new CaseInsensitiveString("pipeline1")), is(group1));
assertThat(groups.findGroupByPipeline(new CaseInsensitiveString("pipeline2")), is(group1));
assertThat(groups.findGroupByPipeline(new CaseInsensitiveString("pipeline3")), is(group2));
}
|
public boolean containsGroup(String group) {
return clusterNodes.containsKey(group);
}
|
@Test
public void testContainsGroup() {
Assertions.assertFalse(metadata.containsGroup("group"));
}
|
@Override
public void execute(ComputationStep.Context context) {
Metric qProfilesMetric = metricRepository.getByKey(CoreMetrics.QUALITY_PROFILES_KEY);
new PathAwareCrawler<>(new QProfileAggregationComponentVisitor(qProfilesMetric))
.visit(treeRootHolder.getRoot());
}
|
@Test
public void fail_if_report_inconsistent() {
treeRootHolder.setRoot(MULTI_MODULE_PROJECT);
QualityProfile qpJava = createQProfile(QP_NAME_1, LANGUAGE_KEY_1);
analysisMetadataHolder.setQProfilesByLanguage(ImmutableMap.of(LANGUAGE_KEY_1, qpJava));
try {
underTest.execute(new TestComputationStepContext());
fail("Expected exception");
} catch (Exception e) {
assertThat(e).hasCause(new IllegalStateException("Report contains a file with language 'php' but no matching quality profile"));
}
}
|
Map<String, Object> sourceConsumerConfig(String role) {
Map<String, Object> result = sourceConsumerConfig(originals());
addClientId(result, role);
return result;
}
|
@Test
public void testSourceConsumerConfig() {
Map<String, String> connectorProps = makeProps(
MirrorConnectorConfig.CONSUMER_CLIENT_PREFIX + "max.poll.interval.ms", "120000",
MirrorConnectorConfig.SOURCE_CLUSTER_PREFIX + "bootstrap.servers", "localhost:2345"
);
MirrorConnectorConfig config = new TestMirrorConnectorConfig(connectorProps);
Map<String, Object> connectorConsumerProps = config.sourceConsumerConfig("test");
Map<String, Object> expectedConsumerProps = new HashMap<>();
expectedConsumerProps.put("enable.auto.commit", "false");
expectedConsumerProps.put("auto.offset.reset", "earliest");
expectedConsumerProps.put("max.poll.interval.ms", "120000");
expectedConsumerProps.put("client.id", "source1->target2|ConnectorName|test");
expectedConsumerProps.put("bootstrap.servers", "localhost:2345");
assertEquals(expectedConsumerProps, connectorConsumerProps);
// checking auto.offset.reset override works
connectorProps = makeProps(
MirrorConnectorConfig.CONSUMER_CLIENT_PREFIX + "auto.offset.reset", "latest",
MirrorConnectorConfig.SOURCE_CLUSTER_PREFIX + "bootstrap.servers", "localhost:2345"
);
config = new TestMirrorConnectorConfig(connectorProps);
connectorConsumerProps = config.sourceConsumerConfig("test");
expectedConsumerProps.put("auto.offset.reset", "latest");
expectedConsumerProps.remove("max.poll.interval.ms");
assertEquals(expectedConsumerProps, connectorConsumerProps,
MirrorConnectorConfig.CONSUMER_CLIENT_PREFIX + " source consumer config not matching");
}
|
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
// reset retryStartTime for a new host
retryStartTime = 0;
// Get completed maps on 'host'
List<TaskAttemptID> maps = scheduler.getMapsForHost(host);
// Sanity check to catch hosts with only 'OBSOLETE' maps,
// especially at the tail of large jobs
if (maps.size() == 0) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
}
// List of maps to be fetched yet
Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);
// Construct the url and connect
URL url = getMapOutputURL(host, maps);
DataInputStream input = null;
try {
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
// Loop through available map-outputs and fetch them
// On any error, failedTasks is not null and we exit
// after putting back the remaining maps to the
// yet_to_be_fetched list and marking the failed tasks.
TaskAttemptID[] failedTasks = null;
while (!remaining.isEmpty() && failedTasks == null) {
try {
failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
} catch (IOException e) {
IOUtils.cleanupWithLogger(LOG, input);
// Setup connection again if disconnected by NM
connection.disconnect();
// Get map output from remaining tasks only.
url = getMapOutputURL(host, remaining);
input = openShuffleUrl(host, remaining, url);
if (input == null) {
return;
}
}
}
if(failedTasks != null && failedTasks.length > 0) {
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
scheduler.hostFailed(host.getHostName());
for(TaskAttemptID left: failedTasks) {
scheduler.copyFailed(left, host, true, false);
}
}
// Sanity check
if (failedTasks == null && !remaining.isEmpty()) {
throw new IOException("server didn't return all expected map outputs: "
+ remaining.size() + " left.");
}
input.close();
input = null;
} finally {
if (input != null) {
IOUtils.cleanupWithLogger(LOG, input);
input = null;
}
for (TaskAttemptID left : remaining) {
scheduler.putBackKnownMapOutput(host, left);
}
}
}
|
@Test
public void testReduceOutOfDiskSpace() throws Throwable {
LOG.info("testReduceOutOfDiskSpace");
Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm,
r, metrics, except, key, connection);
String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
.thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
.thenReturn(replyHash);
when(connection.getInputStream()).thenReturn(in);
when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
.thenThrow(new DiskErrorException("No disk space available"));
underTest.copyFromHost(host);
verify(ss).reportLocalError(any(IOException.class));
}
|
@Udf(description = "Returns all substrings of the input that matches the given regex pattern")
public List<String> regexpExtractAll(
@UdfParameter(description = "The regex pattern") final String pattern,
@UdfParameter(description = "The input string to apply regex on") final String input
) {
return regexpExtractAll(pattern, input, 0);
}
|
@Test
public void shouldReturnNullIfGivenGroupNumberGreaterThanAvailableGroupNumbers() {
assertThat(udf.regexpExtractAll("e", "test string", 3), nullValue());
}
|
@Override
public boolean offerLast(T t)
{
addLastNode(t);
return true;
}
|
@Test
public void testOfferLast()
{
List<Integer> control = new ArrayList<>(Arrays.asList(1, 2, 3));
LinkedDeque<Integer> q = new LinkedDeque<>(control);
control.add(99);
Assert.assertTrue(q.offerLast(99));
Assert.assertEquals(q, control);
}
|
@Override
public synchronized ListenableFuture<BufferResult> get(OutputBufferId bufferId, long startSequenceId, DataSize maxSize)
{
requireNonNull(bufferId, "outputBufferId is null");
checkArgument(bufferId.getId() == outputBufferId.getId(), "Invalid buffer id");
checkArgument(maxSize.toBytes() > 0, "maxSize must be at least 1 byte");
acknowledge(bufferId, startSequenceId);
long currentSequenceId = this.currentSequenceId.get();
// process the request if we have no more data coming in, have data to read, or if this is an outdated request
if (noMorePages.get() || !handleInfoQueue.isEmpty() || !pages.isEmpty() || currentSequenceId != startSequenceId) {
return processRead(startSequenceId, maxSize);
}
// create a pending read and abort the previous one
PendingRead oldPendingRead = pendingRead;
pendingRead = new PendingRead(taskInstanceId, currentSequenceId, maxSize);
if (oldPendingRead != null) {
oldPendingRead.completeResultFutureWithEmpty();
}
return pendingRead.getResultFuture();
}
|
@Test
public void testSimplePendingRead()
{
SpoolingOutputBuffer buffer = createSpoolingOutputBuffer();
// attempt to get a page
ListenableFuture<BufferResult> future = buffer.get(BUFFER_ID, 0, sizeOfPages(2));
assertFalse(future.isDone());
// add three pages
List<Page> pages = new LinkedList<>();
for (int i = 0; i < 3; i++) {
pages.add(createPage(i));
}
addPages(buffer, pages);
// pending read should have two pages added
assertBufferResultEquals(TYPES, getFuture(future, MAX_WAIT), bufferResult(0, createPage(0), createPage(1)));
// checks we can still read first three pages
assertBufferResultEquals(TYPES, getBufferResult(buffer, BUFFER_ID, 0, sizeOfPages(3), MAX_WAIT), createBufferResult(TASK_INSTANCE_ID, 0, pages));
acknowledgeBufferResult(buffer, BUFFER_ID, 2);
compareTotalBuffered(buffer, 3);
assertBufferResultEquals(TYPES, getBufferResult(buffer, BUFFER_ID, 2, sizeOfPages(3), MAX_WAIT), bufferResult(2, createPage(2)));
// file should be removed after acknowledging all three pages
acknowledgeBufferResult(buffer, BUFFER_ID, 3);
compareTotalBuffered(buffer, 0);
// attempt to read, but nothing can be read
future = buffer.get(BUFFER_ID, 3, sizeOfPages(3));
assertFalse(future.isDone());
}
|
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
if (filteredOpenAPI == null) {
return filteredOpenAPI;
}
OpenAPI clone = new OpenAPI();
clone.info(filteredOpenAPI.getInfo());
clone.openapi(filteredOpenAPI.getOpenapi());
clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
clone.setExtensions(filteredOpenAPI.getExtensions());
clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
clone.setSecurity(filteredOpenAPI.getSecurity());
clone.setServers(filteredOpenAPI.getServers());
clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(openAPI.getTags()));
final Set<String> allowedTags = new HashSet<>();
final Set<String> filteredTags = new HashSet<>();
Paths clonedPaths = new Paths();
if (filteredOpenAPI.getPaths() != null) {
for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clonedPaths.addPathItem(resourcePath, clonedPathItem);
}
}
}
clone.paths(clonedPaths);
}
filteredTags.removeAll(allowedTags);
final List<Tag> tags = clone.getTags();
if (tags != null && !filteredTags.isEmpty()) {
tags.removeIf(tag -> filteredTags.contains(tag.getName()));
if (clone.getTags().isEmpty()) {
clone.setTags(null);
}
}
if (filteredOpenAPI.getWebhooks() != null) {
for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
if (clonedPathItem != null) {
if (!clonedPathItem.readOperations().isEmpty()) {
clone.addWebhooks(resourcePath, clonedPathItem);
}
}
}
}
if (filteredOpenAPI.getComponents() != null) {
clone.components(new Components());
clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
}
if (filter.isRemovingUnreferencedDefinitions()) {
clone = removeBrokenReferenceDefinitions(clone);
}
return clone;
}
|
@Test(description = "it should clone everything")
public void cloneEverything() throws IOException {
final OpenAPI openAPI = getOpenAPI(RESOURCE_PATH);
final OpenAPI filtered = new SpecFilter().filter(openAPI, new NoOpOperationsFilter(), null, null, null);
assertEquals(Json.pretty(filtered), Json.pretty(openAPI));
}
|
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
for(Path file : files.keySet()) {
callback.delete(file);
try {
if(file.isFile() || file.isSymbolicLink()) {
if(!session.getClient().deleteFile(file.getAbsolute())) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
}
else if(file.isDirectory()) {
// Change working directory to parent
if(!session.getClient().changeWorkingDirectory(file.getParent().getAbsolute())) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
if(!session.getClient().removeDirectory(file.getAbsolute())) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
}
}
catch(IOException e) {
throw new FTPExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
}
|
@Test
public void testDeleteDirectory() throws Exception {
final Path test = new Path(new FTPWorkdirService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
new FTPDirectoryFeature(session).mkdir(test, new TransferStatus());
new FTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public <T extends ShardingSphereRule> Collection<T> findRules(final Class<T> clazz) {
Collection<T> result = new LinkedList<>();
for (ShardingSphereRule each : rules) {
if (clazz.isAssignableFrom(each.getClass())) {
result.add(clazz.cast(each));
}
}
return result;
}
|
@Test
void assertFindRules() {
assertThat(ruleMetaData.findRules(ShardingSphereRuleFixture.class).size(), is(1));
}
|
public LocalComponentIdDrlSession get(String basePath, long identifier) {
return new LocalComponentIdDrlSession(basePath, identifier);
}
|
@Test
void get() {
long identifier = Math.abs(new Random().nextLong());
String basePath = "/TestingRule/TestedRule";
ModelLocalUriId modelLocalUriId = new ModelLocalUriId(LocalUri.parse("/pmml" + basePath));
assertThat(modelLocalUriId.model()).isEqualTo("pmml");
assertThat(modelLocalUriId.basePath()).isEqualTo(basePath);
LocalComponentIdDrlSession retrieved = new EfestoAppRoot()
.get(KieDrlComponentRoot.class)
.get(DrlSessionIdFactory.class)
.get(modelLocalUriId.basePath(), identifier);
assertThat(retrieved.model()).isEqualTo(LocalComponentIdDrlSession.PREFIX);
String expected = basePath + SLASH + identifier;
assertThat(retrieved.basePath()).isEqualTo(expected);
}
|
@VisibleForTesting
static String formatTimestamp(Long timestampMicro) {
// timestampMicro is in "microseconds since epoch" format,
// e.g., 1452062291123456L means "2016-01-06 06:38:11.123456 UTC".
// Separate into seconds and microseconds.
long timestampSec = timestampMicro / 1_000_000;
long micros = timestampMicro % 1_000_000;
if (micros < 0) {
micros += 1_000_000;
timestampSec -= 1;
}
String dayAndTime = DATE_AND_SECONDS_FORMATTER.print(timestampSec * 1000);
if (micros == 0) {
return String.format("%s UTC", dayAndTime);
}
return String.format("%s.%06d UTC", dayAndTime, micros);
}
|
@Test
public void testFormatTimestamp() {
assertThat(
BigQueryAvroUtils.formatTimestamp(1452062291123456L),
equalTo("2016-01-06 06:38:11.123456 UTC"));
}
|
public static String stringEmptyAndThenExecute(String source, Callable<String> callable) {
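// Execute the callable only when the source is null or empty; if it is not executed or it fails, fall back to the trimmed source (or null).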
if (StringUtils.isEmpty(source)) {
try {
return callable.call();
} catch (Exception e) {
LogUtils.NAMING_LOGGER.error("string empty and then execute cause an exception.", e);
}
}
return source == null ? null : source.trim();
}
|
@Test
void testStringEmptyAndThenExecuteSuccess() {
String word = " ";
String actual = TemplateUtils.stringEmptyAndThenExecute(word, () -> "call");
assertEquals("", actual);
}
|
@Override
public int writerIndex() {
return writerIndex;
}
|
@Test
void writerIndexBoundaryCheck1() {
Assertions.assertThrows(IndexOutOfBoundsException.class, () -> {
buffer.writerIndex(-1);
});
}
|
public static List<Section> order(Collection<Section> sections, String... orderedNames) {
Map<String, Section> alphabeticalOrderedMap = new TreeMap<>();
sections.forEach(section -> alphabeticalOrderedMap.put(section.getName(), section));
List<Section> result = new ArrayList<>(sections.size());
stream(orderedNames).forEach(name -> {
Section section = alphabeticalOrderedMap.remove(name);
if (section != null) {
result.add(section);
}
});
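// Sections not mentioned in orderedNames are appended afterwards in alphabetical order (TreeMap iteration order).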
result.addAll(alphabeticalOrderedMap.values());
return result;
}
|
@Test
public void test_order() {
Collection<Section> sections = asList(
newSection("end2"),
newSection("bar"),
newSection("end1"),
newSection("foo"));
List<String> ordered = SystemInfoUtils.order(sections, "foo", "bar").stream()
.map(Section::getName)
.toList();
assertThat(ordered).isEqualTo(asList("foo", "bar", "end1", "end2"));
}
|
public RingbufferStoreConfig setProperty(String name, String value) {
properties.setProperty(name, value);
return this;
}
|
@Test
public void setProperty() {
config.setProperty("key", "value");
assertEquals("value", config.getProperty("key"));
}
|
public String filterRequest(Order order) {
return filterChain.execute(order);
}
|
@Test
void testFilterRequest() {
final var target = mock(Target.class);
final var filterManager = new FilterManager();
assertEquals("RUNNING...", filterManager.filterRequest(mock(Order.class)));
verifyNoMoreInteractions(target);
}
|
@Override
protected void doStop() throws Exception {
if (connection != null) {
for (IrcChannel channel : getEndpoint().getConfiguration().getChannelList()) {
LOG.debug("Parting: {}", channel);
connection.doPart(channel.getName());
}
connection.removeIRCEventListener(listener);
}
super.doStop();
}
|
@Test
public void doStopTest() {
producer.stop();
verify(connection).doPart("#chan1");
verify(connection).doPart("#chan2");
verify(connection).removeIRCEventListener(listener);
}
|
private void fail(final ChannelHandlerContext ctx, int length) {
fail(ctx, String.valueOf(length));
}
|
@Test
public void testTooLongLineWithFailFast() throws Exception {
EmbeddedChannel ch = new EmbeddedChannel(new LenientLineBasedFrameDecoder(16, false, true, false));
try {
ch.writeInbound(copiedBuffer("12345678901234567", CharsetUtil.US_ASCII));
fail();
} catch (Exception e) {
assertThat(e, is(instanceOf(TooLongFrameException.class)));
}
assertThat(ch.writeInbound(copiedBuffer("890", CharsetUtil.US_ASCII)), is(false));
assertThat(ch.writeInbound(copiedBuffer("123\r\nfirst\r\n", CharsetUtil.US_ASCII)), is(true));
ByteBuf buf = ch.readInbound();
ByteBuf buf2 = copiedBuffer("first\r\n", CharsetUtil.US_ASCII);
assertThat(buf, is(buf2));
assertThat(ch.finish(), is(false));
buf.release();
buf2.release();
}
|
public ClusterStateBundle cloneWithMapper(Function<ClusterState, ClusterState> mapper) {
AnnotatedClusterState clonedBaseline = baselineState.cloneWithClusterState(
mapper.apply(baselineState.getClusterState().clone()));
Map<String, AnnotatedClusterState> clonedDerived = derivedBucketSpaceStates.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().cloneWithClusterState(
mapper.apply(e.getValue().getClusterState().clone()))));
return new ClusterStateBundle(clonedBaseline, clonedDerived, distributionConfig, feedBlock, deferredActivation);
}
|
@Test
void cloning_preserves_distribution_config() {
var bundle = createTestBundleWithDistributionConfig(DistributionBuilder.configForFlatCluster(5));
var derived = bundle.cloneWithMapper(Function.identity());
assertEquals(bundle, derived);
}
|
public DrlxParseResult drlxParse(Class<?> patternType, String bindingId, String expression) {
return drlxParse(patternType, bindingId, expression, false);
}
|
@Test
public void bigDecimalInWithInt() {
SingleDrlxParseSuccess result = (SingleDrlxParseSuccess) parser.drlxParse(Person.class, "$p", "(money in (100, 200))");
assertThat(result.getExpr().toString()).isEqualTo("D.eval(org.drools.model.operators.InOperator.INSTANCE, _this.getMoney(), 100, 200)");
}
|
public void triggerNextSuperstep() {
synchronized (monitor) {
if (terminated) {
throw new IllegalStateException("Already terminated.");
}
superstepNumber++;
monitor.notifyAll();
}
}
|
@Test
public void testWaitAlreadyFulfilled() {
try {
SuperstepKickoffLatch latch = new SuperstepKickoffLatch();
latch.triggerNextSuperstep();
Waiter w = new Waiter(latch, 2);
Thread waiter = new Thread(w);
waiter.setDaemon(true);
waiter.start();
WatchDog wd = new WatchDog(waiter, 2000);
wd.start();
Thread.sleep(100);
wd.join();
if (wd.getError() != null) {
throw wd.getError();
}
if (w.getError() != null) {
throw w.getError();
}
} catch (Throwable t) {
t.printStackTrace();
Assert.fail("Error: " + t.getMessage());
}
}
|
public List<Bson> parse(final List<String> filterExpressions,
final List<EntityAttribute> attributes) {
if (filterExpressions == null || filterExpressions.isEmpty()) {
return List.of();
}
final Map<String, List<Filter>> groupedByField = filterExpressions.stream()
.map(expr -> singleFilterParser.parseSingleExpression(expr, attributes))
.collect(groupingBy(Filter::field));
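// Filters that target the same field are OR-ed together into a single criterion; different fields stay as separate entries in the result.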
return groupedByField.values().stream()
.map(grouped -> grouped.stream()
.map(Filter::toBson)
.collect(Collectors.toList()))
.map(groupedFilters -> {
if (groupedFilters.size() == 1) {
return groupedFilters.get(0);
} else {
return Filters.or(groupedFilters);
}
})
.toList();
}
|
@Test
void returnsEmptyListOnEmptyFilterList() {
assertThat(toTest.parse(List.of(), List.of()))
.isEmpty();
}
|
@Override
public boolean shouldFire(TriggerStateMachine.TriggerContext context) throws Exception {
return context.trigger().subTrigger(ACTUAL).invokeShouldFire(context)
|| context.trigger().subTrigger(UNTIL).invokeShouldFire(context);
}
|
@Test
public void testActualFiresButUntilFinishes() throws Exception {
tester =
TriggerStateMachineTester.forTrigger(
new OrFinallyStateMachine(
RepeatedlyStateMachine.forever(AfterPaneStateMachine.elementCountAtLeast(2)),
AfterPaneStateMachine.elementCountAtLeast(3)),
FixedWindows.of(Duration.millis(10)));
IntervalWindow window = new IntervalWindow(new Instant(0), new Instant(10));
// Before any firing
tester.injectElements(1);
assertFalse(tester.shouldFire(window));
assertFalse(tester.isMarkedFinished(window));
// The actual fires but doesn't finish
tester.injectElements(2);
assertTrue(tester.shouldFire(window));
tester.fireIfShouldFire(window);
assertFalse(tester.isMarkedFinished(window));
// The until fires and finishes; the trigger is finished
tester.injectElements(3);
assertTrue(tester.shouldFire(window));
tester.fireIfShouldFire(window);
assertTrue(tester.isMarkedFinished(window));
}
|
public OffsetAndMetadata findNextCommitOffset(final String commitMetadata) {
boolean found = false;
long currOffset;
long nextCommitOffset = committedOffset;
for (KafkaSpoutMessageId currAckedMsg : ackedMsgs) { // complexity is that of a linear scan on a TreeMap
currOffset = currAckedMsg.offset();
if (currOffset == nextCommitOffset) {
// found the next offset to commit
found = true;
nextCommitOffset = currOffset + 1;
} else if (currOffset > nextCommitOffset) {
if (emittedOffsets.contains(nextCommitOffset)) {
LOG.debug("topic-partition [{}] has non-sequential offset [{}]."
+ " It will be processed in a subsequent batch.", tp, currOffset);
break;
} else {
/*
This case will arise in case of non-sequential offset being processed.
So, if the topic doesn't contain offset = nextCommitOffset (possible
if the topic is compacted or deleted), the consumer should jump to
the next logical point in the topic. Next logical offset should be the
first element after nextCommitOffset in the ascending ordered emitted set.
*/
LOG.debug("Processed non-sequential offset."
+ " The earliest uncommitted offset is no longer part of the topic."
+ " Missing offset: [{}], Processed: [{}]", nextCommitOffset, currOffset);
final Long nextEmittedOffset = emittedOffsets.ceiling(nextCommitOffset);
if (nextEmittedOffset != null && currOffset == nextEmittedOffset) {
LOG.debug("Found committable offset: [{}] after missing offset: [{}], skipping to the committable offset",
currOffset, nextCommitOffset);
found = true;
nextCommitOffset = currOffset + 1;
} else {
LOG.debug("Topic-partition [{}] has non-sequential offset [{}]."
+ " Next offset to commit should be [{}]", tp, currOffset, nextCommitOffset);
break;
}
}
} else {
throw new IllegalStateException("The offset [" + currOffset + "] is below the current nextCommitOffset "
+ "[" + nextCommitOffset + "] for [" + tp + "]."
+ " This should not be possible, and likely indicates a bug in the spout's acking or emit logic.");
}
}
OffsetAndMetadata nextCommitOffsetAndMetadata = null;
if (found) {
nextCommitOffsetAndMetadata = new OffsetAndMetadata(nextCommitOffset, commitMetadata);
LOG.debug("Topic-partition [{}] has offsets [{}-{}] ready to be committed."
+ " Processing will resume at offset [{}] upon spout restart",
tp, committedOffset, nextCommitOffsetAndMetadata.offset() - 1, nextCommitOffsetAndMetadata.offset());
} else {
LOG.debug("Topic-partition [{}] has no offsets ready to be committed", tp);
}
LOG.trace("{}", this);
return nextCommitOffsetAndMetadata;
}
|
@Test
public void testFindNextCommitOffsetWithOneAck() {
/*
* The KafkaConsumer commitSync API docs: "The committed offset should be the next message your application will consume, i.e.
* lastProcessedMessageOffset + 1. "
*/
emitAndAckMessage(getMessageId(initialFetchOffset));
OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA);
assertThat("The next commit offset should be one past the processed message offset", nextCommitOffset.offset(), is(initialFetchOffset + 1));
}
|
@Override
public boolean containsAll(Collection<?> c) {
return get(containsAllAsync(c));
}
|
@Test
public void testContainsAll() {
Set<Integer> set = redisson.getSet("set");
for (int i = 0; i < 200; i++) {
set.add(i);
}
Assertions.assertTrue(set.containsAll(Collections.emptyList()));
Assertions.assertTrue(set.containsAll(Arrays.asList(30, 11)));
Assertions.assertFalse(set.containsAll(Arrays.asList(30, 711, 11)));
}
|
@Override
public void open() throws Exception {
this.timerService =
getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this);
this.keySet = new HashSet<>();
super.open();
}
|
@Test
void testEndInput() throws Exception {
AtomicInteger counter = new AtomicInteger();
OutputTag<Long> sideOutputTag = new OutputTag<Long>("side-output") {};
KeyedTwoOutputProcessOperator<Integer, Integer, Integer, Long> processOperator =
new KeyedTwoOutputProcessOperator<>(
new TwoOutputStreamProcessFunction<Integer, Integer, Long>() {
@Override
public void processRecord(
Integer record,
Collector<Integer> output1,
Collector<Long> output2,
PartitionedContext ctx) {
// do nothing.
}
@Override
public void endInput(
TwoOutputNonPartitionedContext<Integer, Long> ctx) {
try {
ctx.applyToAllPartitions(
(firstOutput, secondOutput, context) -> {
counter.incrementAndGet();
Integer currentKey =
context.getStateManager().getCurrentKey();
firstOutput.collect(currentKey);
secondOutput.collect(Long.valueOf(currentKey));
});
} catch (Exception e) {
throw new RuntimeException(e);
}
}
},
sideOutputTag);
try (KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness =
new KeyedOneInputStreamOperatorTestHarness<>(
processOperator,
(KeySelector<Integer, Integer>) value -> value,
Types.INT)) {
testHarness.open();
testHarness.processElement(new StreamRecord<>(1)); // key is 1
testHarness.processElement(new StreamRecord<>(2)); // key is 2
testHarness.endInput();
assertThat(counter).hasValue(2);
Collection<StreamRecord<Integer>> firstOutput = testHarness.getRecordOutput();
ConcurrentLinkedQueue<StreamRecord<Long>> secondOutput =
testHarness.getSideOutput(sideOutputTag);
assertThat(firstOutput).containsExactly(new StreamRecord<>(1), new StreamRecord<>(2));
assertThat(secondOutput)
.containsExactly(new StreamRecord<>(1L), new StreamRecord<>(2L));
}
}
|
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
final AttributedList<Path> children = new AttributedList<>();
if(replies.isEmpty()) {
return children;
}
// At least one entry successfully parsed
boolean success = false;
for(String line : replies) {
final Map<String, Map<String, String>> file = this.parseFacts(line);
if(null == file) {
log.error(String.format("Error parsing line %s", line));
continue;
}
for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
final String name = f.getKey();
// size -- Size in octets
// modify -- Last modification time
// create -- Creation time
// type -- Entry type
// unique -- Unique id of file/directory
// perm -- File permissions, whether read, write, execute is allowed for the login id.
// lang -- Language of the file name per IANA [11] registry.
// media-type -- MIME media-type of file contents per IANA registry.
// charset -- Character set per IANA registry (if not UTF-8)
final Map<String, String> facts = f.getValue();
if(!facts.containsKey("type")) {
log.error(String.format("No type fact in line %s", line));
continue;
}
final Path parsed;
if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
}
else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
}
else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
// Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
final String[] type = facts.get("type").split(":");
if(type.length == 2) {
final String target = type[1];
if(target.startsWith(String.valueOf(Path.DELIMITER))) {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
}
else {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
}
}
else {
log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
continue;
}
}
else {
log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
continue;
}
if(!success) {
if(parsed.isDirectory() && directory.getName().equals(name)) {
log.warn(String.format("Possibly bogus response line %s", line));
}
else {
success = true;
}
}
if(name.equals(".") || name.equals("..")) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip %s", name));
}
continue;
}
if(facts.containsKey("size")) {
parsed.attributes().setSize(Long.parseLong(facts.get("size")));
}
if(facts.containsKey("unix.uid")) {
parsed.attributes().setOwner(facts.get("unix.uid"));
}
if(facts.containsKey("unix.owner")) {
parsed.attributes().setOwner(facts.get("unix.owner"));
}
if(facts.containsKey("unix.gid")) {
parsed.attributes().setGroup(facts.get("unix.gid"));
}
if(facts.containsKey("unix.group")) {
parsed.attributes().setGroup(facts.get("unix.group"));
}
if(facts.containsKey("unix.mode")) {
parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
}
else if(facts.containsKey("perm")) {
if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
Permission.Action user = Permission.Action.none;
final String flags = facts.get("perm");
if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
// RETR command may be applied to that object
// Listing commands, LIST, NLST, and MLSD may be applied
user = user.or(Permission.Action.read);
}
if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
user = user.or(Permission.Action.write);
}
if(StringUtils.contains(flags, 'e')) {
// CWD command naming the object should succeed
user = user.or(Permission.Action.execute);
if(parsed.isDirectory()) {
user = user.or(Permission.Action.read);
}
}
final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
parsed.attributes().setPermission(permission);
}
}
if(facts.containsKey("modify")) {
// Time values are always represented in UTC
parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
}
if(facts.containsKey("create")) {
// Time values are always represented in UTC
parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
}
children.add(parsed);
}
}
if(!success) {
throw new FTPInvalidListException(children);
}
return children;
}
|
@Test
public void testParseMlsdSymbolic() throws Exception {
Path path = new Path(
"/www", EnumSet.of(Path.Type.directory));
String[] replies = new String[]{
"Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar"
};
final AttributedList<Path> children = new FTPMlsdListResponseReader()
.read(path, Arrays.asList(replies));
assertEquals(1, children.size());
assertEquals("/www/foobar", children.get(0).getAbsolute());
assertEquals("/foobar", children.get(0).getSymlinkTarget().getAbsolute());
}
|
public Set<AuthorizationPluginInfo> getPluginsThatSupportsWebBasedAuthentication() {
return getPluginsThatSupports(SupportedAuthType.Web);
}
|
@Test
public void shouldGetPluginsThatSupportWebBasedAuthorization() {
Set<AuthorizationPluginInfo> pluginsThatSupportsWebBasedAuthentication = store.getPluginsThatSupportsWebBasedAuthentication();
assertThat(pluginsThatSupportsWebBasedAuthentication.size(), is(2));
assertThat(pluginsThatSupportsWebBasedAuthentication.contains(plugin1), is(true));
assertThat(pluginsThatSupportsWebBasedAuthentication.contains(plugin3), is(true));
}
|
static JavaType constructType(Type type) {
try {
return constructTypeInner(type);
} catch (Exception e) {
throw new InvalidDataTableTypeException(type, e);
}
}
|
@Test
void raw_list_should_equal_a_list_of_objects() {
JavaType javaType = TypeFactory.constructType(List.class);
JavaType other = TypeFactory.constructType(LIST_OF_OBJECT);
assertThat(javaType, equalTo(other));
}
|
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
}
|
@Test
public void testSkippingAbortedTransactions() {
buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
ByteBuffer buffer = ByteBuffer.allocate(1024);
int currentOffset = 0;
currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
abortTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
// normal fetch
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Fetch<byte[], byte[]> fetch = collectFetch();
assertEquals(emptyMap(), fetch.records());
assertTrue(fetch.positionAdvanced());
}
|
public static boolean isNormalizedPathOutsideWorkingDir(String path) {
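// FilenameUtils.normalize returns null when ".." segments would escape the base directory, and the prefix is blank for relative paths,
// so this returns true only when the normalized path stays inside the working directory.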
final String normalize = FilenameUtils.normalize(path);
final String prefix = FilenameUtils.getPrefix(normalize);
return (normalize != null && StringUtils.isBlank(prefix));
}
|
@Test
public void shouldReturnTrueIfGivenFolderWithRelativeKeepsYouInsideSandbox() {
assertThat(FilenameUtil.isNormalizedPathOutsideWorkingDir("tmp/../home/cruise"), is(true));
}
|
public final long getWhen() {
return when;
}
|
@Test
public void getWhenOutputZero() {
// Arrange
final LogHeader objectUnderTest = new LogHeader(0);
// Act
final long actual = objectUnderTest.getWhen();
// Assert result
Assert.assertEquals(0L, actual);
}
|
public static void generate(String cluster, OutputStream out,
List<PrometheusRawMetricsProvider> metricsProviders)
throws IOException {
ByteBuf buf = PulsarByteBufAllocator.DEFAULT.heapBuffer();
try {
SimpleTextOutputStream stream = new SimpleTextOutputStream(buf);
generateSystemMetrics(stream, cluster);
if (metricsProviders != null) {
for (PrometheusRawMetricsProvider metricsProvider : metricsProviders) {
metricsProvider.generate(stream);
}
}
out.write(buf.array(), buf.arrayOffset(), buf.readableBytes());
} finally {
buf.release();
}
}
|
@Test
public void testGenerateSystemMetricsWithDefaultCluster() throws Exception {
String defaultClusterValue = "cluster_test";
String labelName = "lb_name";
String labelValue = "lb_value";
// default cluster.
String metricsName = "label_use_default_cluster" + randomString();
Counter counter = new Counter.Builder()
.name(metricsName)
.labelNames(labelName)
.help("x")
.register(CollectorRegistry.defaultRegistry);
counter.labels(labelValue).inc();
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrometheusMetricsGeneratorUtils.generate(defaultClusterValue, out, Collections.emptyList());
assertTrue(out.toString().contains(
String.format("%s_total{cluster=\"%s\",%s=\"%s\"} 1.0",
metricsName, defaultClusterValue, labelName, labelValue)
));
// cleanup
out.close();
CollectorRegistry.defaultRegistry.unregister(counter);
}
|
public void wakeup() {
if (hasNotified.compareAndSet(false, true)) {
waitPoint.countDown(); // notify
}
}
|
@Test
public void testWakeup() {
ServiceThread testServiceThread = startTestServiceThread();
testServiceThread.wakeup();
assertEquals(true, testServiceThread.hasNotified.get());
assertEquals(0, testServiceThread.waitPoint.getCount());
}
|
@Override
@CheckForNull
public EmailMessage format(Notification notif) {
if (!(notif instanceof ChangesOnMyIssuesNotification)) {
return null;
}
ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif;
if (notification.getChange() instanceof AnalysisChange) {
checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty");
return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification);
}
return formatMultiProject(notification);
}
|
@Test
public void formats_fails_with_ISE_if_change_from_Analysis_and_no_issue() {
AnalysisChange analysisChange = newAnalysisChange();
assertThatThrownBy(() -> underTest.format(new ChangesOnMyIssuesNotification(analysisChange, Collections.emptySet())))
.isInstanceOf(IllegalStateException.class)
.hasMessage("changedIssues can't be empty");
}
|
Object[] findValues(int ordinal) {
return getAllValues(ordinal, type, 0);
}
|
@Test
public void testMapReference() throws Exception {
Map<Integer, String> map = new HashMap<>();
map.put(1, "one");
map.put(2, "two");
MapReference mapReference = new MapReference();
mapReference.mapValues = map;
objectMapper.add(mapReference);
StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
// no auto-expand for a map
FieldPath fieldPath = new FieldPath(readStateEngine, "MapReference", "mapValues.value");
Object[] values = fieldPath.findValues(0);
Assert.assertEquals(2, values.length);
Set<String> valuesAsSet = new HashSet<>();
for (Object v : values) valuesAsSet.add((String) v);
Assert.assertTrue(valuesAsSet.contains("one"));
Assert.assertTrue(valuesAsSet.contains("two"));
}
|
@Override
public NetconfDevice getNetconfDevice(DeviceId deviceInfo) {
return netconfDeviceMap.get(deviceInfo);
}
|
@Test
public void testGetNetconfDevice() {
NetconfDevice fetchedDevice1 = ctrl.getNetconfDevice(deviceId1);
assertThat("Incorrect device fetched", fetchedDevice1, is(device1));
NetconfDevice fetchedDevice2 = ctrl.getNetconfDevice(deviceId2);
assertThat("Incorrect device fetched", fetchedDevice2, is(device2));
}
|
public static Pair<String, String> decryptHandler(String dataId, String secretKey, String content) {
if (!checkCipher(dataId)) {
return Pair.with(secretKey, content);
}
Optional<String> algorithmName = parseAlgorithmName(dataId);
Optional<EncryptionPluginService> optional = algorithmName.flatMap(
EncryptionPluginManager.instance()::findEncryptionService);
if (!optional.isPresent()) {
LOGGER.warn("[EncryptionHandler] [decryptHandler] No encryption program with the corresponding name found");
return Pair.with(secretKey, content);
}
EncryptionPluginService encryptionPluginService = optional.get();
String decryptSecretKey = encryptionPluginService.decryptSecretKey(secretKey);
String decryptContent = encryptionPluginService.decrypt(decryptSecretKey, content);
return Pair.with(decryptSecretKey, decryptContent);
}
|
@Test
void testUnknownAlgorithmNameDecrypt() {
String dataId = "cipher-mySM4-application";
String content = "content";
Pair<String, String> pair = EncryptionHandler.decryptHandler(dataId, "", content);
assertNotNull(pair);
assertEquals(content, pair.getSecond(), "should return original content if algorithm is not defined.");
}
|
public static boolean parseBooleanValue(String value, String name) {
if (value.equalsIgnoreCase("ON")
|| value.equalsIgnoreCase("TRUE")
|| value.equalsIgnoreCase("1")) {
return true;
}
if (value.equalsIgnoreCase("OFF")
|| value.equalsIgnoreCase("FALSE")
|| value.equalsIgnoreCase("0")) {
return false;
}
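// The value matched neither set of accepted literals: report it as invalid before returning the default.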
ErrorReport.reportSemanticException(ErrorCode.ERR_INVALID_VALUE, name, value,
"1, 0, on, off, true, or false (case insensitive)");
return false;
}
|
@Test
public void testParseBooleanValueException() {
expectedEx.expect(SemanticException.class);
expectedEx.expectMessage(
"Invalid var: 'tru'. Expected values should be 1, 0, on, off, true, or false (case insensitive)");
ParseUtil.parseBooleanValue("tru", "var");
}
|
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
|
@Test
public void testListCommonPrefixSlashOnly() throws Exception {
final Path container = new Path("test-eu-central-1-cyberduck-unsupportedprefix", EnumSet.of(Path.Type.directory, Path.Type.volume));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
assertTrue(new S3ObjectListService(session, acl).list(container, new DisabledListProgressListener()).isEmpty());
}
|
@Override
@Nullable
public long[] readLongArray(@Nonnull String fieldName) throws IOException {
return readIncompatibleField(fieldName, LONG_ARRAY, super::readLongArray);
}
|
@Test
public void testReadLongArray() throws Exception {
assertNull(reader.readLongArray("NO SUCH FIELD"));
}
|
@Override
public V getValue() {
if (valueObject == null) {
valueObject = serializationService.toObject(valueData);
}
return valueObject;
}
|
@Override
@Test
public void getValue_caching() {
QueryableEntry entry = createEntry("key", "value");
assertSame(entry.getValue(), entry.getValue());
}
|
@Override
public void registerAndStartNewCoordinators(
Collection<OperatorCoordinatorHolder> coordinators,
ComponentMainThreadExecutor mainThreadExecutor,
final int parallelism) {
for (OperatorCoordinatorHolder coordinator : coordinators) {
coordinatorMap.put(coordinator.operatorId(), coordinator);
coordinator.lazyInitialize(
globalFailureHandler,
mainThreadExecutor,
executionGraph.getCheckpointCoordinator(),
parallelism);
}
startOperatorCoordinators(coordinators);
}
|
@Test
void testRegisterAndStartNewCoordinators() throws Exception {
final JobVertex[] jobVertices = createJobVertices(BLOCKING);
OperatorID operatorId1 = OperatorID.fromJobVertexID(jobVertices[0].getID());
OperatorID operatorId2 = OperatorID.fromJobVertexID(jobVertices[1].getID());
ExecutionGraph executionGraph = createDynamicGraph(jobVertices);
ExecutionJobVertex ejv1 = executionGraph.getJobVertex(jobVertices[0].getID());
ExecutionJobVertex ejv2 = executionGraph.getJobVertex(jobVertices[1].getID());
executionGraph.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
executionGraph.initializeJobVertex(ejv1, 0L);
DefaultOperatorCoordinatorHandler handler =
new DefaultOperatorCoordinatorHandler(executionGraph, throwable -> {});
assertThat(handler.getCoordinatorMap().keySet()).contains(operatorId1);
executionGraph.initializeJobVertex(ejv2, 0L);
handler.registerAndStartNewCoordinators(
ejv2.getOperatorCoordinators(),
executionGraph.getJobMasterMainThreadExecutor(),
ejv2.getParallelism());
assertThat(handler.getCoordinatorMap().keySet()).contains(operatorId1, operatorId2);
}
|
public DescriptiveUrl find(DescriptiveUrl.Type type) {
for(DescriptiveUrl url : this) {
if(url.getType().equals(type)) {
return url;
}
}
return DescriptiveUrl.EMPTY;
}
|
@Test
public void testFind() {
final DescriptiveUrlBag list = new DescriptiveUrlBag();
final DescriptiveUrl url = new DescriptiveUrl(URI.create("http://example.net"));
list.add(url);
assertEquals(DescriptiveUrl.EMPTY, list.find(DescriptiveUrl.Type.provider));
assertEquals(url, list.find(DescriptiveUrl.Type.http));
}
|
public static <T> Iterator<T> asReadOnlyIterator(Iterator<T> iterator) {
if (iterator instanceof UnmodifiableIterator) {
return iterator;
}
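// Otherwise delegate only hasNext()/next(); remove() is left to UnmodifiableIterator, which rejects it.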
return new UnmodifiableIterator<>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public T next() {
return iterator.next();
}
};
}
|
@Test(expected = UnsupportedOperationException.class)
public void test_asReadOnlyIterator_throws_exception_when_remove_called() {
Iterator<Integer> iterator = IterableUtil.asReadOnlyIterator(numbers.iterator());
while (iterator.hasNext()) {
iterator.next();
iterator.remove();
}
}
|