focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/** Returns a binary combine function that yields the maximum of its integer inputs. */
public static Combine.BinaryCombineIntegerFn ofIntegers() {
    final Combine.BinaryCombineIntegerFn maxFn = new Max.MaxIntegerFn();
    return maxFn;
}
|
@Test
public void testMaxIntegerFn() {
    // Combining the integers 1..4 with the max CombineFn must yield 4.
    testCombineFn(Max.ofIntegers(), Lists.newArrayList(1, 2, 3, 4), 4);
}
|
/**
 * Creates sharding conditions for an INSERT statement, handling both the
 * INSERT ... VALUES and INSERT ... SELECT forms, then appends conditions
 * derived from generated keys.
 */
public List<ShardingCondition> createShardingConditions(final InsertStatementContext sqlStatementContext, final List<Object> params) {
    List<ShardingCondition> result;
    if (null == sqlStatementContext.getInsertSelectContext()) {
        result = createShardingConditionsWithInsertValues(sqlStatementContext, params);
    } else {
        result = createShardingConditionsWithInsertSelect(sqlStatementContext, params);
    }
    appendGeneratedKeyConditions(sqlStatementContext, result);
    return result;
}
|
@Test
void assertCreateShardingConditionsInsertStatementWithGeneratedKeyContextUsingCommonExpressionSegmentNow() {
    // Insert value is a CommonExpressionSegment holding "now()"; the engine should still
    // produce a sharding condition for the configured sharding column.
    when(insertStatementContext.getInsertValueContexts()).thenReturn(Collections.singletonList(createInsertValueContextAsCommonExpressionSegmentWithNow()));
    when(insertStatementContext.getGeneratedKeyContext()).thenReturn(Optional.of(mock(GeneratedKeyContext.class)));
    when(shardingRule.findShardingColumn("foo_col_1", "foo_table")).thenReturn(Optional.of("foo_col_1"));
    List<ShardingCondition> shardingConditions = shardingConditionEngine.createShardingConditions(insertStatementContext, Collections.emptyList());
    // The first condition corresponds to the first insert value set and must carry a value.
    assertThat(shardingConditions.get(0).getStartIndex(), is(0));
    assertFalse(shardingConditions.get(0).getValues().isEmpty());
}
|
/**
 * Writes the dump metadata to the {@code METADATA} file under {@code rootDir}.
 * Fails if the dump was already published or if metadata was already written.
 */
@Override
public void write(ProjectDump.Metadata metadata) {
    checkNotPublished();
    if (metadataWritten.get()) {
        throw new IllegalStateException("Metadata has already been written");
    }
    // NOTE(review): the get()-then-set(true) pair below is not atomic — two concurrent
    // callers could both pass the check and write the file; confirm callers are single-threaded.
    File file = new File(rootDir, METADATA.filename());
    try (FileOutputStream output = FILES2.openOutputStream(file, false)) {
        PROTOBUF2.writeTo(metadata, output);
        // Only mark as written after the payload was streamed out successfully,
        // so a failed attempt can be retried.
        metadataWritten.set(true);
    } catch (IOException e) {
        throw new IllegalStateException("Can not write to file " + file, e);
    }
}
|
@Test
public void writeMetadata_fails_if_called_twice() {
    underTest.write(newMetadata());
    // A second write attempt must be rejected with the dedicated error message.
    assertThatThrownBy(() -> underTest.write(newMetadata()))
        .hasMessage("Metadata has already been written")
        .isInstanceOf(IllegalStateException.class);
}
|
@Udf(schema = "ARRAY<STRUCT<K STRING, V BIGINT>>")
public List<Struct> entriesBigInt(
    @UdfParameter(description = "The map to create entries from") final Map<String, Long> map,
    @UdfParameter(description = "If true then the resulting entries are sorted by key")
    final boolean sorted
) {
    // Delegate to the shared implementation with the BIGINT value schema.
    final List<Struct> structs = entries(map, BIGINT_STRUCT_SCHEMA, sorted);
    return structs;
}
|
@Test
public void shouldComputeBigIntEntriesSorted() {
    // Build a String -> Long map and verify its entries come back sorted by key.
    final Map<String, Long> input = createMap(Long::valueOf);
    shouldComputeEntriesSorted(input, () -> entriesUdf.entriesBigInt(input, true));
}
|
/**
 * Admin-console gate: applies the X-Frame-Options header, IP block/allow lists,
 * and redirects unauthenticated users to the login page unless the URL is excluded.
 */
@Override
public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
    throws IOException, ServletException
{
    HttpServletRequest request = (HttpServletRequest)req;
    HttpServletResponse response = (HttpServletResponse)res;
    // Do not allow framing; OF-997
    response.setHeader("X-Frame-Options", JiveGlobals.getProperty("adminConsole.frame-options", "SAMEORIGIN"));
    // Reset the defaultLoginPage variable
    String loginPage = defaultLoginPage;
    if (loginPage == null) {
        // Use the token-based login page when one-time access tokens are enabled.
        loginPage = request.getContextPath() + (AuthFactory.isOneTimeAccessTokenEnabled() ? "/loginToken.jsp" : "/login.jsp" );
    }
    // Get the page we're on:
    String url = request.getRequestURI().substring(1);
    if (url.startsWith("plugins/")) {
        url = url.substring("plugins/".length());
    }
    // See if it's contained in the exclude list. If so, skip filter execution
    boolean doExclude = false;
    for (String exclude : excludes) {
        if (testURLPassesExclude(url, exclude)) {
            doExclude = true;
            break;
        }
    }
    // IP-based access control also applies to excluded URLs when configured to ignore excludes.
    if (!doExclude || IP_ACCESS_IGNORE_EXCLUDES.getValue()) {
        if (!passesBlocklist(req) || !passesAllowList(req)) {
            response.sendError(HttpServletResponse.SC_FORBIDDEN);
            return;
        }
    }
    if (!doExclude) {
        WebManager manager = new WebManager();
        manager.init(request, response, request.getSession(), context);
        // Access is granted by a one-time token, an admin session, or request-based auth.
        boolean haveOneTimeToken = manager.getAuthToken() instanceof AuthToken.OneTimeAuthToken;
        User loggedUser = manager.getUser();
        boolean loggedAdmin = loggedUser == null ? false : adminManager.isUserAdmin(loggedUser.getUsername(), true);
        if (!haveOneTimeToken && !loggedAdmin && !authUserFromRequest(request)) {
            // Not authenticated: redirect to the login page.
            response.sendRedirect(getRedirectURL(request, loginPage, null));
            return;
        }
    }
    chain.doFilter(req, res);
}
|
@Test
public void nonExcludedUrlWillNotErrorWhenCIDROnAllowlist() throws Exception {
    AuthCheckFilter.SERVLET_REQUEST_AUTHENTICATOR.setValue(AdminUserServletAuthenticatorClass.class);
    final AuthCheckFilter underTest = new AuthCheckFilter(adminManager, loginLimitManager);
    // Allow-list the /24 network that contains the remote address.
    final String allowListedCidr = remoteAddr.substring(0, remoteAddr.lastIndexOf('.')) + ".0/24";
    AuthCheckFilter.IP_ACCESS_ALLOWLIST.setValue(Collections.singleton(allowListedCidr));
    underTest.doFilter(request, response, filterChain);
    // The request must pass through without a FORBIDDEN (or any) error.
    verify(response, never()).sendError(anyInt());
    verify(filterChain, atLeastOnce()).doFilter(any(), any());
}
|
/**
 * Returns this user's permissions: the implicit self-edit permissions (omitted for
 * service accounts) merged with any permissions stored in the backing fields, de-duplicated.
 */
@Override
public List<String> getPermissions() {
    final Set<String> merged = new HashSet<>();
    if (!isServiceAccount()) {
        merged.addAll(this.permissions.userSelfEditPermissions(getName()));
    }
    @SuppressWarnings("unchecked")
    final List<String> stored = (List<String>) fields.get(PERMISSIONS);
    if (stored != null) {
        merged.addAll(stored);
    }
    return new ArrayList<>(merged);
}
|
@Test
public void getPermissionsReturnsListOfPermissions() throws Exception {
    final Permissions permissions = new Permissions(Collections.emptySet());
    // One custom permission stored directly in the user's fields.
    final List<String> grantedPermissions = Collections.singletonList("subject:action");
    final Map<String, Object> userFields = ImmutableMap.of(
        UserImpl.USERNAME, "foobar",
        UserImpl.PERMISSIONS, grantedPermissions);
    user = createUserImpl(passwordAlgorithmFactory, permissions, userFields);
    // The result must contain both the implicit self-edit permissions and the stored one.
    assertThat(user.getPermissions())
        .containsAll(permissions.userSelfEditPermissions("foobar"))
        .contains("subject:action");
}
|
/**
 * Populates the generated model's constructor: sets the super() invocation and fills the
 * miningFields, outputFields and kiePMMLTargets lists inside the constructor body.
 */
public static void setKiePMMLModelConstructor(final String generatedClassName,
                                              final ConstructorDeclaration constructorDeclaration,
                                              final String fileName,
                                              final String name,
                                              final List<MiningField> miningFields,
                                              final List<OutputField> outputFields,
                                              final List<TargetField> targetFields) {
    setKiePMMLConstructorSuperNameInvocation(generatedClassName, constructorDeclaration, fileName, name);
    final BlockStmt constructorBody = constructorDeclaration.getBody();
    // Append the list-population statements in the same order as the constructor fields.
    addListPopulationByObjectCreationExpr(getMiningFieldsObjectCreations(miningFields), constructorBody, "miningFields");
    addListPopulationByObjectCreationExpr(getOutputFieldsObjectCreations(outputFields), constructorBody, "outputFields");
    addListPopulationByMethodCallExpr(getKiePMMLTargetFieldsObjectCreations(targetFields), constructorBody, "kiePMMLTargets");
}
|
@Test
void setKiePMMLModelConstructor() {
    String generatedClassName = "generatedClassName";
    String fileName = "fileName";
    String name = "newName";
    List<MiningField> miningFields = IntStream.range(0, 3)
            .mapToObj(i -> ModelUtils.convertToKieMiningField(getRandomMiningField(),
                                                              getRandomDataField()))
            .collect(Collectors.toList());
    List<OutputField> outputFields = IntStream.range(0, 2)
            .mapToObj(i -> ModelUtils.convertToKieOutputField(getRandomOutputField(),
                                                              getRandomDataField()))
            .collect(Collectors.toList());
    List<TargetField> targetFields = IntStream.range(0, 2)
            .mapToObj(i -> ModelUtils.convertToKieTargetField(getRandomTarget()))
            .collect(Collectors.toList());
    org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.setKiePMMLModelConstructor(generatedClassName,
                                                                                                        constructorDeclaration,
                                                                                                        fileName,
                                                                                                        name,
                                                                                                        miningFields,
                                                                                                        outputFields,
                                                                                                        targetFields);
    commonVerifySuperInvocation(generatedClassName, fileName, name);
    List<MethodCallExpr> retrieved = getMethodCallExprList(constructorDeclaration.getBody(), miningFields.size(),
                                                           "miningFields",
                                                           "add");
    MethodCallExpr addMethodCall = retrieved.get(0);
    NodeList<Expression> arguments = addMethodCall.getArguments();
    commonVerifyMiningFieldsObjectCreation(arguments, miningFields);
    retrieved = getMethodCallExprList(constructorDeclaration.getBody(), outputFields.size(), "outputFields",
                                      "add");
    addMethodCall = retrieved.get(0);
    arguments = addMethodCall.getArguments();
    commonVerifyOutputFieldsObjectCreation(arguments, outputFields);
    // BUGFIX: the expected count for "kiePMMLTargets" must come from targetFields, not
    // outputFields (the original copy-paste only passed because both lists had 2 elements).
    retrieved = getMethodCallExprList(constructorDeclaration.getBody(), targetFields.size(), "kiePMMLTargets",
                                      "add");
    addMethodCall = retrieved.get(0);
    arguments = addMethodCall.getArguments();
    commonVerifyKiePMMLTargetFieldsMethodCallExpr(arguments, targetFields);
}
|
/**
 * Elects a master for the broker set named in the request.
 * Outcomes: broker set unknown -> NEED_TO_BE_REGISTERED; old master re-elected ->
 * MASTER_STILL_EXIST; a new master -> success with bumped epochs and an ElectMasterEvent;
 * otherwise an election-failure code (with a "master down" event for controller-triggered requests).
 */
public ControllerResult<ElectMasterResponseHeader> electMaster(final ElectMasterRequestHeader request,
    final ElectPolicy electPolicy) {
    final String brokerName = request.getBrokerName();
    final Long brokerId = request.getBrokerId();
    final ControllerResult<ElectMasterResponseHeader> result = new ControllerResult<>(new ElectMasterResponseHeader());
    final ElectMasterResponseHeader response = result.getResponse();
    if (!isContainsBroker(brokerName)) {
        // this broker set hasn't been registered
        result.setCodeAndRemark(ResponseCode.CONTROLLER_BROKER_NEED_TO_BE_REGISTERED, "Broker hasn't been registered");
        return result;
    }
    final SyncStateInfo syncStateInfo = this.syncStateSetInfoTable.get(brokerName);
    final BrokerReplicaInfo brokerReplicaInfo = this.replicaInfoTable.get(brokerName);
    final Set<Long> syncStateSet = syncStateInfo.getSyncStateSet();
    final Long oldMaster = syncStateInfo.getMasterBrokerId();
    // Unclean election may pick any replica, not just the in-sync ones.
    Set<Long> allReplicaBrokers = controllerConfig.isEnableElectUncleanMaster() ? brokerReplicaInfo.getAllBroker() : null;
    Long newMaster = null;
    if (syncStateInfo.isFirstTimeForElect()) {
        // If never have a master in this broker set, in other words, it is the first time to elect a master
        // elect it as the first master
        newMaster = brokerId;
    }
    // elect by policy
    if (newMaster == null || newMaster == -1) {
        // we should assign this assignedBrokerId when the brokerAddress need to be elected by force
        Long assignedBrokerId = request.getDesignateElect() ? brokerId : null;
        newMaster = electPolicy.elect(brokerReplicaInfo.getClusterName(), brokerReplicaInfo.getBrokerName(), syncStateSet, allReplicaBrokers, oldMaster, assignedBrokerId);
    }
    if (newMaster != null && newMaster.equals(oldMaster)) {
        // old master still valid, change nothing
        String err = String.format("The old master %s is still alive, not need to elect new master for broker %s", oldMaster, brokerReplicaInfo.getBrokerName());
        LOGGER.warn("{}", err);
        // the master still exist
        response.setMasterEpoch(syncStateInfo.getMasterEpoch());
        response.setSyncStateSetEpoch(syncStateInfo.getSyncStateSetEpoch());
        response.setMasterBrokerId(oldMaster);
        response.setMasterAddress(brokerReplicaInfo.getBrokerAddress(oldMaster));
        result.setBody(new ElectMasterResponseBody(syncStateSet).encode());
        result.setCodeAndRemark(ResponseCode.CONTROLLER_MASTER_STILL_EXIST, err);
        return result;
    }
    // a new master is elected
    if (newMaster != null) {
        final int masterEpoch = syncStateInfo.getMasterEpoch();
        final int syncStateSetEpoch = syncStateInfo.getSyncStateSetEpoch();
        // The new sync-state set initially contains only the new master.
        final HashSet<Long> newSyncStateSet = new HashSet<>();
        newSyncStateSet.add(newMaster);
        response.setMasterBrokerId(newMaster);
        response.setMasterAddress(brokerReplicaInfo.getBrokerAddress(newMaster));
        // Both epochs are bumped so followers can detect the leadership change.
        response.setMasterEpoch(masterEpoch + 1);
        response.setSyncStateSetEpoch(syncStateSetEpoch + 1);
        ElectMasterResponseBody responseBody = new ElectMasterResponseBody(newSyncStateSet);
        BrokerMemberGroup brokerMemberGroup = buildBrokerMemberGroup(brokerReplicaInfo);
        if (null != brokerMemberGroup) {
            responseBody.setBrokerMemberGroup(brokerMemberGroup);
        }
        result.setBody(responseBody.encode());
        final ElectMasterEvent event = new ElectMasterEvent(brokerName, newMaster);
        result.addEvent(event);
        LOGGER.info("Elect new master {} for broker {}", newMaster, brokerName);
        return result;
    }
    // If elect failed and the electMaster is triggered by controller (we can figure it out by brokerAddress),
    // we still need to apply an ElectMasterEvent to tell the statemachine
    // that the master was shutdown and no new master was elected.
    if (request.getBrokerId() == null || request.getBrokerId() == -1) {
        final ElectMasterEvent event = new ElectMasterEvent(false, brokerName);
        result.addEvent(event);
        result.setCodeAndRemark(ResponseCode.CONTROLLER_MASTER_NOT_AVAILABLE, "Old master has down and failed to elect a new broker master");
    } else {
        result.setCodeAndRemark(ResponseCode.CONTROLLER_ELECT_MASTER_FAILED, "Failed to elect a new master");
    }
    LOGGER.warn("Failed to elect a new master for broker {}", brokerName);
    return result;
}
|
@Test
public void testElectMasterPreferHigherEpoch() {
    mockMetaData();
    // Controller-triggered election (no designated broker id).
    final ElectMasterRequestHeader request = ElectMasterRequestHeader.ofControllerTrigger(DEFAULT_BROKER_NAME);
    ElectPolicy electPolicy = new DefaultElectPolicy(this.heartbeatManager::isBrokerActive, this.heartbeatManager::getBrokerLiveInfo);
    // Broker 2 reports the higher epoch, so the policy should prefer it.
    mockHeartbeatDataHigherEpoch();
    final ControllerResult<ElectMasterResponseHeader> cResult = this.replicasInfoManager.electMaster(request,
        electPolicy);
    final ElectMasterResponseHeader response = cResult.getResponse();
    assertEquals(DEFAULT_IP[1], response.getMasterAddress());
    assertEquals(2L, response.getMasterBrokerId().longValue());
    assertEquals(2, response.getMasterEpoch().intValue());
}
|
/**
 * Moves/renames a file on the EUE backend. Steps: (1) if the target exists and is not a
 * case-variant of the source, trash it first; (2) if the parent folder changes, issue a
 * children-move request (alias API for ROOT/TRASH parents, resource API otherwise);
 * (3) if the name changes, patch the resource's uifs name; (4) invalidate the cached id.
 */
@Override
public Path move(final Path file, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        final EueApiClient client = new EueApiClient(session);
        if(status.isExists()) {
            // A pure case-rename must not trash the target — it is the same resource.
            if(!new CaseInsensitivePathPredicate(file).test(target)) {
                if(log.isWarnEnabled()) {
                    log.warn(String.format("Trash file %s to be replaced with %s", target, file));
                }
                new EueTrashFeature(session, fileid).delete(Collections.singletonMap(target, status), callback, delete);
            }
        }
        final String resourceId = fileid.getFileId(file);
        if(!new SimplePathPredicate(file.getParent()).test(target.getParent())) {
            final ResourceMoveResponseEntries resourceMoveResponseEntries;
            final String parentResourceId = fileid.getFileId(target.getParent());
            // ROOT and TRASH parents are addressed via the alias API, all others by resource id.
            switch(parentResourceId) {
                case EueResourceIdProvider.ROOT:
                case EueResourceIdProvider.TRASH:
                    resourceMoveResponseEntries = new MoveChildrenForAliasApiApi(client)
                        .resourceAliasAliasChildrenMovePost(parentResourceId,
                            Collections.singletonList(String.format("%s/resource/%s",
                                session.getBasePath(), resourceId)), null, null, null,
                            "rename", null);
                    break;
                default:
                    resourceMoveResponseEntries = new MoveChildrenApi(client)
                        .resourceResourceIdChildrenMovePost(parentResourceId,
                            Collections.singletonList(String.format("%s/resource/%s",
                                session.getBasePath(), resourceId)), null, null, null,
                            "rename", null);
            }
            if(null == resourceMoveResponseEntries) {
                // Move of single file will return 200 status code with empty response body
            }
            else {
                // Any non-OK entry aborts the move with a mapped exception.
                for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
                    switch(resourceMoveResponseEntry.getStatusCode()) {
                        case HttpStatus.SC_OK:
                            break;
                        default:
                            log.warn(String.format("Failure %s moving file %s", resourceMoveResponseEntries, file));
                            final ResourceCreationResponseEntryEntity entity = resourceMoveResponseEntry.getEntity();
                            if(null == entity) {
                                // No entity: fall back to the entry's reason phrase.
                                throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
                                    null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                            }
                            throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getEntity().getError(),
                                null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                    }
                }
            }
        }
        if(!StringUtils.equals(file.getName(), target.getName())) {
            // Rename via a PATCH of the resource's display name (uifs.name).
            final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel();
            final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate();
            final Uifs uifs = new Uifs();
            uifs.setName(target.getName());
            resourceUpdateModelUpdate.setUifs(uifs);
            resourceUpdateModel.setUpdate(resourceUpdateModelUpdate);
            final ResourceMoveResponseEntries resourceMoveResponseEntries = new UpdateResourceApi(client).resourceResourceIdPatch(resourceId,
                resourceUpdateModel, null, null, null);
            if(null == resourceMoveResponseEntries) {
                // Move of single file will return 200 status code with empty response body
            }
            else {
                for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
                    switch(resourceMoveResponseEntry.getStatusCode()) {
                        case HttpStatus.SC_CREATED:
                            break;
                        default:
                            log.warn(String.format("Failure %s renaming file %s", resourceMoveResponseEntry, file));
                            throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
                                null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                    }
                }
            }
        }
        // Drop the stale id mapping for the source path.
        fileid.cache(file, null);
        return target;
    }
    catch(ApiException e) {
        throw new EueExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
|
@Test
public void testMoveFile() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    // Create a source folder with one file in it.
    final Path sourceFolder = new EueDirectoryFeature(session, fileid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path sourceFile = new Path(sourceFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    createFile(fileid, sourceFile, RandomUtils.nextBytes(541));
    final PathAttributes sourceAttr = new EueAttributesFinderFeature(session, fileid).find(sourceFile);
    assertTrue(new EueFindFeature(session, fileid).find(sourceFile));
    final Path targetFolder = new EueDirectoryFeature(session, fileid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    // Move to a different folder and a different name in one call.
    final Path targetFile = new EueMoveFeature(session, fileid).move(sourceFile,
        new Path(targetFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertFalse(new EueFindFeature(session, fileid).find(sourceFile));
    assertTrue(new EueFindFeature(session, fileid).find(targetFile));
    assertFalse(new DefaultFindFeature(session).find(sourceFile));
    assertTrue(new DefaultFindFeature(session).find(targetFile));
    // Size and resource id survive the move; the ETag changes.
    final PathAttributes targetAttributes = new EueAttributesFinderFeature(session, fileid).find(targetFile);
    assertEquals(sourceAttr.getSize(), targetAttributes.getSize());
    assertNotEquals(sourceAttr.getETag(), targetAttributes.getETag());
    assertEquals(sourceAttr.getFileId(), targetAttributes.getFileId());
    new EueDeleteFeature(session, fileid).delete(Arrays.asList(sourceFolder, targetFolder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Builds the full {@link ArtifactPluginInfo} for the given plugin descriptor by
 * collecting its store/publish/fetch settings, icon and capabilities.
 */
@Override
public ArtifactPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    final String pluginId = descriptor.id();
    // Java evaluates arguments left-to-right, preserving the original call order.
    return new ArtifactPluginInfo(descriptor,
        storeConfigMetadata(pluginId),
        publishArtifactMetadata(pluginId),
        fetchArtifactMetadata(pluginId),
        image(pluginId),
        getCapabilities(pluginId));
}
|
@Test
public void shouldContinueWithBuildingPluginInfoIfPluginSettingsIsNotProvidedByPlugin() {
    GoPluginDescriptor pluginDescriptor = GoPluginDescriptor.builder().id("plugin1").build();
    // The extension failing to provide plugin settings must not abort info building.
    doThrow(new RuntimeException("foo")).when(extension).getPluginSettingsConfiguration("plugin1");
    ArtifactPluginInfo pluginInfo = new ArtifactPluginInfoBuilder(extension).pluginInfoFor(pluginDescriptor);
    assertThat(pluginInfo.getDescriptor(), is(pluginDescriptor));
    assertThat(pluginInfo.getExtensionName(), is(PluginConstants.ARTIFACT_EXTENSION));
}
|
/**
 * Idempotent shutdown: cancels all leader/expiration/follower tasks, closes the storage,
 * metadata and cache resources, stops the thread pools, and removes metrics.
 * Synchronized on {@code this} and guarded by {@code closed} so it runs at most once.
 */
public void close() {
    synchronized (this) {
        if (!closed) {
            // Cancel outstanding tasks before tearing down the executors they run on.
            leaderCopyRLMTasks.values().forEach(RLMTaskWithFuture::cancel);
            leaderExpirationRLMTasks.values().forEach(RLMTaskWithFuture::cancel);
            followerRLMTasks.values().forEach(RLMTaskWithFuture::cancel);
            Utils.closeQuietly(remoteLogStorageManager, "RemoteLogStorageManager");
            Utils.closeQuietly(remoteLogMetadataManager, "RemoteLogMetadataManager");
            Utils.closeQuietly(indexCache, "RemoteIndexCache");
            rlmCopyThreadPool.close();
            rlmExpirationThreadPool.close();
            followerThreadPool.close();
            try {
                shutdownAndAwaitTermination(remoteStorageReaderThreadPool, "RemoteStorageReaderThreadPool", 10, TimeUnit.SECONDS);
            } finally {
                // Metrics are removed even if the reader pool fails to terminate in time.
                removeMetrics();
            }
            leaderCopyRLMTasks.clear();
            leaderExpirationRLMTasks.clear();
            followerRLMTasks.clear();
            closed = true;
        }
    }
}
|
@Test
public void testRemoveMetricsOnClose() throws IOException {
    // try-with-resources replaces the manual try/finally: MockedConstruction is
    // AutoCloseable, so the construction mock is always deregistered.
    try (MockedConstruction<KafkaMetricsGroup> mockMetricsGroupCtor = mockConstruction(KafkaMetricsGroup.class)) {
        RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId,
            time, tp -> Optional.of(mockLog), (topicPartition, offset) -> { }, brokerTopicStats, metrics) {
            public RemoteStorageManager createRemoteStorageManager() {
                return remoteStorageManager;
            }
            public RemoteLogMetadataManager createRemoteLogMetadataManager() {
                return remoteLogMetadataManager;
            }
        };
        // Close RemoteLogManager so that metrics are removed
        remoteLogManager.close();
        KafkaMetricsGroup mockRlmMetricsGroup = mockMetricsGroupCtor.constructed().get(0);
        KafkaMetricsGroup mockThreadPoolMetricsGroup = mockMetricsGroupCtor.constructed().get(1);
        List<MetricName> remoteLogManagerMetricNames = Arrays.asList(
            REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC,
            REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC);
        Set<String> remoteStorageThreadPoolMetricNames = REMOTE_STORAGE_THREAD_POOL_METRICS;
        verify(mockRlmMetricsGroup, times(1)).newGauge(any(MetricName.class), any());
        verify(mockRlmMetricsGroup, times(1)).newTimer(any(MetricName.class), any(), any());
        // Verify that the RemoteLogManager metrics are removed
        remoteLogManagerMetricNames.forEach(metricName -> verify(mockRlmMetricsGroup).removeMetric(metricName));
        verify(mockThreadPoolMetricsGroup, times(remoteStorageThreadPoolMetricNames.size())).newGauge(anyString(), any());
        // Verify that the RemoteStorageThreadPool metrics are removed
        remoteStorageThreadPoolMetricNames.forEach(metricName -> verify(mockThreadPoolMetricsGroup).removeMetric(metricName));
        verifyNoMoreInteractions(mockRlmMetricsGroup);
        verifyNoMoreInteractions(mockThreadPoolMetricsGroup);
    }
}
|
/**
 * Sends a VERIFY distro request for {@code verifyData} to {@code targetServer}.
 * Returns true when the target does not exist (nothing to verify) or the remote
 * call succeeds; false on unhealthy target, error response, or send failure.
 */
@Override
public boolean syncVerifyData(DistroData verifyData, String targetServer) {
    if (isNoExistTarget(targetServer)) {
        return true;
    }
    // replace target server as self server so that can callback.
    // NOTE: this mutates the caller's DistroData in place before the health check below.
    verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress());
    DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY);
    Member member = memberManager.find(targetServer);
    if (checkTargetServerStatusUnhealthy(member)) {
        Loggers.DISTRO
            .warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}", targetServer,
                verifyData.getDistroKey());
        return false;
    }
    try {
        Response response = clusterRpcClientProxy.sendRequest(member, request);
        return checkResponse(response);
    } catch (NacosException e) {
        // Send failure is logged and reported as an unsuccessful verify.
        Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e);
    }
    return false;
}
|
@Test
void testSyncVerifyDataWithCallbackFailure() throws NacosException {
    // A healthy, running member that answers with an error response must trigger onFailed.
    DistroData data = new DistroData();
    data.setDistroKey(new DistroKey());
    when(memberManager.hasMember(member.getAddress())).thenReturn(true);
    when(memberManager.find(member.getAddress())).thenReturn(member);
    member.setState(NodeState.UP);
    response.setErrorInfo(ResponseCode.FAIL.getCode(), "TEST");
    when(clusterRpcClientProxy.isRunning(member)).thenReturn(true);
    transportAgent.syncVerifyData(data, member.getAddress(), distroCallback);
    verify(distroCallback).onFailed(null);
}
|
/**
 * Kicks off an asynchronous update check unless verbosity is below {@code info} or
 * update checks are disabled, in which case an immediately-empty future is returned.
 */
static Future<Optional<String>> newUpdateChecker(
    GlobalConfig globalConfig, Verbosity verbosity, Consumer<LogEvent> log) {
  if (!verbosity.atLeast(Verbosity.info) || globalConfig.isDisableUpdateCheck()) {
    return Futures.immediateFuture(Optional.empty());
  }
  final ExecutorService executor = Executors.newSingleThreadExecutor();
  try {
    return UpdateChecker.checkForUpdate(
        executor, VERSION_URL, VersionInfo.TOOL_NAME, VersionInfo.getVersionSimple(), log);
  } finally {
    // shutdown() lets the in-flight check finish while preventing further submissions.
    executor.shutdown();
  }
}
|
@Test
public void testNewUpdateChecker_noUpdateCheck() throws ExecutionException, InterruptedException {
    // With update checks disabled, the future must resolve immediately to empty.
    when(globalConfig.isDisableUpdateCheck()).thenReturn(true);
    Future<Optional<String>> checkResult =
        JibCli.newUpdateChecker(globalConfig, Verbosity.info, ignored -> {});
    assertThat(checkResult.get()).isEmpty();
}
|
/**
 * Parses a logical expression over {@link Predicate} class names (e.g.
 * {@code "a.B & c.D | !(e.F)"}) into a composed Predicate, using a shunting-yard-style
 * algorithm with one stack of operand predicates and one of operator characters.
 * Non-operator tokens are fully-qualified class names instantiated reflectively.
 *
 * @throws RuntimeException if a class does not implement Predicate, cannot be
 *         instantiated, or the expression is structurally invalid
 */
public static Predicate parse(String expression)
{
    final Stack<Predicate> predicateStack = new Stack<>();
    final Stack<Character> operatorStack = new Stack<>();
    // Strip whitespace, then split into class-name tokens and single-char operators.
    final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
    final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
    boolean isTokenMode = true;
    while (true)
    {
        final Character operator;
        final String token;
        if (isTokenMode)
        {
            if (tokenizer.hasMoreTokens())
            {
                token = tokenizer.nextToken();
            }
            else
            {
                break;
            }
            if (OPERATORS.contains(token))
            {
                operator = token.charAt(0);
            }
            else
            {
                operator = null;
            }
        }
        else
        {
            // Re-process the operator popped during the precedence reduction below.
            operator = operatorStack.pop();
            token = null;
        }
        isTokenMode = true;
        if (operator == null)
        {
            // Operand: instantiate the named Predicate via its no-arg constructor.
            try
            {
                predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
            }
            catch (ClassCastException e)
            {
                throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
            }
            catch (Exception e)
            {
                throw new RuntimeException(e);
            }
        }
        else
        {
            if (operatorStack.empty() || operator == '(')
            {
                operatorStack.push(operator);
            }
            else if (operator == ')')
            {
                // Reduce until the matching '(' and discard it.
                while (operatorStack.peek() != '(')
                {
                    evaluate(predicateStack, operatorStack);
                }
                operatorStack.pop();
            }
            else
            {
                // Lower-precedence operator: reduce the stack top first, then retry.
                if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek()))
                {
                    evaluate(predicateStack, operatorStack);
                    isTokenMode = false;
                }
                operatorStack.push(operator);
            }
        }
    }
    // Reduce any remaining operators.
    while (!operatorStack.empty())
    {
        evaluate(predicateStack, operatorStack);
    }
    if (predicateStack.size() > 1)
    {
        throw new RuntimeException("Invalid logical expression");
    }
    return predicateStack.pop();
}
|
@Test
public void testAnd()
{
    // '&' combines the two named predicates into an AndPredicate, preserving order.
    final Predicate predicate = PredicateExpressionParser.parse("com.linkedin.data.it.AlwaysTruePredicate & com.linkedin.data.it.AlwaysFalsePredicate");
    Assert.assertEquals(predicate.getClass(), AndPredicate.class);
    final List<Predicate> childPredicates = ((AndPredicate) predicate).getChildPredicates();
    Assert.assertEquals(childPredicates.get(0).getClass(), AlwaysTruePredicate.class);
    Assert.assertEquals(childPredicates.get(1).getClass(), AlwaysFalsePredicate.class);
}
|
/** Convenience overload that reports progress relative to the current wall-clock time. */
public String getProgress(final boolean running, final long size, final long transferred) {
    final long now = System.currentTimeMillis();
    return this.getProgress(now, running, size, transferred);
}
|
@Test
public void testProgressRemaining() {
    final long start = System.currentTimeMillis();
    final Speedometer meter = new Speedometer(start, true);
    // Rate and remaining time are derived from bytes transferred since the last sample.
    assertEquals("1 B of 5 B (20%, 1 B/sec, 4 seconds remaining)", meter.getProgress(start + 1000L, true, 5L, 1L));
    assertEquals("4 B of 5 B (80%, 2 B/sec, 1 seconds remaining)", meter.getProgress(start + 3000L, true, 5L, 4L));
    // No progress since the previous sample: rate drops to 0 and no estimate is shown.
    assertEquals("4 B of 5 B (80%, 0 B/sec)", meter.getProgress(start + 4000L, true, 5L, 4L));
}
|
/** Two treatments are equal when both their address and instruction list match. */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (!(obj instanceof DefaultMappingTreatment)) {
        return false;
    }
    final DefaultMappingTreatment other = (DefaultMappingTreatment) obj;
    return Objects.equals(address, other.address)
            && Objects.equals(instructions, other.instructions);
}
|
@Test
public void testEquals() {
    // Two structurally identical treatments built from the first address.
    final IpPrefix prefix1 = IpPrefix.valueOf(IP_ADDRESS_1);
    final MappingAddress mappingAddress1 = MappingAddresses.ipv4MappingAddress(prefix1);
    final MappingTreatment treatment1 = DefaultMappingTreatment.builder()
            .withAddress(mappingAddress1)
            .setUnicastPriority(10)
            .setUnicastWeight(10)
            .build();
    final MappingTreatment sameAsTreatment1 = DefaultMappingTreatment.builder()
            .withAddress(mappingAddress1)
            .setUnicastPriority(10)
            .setUnicastWeight(10)
            .build();
    // A distinct treatment from the second address.
    final IpPrefix prefix2 = IpPrefix.valueOf(IP_ADDRESS_2);
    final MappingAddress mappingAddress2 = MappingAddresses.ipv4MappingAddress(prefix2);
    final MappingTreatment treatment2 = DefaultMappingTreatment.builder()
            .withAddress(mappingAddress2)
            .setMulticastPriority(20)
            .setMulticastWeight(20)
            .build();
    new EqualsTester()
            .addEqualityGroup(treatment1, sameAsTreatment1)
            .addEqualityGroup(treatment2)
            .testEquals();
}
|
/** Returns true when the key maps to a non-empty list of values. */
public boolean hasValues(K key)
{
    final List<V> values = data.get(key);
    return values != null && !values.isEmpty();
}
|
@Test
public void testHasValue()
{
    // An absent key reports no values; a populated key reports true.
    assertThat(map.hasValues(42L), is(false));
    assertThat(map.hasValues(1L), is(true));
}
|
/**
 * Builds a random alphanumeric string of exactly {@code length} characters
 * drawn from the given random source.
 */
public static String generateRandomAlphanumericString(Random rnd, int length) {
    checkNotNull(rnd);
    checkArgument(length >= 0);
    final StringBuilder sb = new StringBuilder(length);
    for (int remaining = length; remaining > 0; remaining--) {
        sb.append(nextAlphanumericChar(rnd));
    }
    return sb.toString();
}
|
@Test
void testGenerateAlphanumeric() {
    // 256 characters, all of which must be alphanumeric.
    String generated = StringUtils.generateRandomAlphanumericString(new Random(), 256);
    assertThat(generated).matches("[a-zA-Z0-9]{256}");
}
|
/**
 * Adds {@code value} under {@code key}: the first value is stored directly, a second
 * addition promotes the entry to a {@code List} holding both, and further additions
 * append to that list.
 *
 * <p>Note: a value that is itself a {@code List} is indistinguishable from a promoted
 * entry, so subsequent values are appended into it.
 *
 * @param map   target map, modified in place
 * @param key   entry key
 * @param value value to add
 */
@SuppressWarnings("unchecked")
public static void appendValue(Map<String, Object> map, String key, Object value) {
    Object oldValue = map.get(key);
    if (oldValue == null) {
        // First value for this key: store it as-is.
        map.put(key, value);
    } else if (oldValue instanceof List) {
        ((List<Object>) oldValue).add(value);
    } else {
        // Promote the single value to a list; put() replaces the old entry directly,
        // so the explicit remove() the original performed was redundant.
        List<Object> list = new ArrayList<>();
        list.add(oldValue);
        list.add(value);
        map.put(key, list);
    }
}
|
@Test
public void testAppendValue() {
    Map<String, Object> target = new HashMap<>();
    // First append stores the bare value; the second promotes it to a list.
    CollectionHelper.appendValue(target, "foo", 123);
    assertEquals(1, target.size());
    CollectionHelper.appendValue(target, "foo", 456);
    assertEquals(1, target.size());
    CollectionHelper.appendValue(target, "bar", 789);
    assertEquals(2, target.size());
    List<?> fooValues = (List<?>) target.get("foo");
    assertEquals(2, fooValues.size());
    assertEquals(123, fooValues.get(0));
    assertEquals(456, fooValues.get(1));
    // A single-value key stays a scalar.
    Integer barValue = (Integer) target.get("bar");
    assertEquals(789, barValue.intValue());
}
|
/**
 * Expires stale offsets for every known group, deleting groups whose offsets all
 * expired, then reschedules the next cleanup cycle. Returns the tombstone records
 * generated during the sweep (not appended as a future).
 */
public CoordinatorResult<Void, CoordinatorRecord> cleanupGroupMetadata() {
    final long startTimeMs = time.milliseconds();
    final List<CoordinatorRecord> tombstones = new ArrayList<>();
    for (String groupId : groupMetadataManager.groupIds()) {
        // A group whose offsets have all expired is itself a deletion candidate.
        if (offsetMetadataManager.cleanupExpiredOffsets(groupId, tombstones)) {
            groupMetadataManager.maybeDeleteGroup(groupId, tombstones);
        }
    }
    log.info("Generated {} tombstone records while cleaning up group metadata in {} milliseconds.",
        tombstones.size(), time.milliseconds() - startTimeMs);
    // Reschedule the next cycle.
    scheduleGroupMetadataExpiration();
    return new CoordinatorResult<>(tombstones, false);
}
|
@Test
public void testCleanupGroupMetadata() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    Time mockTime = new MockTime();
    MockCoordinatorTimer<Void, CoordinatorRecord> timer = new MockCoordinatorTimer<>(mockTime);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        mockTime,
        timer,
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    CoordinatorRecord offsetCommitTombstone = GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord("group-id", "topic", 0);
    CoordinatorRecord groupMetadataTombstone = GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord("group-id");
    @SuppressWarnings("unchecked")
    ArgumentCaptor<List<CoordinatorRecord>> recordsCapture = ArgumentCaptor.forClass(List.class);
    when(groupMetadataManager.groupIds()).thenReturn(mkSet("group-id", "other-group-id"));
    // "group-id": all offsets expired -> an offset tombstone is appended and the group
    // becomes a deletion candidate.
    when(offsetMetadataManager.cleanupExpiredOffsets(eq("group-id"), recordsCapture.capture()))
        .thenAnswer(invocation -> {
            List<CoordinatorRecord> records = recordsCapture.getValue();
            records.add(offsetCommitTombstone);
            return true;
        });
    // "other-group-id": offsets remain, so the group must not be deleted.
    when(offsetMetadataManager.cleanupExpiredOffsets("other-group-id", Collections.emptyList())).thenReturn(false);
    doAnswer(invocation -> {
        List<CoordinatorRecord> records = recordsCapture.getValue();
        records.add(groupMetadataTombstone);
        return null;
    }).when(groupMetadataManager).maybeDeleteGroup(eq("group-id"), recordsCapture.capture());
    // The sweep must (re)schedule the next expiration cycle.
    assertFalse(timer.contains(GROUP_EXPIRATION_KEY));
    CoordinatorResult<Void, CoordinatorRecord> result = coordinator.cleanupGroupMetadata();
    assertTrue(timer.contains(GROUP_EXPIRATION_KEY));
    List<CoordinatorRecord> expectedRecords = Arrays.asList(offsetCommitTombstone, groupMetadataTombstone);
    assertEquals(expectedRecords, result.records());
    assertNull(result.response());
    assertNull(result.appendFuture());
    verify(groupMetadataManager, times(1)).groupIds();
    verify(offsetMetadataManager, times(1)).cleanupExpiredOffsets(eq("group-id"), any());
    verify(offsetMetadataManager, times(1)).cleanupExpiredOffsets(eq("other-group-id"), any());
    verify(groupMetadataManager, times(1)).maybeDeleteGroup(eq("group-id"), any());
    verify(groupMetadataManager, times(0)).maybeDeleteGroup(eq("other-group-id"), any());
}
|
@Override
public void deleteFiles(Iterable<String> pathsToDelete) throws BulkDeletionFailureException {
    // Convert each gs:// URI to a BlobId and delegate to the bulk-delete helper.
    internalDeleteFiles(Streams.stream(pathsToDelete).map(BlobId::fromGsUtilUri));
}
|
@Test
public void testDeleteFiles() {
    // Create two objects under del/path/ and one under del/skip/.
    String deletePrefix = "del/path/";
    String firstPath = deletePrefix + "data1.dat";
    storage.create(BlobInfo.newBuilder(TEST_BUCKET, firstPath).build());
    String secondPath = deletePrefix + "data2.dat";
    storage.create(BlobInfo.newBuilder(TEST_BUCKET, secondPath).build());
    String thirdPath = "del/skip/data3.dat";
    storage.create(BlobInfo.newBuilder(TEST_BUCKET, thirdPath).build());

    assertThat(StreamSupport.stream(io.listPrefix(gsUri("del/")).spliterator(), false).count())
        .isEqualTo(3);

    // Bulk-delete the first and third object; only the second must survive.
    Iterable<String> deletes =
        () -> ImmutableList.of(gsUri(firstPath), gsUri(thirdPath)).stream().iterator();
    io.deleteFiles(deletes);

    assertThat(StreamSupport.stream(io.listPrefix(gsUri("del/")).spliterator(), false).count())
        .isEqualTo(1);
}
|
@POST
@Timed
@ApiOperation(
        value = "Launch input on this node",
        response = InputCreated.class
)
@ApiResponses(value = {
        @ApiResponse(code = 404, message = "No such input type registered"),
        @ApiResponse(code = 400, message = "Missing or invalid configuration"),
        @ApiResponse(code = 400, message = "Type is exclusive and already has input running")
})
@RequiresPermissions(RestPermissions.INPUTS_CREATE)
@AuditEvent(type = AuditEventTypes.MESSAGE_INPUT_CREATE)
public Response create(@ApiParam(name = "JSON body", required = true)
                       @Valid @NotNull InputCreateRequest lr) throws ValidationException {
    try {
        throwBadRequestIfNotGlobal(lr);
        // TODO Configuration type values need to be checked. See ConfigurationMapConverter.convertValues()
        final MessageInput messageInput = messageInputFactory.create(lr, getCurrentUser().getName(), lr.node());
        // Cloud deployments only accept inputs explicitly marked cloud-compatible.
        if (config.isCloud() && !messageInput.isCloudCompatible()) {
            throw new BadRequestException(String.format(Locale.ENGLISH,
                    "The input type <%s> is not allowed in the cloud environment!", lr.type()));
        }
        messageInput.checkConfiguration();
        // Persist the input and answer 201 Created with a Location header
        // pointing at the newly created resource.
        final Input input = this.inputService.create(messageInput.asMap());
        final String newId = inputService.save(input);
        final URI inputUri = getUriBuilderToSelf().path(InputsResource.class)
                .path("{inputId}")
                .build(newId);
        return Response.created(inputUri).entity(InputCreated.create(newId)).build();
    } catch (NoSuchInputTypeException e) {
        // Unknown input type -> 404 as advertised in @ApiResponses.
        LOG.error("There is no such input type registered.", e);
        throw new NotFoundException("There is no such input type registered.", e);
    } catch (ConfigurationException e) {
        // Invalid configuration -> 400 as advertised in @ApiResponses.
        LOG.error("Missing or invalid input configuration.", e);
        throw new BadRequestException("Missing or invalid input configuration.", e);
    }
}
|
@Test
public void testCreateInput() throws Exception {
    // A non-cloud environment with a valid input factory and a persisted id
    // should produce a 201 Created response.
    when(messageInputFactory.create(any(), any(), any())).thenReturn(messageInput);
    when(inputService.save(any())).thenReturn("id");
    when(configuration.isCloud()).thenReturn(false);

    int status = inputsResource.create(inputCreateRequest).getStatus();
    assertThat(status).isEqualTo(201);
}
|
@Scheduled(
    initialDelayString = "${kayenta.prometheus.health.initial-delay:PT2S}",
    fixedDelayString = "${kayenta.prometheus.health.fixed-delay:PT5M}")
public void run() {
    // Periodically probes every configured Prometheus account and caches an
    // UP/DOWN status per account for the health endpoint to report.
    List<PrometheusHealthStatus> healthStatuses =
        prometheusConfigurationProperties.getAccounts().stream()
            .map(
                account -> {
                    String name = account.getName();
                    return accountCredentialsRepository.getOne(name);
                })
            // Skip accounts whose credentials cannot be resolved or that are
            // not Prometheus-managed accounts.
            .filter(Optional::isPresent)
            .map(Optional::get)
            .filter(credentials -> credentials instanceof PrometheusManagedAccount)
            .map(credentials -> ((PrometheusManagedAccount) credentials))
            .map(
                credentials -> {
                    try {
                        // A normal return from isHealthy() means the remote is UP.
                        PrometheusRemoteService remote = credentials.getPrometheusRemoteService();
                        remote.isHealthy();
                        return PrometheusHealthStatus.builder()
                            .accountName(credentials.getName())
                            .status(Status.UP)
                            .build();
                    } catch (Throwable ex) {
                        // Any failure marks the account DOWN and records the
                        // exception class and message for diagnostics.
                        log.warn(
                            "Prometheus health FAILED for account: {} with exception: ",
                            credentials.getName(),
                            ex);
                        return PrometheusHealthStatus.builder()
                            .accountName(credentials.getName())
                            .status(Status.DOWN)
                            .errorDetails(ex.getClass().getName() + ": " + ex.getMessage())
                            .build();
                    }
                })
            .collect(Collectors.toList());
    healthCache.setHealthStatuses(healthStatuses);
}
|
@Test
public void oneRemoteIsDown() {
    // First remote responds, second one throws -> expect UP and DOWN statuses.
    when(PROM_REMOTE_1.isHealthy()).thenReturn("OK");
    when(PROM_REMOTE_2.isHealthy()).thenThrow(new RuntimeException("test 2"));

    healthJob.run();

    PrometheusHealthJob.PrometheusHealthStatus expectedUp =
        PrometheusHealthJob.PrometheusHealthStatus.builder()
            .accountName(PROM_ACCOUNT_1)
            .status(Status.UP)
            .build();
    PrometheusHealthJob.PrometheusHealthStatus expectedDown =
        PrometheusHealthJob.PrometheusHealthStatus.builder()
            .accountName(PROM_ACCOUNT_2)
            .status(Status.DOWN)
            .errorDetails("java.lang.RuntimeException: test 2")
            .build();
    verify(healthCache).setHealthStatuses(Arrays.asList(expectedUp, expectedDown));
}
|
@Override
public Publisher<Exchange> to(String uri, Object data) {
    // Each distinct target URI gets a dedicated bridging route
    // ("reactive-streams:<uuid>" -> uri), created lazily on first use and
    // cached so later calls for the same URI reuse the same stream.
    String streamName = requestedUriToStream.computeIfAbsent(uri, camelUri -> {
        try {
            String uuid = context.getUuidGenerator().generateUuid();
            context.addRoutes(new RouteBuilder() {
                @Override
                public void configure() throws Exception {
                    from("reactive-streams:" + uuid)
                        .to(camelUri);
                }
            });
            return uuid;
        } catch (Exception e) {
            // Route creation failure is unrecoverable for this URI.
            throw new IllegalStateException("Unable to create requested reactive stream from direct URI: " + uri, e);
        }
    });
    // Publish the payload into the (possibly newly created) stream.
    return toStream(streamName, data);
}
|
@Test
public void testToFunction() throws Exception {
    context.start();

    // Collect results in a sorted thread-safe set and count completions.
    CountDownLatch completions = new CountDownLatch(3);
    Set<String> results = Collections.synchronizedSet(new TreeSet<>());

    Function<Object, Publisher<String>> toBean = crs.to("bean:hello", String.class);
    Flux.just(1, 2, 3)
        .flatMap(toBean)
        .doOnNext(results::add)
        .doOnNext(ignored -> completions.countDown())
        .subscribe();

    assertTrue(completions.await(2, TimeUnit.SECONDS));
    assertEquals(new TreeSet<>(Arrays.asList("Hello 1", "Hello 2", "Hello 3")), results);
}
|
/**
 * Decides whether the statement must be executed through SQL federation.
 *
 * <p>Federation is required for subqueries, HAVING, combine (e.g. UNION) and
 * partial distinct aggregations, and for cross-data-source joins whose tables
 * are not all configured as binding tables. As a side effect, the data nodes
 * of all involved sharding tables are added to {@code includedDataNodes}.
 */
@Override
public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters,
                      final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final ShardingRule rule, final Collection<DataNode> includedDataNodes) {
    Collection<String> tableNames = rule.getShardingLogicTableNames(selectStatementContext.getTablesContext().getTableNames());
    if (tableNames.isEmpty()) {
        return false;
    }
    includedDataNodes.addAll(getTableDataNodes(rule, tableNames, database));
    if (selectStatementContext.isContainsSubquery() || selectStatementContext.isContainsHaving()
            || selectStatementContext.isContainsCombine() || selectStatementContext.isContainsPartialDistinctAggregation()) {
        return true;
    }
    if (!selectStatementContext.isContainsJoinQuery() || rule.isAllTablesInSameDataSource(tableNames)) {
        return false;
    }
    // At this point the statement is a join query spanning data sources.
    // Federation is needed unless all involved tables are binding tables.
    // (The previous code branched on tableNames.size() but invoked the same
    // isAllBindingTables check in both branches, so the size test was redundant.)
    return !rule.isAllBindingTables(database, selectStatementContext, tableNames);
}
|
@Test
void assertDecideWhenContainsPartialDistinctAggregation() {
    // A partial distinct aggregation alone forces SQL federation.
    SelectStatementContext selectStatementContext = createStatementContext();
    when(selectStatementContext.isContainsPartialDistinctAggregation()).thenReturn(true);
    ShardingRule rule = createShardingRule();
    Collection<DataNode> includedDataNodes = new HashSet<>();
    boolean decision = new ShardingSQLFederationDecider().decide(
            selectStatementContext, Collections.emptyList(), mock(RuleMetaData.class), createDatabase(rule), rule, includedDataNodes);
    assertTrue(decision);
    // The decider collects the sharded tables' data nodes as a side effect.
    assertThat(includedDataNodes.size(), is(4));
}
|
/**
 * Initializes the generated model class' default constructor and wires the
 * generated node class' {@code evaluateNode} method reference into its
 * {@code nodeFunction} field.
 */
static void setConstructor(final TreeCompilationDTO compilationDTO,
                           final ClassOrInterfaceDeclaration modelTemplate,
                           final String fullNodeClassName) {
    // Populate the common model fields first.
    KiePMMLModelFactoryUtils.init(compilationDTO, modelTemplate);
    final BlockStmt constructorBody = modelTemplate.getDefaultConstructor()
            .orElseThrow(() -> new KiePMMLInternalException(String.format(MISSING_DEFAULT_CONSTRUCTOR, modelTemplate.getName())))
            .getBody();
    // nodeFunction = <fullNodeClassName>::evaluateNode
    final MethodReferenceExpr evaluateNodeReference = new MethodReferenceExpr();
    evaluateNodeReference.setScope(new NameExpr(fullNodeClassName));
    evaluateNodeReference.setIdentifier("evaluateNode");
    CommonCodegenUtils.setAssignExpressionValue(constructorBody, "nodeFunction", evaluateNodeReference);
}
|
@Test
void setConstructor() {
    // Load the generated model class template and run setConstructor on it.
    String className = getSanitizedClassName(treeModel1.getModelName());
    CompilationUnit cloneCU = JavaParserUtils.getKiePMMLModelCompilationUnit(className, PACKAGE_NAME,
            KIE_PMML_TREE_MODEL_TEMPLATE_JAVA,
            KIE_PMML_TREE_MODEL_TEMPLATE);
    ClassOrInterfaceDeclaration modelTemplate = cloneCU.getClassByName(className)
            .orElseThrow(() -> new KiePMMLException(MAIN_CLASS_NOT_FOUND + ": " + className));
    String targetField = "whatIdo";
    String fullNodeClassName = "full.Node.ClassName";
    CommonCompilationDTO<TreeModel> source = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
            pmml1,
            treeModel1,
            new PMMLCompilationContextMock(),
            SOURCE_1);
    KiePMMLTreeModelFactory.setConstructor(TreeCompilationDTO.fromCompilationDTO(source),
            modelTemplate,
            fullNodeClassName);
    // Inspect the populated default constructor and verify every expected assignment.
    ConstructorDeclaration constructorDeclaration = modelTemplate
            .getDefaultConstructor()
            .orElseThrow(() -> new KiePMMLInternalException(String.format(MISSING_DEFAULT_CONSTRUCTOR,
                    modelTemplate.getName())))
            .clone();
    BlockStmt body = constructorDeclaration.getBody();
    // targetField must be assigned as a quoted string literal
    Optional<AssignExpr> optRetrieved = CommonCodegenUtils.getAssignExpression(body, "targetField");
    assertThat(optRetrieved).isPresent();
    AssignExpr retrieved = optRetrieved.get();
    Expression initializer = retrieved.getValue();
    assertThat(initializer).isInstanceOf(StringLiteralExpr.class);
    String expected = String.format("\"%s\"", targetField);
    assertThat(initializer.toString()).isEqualTo(expected);
    // miningFunction must be assigned as the fully-qualified enum constant
    optRetrieved = CommonCodegenUtils.getAssignExpression(body, "miningFunction");
    assertThat(optRetrieved).isPresent();
    retrieved = optRetrieved.get();
    initializer = retrieved.getValue();
    assertThat(initializer).isInstanceOf(NameExpr.class);
    MINING_FUNCTION miningFunction = MINING_FUNCTION.byName(treeModel1.getMiningFunction().value());
    expected = miningFunction.getClass().getName() + "." + miningFunction.name();
    assertThat(initializer.toString()).isEqualTo(expected);
    // pmmlMODEL must be assigned as the TREE_MODEL enum constant
    optRetrieved = CommonCodegenUtils.getAssignExpression(body, "pmmlMODEL");
    assertThat(optRetrieved).isPresent();
    retrieved = optRetrieved.get();
    initializer = retrieved.getValue();
    assertThat(initializer).isInstanceOf(NameExpr.class);
    expected = PMML_MODEL.TREE_MODEL.getClass().getName() + "." + PMML_MODEL.TREE_MODEL.name();
    assertThat(initializer.toString()).isEqualTo(expected);
    // nodeFunction must be the <fullNodeClassName>::evaluateNode method reference
    optRetrieved = CommonCodegenUtils.getAssignExpression(body, "nodeFunction");
    assertThat(optRetrieved).isPresent();
    retrieved = optRetrieved.get();
    initializer = retrieved.getValue();
    assertThat(initializer).isInstanceOf(MethodReferenceExpr.class);
    expected = fullNodeClassName;
    assertThat(((MethodReferenceExpr) initializer).getScope().toString()).isEqualTo(expected);
    expected = "evaluateNode";
    assertThat(((MethodReferenceExpr) initializer).getIdentifier()).isEqualTo(expected);
}
|
@Override
public int run(String[] args) throws Exception {
    try {
        // Lazily build the web service client used to fetch logs.
        webServiceClient = WebServiceClient.getWebServiceClient().createClient();
        return runCommand(args);
    } finally {
        // Always release both clients, even when runCommand throws.
        if (yarnClient != null) {
            yarnClient.close();
        }
        if (webServiceClient != null) {
            webServiceClient.destroy();
        }
    }
}
|
@Test(timeout = 5000l)
public void testUnknownApplicationId() throws Exception {
    // Running the CLI against an application id the RM does not know about
    // must fail and report that the application state is unavailable.
    YarnClient mockYarnClient = createMockYarnClientUnknownApp();
    LogsCLI cli = new LogsCLIForTest(mockYarnClient);
    cli.setConf(conf);

    String unknownAppId = ApplicationId.newInstance(1, 1).toString();
    int exitCode = cli.run(new String[] { "-applicationId", unknownAppId });

    // Error since no logs present for the app.
    assertTrue(exitCode != 0);
    assertTrue(sysErrStream.toString().startsWith("Unable to get ApplicationState"));
}
|
/**
 * Dispatches the CLI action given as {@code args[0]} against the class or
 * resource name given as {@code args[1]}; anything else prints usage.
 */
@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        return usage(args);
    }
    final String action = args[0];
    final String name = args[1];

    if (A_LOAD.equals(action)) {
        return loadClass(name);
    }
    if (A_CREATE.equals(action)) {
        // Load first so that load errors are reported separately from
        // instantiation errors.
        int loadResult = loadClass(name);
        return loadResult == SUCCESS ? createClassInstance(name) : loadResult;
    }
    if (A_RESOURCE.equals(action)) {
        return loadResource(name);
    }
    if (A_PRINTRESOURCE.equals(action)) {
        return dumpResource(name);
    }
    return usage(args);
}
|
@Test
public void testLoadFindsSelf() throws Throwable {
    // The loader must be able to load this very test class by name.
    run(FindClass.SUCCESS,
        FindClass.A_LOAD, "org.apache.hadoop.util.TestFindClass");
}
|
@Override
protected List<Object[]> rows() {
    List<Object[]> rows = new ArrayList<>(mappings.size());
    for (Mapping mapping : mappings) {
        rows.add(mappingRow(mapping));
    }
    return rows;
}

/** Builds the single metadata row describing the given mapping. */
private Object[] mappingRow(Mapping mapping) {
    // With security enabled, only options the connector declares as
    // non-sensitive are exposed; otherwise all options pass through.
    Map<String, String> visibleOptions;
    if (securityEnabled) {
        visibleOptions = new TreeMap<>();
        final SqlConnector sqlConnector = sqlConnectorCache.forType(mapping.connectorType());
        final Set<String> secureConnectorOptions = sqlConnector.nonSensitiveConnectorOptions();
        for (Entry<String, String> option : mapping.options().entrySet()) {
            if (secureConnectorOptions.contains(option.getKey())) {
                visibleOptions.put(option.getKey(), option.getValue());
            }
        }
    } else {
        visibleOptions = mapping.options();
    }
    return new Object[]{
            catalog(),
            mappingsSchema,
            mapping.name(),
            quoteCompoundIdentifier(mapping.externalName()),
            // Prefer the resolved data connection type; fall back to the connector type.
            Optional.ofNullable(mapping.dataConnection())
                    .map(dataConnectionTypeResolver)
                    .orElse(mapping.connectorType()),
            uncheckCall(() -> JsonUtil.toJson(visibleOptions))
    };
}
|
@Test
public void test_rows_dataconnection() {
    // given: a mapping backed by a data connection ("some-dc") rather than a
    // direct connector type, with the security flag false so options pass through.
    Mapping mapping = new Mapping(
        "table-name",
        new String[]{"external-schema", "table-external-name"},
        "some-dc",
        null,
        null,
        emptyList(),
        singletonMap("key", "value")
    );
    MappingsTable mappingTable = new MappingsTable(
        "catalog",
        null,
        "table-schema",
        singletonList(mapping),
        null,
        (dc) -> {
            // The resolver must be invoked with the mapping's data connection name.
            assertThat(dc).isEqualTo("some-dc");
            return "external-dc-type";
        }, false);
    // when
    List<Object[]> rows = mappingTable.rows();
    // then: the connector column holds the resolved data connection type and
    // the external name is quoted as a compound identifier.
    assertThat(rows).containsExactly(new Object[]{
        "catalog"
        , "table-schema"
        , "table-name"
        , "\"external-schema\".\"table-external-name\""
        , "external-dc-type"
        , "{\"key\":\"value\"}"
    });
}
|
@Override
public Batch toBatch() {
    // This scan's hashCode is passed along so the batch can be identified;
    // see SparkBatch for how it is used.
    return new SparkBatch(
        sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
@Test
public void testUnpartitionedBucketString() throws Exception {
    createUnpartitionedTable(spark, tableName);

    // Pushing bucket(5, data) <= 2 against an unpartitioned table must not
    // prune anything: all 10 input partitions remain.
    SparkScanBuilder builder = scanBuilder();
    BucketFunction.BucketString function = new BucketFunction.BucketString();
    UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(5), fieldRef("data")));
    Predicate predicate = new Predicate("<=", expressions(udf, intLit(2)));
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);

    // NOT LTEQ: the negated predicate must not prune anything either.
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
|
@Override
@Nullable
public V put(@Nullable K key, @Nullable V value) {
    // Delegates to the internal put variant; the boolean flag presumably
    // enables overwriting an existing entry — confirm against put(K, V, boolean).
    return put(key, value, true);
}
|
@Test
void shouldApplySupplementalHash() {
    Integer key = 123;
    this.map.put(key, "123");
    // The supplemental hash must differ from the raw key hash, and its
    // high-order bits (after shifting right by 30) must not all be zero,
    // i.e. the hash is spread into the upper bit range.
    assertNotEquals(this.map.getSupplementalHash(), key.hashCode());
    assertNotEquals(this.map.getSupplementalHash() >> 30 & 0xFF, 0);
}
|
/**
 * Deep-clones this meta: scalars are copied via {@code super.clone()}, the
 * key-field and input-step arrays are reallocated and copied element-wise.
 */
@Override
public Object clone() {
    MultiMergeJoinMeta retval = (MultiMergeJoinMeta) super.clone();
    int nrKeys = keyFields == null ? 0 : keyFields.length;
    int nrSteps = inputSteps == null ? 0 : inputSteps.length;
    retval.allocateKeys( nrKeys );
    retval.allocateInputSteps( nrSteps );
    // Guard the copies: System.arraycopy throws NullPointerException for a
    // null source array even when the requested length is 0, so a meta with
    // unset keyFields/inputSteps would previously fail to clone.
    if ( keyFields != null ) {
      System.arraycopy( keyFields, 0, retval.keyFields, 0, nrKeys );
    }
    if ( inputSteps != null ) {
      System.arraycopy( inputSteps, 0, retval.inputSteps, 0, nrSteps );
    }
    return retval;
}
|
@Test
public void cloneTest() throws Exception {
    MultiMergeJoinMeta meta = new MultiMergeJoinMeta();
    meta.allocateKeys( 2 );
    meta.allocateInputSteps( 3 );
    meta.setKeyFields( new String[] { "key1", "key2" } );
    meta.setInputSteps( new String[] { "step1", "step2", "step3" } );
    // scalars should be cloned using super.clone() - makes sure they're calling super.clone()
    meta.setJoinType( "INNER" );

    MultiMergeJoinMeta clone = (MultiMergeJoinMeta) meta.clone();
    // The clone must be a distinct object whose arrays and scalars match the original.
    Assert.assertNotSame( meta, clone );
    Assert.assertArrayEquals( meta.getKeyFields(), clone.getKeyFields() );
    Assert.assertArrayEquals( meta.getInputSteps(), clone.getInputSteps() );
    Assert.assertEquals( meta.getJoinType(), clone.getJoinType() );
}
|
/**
 * Tells whether the given path points at or below a device node, i.e. a keyed
 * "device" list entry nested under the "devices" container.
 */
public static boolean isUnderDeviceRootNode(ResourceId path) {
    // When the path explicitly starts with the root node all indices shift by one.
    final int rootIdx = ResourceIds.startsWithRootNode(path) ? 0 : -1;
    if (path.nodeKeys().size() < rootIdx + 3) {
        return false;
    }
    return DEVICE_SCHEMA.equals(path.nodeKeys().get(rootIdx + DEVICE_INDEX).schemaId())
            && path.nodeKeys().get(rootIdx + DEVICE_INDEX) instanceof ListKey
            && DEVICES_SCHEMA.equals(path.nodeKeys().get(rootIdx + DEVICES_INDEX).schemaId());
}
|
@Test
public void testDeviceSubtreeEventTest() {
    // root relative ResourceId used by DynamicConfigEvent
    ResourceId eventPath = ResourceId.builder()
            .addBranchPointSchema(DEVICES_NAME, DCS_NAMESPACE)
            .addBranchPointSchema(DEVICE_NAME, DCS_NAMESPACE)
            .addKeyLeaf(DEVICE_ID_KL_NAME, DCS_NAMESPACE, DID_A.toString())
            .build();

    // The second node key must be the keyed device list entry.
    NodeKey<?> deviceNodeKey = eventPath.nodeKeys().get(1);
    assertThat(deviceNodeKey, is(instanceOf(ListKey.class)));
    assertThat(deviceNodeKey.schemaId().namespace(), is(equalTo(DCS_NAMESPACE)));
    assertThat(deviceNodeKey.schemaId().name(), is(equalTo(DEVICE_NAME)));

    assertTrue(DeviceResourceIds.isUnderDeviceRootNode(eventPath));
}
|
/**
 * Returns a new collection containing the locally-defined part of each group;
 * groups with no local part are skipped.
 */
public PipelineGroups getLocal() {
    PipelineGroups result = new PipelineGroups();
    for (PipelineConfigs group : this) {
        PipelineConfigs localPart = group.getLocal();
        if (localPart != null) {
            result.add(localPart);
        }
    }
    return result;
}
|
@Test
public void shouldGetLocalPartsWhenOriginIsRepo() {
    // A group originating from a config repository has no local part, so the
    // local view of the groups collection must be empty.
    PipelineConfigs remoteGroup = createGroup("defaultGroup", createPipelineConfig("pipeline1", "stage1"));
    remoteGroup.setOrigins(new RepoConfigOrigin());

    PipelineGroups groups = new PipelineGroups(remoteGroup);
    PipelineGroups locals = groups.getLocal();

    assertThat(locals.size(), is(0));
    assertThat(locals.isEmpty(), is(true));
}
|
/**
 * Validates and stores the resource properties: only the known JDBC keys are
 * accepted, the mandatory ones must be present, and the driver checksum is
 * computed last.
 */
@Override
protected void setProperties(Map<String, String> properties) throws DdlException {
    Preconditions.checkState(properties != null);
    // Reject any property outside the known set before accepting the config.
    for (String key : properties.keySet()) {
        boolean known = DRIVER_URL.equals(key) || URI.equals(key) || USER.equals(key)
                || PASSWORD.equals(key) || TYPE.equals(key) || NAME.equals(key)
                || DRIVER_CLASS.equals(key);
        if (!known) {
            throw new DdlException("Property " + key + " is unknown");
        }
    }
    configs = properties;
    // These properties are mandatory; checkProperties throws when one is missing.
    checkProperties(DRIVER_URL);
    checkProperties(DRIVER_CLASS);
    checkProperties(URI);
    checkProperties(USER);
    checkProperties(PASSWORD);
    computeDriverChecksum();
}
|
@Test(expected = DdlException.class)
public void testWithoutURI() throws Exception {
    // Removing the mandatory URI property must make setProperties fail.
    Map<String, String> propertiesWithoutUri = getMockConfigs();
    propertiesWithoutUri.remove(JDBCResource.URI);
    new JDBCResource("jdbc_resource_test").setProperties(propertiesWithoutUri);
}
|
/**
 * Parses a {@code gs://bucket/object} URI into a {@link GSBlobIdentifier}.
 *
 * @throws IllegalArgumentException if the scheme is not the GS scheme, or the
 *     bucket or object name is missing/blank
 */
public static GSBlobIdentifier parseUri(URI uri) {
    // Null-safe scheme check: uri.getScheme() is null for scheme-less URIs, and
    // the previous order (uri.getScheme().equals(...)) raised an unhelpful
    // NullPointerException instead of IllegalArgumentException.
    Preconditions.checkArgument(
            GSFileSystemFactory.SCHEME.equals(uri.getScheme()),
            String.format("URI scheme for %s must be %s", uri, GSFileSystemFactory.SCHEME));

    // The bucket name is the URI authority, e.g. "bucket" in gs://bucket/object.
    String finalBucketName = uri.getAuthority();
    if (StringUtils.isNullOrWhitespaceOnly(finalBucketName)) {
        throw new IllegalArgumentException(String.format("Bucket name in %s is invalid", uri));
    }

    // The object name is the URI path without its leading slash.
    String path = uri.getPath();
    if (StringUtils.isNullOrWhitespaceOnly(path)) {
        throw new IllegalArgumentException(String.format("Object name in %s is invalid", uri));
    }
    String finalObjectName = path.substring(1); // remove leading slash from path
    if (StringUtils.isNullOrWhitespaceOnly(finalObjectName)) {
        throw new IllegalArgumentException(String.format("Object name in %s is invalid", uri));
    }

    return new GSBlobIdentifier(finalBucketName, finalObjectName);
}
|
@Test(expected = IllegalArgumentException.class)
public void shouldFailToParseUriMissingBucketName() {
    // gs:///foo/bar has an empty authority, i.e. no bucket name.
    BlobUtils.parseUri(URI.create("gs:///foo/bar"));
}
|
/**
 * Joins two tables into a single, unmaterialized result table.
 *
 * <p>Kafka Streams' KTable has no native right join, so a RIGHT join is
 * executed as {@code right.leftJoin(left)}; the schemas are swapped
 * accordingly when building the join parameters so that column order in the
 * result matches the declared join.
 */
public static <K> KTableHolder<K> build(
    final KTableHolder<K> left,
    final KTableHolder<K> right,
    final TableTableJoin<K> join
) {
    // For RIGHT joins the sides are swapped (see class note above).
    final LogicalSchema leftSchema;
    final LogicalSchema rightSchema;
    if (join.getJoinType().equals(RIGHT)) {
        leftSchema = right.getSchema();
        rightSchema = left.getSchema();
    } else {
        leftSchema = left.getSchema();
        rightSchema = right.getSchema();
    }
    final JoinParams joinParams = JoinParamsFactory
        .create(join.getKeyColName(), leftSchema, rightSchema);
    final KTable<K, GenericRow> result;
    switch (join.getJoinType()) {
        case INNER:
            result = left.getTable().join(right.getTable(), joinParams.getJoiner());
            break;
        case LEFT:
            result = left.getTable().leftJoin(right.getTable(), joinParams.getJoiner());
            break;
        case RIGHT:
            // RIGHT join = left join with the operands reversed.
            result = right.getTable().leftJoin(left.getTable(), joinParams.getJoiner());
            break;
        case OUTER:
            result = left.getTable().outerJoin(right.getTable(), joinParams.getJoiner());
            break;
        default:
            throw new IllegalStateException("invalid join type: " + join.getJoinType());
    }
    return KTableHolder.unmaterialized(
        result,
        joinParams.getSchema(),
        left.getExecutionKeyFactory());
}
|
@Test
public void shouldDoInnerJoin() {
    // Given:
    givenInnerJoin(R_KEY);
    // When:
    final KTableHolder<Struct> result = join.build(planBuilder, planInfo);
    // Then: an INNER join maps onto KTable#join with a value joiner spanning
    // both sides' value columns, and no other table interactions occur.
    verify(leftKTable).join(
        same(rightKTable),
        eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 0))
    );
    verifyNoMoreInteractions(leftKTable, rightKTable, resultKTable);
    assertThat(result.getTable(), is(resultKTable));
    assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
}
|
/**
 * Opens an upload stream for the file, retrying once in two recoverable
 * cases: a 412 Precondition Failed caused by an expired lock token (retried
 * with the lock id removed) and a 417 Expectation Failed (retried without
 * Expect: 100-continue). Any other failure is rethrown.
 */
@Override
public HttpResponseOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        return this.write(file, this.toHeaders(file, status, expect), status);
    }
    catch(ConflictException e) {
        if(expect) {
            if(null != status.getLockId()) {
                // Handle 412 Precondition Failed with expired token
                log.warn(String.format("Retry failure %s with lock id %s removed", e, status.getLockId()));
                return this.write(file, this.toHeaders(file, status.withLockId(null), expect), status);
            }
        }
        throw e;
    }
    catch(InteroperabilityException e) {
        if(expect) {
            // Handle 417 Expectation Failed
            log.warn(String.format("Retry failure %s with Expect: Continue removed", e));
            return this.write(file, this.toHeaders(file, status.withLockId(null), false), status);
        }
        throw e;
    }
}
|
@Test
public void testWriteContentRangeTwoBytes() throws Exception {
    final DAVWriteFeature feature = new DAVWriteFeature(session);
    final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final byte[] source = RandomUtils.nextBytes(2);
    // Upload the first byte as the initial write.
    {
        final TransferStatus status = new TransferStatus();
        status.setLength(1L);
        status.setOffset(0L);
        final HttpResponseOutputStream<Void> out = feature.write(test, status, new DisabledConnectionCallback());
        new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(source), out);
        out.close();
    }
    // Append the second byte at offset 1 using a ranged write.
    {
        final TransferStatus status = new TransferStatus();
        status.setLength(1L);
        status.setOffset(1L);
        status.setAppend(true);
        final HttpResponseOutputStream<Void> out = feature.write(test, status, new DisabledConnectionCallback());
        new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(source), out);
        out.close();
    }
    // Reading the file back must yield both bytes in order; then clean up.
    final ByteArrayOutputStream out = new ByteArrayOutputStream(source.length);
    IOUtils.copy(new DAVReadFeature(session).read(test, new TransferStatus().withLength(source.length), new DisabledConnectionCallback()), out);
    assertArrayEquals(source, out.toByteArray());
    new DAVDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Returns the shard iterators to poll, keyed by shard id.
 *
 * <p>On the first call the shard tree is fetched via DescribeStream and
 * iterators are created according to the configured iterator type. On later
 * calls, iterators of still-active leaf shards are reused, while closed
 * shards are replaced by TRIM_HORIZON iterators on their child shards.
 */
Map<String, String> getShardIterators() {
    if (streamArn == null) {
        streamArn = getStreamArn();
    }
    // Either return cached ones or get new ones via GetShardIterator requests.
    if (currentShardIterators.isEmpty()) {
        DescribeStreamResponse streamDescriptionResult
            = getClient().describeStream(DescribeStreamRequest.builder().streamArn(streamArn).build());
        shardTree.populate(streamDescriptionResult.streamDescription().shards());
        StreamIteratorType streamIteratorType = getEndpoint().getConfiguration().getStreamIteratorType();
        currentShardIterators = getCurrentShardIterators(streamIteratorType);
    } else {
        Map<String, String> childShardIterators = new HashMap<>();
        for (Entry<String, String> currentShardIterator : currentShardIterators.entrySet()) {
            List<Shard> children = shardTree.getChildren(currentShardIterator.getKey());
            if (children.isEmpty()) { // This is still an active leaf shard, reuse it.
                childShardIterators.put(currentShardIterator.getKey(), currentShardIterator.getValue());
            } else {
                for (Shard child : children) { // Inactive shard, move down to its children.
                    // Children start from TRIM_HORIZON so no records are skipped.
                    String shardIterator = getShardIterator(child.shardId(), ShardIteratorType.TRIM_HORIZON);
                    childShardIterators.put(child.shardId(), shardIterator);
                }
            }
        }
        currentShardIterators = childShardIterators;
    }
    LOG.trace("Shard Iterators are: {}", currentShardIterators);
    return currentShardIterators;
}
|
@Test
void shouldProgressThroughTreeWhenShardIteratorsAreRetrievedRepeatedly() throws Exception {
    component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_START);
    Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
    ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
    endpoint.doStart();
    // First call: FROM_START yields only the root shard's iterator.
    assertEquals(Collections.singletonMap(SHARD_0.shardId(), SHARD_ITERATOR_0), underTest.getShardIterators());
    // Second call: the handler descends from the root shard to its two children.
    Map<String, String> expectedShardIterators1 = new HashMap<>();
    expectedShardIterators1.put(SHARD_1.shardId(), SHARD_ITERATOR_1);
    expectedShardIterators1.put(SHARD_2.shardId(), SHARD_ITERATOR_2);
    assertEquals(expectedShardIterators1, underTest.getShardIterators());
    // Third call: descend another level to the four grandchildren.
    Map<String, String> expectedShardIterators2 = new HashMap<>();
    expectedShardIterators2.put(SHARD_3.shardId(), SHARD_ITERATOR_3);
    expectedShardIterators2.put(SHARD_4.shardId(), SHARD_ITERATOR_4);
    expectedShardIterators2.put(SHARD_5.shardId(), SHARD_ITERATOR_5);
    expectedShardIterators2.put(SHARD_6.shardId(), SHARD_ITERATOR_6);
    assertEquals(expectedShardIterators2, underTest.getShardIterators());
    // Fourth call: the leaves are still active, so the same iterators are reused.
    Map<String, String> expectedShardIterators3 = expectedShardIterators2;
    assertEquals(expectedShardIterators3, underTest.getShardIterators());
}
|
/**
 * A character is valid in a connection name as long as it is not one of the
 * blacklisted characters.
 */
public boolean isValidConnectionNameCharacter( char c ) {
    return !CONNECTION_NAME_INVALID_CHARACTERS.contains( String.valueOf( c ) );
}
|
@Test
public void testIsValidConnectionNameCharacterReturnsTrueOnValidCharacters() {
    // Every character in the accepted set must be reported as valid.
    for ( char candidate : ACCEPTED_CHARACTERS_FULL_SET.toCharArray() ) {
        assertTrue( fileNameParser.isValidConnectionNameCharacter( candidate ) );
    }
}
|
/**
 * Confirms an authentication performed with the DigiD app.
 *
 * <p>Validates the referenced authenticated app session and its matching
 * authenticator, optionally validates the PIP signature (eIDAS), remote-logs
 * the action, copies the authentication details onto the current session and
 * propagates the result to pending OIDC and/or SAML (AD) sessions.
 */
@Override
public AppResponse process(Flow flow, ConfirmRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
    var authAppSession = appSessionService.getSession(request.getAuthSessionId());
    // The referenced session must be authenticated and belong to the same app.
    if (!isAppSessionAuthenticated(authAppSession) || !request.getUserAppId().equals(authAppSession.getUserAppId())){
        return new NokResponse();
    }
    appAuthenticator = appAuthenticatorService.findByUserAppId(authAppSession.getUserAppId());
    if (!isAppAuthenticatorActivated(appAuthenticator) || !appAuthenticatorService.exists(appAuthenticator)) return new NokResponse();
    // eIDAS flows validate the PIP signature first; a non-null result is an
    // error response and short-circuits the confirmation.
    if (appSession.getEidasUit()){
        var response = validatePipSignature(request.getSignatureOfPip());
        if (response != null) return response;
    }
    if (appSession.getAction() != null ) {
        var result = digidClient.getAccountStatus(appAuthenticator.getAccountId());
        if (ERROR_DECEASED.equals(result.get("error"))) return deceasedResponse();
        // Remote-log with an action-specific log code.
        switch(appSession.getAction()){
            case "activate_with_app" ->
                digidClient.remoteLog("1366", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true));
            case "upgrade_rda_widchecker" ->
                digidClient.remoteLog("1318", getAppDetails());
            default ->
                digidClient.remoteLog("1344", getAppDetails());
        }
    }
    // Copy the authenticator's details onto the current session.
    appSession.setAppAuthenticationLevel(appAuthenticator.getAuthenticationLevel());
    appSession.setAccountId(authAppSession.getAccountId());
    appSession.setSubstantialActivatedAt(appAuthenticator.getSubstantieelActivatedAt());
    appSession.setSubstantialDocumentType(appAuthenticator.getSubstantieelDocumentType());
    appSession.setUserAppId(authAppSession.getUserAppId());
    // Propagate the confirmed authentication to a pending OIDC session, if any.
    if (appSession.getOidcSessionId() != null && authAppSession.getState().equals(State.AUTHENTICATED.name())) {
        oidcClient.confirmOidc(appSession.getAccountId(), appAuthenticator.getAuthenticationLevel(), appSession.getOidcSessionId());
    }
    // Likewise update a pending SAML/AD session with the level and BSN.
    if (appSession.getAdSessionId() != null && authAppSession.getState().equals(State.AUTHENTICATED.name())) {
        var bsn = digidClient.getBsn(appSession.getAccountId());
        samlClient.updateAdSession(appSession.getAdSessionId(), appAuthenticator.getAuthenticationLevel(), bsn.get(BSN));
    }
    return new ConfirmationResponse(appAuthenticator.getId().equals(appSession.getAppToDestroy()));
}
|
@Test
public void processHasOidcSession() throws FlowNotDefinedException, IOException, NoSuchAlgorithmException {
    // An authenticated app session carrying an OIDC session id (and no pending
    // action) should confirm the OIDC session and yield an OK response.
    authAppSession.setAction(null);
    authAppSession.setEidasUit(false);
    authAppSession.setOidcSessionId("test");
    when(oidcClient.confirmOidc(authAppSession.getAccountId(), mockedAppAuthenticator.getAuthenticationLevel(), authAppSession.getOidcSessionId())).thenReturn(null);

    AppResponse response = confirmed.process(mockedFlow, confirmRequest);

    assertTrue(response instanceof OkResponse);
}
|
public Result parse(final String string) throws DateNotParsableException {
    // Parse relative to "now" by delegating to the two-argument overload.
    return this.parse(string, new Date());
}
|
@Test
public void testLast4hours() throws Exception {
    // Parse "last 4 hours" against a fixed reference instant so the expected
    // range can be computed exactly.
    DateTime now = DateTime.now(DateTimeZone.UTC);
    NaturalDateParser.Result result = naturalDateParser.parse("last 4 hours", now.toDate());
    assertThat(result.getFrom()).as("from should be exactly 4 hours in the past").isEqualTo(now.minusHours(4));
    assertThat(result.getTo()).as("to should be the reference date").isEqualTo(now);
}
|
/**
 * Handles the ETH_SIGN operation: asks the node to sign the given sha3 hash
 * with the given address' key and stores the signature in the message body.
 */
@InvokeOnHeader(Web3jConstants.ETH_SIGN)
void ethSign(Message message) throws IOException {
    // Message headers take precedence over the endpoint configuration defaults.
    String address = message.getHeader(Web3jConstants.ADDRESS, configuration::getAddress, String.class);
    String sha3HashOfDataToSign = message.getHeader(Web3jConstants.SHA3_HASH_OF_DATA_TO_SIGN,
        configuration::getSha3HashOfDataToSign, String.class);
    Request<?, EthSign> request = web3j.ethSign(address, sha3HashOfDataToSign);
    setRequestId(message, request);
    EthSign response = request.send();
    // checkForError populates the message on failure; only set the body on success.
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.getSignature());
    }
}
|
@Test
public void ethSignTest() throws Exception {
    // Stub the web3j client so eth_sign returns a fixed signature string.
    EthSign response = Mockito.mock(EthSign.class);
    Mockito.when(mockWeb3j.ethSign(any(), any())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getSignature()).thenReturn("test");
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_SIGN);
    template.send(exchange);
    // The producer should copy the signature into the message body.
    String body = exchange.getIn().getBody(String.class);
    assertEquals("test", body);
}
|
/**
 * Handles the ETH_SUBMIT_HASHRATE operation: reports mining hashrate to the node
 * and puts the submission success flag in the message body.
 * Header values fall back to the endpoint configuration when absent.
 */
@InvokeOnHeader(Web3jConstants.ETH_SUBMIT_HASHRATE)
void ethSubmitHashrate(Message message) throws IOException {
    String hashrate = message.getHeader(Web3jConstants.ETH_HASHRATE, configuration::getHashrate, String.class);
    String clientId = message.getHeader(Web3jConstants.CLIENT_ID, configuration::getClientId, String.class);
    Request<?, EthSubmitHashrate> request = web3j.ethSubmitHashrate(hashrate, clientId);
    setRequestId(message, request);
    EthSubmitHashrate response = request.send();
    // On error, checkForError already records the failure on the message; leave the body untouched.
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.submissionSuccessful());
    }
}
|
@Test
public void ethSubmitHashrateTest() throws Exception {
    // Stub the web3j client so eth_submitHashrate reports a successful submission.
    EthSubmitHashrate response = Mockito.mock(EthSubmitHashrate.class);
    Mockito.when(mockWeb3j.ethSubmitHashrate(any(), any())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.submissionSuccessful()).thenReturn(Boolean.TRUE);
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_SUBMIT_HASHRATE);
    template.send(exchange);
    // The producer should expose the success flag as the message body.
    Boolean body = exchange.getIn().getBody(Boolean.class);
    assertTrue(body);
}
|
/**
 * Merges two maps into one. On duplicate keys the entry from {@code map2} wins
 * because it is copied last. Returns {@code null} (SQL NULL) only when both
 * inputs are null; a single null input is treated as an empty map.
 *
 * @param map1 first map to union, may be null
 * @param map2 second map to union, may be null
 * @return the merged map, or null if both inputs are null
 */
@Udf
public <T> Map<String, T> union(
    @UdfParameter(description = "first map to union") final Map<String, T> map1,
    @UdfParameter(description = "second map to union") final Map<String, T> map2) {
  // Avoid building an intermediate list/stream just to test for the all-null case.
  if (map1 == null && map2 == null) {
    return null;
  }
  final Map<String, T> output = new HashMap<>();
  if (map1 != null) {
    output.putAll(map1);
  }
  if (map2 != null) {
    output.putAll(map2);
  }
  return output;
}
|
@Test
public void shouldUnionNonEmptyMaps() {
    // Two disjoint maps: the union must contain every entry from both.
    final Map<String, String> input1 = Maps.newHashMap();
    input1.put("foo", "spam");
    input1.put("bar", "baloney");
    final Map<String, String> input2 = Maps.newHashMap();
    input2.put("one", "apple");
    input2.put("two", "banana");
    input2.put("three", "cherry");
    final Map<String, String> result = udf.union(input1, input2);
    // Spot-check size plus one entry from each input map.
    assertThat(result.size(), is(5));
    assertThat(result.get("foo"), is("spam"));
    assertThat(result.get("two"), is("banana"));
}
|
@Override
public Object nextEntry() throws IOException {
    // This stream implementation exposes no entries; null signals exhaustion immediately.
    return null;
}
|
@Test
public void testNextEntry() throws IOException {
    // The stream has no entries, so nextEntry() must immediately return null.
    assertNull( inStream.nextEntry() );
}
|
/**
 * Returns the column names being inserted: from the SET assignment when the
 * statement uses INSERT ... SET, otherwise from the explicit column list.
 */
public List<String> getInsertColumnNames() {
    return getSqlStatement().getSetAssignment().map(this::getColumnNamesForSetAssignment).orElseGet(() -> getColumnNamesForInsertColumns(getSqlStatement().getColumns()));
}
|
@Test
void assertGetInsertColumnNamesForSetAssignmentForMySQL() {
    // Build a MySQL INSERT ... SET col = 1 statement; column names should come from the SET clause.
    MySQLInsertStatement insertStatement = new MySQLInsertStatement();
    List<ColumnSegment> columns = new LinkedList<>();
    columns.add(new ColumnSegment(0, 0, new IdentifierValue("col")));
    ColumnAssignmentSegment insertStatementAssignment = new ColumnAssignmentSegment(0, 0, columns, new LiteralExpressionSegment(0, 0, 1));
    insertStatement.setSetAssignment(new SetAssignmentSegment(0, 0, Collections.singletonList(insertStatementAssignment)));
    insertStatement.setTable(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue(""))));
    InsertStatementContext insertStatementContext = createInsertStatementContext(Collections.emptyList(), insertStatement);
    List<String> columnNames = insertStatementContext.getInsertColumnNames();
    assertThat(columnNames.size(), is(1));
    assertThat(columnNames.iterator().next(), is("col"));
}
|
/**
 * Resolves classes bundled in the TFS jar through the nested-jar loader;
 * all other classes follow the normal parent-delegation path.
 */
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
    if (!existsInTfsJar(name)) {
        return super.loadClass(name);
    }
    return jarClassLoader.loadClass(name);
}
|
@Test
public void canLoadClassFromParent() throws Exception {
    // Classes outside the nested jar must be resolved via the parent loader,
    // through both the single-arg and the (name, resolve) loadClass overloads.
    assertThat(nestedJarClassLoader.loadClass(this.getClass().getCanonicalName()))
            .isNotNull()
            .hasPackage(this.getClass().getPackageName());
    assertThat(nestedJarClassLoader.loadClass(this.getClass().getCanonicalName(), true))
            .isNotNull()
            .hasPackage(this.getClass().getPackageName());
}
|
/**
 * Registers a new publication with the media driver and blocks until the driver
 * acknowledges it. Runs under the client lock; must not be re-entered from a
 * conductor callback.
 */
ConcurrentPublication addPublication(final String channel, final int streamId)
{
    clientLock.lock();
    try
    {
        ensureActive();
        ensureNotReentrant();
        final long registrationId = driverProxy.addPublication(channel, streamId);
        // Stash the channel so it can be reported in errors before the driver responds.
        stashedChannelByRegistrationId.put(registrationId, channel);
        // Blocks until the driver's ON_PUBLICATION_READY (or error) arrives,
        // which populates resourceByRegIdMap for this registration id.
        awaitResponse(registrationId);
        return (ConcurrentPublication)resourceByRegIdMap.get(registrationId);
    }
    finally
    {
        clientLock.unlock();
    }
}
|
@Test
void shouldNotPreTouchLogBuffersForPublicationIfDisabled()
{
    final int streamId = -53453894;
    final String channel = "aeron:ipc?alias=test";
    final long publicationId = 113;
    final String logFileName = SESSION_ID_2 + "-log";
    // Disable pre-touch: mapping the log buffers must not trigger preTouch().
    context.preTouchMappedMemory(false);
    // Simulate the driver's ON_PUBLICATION_READY broadcast for this registration.
    whenReceiveBroadcastOnMessage(
        ControlProtocolEvents.ON_PUBLICATION_READY,
        publicationReadyBuffer,
        (buffer) ->
        {
            publicationReady.correlationId(publicationId);
            publicationReady.registrationId(publicationId);
            publicationReady.logFileName(logFileName);
            return publicationReady.length();
        });
    when(driverProxy.addPublication(channel, streamId)).thenReturn(publicationId);
    final ConcurrentPublication publication = conductor.addPublication(channel, streamId);
    assertNotNull(publication);
    final LogBuffers logBuffers = logBuffersFactory.map(logFileName);
    assertNotNull(logBuffers);
    verify(logBuffers, never()).preTouch();
}
|
/**
 * Orders datanodes by DFS-used percentage for available-space placement.
 * Returns 0 ("similar") when the nodes are equal, when both are under the
 * tolerance limit and their usage differs by less than the configured
 * tolerance, or when balancing locally and the first node is under 50% used;
 * otherwise -1/1 by ascending usage.
 */
protected int compareDataNode(final DatanodeDescriptor a,
    final DatanodeDescriptor b, boolean isBalanceLocal) {
    // True when BOTH nodes are below the tolerance limit (the max of the two is below it).
    boolean toleranceLimit = Math.max(a.getDfsUsedPercent(), b.getDfsUsedPercent())
        < balancedSpaceToleranceLimit;
    if (a.equals(b)
        || (toleranceLimit && Math.abs(a.getDfsUsedPercent() - b.getDfsUsedPercent())
        < balancedSpaceTolerance) || ((
        // NOTE(review): the 50% threshold for local balancing is hard-coded — confirm intent.
        isBalanceLocal && a.getDfsUsedPercent() < 50))) {
        return 0;
    }
    return a.getDfsUsedPercent() < b.getDfsUsedPercent() ? -1 : 1;
}
|
@Test
public void testChooseSimilarDataNode() {
    DatanodeDescriptor[] tolerateDataNodes;
    DatanodeStorageInfo[] tolerateStorages;
    int capacity = 3;
    Collection<Node> allTolerateNodes = new ArrayList<>(capacity);
    String[] ownerRackOfTolerateNodes = new String[capacity];
    for (int i = 0; i < capacity; i++) {
        ownerRackOfTolerateNodes[i] = "rack"+i;
    }
    tolerateStorages = DFSTestUtil.createDatanodeStorageInfos(ownerRackOfTolerateNodes);
    tolerateDataNodes = DFSTestUtil.toDatanodeDescriptor(tolerateStorages);
    Collections.addAll(allTolerateNodes, tolerateDataNodes);
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    AvailableSpaceBlockPlacementPolicy toleratePlacementPolicy =
        (AvailableSpaceBlockPlacementPolicy)bm.getBlockPlacementPolicy();
    // Give the three nodes capacities of 20x, 11x and 10x blocks with the same usage,
    // producing different DFS-used percentages.
    updateHeartbeatWithUsage(tolerateDataNodes[0],
        20 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
        1 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
            * blockSize, 0L, 0L, 0L, 0, 0);
    updateHeartbeatWithUsage(tolerateDataNodes[1],
        11 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
        1 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
            * blockSize, 0L, 0L, 0L, 0, 0);
    updateHeartbeatWithUsage(tolerateDataNodes[2],
        10 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
        1 * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE * blockSize,
        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE
            * blockSize, 0L, 0L, 0L, 0, 0);
    // Nodes 0 and 1 are within tolerance (treated as similar, in both directions);
    // nodes 0 and 2 differ enough to be ordered by usage.
    assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[0],
        tolerateDataNodes[1], false) == 0);
    assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[1],
        tolerateDataNodes[0], false) == 0);
    assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[0],
        tolerateDataNodes[2], false) == -1);
    assertTrue(toleratePlacementPolicy.compareDataNode(tolerateDataNodes[2],
        tolerateDataNodes[0], false) == 1);
}
|
/**
 * Creates a DRACOON download share for the given file and returns its public URL.
 * For TripleCrypt-encrypted files the user's file key is decrypted and re-encrypted
 * with a fresh key pair protected by the share password (prompted if not supplied
 * in the options). The URL format depends on the server's REST API version.
 */
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, CreateDownloadShareRequest options, final PasswordCallback callback) throws BackgroundException {
    try {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Create download share for %s", file));
        }
        if(null == options) {
            options = new CreateDownloadShareRequest();
            log.warn(String.format("Use default share options %s", options));
        }
        final Long fileid = Long.parseLong(nodeid.getVersionId(file));
        final Host bookmark = session.getHost();
        if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(file)) {
            // get existing file key associated with the sharing user
            final FileKey key = new NodesApi(session.getClient()).requestUserFileKey(fileid, null, null);
            final EncryptedFileKey encFileKey = TripleCryptConverter.toCryptoEncryptedFileKey(key);
            final UserKeyPairContainer keyPairContainer = session.getKeyPairForFileKey(encFileKey.getVersion());
            final UserKeyPair userKeyPair = TripleCryptConverter.toCryptoUserKeyPair(keyPairContainer);
            // Unlock the user's private key with their passphrase, then decrypt the file key.
            final Credentials passphrase = new TripleCryptKeyPair().unlock(callback, bookmark, userKeyPair);
            final PlainFileKey plainFileKey = Crypto.decryptFileKey(encFileKey, userKeyPair.getUserPrivateKey(), passphrase.getPassword().toCharArray());
            // encrypt file key with a new key pair
            final UserKeyPair pair;
            if(null == options.getPassword()) {
                // No share password supplied: prompt the user for a passphrase for the new pair.
                pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), callback.prompt(
                    bookmark, LocaleFactory.localizedString("Passphrase", "Cryptomator"),
                    LocaleFactory.localizedString("Provide additional login credentials", "Credentials"), new LoginOptions().icon(session.getHost().getProtocol().disk())
                ).getPassword().toCharArray());
            }
            else {
                pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), options.getPassword().toCharArray());
            }
            final EncryptedFileKey encryptedFileKey = Crypto.encryptFileKey(plainFileKey, pair.getUserPublicKey());
            // The password protected the new key pair only; do not send it as a share password.
            options.setPassword(null);
            options.setKeyPair(TripleCryptConverter.toSwaggerUserKeyPairContainer(pair));
            options.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptedFileKey));
        }
        final DownloadShare share = new SharesApi(session.getClient()).createDownloadShare(
            options.nodeId(fileid), StringUtils.EMPTY, null);
        // Human-readable help text, including expiry when the share expires.
        final String help;
        if(null == share.getExpireAt()) {
            help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3"));
        }
        else {
            final long expiry = share.getExpireAt().getMillis();
            help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3")) + " (" + MessageFormat.format(LocaleFactory.localizedString("Expires {0}", "S3") + ")",
                UserDateFormatterFactory.get().getShortFormat(expiry * 1000)
            );
        }
        // Servers older than 4.26 use the legacy hash-fragment share URL format.
        final Matcher matcher = Pattern.compile(SDSSession.VERSION_REGEX).matcher(session.softwareVersion().getRestApiVersion());
        if(matcher.matches()) {
            if(new Version(matcher.group(1)).compareTo(new Version("4.26")) < 0) {
                return new DescriptiveUrl(URI.create(String.format("%s://%s/#/public/shares-downloads/%s",
                    bookmark.getProtocol().getScheme(),
                    bookmark.getHostname(),
                    share.getAccessKey())),
                    DescriptiveUrl.Type.signed, help);
            }
        }
        return new DescriptiveUrl(URI.create(String.format("%s://%s/public/download-shares/%s",
            bookmark.getProtocol().getScheme(),
            bookmark.getHostname(),
            share.getAccessKey())),
            DescriptiveUrl.Type.signed, help);
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map(e);
    }
    catch(CryptoException e) {
        throw new TripleCryptExceptionMappingService().map(e);
    }
}
|
@Test
public void testEncrypted() throws Exception {
    // Create an encrypted room and an empty file inside it, then request a download share.
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).createRoom(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
    final Path test = new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final DescriptiveUrl url = new SDSShareFeature(session, nodeid).toDownloadUrl(test,
        Share.Sharee.world, new CreateDownloadShareRequest()
            .expiration(new ObjectExpiration().enableExpiration(false))
            .notifyCreator(false)
            .sendMail(false)
            .sendSms(false)
            .password(null)
            .mailRecipients(null)
            .mailSubject(null)
            .mailBody(null)
            .maxDownloads(null), new DisabledPasswordCallback() {
            // Supplies the vault passphrase the feature prompts for when re-encrypting the file key.
            @Override
            public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
                return new VaultCredentials("eth[oh8uv4Eesij");
            }
        });
    assertNotEquals(DescriptiveUrl.EMPTY, url);
    assertEquals(DescriptiveUrl.Type.signed, url.getType());
    // Server is >= 4.26, so the modern share URL format is expected.
    assertTrue(url.getUrl().startsWith("https://duck.dracoon.com/public/download-shares/"));
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Rewrites the recorded last migration when its number is in the meddledSteps map,
 * marking the mapped replacement step as done. No-op when there is no last
 * migration number or no mapping for it.
 */
public void meddle(MigrationHistory migrationHistory) {
    // change last migration number on specific cases
    migrationHistory.getLastMigrationNumber()
        .ifPresent(migrationNumber -> {
            Long newMigrationNumber = meddledSteps.get(migrationNumber);
            if (newMigrationNumber != null) {
                // Take the first registered step at the new number and record it as executed.
                RegisteredMigrationStep registeredMigrationStep = migrationSteps.readFrom(newMigrationNumber).get(0);
                migrationHistory.done(registeredMigrationStep);
            }
        });
}
|
@Test
public void no_effect_if_no_last_migration_number() {
    // With no last migration number, meddle() must only read the history and touch nothing else.
    when(migrationHistory.getLastMigrationNumber()).thenReturn(Optional.empty());
    underTest.meddle(migrationHistory);
    verify(migrationHistory).getLastMigrationNumber();
    verifyNoMoreInteractions(migrationHistory, migrationSteps);
}
|
/**
 * Disposes the operator coordinators when the scheduler leaves the family of
 * states that hold an ExecutionGraph; transitions within that family keep the
 * temporary services alive.
 */
@Override
public void onLeave(Class<? extends State> newState) {
    if (StateWithExecutionGraph.class.isAssignableFrom(newState)) {
        // Still within a StateWithExecutionGraph: nothing to dispose.
        return;
    }
    operatorCoordinatorHandler.disposeAllOperatorCoordinators();
}
|
@Test
void testOperatorCoordinatorShutdownOnLeave() throws Exception {
    try (MockStateWithExecutionGraphContext context =
            new MockStateWithExecutionGraphContext()) {
        final TestingOperatorCoordinatorHandler testingOperatorCoordinatorHandler =
            new TestingOperatorCoordinatorHandler();
        final TestingStateWithExecutionGraph stateWithExecutionGraph =
            createStateWithExecutionGraph(context, testingOperatorCoordinatorHandler);
        // DummyState is not a StateWithExecutionGraph, so leaving must dispose the coordinators.
        stateWithExecutionGraph.onLeave(AdaptiveSchedulerTest.DummyState.class);
        assertThat(testingOperatorCoordinatorHandler.isDisposed()).isTrue();
    }
}
|
/**
 * Returns the cached SELECT-by-ids query for {@code n} ids, building it on first
 * use via the loadAll factory.
 */
String loadAll(int n) {
    return loadAllQueries.computeIfAbsent(n, loadAllFactory);
}
|
@Test
public void testLoadAllIsEscaped() {
    // Identifiers containing quotes must be escaped by doubling them in the generated SQL.
    Queries queries = new Queries(mappingEscape, idColumnEscape, columnMetadataEscape);
    String result = queries.loadAll(2);
    assertEquals("SELECT * FROM \"my\"\"mapping\" WHERE \"i\"\"d\" IN (?, ?)", result);
}
|
@Udf(description = "Subtracts a duration from a time")
public Time timeSub(
    @UdfParameter(description = "A unit of time, for example SECOND or HOUR") final TimeUnit unit,
    @UdfParameter(
        description = "An integer number of intervals to subtract") final Integer interval,
    @UdfParameter(description = "A TIME value.") final Time time
) {
    // SQL NULL propagation: any null argument yields null.
    if (unit == null || interval == null || time == null) {
        return null;
    }
    // Convert the TIME's millis-of-day to nanos, subtract in nanos, and let
    // LocalTime wrap around midnight (e.g. subtracting whole days is a no-op).
    final long nanoResult = LocalTime.ofNanoOfDay(time.getTime() * 1000_000)
        .minus(unit.toNanos(interval), ChronoUnit.NANOS)
        .toNanoOfDay();
    return new Time(TimeUnit.NANOSECONDS.toMillis(nanoResult));
}
|
// NOTE(review): the name says "AddToTime" but this exercises timeSub — consider renaming.
@Test
public void shouldAddToTime() {
    // When:
    assertThat(udf.timeSub(TimeUnit.MILLISECONDS, 50, new Time(1000)).getTime(), is(950L));
    // Subtracting whole days wraps around midnight and leaves the time-of-day unchanged.
    assertThat(udf.timeSub(TimeUnit.DAYS, 2, new Time(1000)).getTime(), is(1000L));
    assertThat(udf.timeSub(TimeUnit.DAYS, -2, new Time(1000)).getTime(), is(1000L));
    // Subtracting a negative interval adds.
    assertThat(udf.timeSub(TimeUnit.MINUTES, -1, new Time(60000)).getTime(), is(120000L));
}
|
/**
 * Re-keys the stream by the given key expressions. Returns {@code this} unchanged
 * when no repartition is needed, no repartition is forced, and the key format is
 * not changing; otherwise builds a stream-select-key step with the (possibly
 * forced) new key format.
 *
 * @throws KsqlException when an implicit repartition of a windowed source is required
 */
public SchemaKStream<K> selectKey(
    final FormatInfo valueFormat,
    final List<Expression> keyExpression,
    final Optional<KeyFormat> forceInternalKeyFormat,
    final Stacker contextStacker,
    final boolean forceRepartition
) {
    final boolean keyFormatChange = forceInternalKeyFormat.isPresent()
        && !forceInternalKeyFormat.get().equals(keyFormat);
    final boolean repartitionNeeded = repartitionNeeded(keyExpression);
    // Nothing to do: key format unchanged and no repartition requested or required.
    if (!keyFormatChange && !forceRepartition && !repartitionNeeded) {
        return this;
    }
    // Windowed sources cannot be implicitly repartitioned (see issue #4385);
    // only an explicitly forced repartition without a needed one is permitted.
    if ((repartitionNeeded || !forceRepartition) && keyFormat.isWindowed()) {
        throw new KsqlException(
            "Implicit repartitioning of windowed sources is not supported. "
                + "See https://github.com/confluentinc/ksql/issues/4385."
        );
    }
    final ExecutionStep<KStreamHolder<K>> step = ExecutionStepFactory
        .streamSelectKey(contextStacker, sourceStep, keyExpression);
    final KeyFormat newKeyFormat = forceInternalKeyFormat.orElse(keyFormat);
    return new SchemaKStream<>(
        step,
        resolveSchema(step),
        SerdeFeaturesFactory.sanitizeKeyFormat(
            newKeyFormat,
            toSqlTypes(keyExpression),
            true),
        ksqlConfig,
        functionRegistry
    );
}
|
@Test(expected = KsqlException.class)
public void shouldThrowOnRepartitionByMissingField() {
    // Given: a PARTITION BY on a column that does not exist in the source schema.
    final PlanNode logicalPlan = givenInitialKStreamOf(
        "SELECT col0, col2, col3 FROM test1 PARTITION BY not_here EMIT CHANGES;");
    final UserRepartitionNode repartitionNode = (UserRepartitionNode) logicalPlan.getSources().get(0).getSources().get(0);
    // When: selectKey should reject the unknown partition-by field with a KsqlException.
    initialSchemaKStream.selectKey(valueFormat.getFormatInfo(), repartitionNode.getPartitionBys(),
        Optional.empty(), childContextStacker, false
    );
}
|
/**
 * Updates the permission checkboxes for the given permission set without
 * forcing the enabled/appropriate flag (delegates with {@code false}).
 */
public void updateCheckboxes( EnumSet<RepositoryFilePermission> permissionEnumSet ) {
    updateCheckboxes( false, permissionEnumSet );
}
|
@Test
public void testUpdateCheckboxesAllPermissionsAppropriateTrue() {
    // ALL permission checks every box; read/write/delete are locked (disabled)
    // while manage remains togglable.
    permissionsCheckboxHandler.updateCheckboxes( true, EnumSet.of( RepositoryFilePermission.ALL ) );
    verify( readCheckbox, times( 1 ) ).setChecked( true );
    verify( writeCheckbox, times( 1 ) ).setChecked( true );
    verify( deleteCheckbox, times( 1 ) ).setChecked( true );
    verify( manageCheckbox, times( 1 ) ).setChecked( true );
    verify( readCheckbox, times( 1 ) ).setDisabled( true );
    verify( writeCheckbox, times( 1 ) ).setDisabled( true );
    verify( deleteCheckbox, times( 1 ) ).setDisabled( true );
    verify( manageCheckbox, times( 1 ) ).setDisabled( false );
}
|
/**
 * Removes the given attributes (name and value) from every tag in the HTML content.
 *
 * @param content the HTML text to clean
 * @param attrs   attribute names to strip, matched case-insensitively
 * @return the content with the attributes removed
 */
public static String removeHtmlAttr(String content, String... attrs) {
    String regex;
    for (final String attr : attrs) {
        // (?i)        case-insensitive matching
        // \s*         trims whitespace around the attribute name
        // [^>]+?      attribute value: at least one non-'>' char ('>' ends the tag)
        // \s+(?=>)    value followed by whitespace then '>', i.e. the last attribute; drop the space
        // (?=\s|>)    value followed by whitespace (more attributes) or '>' (last attribute)
        regex = StrUtil.format("(?i)(\\s*{}\\s*=\\s*)" +
                "(" +
                // name="xxxx"
                "([\"][^\"]+?[\"])|" +
                // name=xxx > or name=xxx> or name=xxx name2=xxx
                "([^>]+?\\s*(?=\\s|>))" +
                ")", attr);
        content = content.replaceAll(regex, StrUtil.EMPTY);
    }
    // issue#I8YV0K strip whitespace left dangling before '>' or '/>'
    content = ReUtil.replaceAll(content, "\\s+(>|/>)", "$1");
    return content;
}
|
@Test
public void issueI6YNTFTest() {
    // Quoted attribute value.
    String html = "<html><body><div class=\"a1 a2\">hello world</div></body></html>";
    String cleanText = HtmlUtil.removeHtmlAttr(html,"class");
    assertEquals("<html><body><div>hello world</div></body></html>", cleanText);
    // Unquoted attribute value.
    html = "<html><body><div class=a1>hello world</div></body></html>";
    cleanText = HtmlUtil.removeHtmlAttr(html,"class");
    assertEquals("<html><body><div>hello world</div></body></html>", cleanText);
}
|
/**
 * Checks for the existence of a file by probing its attributes.
 * A NotfoundException from the attributes lookup means the file is absent.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    try {
        new BrickAttributesFinderFeature(session).find(file);
    }
    catch(NotfoundException e) {
        return false;
    }
    return true;
}
|
@Test
public void testFindNotFound() throws Exception {
    // A randomly named file cannot exist on the server, so find() must return false.
    assertFalse(new BrickFindFeature(session).find(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file))));
}
|
/**
 * Builds the result rows for SHOW LOGICAL TABLES: resolves the default schema
 * for the database's protocol type and lists matching tables, or returns an
 * empty result when the schema does not exist.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowLogicalTablesStatement sqlStatement, final ContextManager contextManager) {
    DialectDatabaseMetaData dialectDatabaseMetaData = new DatabaseTypeRegistry(database.getProtocolType()).getDialectDatabaseMetaData();
    // Fall back to the database name when the dialect has no default schema concept.
    String schemaName = dialectDatabaseMetaData.getDefaultSchema().orElse(database.getName());
    if (null == database.getSchema(schemaName)) {
        return Collections.emptyList();
    }
    return getTables(schemaName, sqlStatement).stream().map(each -> getRow(each, sqlStatement)).collect(Collectors.toList());
}
|
@Test
void assertRowDataWithFullAndLike() {
    // FULL listing with a LIKE pattern: only the matching table should be returned,
    // with its name in cell 1 and type in cell 2.
    Collection<LocalDataQueryResultRow> actual = executor.getRows(new ShowLogicalTablesStatement(true, null, "t_order_%"), mock(ContextManager.class));
    assertThat(actual.size(), is(1));
    LocalDataQueryResultRow row = actual.iterator().next();
    assertThat(row.getCell(1), is("t_order_item"));
    assertThat(row.getCell(2), is("TABLE"));
}
|
/**
 * Parses a date-range string of the form "from-to" or a single date into a
 * DateRange. Returns null for empty input, for strings that do not split into
 * one or two parts, or when the range itself is invalid.
 *
 * @throws ParseException if a date part cannot be parsed
 */
DateRange getRange(String dateRangeString) throws ParseException {
    if (dateRangeString == null || dateRangeString.isEmpty())
        return null;
    String[] dateArr = dateRangeString.split("-");
    if (dateArr.length > 2 || dateArr.length < 1)
        return null;
    // throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed");
    ParsedCalendar from = parseDateString(dateArr[0]);
    ParsedCalendar to;
    if (dateArr.length == 2)
        to = parseDateString(dateArr[1]);
    else
        // Single date: parse the same part again so 'to' equals 'from'.
        // faster and safe?
        // to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone());
        to = parseDateString(dateArr[0]);
    try {
        return new DateRange(from, to);
    } catch (IllegalArgumentException ex) {
        // Invalid combination of from/to is reported as "no range" rather than an error.
        return null;
    }
}
|
@Test
public void testParseReverseDateRangeWithoutYearAndDay_645() throws ParseException {
    // A range ending in an earlier month ("Aug 10-Jan") wraps across the year boundary.
    DateRange dateRange = dateRangeParser.getRange("Aug 10-Jan");
    assertFalse(dateRange.isInRange(getCalendar(2016, Calendar.AUGUST, 9)));
    assertTrue(dateRange.isInRange(getCalendar(2016, Calendar.AUGUST, 10)));
    assertTrue(dateRange.isInRange(getCalendar(2016, Calendar.JANUARY, 1)));
    assertTrue(dateRange.isInRange(getCalendar(2016, Calendar.JANUARY, 20)));
    // "Jan" without a day covers the whole month, so Jan 31 is still in range.
    assertTrue(dateRange.isInRange(getCalendar(2016, Calendar.JANUARY, 31)));
    assertFalse(dateRange.isInRange(getCalendar(2016, Calendar.FEBRUARY, 1)));
}
|
/**
 * Builds the JSON event representation of a Debezium source record.
 * A record with a null value (e.g. a tombstone) yields an event with all
 * sections empty; otherwise metadata/before/after are extracted from the value.
 *
 * @param sourceRecord the record to convert; must not be null
 * @throws IllegalArgumentException if {@code sourceRecord} is null
 */
public SourceRecordJson(@Nullable SourceRecord sourceRecord) {
    if (sourceRecord == null) {
        // Descriptive message instead of a bare IllegalArgumentException.
        throw new IllegalArgumentException("sourceRecord must not be null");
    }
    this.value = (Struct) sourceRecord.value();
    if (this.value == null) {
        this.event = new Event(null, null, null);
    } else {
        Event.Metadata metadata = this.loadMetadata();
        Event.Before before = this.loadBefore();
        Event.After after = this.loadAfter();
        this.event = new Event(metadata, before, after);
    }
}
|
@Test
public void testSourceRecordJson() {
    // Serialize a fully populated record and compare against the exact expected JSON,
    // covering metadata plus the before/after field maps.
    SourceRecord record = buildSourceRecord();
    SourceRecordJson json = new SourceRecordJson(record);
    String jsonString = json.toJson();
    String expectedJson =
        "{\"metadata\":"
            + "{\"connector\":\"test-connector\",\"version\":\"version-connector\","
            + "\"name\":\"test-connector-sql\","
            + "\"database\":\"test-db\",\"schema\":\"test-schema\",\"table\":\"test-table\"},"
            + "\"before\":{\"fields\":{\"country\":null,\"distance\":123.423,\"birthYear\":null,"
            + "\"name\":\"before-name\","
            + "\"temperature\":104.4,\"childrenAndAge\":null,\"age\":16}},"
            + "\"after\":{\"fields\":{\"country\":null,\"distance\":123.423,\"birthYear\":null,"
            + "\"name\":\"after-name\","
            + "\"temperature\":104.4,\"childrenAndAge\":null,\"age\":16}}}";
    assertEquals(expectedJson, jsonString);
}
|
/**
 * Drains any completed fetches from the fetch buffer into a Fetch result.
 */
public Fetch<K, V> collectFetch() {
    return fetchCollector.collectFetch(fetchBuffer);
}
|
@Test
public void testFetchRequestInternalError() {
    buildFetcher();
    // A response with a truncated record must surface as a KafkaException on collect.
    makeFetchRequestWithIncompleteRecord();
    try {
        collectFetch();
        fail("collectFetch should have thrown a KafkaException");
    } catch (KafkaException e) {
        assertTrue(e.getMessage().startsWith("Failed to make progress reading messages"));
        // the position should not advance since no data has been returned
        assertEquals(0, subscriptions.position(tp0).offset);
    }
}
|
/**
 * Looks up a PMML model by file name and model name within the given runtime
 * context. The file name is normalized to carry the .pmml suffix before matching.
 *
 * @return the first matching model, or empty when none matches
 */
public static Optional<PMMLModel> getPMMLModel(String fileName, String modelName, PMMLRuntimeContext pmmlContext) {
    logger.trace("getPMMLModel {} {}", fileName, modelName);
    final String fileNameToUse = fileName.endsWith(PMML_SUFFIX) ? fileName : fileName + PMML_SUFFIX;
    for (PMMLModel model : getPMMLModels(pmmlContext)) {
        if (Objects.equals(fileNameToUse, model.getFileName()) && Objects.equals(modelName, model.getName())) {
            return Optional.of(model);
        }
    }
    return Optional.empty();
}
|
@Test
void getPMMLModelFromClassLoader() {
    // Load the model factory from the class loader, then resolve a model by file and name.
    modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME);
    KiePMMLModelFactory kiePmmlModelFactory = PMMLLoaderUtils.loadKiePMMLModelFactory(modelLocalUriId,
                                                                                      getPMMLContext(FILE_NAME,
                                                                                                     MODEL_NAME,
                                                                                                     memoryCompilerClassLoader));
    Optional<KiePMMLModel> retrieved = PMMLRuntimeHelper.getPMMLModel(kiePmmlModelFactory.getKiePMMLModels(),
                                                                      FILE_NAME,
                                                                      MODEL_NAME);
    assertThat(retrieved).isNotNull().isPresent();
    // A non-existent file/model pair must yield an empty Optional.
    retrieved = PMMLRuntimeHelper.getPMMLModel(kiePmmlModelFactory.getKiePMMLModels(), "FileName", "NoTestMod");
    assertThat(retrieved).isNotNull().isNotPresent();
}
|
/**
 * Overall migration progress as a whole-number percentage (0-100): the average
 * of the per-index progress values. Indices with a null progress (not started)
 * contribute zero to the sum but still count in the divisor.
 */
@JsonProperty("progress")
public int progress() {
    if (indices.isEmpty()) {
        return 100; // avoid division by zero. No indices == migration is immediately done
    }
    final BigDecimal sum = indices.stream()
        .filter(i -> i.progress() != null)
        .map(RemoteReindexMigration::indexProgress)
        .reduce(BigDecimal.ZERO, BigDecimal::add);
    // Average with 4-digit precision, then scale the fraction to a percentage.
    return sum.divide(BigDecimal.valueOf(indices.size()), 4, RoundingMode.HALF_UP).scaleByPowerOfTen(2).intValue();
}
|
@Test
void testProgressOneIndexNotStarted() {
    // A single index that has not started contributes no progress, so overall is 0%.
    final RemoteReindexMigration migration = withIndices(
        index("one", RemoteReindexingMigrationAdapter.Status.NOT_STARTED)
    );
    Assertions.assertThat(migration.progress()).isEqualTo(0);
}
|
/**
 * Evaluates whether a disrupt context applies to the request, then delegates to
 * the wrapped client.
 */
@Override
public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext)
{
    doEvaluateDisruptContext(request, requestContext);
    return _client.sendRequest(request, requestContext);
}
|
@Test
public void testSendRequest10()
{
    // The disrupt-aware client must still forward the built request (with callback) exactly once.
    when(_builder.build()).thenReturn(_request);
    when(_controller.getDisruptContext(any(String.class), any(ResourceMethod.class))).thenReturn(_disrupt);
    _client.sendRequest(_builder, _behavior);
    verify(_underlying, times(1)).sendRequest(eq(_request), any(RequestContext.class), eq(_behavior));
}
|
/**
 * Factory hook for creating the JobMeta; overridable in tests to supply a stub.
 */
protected JobMeta instantiateJobMeta() {
    return new JobMeta();
}
|
@Test
public void testInstantiateJobMeta() {
    // The factory hook must always return a fresh, non-null JobMeta.
    JobMeta jobMeta = zipService.instantiateJobMeta();
    assertNotNull( jobMeta );
}
|
/**
 * Null-safe comparison of two dates, delegating to CompareUtil.
 *
 * @return negative, zero, or positive as date1 is before, equal to, or after date2
 */
public static int compare(Date date1, Date date2) {
    return CompareUtil.compare(date1, date2);
}
|
@Test
public void compareTest() {
    final Date date1 = DateUtil.parse("2021-04-13 23:59:59.999");
    final Date date2 = DateUtil.parse("2021-04-13 23:59:10");
    // Full precision: date1 is later.
    assertEquals(1, DateUtil.compare(date1, date2));
    assertEquals(1, DateUtil.compare(date1, date2, DatePattern.NORM_DATETIME_PATTERN));
    // Truncated to day or minute the two dates are equal.
    assertEquals(0, DateUtil.compare(date1, date2, DatePattern.NORM_DATE_PATTERN));
    assertEquals(0, DateUtil.compare(date1, date2, DatePattern.NORM_DATETIME_MINUTE_PATTERN));
    final Date date11 = DateUtil.parse("2021-04-13 23:59:59.999");
    final Date date22 = DateUtil.parse("2021-04-11 23:10:10");
    // Different days but the same month compare equal at month precision.
    assertEquals(0, DateUtil.compare(date11, date22, DatePattern.NORM_MONTH_PATTERN));
}
|
/**
 * Factory for the "user already exists" bad-request error, embedding the user name.
 */
public static BadRequestException userAlreadyExists(String userName) {
    return new BadRequestException("user already exists for userName:%s", userName);
}
|
@Test
public void testUserAlreadyExists(){
    // The factory must format the user name into the exception message.
    BadRequestException userAlreadyExists = BadRequestException.userAlreadyExists("user");
    assertEquals("user already exists for userName:user", userAlreadyExists.getMessage());
}
|
/**
 * Seeds the new run's rollup for a foreach step: takes the previous run's
 * rollup and subtracts (via initiateStepRollup) the aggregated rollup of the
 * iterations that will be re-run in the new run.
 */
@VisibleForTesting
void initializeForeachArtifactRollup(
    ForeachStepOverview foreachOverview,
    ForeachStepOverview prevForeachOverview,
    String foreachWorkflowId) {
    Set<Long> iterationsToRunInNewRun =
        foreachOverview.getIterationsToRunFromDetails(prevForeachOverview);
    WorkflowRollupOverview aggregatedRollupsPrevRun =
        getAggregatedRollupFromIterations(foreachWorkflowId, iterationsToRunInNewRun);
    foreachOverview.initiateStepRollup(prevForeachOverview.getRollup(), aggregatedRollupsPrevRun);
}
|
@Test
public void testGetAggregatedRollupFromIterationsNotManyUneven() {
    // Three iterations (fewer than a full batch): expect a single DAO call
    // carrying all three iteration ids in order.
    ArgumentCaptor<List<Long>> captor = ArgumentCaptor.forClass(List.class);
    Set<Long> iterations = LongStream.rangeClosed(1, 3).boxed().collect(Collectors.toSet());
    doReturn(Collections.singletonList(new WorkflowRollupOverview()))
        .when(workflowInstanceDao)
        .getBatchForeachLatestRunRollupForIterations(anyString(), any());
    ForeachStepOverview stepOverview = mock(ForeachStepOverview.class);
    ForeachStepOverview prevStepOverview = new ForeachStepOverview();
    doReturn(iterations).when(stepOverview).getIterationsToRunFromDetails(any());
    foreachStepRuntime.initializeForeachArtifactRollup(
        stepOverview, prevStepOverview, "myworkflowid");
    Mockito.verify(workflowInstanceDao, times(1))
        .getBatchForeachLatestRunRollupForIterations(eq("myworkflowid"), captor.capture());
    List<List<Long>> values = captor.getAllValues();
    assertEquals(1, values.get(0).get(0).longValue());
    assertEquals(2, values.get(0).get(1).longValue());
    assertEquals(3, values.get(0).get(2).longValue());
    assertEquals(3, values.get(0).size());
}
|
/**
 * Rebuilds the module state registry and returns a defensive snapshot of all states.
 */
public Set<ModuleState> getAllModuleStates() {
    reBuildModuleState();
    return new HashSet<>(moduleStates.values());
}
|
@Test
void testGetAllModuleStates() {
    // The rebuilt registry is expected to hold exactly two module states in this fixture.
    assertEquals(2, ModuleStateHolder.getInstance().getAllModuleStates().size());
}
|
/**
 * Convenience overload: checks annotation presence by the annotation type's
 * fully qualified class name.
 */
@Override
@PublicAPI(usage = ACCESS)
public boolean isAnnotatedWith(Class<? extends Annotation> annotationType) {
    return isAnnotatedWith(annotationType.getName());
}
|
@Test
public void isAnnotatedWith_predicate() {
    // The predicate overload must simply reflect the predicate's own verdict.
    assertThat(importClassWithContext(Parent.class)
            .isAnnotatedWith(DescribedPredicate.alwaysTrue()))
            .as("predicate matches").isTrue();
    assertThat(importClassWithContext(Parent.class)
            .isAnnotatedWith(DescribedPredicate.alwaysFalse()))
            .as("predicate matches").isFalse();
}
|
/**
 * Executes a single replication task and maps its outcome:
 * 2xx -> Success, 503 -> Congestion (retry later), other statuses -> PermanentError.
 * Exceptions are classified as Congestion (read timeout), TransientError
 * (network/connect failure), or PermanentError (anything else).
 */
@Override
public ProcessingResult process(ReplicationTask task) {
    try {
        EurekaHttpResponse<?> httpResponse = task.execute();
        int statusCode = httpResponse.getStatusCode();
        Object entity = httpResponse.getEntity();
        if (logger.isDebugEnabled()) {
            logger.debug("Replication task {} completed with status {}, (includes entity {})", task.getTaskName(), statusCode, entity != null);
        }
        if (isSuccess(statusCode)) {
            task.handleSuccess();
        } else if (statusCode == 503) {
            logger.debug("Server busy (503) reply for task {}", task.getTaskName());
            return ProcessingResult.Congestion;
        } else {
            // Non-503 failure statuses are not retried; let the task record the failure.
            task.handleFailure(statusCode, entity);
            return ProcessingResult.PermanentError;
        }
    } catch (Throwable e) {
        if (maybeReadTimeOut(e)) {
            logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e);
            //read timeout exception is more Congestion then TransientError, return Congestion for longer delay
            return ProcessingResult.Congestion;
        } else if (isNetworkConnectException(e)) {
            // Sampled logging to avoid flooding the log during sustained outages.
            logNetworkErrorSample(task, e);
            return ProcessingResult.TransientError;
        } else {
            logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception",
                peerId, task.getTaskName(), e);
            return ProcessingResult.PermanentError;
        }
    }
    return ProcessingResult.Success;
}
|
@Test
public void testBatchableTaskNetworkFailureHandling() throws Exception {
    // A network error must be classified as TransientError and leave the task pending for retry.
    TestableInstanceReplicationTask task = aReplicationTask().build();
    replicationClient.withNetworkError(1);
    ProcessingResult status = replicationTaskProcessor.process(Collections.<ReplicationTask>singletonList(task));
    assertThat(status, is(ProcessingResult.TransientError));
    assertThat(task.getProcessingState(), is(ProcessingState.Pending));
}
|
@Subscribe
public void onChatMessage(ChatMessage event)
{
	// Fishing feedback arrives only as SPAM-typed chat messages.
	if (event.getType() != ChatMessageType.SPAM)
	{
		return;
	}
	final String chatText = event.getMessage();
	// A catch message marks the session active and reveals the spot overlays.
	if (FISHING_CATCH_REGEX.matcher(chatText).find())
	{
		session.setLastFishCaught(Instant.now());
		spotOverlay.setHidden(false);
		fishingSpotMinimapOverlay.setHidden(false);
	}
	// A flying fish eating minnows optionally triggers a notification.
	if (chatText.equals("A flying fish jumps up and eats some of your minnows!"))
	{
		notifier.notify(config.flyingFishNotification(), "A flying fish is eating your minnows!");
	}
}
|
@Test
public void testAnglerfish()
{
// A SPAM-typed catch message should stamp the session's last-caught time.
ChatMessage chatMessage = new ChatMessage();
chatMessage.setType(ChatMessageType.SPAM);
chatMessage.setMessage("You catch an Anglerfish.");
fishingPlugin.onChatMessage(chatMessage);
assertNotNull(fishingPlugin.getSession().getLastFishCaught());
}
|
@Override
// Imports albums and photos into Backblaze B2, tracking total imported bytes.
public ImportResult importItem(
UUID jobId,
IdempotentImportExecutor idempotentExecutor,
TokenSecretAuthData authData,
PhotosContainerResource data)
throws Exception {
if (data == null) {
// Nothing to do
return ImportResult.OK;
}
BackblazeDataTransferClient b2Client = b2ClientFactory.getOrCreateB2Client(jobId, authData);
if (data.getAlbums() != null && data.getAlbums().size() > 0) {
// Cache album names idempotently so photo imports can resolve them later.
for (PhotoAlbum album : data.getAlbums()) {
idempotentExecutor.executeAndSwallowIOExceptions(
album.getId(),
String.format("Caching album name for album '%s'", album.getId()),
() -> album.getName());
}
}
// Accumulates the byte count across all successfully imported photos.
final LongAdder totalImportedFilesSizes = new LongAdder();
if (data.getPhotos() != null && data.getPhotos().size() > 0) {
for (PhotoModel photo : data.getPhotos()) {
idempotentExecutor.importAndSwallowIOExceptions(
photo,
p -> {
ItemImportResult<String> fileImportResult =
importSinglePhoto(idempotentExecutor, b2Client, jobId, p);
if (fileImportResult.hasBytes()) {
totalImportedFilesSizes.add(fileImportResult.getBytes());
}
return fileImportResult;
});
}
}
return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue());
}
|
@Test
public void testEmptyPhotosAndAlbums() throws Exception {
// Empty album and photo lists should import cleanly and report OK.
PhotosContainerResource data = mock(PhotosContainerResource.class);
when(data.getAlbums()).thenReturn(new ArrayList<>());
when(data.getPhotos()).thenReturn(new ArrayList<>());
BackblazePhotosImporter sut =
new BackblazePhotosImporter(monitor, dataStore, streamProvider, clientFactory);
ImportResult result = sut.importItem(UUID.randomUUID(), executor, authData, data);
assertEquals(ImportResult.ResultType.OK, result.getType());
}
|
// Validates the identifiers, creates the container if it is missing
// (the 'true' flag requests creation), and returns operations bound to it.
public CosmosDbContainerOperations createContainerIfNotExistAndGetContainerOperations(
final String containerId, final String containerPartitionKeyPath, final ThroughputProperties throughputProperties,
final IndexingPolicy indexingPolicy) {
CosmosDbUtils.validateIfParameterIsNotEmpty(containerId, PARAM_CONTAINER_ID);
CosmosDbUtils.validateIfParameterIsNotEmpty(containerPartitionKeyPath, PARAM_CONTAINER_PARTITION_KEY_PATH);
return new CosmosDbContainerOperations(
getAndCreateContainerIfNotExist(containerId, containerPartitionKeyPath, true, throughputProperties,
indexingPolicy));
}
|
@Test
void createContainerIfNotExistAndGetContainerOperations() {
final CosmosAsyncDatabase database = mock(CosmosAsyncDatabase.class);
final CosmosAsyncContainer containerNew = mock(CosmosAsyncContainer.class);
final CosmosAsyncContainer containerExisting = mock(CosmosAsyncContainer.class);
when(containerNew.getId()).thenReturn("container-new");
when(containerExisting.getId()).thenReturn("container-existing");
when(database.getContainer("container-new")).thenReturn(containerNew);
when(database.getContainer("container-existing")).thenReturn(containerExisting);
when(database.createContainerIfNotExists(any(), any(), any()))
.thenReturn(Mono.just(mock(CosmosContainerResponse.class)));
final CosmosDbDatabaseOperations databaseOperations = new CosmosDbDatabaseOperations(Mono.just(database));
// assert params
CosmosDbTestUtils.assertIllegalArgumentException(
() -> databaseOperations.createContainerIfNotExistAndGetContainerOperations(null, null, null, null));
CosmosDbTestUtils.assertIllegalArgumentException(
() -> databaseOperations.createContainerIfNotExistAndGetContainerOperations("", null, null, null));
CosmosDbTestUtils.assertIllegalArgumentException(
() -> databaseOperations.createContainerIfNotExistAndGetContainerOperations("", "", null, null));
CosmosDbTestUtils.assertIllegalArgumentException(
() -> databaseOperations.createContainerIfNotExistAndGetContainerOperations("test", "", null, null));
CosmosDbTestUtils.assertIllegalArgumentException(
() -> databaseOperations.createContainerIfNotExistAndGetContainerOperations("", "test", null, null));
CosmosDbTestUtils.assertIllegalArgumentException(() -> databaseOperations.getContainerOperations(null));
CosmosDbTestUtils.assertIllegalArgumentException(() -> databaseOperations.getContainerOperations(""));
// Happy path: creating a missing container and fetching an existing one.
assertEquals("container-new", databaseOperations
.createContainerIfNotExistAndGetContainerOperations("container-new", "/path", null, null).getContainerId()
.block());
assertEquals("container-existing",
databaseOperations.getContainerOperations("container-existing").getContainerId().block());
}
|
/**
 * Builds the iterator request for the given shard using the first matching
 * specification, falling back to the default request when none matches.
 */
public GetShardIteratorRequest request(String stream, Shard shard) {
    return specifications.stream()
            .filter(specification -> specification.matches(shard))
            .findFirst()
            .map(specification -> specification.request(stream, shard))
            .orElseGet(() -> defaultRequest(stream, shard));
}
|
@Test
public void unspecified() {
// A shard matched by no specification should get the default request.
assertEquals(
new GetShardIteratorRequest()
.withShardId(SHARD3.getShardId())
.withStreamName(STREAM)
.withShardIteratorType(ShardIteratorType.AT_SEQUENCE_NUMBER)
.withStartingSequenceNumber("1500"),
iterators.request(STREAM, SHARD3)
);
}
|
@Override
public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) {
    // Consume exactly parameterValueLength bytes of the parameter value from the packet buffer.
    byte[] bytes = new byte[parameterValueLength];
    payload.getByteBuf().readBytes(bytes);
    // Decode with an explicit charset instead of `new String(bytes)`, which used the JVM's
    // platform default charset and made the decoded value environment-dependent.
    // ISO-8859-1 maps every byte 1:1 to a char, preserving the raw wire bytes.
    // NOTE(review): if the payload exposes the negotiated client charset, decoding with
    // that charset would be the more faithful fix — confirm the payload API.
    String result = new String(bytes, java.nio.charset.StandardCharsets.ISO_8859_1);
    return new PostgreSQLTypeUnspecifiedSQLParameter(result);
}
|
@Test
void assertRead() {
String timestampStr = "2020-08-23 15:57:03+08";
// The buffer holds a 4-byte length prefix followed by the raw text.
int expectedLength = 4 + timestampStr.length();
ByteBuf byteBuf = ByteBufTestUtils.createByteBuf(expectedLength);
byteBuf.writeInt(timestampStr.length());
byteBuf.writeCharSequence(timestampStr, StandardCharsets.ISO_8859_1);
// Skip the length prefix; read() is handed only the value bytes.
byteBuf.readInt();
PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
Object actual = new PostgreSQLUnspecifiedBinaryProtocolValue().read(payload, timestampStr.length());
assertThat(actual, instanceOf(PostgreSQLTypeUnspecifiedSQLParameter.class));
assertThat(actual.toString(), is(timestampStr));
// The reader must have consumed the whole buffer.
assertThat(byteBuf.readerIndex(), is(expectedLength));
}
|
@Override
public void deleteTenant(Long id) {
// Validate that the tenant exists and is allowed to be modified
validateUpdateTenant(id);
// Delete the tenant record
tenantMapper.deleteById(id);
}
|
@Test
public void testDeleteTenant_system() {
// mock data: a tenant on the built-in system package
TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(PACKAGE_ID_SYSTEM));
tenantMapper.insert(dbTenant);// seed an existing record (in place of an @Sql fixture)
// prepare parameters
Long id = dbTenant.getId();
// invoke and assert the expected exception: system tenants must not be deleted
assertServiceException(() -> tenantService.deleteTenant(id), TENANT_CAN_NOT_UPDATE_SYSTEM);
}
|
@Override
// Delegates to the shared rewrite hook; returns the operator (possibly rebuilt).
public ScalarOperator visitSubfield(SubfieldOperator operator, Void context) {
return shuttleIfUpdate(operator);
}
|
@Test
void testSubfieldOperator() {
ColumnRefOperator column1 = new ColumnRefOperator(1, INT, "id", true);
SubfieldOperator operator = new SubfieldOperator(column1, INT, Lists.newArrayList("a"));
// Both shuttle variants should leave an unchanged operator intact.
{
ScalarOperator newOperator = shuttle.visitSubfield(operator, null);
assertEquals(operator, newOperator);
}
{
ScalarOperator newOperator = shuttle2.visitSubfield(operator, null);
assertEquals(operator, newOperator);
}
}
|
// Accessor for the shared RocketMQ log-collect client singleton.
public static RocketMQLogCollectClient getRocketMqLogCollectClient() {
return ROCKET_MQ_LOG_COLLECT_CLIENT;
}
|
@Test
public void testGetRocketMqLogCollectClient() {
// The handler should expose a client of the expected concrete type.
Assertions.assertEquals(LoggingRocketMQPluginDataHandler.getRocketMqLogCollectClient().getClass(), RocketMQLogCollectClient.class);
}
|
/**
 * Deep-merges map {@code b} into map {@code a} without mutating either argument.
 * Nested maps merge recursively, collections concatenate ({@code a}'s elements
 * first), and for any other clash {@code b}'s value wins. Returns {@code null}
 * only when both inputs are {@code null}.
 */
public static Map<String, Object> merge(Map<String, Object> a, Map<String, Object> b) {
    if (a == null && b == null) {
        return null;
    }
    if (a == null || a.isEmpty()) {
        return copyMap(b);
    }
    if (b == null || b.isEmpty()) {
        return copyMap(a);
    }
    // Start from a copy of 'a' and overlay each of b's entries with its merged value.
    // (Replaces the previous raw-typed, side-effecting three-arg collect and its
    // dead try/catch: nothing in the collection-concatenation path throws a
    // checked exception.)
    Map<String, Object> result = copyMap(a);
    for (Map.Entry<String, Object> entry : b.entrySet()) {
        result.put(entry.getKey(), mergeValue(result.get(entry.getKey()), entry.getValue()));
    }
    return result;
}

/**
 * Resolves a single merged value: a null side yields the other side; two maps
 * merge recursively; two collections concatenate (original elements first) into
 * an unmodifiable list; otherwise the new value wins.
 */
@SuppressWarnings("unchecked")
private static Object mergeValue(Object original, Object value) {
    if (value == null) {
        return original; // also covers both-null -> null
    }
    if (original == null) {
        return value;
    }
    if (value instanceof Map && original instanceof Map) {
        return merge((Map<String, Object>) original, (Map<String, Object>) value);
    }
    if (value instanceof Collection && original instanceof Collection) {
        // Unmodifiable result, matching the previous Stream.toList() semantics.
        return java.util.stream.Stream.concat(
                ((Collection<?>) original).stream(),
                ((Collection<?>) value).stream())
            .toList();
    }
    return value;
}
|
@SuppressWarnings("unchecked")
@Test
void merge() {
Map<String, Object> a = Map.of(
"map", Map.of(
"map_a", "a",
"map_b", "b",
"map_c", "c"
),
"string", "a",
"int", 1,
"lists", Collections.singletonList(1)
);
Map<String, Object> b = Map.of(
"map", Map.of(
"map_c", "e",
"map_d", "d"
),
"string", "b",
"float", 1F,
"lists", Collections.singletonList(2)
);
Map<String, Object> merge = MapUtils.merge(a, b);
// Nested maps merge key-wise, with b's value winning on the clash (map_c).
assertThat(((Map<String, Object>) merge.get("map")).size(), is(4));
assertThat(((Map<String, Object>) merge.get("map")).get("map_c"), is("e"));
// Scalars from b override; keys unique to either side survive.
assertThat(merge.get("string"), is("b"));
assertThat(merge.get("int"), is(1));
assertThat(merge.get("float"), is(1F));
// Collections concatenate rather than replace.
assertThat((List<?>) merge.get("lists"), hasSize(2));
}
|
@GetMapping("/plugin/deleteAll")
// Clears every locally cached plugin via the subscriber's refresh-all hook.
public Mono<String> deleteAll() {
LOG.info("delete all apache shenyu local plugin");
subscriber.refreshPluginDataAll();
return Mono.just(Constants.SUCCESS);
}
|
@Test
public void testDeleteAll() throws Exception {
final String[] testPluginName = {"testDeleteAllPluginName", "testDeleteAllPluginName2"};
// Seed two plugins into the cache and confirm they are present.
Arrays.stream(testPluginName).map(s ->
new PluginData("id", s, null, null, null, null))
.forEach(subscriber::onSubscribe);
Arrays.stream(testPluginName)
.forEach(s -> assertThat(baseDataCache.obtainPluginData(s)).isNotNull());
// Hitting the deleteAll endpoint should wipe the whole cache.
this.mockMvc
.perform(MockMvcRequestBuilders.get("/shenyu/plugin/deleteAll"))
.andExpect(status().isOk())
.andReturn();
Arrays.stream(testPluginName)
.forEach(s -> assertThat(baseDataCache.obtainPluginData(s)).isNull());
}
|
@Override
public void lock() {
    try {
        // -1 lease time with a null unit selects the default (watchdog-managed) lease.
        lockInterruptibly(-1, null);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption,
        // and preserve the cause instead of throwing a bare IllegalStateException.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(e);
    }
}
|
@Test
public void testAutoExpire() throws InterruptedException {
final CountDownLatch latch = new CountDownLatch(1);
RedissonClient r = createInstance();
// Acquire the lock on a separate client, then keep that thread alive
// long enough for its client to be shut down while holding the lock.
Thread t = new Thread() {
@Override
public void run() {
RLock lock = r.getSpinLock("lock");
lock.lock();
latch.countDown();
try {
Thread.sleep(15000);
} catch (InterruptedException e) {
// Best-effort sleep in a test thread; just log the interruption.
e.printStackTrace();
}
}
};
t.start();
Assertions.assertTrue(latch.await(1, TimeUnit.SECONDS));
RLock lock = redisson.getSpinLock("lock");
t.join();
r.shutdown();
// After the owning client is gone, the watchdog lease must expire the lock.
await().atMost(redisson.getConfig().getLockWatchdogTimeout(), TimeUnit.MILLISECONDS).until(() -> !lock.isLocked());
}
|
@Override
// Convenience overload using the consumer's configured default API timeout.
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
return offsetsForTimes(timestampsToSearch, Duration.ofMillis(defaultApiTimeoutMs));
}
|
@Test
public void testOffsetsForTimesWithZeroTimeout() {
consumer = newConsumer();
TopicPartition tp = new TopicPartition("topic1", 0);
// With a zero timeout no lookup can complete, so the partition maps to null.
Map<TopicPartition, OffsetAndTimestamp> expectedResult = Collections.singletonMap(tp, null);
Map<TopicPartition, Long> timestampToSearch = Collections.singletonMap(tp, 5L);
Map<TopicPartition, OffsetAndTimestamp> result =
assertDoesNotThrow(() -> consumer.offsetsForTimes(timestampToSearch, Duration.ZERO));
assertEquals(expectedResult, result);
// No blocking ListOffsets event should have been dispatched.
verify(applicationEventHandler, never()).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
}
|
/**
 * Null-safe unboxing: returns the wrapped int, or 0 when {@code value} is null.
 */
public static int intOrZero(Integer value) {
    return value == null ? 0 : value;
}
|
@Test
public void testNullSafeInt() {
// null unboxes to 0; any real value passes through unchanged.
assertEquals(0, Apiary.intOrZero(null));
Integer value = 82348902;
assertEquals(value.intValue(), Apiary.intOrZero(value));
}
|
@Override
// Maps the single original output to its single replacement output.
public Map<PCollection<?>, ReplacementOutput> mapOutputs(
Map<TupleTag<?>, PCollection<?>> outputs, PCollection<T> newOutput) {
return ReplacementOutputs.singleton(outputs, newOutput);
}
|
@Test
public void mapOutputsSucceeds() {
PCollection<Long> original = pipeline.apply("Original", GenerateSequence.from(0));
PCollection<Long> replacement = pipeline.apply("Replacement", GenerateSequence.from(0));
Map<PCollection<?>, ReplacementOutput> mapping =
factory.mapOutputs(PValues.expandOutput(original), replacement);
// The replacement collection should map back to the original's tagged value.
assertThat(
mapping,
Matchers.hasEntry(
replacement,
ReplacementOutput.of(
TaggedPValue.ofExpandedValue(original),
TaggedPValue.ofExpandedValue(replacement))));
}
|
// Instantiates the plugin described by the option, wrapping resolution
// failures in a CucumberException.
Plugin create(Options.Plugin plugin) {
try {
return instantiate(plugin.pluginString(), plugin.pluginClass(), plugin.argument());
} catch (IOException | URISyntaxException e) {
throw new CucumberException(e);
}
}
|
@Test
void instantiates_custom_string_arg_plugin() {
// The text after ':' should be passed to the plugin's String constructor.
PluginOption option = parse(WantsString.class.getName() + ":hello");
WantsString plugin = (WantsString) fc.create(option);
assertThat(plugin.arg, is(equalTo("hello")));
}
|
@Override
// Convenience overload using the consumer's configured default API timeout.
public void commitSync() {
commitSync(Duration.ofMillis(defaultApiTimeoutMs));
}
|
@Test
public void testCommitSyncAwaitsCommitAsyncCompletionWithNonEmptyOffsets() {
final TopicPartition tp = new TopicPartition("foo", 0);
final CompletableFuture<Void> asyncCommitFuture = setUpConsumerWithIncompleteAsyncCommit(tp);
// Mock to complete sync event
completeCommitSyncApplicationEventSuccessfully();
// Commit async is not completed yet, so commit sync should wait for it to complete (time out)
assertThrows(TimeoutException.class, () -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
// Complete async commit event
asyncCommitFuture.complete(null);
// Commit async is completed, so commit sync does not need to wait before committing its offsets
assertDoesNotThrow(() -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100)));
}
|
@VisibleForTesting
// Test-only accessor for the path -> AlluxioURI resolver cache.
LoadingCache<String, AlluxioURI> getPathResolverCache() {
return mPathResolverCache;
}
|
@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu")
@Ignore
public void pathTranslation() {
// FUSE-side paths should resolve to Alluxio URIs under the mounted root.
final LoadingCache<String, AlluxioURI> resolver = mFuseFs.getPathResolverCache();
AlluxioURI expected = new AlluxioURI(TEST_ROOT_PATH);
AlluxioURI actual = resolver.apply("/");
assertEquals("/ should resolve to " + expected, expected, actual);
expected = new AlluxioURI(TEST_ROOT_PATH + "/home/foo");
actual = resolver.apply("/home/foo");
assertEquals("/home/foo should resolve to " + expected, expected, actual);
}
|
// Returns every installation recorded for the given content pack id;
// the cursor is closed via try-with-resources after the snapshot copy.
public Set<ContentPackInstallation> findByContentPackId(ModelId id) {
final DBQuery.Query query = DBQuery.is(ContentPackInstallation.FIELD_CONTENT_PACK_ID, id);
try (final DBCursor<ContentPackInstallation> installations = dbCollection.find(query)) {
return ImmutableSet.copyOf((Iterator<ContentPackInstallation>) installations);
}
}
|
@Test
@MongoDBFixtures("ContentPackInstallationPersistenceServiceTest.json")
public void findByContentPackIdWithInvalidId() {
// An unknown content pack id should yield an empty set, not null or an error.
final Set<ContentPackInstallation> contentPacks = persistenceService.findByContentPackId(ModelId.of("does-not-exist"));
assertThat(contentPacks).isEmpty();
}
|
// Builds a statement with the parse tree's referenced sources resolved up front.
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldSupportExplicitEmitFinalOnBareQuery() {
// Given:
final SingleStatementContext stmt =
givenQuery("SELECT * FROM TEST1 EMIT FINAL;");
// When:
final Query result = (Query) builder.buildStatement(stmt);
// Then: EMIT FINAL makes it a push query with FINAL output refinement.
assertThat("Should be push", result.isPullQuery(), is(false));
assertThat(result.getRefinement().get().getOutputRefinement(), is(OutputRefinement.FINAL));
}
|
/**
 * Validates this collection and every contained job, returning true only when
 * all of them are error-free.
 */
public boolean validateTree(ValidationContext validationContext) {
    // Validate the collection itself first; its own errors land in errors().
    validate(validationContext);
    boolean isValid = errors().isEmpty();
    // Validate every child unconditionally so each job accumulates its own
    // errors even after an earlier failure.
    for (JobConfig jobConfig : this) {
        final boolean childValid = jobConfig.validateTree(validationContext);
        isValid = childValid && isValid;
    }
    return isValid;
}
|
@Test
public void shouldReturnFalseIfAnyDescendentIsInvalid() {
// One invalid child should make the whole collection invalid,
// and the child's validateTree must still have been invoked.
JobConfig jobConfig = mock(JobConfig.class);
when(jobConfig.validateTree(any(PipelineConfigSaveValidationContext.class))).thenReturn(false);
JobConfigs jobConfigs = new JobConfigs(jobConfig);
boolean isValid = jobConfigs.validateTree(PipelineConfigSaveValidationContext.forChain(true, "group", new PipelineConfig()));
assertFalse(isValid);
verify(jobConfig).validateTree(any(PipelineConfigSaveValidationContext.class));
}
|
// Bridges Flink's var-args call into the wrapped Hive UDTF, converting each
// argument to its Hive representation before processing.
public void eval(Object... args) throws HiveException {
// When the parameter is (Integer, Array[Double]), Flink calls udf.eval(Integer,
// Array[Double]), which is not a problem.
// But when the parameter is a single array, Flink calls udf.eval(Array[Double]),
// at this point java's var-args will cast Array[Double] to Array[Object] and let it be
// Object... args, So we need wrap it.
if (isArgsSingleArray) {
args = new Object[] {args};
}
checkArgument(args.length == conversions.length);
// Skip per-argument conversion entirely when every converter is the identity.
if (!allIdentityConverter) {
for (int i = 0; i < args.length; i++) {
args[i] = conversions[i].toHiveObject(args[i]);
}
}
function.process(args);
}
|
@Test
public void testStack() throws Exception {
// stack(2, a, b, c, d) splits four values into two rows of two columns each.
Object[] constantArgs = new Object[] {2, null, null, null, null};
DataType[] dataTypes =
new DataType[] {
DataTypes.INT(),
DataTypes.STRING(),
DataTypes.STRING(),
DataTypes.STRING(),
DataTypes.STRING()
};
HiveGenericUDTF udf = init(GenericUDTFStack.class, constantArgs, dataTypes);
udf.eval(2, "a", "b", "c", "d");
assertThat(collector.result).isEqualTo(Arrays.asList(Row.of("a", "b"), Row.of("c", "d")));
}
|
/**
 * Reconstructs a {@link FunctionConfig} from its protobuf {@code FunctionDetails}
 * representation, copying identity, input/output specs, retry, window, secrets,
 * resource, and runtime settings field by field.
 */
public static FunctionConfig convertFromDetails(FunctionDetails functionDetails) {
functionDetails = validateFunctionDetails(functionDetails);
FunctionConfig functionConfig = new FunctionConfig();
// Identity and parallelism.
functionConfig.setTenant(functionDetails.getTenant());
functionConfig.setNamespace(functionDetails.getNamespace());
functionConfig.setName(functionDetails.getName());
functionConfig.setParallelism(functionDetails.getParallelism());
functionConfig.setProcessingGuarantees(
FunctionCommon.convertProcessingGuarantee(functionDetails.getProcessingGuarantees()));
// Rebuild one ConsumerConfig per input topic, copying only the fields
// that were actually set in the spec.
Map<String, ConsumerConfig> consumerConfigMap = new HashMap<>();
for (Map.Entry<String, Function.ConsumerSpec> input : functionDetails.getSource().getInputSpecsMap()
.entrySet()) {
ConsumerConfig consumerConfig = new ConsumerConfig();
if (isNotEmpty(input.getValue().getSerdeClassName())) {
consumerConfig.setSerdeClassName(input.getValue().getSerdeClassName());
}
if (isNotEmpty(input.getValue().getSchemaType())) {
consumerConfig.setSchemaType(input.getValue().getSchemaType());
}
if (input.getValue().hasReceiverQueueSize()) {
consumerConfig.setReceiverQueueSize(input.getValue().getReceiverQueueSize().getValue());
}
if (input.getValue().hasCryptoSpec()) {
consumerConfig.setCryptoConfig(CryptoUtils.convertFromSpec(input.getValue().getCryptoSpec()));
}
consumerConfig.setRegexPattern(input.getValue().getIsRegexPattern());
consumerConfig.setSchemaProperties(input.getValue().getSchemaPropertiesMap());
consumerConfig.setPoolMessages(input.getValue().getPoolMessages());
consumerConfigMap.put(input.getKey(), consumerConfig);
}
functionConfig.setInputSpecs(consumerConfigMap);
if (!isEmpty(functionDetails.getSource().getSubscriptionName())) {
functionConfig.setSubName(functionDetails.getSource().getSubscriptionName());
}
functionConfig.setRetainOrdering(functionDetails.getRetainOrdering());
functionConfig.setRetainKeyOrdering(functionDetails.getRetainKeyOrdering());
functionConfig.setCleanupSubscription(functionDetails.getSource().getCleanupSubscription());
functionConfig.setAutoAck(functionDetails.getAutoAck());
// Set subscription position
functionConfig.setSubscriptionPosition(
convertFromFunctionDetailsSubscriptionPosition(functionDetails.getSource().getSubscriptionPosition()));
// 0 means "no timeout configured" in the protobuf representation.
if (functionDetails.getSource().getTimeoutMs() != 0) {
functionConfig.setTimeoutMs(functionDetails.getSource().getTimeoutMs());
}
// Sink/output side: topic, serde, schema, and producer settings.
if (!isEmpty(functionDetails.getSink().getTopic())) {
functionConfig.setOutput(functionDetails.getSink().getTopic());
}
if (!isEmpty(functionDetails.getSink().getSerDeClassName())) {
functionConfig.setOutputSerdeClassName(functionDetails.getSink().getSerDeClassName());
}
if (!isEmpty(functionDetails.getSink().getSchemaType())) {
functionConfig.setOutputSchemaType(functionDetails.getSink().getSchemaType());
}
if (functionDetails.getSink().getProducerSpec() != null) {
functionConfig.setProducerConfig(
convertProducerSpecToProducerConfig(functionDetails.getSink().getProducerSpec()));
}
if (!isEmpty(functionDetails.getLogTopic())) {
functionConfig.setLogTopic(functionDetails.getLogTopic());
}
if (functionDetails.getSink().getForwardSourceMessageProperty()) {
functionConfig.setForwardSourceMessageProperty(functionDetails.getSink().getForwardSourceMessageProperty());
}
functionConfig.setRuntime(FunctionCommon.convertRuntime(functionDetails.getRuntime()));
if (functionDetails.hasRetryDetails()) {
functionConfig.setMaxMessageRetries(functionDetails.getRetryDetails().getMaxMessageRetries());
if (!isEmpty(functionDetails.getRetryDetails().getDeadLetterTopic())) {
functionConfig.setDeadLetterTopic(functionDetails.getRetryDetails().getDeadLetterTopic());
}
}
// User config is stored as JSON in the details; decode it (or start empty).
Map<String, Object> userConfig;
if (!isEmpty(functionDetails.getUserConfig())) {
Type type = new TypeToken<Map<String, Object>>() {
}.getType();
userConfig = new Gson().fromJson(functionDetails.getUserConfig(), type);
} else {
userConfig = new HashMap<>();
}
// Window functions smuggle their config through the user config map; extract
// it, and use the actual (user) class name rather than the wrapper's.
if (userConfig.containsKey(WindowConfig.WINDOW_CONFIG_KEY)) {
WindowConfig windowConfig = new Gson().fromJson(
(new Gson().toJson(userConfig.get(WindowConfig.WINDOW_CONFIG_KEY))),
WindowConfig.class);
userConfig.remove(WindowConfig.WINDOW_CONFIG_KEY);
if (windowConfig.getProcessingGuarantees() != null) {
functionConfig.setProcessingGuarantees(
FunctionConfig.ProcessingGuarantees.valueOf(windowConfig.getProcessingGuarantees().name()));
}
functionConfig.setClassName(windowConfig.getActualWindowFunctionClassName());
functionConfig.setWindowConfig(windowConfig);
} else {
functionConfig.setClassName(functionDetails.getClassName());
}
functionConfig.setUserConfig(userConfig);
// Secrets map is likewise JSON-encoded in the details.
if (!isEmpty(functionDetails.getSecretsMap())) {
Type type = new TypeToken<Map<String, Object>>() {
}.getType();
Map<String, Object> secretsMap = new Gson().fromJson(functionDetails.getSecretsMap(), type);
functionConfig.setSecrets(secretsMap);
}
if (functionDetails.hasResources()) {
Resources resources = new Resources();
resources.setCpu(functionDetails.getResources().getCpu());
resources.setRam(functionDetails.getResources().getRam());
resources.setDisk(functionDetails.getResources().getDisk());
functionConfig.setResources(resources);
}
if (!isEmpty(functionDetails.getRuntimeFlags())) {
functionConfig.setRuntimeFlags(functionDetails.getRuntimeFlags());
}
if (!isEmpty(functionDetails.getCustomRuntimeOptions())) {
functionConfig.setCustomRuntimeOptions(functionDetails.getCustomRuntimeOptions());
}
return functionConfig;
}
|
@Test
public void testFunctionConfigConvertFromDetails() {
// Build a fully populated FunctionDetails and check the round-trip fields.
String name = "test1";
String namespace = "ns1";
String tenant = "tenant1";
String classname = getClass().getName();
int parallelism = 3;
Map<String, String> userConfig = new HashMap<>();
userConfig.put("key1", "val1");
Function.ProcessingGuarantees processingGuarantees = Function.ProcessingGuarantees.EFFECTIVELY_ONCE;
Function.FunctionDetails.Runtime runtime = Function.FunctionDetails.Runtime.JAVA;
Function.SinkSpec sinkSpec = Function.SinkSpec.newBuilder().setTopic("sinkTopic1").build();
Map<String, Function.ConsumerSpec> consumerSpecMap = new HashMap<>();
consumerSpecMap.put("sourceTopic1", Function.ConsumerSpec.newBuilder()
.setSchemaType(JSONSchema.class.getName()).build());
Function.SourceSpec sourceSpec = Function.SourceSpec.newBuilder()
.putAllInputSpecs(consumerSpecMap)
.setSubscriptionType(Function.SubscriptionType.FAILOVER)
.setCleanupSubscription(true)
.build();
boolean autoAck = true;
String logTopic = "log-topic1";
Function.Resources resources = Function.Resources.newBuilder().setCpu(1.5).setDisk(1024 * 20).setRam(1024 * 10).build();
String packageUrl = "http://package.url";
Map<String, String> secretsMap = new HashMap<>();
secretsMap.put("secretConfigKey1", "secretConfigVal1");
Function.RetryDetails retryDetails = Function.RetryDetails.newBuilder().setDeadLetterTopic("dead-letter-1").build();
Function.FunctionDetails functionDetails = Function.FunctionDetails
.newBuilder()
.setNamespace(namespace)
.setTenant(tenant)
.setName(name)
.setClassName(classname)
.setParallelism(parallelism)
.setUserConfig(new Gson().toJson(userConfig))
.setProcessingGuarantees(processingGuarantees)
.setRuntime(runtime)
.setSink(sinkSpec)
.setSource(sourceSpec)
.setAutoAck(autoAck)
.setLogTopic(logTopic)
.setResources(resources)
.setPackageUrl(packageUrl)
.setSecretsMap(new Gson().toJson(secretsMap))
.setRetryDetails(retryDetails)
.build();
// Convert and verify that identity, resources, sink topic, input topics,
// and the cleanup-subscription flag survive the conversion.
FunctionConfig functionConfig = FunctionConfigUtils.convertFromDetails(functionDetails);
assertEquals(functionConfig.getTenant(), tenant);
assertEquals(functionConfig.getNamespace(), namespace);
assertEquals(functionConfig.getName(), name);
assertEquals(functionConfig.getClassName(), classname);
assertEquals(functionConfig.getLogTopic(), logTopic);
assertEquals((Object) functionConfig.getResources().getCpu(), resources.getCpu());
assertEquals(functionConfig.getResources().getDisk().longValue(), resources.getDisk());
assertEquals(functionConfig.getResources().getRam().longValue(), resources.getRam());
assertEquals(functionConfig.getOutput(), sinkSpec.getTopic());
assertEquals(functionConfig.getInputSpecs().keySet(), sourceSpec.getInputSpecsMap().keySet());
assertEquals(functionConfig.getCleanupSubscription().booleanValue(), sourceSpec.getCleanupSubscription());
}
|
/**
 * Prints the entity list in the configured output format, failing loudly on an
 * unknown format.
 */
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
    switch (outputFormat) {
        case JSON:
            printAsJson(entityList);
            return;
        case TABULAR:
            printEntitiesAsTables(entityList);
            return;
        default:
            throw new RuntimeException(String.format(
                "Unexpected output format: '%s'",
                outputFormat.name()
            ));
    }
}

/**
 * Tabular rendering: each entity is preceded by a blank line, plus its
 * statement text when more than one entity is being printed.
 */
private void printEntitiesAsTables(final List<KsqlEntity> entityList) {
    final boolean showStatements = entityList.size() > 1;
    for (final KsqlEntity ksqlEntity : entityList) {
        writer().println();
        if (showStatements) {
            writer().println(ksqlEntity.getStatementText());
        }
        printAsTable(ksqlEntity);
    }
}
|
@Test
public void shouldPrintTablesList() {
// Given:
final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of(
new TablesList("e", ImmutableList.of(
new SourceInfo.Table("B", "t2", "JSON", "JSON", true),
new SourceInfo.Table("A", "t1", "KAFKA", "AVRO", false)
))
));
// When:
console.printKsqlEntityList(entityList);
// Then: rendered output is pinned via an approval snapshot.
final String output = terminal.getOutputString();
Approvals.verify(output, approvalOptions);
}
|
@Override
public void onAction(Action action) {
    // This store only reacts to menu-item selections; ignore everything else.
    if (!action.getType().equals(ActionType.MENU_ITEM_SELECTED)) {
        return;
    }
    final MenuAction menuAction = (MenuAction) action;
    selected = menuAction.getMenuItem();
    notifyChange();
}
|
@Test
void testOnAction() {
final var menuStore = new MenuStore();
final var view = mock(View.class);
menuStore.registerView(view);
verifyNoMoreInteractions(view);
// Menu should not react on content action ...
menuStore.onAction(new ContentAction(Content.COMPANY));
verifyNoMoreInteractions(view);
// ... but it should react on a menu action
menuStore.onAction(new MenuAction(MenuItem.PRODUCTS));
verify(view, times(1)).storeChanged(eq(menuStore));
verifyNoMoreInteractions(view);
assertEquals(MenuItem.PRODUCTS, menuStore.getSelected());
}
|
@Override
public int hashCode()
{
    // Hash is derived from all of the result's constituent fields.
    final int hash = Objects.hash(_elements, _metadata, _total, _pageIncrement);
    return hash;
}
|
@Test(dataProvider = "testHashCodeDataProvider")
// Data-driven check that hashCode agrees or disagrees as the provider expects.
public void testHashCode
(
boolean hasSameHashCode,
@Nonnull CollectionResult<TestRecordTemplateClass.Foo, TestRecordTemplateClass.Bar> collectionResult1,
@Nonnull CollectionResult<TestRecordTemplateClass.Foo, TestRecordTemplateClass.Bar> collectionResult2
)
{
if (hasSameHashCode)
{
assertEquals(collectionResult1.hashCode(), collectionResult2.hashCode());
}
else
{
assertNotEquals(collectionResult1.hashCode(), collectionResult2.hashCode());
}
}
|
@Override
// Intentional no-op: this configuration implementation does not track listeners.
public void removeListener(String key, String group, ConfigurationListener listener) {}
|
@Test
void testRemoveListener() {
// Both overloads are no-ops and must tolerate null arguments without throwing.
configuration.removeListener(null, null);
configuration.removeListener(null, null, null);
}
|
@SuppressWarnings("unchecked")
@Override
// Looks up the named global store and wraps it in a read/write facade; the
// unchecked cast to S is the caller's responsibility, as with the interface contract.
public <S extends StateStore> S getStateStore(final String name) {
final StateStore store = stateManager.getGlobalStore(name);
return (S) getReadWriteStore(store);
}
|
@Test
public void shouldNotAllowCloseForTimestampedKeyValueStore() {
// The read/write facade over a global store must reject lifecycle operations.
when(stateManager.getGlobalStore(GLOBAL_TIMESTAMPED_KEY_VALUE_STORE_NAME)).thenReturn(mock(TimestampedKeyValueStore.class));
final StateStore store = globalContext.getStateStore(GLOBAL_TIMESTAMPED_KEY_VALUE_STORE_NAME);
try {
store.close();
fail("Should have thrown UnsupportedOperationException.");
} catch (final UnsupportedOperationException expected) { }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.