| focal_method | test_case |
|---|---|
// Routes an application to a home sub-cluster by resolving the federation
// router policy configured for the application's queue (falling back to the
// YARN default queue when no queue is set) and delegating to that policy.
public SubClusterId getHomeSubcluster(
ApplicationSubmissionContext appSubmissionContext,
List<SubClusterId> blackListSubClusters) throws YarnException {
// the maps are concurrent, but we need to protect from reset()
// reinitialization mid-execution by creating a new reference local to this
// method.
Map<String, SubClusterPolicyConfiguration> cachedConfs = globalConfMap;
Map<String, FederationRouterPolicy> policyMap = globalPolicyMap;
if (appSubmissionContext == null) {
throw new FederationPolicyException(
"The ApplicationSubmissionContext cannot be null.");
}
String queue = appSubmissionContext.getQueue();
// respecting YARN behavior we assume default queue if the queue is not
// specified. This also ensures that "null" can be used as a key to get the
// default behavior.
if (queue == null) {
queue = YarnConfiguration.DEFAULT_QUEUE_NAME;
}
FederationRouterPolicy policy = getFederationRouterPolicy(cachedConfs, policyMap, queue);
if (policy == null) {
// this should never happen, as the two maps are updated together
throw new FederationPolicyException("No FederationRouterPolicy found "
+ "for queue: " + appSubmissionContext.getQueue() + " (for "
+ "application: " + appSubmissionContext.getApplicationId() + ") "
+ "and no default specified.");
}
return policy.getHomeSubcluster(appSubmissionContext, blackListSubClusters);
} | @Test
// Exercises the fallback path: unknown, empty, and null queue names must all
// resolve to some known sub-cluster without adding entries to the policy map.
public void testFallbacks() throws YarnException {
// this tests the behavior of the system when the queue requested is
// not configured (or null) and there is no default policy configured
// for DEFAULT_FEDERATION_POLICY_KEY (*). This is our second line of
// defense.
ApplicationSubmissionContext applicationSubmissionContext =
mock(ApplicationSubmissionContext.class);
// The facade answers also for non-initialized policies (using the
// defaultPolicy)
String uninitQueue = "non-initialized-queue";
when(applicationSubmissionContext.getQueue()).thenReturn(uninitQueue);
SubClusterId chosen =
routerFacade.getHomeSubcluster(applicationSubmissionContext, null);
Assert.assertTrue(subClusterIds.contains(chosen));
Assert.assertFalse(routerFacade.globalPolicyMap.containsKey(uninitQueue));
// empty string
when(applicationSubmissionContext.getQueue()).thenReturn("");
chosen = routerFacade.getHomeSubcluster(applicationSubmissionContext, null);
Assert.assertTrue(subClusterIds.contains(chosen));
Assert.assertFalse(routerFacade.globalPolicyMap.containsKey(uninitQueue));
// null queue also falls back to default
when(applicationSubmissionContext.getQueue()).thenReturn(null);
chosen = routerFacade.getHomeSubcluster(applicationSubmissionContext, null);
Assert.assertTrue(subClusterIds.contains(chosen));
Assert.assertFalse(routerFacade.globalPolicyMap.containsKey(uninitQueue));
} |
// Imports all Java classes found under the given file-system path.
// Convenience overload delegating to the varargs importPaths(String...).
@PublicAPI(usage = ACCESS)
public JavaClasses importPath(String path) {
return importPaths(path);
} | @Test
// A corrupt .class file in the path must be skipped with a WARN log entry
// instead of failing the whole import.
public void import_is_resilient_against_broken_class_files() throws Exception {
Class<?> expectedClass = getClass();
File folder = temporaryFolder.newFolder();
copyClassFile(expectedClass, folder);
Files.write(new File(folder, "Evil.class").toPath(), "broken".getBytes(UTF_8));
logTest.watch(ClassFileProcessor.class, Level.WARN);
JavaClasses classes = new ClassFileImporter().importPath(folder.toPath());
assertThatTypes(classes).matchExactly(expectedClass);
logTest.assertLogMessage(Level.WARN, "Evil.class");
} |
/**
 * Copies the entire contents of {@code source} into {@code dest} using NIO
 * channel transfer. Both channels are closed via try-with-resources.
 *
 * <p>Fix: {@link java.nio.channels.FileChannel#transferFrom} is not guaranteed
 * to transfer all requested bytes in a single call, so we loop until the whole
 * file has been copied (or the source unexpectedly reaches EOF).
 *
 * @param source file to read from; must exist
 * @param dest file to write to; created or truncated
 * @throws IOException if either file cannot be opened or the transfer fails
 */
public static void copyFile(File source, File dest) throws IOException {
    try (FileChannel in = new FileInputStream(source).getChannel(); FileChannel out = new FileOutputStream(dest).getChannel()) {
        long size = in.size();
        long transferred = 0;
        while (transferred < size) {
            long count = out.transferFrom(in, transferred, size - transferred);
            if (count <= 0) {
                // Source hit EOF early (e.g. truncated concurrently); stop to avoid spinning.
                break;
            }
            transferred += count;
        }
    }
} | @Test
public void testCopyFile() throws IOException {
IOKit.copyFile(new File(IOKitTest.class.getResource("/application.properties").getPath()), new File("./tmp.properties"));
File tmp = new File("./tmp.properties");
Assert.assertTrue(tmp.exists() && tmp.isFile());
tmp.delete();
} |
// Builds the ordered, de-duplicated list of class loaders to try when loading
// services: the given loader first, then the thread context class loader, then
// the Hazelcast core loader, and finally (if present on the classpath) the
// Hazelcast client loader. Null loaders are never added.
static List<ClassLoader> selectClassLoaders(ClassLoader classLoader) {
// list prevents reordering!
List<ClassLoader> classLoaders = new ArrayList<>();
if (classLoader != null) {
classLoaders.add(classLoader);
}
// check if TCCL is same as given classLoader
ClassLoader tccl = Thread.currentThread().getContextClassLoader();
if (tccl != null && tccl != classLoader) {
classLoaders.add(tccl);
}
// Hazelcast core classLoader
ClassLoader coreClassLoader = ServiceLoader.class.getClassLoader();
if (coreClassLoader != classLoader && coreClassLoader != tccl) {
classLoaders.add(coreClassLoader);
}
// Hazelcast client classLoader
try {
Class<?> hzClientClass = Class.forName("com.hazelcast.client.HazelcastClient");
ClassLoader clientClassLoader = hzClientClass.getClassLoader();
if (clientClassLoader != classLoader && clientClassLoader != tccl && clientClassLoader != coreClassLoader) {
classLoaders.add(clientClassLoader);
}
} catch (ClassNotFoundException ignore) {
// ignore since we may not have the HazelcastClient in the classpath
ignore(ignore);
}
return classLoaders;
} | @Test
// A null argument and a null TCCL must never surface as a null entry.
public void selectClassLoaders_whenPassedClassLoaderIsisNull_thenDoNotSelectNullClassloader() {
Thread.currentThread().setContextClassLoader(null);
List<ClassLoader> classLoaders = ServiceLoader.selectClassLoaders(null);
assertNotContains(classLoaders, null);
} |
// Handles a transactional offset commit: validates the request, then for each
// topic-partition either rejects oversized metadata (OFFSET_METADATA_TOO_LARGE)
// or records the commit and answers NONE. Returns the generated coordinator
// records together with the per-partition response.
public CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> commitTransactionalOffset(
RequestContext context,
TxnOffsetCommitRequestData request
) throws ApiException {
validateTransactionalOffsetCommit(context, request);
final TxnOffsetCommitResponseData response = new TxnOffsetCommitResponseData();
final List<CoordinatorRecord> records = new ArrayList<>();
final long currentTimeMs = time.milliseconds();
request.topics().forEach(topic -> {
final TxnOffsetCommitResponseTopic topicResponse = new TxnOffsetCommitResponseTopic().setName(topic.name());
response.topics().add(topicResponse);
topic.partitions().forEach(partition -> {
if (isMetadataInvalid(partition.committedMetadata())) {
// Oversized/invalid metadata: reject this partition only.
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
} else {
log.debug("[GroupId {}] Committing transactional offsets {} for partition {}-{} from member {} with leader epoch {}.",
request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
request.memberId(), partition.committedLeaderEpoch());
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.NONE.code()));
final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
partition,
currentTimeMs
);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
request.groupId(),
topic.name(),
partition.partitionIndex(),
offsetAndMetadata,
metadataImage.features().metadataVersion()
));
}
});
});
// Only bump the commit sensor when at least one record was produced.
if (!records.isEmpty()) {
metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
}
return new CoordinatorResult<>(records, response);
} | @Test
// Happy path: a single valid partition commit yields a NONE response and
// exactly one offset-commit record.
public void testConsumerGroupTransactionalOffsetCommit() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
// Create an empty group.
ConsumerGroup group = context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup(
"foo",
true
);
// Add member.
group.updateMember(new ConsumerGroupMember.Builder("member")
.setMemberEpoch(10)
.setPreviousMemberEpoch(10)
.build()
);
CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> result = context.commitTransactionalOffset(
new TxnOffsetCommitRequestData()
.setGroupId("foo")
.setMemberId("member")
.setGenerationId(10)
.setTopics(Collections.singletonList(
new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic()
.setName("bar")
.setPartitions(Collections.singletonList(
new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100L)
.setCommittedLeaderEpoch(10)
.setCommittedMetadata("metadata")
))
))
);
assertEquals(
new TxnOffsetCommitResponseData()
.setTopics(Collections.singletonList(
new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic()
.setName("bar")
.setPartitions(Collections.singletonList(
new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition()
.setPartitionIndex(0)
.setErrorCode(Errors.NONE.code())
))
)),
result.response()
);
assertEquals(
Collections.singletonList(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
"foo",
"bar",
0,
new OffsetAndMetadata(
100L,
OptionalInt.of(10),
"metadata",
context.time.milliseconds(),
OptionalLong.empty()
),
MetadataImage.EMPTY.features().metadataVersion()
)),
result.records()
);
} |
// Returns the task manager location future for the given execution vertex,
// or Optional.empty() while the vertex is still in CREATED state (i.e. no
// deployment target is known yet).
@Override
public Optional<CompletableFuture<TaskManagerLocation>> getTaskManagerLocation(
ExecutionVertexID executionVertexId) {
ExecutionVertex ev = getExecutionVertex(executionVertexId);
if (ev.getExecutionState() != ExecutionState.CREATED) {
return Optional.of(ev.getCurrentTaskManagerLocationFuture());
} else {
return Optional.empty();
}
} | @Test
// Once the vertex is scheduled and deployed, the location future must be
// present and resolve to the slot's task manager location.
void testGetTaskManagerLocationWhenScheduled() throws Exception {
final JobVertex jobVertex = ExecutionGraphTestUtils.createNoOpVertex(1);
final TestingLogicalSlot testingLogicalSlot =
new TestingLogicalSlotBuilder().createTestingLogicalSlot();
final ExecutionGraph eg =
ExecutionGraphTestUtils.createExecutionGraph(
EXECUTOR_EXTENSION.getExecutor(), jobVertex);
final ExecutionGraphToInputsLocationsRetrieverAdapter inputsLocationsRetriever =
new ExecutionGraphToInputsLocationsRetrieverAdapter(eg);
final ExecutionVertex onlyExecutionVertex = eg.getAllExecutionVertices().iterator().next();
onlyExecutionVertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
onlyExecutionVertex.deployToSlot(testingLogicalSlot);
ExecutionVertexID executionVertexId = new ExecutionVertexID(jobVertex.getID(), 0);
Optional<CompletableFuture<TaskManagerLocation>> taskManagerLocationOptional =
inputsLocationsRetriever.getTaskManagerLocation(executionVertexId);
assertThat(taskManagerLocationOptional).isPresent();
final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture =
taskManagerLocationOptional.get();
assertThat(taskManagerLocationFuture.get())
.isEqualTo(testingLogicalSlot.getTaskManagerLocation());
} |
// Reads one byte from the wrapped stream, enforcing the remaining-byte limit:
// returns -1 once `left` reaches 0, and decrements `left` only on a
// successful read (underlying EOF does not consume the budget).
@Override
public int read() throws IOException {
if (left == 0) {
return -1;
}
int result = in.read();
if (result != -1) {
--left;
}
return result;
} | @Test
// Limit 0 must yield immediate EOF; with a positive limit the wrapped
// stream's bytes pass through unchanged.
public void testRead() throws IOException {
try (LimitInputStream limitInputStream =
new LimitInputStream(new RandomInputStream(), 0)) {
assertEquals("Reading byte after reaching limit should return -1", -1,
limitInputStream.read());
}
try (LimitInputStream limitInputStream =
new LimitInputStream(new RandomInputStream(), 4)) {
assertEquals("Incorrect byte returned", new Random(0).nextInt(),
limitInputStream.read());
}
} |
// Deserializes a JSON byte array into an instance of the given class,
// wrapping any Jackson failure in a NacosDeserializationException.
public static <T> T toObj(byte[] json, Class<T> cls) {
try {
return mapper.readValue(json, cls);
} catch (Exception e) {
throw new NacosDeserializationException(cls, e);
}
} | @Test
// Passing a Type that is not a Class overload target must throw.
void testToObject5() {
assertThrows(Exception.class, () -> {
JacksonUtils.toObj("{\"key\":\"value\"}".getBytes(), Object.class.getGenericSuperclass());
});
} |
// Lists offsets for the requested topic-partitions: translates each OffsetSpec
// into its timestamp query, then drives the ListOffsetsHandler through the
// admin API driver and exposes per-partition futures via ListOffsetsResult.
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
ListOffsetsOptions options) {
AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue())));
ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
invokeDriver(handler, future, options.timeoutMs);
return new ListOffsetsResult(future.all());
} | @Test
// Retriable REQUEST_TIMED_OUT errors must be retried per partition; exhausting
// retries fails only that partition, while one remaining retry succeeds.
public void testListOffsetsHandlesFulfillmentTimeouts() throws Exception {
Node node = new Node(0, "localhost", 8120);
List<Node> nodes = Collections.singletonList(node);
List<PartitionInfo> pInfos = new ArrayList<>();
pInfos.add(new PartitionInfo("foo", 0, node, new Node[]{node}, new Node[]{node}));
pInfos.add(new PartitionInfo("foo", 1, node, new Node[]{node}, new Node[]{node}));
final Cluster cluster = new Cluster(
"mockClusterId",
nodes,
pInfos,
Collections.emptySet(),
Collections.emptySet(),
node);
final TopicPartition tp0 = new TopicPartition("foo", 0);
final TopicPartition tp1 = new TopicPartition("foo", 1);
int numRetries = 2;
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster,
AdminClientConfig.RETRIES_CONFIG, Integer.toString(numRetries))) {
ListOffsetsTopicResponse tp0ErrorResponse =
ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.REQUEST_TIMED_OUT, -1L, -1L, -1);
ListOffsetsTopicResponse tp1Response =
ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543);
ListOffsetsResponseData responseDataWithError = new ListOffsetsResponseData()
.setThrottleTimeMs(0)
.setTopics(asList(tp0ErrorResponse, tp1Response));
ListOffsetsTopicResponse tp0Response =
ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 789L, 987);
ListOffsetsResponseData responseData = new ListOffsetsResponseData()
.setThrottleTimeMs(0)
.setTopics(asList(tp0Response, tp1Response));
// Test that one-too-many timeouts for partition 0 result in partial success overall -
// timeout for partition 0 and success for partition 1.
// It might be desirable to have the AdminApiDriver mechanism also handle all retriable
// exceptions like TimeoutException during the lookup stage (it currently doesn't).
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
for (int i = 0; i < numRetries + 1; i++) {
env.kafkaClient().prepareResponseFrom(
request -> request instanceof ListOffsetsRequest,
new ListOffsetsResponse(responseDataWithError), node);
}
ListOffsetsResult result = env.adminClient().listOffsets(
new HashMap<TopicPartition, OffsetSpec>() {
{
put(tp0, OffsetSpec.latest());
put(tp1, OffsetSpec.latest());
}
});
TestUtils.assertFutureThrows(result.partitionResult(tp0), TimeoutException.class);
ListOffsetsResultInfo tp1Result = result.partitionResult(tp1).get();
assertEquals(345L, tp1Result.offset());
assertEquals(543, tp1Result.leaderEpoch().get().intValue());
assertEquals(-1L, tp1Result.timestamp());
// Now test that only numRetries timeouts for partition 0 result in success for both
// partition 0 and partition 1.
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
for (int i = 0; i < numRetries; i++) {
env.kafkaClient().prepareResponseFrom(
request -> request instanceof ListOffsetsRequest,
new ListOffsetsResponse(responseDataWithError), node);
}
env.kafkaClient().prepareResponseFrom(
request -> request instanceof ListOffsetsRequest, new ListOffsetsResponse(responseData), node);
result = env.adminClient().listOffsets(
new HashMap<TopicPartition, OffsetSpec>() {
{
put(tp0, OffsetSpec.latest());
put(tp1, OffsetSpec.latest());
}
});
ListOffsetsResultInfo tp0Result = result.partitionResult(tp0).get();
assertEquals(789L, tp0Result.offset());
assertEquals(987, tp0Result.leaderEpoch().get().intValue());
assertEquals(-1L, tp0Result.timestamp());
tp1Result = result.partitionResult(tp1).get();
assertEquals(345L, tp1Result.offset());
assertEquals(543, tp1Result.leaderEpoch().get().intValue());
assertEquals(-1L, tp1Result.timestamp());
}
} |
// Interceptor hook: when the intercepted object exposes a service id and a
// Flux of service instances, filters those instances through the load
// balancer (using the request data bound to the current thread) and makes the
// interception skip with the filtered Flux. Otherwise passes through.
@Override
public ExecuteContext before(ExecuteContext context) {
Object object = context.getObject();
String serviceId = getServiceId(object).orElse(null);
if (StringUtils.isBlank(serviceId)) {
return context;
}
Object obj = context.getMemberFieldValue("serviceInstances");
if (obj instanceof Flux<?>) {
List<Object> instances = getInstances((Flux<Object>) obj, object);
if (CollectionUtils.isEmpty(instances)) {
return context;
}
RequestData requestData = ThreadLocalUtils.getRequestData();
List<Object> targetInstances = loadBalancerService.getTargetInstances(serviceId, instances, requestData);
context.skip(Flux.just(targetInstances));
}
return context;
} | @Test
// With request data set, the interceptor must narrow the supplier's instances
// down to the single target instance.
public void testBefore() {
ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
interceptor.before(context);
ServiceInstanceListSupplier supplier = (ServiceInstanceListSupplier) context.getObject();
List<ServiceInstance> instances = supplier.get().blockFirst();
Assert.assertNotNull(instances);
Assert.assertEquals(1, instances.size());
} |
// Looks up a component by its report reference, throwing
// IllegalArgumentException when no component with that ref exists.
@Override
public Component getComponentByRef(int ref) {
return getOptionalComponentByRef(ref)
.orElseThrow(() -> new IllegalArgumentException(String.format("Component with ref '%s' can't be found", ref)));
} | @Test
// Calling before the holder is initialized must fail with an ISE from the
// underlying optional lookup.
public void getComponentByRef_throws_ISE_if_root_has_not_been_set() {
assertThatThrownBy(() -> underTest.getComponentByRef(12))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Holder has not been initialized yet");
} |
// Writes the source of every changed input file into the scanner report,
// streaming each file through a reader in the file's own charset. Any I/O
// failure is rethrown as an IllegalStateException.
@Override
public void publish(ScannerReportWriter writer) {
for (final DefaultInputFile inputFile : componentCache.allChangedFilesToPublish()) {
File iofile = writer.getSourceFile(inputFile.scannerId());
try (OutputStream output = new BufferedOutputStream(new FileOutputStream(iofile));
InputStream in = inputFile.inputStream();
BufferedReader reader = new BufferedReader(new InputStreamReader(in, inputFile.charset()))) {
writeSource(reader, output, inputFile.lines());
} catch (IOException e) {
throw new IllegalStateException("Unable to store file source in the report", e);
}
}
} | @Test
// An empty source file must produce an empty report entry.
public void publishEmptySource() throws Exception {
FileUtils.write(sourceFile, "", StandardCharsets.ISO_8859_1);
publisher.publish(writer);
File out = writer.getSourceFile(inputFile.scannerId());
assertThat(FileUtils.readFileToString(out, StandardCharsets.UTF_8)).isEmpty();
} |
/**
 * Creates a Kafka topic with a uniquified name derived from {@code topicName},
 * unless a topic with that unique name already exists.
 *
 * <p>Fix: the original logged "Successfully created topic" unconditionally,
 * even when the topic already existed and nothing was created. The log now
 * reflects what actually happened in each branch.
 *
 * @param topicName base name for the topic
 * @param partitions number of partitions; must be positive
 * @return the unique topic name that now exists
 * @throws KafkaResourceManagerException if listing or creating topics fails
 */
public synchronized String createTopic(String topicName, int partitions)
    throws KafkaResourceManagerException {
  checkArgument(partitions > 0, "partitions must be positive.");
  String uniqueName = KafkaResourceManagerUtils.generateTopicName(topicName);
  try {
    Set<String> currentTopics = kafkaClient.listTopics().names().get();
    if (!currentTopics.contains(uniqueName)) {
      kafkaClient
          .createTopics(
              Collections.singletonList(new NewTopic(uniqueName, partitions, (short) 1)))
          .all()
          .get();
      topicNames.add(uniqueName);
      LOG.info("Successfully created topic {}.", uniqueName);
    } else {
      // Nothing to do: the topic is already present on the cluster.
      LOG.info("Topic {} already exists; skipping creation.", uniqueName);
    }
  } catch (Exception e) {
    throw new KafkaResourceManagerException("Error creating topics.", e);
  }
  return uniqueName;
} | @Test
public void testCreateTopicZeroPartitionsThrowErrors() {
assertThrows(IllegalArgumentException.class, () -> testManager.createTopic(TOPIC_NAME, 0));
} |
// Resolves a concurrent modification where the StorageProvider already marked
// the job SUCCEEDED: deletes the local copy and interrupts the worker thread
// still processing it (if any). Both sides SUCCEEDED should have been filtered
// out earlier and is treated as a bug.
@Override
public ConcurrentJobModificationResolveResult resolve(Job localJob, Job storageProviderJob) {
if (localJob.getState() == StateName.SUCCEEDED && storageProviderJob.getState() == StateName.SUCCEEDED) {
throw shouldNotHappenException("Should not happen as matches filter should be filtering out this StateChangeFilter");
} else if (localJob.getState() == StateName.PROCESSING && storageProviderJob.getState() == StateName.SUCCEEDED) {
localJob.delete("Job has already succeeded in StorageProvider");
final Thread threadProcessingJob = jobSteward.getThreadProcessingJob(localJob);
if (threadProcessingJob != null) {
threadProcessingJob.interrupt();
}
}
return ConcurrentJobModificationResolveResult.succeeded(localJob);
} | @Test
// PROCESSING locally + SUCCEEDED remotely must resolve successfully and
// interrupt the processing thread.
void ifJobSucceededWhileInProgress() {
final Job jobInProgress = aJobInProgress().build();
final Job jobInProgressWithUpdate = aCopyOf(jobInProgress).withMetadata("extra", "metadata").build();
final Job succeededJob = aCopyOf(jobInProgress).withSucceededState().build();
Thread mockThread = mock(Thread.class);
when(jobZooKeeper.getThreadProcessingJob(jobInProgressWithUpdate)).thenReturn(mockThread);
final ConcurrentJobModificationResolveResult resolveResult = allowedStateChange.resolve(jobInProgressWithUpdate, succeededJob);
assertThat(resolveResult.failed()).isFalse();
verify(mockThread).interrupt();
} |
// Recursively deletes everything under the given path prefix on the Hadoop
// filesystem, wrapping IOExceptions as UncheckedIOException.
@Override
public void deletePrefix(String prefix) {
Path prefixToDelete = new Path(prefix);
FileSystem fs = Util.getFs(prefixToDelete, hadoopConf.get());
try {
fs.delete(prefixToDelete, true /* recursive */);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
} | @Test
// After deletion at several scales, listing the deleted prefix must fail with
// FileNotFoundException wrapped in UncheckedIOException.
public void testDeletePrefix() {
Path parent = new Path(tempDir.toURI());
List<Integer> scaleSizes = Lists.newArrayList(1, 1000, 2500);
scaleSizes
.parallelStream()
.forEach(
scale -> {
Path scalePath = new Path(parent, Integer.toString(scale));
createRandomFiles(scalePath, scale);
hadoopFileIO.deletePrefix(scalePath.toUri().toString());
// Hadoop filesystem will throw if the path does not exist
assertThatThrownBy(
() -> hadoopFileIO.listPrefix(scalePath.toUri().toString()).iterator())
.isInstanceOf(UncheckedIOException.class)
.hasMessageContaining("java.io.FileNotFoundException");
});
hadoopFileIO.deletePrefix(parent.toUri().toString());
// Hadoop filesystem will throw if the path does not exist
assertThatThrownBy(() -> hadoopFileIO.listPrefix(parent.toUri().toString()).iterator())
.isInstanceOf(UncheckedIOException.class)
.hasMessageContaining("java.io.FileNotFoundException");
} |
// Convenience overload: resolves the SQL type of an expression with no
// additional lambda/variable type mappings.
public SqlType getExpressionSqlType(final Expression expression) {
return getExpressionSqlType(expression, Collections.emptyMap());
} | @Test
// A UDF call must resolve to the UDF's declared return type.
public void shouldEvaluateTypeForStringUDF() {
// Given:
givenUdfWithNameAndReturnType("LCASE", SqlTypes.STRING);
final Expression expression =
new FunctionCall(FunctionName.of("LCASE"), ImmutableList.of(COL2));
// When:
final SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);
// Then:
assertThat(exprType, is(SqlTypes.STRING));
verify(udfFactory).getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.STRING)));
verify(function).getReturnType(ImmutableList.of(SqlArgument.of(SqlTypes.STRING)));
} |
// Parses a string task id (e.g. "task_1_2_r_3") into a YARN TaskId by way of
// the MapReduce TaskID parser.
public static TaskId toTaskID(String tid) {
return TypeConverter.toYarn(TaskID.forName(tid));
} | @Test
@Timeout(120000)
// Round-trips reduce and map task ids, checking every parsed component.
public void testToTaskID() {
TaskId tid = MRApps.toTaskID("task_1_2_r_3");
assertEquals(1, tid.getJobId().getAppId().getClusterTimestamp());
assertEquals(2, tid.getJobId().getAppId().getId());
assertEquals(2, tid.getJobId().getId());
assertEquals(TaskType.REDUCE, tid.getTaskType());
assertEquals(3, tid.getId());
tid = MRApps.toTaskID("task_1_2_m_3");
assertEquals(TaskType.MAP, tid.getTaskType());
} |
/**
 * Compares versions lexicographically by major, then minor, then patch
 * component. Returns a negative, zero, or positive value per the
 * {@link Comparable#compareTo} contract.
 *
 * <p>Cleanup: the original ended with {@code if (result != 0) return result;
 * return result;} — the patch comparison is now returned directly.
 */
@Override
public int compareTo( MonetDbVersion mDbVersion ) {
int result = majorVersion.compareTo( mDbVersion.majorVersion );
if ( result != 0 ) {
return result;
}
result = minorVersion.compareTo( mDbVersion.minorVersion );
if ( result != 0 ) {
return result;
}
// Patch version is the final tie-breaker.
return patchVersion.compareTo( mDbVersion.patchVersion );
} | @Test
public void testCompareVersions_DiffInMinor() throws Exception {
String dbVersionBigger = "785.5.3";
String dbVersion = "785.2.2";
assertEquals( 1, new MonetDbVersion( dbVersionBigger ).compareTo( new MonetDbVersion( dbVersion ) ) );
} |
// Lifecycle hook: validates the configured health checks against the names
// registered in the registry when the component starts.
@Override
public void start() throws Exception {
validateConfiguration(configs, registry.getNames());
} | @Test
// When some registered checks have no configuration, start() must still
// succeed but log the unconfigured check names at INFO.
void startValidationsShouldSucceedButLogWhenNotAllHealthChecksAreConfigured() throws Exception {
// given
ArgumentCaptor<LoggingEvent> captor = ArgumentCaptor.forClass(LoggingEvent.class);
HealthCheckConfiguration check1 = new HealthCheckConfiguration();
check1.setName("check-1");
List<HealthCheckConfiguration> configs = singletonList(check1);
HealthCheckRegistry registry = new HealthCheckRegistry();
registry.register("check-1", mock(HealthCheck.class));
registry.register("check-2", mock(HealthCheck.class));
registry.register("check-3", mock(HealthCheck.class));
// when
HealthCheckConfigValidator validator = new HealthCheckConfigValidator(configs, registry);
validator.start();
// then
verify(mockLogAppender).doAppend(captor.capture());
LoggingEvent logEvent = captor.getValue();
assertThat(logEvent.getLevel()).isEqualTo(Level.INFO);
assertThat(logEvent.getFormattedMessage())
.doesNotContain(" * check-1");
assertThat(logEvent.getFormattedMessage())
.contains(" * check-2");
assertThat(logEvent.getFormattedMessage())
.contains(" * check-3");
} |
/**
 * Unions two maps. Entries of {@code map2} win on duplicate keys (it is
 * applied last). Null inputs are ignored; when both inputs are null the
 * result is null, matching SQL null semantics.
 *
 * <p>Cleanup: {@code size() == 0} replaced with the idiomatic
 * {@code isEmpty()}.
 */
@Udf
public <T> Map<String, T> union(
    @UdfParameter(description = "first map to union") final Map<String, T> map1,
    @UdfParameter(description = "second map to union") final Map<String, T> map2) {
  final List<Map<String, T>> nonNullInputs =
      Stream.of(map1, map2)
          .filter(Objects::nonNull)
          .collect(Collectors.toList());
  // Both inputs null -> null result (SQL-style), not an empty map.
  if (nonNullInputs.isEmpty()) {
    return null;
  }
  final Map<String, T> output = new HashMap<>();
  nonNullInputs
      .forEach(output::putAll);
  return output;
} | @Test
public void shouldReturnNullForNullInput() {
Map<String, Long> result = udf.union((Map<String, Long>) null, (Map<String, Long>) null);
assertThat(result, is(nullValue()));
} |
// Increments the named counter by 1; delegates to the (name, delta) overload.
@Override
public Long incrementCounter(String name) {
return incrementCounter(name, 1L);
} | @Test
// Each increment must raise the registry counter by exactly one.
public void testCount() throws Exception {
int runs = 5;
for (int i = 0; i < runs; i++) {
MetricsFactory.getInstance().incrementCounter("count1");
Counter counter = metricRegistry.getCounters().get("count1");
Assert.assertEquals(i + 1, counter.getCount());
}
} |
// Returns the supported SDKs in their sorted order.
// NOTE(review): this hands out the internal set directly; callers could
// presumably mutate it — confirm whether it should be wrapped unmodifiable.
public SortedSet<Sdk> getSupportedSdks() {
return supportedSdks;
} | @Test
// Only the supported SDKs are returned, in sorted order.
public void getSupportedSdks_shouldReturnOnlySupported() throws Exception {
assertThat(sdkCollection.getSupportedSdks())
.containsExactly(fakeSdk1234, fakeSdk1235, fakeSdk1236)
.inOrder();
} |
// Transforms the given metadata in place; implementations may add, rewrite,
// or remove fields and may signal failure via TikaException.
public abstract void filter(Metadata metadata) throws TikaException; | @Test
// A capture-group filter configured via XML must extract "text/html" from the
// content-type header into the "mime" field while leaving content untouched.
public void testCaptureGroupBasic() throws Exception {
TikaConfig config = getConfig("TIKA-4133-capture-group.xml");
Metadata metadata = new Metadata();
metadata.set(TikaCoreProperties.TIKA_CONTENT, "quick brown fox");
metadata.set(Metadata.CONTENT_TYPE, "text/html; charset=UTF-8");
MetadataFilter filter = config.getMetadataFilter();
filter.filter(metadata);
assertEquals("quick brown fox", metadata.get(TikaCoreProperties.TIKA_CONTENT));
assertEquals("text/html", metadata.get("mime"));
} |
/**
 * Lists all function metadata registered under the given tenant and
 * namespace; returns an empty list when either level is absent.
 *
 * <p>Cleanup: the repeated containsKey/get chains and the manual copy loop
 * are replaced with single lookups and {@code addAll}. Behavior is unchanged
 * for the expected case of non-null nested maps.
 */
public synchronized Collection<FunctionMetaData> listFunctions(String tenant, String namespace) {
    List<FunctionMetaData> ret = new LinkedList<>();
    Map<String, Map<String, FunctionMetaData>> namespaces = this.functionMetaDataMap.get(tenant);
    if (namespaces == null) {
        return ret;
    }
    Map<String, FunctionMetaData> functions = namespaces.get(namespace);
    if (functions == null) {
        return ret;
    }
    ret.addAll(functions.values());
    return ret;
} | @Test
public void testListFunctions() throws PulsarClientException {
FunctionMetaDataManager functionMetaDataManager = spy(
new FunctionMetaDataManager(new WorkerConfig(),
mock(SchedulerManager.class),
mockPulsarClient(), ErrorNotifier.getDefaultImpl()));
Map<String, Function.FunctionMetaData> functionMetaDataMap1 = new HashMap<>();
Function.FunctionMetaData f1 = Function.FunctionMetaData.newBuilder().setFunctionDetails(
Function.FunctionDetails.newBuilder().setName("func-1")).build();
functionMetaDataMap1.put("func-1", f1);
Function.FunctionMetaData f2 = Function.FunctionMetaData.newBuilder().setFunctionDetails(
Function.FunctionDetails.newBuilder().setName("func-2")).build();
functionMetaDataMap1.put("func-2", f2);
Function.FunctionMetaData f3 = Function.FunctionMetaData.newBuilder().setFunctionDetails(
Function.FunctionDetails.newBuilder().setName("func-3")).build();
Map<String, Function.FunctionMetaData> functionMetaDataInfoMap2 = new HashMap<>();
functionMetaDataInfoMap2.put("func-3", f3);
functionMetaDataManager.functionMetaDataMap.put("tenant-1", new HashMap<>());
functionMetaDataManager.functionMetaDataMap.get("tenant-1").put("namespace-1", functionMetaDataMap1);
functionMetaDataManager.functionMetaDataMap.get("tenant-1").put("namespace-2", functionMetaDataInfoMap2);
Assert.assertEquals(0, functionMetaDataManager.listFunctions(
"tenant", "namespace").size());
Assert.assertEquals(2, functionMetaDataManager.listFunctions(
"tenant-1", "namespace-1").size());
Assert.assertTrue(functionMetaDataManager.listFunctions(
"tenant-1", "namespace-1").contains(f1));
Assert.assertTrue(functionMetaDataManager.listFunctions(
"tenant-1", "namespace-1").contains(f2));
Assert.assertEquals(1, functionMetaDataManager.listFunctions(
"tenant-1", "namespace-2").size());
Assert.assertTrue(functionMetaDataManager.listFunctions(
"tenant-1", "namespace-2").contains(f3));
} |
// Returns the element count of a JSON array string, or null when the input is
// null, unparsable/missing, or parses to a non-array JSON value.
@Udf
public Integer length(@UdfParameter final String jsonArray) {
if (jsonArray == null) {
return null;
}
final JsonNode node = UdfJsonMapper.parseJson(jsonArray);
if (node.isMissingNode() || !node.isArray()) {
return null;
}
return node.size();
} | @Test
// The JSON literal "null" is not an array, so the UDF returns null.
public void shouldReturnNullForStrNull() {
// When:
final Integer result = udf.length("null");
// Then:
assertNull(result);
} |
// Checks whether the bucket of the given path is accessible, using the
// default backoff policy and sleeper for transient-failure retries.
public boolean bucketAccessible(GcsPath path) throws IOException {
return bucketAccessible(path, createBackOff(), Sleeper.DEFAULT);
} | @Test
// A transient SocketTimeoutException on the first attempt must be retried and
// the bucket reported accessible on the second.
public void testBucketAccessible() throws IOException {
GcsOptions pipelineOptions = gcsOptionsWithTestCredential();
GcsUtil gcsUtil = pipelineOptions.getGcsUtil();
Storage mockStorage = Mockito.mock(Storage.class);
gcsUtil.setStorageClient(mockStorage);
Storage.Buckets mockStorageObjects = Mockito.mock(Storage.Buckets.class);
Storage.Buckets.Get mockStorageGet = Mockito.mock(Storage.Buckets.Get.class);
BackOff mockBackOff = BackOffAdapter.toGcpBackOff(FluentBackoff.DEFAULT.backoff());
when(mockStorage.buckets()).thenReturn(mockStorageObjects);
when(mockStorageObjects.get("testbucket")).thenReturn(mockStorageGet);
when(mockStorageGet.execute())
.thenThrow(new SocketTimeoutException("SocketException"))
.thenReturn(new Bucket());
assertTrue(
gcsUtil.bucketAccessible(
GcsPath.fromComponents("testbucket", "testobject"),
mockBackOff,
new FastNanoClockAndSleeper()::sleep));
} |
/**
 * Merges the given maps into a single map; on duplicate keys the value from
 * the later map wins. Unlike {@code Collectors.toMap}, this supports null
 * values (see JDK-8148463), which is why a mutable-container collect is used.
 *
 * <p>Improvement: the method only reads from its generic varargs array, so it
 * is annotated {@code @SafeVarargs} to suppress heap-pollution warnings at
 * call sites.
 *
 * @param maps maps to merge, in increasing precedence order
 * @return a new mutable HashMap containing the merged entries
 */
@SafeVarargs
public static Map<String, Object> mergeWithNullableValues(final Map<String, Object>...maps) {
    return Arrays.stream(maps)
        .flatMap(map -> map.entrySet().stream())
        // https://bugs.openjdk.org/browse/JDK-8148463
        .collect(HashMap::new, (m, v) -> m.put(v.getKey(), v.getValue()), HashMap::putAll);
} | @Test
void shouldMergeWithNullableValuesGivenNullAndDuplicate() {
@SuppressWarnings("unchecked")
Map<String, Object> results = MapUtils.mergeWithNullableValues(
Map.of("k1", "v1", "k2", "v1", "k3", "v1"),
Map.of("k1", "v2"),
Map.of("k2", "v2"),
Map.of("k3", "v2"),
new HashMap<>() {{
put("k4", null);
}}
);
Assertions.assertEquals(4, results.size());
Assertions.assertEquals("v2", results.get("k1"));
Assertions.assertEquals("v2", results.get("k2"));
Assertions.assertEquals("v2", results.get("k3"));
Assertions.assertNull(results.get("k4"));
} |
// Builds and starts a console appender writing to System.out with the given
// name and encoder, bound to the supplied logging context.
public ConsoleAppender<ILoggingEvent> newConsoleAppender(Context loggerContext, String name, Encoder<ILoggingEvent> encoder) {
ConsoleAppender<ILoggingEvent> consoleAppender = new ConsoleAppender<>();
consoleAppender.setContext(loggerContext);
consoleAppender.setEncoder(encoder);
consoleAppender.setName(name);
consoleAppender.setTarget("System.out");
consoleAppender.start();
return consoleAppender;
} | @Test
// The returned appender must carry the given name/context/encoder, be started,
// and have no filters attached.
public void newConsoleAppender() {
LoggerContext ctx = underTest.getRootContext();
PatternLayoutEncoder encoder = new PatternLayoutEncoder();
encoder.setContext(ctx);
encoder.setPattern("%msg%n");
encoder.start();
ConsoleAppender<?> appender = underTest.newConsoleAppender(ctx, "MY_APPENDER", encoder);
assertThat(appender.getName()).isEqualTo("MY_APPENDER");
assertThat(appender.getContext()).isSameAs(ctx);
assertThat(appender.isStarted()).isTrue();
assertThat(((PatternLayoutEncoder) appender.getEncoder()).getPattern()).isEqualTo("%msg%n");
assertThat(appender.getCopyOfAttachedFiltersList()).isEmpty();
} |
// Decodes a Hessian-serialized SofaResponse from the buffer into the supplied
// template. When the context marks the call as generic, a GenericObject is
// read field-by-field (with custom-throwable handling for appResponse);
// otherwise the response is deserialized directly and its fields copied over.
// IOExceptions are wrapped in a deserialization error.
@Override
public void decodeObjectByTemplate(AbstractByteBuf data, Map<String, String> context, SofaResponse template)
throws SofaRpcException {
try {
UnsafeByteArrayInputStream inputStream = new UnsafeByteArrayInputStream(data.array());
Hessian2Input input = new Hessian2Input(inputStream);
// Choose the serializer factory based on the SerializeType info in the header.
boolean genericSerialize = context != null && isGenericResponse(
context.get(RemotingConstants.HEAD_GENERIC_TYPE));
if (genericSerialize) {
input.setSerializerFactory(genericSerializerFactory);
GenericObject genericObject = (GenericObject) input.readObject();
template.setErrorMsg((String) genericObject.getField("errorMsg"));
template.setAppResponse(judgeCustomThrowableForGenericObject(genericObject.getField("appResponse")));
template.setResponseProps((Map<String, String>) genericObject.getField("responseProps"));
} else {
input.setSerializerFactory(serializerFactory);
SofaResponse tmp = (SofaResponse) input.readObject();
// copy values to template
template.setErrorMsg(tmp.getErrorMsg());
template.setAppResponse(tmp.getAppResponse());
template.setResponseProps(tmp.getResponseProps());
}
input.close();
} catch (IOException e) {
throw buildDeserializeError(e.getMessage(), e);
}
} | @Test
public void testCustomThrowableDeserializer() throws Exception {
GenericMultipleClassLoaderSofaSerializerFactory factory = new GenericMultipleClassLoaderSofaSerializerFactory();
SofaResponseHessianSerializer serializer = new SofaResponseHessianSerializer(null, factory);
ByteArrayOutputStream bsOut = new ByteArrayOutputStream();
Hessian2Output hessian2Output = new Hessian2Output(bsOut);
hessian2Output.setSerializerFactory(factory);
SofaResponse sofaResponse = new SofaResponse();
MockError mockError = new MockError("MockError");
sofaResponse.setAppResponse(mockError);
hessian2Output.writeObject(sofaResponse);
hessian2Output.flush();
ByteArrayWrapperByteBuf bsIn = new ByteArrayWrapperByteBuf(bsOut.toByteArray());
Map<String, String> ctx = new HashMap<>();
ctx.put(RemotingConstants.HEAD_GENERIC_TYPE, "2");
SofaResponse sofaResponse2 = new SofaResponse();
serializer.decodeObjectByTemplate(bsIn, ctx, sofaResponse2);
Assert.assertTrue(sofaResponse2.getAppResponse() instanceof GenericObject);
Assert.assertEquals("MockError", ((GenericObject) sofaResponse2.getAppResponse()).getField("detailMessage"));
} |
public List<String> getLocalDirs() {
return localDirs.getGoodDirs();
} | @Test
public void testDirStructure() throws Exception {
Configuration conf = new YarnConfiguration();
String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1);
String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
dirSvc.init(conf);
Assert.assertEquals(1, dirSvc.getLocalDirs().size());
dirSvc.close();
} |
public static Calendar getCalendar(Object date, Calendar defaultValue) {
Calendar cal = new GregorianCalendar();
if (date instanceof java.util.Date) {
cal.setTime((java.util.Date) date);
return cal;
} else if (date != null) {
Optional<Date> d = tryToParseDate(date);
if (!d.isPresent()) {
return defaultValue;
}
cal.setTime(d.get());
} else {
cal = defaultValue;
}
return cal;
} | @Test
public void testGetCalendarObjectCalendarWithNullAndCalendarAsDefault() {
Calendar cal = new GregorianCalendar();
assertEquals(cal, Converter.getCalendar(null, cal));
} |
@PUT
@Path("/{logger}")
@Operation(summary = "Set the log level for the specified logger")
@SuppressWarnings("fallthrough")
public Response setLevel(final @PathParam("logger") String namespace,
final Map<String, String> levelMap,
@DefaultValue("worker") @QueryParam("scope") @Parameter(description = "The scope for the logging modification (single-worker, cluster-wide, etc.)") String scope) {
if (scope == null) {
log.warn("Received null scope in request to adjust logging level; will default to {}", WORKER_SCOPE);
scope = WORKER_SCOPE;
}
String levelString = levelMap.get("level");
if (levelString == null) {
throw new BadRequestException("Desired 'level' parameter was not specified in request.");
}
// Make sure that this is a valid level
Level level = Level.toLevel(levelString.toUpperCase(Locale.ROOT), null);
if (level == null) {
throw new NotFoundException("invalid log level '" + levelString + "'.");
}
switch (scope.toLowerCase(Locale.ROOT)) {
default:
log.warn("Received invalid scope '{}' in request to adjust logging level; will default to {}", scope, WORKER_SCOPE);
case WORKER_SCOPE:
List<String> affectedLoggers = herder.setWorkerLoggerLevel(namespace, levelString);
return Response.ok(affectedLoggers).build();
case CLUSTER_SCOPE:
herder.setClusterLoggerLevel(namespace, levelString);
return Response.noContent().build();
}
} | @Test
public void testSetLevelClusterScope() {
final String logger = "org.apache.kafka.connect";
final String level = "TRACE";
Response response = loggingResource.setLevel(logger, Collections.singletonMap("level", level), "cluster");
assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus());
assertNull(response.getEntity());
verify(herder).setClusterLoggerLevel(logger, level);
} |
@Override
public boolean intersects(BitMask mask) {
return mask instanceof LongBitMask ?
(this.mask & ((LongBitMask)mask).asLong()) != 0 :
mask.intersects(this);
} | @Test
public void testIntersects() {
assertThat(new LongBitMask(0L).intersects(EmptyBitMask.get())).isFalse();
assertThat(new LongBitMask(0L).intersects(new LongBitMask(0L))).isFalse();
assertThat(new LongBitMask(2L).intersects(new LongBitMask(2L))).isTrue();
} |
public Optional<KsMaterialization> create(
final String stateStoreName,
final KafkaStreams kafkaStreams,
final Topology topology,
final LogicalSchema schema,
final Serializer<GenericKey> keySerializer,
final Optional<WindowInfo> windowInfo,
final Map<String, ?> streamsProperties,
final KsqlConfig ksqlConfig,
final String applicationId,
final String queryId
) {
final Object appServer = streamsProperties.get(StreamsConfig.APPLICATION_SERVER_CONFIG);
if (appServer == null) {
return Optional.empty();
}
final URL localHost = buildLocalHost(appServer);
final KsLocator locator = locatorFactory.create(
stateStoreName,
kafkaStreams,
topology,
keySerializer,
localHost,
ksqlConfig.getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED),
queryId
);
final KsStateStore stateStore = storeFactory.create(
stateStoreName,
kafkaStreams,
schema,
ksqlConfig,
queryId
);
final KsMaterialization materialization = materializationFactory.create(
windowInfo,
locator,
stateStore
);
return Optional.of(materialization);
} | @Test
public void shouldBuildStateStoreWithCorrectParams() {
// When:
factory.create(STORE_NAME, kafkaStreams, topology, SCHEMA, keySerializer, Optional.empty(),
streamsProperties, ksqlConfig, APPLICATION_ID, "queryId");
// Then:
verify(storeFactory).create(
STORE_NAME,
kafkaStreams,
SCHEMA,
ksqlConfig,
"queryId"
);
} |
public static Entry entry(String name) throws BlockException {
return Env.sph.entry(name, EntryType.OUT, 1, OBJECTS0);
} | @Test
public void testStringEntryNormal() throws BlockException {
Entry e = SphU.entry("resourceName");
assertNotNull(e);
assertEquals(e.resourceWrapper.getName(), "resourceName");
assertEquals(e.resourceWrapper.getEntryType(), EntryType.OUT);
assertEquals(ContextUtil.getContext().getName(), Constants.CONTEXT_DEFAULT_NAME);
e.exit();
} |
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
if (boss != renamedBoss)
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
} | @Test
public void testCorporealBeastKill()
{
ChatMessage chatMessageEvent = new ChatMessage(null, GAMEMESSAGE, "", "Your Corporeal Beast kill count is: <col=ff0000>4</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessageEvent);
verify(configManager).setRSProfileConfiguration("killcount", "corporeal beast", 4);
} |
@Override
@CacheEvict(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#updateReqVO.id")
public void updateMailAccount(MailAccountSaveReqVO updateReqVO) {
// 校验是否存在
validateMailAccountExists(updateReqVO.getId());
// 更新
MailAccountDO updateObj = BeanUtils.toBean(updateReqVO, MailAccountDO.class);
mailAccountMapper.updateById(updateObj);
} | @Test
public void testUpdateMailAccount_success() {
// mock 数据
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount);// @Sql: 先插入出一条存在的数据
// 准备参数
MailAccountSaveReqVO reqVO = randomPojo(MailAccountSaveReqVO.class, o -> {
o.setId(dbMailAccount.getId()); // 设置更新的 ID
o.setMail(randomEmail());
});
// 调用
mailAccountService.updateMailAccount(reqVO);
// 校验是否更新正确
MailAccountDO mailAccount = mailAccountMapper.selectById(reqVO.getId()); // 获取最新的
assertPojoEquals(reqVO, mailAccount);
} |
public CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> commitTransactionalOffset(
RequestContext context,
TxnOffsetCommitRequestData request
) throws ApiException {
validateTransactionalOffsetCommit(context, request);
final TxnOffsetCommitResponseData response = new TxnOffsetCommitResponseData();
final List<CoordinatorRecord> records = new ArrayList<>();
final long currentTimeMs = time.milliseconds();
request.topics().forEach(topic -> {
final TxnOffsetCommitResponseTopic topicResponse = new TxnOffsetCommitResponseTopic().setName(topic.name());
response.topics().add(topicResponse);
topic.partitions().forEach(partition -> {
if (isMetadataInvalid(partition.committedMetadata())) {
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
} else {
log.debug("[GroupId {}] Committing transactional offsets {} for partition {}-{} from member {} with leader epoch {}.",
request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
request.memberId(), partition.committedLeaderEpoch());
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.NONE.code()));
final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
partition,
currentTimeMs
);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
request.groupId(),
topic.name(),
partition.partitionIndex(),
offsetAndMetadata,
metadataImage.features().metadataVersion()
));
}
});
});
if (!records.isEmpty()) {
metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
}
return new CoordinatorResult<>(records, response);
} | @Test
public void testConsumerGroupTransactionalOffsetCommitWithUnknownMemberId() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
// Create an empty group.
context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup(
"foo",
true
);
assertThrows(UnknownMemberIdException.class, () -> context.commitTransactionalOffset(
new TxnOffsetCommitRequestData()
.setGroupId("foo")
.setMemberId("member")
.setGenerationId(10)
.setTopics(Collections.singletonList(
new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic()
.setName("bar")
.setPartitions(Collections.singletonList(
new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100L)
.setCommittedLeaderEpoch(10)
.setCommittedMetadata("metadata")
))
))
));
} |
@WorkerThread
@Override
public Unit call()
throws IOException,
StreamNotFoundException,
ShellNotRunningException,
IllegalArgumentException {
OutputStream outputStream;
File destFile = null;
switch (fileAbstraction.scheme) {
case CONTENT:
Objects.requireNonNull(fileAbstraction.uri);
if (fileAbstraction.uri.getAuthority().equals(context.get().getPackageName())) {
DocumentFile documentFile =
DocumentFile.fromSingleUri(AppConfig.getInstance(), fileAbstraction.uri);
if (documentFile != null && documentFile.exists() && documentFile.canWrite()) {
outputStream = contentResolver.openOutputStream(fileAbstraction.uri, "wt");
} else {
destFile = FileUtils.fromContentUri(fileAbstraction.uri);
outputStream = openFile(destFile, context.get());
}
} else {
outputStream = contentResolver.openOutputStream(fileAbstraction.uri, "wt");
}
break;
case FILE:
final HybridFileParcelable hybridFileParcelable = fileAbstraction.hybridFileParcelable;
Objects.requireNonNull(hybridFileParcelable);
Context context = this.context.get();
if (context == null) {
return null;
}
outputStream = openFile(hybridFileParcelable.getFile(), context);
destFile = fileAbstraction.hybridFileParcelable.getFile();
break;
default:
throw new IllegalArgumentException(
"The scheme for '" + fileAbstraction.scheme + "' cannot be processed!");
}
Objects.requireNonNull(outputStream);
outputStream.write(dataToSave.getBytes());
outputStream.close();
if (cachedFile != null && cachedFile.exists() && destFile != null) {
// cat cache content to original file and delete cache file
ConcatenateFileCommand.INSTANCE.concatenateFile(cachedFile.getPath(), destFile.getPath());
cachedFile.delete();
}
return Unit.INSTANCE;
} | @Test
@Config(shadows = {BlockAllOutputStreamsFileUtil.class})
public void testWriteFileRootCacheFileNotFound()
throws ShellNotRunningException, IOException, StreamNotFoundException {
File file = new File(Environment.getExternalStorageDirectory(), "test.txt");
Uri uri = Uri.fromFile(file);
File cacheFile = new File(Environment.getExternalStorageDirectory(), "test.txt.cache");
Context ctx = ApplicationProvider.getApplicationContext();
ContentResolver cr = ctx.getContentResolver();
try {
WriteTextFileCallable task =
new WriteTextFileCallable(
ctx, cr, new EditableFileAbstraction(ctx, uri), contents, cacheFile, true);
task.call();
} catch (StreamNotFoundException e) {
return;
}
fail();
} |
@Override
public String toString() {
return "[" + startOffset + ", " + endOffset + ")";
} | @Test
public void testReadingGranularityAndFractionConsumed() throws IOException {
// Tests that the reader correctly snaps to multiples of the given granularity
// (note: this is testing test code), and that getFractionConsumed works sensibly
// in the face of that.
PipelineOptions options = PipelineOptionsFactory.create();
CoarseRangeSource source = new CoarseRangeSource(13, 35, 1, 10);
try (CoarseRangeReader reader = source.createReader(options)) {
List<Integer> items = new ArrayList<>();
assertEquals(0.0, reader.getFractionConsumed(), 1e-6);
assertTrue(reader.start());
items.add(reader.getCurrent());
while (reader.advance()) {
Double fraction = reader.getFractionConsumed();
assertNotNull(fraction);
assertTrue(fraction.toString(), fraction > 0.0);
assertTrue(fraction.toString(), fraction <= 1.0);
items.add(reader.getCurrent());
}
assertEquals(1.0, reader.getFractionConsumed(), 1e-6);
assertEquals(20, items.size());
assertEquals(20, items.get(0).intValue());
assertEquals(39, items.get(items.size() - 1).intValue());
source = new CoarseRangeSource(13, 17, 1, 10);
}
try (BoundedSource.BoundedReader<Integer> reader = source.createReader(options)) {
assertFalse(reader.start());
}
} |
@Override
public AwsProxyResponse handle(Throwable ex) {
log.error("Called exception handler for:", ex);
// adding a print stack trace in case we have no appender or we are running inside SAM local, where need the
// output to go to the stderr.
ex.printStackTrace();
if (ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException) {
return new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR));
} else {
return new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR));
}
} | @Test
void typedHandle_InvalidResponseObjectException_502State() {
AwsProxyResponse resp = exceptionHandler.handle(new InvalidResponseObjectException(INVALID_RESPONSE_MESSAGE, null));
assertNotNull(resp);
assertEquals(502, resp.getStatusCode());
} |
@Override
public void validate(final String methodName, final Class<?>[] parameterTypes, final Object[] arguments) throws Exception {
List<Class<?>> groups = new ArrayList<>();
Class<?> methodClass = methodClass(methodName);
if (Objects.nonNull(methodClass)) {
groups.add(methodClass);
}
Set<ConstraintViolation<?>> violations = new HashSet<>();
Method method = clazz.getMethod(methodName, parameterTypes);
Class<?>[] methodClasses;
if (method.isAnnotationPresent(MethodValidated.class)) {
methodClasses = method.getAnnotation(MethodValidated.class).value();
groups.addAll(Arrays.asList(methodClasses));
}
// add into default group
groups.add(0, Default.class);
groups.add(1, clazz);
// convert list to array
Class<?>[] classGroups = new Class<?>[groups.size()];
classGroups = groups.toArray(classGroups);
Object parameterBean = getMethodParameterBean(clazz, method, arguments);
if (parameterBean != null) {
violations.addAll(validator.validate(parameterBean, classGroups));
}
for (Object arg : arguments) {
validate(violations, arg, classGroups);
}
if (!violations.isEmpty()) {
LOG.error("Failed to validate service: {}, method: {}, cause: {}", clazz.getName(), methodName, violations);
StringBuilder validateError = new StringBuilder();
violations.forEach(each -> validateError.append(each.getMessage()).append(","));
throw new ValidationException(validateError.substring(0, validateError.length() - 1));
}
} | @Test
public void testValidateWithNonExistMethod() throws Exception {
assertThrows(NoSuchMethodException.class, () -> apacheDubboClientValidatorUnderTest
.validate("nonExistingMethod", new Class<?>[]{String.class}, new Object[]{"arg1"}));
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void nestedContexts2() {
String inputExpression = "{ an applicant : { "
+ " home address : {"
+ " street name: \"broadway st\","
+ " city : \"New York\" "
+ " } "
+ " },\n "
+ " street : an applicant.home address.street name \n"
+ "}";
BaseNode ctxbase = parse( inputExpression );
assertThat( ctxbase).isInstanceOf(ContextNode.class);
assertThat( ctxbase.getText()).isEqualTo(inputExpression);
ContextNode ctx = (ContextNode) ctxbase;
assertThat( ctx.getEntries()).hasSize(2);
ContextEntryNode entry = ctx.getEntries().get( 1 );
assertThat( entry.getName()).isInstanceOf(NameDefNode.class);
assertThat( entry.getResultType()).isEqualTo(BuiltInType.STRING);
NameDefNode name = (NameDefNode) entry.getName();
assertThat( name.getText()).isEqualTo("street");
assertThat( entry.getValue()).isInstanceOf(QualifiedNameNode.class);
QualifiedNameNode qnn = (QualifiedNameNode) entry.getValue();
assertThat( qnn.getParts().get( 0 ).getText()).isEqualTo("an applicant");
assertThat( qnn.getParts().get( 1 ).getText()).isEqualTo("home address");
assertThat( qnn.getParts().get( 2 ).getText()).isEqualTo("street name");
} |
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
final String... stateStoreNames) {
process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
} | @Test
public void shouldNotAllowNullProcessSupplierOnProcessWithNamedAndStores() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.process((ProcessorSupplier<? super String, ? super String, Void, Void>) null,
Named.as("processor"), "stateStore"));
assertThat(exception.getMessage(), equalTo("processorSupplier can't be null"));
} |
@Override
public int run(String[] args) throws Exception {
YarnConfiguration yarnConf =
getConf() == null ? new YarnConfiguration() : new YarnConfiguration(
getConf());
boolean isHAEnabled =
yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
YarnConfiguration.DEFAULT_RM_HA_ENABLED);
if (args.length < 1) {
printUsage("", isHAEnabled);
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = args[i++];
exitCode = 0;
if ("-help".equals(cmd)) {
if (i < args.length) {
printUsage(args[i], isHAEnabled);
} else {
printHelp("", isHAEnabled);
}
return exitCode;
}
if (USAGE.containsKey(cmd)) {
if (isHAEnabled) {
return super.run(args);
}
System.out.println("Cannot run " + cmd
+ " when ResourceManager HA is not enabled");
return -1;
}
//
// verify that we have enough command line parameters
//
String subClusterId = StringUtils.EMPTY;
if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
"-refreshNodesResources".equals(cmd) ||
"-refreshServiceAcl".equals(cmd) ||
"-refreshUserToGroupsMappings".equals(cmd) ||
"-refreshSuperUserGroupsConfiguration".equals(cmd) ||
"-refreshClusterMaxPriority".equals(cmd)) {
subClusterId = parseSubClusterId(args, isHAEnabled);
// If we enable Federation mode, the number of args may be either one or three.
// Example: -refreshQueues or -refreshQueues -subClusterId SC-1
if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) {
printUsage(cmd, isHAEnabled);
return exitCode;
} else if (!isYarnFederationEnabled(getConf()) && args.length != 1) {
// If Federation mode is not enabled, then the number of args can only be one.
// Example: -refreshQueues
printUsage(cmd, isHAEnabled);
return exitCode;
}
}
// If it is federation mode, we will print federation mode information
if (isYarnFederationEnabled(getConf())) {
System.out.println("Using YARN Federation mode.");
}
try {
if ("-refreshQueues".equals(cmd)) {
exitCode = refreshQueues(subClusterId);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
} else if ("-refreshNodesResources".equals(cmd)) {
exitCode = refreshNodesResources(subClusterId);
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings(subClusterId);
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration(subClusterId);
} else if ("-refreshAdminAcls".equals(cmd)) {
exitCode = refreshAdminAcls(subClusterId);
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcls(subClusterId);
} else if ("-refreshClusterMaxPriority".equals(cmd)) {
exitCode = refreshClusterMaxPriority(subClusterId);
} else if ("-getGroups".equals(cmd)) {
String[] usernames = Arrays.copyOfRange(args, i, args.length);
exitCode = getGroups(usernames);
} else if ("-updateNodeResource".equals(cmd)) {
exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId);
} else if ("-addToClusterNodeLabels".equals(cmd)) {
exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-removeFromClusterNodeLabels".equals(cmd)) {
exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-replaceLabelsOnNode".equals(cmd)) {
exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("", isHAEnabled);
}
} catch (IllegalArgumentException arge) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd, isHAEnabled);
} catch (RemoteException e) {
//
// This is a error returned by hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": "
+ content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
}
} catch (Exception e) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
}
if (null != localNodeLabelsManager) {
localNodeLabelsManager.stop();
}
return exitCode;
} | @Test
public void testTransitionToStandby() throws Exception {
String[] args = {"-transitionToStandby", "rm1"};
// RM HA is disabled.
// transitionToStandby should not be executed
assertEquals(-1, rmAdminCLI.run(args));
verify(haadmin, never()).transitionToStandby(
any(HAServiceProtocol.StateChangeRequestInfo.class));
// Now RM HA is enabled.
// transitionToActive should be executed
assertEquals(0, rmAdminCLIWithHAEnabled.run(args));
verify(haadmin).transitionToStandby(
any(HAServiceProtocol.StateChangeRequestInfo.class));
} |
static public Entry buildMenuStructure(String xml) {
final Reader reader = new StringReader(xml);
return buildMenuStructure(reader);
} | @Test
public void givenXmlWithChildEntryWithTwoBuilders_createsStructureWithChildEntry() {
String xmlWithoutContent = "<FreeplaneUIEntries><Entry builder='builder1, builder2'/></FreeplaneUIEntries>";
Entry builtMenuStructure = XmlEntryStructureBuilder.buildMenuStructure(xmlWithoutContent);
Entry menuStructureWithChildEntry = new Entry();
final Entry childEntry = new Entry();
childEntry.setBuilders(asList("builder1", "builder2"));
menuStructureWithChildEntry.addChild(childEntry);
assertThat(builtMenuStructure, equalTo(menuStructureWithChildEntry));
} |
@Override
public Collection values() {
return null;
} | @Test
public void testValues() throws Exception {
assertNull(NULL_QUERY_CACHE.values());
} |
@Override
protected Map<String, ConfigValue> validateSourceConnectorConfig(SourceConnector connector, ConfigDef configDef, Map<String, String> config) {
Map<String, ConfigValue> result = super.validateSourceConnectorConfig(connector, configDef, config);
validateSourceConnectorExactlyOnceSupport(config, result, connector);
validateSourceConnectorTransactionBoundary(config, result, connector);
return result;
} | @Test
public void testConnectorTransactionBoundaryValidationHandlesConnectorErrorsGracefully() {
herder = exactlyOnceHerder();
Map<String, String> config = new HashMap<>();
config.put(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG, CONNECTOR.toString());
SourceConnector connectorMock = mock(SourceConnector.class);
String errorMessage = "Wait I thought we tested for this?";
when(connectorMock.canDefineTransactionBoundaries(eq(config))).thenThrow(new ConnectException(errorMessage));
Map<String, ConfigValue> validatedConfigs = herder.validateSourceConnectorConfig(
connectorMock, SourceConnectorConfig.configDef(), config);
List<String> errors = validatedConfigs.get(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG).errorMessages();
assertFalse(errors.isEmpty());
assertTrue(
errors.get(0).contains(errorMessage),
"Error message did not contain expected text: " + errors.get(0));
assertEquals(1, errors.size());
} |
// Convenience overload: substitute variables in 'val' using only 'pc1'
// (no secondary property container).
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
return substVars(val, pc1, null);
} | @Test
// A variable defined as the empty string substitutes to "" (no error).
public void emptyVariableIsAccepted() throws JoranException, ScanException {
String varName = "var"+diff;
context.putProperty(varName, "");
String r = OptionHelper.substVars("x ${"+varName+"} b", context);
assertEquals("x b", r);
} |
// Event-bus handler: propagates a qualified data-source state change to every
// static data-source rule attribute of the affected database. Synchronized so
// concurrent events are applied one at a time.
@Subscribe
public synchronized void renew(final QualifiedDataSourceStateEvent event) {
QualifiedDataSource qualifiedDataSource = event.getQualifiedDataSource();
// Ignore events for databases this context does not manage.
if (!contextManager.getMetaDataContexts().getMetaData().containsDatabase(qualifiedDataSource.getDatabaseName())) {
return;
}
ShardingSphereDatabase database = contextManager.getMetaDataContexts().getMetaData().getDatabase(qualifiedDataSource.getDatabaseName());
for (StaticDataSourceRuleAttribute each : database.getRuleMetaData().getAttributes(StaticDataSourceRuleAttribute.class)) {
each.updateStatus(qualifiedDataSource, event.getStatus().getState());
}
} | @Test
// A DISABLED state event must be forwarded verbatim to each rule attribute.
void assertRenewForDisableStateChanged() {
StaticDataSourceRuleAttribute ruleAttribute = mock(StaticDataSourceRuleAttribute.class);
when(database.getRuleMetaData().getAttributes(StaticDataSourceRuleAttribute.class)).thenReturn(Collections.singleton(ruleAttribute));
QualifiedDataSourceStateEvent event = new QualifiedDataSourceStateEvent(new QualifiedDataSource("db.readwrite_ds.ds_0"), new QualifiedDataSourceState(DataSourceState.DISABLED));
subscriber.renew(event);
verify(ruleAttribute).updateStatus(
argThat(qualifiedDataSource -> Objects.equals(event.getQualifiedDataSource(), qualifiedDataSource)),
argThat(dataSourceState -> event.getStatus().getState() == dataSourceState));
} |
/**
 * Returns a transform that computes the multiset union of its input with
 * {@code rightCollection}.
 *
 * @param rightCollection the right-hand side of the union; must not be null
 * @throws NullPointerException if {@code rightCollection} is null
 */
public static <T> PTransform<PCollection<T>, PCollection<T>> unionAll(
    PCollection<T> rightCollection) {
  final PCollection<T> right =
      checkNotNull(rightCollection, "rightCollection argument is null");
  return new SetImpl<>(right, unionAll());
} | @Test
// End-to-end check of Sets.unionAll over a PCollectionList: the result is the
// multiset union (duplicates preserved) and the Row variant keeps its schema.
@Category(NeedsRunner.class)
public void testUnionAllCollections() {
PCollection<String> third = p.apply("third", Create.of(Arrays.asList("a", "b", "b", "k", "k")));
PCollection<Row> thirdRows = p.apply("thirdRows", Create.of(toRows("a", "b", "b", "k", "k")));
PAssert.that(
PCollectionList.of(first).and(second).and(third).apply("stringsCols", Sets.unionAll()))
.containsInAnyOrder(
"a", "a", "a", "a", "a", "a", "b", "b", "b", "b", "b", "b", "b", "c", "c", "d", "d",
"d", "d", "e", "e", "f", "f", "g", "g", "h", "h", "k", "k");
PCollection<Row> results =
PCollectionList.of(firstRows)
.and(secondRows)
.and(thirdRows)
.apply("rowCols", Sets.unionAll());
PAssert.that(results)
.containsInAnyOrder(
toRows(
"a", "a", "a", "a", "a", "a", "b", "b", "b", "b", "b", "b", "b", "c", "c", "d", "d",
"d", "d", "e", "e", "f", "f", "g", "g", "h", "h", "k", "k"));
assertEquals(schema, results.getSchema());
p.run();
} |
/**
 * Lists all files under {@code directory} that match {@code fileFilter},
 * following symbolic links and descending without a depth limit.
 *
 * @param directory the directory to scan; must exist and be a directory
 * @param fileFilter predicate selecting which visited files are returned
 * @return the matching file paths collected by the visitor
 * @throws IOException if the directory tree cannot be traversed
 * @throws IllegalArgumentException if {@code directory} does not exist or is not a directory
 * @throws NullPointerException if either argument is null
 */
public static Collection<java.nio.file.Path> listFilesInDirectory(
        final java.nio.file.Path directory, final Predicate<java.nio.file.Path> fileFilter)
        throws IOException {
    checkNotNull(directory, "directory");
    checkNotNull(fileFilter, "fileFilter");
    if (!Files.exists(directory)) {
        // Fixed typo in the user-facing error message: "dose" -> "does".
        throw new IllegalArgumentException(
                String.format("The directory %s does not exist.", directory));
    }
    if (!Files.isDirectory(directory)) {
        throw new IllegalArgumentException(
                String.format("The %s is not a directory.", directory));
    }
    // FOLLOW_LINKS + MAX_VALUE: traverse the whole tree, resolving symlinks.
    final FilterFileVisitor filterFileVisitor = new FilterFileVisitor(fileFilter);
    Files.walkFileTree(
            directory,
            EnumSet.of(FileVisitOption.FOLLOW_LINKS),
            Integer.MAX_VALUE,
            filterFileVisitor);
    return filterFileVisitor.getFiles();
} | @Test
// Files created under a temp dir must all be returned by listFilesInDirectory
// when filtered with the jar-file predicate.
void testListFilesInPath() throws IOException {
final java.nio.file.Path testDir =
TempDirUtils.newFolder(temporaryFolder, "_test_1").toPath();
final Collection<java.nio.file.Path> testFiles = prepareTestFiles(testDir);
final Collection<java.nio.file.Path> filesInDirectory =
FileUtils.listFilesInDirectory(testDir, FileUtils::isJarFile);
assertThat(filesInDirectory).containsExactlyInAnyOrderElementsOf(testFiles);
} |
// Factory: maps a MySQL wire-protocol command type to its packet decoder.
// Unrecognized commands fall through to MySQLUnsupportedCommandPacket.
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload,
final ConnectionSession connectionSession) {
switch (commandPacketType) {
case COM_QUIT:
return new MySQLComQuitPacket();
case COM_INIT_DB:
return new MySQLComInitDbPacket(payload);
case COM_FIELD_LIST:
return new MySQLComFieldListPacket(payload);
case COM_QUERY:
return new MySQLComQueryPacket(payload);
case COM_STMT_PREPARE:
return new MySQLComStmtPreparePacket(payload);
case COM_STMT_EXECUTE:
// Peek the statement id (little-endian int at the current reader index, without
// consuming it) to look up the prepared statement and its parameter count.
MySQLServerPreparedStatement serverPreparedStatement =
connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
case COM_STMT_SEND_LONG_DATA:
return new MySQLComStmtSendLongDataPacket(payload);
case COM_STMT_RESET:
return new MySQLComStmtResetPacket(payload);
case COM_STMT_CLOSE:
return new MySQLComStmtClosePacket(payload);
case COM_SET_OPTION:
return new MySQLComSetOptionPacket(payload);
case COM_PING:
return new MySQLComPingPacket();
case COM_RESET_CONNECTION:
return new MySQLComResetConnectionPacket();
default:
return new MySQLUnsupportedCommandPacket(commandPacketType);
}
} | @Test
// COM_TIME has no dedicated decoder, so the factory must return the unsupported-command packet.
void assertNewInstanceWithComTimePacket() {
assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_TIME, payload, connectionSession), instanceOf(MySQLUnsupportedCommandPacket.class));
} |
// Serializes this ESP header: 4-byte SPI, 4-byte sequence number, then the
// serialized payload (if any). Also registers PROTOCOL_ESP as the next header
// on an extension-header parent. Byte order is ByteBuffer's default (big-endian).
@Override
public byte[] serialize() {
byte[] payloadData = null;
if (this.payload != null) {
// Link the payload back to this header before serializing it.
this.payload.setParent(this);
payloadData = this.payload.serialize();
}
int payloadLength = 0;
if (payloadData != null) {
payloadLength = payloadData.length;
}
final byte[] data = new byte[HEADER_LENGTH + payloadLength];
final ByteBuffer bb = ByteBuffer.wrap(data);
bb.putInt(this.securityParamIndex);
bb.putInt(this.sequence);
if (payloadData != null) {
bb.put(payloadData);
}
if (this.parent != null && this.parent instanceof IExtensionHeader) {
((IExtensionHeader) this.parent).setNextHeader(IPv6.PROTOCOL_ESP);
}
return data;
} | @Test
// SPI + sequence + payload must serialize to the expected reference bytes.
public void testSerialize() {
EncapSecurityPayload esp = new EncapSecurityPayload();
esp.setSecurityParamIndex(0x13572468);
esp.setSequence(0xffff00);
esp.setPayload(data);
assertArrayEquals(esp.serialize(), bytePacket);
} |
// Static factory: wraps a BoundedSource in a Bounded read transform with no explicit name.
public static <T> Bounded<T> from(BoundedSource<T> source) {
return new Bounded<>(null, source);
} | @Test
// Read.from must carry the source's element type through to the output PCollection.
public void testReadBoundedPreservesTypeDescriptor() {
PCollection<String> input = pipeline.apply(Read.from(new SerializableBoundedSource()));
TypeDescriptor<String> typeDescriptor = input.getTypeDescriptor();
assertEquals(String.class, typeDescriptor.getType());
ListBoundedSource<Long> longs = new ListBoundedSource<>(VarLongCoder.of());
PCollection<List<Long>> numbers = pipeline.apply(Read.from(longs));
assertEquals(new TypeDescriptor<List<Long>>() {}, numbers.getTypeDescriptor());
} |
/**
 * Decides from column statistics whether the whole row group can be dropped,
 * i.e. whether no record could possibly match {@code pred}.
 *
 * @throws NullPointerException if {@code pred} or {@code columns} is null
 */
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
    Objects.requireNonNull(pred, "pred cannot be null");
    Objects.requireNonNull(columns, "columns cannot be null");
    final StatisticsFilter statisticsFilter = new StatisticsFilter(columns);
    return pred.accept(statisticsFilter);
} | @Test
// lt() dropping rules: drop when max < threshold, keep otherwise; all-null and
// missing columns drop; missing min/max stats never drop.
public void testLt() {
assertTrue(canDrop(lt(intColumn, 9), columnMetas));
assertTrue(canDrop(lt(intColumn, 10), columnMetas));
assertFalse(canDrop(lt(intColumn, 100), columnMetas));
assertFalse(canDrop(lt(intColumn, 101), columnMetas));
assertTrue(canDrop(lt(intColumn, 0), nullColumnMetas));
assertTrue(canDrop(lt(intColumn, 7), nullColumnMetas));
assertTrue(canDrop(lt(missingColumn, fromString("any")), columnMetas));
assertFalse(canDrop(lt(intColumn, 0), missingMinMaxColumnMetas));
assertFalse(canDrop(lt(doubleColumn, 0.0), missingMinMaxColumnMetas));
} |
// Returns the local metastore from the supplier, or null when it cannot be opened.
// NOTE(review): the exception is deliberately swallowed (logged) and null returned;
// callers must null-check rather than catch.
@Override public IMetaStore getMetastore() {
try {
return supplier.getMetastore();
} catch ( MetaStoreException e ) {
logger.error( "Unable to open local metastore", e );
return null;
}
} | @Test
// Happy path: the provider returns exactly what the supplier yields.
public void testGetMetastoreSuccess() throws MetaStoreException {
IMetaStore metaStore = mock( IMetaStore.class );
when( supplier.getMetastore() ).thenReturn( metaStore );
assertEquals( metaStore, localFileMetastoreProvider.getMetastore() );
} |
/**
 * Looks up a table by its fully-qualified name (catalog.db.table).
 *
 * @return the table, or {@link Optional#empty()} when the lookup yields null
 */
public Optional<Table> getTable(TableName tableName) {
    final Table resolved = getTable(tableName.getCatalog(), tableName.getDb(), tableName.getTbl());
    return Optional.ofNullable(resolved);
} | @Test
// Covers table lookup across catalogs: internal-catalog hits and misses, a mocked
// Hive metastore table (UUID includes create time), an unknown catalog (null), and
// metastore errors surfacing as StarRocksConnectorException.
public void testGetTable(@Mocked HiveMetaStoreClient metaStoreThriftClient) throws TException {
List<FieldSchema> partKeys = Lists.newArrayList(new FieldSchema("col1", "BIGINT", ""));
List<FieldSchema> unPartKeys = Lists.newArrayList(new FieldSchema("col2", "INT", ""));
String hdfsPath = "hdfs://127.0.0.1:10000/hive";
StorageDescriptor sd = new StorageDescriptor();
sd.setCols(unPartKeys);
sd.setLocation(hdfsPath);
sd.setInputFormat(MAPRED_PARQUET_INPUT_FORMAT_CLASS);
Table msTable1 = new Table();
msTable1.setDbName("hive_db");
msTable1.setTableName("hive_table");
msTable1.setPartitionKeys(partKeys);
msTable1.setSd(sd);
msTable1.setTableType("MANAGED_TABLE");
msTable1.setCreateTime(20201010);
new Expectations() {
{
metaStoreThriftClient.getTable("hive_db", "hive_table");
result = msTable1;
}
};
MetadataMgr metadataMgr = GlobalStateMgr.getCurrentState().getMetadataMgr();
com.starrocks.catalog.Table internalTable = metadataMgr.getTable("default_catalog", "db1", "tbl1");
Assert.assertNotNull(internalTable);
Assert.assertNull(metadataMgr.getTable("default_catalog", "not_exist_db", "xxx"));
Assert.assertNull(metadataMgr.getTable("default_catalog", "db1", "not_exist_table"));
com.starrocks.catalog.Table tbl1 = metadataMgr.getTable("hive_catalog", "hive_db", "hive_table");
Assert.assertNotNull(tbl1);
Assert.assertEquals("hive_catalog.hive_db.hive_table.20201010", tbl1.getUUID());
com.starrocks.catalog.Table tbl2 = metadataMgr.getTable("not_exist_catalog", "xxx", "xxx");
Assert.assertNull(tbl2);
Assert.assertThrows(StarRocksConnectorException.class,
() -> metadataMgr.getTable("hive_catalog", "not_exist_db", "xxx"));
Assert.assertThrows(StarRocksConnectorException.class,
() -> metadataMgr.getTable("hive_catalog", "hive_db", "not_exist_tbl"));
} |
/**
 * Returns the permissions granted to {@code appId}; an app with no recorded
 * security state yields the empty permission set.
 */
@Override
public Set<Permission> getGrantedPermissions(ApplicationId appId) {
    final SecurityInfo noState = new SecurityInfo(ImmutableSet.of(), null);
    return states.asJavaMap().getOrDefault(appId, noState).getPermissions();
} | @Test
// The pre-seeded app state must include the test permission.
public void testGetGrantedPermissions() {
Set<Permission> permissions = states.get(appId).getPermissions();
assertTrue(permissions.contains(testPermission));
} |
// Two-level lookup: cache key -> payload checksum -> cached ConfigResponse.
// Returns null when the key has no recorded checksum (or no config for it).
ConfigResponse get(ConfigCacheKey key) {
    final PayloadChecksum checksum = checksums.get(key);
    return (checksum == null) ? null : checksumToConfig.get(checksum);
} | @Test
// Two distinct keys mapping to the same checksum must resolve to the same cached config.
public void testThatCacheWorksWithDifferentKeySameMd5() {
assertSame(cache.get(fooBarCacheKey), cache.get(bazQuuxCacheKey));
} |
/**
 * Computes this node's health: GREEN when the Elasticsearch process is
 * operational in the cluster app state, RED with an explanatory cause otherwise.
 * Node details are attached in both cases.
 */
@Override
public NodeHealth get() {
    final NodeHealth.Builder builder = NodeHealth.newNodeHealthBuilder();
    final boolean esOperational = clusterAppState.isOperational(ProcessId.ELASTICSEARCH, true);
    if (esOperational) {
        builder.setStatus(NodeHealth.Status.GREEN);
    } else {
        builder.setStatus(NodeHealth.Status.RED)
            .addCause("Elasticsearch is not operational");
    }
    return builder
        .setDetails(nodeDetails)
        .build();
} | @Test
// When the ES process reports operational, the provider must report GREEN.
public void get_returns_status_GREEN_if_elasticsearch_process_is_operational_in_ClusterAppState() {
Properties properties = new Properties();
setRequiredPropertiesAndMocks(properties);
when(clusterAppState.isOperational(ProcessId.ELASTICSEARCH, true)).thenReturn(true);
SearchNodeHealthProvider underTest = new SearchNodeHealthProvider(new Props(properties), clusterAppState, networkUtils, clock);
NodeHealth nodeHealth = underTest.get();
assertThat(nodeHealth.getStatus()).isEqualTo(NodeHealth.Status.GREEN);
} |
// gRPC handler for ReceiveMessage: derives an effective long-polling time from the
// request/deadline/config, validates topic+group and invisible time, builds the
// subscription filter, then pops messages asynchronously. All outcomes (including
// errors) are written through the response stream writer.
public void receiveMessage(ProxyContext ctx, ReceiveMessageRequest request,
StreamObserver<ReceiveMessageResponse> responseObserver) {
ReceiveMessageResponseStreamWriter writer = createWriter(ctx, responseObserver);
try {
Settings settings = this.grpcClientSettingsManager.getClientSettings(ctx);
Subscription subscription = settings.getSubscription();
boolean fifo = subscription.getFifo();
int maxAttempts = settings.getBackoffPolicy().getMaxAttempts();
ProxyConfig config = ConfigurationManager.getProxyConfig();
Long timeRemaining = ctx.getRemainingMs();
// Polling time: explicit request value, or the remaining deadline minus half
// the request timeout; then clamped to the configured [min, max] window.
long pollingTime;
if (request.hasLongPollingTimeout()) {
pollingTime = Durations.toMillis(request.getLongPollingTimeout());
} else {
pollingTime = timeRemaining - Durations.toMillis(settings.getRequestTimeout()) / 2;
}
if (pollingTime < config.getGrpcClientConsumerMinLongPollingTimeoutMillis()) {
pollingTime = config.getGrpcClientConsumerMinLongPollingTimeoutMillis();
}
if (pollingTime > config.getGrpcClientConsumerMaxLongPollingTimeoutMillis()) {
pollingTime = config.getGrpcClientConsumerMaxLongPollingTimeoutMillis();
}
// If even the clamped value exceeds the deadline, fall back to the deadline
// itself, or reject when the deadline is below the configured minimum.
if (pollingTime > timeRemaining) {
if (timeRemaining >= config.getGrpcClientConsumerMinLongPollingTimeoutMillis()) {
pollingTime = timeRemaining;
} else {
// Older clients (before ILLEGAL_POLLING_TIME was introduced) get BAD_REQUEST.
final String clientVersion = ctx.getClientVersion();
Code code =
null == clientVersion || ILLEGAL_POLLING_TIME_INTRODUCED_CLIENT_VERSION.compareTo(clientVersion) > 0 ?
Code.BAD_REQUEST : Code.ILLEGAL_POLLING_TIME;
writer.writeAndComplete(ctx, code, "The deadline time remaining is not enough" +
" for polling, please check network condition");
return;
}
}
validateTopicAndConsumerGroup(request.getMessageQueue().getTopic(), request.getGroup());
String topic = request.getMessageQueue().getTopic().getName();
String group = request.getGroup().getName();
long actualInvisibleTime = Durations.toMillis(request.getInvisibleDuration());
ProxyConfig proxyConfig = ConfigurationManager.getProxyConfig();
// Auto-renew mode overrides the client's invisible time with the proxy default.
if (proxyConfig.isEnableProxyAutoRenew() && request.getAutoRenew()) {
actualInvisibleTime = proxyConfig.getDefaultInvisibleTimeMills();
} else {
validateInvisibleTime(actualInvisibleTime,
ConfigurationManager.getProxyConfig().getMinInvisibleTimeMillsForRecv());
}
FilterExpression filterExpression = request.getFilterExpression();
SubscriptionData subscriptionData;
try {
subscriptionData = FilterAPI.build(topic, filterExpression.getExpression(),
GrpcConverter.getInstance().buildExpressionType(filterExpression.getType()));
} catch (Exception e) {
writer.writeAndComplete(ctx, Code.ILLEGAL_FILTER_EXPRESSION, e.getMessage());
return;
}
this.messagingProcessor.popMessage(
ctx,
new ReceiveMessageQueueSelector(
request.getMessageQueue().getBroker().getName()
),
group,
topic,
request.getBatchSize(),
actualInvisibleTime,
pollingTime,
ConsumeInitMode.MAX,
subscriptionData,
fifo,
new PopMessageResultFilterImpl(maxAttempts),
request.hasAttemptId() ? request.getAttemptId() : null,
timeRemaining
).thenAccept(popResult -> {
// In auto-renew mode, register a receipt handle for each found message so
// the proxy can renew its invisibility on the client's behalf.
if (proxyConfig.isEnableProxyAutoRenew() && request.getAutoRenew()) {
if (PopStatus.FOUND.equals(popResult.getPopStatus())) {
List<MessageExt> messageExtList = popResult.getMsgFoundList();
for (MessageExt messageExt : messageExtList) {
String receiptHandle = messageExt.getProperty(MessageConst.PROPERTY_POP_CK);
if (receiptHandle != null) {
MessageReceiptHandle messageReceiptHandle =
new MessageReceiptHandle(group, topic, messageExt.getQueueId(), receiptHandle, messageExt.getMsgId(),
messageExt.getQueueOffset(), messageExt.getReconsumeTimes());
messagingProcessor.addReceiptHandle(ctx, grpcChannelManager.getChannel(ctx.getClientID()), group, messageExt.getMsgId(), messageReceiptHandle);
}
}
}
}
writer.writeAndComplete(ctx, request, popResult);
})
.exceptionally(t -> {
writer.writeAndComplete(ctx, request, t);
return null;
});
} catch (Throwable t) {
writer.writeAndComplete(ctx, request, t);
}
} | @Test
// With only 1 ms of deadline remaining, the handler must reject before popping:
// no message is found and the captured polling time stays 0.
public void testReceiveMessagePollingTime() {
StreamObserver<ReceiveMessageResponse> receiveStreamObserver = mock(ServerCallStreamObserver.class);
ArgumentCaptor<ReceiveMessageResponse> responseArgumentCaptor = ArgumentCaptor.forClass(ReceiveMessageResponse.class);
doNothing().when(receiveStreamObserver).onNext(responseArgumentCaptor.capture());
ArgumentCaptor<Long> pollTimeCaptor = ArgumentCaptor.forClass(Long.class);
when(this.grpcClientSettingsManager.getClientSettings(any())).thenReturn(Settings.newBuilder()
.setRequestTimeout(Durations.fromSeconds(3))
.build());
when(this.messagingProcessor.popMessage(any(), any(), anyString(), anyString(), anyInt(), anyLong(),
pollTimeCaptor.capture(), anyInt(), any(), anyBoolean(), any(), isNull(), anyLong()))
.thenReturn(CompletableFuture.completedFuture(new PopResult(PopStatus.NO_NEW_MSG, Collections.emptyList())));
ProxyContext context = createContext();
context.setRemainingMs(1L);
this.receiveMessageActivity.receiveMessage(
context,
ReceiveMessageRequest.newBuilder()
.setGroup(Resource.newBuilder().setName(CONSUMER_GROUP).build())
.setMessageQueue(MessageQueue.newBuilder().setTopic(Resource.newBuilder().setName(TOPIC).build()).build())
.setAutoRenew(true)
.setFilterExpression(FilterExpression.newBuilder()
.setType(FilterType.TAG)
.setExpression("*")
.build())
.build(),
receiveStreamObserver
);
assertEquals(Code.MESSAGE_NOT_FOUND, getResponseCodeFromReceiveMessageResponseList(responseArgumentCaptor.getAllValues()));
assertEquals(0L, pollTimeCaptor.getValue().longValue());
} |
// Builds the query-service scope info for this task from the parent job id,
// the vertex id, and this subtask's index/attempt. The filter argument is unused here.
@Override
protected QueryScopeInfo.TaskQueryScopeInfo createQueryServiceMetricInfo(
CharacterFilter filter) {
return new QueryScopeInfo.TaskQueryScopeInfo(
this.parent.jobId.toString(),
String.valueOf(this.vertexId),
this.subtaskIndex,
this.attemptNumber);
} | @Test
// The scope info must carry the job id, vertex id and subtask index with an empty scope.
void testCreateQueryServiceMetricInfo() {
JobID jid = new JobID();
JobVertexID vid = new JobVertexID();
ExecutionAttemptID eid = createExecutionAttemptId(vid, 4, 5);
TaskManagerMetricGroup tm =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
registry, "host", new ResourceID("id"));
TaskMetricGroup task = tm.addJob(jid, "jobname").addTask(eid, "taskName");
QueryScopeInfo.TaskQueryScopeInfo info =
task.createQueryServiceMetricInfo(new DummyCharacterFilter());
assertThat(info.scope).isEmpty();
assertThat(info.jobID).isEqualTo(jid.toString());
assertThat(info.vertexID).isEqualTo(vid.toString());
assertThat(info.subtaskIndex).isEqualTo(4);
} |
// UDF overload: joins the array elements with the default delimiter by delegating
// to the two-argument variant.
@Udf
public <T> String join(
@UdfParameter(description = "the array to join using the default delimiter '"
+ DEFAULT_DELIMITER + "'") final List<T> array
) {
return join(array, DEFAULT_DELIMITER);
} | @Test
// A null array joins to null in both the default- and custom-delimiter overloads.
public void shouldReturnNullForNullInput() {
assertThat(arrayJoinUDF.join(null), nullValue());
assertThat(arrayJoinUDF.join(null,CUSTOM_DELIMITER), nullValue());
} |
// Deserializes a versioned DeleteAcls request from its wire format.
public static DeleteAclsRequest parse(ByteBuffer buffer, short version) {
return new DeleteAclsRequest(new DeleteAclsRequestData(new ByteBufferAccessor(buffer), version), version);
} | @Test
// V0 has no pattern types, so an ANY pattern filter must round-trip as LITERAL.
public void shouldRoundTripAnyV0AsLiteral() {
final DeleteAclsRequest original = new DeleteAclsRequest.Builder(requestData(ANY_FILTER)).build(V0);
final DeleteAclsRequest expected = new DeleteAclsRequest.Builder(requestData(
new AclBindingFilter(new ResourcePatternFilter(
ANY_FILTER.patternFilter().resourceType(),
ANY_FILTER.patternFilter().name(),
PatternType.LITERAL),
ANY_FILTER.entryFilter()))
).build(V0);
final DeleteAclsRequest result = DeleteAclsRequest.parse(original.serialize(), V0);
assertRequestEquals(expected, result);
} |
// This packet type is identified by the EMPTY_QUERY_RESPONSE protocol tag.
@Override
public PostgreSQLIdentifierTag getIdentifier() {
return PostgreSQLMessagePacketType.EMPTY_QUERY_RESPONSE;
} | @Test
// The packet must report the EMPTY_QUERY_RESPONSE tag.
void assertIdentifier() {
PostgreSQLIdentifierTag actual = new PostgreSQLEmptyQueryResponsePacket().getIdentifier();
assertThat(actual, is(PostgreSQLMessagePacketType.EMPTY_QUERY_RESPONSE));
} |
/**
 * Builds the client configuration for {@code cluster}: the top-level properties
 * overlaid with the cluster-specific ones (cluster values win), then passed
 * through transform() before constructing the config object.
 */
public MirrorClientConfig clientConfig(String cluster) {
    final Map<String, String> combined = new HashMap<>(originalsStrings());
    combined.putAll(clusterProps(cluster));
    return new MirrorClientConfig(transform(combined));
} | @Test
// Broad check of per-cluster client config resolution: cluster-specific overrides,
// replication-policy separator, config-provider secret resolution, exclusion of
// unknown/metrics properties, and ForwardingAdmin selection per cluster.
public void testClientConfigProperties() {
String clusterABootstrap = "127.0.0.1:9092, 127.0.0.2:9092";
String clusterBBootstrap = "127.0.0.3:9092, 127.0.0.4:9092";
MirrorMakerConfig mirrorConfig = new MirrorMakerConfig(makeProps(
"clusters", "a, b",
"config.providers", "fake",
"config.providers.fake.class", FakeConfigProvider.class.getName(),
"replication.policy.separator", "__",
"ssl.key.password", "${fake:secret:password}", // resolves to "secret2"
"security.protocol", "SSL",
"a.security.protocol", "PLAINTEXT",
"a.producer.security.protocol", "SSL",
"a.bootstrap.servers", clusterABootstrap,
"b.bootstrap.servers", clusterBBootstrap,
"metrics.reporter", FakeMetricsReporter.class.getName(),
"a.metrics.reporter", FakeMetricsReporter.class.getName(),
"b->a.metrics.reporter", FakeMetricsReporter.class.getName(),
"b.forwarding.admin.class", FakeForwardingAdmin.class.getName(),
"a.xxx", "yyy",
"xxx", "zzz"));
MirrorClientConfig aClientConfig = mirrorConfig.clientConfig("a");
MirrorClientConfig bClientConfig = mirrorConfig.clientConfig("b");
assertEquals("__", aClientConfig.getString("replication.policy.separator"),
"replication.policy.separator is picked up in MirrorClientConfig");
assertEquals("b__topic1", aClientConfig.replicationPolicy().formatRemoteTopic("b", "topic1"),
"replication.policy.separator is honored");
assertEquals(clusterABootstrap, aClientConfig.adminConfig().get("bootstrap.servers"),
"client configs include bootstrap.servers");
try (ForwardingAdmin forwardingAdmin = aClientConfig.forwardingAdmin(aClientConfig.adminConfig())) {
assertEquals(ForwardingAdmin.class.getName(), forwardingAdmin.getClass().getName(),
"Cluster a uses the default ForwardingAdmin");
}
assertEquals("PLAINTEXT", aClientConfig.adminConfig().get("security.protocol"),
"client configs include security.protocol");
assertEquals("SSL", aClientConfig.producerConfig().get("security.protocol"),
"producer configs include security.protocol");
assertFalse(aClientConfig.adminConfig().containsKey("xxx"),
"unknown properties aren't included in client configs");
assertFalse(aClientConfig.adminConfig().containsKey("metric.reporters"),
"top-level metrics reporters aren't included in client configs");
assertEquals("secret2", aClientConfig.getPassword("ssl.key.password").value(),
"security properties are translated from external sources");
assertEquals("secret2", ((Password) aClientConfig.adminConfig().get("ssl.key.password")).value(),
"client configs are translated from external sources");
assertFalse(aClientConfig.producerConfig().containsKey("metrics.reporter"),
"client configs should not include metrics reporter");
assertFalse(bClientConfig.adminConfig().containsKey("metrics.reporter"),
"client configs should not include metrics reporter");
try (ForwardingAdmin forwardingAdmin = bClientConfig.forwardingAdmin(bClientConfig.adminConfig())) {
assertEquals(FakeForwardingAdmin.class.getName(), forwardingAdmin.getClass().getName(),
"Cluster b should use the FakeForwardingAdmin");
}
} |
// Bolt filter: looks the tuple's key up in Redis using the configured data type,
// emits the tuple only when the key is present, and acks. Any error is reported
// and the tuple is failed (no emit).
@Override
public void process(Tuple input) {
String key = filterMapper.getKeyFromTuple(input);
boolean found;
JedisCommandsContainer jedisCommand = null;
try {
jedisCommand = getInstance();
// Each data type has its own existence check; additionalKey names the
// containing set/hash/zset/geo structure where applicable.
switch (dataType) {
case STRING:
found = jedisCommand.exists(key);
break;
case SET:
found = jedisCommand.sismember(additionalKey, key);
break;
case HASH:
found = jedisCommand.hexists(additionalKey, key);
break;
case SORTED_SET:
found = jedisCommand.zrank(additionalKey, key) != null;
break;
case HYPER_LOG_LOG:
found = jedisCommand.pfcount(key) > 0;
break;
case GEO:
List<GeoCoordinate> geopos = jedisCommand.geopos(additionalKey, key);
if (geopos == null || geopos.isEmpty()) {
found = false;
} else {
// If any entry is NOT null, then we have a match.
found = geopos.stream()
.anyMatch(Objects::nonNull);
}
break;
default:
throw new IllegalArgumentException("Cannot process such data type: " + dataType);
}
if (found) {
collector.emit(input, input.getValues());
}
collector.ack(input);
} catch (Exception e) {
this.collector.reportError(e);
this.collector.fail(input);
}
} | @Test
// SET mode smoke test: a key present in the Redis set must pass through the filter bolt.
void smokeTest_sismember_isMember() {
// Define input key
final String setKey = "ThisIsMySet";
final String inputKey = "ThisIsMyKey";
// Ensure key does exist in redis
jedisHelper.smember(setKey, inputKey);
assertTrue(jedisHelper.sismember(setKey, inputKey), "Sanity check, should be a member");
// Create an input tuple
final Map<String, Object> values = new HashMap<>();
values.put("key", inputKey);
values.put("value", "ThisIsMyValue");
final Tuple tuple = new StubTuple(values);
final JedisPoolConfig config = configBuilder.build();
final TestMapper mapper = new TestMapper(SET, setKey);
final RedisFilterBolt bolt = new RedisFilterBolt(config, mapper);
bolt.prepare(new HashMap<>(), topologyContext, new OutputCollector(outputCollector));
bolt.process(tuple);
// Verify Tuple passed through the bolt
verifyTuplePassed(tuple);
} |
/**
 * Validates that {@code values} is either null (allowed) or contains only
 * non-null, non-blank entries.
 *
 * @param values the collection to validate; null is accepted as "unset"
 * @param propertyName property name used in failure messages
 * @throws NullPointerException if the collection contains a null entry
 * @throws IllegalArgumentException if the collection contains a blank entry
 */
public static void checkNullOrNonNullNonEmptyEntries(
    @Nullable Collection<String> values, String propertyName) {
  if (values == null) {
    return; // a null collection is explicitly permitted
  }
  for (String value : values) {
    if (value == null) {
      throw new NullPointerException(
          "Property '" + propertyName + "' cannot contain null entries");
    }
    if (value.trim().isEmpty()) {
      throw new IllegalArgumentException(
          "Property '" + propertyName + "' cannot contain empty strings");
    }
  }
} | @Test
// An empty collection contains no offending entries, so validation passes.
public void testCheckNullOrNonNullNonEmptyEntries_emptyPass() {
Validator.checkNullOrNonNullNonEmptyEntries(ImmutableList.of(), "test");
// pass
} |
/**
 * Value equality: two Results are equal iff they are the same class and both
 * the {@code descend} and {@code success} flags match.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    final Result other = (Result) obj;
    return descend == other.descend && success == other.success;
} | @Test
// The three Result constants must be pairwise unequal in both directions.
public void notEquals() {
assertFalse(Result.PASS.equals(Result.FAIL));
assertFalse(Result.PASS.equals(Result.STOP));
assertFalse(Result.FAIL.equals(Result.PASS));
assertFalse(Result.FAIL.equals(Result.STOP));
assertFalse(Result.STOP.equals(Result.PASS));
assertFalse(Result.STOP.equals(Result.FAIL));
} |
// Broker-initiated transaction check: decodes the message from the request body,
// strips the client namespace from the topic, restores the transaction id, and
// dispatches to the producer registered for the message's producer group.
// Always returns null (one-way processing); failures are only logged.
public RemotingCommand checkTransactionState(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final CheckTransactionStateRequestHeader requestHeader =
(CheckTransactionStateRequestHeader) request.decodeCommandCustomHeader(CheckTransactionStateRequestHeader.class);
final ByteBuffer byteBuffer = ByteBuffer.wrap(request.getBody());
final MessageExt messageExt = MessageDecoder.decode(byteBuffer);
if (messageExt != null) {
if (StringUtils.isNotEmpty(this.mqClientFactory.getClientConfig().getNamespace())) {
messageExt.setTopic(NamespaceUtil
.withoutNamespace(messageExt.getTopic(), this.mqClientFactory.getClientConfig().getNamespace()));
}
String transactionId = messageExt.getProperty(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX);
if (null != transactionId && !"".equals(transactionId)) {
messageExt.setTransactionId(transactionId);
}
final String group = messageExt.getProperty(MessageConst.PROPERTY_PRODUCER_GROUP);
if (group != null) {
MQProducerInner producer = this.mqClientFactory.selectProducer(group);
if (producer != null) {
final String addr = RemotingHelper.parseChannelRemoteAddr(ctx.channel());
producer.checkTransactionState(addr, messageExt, requestHeader);
} else {
logger.debug("checkTransactionState, pick producer by group[{}] failed", group);
}
} else {
logger.warn("checkTransactionState, pick producer group failed");
}
} else {
logger.warn("checkTransactionState, decode message failed");
}
return null;
} | @Test
// A CHECK_TRANSACTION_STATE request is processed one-way: the processor returns null.
public void testCheckTransactionState() throws Exception {
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
RemotingCommand request = mock(RemotingCommand.class);
when(request.getCode()).thenReturn(RequestCode.CHECK_TRANSACTION_STATE);
when(request.getBody()).thenReturn(getMessageResult());
CheckTransactionStateRequestHeader requestHeader = new CheckTransactionStateRequestHeader();
when(request.decodeCommandCustomHeader(CheckTransactionStateRequestHeader.class)).thenReturn(requestHeader);
assertNull(processor.processRequest(ctx, request));
} |
/**
 * Normalizes one translation file's lines: sorts by key, drops comments, blanks,
 * malformed and empty translations, warns about broken placeholders, and — among
 * adjacent duplicate keys — keeps the higher-quality value (logging what is dropped).
 *
 * @param filename used only to prefix log/warn diagnostics
 * @param lines the raw lines; sorted in place by KEY_COMPARATOR
 * @return the surviving "key = value" lines
 */
ArrayList<String> processLines(final String filename, ArrayList<String> lines) {
// Sort by key so duplicate keys become adjacent and can be compared pairwise.
Collections.sort(lines, KEY_COMPARATOR);
ArrayList<String> result = new ArrayList<String>(lines.size());
String lastKey = null;
String lastValue = null;
for (final String line : lines) {
// Skip comment lines and blank/whitespace-only lines.
if (line.indexOf('#') == 0 || line.matches("\\s*"))
continue;
final String standardUnicodeLine = convertUnicodeCharacterRepresentation(line);
final String[] keyValue = standardUnicodeLine.split("\\s*=\\s*", 2);
if (keyValue.length != 2 || keyValue[0].length() == 0) {
// broken line: no '=' sign or empty key (we had " = ======")
warn(filename + ": no key/val: " + line);
continue;
}
final String thisKey = keyValue[0];
String thisValue = keyValue[1].trim();
// Drop values that are empty or carry only an [auto]/[translate me] marker.
if (thisValue.matches("(\\[auto\\]|\\[translate me\\])?")) {
warn(filename + ": drop empty translation: " + line);
continue;
}
if (thisValue.indexOf("{1}") != -1 && keyValue[1].indexOf("{0}") == -1) {
// Fixed typo in the diagnostic: "errorneous" -> "erroneous".
warn(filename + ": erroneous placeholders usage: {1} used without {0}: " + line);
}
// Auto-convert "$1"-style placeholders to the "{0}" convention.
if (thisValue.matches(".*\\$\\d.*")) {
warn(filename + ": use '{0}' instead of '$1' as placeholder! (likewise for $2...): " + line);
thisValue = thisValue.replaceAll("\\$1", "{0}").replaceAll("\\$2", "{1}");
}
if (thisValue.matches(".*\\{\\d[^},]*")) {
warn(filename + ": mismatched braces in placeholder: '{' not closed by '}': " + line);
}
// Duplicate key: keep the higher-quality value; on ties, keep the first.
if (lastKey != null && thisKey.equals(lastKey)) {
if (quality(thisValue) < quality(lastValue)) {
log(filename + ": drop " + TaskUtils.toLine(lastKey, thisValue));
continue;
}
else if (quality(thisValue) == quality(lastValue)) {
if (thisValue.equals(lastValue)) {
log(filename + ": drop duplicate " + TaskUtils.toLine(lastKey, thisValue));
}
else if (quality(thisValue) == QUALITY_MANUALLY_TRANSLATED) {
warn(filename //
+ ": drop one of two of equal quality (revisit!):keep: "
+ TaskUtils.toLine(lastKey, lastValue));
warn(filename //
+ ": drop one of two of equal quality (revisit!):drop: "
+ TaskUtils.toLine(thisKey, thisValue));
}
else {
log(filename + ": drop " + TaskUtils.toLine(lastKey, thisValue));
}
continue;
}
else {
log(filename + ": drop " + TaskUtils.toLine(lastKey, lastValue));
}
lastValue = thisValue;
}
else {
// New key: flush the previous survivor, then start tracking this one.
if (lastKey != null)
result.add(TaskUtils.toLine(lastKey, lastValue));
lastKey = thisKey;
lastValue = thisValue;
}
}
// Flush the final pending entry.
if (lastKey != null)
result.add(TaskUtils.toLine(lastKey, lastValue));
return result;
} | @Test
// processLines must drop blank/whitespace-only lines while keeping real key=value lines.
public void testRemoveEmptyLines() throws Exception {
final String msgConserved = "empty lines should be conserved";
final String msgRemoved = "empty lines should be removed";
final FormatTranslation formatTranslation = new FormatTranslation();
String input;
ArrayList<String> lines = new ArrayList<String>();
//
input = "\n \nx=y\n\n";
assertTrue("unique unix", TaskUtils.checkEolStyleAndReadLines(input, lines, unix));
assertEquals(msgConserved, 4, lines.size());
assertEquals(msgRemoved, 1, formatTranslation.processLines("a_file", new ArrayList<String>(lines)).size());
//
input = "\n";
assertTrue("unique unix", TaskUtils.checkEolStyleAndReadLines(input, lines, unix));
assertEquals(msgConserved, 1, lines.size());
assertEquals(msgRemoved, 0, formatTranslation.processLines("a_file", new ArrayList<String>(lines)).size());
//
input = " \n";
assertTrue("unique unix", TaskUtils.checkEolStyleAndReadLines(input, lines, unix));
assertEquals(msgConserved, 1, lines.size());
assertEquals(msgRemoved, 0, formatTranslation.processLines("a_file", new ArrayList<String>(lines)).size());
//
input = "x=y";
assertTrue("unique unix", TaskUtils.checkEolStyleAndReadLines(input, lines, unix));
assertEquals(msgConserved, 1, lines.size());
assertEquals(msgRemoved, 1, formatTranslation.processLines("a_file", new ArrayList<String>(lines)).size());
//
} |
/**
 * Evaluates a SEL expression against the supplied parameters and unwraps the
 * result into a plain Java object.
 *
 * @param expr the expression text to evaluate (sanitized before evaluation)
 * @param params parameter name-to-value bindings visible to the expression
 * @return the unwrapped result: the internal value for scalars, an unboxed
 *     collection/array for container types
 * @throws MaestroInvalidExpressionException if the result is an error, the
 *     result type is unsupported, or evaluation throws an ExecutionException
 * @throws MaestroInternalError for any other unexpected evaluation failure
 */
public Object eval(String expr, Map<String, Object> params) {
  try {
    Extension extension = (extensionRepo == null) ? null : extensionRepo.get();
    SelType evaluated = evaluator.evaluate(sanitize(expr), params, extension);
    switch (evaluated.type()) {
      case ERROR:
        // Evaluator-detected error: surface it with the offending expression.
        throw new MaestroInvalidExpressionException(
            "Expression throws an error [%s] for expr=[%s]", evaluated, expr);
      case STRING:
      case LONG:
      case DOUBLE:
      case BOOLEAN:
        // Scalars carry their Java value directly.
        return evaluated.getInternalVal();
      case STRING_ARRAY:
      case LONG_ARRAY:
      case DOUBLE_ARRAY:
      case BOOLEAN_ARRAY:
      case MAP:
        // Containers are unboxed into plain Java structures.
        return evaluated.unbox();
      default:
        throw new MaestroInvalidExpressionException(
            "Invalid return type [%s] for expr=[%s]", evaluated.type(), expr);
    }
  } catch (MaestroRuntimeException alreadyClassified) {
    throw alreadyClassified; // propagate untouched
  } catch (ExecutionException evalFailure) {
    throw new MaestroInvalidExpressionException(
        evalFailure, "Expression evaluation throws an exception for expr=[%s]", expr);
  } catch (Exception unexpected) {
    throw new MaestroInternalError(
        unexpected, "Expression evaluation is failed with an exception for expr=[%s]", expr);
  }
} | @Test
// Scalar literal expressions must come back as plain Boolean values; the
// trailing-';' case presumably exercises sanitize() tolerance — confirm.
public void testEvalLiterals() {
assertEquals(Boolean.TRUE, evaluator.eval("1 + 1 == 2", Collections.emptyMap()));
assertEquals(Boolean.TRUE, evaluator.eval("1 + 1 > 1;", Collections.emptyMap()));
assertEquals(Boolean.FALSE, evaluator.eval("1 + 1 < 1", Collections.emptyMap()));
} |
/**
 * Rewrites CREATE source and CREATE ... AS SELECT statements via
 * forCreateStatement/forCreateAsStatement; every other statement type passes
 * through unchanged. KsqlStatementExceptions propagate as-is; any other
 * KsqlException is wrapped together with the (masked) statement text.
 *
 * @param statement the configured statement to (possibly) rewrite
 * @return the rewritten statement, or the original when no rewrite applies
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  if (!(statement.getStatement() instanceof CreateSource)
      && !(statement.getStatement() instanceof CreateAsSelect)) {
    return statement;
  }
  try {
    if (statement.getStatement() instanceof CreateSource) {
      final ConfiguredStatement<CreateSource> createStatement =
          (ConfiguredStatement<CreateSource>) statement;
      // orElse: fall back to the untouched statement when nothing was injected.
      return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
    } else {
      final ConfiguredStatement<CreateAsSelect> createStatement =
          (ConfiguredStatement<CreateAsSelect>) statement;
      return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
          createStatement);
    }
  } catch (final KsqlStatementException e) {
    throw e;
  } catch (final KsqlException e) {
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
} | @Test
// When key inference is unsupported and value columns are already declared,
// inject() must return the exact same statement instance (no rewrite).
public void shouldReturnStatementUnchangedIfHasValueSchemaAndKeyFormatNotSupported() {
// Given:
givenValueButNotKeyInferenceSupported();
when(cs.getElements()).thenReturn(SOME_VALUE_ELEMENTS);
// When:
final ConfiguredStatement<?> result = injector.inject(csStatement);
// Then:
assertThat(result, is(sameInstance(csStatement)));
} |
/**
 * Records one successful cache load: updates the success timer and adds the
 * elapsed time to the running total.
 *
 * @param loadTime load duration in nanoseconds
 */
@Override
public void recordLoadSuccess(long loadTime) {
  loadSuccess.update(loadTime, TimeUnit.NANOSECONDS);
  totalLoadTime.add(loadTime);
} | @Test
// One recorded load must register exactly one count on the loads-success timer.
public void loadSuccess() {
stats.recordLoadSuccess(256);
assertThat(registry.timer(PREFIX + ".loads-success").getCount()).isEqualTo(1);
} |
/**
 * Updates an existing Kubernetes host in the host store and logs the change.
 *
 * @param host the updated host description; must not be null
 */
@Override
public void updateHost(K8sHost host) {
  checkNotNull(host, ERR_NULL_HOST);
  hostStore.updateHost(host);
  // MSG_HOST is a String.format pattern, hence the explicit format call.
  log.info(String.format(MSG_HOST, host.hostIp().toString(), MSG_UPDATED));
} | @Test
// Shrinking a host's node set via updateHost must fire HOST_UPDATED followed
// by NODES_REMOVED events.
public void testRemoveNodesFromHost() {
K8sHost updated = HOST_2.updateNodeNames(ImmutableSet.of("3"));
target.updateHost(updated);
validateEvents(K8S_HOST_UPDATED, K8S_NODES_REMOVED);
} |
/**
 * Stops the transformation "safely": only input steps are halted (with the
 * safe-stop flag set) so in-flight rows can drain through downstream steps,
 * then the stopped-listeners are notified. No-op when steps are uninitialized.
 */
public void safeStop() {
  if ( steps == null ) {
    return;
  }
  steps.stream()
      .filter( this::isInputStep )
      .forEach( combi -> stopStep( combi, true ) );
  notifyStoppedListeners();
} | @Test
// Trans.getResult().isSafeStop() must mirror whether every step reports
// isSafeStopped().
public void testSafeStop() {
StepInterface stepMock1 = mock( StepInterface.class );
StepDataInterface stepDataMock1 = mock( StepDataInterface.class );
StepMeta stepMetaMock1 = mock( StepMeta.class );
when( stepMock1.getStepname() ).thenReturn( "stepName" );
trans.setSteps( of( combi( stepMock1, stepDataMock1, stepMetaMock1 ) ) );
// Scenario: step not stopped
when( stepMock1.isSafeStopped() ).thenReturn( false );
Result result = trans.getResult();
assertFalse( result.isSafeStop() );
// Scenario: step stopped
when( stepMock1.isSafeStopped() ).thenReturn( true );
result = trans.getResult();
assertTrue( result.isSafeStop() );
} |
/**
 * {@inheritDoc}
 *
 * <p>This metadata implementation always reports a writable data source.
 */
@Override
public boolean isReadOnly() {
  return false;
} | @Test
// NOTE(review): the name says "nulls are sorted high" but the body only checks
// isReadOnly() == false — likely a copy/paste name; consider renaming.
void assertNullsAreSortedHigh() {
assertFalse(metaData.isReadOnly());
} |
/**
 * Sets a 64-bit double at the given absolute index by writing its raw IEEE-754
 * long bit pattern; reader/writer indices are untouched (setLong semantics).
 *
 * @param index absolute byte index to write at
 * @param value double value to store
 * @return this buffer, for call chaining
 */
@Override
public ByteBuf setDouble(int index, double value) {
  setLong(index, Double.doubleToRawLongBits(value));
  return this;
} | @Test
// Writing to a released buffer must fail fast with IllegalReferenceCountException.
public void testSetDoubleAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().setDouble(0, 1);
}
});
} |
/**
 * Builds an {@link Invoker} for the given proxy object using the {@link Proxy}
 * extension registered under {@code proxyType}.
 *
 * @param proxyObject the object to wrap (passed through to the extension)
 * @param proxyType alias of the Proxy extension to resolve
 * @return the invoker created by the resolved proxy extension
 * @throws SofaRpcRuntimeException if the extension is missing or creation fails
 */
public static Invoker getInvoker(Object proxyObject, String proxyType) {
    try {
        ExtensionClass<Proxy> ext = ExtensionLoaderFactory.getExtensionLoader(Proxy.class)
            .getExtensionClass(proxyType);
        if (ext == null) {
            throw new SofaRpcRuntimeException(LogCodes.getLog(LogCodes.ERROR_LOAD_EXT, "Registry", proxyType));
        }
        Proxy proxy = ext.getExtInstance();
        return proxy.getInvoker(proxyObject);
    } catch (SofaRpcRuntimeException e) {
        throw e;
    } catch (Throwable e) {
        // Fix: chain the original failure as the cause instead of discarding
        // it, so callers can diagnose what actually went wrong.
        throw new SofaRpcRuntimeException(LogCodes.getLog(LogCodes.ERROR_LOAD_EXT, "Registry", proxyType), e);
    }
} | @Test
// The "test" proxy extension presumably yields a null invoker for a null proxy
// object; this mainly guards that lookup + delegation complete without error.
public void getInvoker() {
Invoker invoke = ProxyFactory.getInvoker(null, "test");
Assert.assertEquals(invoke, null);
} |
/**
 * Validates externally supplied (REST API) offset alterations for this
 * connector.
 *
 * <p>Rejected outright when no file is configured, since stdin offsets are not
 * tracked. Each non-null offset must carry a non-negative {@code Long} under
 * POSITION_FIELD, and its partition must carry FILENAME_FIELD. Null offsets
 * (tombstones) are always accepted so stale state can be cleaned up.
 *
 * @return true when every offset passes structural validation
 * @throws ConnectException on any invalid offset or partition
 */
@Override
public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) {
    AbstractConfig config = new AbstractConfig(CONFIG_DEF, connectorConfig);
    String filename = config.getString(FILE_CONFIG);
    if (filename == null || filename.isEmpty()) {
        throw new ConnectException("Offsets cannot be modified if the '" + FILE_CONFIG + "' configuration is unspecified. " +
                "This is because stdin is used for input and offsets are not tracked.");
    }
    // This connector makes use of a single source partition at a time which represents the file that it is configured to read from.
    // However, there could also be source partitions from previous configurations of the connector.
    for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : offsets.entrySet()) {
        Map<String, ?> offset = partitionOffset.getValue();
        if (offset == null) {
            // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
            // want to prevent users from being able to clean it up using the REST API
            continue;
        }
        if (!offset.containsKey(POSITION_FIELD)) {
            throw new ConnectException("Offset objects should either be null or contain the key '" + POSITION_FIELD + "'");
        }
        // The 'position' in the offset represents the position in the file's byte stream and should be a non-negative long value
        if (!(offset.get(POSITION_FIELD) instanceof Long)) {
            throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset is expected to be a Long value");
        }
        long offsetPosition = (Long) offset.get(POSITION_FIELD);
        if (offsetPosition < 0) {
            throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset should be a non-negative value");
        }
        Map<String, ?> partition = partitionOffset.getKey();
        if (partition == null) {
            throw new ConnectException("Partition objects cannot be null");
        }
        if (!partition.containsKey(FILENAME_FIELD)) {
            throw new ConnectException("Partition objects should contain the key '" + FILENAME_FIELD + "'");
        }
    }
    // Let the task check whether the actual value for the offset position is valid for the configured file on startup
    return true;
} | @Test
// An offset map that lacks POSITION_FIELD must be rejected with ConnectException.
public void testAlterOffsetsIncorrectOffsetKey() {
Map<Map<String, ?>, Map<String, ?>> offsets = Collections.singletonMap(
Collections.singletonMap(FILENAME_FIELD, FILENAME),
Collections.singletonMap("other_offset_key", 0L)
);
assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, offsets));
} |
/**
 * UDAF factory: builds a table aggregate that collects a column's values into
 * a single list (aggregate and output types are both {@code List<T>}).
 */
@UdafFactory(description = "collect values of a field into a single Array")
public static <T> TableUdaf<T, List<T>, List<T>> createCollectListT() {
  return new Collect<>();
} | @Test
// With LIMIT_CONFIG=10, aggregation must keep only the first 10 values and
// silently drop the rest.
public void shouldRespectSizeLimit() {
final TableUdaf<Integer, List<Integer>, List<Integer>> udaf = CollectListUdaf.createCollectListT();
((Configurable) udaf).configure(ImmutableMap.of(CollectListUdaf.LIMIT_CONFIG, 10));
List<Integer> runningList = udaf.initialize();
for (int i = 1; i < 25; i++) {
runningList = udaf.aggregate(i, runningList);
}
assertThat(runningList, hasSize(10));
assertThat(runningList, hasItem(1));
assertThat(runningList, hasItem(10));
assertThat(runningList, not(hasItem(11)));
} |
/**
 * Builds the {@link AuthRequest} for the given social platform and user type.
 * Starts from the statically configured request and, when an enabled client
 * record exists in the DB, overrides its credentials via reflection.
 *
 * @param socialType social platform type (see {@link SocialTypeEnum})
 * @param userType user type used to look up the DB client record
 * @return the configured AuthRequest (never null; asserted)
 */
@VisibleForTesting
AuthRequest buildAuthRequest(Integer socialType, Integer userType) {
    // 1. Look up the default configuration, as read from application-*.yaml
    AuthRequest request = authRequestFactory.get(SocialTypeEnum.valueOfType(socialType).getSource());
    Assert.notNull(request, String.format("社交平台(%d) 不存在", socialType));
    // 2. Query the DB configuration; if present and enabled, override defaults
    SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(socialType, userType);
    if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
        // 2.1 Build a fresh AuthConfig copied from the current one
        AuthConfig authConfig = (AuthConfig) ReflectUtil.getFieldValue(request, "config");
        AuthConfig newAuthConfig = ReflectUtil.newInstance(authConfig.getClass());
        BeanUtil.copyProperties(authConfig, newAuthConfig);
        // 2.2 Override the clientId + clientSecret credentials
        newAuthConfig.setClientId(client.getClientId());
        newAuthConfig.setClientSecret(client.getClientSecret());
        if (client.getAgentId() != null) { // also override agentId when provided
            newAuthConfig.setAgentId(client.getAgentId());
        }
        // 2.3 Inject the new config back into the request for subsequent use
        ReflectUtil.setFieldValue(request, "config", newAuthConfig);
    }
    return request;
} | @Test
// With no DB client record, buildAuthRequest must return the factory default
// AuthRequest with its config untouched.
public void testBuildAuthRequest_clientNull() {
// Prepare parameters
Integer socialType = SocialTypeEnum.WECHAT_MP.getType();
Integer userType = randomPojo(SocialTypeEnum.class).getType();
// Mock the matching AuthRequest implementation
AuthRequest authRequest = mock(AuthDefaultRequest.class);
AuthConfig authConfig = (AuthConfig) ReflectUtil.getFieldValue(authRequest, "config");
when(authRequestFactory.get(eq("WECHAT_MP"))).thenReturn(authRequest);
// Invoke
AuthRequest result = socialClientService.buildAuthRequest(socialType, userType);
// Assert
assertSame(authRequest, result);
// NOTE(review): reading "config" from authConfig (not result) looks like a
// typo — likely meant ReflectUtil.getFieldValue(result, "config"); confirm.
assertSame(authConfig, ReflectUtil.getFieldValue(authConfig, "config"));
} |
@Override
public int lastIndexOf(Object o) {
if (o == null) {
for (int i = mElements.length - 1; i >= 0; i--) {
if (mElements[i] == null) {
return i;
}
}
} else {
for (int i = mElements.length - 1; i >= 0; i--) {
if (o.equals(mElements[i])) {
return i;
}
}
}
return -1;
} | @Test
// lastIndexOf must agree with Arrays.asList for duplicated and null elements.
public void lookupWithDuplicationAndNull() {
String[] array = new String[]{"a", "b", "a", null, "b", null};
UnmodifiableArrayList<String> list = new UnmodifiableArrayList<>(array);
List<String> arrAsList = Arrays.asList(array);
for (int i = 0; i < array.length; i++) {
String s = array[i];
assertEquals(arrAsList.lastIndexOf(s), list.lastIndexOf(s));
}
} |
/**
 * Wraps this argument list into a {@code cmd.exe /C "..."} command line for
 * Windows: arguments containing cmd-special characters are quoted, embedded
 * '"' are doubled, and {@code && exit %%ERRORLEVEL%%} is appended so the batch
 * file's exit code is propagated to the caller.
 *
 * @param escapeVars when true, %VAR% references are neutralized by quoting the
 *     first letter after each '%'
 * @return a new builder representing the full cmd.exe invocation
 */
public ArgumentListBuilder toWindowsCommand(boolean escapeVars) {
    ArgumentListBuilder windowsCommand = new ArgumentListBuilder().add("cmd.exe", "/C");
    boolean quoted, percent;
    for (int i = 0; i < args.size(); i++) {
        StringBuilder quotedArgs = new StringBuilder();
        String arg = args.get(i);
        quoted = percent = false;
        for (int j = 0; j < arg.length(); j++) {
            char c = arg.charAt(j);
            // Characters that force quoting of the whole argument.
            if (!quoted && (c == ' ' || c == '*' || c == '?' || c == ',' || c == ';')) {
                quoted = startQuoting(quotedArgs, arg, j);
            }
            else if (c == '^' || c == '&' || c == '<' || c == '>' || c == '|') {
                if (!quoted) quoted = startQuoting(quotedArgs, arg, j);
                // quotedArgs.append('^'); See note in javadoc above
            }
            else if (c == '"') {
                // Literal quote: double it inside the quoted region.
                if (!quoted) quoted = startQuoting(quotedArgs, arg, j);
                quotedArgs.append('"');
            }
            else if (percent && escapeVars
                    && ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'))) {
                // %X with a letter: wrap the letter in quotes so cmd does not
                // expand it as an environment variable.
                if (!quoted) quoted = startQuoting(quotedArgs, arg, j);
                quotedArgs.append('"').append(c);
                c = '"';
            }
            percent = c == '%';
            if (quoted) quotedArgs.append(c);
        }
        if (i == 0) {
            // The first argument opens the outer quote around the whole command.
            if (quoted) {
                quotedArgs.insert(0, '"');
            } else {
                quotedArgs.append('"');
            }
        }
        if (quoted) {
            quotedArgs.append('"');
        } else {
            quotedArgs.append(arg);
        }
        windowsCommand.add(quotedArgs, mask.get(i));
    }
    // (comment copied from old code in hudson.tasks.Ant)
    // on Windows, executing batch file can't return the correct error code,
    // so we need to wrap it into cmd.exe.
    // double %% is needed because we want ERRORLEVEL to be expanded after
    // batch file executed, not before. This alone shows how broken Windows is...
    windowsCommand.add("&&").add("exit").add("%%ERRORLEVEL%%\"");
    return windowsCommand;
} | @Test
public void testToWindowsCommand() {
ArgumentListBuilder builder = new ArgumentListBuilder().
add("ant.bat").add("-Dfoo1=abc"). // nothing special, no quotes
add("-Dfoo2=foo bar").add("-Dfoo3=/u*r").add("-Dfoo4=/us?"). // add quotes
add("-Dfoo10=bar,baz").
add("-Dfoo5=foo;bar^baz").add("-Dfoo6=<xml>&here;</xml>"). // add quotes
add("-Dfoo7=foo|bar\"baz"). // add quotes and "" for "
add("-Dfoo8=% %QED% %comspec% %-%(%.%"). // add quotes, and extra quotes for %Q and %c
add("-Dfoo9=%'''%%@%"); // no quotes as none of the % are followed by a letter
// By default, does not escape %VAR%
assertThat(builder.toWindowsCommand().toCommandArray(), is(new String[] { "cmd.exe", "/C",
"\"ant.bat", "-Dfoo1=abc", "\"-Dfoo2=foo bar\"", "\"-Dfoo3=/u*r\"", "\"-Dfoo4=/us?\"",
"\"-Dfoo10=bar,baz\"", "\"-Dfoo5=foo;bar^baz\"", "\"-Dfoo6=<xml>&here;</xml>\"",
"\"-Dfoo7=foo|bar\"\"baz\"", "\"-Dfoo8=% %QED% %comspec% %-%(%.%\"",
"-Dfoo9=%'''%%@%", "&&", "exit", "%%ERRORLEVEL%%\"" }));
// Pass flag to escape %VAR%
assertThat(builder.toWindowsCommand(true).toCommandArray(), is(new String[] { "cmd.exe", "/C",
"\"ant.bat", "-Dfoo1=abc", "\"-Dfoo2=foo bar\"", "\"-Dfoo3=/u*r\"", "\"-Dfoo4=/us?\"",
"\"-Dfoo10=bar,baz\"", "\"-Dfoo5=foo;bar^baz\"", "\"-Dfoo6=<xml>&here;</xml>\"",
"\"-Dfoo7=foo|bar\"\"baz\"", "\"-Dfoo8=% %\"Q\"ED% %\"c\"omspec% %-%(%.%\"",
"-Dfoo9=%'''%%@%", "&&", "exit", "%%ERRORLEVEL%%\"" }));
// Try to hide password
builder.add("-Dpassword=hidden", true);
// By default, does not escape %VAR%
assertThat(builder.toWindowsCommand().toString(), is(
"cmd.exe /C \"ant.bat -Dfoo1=abc \"\"-Dfoo2=foo bar\"\" \"-Dfoo3=/u*r\" "
+ "\"-Dfoo4=/us?\" \"-Dfoo10=bar,baz\" \"-Dfoo5=foo;bar^baz\" "
+ "\"-Dfoo6=<xml>&here;</xml>\" \"-Dfoo7=foo|bar\"\"baz\" "
+ "\"\"-Dfoo8=% %QED% %comspec% %-%(%.%\"\" -Dfoo9=%'''%%@% ****** "
+ "&& exit %%ERRORLEVEL%%\""));
// Pass flag to escape %VAR%
assertThat(builder.toWindowsCommand(true).toString(), is(
"cmd.exe /C \"ant.bat -Dfoo1=abc \"\"-Dfoo2=foo bar\"\" \"-Dfoo3=/u*r\" "
+ "\"-Dfoo4=/us?\" \"-Dfoo10=bar,baz\" \"-Dfoo5=foo;bar^baz\" "
+ "\"-Dfoo6=<xml>&here;</xml>\" \"-Dfoo7=foo|bar\"\"baz\" "
+ "\"\"-Dfoo8=% %\"Q\"ED% %\"c\"omspec% %-%(%.%\"\" -Dfoo9=%'''%%@% ****** "
+ "&& exit %%ERRORLEVEL%%\""));
} |
/**
 * Two trigger state machines are compatible when they have the same concrete
 * class and their sub-trigger lists are pairwise compatible; two null
 * sub-trigger lists are compatible, a null and non-null pair is not.
 */
public boolean isCompatible(TriggerStateMachine other) {
  if (!getClass().equals(other.getClass())) {
    return false;
  }
  if (subTriggers == null || other.subTriggers == null) {
    // Compatible only when both sides lack sub-triggers.
    return subTriggers == null && other.subTriggers == null;
  }
  int count = subTriggers.size();
  if (count != other.subTriggers.size()) {
    return false;
  }
  for (int i = 0; i < count; i++) {
    if (!subTriggers.get(i).isCompatible(other.subTriggers.get(i))) {
      return false;
    }
  }
  return true;
} | @Test
// Compatibility requires the same machine class and pairwise-compatible
// sub-trigger lists.
public void testIsCompatible() throws Exception {
assertTrue(new Trigger1(null).isCompatible(new Trigger1(null)));
assertTrue(
new Trigger1(Arrays.asList(new Trigger2(null)))
.isCompatible(new Trigger1(Arrays.asList(new Trigger2(null)))));
assertFalse(new Trigger1(null).isCompatible(new Trigger2(null)));
assertFalse(
new Trigger1(Arrays.asList(new Trigger1(null)))
.isCompatible(new Trigger1(Arrays.asList(new Trigger2(null)))));
} |
/**
 * Creates a TaskAndAction describing an ADD of the given task.
 *
 * @param task the task to add; must not be null
 * @throws NullPointerException with message "Task to add is null!" when null
 */
public static TaskAndAction createAddTask(final Task task) {
    // Null check happens before construction, same as the two-statement form.
    return new TaskAndAction(
        Objects.requireNonNull(task, "Task to add is null!"), null, Action.ADD, null);
} | @Test
// createAddTask(null) must fail fast with the documented NPE message.
public void shouldThrowIfAddTaskActionIsCreatedWithNullTask() {
final Exception exception = assertThrows(NullPointerException.class, () -> createAddTask(null));
assertTrue(exception.getMessage().contains("Task to add is null!"));
} |
/**
 * Schedules the given DeletionTask to run after the configured debug delay.
 * When the delay is -1 (debug "never delete" mode) the task is silently
 * dropped — neither recorded nor scheduled.
 *
 * @param deletionTask the deletion work to schedule
 */
public void delete(DeletionTask deletionTask) {
  if (debugDelay != -1) {
    LOG.debug("Scheduling DeletionTask (delay {}) : {}", debugDelay,
        deletionTask);
    // Record in the state store before scheduling (name suggests recovery
    // across NM restarts — confirm against the state-store implementation).
    recordDeletionTaskInStateStore(deletionTask);
    sched.schedule(deletionTask, debugDelay, TimeUnit.SECONDS);
  }
} | @Test
public void testNoDelete() throws Exception {
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
List<Path> dirs = buildDirs(r, base, 20);
createDirs(new Path("."), dirs);
FakeDefaultContainerExecutor exec = new FakeDefaultContainerExecutor();
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, -1);
exec.setConf(conf);
DeletionService del = new DeletionService(exec);
try {
del.init(conf);
del.start();
for (Path p : dirs) {
FileDeletionTask deletionTask = new FileDeletionTask(del,
(Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, null);
del.delete(deletionTask);
}
int msecToWait = 20 * 1000;
for (Path p : dirs) {
while (msecToWait > 0 && lfs.util().exists(p)) {
Thread.sleep(100);
msecToWait -= 100;
}
assertTrue(lfs.util().exists(p));
}
} finally {
del.stop();
}
} |
/**
 * Returns the number of columns in this matrix.
 */
@Override
public int ncol() {
  return n;
} | @Test
// The 3-column fixture matrix must report ncol() == 3.
public void testNcols() {
System.out.println("ncol");
assertEquals(3, matrix.ncol());
} |
/**
 * Handles an @Init-annotated plugin method.
 *
 * <p>For a not-yet-instantiated plugin, only a static method is actionable and
 * it is deferred via a class-loader init callback. For an instantiated plugin,
 * an instance method is invoked immediately in the plugin's application class
 * loader. Every other combination is a successful no-op.
 *
 * @param pluginAnnotation descriptor of the annotated method
 * @return true on success (or no-op), false if handling failed
 */
@Override
public boolean initMethod(PluginAnnotation pluginAnnotation) {
    boolean isStatic = Modifier.isStatic(pluginAnnotation.getMethod().getModifiers());
    Object plugin = pluginAnnotation.getPlugin();
    if (plugin == null) {
        // special case - static method - register callback
        return isStatic ? registerClassLoaderInit(pluginAnnotation) : true;
    }
    if (isStatic) {
        return true; // static method on an instantiated plugin: nothing to do
    }
    ClassLoader appClassLoader = pluginManager.getPluginRegistry().getAppClassLoader(plugin);
    return invokeInitMethod(pluginAnnotation, plugin, appClassLoader);
} | @Test
// An @Init instance method on a registered (instantiated) plugin must be
// invoked successfully by InitHandler.initMethod.
public void testInitMethod() throws Exception {
PluginManager pluginManager = PluginManager.getInstance();
SimplePlugin simplePlugin = new SimplePlugin();
// register the plugin
pluginManager.getPluginRegistry().getRegisteredPlugins().put(SimplePlugin.class,
Collections.<ClassLoader, Object>singletonMap(getClass().getClassLoader(), simplePlugin));
InitHandler initHandler = new InitHandler(pluginManager);
Method method = SimplePlugin.class.getMethod("initPlugin", PluginManager.class);
PluginAnnotation<Init> pluginAnnotation = new PluginAnnotation<Init>(SimplePlugin.class,
simplePlugin, method.getAnnotation(Init.class), method);
assertTrue("Init successful",
initHandler.initMethod(pluginAnnotation));
} |
/**
 * Rewrites CREATE source and CREATE ... AS SELECT statements through
 * forCreateStatement/forCreateAsStatement; other statement types are returned
 * unchanged. A KsqlStatementException propagates untouched, while any other
 * KsqlException is wrapped with the masked statement text for error reporting.
 *
 * @param statement the configured statement to (possibly) rewrite
 * @return the rewritten statement, or the original when no rewrite applies
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  if (!(statement.getStatement() instanceof CreateSource)
      && !(statement.getStatement() instanceof CreateAsSelect)) {
    return statement;
  }
  try {
    if (statement.getStatement() instanceof CreateSource) {
      final ConfiguredStatement<CreateSource> createStatement =
          (ConfiguredStatement<CreateSource>) statement;
      // orElse: keep the original when nothing was injected.
      return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
    } else {
      final ConfiguredStatement<CreateAsSelect> createStatement =
          (ConfiguredStatement<CreateAsSelect>) statement;
      return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
          createStatement);
    }
  } catch (final KsqlStatementException e) {
    throw e;
  } catch (final KsqlException e) {
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
} | @Test
public void shouldInjectValueAndMaintainKeyColumnsForCs() {
// Given:
givenValueButNotKeyInferenceSupported();
when(cs.getElements()).thenReturn(SOME_KEY_ELEMENTS_STREAM);
// When:
final ConfiguredStatement<CreateStream> result = injector.inject(csStatement);
// Then:
assertThat(result.getStatement().getElements(),
is(combineElements(SOME_KEY_ELEMENTS_STREAM, INFERRED_KSQL_VALUE_SCHEMA)));
assertThat(result.getMaskedStatementText(), is(
"CREATE STREAM `cs` ("
+ "`bob` STRING KEY, "
+ "`intField` INTEGER, "
+ "`bigIntField` BIGINT, "
+ "`doubleField` DOUBLE, "
+ "`stringField` STRING, "
+ "`booleanField` BOOLEAN, "
+ "`arrayField` ARRAY<INTEGER>, "
+ "`mapField` MAP<STRING, BIGINT>, "
+ "`structField` STRUCT<`s0` BIGINT>, "
+ "`decimalField` DECIMAL(4, 2)) "
+ "WITH (KAFKA_TOPIC='some-topic', KEY_FORMAT='kafka', VALUE_FORMAT='json_sr');"
));
} |
/**
 * Looks up a bean of the given type from the IoC container.
 *
 * @param cls the bean type to resolve; must not be null
 * @param <T> bean type
 * @return the instance registered for {@code cls} (container-defined when absent)
 */
public <T> T getBean(@NonNull Class<T> cls) {
  return this.ioc.getBean(cls);
} | @Test
// register() stores the instance under its concrete type; getBean(String.class)
// must return the exact registered value.
public void testGetBean() {
Blade blade = Blade.create();
blade.register("hello world");
String str = blade.getBean(String.class);
Assert.assertNotNull(str);
assertEquals("hello world", str);
} |
/**
 * Extracts the textual host address from a socket address. IPv6 addresses are
 * rebuilt from their raw bytes, which strips the scope/zone id that some
 * consumers choke on; returns null for an unresolved address.
 */
@VisibleForTesting
@Nullable
static String getHostAddress(InetSocketAddress socketAddress) {
  InetAddress address = socketAddress.getAddress();
  if (address instanceof Inet4Address) {
    return address.getHostAddress();
  }
  if (address instanceof Inet6Address) {
    // Strip the scope from the address since some other classes choke on it.
    // TODO(carl-mastrangelo): Consider adding this back in once issues like
    // https://github.com/google/guava/issues/2587 are fixed.
    try {
      return InetAddress.getByAddress(address.getAddress()).getHostAddress();
    } catch (UnknownHostException e) {
      throw new RuntimeException(e);
    }
  }
  assert address == null;
  return null;
} | @Test
void ipv6AddressScopeNameRemoved() throws Exception {
List<NetworkInterface> nics = Collections.list(NetworkInterface.getNetworkInterfaces());
Assumptions.assumeTrue(!nics.isEmpty(), "No network interfaces");
List<Throwable> failures = new ArrayList<>();
for (NetworkInterface nic : nics) {
Inet6Address address;
try {
address = Inet6Address.getByAddress(
"localhost", new byte[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, nic);
} catch (UnknownHostException e) {
// skip, the nic doesn't match
failures.add(e);
continue;
}
assertTrue(address.toString().contains("%"), address.toString());
String addressString = SourceAddressChannelHandler.getHostAddress(new InetSocketAddress(address, 8080));
assertEquals("0:0:0:0:0:0:0:1", addressString);
return;
}
AssumptionViolatedException failure = new AssumptionViolatedException("No Compatible Nics were found");
failures.forEach(failure::addSuppressed);
throw failure;
} |
/**
 * Creates a new extension instance via its no-arg constructor.
 *
 * @param extensionClass the extension type to instantiate
 * @param <T> extension type
 * @return a fresh instance
 * @throws PluginRuntimeException if instantiation fails for any reason
 */
@Override
public <T> T create(Class<T> extensionClass) {
    log.debug("Create instance for extension '{}'", extensionClass.getName());
    try {
        // Class.newInstance() is deprecated (it rethrows checked constructor
        // exceptions unchecked); use the Constructor-based replacement. Any
        // failure (missing ctor, access, ctor exception) is still an
        // Exception and ends up wrapped in PluginRuntimeException below.
        return extensionClass.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
        throw new PluginRuntimeException(e);
    }
} | @Test
// An extension whose constructor fails must surface as PluginRuntimeException.
public void testCreateFailConstructor() {
JavaFileObject object = JavaSources.compile(FailTestExtension);
JavaFileObjectClassLoader classLoader = new JavaFileObjectClassLoader();
Class<?> extensionClass = (Class<?>) classLoader.load(object).values().toArray()[0];
assertThrows(PluginRuntimeException.class, () -> extensionFactory.create(extensionClass));
} |
/**
 * Serializes a CDC pipeline event into Debezium-style JSON bytes.
 *
 * <p>Schema change events refresh the cached per-table serializer state and
 * return null (nothing emitted downstream). Data change events are encoded via
 * the table's cached serializer into a row whose fields are, per the reuse-row
 * layout here: 0=before, 1=after, 2=op, 3=source(schema, table).
 *
 * @param event a SchemaChangeEvent or DataChangeEvent
 * @return the JSON payload, or null for schema change events
 */
@Override
public byte[] serialize(Event event) {
    if (event instanceof SchemaChangeEvent) {
        Schema schema;
        SchemaChangeEvent schemaChangeEvent = (SchemaChangeEvent) event;
        if (event instanceof CreateTableEvent) {
            // A create carries its full schema directly.
            CreateTableEvent createTableEvent = (CreateTableEvent) event;
            schema = createTableEvent.getSchema();
        } else {
            // Other schema changes are applied on top of the cached schema.
            schema =
                    SchemaUtils.applySchemaChangeEvent(
                            jsonSerializers.get(schemaChangeEvent.tableId()).getSchema(),
                            schemaChangeEvent);
        }
        LogicalType rowType =
                DataTypeUtils.toFlinkDataType(schema.toRowDataType()).getLogicalType();
        // Rebuild the row serializer for the table's new schema.
        JsonRowDataSerializationSchema jsonSerializer =
                new JsonRowDataSerializationSchema(
                        createJsonRowType(fromLogicalToDataType(rowType)),
                        timestampFormat,
                        mapNullKeyMode,
                        mapNullKeyLiteral,
                        encodeDecimalAsPlainNumber);
        try {
            jsonSerializer.open(context);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        jsonSerializers.put(
                schemaChangeEvent.tableId(),
                new TableSchemaInfo(
                        schemaChangeEvent.tableId(), schema, jsonSerializer, zoneId));
        return null;
    }
    DataChangeEvent dataChangeEvent = (DataChangeEvent) event;
    // Field 3: the "source" struct (schema name + table name).
    reuseGenericRowData.setField(
            3,
            GenericRowData.of(
                    StringData.fromString(dataChangeEvent.tableId().getSchemaName()),
                    StringData.fromString(dataChangeEvent.tableId().getTableName())));
    try {
        switch (dataChangeEvent.op()) {
            case INSERT:
                // Insert: no "before" image.
                reuseGenericRowData.setField(0, null);
                reuseGenericRowData.setField(
                        1,
                        jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(dataChangeEvent.after(), false));
                reuseGenericRowData.setField(2, OP_INSERT);
                return jsonSerializers
                        .get(dataChangeEvent.tableId())
                        .getSerializationSchema()
                        .serialize(reuseGenericRowData);
            case DELETE:
                // Delete: no "after" image.
                reuseGenericRowData.setField(
                        0,
                        jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(dataChangeEvent.before(), false));
                reuseGenericRowData.setField(1, null);
                reuseGenericRowData.setField(2, OP_DELETE);
                return jsonSerializers
                        .get(dataChangeEvent.tableId())
                        .getSerializationSchema()
                        .serialize(reuseGenericRowData);
            case UPDATE:
            case REPLACE:
                // Update/replace: both images present, emitted as an update op.
                reuseGenericRowData.setField(
                        0,
                        jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(dataChangeEvent.before(), false));
                reuseGenericRowData.setField(
                        1,
                        jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(dataChangeEvent.after(), false));
                reuseGenericRowData.setField(2, OP_UPDATE);
                return jsonSerializers
                        .get(dataChangeEvent.tableId())
                        .getSerializationSchema()
                        .serialize(reuseGenericRowData);
            default:
                throw new UnsupportedOperationException(
                        format(
                                "Unsupported operation '%s' for OperationType.",
                                dataChangeEvent.op()));
        }
    } catch (Throwable t) {
        throw new RuntimeException(format("Could not serialize event '%s'.", event), t);
    }
} | @Test
public void testSerialize() throws Exception {
ObjectMapper mapper =
JacksonMapperFactory.createObjectMapper()
.configure(JsonGenerator.Feature.WRITE_BIGDECIMAL_AS_PLAIN, false);
SerializationSchema<Event> serializationSchema =
ChangeLogJsonFormatFactory.createSerializationSchema(
new Configuration(),
JsonSerializationType.DEBEZIUM_JSON,
ZoneId.systemDefault());
serializationSchema.open(new MockInitializationContext());
// create table
Schema schema =
Schema.newBuilder()
.physicalColumn("col1", DataTypes.STRING())
.physicalColumn("col2", DataTypes.STRING())
.primaryKey("col1")
.build();
CreateTableEvent createTableEvent = new CreateTableEvent(TABLE_1, schema);
Assertions.assertNull(serializationSchema.serialize(createTableEvent));
BinaryRecordDataGenerator generator =
new BinaryRecordDataGenerator(RowType.of(DataTypes.STRING(), DataTypes.STRING()));
// insert
DataChangeEvent insertEvent1 =
DataChangeEvent.insertEvent(
TABLE_1,
generator.generate(
new Object[] {
BinaryStringData.fromString("1"),
BinaryStringData.fromString("1")
}));
JsonNode expected =
mapper.readTree(
"{\"before\":null,\"after\":{\"col1\":\"1\",\"col2\":\"1\"},\"op\":\"c\",\"source\":{\"db\":\"default_schema\",\"table\":\"table1\"}}");
JsonNode actual = mapper.readTree(serializationSchema.serialize(insertEvent1));
Assertions.assertEquals(expected, actual);
DataChangeEvent insertEvent2 =
DataChangeEvent.insertEvent(
TABLE_1,
generator.generate(
new Object[] {
BinaryStringData.fromString("2"),
BinaryStringData.fromString("2")
}));
expected =
mapper.readTree(
"{\"before\":null,\"after\":{\"col1\":\"2\",\"col2\":\"2\"},\"op\":\"c\",\"source\":{\"db\":\"default_schema\",\"table\":\"table1\"}}");
actual = mapper.readTree(serializationSchema.serialize(insertEvent2));
Assertions.assertEquals(expected, actual);
DataChangeEvent deleteEvent =
DataChangeEvent.deleteEvent(
TABLE_1,
generator.generate(
new Object[] {
BinaryStringData.fromString("2"),
BinaryStringData.fromString("2")
}));
expected =
mapper.readTree(
"{\"before\":{\"col1\":\"2\",\"col2\":\"2\"},\"after\":null,\"op\":\"d\",\"source\":{\"db\":\"default_schema\",\"table\":\"table1\"}}");
actual = mapper.readTree(serializationSchema.serialize(deleteEvent));
Assertions.assertEquals(expected, actual);
DataChangeEvent updateEvent =
DataChangeEvent.updateEvent(
TABLE_1,
generator.generate(
new Object[] {
BinaryStringData.fromString("1"),
BinaryStringData.fromString("1")
}),
generator.generate(
new Object[] {
BinaryStringData.fromString("1"),
BinaryStringData.fromString("x")
}));
expected =
mapper.readTree(
"{\"before\":{\"col1\":\"1\",\"col2\":\"1\"},\"after\":{\"col1\":\"1\",\"col2\":\"x\"},\"op\":\"u\",\"source\":{\"db\":\"default_schema\",\"table\":\"table1\"}}");
actual = mapper.readTree(serializationSchema.serialize(updateEvent));
Assertions.assertEquals(expected, actual);
} |
/**
 * Orders ACLs by resourceType, then resource name in REVERSE (descending)
 * order, then patternType, operation, principal, host, and permissionType.
 *
 * <p>The reverse name ordering is deliberate (see inline comment); callers
 * must not assume natural name order from this comparator.
 */
@Override
public int compareTo(StandardAcl other) {
  int result;
  result = resourceType.compareTo(other.resourceType);
  if (result != 0) return result;
  result = other.resourceName.compareTo(resourceName); // REVERSE sort by resource name.
  if (result != 0) return result;
  result = patternType.compareTo(other.patternType);
  if (result != 0) return result;
  result = operation.compareTo(other.operation);
  if (result != 0) return result;
  result = principal.compareTo(other.principal);
  if (result != 0) return result;
  result = host.compareTo(other.host);
  if (result != 0) return result;
  result = permissionType.compareTo(other.permissionType);
  return result;
} | @Test
// Spot-checks ordering of pairs from the shared TEST_ACLS fixture (signum only,
// including antisymmetry for the first pair).
public void testCompareTo() {
assertEquals(1, signum(TEST_ACLS.get(0).compareTo(TEST_ACLS.get(1))));
assertEquals(-1, signum(TEST_ACLS.get(1).compareTo(TEST_ACLS.get(0))));
assertEquals(-1, signum(TEST_ACLS.get(2).compareTo(TEST_ACLS.get(3))));
assertEquals(1, signum(TEST_ACLS.get(4).compareTo(TEST_ACLS.get(3))));
assertEquals(-1, signum(TEST_ACLS.get(3).compareTo(TEST_ACLS.get(4))));
} |
/**
 * Scans {@code r} for a "java version"/"openjdk version" banner line, logs the
 * detected version, and fails when it is older than 1.8 or cannot be parsed.
 *
 * @param logger destination for progress messages
 * @param javaCommand the java executable name (used in messages only)
 * @param r reader over the `java -version` output
 * @throws IOException when no acceptable Java version is found
 */
protected static void checkJavaVersion(final PrintStream logger, String javaCommand,
final BufferedReader r)
throws IOException {
String line;
// Case-insensitive match of e.g. `java version "1.8.0_292"`, capturing only
// the leading dotted-numeric part of the version string.
Pattern p = Pattern.compile("(?i)(?:java|openjdk) version \"([0-9.]+).*\".*");
while (null != (line = r.readLine())) {
Matcher m = p.matcher(line);
if (m.matches()) {
final String versionStr = m.group(1);
logger.println(Messages.ComputerLauncher_JavaVersionResult(javaCommand, versionStr));
try {
if (new VersionNumber(versionStr).isOlderThan(new VersionNumber("1.8"))) {
throw new IOException(Messages
.ComputerLauncher_NoJavaFound(line));
}
} catch (NumberFormatException x) {
// Unparseable version number is treated the same as "too old".
throw new IOException(Messages.ComputerLauncher_NoJavaFound(line), x);
}
return;
}
}
// No banner line matched at all: report an unknown Java version.
logger.println(Messages.ComputerLauncher_UnknownJavaVersion(javaCommand));
throw new IOException(Messages.ComputerLauncher_UnknownJavaVersion(javaCommand));
} | @Test public void j2sdk4() {
// Java 1.4 banner output must be rejected with an IOException.
assertThrows(
IOException.class,
() ->
ComputerLauncher.checkJavaVersion(
new PrintStream(OutputStream.nullOutputStream()),
"-",
new BufferedReader(
new StringReader(
"java version \"1.4.2_19\"\n"
+ "Java(TM) 2 Runtime Environment, Standard Edition (build 1.4.2_19-b04)\n"
+ "Java HotSpot(TM) Client VM (build 1.4.2_19-b04, mixed mode)\n"))));
} |
/**
 * Runs the given function using the class-default retry policy
 * ({@code maxAttempts} attempts with {@code minDelay} between them).
 *
 * @param function the operation to retry
 * @throws Exception if all attempts are exhausted
 */
public static void executeWithRetry(RetryFunction function) throws Exception {
  executeWithRetry(maxAttempts, minDelay, function);
} | @Test
// A function that fails once then succeeds must complete under retry with
// exactly one recorded exception.
public void retryFunctionThatRecovers() throws Exception {
startTimeMeasure = System.currentTimeMillis();
executeWithRetry(IOITHelperTest::recoveringFunction);
assertEquals(1, listOfExceptionsThrown.size());
} |
/**
 * Sends PING to the given cluster node and blocks for the server reply
 * (normally "PONG").
 *
 * @param node the cluster node to ping
 * @return the server's reply string
 */
@Override
public String ping(RedisClusterNode node) {
  RedisClient entry = getEntry(node);
  RFuture<String> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.PING);
  return syncFuture(f);
} | @Test
// PING against a cluster master must answer "PONG".
public void testClusterPing() {
testInCluster(connection -> {
RedisClusterNode master = getFirstMaster(connection);
String res = connection.ping(master);
assertThat(res).isEqualTo("PONG");
});
} |
/**
 * Launches (or relaunches) one batch of foreach iteration instances.
 *
 * <p>Fresh runs create the instances from scratch; restarts rebuild them from
 * the foreach artifact's prior state. Returns empty when there are no requests
 * or no instances end up being created.
 *
 * @param requests run requests, one per iteration; size must match instanceIds
 * @param instanceIds workflow instance ids aligned with {@code requests}
 * @param batchSize maximum instances written per DAO batch
 * @return run details from the DAO, or empty when nothing was run
 */
public Optional<Details> runForeachBatch(
    Workflow workflow,
    Long internalId,
    long workflowVersionId,
    RunProperties runProperties,
    String foreachStepId,
    ForeachArtifact artifact,
    List<RunRequest> requests,
    List<Long> instanceIds,
    int batchSize) {
  if (ObjectHelper.isCollectionEmptyOrNull(requests)) {
    return Optional.empty();
  }
  // Fail fast on misaligned inputs (via Checks.checkTrue).
  Checks.checkTrue(
      requests.size() == instanceIds.size(),
      "Run request list size [%s] must match instance id list size [%s]",
      requests.size(),
      instanceIds.size());
  List<WorkflowInstance> instances;
  if (artifact.isFreshRun()) {
    instances =
        createStartForeachInstances(
            workflow,
            internalId,
            workflowVersionId,
            artifact.getForeachRunId(),
            runProperties,
            requests,
            instanceIds);
  } else {
    instances =
        createRestartForeachInstances(
            workflow,
            internalId,
            workflowVersionId,
            runProperties,
            foreachStepId,
            artifact,
            requests,
            instanceIds);
  }
  if (ObjectHelper.isCollectionEmptyOrNull(instances)) {
    return Optional.empty();
  }
  return instanceDao.runWorkflowInstances(workflow.getId(), instances, batchSize);
} | @Test
public void testCreateRestartForeachInstancesUpstreamModeFromBeginning() {
doNothing().when(workflowHelper).updateWorkflowInstance(any(), any());
when(instanceDao.getLatestWorkflowInstanceRun(anyString(), anyLong()))
.thenReturn(new WorkflowInstance());
Map<String, Map<String, ParamDefinition>> stepRunParams =
Collections.singletonMap(
"job1",
Collections.singletonMap("p1", ParamDefinition.buildParamDefinition("p1", "d1")));
ForeachArtifact artifact = new ForeachArtifact();
artifact.setRunPolicy(RunPolicy.RESTART_FROM_BEGINNING);
artifact.setTotalLoopCount(10);
artifact.setForeachWorkflowId("maestro_foreach_x");
artifact.setAncestorIterationCount(null);
artifact.setForeachRunId(3L);
RestartConfig restartConfig =
RestartConfig.builder()
.restartPolicy(RunPolicy.RESTART_FROM_BEGINNING)
.downstreamPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
.stepRestartParams(stepRunParams)
.addRestartNode("maestro_foreach_x", 1, null)
.build();
ForeachInitiator initiator = new ForeachInitiator();
UpstreamInitiator.Info parent = new UpstreamInitiator.Info();
parent.setWorkflowId("maestro_foreach_x");
parent.setInstanceId(1);
parent.setRunId(1);
parent.setStepId("foreach-step");
parent.setStepAttemptId(1);
initiator.setAncestors(Collections.singletonList(parent));
RunRequest runRequest =
RunRequest.builder()
.initiator(initiator)
.currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
.restartConfig(restartConfig)
.build();
Optional<Details> errors =
actionHandler.runForeachBatch(
definition.getWorkflow(),
123L,
10L,
new RunProperties(),
"foreach-step",
artifact,
Collections.singletonList(runRequest),
Collections.singletonList(2L),
3);
assertFalse(errors.isPresent());
verify(instanceDao, times(1)).runWorkflowInstances(any(), any(), anyInt());
ArgumentCaptor<RunRequest> captor = ArgumentCaptor.forClass(RunRequest.class);
verify(workflowHelper, times(1)).updateWorkflowInstance(any(), captor.capture());
RunRequest res = captor.getValue();
assertEquals(RunPolicy.RESTART_FROM_BEGINNING, res.getCurrentPolicy());
// it will keep the restart config with step restart params
assertEquals(restartConfig, res.getRestartConfig());
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.