focal_method | test_case
|---|---|
@Override
public TDescribeTableResult describeTable(TDescribeTableParams params) throws TException {
LOG.debug("get desc table request: {}", params);
TDescribeTableResult result = new TDescribeTableResult();
List<TColumnDef> columns = Lists.newArrayList();
result.setColumns(columns);
// database privs should be checked in analysis phase
UserIdentity currentUser = null;
if (params.isSetCurrent_user_ident()) {
currentUser = UserIdentity.fromThrift(params.current_user_ident);
} else {
currentUser = UserIdentity.createAnalyzedUserIdentWithIp(params.user, params.user_ip);
}
long limit = params.isSetLimit() ? params.getLimit() : -1;
// If the user queries schema meta such as "select * from information_schema.columns limit 10;",
// there is no predicate and only a limit clause, so we can call the
// describe_table interface only once, which reduces the RPC time from BE to FE and
// the amount of data. In addition, we need to add db_name & table_name values to TColumnDesc.
if (!params.isSetDb() && StringUtils.isBlank(params.getTable_name())) {
describeWithoutDbAndTable(currentUser, columns, limit);
return result;
}
String catalogName = null;
if (params.isSetCatalog_name()) {
catalogName = params.getCatalog_name();
}
MetadataMgr metadataMgr = GlobalStateMgr.getCurrentState().getMetadataMgr();
Database db = metadataMgr.getDb(catalogName, params.db);
if (db != null) {
Locker locker = new Locker();
try {
locker.lockDatabase(db, LockType.READ);
Table table = metadataMgr.getTable(catalogName, params.db, params.table_name);
if (table == null) {
return result;
}
try {
Authorizer.checkAnyActionOnTableLikeObject(currentUser,
null, params.db, table);
} catch (AccessDeniedException e) {
return result;
}
setColumnDesc(columns, table, limit, false, params.db, params.table_name);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
}
return result;
}
|
@Test
public void testDefaultValueMeta() throws Exception {
starRocksAssert.withDatabase("test_table").useDatabase("test_table")
.withTable("CREATE TABLE `test_default_value` (\n" +
" `id` datetime NULL DEFAULT CURRENT_TIMESTAMP COMMENT \"\",\n" +
" `value` int(11) NULL DEFAULT \"2\" COMMENT \"\"\n" +
") ENGINE=OLAP \n" +
"DUPLICATE KEY(`id`, `value`)\n" +
"DISTRIBUTED BY RANDOM\n" +
"PROPERTIES (\n" +
"\"replication_num\" = \"1\",\n" +
"\"in_memory\" = \"false\",\n" +
"\"enable_persistent_index\" = \"false\",\n" +
"\"replicated_storage\" = \"true\",\n" +
"\"compression\" = \"LZ4\"\n" +
");");
ConnectContext ctx = starRocksAssert.getCtx();
String createUserSql = "create user test2";
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(createUserSql, ctx), ctx);
String grantSql = "GRANT SELECT ON TABLE test_table.test_default_value TO USER `test2`@`%`;";
DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(grantSql, ctx), ctx);
FrontendServiceImpl impl = new FrontendServiceImpl(exeEnv);
TDescribeTableParams request = new TDescribeTableParams();
TUserIdentity userIdentity = new TUserIdentity();
userIdentity.setUsername("test2");
userIdentity.setHost("%");
userIdentity.setIs_domain(false);
request.setCurrent_user_ident(userIdentity);
TDescribeTableResult response = impl.describeTable(request);
List<TColumnDef> columnDefList = response.getColumns();
List<TColumnDef> testDefaultValue = columnDefList.stream()
.filter(u -> u.getColumnDesc().getTableName().equalsIgnoreCase("test_default_value"))
.collect(Collectors.toList());
Assert.assertEquals(2, testDefaultValue.size());
Assert.assertEquals("CURRENT_TIMESTAMP", testDefaultValue.get(0).getColumnDesc().getColumnDefault());
Assert.assertEquals("2", testDefaultValue.get(1).getColumnDesc().getColumnDefault());
}
|
@Override
public synchronized void cleanupAll() {
try {
if (usingStaticInstance) {
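// A static (shared) instance is never deleted; only drop the database that was created on it.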
if (databaseAdminClient != null) {
Failsafe.with(retryOnQuotaException())
.run(() -> databaseAdminClient.dropDatabase(instanceId, databaseId));
}
} else {
LOG.info("Deleting instance {}...", instanceId);
if (instanceAdminClient != null) {
Failsafe.with(retryOnQuotaException())
.run(() -> instanceAdminClient.deleteInstance(instanceId));
}
hasInstance = false;
}
hasDatabase = false;
} catch (SpannerException e) {
throw new SpannerResourceManagerException("Failed to delete instance.", e);
} finally {
if (!spanner.isClosed()) {
spanner.close();
}
}
LOG.info("Manager successfully cleaned up.");
}
|
@Test
public void testCleanupAllShouldNotDeleteInstanceWhenStatic() {
// arrange
doNothing().when(databaseAdminClient).dropDatabase(any(), any());
when(spanner.getInstanceAdminClient()).thenReturn(instanceAdminClient);
when(spanner.getDatabaseAdminClient()).thenReturn(databaseAdminClient);
testManager =
new SpannerResourceManager(
spanner, TEST_ID, PROJECT_ID, REGION, DIALECT, true, "existing-instance", NODE_COUNT);
// act
testManager.cleanupAll();
// assert
verify(spanner.getDatabaseAdminClient()).dropDatabase(eq("existing-instance"), any());
verify(spanner.getInstanceAdminClient(), never()).deleteInstance(any());
verify(spanner).close();
}
|
public static Set<String> findKeywordsFromCrashReport(String crashReport) {
Matcher matcher = CRASH_REPORT_STACK_TRACE_PATTERN.matcher(crashReport);
Set<String> result = new HashSet<>();
if (matcher.find()) {
for (String line : matcher.group("stacktrace").split("\\n")) {
Matcher lineMatcher = STACK_TRACE_LINE_PATTERN.matcher(line);
if (lineMatcher.find()) {
String[] method = lineMatcher.group("method").split("\\.");
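// Keep package segments as keywords; the last two segments are the class and method names.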
for (int i = 0; i < method.length - 2; i++) {
if (PACKAGE_KEYWORD_BLACK_LIST.contains(method[i])) {
continue;
}
result.add(method[i]);
}
Matcher moduleMatcher = STACK_TRACE_LINE_MODULE_PATTERN.matcher(line);
if (moduleMatcher.find()) {
for (String module : moduleMatcher.group("tokens").split(",")) {
String[] split = module.split(":");
if (split.length >= 2 && "xf".equals(split[0])) {
if (PACKAGE_KEYWORD_BLACK_LIST.contains(split[1])) {
continue;
}
result.add(split[1]);
}
}
}
}
}
}
return result;
}
|
@Test
public void wizardry() throws IOException {
assertEquals(
new HashSet<>(Arrays.asList("wizardry", "electroblob", "projectile")),
CrashReportAnalyzer.findKeywordsFromCrashReport(loadLog("/crash-report/mod/wizardry.txt")));
}
|
public Mono<Table<String, TopicPartition, Long>> listConsumerGroupOffsets(List<String> consumerGroups,
// all partitions if null passed
@Nullable List<TopicPartition> partitions) {
Function<Collection<String>, Mono<Map<String, Map<TopicPartition, OffsetAndMetadata>>>> call =
groups -> toMono(
client.listConsumerGroupOffsets(
groups.stream()
.collect(Collectors.toMap(
g -> g,
g -> new ListConsumerGroupOffsetsSpec().topicPartitions(partitions)
))).all()
);
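// Split the groups across several admin calls and merge the per-group offset maps.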
Mono<Map<String, Map<TopicPartition, OffsetAndMetadata>>> merged = partitionCalls(
consumerGroups,
25,
4,
call,
mapMerger()
);
return merged.map(map -> {
var table = ImmutableTable.<String, TopicPartition, Long>builder();
map.forEach((g, tpOffsets) -> tpOffsets.forEach((tp, offset) -> {
// offset will be null for partitions that don't have a committed offset for this group
if (offset != null) {
table.put(g, tp, offset.offset());
}
}
}));
return table.build();
});
}
|
@Test
void testListConsumerGroupOffsets() throws Exception {
String topic = UUID.randomUUID().toString();
String anotherTopic = UUID.randomUUID().toString();
createTopics(new NewTopic(topic, 2, (short) 1), new NewTopic(anotherTopic, 1, (short) 1));
fillTopic(topic, 10);
Function<String, KafkaConsumer<String, String>> consumerSupplier = groupName -> {
Properties p = new Properties();
p.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
p.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupName);
p.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
p.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
p.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
p.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
return new KafkaConsumer<String, String>(p);
};
String fullyPolledConsumer = UUID.randomUUID().toString();
try (KafkaConsumer<String, String> c = consumerSupplier.apply(fullyPolledConsumer)) {
c.subscribe(List.of(topic));
int polled = 0;
while (polled < 10) {
polled += c.poll(Duration.ofMillis(50)).count();
}
c.commitSync();
}
String polled1MsgConsumer = UUID.randomUUID().toString();
try (KafkaConsumer<String, String> c = consumerSupplier.apply(polled1MsgConsumer)) {
c.subscribe(List.of(topic));
c.poll(Duration.ofMillis(100));
c.commitSync(Map.of(tp(topic, 0), new OffsetAndMetadata(1)));
}
String noCommitConsumer = UUID.randomUUID().toString();
try (KafkaConsumer<String, String> c = consumerSupplier.apply(noCommitConsumer)) {
c.subscribe(List.of(topic));
c.poll(Duration.ofMillis(100));
}
Map<TopicPartition, ListOffsetsResultInfo> endOffsets = adminClient.listOffsets(Map.of(
tp(topic, 0), OffsetSpec.latest(),
tp(topic, 1), OffsetSpec.latest())).all().get();
StepVerifier.create(
reactiveAdminClient.listConsumerGroupOffsets(
List.of(fullyPolledConsumer, polled1MsgConsumer, noCommitConsumer),
List.of(
tp(topic, 0),
tp(topic, 1),
tp(anotherTopic, 0))
)
).assertNext(table -> {
assertThat(table.row(polled1MsgConsumer))
.containsEntry(tp(topic, 0), 1L)
.hasSize(1);
assertThat(table.row(noCommitConsumer))
.isEmpty();
assertThat(table.row(fullyPolledConsumer))
.containsEntry(tp(topic, 0), endOffsets.get(tp(topic, 0)).offset())
.containsEntry(tp(topic, 1), endOffsets.get(tp(topic, 1)).offset())
.hasSize(2);
})
.verifyComplete();
}
|
@Override
public void update(Object elem) throws Exception {
// Increment object counter.
if (objectCount != null) {
objectCount.addValue(1L);
}
// Increment byte counter.
if ((byteCountObserver != null || meanByteCountObserver != null)
&& (sampleElement() || elementByteSizeObservable.isRegisterByteSizeObserverCheap(elem))) {
if (byteCountObserver != null) {
byteCountObserver.setScalingFactor(
Math.max(samplingToken, SAMPLING_CUTOFF) / (double) SAMPLING_CUTOFF);
elementByteSizeObservable.registerByteSizeObserver(elem, byteCountObserver);
}
if (meanByteCountObserver != null) {
elementByteSizeObservable.registerByteSizeObserver(elem, meanByteCountObserver);
}
if (byteCountObserver != null && !byteCountObserver.getIsLazy()) {
byteCountObserver.advance();
}
if (meanByteCountObserver != null && !meanByteCountObserver.getIsLazy()) {
meanByteCountObserver.advance();
}
}
}
|
@Test
public void testNullArgument() throws Exception {
TestOutputCounter outputCounter =
new TestOutputCounter(NameContextsForTests.nameContextForTest());
thrown.expect(CoderException.class);
outputCounter.update(null);
}
|
private static BinaryArray fromPrimitiveArray(Object arr, int offset, int length, Field field) {
BinaryArray result = new BinaryArray(field);
final long headerInBytes = calculateHeaderInBytes(length);
final long valueRegionInBytes = result.elementSize * length;
final long totalSize = headerInBytes + valueRegionInBytes;
if (totalSize > Integer.MAX_VALUE) {
throw new UnsupportedOperationException(
"Cannot convert this array to binary format as it's too big.");
}
final byte[] data = new byte[(int) totalSize];
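// The header stores the element count; the value region follows immediately after the header.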
Platform.putLong(data, Platform.BYTE_ARRAY_OFFSET, length);
Platform.copyMemory(
arr, offset, data, Platform.BYTE_ARRAY_OFFSET + headerInBytes, valueRegionInBytes);
MemoryBuffer memoryBuffer = MemoryUtils.wrap(data);
result.pointTo(memoryBuffer, 0, (int) totalSize);
return result;
}
|
@Test(enabled = false)
public void testAccessPerf() {
int length = 10000;
int[] arr = new int[length];
Random random = new Random();
for (int i = 0; i < length; i++) {
arr[i] = random.nextInt();
}
BinaryArray binaryArray = BinaryArray.fromPrimitiveArray(arr);
int iterNums = 100_000;
// warm up array access
for (int i = 0; i < iterNums; i++) {
for (int j = 0; j < length; j++) {
elem = arr[j];
}
}
// test array
long startTime = System.nanoTime();
for (int i = 0; i < iterNums; i++) {
for (int j = 0; j < length; j++) {
elem = arr[j];
}
}
long duration = System.nanoTime() - startTime;
LOG.info("access array take " + duration + "ns, " + duration / 1000_000 + " ms\n");
for (int i = 0; i < iterNums; i++) {
for (int j = 0; j < length; j++) {
elem = binaryArray.getInt32(j);
}
}
// test binary array
startTime = System.nanoTime();
for (int i = 0; i < iterNums; i++) {
for (int j = 0; j < length; j++) {
elem = binaryArray.getInt32(j);
}
}
duration = System.nanoTime() - startTime;
LOG.info("access BinaryArray take " + duration + "ns, " + duration / 1000_000 + " ms\n");
}
|
public LegacyDeleteResult<T, K> remove(DBObject query) {
return new LegacyDeleteResult<>(delegate.deleteMany(new BasicDBObject(query.toMap())));
}
|
@Test
void remove() {
final var collection = jacksonCollection("simple", Simple.class);
final var foo = new Simple("000000000000000000000001", "foo");
final var bar = new Simple("000000000000000000000002", "bar");
collection.insert(List.of(foo, bar));
assertThat(collection.remove(DBQuery.is("name", "foo")).getN()).isEqualTo(1);
assertThat((Iterable<Simple>) collection.find()).containsExactly(bar);
collection.insert(foo);
assertThat(collection.remove(DBQuery.is("name", "foo")).getN()).isEqualTo(1);
assertThat((Iterable<Simple>) collection.find()).containsExactly(bar);
assertThatThrownBy(() -> collection.remove(DBQuery.empty(), WriteConcern.W2))
.isInstanceOf(MongoCommandException.class)
.hasMessageContaining("cannot use 'w' > 1 when a host is not replicated");
}
|
@Nonnull
public static <T> StreamSource<T> connect(@Nonnull Properties properties,
@Nonnull FunctionEx<SourceRecord, T> projectionFn) {
return connect(properties, projectionFn, DEFAULT_RECONNECT_BEHAVIOR);
}
|
@Test
public void should_fail_when_no_name_property() {
Properties properties = new Properties();
assertThatThrownBy(() -> connect(properties, TestUtil::convertToString))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Property 'name' is required");
}
|
@VisibleForTesting
List<BatchInterface> makeGetBatches(
Collection<GcsPath> paths, List<StorageObjectOrIOException[]> results) throws IOException {
List<BatchInterface> batches = new ArrayList<>();
for (List<GcsPath> filesToGet :
Lists.partition(Lists.newArrayList(paths), MAX_REQUESTS_PER_BATCH)) {
BatchInterface batch = batchRequestSupplier.get();
for (GcsPath path : filesToGet) {
results.add(enqueueGetFileSize(path, batch));
}
batches.add(batch);
}
return batches;
}
|
@Test
public void testMakeGetBatches() throws IOException {
GcsUtil gcsUtil = gcsOptionsWithTestCredential().getGcsUtil();
// Small number of files fits in 1 batch
List<StorageObjectOrIOException[]> results = Lists.newArrayList();
List<BatchInterface> batches = gcsUtil.makeGetBatches(makeGcsPaths("s", 3), results);
assertThat(batches.size(), equalTo(1));
assertThat(sumBatchSizes(batches), equalTo(3));
assertEquals(3, results.size());
// Exactly one full batch of files fits in 1 batch
results = Lists.newArrayList();
batches = gcsUtil.makeGetBatches(makeGcsPaths("s", 100), results);
assertThat(batches.size(), equalTo(1));
assertThat(sumBatchSizes(batches), equalTo(100));
assertEquals(100, results.size());
// A little more than 5 full batches of files fit in 6 batches
results = Lists.newArrayList();
batches = gcsUtil.makeGetBatches(makeGcsPaths("s", 501), results);
assertThat(batches.size(), equalTo(6));
assertThat(sumBatchSizes(batches), equalTo(501));
assertEquals(501, results.size());
}
|
@Override
public boolean canPass(Node node, int acquireCount) {
return canPass(node, acquireCount, false);
}
|
@Test
public void testWarmUp() throws InterruptedException {
try (MockedStatic<TimeUtil> mocked = super.mockTimeUtil()) {
WarmUpController warmupController = new WarmUpController(10, 10, 3);
setCurrentMillis(mocked, System.currentTimeMillis());
Node node = mock(Node.class);
when(node.passQps()).thenReturn(8d);
when(node.previousPassQps()).thenReturn(1d);
assertFalse(warmupController.canPass(node, 1));
when(node.passQps()).thenReturn(1d);
when(node.previousPassQps()).thenReturn(1d);
assertTrue(warmupController.canPass(node, 1));
when(node.previousPassQps()).thenReturn(10d);
for (int i = 0; i < 100; i++) {
sleep(mocked, 100);
warmupController.canPass(node, 1);
}
when(node.passQps()).thenReturn(8d);
assertTrue(warmupController.canPass(node, 1));
when(node.passQps()).thenReturn(10d);
assertFalse(warmupController.canPass(node, 1));
}
}
|
public void copyTo(T valueCopy, StreamRecord<T> target) {
target.value = valueCopy;
target.timestamp = this.timestamp;
target.hasTimestamp = this.hasTimestamp;
}
|
@Test
void testCopyTo() {
StreamRecord<String> recNoTimestamp = new StreamRecord<>("test");
StreamRecord<String> recNoTimestampCopy = new StreamRecord<>(null);
recNoTimestamp.copyTo("test", recNoTimestampCopy);
assertThat(recNoTimestampCopy).isEqualTo(recNoTimestamp);
StreamRecord<String> recWithTimestamp = new StreamRecord<>("test", 99);
StreamRecord<String> recWithTimestampCopy = new StreamRecord<>(null);
recWithTimestamp.copyTo("test", recWithTimestampCopy);
assertThat(recWithTimestampCopy).isEqualTo(recWithTimestamp);
}
|
@Override
public V peek() {
return getValue(0);
}
|
@Test
public void testRemoveWithCodec() {
RQueue<TestModel> queue = redisson.getQueue("queue");
TestModel msg = new TestModel("key", "traceId", 0L);
queue.add(msg);
assertThat(queue.contains(queue.peek())).isTrue();
}
|
@Override
public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters,
final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final SingleRule rule, final Collection<DataNode> includedDataNodes) {
Collection<QualifiedTable> singleTables = getSingleTables(selectStatementContext, database, rule);
if (singleTables.isEmpty()) {
return false;
}
if (containsView(database, singleTables)) {
return true;
}
if (!includedDataNodes.isEmpty() && !isInnerCommaJoin(selectStatementContext.getSqlStatement())) {
return true;
}
boolean result = rule.isAllTablesInSameComputeNode(includedDataNodes, singleTables);
includedDataNodes.addAll(getTableDataNodes(rule, singleTables));
return !result;
}
|
@Test
void assertDecideWhenNotContainsSingleTable() {
SelectStatementContext select = createStatementContext();
Collection<DataNode> includedDataNodes = new HashSet<>();
assertFalse(new SingleSQLFederationDecider().decide(select, Collections.emptyList(), mock(RuleMetaData.class), createDatabase(), mock(SingleRule.class), includedDataNodes));
assertTrue(includedDataNodes.isEmpty());
}
|
public void close()
{
if (!isClosed)
{
isClosed = true;
unmapAndCloseChannel();
}
}
|
@Test
void shouldNotThrowWhenOldRecordingLogsAreDeleted() throws IOException
{
final File segmentFile = new File(archiveDir, segmentFileName(recordingThreeId, SEGMENT_LENGTH * 2));
try (FileChannel log = FileChannel.open(segmentFile.toPath(), READ, WRITE, CREATE))
{
final ByteBuffer bb = allocate(HEADER_LENGTH);
final DataHeaderFlyweight flyweight = new DataHeaderFlyweight(bb);
flyweight.frameLength(256);
log.write(bb);
}
final Catalog catalog = new Catalog(archiveDir, null, 0, CAPACITY, clock, null, segmentFileBuffer);
catalog.close();
}
|
@Override
public void doInject(RequestResource resource, RamContext context, LoginIdentityContext result) {
if (context.validate()) {
try {
String accessKey = context.getAccessKey();
String secretKey = context.getSecretKey();
// STS temporary credential authentication takes precedence over AK/SK authentication
if (StsConfig.getInstance().isStsOn()) {
StsCredential stsCredential = StsCredentialHolder.getInstance().getStsCredential();
accessKey = stsCredential.getAccessKeyId();
secretKey = stsCredential.getAccessKeySecret();
result.setParameter(IdentifyConstants.SECURITY_TOKEN_HEADER, stsCredential.getSecurityToken());
}
String signatureKey = secretKey;
if (StringUtils.isNotEmpty(context.getRegionId())) {
signatureKey = CalculateV4SigningKeyUtil
.finalSigningKeyStringWithDefaultInfo(secretKey, context.getRegionId());
result.setParameter(RamConstants.SIGNATURE_VERSION, RamConstants.V4);
}
String signData = getSignData(getGroupedServiceName(resource));
String signature = SignUtil.sign(signData, signatureKey);
result.setParameter(SIGNATURE_FILED, signature);
result.setParameter(DATA_FILED, signData);
result.setParameter(AK_FILED, accessKey);
} catch (Exception e) {
NAMING_LOGGER.error("inject ak/sk failed.", e);
}
}
}
|
@Test
void testDoInjectForV4Sign() throws Exception {
resource = RequestResource.namingBuilder().setResource("test@@aaa").setGroup("group").build();
LoginIdentityContext actual = new LoginIdentityContext();
ramContext.setRegionId("cn-hangzhou");
namingResourceInjector.doInject(resource, ramContext, actual);
assertEquals(4, actual.getAllKey().size());
assertEquals(PropertyKeyConst.ACCESS_KEY, actual.getParameter("ak"));
assertEquals(RamConstants.V4, actual.getParameter(RamConstants.SIGNATURE_VERSION));
assertTrue(actual.getParameter("data").endsWith("@@test@@aaa"));
String signatureKey = CalculateV4SigningKeyUtil.finalSigningKeyStringWithDefaultInfo(
PropertyKeyConst.SECRET_KEY, "cn-hangzhou");
String expectSign = SignUtil.sign(actual.getParameter("data"), signatureKey);
assertEquals(expectSign, actual.getParameter("signature"));
}
|
@Override
public JobResourceRequirements requestJobResourceRequirements() {
final JobResourceRequirements.Builder builder = JobResourceRequirements.newBuilder();
for (JobInformation.VertexInformation vertex : jobInformation.getVertices()) {
builder.setParallelismForJobVertex(
vertex.getJobVertexID(), vertex.getMinParallelism(), vertex.getParallelism());
}
return builder.build();
}
|
@Test
void testRequestDefaultResourceRequirements() throws Exception {
final JobGraph jobGraph = createJobGraph();
final Configuration configuration = new Configuration();
final AdaptiveScheduler scheduler =
new AdaptiveSchedulerBuilder(
jobGraph, mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
.setJobMasterConfiguration(configuration)
.build();
assertThat(scheduler.requestJobResourceRequirements())
.isEqualTo(
JobResourceRequirements.newBuilder()
.setParallelismForJobVertex(
JOB_VERTEX.getID(), 1, JOB_VERTEX.getParallelism())
.build());
}
|
public static File zip(String srcPath) throws UtilException {
return zip(srcPath, DEFAULT_CHARSET);
}
|
@Test
@Disabled
public void zipToStreamTest(){
final String zip = "d:/test/testToStream.zip";
final OutputStream out = FileUtil.getOutputStream(zip);
ZipUtil.zip(out, new String[]{"sm1_alias.txt"},
new InputStream[]{FileUtil.getInputStream("d:/test/sm4_1.txt")});
}
|
@Override
public String getConfig(final String dataId) {
try {
return configService.getConfig(dataId, NacosPathConstants.GROUP, NacosPathConstants.DEFAULT_TIME_OUT);
} catch (NacosException e) {
LOG.error("Get data from nacos error.", e);
throw new ShenyuException(e.getMessage());
}
}
|
@Test
public void testOnSelectorChanged() throws NacosException {
when(configService.getConfig(anyString(), anyString(), anyLong())).thenReturn(null);
SelectorData selectorData = SelectorData.builder().id(MOCK_ID).name(MOCK_NAME).pluginName(MOCK_PLUGIN_NAME).build();
nacosDataChangedListener.onSelectorChanged(ImmutableList.of(selectorData), DataEventTypeEnum.DELETE);
nacosDataChangedListener.onSelectorChanged(ImmutableList.of(selectorData), DataEventTypeEnum.REFRESH);
nacosDataChangedListener.onSelectorChanged(ImmutableList.of(selectorData), DataEventTypeEnum.MYSELF);
nacosDataChangedListener.onSelectorChanged(ImmutableList.of(selectorData), DataEventTypeEnum.CREATE);
verify(configService, times(6)).publishConfig(any(String.class), any(String.class), any(String.class), any(String.class));
}
|
public String toString() {
return "[if " + booleanExpression.toString(0) + "]";
}
|
@Test
public void testIfVertexWithSecretsIsntLeaked() throws InvalidIRException {
BooleanExpression booleanExpression = DSL.eEq(DSL.eEventValue("password"), DSL.eValue("${secret_key}"));
ConfigVariableExpander cve = ConfigVariableExpanderTest.getFakeCve(
Collections.singletonMap("secret_key", "s3cr3t"), Collections.emptyMap());
IfVertex ifVertex = new IfVertex(randMeta(),
(BooleanExpression) ExpressionSubstitution.substituteBoolExpression(cve, booleanExpression));
// Exercise
String output = ifVertex.toString();
// Verify
assertThat(output, not(containsString("s3cr3t")));
}
|
public List<LispTeRecord> getTeRecords() {
return ImmutableList.copyOf(records);
}
|
@Test
public void testConstruction() {
LispTeLcafAddress teLcafAddress = address1;
LispIpv4Address rtrRloc1 = new LispIpv4Address(IpAddress.valueOf("192.168.1.1"));
LispIpv4Address rtrRloc2 = new LispIpv4Address(IpAddress.valueOf("192.168.1.2"));
assertThat("lookup flag value in TeRecord is not correct",
teLcafAddress.getTeRecords().get(0).isLookup(), is(false));
assertThat("RLOC probe flag value in TeRecord is not correct",
teLcafAddress.getTeRecords().get(0).isRlocProbe(), is(false));
assertThat("strict flag value in TeRecord is not correct",
teLcafAddress.getTeRecords().get(0).isStrict(), is(false));
assertThat("RTR RLOC address in TeRecord is not correct",
teLcafAddress.getTeRecords().get(0).getRtrRlocAddress(), is(rtrRloc1));
assertThat("lookup flag value in TeRecord in not correct",
teLcafAddress.getTeRecords().get(1).isLookup(), is(false));
assertThat("RLOC probe flag value in TeRecord is not correct",
teLcafAddress.getTeRecords().get(1).isRlocProbe(), is(true));
assertThat("strict flag value in TeRecord is not correct",
teLcafAddress.getTeRecords().get(1).isStrict(), is(false));
assertThat("RTR RLOC address in TeRecord is not correct",
teLcafAddress.getTeRecords().get(1).getRtrRlocAddress(), is(rtrRloc2));
}
|
public static DataSchema dataMapToDataSchema(DataMap map, PegasusSchemaParser parser)
{
// Convert DataMap into DataSchema
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
JacksonDataCodec codec = new JacksonDataCodec();
try
{
codec.writeMap(map, outputStream);
}
catch (IOException e)
{
// This should never occur
throw new IllegalStateException(UNEXPECTED_IOEXCEPTION + map, e);
}
ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
parser.parse(inputStream);
List<DataSchema> topLevelDataSchemas = parser.topLevelDataSchemas();
assert(topLevelDataSchemas.size() <= 1);
if (parser.hasError())
{
return null;
}
else if (topLevelDataSchemas.size() != 1)
{
// This should never occur
throw new IllegalStateException(WRONG_NUMBER_OF_SCHEMA_LEFT + topLevelDataSchemas);
}
return topLevelDataSchemas.get(0);
}
|
@Test
public void testConvertDataMapToDataSchema() throws IOException
{
for (String good : goodInputs)
{
NamedDataSchema dataSchema = (NamedDataSchema) TestUtil.dataSchemaFromString(good);
DataMap mapFromString = TestUtil.dataMapFromString(good);
PegasusSchemaParser parser = new SchemaParser();
DataSchema schemaFromMap = Conversions.dataMapToDataSchema(mapFromString, parser);
assertEquals(schemaFromMap, dataSchema);
}
for (String bad : badInputs)
{
DataMap mapFromString = TestUtil.dataMapFromString(bad);
PegasusSchemaParser parser = new SchemaParser();
DataSchema schemaFromMap = Conversions.dataMapToDataSchema(mapFromString, parser);
assertNull(schemaFromMap);
assertTrue(parser.hasError());
}
}
|
public void setTemplateEntriesForChild(CapacitySchedulerConfiguration conf,
QueuePath childQueuePath) {
setTemplateEntriesForChild(conf, childQueuePath, false);
}
|
@Test
public void testOneLevelWildcardTemplate() {
conf.set(getTemplateKey(TEST_QUEUE_A_WILDCARD, "capacity"), "6w");
AutoCreatedQueueTemplate template =
new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB);
template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC);
Assert.assertEquals("weight is not set", 6f,
conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
}
|
@Override
public byte[] getBytes(final int columnIndex) {
return values.getBytes(columnIndex - 1);
}
|
@Test
public void shouldGetBytes() {
assertThat(row.getBytes("f_bytes"), is(new byte[]{0, 1, 2}));
}
|
public String build(TablePath tablePath) {
StringBuilder createTableSql = new StringBuilder();
createTableSql
.append("CREATE TABLE ")
.append(CatalogUtils.quoteIdentifier(tablePath.getDatabaseName(), fieldIde, "\""))
.append(".")
.append(CatalogUtils.quoteIdentifier(tablePath.getTableName(), fieldIde, "\""))
.append(" (\n");
List<String> columnSqls =
columns.stream()
.map(column -> CatalogUtils.getFieldIde(buildColumnSql(column), fieldIde))
.collect(Collectors.toList());
// Add primary key directly in the create table statement
if (createIndex
&& primaryKey != null
&& primaryKey.getColumnNames() != null
&& !primaryKey.getColumnNames().isEmpty()) {
columnSqls.add(buildPrimaryKeySql(primaryKey));
}
if (createIndex && CollectionUtils.isNotEmpty(constraintKeys)) {
for (ConstraintKey constraintKey : constraintKeys) {
if (StringUtils.isBlank(constraintKey.getConstraintName())
|| (primaryKey != null
&& (StringUtils.equals(
primaryKey.getPrimaryKey(),
constraintKey.getConstraintName())
|| primaryContainsAllConstrainKey(
primaryKey, constraintKey)))) {
continue;
}
switch (constraintKey.getConstraintType()) {
case UNIQUE_KEY:
String uniqueKeySql = buildUniqueKeySql(constraintKey);
columnSqls.add(uniqueKeySql);
break;
case INDEX_KEY:
case FOREIGN_KEY:
break;
}
}
}
createTableSql.append(String.join(",\n", columnSqls));
createTableSql.append("\n)");
if (comment != null) {
createTableSql.append(" COMMENT '").append(comment).append("'");
}
return createTableSql.toString();
}
|
@Test
public void testBuild() {
String dataBaseName = "test_database";
String tableName = "test_table";
TablePath tablePath = TablePath.of(dataBaseName, tableName);
TableSchema tableSchema =
TableSchema.builder()
.column(PhysicalColumn.of("id", BasicType.LONG_TYPE, 22, false, null, "id"))
.column(
PhysicalColumn.of(
"name", BasicType.STRING_TYPE, 128, false, null, "name"))
.column(
PhysicalColumn.of(
"age", BasicType.INT_TYPE, (Long) null, true, null, "age"))
.column(
PhysicalColumn.of(
"createTime",
LocalTimeType.LOCAL_DATE_TIME_TYPE,
3,
true,
null,
"createTime"))
.column(
PhysicalColumn.of(
"lastUpdateTime",
LocalTimeType.LOCAL_DATE_TIME_TYPE,
3,
true,
null,
"lastUpdateTime"))
.primaryKey(PrimaryKey.of("id", Lists.newArrayList("id")))
.constraintKey(
ConstraintKey.of(
ConstraintKey.ConstraintType.UNIQUE_KEY,
"name",
Lists.newArrayList(
ConstraintKey.ConstraintKeyColumn.of(
"name", null))))
.build();
CatalogTable catalogTable =
CatalogTable.of(
TableIdentifier.of("test_catalog", dataBaseName, tableName),
tableSchema,
new HashMap<>(),
new ArrayList<>(),
"User table");
String createTableSql =
new SapHanaCreateTableSqlBuilder(catalogTable, true).build(tablePath);
String expect =
"CREATE TABLE \"test_database\".\"test_table\" (\n"
+ "\"id\" BIGINT NOT NULL COMMENT 'id',\n"
+ "\"name\" NVARCHAR(128) NOT NULL COMMENT 'name',\n"
+ "\"age\" INTEGER NULL COMMENT 'age',\n"
+ "\"createTime\" SECONDDATE NULL COMMENT 'createTime',\n"
+ "\"lastUpdateTime\" SECONDDATE NULL COMMENT 'lastUpdateTime',\n"
+ "PRIMARY KEY (\"id\"),\n"
+ "UNIQUE (\"name\")\n"
+ ") COMMENT 'User table'";
Assertions.assertEquals(expect, createTableSql);
// skip index
String createTableSqlSkipIndex =
new SapHanaCreateTableSqlBuilder(catalogTable, false).build(tablePath);
String expectSkipIndex =
"CREATE TABLE \"test_database\".\"test_table\" (\n"
+ "\"id\" BIGINT NOT NULL COMMENT 'id',\n"
+ "\"name\" NVARCHAR(128) NOT NULL COMMENT 'name',\n"
+ "\"age\" INTEGER NULL COMMENT 'age',\n"
+ "\"createTime\" SECONDDATE NULL COMMENT 'createTime',\n"
+ "\"lastUpdateTime\" SECONDDATE NULL COMMENT 'lastUpdateTime'\n"
+ ") COMMENT 'User table'";
Assertions.assertEquals(expectSkipIndex, createTableSqlSkipIndex);
}
|
@Override
public ZonedDateTime createdAt() {
return ZonedDateTime.parse("2016-12-15T16:39:00Z");
}
|
@Test
public void createdAt() throws Exception {
assertThat(migration.createdAt()).isEqualTo(ZonedDateTime.parse("2016-12-15T16:39:00Z"));
}
|
@Override
public long offset() {
throw new UnsupportedOperationException("StateStores can't access offset.");
}
|
@Test
public void shouldThrowOnOffset() {
assertThrows(UnsupportedOperationException.class, () -> context.offset());
}
|
private int refreshQueues(String subClusterId) throws IOException, YarnException {
// Refresh the queue properties
ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
RefreshQueuesRequest request =
recordFactory.newRecordInstance(RefreshQueuesRequest.class);
if (StringUtils.isNotBlank(subClusterId)) {
request.setSubClusterId(subClusterId);
}
adminProtocol.refreshQueues(request);
return 0;
}
|
@Test
public void testRefreshQueues() throws Exception {
String[] args = { "-refreshQueues" };
assertEquals(0, rmAdminCLI.run(args));
verify(admin).refreshQueues(any(RefreshQueuesRequest.class));
}
|
@VisibleForTesting
protected void writeToFile( FileObject fileObject, String backupFileName ) throws IOException, KettleException {
OutputStream outputStream = null;
PrintStream out = null;
try {
outputStream = initOutputStreamUsingKettleVFS( fileObject );
out = new PrintStream( outputStream );
out.print( XMLHandler.getXMLHeader( Const.XML_ENCODING ) );
out.println( "<" + XML_TAG + ">" );
Collection<SharedObjectInterface> collection = objectsMap.values();
for ( SharedObjectInterface sharedObject : collection ) {
String xmlContent = sharedObject.getXML();
out.println( xmlContent );
}
out.println( "</" + XML_TAG + ">" );
} catch ( Exception e ) {
// restore the file if something went wrong
boolean isRestored = false;
if ( backupFileName != null ) {
restoreFileFromBackup( backupFileName );
isRestored = true;
}
throw new KettleException(
BaseMessages.getString( PKG, "SharedOjects.WriteToFile.ErrorWritingFile", isRestored ), e );
} finally {
if ( out != null ) {
out.flush();
}
if ( out != null ) {
out.close();
}
if ( outputStream != null ) {
outputStream.close();
}
}
}
|
@Test
public void writeToFileTest() throws KettleException, IOException {
doCallRealMethod().when( sharedObjectsMock ).writeToFile( any( FileObject.class ), anyString() );
when( sharedObjectsMock.initOutputStreamUsingKettleVFS( any( FileObject.class ) ) ).thenThrow(
new RuntimeException() );
try {
sharedObjectsMock.writeToFile( any( FileObject.class ), anyString() );
} catch ( KettleException e ) {
// NOP: writeToFile throws a KettleException here, which is expected
}
// check if file restored in case of exception is occurred
verify( sharedObjectsMock ).writeToFile( any(), anyString() );
}
|
public static boolean isBasicInfoChanged(Member actual, Member expected) {
if (null == expected) {
return null != actual;
}
if (!expected.getIp().equals(actual.getIp())) {
return true;
}
if (expected.getPort() != actual.getPort()) {
return true;
}
if (!expected.getAddress().equals(actual.getAddress())) {
return true;
}
if (!expected.getState().equals(actual.getState())) {
return true;
}
// whether the gRPC report ability has changed
if (expected.isGrpcReportEnabled() != actual.isGrpcReportEnabled()) {
return true;
}
return isBasicInfoChangedInExtendInfo(expected, actual);
}
|
@Test
void testIsBasicInfoChangedForChangedAbilities() {
Member newMember = buildMember();
newMember.setGrpcReportEnabled(true);
assertTrue(MemberUtil.isBasicInfoChanged(newMember, originalMember));
}
|
public static String getDefaultHost(@Nullable String strInterface,
@Nullable String nameserver,
boolean tryfallbackResolution)
throws UnknownHostException {
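// A null or "default" interface falls back to the cached local hostname.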
if (strInterface == null || "default".equals(strInterface)) {
return cachedHostname;
}
if (nameserver != null && "default".equals(nameserver)) {
nameserver = null;
}
String[] hosts = getHosts(strInterface, nameserver, tryfallbackResolution);
return hosts[0];
}
|
@Test
public void testGetLocalHost() throws Exception {
String hostname = DNS.getDefaultHost(DEFAULT);
assertNotNull(hostname);
}
|
@Override
public Object decode(Response response, Type type) throws IOException, DecodeException {
if (response.status() == 404 || response.status() == 204)
if (JSONObject.class.isAssignableFrom((Class<?>) type))
return new JSONObject();
else if (JSONArray.class.isAssignableFrom((Class<?>) type))
return new JSONArray();
else if (String.class.equals(type))
return null;
else
throw new DecodeException(response.status(),
format("%s is not a type supported by this decoder.", type), response.request());
if (response.body() == null)
return null;
try (Reader reader = response.body().asReader(response.charset())) {
Reader bodyReader = (reader.markSupported()) ? reader : new BufferedReader(reader);
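// Probe a single character to detect an empty body, then rewind.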
bodyReader.mark(1);
if (bodyReader.read() == -1) {
return null; // Empty body
}
bodyReader.reset();
return decodeBody(response, type, bodyReader);
} catch (JSONException jsonException) {
if (jsonException.getCause() != null && jsonException.getCause() instanceof IOException) {
throw (IOException) jsonException.getCause();
}
throw new DecodeException(response.status(), jsonException.getMessage(), response.request(),
jsonException);
}
}
|
@Test
void emptyBodyDecodesToEmpty() throws IOException {
Response response = Response.builder()
.status(204)
.reason("OK")
.headers(Collections.emptyMap())
.body("", UTF_8)
.request(request)
.build();
assertThat(((JSONObject) new JsonDecoder().decode(response, JSONObject.class)).isEmpty())
.isTrue();
}
|
protected static void handleMigration(ListMultimap<TStorageMedium, Long> tabletMetaMigrationMap,
long backendId) {
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex();
AgentBatchTask batchTask = new AgentBatchTask();
OUTER:
for (TStorageMedium storageMedium : tabletMetaMigrationMap.keySet()) {
List<Long> tabletIds = tabletMetaMigrationMap.get(storageMedium);
List<TabletMeta> tabletMetaList = invertedIndex.getTabletMetaList(tabletIds);
for (int i = 0; i < tabletMetaList.size(); i++) {
long tabletId = tabletIds.get(i);
TabletMeta tabletMeta = tabletMetaList.get(i);
// 1. If size of tabletMigrationMap exceeds (Config.tablet_sched_max_migration_task_sent_once - running_tasks_on_be),
// do not send more tasks. The number of tasks running on BE cannot exceed Config.tablet_sched_max_migration_task_sent_once
if (batchTask.getTaskNum() >=
Config.tablet_sched_max_migration_task_sent_once
- AgentTaskQueue.getTaskNum(backendId, TTaskType.STORAGE_MEDIUM_MIGRATE, false)) {
LOG.debug("size of tabletMigrationMap + size of running tasks on BE is bigger than {}",
Config.tablet_sched_max_migration_task_sent_once);
break OUTER;
}
// 2. If the task already running on BE, do not send again
if (AgentTaskQueue.getTask(backendId, TTaskType.STORAGE_MEDIUM_MIGRATE, tabletId) != null) {
LOG.debug("migrate of tablet:{} is already running on BE", tabletId);
continue;
}
// 3. There are some limitations for primary table, details in migratableTablet()
Database db = GlobalStateMgr.getCurrentState().getDb(tabletMeta.getDbId());
if (db == null) {
continue;
}
OlapTable table = (OlapTable) db.getTable(tabletMeta.getTableId());
if (table == null) {
continue;
}
if (!migratableTablet(db, table, tabletMeta.getPhysicalPartitionId(), tabletMeta.getIndexId(), tabletId)) {
continue;
}
// always get old schema hash(as effective one)
int effectiveSchemaHash = tabletMeta.getOldSchemaHash();
StorageMediaMigrationTask task = new StorageMediaMigrationTask(backendId, tabletId,
effectiveSchemaHash, storageMedium);
batchTask.addTask(task);
}
}
AgentTaskQueue.addBatchTask(batchTask);
AgentTaskExecutor.submit(batchTask);
}
|
@Test
public void testHandleMigrationTaskControl() {
long backendId = 10001L;
// mock the task execution on BE
new MockUp<AgentTaskExecutor>() {
@Mock
public void submit(AgentBatchTask task) {
}
};
OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState()
.getDb("test").getTable("binlog_report_handler_test");
ListMultimap<TStorageMedium, Long> tabletMetaMigrationMap = ArrayListMultimap.create();
List<Long> allTablets = new ArrayList<>();
for (MaterializedIndex index : olapTable.getPartition("binlog_report_handler_test")
.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
for (Tablet tablet : index.getTablets()) {
tabletMetaMigrationMap.put(TStorageMedium.HDD, tablet.getId());
allTablets.add(tablet.getId());
}
}
Assert.assertEquals(50, tabletMetaMigrationMap.size());
ReportHandler.handleMigration(tabletMetaMigrationMap, backendId);
Assert.assertEquals(50, AgentTaskQueue.getTaskNum(backendId, TTaskType.STORAGE_MEDIUM_MIGRATE, false));
// finish 30 tablets migration
for (int i = 0; i < 30; i++) {
AgentTaskQueue.removeTask(backendId, TTaskType.STORAGE_MEDIUM_MIGRATE, allTablets.get(49 - i));
}
// limit the batch size to 30
Config.tablet_sched_max_migration_task_sent_once = 30;
ReportHandler.handleMigration(tabletMetaMigrationMap, backendId);
Assert.assertEquals(30, AgentTaskQueue.getTaskNum(backendId, TTaskType.STORAGE_MEDIUM_MIGRATE, false));
}
|
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
}
|
@Test
public void shouldFillInRowtime() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
ImmutableList.of(K0, COL0, COL1),
ImmutableList.of(
new StringLiteral("str"),
new StringLiteral("str"),
new LongLiteral(2L)
)
);
// When:
executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
// Then:
verify(keySerializer).serialize(TOPIC_NAME, genericKey("str"));
verify(valueSerializer).serialize(TOPIC_NAME, genericRow("str", 2L));
verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
}
|
public static DateTime beginOfWeek(Date date) {
return new DateTime(beginOfWeek(calendar(date)));
}
|
@Test
public void beginOfWeekTest() {
final String dateStr = "2017-03-01 22:33:23";
final DateTime date = DateUtil.parse(dateStr);
Objects.requireNonNull(date).setFirstDayOfWeek(Week.MONDAY);
// start of the week
final Date beginOfWeek = DateUtil.beginOfWeek(date);
assertEquals("2017-02-27 00:00:00", beginOfWeek.toString());
// end of the week
final Date endOfWeek = DateUtil.endOfWeek(date);
assertEquals("2017-03-05 23:59:59", endOfWeek.toString());
final Calendar calendar = DateUtil.calendar(date);
// start of the week
final Calendar begin = DateUtil.beginOfWeek(calendar);
assertEquals("2017-02-27 00:00:00", DateUtil.date(begin).toString());
// end of the week
final Calendar end = DateUtil.endOfWeek(calendar);
assertEquals("2017-03-05 23:59:59", DateUtil.date(end).toString());
}
|
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
return helper.interpret(session, st, context);
}
|
@Test
void should_interpret_select_statement_with_cql_format() {
// When
intrContext.getLocalProperties().put("outputFormat", "cql");
final InterpreterResult actual = interpreter.interpret(
"SELECT * FROM " + ARTISTS_TABLE + " LIMIT 2;", intrContext);
intrContext.getLocalProperties().remove("outputFormat");
// Then
assertNotNull(actual);
assertEquals(Code.SUCCESS, actual.code());
assertEquals("name\tborn\tcountry\tdied\tgender\tstyles\ttype\n" +
"'Bogdan Raczynski'\t'1977-01-01'\t'Poland'\tnull\t'Male'\t" +
"['Dance','Electro']\t'Person'\n" +
"'Krishna Das'\t'1947-05-31'\t'USA'\tnull\t'Male'\t['Unknown']\t'Person'\n",
actual.message().get(0).getData());
}
|
public static String normalize(final String path) {
return normalize(path, true);
}
|
@Test
public void test972() {
assertEquals("//home/path", PathNormalizer.normalize("//home/path"));
}
|
@Override
public void write(InputT element, Context context) throws IOException, InterruptedException {
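// Apply backpressure: flush until the buffer has room for the incoming entry.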
while (bufferedRequestEntries.size() >= maxBufferedRequests) {
flush();
}
addEntryToBuffer(elementConverter.apply(element, context), false);
nonBlockingFlush();
}
|
@Test
public void testThatIntermittentlyFailingEntriesShouldBeFlushedWithMainBatchInTimeBasedFlush()
throws Exception {
AsyncSinkWriterImpl sink =
new AsyncSinkWriterImplBuilder()
.context(sinkInitContext)
.maxBatchSizeInBytes(10_000)
.maxTimeInBufferMS(100)
.maxRecordSizeInBytes(10_000)
.simulateFailures(true)
.build();
TestProcessingTimeService tpts = sinkInitContext.getTestProcessingTimeService();
tpts.setCurrentTime(0L);
sink.write("1");
sink.write("2");
sink.write("225");
tpts.setCurrentTime(100L);
assertThat(res.size()).isEqualTo(2);
sink.write("3");
sink.write("4");
tpts.setCurrentTime(199L);
assertThat(res.size()).isEqualTo(2);
tpts.setCurrentTime(200L);
assertThat(res.size()).isEqualTo(5);
}
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double-checked locking: re-check after acquiring the lock
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
|
@Test
public void testDisplayDataInheritanceNamespace() {
ExtendsBaseOptions options = PipelineOptionsFactory.as(ExtendsBaseOptions.class);
options.setFoo("bar");
DisplayData displayData = DisplayData.from(options);
assertThat(
displayData,
hasDisplayItem(
allOf(hasKey("foo"), hasValue("bar"), hasNamespace(ExtendsBaseOptions.class))));
}
|
@Override
public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment,
InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) {
String serverTag = _tenantConfig.getServer();
Set<String> instances = HelixHelper.getServerInstancesForTenant(_helixManager, serverTag);
int numInstances = instances.size();
Preconditions.checkState(numInstances > 0, "No instance found with tag: %s or %s",
TagNameUtils.getOfflineTagForTenant(serverTag), TagNameUtils.getRealtimeTagForTenant(serverTag));
return new ArrayList<>(instances);
}
|
@Test
public void testSegmentAssignmentToRealtimeHosts() {
List<HelixProperty> instanceConfigList = new ArrayList<>();
for (String instance : INSTANCES) {
ZNRecord znRecord = new ZNRecord(instance);
znRecord.setListField(TAG_LIST.name(), ImmutableList.of(REALTIME_SERVER_TAG));
instanceConfigList.add(new InstanceConfig(znRecord));
}
HelixDataAccessor dataAccessor = mock(HelixDataAccessor.class);
PropertyKey.Builder builder = new PropertyKey.Builder("cluster");
when(dataAccessor.keyBuilder()).thenReturn(builder);
when(dataAccessor.getChildValues(builder.instanceConfigs(), true)).thenReturn(instanceConfigList);
when(_helixManager.getHelixDataAccessor()).thenReturn(dataAccessor);
List<String> instances = _segmentAssignment.assignSegment(SEGMENT_NAME, new TreeMap(), _instancePartitionsMap);
assertEquals(instances.size(), NUM_INSTANCES);
assertEqualsNoOrder(instances.toArray(), INSTANCES.toArray());
}
|
@Override
public ShardingSphereSchema swapToObject(final YamlShardingSphereSchema yamlConfig) {
return Optional.ofNullable(yamlConfig).map(this::swapSchema).orElseGet(() -> new ShardingSphereSchema(yamlConfig.getName()));
}
|
@Test
void assertSwapToShardingSphereSchemaWithoutTable() {
YamlShardingSphereSchema yamlSchema = YamlEngine.unmarshal(readYAML(YAML_WITHOUT_TABLE), YamlShardingSphereSchema.class);
assertTrue(new YamlSchemaSwapper().swapToObject(yamlSchema).getAllTableNames().isEmpty());
}
|
public static Capacity from(ClusterResources resources) {
return from(resources, resources);
}
|
@Test
void testCapacityValidation() {
// Equal min and max is allowed
Capacity.from(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
IntRange.empty(),
false,
true,
Optional.empty(),
ClusterInfo.empty());
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
new ClusterResources(2, 2, new NodeResources(1, 2, 3, 4)));
assertValidationFailure(new ClusterResources(4, 4, new NodeResources(1, 2, 3, 4)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)));
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(2, 2, 3, 4)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)));
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(1, 3, 3, 4)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)));
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(1, 2, 4, 4)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)));
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 5)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)));
// It's enough that one dimension is smaller also when the others are larger
assertValidationFailure(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
new ClusterResources(8, 4, new NodeResources(2, 1, 6, 8)));
assertEquals("Cannot set hostTTL without a custom cloud account",
assertThrows(IllegalArgumentException.class,
() -> Capacity.from(new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
new ClusterResources(4, 2, new NodeResources(1, 2, 3, 4)),
IntRange.empty(), false, true, Optional.empty(), new ClusterInfo.Builder().hostTTL(Duration.ofSeconds(1)).build()))
.getMessage());
}
|
@Udf
public Long round(@UdfParameter final long val) {
return val;
}
|
@Test
public void shouldRoundBigDecimalWithDecimalPlacesNegative() {
assertThat(udf.round(new BigDecimal("-1.0"), 0), is(new BigDecimal("-1.0")));
assertThat(udf.round(new BigDecimal("-1.1"), 0), is(new BigDecimal("-1.0")));
assertThat(udf.round(new BigDecimal("-1.5"), 0), is(new BigDecimal("-1.0")));
assertThat(udf.round(new BigDecimal("-1.75"), 0), is(new BigDecimal("-2.00")));
assertThat(udf.round(new BigDecimal("-100.1"), 0), is(new BigDecimal("-100.0")));
assertThat(udf.round(new BigDecimal("-100.5"), 0), is(new BigDecimal("-100.0")));
assertThat(udf.round(new BigDecimal("-100.75"), 0), is(new BigDecimal("-101.00")));
assertThat(udf.round(new BigDecimal("-100.10"), 1), is(new BigDecimal("-100.10")));
assertThat(udf.round(new BigDecimal("-100.11"), 1), is(new BigDecimal("-100.10")));
assertThat(udf.round(new BigDecimal("-100.15"), 1), is(new BigDecimal("-100.10")));
assertThat(udf.round(new BigDecimal("-100.17"), 1), is(new BigDecimal("-100.20")));
assertThat(udf.round(new BigDecimal("-100.110"), 2), is(new BigDecimal("-100.110")));
assertThat(udf.round(new BigDecimal("-100.111"), 2), is(new BigDecimal("-100.110")));
assertThat(udf.round(new BigDecimal("-100.115"), 2), is(new BigDecimal("-100.110")));
assertThat(udf.round(new BigDecimal("-100.117"), 2), is(new BigDecimal("-100.120")));
assertThat(udf.round(new BigDecimal("-100.1110"), 3), is(new BigDecimal("-100.1110")));
assertThat(udf.round(new BigDecimal("-100.1111"), 3), is(new BigDecimal("-100.1110")));
assertThat(udf.round(new BigDecimal("-100.1115"), 3), is(new BigDecimal("-100.1110")));
assertThat(udf.round(new BigDecimal("-100.1117"), 3), is(new BigDecimal("-100.1120")));
assertThat(udf.round(new BigDecimal("-12345.67"), -2), is(new BigDecimal("-12300.00")));
assertThat(udf.round(new BigDecimal("-12345.67"), -3), is(new BigDecimal("-12000.00")));
assertThat(udf.round(new BigDecimal("-12345.67"), -4), is(new BigDecimal("-10000.00")));
assertThat(udf.round(new BigDecimal("-12345.67"), -5), is(new BigDecimal("0.00")));
}
|
public List<QueryMetadata> sql(final String sql) {
return sql(sql, Collections.emptyMap());
}
|
@Test
public void shouldParseStatements() {
// When:
ksqlContext.sql("Some SQL", SOME_PROPERTIES);
// Then:
verify(ksqlEngine).parse("Some SQL");
}
|
@Override
public ParsedLine parse(final String line, final int cursor, final ParseContext context) {
final ParsedLine parsed = delegate.parse(line, cursor, context);
if (context != ParseContext.ACCEPT_LINE) {
return parsed;
}
if (UnclosedQuoteChecker.isUnclosedQuote(line)) {
throw new EOFError(-1, -1, "Missing end quote", "end quote char");
}
final String bare = CommentStripper.strip(parsed.line());
if (bare.isEmpty()) {
return parsed;
}
if (cliCmdPredicate.test(bare)) {
return parsed;
}
if (!bare.endsWith(TERMINATION_CHAR)) {
throw new EOFError(-1, -1, "Missing termination char", "termination char");
}
return parsed;
}
|
@Test
public void shouldAlwaysAcceptCommentLines() {
// Given:
givenDelegateWillReturn(" -- this is a comment");
// When:
final ParsedLine result = parser.parse("what ever", 0, ParseContext.ACCEPT_LINE);
// Then:
assertThat(result, is(parsedLine));
}
|
@Override
public void startIt() {
if (semaphore.tryAcquire()) {
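// Run the migration asynchronously; release the permit if the submission itself fails.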
try {
executorService.execute(this::doDatabaseMigration);
} catch (RuntimeException e) {
semaphore.release();
throw e;
}
} else {
LOGGER.trace("{}: lock is already taken or process is already running", Thread.currentThread().getName());
}
}
|
@Test
public void status_is_SUCCEEDED_and_failure_is_null_when_trigger_runs_without_an_exception() {
underTest.startIt();
assertThat(migrationState.getStatus()).isEqualTo(DatabaseMigrationState.Status.SUCCEEDED);
assertThat(migrationState.getError()).isEmpty();
assertThat(migrationState.getStartedAt()).isNotNull();
}
|
public static <T> Window<T> into(WindowFn<? super T, ?> fn) {
try {
fn.windowCoder().verifyDeterministic();
} catch (NonDeterministicException e) {
throw new IllegalArgumentException("Window coders must be deterministic.", e);
}
return Window.<T>configure().withWindowFn(fn);
}
|
@Test
@Category({ValidatesRunner.class, UsesCustomWindowMerging.class})
public void testMergingCustomWindowsWithoutCustomWindowTypes() {
Instant startInstant = new Instant(0L);
PCollection<KV<String, Integer>> inputCollection =
pipeline.apply(
Create.timestamped(
TimestampedValue.of(KV.of("a", 1), startInstant.plus(Duration.standardSeconds(1))),
TimestampedValue.of(KV.of("a", 2), startInstant.plus(Duration.standardSeconds(2))),
TimestampedValue.of(KV.of("a", 3), startInstant.plus(Duration.standardSeconds(3))),
TimestampedValue.of(KV.of("a", 4), startInstant.plus(Duration.standardSeconds(4))),
TimestampedValue.of(
KV.of("a", 5), startInstant.plus(Duration.standardSeconds(5)))));
PCollection<KV<String, Integer>> windowedCollection =
inputCollection.apply(Window.into(new WindowOddEvenMergingBuckets<>()));
PCollection<String> result =
windowedCollection
.apply(GroupByKey.create())
.apply(
ParDo.of(
new DoFn<KV<String, Iterable<Integer>>, String>() {
@ProcessElement
public void processElement(ProcessContext c, BoundedWindow window) {
List<Integer> elements = Lists.newArrayList();
c.element().getValue().forEach(elements::add);
Collections.sort(elements);
c.output(elements.toString());
}
}));
PAssert.that("Wrong output collection", result).containsInAnyOrder("[2, 4]", "[1, 3, 5]");
pipeline.run();
}
|
public static Coordinate gcj02ToBd09(double lng, double lat) {
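// Converts GCJ-02 (the Chinese national datum) to BD-09 (Baidu) using Baidu's
// published offset: a small sinusoidal perturbation applied in polar form. X_PI is
// assumed to be PI * 3000.0 / 180.0, as in the common reference implementation.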
double z = Math.sqrt(lng * lng + lat * lat) + 0.00002 * Math.sin(lat * X_PI);
double theta = Math.atan2(lat, lng) + 0.000003 * Math.cos(lng * X_PI);
double bd_lng = z * Math.cos(theta) + 0.0065;
double bd_lat = z * Math.sin(theta) + 0.006;
return new Coordinate(bd_lng, bd_lat);
}
|
@Test
public void gcj02ToBd09Test() {
final CoordinateUtil.Coordinate coordinate = CoordinateUtil.gcj02ToBd09(116.404, 39.915);
assertEquals(116.41036949371029D, coordinate.getLng(), 0);
assertEquals(39.92133699351022D, coordinate.getLat(), 0);
}
|
NewExternalIssue mapResult(String driverName, @Nullable Result.Level ruleSeverity, @Nullable Result.Level ruleSeverityForNewTaxonomy, Result result) {
NewExternalIssue newExternalIssue = sensorContext.newExternalIssue();
newExternalIssue.type(DEFAULT_TYPE);
newExternalIssue.engineId(driverName);
newExternalIssue.severity(toSonarQubeSeverity(ruleSeverity));
newExternalIssue.ruleId(requireNonNull(result.getRuleId(), "No ruleId found for issue thrown by driver " + driverName));
newExternalIssue.cleanCodeAttribute(DEFAULT_CLEAN_CODE_ATTRIBUTE);
newExternalIssue.addImpact(DEFAULT_SOFTWARE_QUALITY, toSonarQubeImpactSeverity(ruleSeverityForNewTaxonomy));
mapLocations(result, newExternalIssue);
return newExternalIssue;
}
|
@Test
@UseDataProvider("rule_severity_to_sonarqube_severity_mapping")
public void mapResult_mapsCorrectlyLevelToSeverity(Result.Level ruleSeverity, Severity sonarQubeSeverity, org.sonar.api.issue.impact.Severity impactSeverity) {
NewExternalIssue newExternalIssue = resultMapper.mapResult(DRIVER_NAME, ruleSeverity, ruleSeverity, result);
verify(newExternalIssue).severity(sonarQubeSeverity);
verify(newExternalIssue).addImpact(DEFAULT_SOFTWARE_QUALITY, impactSeverity);
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String sqlServerType = typeDefine.getDataType().toUpperCase();
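// Map the native SQL Server type to a SeaTunnel column: sourceType records a
// normalized, DDL-like type string while dataType carries the engine-neutral type.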
switch (sqlServerType) {
case SQLSERVER_BIT:
builder.sourceType(SQLSERVER_BIT);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case SQLSERVER_TINYINT:
case SQLSERVER_TINYINT_IDENTITY:
builder.sourceType(SQLSERVER_TINYINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case SQLSERVER_SMALLINT:
case SQLSERVER_SMALLINT_IDENTITY:
builder.sourceType(SQLSERVER_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case SQLSERVER_INTEGER:
case SQLSERVER_INTEGER_IDENTITY:
case SQLSERVER_INT:
case SQLSERVER_INT_IDENTITY:
builder.sourceType(SQLSERVER_INT);
builder.dataType(BasicType.INT_TYPE);
break;
case SQLSERVER_BIGINT:
case SQLSERVER_BIGINT_IDENTITY:
builder.sourceType(SQLSERVER_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case SQLSERVER_REAL:
builder.sourceType(SQLSERVER_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case SQLSERVER_FLOAT:
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() <= 24) {
builder.sourceType(SQLSERVER_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
} else {
builder.sourceType(SQLSERVER_FLOAT);
builder.dataType(BasicType.DOUBLE_TYPE);
}
break;
case SQLSERVER_DECIMAL:
case SQLSERVER_NUMERIC:
builder.sourceType(
String.format(
"%s(%s,%s)",
SQLSERVER_DECIMAL,
typeDefine.getPrecision(),
typeDefine.getScale()));
builder.dataType(
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_MONEY:
builder.sourceType(SQLSERVER_MONEY);
builder.dataType(
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_SMALLMONEY:
builder.sourceType(SQLSERVER_SMALLMONEY);
builder.dataType(
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_CHAR:
builder.sourceType(String.format("%s(%s)", SQLSERVER_CHAR, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
break;
case SQLSERVER_NCHAR:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_NCHAR, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
break;
case SQLSERVER_VARCHAR:
if (typeDefine.getLength() == -1) {
builder.sourceType(MAX_VARCHAR);
builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
} else {
builder.sourceType(
String.format("%s(%s)", SQLSERVER_VARCHAR, typeDefine.getLength()));
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
}
builder.dataType(BasicType.STRING_TYPE);
break;
case SQLSERVER_NVARCHAR:
if (typeDefine.getLength() == -1) {
builder.sourceType(MAX_NVARCHAR);
builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
} else {
builder.sourceType(
String.format("%s(%s)", SQLSERVER_NVARCHAR, typeDefine.getLength()));
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
}
builder.dataType(BasicType.STRING_TYPE);
break;
case SQLSERVER_TEXT:
builder.sourceType(SQLSERVER_TEXT);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_31 - 1);
break;
case SQLSERVER_NTEXT:
builder.sourceType(SQLSERVER_NTEXT);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_30 - 1);
break;
case SQLSERVER_XML:
builder.sourceType(SQLSERVER_XML);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_31 - 1);
break;
case SQLSERVER_UNIQUEIDENTIFIER:
builder.sourceType(SQLSERVER_UNIQUEIDENTIFIER);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
break;
case SQLSERVER_SQLVARIANT:
builder.sourceType(SQLSERVER_SQLVARIANT);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(typeDefine.getLength());
break;
case SQLSERVER_BINARY:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_BINARY, typeDefine.getLength()));
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case SQLSERVER_VARBINARY:
if (typeDefine.getLength() == -1) {
builder.sourceType(MAX_VARBINARY);
builder.columnLength(POWER_2_31 - 1);
} else {
builder.sourceType(
String.format("%s(%s)", SQLSERVER_VARBINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
}
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case SQLSERVER_IMAGE:
builder.sourceType(SQLSERVER_IMAGE);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(POWER_2_31 - 1);
break;
case SQLSERVER_TIMESTAMP:
builder.sourceType(SQLSERVER_TIMESTAMP);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(8L);
break;
case SQLSERVER_DATE:
builder.sourceType(SQLSERVER_DATE);
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case SQLSERVER_TIME:
builder.sourceType(String.format("%s(%s)", SQLSERVER_TIME, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_DATETIME:
builder.sourceType(SQLSERVER_DATETIME);
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(3);
break;
case SQLSERVER_DATETIME2:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_DATETIME2, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_DATETIMEOFFSET:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_DATETIMEOFFSET, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_SMALLDATETIME:
builder.sourceType(SQLSERVER_SMALLDATETIME);
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.SQLSERVER, sqlServerType, typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertFloat() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder().name("test").columnType("real").dataType("real").build();
Column column = SqlServerTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.FLOAT_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("float")
.dataType("float")
.precision(24L)
.build();
column = SqlServerTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.FLOAT_TYPE, column.getDataType());
Assertions.assertEquals(
SqlServerTypeConverter.SQLSERVER_REAL, column.getSourceType().toUpperCase());
}
|
@Override
public void write(InputT element, Context context) throws IOException, InterruptedException {
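// Backpressure: synchronously flush until the buffer has room, so a single write
// can never grow the buffer past maxBufferedRequests.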
while (bufferedRequestEntries.size() >= maxBufferedRequests) {
flush();
}
addEntryToBuffer(elementConverter.apply(element, context), false);
nonBlockingFlush();
}
|
@Test
public void testThatABatchWithSizeSmallerThanMaxBatchSizeIsFlushedOnTimeoutExpiry()
throws Exception {
AsyncSinkWriterImpl sink =
new AsyncSinkWriterImplBuilder()
.context(sinkInitContext)
.maxBatchSize(10)
.maxInFlightRequests(20)
.maxBatchSizeInBytes(10_000)
.maxTimeInBufferMS(100)
.maxRecordSizeInBytes(10_000)
.simulateFailures(true)
.build();
TestProcessingTimeService tpts = sinkInitContext.getTestProcessingTimeService();
tpts.setCurrentTime(0L);
for (int i = 0; i < 8; i++) {
sink.write(String.valueOf(i));
}
tpts.setCurrentTime(99L);
assertThat(res.size()).isEqualTo(0);
tpts.setCurrentTime(100L);
assertThat(res.size()).isEqualTo(8);
}
|
public static void forceMkdir(String path) throws IOException {
FileUtils.forceMkdir(new File(path));
}
|
@Test
void testForceMkdir() throws IOException {
File dir = Paths.get(TMP_PATH, UUID.randomUUID().toString(), UUID.randomUUID().toString()).toFile();
DiskUtils.forceMkdir(dir);
assertTrue(dir.exists());
dir.deleteOnExit();
}
|
@Override
public SCMView responseMessageForSCMView(String responseBody) {
try {
final Map map = parseResponseToMap(responseBody);
if (map.isEmpty()) {
throw new RuntimeException("The JSON for SCM View cannot be empty");
}
final String displayValue;
try {
displayValue = (String) map.get("displayValue");
} catch (Exception e) {
throw new RuntimeException("SCM View's 'displayValue' should be of type string");
}
if (isEmpty(displayValue)) {
throw new RuntimeException("SCM View's 'displayValue' is a required field");
}
final String template;
try {
template = (String) map.get("template");
} catch (Exception e) {
throw new RuntimeException("SCM View's 'template' should be of type string");
}
if (isEmpty(template)) {
throw new RuntimeException("SCM View's 'template' is a required field");
}
return new SCMView() {
@Override
public String displayValue() {
return displayValue;
}
@Override
public String template() {
return template;
}
};
} catch (Exception e) {
throw new RuntimeException(String.format("Unable to de-serialize json response. Error: %s.", e.getMessage()));
}
}
|
@Test
public void shouldBuildSCMViewFromResponse() {
String jsonResponse = "{\"displayValue\":\"MySCMPlugin\", \"template\":\"<html>junk</html>\"}";
SCMView view = messageHandler.responseMessageForSCMView(jsonResponse);
assertThat(view.displayValue(), is("MySCMPlugin"));
assertThat(view.template(), is("<html>junk</html>"));
}
|
private RemotingCommand querySubscriptionByConsumer(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
QuerySubscriptionByConsumerRequestHeader requestHeader =
(QuerySubscriptionByConsumerRequestHeader) request.decodeCommandCustomHeader(QuerySubscriptionByConsumerRequestHeader.class);
SubscriptionData subscriptionData = this.brokerController.getConsumerManager()
.findSubscriptionData(requestHeader.getGroup(), requestHeader.getTopic());
QuerySubscriptionResponseBody responseBody = new QuerySubscriptionResponseBody();
responseBody.setGroup(requestHeader.getGroup());
responseBody.setTopic(requestHeader.getTopic());
responseBody.setSubscriptionData(subscriptionData);
byte[] body = responseBody.encode();
response.setBody(body);
response.setCode(ResponseCode.SUCCESS);
response.setRemark(null);
return response;
}
|
@Test
public void testQuerySubscriptionByConsumer() throws RemotingCommandException {
QuerySubscriptionByConsumerRequestHeader requestHeader = new QuerySubscriptionByConsumerRequestHeader();
requestHeader.setGroup("group");
requestHeader.setTopic("topic");
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_SUBSCRIPTION_BY_CONSUMER, requestHeader);
request.makeCustomHeaderToNet();
when(brokerController.getConsumerManager()).thenReturn(consumerManager);
when(consumerManager.findSubscriptionData(anyString(),anyString())).thenReturn(null);
RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
|
@Override
public void ensureValid(String name, Object value) {
if (value == null || ((List) value).isEmpty()) {
throw new ConfigException(name, value, "Empty list");
}
}
|
@Test
public void testValidList() {
new NonEmptyListValidator().ensureValid("foo", Collections.singletonList("foo"));
}
|
public SalesforceHttpClient getHttpClient() {
return httpClient;
}
|
@Test
public void usesUserSuppliedHttpClient() {
assertEquals(client, component.getHttpClient());
}
|
@JsonAnyGetter
public Map<String, PartitionsSpec> get() {
return map;
}
|
@Test
public void testPartitionNumbers() {
List<Integer> partsANumbers = PARTSA.partitionNumbers();
assertEquals(Integer.valueOf(0), partsANumbers.get(0));
assertEquals(Integer.valueOf(1), partsANumbers.get(1));
assertEquals(Integer.valueOf(2), partsANumbers.get(2));
assertEquals(3, partsANumbers.size());
List<Integer> partsBNumbers = PARTSB.partitionNumbers();
assertEquals(Integer.valueOf(0), partsBNumbers.get(0));
assertEquals(Integer.valueOf(1), partsBNumbers.get(1));
assertEquals(2, partsBNumbers.size());
}
|
public boolean cleanTable() {
boolean allRemoved = true;
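// Multiple physical partitions can share the same storage root; remember the paths
// already removed so each root directory is deleted (and each call issued) only once.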
Set<String> removedPaths = new HashSet<>();
for (PhysicalPartition partition : table.getAllPhysicalPartitions()) {
try {
WarehouseManager manager = GlobalStateMgr.getCurrentState().getWarehouseMgr();
Warehouse warehouse = manager.getBackgroundWarehouse();
ShardInfo shardInfo = LakeTableHelper.getAssociatedShardInfo(partition, warehouse.getId()).orElse(null);
if (shardInfo == null || removedPaths.contains(shardInfo.getFilePath().getFullPath())) {
continue;
}
removedPaths.add(shardInfo.getFilePath().getFullPath());
if (!LakeTableHelper.removeShardRootDirectory(shardInfo)) {
allRemoved = false;
}
} catch (StarClientException e) {
LOG.warn("Fail to get shard info of partition {}: {}", partition.getId(), e.getMessage());
allRemoved = false;
}
}
return allRemoved;
}
|
@Test
public void testRPCFailed(@Mocked LakeTable table,
@Mocked PhysicalPartition partition,
@Mocked MaterializedIndex index,
@Mocked LakeTablet tablet,
@Mocked LakeService lakeService) throws StarClientException {
LakeTableCleaner cleaner = new LakeTableCleaner(table);
new MockUp<Utils>() {
@Mock
public ComputeNode chooseNode(ShardInfo info) {
return new ComputeNode();
}
};
new MockUp<BrpcProxy>() {
@Mock
public LakeService getLakeService(TNetworkAddress address) {
return lakeService;
}
};
new MockUp<WarehouseManager>() {
@Mock
public Warehouse getWarehouse(String warehouseName) {
return new DefaultWarehouse(WarehouseManager.DEFAULT_WAREHOUSE_ID, WarehouseManager.DEFAULT_WAREHOUSE_NAME);
}
@Mock
public Warehouse getWarehouse(long warehouseId) {
return new DefaultWarehouse(WarehouseManager.DEFAULT_WAREHOUSE_ID, WarehouseManager.DEFAULT_WAREHOUSE_NAME);
}
};
new Expectations() {
{
table.getAllPhysicalPartitions();
result = Lists.newArrayList(partition);
minTimes = 1;
maxTimes = 1;
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL);
result = Lists.newArrayList(index);
minTimes = 1;
maxTimes = 1;
index.getTablets();
result = Lists.newArrayList(tablet);
minTimes = 1;
maxTimes = 1;
lakeService.dropTable((DropTableRequest) any);
result = new RuntimeException("Injected RPC error");
minTimes = 1;
maxTimes = 1;
}
};
Assert.assertFalse(cleaner.cleanTable());
}
|
ProducerListeners listeners() {
return new ProducerListeners(eventListeners.toArray(new HollowProducerEventListener[0]));
}
|
@Test
public void testFireValidationStartDontStopWhenOneFails() {
long version = 31337;
HollowProducer.ReadState readState = Mockito.mock(HollowProducer.ReadState.class);
Mockito.when(readState.getVersion()).thenReturn(version);
Mockito.doThrow(RuntimeException.class).when(validationStatusListener).onValidationStatusStart(version);
listenerSupport.listeners().fireValidationStart(readState);
Mockito.verify(listener).onValidationStart(version);
Mockito.verify(validationStatusListener).onValidationStatusStart(version);
Mockito.verify(producerAndValidationStatusListener).onValidationStart(version);
}
|
@Override
public PostgreSQLPacket getQueryRowPacket() throws SQLException {
return new PostgreSQLDataRowPacket(proxyBackendHandler.getRowData().getData());
}
|
@Test
void assertGetQueryRowPacket() throws SQLException {
when(proxyBackendHandler.getRowData()).thenReturn(new QueryResponseRow(Collections.emptyList()));
PostgreSQLPacket actual = queryExecutor.getQueryRowPacket();
assertThat(actual, is(instanceOf(PostgreSQLDataRowPacket.class)));
}
|
public ConfigurationProperty create(String key, String value, String encryptedValue, Boolean isSecure) {
ConfigurationProperty configurationProperty = new ConfigurationProperty();
configurationProperty.setConfigurationKey(new ConfigurationKey(key));
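// `value` and `encrypted_value` are mutually exclusive; record errors on both fields
// but still populate them so the caller can render the invalid property back.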
if (isNotBlank(value) && isNotBlank(encryptedValue)) {
configurationProperty.addError("configurationValue", "You may only specify `value` or `encrypted_value`, not both!");
configurationProperty.addError("encryptedValue", "You may only specify `value` or `encrypted_value`, not both!");
configurationProperty.setConfigurationValue(new ConfigurationValue(value));
configurationProperty.setEncryptedValue(new EncryptedConfigurationValue(encryptedValue));
return configurationProperty;
}
if (isSecure) {
if (isNotBlank(encryptedValue)) {
configurationProperty.setEncryptedValue(new EncryptedConfigurationValue(encryptedValue));
}
if (isNotBlank(value)) {
configurationProperty.setEncryptedValue(new EncryptedConfigurationValue(encrypt(value)));
}
} else {
if (isNotBlank(encryptedValue)) {
configurationProperty.addError("encryptedValue", "encrypted_value cannot be specified to a unsecured property.");
configurationProperty.setEncryptedValue(new EncryptedConfigurationValue(encryptedValue));
}
if (value != null) {
configurationProperty.setConfigurationValue(new ConfigurationValue(value));
}
}
if (isNotBlank(configurationProperty.getEncryptedValue())) {
configurationProperty.setEncryptedValue(new EncryptedConfigurationValue(configurationProperty.getEncryptedValue()));
}
return configurationProperty;
}
|
@Test
public void shouldCreateWithValueForAUnsecuredProperty() {
Property key = new Property("key");
key.with(Property.SECURE, false);
ConfigurationProperty property = new ConfigurationPropertyBuilder().create("key", "value", null, false);
assertThat(property.getConfigurationValue().getValue(), is("value"));
assertNull(property.getEncryptedConfigurationValue());
}
|
public static String removeInvalidChars(String name, String prefix) {
String result = removeInvalidCharsMiddle(name);
if (!result.isEmpty()) {
int codePoint = result.codePointAt(0);
if (!isValidIdentifierStart(codePoint)) {
return prefix + result;
}
}
return result;
}
|
@Test
public void testRemoveInvalidChars() {
assertThat(removeInvalidChars("1cls", "C")).isEqualTo("C1cls");
assertThat(removeInvalidChars("-cls", "C")).isEqualTo("cls");
assertThat(removeInvalidChars("A-cls", "C")).isEqualTo("Acls");
}
|
public synchronized void registerNewConf(Address address, List<ConfigProperty> configList) {
Preconditions.checkNotNull(address, "address should not be null");
Preconditions.checkNotNull(configList, "configuration list should not be null");
// Instead of recording property name, we record property key.
mConfMap.put(address, configList.stream().map(c -> new ConfigRecord()
.setKey(toPropertyKey(c.getName())).setSource(c.getSource())
.setValue(c.getValue())).collect(Collectors.toList()));
mLostNodes.remove(address);
for (Runnable function : mChangeListeners) {
function.run();
}
}
|
@Test
public void registerNewConf() {
ConfigurationStore configStore = createConfigStore();
Map<Address, List<ConfigRecord>> confMap = configStore.getConfMap();
assertTrue(confMap.containsKey(mAddressOne));
assertTrue(confMap.containsKey(mAddressTwo));
}
|
@Override
public void refreshSelectorDataSelf(final List<SelectorData> selectorDataList) {
if (CollectionUtils.isEmpty(selectorDataList)) {
return;
}
BaseDataCache.getInstance().cleanSelectorDataSelf(selectorDataList);
}
|
@Test
public void testRefreshSelectorDataSelf() {
baseDataCache.cleanSelectorData();
SelectorData firstCachedSelectorData = SelectorData.builder().id("1").pluginName(mockPluginName1).build();
SelectorData secondCachedSelectorData = SelectorData.builder().id("2").pluginName(mockPluginName2).build();
baseDataCache.cacheSelectData(firstCachedSelectorData);
baseDataCache.cacheSelectData(secondCachedSelectorData);
assertNotNull(baseDataCache.obtainSelectorData(firstCachedSelectorData.getPluginName()));
assertNotNull(baseDataCache.obtainSelectorData(secondCachedSelectorData.getPluginName()));
commonPluginDataSubscriber.refreshSelectorDataSelf(Lists.newArrayList(firstCachedSelectorData));
assertEquals(Lists.newArrayList(), baseDataCache.obtainSelectorData(firstCachedSelectorData.getPluginName()));
assertEquals(Lists.newArrayList(secondCachedSelectorData), baseDataCache.obtainSelectorData(secondCachedSelectorData.getPluginName()));
}
|
static void unregister(MBeanServer server, ObjectName objectName) {
try {
for (ObjectName name : server.queryNames(objectName, null)) {
server.unregisterMBean(name);
}
} catch (MBeanRegistrationException | InstanceNotFoundException e) {
throw new CacheException("Error unregistering " + objectName, e);
}
}
|
@Test(dataProvider = "unegisterExceptions")
public void unregister_error(Class<? extends Throwable> throwableType) throws JMException {
var name = new ObjectName("");
MBeanServer server = Mockito.mock();
when(server.queryNames(any(), any())).thenReturn(Set.of(name));
doThrow(throwableType).when(server).unregisterMBean(any());
assertThrows(CacheException.class, () -> JmxRegistration.unregister(server, name));
}
|
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public RouteContext route(final ConnectionContext connectionContext, final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database) {
RouteContext result = new RouteContext();
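// A data source hint (e.g. from a SQL comment) takes precedence over every SQLRouter:
// route straight to the hinted storage unit and skip entrance/decorate routing.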
Optional<String> dataSourceName = findDataSourceByHint(queryContext.getHintValueContext(), database.getResourceMetaData().getStorageUnits());
if (dataSourceName.isPresent()) {
result.getRouteUnits().add(new RouteUnit(new RouteMapper(dataSourceName.get(), dataSourceName.get()), Collections.emptyList()));
return result;
}
for (Entry<ShardingSphereRule, SQLRouter> entry : routers.entrySet()) {
if (result.getRouteUnits().isEmpty() && entry.getValue() instanceof EntranceSQLRouter) {
result = ((EntranceSQLRouter) entry.getValue()).createRouteContext(queryContext, globalRuleMetaData, database, entry.getKey(), props, connectionContext);
} else if (entry.getValue() instanceof DecorateSQLRouter) {
((DecorateSQLRouter) entry.getValue()).decorateRouteContext(result, queryContext, database, entry.getKey(), props, connectionContext);
}
}
if (result.getRouteUnits().isEmpty() && 1 == database.getResourceMetaData().getStorageUnits().size()) {
String singleDataSourceName = database.getResourceMetaData().getStorageUnits().keySet().iterator().next();
result.getRouteUnits().add(new RouteUnit(new RouteMapper(singleDataSourceName, singleDataSourceName), Collections.emptyList()));
}
return result;
}
|
@Test
void assertRouteBySQLCommentHintWithException() {
when(hintValueContext.findHintDataSourceName()).thenReturn(Optional.of("ds_3"));
QueryContext queryContext = new QueryContext(commonSQLStatementContext, "", Collections.emptyList(), hintValueContext, connectionContext, metaData);
assertThrows(DataSourceHintNotExistsException.class, () -> partialSQLRouteExecutor.route(connectionContext, queryContext, mock(RuleMetaData.class), database));
}
|
public int tryUnblockFailedWorkflowInstances(String workflowId, int limit, TimelineEvent event) {
return withMetricLogError(
() ->
withRetryableUpdate(
UNBLOCK_INSTANCES_FAILED_STATUS,
stmt -> {
int idx = 0;
stmt.setString(++idx, toJson(event));
stmt.setString(++idx, workflowId);
stmt.setInt(++idx, limit);
}),
"tryUnblockFailedWorkflowInstances",
"Failed to try to unblock the failed workflow instances for workflow id[{}]",
workflowId);
}
|
@Test
public void testTryUnblockFailedWorkflowInstances() {
wfi.setWorkflowUuid("test-uuid");
wfi.setWorkflowInstanceId(0L);
int res = runStrategyDao.startWithRunStrategy(wfi, Defaults.DEFAULT_RUN_STRATEGY);
assertEquals(1, res);
int cnt =
instanceDao.terminateQueuedInstances(
TEST_WORKFLOW_ID, 2, WorkflowInstance.Status.FAILED, "test-reason");
assertEquals(2L, cnt);
String status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 1L, 1L);
assertEquals("FAILED", status);
status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 2L, 1L);
assertEquals("FAILED", status);
int ret = instanceDao.tryUnblockFailedWorkflowInstances(TEST_WORKFLOW_ID, 1, null);
assertEquals(1, ret);
status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 1L, 1L);
assertEquals("FAILED_1", status);
status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 2L, 1L);
assertEquals("FAILED", status);
ret = instanceDao.tryUnblockFailedWorkflowInstances(TEST_WORKFLOW_ID, 2, null);
assertEquals(1, ret);
status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 1L, 1L);
assertEquals("FAILED_1", status);
status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 2L, 1L);
assertEquals("FAILED_1", status);
MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 2);
}
|
public static <InputT> KeyByBuilder<InputT> of(PCollection<InputT> input) {
return named(null).of(input);
}
|
@Test
public void testBuild_Windowing() {
final PCollection<String> dataset = TestUtils.createMockDataset(TypeDescriptors.strings());
final PCollection<KV<String, Long>> counted =
SumByKey.of(dataset)
.keyBy(s -> s)
.valueBy(s -> 1L)
.windowBy(FixedWindows.of(org.joda.time.Duration.standardHours(1)))
.triggeredBy(DefaultTrigger.of())
.discardingFiredPanes()
.withAllowedLateness(Duration.millis(1000))
.output();
final SumByKey sum = (SumByKey) TestUtils.getProducer(counted);
assertTrue(sum.getWindow().isPresent());
@SuppressWarnings("unchecked")
final WindowDesc<?> windowDesc = WindowDesc.of((Window) sum.getWindow().get());
assertEquals(
FixedWindows.of(org.joda.time.Duration.standardHours(1)), windowDesc.getWindowFn());
assertEquals(DefaultTrigger.of(), windowDesc.getTrigger());
assertEquals(AccumulationMode.DISCARDING_FIRED_PANES, windowDesc.getAccumulationMode());
assertEquals(Duration.millis(1000), windowDesc.getAllowedLateness());
}
|
@Override
public JWK getJwk() {
return this.jwk;
}
|
@Test
void shouldGetJwk() {
var jwk = service.getJwk();
assertEquals("RSA", jwk.getKeyType().getValue());
assertEquals(JWSAlgorithm.RS256, jwk.getAlgorithm());
assertEquals(KeyUse.SIGNATURE, jwk.getKeyUse());
assertEquals(Set.of(SIGN, VERIFY), jwk.getKeyOperations());
}
|
public void close() {
if (isOpen()) {
LOG.debug("Closing stream ({})", mDescription);
mClosed = true;
mRequestObserver.onCompleted();
}
}
|
@Test
public void close() throws Exception {
mStream.close();
assertTrue(mStream.isClosed());
assertFalse(mStream.isOpen());
verify(mRequestObserver).onCompleted();
}
|
static String isHostParam(final String given) {
final String hostUri = StringHelper.notEmpty(given, "host");
final Matcher matcher = HOST_PATTERN.matcher(given);
if (!matcher.matches()) {
throw new IllegalArgumentException(
"host must be an absolute URI (e.g. http://api.example.com), given: `" + hostUri + "`");
}
return hostUri;
}
|
@Test
public void emptyHostParamsAreNotAllowed() {
assertThrows(IllegalArgumentException.class,
() -> RestOpenApiHelper.isHostParam(""));
}
|
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
}
|
@Test
public void shouldNotUseSourceTopicForCreateMissingTopic() {
// Given:
givenStatement("CREATE STREAM x (FOO VARCHAR) WITH(value_format='avro', kafka_topic='topic', partitions=2);");
// When:
injector.inject(statement, builder);
// Then:
verify(builder, never()).withSource(any(), any());
}
|
@Override
public Map<K, V> getCachedMap() {
return localCacheView.getCachedMap();
}
|
@Test
public void testGetAllCache() {
RLocalCachedMap<String, Integer> map = redisson.getLocalCachedMap(LocalCachedMapOptions.name("test"));
Map<String, Integer> cache = map.getCachedMap();
map.put("1", 100);
map.put("2", 200);
map.put("3", 300);
map.put("4", 400);
assertThat(cache.size()).isEqualTo(4);
Map<String, Integer> filtered = map.getAll(new HashSet<>(Arrays.asList("2", "3", "5")));
Map<String, Integer> expectedMap = new HashMap<>();
expectedMap.put("2", 200);
expectedMap.put("3", 300);
assertThat(filtered).isEqualTo(expectedMap);
RMap<String, Integer> map1 = redisson.getLocalCachedMap(LocalCachedMapOptions.name("test"));
Map<String, Integer> filtered1 = map1.getAll(new HashSet<>(Arrays.asList("2", "3", "5")));
assertThat(filtered1).isEqualTo(expectedMap);
}
|
@Override
public void execute(Context context) {
try (CloseableIterator<DefaultIssue> issues = protoIssueCache.traverse()) {
while (issues.hasNext()) {
DefaultIssue issue = issues.next();
if (shouldUpdateIndexForIssue(issue)) {
changedIssuesRepository.addIssueKey(issue.key());
}
}
}
}
|
@Test
public void execute_whenIssueIsChanged_shouldLoadIssue() {
protoIssueCache.newAppender()
.append(newDefaultIssue().setChanged(true))
.close();
underTest.execute(mock(ComputationStep.Context.class));
verify(changedIssuesRepository).addIssueKey("issueKey1");
}
|
public static Float parseFloat(String value) {
return parseFloat(value, ZERO_RADIX);
}
|
@Test
public void parseFloat() {
String floatValStr = floatVal.toString();
Assertions.assertEquals(java.util.Optional.of(floatVal).get(), TbUtils.parseFloat(floatValStr));
String floatValHex = "41EA62CC";
Assertions.assertEquals(0, Float.compare(floatVal, TbUtils.parseHexToFloat(floatValHex)));
Assertions.assertEquals(0, Float.compare(floatValRev, TbUtils.parseHexToFloat(floatValHex, false)));
Assertions.assertEquals(0, Float.compare(floatVal, TbUtils.parseBigEndianHexToFloat(floatValHex)));
String floatValHexRev = "CC62EA41";
Assertions.assertEquals(0, Float.compare(floatVal, TbUtils.parseLittleEndianHexToFloat(floatValHexRev)));
}
|
@VisibleForTesting
static SortedMap<OffsetRange, Integer> computeOverlappingRanges(Iterable<OffsetRange> ranges) {
ImmutableSortedMap.Builder<OffsetRange, Integer> rval =
ImmutableSortedMap.orderedBy(OffsetRangeComparator.INSTANCE);
List<OffsetRange> sortedRanges = Lists.newArrayList(ranges);
if (sortedRanges.isEmpty()) {
return rval.build();
}
Collections.sort(sortedRanges, OffsetRangeComparator.INSTANCE);
// Stores ranges in smallest 'from' and then smallest 'to' order
// e.g. [2, 7), [3, 4), [3, 5), [3, 5), [3, 6), [4, 10)
PriorityQueue<OffsetRange> rangesWithSameFrom =
new PriorityQueue<>(OffsetRangeComparator.INSTANCE);
Iterator<OffsetRange> iterator = sortedRanges.iterator();
// Stored in reverse sorted order so that when we iterate and re-add them back to
// rangesWithSameFrom they are stored in sorted order from smallest to largest range.to
List<OffsetRange> rangesToProcess = new ArrayList<>();
while (iterator.hasNext()) {
OffsetRange current = iterator.next();
// Skip empty ranges
if (current.getFrom() == current.getTo()) {
continue;
}
// If the current range has a different 'from' than a prior range, we must produce
// ranges in [rangesWithSameFrom.from, current.from)
while (!rangesWithSameFrom.isEmpty()
&& rangesWithSameFrom.peek().getFrom() != current.getFrom()) {
rangesToProcess.addAll(rangesWithSameFrom);
Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE);
rangesWithSameFrom.clear();
int i = 0;
long lastTo = rangesToProcess.get(i).getFrom();
// Output all the ranges that are strictly less than current.from
// e.g. current.to := 7 for [3, 4), [3, 5), [3, 5), [3, 6) will produce
// [3, 4) := 4
// [4, 5) := 3
// [5, 6) := 1
for (; i < rangesToProcess.size(); ++i) {
if (rangesToProcess.get(i).getTo() > current.getFrom()) {
break;
}
// Output only the first of any subsequent duplicate ranges
if (i == 0 || rangesToProcess.get(i - 1).getTo() != rangesToProcess.get(i).getTo()) {
rval.put(
new OffsetRange(lastTo, rangesToProcess.get(i).getTo()),
rangesToProcess.size() - i);
lastTo = rangesToProcess.get(i).getTo();
}
}
// We exited the loop with 'to' > current.from; we must add the range [lastTo,
// current.from) if it is non-empty
if (lastTo < current.getFrom() && i != rangesToProcess.size()) {
rval.put(new OffsetRange(lastTo, current.getFrom()), rangesToProcess.size() - i);
}
// The remaining ranges have a 'to' that is greater than 'current.from' and will overlap
// with current so add them back to rangesWithSameFrom with the updated 'from'
for (; i < rangesToProcess.size(); ++i) {
rangesWithSameFrom.add(
new OffsetRange(current.getFrom(), rangesToProcess.get(i).getTo()));
}
rangesToProcess.clear();
}
rangesWithSameFrom.add(current);
}
// Process the last chunk of overlapping ranges
while (!rangesWithSameFrom.isEmpty()) {
// This range always represents the range with the smallest 'to'
OffsetRange current = rangesWithSameFrom.remove();
rangesToProcess.addAll(rangesWithSameFrom);
Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE);
rangesWithSameFrom.clear();
rval.put(current, rangesToProcess.size() + 1 /* include current */);
// Shorten all the remaining ranges such that they start with current.to
for (OffsetRange rangeWithDifferentFrom : rangesToProcess) {
// Skip any duplicates of current
if (rangeWithDifferentFrom.getTo() > current.getTo()) {
rangesWithSameFrom.add(new OffsetRange(current.getTo(), rangeWithDifferentFrom.getTo()));
}
}
rangesToProcess.clear();
}
return rval.build();
}
|
@Test
public void testOverlappingTos() {
Iterable<OffsetRange> ranges = Arrays.asList(range(0, 12), range(4, 12), range(8, 12));
Map<OffsetRange, Integer> nonOverlappingRangesToNumElementsPerPosition =
computeOverlappingRanges(ranges);
assertEquals(
ImmutableMap.builder().put(range(0, 4), 1).put(range(4, 8), 2).put(range(8, 12), 3).build(),
nonOverlappingRangesToNumElementsPerPosition);
assertNonEmptyRangesAndPositions(ranges, nonOverlappingRangesToNumElementsPerPosition);
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
void testPrimaryKeyNoColumns() {
assertThatThrownBy(
() ->
TableSchema.builder()
.field("f0", DataTypes.BIGINT())
.primaryKey("pk", new String[] {})
.build())
.isInstanceOf(ValidationException.class)
.hasMessage("PRIMARY KEY constraint must be defined for at least a single column.");
}
|
public static boolean classExists(String fqcn) {
try {
Class.forName(fqcn);
return true;
} catch (ClassNotFoundException e) {
return false;
}
}
|
@Test
public void testClassExists() {
assertTrue(Reflections.classExists(String.class.getName()));
assertFalse(Reflections.classExists("com.fake.class"));
}
|
public static ReservationListResponse mergeReservationsList(
Collection<ReservationListResponse> responses) {
ReservationListResponse reservationListResponse =
Records.newRecord(ReservationListResponse.class);
List<ReservationAllocationState> reservationAllocationStates =
new ArrayList<>();
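// Skip null responses and responses without allocation state (e.g. from sub-clusters
// that returned nothing) so one empty or failed response cannot break the merge.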
for (ReservationListResponse response : responses) {
if (response != null && response.getReservationAllocationState() != null) {
reservationAllocationStates.addAll(
response.getReservationAllocationState());
}
}
reservationListResponse.setReservationAllocationState(
reservationAllocationStates);
return reservationListResponse;
}
|
@Test
public void testMergeReservationsList() {
// normal response
ReservationListResponse response1 = createReservationListResponse(
165348678000L, 165348690000L, 165348678000L, 1L);
ReservationListResponse response2 = createReservationListResponse(
165348750000L, 165348768000L, 165348750000L, 1L);
// empty response
ReservationListResponse response3 = ReservationListResponse.newInstance(new ArrayList<>());
// null response
ReservationListResponse response4 = null;
List<ReservationListResponse> responses = new ArrayList<>();
responses.add(response1);
responses.add(response2);
responses.add(response3);
responses.add(response4);
// expected response
List<ReservationAllocationState> expectedResponse = new ArrayList<>();
expectedResponse.addAll(response1.getReservationAllocationState());
expectedResponse.addAll(response2.getReservationAllocationState());
ReservationListResponse response =
RouterYarnClientUtils.mergeReservationsList(responses);
Assert.assertEquals(expectedResponse, response.getReservationAllocationState());
}
|
public List<Supplier<PageProjectionWithOutputs>> compileProjections(
SqlFunctionProperties sqlFunctionProperties,
Map<SqlFunctionId, SqlInvokedFunction> sessionFunctions,
List<? extends RowExpression> projections,
boolean isOptimizeCommonSubExpression,
Optional<String> classNameSuffix)
{
if (isOptimizeCommonSubExpression) {
ImmutableList.Builder<Supplier<PageProjectionWithOutputs>> pageProjections = ImmutableList.builder();
ImmutableMap.Builder<RowExpression, Integer> expressionsWithPositionBuilder = ImmutableMap.builder();
Set<RowExpression> expressionCandidates = new HashSet<>();
for (int i = 0; i < projections.size(); i++) {
RowExpression projection = projections.get(i);
// Duplicate expressions are not expected here in general, due to duplicate-assignment pruning in query optimization, hence we skip CSE for them to allow for a
// simpler implementation (duplicate projections in expressionsWithPositionBuilder would throw an exception when calling expressionsWithPositionBuilder.build())
if (projection instanceof ConstantExpression || projection instanceof InputReferenceExpression || expressionCandidates.contains(projection)) {
pageProjections.add(toPageProjectionWithOutputs(compileProjection(sqlFunctionProperties, sessionFunctions, projection, classNameSuffix), new int[] {i}));
}
else {
expressionsWithPositionBuilder.put(projection, i);
expressionCandidates.add(projection);
}
}
Map<RowExpression, Integer> expressionsWithPosition = expressionsWithPositionBuilder.build();
Map<List<RowExpression>, Boolean> projectionsPartitionedByCSE = getExpressionsPartitionedByCSE(expressionsWithPosition.keySet(), MAX_PROJECTION_GROUP_SIZE);
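// Entries whose value is true group projections that share common sub-expressions and
// are compiled together; singleton non-CSE entries are compiled independently.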
for (Map.Entry<List<RowExpression>, Boolean> entry : projectionsPartitionedByCSE.entrySet()) {
if (entry.getValue()) {
pageProjections.add(toPageProjectionWithOutputs(
compileProjectionCached(sqlFunctionProperties, sessionFunctions, entry.getKey(), true, classNameSuffix),
toIntArray(entry.getKey().stream().map(expressionsWithPosition::get).collect(toImmutableList()))));
}
else {
verify(entry.getKey().size() == 1, "Expect non-cse expression list to only have one element");
RowExpression projection = entry.getKey().get(0);
pageProjections.add(toPageProjectionWithOutputs(
compileProjection(sqlFunctionProperties, sessionFunctions, projection, classNameSuffix),
new int[] {expressionsWithPosition.get(projection)}));
}
}
return pageProjections.build();
}
return IntStream.range(0, projections.size())
.mapToObj(outputChannel -> toPageProjectionWithOutputs(
compileProjection(sqlFunctionProperties, sessionFunctions, projections.get(outputChannel), classNameSuffix),
new int[] {outputChannel}))
.collect(toImmutableList());
}
|
@Test
public void testCommonSubExpressionLongProjectionList()
{
PageFunctionCompiler functionCompiler = new PageFunctionCompiler(createTestMetadataManager(), 0);
List<Supplier<PageProjectionWithOutputs>> pageProjections = functionCompiler.compileProjections(SESSION.getSqlFunctionProperties(), createIfProjectionList(5), true, Optional.empty());
assertEquals(pageProjections.size(), 1);
pageProjections = functionCompiler.compileProjections(SESSION.getSqlFunctionProperties(), createIfProjectionList(10), true, Optional.empty());
assertEquals(pageProjections.size(), 1);
pageProjections = functionCompiler.compileProjections(SESSION.getSqlFunctionProperties(), createIfProjectionList(11), true, Optional.empty());
assertEquals(pageProjections.size(), 2);
pageProjections = functionCompiler.compileProjections(SESSION.getSqlFunctionProperties(), createIfProjectionList(20), true, Optional.empty());
assertEquals(pageProjections.size(), 2);
pageProjections = functionCompiler.compileProjections(SESSION.getSqlFunctionProperties(), createIfProjectionList(101), true, Optional.empty());
assertEquals(pageProjections.size(), 11);
}
|
@Override
public boolean supportsSchemasInPrivilegeDefinitions() {
return false;
}
|
@Test
void assertSupportsSchemasInPrivilegeDefinitions() {
assertFalse(metaData.supportsSchemasInPrivilegeDefinitions());
}
|
public <T> T retryable(CheckedSupplier<T> action) throws RetryException {
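// Runs the action until it succeeds or the retry budget is exhausted, sleeping an
// exponentially growing backoff between attempts; interruption aborts the whole retry.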
long attempt = 0L;
do {
try {
attempt++;
return action.get();
} catch (Exception ex) {
logger.error("Backoff retry exception", ex);
}
if (hasRetry(attempt)) {
try {
int ms = backoffTime(attempt);
logger.info("Retry({}) will execute in {} second", attempt, ms/1000.0);
Thread.sleep(ms);
} catch (InterruptedException e) {
throw new RetryException("Backoff retry aborted", e);
}
}
} while (hasRetry(attempt));
throw new RetryException("Reach max retry");
}
|
@Test
@SuppressWarnings("unchecked")
public void testExceptionsReachMaxRetry() throws Exception {
ExponentialBackoff backoff = new ExponentialBackoff(2L);
CheckedSupplier<Boolean> supplier = Mockito.mock(CheckedSupplier.class);
Mockito.when(supplier.get()).thenThrow(new IOException("can't write to disk"));
Assertions.assertThatThrownBy(() -> backoff.retryable(supplier))
.isInstanceOf(ExponentialBackoff.RetryException.class)
.hasMessageContaining("max retry");
}
|
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {
String propertyTypeName = getTypeName(node);
JType type;
if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {
type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
} else if (node.has("existingJavaType")) {
String typeName = node.path("existingJavaType").asText();
if (isPrimitive(typeName, jClassContainer.owner())) {
type = primitiveType(typeName, jClassContainer.owner());
} else {
type = resolveType(jClassContainer, typeName);
}
} else if (propertyTypeName.equals("string")) {
type = jClassContainer.owner().ref(String.class);
} else if (propertyTypeName.equals("number")) {
type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("integer")) {
type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("boolean")) {
type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("array")) {
type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
} else {
type = jClassContainer.owner().ref(Object.class);
}
if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
} else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
}
return type;
}
|
@Test
public void applyGeneratesIntegerUsingJavaTypeLongWhenMaximumGreaterThanIntegerMax() {
JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName());
ObjectNode objectNode = new ObjectMapper().createObjectNode();
objectNode.put("type", "integer");
objectNode.put("maximum", Integer.MAX_VALUE + 1L);
when(config.isUsePrimitives()).thenReturn(false);
JType result = rule.apply("fooBar", objectNode, null, jpackage, null);
assertThat(result.fullName(), is(Long.class.getName()));
}
|
@Override
public boolean equals(Object o) {
if (o == null) return false;
if (!o.getClass().equals(ScramCredentialData.class)) return false;
ScramCredentialData other = (ScramCredentialData) o;
return Arrays.equals(salt, other.salt) &&
Arrays.equals(storedKey, other.storedKey) &&
Arrays.equals(serverKey, other.serverKey) &&
iterations == other.iterations;
}
|
@Test
public void testEquals() {
assertNotEquals(SCRAMCREDENTIALDATA.get(0), SCRAMCREDENTIALDATA.get(1));
assertNotEquals(SCRAMCREDENTIALDATA.get(1), SCRAMCREDENTIALDATA.get(0));
assertNotEquals(SCRAMCREDENTIALDATA.get(0), SCRAMCREDENTIALDATA.get(2));
assertNotEquals(SCRAMCREDENTIALDATA.get(2), SCRAMCREDENTIALDATA.get(0));
assertEquals(SCRAMCREDENTIALDATA.get(0), SCRAMCREDENTIALDATA.get(0));
assertEquals(SCRAMCREDENTIALDATA.get(1), SCRAMCREDENTIALDATA.get(1));
assertEquals(SCRAMCREDENTIALDATA.get(2), SCRAMCREDENTIALDATA.get(2));
}
|
public boolean submitProcessingErrors(Message message) {
return submitProcessingErrorsInternal(message, message.processingErrors());
}
|
@Test
public void submitProcessingErrors_nothingSubmittedAndMessageNotFilteredOut_ifSubmissionDisabledAndDuplicatesAreNotKept() throws Exception {
// given
final Message msg = Mockito.mock(Message.class);
when(msg.getMessageId()).thenReturn("msg-x");
when(msg.supportsFailureHandling()).thenReturn(true);
when(msg.processingErrors()).thenReturn(List.of(
new Message.ProcessingError(() -> "Cause 1", "Message 1", "Details 1"),
new Message.ProcessingError(() -> "Cause 2", "Message 2", "Details 2")
));
when(failureHandlingConfiguration.submitProcessingFailures()).thenReturn(false);
when(failureHandlingConfiguration.keepFailedMessageDuplicate()).thenReturn(false);
// when
final boolean notFilterOut = underTest.submitProcessingErrors(msg);
// then
assertThat(notFilterOut).isTrue();
verifyNoInteractions(failureSubmissionQueue);
}
|
public double interpolate(double... x) {
if (x.length != this.x[0].length) {
throw new IllegalArgumentException(String.format("Invalid input vector size: %d, expected: %d", x.length, this.x[0].length));
}
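// Dual-form kriging prediction: yvi presumably holds the solved kriging weights, with
// the trailing entry yvi[n] acting as the constant (drift) term, so the estimate is
// yvi[n] + sum_i yvi[i] * variogram(dist(x, x_i)). This reading is inferred from the
// loop below rather than stated by the source.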
int n = this.x.length;
double y = yvi[n];
for (int i = 0; i < n; i++) {
y += yvi[i] * variogram.f(rdist(x, this.x[i]));
}
return y;
}
|
@Test
public void testInterpolate() {
System.out.println("interpolate");
double[][] x = {{0, 0}, {1, 1}};
double[] y = {0, 1};
KrigingInterpolation instance = new KrigingInterpolation(x, y);
double[] x1 = {0.5, 0.5};
assertEquals(0, instance.interpolate(x[0]), 1E-7);
assertEquals(1, instance.interpolate(x[1]), 1E-7);
assertEquals(0.5, instance.interpolate(x1), 1E-7);
}
|
@Override
public void start() throws Exception {
if (!state.compareAndSet(State.LATENT, State.STARTED)) {
throw new IllegalStateException();
}
try {
client.create().creatingParentContainersIfNeeded().forPath(queuePath);
} catch (KeeperException.NodeExistsException ignore) {
// this is OK
}
if (lockPath != null) {
try {
client.create().creatingParentContainersIfNeeded().forPath(lockPath);
} catch (KeeperException.NodeExistsException ignore) {
// this is OK
}
}
if (!isProducerOnly || (maxItems != QueueBuilder.NOT_SET)) {
childrenCache.start();
}
if (!isProducerOnly) {
service.submit(new Callable<Object>() {
@Override
public Object call() {
runLoop();
return null;
}
});
}
}
|
@Test
public void testSimple() throws Exception {
final int itemQty = 10;
DistributedQueue<TestQueueItem> queue = null;
CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
client.start();
try {
BlockingQueueConsumer<TestQueueItem> consumer =
new BlockingQueueConsumer<>(new DummyConnectionStateListener());
queue = QueueBuilder.builder(client, consumer, serializer, QUEUE_PATH)
.buildQueue();
queue.start();
QueueTestProducer producer = new QueueTestProducer(queue, itemQty, 0);
ExecutorService service = Executors.newCachedThreadPool();
service.submit(producer);
int iteration = 0;
while (consumer.size() < itemQty) {
assertTrue(++iteration < 10);
Thread.sleep(1000);
}
int i = 0;
for (TestQueueItem item : consumer.getItems()) {
assertEquals(item.str, Integer.toString(i++));
}
} finally {
CloseableUtils.closeQuietly(queue);
CloseableUtils.closeQuietly(client);
}
}
|
@GuardedBy("lock")
private boolean isLeader(ResourceManager<?> resourceManager) {
return running && this.leaderResourceManager == resourceManager;
}
|
@Test
void grantLeadership_withExistingLeader_stopExistLeader() throws Exception {
final UUID leaderSessionId1 = UUID.randomUUID();
final UUID leaderSessionId2 = UUID.randomUUID();
final CompletableFuture<UUID> startRmFuture1 = new CompletableFuture<>();
final CompletableFuture<UUID> startRmFuture2 = new CompletableFuture<>();
final CompletableFuture<UUID> terminateRmFuture = new CompletableFuture<>();
rmFactoryBuilder
.setInitializeConsumer(
uuid -> {
if (!startRmFuture1.isDone()) {
startRmFuture1.complete(uuid);
} else {
startRmFuture2.complete(uuid);
}
})
.setTerminateConsumer(terminateRmFuture::complete);
createAndStartResourceManager();
// first time grant leadership
leaderElection.isLeader(leaderSessionId1).join();
// second time grant leadership
final CompletableFuture<LeaderInformation> confirmedLeaderInformation =
leaderElection.isLeader(leaderSessionId2);
// should terminate first RM, start a new RM and confirm leader session
assertThatFuture(terminateRmFuture).eventuallySucceeds().isSameAs(leaderSessionId1);
assertThatFuture(startRmFuture2).eventuallySucceeds().isSameAs(leaderSessionId2);
assertThat(confirmedLeaderInformation.get().getLeaderSessionID())
.isSameAs(leaderSessionId2);
}
|
@Override
public String toString() {
return "QueryCacheConfig{"
+ "batchSize=" + batchSize
+ ", bufferSize=" + bufferSize
+ ", delaySeconds=" + delaySeconds
+ ", includeValue=" + includeValue
+ ", populate=" + populate
+ ", coalesce=" + coalesce
+ ", serializeKeys=" + serializeKeys
+ ", inMemoryFormat=" + inMemoryFormat
+ ", name='" + name + '\''
+ ", predicateConfig=" + predicateConfig
+ ", evictionConfig=" + evictionConfig
+ ", entryListenerConfigs=" + entryListenerConfigs
+ ", indexConfigs=" + indexConfigs
+ '}';
}
|
@Test
public void testToString() {
QueryCacheConfig config = new QueryCacheConfig();
assertNotNull(config.toString());
assertContains(config.toString(), "QueryCacheConfig");
}
|
@Override
public boolean isClosed()
throws SQLException {
return _closed;
}
|
@Test
public void isClosedTest()
throws Exception {
PinotConnection pinotConnection =
new PinotConnection("dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport);
Assert.assertFalse(pinotConnection.isClosed());
pinotConnection.close();
Assert.assertTrue(pinotConnection.isClosed());
}
|
@SuppressWarnings("unchecked")
@Override
public boolean canHandleReturnType(Class returnType) {
return rxSupportedTypes.stream()
.anyMatch(classType -> classType.isAssignableFrom(returnType));
}
|
@Test
public void testCheckTypes() {
assertThat(rxJava3RateLimiterAspectExt.canHandleReturnType(Flowable.class)).isTrue();
assertThat(rxJava3RateLimiterAspectExt.canHandleReturnType(Single.class)).isTrue();
}
|
private void index() {
IntStream.range(0, this.size()).forEach(i -> preferences.setProperty(toProperty(this.get(i), prefix), i));
}
|
@Test
public void testIndex() {
BookmarkCollection c = new BookmarkCollection(new NullLocal("", "f")) {
@Override
protected void save(Host bookmark) {
assertNotNull(bookmark.getUuid());
}
};
final Host d = new Host(new TestProtocol(), "c");
final Host b = new Host(new TestProtocol(), "b");
final Host a = new Host(new TestProtocol(), "a");
c.add(a);
c.add(b);
assertEquals(a, c.get(0));
assertEquals(b, c.get(1));
c.add(0, d);
assertEquals(d, c.get(0));
assertEquals(a, c.get(1));
assertEquals(b, c.get(2));
}
|
@Override
public void clearUp(ProcessContext context) {
RootContext.unbind();
RootContext.unbindBranchType();
if (sagaTransactionalTemplate != null) {
GlobalTransaction globalTransaction;
StateMachineInstance machineInstance = (StateMachineInstance) context.getVariable(DomainConstants.VAR_NAME_STATEMACHINE_INST);
if (machineInstance != null) {
try {
globalTransaction = getGlobalTransaction(machineInstance, context);
sagaTransactionalTemplate.cleanUp(globalTransaction);
} catch (ExecutionException e) {
LOGGER.error("Report transaction finish to server error: {}, StateMachine: {}, XID: {}, Reason: {}",
e.getCode(), machineInstance.getStateMachine().getName(), machineInstance.getId(), e.getMessage(), e);
} catch (TransactionException e) {
LOGGER.error("Report transaction finish to server error: {}, StateMachine: {}, XID: {}, Reason: {}",
e.getCode(), machineInstance.getStateMachine().getName(), machineInstance.getId(), e.getMessage(), e);
}
}
}
}
|
@Test
public void testClearUp() {
ProcessContextImpl context = new ProcessContextImpl();
context.setVariable(DomainConstants.VAR_NAME_STATEMACHINE_INST, new StateMachineInstanceImpl());
Assertions.assertDoesNotThrow(() -> dbAndReportTcStateLogStore.clearUp(context));
}
|
public String saveGitlabConfiguration(GitlabConfiguration gitlabConfiguration) {
String body = String.format("""
{
"enabled": "%s",
"applicationId": "%s",
"url": "%s",
"secret": "%s",
"synchronizeGroups": "%s",
"provisioningType": "%s",
"allowUsersToSignUp": "%s",
"provisioningToken": "%s",
"allowedGroups": ["%s"]
}
""", gitlabConfiguration.enabled(), gitlabConfiguration.applicationId(), gitlabConfiguration.url(), gitlabConfiguration.secret(),
gitlabConfiguration.synchronizeGroups(), gitlabConfiguration.provisioningType(), gitlabConfiguration.allowUsersToSignUp(),
gitlabConfiguration.provisioningToken(), gitlabConfiguration.singleAllowedGroup());
WsResponse response = call(
new PostRequest(path()).setBody(body)
);
return new Gson().fromJson(response.content(), JsonObject.class).get("id").getAsString();
}
|
@Test
public void saveGitlabConfiguration_shouldNotFail() {
when(wsConnector.call(any()).failIfNotSuccessful().content()).thenReturn("{\"id\": \"configId\"}");
assertThatNoException().isThrownBy(() -> gitlabConfigurationService.saveGitlabConfiguration(new GitlabConfiguration(true, "appId", "url", "secret",
true, "JIT", false, "token", "group")));
}
|
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuffer buf = new StringBuffer();
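// Single-pass state machine over the pattern: each state consumes one character and
// may emit tokens into tokenList, buffering partial literals/keywords in buf.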
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case FORMAT_MODIFIER_STATE:
handleFormatModifierState(c, tokenList, buf);
break;
case OPTION_STATE:
processOption(c, tokenList, buf);
break;
case KEYWORD_STATE:
handleKeywordState(c, tokenList, buf);
break;
case RIGHT_PARENTHESIS_STATE:
handleRightParenthesisState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addValuedToken(Token.LITERAL, buf, tokenList);
break;
case KEYWORD_STATE:
tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
break;
case RIGHT_PARENTHESIS_STATE:
tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
break;
case FORMAT_MODIFIER_STATE:
case OPTION_STATE:
throw new ScanException("Unexpected end of pattern string");
}
return tokenList;
}
|
@Test
public void testNested() throws ScanException {
List<Token> tl = new TokenStream("%(%a%(%b))").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "a"));
witness.add(Token.PERCENT_TOKEN);
witness.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "b"));
witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
assertEquals(witness, tl);
}
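A hedged usage sketch mirroring the test above; TokenStream's single-argument constructor is taken from the test, and the expected token shape in the comment is an informal reading of the state machine in tokenize(), not verified output.

// Tokenizing a typical conversion pattern drives the LITERAL/FORMAT_MODIFIER/KEYWORD states.
List<Token> tokens = new TokenStream("%-5level [%thread]").tokenize();
// Expected shape (roughly): PERCENT, FORMAT_MODIFIER("-5"), SIMPLE_KEYWORD("level"),
// LITERAL(" ["), PERCENT, SIMPLE_KEYWORD("thread"), LITERAL("]")
for (Token token : tokens) {
    System.out.println(token);
}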
|
public void write(WriteRequest writeRequest) {
if (!tryAcquireSemaphore()) {
return;
}
mSerializingExecutor.execute(() -> {
try {
if (mContext == null) {
LOG.debug("Received write request {}.",
RpcSensitiveConfigMask.CREDENTIAL_FIELD_MASKER.maskObjects(LOG, writeRequest));
try {
mContext = createRequestContext(writeRequest);
} catch (Exception e) {
// abort() assumes context is initialized.
// Reply with the error in order to prevent clients getting stuck.
replyError(new Error(AlluxioStatusException.fromThrowable(e), true));
throw e;
}
} else {
Preconditions.checkState(!mContext.isDoneUnsafe(),
"invalid request after write request is completed.");
}
if (mContext.isDoneUnsafe() || mContext.getError() != null) {
return;
}
validateWriteRequest(writeRequest);
if (writeRequest.hasCommand()) {
WriteRequestCommand command = writeRequest.getCommand();
if (command.getFlush()) {
flush();
} else {
handleCommand(command, mContext);
}
} else {
Preconditions.checkState(writeRequest.hasChunk(),
"write request is missing data chunk in non-command message");
ByteString data = writeRequest.getChunk().getData();
Preconditions.checkState(data != null && data.size() > 0,
"invalid data size from write request message");
writeData(new NioDataBuffer(data.asReadOnlyByteBuffer(), data.size()));
}
} catch (Exception e) {
LogUtils.warnWithException(LOG, "Exception occurred while processing write request {}.",
writeRequest, e);
abort(new Error(AlluxioStatusException.fromThrowable(e), true));
} finally {
mSemaphore.release();
}
});
}
|
@Test
public void writeInvalidOffsetLaterRequest() throws Exception {
mWriteHandler.write(newWriteRequestCommand(0));
// The write request contains an invalid offset
mWriteHandler.write(newWriteRequestCommand(1));
waitForResponses();
checkErrorCode(mResponseObserver, Status.Code.INVALID_ARGUMENT);
}
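A minimal, generic sketch of the acquire/execute/release backpressure pattern used by write() above, assuming mSemaphore is a java.util.concurrent.Semaphore and mSerializingExecutor serializes work on one thread; every name here is illustrative, not from the source.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

class BackpressureSketch {
    private final Semaphore semaphore = new Semaphore(16); // illustrative permit count
    private final ExecutorService serializingExecutor = Executors.newSingleThreadExecutor();

    void submit(Runnable request) {
        if (!semaphore.tryAcquire()) {
            return; // too many in-flight requests; drop silently, as write() does
        }
        serializingExecutor.execute(() -> {
            try {
                request.run(); // process on the serializing thread
            } finally {
                semaphore.release(); // always release, mirroring the finally block above
            }
        });
    }
}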
|
@Nullable
static ProxyProvider createFrom(Properties properties) {
Objects.requireNonNull(properties, "properties");
if (properties.containsKey(HTTP_PROXY_HOST) || properties.containsKey(HTTPS_PROXY_HOST)) {
return createHttpProxyFrom(properties);
}
if (properties.containsKey(SOCKS_PROXY_HOST)) {
return createSocksProxyFrom(properties);
}
return null;
}
|
@Test
void proxyFromSystemProperties_nullProxyProviderIfNoHostnamePropertySet() {
Properties properties = new Properties();
ProxyProvider provider = ProxyProvider.createFrom(properties);
assertThat(provider).isNull();
}
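A hedged sketch of feeding createFrom(Properties) above. The property keys are assumed to be the standard JDK networking keys ("http.proxyHost"/"http.proxyPort"), which is not verified here; createFrom is package-visible, so like the test this only works from within the same package.

Properties properties = new Properties();
properties.setProperty("http.proxyHost", "proxy.example.com"); // assumed key; illustrative host
properties.setProperty("http.proxyPort", "8080");
ProxyProvider provider = ProxyProvider.createFrom(properties);
// non-null here, because an HTTP proxy hostname property was set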
|
public void createPlainAccessConfig(final String addr, final PlainAccessConfig plainAccessConfig,
final long timeoutMillis)
throws RemotingException, InterruptedException, MQClientException {
CreateAccessConfigRequestHeader requestHeader = new CreateAccessConfigRequestHeader();
requestHeader.setAccessKey(plainAccessConfig.getAccessKey());
requestHeader.setSecretKey(plainAccessConfig.getSecretKey());
requestHeader.setAdmin(plainAccessConfig.isAdmin());
requestHeader.setDefaultGroupPerm(plainAccessConfig.getDefaultGroupPerm());
requestHeader.setDefaultTopicPerm(plainAccessConfig.getDefaultTopicPerm());
requestHeader.setWhiteRemoteAddress(plainAccessConfig.getWhiteRemoteAddress());
requestHeader.setTopicPerms(UtilAll.join(plainAccessConfig.getTopicPerms(), ","));
requestHeader.setGroupPerms(UtilAll.join(plainAccessConfig.getGroupPerms(), ","));
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.UPDATE_AND_CREATE_ACL_CONFIG, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return;
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
}
|
@Test
public void testCreatePlainAccessConfig_Exception() throws InterruptedException, RemotingException {
doAnswer(mock -> {
RemotingCommand request = mock.getArgument(1);
return createErrorResponse4UpdateAclConfig(request);
}).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong());
PlainAccessConfig config = createUpdateAclConfig();
try {
mqClientAPI.createPlainAccessConfig(brokerAddr, config, 3 * 1000);
} catch (MQClientException ex) {
assertThat(ex.getResponseCode()).isEqualTo(209);
assertThat(ex.getErrorMessage()).isEqualTo("corresponding to accessConfig has been updated failed");
}
}
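A hedged usage sketch for the ACL call above; PlainAccessConfig's setters are assumed to mirror the getters the method reads, and the broker address, credentials, and perm strings are illustrative.

// Requires java.util.Arrays and java.util.Collections.
PlainAccessConfig config = new PlainAccessConfig();
config.setAccessKey("rocketmq2");            // illustrative credentials
config.setSecretKey("12345678");
config.setAdmin(false);
config.setDefaultTopicPerm("DENY");
config.setDefaultGroupPerm("SUB");
config.setWhiteRemoteAddress("192.168.0.*");
config.setTopicPerms(Arrays.asList("topicA=DENY", "topicB=PUB|SUB"));
config.setGroupPerms(Collections.singletonList("groupA=DENY"));
// Throws MQClientException carrying the broker's code/remark on any non-SUCCESS response.
mqClientAPI.createPlainAccessConfig("127.0.0.1:10911", config, 3_000);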
|
public void finish(Promise<Void> aggregatePromise) {
ObjectUtil.checkNotNull(aggregatePromise, "aggregatePromise");
checkInEventLoop();
if (this.aggregatePromise != null) {
throw new IllegalStateException("Already finished");
}
this.aggregatePromise = aggregatePromise;
if (doneCount == expectedCount) {
tryPromise();
}
}
|
@SuppressWarnings("unchecked")
@Test
public void testFinishCalledTwiceThrows() {
combiner.finish(p1);
assertThrows(IllegalStateException.class, new Executable() {
@Override
public void execute() {
combiner.finish(p1);
}
});
}
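A hedged sketch of the single-finish contract enforced above, using Netty's PromiseCombiner as found in 4.1.x; the executor and tracked future are illustrative.

EventExecutor executor = ImmediateEventExecutor.INSTANCE;
PromiseCombiner combiner = new PromiseCombiner(executor);
combiner.add(executor.newSucceededFuture(null)); // add tracked futures before finishing
Promise<Void> aggregate = executor.newPromise();
combiner.finish(aggregate);      // first call: legal, arms the aggregate promise
// combiner.finish(aggregate);   // a second call would throw IllegalStateException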
|
@Override
public boolean isOutput() {
return false;
}
|
@Test
public void testIsOutput() throws Exception {
assertFalse( analyzer.isOutput() );
}
|
public SuperTrendIndicator(final BarSeries series) {
this(series, 10, 3d);
}
|
@Test
public void testSuperTrendIndicator() {
SuperTrendIndicator superTrendIndicator = new SuperTrendIndicator(data);
assertNumEquals(this.numOf(15.730621000000003), superTrendIndicator.getValue(4));
assertNumEquals(this.numOf(17.602360938100002), superTrendIndicator.getValue(9));
assertNumEquals(this.numOf(22.78938583966133), superTrendIndicator.getValue(14));
}
|