| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
/**
 * Authenticates the supplied username/password token against the local PAM
 * service configured on this realm.
 *
 * @param token a Shiro {@code UsernamePasswordToken}
 * @return authentication info carrying a {@code UserPrincipal} for the PAM user
 * @throws AuthenticationException if PAM rejects the credentials
 */
@Override
protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token)
        throws AuthenticationException {
    final UsernamePasswordToken credentials = (UsernamePasswordToken) token;
    final UnixUser unixUser;
    try {
        // PAM wants the password as a String; the token stores it as char[].
        unixUser = new PAM(this.getService())
                .authenticate(credentials.getUsername(), new String(credentials.getPassword()));
    } catch (PAMException e) {
        throw new AuthenticationException("Authentication failed for PAM.", e);
    }
    return new SimpleAuthenticationInfo(
            new UserPrincipal(unixUser),
            credentials.getCredentials(),
            getName());
}
|
// Integration test: authenticates against the local "sshd" PAM service using
// real credentials supplied via environment variables.
@Test
public void testDoGetAuthenticationInfo() {
    PamRealm realm = new PamRealm();
    realm.setService("sshd");
    String pamUser = System.getenv("PAM_USER");
    String pamPass = System.getenv("PAM_PASS");
    // Skip (not fail) when the environment does not provide PAM credentials.
    assumeTrue(pamUser != null);
    assumeTrue(pamPass != null);
    // mock shiro auth token
    UsernamePasswordToken authToken = mock(UsernamePasswordToken.class);
    when(authToken.getUsername()).thenReturn(pamUser);
    when(authToken.getPassword()).thenReturn(pamPass.toCharArray());
    when(authToken.getCredentials()).thenReturn(pamPass);
    AuthenticationInfo authInfo = realm.doGetAuthenticationInfo(authToken);
    assertNotNull(authInfo.getCredentials());
}
|
/**
 * Resolves the response media type: a type derived from the request URI wins,
 * then the accepted content type, then the system default.
 */
@Override
public String getMediaType() {
    // Note: both candidates are computed eagerly, matching the nested
    // firstNonNull(...) argument evaluation of the original expression.
    final String fromUrl = mediaTypeFromUrl(source.getRequestURI());
    final String fallback = firstNonNull(acceptedContentTypeInResponse(), MediaTypes.DEFAULT);
    return firstNonNull(fromUrl, fallback);
}
|
// With no media-type hint in the URI (no file extension), the Accept header
// must determine the resulting media type.
@Test
public void getMediaType() {
    when(source.getHeader(HttpHeaders.ACCEPT)).thenReturn(MediaTypes.JSON);
    when(source.getRequestURI()).thenReturn("/path/to/resource/search");
    assertThat(underTest.getMediaType()).isEqualTo(MediaTypes.JSON);
}
|
/**
 * Logs an error-level message, applying printf-style formatting first.
 *
 * @param tag     log tag
 * @param message format string
 * @param args    format arguments
 */
@Override
public void e(String tag, String message, Object... args) {
    // Format once, then hand off to the Android error log channel.
    final String formatted = formatString(message, args);
    Log.e(tag, formatted);
}
|
// Verifies that a throwable plus a format string is logged at ERROR level
// with the arguments already substituted into the message.
@Test
public void errorWithThrowableLoggedCorrectly() {
    final Throwable cause = new Throwable("Test Throwable");
    logger.e(cause, tag, "Hello %s", "World");
    assertLogged(ERROR, tag, "Hello World", cause);
}
|
/**
 * Builds the client config, resolving resources against the calling thread's
 * context class loader.
 */
public ClientConfig build() {
    final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    return build(contextClassLoader);
}
|
// Verifies that the builder honours the "hazelcast.client.config" system
// property pointing at a classpath resource.
@Override
@Test
public void loadingThroughSystemProperty_existingClasspathResource() {
    // Fix: capture and restore the previous property value so this test does
    // not leak configuration into other tests running in the same JVM.
    String previous = System.setProperty("hazelcast.client.config", "classpath:test-hazelcast-client.xml");
    try {
        XmlClientConfigBuilder configBuilder = new XmlClientConfigBuilder();
        ClientConfig config = configBuilder.build();
        assertEquals("foobar-xml", config.getClusterName());
        assertEquals("com.hazelcast.nio.ssl.BasicSSLContextFactory",
                config.getNetworkConfig().getSSLConfig().getFactoryClassName());
        assertEquals(128, config.getNetworkConfig().getSocketOptions().getBufferSize());
        assertFalse(config.getNetworkConfig().getSocketOptions().isKeepAlive());
        assertFalse(config.getNetworkConfig().getSocketOptions().isTcpNoDelay());
        assertEquals(3, config.getNetworkConfig().getSocketOptions().getLingerSeconds());
    } finally {
        if (previous == null) {
            System.clearProperty("hazelcast.client.config");
        } else {
            System.setProperty("hazelcast.client.config", previous);
        }
    }
}
|
// Returns the accumulated CPC sketch.
// NOTE(review): unionAll() presumably merges all buffered sketches into a
// single result — confirm against the accumulator's class body.
@Nonnull
@Override
public CpcSketch getResult() {
    return unionAll();
}
|
// Merging an empty accumulator must keep the target empty and yield a
// zero cardinality estimate.
@Test
public void testUnionWithEmptyInput() {
    CpcSketchAccumulator accumulator = new CpcSketchAccumulator(_lgNominalEntries, 3);
    CpcSketchAccumulator emptyAccumulator = new CpcSketchAccumulator(_lgNominalEntries, 3);
    accumulator.merge(emptyAccumulator);
    Assert.assertTrue(accumulator.isEmpty());
    Assert.assertEquals(accumulator.getResult().getEstimate(), 0.0);
}
|
@Udf(description = "Converts a string representation of a date in the given format"
    + " into the number of days since 1970-01-01 00:00:00 UTC/GMT.")
public int stringToDate(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedDate,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
    // NB: We do not perform a null check here, preferring to throw an exception as
    // there is no sentinel value for a "null" Date.
    try {
        // `formatters` caches compiled DateTimeFormatter instances per pattern;
        // its get() may throw a checked ExecutionException (hence the multi-catch).
        final DateTimeFormatter formatter = formatters.get(formatPattern);
        // Days since the epoch, narrowed to int (safe for any realistic date).
        return ((int) LocalDate.parse(formattedDate, formatter).toEpochDay());
    } catch (final ExecutionException | RuntimeException e) {
        // Wrap both cache-loading and parse failures in the KSQL error type,
        // preserving the cause.
        throw new KsqlFunctionException("Failed to parse date '" + formattedDate
            + "' with formatter '" + formatPattern
            + "': " + e.getMessage(), e);
    }
}
|
// Hammers the UDF from a parallel stream with 10k distinct format patterns to
// prove the formatter cache is thread-safe and keyed per pattern.
@Test
public void shouldBeThreadSafeAndWorkWithManyDifferentFormatters() {
    IntStream.range(0, 10_000)
        .parallel()
        .forEach(idx -> {
            try {
                // Each index yields a unique literal suffix, so every
                // iteration exercises a different cached formatter.
                final String sourceDate = "2021-12-01X" + idx;
                final String pattern = "yyyy-MM-dd'X" + idx + "'";
                final int result = udf.stringToDate(sourceDate, pattern);
                // 18962 is the epoch-day of 2021-12-01.
                assertThat(result, is(18962));
            } catch (final Exception e) {
                fail(e.getMessage());
            }
        });
}
|
/**
 * Tells whether the URL denotes a route: either it uses the route protocol or
 * its category (defaulting when absent) is the routers category.
 */
public static boolean isRoute(URL url) {
    if (ROUTE_PROTOCOL.equals(url.getProtocol())) {
        return true;
    }
    return ROUTERS_CATEGORY.equals(url.getCategory(DEFAULT_CATEGORY));
}
|
// Covers all three classification paths: plain URL, route:// protocol,
// and category=routers query parameter.
@Test
public void testIsRoute() {
    URL plainUrl = UrlUtils.parseURL("http://example.com", null);
    URL routeProtocolUrl = UrlUtils.parseURL("route://example.com", null);
    URL routersCategoryUrl = UrlUtils.parseURL("http://example.com?category=routers", null);
    assertFalse(UrlUtils.isRoute(plainUrl));
    assertTrue(UrlUtils.isRoute(routeProtocolUrl));
    assertTrue(UrlUtils.isRoute(routersCategoryUrl));
}
|
/**
 * Applies the ACL carried by {@code status} to a Google Storage bucket or
 * object and records the refreshed attributes back on the transfer status.
 * Not-found failures on directory placeholders are swallowed (common-prefix
 * folders have no physical object to update).
 */
@Override
public void setPermission(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        final Path bucket = containerService.getContainer(file);
        if (containerService.isContainer(file)) {
            // Bucket-level ACL update.
            final List<BucketAccessControl> bucketAccessControls = this.toBucketAccessControl(status.getAcl());
            status.setResponse(new GoogleStorageAttributesFinderFeature(session).toAttributes(
                session.getClient().buckets().update(bucket.getName(),
                    new Bucket().setAcl(bucketAccessControls)).execute()
            ));
        }
        else {
            // Object-level ACL update.
            final List<ObjectAccessControl> objectAccessControls = this.toObjectAccessControl(status.getAcl());
            final Storage.Objects.Update request = session.getClient().objects().update(bucket.getName(), containerService.getKey(file),
                new StorageObject().setAcl(objectAccessControls));
            if (bucket.attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
                // Requester-pays buckets need an explicit billing project set
                // on the request; the username is used as the project id here.
                request.setUserProject(session.getHost().getCredentials().getUsername());
            }
            status.setResponse(new GoogleStorageAttributesFinderFeature(session).toAttributes(
                request.execute()
            ));
        }
    }
    catch (IOException e) {
        final BackgroundException failure = new GoogleStorageExceptionMappingService().map("Cannot change permissions of {0}", e, file);
        if (file.isDirectory()) {
            if (failure instanceof NotfoundException) {
                // No placeholder file may exist but we just have a common prefix
                return;
            }
        }
        // 400 Bad Request response for buckets with uniform bucket-level access enabled
        throw failure;
    }
}
|
// Setting permissions on a non-existent object must surface NotfoundException
// (the random file name is never created).
@Test(expected = NotfoundException.class)
public void testWriteNotFound() throws Exception {
    final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory));
    final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final GoogleStorageAccessControlListFeature f = new GoogleStorageAccessControlListFeature(session);
    final Acl acl = new Acl();
    acl.addAll(new Acl.GroupUser(Acl.GroupUser.EVERYONE), new Acl.Role(Acl.Role.READ));
    f.setPermission(test, acl);
}
|
/**
 * Copies {@code src} to {@code target}, delegating to commons-io
 * {@code FileUtils.copyFile} (which creates or overwrites the target).
 *
 * @throws IOException if the copy fails
 */
public static void copyFile(File src, File target) throws IOException {
    FileUtils.copyFile(src, target);
}
|
// Copies a known file into a fresh temp file and verifies contents match.
@Test
void testCopyFile() throws IOException {
    File nacos = DiskUtils.createTmpFile("nacos", ".ut");
    // Fix: register cleanup BEFORE asserting, so the temp file is still
    // removed on JVM exit when the assertion below fails.
    nacos.deleteOnExit();
    DiskUtils.copyFile(testFile, nacos);
    assertEquals(DiskUtils.readFile(testFile), DiskUtils.readFile(nacos));
}
|
/**
 * Renders HTML documentation tables for the config options of a class.
 * When the class carries a {@code @ConfigGroups} annotation, one table is
 * produced per matching group plus one table (keyed by {@code null}) for the
 * leftover "default" options; otherwise a single {@code null}-keyed table
 * covers everything. Classes with no documentable options yield an empty list.
 *
 * @return pairs of (group or {@code null} for default, HTML table markup)
 */
@VisibleForTesting
static List<Tuple2<ConfigGroup, String>> generateTablesForClass(
        Class<?> optionsClass, Collection<OptionWithMetaInfo> optionWithMetaInfos) {
    ConfigGroups configGroups = optionsClass.getAnnotation(ConfigGroups.class);
    List<OptionWithMetaInfo> allOptions = selectOptionsToDocument(optionWithMetaInfos);
    if (allOptions.isEmpty()) {
        return Collections.emptyList();
    }
    List<Tuple2<ConfigGroup, String>> tables;
    if (configGroups != null) {
        // One table per group plus one default table.
        tables = new ArrayList<>(configGroups.groups().length + 1);
        // The tree assigns each option to its most specific group.
        Tree tree = new Tree(configGroups.groups(), allOptions);
        for (ConfigGroup group : configGroups.groups()) {
            List<OptionWithMetaInfo> configOptions = tree.findConfigOptions(group);
            if (!configOptions.isEmpty()) {
                sortOptions(configOptions);
                tables.add(Tuple2.of(group, toHtmlTable(configOptions)));
            }
        }
        // Options not claimed by any group land in the null-keyed table.
        List<OptionWithMetaInfo> configOptions = tree.getDefaultOptions();
        if (!configOptions.isEmpty()) {
            sortOptions(configOptions);
            tables.add(Tuple2.of(null, toHtmlTable(configOptions)));
        }
    } else {
        sortOptions(allOptions);
        tables = Collections.singletonList(Tuple2.of(null, toHtmlTable(allOptions)));
    }
    return tables;
}
|
// End-to-end check of multi-group table generation: a class with two declared
// config groups plus ungrouped options must yield exactly three tables
// ("firstGroup", "secondGroup", and the null-keyed "default"), each matching
// the expected HTML markup byte for byte (options sorted by key).
@Test
void testCreatingMultipleGroups() {
final List<Tuple2<ConfigGroup, String>> tables =
ConfigOptionsDocGenerator.generateTablesForClass(
TestConfigMultipleSubGroup.class,
ConfigurationOptionLocator.extractConfigOptions(
TestConfigMultipleSubGroup.class));
assertThat(tables).hasSize(3);
// Re-key by group name ("default" for the null group) for easy lookup.
final HashMap<String, String> tablesConverted = new HashMap<>();
for (Tuple2<ConfigGroup, String> table : tables) {
tablesConverted.put(table.f0 != null ? table.f0.name() : "default", table.f1);
}
assertThat(tablesConverted)
.containsEntry(
"firstGroup",
"<table class=\"configuration table table-bordered\">\n"
+ "    <thead>\n"
+ "        <tr>\n"
+ "            <th class=\"text-left\" style=\"width: 20%\">Key</th>\n"
+ "            <th class=\"text-left\" style=\"width: 15%\">Default</th>\n"
+ "            <th class=\"text-left\" style=\"width: 10%\">Type</th>\n"
+ "            <th class=\"text-left\" style=\"width: 55%\">Description</th>\n"
+ "        </tr>\n"
+ "    </thead>\n"
+ "    <tbody>\n"
+ "        <tr>\n"
+ "            <td><h5>first.option.a</h5></td>\n"
+ "            <td style=\"word-wrap: break-word;\">2</td>\n"
+ "            <td>Integer</td>\n"
+ "            <td>This is example description for the first option.</td>\n"
+ "        </tr>\n"
+ "    </tbody>\n"
+ "</table>\n");
assertThat(tablesConverted)
.containsEntry(
"secondGroup",
"<table class=\"configuration table table-bordered\">\n"
+ "    <thead>\n"
+ "        <tr>\n"
+ "            <th class=\"text-left\" style=\"width: 20%\">Key</th>\n"
+ "            <th class=\"text-left\" style=\"width: 15%\">Default</th>\n"
+ "            <th class=\"text-left\" style=\"width: 10%\">Type</th>\n"
+ "            <th class=\"text-left\" style=\"width: 55%\">Description</th>\n"
+ "        </tr>\n"
+ "    </thead>\n"
+ "    <tbody>\n"
+ "        <tr>\n"
+ "            <td><h5>second.option.a</h5></td>\n"
+ "            <td style=\"word-wrap: break-word;\">(none)</td>\n"
+ "            <td>String</td>\n"
+ "            <td>This is long example description for the second option.</td>\n"
+ "        </tr>\n"
+ "    </tbody>\n"
+ "</table>\n");
assertThat(tablesConverted)
.containsEntry(
"default",
"<table class=\"configuration table table-bordered\">\n"
+ "    <thead>\n"
+ "        <tr>\n"
+ "            <th class=\"text-left\" style=\"width: 20%\">Key</th>\n"
+ "            <th class=\"text-left\" style=\"width: 15%\">Default</th>\n"
+ "            <th class=\"text-left\" style=\"width: 10%\">Type</th>\n"
+ "            <th class=\"text-left\" style=\"width: 55%\">Description</th>\n"
+ "        </tr>\n"
+ "    </thead>\n"
+ "    <tbody>\n"
+ "        <tr>\n"
+ "            <td><h5>fourth.option.a</h5></td>\n"
+ "            <td style=\"word-wrap: break-word;\">(none)</td>\n"
+ "            <td>String</td>\n"
+ "            <td>This is long example description for the fourth option.</td>\n"
+ "        </tr>\n"
+ "        <tr>\n"
+ "            <td><h5>third.option.a</h5></td>\n"
+ "            <td style=\"word-wrap: break-word;\">2</td>\n"
+ "            <td>Integer</td>\n"
+ "            <td>This is example description for the third option.</td>\n"
+ "        </tr>\n"
+ "    </tbody>\n"
+ "</table>\n");
}
|
// Static factory wrapping the given elements.
// NOTE(review): the two Optional.absent() arguments and the false flag are
// presumably coder/type hints and a mode switch — confirm against the
// Values constructor before relying on their meaning.
public static <T> Values<T> of(Iterable<T> elems) {
    return new Values<>(elems, Optional.absent(), Optional.absent(), false);
}
|
// Runner-validated test: Create.of on the LINES fixture must produce a
// PCollection containing exactly those elements (order-insensitive).
@Test
@Category(ValidatesRunner.class)
public void testCreate() {
    PCollection<String> output = p.apply(Create.of(LINES));
    PAssert.that(output).containsInAnyOrder(LINES_ARRAY);
    p.run();
}
|
/**
 * Converts a decimal BTC amount into a {@code Coin} of satoshis.
 *
 * @throws ArithmeticException if the amount cannot be represented exactly
 */
public static Coin ofBtc(BigDecimal coins) throws ArithmeticException {
    final long satoshis = btcToSatoshi(coins);
    return Coin.valueOf(satoshis);
}
|
// Boundary coverage: zero, one whole BTC, and the decimal BTC values that map
// exactly to Long.MIN_VALUE / Long.MAX_VALUE satoshis.
@Test
public void testOfBtc() {
    assertEquals(Coin.valueOf(Long.MIN_VALUE), Coin.ofBtc(new BigDecimal("-92233720368.54775808")));
    assertEquals(ZERO, Coin.ofBtc(BigDecimal.ZERO));
    assertEquals(COIN, Coin.ofBtc(BigDecimal.ONE));
    assertEquals(Coin.valueOf(Long.MAX_VALUE), Coin.ofBtc(new BigDecimal("92233720368.54775807")));
}
|
/**
 * Creates users from the given list, delegating to the ...WithHttpInfo
 * variant and discarding the HTTP response details.
 *
 * @throws RestClientException on API error
 */
public void createUsersWithArrayInput(List<User> body) throws RestClientException {
    createUsersWithArrayInputWithHttpInfo(body);
}
|
// Generated smoke test: only checks the call does not throw with a null body.
// Real validation assertions are still outstanding (see TODO).
@Test
public void createUsersWithArrayInputTest() {
    List<User> body = null;
    api.createUsersWithArrayInput(body);
    // TODO: test validations
}
|
/**
 * Human-readable dump of every tracked field of this entry view, in the
 * fixed order key..maxIdle.
 */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("EntryView{");
    sb.append("key=").append(key);
    sb.append(", value=").append(value);
    sb.append(", cost=").append(cost);
    sb.append(", creationTime=").append(creationTime);
    sb.append(", expirationTime=").append(expirationTime);
    sb.append(", hits=").append(hits);
    sb.append(", lastAccessTime=").append(lastAccessTime);
    sb.append(", lastStoredTime=").append(lastStoredTime);
    sb.append(", lastUpdateTime=").append(lastUpdateTime);
    sb.append(", version=").append(version);
    sb.append(", ttl=").append(ttl);
    sb.append(", maxIdle=").append(maxIdle);
    return sb.append('}').toString();
}
|
// toString of a live entry view must match the independently built expected
// string from the stringify(...) helper.
@Test
public void test_toString() throws Exception {
    HazelcastInstance instance = createHazelcastInstance();
    IMap<Integer, Integer> map = instance.getMap("test");
    map.put(1, 1);
    EntryView<Integer, Integer> entryView = map.getEntryView(1);
    assertEquals(stringify(entryView), entryView.toString());
}
|
/**
 * Multiplies two {@link Number}s, widening the result to the wider of the two
 * operand types: Double > Float > Long > Integer (default).
 *
 * @param first  left operand, non-null
 * @param second right operand, non-null
 * @return the product as the widest applicable boxed type
 */
@Nonnull
public static Number mul(@Nonnull Number first, @Nonnull Number second) {
    // Guard-return form: test the widest type first, fall through to int.
    if (first instanceof Double || second instanceof Double) {
        return first.doubleValue() * second.doubleValue();
    }
    if (first instanceof Float || second instanceof Float) {
        return first.floatValue() * second.floatValue();
    }
    if (first instanceof Long || second instanceof Long) {
        return first.longValue() * second.longValue();
    }
    return first.intValue() * second.intValue();
}
|
// One case per widening branch: int*int -> Integer, mixed with Double ->
// Double, with Float -> Float, with Long -> Long.
@Test
void testMul() {
    assertEquals(21, NumberUtil.mul(3, 7));
    assertEquals(21D, NumberUtil.mul(3, 7.0D));
    assertEquals(21F, NumberUtil.mul(3, 7.0F));
    assertEquals(21L, NumberUtil.mul(3, 7L));
}
|
/**
 * Resolves the managed flag per project UUID via the delegate managed-project
 * service when one is configured; otherwise marks every project non-managed.
 */
@Override
public Map<String, Boolean> getProjectUuidToManaged(DbSession dbSession, Set<String> projectUuids) {
    // orElseGet: build the all-non-managed fallback map only when no delegate
    // exists. The original used orElse(...), which eagerly computed the
    // fallback on every call even when a delegate was present.
    return findManagedProjectService()
        .map(managedProjectService -> managedProjectService.getProjectUuidToManaged(dbSession, projectUuids))
        .orElseGet(() -> returnNonManagedForAll(projectUuids));
}
|
// With no delegate services configured, every requested project UUID must be
// reported as non-managed.
@Test
public void getProjectUuidToManaged_whenNoDelegates_setAllProjectsAsNonManaged() {
    Set<String> projectUuids = Set.of("a", "b");
    DelegatingManagedServices managedInstanceService = NO_MANAGED_SERVICES;
    Map<String, Boolean> projectUuidToManaged = managedInstanceService.getProjectUuidToManaged(dbSession, projectUuids);
    assertThat(projectUuidToManaged).containsExactlyInAnyOrderEntriesOf(Map.of("a", false, "b", false));
}
|
/**
 * Copies {@code source} to {@code target} using an NIO channel transfer,
 * creating missing parent directories and the target file as needed.
 *
 * @param source path of an existing source file
 * @param target path of the target file
 * @throws IOException              if the copy fails
 * @throws IllegalArgumentException if the source file does not exist
 * @throws RuntimeException         if the parent directory or target file cannot be created
 */
public static void copyFile(String source, String target) throws IOException {
    File sf = new File(source);
    if (!sf.exists()) {
        throw new IllegalArgumentException("source file does not exist.");
    }
    File tf = new File(target);
    // Bug fix: File.mkdirs() returns false when the directory ALREADY exists,
    // so the original threw for any target whose parent was present. Only
    // fail when the parent is missing and cannot be created. Also guards
    // against a null parent (bare relative file name).
    File parent = tf.getParentFile();
    if (parent != null && !parent.exists() && !parent.mkdirs()) {
        throw new RuntimeException("failed to create parent directory.");
    }
    if (!tf.exists() && !tf.createNewFile()) {
        throw new RuntimeException("failed to create target file.");
    }
    // NOTE(review): an existing larger target is not truncated before the
    // transfer — trailing bytes would survive. Pre-existing behavior, kept.
    try (
        FileChannel sc = new FileInputStream(sf).getChannel();
        FileChannel tc = new FileOutputStream(tf).getChannel()) {
        sc.transferTo(0, sc.size(), tc);
    }
}
|
// Copies into a not-yet-existing "copy" subdirectory and verifies existence,
// length, and byte-for-byte content equality.
@Test
public void testCopyFile() throws Exception {
    String sourcePath = sourceFile.getAbsolutePath();
    // Target parent does not exist yet; copyFile must create it.
    String targetPath = sourceFile.getParent() + File.separator + "copy" + File.separator + "target.txt";
    IoUtil.copyFile(sourcePath, targetPath);
    File targetFile = new File(targetPath);
    Assert.assertTrue(targetFile.exists());
    Assert.assertEquals(sourceFile.length(), targetFile.length());
    byte[] sourceBytes = Files.readAllBytes(sourceFile.toPath());
    byte[] targetBytes = Files.readAllBytes(targetFile.toPath());
    Assert.assertArrayEquals(sourceBytes, targetBytes);
}
|
// Registers a named shutdown hook via the pluggable shutdownHookAdder.
// NOTE(review): the indirection presumably lets tests intercept hooks instead
// of registering real JVM shutdown hooks — confirm against the field's setup.
public static void addShutdownHook(String name, Runnable runnable) {
    shutdownHookAdder.addShutdownHook(name, runnable);
}
|
// Registering a shutdown hook must only schedule it: the runnable must not
// run at registration time (list stays empty).
@Test
public void shouldNotInvokeShutdownHookImmediately() {
    List<Object> list = new ArrayList<>();
    Runnable runnable = () -> list.add(this);
    Exit.addShutdownHook("message", runnable);
    assertEquals(0, list.size());
}
|
// Collects every deprecated-property path in the flow; "" is the root path
// prefix for the recursive traversal.
public List<String> deprecationPaths(Flow flow) {
    return deprecationTraversal("", flow).toList();
}
|
// A flow mixing current and deprecated constructs must be flagged at exactly
// two paths: the input declared via the deprecated `name` property (index 1)
// and the task using the deprecated Echo type (index 0).
@SuppressWarnings("deprecation")
@Test
void propertyRenamingDeprecation() {
    Flow flow = Flow.builder()
        .id("flowId")
        .namespace("io.kestra.unittest")
        .inputs(List.of(
            // Uses the current `id` property — must NOT be reported.
            StringInput.builder()
                .id("inputWithId")
                .type(Type.STRING)
                .build(),
            // Uses the deprecated `name` property — must be reported.
            StringInput.builder()
                .name("inputWithName")
                .type(Type.STRING)
                .build()
        ))
        .tasks(Collections.singletonList(Echo.builder()
            .id("taskId")
            .type(Return.class.getName())
            .format("test")
            .build()))
        .build();
    assertThat(flowService.deprecationPaths(flow), containsInAnyOrder("inputs[1].name", "tasks[0]"));
}
|
// Submits the edit (opcode + payload) to the journal and blocks without
// timeout until the journal task completes.
// NOTE(review): the -1 argument presumably means "no explicit id/timeout" —
// confirm against submitLog's signature.
protected void logEdit(short op, Writable writable) {
    JournalTask task = submitLog(op, writable, -1);
    waitInfinity(task);
}
|
// Verifies that logEdit survives interrupts in both of its blocking states:
// t1 blocks waiting for its submitted task's completion, t2 blocks putting
// into the full (capacity-1) queue. Repeated interrupts must not lose either
// edit; both tasks are eventually drained and marked succeeded.
@Test
public void testInterrupt() throws Exception {
    // block if more than one task is put
    BlockingQueue<JournalTask> journalQueue = new ArrayBlockingQueue<>(1);
    Thread t1 = new Thread(new Runnable() {
        @Override
        public void run() {
            EditLog editLog = new EditLog(journalQueue);
            editLog.logEdit((short) 1, new Text("111"));
        }
    });
    t1.start();
    // Busy-wait until t1's task has been enqueued.
    while (journalQueue.isEmpty()) {
        Thread.sleep(50);
    }
    // t1 is blocked in task.get() now
    Assert.assertEquals(1, journalQueue.size());
    // t2 will be blocked in queue.put() because queue is full
    Thread t2 = new Thread(new Runnable() {
        @Override
        public void run() {
            EditLog editLog = new EditLog(journalQueue);
            editLog.logEdit((short) 2, new Text("222"));
        }
    });
    t2.start();
    // t1 got interrupt exception while blocking in task.get()
    for (int i = 0; i != 3; i++) {
        t1.interrupt();
        Thread.sleep(100);
    }
    // t2 got interrupt exception while blocking in queue.put()
    for (int i = 0; i != 3; i++) {
        t2.interrupt();
        Thread.sleep(100);
    }
    // Only one slot: t1's task is still queued, t2's follows after the take.
    Assert.assertEquals(1, journalQueue.size());
    JournalTask task = journalQueue.take();
    task.markSucceed();
    task = journalQueue.take();
    task.markSucceed();
    t1.join();
    t2.join();
}
|
/**
 * Records a data-influence audit entry for the given entity field, attributed
 * to the currently active trace span. Silently returns when tracing is not
 * active or the span has no operation type.
 */
@Override
public void appendDataInfluence(String entityName, String entityId, String fieldName,
        String fieldCurrentValue) {
    // Tracing might be disabled entirely or no span may be active — both are
    // normal conditions, not errors, so bail out quietly.
    if (traceContext.tracer() == null) {
        return;
    }
    if (traceContext.tracer().getActiveSpan() == null) {
        return;
    }
    String spanId = traceContext.tracer().getActiveSpan().spanId();
    OpType type = traceContext.tracer().getActiveSpan().getOpType();
    ApolloAuditLogDataInfluence.Builder builder = ApolloAuditLogDataInfluence.builder().spanId(spanId)
        .entityName(entityName).entityId(entityId).fieldName(fieldName);
    if (type == null) {
        return;
    }
    switch (type) {
        // CREATE falls through to UPDATE: both record the value as the new value.
        case CREATE:
        case UPDATE:
            builder.newVal(fieldCurrentValue);
            break;
        // DELETE records the value as the old value (last case, no break needed).
        case DELETE:
            builder.oldVal(fieldCurrentValue);
    }
    dataInfluenceService.save(builder.build());
}
|
// DELETE path: the current value must be stored as the OLD value, the new
// value stays null, and span/entity metadata is propagated verbatim.
@Test
public void testAppendDataInfluenceCaseDelete() {
    {
        // Arrange an active span reporting a DELETE operation.
        ApolloAuditSpan span = Mockito.mock(ApolloAuditSpan.class);
        Mockito.when(tracer.getActiveSpan()).thenReturn(span);
        Mockito.when(span.spanId()).thenReturn(spanId);
        Mockito.when(span.getOpType()).thenReturn(delete);
    }
    api.appendDataInfluence(entityName, entityId, fieldName, fieldCurrentValue);
    Mockito.verify(dataInfluenceService, Mockito.times(1)).save(influenceCaptor.capture());
    ApolloAuditLogDataInfluence capturedInfluence = influenceCaptor.getValue();
    assertEquals(entityId, capturedInfluence.getInfluenceEntityId());
    assertEquals(entityName, capturedInfluence.getInfluenceEntityName());
    assertEquals(fieldName, capturedInfluence.getFieldName());
    assertEquals(fieldCurrentValue, capturedInfluence.getFieldOldValue());
    assertNull(capturedInfluence.getFieldNewValue());
    assertEquals(spanId, capturedInfluence.getSpanId());
}
|
// Ordering weight of this filter; 0 sorts it before filters with larger values.
@Override
public int getOrder() {
    return 0;
}
|
// The encryption filter must advertise order 0.
@Test
void testGetOrder() {
    int order = configEncryptionFilter.getOrder();
    assertEquals(0, order);
}
|
// Appends a protocol event to the tail of the FIFO event list.
public void add(TProtocol p) {
    events.addLast(p);
}
|
// Builds a TestNameList carrying two names and runs it through validate().
// NOTE(review): validate(o) presumably round-trips the object through Thrift
// serialization — confirm against the helper's definition.
@Test
public void testList() throws TException {
    final List<String> names = new ArrayList<String>();
    names.add("John");
    names.add("Jack");
    final TestNameList o = new TestNameList("name", names);
    validate(o);
}
|
/**
 * Ensures the backup and restore working directories exist, creating them if
 * necessary. Marks the handler initialized on success.
 *
 * @return true when both directories are usable, false otherwise (logged)
 */
private boolean init() {
    // Check and create backup dir if necessary
    if (!checkAndCreateDir(new File(BACKUP_ROOT_DIR.toString()), "backup")) {
        return false;
    }
    // Check and create restore dir if necessary
    if (!checkAndCreateDir(new File(RESTORE_ROOT_DIR.toString()), "restore")) {
        return false;
    }
    isInit = true;
    return true;
}

// Shared helper for the two identical dir-setup sequences above: creates the
// directory when missing, and rejects a pre-existing non-directory path.
private boolean checkAndCreateDir(File dir, String name) {
    if (!dir.exists()) {
        if (!dir.mkdirs()) {
            LOG.warn("failed to create " + name + " dir: " + dir.getPath());
            return false;
        }
    } else if (!dir.isDirectory()) {
        LOG.warn(name + " dir is not a directory: " + dir.getPath());
        return false;
    }
    return true;
}
|
// After the catalog-ready hook runs, the backup root directory must exist on disk.
@Test
public void testInit(@Mocked GlobalStateMgr globalStateMgr, @Mocked BrokerMgr brokerMgr, @Mocked EditLog editLog) {
    setUpMocker(globalStateMgr, brokerMgr, editLog);
    handler = new BackupHandler(globalStateMgr);
    handler.runAfterCatalogReady();
    File backupDir = new File(BackupHandler.BACKUP_ROOT_DIR.toString());
    Assert.assertTrue(backupDir.exists());
}
|
/**
 * Runs object recognition on an image stream: decodes it into an INDArray,
 * applies the model's preprocessing in place, performs a forward pass
 * (train=false), and maps the first output tensor to recognised objects.
 */
@Override
public List<RecognisedObject> recognise(InputStream stream, ContentHandler handler,
        Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    INDArray image = imageLoader.asMatrix(stream);
    preProcessor.transform(image);
    INDArray[] output = model.output(false, image);
    return predict(output[0]);
}
|
// End-to-end recognition: load the VGG16 Tika config, parse lion.jpg, and
// expect a "lion" entry among the detected OBJECT metadata values.
@Test
public void recognise() throws Exception {
    assumeFalse(SystemUtils.OS_ARCH.equals("aarch64"), "doesn't yet work on aarch64");
    TikaConfig config = null;
    try (InputStream is = getClass().getResourceAsStream("dl4j-vgg16-config.xml")) {
        config = new TikaConfig(is);
    } catch (Exception e) {
        // Model weights are fetched over the network; treat connectivity
        // problems as a skip rather than a failure.
        if (e.getMessage() != null && (e.getMessage().contains("Connection refused") ||
                e.getMessage().contains("connect timed out") || e.getMessage().contains("403 for URL"))) {
            assumeTrue(false, "skipping test because of connection issue");
        }
        throw e;
    }
    // Fix: the original called assumeTrue(false, ...) unconditionally, which
    // aborted the test every run. The assumption must guard on the config
    // actually having been loaded.
    assumeTrue(config != null, "something went wrong loading tika config");
    Tika tika = new Tika(config);
    Metadata md = new Metadata();
    try (InputStream is = getClass().getResourceAsStream("lion.jpg")) {
        tika.parse(is, md);
    }
    String[] objects = md.getValues("OBJECT");
    boolean found = false;
    for (String object : objects) {
        if (object.contains("lion")) {
            found = true;
            break;
        }
    }
    assertTrue(found);
}
|
/**
 * Equality is based solely on the {@code id} field; any non-EipAttribute
 * (including null) compares unequal.
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    return o instanceof EipAttribute && id.equals(((EipAttribute) o).id);
}
|
// Exercises the equals contract: reflexivity, equality on identical ids,
// inequality against null, and inequality after an id change.
@Test
public void testEquals() {
    EipAttribute first = getInstance();
    EipAttribute second = getInstance();
    assertTrue(first.equals(first));    // reflexive
    assertTrue(first.equals(second));   // same id
    assertFalse(first.equals(null));    // null contract
    second.setId(SOME_VALUE);
    assertFalse(first.equals(second));  // differing id
}
|
/**
 * Loads the global-store checkpoint file, initializes every global state
 * store, and validates that each checkpointed topic-partition belongs to a
 * known global changelog topic. A stale checkpoint entry is fatal: it means
 * the state directory no longer matches the topology.
 *
 * @return the (unmodifiable) set of global store names
 * @throws StreamsException on checkpoint read failure or a stale checkpoint entry
 */
@Override
public Set<String> initialize() {
    try {
        checkpointFileCache.putAll(checkpointFile.read());
    } catch (final IOException e) {
        throw new StreamsException("Failed to read checkpoints for global state globalStores", e);
    }
    final Set<String> changelogTopics = new HashSet<>();
    for (final StateStore stateStore : topology.globalStateStores()) {
        // Collect the changelog topic backing each global store.
        final String sourceTopic = storeToChangelogTopic.get(stateStore.name());
        changelogTopics.add(sourceTopic);
        stateStore.init((StateStoreContext) globalProcessorContext, stateStore);
    }
    // make sure each topic-partition from checkpointFileCache is associated with a global state store
    checkpointFileCache.keySet().forEach(tp -> {
        if (!changelogTopics.contains(tp.topic())) {
            log.error(
                "Encountered a topic-partition in the global checkpoint file not associated with any global" +
                " state store, topic-partition: {}, checkpoint file: {}. If this topic-partition is no longer valid," +
                " an application reset and state store directory cleanup will be required.",
                tp.topic(),
                checkpointFile
            );
            throw new StreamsException("Encountered a topic-partition not associated with any global state store");
        }
    });
    return Collections.unmodifiableSet(globalStoreNames);
}
|
// Initialization must read the checkpoint file without deleting it.
@Test
public void shouldNotDeleteCheckpointFileAfterLoaded() throws IOException {
    writeCheckpoint();
    stateManager.initialize();
    assertTrue(checkpointFile.exists());
}
|
// Fluent setter; checkAsyncBackupCount validates the value against the
// current sync backupCount (combined limit) before it is stored.
public MapConfig setAsyncBackupCount(int asyncBackupCount) {
    this.asyncBackupCount = checkAsyncBackupCount(backupCount, asyncBackupCount);
    return this;
}
|
// Values above the allowed maximum must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void setAsyncBackupCount_whenTooLarge() {
    MapConfig config = new MapConfig();
    // max allowed is 6
    config.setAsyncBackupCount(200);
}
|
// Convenience overload of the 4-arg split with both option flags off.
// NOTE(review): the two false flags presumably disable trimming and
// empty-part removal — confirm against the 4-arg overload's signature.
public static List<String> split( String str, char delim ) {
    return split( str, delim, false, false );
}
|
// Exhaustive split coverage: empty input, 1-3 parts, empty parts at every
// position, whitespace preservation, and space/newline delimiters with runs
// of consecutive delimiters.
@Test
void split() {
    // empty
    assertEquals(
        Arrays.asList( "" ),
        StringUtils.split( "", ',' ) );
    // not empty
    assertEquals(
        Arrays.asList( "a" ),
        StringUtils.split( "a", ',' ) );
    assertEquals(
        Arrays.asList( "a", "b" ),
        StringUtils.split( "a,b", ',' ) );
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a,b,c", ',' ) );
    // empty parts
    assertEquals(
        Arrays.asList( "", "b", "c" ),
        StringUtils.split( ",b,c", ',' ) );
    assertEquals(
        Arrays.asList( "a", "", "c" ),
        StringUtils.split( "a,,c", ',' ) );
    assertEquals(
        Arrays.asList( "a", "b", "" ),
        StringUtils.split( "a,b,", ',' ) );
    // parts with leading/trailing spaces
    assertEquals(
        Arrays.asList( "a", " b", " c" ),
        StringUtils.split( "a, b, c", ',' ) );
    assertEquals(
        Arrays.asList( " a", "b", "c " ),
        StringUtils.split( " a,b,c ", ',' ) );
    assertEquals(
        Arrays.asList( " a", " b ", "c " ),
        StringUtils.split( " a, b ,c ", ',' ) );
    // space delimiter
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a b c", ' ' ) );
    // Fix: the input literal lost its repeated spaces (likely whitespace
    // collapse); "a   b  c" (3 then 2 spaces) is what yields the expected
    // empty parts, mirroring the "\n\n\n" case below.
    assertEquals(
        Arrays.asList( "a", "", "", "b", "", "c" ),
        StringUtils.split( "a   b  c", ' ' ) );
    // new line delimiter
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a\nb\nc", '\n' ) );
    assertEquals(
        Arrays.asList( "a", "", "", "b", "", "c" ),
        StringUtils.split( "a\n\n\nb\n\nc", '\n' ) );
}
|
// Deprecated overload: forwards to the Named variant with an auto-generated
// processor name.
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
        final String... stateStoreNames) {
    process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
}
|
// A null processor supplier must be rejected with an NPE carrying the exact
// parameter-name message, even when a name and store names are supplied.
@Test
public void shouldNotAllowNullProcessSupplierOnProcessValuesWithNamedAndStores() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.process((ProcessorSupplier<? super String, ? super String, Void, Void>) null,
            Named.as("processor"), "stateStore"));
    assertThat(exception.getMessage(), equalTo("processorSupplier can't be null"));
}
|
// Periodic task body: snapshots the jobs currently in progress (defensive
// copy) and refreshes each one's processing state/heartbeat.
@Override
protected void runTask() {
    LOGGER.debug("Updating currently processed jobs... ");
    convertAndProcessJobs(new ArrayList<>(backgroundJobServer.getJobSteward().getJobsInProgress()), this::updateCurrentlyProcessingJob);
}
|
// A job already in progress must be re-saved with an advanced heartbeat
// timestamp when the task runs, independent of new work being onboarded.
@Test
void evenWhenNoWorkCanBeOnboardedJobsThatAreProcessedAreBeingUpdatedWithAHeartbeat() {
    // GIVEN
    final Job job = anEnqueuedJob().withId().build();
    startProcessingJob(job);
    // WHEN
    runTask(task);
    // THEN
    verify(storageProvider).save(singletonList(job));
    ProcessingState processingState = job.getJobState();
    Assertions.assertThat(processingState.getUpdatedAt()).isAfter(processingState.getCreatedAt());
}
|
/**
 * Reads WebDAV attributes for a file via PROPFIND, enforcing that the
 * resource kind (collection vs. file) matches the requested path type.
 * When the server rejects PROPFIND (interoperability/conflict), falls back
 * to a HEAD request.
 *
 * @throws NotfoundException when the resource is missing or its kind
 *         contradicts the requested path type
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if (file.isRoot()) {
        // The root has no PROPFIND-able attributes of its own.
        return PathAttributes.EMPTY;
    }
    try {
        try {
            for (final DavResource resource : this.list(file)) {
                if (resource.isDirectory()) {
                    // Server says collection but the caller asked for a file.
                    if (!file.getType().contains(Path.Type.directory)) {
                        throw new NotfoundException(String.format("File %s has set MIME type %s",
                            file.getAbsolute(), DavResource.HTTPD_UNIX_DIRECTORY_CONTENT_TYPE));
                    }
                }
                else {
                    // Server says plain resource but the caller asked for a directory.
                    if (!file.getType().contains(Path.Type.file)) {
                        throw new NotfoundException(String.format("File %s has set MIME type %s",
                            file.getAbsolute(), resource.getContentType()));
                    }
                }
                // Only the first listed resource is relevant.
                return this.toAttributes(resource);
            }
            // Empty listing: resource does not exist.
            throw new NotfoundException(file.getAbsolute());
        }
        catch (SardineException e) {
            try {
                throw new DAVExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            }
            catch (InteroperabilityException | ConflictException i) {
                // PROPFIND Method not allowed
                if (log.isWarnEnabled()) {
                    log.warn(String.format("Failure with PROPFIND request for %s. %s", file, i.getMessage()));
                }
                // Fall back to a HEAD request for basic attributes.
                final PathAttributes attr = this.head(file);
                if (PathAttributes.EMPTY == attr) {
                    throw i;
                }
                return attr;
            }
        }
    }
    catch (SardineException e) {
        throw new DAVExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
    catch (IOException e) {
        throw new HttpExceptionMappingService().map(e, file);
    }
}
|
// Creates a zero-byte file, verifies size/mtime/ETag attributes are found,
// and checks that querying the same path as a directory is rejected.
@Test
public void testFindFile() throws Exception {
    final Path test = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(),
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final DAVAttributesFinderFeature f = new DAVAttributesFinderFeature(session);
    final PathAttributes attributes = f.find(test);
    assertEquals(0L, attributes.getSize());
    assertNotEquals(-1L, attributes.getModificationDate());
    assertNotNull(attributes.getETag());
    // Test wrong type
    try {
        f.find(new Path(test.getAbsolute(), EnumSet.of(Path.Type.directory)));
        fail();
    }
    catch (NotfoundException e) {
        // Expected
    }
    finally {
        // Clean up the created file regardless of the outcome.
        new DAVDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    }
}
|
/**
 * Deletes a destination-filter rule scoped to the given stream and returns
 * the deleted DTO. A cluster event is only posted when a document was
 * actually removed (the id may exist but belong to a different stream).
 *
 * @throws IllegalArgumentException when no document with the id exists at all
 */
public StreamDestinationFilterRuleDTO deleteFromStream(String streamId, String id) {
    // Fetch first so the caller gets the deleted document back.
    final var dto = utils.getById(id)
        .orElseThrow(() -> new IllegalArgumentException(f("Couldn't find document with ID <%s> for deletion", id)));
    if (collection.deleteOne(and(eq(FIELD_STREAM_ID, streamId), idEq(id))).getDeletedCount() > 0) {
        clusterEventBus.post(StreamDestinationFilterDeletedEvent.of(id));
    }
    return dto;
}
|
// Deleting a non-existent rule id must fail with IllegalArgumentException
// whose message names the offending id.
@Test
void deleteFromStreamWithInvalidID() {
    assertThatThrownBy(() -> service.deleteFromStream("54e3deadbeefdeadbeef1000", "54e3deadbeefdeadbeef9999"))
        .hasMessageContaining("54e3deadbeefdeadbeef9999")
        .isInstanceOf(IllegalArgumentException.class);
}
|
/**
 * Operator lifecycle hook: registers the maintenance metrics counters
 * (rate-limiter, concurrency-throttle, nothing-to-trigger, plus one trigger
 * counter per task), declares the keyed state used across checkpoints, and
 * opens the table loader.
 */
@Override
public void open(Configuration parameters) throws Exception {
    // Shared counters live under the default metrics group.
    this.rateLimiterTriggeredCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
    this.concurrentRunThrottledCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
    this.nothingToTriggerCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
    // Per-task trigger counters, grouped by the task name; list order
    // mirrors taskNames.
    this.triggerCounters =
        taskNames.stream()
            .map(
                name ->
                    getRuntimeContext()
                        .getMetricGroup()
                        .addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
                        .counter(TableMaintenanceMetrics.TRIGGERED))
            .collect(Collectors.toList());
    // Keyed state: next evaluation time, accumulated table changes, and the
    // last trigger timestamp per task.
    this.nextEvaluationTimeState =
        getRuntimeContext()
            .getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
    this.accumulatedChangesState =
        getRuntimeContext()
            .getListState(
                new ListStateDescriptor<>(
                    "triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
    this.lastTriggerTimesState =
        getRuntimeContext()
            .getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
    tableLoader.open();
}
|
// With a trigger threshold of 3 eq-delete records, the manager must fire each
// time the accumulated count reaches/exceeds 3 and then reset the window.
@Test
void testEqDeleteRecordCount() throws Exception {
    TriggerManager manager =
        manager(
            sql.tableLoader(TABLE_NAME),
            new TriggerEvaluator.Builder().eqDeleteRecordCount(3).build());
    try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
            harness(manager)) {
        testHarness.open();
        // 1 < 3: no trigger yet; 1+2 = 3: first trigger; 5 >= 3: second.
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(1L).build(), 0);
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(2L).build(), 1);
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(5L).build(), 2);
        // No trigger in this case
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(1L).build(), 2);
        addEventAndCheckResult(testHarness, TableChange.builder().eqDeleteRecordCount(2L).build(), 3);
    }
}
|
/**
 * Creates the logical plan for the given analyzed query.
 *
 * @param analysis analysis result, which also carries the original statement
 * @return the root node of the generated logical plan
 */
public PlanNode plan(Analysis analysis)
{
    return planStatement(analysis, analysis.getStatement());
}
|
/**
 * Verifies that textually identical scalar subqueries are deduplicated by the
 * planner: each distinct subquery should contribute exactly one
 * {@code EnforceSingleRowNode}, no matter how many times it appears.
 */
@Test
public void testSameScalarSubqueryIsAppliedOnlyOnce()
{
    // three subqueries with two duplicates (coerced to two different types), only two scalar joins should be in plan
    assertEquals(
            countOfMatchingNodes(
                    plan("SELECT * FROM orders WHERE CAST(orderkey AS INTEGER) = (SELECT 1) AND custkey = (SELECT 2) AND CAST(custkey as REAL) != (SELECT 1)"),
                    EnforceSingleRowNode.class::isInstance),
            2);
    // same query used for left, right and complex join condition
    assertEquals(
            countOfMatchingNodes(
                    plan("SELECT * FROM orders o1 JOIN orders o2 ON o1.orderkey = (SELECT 1) AND o2.orderkey = (SELECT 1) AND o1.orderkey + o2.orderkey = (SELECT 2)"),
                    EnforceSingleRowNode.class::isInstance),
            2);
}
|
/**
 * Masks a password by replacing every character with {@code '*'}.
 *
 * @param password the clear-text password; may be {@code null} or blank
 * @return a mask of the same length, or an empty string for blank input
 */
public static String password(String password) {
    // Blank (null / empty / whitespace-only) input yields an empty mask.
    return StrUtil.isBlank(password) ? StrUtil.EMPTY : StrUtil.repeat('*', password.length());
}
|
/**
 * A ten-character password must be masked to exactly ten asterisks.
 */
@Test
public void passwordTest() {
    assertEquals("**********", DesensitizedUtil.password("1234567890"));
}
|
/**
 * Handles a DeleteGroups request for the given group IDs.
 *
 * <p>For each group that passes deletion validation, all of its offsets are
 * tombstoned first and the group-metadata tombstones are appended afterwards
 * (the record order matters for replay). Groups that fail validation are still
 * reported in the result collection, carrying the mapped error code, so the
 * response always contains one entry per requested group.
 *
 * @param context the request context (currently unused here)
 * @param groupIds the IDs of the groups to delete
 * @return a result holding the tombstone records to append and the per-group
 *         outcome collection
 * @throws ApiException if an unexpected API error escapes the per-group handling
 */
public CoordinatorResult<DeleteGroupsResponseData.DeletableGroupResultCollection, CoordinatorRecord> deleteGroups(
    RequestContext context,
    List<String> groupIds
) throws ApiException {
    final DeleteGroupsResponseData.DeletableGroupResultCollection resultCollection =
        new DeleteGroupsResponseData.DeletableGroupResultCollection(groupIds.size());
    final List<CoordinatorRecord> records = new ArrayList<>();
    int numDeletedOffsets = 0;
    final List<String> deletedGroups = new ArrayList<>();
    for (String groupId : groupIds) {
        try {
            // Validation must happen before any records are generated for this group.
            groupMetadataManager.validateDeleteGroup(groupId);
            // Offset tombstones first, then group tombstones (replay order).
            numDeletedOffsets += offsetMetadataManager.deleteAllOffsets(groupId, records);
            groupMetadataManager.createGroupTombstoneRecords(groupId, records);
            deletedGroups.add(groupId);
            resultCollection.add(
                new DeleteGroupsResponseData.DeletableGroupResult()
                    .setGroupId(groupId)
            );
        } catch (ApiException exception) {
            // Report the failure for this group; continue with the remaining ones.
            resultCollection.add(
                new DeleteGroupsResponseData.DeletableGroupResult()
                    .setGroupId(groupId)
                    .setErrorCode(Errors.forException(exception).code())
            );
        }
    }
    log.info("The following groups were deleted: {}. A total of {} offsets were removed.",
        String.join(", ", deletedGroups),
        numDeletedOffsets
    );
    return new CoordinatorResult<>(records, resultCollection);
}
|
/**
 * Verifies that {@code deleteGroups} validates each group, collects offset
 * tombstones before group-metadata tombstones, and returns one successful
 * result entry per group, in request order.
 */
@Test
public void testDeleteGroups() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        mock(CoordinatorMetrics.class),
        mock(CoordinatorMetricsShard.class)
    );
    RequestContext context = requestContext(ApiKeys.DELETE_GROUPS);
    List<String> groupIds = Arrays.asList("group-id-1", "group-id-2");
    // Expected: per group, the offset tombstone precedes the metadata tombstone.
    DeleteGroupsResponseData.DeletableGroupResultCollection expectedResultCollection = new DeleteGroupsResponseData.DeletableGroupResultCollection();
    List<CoordinatorRecord> expectedRecords = new ArrayList<>();
    for (String groupId : groupIds) {
        expectedResultCollection.add(new DeleteGroupsResponseData.DeletableGroupResult().setGroupId(groupId));
        expectedRecords.addAll(Arrays.asList(
            GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(groupId, "topic-name", 0),
            GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId)
        ));
    }
    CoordinatorResult<DeleteGroupsResponseData.DeletableGroupResultCollection, CoordinatorRecord> expectedResult = new CoordinatorResult<>(
        expectedRecords,
        expectedResultCollection
    );
    // Stub the managers to append their tombstones into the shared record list.
    when(offsetMetadataManager.deleteAllOffsets(anyString(), anyList())).thenAnswer(invocation -> {
        String groupId = invocation.getArgument(0);
        List<CoordinatorRecord> records = invocation.getArgument(1);
        records.add(GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(groupId, "topic-name", 0));
        return 1;
    });
    // Mockito#when only stubs method returning non-void value, so we use Mockito#doAnswer instead.
    doAnswer(invocation -> {
        String groupId = invocation.getArgument(0);
        List<CoordinatorRecord> records = invocation.getArgument(1);
        records.add(GroupCoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId));
        return null;
    }).when(groupMetadataManager).createGroupTombstoneRecords(anyString(), anyList());
    CoordinatorResult<DeleteGroupsResponseData.DeletableGroupResultCollection, CoordinatorRecord> coordinatorResult =
        coordinator.deleteGroups(context, groupIds);
    // Each group must be validated and tombstoned exactly once.
    for (String groupId : groupIds) {
        verify(groupMetadataManager, times(1)).validateDeleteGroup(ArgumentMatchers.eq(groupId));
        verify(groupMetadataManager, times(1)).createGroupTombstoneRecords(ArgumentMatchers.eq(groupId), anyList());
        verify(offsetMetadataManager, times(1)).deleteAllOffsets(ArgumentMatchers.eq(groupId), anyList());
    }
    assertEquals(expectedResult, coordinatorResult);
}
|
/**
 * Returns the UTF-8 decoded value of the last header stored under {@code key},
 * or {@code null} when no such header exists or its value is null.
 */
@Nullable static String lastStringHeader(Headers headers, String key) {
    Header header = headers.lastHeader(key);
    if (header == null) return null;
    byte[] value = header.value();
    return value == null ? null : new String(value, UTF_8);
}
|
/** A single-byte header value must decode to its UTF-8 string form. */
@Test void lastStringHeader() {
    record.headers().add("b3", new byte[] {'1'});
    assertThat(KafkaHeaders.lastStringHeader(record.headers(), "b3"))
        .isEqualTo("1");
}
|
/**
 * Concatenates the string forms of the given elements, inserting
 * {@code separator} between consecutive elements.
 *
 * @param separator the separator placed between elements (not before the
 *        first or after the last)
 * @param strings the elements to join; each is converted via
 *        {@code toString()}, so a null element causes a NullPointerException
 * @return the joined string, or an empty string for an empty iterable
 */
public static String join(CharSequence separator, Iterable<?> strings) {
    StringBuilder joined = new StringBuilder();
    boolean first = true;
    for (Object element : strings) {
        if (!first) {
            joined.append(separator);
        }
        // Explicit toString() keeps the original NPE behavior for null elements.
        joined.append(element.toString());
        first = false;
    }
    return joined.toString();
}
|
/**
 * Exercises both the CharSequence- and char-separator overloads of
 * {@code StringUtils.join} on sublists of size 0..3.
 */
@Test (timeout = 30000)
public void testJoin() {
    List<String> s = new ArrayList<String>();
    s.add("a");
    s.add("b");
    s.add("c");
    // Empty and singleton inputs: no separator should appear.
    assertEquals("", StringUtils.join(":", s.subList(0, 0)));
    assertEquals("a", StringUtils.join(":", s.subList(0, 1)));
    assertEquals("", StringUtils.join(':', s.subList(0, 0)));
    assertEquals("a", StringUtils.join(':', s.subList(0, 1)));
    assertEquals("a:b", StringUtils.join(":", s.subList(0, 2)));
    assertEquals("a:b:c", StringUtils.join(":", s.subList(0, 3)));
    assertEquals("a:b", StringUtils.join(':', s.subList(0, 2)));
    assertEquals("a:b:c", StringUtils.join(':', s.subList(0, 3)));
}
|
/**
 * Converts a telemetry JSON payload into key-value entries.
 *
 * <p>Accepts either a single JSON object or an array of JSON objects; every
 * object is handed to {@code parseObject}. Any other JSON shape (primitive,
 * null, or a non-object array element) is rejected.
 *
 * @param jsonElement the parsed telemetry payload
 * @param systemTs server-side timestamp applied when the payload has none
 * @param result map of timestamp to parsed entries, filled by parseObject
 * @param builder protobuf builder accumulating the telemetry message
 * @throws JsonSyntaxException if the payload is not an object or array of objects
 */
private static void convertToTelemetry(JsonElement jsonElement, long systemTs, Map<Long, List<KvEntry>> result, PostTelemetryMsg.Builder builder) {
    if (jsonElement.isJsonObject()) {
        parseObject(systemTs, result, builder, jsonElement.getAsJsonObject());
        return;
    }
    if (!jsonElement.isJsonArray()) {
        throw new JsonSyntaxException(CAN_T_PARSE_VALUE + jsonElement);
    }
    for (JsonElement element : jsonElement.getAsJsonArray()) {
        if (!element.isJsonObject()) {
            // Arrays may only contain objects; anything else is malformed input.
            throw new JsonSyntaxException(CAN_T_PARSE_VALUE + element);
        }
        parseObject(systemTs, result, builder, element.getAsJsonObject());
    }
}
|
/**
 * A scientific-notation number with no fractional part (1E+1) must be
 * converted to the long value 10, not to a double.
 */
@Test
public void testParseBigDecimalAsLong() {
    var result = JsonConverter.convertToTelemetry(JsonParser.parseString("{\"meterReadingDelta\": 1E+1}"), 0L);
    Assertions.assertEquals(10L, result.get(0L).get(0).getLongValue().get().longValue());
}
|
/**
 * Fills {@code buf} from {@code f}, staging bytes through the caller-supplied
 * {@code temp} array (needed because direct buffers have no backing array).
 *
 * <p>Reads stop at the first read that does not return a full chunk, so this
 * copies "all the bytes that return immediately".
 *
 * @param f the stream to read from
 * @param buf the destination buffer; advanced by the number of bytes copied
 * @param temp scratch array bounding the per-read chunk size
 * @return the number of bytes copied, or -1 if the stream was already at EOF
 * @throws IOException if the underlying read fails
 */
static int readDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException {
    int total = 0;
    // Never read more than the buffer can still hold.
    int chunk = Math.min(buf.remaining(), temp.length);
    int read;
    while ((read = f.read(temp, 0, chunk)) == temp.length) {
        buf.put(temp);
        total += read;
        chunk = Math.min(buf.remaining(), temp.length);
    }
    if (read < 0) {
        // EOF: report -1 only when nothing at all was copied in this call.
        return total == 0 ? -1 : total;
    }
    // Final short read: copy the partial chunk.
    buf.put(temp, 0, read);
    return total + read;
}
|
/**
 * Reading into a direct buffer must honor the buffer's position and limit:
 * the stream (7 bytes per call here) fills positions 5..12, a second call
 * tops up to the limit (13), and a third call copies nothing.
 */
@Test
public void testDirectPositionAndLimit() throws Exception {
    ByteBuffer readBuffer = ByteBuffer.allocateDirect(20);
    readBuffer.position(5);
    readBuffer.limit(13);
    readBuffer.mark();
    MockInputStream stream = new MockInputStream(7);
    // First call: 7 bytes available, 8 bytes of room -> all 7 are copied.
    int len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(7, len);
    Assert.assertEquals(12, readBuffer.position());
    Assert.assertEquals(13, readBuffer.limit());
    // Second call: only 1 byte of room left before the limit.
    len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(1, len);
    Assert.assertEquals(13, readBuffer.position());
    Assert.assertEquals(13, readBuffer.limit());
    // Third call: buffer is full, nothing can be copied.
    len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(0, len);
    readBuffer.reset();
    Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 8), readBuffer);
}
|
/**
 * Computes expected shortfall (ES) for a single position using log returns.
 *
 * @param series the bar series the position was taken on
 * @param position the position to evaluate; open or null positions score zero
 * @return the expected shortfall at the configured confidence level
 */
@Override
public Num calculate(BarSeries series, Position position) {
    boolean isClosedPosition =
            position != null && position.getEntry() != null && position.getExit() != null;
    if (!isClosedPosition) {
        // Nothing to evaluate without both an entry and an exit trade.
        return series.zero();
    }
    return calculateES(new Returns(series, position, Returns.ReturnType.LOG), confidence);
}
|
/**
 * A trading record consisting only of winning positions has no losses in its
 * return distribution, so the expected shortfall must be zero.
 */
@Test
public void calculateOnlyWithGainPositions() {
    series = new MockBarSeries(numFunction, 100d, 105d, 106d, 107d, 108d, 115d);
    // Two positions, both closed at a gain (100 -> 106 and 107 -> 115).
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
            Trade.buyAt(3, series), Trade.sellAt(5, series));
    AnalysisCriterion varCriterion = getCriterion();
    assertNumEquals(numOf(0.0), varCriterion.calculate(series, tradingRecord));
}
|
/**
 * Sets the telnet command handlers for this provider.
 *
 * @param telnet the telnet handler name(s) to expose
 * @return this builder, for call chaining
 */
public ProviderBuilder telnet(String telnet) {
    this.telnet = telnet;
    return getThis();
}
|
/** The telnet handler name set on the builder must survive into the built config. */
@Test
void telnet() {
    ProviderBuilder builder = ProviderBuilder.newBuilder();
    builder.telnet("mocktelnethandler");
    Assertions.assertEquals("mocktelnethandler", builder.build().getTelnet());
}
|
/**
 * Returns the package portion of {@code className} — everything before the
 * last dot — or an empty string for a default-package (dot-free) name.
 */
public String getPackageWithoutClassName() {
    int lastDot = className.lastIndexOf('.');
    // A name without any dot has no package qualifier.
    return lastDot < 0 ? "" : className.substring(0, lastDot);
}
|
/**
 * "com.Test" should yield the package "com".
 */
@Test
public void getPackageWithoutClassName() {
    // NOTE(review): helper arguments appear to be (label, className, expectedPackage) — confirm against its definition.
    commonGetPackageWithoutClassName("test", "com.Test", "com");
}
|
/**
 * Dispatches a named utility function call based on the argument count and
 * method name.
 *
 * @param methodName the utility function to invoke
 * @param args the evaluated arguments
 * @return the function result
 * @throws UnsupportedOperationException for any unknown name/arity combination
 */
@Override
public SelType call(String methodName, SelType[] args) {
    // Dispatch on arity first, then on the method name within that arity.
    switch (args.length) {
        case 1:
            if ("dateIntToTs".equals(methodName)) {
                return dateIntToTs(args[0]);
            }
            if ("tsToDateInt".equals(methodName)) {
                return tsToDateInt(args[0]);
            }
            break;
        case 2:
            if ("incrementDateInt".equals(methodName)) {
                return incrementDateInt(args[0], args[1]);
            }
            if ("timeoutForDateTimeDeadline".equals(methodName)) {
                return timeoutForDateTimeDeadline(args[0], args[1]);
            }
            if ("timeoutForDateIntDeadline".equals(methodName)) {
                return timeoutForDateIntDeadline(args[0], args[1]);
            }
            break;
        case 3:
            if ("dateIntsBetween".equals(methodName)) {
                return dateIntsBetween(args[0], args[1], args[2]);
            }
            if ("intsBetween".equals(methodName)) {
                return intsBetween(args[0], args[1], args[2]);
            }
            break;
        case 5:
            if ("dateIntHourToTs".equals(methodName)) {
                return dateIntHourToTs(args);
            }
            break;
        default:
            break;
    }
    throw new UnsupportedOperationException(
        type()
            + " DO NOT support calling method: "
            + methodName
            + " with args: "
            + Arrays.toString(args));
}
|
/**
 * timeoutForDateTimeDeadline requires two arguments; calling it with one must
 * be rejected as an unsupported method/arity combination.
 */
@Test(expected = UnsupportedOperationException.class)
public void testCallTimeoutForDateTimeDeadlineInvalid() {
    SelUtilFunc.INSTANCE.call("timeoutForDateTimeDeadline", new SelType[] {SelString.of("1 day")});
}
|
/**
 * Fetches record keys with their row positions from the given file, using the
 * table's default key extraction (no custom key generator).
 *
 * @param storage the storage abstraction to read through
 * @param filePath the data file to scan
 * @return pairs of Hoodie key and the record's position within the file
 */
@Override
public List<Pair<HoodieKey, Long>> fetchRecordKeysWithPositions(HoodieStorage storage, StoragePath filePath) {
    return fetchRecordKeysWithPositions(storage, filePath, Option.empty());
}
|
/**
 * Writes a parquet file whose keys come from virtual key fields ("abc"/"def")
 * and verifies that reading it back with a matching key generator recovers
 * every record key with the expected partition path.
 */
@Test
public void testFetchRecordKeyPartitionPathVirtualKeysFromParquet() throws Exception {
    List<String> rowKeys = new ArrayList<>();
    List<HoodieKey> expected = new ArrayList<>();
    String partitionPath = "path1";
    for (int i = 0; i < 1000; i++) {
        String rowKey = UUID.randomUUID().toString();
        rowKeys.add(rowKey);
        expected.add(new HoodieKey(rowKey, partitionPath));
    }
    String filePath = Paths.get(basePath, "test.parquet").toUri().toString();
    Schema schema = getSchemaWithFields(Arrays.asList(new String[] {"abc", "def"}));
    // Virtual keys: the meta fields are not populated, keys live in "abc"/"def".
    writeParquetFile(BloomFilterTypeCode.SIMPLE.name(), filePath, rowKeys, schema, true, partitionPath,
        false, "abc", "def");
    // Read and verify
    List<Pair<HoodieKey, Long>> fetchedRows = parquetUtils.fetchRecordKeysWithPositions(
        HoodieTestUtils.getStorage(filePath), new StoragePath(filePath),
        Option.of(new TestBaseKeyGen("abc", "def")));
    assertEquals(rowKeys.size(), fetchedRows.size(), "Total count does not match");
    for (Pair<HoodieKey, Long> entry : fetchedRows) {
        assertTrue(expected.contains(entry.getLeft()), "Record key must be in the given filter");
    }
}
|
/**
 * Extracts the entire jar into the target directory (every entry matches
 * {@code MATCH_ANY}).
 *
 * @param jarFile the jar to extract
 * @param toDir the directory to extract into
 * @throws IOException if extraction fails or an entry would escape {@code toDir}
 */
public void unJar(File jarFile, File toDir) throws IOException {
    unJar(jarFile, toDir, MATCH_ANY);
}
|
/**
 * A jar containing an entry with a path-traversal name ("../outside.path")
 * must be rejected by both the File- and InputStream-based unJar overloads,
 * preventing files from being created outside the extraction directory.
 */
@Test
public void testUnJar2() throws IOException {
    // make a simple zip
    File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
    JarOutputStream jstream =
        new JarOutputStream(new FileOutputStream(jarFile));
    JarEntry je = new JarEntry("META-INF/MANIFEST.MF");
    byte[] data = "Manifest-Version: 1.0\nCreated-By: 1.8.0_1 (Manual)"
        .getBytes(StandardCharsets.UTF_8);
    je.setSize(data.length);
    jstream.putNextEntry(je);
    jstream.write(data);
    jstream.closeEntry();
    // Malicious entry whose name climbs out of the extraction directory.
    je = new JarEntry("../outside.path");
    data = "any data here".getBytes(StandardCharsets.UTF_8);
    je.setSize(data.length);
    jstream.putNextEntry(je);
    jstream.write(data);
    jstream.closeEntry();
    jstream.close();
    File unjarDir = getUnjarDir("unjar-path");
    // Unjar everything
    try {
        RunJar.unJar(jarFile, unjarDir, MATCH_ANY);
        fail("unJar should throw IOException.");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains(
            "would create file outside of", e);
    }
    // The stream-based overload must enforce the same protection.
    try {
        RunJar.unJar(new FileInputStream(jarFile), unjarDir, MATCH_ANY);
        fail("unJar should throw IOException.");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains(
            "would create file outside of", e);
    }
}
|
/**
 * Replaces any existing header(s) under {@code key} with a single header whose
 * value is the UTF-8 encoding of {@code value}.
 */
static void replaceHeader(Headers headers, String key, String value) {
    byte[] encoded = value.getBytes(UTF_8);
    // Remove first so only the new value remains under this key.
    headers.remove(key);
    headers.add(key, encoded);
}
|
/** Replacing an existing header must overwrite its previous (empty) value. */
@Test void replaceHeader_replace() {
    record.headers().add("b3", new byte[0]);
    KafkaHeaders.replaceHeader(record.headers(), "b3", "1");
    assertThat(record.headers().lastHeader("b3").value())
        .containsExactly('1');
}
|
/**
 * Runs SCM blame for all committed files in the analysis, choosing between
 * the JGit-based and native-git strategies based on available processors and
 * the number of files to blame.
 *
 * <p>Returns without output when the clone is invalid (e.g. shallow clone),
 * in which case no blame information is produced.
 *
 * @param input provides the file system and the files to blame
 * @param output receives per-file blame results
 */
@Override
public void blame(BlameInput input, BlameOutput output) {
    File basedir = input.fileSystem().baseDir();
    // try-with-resources: the repository handle must be closed after blaming.
    try (Repository repo = JGitUtils.buildRepository(basedir.toPath())) {
        File gitBaseDir = repo.getWorkTree();
        if (cloneIsInvalid(gitBaseDir)) {
            return;
        }
        Profiler profiler = Profiler.create(LOG);
        profiler.startDebug("Collecting committed files");
        // Only files tracked by git can be blamed; map them by git-relative path.
        Map<String, InputFile> inputFileByGitRelativePath = getCommittedFilesToBlame(repo, gitBaseDir, input);
        profiler.stopDebug();
        BlameAlgorithmEnum blameAlgorithmEnum = this.blameStrategy.getBlameAlgorithm(Runtime.getRuntime().availableProcessors(), inputFileByGitRelativePath.size());
        LOG.debug("Using {} strategy to blame files", blameAlgorithmEnum);
        if (blameAlgorithmEnum == GIT_FILES_BLAME) {
            blameWithFilesGitCommand(output, repo, inputFileByGitRelativePath);
        } else {
            blameWithNativeGitCommand(output, repo, inputFileByGitRelativePath, gitBaseDir);
        }
    }
}
|
/**
 * Blaming a project that lives in a nested directory of the git work tree
 * must still resolve the file against the repository root and attribute all
 * 26 lines to the known commit, for every blame strategy.
 */
@Test
@UseDataProvider("blameAlgorithms")
public void blame_on_nested_module(BlameAlgorithmEnum strategy) throws IOException {
    CompositeBlameCommand blameCommand = new CompositeBlameCommand(analysisWarnings, pathResolver, jGitBlameCommand, nativeGitBlameCommand, (p, f) -> strategy);
    File projectDir = createNewTempFolder();
    javaUnzip("dummy-git-nested.zip", projectDir);
    // The module base dir is below the repository root.
    File baseDir = new File(projectDir, "dummy-git-nested/dummy-project");
    DefaultFileSystem fs = new DefaultFileSystem(baseDir);
    when(input.fileSystem()).thenReturn(fs);
    DefaultInputFile inputFile = new TestInputFileBuilder("foo", DUMMY_JAVA)
        .setModuleBaseDir(baseDir.toPath())
        .build();
    fs.add(inputFile);
    BlameCommand.BlameOutput blameResult = mock(BlameCommand.BlameOutput.class);
    when(input.filesToBlame()).thenReturn(List.of(inputFile));
    blameCommand.blame(input, blameResult);
    // Every line of the file belongs to this single known commit.
    Date revisionDate = DateUtils.parseDateTime("2012-07-17T16:12:48+0200");
    String revision = "6b3aab35a3ea32c1636fee56f996e677653c48ea";
    String author = "david@gageot.net";
    verify(blameResult).blameResult(inputFile,
        IntStream.range(0, 26)
            .mapToObj(i -> new BlameLine().revision(revision).date(revisionDate).author(author))
            .collect(Collectors.toList()));
}
|
/**
 * Serves one mock HTTP request: answers CORS pre-flights directly, strips the
 * configured path prefix, then evaluates each mock feature's scenarios in
 * order and builds the response from the first scenario whose expression
 * matches the request. Returns 404 when no scenario matches.
 *
 * <p>Synchronized because scenario engines and the shared {@code globals}
 * state are not safe for concurrent request handling.
 *
 * @param req the incoming mock request
 * @return the response produced by the matching scenario, or 404
 */
@Override
public synchronized Response handle(Request req) { // note the [synchronized]
    if (corsEnabled && "OPTIONS".equals(req.getMethod())) {
        Response response = new Response(200);
        response.setHeader("Allow", ALLOWED_METHODS);
        response.setHeader("Access-Control-Allow-Origin", "*");
        response.setHeader("Access-Control-Allow-Methods", ALLOWED_METHODS);
        List<String> requestHeaders = req.getHeaderValues("Access-Control-Request-Headers");
        if (requestHeaders != null) {
            response.setHeader("Access-Control-Allow-Headers", requestHeaders);
        }
        return response;
    }
    if (prefix != null && req.getPath().startsWith(prefix)) {
        req.setPath(req.getPath().substring(prefix.length()));
    }
    // rare case when http-client is active within same jvm
    // snapshot existing thread-local to restore
    ScenarioEngine prevEngine = ScenarioEngine.get();
    for (Map.Entry<Feature, ScenarioRuntime> entry : scenarioRuntimes.entrySet()) {
        Feature feature = entry.getKey();
        ScenarioRuntime runtime = entry.getValue();
        // important for graal to work properly
        Thread.currentThread().setContextClassLoader(runtime.featureRuntime.suite.classLoader);
        LOCAL_REQUEST.set(req);
        req.processBody();
        ScenarioEngine engine = initEngine(runtime, globals, req);
        for (FeatureSection fs : feature.getSections()) {
            if (fs.isOutline()) {
                runtime.logger.warn("skipping scenario outline - {}:{}", feature, fs.getScenarioOutline().getLine());
                break;
            }
            Scenario scenario = fs.getScenario();
            if (isMatchingScenario(scenario, engine)) {
                Map<String, Object> configureHeaders;
                Variable response, responseStatus, responseHeaders, responseDelay;
                ScenarioActions actions = new ScenarioActions(engine);
                Result result = executeScenarioSteps(feature, runtime, scenario, actions);
                engine.mockAfterScenario();
                configureHeaders = engine.mockConfigureHeaders();
                // pull the response-building variables out of the engine state
                response = engine.vars.remove(ScenarioEngine.RESPONSE);
                responseStatus = engine.vars.remove(ScenarioEngine.RESPONSE_STATUS);
                responseHeaders = engine.vars.remove(ScenarioEngine.RESPONSE_HEADERS);
                responseDelay = engine.vars.remove(RESPONSE_DELAY);
                // persist scenario-local variables for subsequent requests
                globals.putAll(engine.shallowCloneVariables());
                Response res = new Response(200);
                if (result.isFailed()) {
                    response = new Variable(result.getError().getMessage());
                    responseStatus = new Variable(500);
                } else {
                    if (corsEnabled) {
                        res.setHeader("Access-Control-Allow-Origin", "*");
                    }
                    res.setHeaders(configureHeaders);
                    if (responseHeaders != null && responseHeaders.isMap()) {
                        res.setHeaders(responseHeaders.getValue());
                    }
                    if (responseDelay != null) {
                        res.setDelay(responseDelay.getAsInt());
                    }
                }
                if (response != null && !response.isNull()) {
                    res.setBody(response.getAsByteArray());
                    if (res.getContentType() == null) {
                        // infer content type from the response value when not set
                        ResourceType rt = ResourceType.fromObject(response.getValue());
                        if (rt != null) {
                            res.setContentType(rt.contentType);
                        }
                    }
                }
                if (responseStatus != null) {
                    res.setStatus(responseStatus.getAsInt());
                }
                if (prevEngine != null) {
                    ScenarioEngine.set(prevEngine);
                }
                if (mockInterceptor != null) {
                    mockInterceptor.intercept(req, res, scenario);
                }
                return res;
            }
        }
    }
    logger.warn("no scenarios matched, returning 404: {}", req); // NOTE: not logging with engine.logger
    if (prevEngine != null) {
        ScenarioEngine.set(prevEngine);
    }
    return new Response(404);
}
|
/**
 * A path parameter containing special (including intentionally ill-formed)
 * characters must be passed through to the scenario and echoed back verbatim.
 */
@Test
void testUrlWithSpecialCharacters() {
    background().scenario(
        "pathMatches('/hello/{raw}')",
        "def response = pathParams.raw"
    );
    // The literal deliberately contains a malformed character to exercise decoding.
    request.path("/hello/�Ill~Formed@RequiredString!");
    handle();
    match(response.getBodyAsString(), "�Ill~Formed@RequiredString!");
}
|
/**
 * Returns the number of digits to the right of the decimal point for the
 * given column, delegating to the underlying JDBC result set metadata scale.
 *
 * @param columnIndex 1-based column index
 * @return the column's scale
 * @throws SQLException if the underlying metadata access fails
 */
@Override
public int getDecimals(final int columnIndex) throws SQLException {
    return resultSetMetaData.getScale(columnIndex);
}
|
/** The decimals of column 1 must mirror the mocked metadata scale (0). */
@Test
void assertGetDecimals() throws SQLException {
    assertThat(queryResultMetaData.getDecimals(1), is(0));
}
|
/**
 * Returns the array's elements as a new list.
 *
 * @return a defensive copy; mutating it does not affect this COSArray
 */
public List<? extends COSBase> toList()
{
    return new ArrayList<>(objects);
}
|
/**
 * toList must preserve size and element order of the source COSArray.
 */
@Test
void testToList()
{
    COSArray cosArray = COSArray
            .ofCOSIntegers(Arrays.asList(0, 1, 2, 3, 4, 5));
    List<? extends COSBase> list = cosArray.toList();
    assertEquals(6, list.size());
    // First and last elements confirm order is retained.
    assertEquals(COSInteger.get(0), list.get(0));
    assertEquals(COSInteger.get(5), list.get(5));
}
|
/**
 * Captures the enumerator's checkpoint state: last enumerated position,
 * pending split assignments, and the enumeration history snapshot.
 *
 * @param checkpointId the checkpoint being taken (unused; state is positional)
 * @return the state to persist for this checkpoint
 */
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
    return new IcebergEnumeratorState(
        enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
}
|
/**
 * When a reader that requested a split disappears before discovery completes,
 * the discovered split must be kept as pending (not assigned), and must be
 * assigned once the reader re-registers and requests again.
 */
@Test
public void testRequestingReaderUnavailableWhenSplitDiscovered() throws Exception {
    TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
        new TestingSplitEnumeratorContext<>(4);
    ScanContext scanContext =
        ScanContext.builder()
            .streaming(true)
            .startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
            .build();
    ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0);
    ContinuousIcebergEnumerator enumerator =
        createEnumerator(enumeratorContext, scanContext, splitPlanner);
    // register one reader, and let it request a split
    enumeratorContext.registerReader(2, "localhost");
    enumerator.addReader(2);
    enumerator.handleSourceEvent(2, new SplitRequestEvent());
    // remove the reader (like in a failure)
    enumeratorContext.registeredReaders().remove(2);
    // make one split available and trigger the periodic discovery
    List<IcebergSourceSplit> splits =
        SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1);
    assertThat(splits).hasSize(1);
    splitPlanner.addSplits(splits);
    enumeratorContext.triggerAllActions();
    // The split must not have been assigned to the vanished reader...
    assertThat(enumeratorContext.getSplitAssignments()).doesNotContainKey(2);
    // ...and must show up as pending in the checkpointed state instead.
    List<String> pendingSplitIds =
        enumerator.snapshotState(1).pendingSplits().stream()
            .map(IcebergSourceSplitState::split)
            .map(IcebergSourceSplit::splitId)
            .collect(Collectors.toList());
    assertThat(pendingSplitIds).hasSameSizeAs(splits).first().isEqualTo(splits.get(0).splitId());
    // register the reader again, and let it request a split
    enumeratorContext.registerReader(2, "localhost");
    enumerator.addReader(2);
    enumerator.handleSourceEvent(2, new SplitRequestEvent());
    assertThat(enumerator.snapshotState(2).pendingSplits()).isEmpty();
    assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
        .contains(splits.get(0));
}
|
/**
 * Looks up a Hive table through the metastore operations layer.
 *
 * <p>A {@link StarRocksConnectorException} is logged and rethrown so callers
 * see connector-specific failures; any other exception is logged and mapped
 * to a {@code null} result (table treated as unavailable).
 *
 * @param dbName the database name
 * @param tblName the table name
 * @return the table, or {@code null} if lookup failed with a generic error
 */
@Override
public Table getTable(String dbName, String tblName) {
    try {
        return hmsOps.getTable(dbName, tblName);
    } catch (StarRocksConnectorException e) {
        // Connector errors are propagated to the caller.
        LOG.error("Failed to get hive table [{}.{}.{}]", catalogName, dbName, tblName, e);
        throw e;
    } catch (Exception e) {
        // Anything else degrades to "table not found".
        LOG.error("Failed to get hive table [{}.{}.{}]", catalogName, dbName, tblName, e);
        return null;
    }
}
|
/**
 * A StarRocksConnectorException raised by the metastore layer (e.g. for an
 * unsupported ACID table) must propagate out of getTable rather than being
 * swallowed into a null return.
 */
@Test
public void testGetTableThrowConnectorException() {
    new Expectations(hmsOps) {
        {
            hmsOps.getTable("acid_db", "acid_table");
            result = new StarRocksConnectorException("hive acid table is not supported");
            minTimes = 1;
        }
    };
    Assert.assertThrows(StarRocksConnectorException.class,
        () -> hiveMetadata.getTable("acid_db", "acid_table"));
}
|
/**
 * Extracts the charset name from a Content-Type header value.
 *
 * <p>A fast path handles an exact-case {@code "charset="} occurrence (with a
 * short-circuit for the very common UTF-8); otherwise the header is split on
 * {@code ';'} and each parameter is checked case-insensitively. Defaults to
 * {@code "UTF-8"} when no charset parameter is present.
 *
 * @param contentType the Content-Type header value
 * @return the normalized charset name, never null
 */
public static String getCharsetNameFromContentType(String contentType) {
    // Fast path: exact-case "charset=" anywhere in the header value.
    int idx = contentType.indexOf("charset=");
    if (idx != -1) {
        int valueStart = idx + 8;
        // special optimization for utf-8 which is a common charset
        if (contentType.regionMatches(true, valueStart, "utf-8", 0, 5)) {
            return "UTF-8";
        }
        int semicolon = contentType.indexOf(';', idx);
        String charset = semicolon > idx
                ? contentType.substring(valueStart, semicolon)
                : contentType.substring(valueStart);
        return normalizeCharset(charset);
    }
    // Slow path: catches case variants such as "Charset=" via a split scan.
    for (String part : contentType.split(";")) {
        String trimmed = part.trim();
        // Perform a case insensitive "startsWith" check that works for different locales
        String prefix = "charset=";
        if (trimmed.regionMatches(true, 0, prefix, 0, prefix.length())) {
            return normalizeCharset(trimmed.substring(prefix.length()));
        }
    }
    // use UTF-8 as default
    return "UTF-8";
}
|
/**
 * Charset extraction must work with and without a media type, for both
 * cases of "utf-8", and must preserve non-UTF charsets like iso-8859-1.
 */
@Test
public void testCharset() {
    assertEquals("UTF-8", IOHelper.getCharsetNameFromContentType("charset=utf-8"));
    assertEquals("UTF-8", IOHelper.getCharsetNameFromContentType("charset=UTF-8"));
    assertEquals("UTF-8", IOHelper.getCharsetNameFromContentType("text/plain; charset=UTF-8"));
    assertEquals("UTF-8", IOHelper.getCharsetNameFromContentType("application/json; charset=utf-8"));
    assertEquals("iso-8859-1", IOHelper.getCharsetNameFromContentType("application/json; charset=iso-8859-1"));
}
|
/**
 * Validates that the given string is a plausible AWS region identifier.
 *
 * @param region the region string to validate
 * @throws InvalidConfigurationException if the region is null or does not
 *         match the AWS region pattern
 */
static void validateRegion(String region) {
    if (region == null) {
        throw new InvalidConfigurationException("The provided region is null.");
    }
    boolean matchesPattern = AWS_REGION_PATTERN.matcher(region).matches();
    if (!matchesPattern) {
        throw new InvalidConfigurationException(
            String.format("The provided region %s is not a valid AWS region.", region));
    }
}
|
/**
 * A syntactically invalid region ("us-wrong-1") must be rejected with a
 * message that names the offending region.
 */
@Test
public void validateInvalidRegion() {
    // given
    String region = "us-wrong-1";
    String expectedMessage = String.format("The provided region %s is not a valid AWS region.", region);
    //when
    ThrowingRunnable validateRegion = () -> RegionValidator.validateRegion(region);
    //then
    InvalidConfigurationException thrownEx = assertThrows(InvalidConfigurationException.class, validateRegion);
    assertEquals(expectedMessage, thrownEx.getMessage());
}
|
/**
 * Reads a 4-byte unsigned 32-bit number from the current needle position.
 *
 * @return the value read via {@code Wire::getUInt32}
 * @throws IllegalArgumentException presumably from checkAvailable when fewer
 *         than 4 bytes remain — TODO confirm against checkAvailable
 */
public int getNumber4()
{
    checkAvailable(4);
    return get(Wire::getUInt32, 4);
}
|
/**
 * Reading a 4-byte number from a 3-byte frame must fail the availability
 * check with an IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void testGetIncorrectInt()
{
    ZFrame frame = new ZFrame(new byte[3]);
    ZNeedle needle = new ZNeedle(frame);
    needle.getNumber4();
}
|
/**
 * Extracts the element at index {@code fieldId} from the given array.
 *
 * @param in an array object (primitive or reference arrays both work via
 *        {@code java.lang.reflect.Array})
 * @return the element, cast unchecked to the declared output type
 */
@SuppressWarnings("unchecked")
@Override
public OUT extract(Object in) {
    return (OUT) Array.get(in, fieldId);
}
|
/**
 * For every valid index, FieldFromArray must extract exactly the element at
 * that position of the string array.
 */
@Test
void testStringArray() {
    for (int i = 0; i < this.testStringArray.length; i++) {
        assertThat(new FieldFromArray<String>(i).extract(testStringArray))
            .isEqualTo(testStringArray[i]);
    }
}
|
/**
 * Executes one iteration of the sender loop: first services transactional /
 * idempotent-producer state (sequence resolution, fatal or abortable errors,
 * epoch bumps, in-flight transactional requests), then drains and sends
 * accumulated produce data and polls the network client.
 *
 * <p>Several branches return early on purpose: when the transaction manager
 * is in a fatal state, when an abortable authorization error must be
 * surfaced, or when a transactional request was just sent — in those cases
 * no produce data should be sent this iteration.
 */
void runOnce() {
    if (transactionManager != null) {
        try {
            transactionManager.maybeResolveSequences();
            RuntimeException lastError = transactionManager.lastError();
            // do not continue sending if the transaction manager is in a failed state
            if (transactionManager.hasFatalError()) {
                if (lastError != null)
                    maybeAbortBatches(lastError);
                client.poll(retryBackoffMs, time.milliseconds());
                return;
            }
            if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
                return;
            }
            // Check whether we need a new producerId. If so, we will enqueue an InitProducerId
            // request which will be sent below
            transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
            if (maybeSendAndPollTransactionalRequest()) {
                return;
            }
        } catch (AuthenticationException e) {
            // This is already logged as error, but propagated here to perform any clean ups.
            log.trace("Authentication exception while processing transactional request", e);
            transactionManager.authenticationFailed(e);
        }
    }
    // Regular (non-transactional-control) path: send produce data and poll.
    long currentTimeMs = time.milliseconds();
    long pollTimeout = sendProducerData(currentTimeMs);
    client.poll(pollTimeout, currentTimeMs);
}
|
/**
 * An UNSUPPORTED_FOR_MESSAGE_FORMAT produce error must fail the record's
 * future but must NOT move the transaction manager into an error state,
 * since it is not a fatal producer error.
 */
@Test
public void testUnsupportedForMessageFormatInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = createTransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    Future<RecordMetadata> future = appendToAccumulator(tp0);
    // Respond to the idempotent produce request with the format error.
    client.prepareResponse(
        body -> body instanceof ProduceRequest && RequestTestUtils.hasIdempotentRecords((ProduceRequest) body),
        produceResponse(tp0, -1, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, 0));
    sender.runOnce();
    assertFutureFailure(future, UnsupportedForMessageFormatException.class);
    // unsupported for message format is not a fatal error
    assertFalse(transactionManager.hasError());
}
|
/**
 * Checks whether the given string is a valid domain name.
 *
 * <p>Blank input is rejected; {@code "localhost"} is accepted explicitly even
 * though it has no dot; everything else must match the domain pattern.
 *
 * @param str the candidate domain string
 * @return {@code true} if the string is a valid domain
 */
public static boolean isDomain(String str) {
    if (StringUtils.isBlank(str)) {
        return false;
    }
    return LOCAL_HOST.equals(str) || DOMAIN_PATTERN.matcher(str).matches();
}
|
/**
 * Accepts localhost, simple and multi-label domains, and hyphenated names;
 * rejects empty and null input.
 */
@Test
void testIsDomain() {
    assertTrue(InternetAddressUtil.isDomain("localhost"));
    assertTrue(InternetAddressUtil.isDomain("github.com"));
    assertTrue(InternetAddressUtil.isDomain("prefix.infix.suffix"));
    assertTrue(InternetAddressUtil.isDomain("p-hub.com"));
    assertFalse(InternetAddressUtil.isDomain(""));
    assertFalse(InternetAddressUtil.isDomain(null));
}
|
/**
 * Computes the epoch millisecond marking the start of the second strictly
 * after {@code now}; e.g. ...17,522 maps to ...18,000, and an input already
 * on a second boundary advances to the next boundary.
 *
 * <p>Implemented with plain arithmetic instead of the previous
 * {@code Calendar}/{@code Date} round-trip: no allocation, no dependence on
 * the default time zone, identical results (millisecond-of-second is
 * time-zone independent). {@link Math#floorMod} keeps pre-1970 (negative)
 * timestamps correct as well.
 *
 * @param now an epoch timestamp in milliseconds
 * @return the epoch millisecond of the next whole second after {@code now}
 */
public static long computeStartOfNextSecond(long now) {
    return now - Math.floorMod(now, 1000L) + 1000L;
}
|
/**
 * A timestamp with 522 ms into the second must round up to the start of the
 * next second (478 ms later).
 */
@Test
public void testSecond() {
    // Mon Nov 20 18:05:17,522 CET 2006
    long now = 1164042317522L;
    // Mon Nov 20 18:05:18,000 CET 2006
    long expected = 1164042318000L;
    long computed = TimeUtil.computeStartOfNextSecond(now);
    // Sanity check on the fixture itself: the gap is 1000 - 522 = 478 ms.
    Assertions.assertEquals(expected - now, 478);
    Assertions.assertEquals(expected, computed);
}
|
/**
 * Creates and stores the wrapped stream operator from the factory.
 *
 * <p>May be called at most once; a second call fails the precondition check.
 * Factories that need a processing-time service receive it before creation.
 *
 * @param parameters the operator construction parameters
 */
public void createOperator(StreamOperatorParameters<RowData> parameters) {
    checkArgument(wrapped == null, "This operator has been initialized");
    if (factory instanceof ProcessingTimeServiceAware) {
        ProcessingTimeServiceAware aware = (ProcessingTimeServiceAware) factory;
        aware.setProcessingTimeService(parameters.getProcessingTimeService());
    }
    wrapped = factory.createStreamOperator(parameters);
}
|
/**
 * createOperator must expose the factory-produced operator, and a second
 * invocation must be rejected with the "already initialized" message.
 */
@Test
public void testCreateOperator() throws Exception {
    TestingOneInputStreamOperator operator = new TestingOneInputStreamOperator();
    TableOperatorWrapper<TestingOneInputStreamOperator> wrapper =
        createOneInputOperatorWrapper(operator, "test");
    StreamOperatorParameters<RowData> parameters = createStreamOperatorParameters();
    wrapper.createOperator(parameters);
    assertThat(wrapper.getStreamOperator()).isEqualTo(operator);
    // create operator again, will throw exception
    assertThatThrownBy(() -> wrapper.createOperator(parameters))
        .hasMessageContaining("This operator has been initialized");
}
|
/**
 * Maps an unhandled exception to a proxy response: request/server errors
 * become 500, everything else becomes 502.
 *
 * @param ex the exception that escaped request handling
 * @return a JSON error response with the appropriate status code
 */
@Override
public AwsProxyResponse handle(Throwable ex) {
    log.error("Called exception handler for:", ex);
    // adding a print stack trace in case we have no appender or we are running inside SAM local, where need the
    // output to go to the stderr.
    ex.printStackTrace();
    boolean internalError =
            ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException;
    if (internalError) {
        return new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR));
    }
    return new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR));
}
|
/**
 * An InternalServerErrorException must be translated into a 500 response.
 */
@Test
void typedHandle_InternalServerErrorException_500State() {
    // Needed to mock InternalServerErrorException because it leverages RuntimeDelegate to set an internal
    // response object.
    InternalServerErrorException mockInternalServerErrorException = Mockito.mock(InternalServerErrorException.class);
    Mockito.when(mockInternalServerErrorException.getMessage()).thenReturn(INTERNAL_SERVER_ERROR_MESSAGE);
    AwsProxyResponse resp = exceptionHandler.handle(mockInternalServerErrorException);
    assertNotNull(resp);
    assertEquals(500, resp.getStatusCode());
}
|
/**
 * Copies {@code sourceFirst} followed by {@code sourceSecond} into
 * {@code dest}, which the caller must size to hold both.
 *
 * @param sourceFirst the array copied to the front of {@code dest}
 * @param sourceSecond the array copied immediately after it
 * @param dest the destination array (length >= sum of source lengths)
 */
public static <T> void concat(T[] sourceFirst, T[] sourceSecond, T[] dest) {
    int offset = sourceFirst.length;
    System.arraycopy(sourceFirst, 0, dest, 0, offset);
    System.arraycopy(sourceSecond, 0, dest, offset, sourceSecond.length);
}
|
/**
 * A null first source array must surface as a NullPointerException (from
 * reading its length / System.arraycopy), not be silently tolerated.
 */
@Test(expected = NullPointerException.class)
public void concat_whenFirstNull() {
    Integer[] first = null;
    Integer[] second = new Integer[]{4};
    Integer[] concatenated = new Integer[4];
    ArrayUtils.concat(first, second, concatenated);
    // Unreachable: the call above must throw.
    fail();
}
|
/**
 * Dumps variables as rows of {@code [name, value]} (plus {@code [defaultValue,
 * isChanged]} when {@code type == VERBOSE}), filtered by an optional name
 * matcher and by the invisible-variable flag, and sorted by variable name.
 *
 * @param type       scope of the dump (SESSION, GLOBAL, VERBOSE)
 * @param sessionVar session variables used as the value source for session-scoped
 *                   entries; may be null (session entries are then skipped)
 * @param matcher    optional name filter; null means "match everything"
 * @return sorted rows describing the matching variables
 */
public static List<List<String>> dump(SetType type, SessionVariable sessionVar, PatternMatcher matcher) {
    List<List<String>> rows = Lists.newArrayList();
    // Hold the read lock when session dump, because this option need to access global variable.
    RLOCK.lock();
    try {
        for (Map.Entry<String, VarContext> entry : CTX_BY_VAR_NAME.entrySet()) {
            // Filter variable not match to the regex.
            // Prefer the display ("show") name when the attribute declares one.
            String name = StringUtils.isBlank(entry.getValue().getVarAttr().show()) ? entry.getKey()
                    : entry.getValue().getVarAttr().show();
            if (matcher != null && !matcher.match(name)) {
                continue;
            }
            VarContext ctx = entry.getValue();
            // For session variables, the flag is VariableMgr.SESSION | VariableMgr.INVISIBLE
            // For global variables, the flag is VariableMgr.GLOBAL | VariableMgr.INVISIBLE
            // Skip invisible variables unless the session explicitly opted in.
            if ((ctx.getFlag() > VariableMgr.INVISIBLE) && sessionVar != null &&
                    !sessionVar.isEnableShowAllVariables()) {
                continue;
            }
            List<String> row = Lists.newArrayList();
            if (type != SetType.GLOBAL && ctx.getObj() == DEFAULT_SESSION_VARIABLE) {
                // In this condition, we may retrieve session variables for caller.
                if (sessionVar != null) {
                    row.add(name);
                    String currentValue = getValue(sessionVar, ctx.getField());
                    row.add(currentValue);
                    if (type == SetType.VERBOSE) {
                        row.add(ctx.defaultValue);
                        // "0" = unchanged from default, "1" = changed.
                        row.add(ctx.defaultValue.equals(currentValue) ? "0" : "1");
                    }
                } else {
                    LOG.error("sessionVar is null during dumping session variables.");
                    continue;
                }
            } else {
                // Global (or non-session-backed) variable: read from its own holder object.
                row.add(name);
                String currentValue = getValue(ctx.getObj(), ctx.getField());
                row.add(currentValue);
                if (type == SetType.VERBOSE) {
                    row.add(ctx.defaultValue);
                    row.add(ctx.defaultValue.equals(currentValue) ? "0" : "1");
                }
            }
            // sql_mode is stored numerically; render it in its symbolic form.
            if (row.get(0).equalsIgnoreCase(SessionVariable.SQL_MODE)) {
                try {
                    row.set(1, SqlModeHelper.decode(Long.valueOf(row.get(1))));
                } catch (DdlException e) {
                    row.set(1, "");
                    LOG.warn("Decode sql mode failed");
                }
            }
            rows.add(row);
        }
    } finally {
        RLOCK.unlock();
    }
    // Sort all variables by variable name.
    rows.sort(Comparator.comparing(o -> o.get(0)));
    return rows;
}
|
// Verifies that invisible variables are hidden by default, become visible once
// enable_show_all_variables is set, and that passing a null SessionVariable
// yields fewer session rows than a GLOBAL dump but is itself deterministic.
@Test
public void testDumpInvisible() {
    SessionVariable sv = new SessionVariable();
    List<List<String>> vars = VariableMgr.dump(SetType.SESSION, sv, null);
    Assert.assertFalse(vars.toString().contains("enable_show_all_variables"));
    Assert.assertFalse(vars.toString().contains("cbo_use_correlated_join_estimate"));
    sv.setEnableShowAllVariables(true);
    vars = VariableMgr.dump(SetType.SESSION, sv, null);
    Assert.assertTrue(vars.toString().contains("cbo_use_correlated_join_estimate"));
    vars = VariableMgr.dump(SetType.SESSION, null, null);
    List<List<String>> vars1 = VariableMgr.dump(SetType.GLOBAL, null, null);
    Assert.assertTrue(vars.size() < vars1.size());
    List<List<String>> vars2 = VariableMgr.dump(SetType.SESSION, null, null);
    Assert.assertTrue(vars.size() == vars2.size());
}
|
/**
 * Advances the superstep counter by one and wakes up all threads waiting on
 * the monitor for the next superstep to start.
 *
 * @throws IllegalStateException if the latch has already been terminated
 */
public void triggerNextSuperstep() {
    synchronized (monitor) {
        if (terminated) {
            throw new IllegalStateException("Already terminated.");
        }
        superstepNumber++;
        // Wake every waiter; they re-check the superstep number under the monitor.
        monitor.notifyAll();
    }
}
|
// Verifies that a thread waiting for superstep 2 is released once
// triggerNextSuperstep() is called; a watchdog bounds the wait to avoid hangs.
@Test
public void testWaitFromOne() {
    try {
        SuperstepKickoffLatch latch = new SuperstepKickoffLatch();
        Waiter w = new Waiter(latch, 2);
        Thread waiter = new Thread(w);
        waiter.setDaemon(true);
        waiter.start();
        // Watchdog aborts the waiter if it is not released within 2 seconds.
        WatchDog wd = new WatchDog(waiter, 2000);
        wd.start();
        // Give the waiter time to actually block before triggering.
        Thread.sleep(100);
        latch.triggerNextSuperstep();
        wd.join();
        if (wd.getError() != null) {
            throw wd.getError();
        }
        if (w.getError() != null) {
            throw w.getError();
        }
    } catch (Throwable t) {
        t.printStackTrace();
        Assert.fail("Error: " + t.getMessage());
    }
}
|
/**
 * Returns the maximum offset of the given message queue by delegating to the
 * client factory's admin implementation.
 *
 * @param mq the message queue to query
 * @return the queue's maximum offset
 * @throws MQClientException if the offset lookup fails
 */
public long maxOffset(MessageQueue mq) throws MQClientException {
    return this.mQClientFactory.getMQAdminImpl().maxOffset(mq);
}
|
// Verifies that maxOffset() of a freshly created queue is 0.
@Test
public void testMaxOffset() throws MQClientException {
    assertEquals(0, defaultMQPushConsumerImpl.maxOffset(createMessageQueue()));
}
|
/**
 * Links every input of node {@code b} to node {@code a}, so that {@code a}
 * becomes at least as far from the graph sources as {@code b} in the
 * maximum-distance calculation.
 *
 * @param a the node to push farther from the sources
 * @param b the node whose distance serves as the lower bound
 */
void makeAsFarAs(ExecNode<?> a, ExecNode<?> b) {
    TopologyNode nodeA = getOrCreateTopologyNode(a);
    TopologyNode nodeB = getOrCreateTopologyNode(b);
    for (TopologyNode input : nodeB.inputs) {
        link(input.execNode, nodeA.execNode);
    }
}
|
// Verifies that after makeAsFarAs(nodes[4], nodes[7]) both nodes end up at the
// same maximum distance from the graph sources.
@Test
void testMakeAsFarAs() {
    Tuple2<TopologyGraph, TestingBatchExecNode[]> tuple2 = buildTopologyGraph();
    TopologyGraph graph = tuple2.f0;
    TestingBatchExecNode[] nodes = tuple2.f1;
    graph.makeAsFarAs(nodes[4], nodes[7]);
    Map<ExecNode<?>, Integer> distances = graph.calculateMaximumDistance();
    assertThat(distances.get(nodes[7]).intValue()).isEqualTo(4);
    assertThat(distances.get(nodes[4]).intValue()).isEqualTo(4);
}
|
/**
 * Matches a message against all stream rules and returns the set of matching
 * streams. AND-type streams are dropped (and blacklisted) on the first failing
 * rule; OR-type streams are blacklisted after the first successful rule.
 * Streams flagged with removeMatchesFromDefaultStream also detach the message
 * from the default stream.
 *
 * @param message the message to route
 * @return immutable list of streams the message belongs to
 */
public List<Stream> match(Message message) {
    final Set<Stream> result = Sets.newHashSet();
    // Stream IDs whose rules no longer need evaluation (already decided).
    final Set<String> blackList = Sets.newHashSet();
    for (final Rule rule : rulesList) {
        if (blackList.contains(rule.getStreamId())) {
            continue;
        }
        final StreamRule streamRule = rule.getStreamRule();
        final StreamRuleType streamRuleType = streamRule.getType();
        final Stream.MatchingType matchingType = rule.getMatchingType();
        // A missing field means "no match" for rule types that require the field.
        if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType)
                && !message.hasField(streamRule.getField())) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
            continue;
        }
        final Stream stream;
        if (streamRuleType != StreamRuleType.REGEX) {
            stream = rule.match(message);
        } else {
            // Regex evaluation is bounded to protect against pathological patterns.
            stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
        }
        if (stream == null) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
        } else {
            result.add(stream);
            if (matchingType == Stream.MatchingType.OR) {
                // blacklist stream because it is already matched
                blackList.add(rule.getStreamId());
            }
        }
    }
    final Stream defaultStream = defaultStreamProvider.get();
    boolean alreadyRemovedDefaultStream = false;
    for (Stream stream : result) {
        if (stream.getRemoveMatchesFromDefaultStream()) {
            // Only the first stream needs to actually remove the default stream.
            if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
                alreadyRemovedDefaultStream = true;
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Successfully removed default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            } else {
                // A previously executed message processor (or Illuminate) has likely already removed the
                // default stream from the message. Now, the message has matched a stream in the Graylog
                // MessageFilterChain, and the matching stream is also set to remove the default stream.
                // This is usually from user-defined stream rules, and is generally not a problem.
                cannotRemoveDefaultMeter.inc();
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Couldn't remove default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            }
        }
    }
    return ImmutableList.copyOf(result);
}
|
// Verifies testMatch() for a stream with a PRESENCE rule and a REGEX rule:
// no fields -> neither rule matches; field present but regex mismatching ->
// only the presence rule matches; both satisfied -> full match.
@Test
public void testTestMatch() throws Exception {
    final StreamMock stream = getStreamMock("test");
    final StreamRuleMock rule1 = new StreamRuleMock(ImmutableMap.of(
            "_id", new ObjectId(),
            "field", "testfield1",
            "type", StreamRuleType.PRESENCE.toInteger(),
            "stream_id", stream.getId()
    ));
    final StreamRuleMock rule2 = new StreamRuleMock(ImmutableMap.of(
            "_id", new ObjectId(),
            "field", "testfield2",
            "value", "^test",
            "type", StreamRuleType.REGEX.toInteger(),
            "stream_id", stream.getId()
    ));
    stream.setStreamRules(Lists.newArrayList(rule1, rule2));
    final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream));
    // Without testfield1 and testfield2 in the message.
    final Message message1 = getMessage();
    final StreamRouterEngine.StreamTestMatch testMatch1 = engine.testMatch(message1).get(0);
    final Map<StreamRule, Boolean> matches1 = testMatch1.getMatches();
    assertFalse(testMatch1.isMatched());
    assertFalse(matches1.get(rule1));
    assertFalse(matches1.get(rule2));
    // With testfield1 but no-matching testfield2 in the message.
    final Message message2 = getMessage();
    message2.addField("testfield1", "testvalue");
    message2.addField("testfield2", "no-testvalue");
    final StreamRouterEngine.StreamTestMatch testMatch2 = engine.testMatch(message2).get(0);
    final Map<StreamRule, Boolean> matches2 = testMatch2.getMatches();
    assertFalse(testMatch2.isMatched());
    assertTrue(matches2.get(rule1));
    assertFalse(matches2.get(rule2));
    // With testfield1 and matching testfield2 in the message.
    final Message message3 = getMessage();
    message3.addField("testfield1", "testvalue");
    message3.addField("testfield2", "testvalue2");
    final StreamRouterEngine.StreamTestMatch testMatch3 = engine.testMatch(message3).get(0);
    final Map<StreamRule, Boolean> matches3 = testMatch3.getMatches();
    assertTrue(testMatch3.isMatched());
    assertTrue(matches3.get(rule1));
    assertTrue(matches3.get(rule2));
}
|
/**
 * Reflectively invokes {@code getSchedulerResourceTypes()} on the given YARN
 * response (if the method exists on this Hadoop version) and returns the enum
 * constant names of the result.
 *
 * @param response the RegisterApplicationMasterResponse-like object; may be null
 * @return the resource type names, or {@link Optional#empty()} if the method is
 *         absent, the response is null, or the invocation fails
 */
@VisibleForTesting
Optional<Set<String>> getSchedulerResourceTypeNamesUnsafe(final Object response) {
    if (getSchedulerResourceTypesMethod.isPresent() && response != null) {
        try {
            @SuppressWarnings("unchecked")
            final Set<? extends Enum> schedulerResourceTypes =
                    (Set<? extends Enum>)
                            getSchedulerResourceTypesMethod.get().invoke(response);
            return Optional.of(
                    Preconditions.checkNotNull(schedulerResourceTypes).stream()
                            .map(Enum::name)
                            .collect(Collectors.toSet()));
        } catch (Exception e) {
            // Reflection failures are logged and treated as "feature unavailable".
            logger.error("Error invoking 'getSchedulerResourceTypes()'", e);
        }
    }
    return Optional.empty();
}
|
// Verifies that when the reflected method exists on the response class, the
// scheduler resource type names are extracted successfully.
@Test
void testCallsGetSchedulerResourceTypesMethodIfPresent() {
    final RegisterApplicationMasterResponseReflector
            registerApplicationMasterResponseReflector =
                    new RegisterApplicationMasterResponseReflector(LOG, HasMethod.class);
    final Optional<Set<String>> schedulerResourceTypeNames =
            registerApplicationMasterResponseReflector.getSchedulerResourceTypeNamesUnsafe(
                    new HasMethod());
    assertThat(schedulerResourceTypeNames).isPresent();
    assertThat(schedulerResourceTypeNames.get()).contains("MEMORY", "CPU");
}
|
/**
 * Builds a {@code CreateSourceProperties} instance from the supplied literals.
 * Validation errors surface as {@link KsqlException}s whose message talks about
 * a "property" instead of a "configuration", matching user-facing terminology.
 *
 * @param literals the raw property literals to validate and wrap
 * @return the constructed properties
 * @throws KsqlException if the literals fail validation
 */
public static CreateSourceProperties from(final Map<String, Literal> literals) {
    try {
        return new CreateSourceProperties(literals, DurationParser::parse, false);
    } catch (final ConfigException e) {
        // Reword the Kafka config error before rethrowing it as a KSQL error.
        throw new KsqlException(e.getMessage().replace("configuration", "property"), e);
    }
}
|
// Verifies that from() rejects a non-string value for the SOURCED_BY_CONNECTOR
// property with a reworded ("property", not "configuration") error message.
@Test
public void shouldThrowOnInvalidSourceConnectorPropertyType() {
    // Given:
    final Map<String, Literal> props = ImmutableMap.<String, Literal>builder()
            .putAll(MINIMUM_VALID_PROPS)
            .put(CreateConfigs.SOURCED_BY_CONNECTOR_PROPERTY, new IntegerLiteral(1))
            .build();
    // When:
    final Exception e = assertThrows(
            KsqlException.class,
            () -> CreateSourceProperties.from(props)
    );
    // Then:
    assertThat(e.getMessage(), containsString("Invalid value 1 for property SOURCED_BY_CONNECTOR: " +
            "Expected value to be a string, but it was a java.lang.Integer"));
}
|
/**
 * Sanity-checks a PNG chunk: its data must lie within the backing byte array,
 * its start must leave room for the 4-byte chunk type preceding the data, and
 * its CRC must match a CRC computed over the chunk type plus data.
 *
 * @param chunk the chunk to validate; null is considered sane
 * @return true if the chunk is null or passes all checks, false otherwise
 */
static boolean checkChunkSane(Chunk chunk)
{
    if (chunk == null)
    {
        // If the chunk does not exist, it can not be wrong...
        return true;
    }
    // Data must not run past the end of the backing array.
    if (chunk.start + chunk.length > chunk.bytes.length)
    {
        return false;
    }
    // The 4-byte chunk type sits immediately before the data, so start >= 4.
    if (chunk.start < 4)
    {
        return false;
    }
    // We must include the chunk type in the CRC calculation
    int ourCRC = crc(chunk.bytes, chunk.start - 4, chunk.length + 4);
    if (ourCRC != chunk.crc)
    {
        LOG.error(String.format("Invalid CRC %08X on chunk %08X, expected %08X.", ourCRC,
                chunk.chunkType, chunk.crc));
        return false;
    }
    return true;
}
|
// Exercises checkChunkSane(): null chunk is sane; bad start/length or wrong CRC
// are rejected; the chunk is sane only with start=4 and the matching CRC.
@Test
void testChunkSane()
{
    PNGConverter.Chunk chunk = new PNGConverter.Chunk();
    assertTrue(PNGConverter.checkChunkSane(null));
    chunk.bytes = "IHDRsomedummyvaluesDummyValuesAtEnd".getBytes();
    chunk.length = 19;
    assertEquals(35, chunk.bytes.length);
    assertEquals("IHDRsomedummyvalues", new String(chunk.getData()));
    // start is still 0 -> no room for the chunk type prefix.
    assertFalse(PNGConverter.checkChunkSane(chunk));
    chunk.start = 4;
    assertEquals("somedummyvaluesDumm", new String(chunk.getData()));
    // CRC not set yet -> mismatch.
    assertFalse(PNGConverter.checkChunkSane(chunk));
    chunk.crc = -1729802258;
    assertTrue(PNGConverter.checkChunkSane(chunk));
    // Shifting start invalidates the CRC region.
    chunk.start = 6;
    assertFalse(PNGConverter.checkChunkSane(chunk));
    // Length overrunning the byte array is rejected.
    chunk.length = 60;
    assertFalse(PNGConverter.checkChunkSane(chunk));
}
|
/**
 * Applies scanner-related environment variables from the current process
 * environment to the given input properties. Delegates to the two-argument
 * overload with {@link System#getenv()}.
 *
 * @param inputProperties the mutable property map to enrich
 */
public static void processEnvVariables(Map<String, String> inputProperties) {
    processEnvVariables(inputProperties, System.getenv());
}
|
// Verifies that SONAR_SCANNER_JSON_PARAMS takes precedence over the legacy
// SONARQUBE_SCANNER_PARAMS variable, and that a warning is logged about it.
@Test
void oldJsonEnvVariablesIsIgnoredIfNewIsDefinedAndLogAWarning() {
    var inputProperties = new HashMap<String, String>();
    EnvironmentConfig.processEnvVariables(inputProperties,
            Map.of("SONARQUBE_SCANNER_PARAMS", "{\"key1\":\"should not override\", \"key3\":\"value3\"}",
                    "SONAR_SCANNER_JSON_PARAMS", "{\"key1\":\"value1\", \"key2\":\"value2\"}"));
    assertThat(inputProperties).containsOnly(
            entry("key1", "value1"),
            entry("key2", "value2"));
    assertThat(logTester.logs(Level.WARN)).containsOnly("Ignoring environment variable 'SONARQUBE_SCANNER_PARAMS' because 'SONAR_SCANNER_JSON_PARAMS' is set");
}
|
/**
 * Retries the callable until it succeeds or the timeout elapses, using the
 * system clock. Delegates to the overload that accepts an explicit {@code Time}.
 *
 * @param callable        the operation to retry
 * @param description     supplies a human-readable description for error messages
 * @param timeoutDuration total time budget for all attempts
 * @param retryBackoffMs  delay between attempts in milliseconds
 * @return the callable's result once it succeeds
 * @throws Exception if the operation keeps failing until the timeout
 */
public static <T> T retryUntilTimeout(Callable<T> callable, Supplier<String> description, Duration timeoutDuration, long retryBackoffMs) throws Exception {
    return retryUntilTimeout(callable, description, timeoutDuration, retryBackoffMs, Time.SYSTEM);
}
|
// Verifies that a WakeupException from the callable is surfaced as a
// ConnectException after the retry budget is exhausted.
@Test
public void testWakeupException() throws Exception {
    Mockito.when(mockCallable.call()).thenThrow(new WakeupException());
    assertThrows(ConnectException.class,
            () -> RetryUtil.retryUntilTimeout(mockCallable, testMsg, Duration.ofMillis(50), 10, mockTime));
    Mockito.verify(mockCallable, Mockito.atLeastOnce()).call();
}
|
/**
 * Lists the instances of one cluster of a service.
 *
 * @param namespaceId the namespace of the service
 * @param groupName   the service's group
 * @param serviceName the service name
 * @param clusterName the cluster whose instances are requested
 * @return the instances belonging to the given cluster
 * @throws NacosException NOT_FOUND if the service is not registered or the
 *                        cluster does not exist for it
 */
@Override
public List<? extends Instance> listInstances(String namespaceId, String groupName, String serviceName,
        String clusterName) throws NacosException {
    Service service = Service.newService(namespaceId, groupName, serviceName);
    if (!ServiceManager.getInstance().containSingleton(service)) {
        throw new NacosException(NacosException.NOT_FOUND,
                String.format("service %s@@%s is not found!", groupName, serviceName));
    }
    if (!serviceStorage.getClusters(service).contains(clusterName)) {
        throw new NacosException(NacosException.NOT_FOUND, "cluster " + clusterName + " is not found!");
    }
    ServiceInfo serviceInfo = serviceStorage.getData(service);
    // Narrow the full service info down to the requested cluster.
    ServiceInfo result = ServiceUtil.selectInstances(serviceInfo, clusterName);
    return result.getHosts();
}
|
// Verifies that listInstances() returns the single instance registered in the
// requested cluster when both the service and the cluster are known.
@Test
void testListInstances() throws NacosException {
    Mockito.when(serviceStorage.getClusters(Mockito.any())).thenReturn(Collections.singleton("D"));
    ServiceInfo serviceInfo = new ServiceInfo();
    serviceInfo.setGroupName("B");
    serviceInfo.setName("C");
    Instance instance = new Instance();
    instance.setClusterName("D");
    instance.setIp("1.1.1.1");
    serviceInfo.setHosts(Collections.singletonList(instance));
    Mockito.when(serviceStorage.getData(Mockito.any())).thenReturn(serviceInfo);
    List<? extends Instance> instances = catalogServiceV2Impl.listInstances("A", "B", "C", "D");
    assertEquals(1, instances.size());
}
|
/**
 * Resets the given buffer for reuse: position becomes 0, the limit is set to
 * the capacity, and the mark is discarded. The buffer's contents are untouched.
 *
 * @param buffer the buffer to reset; must not be null
 * @throws NullPointerException if {@code buffer} is null
 */
public static void clear(Buffer buffer) {
    // requireNonNull keeps the NPE contract but names the offending argument.
    Objects.requireNonNull(buffer, "buffer").clear();
}
|
// Verifies that clear() completes without throwing on a buffer with data.
@Test
public void testClear() {
    ByteBuffer byteBuffer = ByteBuffer.allocate(4);
    byteBuffer.putInt(1);
    Assertions.assertDoesNotThrow(() -> BufferUtils.clear(byteBuffer));
}
|
/**
 * Aggregates per-step runtime states into a map keyed by step status. Each
 * summary records [stepOrdinal, startTime, endTime] triples, where the ordinal
 * is the step's 1-based position in the workflow's runtime DAG iteration order.
 * Steps with status NOT_CREATED are excluded.
 *
 * @param summary the workflow summary providing the runtime DAG ordering
 * @param states  runtime state per step id
 * @return per-status step summaries with their step lists sorted
 */
public static EnumMap<StepInstance.Status, WorkflowStepStatusSummary> toStepStatusMap(
        WorkflowSummary summary, Map<String, StepRuntimeState> states) {
    AtomicLong ordinal = new AtomicLong(0);
    // Assign each step a 1-based ordinal following the runtime DAG key order.
    Map<String, Long> stepOrdinalMap =
            summary.getRuntimeDag().keySet().stream()
                    .collect(Collectors.toMap(Function.identity(), s -> ordinal.incrementAndGet()));
    EnumMap<StepInstance.Status, WorkflowStepStatusSummary> stats =
            new EnumMap<>(StepInstance.Status.class);
    states.forEach(
            (stepId, state) -> {
                if (state.getStatus() != StepInstance.Status.NOT_CREATED) {
                    if (!stats.containsKey(state.getStatus())) {
                        stats.put(state.getStatus(), new WorkflowStepStatusSummary());
                    }
                    // remove() also marks the ordinal as consumed for this step.
                    List<Long> stepInfo =
                            Arrays.asList(
                                    stepOrdinalMap.remove(stepId), state.getStartTime(), state.getEndTime());
                    stats.get(state.getStatus()).addStep(stepInfo);
                }
            });
    stats.forEach((status, stepStatusSummary) -> stepStatusSummary.sortSteps());
    // Don't include NOT_CREATED in the stats. Use decode method to get them if needed.
    return stats;
}
|
// Verifies that a RUNNING step is aggregated under the RUNNING status with its
// DAG ordinal (2 for "job1" in the fixture), start time, and null end time.
@Test
public void testToStepStatusMap() throws Exception {
    WorkflowSummary workflowSummary =
            loadObject("fixtures/parameters/sample-wf-summary-params.json", WorkflowSummary.class);
    StepRuntimeState state = new StepRuntimeState();
    state.setStatus(StepInstance.Status.RUNNING);
    state.setStartTime(123L);
    Assert.assertEquals(
            singletonEnumMap(
                    StepInstance.Status.RUNNING,
                    WorkflowStepStatusSummary.of(0).addStep(Arrays.asList(2L, 123L, null))),
            TaskHelper.toStepStatusMap(workflowSummary, singletonMap("job1", state)));
}
|
/**
 * Produces a table config with tier-specific overwrites applied to the
 * indexing config and to each entry of the field config list, using the
 * {@code tierOverwrites} sections keyed by the given tier name. Returns the
 * original config unchanged when {@code tier} is null, no overwrites exist for
 * the tier, or applying the overwrites fails.
 *
 * @param tableConfig the base table config
 * @param tier        the tier name to look up overwrites for; may be null
 * @return the (possibly) overwritten table config
 */
public static TableConfig overwriteTableConfigForTier(TableConfig tableConfig, @Nullable String tier) {
    if (tier == null) {
        return tableConfig;
    }
    try {
        boolean updated = false;
        // Work on the JSON representation so overwrites can be merged generically.
        JsonNode tblCfgJson = tableConfig.toJsonNode();
        // Apply tier specific overwrites for `tableIndexConfig`
        JsonNode tblIdxCfgJson = tblCfgJson.get(TableConfig.INDEXING_CONFIG_KEY);
        if (tblIdxCfgJson != null && tblIdxCfgJson.has(TableConfig.TIER_OVERWRITES_KEY)) {
            JsonNode tierCfgJson = tblIdxCfgJson.get(TableConfig.TIER_OVERWRITES_KEY).get(tier);
            if (tierCfgJson != null) {
                LOGGER.debug("Got table index config overwrites: {} for tier: {}", tierCfgJson, tier);
                overwriteConfig(tblIdxCfgJson, tierCfgJson);
                updated = true;
            }
        }
        // Apply tier specific overwrites for `fieldConfigList`
        JsonNode fieldCfgListJson = tblCfgJson.get(TableConfig.FIELD_CONFIG_LIST_KEY);
        if (fieldCfgListJson != null && fieldCfgListJson.isArray()) {
            Iterator<JsonNode> fieldCfgListItr = fieldCfgListJson.elements();
            while (fieldCfgListItr.hasNext()) {
                JsonNode fieldCfgJson = fieldCfgListItr.next();
                if (!fieldCfgJson.has(TableConfig.TIER_OVERWRITES_KEY)) {
                    continue;
                }
                JsonNode tierCfgJson = fieldCfgJson.get(TableConfig.TIER_OVERWRITES_KEY).get(tier);
                if (tierCfgJson != null) {
                    LOGGER.debug("Got field index config overwrites: {} for tier: {}", tierCfgJson, tier);
                    overwriteConfig(fieldCfgJson, tierCfgJson);
                    updated = true;
                }
            }
        }
        if (updated) {
            LOGGER.debug("Got overwritten table config: {} for tier: {}", tblCfgJson, tier);
            // Re-materialize the mutated JSON back into a TableConfig; this may
            // throw if the overwrites produced an invalid config.
            return JsonUtils.jsonNodeToObject(tblCfgJson, TableConfig.class);
        } else {
            LOGGER.debug("No table config overwrites for tier: {}", tier);
            return tableConfig;
        }
    } catch (IOException e) {
        // Best-effort: fall back to the original config rather than failing the caller.
        LOGGER.warn("Failed to overwrite table config for tier: {} for table: {}", tier, tableConfig.getTableName(), e);
        return tableConfig;
    }
}
|
// Verifies that when applying tier overwrites fails (invalid star-tree config
// here), the original table config is returned unchanged.
@Test
public void testOverwriteTableConfigForTierWithError()
        throws Exception {
    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
            .setTierOverwrites(JsonUtils.stringToJsonNode("{\"coldTier\": {\"starTreeIndexConfigs\": {}}}")).build();
    TableConfig tierTblCfg = TableConfigUtils.overwriteTableConfigForTier(tableConfig, "coldTier");
    Assert.assertEquals(tierTblCfg, tableConfig);
}
|
/**
 * Reads previously generated extension descriptor resources from the compiler
 * output location, one file per extension point. Missing files are silently
 * skipped; other I/O failures are reported via {@code error()}.
 *
 * @return map of extension point class name to the set of extension entries
 */
@Override
public Map<String, Set<String>> read() {
    Map<String, Set<String>> extensions = new HashMap<>();
    for (String extensionPoint : processor.getExtensions().keySet()) {
        try {
            FileObject file = getFiler().getResource(StandardLocation.CLASS_OUTPUT, "", EXTENSIONS_RESOURCE
                    + "/" + extensionPoint);
            Set<String> entries = new HashSet<>();
            ExtensionStorage.read(file.openReader(true), entries);
            extensions.put(extensionPoint, entries);
        } catch (FileNotFoundException | NoSuchFileException e) {
            // doesn't exist, ignore
        } catch (FilerException e) {
            // re-opening the file for reading or after writing is ignorable
        } catch (IOException e) {
            error(e.getMessage());
        }
    }
    return extensions;
}
|
// Verifies that read() skips comment lines ("#...") and trims whitespace,
// leaving only the real entry "World".
@Test
public void ensureServiceProviderExtensionStorageReadWorks() throws IOException {
    final StringReader file = new StringReader("#hello\n World");
    final Set<String> entries = new HashSet<>();
    ServiceProviderExtensionStorage.read(file, entries);
    assertThat(entries.size(), is(1));
    assertThat(entries.contains("World"), is(true));
}
|
/**
 * Applies a custom field type mapping to each of the given index sets, after
 * validating that the field may be changed, the target type is valid, and all
 * affected indices support the change. Optionally cycles (rotates) each
 * updated index set so the new mapping takes effect immediately.
 *
 * @param customMapping     the field-name-to-type mapping to store
 * @param indexSetsIds      ids of the index sets to update
 * @param rotateImmediately whether to cycle each updated index set right away
 */
public void changeFieldType(final CustomFieldMapping customMapping,
                            final Set<String> indexSetsIds,
                            final boolean rotateImmediately) {
    checkFieldTypeCanBeChanged(customMapping.fieldName());
    checkType(customMapping);
    checkAllIndicesSupportFieldTypeChange(customMapping.fieldName(), indexSetsIds);
    for (String indexSetId : indexSetsIds) {
        try {
            // Unknown index set ids are silently skipped by the Optional.
            indexSetService.get(indexSetId).ifPresent(indexSetConfig -> {
                var updatedIndexSetConfig = storeMapping(customMapping, indexSetConfig);
                if (rotateImmediately) {
                    updatedIndexSetConfig.ifPresent(this::cycleIndexSet);
                }
            });
        } catch (Exception ex) {
            // Log with context, then propagate so the caller sees the failure.
            LOG.error("Failed to update field type in index set : " + indexSetId, ex);
            throw ex;
        }
    }
}
|
// Verifies that an existing index set gets the new mapping saved and is cycled
// when rotateImmediately is true, while an unknown index set id is skipped.
@Test
void testCyclesIndexSet() {
    doReturn(Optional.of(existingIndexSet)).when(indexSetService).get("existing_index_set");
    doReturn(existingMongoIndexSet).when(mongoIndexSetFactory).create(any());
    toTest.changeFieldType(newCustomMapping,
            new LinkedHashSet<>(List.of("existing_index_set", "wrong_index_set")),
            true);
    final IndexSetConfig expectedUpdatedConfig = existingIndexSet.toBuilder()
            .customFieldMappings(new CustomFieldMappings(Set.of(existingCustomFieldMapping, newCustomMapping)))
            .build();
    verify(mongoIndexSetService).save(expectedUpdatedConfig);
    verify(existingMongoIndexSet).cycle();
    verifyNoMoreInteractions(mongoIndexSetService);
}
|
/**
 * Drains the given queue, closing each consumer, and returns a fresh empty
 * queue. Close failures are tolerated: already-closed consumers are ignored
 * and other errors are logged at debug level.
 *
 * @param consumers the queue of consumers to stop; drained in place
 * @return a new, empty concurrent queue to replace the drained one
 * @throws PulsarClientException declared for API compatibility
 */
public static Queue<Consumer<byte[]>> stopConsumers(final Queue<Consumer<byte[]>> consumers) throws PulsarClientException {
    while (!consumers.isEmpty()) {
        // poll() may return null if another thread drained the queue concurrently.
        Consumer<byte[]> consumer = consumers.poll();
        if (consumer != null) {
            try {
                consumer.close();
            } catch (PulsarClientException.AlreadyClosedException e) {
                // ignore during stopping
            } catch (Exception e) {
                LOG.debug("Error stopping consumer: {} due to {}. This exception is ignored", consumer,
                        e.getMessage(), e);
            }
        }
    }
    return new ConcurrentLinkedQueue<>();
}
|
// Verifies that stopping an empty consumer queue returns an empty queue.
@Test
public void givenConsumerQueueIsEmptywhenIStopConsumersverifyEmptyQueueIsReturned() throws PulsarClientException {
    Queue<Consumer<byte[]>> expected = PulsarUtils.stopConsumers(new ConcurrentLinkedQueue<Consumer<byte[]>>());
    assertTrue(expected.isEmpty());
}
|
/**
 * Adds a boxed Integer by unboxing it and delegating to the primitive
 * {@code add(int)} overload, avoiding boxed storage.
 *
 * @param value the value to add; must not be null
 * @return true if the set did not already contain the value
 */
@Override
public boolean add(final Integer value) {
    return add(value.intValue());
}
|
// Verifies that elements added as primitives or boxed Integers are found via
// either form, i.e. boxed and primitive paths are interchangeable.
@Test
@SuppressWarnings("UnnecessaryBoxing")
public void containsAddedBoxedElements() {
    assertTrue(set.add(1));
    assertTrue(set.add(Integer.valueOf(2)));
    assertContains(set, Integer.valueOf(1));
    assertContains(set, 2);
}
|
/**
 * Creates the CORS filter bean. The configured CORS settings are registered on
 * the API, management, and API-docs paths only when at least one allowed
 * origin (or origin pattern) is configured; otherwise the filter is a no-op.
 *
 * @return the CORS filter backed by the JHipster-configured settings
 */
@Bean
public CorsFilter corsFilter() {
    UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
    CorsConfiguration config = jHipsterProperties.getCors();
    if (!CollectionUtils.isEmpty(config.getAllowedOrigins()) || !CollectionUtils.isEmpty(config.getAllowedOriginPatterns())) {
        log.debug("Registering CORS filter");
        source.registerCorsConfiguration("/api/**", config);
        source.registerCorsConfiguration("/management/**", config);
        source.registerCorsConfiguration("/v3/api-docs", config);
        source.registerCorsConfiguration("/swagger-ui/**", config);
    }
    return new CorsFilter(source);
}
|
// Verifies that the CORS filter answers preflight (OPTIONS) and simple GET
// requests on /api/** with the configured origin, methods, credentials and max age.
@Test
void shouldCorsFilterOnApiPath() throws Exception {
    props.getCors().setAllowedOrigins(Collections.singletonList("other.domain.com"));
    props.getCors().setAllowedMethods(Arrays.asList("GET", "POST", "PUT", "DELETE"));
    props.getCors().setAllowedHeaders(Collections.singletonList("*"));
    props.getCors().setMaxAge(1800L);
    props.getCors().setAllowCredentials(true);
    MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new WebConfigurerTestController()).addFilters(webConfigurer.corsFilter()).build();
    // Preflight request.
    mockMvc
        .perform(
            options("/api/test-cors")
                .header(HttpHeaders.ORIGIN, "other.domain.com")
                .header(HttpHeaders.ACCESS_CONTROL_REQUEST_METHOD, "POST")
        )
        .andExpect(status().isOk())
        .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, "other.domain.com"))
        .andExpect(header().string(HttpHeaders.VARY, "Origin"))
        .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, "GET,POST,PUT,DELETE"))
        .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"))
        .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_MAX_AGE, "1800"));
    // Simple cross-origin GET request.
    mockMvc
        .perform(get("/api/test-cors").header(HttpHeaders.ORIGIN, "other.domain.com"))
        .andExpect(status().isOk())
        .andExpect(header().string(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, "other.domain.com"));
}
|
/**
 * Attempts to shut down the given Hazelcast instance, delegating to
 * {@code OutOfMemoryHandlerHelper.tryShutdown}.
 *
 * @param hazelcastInstance the instance to shut down
 */
protected final void tryShutdown(final HazelcastInstance hazelcastInstance) {
    OutOfMemoryHandlerHelper.tryShutdown(hazelcastInstance);
}
|
// Verifies that the handler's shutdown path completes without throwing.
@Test
public void testTryShutdown() {
    outOfMemoryHandler.shutdown(hazelcastInstance);
}
|
/**
 * Applies a flow-objective intent operation: updates tracked resources for the
 * intents being uninstalled/installed, builds REMOVE and ADD objective
 * contexts, and submits them via a shared installation context. If there is
 * nothing to install or uninstall, success is reported immediately.
 *
 * @param intentOperationContext the operation describing what to (un)install
 */
@Override
public void apply(IntentOperationContext<FlowObjectiveIntent> intentOperationContext) {
    Objects.requireNonNull(intentOperationContext);
    Optional<IntentData> toUninstall = intentOperationContext.toUninstall();
    Optional<IntentData> toInstall = intentOperationContext.toInstall();
    List<FlowObjectiveIntent> uninstallIntents = intentOperationContext.intentsToUninstall();
    List<FlowObjectiveIntent> installIntents = intentOperationContext.intentsToInstall();
    // Nothing to do: report success right away.
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        intentInstallCoordinator.intentInstallSuccess(intentOperationContext);
        return;
    }
    if (toUninstall.isPresent()) {
        // Stop tracking resources of the intent and its installables.
        IntentData intentData = toUninstall.get();
        trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
        uninstallIntents.forEach(installable ->
                trackerService.removeTrackedResources(intentData.intent().key(),
                        installable.resources()));
    }
    if (toInstall.isPresent()) {
        // Begin tracking resources of the intent and its installables.
        IntentData intentData = toInstall.get();
        trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
        installIntents.forEach(installable ->
                trackerService.addTrackedResources(intentData.key(),
                        installable.resources()));
    }
    FlowObjectiveIntentInstallationContext intentInstallationContext =
            new FlowObjectiveIntentInstallationContext(intentOperationContext);
    // Queue REMOVE contexts as "pending" and ADD contexts as "next pending":
    // removals are processed before additions.
    uninstallIntents.stream()
            .map(intent -> buildObjectiveContexts(intent, REMOVE))
            .flatMap(Collection::stream)
            .forEach(context -> {
                context.intentInstallationContext(intentInstallationContext);
                intentInstallationContext.addContext(context);
                intentInstallationContext.addPendingContext(context);
            });
    installIntents.stream()
            .map(intent -> buildObjectiveContexts(intent, ADD))
            .flatMap(Collection::stream)
            .forEach(context -> {
                context.intentInstallationContext(intentInstallationContext);
                intentInstallationContext.addContext(context);
                intentInstallationContext.addNextPendingContext(context);
            });
    intentInstallationContext.apply();
}
|
// Verifies that a GROUPMISSING error after GROUPEXISTS is recovered from (the
// group gets re-added) and the installation still completes successfully.
@Test
public void testGroupMissingError() {
    // group exist -> group missing -> add group
    intentInstallCoordinator = new TestIntentInstallCoordinator();
    installer.intentInstallCoordinator = intentInstallCoordinator;
    errors = ImmutableList.of(GROUPEXISTS, GROUPMISSING);
    installer.flowObjectiveService = new TestFailedFlowObjectiveService(errors);
    context = createInstallContext();
    installer.apply(context);
    successContext = intentInstallCoordinator.successContext;
    assertEquals(successContext, context);
}
|
/**
 * Watches the Wintertodt round timer varbit and fires a notification once the
 * remaining time crosses the user-configured threshold (edge-triggered: only
 * when the previous value was above it and the new value is at or below it).
 *
 * @param varbitChanged the varbit change event from the client
 */
@Subscribe
public void onVarbitChanged(VarbitChanged varbitChanged)
{
    if (varbitChanged.getVarbitId() == Varbits.WINTERTODT_TIMER)
    {
        int timeToNotify = config.roundNotification();
        // Sometimes wt var updates are sent to players even after leaving wt.
        // So only notify if in wt or after just having left.
        if (timeToNotify > 0 && (isInWintertodt || needRoundNotif))
        {
            // The varbit ticks in units of 0.6s; convert to whole seconds.
            int timeInSeconds = varbitChanged.getValue() * 30 / 50;
            int prevTimeInSeconds = previousTimerValue * 30 / 50;
            log.debug("Seconds left until round start: {}", timeInSeconds);
            if (prevTimeInSeconds > timeToNotify && timeInSeconds <= timeToNotify)
            {
                notifier.notify("Wintertodt round is about to start");
                needRoundNotif = false;
            }
        }
        previousTimerValue = varbitChanged.getValue();
    }
}
|
// Verifies that with the 5-second option set, the notification fires exactly
// once when the timer crosses the 5-second mark (value 10 -> 8).
@Test
public void matchStartingNotification_shouldNotify_when5SecondsOptionSelected()
{
    when(config.roundNotification()).thenReturn(5);
    VarbitChanged varbitChanged = new VarbitChanged();
    varbitChanged.setVarbitId(Varbits.WINTERTODT_TIMER);
    varbitChanged.setValue(10);
    wintertodtPlugin.onVarbitChanged(varbitChanged);
    //(5 * 50) / 30 = ~8
    varbitChanged.setValue(8);
    wintertodtPlugin.onVarbitChanged(varbitChanged);
    verify(notifier, times(1)).notify("Wintertodt round is about to start");
}
|
/**
 * Writes a 4-byte int at the current position, growing the buffer if needed,
 * using the stream's configured byte order, and advances the position.
 *
 * @param v the int value to write
 * @throws IOException if the buffer cannot be grown
 */
@Override
public void writeInt(final int v) throws IOException {
    ensureAvailable(INT_SIZE_IN_BYTES);
    Bits.writeInt(buffer, pos, v, isBigEndian);
    pos += INT_SIZE_IN_BYTES;
}
|
// Verifies that writeInt() stores the value at offset 0 in big-endian order.
@Test
public void testWriteIntV() throws Exception {
    int expected = 100;
    out.writeInt(expected);
    int actual = Bits.readIntB(out.buffer, 0);
    assertEquals(expected, actual);
}
|
/**
 * Inserts a row linking a transformation to a partition schema into the
 * R_TRANS_PARTITION_SCHEMA table, using the next available link id.
 *
 * @param id_transformation    the transformation id to link
 * @param id_partition_schema  the partition schema id to link
 * @return the newly allocated id of the link row
 * @throws KettleException if the id allocation or the insert fails
 */
public synchronized ObjectId insertTransformationPartitionSchema( ObjectId id_transformation,
    ObjectId id_partition_schema ) throws KettleException {
    ObjectId id = connectionDelegate.getNextTransformationPartitionSchemaID();
    RowMetaAndData table = new RowMetaAndData();
    table.addValue( new ValueMetaInteger(
        KettleDatabaseRepository.FIELD_TRANS_PARTITION_SCHEMA_ID_TRANS_PARTITION_SCHEMA ), id );
    table.addValue(
        new ValueMetaInteger(
            KettleDatabaseRepository.FIELD_TRANS_PARTITION_SCHEMA_ID_TRANSFORMATION ), id_transformation );
    table.addValue(
        new ValueMetaInteger(
            KettleDatabaseRepository.FIELD_TRANS_PARTITION_SCHEMA_ID_PARTITION_SCHEMA ), id_partition_schema );
    connectionDelegate.insertTableRow( KettleDatabaseRepository.TABLE_R_TRANS_PARTITION_SCHEMA, table );
    return id;
}
|
// Verifies that the inserted row targets the right table and carries the three
// integer columns (link id, transformation id, partition schema id) in order,
// and that the allocated link id is returned.
@Test
public void testInsertTransformationPartitionSchema() throws KettleException {
    ArgumentCaptor<String> argumentTableName = ArgumentCaptor.forClass( String.class );
    ArgumentCaptor<RowMetaAndData> argumentTableData = ArgumentCaptor.forClass( RowMetaAndData.class );
    doNothing().when( repo.connectionDelegate ).insertTableRow( argumentTableName.capture(), argumentTableData.capture() );
    doReturn( new LongObjectId( 456 ) ).when( repo.connectionDelegate ).getNextTransformationPartitionSchemaID();
    ObjectId result = repo.insertTransformationPartitionSchema( new LongObjectId( 147 ), new LongObjectId( 258 ) );
    RowMetaAndData insertRecord = argumentTableData.getValue();
    assertEquals( KettleDatabaseRepository.TABLE_R_TRANS_PARTITION_SCHEMA, argumentTableName.getValue() );
    assertEquals( 3, insertRecord.size() );
    assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 0 ).getType() );
    assertEquals( KettleDatabaseRepository.FIELD_TRANS_PARTITION_SCHEMA_ID_TRANS_PARTITION_SCHEMA, insertRecord.getValueMeta( 0 ).getName() );
    assertEquals( Long.valueOf( 456 ), insertRecord.getInteger( 0 ) );
    assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 1 ).getType() );
    assertEquals( KettleDatabaseRepository.FIELD_TRANS_PARTITION_SCHEMA_ID_TRANSFORMATION, insertRecord.getValueMeta( 1 ).getName() );
    assertEquals( Long.valueOf( 147 ), insertRecord.getInteger( 1 ) );
    assertEquals( ValueMetaInterface.TYPE_INTEGER, insertRecord.getValueMeta( 2 ).getType() );
    assertEquals( KettleDatabaseRepository.FIELD_TRANS_PARTITION_SCHEMA_ID_PARTITION_SCHEMA, insertRecord.getValueMeta( 2 ).getName() );
    assertEquals( Long.valueOf( 258 ), insertRecord.getInteger( 2 ) );
    assertEquals( new LongObjectId( 456 ), result );
}
|
/**
 * Updates a reward activity. Closed activities are immutable; the activity's
 * SPUs must not conflict with other activities; the status is recomputed from
 * the new end time before persisting.
 *
 * @param updateReqVO the update request
 */
@Override
public void updateRewardActivity(RewardActivityUpdateReqVO updateReqVO) {
    // Validate that the activity exists.
    RewardActivityDO dbRewardActivity = validateRewardActivityExists(updateReqVO.getId());
    if (dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) { // Closed activities cannot be modified.
        throw exception(REWARD_ACTIVITY_UPDATE_FAIL_STATUS_CLOSED);
    }
    // Validate that the SPUs do not conflict with other activities.
    validateRewardActivitySpuConflicts(updateReqVO.getId(), updateReqVO.getProductSpuIds());
    // Perform the update, recomputing the status from the new end time.
    RewardActivityDO updateObj = RewardActivityConvert.INSTANCE.convert(updateReqVO)
            .setStatus(PromotionUtils.calculateActivityStatus(updateReqVO.getEndTime()));
    rewardActivityMapper.updateById(updateObj);
}
|
// Verifies a successful update: the stored record matches the request and the
// recomputed status is WAIT (start time is in the future), rules included.
@Test
public void testUpdateRewardActivity_success() {
    // Mock data.
    RewardActivityDO dbRewardActivity = randomPojo(RewardActivityDO.class, o -> o.setStatus(PromotionActivityStatusEnum.WAIT.getStatus()));
    rewardActivityMapper.insert(dbRewardActivity);// @Sql: insert an existing record first
    // Prepare the request parameters.
    RewardActivityUpdateReqVO reqVO = randomPojo(RewardActivityUpdateReqVO.class, o -> {
        o.setId(dbRewardActivity.getId()); // set the ID to update
        o.setConditionType(randomEle(PromotionConditionTypeEnum.values()).getType());
        o.setProductScope(randomEle(PromotionProductScopeEnum.values()).getScope());
        // Future time window so the recomputed status is "waiting to start".
        o.setStartTime(addTime(Duration.ofDays(1))).setEndTime(addTime(Duration.ofDays(2)));
    });
    // Invoke the service.
    rewardActivityService.updateRewardActivity(reqVO);
    // Verify the record was updated correctly.
    RewardActivityDO rewardActivity = rewardActivityMapper.selectById(reqVO.getId()); // fetch the latest state
    assertPojoEquals(reqVO, rewardActivity, "rules");
    assertEquals(rewardActivity.getStatus(), PromotionActivityStatusEnum.WAIT.getStatus());
    for (int i = 0; i < reqVO.getRules().size(); i++) {
        assertPojoEquals(reqVO.getRules().get(i), rewardActivity.getRules().get(i));
    }
}
|
/**
 * Instantiates {@code userClassName} through its no-arg constructor, verifying
 * that the class implements {@code xface}. Looked-up constructors are cached per
 * class in {@code constructorCache} to avoid repeated reflective lookups.
 *
 * @param userClassName fully-qualified name of the class to instantiate
 * @param xface         interface (or supertype) the class must implement
 * @param classLoader   loader used to resolve the class
 * @return a new instance of the requested class
 * @throws RuntimeException if the class cannot be found, does not implement the
 *         interface, is abstract, lacks a no-arg constructor, is inaccessible,
 *         or its constructor throws
 */
public static <T> T createInstance(String userClassName,
                                   Class<T> xface,
                                   ClassLoader classLoader) {
    Class<?> theCls;
    try {
        theCls = Class.forName(userClassName, true, classLoader);
    } catch (ClassNotFoundException | NoClassDefFoundError cnfe) {
        throw new RuntimeException("User class must be in class path", cnfe);
    }
    if (!xface.isAssignableFrom(theCls)) {
        throw new RuntimeException(userClassName + " does not implement " + xface.getName());
    }
    // asSubclass performs the runtime check itself and returns Class<? extends T>,
    // so the previous unchecked (Class<T>) cast was redundant.
    Class<? extends T> tCls = theCls.asSubclass(xface);
    T result;
    try {
        // Safe: only constructors of classes that passed the asSubclass check
        // above are ever stored in the cache under their class key.
        @SuppressWarnings("unchecked")
        Constructor<? extends T> meth = (Constructor<? extends T>) constructorCache.get(theCls);
        if (null == meth) {
            meth = tCls.getDeclaredConstructor();
            meth.setAccessible(true); // allow non-public no-arg constructors
            constructorCache.put(theCls, meth);
        }
        result = meth.newInstance();
    } catch (InstantiationException ie) {
        throw new RuntimeException("User class must be concrete", ie);
    } catch (NoSuchMethodException e) {
        throw new RuntimeException("User class must have a no-arg constructor", e);
    } catch (IllegalAccessException e) {
        throw new RuntimeException("User class must have a public constructor", e);
    } catch (InvocationTargetException e) {
        throw new RuntimeException("User class constructor throws exception", e);
    }
    return result;
}
|
// Instantiating a class that only declares a one-argument constructor must fail
// with a RuntimeException whose cause is the underlying NoSuchMethodException.
@Test
public void testCreateInstanceNoNoArgConstructor() {
    try {
        // OneArgClass has no no-arg constructor, so reflection cannot build it.
        createInstance(OneArgClass.class.getName(), classLoader);
        fail("Should fail to load class doesn't have no-arg constructor");
    } catch (RuntimeException re) {
        // The reflective failure must be preserved as the exception's cause.
        assertTrue(re.getCause() instanceof NoSuchMethodException);
    }
}
|
/**
 * Matches the request's method and URI against the configured rules in order;
 * the first matching rule determines the response, otherwise the configured
 * default response applies.
 */
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest request) {
    String httpMethod = request.getMethod();
    URI requestUri = request.getUri();
    for (Rule candidate : rules) {
        if (!candidate.matches(httpMethod, requestUri)) {
            continue;
        }
        // First match wins; log at FINE for traceability.
        log.log(Level.FINE, () ->
                String.format("Request '%h' with method '%s' and uri '%s' matched rule '%s'", request, httpMethod, requestUri, candidate.name));
        return responseFor(request, candidate.name, candidate.response);
    }
    // No rule matched: fall back to the default response.
    return responseFor(request, "default", defaultResponse);
}
|
// With a default ALLOW rule and no matching explicit rules, a request must pass
// through the filter untouched.
@Test
void no_filtering_if_request_is_allowed() {
    RuleBasedFilterConfig filterConfig = new RuleBasedFilterConfig.Builder()
            .dryrun(false)
            .defaultRule(new DefaultRule.Builder()
                    .action(DefaultRule.Action.Enum.ALLOW))
            .build();
    Metric metricMock = mock(Metric.class);
    RuleBasedRequestFilter requestFilter = new RuleBasedRequestFilter(metricMock, filterConfig);
    MockResponseHandler handler = new MockResponseHandler();
    requestFilter.filter(request("DELETE", "http://myserver:80/"), handler);
    assertAllowed(handler, metricMock);
}
|
/**
 * Executes this portable (fused) pipeline stage over one Spark partition of
 * windowed inputs and returns the outputs emitted by the SDK harness as tagged
 * {@link RawUnionValue}s.
 *
 * <p>If the stage declares timers, a local {@link InMemoryTimerInternals} is
 * used — this is batch execution, so all data for a key is available — and after
 * the elements are processed, the watermark and processing time are advanced to
 * infinity so every pending timer fires before results are returned.
 */
@Override
public Iterator<RawUnionValue> call(Iterator<WindowedValue<InputT>> inputs) throws Exception {
    SparkPipelineOptions options = pipelineOptions.get().as(SparkPipelineOptions.class);
    // Register standard file systems.
    FileSystems.setDefaultPipelineOptions(options);
    // Do not call processElements if there are no inputs
    // Otherwise, this may cause validation errors (e.g. ParDoTest)
    if (!inputs.hasNext()) {
        return Collections.emptyIterator();
    }
    try (ExecutableStageContext stageContext = contextFactory.get(jobInfo)) {
        ExecutableStage executableStage = ExecutableStage.fromPayload(stagePayload);
        try (StageBundleFactory stageBundleFactory =
            stageContext.getStageBundleFactory(executableStage)) {
            // Output sink for the whole stage. NOTE(review): ConcurrentLinkedQueue
            // suggests the harness may emit from other threads — confirm.
            ConcurrentLinkedQueue<RawUnionValue> collector = new ConcurrentLinkedQueue<>();
            StateRequestHandler stateRequestHandler =
                getStateRequestHandler(
                    executableStage, stageBundleFactory.getProcessBundleDescriptor());
            // Fast path: no timers declared, just process the elements and return.
            if (executableStage.getTimers().size() == 0) {
                ReceiverFactory receiverFactory = new ReceiverFactory(collector, outputMap);
                processElements(stateRequestHandler, receiverFactory, null, stageBundleFactory, inputs);
                return collector.iterator();
            }
            // Used with Batch, we know that all the data is available for this key. We can't use the
            // timer manager from the context because it doesn't exist. So we create one and advance
            // time to the end after processing all elements.
            final InMemoryTimerInternals timerInternals = new InMemoryTimerInternals();
            timerInternals.advanceProcessingTime(Instant.now());
            timerInternals.advanceSynchronizedProcessingTime(Instant.now());
            ReceiverFactory receiverFactory = new ReceiverFactory(collector, outputMap);
            // Timer outputs from the harness are recorded (or cleared) in our local
            // timer internals. NOTE(review): this also stashes the timer's user key
            // in the currentTimerKey field, presumably so fireEligibleTimers below
            // can scope firing to that key — confirm against its contract.
            TimerReceiverFactory timerReceiverFactory =
                new TimerReceiverFactory(
                    stageBundleFactory,
                    (Timer<?> timer, TimerInternals.TimerData timerData) -> {
                        currentTimerKey = timer.getUserKey();
                        if (timer.getClearBit()) {
                            timerInternals.deleteTimer(timerData);
                        } else {
                            timerInternals.setTimer(timerData);
                        }
                    },
                    windowCoder);
            // Process inputs.
            processElements(
                stateRequestHandler, receiverFactory, timerReceiverFactory, stageBundleFactory, inputs);
            // Finish any pending windows by advancing the input watermark to infinity.
            timerInternals.advanceInputWatermark(BoundedWindow.TIMESTAMP_MAX_VALUE);
            // Finally, advance the processing time to infinity to fire any timers.
            timerInternals.advanceProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);
            timerInternals.advanceSynchronizedProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);
            // Now we fire the timers and process elements generated by timers (which may be timers
            // itself)
            while (timerInternals.hasPendingTimers()) {
                // A fresh bundle per firing round; closing it flushes its outputs.
                try (RemoteBundle bundle =
                    stageBundleFactory.getBundle(
                        receiverFactory,
                        timerReceiverFactory,
                        stateRequestHandler,
                        getBundleProgressHandler())) {
                    PipelineTranslatorUtils.fireEligibleTimers(
                        timerInternals, bundle.getTimerReceivers(), currentTimerKey);
                }
            }
            return collector.iterator();
        }
    }
}
|
// After processing a non-empty partition, the stage bundle factory must have
// been used exactly once (bundle + descriptor) and then closed — nothing else.
@Test
public void testStageBundleClosed() throws Exception {
    SparkExecutableStageFunction<Integer, ?> function = getFunction(Collections.emptyMap());
    List<WindowedValue<Integer>> inputs = new ArrayList<>();
    inputs.add(WindowedValue.valueInGlobalWindow(0));
    function.call(inputs.iterator());
    // Each verify() defaults to times(1); close() proves try-with-resources ran.
    verify(stageBundleFactory).getBundle(any(), any(), any(), any(BundleProgressHandler.class));
    verify(stageBundleFactory).getProcessBundleDescriptor();
    verify(stageBundleFactory).close();
    verifyNoMoreInteractions(stageBundleFactory);
}
|
/**
 * Renders the given AST node as SQL text, trimming any trailing newline
 * characters appended by the formatter.
 */
public static String formatSql(final AstNode root) {
    final StringBuilder sql = new StringBuilder();
    new Formatter(sql).process(root, 0);
    // Strip all trailing '\n' characters (and only those), like
    // StringUtils.stripEnd(s, "\n") did.
    int end = sql.length();
    while (end > 0 && sql.charAt(end - 1) == '\n') {
        end--;
    }
    return sql.substring(0, end);
}
|
// A CREATE SOURCE STREAM statement must render the SOURCE keyword, the quoted
// column list, and the WITH properties clause.
@Test
public void shouldFormatCreateSourceStreamStatement() {
    // Given:
    final CreateSourceProperties props = CreateSourceProperties.from(
        new ImmutableMap.Builder<String, Literal>()
            .putAll(SOME_WITH_PROPS.copyOfOriginalLiterals())
            .build()
    );
    // Final 'true' produces the SOURCE variant (visible in the expected SQL);
    // NOTE(review): the two 'false' flags' meaning is not visible here — confirm.
    final CreateStream createStream = new CreateStream(
        TEST,
        ELEMENTS_WITH_KEY,
        false,
        false,
        props,
        true);
    // When:
    final String sql = SqlFormatter.formatSql(createStream);
    // Then:
    assertThat(sql, is("CREATE SOURCE STREAM TEST (`k3` STRING KEY, `Foo` STRING) "
        + "WITH (KAFKA_TOPIC='topic_test', VALUE_FORMAT='JSON');"));
}
|
/**
 * Determines whether the supplied argument can satisfy the declared parameter
 * type without implicit casts (delegates to the three-argument overload with
 * {@code allowCast = false}).
 *
 * @param actual   the concrete SQL argument being passed
 * @param declared the parameter type declared by the function
 * @return {@code true} if the argument is compatible with the declared type
 */
public static boolean areCompatible(final SqlArgument actual, final ParamType declared) {
    return areCompatible(actual, declared, false);
}
|
// Compatibility must hold for matching primitive, interval, array, struct, map,
// and lambda types (struct/map cases also exercise decimal fields).
@Test
public void shouldPassCompatibleSchemas() {
    // Plain primitive match, no casting allowed.
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.STRING),
        ParamTypes.STRING,
        false),
        is(true));
    // Interval unit matches its param type (here with allowCast = true).
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlIntervalUnit.INSTANCE),
        ParamTypes.INTERVALUNIT,
        true),
        is(true));
    // Array element types must line up.
    assertThat(
        ParamTypes.areCompatible(
            SqlArgument.of(SqlTypes.array(SqlTypes.INTEGER)),
            ArrayType.of(ParamTypes.INTEGER),
            false),
        is(true));
    // Struct field names and types must line up (decimal field).
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.struct().field("a", SqlTypes.decimal(1, 1)).build()),
        StructType.builder().field("a", ParamTypes.DECIMAL).build(),
        false),
        is(true));
    // Map key and value types must line up.
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlTypes.map(SqlTypes.INTEGER, SqlTypes.decimal(1, 1))),
        MapType.of(ParamTypes.INTEGER, ParamTypes.DECIMAL),
        false),
        is(true));
    // Lambda argument list and return type must line up.
    assertThat(ParamTypes.areCompatible(
        SqlArgument.of(SqlLambdaResolved.of(ImmutableList.of(SqlTypes.STRING), SqlTypes.STRING)),
        LambdaType.of(ImmutableList.of(ParamTypes.STRING), ParamTypes.STRING),
        false),
        is(true));
}
|
/**
 * Best-effort conversion of {@code originalObject} to {@code expectedClass}.
 *
 * <p>Order of attempts: {@code null} passes through; assignable values are
 * returned unchanged; primitive/boxed pairs are treated as the same type; any
 * value converts to {@code String} via {@code toString()}; otherwise a
 * type-specific conversion is attempted for String/Integer/Double/Float sources.
 *
 * @param expectedClass  the target type
 * @param originalObject the value to convert (may be {@code null})
 * @return the converted value, or {@code null} when the input is {@code null}
 * @throws KiePMMLException when no conversion is available
 */
public static Object convert(Class<?> expectedClass, Object originalObject) {
    if (originalObject == null) {
        return null;
    }
    Class<?> currentClass = originalObject.getClass();
    if (expectedClass.isAssignableFrom(currentClass)) {
        return originalObject;
    }
    if (PrimitiveBoxedUtils.areSameWithBoxing(expectedClass, currentClass)) {
        // Same type modulo boxing: no cast/transformation needed.
        return originalObject;
    }
    if (expectedClass == String.class) {
        return originalObject.toString();
    }
    // getClass() always yields a reference (boxed) type, so the old switch cases
    // for the primitive names "int"/"double"/"float" were unreachable and have
    // been dropped. String/Integer/Double/Float are final classes, so instanceof
    // is equivalent to the former exact-class-name comparison.
    if (originalObject instanceof String) {
        return convertFromString(expectedClass, (String) originalObject);
    }
    if (originalObject instanceof Integer) {
        return convertFromInteger(expectedClass, (Integer) originalObject);
    }
    if (originalObject instanceof Double) {
        return convertFromDouble(expectedClass, (Double) originalObject);
    }
    if (originalObject instanceof Float) {
        return convertFromFloat(expectedClass, (Float) originalObject);
    }
    throw new KiePMMLException(String.format(FAILED_CONVERSION, originalObject,
                                             expectedClass.getName()));
}
|
// Every entry in UNCONVERTIBLE_FROM_STRING maps a source string to a target
// value whose type cannot be produced from that string: convert() must throw
// exactly a KiePMMLException for each one.
@Test
void convertUnconvertibleFromString() {
    UNCONVERTIBLE_FROM_STRING.forEach((sourceString, targetInstance) -> {
        Class<?> expectedClass = targetInstance.getClass();
        try {
            ConverterTypeUtil.convert(expectedClass, sourceString);
            // fail() throws AssertionError, which is not an Exception and
            // therefore escapes the catch below.
            fail(String.format("Expecting KiePMMLException for %s %s", sourceString, targetInstance));
        } catch (Exception e) {
            // Exact class check: subclasses of KiePMMLException would not pass.
            assertThat(e.getClass()).isEqualTo(KiePMMLException.class);
        }
    });
}
|
/**
 * Builds a {@link JobGraph} from the given stream graph using the calling
 * thread's context class loader and a same-thread executor
 * ({@code Runnable::run}).
 *
 * @param streamGraph the stream graph to translate
 * @return the generated job graph
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    // Third constructor argument is intentionally null, as before.
    // NOTE(review): its meaning is not visible from this file — confirm.
    StreamingJobGraphGenerator generator =
            new StreamingJobGraphGenerator(contextClassLoader, streamGraph, null, Runnable::run);
    return generator.createJobGraph();
}
|
// A plain DataStream pipeline with no explicit mode must produce a STREAMING
// job graph.
@Test
void testStreamingJobTypeByDefault() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // Minimal pipeline: one bounded source feeding a discarding sink.
    env.fromData("test").sinkTo(new DiscardingSink<>());
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
    assertThat(jobGraph.getJobType()).isEqualTo(JobType.STREAMING);
}
|
/**
 * Creates a captcha image that distorts its text with a shearing effect.
 *
 * @param width  image width in pixels
 * @param height image height in pixels
 * @return a new {@link ShearCaptcha} of the given size
 */
public static ShearCaptcha createShearCaptcha(int width, int height) {
    return new ShearCaptcha(width, height);
}
|
/**
 * Disabled smoke test: verifies that shear-captcha creation completes without
 * throwing. The original {@code for (int i = 0; i < 1; i++)} loop executed
 * exactly once and added nothing, so it has been removed.
 */
@Test
@Disabled
public void createTest() {
    // NOTE(review): no assertion here — the only observable check is that the
    // call does not throw.
    CaptchaUtil.createShearCaptcha(320, 240);
}
|
/**
 * Loads a namespace and transforms it into its business object, optionally
 * including items that have been deleted.
 *
 * @throws BadRequestException when the namespace does not exist
 */
public NamespaceBO loadNamespaceBO(String appId, Env env, String clusterName,
    String namespaceName, boolean includeDeletedItems) {
    // Fetch the raw namespace first; a missing namespace is a client error.
    NamespaceDTO namespaceDTO = namespaceAPI.loadNamespace(appId, env, clusterName, namespaceName);
    if (namespaceDTO == null) {
        throw BadRequestException.namespaceNotExists(appId, clusterName, namespaceName);
    }
    return transformNamespace2BO(env, namespaceDTO, includeDeletedItems);
}
|
// loadNamespaceBO must merge live items with deleted ones when deleted items are
// included (the overload without the flag), and exclude them when the flag is
// false. The latest release knows k1/k2/k3; live items carry modified k1/k2 and
// k3 is reported as deleted.
@Test
public void testLoadNamespaceBO() {
    String branchName = "branch";
    NamespaceDTO namespaceDTO = createNamespace(testAppId, branchName, testNamespaceName);
    when(namespaceAPI.loadNamespace(any(), any(), any(), any())).thenReturn(namespaceDTO);
    // The released configuration serves as the comparison baseline.
    ReleaseDTO releaseDTO = new ReleaseDTO();
    releaseDTO.setConfigurations("{\"k1\":\"k1\",\"k2\":\"k2\", \"k3\":\"\"}");
    when(releaseService.loadLatestRelease(any(), any(), any(), any())).thenReturn(releaseDTO);
    // Live items: k1 and k2, both with values differing from the release.
    List<ItemDTO> itemDTOList = Lists.newArrayList();
    ItemDTO itemDTO1 = new ItemDTO();
    itemDTO1.setId(1);
    itemDTO1.setNamespaceId(1);
    itemDTO1.setKey("k1");
    itemDTO1.setValue(String.valueOf(1));
    itemDTOList.add(itemDTO1);
    ItemDTO itemDTO2 = new ItemDTO();
    itemDTO2.setId(2);
    itemDTO2.setNamespaceId(2);
    itemDTO2.setKey("k2");
    itemDTO2.setValue(String.valueOf(2));
    itemDTOList.add(itemDTO2);
    when(itemService.findItems(any(), any(), any(), any())).thenReturn(itemDTOList);
    // Deleted items: k3 only.
    List<ItemDTO> deletedItemDTOList = Lists.newArrayList();
    ItemDTO deletedItemDTO = new ItemDTO();
    deletedItemDTO.setId(3);
    deletedItemDTO.setNamespaceId(3);
    deletedItemDTO.setKey("k3");
    deletedItemDTOList.add(deletedItemDTO);
    when(itemService.findDeletedItems(any(), any(), any(), any())).thenReturn(deletedItemDTOList);
    // Overload without the flag: all three keys (k3 included) count as modified.
    NamespaceBO namespaceBO1 = namespaceService.loadNamespaceBO(testAppId, testEnv, testClusterName, testNamespaceName);
    List<String> namespaceKey1 = namespaceBO1.getItems().stream().map(s -> s.getItem().getKey()).collect(Collectors.toList());
    assertThat(namespaceBO1.getItemModifiedCnt()).isEqualTo(3);
    assertThat(namespaceBO1.getItems().size()).isEqualTo(3);
    assertThat(namespaceKey1).isEqualTo(Arrays.asList("k1", "k2", "k3"));
    // includeDeletedItems = false: the deleted k3 must be excluded.
    NamespaceBO namespaceBO2 = namespaceService.loadNamespaceBO(testAppId, testEnv, testClusterName, testNamespaceName, false);
    List<String> namespaceKey2 = namespaceBO2.getItems().stream().map(s -> s.getItem().getKey()).collect(Collectors.toList());
    assertThat(namespaceBO2.getItemModifiedCnt()).isEqualTo(2);
    assertThat(namespaceBO2.getItems().size()).isEqualTo(2);
    assertThat(namespaceKey2).isEqualTo(Arrays.asList("k1", "k2"));
}
|
/**
 * Delivers every request that has a non-blank recipient and a formattable
 * notification, returning the number of successful deliveries. When the set is
 * empty or the channel is not activated, nothing is sent and 0 is returned.
 */
public int deliverAll(Set<EmailDeliveryRequest> deliveries) {
    if (deliveries.isEmpty() || !isActivated()) {
        LOG.debug(SMTP_HOST_NOT_CONFIGURED_DEBUG_MSG);
        return 0;
    }
    int delivered = 0;
    for (EmailDeliveryRequest delivery : deliveries) {
        if (delivery.recipientEmail().isBlank()) {
            continue; // nothing to send without a recipient
        }
        EmailMessage emailMessage = format(delivery.notification());
        if (emailMessage == null) {
            continue; // the formatter produced nothing for this notification
        }
        emailMessage.setTo(delivery.recipientEmail());
        if (deliver(emailMessage)) {
            delivered++;
        }
    }
    return delivered;
}
|
// Requests with an empty/blank recipient must be skipped entirely: nothing is
// counted as delivered, no mail reaches the server, and only the SMTP-host
// activation check touches the configuration.
@Test
@UseDataProvider("emptyStrings")
public void deliverAll_ignores_requests_which_recipient_is_empty(String emptyString) {
    EmailSmtpConfiguration emailSettings = mock(EmailSmtpConfiguration.class);
    when(emailSettings.getSmtpHost()).thenReturn(null);
    // A random (1..10) number of requests, all with a blank recipient.
    Set<EmailDeliveryRequest> requests = IntStream.range(0, 1 + new Random().nextInt(10))
        .mapToObj(i -> new EmailDeliveryRequest(emptyString, mock(Notification.class)))
        .collect(toSet());
    EmailNotificationChannel emailNotificationChannel = new EmailNotificationChannel(emailSettings, server, null, null);
    int count = emailNotificationChannel.deliverAll(requests);
    assertThat(count).isZero();
    // Only the activation check reads the settings; the SMTP server stays idle.
    verify(emailSettings).getSmtpHost();
    verifyNoMoreInteractions(emailSettings);
    assertThat(smtpServer.getMessages()).isEmpty();
}
|
/**
 * Imports a version-1.1 data dump from the given JSON stream.
 *
 * <p>The top-level value must be a JSON object; each known member name is routed
 * to its dedicated reader, unknown members are offered to the registered
 * extensions and skipped only if no extension claims them. After the whole
 * object has been read, object references are fixed up (core and extension) and
 * the temporary id-mapping state is cleared.
 *
 * @param reader positioned at the start of the configuration object
 * @throws IOException on malformed JSON or stream failure
 */
@Override
public void importData(JsonReader reader) throws IOException {
    logger.info("Reading configuration for 1.1");
    // this *HAS* to start as an object
    reader.beginObject();
    while (reader.hasNext()) {
        JsonToken tok = reader.peek();
        switch (tok) {
            case NAME:
                String name = reader.nextName();
                // find out which member it is
                if (name.equals(CLIENTS)) {
                    readClients(reader);
                } else if (name.equals(GRANTS)) {
                    readGrants(reader);
                } else if (name.equals(WHITELISTEDSITES)) {
                    readWhitelistedSites(reader);
                } else if (name.equals(BLACKLISTEDSITES)) {
                    readBlacklistedSites(reader);
                } else if (name.equals(AUTHENTICATIONHOLDERS)) {
                    readAuthenticationHolders(reader);
                } else if (name.equals(ACCESSTOKENS)) {
                    readAccessTokens(reader);
                } else if (name.equals(REFRESHTOKENS)) {
                    readRefreshTokens(reader);
                } else if (name.equals(SYSTEMSCOPES)) {
                    readSystemScopes(reader);
                } else {
                    // Offer the member to the first supporting extension. The old
                    // code called reader.skipValue() unconditionally — even after
                    // an extension had already consumed the value — which
                    // desynchronized the JSON stream; it also duplicated the
                    // supportsVersion check. Now the value is skipped only when
                    // no extension claimed it.
                    // NOTE(review): assumes importExtensionData returns whether
                    // it consumed the value, matching the upstream
                    // MITREidDataServiceExtension contract — confirm.
                    boolean processed = false;
                    for (MITREidDataServiceExtension extension : extensions) {
                        if (extension.supportsVersion(THIS_VERSION)) {
                            processed = extension.importExtensionData(name, reader);
                            if (processed) {
                                break;
                            }
                        }
                    }
                    if (!processed) {
                        // unknown token, skip it
                        reader.skipValue();
                    }
                }
                break;
            case END_OBJECT:
                // the object ended, we're done here
                reader.endObject();
                continue;
            default:
                logger.debug("Found unexpected entry");
                reader.skipValue();
                continue;
        }
    }
    fixObjectReferences();
    // Let the first supporting extension fix up its own object references too.
    for (MITREidDataServiceExtension extension : extensions) {
        if (extension.supportsVersion(THIS_VERSION)) {
            extension.fixExtensionObjectReferences(maps);
            break;
        }
    }
    maps.clearAll();
}
|
// Importing a dump whose only non-empty collection is SYSTEMSCOPES must save
// exactly three scopes whose fields match the JSON payload.
@Test
public void testImportSystemScopes() throws IOException {
    // Expected scopes, reconstructed by hand from the JSON payload below.
    SystemScope scope1 = new SystemScope();
    scope1.setId(1L);
    scope1.setValue("scope1");
    scope1.setDescription("Scope 1");
    scope1.setRestricted(true);
    scope1.setDefaultScope(false);
    scope1.setIcon("glass");
    SystemScope scope2 = new SystemScope();
    scope2.setId(2L);
    scope2.setValue("scope2");
    scope2.setDescription("Scope 2");
    scope2.setRestricted(false);
    scope2.setDefaultScope(false);
    scope2.setIcon("ball");
    SystemScope scope3 = new SystemScope();
    scope3.setId(3L);
    scope3.setValue("scope3");
    scope3.setDescription("Scope 3");
    scope3.setRestricted(false);
    scope3.setDefaultScope(true);
    scope3.setIcon("road");
    // Config document: every collection empty except SYSTEMSCOPES.
    // NOTE(review): the JSON uses the legacy "allowDynReg" flag where the
    // expected objects use "restricted" — presumably mapped (inverted) by the
    // importer; confirm against the reader.
    String configJson = "{" +
        "\"" + MITREidDataService.CLIENTS + "\": [], " +
        "\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
        "\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
        "\"" + MITREidDataService.GRANTS + "\": [], " +
        "\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
        "\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
        "\"" + MITREidDataService.SYSTEMSCOPES + "\": [" +
        "{\"id\":1,\"description\":\"Scope 1\",\"icon\":\"glass\",\"value\":\"scope1\",\"allowDynReg\":false,\"defaultScope\":false}," +
        "{\"id\":2,\"description\":\"Scope 2\",\"icon\":\"ball\",\"value\":\"scope2\",\"allowDynReg\":true,\"defaultScope\":false}," +
        "{\"id\":3,\"description\":\"Scope 3\",\"icon\":\"road\",\"value\":\"scope3\",\"allowDynReg\":true,\"defaultScope\":true}" +
        " ]" +
        "}";
    System.err.println(configJson);
    JsonReader reader = new JsonReader(new StringReader(configJson));
    dataService.importData(reader);
    // Exactly three scopes saved; compare each captured scope field by field.
    verify(sysScopeRepository, times(3)).save(capturedScope.capture());
    List<SystemScope> savedScopes = capturedScope.getAllValues();
    assertThat(savedScopes.size(), is(3));
    assertThat(savedScopes.get(0).getValue(), equalTo(scope1.getValue()));
    assertThat(savedScopes.get(0).getDescription(), equalTo(scope1.getDescription()));
    assertThat(savedScopes.get(0).getIcon(), equalTo(scope1.getIcon()));
    assertThat(savedScopes.get(0).isDefaultScope(), equalTo(scope1.isDefaultScope()));
    assertThat(savedScopes.get(0).isRestricted(), equalTo(scope1.isRestricted()));
    assertThat(savedScopes.get(1).getValue(), equalTo(scope2.getValue()));
    assertThat(savedScopes.get(1).getDescription(), equalTo(scope2.getDescription()));
    assertThat(savedScopes.get(1).getIcon(), equalTo(scope2.getIcon()));
    assertThat(savedScopes.get(1).isDefaultScope(), equalTo(scope2.isDefaultScope()));
    assertThat(savedScopes.get(1).isRestricted(), equalTo(scope2.isRestricted()));
    assertThat(savedScopes.get(2).getValue(), equalTo(scope3.getValue()));
    assertThat(savedScopes.get(2).getDescription(), equalTo(scope3.getDescription()));
    assertThat(savedScopes.get(2).getIcon(), equalTo(scope3.getIcon()));
    assertThat(savedScopes.get(2).isDefaultScope(), equalTo(scope3.isDefaultScope()));
    assertThat(savedScopes.get(2).isRestricted(), equalTo(scope3.isRestricted()));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.