| focal_method | test_case |
|---|---|
public static Builder newBuilder() {
return new Builder();
}
|
@Test
public void testBuilderThrowsExceptionWhenHeartbeatMillisMissing() {
assertThrows(
"heartbeatMillis",
IllegalStateException.class,
() ->
PartitionMetadata.newBuilder()
.setPartitionToken(PARTITION_TOKEN)
.setParentTokens(Sets.newHashSet(PARENT_TOKEN))
.setStartTimestamp(START_TIMESTAMP)
.setEndTimestamp(END_TIMESTAMP)
.setState(State.CREATED)
.setWatermark(WATERMARK)
.setCreatedAt(CREATED_AT)
.build());
}
|
public static int getXpForLevel(int level)
{
if (level < 1 || level > MAX_VIRT_LEVEL)
{
throw new IllegalArgumentException(level + " is not a valid level");
}
// XP_FOR_LEVEL[0] is XP for level 1
return XP_FOR_LEVEL[level - 1];
}
|
@Test(expected = IllegalArgumentException.class)
public void testGetXpForHighLevel()
{
int xp = Experience.getXpForLevel(Integer.MAX_VALUE);
}
|
@GetMapping("")
@RequiresPermissions("system:dict:list")
public ShenyuAdminResult queryDicts(final String type, final String dictCode, final String dictName,
@RequestParam @NotNull final Integer currentPage,
@RequestParam @NotNull final Integer pageSize) {
final ShenyuDictQuery query = new ShenyuDictQuery(type, dictCode, dictName, new PageParameter(currentPage, pageSize));
return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, shenyuDictService.listByPage(query));
}
|
@Test
public void testQueryDicts() throws Exception {
final PageParameter pageParameter = new PageParameter();
final ShenyuDictQuery shenyuDictQuery = new ShenyuDictQuery("1", "t", "t_n", pageParameter);
final CommonPager<ShenyuDictVO> commonPager = new CommonPager<>(pageParameter, Collections.singletonList(shenyuDictVO));
given(this.shenyuDictService.listByPage(shenyuDictQuery)).willReturn(commonPager);
this.mockMvc.perform(MockMvcRequestBuilders.get("/shenyu-dict")
.param("type", "1")
.param("dictCode", "t")
.param("dictName", "t_n")
.param("currentPage", Integer.toString(pageParameter.getCurrentPage()))
.param("pageSize", Integer.toString(pageParameter.getPageSize())))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
.andExpect(jsonPath("$.data.dataList[0].id", is(commonPager.getDataList().get(0).getId())))
.andReturn();
}
|
@PATCH
@Path("/{connector}/config")
public Response patchConnectorConfig(final @PathParam("connector") String connector,
final @Context HttpHeaders headers,
final @Parameter(hidden = true) @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfigPatch) throws Throwable {
FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();
herder.patchConnectorConfig(connector, connectorConfigPatch, cb);
Herder.Created<ConnectorInfo> createdInfo = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/config",
"PATCH", headers, connectorConfigPatch, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);
return Response.ok().entity(createdInfo.result()).build();
}
|
@Test
public void testPatchConnectorConfigLeaderRedirect() throws Throwable {
final ArgumentCaptor<Callback<Herder.Created<ConnectorInfo>>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackNotLeaderException(cb)
.when(herder).patchConnectorConfig(eq(CONNECTOR_NAME), eq(CONNECTOR_CONFIG_PATCH), cb.capture());
when(restClient.httpRequest(eq(LEADER_URL + "connectors/" + CONNECTOR_NAME + "/config?forward=false"), eq("PATCH"), isNull(), eq(CONNECTOR_CONFIG_PATCH), any()))
.thenReturn(new RestClient.HttpResponse<>(200, new HashMap<>(CONNECTOR_CONFIG_PATCHED), null));
connectorsResource.patchConnectorConfig(CONNECTOR_NAME, NULL_HEADERS, FORWARD, CONNECTOR_CONFIG_PATCH);
}
|
private Map<String, List<URIRegisterDTO>> buildData(final Collection<URIRegisterDTO> dataList) {
Map<String, List<URIRegisterDTO>> resultMap = new HashMap<>(8);
for (URIRegisterDTO dto : dataList) {
String contextPath = dto.getContextPath();
String key = StringUtils.isNotEmpty(contextPath) ? contextPath : dto.getAppName();
if (StringUtils.isNotEmpty(key)) {
if (resultMap.containsKey(key)) {
List<URIRegisterDTO> existList = resultMap.get(key);
existList.add(dto);
resultMap.put(key, existList);
} else {
resultMap.put(key, Lists.newArrayList(dto));
}
}
}
return resultMap;
}
|
@Test
public void testBuildData() {
try {
List<URIRegisterDTO> list = new ArrayList<>();
list.add(URIRegisterDTO.builder().appName("test1").build());
list.add(URIRegisterDTO.builder().appName("test2").build());
Method testMethod = uriRegisterExecutorSubscriber.getClass().getDeclaredMethod("buildData", Collection.class);
testMethod.setAccessible(true);
Map<String, List<URIRegisterDTO>> result = (Map) testMethod.invoke(uriRegisterExecutorSubscriber, list);
assertEquals(2, result.size());
list.add(URIRegisterDTO.builder().appName("test1").build());
result = (Map) testMethod.invoke(uriRegisterExecutorSubscriber, list);
assertEquals(2, result.size());
} catch (Exception e) {
throw new ShenyuException(e.getCause());
}
}
|
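A condensed sketch of the grouping rule in `buildData` above (hypothetical standalone `BuildDataSketch` and `Dto` types, not the Shenyu classes): each entry is keyed by `contextPath` when present, falling back to `appName`, and entries with neither are dropped. `computeIfAbsent` expresses the same behavior as the `containsKey`/`get`/`put` sequence.

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class BuildDataSketch {
    record Dto(String contextPath, String appName) {}

    static Map<String, List<Dto>> buildData(Collection<Dto> dataList) {
        Map<String, List<Dto>> result = new HashMap<>();
        for (Dto dto : dataList) {
            // prefer contextPath; fall back to appName
            String key = dto.contextPath() != null && !dto.contextPath().isEmpty()
                    ? dto.contextPath() : dto.appName();
            if (key != null && !key.isEmpty()) {
                result.computeIfAbsent(key, k -> new ArrayList<>()).add(dto);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<Dto> list = List.of(new Dto(null, "test1"), new Dto(null, "test2"),
                new Dto(null, "test1"));
        // two keys, "test1" (two entries) and "test2" (one), matching the test above
        System.out.println(buildData(list));
    }
}
```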
@Override
public void scanLedgers(OffloadedLedgerMetadataConsumer consumer, Map<String,
String> offloadDriverMetadata) throws ManagedLedgerException {
BlobStoreLocation bsKey = getBlobStoreLocation(offloadDriverMetadata);
String endpoint = bsKey.getEndpoint();
String readBucket = bsKey.getBucket();
log.info("Scanning bucket {}, bsKey {}, location {} endpoint{} ", readBucket, bsKey,
config.getBlobStoreLocation(), endpoint);
BlobStore readBlobstore = getBlobStore(config.getBlobStoreLocation());
int batchSize = 100;
String bucketName = config.getBucket();
String marker = null;
do {
marker = scanContainer(consumer, readBlobstore, bucketName, marker, batchSize);
} while (marker != null);
}
|
@Test(timeOut = 600000) // 10 minutes.
public void testScanLedgers() throws Exception {
ReadHandle toWrite = buildReadHandle(DEFAULT_BLOCK_SIZE, 3);
LedgerOffloader offloader = getOffloader();
UUID uuid = UUID.randomUUID();
offloader.offload(toWrite, uuid, new HashMap<>()).get();
List<OffloadedLedgerMetadata> result = new ArrayList<>();
offloader.scanLedgers(
(m) -> {
log.info("found {}", m);
if (m.getLedgerId() == toWrite.getId()) {
result.add(m);
}
return true;
}, offloader.getOffloadDriverMetadata());
assertEquals(2, result.size());
// data and index
OffloadedLedgerMetadata offloadedLedgerMetadata = result.get(0);
assertEquals(toWrite.getId(), offloadedLedgerMetadata.getLedgerId());
OffloadedLedgerMetadata offloadedLedgerMetadata2 = result.get(1);
assertEquals(toWrite.getId(), offloadedLedgerMetadata2.getLedgerId());
}
|
public int[] findMatchingLines(List<String> left, List<String> right) {
int[] index = new int[right.size()];
int dbLine = left.size();
int reportLine = right.size();
try {
PathNode node = new MyersDiff<String>().buildPath(left, right);
while (node.prev != null) {
PathNode prevNode = node.prev;
if (!node.isSnake()) {
// additions
reportLine -= (node.j - prevNode.j);
// removals
dbLine -= (node.i - prevNode.i);
} else {
// matches
for (int i = node.i; i > prevNode.i; i--) {
index[reportLine - 1] = dbLine;
reportLine--;
dbLine--;
}
}
node = prevNode;
}
} catch (DifferentiationFailedException e) {
LOG.error("Error finding matching lines", e);
return index;
}
return index;
}
|
@Test
public void shouldDetectNewLinesInMiddleOfFile() {
List<String> database = new ArrayList<>();
database.add("line - 0");
database.add("line - 1");
database.add("line - 2");
database.add("line - 3");
List<String> report = new ArrayList<>();
report.add("line - 0");
report.add("line - 1");
report.add("line - new");
report.add("line - new");
report.add("line - 2");
report.add("line - 3");
int[] diff = new SourceLinesDiffFinder().findMatchingLines(database, report);
assertThat(diff).containsExactly(1, 2, 0, 0, 3, 4);
}
|
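A minimal standalone sketch (hypothetical `LineMatchSketch` class) of the index contract that `findMatchingLines` produces: for each line of the new file ("report"), record the matching 1-based line number in the old file ("database"), or 0 when the line is new. Plain LCS dynamic programming stands in here for the `MyersDiff` path walk; it yields the same mapping for the test case above.

```java
import java.util.Arrays;
import java.util.List;

public class LineMatchSketch {
    static int[] matchingLines(List<String> left, List<String> right) {
        int n = left.size(), m = right.size();
        // lcs[i][j] = length of the longest common subsequence of left[i..] and right[j..]
        int[][] lcs = new int[n + 1][m + 1];
        for (int i = n - 1; i >= 0; i--) {
            for (int j = m - 1; j >= 0; j--) {
                lcs[i][j] = left.get(i).equals(right.get(j))
                        ? lcs[i + 1][j + 1] + 1
                        : Math.max(lcs[i + 1][j], lcs[i][j + 1]);
            }
        }
        int[] index = new int[m];
        int i = 0, j = 0;
        while (i < n && j < m) {
            if (left.get(i).equals(right.get(j))) {
                index[j++] = ++i;                 // matched: store 1-based db line
            } else if (lcs[i + 1][j] >= lcs[i][j + 1]) {
                i++;                              // line removed from the old file
            } else {
                j++;                              // new line: index[j] stays 0
            }
        }
        return index;
    }

    public static void main(String[] args) {
        int[] index = matchingLines(
                List.of("line - 0", "line - 1", "line - 2", "line - 3"),
                List.of("line - 0", "line - 1", "line - new", "line - new",
                        "line - 2", "line - 3"));
        System.out.println(Arrays.toString(index)); // [1, 2, 0, 0, 3, 4]
    }
}
```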
@Override
public String pathPattern() {
return buildExtensionPathPattern(scheme) + "/{name}";
}
|
@Test
void shouldBuildPathPatternCorrectly() {
var scheme = Scheme.buildFromType(FakeExtension.class);
var getHandler = new ExtensionGetHandler(scheme, client);
var pathPattern = getHandler.pathPattern();
assertEquals("/apis/fake.halo.run/v1alpha1/fakes/{name}", pathPattern);
}
|
protected void writeDetailsAsHtml(final RouteStatistic routeStatistic, final File outputPath) throws IOException {
Map<String, Object> data = new HashMap<>();
data.put("route", routeStatistic);
data.put("eips", routeStatistic.getEipStatisticMap().entrySet());
String rendered = TemplateRenderer.render(DETAILS_FILE, data);
fileUtil.write(rendered, routeStatistic.getId(), outputPath);
}
|
@Test
public void testWriteDetailsAsHtml() throws IllegalAccessException, IOException {
@SuppressWarnings("unchecked")
Map<String, RouteStatistic> routeStatisticMap
= (Map<String, RouteStatistic>) FieldUtils.readDeclaredField(processor, "routeStatisticMap", true);
File outputPath = htmlPath();
Path outputPathAsPath = Paths.get(outputPath.getPath());
if (!Files.exists(outputPathAsPath)) {
Files.createDirectories(outputPathAsPath);
}
processor.parseAllTestResults(xmlPath());
processor.gatherBestRouteCoverages();
processor.generateRouteStatistics("test project", outputPath);
processor.generateEipStatistics();
RouteStatistic routeStatistic = processor.getRouteStatistic(GREETINGS_ROUTE);
assertAll(
() -> assertNotNull(routeStatistic),
() -> assertNotNull(routeStatistic.getEipStatisticMap()),
() -> assertEquals(3, routeStatistic.getEipStatisticMap().size()));
processor.writeDetailsAsHtml(routeStatistic, outputPath);
}
|
public void batchOn(Runnable runnable, ParSeqBasedFluentClient... fluentClients) throws Exception
{
List<ParSeqBasedFluentClient> batchedClients =
fluentClients.length > 0 ? new ArrayList<>(Arrays.asList(fluentClients))
: _fluentClientAll;
for (ParSeqBasedFluentClient fluentClient : batchedClients)
{
fluentClient.setExecutionGroup(this);
}
try
{
runnable.run();
this.execute();
} finally
{
for (ParSeqBasedFluentClient fluentClient : batchedClients)
{
fluentClient.removeExecutionGroup();
}
}
}
|
@Test
public void testCallableExecution_Nested() throws Exception
{
MockBatchableResource client = new MockBatchableResource();
eg.batchOn(() ->
{
Assert.assertTrue(client.validateExecutionGroupFromContext(eg)); //outer - eg
ExecutionGroup eg2 = new ExecutionGroup(_engine);
try
{
eg2.batchOn(() ->
{
Assert.assertTrue(client.validateExecutionGroupFromContext(eg2)); // inner - eg2
}, client);
} catch (Exception ignored)
{
}
Assert.assertTrue(client.validateExecutionGroupFromContext(eg)); // outer - eg
}, client);
}
|
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
try (AutoDetectReader reader = new AutoDetectReader(CloseShieldInputStream.wrap(stream),
metadata, getEncodingDetector(context))) {
Charset charset = reader.getCharset();
String mediaType = metadata.get(Metadata.CONTENT_TYPE);
String name = metadata.get(TikaCoreProperties.RESOURCE_NAME_KEY);
if (mediaType != null && name != null) {
MediaType type = MediaType.parse(mediaType);
metadata.set(Metadata.CONTENT_TYPE, type.toString());
metadata.set(Metadata.CONTENT_ENCODING, charset.name());
StringBuilder out = new StringBuilder();
String line;
int nbLines = 0;
while ((line = reader.readLine()) != null) {
out.append(line).append(System.getProperty("line.separator"));
String author = parserAuthor(line);
if (author != null) {
metadata.add(TikaCoreProperties.CREATOR, author);
}
nbLines++;
}
metadata.set("LoC", String.valueOf(nbLines));
Renderer renderer = getRenderer(type.toString());
String codeAsHtml = renderer.highlight(name, out.toString(), charset.name(), false);
Schema schema = context.get(Schema.class, HTML_SCHEMA);
org.ccil.cowan.tagsoup.Parser parser = new org.ccil.cowan.tagsoup.Parser();
parser.setProperty(org.ccil.cowan.tagsoup.Parser.schemaProperty, schema);
parser.setContentHandler(handler);
parser.parse(new InputSource(new StringReader(codeAsHtml)));
}
}
}
|
@Test
public void testNoMarkupInToTextHandler() throws Exception {
ContentHandler contentHandler = new ToTextContentHandler();
ParseContext parseContext = new ParseContext();
try (TikaInputStream tis = TikaInputStream
.get(getResourceAsStream("/test-documents/testJAVA.java"))) {
AUTO_DETECT_PARSER
.parse(tis, contentHandler, createMetadata("text/x-java-source"), parseContext);
}
String strContent = contentHandler.toString();
assertContains("public class HelloWorld {", strContent);
assertNotContained("background-color", strContent);
}
|
public FontMetrics parse() throws IOException
{
return parseFontMetric(false);
}
|
@Test
void testHelveticaKernPairs() throws IOException
{
AFMParser parser = new AFMParser(
new FileInputStream("src/test/resources/afm/Helvetica.afm"));
FontMetrics fontMetrics = parser.parse();
// KernPairs
List<KernPair> kernPairs = fontMetrics.getKernPairs();
assertEquals(2705, kernPairs.size());
// check "KPX A Ucircumflex -50"
checkKernPair(kernPairs, "A", "Ucircumflex", -50, 0);
// check "KPX W agrave -40"
checkKernPair(kernPairs, "W", "agrave", -40, 0);
// KernPairs0
assertTrue(fontMetrics.getKernPairs0().isEmpty());
// KernPairs1
assertTrue(fontMetrics.getKernPairs1().isEmpty());
// composite data
assertTrue(fontMetrics.getComposites().isEmpty());
}
|
public boolean isExcluded(Path absolutePath, Path relativePath, InputFile.Type type) {
PathPattern[] exclusionPatterns = InputFile.Type.MAIN == type ? mainExclusionsPattern : testExclusionsPattern;
for (PathPattern pattern : exclusionPatterns) {
if (pattern.match(absolutePath, relativePath)) {
return true;
}
}
return false;
}
|
@Test
public void should_handleAliasForTestExclusionsProperty() {
settings.setProperty(PROJECT_TESTS_EXCLUSIONS_PROPERTY, "**/*Dao.java");
AbstractExclusionFilters filter = new AbstractExclusionFilters(analysisWarnings, settings.asConfig()::getStringArray) {
};
IndexedFile indexedFile = new DefaultIndexedFile("foo", moduleBaseDir, "test/main/java/com/mycompany/FooDao.java", null);
assertThat(filter.isExcluded(indexedFile.path(), Paths.get(indexedFile.relativePath()), InputFile.Type.TEST)).isTrue();
indexedFile = new DefaultIndexedFile("foo", moduleBaseDir, "test/main/java/com/mycompany/Foo.java", null);
assertThat(filter.isExcluded(indexedFile.path(), Paths.get(indexedFile.relativePath()), InputFile.Type.TEST)).isFalse();
String expectedWarn = "Use of sonar.tests.exclusions detected. " +
"While being taken into account, the only supported property is sonar.test.exclusions. Consider updating your configuration.";
assertThat(logTester.logs(Level.WARN)).hasSize(1)
.contains(expectedWarn);
verify(analysisWarnings).addUnique(expectedWarn);
}
|
@Override
public KTable<K, Long> count() {
return doCount(NamedInternal.empty(), Materialized.with(keySerde, Serdes.Long()));
}
|
@Test
public void shouldThrowNullPointerOnCountWhenMaterializedIsNull() {
assertThrows(NullPointerException.class, () -> groupedStream.count((Materialized<String, Long, KeyValueStore<Bytes, byte[]>>) null));
}
|
static SortKey[] rangeBounds(
int numPartitions, Comparator<StructLike> comparator, SortKey[] samples) {
// sort the keys first
Arrays.sort(samples, comparator);
int numCandidates = numPartitions - 1;
SortKey[] candidates = new SortKey[numCandidates];
int step = (int) Math.ceil((double) samples.length / numPartitions);
int position = step - 1;
int numChosen = 0;
while (position < samples.length && numChosen < numCandidates) {
SortKey candidate = samples[position];
// skip duplicate values
if (numChosen > 0 && candidate.equals(candidates[numChosen - 1])) {
// linear probe for the next distinct value
position += 1;
} else {
candidates[numChosen] = candidate;
position += step;
numChosen += 1;
}
}
return candidates;
}
|
@Test
public void testRangeBoundsSkipDuplicates() {
// step is 3 = ceiling(11/4)
assertThat(
SketchUtil.rangeBounds(
4,
SORT_ORDER_COMPARTOR,
new SortKey[] {
CHAR_KEYS.get("a"),
CHAR_KEYS.get("b"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("g"),
CHAR_KEYS.get("h"),
CHAR_KEYS.get("i"),
CHAR_KEYS.get("j"),
CHAR_KEYS.get("k"),
}))
// skipped duplicate c's
.containsExactly(CHAR_KEYS.get("c"), CHAR_KEYS.get("g"), CHAR_KEYS.get("j"));
}
|
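A minimal sketch of the sample-based range partitioning in `rangeBounds`, with plain `String` keys standing in for `SortKey` (hypothetical `RangeBoundsSketch` class). With 11 sorted samples and 4 partitions, `step = ceil(11/4) = 3`, so candidate positions are 2, 5, 8; a duplicate at a candidate position is skipped by probing one element forward, reproducing the `[c, g, j]` result of the test above.

```java
import java.util.Arrays;

public class RangeBoundsSketch {
    static String[] rangeBounds(int numPartitions, String[] samples) {
        Arrays.sort(samples);
        String[] bounds = new String[numPartitions - 1];
        int step = (int) Math.ceil((double) samples.length / numPartitions);
        int position = step - 1;
        int chosen = 0;
        while (position < samples.length && chosen < numPartitions - 1) {
            String candidate = samples[position];
            if (chosen > 0 && candidate.equals(bounds[chosen - 1])) {
                position += 1;            // linear probe past a duplicate
            } else {
                bounds[chosen++] = candidate;
                position += step;
            }
        }
        return Arrays.copyOf(bounds, chosen);
    }

    public static void main(String[] args) {
        String[] samples = {"a", "b", "c", "c", "c", "c", "g", "h", "i", "j", "k"};
        // prints [c, g, j], mirroring the duplicate-skipping test above
        System.out.println(Arrays.toString(rangeBounds(4, samples)));
    }
}
```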
public static synchronized TransformServiceLauncher forProject(
@Nullable String projectName, int port, @Nullable String pythonRequirementsFile)
throws IOException {
if (projectName == null || projectName.isEmpty()) {
projectName = DEFAULT_PROJECT_NAME;
}
if (!launchers.containsKey(projectName)) {
launchers.put(
projectName, new TransformServiceLauncher(projectName, port, pythonRequirementsFile));
}
return launchers.get(projectName);
}
|
@Test
public void testLauncherCreatesDependenciesDir() throws IOException {
String projectName = UUID.randomUUID().toString();
Path expectedTempDir = Paths.get(System.getProperty("java.io.tmpdir"), projectName);
File file = expectedTempDir.toFile();
file.deleteOnExit();
TransformServiceLauncher.forProject(projectName, 12345, null);
Path expectedCredentialsDir = Paths.get(expectedTempDir.toString(), "dependencies_dir");
Assert.assertTrue(expectedCredentialsDir.toFile().exists());
}
|
@Override
public CloseableIterator<ColumnarBatch> readParquetFiles(
CloseableIterator<FileStatus> fileIter,
StructType physicalSchema,
Optional<Predicate> predicate) {
return new CloseableIterator<>() {
private int currentReadColumnarBatchIndex = -1;
private List<ColumnarBatch> currentColumnarBatchList = Lists.newArrayList();
private String currentFile;
@Override
public void close() {
Utils.closeCloseables(fileIter);
currentReadColumnarBatchIndex = -1;
currentColumnarBatchList = null;
}
@Override
public boolean hasNext() {
if (hasNextToConsume()) {
return true;
} else {
currentReadColumnarBatchIndex = -1;
currentColumnarBatchList = Lists.newArrayList();
// There is no file in reading or the current file being read has no more data.
// Initialize the next file reader or return false if there are no more files to
// read.
try {
tryGetNextFileColumnarBatch();
} catch (Exception ex) {
throw new KernelEngineException(
format("Error reading Parquet file: %s", currentFile), ex);
}
if (hasNextToConsume()) {
return true;
} else if (fileIter.hasNext()) {
// recurse since it's possible the loaded file is empty
return hasNext();
} else {
return false;
}
}
}
private boolean hasNextToConsume() {
return currentReadColumnarBatchIndex != -1 && !currentColumnarBatchList.isEmpty() &&
currentReadColumnarBatchIndex < currentColumnarBatchList.size();
}
@Override
public ColumnarBatch next() {
return currentColumnarBatchList.get(currentReadColumnarBatchIndex++);
}
private void tryGetNextFileColumnarBatch() throws ExecutionException {
if (fileIter.hasNext()) {
currentFile = fileIter.next().getPath();
if (LogReplay.containsAddOrRemoveFileActions(physicalSchema)) {
Pair<String, StructType> key = Pair.create(currentFile, physicalSchema);
if (checkpointCache.getIfPresent(key) != null || predicate.isEmpty()) {
currentColumnarBatchList = checkpointCache.get(key);
} else {
currentColumnarBatchList = readParquetFile(currentFile, physicalSchema, hadoopConf);
}
} else {
currentColumnarBatchList = readParquetFile(currentFile, physicalSchema, hadoopConf);
}
currentReadColumnarBatchIndex = 0;
}
}
};
}
|
@Test
public void testParquetMetadata() {
String path = deltaLakePath + "/00000000000000000030.checkpoint.parquet";
DeltaLakeParquetHandler deltaLakeParquetHandler = new DeltaLakeParquetHandler(hdfsConfiguration, checkpointCache);
StructType readSchema = LogReplay.getAddRemoveReadSchema(true);
FileStatus fileStatus = FileStatus.of(path, 0, 0);
List<Row> addRows = Lists.newArrayList();
try (CloseableIterator<ColumnarBatch> parquetIter = deltaLakeParquetHandler.readParquetFiles(
Utils.singletonCloseableIterator(fileStatus), readSchema, Optional.empty())) {
while (parquetIter.hasNext()) {
ColumnarBatch columnarBatch = parquetIter.next();
ColumnVector addsVector = columnarBatch.getColumnVector(ADD_FILE_ORDINAL);
for (int rowId = 0; rowId < addsVector.getSize(); rowId++) {
if (addsVector.isNullAt(rowId)) {
continue;
}
getAddFilePath(addsVector, rowId);
}
try (CloseableIterator<Row> rows = columnarBatch.getRows()) {
while (rows.hasNext()) {
Row row = rows.next();
addRows.add(row);
}
}
}
} catch (IOException e) {
throw new RuntimeException(e);
}
Assert.assertEquals(32, addRows.size());
List<String> pathList = Lists.newArrayList();
Set<String> partitionValues = Sets.newHashSet();
for (Row scanRow : addRows) {
if (scanRow.isNullAt(InternalScanFileUtils.ADD_FILE_ORDINAL)) {
continue;
}
Row addFile = getAddFileEntry(scanRow);
pathList.add(addFile.getString(ADD_FILE_PATH_ORDINAL));
partitionValues.addAll(InternalScanFileUtils.getPartitionValues(scanRow).values());
}
Assert.assertEquals(30, pathList.size());
Assert.assertEquals(18, partitionValues.size());
Assert.assertFalse(checkpointCache.asMap().isEmpty());
Assert.assertTrue(checkpointCache.asMap().containsKey(Pair.create(path, readSchema)));
}
|
@Bean
public RetryRegistry retryRegistry(RetryConfigurationProperties retryConfigurationProperties,
EventConsumerRegistry<RetryEvent> retryEventConsumerRegistry,
RegistryEventConsumer<Retry> retryRegistryEventConsumer,
@Qualifier("compositeRetryCustomizer") CompositeCustomizer<RetryConfigCustomizer> compositeRetryCustomizer) {
RetryRegistry retryRegistry = createRetryRegistry(retryConfigurationProperties,
retryRegistryEventConsumer, compositeRetryCustomizer);
registerEventConsumer(retryRegistry, retryEventConsumerRegistry,
retryConfigurationProperties);
initRetryRegistry(retryConfigurationProperties, compositeRetryCustomizer, retryRegistry);
return retryRegistry;
}
|
@Test
public void testCreateRetryRegistryWithUnknownConfig() {
RetryConfigurationProperties retryConfigurationProperties = new RetryConfigurationProperties();
InstanceProperties instanceProperties = new InstanceProperties();
instanceProperties.setBaseConfig("unknownConfig");
retryConfigurationProperties.getInstances().put("backend", instanceProperties);
RetryConfiguration retryConfiguration = new RetryConfiguration();
DefaultEventConsumerRegistry<RetryEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
assertThatThrownBy(() -> retryConfiguration
.retryRegistry(retryConfigurationProperties, eventConsumerRegistry,
new CompositeRegistryEventConsumer<>(emptyList()), compositeRetryCustomizerTest()))
.isInstanceOf(ConfigurationNotFoundException.class)
.hasMessage("Configuration with name 'unknownConfig' does not exist");
}
|
public void prepareIndices(final String idField, final Collection<String> sortFields, final Collection<String> caseInsensitiveStringSortFields) {
if (!sortFields.containsAll(caseInsensitiveStringSortFields)) {
throw new IllegalArgumentException("Case Insensitive String Sort Fields should be a subset of all Sort Fields ");
}
final var existingIndices = db.listIndexes();
for (String sortField : sortFields) {
if (!sortField.equals(idField)) { //id has index by default
final var existingIndex = getExistingIndex(existingIndices, sortField);
if (caseInsensitiveStringSortFields.contains(sortField)) { //index string fields with collation for more efficient case-insensitive sorting
if (existingIndex.isEmpty()) {
createCaseInsensitiveStringIndex(sortField);
} else if (existingIndex.get().get(COLLATION_KEY) == null) {
//replace simple index with "collation" index
dropIndex(sortField);
createCaseInsensitiveStringIndex(sortField);
}
} else {
if (existingIndex.isEmpty()) {
createSingleFieldIndex(sortField);
} else if (existingIndex.get().get(COLLATION_KEY) != null) {
//replace "collation" index with simple one
dropIndex(sortField);
createSingleFieldIndex(sortField);
}
}
}
}
}
|
@Test
void doesNotCreateCollationIndexIfProperOneExists() {
rawdb.createIndex(Indexes.ascending("summary"), new IndexOptions().collation(Collation.builder().locale("en").build()));
toTest.prepareIndices("id", List.of("summary"), List.of("summary"));
verify(db, never()).createIndex(any());
verify(db, never()).createIndex(any(Bson.class), any(IndexOptions.class));
}
|
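A pure-logic sketch (hypothetical `IndexReconcileSketch` names, no MongoDB driver) of the reconciliation rule in `prepareIndices`: for each sort field, compare the existing index (if any) against whether a case-insensitive collation index is wanted, and decide to create, replace, or leave the index alone.

```java
public class IndexReconcileSketch {
    enum Action { NONE, CREATE_COLLATION, REPLACE_WITH_COLLATION, CREATE_SIMPLE, REPLACE_WITH_SIMPLE }

    static Action decide(boolean indexExists, boolean existingHasCollation, boolean wantCaseInsensitive) {
        if (wantCaseInsensitive) {
            if (!indexExists) return Action.CREATE_COLLATION;
            // replace a simple index with a collation one; keep a proper collation index
            return existingHasCollation ? Action.NONE : Action.REPLACE_WITH_COLLATION;
        }
        if (!indexExists) return Action.CREATE_SIMPLE;
        // replace a collation index with a simple one; keep a proper simple index
        return existingHasCollation ? Action.REPLACE_WITH_SIMPLE : Action.NONE;
    }

    public static void main(String[] args) {
        // mirrors doesNotCreateCollationIndexIfProperOneExists: a collation index
        // already exists for a case-insensitive field, so nothing is created
        System.out.println(decide(true, true, true)); // NONE
    }
}
```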
@ScalarOperator(GREATER_THAN_OR_EQUAL)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThanOrEqual(@SqlType(StandardTypes.INTEGER) long left, @SqlType(StandardTypes.INTEGER) long right)
{
return left >= right;
}
|
@Test
public void testGreaterThanOrEqual()
{
assertFunction("INTEGER'37' >= INTEGER'37'", BOOLEAN, true);
assertFunction("INTEGER'37' >= INTEGER'17'", BOOLEAN, true);
assertFunction("INTEGER'17' >= INTEGER'37'", BOOLEAN, false);
assertFunction("INTEGER'17' >= INTEGER'17'", BOOLEAN, true);
}
|
public void commitLogical(CommittedBundle<?> bundle, MetricUpdates updates) {
for (MetricUpdate<Long> counter : updates.counterUpdates()) {
counters.get(counter.getKey()).commitLogical(bundle, counter.getUpdate());
}
for (MetricUpdate<DistributionData> distribution : updates.distributionUpdates()) {
distributions.get(distribution.getKey()).commitLogical(bundle, distribution.getUpdate());
}
for (MetricUpdate<GaugeData> gauge : updates.gaugeUpdates()) {
gauges.get(gauge.getKey()).commitLogical(bundle, gauge.getUpdate());
}
for (MetricUpdate<StringSetData> sSet : updates.stringSetUpdates()) {
stringSet.get(sSet.getKey()).commitLogical(bundle, sSet.getUpdate());
}
}
|
@SuppressWarnings("unchecked")
@Test
public void testApplyCommittedNoFilter() {
metrics.commitLogical(
bundle1,
MetricUpdates.create(
ImmutableList.of(
MetricUpdate.create(MetricKey.create("step1", NAME1), 5L),
MetricUpdate.create(MetricKey.create("step1", NAME2), 8L)),
ImmutableList.of(
MetricUpdate.create(
MetricKey.create("step1", NAME1), DistributionData.create(8, 2, 3, 5))),
ImmutableList.of(
MetricUpdate.create(MetricKey.create("step1", NAME4), GaugeData.create(15L))),
ImmutableList.of(
MetricUpdate.create(
MetricKey.create("step1", NAME4),
StringSetData.create(ImmutableSet.of("ab"))))));
metrics.commitLogical(
bundle1,
MetricUpdates.create(
ImmutableList.of(
MetricUpdate.create(MetricKey.create("step2", NAME1), 7L),
MetricUpdate.create(MetricKey.create("step1", NAME2), 4L)),
ImmutableList.of(
MetricUpdate.create(
MetricKey.create("step1", NAME1), DistributionData.create(4, 1, 4, 4))),
ImmutableList.of(
MetricUpdate.create(MetricKey.create("step1", NAME4), GaugeData.create(27L))),
ImmutableList.of(
MetricUpdate.create(
MetricKey.create("step1", NAME4),
StringSetData.create(ImmutableSet.of("cd"))))));
MetricQueryResults results = metrics.allMetrics();
assertThat(
results.getCounters(),
containsInAnyOrder(
attemptedMetricsResult("ns1", "name1", "step1", 0L),
attemptedMetricsResult("ns1", "name2", "step1", 0L),
attemptedMetricsResult("ns1", "name1", "step2", 0L)));
assertThat(
results.getCounters(),
containsInAnyOrder(
committedMetricsResult("ns1", "name1", "step1", 5L),
committedMetricsResult("ns1", "name2", "step1", 12L),
committedMetricsResult("ns1", "name1", "step2", 7L)));
assertThat(
results.getDistributions(),
contains(
attemptedMetricsResult("ns1", "name1", "step1", DistributionResult.IDENTITY_ELEMENT)));
assertThat(
results.getDistributions(),
contains(
committedMetricsResult(
"ns1", "name1", "step1", DistributionResult.create(12, 3, 3, 5))));
assertThat(
results.getGauges(),
contains(attemptedMetricsResult("ns2", "name2", "step1", GaugeResult.empty())));
assertThat(
results.getGauges(),
contains(
committedMetricsResult(
"ns2", "name2", "step1", GaugeResult.create(27L, Instant.now()))));
assertThat(
results.getStringSets(),
contains(
committedMetricsResult(
"ns2", "name2", "step1", StringSetResult.create(ImmutableSet.of("ab", "cd")))));
}
|
public void isInStrictOrder() {
isInStrictOrder(Ordering.natural());
}
|
@Test
public void iterableIsInStrictOrder() {
assertThat(asList()).isInStrictOrder();
assertThat(asList(1)).isInStrictOrder();
assertThat(asList(1, 2, 3, 4)).isInStrictOrder();
}
|
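A minimal sketch of the strict-order check behind `isInStrictOrder()` (hypothetical `StrictOrderSketch` class, not Truth's internals): every adjacent pair must be strictly increasing under the comparator, so empty and single-element lists pass vacuously, as the first two assertions in the test rely on.

```java
import java.util.Comparator;
import java.util.List;

public class StrictOrderSketch {
    static <T> boolean isInStrictOrder(List<? extends T> items, Comparator<? super T> comparator) {
        for (int i = 1; i < items.size(); i++) {
            // any tie or inversion between neighbors breaks strict order
            if (comparator.compare(items.get(i - 1), items.get(i)) >= 0) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        Comparator<Integer> natural = Comparator.naturalOrder();
        System.out.println(isInStrictOrder(List.of(), natural));           // true
        System.out.println(isInStrictOrder(List.of(1, 2, 3, 4), natural)); // true
        System.out.println(isInStrictOrder(List.of(1, 1, 2), natural));    // false: ties fail
    }
}
```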
public void deleteOrder(Long orderId) throws RestClientException {
deleteOrderWithHttpInfo(orderId);
}
|
@Test
public void deleteOrderTest() {
Long orderId = null;
api.deleteOrder(orderId);
// TODO: test validations
}
|
@Override public boolean implies(Permission permission) {
if (permission instanceof RuntimePermission && BLOCKED_RUNTIME_PERMISSIONS.contains(permission.getName())) {
return false;
}
if (permission instanceof SecurityPermission && BLOCKED_SECURITY_PERMISSIONS.contains(permission.getName())) {
return false;
}
return true;
}
|
@Test
public void rule_restricts_denied_permissions() {
assertThat(rule.implies(deniedSecurity)).isFalse();
assertThat(rule.implies(deniedRuntime)).isFalse();
}
|
@Override
public Object poll() {
Object result = queue.poll();
progTracker.madeProgress(result != null);
return result;
}
|
@Test
public void when_pollNonEmpty_then_getItem() {
assertEquals(ITEMS.get(0), inbox.poll());
}
|
private Function<KsqlConfig, Kudf> getUdfFactory(
final Method method,
final UdfDescription udfDescriptionAnnotation,
final String functionName,
final FunctionInvoker invoker,
final String sensorName
) {
return ksqlConfig -> {
final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
method.getDeclaringClass(), udfDescriptionAnnotation.name());
if (actualUdf instanceof Configurable) {
ExtensionSecurityManager.INSTANCE.pushInUdf();
try {
((Configurable) actualUdf)
.configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
} finally {
ExtensionSecurityManager.INSTANCE.popOutUdf();
}
}
final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
return metrics.<Kudf>map(m -> new UdfMetricProducer(
m.getSensor(sensorName),
theUdf,
Time.SYSTEM
)).orElse(theUdf);
};
}
|
@Test
public void shouldLoadFunctionWithNestedDecimalSchema() {
// Given:
final UdfFactory returnDecimal = FUNC_REG.getUdfFactory(FunctionName.of("decimalstruct"));
// When:
final KsqlScalarFunction function = returnDecimal.getFunction(ImmutableList.of());
// Then:
assertThat(
function.getReturnType(ImmutableList.of()),
equalTo(SqlStruct.builder().field("VAL", SqlDecimal.of(64, 2)).build()));
}
|
static DescriptorDigest generateSelector(ImmutableList<FileEntry> layerEntries)
throws IOException {
return Digests.computeJsonDigest(toSortedJsonTemplates(layerEntries));
}
|
@Test
public void testGenerateSelector_ownersModified() throws IOException {
Path layerFile = temporaryFolder.newFolder("testFolder").toPath().resolve("file");
Files.write(layerFile, "hello".getBytes(StandardCharsets.UTF_8));
FileEntry layerEntry111 =
new FileEntry(
layerFile,
AbsoluteUnixPath.get("/extraction/path"),
FilePermissions.fromOctalString("111"),
FileEntriesLayer.DEFAULT_MODIFICATION_TIME,
"0:0");
FileEntry layerEntry222 =
new FileEntry(
layerFile,
AbsoluteUnixPath.get("/extraction/path"),
FilePermissions.fromOctalString("222"),
FileEntriesLayer.DEFAULT_MODIFICATION_TIME,
"foouser");
// Verify that changing ownership generates a different selector
Assert.assertNotEquals(
LayerEntriesSelector.generateSelector(ImmutableList.of(layerEntry111)),
LayerEntriesSelector.generateSelector(ImmutableList.of(layerEntry222)));
}
|
@Override
public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException {
}
|
@Test
public void givenDefaultConfig_whenInit_thenOk() {
assertThatNoException().isThrownBy(() -> node.init(ctxMock, nodeConfiguration));
}
|
@Override
public List<String> mapRow(Row element) {
List<String> res = new ArrayList<>();
Schema s = element.getSchema();
for (int i = 0; i < s.getFieldCount(); i++) {
res.add(convertFieldToString(s.getField(i).getType(), element.getValue(i)));
}
return res;
}
|
@Test
public void testAllDataTypes() {
Schema.Builder schemaBuilder = new Schema.Builder();
schemaBuilder.addField("byte", Schema.FieldType.BYTE);
schemaBuilder.addField("int16", Schema.FieldType.INT16);
schemaBuilder.addField("int32", Schema.FieldType.INT32);
schemaBuilder.addField("int64", Schema.FieldType.INT64);
schemaBuilder.addField("float", Schema.FieldType.FLOAT);
schemaBuilder.addField("double", Schema.FieldType.DOUBLE);
schemaBuilder.addField("decimal", Schema.FieldType.DECIMAL);
schemaBuilder.addField("boolean", Schema.FieldType.BOOLEAN);
schemaBuilder.addField("datetime", Schema.FieldType.DATETIME);
schemaBuilder.addField("bytes", Schema.FieldType.BYTES);
schemaBuilder.addField("string", Schema.FieldType.STRING);
Schema schema = schemaBuilder.build();
Row.Builder rowBuilder = Row.withSchema(schema);
rowBuilder.addValue((byte) 10);
rowBuilder.addValue((short) 10);
rowBuilder.addValue(10);
rowBuilder.addValue((long) 10);
rowBuilder.addValue((float) 10.1);
rowBuilder.addValue(10.1);
rowBuilder.addValue(new BigDecimal("10.1"));
rowBuilder.addValue(false);
rowBuilder.addValue(new DateTime("2022-01-01T10:10:10.012Z"));
rowBuilder.addValue("asd".getBytes(StandardCharsets.UTF_8));
rowBuilder.addValue("asd");
Row row = rowBuilder.build();
SingleStoreDefaultUserDataMapper mapper = new SingleStoreDefaultUserDataMapper();
List<String> res = mapper.mapRow(row);
assertEquals(11, res.size());
assertEquals("10", res.get(0));
assertEquals("10", res.get(1));
assertEquals("10", res.get(2));
assertEquals("10", res.get(3));
assertEquals("10.1", res.get(4));
assertEquals("10.1", res.get(5));
assertEquals("10.1", res.get(6));
assertEquals("0", res.get(7));
assertEquals("2022-01-01 10:10:10.012", res.get(8));
assertEquals("asd", res.get(9));
assertEquals("asd", res.get(10));
}
|
public PickTableLayoutForPredicate pickTableLayoutForPredicate()
{
return new PickTableLayoutForPredicate(metadata);
}
|
@Test
public void nonDeterministicPredicate()
{
Type orderStatusType = createVarcharType(1);
tester().assertThat(pickTableLayout.pickTableLayoutForPredicate())
.on(p -> {
p.variable("orderstatus", orderStatusType);
return p.filter(p.rowExpression("orderstatus = 'O' AND rand() = 0"),
p.tableScan(
ordersTableHandle,
ImmutableList.of(p.variable("orderstatus", orderStatusType)),
ImmutableMap.of(p.variable("orderstatus", orderStatusType), new TpchColumnHandle("orderstatus", orderStatusType))));
})
.matches(
filter("rand() = 0",
constrainedTableScanWithTableLayout(
"orders",
ImmutableMap.of("orderstatus", singleValue(orderStatusType, utf8Slice("O"))),
ImmutableMap.of("orderstatus", "orderstatus"))));
tester().assertThat(pickTableLayout.pickTableLayoutForPredicate())
.on(p -> {
p.variable("orderstatus", orderStatusType);
return p.filter(p.rowExpression("orderstatus = 'O' AND rand() = 0"),
p.tableScan(
ordersTableHandle,
ImmutableList.of(variable("orderstatus", orderStatusType)),
ImmutableMap.of(variable("orderstatus", orderStatusType), new TpchColumnHandle("orderstatus", orderStatusType))));
})
.matches(
filter("rand() = 0",
constrainedTableScanWithTableLayout(
"orders",
ImmutableMap.of("orderstatus", singleValue(orderStatusType, utf8Slice("O"))),
ImmutableMap.of("orderstatus", "orderstatus"))));
}
|
@Override
public boolean shouldCareAbout(Object entity) {
return securityConfigClasses.stream().anyMatch(aClass -> aClass.isAssignableFrom(entity.getClass()));
}
|
@Test
public void shouldNotCareAboutEntityWhichIsNotPartOfSecurityConfig() {
SecurityConfigChangeListener securityConfigChangeListener = new SecurityConfigChangeListener() {
@Override
public void onEntityConfigChange(Object entity) {
}
};
assertThat(securityConfigChangeListener.shouldCareAbout(new ElasticProfile()), is(false));
}
|
public static <T> Encoder<T> encoderFor(Coder<T> coder) {
Encoder<T> enc = getOrCreateDefaultEncoder(coder.getEncodedTypeDescriptor().getRawType());
return enc != null ? enc : binaryEncoder(coder, true);
}
|
@Test
public void testBeamEncoderMappings() {
BASIC_CASES.forEach(
(coder, data) -> {
Encoder<?> encoder = encoderFor(coder);
serializeAndDeserialize(data.get(0), (Encoder) encoder);
Dataset<?> dataset = createDataset(data, (Encoder) encoder);
assertThat(dataset.collect(), equalTo(data.toArray()));
});
}
|
@Override
public ImagesAndRegistryClient call()
throws IOException, RegistryException, LayerPropertyNotFoundException,
LayerCountMismatchException, BadContainerConfigurationFormatException,
CacheCorruptedException, CredentialRetrievalException {
EventHandlers eventHandlers = buildContext.getEventHandlers();
try (ProgressEventDispatcher progressDispatcher =
progressDispatcherFactory.create("pulling base image manifest", 4);
TimerEventDispatcher ignored1 = new TimerEventDispatcher(eventHandlers, DESCRIPTION)) {
// Skip this step if this is a scratch image
ImageReference imageReference = buildContext.getBaseImageConfiguration().getImage();
if (imageReference.isScratch()) {
Set<Platform> platforms = buildContext.getContainerConfiguration().getPlatforms();
Verify.verify(!platforms.isEmpty());
eventHandlers.dispatch(LogEvent.progress("Getting scratch base image..."));
ImmutableList.Builder<Image> images = ImmutableList.builder();
for (Platform platform : platforms) {
Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
imageBuilder.setArchitecture(platform.getArchitecture()).setOs(platform.getOs());
images.add(imageBuilder.build());
}
return new ImagesAndRegistryClient(images.build(), null);
}
eventHandlers.dispatch(
LogEvent.progress("Getting manifest for base image " + imageReference + "..."));
if (buildContext.isOffline()) {
List<Image> images = getCachedBaseImages();
if (!images.isEmpty()) {
return new ImagesAndRegistryClient(images, null);
}
throw new IOException(
"Cannot run Jib in offline mode; " + imageReference + " not found in local Jib cache");
} else if (imageReference.getDigest().isPresent()) {
List<Image> images = getCachedBaseImages();
if (!images.isEmpty()) {
RegistryClient noAuthRegistryClient =
buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
// TODO: passing noAuthRegistryClient may be problematic. It may return 401 unauthorized
// if layers have to be downloaded.
// https://github.com/GoogleContainerTools/jib/issues/2220
return new ImagesAndRegistryClient(images, noAuthRegistryClient);
}
}
Optional<ImagesAndRegistryClient> mirrorPull =
tryMirrors(buildContext, progressDispatcher.newChildProducer());
if (mirrorPull.isPresent()) {
return mirrorPull.get();
}
try {
// First, try with no credentials. This works with public GCR images (but not Docker Hub).
// TODO: investigate if we should just pass credentials up front. However, this involves
// some risk. https://github.com/GoogleContainerTools/jib/pull/2200#discussion_r359069026
// contains some related discussions.
RegistryClient noAuthRegistryClient =
buildContext.newBaseImageRegistryClientFactory().newRegistryClient();
return new ImagesAndRegistryClient(
pullBaseImages(noAuthRegistryClient, progressDispatcher.newChildProducer()),
noAuthRegistryClient);
} catch (RegistryUnauthorizedException ex) {
eventHandlers.dispatch(
LogEvent.lifecycle(
"The base image requires auth. Trying again for " + imageReference + "..."));
Credential credential =
RegistryCredentialRetriever.getBaseImageCredential(buildContext).orElse(null);
RegistryClient registryClient =
buildContext
.newBaseImageRegistryClientFactory()
.setCredential(credential)
.newRegistryClient();
String wwwAuthenticate = ex.getHttpResponseException().getHeaders().getAuthenticate();
if (wwwAuthenticate != null) {
eventHandlers.dispatch(
LogEvent.debug("WWW-Authenticate for " + imageReference + ": " + wwwAuthenticate));
registryClient.authPullByWwwAuthenticate(wwwAuthenticate);
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
} else {
// Not getting WWW-Authenticate is unexpected in practice, and we may just blame the
// server and fail. However, to keep some old behavior, try a few things as a last resort.
// TODO: consider removing this fallback branch.
if (credential != null && !credential.isOAuth2RefreshToken()) {
eventHandlers.dispatch(
LogEvent.debug("Trying basic auth as fallback for " + imageReference + "..."));
registryClient.configureBasicAuth();
try {
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
} catch (RegistryUnauthorizedException ignored) {
// Fall back to try bearer auth.
}
}
eventHandlers.dispatch(
LogEvent.debug("Trying bearer auth as fallback for " + imageReference + "..."));
registryClient.doPullBearerAuth();
return new ImagesAndRegistryClient(
pullBaseImages(registryClient, progressDispatcher.newChildProducer()),
registryClient);
}
}
}
}
|
@Test
public void testCall_offlineMode_cached()
throws LayerPropertyNotFoundException, RegistryException, LayerCountMismatchException,
BadContainerConfigurationFormatException, CacheCorruptedException,
CredentialRetrievalException, InvalidImageReferenceException, IOException {
ImageReference imageReference = ImageReference.parse("cat");
Mockito.when(imageConfiguration.getImage()).thenReturn(imageReference);
Mockito.when(buildContext.isOffline()).thenReturn(true);
ContainerConfigurationTemplate containerConfigJson = new ContainerConfigurationTemplate();
containerConfigJson.setArchitecture("slim arch");
containerConfigJson.setOs("fat system");
ManifestAndConfigTemplate manifestAndConfig =
new ManifestAndConfigTemplate(
new V22ManifestTemplate(), containerConfigJson, "sha256:digest");
ImageMetadataTemplate imageMetadata =
new ImageMetadataTemplate(null, Arrays.asList(manifestAndConfig));
Mockito.when(cache.retrieveMetadata(imageReference)).thenReturn(Optional.of(imageMetadata));
Mockito.when(cache.areAllLayersCached(manifestAndConfig.getManifest())).thenReturn(true);
ImagesAndRegistryClient result = pullBaseImageStep.call();
Assert.assertEquals("fat system", result.images.get(0).getOs());
Assert.assertNull(result.registryClient);
Mockito.verify(buildContext, Mockito.never()).newBaseImageRegistryClientFactory();
}
|
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
RequestContext context,
OffsetCommitRequestData request
) throws ApiException {
Group group = validateOffsetCommit(context, request);
// In the old consumer group protocol, the offset commits maintain the session if
// the group is in Stable or PreparingRebalance state.
if (group.type() == Group.GroupType.CLASSIC) {
ClassicGroup classicGroup = (ClassicGroup) group;
if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
classicGroup,
classicGroup.member(request.memberId())
);
}
}
final OffsetCommitResponseData response = new OffsetCommitResponseData();
final List<CoordinatorRecord> records = new ArrayList<>();
final long currentTimeMs = time.milliseconds();
final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);
request.topics().forEach(topic -> {
final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
response.topics().add(topicResponse);
topic.partitions().forEach(partition -> {
if (isMetadataInvalid(partition.committedMetadata())) {
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
} else {
log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
request.memberId(), partition.committedLeaderEpoch());
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.NONE.code()));
final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
partition,
currentTimeMs,
expireTimestampMs
);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
request.groupId(),
topic.name(),
partition.partitionIndex(),
offsetAndMetadata,
metadataImage.features().metadataVersion()
));
}
});
});
if (!records.isEmpty()) {
metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
}
return new CoordinatorResult<>(records, response);
}
|
@Test
public void testConsumerGroupOffsetDeleteWithErrors() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
ConsumerGroup group = context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup(
"foo",
true
);
MetadataImage image = new MetadataImageBuilder()
.addTopic(Uuid.randomUuid(), "foo", 1)
.addRacks()
.build();
ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1")
.setSubscribedTopicNames(Collections.singletonList("bar"))
.build();
group.computeSubscriptionMetadata(
group.computeSubscribedTopicNames(null, member1),
image.topics(),
image.cluster()
);
group.updateMember(member1);
context.commitOffset("foo", "bar", 0, 100L, 0);
assertTrue(group.isSubscribedToTopic("bar"));
// Delete the offset whose topic partition doesn't exist.
context.testOffsetDeleteWith("foo", "bar1", 0, Errors.NONE);
// Delete the offset from the topic that the group is subscribed to.
context.testOffsetDeleteWith("foo", "bar", 0, Errors.GROUP_SUBSCRIBED_TO_TOPIC);
}
|
static QueryId buildId(
final Statement statement,
final EngineContext engineContext,
final QueryIdGenerator idGenerator,
final OutputNode outputNode,
final boolean createOrReplaceEnabled,
final Optional<String> withQueryId) {
if (withQueryId.isPresent()) {
final String queryId = withQueryId.get().toUpperCase();
validateWithQueryId(queryId);
return new QueryId(queryId);
}
if (statement instanceof CreateTable && ((CreateTable) statement).isSource()) {
// Use the CST name as part of the QueryID
final String suffix = ((CreateTable) statement).getName().text().toUpperCase()
+ "_" + idGenerator.getNext().toUpperCase();
return new QueryId(ReservedQueryIdsPrefixes.CST + suffix);
}
if (!outputNode.getSinkName().isPresent()) {
final String prefix =
"transient_" + outputNode.getSource().getLeftmostSourceNode().getAlias().text() + "_";
return new QueryId(prefix + Math.abs(ThreadLocalRandom.current().nextLong()));
}
final KsqlStructuredDataOutputNode structured = (KsqlStructuredDataOutputNode) outputNode;
if (!structured.createInto()) {
return new QueryId(ReservedQueryIdsPrefixes.INSERT + idGenerator.getNext());
}
final SourceName sink = outputNode.getSinkName().get();
final Set<QueryId> queriesForSink = engineContext.getQueryRegistry().getQueriesWithSink(sink);
if (queriesForSink.size() > 1) {
throw new KsqlException("REPLACE for sink " + sink + " is not supported because there are "
+ "multiple queries writing into it: " + queriesForSink);
} else if (!queriesForSink.isEmpty()) {
if (!createOrReplaceEnabled) {
final String type = outputNode.getNodeOutputType().getKsqlType().toLowerCase();
throw new UnsupportedOperationException(
String.format(
"Cannot add %s '%s': A %s with the same name already exists",
type,
sink.text(),
type));
}
return Iterables.getOnlyElement(queriesForSink);
}
final String suffix = outputNode.getId().toString().toUpperCase()
+ "_" + idGenerator.getNext().toUpperCase();
return new QueryId(
outputNode.getNodeOutputType() == DataSourceType.KTABLE
? ReservedQueryIdsPrefixes.CTAS + suffix
: ReservedQueryIdsPrefixes.CSAS + suffix
);
}
|
@Test
public void shouldThrowIfWithQueryIdIsReserved() {
// When:
final Exception e = assertThrows(
Exception.class,
() -> QueryIdUtil.buildId(statement, engineContext, idGenerator, plan,
false, Optional.of("insertquery_custom"))
);
// Then:
assertThat(e.getMessage(), containsString("Query IDs must not start with a "
+ "reserved query ID prefix (INSERTQUERY_, CTAS_, CSAS_, CST_). "
+ "Got 'INSERTQUERY_CUSTOM'."));
}
|
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
ConfigServer[] configServers = getConfigServers();
int[] zookeeperIds = getConfigServerZookeeperIds();
if (configServers.length != zookeeperIds.length) {
throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " +
"same as number of provided config server zookeeper ids (%d)",
configServers.length, zookeeperIds.length));
}
String myhostname = HostName.getLocalhost();
// TODO: Server index should be in interval [1, 254] according to doc,
// however, we cannot change this id for an existing server
for (int i = 0; i < configServers.length; i++) {
if (zookeeperIds[i] < 0) {
throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s",
zookeeperIds[i], configServers[i].hostName));
}
if (configServers[i].hostName.equals(myhostname)) {
builder.myid(zookeeperIds[i]);
}
builder.server(getZkServer(configServers[i], zookeeperIds[i]));
}
if (options.zookeeperClientPort().isPresent()) {
builder.clientPort(options.zookeeperClientPort().get());
}
if (options.hostedVespa().orElse(false)) {
builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json"));
}
boolean isHostedVespa = options.hostedVespa().orElse(false);
builder.dynamicReconfiguration(isHostedVespa);
builder.reconfigureEnsemble(!isHostedVespa);
builder.snapshotMethod(options.zooKeeperSnapshotMethod());
builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer());
}
|
@Test
void zookeeperConfig_default() {
ZookeeperServerConfig config = getConfig(ZookeeperServerConfig.class);
assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::hostname, "localhost");
assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::id, 0);
assertEquals(0, config.myid());
assertEquals("/opt/vespa/var/zookeeper/conf/tls.conf.json", config.vespaTlsConfigFile());
}
|
@Override
@CheckForNull
public EmailMessage format(Notification notif) {
if (!(notif instanceof ChangesOnMyIssuesNotification)) {
return null;
}
ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif;
if (notification.getChange() instanceof AnalysisChange) {
checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty");
return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification);
}
return formatMultiProject(notification);
}
|
@Test
public void formats_returns_html_message_for_multiple_issues_of_same_rule_on_same_project_on_branch_when_analysis_change() {
String branchName = randomAlphabetic(19);
Project project = newBranch("1", branchName);
String ruleName = randomAlphabetic(8);
String host = randomAlphabetic(15);
Rule rule = newRule(ruleName, randomRuleTypeHotspotExcluded());
String status = randomValidStatus();
List<ChangedIssue> changedIssues = IntStream.range(0, 2 + new Random().nextInt(5))
.mapToObj(i -> newChangedIssue("issue_" + i, status, project, rule))
.collect(toList());
AnalysisChange analysisChange = newAnalysisChange();
when(emailSettings.getServerBaseURL()).thenReturn(host);
EmailMessage emailMessage = underTest.format(new ChangesOnMyIssuesNotification(analysisChange, ImmutableSet.copyOf(changedIssues)));
String expectedHref = host + "/project/issues?id=" + project.getKey() + "&branch=" + branchName
+ "&issues=" + changedIssues.stream().map(ChangedIssue::getKey).collect(joining("%2C"));
String expectedLinkText = "See all " + changedIssues.size() + " issues";
HtmlFragmentAssert.assertThat(emailMessage.getMessage())
.hasParagraph().hasParagraph() // skip header
.hasParagraph()// skip title based on status
.hasList("Rule " + ruleName + " - " + expectedLinkText)
.withLink(expectedLinkText, expectedHref)
.hasParagraph().hasParagraph() // skip footer
.noMoreBlock();
}
|
@Override
public InetSocketAddress resolve(ServerWebExchange exchange) {
List<String> xForwardedValues = extractXForwardedValues(exchange);
if (!xForwardedValues.isEmpty()) {
int index = Math.max(0, xForwardedValues.size() - maxTrustedIndex);
return new InetSocketAddress(xForwardedValues.get(index), 0);
}
return defaultRemoteIpResolver.resolve(exchange);
}
|
@Test
public void maxIndexOneReturnsLastForwardedIp() {
ServerWebExchange exchange = buildExchange(oneTwoThreeBuilder());
InetSocketAddress address = trustOne.resolve(exchange);
assertThat(address.getHostName()).isEqualTo("0.0.0.3");
}
|
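A sketch of the trust-index arithmetic in the resolver above (hypothetical `TrustedIndexSketch` class; the chain `0.0.0.1 → 0.0.0.3` is assumed from the `oneTwoThreeBuilder()` name in the test): with `maxTrustedIndex` trusted hops, the resolver picks the entry at `max(0, size - maxTrustedIndex)` from the X-Forwarded-For chain, i.e. the right-most address still inside the trusted window.

```java
import java.util.List;

public class TrustedIndexSketch {
    static String resolve(List<String> xForwardedValues, int maxTrustedIndex) {
        // trust at most maxTrustedIndex entries counted from the end of the chain
        int index = Math.max(0, xForwardedValues.size() - maxTrustedIndex);
        return xForwardedValues.get(index);
    }

    public static void main(String[] args) {
        List<String> chain = List.of("0.0.0.1", "0.0.0.2", "0.0.0.3");
        System.out.println(resolve(chain, 1)); // 0.0.0.3, as the test asserts
        System.out.println(resolve(chain, 2)); // 0.0.0.2, one more trusted hop
    }
}
```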
@Override
public void reportErrorAndInvalidate(String bundleSymbolicName, List<String> messages) {
try {
pluginRegistry.markPluginInvalid(bundleSymbolicName, messages);
} catch (Exception e) {
LOGGER.warn("[Plugin Health Service] Plugin with id '{}' tried to report health with message '{}' but Go is unaware of this plugin.", bundleSymbolicName, messages, e);
}
}
|
@Test
void shouldNotThrowExceptionWhenPluginIsNotFound() {
String bundleSymbolicName = "invalid-plugin";
String message = "some msg";
List<String> reasons = List.of(message);
doThrow(new RuntimeException()).when(pluginRegistry).markPluginInvalid(bundleSymbolicName, reasons);
assertThatCode(() -> serviceDefault.reportErrorAndInvalidate(bundleSymbolicName, reasons))
.doesNotThrowAnyException();
}
|
@Override
public Num calculate(BarSeries series, Position position) {
return isBreakEvenPosition(position) ? series.one() : series.zero();
}
|
@Test
public void calculateWithNoPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
assertNumEquals(0, getCriterion().calculate(series, new BaseTradingRecord()));
}
|
private void execute(String[] args) throws Exception {
String tool = args[0];
String[] subsetArgs = new String[args.length - 1];
System.arraycopy(args, 1, subsetArgs, 0, args.length - 1);
switch (tool) {
case "Report":
handleReport(subsetArgs);
break;
case "Compare":
handleCompare(subsetArgs);
break;
case "Profile":
handleProfile(subsetArgs);
break;
case "StartDB":
handleStartDB(subsetArgs);
break;
case "FileProfile":
handleProfileFiles(subsetArgs);
break;
default:
System.out.println(specifyTools());
break;
}
}
|
@Test
@Disabled("use this for development")
public void testOneOff() throws Exception {
List<String> args = new ArrayList<>();
args.add("Compare");
args.add("-extractsA");
args.add(ProcessUtils.escapeCommandLine(extractsDir
.resolve("extractsA")
.toAbsolutePath()
.toString()));
args.add("-extractsB");
args.add(ProcessUtils.escapeCommandLine(extractsDir
.resolve("extractsB")
.toAbsolutePath()
.toString()));
args.add("-db");
args.add(ProcessUtils.escapeCommandLine(compareDBDir
.toAbsolutePath()
.toString() + "/" + dbName));
execute(args, 60000);
// args.add("-drop");
// args.add("-jdbc");
// args.add("jdbc:postgresql:tika_eval?user=user&password=password");
}
|
public static void transform(IntIndexedContainer arr, IntIndexedContainer map) {
for (int i = 0; i < arr.size(); ++i)
arr.set(i, map.get(arr.get(i)));
}
|
@Test
public void testTransform() {
IntArrayList arr = from(7, 6, 2);
ArrayUtil.transform(arr, ArrayUtil.constant(8, 4));
assertEquals(IntArrayList.from(4, 4, 4), arr);
IntArrayList brr = from(3, 0, 1);
ArrayUtil.transform(brr, IntArrayList.from(6, 2, 1, 5));
assertEquals(IntArrayList.from(5, 6, 2), brr);
}
|
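A plain-array sketch of the lookup-table semantics of `transform` above (hypothetical `TransformSketch` class, `int[]` standing in for HPPC's `IntIndexedContainer`): each element is replaced by `map[element]`, so `arr` acts as a list of indices into `map`.

```java
import java.util.Arrays;

public class TransformSketch {
    static void transform(int[] arr, int[] map) {
        for (int i = 0; i < arr.length; i++) {
            arr[i] = map[arr[i]]; // element used as an index into the map
        }
    }

    public static void main(String[] args) {
        int[] arr = {3, 0, 1};
        int[] map = {6, 2, 1, 5};
        transform(arr, map);
        System.out.println(Arrays.toString(arr)); // [5, 6, 2], as in the test above
    }
}
```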
@Override
public AllocatedSlot reserveFreeSlot(AllocationID allocationId) {
LOG.debug("Reserve free slot with allocation id {}.", allocationId);
AllocatedSlot slot = registeredSlots.get(allocationId);
Preconditions.checkNotNull(slot, "The slot with id %s was not exists.", allocationId);
Preconditions.checkState(
freeSlots.removeFreeSlot(allocationId, slot.getTaskManagerId()) != null,
"The slot with id %s was not free.",
allocationId);
return registeredSlots.get(allocationId);
}
|
@Test
void testReserveFreeSlot() {
final DefaultAllocatedSlotPool slotPool = new DefaultAllocatedSlotPool();
final Collection<AllocatedSlot> allSlots = createAllocatedSlots();
final Collection<AllocatedSlot> freeSlots = new ArrayList<>(allSlots);
final Iterator<AllocatedSlot> iterator = freeSlots.iterator();
final AllocatedSlot allocatedSlot = iterator.next();
iterator.remove();
slotPool.addSlots(allSlots, 0);
assertThat(slotPool.reserveFreeSlot(allocatedSlot.getAllocationId()))
.isEqualTo(allocatedSlot);
assertSlotPoolContainsFreeSlots(slotPool, freeSlots);
assertSlotPoolContainsSlots(slotPool, allSlots);
}
|
public RunResponse restartDirectly(
RunResponse restartStepInfo, RunRequest runRequest, boolean blocking) {
WorkflowInstance instance = restartStepInfo.getInstance();
String stepId = restartStepInfo.getStepId();
validateStepId(instance, stepId, Actions.StepInstanceAction.RESTART);
StepInstance stepInstance =
getStepInstanceAndValidate(instance, stepId, runRequest.getRestartConfig());
// prepare payload and then add to db
StepAction stepAction = StepAction.createRestart(stepInstance, runRequest);
saveAction(stepInstance, stepAction);
if (blocking) {
return waitResponseWithTimeout(stepInstance, stepAction);
} else {
return RunResponse.from(stepInstance, stepAction.toTimelineEvent());
}
}
|
@Test
public void testRestartDirectly() {
stepInstance.getRuntimeState().setStatus(StepInstance.Status.FATALLY_FAILED);
stepInstance.getStepRetry().setRetryable(false);
((TypedStep) stepInstance.getDefinition()).setFailureMode(FailureMode.FAIL_AFTER_RUNNING);
stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true);
RunResponse restartStepInfo = RunResponse.builder().instance(instance).stepId("job1").build();
RunRequest runRequest =
RunRequest.builder()
.requester(user)
.currentPolicy(RunPolicy.RESTART_FROM_SPECIFIC)
.stepRunParams(
Collections.singletonMap(
"job1",
Collections.singletonMap(
"foo", ParamDefinition.buildParamDefinition("foo", "bar"))))
.build();
RunResponse response = actionDao.restartDirectly(restartStepInfo, runRequest, false);
Assert.assertEquals("sample-dag-test-3", response.getWorkflowId());
Assert.assertEquals(1, response.getWorkflowInstanceId());
Assert.assertEquals(1, response.getWorkflowRunId());
Assert.assertEquals("job1", response.getStepId());
Assert.assertEquals(2L, response.getStepAttemptId().longValue());
Assert.assertEquals(
"User [tester] take action [RESTART] on the step",
response.getTimelineEvent().getMessage());
}
|
DateRange getRange(String dateRangeString) throws ParseException {
if (dateRangeString == null || dateRangeString.isEmpty())
return null;
String[] dateArr = dateRangeString.split("-");
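        // Expect either a single date or a "from-to" pair separated by '-'.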
if (dateArr.length > 2 || dateArr.length < 1)
return null;
// throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed");
ParsedCalendar from = parseDateString(dateArr[0]);
ParsedCalendar to;
if (dateArr.length == 2)
to = parseDateString(dateArr[1]);
else
            // a faster alternative would be to clone the already-parsed calendar:
            // to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone());
to = parseDateString(dateArr[0]);
try {
return new DateRange(from, to);
} catch (IllegalArgumentException ex) {
return null;
}
}
|
@Test
public void testParseSingleDateRangeWithoutDay() throws ParseException {
DateRange dateRange = dateRangeParser.getRange("2014 Sep");
assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 31)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 1)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 30)));
assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.OCTOBER, 1)));
assertFalse(dateRange.isInRange(getCalendar(2015, Calendar.SEPTEMBER, 1)));
}
|
public PendingSpan getOrCreate(
@Nullable TraceContext parent, TraceContext context, boolean start) {
PendingSpan result = get(context);
if (result != null) return result;
MutableSpan span = new MutableSpan(context, defaultSpan);
PendingSpan parentSpan = parent != null ? get(parent) : null;
    // avoid the overhead of reading the clock when the parent is in-progress (it usually is)
TickClock clock;
if (parentSpan != null) {
TraceContext parentContext = parentSpan.context();
if (parentContext != null) parent = parentContext;
clock = parentSpan.clock;
if (start) span.startTimestamp(clock.currentTimeMicroseconds());
} else {
long currentTimeMicroseconds = this.clock.currentTimeMicroseconds();
clock = new TickClock(platform, currentTimeMicroseconds, platform.nanoTime());
if (start) span.startTimestamp(currentTimeMicroseconds);
}
PendingSpan newSpan = new PendingSpan(context, span, clock);
// Probably absent because we already checked with get() at the entrance of this method
PendingSpan previousSpan = putIfProbablyAbsent(context, newSpan);
if (previousSpan != null) return previousSpan; // lost race
// We've now allocated a new trace context.
assert parent != null || context.isLocalRoot() :
"Bug (or unexpected call to internal code): parent can only be null in a local root!";
spanHandler.begin(newSpan.handlerContext, newSpan.span, parentSpan != null
? parentSpan.handlerContext : null);
return newSpan;
}
|
@Test void getOrCreate_cachesReference() {
PendingSpan span = pendingSpans.getOrCreate(null, context, false);
assertThat(pendingSpans.getOrCreate(null, context, false)).isSameAs(span);
}
|
public Object getProperty( Object root, String propName ) throws Exception {
List<Integer> extractedIndexes = new ArrayList<>();
BeanInjectionInfo.Property prop = info.getProperties().get( propName );
if ( prop == null ) {
throw new RuntimeException( "Property not found" );
}
Object obj = root;
for ( int i = 1, arrIndex = 0; i < prop.path.size(); i++ ) {
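      // Walk the property path from the root, indexing into arrays and lists along the way.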
BeanLevelInfo s = prop.path.get( i );
obj = s.field.get( obj );
if ( obj == null ) {
return null; // some value in path is null - return empty
}
switch ( s.dim ) {
case ARRAY:
int indexArray = extractedIndexes.get( arrIndex++ );
if ( Array.getLength( obj ) <= indexArray ) {
return null;
}
obj = Array.get( obj, indexArray );
if ( obj == null ) {
return null; // element is empty
}
break;
case LIST:
int indexList = extractedIndexes.get( arrIndex++ );
List<?> list = (List<?>) obj;
if ( list.size() <= indexList ) {
return null;
}
obj = list.get( indexList );
if ( obj == null ) {
return null; // element is empty
}
break;
case NONE:
break;
}
}
return obj;
}
|
@Test
public void getProperty_NotFound() {
    BeanInjector bi = new BeanInjector( null );
BeanInjectionInfo bii = new BeanInjectionInfo( MetaBeanLevel1.class );
BeanInjectionInfo.Property actualProperty = bi.getProperty( bii, "DOES_NOT_EXIST" );
assertNull(actualProperty);
}
|
public Collection<Integer> getAssignedWorkerIds() {
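        // Read each registered instance's persisted worker id and collect the parseable ones.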
Collection<String> childrenKeys = repository.getChildrenKeys(ComputeNode.getInstanceWorkerIdRootNodePath());
Collection<Integer> result = new LinkedHashSet<>(childrenKeys.size(), 1F);
for (String each : childrenKeys) {
String workerId = repository.query(ComputeNode.getInstanceWorkerIdNodePath(each));
if (null != workerId) {
result.add(Integer.parseInt(workerId));
}
}
return result;
}
|
@Test
void assertGetUsedWorkerIds() {
new ComputeNodePersistService(repository).getAssignedWorkerIds();
verify(repository).getChildrenKeys(ComputeNode.getInstanceWorkerIdRootNodePath());
}
|
@Override
public ElectLeadersResult electLeaders(
final ElectionType electionType,
final Set<TopicPartition> topicPartitions,
ElectLeadersOptions options) {
final KafkaFutureImpl<Map<TopicPartition, Optional<Throwable>>> electionFuture = new KafkaFutureImpl<>();
final long now = time.milliseconds();
runnable.call(new Call("electLeaders", calcDeadlineMs(now, options.timeoutMs()),
new ControllerNodeProvider()) {
@Override
public ElectLeadersRequest.Builder createRequest(int timeoutMs) {
return new ElectLeadersRequest.Builder(electionType, topicPartitions, timeoutMs);
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
ElectLeadersResponse response = (ElectLeadersResponse) abstractResponse;
Map<TopicPartition, Optional<Throwable>> result = ElectLeadersResponse.electLeadersResult(response.data());
                // For version 0 the top-level errorCode is always 0, which maps to Errors.NONE
Errors error = Errors.forCode(response.data().errorCode());
if (error != Errors.NONE) {
electionFuture.completeExceptionally(error.exception());
return;
}
electionFuture.complete(result);
}
@Override
void handleFailure(Throwable throwable) {
electionFuture.completeExceptionally(throwable);
}
}, now);
return new ElectLeadersResult(electionFuture);
}
|
@Test
public void testElectLeaders() throws Exception {
TopicPartition topic1 = new TopicPartition("topic", 0);
TopicPartition topic2 = new TopicPartition("topic", 2);
try (AdminClientUnitTestEnv env = mockClientEnv()) {
for (ElectionType electionType : ElectionType.values()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// Test a call where one partition has an error.
ApiError value = ApiError.fromThrowable(new ClusterAuthorizationException(null));
List<ReplicaElectionResult> electionResults = new ArrayList<>();
ReplicaElectionResult electionResult = new ReplicaElectionResult();
electionResult.setTopic(topic1.topic());
// Add partition 1 result
PartitionResult partition1Result = new PartitionResult();
partition1Result.setPartitionId(topic1.partition());
partition1Result.setErrorCode(value.error().code());
partition1Result.setErrorMessage(value.message());
electionResult.partitionResult().add(partition1Result);
// Add partition 2 result
PartitionResult partition2Result = new PartitionResult();
partition2Result.setPartitionId(topic2.partition());
partition2Result.setErrorCode(value.error().code());
partition2Result.setErrorMessage(value.message());
electionResult.partitionResult().add(partition2Result);
electionResults.add(electionResult);
env.kafkaClient().prepareResponse(new ElectLeadersResponse(0, Errors.NONE.code(),
electionResults, ApiKeys.ELECT_LEADERS.latestVersion()));
ElectLeadersResult results = env.adminClient().electLeaders(
electionType,
new HashSet<>(asList(topic1, topic2)));
assertEquals(results.partitions().get().get(topic2).get().getClass(), ClusterAuthorizationException.class);
            // Test a call where there are no errors, by mutating the election results in place
partition1Result.setErrorCode(ApiError.NONE.error().code());
partition1Result.setErrorMessage(ApiError.NONE.message());
partition2Result.setErrorCode(ApiError.NONE.error().code());
partition2Result.setErrorMessage(ApiError.NONE.message());
env.kafkaClient().prepareResponse(new ElectLeadersResponse(0, Errors.NONE.code(), electionResults,
ApiKeys.ELECT_LEADERS.latestVersion()));
results = env.adminClient().electLeaders(electionType, new HashSet<>(asList(topic1, topic2)));
assertFalse(results.partitions().get().get(topic1).isPresent());
assertFalse(results.partitions().get().get(topic2).isPresent());
// Now try a timeout
results = env.adminClient().electLeaders(
electionType,
new HashSet<>(asList(topic1, topic2)),
new ElectLeadersOptions().timeoutMs(100));
TestUtils.assertFutureError(results.partitions(), TimeoutException.class);
}
}
}
|
@Override
@SuppressWarnings("Slf4jFormatShouldBeConst")
protected void logException(long id, JdbiException exception) {
final Throwable cause = exception.getCause();
if (cause instanceof SQLException) {
for (Throwable throwable : (SQLException) cause) {
logger.error(formatLogMessage(id, throwable), throwable);
}
} else {
logger.error(formatLogMessage(id, exception), exception);
}
}
|
@Test
void testPlainJdbiException() throws Exception {
JdbiException jdbiException = new TransactionException("Transaction failed for unknown reason");
jdbiExceptionMapper.logException(9812, jdbiException);
verify(logger).error("Error handling a request: 0000000000002654", jdbiException);
}
|
@Override
public InvokerWrapper getInvokerWrapper() {
return invokerWrapper;
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvokerWrapper_invokeOnAllPartitions_whenRequestOfWrongType_thenThrowException() throws Exception {
context.getInvokerWrapper().invokeOnAllPartitions(new Object(), false);
}
|
@Override
public boolean isEquivalentTo(final AbstractPicker picker) {
return picker instanceof EmptyPicker && (Objects.equal(status, ((EmptyPicker) picker).status) || (status.isOk() && ((EmptyPicker) picker).status.isOk()));
}
|
@Test
public void testIsEquivalentTo() {
EmptyPicker picker = new EmptyPicker(mock(Status.class));
assertTrue(picker.isEquivalentTo(picker));
}
|
@Override
public int hashCode() {
return MessageIdAdvUtils.hashCode(this);
}
|
@Test
public void hashCodeTest() {
BatchMessageIdImpl batchMsgId1 = new BatchMessageIdImpl(0, 0, 0, 0);
BatchMessageIdImpl batchMsgId2 = new BatchMessageIdImpl(1, 1, 1, 1);
assertEquals(batchMsgId1.hashCode(), batchMsgId1.hashCode());
assertNotEquals(batchMsgId1.hashCode(), batchMsgId2.hashCode());
}
|
@Override
public void check(final String databaseName, final EncryptRuleConfiguration ruleConfig, final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) {
checkEncryptors(ruleConfig.getEncryptors());
checkTables(databaseName, ruleConfig.getTables(), ruleConfig.getEncryptors());
}
|
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void assertCheckWhenConfigInvalidLikeColumn() {
EncryptRuleConfiguration config = createInvalidLikeColumnConfiguration();
RuleConfigurationChecker checker = OrderedSPILoader.getServicesByClass(RuleConfigurationChecker.class, Collections.singleton(config.getClass())).get(config.getClass());
assertThrows(UnregisteredAlgorithmException.class, () -> checker.check("test", config, Collections.emptyMap(), Collections.emptyList()));
}
|
@Override
protected boolean copyObject(String src, String dst) {
try {
LOG.debug("Copying {} to {}", src, dst);
mClient.copyObject(mBucketNameInternal, src, mBucketNameInternal, dst);
return true;
} catch (CosClientException e) {
LOG.error("Failed to rename file {} to {}", src, dst, e);
return false;
}
}
|
@Test
public void testCopyObject() {
// test successful copy object
Mockito.when(mClient.copyObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.anyString(), ArgumentMatchers.anyString())).thenReturn(null);
boolean result = mCOSUnderFileSystem.copyObject(SRC, DST);
Assert.assertTrue(result);
// test copy object exception
Mockito.when(mClient.copyObject(ArgumentMatchers.anyString(),
ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.anyString())).thenThrow(CosClientException.class);
try {
mCOSUnderFileSystem.copyObject(SRC, DST);
} catch (Exception e) {
Assert.assertTrue(e instanceof CosClientException);
}
}
|
@Override
public double calcEdgeWeight(EdgeIteratorState edgeState, boolean reverse) {
double priority = edgeToPriorityMapping.get(edgeState, reverse);
if (priority == 0) return Double.POSITIVE_INFINITY;
final double distance = edgeState.getDistance();
double seconds = calcSeconds(distance, edgeState, reverse);
if (Double.isInfinite(seconds)) return Double.POSITIVE_INFINITY;
// add penalty at start/stop/via points
if (edgeState.get(EdgeIteratorState.UNFAVORED_EDGE)) seconds += headingPenaltySeconds;
double distanceCosts = distance * distanceInfluence;
if (Double.isInfinite(distanceCosts)) return Double.POSITIVE_INFINITY;
return seconds / priority + distanceCosts;
}
|
@Test
public void bugWithNaNForBarrierEdges() {
EdgeIteratorState motorway = graph.edge(0, 1).setDistance(0).
set(roadClassEnc, MOTORWAY).set(avSpeedEnc, 80);
CustomModel customModel = createSpeedCustomModel(avSpeedEnc)
.addToPriority(If("road_class == MOTORWAY", Statement.Op.MULTIPLY, "0"));
Weighting weighting = createWeighting(customModel);
assertFalse(Double.isNaN(weighting.calcEdgeWeight(motorway, false)));
assertTrue(Double.isInfinite(weighting.calcEdgeWeight(motorway, false)));
}
|
public static void toast(Context context, @StringRes int message) {
// this is a static method so it is easier to call,
// as the context checking and casting is done for you
if (context == null) return;
if (!(context instanceof Application)) {
context = context.getApplicationContext();
}
if (context instanceof Application) {
final Context c = context;
final @StringRes int m = message;
getInstance().runInApplicationThread(() -> Toast.makeText(c, m, Toast.LENGTH_LONG).show());
}
}
|
@Test
public void testToastWithNullContext() {
AppConfig.toast(null, R.string.ok);
assertNull(ShadowToast.getLatestToast());
}
|
static FlowRule toFlowRule(/*@Valid*/ GatewayFlowRule rule) {
return new FlowRule(rule.getResource())
.setControlBehavior(rule.getControlBehavior())
.setCount(rule.getCount())
.setGrade(rule.getGrade())
.setMaxQueueingTimeMs(rule.getMaxQueueingTimeoutMs());
}
|
@Test
public void testConvertToFlowRule() {
GatewayFlowRule rule = new GatewayFlowRule("routeId1")
.setCount(10)
.setControlBehavior(RuleConstant.CONTROL_BEHAVIOR_RATE_LIMITER)
.setMaxQueueingTimeoutMs(1000);
FlowRule flowRule = GatewayRuleConverter.toFlowRule(rule);
assertEquals(rule.getResource(), flowRule.getResource());
assertEquals(rule.getCount(), flowRule.getCount(), 0.01);
assertEquals(rule.getControlBehavior(), flowRule.getControlBehavior());
assertEquals(rule.getMaxQueueingTimeoutMs(), flowRule.getMaxQueueingTimeMs());
}
|
@Override
protected CompletableFuture<JobExecutionResultResponseBody> handleRequest(
@Nonnull final HandlerRequest<EmptyRequestBody> request,
@Nonnull final RestfulGateway gateway)
throws RestHandlerException {
final JobID jobId = request.getPathParameter(JobIDPathParameter.class);
final CompletableFuture<JobStatus> jobStatusFuture =
gateway.requestJobStatus(jobId, timeout);
return jobStatusFuture
.thenCompose(
jobStatus -> {
if (jobStatus.isGloballyTerminalState()) {
return gateway.requestJobResult(jobId, timeout)
.thenApply(JobExecutionResultResponseBody::created);
} else {
return CompletableFuture.completedFuture(
JobExecutionResultResponseBody.inProgress());
}
})
.exceptionally(
throwable -> {
throw propagateException(throwable);
});
}
|
@Test
void testResultInProgress() throws Exception {
final TestingRestfulGateway testingRestfulGateway =
new TestingRestfulGateway.Builder()
.setRequestJobStatusFunction(
jobId -> CompletableFuture.completedFuture(JobStatus.RUNNING))
.build();
final JobExecutionResultResponseBody responseBody =
jobExecutionResultHandler.handleRequest(testRequest, testingRestfulGateway).get();
assertThat(responseBody.getStatus().getId()).isEqualTo(QueueStatus.Id.IN_PROGRESS);
}
|
public final void doesNotContainEntry(@Nullable Object key, @Nullable Object value) {
checkNoNeedToDisplayBothValues("entries()")
.that(checkNotNull(actual).entries())
.doesNotContain(immutableEntry(key, value));
}
|
@Test
public void doesNotContainEntryFailure() {
ImmutableMultimap<String, String> multimap = ImmutableMultimap.of("kurt", "kluever");
expectFailureWhenTestingThat(multimap).doesNotContainEntry("kurt", "kluever");
assertFailureKeys("value of", "expected not to contain", "but was");
assertFailureValue("value of", "multimap.entries()");
assertFailureValue("expected not to contain", "kurt=kluever");
assertFailureValue("but was", "[kurt=kluever]");
}
|
protected String addDatetimeToFilename( String filename, boolean addDate, String datePattern, boolean addTime,
String timePattern, boolean specifyFormat, String datetimeFormat ) {
if ( Utils.isEmpty( filename ) ) {
return null;
}
// Replace possible environment variables...
String realfilename = environmentSubstitute( filename );
String filenameNoExtension = FilenameUtils.removeExtension( realfilename );
String extension = FilenameUtils.getExtension( realfilename );
    // If an extension exists, prepend its dot so it can be re-appended after the timestamp
if ( !StringUtil.isEmpty( extension ) ) {
extension = '.' + extension;
}
final SimpleDateFormat sdf = new SimpleDateFormat();
Date now = new Date();
if ( specifyFormat && !Utils.isEmpty( datetimeFormat ) ) {
sdf.applyPattern( datetimeFormat );
String dt = sdf.format( now );
filenameNoExtension += dt;
} else {
if ( addDate && null != datePattern ) {
sdf.applyPattern( datePattern );
String d = sdf.format( now );
filenameNoExtension += '_' + d;
}
if ( addTime && null != timePattern ) {
sdf.applyPattern( timePattern );
String t = sdf.format( now );
filenameNoExtension += '_' + t;
}
}
return filenameNoExtension + extension;
}
|
@Test
public void testAddDatetimeToFilename_ZipWithoutDotsInFolderWithDots() {
JobEntryBase jobEntryBase = new JobEntryBase();
String fullFilename;
String filename = "/folder.with.dots/zip_without_dots_in_folder_with_dots";
String regexFilename = regexDotEscape( filename );
// add nothing
fullFilename = jobEntryBase.addDatetimeToFilename( filename, false, null, false, null, false, null );
assertNotNull( fullFilename );
assertTrue( Pattern.matches( regexFilename, fullFilename ) );
// add date
fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, "yyyyMMdd", false, null, false, null );
assertNotNull( fullFilename );
assertTrue( Pattern.matches( regexFilename + DATE_PATTERN, fullFilename ) );
fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, null, false, null, false, null );
assertNotNull( fullFilename );
assertEquals( filename, fullFilename );
// add time
fullFilename = jobEntryBase.addDatetimeToFilename( filename, false, null, true, "HHmmssSSS", false, null );
assertNotNull( fullFilename );
assertTrue( Pattern.matches( regexFilename + TIME_PATTERN, fullFilename ) );
fullFilename = jobEntryBase.addDatetimeToFilename( filename, false, null, true, null, false, null );
assertNotNull( fullFilename );
assertEquals( filename, fullFilename );
// add date and time
fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, "yyyyMMdd", true, "HHmmssSSS", false, null );
assertNotNull( fullFilename );
assertTrue( Pattern.matches( regexFilename + DATE_PATTERN + TIME_PATTERN, fullFilename ) );
fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, null, true, "HHmmssSSS", false, null );
assertNotNull( fullFilename );
assertTrue( Pattern.matches( regexFilename + TIME_PATTERN, fullFilename ) );
fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, "yyyyMMdd", true, null, false, null );
assertNotNull( fullFilename );
assertTrue( Pattern.matches( regexFilename + DATE_PATTERN, fullFilename ) );
fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, null, true, null, false, null );
assertNotNull( fullFilename );
assertEquals( filename, fullFilename );
// add datetime
fullFilename =
jobEntryBase.addDatetimeToFilename( filename, false, null, false, null, true, "(yyyyMMdd_HHmmssSSS)" );
assertNotNull( fullFilename );
assertTrue( Pattern.matches( filename + DATE_TIME_PATTERN, fullFilename ) );
fullFilename =
jobEntryBase.addDatetimeToFilename( filename, false, null, false, null, true, null );
assertNotNull( fullFilename );
assertEquals( filename, fullFilename );
}
|
@Override
public String getFileId(final Path file) throws BackgroundException {
if(StringUtils.isNotBlank(file.attributes().getFileId())) {
return file.attributes().getFileId();
}
if(file.isRoot()
|| new SimplePathPredicate(file).test(DriveHomeFinderService.MYDRIVE_FOLDER)
|| new SimplePathPredicate(file).test(DriveHomeFinderService.SHARED_FOLDER_NAME)
|| new SimplePathPredicate(file).test(DriveHomeFinderService.SHARED_DRIVES_NAME)) {
return DriveHomeFinderService.ROOT_FOLDER_ID;
}
final String cached = super.getFileId(file);
if(cached != null) {
if(log.isDebugEnabled()) {
log.debug(String.format("Return cached fileid %s for file %s", cached, file));
}
return cached;
}
if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
final Path found = new DriveTeamDrivesListService(session, this).list(file.getParent(),
new DisabledListProgressListener()).find(new SimplePathPredicate(file)
);
if(null == found) {
throw new NotfoundException(file.getAbsolute());
}
return this.cache(file, found.attributes().getFileId());
}
final Path query;
if(file.isPlaceholder()) {
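            // Placeholder files are stored without their extension, so strip it for the lookup.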
query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
}
else {
query = file;
}
final AttributedList<Path> list = new FileidDriveListService(session, this, query).list(file.getParent(), new DisabledListProgressListener());
final Path found = list.filter(new IgnoreTrashedComparator()).find(new SimplePathPredicate(file));
if(null == found) {
throw new NotfoundException(file.getAbsolute());
}
return this.cache(file, found.attributes().getFileId());
}
|
@Test
public void testGetFileid() throws Exception {
final Path test = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
new DriveTouchFeature(session, fileid).touch(test, new TransferStatus());
assertNotNull(fileid.getFileId(test));
new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@SuppressWarnings("unused") // Required for automatic type inference
public static <K> Builder0<K> forClass(final Class<K> type) {
return new Builder0<>();
}
|
@Test(expected = IllegalArgumentException.class)
public void shouldThrowIfHandlerSupplierReturnsNullHandler2() {
HandlerMaps.forClass(BaseType.class).withArgTypes(String.class, Integer.class)
.put(LeafTypeA.class, () -> null)
.build();
}
|
public static Transcript parse(String str) {
if (StringUtils.isBlank(str)) {
return null;
}
str = str.replaceAll("\r\n", "\n");
Transcript transcript = new Transcript();
List<String> lines = Arrays.asList(str.split("\n"));
Iterator<String> iter = lines.iterator();
String speaker = "";
String prevSpeaker = "";
StringBuilder body;
String line;
String segmentBody = "";
long startTimecode = -1L;
long spanStartTimecode = -1L;
long spanEndTimecode = -1L;
long endTimecode = -1L;
long duration = 0L;
Set<String> speakers = new HashSet<>();
while (iter.hasNext()) {
body = new StringBuilder();
line = iter.next();
if (line.isEmpty()) {
continue;
}
spanEndTimecode = endTimecode;
if (line.contains("-->")) {
String[] timecodes = line.split("-->");
if (timecodes.length < 2) {
continue;
}
startTimecode = parseTimecode(timecodes[0].trim());
endTimecode = parseTimecode(timecodes[1].trim());
if (startTimecode == -1 || endTimecode == -1) {
continue;
}
if (spanStartTimecode == -1) {
spanStartTimecode = startTimecode;
}
duration += endTimecode - startTimecode;
                // guard each next() call so a trailing timecode line cannot throw NoSuchElementException
                while (iter.hasNext()) {
                    line = iter.next();
                    if (StringUtil.isBlank(line)) {
                        break;
                    }
                    body.append(line.strip());
                    body.append(" ");
                }
}
if (body.indexOf(": ") != -1) {
String[] parts = body.toString().trim().split(":");
if (parts.length < 2) {
continue;
}
prevSpeaker = speaker;
speaker = parts[0];
speakers.add(speaker);
body = new StringBuilder(parts[1].strip());
if (StringUtils.isNotEmpty(prevSpeaker) && !StringUtils.equals(speaker, prevSpeaker)) {
if (StringUtils.isNotEmpty(segmentBody)) {
transcript.addSegment(new TranscriptSegment(spanStartTimecode,
spanEndTimecode, segmentBody, prevSpeaker));
duration = 0L;
spanStartTimecode = startTimecode;
segmentBody = body.toString();
continue;
}
}
} else {
if (StringUtils.isNotEmpty(prevSpeaker) && StringUtils.isEmpty(speaker)) {
speaker = prevSpeaker;
}
}
segmentBody += " " + body;
segmentBody = StringUtils.trim(segmentBody);
if (duration >= TranscriptParser.MIN_SPAN && endTimecode > spanStartTimecode) {
transcript.addSegment(new TranscriptSegment(spanStartTimecode, endTimecode, segmentBody, speaker));
duration = 0L;
spanStartTimecode = -1L;
segmentBody = "";
}
}
if (!StringUtil.isBlank(segmentBody) && endTimecode > spanStartTimecode) {
segmentBody = StringUtils.trim(segmentBody);
transcript.addSegment(new TranscriptSegment(spanStartTimecode, endTimecode, segmentBody, speaker));
}
if (transcript.getSegmentCount() > 0) {
transcript.setSpeakers(speakers);
return transcript;
} else {
return null;
}
}
|
@Test
public void testParse() {
String type = "application/srr";
Transcript result;
result = TranscriptParser.parse(srtStr, type);
// There isn't a segment at 800L, so go backwards and get the segment at 0L
assertEquals(result.getSegmentAtTime(800L).getWords(), "Promoting your podcast in a new");
result = TranscriptParser.parse(null, type);
        assertNull(result);
// blank string
String blankStr = "";
result = TranscriptParser.parse(blankStr, type);
assertNull(result);
// All empty lines
String allNewlinesStr = "\r\n\r\n\r\n\r\n";
result = TranscriptParser.parse(allNewlinesStr, type);
        assertNull(result);
// first segment has invalid time formatting, so the entire segment will be thrown out
String srtStrBad1 = "00:0000,000 --> 00:00:02,730\n"
+ "John Doe: Promoting your podcast in a new\n\n"
+ "2\n"
+ "00:00:02,730 --> 00:00:04,600\n"
+ "way. The latest from PogNews.";
result = TranscriptParser.parse(srtStrBad1, type);
assertEquals(result.getSegmentAtTime(2730L).getWords(), "way. The latest from PogNews.");
        // first segment has an invalid end time; the second has invalid start and end times
String srtStrBad2 = "00:00:00,000 --> 00:0002,730\n"
+ "Jane Doe: Promoting your podcast in a new\n\n"
+ "2\n"
+ "badstarttime --> badendtime\n"
+ "way. The latest from PogNews.\n"
+ "badstarttime -->\n"
+ "Jane Doe says something\n"
+ "00:00:00,000 --> 00:00:02,730\n"
+ "Jane Doe:";
result = TranscriptParser.parse(srtStrBad2, type);
assertNull(result);
// Just plain text
String strBad3 = "John Doe: Promoting your podcast in a new\n\n"
+ "way. The latest from PogNews.";
result = TranscriptParser.parse(strBad3, type);
assertNull(result);
// passing the wrong type
type = "application/json";
result = TranscriptParser.parse(srtStr, type);
        assertNull(result);
type = "unknown";
result = TranscriptParser.parse(srtStr, type);
        assertNull(result);
}
|
public Certificate add(X509Certificate cert) {
final Certificate db;
try {
db = Certificate.from(cert);
} catch (CertificateEncodingException e) {
logger.error("Encoding error in certificate", e);
throw new ClientException("Encoding error in certificate", e);
}
try {
// Special case for first CSCA certificate for this document type
if (repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) {
cert.verify(cert.getPublicKey());
logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType());
} else {
verify(cert, allowAddingExpired ? cert.getNotAfter() : null);
}
} catch (GeneralSecurityException | VerificationException e) {
logger.error(
String.format("Could not verify certificate of %s issued by %s",
cert.getSubjectX500Principal(), cert.getIssuerX500Principal()
), e
);
throw new ClientException("Could not verify certificate", e);
}
return repository.saveAndFlush(db);
}
|
@Test
public void shouldDisallowToAddCertificateIfFirstOfDocumentTypeButNotSelfSigned() throws IOException {
final X509Certificate cert = readCertificate("test/intermediate.crt");
assertThrows(ClientException.class, () -> service.add(cert));
}
|
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
if (listClass != null || inner != null) {
log.error("Could not configure ListDeserializer as some parameters were already set -- listClass: {}, inner: {}", listClass, inner);
throw new ConfigException("List deserializer was already initialized using a non-default constructor");
}
configureListClass(configs, isKey);
configureInnerSerde(configs, isKey);
}
|
@Test
public void testListValueDeserializerNoArgConstructorsShouldThrowConfigExceptionDueInnerSerdeClassNotFound() {
props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS, ArrayList.class);
props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS, nonExistingClass);
final ConfigException exception = assertThrows(
ConfigException.class,
() -> listDeserializer.configure(props, false)
);
assertEquals("Invalid value " + nonExistingClass + " for configuration "
+ CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS + ": Deserializer's inner serde class "
+ "\"" + nonExistingClass + "\" could not be found.", exception.getMessage());
}
|
@Override
public List<GrantedAuthority> getAuthorities(JsonObject introspectionResponse) {
List<GrantedAuthority> auth = new ArrayList<>(getAuthorities());
if (introspectionResponse.has("scope") && introspectionResponse.get("scope").isJsonPrimitive()) {
String scopeString = introspectionResponse.get("scope").getAsString();
Set<String> scopes = OAuth2Utils.parseParameterList(scopeString);
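            // Map each OAuth scope onto a granted authority with the OAUTH_SCOPE_ prefix.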
for (String scope : scopes) {
auth.add(new SimpleGrantedAuthority("OAUTH_SCOPE_" + scope));
}
}
return auth;
}
|
@Test
public void testGetAuthoritiesJsonObject_withScopes() {
introspectionResponse.addProperty("scope", "foo bar baz batman");
List<GrantedAuthority> expected = new ArrayList<>();
expected.add(new SimpleGrantedAuthority("ROLE_API"));
expected.add(new SimpleGrantedAuthority("OAUTH_SCOPE_foo"));
expected.add(new SimpleGrantedAuthority("OAUTH_SCOPE_bar"));
expected.add(new SimpleGrantedAuthority("OAUTH_SCOPE_baz"));
expected.add(new SimpleGrantedAuthority("OAUTH_SCOPE_batman"));
List<GrantedAuthority> authorities = granter.getAuthorities(introspectionResponse);
assertTrue(authorities.containsAll(expected));
assertTrue(expected.containsAll(authorities));
}
|
public static byte[] ecdh(XECPrivateKey privateKey, XECPublicKey publicKey) {
try {
var keyAgreement = KeyAgreement.getInstance("XDH");
keyAgreement.init(privateKey);
keyAgreement.doPhase(publicKey, true);
byte[] sharedSecret = keyAgreement.generateSecret();
// RFC 7748 recommends checking that the shared secret is not all zero bytes.
// Furthermore, RFC 9180 states "For X25519 and X448, public keys and Diffie-Hellman
// outputs MUST be validated as described in [RFC7748]".
// Usually we won't get here at all since Java will throw an InvalidKeyException
// from detecting a key with a low order point. But in case we _do_ get here, fail fast.
if (SideChannelSafe.allZeros(sharedSecret)) {
throw new IllegalArgumentException("Computed shared secret is all zeroes");
}
return sharedSecret;
} catch (NoSuchAlgorithmException | InvalidKeyException e) {
throw new RuntimeException(e);
}
}
|
@Test
void x25519_ecdh_fails_if_shared_secret_is_all_zeros_case_1() {
var alice_priv = xecPrivFromHex("88227494038f2bb811d47805bcdf04a2ac585ada7f2f23389bfd4658f9ddd45e");
var bob_public = xecPubFromHex( "0000000000000000000000000000000000000000000000000000000000000000");
// This actually internally fails with an InvalidKeyException due to small point order
assertThrows(RuntimeException.class, () -> KeyUtils.ecdh(alice_priv, bob_public));
}
|
public static <T> void invokeAll(List<Callable<T>> callables, long timeoutMs)
throws TimeoutException, ExecutionException {
ExecutorService service = Executors.newCachedThreadPool();
try {
invokeAll(service, callables, timeoutMs);
} finally {
service.shutdownNow();
}
}
|
@Test
public void invokeAllHang() throws Exception {
int numTasks = 5;
List<Callable<Void>> tasks = new ArrayList<>();
for (int i = 0; i < numTasks; i++) {
tasks.add(new Callable<Void>() {
@Override
public Void call() throws Exception {
Thread.sleep(10 * Constants.SECOND_MS);
return null;
}
});
}
try {
CommonUtils.invokeAll(tasks, 50);
fail("Expected a timeout exception");
} catch (TimeoutException e) {
// Expected
}
}
|
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
var now = clock.instant();
var bearerToken = requestBearerToken(req).orElse(null);
if (bearerToken == null) {
log.fine("Missing bearer token");
return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
}
var permission = Permission.getRequiredPermission(req).orElse(null);
if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
var requestTokenHash = requestTokenHash(bearerToken);
var clientIds = new TreeSet<String>();
var permissions = EnumSet.noneOf(Permission.class);
var matchedTokens = new HashSet<TokenVersion>();
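        // Collect every allowed client that grants the required permission and holds a non-expired matching token.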
for (Client c : allowedClients) {
if (!c.permissions().contains(permission)) continue;
var matchedToken = c.tokens().get(requestTokenHash);
if (matchedToken == null) continue;
var expiration = matchedToken.expiration().orElse(null);
if (expiration != null && now.isAfter(expiration)) continue;
matchedTokens.add(matchedToken);
clientIds.add(c.id());
permissions.addAll(c.permissions());
}
if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
if (matchedTokens.size() > 1) {
log.warning("Multiple tokens matched for request %s"
.formatted(matchedTokens.stream().map(TokenVersion::id).toList()));
return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
}
var matchedToken = matchedTokens.stream().findAny().get();
addAccessLogEntry(req, "token.id", matchedToken.id());
addAccessLogEntry(req, "token.hash", matchedToken.fingerprint().toDelimitedHexString());
addAccessLogEntry(req, "token.exp", matchedToken.expiration().map(Instant::toString).orElse("<none>"));
ClientPrincipal.attachToRequest(req, clientIds, permissions);
return Optional.empty();
}
|
@Test
void supports_handler_with_custom_request_spec() {
// Spec that maps POST as action 'read'
var spec = RequestHandlerSpec.builder()
.withAclMapping(HttpMethodAclMapping.standard()
.override(Method.POST, Action.READ).build())
.build();
var req = FilterTestUtils.newRequestBuilder()
.withMethod(Method.POST)
.withHeader("Authorization", "Bearer " + READ_TOKEN.secretTokenString())
.withAttribute(RequestHandlerSpec.ATTRIBUTE_NAME, spec)
.build();
var responseHandler = new MockResponseHandler();
newFilterWithClientsConfig().filter(req, responseHandler);
assertNull(responseHandler.getResponse());
assertEquals(new ClientPrincipal(Set.of(TOKEN_SEARCH_CLIENT), Set.of(READ)), req.getUserPrincipal());
}
|
public static Field p(String fieldName) {
return SELECT_ALL_FROM_SOURCES_ALL.where(fieldName);
}
|
@Test
void contains_uri() {
String q = Q.p("f1").containsUri("https://test.uri")
.build();
assertEquals(q, "yql=select * from sources * where f1 contains uri(\"https://test.uri\")");
}
|
public static String getUUID() {
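    // Pack the UUID's 128 bits into 16 bytes and encode them as a 22-character URL-safe Base64 string.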
UUID id = UUID.randomUUID();
ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
bb.putLong(id.getMostSignificantBits());
bb.putLong(id.getLeastSignificantBits());
return Base64.encodeBase64URLSafeString(bb.array());
}
|
@Test
public void testGetUUID() {
String id1 = Util.getUUID();
String id2 = Util.getUUID();
System.out.println("uuid = " + id1);
System.out.println("uuid = " + id2);
Assert.assertNotEquals(id1, id2);
}
|
@Deprecated
public static MessageType convert(StructType struct, FieldProjectionFilter filter) {
return convert(struct, filter, true, new Configuration());
}
|
@Test
public void testConvertLogicalI32Type() {
LogicalTypeAnnotation timeLogicalType = LogicalTypeAnnotation.timeType(true, TimeUnit.MILLIS);
String fieldName = "timeI32Type";
Short fieldId = 0;
ThriftType timeI32Type = new ThriftType.I32Type();
timeI32Type.setLogicalTypeAnnotation(timeLogicalType);
StructType thriftStruct = buildOneFieldThriftStructType(fieldName, fieldId, timeI32Type);
MessageType actual = ThriftSchemaConvertVisitor.convert(thriftStruct, FieldProjectionFilter.ALL_COLUMNS);
Type expectedParquetField = Types.primitive(PrimitiveTypeName.INT32, Repetition.REQUIRED)
.as(timeLogicalType)
.named(fieldName)
.withId(fieldId);
MessageType expected = buildOneFieldParquetMessage(expectedParquetField);
assertEquals(expected, actual);
}
|
@Override
public TensorProto serialize() {
TensorProto.Builder builder = TensorProto.newBuilder();
builder.setVersion(CURRENT_VERSION);
builder.setClassName(DenseMatrix.class.getName());
DenseTensorProto.Builder dataBuilder = DenseTensorProto.newBuilder();
dataBuilder.addAllDimensions(Arrays.stream(shape).boxed().collect(Collectors.toList()));
ByteBuffer buffer = ByteBuffer.allocate(numElements * 8).order(ByteOrder.LITTLE_ENDIAN);
DoubleBuffer doubleBuffer = buffer.asDoubleBuffer();
for (int i = 0; i < values.length; i ++) {
doubleBuffer.put(values[i]);
}
doubleBuffer.rewind();
dataBuilder.setValues(ByteString.copyFrom(buffer));
builder.setSerializedData(Any.pack(dataBuilder.build()));
return builder.build();
}
|
@Test
public void serializationTest() {
DenseMatrix a = generateA();
TensorProto proto = a.serialize();
Tensor deser = Tensor.deserialize(proto);
assertEquals(a,deser);
}
|
@Override
public BytesInput getBytes() {
// The Page Header should include: blockSizeInValues, numberOfMiniBlocks, totalValueCount
if (deltaValuesToFlush != 0) {
flushBlockBuffer();
}
return BytesInput.concat(
config.toBytesInput(),
BytesInput.fromUnsignedVarInt(totalValueCount),
BytesInput.fromZigZagVarInt(firstValue),
BytesInput.from(baos));
}
|
@Test
public void shouldConsumePageDataInInitialization() throws IOException {
int[] data = new int[2 * blockSize + 3];
for (int i = 0; i < data.length; i++) {
data[i] = i * 32;
}
writeData(data);
reader = new DeltaBinaryPackingValuesReader();
BytesInput bytes = writer.getBytes();
byte[] valueContent = bytes.toByteArray();
byte[] pageContent = new byte[valueContent.length * 10];
int contentOffsetInPage = 33;
System.arraycopy(valueContent, 0, pageContent, contentOffsetInPage, valueContent.length);
    // the reader should consume exactly the encoded values, leaving the stream positioned after them
ByteBufferInputStream stream = ByteBufferInputStream.wrap(ByteBuffer.wrap(pageContent));
stream.skipFully(contentOffsetInPage);
reader.initFromPage(100, stream);
long offset = stream.position();
assertEquals(valueContent.length + contentOffsetInPage, offset);
// should be able to read data correctly
for (int i : data) {
assertEquals(i, reader.readInteger());
}
// Testing the deprecated behavior of using byte arrays directly
reader = new DeltaBinaryPackingValuesReader();
reader.initFromPage(100, pageContent, contentOffsetInPage);
assertEquals(valueContent.length + contentOffsetInPage, reader.getNextOffset());
for (int i : data) {
assertEquals(i, reader.readInteger());
}
}
|
@Override
protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName)
throws Exception {
Message in = exchange.getIn();
MetricsTimerAction action = endpoint.getAction();
MetricsTimerAction finalAction = in.getHeader(HEADER_TIMER_ACTION, action, MetricsTimerAction.class);
if (finalAction == MetricsTimerAction.start) {
handleStart(exchange, registry, metricsName);
} else if (finalAction == MetricsTimerAction.stop) {
handleStop(exchange, metricsName);
} else {
LOG.warn("No action provided for timer \"{}\"", metricsName);
}
}
|
@Test
public void testProcessStop() throws Exception {
when(endpoint.getAction()).thenReturn(MetricsTimerAction.stop);
when(in.getHeader(HEADER_TIMER_ACTION, MetricsTimerAction.stop, MetricsTimerAction.class))
.thenReturn(MetricsTimerAction.stop);
when(exchange.getProperty(PROPERTY_NAME, Timer.Context.class)).thenReturn(context);
producer.doProcess(exchange, endpoint, registry, METRICS_NAME);
inOrder.verify(exchange, times(1)).getIn();
inOrder.verify(endpoint, times(1)).getAction();
inOrder.verify(in, times(1)).getHeader(HEADER_TIMER_ACTION, MetricsTimerAction.stop, MetricsTimerAction.class);
inOrder.verify(exchange, times(1)).getProperty(PROPERTY_NAME, Timer.Context.class);
inOrder.verify(context, times(1)).stop();
inOrder.verify(exchange, times(1)).removeProperty(PROPERTY_NAME);
inOrder.verifyNoMoreInteractions();
}
|
public Future<KafkaVersionChange> reconcile() {
return getPods()
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
}
|
@Test
public void testUpgradeWithAllVersionAndMixedPods(VertxTestContext context) {
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(VERSIONS.defaultVersion().version(), VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(), VERSIONS.defaultVersion().metadataVersion()),
mockRos(mockMixedPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(), VERSIONS.defaultVersion().version()))
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.metadataVersion(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()));
async.flag();
})));
}
|
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
if (resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersion.class) ||
resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersions.class)) {
checkVersion(resourceInfo.getResourceMethod().getAnnotationsByType(SupportedSearchVersion.class));
} else if (resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersion.class) ||
resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersions.class)) {
checkVersion(resourceInfo.getResourceClass().getAnnotationsByType(SupportedSearchVersion.class));
}
}
|
@Test
public void testFilterOnMethod() throws Exception {
final Method resourceMethod = TestResourceWithMethodAnnotation.class.getMethod("methodWithAnnotation");
when(resourceInfo.getResourceMethod()).thenReturn(resourceMethod);
when(versionProvider.get()).thenReturn(openSearchV1);
filter.filter(requestContext);
verify(versionProvider, times(1)).get();
}
|
@Override
public String render(String text) {
if (StringUtils.isBlank(text)) {
return "";
}
if (regex.isEmpty() || link.isEmpty()) {
Comment comment = new Comment();
comment.escapeAndAdd(text);
return comment.render();
}
try {
Matcher matcher = Pattern.compile(regex).matcher(text);
int start = 0;
Comment comment = new Comment();
while (hasMatch(matcher)) {
comment.escapeAndAdd(text.substring(start, matcher.start()));
comment.add(dynamicLink(matcher));
start = matcher.end();
}
comment.escapeAndAdd(text.substring(start));
return comment.render();
} catch (PatternSyntaxException e) {
LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
}
return text;
}
|
@Test
public void shouldRenderUsingFixedUrlIfLinkDoesNotContainVariable() throws Exception {
String link = "http://mingle05/projects/cce/cards/wall-E";
String regex = "(evo-\\d+)";
trackingTool = new DefaultCommentRenderer(link, regex);
String result = trackingTool.render("evo-111: checkin message");
assertThat(result,
is("<a href=\"" + "http://mingle05/projects/cce/cards/wall-E\" "
+ "target=\"story_tracker\">evo-111</a>: checkin message"));
}
|
@Override
public ConnectorMetadata getMetadata() {
Optional<IHiveMetastore> hiveMetastore = Optional.empty();
if (isHiveOrGlueCatalogType()) {
MetastoreType metastoreType = MetastoreType.get(catalogType);
HiveMetaClient metaClient = HiveMetaClient.createHiveMetaClient(this.hdfsEnvironment, properties);
hiveMetastore = Optional.of(new HiveMetastore(metaClient, catalogName, metastoreType));
// TODO caching hiveMetastore support
}
return new KuduMetadata(catalogName, hdfsEnvironment, kuduMaster, schemaEmulationEnabled, schemaEmulationPrefix,
hiveMetastore);
}
|
@Test
public void testGetMetadata() {
Map<String, String> properties = new HashMap<>();
properties.put("kudu.master", "localhost:7051");
properties.put("kudu.catalog.type", "kudu");
KuduConnector connector = new KuduConnector(new ConnectorContext("kudu_catalog", "kudu", properties));
ConnectorMetadata metadata = connector.getMetadata();
Assert.assertTrue(metadata instanceof KuduMetadata);
}
|
public boolean cleanExpiredConsumeQueue(final String addr,
long timeoutMillis) throws MQClientException, RemotingConnectException,
RemotingSendRequestException, RemotingTimeoutException, InterruptedException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CLEAN_EXPIRED_CONSUMEQUEUE, null);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return true;
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
}
|
@Test
public void assertCleanExpiredConsumeQueue() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
assertTrue(mqClientAPI.cleanExpiredConsumeQueue(defaultBrokerAddr, defaultTimeout));
}
|
public void insertOrUpdateOutputData(OutputData outputData) {
final String outputDataStr = validateAndToJson(outputData);
withMetricLogError(
() ->
withRetryableUpdate(UPSERT_OUTPUT_DATA_QUERY, stmt -> stmt.setString(1, outputDataStr)),
"insertOrUpdateOutputData",
"Failed updating output data: [{}]",
outputDataStr);
}
|
@Test
public void testParamsSizeOverLimit() throws Exception {
ObjectMapper mockMapper = mock(ObjectMapper.class);
OutputDataDao testDao = new OutputDataDao(dataSource, mockMapper, config);
when(mockMapper.writeValueAsString(any()))
.thenReturn(new String(new char[Constants.JSONIFIED_PARAMS_STRING_SIZE_LIMIT + 1]));
AssertHelper.assertThrows(
"Output data size is over limit",
IllegalArgumentException.class,
"Output data's total size [750001] is larger than system param size limit [750000]",
() ->
testDao.insertOrUpdateOutputData(
new OutputData(
JOB_TYPE,
EXT_JOB_ID,
WORKFLOW_ID,
System.currentTimeMillis(),
System.currentTimeMillis(),
params,
new HashMap<>())));
}
|
@Override
public String getExtraOptionsHelpText() {
return "http://docs.aws.amazon.com/redshift/latest/mgmt/configure-jdbc-connection.html";
}
|
@Test
public void testGetExtraOptionsHelpText() throws Exception {
assertEquals( "http://docs.aws.amazon.com/redshift/latest/mgmt/configure-jdbc-connection.html",
dbMeta.getExtraOptionsHelpText() );
}
|
public static Fiat parseFiatInexact(final String currencyCode, final String str) {
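        // Shift the decimal point by the smallest-unit exponent and truncate to a long, hence "inexact".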
try {
long val = new BigDecimal(str).movePointRight(SMALLEST_UNIT_EXPONENT).longValue();
return Fiat.valueOf(currencyCode, val);
} catch (ArithmeticException e) {
throw new IllegalArgumentException(e);
}
}
|
@Test(expected = IllegalArgumentException.class)
public void testParseFiatInexactInvalidAmount() {
Fiat.parseFiatInexact("USD", "33.xx");
}
|
@Override
public void restRequest(RestRequest request, Callback<RestResponse> callback, String routeKey)
{
this.restRequest(request, new RequestContext(), callback, routeKey);
}
|
@Test
public void testRouteLookupClientFuture() throws ExecutionException, InterruptedException
{
RouteLookup routeLookup = new SimpleTestRouteLookup();
final D2Client d2Client = new D2ClientBuilder().setZkHosts("localhost:2121").build();
d2Client.start(new FutureCallback<>());
RouteLookupClient routeLookupClient = new RouteLookupClient(d2Client, routeLookup, "WestCoast");
RestRequest dummyRestRequest = new RestRequestBuilder(URI.create("d2://simple_uri")).build();
Future<RestResponse> future = routeLookupClient.restRequest(dummyRestRequest, "5436");
try
{
future.get();
      // the request shouldn't succeed because we haven't set up a server or any service -> cluster -> uri
      // mapping; we want it to fail so that the ServiceUnavailableException reveals the rewritten
      // service name we tried to reach.
Assert.fail("Unexpected success, request should have thrown a ServiceUnavailableException");
}
catch (Exception e)
{
String message = e.getMessage();
if (!message.contains("_serviceName=simple_uriWestCoast5436Foo"))
{
Assert.fail("request was not rewritten to point at the d2 service simple_uriWestCoast5436Foo");
}
}
}
|
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
try {
final String prefix = this.createPrefix(directory);
if(log.isDebugEnabled()) {
log.debug(String.format("List with prefix %s", prefix));
}
final Path bucket = containerService.getContainer(directory);
final AttributedList<Path> objects = new AttributedList<>();
String priorLastKey = null;
String priorLastVersionId = null;
long revision = 0L;
String lastKey = null;
boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory);
do {
final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER),
new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"),
priorLastKey, priorLastVersionId, false);
// Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first.
for(BaseVersionOrDeleteMarker marker : chunk.getItems()) {
final String key = URIEncoder.decode(marker.getKey());
if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip placeholder key %s", key));
}
hasDirectoryPlaceholder = true;
continue;
}
final PathAttributes attr = new PathAttributes();
attr.setVersionId(marker.getVersionId());
if(!StringUtils.equals(lastKey, key)) {
// Reset revision for next file
revision = 0L;
}
attr.setRevision(++revision);
attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest());
if(marker.isDeleteMarker()) {
attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true)));
}
attr.setModificationDate(marker.getLastModified().getTime());
attr.setRegion(bucket.attributes().getRegion());
if(marker instanceof S3Version) {
final S3Version object = (S3Version) marker;
attr.setSize(object.getSize());
if(StringUtils.isNotBlank(object.getEtag())) {
attr.setETag(StringUtils.remove(object.getEtag(), "\""));
// The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
// using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
// not the MD5 of the object data.
attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\"")));
}
if(StringUtils.isNotBlank(object.getStorageClass())) {
attr.setStorageClass(object.getStorageClass());
}
}
final Path f = new Path(directory.isDirectory() ? directory : directory.getParent(),
PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr);
if(metadata) {
f.withAttributes(attributes.find(f));
}
objects.add(f);
lastKey = key;
}
final String[] prefixes = chunk.getCommonPrefixes();
final List<Future<Path>> folders = new ArrayList<>();
for(String common : prefixes) {
if(new SimplePathPredicate(PathNormalizer.compose(bucket, URIEncoder.decode(common))).test(directory)) {
continue;
}
folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common)));
}
for(Future<Path> f : folders) {
try {
objects.add(Uninterruptibles.getUninterruptibly(f));
}
catch(ExecutionException e) {
log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
for(Throwable cause : ExceptionUtils.getThrowableList(e)) {
Throwables.throwIfInstanceOf(cause, BackgroundException.class);
}
throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e));
}
}
priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null;
priorLastVersionId = chunk.getNextVersionIdMarker();
listener.chunk(directory, objects);
}
while(priorLastKey != null);
if(!hasDirectoryPlaceholder && objects.isEmpty()) {
// Only for AWS
if(S3Session.isAwsHostname(session.getHost().getHostname())) {
if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
if(log.isWarnEnabled()) {
log.warn(String.format("No placeholder found for directory %s", directory));
}
throw new NotfoundException(directory.getAbsolute());
}
}
else {
// Handle missing prefix for directory placeholders in Minio
final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(),
String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()),
String.valueOf(Path.DELIMITER), 1, null, null, false);
if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) {
throw new NotfoundException(directory.getAbsolute());
}
}
}
return objects;
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory);
}
finally {
// Cancel future tasks
pool.shutdown(false);
}
}
|
@Test
    public void testDirectoryPlaceholderNoChildren() throws Exception {
final Path bucket = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final Path directory = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertFalse(isDuplicate(directory, new S3VersionedObjectListService(session, acl).list(bucket, new DisabledListProgressListener())));
// Nullify version to add delete marker
directory.attributes().setVersionId(null);
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertTrue(isDuplicate(directory, new S3VersionedObjectListService(session, acl).list(bucket, new DisabledListProgressListener())));
}
|
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final ReadOnlyKeyValueStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
.store(QueryableStoreTypes.timestampedKeyValueStore(), partition);
final ValueAndTimestamp<GenericRow> row = store.get(key);
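            // A missing key yields an empty iterator rather than a null result.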
if (row == null) {
return KsMaterializedQueryResult.rowIterator(Collections.emptyIterator());
} else {
return KsMaterializedQueryResult.rowIterator(ImmutableList.of(Row.of(
stateStore.schema(), key, row.value(), row.timestamp())).iterator());
}
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldThrowIfGettingStateStoreFails() {
// Given:
when(stateStore.store(any(), anyInt())).thenThrow(new MaterializationTimeOutException("Boom"));
// When:
final Exception e = assertThrows(
MaterializationException.class,
() -> table.get(A_KEY, PARTITION)
);
// Then:
assertThat(e.getMessage(), containsString(
"Failed to get value from materialized table"));
assertThat(e.getCause(), (instanceOf(MaterializationTimeOutException.class)));
}
|
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor(
Input<T> input) {
boolean canOmitSetKeyContext;
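        // Determine whether setKeyContextElement can be skipped for each record.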
if (input instanceof AbstractStreamOperator) {
canOmitSetKeyContext = canOmitSetKeyContext((AbstractStreamOperator<?>) input, 0);
} else {
canOmitSetKeyContext =
input instanceof KeyContextHandler
&& !((KeyContextHandler) input).hasKeyContext();
}
if (canOmitSetKeyContext) {
return input::processElement;
} else if (input instanceof AsyncStateProcessing
&& ((AsyncStateProcessing) input).isAsyncStateProcessingEnabled()) {
return ((AsyncStateProcessing) input).getRecordProcessor(1);
} else {
return record -> {
input.setKeyContextElement(record);
input.processElement(record);
};
}
}
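// A simplified, self-contained sketch of the dispatch idea above: choose the
// per-record consumer once, up front, so the hot path never re-checks whether
// the key-context step is needed. The Input interface here is an illustrative
// stand-in for Flink's, not the real API.
import java.util.function.Consumer;

public class RecordDispatchSketch {
    interface Input {
        void processElement(String record);
        void setKeyContext(String record);
    }

    static Consumer<String> recordProcessor(Input input, boolean hasKeyContext) {
        if (!hasKeyContext) {
            return input::processElement; // key-context step safely omitted
        }
        return record -> {
            input.setKeyContext(record);
            input.processElement(record);
        };
    }

    public static void main(String[] args) {
        Input input = new Input() {
            public void processElement(String r) { System.out.println("process " + r); }
            public void setKeyContext(String r) { System.out.println("keyCtx  " + r); }
        };
        recordProcessor(input, true).accept("a");  // keyCtx a, then process a
        recordProcessor(input, false).accept("b"); // process b only
    }
}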
|
@Test
void testGetRecordProcessor() throws Exception {
TestOperator input1 = new TestOperator();
TestOperator input2 = new TestKeyContextHandlerOperator(true);
TestOperator input3 = new TestKeyContextHandlerOperator(false);
RecordProcessorUtils.getRecordProcessor(input1).accept(new StreamRecord<>("test"));
assertThat(input1.setKeyContextElementCalled).isTrue();
assertThat(input1.processElementCalled).isTrue();
RecordProcessorUtils.getRecordProcessor(input2).accept(new StreamRecord<>("test"));
assertThat(input2.setKeyContextElementCalled).isTrue();
assertThat(input2.processElementCalled).isTrue();
RecordProcessorUtils.getRecordProcessor(input3).accept(new StreamRecord<>("test"));
assertThat(input3.setKeyContextElementCalled).isFalse();
assertThat(input3.processElementCalled).isTrue();
}
|
@Override
public long queryOffset(final String group, final String topic, final int queueId) {
if (!MixAll.isLmq(group)) {
return super.queryOffset(group, topic, queueId);
}
// topic@group
String key = topic + TOPIC_GROUP_SEPARATOR + group;
Long offset = lmqOffsetTable.get(key);
if (offset != null) {
return offset;
}
return -1;
}
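// A minimal sketch of the "topic@group" keyed lookup the LMQ branch performs.
// The map, separator, and -1 sentinel mirror the method above; the class and
// field names are illustrative.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LmqOffsetSketch {
    private static final String TOPIC_GROUP_SEPARATOR = "@";
    private final Map<String, Long> lmqOffsetTable = new ConcurrentHashMap<>();

    long queryOffset(String group, String topic) {
        // Offsets for lightweight message queues are keyed as "topic@group".
        Long offset = lmqOffsetTable.get(topic + TOPIC_GROUP_SEPARATOR + group);
        return offset != null ? offset : -1; // -1 signals "no offset recorded"
    }

    public static void main(String[] args) {
        LmqOffsetSketch sketch = new LmqOffsetSketch();
        sketch.lmqOffsetTable.put("%LMQ%queue1@group1", 42L);
        System.out.println(sketch.queryOffset("group1", "%LMQ%queue1")); // 42
        System.out.println(sketch.queryOffset("group1", "missing"));     // -1
    }
}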
|
@Test
public void testQueryOffsetForLmqGroupWithoutExistingOffset() {
// Act
Map<Integer, Long> actualOffsets = offsetManager.queryOffset(LMQ_GROUP, "nonExistingTopic");
// Assert
assertNotNull(actualOffsets);
assertTrue("The map should be empty for non-existing offsets", actualOffsets.isEmpty());
}
|
public static DataChecksum newDataChecksum(Type type, int bytesPerChecksum ) {
if ( bytesPerChecksum <= 0 ) {
return null;
}
switch ( type ) {
case NULL :
return new DataChecksum(type, new ChecksumNull(), bytesPerChecksum );
case CRC32 :
return new DataChecksum(type, newCrc32(), bytesPerChecksum );
case CRC32C:
return new DataChecksum(type, newCrc32C(), bytesPerChecksum);
default:
return null;
}
}
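// Usage sketch: the factory pairs a java.util.zip Checksum with a chunk size.
// A minimal standalone example computing a CRC32 over one chunk (the chunk
// contents and size are arbitrary assumptions):
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public class ChecksumSketch {
    public static void main(String[] args) {
        byte[] chunk = "one bytesPerChecksum-sized chunk of payload".getBytes(StandardCharsets.UTF_8);
        Checksum crc = new CRC32();
        crc.update(chunk, 0, chunk.length);
        System.out.printf("CRC32 = 0x%08x%n", crc.getValue());
        crc.reset(); // the same instance can then verify the next chunk
    }
}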
|
@Test
public void testBulkOps() throws Exception {
for (DataChecksum.Type type : CHECKSUM_TYPES) {
System.err.println(
"---- beginning tests with checksum type " + type + "----");
DataChecksum checksum = DataChecksum.newDataChecksum(
type, BYTES_PER_CHUNK);
for (boolean useDirect : new boolean[]{false, true}) {
doBulkTest(checksum, 1023, useDirect);
doBulkTest(checksum, 1024, useDirect);
doBulkTest(checksum, 1025, useDirect);
}
}
}
|
@Nonnull
public MappingResults applyToPrimaryResource(@Nonnull Mappings mappings) {
mappings = enrich(mappings);
WorkspaceResource resource = workspace.getPrimaryResource();
MappingResults results = new MappingResults(mappings, listeners.createBundledMappingApplicationListener())
.withAggregateManager(aggregateMappingManager);
// Apply mappings to all classes in the primary resource, collecting into the results model.
Mappings finalMappings = mappings;
ExecutorService service = ThreadUtil.phasingService(applierThreadPool);
Stream.concat(resource.jvmClassBundleStream(), resource.versionedJvmClassBundleStream()).forEach(bundle -> {
bundle.forEach(classInfo -> {
service.execute(() -> dumpIntoResults(results, workspace, resource, bundle, classInfo, finalMappings));
});
});
ThreadUtil.blockUntilComplete(service);
// Yield results
return results;
}
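// A minimal sketch of the fan-out-then-block pattern used above, with a plain
// ExecutorService standing in for ThreadUtil's phasing service (pool size and
// task bodies are assumptions):
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class FanOutSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService service = Executors.newFixedThreadPool(4);
        // Fan out: one task per class, each dumping its result into a shared model.
        for (String className : List.of("Foo", "Bar", "Baz")) {
            service.execute(() -> System.out.println("remapping " + className));
        }
        // Block until every submitted task completes, mirroring
        // ThreadUtil.blockUntilComplete(service).
        service.shutdown();
        service.awaitTermination(1, TimeUnit.MINUTES);
    }
}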
|
@Test
void applyClassWithAnnotation() {
String annotationName = AnnotationImpl.class.getName().replace('.', '/');
String classWithAnnotationName = ClassWithAnnotation.class.getName().replace('.', '/');
// Create mappings for all classes but the target 'ClassWithAnnotation'
Mappings mappings = mappingGenerator.generate(workspace, resource, inheritanceGraph, nameGenerator, new NameGeneratorFilter(null, true) {
@Override
public boolean shouldMapClass(@Nonnull ClassInfo info) {
return !info.getName().equals(classWithAnnotationName);
}
});
// Preview the mapping operation
MappingResults results = mappingApplier.applyToPrimaryResource(mappings);
// The annotation class we define should be remapped.
// The user class (ClassWithAnnotation) itself should not be remapped,
// but its annotation usage should be updated.
String mappedAnnotationName = mappings.getMappedClassName(annotationName);
assertNotNull(mappedAnnotationName, "AnnotationImpl should be remapped");
assertNull(mappings.getMappedClassName(classWithAnnotationName), "ClassWithAnnotation should not be remapped");
assertTrue(results.wasMapped(annotationName), "AnnotationImpl should have updated");
assertTrue(results.wasMapped(classWithAnnotationName), "ClassWithAnnotation should have updated");
// Assert aggregate updated too.
results.apply();
AggregatedMappings aggregatedMappings = aggregateMappingManager.getAggregatedMappings();
assertNotNull(aggregatedMappings.getMappedClassName(annotationName),
"AnnotationImpl should be tracked in aggregate");
// Get the names of the annotation's mapped attribute methods
String annoValueName = mappings.getMappedMethodName(annotationName, "value", "()Ljava/lang/String;");
String annoPolicyName = mappings.getMappedMethodName(annotationName, "policy", "()Ljava/lang/annotation/Retention;");
// Assert the user class has the correct new values
ClassPathNode classPath = results.getPostMappingPath(classWithAnnotationName);
assertNotNull(classPath, "Could not find: " + classWithAnnotationName);
JvmClassInfo classWithAnnotation = classPath.getValue().asJvmClass();
AnnotationInfo annotationInfo = classWithAnnotation.getAnnotations().get(0);
assertEquals("L" + mappedAnnotationName + ";", annotationInfo.getDescriptor(),
"AnnotationImpl not remapped in ClassWithAnnotation");
AnnotationElement valueElement = annotationInfo.getElements().get(annoValueName);
AnnotationElement policyElement = annotationInfo.getElements().get(annoPolicyName);
assertNotNull(valueElement, "Missing mapped value element");
assertNotNull(policyElement, "Missing mapped policy element");
}
|
public long next() {
final long duration;
if (mDurations == null) {
duration = mDuration;
} else {
duration = mDurations[mIndex];
if (mIndex < mDurations.length - 1) {
mIndex++;
}
}
mNextTime = now() + duration;
return duration;
}
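// A self-contained sketch of the same escalating-delay schedule: each call
// returns the next duration from the array and sticks at the last entry.
// Names mirror the snippet above, but the class itself is illustrative.
public class DelaySketch {
    private final long[] durations;
    private int index;

    DelaySketch(long... durations) {
        this.durations = durations;
    }

    long next() {
        long duration = durations[index];
        if (index < durations.length - 1) {
            index++; // advance until the last entry, then stay there
        }
        return duration;
    }

    public static void main(String[] args) {
        DelaySketch delay = new DelaySketch(1000, 2000, 5000);
        for (int i = 0; i < 5; i++) {
            System.out.println(delay.next()); // 1000, 2000, 5000, 5000, 5000
        }
    }
}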
|
@Test
public void testDelayOne() {
final long millis = 5000;
final Delay delay = new Delay(millis);
for (int i = 0; i < 5; i++) {
check(delay, millis);
final long next = delay.next();
Assert.assertEquals(millis, next);
}
}
|
@Override
public void setConf(Configuration conf) {
this.conf = conf;
uid = conf.getInt(UID, 0);
user = conf.get(USER);
if (null == user) {
try {
user = UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException e) {
user = "hadoop";
}
}
gid = conf.getInt(GID, 1);
group = conf.get(GROUP);
if (null == group) {
group = user;
}
resetUGInfo();
addUser(user, uid);
addGroup(group, gid);
}
|
@Test
public void testDefault() {
String user;
try {
user = UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException e) {
user = "hadoop";
}
Configuration conf = new Configuration(false);
ugi.setConf(conf);
Map<Integer, String> ids = ugi.ugiMap();
assertEquals(2, ids.size());
assertEquals(user, ids.get(0));
assertEquals(user, ids.get(1));
}
|
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStartBounds,
final Range<Instant> windowEndBounds,
final Optional<Position> position
) {
try {
final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
.store(QueryableStoreTypes.timestampedWindowStore(), partition);
final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
= cacheBypassFetcher.fetch(store, key, lower, upper)) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
final Instant windowStart = Instant.ofEpochMilli(next.key);
if (!windowStartBounds.contains(windowStart)) {
continue;
}
final Instant windowEnd = windowStart.plus(windowSize);
if (!windowEndBounds.contains(windowEnd)) {
continue;
}
final TimeWindow window =
new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
new Windowed<>(key, window),
next.value.value(),
next.value.timestamp()
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIterator(builder.build().iterator());
}
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldFetchWithOnlyStartBounds() {
// When:
table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, Range.all());
// Then:
verify(cacheBypassFetcher).fetch(
eq(tableStore),
any(),
eq(WINDOW_START_BOUNDS.lowerEndpoint()),
eq(WINDOW_START_BOUNDS.upperEndpoint())
);
}
|
@Override
public <T> T loadObject(String accountName, ObjectType objectType, String objectKey)
throws IllegalArgumentException, NotFoundException {
if (objectType.equals(ObjectType.CANARY_RESULT_ARCHIVE)) {
var record =
sqlCanaryArchiveRepo
.findById(objectKey)
.orElseThrow(() -> new NotFoundException("Not found object for id: " + objectKey));
return mapToObject(record.getContent(), objectType);
}
if (objectType.equals(ObjectType.CANARY_CONFIG)) {
var record =
sqlCanaryConfigRepo
.findById(objectKey)
.orElseThrow(() -> new NotFoundException("Not found object for id: " + objectKey));
return mapToObject(record.getContent(), objectType);
}
if (objectType.equals(ObjectType.METRIC_SET_PAIR_LIST)) {
var record =
sqlMetricSetPairsRepo
.findById(objectKey)
.orElseThrow(() -> new NotFoundException("Not found object for id: " + objectKey));
return mapToObject(record.getContent(), objectType);
}
if (objectType.equals(ObjectType.METRIC_SET_LIST)) {
var record =
sqlMetricSetsRepo
.findById(objectKey)
.orElseThrow(() -> new NotFoundException("Not found object for id: " + objectKey));
return mapToObject(record.getContent(), objectType);
}
throw new IllegalArgumentException("Unsupported object type: " + objectType);
}
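// The four branches above are a type-to-repository dispatch; a compact sketch
// of the same shape using an EnumMap. The repository lookups are illustrative
// stand-ins, and IllegalArgumentException replaces the NotFoundException used
// by the real service.
import java.util.EnumMap;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;

public class LoadObjectSketch {
    enum ObjectType { CANARY_RESULT_ARCHIVE, CANARY_CONFIG, METRIC_SET_PAIR_LIST, METRIC_SET_LIST }

    private final Map<ObjectType, Function<String, Optional<String>>> repos =
            new EnumMap<>(ObjectType.class);

    String loadObject(ObjectType type, String key) {
        Function<String, Optional<String>> repo = repos.get(type);
        if (repo == null) {
            throw new IllegalArgumentException("Unsupported object type: " + type);
        }
        return repo.apply(key)
                .orElseThrow(() -> new IllegalArgumentException("Not found object for id: " + key));
    }

    public static void main(String[] args) {
        LoadObjectSketch sketch = new LoadObjectSketch();
        sketch.repos.put(ObjectType.CANARY_CONFIG,
                key -> "cfg-1".equals(key) ? Optional.of("{\"name\":\"config\"}") : Optional.empty());
        System.out.println(sketch.loadObject(ObjectType.CANARY_CONFIG, "cfg-1"));
    }
}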
|
@Test
public void testLoadObjectWhenMetricSetPairs() throws IOException {
var testAccountName = UUID.randomUUID().toString();
var testObjectType = ObjectType.METRIC_SET_PAIR_LIST;
var testObjectKey = UUID.randomUUID().toString();
var testMetricSetPair = createTestMetricSetPair();
var testSqlMetricSetPairs = createTestSqlMetricSetPairs(testMetricSetPair);
testSqlMetricSetPairs.setId(testObjectKey);
when(sqlMetricSetPairsRepo.findById(testObjectKey))
.thenReturn(Optional.of(testSqlMetricSetPairs));
var metricSetPairs =
(List<MetricSetPair>)
sqlStorageService.loadObject(testAccountName, testObjectType, testObjectKey);
assertNotNull(metricSetPairs);
assertEquals(metricSetPairs.size(), 1);
var metricSetPair = metricSetPairs.get(0);
assertEquals(metricSetPair.getId(), testMetricSetPair.getId());
assertEquals(metricSetPair.getName(), testMetricSetPair.getName());
assertEquals(metricSetPair.getTags(), testMetricSetPair.getTags());
assertEquals(metricSetPair.getValues(), testMetricSetPair.getValues());
assertEquals(metricSetPair.getAttributes(), testMetricSetPair.getAttributes());
assertEquals(metricSetPair.getScopes(), testMetricSetPair.getScopes());
}
|
@Override
public Stream<MappingField> resolveAndValidateFields(
boolean isKey,
List<MappingField> userFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
if (userFields.isEmpty()) {
throw QueryException.error("Column list is required for JSON format");
}
return extractFields(userFields, isKey).entrySet().stream()
.map(entry -> {
QueryPath path = entry.getKey();
if (path.isTopLevel()) {
throw QueryException.error("Cannot use '" + path + "' field with JSON serialization");
}
return entry.getValue();
});
}
|
@Test
@Parameters({
"true, __key",
"false, this"
})
public void test_resolveFields(boolean key, String prefix) {
Stream<MappingField> fields = INSTANCE.resolveAndValidateFields(
key,
singletonList(field("field", QueryDataType.INT, prefix + ".field")),
emptyMap(),
null
);
assertThat(fields).containsExactly(field("field", QueryDataType.INT, prefix + ".field"));
}
|
@Nullable
AuthTemplate getAuthFor(String registry) {
Map.Entry<String, AuthTemplate> authEntry =
findFirstInMapByKey(dockerConfigTemplate.getAuths(), getRegistryMatchersFor(registry));
return authEntry != null ? authEntry.getValue() : null;
}
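// A standalone sketch of ordered registry matching like the test below
// exercises: candidate matchers are tried in preference order (exact match,
// https-prefixed, name prefix) and the first hit wins. The matcher set is an
// assumption based on the test's expectations, not the library's actual code.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;

public class RegistryAuthSketch {
    static String findAuth(Map<String, String> auths, String registry) {
        List<Predicate<String>> matchers = List.of(
                key -> key.equals(registry),
                key -> key.equals("https://" + registry),
                key -> key.startsWith(registry));
        for (Predicate<String> matcher : matchers) {
            for (Map.Entry<String, String> entry : auths.entrySet()) {
                if (matcher.test(entry.getKey())) {
                    return entry.getValue(); // earlier matchers take preference
                }
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Map<String, String> auths = new LinkedHashMap<>();
        auths.put("https://my-registry", "with-https");
        auths.put("my-registry", "exact");
        System.out.println(findAuth(auths, "my-registry")); // exact
    }
}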
|
@Test
public void testGetAuthFor_orderOfMatchPreference() throws URISyntaxException, IOException {
Path json =
Paths.get(Resources.getResource("core/json/dockerconfig_extra_matches.json").toURI());
DockerConfig dockerConfig =
new DockerConfig(JsonTemplateMapper.readJsonFromFile(json, DockerConfigTemplate.class));
Assert.assertEquals(
"my-registry: exact match", dockerConfig.getAuthFor("my-registry").getAuth());
Assert.assertEquals(
"cool-registry: with https", dockerConfig.getAuthFor("cool-registry").getAuth());
Assert.assertEquals(
"awesome-registry: starting with name",
dockerConfig.getAuthFor("awesome-registry").getAuth());
Assert.assertEquals(
"dull-registry: starting with name and with https",
dockerConfig.getAuthFor("dull-registry").getAuth());
}
|
public static void reset() {
settings = null;
}
|
@Test public void
reset_sets_static_json_schema_validator_settings_to_null() {
// Given
JsonSchemaValidator.settings = new JsonSchemaValidatorSettings();
// When
JsonSchemaValidator.reset();
// Then
try {
assertThat(JsonSchemaValidator.settings, nullValue());
} finally {
JsonSchemaValidator.settings = null;
}
}
|
public static Stream<String> splitAsStream(CharSequence text, String regex) {
if (text == null || regex == null) {
return Stream.empty();
}
return Pattern.compile(regex).splitAsStream(text);
}
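// Usage sketch: the helper is a null-safe wrapper around
// Pattern.compile(regex).splitAsStream(text). Stream.toList() needs Java 16+;
// use Collectors.toList() on older JDKs.
import java.util.List;
import java.util.regex.Pattern;

public class SplitSketch {
    public static void main(String[] args) {
        List<String> items = Pattern.compile(",").splitAsStream("a,b,c").toList();
        System.out.println(items); // [a, b, c]
    }
}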
|
@Test
public void testSplitAsStream() {
List<String> items = StringHelper.splitAsStream("a,b,c", ",").toList();
assertTrue(items.contains("a"));
assertTrue(items.contains("b"));
assertTrue(items.contains("c"));
}
|
private void rewrapDataSource(String jndiName, DataSource dataSource)
throws IllegalAccessException {
final String dataSourceClassName = dataSource.getClass().getName();
LOG.debug("Datasource needs rewrap: " + jndiName + " of class " + dataSourceClassName);
final String dataSourceRewrappedMessage = "Datasource rewrapped: " + jndiName;
if (isJBossOrGlassfishDataSource(dataSourceClassName)) {
            // JBOSS: rebinding the datasource in the JBoss JNDI is possible but does
            // not work (since every lookup would then return a MarshalledValuePair
            // instance or a javax.naming.Reference instance depending on how it is
            // done), so we modify the WrapperDataSource instance already present in
            // the JNDI directly.
            // GLASSFISH: the JNDI context starting with "java:" is read-only
            // in glassfish (as stated in the spec and as implemented in
            // http://kickjava.com/src/com/sun/enterprise/naming/java/javaURLContext.java.htm),
            // so we modify the DataSource40 instance already present in the JNDI
            // directly.
            // By "luck", the org.jboss.resource.adapter.jdbc.WrapperDataSource class
            // and the superclass of com.sun.gjc.spi.jdbc40.DataSource40 both contain
            // a field named "cm" of type javax.resource.spi.ConnectionManager, which
            // is what we want to proxy.
Object javaxConnectionManager = JdbcWrapperHelper.getFieldValue(dataSource, "cm");
javaxConnectionManager = createJavaxConnectionManagerProxy(javaxConnectionManager);
JdbcWrapperHelper.setFieldValue(dataSource, "cm", javaxConnectionManager);
LOG.debug(dataSourceRewrappedMessage);
} else if (isWildfly9DataSource(dataSourceClassName)) {
Object delegateDataSource = JdbcWrapperHelper.getFieldValue(dataSource, "delegate");
delegateDataSource = createDataSourceProxy((DataSource) delegateDataSource);
JdbcWrapperHelper.setFieldValue(dataSource, "delegate", delegateDataSource);
LOG.debug(dataSourceRewrappedMessage);
} else if (weblogic
&& "weblogic.jdbc.common.internal.RmiDataSource".equals(dataSourceClassName)) {
            // WEBLOGIC: the JNDI context is read-only, so we modify the RmiDataSource
            // instance already present in the JNDI directly.
rewrapWebLogicDataSource(dataSource);
LOG.debug(dataSourceRewrappedMessage);
} else if (isDbcpDataSource(dataSourceClassName)) {
            // JIRA in Tomcat: the dataSource has already been cached by org.ofbiz.core.entity.transaction.JNDIFactory
            // during the initialization of com.atlassian.jira.startup.JiraStartupChecklistContextListener,
            // so we modify the BasicDataSource instance already present in the JNDI directly.
            // And in some JIRA instances the datasource really is an instance of org.apache.commons.dbcp.BasicDataSource,
            // cf http://groups.google.com/group/javamelody/browse_thread/thread/da8336b908f1e3bd/6cf3048f1f11866e?show_docid=6cf3048f1f11866e
            // and also rewrap for tomee/openejb (cf issue 104).
rewrapBasicDataSource(dataSource);
LOG.debug(dataSourceRewrappedMessage);
} else if ("org.apache.openejb.resource.jdbc.managed.local.ManagedDataSource"
.equals(dataSourceClassName)) {
            // rewrap for more recent tomee/openejb versions (cf issue 104).
rewrapTomEEDataSource(dataSource);
LOG.debug(dataSourceRewrappedMessage);
} else {
LOG.info("Datasource can't be rewrapped: " + jndiName + " of class "
+ dataSourceClassName);
}
}
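// Every rewrap branch above swaps a private field for a proxy; a minimal
// reflection sketch of that mechanic. Class and field names are hypothetical;
// JavaMelody's JdbcWrapperHelper does the real get/set work.
import java.lang.reflect.Field;

public class FieldSwapSketch {
    static class Holder {
        private Object delegate = "original";
    }

    static void setFieldValue(Object target, String name, Object newValue)
            throws ReflectiveOperationException {
        Field field = target.getClass().getDeclaredField(name);
        field.setAccessible(true); // may fail on strongly encapsulated JDK modules
        field.set(target, newValue);
    }

    public static void main(String[] args) throws ReflectiveOperationException {
        Holder holder = new Holder();
        setFieldValue(holder, "delegate", "proxy");
        System.out.println(holder.delegate); // proxy
    }
}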
|
@Test
public void testRewrapDataSource() throws Exception {
final org.apache.tomcat.dbcp.dbcp2.BasicDataSource tomcat2DataSource = new org.apache.tomcat.dbcp.dbcp2.BasicDataSource();
tomcat2DataSource.setUrl(H2_DATABASE_URL);
rewrapDataSource(tomcat2DataSource);
final org.apache.commons.dbcp2.BasicDataSource dbcp2DataSource = new org.apache.commons.dbcp2.BasicDataSource();
dbcp2DataSource.setUrl(H2_DATABASE_URL);
rewrapDataSource(dbcp2DataSource);
final DataSource dataSource = createNiceMock(DataSource.class);
rewrapDataSource(dataSource);
}
|