Dataset schema: each record pairs a `focal_method` (string, length 13 to 60.9k characters) with its `test_case` (string, length 25 to 109k characters).
public static <E extends SCRIPT> E setScriptSrc(E e, String src) { if (src.endsWith(".js")) { e.$type("text/javascript"); // required in html4 } e.$src(src); return e; }
// Verifies setScriptSrc always sets $src, and sets $type("text/javascript") only for ".js" sources.
@Test void testSetScriptSrc() { SCRIPT script = mock(SCRIPT.class); HamletImpl.setScriptSrc(script, "uri"); HamletImpl.setScriptSrc(script, "script.js"); verify(script).$src("uri"); verify(script).$type("text/javascript"); verify(script).$src("script.js"); verifyNoMoreInteractions(script); }
// Capitalizes the first character of the given string; all other characters are
// left untouched. Delegates to changeFirstCharacterCase — null/empty handling is
// defined there (not visible here; presumably a no-op for empty input, per tests).
public static String capitalize(String str) { return changeFirstCharacterCase(str); }
// Exercises capitalize across empty, single-word, multi-word, symbol-only, and alphanumeric inputs.
@Test void testCapitalize() { // Test for an empty string String str1 = ""; assertEquals("", StringUtils.capitalize(str1)); // Test for a single word string String str2 = "hello"; assertEquals("Hello", StringUtils.capitalize(str2)); // Test for a multiple word string String str3 = "hello world"; assertEquals("Hello world", StringUtils.capitalize(str3)); // Test for a string with special characters String str4 = "!@#$%^&*()"; assertEquals("!@#$%^&*()", StringUtils.capitalize(str4)); // Test for a string with numbers String str5 = "abc123"; assertEquals("Abc123", StringUtils.capitalize(str5)); }
/**
 * Promotes the first worker in the list to leader, or clears the leader
 * when no workers remain.
 */
public void promoteLeader() {
  this.leader = workers.isEmpty() ? null : workers.get(0);
}
// After removing the current leader, promoteLeader must elect the new head of the worker list.
@Test void testPromoteLeader() { var taskSet = new TaskSet(); var taskHandler = new TaskHandler(); var workCenter = new WorkCenter(); workCenter.createWorkers(5, taskSet, taskHandler); workCenter.removeWorker(workCenter.getLeader()); workCenter.promoteLeader(); assertEquals(4, workCenter.getWorkers().size()); assertEquals(workCenter.getWorkers().get(0), workCenter.getLeader()); }
/**
 * Decides whether the given path should be filtered out (true = excluded).
 * A path is kept (returns false) only when no patterns are configured, or
 * when it matches an include pattern and is not rejected by shouldExclude.
 */
@Override public boolean filterPath(Path filePath) {
  // No patterns configured at all: keep every path.
  if (getIncludeMatchers().isEmpty() && getExcludeMatchers().isEmpty()) {
    return false;
  }
  // compensate for the fact that Flink paths are slashed
  final String path = filePath.hasWindowsDrive() ? filePath.getPath().substring(1) : filePath.getPath();
  final java.nio.file.Path nioPath = Paths.get(path);
  for (PathMatcher matcher : getIncludeMatchers()) {
    if (matcher.matches(nioPath)) {
      // Included paths can still be rejected by an exclude pattern.
      return shouldExclude(nioPath);
    }
  }
  // NOTE(review): when only exclude patterns are configured, the include loop
  // never matches and every path is filtered here — confirm this is intended.
  return true;
}
// A path matching both an include and an exclude pattern must be filtered out.
@Test void testExcludeFilesIfMatchesExclude() { GlobFilePathFilter matcher = new GlobFilePathFilter( Collections.singletonList("dir/*"), Collections.singletonList("dir/file.txt")); assertThat(matcher.filterPath(new Path("dir/file.txt"))).isTrue(); }
// Builds the issue Input for a component; RawLazyInput presumably defers reading
// issues from the scanner report until getIssues() is called — confirm in that class.
public Input<DefaultIssue> create(Component component) { return new RawLazyInput(component); }
// End-to-end check that an issue written to the scanner report is surfaced through
// create(FILE).getIssues() with all report-provided fields (rule, severity, line,
// gap, message + formatting, quick-fix flag, overridden impacts) preserved.
@Test void load_issues_from_report() { RuleKey ruleKey = RuleKey.of("java", "S001"); markRuleAsActive(ruleKey); registerRule(ruleKey, "name", r -> r.addDefaultImpact(MAINTAINABILITY, LOW)); ScannerReport.Issue reportIssue = ScannerReport.Issue.newBuilder() .setTextRange(newTextRange(2)) .setMsg("the message") .addMsgFormatting(ScannerReport.MessageFormatting.newBuilder().setStart(0).setEnd(3).setType(CODE).build()) .addOverridenImpacts(ScannerReport.Impact.newBuilder() .setSoftwareQuality(MAINTAINABILITY.name()) .setSeverity(org.sonar.api.issue.impact.Severity.HIGH.name()) .build()) .setRuleRepository(ruleKey.repository()) .setRuleKey(ruleKey.rule()) .setSeverity(Constants.Severity.BLOCKER) .setGap(3.14) .setQuickFixAvailable(true) .build(); reportReader.putIssues(FILE.getReportAttributes().getRef(), singletonList(reportIssue)); Input<DefaultIssue> input = underTest.create(FILE); Collection<DefaultIssue> issues = input.getIssues(); assertThat(issues).hasSize(1); DefaultIssue issue = Iterators.getOnlyElement(issues.iterator()); // fields set by analysis report assertThat(issue.ruleKey()).isEqualTo(ruleKey); assertThat(issue.severity()).isEqualTo(Severity.BLOCKER); assertThat(issue.line()).isEqualTo(2); assertThat(issue.gap()).isEqualTo(3.14); assertThat(issue.message()).isEqualTo("the message"); assertThat(issue.isQuickFixAvailable()).isTrue(); // Check message formatting DbIssues.MessageFormattings messageFormattings = Iterators.getOnlyElement(issues.iterator()).getMessageFormattings(); assertThat(messageFormattings.getMessageFormattingCount()).isEqualTo(1); assertThat(messageFormattings.getMessageFormatting(0).getStart()).isZero(); assertThat(messageFormattings.getMessageFormatting(0).getEnd()).isEqualTo(3); assertThat(messageFormattings.getMessageFormatting(0).getType()).isEqualTo(DbIssues.MessageFormattingType.CODE); // fields set by compute engine assertThat(issue.checksum()).isEqualTo(input.getLineHashSequence().getHashForLine(2)); assertThat(issue.tags()).isEmpty(); assertInitializedIssue(issue); assertThat(issue.effort()).isNull(); assertThat(issue.getRuleDescriptionContextKey()).isEmpty(); assertThat(issue.impacts()).containsExactlyEntriesOf(Map.of(MAINTAINABILITY, org.sonar.api.issue.impact.Severity.HIGH)); }
// Subject factory for fluent assertions on strings matched against RE2/J patterns,
// used as assertAbout(re2jString()).that(someString)... .
public static Subject.Factory<Re2jStringSubject, String> re2jString() { return Re2jStringSubject.FACTORY; }
// The assertion must pass when the subject contains no match for PATTERN_STR.
@Test public void doesNotContainMatch_string_succeeds() { assertAbout(re2jString()).that("hello cruel world").doesNotContainMatch(PATTERN_STR); }
/**
 * Lists a Manta directory. The root is synthetic and contains only the
 * account's normalized home directory; other directories are listed through
 * the Manta client, mapping client exceptions to Cyberduck exceptions.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    if(directory.isRoot()) {
        return new AttributedList<Path>(Collections.singletonList(
            new MantaAccountHomeInfo(session.getHost().getCredentials().getUsername(),
                session.getHost().getDefaultPath()).getNormalizedHomePath()));
    }
    final AttributedList<Path> children = new AttributedList<>();
    final Iterator<MantaObject> objectsIter;
    try {
        objectsIter = session.getClient().listObjects(directory.getAbsolute()).iterator();
    }
    // Exception mapping: most specific Manta failures first, generic I/O last.
    catch(MantaObjectException e) {
        throw new MantaExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    catch(MantaClientHttpResponseException e) {
        throw new MantaHttpExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Listing directory {0} failed", e);
    }
    final MantaObjectAttributeAdapter adapter = new MantaObjectAttributeAdapter(session);
    while(objectsIter.hasNext()) {
        MantaObject o = objectsIter.next();
        final Path file = new Path(directory, PathNormalizer.name(o.getPath()),
            EnumSet.of(o.isDirectory() ? Path.Type.directory : Path.Type.file),
            adapter.toAttributes(o)
        );
        children.add(file);
        // Report progress per object so callers can render partial listings.
        listener.chunk(directory, children);
    }
    return children;
}
// Listing a freshly created (empty) folder must return an empty result; cleans up afterwards.
@Test public void testListEmptyFolder() throws Exception { final Path folder = new MantaDirectoryFeature(session).mkdir(new Path( testPathPrefix, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertTrue(new MantaListService(session).list(folder, new DisabledListProgressListener()).isEmpty()); new MantaDeleteFeature(session).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Resolves the database file for the appender.
 *
 * @param filename optional caller-supplied path; blank or null is ignored
 * @return the caller's file, or the default Android database path for
 *         "logback.db" when the name is missing/blank or points at a directory
 */
public File getDatabaseFile(String filename) {
  final boolean hasUsableName = filename != null && filename.trim().length() > 0;
  File dbFile = hasUsableName ? new File(filename) : null;
  if (dbFile == null || dbFile.isDirectory()) {
    // Fall back to the platform-provided database location.
    dbFile = new File(new AndroidContextUtil().getDatabasePath("logback.db"));
  }
  return dbFile;
}
// A null filename must fall back to the default "logback.db" database file.
@Test public void nullFilenameResultsInDefault() throws IOException { final File file = appender.getDatabaseFile(null); assertThat(file, is(notNullValue())); assertThat(file.getName(), is("logback.db")); }
/**
 * Converts "p4 changes" console output into Modification objects, fetching
 * each change's full description via "p4 describe". Blank lines are skipped;
 * unparseable changes are logged (secrets masked) and dropped rather than
 * failing the whole batch.
 */
public List<Modification> modifications(ConsoleResult result) {
    List<Modification> modifications = new ArrayList<>();
    for (String change : result.output()) {
        if (!StringUtils.isBlank(change)) {
            String description = "";
            try {
                long revision = revisionFromChange(change);
                description = p4Client.describe(revision);
                modifications.add(modificationFromDescription(description, result));
            } catch (P4OutputParseException e) {
                // Best-effort: log the offending change with secret info masked and continue.
                LOG.error("Error parsing changes for {}", this);
                LOG.error("---- change ---------");
                LOG.error(result.replaceSecretInfo(change));
                LOG.error("---- description ----");
                LOG.error(result.replaceSecretInfo(description));
                LOG.error("---------------------");
            } catch (RuntimeException e) {
                // Re-throw with secret information smudged from the exception message.
                throw (RuntimeException) result.smudgedException(e);
            }
        }
    }
    return modifications;
}
// 20 change lines plus an embedded blank line must yield exactly 20 modifications:
// blank lines in the "p4 changes" output are skipped, not parsed.
@Test void shouldIgnoreEmptyLinesInChanges() { final String output = """ Change 539921 on 2008/09/24 by abc@SomeRefinery_abc_sa1-sgr-xyz-001 'more work in progress on ABC un' Change 539920 on 2008/09/24 by abc@SomeRefinery_abc_sa1-sgr-xyz-001 'Fixed pipeline for abc-new-sale' Change 539919 on 2008/09/24 by james@SomeRefinery_def_sa1-sgr-xyz-001 'Assignment stage to send ok and' Change 539918 on 2008/09/24 by abc@SomeRefinery_abc_sa1-sgr-xyz-001 'More refactoring and code clean' Change 539917 on 2008/09/24 by thomas@tom_ws_stuff 'added client name for Arza SW' Change 539916 on 2008/09/24 by alex@SA2-COUNT-LAX-001 'sending the context further ' Change 539915 on 2008/09/24 by ellen@ellen-box-1 'TT 678 cause:Old code try to' Change 539914 on 2008/09/24 by valerie@ExcelSheets_New 'update sheet comments ' Change 539913 on 2008/09/24 by perforce@SOM-NAME-HERE-HOST-1 'apply new version numbers via A' Change 539912 on 2008/09/24 by lance@lance-box-1 'Corrected a typo. ' Change 539911 on 2008/09/24 by lance@lance-box-1 'corrected a typo. ' Change 539910 on 2008/09/24 by josh@josh_box_1 'Changes to remove hacks I had m' Change 539909 on 2008/09/24 by thomas@tom_ws_stuff 'Added Arza SW to add request ' Change 539908 on 2008/09/24 by padma@Padma '1. Fix #2644 : When the FSOi' Change 539907 on 2008/09/24 by berlin@Dans_Box 'Added GetDocumentMetadataForEdi' Change 539906 on 2008/09/24 by lance@lance-box-1 'Added detail aboutPFP. ' Change 539904 on 2008/09/24 by lance@lance-box-1 'Added a detail about PFP. ' Change 539903 on 2008/09/24 by nitya@docs_box 'Updated for lam, am 20080923' Change 539902 on 2008/09/24 by abc@SomeRefinery_abc_sa1-sgr-xyz-001 'Work in progress ' Change 539901 on 2008/09/24 by anil@anil-box-1 'Added all columns of AA_TASK to' """; when(p4Client.describe(any(Long.class))).thenReturn(""" Change 539921 by abc@SomeRefinery_abc_sa1-sgr-xyz-001 on 2008/09/24 12:10:00 \tmore work in progress on ABC unit test Affected files ... 
"""); List<Modification> modifications = parser.modifications(new ConsoleResult(0, List.of(output.split("\n")), new ArrayList<>(), new ArrayList<>(), new ArrayList<>())); assertThat(modifications.size()).isEqualTo(20); }
// Convenience overload: resolves the log-exception strategy for the table with no triggering exception.
public static LogExceptionBehaviourInterface getExceptionStrategy( LogTableCoreInterface table ) { return getExceptionStrategy( table, null ); }
// With the global "throw" property set, the two-arg overload must return the throwing strategy.
@Test public void testGetExceptionStrategyWithExceptionPropSetY() { System.setProperty( DatabaseLogExceptionFactory.KETTLE_GLOBAL_PROP_NAME, PROPERTY_VALUE_TRUE ); LogExceptionBehaviourInterface exceptionStrategy = DatabaseLogExceptionFactory.getExceptionStrategy( logTable, new Exception() ); String strategyName = exceptionStrategy.getClass().getName(); assertEquals( THROWABLE, strategyName ); }
// Parses a raw User-Agent header string into a structured UserAgent by delegating to UserAgentParser.
public static UserAgent parse(String userAgentString) { return UserAgentParser.parse(userAgentString); }
// A Dalvik UA on a Samsung device must be identified as the native Android Browser on Android 9, mobile.
@Test public void parseSamsungPhoneWithNativeBrowserTest() { final String uaString = "Dalvik/2.1.0 (Linux; U; Android 9; SM-G950U Build/PPR1.180610.011)"; final UserAgent ua = UserAgentUtil.parse(uaString); assertEquals("Android Browser", ua.getBrowser().toString()); assertNull(ua.getVersion()); assertEquals("Unknown", ua.getEngine().toString()); assertNull(ua.getEngineVersion()); assertEquals("Android", ua.getOs().toString()); assertEquals("9", ua.getOsVersion()); assertEquals("Android", ua.getPlatform().toString()); assertTrue(ua.isMobile()); }
/**
 * Freezes this complex key: the key's backing data map is always made
 * read-only, and the optional params map is frozen when present.
 */
public void makeReadOnly() {
  // The two maps are independent, so ordering between them is irrelevant.
  if (params != null) {
    params.data().makeReadOnly();
  }
  key.data().makeReadOnly();
}
// makeReadOnly with null params must not NPE, and must still freeze the key's data map.
@Test public void testReadOnlyWithNullParams() { DataMap keyDataMap = new DataMap(); keyDataMap.put("key", "key-value"); EmptyRecord key = new EmptyRecord(keyDataMap); ComplexResourceKey<EmptyRecord, EmptyRecord> complexResourceKey = new ComplexResourceKey<>(key, null); complexResourceKey.makeReadOnly(); try { key.data().put("key", "new key value"); Assert.fail("Should not be able to update the key after the ComplexResourceKey has been made read only!"); } catch (UnsupportedOperationException e) { } }
/**
 * Streams pod logs selected by name, label, or source file. Returns a
 * non-zero exit code when no selector is given or the label is malformed.
 *
 * @return 0 on success, 1 on invalid arguments
 * @throws Exception if watching the logs fails
 */
public Integer doCall() throws Exception {
    if (name == null && label == null && filePath == null) {
        printer().println("Name or label selector must be set");
        return 1;
    }
    if (label == null) {
        // Derive the integration label from the explicit name, or from the
        // source file's base name when only a file path was given.
        String projectName;
        if (name != null) {
            projectName = KubernetesHelper.sanitize(name);
        } else {
            projectName = KubernetesHelper.sanitize(FileUtil.onlyName(SourceScheme.onlyName(filePath)));
        }
        label = "%s=%s".formatted(BaseTrait.INTEGRATION_LABEL, projectName);
    }
    String[] parts = label.split("=", 2);
    if (parts.length != 2) {
        printer().println("--label selector must be in syntax: key=value");
        // BUG FIX: previously execution fell through and parts[1] below threw
        // ArrayIndexOutOfBoundsException for a label without '='. Fail fast instead.
        return 1;
    }
    // Keep following the logs, resuming each time the watch terminates until
    // watchLogs signals it should not resume.
    boolean shouldResume = true;
    int resumeCount = 0;
    while (shouldResume) {
        shouldResume = watchLogs(parts[0], parts[1], container, resumeCount);
        resumeCount++;
    }
    return 0;
}
// When no pod matches the derived label, doCall must report "Pod for label ... not available" after retries.
@Test public void shouldHandlePodNotFound() throws Exception { PodLogs command = createCommand(); command.name = "mickey-mouse"; command.maxWaitAttempts = 2; // total timeout of 4 seconds command.doCall(); Assertions.assertTrue( printer.getOutput().contains("Pod for label camel.apache.org/integration=mickey-mouse not available")); }
/**
 * Reads one encoding byte followed by a string in that encoding.
 * The encoding byte consumes one unit of the budget, so the string is
 * limited to max - 1.
 *
 * @param max total byte budget including the encoding byte
 * @return the decoded string
 * @throws IOException on read failure
 */
protected String readEncodingAndString(int max) throws IOException {
    final byte encodingByte = readByte();
    return readEncodedString(encodingByte, max - 1);
}
// Two consecutive encoding+string records must be read back in order, each stopping at its null terminator.
@Test public void testReadMultipleStrings() throws IOException { byte[] data = { ID3Reader.ENCODING_ISO, 'F', 'o', 'o', 0, // Null-terminated ID3Reader.ENCODING_ISO, 'B', 'a', 'r', 0 // Null-terminated }; CountingInputStream inputStream = new CountingInputStream(new ByteArrayInputStream(data)); ID3Reader reader = new ID3Reader(inputStream); assertEquals("Foo", reader.readEncodingAndString(1000)); assertEquals("Bar", reader.readEncodingAndString(1000)); }
/**
 * Applies a spilling decision: spills and/or releases the buffers it names.
 * When the caller supplies no decision, one is computed from global state
 * under the lock via the spill strategy.
 */
private void handleDecision(
        @SuppressWarnings("OptionalUsedAsFieldOrParameterType") Optional<Decision> decisionOpt) {
    Decision decision =
            decisionOpt.orElseGet(
                    () -> callWithLock(() -> spillStrategy.decideActionWithGlobalInfo(this)));
    if (!decision.getBufferToSpill().isEmpty()) {
        spillBuffers(decision.getBufferToSpill());
    }
    if (!decision.getBufferToRelease().isEmpty()) {
        releaseBuffers(decision.getBufferToRelease());
    }
}
// After 4 appended buffers trigger the strategy's decision, buffers 0-2 must be spilled and 2-3 released,
// leaving one unspilled buffer and marking index 2 readable.
@Test void testHandleDecision() throws Exception { final int targetSubpartition = 0; final int numFinishedBufferToTriggerDecision = 4; List<BufferIndexAndChannel> toSpill = HybridShuffleTestUtils.createBufferIndexAndChannelsList( targetSubpartition, 0, 1, 2); List<BufferIndexAndChannel> toRelease = HybridShuffleTestUtils.createBufferIndexAndChannelsList(targetSubpartition, 2, 3); HsSpillingStrategy spillingStrategy = TestingSpillingStrategy.builder() .setOnBufferFinishedFunction( (numFinishedBuffers, poolSize) -> { if (numFinishedBuffers < numFinishedBufferToTriggerDecision) { return Optional.of(Decision.NO_ACTION); } return Optional.of( Decision.builder() .addBufferToSpill(targetSubpartition, toSpill) .addBufferToRelease( targetSubpartition, toRelease) .build()); }) .build(); CompletableFuture<List<SpilledBuffer>> spilledFuture = new CompletableFuture<>(); CompletableFuture<Integer> readableFuture = new CompletableFuture<>(); TestingFileDataIndex dataIndex = TestingFileDataIndex.builder() .setAddBuffersConsumer(spilledFuture::complete) .setMarkBufferReadableConsumer( (subpartitionId, bufferIndex) -> readableFuture.complete(bufferIndex)) .build(); HsMemoryDataManager memoryDataManager = createMemoryDataManager(spillingStrategy, dataIndex); for (int i = 0; i < 4; i++) { memoryDataManager.append( createRecord(i), targetSubpartition, Buffer.DataType.DATA_BUFFER); } assertThatFuture(spilledFuture).eventuallySucceeds(); assertThatFuture(readableFuture).eventuallySucceeds(); assertThat(readableFuture).isCompletedWithValue(2); assertThat(memoryDataManager.getNumTotalUnSpillBuffers()).isEqualTo(1); }
// Pure delegation to the underlying map's getOrDefault; no wrapping or transformation.
@Override public V getOrDefault(Object key, V defaultValue) { return underlying().getOrDefault(key, defaultValue); }
// Verifies the wrapper forwards getOrDefault to the delegate unchanged (identity transformation).
@Test public void testDelegationOfGetOrDefault() { new PCollectionsHashMapWrapperDelegationChecker<>() .defineMockConfigurationForFunctionInvocation(mock -> mock.getOrDefault(eq(this), eq(this)), this) .defineWrapperFunctionInvocationAndMockReturnValueTransformation(wrapper -> wrapper.getOrDefault(this, this), identity()) .doFunctionDelegationCheck(); }
/**
 * Builds a ProxyProvider from the given properties, or returns null when no
 * proxy host property is present. HTTP/HTTPS proxy settings take precedence
 * over SOCKS.
 *
 * @param properties source of proxy configuration, must not be null
 * @return the configured provider, or null when no proxy host is set
 */
@Nullable
static ProxyProvider createFrom(Properties properties) {
    Objects.requireNonNull(properties, "properties");
    boolean hasHttpProxy =
            properties.containsKey(HTTP_PROXY_HOST) || properties.containsKey(HTTPS_PROXY_HOST);
    if (hasHttpProxy) {
        return createHttpProxyFrom(properties);
    }
    return properties.containsKey(SOCKS_PROXY_HOST) ? createSocksProxyFrom(properties) : null;
}
// An HTTPS proxy user without a password must fail with a descriptive NullPointerException.
@Test void proxyFromSystemProperties_npeWhenHttpsProxyUsernameIsSetButNotPassword() { Properties properties = new Properties(); properties.setProperty(ProxyProvider.HTTPS_PROXY_HOST, "host"); properties.setProperty(ProxyProvider.HTTPS_PROXY_USER, "user"); Throwable throwable = catchThrowable(() -> ProxyProvider.createFrom(properties)); assertThat(throwable) .isInstanceOf(NullPointerException.class) .hasMessage("Proxy username is set via 'https.proxyUser', but 'https.proxyPassword' is not set."); }
/**
 * Compares each column's existing forward-index/dictionary state against its
 * new index configuration and returns, per column, the list of operations
 * needed to reconcile them. Sorted columns and columns whose forward index
 * cannot be regenerated are skipped with a warning. No-op for segment
 * versions below V3.
 *
 * NOTE(review): inline comments below were rejoined where the original
 * file's line breaks had split "//" comments mid-sentence; code tokens are
 * unchanged.
 */
@VisibleForTesting
Map<String, List<Operation>> computeOperations(SegmentDirectory.Reader segmentReader)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = new HashMap<>();
  // Does not work for segment versions < V3.
  if (_segmentDirectory.getSegmentMetadata().getVersion().compareTo(SegmentVersion.v3) < 0) {
    return columnOperationsMap;
  }
  Set<String> existingAllColumns = _segmentDirectory.getSegmentMetadata().getAllColumns();
  Set<String> existingDictColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.dictionary());
  Set<String> existingForwardIndexColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.forward());
  for (String column : existingAllColumns) {
    if (_schema != null && !_schema.hasColumn(column)) {
      // _schema will be null only in tests
      LOGGER.info("Column {} is not in schema, skipping updating forward index", column);
      continue;
    }
    boolean existingHasDict = existingDictColumns.contains(column);
    boolean existingHasFwd = existingForwardIndexColumns.contains(column);
    FieldIndexConfigs newConf = _fieldIndexConfigs.get(column);
    boolean newIsFwd = newConf.getConfig(StandardIndexes.forward()).isEnabled();
    boolean newIsDict = newConf.getConfig(StandardIndexes.dictionary()).isEnabled();
    boolean newIsRange = newConf.getConfig(StandardIndexes.range()).isEnabled();
    if (existingHasFwd && !newIsFwd) {
      // Existing column has a forward index. New column config disables the forward index
      ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
      if (columnMetadata.isSorted()) {
        // Check if the column is sorted. If sorted, disabling forward index should be a no-op. Do not return an
        // operation for this column related to disabling forward index.
        LOGGER.warn("Trying to disable the forward index for a sorted column {}, ignoring", column);
        continue;
      }
      if (existingHasDict) {
        if (!newIsDict) {
          // Dictionary was also disabled. Just disable the dictionary and remove it along with the forward index
          // If range index exists, don't try to regenerate it on toggling the dictionary, throw an error instead
          Preconditions.checkState(!newIsRange, String.format(
              "Must disable range (enabled) index to disable the dictionary and forward index for column: %s or "
                  + "refresh / back-fill the forward index", column));
          columnOperationsMap.put(column,
              Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.DISABLE_DICTIONARY));
        } else {
          // Dictionary is still enabled, keep it but remove the forward index
          columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
        }
      } else {
        if (!newIsDict) {
          // Dictionary remains disabled and we should not reconstruct temporary forward index as dictionary based
          columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
        } else {
          // Dictionary is enabled, creation of dictionary and conversion to dictionary based forward index is needed
          columnOperationsMap.put(column,
              Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.ENABLE_DICTIONARY));
        }
      }
    } else if (!existingHasFwd && newIsFwd) {
      // Existing column does not have a forward index. New column config enables the forward index
      ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
      if (columnMetadata != null && columnMetadata.isSorted()) {
        // Check if the column is sorted. If sorted, disabling forward index should be a no-op and forward index
        // should already exist. Do not return an operation for this column related to enabling forward index.
        LOGGER.warn("Trying to enable the forward index for a sorted column {}, ignoring", column);
        continue;
      }
      // Get list of columns with inverted index
      Set<String> existingInvertedIndexColumns =
          segmentReader.toSegmentDirectory().getColumnsWithIndex(StandardIndexes.inverted());
      if (!existingHasDict || !existingInvertedIndexColumns.contains(column)) {
        // If either dictionary or inverted index is missing on the column there is no way to re-generate the forward
        // index. Treat this as a no-op and log a warning.
        LOGGER.warn("Trying to enable the forward index for a column {} missing either the dictionary ({}) and / or "
                + "the inverted index ({}) is not possible. Either a refresh or back-fill is required to get the "
                + "forward index, ignoring", column, existingHasDict ? "enabled" : "disabled",
            existingInvertedIndexColumns.contains(column) ? "enabled" : "disabled");
        continue;
      }
      columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_FORWARD_INDEX));
    } else if (!existingHasFwd) {
      // Forward index is disabled for the existing column and should remain disabled based on the latest config
      // Need some checks to see whether the dictionary is being enabled or disabled here and take appropriate actions
      // If the dictionary is not enabled on the existing column it must be on the new noDictionary column list.
      // Cannot enable the dictionary for a column with forward index disabled.
      Preconditions.checkState(existingHasDict || !newIsDict,
          String.format("Cannot regenerate the dictionary for column %s with forward index disabled. Please "
              + "refresh or back-fill the data to add back the forward index", column));
      if (existingHasDict && !newIsDict) {
        // Dictionary is currently enabled on this column but is supposed to be disabled. Remove the dictionary
        // and update the segment metadata. If the range index exists then throw an error since we are not
        // regenerating the range index on toggling the dictionary
        Preconditions.checkState(!newIsRange, String.format(
            "Must disable range (enabled) index to disable the dictionary for a forwardIndexDisabled column: %s or "
                + "refresh / back-fill the forward index", column));
        columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
      }
    } else if (!existingHasDict && newIsDict) {
      // Existing column is RAW. New column is dictionary enabled.
      if (_schema == null || _tableConfig == null) {
        // This can only happen in tests.
        LOGGER.warn("Cannot enable dictionary for column={} as schema or tableConfig is null.", column);
        continue;
      }
      ColumnMetadata existingColumnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
      if (DictionaryIndexType.ignoreDictionaryOverride(_tableConfig.getIndexingConfig().isOptimizeDictionary(),
          _tableConfig.getIndexingConfig().isOptimizeDictionaryForMetrics(),
          _tableConfig.getIndexingConfig().getNoDictionarySizeRatioThreshold(),
          existingColumnMetadata.getFieldSpec(), _fieldIndexConfigs.get(column),
          existingColumnMetadata.getCardinality(), existingColumnMetadata.getTotalNumberOfEntries())) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_DICTIONARY));
      }
    } else if (existingHasDict && !newIsDict) {
      // Existing column has dictionary. New config for the column is RAW.
      if (shouldDisableDictionary(column, _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column))) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
      }
    } else if (!existingHasDict) {
      // Both existing and new column is RAW forward index encoded. Check if compression needs to be changed.
      // TODO: Also check if raw index version needs to be changed
      if (shouldChangeRawCompressionType(column, segmentReader)) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
      }
    } else {
      // Both existing and new column is dictionary encoded. Check if compression needs to be changed.
      if (shouldChangeDictIdCompressionType(column, segmentReader)) {
        columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
      }
    }
  }
  return columnOperationsMap;
}
// With an unchanged index configuration, computeOperations must return an empty map (no-op).
@Test public void testComputeOperationNoOp() throws Exception { // Setup SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory); SegmentDirectory segmentLocalFSDirectory = new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap); SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter(); // TEST1: Validate with zero changes. ForwardIndexHandler should be a No-Op. IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig); ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, null); Map<String, List<ForwardIndexHandler.Operation>> operationMap = fwdIndexHandler.computeOperations(writer); assertEquals(operationMap, Collections.EMPTY_MAP); // Tear down segmentLocalFSDirectory.close(); }
/**
 * Advances the last committed index on a follower. Returns false when this
 * node is tracking pending entries as leader, when the waiter queue is full,
 * or when the index would move backwards. On a real advance the write lock
 * is released before invoking the waiter callback so alien code never runs
 * under the lock.
 */
public boolean setLastCommittedIndex(final long lastCommittedIndex) {
    // Tracks whether the finally block still owns the write lock.
    boolean doUnlock = true;
    final long stamp = this.stampedLock.writeLock();
    try {
        if (this.pendingIndex != 0 || !this.pendingMetaQueue.isEmpty()) {
            // Leader path: commits are driven through the pending queue instead.
            Requires.requireTrue(lastCommittedIndex < this.pendingIndex,
                "Node changes to leader, pendingIndex=%d, param lastCommittedIndex=%d", this.pendingIndex,
                lastCommittedIndex);
            return false;
        }
        if (!this.waiter.hasAvailableCapacity(1)) {
            LOG.warn("Node {} fsm is busy, can't set lastCommittedIndex to be {}, lastCommittedIndex={}.",
                this.opts.getNodeId(), lastCommittedIndex, this.lastCommittedIndex);
            return false;
        }
        if (lastCommittedIndex < this.lastCommittedIndex) {
            return false;
        }
        if (lastCommittedIndex > this.lastCommittedIndex) {
            this.lastCommittedIndex = lastCommittedIndex;
            // Release the lock before the callback; mark it so finally skips unlock.
            this.stampedLock.unlockWrite(stamp);
            doUnlock = false;
            this.waiter.onCommitted(lastCommittedIndex);
        }
    } finally {
        if (doUnlock) {
            this.stampedLock.unlockWrite(stamp);
        }
    }
    return true;
}
// Advancing the committed index from 0 to 1 must succeed and notify the waiter exactly once.
@Test public void testSetLastCommittedIndex() { Mockito.when(this.waiter.hasAvailableCapacity(1)).thenReturn(true); assertEquals(0, this.box.getLastCommittedIndex()); assertTrue(this.box.setLastCommittedIndex(1)); assertEquals(1, this.box.getLastCommittedIndex()); Mockito.verify(this.waiter, Mockito.times(1)).onCommitted(1); }
/**
 * Fetches Iceberg table metadata from Snowflake via
 * SYSTEM$GET_ICEBERG_TABLE_INFORMATION. SQL failures are mapped to Iceberg
 * exceptions; interruption is wrapped unchecked.
 *
 * @param tableIdentifier must be a TABLE-typed identifier
 */
@Override
public SnowflakeTableMetadata loadTableMetadata(SnowflakeIdentifier tableIdentifier) {
    Preconditions.checkArgument(
        tableIdentifier.type() == SnowflakeIdentifier.Type.TABLE,
        "loadTableMetadata requires a TABLE identifier, got '%s'",
        tableIdentifier);
    SnowflakeTableMetadata tableMeta;
    try {
        // Parameterized query; the identifier is bound, never concatenated.
        final String finalQuery = "SELECT SYSTEM$GET_ICEBERG_TABLE_INFORMATION(?) AS METADATA";
        tableMeta =
            connectionPool.run(
                conn ->
                    queryHarness.query(
                        conn,
                        finalQuery,
                        TABLE_METADATA_RESULT_SET_HANDLER,
                        tableIdentifier.toIdentifierString()));
    } catch (SQLException e) {
        throw snowflakeExceptionToIcebergException(
            tableIdentifier, e, String.format("Failed to get table metadata for '%s'", tableIdentifier));
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(
            e, "Interrupted while getting table metadata for '%s'", tableIdentifier);
    }
    return tableMeta;
}
// A SQLException without a vendor error code must surface as UncheckedSQLException with identifier context and cause.
@SuppressWarnings("unchecked") @Test public void testGetTableMetadataSQLExceptionWithoutErrorCode() throws SQLException, InterruptedException { Exception injectedException = new SQLException("Fake SQL exception"); when(mockClientPool.run(any(ClientPool.Action.class))).thenThrow(injectedException); assertThatExceptionOfType(UncheckedSQLException.class) .isThrownBy( () -> snowflakeClient.loadTableMetadata( SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_1", "TABLE_1"))) .withMessageContaining("Failed to get table metadata for 'TABLE: 'DB_1.SCHEMA_1.TABLE_1''") .withCause(injectedException); }
/**
 * Logs the fault (with cause when available), runs the configured action,
 * and returns a FaultHandlerException for the caller to throw. Failures of
 * the action itself are logged and swallowed so they cannot mask the fault.
 */
@Override
public RuntimeException handleFault(String failureMessage, Throwable cause) {
    if (cause != null) {
        log.error("Encountered {} fault: {}", type, failureMessage, cause);
    } else {
        log.error("Encountered {} fault: {}", type, failureMessage);
    }
    try {
        action.run();
    } catch (Throwable t) {
        log.error("Failed to run LoggingFaultHandler action.", t);
    }
    return new FaultHandlerException(failureMessage, cause);
}
// The configured action must run once per handleFault call, with or without a cause.
@Test public void testHandleFault() { AtomicInteger counter = new AtomicInteger(0); LoggingFaultHandler handler = new LoggingFaultHandler("test", () -> { counter.incrementAndGet(); }); handler.handleFault("uh oh"); assertEquals(1, counter.get()); handler.handleFault("uh oh", new RuntimeException("yikes")); assertEquals(2, counter.get()); }
/**
 * Initializes all capacity records by invoking the flagged overload once for
 * each mode, false first then true.
 * NOTE(review): flag meaning (group vs tenant dimension) is inferred from the
 * tests — confirm against the overload's implementation.
 */
public void initAllCapacity() {
    for (boolean mode : new boolean[] { false, true }) {
        initAllCapacity(mode);
    }
}
// initAllCapacity must insert/read/update quota once for both the group and the tenant dimension.
@Test void testInitAllCapacity() { List<String> groupList = new ArrayList<>(); groupList.add("testGroup"); when(configInfoPersistService.getGroupIdList(eq(1), eq(500))).thenReturn(groupList); List<String> tenantList = new ArrayList<>(); tenantList.add("testTenant"); when(configInfoPersistService.getTenantIdList(eq(1), eq(500))).thenReturn(tenantList); GroupCapacity groupCapacity = new GroupCapacity(); groupCapacity.setGroup("testGroup"); groupCapacity.setUsage(300); when(groupCapacityPersistService.insertGroupCapacity(any())).thenReturn(true); when(groupCapacityPersistService.getGroupCapacity(eq("testGroup"))).thenReturn(groupCapacity); when(groupCapacityPersistService.updateQuota(eq("testGroup"), eq(500))).thenReturn(true); TenantCapacity tenantCapacity = new TenantCapacity(); tenantCapacity.setTenant("testTenant"); tenantCapacity.setUsage(300); when(tenantCapacityPersistService.insertTenantCapacity(any())).thenReturn(true); when(tenantCapacityPersistService.getTenantCapacity(eq("testTenant"))).thenReturn(tenantCapacity); when(tenantCapacityPersistService.updateQuota(eq("testTenant"), eq(500))).thenReturn(true); service.initAllCapacity(); Mockito.verify(groupCapacityPersistService, times(1)).insertGroupCapacity(any()); Mockito.verify(groupCapacityPersistService, times(1)).getGroupCapacity(eq("testGroup")); Mockito.verify(groupCapacityPersistService, times(1)).updateQuota(eq("testGroup"), eq(500)); Mockito.verify(tenantCapacityPersistService, times(1)).insertTenantCapacity(any()); Mockito.verify(tenantCapacityPersistService, times(1)).getTenantCapacity(eq("testTenant")); Mockito.verify(tenantCapacityPersistService, times(1)).updateQuota(eq("testTenant"), eq(500)); }
/**
 * Factory for a combine-values ParDoFn. When no serialized user fn is given,
 * the ParDoFn is built with no combine fn, an empty side-input reader, and no
 * step context; otherwise the AppliedCombineFn is deserialized from the cloud
 * object and wired up with side inputs and the step context.
 */
public static <K, InputT, AccumT> ParDoFn create(
    PipelineOptions options,
    KvCoder<K, ?> inputElementCoder,
    @Nullable CloudObject cloudUserFn,
    @Nullable List<SideInputInfo> sideInputInfos,
    List<Receiver> receivers,
    DataflowExecutionContext<?> executionContext,
    DataflowOperationContext operationContext)
    throws Exception {
  AppliedCombineFn<K, InputT, AccumT, ?> combineFn;
  SideInputReader sideInputReader;
  StepContext stepContext;
  if (cloudUserFn == null) {
    combineFn = null;
    sideInputReader = NullSideInputReader.empty();
    stepContext = null;
  } else {
    Object deserializedFn =
        SerializableUtils.deserializeFromByteArray(
            getBytes(cloudUserFn, PropertyNames.SERIALIZED_FN), "serialized combine fn");
    // Safe by construction of the serialized payload; narrowed via a local to
    // scope the unchecked warning.
    @SuppressWarnings("unchecked")
    AppliedCombineFn<K, InputT, AccumT, ?> combineFnUnchecked =
        ((AppliedCombineFn<K, InputT, AccumT, ?>) deserializedFn);
    combineFn = combineFnUnchecked;
    sideInputReader =
        executionContext.getSideInputReader(
            sideInputInfos, combineFn.getSideInputViews(), operationContext);
    stepContext = executionContext.getStepContext(operationContext);
  }
  // Only the first receiver is used as the output target.
  return create(
      options, inputElementCoder, combineFn, sideInputReader, receivers.get(0), stepContext);
}
// Exercises the streaming side-input PGBK ParDoFn: values are combined per key,
// the 4th processed element is reported blocked (storeIfBlocked returns true on the
// 4th call) and is excluded, while previously-blocked elements read from the bag
// state are merged into the combined output.
@Test
public void testPartialGroupByKeyWithCombinerAndSideInputs() throws Exception {
  Coder keyCoder = StringUtf8Coder.of();
  Coder valueCoder = BigEndianIntegerCoder.of();
  TestOutputReceiver receiver =
      new TestOutputReceiver(
          new ElementByteSizeObservableCoder(
              WindowedValue.getValueOnlyCoder(KvCoder.of(keyCoder, valueCoder))),
          counterSet,
          NameContextsForTests.nameContextForTest());
  Combiner<WindowedValue<String>, Integer, Integer, Integer> combineFn = new TestCombiner();
  ParDoFn pgbkParDoFn =
      new StreamingSideInputPGBKParDoFn(
          GroupingTables.combining(
              new WindowingCoderGroupingKeyCreator(keyCoder),
              PairInfo.create(),
              combineFn,
              new CoderSizeEstimator(WindowedValue.getValueOnlyCoder(keyCoder)),
              new CoderSizeEstimator(valueCoder)),
          receiver,
          mockSideInputFetcher);
  Set<BoundedWindow> readyWindows = ImmutableSet.<BoundedWindow>of(GlobalWindow.INSTANCE);
  when(mockSideInputFetcher.getReadyWindows()).thenReturn(readyWindows);
  when(mockSideInputFetcher.prefetchElements(readyWindows))
      .thenReturn(ImmutableList.of(elemsBag));
  when(elemsBag.read())
      .thenReturn(
          ImmutableList.of(
              WindowedValue.valueInGlobalWindow(KV.of("hi", 4)),
              WindowedValue.valueInGlobalWindow(KV.of("there", 5))));
  when(mockSideInputFetcher.storeIfBlocked(Matchers.<WindowedValue<KV<String, Integer>>>any()))
      .thenReturn(false, false, false, true);
  pgbkParDoFn.startBundle(receiver);
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 6)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("joe", 7)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("there", 8)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 9)));
  pgbkParDoFn.finishBundle();
  assertThat(
      receiver.outputElems,
      IsIterableContainingInAnyOrder.<Object>containsInAnyOrder(
          WindowedValue.valueInGlobalWindow(KV.of("hi", 10)),
          WindowedValue.valueInGlobalWindow(KV.of("there", 13)),
          WindowedValue.valueInGlobalWindow(KV.of("joe", 7))));
  // Exact counter values depend on size of encoded data. If encoding
  // changes, then these expected counters should change to match.
  CounterUpdateExtractor<?> updateExtractor = Mockito.mock(CounterUpdateExtractor.class);
  counterSet.extractUpdates(false, updateExtractor);
  verify(updateExtractor).longSum(getObjectCounterName("test_receiver_out"), false, 3L);
  verify(updateExtractor)
      .longMean(
          getMeanByteCounterName("test_receiver_out"),
          false,
          LongCounterMean.ZERO.addValue(25L, 3));
  verifyNoMoreInteractions(updateExtractor);
}
// Returns the shared, stateless Row -> TableRow conversion function.
public static SerializableFunction<Row, TableRow> toTableRow() {
  return ROW_TO_TABLE_ROW;
}
// A Row map field is rendered as a list of {key, value} TableRows with
// stringified values.
@Test
public void testToTableRow_map() {
  TableRow row = toTableRow().apply(MAP_ROW);
  assertThat(row.size(), equalTo(1));
  row = ((List<TableRow>) row.get("map")).get(0);
  assertThat(row.size(), equalTo(2));
  assertThat(row, hasEntry("key", "test"));
  assertThat(row, hasEntry("value", "123.456"));
}
// loadAll is not supported by this adapter; always throws.
@Override
@MethodNotAvailable
public void loadAll(boolean replaceExistingValues) {
    throw new MethodNotAvailableException();
}
// The keyed loadAll overload must be unavailable on this adapter.
@Test(expected = MethodNotAvailableException.class)
public void testLoadAllWithKeys() {
    adapter.loadAll(Collections.emptySet(), true);
}
// Convenience overload: evaluates the cutoff relative to the current time
// obtained from the injected clock (system2).
@CheckForNull
public Date maxLiveDateOfClosedIssues() {
  return maxLiveDateOfClosedIssues(new Date(system2.now()));
}
// With a retention value of 0 or -1, no cutoff date is produced (null return).
@Test
void should_delete_all_closed_issues() {
  PurgeConfiguration conf = new PurgeConfiguration("root", "project", 0, Optional.empty(), System2.INSTANCE, emptySet(), 0);
  assertThat(conf.maxLiveDateOfClosedIssues()).isNull();
  conf = new PurgeConfiguration("root", "project", -1, Optional.empty(), System2.INSTANCE, emptySet(), 0);
  assertThat(conf.maxLiveDateOfClosedIssues()).isNull();
}
// Delegates to the two-arg overload with an effectively unbounded timeout and
// the boolean flag set to false (semantics of the flag defined by the overload).
public void close() {
    close(Long.MAX_VALUE, false);
}
// close(Duration.ZERO) must return immediately (false) instead of waiting on a
// state transition that mock time would never advance past.
@Test
public void shouldNotBlockInCloseForZeroDuration() throws Exception {
    prepareStreams();
    prepareStreamThread(streamThreadOne, 1);
    prepareStreamThread(streamThreadTwo, 2);
    prepareTerminableThread(streamThreadOne);
    try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
        // with mock time that does not elapse, close would not return if it ever waits on the state transition
        assertFalse(streams.close(Duration.ZERO));
    }
}
/**
 * Converts the given value to a {@code Set} of the requested element type by
 * delegating to {@code toCollection} with {@code HashSet}; the cast is safe
 * because a HashSet is requested.
 */
@SuppressWarnings("unchecked")
public static <T> Set<T> toSet(Class<T> elementType, Object value) {
    return (Set<T>) toCollection(HashSet.class, elementType, value);
}
// A comma-separated string converts to an Integer set via the TypeReference API.
@Test
public void toSetTest() {
    final Set<Integer> result = Convert.convert(new TypeReference<Set<Integer>>() {
    }, "1,2,3");
    assertEquals(CollUtil.set(false, 1, 2, 3), result);
}
/**
 * Reads a single byte from the wrapped stream, honoring the remaining quota.
 *
 * @return the next byte, or -1 when the quota is exhausted or the underlying
 *     stream reaches end-of-stream
 * @throws IOException if the underlying stream fails
 */
@Override
public int read() throws IOException {
    // Quota exhausted: behave as end-of-stream without touching the delegate.
    if (left == 0) {
        return -1;
    }
    final int value = in.read();
    if (value == -1) {
        // Underlying EOF: do not consume quota.
        return -1;
    }
    --left;
    return value;
}
// Reads 4 bytes through the limited stream and compares against a Random seeded
// with 0 (assumes RandomInputStream draws from the same seed-0 sequence -- see fixture).
@Test
public void testReadBytes() throws IOException {
    try (LimitInputStream limitInputStream = new LimitInputStream(new RandomInputStream(), 128)) {
        Random r = new Random(0);
        byte[] data = new byte[4];
        byte[] expected = { (byte) r.nextInt(), (byte) r.nextInt(), (byte) r.nextInt(), (byte) r.nextInt() };
        limitInputStream.read(data, 0, 4);
        assertArrayEquals("Incorrect bytes returned", expected, data);
    }
}
// Replaces the intercepted call's result with the merged instance list.
@Override
protected ExecuteContext doAfter(ExecuteContext context) {
    context.changeResult(getMergedInstances(context));
    return context;
}
// The interceptor merges SC and origin instances; repeated entries are deduplicated
// (second pass expects one fewer than the raw sum).
@Test
public void doAfter() throws NoSuchMethodException {
    RegisterContext.INSTANCE.setAvailable(true);
    final ZookeeperInstanceSupplierInterceptor interceptor = new ZookeeperInstanceSupplierInterceptor();
    final ExecuteContext context = interceptor.doAfter(buildContext());
    final Object result = context.getResult();
    Assert.assertTrue(result instanceof List);
    List<ServiceInstance> instances = (List<ServiceInstance>) result;
    assertEquals(instances.size(), (scInstances.size() + originInstances.size()));
    // Test deduplication
    scInstances.clear();
    originInstances.clear();
    allInstances.clear();
    final ExecuteContext contextRepeat = interceptor.doAfter(buildContextRepeatedly());
    final Object resultRepeat = contextRepeat.getResult();
    Assert.assertTrue(resultRepeat instanceof List);
    List<ServiceInstance> instancesRepeat = (List<ServiceInstance>) resultRepeat;
    assertEquals(instancesRepeat.size(), (scInstances.size() + originInstances.size() - 1));
    RegisterContext.INSTANCE.setAvailable(false);
}
/**
 * Runs the interactive REPL loop: validates the client, then reads and handles
 * commands until EOF. Non-EOF exceptions are logged and printed but do not
 * terminate the loop.
 *
 * @return {@code NO_ERROR} always
 */
public int runInteractively() {
  displayWelcomeMessage();
  RemoteServerSpecificCommand.validateClient(terminal.writer(), restClient);
  boolean eof = false;
  while (!eof) {
    try {
      handleLine(nextNonCliCommand());
    } catch (final EndOfFileException exception) {
      // EOF is fine, just terminate the REPL
      terminal.writer().println("Exiting ksqlDB.");
      eof = true;
    } catch (final Exception exception) {
      LOGGER.error("An error occurred while running a command. Error = "
          + exception.getMessage(), exception);
      terminal.printError(ErrorMessageUtil.buildErrorMessage(exception),
          exception.toString());
    }
    terminal.flush();
  }
  return NO_ERROR;
}
// Smoke test: the REPL loop terminates once the exit condition is arranged.
@Test
public void testRunInteractively() {
  // Given:
  givenRunInteractivelyWillExit();
  // When:
  localCli.runInteractively();
}
// Number of devices currently marked available.
@Override
public int getAvailableDeviceCount() {
    return availableDevices.size();
}
// The available count tracks markOffline/markOnline transitions.
@Test
public final void testGetAvailableDeviceCount() {
    assertEquals("initialy empty", 0, deviceStore.getAvailableDeviceCount());
    putDevice(DID1, SW1);
    putDevice(DID2, SW2);
    deviceStore.markOffline(DID1);
    assertEquals("expect 1 available device", 1, deviceStore.getAvailableDeviceCount());
    deviceStore.markOnline(DID1);
    assertEquals("expect 2 available devices", 2, deviceStore.getAvailableDeviceCount());
}
/**
 * Replaces, in place, every filtered character inside the given range with the
 * replacement character.
 *
 * @param content buffer to scan and mutate; a null buffer is a no-op
 * @param offset first index to inspect
 * @param length number of characters to inspect
 * @return true if at least one character was replaced
 */
public boolean filter(char[] content, int offset, int length) {
    if (content == null) {
        return false;
    }
    boolean replaced = false;
    final int end = offset + length;
    for (int i = offset; i < end; i++) {
        if (isFiltered(content[i])) {
            content[i] = REPLACEMENT_CHAR;
            replaced = true;
        }
    }
    if (replaced) {
        LOG.warn("Identified and replaced non-XML chars");
    }
    return replaced;
}
// A null buffer must be tolerated (returns without throwing).
@Test
public void testFilter3ArgsNullArg() {
    assertDoesNotThrow(() -> nonXmlCharFilterer.filter(null, 2, 3));
}
/**
 * Creates a secret with automatic replication and adds an initial version
 * holding the given payload. The id is recorded for later cleanup.
 *
 * @throws IllegalArgumentException if id or data is empty
 * @throws SecretManagerResourceManagerException wrapping any client failure
 */
public void createSecret(String secretId, String secretData) {
  checkArgument(!secretId.isEmpty(), "secretId can not be empty");
  checkArgument(!secretData.isEmpty(), "secretData can not be empty");
  try {
    checkIsUsable();
    ProjectName projectName = ProjectName.of(projectId);
    // Create the parent secret.
    Secret secret =
        Secret.newBuilder()
            .setReplication(
                Replication.newBuilder()
                    .setAutomatic(Replication.Automatic.newBuilder().build())
                    .build())
            .build();
    Secret createdSecret = secretManagerServiceClient.createSecret(projectName, secretId, secret);
    // Add a secret version.
    SecretPayload payload =
        SecretPayload.newBuilder().setData(ByteString.copyFromUtf8(secretData)).build();
    secretManagerServiceClient.addSecretVersion(createdSecret.getName(), payload);
    createdSecretIds.add(secretId);
    LOG.info("Created secret successfully.");
  } catch (Exception e) {
    throw new SecretManagerResourceManagerException("Error while creating secret", e);
  }
}
// An empty secret id is rejected before any client call.
@Test
public void testCreateSecretWithInvalidNameShouldFail() {
  IllegalArgumentException exception =
      assertThrows(
          IllegalArgumentException.class, () -> testManager.createSecret("", SECRET_DATA));
  assertThat(exception).hasMessageThat().contains("secretId can not be empty");
}
// Binds a lock expression AST node to a GuardedByExpression, rejecting
// unresolvable expressions and raw type literals.
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
  GuardedByExpression expr = BINDER.visit(exp, context);
  checkGuardedBy(expr != null, String.valueOf(exp));
  checkGuardedBy(expr.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
  return expr;
}
// A simple class name used where a field of the same name exists still binds
// to the class literal.
@Test
public void simpleNameClass() {
  assertThat(
          bind(
              "Test",
              "Other.class",
              forSourceLines(
                  "threadsafety/Test.java",
                  "package threadsafety;",
                  "class Other {",
                  " static final Object lock = new Object();",
                  "}",
                  "class Test {",
                  " Other Other = null;",
                  "}")))
      .isEqualTo("(CLASS_LITERAL threadsafety.Other)");
}
// Delegates the namespace existence check to the JDBC helper.
@Override
public boolean namespaceExists(Namespace namespace) {
  return JdbcUtil.namespaceExists(catalogName, connections, namespace);
}
// Namespaces implied by created tables exist; unrelated ones do not.
@Test
public void testNamespaceExists() {
  TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
  TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
  TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
  TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
  Lists.newArrayList(tbl1, tbl2, tbl3, tbl4)
      .forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
  assertThat(catalog.namespaceExists(Namespace.of("db", "ns1", "ns2")))
      .as("Should true to namespace exist")
      .isTrue();
  assertThat(catalog.namespaceExists(Namespace.of("db", "db2", "not_exist")))
      .as("Should false to namespace doesn't exist")
      .isFalse();
}
/**
 * Appends the given form parameters to a URL, delegating to the string-params
 * overload after converting the map.
 */
public static String urlWithForm(String url, Map<String, Object> form, Charset charset, boolean isEncodeParams) {
	// URL and parameters are encoded separately
	return urlWithForm(url, toParams(form, charset), charset, isEncodeParams);
}
// Parameters are appended in insertion order after the existing query string;
// the call is repeated to confirm the result is stable across invocations.
@Test
public void urlWithFormTest() {
	final Map<String, Object> param = new LinkedHashMap<>();
	param.put("AccessKeyId", "123");
	param.put("Action", "DescribeDomainRecords");
	param.put("Format", "date");
	param.put("DomainName", "lesper.cn"); // domain name
	param.put("SignatureMethod", "POST");
	param.put("SignatureNonce", "123");
	param.put("SignatureVersion", "4.3.1");
	param.put("Timestamp", 123432453);
	param.put("Version", "1.0");
	String urlWithForm = HttpUtil.urlWithForm("http://api.hutool.cn/login?type=aaa", param, CharsetUtil.CHARSET_UTF_8, false);
	assertEquals(
			"http://api.hutool.cn/login?type=aaa&AccessKeyId=123&Action=DescribeDomainRecords&Format=date&DomainName=lesper.cn&SignatureMethod=POST&SignatureNonce=123&SignatureVersion=4.3.1&Timestamp=123432453&Version=1.0",
			urlWithForm);
	urlWithForm = HttpUtil.urlWithForm("http://api.hutool.cn/login?type=aaa", param, CharsetUtil.CHARSET_UTF_8, false);
	assertEquals(
			"http://api.hutool.cn/login?type=aaa&AccessKeyId=123&Action=DescribeDomainRecords&Format=date&DomainName=lesper.cn&SignatureMethod=POST&SignatureNonce=123&SignatureVersion=4.3.1&Timestamp=123432453&Version=1.0",
			urlWithForm);
}
// Builds one excerpt per native view.
@Override
public Set<EntityExcerpt> listEntityExcerpts() {
    return getNativeViews().map(this::createExcerpt).collect(Collectors.toSet());
}
// The single fixture view is listed as an excerpt with matching title/id/type.
@Test
@MongoDBFixtures("ViewFacadeTest.json")
public void itShouldListEntityExcerptsForAllViewsInDB() {
    final ViewDTO viewDTO = viewService.get(viewId)
            .orElseThrow(() -> new NotFoundException("Missing view with id: " + viewId));
    final EntityExcerpt entityExcerpt = EntityExcerpt.builder()
            .title(viewDTO.title())
            .id(ModelId.of(viewId))
            .type(ModelTypes.SEARCH_V1)
            .build();
    final Set<EntityExcerpt> entityExcerpts = facade.listEntityExcerpts();
    assertThat(entityExcerpts)
            .hasSize(1)
            .contains(entityExcerpt);
}
// Obtains an instance with the default initial capacity.
public static RecyclableByteBufferList newInstance() {
    return newInstance(DEFAULT_INITIAL_CAPACITY);
}
// An object recycled on another thread is handed out again by newInstance().
@Test
public void testMultipleRecycleAtDifferentThread() throws InterruptedException {
    final RecyclableByteBufferList object = RecyclableByteBufferList.newInstance();
    final Thread thread1 = new Thread(object::recycle);
    thread1.start();
    thread1.join();
    assertSame(object, RecyclableByteBufferList.newInstance());
}
/**
 * Loads anticipated transitions for a component's file path. The project uuid is
 * looked up via the entity DAO and, failing that, derived from the component key.
 */
@Override
public Collection<AnticipatedTransition> getAnticipatedTransitionByComponent(Component component) {
    try (DbSession dbSession = dbClient.openSession(false)) {
        String projectUuid = dbClient.entityDao().selectByComponentUuid(dbSession, component.getUuid()).map(EntityDto::getUuid)
            .orElse(calculateProjectUuidFromComponentKey(dbSession, component));
        List<AnticipatedTransitionDto> anticipatedTransitionDtos = dbClient.anticipatedTransitionDao()
            .selectByProjectUuidAndFilePath(dbSession, projectUuid, component.getName());
        return getAnticipatedTransitions(anticipatedTransitionDtos);
    }
}
// With a branch present, the project uuid resolves through the branch and only
// transitions for the component's own file path are returned.
@Test
public void giveProjectBranchAvailable_projectUuidShouldBeCalculatedFromThere() {
    //given
    String projectKey = "projectKey2";
    String projectUuid = "projectUuid2";
    String mainFile = "file1.js";
    dbClient.projectDao().insert(db.getSession(), getProjectDto(projectUuid, projectKey));
    BranchDto branchDto = getBranchDto(projectUuid, "branch");
    dbClient.branchDao().insert(db.getSession(), branchDto);
    ComponentDto fileDto = getComponentDto(projectKey + ":" + mainFile, branchDto.getUuid());
    dbClient.componentDao().insertWithAudit(db.getSession(), fileDto);
    insertAnticipatedTransition(projectUuid, mainFile);
    insertAnticipatedTransition(projectUuid, "file2.js");
    insertAnticipatedTransition(projectUuid, "file2.js");
    db.getSession().commit();
    Component file = getFileComponent(fileDto.uuid(), projectKey, mainFile);
    var anticipatedTransitions = underTest.getAnticipatedTransitionByComponent(file);
    assertThat(anticipatedTransitions).hasSize(1);
}
/**
 * Builds a Comment entity from this request: a random metadata name, a spec
 * populated from the request fields, and an optional owner.
 */
public Comment toComment() {
    final Comment comment = new Comment();
    final Metadata metadata = new Metadata();
    // Randomized name; callers may override it afterwards.
    metadata.setName(UUID.randomUUID().toString());
    comment.setMetadata(metadata);
    final Comment.CommentSpec spec = new Comment.CommentSpec();
    spec.setSubjectRef(subjectRef);
    spec.setRaw(raw);
    spec.setContent(content);
    spec.setAllowNotification(allowNotification);
    if (owner != null) {
        spec.setOwner(owner.toCommentOwner());
    }
    comment.setSpec(spec);
    return comment;
}
// The built Comment serializes to the expected JSON after pinning the random
// metadata name to a known value (strict JSONAssert comparison).
@Test
void toComment() throws JSONException {
    CommentRequest commentRequest = createCommentRequest();
    Comment comment = commentRequest.toComment();
    assertThat(comment.getMetadata().getName()).isNotNull();
    comment.getMetadata().setName("fake");
    JSONAssert.assertEquals("""
        {
            "spec": {
                "raw": "raw",
                "content": "content",
                "allowNotification": true,
                "subjectRef": {
                    "group": "fake.halo.run",
                    "version": "v1alpha1",
                    "kind": "Fake",
                    "name": "fake"
                }
            },
            "apiVersion": "content.halo.run/v1alpha1",
            "kind": "Comment",
            "metadata": {
                "name": "fake"
            }
        }
        """,
        JsonUtils.objectToJson(comment), true);
}
/**
 * Dispatches on the terminal states of all executions: if every execution
 * FINISHED, the finished path runs; otherwise the not-finished path receives
 * the set of offending states.
 *
 * @throws NullPointerException if the collection is null
 */
@Override
public void handleExecutionsTermination(Collection<ExecutionState> terminatedExecutionStates) {
    final Set<ExecutionState> notFinished =
            checkNotNull(terminatedExecutionStates).stream()
                    .filter(state -> !ExecutionState.FINISHED.equals(state))
                    .collect(Collectors.toSet());
    if (!notFinished.isEmpty()) {
        handleAnyExecutionNotFinished(notFinished);
    } else {
        handleExecutionsFinished();
    }
}
// A null state collection is rejected with NullPointerException.
@Test
void testExecutionTerminationWithNull() {
    assertThatThrownBy(
                    () -> createTestInstanceFailingOnGlobalFailOver()
                            .handleExecutionsTermination(null))
            .isInstanceOf(NullPointerException.class);
}
/**
 * Static factory: wraps the given ordered-processing handler in the AutoValue
 * implementation of OrderedEventProcessor.
 */
public static <
        EventTypeT,
        EventKeyTypeT,
        ResultTypeT,
        StateTypeT extends MutableState<EventTypeT, ResultTypeT>>
    OrderedEventProcessor<EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT> create(
        OrderedProcessingHandler<EventTypeT, EventKeyTypeT, StateTypeT, ResultTypeT> handler) {
  return new AutoValue_OrderedEventProcessor<>(handler);
}
// With emission frequency 2, only every other accumulated result per key is
// emitted; out-of-order events are still processed in sequence order.
@Test
public void testProcessingWithEveryOtherResultEmission() throws CannotProvideCoderException {
  Event[] events = {
    Event.create(2, "id-1", "c"),
    Event.create(1, "id-1", "b"),
    Event.create(0, "id-1", "a"),
    Event.create(3, "id-1", "d"),
    Event.create(0, "id-2", "a"),
    Event.create(1, "id-2", "b"),
  };
  Collection<KV<String, OrderedProcessingStatus>> expectedStatuses = new ArrayList<>();
  expectedStatuses.add(
      KV.of("id-1", OrderedProcessingStatus.create(3L, 0, null, null, 4, 2L, 0, false)));
  expectedStatuses.add(
      KV.of("id-2", OrderedProcessingStatus.create(1L, 0, null, null, 2, 1L, 0, false)));
  Collection<KV<String, String>> expectedOutput = new ArrayList<>();
  expectedOutput.add(KV.of("id-1", "a"));
  // Skipped KV.of("id-1", "ab"),
  expectedOutput.add(KV.of("id-1", "abc"));
  // Skipped KV.of("id-1", "abcd"),
  expectedOutput.add(KV.of("id-2", "a"));
  // Skipped KV.of("id-2", "ab")
  testProcessing(
      events,
      expectedStatuses,
      expectedOutput,
      EMISSION_FREQUENCY_ON_EVERY_OTHER_EVENT,
      INITIAL_SEQUENCE_OF_0,
      LARGE_MAX_RESULTS_PER_OUTPUT,
      DONT_PRODUCE_STATUS_ON_EVERY_EVENT);
}
// Loads a resource bundle using a UTF-8-aware control (bundles are read as
// UTF-8 rather than the platform default).
public static ResourceBundle getBundledResource(String basename) {
    return ResourceBundle.getBundle(basename, new UTF8Control());
}
// A bundle can be located by fully-qualified class name and yields its keys.
@Test
public void getBundleByFqcn() {
    title("getBundleByFqcn");
    String fqcn = "org.onosproject.ui.lion.LionUtils";
    res = LionUtils.getBundledResource(fqcn);
    assertNotNull("missing resource bundle", res);
    String v1 = res.getString("foo");
    String v2 = res.getString("boo");
    print("v1 is %s, v2 is %s", v1, v2);
    assertEquals("v1 value wrong", "bar", v1);
    assertEquals("v2 value wrong", "ghost", v2);
}
// Registers the api/hotspots controller and lets each action define itself on it.
@Override
public void define(Context context) {
    NewController controller = context.createController("api/hotspots");
    controller.setDescription("Read and update Security Hotspots.");
    controller.setSince("8.1");
    for (HotspotsWsAction action : actions) {
        action.define(controller);
    }
    controller.done();
}
// A random number of synthetic actions all appear on the defined controller,
// alongside its description and since-version.
@Test
public void define_controller() {
    String[] actionKeys = IntStream.range(0, 1 + new Random().nextInt(12))
        .mapToObj(i -> i + randomAlphanumeric(10))
        .toArray(String[]::new);
    HotspotsWsAction[] actions = Arrays.stream(actionKeys)
        .map(actionKey -> new HotspotsWsAction() {
            @Override
            public void define(WebService.NewController context) {
                context.createAction(actionKey).setHandler(this);
            }

            @Override
            public void handle(Request request, Response response) {
            }
        })
        .toArray(HotspotsWsAction[]::new);
    WebService.Context context = new WebService.Context();
    new HotspotsWs(actions).define(context);
    WebService.Controller controller = context.controller("api/hotspots");
    assertThat(controller).isNotNull();
    assertThat(controller.description()).isNotEmpty();
    assertThat(controller.since()).isEqualTo("8.1");
    assertThat(controller.actions()).extracting(WebService.Action::key).containsOnly(actionKeys);
}
// Builder setter for the forks attribute; returns the concrete builder type.
public B forks(Integer forks) {
    this.forks = forks;
    return getThis();
}
// The forks value set on the builder appears on the built method config.
@Test
void forks() {
    MethodBuilder builder = new MethodBuilder();
    builder.forks(5);
    Assertions.assertEquals(5, builder.build().getForks());
}
// Static factory for a nearestNeighbor(docVector, queryVector) YQL operator.
public static NearestNeighbor nearestNeighbor(String docVectorName, String queryVectorName) {
    return new NearestNeighbor(docVectorName, queryVectorName);
}
// Renders the nearestNeighbor operator into the expected YQL string.
@Test
void nearestNeighbor() {
    String q = Q.p("f1").nearestNeighbor("query_vector")
        .build();
    assertEquals(q, "yql=select * from sources * where nearestNeighbor(f1, query_vector)");
}
// Z-order encodes the inputs into a single int; input widths are validated
// against Integer.SIZE before encoding.
public int encodeToInteger(List<Integer> input) {
    checkEncodeInputValidity(input, Integer.SIZE);
    return (int) zOrderByteAddressToLong(encodeToByteArray(input));
}
// 3 x 16 bits exceeds an int's capacity, so encodeToInteger must reject the input
// with the documented message.
@Test
public void testZOrderOverInt() {
    List<Integer> bitPositions = ImmutableList.of(16, 16, 16);
    int totalBitLength = bitPositions.stream().mapToInt(Integer::intValue).sum();
    ZOrder zOrder = new ZOrder(bitPositions);
    List<Integer> intColumns = ImmutableList.of(20456, 20456, 20456);
    try {
        zOrder.encodeToInteger(intColumns);
        fail("Expected test to fail: total bits to encode is larger than the size of a integer.");
    } catch (IllegalArgumentException e) {
        String expectedMessage = format("The z-address type specified is not large enough to hold %d values with a total of %d bits.", bitPositions.size(), totalBitLength);
        assertEquals(e.getMessage(), expectedMessage, format("Expected exception message '%s' to match '%s'", e.getMessage(), expectedMessage));
    }
}
/**
 * Builds a stream-stream join. For RIGHT joins, the left/right schemas and
 * formats are swapped and the join is executed as a leftJoin with the operands
 * reversed. Serdes are built from the (possibly swapped) internal formats.
 */
@SuppressWarnings("deprecation")
public static <K> KStreamHolder<K> build(
    final KStreamHolder<K> left,
    final KStreamHolder<K> right,
    final StreamStreamJoin<K> join,
    final RuntimeBuildContext buildContext,
    final StreamJoinedFactory streamJoinedFactory) {
  final QueryContext queryContext = join.getProperties().getQueryContext();
  final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext);
  final LogicalSchema leftSchema;
  final LogicalSchema rightSchema;
  final Formats rightFormats;
  final Formats leftFormats;
  if (join.getJoinType().equals(RIGHT)) {
    // RIGHT join: swap sides so the join can run as a reversed leftJoin below.
    leftFormats = join.getRightInternalFormats();
    rightFormats = join.getLeftInternalFormats();
    leftSchema = right.getSchema();
    rightSchema = left.getSchema();
  } else {
    leftFormats = join.getLeftInternalFormats();
    rightFormats = join.getRightInternalFormats();
    leftSchema = left.getSchema();
    rightSchema = right.getSchema();
  }
  final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from(
      leftSchema,
      leftFormats.getKeyFeatures(),
      leftFormats.getValueFeatures()
  );
  final Serde<GenericRow> leftSerde = buildContext.buildValueSerde(
      leftFormats.getValueFormat(),
      leftPhysicalSchema,
      stacker.push(LEFT_SERDE_CTX).getQueryContext()
  );
  final PhysicalSchema rightPhysicalSchema = PhysicalSchema.from(
      rightSchema,
      rightFormats.getKeyFeatures(),
      rightFormats.getValueFeatures()
  );
  final Serde<GenericRow> rightSerde = buildContext.buildValueSerde(
      rightFormats.getValueFormat(),
      rightPhysicalSchema,
      stacker.push(RIGHT_SERDE_CTX).getQueryContext()
  );
  final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde(
      leftFormats.getKeyFormat(),
      leftPhysicalSchema,
      queryContext
  );
  final StreamJoined<K, GenericRow, GenericRow> joined = streamJoinedFactory.create(
      keySerde,
      leftSerde,
      rightSerde,
      StreamsUtil.buildOpName(queryContext),
      StreamsUtil.buildOpName(queryContext)
  );
  final JoinParams joinParams = JoinParamsFactory
      .create(join.getKeyColName(), leftSchema, rightSchema);
  JoinWindows joinWindows;
  // Grace, as optional, helps to identify if a user specified the GRACE PERIOD syntax in the
  // join window. If specified, then we'll call the new KStreams API ofTimeDifferenceAndGrace()
  // which enables the "spurious" results bugfix with left/outer joins (see KAFKA-10847).
  if (join.getGraceMillis().isPresent()) {
    joinWindows = JoinWindows.ofTimeDifferenceAndGrace(
        join.getBeforeMillis(),
        join.getGraceMillis().get());
  } else {
    joinWindows = JoinWindows.of(join.getBeforeMillis());
  }
  joinWindows = joinWindows.after(join.getAfterMillis());
  final KStream<K, GenericRow> result;
  switch (join.getJoinType()) {
    case LEFT:
      result = left.getStream().leftJoin(
          right.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    case RIGHT:
      result = right.getStream().leftJoin(
          left.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    case OUTER:
      result = left.getStream().outerJoin(
          right.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    case INNER:
      result = left.getStream().join(
          right.getStream(), joinParams.getJoiner(), joinWindows, joined);
      break;
    default:
      throw new IllegalStateException("invalid join type");
  }
  return left.withStream(result, joinParams.getSchema());
}
// INNER join with a synthetic key delegates to KStream#join with the expected
// joiner, windows and StreamJoined config.
@Test
public void shouldDoInnerJoinWithSyntheticKey() {
  // Given:
  givenInnerJoin(SYNTH_KEY);
  // When:
  final KStreamHolder<Struct> result = join.build(planBuilder, planInfo);
  // Then:
  verify(leftKStream).join(
      same(rightKStream),
      eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 1)),
      eq(WINDOWS_NO_GRACE),
      same(joined)
  );
  verifyNoMoreInteractions(leftKStream, rightKStream, resultKStream);
  assertThat(result.getStream(), is(resultKStream));
  assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
}
// Registers this checkpoint's shared state handles after a restore, except in
// claim mode (see comment below).
public void registerSharedStatesAfterRestored(
        SharedStateRegistry sharedStateRegistry, RestoreMode restoreMode) {
    // in claim mode we should not register any shared handles
    if (!props.isUnclaimed()) {
        sharedStateRegistry.registerAllAfterRestored(this, restoreMode);
    }
}
// Restoring a completed checkpoint registers each operator state exactly once.
@Test
void testRegisterStatesAtRegistry() {
    OperatorState state = mock(OperatorState.class);
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    operatorStates.put(new OperatorID(), state);
    CompletedCheckpoint checkpoint =
            new CompletedCheckpoint(
                    new JobID(),
                    0,
                    0,
                    1,
                    operatorStates,
                    Collections.emptyList(),
                    CheckpointProperties.forCheckpoint(
                            CheckpointRetentionPolicy.RETAIN_ON_FAILURE),
                    new TestCompletedCheckpointStorageLocation(),
                    null);
    SharedStateRegistry sharedStateRegistry = new SharedStateRegistryImpl();
    checkpoint.registerSharedStatesAfterRestored(sharedStateRegistry, RestoreMode.DEFAULT);
    verify(state, times(1)).registerSharedStates(sharedStateRegistry, 0L);
}
// True when the device-reported rate falls within RATE_ERROR tolerance (both
// directions) of the ONOS-configured rate.
private boolean isRateSimilar(long onosRate, long deviceRate) {
    double lowerEnd = (double) onosRate * (1.0 - RATE_ERROR);
    double upperEnd = (double) onosRate * (1.0 + RATE_ERROR);
    if (log.isDebugEnabled()) {
        log.debug("isRateSimilar {} in [{}, {}]", deviceRate, lowerEnd, upperEnd);
    }
    return deviceRate >= lowerEnd && deviceRate <= upperEnd;
}
// Each (onosRate, deviceRate) pair in the RATES fixture is considered similar.
@Test
public void testIsRateSimilar() {
    PiMeterBand onosMeterBand;
    PiMeterBand deviceMeterBand;
    PiMeterCellConfig onosMeter;
    PiMeterCellConfig deviceMeter;
    for (Map.Entry<Long, Long> entry : RATES.entrySet()) {
        onosMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, entry.getKey(), 0);
        deviceMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, entry.getValue(), 0);
        onosMeter = PiMeterCellConfig.builder()
                .withMeterCellId(meterCellId)
                .withMeterBand(onosMeterBand)
                .withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
                .build();
        deviceMeter = PiMeterCellConfig.builder()
                .withMeterCellId(meterCellId)
                .withMeterBand(deviceMeterBand)
                .withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
                .build();
        assertTrue(meterProgrammable.isSimilar(onosMeter, deviceMeter));
    }
}
// Sends this request synchronously via the configured service.
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
// ethMining() serializes to the expected JSON-RPC request body.
@Test
public void testEthMining() throws Exception {
    web3j.ethMining().send();
    verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"eth_mining\",\"params\":[],\"id\":1}");
}
/**
 * Resolves parameter references inside the given object. If the object is
 * itself a ParamScope, its own scope is layered over this resolver first,
 * then string leaves, non-string leaves, and nested nodes are resolved.
 */
public <T> void resolve(T resolvable) {
    ParamResolver effectiveResolver = this;
    if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
        final ParamScope scope = (ParamScope) resolvable;
        effectiveResolver = scope.applyOver(effectiveResolver);
    }
    resolveStringLeaves(resolvable, effectiveResolver);
    resolveNonStringLeaves(resolvable, effectiveResolver);
    resolveNodes(resolvable, effectiveResolver);
}
// Fields annotated with @SkipParameterResolution (the params list) keep their
// raw #{...} text while other fields are substituted.
@Test
public void shouldNotResolveOptedOutConfigSubtags() throws NoSuchFieldException {
    PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant");
    pipelineConfig.setLabelTemplate("2.1-${COUNT}-#{foo}-bar-#{bar}");
    pipelineConfig.addParam(param("#{foo}-name", "#{foo}-#{bar}-baz"));
    new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(pipelineConfig);
    assertThat(pipelineConfig.getLabelTemplate(), is("2.1-${COUNT}-pavan-bar-jj"));
    assertThat(pipelineConfig.getParams().get(0), is(param("#{foo}-name", "#{foo}-#{bar}-baz")));
    assertThat(pipelineConfig.getClass().getDeclaredField("params").getAnnotation(SkipParameterResolution.class), isA(SkipParameterResolution.class));
}
/**
 * Returns true if the given parameter names contain any known UTM link
 * property or any custom channel source key.
 *
 * <p>Bug fix: the original second loop iterated {@code sChannelSourceKeySet}
 * and then checked membership in the same set it was iterating
 * ({@code sChannelSourceKeySet.contains(key)}), which is trivially true for
 * every non-empty key regardless of {@code parameterNames}. The check now
 * correctly tests {@code parameterNames.contains(key)}.
 */
public static boolean hasLinkUtmProperties(Set<String> parameterNames) {
    if (parameterNames == null || parameterNames.isEmpty()) {
        return false;
    }
    // Standard UTM parameters (utm_source, utm_medium, ...).
    for (Map.Entry<String, String> entry : UTM_LINK_MAP.entrySet()) {
        if (entry != null && parameterNames.contains(entry.getValue())) {
            return true;
        }
    }
    // Custom channel source keys configured by the integrator.
    for (String key : sChannelSourceKeySet) {
        if (!TextUtils.isEmpty(key) && parameterNames.contains(key)) {
            return true;
        }
    }
    return false;
}
// A set containing standard utm_* parameter names is recognized.
@Test
public void hasLinkUtmProperties() {
    Set sets = new HashSet();
    sets.add("utm_source");
    sets.add("utm_medium");
    sets.add("utm_content");
    Assert.assertTrue(ChannelUtils.hasLinkUtmProperties(sets));
}
/**
 * Asserts the collection is neither null nor empty.
 *
 * @throws IllegalArgumentException with the given message otherwise
 */
public static void notEmpty(Collection<?> collection, String message) {
    if (CollectionUtil.isEmpty(collection)) {
        throw new IllegalArgumentException(message);
    }
}
// The String overload of notEmpty rejects an empty string.
@Test(expected = IllegalArgumentException.class)
public void assertNotEmptyByString() {
    Assert.notEmpty("", "string is null");
}
/**
 * Configures the prefix that marks a CSV line as a comment to be skipped.
 *
 * @param commentPrefix non-null, non-empty prefix string
 * @return this reader, for call chaining
 * @throws IllegalArgumentException if the prefix is null or empty
 */
public CsvReader ignoreComments(String commentPrefix) {
    // isEmpty() is the idiomatic form of length() == 0.
    if (commentPrefix == null || commentPrefix.isEmpty()) {
        throw new IllegalArgumentException(
                "The comment prefix must not be null or an empty string");
    }
    this.commentPrefix = commentPrefix;
    return this;
}
// The comment prefix defaults to null and is stored verbatim when set.
@Test
void testIgnoreComments() {
    CsvReader reader = getCsvReader();
    assertThat(reader.commentPrefix).isNull();
    reader.ignoreComments("#");
    assertThat(reader.commentPrefix).isEqualTo("#");
}
/**
 * Wraps the given Mono with circuit breaker, rate limiter and timeout from the
 * Resilience4J config. Timeouts are additionally reported to the circuit
 * breaker as errors; an optional fallback handles any failure.
 */
@Override
public <T> Mono<T> run(final Mono<T> run, final Function<Throwable, Mono<T>> fallback, final Resilience4JConf resilience4JConf) {
    RateLimiter rateLimiter = Resilience4JRegistryFactory.rateLimiter(resilience4JConf.getId(), resilience4JConf.getRateLimiterConfig());
    CircuitBreaker circuitBreaker = Resilience4JRegistryFactory.circuitBreaker(resilience4JConf.getId(), resilience4JConf.getCircuitBreakerConfig());
    final Duration timeoutDuration = resilience4JConf.getTimeLimiterConfig().getTimeoutDuration();
    Mono<T> to = run.transformDeferred(CircuitBreakerOperator.of(circuitBreaker))
            .transformDeferred(RateLimiterOperator.of(rateLimiter))
            .timeout(timeoutDuration, Mono.error(() -> new TimeoutException("Response took longer than timeout: " + timeoutDuration)))
            .doOnError(TimeoutException.class, t -> circuitBreaker.onError(
                    resilience4JConf.getTimeLimiterConfig().getTimeoutDuration().toMillis(), TimeUnit.MILLISECONDS, t));
    if (Objects.nonNull(fallback)) {
        to = to.onErrorResume(fallback);
    }
    return to;
}
// A successful Mono passes through the combined executor unchanged; the
// fallback is never taken.
@Test
public void normalTest() {
    Resilience4JConf conf = mock(Resilience4JConf.class);
    when(conf.getId()).thenReturn("SHENYU");
    when(conf.getRateLimiterConfig()).thenReturn(RateLimiterConfig.ofDefaults());
    when(conf.getTimeLimiterConfig()).thenReturn(TimeLimiterConfig.ofDefaults());
    when(conf.getCircuitBreakerConfig()).thenReturn(CircuitBreakerConfig.ofDefaults());
    Mono<String> mono = Mono.just("ERROR");
    StepVerifier.create(combinedExecutor.run(Mono.just("SHENYU"), throwable -> mono, conf))
            .expectSubscription()
            .expectNext("SHENYU")
            .verifyComplete();
}
/**
 * Merges two gap-encoded ascending integer streams into one, deduplicating
 * values present in both. Integer.MAX_VALUE acts as the exhausted-stream
 * sentinel returned by nextElement().
 */
public static GapEncodedVariableLengthIntegerReader combine(GapEncodedVariableLengthIntegerReader reader1, GapEncodedVariableLengthIntegerReader reader2, ArraySegmentRecycler memoryRecycler) {
    reader1.reset();
    reader2.reset();
    ByteDataArray arr = new ByteDataArray(memoryRecycler);
    // cur tracks the last emitted value; deltas (gaps) are what get written.
    int cur = 0;
    while(reader1.nextElement() != Integer.MAX_VALUE || reader2.nextElement() != Integer.MAX_VALUE) {
        if(reader1.nextElement() < reader2.nextElement()) {
            VarInt.writeVInt(arr, reader1.nextElement() - cur);
            cur = reader1.nextElement();
            reader1.advance();
        } else if(reader2.nextElement() < reader1.nextElement()) {
            VarInt.writeVInt(arr, reader2.nextElement() - cur);
            cur = reader2.nextElement();
            reader2.advance();
        } else {
            // Equal heads: emit once, advance both (deduplication).
            VarInt.writeVInt(arr, reader1.nextElement() - cur);
            cur = reader1.nextElement();
            reader1.advance();
            reader2.advance();
        }
    }
    return new GapEncodedVariableLengthIntegerReader(arr.getUnderlyingArray(), (int)arr.length());
}
// Merging two overlapping sorted streams yields their deduplicated union.
@Test
public void testCombine() {
    GapEncodedVariableLengthIntegerReader reader1 = reader(1, 10, 100, 105, 107, 200);
    GapEncodedVariableLengthIntegerReader reader2 = reader(5, 76, 100, 102, 109, 197, 198, 199, 200, 201);
    GapEncodedVariableLengthIntegerReader combined = GapEncodedVariableLengthIntegerReader.combine(reader1, reader2, WastefulRecycler.SMALL_ARRAY_RECYCLER);
    assertValues(combined, 1, 5, 10, 76, 100, 102, 105, 107, 109, 197, 198, 199, 200, 201);
}
/**
 * Parses a PostgreSQL-style range literal (e.g. {@code "[2019-03-27 16:33:10,)"})
 * into a {@link Range} of {@link LocalDateTime}. Bound values are unquoted
 * before being parsed.
 *
 * @param range the textual range representation
 * @return the parsed range
 */
public static Range<LocalDateTime> localDateTimeRange(String range) {
    return ofString(range, parseLocalDateTime().compose(unquote()), LocalDateTime.class);
}
/**
 * Verifies that localDateTimeRange() accepts fractional seconds of 1 to 6
 * digits and the special "infinity" upper bound.
 */
@Test
public void localDateTimeTest() {
    assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.1,)"));
    assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.12,)"));
    assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.123,)"));
    assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.1234,)"));
    assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.12345,)"));
    assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.123456,)"));
    assertNotNull(Range.localDateTimeRange("[2019-03-27 16:33:10.123456,infinity)"));
}
/**
 * Polls for coordinator events: heartbeats, (re)joins for auto-assigned
 * partitions, metadata refreshes for pattern subscriptions, pending offset
 * commit callbacks and async auto-commits.
 *
 * @param timer            bounds how long this call may block
 * @param waitForJoinGroup whether to block on an in-progress group join
 *                         (false uses a zero timer, i.e. best-effort)
 * @return true if the poll completed; false if it ran out of time while the
 *         coordinator was unknown/unready or the join did not finish
 */
public boolean poll(Timer timer, boolean waitForJoinGroup) {
    maybeUpdateSubscriptionMetadata();
    invokeCompletedOffsetCommitCallbacks();
    if (subscriptions.hasAutoAssignedPartitions()) {
        if (protocol == null) {
            throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " to empty while trying to subscribe for group protocol to auto assign partitions");
        }
        // Always update the heartbeat last poll time so that the heartbeat thread does not leave the
        // group proactively due to application inactivity even if (say) the coordinator cannot be found.
        pollHeartbeat(timer.currentTimeMs());
        if (coordinatorUnknownAndUnreadySync(timer)) {
            return false;
        }
        if (rejoinNeededOrPending()) {
            // due to a race condition between the initial metadata fetch and the initial rebalance,
            // we need to ensure that the metadata is fresh before joining initially. This ensures
            // that we have matched the pattern against the cluster's topics at least once before joining.
            if (subscriptions.hasPatternSubscription()) {
                // For consumer group that uses pattern-based subscription, after a topic is created,
                // any consumer that discovers the topic after metadata refresh can trigger rebalance
                // across the entire consumer group. Multiple rebalances can be triggered after one topic
                // creation if consumers refresh metadata at vastly different times. We can significantly
                // reduce the number of rebalances caused by single topic creation by asking consumer to
                // refresh metadata before re-joining the group as long as the refresh backoff time has
                // passed.
                if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
                    this.metadata.requestUpdate(true);
                }
                if (!client.ensureFreshMetadata(timer)) {
                    return false;
                }
                maybeUpdateSubscriptionMetadata();
            }
            // if not wait for join group, we would just use a timer of 0
            if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
                // since we may use a different timer in the callee, we'd still need
                // to update the original timer's current time after the call
                timer.update(time.milliseconds());
                return false;
            }
        }
    } else {
        // For manually assigned partitions, we do not try to pro-actively lookup coordinator;
        // instead we only try to refresh metadata when necessary.
        // If connections to all nodes fail, wakeups triggered while attempting to send fetch
        // requests result in polls returning immediately, causing a tight loop of polls. Without
        // the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
        // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
        if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
            client.awaitMetadataUpdate(timer);
        }
        // if there is pending coordinator requests, ensure they have a chance to be transmitted.
        client.pollNoWakeup();
    }
    maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
    return true;
}
/**
 * Verifies that with auto-commit enabled and a dynamic (subscribed)
 * assignment, poll() commits the current position once the auto-commit
 * interval has elapsed.
 */
@Test
public void testAutoCommitDynamicAssignment() {
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
        joinAsFollowerAndReceiveAssignment(coordinator, singletonList(t1p));
        subscriptions.seek(t1p, 100);
        // Expect an offset commit of position 100 for t1p.
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
        // Advance past the auto-commit interval so poll() triggers the commit.
        time.sleep(autoCommitIntervalMs);
        coordinator.poll(time.timer(Long.MAX_VALUE));
        assertFalse(client.hasPendingResponses());
    }
}
/**
 * Generates an inclusive series of longs from {@code start} to {@code end},
 * stepping by +1 when ascending and -1 when descending.
 *
 * @param start the beginning of the series
 * @param end   the end of the series (inclusive)
 * @return the generated series
 */
@Udf
public List<Long> generateSeriesLong(
    @UdfParameter(description = "The beginning of the series") final long start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
  // Compare directly instead of testing (end - start) > 0: the subtraction can
  // overflow for widely separated longs and pick the wrong step sign.
  return generateSeriesLong(start, end, end > start ? 1 : -1);
}
/**
 * Verifies a descending series with a step (-3) that does not land exactly
 * on the end bound: 9, 6, 3, 0.
 */
@Test
public void shouldComputeIntRangeWithNegativeOddStepLong() {
    final List<Long> range = rangeUdf.generateSeriesLong(9, 0, -3);
    assertThat(range, hasSize(4));
    long val = 9;
    for (final long i : range) {
        assertThat(val, is(i));
        val -= 3;
    }
}
/**
 * Intersects this all-or-none set with another: the result is "all" only when
 * both operands are "all", otherwise "none".
 *
 * @param other the other value set; must be compatible (same type, same class)
 * @return the intersection as a new AllOrNoneValueSet
 */
@Override
public ValueSet intersect(ValueSet other) {
    // checkCompatibility validates the operand and narrows it to this class.
    AllOrNoneValueSet otherValueSet = checkCompatibility(other);
    return new AllOrNoneValueSet(type, all && otherValueSet.all);
}
/**
 * Verifies the boolean-AND semantics of intersect(): only all∩all is all,
 * every combination involving none yields none.
 */
@Test
public void testIntersect() {
    AllOrNoneValueSet all = AllOrNoneValueSet.all(HYPER_LOG_LOG);
    AllOrNoneValueSet none = AllOrNoneValueSet.none(HYPER_LOG_LOG);
    assertEquals(all.intersect(all), all);
    assertEquals(all.intersect(none), none);
    assertEquals(none.intersect(all), none);
    assertEquals(none.intersect(none), none);
}
/**
 * Lifecycle hook: ensures the export directory exists, creating it (and any
 * missing parent directories) if necessary.
 */
@Override
public void start() {
    Files2.FILES2.createDir(exportDir);
}
/**
 * Verifies that start() creates the export directory tree from scratch,
 * including all missing parent directories under the data dir.
 */
@Test
public void start_creates_import_and_export_directories_including_missing_parents() throws IOException {
    dataDir = new File(temp.newFolder(), "data");
    File importDir = new File(dataDir, "governance/project_dumps/import");
    File exportDir = new File(dataDir, "governance/project_dumps/export");
    settings.setProperty("sonar.path.data", dataDir.getAbsolutePath());
    this.underTest = new ProjectExportDumpFSImpl(settings.asConfig());
    // Precondition: nothing exists before start().
    assertThat(dataDir).doesNotExist();
    assertThat(importDir).doesNotExist();
    assertThat(exportDir).doesNotExist();
    underTest.start();
    assertThat(dataDir).exists().isDirectory();
    assertThat(exportDir).exists().isDirectory();
}
/**
 * Resolves the UDAF factory matching the given argument types and returns a
 * {@link FunctionSource} describing how many leading arguments are init
 * (literal) parameters and how to instantiate the function.
 * <p>
 * Null entries in {@code argTypeList} are permitted (they match any type in
 * the index lookup).
 *
 * @param argTypeList the SQL types of the call's arguments; entries may be null
 * @return the resolved function source
 * @throws KsqlException if no matching aggregate function is registered
 */
@Override
public synchronized FunctionSource getFunction(final List<SqlType> argTypeList) {
    final List<SqlArgument> args = argTypeList.stream()
        .map((type) -> type == null ? null : SqlArgument.of(type))
        .collect(Collectors.toList());
    final UdafFactoryInvoker creator = udfIndex.getFunction(args);
    if (creator == null) {
        // Build the diagnostic null-safely: argTypeList may contain nulls, and
        // calling baseType() on them would raise an NPE instead of this KsqlException.
        throw new KsqlException("There is no aggregate function with name='" + getName()
            + "' that has arguments of type="
            + argTypeList.stream()
                .map(type -> type == null ? "null" : Objects.toString(type.baseType()))
                .collect(Collectors.joining(",")));
    }
    final boolean isFactoryVariadic = creator.literalParams().stream()
        .anyMatch(ParameterInfo::isVariadic);
    /* There can only be one variadic argument, so we know either the column args
       are bounded or the initial args are bounded. */
    final int numInitArgs;
    final int numSignatureInitArgs = creator.literalParams().size();
    if (isFactoryVariadic) {
        // Variadic init params: everything not consumed by column params is an init arg.
        numInitArgs = argTypeList.size() - (creator.parameterInfo().size() - numSignatureInitArgs);
    } else {
        numInitArgs = numSignatureInitArgs;
    }
    return new FunctionSource(
        numInitArgs,
        (initArgs) -> creator.createFunction(initArgs, args)
    );
}
/**
 * Verifies that a factory taking init params of every primitive type resolves
 * correctly: the first argument is the column, the remaining five are init
 * args, and instantiation with matching literals does not throw.
 */
@Test
public void shouldHandleInitParamsOfAllPrimitiveTypes() {
    // When:
    AggregateFunctionFactory.FunctionSource result = functionFactory.getFunction(
        ImmutableList.of(SqlTypes.STRING, SqlTypes.BOOLEAN, SqlTypes.INTEGER, SqlTypes.BIGINT, SqlTypes.DOUBLE, SqlTypes.STRING)
    );
    int initArgs = result.initArgs;
    result.source.apply(new AggregateFunctionInitArguments(
        Collections.singletonList(0),
        ImmutableMap.of(),
        ImmutableList.of(true, 1, 1L, 1.0d, "s")
    ));
    // Then: did not throw.
    assertEquals(5, initArgs);
}
/**
 * Returns whether the underlying socket has been connected.
 * Note: delegates to {@link java.net.Socket#isConnected()}, which stays true
 * even after the socket is closed.
 */
public boolean isConnected() {
    return socket.isConnected();
}
/**
 * Verifies that a BlobClient can be constructed from an *unresolved*
 * InetSocketAddress and still establishes a connection to the blob server.
 */
@Test
void testUnresolvedInetSocketAddress() throws Exception {
    try (BlobClient client = new BlobClient(
            InetSocketAddress.createUnresolved("localhost", getBlobServer().getPort()),
            getBlobClientConfig())) {
        assertThat(client.isConnected()).isTrue();
    }
}
/**
 * Asserts that the subject contains exactly the given elements (in any order
 * unless {@code inOrder()} is chained on the returned {@link Ordered}).
 * <p>
 * A null varargs array is treated as a single null element. The trailing
 * boolean flags the "single Iterable passed to a varargs method" case so the
 * failure message can warn about the likely mistake.
 */
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
    // varargs == null happens when the caller passes a literal null, not an array.
    List<@Nullable Object> expected =
        (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
    return containsExactlyElementsIn(
        expected,
        varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
/**
 * Verifies the failure message when the actual iterable holds only an empty
 * string but "a" and null were expected: both are reported missing and the
 * empty string is reported as unexpected.
 */
@Test
public void iterableContainsExactlyWithEmptyStringAndMissingItem() {
    expectFailureWhenTestingThat(asList("")).containsExactly("a", null);
    assertFailureValue("missing (2)", "a, null");
    assertFailureValue("unexpected (1)", "");
}
/**
 * Looks up the cached file data for the given path.
 *
 * @param path the file path key
 * @return the file data, or null if no entry exists for the path
 */
@CheckForNull
public FileData fileData(String path) {
    return fileDataByPath.get(path);
}
/** Verifies that fileData() returns null for an unknown path. */
@Test
public void test_file_data_when_file_does_not_exist() {
    FileData fileData = repository.fileData("/Def.java");
    assertThat(fileData).isNull();
}
/**
 * Instantiates a {@link Catalog} implementation reflectively by class name,
 * injects the Hadoop configuration (if the implementation accepts one) and
 * initializes it with the given name and properties.
 *
 * @param impl        fully qualified class name of the Catalog implementation
 * @param catalogName name passed to {@code Catalog.initialize}
 * @param properties  catalog properties passed to {@code Catalog.initialize}
 * @param hadoopConf  Hadoop Configuration object, or null
 * @return the initialized catalog
 * @throws IllegalArgumentException if the class has no no-arg constructor or
 *         does not implement Catalog
 */
public static Catalog loadCatalog(
    String impl, String catalogName, Map<String, String> properties, Object hadoopConf) {
    Preconditions.checkNotNull(impl, "Cannot initialize custom Catalog, impl class name is null");
    DynConstructors.Ctor<Catalog> ctor;
    try {
        ctor = DynConstructors.builder(Catalog.class).impl(impl).buildChecked();
    } catch (NoSuchMethodException e) {
        throw new IllegalArgumentException(
            String.format("Cannot initialize Catalog implementation %s: %s", impl, e.getMessage()), e);
    }
    Catalog catalog;
    try {
        catalog = ctor.newInstance();
    } catch (ClassCastException e) {
        // The class exists but is not a Catalog.
        throw new IllegalArgumentException(
            String.format("Cannot initialize Catalog, %s does not implement Catalog.", impl), e);
    }
    // Configure Hadoop conf before initialize() so the catalog can use it there.
    configureHadoopConf(catalog, hadoopConf);
    catalog.initialize(catalogName, properties);
    return catalog;
}
/**
 * Verifies that loadCatalog() instantiates the named implementation and
 * forwards the catalog name and properties to initialize().
 */
@Test
public void loadCustomCatalog() {
    Map<String, String> options = Maps.newHashMap();
    options.put("key", "val");
    Configuration hadoopConf = new Configuration();
    String name = "custom";
    Catalog catalog = CatalogUtil.loadCatalog(TestCatalog.class.getName(), name, options, hadoopConf);
    assertThat(catalog).isInstanceOf(TestCatalog.class);
    assertThat(((TestCatalog) catalog).catalogName).isEqualTo(name);
    assertThat(((TestCatalog) catalog).catalogProperties).isEqualTo(options);
}
/**
 * Returns the given string, falling back to {@code defaultStr} when the
 * input is empty (as defined by {@code isEmpty}, i.e. null or "").
 *
 * @param str        the string to check
 * @param defaultStr the fallback value (may itself be null)
 * @return {@code str} when non-empty, otherwise {@code defaultStr}
 */
public static String defaultIfEmpty(String str, String defaultStr) {
    if (isEmpty(str)) {
        return defaultStr;
    }
    return str;
}
/**
 * Verifies defaultIfEmpty(): null/"" fall back to the default, whitespace and
 * non-empty strings pass through, and a null default is returned for "".
 */
@Test
void testDefaultIfEmpty() {
    assertEquals("NULL", StringUtils.defaultIfEmpty(null, "NULL"));
    assertEquals("NULL", StringUtils.defaultIfEmpty("", "NULL"));
    // A blank-but-non-empty string is NOT considered empty.
    assertEquals(" ", StringUtils.defaultIfEmpty(" ", "NULL"));
    assertEquals("bat", StringUtils.defaultIfEmpty("bat", "NULL"));
    assertNull(StringUtils.defaultIfEmpty("", null));
}
/** Returns the configured Kubernetes service name (package-private accessor). */
String getServiceName() {
    return serviceName;
}
/**
 * Verifies that when the service-dns property is explicitly null, the
 * configured service-name property is still honored.
 */
@Test
public void propertyServiceDnsIsNull() {
    // given
    Map<String, Comparable> properties = createProperties();
    String serviceName = "service-name";
    properties.put(SERVICE_NAME.key(), serviceName);
    properties.put(SERVICE_DNS.key(), null);
    //when
    KubernetesConfig config = new KubernetesConfig(properties);
    //then
    assertEquals(serviceName, config.getServiceName());
}
/**
 * Computes the overall health of the given deflector indices: aliases are
 * resolved to their target indices, and the worst (minimum) health across the
 * matching index summaries is returned.
 *
 * @param indices index or alias names to check
 * @return Green for an empty input; empty Optional when some resolved index
 *         has no summary (e.g. it does not exist); otherwise the worst status
 */
@Override
public Optional<HealthStatus> deflectorHealth(Collection<String> indices) {
    if (indices.isEmpty()) {
        return Optional.of(HealthStatus.Green);
    }
    final Map<String, String> aliasMapping = catApi.aliases();
    // Resolve aliases to concrete index names; plain index names pass through.
    final Set<String> mappedIndices = indices
        .stream()
        .map(index -> aliasMapping.getOrDefault(index, index))
        .collect(Collectors.toSet());
    final Set<IndexSummaryResponse> indexSummaries = catApi.indices()
        .stream()
        .filter(indexSummary -> mappedIndices.contains(indexSummary.index()))
        .collect(Collectors.toSet());
    // Fewer summaries than resolved indices => at least one index is missing.
    if (indexSummaries.size() < mappedIndices.size()) {
        return Optional.empty();
    }
    // min() relies on HealthStatus ordering where the worst status compares lowest.
    return indexSummaries.stream()
        .map(IndexSummaryResponse::health)
        .map(HealthStatus::fromString)
        .min(HealthStatus::compareTo);
}
/**
 * Verifies that deflectorHealth() resolves aliases and reports the worst
 * status among the target indices (RED wins over YELLOW and GREEN).
 */
@Test
void testDeflectorHealth() {
    when(catApi.aliases()).thenReturn(Map.of(
        "foo_deflector", "foo_42",
        "bar_deflector", "bar_17",
        "baz_deflector", "baz_23"
    ));
    when(catApi.indices()).thenReturn(List.of(
        new IndexSummaryResponse("foo_42", "", "RED"),
        new IndexSummaryResponse("bar_17", "", "YELLOW"),
        new IndexSummaryResponse("baz_23", "", "GREEN")
    ));
    assertThat(clusterAdapter.deflectorHealth(Set.of("foo_deflector", "bar_deflector", "baz_deflector"))).contains(HealthStatus.Red);
}
/**
 * Renders each registered request handler as a JSON object (component info
 * plus its configured client/server bindings) and collects them into a JSON
 * array.
 *
 * @param bindingsConfig per-handler binding configuration
 * @param handlersById   handlers keyed by component id
 * @return a JSON array with one entry per handler
 */
static JsonNode renderRequestHandlers(JdiscBindingsConfig bindingsConfig,
                                      Map<ComponentId, ? extends RequestHandler> handlersById) {
    ArrayNode handlersArray = jsonMapper.createArrayNode();
    handlersById.forEach((componentId, handler) -> {
        ObjectNode handlerJson = renderComponent(handler, componentId);
        addBindings(bindingsConfig, componentId.stringValue(), handlerJson);
        handlersArray.add(handlerJson);
    });
    return handlersArray;
}
/**
 * Verifies that a client provider is rendered with its id and all of its
 * configured client and server bindings present in the JSON output.
 */
@Test
void client_providers_are_rendered() {
    final String id = "myClient";
    final String clientBinding = "http://*/clientBinding";
    final String clientBinding2 = "http://*/anotherClientBinding";
    final String serverBinding = "http://*/serverBinding";
    HashMap<ComponentId, ClientProvider> clientsById = new HashMap<>();
    clientsById.put(new ComponentId(id), Mockito.mock(ClientProvider.class));
    JdiscBindingsConfig bindingsConfig = new JdiscBindingsConfig(new JdiscBindingsConfig.Builder()
            .handlers(id, new Handlers.Builder()
                    .clientBindings(clientBinding)
                    .clientBindings(clientBinding2)
                    .serverBindings(serverBinding))
    );
    String json = ApplicationStatusHandler.renderRequestHandlers(bindingsConfig, clientsById).toString();
    System.out.println(json);
    assertTrue(json.contains("\"" + id + "\""));
    assertTrue(json.contains(clientBinding));
    assertTrue(json.contains(clientBinding2));
    assertTrue(json.contains(serverBinding));
}
/**
 * CLI command: scans the active timeline for completed instants whose commit
 * metadata file is empty and logs each one at WARN level. No state is
 * modified — this is a diagnostic listing only.
 */
@ShellMethod(key = "repair show empty commit metadata", value = "show failed commits")
public void showFailedCommits() {
    HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient();
    HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
    activeTimeline.filterCompletedInstants().getInstantsAsStream()
            .filter(activeTimeline::isEmpty)
            .forEach(hoodieInstant -> LOG.warn("Empty Commit: " + hoodieInstant.toString()));
}
/**
 * Verifies the "repair show empty commit metadata" command: commits whose
 * metadata files were replaced by empty instant files (every 4th instant) are
 * logged at WARN as "Empty Commit:", and only completed instants are reported.
 * Uses a custom log appender to capture RepairsCommand's output.
 */
@Test
public void testShowFailedCommits() {
    HoodieCLI.conf = storageConf();
    StorageConfiguration<?> conf = HoodieCLI.conf;
    HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient();
    // Create 19 valid commits.
    for (int i = 1; i < 20; i++) {
        String timestamp = String.valueOf(i);
        // Write corrupted requested Clean File
        HoodieTestCommitMetadataGenerator.createCommitFile(tablePath, timestamp, conf);
    }
    // Replace every 4th instant's file with an empty one to simulate failed commits.
    metaClient.getActiveTimeline().getInstantsAsStream().filter(hoodieInstant -> Integer.parseInt(hoodieInstant.getTimestamp()) % 4 == 0).forEach(hoodieInstant -> {
        metaClient.getActiveTimeline().deleteInstantFileIfExists(hoodieInstant);
        if (hoodieInstant.isCompleted()) {
            metaClient.getActiveTimeline().createCompleteInstant(hoodieInstant);
        } else {
            metaClient.getActiveTimeline().createNewInstant(hoodieInstant);
        }
    });
    // Capture WARN log output from the command.
    final TestLogAppender appender = new TestLogAppender();
    final Logger logger = (Logger) LogManager.getLogger(RepairsCommand.class);
    try {
        appender.start();
        logger.addAppender(appender);
        Object result = shell.evaluate(() -> "repair show empty commit metadata");
        assertTrue(ShellEvaluationResultUtil.isSuccess(result));
        final List<LogEvent> log = appender.getLog();
        // Instants 4, 8, 12, 16 were emptied => four WARN lines expected.
        assertEquals(log.size(), 4);
        log.forEach(LoggingEvent -> {
            assertEquals(LoggingEvent.getLevel(), Level.WARN);
            assertTrue(LoggingEvent.getMessage().getFormattedMessage().contains("Empty Commit: "));
            assertTrue(LoggingEvent.getMessage().getFormattedMessage().contains("COMPLETED]"));
        });
    } finally {
        logger.removeAppender(appender);
    }
}
/**
 * Runs the recursive-parser-wrapper example and serializes the resulting list
 * of per-document Metadata objects to a JSON string.
 *
 * @return the JSON representation of the metadata list
 * @throws IOException   on read/serialization failure
 * @throws SAXException  on XML parsing failure
 * @throws TikaException on Tika parsing failure
 */
public String serializedRecursiveParserWrapperExample() throws IOException, SAXException, TikaException {
    List<Metadata> metadataList = recursiveParserWrapperExample();
    StringWriter writer = new StringWriter();
    JsonMetadataList.toJson(metadataList, writer);
    return writer.toString();
}
/**
 * Verifies that the serialized example contains expected document text and
 * that the JSON round-trips back into the full list of 12 Metadata objects.
 */
@Test
public void testSerializedRecursiveParserWrapperExample() throws IOException, SAXException, TikaException {
    String json = parsingExample.serializedRecursiveParserWrapperExample();
    assertTrue(json.contains("When in the Course"));
    //now try deserializing the JSON
    List<Metadata> metadataList = JsonMetadataList.fromJson(new StringReader(json));
    assertEquals(12, metadataList.size());
}
/**
 * Creates a Frequency of the given number of terahertz.
 * NOTE(review): {@code value * THZ} is plain long multiplication and can
 * silently overflow for very large inputs — confirm expected input range.
 *
 * @param value frequency in THz
 * @return the Frequency instance
 */
public static Frequency ofTHz(long value) {
    return new Frequency(value * THZ);
}
/**
 * Verifies the THz→GHz conversion: 1 THz equals 1000 GHz.
 * NOTE(review): passes the double literal 1.0 — presumably a double overload
 * of ofTHz exists alongside the long variant; verify against the class.
 */
@Test
public void testofTHz() {
    Frequency frequency = Frequency.ofTHz(1.0);
    assertThat(frequency.asGHz(), is(1000.0));
}
/**
 * Determines whether the basic cluster-member information differs between the
 * actual (new) member and the expected (cached) member.
 * <p>
 * Compared fields: ip, port, address, state, grpc-report flag, plus the
 * extend-info comparison delegated to
 * {@code isBasicInfoChangedInExtendInfo}.
 * NOTE(review): when {@code expected} is non-null, {@code actual} is
 * dereferenced without a null check — callers appear to guarantee a non-null
 * actual; confirm.
 *
 * @param actual   the member as currently observed
 * @param expected the previously known member; may be null
 * @return true if any basic field differs (or expected is null and actual isn't)
 */
public static boolean isBasicInfoChanged(Member actual, Member expected) {
    // No cached member: any non-null actual counts as a change.
    if (null == expected) {
        return null != actual;
    }
    if (!expected.getIp().equals(actual.getIp())) {
        return true;
    }
    if (expected.getPort() != actual.getPort()) {
        return true;
    }
    if (!expected.getAddress().equals(actual.getAddress())) {
        return true;
    }
    if (!expected.getState().equals(actual.getState())) {
        return true;
    }
    // if change
    if (expected.isGrpcReportEnabled() != actual.isGrpcReportEnabled()) {
        return true;
    }
    return isBasicInfoChangedInExtendInfo(expected, actual);
}
/**
 * Verifies that a non-null member compared against a null expected member is
 * reported as changed.
 */
@Test
void testIsBasicInfoChangedForChangedNull() {
    Member newMember = buildMember();
    assertTrue(MemberUtil.isBasicInfoChanged(newMember, null));
}
/**
 * Refreshes the subscriber's locally cached rule data with the given list,
 * then notifies the subscriber of each rule individually.
 *
 * @param dataList the full set of rules to apply
 */
@Override
protected void doRefresh(final List<RuleData> dataList) {
    pluginDataSubscriber.refreshRuleDataSelf(dataList);
    for (final RuleData ruleData : dataList) {
        pluginDataSubscriber.onRuleSubscribe(ruleData);
    }
}
/**
 * Verifies that doRefresh() first replaces the subscriber's cached rules and
 * then fires onRuleSubscribe once per rule.
 */
@Test
public void testDoRefresh() {
    List<RuleData> ruleDataList = createFakeRuleDateObjects(3);
    ruleDataHandler.doRefresh(ruleDataList);
    verify(subscriber).refreshRuleDataSelf(ruleDataList);
    ruleDataList.forEach(verify(subscriber)::onRuleSubscribe);
}
/**
 * Chooses the slot selection strategy for the given job type from the
 * configuration: an evenly-spread-out location-preference strategy when the
 * task-manager load balance mode is SLOTS, otherwise the default one.
 * When local recovery is enabled, streaming jobs additionally get
 * previous-allocation preference; batch jobs fall back (local recovery is
 * unsupported for them) with a warning.
 *
 * @param jobType       STREAMING or BATCH
 * @param configuration cluster configuration
 * @return the strategy to use for slot selection
 */
public static SlotSelectionStrategy selectSlotSelectionStrategy(
        final JobType jobType, final Configuration configuration) {
    TaskManagerLoadBalanceMode taskManagerLoadBalanceMode =
            TaskManagerOptions.TaskManagerLoadBalanceMode.loadFromConfiguration(configuration);
    final SlotSelectionStrategy locationPreferenceSlotSelectionStrategy;
    locationPreferenceSlotSelectionStrategy =
            taskManagerLoadBalanceMode == TaskManagerLoadBalanceMode.SLOTS
                    ? LocationPreferenceSlotSelectionStrategy.createEvenlySpreadOut()
                    : LocationPreferenceSlotSelectionStrategy.createDefault();
    final boolean isLocalRecoveryEnabled = configuration.get(StateRecoveryOptions.LOCAL_RECOVERY);
    if (isLocalRecoveryEnabled) {
        if (jobType == JobType.STREAMING) {
            // Prefer the slot of the previous allocation so local state can be reused.
            return PreviousAllocationSlotSelectionStrategy.create(
                    locationPreferenceSlotSelectionStrategy);
        } else {
            LOG.warn(
                    "Batch job does not support local recovery. Falling back to use "
                            + locationPreferenceSlotSelectionStrategy.getClass());
            return locationPreferenceSlotSelectionStrategy;
        }
    } else {
        return locationPreferenceSlotSelectionStrategy;
    }
}
/**
 * Verifies that a BATCH job with local recovery enabled still receives a
 * plain location-preference strategy (local recovery is streaming-only).
 */
@Test
void testCreateLocationPreferenceSlotSelectionStrategyForLocalRecoveryBatchJob() {
    final Configuration configuration = new Configuration();
    configuration.set(StateRecoveryOptions.LOCAL_RECOVERY, true);
    final SlotSelectionStrategy slotSelectionStrategy =
            SlotSelectionStrategyUtils.selectSlotSelectionStrategy(
                    JobType.BATCH, configuration);
    assertThat(slotSelectionStrategy)
            .isInstanceOf(LocationPreferenceSlotSelectionStrategy.class);
}
/**
 * Wraps an InputStream so that at most {@code min(limit, is.available())}
 * bytes can be read from it. Mark/reset are delegated to the underlying
 * stream, with the logical position saved and restored alongside.
 * <p>
 * Fixes over the previous version: {@code read(byte[], int, int)} and
 * {@code skip} now honor the underlying stream's actual return value instead
 * of assuming the full requested amount was read/skipped, so short reads and
 * early EOF no longer corrupt the position or over-report delivered bytes.
 *
 * @param is    the stream to wrap
 * @param limit maximum number of bytes the caller may consume
 * @return the limited view of {@code is}
 * @throws IOException if {@code is.available()} fails
 */
public static InputStream limitedInputStream(final InputStream is, final int limit) throws IOException {
    return new InputStream() {
        // Logical read position within the limited window.
        private int mPosition = 0;
        // Position saved by mark(), restored by reset().
        private int mMark = 0;
        // Effective cap: never more than what the stream reports available now.
        private final int mLimit = Math.min(limit, is.available());

        @Override
        public int read() throws IOException {
            if (mPosition < mLimit) {
                int b = is.read();
                // Only advance on an actual byte; propagate EOF untouched.
                if (b >= 0) {
                    mPosition++;
                }
                return b;
            }
            return -1;
        }

        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            if (b == null) {
                throw new NullPointerException();
            }
            if (off < 0 || len < 0 || len > b.length - off) {
                throw new IndexOutOfBoundsException();
            }
            if (mPosition >= mLimit) {
                return -1;
            }
            // Clamp the request to the remaining window.
            if (mPosition + len > mLimit) {
                len = mLimit - mPosition;
            }
            if (len <= 0) {
                return 0;
            }
            // Use the actual number of bytes read; the underlying stream may
            // return fewer than requested, or -1 on EOF.
            int bytesRead = is.read(b, off, len);
            if (bytesRead > 0) {
                mPosition += bytesRead;
            }
            return bytesRead;
        }

        @Override
        public long skip(long len) throws IOException {
            if (mPosition + len > mLimit) {
                len = mLimit - mPosition;
            }
            if (len <= 0) {
                return 0;
            }
            // Advance by what was actually skipped, not by the request.
            long skipped = is.skip(len);
            mPosition += skipped;
            return skipped;
        }

        @Override
        public int available() {
            return mLimit - mPosition;
        }

        @Override
        public boolean markSupported() {
            return is.markSupported();
        }

        @Override
        public synchronized void mark(int readlimit) {
            is.mark(readlimit);
            mMark = mPosition;
        }

        @Override
        public synchronized void reset() throws IOException {
            is.reset();
            mPosition = mMark;
        }

        @Override
        public void close() throws IOException {
            is.close();
        }
    };
}
/**
 * Verifies that the limited stream's read(byte[], off, len) rejects a
 * negative offset with IndexOutOfBoundsException, and that the stream is
 * closed even when the assertion path throws.
 */
@Test
void testReadWithWrongOffset() {
    Assertions.assertThrows(IndexOutOfBoundsException.class, () -> {
        InputStream is = StreamUtilsTest.class.getResourceAsStream("/StreamUtilsTest.txt");
        try {
            is = StreamUtils.limitedInputStream(is, 2);
            is.read(new byte[1], -1, 1);
        } finally {
            if (is != null) {
                is.close();
            }
        }
    });
}
/**
 * Delegates the transformation to the wrapped transformer, supplying the
 * processing context captured during init().
 *
 * @param readOnlyKey the record key (must not be mutated)
 * @param value       the record value
 * @return the transformed result
 * @throws IllegalStateException if init() was never called (no context)
 */
@Override
public R transform(final K readOnlyKey, final GenericRow value) {
    return delegate.transform(
        readOnlyKey,
        value,
        context.orElseThrow(() -> new IllegalStateException("Not initialized"))
    );
}
/**
 * Verifies that the processing context handed to the delegate exposes the
 * record's row time.
 */
@Test
public void shouldExposeRowTime() {
    // Given:
    ksTransformer.transform(KEY, VALUE);
    final KsqlProcessingContext ksqlCtx = getKsqlProcessingContext();
    // When:
    final long rowTime = ksqlCtx.getRowTime();
    // Then:
    assertThat(rowTime, is(ROWTIME));
}
/**
 * Renders one frame of the "top" screen: fetches application reports, sorts
 * them with the active comparator (honoring sort direction), gathers node and
 * queue metrics, and prints the header plus the application table. Printing
 * happens under {@code lock} to avoid interleaving with other output.
 * Fetch failures are logged and the frame is skipped.
 */
@VisibleForTesting
void showTopScreen() {
    List<ApplicationInformation> appsInfo = new ArrayList<>();
    List<ApplicationReport> apps;
    try {
        apps = fetchAppReports();
    } catch (Exception e) {
        LOG.error("Unable to get application information", e);
        return;
    }
    for (ApplicationReport appReport : apps) {
        ApplicationInformation appInfo = new ApplicationInformation(appReport);
        appsInfo.add(appInfo);
    }
    // Sort direction is a toggle; descending uses the reversed comparator.
    if (ascendingSort) {
        Collections.sort(appsInfo, comparator);
    } else {
        Collections.sort(appsInfo, Collections.reverseOrder(comparator));
    }
    NodesInformation nodesInfo = getNodesInfo();
    QueueMetrics queueMetrics = getQueueMetrics();
    String header = getHeader(queueMetrics, nodesInfo);
    String appsStr = getPrintableAppInformation(appsInfo);
    synchronized (lock) {
        printHeader(header);
        printApps(appsStr);
        // Re-position the cursor for the next interactive frame.
        System.out.print(SET_CURSOR_LINE_7_COLUMN_0);
        System.out.print(CLEAR_LINE);
    }
}
/**
 * Verifies that the top-screen header reports every NodeManager state
 * (total, active, unhealthy, decommissioning, decommissioned, lost,
 * rebooted, shutdown) with the values supplied by the cluster metrics.
 * Captures System.out to inspect the rendered header.
 */
@Test
public void testHeaderNodeManagers() throws Exception {
    YarnClusterMetrics ymetrics = mock(YarnClusterMetrics.class);
    when(ymetrics.getNumNodeManagers()).thenReturn(0);
    when(ymetrics.getNumDecommissioningNodeManagers()).thenReturn(1);
    when(ymetrics.getNumDecommissionedNodeManagers()).thenReturn(2);
    when(ymetrics.getNumActiveNodeManagers()).thenReturn(3);
    when(ymetrics.getNumLostNodeManagers()).thenReturn(4);
    when(ymetrics.getNumUnhealthyNodeManagers()).thenReturn(5);
    when(ymetrics.getNumRebootedNodeManagers()).thenReturn(6);
    when(ymetrics.getNumShutdownNodeManagers()).thenReturn(7);
    YarnClient client = mock(YarnClient.class);
    when(client.getYarnClusterMetrics()).thenReturn(ymetrics);
    // Subclass skips real client creation; the mock is injected instead.
    TopCLI topcli = new TopCLI() {
        @Override
        protected void createAndStartYarnClient() {
        }
    };
    topcli.setClient(client);
    topcli.terminalWidth = 200;
    String actual;
    try (ByteArrayOutputStream outStream = new ByteArrayOutputStream();
         PrintStream out = new PrintStream(outStream)) {
        System.setOut(out);
        System.setErr(out);
        topcli.showTopScreen();
        out.flush();
        actual = outStream.toString(StandardCharsets.UTF_8.name());
    }
    String expected = "NodeManager(s)"
        + ": 0 total, 3 active, 5 unhealthy, 1 decommissioning,"
        + " 2 decommissioned, 4 lost, 6 rebooted, 7 shutdown";
    Assert.assertTrue(
        String.format("Expected output to contain [%s], actual output was [%s].", expected, actual),
        actual.contains(expected));
}
/**
 * Updates the set of namespaces watched by the operator's registered
 * controllers.
 * <p>
 * The update is rejected (returning false) when: the new set equals the
 * current one (no-op), the operator was started at cluster level (current set
 * empty — scope cannot be narrowed), or the new set is null/empty. Otherwise
 * each controller that allows namespace changes is updated, and the watched
 * set is replaced with a defensive copy.
 *
 * @param namespaces the new namespaces to watch; must be non-null and non-empty
 * @return true if the watched set was updated, false otherwise
 */
protected boolean updateWatchingNamespaces(Set<String> namespaces) {
    if (watchedNamespaces.equals(namespaces)) {
        log.info("No watched namespace change detected");
        return false;
    }
    // An empty watched set means the operator watches the whole cluster.
    if (watchedNamespaces.isEmpty()) {
        log.info("Cannot update watch namespaces for operator started at cluster level.");
        return false;
    }
    if (namespaces == null || namespaces.isEmpty()) {
        // Fixed garbled message ("Cannot updating namespaces to empty").
        log.error("Cannot update namespaces to an empty set");
        return false;
    }
    registeredSparkControllers.forEach(
        c -> {
            if (c.allowsNamespaceChanges()) {
                log.info("Updating operator namespaces to {}", namespaces);
                c.changeNamespaces(namespaces);
            } else {
                log.error("Controller does not allow namespace change, skipping namespace change.");
            }
        });
    // Defensive copy so later caller-side mutation cannot affect our state.
    this.watchedNamespaces = new HashSet<>(namespaces);
    return true;
}
/**
 * Verifies that with dynamic config enabled, updateWatchingNamespaces()
 * propagates the new namespace set to controllers that allow namespace
 * changes and replaces the operator's watched set. Heavy use of static and
 * construction mocks isolates SparkOperator from Kubernetes and metrics
 * infrastructure; the original DYNAMIC_CONFIG_ENABLED value is restored in
 * the finally block.
 */
@Test
void testUpdateWatchedNamespacesWithDynamicConfigEnabled() {
    MetricsSystem mockMetricsSystem = mock(MetricsSystem.class);
    KubernetesClient mockClient = mock(KubernetesClient.class);
    var registeredController = mock(RegisteredController.class);
    when(registeredController.allowsNamespaceChanges()).thenReturn(true);
    // Remember the flag so it can be restored after the test.
    boolean dynamicConfigEnabled = SparkOperatorConf.DYNAMIC_CONFIG_ENABLED.getValue();
    try (MockedStatic<MetricsSystemFactory> mockMetricsSystemFactory = mockStatic(MetricsSystemFactory.class);
         MockedStatic<KubernetesClientFactory> mockKubernetesClientFactory = mockStatic(KubernetesClientFactory.class);
         MockedStatic<Utils> mockUtils = mockStatic(Utils.class);
         MockedConstruction<Operator> operatorConstruction = mockConstruction(
             Operator.class,
             (mock, context) -> {
                 // Every reconciler registration yields the same mocked controller.
                 when(mock.register(any(SparkAppReconciler.class), any(Consumer.class)))
                     .thenReturn(registeredController);
                 when(mock.register(any(SparkClusterReconciler.class), any(Consumer.class)))
                     .thenReturn(registeredController);
             });
         MockedConstruction<SparkAppReconciler> sparkAppReconcilerConstruction = mockConstruction(SparkAppReconciler.class);
         MockedConstruction<SparkOperatorConfigMapReconciler> configReconcilerConstruction = mockConstruction(SparkOperatorConfigMapReconciler.class);
         MockedConstruction<ProbeService> probeServiceConstruction = mockConstruction(ProbeService.class);
         MockedConstruction<MetricsService> metricsServiceConstruction = mockConstruction(MetricsService.class);
         MockedConstruction<KubernetesMetricsInterceptor> interceptorMockedConstruction = mockConstruction(KubernetesMetricsInterceptor.class)) {
        setConfigKey(SparkOperatorConf.DYNAMIC_CONFIG_ENABLED, true);
        mockMetricsSystemFactory
            .when(MetricsSystemFactory::createMetricsSystem)
            .thenReturn(mockMetricsSystem);
        mockKubernetesClientFactory
            .when(() -> KubernetesClientFactory.buildKubernetesClient(any()))
            .thenReturn(mockClient);
        // Operator starts out watching a single namespace.
        mockUtils.when(Utils::getWatchedNamespaces).thenReturn(Collections.singleton("namespace-1"));
        SparkOperator sparkOperator = new SparkOperator();
        Set<String> updatedNamespaces = Set.of("namespace-1", "namespace-2");
        Assertions.assertTrue(sparkOperator.updateWatchingNamespaces(updatedNamespaces));
        Assertions.assertEquals(updatedNamespaces, sparkOperator.watchedNamespaces);
        verify(registeredController).allowsNamespaceChanges();
        verify(registeredController).changeNamespaces(updatedNamespaces);
        verifyNoMoreInteractions(registeredController);
    } finally {
        // Restore global config to avoid leaking state into other tests.
        setConfigKey(SparkOperatorConf.DYNAMIC_CONFIG_ENABLED, dynamicConfigEnabled);
    }
}
/**
 * Refreshes the activity state for a session: re-reads the session info and,
 * when the session belongs to a gateway session configured to overwrite
 * activity time, bumps the state's last-recorded time to the maximum of the
 * session's and the gateway's.
 *
 * @param sessionId the session whose state is being updated
 * @param state     the mutable activity state to refresh
 * @return the (same) updated state, or null if the session is gone
 */
@Override
protected ActivityState<TransportProtos.SessionInfoProto> updateState(UUID sessionId, ActivityState<TransportProtos.SessionInfoProto> state) {
    SessionMetaData session = sessions.get(sessionId);
    if (session == null) {
        return null;
    }
    // Always refresh metadata from the live session record.
    state.setMetadata(session.getSessionInfo());
    var sessionInfo = state.getMetadata();
    // A zero MSB or LSB means there is no associated gateway session.
    if (sessionInfo.getGwSessionIdMSB() == 0L || sessionInfo.getGwSessionIdLSB() == 0L) {
        return state;
    }
    var gwSessionId = new UUID(sessionInfo.getGwSessionIdMSB(), sessionInfo.getGwSessionIdLSB());
    SessionMetaData gwSession = sessions.get(gwSessionId);
    // Gateway activity only propagates when explicitly enabled on the gateway.
    if (gwSession == null || !gwSession.isOverwriteActivityTime()) {
        return state;
    }
    long lastRecordedTime = state.getLastRecordedTime();
    long gwLastRecordedTime = getLastRecordedTime(gwSessionId);
    log.debug("Session with id: [{}] has gateway session with id: [{}] with overwrite activity time enabled. " +
            "Updating last activity time. Session last recorded time: [{}], gateway session last recorded time: [{}].",
            sessionId, gwSessionId, lastRecordedTime, gwLastRecordedTime);
    state.setLastRecordedTime(Math.max(lastRecordedTime, gwLastRecordedTime));
    return state;
}
/**
 * Verifies that when the gateway session does NOT enable overwrite-activity-
 * time, updateState() returns the same state instance with refreshed session
 * info but leaves the last-recorded time untouched and never consults the
 * gateway's last recorded time.
 */
@Test
void givenHasGwSessionWithoutOverwriteEnabled_whenUpdatingActivityState_thenShouldReturnSameInstanceWithUpdatedSessionInfo() {
    // GIVEN
    var gwSessionId = UUID.fromString("19864038-9b48-11ee-b9d1-0242ac120002");
    TransportProtos.SessionInfoProto gwSessionInfo = TransportProtos.SessionInfoProto.newBuilder()
            .setSessionIdMSB(gwSessionId.getMostSignificantBits())
            .setSessionIdLSB(gwSessionId.getLeastSignificantBits())
            .build();
    SessionMsgListener gwListenerMock = mock(SessionMsgListener.class);
    // Gateway session registered without overwrite-activity-time.
    sessions.put(gwSessionId, new SessionMetaData(gwSessionInfo, TransportProtos.SessionType.ASYNC, gwListenerMock));
    TransportProtos.SessionInfoProto sessionInfo = TransportProtos.SessionInfoProto.newBuilder()
            .setSessionIdMSB(SESSION_ID.getMostSignificantBits())
            .setSessionIdLSB(SESSION_ID.getLeastSignificantBits())
            .setGwSessionIdMSB(gwSessionId.getMostSignificantBits())
            .setGwSessionIdLSB(gwSessionId.getLeastSignificantBits())
            .build();
    SessionMsgListener listenerMock = mock(SessionMsgListener.class);
    sessions.put(SESSION_ID, new SessionMetaData(sessionInfo, TransportProtos.SessionType.ASYNC, listenerMock));
    long lastRecordedTime = 123L;
    ActivityState<TransportProtos.SessionInfoProto> state = new ActivityState<>();
    state.setLastRecordedTime(lastRecordedTime);
    state.setMetadata(TransportProtos.SessionInfoProto.getDefaultInstance());
    when(transportServiceMock.updateState(SESSION_ID, state)).thenCallRealMethod();
    // WHEN
    ActivityState<TransportProtos.SessionInfoProto> updatedState = transportServiceMock.updateState(SESSION_ID, state);
    // THEN
    assertThat(updatedState).isSameAs(state);
    assertThat(updatedState.getLastRecordedTime()).isEqualTo(lastRecordedTime);
    assertThat(updatedState.getMetadata()).isEqualTo(sessionInfo);
    verify(transportServiceMock, never()).getLastRecordedTime(gwSessionId);
}
/** Returns the effective metadata version for image writing. */
public MetadataVersion metadataVersion() {
    return metadataVersion;
}
/**
 * Verifies that versions below the minimum bootstrap version are clamped to
 * the minimum KRaft version (while the requested version is preserved), and
 * versions at/above it are used as-is.
 */
@Test
public void testSetMetadataVersion() {
    for (int i = MetadataVersion.MINIMUM_KRAFT_VERSION.ordinal();
         i < MetadataVersion.VERSIONS.length; i++) {
        MetadataVersion version = MetadataVersion.VERSIONS[i];
        ImageWriterOptions.Builder options = new ImageWriterOptions.Builder().
            setMetadataVersion(version);
        if (i < MetadataVersion.MINIMUM_BOOTSTRAP_VERSION.ordinal()) {
            // Too old to write: clamped, but the request is remembered.
            assertEquals(MetadataVersion.MINIMUM_KRAFT_VERSION, options.metadataVersion());
            assertEquals(version, options.requestedMetadataVersion());
        } else {
            assertEquals(version, options.metadataVersion());
        }
    }
}
/**
 * Lists all databases in the catalog: each direct sub-directory of the
 * catalog path is treated as a database.
 *
 * @return the database names (directory names under the catalog path)
 * @throws CatalogException if the file system listing fails
 */
@Override
public List<String> listDatabases() throws CatalogException {
    try {
        FileStatus[] fileStatuses = fs.listStatus(catalogPath);
        return Arrays.stream(fileStatuses)
                .filter(FileStatus::isDirectory)
                .map(fileStatus -> fileStatus.getPath().getName())
                .collect(Collectors.toList());
    } catch (IOException e) {
        throw new CatalogException("Listing database exception.", e);
    }
}
/**
 * Verifies that listDatabases() returns the default database and does not
 * invent non-existent ones.
 */
@Test
public void testListDatabases() {
    List<String> actual = catalog.listDatabases();
    assertTrue(actual.contains(TEST_DEFAULT_DATABASE));
    assertFalse(actual.contains(NONE_EXIST_DATABASE));
}
/**
 * Delivers alarm messages to DingTalk webhooks. Messages are grouped by their
 * hook name; for each group with a matching, non-empty settings entry, every
 * configured webhook receives one POST per alarm message, with the message
 * text rendered through the hook's text template.
 *
 * @param alarmMessages the alarms to deliver
 * @throws Exception if posting to a webhook fails
 */
@Override
public void doAlarm(List<AlarmMessage> alarmMessages) throws Exception {
    Map<String, DingtalkSettings> settingsMap = alarmRulesWatcher.getDingtalkSettings();
    if (settingsMap == null || settingsMap.isEmpty()) {
        return;
    }
    Map<String, List<AlarmMessage>> groupedMessages = groupMessagesByHook(alarmMessages);
    for (Map.Entry<String, List<AlarmMessage>> entry : groupedMessages.entrySet()) {
        var hookName = entry.getKey();
        var messages = entry.getValue();
        var setting = settingsMap.get(hookName);
        // Skip hooks without settings, without webhooks, or without messages.
        if (setting == null || CollectionUtils.isEmpty(setting.getWebhooks()) || CollectionUtils.isEmpty(
            messages)) {
            continue;
        }
        for (final var webHookUrl : setting.getWebhooks()) {
            // getUrl may append a signature depending on the webhook's secret.
            final var url = getUrl(webHookUrl);
            for (final var alarmMessage : messages) {
                final var requestBody = String.format(
                    setting.getTextTemplate(),
                    alarmMessage.getAlarmMessage()
                );
                post(URI.create(url), requestBody, Map.of());
            }
        }
    }
}
/**
 * Verifies delivery to an unsigned DingTalk webhook: two alarms bound to two
 * different hook settings are both posted to the local stub endpoint, which
 * flips IS_SUCCESS when the expected payloads arrive.
 */
@Test
public void testDingtalkWebhookWithoutSign() throws Exception {
    List<DingtalkSettings.WebHookUrl> webHooks = new ArrayList<>();
    // Empty secret => no signature appended to the URL.
    webHooks.add(new DingtalkSettings.WebHookUrl("", "http://127.0.0.1:" + SERVER.httpPort() + "/dingtalkhook/receiveAlarm?token=dummy_token"));
    Rules rules = new Rules();
    String template = "{\"msgtype\":\"text\",\"text\":{\"content\":\"Skywaling alarm: %s\"}}";
    DingtalkSettings setting1 = new DingtalkSettings("setting1", AlarmHooksType.dingtalk, true);
    setting1.setWebhooks(webHooks);
    setting1.setTextTemplate(template);
    DingtalkSettings setting2 = new DingtalkSettings("setting2", AlarmHooksType.dingtalk, false);
    setting2.setWebhooks(webHooks);
    setting2.setTextTemplate(template);
    rules.getDingtalkSettingsMap().put(setting1.getFormattedName(), setting1);
    rules.getDingtalkSettingsMap().put(setting2.getFormattedName(), setting2);
    AlarmRulesWatcher alarmRulesWatcher = new AlarmRulesWatcher(rules, null);
    DingtalkHookCallback dingtalkCallBack = new DingtalkHookCallback(alarmRulesWatcher);
    List<AlarmMessage> alarmMessages = new ArrayList<>(2);
    AlarmMessage alarmMessage = new AlarmMessage();
    alarmMessage.setScopeId(DefaultScopeDefine.SERVICE);
    alarmMessage.setRuleName("service_resp_time_rule");
    alarmMessage.setAlarmMessage("alarmMessage with [DefaultScopeDefine.All]");
    alarmMessage.getHooks().add(setting1.getFormattedName());
    alarmMessages.add(alarmMessage);
    AlarmMessage anotherAlarmMessage = new AlarmMessage();
    anotherAlarmMessage.setRuleName("service_resp_time_rule_2");
    anotherAlarmMessage.setScopeId(DefaultScopeDefine.ENDPOINT);
    anotherAlarmMessage.setAlarmMessage("anotherAlarmMessage with [DefaultScopeDefine.Endpoint]");
    anotherAlarmMessage.getHooks().add(setting2.getFormattedName());
    alarmMessages.add(anotherAlarmMessage);
    dingtalkCallBack.doAlarm(alarmMessages);
    Assertions.assertTrue(IS_SUCCESS.get());
}
/**
 * Returns this plugin's execution order in the plugin chain.
 *
 * @return the basic-auth plugin code used as its chain order
 */
@Override
public int getOrder() {
    final int order = PluginEnum.BASIC_AUTH.getCode();
    return order;
}
/** The plugin order must equal the BASIC_AUTH plugin code. */
@Test
public void testGetOrder() {
    final int actualOrder = basicAuthPlugin.getOrder();
    Assertions.assertEquals(PluginEnum.BASIC_AUTH.getCode(), actualOrder);
}
/**
 * Drives the client side of the MySQL connection-phase handshake.
 * Handles, in order of arrival: server handshake, auth-switch request,
 * caching_sha2 "auth more data", OK (success) and ERR (failure) packets.
 *
 * @throws PipelineInternalException when the server answers with an ERR packet
 */
@SneakyThrows(NoSuchAlgorithmException.class)
@Override
public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
    if (msg instanceof MySQLHandshakePacket) {
        MySQLHandshakePacket handshake = (MySQLHandshakePacket) msg;
        MySQLHandshakeResponse41Packet handshakeResponsePacket = new MySQLHandshakeResponse41Packet(MAX_PACKET_SIZE, CHARACTER_SET, username);
        // Scramble the credentials with the server-provided plugin data.
        handshakeResponsePacket.setAuthResponse(generateAuthResponse(handshake.getAuthPluginData().getAuthenticationPluginData()));
        handshakeResponsePacket.setCapabilityFlags(generateClientCapability());
        handshakeResponsePacket.setAuthPluginName(MySQLAuthenticationMethod.NATIVE);
        ctx.channel().writeAndFlush(handshakeResponsePacket);
        // Remember the server version to report on successful authentication.
        serverVersion = new MySQLServerVersion(handshake.getServerVersion());
        return;
    }
    if (msg instanceof MySQLAuthSwitchRequestPacket) {
        MySQLAuthSwitchRequestPacket authSwitchRequest = (MySQLAuthSwitchRequestPacket) msg;
        ctx.channel().writeAndFlush(new MySQLAuthSwitchResponsePacket(getAuthPluginResponse(authSwitchRequest)));
        // Keep the new plugin data (seed) for a possible caching_sha2 follow-up.
        seed = authSwitchRequest.getAuthPluginData().getAuthenticationPluginData();
        return;
    }
    if (msg instanceof MySQLAuthMoreDataPacket) {
        MySQLAuthMoreDataPacket authMoreData = (MySQLAuthMoreDataPacket) msg;
        handleCachingSha2Auth(ctx, authMoreData);
        return;
    }
    if (msg instanceof MySQLOKPacket) {
        // Authentication finished: detach this handler and complete the callback.
        ctx.channel().pipeline().remove(this);
        authResultCallback.setSuccess(serverVersion);
        return;
    }
    // Any remaining packet type is treated as an error response.
    MySQLErrPacket error = (MySQLErrPacket) msg;
    ctx.channel().close();
    throw new PipelineInternalException(error.getErrorMessage());
}
@Test
void assertChannelReadOkPacket() throws ReflectiveOperationException {
    MySQLOKPacket okPacket = new MySQLOKPacket(0);
    MySQLServerVersion serverVersion = new MySQLServerVersion("5.5.0-log");
    // Inject the private serverVersion field so the handler can report it on success.
    Plugins.getMemberAccessor().set(MySQLNegotiateHandler.class.getDeclaredField("serverVersion"), mysqlNegotiateHandler, serverVersion);
    mysqlNegotiateHandler.channelRead(channelHandlerContext, okPacket);
    // An OK packet must remove the handler from the pipeline and complete the callback.
    verify(pipeline).remove(mysqlNegotiateHandler);
    verify(authResultCallback).setSuccess(serverVersion);
}
/**
 * Associates the value with the key and returns the previously mapped
 * value (or {@code null} if there was none), via JCache's getAndPut.
 */
@Override
public V put(K key, V value) {
    final V previous = cache.getAndPut(key, value);
    return previous;
}
/** put must return the previous value and store the new one. */
@Test
public void testPut() {
    cache.put(42, "oldValue");
    final String previousValue = adapter.put(42, "newValue");
    assertEquals("oldValue", previousValue);
    assertEquals("newValue", cache.get(42));
}
/**
 * Formats an int as exactly eight lowercase hex digits (zero-padded,
 * treating the value as unsigned).
 *
 * @param v value to format
 * @return eight-character lowercase hex string
 */
public static String u4(int v) {
    StringBuilder hex = new StringBuilder(8);
    // Emit nibbles from most significant to least significant.
    for (int shift = 28; shift >= 0; shift -= 4) {
        hex.append(Character.forDigit((v >> shift) & 0xf, 16));
    }
    return hex.toString();
}
/** Spot-checks u4 against known int-to-hex pairs. */
@Test
public void testU4() {
    final int[] inputs = {0, 12345678, 1234567890};
    final String[] expected = {"00000000", "00bc614e", "499602d2"};
    for (int i = 0; i < inputs.length; i++) {
        Assert.assertEquals(expected[i], Hex.u4(inputs[i]));
    }
}
/**
 * Builds a TLS client context that authenticates with the certificate and
 * private key carried by the given EC JWK, while trusting the platform's
 * default trust store for server certificates.
 *
 * @param ecKey EC key whose x5c chain supplies the client certificate
 * @return an initialized {@link SSLContext} for mutual-TLS connections
 * @throws IllegalArgumentException if the key carries no certificate chain
 * @throws IllegalStateException if the SSL context cannot be initialized
 */
@NonNull
public static SSLContext fromClientCertificate(@NonNull ECKey ecKey) {
    // see also:
    // https://connect2id.com/products/nimbus-oauth-openid-connect-sdk/examples/utils/custom-key-store
    if (ecKey.getParsedX509CertChain() == null || ecKey.getParsedX509CertChain().isEmpty()) {
        throw new IllegalArgumentException(
            "client key is missing certificate, kid: " + ecKey.getKeyID());
    }
    try {
        var ctx = SSLContext.getInstance("TLS");
        var tmf = TrustManagerFactory.getInstance("PKIX");
        // Using null here initialises with the default trust store.
        tmf.init((KeyStore) null);
        // Leaf certificate (first in the x5c chain) plus its private key form the key manager.
        ctx.init(
            keyManagerOf(ecKey.getParsedX509CertChain().get(0), ecKey.toPrivateKey()),
            tmf.getTrustManagers(),
            null);
        return ctx;
    } catch (JOSEException | GeneralSecurityException e) {
        throw new IllegalStateException("failed to initialize SSL context", e);
    }
}
/** A public-only JWK (no x5c certificate chain) must be rejected. */
@Test
void fromClientCertificate_noPrivate() throws Exception {
    var signingKey = generateSigningKey(URI.create(ISSUER));
    var publicOnly = signingKey.toPublicJWK();
    assertThrows(IllegalArgumentException.class, () -> TlsContext.fromClientCertificate(publicOnly));
}
/**
 * Cross-reference (foreign key) metadata is not supported by this implementation.
 *
 * @return always {@code null}
 */
@Override
public ResultSet getCrossReference(final String parentCatalog, final String parentSchema, final String parentTable,
                                   final String foreignCatalog, final String foreignSchema, final String foreignTable) {
    return null;
}
/** getCrossReference is unsupported and must return null. */
@Test
void assertGetCrossReference() {
    final ResultSet actual = metaData.getCrossReference("", "", "", "", "", "");
    assertNull(actual);
}
/**
 * Runs one demo update cycle, invoking each component in sequence:
 * input (with a fixed input value of 0), then physics, then graphics.
 */
public void demoUpdate() {
    inputComponent.update(this, 0);
    physicComponent.update(this);
    graphicComponent.update(this);
}
/** One demo update on the NPC must leave velocity and coordinate at 2. */
@Test
void npcDemoTest() {
    LOGGER.info("npcDemoTest:");
    npcTest.demoUpdate();
    final int expectedVelocity = 2;
    final int expectedCoordinate = 2;
    assertEquals(expectedVelocity, npcTest.getVelocity());
    assertEquals(expectedCoordinate, npcTest.getCoordinate());
}
/**
 * Reports whether this component is enabled.
 *
 * @return {@code true} when enabled
 */
public boolean isEnabled() {
    return this.enabled;
}
/** The properties default to enabled. */
@Test
public void isEnabled() {
    assertThat(properties.isEnabled()).isTrue();
}
/**
 * Converts a valid 18-digit Chinese ID card number to its 15-digit form by
 * dropping the century digits (positions 6-7) and the trailing check digit.
 * Blank or invalid input is returned unchanged.
 *
 * @param idCard 18-digit ID card number
 * @return the 15-digit form, or the input itself when it is not a valid 18-digit card
 */
public static String convert18To15(String idCard) {
    // Guard clause: leave anything that is not a valid 18-digit card untouched.
    if (StrUtil.isBlank(idCard) || !IdcardUtil.isValidCard18(idCard)) {
        return idCard;
    }
    return idCard.substring(0, 6) + idCard.substring(8, idCard.length() - 1);
}
/** An 18-digit card converts to the known 15-digit equivalent. */
@Test
public void convert18To15Test() {
    final String converted = IdcardUtil.convert18To15("150102198807303035");
    assertEquals(ID_15, converted);
}
/**
 * Resolves every value reachable through this field path, starting from the
 * record at the given ordinal of this path's root type.
 *
 * @param ordinal ordinal of the root record
 * @return the matched values; element types depend on the path's leaf field
 */
Object[] findValues(int ordinal) {
    return getAllValues(ordinal, type, 0);
}
/**
 * Verifies field-path resolution into a List&lt;Integer&gt; for three path styles:
 * partial auto-expand, full path with auto-expand, and full path without auto-expand.
 */
@Test
public void testListIntegerReference() throws Exception {
    ListType listType = new ListType();
    listType.intValues = Arrays.asList(1, 2, 3);
    objectMapper.add(listType);
    // Serialize and reload so the read engine holds the record under test.
    StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
    FieldPath fieldPath;
    Object[] values;
    //with partial auto expand
    fieldPath = new FieldPath(readStateEngine, "ListType", "intValues.element");
    values = fieldPath.findValues(0);
    Assert.assertEquals(1, (int) values[0]);
    Assert.assertEquals(2, (int) values[1]);
    Assert.assertEquals(3, (int) values[2]);
    //with auto expand but full path given
    fieldPath = new FieldPath(readStateEngine, "ListType", "intValues.element.value");
    values = fieldPath.findValues(0);
    Assert.assertEquals(1, (int) values[0]);
    Assert.assertEquals(2, (int) values[1]);
    Assert.assertEquals(3, (int) values[2]);
    //without auto expand but full path given
    fieldPath = new FieldPath(readStateEngine, "ListType", "intValues.element.value", false);
    values = fieldPath.findValues(0);
    Assert.assertEquals(1, (int) values[0]);
    Assert.assertEquals(2, (int) values[1]);
    Assert.assertEquals(3, (int) values[2]);
}
/**
 * Application entry point; boots the Spring application context.
 *
 * @param args command-line arguments forwarded to Spring Boot
 */
public static void main(String[] args) {
    SpringApplication.run(Main.class, args);
}
/** Smoke test: the application must start without throwing. */
@Test
void shouldExecuteApplicationWithoutException() {
    assertDoesNotThrow(() -> Main.main(new String[0]));
}
/**
 * Lists the clients connected to the given cluster node by issuing the
 * Redis CLIENT LIST command and converting each raw info line.
 *
 * @param node cluster node to query
 * @return parsed client info entries for that node
 */
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
    RedisClient entry = getEntry(node);
    RFuture<List<String>> future = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    List<String> list = syncFuture(future);
    // Zero-length array form is the recommended toArray idiom: the collection
    // allocates the correctly-sized array itself, avoiding a separate size() call.
    return CONVERTER.convert(list.toArray(new String[0]));
}
/** A live master node should report more than ten connected clients. */
@Test
public void testGetClientList() {
    final RedisClusterNode firstMaster = getFirstMaster();
    final List<RedisClientInfo> clients = connection.getClientList(firstMaster);
    assertThat(clients.size()).isGreaterThan(10);
}
/**
 * Creates a JsonPath expression from the given source and properties.
 * Delegates to {@code doCreateJsonPathExpression} with the final flag set to
 * {@code false} — presumably the predicate-mode switch; confirm against the overload.
 */
@Override
public Expression createExpression(Expression source, String expression, Object[] properties) {
    return doCreateJsonPathExpression(source, expression, properties, false);
}
/**
 * With SUPPRESS_EXCEPTIONS set, evaluating a missing path ($.foo) must yield
 * null instead of throwing.
 */
@Test
public void testSuppressException() {
    Exchange exchange = new DefaultExchange(context);
    exchange.getIn().setBody(new File("src/test/resources/type.json"));
    JsonPathLanguage lan = (JsonPathLanguage) context.resolveLanguage("jsonpath");
    // The eighth properties slot carries the JsonPath Option flags — the leading
    // nulls keep the earlier positional properties at their defaults.
    Expression exp = lan.createExpression("$.foo", new Object[] { null, null, null, null, null, null, null, Option.SUPPRESS_EXCEPTIONS });
    String nofoo = exp.evaluate(exchange, String.class);
    assertNull(nofoo);
}
/**
 * Counts the conditions in this chain, including this one, by walking
 * the {@code next} links to the end.
 *
 * @return number of chained conditions (at least 1)
 */
protected int getCount() {
    int count = 1;
    for (Condition<?> current = this; current.hasNext(); current = current.next) {
        count++;
    }
    return count;
}
/** The fixture condition chain contains exactly three links. */
@Test
public void testGetCount() {
    final int actualCount = TEST_CONDITION.getCount();
    Assertions.assertEquals(3, actualCount);
}