focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Handles an OffsetCommit request: validates the group, keeps the classic-group
 * session alive where applicable, and produces one offset-commit record per
 * accepted partition plus a per-partition response entry.
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);

    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }

    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    // Expiration timestamp is optional; derived from the request's retention time.
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);

    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);

        topic.partitions().forEach(partition -> {
            if (isMetadataInvalid(partition.committedMetadata())) {
                // Reject only this partition; other partitions can still commit.
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());

                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));

                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );

                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });

    // Only count commits that actually produced records.
    if (!records.isEmpty()) {
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }

    return new CoordinatorResult<>(records, response);
}
// Committing offsets with an unknown member id against an empty consumer group
// must be rejected with UnknownMemberIdException.
@Test
public void testConsumerGroupOffsetCommitWithUnknownMemberId() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();

    // Create an empty group.
    context.groupMetadataManager.getOrMaybeCreatePersistedConsumerGroup(
        "foo",
        true
    );

    // Verify that the request is rejected with the correct exception.
    assertThrows(UnknownMemberIdException.class, () -> context.commitOffset(
        new OffsetCommitRequestData()
            .setGroupId("foo")
            .setMemberId("member")
            .setGenerationIdOrMemberEpoch(10)
            .setTopics(Collections.singletonList(
                new OffsetCommitRequestData.OffsetCommitRequestTopic()
                    .setName("bar")
                    .setPartitions(Collections.singletonList(
                        new OffsetCommitRequestData.OffsetCommitRequestPartition()
                            .setPartitionIndex(0)
                            .setCommittedOffset(100L)
                    ))
            ))
    ));
}
/**
 * Authenticates a device from HTTP basic credentials. The username is parsed
 * into an account UUID and a device id via getIdentifierAndDeviceId; the
 * password is checked against the device's salted token hash. An
 * authentication counter (success flag + failure reason) is emitted on every
 * exit path via the finally block.
 */
@Override
public Optional<AuthenticatedDevice> authenticate(BasicCredentials basicCredentials) {
    boolean succeeded = false;
    String failureReason = null;

    try {
        final UUID accountUuid;
        final byte deviceId;
        {
            // Username carries both the account identifier and the device id.
            final Pair<String, Byte> identifierAndDeviceId = getIdentifierAndDeviceId(basicCredentials.getUsername());
            accountUuid = UUID.fromString(identifierAndDeviceId.first());
            deviceId = identifierAndDeviceId.second();
        }

        Optional<Account> account = accountsManager.getByAccountIdentifier(accountUuid);

        if (account.isEmpty()) {
            failureReason = "noSuchAccount";
            return Optional.empty();
        }

        Optional<Device> device = account.get().getDevice(deviceId);

        if (device.isEmpty()) {
            failureReason = "noSuchDevice";
            return Optional.empty();
        }

        SaltedTokenHash deviceSaltedTokenHash = device.get().getAuthTokenHash();

        if (deviceSaltedTokenHash.verify(basicCredentials.getPassword())) {
            succeeded = true;

            Account authenticatedAccount = updateLastSeen(account.get(), device.get());

            // Opportunistically upgrade stored credentials to the current hash version.
            if (deviceSaltedTokenHash.getVersion() != SaltedTokenHash.CURRENT_VERSION) {
                OLD_TOKEN_VERSION_COUNTER.increment();

                authenticatedAccount = accountsManager.updateDeviceAuthentication(
                    authenticatedAccount,
                    device.get(),
                    SaltedTokenHash.generateFor(basicCredentials.getPassword())); // new credentials have current version
            }

            return Optional.of(new AuthenticatedDevice(authenticatedAccount, device.get()));
        } else {
            failureReason = "incorrectPassword";
            return Optional.empty();
        }
    } catch (IllegalArgumentException | InvalidAuthorizationHeaderException iae) {
        // Raised for malformed usernames (e.g. UUID.fromString) or bad auth headers.
        failureReason = "invalidHeader";
        return Optional.empty();
    } finally {
        // Metrics run on every exit path, including exceptional ones.
        Tags tags = Tags.of(
            AUTHENTICATION_SUCCEEDED_TAG_NAME, String.valueOf(succeeded));

        if (StringUtils.isNotBlank(failureReason)) {
            tags = tags.and(AUTHENTICATION_FAILURE_REASON_TAG_NAME, failureReason);
        }

        Metrics.counter(AUTHENTICATION_COUNTER_NAME, tags).increment();
    }
}
// Authenticating with a random (nonexistent) account UUID must yield empty.
@Test
void testAuthenticateAccountNotFound() {
    assertThat(accountAuthenticator.authenticate(new BasicCredentials(UUID.randomUUID().toString(), "password")))
        .isEmpty();
}
// Creates a Read transform configured to map Cosmos DB documents to {@code classType}.
public static <T> Read<T> read(Class<T> classType) {
    return new AutoValue_CosmosIO_Read.Builder<T>().setClassType(classType).build();
}
// The source's size estimate should match the inserted test data, rounded to
// Cosmos DB's KB precision.
@Test
public void testEstimatedSizeBytes() throws Exception {
    Read<Family> read =
        CosmosIO.read(Family.class)
            .withContainer(CONTAINER)
            .withDatabase(DATABASE)
            .withCoder(SerializableCoder.of(Family.class));

    BoundedCosmosBDSource<Family> initialSource = new BoundedCosmosBDSource<>(read);

    // Cosmos DB precision is in KB. Inserted test data is ~3KB
    long estimatedSize = initialSource.getEstimatedSizeBytes(pipelineOptions);
    assertEquals("Wrong estimated size", 3072, estimatedSize);
}
/**
 * Decodes a JSON-RPC quantity into a BigInteger: plain long values are parsed
 * directly, otherwise the value must be a valid "0x"-prefixed hex quantity.
 *
 * @throws MessageDecodingException if the value is neither a long nor valid hex,
 *         or if the hex cannot be parsed as a number
 */
public static BigInteger decodeQuantity(String value) {
    if (isLongValue(value)) {
        return BigInteger.valueOf(Long.parseLong(value));
    }
    if (!isValidHexQuantity(value)) {
        throw new MessageDecodingException("Value must be in format 0x[0-9a-fA-F]+");
    }
    try {
        return parsePaddedNumberHex(value);
    } catch (NumberFormatException e) {
        // NOTE(review): this message looks truncated ("Negative ") — confirm the
        // intended wording before changing it; callers may match on it.
        throw new MessageDecodingException("Negative ", e);
    }
}
// A hex quantity without the mandatory "0x" prefix must be rejected.
@Test
public void testQuantityDecodeMissingPrefix() {
    assertThrows(MessageDecodingException.class, () -> Numeric.decodeQuantity("ff"));
}
/**
 * Decides whether an issue on the given component passes the configured
 * exclusion/inclusion patterns. Non-file components, and configurations with
 * no patterns at all, are always accepted; exclusions take precedence over
 * inclusions.
 */
public boolean accept(DefaultIssue issue, Component component) {
    boolean noPatternsConfigured = exclusionPatterns.isEmpty() && inclusionPatterns.isEmpty();
    if (component.getType() != FILE || noPatternsConfigured) {
        return true;
    }
    // Exclusion wins: only check inclusion when not excluded.
    return !isExclude(issue, component) && isInclude(issue, component);
}
// Only the configured (rule, component) pair is ignored; all other
// combinations must still be accepted.
@Test
public void ignore_some_rule_and_component() {
    IssueFilter underTest = newIssueFilter(newSettings(asList("xoo:x1", "**/xoo/File1*"), Collections.emptyList()));
    assertThat(underTest.accept(ISSUE_1, COMPONENT_1)).isFalse();
    assertThat(underTest.accept(ISSUE_1, COMPONENT_2)).isTrue();
    assertThat(underTest.accept(ISSUE_2, COMPONENT_1)).isTrue();
    assertThat(underTest.accept(ISSUE_2, COMPONENT_2)).isTrue();
}
// Sets the identity context associated with this authentication context.
public void setIdentityContext(IdentityContext identityContext) {
    this.identityContext = identityContext;
}
// The setter stores the exact instance; the context starts out null.
@Test
void testSetIdentityContext() {
    IdentityContext identityContext = new IdentityContext();
    assertNull(authContext.getIdentityContext());
    authContext.setIdentityContext(identityContext);
    assertSame(identityContext, authContext.getIdentityContext());
}
/**
 * Checks whether an index of the given type exists for the column by testing
 * for the presence of its backing file on disk.
 */
@Override
public boolean hasIndexFor(String column, IndexType<?, ?, ?> type) {
    return getFileFor(column, type).exists();
}
// A sealed native text index must be detected by hasIndexFor.
@Test
public void nativeTextIndexIsRecognized() throws IOException {
    // See https://github.com/apache/pinot/issues/11529
    try (FilePerIndexDirectory fpi = new FilePerIndexDirectory(TEMP_DIR, _segmentMetadata, ReadMode.mmap);
        NativeTextIndexCreator fooCreator = new NativeTextIndexCreator("foo", TEMP_DIR)) {
      fooCreator.add("{\"clean\":\"this\"}");
      fooCreator.seal();

      assertTrue(fpi.hasIndexFor("foo", StandardIndexes.text()), "Native text index not found");
    }
}
/**
 * Fetches the session windows for a key, returning the first underlying store
 * iterator that has results. Empty iterators are closed immediately so no
 * resources leak.
 *
 * @throws NullPointerException       if key is null
 * @throws InvalidStateStoreException if an underlying store is no longer
 *                                    available (e.g. migrated to another instance)
 */
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType);
    for (final ReadOnlySessionStore<K, V> store : stores) {
        try {
            final KeyValueIterator<Windowed<K>, V> result = store.fetch(key);
            if (!result.hasNext()) {
                // Nothing here: close and try the next store.
                result.close();
            } else {
                return result;
            }
        } catch (final InvalidStateStoreException ise) {
            // Re-throw with a user-actionable message.
            throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" +
                " and may have been migrated to another instance; " +
                "please re-discover its location from the state metadata. " +
                "Original error message: " + ise);
        }
    }
    return KeyValueIterators.emptyIterator();
}
// fetch() must surface all session windows stored for the key, in order.
@Test
public void shouldFetchResultsFromUnderlyingSessionStore() {
    underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 1L);
    underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(10, 10)), 2L);
    final List<KeyValue<Windowed<String>, Long>> results = toList(sessionStore.fetch("a"));
    assertEquals(
        Arrays.asList(
            KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 1L),
            KeyValue.pair(new Windowed<>("a", new SessionWindow(10, 10)), 2L)),
        results);
}
/**
 * Appends raw bytes to the current variable-width entry, lazily initializing
 * the builder's capacity on first write.
 */
@Override
public BlockBuilder writeBytes(Slice source, int sourceIndex, int length) {
    checkValidSliceRange(sourceIndex, length);

    if (!initialized) {
        initializeCapacity();
    }

    sliceOutput.writeBytes(source, sourceIndex, length);
    currentEntrySize += length;
    return this;
}
// Random-length prefixes of the input must round-trip through writeBytes,
// both via the live builder and the built (immutable) block.
@Test
public void testWriteBytes() {
    int entries = 100;
    String inputChars = "abcdefghijklmnopqrstuvwwxyz01234566789!@#$%^";
    VariableWidthBlockBuilder blockBuilder = new VariableWidthBlockBuilder(null, entries, entries);
    List<String> values = new ArrayList<>();
    Random rand = new Random(0);
    byte[] bytes = inputChars.getBytes(UTF_8);
    // All input chars are single-byte in UTF-8, so substring lengths map 1:1 to byte lengths.
    assertEquals(bytes.length, inputChars.length());
    for (int i = 0; i < entries; i++) {
        int valueLength = rand.nextInt(bytes.length);
        VARCHAR.writeBytes(blockBuilder, bytes, 0, valueLength);
        values.add(inputChars.substring(0, valueLength));
    }
    verifyBlockValues(blockBuilder, values);
    verifyBlockValues(blockBuilder.build(), values);
}
// Builds the metric registry and attaches an SLF4J reporter that logs rates
// (per second) and durations (ms) at DEBUG on a fixed interval.
// NOTE(review): the reporter is started here and never stopped — confirm the
// registry's lifetime matches the process lifetime.
static MetricRegistry createMetricRegistry() {
    MetricRegistry registry = new MetricRegistry();
    final Slf4jReporter reporter = Slf4jReporter.forRegistry(registry)
        .outputTo(LOG)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .withLoggingLevel(Slf4jReporter.LoggingLevel.DEBUG)
        .build();
    reporter.start(DEFAULT_REPORTING_INTERVAL_SECONDS, TimeUnit.SECONDS);
    return registry;
}
// The factory must produce a non-null registry.
@Test
public void testCreateMetricRegistry() {
    MetricRegistry registry = MetricsComponent.createMetricRegistry();
    assertThat(registry, is(notNullValue()));
}
/**
 * Drains up to {@code maxElements} calls into {@code c}, visiting the
 * sub-queues in index (priority) order.
 *
 * @return the number of elements actually drained
 */
@Override
public int drainTo(Collection<? super E> c, int maxElements) {
    // initially take all permits to stop consumers from modifying queues
    // while draining. will restore any excess when done draining.
    final int permits = semaphore.drainPermits();
    final int numElements = Math.min(maxElements, permits);

    int numRemaining = numElements;
    for (int i=0; numRemaining > 0 && i < queues.size(); i++) {
        numRemaining -= queues.get(i).drainTo(c, numRemaining);
    }

    int drained = numElements - numRemaining;
    if (permits > drained) { // restore unused permits.
        semaphore.release(permits - drained);
    }
    return drained;
}
// Draining must move every queued call into the target queue, leaving the
// source empty.
@SuppressWarnings("deprecation")
@Test
public void testDrainTo() {
    Configuration conf = new Configuration();
    conf.setInt("ns." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
    FairCallQueue<Schedulable> fcq2 = new FairCallQueue<Schedulable>(2, 10, "ns", conf);

    // Start with 3 in fcq, to be drained
    for (int i = 0; i < 3; i++) {
        fcq.offer(mockCall("c"));
    }
    fcq.drainTo(fcq2);

    assertEquals(0, fcq.size());
    assertEquals(3, fcq2.size());
}
/**
 * Reads fragments from a term buffer, delivering each non-padding frame to the
 * handler. The subscriber position is advanced in the finally block past all
 * bytes consumed (including padding frames), even if the handler throws.
 *
 * @return the number of fragments delivered to the handler
 */
public static int read(
    final UnsafeBuffer termBuffer,
    final int termOffset,
    final FragmentHandler handler,
    final int fragmentsLimit,
    final Header header,
    final ErrorHandler errorHandler,
    final long currentPosition,
    final Position subscriberPosition) {
    int fragmentsRead = 0;
    int offset = termOffset;
    final int capacity = termBuffer.capacity();
    header.buffer(termBuffer);

    try {
        while (fragmentsRead < fragmentsLimit && offset < capacity) {
            final int frameLength = frameLengthVolatile(termBuffer, offset);
            if (frameLength <= 0) {
                // Frame not yet committed by the publisher: stop reading.
                break;
            }

            final int frameOffset = offset;
            offset += BitUtil.align(frameLength, FRAME_ALIGNMENT);

            // Padding frames advance the offset but are never delivered.
            if (!isPaddingFrame(termBuffer, frameOffset)) {
                ++fragmentsRead;
                header.offset(frameOffset);
                handler.onFragment(termBuffer, frameOffset + HEADER_LENGTH, frameLength - HEADER_LENGTH, header);
            }
        }
    } catch (final Exception ex) {
        errorHandler.onError(ex);
    } finally {
        final long newPosition = currentPosition + (offset - termOffset);
        if (newPosition > currentPosition) {
            subscriberPosition.setOrdered(newPosition);
        }
    }

    return fragmentsRead;
}
// Reading the final frame of the term must deliver exactly one fragment and
// advance the subscriber position to the end of the term buffer.
@Test
void shouldReadLastMessage() {
    final int msgLength = 1;
    final int frameLength = HEADER_LENGTH + msgLength;
    final int alignedFrameLength = align(frameLength, FRAME_ALIGNMENT);
    final int frameOffset = TERM_BUFFER_CAPACITY - alignedFrameLength;
    final long startingPosition = LogBufferDescriptor.computePosition(
        INITIAL_TERM_ID, frameOffset, POSITION_BITS_TO_SHIFT, INITIAL_TERM_ID);

    when(termBuffer.getIntVolatile(frameOffset)).thenReturn(frameLength);
    when(termBuffer.getShort(typeOffset(frameOffset))).thenReturn((short)HDR_TYPE_DATA);
    when(subscriberPosition.getVolatile()).thenReturn(startingPosition);

    final int readOutcome = TermReader.read(
        termBuffer, frameOffset, handler, Integer.MAX_VALUE, header, errorHandler, startingPosition, subscriberPosition);
    assertEquals(1, readOutcome);

    // The frame must be read, handed to the handler, then the position updated — in that order.
    final InOrder inOrder = inOrder(termBuffer, handler, subscriberPosition);
    inOrder.verify(termBuffer).getIntVolatile(frameOffset);
    inOrder.verify(handler).onFragment(
        eq(termBuffer), eq(frameOffset + HEADER_LENGTH), eq(msgLength), any(Header.class));
    inOrder.verify(subscriberPosition).setOrdered(TERM_BUFFER_CAPACITY);
}
/**
 * Parses a TMX stream via SAX, emitting XHTML through the handler and
 * populating metadata. The input stream is shielded so the SAX parser cannot
 * close the caller's stream.
 */
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    metadata.set(Metadata.CONTENT_TYPE, TMX_CONTENT_TYPE.toString());
    final XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    XMLReaderUtils.parseSAX(CloseShieldInputStream.wrap(stream), new TMXContentHandler(xhtml, metadata), context);
}
// Parsing the sample TMX must surface both translation variants' text and the
// header/count metadata.
@Test
public void testTMX() throws Exception {
    try (InputStream input = getResourceAsStream("/test-documents/testTMX.tmx")) {
        Metadata metadata = new Metadata();
        ContentHandler handler = new BodyContentHandler();
        new TMXParser().parse(input, handler, metadata, new ParseContext());
        String content = handler.toString();
        assertContains("Hello world!", content);
        assertContains("Salut lume!", content);
        assertEquals("1", metadata.get("tu-count"));
        assertEquals("2", metadata.get("tuv-count"));
        assertEquals("en-us", metadata.get("source-language"));
        assertEquals("ro-ro", metadata.get("target-language"));
        assertEquals("apache-tika", metadata.get("creation-tool"));
    }
}
/**
 * Returns the value at the given quantile, linearly interpolating between the
 * two closest ranked samples. An empty snapshot yields 0.0.
 *
 * @param quantile a value in {@code [0..1]}
 * @throws IllegalArgumentException if the quantile is NaN or outside [0..1]
 */
@Override
public double getValue(double quantile) {
    if (Double.isNaN(quantile) || quantile < 0.0 || quantile > 1.0) {
        throw new IllegalArgumentException(quantile + " is not in [0..1]");
    }

    if (values.length == 0) {
        return 0.0;
    }

    final double position = quantile * (values.length + 1);
    final int rank = (int) position;

    // Clamp to the smallest / largest sample when outside the interpolation range.
    if (rank < 1) {
        return values[0];
    }
    if (rank >= values.length) {
        return values[values.length - 1];
    }

    final double below = values[rank - 1];
    final double above = values[rank];
    return below + (position - Math.floor(position)) * (above - below);
}
// Quantiles greater than 1.0 are out of range and must be rejected.
@Test(expected = IllegalArgumentException.class)
public void disallowsQuantileOverOne() {
    snapshot.getValue(1.5);
}
/**
 * Creates a directory asynchronously, dispatching to the appropriate backend
 * (SFTP/FTP, SMB, OTG, SAF document tree, cloud providers, or local/root) and
 * reporting the outcome through {@code errorCallBack}.
 */
public static void mkdir(
    final HybridFile parentFile,
    @NonNull final HybridFile file,
    final Context context,
    final boolean rootMode,
    @NonNull final ErrorCallBack errorCallBack) {

  new AsyncTask<Void, Void, Void>() {

    private DataUtils dataUtils = DataUtils.getInstance();

    // Shared SAF directory-creation path used by both the OTG and
    // document-file branches below.
    private Function<DocumentFile, Void> safCreateDirectory =
        input -> {
          if (input != null && input.isDirectory()) {
            boolean result = false;
            try {
              result = input.createDirectory(file.getName(context)) != null;
            } catch (Exception e) {
              LOG.warn("Failed to make directory", e);
            }
            errorCallBack.done(file, result);
          } else errorCallBack.done(file, false);
          return null;
        };

    @Override
    protected Void doInBackground(Void... params) {
      // checking whether filename is valid or a recursive call possible
      if (!Operations.isFileNameValid(file.getName(context))) {
        errorCallBack.invalidName(file);
        return null;
      }

      if (file.exists()) {
        errorCallBack.exists(file);
        return null;
      }

      // Android data directory, prohibit create directory
      if (file.isAndroidDataDir()) {
        errorCallBack.done(file, false);
        return null;
      }

      if (file.isSftp() || file.isFtp()) {
        file.mkdir(context);
        /* FIXME: throw Exceptions from HybridFile.mkdir() so errorCallback can throw Exceptions here */
        errorCallBack.done(file, true);
        return null;
      }
      if (file.isSmb()) {
        try {
          file.getSmbFile(2000).mkdirs();
        } catch (SmbException e) {
          LOG.warn("failed to make smb directories", e);
          errorCallBack.done(file, false);
          return null;
        }
        errorCallBack.done(file, file.exists());
        return null;
      }
      if (file.isOtgFile()) {
        if (checkOtgNewFileExists(file, context)) {
          errorCallBack.exists(file);
          return null;
        }
        safCreateDirectory.apply(OTGUtil.getDocumentFile(parentFile.getPath(), context, false));
        return null;
      }
      if (file.isDocumentFile()) {
        if (checkDocumentFileNewFileExists(file, context)) {
          errorCallBack.exists(file);
          return null;
        }
        safCreateDirectory.apply(
            OTGUtil.getDocumentFile(
                parentFile.getPath(),
                SafRootHolder.getUriRoot(),
                context,
                OpenMode.DOCUMENT_FILE,
                false));
        return null;
      } else if (file.isDropBoxFile()) {
        CloudStorage cloudStorageDropbox = dataUtils.getAccount(OpenMode.DROPBOX);
        try {
          cloudStorageDropbox.createFolder(CloudUtil.stripPath(OpenMode.DROPBOX, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else if (file.isBoxFile()) {
        CloudStorage cloudStorageBox = dataUtils.getAccount(OpenMode.BOX);
        try {
          cloudStorageBox.createFolder(CloudUtil.stripPath(OpenMode.BOX, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else if (file.isOneDriveFile()) {
        CloudStorage cloudStorageOneDrive = dataUtils.getAccount(OpenMode.ONEDRIVE);
        try {
          cloudStorageOneDrive.createFolder(
              CloudUtil.stripPath(OpenMode.ONEDRIVE, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else if (file.isGoogleDriveFile()) {
        CloudStorage cloudStorageGdrive = dataUtils.getAccount(OpenMode.GDRIVE);
        try {
          cloudStorageGdrive.createFolder(CloudUtil.stripPath(OpenMode.GDRIVE, file.getPath()));
          errorCallBack.done(file, true);
        } catch (Exception e) {
          LOG.warn("failed to make directory in cloud connection", e);
          errorCallBack.done(file, false);
        }
      } else {
        if (file.isLocal() || file.isRoot()) {
          int mode = checkFolder(new File(file.getParent(context)), context);
          if (mode == 2) {
            // Parent needs SAF permission: hand off to the UI.
            errorCallBack.launchSAF(file);
            return null;
          }
          if (mode == 1 || mode == 0) MakeDirectoryOperation.mkdir(file.getFile(), context);
          // Fall back to a root shell when the normal mkdir did not succeed.
          if (!file.exists() && rootMode) {
            file.setMode(OpenMode.ROOT);
            if (file.exists()) errorCallBack.exists(file);
            try {
              MakeDirectoryCommand.INSTANCE.makeDirectory(
                  file.getParent(context), file.getName(context));
            } catch (ShellNotRunningException e) {
              LOG.warn("failed to make directory in local filesystem", e);
            }
            errorCallBack.done(file, file.exists());
            return null;
          }
          errorCallBack.done(file, file.exists());
          return null;
        }
        errorCallBack.done(file, file.exists());
      }
      return null;
    }
  }.executeOnExecutor(executor);
}
// Creating "test/test" must succeed even though the child folder shares its
// parent's name; re-creating the same folder must then report exists().
@Test
public void testMkdirNewFolderSameNameAsCurrentFolder() throws InterruptedException {
    File newFolder = new File(storageRoot, "test");
    HybridFile newFolderHF = new HybridFile(OpenMode.FILE, newFolder.getAbsolutePath());
    CountDownLatch waiter1 = new CountDownLatch(1);
    Operations.mkdir(
        newFolderHF,
        newFolderHF,
        ApplicationProvider.getApplicationContext(),
        false,
        new AbstractErrorCallback() {
          @Override
          public void done(HybridFile hFile, boolean b) {
            waiter1.countDown();
          }
        });
    waiter1.await();
    assertTrue(newFolder.exists());

    // Child folder with the same name as its parent.
    File newFolder2 = new File(newFolder, "test");
    HybridFile newFolder2HF = new HybridFile(OpenMode.FILE, newFolder2.getAbsolutePath());
    CountDownLatch waiter2 = new CountDownLatch(1);
    Operations.mkdir(
        newFolder2HF,
        newFolder2HF,
        ApplicationProvider.getApplicationContext(),
        false,
        new AbstractErrorCallback() {
          @Override
          public void done(HybridFile hFile, boolean b) {
            waiter2.countDown();
          }
        });
    waiter2.await();
    assertTrue(newFolder2.exists());

    // A third attempt must route to the exists() callback.
    CountDownLatch waiter3 = new CountDownLatch(1);
    AtomicBoolean assertFlag = new AtomicBoolean(false);
    Operations.mkdir(
        newFolder2HF,
        newFolder2HF,
        ApplicationProvider.getApplicationContext(),
        false,
        new AbstractErrorCallback() {
          @Override
          public void exists(HybridFile file) {
            assertFlag.set(true);
            waiter3.countDown();
          }
        });
    waiter3.await();
    assertTrue(assertFlag.get());
}
/**
 * Updates the issue's locations when they differ (ignoring location hashes)
 * from the ones currently stored on the issue.
 *
 * @return true if the locations were changed
 */
public boolean setLocations(DefaultIssue issue, @Nullable Object locations) {
    if (locationsEqualsIgnoreHashes(locations, issue.getLocations())) {
        return false;
    }
    issue.setLocations(locations);
    issue.setChanged(true);
    issue.setLocationsChanged(true);
    return true;
}
// Clearing the flow yields structurally different locations, so the setter
// must apply the change and report true.
@Test
void change_locations_if_different_flow_count() {
    DbIssues.Locations locations = DbIssues.Locations.newBuilder()
        .addFlow(DbIssues.Flow.newBuilder()
            .addLocation(DbIssues.Location.newBuilder())
            .build())
        .build();
    issue.setLocations(locations);

    DbIssues.Locations.Builder builder = locations.toBuilder();
    builder.clearFlow();
    boolean updated = underTest.setLocations(issue, builder.build());

    assertThat(updated).isTrue();
}
// Returns the latch tracked by this watcher.
public CountDownLatch getCountDownLatch() {
    return countDownLatch;
}
// Drives a pod through Pending -> Running; the watcher's latch must release
// once a matching phase is observed.
@Test
void testPhase() throws InterruptedException {
    // CREATE
    client.pods().inNamespace("ns1")
        .create(new PodBuilder().withNewMetadata().withName("pod1").endMetadata().withNewStatus()
            .endStatus().build());
    await().until(isPodAvailable("pod1"));

    // READ
    PodList podList = client.pods().inNamespace("ns1").list();
    assertNotNull(podList);
    assertEquals(1, podList.getItems().size());
    Pod pod = podList.getItems().get(0);

    // WATCH
    PodPhaseWatcher podWatcher = new PodPhaseWatcher(
        phase -> StringUtils.equalsAnyIgnoreCase(phase, "Succeeded", "Failed", "Running"));
    try (Watch watch = client.pods().inNamespace("ns1").withName("pod1").watch(podWatcher)) {
      // Update Pod to "pending" phase
      pod.setStatus(new PodStatus(null, null, null, null, null, null, null, "Pending", null, null, null, null, null));
      pod = client.pods().inNamespace("ns1").replaceStatus(pod);

      // Wait a little bit, till update is applied
      await().pollDelay(Duration.ofSeconds(1))
          .until(isPodPhase(pod.getMetadata().getName(), "Pending"));

      // Update Pod to "Running" phase
      pod.setStatus(new PodStatusBuilder(new PodStatus(null, null, null, null, null, null, null, "Running",
          null, null, null, null, null)).build());
      client.pods().inNamespace("ns1").replaceStatus(pod);

      await().pollDelay(Duration.ofSeconds(1))
          .until(isPodPhase(pod.getMetadata().getName(), "Running"));

      assertTrue(podWatcher.getCountDownLatch().await(1, TimeUnit.SECONDS));
    }
}
// Loads an OAuth2 client by id, caching the result; null results are not
// cached (see the `unless` condition).
@Override
@Cacheable(cacheNames = RedisKeyConstants.OAUTH_CLIENT, key = "#clientId", unless = "#result == null")
public OAuth2ClientDO getOAuth2ClientFromCache(String clientId) {
    return oauth2ClientMapper.selectByClientId(clientId);
}
// A persisted client must be retrievable (and equal) through the cached lookup.
@Test
public void testGetOAuth2ClientFromCache() {
    // prepare data
    OAuth2ClientDO clientDO = randomPojo(OAuth2ClientDO.class);
    oauth2ClientMapper.insert(clientDO);
    // prepare arguments
    String clientId = clientDO.getClientId();
    // call and assert
    OAuth2ClientDO dbClientDO = oauth2ClientService.getOAuth2ClientFromCache(clientId);
    assertPojoEquals(clientDO, dbClientDO);
}
// Writes the value as an 8-byte little-endian IEEE-754 double into the payload.
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
    payload.getByteBuf().writeDoubleLE(Double.parseDouble(value.toString()));
}
// Writing 1.0 must be encoded as a little-endian double on the underlying buffer.
@Test
void assertWrite() {
    new MySQLDoubleBinaryProtocolValue().write(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8), 1.0D);
    verify(byteBuf).writeDoubleLE(1.0D);
}
/**
 * Returns this replica list encoded in the {@code long[]} block-report layout.
 */
// Modifier order normalized to the JLS-recommended "public abstract".
public abstract long[] getBlockListAsLongs();
// An RBW (under-construction) replica must be reported in the trailing
// "-1"-delimited section of the long-encoded block report, including its state.
@Test
public void testUc() {
    BlockListAsLongs blocks = checkReport(
        new ReplicaBeingWritten(b1, null, null, null));
    assertArrayEquals(
        new long[] {
            0, 1, -1, -1, -1,
            1, 11, 111,
            ReplicaState.RBW.getValue() },
        blocks.getBlockListAsLongs());
}
// Computes the target partition for a row by wrapping it into the reusable
// sort key and delegating the range lookup to SketchUtil.
@Override
public int partition(RowData row, int numPartitions) {
    // reuse the sortKey and rowDataWrapper
    sortKey.wrap(rowDataWrapper.wrap(row));
    return SketchUtil.partition(sortKey, numPartitions, rangeBounds, comparator);
}
// Every id must land in a valid partition and match the bucket derived from
// RANGE_STEP.
@Test
public void testRangePartitioningWithRangeBounds() {
    SketchRangePartitioner partitioner =
        new SketchRangePartitioner(TestFixtures.SCHEMA, SORT_ORDER, RANGE_BOUNDS);
    GenericRowData row =
        GenericRowData.of(StringData.fromString("data"), 0L, StringData.fromString("2023-06-20"));
    for (long id = 0; id < MAX_ID; ++id) {
        row.setField(1, id);
        int partition = partitioner.partition(row, NUM_PARTITIONS);
        assertThat(partition).isGreaterThanOrEqualTo(0).isLessThan(NUM_PARTITIONS);
        int expectedPartition = id == 0L ? 0 : (int) ((id - 1) / RANGE_STEP);
        assertThat(partition).isEqualTo(expectedPartition);
    }
}
/**
 * No-op size estimator: always reports 0 bytes for any element. A warning is
 * logged so accidental use of this estimator is visible.
 */
@Override
public long sizeOf(T element) {
    LOG.warn(
        "Trying to estimate size using {}, this operation will always return 0",
        this.getClass().getSimpleName());
    return 0;
}
// The null estimator reports 0 for every element, regardless of its size.
@Test
public void alwaysReturns0AsEstimatedThroughput() {
    final NullSizeEstimator<byte[]> estimator = new NullSizeEstimator<>();
    assertEquals(estimator.sizeOf(new byte[40]), 0D, DELTA);
    assertEquals(estimator.sizeOf(new byte[20]), 0D, DELTA);
    assertEquals(estimator.sizeOf(new byte[10]), 0D, DELTA);
}
/**
 * Reads a multipart request parameter as a stream.
 *
 * @return the part's input stream, or null when the part is absent
 */
@Override
protected InputStream readInputStreamParam(String key) {
    Part part = readPart(key);
    if (part == null) {
        return null;
    }
    return part.getInputStream();
}
// When the container rejects the part lookup, the reader must resolve the
// parameter to null instead of propagating the exception.
@Test
public void returns_null_when_invalid_part() throws Exception {
    when(source.getContentType()).thenReturn("multipart/form-data");
    InputStream file = mock(InputStream.class);
    Part part = mock(Part.class);
    when(part.getSize()).thenReturn(0L);
    when(part.getInputStream()).thenReturn(file);
    doThrow(IllegalArgumentException.class).when(source).getPart("param1");

    assertThat(underTest.readInputStreamParam("param1")).isNull();
}
// REST endpoint: lists all hosts belonging to the given cluster.
@Operation(summary = "list", description = "List hosts")
@GetMapping
public ResponseEntity<List<HostVO>> list(@PathVariable Long clusterId) {
    return ResponseEntity.success(hostService.list(clusterId));
}
// An unknown cluster id yields a successful response with an empty host list.
@Test
void listReturnsEmptyForInvalidClusterId() {
    Long clusterId = 999L;
    when(hostService.list(clusterId)).thenReturn(List.of());

    ResponseEntity<List<HostVO>> response = hostController.list(clusterId);

    assertTrue(response.isSuccess());
    assertTrue(response.getData().isEmpty());
}
// Writes headers without priority information, delegating to writeHeaders0
// with neutral extra arguments (presumably hasPriority=false and zeroed
// dependency/weight/exclusive fields — confirm against writeHeaders0's signature).
@Override
public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding,
        boolean endStream, ChannelPromise promise) {
    return writeHeaders0(ctx, streamId, headers, false, 0, (short) 0, false, padding, endStream, promise);
}
// Writing headers for an unknown stream id must implicitly create the stream
// and complete the promise successfully.
@Test
public void headersWriteForUnknownStreamShouldCreateStream() throws Exception {
    writeAllFlowControlledFrames();
    final int streamId = 6;
    ChannelPromise promise = newPromise();
    encoder.writeHeaders(ctx, streamId, EmptyHttp2Headers.INSTANCE, 0, false, promise);
    verify(writer).writeHeaders(eq(ctx), eq(streamId), eq(EmptyHttp2Headers.INSTANCE), eq(0), eq(false),
        eq(promise));
    assertTrue(promise.isSuccess());
}
// Pass-through override: delegates unchanged to the superclass implementation
// (presumably present to expose the protected method to tests — confirm intent).
@Override
protected Triple<List<HoodieClusteringGroup>, Integer, List<FileSlice>> buildSplitClusteringGroups(
    ConsistentBucketIdentifier identifier, List<FileSlice> fileSlices, int splitSlot) {
  return super.buildSplitClusteringGroups(identifier, fileSlices, splitSlot);
}
// Only buckets whose file size crosses the split threshold are split
// candidates, and the split slot caps how many are actually chosen.
@Test
public void testBuildSplitClusteringGroup() throws IOException {
    setup();
    int maxFileSize = 5120;
    Properties props = new Properties();
    props.setProperty(KeyGeneratorOptions.RECORDKEY_FIELD_NAME.key(), "uuid");
    HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
        .withIndexConfig(HoodieIndexConfig.newBuilder().fromProperties(props).withIndexType(HoodieIndex.IndexType.BUCKET)
            .withBucketIndexEngineType(HoodieIndex.BucketIndexEngineType.CONSISTENT_HASHING)
            .withBucketMaxNum(6)
            .withBucketNum("4").build())
        .withStorageConfig(HoodieStorageConfig.newBuilder()
            .parquetMaxFileSize(maxFileSize).build())
        .build();
    HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
    SparkConsistentBucketClusteringPlanStrategy planStrategy =
        new SparkConsistentBucketClusteringPlanStrategy(hoodieTable, context, config);

    HoodieConsistentHashingMetadata metadata = new HoodieConsistentHashingMetadata("partition", config.getBucketIndexNumBuckets());
    ConsistentBucketIdentifier identifier = new ConsistentBucketIdentifier(metadata);
    // Slices 0 and 1 exceed the split threshold; 2 does not; 3 exceeds it but
    // is left untouched because only 2 split slots are available.
    int[] fsSize = {maxFileSize * 5, (int) (maxFileSize * BUCKET_SPLIT_THRESHOLD.defaultValue() + 1), maxFileSize, maxFileSize * 5};
    List<FileSlice> fileSlices = IntStream.range(0, metadata.getNodes().size()).mapToObj(
        i -> createFileSliceWithSize(metadata.getNodes().get(i).getFileIdPrefix(), 1024, fsSize[i] - 1024)
    ).collect(Collectors.toList());

    /*
     * 1. Test split candidate selection based on file size
     * 2. Test the effectiveness of split slot
     */
    Triple res = planStrategy.buildSplitClusteringGroups(identifier, fileSlices, 2);
    Assertions.assertEquals(2, res.getMiddle());

    List<HoodieClusteringGroup> groups = (List<HoodieClusteringGroup>) res.getLeft();
    Assertions.assertEquals(2, groups.size());
    Assertions.assertEquals(fileSlices.get(0).getFileId(), groups.get(0).getSlices().get(0).getFileId());
    Assertions.assertEquals(fileSlices.get(1).getFileId(), groups.get(1).getSlices().get(0).getFileId());

    List<FileSlice> fsUntouched = (List<FileSlice>) res.getRight();
    Assertions.assertEquals(2, fsUntouched.size());
    Assertions.assertEquals(fileSlices.get(2), fsUntouched.get(0));
    Assertions.assertEquals(fileSlices.get(3), fsUntouched.get(1));
}
// One cooperative tasklet step: reset progress tracking, advance the state
// machine, then report whether progress was made.
@Nonnull
@Override
public ProgressState call() {
    progTracker.reset();
    stateMachineStep();
    return progTracker.toProgressState();
}
// A queued entry must be offered (serialized) to the snapshot writer on the
// next tasklet step, which counts as progress.
@Test
public void when_item_then_offeredToSsWriter() {
    // When
    init(singletonList(entry("k", "v")));
    assertEquals(MADE_PROGRESS, sst.call());

    // Then
    assertEquals(entry(serialize("k"), serialize("v")), mockSsWriter.poll());
    assertNull(mockSsWriter.poll());
}
// Convenience overload that delegates using this class's default logger.
public static ThreadFactory groupedThreads(String groupName, String pattern) {
    return groupedThreads(groupName, pattern, log);
}
// Slashes in the group name become dashes in the thread name, while the
// thread group keeps the raw (slash-separated) name.
@Test
public void groupedThreads() {
    ThreadFactory f = Tools.groupedThreads("foo/bar-me", "foo-%d");
    Thread t = f.newThread(() -> TestTools.print("yo"));
    assertTrue("wrong pattern", t.getName().startsWith("foo-bar-me-foo-"));
    assertTrue("wrong group", "foo/bar-me".equals(t.getThreadGroup().getName()));
}
// Forwards a device-state message to the state service, recording stats first
// when stats collection is enabled.
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
    if (statsEnabled) {
        stats.log(deviceStateServiceMsg);
    }
    stateService.onQueueMsg(deviceStateServiceMsg, callback);
}
// A failure thrown by the state service while handling a disconnect must be
// routed to the callback's onFailure — never onSuccess.
@Test
public void givenProcessingFailure_whenForwardingDisconnectMsgToStateService_thenOnFailureCallbackIsCalled() {
    // GIVEN
    var disconnectMsg = TransportProtos.DeviceDisconnectProto.newBuilder()
        .setTenantIdMSB(tenantId.getId().getMostSignificantBits())
        .setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
        .setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
        .setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
        .setLastDisconnectTime(time)
        .build();

    doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(disconnectMsg, tbCallbackMock);
    var runtimeException = new RuntimeException("Something bad happened!");
    doThrow(runtimeException).when(stateServiceMock).onDeviceDisconnect(tenantId, deviceId, time);

    // WHEN
    defaultTbCoreConsumerServiceMock.forwardToStateService(disconnectMsg, tbCallbackMock);

    // THEN
    then(tbCallbackMock).should(never()).onSuccess();
    then(tbCallbackMock).should().onFailure(runtimeException);
}
/**
 * Filters the message, timing the work with the executionTime metric.
 * The timer context is closed automatically by try-with-resources.
 */
@Override
public FilteredMessage apply(Message msg) {
    try (var ignored = executionTime.time()) {
        return doApply(msg);
    }
}
@Test
void applyWithNoFilterAndTwoDestinations(MessageFactory messageFactory) {
    // No stream filters configured but two destinations: the message's stream
    // should be routed to both destinations unchanged.
    final var filter = createFilter(Map.of(), Set.of("indexer", "other"));
    final var message = messageFactory.createMessage("msg", "src", Tools.nowUTC());
    message.addStream(defaultStream);

    final var filteredMessage = filter.apply(message);

    assertThat(filteredMessage.message()).isEqualTo(ImmutableMessage.wrap(message));
    assertThat(filteredMessage.destinations().keySet()).containsExactlyInAnyOrder("indexer", "other");
    assertThat(filteredMessage.destinations().get("indexer")).containsExactlyInAnyOrder(defaultStream);
    assertThat(filteredMessage.destinations().get("other")).containsExactlyInAnyOrder(defaultStream);
}
/**
 * Fails unless the string under test ends with the given suffix.
 * A null actual value fails with a dedicated message.
 */
public void endsWith(@Nullable String string) {
    checkNotNull(string);
    if (actual == null) {
        failWithActual("expected a string that ends with", string);
        return;
    }
    if (actual.endsWith(string)) {
        return;
    }
    failWithActual("expected to end with", string);
}
@Test
public void stringEndsWithFail() {
    // "abc" does not end with "ab": the failure must carry the suffix value.
    expectFailureWhenTestingThat("abc").endsWith("ab");
    assertFailureValue("expected to end with", "ab");
}
/**
 * Translates a consumer record into a tuple routed to the configured stream.
 * A null result from the wrapped function is propagated as-is (record dropped).
 */
@Override
public List<Object> apply(ConsumerRecord<K, V> record) {
    final List<Object> values = func.apply(record);
    if (values == null) {
        return null;
    }
    final KafkaTuple tuple = new KafkaTuple();
    tuple.addAll(values);
    return tuple.routedTo(stream);
}
@Test
public void testNullTranslation() {
    // A translator whose function returns null must propagate null
    // (record filtered out) rather than emitting an empty tuple.
    SimpleRecordTranslator<String, String> trans = new SimpleRecordTranslator<>((r) -> null, new Fields("key"));
    assertNull(trans.apply(null));
}
/**
 * Records an error observation, coalescing repeats of the same distinct error
 * into one entry: the first occurrence encodes the error, later occurrences
 * only bump the count and last-seen timestamp.
 *
 * @param observation the error to record
 * @return false when there is insufficient space to store a new distinct error
 */
public boolean record(final Throwable observation) {
    final long timestampMs;
    DistinctObservation distinctObservation;
    timestampMs = clock.time();
    // Find-or-create must be serialized; the count/timestamp updates below run
    // outside the lock using atomic/ordered buffer operations.
    synchronized (this) {
        distinctObservation = find(distinctObservations, observation);
        if (null == distinctObservation) {
            distinctObservation = newObservation(timestampMs, observation);
            if (INSUFFICIENT_SPACE == distinctObservation) {
                return false;
            }
        }
    }
    final int offset = distinctObservation.offset;
    buffer.getAndAddInt(offset + OBSERVATION_COUNT_OFFSET, 1);
    buffer.putLongOrdered(offset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestampMs);
    return true;
}
@Test
void shouldRecordFirstObservationOnly() {
    final long timestampOne = 7;
    final long timestampTwo = 8;
    final int offset = 0;
    final RuntimeException error = new RuntimeException("Test Error");
    when(clock.time()).thenReturn(timestampOne).thenReturn(timestampTwo);

    // Two records of the same error: both succeed.
    assertTrue(log.record(error));
    assertTrue(log.record(error));

    final InOrder inOrder = inOrder(buffer);
    // First record encodes the error bytes and first-observation timestamp.
    inOrder.verify(buffer).putBytes(eq(offset + ENCODED_ERROR_OFFSET), any(byte[].class));
    inOrder.verify(buffer).putLong(offset + FIRST_OBSERVATION_TIMESTAMP_OFFSET, timestampOne);
    inOrder.verify(buffer).putIntOrdered(eq(offset + LENGTH_OFFSET), anyInt());
    inOrder.verify(buffer).getAndAddInt(offset + OBSERVATION_COUNT_OFFSET, 1);
    inOrder.verify(buffer).putLongOrdered(offset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestampOne);
    // Second record only bumps the count and last-seen timestamp.
    inOrder.verify(buffer).getAndAddInt(offset + OBSERVATION_COUNT_OFFSET, 1);
    inOrder.verify(buffer).putLongOrdered(offset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestampTwo);
}
/**
 * Runs the action, retrying according to the policy. On success returns
 * immediately; when all attempts fail, the last IOException is rethrown.
 *
 * @param action human-readable action name for logging and error messages
 * @param f      the action to run
 * @param policy decides whether another attempt is allowed
 * @throws IOException the last failure, or a generic failure when the policy
 *                     never permitted an attempt
 */
public static void retry(String action, RunnableThrowsIOException f, RetryPolicy policy)
    throws IOException {
  IOException lastFailure = null;
  while (policy.attempt()) {
    try {
      f.run();
      return;
    } catch (IOException ioe) {
      lastFailure = ioe;
      LOG.debug("Failed to {} (attempt {}): {}", action, policy.getAttemptCount(),
          lastFailure.toString());
    }
  }
  if (lastFailure == null) {
    // The policy allowed zero attempts; report a generic failure.
    throw new IOException(String.format("Failed to run action %s after %d attempts", action,
        policy.getAttemptCount()));
  }
  throw lastFailure;
}
@Test
public void failure() throws IOException {
    // CountingRetry(10) allows 10 retries => 11 total attempts; the exception
    // from the last attempt ("11") must be the one propagated.
    AtomicInteger count = new AtomicInteger(0);
    try {
        RetryUtils.retry("failure test", () -> {
            count.incrementAndGet();
            throw new IOException(Integer.toString(count.get()));
        }, new CountingRetry(10));
        fail("Expected an exception to be thrown");
    } catch (IOException e) {
        assertEquals("11", e.getMessage());
    }
    assertEquals(11, count.get());
}
public void finish() throws IOException { if (finished) { return; } flush(); // Finish the stream with the terminatorValue. VarInt.encode(terminatorValue, os); if (!BUFFER_POOL.offer(buffer)) { // The pool is full, we can't store the buffer. We just drop the buffer. } finished = true; }
@Test
public void testBehaviorWhenBufferPoolFull() throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Fill the shared pool completely so finish() cannot return its buffer.
    while (BUFFER_POOL.remainingCapacity() > 0) {
        BUFFER_POOL.offer(ByteBuffer.allocate(256));
    }
    BufferedElementCountingOutputStream os = createAndWriteValues(toBytes("abcdefghij"), baos);
    // finish() must drop the buffer silently instead of blocking or failing.
    os.finish();
    assertEquals(0, BUFFER_POOL.remainingCapacity());
}
/**
 * Loads bundled, external and downloaded plugins, failing fast (via
 * MessageException) on duplicate or conflicting plugin keys. Downloaded
 * plugins are moved into the external plugins directory, replacing any
 * previously installed version; incompatible plugins are unloaded before
 * the merged collection is returned.
 */
public Collection<ServerPluginInfo> loadPlugins() {
    // Bundled plugins: two jars with the same key in the bundled dir is fatal.
    Map<String, ServerPluginInfo> bundledPluginsByKey = new LinkedHashMap<>();
    for (ServerPluginInfo bundled : getBundledPluginsMetadata()) {
        failIfContains(bundledPluginsByKey, bundled,
            plugin -> MessageException.of(format("Found two versions of the plugin %s [%s] in the directory %s. Please remove one of %s or %s.",
                bundled.getName(), bundled.getKey(), getRelativeDir(fs.getInstalledBundledPluginsDir()), bundled.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
        bundledPluginsByKey.put(bundled.getKey(), bundled);
    }
    // External plugins: must clash neither with bundled keys nor with each other.
    Map<String, ServerPluginInfo> externalPluginsByKey = new LinkedHashMap<>();
    for (ServerPluginInfo external : getExternalPluginsMetadata()) {
        failIfContains(bundledPluginsByKey, external,
            plugin -> MessageException.of(format("Found a plugin '%s' in the directory '%s' with the same key [%s] as a built-in feature '%s'. Please remove '%s'.",
                external.getName(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getKey(), plugin.getName(),
                new File(getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName()))));
        failIfContains(externalPluginsByKey, external,
            plugin -> MessageException.of(format("Found two versions of the plugin '%s' [%s] in the directory '%s'. Please remove %s or %s.",
                external.getName(), external.getKey(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
        externalPluginsByKey.put(external.getKey(), external);
    }
    // Downloaded plugins: install into (or update within) the external dir.
    for (PluginInfo downloaded : getDownloadedPluginsMetadata()) {
        failIfContains(bundledPluginsByKey, downloaded,
            plugin -> MessageException.of(format("Fail to update plugin: %s. Built-in feature with same key already exists: %s. 
Move or delete plugin from %s directory", plugin.getName(), plugin.getKey(), getRelativeDir(fs.getDownloadedPluginsDir()))));
        ServerPluginInfo installedPlugin;
        if (externalPluginsByKey.containsKey(downloaded.getKey())) {
            // Updating: remove the previous jar before moving the new one in.
            deleteQuietly(externalPluginsByKey.get(downloaded.getKey()).getNonNullJarFile());
            installedPlugin = moveDownloadedPluginToExtensions(downloaded);
            LOG.info("Plugin {} [{}] updated to version {}", installedPlugin.getName(), installedPlugin.getKey(), installedPlugin.getVersion());
        } else {
            installedPlugin = moveDownloadedPluginToExtensions(downloaded);
            LOG.info("Plugin {} [{}] installed", installedPlugin.getName(), installedPlugin.getKey());
        }
        externalPluginsByKey.put(downloaded.getKey(), installedPlugin);
    }
    // Merge both maps; bundled entries overwrite same-key external entries.
    Map<String, ServerPluginInfo> plugins = new HashMap<>(externalPluginsByKey.size() + bundledPluginsByKey.size());
    plugins.putAll(externalPluginsByKey);
    plugins.putAll(bundledPluginsByKey);
    PluginRequirementsValidator.unloadIncompatiblePlugins(plugins);
    return plugins.values();
}
@Test
public void test_plugin_requirements_at_startup() throws Exception {
    // A plugin and the plugin it requires are both installed: both must load.
    copyTestPluginTo("test-base-plugin", fs.getInstalledExternalPluginsDir());
    copyTestPluginTo("test-require-plugin", fs.getInstalledExternalPluginsDir());

    assertThat(underTest.loadPlugins()).extracting(PluginInfo::getKey).containsOnly("testbase", "testrequire");
}
/**
 * Allocates NUMA nodes for the container and persists the assignment in the
 * NM state store. Returns null when insufficient NUMA resources are free.
 *
 * @throws ResourceHandlerException when persisting fails; the in-memory
 *         allocation is released first so nodes are not leaked
 */
public synchronized NumaResourceAllocation allocateNumaNodes(
    Container container) throws ResourceHandlerException {
  NumaResourceAllocation allocation = allocate(container.getContainerId(),
      container.getResource());
  if (allocation != null) {
    try {
      // Update state store.
      context.getNMStateStore().storeAssignedResources(container,
          NUMA_RESOURCE_TYPE, Arrays.asList(allocation));
    } catch (IOException e) {
      // Roll back the allocation before surfacing the persistence failure.
      releaseNumaResource(container.getContainerId());
      throw new ResourceHandlerException(e);
    }
  }
  return allocation;
}
@Test
public void testAllocateNumaNodeWhenNoNumaMemResourcesAvailable() throws Exception {
  // Request more memory than any NUMA node provides: allocation must be null.
  NumaResourceAllocation nodeInfo = numaResourceAllocator
      .allocateNumaNodes(getContainer(
          ContainerId.fromString("container_1481156246874_0001_01_000001"),
          Resource.newInstance(2048000, 6)));
  Assert.assertNull("Should not assign numa nodes when there"
      + " are no sufficient memory resources available.", nodeInfo);
}
/**
 * Registers a migration step under the given number.
 * All arguments are validated before the registry is touched; registering the
 * same number twice is an error.
 *
 * @return this registry, for chaining
 */
@Override
public <T extends MigrationStep> MigrationStepRegistry add(long migrationNumber, String description, Class<T> stepClass) {
    validate(migrationNumber);
    requireNonNull(description, "description can't be null");
    checkArgument(!description.isEmpty(), "description can't be empty");
    requireNonNull(stepClass, "MigrationStep class can't be null");
    checkState(!migrations.containsKey(migrationNumber),
        "A migration is already registered for migration number '%s'", migrationNumber);
    RegisteredMigrationStep step = new RegisteredMigrationStep(migrationNumber, description, stepClass);
    this.migrations.put(migrationNumber, step);
    return this;
}
@Test
public void add_fails_with_IAE_if_migrationNumber_is_less_than_0() {
    // Math.floorMod keeps the value in [0, Long.MAX_VALUE - 1]; adding 1 and
    // negating yields a strictly negative number. The previous
    // -Math.abs(nextLong() + 1) could produce 0 when nextLong() returned -1,
    // making this test flaky (astronomically rarely).
    long negativeMigrationNumber = -(Math.floorMod(new Random().nextLong(), Long.MAX_VALUE) + 1);
    assertThatThrownBy(() -> {
        underTest.add(negativeMigrationNumber, "sdsd", MigrationStep.class);
    })
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Migration number must be >= 0");
}
/**
 * Sets the thread that consumes from this queue.
 *
 * @param consumerThread the consumer thread; must not be null
 * @throws NullPointerException when consumerThread is null
 */
public void setConsumerThread(Thread consumerThread) {
    this.consumerThread = checkNotNull(consumerThread, "consumerThread can't be null");
}
@Test(expected = NullPointerException.class)
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
public void setOwningThread_whenNull() {
    // A null consumer thread must be rejected immediately with an NPE.
    MPSCQueue queue = new MPSCQueue(new BusySpinIdleStrategy());
    queue.setConsumerThread(null);
}
/** Creates a new mutable int holder initialized to {@code value}. */
public static IntRef ofInt(int value) {
    return new IntRef(value);
}
@Test
public void testIntRef() {
    PrimitiveRef.IntRef ref = PrimitiveRef.ofInt(3);
    // Post-increment returns the previous value, then stores 4.
    assertEquals(3, ref.value++);
    assertEquals(4, ref.value);
    // Pre-increment stores 5 and returns it.
    assertEquals(5, ++ref.value);
    assertEquals(5, ref.value);
}
/**
 * Determines whether the candidate's log is at least as up to date as that of
 * a quorum of the cluster. Members with no reported log position are ignored.
 */
public static boolean isQuorumCandidate(final ClusterMember[] clusterMembers, final ClusterMember candidate) {
    int possibleVotes = 0;
    for (final ClusterMember member : clusterMembers) {
        final boolean hasReportedPosition = NULL_POSITION != member.logPosition;
        if (hasReportedPosition && compareLog(candidate, member) >= 0) {
            possibleVotes++;
        }
    }
    return possibleVotes >= ClusterMember.quorumThreshold(clusterMembers.length);
}
@Test
void isQuorumCandidateReturnTrueWhenQuorumIsReached() {
    final ClusterMember candidate = newMember(2, 10, 800);
    // A majority of the five members have logs not ahead of the candidate's,
    // so the candidate can gather a quorum of votes.
    final ClusterMember[] members = new ClusterMember[]{
        newMember(10, 2, 100),
        newMember(20, 18, 6),
        newMember(30, 10, 800),
        newMember(40, 9, 800),
        newMember(50, 10, 700) };

    assertTrue(isQuorumCandidate(members, candidate));
}
public static boolean regionMatches(final CharSequence cs, final boolean ignoreCase, final int thisStart, final CharSequence substring, final int start, final int length) { if (cs instanceof String && substring instanceof String) { return ((String) cs).regionMatches(ignoreCase, thisStart, (String) substring, start, length); } int index1 = thisStart; int index2 = start; int tmpLen = length; while (tmpLen-- > 0) { final char c1 = cs.charAt(index1++); final char c2 = substring.charAt(index2++); if (c1 == c2) { continue; } if (!ignoreCase) { return false; } // The same check as in String.regionMatches(): if (Character.toUpperCase(c1) != Character.toUpperCase(c2) && Character.toLowerCase(c1) != Character .toLowerCase(c2)) { return false; } } return true; }
@Test
void testRegionMatchesNotEqualsCaseInsensitive() {
    // "abc" vs region "Cab" of "xCab": the first chars differ even ignoring case.
    assertFalse(StringUtils.regionMatches("abc", true, 0, "xCab", 1, 3));
}
@Udf(description = "Adds a duration to a date")
public Date dateAdd(
    @UdfParameter(description = "A unit of time, for example DAY") final TimeUnit unit,
    @UdfParameter(description = "An integer number of intervals to add") final Integer interval,
    @UdfParameter(description = "A DATE value.") final Date date
) {
  // SQL semantics: a null in any argument yields a null result.
  if (unit == null || interval == null || date == null) {
    return null;
  }
  // Add the interval in milliseconds, then truncate back to whole days since
  // a DATE value carries no time-of-day component.
  final long resultMillis = date.getTime() + unit.toMillis(interval);
  final long wholeDays = TimeUnit.MILLISECONDS.toDays(resultMillis);
  return new Date(TimeUnit.DAYS.toMillis(wholeDays));
}
@Test
public void shouldAddToDate() {
    // 86400000 ms = day 1; adding 9 days lands on day 10 (864000000 ms).
    assertThat(udf.dateAdd(TimeUnit.DAYS, 9, new Date(86400000)), is(new Date(864000000)));
    assertThat(udf.dateAdd(TimeUnit.DAYS, -1, new Date(86400000)), is(new Date(0)));
    // Sub-day additions are truncated away: DATE has day resolution.
    assertThat(udf.dateAdd(TimeUnit.SECONDS, 5, new Date(86400000)), is(new Date(86400000)));
}
/**
 * Validates the configured GitHub API endpoint: it must be a well-formed
 * http(s) URL that is either github.com's API host or a GitHub Enterprise
 * endpoint whose path starts with "/api/v3".
 *
 * @throws IllegalArgumentException when the URL is missing, malformed, uses an
 *         unsupported scheme, or does not look like a GitHub API endpoint
 */
@Override
public void checkApiEndpoint(GithubAppConfiguration githubAppConfiguration) {
    if (StringUtils.isBlank(githubAppConfiguration.getApiEndpoint())) {
        throw new IllegalArgumentException("Missing URL");
    }
    URI apiEndpoint;
    try {
        apiEndpoint = URI.create(githubAppConfiguration.getApiEndpoint());
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Invalid URL, " + e.getMessage());
    }
    if (!"http".equalsIgnoreCase(apiEndpoint.getScheme()) && !"https".equalsIgnoreCase(apiEndpoint.getScheme())) {
        throw new IllegalArgumentException("Only http and https schemes are supported");
    } else if (!"api.github.com".equalsIgnoreCase(apiEndpoint.getHost()) && !apiEndpoint.getPath().toLowerCase(Locale.ENGLISH).startsWith("/api/v3")) {
        // Rejects URLs that are neither api.github.com (any path) nor a
        // "/api/v3" path on some other host (GitHub Enterprise convention).
        throw new IllegalArgumentException("Invalid GitHub URL");
    }
}
@Test
@UseDataProvider("invalidApiEndpoints")
public void checkApiEndpoint_Invalid(String url, String expectedMessage) {
    // Each invalid endpoint must be rejected with its specific error message.
    GithubAppConfiguration configuration = new GithubAppConfiguration(1L, "", url);

    assertThatThrownBy(() -> underTest.checkApiEndpoint(configuration))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage(expectedMessage);
}
/** Human-readable callback name; flagged deprecated for end users. */
@Override
public String getName() {
    return "HTTP Alarm Callback [Deprecated]";
}
@Test
public void getNameReturnsNameOfHTTPAlarmCallback() throws Exception {
    // The display name must carry the deprecation marker.
    assertThat(alarmCallback.getName()).isEqualTo("HTTP Alarm Callback [Deprecated]");
}
/**
 * Creates a deduplicating transform keyed by a representative value extracted
 * from each element, using the default time domain and duration. The
 * representative coder and type descriptor are left unset here (null) and may
 * be supplied later, e.g. via withRepresentativeCoder.
 */
public static <T, IdT> Deduplicate.WithRepresentativeValues<T, IdT> withRepresentativeValueFn(
    SerializableFunction<T, IdT> representativeValueFn) {
  return new Deduplicate.WithRepresentativeValues<T, IdT>(
      DEFAULT_TIME_DOMAIN, DEFAULT_DURATION, representativeValueFn, null, null);
}
@Test
@Category({NeedsRunner.class, UsesTestStreamWithProcessingTime.class})
public void testRepresentativeValuesWithCoder() {
    Instant base = new Instant(0);
    // Each key appears twice; deduplication by key must keep one value per key.
    TestStream<KV<Long, String>> values =
        TestStream.create(KvCoder.of(VarLongCoder.of(), StringUtf8Coder.of()))
            .advanceWatermarkTo(base)
            .addElements(
                TimestampedValue.of(KV.of(1L, "k1"), base),
                TimestampedValue.of(KV.of(2L, "k2"), base.plus(Duration.standardSeconds(10))),
                TimestampedValue.of(KV.of(3L, "k3"), base.plus(Duration.standardSeconds(20))))
            .advanceProcessingTime(Duration.standardMinutes(1))
            .addElements(
                TimestampedValue.of(KV.of(1L, "k1"), base.plus(Duration.standardSeconds(30))),
                TimestampedValue.of(KV.of(2L, "k2"), base.plus(Duration.standardSeconds(40))),
                TimestampedValue.of(KV.of(3L, "k3"), base.plus(Duration.standardSeconds(50))))
            .advanceWatermarkToInfinity();

    PCollection<KV<Long, String>> distinctValues =
        p.apply(values)
            .apply(
                Deduplicate.withRepresentativeValueFn(new Keys<Long>())
                    .withRepresentativeCoder(VarLongCoder.of()));
    PAssert.that(distinctValues)
        .containsInAnyOrder(KV.of(1L, "k1"), KV.of(2L, "k2"), KV.of(3L, "k3"));
    p.run();
}
/** Returns true when the statement's type is in the prepare-statement allow-list. */
public static boolean isAllowedStatement(final SQLStatement sqlStatement) {
    return ALLOWED_SQL_STATEMENTS.contains(sqlStatement.getClass());
}
@Test
void assertIsStatementAllowed() {
    // One instance of every statement type expected in the allow-list.
    Collection<SQLStatement> sqlStatements = Arrays.asList(
        new MySQLAlterTableStatement(), new MySQLAlterUserStatement(), new MySQLAnalyzeTableStatement(),
        new MySQLCacheIndexStatement(), new MySQLCallStatement(), new MySQLChangeMasterStatement(),
        new MySQLChecksumTableStatement(), new MySQLCommitStatement(), new MySQLCreateIndexStatement(),
        new MySQLDropIndexStatement(), new MySQLCreateDatabaseStatement(), new MySQLDropDatabaseStatement(),
        new MySQLCreateTableStatement(false), new MySQLDropTableStatement(false), new MySQLCreateUserStatement(),
        new MySQLRenameUserStatement(), new MySQLDropUserStatement(), new MySQLCreateViewStatement(),
        new MySQLDropViewStatement(), new MySQLDeleteStatement(), new MySQLDoStatement(),
        new MySQLFlushStatement(), new MySQLGrantStatement(), new MySQLInsertStatement(),
        new MySQLInstallPluginStatement(), new MySQLKillStatement(), new MySQLLoadIndexInfoStatement(),
        new MySQLOptimizeTableStatement(), new MySQLRenameTableStatement(), new MySQLRepairTableStatement(),
        new MySQLResetStatement(), new MySQLRevokeStatement(), new MySQLSelectStatement(),
        new MySQLSetStatement(), new MySQLShowWarningsStatement(), new MySQLShowErrorsStatement(),
        new MySQLShowBinlogEventsStatement(), new MySQLShowCreateProcedureStatement(),
        new MySQLShowCreateFunctionStatement(), new MySQLShowCreateEventStatement(),
        new MySQLShowCreateTableStatement(), new MySQLShowCreateViewStatement(),
        new MySQLShowBinaryLogsStatement(), new MySQLShowStatusStatement(), new MySQLStartSlaveStatement(),
        new MySQLStopSlaveStatement(), new MySQLTruncateStatement(), new MySQLUninstallPluginStatement(),
        new MySQLUpdateStatement(), new XABeginStatement("1"), new XAPrepareStatement("1"),
        new XACommitStatement("1"), new XARollbackStatement("1"), new XAEndStatement("1"),
        new XARecoveryStatement());
    for (SQLStatement each : sqlStatements) {
        assertTrue(MySQLComStmtPrepareChecker.isAllowedStatement(each));
    }
}
/**
 * Topic configs for the group metadata (offsets) topic: compacted cleanup,
 * producer-chosen compression, and the configured segment size.
 */
@Override
public Properties groupMetadataTopicConfigs() {
    Properties properties = new Properties();
    properties.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
    properties.put(TopicConfig.COMPRESSION_TYPE_CONFIG, BrokerCompressionType.PRODUCER.name);
    properties.put(TopicConfig.SEGMENT_BYTES_CONFIG, String.valueOf(config.offsetsTopicSegmentBytes()));
    return properties;
}
@Test
public void testGroupMetadataTopicConfigs() {
    CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
    GroupCoordinatorService service = new GroupCoordinatorService(
        new LogContext(),
        createConfig(),
        runtime,
        new GroupCoordinatorMetrics(),
        createConfigManager()
    );

    // The configured segment size (1000 in createConfig()) must be surfaced
    // alongside the fixed compaction/compression settings.
    Properties expectedProperties = new Properties();
    expectedProperties.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
    expectedProperties.put(TopicConfig.COMPRESSION_TYPE_CONFIG, BrokerCompressionType.PRODUCER.name);
    expectedProperties.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1000");

    assertEquals(expectedProperties, service.groupMetadataTopicConfigs());
}
/**
 * Builds the topology step for a V1 SELECT KEY (re-key) operation: rows are
 * filtered to those with a non-null value and an extractable key, then
 * re-keyed by the evaluated key expression.
 */
public static KStreamHolder<GenericKey> build(
    final KStreamHolder<?> stream,
    final StreamSelectKeyV1 selectKey,
    final RuntimeBuildContext buildContext
) {
  final LogicalSchema sourceSchema = stream.getSchema();

  // Compile the key expression against the source schema.
  final CompiledExpression expression = buildExpressionEvaluator(
      selectKey,
      buildContext,
      sourceSchema
  );

  final ProcessingLogger processingLogger = buildContext
      .getProcessingLogger(selectKey.getProperties().getQueryContext());

  final String errorMsg = "Error extracting new key using expression "
      + selectKey.getKeyExpression();

  // Evaluation errors are reported to the processing logger with errorMsg.
  final Function<GenericRow, Object> evaluator = val -> expression
      .evaluate(val, null, processingLogger, () -> errorMsg);

  final LogicalSchema resultSchema =
      new StepSchemaResolver(buildContext.getKsqlConfig(), buildContext.getFunctionRegistry()).resolve(selectKey, sourceSchema);

  final KStream<?, GenericRow> kstream = stream.getStream();

  // Drop rows whose value is null or whose new key evaluates to null, then
  // re-key. Note the evaluator runs twice per surviving row (filter + selectKey).
  final KStream<GenericKey, GenericRow> rekeyed = kstream
      .filter((key, val) -> val != null && evaluator.apply(val) != null)
      .selectKey((key, val) -> GenericKey.genericKey(evaluator.apply(val)));

  return new KStreamHolder<>(
      rekeyed,
      resultSchema,
      ExecutionKeyFactory.unwindowed(buildContext)
  );
}
@Test
public void shouldReturnCorrectSerdeFactory() {
    // When: building the step and requesting a key serde from its key factory.
    final KStreamHolder<GenericKey> result = selectKey.build(planBuilder, planInfo);

    // Then: the request is delegated to the build context unchanged.
    result.getExecutionKeyFactory().buildKeySerde(
        FormatInfo.of(FormatFactory.JSON.name()),
        PhysicalSchema.from(SOURCE_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of()),
        queryContext
    );
    verify(buildContext).buildKeySerde(
        FormatInfo.of(FormatFactory.JSON.name()),
        PhysicalSchema.from(SOURCE_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of()),
        queryContext);
}
/**
 * Assembles the complete Elasticsearch settings map (file system, network,
 * cluster, security and miscellaneous settings) and logs the HTTP/TCP
 * listen addresses.
 */
public Map<String, String> build() {
    Map<String, String> builder = new HashMap<>();
    configureFileSystem(builder);
    configureNetwork(builder);
    configureCluster(builder);
    configureSecurity(builder);
    configureOthers(builder);
    LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
        builder.get(ES_HTTP_HOST_KEY), builder.get(ES_HTTP_PORT_KEY),
        builder.get(ES_TRANSPORT_HOST_KEY), builder.get(ES_TRANSPORT_PORT_KEY));
    return builder;
}
@Test
public void set_initial_master_nodes_settings_if_cluster_is_enabled() throws Exception {
    // With clustering enabled, the configured ES hosts become the initial
    // master nodes and a longer initial-state timeout is applied.
    Props props = minProps(CLUSTER_ENABLED);
    props.set(CLUSTER_ES_HOSTS.getKey(), "1.2.3.4:9000,1.2.3.5:8080");
    Map<String, String> settings = new EsSettings(props, new EsInstallation(props), System2.INSTANCE).build();

    assertThat(settings)
        .containsEntry("cluster.initial_master_nodes", "1.2.3.4:9000,1.2.3.5:8080")
        .containsEntry("discovery.initial_state_timeout", "120s");
}
public static StackTraceElement[] extract(Throwable t, String fqnOfInvokingClass, final int maxDepth, List<String> frameworkPackageList) { if (t == null) { return null; } StackTraceElement[] steArray = t.getStackTrace(); StackTraceElement[] callerDataArray; int found = LINE_NA; for (int i = 0; i < steArray.length; i++) { if (isInFrameworkSpace(steArray[i].getClassName(), fqnOfInvokingClass, frameworkPackageList)) { // the caller is assumed to be the next stack frame, hence the +1. found = i + 1; } else { if (found != LINE_NA) { break; } } } // we failed to extract caller data if (found == LINE_NA) { return EMPTY_CALLER_DATA_ARRAY; } int availableDepth = steArray.length - found; int desiredDepth = maxDepth < (availableDepth) ? maxDepth : availableDepth; callerDataArray = new StackTraceElement[desiredDepth]; for (int i = 0; i < desiredDepth; i++) { callerDataArray[i] = steArray[found + i]; } return callerDataArray; }
@Test
public void testDeferredProcessing() {
    // No frame belongs to the nonexistent "framework" class, so extraction
    // fails and the shared zero-length array is returned (never null here).
    StackTraceElement[] cda = CallerData.extract(new Throwable(), "com.inexistent.foo", 10, null);
    assertNotNull(cda);
    assertEquals(0, cda.length);
}
/**
 * Parses an RSA public key from a base64 certificate body (PEM content
 * without header/footer lines; these are added here).
 *
 * @param pem base64-encoded certificate body, no PEM header/footer
 * @return the certificate's RSA public key
 * @throws ServletException when the certificate cannot be parsed; the message
 *         hints at an accidentally included PEM header
 */
public static RSAPublicKey parseRSAPublicKey(String pem) throws ServletException {
    String fullPem = PEM_HEADER + pem + PEM_FOOTER;
    try {
        CertificateFactory fact = CertificateFactory.getInstance("X.509");
        ByteArrayInputStream is = new ByteArrayInputStream(
            fullPem.getBytes(StandardCharsets.UTF_8));
        X509Certificate cer = (X509Certificate) fact.generateCertificate(is);
        return (RSAPublicKey) cer.getPublicKey();
    } catch (CertificateException ce) {
        final String message;
        if (pem.startsWith(PEM_HEADER)) {
            message = "CertificateException - be sure not to include PEM header "
                + "and footer in the PEM configuration element.";
        } else {
            message = "CertificateException - PEM may be corrupt";
        }
        throw new ServletException(message, ce);
    }
}
@Test
public void testValidPEM() throws Exception {
    // A valid base64 certificate body (no PEM header/footer) must parse into
    // an RSA public key without throwing.
    String pem = "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
        + "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
        + "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
        + "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
        + "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
        + "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
        + "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
        + "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
        + "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
        + "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
        + "Mzc1xA==";
    try {
        RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
        assertNotNull(pk);
        assertEquals("RSA", pk.getAlgorithm());
    } catch (ServletException se) {
        fail("Should not have thrown ServletException");
    }
}
/**
 * Computes input statistics (total split size in MB) for the Parquet input
 * at the given location.
 *
 * @return the statistics, or null when split enumeration was interrupted
 */
@Override
public ResourceStatistics getStatistics(String location, Job job) throws IOException {
  if (LOG.isDebugEnabled()) {
    String jobToString = String.format("job[id=%s, name=%s]", job.getJobID(), job.getJobName());
    LOG.debug("LoadMetadata.getStatistics({}, {})", location, jobToString);
  }
  /* We need to call setInput since setLocation is not guaranteed to be called before this */
  setInput(location, job);
  long length = 0;
  try {
    for (InputSplit split : getParquetInputFormat().getSplits(job)) {
      length += split.getLength();
    }
  } catch (InterruptedException e) {
    LOG.warn("Interrupted", e);
    // Restore the interrupt flag and bail out without statistics.
    Thread.currentThread().interrupt();
    return null;
  }
  ResourceStatistics stats = new ResourceStatistics();
  // TODO use pig-0.12 setBytes api when its available
  stats.setmBytes(length / 1024 / 1024);
  return stats;
}
@Test
public void testPredicatePushdown() throws Exception {
    // With predicate pushdown enabled, a FILTER on c1 should cause only the
    // matching records to be read from the Parquet file.
    Configuration conf = new Configuration();
    conf.setBoolean(ParquetLoader.ENABLE_PREDICATE_FILTER_PUSHDOWN, true);
    PigServer pigServer = new PigServer(ExecType.LOCAL, conf);
    pigServer.setValidateEachStatement(true);
    String out = "target/out";
    String out2 = "target/out2";
    int rows = 10;
    Data data = Storage.resetData(pigServer);
    List<Tuple> list = new ArrayList<Tuple>();
    for (int i = 0; i < rows; i++) {
        list.add(Storage.tuple(i, i * 1.0, i * 2L, "v" + i));
    }
    data.set("in", "c1:int, c2:double, c3:long, c4:chararray", list);
    // Write the rows to a Parquet file first.
    pigServer.setBatchOn();
    pigServer.registerQuery("A = LOAD 'in' USING mock.Storage();");
    pigServer.deleteFile(out);
    pigServer.registerQuery("Store A into '" + out + "' using " + ParquetStorer.class.getName() + "();");
    pigServer.executeBatch();
    // Read back with a filter; pushdown should prune non-matching records.
    pigServer.deleteFile(out2);
    pigServer.registerQuery("B = LOAD '" + out + "' using " + ParquetLoader.class.getName()
        + "('c1:int, c2:double, c3:long, c4:chararray');");
    pigServer.registerQuery("C = FILTER B by c1 == 1 or c1 == 5;");
    pigServer.registerQuery("STORE C into '" + out2 + "' using mock.Storage();");
    List<ExecJob> jobs = pigServer.executeBatch();
    long recordsRead = jobs.get(0).getStatistics().getInputStats().get(0).getNumberRecords();
    // Only the two rows matching the predicate should have been read.
    assertEquals(2, recordsRead);
}
/** Returns true when the element with the given id is currently in the heap. */
public boolean contains(int id) {
    checkIdInRange(id);
    // positions[id] holds the element's heap slot, or NOT_PRESENT when absent.
    return positions[id] != NOT_PRESENT;
}
@Test
void testContains() {
    create(4);
    push(1, 0.1f);
    push(2, 0.7f);
    push(0, 0.5f);
    // Never-pushed id is absent; pushed id is present until it is polled.
    assertFalse(contains(3));
    assertTrue(contains(1));
    assertEquals(1, poll());
    assertFalse(contains(1));
}
/**
 * Returns the single value this range represents.
 *
 * @throws IllegalStateException when the range spans more than one value
 */
public Object getSingleValue() {
    if (!isSingleValue()) {
        throw new IllegalStateException("Range does not have just a single value");
    }
    return low.getValue();
}
@Test
public void testGetSingleValue() {
    // An equality range has exactly one value; an open range does not.
    assertEquals(Range.equal(BIGINT, 0L).getSingleValue(), 0L);
    assertThrows(IllegalStateException.class, () -> Range.lessThan(BIGINT, 0L).getSingleValue());
}
/**
 * Creates a ReadRows transform with defaults: the default fetch size, output
 * parallelization enabled, and a no-op statement preparator.
 */
public static ReadRows readRows() {
  return new AutoValue_JdbcIO_ReadRows.Builder()
      .setFetchSize(DEFAULT_FETCH_SIZE)
      .setOutputParallelization(true)
      .setStatementPreparator(ignored -> {})
      .build();
}
@Test
public void testReadRowsWithoutStatementPreparator() {
    // readRows() must work without an explicit statement preparator, inferring
    // the Beam schema from the JDBC metadata.
    SerializableFunction<Void, DataSource> dataSourceProvider = ignored -> DATA_SOURCE;
    String name = TestRow.getNameForSeed(1);
    PCollection<Row> rows =
        pipeline.apply(
            JdbcIO.readRows()
                .withDataSourceProviderFn(dataSourceProvider)
                .withQuery(
                    String.format(
                        "select name,id from %s where name = '%s'", READ_TABLE_NAME, name)));

    Schema expectedSchema =
        Schema.of(
            Schema.Field.of("NAME", LogicalTypes.variableLengthString(JDBCType.VARCHAR, 500))
                .withNullable(true),
            Schema.Field.of("ID", Schema.FieldType.INT32).withNullable(true));

    assertEquals(expectedSchema, rows.getSchema());

    PCollection<Row> output = rows.apply(Select.fieldNames("NAME", "ID"));
    PAssert.that(output)
        .containsInAnyOrder(
            ImmutableList.of(Row.withSchema(expectedSchema).addValues(name, 1).build()));

    pipeline.run();
}
/**
 * Fetches runtime statistics for the given broker by delegating to the
 * underlying admin implementation.
 */
@Override
public KVTable fetchBrokerRuntimeStats(
    final String brokerAddr) throws RemotingConnectException, RemotingSendRequestException,
    RemotingTimeoutException, InterruptedException, MQBrokerException {
    return this.defaultMQAdminExtImpl.fetchBrokerRuntimeStats(brokerAddr);
}
@Test
public void testFetchBrokerRuntimeStats() throws InterruptedException, MQBrokerException,
    RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException {
    KVTable brokerStats = defaultMQAdminExt.fetchBrokerRuntimeStats("127.0.0.1:10911");
    // The stubbed broker reports the master id and its configured name.
    assertThat(brokerStats.getTable().get("id")).isEqualTo(String.valueOf(MixAll.MASTER_ID));
    assertThat(brokerStats.getTable().get("brokerName")).isEqualTo("default-broker");
}
/** Subscribes the shared memory-pool listener to every monitored pool. */
private void registerPoolListeners() {
    memoryPools.forEach(memoryPool -> memoryPool.addListener(memoryPoolListener));
}
@Test
public void testTaskThresholdRevokingSchedulerImmediate() throws Exception {
    // Two tasks sharing one pool; revoking threshold per task is 5 bytes.
    SqlTask sqlTask1 = newSqlTask(new QueryId("query"), memoryPool);
    TestOperatorContext operatorContext11 = createTestingOperatorContexts(sqlTask1, "operator11");
    TestOperatorContext operatorContext12 = createTestingOperatorContexts(sqlTask1, "operator12");

    SqlTask sqlTask2 = newSqlTask(new QueryId("query"), memoryPool);
    TestOperatorContext operatorContext2 = createTestingOperatorContexts(sqlTask2, "operator2");

    allOperatorContexts = ImmutableSet.of(operatorContext11, operatorContext12, operatorContext2);
    List<SqlTask> tasks = ImmutableList.of(sqlTask1, sqlTask2);
    ImmutableMap<TaskId, SqlTask> taskMap = ImmutableMap.of(sqlTask1.getTaskId(), sqlTask1, sqlTask2.getTaskId(), sqlTask2);
    TaskThresholdMemoryRevokingScheduler scheduler = new TaskThresholdMemoryRevokingScheduler(
        singletonList(memoryPool), () -> tasks, taskMap::get, singleThreadedScheduledExecutor, 5L);
    scheduler.registerPoolListeners(); // no periodic check initiated

    assertMemoryRevokingNotRequested();

    operatorContext11.localRevocableMemoryContext().setBytes(3);
    operatorContext2.localRevocableMemoryContext().setBytes(2);
    // at this point, Task1 = 3 total bytes, Task2 = 2 total bytes

    // this ensures that we are waiting for the memory revocation listener and not using polling-based revoking
    awaitTaskThresholdAsynchronousCallbacksRun();
    assertMemoryRevokingNotRequested();

    operatorContext12.localRevocableMemoryContext().setBytes(3);
    // at this point, Task1 = 6 total bytes, Task2 = 2 total bytes

    awaitTaskThresholdAsynchronousCallbacksRun();
    // only operator11 should revoke since we need to revoke only 1 byte
    // threshold - (operator11 + operator12) => 5 - (3 + 3) = 1 bytes to revoke
    assertMemoryRevokingRequestedFor(operatorContext11);

    // revoke 2 bytes in operator11
    operatorContext11.localRevocableMemoryContext().setBytes(1);
    // at this point, Task1 = 3 total bytes, Task2 = 2 total bytes
    operatorContext11.resetMemoryRevokingRequested();
    awaitTaskThresholdAsynchronousCallbacksRun();
    assertMemoryRevokingNotRequested();

    operatorContext12.localRevocableMemoryContext().setBytes(6); // operator12 fills up
    // at this point, Task1 = 7 total bytes, Task2 = 2 total bytes

    awaitTaskThresholdAsynchronousCallbacksRun();
    // both operator11 and operator 12 are revoking since we revoke in order of operator creation within the task until we are below the memory revoking threshold
    assertMemoryRevokingRequestedFor(operatorContext11, operatorContext12);

    operatorContext11.localRevocableMemoryContext().setBytes(2);
    operatorContext11.resetMemoryRevokingRequested();
    operatorContext12.localRevocableMemoryContext().setBytes(2);
    operatorContext12.resetMemoryRevokingRequested();
    // at this point, Task1 = 4 total bytes, Task2 = 2 total bytes

    awaitTaskThresholdAsynchronousCallbacksRun();
    assertMemoryRevokingNotRequested(); // no need to revoke

    operatorContext2.localRevocableMemoryContext().setBytes(6);
    // at this point, Task1 = 4 total bytes, Task2 = 6 total bytes, operators in Task2 must be revoked

    awaitTaskThresholdAsynchronousCallbacksRun();
    assertMemoryRevokingRequestedFor(operatorContext2);
}
@Override public Long del(byte[]... keys) { if (isQueueing() || isPipelined()) { for (byte[] key: keys) { write(key, LongCodec.INSTANCE, RedisCommands.DEL, key); } return null; } CommandBatchService es = new CommandBatchService(executorService); for (byte[] key: keys) { es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key); } BatchResult<Long> b = (BatchResult<Long>) es.execute(); return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum(); }
@Test public void testDelPipeline() { byte[] k = "key".getBytes(); byte[] v = "val".getBytes(); connection.set(k, v); connection.openPipeline(); connection.get(k); connection.del(k); List<Object> results = connection.closePipeline(); byte[] val = (byte[])results.get(0); assertThat(val).isEqualTo(v); Long res = (Long) results.get(1); assertThat(res).isEqualTo(1); }
@Override public String getMethod() { return PATH; }
@Test public void testDeleteMyCommandsWithEmptyScope() { DeleteMyCommands deleteMyCommands = DeleteMyCommands.builder().build(); assertEquals("deleteMyCommands", deleteMyCommands.getMethod()); assertDoesNotThrow(deleteMyCommands::validate); }
@Deprecated public void awaitRunning(final Runnable runnable) { LOG.debug("Waiting for server to enter RUNNING state"); Uninterruptibles.awaitUninterruptibly(runningLatch); LOG.debug("Server entered RUNNING state"); try { LOG.debug("Executing awaitRunning callback"); runnable.run(); } catch (Exception e) { LOG.error("awaitRunning callback failed", e); } }
@Test public void testAwaitRunning() throws Exception { final Runnable runnable = mock(Runnable.class); final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch stopLatch = new CountDownLatch(1); new Thread(new Runnable() { @Override public void run() { try { startLatch.countDown(); status.awaitRunning(runnable); } finally { stopLatch.countDown(); } } }).start(); startLatch.await(5, TimeUnit.SECONDS); verify(runnable, never()).run(); status.start(); stopLatch.await(5, TimeUnit.SECONDS); verify(runnable).run(); }
public static String getGroupFromGrpcClient(AlluxioConfiguration conf) { try { User user = AuthenticatedClientUser.get(conf); if (user == null) { return ""; } return CommonUtils.getPrimaryGroupName(user.getName(), conf); } catch (IOException e) { return ""; } }
@Test public void getGroupFromGrpcClient() throws Exception { // When security is not enabled, user and group are not set mConfiguration.set(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.NOSASL); assertEquals("", SecurityUtils.getGroupFromGrpcClient(mConfiguration)); mConfiguration.set(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.SIMPLE); mConfiguration.set(PropertyKey.SECURITY_GROUP_MAPPING_CLASS, IdentityUserGroupsMapping.class.getName()); AuthenticatedClientUser.set("test_client_user"); assertEquals("test_client_user", SecurityUtils.getGroupFromGrpcClient(mConfiguration)); }
@Description("converts the string to lower case") @ScalarFunction("lower") @LiteralParameters("x") @SqlType("char(x)") public static Slice charLower(@SqlType("char(x)") Slice slice) { return lower(slice); }
@Test public void testCharLower() { assertFunction("LOWER(CAST('' AS CHAR(10)))", createCharType(10), padRight("", 10)); assertFunction("LOWER(CAST('Hello World' AS CHAR(11)))", createCharType(11), padRight("hello world", 11)); assertFunction("LOWER(CAST('WHAT!!' AS CHAR(6)))", createCharType(6), padRight("what!!", 6)); assertFunction("LOWER(CAST('\u00D6STERREICH' AS CHAR(10)))", createCharType(10), padRight(lowerByCodePoint("\u00D6sterreich"), 10)); assertFunction("LOWER(CAST('From\uD801\uDC2DTo' AS CHAR(7)))", createCharType(7), padRight(lowerByCodePoint("from\uD801\uDC2Dto"), 7)); }
public SmppCommand createSmppCommand(SMPPSession session, Exchange exchange) { SmppCommandType commandType = SmppCommandType.fromExchange(exchange); return commandType.createCommand(session, configuration); }
@Test public void createSmppDataSmCommand() { SMPPSession session = new SMPPSession(); Exchange exchange = new DefaultExchange(new DefaultCamelContext()); exchange.getIn().setHeader(SmppConstants.COMMAND, "DataSm"); SmppCommand command = binding.createSmppCommand(session, exchange); assertTrue(command instanceof SmppDataSmCommand); }
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) { return invoke(n, BigDecimal.ZERO); }
@Test void invokeRoundingDown() { FunctionTestUtil.assertResult(roundDownFunction.invoke(BigDecimal.valueOf(10.24)), BigDecimal.valueOf(10)); FunctionTestUtil.assertResult(roundDownFunction.invoke(BigDecimal.valueOf(10.24), BigDecimal.ONE), BigDecimal.valueOf(10.2)); }
@Override public ServiceInfo subscribe(String serviceName, String groupName, String clusters) throws NacosException { NAMING_LOGGER.info("[GRPC-SUBSCRIBE] service:{}, group:{}, cluster:{} ", serviceName, groupName, clusters); redoService.cacheSubscriberForRedo(serviceName, groupName, clusters); return doSubscribe(serviceName, groupName, clusters); }
@Test void testSubscribe() throws Exception { SubscribeServiceResponse res = new SubscribeServiceResponse(); ServiceInfo info = new ServiceInfo(GROUP_NAME + "@@" + SERVICE_NAME + "@@" + CLUSTERS); res.setServiceInfo(info); when(this.rpcClient.request(any())).thenReturn(res); ServiceInfo actual = client.subscribe(SERVICE_NAME, GROUP_NAME, CLUSTERS); assertEquals(info, actual); }
@Override public boolean syncData(DistroData data, String targetServer) { if (isNoExistTarget(targetServer)) { return true; } DistroDataRequest request = new DistroDataRequest(data, data.getType()); Member member = memberManager.find(targetServer); if (checkTargetServerStatusUnhealthy(member)) { Loggers.DISTRO .warn("[DISTRO] Cancel distro sync caused by target server {} unhealthy, key: {}", targetServer, data.getDistroKey()); return false; } try { Response response = clusterRpcClientProxy.sendRequest(member, request); return checkResponse(response); } catch (NacosException e) { Loggers.DISTRO.error("[DISTRO-FAILED] Sync distro data failed! key: {}", data.getDistroKey(), e); } return false; }
@Test void testSyncDataWithCallbackForMemberUnhealthy() throws NacosException { when(memberManager.hasMember(member.getAddress())).thenReturn(true); when(memberManager.find(member.getAddress())).thenReturn(member); transportAgent.syncData(new DistroData(), member.getAddress(), distroCallback); verify(distroCallback).onFailed(null); verify(clusterRpcClientProxy, never()).asyncRequest(any(Member.class), any(), any()); }
public static <T> void concat(T[] sourceFirst, T[] sourceSecond, T[] dest) { System.arraycopy(sourceFirst, 0, dest, 0, sourceFirst.length); System.arraycopy(sourceSecond, 0, dest, sourceFirst.length, sourceSecond.length); }
@Test(expected = NullPointerException.class) public void concat_whenSecondNull() { Integer[] first = new Integer[]{1, 2, 3}; Integer[] second = null; Integer[] concatenated = new Integer[4]; ArrayUtils.concat(first, second, concatenated); fail(); }
public void joinChannel(String name) { joinChannel(configuration.findChannel(name)); }
@Test public void doJoinChannelTestNoKey() { endpoint.joinChannel("#chan1"); verify(connection).doJoin("#chan1"); }
@Override public void cleanup() { for ( ServerSocket serverSocket : serverSockets ) { try { socketRepository.releaseSocket( serverSocket.getLocalPort() ); logDetailed( BaseMessages.getString( PKG, "BaseStep.Log.ReleasedServerSocketOnPort", serverSocket.getLocalPort() ) ); } catch ( IOException e ) { logError( "Cleanup: Unable to release server socket (" + serverSocket.getLocalPort() + ")", e ); } } List<RemoteStep> remoteInputSteps = getRemoteInputSteps(); if ( remoteInputSteps != null ) { cleanupRemoteSteps( remoteInputSteps ); } List<RemoteStep> remoteOutputSteps = getRemoteOutputSteps(); if ( remoteOutputSteps != null ) { cleanupRemoteSteps( remoteOutputSteps ); } }
@Test public void testCleanup() throws IOException { BaseStep baseStep = new BaseStep( mockHelper.stepMeta, mockHelper.stepDataInterface, 0, mockHelper.transMeta, mockHelper.trans ); ServerSocket serverSocketMock = mock( ServerSocket.class ); doReturn( 0 ).when( serverSocketMock ).getLocalPort(); baseStep.setServerSockets( Collections.singletonList( serverSocketMock ) ); SocketRepository socketRepositoryMock = mock( SocketRepository.class ); baseStep.setSocketRepository( socketRepositoryMock ); baseStep.cleanup(); verify( socketRepositoryMock ).releaseSocket( 0 ); }
@Override public void init(final InternalProcessorContext<Void, Void> context) { super.init(context); this.context = context; try { keySerializer = prepareKeySerializer(keySerializer, context, this.name()); } catch (ConfigException | StreamsException e) { throw new StreamsException(String.format("Failed to initialize key serdes for sink node %s", name()), e, context.taskId()); } try { valSerializer = prepareValueSerializer(valSerializer, context, this.name()); } catch (final ConfigException | StreamsException e) { throw new StreamsException(String.format("Failed to initialize value serdes for sink node %s", name()), e, context.taskId()); } }
@Test public void shouldThrowStreamsExceptionOnUndefinedValueSerde() { utilsMock.when(() -> WrappingNullableUtils.prepareValueSerializer(any(), any(), any())) .thenThrow(new ConfigException("Please set StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG")); final Throwable exception = assertThrows(StreamsException.class, () -> sink.init(context)); assertThat( exception.getMessage(), equalTo("Failed to initialize value serdes for sink node anyNodeName") ); assertThat( exception.getCause().getMessage(), equalTo("Please set StreamsConfig#DEFAULT_VALUE_SERDE_CLASS_CONFIG") ); }
@Override public void checkCanSelectFromColumns(ConnectorTransactionHandle transactionHandle, ConnectorIdentity identity, AccessControlContext context, SchemaTableName tableName, Set<Subfield> columnOrSubfieldNames) { Set<String> deniedColumns = new HashSet<>(); for (String column : columnOrSubfieldNames.stream().map(column -> column.getRootName()).collect(toImmutableSet())) { if (!checkAccess(identity, tableName, column, HiveAccessType.SELECT)) { deniedColumns.add(column); } } if (deniedColumns.size() > 0) { denySelectColumns(tableName.getTableName(), columnOrSubfieldNames.stream().map(column -> column.getRootName()).collect(toImmutableSet()), format("Access denied - User [ %s ] does not have [SELECT] " + "privilege on all mentioned columns of [ %s/%s ] ", identity.getUser(), tableName.getSchemaName(), tableName.getTableName())); } }
@Test public void testColumnLevelAccess() { ConnectorAccessControl accessControl = createRangerAccessControl("default-table-column-access.json", "user_groups.json"); // 'analyst' group have read access {group - analyst, user - joe} accessControl.checkCanSelectFromColumns(TRANSACTION_HANDLE, user("joe"), CONTEXT, new SchemaTableName("foodmart", "salary"), ImmutableSet.of(new Subfield("salary_paid"), new Subfield("overtime_paid"))); // Access denied to others {group - readall, user - bob} assertDenied(() -> accessControl.checkCanSelectFromColumns(TRANSACTION_HANDLE, user("bob"), CONTEXT, new SchemaTableName("foodmart", "salary"), ImmutableSet.of(new Subfield("currency_id"), new Subfield("overtime_paid")))); }
@Override public V get() throws InterruptedException, ExecutionException { return peel().get(); }
@Test(expected = ExecutionException.class) public void get_executionExc() throws Exception { ScheduledFuture<Object> outer = createScheduledFutureMock(); ScheduledFuture<Object> inner = createScheduledFutureMock(); when(outer.get()).thenThrow(new ExecutionException(new NullPointerException())); when(inner.get()).thenReturn(2); new DelegatingScheduledFutureStripper<Object>(outer).get(); }
public static String getPrintSize(long size) { DecimalFormat df = new DecimalFormat("#.00"); if (size < KB_SIZE) { return size + "B"; } else if (size < MB_SIZE) { return df.format((double) size / KB_SIZE) + "KB"; } else if (size < GB_SIZE) { return df.format((double) size / MB_SIZE) + "MB"; } else { return df.format((double) size / GB_SIZE) + "GB"; } }
@Test public void assertGetPrintSize() { Assert.isTrue(Objects.equals(ByteConvertUtil.getPrintSize(220), "220B")); Assert.isTrue(Objects.equals(ByteConvertUtil.getPrintSize(2200), "2.15KB")); Assert.isTrue(Objects.equals(ByteConvertUtil.getPrintSize(2200000), "2.10MB")); Assert.isTrue(Objects.equals(ByteConvertUtil.getPrintSize(2200000000L), "2.05GB")); }
@Override public void onRestRequest(RestRequest req, RequestContext requestContext, Map<String, String> wireAttrs, NextFilter<RestRequest, RestResponse> nextFilter) { disruptRequest(req, requestContext, wireAttrs, nextFilter); }
@Test public void testRestTimeoutDisrupt() throws Exception { final RequestContext requestContext = new RequestContext(); requestContext.putLocalAttr(DISRUPT_CONTEXT_KEY, DisruptContexts.timeout()); final DisruptFilter filter = new DisruptFilter(_scheduler, _executor, REQUEST_TIMEOUT, _clock); final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean success = new AtomicBoolean(false); final NextFilter<RestRequest, RestResponse> next = new NextFilter<RestRequest, RestResponse>() { @Override public void onRequest(RestRequest restRequest, RequestContext requestContext, Map<String, String> wireAttrs) { latch.countDown(); } @Override public void onResponse(RestResponse restResponse, RequestContext requestContext, Map<String, String> wireAttrs) { latch.countDown(); } @Override public void onError(Throwable ex, RequestContext requestContext, Map<String, String> wireAttrs) { success.set(ex instanceof TimeoutException); latch.countDown(); } }; filter.onRestRequest(new RestRequestBuilder(new URI(URI)).build(), requestContext, Collections.emptyMap(), next); Assert.assertTrue(latch.await(TEST_TIMEOUT, TimeUnit.MILLISECONDS), "Missing NextFilter invocation"); Assert.assertTrue(success.get(), "Unexpected method invocation"); }
@Override public double measure(MetricConfig config, long now) { purgeObsoleteSamples(config, now); return combine(this.samples, config, now); }
@Test @DisplayName("Sample should be purged if doesn't overlap the window") public void testSampleIsPurgedIfDoesntOverlap() { MetricConfig config = new MetricConfig().timeWindow(1, SECONDS).samples(2); // Monitored window: 2s. Complete a sample and wait 2.5s after. completeSample(config); time.sleep(2500); double numSamples = stat.measure(config, time.milliseconds()); assertEquals(0, numSamples); }
public static Optional<RestartConfig.RestartNode> getNextNode(RestartConfig config) { if (config == null) { return Optional.empty(); } List<RestartConfig.RestartNode> path = config.getRestartPath(); if (path != null && path.size() > 1) { return Optional.of(path.get(path.size() - 2)); } return Optional.empty(); }
@Test public void testGetNextNode() { RestartConfig config = RestartConfig.builder().addRestartNode("foo", 1, "bar").build(); Assert.assertEquals(Optional.empty(), RunRequest.getNextNode(config)); config = config.toBuilder().addRestartNode("bar", 1, "bar").build(); Assert.assertEquals( Optional.of(new RestartConfig.RestartNode("foo", 1, "bar")), RunRequest.getNextNode(config)); }
@VisibleForTesting Pair<String, File> encryptSegmentIfNeeded(File tempDecryptedFile, File tempEncryptedFile, boolean isUploadedSegmentEncrypted, String crypterUsedInUploadedSegment, String crypterClassNameInTableConfig, String segmentName, String tableNameWithType) { boolean segmentNeedsEncryption = StringUtils.isNotEmpty(crypterClassNameInTableConfig); // form the output File finalSegmentFile = (isUploadedSegmentEncrypted || segmentNeedsEncryption) ? tempEncryptedFile : tempDecryptedFile; String crypterClassName = StringUtils.isEmpty(crypterClassNameInTableConfig) ? crypterUsedInUploadedSegment : crypterClassNameInTableConfig; ImmutablePair<String, File> out = ImmutablePair.of(crypterClassName, finalSegmentFile); if (!segmentNeedsEncryption) { return out; } if (isUploadedSegmentEncrypted && !crypterClassNameInTableConfig.equals(crypterUsedInUploadedSegment)) { throw new ControllerApplicationException(LOGGER, String.format( "Uploaded segment is encrypted with '%s' while table config requires '%s' as crypter " + "(segment name = '%s', table name = '%s').", crypterUsedInUploadedSegment, crypterClassNameInTableConfig, segmentName, tableNameWithType), Response.Status.INTERNAL_SERVER_ERROR); } // encrypt segment PinotCrypter pinotCrypter = PinotCrypterFactory.create(crypterClassNameInTableConfig); LOGGER.info("Using crypter class '{}' for encrypting '{}' to '{}' (segment name = '{}', table name = '{}').", crypterClassNameInTableConfig, tempDecryptedFile, tempEncryptedFile, segmentName, tableNameWithType); pinotCrypter.encrypt(tempDecryptedFile, tempEncryptedFile); return out; }
@Test public void testEncryptSegmentIfNeededCrypterInTableConfig() { // arrange boolean uploadedSegmentIsEncrypted = false; String crypterClassNameInTableConfig = "NoOpPinotCrypter"; String crypterClassNameUsedInUploadedSegment = null; // act Pair<String, File> encryptionInfo = _resource .encryptSegmentIfNeeded(_decryptedFile, _encryptedFile, uploadedSegmentIsEncrypted, crypterClassNameUsedInUploadedSegment, crypterClassNameInTableConfig, SEGMENT_NAME, TABLE_NAME); // assert assertEquals("NoOpPinotCrypter", encryptionInfo.getLeft()); assertEquals(_encryptedFile, encryptionInfo.getRight()); }
public void feed(Royalty r) { r.getFed(); }
@Test void testFeed() { final var royalty = mock(Royalty.class); final var servant = new Servant("test"); servant.feed(royalty); verify(royalty).getFed(); verifyNoMoreInteractions(royalty); }
@Override public List<Map<String, String>> taskConfigs(int maxTasks) { if (knownConsumerGroups == null) { // If knownConsumerGroup is null, it means the initial loading has not finished. // An exception should be thrown to trigger the retry behavior in the framework. log.debug("Initial consumer loading has not yet completed"); throw new RetriableException("Timeout while loading consumer groups."); } // if the replication is disabled, known consumer group is empty, or checkpoint emission is // disabled by setting 'emit.checkpoints.enabled' to false, the interval of checkpoint emission // will be negative and no 'MirrorCheckpointTask' will be created if (!config.enabled() || knownConsumerGroups.isEmpty() || config.emitCheckpointsInterval().isNegative()) { return Collections.emptyList(); } int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); List<List<String>> groupsPartitioned = ConnectorUtils.groupPartitions(new ArrayList<>(knownConsumerGroups), numTasks); return IntStream.range(0, numTasks) .mapToObj(i -> config.taskConfigForConsumerGroups(groupsPartitioned.get(i), i)) .collect(Collectors.toList()); }
@Test public void testReplicationEnabled() { // enable the replication MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps("enabled", "true")); Set<String> knownConsumerGroups = new HashSet<>(); knownConsumerGroups.add(CONSUMER_GROUP); // MirrorCheckpointConnector as minimum to run taskConfig() MirrorCheckpointConnector connector = new MirrorCheckpointConnector(knownConsumerGroups, config); List<Map<String, String>> output = connector.taskConfigs(1); // expect 1 task will be created assertEquals(1, output.size(), "Replication for consumer-group-1 has incorrect size"); assertEquals(CONSUMER_GROUP, output.get(0).get(MirrorCheckpointConfig.TASK_CONSUMER_GROUPS), "Replication for consumer-group-1 failed"); }
@Override public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException { if(new DefaultPathPredicate(containerService.getContainer(file)).test(containerService.getContainer(renamed))) { // Either copy complete file contents (small file) or copy manifest (large file) final Path rename = proxy.copy(file, renamed, new TransferStatus().withLength(file.attributes().getSize()), connectionCallback, new DisabledStreamListener()); delete.delete(Collections.singletonMap(file, status), connectionCallback, callback, false); return rename; } else { final Path copy = new SwiftSegmentCopyService(session, regionService).copy(file, renamed, new TransferStatus().withLength(file.attributes().getSize()), connectionCallback, new DisabledStreamListener()); delete.delete(Collections.singletonMap(file, status), connectionCallback, callback); return copy; } }
@Test(expected = NotfoundException.class) public void testMoveNotFound() throws Exception { final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("IAD"); final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); test.attributes().setRegion("IAD"); new SwiftMoveFeature(session).move(test, new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback()); }
static String appendDefaultPort(String address, int defaultPort) { if (StringUtils.isNotEmpty(address) && defaultPort > 0) { int i = address.indexOf(':'); if (i < 0) { return address + ":" + defaultPort; } else if (Integer.parseInt(address.substring(i + 1)) == 0) { return address.substring(0, i + 1) + defaultPort; } } return address; }
@Test void testDefaultPort() { Assertions.assertEquals("10.20.153.10:2181", URL.appendDefaultPort("10.20.153.10:0", 2181)); Assertions.assertEquals("10.20.153.10:2181", URL.appendDefaultPort("10.20.153.10", 2181)); }
@Override public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { //Try to parse TSD file try (RereadableInputStream ris = new RereadableInputStream(stream, 2048, true)) { Metadata TSDAndEmbeddedMetadata = new Metadata(); List<TSDMetas> tsdMetasList = this.extractMetas(ris); this.buildMetas(tsdMetasList, metadata != null && metadata.size() > 0 ? TSDAndEmbeddedMetadata : metadata); XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata); xhtml.startDocument(); ris.rewind(); //Try to parse embedded file in TSD file this.parseTSDContent(ris, xhtml, TSDAndEmbeddedMetadata, context); xhtml.endDocument(); } }
/**
 * Every sample TSD file — regardless of embedded payload type (XML, text,
 * DOCX, PDF, PNG) — must parse to the same TSD envelope metadata. The
 * original repeated the same parse-and-assert block six times; it is
 * extracted into a helper.
 */
@Test
public void testTSDFileData() throws Exception {
    try (InputStream inputXml = getResourceAsStream("/test-documents/MANIFEST.XML.TSD");
            InputStream inputTxt1 = getResourceAsStream("/test-documents/Test1.txt.tsd");
            InputStream inputTxt2 = getResourceAsStream("/test-documents/Test2.txt.tsd");
            InputStream inputDocx = getResourceAsStream("/test-documents/Test3.docx.tsd");
            InputStream inputPdf = getResourceAsStream("/test-documents/Test4.pdf.tsd");
            InputStream inputPng = getResourceAsStream("/test-documents/Test5.PNG.tsd")) {
        assertParsesAsTimestampedData(inputXml);
        assertParsesAsTimestampedData(inputTxt1);
        assertParsesAsTimestampedData(inputTxt2);
        assertParsesAsTimestampedData(inputDocx);
        assertParsesAsTimestampedData(inputPdf);
        assertParsesAsTimestampedData(inputPng);
    }
}

/** Parses one TSD stream with fresh parser/handler/metadata and checks the common envelope metadata. */
private void assertParsesAsTimestampedData(InputStream input) throws Exception {
    TSDParser tsdParser = new TSDParser();
    ContentHandler handler = new BodyContentHandler();
    Metadata metadata = new Metadata();
    ParseContext parseContext = new ParseContext();
    tsdParser.parse(input, handler, metadata, parseContext);
    assertNotNull(handler);
    assertNotNull(metadata);
    assertContains("Description=Time Stamped Data Envelope", metadata.toString());
    assertContains("Content-Type=application/timestamped-data", metadata.toString());
    assertContains("File-Parsed=true", metadata.toString());
}
@Override public synchronized List<HeliumPackage> getAll() throws IOException { List<HeliumPackage> result = new LinkedList<>(); File file = new File(uri()); File [] files = file.listFiles(); if (files == null) { return result; } for (File f : files) { if (f.getName().startsWith(".")) { continue; } HeliumPackage pkgInfo = readPackageInfo(f); if (pkgInfo != null) { result.add(pkgInfo); } } return result; }
@Test void testGetAllPackage() throws IOException { // given File r1Path = new File(tmpDir, "r1"); HeliumLocalRegistry r1 = new HeliumLocalRegistry("r1", r1Path.getAbsolutePath()); assertEquals(0, r1.getAll().size()); // when Gson gson = new Gson(); HeliumPackage pkg1 = newHeliumPackage(HeliumType.APPLICATION, "app1", "desc1", "artifact1", "classname1", new String[][]{}, "license", ""); FileUtils.writeStringToFile(new File(r1Path, "pkg1.json"), gson.toJson(pkg1), StandardCharsets.UTF_8); // then assertEquals(1, r1.getAll().size()); }
@Override public Iterable<Device> getDevices() { return manager.getVirtualDevices( this.networkId).stream().collect(Collectors.toSet()); }
@Test public void testGetDevices() { manager.registerTenantId(TenantId.tenantId(tenantIdValue1)); VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1)); VirtualDevice device1 = manager.createVirtualDevice(virtualNetwork.id(), DID1); VirtualDevice device2 = manager.createVirtualDevice(virtualNetwork.id(), DID2); DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class); // test the getDevices() method Iterator<Device> it = deviceService.getDevices().iterator(); assertEquals("The device set size did not match.", 2, Iterators.size(it)); // test the getAvailableDevices() method Iterator<Device> it2 = deviceService.getAvailableDevices().iterator(); assertEquals("The device set size did not match.", 2, Iterators.size(it2)); // test the getDeviceCount() method assertEquals("The device set size did not match.", 2, deviceService.getDeviceCount()); // test the getDevice() method assertEquals("The expect device did not match.", device1, deviceService.getDevice(DID1)); assertNotEquals("The expect device should not have matched.", device1, deviceService.getDevice(DID2)); // test the isAvailable() method assertTrue("The expect device availability did not match.", deviceService.isAvailable(DID1)); assertFalse("The expect device availability did not match.", deviceService.isAvailable(DID3)); }
@Override public AuthorizationPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) { Capabilities capabilities = capabilities(descriptor.id()); PluggableInstanceSettings authConfigSettings = authConfigSettings(descriptor.id()); PluggableInstanceSettings roleSettings = roleSettings(descriptor.id(), capabilities); Image image = image(descriptor.id()); return new AuthorizationPluginInfo(descriptor, authConfigSettings, roleSettings, image, capabilities); }
@Test public void shouldBuildPluginInfoWithPluginDescriptor() { GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build(); AuthorizationPluginInfo pluginInfo = new AuthorizationPluginInfoBuilder(extension).pluginInfoFor(descriptor); assertThat(pluginInfo.getDescriptor(), is(descriptor)); }
public List<SearchResult> getSearchResults(String query) throws IOException { Elements results = fetchSearchResults(query); List<SearchResult> resultList = new ArrayList<>(); for (Element result : results) { Element title = result.getElementsByClass("links_main").first().getElementsByTag("a").first(); Element snippet = result.getElementsByClass("result__snippet").first(); String snippetText = snippet.text().substring(0, snippet.text().length() > 250 ? 250 : snippet.text().length()); resultList.add(new SearchResult(title.attr("href"), title.text(), snippetText)); } return resultList.subList(0, resultList.size() > 5 ? 5 : resultList.size()); }
@Test void testGetSearchResultsLimitTo5() throws IOException { searchWebAction = new SearchWebAction() { @Override Elements fetchSearchResults(String query) { Elements mockResults = new Elements(); for (int i = 1; i <= 6; i++) { mockResults.add(createMockResult("http://example" + i + ".com", "Title " + i, "Snippet " + i)); } return mockResults; } }; List<SearchWebAction.SearchResult> results = searchWebAction.getSearchResults("test query"); assertEquals(5, results.size()); assertEquals("http://example1.com", results.get(0).url()); assertEquals("http://example5.com", results.get(4).url()); }
@Override public Command<?> buildCommand() { Common.setDeployMode(getDeployMode()); if (checkConfig) { return new SeaTunnelConfValidateCommand(this); } if (encrypt) { return new ConfEncryptCommand(this); } if (decrypt) { return new ConfDecryptCommand(this); } return new ClientExecuteCommand(this); }
@Test public void testExecuteClientCommandArgsWithPluginName() throws FileNotFoundException, URISyntaxException { String configurePath = "/config/fake_to_inmemory.json"; String configFile = MultiTableSinkTest.getTestConfigFile(configurePath); ClientCommandArgs clientCommandArgs = buildClientCommandArgs(configFile); Assertions.assertDoesNotThrow(() -> SeaTunnel.run(clientCommandArgs.buildCommand())); }
public ScanResults run(ScanTarget scanTarget) throws ExecutionException, InterruptedException { return runAsync(scanTarget).get(); }
@Test public void run_whenSomeVulnDetectorFailed_returnsPartiallySucceededScanResult() throws ExecutionException, InterruptedException { Injector injector = Guice.createInjector( new FakeUtcClockModule(), new FakePluginExecutionModule(), new FakePortScannerBootstrapModule(), new FakeServiceFingerprinterBootstrapModule(), new FakeVulnDetectorBootstrapModule(), new FakeRemoteVulnDetectorBootstrapModule(), new FailedVulnDetectorBootstrapModule(), new FailedRemoteVulnDetectorBootstrapModule()); scanningWorkflow = injector.getInstance(DefaultScanningWorkflow.class); ScanResults scanResults = scanningWorkflow.run(buildScanTarget()); assertThat(scanResults.getScanStatus()).isEqualTo(ScanStatus.PARTIALLY_SUCCEEDED); assertThat(scanResults.getStatusMessage()) .contains( "Failed plugins:\n" + "/fake/VULN_DETECTION/FailedVulnDetector/v0.1\n" + "/fake/REMOTE_VULN_DETECTION/FailedRemoteVulnDetector/v0.1"); assertThat(scanResults.getScanFindingsList()).hasSize(2); }
@Override public int getMajorVersion() { return -1; }
@Test public void testGetMajorVersion() { if (driver.getMajorVersion() >= 0) { fail("getMajorVersion"); } }
@PostMapping("/listener") @Secured(action = ActionTypes.READ, signType = SignType.CONFIG) @ExtractorManager.Extractor(httpExtractor = ConfigListenerHttpParamExtractor.class) public void listener(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { request.setAttribute("org.apache.catalina.ASYNC_SUPPORTED", true); String probeModify = request.getParameter("Listening-Configs"); if (StringUtils.isBlank(probeModify)) { LOGGER.warn("invalid probeModify is blank"); throw new IllegalArgumentException("invalid probeModify"); } probeModify = URLDecoder.decode(probeModify, Constants.ENCODE); Map<String, String> clientMd5Map; try { clientMd5Map = MD5Util.getClientMd5Map(probeModify); } catch (Throwable e) { throw new IllegalArgumentException("invalid probeModify"); } // do long-polling inner.doPollingConfig(request, response, clientMd5Map, probeModify.length()); }
@Test
void testListener() throws Exception {
    // POST a probe payload to the listener endpoint and expect a plain 200 OK.
    MockHttpServletRequestBuilder request =
            MockMvcRequestBuilders.post(Constants.CONFIG_CONTROLLER_PATH + "/listener")
                    .param("Listening-Configs", "test");
    int status = mockmvc.perform(request).andReturn().getResponse().getStatus();
    assertEquals(200, status);
}
@Udf
public List<Long> generateSeriesLong(
    @UdfParameter(description = "The beginning of the series") final long start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
  // FIX: compare directly rather than computing (end - start), which can overflow for
  // extreme long values and flip the sign of the default step.
  // end == start still yields a step of -1, matching the original behaviour.
  return generateSeriesLong(start, end, end > start ? 1 : -1);
}
@Test
public void shouldComputeIntRangeWithOddStepLong() {
  // Series 0..9 with step 3 -> [0, 3, 6, 9].
  final List<Long> range = rangeUdf.generateSeriesLong(0, 9, 3);

  assertThat(range, hasSize(4));
  long expected = 0;
  for (final long value : range) {
    assertThat(expected, is(value));
    expected += 3;
  }
}
public final void doesNotContainKey(@Nullable Object key) {
    // Delegate to a keySet() subject so the failure message reads "value of: map.keySet()".
    check("keySet()").that(checkNotNull(actual).keySet()).doesNotContain(key);
}
@Test
public void failMapLacksKey() {
  // A map that does contain the key must make doesNotContainKey fail,
  // and the failure must be phrased in terms of the map's key set.
  ImmutableMap<String, String> actual = ImmutableMap.of("a", "A");

  expectFailureWhenTestingThat(actual).doesNotContainKey("a");

  assertFailureKeys("value of", "expected not to contain", "but was", "map was");
  assertFailureValue("value of", "map.keySet()");
  assertFailureValue("expected not to contain", "a");
  assertFailureValue("but was", "[a]");
}
public static UserAgent parse(String userAgentString) {
    // Thin convenience wrapper; all parsing logic lives in UserAgentParser.
    return UserAgentParser.parse(userAgentString);
}
@Test
public void parseWindows10WithIe11Test() {
    // Typical IE11-on-Windows-10 UA string (Trident/7.0 engine + rv:11.0 marker).
    final String userAgentString =
            "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko";
    final UserAgent userAgent = UserAgentUtil.parse(userAgentString);

    // Browser and rendering engine.
    assertEquals("MSIE11", userAgent.getBrowser().toString());
    assertEquals("11.0", userAgent.getVersion());
    assertEquals("Trident", userAgent.getEngine().toString());
    assertEquals("7.0", userAgent.getEngineVersion());

    // Operating system and platform.
    assertEquals("Windows 10 or Windows Server 2016", userAgent.getOs().toString());
    assertEquals("10.0", userAgent.getOsVersion());
    assertEquals("Windows", userAgent.getPlatform().toString());
    assertFalse(userAgent.isMobile());
}
public void addSecretVersion(String secretId, String secretData) {
  // Validate inputs first; the error messages are part of the public contract.
  checkArgument(!secretId.isEmpty(), "secretId can not be empty");
  checkArgument(!secretData.isEmpty(), "secretData can not be empty");
  checkIsUsable();

  try {
    // Build the fully-qualified secret name and the UTF-8 payload, then append the version.
    final SecretName parent = SecretName.of(projectId, secretId);
    final SecretPayload payload =
        SecretPayload.newBuilder().setData(ByteString.copyFromUtf8(secretData)).build();
    secretManagerServiceClient.addSecretVersion(parent, payload);
  } catch (Exception e) {
    throw new SecretManagerResourceManagerException("Error while adding version to a secret", e);
  }
}
@Test
public void testAddSecretVersionWithInvalidNameShouldFail() {
  // An empty secretData payload must be rejected before any RPC is attempted.
  IllegalArgumentException exception =
      assertThrows(
          IllegalArgumentException.class,
          () -> testManager.addSecretVersion(SECRET_ID, ""));

  assertThat(exception).hasMessageThat().contains("secretData can not be empty");
}
public B id(String id) {
    // Fluent setter; getThis() returns the concrete builder subtype so calls can chain.
    this.id = id;
    return getThis();
}
@Test
void id() {
    // The id set on the builder must be carried through to the built object.
    final Builder builder = new Builder();
    builder.id("id");

    Assertions.assertEquals("id", builder.build().getId());
}
/**
 * Finds the index of the video stream that best matches the wanted resolution and format.
 *
 * Match quality, best first: exact resolution + format; resolution ignoring the refresh
 * rate + format; exact resolution, any format; resolution ignoring refresh rate, any
 * format; finally the first stream with a strictly lower resolution. Returns -1 when
 * nothing at or below the target resolution exists.
 *
 * @param targetResolution wanted resolution, e.g. "720p" or "720p60"
 * @param targetFormat wanted container/format; null acts as a wildcard
 * @param videoStreams candidate streams to search
 */
static int getVideoStreamIndex(@NonNull final String targetResolution,
                               final MediaFormat targetFormat,
                               @NonNull final List<VideoStream> videoStreams) {
    // Candidate indices by decreasing match quality; -1 means "not found yet".
    int fullMatchIndex = -1;
    int fullMatchNoRefreshIndex = -1;
    int resMatchOnlyIndex = -1;
    int resMatchOnlyNoRefreshIndex = -1;
    int lowerResMatchNoRefreshIndex = -1;
    // Strip a trailing refresh rate ("720p60" -> "720p") for the relaxed comparisons.
    final String targetResolutionNoRefresh = targetResolution.replaceAll("p\\d+$", "p");

    for (int idx = 0; idx < videoStreams.size(); idx++) {
        // When no target format is given, force format to null so "format == targetFormat"
        // below is trivially true — i.e. format acts as a wildcard.
        final MediaFormat format
                = targetFormat == null ? null : videoStreams.get(idx).getFormat();
        final String resolution = videoStreams.get(idx).getResolution();
        final String resolutionNoRefresh = resolution.replaceAll("p\\d+$", "p");

        if (format == targetFormat && resolution.equals(targetResolution)) {
            fullMatchIndex = idx;
        }
        if (format == targetFormat && resolutionNoRefresh.equals(targetResolutionNoRefresh)) {
            fullMatchNoRefreshIndex = idx;
        }
        if (resMatchOnlyIndex == -1 && resolution.equals(targetResolution)) {
            resMatchOnlyIndex = idx;
        }
        if (resMatchOnlyNoRefreshIndex == -1
                && resolutionNoRefresh.equals(targetResolutionNoRefresh)) {
            resMatchOnlyNoRefreshIndex = idx;
        }
        // First stream strictly below the target resolution.
        // NOTE(review): this assumes the list is ordered by descending resolution so the
        // first lower stream is also the closest — confirm callers guarantee that order.
        if (lowerResMatchNoRefreshIndex == -1 && compareVideoStreamResolution(
                resolutionNoRefresh, targetResolutionNoRefresh) < 0) {
            lowerResMatchNoRefreshIndex = idx;
        }
    }

    // Return the best available candidate, in priority order.
    if (fullMatchIndex != -1) {
        return fullMatchIndex;
    }
    if (fullMatchNoRefreshIndex != -1) {
        return fullMatchNoRefreshIndex;
    }
    if (resMatchOnlyIndex != -1) {
        return resMatchOnlyIndex;
    }
    if (resMatchOnlyNoRefreshIndex != -1) {
        return resMatchOnlyNoRefreshIndex;
    }
    // May still be -1: no stream matched at or below the target resolution.
    return lowerResMatchNoRefreshIndex;
}
@Test
public void getVideoDefaultStreamIndexCombinations() {
    // Streams ordered by descending resolution; names encode "format-resolution".
    final List<VideoStream> testList = List.of(
            generateVideoStream("mpeg_4-1080", MediaFormat.MPEG_4, "1080p", false),
            generateVideoStream("mpeg_4-720_60", MediaFormat.MPEG_4, "720p60", false),
            generateVideoStream("mpeg_4-720", MediaFormat.MPEG_4, "720p", false),
            generateVideoStream("webm-480", MediaFormat.WEBM, "480p", false),
            generateVideoStream("mpeg_4-360", MediaFormat.MPEG_4, "360p", false),
            generateVideoStream("webm-360", MediaFormat.WEBM, "360p", false),
            generateVideoStream("v3gpp-240_60", MediaFormat.v3GPP, "240p60", false),
            generateVideoStream("webm-144", MediaFormat.WEBM, "144p", false));

    // exact matches (resolution incl. refresh rate AND format)
    assertEquals(1, ListHelper.getVideoStreamIndex("720p60", MediaFormat.MPEG_4, testList));
    assertEquals(2, ListHelper.getVideoStreamIndex("720p", MediaFormat.MPEG_4, testList));

    // match but not refresh (refresh rate ignored, format still matches)
    assertEquals(0, ListHelper.getVideoStreamIndex("1080p60", MediaFormat.MPEG_4, testList));
    assertEquals(6, ListHelper.getVideoStreamIndex("240p", MediaFormat.v3GPP, testList));

    // match but not format (resolution matches; format differs or is a null wildcard)
    assertEquals(1, ListHelper.getVideoStreamIndex("720p60", MediaFormat.WEBM, testList));
    assertEquals(2, ListHelper.getVideoStreamIndex("720p", MediaFormat.WEBM, testList));
    assertEquals(1, ListHelper.getVideoStreamIndex("720p60", null, testList));
    assertEquals(2, ListHelper.getVideoStreamIndex("720p", null, testList));

    // match but not format and not refresh
    assertEquals(0, ListHelper.getVideoStreamIndex("1080p60", MediaFormat.WEBM, testList));
    assertEquals(6, ListHelper.getVideoStreamIndex("240p", MediaFormat.WEBM, testList));
    assertEquals(0, ListHelper.getVideoStreamIndex("1080p60", null, testList));
    assertEquals(6, ListHelper.getVideoStreamIndex("240p", null, testList));

    // match closest lower resolution (144p is the only stream below 200p)
    assertEquals(7, ListHelper.getVideoStreamIndex("200p", MediaFormat.WEBM, testList));
    assertEquals(7, ListHelper.getVideoStreamIndex("200p60", MediaFormat.WEBM, testList));
    assertEquals(7, ListHelper.getVideoStreamIndex("200p", MediaFormat.MPEG_4, testList));
    assertEquals(7, ListHelper.getVideoStreamIndex("200p60", MediaFormat.MPEG_4, testList));
    assertEquals(7, ListHelper.getVideoStreamIndex("200p", null, testList));
    assertEquals(7, ListHelper.getVideoStreamIndex("200p60", null, testList));

    // Can't find a match (nothing at or below 100p)
    assertEquals(-1, ListHelper.getVideoStreamIndex("100p", null, testList));
}
public boolean submitUnknownProcessingError(Message message, String details) {
    // Wrap the caller-supplied details in a single UNKNOWN-cause processing error
    // and hand it to the shared internal submission path.
    final Message.ProcessingError unknownError = new Message.ProcessingError(
            ProcessingFailureCause.UNKNOWN,
            "Encountered an unrecognizable processing error",
            details);
    return submitProcessingErrorsInternal(message, ImmutableList.of(unknownError));
}
@Test
@DisplayName("Ensure Message#getId() is used as a fallback for Message#getMessageId()")
public void submitProcessingErrorWithIdButnoMessageId() throws Exception {
    // given: a message that only exposes getId() (no message id stubbed) and a
    // configuration that both submits failures and keeps the failed message duplicate.
    final Message msg = Mockito.mock(Message.class);
    when(msg.getId()).thenReturn("msg-uuid");
    when(msg.processingErrors()).thenReturn(List.of());
    when(msg.supportsFailureHandling()).thenReturn(true);
    when(failureHandlingConfiguration.submitProcessingFailures()).thenReturn(true);
    when(failureHandlingConfiguration.keepFailedMessageDuplicate()).thenReturn(true);

    // when
    underTest.submitUnknownProcessingError(msg, "Details of the unknown error!");

    // then: exactly one batch is submitted, and the failure message is built from
    // the getId() fallback ("msg-uuid") rather than a message id.
    verify(failureSubmissionQueue, times(1)).submitBlocking(failureBatchCaptor.capture());
    assertThat(failureBatchCaptor.getValue()).satisfies(fb -> {
        assertThat(fb.containsProcessingFailures()).isTrue();
        assertThat(fb.size()).isEqualTo(1);
        assertThat(fb.getFailures().get(0)).satisfies(processingFailure -> {
            assertThat(processingFailure.message()).isEqualTo("Failed to process message with id 'msg-uuid': Encountered an unrecognizable processing error");
        });
    });
}
/**
 * Resolves the namespace from client properties.
 *
 * Resolution order: (1) if cloud-namespace parsing is enabled (the default), try the
 * ACM user tenant, then the Aliware namespace property; (2) otherwise, or when those
 * are blank, fall back to the plain "namespace" property. Never returns null — a blank
 * result collapses to the empty string, and the value is trimmed.
 */
public static String parseNamespace(NacosClientProperties properties) {
    String namespaceTmp = null;

    // The toggle is read from the property key first, then the system-property key,
    // finally the built-in default.
    String isUseCloudNamespaceParsing = properties.getProperty(PropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING,
            properties.getProperty(SystemPropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING,
                    String.valueOf(Constants.DEFAULT_USE_CLOUD_NAMESPACE_PARSING)));

    if (Boolean.parseBoolean(isUseCloudNamespaceParsing)) {
        // Prefer the ACM tenant; when blank, fall back to the Aliware namespace property.
        namespaceTmp = TenantUtil.getUserTenantForAcm();

        namespaceTmp = TemplateUtils.stringBlankAndThenExecute(namespaceTmp, () -> {
            String namespace = properties.getProperty(PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_NAMESPACE);
            return StringUtils.isNotBlank(namespace) ? namespace : StringUtils.EMPTY;
        });
    }

    // Last resort: the plain "namespace" property.
    if (StringUtils.isBlank(namespaceTmp)) {
        namespaceTmp = properties.getProperty(PropertyKeyConst.NAMESPACE);
    }
    return StringUtils.isNotBlank(namespaceTmp) ? namespaceTmp.trim() : StringUtils.EMPTY;
}
@Test
void testParseNamespace() {
    // A plain "namespace" property should be returned verbatim.
    final String expected = "test";
    final Properties properties = new Properties();
    properties.setProperty(PropertyKeyConst.NAMESPACE, expected);
    final NacosClientProperties clientProperties =
            NacosClientProperties.PROTOTYPE.derive(properties);

    assertEquals(expected, ParamUtil.parseNamespace(clientProperties));
}
/**
 * Resolves the class represented by a literal for the given (primitive or boxed) type name.
 * Returns null when no analyzer is registered for {@code className}.
 */
public static Class<?> getLiteral(String className, String literal) {
    LiteralAnalyzer analyzer = ANALYZERS.get( className );
    // FIX: use Class<?> instead of the raw Class type so the local matches the
    // declared return type and no raw-type/unchecked warning is emitted.
    Class<?> result = null;
    if ( analyzer != null ) {
        // validate() inspects the literal before the class is resolved.
        // NOTE(review): presumably a failed validation makes getLiteral() yield null,
        // since callers expect null for malformed literals of known types — confirm.
        analyzer.validate( literal );
        result = analyzer.getLiteral();
    }
    return result;
}
@Test
public void testMiscellaneousErroneousPatterns() {
    // Every literal below is malformed per the Java Language Specification (wrong type
    // suffix, or an underscore in a disallowed position) and must resolve to null.

    // wrong suffix for the declared type
    assertThat( getLiteral( int.class.getCanonicalName(), "1F" ) ).isNull();
    assertThat( getLiteral( float.class.getCanonicalName(), "1D" ) ).isNull();
    // underscores at the start/end of a digit run or adjacent to the 0x prefix
    assertThat( getLiteral( int.class.getCanonicalName(), "_1" ) ).isNull();
    assertThat( getLiteral( int.class.getCanonicalName(), "1_" ) ).isNull();
    assertThat( getLiteral( int.class.getCanonicalName(), "0x_1" ) ).isNull();
    assertThat( getLiteral( int.class.getCanonicalName(), "0_x1" ) ).isNull();
    // underscores adjacent to '.', the exponent marker e/E, or the exponent sign/digits
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9e_-3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9_e-3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4._9e-3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4_.9e-3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "_4.9e-3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9E-3_" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9E_-3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9E-_3" ) ).isNull();
    // double sign / underscore directly after the sign in the exponent
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9E+-3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9E+_3" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "4.9_E-3" ) ).isNull();
    // hex floating point: underscores around the binary-exponent marker P/p
    assertThat( getLiteral( double.class.getCanonicalName(), "0x0.001_P-10d" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "0x0.001P_-10d" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "0x0.001_p-10d" ) ).isNull();
    assertThat( getLiteral( double.class.getCanonicalName(), "0x0.001p_-10d" ) ).isNull();
}