focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Round-robin retry selection: re-resolves instances for serviceName via the
// DiscoveryManager and, when more than one instance exists, filters out the
// instance that just failed (carried in the PolicyContext) so the retry lands
// on a different instance. The per-service load balancer is cached in lbCache.
// NOTE(review): removeIf assumes the instance list handed to the callback is
// mutable (e.g. not Arrays.asList-backed) — confirm DiscoveryManager.choose
// passes a mutable copy, otherwise this throws UnsupportedOperationException.
@Override public Optional<ServiceInstance> select(String serviceName, PolicyContext policyContext) { final ServiceInstance lastInstance = policyContext.getServiceInstance(); return DiscoveryManager.INSTANCE.choose(serviceName, lbCache.computeIfAbsent(serviceName, name -> new RoundRobinLoadbalancer()), (serviceName1, serviceInstances) -> { if (serviceInstances == null || serviceInstances.size() <= 1) { return serviceInstances; } serviceInstances.removeIf(instance -> instance.equals(lastInstance)); return serviceInstances; }); }
/**
 * Verifies round-robin retry selection skips the previously-selected instance:
 * with two registered instances and the first one recorded as the last pick,
 * the policy must return the other instance.
 */
@Test
public void select() {
    String serviceName = "test";
    final ServiceInstance selectedInstance = CommonUtils.buildInstance(serviceName, 8989);
    final ServiceInstance nextInstance = CommonUtils.buildInstance(serviceName, 8888);
    final List<ServiceInstance> serviceInstances = Arrays.asList(selectedInstance, nextInstance);
    try {
        Mockito.when(zkService34.getInstances(serviceName)).thenReturn(serviceInstances);
    } catch (QueryInstanceException e) {
        // Fail loudly instead of swallowing the stubbing failure: the old
        // e.printStackTrace() let the test continue against broken mocks.
        throw new AssertionError("Failed to stub getInstances for " + serviceName, e);
    }
    final RoundRobinRetryPolicy roundRobinRetryPolicy = new RoundRobinRetryPolicy();
    final PolicyContext policyContext = new PolicyContext();
    // Mark the first instance as the one the previous attempt used.
    policyContext.setServiceInstance(selectedInstance);
    final Optional<ServiceInstance> select = roundRobinRetryPolicy.select(serviceName, policyContext);
    Assert.assertTrue(select.isPresent());
    Assert.assertEquals(select.get(), nextInstance);
}
/**
 * Resolves the service instances registered in Eureka for the given id.
 * Returns an empty list (never null) when the application is unknown or has
 * no instances, logging a discovery miss in that case.
 */
@Override
public List<ServiceDTO> getServiceInstances(String serviceId) {
    final Application application = eurekaClient.getApplication(serviceId);
    if (application != null && !CollectionUtils.isEmpty(application.getInstances())) {
        return application.getInstances().stream()
            .map(instanceInfoToServiceDTOFunc)
            .collect(Collectors.toList());
    }
    // Nothing registered (or no live instances): record the miss for tracing.
    Tracer.logEvent("Apollo.Discovery.NotFound", serviceId);
    return Collections.emptyList();
}
// A null application returned by Eureka must translate into an empty
// instance list rather than a null or an exception.
@Test public void testGetServiceInstancesWithNullInstances() { when(eurekaClient.getApplication(someServiceId)).thenReturn(null); assertTrue(defaultDiscoveryService.getServiceInstances(someServiceId).isEmpty()); }
// Handles an OffsetCommit request for a consumer group.
// 1) Validates the request against the group (throws ApiException on failure).
// 2) For classic-protocol groups in Stable or PreparingRebalance state, an
//    offset commit keeps the member session alive, so the member heartbeat is
//    rescheduled.
// 3) Builds one response partition per requested partition: partitions whose
//    committed metadata is too large get OFFSET_METADATA_TOO_LARGE and no
//    record; all others get NONE plus an offset-commit record carrying an
//    optional expiry timestamp derived from the request's retention time.
// 4) Records the commit count metric only when at least one record was built.
// Returns the records to append together with the response payload.
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset( RequestContext context, OffsetCommitRequestData request ) throws ApiException { Group group = validateOffsetCommit(context, request); // In the old consumer group protocol, the offset commits maintain the session if // the group is in Stable or PreparingRebalance state. if (group.type() == Group.GroupType.CLASSIC) { ClassicGroup classicGroup = (ClassicGroup) group; if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) { groupMetadataManager.rescheduleClassicGroupMemberHeartbeat( classicGroup, classicGroup.member(request.memberId()) ); } } final OffsetCommitResponseData response = new OffsetCommitResponseData(); final List<CoordinatorRecord> records = new ArrayList<>(); final long currentTimeMs = time.milliseconds(); final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs); request.topics().forEach(topic -> { final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name()); response.topics().add(topicResponse); topic.partitions().forEach(partition -> { if (isMetadataInvalid(partition.committedMetadata())) { topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code())); } else { log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.", request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(), request.memberId(), partition.committedLeaderEpoch()); topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(Errors.NONE.code())); final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest( partition, currentTimeMs, expireTimestampMs ); 
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord( request.groupId(), topic.name(), partition.partitionIndex(), offsetAndMetadata, metadataImage.features().metadataVersion() )); } }); }); if (!records.isEmpty()) { metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size()); } return new CoordinatorResult<>(records, response); }
// A classic group that has advanced to generation 1 must reject an offset
// commit that carries neither member id nor generation (the request defaults
// are used) with UnknownMemberIdException.
@Test public void testGenericGroupOffsetCommitWithoutMemberIdAndGeneration() { OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build(); // Create an empty group. ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup( "foo", true ); // Add member. group.add(mkGenericMember("member", Optional.of("new-instance-id"))); // Transition to next generation. group.transitionTo(ClassicGroupState.PREPARING_REBALANCE); group.initNextGeneration(); assertEquals(1, group.generationId()); // Verify that the request is rejected with the correct exception. assertThrows(UnknownMemberIdException.class, () -> context.commitOffset( new OffsetCommitRequestData() .setGroupId("foo") .setTopics(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("bar") .setPartitions(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(100L) )) )) ) ); }
// Convenience factory delegating to AvroGenericCoder.of for the given schema.
public static AvroGenericCoder of(Schema schema) { return AvroGenericCoder.of(schema); }
// An array whose element type contains an unordered map is non-deterministic;
// the reported reason points at the element class's map field.
@Test public void testDeterministicNonDeterministicArray() { assertNonDeterministic( AvroCoder.of(NonDeterministicArray.class), reasonField( UnorderedMapClass.class, "mapField", "java.util.Map<java.lang.String, java.lang.String>" + " may not be deterministically ordered")); }
/**
 * Parses serialized XML config content into an editable CruiseConfig, stamping
 * it with the content's MD5 and a file origin.
 *
 * @param content the raw XML configuration
 * @return the parsed config, ready for editing
 * @throws Exception on parse or crypto failures
 */
public CruiseConfig deserializeConfig(String content) throws Exception {
    String md5 = md5Hex(content);
    // Decode with an explicit charset: the previous bare content.getBytes()
    // used the platform default, which can corrupt non-ASCII config content
    // on systems whose default is not UTF-8 (XML's default encoding).
    Element element = parseInputStream(new ByteArrayInputStream(content.getBytes("UTF-8")));
    LOGGER.debug("[Config Save] Updating config cache with new XML");
    CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
    setMd5(configForEdit, md5);
    configForEdit.setOrigins(new FileConfigOrigin());
    return configForEdit;
}
// A label template mixing the COUNT variable and a truncated material
// revision (git[:7]) must survive a serialize/deserialize round trip intact.
@Test void shouldAllowBothCounterAndTruncatedGitMaterialInLabelTemplate() throws Exception { CruiseConfig cruiseConfig = xmlLoader.deserializeConfig(LABEL_TEMPLATE_WITH_LABEL_TEMPLATE("1.3.0-${COUNT}-${git[:7]}")); assertThat(cruiseConfig.pipelineConfigByName(new CaseInsensitiveString("cruise")).getLabelTemplate()).isEqualTo("1.3.0-${COUNT}-${git[:7]}"); }
// Static factory for the fluent Builder.
public static Builder builder() { return new Builder(); }
// Adding the same data file three times (twice by reference, once via a
// builder copy) in a replace-partitions commit must deduplicate to a single
// added file, reflected across all snapshot summary counters.
@TestTemplate public void replacePartitionsWithDuplicates() { assertThat(listManifestFiles()).isEmpty(); table .newReplacePartitions() .addFile(FILE_A) .addFile(DataFiles.builder(SPEC).copy(FILE_A).build()) .addFile(FILE_A) .commit(); assertThat(table.currentSnapshot().summary()) .hasSize(12) .containsEntry(SnapshotSummary.ADDED_FILES_PROP, "1") .containsEntry(SnapshotSummary.ADDED_FILE_SIZE_PROP, "10") .containsEntry(SnapshotSummary.ADDED_RECORDS_PROP, "1") .containsEntry(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP, "1") .containsEntry(SnapshotSummary.REPLACE_PARTITIONS_PROP, "true") .containsEntry(SnapshotSummary.TOTAL_DATA_FILES_PROP, "1") .containsEntry(SnapshotSummary.TOTAL_DELETE_FILES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_EQ_DELETES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_POS_DELETES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_FILE_SIZE_PROP, "10") .containsEntry(SnapshotSummary.TOTAL_RECORDS_PROP, "1"); }
// Serves one page of a worker log file as an HTML response.
// Security: any path that resolves outside logRoot or contains traversal
// segments ("../") is rejected with 404 BEFORE any authorization check, so
// attackers cannot probe which files exist.
// Authorization: users not allowed to read the file get 404 when the file has
// no user-group whitelist, otherwise an "unauthorized" response.
// Paging: length is capped at 10 MiB (10485760); when start is omitted the
// last page of the file is served. ".gz" files are measured via their
// uncompressed length. Non-text files are not rendered inline; a download
// hint is shown instead. An optional grep filter shows only matching lines
// (without pager links). The sibling log files for the same topology are
// listed for navigation, with the current file placed last.
public Response logPage(String fileName, Integer start, Integer length, String grep, String user) throws IOException, InvalidRequestException { Path rawFile = logRoot.resolve(fileName); Path absFile = rawFile.toAbsolutePath().normalize(); if (!absFile.startsWith(logRoot) || !rawFile.normalize().toString().equals(rawFile.toString())) { //Ensure filename doesn't contain ../ parts return LogviewerResponseBuilder.buildResponsePageNotFound(); } if (resourceAuthorizer.isUserAllowedToAccessFile(user, fileName)) { workerLogs.setLogFilePermission(fileName); Path topoDir = absFile.getParent().getParent(); if (absFile.toFile().exists()) { SortedSet<Path> logFiles; try { logFiles = Arrays.stream(topoDir.toFile().listFiles()) .flatMap(portDir -> { try { return directoryCleaner.getFilesForDir(portDir.toPath()).stream(); } catch (IOException e) { throw new RuntimeException(e); } }) .filter(Files::isRegularFile) .collect(toCollection(TreeSet::new)); } catch (UncheckedIOException e) { throw e.getCause(); } List<String> reorderedFilesStr = logFiles.stream() .map(WorkerLogs::getTopologyPortWorkerLog) .filter(fileStr -> !StringUtils.equals(fileName, fileStr)) .collect(toList()); reorderedFilesStr.add(fileName); length = length != null ? Math.min(10485760, length) : LogviewerConstant.DEFAULT_BYTES_PER_PAGE; final boolean isZipFile = absFile.getFileName().toString().endsWith(".gz"); long fileLength = getFileLength(absFile.toFile(), isZipFile); if (start == null) { start = Long.valueOf(fileLength - length).intValue(); } String logString = isTxtFile(fileName) ? escapeHtml(pageFile(absFile.toString(), isZipFile, fileLength, start, length)) : escapeHtml("This is a binary file and cannot display! 
You may download the full file."); List<DomContent> bodyContents = new ArrayList<>(); if (StringUtils.isNotEmpty(grep)) { String matchedString = String.join("\n", Arrays.stream(logString.split("\n")) .filter(str -> str.contains(grep)).collect(toList())); bodyContents.add(pre(matchedString).withId("logContent")); } else { DomContent pagerData = null; if (isTxtFile(fileName)) { pagerData = pagerLinks(fileName, start, length, Long.valueOf(fileLength).intValue(), "log"); } bodyContents.add(searchFileForm(fileName, "no")); // list all files for this topology bodyContents.add(logFileSelectionForm(reorderedFilesStr, fileName, "log")); if (pagerData != null) { bodyContents.add(pagerData); } bodyContents.add(downloadLink(fileName)); bodyContents.add(pre(logString).withClass("logContent")); if (pagerData != null) { bodyContents.add(pagerData); } } String content = logTemplate(bodyContents, fileName, user).render(); return LogviewerResponseBuilder.buildSuccessHtmlResponse(content); } else { return LogviewerResponseBuilder.buildResponsePageNotFound(); } } else { if (resourceAuthorizer.getLogUserGroupWhitelist(fileName) == null) { return LogviewerResponseBuilder.buildResponsePageNotFound(); } else { return LogviewerResponseBuilder.buildResponseUnauthorizedUser(user); } } }
// Path-traversal guard: requesting a file outside the worker log root via a
// "../" segment must yield 404, never the file contents.
@Test public void testLogPageOutsideLogRoot() throws Exception { try (TmpPath rootPath = new TmpPath()) { LogviewerLogPageHandler handler = createHandlerForTraversalTests(rootPath.getFile().toPath()); final Response returned = handler.logPage("../nimbus.log", 0, 100, null, "user"); Utils.forceDelete(rootPath.toString()); //Should not show files outside worker log root. assertThat(returned.getStatus(), is(Response.Status.NOT_FOUND.getStatusCode())); } }
/**
 * Looks up a rule by its key, lazily initializing the cache first.
 * Returns an empty Optional when no rule is registered under the key.
 */
@Override
public Optional<Rule> findByKey(RuleKey key) {
    // Fail fast on an invalid key, then make sure the cache is populated.
    verifyKeyArgument(key);
    ensureInitialized();
    final Rule rule = rulesByKey.get(key);
    return Optional.ofNullable(rule);
}
// Looking up a key that is not persisted must return an empty Optional.
@Test public void findByKey_returns_absent_if_rule_does_not_exist_in_DB() { Optional<Rule> rule = underTest.findByKey(AC_RULE_KEY); assertThat(rule).isEmpty(); }
// Intentional no-op: this proxy implementation has nothing to tear down on
// unsubscribe. Kept to satisfy the interface contract.
@Override public void unsubscribe(String serviceName, String groupName, String clusters) throws NacosException { }
// Smoke test: unsubscribe is a documented no-op and must simply not throw.
@Test void testUnsubscribe() throws Exception { String groupName = "group1"; String serviceName = "serviceName"; String clusters = "clusters"; //when clientProxy.unsubscribe(serviceName, groupName, clusters); // do nothing }
// Single-column convenience overload: wraps the column name in a singleton
// collection and delegates to the multi-column variant.
public List<String> generate(String tableName, String columnName, boolean isAutoGenerated) throws SQLException { return generate(tableName, singleton(columnName), isAutoGenerated); }
// On H2 dropping a PK constraint emits a single ALTER TABLE ... DROP
// CONSTRAINT statement using the discovered constraint name.
@Test public void generate_for_h2() throws SQLException { when(dbConstraintFinder.findConstraintName(TABLE_NAME)).thenReturn(Optional.of(CONSTRAINT)); when(db.getDialect()).thenReturn(H2); List<String> sqls = underTest.generate(TABLE_NAME, PK_COLUMN, true); assertThat(sqls).containsExactly("ALTER TABLE issues DROP CONSTRAINT pk_id"); }
/**
 * Builds the rule-parent path for a plugin:
 * RULE_PARENT + PATH_SEPARATOR + pluginName.
 */
public static String buildRuleParentPath(final String pluginName) {
    // Equivalent to String.join(PATH_SEPARATOR, RULE_PARENT, pluginName).
    return RULE_PARENT + PATH_SEPARATOR + pluginName;
}
// The built path must be non-null and equal RULE_PARENT joined with the
// (random) plugin name by the separator.
@Test public void testBuildRuleParentPath() { String pluginName = RandomStringUtils.randomAlphanumeric(10); String ruleParentPath = DefaultPathConstants.buildRuleParentPath(pluginName); assertThat(ruleParentPath, notNullValue()); assertThat(String.join(SEPARATOR, RULE_PARENT, pluginName), equalTo(ruleParentPath)); }
// Appends an inode lock to this lock list. The lock list must alternate
// edge/inode entries, so locking an inode directly after another inode is an
// illegal state; the name/edge consistency check guards path correctness.
// The requested mode may be upgraded by nextLockMode before acquisition.
@Override public void lockInode(Inode inode, LockMode mode) { mode = nextLockMode(mode); if (!mLocks.isEmpty()) { Preconditions.checkState(!endsInInode(), "Cannot lock inode %s for lock list %s because the lock list already ends in an inode", inode.getId(), this); checkInodeNameAndEdgeNameMatch(inode); } lockAndAddInode(inode, mode); }
// Locking a second inode while the list already ends in an inode (no edge in
// between) must fail with IllegalStateException.
@Test public void lockInodeAfterInode() { mLockList.lockInode(mDirA, LockMode.READ); mThrown.expect(IllegalStateException.class); mLockList.lockInode(mDirB, LockMode.READ); }
// Resolves the replication factor for the sample store topic. An explicitly
// configured value wins; otherwise the cluster is described (with retries)
// and the factor is min(DEFAULT_SAMPLE_STORE_TOPIC_REPLICATION_FACTOR,
// broker count). Requires at least 2 brokers.
// NOTE(review): the retry lambda returns true on the error paths and false on
// the happy path, and the result is then treated as "success" — confirm the
// boolean contract of CruiseControlMetricsUtils.retry (it appears to mean
// "retry exhausted/abort" rather than conventional success).
protected short sampleStoreTopicReplicationFactor(Map<String, ?> config, AdminClient adminClient) { if (_sampleStoreTopicReplicationFactor != null) { return _sampleStoreTopicReplicationFactor; } int maxRetryCount = Integer.parseInt(config.get(MonitorConfig.FETCH_METRIC_SAMPLES_MAX_RETRY_COUNT_CONFIG).toString()); AtomicInteger numberOfBrokersInCluster = new AtomicInteger(0); AtomicReference<String> errorMsg = new AtomicReference<>(""); boolean success = CruiseControlMetricsUtils.retry(() -> { try { numberOfBrokersInCluster.set(adminClient.describeCluster().nodes().get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS).size()); } catch (InterruptedException | ExecutionException | TimeoutException e) { errorMsg.set("Auto creation of sample store topics failed due to failure to describe cluster. " + e); return true; } if (numberOfBrokersInCluster.get() <= 1) { errorMsg.set(String.format("Kafka cluster has less than 2 brokers (brokers in cluster=%d)", numberOfBrokersInCluster.get())); return true; } numberOfBrokersInCluster.set(Math.min(DEFAULT_SAMPLE_STORE_TOPIC_REPLICATION_FACTOR, numberOfBrokersInCluster.get())); return false; }, maxRetryCount); if (success) { return (short) numberOfBrokersInCluster.get(); } else { throw new IllegalStateException(errorMsg.get()); } }
// When the replication factor field is already set, it must be returned
// directly — no AdminClient interaction (verified via the strict mocks).
@Test public void testSampleStoreTopicReplicationFactorWhenValueAlreadyExists() { short expected = 1; Map<String, ?> config = Collections.emptyMap(); AdminClient adminClient = EasyMock.mock(AdminClient.class); AbstractKafkaSampleStore kafkaSampleStore = EasyMock.partialMockBuilder(AbstractKafkaSampleStore.class).createMock(); Whitebox.setInternalState(kafkaSampleStore, "_sampleStoreTopicReplicationFactor", expected); EasyMock.replay(adminClient, kafkaSampleStore); short actual = kafkaSampleStore.sampleStoreTopicReplicationFactor(config, adminClient); assertEquals(expected, actual); EasyMock.verify(adminClient, kafkaSampleStore); }
// Convenience overload: saves the attributes map under the default
// NODE_ATTRIBUTE_GROUPS tag.
public static final void saveAttributesMap( DataNode dataNode, AttributesInterface attributesInterface ) throws KettleException { saveAttributesMap( dataNode, attributesInterface, NODE_ATTRIBUTE_GROUPS ); }
// Passing a null AttributesInterface must be tolerated: no attribute-groups
// node is created on the data node. Static mocking routes both overloads to
// their real implementations so the delegation chain is exercised.
@Test public void testSaveAttributesMap_DefaultTag_NullParameter() throws Exception { try ( MockedStatic<AttributesMapUtil> mockedAttributesMapUtil = mockStatic( AttributesMapUtil.class ) ) { mockedAttributesMapUtil.when( () -> AttributesMapUtil.saveAttributesMap( any( DataNode.class ), any( AttributesInterface.class ) ) ).thenCallRealMethod(); mockedAttributesMapUtil.when( () -> AttributesMapUtil.saveAttributesMap( any( DataNode.class ), any( AttributesInterface.class ), anyString() ) ).thenCallRealMethod(); DataNode dataNode = new DataNode( CNST_DUMMY ); AttributesMapUtil.saveAttributesMap( dataNode, null ); assertNull( dataNode.getNode( AttributesMapUtil.NODE_ATTRIBUTE_GROUPS ) ); } }
// Lenient double read: a missing field yields 0d, any numeric field narrower
// than double is read with its own typed reader and widened, and any
// non-numeric field type raises an incompatible-class-change error.
@Override public double readDouble(@Nonnull String fieldName) throws IOException { FieldDefinition fd = cd.getField(fieldName); if (fd == null) { return 0d; } switch (fd.getType()) { case DOUBLE: return super.readDouble(fieldName); case LONG: return super.readLong(fieldName); case FLOAT: return super.readFloat(fieldName); case INT: return super.readInt(fieldName); case BYTE: return super.readByte(fieldName); case CHAR: return super.readChar(fieldName); case SHORT: return super.readShort(fieldName); default: throw createIncompatibleClassChangeError(fd, DOUBLE); } }
// Every numeric field type must be readable as double (widened), and a
// missing field must read as 0. Expected values come from the test fixture
// written elsewhere in this test class.
@Test public void testReadDouble() throws Exception { double aByte = reader.readDouble("byte"); double aShort = reader.readDouble("short"); double aChar = reader.readDouble("char"); double aInt = reader.readDouble("int"); double aFloat = reader.readDouble("float"); double aLong = reader.readDouble("long"); double aDouble = reader.readDouble("double"); assertEquals(1, aByte, 0); assertEquals(3, aShort, 0); assertEquals(2, aChar, 0); assertEquals(4, aInt, 0); assertEquals(5, aLong, 0); assertEquals(1f, aFloat, 0); assertEquals(2d, aDouble, 0); assertEquals(0, reader.readDouble("NO SUCH FIELD"), 0); }
// Handles a NOTIFY_MIN_BROKER_ID_CHANGE command: logs the transition and
// updates the broker controller's view of the minimum broker id/addresses,
// then acknowledges with SUCCESS.
private RemotingCommand notifyMinBrokerIdChange(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { NotifyMinBrokerIdChangeRequestHeader requestHeader = (NotifyMinBrokerIdChangeRequestHeader) request.decodeCommandCustomHeader(NotifyMinBrokerIdChangeRequestHeader.class); RemotingCommand response = RemotingCommand.createResponseCommand(null); LOGGER.warn("min broker id changed, prev {}, new {}", this.brokerController.getMinBrokerIdInGroup(), requestHeader.getMinBrokerId()); this.brokerController.updateMinBroker(requestHeader.getMinBrokerId(), requestHeader.getMinBrokerAddr(), requestHeader.getOfflineBrokerAddr(), requestHeader.getHaBrokerAddr()); response.setCode(ResponseCode.SUCCESS); response.setRemark(null); return response; }
// A well-formed min-broker-id-change request routed through the admin
// processor must be acknowledged with SUCCESS.
@Test public void testNotifyMinBrokerIdChange() throws RemotingCommandException { NotifyMinBrokerIdChangeRequestHeader requestHeader = new NotifyMinBrokerIdChangeRequestHeader(); requestHeader.setMinBrokerId(1L); requestHeader.setMinBrokerAddr("127.0.0.1:10912"); requestHeader.setOfflineBrokerAddr("127.0.0.1:10911"); requestHeader.setHaBrokerAddr(""); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.NOTIFY_MIN_BROKER_ID_CHANGE, requestHeader); request.makeCustomHeaderToNet(); RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); }
/**
 * Snapshots every pair currently in the set into a new HashSet.
 * The returned set is independent of this collection.
 */
public Set<LongPair> items() {
    final Set<LongPair> snapshot = new HashSet<>();
    forEach((first, second) -> snapshot.add(new LongPair(first, second)));
    return snapshot;
}
// items() must return all pairs, items(limit) only the first `limit`, and
// removing exactly the limited subset must shrink the set accordingly.
@Test public void testItems() { ConcurrentLongPairSet set = ConcurrentLongPairSet.newBuilder().build(); int n = 100; int limit = 10; for (int i = 0; i < n; i++) { set.add(i, i); } Set<LongPair> items = set.items(); Set<LongPair> limitItems = set.items(limit); assertEquals(items.size(), n); assertEquals(limitItems.size(), limit); int totalRemovedItems = set.removeIf((first, second) -> limitItems.contains((new LongPair(first, second)))); assertEquals(limitItems.size(), totalRemovedItems); assertEquals(set.size(), n - limit); }
// Full-match convenience wrapper: delegates to doMatch requiring the whole
// path to match the pattern (fullMatch=true, no template-variable capture).
public boolean match(String pattern, String path) { return doMatch(pattern, path, true, null); }
// A template-variable segment ({orgId}) must match any concrete path segment.
@Test
public void matchesTest() {
    final AntPathMatcher matcher = new AntPathMatcher();
    final boolean matched = matcher.match("/api/org/organization/{orgId}", "/api/org/organization/999");
    assertTrue(matched);
}
// Walks the decomposed path top-down to identify the container item.
// The switch deliberately falls through: DRIVES_CONTAINER sets nextExit and
// then ALSO records itself as the last collection via the SITES_CONTAINER
// case (no break between them). The exit/nextExit/exitEarly flags implement a
// delayed loop exit: Drives stops iteration two segments later, Groups one
// segment later. Any other segment becomes the candidate container.
// NOTE(review): the fall-through is essential to behavior — do not "fix" it.
@Override public ContainerItem getContainer(final Path file) { Deque<Path> pathDeque = decompose(file); Path lastContainer = null; Path lastCollection = null; boolean exit = false, nextExit = false, exitEarly = false; while(!exit && pathDeque.size() > 0) { final Path current = pathDeque.pop(); exit = nextExit; nextExit = exitEarly; switch(current.getName()) { case DRIVES_CONTAINER: nextExit = true; case SITES_CONTAINER: lastCollection = current; break; case GROUPS_CONTAINER: lastCollection = current; exitEarly = true; break; default: lastContainer = current; } } return new ContainerItem(lastContainer, lastCollection, exit); }
// ContainerItem equality must be structural (path-based) and ignore
// per-path attributes such as the file id.
@Test public void testContainerEquality() { final Path source = new Path("/Default/Drives/Docs", EnumSet.of(Path.Type.directory)) .withAttributes(new PathAttributes() .withFileId("File Id")); final Path target = new Path("/Default/Drives/Docs", EnumSet.of(Path.Type.directory)); final GraphSession.ContainerItem sourceItem = session.getContainer(source); final GraphSession.ContainerItem targetItem = session.getContainer(target); assertEquals(sourceItem, targetItem); }
/**
 * Opens an in-memory file registered under the given location.
 *
 * @throws IllegalStateException if this FileIO was already closed
 * @throws NotFoundException if no bytes are registered for the location
 */
@Override
public InputFile newInputFile(String location) {
    Preconditions.checkState(!closed, "Cannot call newInputFile after calling close()");
    final byte[] contents = IN_MEMORY_FILES.get(location);
    if (contents == null) {
        throw new NotFoundException("No in-memory file found for location: %s", location);
    }
    return new InMemoryInputFile(location, contents);
}
// Requesting an unregistered location must raise NotFoundException.
@Test public void testNewInputFileNotFound() { InMemoryFileIO fileIO = new InMemoryFileIO(); assertThatExceptionOfType(NotFoundException.class) .isThrownBy(() -> fileIO.newInputFile("s3://nonexistent/file")); }
// Restores a previous file version by COPYing the version resource onto the
// live path over WebDAV. Sardine protocol errors map to a revert-specific
// failure; plain I/O errors map through the generic HTTP mapper.
@Override public void revert(final Path file) throws BackgroundException { try { session.getClient().copy(URIEncoder.encode(file.getAbsolute()), new DAVPathEncoder().encode(file)); } catch(SardineException e) { throw new DAVExceptionMappingService().map("Cannot revert file", e, file); } catch(IOException e) { throw new HttpExceptionMappingService().map(e, file); } }
// End-to-end versioning round trip against an ownCloud/Nextcloud server:
// write three successive contents to the same file (creating two versions),
// poll until both versions are listed (version listing is eventually
// consistent, hence the do/while), verify each version's size, attributes and
// readable content, then revert to the oldest version and check the live file
// takes on its size. Cleans up file and directory at the end.
@Test public void testRevert() throws Exception { final Path directory = new DAVDirectoryFeature(session).mkdir(new Path( new OwncloudHomeFeature(session.getHost()).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path test = new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final TransferStatus status = new TransferStatus(); final NextcloudWriteFeature writer = new NextcloudWriteFeature(session); final byte[] initialContent = RandomUtils.nextBytes(32769); { new StreamCopier(status, status).transfer(new ByteArrayInputStream(initialContent), writer.write(test, status.withLength(initialContent.length), new DisabledConnectionCallback())); } final OwncloudVersioningFeature feature = new OwncloudVersioningFeature(session); assertEquals(0, feature.list(test.withAttributes(new OwncloudAttributesFinderFeature(session).find(test)), new DisabledListProgressListener()).size()); final PathAttributes initialAttributes = new OwncloudAttributesFinderFeature(session).find(test); final byte[] contentUpdate = RandomUtils.nextBytes(16258); { new StreamCopier(status, status).transfer(new ByteArrayInputStream(contentUpdate), writer.write(test, status.withLength(contentUpdate.length).exists(true), new DisabledConnectionCallback())); final AttributedList<Path> versions = feature.list(test.withAttributes(new OwncloudAttributesFinderFeature(session).find(test)), new DisabledListProgressListener()); assertEquals(1, versions.size()); } { final byte[] contentLatest = RandomUtils.nextBytes(13247); new StreamCopier(status, status).transfer(new ByteArrayInputStream(contentLatest), writer.write(test, status.withLength(contentLatest.length).exists(true), new DisabledConnectionCallback())); } final AttributedList<Path> versions = new AttributedList<>(); do { versions.addAll(feature.list(test.withAttributes(new OwncloudAttributesFinderFeature(session).find(test)), 
new DisabledListProgressListener())); } while(versions.size() != 2); assertEquals(2, versions.size()); final Path initialVersion = versions.get(1); { assertEquals(initialAttributes.getSize(), initialVersion.attributes().getSize()); assertEquals(initialAttributes.getModificationDate(), initialVersion.attributes().getModificationDate()); assertNotNull(initialVersion.attributes().getVersionId()); assertNotEquals(initialAttributes, new OwncloudAttributesFinderFeature(session).find(test)); assertEquals(initialVersion.attributes(), new OwncloudAttributesFinderFeature(session).find(initialVersion)); { final InputStream reader = new OwncloudReadFeature(session).read(initialVersion, new TransferStatus(), new DisabledLoginCallback()); assertArrayEquals(initialContent, IOUtils.readFully(reader, initialContent.length)); reader.close(); } final Path updatedVersion = versions.get(0); assertEquals(contentUpdate.length, new OwncloudAttributesFinderFeature(session).find(updatedVersion).getSize()); { final InputStream reader = new OwncloudReadFeature(session).read(updatedVersion, new TransferStatus(), new DisabledLoginCallback()); assertArrayEquals(contentUpdate, IOUtils.readFully(reader, contentUpdate.length)); reader.close(); } } feature.revert(initialVersion); assertEquals(initialVersion.attributes().getSize(), new OwncloudAttributesFinderFeature(session).find(test).getSize()); new DAVDeleteFeature(session).delete(Arrays.asList(test, directory), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Lists the IMap distributed objects reachable through this data connection
 * as journal resources. The client acquired for the scan is always shut down,
 * even when listing fails.
 */
@Nonnull @Override
public Collection<DataConnectionResource> listResources() {
    final HazelcastInstance client = getClient();
    try {
        return client.getDistributedObjects().stream()
            .filter(object -> object instanceof IMap)
            .map(object -> new DataConnectionResource(OBJECT_TYPE_IMAP_JOURNAL, object.getName()))
            .collect(Collectors.toList());
    } finally {
        client.shutdown();
    }
}
// A fresh cluster exposes only internal/system maps; listResources must not
// surface any of them, yielding an empty result.
@Test public void list_resources_should_not_return_system_maps() { DataConnectionConfig dataConnectionConfig = sharedDataConnectionConfig(clusterName); hazelcastDataConnection = new HazelcastDataConnection(dataConnectionConfig); Collection<DataConnectionResource> resources = hazelcastDataConnection.listResources(); assertThat(resources).isEmpty(); }
public static short translateBucketAcl(GSAccessControlList acl, String userId) { short mode = (short) 0; for (GrantAndPermission gp : acl.getGrantAndPermissions()) { Permission perm = gp.getPermission(); GranteeInterface grantee = gp.getGrantee(); if (perm.equals(Permission.PERMISSION_READ)) { if (isUserIdInGrantee(grantee, userId)) { // If the bucket is readable by the user, add r and x to the owner mode. mode |= (short) 0500; } } else if (perm.equals(Permission.PERMISSION_WRITE)) { if (isUserIdInGrantee(grantee, userId)) { // If the bucket is writable by the user, +w to the owner mode. mode |= (short) 0200; } } else if (perm.equals(Permission.PERMISSION_FULL_CONTROL)) { if (isUserIdInGrantee(grantee, userId)) { // If the user has full control to the bucket, +rwx to the owner mode. mode |= (short) 0700; } } } return mode; }
// FULL_CONTROL for the grantee yields 0700 for that user and 0 for others.
@Test public void translateUserFullPermission() { mAcl.grantPermission(mUserGrantee, Permission.PERMISSION_FULL_CONTROL); assertEquals((short) 0700, GCSUtils.translateBucketAcl(mAcl, ID)); assertEquals((short) 0000, GCSUtils.translateBucketAcl(mAcl, OTHER_ID)); }
// Plain map lookup; returns null when the tenant is unknown.
public Tenant getTenant(TenantName tenantName) { return tenants.get(tenantName); }
// Activating an application through the tenant's application repo must
// notify the registered listener exactly once.
@Test public void testListenersAdded() throws IOException, SAXException { TenantApplications applicationRepo = tenantRepository.getTenant(tenant1).getApplicationRepo(); ApplicationId id = ApplicationId.from(tenant1, ApplicationName.defaultName(), InstanceName.defaultName()); applicationRepo.createApplication(id); try (var transaction = new CuratorTransaction(curator)) { applicationRepo.createWriteActiveTransaction(transaction, id, 4).commit(); } applicationRepo.activateApplication(ApplicationVersions.from(new Application(new VespaModel(MockApplicationPackage.createEmpty()), new ServerCache(), 4L, new Version(1, 2, 3), MetricUpdater.createTestUpdater(), id)), 4); assertEquals(1, listener.activated.get()); }
/**
 * Generates source stubs for all IR-declared types: the shared meta-attribute
 * enum first, then one stub per enum, bit set, or composite. Tokens opening
 * with any other signal are skipped.
 */
public void generateTypeStubs() throws IOException {
    generateMetaAttributeEnum();

    for (final List<Token> tokens : ir.types()) {
        final Token opening = tokens.get(0);
        switch (opening.signal()) {
            case BEGIN_ENUM:
                generateEnum(tokens);
                break;

            case BEGIN_SET:
                generateBitSet(tokens);
                break;

            case BEGIN_COMPOSITE:
                generateComposite(tokens);
                break;

            default:
                break;
        }
    }
}
// Compiles the generated bit-set decoder and checks a single set bit
// (0b0000_0100 => choice "cruiseControl") decodes to TRUE after wrapping the
// flyweight over the mocked buffer at the given offset.
@Test void shouldGenerateChoiceSetStub() throws Exception { final int bufferOffset = 8; final byte bitset = (byte)0b0000_0100; final String className = "OptionalExtrasDecoder"; final String fqClassName = ir.applicableNamespace() + "." + className; when(mockBuffer.getByte(bufferOffset)).thenReturn(bitset); generateTypeStubs(); final Class<?> clazz = compile(fqClassName); assertNotNull(clazz); final Object flyweight = clazz.getConstructor().newInstance(); final Method method = flyweight.getClass().getDeclaredMethod("wrap", READ_ONLY_BUFFER_CLASS, int.class); method.invoke(flyweight, mockBuffer, bufferOffset); final Object result = get(flyweight, "cruiseControl"); assertThat(result, is(Boolean.TRUE)); }
/**
 * Accepts a path when it is itself in the selection list, or when some
 * selected path lies below it (so recursion can descend through ancestors).
 * Rejected paths are logged at debug level.
 */
@Override
public boolean accept(final Path file) {
    // Direct hit in the selection list.
    if (list.find(new SimplePathPredicate(file)) != null) {
        return true;
    }
    // Accept ancestors of selected paths.
    for (final Path selected : list) {
        if (selected.isChild(file)) {
            return true;
        }
    }
    if (log.isDebugEnabled()) {
        log.debug(String.format("Filter %s", file));
    }
    return false;
}
// A file present in the selection list is accepted; an unrelated file is not.
@Test public void testAcceptFile() { final RecursiveSearchFilter f = new RecursiveSearchFilter(new AttributedList<>(Arrays.asList(new Path("/f", EnumSet.of(Path.Type.file))))); assertTrue(f.accept(new Path("/f", EnumSet.of(Path.Type.file)))); assertFalse(f.accept(new Path("/a", EnumSet.of(Path.Type.file)))); }
/**
 * Matches a concurrent state change that is allowed to proceed: the storage
 * provider's job is more than one version ahead AND both jobs were last
 * processed on different servers. Any other combination is not a match.
 */
@Override
public boolean matches(Job localJob, Job storageProviderJob) {
    // Only relevant when the storage provider is more than one version ahead.
    if (storageProviderJob.getVersion() <= localJob.getVersion() + 1) {
        return false;
    }
    Optional<ProcessingState> localState = localJob.getLastJobStateOfType(ProcessingState.class);
    Optional<ProcessingState> providerState = storageProviderJob.getLastJobStateOfType(ProcessingState.class);
    if (!localState.isPresent() || !providerState.isPresent()) {
        return false;
    }
    // Same server means the change is an ordinary progression, not concurrent.
    return !localState.get().getServerId().equals(providerState.get().getServerId());
}
// A copy of the in-progress job moved to SUCCEEDED (same server, version not
// far enough ahead) must not be treated as an allowed concurrent change.
@Test void ifJobIsHavingConcurrentStateChangeOnSameServerItWillNotMatch() { final Job jobInProgress = aJobInProgress().build(); final Job succeededJob = aCopyOf(jobInProgress).withState(new SucceededState(ofMillis(10), ofMillis(6))).build(); boolean matchesAllowedStateChange = allowedStateChange.matches(jobInProgress, succeededJob); assertThat(matchesAllowedStateChange).isFalse(); }
// Declares the "views" index: 5 shards by default, source storage enabled
// (required by terms-lookup queries from the issue index), and a single
// "view" type mapping with keyword uuid/projects fields (norms disabled).
@Override public void define(IndexDefinitionContext context) { NewRegularIndex index = context.create( DESCRIPTOR, newBuilder(config) .setDefaultNbOfShards(5) .build()) // storing source is required because some search queries on issue index use terms lookup query onto the view index // and this requires source to be stored (https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-terms-query.html#query-dsl-terms-lookup) .setEnableSource(true); // type "view" TypeMapping mapping = index.createTypeMapping(TYPE_VIEW); mapping.keywordFieldBuilder(FIELD_UUID).disableNorms().build(); mapping.keywordFieldBuilder(FIELD_PROJECTS).disableNorms().build(); }
// The definition must register exactly one index named "views" with main
// type "view", no relations, 5 shards and 0 replicas.
@Test public void define() { ViewIndexDefinition def = new ViewIndexDefinition(new MapSettings().asConfig()); def.define(underTest); assertThat(underTest.getIndices()).hasSize(1); NewIndex index = underTest.getIndices().get("views"); assertThat(index.getMainType()) .isEqualTo(IndexType.main(Index.simple("views"), "view")); assertThat(index.getRelationsStream()).isEmpty(); assertThat(index.getSetting("index.number_of_shards")).isEqualTo("5"); assertThat(index.getSetting("index.number_of_replicas")).isEqualTo("0"); }
/**
 * Rewrites a Parquet file, encrypting the given column paths with the
 * supplied encryption properties.
 *
 * @param inputFile source Parquet file path
 * @param outputFile destination path for the encrypted copy
 * @param paths dotted column paths to encrypt
 * @param fileEncryptionProperties keys/algorithm configuration
 * @throws IOException on read/write failure
 */
public void encryptColumns(
    String inputFile,
    String outputFile,
    List<String> paths,
    FileEncryptionProperties fileEncryptionProperties)
    throws IOException {
  Path inPath = new Path(inputFile);
  Path outPath = new Path(outputFile);

  RewriteOptions options = new RewriteOptions.Builder(conf, inPath, outPath)
      .encrypt(paths)
      .encryptionProperties(fileEncryptionProperties)
      .build();

  ParquetRewriter rewriter = new ParquetRewriter(options);
  try {
    rewriter.processBlocks();
  } finally {
    // Always release the rewriter: the original skipped close() when
    // processBlocks() threw, leaking the underlying readers/writers.
    rewriter.close();
  }
}
@Test
public void testNestedColumn() throws IOException {
    // Encrypting a nested column ("Links.Forward") must still produce a file
    // that decrypts correctly with the right key.
    String[] encryptColumns = {"Links.Forward"};
    testSetup("GZIP");
    FileEncryptionProperties encryptionProperties =
        EncDecProperties.getFileEncryptionProperties(encryptColumns, ParquetCipher.AES_GCM_CTR_V1, false);
    columnEncryptor.encryptColumns(
        inputFile.getFileName(), outputFile, Arrays.asList(encryptColumns), encryptionProperties);
    verifyResultDecryptionWithValidKey();
}
public final void isNotNaN() {
  // A null subject can never be "a double other than NaN": fail with the
  // actual (null) value; otherwise delegate to the ordinary inequality check.
  if (actual != null) {
    isNotEqualTo(NaN);
  } else {
    failWithActual(simpleFact("expected a double other than NaN"));
  }
}
// Calling isNotNaN() on a null subject must fail and report both the
// expectation ("expected a double other than NaN") and the actual value.
@Test
public void isNotNaNIsNull() {
    expectFailureWhenTestingThat(null).isNotNaN();
    assertFailureKeys("expected a double other than NaN", "but was");
}
/**
 * Resolves a value by name on the given object: first a declared field, then
 * a JavaBean-style getter ("get" + capitalized name).
 *
 * @throws IllegalArgumentException if neither a field nor a getter exists
 */
public static Object getValueFromFieldOrProperty(Object object, String paramName) {
    Class<?> clazz = object.getClass();

    Optional<Field> field = findField(clazz, paramName);
    if (field.isPresent()) {
        return getValueFromField(field.get(), object);
    }

    Optional<Method> getter = findMethod(clazz, "get" + capitalize(paramName));
    if (getter.isPresent()) {
        return getValueFromGetMethod(getter.get(), object);
    }

    throw new IllegalArgumentException(String.format("Could not get value '%s' from object with class %s", paramName, object.getClass()));
}
@Test
void testGetValueFromFieldOrProperty() {
    final TestObject subject = new TestObject("test");

    // Resolved via the declared field.
    assertThat(getValueFromFieldOrProperty(subject, "field")).isEqualTo("test");
    // Resolved via the getter when no matching field exists.
    assertThat(getValueFromFieldOrProperty(subject, "anotherField")).isEqualTo("test");
    // Neither field nor getter: must throw.
    assertThatThrownBy(() -> getValueFromFieldOrProperty(subject, "doesNotExist"))
        .isInstanceOf(IllegalArgumentException.class);
}
/**
 * Strips all decimal digits from the input string.
 *
 * @param input the string to filter; may be null or empty
 * @return the input without digits, or null when the input is null/empty
 */
public static String removeDigits( String input ) {
  if ( Utils.isEmpty( input ) ) {
    return null;
  }
  // Accumulates everything that is NOT a digit (the original name "digitsOnly"
  // was misleading). Presized since the result is at most the input length.
  StringBuilder nonDigits = new StringBuilder( input.length() );
  for ( int i = 0; i < input.length(); i++ ) {
    char c = input.charAt( i );
    if ( !Character.isDigit( c ) ) {
      nonDigits.append( c );
    }
  }
  return nonDigits.toString();
}
// null input stays null; digits are stripped while every other character is
// kept in its original order.
@Test
public void testRemoveDigits() {
  assertNull( Const.removeDigits( null ) );
  assertEquals( "foobar", Const.removeDigits( "123foo456bar789" ) );
}
/**
 * Parses DistCp command-line arguments into a {@link DistCpOptions} instance.
 *
 * @param args the raw command-line arguments
 * @return the fully-built options
 * @throws IllegalArgumentException if the arguments cannot be parsed or any
 *         numeric option value is invalid
 */
public static DistCpOptions parse(String[] args)
    throws IllegalArgumentException {

  CommandLineParser parser = new CustomParser();

  CommandLine command;
  try {
    command = parser.parse(cliOptions, args, true);
  } catch (ParseException e) {
    throw new IllegalArgumentException("Unable to parse arguments. "
        + Arrays.toString(args), e);
  }

  DistCpOptions.Builder builder = parseSourceAndTargetPaths(command);

  // Boolean switches map directly onto builder flags.
  builder
      .withAtomicCommit(
          command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch()))
      .withSyncFolder(
          command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch()))
      .withDeleteMissing(
          command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch()))
      .withIgnoreFailures(
          command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch()))
      .withOverwrite(
          command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch()))
      .withAppend(
          command.hasOption(DistCpOptionSwitch.APPEND.getSwitch()))
      .withSkipCRC(
          command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch()))
      // Note the negation: presence of the BLOCKING switch disables blocking.
      .withBlocking(
          !command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch()))
      .withVerboseLog(
          command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch()))
      .withDirectWrite(
          command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch()))
      .withUseIterator(
          command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch()))
      .withUpdateRoot(
          command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch()));

  // Snapshot-diff sync: both switches carry a <fromSnapshot> <toSnapshot> pair.
  if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
    String[] snapshots = getVals(command, DistCpOptionSwitch.DIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseDiff(snapshots[0], snapshots[1]);
  }
  if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) {
    String[] snapshots = getVals(command, DistCpOptionSwitch.RDIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseRdiff(snapshots[0], snapshots[1]);
  }

  if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
    builder.withFiltersFile(
        getVal(command, DistCpOptionSwitch.FILTERS.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
    builder.withLogPath(
        new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
    final String workPath = getVal(command,
        DistCpOptionSwitch.WORK_PATH.getSwitch());
    // An empty work-path value is treated the same as an absent one.
    if (workPath != null && !workPath.isEmpty()) {
      builder.withAtomicWorkPath(new Path(workPath));
    }
  }

  if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) {
    builder.withTrackMissing(
        new Path(getVal(
            command,
            DistCpOptionSwitch.TRACK_MISSING.getSwitch())));
  }

  // Numeric options: each parse failure is rethrown as IllegalArgumentException
  // carrying the offending raw value.
  if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
    try {
      final Float mapBandwidth = Float.parseFloat(
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()));
      builder.withMapBandwidth(mapBandwidth);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Bandwidth specified is invalid: "
          + getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
    }
  }

  if (command.hasOption(
      DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
    try {
      final Integer numThreads = Integer.parseInt(getVal(command,
          DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()));
      builder.withNumListstatusThreads(numThreads);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException(
          "Number of liststatus threads is invalid: " + getVal(command,
              DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
    try {
      final Integer maps = Integer.parseInt(
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()));
      builder.maxMaps(maps);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Number of maps is invalid: "
          + getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
    builder.withCopyStrategy(
        getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
    builder.preserve(
        getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
  }

  // Deprecated limits are accepted for backward compatibility but ignored.
  if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated"
        + " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated"
        + " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) {
    // NOTE(review): trim() here is applied to the switch name, not the value —
    // presumably harmless, but confirm the intent.
    final String chunkSizeStr = getVal(command,
        DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch().trim());
    try {
      int csize = Integer.parseInt(chunkSizeStr);
      // Negative values are clamped to 0 (chunking disabled).
      csize = csize > 0 ? csize : 0;
      LOG.info("Set distcp blocksPerChunk to " + csize);
      builder.withBlocksPerChunk(csize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("blocksPerChunk is invalid: "
          + chunkSizeStr, e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) {
    // NOTE(review): same trim()-on-the-switch-name oddity as blocksPerChunk.
    final String copyBufferSizeStr = getVal(command,
        DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch().trim());
    try {
      int copyBufferSize = Integer.parseInt(copyBufferSizeStr);
      builder.withCopyBufferSize(copyBufferSize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("copyBufferSize is invalid: "
          + copyBufferSizeStr, e);
    }
  }

  return builder.build();
}
// Verifies that appendToConf() only writes the bandwidth setting when it was
// explicitly supplied on the command line, and otherwise leaves any
// pre-existing configuration value untouched.
@Test
public void testOptionsAppendToConfDoesntOverwriteBandwidth() {
  // No -bandwidth flag and no pre-existing conf value: stays unset (-1 default).
  Configuration conf = new Configuration();
  Assert.assertEquals(
      conf.getRaw(DistCpOptionSwitch.BANDWIDTH.getConfigLabel()), null);
  DistCpOptions options = OptionsParser.parse(new String[] {
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  assertThat(conf.getFloat(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1))
      .isCloseTo(-1.0f,within(DELTA));

  // Explicit -bandwidth 77: the value is written to the conf.
  conf = new Configuration();
  Assert.assertEquals(
      conf.getRaw(DistCpOptionSwitch.BANDWIDTH.getConfigLabel()), null);
  options = OptionsParser.parse(new String[] {
      "-update",
      "-delete",
      "-pu",
      "-bandwidth",
      "77",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertEquals(
      conf.getFloat(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1),
      77.0, DELTA);

  // Pre-existing conf value and no flag: the existing value survives.
  conf = new Configuration();
  conf.set(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), "88");
  Assert.assertEquals(
      conf.getRaw(DistCpOptionSwitch.BANDWIDTH.getConfigLabel()), "88");
  options = OptionsParser.parse(new String[] {
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertEquals(
      conf.getFloat(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1),
      88.0, DELTA);

  // Pre-existing conf value AND an explicit flag: the flag wins.
  conf = new Configuration();
  conf.set(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), "88.0");
  Assert.assertEquals(
      conf.getRaw(DistCpOptionSwitch.BANDWIDTH.getConfigLabel()), "88.0");
  options = OptionsParser.parse(new String[] {
      "-bandwidth",
      "99",
      "hdfs://localhost:8020/source/first",
      "hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertEquals(
      conf.getFloat(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1),
      99.0, DELTA);
}
/**
 * Resolves pairwise compatibility of new vs. old nested serializer snapshots
 * and folds the individual outcomes into a single intermediate composite
 * result. Precedence: incompatible (short-circuit) > requires migration >
 * requires reconfiguration > compatible as is.
 *
 * @throws IllegalArgumentException if the two arrays differ in length
 * @throws IllegalStateException if a nested result has an unknown type
 */
public static <T> IntermediateCompatibilityResult<T> constructIntermediateCompatibilityResult(
        TypeSerializerSnapshot<?>[] newNestedSerializerSnapshots,
        TypeSerializerSnapshot<?>[] oldNestedSerializerSnapshots) {
    Preconditions.checkArgument(
            newNestedSerializerSnapshots.length == oldNestedSerializerSnapshots.length,
            "Different number of new serializer snapshots and existing serializer snapshots.");

    TypeSerializer<?>[] nestedSerializers =
            new TypeSerializer[newNestedSerializerSnapshots.length];

    // check nested serializers for compatibility
    boolean nestedSerializerRequiresMigration = false;
    boolean hasReconfiguredNestedSerializers = false;
    for (int i = 0; i < oldNestedSerializerSnapshots.length; i++) {
        TypeSerializerSchemaCompatibility<?> compatibility =
                resolveCompatibility(
                        newNestedSerializerSnapshots[i], oldNestedSerializerSnapshots[i]);

        // if any one of the new nested serializers is incompatible, we can just short circuit
        // the result
        if (compatibility.isIncompatible()) {
            return IntermediateCompatibilityResult.definedIncompatibleResult();
        }

        if (compatibility.isCompatibleAfterMigration()) {
            nestedSerializerRequiresMigration = true;
        } else if (compatibility.isCompatibleWithReconfiguredSerializer()) {
            hasReconfiguredNestedSerializers = true;
            nestedSerializers[i] = compatibility.getReconfiguredSerializer();
        } else if (compatibility.isCompatibleAsIs()) {
            nestedSerializers[i] = newNestedSerializerSnapshots[i].restoreSerializer();
        } else {
            throw new IllegalStateException("Undefined compatibility type.");
        }
    }

    // Migration dominates reconfiguration: if any nested serializer needs
    // migration, the whole composite does too.
    if (nestedSerializerRequiresMigration) {
        return IntermediateCompatibilityResult.definedCompatibleAfterMigrationResult();
    }

    if (hasReconfiguredNestedSerializers) {
        return IntermediateCompatibilityResult.undefinedReconfigureResult(nestedSerializers);
    }

    // ends up here if everything is compatible as is
    return IntermediateCompatibilityResult.definedCompatibleAsIsResult(nestedSerializers);
}
// When nested results mix "after reconfiguration", "after migration" and
// "as is", migration must dominate: both the intermediate and the final
// composite results report compatible-after-migration.
@Test
void testCompatibleAfterMigrationIntermediateCompatibilityResult() {
    final TypeSerializerSnapshot<?>[] previousSerializerSnapshots =
            new TypeSerializerSnapshot<?>[] {
                new SchemaCompatibilityTestingSerializer("a").snapshotConfiguration(),
                new SchemaCompatibilityTestingSerializer("b").snapshotConfiguration(),
                new SchemaCompatibilityTestingSerializer("c").snapshotConfiguration()
            };

    final TypeSerializerSnapshot<?>[] newSerializerSnapshots =
            new TypeSerializerSnapshot<?>[] {
                SchemaCompatibilityTestingSnapshot
                        .thatIsCompatibleWithLastSerializerAfterReconfiguration("a"),
                SchemaCompatibilityTestingSnapshot
                        .thatIsCompatibleWithLastSerializerAfterMigration("b"),
                SchemaCompatibilityTestingSnapshot.thatIsCompatibleWithLastSerializer("c"),
            };

    IntermediateCompatibilityResult<?> intermediateCompatibilityResult =
            CompositeTypeSerializerUtil.constructIntermediateCompatibilityResult(
                    newSerializerSnapshots, previousSerializerSnapshots);

    assertThat(intermediateCompatibilityResult.isCompatibleAfterMigration()).isTrue();
    assertThat(intermediateCompatibilityResult.getFinalResult().isCompatibleAfterMigration())
            .isTrue();
}
// Reports whether this node currently holds the leader role.
// NOTE(review): the backing field is presumably updated by the election logic
// elsewhere — confirm its visibility guarantees (e.g. volatile) at the
// declaration site.
@Override
public boolean isLeader() {
    return isLeader;
}
// After a node loses leadership, the election service must stop polling the
// lock service (until explicitly resumed elsewhere).
@Test
void pausesPollingAfterDowngradeFromLeader() {
    final AtomicInteger lockInvocations = new AtomicInteger();
    final AtomicReference<Lock> lock = new AtomicReference<>();

    // Every poll attempt counts one invocation and "acquires" whatever lock is staged.
    when(lockService.lock(any(), isNull())).then(i -> {
        lockInvocations.incrementAndGet();
        return Optional.ofNullable(lock.get());
    });

    leaderElectionService.startAsync().awaitRunning();

    // Stage a lock so the node becomes leader.
    lock.set(mock(Lock.class));
    await().until(() -> leaderElectionService.isLeader());

    // polling should continue
    int lockCount = lockInvocations.get();
    Uninterruptibles.sleepUninterruptibly(configuration.getLeaderElectionLockPollingInterval().multipliedBy(2));
    assertThat(lockInvocations.get()).isGreaterThan(lockCount);

    // Withdraw the lock so the node is downgraded from leader.
    lock.set(null);
    await().until(() -> !leaderElectionService.isLeader());

    // polling should have been paused
    lockCount = lockInvocations.get();
    Uninterruptibles.sleepUninterruptibly(configuration.getLeaderElectionLockPollingInterval().multipliedBy(2));
    assertThat(lockInvocations.get()).isEqualTo(lockCount);
}
@Override
public Long time(RedisClusterNode node) {
    // Issue the TIME command against the specific cluster node and block for the reply.
    RedisClient client = getEntry(node);
    RFuture<Long> future = executorService.readAsync(client, LongCodec.INSTANCE, RedisCommands.TIME_LONG);
    return syncFuture(future);
}
@Test
public void testTime() {
    // TIME on the first master must return a plausible epoch-based value.
    testInCluster(connection -> {
        Long serverTime = connection.time(getFirstMaster(connection));
        assertThat(serverTime).isGreaterThan(1000);
    });
}
/**
 * Parses a raw Lucene query string, collecting both the raw tokens and the
 * per-term (field/key/value) metadata into a ParsedQuery.
 *
 * @throws ParseException if the query string is not valid Lucene syntax
 */
public ParsedQuery parse(final String query) throws ParseException {
    final TokenCollectingQueryParser tokenParser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER);
    tokenParser.setSplitOnWhitespace(true);
    tokenParser.setAllowLeadingWildcard(allowLeadingWildcard);

    final Query parsedQuery = tokenParser.parse(query);

    // Walk the parsed query tree to extract terms, correlated with their source tokens.
    final TermCollectingQueryVisitor termVisitor = new TermCollectingQueryVisitor(ANALYZER, tokenParser.getTokenLookup());
    parsedQuery.visit(termVisitor);

    final ParsedQuery.Builder result = ParsedQuery.builder().query(query);
    result.tokensBuilder().addAll(tokenParser.getTokens());
    result.termsBuilder().addAll(termVisitor.getParsedTerms());
    return result.build();
}
// Parsing "foo:bar AND lorem:ipsum" must yield one parsed term per
// field:value pair, each carrying the key and value tokens it came from.
@Test
void testValueTokenSimple() throws ParseException {
    final ParsedQuery query = parser.parse("foo:bar AND lorem:ipsum");
    assertThat(query.terms().size()).isEqualTo(2);
    assertThat(query.terms())
            .hasSize(2)
            .anySatisfy(term -> {
                assertThat(term.field()).isEqualTo("foo");
                assertThat(term.keyToken()).map(ImmutableToken::image).hasValue("foo");
                assertThat(term.valueToken()).map(ImmutableToken::image).hasValue("bar");
            })
            .anySatisfy(term -> {
                assertThat(term.field()).isEqualTo("lorem");
                assertThat(term.keyToken()).map(ImmutableToken::image).hasValue("lorem");
                assertThat(term.valueToken()).map(ImmutableToken::image).hasValue("ipsum");
            });
}
@NonNull
FrameHeader readFrameHeader() throws IOException {
    // An ID3 frame header consists of a 4-char id, a 4-byte size and 2 flag bytes.
    final String frameId = readPlainBytesToString(FRAME_ID_LENGTH);
    int frameSize = readInt();
    // From ID3v2.4 onwards the frame size is stored as a synchsafe integer.
    if (tagHeader != null && tagHeader.getVersion() >= 0x0400) {
        frameSize = unsynchsafe(frameSize);
    }
    final short frameFlags = readShort();
    return new FrameHeader(frameId, frameSize, frameFlags);
}
@Test
public void testReadFrameHeader() throws IOException {
    // A generated CHAP header with size 42 must round-trip through the reader.
    CountingInputStream stream =
            new CountingInputStream(new ByteArrayInputStream(generateFrameHeader("CHAP", 42)));
    FrameHeader header = new ID3Reader(stream).readFrameHeader();
    assertEquals("CHAP", header.getId());
    assertEquals(42, header.getSize());
}
// A topic is hidden iff its full name matches the configured hidden-topics pattern.
public boolean isHidden(final String topicName) {
    return hiddenTopicsPattern.matcher(topicName).matches();
}
@Test public void shouldReturnFalseOnNonHiddenTopics() { // Given final List<String> topicNames = ImmutableList.of( KSQL_PROCESSING_LOG_TOPIC, "topic_prefix_", "_suffix_topic" ); // Given topicNames.forEach(topic -> { // When final boolean isHidden = internalTopics.isHidden(topic); // Then assertThat("Should return false on non-hidden topic: " + topic, isHidden, is(false)); }); }
@Override
public Metrics toHour() {
    // Downsample to hour granularity: clone this function with the same
    // identity and labeled values; only the time bucket changes.
    MaxLabeledFunction hourly = (MaxLabeledFunction) createNew();
    hourly.setEntityId(getEntityId());
    hourly.setServiceId(getServiceId());
    hourly.setTimeBucket(toTimeBucketInHour());
    hourly.getValue().copyFrom(getValue());
    return hourly;
}
// Accumulating two labeled samples, downsampling to hour granularity and
// recalculating must yield the per-label maximum of the inputs.
@Test
public void testToHour() {
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1);
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_2);
    function.calculate();
    final MaxLabeledFunction hourFunction = (MaxLabeledFunction) function.toHour();
    hourFunction.calculate();
    assertThat(hourFunction.getValue()).isEqualTo(HTTP_CODE_COUNT_3);
}
/**
 * Parses a MAC address from its canonical colon-separated hex form,
 * e.g. "01:23:45:67:89:ab".
 *
 * @throws IllegalArgumentException if the string is not a valid MAC address
 */
public static MacAddress valueOf(final String address) {
    if (!isValid(address)) {
        throw new IllegalArgumentException(
            "Specified MAC Address must contain 12 hex digits" + " separated pairwise by :'s.");
    }
    final String[] octets = address.split(":");
    final byte[] bytes = new byte[MacAddress.MAC_ADDRESS_LENGTH];
    for (int i = 0; i < MacAddress.MAC_ADDRESS_LENGTH; i++) {
        bytes[i] = (byte) Integer.parseInt(octets[i], 16);
    }
    return new MacAddress(bytes);
}
// Octets longer than two hex digits must be rejected during validation.
@Test(expected = IllegalArgumentException.class)
public void testValueOfInvalidStringWithTooLongOctet() throws Exception {
    MacAddress.valueOf(INVALID_MAC_OCTET_TOO_LONG);
}
@Override
public boolean isNodeVersionCompatibleWith(Version clusterVersion) {
    // Compatibility here is strict equality of node and cluster versions.
    Preconditions.checkNotNull(clusterVersion);
    Version nodeVersion = node.getVersion().asVersion();
    return nodeVersion.equals(clusterVersion);
}
@Test
public void test_nodeVersionNotCompatibleWith_otherMinorVersion() {
    // Bumping the minor version by one must break strict version compatibility.
    MemberVersion nodeVersion = getNode(hazelcastInstance).getVersion();
    Version nextMinor = Version.of(nodeVersion.getMajor(), nodeVersion.getMinor() + 1);
    assertFalse(nodeExtension.isNodeVersionCompatibleWith(nextMinor));
}
@Override public long timestamp() { if (recordContext == null) { // This is only exposed via the deprecated ProcessorContext, // in which case, we're preserving the pre-existing behavior // of returning dummy values when the record context is undefined. // For timestamp, the dummy value is `0L`. return 0L; } else { return recordContext.timestamp(); } }
// The context must expose exactly the timestamp carried by its record context.
@Test
public void shouldReturnTimestampFromRecordContext() {
    assertThat(context.timestamp(), equalTo(recordContext.timestamp()));
}
@Override
public E peek() {
    // Scan the sub-queues in order; surface the head of the first non-empty
    // queue, or null when every queue is empty.
    E head = null;
    int i = 0;
    while (head == null && i < queues.size()) {
        head = queues.get(i).peek();
        i++;
    }
    return head;
}
// peek() on an empty composite queue must return null rather than throw.
@Test
public void testPeekNullWhenEmpty() {
    assertNull(fcq.peek());
}
/**
 * Executes a bot API request synchronously and returns its typed response.
 * Thin delegation to the underlying API client.
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
@Test
public void chatJoinRequest() {
    // Approving a join request for someone already in the chat fails with
    // USER_ALREADY_PARTICIPANT ...
    BaseResponse approveResponse = bot.execute(new ApproveChatJoinRequest(groupId, memberBot));
    assertFalse(approveResponse.isOk());
    assertEquals("Bad Request: USER_ALREADY_PARTICIPANT", approveResponse.description());

    // ... and declining a request that does not exist fails with HIDE_REQUESTER_MISSING.
    BaseResponse declineResponse = bot.execute(new DeclineChatJoinRequest(groupId, memberBot));
    assertFalse(declineResponse.isOk());
    assertEquals("Bad Request: HIDE_REQUESTER_MISSING", declineResponse.description());
}
/**
 * Asynchronously fetches a single message from a remote broker, resolving the
 * broker address first and refreshing the topic route once if it is unknown.
 *
 * The returned triple is (message, error-description, retryable-flag); the
 * message is null when nothing was found.
 */
protected CompletableFuture<Triple<MessageExt, String, Boolean>> getMessageFromRemoteAsync(String topic, long offset, int queueId, String brokerName) {
    try {
        String brokerAddr = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, false);
        if (null == brokerAddr) {
            // Address unknown: refresh the route from the name server, then retry the lookup once.
            this.brokerController.getTopicRouteInfoManager().updateTopicRouteInfoFromNameServer(topic, true, false);
            brokerAddr = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInSubscribe(brokerName, MixAll.MASTER_ID, false);
            if (null == brokerAddr) {
                LOG.warn("can't find broker address for topic {}, {}", topic, brokerName);
                return CompletableFuture.completedFuture(Triple.of(null, "brokerAddress not found", true)); // maybe offline temporarily, so need retry
            }
        }
        // Pull exactly one message at the requested offset from the resolved broker.
        return this.brokerController.getBrokerOuterAPI().pullMessageFromSpecificBrokerAsync(brokerName, brokerAddr, this.innerConsumerGroupName, topic, queueId, offset, 1, DEFAULT_PULL_TIMEOUT_MILLIS)
            .thenApply(pullResult -> {
                if (pullResult.getLeft() != null
                    && PullStatus.FOUND.equals(pullResult.getLeft().getPullStatus())
                    && CollectionUtils.isNotEmpty(pullResult.getLeft().getMsgFoundList())) {
                    return Triple.of(pullResult.getLeft().getMsgFoundList().get(0), "", false);
                }
                // Not found: propagate the remote error description and retry hint.
                return Triple.of(null, pullResult.getMiddle(), pullResult.getRight());
            });
    } catch (Exception e) {
        LOG.error("Get message from remote failed. {}, {}, {}, {}", topic, offset, queueId, brokerName, e);
    }
    return CompletableFuture.completedFuture(Triple.of(null, "Get message from remote failed", true)); // need retry
}
@Test
public void getMessageFromRemoteAsyncTest() {
    // The remote fetch must degrade gracefully (completed future), never throw.
    Assertions.assertThatCode(
            () -> escapeBridge.getMessageFromRemoteAsync(TEST_TOPIC, 1, DEFAULT_QUEUE_ID, BROKER_NAME))
        .doesNotThrowAnyException();
}
/**
 * Looks up a DataSource in JNDI, trying the common container-specific name
 * prefixes in order, and caches successful lookups in FoundDS.
 *
 * @throws NamingException if the name is empty, or if no candidate name
 *         resolves to a DataSource (the first lookup failure is rethrown
 *         when available)
 */
protected static DataSource getDataSourceFromJndi( String dsName, Context ctx ) throws NamingException {
  if ( Utils.isEmpty( dsName ) ) {
    throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", String.valueOf( dsName ) ) );
  }
  // Serve from the cache when this name has already been resolved.
  Object foundDs = FoundDS.get( dsName );
  if ( foundDs != null ) {
    return (DataSource) foundDs;
  }
  Object lkup = null;
  DataSource rtn = null;
  NamingException firstNe = null;
  // First, try what they ask for...
  try {
    lkup = ctx.lookup( dsName );
    if ( lkup instanceof DataSource ) {
      rtn = (DataSource) lkup;
      FoundDS.put( dsName, rtn );
      return rtn;
    }
  } catch ( NamingException ignored ) {
    // Remembered so the ORIGINAL failure is reported if every fallback also fails.
    firstNe = ignored;
  }
  try {
    // Needed this for Jboss
    lkup = ctx.lookup( "java:" + dsName );
    if ( lkup instanceof DataSource ) {
      rtn = (DataSource) lkup;
      FoundDS.put( dsName, rtn );
      return rtn;
    }
  } catch ( NamingException ignored ) {
    // ignore
  }
  try {
    // Tomcat
    lkup = ctx.lookup( "java:comp/env/jdbc/" + dsName );
    if ( lkup instanceof DataSource ) {
      rtn = (DataSource) lkup;
      FoundDS.put( dsName, rtn );
      return rtn;
    }
  } catch ( NamingException ignored ) {
    // ignore
  }
  try {
    // Others?
    lkup = ctx.lookup( "jdbc/" + dsName );
    if ( lkup instanceof DataSource ) {
      rtn = (DataSource) lkup;
      FoundDS.put( dsName, rtn );
      return rtn;
    }
  } catch ( NamingException ignored ) {
    // ignore
  }
  if ( firstNe != null ) {
    throw firstNe;
  }
  throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", dsName ) );
}
// An empty datasource name must be rejected with a NamingException before any lookup.
@Test( expected = NamingException.class )
public void testEmptyName() throws NamingException {
  DatabaseUtil.getDataSourceFromJndi( "", context );
}
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
// A record whose timestamp is older than the configured maximum age must be
// rejected with InvalidTimestampException, and the validation error must
// identify every offending record in the batch.
@Test
public void testRecordWithPastTimestampIsRejected() {
    long timestampBeforeMaxConfig = Duration.ofHours(24).toMillis(); // 24 hrs
    long timestampAfterMaxConfig = Duration.ofHours(1).toMillis(); // 1 hr
    long now = System.currentTimeMillis();
    // Five minutes past the allowed age: definitely over the threshold.
    long fiveMinutesBeforeThreshold = now - timestampBeforeMaxConfig - Duration.ofMinutes(5).toMillis();
    Compression compression = Compression.gzip().build();
    MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V2, fiveMinutesBeforeThreshold, compression);
    RecordValidationException e = assertThrows(RecordValidationException.class,
        () -> new LogValidator(
            records,
            topicPartition,
            time,
            CompressionType.GZIP,
            compression,
            false,
            RecordBatch.MAGIC_VALUE_V2,
            TimestampType.CREATE_TIME,
            timestampBeforeMaxConfig,
            timestampAfterMaxConfig,
            RecordBatch.NO_PARTITION_LEADER_EPOCH,
            AppendOrigin.CLIENT,
            MetadataVersion.latestTesting()
        ).validateMessagesAndAssignOffsets(
            PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
        )
    );

    assertInstanceOf(InvalidTimestampException.class, e.invalidException());
    assertFalse(e.recordErrors().isEmpty());
    assertEquals(3, e.recordErrors().size());
}
/**
 * Removes every value stored under the given metadata name; a no-op when
 * the name is absent.
 */
public void remove(String name) {
    metadata.remove(name);
}
// remove() must delete all values under a name, be a safe no-op on absent
// names, and leave other names untouched.
@Test
public void testRemove() {
    Metadata meta = new Metadata();
    // Removing from an empty container is a no-op.
    meta.remove("name-one");
    assertEquals(0, meta.size());

    meta.add("name-one", "value-1.1");
    meta.add("name-one", "value-1.2");
    meta.add("name-two", "value-2.2");
    assertEquals(2, meta.size());
    assertNotNull(meta.get("name-one"));
    assertNotNull(meta.get("name-two"));

    // Removing one name drops all of its values but keeps the other name.
    meta.remove("name-one");
    assertEquals(1, meta.size());
    assertNull(meta.get("name-one"));
    assertNotNull(meta.get("name-two"));

    meta.remove("name-two");
    assertEquals(0, meta.size());
    assertNull(meta.get("name-one"));
    assertNull(meta.get("name-two"));
}
/**
 * Intercepts the Jetty client call: when the target host matches the
 * configured realm AND the addressed service is covered by the plug-effect
 * rules, the original call is skipped and replaced by a JettyClientWrapper.
 */
@Override
public ExecuteContext before(ExecuteContext context) {
    init();
    Object[] arguments = context.getArguments();
    URI uri = (URI) context.getArguments()[1];
    // Only rewrite calls addressed to the configured realm host.
    if (!PlugEffectWhiteBlackUtils.isHostEqualRealmName(uri.getHost())) {
        return context;
    }
    Map<String, String> hostAndPath = RequestInterceptorUtils.recoverHostAndPath(uri.getPath());
    // Only rewrite services the plugin is configured to affect.
    if (!PlugEffectWhiteBlackUtils.isPlugEffect(hostAndPath.get(HttpConstants.HTTP_URI_SERVICE))) {
        return context;
    }
    // Replace the original request with the wrapper and skip the intercepted method.
    context.skip(new JettyClientWrapper((HttpClient) context.getObject(), (HttpConversation) arguments[0], uri));
    return context;
}
// The interceptor must skip the original call only when BOTH the host matches
// the realm and the service matches the plug-effect rules.
@Test
public void test() {
    // The domain name does not match
    arguments[1] = URI.create("http://www.domain1.com/foo/hello");
    interceptor.before(context);
    Assert.assertFalse(context.isSkip());

    // The name of the service does not match
    arguments[1] = URI.create("http://www.domain.com/bar/hello");
    interceptor.before(context);
    Assert.assertFalse(context.isSkip());

    // Both match: the call is skipped and the wrapper installed as the result.
    arguments[1] = URI.create("http://www.domain.com/foo/hello");
    interceptor.before(context);
    Assert.assertTrue(context.isSkip());
    Assert.assertTrue(context.getResult() instanceof JettyClientWrapper);
}
// Simple mutator: stores the raw selector expression as given.
public void setExpression(String expression) {
    this.expression = expression;
}
@Test
void testSetExpression() {
    MockCmdbSelector selector = new MockCmdbSelector();
    // The expression starts unset...
    assertNull(selector.getExpression());
    // ...and the setter stores exactly what it is given.
    selector.setExpression("test");
    assertEquals("test", selector.getExpression());
}
/**
 * Builds a Statement AST node from the parse tree, resolving the data
 * sources referenced by the tree before delegating to build().
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
@Test public void shouldCreateSourceTable() { // Given: final SingleStatementContext stmt = givenQuery("CREATE SOURCE TABLE X WITH (kafka_topic='X');"); // When: final CreateTable result = (CreateTable) builder.buildStatement(stmt); // Then: assertThat(result.isSource(), is(true)); }
/**
 * Inflates zlib/deflate-compressed bytes.
 *
 * @param src the compressed input
 * @return the decompressed bytes (empty array for an empty stream)
 * @throws IOException if {@code src} is not valid deflate data
 */
@Override
public byte[] decompress(byte[] src) throws IOException {
    byte[] buffer = new byte[src.length];
    // try-with-resources replaces the former manual close-with-logging blocks
    // and removes the dead initial "result = src" assignment; byte-array-backed
    // streams do not fail on close, so observable behavior is unchanged.
    try (ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(src);
         InflaterInputStream inflaterInputStream = new InflaterInputStream(byteArrayInputStream);
         ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(src.length)) {
        int len;
        while ((len = inflaterInputStream.read(buffer, 0, buffer.length)) > 0) {
            byteArrayOutputStream.write(buffer, 0, len);
        }
        byteArrayOutputStream.flush();
        return byteArrayOutputStream.toByteArray();
    }
}
// Arbitrary bytes that are not a valid zlib stream must surface as an IOException.
@Test(expected = IOException.class)
public void testDecompressionFailureWithInvalidData() throws Exception {
    byte[] compressedData = new byte[] {0, 1, 2, 3, 4};
    ZlibCompressor compressor = new ZlibCompressor();
    compressor.decompress(compressedData); // Invalid compressed data
}
/**
 * Deletes the single permission row identified by the (role, resource,
 * action) triple; a no-op when no such row exists.
 */
@Override
public void deletePermission(String role, String resource, String action) {
    String sql = "DELETE FROM permissions WHERE role=? AND resource=? AND action=?";
    try {
        jt.update(sql, role, resource, action);
    } catch (CannotGetJdbcConnectionException e) {
        // Connection-level failures are fatal for persistence: log loudly and rethrow.
        LogUtil.FATAL_LOG.error("[db-error] " + e.toString(), e);
        throw e;
    }
}
@Test
void testDeletePermission() {
    // The service must issue exactly the parameterized DELETE with the given triple.
    externalPermissionPersistService.deletePermission("role", "resource", "action");
    Mockito.verify(jdbcTemplate).update(
        "DELETE FROM permissions WHERE role=? AND resource=? AND action=?",
        "role", "resource", "action");
}
/**
 * Merges sharded DQL query results: a single-shard result without aggregation
 * rewrite is streamed through untouched; otherwise the core merged result
 * (group-by/order-by/iteration) is built and wrapped with pagination
 * decorators as required.
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) {
        return new IteratorStreamMergedResult(queryResults);
    }
    // Column labels are resolved from the first shard's result and stored on
    // the statement context for index-based access during merging.
    Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
    SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
    selectStatementContext.setIndexes(columnLabelIndexMap);
    MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
    return decorate(queryResults, selectStatementContext, mergedResult);
}
// Oracle rownum-based pagination combined with GROUP BY must be merged as a
// RowNumberDecoratorMergedResult wrapping a GroupByStreamMergedResult.
@Test
void assertBuildGroupByStreamMergedResultWithOracleLimit() throws SQLException {
    final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "Oracle"));
    final ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);

    // WHERE row_id >= 1 — the Oracle pagination predicate.
    WhereSegment whereSegment = mock(WhereSegment.class);
    BinaryOperationExpression binaryOperationExpression = mock(BinaryOperationExpression.class);
    when(binaryOperationExpression.getLeft()).thenReturn(new ColumnSegment(0, 0, new IdentifierValue("row_id")));
    when(binaryOperationExpression.getRight()).thenReturn(new LiteralExpressionSegment(0, 0, 1L));
    when(binaryOperationExpression.getOperator()).thenReturn(">=");
    when(whereSegment.getExpr()).thenReturn(binaryOperationExpression);

    // Subquery projecting the "row_id" alias referenced by the outer predicate.
    SubqueryTableSegment subqueryTableSegment = mock(SubqueryTableSegment.class);
    SubquerySegment subquerySegment = mock(SubquerySegment.class);
    SelectStatement subSelectStatement = mock(MySQLSelectStatement.class);
    ProjectionsSegment subProjectionsSegment = mock(ProjectionsSegment.class);
    TopProjectionSegment topProjectionSegment = mock(TopProjectionSegment.class);
    when(topProjectionSegment.getAlias()).thenReturn("row_id");
    when(subProjectionsSegment.getProjections()).thenReturn(Collections.singletonList(topProjectionSegment));
    when(subSelectStatement.getProjections()).thenReturn(subProjectionsSegment);
    when(subquerySegment.getSelect()).thenReturn(subSelectStatement);
    when(subqueryTableSegment.getSubquery()).thenReturn(subquerySegment);

    // Outer statement: GROUP BY and ORDER BY on the same index, over the subquery.
    OracleSelectStatement selectStatement = (OracleSelectStatement) buildSelectStatement(new OracleSelectStatement());
    selectStatement.setGroupBy(new GroupBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setOrderBy(new OrderBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    selectStatement.setFrom(subqueryTableSegment);
    selectStatement.setWhere(whereSegment);
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), null,
            selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());

    MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(), mock(ConnectionContext.class));

    assertThat(actual, instanceOf(RowNumberDecoratorMergedResult.class));
    assertThat(((RowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(GroupByStreamMergedResult.class));
}
protected List<MavenArtifact> processResponse(Dependency dependency, HttpURLConnection conn) throws IOException { final List<MavenArtifact> result = new ArrayList<>(); try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8); JsonParser parser = objectReader.getFactory().createParser(streamReader)) { if (init(parser) && parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT) { // at least one result do { final FileImpl file = objectReader.readValue(parser); checkHashes(dependency, file.getChecksums()); final Matcher pathMatcher = PATH_PATTERN.matcher(file.getPath()); if (!pathMatcher.matches()) { throw new IllegalStateException("Cannot extract the Maven information from the path " + "retrieved in Artifactory " + file.getPath()); } final String groupId = pathMatcher.group("groupId").replace('/', '.'); final String artifactId = pathMatcher.group("artifactId"); final String version = pathMatcher.group("version"); result.add(new MavenArtifact(groupId, artifactId, version, file.getDownloadUri(), MavenArtifact.derivePomUrl(artifactId, version, file.getDownloadUri()))); } while (parser.nextToken() == com.fasterxml.jackson.core.JsonToken.START_OBJECT); } else { throw new FileNotFoundException("Artifact " + dependency + " not found in Artifactory"); } } return result; }
// Verifies that a response whose "checksums" block lacks a sha256 entry is still accepted:
// sha1/md5 match the dependency, and the GAV plus URLs are derived from path/downloadUri.
@Test
public void shouldProcessCorrectlyArtifactoryAnswerWithoutSha256() throws IOException {
    // Given: a dependency with matching sha1/md5 and a canned single-result JSON payload.
    Dependency dependency = new Dependency();
    dependency.setSha1sum("2e66da15851f9f5b5079228f856c2f090ba98c38");
    dependency.setMd5sum("3dbee72667f107b4f76f2d5aa33c5687");
    final HttpURLConnection urlConnection = mock(HttpURLConnection.class);
    final byte[] payload = ("{\n" +
            "  \"results\" : [ {\n" +
            "    \"repo\" : \"jcenter-cache\",\n" +
            "    \"path\" : \"/com/google/code/gson/gson/2.1/gson-2.1.jar\",\n" +
            "    \"created\" : \"2017-06-14T16:15:37.936+02:00\",\n" +
            "    \"createdBy\" : \"anonymous\",\n" +
            "    \"lastModified\" : \"2012-12-12T22:20:22.000+01:00\",\n" +
            "    \"modifiedBy\" : \"anonymous\",\n" +
            "    \"lastUpdated\" : \"2017-06-14T16:15:37.939+02:00\",\n" +
            "    \"properties\" : {\n" +
            "      \"artifactory.internal.etag\" : [ \"2e66da15851f9f5b5079228f856c2f090ba98c38\" ]\n" +
            "    },\n" +
            "    \"downloadUri\" : \"https://artifactory.techno.ingenico.com/artifactory/jcenter-cache/com/google/code/gson/gson/2.1/gson-2.1.jar\",\n" +
            "    \"remoteUrl\" : \"http://jcenter.bintray.com/com/google/code/gson/gson/2.1/gson-2.1.jar\",\n" +
            "    \"mimeType\" : \"application/java-archive\",\n" +
            "    \"size\" : \"180110\",\n" +
            "    \"checksums\" : {\n" +
            "      \"sha1\" : \"2e66da15851f9f5b5079228f856c2f090ba98c38\",\n" +
            "      \"md5\" : \"3dbee72667f107b4f76f2d5aa33c5687\"\n" +
            "    },\n" +
            "    \"originalChecksums\" : {\n" +
            "      \"sha1\" : \"2e66da15851f9f5b5079228f856c2f090ba98c38\"\n" +
            "    },\n" +
            "    \"uri\" : \"https://artifactory.techno.ingenico.com/artifactory/api/storage/jcenter-cache/com/google/code/gson/gson/2.1/gson-2.1.jar\"\n" +
            "  } ]\n" +
            "}").getBytes(StandardCharsets.UTF_8);
    when(urlConnection.getInputStream()).thenReturn(new ByteArrayInputStream(payload));
    // When
    final List<MavenArtifact> mavenArtifacts = searcher.processResponse(dependency, urlConnection);
    // Then: exactly one artifact, with coordinates parsed from the path and the pom URL
    // derived from the jar download URI.
    assertEquals(1, mavenArtifacts.size());
    final MavenArtifact artifact = mavenArtifacts.get(0);
    assertEquals("com.google.code.gson", artifact.getGroupId());
    assertEquals("gson", artifact.getArtifactId());
    assertEquals("2.1", artifact.getVersion());
    assertEquals("https://artifactory.techno.ingenico.com/artifactory/jcenter-cache/com/google/code/gson/gson/2.1/gson-2.1.jar", artifact.getArtifactUrl());
    assertEquals("https://artifactory.techno.ingenico.com/artifactory/jcenter-cache/com/google/code/gson/gson/2.1/gson-2.1.pom", artifact.getPomUrl());
}
@Override public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) { // 0. 只有【普通】订单,才计算该优惠 if (ObjectUtil.notEqual(result.getType(), TradeOrderTypeEnum.NORMAL.getType())) { return; } // 1. 获得用户的会员等级 MemberUserRespDTO user = memberUserApi.getUser(param.getUserId()); if (user.getLevelId() == null || user.getLevelId() <= 0) { return; } MemberLevelRespDTO level = memberLevelApi.getMemberLevel(user.getLevelId()); if (level == null || level.getDiscountPercent() == null) { return; } // 2. 计算每个 SKU 的优惠金额 result.getItems().forEach(orderItem -> { // 2.1 计算优惠金额 Integer vipPrice = calculateVipPrice(orderItem.getPayPrice(), level.getDiscountPercent()); if (vipPrice <= 0) { return; } // 2.2 记录优惠明细 if (orderItem.getSelected()) { // 注意,只有在选中的情况下,才会记录到优惠明细。否则仅仅是更新 SKU 优惠金额,用于展示 TradePriceCalculatorHelper.addPromotion(result, orderItem, level.getId(), level.getName(), PromotionTypeEnum.MEMBER_LEVEL.getType(), String.format("会员等级折扣:省 %s 元", formatPrice(vipPrice)), vipPrice); } // 2.3 更新 SKU 的优惠金额 orderItem.setVipPrice(vipPrice); TradePriceCalculatorHelper.recountPayPrice(orderItem); }); TradePriceCalculatorHelper.recountAllPrice(result); }
// Verifies the member-level discount: two items matching the promotion, one selected and one not.
// Only the selected item's discount enters the promotion details and the order totals.
@Test
public void testCalculate() {
    // Prepare the request parameters.
    TradePriceCalculateReqBO param = new TradePriceCalculateReqBO()
            .setUserId(1024L)
            .setItems(asList(
                    new TradePriceCalculateReqBO.Item().setSkuId(10L).setCount(2).setSelected(true), // matches the promotion and is selected
                    new TradePriceCalculateReqBO.Item().setSkuId(20L).setCount(3).setSelected(false) // matches the promotion but is not selected
            ));
    TradePriceCalculateRespBO result = new TradePriceCalculateRespBO()
            .setType(TradeOrderTypeEnum.NORMAL.getType())
            .setPrice(new TradePriceCalculateRespBO.Price())
            .setPromotions(new ArrayList<>())
            .setItems(asList(
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(10L).setCount(2).setSelected(true)
                            .setPrice(100),
                    new TradePriceCalculateRespBO.OrderItem().setSkuId(20L).setCount(3).setSelected(false)
                            .setPrice(50)
            ));
    // Make sure the prices are initialised before calculating.
    TradePriceCalculatorHelper.recountPayPrice(result.getItems());
    TradePriceCalculatorHelper.recountAllPrice(result);
    // Mock the member APIs: user has level 2048 with discountPercent 60.
    when(memberUserApi.getUser(eq(1024L))).thenReturn(new MemberUserRespDTO().setLevelId(2048L));
    when(memberLevelApi.getMemberLevel(eq(2048L))).thenReturn(
            new MemberLevelRespDTO().setId(2048L).setName("VIP 会员").setDiscountPercent(60));
    // Invoke the calculator under test.
    memberLevelPriceCalculator.calculate(param, result);
    // Assertions: order-level Price.
    TradePriceCalculateRespBO.Price price = result.getPrice();
    assertEquals(price.getTotalPrice(), 200);
    assertEquals(price.getDiscountPrice(), 0);
    assertEquals(price.getPointPrice(), 0);
    assertEquals(price.getDeliveryPrice(), 0);
    assertEquals(price.getCouponPrice(), 0);
    assertEquals(price.getVipPrice(), 80);
    assertEquals(price.getPayPrice(), 120);
    assertNull(result.getCouponId());
    // Assertions: SKU 1 (selected — discount applied and counted).
    assertEquals(result.getItems().size(), 2);
    TradePriceCalculateRespBO.OrderItem orderItem01 = result.getItems().get(0);
    assertEquals(orderItem01.getSkuId(), 10L);
    assertEquals(orderItem01.getCount(), 2);
    assertEquals(orderItem01.getPrice(), 100);
    assertEquals(orderItem01.getDiscountPrice(), 0);
    assertEquals(orderItem01.getDeliveryPrice(), 0);
    assertEquals(orderItem01.getCouponPrice(), 0);
    assertEquals(orderItem01.getPointPrice(), 0);
    assertEquals(orderItem01.getVipPrice(), 80);
    assertEquals(orderItem01.getPayPrice(), 120);
    // Assertions: SKU 2 (unselected — discount shown on the item only).
    TradePriceCalculateRespBO.OrderItem orderItem02 = result.getItems().get(1);
    assertEquals(orderItem02.getSkuId(), 20L);
    assertEquals(orderItem02.getCount(), 3);
    assertEquals(orderItem02.getPrice(), 50);
    assertEquals(orderItem02.getDiscountPrice(), 0);
    assertEquals(orderItem02.getDeliveryPrice(), 0);
    assertEquals(orderItem02.getCouponPrice(), 0);
    assertEquals(orderItem02.getPointPrice(), 0);
    assertEquals(orderItem02.getVipPrice(), 60);
    assertEquals(orderItem02.getPayPrice(), 90);
    // Assertions: Promotion details (only the selected SKU is recorded).
    assertEquals(result.getPromotions().size(), 1);
    TradePriceCalculateRespBO.Promotion promotion01 = result.getPromotions().get(0);
    assertEquals(promotion01.getId(), 2048L);
    assertEquals(promotion01.getName(), "VIP 会员");
    assertEquals(promotion01.getType(), PromotionTypeEnum.MEMBER_LEVEL.getType());
    assertEquals(promotion01.getTotalPrice(), 200);
    assertEquals(promotion01.getDiscountPrice(), 80);
    assertTrue(promotion01.getMatch());
    assertEquals(promotion01.getDescription(), "会员等级折扣:省 0.80 元");
    TradePriceCalculateRespBO.PromotionItem promotionItem01 = promotion01.getItems().get(0);
    assertEquals(promotion01.getItems().size(), 1);
    assertEquals(promotionItem01.getSkuId(), 10L);
    assertEquals(promotionItem01.getTotalPrice(), 200);
    assertEquals(promotionItem01.getDiscountPrice(), 80);
}
public WatsonxAiRequest request(Prompt prompt) { WatsonxAiChatOptions options = WatsonxAiChatOptions.builder().build(); if (this.defaultOptions != null) { options = ModelOptionsUtils.merge(options, this.defaultOptions, WatsonxAiChatOptions.class); } if (prompt.getOptions() != null) { if (prompt.getOptions() instanceof WatsonxAiChatOptions runtimeOptions) { options = ModelOptionsUtils.merge(runtimeOptions, options, WatsonxAiChatOptions.class); } else { var updatedRuntimeOptions = ModelOptionsUtils.copyToTarget(prompt.getOptions(), ChatOptions.class, WatsonxAiChatOptions.class); options = ModelOptionsUtils.merge(updatedRuntimeOptions, options, WatsonxAiChatOptions.class); } } Map<String, Object> parameters = options.toMap(); final String convertedPrompt = MessageToPromptConverter.create() .withAssistantPrompt("") .withHumanPrompt("") .toPrompt(prompt.getInstructions()); return WatsonxAiRequest.builder(convertedPrompt).withParameters(parameters).build(); }
@Test public void testCreateRequestWithNoModelId() { var options = ChatOptionsBuilder.builder().withTemperature(0.9f).withTopK(100).withTopP(0.6f).build(); Prompt prompt = new Prompt("Test message", options); Exception exception = Assert.assertThrows(IllegalArgumentException.class, () -> { WatsonxAiRequest request = chatModel.request(prompt); }); }
@Override protected void write(final PostgreSQLPacketPayload payload) { payload.getByteBuf().writeBytes(PREFIX); payload.getByteBuf().writeByte(status); }
@Test void assertReadWriteWithNotInTransaction() { ByteBuf byteBuf = ByteBufTestUtils.createByteBuf(6); PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8); PostgreSQLReadyForQueryPacket packet = PostgreSQLReadyForQueryPacket.NOT_IN_TRANSACTION; packet.write(payload); assertThat(byteBuf.writerIndex(), is(6)); assertThat(byteBuf.getByte(5), is((byte) 'I')); }
public Set<String> getReferencedSearchFiltersIds(final Collection<UsesSearchFilters> searchFiltersOwners) { return searchFiltersOwners .stream() .map(UsesSearchFilters::filters) .filter(Objects::nonNull) .flatMap(Collection::stream) .filter(usedSearchFilter -> usedSearchFilter instanceof ReferencedSearchFilter) .map(usedSearchFilter -> (ReferencedSearchFilter) usedSearchFilter) .map(ReferencedSearchFilter::id) .collect(Collectors.toSet()); }
@Test void testGetReferencedSearchFiltersIdsReturnsProperIds() { final ReferencedSearchFilter filter1 = ReferencedQueryStringSearchFilter.builder().id("r_id_1").build(); final ReferencedSearchFilter filter2 = ReferencedQueryStringSearchFilter.builder().id("r_id_2").build(); final Query query = TestData.validQueryBuilder() .filters(ImmutableList.of(filter1, filter2)) .build(); final Set<String> referencedSearchFiltersIds = toTest.getReferencedSearchFiltersIds(ImmutableSet.of(query)); assertEquals(ImmutableSet.of("r_id_1", "r_id_2"), referencedSearchFiltersIds); }
@Transactional public ReviewGroupCreationResponse createReviewGroup(ReviewGroupCreationRequest request) { String reviewRequestCode; do { reviewRequestCode = randomCodeGenerator.generate(REVIEW_REQUEST_CODE_LENGTH); } while (reviewGroupRepository.existsByReviewRequestCode(reviewRequestCode)); ReviewGroup reviewGroup = reviewGroupRepository.save( new ReviewGroup( request.revieweeName(), request.projectName(), reviewRequestCode, request.groupAccessCode() ) ); return new ReviewGroupCreationResponse(reviewGroup.getReviewRequestCode()); }
@Test void 코드가_중복되는_경우_다시_생성한다() { // given reviewGroupRepository.save(new ReviewGroup("reviewee", "project", "0000", "1111")); given(randomCodeGenerator.generate(anyInt())) .willReturn("0000") // ReviewRequestCode .willReturn("AAAA"); ReviewGroupCreationRequest request = new ReviewGroupCreationRequest("sancho", "reviewme", "groupAccessCode"); // when ReviewGroupCreationResponse response = reviewGroupService.createReviewGroup(request); // then assertThat(response).isEqualTo(new ReviewGroupCreationResponse("AAAA")); then(randomCodeGenerator).should(times(2)).generate(anyInt()); }
public Analysis analyze(Statement statement) { return analyze(statement, false); }
@Test public void testOrderByExpressionOnOutputColumn2() { // TODO: validate output analyze("SELECT a x FROM t1 ORDER BY a + 1"); assertFails(TYPE_MISMATCH, 3, 10, "SELECT x.c as x\n" + "FROM (VALUES 1) x(c)\n" + "ORDER BY x.c"); }
public int getWriteDelaySeconds() { return writeDelaySeconds; }
@Test public void getWriteDelaySeconds() { assertEquals(DEFAULT_WRITE_DELAY_SECONDS, new MapStoreConfig().getWriteDelaySeconds()); }
public static SchemaAndValue parseString(String value) { if (value == null) { return NULL_SCHEMA_AND_VALUE; } if (value.isEmpty()) { return new SchemaAndValue(Schema.STRING_SCHEMA, value); } ValueParser parser = new ValueParser(new Parser(value)); return parser.parse(false); }
@Test public void shouldParseNullAsNullIfSurroundedByWhitespace() { SchemaAndValue schemaAndValue = Values.parseString(WHITESPACE + "null" + WHITESPACE); assertNull(schemaAndValue); }
@Override public int[] toIntArray() { int[] array = new int[size]; for (int i = 0; i < size; i++) { array[i] = i; } return array; }
@Test public void toIntArray() throws Exception { RangeSet rs = new RangeSet(4); int[] array = rs.toIntArray(); assertArrayEquals(new int[]{0, 1, 2, 3}, array); }
public static boolean isContains(String values, String value) { return isNotEmpty(values) && isContains(COMMA_SPLIT_PATTERN.split(values), value); }
@Test void testIsContains() throws Exception { assertThat(StringUtils.isContains("a,b, c", "b"), is(true)); assertThat(StringUtils.isContains("", "b"), is(false)); assertThat(StringUtils.isContains(new String[] {"a", "b", "c"}, "b"), is(true)); assertThat(StringUtils.isContains((String[]) null, null), is(false)); assertTrue(StringUtils.isContains("abc", 'a')); assertFalse(StringUtils.isContains("abc", 'd')); assertFalse(StringUtils.isContains("", 'a')); assertFalse(StringUtils.isContains(null, 'a')); assertTrue(StringUtils.isNotContains("abc", 'd')); assertFalse(StringUtils.isNotContains("abc", 'a')); assertTrue(StringUtils.isNotContains("", 'a')); assertTrue(StringUtils.isNotContains(null, 'a')); }
@Override public void consume(Update update) { super.consume(update); }
@Test void canReportUpdatedStatistics() { Update upd1 = mockFullUpdate(bot, CREATOR, "/count 1 2 3 4"); bot.consume(upd1); Update upd2 = mockFullUpdate(bot, CREATOR, "must reply"); bot.consume(upd2); Mockito.reset(silent); Update statUpd = mockFullUpdate(bot, CREATOR, "/stats"); bot.consume(statUpd); verify(silent, times(1)).send("count: 1\nmustreply: 1", CREATOR.getId()); }
public static <K, V> Reshuffle<K, V> of() { return new Reshuffle<>(); }
@Test @Category(ValidatesRunner.class) public void testReshuffleAfterSessionsAndGroupByKey() { PCollection<KV<String, Iterable<Integer>>> input = pipeline .apply( Create.of(GBK_TESTABLE_KVS) .withCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()))) .apply(Window.into(Sessions.withGapDuration(Duration.standardMinutes(10)))) .apply(GroupByKey.create()); PCollection<KV<String, Iterable<Integer>>> output = input.apply(Reshuffle.of()); PAssert.that(output).satisfies(new AssertThatHasExpectedContents()); assertEquals(input.getWindowingStrategy(), output.getWindowingStrategy()); pipeline.run(); }
/**
 * Intercepts client script callbacks to inject bank-tag search behaviour into the bank
 * interface: rewrites the search prompt text, applies tag filtering to items, and switches
 * the bank build to single-tab view when appropriate. Results are communicated back to the
 * script engine by mutating the client's int/string stacks in place.
 */
@Subscribe
public void onScriptCallbackEvent(ScriptCallbackEvent event) {
    String eventName = event.getEventName();
    int[] intStack = client.getIntStack();
    String[] stringStack = client.getStringStack();
    int intStackSize = client.getIntStackSize();
    int stringStackSize = client.getStringStackSize();
    switch (eventName) {
        case "setSearchBankInputText":
            // Replace the bank search prompt text.
            stringStack[stringStackSize - 1] = SEARCH_BANK_INPUT_TEXT;
            break;
        case "setSearchBankInputTextFound": {
            // Prompt variant that reports the number of matches.
            int matches = intStack[intStackSize - 1];
            stringStack[stringStackSize - 1] = String.format(SEARCH_BANK_INPUT_TEXT_FOUND, matches);
            break;
        }
        case "bankSearchFilter":
            // Decide whether the item on the stack passes the active tag or the typed search filter.
            final int itemId = intStack[intStackSize - 1];
            String searchfilter = stringStack[stringStackSize - 1];
            BankTag tag = activeTag;
            boolean tagSearch = true;
            // Shared storage uses ~bankmain_filteritem too. Allow using tag searches in it but don't
            // apply the tag search from the active tab.
            final boolean bankOpen = client.getItemContainer(InventoryID.BANK) != null;
            if (tag == null || !bankOpen) {
                if (searchfilter.isEmpty()) {
                    return;
                }
                tagSearch = searchfilter.startsWith(TAG_SEARCH);
                if (tagSearch) {
                    searchfilter = searchfilter.substring(TAG_SEARCH.length()).trim();
                }
                // Build a temporary BankTag using the search filter
                tag = buildSearchFilterBankTag(searchfilter);
            }
            if (itemId == -1 && tag.layout() != null) {
                // item -1 always passes on a laid out tab so items can be dragged to it
                return;
            }
            if (itemId > -1 && tag.contains(itemId)) {
                // return true
                intStack[intStackSize - 2] = 1;
            } else if (tagSearch) {
                // if the item isn't tagged we return false to prevent the item matching if the item name happens
                // to contain the tag name.
                intStack[intStackSize - 2] = 0;
            }
            break;
        case "getSearchingTagTab":
            // Report whether a tag tab is currently active (1/0 boolean on the int stack).
            intStack[intStackSize - 1] = activeTag != null ? 1 : 0;
            break;
        case "bankBuildTab":
            // Use the per-tab view when we want to hide the separators to avoid having to reposition items &
            // recomputing the scroll height.
            if (activeTag != null && (tabInterface.isTagTabActive() || config.removeSeparators() || activeTag.layout() != null)) {
                var stack = client.getIntStack();
                var sz = client.getIntStackSize();
                stack[sz - 1] = 1; // use single tab view mode
            }
            break;
    }
}
@Test public void testExplicitSearch() { when(client.getIntStack()).thenReturn(new int[]{0, ABYSSAL_WHIP}); when(client.getStringStack()).thenReturn(new String[]{"tag:whip"}); when(configManager.getConfiguration(BankTagsPlugin.CONFIG_GROUP, TagManager.ITEM_KEY_PREFIX + ABYSSAL_WHIP)).thenReturn("herb,bossing,whip"); bankTagsPlugin.onScriptCallbackEvent(EVENT); assertEquals(1, client.getIntStack()[0]); // Search should be found at the start of the tag when(client.getIntStack()).thenReturn(new int[]{0, ABYSSAL_WHIP}); when(configManager.getConfiguration(BankTagsPlugin.CONFIG_GROUP, TagManager.ITEM_KEY_PREFIX + ABYSSAL_WHIP)).thenReturn("herb,bossing,whip long tag"); bankTagsPlugin.onScriptCallbackEvent(EVENT); assertEquals(1, client.getIntStack()[0]); // Search should not be be found in the middle of the tag // and explicit search does not allow fall through when(configManager.getConfiguration(BankTagsPlugin.CONFIG_GROUP, TagManager.ITEM_KEY_PREFIX + ABYSSAL_WHIP)).thenReturn("herb,bossing whip"); bankTagsPlugin.onScriptCallbackEvent(EVENT); assertEquals(0, client.getIntStack()[0]); }
public URL getInterNodeListener( final Function<URL, Integer> portResolver ) { return getInterNodeListener(portResolver, LOGGER); }
@Test public void shouldThrowIfOnGetInterNodeListenerIfInternalListenerSetToUnresolvableHost() { // Given: final URL expected = url("https://unresolvable_host:12345"); final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder() .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") .put(INTERNAL_LISTENER_CONFIG, expected.toString()) .build() ); // When: final Exception e = assertThrows( ConfigException.class, () -> config.getInterNodeListener(portResolver, logger) ); // Then: assertThat(e.getMessage(), containsString("Invalid value " + "https://unresolvable_host:12345 for configuration " + INTERNAL_LISTENER_CONFIG + ": Could not resolve internal host")); }
static String replaceInvalidChars(String str) { char[] chars = null; final int strLen = str.length(); int pos = 0; for (int i = 0; i < strLen; i++) { final char c = str.charAt(i); switch (c) { case '>': case '<': case '"': // remove character by not moving cursor if (chars == null) { chars = str.toCharArray(); } break; case ' ': if (chars == null) { chars = str.toCharArray(); } chars[pos++] = '_'; break; case ',': case '=': case ';': case ':': case '?': case '\'': case '*': if (chars == null) { chars = str.toCharArray(); } chars[pos++] = '-'; break; default: if (chars != null) { chars[pos] = c; } pos++; } } return chars == null ? str : new String(chars, 0, pos); }
@Test void testReplaceInvalidChars() { assertThat(JMXReporter.replaceInvalidChars("")).isEqualTo(""); assertThat(JMXReporter.replaceInvalidChars("abc")).isEqualTo("abc"); assertThat(JMXReporter.replaceInvalidChars("abc\"")).isEqualTo("abc"); assertThat(JMXReporter.replaceInvalidChars("\"abc")).isEqualTo("abc"); assertThat(JMXReporter.replaceInvalidChars("\"abc\"")).isEqualTo("abc"); assertThat(JMXReporter.replaceInvalidChars("\"a\"b\"c\"")).isEqualTo("abc"); assertThat(JMXReporter.replaceInvalidChars("\"\"\"\"")).isEqualTo(""); assertThat(JMXReporter.replaceInvalidChars(" ")).isEqualTo("____"); assertThat(JMXReporter.replaceInvalidChars("\"ab ;(c)'")).isEqualTo("ab_-(c)-"); assertThat(JMXReporter.replaceInvalidChars("a b c")).isEqualTo("a_b_c"); assertThat(JMXReporter.replaceInvalidChars("a b c ")).isEqualTo("a_b_c_"); assertThat(JMXReporter.replaceInvalidChars("a;b'c*")).isEqualTo("a-b-c-"); assertThat(JMXReporter.replaceInvalidChars("a,=;:?'b,=;:?'c")).isEqualTo("a------b------c"); }
public Future<Void> reconcile(boolean isOpenShift, ImagePullPolicy imagePullPolicy, List<LocalObjectReference> imagePullSecrets, Clock clock) { return serviceAccount() .compose(i -> entityOperatorRole()) .compose(i -> topicOperatorRole()) .compose(i -> userOperatorRole()) .compose(i -> networkPolicy()) .compose(i -> topicOperatorRoleBindings()) .compose(i -> userOperatorRoleBindings()) .compose(i -> topicOperatorConfigMap()) .compose(i -> userOperatorConfigMap()) .compose(i -> topicOperatorCruiseControlApiSecret()) .compose(i -> deleteOldEntityOperatorSecret()) .compose(i -> topicOperatorSecret(clock)) .compose(i -> userOperatorSecret(clock)) .compose(i -> deployment(isOpenShift, imagePullPolicy, imagePullSecrets)) .compose(i -> waitForDeploymentReadiness()); }
// Topic Operator and User Operator each watch a namespace different from the cluster
// namespace, so roles are reconciled in the watched namespaces too and role bindings are
// reconciled in BOTH the watched and the cluster namespace (watched first).
@Test
public void reconcileWithToAndUoAndWatchNamespaces(VertxTestContext context) {
    String toWatchNamespace = "to-watch-namespace";
    String uoWatchNamespace = "uo-watch-namespace";
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    DeploymentOperator mockDepOps = supplier.deploymentOperations;
    SecretOperator mockSecretOps = supplier.secretOperations;
    ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations;
    RoleOperator mockRoleOps = supplier.roleOperations;
    RoleBindingOperator mockRoleBindingOps = supplier.roleBindingOperations;
    NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;

    // Capture every reconciled resource so it can be asserted on below.
    ArgumentCaptor<ServiceAccount> saCaptor = ArgumentCaptor.forClass(ServiceAccount.class);
    when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture());

    when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorSecretName(NAME)))).thenReturn(Future.succeededFuture());
    when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.entityUserOperatorSecretName(NAME)))).thenReturn(Future.succeededFuture());
    ArgumentCaptor<Secret> operatorSecretCaptor = ArgumentCaptor.forClass(Secret.class);
    when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorSecretName(NAME)), operatorSecretCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<Secret> toSecretCaptor = ArgumentCaptor.forClass(Secret.class);
    when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorSecretName(NAME)), toSecretCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<Secret> uoSecretCaptor = ArgumentCaptor.forClass(Secret.class);
    when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorSecretName(NAME)), uoSecretCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<Role> operatorRoleCaptor = ArgumentCaptor.forClass(Role.class);
    when(mockRoleOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), operatorRoleCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<Role> toRoleCaptor = ArgumentCaptor.forClass(Role.class);
    when(mockRoleOps.reconcile(any(), eq(toWatchNamespace), eq(KafkaResources.entityOperatorDeploymentName(NAME)), toRoleCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<Role> uoRoleCaptor = ArgumentCaptor.forClass(Role.class);
    when(mockRoleOps.reconcile(any(), eq(uoWatchNamespace), eq(KafkaResources.entityOperatorDeploymentName(NAME)), uoRoleCaptor.capture())).thenReturn(Future.succeededFuture());

    // Role bindings are stubbed for both the cluster namespace and the watched namespace.
    ArgumentCaptor<RoleBinding> toRoleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class);
    when(mockRoleBindingOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorRoleBinding(NAME)), toRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture());
    when(mockRoleBindingOps.reconcile(any(), eq(toWatchNamespace), eq(KafkaResources.entityTopicOperatorRoleBinding(NAME)), toRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<RoleBinding> uoRoleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class);
    when(mockRoleBindingOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorRoleBinding(NAME)), uoRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture());
    when(mockRoleBindingOps.reconcile(any(), eq(uoWatchNamespace), eq(KafkaResources.entityUserOperatorRoleBinding(NAME)), uoRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<ConfigMap> toCmCaptor = ArgumentCaptor.forClass(ConfigMap.class);
    when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorLoggingConfigMapName(NAME)), toCmCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<ConfigMap> uoCmCaptor = ArgumentCaptor.forClass(ConfigMap.class);
    when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorLoggingConfigMapName(NAME)), uoCmCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<NetworkPolicy> netPolicyCaptor = ArgumentCaptor.forClass(NetworkPolicy.class);
    when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), netPolicyCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class);
    when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture());
    when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture());

    // Kafka CR with watched namespaces configured for both operators.
    Kafka kafka = new KafkaBuilder(KAFKA)
            .editSpec()
                .withNewEntityOperator()
                    .withNewTopicOperator()
                        .withWatchedNamespace(toWatchNamespace)
                    .endTopicOperator()
                    .withNewUserOperator()
                        .withWatchedNamespace(uoWatchNamespace)
                    .endUserOperator()
                .endEntityOperator()
            .endSpec()
            .build();

    EntityOperatorReconciler rcnclr = new EntityOperatorReconciler(
            Reconciliation.DUMMY_RECONCILIATION,
            ResourceUtils.dummyClusterOperatorConfig(),
            supplier,
            kafka,
            CLUSTER_CA
    );

    Checkpoint async = context.checkpoint();
    rcnclr.reconcile(false, null, null, Clock.systemUTC())
            .onComplete(context.succeeding(v -> context.verify(() -> {
                assertThat(saCaptor.getAllValues().size(), is(1));
                assertThat(saCaptor.getValue(), is(notNullValue()));

                // The legacy combined Entity Operator secret is reconciled to null (deleted).
                assertThat(operatorSecretCaptor.getAllValues().size(), is(1));
                assertThat(operatorSecretCaptor.getAllValues().get(0), is(nullValue()));
                assertThat(toSecretCaptor.getAllValues().size(), is(1));
                assertThat(toSecretCaptor.getAllValues().get(0), is(notNullValue()));
                assertThat(uoSecretCaptor.getAllValues().size(), is(1));
                assertThat(uoSecretCaptor.getAllValues().get(0), is(notNullValue()));

                assertThat(netPolicyCaptor.getAllValues().size(), is(1));
                assertThat(netPolicyCaptor.getValue(), is(notNullValue()));

                assertThat(operatorRoleCaptor.getAllValues().size(), is(1));
                assertThat(operatorRoleCaptor.getValue(), is(notNullValue()));
                assertThat(toRoleCaptor.getAllValues().size(), is(1));
                assertThat(toRoleCaptor.getValue(), is(notNullValue()));
                assertThat(uoRoleCaptor.getAllValues().size(), is(1));
                assertThat(uoRoleCaptor.getValue(), is(notNullValue()));

                // Role bindings: watched namespace first, then the cluster namespace.
                assertThat(toRoleBindingCaptor.getAllValues().size(), is(2));
                assertThat(toRoleBindingCaptor.getAllValues().get(0), is(notNullValue()));
                assertThat(toRoleBindingCaptor.getAllValues().get(0).getMetadata().getNamespace(), is(toWatchNamespace));
                assertThat(toRoleBindingCaptor.getAllValues().get(1), is(notNullValue()));
                assertThat(toRoleBindingCaptor.getAllValues().get(1).getMetadata().getNamespace(), is(NAMESPACE));
                assertThat(uoRoleBindingCaptor.getAllValues().size(), is(2));
                assertThat(uoRoleBindingCaptor.getAllValues().get(0), is(notNullValue()));
                assertThat(uoRoleBindingCaptor.getAllValues().get(0).getMetadata().getNamespace(), is(uoWatchNamespace));
                assertThat(uoRoleBindingCaptor.getAllValues().get(1), is(notNullValue()));
                assertThat(uoRoleBindingCaptor.getAllValues().get(1).getMetadata().getNamespace(), is(NAMESPACE));

                assertThat(toCmCaptor.getAllValues().size(), is(1));
                assertThat(toCmCaptor.getValue(), is(notNullValue()));
                assertThat(uoCmCaptor.getAllValues().size(), is(1));
                assertThat(uoCmCaptor.getValue(), is(notNullValue()));

                assertThat(depCaptor.getAllValues().size(), is(1));
                assertThat(depCaptor.getValue(), is(notNullValue()));

                async.flag();
            })));
}
@Override public boolean unloadPlugin(String pluginId) { if (currentPluginId.equals(pluginId)) { return original.unloadPlugin(pluginId); } else { throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute unloadPlugin for foreign pluginId!"); } }
@Test public void unloadPlugin() { pluginManager.loadPlugins(); assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.unloadPlugin(OTHER_PLUGIN_ID)); assertTrue(wrappedPluginManager.unloadPlugin(THIS_PLUGIN_ID)); }
public void startAsync() { try { udfLoader.load(); ProcessingLogServerUtils.maybeCreateProcessingLogTopic( serviceContext.getTopicClient(), processingLogConfig, ksqlConfig); if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) { log.warn("processing log auto-create is enabled, but this is not supported " + "for headless mode."); } rocksDBConfigSetterHandler.accept(ksqlConfig); processesQueryFile(readQueriesFile(queriesFile)); showWelcomeMessage(); final Properties properties = new Properties(); ksqlConfig.originals().forEach((key, value) -> { if (nonNull(value)) { properties.put(key, value.toString()); } }); versionChecker.start(KsqlModuleType.SERVER, properties); } catch (final Exception e) { log.error("Failed to start KSQL Server with query file: " + queriesFile, e); throw e; } }
@Test public void shouldCreateProcessingLogTopic() { // When: standaloneExecutor.startAsync(); // Then verify(kafkaTopicClient).createTopic(eq(PROCESSING_LOG_TOPIC_NAME), anyInt(), anyShort()); }
@Override public List<Change> computeDiff(final List<T> source, final List<T> target, DiffAlgorithmListener progress) { Objects.requireNonNull(source, "source list must not be null"); Objects.requireNonNull(target, "target list must not be null"); if (progress != null) { progress.diffStart(); } PathNode path = buildPath(source, target, progress); List<Change> result = buildRevision(path, source, target); if (progress != null) { progress.diffEnd(); } return result; }
@Test public void testDiffMyersExample1Forward() { List<String> original = Arrays.asList("A", "B", "C", "A", "B", "B", "A"); List<String> revised = Arrays.asList("C", "B", "A", "B", "A", "C"); final Patch<String> patch = Patch.generate(original, revised, new MyersDiff<String>().computeDiff(original, revised, null)); assertNotNull(patch); assertEquals(4, patch.getDeltas().size()); assertEquals("Patch{deltas=[[DeleteDelta, position: 0, lines: [A, B]], [InsertDelta, position: 3, lines: [B]], [DeleteDelta, position: 5, lines: [B]], [InsertDelta, position: 7, lines: [C]]]}", patch.toString()); }
/**
 * Replaces the element at {@code index} with the given non-null value.
 *
 * @param index position to write
 * @param value new element; must not be null
 * @throws NullPointerException if {@code value} is null
 */
public void set(int index, E value) {
    // 'assert' is a no-op unless the JVM runs with -ea, so a null could
    // previously slip into the storage and fail far from the caller; validate
    // explicitly instead.
    if (value == null) {
        throw new NullPointerException("value must not be null");
    }
    Storage32 newStorage = storage.set(index, value);
    if (newStorage != storage) {
        // The storage may replace itself (e.g. switching representation);
        // adopt the new instance so later operations see the update.
        storage = newStorage;
    }
}
// Writing an index far beyond the dense capacity must trigger the
// dense-to-sparse conversion, and the storage must remain consistent for
// subsequent writes.
@Test
public void testSetDenseToSparse32WithMaxExceeded() {
    // add some dense entries
    for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE; ++i) {
        set(i);
        verify();
    }
    // go far beyond the last index to trigger dense to sparse conversion
    set(ARRAY_STORAGE_32_MAX_SPARSE_SIZE * 1000);
    verify();
    // make sure we are still good
    for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE * 5; ++i) {
        set(i);
        verify();
    }
}
// Validates a CREATE READWRITE_SPLITTING RULE statement before it is applied,
// passing the currently configured rule (if any) so conflicts with existing
// definitions can be detected; honors IF NOT EXISTS.
@Override
public void checkBeforeUpdate(final CreateReadwriteSplittingRuleStatement sqlStatement) {
    ReadwriteSplittingRuleStatementChecker.checkCreation(database, sqlStatement.getRules(),
            null == rule ? null : rule.getConfiguration(), sqlStatement.isIfNotExists());
}
// A rule referencing storage units missing from the metadata must be rejected
// with MissingRequiredStorageUnitsException.
@Test
void assertCheckSQLStatementWithoutExistedResources() {
    when(resourceMetaData.getNotExistedDataSources(any())).thenReturn(Arrays.asList("read_ds_0", "read_ds_1"));
    assertThrows(MissingRequiredStorageUnitsException.class, () -> executor.checkBeforeUpdate(createSQLStatement("TEST")));
}
/**
 * Discovers every TopicPartition for the given explicit topic set, or — when
 * no topics are given — for all topics visible to the consumer, optionally
 * filtered by {@code topicPattern}.
 *
 * @param kafkaConsumerFactoryFn factory building a metadata-only consumer
 * @param kafkaConsumerConfig consumer configuration passed to the factory
 * @param topics explicit topics to resolve; may be null or empty
 * @param topicPattern optional name filter used only when topics is empty
 * @return all discovered topic partitions
 */
@VisibleForTesting
static List<TopicPartition> getAllTopicPartitions(
    SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>> kafkaConsumerFactoryFn,
    Map<String, Object> kafkaConsumerConfig,
    Set<String> topics,
    @Nullable Pattern topicPattern) {
  List<TopicPartition> current = new ArrayList<>();
  // The consumer is only needed for metadata lookup; close it promptly.
  try (Consumer<byte[], byte[]> kafkaConsumer =
      kafkaConsumerFactoryFn.apply(kafkaConsumerConfig)) {
    if (topics != null && !topics.isEmpty()) {
      for (String topic : topics) {
        List<PartitionInfo> partitions = kafkaConsumer.partitionsFor(topic);
        // partitionsFor may return null when the topic does not exist (and
        // auto-creation is disabled); skip instead of throwing an NPE.
        if (partitions != null) {
          for (PartitionInfo partition : partitions) {
            current.add(new TopicPartition(topic, partition.partition()));
          }
        }
      }
    } else {
      for (Map.Entry<String, List<PartitionInfo>> topicInfo :
          kafkaConsumer.listTopics().entrySet()) {
        if (topicPattern == null || topicPattern.matcher(topicInfo.getKey()).matches()) {
          for (PartitionInfo partition : topicInfo.getValue()) {
            current.add(new TopicPartition(partition.topic(), partition.partition()));
          }
        }
      }
    }
  }
  return current;
}
// With no explicit topic set and no pattern, discovery must return every
// partition of every topic reported by the consumer.
@Test
public void testGetAllTopicPartitions() throws Exception {
    Consumer<byte[], byte[]> mockConsumer = Mockito.mock(Consumer.class);
    when(mockConsumer.listTopics())
        .thenReturn(
            ImmutableMap.of(
                "topic1",
                ImmutableList.of(
                    new PartitionInfo("topic1", 0, null, null, null),
                    new PartitionInfo("topic1", 1, null, null, null)),
                "topic2",
                ImmutableList.of(
                    new PartitionInfo("topic2", 0, null, null, null),
                    new PartitionInfo("topic2", 1, null, null, null))));
    assertEquals(
        ImmutableList.of(
            new TopicPartition("topic1", 0),
            new TopicPartition("topic1", 1),
            new TopicPartition("topic2", 0),
            new TopicPartition("topic2", 1)),
        WatchForKafkaTopicPartitions.getAllTopicPartitions(
            (input) -> mockConsumer, null, null, null));
}
/**
 * Fetches the field-to-type mapping of {@code index} from the _mapping API.
 * Fields of type "alias" are resolved (one level only — a chain of aliases is
 * not followed) to the type of the field their "path" points at.
 */
public Map<String, FieldMapping> fieldTypes(final String index) {
    final JsonNode result = client.executeRequest(request(index), "Unable to retrieve field types of index " + index);
    // Navigate to <index>.mappings.properties; path() never throws on missing nodes.
    final JsonNode fields = result.path(index).path("mappings").path("properties");
    //noinspection UnstableApiUsage
    return Streams.stream(fields.fields())
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
                final JsonNode entryValue = entry.getValue();
                String type = entryValue.path("type").asText();
                if ("alias".equals(type)) {
                    // Replace the alias with its target field's declared type.
                    String aliasPath = entryValue.path("path").asText();
                    type = fields.path(aliasPath).path("type").asText();
                }
                return FieldMapping.create(
                        type,
                        entryValue.path("fielddata").asBoolean()
                );
            }));
}
// An index whose mapping declares no properties must yield an empty field map.
@Test
void testReturnsEmptyMapOnNoMappings() throws Exception {
    String mappingResponse = """
            { "graylog_13": { "mappings": { "properties": { } } } }
            """;
    doReturn(objectMapper.readTree(mappingResponse))
            .when(client)
            .executeRequest(eq(new Request("GET", "/graylog_13/_mapping")), anyString());

    final Map<String, FieldMappingApi.FieldMapping> result = toTest.fieldTypes("graylog_13");

    assertEquals(Map.of(), result);
}
/**
 * Finds the first occurrence of {@code subArray} within {@code array},
 * searching from index 0; delegates to the three-argument overload.
 */
public static <T> int indexOfSub(T[] array, T[] subArray) {
    return indexOfSub(array, 0, subArray);
}
// NOTE(review): the method name says "lastIndexOfSub" but the body exercises
// ArrayUtil.indexOfSub — presumably a copy-paste slip in the name; the
// assertion itself (first match of {0x56, 0x78} starts at index 1) is correct.
@Test
public void lastIndexOfSubTest2() {
    Integer[] a = {0x12, 0x56, 0x78, 0x56, 0x21, 0x9A};
    Integer[] b = {0x56, 0x78};
    int i = ArrayUtil.indexOfSub(a, b);
    assertEquals(1, i);
}
// Records the blame result of one file into the scanner report. A result whose
// line count differs from the file's is ignored (debug log). Each distinct
// revision becomes one changeset; every file line is indexed against its
// revision's changeset. Finally the file is removed from the pending set and
// progress is reported.
@Override
public synchronized void blameResult(InputFile file, List<BlameLine> lines) {
  checkNotNull(file);
  checkNotNull(lines);
  checkArgument(allFilesToBlame.contains(file), "It was not expected to blame file %s", file);

  if (lines.size() != file.lines()) {
    LOG.debug("Ignoring blame result since provider returned {} blame lines but file {} has {} lines", lines.size(), file, file.lines());
    return;
  }

  Builder scmBuilder = ScannerReport.Changesets.newBuilder();
  DefaultInputFile inputFile = (DefaultInputFile) file;
  scmBuilder.setComponentRef(inputFile.scannerId());
  // Deduplicate changesets by revision; value is the changeset's index in the builder.
  Map<String, Integer> changesetsIdByRevision = new HashMap<>();

  int lineId = 1;
  for (BlameLine line : lines) {
    validateLine(line, lineId, file);
    Integer changesetId = changesetsIdByRevision.get(line.revision());
    if (changesetId == null) {
      addChangeset(scmBuilder, line);
      changesetId = scmBuilder.getChangesetCount() - 1;
      changesetsIdByRevision.put(line.revision(), changesetId);
    }
    scmBuilder.addChangesetIndexByLine(changesetId);
    lineId++;
  }
  writer.writeComponentChangesets(scmBuilder.build());
  allFilesToBlame.remove(file);
  count++;
  progressReport.message(count + "/" + total + " " + pluralize(count) + " have been analyzed");
}
// A blame line with no date must be rejected with a descriptive
// IllegalArgumentException naming the file and line.
@Test
public void shouldFailIfNullDate() {
    InputFile file = new TestInputFileBuilder("foo", "src/main/java/Foo.java").setLines(1).build();
    var blameOutput = new DefaultBlameOutput(null, analysisWarnings, singletonList(file), mock(DocumentationLinkGenerator.class));
    var lines = singletonList(new BlameLine().revision("1").author("guy"));
    assertThatThrownBy(() -> blameOutput.blameResult(file, lines))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Blame date is null for file " + file + " at line 1");
}
/**
 * Extracts the archive at {@code source} into {@code destination}, delegating
 * to the three-argument overload with the reproducible-timestamps flag off.
 *
 * @throws IOException if extraction fails
 */
public static void extract(Path source, Path destination) throws IOException {
    extract(source, destination, false);
}
// With reproducible timestamps enabled, directories created during extraction
// get the fixed epoch-based time while packaged files keep their archived
// modification time.
@Test
public void testExtract_reproducibleTimestampsEnabled() throws URISyntaxException, IOException {
    // The tarfile has only level1/level2/level3/file.txt packaged
    Path source = Paths.get(Resources.getResource("core/tarfile-only-file-packaged.tar").toURI());
    Path destination = temporaryFolder.getRoot().toPath();

    TarExtractor.extract(source, destination, true);

    assertThat(
            Files.getLastModifiedTime(destination.resolve("level-1"))
                .toInstant()
                .truncatedTo(ChronoUnit.SECONDS))
        .isEqualTo(FileTime.fromMillis(1000L).toInstant());
    assertThat(
            Files.getLastModifiedTime(destination.resolve("level-1/level-2"))
                .toInstant()
                .truncatedTo(ChronoUnit.SECONDS))
        .isEqualTo(FileTime.fromMillis(1000L).toInstant());
    assertThat(
            Files.getLastModifiedTime(destination.resolve("level-1/level-2/level-3"))
                .toInstant()
                .truncatedTo(ChronoUnit.SECONDS))
        .isEqualTo(FileTime.fromMillis(1000L).toInstant());
    assertThat(
            Files.getLastModifiedTime(destination.resolve("level-1/level-2/level-3/file.txt"))
                .toInstant()
                .truncatedTo(ChronoUnit.SECONDS))
        .isEqualTo(Instant.parse("2021-01-29T21:10:02Z"));
}
// Higher criterion values are better: value1 beats value2 iff strictly greater.
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
    return criterionValue1.isGreaterThan(criterionValue2);
}
// The ordering must be asymmetric: 50 beats 45 and not the reverse.
@Test
public void betterThan() {
    AnalysisCriterion criterion = getCriterion();
    assertTrue(criterion.betterThan(numOf(50), numOf(45)));
    assertFalse(criterion.betterThan(numOf(45), numOf(50)));
}
/**
 * Replaces the in-memory service metadata with the given snapshot. Each
 * service in the snapshot is first registered as a singleton with the
 * ServiceManager; then the metadata map reference is swapped and the previous
 * map's contents are discarded.
 */
public void loadServiceMetadataSnapshot(ConcurrentMap<Service, ServiceMetadata> snapshot) {
    snapshot.keySet().forEach(each -> {
        Service service = Service.newService(each.getNamespace(), each.getGroup(), each.getName(), each.isEphemeral());
        ServiceManager.getInstance().getSingleton(service);
    });
    ConcurrentMap<Service, ServiceMetadata> previous = serviceMetadataMap;
    serviceMetadataMap = snapshot;
    previous.clear();
}
// Loading an empty snapshot must leave the metadata map empty.
@Test
void testLoadServiceMetadataSnapshot() {
    namingMetadataManager.loadServiceMetadataSnapshot(new ConcurrentHashMap<>());
    Map<Service, ServiceMetadata> serviceMetadataSnapshot = namingMetadataManager.getServiceMetadataSnapshot();
    assertEquals(0, serviceMetadataSnapshot.size());
}
// Inlines this type-variable identifier by looking up the expression bound to
// its key in the inliner and inlining that binding.
@Override
public JCExpression inline(Inliner inliner) {
    return inliner.getBinding(key()).inline(inliner);
}
// Inlining a bound type variable must render the bound type's source form and
// register the import it requires.
@Test
public void inline() {
    ImportPolicy.bind(context, ImportPolicy.IMPORT_TOP_LEVEL);
    Symtab symtab = Symtab.instance(context);
    Type listType = symtab.listType;
    bind(
        new UTypeVar.Key("E"),
        TypeWithExpression.create(
            new ClassType(listType, List.<Type>of(symtab.stringType), listType.tsym)));
    assertInlines("List<String>", UTypeVarIdent.create("E"));
    assertThat(inliner.getImportsToAdd()).isEqualTo(ImmutableSet.of("java.util.List"));
}
/**
 * Returns the top element without removing it.
 *
 * @return the most recently pushed element
 * @throws EmptyStackException if the stack is empty
 */
public E peek() {
    if (mSize != 0) {
        return mElements.get(mSize - 1);
    }
    throw new EmptyStackException();
}
// peek() on an empty stack must throw EmptyStackException.
@Test
void testIllegalPeek() throws Exception {
    Assertions.assertThrows(EmptyStackException.class, () -> {
        Stack<String> stack = new Stack<String>();
        stack.peek();
    });
}
/**
 * Builds the PodTemplateSpec shared by operator-managed workloads. The
 * user-supplied {@code template} customization may be null; every value it
 * does not provide falls back to a default (e.g. 30s termination grace period,
 * the "default-scheduler"). Template labels/annotations are merged on top of
 * the supplied defaults.
 */
public static PodTemplateSpec createPodTemplateSpec(
        String workloadName,
        Labels labels,
        PodTemplate template,
        Map<String, String> defaultPodLabels,
        Map<String, String> podAnnotations,
        Affinity affinity,
        List<Container> initContainers,
        List<Container> containers,
        List<Volume> volumes,
        List<LocalObjectReference> defaultImagePullSecrets,
        PodSecurityContext podSecurityContext
) {
    return new PodTemplateSpecBuilder()
            .withNewMetadata()
                // Template-provided labels/annotations win over the defaults.
                .withLabels(labels.withAdditionalLabels(Util.mergeLabelsOrAnnotations(defaultPodLabels, TemplateUtils.labels(template))).toMap())
                .withAnnotations(Util.mergeLabelsOrAnnotations(podAnnotations, TemplateUtils.annotations(template)))
            .endMetadata()
            .withNewSpec()
                // Service account named after the workload itself.
                .withServiceAccountName(workloadName)
                .withEnableServiceLinks(template != null ? template.getEnableServiceLinks() : null)
                .withAffinity(affinity)
                .withInitContainers(initContainers)
                .withContainers(containers)
                .withVolumes(volumes)
                .withTolerations(template != null && template.getTolerations() != null ? template.getTolerations() : null)
                .withTerminationGracePeriodSeconds(template != null ? (long) template.getTerminationGracePeriodSeconds() : 30L)
                .withImagePullSecrets(imagePullSecrets(template, defaultImagePullSecrets))
                .withSecurityContext(podSecurityContext)
                .withPriorityClassName(template != null ? template.getPriorityClassName() : null)
                .withSchedulerName(template != null && template.getSchedulerName() != null ? template.getSchedulerName() : "default-scheduler")
                .withHostAliases(template != null ? template.getHostAliases() : null)
                .withTopologySpreadConstraints(template != null ? template.getTopologySpreadConstraints() : null)
            .endSpec()
            .build();
}
// With a null template and null optional inputs, every defaultable field must
// take its documented default and the rest must be absent.
@Test
public void testCreatePodTemplateSpecWithNullValues() {
    PodTemplateSpec pod = WorkloadUtils.createPodTemplateSpec(
            NAME,
            LABELS,
            null,
            null,
            null,
            null,
            null,
            List.of(new ContainerBuilder().withName("container").build()),
            null,
            null,
            null
    );

    assertThat(pod.getMetadata().getLabels(), is(LABELS.toMap()));
    assertThat(pod.getMetadata().getAnnotations(), is(Map.of()));
    assertThat(pod.getSpec().getServiceAccountName(), is(NAME));
    assertThat(pod.getSpec().getEnableServiceLinks(), is(nullValue()));
    assertThat(pod.getSpec().getAffinity(), is(nullValue()));
    assertThat(pod.getSpec().getInitContainers(), is(nullValue()));
    assertThat(pod.getSpec().getContainers().size(), is(1));
    assertThat(pod.getSpec().getContainers().get(0).getName(), is("container"));
    assertThat(pod.getSpec().getVolumes(), is(nullValue()));
    assertThat(pod.getSpec().getTolerations(), is(nullValue()));
    assertThat(pod.getSpec().getTerminationGracePeriodSeconds(), is(30L));
    assertThat(pod.getSpec().getImagePullSecrets(), is(nullValue()));
    assertThat(pod.getSpec().getSecurityContext(), is(nullValue()));
    assertThat(pod.getSpec().getPriorityClassName(), is(nullValue()));
    assertThat(pod.getSpec().getSchedulerName(), is("default-scheduler"));
    assertThat(pod.getSpec().getHostAliases(), is(nullValue()));
    assertThat(pod.getSpec().getTopologySpreadConstraints(), is(nullValue()));
}
@Udf(description = "Returns a masked version of the input string. All characters except for the" + " last n will be replaced according to the default masking rules.") @SuppressWarnings("MethodMayBeStatic") // Invoked via reflection public String mask( @UdfParameter("input STRING to be masked") final String input, @UdfParameter("number of characters to keep unmasked at the end") final int numChars ) { return doMask(new Masker(), input, numChars); }
// A negative keep-count must be rejected with a descriptive function error.
@Test
public void shouldThrowIfLengthIsNegative() {
    // When:
    final KsqlException e = assertThrows(
        KsqlFunctionException.class,
        () -> udf.mask("AbCd#$123xy Z", -1)
    );

    // Then:
    assertThat(e.getMessage(), containsString("function mask_keep_right requires a non-negative number"));
}
/**
 * Derives a short rule name from a fully-qualified template class name: the
 * first dot-separated segment starting with an upper-case letter is taken as
 * the top-level class, and everything after it is joined with underscores.
 * When no such segment precedes the last one, the last segment is returned
 * alone.
 */
@VisibleForTesting
static String fromSecondLevel(String qualifiedTemplateClass) {
    final List<String> segments = Splitter.on('.').splitToList(qualifiedTemplateClass);
    final int lastIndex = segments.size() - 1;
    int index = 0;
    while (index < lastIndex) {
        if (Ascii.isUpperCase(segments.get(index).charAt(0))) {
            return Joiner.on('_').join(segments.subList(index + 1, segments.size()));
        }
        index++;
    }
    return Iterables.getLast(segments);
}
// Name derivation must handle a class directly under the package, one level of
// nesting, and two levels of nesting (joined with underscores).
@Test
public void fromSecondLevel() {
    assertThat(
            RefasterRule.fromSecondLevel(
                "com.google.devtools.javatools.refactory.refaster.cleanups.MergeNestedIf"))
        .isEqualTo("MergeNestedIf");
    assertThat(
            RefasterRule.fromSecondLevel(
                "com.google.devtools.javatools.refactory.refaster.cleanups.HashingShortcuts.HashEntireByteArray"))
        .isEqualTo("HashEntireByteArray");
    assertThat(
            RefasterRule.fromSecondLevel(
                "com.google.devtools.javatools.refactory.refaster.cleanups.PrimitiveComparisons.Compare.Ints"))
        .isEqualTo("Compare_Ints");
}
// Seckill scenario two: serializes purchases behind a Redisson distributed
// lock before delegating to processSeckill.
// NOTE(review): "Redission" in the method name looks like a misspelling of
// "Redisson"; kept as-is since tests and routes bind to this name.
@Operation(summary = "秒杀场景二(redis分布式锁实现)", description = "秒杀场景二(redis分布式锁实现)", method = "POST")
@PostMapping("/redisson")
public Result doWithRedissionLock(@RequestBody @Valid SeckillWebMockRequestDTO dto) {
    processSeckill(dto, REDISSION_LOCK);
    return Result.ok();
}
// The controller endpoint must return success without delegating to the
// (mocked-out) seckill service in this setup.
@Test
void testDoWithRedissionLock() {
    // Given: a web request for seckill id 1 with a single attempt.
    SeckillWebMockRequestDTO requestDTO = new SeckillWebMockRequestDTO();
    requestDTO.setSeckillId(1L);
    requestDTO.setRequestCount(1);

    // When:
    Result response = seckillMockController.doWithRedissionLock(requestDTO);

    // Then: (the unused SeckillMockRequestDTO local from the original test was
    // dead code and has been removed)
    verify(seckillService, times(0)).execute(any(SeckillMockRequestDTO.class), anyInt());
    assertEquals(0, response.getCode());
}
// Blocking variant: delegates to the async implementation and waits for its result.
@Override
public boolean tryLock() {
    return get(tryLockAsync());
}
// The spin lock must be reentrant for its owner thread while remaining
// unavailable to other threads until fully released.
@Test
public void testReentrancy() throws InterruptedException {
    Lock lock = redisson.getSpinLock("lock1");
    Assertions.assertTrue(lock.tryLock());
    Assertions.assertTrue(lock.tryLock());
    lock.unlock();
    // next row for test renew expiration tisk.
    //Thread.currentThread().sleep(TimeUnit.SECONDS.toMillis(RedissonLock.LOCK_EXPIRATION_INTERVAL_SECONDS*2));
    Thread thread1 = new Thread() {
        @Override
        public void run() {
            // Still held once by the main thread, so another thread must fail.
            RLock lock1 = redisson.getSpinLock("lock1");
            Assertions.assertFalse(lock1.tryLock());
        }
    };
    thread1.start();
    thread1.join();
    lock.unlock();
}
/**
 * Returns whether multiplying {@code left} by {@code right} is permitted:
 * multiplying two temporal amounts is rejected, and an invalid-parameters
 * event is emitted on the context; every other combination is allowed here.
 */
static boolean isAllowedMultiplicationBasedOnSpec(final Object left, final Object right, final EvaluationContext ctx) {
    final boolean bothTemporal = left instanceof TemporalAmount && right instanceof TemporalAmount;
    if (!bothTemporal) {
        return true;
    }
    ctx.notifyEvt(() -> new InvalidParametersEvent(FEELEvent.Severity.ERROR,
            Msg.createMessage(Msg.INVALID_PARAMETERS_FOR_OPERATION, "multiplication",
                    left.getClass().getName(), right.getClass().getName())));
    return false;
}
// Multiplication is allowed unless BOTH operands are temporal amounts, in
// which case exactly one invalid-parameters event must be emitted.
@Test
void isAllowedMultiplicationBasedOnSpecTest() {
    EvaluationContext evaluationContext = mock(EvaluationContext.class);
    Object left = 23;
    Object right = 354.5;
    assertThat(isAllowedMultiplicationBasedOnSpec(left, right, evaluationContext)).isTrue();
    verify(evaluationContext, never()).notifyEvt(any());

    right = Duration.of(5, DAYS);
    assertThat(isAllowedMultiplicationBasedOnSpec(left, right, evaluationContext)).isTrue();
    verify(evaluationContext, never()).notifyEvt(any());

    left = Duration.of(5, DAYS);
    right = 354.5;
    assertThat(isAllowedMultiplicationBasedOnSpec(left, right, evaluationContext)).isTrue();
    verify(evaluationContext, never()).notifyEvt(any());

    left = Duration.of(5, DAYS);
    right = Duration.of(5, DAYS);
    assertThat(isAllowedMultiplicationBasedOnSpec(left, right, evaluationContext)).isFalse();
    verify(evaluationContext, times(1)).notifyEvt(any(Supplier.class));
}
/**
 * Sanitizes the statement against the meta store; delegates to the
 * three-argument overload with its boolean flag set to true.
 */
public static Statement sanitize(
    final Statement node,
    final MetaStore metaStore) {
  return sanitize(node, metaStore, true);
}
// Nested lambda calls reusing argument names across scopes must sanitize
// without a duplicate-variable error, preserving the full expression tree.
@Test
public void shouldAllowNestedLambdaFunctionsWithoutDuplicate() {
    // Given:
    final Statement stmt = givenQuery(
        "SELECT TRANSFORM_ARRAY(Col4, (X,Y,Z) => TRANSFORM_MAP(Col4, Q => 4, H => 5), (X,Y,Z) => 0) FROM TEST1;");

    // When:
    final Query result = (Query) AstSanitizer.sanitize(stmt, META_STORE);

    // Then:
    assertThat(result.getSelect(), is(new Select(ImmutableList.of(
        new SingleColumn(
            new FunctionCall(
                FunctionName.of("TRANSFORM_ARRAY"),
                ImmutableList.of(
                    column(TEST1_NAME, "COL4"),
                    new LambdaFunctionCall(
                        ImmutableList.of("X", "Y", "Z"),
                        new FunctionCall(
                            FunctionName.of("TRANSFORM_MAP"),
                            ImmutableList.of(
                                column(TEST1_NAME, "COL4"),
                                new LambdaFunctionCall(
                                    ImmutableList.of("Q"),
                                    new IntegerLiteral(4)
                                ),
                                new LambdaFunctionCall(
                                    ImmutableList.of("H"),
                                    new IntegerLiteral(5)
                                )
                            )
                        )
                    ),
                    new LambdaFunctionCall(
                        ImmutableList.of("X", "Y", "Z"),
                        new IntegerLiteral(0)
                    )
                )
            ),
            Optional.of(ColumnName.of("KSQL_COL_0")))
    ))));
}
/**
 * Evaluates every authorization context in the batch against the configured
 * strategy. A null or empty batch is a no-op.
 */
public void evaluate(List<AuthorizationContext> contexts) {
    if (CollectionUtils.isEmpty(contexts)) {
        return;
    }
    for (AuthorizationContext context : contexts) {
        this.authorizationStrategy.evaluate(context);
    }
}
// End-to-end ACL evaluation: a matching subject/resource/action must pass both
// with a sourceIp restriction that matches and with no sourceIp restriction.
// Skipped on macOS (see the MixAll.isMac() guard).
@Test
public void evaluate1() {
    if (MixAll.isMac()) {
        return;
    }
    User user = User.of("test", "test");
    this.authenticationMetadataManager.createUser(user).join();
    Acl acl = AuthTestHelper.buildAcl("User:test", "Topic:test*", "Pub", "192.168.0.0/24", Decision.ALLOW);
    this.authorizationMetadataManager.createAcl(acl).join();

    Subject subject = Subject.of("User:test");
    Resource resource = Resource.ofTopic("test");
    Action action = Action.PUB;
    String sourceIp = "192.168.0.1";
    DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, action, sourceIp);
    context.setRpcCode("10");
    this.evaluator.evaluate(Collections.singletonList(context));

    // acl sourceIp is null
    acl = AuthTestHelper.buildAcl("User:test", "Topic:test*", "Pub", null, Decision.ALLOW);
    this.authorizationMetadataManager.updateAcl(acl).join();

    subject = Subject.of("User:test");
    resource = Resource.ofTopic("test");
    action = Action.PUB;
    sourceIp = "192.168.0.1";
    context = DefaultAuthorizationContext.of(subject, resource, action, sourceIp);
    context.setRpcCode("10");
    this.evaluator.evaluate(Collections.singletonList(context));
}
// Blocks until the command queue has consumed past the newest command in
// previousCommands, but only for statement types matched by mustSync. The
// list is scanned from the end for the most recent CommandStatusEntity, whose
// sequence number is waited on.
@Override
public void waitFor(
    final KsqlEntityList previousCommands,
    final Class<? extends Statement> statementClass) {
  if (mustSync.test(statementClass)) {
    final ArrayList<KsqlEntity> reversed = new ArrayList<>(previousCommands);
    Collections.reverse(reversed);
    reversed.stream()
        .filter(e -> e instanceof CommandStatusEntity)
        .map(CommandStatusEntity.class::cast)
        .map(CommandStatusEntity::getCommandSequenceNumber)
        .findFirst()
        .ifPresent(seqNum -> {
          try {
            commandQueue.ensureConsumedPast(seqNum, timeout);
          } catch (final InterruptedException e) {
            // Interrupt during shutdown: surface as a shutting-down error.
            throw new KsqlRestException(Errors.serverShuttingDown());
          } catch (final TimeoutException e) {
            throw new KsqlRestException(Errors.commandQueueCatchUpTimeout(seqNum));
          }
        });
  }
}
// Entities that are not CommandStatusEntity carry no sequence number, so no
// queue synchronization must be attempted.
@Test
public void shouldNotWaitForNonCommandStatusEntity() throws Exception {
    // Given:
    givenSyncWithPredicate(clazz -> true);
    givenEntities(entity1);

    // When:
    commandQueueSync.waitFor(entities, CreateStreamAsSelect.class);

    // Then:
    verify(commandQueue, never()).ensureConsumedPast(anyLong(), any());
}
/**
 * Converts a BigDecimal to a Timestamp by interpreting its (truncated) long
 * value through the integer conversion path; null simply propagates.
 */
protected Timestamp convertBigNumberToTimestamp( BigDecimal bd ) {
    return bd == null ? null : convertIntegerToTimestamp( bd.longValue() );
}
// In milliseconds conversion mode, a BigDecimal epoch-millis value must map to
// the expected Timestamp.
// NOTE(review): sets a global system property without restoring it afterwards —
// this may leak into other tests in the same JVM; consider save/restore.
@Test
public void testConvertBigNumberToTimestamp_Milliseconds() throws KettleValueException {
    System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_MILLISECONDS );
    ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp();
    Timestamp result = valueMetaTimestamp.convertBigNumberToTimestamp( BigDecimal.valueOf( TIMESTAMP_AS_MILLISECONDS ) );
    assertEquals( TIMESTAMP_WITH_MILLISECONDS, result );
}
/**
 * Builds a map describing this anomaly. Common keys (detection time, status,
 * id, status-update time) are added first — as epoch millis when _isJson,
 * otherwise as UTC date strings — followed by type-specific details and, when
 * a fix has started, the optimization result.
 *
 * @throws IllegalStateException for an unrecognized anomaly type
 */
public Map<String, Object> populateAnomalyDetails() {
  // Goal violation has one more field than other anomaly types.
  Map<String, Object> anomalyDetails = new HashMap<>();
  anomalyDetails.put(_isJson ? DETECTION_MS : DETECTION_DATE,
                     _isJson ? _anomalyState.detectionMs() : utcDateFor(_anomalyState.detectionMs()));
  anomalyDetails.put(STATUS, _anomalyState.status());
  anomalyDetails.put(ANOMALY_ID, _anomalyState.anomalyId());
  anomalyDetails.put(_isJson ? STATUS_UPDATE_MS : STATUS_UPDATE_DATE,
                     _isJson ? _anomalyState.statusUpdateMs() : utcDateFor(_anomalyState.statusUpdateMs()));
  switch ((KafkaAnomalyType) _anomalyType) {
    case GOAL_VIOLATION:
      GoalViolations goalViolations = (GoalViolations) _anomalyState.anomaly();
      // Split the violated goals by whether a rebalance can fix them.
      Map<Boolean, List<String>> violatedGoalsByFixability = goalViolations.violatedGoalsByFixability();
      anomalyDetails.put(FIXABLE_VIOLATED_GOALS, violatedGoalsByFixability.getOrDefault(true, Collections.emptyList()));
      anomalyDetails.put(UNFIXABLE_VIOLATED_GOALS, violatedGoalsByFixability.getOrDefault(false, Collections.emptyList()));
      if (_hasFixStarted) {
        anomalyDetails.put(OPTIMIZATION_RESULT, goalViolations.optimizationResult(_isJson));
      }
      break;
    case BROKER_FAILURE:
      BrokerFailures brokerFailures = (BrokerFailures) _anomalyState.anomaly();
      anomalyDetails.put(FAILED_BROKERS_BY_TIME_MS, brokerFailures.failedBrokers());
      if (_hasFixStarted) {
        anomalyDetails.put(OPTIMIZATION_RESULT, brokerFailures.optimizationResult(_isJson));
      }
      break;
    case DISK_FAILURE:
      DiskFailures diskFailures = (DiskFailures) _anomalyState.anomaly();
      anomalyDetails.put(FAILED_DISKS_BY_TIME_MS, diskFailures.failedDisks());
      if (_hasFixStarted) {
        anomalyDetails.put(OPTIMIZATION_RESULT, diskFailures.optimizationResult(_isJson));
      }
      break;
    case METRIC_ANOMALY:
      KafkaMetricAnomaly metricAnomaly = (KafkaMetricAnomaly) _anomalyState.anomaly();
      anomalyDetails.put(DESCRIPTION, metricAnomaly.description());
      if (_hasFixStarted) {
        anomalyDetails.put(OPTIMIZATION_RESULT, metricAnomaly.optimizationResult(_isJson));
      }
      break;
    case TOPIC_ANOMALY:
      TopicAnomaly topicAnomaly = (TopicAnomaly) _anomalyState.anomaly();
      anomalyDetails.put(DESCRIPTION, topicAnomaly.toString());
      if (_hasFixStarted) {
        anomalyDetails.put(OPTIMIZATION_RESULT, topicAnomaly.optimizationResult(_isJson));
      }
      break;
    case MAINTENANCE_EVENT:
      MaintenanceEvent maintenanceEvent = (MaintenanceEvent) _anomalyState.anomaly();
      anomalyDetails.put(DESCRIPTION, maintenanceEvent.toString());
      if (_hasFixStarted) {
        anomalyDetails.put(OPTIMIZATION_RESULT, maintenanceEvent.optimizationResult(_isJson));
      }
      break;
    default:
      throw new IllegalStateException("Unrecognized anomaly type " + _anomalyType);
  }
  return anomalyDetails;
}
// For every cached anomaly type, populateAnomalyDetails must query exactly the
// expected accessors of the anomaly state and the type-specific anomaly mock.
@Test
public void testPopulateAnomalyDetails() {
  AnomalyState mockAnomalyState = EasyMock.mock(AnomalyState.class);
  for (KafkaAnomalyType anomalyType : KafkaAnomalyType.cachedValues()) {
    AnomalyDetails details = new AnomalyDetails(mockAnomalyState, anomalyType, false, false);
    // Common accessors used for every anomaly type.
    EasyMock.expect(mockAnomalyState.detectionMs()).andReturn(MOCK_DETECTION_MS).once();
    EasyMock.expect(mockAnomalyState.status()).andReturn(DETECTED).once();
    EasyMock.expect(mockAnomalyState.anomalyId()).andReturn(MOCK_ANOMALY_ID).once();
    EasyMock.expect(mockAnomalyState.statusUpdateMs()).andReturn(MOCK_STATUS_UPDATE_MS).once();
    switch (anomalyType) {
      case GOAL_VIOLATION:
        GoalViolations goalViolations = EasyMock.mock(GoalViolations.class);
        EasyMock.expect(goalViolations.violatedGoalsByFixability()).andReturn(VIOLATED_GOALS_BY_FIXABILITY).once();
        replayAndVerify(mockAnomalyState, details, goalViolations);
        break;
      case BROKER_FAILURE:
        BrokerFailures brokerFailures = EasyMock.mock(BrokerFailures.class);
        EasyMock.expect(brokerFailures.failedBrokers()).andReturn(FAILED_BROKERS).once();
        replayAndVerify(mockAnomalyState, details, brokerFailures);
        break;
      case DISK_FAILURE:
        DiskFailures diskFailures = EasyMock.mock(DiskFailures.class);
        EasyMock.expect(diskFailures.failedDisks()).andReturn(FAILED_DISKS).once();
        replayAndVerify(mockAnomalyState, details, diskFailures);
        break;
      case METRIC_ANOMALY:
        KafkaMetricAnomaly kafkaMetricAnomaly = EasyMock.mock(KafkaMetricAnomaly.class);
        EasyMock.expect(kafkaMetricAnomaly.description()).andReturn(MOCK_DESCRIPTION).once();
        replayAndVerify(mockAnomalyState, details, kafkaMetricAnomaly);
        break;
      case TOPIC_ANOMALY:
        TopicAnomaly topicAnomaly = EasyMock.mock(TopicAnomaly.class);
        // Note: EasyMock provides a built-in behavior for equals(), toString(), hashCode() and finalize() even for class mocking.
        // Hence, we cannot record our own behavior for topicAnomaly.toString().
        replayAndVerify(mockAnomalyState, details, topicAnomaly);
        break;
      case MAINTENANCE_EVENT:
        MaintenanceEvent maintenanceEvent = EasyMock.mock(MaintenanceEvent.class);
        // Note: EasyMock provides a built-in behavior for equals(), toString(), hashCode() and finalize() even for class mocking.
        // Hence, we cannot record our own behavior for maintenanceEvent.toString().
        replayAndVerify(mockAnomalyState, details, maintenanceEvent);
        break;
      default:
        throw new IllegalStateException("Unrecognized anomaly type " + anomalyType);
    }
  }
}
/**
 * Looks up an index set by id in the cached configurations and materializes
 * the match (if any) through the Mongo index set factory.
 */
@Override
public Optional<IndexSet> get(final String indexSetId) {
    return this.indexSetsCache.get().stream()
            .filter(config -> Objects.equals(config.id(), indexSetId))
            .findFirst()
            .map(config -> (IndexSet) mongoIndexSetFactory.create(config));
}
// After invalidate(), the cache must refetch from the service (two findAll
// calls total) and return the fresh configuration list.
@Test
public void indexSetsCacheShouldReturnNewListAfterInvalidate() {
    final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class);
    final List<IndexSetConfig> indexSetConfigs = Collections.singletonList(indexSetConfig);
    when(indexSetService.findAll()).thenReturn(indexSetConfigs);
    final List<IndexSetConfig> result = this.indexSetsCache.get();
    assertThat(result)
        .isNotNull()
        .hasSize(1)
        .containsExactly(indexSetConfig);

    this.indexSetsCache.invalidate();

    final IndexSetConfig newIndexSetConfig = mock(IndexSetConfig.class);
    final List<IndexSetConfig> newIndexSetConfigs = Collections.singletonList(newIndexSetConfig);
    when(indexSetService.findAll()).thenReturn(newIndexSetConfigs);
    final List<IndexSetConfig> newResult = this.indexSetsCache.get();
    assertThat(newResult)
        .isNotNull()
        .hasSize(1)
        .containsExactly(newIndexSetConfig);

    verify(indexSetService, times(2)).findAll();
}
// Example: builds one KieModule from two file-system resources, creates a
// container and the named session "ksession2", then inserts two messages and
// fires rules, printing the conversation to the given stream.
public void go(PrintStream out) {
    KieServices ks = KieServices.Factory.get();
    KieRepository kr = ks.getRepository();

    Resource ex1Res = ks.getResources().newFileSystemResource(getFile("kiebase-inclusion"));
    Resource ex2Res = ks.getResources().newFileSystemResource(getFile("named-kiesession"));

    KieModule kModule = kr.addKieModule(ex1Res, ex2Res);
    KieContainer kContainer = ks.newKieContainer(kModule.getReleaseId());

    KieSession kSession = kContainer.newKieSession("ksession2");
    kSession.setGlobal("out", out);

    Object msg1 = createMessage(kContainer, "Dave", "Hello, HAL. Do you read me, HAL?");
    kSession.insert(msg1);
    kSession.fireAllRules();

    Object msg2 = createMessage(kContainer, "Dave", "Open the pod bay doors, HAL.");
    kSession.insert(msg2);
    kSession.fireAllRules();
}
// Running the example must print the expected four-line HAL conversation.
@Test
public void testGo() {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(baos);
    new KieModuleFromMultipleFilesExample().go(ps);
    ps.close();
    String actual = baos.toString();
    String expected = "" +
            "Dave: Hello, HAL. Do you read me, HAL?" + NL +
            "HAL: Dave. I read you." + NL +
            "Dave: Open the pod bay doors, HAL." + NL +
            "HAL: I'm sorry, Dave. I'm afraid I can't do that." + NL;
    assertEquals(expected, actual);
}
/**
 * Builds a ConfigDefinition from a parsed def-file tree: creates a definition
 * named after the root node and adds each of the root's children to it.
 */
public static ConfigDefinition createConfigDefinition(CNode root) {
    final ConfigDefinition definition = new ConfigDefinition(root.getName(), root.getNamespace());
    for (CNode child : root.getChildren()) {
        addNode(definition, child);
    }
    return definition;
}
@Test // TODO Test ranges public void testCreateConfigDefinition() throws IOException { File defFile = new File(DEF_NAME); DefParser defParser = new DefParser(defFile.getName(), new FileReader(defFile)); CNode root = defParser.getTree(); ConfigDefinition def = ConfigDefinitionBuilder.createConfigDefinition(root); assertNotNull(def); assertThat(def.getBoolDefs().size(), is(2)); assertNull(def.getBoolDefs().get("bool_val").getDefVal()); assertThat(def.getBoolDefs().get("bool_with_def").getDefVal(), is(false)); assertThat(def.getIntDefs().size(), is(2)); assertNull(def.getIntDefs().get("int_val").getDefVal()); assertThat(def.getIntDefs().get("int_with_def").getDefVal(), is(-545)); assertThat(def.getLongDefs().size(), is(2)); assertNull(def.getLongDefs().get("long_val").getDefVal()); assertThat(def.getLongDefs().get("long_with_def").getDefVal(), is(-50000000000L)); assertThat(def.getDoubleDefs().size(), is(2)); assertNull(def.getDoubleDefs().get("double_val").getDefVal()); assertThat(def.getDoubleDefs().get("double_with_def").getDefVal(), is(-6.43)); assertThat(def.getEnumDefs().size(), is(3)); assertTrue(def.getEnumDefs().containsKey("enum_val")); assertThat(def.getEnumDefs().get("enum_val").getVals().size(), is(3)); assertThat(def.getEnumDefs().get("enum_val").getVals().get(0), is("FOO")); assertThat(def.getEnumDefs().get("enum_val").getVals().get(1), is("BAR")); assertThat(def.getEnumDefs().get("enum_val").getVals().get(2), is("FOOBAR")); assertTrue(def.getEnumDefs().containsKey("enumwithdef")); assertThat(def.getEnumDefs().get("enumwithdef").getDefVal(), is("BAR2")); assertTrue(def.getEnumDefs().containsKey("onechoice")); assertThat(def.getEnumDefs().get("onechoice").getDefVal(), is("ONLYFOO")); assertThat(def.getStringDefs().size(), is(2)); assertNull(def.getStringDefs().get("string_val").getDefVal()); // The return value is a String, so null if no default value assertThat(def.getStringDefs().get("stringwithdef").getDefVal(), is("foobar")); 
assertThat(def.getReferenceDefs().size(), is(2)); assertNotNull(def.getReferenceDefs().get("refval")); assertThat(def.getReferenceDefs().get("refwithdef").getDefVal(), is(":parent:")); assertThat(def.getFileDefs().size(), is(1)); assertNotNull(def.getFileDefs().get("fileVal")); assertThat(def.getPathDefs().size(), is(1)); assertNotNull(def.getPathDefs().get("pathVal")); assertThat(def.getOptionalPathDefs().size(), is(1)); assertNotNull(def.getOptionalPathDefs().get("optionalPathVal")); // An array does not have to have any elements set assertThat(def.getArrayDefs().size(), is(10)); assertNotNull(def.getArrayDefs().get("boolarr")); assertThat(def.getArrayDefs().get("boolarr").getTypeSpec().getType(), is("bool")); assertNotNull(def.getArrayDefs().get("boolarrEmpty")); assertThat(def.getArrayDefs().get("boolarrEmpty").getTypeSpec().getType(), is("bool")); assertNotNull(def.getArrayDefs().get("enumarr")); assertThat(def.getArrayDefs().get("enumarr").getTypeSpec().getType(), is("enum")); assertThat(def.getArrayDefs().get("enumarr").getTypeSpec().getEnumVals().toString(), is("[ARRAY, VALUES]")); assertNotNull(def.getArrayDefs().get("refarr")); assertThat(def.getArrayDefs().get("refarr").getTypeSpec().getType(), is("reference")); assertNotNull(def.getArrayDefs().get("fileArr")); assertThat(def.getArrayDefs().get("fileArr").getTypeSpec().getType(), is("file")); assertThat(def.getStructDefs().size(), is(2)); assertNotNull(def.getStructDefs().get("basicStruct")); assertThat(def.getStructDefs().get("basicStruct").getStringDefs().size(), is(1)); assertThat(def.getStructDefs().get("basicStruct").getStringDefs().get("foo").getDefVal(), is("basic")); assertThat(def.getStructDefs().get("basicStruct").getIntDefs().size(), is(1)); assertNull(def.getStructDefs().get("basicStruct").getIntDefs().get("bar").getDefVal()); assertThat(def.getStructDefs().get("basicStruct").getArrayDefs().size(), is(1)); 
assertThat(def.getStructDefs().get("basicStruct").getArrayDefs().get("intArr").getTypeSpec().getType(), is("int")); assertNotNull(def.getStructDefs().get("rootStruct")); assertNotNull(def.getStructDefs().get("rootStruct").getStructDefs().get("inner0")); assertNotNull(def.getStructDefs().get("rootStruct").getStructDefs().get("inner1")); assertThat(def.getStructDefs().get("rootStruct").getInnerArrayDefs().size(), is(1)); assertNotNull(def.getStructDefs().get("rootStruct").getInnerArrayDefs().get("innerArr")); assertThat(def.getStructDefs().get("rootStruct").getInnerArrayDefs().get("innerArr").getStringDefs().size(), is(1)); assertThat(def.getInnerArrayDefs().size(), is(1)); assertNotNull(def.getInnerArrayDefs().get("myarray")); assertThat(def.getInnerArrayDefs().get("myarray").getIntDefs().get("intval").getDefVal(), is(14)); assertThat(def.getInnerArrayDefs().get("myarray").getArrayDefs().size(), is(1)); assertNotNull(def.getInnerArrayDefs().get("myarray").getArrayDefs().get("stringval")); assertThat(def.getInnerArrayDefs().get("myarray").getArrayDefs().get("stringval").getTypeSpec().getType(), is("string")); assertThat(def.getInnerArrayDefs().get("myarray").getEnumDefs().get("enumval").getDefVal(), is("TYPE")); assertNull(def.getInnerArrayDefs().get("myarray").getReferenceDefs().get("refval").getDefVal()); assertThat(def.getInnerArrayDefs().get("myarray").getInnerArrayDefs().size(), is(1)); assertThat(def.getInnerArrayDefs().get("myarray").getInnerArrayDefs().get("anotherarray").getIntDefs().get("foo").getDefVal(), is(-4)); assertNull(def.getInnerArrayDefs().get("myarray").getStructDefs().get("myStruct").getIntDefs().get("a").getDefVal()); assertThat(def.getInnerArrayDefs().get("myarray").getStructDefs().get("myStruct").getIntDefs().get("b").getDefVal(), is(2)); // Maps assertEquals(def.getLeafMapDefs().size(), 4); assertEquals(def.getLeafMapDefs().get("intMap").getTypeSpec().getType(), "int"); 
assertEquals(def.getLeafMapDefs().get("stringMap").getTypeSpec().getType(), "string"); assertEquals(def.getStructMapDefs().size(), 1); assertNull(def.getStructMapDefs().get("myStructMap").getIntDefs().get("myInt").getDefVal()); assertNull(def.getStructMapDefs().get("myStructMap").getStringDefs().get("myString").getDefVal()); assertEquals(def.getStructMapDefs().get("myStructMap").getIntDefs().get("myIntDef").getDefVal(), (Integer)56); assertEquals(def.getStructMapDefs().get("myStructMap").getStringDefs().get("myStringDef").getDefVal(), "g"); // Ranges }