focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Compiles a point-to-point intent into installable intents.
// Preference order visible below: operator-suggested path (when it satisfies the
// constraints and is available) -> zero-hop (ingress and egress on the same device)
// -> unprotected single path -> protected disjoint pair, falling back to a single
// path when no disjoint path exists.
@Override public List<Intent> compile(PointToPointIntent intent, List<Intent> installable) {
    log.trace("compiling {} {}", intent, installable);
    ConnectPoint ingressPoint = intent.filteredIngressPoint().connectPoint();
    ConnectPoint egressPoint = intent.filteredEgressPoint().connectPoint();
    //TODO: handle protected path case with suggested path!!
    //Idea: use suggested path as primary and another path from path service as protection
    if (intent.suggestedPath() != null && intent.suggestedPath().size() > 0) {
        Path path = new DefaultPath(PID, intent.suggestedPath(), new ScalarWeight(1));
        //Check intent constraints against suggested path and suggested path availability
        if (checkPath(path, intent.constraints()) && pathAvailable(intent)) {
            allocateIntentBandwidth(intent, path);
            return asList(createLinkCollectionIntent(ImmutableSet.copyOf(intent.suggestedPath()),
                    DEFAULT_COST, intent));
        }
    }
    // Source and destination on the same device: no links to traverse.
    if (ingressPoint.deviceId().equals(egressPoint.deviceId())) {
        return createZeroHopLinkCollectionIntent(intent);
    }
    // proceed with no protected paths
    if (!ProtectionConstraint.requireProtectedPath(intent)) {
        return createUnprotectedLinkCollectionIntent(intent);
    }
    try {
        // attempt to compute and implement backup path
        return createProtectedIntent(ingressPoint, egressPoint, intent, installable);
    } catch (PathNotFoundException e) {
        log.warn("Could not find disjoint Path for {}", intent);
        // no disjoint path extant -- maximum one path exists between devices
        return createSinglePathIntent(ingressPoint, egressPoint, intent, installable);
    }
}
// Compiles a bandwidth-constrained intent with a ResourceGroup over a 3-hop path and
// verifies that exactly the six expected per-port allocations are registered for that group.
@Test public void testRGBandwidthConstrainedIntentAllocation() { final double bpsTotal = 1000.0; ResourceGroup resourceGroup = ResourceGroup.of(100); String[] hops = {S1, S2, S3}; final ResourceService resourceService = MockResourceService.makeCustomBandwidthResourceService(bpsTotal); final List<Constraint> constraints = Collections.singletonList(new BandwidthConstraint(Bandwidth.bps(BPS_TO_RESERVE))); final PointToPointIntent intent = makeIntent(new ConnectPoint(DID_1, PORT_1), new ConnectPoint(DID_3, PORT_2), constraints, resourceGroup); PointToPointIntentCompiler compiler = makeCompiler(hops, resourceService); compiler.compile(intent, null); ResourceAllocation rAOne = new ResourceAllocation(RESOURCE_SW1_P1, resourceGroup); ResourceAllocation rATwo = new ResourceAllocation(RESOURCE_SW1_P2, resourceGroup); ResourceAllocation rAThree = new ResourceAllocation(RESOURCE_SW2_P1, resourceGroup); ResourceAllocation rAFour = new ResourceAllocation(RESOURCE_SW2_P2, resourceGroup); ResourceAllocation rAFive = new ResourceAllocation(RESOURCE_SW3_P1, resourceGroup); ResourceAllocation rASix = new ResourceAllocation(RESOURCE_SW3_P2, resourceGroup); Set<ResourceAllocation> expectedresourceAllocations = ImmutableSet.of(rAOne, rATwo, rAThree, rAFour, rAFive, rASix); Set<ResourceAllocation> resourceAllocations = ImmutableSet.copyOf(resourceService.getResourceAllocations(resourceGroup)); assertThat(resourceAllocations, hasSize(6)); assertEquals(expectedresourceAllocations, resourceAllocations); }
// Creates an ownership record for a data entity inside one reactive transaction:
// resolves (or creates) the owner and title, persists the OwnershipPojo, optionally
// propagates the change (when formData.getPropagate() is TRUE), refreshes the search
// vectors, marks the entity's OWNERS facet as filled, and maps the result to the API model.
@Override @ActivityLog(event = OWNERSHIP_CREATED) @ReactiveTransactional public Mono<Ownership> create(@ActivityParameter(OwnershipCreate.DATA_ENTITY_ID) final long dataEntityId, final OwnershipFormData formData) { return Mono.zip(ownerService.getOrCreate(formData.getOwnerName()), titleService.getOrCreate(formData.getTitleName())) .map(function((owner, title) -> Tuples.of(owner, title, new OwnershipPojo() .setDataEntityId(dataEntityId) .setOwnerId(owner.getId()) .setTitleId(title.getId()) ))) .flatMap(function((owner, title, ownership) -> ownershipRepository.create(ownership) .map(pojo -> Tuples.of(owner, title, pojo)))) .flatMap(function((owner, title, ownership) -> { if (Boolean.TRUE.equals(formData.getPropagate())) { return propagateIfDEG(ownership, OwnershipPropagateAction.CREATE) .then(Mono.just(Tuples.of(owner, title, ownership))); } return Mono.just(Tuples.of(owner, title, ownership)); })) .flatMap(function((owner, title, ownership) -> searchEntrypointRepository .updateChangedOwnershipVectors(ownership.getId()) .thenReturn(new OwnershipDto(ownership, owner, title)))) .flatMap( ownershipDto -> dataEntityFilledService.markEntityFilled(dataEntityId, OWNERS).thenReturn(ownershipDto)) .map(ownershipMapper::mapDto); }
// Happy-path create(): mocks owner/title resolution, repository persistence, search-vector
// update and entity-filled marking, then asserts the mapped Ownership carries the expected
// ids/names and that each collaborator was invoked exactly as expected.
@Test @DisplayName("Creates ownership, expecting successfully created") void testCreateOwnership() { final String testOwnerName = UUID.randomUUID().toString(); final String testTitleName = UUID.randomUUID().toString(); final long testOwnerId = 2L; final long testTitleId = 3L; final long testOwnershipId = 15L; final OwnershipFormData testOwnershipFromData = new OwnershipFormData() .ownerName(testOwnerName) .titleName(testTitleName); final OwnerPojo owner = createTestOwner(testOwnerId, testOwnerName); final TitlePojo title = createTestTitle(testTitleId, testTitleName); final OwnershipPojo ownershipPojo = createTestOwnershipPojo(testOwnershipId, owner, title); final Ownership ownership = createTestOwnership(testOwnershipId, owner, title); when(ownerService.getOrCreate(anyString())).thenReturn(Mono.just(owner)); when(titleService.getOrCreate(anyString())).thenReturn(Mono.just(title)); when(ownershipRepository.create(any(OwnershipPojo.class))).thenReturn(Mono.just(ownershipPojo)); when(searchEntrypointRepository.updateChangedOwnershipVectors(anyLong())).thenReturn(Mono.just(1)); when(ownershipMapper.mapDto(any(OwnershipDto.class))).thenReturn(ownership); when(dataEntityFilledService.markEntityFilled(anyLong(), any())) .thenReturn(Mono.just(new DataEntityFilledPojo())); final Mono<Ownership> actualOwnershipMono = ownershipService.create(1L, testOwnershipFromData); StepVerifier .create(actualOwnershipMono) .assertNext(o -> { assertThat(o.getId()).isEqualTo(testOwnershipId); assertThat(o.getOwner().getId()).isEqualTo(testOwnerId); assertThat(o.getOwner().getName()).isEqualTo(testOwnerName); assertThat(o.getTitle().getId()).isEqualTo(testTitleId); assertThat(o.getTitle().getName()).isEqualTo(testTitleName); }) .verifyComplete(); verify(ownerService, only()).getOrCreate(any(String.class)); verify(titleService, only()).getOrCreate(any(String.class)); verify(ownershipRepository, times(1)).create(any(OwnershipPojo.class)); verify(searchEntrypointRepository, times(1)) 
.updateChangedOwnershipVectors(testOwnershipId); verify(ownershipMapper, only()).mapDto(any(OwnershipDto.class)); }
// Builds provider-type URLs for every scheme declared by the host's protocol that is
// NOT one of the well-known Scheme enum values; standard schemes are skipped entirely.
// Hostname is punycode-encoded and the file path is normalized before formatting.
@Override public DescriptiveUrlBag toUrl(final Path file) {
    final DescriptiveUrlBag list = new DescriptiveUrlBag();
    for(String scheme : host.getProtocol().getSchemes()) {
        // Only custom (non-enum) schemes produce an entry.
        if(Arrays.stream(Scheme.values()).noneMatch(s -> s.name().equals(scheme))) {
            list.add(new DescriptiveUrl(URI.create(String.format("%s://%s%s", scheme,
                new PunycodeConverter().convert(host.getHostname()),
                PathNormalizer.normalize(file.getAbsolute()))),
                DescriptiveUrl.Type.provider,
                MessageFormat.format(LocaleFactory.localizedString("{0} URL"), StringUtils.capitalize(scheme))));
        }
    }
    return list;
}
// A protocol with the custom scheme "s" yields exactly one provider URL whose help
// text is the capitalized scheme ("S URL").
@Test public void testHelp() { Host host = new Host(new TestProtocol() { public String[] getSchemes() { return new String[]{"s"}; } }, "localhost"); Path path = new Path("/file", EnumSet.of(Path.Type.file)); final DescriptiveUrlBag list = new CustomSchemeUrlProvider(host).toUrl(path).filter(DescriptiveUrl.Type.provider); assertEquals(1, list.size()); assertEquals("S URL", list.find(DescriptiveUrl.Type.provider).getHelp()); }
// Encodes an XOR over exactly two field constraints as DRL descr structure:
// and( not(and(A, B)), exists(or(A, or(B))) ) — i.e. "not both" plus "at least one".
// Throws KiePMMLException when the input does not contain exactly two constraints.
protected void declareConstraintsXor(final List<KiePMMLFieldOperatorValue> xorConstraints) { if (xorConstraints.size() != 2) { throw new KiePMMLException("Expecting two fields for XOR constraints, retrieved " + xorConstraints.size()); } final String[] keys = new String[xorConstraints.size()]; final String[] values = new String[xorConstraints.size()]; for (int i = 0; i < keys.length; i++) { keys[i] = xorConstraints.get(i).getName(); values[i] = xorConstraints.get(i).getConstraintsAsString(); } final CEDescrBuilder<CEDescrBuilder<RuleDescrBuilder, AndDescr>, AndDescr> andBuilder = builder.and(); final CEDescrBuilder<CEDescrBuilder<CEDescrBuilder<CEDescrBuilder<RuleDescrBuilder, AndDescr>, AndDescr>, NotDescr>, AndDescr> notBuilder = andBuilder.not().and(); commonDeclarePatternWithConstraint(notBuilder, keys[0], values[0]); commonDeclarePatternWithConstraint(notBuilder, keys[1], values[1]); final CEDescrBuilder<CEDescrBuilder<CEDescrBuilder<CEDescrBuilder<RuleDescrBuilder, AndDescr>, AndDescr>, ExistsDescr>, OrDescr> existsBuilder = andBuilder.exists().or(); commonDeclarePatternWithConstraint(existsBuilder, keys[0], values[0]); commonDeclarePatternWithConstraint(existsBuilder.or(), keys[1], values[1]); }
// Walks the full descr tree produced by declareConstraintsXor for two TEMPERATURE
// constraints (value < 35, value > 85): a root AndDescr containing a NotDescr(and of
// both patterns) followed by an ExistsDescr(or of the first pattern and a nested or
// of the second), checking object types, identifiers and constraint expressions.
@Test void declareConstraintsXor() { String temperatureField = "TEMPERATURE"; final List<KiePMMLFieldOperatorValue> xorConstraints = Arrays .asList(new KiePMMLFieldOperatorValue(temperatureField, BOOLEAN_OPERATOR.OR, Collections.singletonList(new KiePMMLOperatorValue(OPERATOR.LESS_THAN, 35)), null), new KiePMMLFieldOperatorValue(temperatureField, BOOLEAN_OPERATOR.OR, Collections.singletonList(new KiePMMLOperatorValue(OPERATOR.GREATER_THAN, 85)), null)); KiePMMLDescrLhsFactory.factory(lhsBuilder).declareConstraintsXor(xorConstraints); assertThat(lhsBuilder.getDescr()).isNotNull(); assertThat(lhsBuilder.getDescr().getDescrs()).isNotNull(); assertThat(lhsBuilder.getDescr().getDescrs()).hasSize(1); assertThat(lhsBuilder.getDescr().getDescrs().get(0)).isInstanceOf(AndDescr.class); AndDescr rootAndDescr = (AndDescr) lhsBuilder.getDescr().getDescrs().get(0); assertThat(rootAndDescr.getDescrs()).hasSize(2); assertThat(rootAndDescr.getDescrs().get(0)).isInstanceOf(NotDescr.class); assertThat(rootAndDescr.getDescrs().get(1)).isInstanceOf(ExistsDescr.class); // "Not" construct
NotDescr notDescr = (NotDescr) rootAndDescr.getDescrs().get(0); assertThat(notDescr.getDescrs()).hasSize(1); assertThat(notDescr.getDescrs().get(0)).isInstanceOf(AndDescr.class); AndDescr notAndDescr = (AndDescr) notDescr.getDescrs().get(0); assertThat(notAndDescr.getDescrs().get(0)).isInstanceOf(PatternDescr.class); assertThat(notAndDescr.getDescrs().get(1)).isInstanceOf(PatternDescr.class); PatternDescr patternDescr = (PatternDescr) notAndDescr.getDescrs().get(0); assertThat(patternDescr.getObjectType()).isEqualTo(temperatureField); assertThat(patternDescr.getIdentifier()).isNull(); assertThat(patternDescr.getConstraint()).isInstanceOf(AndDescr.class); AndDescr andDescr = (AndDescr) patternDescr.getConstraint(); assertThat(andDescr.getDescrs()).hasSize(1); assertThat(andDescr.getDescrs().get(0)).isInstanceOf(ExprConstraintDescr.class); ExprConstraintDescr exprConstraintDescr = (ExprConstraintDescr) 
andDescr.getDescrs().get(0); assertThat(exprConstraintDescr.isNegated()).isFalse(); assertThat(exprConstraintDescr.getType()).isEqualTo(ExprConstraintDescr.Type.NAMED); String expected = "value < 35"; assertThat(exprConstraintDescr.getExpression()).isEqualTo(expected); patternDescr = (PatternDescr) notAndDescr.getDescrs().get(1); assertThat(patternDescr.getObjectType()).isEqualTo(temperatureField); assertThat(patternDescr.getIdentifier()).isNull(); assertThat(patternDescr.getConstraint()).isInstanceOf(AndDescr.class); andDescr = (AndDescr) patternDescr.getConstraint(); assertThat(andDescr.getDescrs()).hasSize(1); assertThat(andDescr.getDescrs().get(0)).isInstanceOf(ExprConstraintDescr.class); exprConstraintDescr = (ExprConstraintDescr) andDescr.getDescrs().get(0); assertThat(exprConstraintDescr.isNegated()).isFalse(); assertThat(exprConstraintDescr.getType()).isEqualTo(ExprConstraintDescr.Type.NAMED); expected = "value > 85"; assertThat(exprConstraintDescr.getExpression()).isEqualTo(expected); // "Exists" construct
ExistsDescr existsDescr = (ExistsDescr) rootAndDescr.getDescrs().get(1); assertThat(existsDescr.getDescrs()).hasSize(1); assertThat(existsDescr.getDescrs().get(0)).isInstanceOf(OrDescr.class); OrDescr existsOrDescr = (OrDescr) existsDescr.getDescrs().get(0); assertThat(existsOrDescr.getDescrs()).hasSize(2); assertThat(existsOrDescr.getDescrs().get(0)).isInstanceOf(PatternDescr.class); assertThat(existsOrDescr.getDescrs().get(1)).isInstanceOf(OrDescr.class); patternDescr = (PatternDescr) existsOrDescr.getDescrs().get(0); assertThat(patternDescr.getObjectType()).isEqualTo(temperatureField); assertThat(patternDescr.getIdentifier()).isNull(); assertThat(patternDescr.getConstraint()).isInstanceOf(AndDescr.class); andDescr = (AndDescr) patternDescr.getConstraint(); assertThat(andDescr.getDescrs()).hasSize(1); assertThat(andDescr.getDescrs().get(0)).isInstanceOf(ExprConstraintDescr.class); exprConstraintDescr = (ExprConstraintDescr) andDescr.getDescrs().get(0); 
assertThat(exprConstraintDescr.isNegated()).isFalse(); assertThat(exprConstraintDescr.getType()).isEqualTo(ExprConstraintDescr.Type.NAMED); expected = "value < 35"; assertThat(exprConstraintDescr.getExpression()).isEqualTo(expected); OrDescr nestedOrDescr = (OrDescr) existsOrDescr.getDescrs().get(1); assertThat(nestedOrDescr.getDescrs()).hasSize(1); assertThat(nestedOrDescr.getDescrs().get(0)).isInstanceOf(PatternDescr.class); patternDescr = (PatternDescr) nestedOrDescr.getDescrs().get(0); assertThat(patternDescr.getObjectType()).isEqualTo(temperatureField); assertThat(patternDescr.getIdentifier()).isNull(); assertThat(patternDescr.getConstraint()).isInstanceOf(AndDescr.class); andDescr = (AndDescr) patternDescr.getConstraint(); assertThat(andDescr.getDescrs()).hasSize(1); assertThat(andDescr.getDescrs().get(0)).isInstanceOf(ExprConstraintDescr.class); exprConstraintDescr = (ExprConstraintDescr) andDescr.getDescrs().get(0); assertThat(exprConstraintDescr.isNegated()).isFalse(); assertThat(exprConstraintDescr.getType()).isEqualTo(ExprConstraintDescr.Type.NAMED); expected = "value > 85"; assertThat(exprConstraintDescr.getExpression()).isEqualTo(expected); }
// Builds a NotificationElement for one subscriber: resolves the subscriber's locale,
// renders the notification content from the template for the given reason, and packs
// the rendered title/bodies (raw and HTML, null-safe via defaultString) into the element.
Mono<NotificationElement> prepareNotificationElement(Subscriber subscriber, Reason reason, NotifierDescriptor descriptor) { return getLocaleFromSubscriber(subscriber) .flatMap(locale -> inferenceTemplate(reason, subscriber, locale)) .map(notificationContent -> NotificationElement.builder() .descriptor(descriptor) .reason(reason) .subscriber(subscriber) .reasonType(notificationContent.reasonType()) .notificationTitle(notificationContent.title()) .reasonAttributes(notificationContent.reasonAttributes()) .notificationRawBody(defaultString(notificationContent.rawBody())) .notificationHtmlBody(defaultString(notificationContent.htmlBody())) .build() ); }
// Spies the notification center, stubs locale resolution and template inference, and
// verifies prepareNotificationElement invokes both collaborators once.
@Test public void testPrepareNotificationElement() { var spyNotificationCenter = spy(notificationCenter); doReturn(Mono.just(Locale.getDefault())) .when(spyNotificationCenter).getLocaleFromSubscriber(any()); var notificationContent = mock(DefaultNotificationCenter.NotificationContent.class); doReturn(Mono.just(notificationContent)) .when(spyNotificationCenter).inferenceTemplate(any(), any(), any()); spyNotificationCenter.prepareNotificationElement(any(), any(), any()) .block(); verify(spyNotificationCenter).getLocaleFromSubscriber(any()); verify(spyNotificationCenter).inferenceTemplate(any(), any(), any()); }
// Derives a sanitized, length-limited, timestamped index name from baseString by
// delegating to generateResourceId with the index-name character/length rules.
static String generateIndexName(String baseString) {
    return generateResourceId(
        baseString, ILLEGAL_INDEX_NAME_CHARS, REPLACE_INDEX_NAME_CHAR, MAX_INDEX_NAME_LENGTH, TIME_FORMAT);
}
// Double quotes in the base string must be replaced; result is lower-cased, dash
// separated, and suffixed with a date-time-sequence stamp.
@Test public void testGenerateIndexNameShouldReplaceDoubleQuotes() { String testBaseString = "Test\"DB\"Name"; String actual = generateIndexName(testBaseString); assertThat(actual).matches("test-db-name-\\d{8}-\\d{6}-\\d{6}"); }
// Wraps the given file in this provider's Attributes view; no I/O happens here.
@Override public BasicFileAttributes readAttributes(File file) {
    return new Attributes(file);
}
// The attribute view for the fixture file reports fileKey 0, directory=true,
// regular-file=false, and a non-null creation time.
@Test public void testAttributes() { BasicFileAttributes attrs = provider.readAttributes(file); assertThat(attrs.fileKey()).isEqualTo(0); assertThat(attrs.isDirectory()).isTrue(); assertThat(attrs.isRegularFile()).isFalse(); assertThat(attrs.creationTime()).isNotNull(); }
// Synchronously asks the name server to restore write permission for a broker.
// Returns the number of topics affected on SUCCESS; any other response code is
// surfaced as an MQClientException carrying the code and remark.
public int addWritePermOfBroker(final String nameSrvAddr, String brokerName, final long timeoutMillis) throws RemotingCommandException, RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQClientException { AddWritePermOfBrokerRequestHeader requestHeader = new AddWritePermOfBrokerRequestHeader(); requestHeader.setBrokerName(brokerName); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.ADD_WRITE_PERM_OF_BROKER, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(nameSrvAddr, request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { AddWritePermOfBrokerResponseHeader responseHeader = (AddWritePermOfBrokerResponseHeader) response.decodeCommandCustomHeader(AddWritePermOfBrokerResponseHeader.class); return responseHeader.getAddTopicCount(); } default: break; } throw new MQClientException(response.getCode(), response.getRemark()); }
// Stubs the remoting client to answer ADD_WRITE_PERM_OF_BROKER with SUCCESS and
// addTopicCount=7, then asserts the client API decodes and returns 7.
@Test public void testAddWritePermOfBroker() throws Exception { doAnswer(invocationOnMock -> { RemotingCommand request = invocationOnMock.getArgument(1); if (request.getCode() != RequestCode.ADD_WRITE_PERM_OF_BROKER) { return null; } RemotingCommand response = RemotingCommand.createResponseCommand(AddWritePermOfBrokerResponseHeader.class); AddWritePermOfBrokerResponseHeader responseHeader = (AddWritePermOfBrokerResponseHeader) response.readCustomHeader(); response.setCode(ResponseCode.SUCCESS); responseHeader.setAddTopicCount(7); response.addExtField("addTopicCount", String.valueOf(responseHeader.getAddTopicCount())); return response; }).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong()); int topicCnt = mqClientAPI.addWritePermOfBroker("127.0.0.1", "default-broker", 1000); assertThat(topicCnt).isEqualTo(7); }
// Boots the plugin infrastructure: clears the bundle directory, starts the OSGi
// framework, registers an unload listener that evicts the plugin from the initialized
// map, wires the jar-change listener, and either polls continuously (shouldPoll)
// or scans the plugin directory once.
@Override public void startInfrastructure(boolean shouldPoll) { removeBundleDirectory(); goPluginOSGiFramework.start(); addPluginChangeListener(new PluginChangeListener() { @Override public void pluginLoaded(GoPluginDescriptor pluginDescriptor) { } @Override public void pluginUnLoaded(GoPluginDescriptor pluginDescriptor) { synchronized (initializedPluginsWithTheirExtensionTypes) { initializedPluginsWithTheirExtensionTypes.remove(pluginDescriptor); } } }); monitor.addPluginJarChangeListener(defaultPluginJarChangeListener); if (shouldPoll) { monitor.start(); } else { monitor.oneShot(); } }
// A pre-existing jar in the bundle directory must be wiped when the infrastructure starts.
@Test void shouldCleanTheBundleDirectoryAtStart() throws Exception { String pluginJarFile = "descriptor-aware-test-plugin.should.be.deleted.jar"; copyPluginToTheDirectory(bundleDir, pluginJarFile); new DefaultPluginManager(monitor, registry, goPluginOSGiFramework, jarChangeListener, null, systemEnvironment, pluginLoader).startInfrastructure(true); assertThat(bundleDir).doesNotExist(); }
// Blocking variant: delegates to addAllAsync(c) and waits for its result.
@Override public boolean addAll(Collection<? extends String> c) {
    return get(addAllAsync(c));
}
// addAll inserts both elements into the lex-sorted set; an element never added is absent.
@Test public void testAll() { RLexSortedSet set = redisson.getLexSortedSet("simple"); set.addAll(Arrays.asList("foo", "bar")); assertThat(set.contains("foo")).isTrue(); assertThat(set.contains("bar")).isTrue(); assertThat(set.contains("123")).isFalse(); }
// Drains the ES queue of items older than (now - minAgeInMs) in batches of loopLimit,
// indexing each batch grouped by doc type. Stops early when a batch's success ratio
// falls at or below the circuit-breaker threshold (to avoid hammering a failing ES)
// or when a batch comes back empty. Logs a summary when anything was processed; all
// throwables are caught so recovery never kills the caller.
@VisibleForTesting void recover() { try (DbSession dbSession = dbClient.openSession(false)) { Profiler profiler = Profiler.create(LOGGER).start(); long beforeDate = system2.now() - minAgeInMs; IndexingResult result = new IndexingResult(); Collection<EsQueueDto> items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit); while (!items.isEmpty()) { IndexingResult loopResult = new IndexingResult(); groupItemsByDocType(items).asMap().forEach((type, typeItems) -> loopResult.add(doIndex(dbSession, type, typeItems))); result.add(loopResult); if (loopResult.getSuccessRatio() <= CIRCUIT_BREAKER_IN_PERCENT) { LOGGER.error(LOG_PREFIX + "too many failures [{}/{} documents], waiting for next run", loopResult.getFailures(), loopResult.getTotal()); break; } if (loopResult.getTotal() == 0L) { break; } items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit); } if (result.getTotal() > 0L) { profiler.stopInfo(LOG_PREFIX + format("%d documents processed [%d failures]", result.getTotal(), result.getFailures())); } } catch (Throwable t) { LOGGER.error(LOG_PREFIX + "fail to recover documents", t); } }
// Three queued items across two doc types are fully recovered: queue drains to zero,
// each fake indexer is called once with its own items, and the summary/trace logs match.
@Test public void successfully_recover_indexing_requests() { EsQueueDto item1a = insertItem(FOO_TYPE, "f1"); EsQueueDto item1b = insertItem(FOO_TYPE, "f2"); IndexMainType type2 = IndexType.main(Index.simple("bars"), "bar"); EsQueueDto item2 = insertItem(type2, "b1"); SuccessfulFakeIndexer indexer1 = new SuccessfulFakeIndexer(FOO_TYPE); SuccessfulFakeIndexer indexer2 = new SuccessfulFakeIndexer(type2); advanceInTime(); underTest = newRecoveryIndexer(indexer1, indexer2); underTest.recover(); assertThatQueueHasSize(0); assertThatLogsContain(INFO, "Elasticsearch recovery - 3 documents processed [0 failures]"); assertThat(indexer1.called).hasSize(1); assertThat(indexer1.called.get(0)) .extracting(EsQueueDto::getUuid) .containsExactlyInAnyOrder(item1a.getUuid(), item1b.getUuid()); assertThatLogsContain(TRACE, "Elasticsearch recovery - processing 2 [foos/foo]"); assertThat(indexer2.called).hasSize(1); assertThat(indexer2.called.get(0)) .extracting(EsQueueDto::getUuid) .containsExactlyInAnyOrder(item2.getUuid()); assertThatLogsContain(TRACE, "Elasticsearch recovery - processing 1 [bars/bar]"); }
// Validates that value lies within [minValueInclusive, maxValueInclusive] (both ends
// inclusive); otherwise checkArgument raises IllegalArgumentException naming the
// argument and the allowed range.
// NOTE(review): the companion test also exercises a double overload — presumably it
// exists elsewhere in this class; confirm.
public static void checkWithinRange(
    long value,
    String valueName,
    long minValueInclusive,
    long maxValueInclusive) {
    checkArgument(
        (value >= minValueInclusive) && (value <= maxValueInclusive),
        "'%s' (%s) must be within the range [%s, %s].",
        valueName, value, minValueInclusive, maxValueInclusive);
}
// In-range values (long and double overloads) pass; out-of-range values throw
// IllegalArgumentException with the formatted range message.
@Test public void testCheckWithinRange() throws Exception { // Should not throw.
Validate.checkWithinRange(10, "arg", 5, 15); Validate.checkWithinRange(10.0, "arg", 5.0, 15.0); // Verify it throws.
intercept(IllegalArgumentException.class, "'arg' (5) must be within the range [10, 20]", () -> Validate.checkWithinRange(5, "arg", 10, 20)); intercept(IllegalArgumentException.class, "'arg' (5.0) must be within the range [10.0, 20.0]", () -> Validate.checkWithinRange(5.0, "arg", 10.0, 20.0)); }
// Renders the elapsed time as "mm:ss": from startTime to lastTime, or to "now" while
// still running (lastTime == null). Returns null before the timer has started.
@Override public String getText() {
    if (startTime == null) {
        return null;
    }
    Duration time = Duration.between(startTime, lastTime == null ? Instant.now() : lastTime);
    final String formatString = "mm:ss";
    // Zero-padded; the companion test shows minutes accumulate past 60 (e.g. "300:00").
    return DurationFormatUtils.formatDuration(time.toMillis(), formatString, true);
}
// Exhaustive "mm:ss" rendering checks across second/minute/hour spans, including the
// running case (null end time) and durations whose minutes exceed 59 ("300:00").
@Test public void testGetText() { final Instant now = Instant.now(); final Instant fiveSecondsAgo = now.minusSeconds(5); final Instant fiveMinutesAgo = now.minusSeconds(5 * 60); final Instant oneHourAgo = now.minusSeconds(60 * 60); final Instant fiveHoursAgo = now.minusSeconds(5 * 60 * 60); assertEquals("00:00", timerText(now, now)); assertEquals("00:00", timerText(now, null)); assertEquals("00:05", timerText(fiveSecondsAgo, now)); assertEquals("00:05", timerText(fiveSecondsAgo, null)); assertEquals("04:55", timerText(fiveMinutesAgo, fiveSecondsAgo)); assertEquals("05:00", timerText(fiveMinutesAgo, now)); assertEquals("05:00", timerText(fiveMinutesAgo, null)); assertEquals("55:00", timerText(oneHourAgo, fiveMinutesAgo)); assertEquals("59:55", timerText(oneHourAgo, fiveSecondsAgo)); assertEquals("60:00", timerText(oneHourAgo, now)); assertEquals("60:00", timerText(oneHourAgo, null)); assertEquals("240:00", timerText(fiveHoursAgo, oneHourAgo)); assertEquals("295:00", timerText(fiveHoursAgo, fiveMinutesAgo)); assertEquals("299:55", timerText(fiveHoursAgo, fiveSecondsAgo)); assertEquals("300:00", timerText(fiveHoursAgo, now)); assertEquals("300:00", timerText(fiveHoursAgo, null)); }
// Unlinks a device from the account. A non-primary caller may only remove itself
// (else 401); the primary device itself can never be removed (403). The removal is
// awaited synchronously before the 204 response.
@DELETE @Produces(MediaType.APPLICATION_JSON) @Path("/{device_id}") @ChangesLinkedDevices public void removeDevice(@Mutable @Auth AuthenticatedDevice auth, @PathParam("device_id") byte deviceId) {
    // 401: caller is neither the primary device nor the device being removed.
    if (auth.getAuthenticatedDevice().getId() != Device.PRIMARY_ID && auth.getAuthenticatedDevice().getId() != deviceId) {
        throw new WebApplicationException(Response.Status.UNAUTHORIZED);
    }
    // 403: the primary device is not removable.
    if (deviceId == Device.PRIMARY_ID) {
        throw new ForbiddenException();
    }
    accounts.removeDevice(auth.getAccount(), deviceId).join();
}
// A linked (non-primary) device attempting to remove a different device gets 401 and
// no removal is attempted.
@Test void removeDeviceByOther() { final byte deviceId = 2; final byte otherDeviceId = 3; try (final Response response = resources .getJerseyTest() .target("/v1/devices/" + otherDeviceId) .request() .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID_3, deviceId, AuthHelper.VALID_PASSWORD_3_LINKED)) .header(HttpHeaders.USER_AGENT, "Signal-Android/5.42.8675309 Android/30") .delete()) { assertThat(response.getStatus()).isEqualTo(401); verify(accountsManager, never()).removeDevice(any(), anyByte()); } }
// Creates a view (search or dashboard). Dashboard creation additionally requires the
// dashboard-creation permission (else 403). The view is validated, saved with the
// requesting user as owner, and a recent-activity entry is recorded with the GRN type
// matching the view type.
@POST @ApiOperation("Create a new view") @AuditEvent(type = ViewsAuditEventTypes.VIEW_CREATE) public ViewDTO create(@ApiParam @Valid @NotNull(message = "View is mandatory") ViewDTO dto, @Context UserContext userContext, @Context SearchUser searchUser) throws ValidationException { if (dto.type().equals(ViewDTO.Type.DASHBOARD) && !searchUser.canCreateDashboards()) { throw new ForbiddenException("User is not allowed to create new dashboards."); } validateIntegrity(dto, searchUser, true); final User user = userContext.getUser(); var result = dbService.saveWithOwner(dto.toBuilder().owner(searchUser.username()).build(), user); recentActivityService.create(result.id(), result.type().equals(ViewDTO.Type.DASHBOARD) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH, searchUser); return result; }
// create() must stamp the requesting user both as the DTO owner and as the save-time
// owner argument, captured from the mocked ViewService.
@Test public void creatingViewAddsCurrentUserAsOwner() throws ValidationException { final ViewService viewService = mock(ViewService.class); final var dto = ViewDTO.builder().searchId("1").title("2").state(new HashMap<>()).build(); when(viewService.saveWithOwner(any(), any())).thenReturn(dto); final ViewsResource viewsResource = createViewsResource( viewService, mock(StartPageService.class), mock(RecentActivityService.class), mock(ClusterEventBus.class), new ReferencedSearchFiltersHelper(), EMPTY_SEARCH_FILTER_VISIBILITY_CHECKER, EMPTY_VIEW_RESOLVERS, SEARCH ); viewsResource.create(TEST_DASHBOARD_VIEW, mockUserContext(), SEARCH_USER); final ArgumentCaptor<ViewDTO> viewCaptor = ArgumentCaptor.forClass(ViewDTO.class); final ArgumentCaptor<User> ownerCaptor = ArgumentCaptor.forClass(User.class); verify(viewService, times(1)).saveWithOwner(viewCaptor.capture(), ownerCaptor.capture()); assertThat(viewCaptor.getValue().owner()).hasValue("testuser"); assertThat(ownerCaptor.getValue().getName()).isEqualTo("testuser"); }
// Removes value by delegating to hsa; the reserved null-sentinel value must never be
// passed in (guarded by an assert, so only enforced with -ea).
@Override public boolean remove(long value) {
    assert value != nullValue : "remove() called with null-sentinel value " + nullValue;
    return hsa.remove(value);
}
// remove() of an absent key returns false; after add(), remove() of the same key succeeds.
@Test public void testRemove() {
    long key = random.nextLong();
    assertFalse(set.remove(key));
    set.add(key);
    assertTrue(set.remove(key));
}
public static void checkMetaDir() throws InvalidMetaDirException, IOException { // check meta dir // if metaDir is the default config: StarRocksFE.STARROCKS_HOME_DIR + "/meta", // we should check whether both the new default dir (STARROCKS_HOME_DIR + "/meta") // and the old default dir (DORIS_HOME_DIR + "/doris-meta") are present. If both are present, // we need to let users keep only one to avoid starting from outdated metadata. Path oldDefaultMetaDir = Paths.get(System.getenv("DORIS_HOME") + "/doris-meta"); Path newDefaultMetaDir = Paths.get(System.getenv("STARROCKS_HOME") + "/meta"); Path metaDir = Paths.get(Config.meta_dir); if (metaDir.equals(newDefaultMetaDir)) { File oldMeta = new File(oldDefaultMetaDir.toUri()); File newMeta = new File(newDefaultMetaDir.toUri()); if (oldMeta.exists() && newMeta.exists()) { LOG.error("New default meta dir: {} and Old default meta dir: {} are both present. " + "Please make sure {} has the latest data, and remove the another one.", newDefaultMetaDir, oldDefaultMetaDir, newDefaultMetaDir); throw new InvalidMetaDirException(); } } File meta = new File(metaDir.toUri()); if (!meta.exists()) { // If metaDir is not the default config, it means the user has specified the other directory // We should not use the oldDefaultMetaDir. 
// Just exit in this case if (!metaDir.equals(newDefaultMetaDir)) { LOG.error("meta dir {} dose not exist", metaDir); throw new InvalidMetaDirException(); } File oldMeta = new File(oldDefaultMetaDir.toUri()); if (oldMeta.exists()) { // For backward compatible Config.meta_dir = oldDefaultMetaDir.toString(); } else { LOG.error("meta dir {} does not exist", meta.getAbsolutePath()); throw new InvalidMetaDirException(); } } long lowerFreeDiskSize = Long.parseLong(EnvironmentParams.FREE_DISK.getDefault()); FileStore store = Files.getFileStore(Paths.get(Config.meta_dir)); if (store.getUsableSpace() < lowerFreeDiskSize) { LOG.error("Free capacity left for meta dir: {} is less than {}", Config.meta_dir, new ByteSizeValue(lowerFreeDiskSize)); throw new InvalidMetaDirException(); } Path imageDir = Paths.get(Config.meta_dir + GlobalStateMgr.IMAGE_DIR); Path bdbDir = Paths.get(BDBEnvironment.getBdbDir()); boolean haveImageData = false; if (Files.exists(imageDir)) { try (Stream<Path> stream = Files.walk(imageDir)) { haveImageData = stream.anyMatch(path -> path.getFileName().toString().startsWith("image.")); } } boolean haveBDBData = false; if (Files.exists(bdbDir)) { try (Stream<Path> stream = Files.walk(bdbDir)) { haveBDBData = stream.anyMatch(path -> path.getFileName().toString().endsWith(".jdb")); } } if (haveImageData && !haveBDBData && !Config.start_with_incomplete_meta) { LOG.error("image exists, but bdb dir is empty, " + "set start_with_incomplete_meta to true if you want to forcefully recover from image data, " + "this may end with stale meta data, so please be careful."); throw new InvalidMetaDirException(); } }
// A meta dir with bdb journal data but no image must pass checkMetaDir (no exception)
// even with start_with_incomplete_meta disabled; fixture dir is cleaned up afterwards.
@Test public void testImageNotExistBDBExist() throws IOException, InvalidMetaDirException { Config.start_with_incomplete_meta = false; Config.meta_dir = testDir + "/meta"; mkdir(Config.meta_dir + "/bdb"); File file = new File(Config.meta_dir + "/bdb/EF889.jdb"); Assert.assertTrue(file.createNewFile()); try { MetaHelper.checkMetaDir(); } finally { deleteDir(new File(testDir + "/")); } }
// Fetches each requested document synchronously over message bus and prints the reply.
// When JSON output is on (and not ids-only), the replies are wrapped in a JSON array
// with commas emitted between (but not after) entries via the `first` flag.
public void retrieveDocuments() throws DocumentRetrieverException { boolean first = true; String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster); MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route); documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams); session = documentAccess.createSyncSession(new SyncParameters.Builder().build()); int trace = params.traceLevel; if (trace > 0) { session.setTraceLevel(trace); } Iterator<String> iter = params.documentIds; if (params.jsonOutput && !params.printIdsOnly) { System.out.println('['); } while (iter.hasNext()) { if (params.jsonOutput && !params.printIdsOnly) { if (!first) { System.out.println(','); } else { first = false; } } String docid = iter.next(); Message msg = createDocumentRequest(docid); Reply reply = session.syncSend(msg); printReply(reply); } if (params.jsonOutput && !params.printIdsOnly) { System.out.println(']'); } }
// Three document ids retrieved with jsonOutput=true must produce a parseable JSON
// array whose entries carry the ids in request order.
@Test void testJsonOutput() throws DocumentRetrieverException, IOException { ClientParameters params = createParameters() .setDocumentIds(asIterator(DOC_ID_1, DOC_ID_2, DOC_ID_3)) .setJsonOutput(true) .build(); when(mockedSession.syncSend(any())).thenReturn( createDocumentReply(DOC_ID_1), createDocumentReply(DOC_ID_2), createDocumentReply(DOC_ID_3)); DocumentRetriever documentRetriever = createDocumentRetriever(params); documentRetriever.retrieveDocuments(); verify(mockedSession, times(3)).syncSend(any()); @SuppressWarnings("unchecked") List<Map<String, Object>> feed = Jackson.mapper().readValue(outContent.toByteArray(), List.class); assertEquals(DOC_ID_1, feed.get(0).get("id")); assertEquals(DOC_ID_2, feed.get(1).get("id")); assertEquals(DOC_ID_3, feed.get(2).get("id")); }
// Loads the template as a classpath resource (resolved relative to this class) and
// compiles it against the data provider via the stream overload.
// NOTE(review): getResourceAsStream returns null for a missing resource and that null
// flows straight into the overload — confirm the overload reports a clear error.
public String compile(final DataProvider dataProvider, final String template) {
    final InputStream templateStream = this.getClass().getResourceAsStream(template);
    return compile(dataProvider, templateStream);
}
// Builds a collection of column-name->value maps from the raw rows and verifies that
// ObjectDataCompiler expands the rule template into the expected DRL text.
@Test
public void testCompilerMaps() throws Exception {
    Collection<Map<String, Object>> maps = new ArrayList<Map<String, Object>>();
    final ObjectDataCompiler converter = new ObjectDataCompiler();
    InputStream templateStream = this.getClass().getResourceAsStream("/templates/rule_template_1.drl");
    TemplateContainer tc = new DefaultTemplateContainer(templateStream);
    Column[] columns = tc.getColumns();
    for (String[] row : rows) {
        Map<String, Object> map = new HashMap<String, Object>();
        for (int icol = 0; icol < columns.length; icol++) {
            Object value = row[icol];
            if (value != null) {
                // Null cells are omitted so the template's defaults apply.
                map.put(columns[icol].getName(), value);
            }
        }
        maps.add(map);
    }
    // Re-open the template: the first stream was consumed while reading the columns.
    templateStream = this.getClass().getResourceAsStream("/templates/rule_template_1.drl");
    final String drl = converter.compile(maps, templateStream);
    assertThat(EXPECTED_RULES.toString()).isEqualToIgnoringWhitespace(drl);
}
/**
 * Shuts down JobRunr components on application shutdown: stops the optional background
 * job server and dashboard (only when enabled), then closes the storage provider last.
 */
@EventListener
void shutdown(ShutdownEvent event) {
    boolean backgroundJobServerEnabled = configuration.getBackgroundJobServer().isEnabled();
    if (backgroundJobServerEnabled) {
        backgroundJobServer.get().stop();
    }
    boolean dashboardEnabled = configuration.getDashboard().isEnabled();
    if (dashboardEnabled) {
        dashboardWebServer.get().stop();
    }
    storageProvider.close();
}
// When neither the background job server nor the dashboard is enabled, shutdown must not
// touch their lazy suppliers (resolving them would needlessly bootstrap the components).
@Test
void onStopOptionalsAreNotCalledToBootstrapIfNotConfigured() {
    when(backgroundJobServerConfiguration.isEnabled()).thenReturn(false);
    when(dashboardConfiguration.isEnabled()).thenReturn(false);
    jobRunrStarter.shutdown(null);
    verifyNoInteractions(backgroundJobServer);
    verifyNoInteractions(dashboardWebServer);
}
/**
 * Creates a {@link ScalablePushRegistry} for the given source, or {@code Optional.empty()}
 * when no {@code application.server} is configured in the streams properties.
 *
 * @throws IllegalArgumentException if the configured application server is not a String
 *         or is not a parseable URL (the parse failure is preserved as the cause)
 */
public static Optional<ScalablePushRegistry> create(
    final LogicalSchema logicalSchema,
    final Supplier<List<PersistentQueryMetadata>> allPersistentQueries,
    final boolean isTable,
    final Map<String, Object> streamsProperties,
    final Map<String, Object> consumerProperties,
    final String sourceApplicationId,
    final KsqlTopic ksqlTopic,
    final ServiceContext serviceContext,
    final KsqlConfig ksqlConfig
) {
  final Object appServer = streamsProperties.get(StreamsConfig.APPLICATION_SERVER_CONFIG);
  if (appServer == null) {
    return Optional.empty();
  }
  if (!(appServer instanceof String)) {
    throw new IllegalArgumentException(StreamsConfig.APPLICATION_SERVER_CONFIG + " not String");
  }
  final URL localhost;
  try {
    localhost = new URL((String) appServer);
  } catch (final MalformedURLException e) {
    // Preserve the underlying parse error as the cause for easier diagnosis.
    throw new IllegalArgumentException(StreamsConfig.APPLICATION_SERVER_CONFIG + " malformed: "
        + "'" + appServer + "'", e);
  }
  final PushLocator pushLocator = new AllHostsLocator(allPersistentQueries, localhost);
  return Optional.of(new ScalablePushRegistry(
      pushLocator, logicalSchema, isTable,
      consumerProperties, ksqlTopic, serviceContext, ksqlConfig, sourceApplicationId,
      KafkaConsumerFactory::create, LatestConsumer::new, CatchupConsumer::new,
      Executors.newSingleThreadExecutor(),
      Executors.newScheduledThreadPool(
          ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PUSH_V2_MAX_CATCHUP_CONSUMERS))));
}
// With no application.server in the streams properties, create() must return empty
// rather than throwing.
@Test
public void shouldCreate_noApplicationServer() {
    // When
    final Optional<ScalablePushRegistry> registry = ScalablePushRegistry.create(SCHEMA,
        Collections::emptyList, false, ImmutableMap.of(), ImmutableMap.of(),
        SOURCE_APP_ID, ksqlTopic, serviceContext, ksqlConfig);
    // Then
    assertThat(registry.isPresent(), is(false));
}
/**
 * Tokenizes the text: expands contractions into separate tokens, pads delimiters with
 * spaces, splits on whitespace, then re-attaches a final period to a known abbreviation.
 * The regex passes are order-sensitive; do not reorder.
 */
@Override
public String[] split(String text) {
    // Split two-part contractions (e.g. "can't" -> "ca n't") ...
    for (Pattern regexp : CONTRACTIONS2)
        text = regexp.matcher(text).replaceAll("$1 $2");
    // ... and three-part contractions.
    for (Pattern regexp : CONTRACTIONS3)
        text = regexp.matcher(text).replaceAll("$1 $2 $3");
    // Surround/prefix delimiter characters with spaces so they become standalone tokens;
    // DELIMITERS[3] normalizes its match to a lone " . " token.
    text = DELIMITERS[0].matcher(text).replaceAll(" $1 ");
    text = DELIMITERS[1].matcher(text).replaceAll(" $1");
    text = DELIMITERS[2].matcher(text).replaceAll(" $1");
    text = DELIMITERS[3].matcher(text).replaceAll(" . ");
    String[] words = WHITESPACE.split(text);
    // If the sentence ends with "." and the previous token is a known abbreviation,
    // restore the abbreviation's trailing period (the final "." token is kept as-is).
    if (words.length > 1 && words[words.length-1].equals(".")) {
        if (EnglishAbbreviations.contains(words[words.length-2])) {
            words[words.length-2] = words[words.length-2] + ".";
        }
    }
    return words;
}
// Tokenization of company-style names mixing digits, letters, and internal punctuation.
@Test
public void testTokenizeMixedAlphanumWords() {
    System.out.println("tokenize words with mixed numbers, letters, and punctuation");
    String text = "3M, L-3, BB&T, AutoZone, O'Reilly, Harley-Davidson, CH2M, A-Mark, "
        + "Quad/Graphics, Bloomin' Brands, B/E Aerospace, J.Crew, E*Trade.";
    // Note: would be very hard to get "Bloomin'" and "E*Trade" correct
    String[] expResult = {"3M", ",", "L-3", ",", "BB&T", ",", "AutoZone", ",", "O'Reilly", ",",
        "Harley-Davidson", ",", "CH2M", ",", "A-Mark", ",", "Quad/Graphics", ",", "Bloomin", "'",
        "Brands", ",", "B/E", "Aerospace", ",", "J.Crew", ",", "E", "*", "Trade", "."};
    SimpleTokenizer instance = new SimpleTokenizer();
    String[] result = instance.split(text);
    // Compare element-by-element for clearer failure messages than array equality.
    assertEquals(expResult.length, result.length);
    for (int i = 0; i < result.length; i++) {
        assertEquals(expResult[i], result[i]);
    }
}
/** Returns the configured decay period in milliseconds. Exposed for tests only. */
@VisibleForTesting
long getDecayPeriodMillis() {
    return decayPeriodMillis;
}
// Verifies the decay period: default when unconfigured, and the (deprecated) FCQ key
// when set explicitly for the scheduler's namespace.
@Test
@SuppressWarnings("deprecation")
public void testParsePeriod() {
    // By default
    scheduler = new DecayRpcScheduler(1, "ipc.1", new Configuration());
    assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_DEFAULT,
        scheduler.getDecayPeriodMillis());
    // Custom
    Configuration conf = new Configuration();
    conf.setLong("ipc.2." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, 1058);
    scheduler = new DecayRpcScheduler(1, "ipc.2", conf);
    assertEquals(1058L, scheduler.getDecayPeriodMillis());
}
/** Closes the expanded content of the currently active transformation graph. */
public static void closeExpandedContent() {
    closeExpandedContent( spoonInstance().getActiveTransGraph() );
}
// Closing expanded content should close the embedded browser and restore the
// sash weights to their pre-expansion values.
@Test
public void testCloseExpandedContentManager() throws Exception {
    TransGraph transGraph = mock( TransGraph.class );
    Browser browser = mock( Browser.class );
    SashForm sashForm = mock( SashForm.class );
    setupExpandedContentMocks( transGraph, browser, sashForm );
    ExpandedContentManager.closeExpandedContent( transGraph );
    verify( browser ).close();
    verify( sashForm ).setWeights( new int[] { 3, 2, 1 } );
}
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) { if (!feedBlockEnabled) { return null; } var nodeInfos = cluster.getNodeInfos(); var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos); if (exhaustions.isEmpty()) { return null; } int maxDescriptions = 3; String description = exhaustions.stream() .limit(maxDescriptions) .map(NodeResourceExhaustion::toExhaustionAddedDescription) .collect(Collectors.joining(", ")); if (exhaustions.size() > maxDescriptions) { description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions); } description = decoratedMessage(cluster, description); // FIXME we currently will trigger a cluster state recomputation even if the number of // exhaustions is greater than what is returned as part of the description. Though at // that point, cluster state recomputations will be the least of your worries...! return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions); }
// Even with usages above their limits, a disabled calculator must never infer a feed block.
@Test
void no_feed_block_returned_when_feed_block_disabled() {
    var calc = new ResourceExhaustionCalculator(false,
        mapOf(usage("disk", 0.5), usage("memory", 0.8)));
    // Node 1 exceeds both limits; node 2 is below both.
    var cf = createFixtureWithReportedUsages(
        forNode(1, usage("disk", 0.51), usage("memory", 0.79)),
        forNode(2, usage("disk", 0.4), usage("memory", 0.6)));
    var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster());
    assertNull(feedBlock);
}
/**
 * Polls the Spark ETL job status and advances the load job state accordingly.
 * Status updates happen under the write lock; push-task submission happens after the
 * lock is released (it re-checks the state first).
 *
 * @throws LoadException if the ETL job was cancelled
 */
public void updateEtlStatus() throws Exception {
    if (!checkState(JobState.ETL)) {
        return;
    }
    // get etl status (remote call done outside the lock)
    SparkEtlJobHandler handler = new SparkEtlJobHandler();
    EtlStatus status = handler.getEtlJobStatus(sparkLoadAppHandle, appId, id, etlOutputPath,
        sparkResource, brokerDesc);
    writeLock();
    try {
        switch (status.getState()) {
            case RUNNING:
                unprotectedUpdateEtlStatusInternal(status);
                break;
            case FINISHED:
                unprotectedProcessEtlFinish(status, handler);
                break;
            case CANCELLED:
                // Thrown while holding the lock; the finally block still releases it.
                throw new LoadException("spark etl job failed. msg: " + status.getFailMsg());
            default:
                LOG.warn("unknown etl state: {}", status.getState().name());
                break;
        }
    } finally {
        writeUnlock();
    }
    if (checkState(JobState.LOADING)) {
        // create and send push tasks
        submitPushTasks();
    }
}
// A CANCELLED ETL state must surface as a LoadException from updateEtlStatus().
@Test(expected = LoadException.class)
public void testUpdateEtlStatusCancelled(@Mocked GlobalStateMgr globalStateMgr,
        @Injectable String originStmt, @Mocked SparkEtlJobHandler handler) throws Exception {
    EtlStatus status = new EtlStatus();
    status.setState(TEtlState.CANCELLED);
    new Expectations() {
        {
            handler.getEtlJobStatus((SparkLoadAppHandle) any, appId, anyLong, etlOutputPath,
                (SparkResource) any, (BrokerDesc) any);
            result = status;
        }
    };
    SparkLoadJob job = getEtlStateJob(originStmt);
    job.updateEtlStatus();
}
/**
 * Strips a trailing ".properties" extension (case-insensitive) from a namespace name;
 * other names are returned unchanged.
 *
 * <p>Uses {@code Locale.ROOT} for the case fold so the suffix match is locale-independent
 * (the default locale, e.g. Turkish, would break the "I" -> "i" mapping).
 */
public String filterNamespaceName(String namespaceName) {
    final String suffix = ".properties";
    if (namespaceName.toLowerCase(java.util.Locale.ROOT).endsWith(suffix)) {
        // The last '.' is necessarily the one starting the suffix, so cutting by
        // suffix length is equivalent to the lastIndexOf('.') position.
        return namespaceName.substring(0, namespaceName.length() - suffix.length());
    }
    return namespaceName;
}
// Names with non-".properties" extensions must pass through untouched, regardless of case.
@Test
public void testFilterNamespaceNameWithRandomCaseUnchanged() throws Exception {
    String someName = "AbCD.xMl";
    assertEquals(someName, namespaceUtil.filterNamespaceName(someName));
}
/**
 * Restarts a workflow instance (possibly delegating up the initiator chain).
 *
 * @throws MaestroBadRequestException if the instance is still in a non-terminal status
 */
public RunResponse restart(RunRequest runRequest) {
    final RunResponse response = restartRecursively(runRequest);
    if (response.getStatus() != RunResponse.Status.NON_TERMINAL_ERROR) {
        return response;
    }
    // Non-terminal instances cannot be restarted: log and reject the request.
    LOG.error(
        "workflow instance {} does not support restart action as it is in a non-terminal status [{}]",
        runRequest.getWorkflowIdentity(),
        response.getTimelineEvent().getMessage());
    throw new MaestroBadRequestException(
        Collections.emptyList(),
        "workflow instance %s does not support restart action as it is in a non-terminal status [%s]",
        runRequest.getWorkflowIdentity(),
        response.getTimelineEvent().getMessage());
}
// A failed subworkflow instance whose upstream parent step is still RUNNING should have
// the restart delegated to the parent; once the parent step is NOT_CREATED, the restart
// must be rejected because the step is absent from the DAG.
@Test
public void testRestartDelegated() {
    WorkflowInstance wfInstance = new WorkflowInstance();
    SubworkflowInitiator initiator = new SubworkflowInitiator();
    UpstreamInitiator.Info info = new UpstreamInitiator.Info();
    info.setWorkflowId("foo");
    info.setInstanceId(123L);
    info.setRunId(2L);
    info.setStepId("bar");
    initiator.setAncestors(Collections.singletonList(info));
    wfInstance.setInitiator(initiator);
    wfInstance.setStatus(WorkflowInstance.Status.FAILED);
    wfInstance.setWorkflowInstanceId(10L);
    wfInstance.setWorkflowRunId(1L);
    wfInstance.setWorkflowId("test-workflow");
    wfInstance.setRuntimeWorkflow(Workflow.builder().build());
    RunRequest request =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.RESTART_FROM_BEGINNING)
            .restartConfig(
                RestartConfig.builder().addRestartNode("test-workflow", 1L, null).build())
            .build();
    // Parent instance with the originating step still RUNNING -> delegation expected.
    WorkflowInstance parentInstance = new WorkflowInstance();
    parentInstance.setWorkflowId("foo");
    parentInstance.setWorkflowInstanceId(123L);
    parentInstance.setStatus(WorkflowInstance.Status.IN_PROGRESS);
    parentInstance.setAggregatedInfo(new WorkflowInstanceAggregatedInfo());
    parentInstance
        .getAggregatedInfo()
        .setStepAggregatedViews(
            Collections.singletonMap(
                "bar", StepAggregatedView.builder().status(StepInstance.Status.RUNNING).build()));
    when(instanceDao.getWorkflowInstance("test-workflow", 1L, Constants.LATEST_INSTANCE_RUN, true))
        .thenReturn(wfInstance);
    when(instanceDao.getWorkflowInstance("foo", 123L, Constants.LATEST_INSTANCE_RUN, true))
        .thenReturn(parentInstance);
    RunResponse response = actionHandler.restart(request);
    // Restart resolved against the parent workflow instance.
    assertEquals("foo", response.getWorkflowId());
    assertEquals(123L, response.getWorkflowInstanceId());
    assertEquals(RunResponse.Status.DELEGATED, response.getStatus());
    // Flip the parent step to NOT_CREATED: restart must now fail validation.
    parentInstance
        .getAggregatedInfo()
        .setStepAggregatedViews(
            Collections.singletonMap(
                "bar",
                StepAggregatedView.builder().status(StepInstance.Status.NOT_CREATED).build()));
    AssertHelper.assertThrows(
        "step id is not in the DAG",
        IllegalArgumentException.class,
        "step [bar] is not created in the DAG",
        () -> actionHandler.restart(request));
}
/**
 * Abbreviates a fully qualified class name towards the target length by shrinking the
 * left-hand package segments to their first character, left to right, until enough
 * characters have been trimmed. The class name (last segment) is never shortened.
 *
 * @throws IllegalArgumentException if {@code fqClassName} is null
 */
public String abbreviate(String fqClassName) {
    if (fqClassName == null) {
        throw new IllegalArgumentException("Class name may not be null");
    }
    int inLen = fqClassName.length();
    // Already short enough: return unchanged.
    if (inLen < targetLength) {
        return fqClassName;
    }
    StringBuilder buf = new StringBuilder(inLen);
    int rightMostDotIndex = fqClassName.lastIndexOf(DOT);
    // No package part at all: nothing to abbreviate.
    if (rightMostDotIndex == -1)
        return fqClassName;
    // length of last segment including the dot
    int lastSegmentLength = inLen - rightMostDotIndex;
    int leftSegments_TargetLen = targetLength - lastSegmentLength;
    if (leftSegments_TargetLen < 0)
        leftSegments_TargetLen = 0;
    int leftSegmentsLen = inLen - lastSegmentLength;
    // maxPossibleTrim denotes the maximum number of characters we aim to trim
    // the actual number of character trimmed may be higher since segments, when
    // reduced, are reduced to just one character
    int maxPossibleTrim = leftSegmentsLen - leftSegments_TargetLen;
    int trimmed = 0;
    // inDotState: true right after a dot (or at the start), i.e. the next char is the
    // first character of a segment and must be kept.
    boolean inDotState = true;
    int i = 0;
    for (; i < rightMostDotIndex; i++) {
        char c = fqClassName.charAt(i);
        if (c == DOT) {
            // if trimmed too many characters, let us stop
            if (trimmed >= maxPossibleTrim)
                break;
            buf.append(c);
            inDotState = true;
        } else {
            if (inDotState) {
                buf.append(c);
                inDotState = false;
            } else {
                trimmed++;
            }
        }
    }
    // append from the position of i which may include the last seen DOT
    buf.append(fqClassName.substring(i));
    return buf.toString();
}
// Names already shorter than the target length must be returned unchanged.
@Test
public void testShortName() {
    {
        TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(100);
        String name = "hello";
        assertEquals(name, abbreviator.abbreviate(name));
    }
    {
        TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(100);
        String name = "hello.world";
        assertEquals(name, abbreviator.abbreviate(name));
    }
}
/**
 * Returns the pipeconf identifier bound to the given device, or null when no binding exists.
 * Uses a single map lookup to avoid the containsKey/get check-then-act race on the
 * (possibly distributed) backing map.
 */
@Override
public PiPipeconfId getPipeconfId(DeviceId deviceId) {
    return java.util.Optional.ofNullable(deviceToPipeconf.get(deviceId))
            .map(v -> v.value())
            .orElse(null);
}
// After binding a pipeconf to a device, the store must return that pipeconf id.
@Test
public void getPipeconfIdDevice() throws Exception {
    clear();
    createOrUpdatePipeconfToDeviceBinding();
    assertEquals("Wrong PipeconfId", store.getPipeconfId(DEVICE_ID), PIPECONF_ID);
}
/**
 * Creates a topic subscription on the given queue via the SEMP API and returns the
 * broker's response.
 */
BrokerResponse createSubscriptionResponse(String queueName, String topicName) throws IOException {
    String endpoint = subscriptionEndpoint(messageVpn, queueName);
    ImmutableMap<String, Object> requestBody =
        ImmutableMap.<String, Object>builder()
            .put("subscriptionTopic", topicName)
            .put("queueName", queueName)
            .build();
    HttpResponse response = executePost(new GenericUrl(baseUrl + endpoint), requestBody);
    return BrokerResponse.fromHttpResponse(response);
}
// Verifies URL-encoding of path segments ('/' and '#') and that the JSON body carries
// the raw (unencoded) queue and topic names.
@Test
public void testCreateSubscriptionResponseEncoding() throws IOException {
    MockHttpTransport transport =
        new MockHttpTransport() {
            @Override
            public LowLevelHttpRequest buildRequest(String method, String url) {
                return new MockLowLevelHttpRequest() {
                    @Override
                    public LowLevelHttpResponse execute() throws IOException {
                        MockLowLevelHttpResponse response = new MockLowLevelHttpResponse();
                        // Body keeps the raw names ...
                        assertTrue(this.getContentAsString().contains("\"queueName\":\"queue/xxx/yyy\""));
                        assertTrue(
                            this.getContentAsString().contains("\"subscriptionTopic\":\"topic/aaa\""));
                        // ... while the URL percent-encodes '/' and '#'.
                        assertTrue(url.contains("queues/queue%2Fxxx%2Fyyy/subscriptions"));
                        assertTrue(url.contains("msgVpns/vpnName%232"));
                        return response;
                    }
                };
            }
        };
    HttpRequestFactory requestFactory = transport.createRequestFactory();
    SempBasicAuthClientExecutor client =
        new SempBasicAuthClientExecutor(
            "http://host", "username", "password", "vpnName#2", requestFactory);
    client.createSubscriptionResponse("queue/xxx/yyy", "topic/aaa");
}
/**
 * Extracts the key values from a record-key string, without restricting to any fields.
 * Delegates to {@code extractRecordKeysByFields} with an empty field list.
 */
public static String[] extractRecordKeys(String recordKey) {
    return extractRecordKeysByFields(recordKey, Collections.emptyList());
}
// Covers complex keys ("field:value,..." with null/empty markers and values containing
// ':') and the simple single-value form.
@Test
public void testExtractRecordKeys() {
    // test complex key form: field1:val1,field2:val2,...
    String[] s1 = KeyGenUtils.extractRecordKeys("id:1");
    Assertions.assertArrayEquals(new String[] {"1"}, s1);
    String[] s2 = KeyGenUtils.extractRecordKeys("id:1,id:2");
    Assertions.assertArrayEquals(new String[] {"1", "2"}, s2);
    // __null__ / __empty__ markers decode to null and "" respectively.
    String[] s3 = KeyGenUtils.extractRecordKeys("id:1,id2:__null__,id3:__empty__");
    Assertions.assertArrayEquals(new String[] {"1", null, ""}, s3);
    // Only the first ':' separates field from value.
    String[] s4 = KeyGenUtils.extractRecordKeys("id:ab:cd,id2:ef");
    Assertions.assertArrayEquals(new String[] {"ab:cd", "ef"}, s4);
    // test simple key form: val1
    String[] s5 = KeyGenUtils.extractRecordKeys("1");
    Assertions.assertArrayEquals(new String[] {"1"}, s5);
    String[] s6 = KeyGenUtils.extractRecordKeys("id:1,id2:2,2");
    Assertions.assertArrayEquals(new String[]{"1", "2", "2"}, s6);
}
/** Resolves the Beam schema for a protobuf message class via its descriptor. */
static Schema getSchema(Class<? extends Message> clazz) {
    return getSchema(ProtobufUtil.getDescriptorForClass(clazz));
}
// Well-known wrapper types (e.g. google.protobuf.*Value) must translate to the
// expected schema.
@Test
public void testWrapperMessagesSchema() {
    assertEquals(
        TestProtoSchemas.WKT_MESSAGE_SCHEMA,
        ProtoSchemaTranslator.getSchema(Proto3SchemaMessages.WktMessage.class));
}
/**
 * Online (stochastic) training step: forward-propagates the input, sets the target,
 * backpropagates with weight update, and advances the step counter. Call order matters.
 */
@Override
public void update(double[] x, double y) {
    propagate(x, true);
    setTarget(y);
    backpropagate(true);
    // t counts completed update steps (e.g. for learning-rate schedules).
    t++;
}
// Trains an MLP on the Longley data for a few epochs and round-trips the model through
// serialization; the test passes if nothing throws.
@Test
public void testLongley() throws Exception {
    System.out.println("longley");
    int p = Longley.x[0].length;
    MLP model = new MLP(Layer.input(p), Layer.rectifier(30), Layer.sigmoid(30));
    // small learning rate and weight decay to counter exploding gradient
    model.setLearningRate(TimeFunction.constant(0.01));
    model.setWeightDecay(0.1);
    for (int epoch = 0; epoch < 5; epoch++) {
        // Shuffle sample order each epoch for SGD.
        int[] permutation = MathEx.permutate(Longley.x.length);
        for (int i : permutation) {
            model.update(Longley.x[i], Longley.y[i]);
        }
    }
    java.nio.file.Path temp = Write.object(model);
    Read.object(temp);
}
/**
 * Pages through boards (newest first), projecting each into a summary response with the
 * writer's nickname, comment count, like count, and whether the given member liked it.
 *
 * @param pageable page/size information
 * @param memberId the viewing member; used for the "liked by me" flag
 */
public Page<BoardSimpleResponse> findAllBoard(final Pageable pageable, final Long memberId) {
    QueryResults<BoardSimpleResponse> result = jpaQueryFactory.select(
            constructor(BoardSimpleResponse.class,
                board.id,
                member.nickname,
                board.post.title,
                board.createdAt,
                board.likeCount.likeCount,
                count(comment.id),
                isLikedAlreadyByMe(memberId)
            )).from(board)
            .leftJoin(member).on(board.writerId.eq(member.id))
            .leftJoin(comment).on(comment.boardId.eq(board.id))
            // Join likes only for this member so the "liked by me" flag is per-viewer.
            .leftJoin(likeStorage).on(likeStorage.boardId.eq(board.id).and(likeStorage.memberId.eq(memberId)))
            // Group by all non-aggregated selected columns; count(comment.id) is the aggregate.
            .groupBy(board.id, member.nickname, board.post.title, board.createdAt, board.likeCount.likeCount)
            .orderBy(board.id.desc())
            .offset(pageable.getOffset())
            .limit(pageable.getPageSize())
            .fetchResults();
    return new PageImpl<>(result.getResults(), pageable, result.getTotal());
}
// With 15 saved boards, page 0 of size 10 must return the 10 newest boards (descending id)
// and report that a next page exists.
@Test
void 게시글_15개에서_1번_페이지에서_10개의_게시글을_페이징_조회를_한다() {
    // given
    List<Board> boards = new ArrayList<>();
    for (int i = 0; i < 15; i++) {
        boards.add(boardRepository.save(게시글_생성_사진없음()));
    }
    PageRequest pageRequest = PageRequest.of(0, 10);
    // when
    Page<BoardSimpleResponse> found = boardQueryRepository.findAllBoard(pageRequest, 1L);
    // then
    List<BoardSimpleResponse> expected = boards.stream()
            .sorted(Comparator.comparing(Board::getId).reversed())
            .limit(10)
            .map(it -> new BoardSimpleResponse(it.getId(), member.getNickname(),
                    it.getPost().getTitle(), it.getCreatedAt(), 0L, 0L, false))
            .toList();
    assertSoftly(softly -> {
        softly.assertThat(found).hasSize(10);
        softly.assertThat(found.hasNext()).isTrue();
        // Timestamps are ignored: persisted createdAt differs from the in-memory value.
        softly.assertThat(found.getContent())
                .usingRecursiveComparison()
                .ignoringFieldsOfTypes(LocalDateTime.class)
                .isEqualTo(expected);
    });
}
/**
 * Returns a channel to the pool, or destroys it when it is the currently cached channel
 * for the server. The per-address lock serializes release against concurrent acquire.
 */
void releaseChannel(Channel channel, String serverAddress) {
    if (channel == null || serverAddress == null) {
        return;
    }
    try {
        synchronized (channelLocks.get(serverAddress)) {
            Channel ch = channels.get(serverAddress);
            if (ch == null) {
                // No cached channel for this address: simply return the channel to the pool.
                nettyClientKeyPool.returnObject(poolKeyMap.get(serverAddress), channel);
                return;
            }
            if (ch.compareTo(channel) == 0) {
                // Releasing the cached channel itself: tear it down instead of pooling it.
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info("return to pool, rm channel:{}", channel);
                }
                destroyChannel(serverAddress, channel);
            } else {
                nettyClientKeyPool.returnObject(poolKeyMap.get(serverAddress), channel);
            }
        }
    } catch (Exception exx) {
        // Pass the throwable so the stack trace is not lost in the log.
        LOGGER.error(exx.getMessage(), exx);
    }
}
// With no cached channel for the address, releaseChannel must return the channel
// straight to the keyed object pool.
@Test
@SuppressWarnings("unchecked")
void assertReleaseChannelWhichCacheIsEmpty() throws Exception {
    setNettyClientKeyPool();
    setUpReleaseChannel();
    channelManager.releaseChannel(channel, "127.0.0.1:8091");
    verify(keyedObjectPool).returnObject(nettyPoolKey, channel);
}
/**
 * Registers a reactive subscriber. Mutually exclusive with polling mode; the synchronized
 * block makes the subscribing flag and the superclass registration atomic with respect to
 * concurrent state checks.
 *
 * @throws IllegalStateException if the result is already being consumed by polling
 */
@Override
public void subscribe(final Subscriber<? super Row> subscriber) {
    if (polling) {
        throw new IllegalStateException("Cannot set subscriber if polling");
    }
    synchronized (this) {
        subscribing = true;
        super.subscribe(subscriber);
    }
}
// A query-result error arriving after subscription must be forwarded to the
// subscriber's onError callback.
@Test
public void shouldPropagateErrorToSubscriber() throws Exception {
    // Given
    subscribe();
    // When
    handleQueryResultError();
    // Then
    verify(subscriber).onError(any());
}
/** Hashes the full byte array using the default initial value (-1). */
public int hash(byte[] bytes) {
    return hash(bytes, bytes.length, -1);
}
// Covers hash-type parsing, instance resolution from configuration and constants,
// and determinism of murmur/jenkins hashes over repeated evaluations.
@Test
public void testHash() {
    int iterations = 30;
    assertTrue("testHash jenkins error !!!",
        Hash.JENKINS_HASH == Hash.parseHashType("jenkins"));
    assertTrue("testHash murmur error !!!",
        Hash.MURMUR_HASH == Hash.parseHashType("murmur"));
    assertTrue("testHash undefined",
        Hash.INVALID_HASH == Hash.parseHashType("undefined"));
    Configuration cfg = new Configuration();
    cfg.set("hadoop.util.hash.type", "murmur");
    assertTrue("testHash", MurmurHash.getInstance() == Hash.getInstance(cfg));
    cfg = new Configuration();
    cfg.set("hadoop.util.hash.type", "jenkins");
    assertTrue("testHash jenkins configuration error !!!",
        JenkinsHash.getInstance() == Hash.getInstance(cfg));
    // Unset type falls back to murmur.
    cfg = new Configuration();
    assertTrue("testHash undefine configuration error !!!",
        MurmurHash.getInstance() == Hash.getInstance(cfg));
    assertTrue("testHash error jenkin getInstance !!!",
        JenkinsHash.getInstance() == Hash.getInstance(Hash.JENKINS_HASH));
    assertTrue("testHash error murmur getInstance !!!",
        MurmurHash.getInstance() == Hash.getInstance(Hash.MURMUR_HASH));
    assertNull("testHash error invalid getInstance !!!",
        Hash.getInstance(Hash.INVALID_HASH));
    // Determinism: repeated hashing of the same input yields the same value.
    int murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes());
    for (int i = 0; i < iterations; i++) {
        assertTrue("multiple evaluation murmur hash error !!!",
            murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes()));
    }
    murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(), 67);
    for (int i = 0; i < iterations; i++) {
        assertTrue(
            "multiple evaluation murmur hash error !!!",
            murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(
                LINE.getBytes(), 67));
    }
    int jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes());
    for (int i = 0; i < iterations; i++) {
        assertTrue(
            "multiple evaluation jenkins hash error !!!",
            jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
                LINE.getBytes()));
    }
    jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(), 67);
    for (int i = 0; i < iterations; i++) {
        assertTrue(
            "multiple evaluation jenkins hash error !!!",
            jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
                LINE.getBytes(), 67));
    }
}
/**
 * Offers an element to the striped buffer table. The buffer is selected by a per-thread
 * probe derived from the thread id; on a missing table/buffer or a contended offer,
 * falls back to {@code expandOrRetry} which may grow the table or rehash the probe.
 *
 * @return the buffer's offer result, or the result of the expand/retry slow path
 */
@Override
public int offer(E e) {
    // Mix the thread id to spread threads across stripes; increment is forced odd so the
    // probe visits all table slots (table length is a power of two).
    @SuppressWarnings("deprecation")
    long z = mix64(Thread.currentThread().getId());
    int increment = ((int) (z >>> 32)) | 1;
    int h = (int) z;
    int mask;
    int result;
    Buffer<E> buffer;
    boolean uncontended = true;
    Buffer<E>[] buffers = table;
    // Fast path: table exists, stripe exists, and the offer did not fail under contention.
    if ((buffers == null)
        || ((mask = buffers.length - 1) < 0)
        || ((buffer = buffers[h & mask]) == null)
        || !(uncontended = ((result = buffer.offer(e)) != Buffer.FAILED))) {
        return expandOrRetry(e, h, increment, uncontended);
    }
    return result;
}
// A buffer stubbed to report FAILED should eventually accept an element via the
// expand/retry path within a bounded number of attempts.
@Test
public void expand() {
    var buffer = new FakeBuffer<Integer>(Buffer.FAILED);
    assertThat(buffer.offer(ELEMENT)).isEqualTo(Buffer.SUCCESS);
    for (int i = 0; i < 64; i++) {
        int result = buffer.offer(ELEMENT);
        if (result == Buffer.SUCCESS) {
            return;
        }
    }
    // No SUCCESS within 64 attempts: expansion never succeeded.
    Assert.fail();
}
/**
 * Resolves a WeChat Mini App phone number from the one-time phone code.
 *
 * @param userType selects which Mini App client configuration to use
 * @param phoneCode one-time code obtained from the Mini App frontend
 * @throws ServiceException with SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR when the
 *         WeChat API call fails
 */
@Override
public WxMaPhoneNumberInfo getWxMaPhoneNumberInfo(Integer userType, String phoneCode) {
    WxMaService service = getWxMaService(userType);
    try {
        return service.getUserService().getPhoneNoInfo(phoneCode);
    } catch (WxErrorException e) {
        log.error("[getPhoneNoInfo][userType({}) phoneCode({}) 获得手机号失败]", userType, phoneCode, e);
        throw exception(SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR);
    }
}
// Happy path: a valid phone code resolves to the phone number info returned by the
// stubbed WeChat user service.
@Test
public void testGetWxMaPhoneNumberInfo_success() throws WxErrorException {
    // Prepare parameters
    Integer userType = randomPojo(UserTypeEnum.class).getValue();
    String phoneCode = randomString();
    // Mock the WeChat user service
    WxMaUserService userService = mock(WxMaUserService.class);
    when(wxMaService.getUserService()).thenReturn(userService);
    WxMaPhoneNumberInfo phoneNumber = randomPojo(WxMaPhoneNumberInfo.class);
    when(userService.getPhoneNoInfo(eq(phoneCode))).thenReturn(phoneNumber);
    // Invoke
    WxMaPhoneNumberInfo result = socialClientService.getWxMaPhoneNumberInfo(userType, phoneCode);
    // Assert the stubbed object is returned as-is
    assertSame(phoneNumber, result);
}
/**
 * Writes a 16-bit value at the given index in little-endian order, after bounds and
 * reference-count checks. Returns this buffer for chaining.
 */
@Override
public ByteBuf setShortLE(int index, int value) {
    checkIndex(index, 2);
    _setShortLE(index, value);
    return this;
}
// Writing to a released buffer must fail with IllegalReferenceCountException.
@Test
public void testSetShortLEAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, new Executable() {
        @Override
        public void execute() {
            releasedBuffer().setShortLE(0, 1);
        }
    });
}
/** Returns the metric category code of this dump (e.g. counter, gauge, meter, histogram). */
public abstract byte getCategory();
// A MeterDump must retain its name, rate, and scope, and report the meter category.
@Test
void testDumpedMeter() {
    QueryScopeInfo info = new QueryScopeInfo.JobManagerQueryScopeInfo();
    MetricDump.MeterDump md = new MetricDump.MeterDump(info, "meter", 5.0);
    assertThat(md.name).isEqualTo("meter");
    assertThat(md.rate).isCloseTo(5.0, within(0.1));
    assertThat(md.scopeInfo).isEqualTo(info);
    assertThat(md.getCategory()).isEqualTo(METRIC_CATEGORY_METER);
}
/** Creates a new, empty builder for this response type. */
public static Builder builder() {
    return new Builder();
}
@Test // Test cases that can't be constructed with our Builder class e2e but that will parse correctly public void testCanDeserializeWithoutDefaultValues() throws JsonProcessingException { // only updated UpdateNamespacePropertiesResponse onlyUpdated = UpdateNamespacePropertiesResponse.builder().addUpdated(UPDATED).build(); String jsonOnlyUpdatedOthersNull = "{\"removed\":null,\"updated\":[\"owner\"],\"missing\":null}"; assertEquals(deserialize(jsonOnlyUpdatedOthersNull), onlyUpdated); String jsonOnlyUpdatedOthersMissing = "{\"updated\":[\"owner\"]}"; assertEquals(deserialize(jsonOnlyUpdatedOthersMissing), onlyUpdated); // Only removed UpdateNamespacePropertiesResponse onlyRemoved = UpdateNamespacePropertiesResponse.builder().addRemoved(REMOVED).build(); String jsonOnlyRemovedOthersNull = "{\"removed\":[\"foo\"],\"updated\":null,\"missing\":null}"; assertEquals(deserialize(jsonOnlyRemovedOthersNull), onlyRemoved); String jsonOnlyRemovedOthersMissing = "{\"removed\":[\"foo\"]}"; assertEquals(deserialize(jsonOnlyRemovedOthersMissing), onlyRemoved); // Only missing UpdateNamespacePropertiesResponse onlyMissing = UpdateNamespacePropertiesResponse.builder().addMissing(MISSING).build(); String jsonOnlyMissingFieldOthersNull = "{\"removed\":null,\"updated\":null,\"missing\":[\"bar\"]}"; assertEquals(deserialize(jsonOnlyMissingFieldOthersNull), onlyMissing); String jsonOnlyMissingFieldIsPresent = "{\"missing\":[\"bar\"]}"; assertEquals(deserialize(jsonOnlyMissingFieldIsPresent), onlyMissing); // all fields are missing UpdateNamespacePropertiesResponse noValues = UpdateNamespacePropertiesResponse.builder().build(); String emptyJson = "{}"; assertEquals(deserialize(emptyJson), noValues); }
/**
 * Creates a mutable map containing exactly the given entry. Unlike
 * {@code Collections.singletonMap}, callers may add further entries.
 */
public static Map<String, Object> singletonMap(String key, Object value) {
    final Map<String, Object> result = new HashMap<>(1);
    result.put(key, value);
    return result;
}
// The returned map must contain exactly the single provided entry.
@Test
void singletonMap() {
    assertThat(CollectionUtil.singletonMap("key", "value"))
        .containsExactly(entry("key", "value"));
}
/**
 * Persists minion task metadata, writing to the new property-store path unless only the
 * deprecated path exists (backward compatibility for metadata written by older versions).
 *
 * @param expectedVersion expected ZNode version for optimistic concurrency control
 */
public static void persistTaskMetadata(HelixPropertyStore<ZNRecord> propertyStore, String taskType,
    BaseTaskMetadata taskMetadata, int expectedVersion) {
    String newPath = ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadata(taskMetadata.getTableNameWithType(),
        taskType);
    String oldPath = ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadataDeprecated(taskType,
        taskMetadata.getTableNameWithType());
    // Prefer the new path; fall back to the old path only when metadata exists solely there.
    if (propertyStore.exists(newPath, AccessOption.PERSISTENT) || !propertyStore.exists(oldPath,
        AccessOption.PERSISTENT)) {
        persistTaskMetadata(newPath, propertyStore, taskType, taskMetadata, expectedVersion);
    } else {
        persistTaskMetadata(oldPath, propertyStore, taskType, taskMetadata, expectedVersion);
    }
}
// Happy path writes once to the new metadata path; a failed property-store set must
// surface as a ZkException carrying the task type and metadata in its message.
@Test
public void testPersistTaskMetadataWithException() {
    DummyTaskMetadata taskMetadata = new DummyTaskMetadata(TABLE_NAME_WITH_TYPE, 1000);
    HelixPropertyStore<ZNRecord> mockPropertyStore = Mockito.mock(HelixPropertyStore.class);
    String expectedPath = NEW_MINION_METADATA_PATH;
    // Test happy path. No exceptions thrown.
    when(mockPropertyStore.set(expectedPath, taskMetadata.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION)).thenReturn(
        true);
    MinionTaskMetadataUtils.persistTaskMetadata(mockPropertyStore, TASK_TYPE, taskMetadata, EXPECTED_VERSION);
    verify(mockPropertyStore, times(1)).set(expectedPath, taskMetadata.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
    // Test exception thrown
    when(mockPropertyStore.set(expectedPath, taskMetadata.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION)).thenReturn(
        false);
    try {
        MinionTaskMetadataUtils.persistTaskMetadata(mockPropertyStore, TASK_TYPE, taskMetadata, EXPECTED_VERSION);
        fail("ZkException should have been thrown");
    } catch (ZkException e) {
        verify(mockPropertyStore, times(2)).set(expectedPath, taskMetadata.toZNRecord(), EXPECTED_VERSION,
            ACCESS_OPTION);
        assertEquals(e.getMessage(), "Failed to persist minion metadata for task: TestTaskType and metadata:"
            + " {\"tableNameWithType\":\"TestTable_OFFLINE\"}");
    }
}
/**
 * Loads a step's serialized XML attribute from the repository, parses it with a
 * secured (XXE-hardened) document builder, and populates the target object.
 *
 * @throws KettleException wrapping any parser configuration, SAX, or IO failure
 *         (the original exception is preserved as the cause)
 */
public static void readStepRep( Object object, Repository rep, ObjectId id_step,
    List<DatabaseMeta> databases ) throws KettleException {
  try {
    String stepXML = rep.getStepAttributeString( id_step, "step-xml" );
    // NOTE(review): getBytes() uses the platform default charset — assumes the stored
    // step XML is representable in it; confirm whether UTF-8 should be forced.
    ByteArrayInputStream bais = new ByteArrayInputStream( stepXML.getBytes() );
    Document doc = XMLParserFactoryProducer.createSecureDocBuilderFactory().newDocumentBuilder().parse( bais );
    read( object, doc.getDocumentElement() );
  } catch ( ParserConfigurationException | SAXException | IOException ex ) {
    // Identical handling for all three failure modes: wrap and keep the cause.
    throw new KettleException( ex.getMessage(), ex );
  }
}
// Passing null arguments (exercised while parsing XML with many external entities)
// must fail fast with a NullPointerException rather than expanding the entities.
@Test( expected = NullPointerException.class )
public void readingStepRepoThrowsExceptionWhenParsingXmlWithBigAmountOfExternalEntities() throws Exception {
    SerializationHelper.readStepRep( null, repo, null, new ArrayList<>() );
}
/**
 * Normalizes a comma-separated field list: trims whitespace around each field, drops
 * empty fields, and leaves quoted sections intact (separators and whitespace inside
 * quotes are preserved). Implemented as a single-pass character state machine.
 */
static String trimFieldsAndRemoveEmptyFields(String str) {
    char[] chars = str.toCharArray();
    char[] res = new char[chars.length];
    /*
     * set when reading the first non trimmable char after a separator char (or the beginning of the string)
     * unset when reading a separator
     */
    boolean inField = false;
    boolean inQuotes = false;
    int i = 0;
    int resI = 0;
    for (; i < chars.length; i++) {
        boolean isSeparator = chars[i] == ',';
        if (!inQuotes && isSeparator) {
            // exiting field (may already be unset)
            inField = false;
            if (resI > 0) {
                // Drop trailing whitespace of the field just ended.
                resI = retroTrim(res, resI);
            }
        } else {
            boolean isTrimmed = !inQuotes && istrimmable(chars[i]);
            if (isTrimmed && !inField) {
                // we haven't meet any non trimmable char since the last separator yet
                continue;
            }
            boolean isEscape = isEscapeChar(chars[i]);
            if (isEscape) {
                inQuotes = !inQuotes;
            }
            // add separator as we already had one field
            if (!inField && resI > 0) {
                res[resI] = ',';
                resI++;
            }
            // register in field (may already be set)
            inField = true;
            // copy current char
            res[resI] = chars[i];
            resI++;
        }
    }
    // inQuotes can only be true at this point if quotes are unbalanced
    if (!inQuotes) {
        // trim end of str
        resI = retroTrim(res, resI);
    }
    return new String(res, 0, resI);
}
// A null argument is a programming error and must raise NullPointerException.
@Test
public void trimFieldsAndRemoveEmptyFields_throws_NPE_if_arg_is_null() {
    assertThatThrownBy(() -> trimFieldsAndRemoveEmptyFields(null))
        .isInstanceOf(NullPointerException.class);
}
/**
 * Refers a service invoker. Registry URLs are delegated untouched; for plain service
 * URLs without a registry-cluster type, the invoker is wrapped so that activated
 * InvokerListener extensions are notified.
 */
@Override
public <T> Invoker<T> refer(Class<T> type, URL url) throws RpcException {
    if (UrlUtils.isRegistry(url)) {
        // Registry protocol handles its own listener wiring.
        return protocol.refer(type, url);
    }
    Invoker<T> invoker = protocol.refer(type, url);
    if (StringUtils.isEmpty(url.getParameter(REGISTRY_CLUSTER_TYPE_KEY))) {
        invoker = new ListenerInvokerWrapper<>(
            invoker,
            Collections.unmodifiableList(ScopeModelUtil.getExtensionLoader(
                    InvokerListener.class, invoker.getUrl().getScopeModel())
                .getActivateExtension(url, INVOKER_LISTENER_KEY)));
    }
    return invoker;
}
// Without invoker.listener configured no listeners load; with it set to "count",
// exactly the named listener is activated and invoked.
@Test
void testLoadingListenerForLocalReference() {
    // verify that no listener is loaded by default
    URL urlWithoutListener =
        URL.valueOf("injvm://127.0.0.1/DemoService").addParameter(INTERFACE_KEY, DemoService.class.getName());
    AbstractInvoker<DemoService> invokerWithoutListener =
        new AbstractInvoker<DemoService>(DemoService.class, urlWithoutListener) {
            @Override
            protected Result doInvoke(Invocation invocation) throws Throwable {
                return null;
            }
        };
    Protocol protocolWithoutListener = mock(Protocol.class);
    when(protocolWithoutListener.refer(DemoService.class, urlWithoutListener))
        .thenReturn(invokerWithoutListener);
    ProtocolListenerWrapper protocolListenerWrapperWithoutListener =
        new ProtocolListenerWrapper(protocolWithoutListener);
    Invoker<?> invoker = protocolListenerWrapperWithoutListener.refer(DemoService.class, urlWithoutListener);
    Assertions.assertTrue(invoker instanceof ListenerInvokerWrapper);
    Assertions.assertEquals(
        0, ((ListenerInvokerWrapper<?>) invoker).getListeners().size());
    // verify that if the invoker.listener is configured, then load the specified listener
    URL urlWithListener = URL.valueOf("injvm://127.0.0.1/DemoService")
        .addParameter(INTERFACE_KEY, DemoService.class.getName())
        .addParameter(INVOKER_LISTENER_KEY, "count");
    AbstractInvoker<DemoService> invokerWithListener =
        new AbstractInvoker<DemoService>(DemoService.class, urlWithListener) {
            @Override
            protected Result doInvoke(Invocation invocation) throws Throwable {
                return null;
            }
        };
    Protocol protocol = mock(Protocol.class);
    when(protocol.refer(DemoService.class, urlWithListener)).thenReturn(invokerWithListener);
    ProtocolListenerWrapper protocolListenerWrapper = new ProtocolListenerWrapper(protocol);
    invoker = protocolListenerWrapper.refer(DemoService.class, urlWithListener);
    Assertions.assertTrue(invoker instanceof ListenerInvokerWrapper);
    // CountInvokerListener increments its counter when notified of the refer.
    Assertions.assertEquals(1, CountInvokerListener.getCounter());
}
@Override @CacheEvict(cacheNames = RedisKeyConstants.OAUTH_CLIENT, allEntries = true) // allEntries 清空所有缓存,因为可能修改到 clientId 字段,不好清理 public void updateOAuth2Client(OAuth2ClientSaveReqVO updateReqVO) { // 校验存在 validateOAuth2ClientExists(updateReqVO.getId()); // 校验 Client 未被占用 validateClientIdExists(updateReqVO.getId(), updateReqVO.getClientId()); // 更新 OAuth2ClientDO updateObj = BeanUtils.toBean(updateReqVO, OAuth2ClientDO.class); oauth2ClientMapper.updateById(updateObj); }
// Verifies updateOAuth2Client() persists all changed fields for an existing
// client record.
@Test
public void testUpdateOAuth2Client_success() {
    // Mock data
    OAuth2ClientDO dbOAuth2Client = randomPojo(OAuth2ClientDO.class);
    oauth2ClientMapper.insert(dbOAuth2Client);// @Sql: insert an existing record first
    // Prepare the request
    OAuth2ClientSaveReqVO reqVO = randomPojo(OAuth2ClientSaveReqVO.class, o -> {
        o.setId(dbOAuth2Client.getId()); // Set the ID being updated
        o.setLogo(randomString());
    });
    // Invoke
    oauth2ClientService.updateOAuth2Client(reqVO);
    // Verify the update is correct
    OAuth2ClientDO oAuth2Client = oauth2ClientMapper.selectById(reqVO.getId()); // fetch the latest record
    assertPojoEquals(reqVO, oAuth2Client);
}
/**
 * Synchronously transfers ownership of pending stream entries idle for at
 * least the given time, starting from {@code startId}. Blocks on the async
 * variant.
 */
@Override
public AutoClaimResult<K, V> autoClaim(String groupName, String consumerName, long idleTime, TimeUnit idleTimeUnit, StreamMessageId startId, int count) {
    return get(autoClaimAsync(groupName, consumerName, idleTime, idleTimeUnit, startId, count));
}
// Verifies autoClaim() transfers pending entries (idle past the threshold)
// from their current consumer to the claiming consumer.
@Test
public void testAutoClaim() throws InterruptedException {
    RStream<String, String> stream = redisson.getStream("test");

    stream.add(StreamAddArgs.entry("0", "0"));
    stream.createGroup(StreamCreateGroupArgs.name("testGroup"));

    StreamMessageId id1 = stream.add(StreamAddArgs.entry("1", "1"));
    StreamMessageId id2 = stream.add(StreamAddArgs.entry("2", "2"));

    Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered());
    assertThat(s.size()).isEqualTo(2);

    StreamMessageId id3 = stream.add(StreamAddArgs.entry("3", "33"));
    StreamMessageId id4 = stream.add(StreamAddArgs.entry("4", "44"));

    Map<StreamMessageId, Map<String, String>> s2 = stream.readGroup("testGroup", "consumer2", StreamReadGroupArgs.neverDelivered());
    assertThat(s2.size()).isEqualTo(2);

    // Let the entries exceed the 1 ms idle threshold before claiming.
    Thread.sleep(5);

    AutoClaimResult<String, String> res = stream.autoClaim("testGroup", "consumer1", 1, TimeUnit.MILLISECONDS, id3, 2);
    assertThat(res.getMessages().size()).isEqualTo(2);
    for (Map.Entry<StreamMessageId, Map<String, String>> entry : res.getMessages().entrySet()) {
        assertThat(entry.getValue().keySet()).containsAnyOf("3", "4");
        assertThat(entry.getValue().values()).containsAnyOf("33", "44");
    }
}
@Override public AuditReplayCommand parse(Text inputLine, Function<Long, Long> relativeToAbsolute) throws IOException { Matcher m = logLineParseRegex.matcher(inputLine.toString()); if (!m.find()) { throw new IOException( "Unable to find valid message pattern from audit log line: `" + inputLine + "` using regex `" + logLineParseRegex + "`"); } long relativeTimestamp; try { relativeTimestamp = dateFormat.parse(m.group("timestamp")).getTime() - startTimestamp; } catch (ParseException p) { throw new IOException( "Exception while parsing timestamp from audit log line: `" + inputLine + "`", p); } // Sanitize the = in the rename options field into a : so we can split on = String auditMessageSanitized = m.group("message").replace("(options=", "(options:"); Map<String, String> parameterMap = new HashMap<String, String>(); String[] auditMessageSanitizedList = auditMessageSanitized.split("\t"); for (String auditMessage : auditMessageSanitizedList) { String[] splitMessage = auditMessage.split("=", 2); try { parameterMap.put(splitMessage[0], splitMessage[1]); } catch (ArrayIndexOutOfBoundsException e) { throw new IOException( "Exception while parsing a message from audit log line: `" + inputLine + "`", e); } } return new AuditReplayCommand(relativeToAbsolute.apply(relativeTimestamp), // Split the UGI on space to remove the auth and proxy portions of it SPACE_SPLITTER.split(parameterMap.get("ugi")).iterator().next(), parameterMap.get("cmd").replace("(options:", "(options="), parameterMap.get("src"), parameterMap.get("dst"), parameterMap.get("ip")); }
// Verifies parse() strips the "(auth:TOKEN)" suffix from the UGI field,
// keeping only the bare username.
@Test
public void testInputWithTokenAuth() throws Exception {
    Text in = getAuditString("1970-01-01 00:00:11,000", "fakeUser (auth:TOKEN)",
        "create", "sourcePath", "null");
    AuditReplayCommand expected = new AuditReplayCommand(1000, "fakeUser",
        "create", "sourcePath", "null", "0.0.0.0");
    assertEquals(expected, parser.parse(in, Function.identity()));
}
/**
 * Exposes JVM runtime gauges: the runtime name, a formatted vendor/VM
 * description, and the uptime in milliseconds.
 *
 * @return an unmodifiable map of gauge name to gauge
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> metrics = new HashMap<>();
    metrics.put("name", (Gauge<String>) runtime::getName);
    // "vendor vmName vmVersion (specVersion)", e.g. "Oracle ... (17)".
    final Gauge<String> vendorGauge = () -> String.format(Locale.US, "%s %s %s (%s)",
            runtime.getVmVendor(),
            runtime.getVmName(),
            runtime.getVmVersion(),
            runtime.getSpecVersion());
    metrics.put("vendor", vendorGauge);
    metrics.put("uptime", (Gauge<Long>) runtime::getUptime);
    return Collections.unmodifiableMap(metrics);
}
// Verifies the gauge set exposes exactly the "vendor", "name" and "uptime" keys.
@Test
public void hasASetOfGauges() {
    assertThat(gauges.getMetrics().keySet())
        .containsOnly("vendor", "name", "uptime");
}
/**
 * Encodes the given password by delegating to the two-argument overload with
 * the flag set to {@code true}.
 * NOTE(review): the flag's semantics are defined by encode(String, boolean) —
 * presumably it controls whether the encrypted-password prefix handling is
 * applied; confirm against that overload.
 */
@Override
public String encode(String rawPassword) {
    final boolean defaultFlag = true;
    return encode(rawPassword, defaultFlag);
}
// Verifies encode(raw, false) produces the expected two-way encrypted values
// for null, empty, whitespace-padded and special-character passwords.
@Test
public void testEncode1() throws KettleValueException {
    KettleTwoWayPasswordEncoder encoder = new KettleTwoWayPasswordEncoder();
    String encryption;

    encryption = encoder.encode( null, false );
    assertTrue( "".equals( encryption ) );

    encryption = encoder.encode( "", false );
    assertTrue( "".equals( encryption ) );

    encryption = encoder.encode( " ", false );
    assertTrue( "2be98afc86aa7f2e4cb79ce309ed2ef9a".equals( encryption ) );

    encryption = encoder.encode( "Test of different encryptions!!@#$%", false );
    assertTrue( "54657374206f6620646966666572656e742067d0fbddb11ad39b8ba50aef31fed1eb9f".equals( encryption ) );

    encryption = encoder.encode( " Spaces left", false );
    assertTrue( "2be98afe84af48285a81cbd30d297a9ce".equals( encryption ) );

    encryption = encoder.encode( "Spaces right", false );
    assertTrue( "2be98afc839d79387ae0aee62d795a7ce".equals( encryption ) );

    encryption = encoder.encode( " Spaces ", false );
    assertTrue( "2be98afe84a87d2c49809af73db81ef9a".equals( encryption ) );

    encryption = encoder.encode( "1234567890", false );
    assertTrue( "2be98afc86aa7c3d6f84dfb2689caf68a".equals( encryption ) );
}
/**
 * Returns the job metric with the given name, using the default unit
 * {@code Unit.COUNT}.
 */
public static Metric metric(String name) {
    return MetricsImpl.metric(name, Unit.COUNT);
}
// Verifies that two jobs using the same metric name ("total") accumulate
// independently: 1000 increments in one job, 3000 in the other.
@Test
public void metricsAreJobIndependent() {
    pipeline.readFrom(TestSources.itemStream(1_000))
        .withIngestionTimestamps()
        .filter(l -> l.sequence() < 4000)
        .map(t -> {
            // Every 4th item increments "total" in the first job.
            if (t.sequence() % 4 == 0) {
                Metrics.metric("total").increment();
            }
            return t;
        })
        .writeTo(Sinks.noop());

    Pipeline pipeline2 = Pipeline.create();
    pipeline2.readFrom(TestSources.itemStream(1_000))
        .withIngestionTimestamps()
        .filter(l -> l.sequence() < 4000)
        .map(t -> {
            // The complementary 3/4 of items increment "total" in the second job.
            if (t.sequence() % 4 != 0) {
                Metrics.metric("total").increment();
            }
            return t;
        })
        .writeTo(Sinks.noop());

    Job job = instance.getJet().newJob(pipeline, JOB_CONFIG_WITH_METRICS);
    Job job2 = instance.getJet().newJob(pipeline2, JOB_CONFIG_WITH_METRICS);

    JobMetricsChecker checker1 = new JobMetricsChecker(job);
    assertTrueEventually(() -> checker1.assertSummedMetricValue("total", 1000));
    JobMetricsChecker checker2 = new JobMetricsChecker(job2);
    assertTrueEventually(() -> checker2.assertSummedMetricValue("total", 3000));
}
/**
 * Builds the physical index name for a logical table and a time bucket,
 * compressing the bucket to day granularity according to the step precision.
 * Super-dataset records always use second-precision input compressed with the
 * super-dataset day step.
 *
 * @throws IllegalArgumentException if tableName is blank or pointOfTB is not positive
 * @throws UnexpectedException for an unsupported step
 */
public static String queryIndexName(String tableName, long pointOfTB, Step step, boolean isRecord, boolean isSuperDataSet) {
    if (StringUtil.isBlank(tableName) || pointOfTB <= 0) {
        throw new IllegalArgumentException(
            "Arguments [tableName]: " + tableName + " can not be blank and [pointOfTB]: " + pointOfTB + " can not <= 0");
    }
    // Super-dataset records: strip second precision (yyyyMMddHHmmss -> yyyyMMdd).
    if (isRecord && isSuperDataSet) {
        return tableName + Const.LINE + compressTimeBucket(pointOfTB / 1000000, SUPER_DATASET_DAY_STEP);
    }
    // Reduce the time bucket to day precision based on the step's resolution.
    final long dayTimeBucket;
    switch (step) {
        case DAY:
            dayTimeBucket = pointOfTB;
            break;
        case HOUR:
            dayTimeBucket = pointOfTB / 100;
            break;
        case MINUTE:
            dayTimeBucket = pointOfTB / 10000;
            break;
        case SECOND:
            dayTimeBucket = pointOfTB / 1000000;
            break;
        default:
            throw new UnexpectedException("Failed to get the index name from tableName:" + tableName + ", pointOfTB:" + pointOfTB + ", step:" + step.name());
    }
    return tableName + Const.LINE + compressTimeBucket(dayTimeBucket, DAY_STEP);
}
// Verifies the index name is derived from the day-granularity time bucket for
// each Step precision, and from second precision for super-dataset records.
@Test
public void queryIndexNameTest() {
    Assertions.assertEquals(
        "metrics-apdex-20220710",
        TimeSeriesUtils.queryIndexName("metrics-apdex", 20220710111111L, Step.SECOND, false, false)
    );
    Assertions.assertEquals(
        "metrics-apdex-20220710",
        TimeSeriesUtils.queryIndexName("metrics-apdex", 202207101111L, Step.MINUTE, false, false)
    );
    Assertions.assertEquals(
        "metrics-apdex-20220710",
        TimeSeriesUtils.queryIndexName("metrics-apdex", 2022071011L, Step.HOUR, false, false)
    );
    Assertions.assertEquals(
        "metrics-apdex-20220710",
        TimeSeriesUtils.queryIndexName("metrics-apdex", 20220710L, Step.DAY, false, false)
    );
    Assertions.assertEquals(
        "metrics-apdex-20220710",
        TimeSeriesUtils.queryIndexName("metrics-apdex", 20220710111111L, Step.DAY, true, true)
    );
}
/**
 * Creates (or replaces) a SQL mapping in the relations storage.
 * Exactly one of the three modes applies: IF NOT EXISTS (silent no-op on
 * duplicate), OR REPLACE (overwrite + listener notification), or plain
 * create (error on duplicate).
 *
 * @throws QueryException if the name is taken and neither replace nor
 *         ifNotExists was requested
 */
public void createMapping(Mapping mapping, boolean replace, boolean ifNotExists, SqlSecurityContext securityContext) {
    // Resolve field metadata (and apply security checks) before storing.
    Mapping resolved = resolveMapping(mapping, securityContext);

    String name = resolved.name();
    if (ifNotExists) {
        relationsStorage.putIfAbsent(name, resolved);
    } else if (replace) {
        relationsStorage.put(name, resolved);
        listeners.forEach(TableListener::onTableChanged);
    } else if (!relationsStorage.putIfAbsent(name, resolved)) {
        throw QueryException.error("Mapping or view already exists: " + name);
    }
    // NOTE(review): listeners are only notified on the replace path — confirm
    // that newly created mappings don't require an onTableChanged notification.
}
// Verifies CREATE MAPPING IF NOT EXISTS succeeds silently (and without any
// listener notification) when the mapping name is already taken.
@Test
public void when_createsDuplicateMappingWithIfNotExists_then_succeeds() {
    // given
    Mapping mapping = mapping();
    given(connectorCache.forType(mapping.connectorType())).willReturn(connector);
    given(connector.typeName()).willReturn(mapping.connectorType());
    given(connector.defaultObjectType()).willReturn("Dummy");
    given(connector.resolveAndValidateFields(nodeEngine,
            new SqlExternalResource(mapping.externalName(), mapping.dataConnection(), mapping.connectorType(), null, mapping.options()),
            mapping.fields()
    ))
            .willReturn(singletonList(new MappingField("field_name", INT)));
    // putIfAbsent returning false simulates an already-existing mapping.
    given(relationsStorage.putIfAbsent(eq(mapping.name()), isA(Mapping.class))).willReturn(false);

    // when
    catalog.createMapping(mapping, false, true, null);

    // then
    verifyNoInteractions(listener);
}
/**
 * Reads a system property, mapping the sentinel "null value" marker back to
 * a real {@code null}.
 *
 * @param propertyName name of the system property to read
 * @return the property value, or null if unset or set to the null sentinel
 */
public static String getProperty(final String propertyName) {
    final String value = System.getProperty(propertyName);
    if (NULL_PROPERTY_VALUE.equals(value)) {
        return null;
    }
    return value;
}
// Verifies getProperty(name, default) falls back to the default when the
// system property is not set.
@Test
void shouldGetDefaultProperty() {
    final String key = "org.agrona.test.case";
    final String defaultValue = "default";

    assertEquals(defaultValue, SystemUtil.getProperty(key, defaultValue));
}
/**
 * Returns the plugins from the comma-separated plugin configuration
 * parameter, plus the publish plugin when publishing is enabled.
 */
@Override
public List<Plugin> plugins() {
    List<Plugin> plugins = configurationParameters.get(PLUGIN_PROPERTY_NAME,
        s -> Arrays.stream(s.split(","))
            .map(String::trim)
            .map(PluginOption::parse)
            .map(pluginOption -> (Plugin) pluginOption)
            .collect(Collectors.toList()))
        // Must be a mutable list: the publish plugin may be appended below.
        .orElseGet(ArrayList::new);
    getPublishPlugin()
        .ifPresent(plugins::add);
    return plugins;
}
// Verifies the publish formatter plugin is included when
// cucumber.publish.enabled is true.
@Test
void getPluginNamesWithPublishEnabled() {
    ConfigurationParameters config = new MapConfigurationParameters(
        Constants.PLUGIN_PUBLISH_ENABLED_PROPERTY_NAME, "true");

    assertThat(new CucumberEngineOptions(config).plugins().stream()
            .map(Options.Plugin::pluginString)
            .collect(toList()),
        hasItem("io.cucumber.core.plugin.PublishFormatter"));
}
/**
 * Extracts the path plus query portion of a URI. An empty path normalizes to
 * "/"; a non-empty query is appended after "?".
 *
 * @param uri the source URI
 * @return the path (never empty), optionally followed by "?" and the query
 */
public static String pathAndQueryOf(final URI uri) {
    String result = uri.getPath();
    if (ObjectHelper.isEmpty(result)) {
        // Normalize the empty path to the root path.
        result = "/";
    }
    final String query = uri.getQuery();
    return ObjectHelper.isNotEmpty(query) ? result + "?" + query : result;
}
// Verifies path-and-query extraction: empty paths normalize to "/" and the
// query string is appended after "?" when present.
@Test
public void testPathAndQueryOf() {
    assertEquals("/", URISupport.pathAndQueryOf(URI.create("http://localhost")));
    assertEquals("/", URISupport.pathAndQueryOf(URI.create("http://localhost:80")));
    assertEquals("/", URISupport.pathAndQueryOf(URI.create("http://localhost:80/")));
    assertEquals("/path", URISupport.pathAndQueryOf(URI.create("http://localhost:80/path")));
    assertEquals("/path/", URISupport.pathAndQueryOf(URI.create("http://localhost:80/path/")));
    assertEquals("/path?query=value", URISupport.pathAndQueryOf(URI.create("http://localhost:80/path?query=value")));
}
/**
 * Offers the item to the edge with the given ordinal, or to all edges when
 * the ordinal is -1. Returns false when the underlying queue refused the item.
 *
 * @throws IllegalArgumentException if ordinal equals the bucket count — that
 *         ordinal is reserved for the snapshot queue
 */
@Override
public final boolean offer(int ordinal, @Nonnull Object item) {
    if (ordinal == -1) {
        return offerInternal(allEdges, item);
    } else {
        if (ordinal == bucketCount()) {
            // ordinal beyond bucketCount will add to snapshot queue, which we don't allow through this method
            throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal);
        }
        // Reuse the single-element array to avoid allocating on every offer.
        singleEdge[0] = ordinal;
        return offerInternal(singleEdge, item);
    }
}
// Verifies that after an offer to ordinal 0 fails, offering the same item to
// a different ordinal (1) is rejected.
@Test
public void when_offerFailsAndOfferedToDifferentOrdinal_then_fail_1() {
    do_when_offerToDifferentOrdinal_then_fail(e -> outbox.offer(0, e), e -> outbox.offer(1, e));
}
/**
 * Loads Iceberg table metadata for a TABLE identifier via the
 * SYSTEM$GET_ICEBERG_TABLE_INFORMATION system function.
 *
 * @throws IllegalArgumentException if the identifier is not of type TABLE
 */
@Override
public SnowflakeTableMetadata loadTableMetadata(SnowflakeIdentifier tableIdentifier) {
    Preconditions.checkArgument(
        tableIdentifier.type() == SnowflakeIdentifier.Type.TABLE,
        "loadTableMetadata requires a TABLE identifier, got '%s'",
        tableIdentifier);
    SnowflakeTableMetadata tableMeta;
    try {
        final String finalQuery = "SELECT SYSTEM$GET_ICEBERG_TABLE_INFORMATION(?) AS METADATA";
        tableMeta = connectionPool.run(
            conn -> queryHarness.query(
                conn,
                finalQuery,
                TABLE_METADATA_RESULT_SET_HANDLER,
                tableIdentifier.toIdentifierString()));
    } catch (SQLException e) {
        // Translate JDBC failures into Iceberg's exception hierarchy.
        throw snowflakeExceptionToIcebergException(
            tableIdentifier, e, String.format("Failed to get table metadata for '%s'", tableIdentifier));
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(
            e, "Interrupted while getting table metadata for '%s'", tableIdentifier);
    }
    return tableMeta;
}
// Verifies loadTableMetadata() issues the SYSTEM$GET_ICEBERG_TABLE_INFORMATION
// query and parses an Azure metadata location from the JSON response.
@SuppressWarnings("unchecked")
@Test
public void testGetAzureTableMetadata() throws SQLException {
    when(mockResultSet.next()).thenReturn(true);
    when(mockResultSet.getString("METADATA"))
        .thenReturn(
            "{\"metadataLocation\":\"azure://myaccount.blob.core.windows.net/mycontainer/tab3/metadata/v334.metadata.json\",\"status\":\"success\"}");

    SnowflakeTableMetadata actualMetadata = snowflakeClient.loadTableMetadata(
        SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_1", "TABLE_1"));

    verify(mockQueryHarness)
        .query(
            eq(mockConnection),
            eq("SELECT SYSTEM$GET_ICEBERG_TABLE_INFORMATION(?) AS METADATA"),
            any(JdbcSnowflakeClient.ResultSetParser.class),
            eq("DB_1.SCHEMA_1.TABLE_1"));

    // Expected: the azure:// location is normalized to a wasbs:// URI as well.
    SnowflakeTableMetadata expectedMetadata =
        new SnowflakeTableMetadata(
            "azure://myaccount.blob.core.windows.net/mycontainer/tab3/metadata/v334.metadata.json",
            "wasbs://mycontainer@myaccount.blob.core.windows.net/tab3/metadata/v334.metadata.json",
            "success",
            null);
    assertThat(actualMetadata).isEqualTo(expectedMetadata);
}
/**
 * Resolves the given path against this path's parent. An absolute argument is
 * returned as-is; when this path has no parent, the argument is returned as-is.
 *
 * @throws ProviderMismatchException if {@code other} is not a path of this provider
 */
@Override
public JimfsPath resolveSibling(Path other) {
    JimfsPath otherPath = checkPath(other);
    if (otherPath == null) {
        throw new ProviderMismatchException(other.toString());
    }

    if (otherPath.isAbsolute()) {
        return otherPath;
    }
    JimfsPath parent = getParent();
    if (parent == null) {
        return otherPath;
    }
    // NOTE(review): resolves against the original `other` rather than the
    // checked `otherPath` — presumably equivalent since checkPath passed; confirm.
    return parent.resolve(other);
}
// Verifies resolveSibling("") returns the parent path, or the empty path when
// the receiver has no parent.
@Test
public void testResolveSibling_givenEmptyPath() {
    Path path = pathService.parsePath("foo/bar");
    Path resolved = path.resolveSibling("");
    assertPathEquals("foo", resolved);

    path = pathService.parsePath("foo");
    resolved = path.resolveSibling("");
    assertPathEquals("", resolved);
}
/**
 * Asynchronously finds the assets owned by the given tenant and customer
 * whose ids are in the provided list.
 */
@Override
public ListenableFuture<List<Asset>> findAssetsByTenantIdAndCustomerIdAndIdsAsync(UUID tenantId, UUID customerId, List<UUID> assetIds) {
    // Run the repository query on the DAO executor and convert entities to data objects.
    return service.submit(() -> DaoUtil.convertDataList(assetRepository.findByTenantIdAndCustomerIdAndIdIn(tenantId, customerId, assetIds)));
}
// Verifies the async lookup returns exactly the assets matching the given
// tenant, customer and id list.
@Test
public void testFindAssetsByTenantIdCustomerIdAndIdsAsync() throws ExecutionException, InterruptedException, TimeoutException {
    List<UUID> searchIds = getAssetsUuids(tenantId1);

    ListenableFuture<List<Asset>> assetsFuture = assetDao
        .findAssetsByTenantIdAndCustomerIdAndIdsAsync(tenantId1, customerId1, searchIds);
    List<Asset> assets = assetsFuture.get(30, TimeUnit.SECONDS);
    assertNotNull(assets);
    assertEquals(searchIds.size(), assets.size());
}
/**
 * Writes a metadata summary file for the given footers.
 *
 * @deprecated use
 *     {@code writeMetadataFile(Configuration, Path, List, JobSummaryLevel)}
 *     to control the summary level; this variant always writes
 *     {@code JobSummaryLevel.ALL}.
 */
@Deprecated
public static void writeMetadataFile(Configuration configuration, Path outputPath, List<Footer> footers) throws IOException {
    writeMetadataFile(configuration, outputPath, footers, JobSummaryLevel.ALL);
}
// Verifies writeMetadataFile() handles a relative output path without
// throwing (the footer itself uses the fully-qualified path).
@Test
public void testWriteMetadataFileWithRelativeOutputPath() throws IOException {
    Configuration conf = getTestConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Path relativeRoot = new Path("target/_test_relative");
    Path qualifiedRoot = fs.makeQualified(relativeRoot);

    ParquetMetadata mock = Mockito.mock(ParquetMetadata.class);
    FileMetaData fileMetaData = new FileMetaData(
        new MessageType("root1", new PrimitiveType(REPEATED, BINARY, "a")),
        new HashMap<String, String>(), "test");
    Mockito.when(mock.getFileMetaData()).thenReturn(fileMetaData);

    List<Footer> footers = new ArrayList<Footer>();
    Footer footer = new Footer(new Path(qualifiedRoot, "one"), mock);
    footers.add(footer);

    // This should not throw an exception
    ParquetFileWriter.writeMetadataFile(conf, relativeRoot, footers, JobSummaryLevel.ALL);
}
/**
 * Writes a file using the Google Storage JSON API resumable upload protocol:
 * an initial POST creates the upload session (carrying the object metadata
 * JSON and an optional predefinedAcl), then the delayed entity is PUT to the
 * session URI returned in the Location header.
 */
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final DelayedHttpEntityCallable<StorageObject> command = new DelayedHttpEntityCallable<StorageObject>(file) {
        @Override
        public StorageObject call(final HttpEntity entity) throws BackgroundException {
            try {
                // POST /upload/storage/v1/b/myBucket/o
                final StringBuilder uri = new StringBuilder(String.format("%supload/storage/v1/b/%s/o?uploadType=resumable",
                        session.getClient().getRootUrl(), containerService.getContainer(file).getName()));
                if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
                    // Requester-pays bucket: bill the authenticated user's project.
                    uri.append(String.format("&userProject=%s", session.getHost().getCredentials().getUsername()));
                }
                if(!Acl.EMPTY.equals(status.getAcl())) {
                    if(status.getAcl().isCanned()) {
                        // Map canned ACLs onto the predefinedAcl query parameter.
                        uri.append("&predefinedAcl=");
                        if(Acl.CANNED_PRIVATE.equals(status.getAcl())) {
                            uri.append("private");
                        }
                        else if(Acl.CANNED_PUBLIC_READ.equals(status.getAcl())) {
                            uri.append("publicRead");
                        }
                        else if(Acl.CANNED_PUBLIC_READ_WRITE.equals(status.getAcl())) {
                            uri.append("publicReadWrite");
                        }
                        else if(Acl.CANNED_AUTHENTICATED_READ.equals(status.getAcl())) {
                            uri.append("authenticatedRead");
                        }
                        else if(Acl.CANNED_BUCKET_OWNER_FULLCONTROL.equals(status.getAcl())) {
                            uri.append("bucketOwnerFullControl");
                        }
                        else if(Acl.CANNED_BUCKET_OWNER_READ.equals(status.getAcl())) {
                            uri.append("bucketOwnerRead");
                        }
                        // Reset in status to skip setting ACL in upload filter already applied as canned ACL
                        status.setAcl(Acl.EMPTY);
                    }
                }
                final HttpEntityEnclosingRequestBase request = new HttpPost(uri.toString());
                // NOTE(review): the metadata JSON is assembled by string concatenation
                // — keys and values are not JSON-escaped; confirm inputs cannot
                // contain quotes or backslashes.
                final StringBuilder metadata = new StringBuilder();
                metadata.append(String.format("{\"name\": \"%s\"", containerService.getKey(file)));
                metadata.append(",\"metadata\": {");
                for(Iterator<Map.Entry<String, String>> iter = status.getMetadata().entrySet().iterator(); iter.hasNext(); ) {
                    final Map.Entry<String, String> item = iter.next();
                    metadata.append(String.format("\"%s\": \"%s\"", item.getKey(), item.getValue()));
                    if(iter.hasNext()) {
                        metadata.append(",");
                    }
                }
                metadata.append("}");
                if(StringUtils.isNotBlank(status.getMime())) {
                    metadata.append(String.format(", \"contentType\": \"%s\"", status.getMime()));
                }
                if(StringUtils.isNotBlank(status.getStorageClass())) {
                    metadata.append(String.format(", \"storageClass\": \"%s\"", status.getStorageClass()));
                }
                if(null != status.getModified()) {
                    metadata.append(String.format(", \"customTime\": \"%s\"",
                            new ISO8601DateFormatter().format(status.getModified(), TimeZone.getTimeZone("UTC"))));
                }
                metadata.append("}");
                request.setEntity(new StringEntity(metadata.toString(),
                        ContentType.create("application/json", StandardCharsets.UTF_8.name())));
                if(StringUtils.isNotBlank(status.getMime())) {
                    // Set to the media MIME type of the upload data to be transferred in subsequent requests.
                    request.addHeader("X-Upload-Content-Type", status.getMime());
                }
                request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
                final HttpClient client = session.getHttpClient();
                final HttpResponse response = client.execute(request);
                try {
                    switch(response.getStatusLine().getStatusCode()) {
                        case HttpStatus.SC_OK:
                            break;
                        default:
                            throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed",
                                    new HttpResponseException(response.getStatusLine().getStatusCode(),
                                            GoogleStorageExceptionMappingService.parse(response)), file);
                    }
                }
                finally {
                    EntityUtils.consume(response.getEntity());
                }
                if(response.containsHeader(HttpHeaders.LOCATION)) {
                    // Session URI for the subsequent upload of the file body.
                    final String putTarget = response.getFirstHeader(HttpHeaders.LOCATION).getValue();
                    // Upload the file
                    final HttpPut put = new HttpPut(putTarget);
                    put.setEntity(entity);
                    final HttpResponse putResponse = client.execute(put);
                    try {
                        switch(putResponse.getStatusLine().getStatusCode()) {
                            case HttpStatus.SC_OK:
                            case HttpStatus.SC_CREATED:
                                return session.getClient().getObjectParser().parseAndClose(new InputStreamReader(
                                        putResponse.getEntity().getContent(), StandardCharsets.UTF_8), StorageObject.class);
                            default:
                                throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed",
                                        new HttpResponseException(putResponse.getStatusLine().getStatusCode(),
                                                GoogleStorageExceptionMappingService.parse(putResponse)), file);
                        }
                    }
                    finally {
                        EntityUtils.consume(putResponse.getEntity());
                    }
                }
                else {
                    throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed",
                            new HttpResponseException(response.getStatusLine().getStatusCode(),
                                    GoogleStorageExceptionMappingService.parse(response)), file);
                }
            }
            catch(IOException e) {
                throw new GoogleStorageExceptionMappingService().map("Upload {0} failed", e, file);
            }
        }

        @Override
        public long getContentLength() {
            return status.getLength();
        }
    };
    return this.write(file, status, command);
}
// Verifies an upload with Acl.CANNED_PRIVATE results in an object without
// public-read permission (the ACL is applied via predefinedAcl at upload time).
@Test
public void testWritePublicReadCannedPrivateAcl() throws Exception {
    final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final TransferStatus status = new TransferStatus();
    final byte[] content = RandomUtils.nextBytes(1033);
    status.setLength(content.length);
    status.setAcl(Acl.CANNED_PRIVATE);
    final HttpResponseOutputStream<StorageObject> out = new GoogleStorageWriteFeature(session).write(test, status, new DisabledConnectionCallback());
    new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
    out.close();
    assertNotNull(out.getStatus().getGeneration());
    assertTrue(new GoogleStorageFindFeature(session).find(test));
    // No EVERYONE/READ entry must be present on the private object.
    assertFalse(new GoogleStorageAccessControlListFeature(session)
        .getPermission(test).asList().contains(new Acl.UserAndRole(new Acl.GroupUser(Acl.GroupUser.EVERYONE), new Acl.Role(Acl.Role.READ))));
    new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Two versions are equal when their state versions and timestamps match and
 * their counters are either both null or hold the same long value (compared
 * numerically rather than via Object.equals).
 */
@Override
public boolean equals(final Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final DataVersion that = (DataVersion) o;
    if (getStateVersion() != that.getStateVersion() || getTimestamp() != that.getTimestamp()) {
        return false;
    }
    // Counters match when both are absent, or both present with the same value.
    if (counter == null || that.counter == null) {
        return counter == null && that.counter == null;
    }
    return counter.longValue() == that.counter.longValue();
}
// Verifies equals() for two DataVersion instances with identical timestamps
// (state version and counter left at their defaults).
@Test
public void testEquals() {
    DataVersion dataVersion = new DataVersion();
    DataVersion other = new DataVersion();
    other.setTimestamp(dataVersion.getTimestamp());
    Assert.assertTrue(dataVersion.equals(other));
}
/**
 * Tells whether the LACP timeout flag bit is set in this state.
 */
public boolean isTimeout() {
    final int timeoutBit = state & MASK_TIMEOUT;
    return timeoutBit != 0;
}
// Verifies the timeout flag (bit 0x02) is reported by isTimeout().
@Test
public void isTimeout() {
    LacpState state = new LacpState((byte) 0x2);
    assertTrue(state.isTimeout());
}
/**
 * Wipes the write permission of the broker named by the -b option on every
 * known name server, best-effort per server.
 *
 * @throws SubCommandException if the admin client cannot be started or used
 */
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    // Unique instance name so multiple admin tools can run concurrently.
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
    try {
        defaultMQAdminExt.start();
        String brokerName = commandLine.getOptionValue('b').trim();
        List<String> namesrvList = defaultMQAdminExt.getNameServerAddressList();
        if (namesrvList != null) {
            for (String namesrvAddr : namesrvList) {
                try {
                    int wipeTopicCount = defaultMQAdminExt.wipeWritePermOfBroker(namesrvAddr, brokerName);
                    System.out.printf("wipe write perm of broker[%s] in name server[%s] OK, %d%n",
                        brokerName,
                        namesrvAddr,
                        wipeTopicCount
                    );
                } catch (Exception e) {
                    // Best effort per name server: report the failure and continue.
                    System.out.printf("wipe write perm of broker[%s] in name server[%s] Failed%n",
                        brokerName,
                        namesrvAddr
                    );
                    e.printStackTrace();
                }
            }
        }
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
// Smoke test: executes the wipe-write-perm subcommand against broker
// "default-broker"; passes when no exception is thrown.
@Test
public void testExecute() throws SubCommandException {
    WipeWritePermSubCommand cmd = new WipeWritePermSubCommand();
    Options options = ServerUtil.buildCommandlineOptions(new Options());
    String[] subargs = new String[] {"-b default-broker"};
    final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(),
        subargs, cmd.buildCommandlineOptions(options), new DefaultParser());
    cmd.execute(commandLine, options, null);
}
/**
 * Decodes up to {@code length} definition levels into {@code values} starting
 * at {@code offset}: an element is set true where the level is 0 (the value
 * is absent). Returns the sum of the decoded levels, i.e. the non-null value
 * count for flat (0/1) definition levels.
 *
 * @throws IOException on decode failure
 * @throws ParquetDecodingException for an unknown run mode
 *         (IllegalStateException if fewer than {@code length} levels remain)
 */
public int readNext(boolean[] values, int offset, int length) throws IOException {
    int nonNullCount = 0;
    int destinationIndex = offset;
    int remainingToCopy = length;
    while (remainingToCopy > 0) {
        // Refill the current RLE/bit-packed run when exhausted.
        if (currentCount == 0) {
            if (!decode()) {
                break;
            }
        }

        int chunkSize = Math.min(remainingToCopy, currentCount);
        int endIndex = destinationIndex + chunkSize;
        switch (mode) {
            case RLE: {
                // A run of identical levels; level 0 maps to true.
                boolean rleValue = currentValue == 0;
                while (destinationIndex < endIndex) {
                    values[destinationIndex++] = rleValue;
                }
                nonNullCount += currentValue * chunkSize;
                break;
            }
            case PACKED: {
                int[] buffer = currentBuffer;
                // Consume from the tail of the unpacked buffer.
                for (int sourceIndex = buffer.length - currentCount; destinationIndex < endIndex; sourceIndex++, destinationIndex++) {
                    final int value = buffer[sourceIndex];
                    values[destinationIndex] = value == 0;
                    nonNullCount += value;
                }
                break;
            }
            default:
                throw new ParquetDecodingException("not a valid mode " + mode);
        }
        currentCount -= chunkSize;
        remainingToCopy -= chunkSize;
    }
    checkState(remainingToCopy == 0, "Failed to copy the requested number of definition levels");
    return nonNullCount;
}
// Verifies that requesting more definition levels than the page contains
// fails with the "Failed to copy..." state-check message.
@Test
public void tryReadingTooMany() {
    FlatDefinitionLevelDecoder dlDecoder = new FlatDefinitionLevelDecoder(valueCount, new ByteArrayInputStream(pageBytes));
    try {
        dlDecoder.readNext(new boolean[valueCount + 500], 0, valueCount + 500);
        fail("shouldn't come here");
    } catch (Exception e) {
        assertEquals(e.getMessage(), "Failed to copy the requested number of definition levels");
    }
}
@Override @CacheEvict(cacheNames = RedisKeyConstants.DEPT_CHILDREN_ID_LIST, allEntries = true) // allEntries 清空所有缓存,因为操作一个部门,涉及到多个缓存 public void updateDept(DeptSaveReqVO updateReqVO) { if (updateReqVO.getParentId() == null) { updateReqVO.setParentId(DeptDO.PARENT_ID_ROOT); } // 校验自己存在 validateDeptExists(updateReqVO.getId()); // 校验父部门的有效性 validateParentDept(updateReqVO.getId(), updateReqVO.getParentId()); // 校验部门名的唯一性 validateDeptNameUnique(updateReqVO.getId(), updateReqVO.getParentId(), updateReqVO.getName()); // 更新部门 DeptDO updateObj = BeanUtils.toBean(updateReqVO, DeptDO.class); deptMapper.updateById(updateObj); }
// Verifies updateDept() persists all changed fields for an existing record.
@Test
public void testUpdateDept() {
    // Mock data
    DeptDO dbDeptDO = randomPojo(DeptDO.class, o -> o.setStatus(randomCommonStatus()));
    deptMapper.insert(dbDeptDO);// @Sql: insert an existing record first
    // Prepare the request
    DeptSaveReqVO reqVO = randomPojo(DeptSaveReqVO.class, o -> {
        // Set the ID being updated
        o.setParentId(DeptDO.PARENT_ID_ROOT);
        o.setId(dbDeptDO.getId());
        o.setStatus(randomCommonStatus());
    });
    // Invoke
    deptService.updateDept(reqVO);
    // Verify the update is correct
    DeptDO deptDO = deptMapper.selectById(reqVO.getId()); // fetch the latest record
    assertPojoEquals(reqVO, deptDO);
}
/**
 * Renders a short human-readable description of the given type by delegating
 * to the shared {@code format} helper.
 *
 * @param input the type to describe
 * @return the formatted type description
 */
public static String simpleTypeDescription(Type input) {
    final StringBuilder description = new StringBuilder();
    format(description, input);
    return description.toString();
}
// Verifies simpleTypeDescription() formatting for object and primitive arrays.
@Test
public void testTypeFormatterOnArrays() throws Exception {
    assertEquals("Integer[]", ReflectHelpers.simpleTypeDescription(Integer[].class));
    assertEquals("int[]", ReflectHelpers.simpleTypeDescription(int[].class));
}
/**
 * Manually assigns the given partitions to this consumer, replacing any
 * previous assignment. An empty collection is equivalent to unsubscribe().
 *
 * @throws IllegalArgumentException if the collection is null or contains a
 *         partition with a null/blank topic
 */
@Override
public void assign(Collection<TopicPartition> partitions) {
    acquireAndEnsureOpen();
    try {
        if (partitions == null) {
            throw new IllegalArgumentException("Topic partitions collection to assign to cannot be null");
        }

        if (partitions.isEmpty()) {
            unsubscribe();
            return;
        }

        for (TopicPartition tp : partitions) {
            String topic = (tp != null) ? tp.topic() : null;
            if (isBlank(topic))
                throw new IllegalArgumentException("Topic partitions to assign to cannot have null or empty topic");
        }

        // Clear the buffered data which are not a part of newly assigned topics
        final Set<TopicPartition> currentTopicPartitions = new HashSet<>();

        for (TopicPartition tp : subscriptions.assignedPartitions()) {
            if (partitions.contains(tp))
                currentTopicPartitions.add(tp);
        }

        fetchBuffer.retainAll(currentTopicPartitions);

        // assignment change event will trigger autocommit if it is configured and the group id is specified. This is
        // to make sure offsets of topic partitions the consumer is unsubscribing from are committed since there will
        // be no following rebalance.
        //
        // See the ApplicationEventProcessor.process() method that handles this event for more detail.
        applicationEventHandler.add(new AssignmentChangeEvent(subscriptions.allConsumed(), time.milliseconds()));

        log.info("Assigned to partition(s): {}", partitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", ")));
        // A user-driven assignment change may require a metadata update for new topics.
        if (subscriptions.assignFromUser(new HashSet<>(partitions)))
            applicationEventHandler.add(new NewTopicsMetadataUpdateRequestEvent());
    } finally {
        release();
    }
}
// Verifies assign() rejects a partition whose topic is null.
@Test
public void testAssignOnNullTopicInPartition() {
    consumer = newConsumer();
    assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(null, 0))));
}
/**
 * Records a failed admin console login attempt for the given username and
 * originating address, and writes a security audit event. When either the
 * per-address or per-username attempt count exceeds its configured maximum,
 * the audit message notes that future attempts will be locked out.
 *
 * @param username the username used in the failed attempt
 * @param address  the originating address of the attempt
 */
public void recordFailedAttempt(String username, String address) {
    Log.warn("Failed admin console login attempt by "+username+" from "+address);

    final StringBuilder sb = new StringBuilder();

    // Per-address counter: single lookup instead of the previous double get + put.
    Long addressCount = attemptsPerIP.get(address);
    addressCount = (addressCount == null) ? 1L : addressCount + 1;
    attemptsPerIP.put(address, addressCount);
    if (addressCount > MAX_ATTEMPTS_PER_IP.getValue()) {
        Log.warn("Login attempt limit breached for address "+address);
        sb.append("Future login attempts from this address will be temporarily locked out. ");
    }

    // Per-username counter, same pattern.
    Long usernameCount = attemptsPerUsername.get(username);
    usernameCount = (usernameCount == null) ? 1L : usernameCount + 1;
    attemptsPerUsername.put(username, usernameCount);
    if (usernameCount > MAX_ATTEMPTS_PER_USERNAME.getValue()) {
        Log.warn("Login attempt limit breached for username "+username);
        sb.append("Future login attempts for this user will be temporarily locked out. ");
    }

    securityAuditManager.logEvent(username, "Failed admin console login attempt",
        "A failed login attempt to the admin console was made from address " + address + ". " + sb);
}
// Verifies audit messages: the first 10 failures log the plain message and the
// 11th (limit breached) logs the lockout notice for both address and user.
@Test
public void lockoutsWillBeAudited() {
    final String username = "test-user-c-" + StringUtils.randomString(10);
    for(int i = 0; i < 11; i ++) {
        loginLimitManager.recordFailedAttempt(username, "a.b.c.f");
    }
    verify(securityAuditManager, times(10)).logEvent(username, "Failed admin console login attempt", "A failed login attempt to the admin console was made from address a.b.c.f. ");
    verify(securityAuditManager, times(1)).logEvent(username, "Failed admin console login attempt", "A failed login attempt to the admin console was made from address a.b.c.f. Future login attempts from this address will be temporarily locked out. Future login attempts for this user will be temporarily locked out. ");
}
/**
 * Sets a 32-bit int at the given absolute index in little-endian byte order.
 *
 * @return this buffer, for chaining
 */
@Override
public ByteBuf setIntLE(int index, int value) {
    // Validate 4 bytes are accessible at index before the unchecked write.
    // (Per the release tests, this also rejects released buffers — confirm
    // in the checkIndex implementation.)
    checkIndex(index, 4);
    _setIntLE(index, value);
    return this;
}
// Writing to a buffer whose reference count has dropped to zero must fail the
// refCnt check. Replaced the anonymous Executable subclass with an equivalent
// lambda (Executable is a functional interface) for brevity.
@Test
public void testSetIntLEAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setIntLE(0, 1));
}
/**
 * Stops this table-input step, cancelling any in-flight database query.
 * No-op when the step is already stopped or its step data is disposed.
 *
 * @param smi step metadata (cast to TableInputMeta)
 * @param sdi step data (cast to TableInputData)
 * @throws KettleException if cancelling the query fails
 */
public void stopRunning( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {
    if ( this.isStopped() || sdi.isDisposed() ) {
      return;
    }
    // Serialise query cancellation against other database access on this step.
    dbLock.lock();
    try {
      meta = (TableInputMeta) smi;
      data = (TableInputData) sdi;
      setStopped( true );
      // Cancel the running query at most once; isCanceled guards re-entry.
      if ( data.db != null && data.db.getConnection() != null && !data.isCanceled ) {
        data.db.cancelQuery();
        data.isCanceled = true;
      }
    } finally {
      dbLock.unlock();
    }
}
// With the step running (not stopped, not disposed) but the DB connection
// null, stopRunning must skip cancellation entirely: no cancelStatement call
// and isCanceled stays false.
@Test
public void testStopRunningWhenStepIsNotStoppedNorStepDataInterfaceIsDisposedAndDatabaseConnectionIsNotValid() throws KettleException {
    doReturn( false ).when( mockTableInput ).isStopped();
    doReturn( false ).when( mockStepDataInterface ).isDisposed();
    when( mockStepDataInterface.db.getConnection() ).thenReturn( null );
    mockTableInput.stopRunning( mockStepMetaInterface, mockStepDataInterface );
    verify( mockTableInput, times( 1 ) ).isStopped();
    verify( mockStepDataInterface, times( 1 ) ).isDisposed();
    verify( mockStepDataInterface.db, times( 1 ) ).getConnection();
    verify( mockStepDataInterface.db, times( 0 ) ).cancelStatement( any( PreparedStatement.class ) );
    assertFalse( mockStepDataInterface.isCanceled );
}
/**
 * Retrieves one page of matching object names: runs the full query in the
 * requested sort order, slices out the requested page, and reports the total
 * match count alongside the page.
 */
@Override
public ListResult<String> retrieve(GroupVersionKind type, ListOptions options, PageRequest page) {
    var matchedNames = doRetrieve(type, options, page.getSort());
    var pageItems = ListResult.subList(matchedNames, page.getPageNumber(), page.getPageSize());
    return new ListResult<>(page.getPageNumber(), page.getPageSize(), matchedNames.size(), pageItems);
}
// Three matches, page 1 of size 2 -> the first two items are returned and the
// total reflects all three. Also pins the interaction pattern: getPageNumber
// and getPageSize are each read exactly twice, getSort once.
@Test
void retrieve() {
    var spyIndexedQueryEngine = spy(indexedQueryEngine);
    doReturn(List.of("object1", "object2", "object3")).when(spyIndexedQueryEngine)
        .doRetrieve(any(), any(), eq(Sort.unsorted()));
    var gvk = GroupVersionKind.fromExtension(DemoExtension.class);
    var pageRequest = mock(PageRequest.class);
    when(pageRequest.getPageNumber()).thenReturn(1);
    when(pageRequest.getPageSize()).thenReturn(2);
    when(pageRequest.getSort()).thenReturn(Sort.unsorted());
    var result = spyIndexedQueryEngine.retrieve(gvk, new ListOptions(), pageRequest);
    assertThat(result.getItems()).containsExactly("object1", "object2");
    assertThat(result.getTotal()).isEqualTo(3);
    verify(spyIndexedQueryEngine).doRetrieve(eq(gvk), any(), eq(Sort.unsorted()));
    verify(pageRequest, times(2)).getPageNumber();
    verify(pageRequest, times(2)).getPageSize();
    verify(pageRequest).getSort();
}
/**
 * Removes the timeline collector registered for the given application.
 *
 * @param appId the application whose collector should be removed
 * @return whatever the collector manager reports for the removal
 */
public boolean removeApplication(ApplicationId appId) {
    return collectorManager.remove(appId);
}
// Stopping the AM container must remove the application's collector from the
// aux service: the app is present before stopContainer and gone afterwards.
@Test
void testRemoveApplication() throws Exception {
    auxService = createCollectorAndAddApplication();
    // auxService should have a single app
    assertTrue(auxService.hasApplication(appAttemptId.getApplicationId()));
    ContainerId containerId = getAMContainerId();
    ContainerTerminationContext context = mock(ContainerTerminationContext.class);
    when(context.getContainerId()).thenReturn(containerId);
    when(context.getContainerType()).thenReturn(ContainerType.APPLICATION_MASTER);
    auxService.stopContainer(context);
    // auxService should not have that app
    assertFalse(auxService.hasApplication(appAttemptId.getApplicationId()));
    auxService.close();
}
/**
 * Validates the indexing config against the schema (no-op when no schema is
 * given): every column referenced by an index/partition/star-tree config must
 * exist in the schema, inverted indexes cannot coexist with noDictionary on
 * the same column, star-tree columns must be single-valued, range indexes need
 * numeric or dictionary-encoded columns, var-length dictionaries need
 * STRING/BYTES columns, and json indexes need single-valued STRING columns.
 *
 * @throws IllegalStateException on the first violation found
 */
private static void validateIndexingConfig(IndexingConfig indexingConfig, @Nullable Schema schema) {
  if (schema == null) {
    return;
  }
  // Collect every (column, owning-config-name) pair so existence/type checks
  // can be run uniformly at the end.
  ArrayListMultimap<String, String> columnNameToConfigMap = ArrayListMultimap.create();
  Set<String> noDictionaryColumnsSet = new HashSet<>();
  if (indexingConfig.getNoDictionaryColumns() != null) {
    for (String columnName : indexingConfig.getNoDictionaryColumns()) {
      columnNameToConfigMap.put(columnName, "No Dictionary Column Config");
      noDictionaryColumnsSet.add(columnName);
    }
  }
  // Bloom filter columns can come from either the legacy list or the config map.
  Set<String> bloomFilterColumns = new HashSet<>();
  if (indexingConfig.getBloomFilterColumns() != null) {
    bloomFilterColumns.addAll(indexingConfig.getBloomFilterColumns());
  }
  if (indexingConfig.getBloomFilterConfigs() != null) {
    bloomFilterColumns.addAll(indexingConfig.getBloomFilterConfigs().keySet());
  }
  for (String bloomFilterColumn : bloomFilterColumns) {
    columnNameToConfigMap.put(bloomFilterColumn, "Bloom Filter Config");
  }
  if (indexingConfig.getInvertedIndexColumns() != null) {
    for (String columnName : indexingConfig.getInvertedIndexColumns()) {
      // An inverted index requires a dictionary, so it conflicts with noDictionary.
      if (noDictionaryColumnsSet.contains(columnName)) {
        throw new IllegalStateException("Cannot create an Inverted index on column " + columnName
            + " specified in the noDictionaryColumns config");
      }
      columnNameToConfigMap.put(columnName, "Inverted Index Config");
    }
  }
  if (indexingConfig.getOnHeapDictionaryColumns() != null) {
    for (String columnName : indexingConfig.getOnHeapDictionaryColumns()) {
      columnNameToConfigMap.put(columnName, "On Heap Dictionary Column Config");
    }
  }
  if (indexingConfig.getRangeIndexColumns() != null) {
    for (String columnName : indexingConfig.getRangeIndexColumns()) {
      columnNameToConfigMap.put(columnName, "Range Column Config");
    }
  }
  if (indexingConfig.getSortedColumn() != null) {
    for (String columnName : indexingConfig.getSortedColumn()) {
      columnNameToConfigMap.put(columnName, "Sorted Column Config");
    }
  }
  if (indexingConfig.getVarLengthDictionaryColumns() != null) {
    for (String columnName : indexingConfig.getVarLengthDictionaryColumns()) {
      columnNameToConfigMap.put(columnName, "Var Length Column Config");
    }
  }
  if (indexingConfig.getSegmentPartitionConfig() != null
      && indexingConfig.getSegmentPartitionConfig().getColumnPartitionMap() != null) {
    for (String columnName : indexingConfig.getSegmentPartitionConfig().getColumnPartitionMap().keySet()) {
      columnNameToConfigMap.put(columnName, "Segment Partition Config");
    }
  }
  Set<String> jsonIndexColumns = new HashSet<>();
  // Ignore jsonIndexColumns when jsonIndexConfigs is configured
  if (indexingConfig.getJsonIndexConfigs() != null) {
    jsonIndexColumns.addAll(indexingConfig.getJsonIndexConfigs().keySet());
  } else {
    if (indexingConfig.getJsonIndexColumns() != null) {
      jsonIndexColumns.addAll(indexingConfig.getJsonIndexColumns());
    }
  }
  for (String columnName : jsonIndexColumns) {
    columnNameToConfigMap.put(columnName, "Json Index Config");
  }
  List<StarTreeIndexConfig> starTreeIndexConfigList = indexingConfig.getStarTreeIndexConfigs();
  if (starTreeIndexConfigList != null) {
    for (StarTreeIndexConfig starTreeIndexConfig : starTreeIndexConfigList) {
      // Dimension split order cannot be null
      for (String columnName : starTreeIndexConfig.getDimensionsSplitOrder()) {
        columnNameToConfigMap.put(columnName, STAR_TREE_CONFIG_NAME);
      }
      List<String> functionColumnPairs = starTreeIndexConfig.getFunctionColumnPairs();
      List<StarTreeAggregationConfig> aggregationConfigs = starTreeIndexConfig.getAggregationConfigs();
      // The two aggregation-spec styles are mutually exclusive.
      Preconditions.checkState(functionColumnPairs == null || aggregationConfigs == null,
          "Only one of 'functionColumnPairs' or 'aggregationConfigs' can be specified in StarTreeIndexConfig");
      // Tracks stored (deduplicated) aggregation types to warn on duplicates.
      Set<AggregationFunctionColumnPair> storedTypes = new HashSet<>();
      if (functionColumnPairs != null) {
        for (String functionColumnPair : functionColumnPairs) {
          AggregationFunctionColumnPair columnPair;
          try {
            columnPair = AggregationFunctionColumnPair.fromColumnName(functionColumnPair);
          } catch (Exception e) {
            throw new IllegalStateException("Invalid StarTreeIndex config: " + functionColumnPair + ". Must be"
                + "in the form <Aggregation function>__<Column name>");
          }
          AggregationFunctionColumnPair storedType = AggregationFunctionColumnPair.resolveToStoredType(columnPair);
          if (!storedTypes.add(storedType)) {
            LOGGER.warn("StarTreeIndex config duplication: {} already matches existing function column pair: {}. ",
                columnPair, storedType);
          }
          String columnName = columnPair.getColumn();
          // The "*" pseudo-column is not a schema column; skip existence check.
          if (!columnName.equals(AggregationFunctionColumnPair.STAR)) {
            columnNameToConfigMap.put(columnName, STAR_TREE_CONFIG_NAME);
          }
        }
      }
      if (aggregationConfigs != null) {
        for (StarTreeAggregationConfig aggregationConfig : aggregationConfigs) {
          AggregationFunctionColumnPair columnPair;
          try {
            columnPair = AggregationFunctionColumnPair.fromAggregationConfig(aggregationConfig);
          } catch (Exception e) {
            throw new IllegalStateException("Invalid StarTreeIndex config: " + aggregationConfig);
          }
          AggregationFunctionColumnPair storedType = AggregationFunctionColumnPair.resolveToStoredType(columnPair);
          if (!storedTypes.add(storedType)) {
            LOGGER.warn("StarTreeIndex config duplication: {} already matches existing function column pair: {}. ",
                columnPair, storedType);
          }
          String columnName = columnPair.getColumn();
          if (!columnName.equals(AggregationFunctionColumnPair.STAR)) {
            columnNameToConfigMap.put(columnName, STAR_TREE_CONFIG_NAME);
          }
        }
      }
      List<String> skipDimensionList = starTreeIndexConfig.getSkipStarNodeCreationForDimensions();
      if (skipDimensionList != null) {
        for (String columnName : skipDimensionList) {
          columnNameToConfigMap.put(columnName, STAR_TREE_CONFIG_NAME);
        }
      }
    }
  }
  // Uniform checks: every referenced column must exist in the schema, and
  // star-tree columns must additionally be single-valued.
  for (Map.Entry<String, String> entry : columnNameToConfigMap.entries()) {
    String columnName = entry.getKey();
    String configName = entry.getValue();
    FieldSpec columnFieldSpec = schema.getFieldSpecFor(columnName);
    Preconditions.checkState(columnFieldSpec != null,
        "Column Name " + columnName + " defined in " + configName + " must be a valid column defined in the schema");
    if (configName.equals(STAR_TREE_CONFIG_NAME)) {
      Preconditions.checkState(columnFieldSpec.isSingleValueField(),
          "Column Name " + columnName + " defined in " + configName + " must be a single value column");
    }
  }
  // Range index semantic validation
  // Range index can be defined on numeric columns and any column with a dictionary
  if (indexingConfig.getRangeIndexColumns() != null) {
    for (String rangeIndexCol : indexingConfig.getRangeIndexColumns()) {
      Preconditions.checkState(
          schema.getFieldSpecFor(rangeIndexCol).getDataType().isNumeric() || !noDictionaryColumnsSet.contains(
              rangeIndexCol), "Cannot create a range index on non-numeric/no-dictionary column " + rangeIndexCol);
    }
  }
  // Var length dictionary semantic validation
  if (indexingConfig.getVarLengthDictionaryColumns() != null) {
    for (String varLenDictCol : indexingConfig.getVarLengthDictionaryColumns()) {
      FieldSpec varLenDictFieldSpec = schema.getFieldSpecFor(varLenDictCol);
      switch (varLenDictFieldSpec.getDataType().getStoredType()) {
        case STRING:
        case BYTES:
          continue;
        default:
          throw new IllegalStateException(
              "var length dictionary can only be created for columns of type STRING and BYTES. Invalid for column "
                  + varLenDictCol);
      }
    }
  }
  // Json index semantic validation: single-valued STRING columns only.
  for (String jsonIndexColumn : jsonIndexColumns) {
    FieldSpec fieldSpec = schema.getFieldSpecFor(jsonIndexColumn);
    Preconditions.checkState(
        fieldSpec.isSingleValueField() && fieldSpec.getDataType().getStoredType() == DataType.STRING,
        "Json index can only be created for single value String column. Invalid for column: %s", jsonIndexColumn);
  }
}
// Exercises validateIndexingConfig through TableConfigUtils.validate: for each
// index kind, an empty column name passes while an unknown column ("myCol2")
// fails; then covers star-tree, field-config, no-dictionary conflict, json
// index, range index, and var-length dictionary semantic checks.
@Test
public void testValidateIndexingConfig() {
  // Schema: single-value STRING/BYTES/INT columns plus one multi-value STRING column.
  Schema schema =
      new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
          .addSingleValueDimension("bytesCol", FieldSpec.DataType.BYTES)
          .addSingleValueDimension("intCol", FieldSpec.DataType.INT)
          .addMultiValueDimension("multiValCol", FieldSpec.DataType.STRING).build();
  // Bloom filter on an unknown column must fail.
  TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setBloomFilterColumns(Arrays.asList("myCol2")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid Bloom filter column name");
  } catch (Exception e) {
    // expected
  }
  // Inverted index: empty name passes, unknown name fails.
  tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setInvertedIndexColumns(Arrays.asList(""))
          .build();
  TableConfigUtils.validate(tableConfig, schema);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setInvertedIndexColumns(Arrays.asList("myCol2")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid Inverted Index column name");
  } catch (Exception e) {
    // expected
  }
  // No-dictionary: empty name passes, unknown name fails.
  tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setNoDictionaryColumns(Arrays.asList(""))
          .build();
  TableConfigUtils.validate(tableConfig, schema);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setNoDictionaryColumns(Arrays.asList("myCol2")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid No Dictionary column name");
  } catch (Exception e) {
    // expected
  }
  // On-heap dictionary: empty name passes, unknown name fails.
  tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setOnHeapDictionaryColumns(Arrays.asList(""))
          .build();
  TableConfigUtils.validate(tableConfig, schema);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setOnHeapDictionaryColumns(Arrays.asList("myCol2")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid On Heap Dictionary column name");
  } catch (Exception e) {
    // expected
  }
  // Range index: empty name passes, unknown name fails.
  tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setRangeIndexColumns(Arrays.asList(""))
          .build();
  TableConfigUtils.validate(tableConfig, schema);
  tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setRangeIndexColumns(Arrays.asList("myCol2"))
          .build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid Range Index column name");
  } catch (Exception e) {
    // expected
  }
  // Sorted column: empty name passes, unknown name fails.
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setSortedColumn("").build();
  TableConfigUtils.validate(tableConfig, schema);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setSortedColumn("myCol2").build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid Sorted column name");
  } catch (Exception e) {
    // expected
  }
  // Var-length dictionary: empty name passes, unknown name fails.
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setVarLengthDictionaryColumns(Arrays.asList("")).build();
  TableConfigUtils.validate(tableConfig, schema);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setVarLengthDictionaryColumns(Arrays.asList("myCol2")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid Var Length Dictionary column name");
  } catch (Exception e) {
    // expected
  }
  // Segment partition config keyed on an unknown column must fail.
  ColumnPartitionConfig columnPartitionConfig = new ColumnPartitionConfig("Murmur", 4);
  Map<String, ColumnPartitionConfig> partitionConfigMap = new HashMap<>();
  partitionConfigMap.put("myCol2", columnPartitionConfig);
  SegmentPartitionConfig partitionConfig = new SegmentPartitionConfig(partitionConfigMap);
  tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setSegmentPartitionConfig(partitionConfig)
          .build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid Segment Partition column name");
  } catch (Exception e) {
    // expected
  }
  // Although this config makes no sense, it should pass the validation phase
  StarTreeIndexConfig starTreeIndexConfig =
      new StarTreeIndexConfig(List.of("myCol"), List.of("myCol"), List.of("SUM__myCol"), null, 1);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setStarTreeIndexConfigs(List.of(starTreeIndexConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
  } catch (Exception e) {
    Assert.fail("Should not fail for valid StarTreeIndex config column name");
  }
  // Unknown column in the dimension split order must fail.
  starTreeIndexConfig = new StarTreeIndexConfig(List.of("myCol2"), List.of("myCol"), List.of("SUM__myCol"), null, 1);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setStarTreeIndexConfigs(List.of(starTreeIndexConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid StarTreeIndex config column name in dimension split order");
  } catch (Exception e) {
    // expected
  }
  // Unknown column in skip-star-node list must fail.
  starTreeIndexConfig = new StarTreeIndexConfig(List.of("myCol"), List.of("myCol2"), List.of("SUM__myCol"), null, 1);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setStarTreeIndexConfigs(List.of(starTreeIndexConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid StarTreeIndex config column name in skip star node for dimension");
  } catch (Exception e) {
    // expected
  }
  // Unknown column in a function column pair must fail.
  starTreeIndexConfig = new StarTreeIndexConfig(List.of("myCol"), List.of("myCol"), List.of("SUM__myCol2"), null, 1);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setStarTreeIndexConfigs(List.of(starTreeIndexConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid StarTreeIndex config column name in function column pair");
  } catch (Exception e) {
    // expected
  }
  // Unknown column in an aggregation config must fail.
  starTreeIndexConfig =
      new StarTreeIndexConfig(List.of("myCol"), null, null, List.of(new StarTreeAggregationConfig("myCol2", "SUM")),
          1);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setStarTreeIndexConfigs(List.of(starTreeIndexConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid StarTreeIndex config column name in aggregation config");
  } catch (Exception e) {
    // expected
  }
  // Supplying both functionColumnPairs and aggregationConfigs must fail.
  starTreeIndexConfig = new StarTreeIndexConfig(List.of("myCol"), null, List.of("SUM__myCol"),
      List.of(new StarTreeAggregationConfig("myCol", "SUM")), 1);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setStarTreeIndexConfigs(List.of(starTreeIndexConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid StarTreeIndex config with both function column pair and aggregation config");
  } catch (Exception e) {
    // expected
  }
  // Multi-value columns are not allowed anywhere in a star-tree config.
  starTreeIndexConfig =
      new StarTreeIndexConfig(List.of("multiValCol"), List.of("multiValCol"), List.of("SUM__multiValCol"), null, 1);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setStarTreeIndexConfigs(List.of(starTreeIndexConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for multi-value column name in StarTreeIndex config");
  } catch (Exception e) {
    // expected
  }
  // Field config list referencing an unknown column must fail.
  FieldConfig fieldConfig = new FieldConfig("myCol2", null, Collections.emptyList(), null, null);
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setFieldConfigList(Arrays.asList(fieldConfig)).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for invalid column name in Field Config List");
  } catch (Exception e) {
    // expected
  }
  // A column cannot be both no-dictionary and inverted-indexed.
  List<String> columnList = Arrays.asList("myCol");
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setNoDictionaryColumns(columnList)
      .setInvertedIndexColumns(columnList).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for valid column name in both no dictionary and inverted index column config");
  } catch (Exception e) {
    // expected
  }
  // Json index: unknown column, non-STRING column, and multi-value column all fail.
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setJsonIndexColumns(Arrays.asList("non-existent-column")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for non existent column in Json index config");
  } catch (Exception e) {
    // expected
  }
  tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setJsonIndexColumns(Arrays.asList("intCol"))
          .build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for Json index defined on non string column");
  } catch (Exception e) {
    // expected
  }
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setJsonIndexColumns(Arrays.asList("multiValCol")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for Json index defined on multi-value column");
  } catch (Exception e) {
    // expected
  }
  // Range index on a dictionary-encoded string column is allowed...
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setRangeIndexColumns(columnList).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
  } catch (Exception e) {
    Assert.fail("Should work for range index defined on dictionary encoded string column");
  }
  // ...but not on a non-numeric no-dictionary column.
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setRangeIndexColumns(columnList)
      .setNoDictionaryColumns(columnList).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for range index defined on non numeric/no-dictionary column");
  } catch (Exception e) {
    // Expected
  }
  // Var-length dictionary on a non STRING/BYTES column must fail.
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setVarLengthDictionaryColumns(Arrays.asList("intCol")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for Var length dictionary defined for non string/bytes column");
  } catch (Exception e) {
    // expected
  }
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
      .setJsonIndexColumns(Arrays.asList("multiValCol")).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail for Json Index defined on a multi value column");
  } catch (Exception e) {
    // expected
  }
}
/**
 * Builds the external REST service that accompanies the JobManager deployment.
 * The concrete service shape is delegated to the configured exposed type
 * (e.g. ClusterIP/NodePort/LoadBalancer — determined by the cluster config).
 *
 * @return a singleton list containing the external REST {@link Service}
 * @throws IOException declared by the decorator interface
 */
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
    final Service service =
            kubernetesJobManagerParameters
                    .getRestServiceExposedType()
                    .serviceType()
                    .buildUpExternalRestService(kubernetesJobManagerParameters);
    return Collections.singletonList(service);
}
// Verifies every externally visible property of the generated REST service:
// API version, name, common labels, ClusterIP type, the single REST port,
// the JobManager selector (common labels + component label), and that
// customized annotations are carried through.
@Test
void testBuildAccompanyingKubernetesResources() throws IOException {
    final List<HasMetadata> resources = this.externalServiceDecorator.buildAccompanyingKubernetesResources();
    assertThat(resources).hasSize(1);
    final Service restService = (Service) resources.get(0);
    assertThat(restService.getApiVersion()).isEqualTo(Constants.API_VERSION);
    assertThat(restService.getMetadata().getName())
            .isEqualTo(ExternalServiceDecorator.getExternalServiceName(CLUSTER_ID));
    final Map<String, String> expectedLabels = getCommonLabels();
    assertThat(restService.getMetadata().getLabels()).isEqualTo(expectedLabels);
    assertThat(restService.getSpec().getType())
            .isEqualTo(KubernetesConfigOptions.ServiceExposedType.ClusterIP.name());
    final List<ServicePort> expectedServicePorts =
            Collections.singletonList(
                    new ServicePortBuilder()
                            .withName(Constants.REST_PORT_NAME)
                            .withPort(REST_PORT)
                            .withNewTargetPort(Integer.valueOf(REST_BIND_PORT))
                            .build());
    assertThat(restService.getSpec().getPorts()).isEqualTo(expectedServicePorts);
    // The selector additionally carries the jobmanager component label.
    expectedLabels.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_JOB_MANAGER);
    assertThat(restService.getSpec().getSelector()).isEqualTo(expectedLabels);
    final Map<String, String> resultAnnotations = restService.getMetadata().getAnnotations();
    assertThat(resultAnnotations).isEqualTo(customizedAnnotations);
}
/**
 * Tells whether {@code instance} equals any of the given candidate values.
 * A null instance or a null candidate array never matches.
 *
 * @param instance    the value to look for (may be null)
 * @param maybeValues candidate values (may be null or empty)
 * @return true when some candidate equals {@code instance}
 */
public static boolean is(Object instance, Object... maybeValues) {
    if (instance == null || maybeValues == null) {
        return false;
    }
    for (Object candidate : maybeValues) {
        if (instance.equals(candidate)) {
            return true;
        }
    }
    return false;
}
// ReflectKit.is matches equal boxed values of the same type but not across
// numeric types (Integer vs Long). Uses assertTrue/assertFalse instead of
// assertEquals(true/false, ...) for clearer intent and failure messages.
@Test
public void testIs() {
    Assert.assertTrue(ReflectKit.is(22, 22));
    Assert.assertTrue(ReflectKit.is(22L, 22L));
    Assert.assertTrue(ReflectKit.is(true, Boolean.TRUE));
    Assert.assertFalse(ReflectKit.is(22, 22L));
}
/**
 * Builds the JVM memory gauge set: totals (heap + non-heap), heap, non-heap,
 * and one group per memory pool. Usage ratios fall back to the committed size
 * when the pool/area reports no max (-1), and total.max is -1 whenever the
 * non-heap max is undefined.
 *
 * @return an unmodifiable name-to-gauge map
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> gauges = new HashMap<>();
    gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit()
            + mxBean.getNonHeapMemoryUsage().getInit());
    gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed()
            + mxBean.getNonHeapMemoryUsage().getUsed());
    // A non-heap max of -1 means "undefined", so the combined max is undefined too.
    gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1
            ? -1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
    gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted()
            + mxBean.getNonHeapMemoryUsage().getCommitted());

    gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
    gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
    gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
    gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
    gauges.put("heap.usage", new RatioGauge() {
        @Override
        protected Ratio getRatio() {
            final MemoryUsage usage = mxBean.getHeapMemoryUsage();
            return Ratio.of(usage.getUsed(), usage.getMax());
        }
    });

    gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
    gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
    gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
    gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
    gauges.put("non-heap.usage", new RatioGauge() {
        @Override
        protected Ratio getRatio() {
            final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
            // Fall back to committed when max is undefined (-1).
            return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
        }
    });

    for (final MemoryPoolMXBean pool : memoryPools) {
        // Pool names may contain spaces; normalise them to dashes for metric names.
        final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
        gauges.put(name(poolName, "usage"), new RatioGauge() {
            @Override
            protected Ratio getRatio() {
                MemoryUsage usage = pool.getUsage();
                return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
            }
        });
        gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
        gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
        gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());
        // Only register GC usage metrics if the memory pool supports usage statistics.
        if (pool.getCollectionUsage() != null) {
            gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () -> pool.getCollectionUsage().getUsed());
        }
        gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
    }
    return Collections.unmodifiableMap(gauges);
}
// The "total.max" gauge must report heap max + non-heap max (the mocked
// usages sum to 44 in this fixture).
@Test
public void hasAGaugeForTotalMax() {
    final Gauge gauge = (Gauge) gauges.getMetrics().get("total.max");
    assertThat(gauge.getValue())
            .isEqualTo(44L);
}
/**
 * Extracts the clear-text identity prefix from a generated password of the
 * form "identity&lt;DELIMITER&gt;...&lt;DELIMITER&gt;...". Returns empty when this
 * generator does not prepend the username, derives it instead, the password
 * is blank, or the password does not contain exactly two delimiters.
 *
 * @param password the generated password/signature to inspect
 * @return the identity prefix, if one is present
 */
public Optional<String> identityFromSignature(final String password) {
    // for some generators, identity in the clear is just not a part of the password
    if (!prependUsername || shouldDeriveUsername() || StringUtils.isBlank(password)) {
        return Optional.empty();
    }
    // checking for the case of unexpected format
    if (StringUtils.countMatches(password, DELIMITER) == 2) {
        if (usernameIsTimestamp()) {
            // identity spans "<username><DELIMITER><timestamp>": keep everything
            // up to (but excluding) the second delimiter
            final int indexOfSecondDelimiter = password.indexOf(DELIMITER, password.indexOf(DELIMITER) + 1);
            return Optional.of(password.substring(0, indexOfSecondDelimiter));
        } else {
            // identity is just the part before the first delimiter
            return Optional.of(password.substring(0, password.indexOf(DELIMITER)));
        }
    }
    return Optional.empty();
}
// The standard generator must recover the E.164 identity from the password it
// generated for those credentials.
@Test
public void testGetIdentityFromSignature() {
    final String identity = standardGenerator.identityFromSignature(standardCredentials.password()).orElseThrow();
    assertEquals(E164, identity);
}
/**
 * Returns the set of extensions supported by this implementation. The base
 * implementation returns null (presumably meaning "no declared set" —
 * TODO confirm how callers interpret null vs. an empty set).
 */
@SuppressWarnings("SameReturnValue")
public Set<String> getSupportedExtensions() {
    return null;
}
// The base implementation declares no supported extensions and returns null.
@Test
public void testGetSupportedExtensions() {
    Set<String> result = instance.getSupportedExtensions();
    assertNull(result);
}
/**
 * Generates an inclusive series of consecutive integers from {@code start} to
 * {@code end}, stepping by +1 when ascending and -1 when descending (delegates
 * to the step-taking overload).
 *
 * @param start first value of the series
 * @param end   last value of the series (inclusive)
 * @return the generated series
 */
@Udf
public List<Integer> generateSeriesInt(
    @UdfParameter(description = "The beginning of the series") final int start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final int end
) {
  // Compare directly instead of testing (end - start > 0): the subtraction can
  // overflow for extreme int values and would then pick the wrong direction.
  return generateSeriesInt(start, end, end > start ? 1 : -1);
}
// generateSeriesInt(0, 9, 3) must produce the inclusive sequence 0, 3, 6, 9.
@Test
public void shouldComputeIntRangeWithPositiveOddStepInt() {
    final List<Integer> range = rangeUdf.generateSeriesInt(0, 9, 3);
    assertThat(range, hasSize(4));
    int val = 0;
    for (final int i : range) {
        assertThat(val, is(i));
        val += 3;
    }
}
/**
 * Returns the contract's declared methods that are safe to expose: they must
 * not be synthetic, must have acceptable parameters, and must not be one of
 * the reserved names (load/kill/linkLibraries) or contain "event".
 *
 * @param contract the contract class to inspect
 * @return the filtered list of exposable methods
 */
public static List<Method> extractValidMethods(Class contract) {
    return Arrays.stream(contract.getDeclaredMethods())
        .filter(m -> isExposable(m))
        .collect(Collectors.toList());
}

/** Tells whether a single declared method passes all the exposure filters. */
private static boolean isExposable(Method method) {
    if (method.isSynthetic() || !parametersAreMatching(method)) {
        return false;
    }
    final String methodName = method.getName();
    return !methodName.toLowerCase().contains("event")
        && !methodName.equals("load")
        && !methodName.equals("kill")
        && !methodName.equals("linkLibraries");
}
// None of the methods surviving the filter may have "event" in their name
// (case-insensitively).
@Test
public void testExtractValidMethods() {
    List<Method> filteredMethods = MethodFilter.extractValidMethods(greeterContractClass);
    filteredMethods.forEach(m -> assertFalse(m.getName().toLowerCase().contains("event")));
}
/**
 * Reports whether at least one operation on the bean carries the @Handler
 * annotation (i.e. the collected set is non-empty).
 */
public boolean hasAnyMethodHandlerAnnotation() {
    return !operationsWithHandlerAnnotation.isEmpty();
}
// An anonymous inner class implementing the handler-annotated functional
// interface must still be detected as having a @Handler method.
@Test
public void testHandlerInFunctionalInterfaceWithAnonymousInnerClass() {
    MyHandlerInterface mhi = new MyHandlerInterface() {
        @Override
        public String myMethod() {
            return "";
        }
    };
    BeanInfo info = new BeanInfo(context, mhi.getClass());
    assertTrue(info.hasAnyMethodHandlerAnnotation());
}
/**
 * Decodes a raw Beats message payload as JSON and hands the parsed tree to
 * {@code parseEvent}. Returns null (message dropped) when the payload is not
 * parseable JSON or parses to a null/missing node.
 *
 * @param rawMessage the raw message whose payload holds the JSON event
 * @return the parsed message, or null when decoding fails
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final byte[] payload = rawMessage.getPayload();
    final JsonNode event;
    try {
        event = objectMapper.readTree(payload);
        if (event == null || event.isMissingNode()) {
            throw new IOException("null result");
        }
    } catch (IOException e) {
        // Include the exception as the last argument so the cause is logged
        // instead of being silently dropped.
        LOG.error("Couldn't decode raw message {}", rawMessage, e);
        return null;
    }
    return parseEvent(event);
}
// With no_beats_prefix enabled, filebeat fields land without the "filebeat_"
// prefix; pins message text, timestamp, beats_type, source fields, the
// collector id, and the tag list.
@Test
public void decodeMessagesHandlesFilebeatMessagesWithoutPrefix() throws Exception {
    configuration = new Configuration(Collections.singletonMap("no_beats_prefix", true));
    codec = new Beats2Codec(configuration, objectMapper, messageFactory);
    final Message message = codec.decode(messageFromJson("filebeat.json"));
    assertThat(message).isNotNull();
    assertThat(message.getMessage()).isEqualTo("TEST");
    assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
    assertThat(message.getField("beats_type")).isEqualTo("filebeat");
    assertThat(message.getField("source")).isEqualTo("/tmp/test.log");
    assertThat(message.getField("input_type")).isEqualTo("log");
    assertThat(message.getField("count")).isEqualTo(1);
    assertThat(message.getField("offset")).isEqualTo(0);
    assertThat(message.getField(Message.FIELD_GL2_SOURCE_COLLECTOR)).isEqualTo("1234-5678-1234-5678");
    assertThat(message.getField("filebeat_" + Message.FIELD_GL2_SOURCE_COLLECTOR)).isNull();
    @SuppressWarnings("unchecked")
    final List<String> tags = (List<String>) message.getField("tags");
    assertThat(tags).containsOnly("foobar", "test");
}
/**
 * Returns the database vendor's preferred term for "schema". This
 * implementation declares none and returns null.
 */
@Override
public String getSchemaTerm() {
    return null;
}
// The metadata implementation declares no schema term, so null is expected.
@Test
void assertGetSchemaTerm() {
    assertNull(metaData.getSchemaTerm());
}
/**
 * Evaluates the given arithmetic expression and returns its numeric value
 * (convenience wrapper around a fresh {@link Calculator}).
 *
 * @param expression the expression to evaluate
 * @return the computed result
 */
public static double conversion(String expression) {
    final Calculator calculator = new Calculator();
    return calculator.calculate(expression);
}
// Mixed expression with unary minus, parentheses and decimals must evaluate
// exactly to 10.64.
@Test
public void conversationTest() {
    final double conversion = Calculator.conversion("(0*1--3)-5/-4-(3*(-2.13))");
    assertEquals(10.64, conversion, 0);
}
/**
 * Validates the (tenant, dataId, group) triple: the tenant is checked by
 * {@code checkTenant}, while dataId and group must both be non-blank and pass
 * the character validity check.
 *
 * @throws NacosException with CLIENT_INVALID_PARAM when dataId or group is invalid
 */
public static void checkTdg(String tenant, String dataId, String group) throws NacosException {
    checkTenant(tenant);
    if (isBlankOrInvalid(dataId)) {
        throw new NacosException(NacosException.CLIENT_INVALID_PARAM, DATAID_INVALID_MSG);
    }
    if (isBlankOrInvalid(group)) {
        throw new NacosException(NacosException.CLIENT_INVALID_PARAM, GROUP_INVALID_MSG);
    }
}

/** A parameter is rejected when it is blank or fails the validity check. */
private static boolean isBlankOrInvalid(String value) {
    return StringUtils.isBlank(value) || !ParamUtils.isValid(value);
}
// Valid non-blank tenant/dataId/group values must pass without throwing.
@Test
void testCheckTdg() throws NacosException {
    String tenant = "a";
    String dataId = "b";
    String group = "c";
    ParamUtils.checkTdg(tenant, dataId, group);
}
/**
 * Returns the task-scoped StreamsProducer (delegates to the active task
 * creator; used under exactly-once-alpha where each task has its own producer).
 */
StreamsProducer streamsProducerForTask(final TaskId taskId) {
    return activeTaskCreator.streamsProducerForTask(taskId);
}
// Under EOS-alpha each task commits through its own producer: both tasks'
// offsets must be committed via producer transactions (with the consumer
// group metadata) and nothing else may touch the producer.
@Test
public void shouldCommitViaProducerIfEosAlphaEnabled() {
    final StreamsProducer producer = mock(StreamsProducer.class);
    when(activeTaskCreator.streamsProducerForTask(any(TaskId.class)))
        .thenReturn(producer);
    final Map<TopicPartition, OffsetAndMetadata> offsetsT01 = singletonMap(t1p1, new OffsetAndMetadata(0L, null));
    final Map<TopicPartition, OffsetAndMetadata> offsetsT02 = singletonMap(t1p2, new OffsetAndMetadata(1L, null));
    shouldCommitViaProducerIfEosEnabled(ProcessingMode.EXACTLY_ONCE_ALPHA, offsetsT01, offsetsT02);
    verify(producer).commitTransaction(offsetsT01, new ConsumerGroupMetadata("appId"));
    verify(producer).commitTransaction(offsetsT02, new ConsumerGroupMetadata("appId"));
    verifyNoMoreInteractions(producer);
}
// This rule attribute defines no enhanced tables; always returns an immutable
// empty set.
@Override public Collection<String> getEnhancedTableNames() { return Collections.emptySet(); }
@Test
void assertGetEnhancedTableMapper() {
    // No enhanced tables are declared, so the attribute must expose an empty collection.
    assertThat(new LinkedList<>(ruleAttribute.getEnhancedTableNames()), is(Collections.emptyList()));
}
/**
 * HTTP entry point for streamed queries.
 *
 * <p>Rejects requests until the server is configured, refreshes the activeness
 * timestamp, parses the request's single statement, waits for the command
 * queue to catch up to the request's command sequence number, and then
 * dispatches to {@code handleStatement}.
 *
 * @return the endpoint response produced by statement handling
 */
public EndpointResponse streamQuery(
    final KsqlSecurityContext securityContext,
    final KsqlRequest request,
    final CompletableFuture<Void> connectionClosedFuture,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context
) {
    throwIfNotConfigured();
    activenessRegistrar.updateLastRequestTime();
    final PreparedStatement<?> statement = parseStatement(request);
    // Ensure earlier commands this query may depend on have been applied.
    CommandStoreUtil.httpWaitForCommandSequenceNumber(
        commandQueue, request, commandQueueCatchupTimeout);
    return handleStatement(securityContext, request, statement, connectionClosedFuture,
        isInternalRequest, metricsCallbackHolder, context);
}
/**
 * A KsqlTopicAuthorizationException raised while authorizing a PRINT statement
 * must surface as the standard authorization-error (forbidden Kafka access)
 * response, not as a server error.
 */
@Test
public void shouldReturnForbiddenKafkaAccessIfPrintTopicKsqlTopicAuthorizationException() {
    // Given:
    print = PreparedStatement.of("print", mock(PrintTopic.class));
    when(mockStatementParser.<PrintTopic>parseSingleStatement(PRINT_TOPIC))
        .thenReturn(print);
    doThrow(
        new KsqlTopicAuthorizationException(AclOperation.READ, Collections.singleton(TOPIC_NAME)))
        .when(authorizationValidator).checkAuthorization(any(), any(), any());
    // When:
    final EndpointResponse response = testResource.streamQuery(
        securityContext,
        new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), Collections.emptyMap(), null),
        new CompletableFuture<>(),
        Optional.empty(),
        new MetricsCallbackHolder(),
        context
    );
    // Then: status and entity must match the canonical authorization-error response.
    assertEquals(response.getStatus(), AUTHORIZATION_ERROR_RESPONSE.getStatus());
    assertEquals(response.getEntity(), AUTHORIZATION_ERROR_RESPONSE.getEntity());
}
public static void copyStream(InputStream input, Writer output) throws IOException { try (InputStreamReader inputStreamReader = new InputStreamReader(input)) { char[] buffer = new char[1024]; // Adjust if you want int bytesRead; while ((bytesRead = inputStreamReader.read(buffer)) != -1) { output.write(buffer, 0, bytesRead); } } }
/**
 * copyStream must transfer the stream's bytes into the writer as text.
 */
@Test
void testCopyStreamToWriter() throws IOException {
    final byte[] payload = "test".getBytes(StandardCharsets.UTF_8);
    final StringWriter target = new StringWriter();
    IOUtils.copyStream(new ByteArrayInputStream(payload), target);
    assertThat(target.toString()).contains("test");
}
/**
 * Renders the given data table to its textual form.
 *
 * @param table the table to render
 * @return the formatted table as a string
 */
public String format(DataTable table) {
    final StringBuilder out = new StringBuilder();
    formatTo(table, out);
    return out.toString();
}
@Test
void should_print_empty_string_as_empty() {
    // A single empty cell must be rendered with the "[empty]" placeholder.
    assertEquals("| [empty] |\n", formatter.format(tableOf("")));
}
/**
 * Builds the cluster-side gRPC protocol negotiator from the current
 * environment's TLS configuration.
 *
 * @return a TLS-capable negotiator when TLS is enabled, otherwise {@code null}
 */
@Override
public NacosGrpcProtocolNegotiator build() {
    RpcServerTlsConfig config =
            RpcServerTlsConfigFactory.getInstance().createClusterConfig(EnvUtil.getProperties());
    if (!config.getEnableTls()) {
        // TLS disabled: no negotiator is produced.
        return null;
    }
    SslContext sslContext = DefaultTlsContextBuilder.getSslContext(config);
    return new OptionalTlsProtocolNegotiator(sslContext, config);
}
@Test
void testBuildTlsDisabled() {
    // With TLS disabled in the environment, the builder produces no negotiator.
    assertNull(builder.build());
}
/**
 * Applies a flow-rule intent operation: computes the difference between the
 * rules of the intents being uninstalled and those being installed, classifies
 * each rule as add / modify / remove / no-op, and submits a single batched
 * {@code FlowRuleOperations} whose callbacks report success or failure back to
 * the {@code IntentInstallCoordinator}.
 *
 * <p>When the new intent requires non-disruptive reallocation and the old one
 * is currently INSTALLED, the staged {@code reallocate} path is taken instead.
 *
 * @param context operation context carrying the intents to install/uninstall
 */
@Override
public void apply(IntentOperationContext<FlowRuleIntent> context) {
    Optional<IntentData> toUninstall = context.toUninstall();
    Optional<IntentData> toInstall = context.toInstall();
    if (toInstall.isPresent() && toUninstall.isPresent()) {
        Intent intentToInstall = toInstall.get().intent();
        if (requireNonDisruptive(intentToInstall) && INSTALLED.equals(toUninstall.get().state())) {
            reallocate(context);
            return;
        }
    }
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        // Nothing to do.
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }
    List<FlowRuleIntent> uninstallIntents = context.intentsToUninstall();
    List<FlowRuleIntent> installIntents = context.intentsToInstall();
    List<FlowRule> flowRulesToUninstall;
    List<FlowRule> flowRulesToInstall;
    if (toUninstall.isPresent()) {
        // Remove tracked resource from both Intent and installable Intents.
        trackIntentResources(toUninstall.get(), uninstallIntents, REMOVE);
        // Retrieves all flow rules from all flow rule Intents.
        // Only rules that actually exist in the flow rule service need removal.
        flowRulesToUninstall = uninstallIntents.stream()
                .map(FlowRuleIntent::flowRules)
                .flatMap(Collection::stream)
                .filter(flowRule -> flowRuleService.getFlowEntry(flowRule) != null)
                .collect(Collectors.toList());
    } else {
        // No flow rules to be uninstalled.
        flowRulesToUninstall = Collections.emptyList();
    }
    if (toInstall.isPresent()) {
        // Track resource from both Intent and installable Intents.
        trackIntentResources(toInstall.get(), installIntents, ADD);
        // Retrieves all flow rules from all flow rule Intents.
        flowRulesToInstall = installIntents.stream()
                .map(FlowRuleIntent::flowRules)
                .flatMap(Collection::stream)
                .collect(Collectors.toList());
    } else {
        // No flow rules to be installed.
        flowRulesToInstall = Collections.emptyList();
    }
    List<FlowRule> flowRuleToModify;
    List<FlowRule> dontTouch;
    // If both uninstall/install list contained equal (=match conditions are equal) FlowRules,
    // omit it from remove list, since it will/should be overwritten by install
    flowRuleToModify = flowRulesToInstall.stream()
            .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::equals))
            .collect(Collectors.toList());
    // If both contained exactMatch-ing FlowRules, remove from both list,
    // since it will result in no-op.
    dontTouch = flowRulesToInstall.stream()
            .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::exactMatch))
            .collect(Collectors.toList());
    flowRulesToUninstall.removeAll(flowRuleToModify);
    flowRulesToUninstall.removeAll(dontTouch);
    flowRulesToInstall.removeAll(flowRuleToModify);
    flowRulesToInstall.removeAll(dontTouch);
    flowRuleToModify.removeAll(dontTouch);
    if (flowRulesToInstall.isEmpty() && flowRulesToUninstall.isEmpty() && flowRuleToModify.isEmpty()) {
        // There is no flow rules to install/uninstall
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }
    FlowRuleOperations.Builder builder = FlowRuleOperations.builder();
    // Add flows
    flowRulesToInstall.forEach(builder::add);
    // Modify flows
    flowRuleToModify.forEach(builder::modify);
    // Remove flows
    flowRulesToUninstall.forEach(builder::remove);
    // Report the batch outcome back to the coordinator.
    FlowRuleOperationsContext flowRuleOperationsContext = new FlowRuleOperationsContext() {
        @Override
        public void onSuccess(FlowRuleOperations ops) {
            intentInstallCoordinator.intentInstallSuccess(context);
        }
        @Override
        public void onError(FlowRuleOperations ops) {
            intentInstallCoordinator.intentInstallFailed(context);
        }
    };
    FlowRuleOperations operations = builder.build(flowRuleOperationsContext);
    log.debug("applying intent {} -> {} with {} rules: {}",
            toUninstall.map(x -> x.key().toString()).orElse("<empty>"),
            toInstall.map(x -> x.key().toString()).orElse("<empty>"),
            operations.stages().stream().mapToLong(Set::size).sum(),
            operations.stages());
    flowRuleService.apply(operations);
}
/**
 * End-to-end check of the non-disruptive reallocation path: replacing an
 * INSTALLED intent must proceed in ordered stages. The test waits on the mock
 * flow rule service's monitor for one representative rule per stage —
 * first-stage add, second-stage remove, third-stage add, final-stage remove —
 * before asserting the coordinator reported success for this context.
 */
@Test
public void testUninstallAndInstallNonDisruptive() throws InterruptedException {
    installer.flowRuleService = flowRuleServiceNonDisruptive;
    List<Intent> intentsToInstall = createAnotherFlowRuleIntentsNonDisruptive();
    List<Intent> intentsToUninstall = createFlowRuleIntentsNonDisruptive();
    IntentData toInstall = new IntentData(createP2PIntentNonDisruptive(), IntentState.INSTALLING, new WallClockTimestamp());
    toInstall = IntentData.compiled(toInstall, intentsToInstall);
    IntentData toUninstall = new IntentData(createP2PIntentNonDisruptive(), IntentState.INSTALLED, new WallClockTimestamp());
    toUninstall = IntentData.compiled(toUninstall, intentsToUninstall);
    IntentOperationContext<FlowRuleIntent> operationContext;
    IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
    operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);
    installer.apply(operationContext);
    //A single FlowRule is evaluated for every non-disruptive stage
    TrafficSelector selector = DefaultTrafficSelector.builder()
            .matchInPhyPort(CP1.port())
            .build();
    TrafficTreatment treatment = DefaultTrafficTreatment.builder()
            .setOutput(CP3.port())
            .build();
    FlowRule firstStageInstalledRule = DefaultFlowRule.builder()
            .forDevice(CP1.deviceId())
            .withSelector(selector)
            .withTreatment(treatment)
            .fromApp(APP_ID)
            .withPriority(DEFAULT_PRIORITY - 1)
            .makePermanent()
            .build();
    // We need to wait a bit in order to avoid
    // race conditions and failing builds
    synchronized (flowRuleServiceNonDisruptive) {
        while (!verifyFlowRule(ADD, firstStageInstalledRule)) {
            flowRuleServiceNonDisruptive.wait();
        }
    }
    assertTrue(flowRuleServiceNonDisruptive.flowRulesAdd.contains(firstStageInstalledRule));
    // Stage 2: the rule forwarding out of the old path must be removed.
    selector = DefaultTrafficSelector.builder()
            .matchInPhyPort(CP4_2.port())
            .build();
    treatment = DefaultTrafficTreatment.builder()
            .setOutput(CP4_1.port())
            .build();
    FlowRule secondStageUninstalledRule = DefaultFlowRule.builder()
            .forDevice(CP4_1.deviceId())
            .withSelector(selector)
            .withTreatment(treatment)
            .fromApp(APP_ID)
            .withPriority(DEFAULT_PRIORITY)
            .makePermanent()
            .build();
    synchronized (flowRuleServiceNonDisruptive) {
        while (!verifyFlowRule(REMOVE, secondStageUninstalledRule)) {
            flowRuleServiceNonDisruptive.wait();
        }
    }
    assertTrue(flowRuleServiceNonDisruptive.flowRulesRemove.contains(secondStageUninstalledRule));
    // Stage 3: the replacement rule on the new path must be added.
    selector = DefaultTrafficSelector.builder()
            .matchInPhyPort(CP4_3.port())
            .build();
    treatment = DefaultTrafficTreatment.builder()
            .setOutput(CP4_1.port())
            .build();
    FlowRule thirdStageInstalledRule = DefaultFlowRule.builder()
            .forDevice(CP4_1.deviceId())
            .withSelector(selector)
            .withTreatment(treatment)
            .fromApp(APP_ID)
            .withPriority(DEFAULT_PRIORITY)
            .makePermanent()
            .build();
    synchronized (flowRuleServiceNonDisruptive) {
        while (!verifyFlowRule(ADD, thirdStageInstalledRule)) {
            flowRuleServiceNonDisruptive.wait();
        }
    }
    assertTrue(flowRuleServiceNonDisruptive.flowRulesAdd.contains(thirdStageInstalledRule));
    // Final stage: the remaining rule of the old path must be removed.
    selector = DefaultTrafficSelector.builder()
            .matchInPhyPort(CP2_1.port())
            .build();
    treatment = DefaultTrafficTreatment.builder()
            .setOutput(CP2_2.port())
            .build();
    FlowRule lastStageUninstalledRule = DefaultFlowRule.builder()
            .forDevice(CP2_1.deviceId())
            .withSelector(selector)
            .withTreatment(treatment)
            .fromApp(APP_ID)
            .withPriority(DEFAULT_PRIORITY)
            .makePermanent()
            .build();
    synchronized (flowRuleServiceNonDisruptive) {
        while (!verifyFlowRule(REMOVE, lastStageUninstalledRule)) {
            flowRuleServiceNonDisruptive.wait();
        }
    }
    assertTrue(flowRuleServiceNonDisruptive.flowRulesRemove.contains(lastStageUninstalledRule));
    IntentOperationContext successContext = intentInstallCoordinator.successContext;
    assertEquals(successContext, operationContext);
}
/**
 * Reserves one of the requested username hashes for the authenticated account.
 *
 * <p>Validates that the request carries between 1 and
 * {@code AccountController.MAXIMUM_USERNAME_HASHES_LIST_LENGTH} hashes, each
 * exactly {@code AccountController.USERNAME_HASH_LENGTH} bytes, applies the
 * username-reservation rate limiter, then attempts the reservation. An
 * exhausted hash list is reported in-band via the {@code NO_HASHES_AVAILABLE}
 * error payload rather than a gRPC error status.
 *
 * @param request the reservation request listing candidate username hashes
 * @return the reservation result (reserved hash, or an error payload)
 */
@Override
public Mono<ReserveUsernameHashResponse> reserveUsernameHash(final ReserveUsernameHashRequest request) {
    final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();
    if (request.getUsernameHashesCount() == 0) {
        throw Status.INVALID_ARGUMENT
            .withDescription("List of username hashes must not be empty")
            .asRuntimeException();
    }
    if (request.getUsernameHashesCount() > AccountController.MAXIMUM_USERNAME_HASHES_LIST_LENGTH) {
        throw Status.INVALID_ARGUMENT
            .withDescription(String.format("List of username hashes may have at most %d elements, but actually had %d",
                AccountController.MAXIMUM_USERNAME_HASHES_LIST_LENGTH, request.getUsernameHashesCount()))
            .asRuntimeException();
    }
    final List<byte[]> usernameHashes = new ArrayList<>(request.getUsernameHashesCount());
    for (final ByteString usernameHash : request.getUsernameHashesList()) {
        if (usernameHash.size() != AccountController.USERNAME_HASH_LENGTH) {
            throw Status.INVALID_ARGUMENT
                .withDescription(String.format("Username hash length must be %d bytes, but was actually %d",
                    AccountController.USERNAME_HASH_LENGTH, usernameHash.size()))
                .asRuntimeException();
        }
        usernameHashes.add(usernameHash.toByteArray());
    }
    return rateLimiters.getUsernameReserveLimiter().validateReactive(authenticatedDevice.accountIdentifier())
        .then(Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier())))
        .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
        .flatMap(account -> Mono.fromFuture(() -> accountsManager.reserveUsernameHash(account, usernameHashes)))
        .map(reservation -> ReserveUsernameHashResponse.newBuilder()
            .setUsernameHash(ByteString.copyFrom(reservation.reservedUsernameHash()))
            .build())
        // Unavailable hashes are a normal outcome, reported in the payload.
        .onErrorReturn(UsernameHashNotAvailableException.class, ReserveUsernameHashResponse.newBuilder()
            .setError(ReserveUsernameHashError.newBuilder()
                .setErrorType(ReserveUsernameHashErrorType.RESERVE_USERNAME_HASH_ERROR_TYPE_NO_HASHES_AVAILABLE)
                .build())
            .build());
}
/**
 * When the accounts manager rejects every submitted hash with
 * UsernameHashNotAvailableException, the RPC must still complete normally and
 * carry the NO_HASHES_AVAILABLE error type in its payload.
 */
@Test
void reserveUsernameHashNotAvailable() {
    final Account account = mock(Account.class);
    when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI))
        .thenReturn(CompletableFuture.completedFuture(Optional.of(account)));
    final byte[] usernameHash = TestRandomUtil.nextBytes(AccountController.USERNAME_HASH_LENGTH);
    when(accountsManager.reserveUsernameHash(any(), any()))
        .thenReturn(CompletableFuture.failedFuture(new UsernameHashNotAvailableException()));
    final ReserveUsernameHashResponse expectedResponse = ReserveUsernameHashResponse.newBuilder()
        .setError(ReserveUsernameHashError.newBuilder()
            .setErrorType(ReserveUsernameHashErrorType.RESERVE_USERNAME_HASH_ERROR_TYPE_NO_HASHES_AVAILABLE)
            .build())
        .build();
    assertEquals(expectedResponse, authenticatedServiceStub().reserveUsernameHash(ReserveUsernameHashRequest.newBuilder()
        .addUsernameHashes(ByteString.copyFrom(usernameHash))
        .build()));
}
// Convenience overload: delegates to the three-argument variant with a null
// first argument (presumably a scope/tenant qualifier — TODO confirm against
// the full overload) and returns defaultValue when the key cannot be resolved
// to an integer.
public static int getIntegerValue(String key, int defaultValue) { return getIntegerValue(null, key, defaultValue); }
// NOTE(review): vacuous test — the body is empty, so it always passes without
// asserting anything about getIntegerValue. TODO: assert that an absent key
// yields the supplied default and that a present key is parsed correctly.
@Test public void getIntegerValue() throws Exception { }
/**
 * Test helper that wraps each plain label string in a {@link NodeLabel} with
 * default exclusivity and registers the whole batch at once.
 *
 * <p>("Cluser" in the name is a historical typo preserved for compatibility.)
 *
 * @param labels label names to register
 * @throws IOException if any label is rejected as invalid
 */
@VisibleForTesting
public void addToCluserNodeLabelsWithDefaultExclusivity(Set<String> labels) throws IOException {
    Set<NodeLabel> wrapped = new HashSet<>();
    for (String name : labels) {
        wrapped.add(NodeLabel.newInstance(name));
    }
    addToCluserNodeLabels(wrapped);
}
/**
 * Every malformed label must be rejected with an IOException: a null element,
 * the empty label, illegal characters, over-length names (&gt; 256 chars), and
 * names starting with '-' or '_'.
 */
@Test
@Timeout(5000)
void testAddInvalidlabel() throws IOException {
    // HashSet (not ImmutableSet) because this set must hold a null element.
    Set<String> containsNull = new HashSet<>();
    containsNull.add(null);
    assertAddToClusterNodeLabelsRejected(containsNull, "null label should not add to repo");
    assertAddToClusterNodeLabelsRejected(ImmutableSet.of(CommonNodeLabelsManager.NO_LABEL),
        "empty label should not add to repo");
    assertAddToClusterNodeLabelsRejected(ImmutableSet.of("-?"),
        "invalid label character should not add to repo");
    assertAddToClusterNodeLabelsRejected(ImmutableSet.of(StringUtils.repeat("c", 257)),
        "too long label should not add to repo");
    assertAddToClusterNodeLabelsRejected(ImmutableSet.of("-aaabbb"),
        "label cannot start with \"-\"");
    assertAddToClusterNodeLabelsRejected(ImmutableSet.of("_aaabbb"),
        "label cannot start with \"_\"");
    assertAddToClusterNodeLabelsRejected(ImmutableSet.of("a^aabbb"),
        "label cannot contains other chars like ^[] ...");
    assertAddToClusterNodeLabelsRejected(ImmutableSet.of("aa[a]bbb"),
        "label cannot contains other chars like ^[] ...");
}

// Asserts that registering the given label set fails with an IOException,
// replacing the repeated try/catch/flag boilerplate of the original test.
private void assertAddToClusterNodeLabelsRejected(Set<String> labels, String message) {
    boolean caught = false;
    try {
        mgr.addToCluserNodeLabelsWithDefaultExclusivity(labels);
    } catch (IOException e) {
        caught = true;
    }
    assertTrue(caught, message);
}