focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Builds the {@code ManagerSpec} (root resource-group specs, selector specs and the CPU quota
 * period) from the records persisted in the database.
 *
 * <p>Groups are built bottom-up with an explicit work queue seeded with the root group ids:
 * a group is only built once every one of its subgroups has been built and attached to it;
 * otherwise the group is pushed back behind its unbuilt children. Selector ordering is
 * whatever {@code resourceGroupsDao.getSelectors(environment)} returns (priority order —
 * presumably guaranteed by the DAO query; confirm against the DAO).
 *
 * <p>Synchronized because it mutates shared builder state read from {@code recordMap}.
 */
@Override public synchronized ManagerSpec getManagerSpec() { Set<Long> rootGroupIds = new HashSet<>(); Map<Long, ResourceGroupSpec> resourceGroupSpecMap = new HashMap<>(); Map<Long, ResourceGroupIdTemplate> resourceGroupIdTemplateMap = new HashMap<>(); Map<Long, ResourceGroupSpecBuilder> recordMap = new HashMap<>(); Map<Long, Set<Long>> subGroupIdsToBuild = new HashMap<>(); populateFromDbHelper(recordMap, rootGroupIds, resourceGroupIdTemplateMap, subGroupIdsToBuild); // Build up resource group specs from root to leaf
for (LinkedList<Long> queue = new LinkedList<>(rootGroupIds); !queue.isEmpty(); ) { Long id = queue.pollFirst(); resourceGroupIdTemplateMap.computeIfAbsent(id, k -> { ResourceGroupSpecBuilder builder = recordMap.get(k); return ResourceGroupIdTemplate.forSubGroupNamed( resourceGroupIdTemplateMap.get(builder.getParentId().get()), builder.getNameTemplate().toString()); }); Set<Long> childrenToBuild = subGroupIdsToBuild.getOrDefault(id, ImmutableSet.of()); // Add to resource group specs if no more child resource groups are left to build
if (childrenToBuild.isEmpty()) { ResourceGroupSpecBuilder builder = recordMap.get(id); ResourceGroupSpec resourceGroupSpec = builder.build(); resourceGroupSpecMap.put(id, resourceGroupSpec); // Add this resource group spec to parent subgroups and remove id from subgroup ids to build
builder.getParentId().ifPresent(parentId -> { recordMap.get(parentId).addSubGroup(resourceGroupSpec); subGroupIdsToBuild.get(parentId).remove(id); }); } else { // Add this group back to queue since it still has subgroups to build
queue.addFirst(id); // Add this group's subgroups to the queue so that when this id is dequeued again childrenToBuild will be empty
queue.addAll(0, childrenToBuild); } } // Specs are built from db records, validate and return manager spec
List<ResourceGroupSpec> rootGroups = rootGroupIds.stream().map(resourceGroupSpecMap::get).collect(toList()); List<SelectorSpec> selectors = resourceGroupsDao.getSelectors(environment)
.stream() .map(selectorRecord -> new SelectorSpec( selectorRecord.getUserRegex(), selectorRecord.getSourceRegex(), selectorRecord.getQueryType(), selectorRecord.getClientTags(), selectorRecord.getSelectorResourceEstimate(), selectorRecord.getClientInfoRegex(), selectorRecord.getSchema(), selectorRecord.getPrincipalRegex(), resourceGroupIdTemplateMap.get(selectorRecord.getResourceGroupId())) ).collect(toList()); ManagerSpec managerSpec = new ManagerSpec(rootGroups, selectors, getCpuQuotaPeriodFromDb()); return managerSpec; }
@Test public void testSelectorPriority() { H2DaoProvider daoProvider = setup("selectors"); H2ResourceGroupsDao dao = daoProvider.get(); dao.createResourceGroupsTable(); dao.createSelectorsTable(); dao.insertResourceGroup(1, "global", "100%", 100, 100, 100, null, null, null, null, null, null, null, null, 0, null, ENVIRONMENT); final int numberOfUsers = 100; List<String> expectedUsers = new ArrayList<>(); int[] randomPriorities = ThreadLocalRandom.current() .ints(0, 1000) .distinct() .limit(numberOfUsers) .toArray(); // insert several selectors with unique random priority where userRegex is equal to the priority for (int i = 0; i < numberOfUsers; i++) { int priority = randomPriorities[i]; String user = String.valueOf(priority); dao.insertSelector(1, priority, user, ".*", null, null, null, null); expectedUsers.add(user); } DbManagerSpecProvider dbManagerSpecProvider = new DbManagerSpecProvider(daoProvider.get(), ENVIRONMENT, new ReloadingResourceGroupConfig()); List<SelectorSpec> selectors = dbManagerSpecProvider.getManagerSpec().getSelectors(); assertEquals(selectors.size(), expectedUsers.size()); // when we load the selectors we expect the selector list to be ordered by priority expectedUsers.sort(Comparator.<String>comparingInt(Integer::parseInt).reversed()); for (int i = 0; i < numberOfUsers; i++) { Optional<Pattern> user = selectors.get(i).getUserRegex(); assertTrue(user.isPresent()); assertEquals(user.get().pattern(), expectedUsers.get(i)); } }
public static PeriodicCacheReloadTrigger fromConfig(ReadableConfig config) { checkArgument( config.get(CACHE_TYPE) == FULL, "'%s' should be '%s' in order to build a Periodic cache reload trigger.", CACHE_TYPE.key(), FULL); checkArgument( config.get(FULL_CACHE_RELOAD_STRATEGY) == PERIODIC, "'%s' should be '%s' in order to build a Periodic cache reload trigger.", FULL_CACHE_RELOAD_STRATEGY.key(), PERIODIC); checkArgument( config.getOptional(FULL_CACHE_PERIODIC_RELOAD_INTERVAL).isPresent(), "Missing '%s' in the configuration. This option is required to build Periodic cache reload trigger.", FULL_CACHE_PERIODIC_RELOAD_INTERVAL.key()); return new PeriodicCacheReloadTrigger( config.get(FULL_CACHE_PERIODIC_RELOAD_INTERVAL), config.get(FULL_CACHE_PERIODIC_RELOAD_SCHEDULE_MODE)); }
@Test void testCreateFromConfig() { assertThat(PeriodicCacheReloadTrigger.fromConfig(createValidConf())).isNotNull(); Configuration conf1 = createValidConf().set(CACHE_TYPE, PARTIAL); assertThatThrownBy(() -> PeriodicCacheReloadTrigger.fromConfig(conf1)) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("should be 'FULL'"); Configuration conf2 = createValidConf().set(FULL_CACHE_RELOAD_STRATEGY, TIMED); assertThatThrownBy(() -> PeriodicCacheReloadTrigger.fromConfig(conf2)) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("should be 'PERIODIC'"); Configuration conf3 = createValidConf(); conf3.removeConfig(FULL_CACHE_PERIODIC_RELOAD_INTERVAL); assertThatThrownBy(() -> PeriodicCacheReloadTrigger.fromConfig(conf3)) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining( "Missing '" + FULL_CACHE_PERIODIC_RELOAD_INTERVAL.key() + "'"); }
@Override public void afterMethod(final TargetAdviceObject target, final TargetAdviceMethod method, final Object[] args, final Object result, final String pluginType) { if ("createContextManager".equals(method.getName())) { ShardingSphereDataSourceContextHolder.put(((ContextManager) result).getComputeNodeInstanceContext().getInstance().getMetaData().getId(), new ShardingSphereDataSourceContext(AgentReflectionUtils.getFieldValue(target, "databaseName"), (ContextManager) result)); } }
@Test void assertAfterMethod() { assertThat(ShardingSphereDataSourceContextHolder.getShardingSphereDataSourceContexts().size(), is(0)); when(AgentReflectionUtils.getFieldValue(fixture, "databaseName")).thenReturn(databaseName); TargetAdviceMethod method = mock(TargetAdviceMethod.class); when(method.getName()).thenReturn("createContextManager"); ShardingSphereDataSourceAdvice advice = new ShardingSphereDataSourceAdvice(); ContextManager contextManager = mockContextManager(); advice.afterMethod(fixture, method, new Object[]{}, contextManager, "FIXTURE"); assertThat(ShardingSphereDataSourceContextHolder.getShardingSphereDataSourceContexts().size(), is(1)); assertThat(ShardingSphereDataSourceContextHolder.getShardingSphereDataSourceContexts().keySet().iterator().next(), is(instanceId)); assertThat(ShardingSphereDataSourceContextHolder.getShardingSphereDataSourceContexts().get(instanceId).getDatabaseName(), is(databaseName)); }
@Override public void authorize(UserGroupInformation user, InetAddress remoteAddress) throws AuthorizationException { if (user == null) { throw new IllegalArgumentException("user is null."); } UserGroupInformation realUser = user.getRealUser(); if (realUser == null) { return; } AccessControlList acl = proxyUserAcl.get(configPrefix + realUser.getShortUserName()); if (acl == null || !acl.isUserAllowed(user)) { throw new AuthorizationException("User: " + realUser.getUserName() + " is not allowed to impersonate " + user.getUserName()); } MachineList MachineList = proxyHosts.get( getProxySuperuserIpConfKey(realUser.getShortUserName())); if(MachineList == null || !MachineList.includes(remoteAddress)) { throw new AuthorizationException("Unauthorized connection for super-user: " + realUser.getUserName() + " from IP " + remoteAddress); } }
@Test public void testAuthorizationFailure() throws Exception { user = "dummyUser"; proxyUser = "test user2"; when(realUserUGI.getShortUserName()).thenReturn(proxyUser); when(realUserUGI.getUserName()).thenReturn(proxyUser); when(userGroupInformation.getUserName()).thenReturn(user); when(userGroupInformation.getRealUser()).thenReturn(realUserUGI); LambdaTestUtils.intercept(AuthorizationException.class, "User: " + proxyUser + " is not allowed to impersonate " + user, () -> provider.authorize(userGroupInformation, "2.2.2.2")); }
public void setHealth(Health health) { giant.setHealth(health); }
@Test void testSetHealth() { final var model = new GiantModel("giant1", Health.HEALTHY, Fatigue.ALERT, Nourishment.SATURATED); Action action = new Action(model); assertEquals(Health.HEALTHY, model.getHealth()); var messageFormat = "Giant giant1, The giant looks %s, alert and saturated."; for (final var health : Health.values()) { action.setHealth(health); assertEquals(health, model.getHealth()); assertEquals(String.format(messageFormat, health), model.toString()); } }
public Optional<RouteMapper> findTableMapper(final String logicDataSourceName, final String actualTableName) { for (RouteUnit each : routeUnits) { Optional<RouteMapper> result = each.findTableMapper(logicDataSourceName, actualTableName); if (result.isPresent()) { return result; } } return Optional.empty(); }
@Test void assertTableMapperNotFound() { assertFalse(singleRouteContext.findTableMapper(DATASOURCE_NAME_1, ACTUAL_TABLE).isPresent()); }
public final PayloadAttributes getPayloadAttributes() { return this.attributes; }
@Test public void getPayloadAttributes_returnsPayloadAttributes() { Validator validator = (unused) -> false; Payload payload = new Payload("my-payload", validator, PAYLOAD_ATTRIBUTES, CONFIG); assertEquals(payload.getPayloadAttributes(), PAYLOAD_ATTRIBUTES); }
@Override public Table buildTable(final PropertiesList entity) { return new Builder() .withColumnHeaders(HEADERS) .withRows(defRowValues(propertiesListWithOverrides(entity))) .build(); }
@Test public void shouldHandlePropertiesWithNullValue() { // Given: final PropertiesList propList = new PropertiesList("list properties;", Collections.singletonList(new Property(SOME_KEY, "KSQL", null)), Collections.emptyList(), ImmutableList.of(SOME_KEY) ); // When: final Table table = builder.buildTable(propList); // Then: assertThat(getRows(table), contains(row(SOME_KEY, "KSQL", "", "NULL"))); }
@GET @Produces(MediaType.APPLICATION_JSON) @Operation(summary = "Get prekey count", description = "Gets the number of one-time prekeys uploaded for this device and still available") @ApiResponse(responseCode = "200", description = "Body contains the number of available one-time prekeys for the device.", useReturnTypeSchema = true) @ApiResponse(responseCode = "401", description = "Account authentication check failed.") public CompletableFuture<PreKeyCount> getStatus(@ReadOnly @Auth final AuthenticatedDevice auth, @QueryParam("identity") @DefaultValue("aci") final IdentityType identityType) { final CompletableFuture<Integer> ecCountFuture = keysManager.getEcCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId()); final CompletableFuture<Integer> pqCountFuture = keysManager.getPqCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId()); return ecCountFuture.thenCombine(pqCountFuture, PreKeyCount::new); }
@Test void testGetKeysRateLimited() throws RateLimitExceededException { Duration retryAfter = Duration.ofSeconds(31); doThrow(new RateLimitExceededException(retryAfter)).when(rateLimiter).validate(anyString()); Response result = resources.getJerseyTest() .target(String.format("/v2/keys/PNI:%s/*", EXISTS_PNI)) .request() .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD)) .get(); assertThat(result.getStatus()).isEqualTo(429); assertThat(result.getHeaderString("Retry-After")).isEqualTo(String.valueOf(retryAfter.toSeconds())); }
@Override public void registerHints(RuntimeHints hints, ClassLoader classLoader) { var mcs = MemberCategory.values(); for (var tr : findJsonAnnotatedClassesInPackage(WatsonxAiApi.class)) hints.reflection().registerType(tr, mcs); for (var tr : findJsonAnnotatedClassesInPackage(WatsonxAiChatOptions.class)) hints.reflection().registerType(tr, mcs); }
@Test void registerHints() { RuntimeHints runtimeHints = new RuntimeHints(); WatsonxAiRuntimeHints watsonxAIRuntimeHintsTest = new WatsonxAiRuntimeHints(); watsonxAIRuntimeHintsTest.registerHints(runtimeHints, null); Set<TypeReference> jsonAnnotatedClasses = findJsonAnnotatedClassesInPackage(WatsonxAiApi.class); for (TypeReference jsonAnnotatedClass : jsonAnnotatedClasses) { assertThat(runtimeHints).matches(reflection().onType(jsonAnnotatedClass)); } jsonAnnotatedClasses = findJsonAnnotatedClassesInPackage(WatsonxAiChatOptions.class); for (TypeReference jsonAnnotatedClass : jsonAnnotatedClasses) { assertThat(runtimeHints).matches(reflection().onType(jsonAnnotatedClass)); } }
@Override public TransactionReceipt waitForTransactionReceipt(String transactionHash) throws IOException, TransactionException { return getTransactionReceipt(transactionHash, sleepDuration, attempts); }
@Test public void returnsTransactionReceiptWhenItIsAvailableInstantly() throws Exception { TransactionReceipt transactionReceipt = new TransactionReceipt(); doReturn(requestReturning(response(transactionReceipt))) .when(web3j) .ethGetTransactionReceipt(TRANSACTION_HASH); TransactionReceipt receipt = processor.waitForTransactionReceipt(TRANSACTION_HASH); assertEquals(receipt, (transactionReceipt)); }
public static boolean isHardcodedClientVersionValid() throws IOException, ExtractionException { if (hardcodedClientVersionValid.isPresent()) { return hardcodedClientVersionValid.get(); } // @formatter:off final byte[] body = JsonWriter.string() .object() .object("context") .object("client") .value("hl", "en-GB") .value("gl", "GB") .value("clientName", "WEB") .value("clientVersion", HARDCODED_CLIENT_VERSION) .value("platform", "DESKTOP") .value("utcOffsetMinutes", 0) .end() .object("request") .array("internalExperimentFlags") .end() .value("useSsl", true) .end() .object("user") // TODO: provide a way to enable restricted mode with: // .value("enableSafetyMode", boolean) .value("lockedSafetyMode", false) .end() .end() .value("fetchLiveState", true) .end().done().getBytes(StandardCharsets.UTF_8); // @formatter:on final var headers = getClientHeaders(WEB_CLIENT_ID, HARDCODED_CLIENT_VERSION); // This endpoint is fetched by the YouTube website to get the items of its main menu and is // pretty lightweight (around 30kB) final Response response = getDownloader().postWithContentTypeJson( YOUTUBEI_V1_URL + "guide?" + DISABLE_PRETTY_PRINT_PARAMETER, headers, body); final String responseBody = response.responseBody(); final int responseCode = response.responseCode(); hardcodedClientVersionValid = Optional.of(responseBody.length() > 5000 && responseCode == 200); // Ensure to have a valid response return hardcodedClientVersionValid.get(); }
@Test void testIsHardcodedClientVersionValid() throws IOException, ExtractionException { assertTrue(YoutubeParsingHelper.isHardcodedClientVersionValid(), "Hardcoded client version is not valid anymore"); }
public static void main(String[] args) { SpringApplication.run(Main.class, args); }
@Test void shouldExecuteApplicationWithoutException() { assertDoesNotThrow(() -> Main.main(new String[]{})); }
@NotNull @Override public Optional<? extends Algorithm> parse( @Nullable String str, @NotNull DetectionLocation detectionLocation) { if (str == null) { return Optional.empty(); } return switch (str) { case "RSA" -> Optional.of(new RSA(detectionLocation)); case "DSS" -> Optional.of(new DSS(detectionLocation)); case "PSK" -> Optional.of(new PSK(detectionLocation)); case "SHA RSA" -> Optional.of(new RSA(detectionLocation)) .map( signature -> { signature.put(new SHA(detectionLocation)); return signature; }); case "SHA DSS" -> Optional.of(new DSS(detectionLocation)) .map( signature -> { signature.put(new SHA(detectionLocation)); return signature; }); case "SHA" -> Optional.of(new SHA(detectionLocation)); case "SHA256" -> Optional.of(new SHA2(256, detectionLocation)); case "SHA384" -> Optional.of(new SHA2(384, detectionLocation)); case "GOSTR341012" -> Optional.of(new GOSTR341012(detectionLocation)); case "ECCPWD" -> Optional.of(new ECCPWD(detectionLocation)); case "KRB5" -> Optional.of(new Kerberos(5, detectionLocation)); case "ECDSA" -> Optional.of(new ECDSA(detectionLocation)); case "anon", "ANON" -> Optional.empty(); // Anonymous (anon) default -> Optional.empty(); }; }
@Test public void test() { final DetectionLocation testDetectionLocation = new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL"); final AuthenticationAlgorithmMapper mapper = new AuthenticationAlgorithmMapper(); final Collection<String> authCollection = JsonCipherSuites.CIPHER_SUITES.values().stream() .map(JsonCipherSuite::getAuthAlgorithm) .filter(Optional::isPresent) .map(Optional::get) .collect(Collectors.toSet()); for (String auth : authCollection) { if (Objects.equals(auth, "NULL") || Objects.equals(auth, "anon")) { continue; } try { assertThat(mapper.parse(auth, testDetectionLocation)).isPresent(); } catch (AssertionError e) { System.out.println("Can't map '" + auth + "'"); throw e; } } }
public static Result<Void> failure() { return failure(ErrorCodeEnum.SERVICE_ERROR.getCode(), ErrorCodeEnum.SERVICE_ERROR.getMessage()); }
@Test public void failure() { Assert.isTrue(SERVICE_ERROR.getCode().equals(Results.failure().getCode())); Assert.isTrue(SERVICE_ERROR.getMessage().equals(Results.failure().getMessage())); }
@CheckForNull public String getDescriptionAsHtml(RuleDto ruleDto) { if (ruleDto.getDescriptionFormat() == null) { return null; } Collection<RuleDescriptionSectionDto> ruleDescriptionSectionDtos = ruleDto.getRuleDescriptionSectionDtos(); return retrieveDescription(ruleDescriptionSectionDtos, Objects.requireNonNull(ruleDto.getDescriptionFormat())); }
@Test public void getDescriptionAsHtml_ignoresAdvancedSections() { var section1 = createRuleDescriptionSection(ROOT_CAUSE_SECTION_KEY, "<div>Root is Root</div>"); var section2 = createRuleDescriptionSection(ASSESS_THE_PROBLEM_SECTION_KEY, "<div>This is not a problem</div>"); var defaultRuleDescriptionSection = createDefaultRuleDescriptionSection("uuid_432", "default description"); RuleDto rule = new RuleDto().setDescriptionFormat(RuleDto.Format.HTML) .setType(RuleType.SECURITY_HOTSPOT) .addRuleDescriptionSectionDto(section1) .addRuleDescriptionSectionDto(section2) .addRuleDescriptionSectionDto(defaultRuleDescriptionSection); String html = ruleDescriptionFormatter.getDescriptionAsHtml(rule); assertThat(html).isEqualTo(defaultRuleDescriptionSection.getContent()); }
@Override public void execute(final ConnectionSession connectionSession) { queryResultMetaData = createQueryResultMetaData(); mergedResult = new TransparentMergedResult(getQueryResult()); }
@Test void assertExecute() throws SQLException { ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); when(contextManager.getPersistServiceFacade().getProcessPersistService().getProcessList()).thenReturn(mockProcessList()); ShowProcessListExecutor showProcessListExecutor = new ShowProcessListExecutor(false); showProcessListExecutor.execute(new ConnectionSession(mock(MySQLDatabaseType.class), new DefaultAttributeMap())); assertThat(showProcessListExecutor.getQueryResultMetaData().getColumnCount(), is(8)); MergedResult mergedResult = showProcessListExecutor.getMergedResult(); while (mergedResult.next()) { assertThat(mergedResult.getValue(1, String.class), is("f6c2336a-63ba-41bf-941e-2e3504eb2c80")); assertThat(mergedResult.getValue(2, String.class), is("root")); assertThat(mergedResult.getValue(3, String.class), is("127.0.0.1")); assertThat(mergedResult.getValue(4, String.class), is("foo_db")); assertThat(mergedResult.getValue(7, String.class), is("Executing 1/2")); assertThat(mergedResult.getValue(8, String.class), is("ALTER TABLE t_order ADD COLUMN a varchar(64) AFTER order_id")); } }
@Override public ByteBuf setLong(int index, long value) { throw new ReadOnlyBufferException(); }
@Test public void testSetLong() { final ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { assertThrows(ReadOnlyBufferException.class, new Executable() { @Override public void execute() { buf.setLong(0, 1); } }); } finally { buf.release(); } }
public List<String> findAvailableSecret(String appId) { return accessKeyServiceWithCache.getAvailableSecrets(appId); }
@Test public void testFindAvailableSecret() { String appId = "someAppId"; List<String> returnSecrets = Lists.newArrayList("someSecret"); when(accessKeyServiceWithCache.getAvailableSecrets(appId)).thenReturn(returnSecrets); List<String> availableSecret = accessKeyUtil.findAvailableSecret(appId); assertThat(availableSecret).containsExactly("someSecret"); verify(accessKeyServiceWithCache).getAvailableSecrets(appId); }
public static <T> IntermediateCompatibilityResult<T> constructIntermediateCompatibilityResult( TypeSerializerSnapshot<?>[] newNestedSerializerSnapshots, TypeSerializerSnapshot<?>[] oldNestedSerializerSnapshots) { Preconditions.checkArgument( newNestedSerializerSnapshots.length == oldNestedSerializerSnapshots.length, "Different number of new serializer snapshots and existing serializer snapshots."); TypeSerializer<?>[] nestedSerializers = new TypeSerializer[newNestedSerializerSnapshots.length]; // check nested serializers for compatibility boolean nestedSerializerRequiresMigration = false; boolean hasReconfiguredNestedSerializers = false; for (int i = 0; i < oldNestedSerializerSnapshots.length; i++) { TypeSerializerSchemaCompatibility<?> compatibility = resolveCompatibility( newNestedSerializerSnapshots[i], oldNestedSerializerSnapshots[i]); // if any one of the new nested serializers is incompatible, we can just short circuit // the result if (compatibility.isIncompatible()) { return IntermediateCompatibilityResult.definedIncompatibleResult(); } if (compatibility.isCompatibleAfterMigration()) { nestedSerializerRequiresMigration = true; } else if (compatibility.isCompatibleWithReconfiguredSerializer()) { hasReconfiguredNestedSerializers = true; nestedSerializers[i] = compatibility.getReconfiguredSerializer(); } else if (compatibility.isCompatibleAsIs()) { nestedSerializers[i] = newNestedSerializerSnapshots[i].restoreSerializer(); } else { throw new IllegalStateException("Undefined compatibility type."); } } if (nestedSerializerRequiresMigration) { return IntermediateCompatibilityResult.definedCompatibleAfterMigrationResult(); } if (hasReconfiguredNestedSerializers) { return IntermediateCompatibilityResult.undefinedReconfigureResult(nestedSerializers); } // ends up here if everything is compatible as is return IntermediateCompatibilityResult.definedCompatibleAsIsResult(nestedSerializers); }
@Test void testCompatibleWithReconfiguredSerializerIntermediateCompatibilityResult() { final TypeSerializerSnapshot<?>[] previousSerializerSnapshots = new TypeSerializerSnapshot<?>[] { new SchemaCompatibilityTestingSerializer("a").snapshotConfiguration(), new SchemaCompatibilityTestingSerializer("b").snapshotConfiguration(), }; final TypeSerializerSnapshot<?>[] newSerializerSnapshots = new TypeSerializerSnapshot<?>[] { SchemaCompatibilityTestingSnapshot.thatIsCompatibleWithLastSerializer("a"), SchemaCompatibilityTestingSnapshot .thatIsCompatibleWithLastSerializerAfterReconfiguration("b"), }; IntermediateCompatibilityResult<?> intermediateCompatibilityResult = CompositeTypeSerializerUtil.constructIntermediateCompatibilityResult( newSerializerSnapshots, previousSerializerSnapshots); final TypeSerializer<?>[] expectedReconfiguredNestedSerializers = new TypeSerializer<?>[] { new SchemaCompatibilityTestingSerializer("a"), new SchemaCompatibilityTestingSerializer("b"), }; assertThat(intermediateCompatibilityResult.isCompatibleWithReconfiguredSerializer()) .isTrue(); assertThat(intermediateCompatibilityResult.getNestedSerializers()) .containsExactly(expectedReconfiguredNestedSerializers); }
@Override public Optional<Object> getNativeSchema() { return Optional.of(descriptor); }
@Test public void testGetNativeSchema() { assertTrue(genericProtobufNativeSchema.getNativeSchema().isPresent()); assertTrue(genericProtobufNativeSchema.getNativeSchema().get() instanceof Descriptors.Descriptor); }
public void unlink(Name name) { DirectoryEntry entry = remove(checkNotReserved(name, "unlink")); entry.file().unlinked(); }
@Test public void testUnlink() { assertThat(root.get(Name.simple("foo"))).isNotNull(); root.unlink(Name.simple("foo")); assertThat(root.get(Name.simple("foo"))).isNull(); }
public RecurringJobBuilder withAmountOfRetries(int amountOfRetries) { this.retries = amountOfRetries; return this; }
@Test void testWithAmountOfRetries() { RecurringJob recurringJob = aRecurringJob() .withAmountOfRetries(10) .withCron(every5Seconds) .withDetails(() -> testService.doWork()) .build(jobDetailsGenerator); assertThat(recurringJob) .hasRetries(10) .hasId() .hasScheduleExpression(every5Seconds); }
@Subscribe public void onChatMessage(ChatMessage event) { if (event.getType() != ChatMessageType.GAMEMESSAGE && event.getType() != ChatMessageType.SPAM) { return; } String message = event.getMessage(); if (message.equals("Your Ring of endurance doubles the duration of your stamina potion's effect.")) { Integer charges = getRingOfEnduranceCharges(); if (charges == null) { log.debug("Ring of endurance charge with no known charges"); return; } // subtract the used charge charges--; setRingOfEnduranceCharges(charges); if (!roeWarningSent && charges < RING_OF_ENDURANCE_PASSIVE_EFFECT && energyConfig.ringOfEnduranceChargeMessage()) { String chatMessage = new ChatMessageBuilder() .append(ChatColorType.HIGHLIGHT) .append("Your Ring of endurance now has less than " + RING_OF_ENDURANCE_PASSIVE_EFFECT + " charges. Add more charges to regain its passive stamina effect.") .build(); chatMessageManager.queue(QueuedMessage.builder() .type(ChatMessageType.CONSOLE) .runeLiteFormattedMessage(chatMessage) .build()); roeWarningSent = true; } } else if (message.startsWith("Your Ring of endurance is charged with") || message.startsWith("You load your Ring of endurance with")) { Matcher matcher = Pattern.compile("([0-9,]+)").matcher(message); int charges = -1; while (matcher.find()) { charges = Integer.parseInt(matcher.group(1).replaceAll(",", "")); } setRingOfEnduranceCharges(charges); if (charges >= RING_OF_ENDURANCE_PASSIVE_EFFECT) { roeWarningSent = false; } } }
@Test public void testChargeMessage() { String chargeMessage = "You load your Ring of endurance with 1 stamina dose.<br>It now has 669 charges."; ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.SPAM, "", chargeMessage, "", 0); runEnergyPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration(RunEnergyConfig.GROUP_NAME, "ringOfEnduranceCharges", 669); }
@Override @SuppressWarnings("rawtypes") public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) { final long timestamp = TimeUnit.MILLISECONDS.toSeconds(clock.getTime()); for (Map.Entry<String, Gauge> entry : gauges.entrySet()) { reportGauge(timestamp, entry.getKey(), entry.getValue()); } for (Map.Entry<String, Counter> entry : counters.entrySet()) { reportCounter(timestamp, entry.getKey(), entry.getValue()); } for (Map.Entry<String, Histogram> entry : histograms.entrySet()) { reportHistogram(timestamp, entry.getKey(), entry.getValue()); } for (Map.Entry<String, Meter> entry : meters.entrySet()) { reportMeter(timestamp, entry.getKey(), entry.getValue()); } for (Map.Entry<String, Timer> entry : timers.entrySet()) { reportTimer(timestamp, entry.getKey(), entry.getValue()); } }
@Test public void reportsMeterValues() throws Exception { final Meter meter = mockMeter(); reporter.report(map(), map(), map(), map("test.meter", meter), map()); assertThat(fileContents("test.meter.csv")) .isEqualTo(csv( "t,count,mean_rate,m1_rate,m5_rate,m15_rate,rate_unit", "19910191,1,2.000000,3.000000,4.000000,5.000000,events/second" )); }
@Override @NonNull public Mono<ServerResponse> handle(@NonNull ServerRequest request) { var extensionName = request.pathVariable("name"); return client.get(scheme.type(), extensionName) .flatMap(extension -> ServerResponse.ok() .contentType(MediaType.APPLICATION_JSON) .bodyValue(extension)); }
@Test void shouldHandleCorrectly() { var scheme = Scheme.buildFromType(FakeExtension.class); var getHandler = new ExtensionGetHandler(scheme, client); var serverRequest = MockServerRequest.builder() .pathVariable("name", "my-fake") .build(); final var fake = new FakeExtension(); when(client.get(eq(FakeExtension.class), eq("my-fake"))).thenReturn(Mono.just(fake)); var responseMono = getHandler.handle(serverRequest); StepVerifier.create(responseMono) .consumeNextWith(response -> { assertEquals(HttpStatus.OK, response.statusCode()); assertEquals(MediaType.APPLICATION_JSON, response.headers().getContentType()); assertTrue(response instanceof EntityResponse<?>); assertEquals(fake, ((EntityResponse<?>) response).entity()); }) .verifyComplete(); }
public KeyGenerator getInternalRequestKeyGenerator() { try { KeyGenerator result = crypto.keyGenerator(getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG)); Optional.ofNullable(getInt(INTER_WORKER_KEY_SIZE_CONFIG)).ifPresent(result::init); return result; } catch (NoSuchAlgorithmException | InvalidParameterException e) { throw new ConfigException(String.format( "Unable to create key generator with algorithm %s and key size %d: %s", getString(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG), getInt(INTER_WORKER_KEY_SIZE_CONFIG), e.getMessage() )); } }
@Test public void shouldCreateKeyGeneratorWithSpecificSettings() { final String algorithm = "HmacSHA1"; Map<String, String> configs = configs(); configs.put(DistributedConfig.INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, algorithm); configs.put(DistributedConfig.INTER_WORKER_KEY_SIZE_CONFIG, "512"); DistributedConfig config = new DistributedConfig(configs); KeyGenerator keyGenerator = config.getInternalRequestKeyGenerator(); assertNotNull(keyGenerator); assertEquals(algorithm, keyGenerator.getAlgorithm()); assertEquals(512 / 8, keyGenerator.generateKey().getEncoded().length); }
public boolean add(Block block, int position) { requireNonNull(block, "block must not be null"); checkArgument(position >= 0, "position must be >= 0"); // containsNullElement flag is maintained so contains() method can have shortcut for null value if (block.isNull(position)) { if (containNullElements) { return false; } containNullElements = true; } int hashPosition = getHashPositionOfElement(block, position); if (blockPositionByHash.get(hashPosition) == EMPTY_SLOT) { addNewElement(hashPosition, block, position); return true; } return false; }
@Test public void testMemoryExceeded() { try { TypedSet typedSet = new TypedSet(BIGINT, 10, FUNCTION_NAME); for (int i = 0; i <= MAX_FUNCTION_MEMORY.toBytes() + 1; i++) { Block block = createLongsBlock(nCopies(1, (long) i)); typedSet.add(block, 0); } fail("expected exception"); } catch (PrestoException e) { assertEquals(e.getErrorCode(), EXCEEDED_FUNCTION_MEMORY_LIMIT.toErrorCode()); } }
public static ItemSpec<String> item(String key, @Nullable String value) { return item(key, Type.STRING, value); }
@Test public void testItemEquality() { new EqualsTester() .addEqualityGroup(DisplayData.item("foo", "bar"), DisplayData.item("foo", "bar")) .addEqualityGroup(DisplayData.item("foo", "barz")) .testEquals(); }
public static Path getMultipartUploadCommitsDirectory(Configuration conf, String uuid) throws IOException { return getMultipartUploadCommitsDirectory(FileSystem.get(conf), conf, uuid); }
// The MPU commits directory must end with "<uuid>/<staging-uploads-subdir>".
@Test public void testMPUCommitDir() throws Throwable { Configuration conf = new Configuration(); LocalFileSystem localFS = FileSystem.getLocal(conf); Path dir = getMultipartUploadCommitsDirectory(localFS, conf, "UUID"); assertTrue(dir.toString().endsWith("UUID/" + StagingCommitterConstants.STAGING_UPLOADS)); }
/**
 * Deliberately redacts all credential fields; every component prints "[hidden]"
 * so secrets never leak into logs. The rendered text is a compile-time constant.
 */
@Override
public String toString() {
    return "ScramCredentialData(salt=[hidden], storedKey=[hidden], serverKey=[hidden], iterations=[hidden])";
}
// Pins the redacted toString() output so credential fields are never rendered.
@Test public void testToString() { assertEquals("ScramCredentialData" + "(salt=" + "[hidden]" + ", storedKey=" + "[hidden]" + ", serverKey=" + "[hidden]" + ", iterations=" + "[hidden]" + ")", SCRAMCREDENTIALDATA.get(0).toString()); }
/**
 * Computes the delay for the given retry attempt: initialInterval * multiplier^min(attempts, expMax),
 * optionally scaled by a random jitter factor, and clamped to maxInterval.
 *
 * @param attempts number of attempts made so far (exponent, capped at expMax)
 * @return the backoff delay; a fixed {@code initialInterval} when expMax == 0 (backoff disabled)
 */
public long backoff(long attempts) {
    if (expMax == 0) {
        // Exponential growth disabled: constant delay, no jitter, no max clamp.
        return initialInterval;
    }
    double cappedExponent = Math.min(attempts, this.expMax);
    double unjittered = initialInterval * Math.pow(multiplier, cappedExponent);
    // Treat a jitter smaller than the smallest normal double as "no jitter".
    double jitterFactor = jitter < Double.MIN_NORMAL
            ? 1.0
            : ThreadLocalRandom.current().nextDouble(1 - jitter, 1 + jitter);
    return Math.min((long) (jitterFactor * unjittered), maxInterval);
}
// With jitter 0 the sequence is deterministic: 100 * 2^n, clamped at maxInterval (400).
@Test public void testExponentialBackoffWithoutJitter() { ExponentialBackoff exponentialBackoff = new ExponentialBackoff(100, 2, 400, 0.0); assertEquals(100, exponentialBackoff.backoff(0)); assertEquals(200, exponentialBackoff.backoff(1)); assertEquals(400, exponentialBackoff.backoff(2)); assertEquals(400, exponentialBackoff.backoff(3)); }
// Bulk-appends configuration properties to this collection (this class extends a property list).
public void addConfigurations(List<ConfigurationProperty> props) { this.addAll(props); }
// Encrypted configuration values must survive addConfigurations() and compare equal afterwards.
@Test public void addConfigurations_shouldAddConfigurationsWithEncryptedValue() throws Exception { ConfigurationProperty property = new ConfigurationProperty(new ConfigurationKey("username"), new EncryptedConfigurationValue("some_name")); ElasticProfile profile = new ElasticProfile("id", "prod-cluster"); profile.addConfigurations(List.of(property)); assertThat(profile.size(), is(1)); assertThat(profile, contains(new ConfigurationProperty(new ConfigurationKey("username"), new EncryptedConfigurationValue("some_name")))); }
// Looks up a tagged config entry by (dataId, group, tenant, tag). Blank tenant/tag are
// normalized to empty strings (tag is also trimmed) to match how rows are stored.
// SQL is built via the dialect-specific mapper for the config_info_tag table.
@Override public ConfigInfoTagWrapper findConfigInfo4Tag(final String dataId, final String group, final String tenant, final String tag) { String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant; String tagTmp = StringUtils.isBlank(tag) ? StringUtils.EMPTY : tag.trim(); ConfigInfoTagMapper configInfoTagMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_TAG); final String sql = configInfoTagMapper.select( Arrays.asList("id", "data_id", "group_id", "tenant_id", "tag_id", "app_name", "content", "gmt_modified"), Arrays.asList("data_id", "group_id", "tenant_id", "tag_id")); return databaseOperate.queryOne(sql, new Object[] {dataId, group, tenantTmp, tagTmp}, CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER); }
// Mocks the single-row query for (dataId, group, tenant, tag) and asserts the service
// returns the mocked wrapper unchanged.
@Test void testFindConfigInfo4Tag() { String dataId = "dataId1112222"; String group = "group22"; String tenant = "tenant2"; String tag = "tag123345"; //mock query tag return obj ConfigInfoTagWrapper configInfoTagWrapperMocked = new ConfigInfoTagWrapper(); configInfoTagWrapperMocked.setLastModified(System.currentTimeMillis()); Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant, tag}), eq(CONFIG_INFO_TAG_WRAPPER_ROW_MAPPER))).thenReturn(configInfoTagWrapperMocked); ConfigInfoTagWrapper configInfo4TagReturn = embeddedConfigInfoTagPersistService.findConfigInfo4Tag(dataId, group, tenant, tag); assertEquals(configInfoTagWrapperMocked, configInfo4TagReturn); }
// Builds a bag of pre-signed URLs for the file at several fixed expiries (1h, the
// host-configured default, 1 week, 1 month, 1 year).
// NOTE(review): TimeUnit.SECONDS.toSeconds(...) is an identity call — presumably kept for
// symmetry with the other entries since the preference is already in seconds; confirm.
@Override public DescriptiveUrlBag toUrl(final Path file) { final DescriptiveUrlBag list = new DescriptiveUrlBag(); // In one hour list.add(this.toSignedUrl(file, (int) TimeUnit.HOURS.toSeconds(1))); // Default signed URL expiring in 24 hours. list.add(this.toSignedUrl(file, (int) TimeUnit.SECONDS.toSeconds( new HostPreferences(session.getHost()).getInteger("s3.url.expire.seconds")))); // 1 Week list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(7))); // 1 Month list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(30))); // 1 Year list.add(this.toSignedUrl(file, (int) TimeUnit.DAYS.toSeconds(365))); return list; }
// Without a connected session, requesting a signed URL must yield the EMPTY sentinel URL.
@Test public void testDisconnected() throws Exception { final Host host = new Host(new AzureProtocol(), "kahy9boj3eib.blob.core.windows.net", new Credentials( PROPERTIES.get("azure.user"), PROPERTIES.get("azure.key") )); final AzureSession session = new AzureSession(host); final AzureUrlProvider provider = new AzureUrlProvider(session); final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); assertEquals(DescriptiveUrl.EMPTY.getUrl(), provider.toUrl(file).find(DescriptiveUrl.Type.signed).getUrl()); }
// Injects inferred schema/metadata into CREATE statements; all other statement types pass
// through untouched. CreateSource and CreateAsSelect are routed to their respective helpers;
// when the helper returns empty, the original statement is kept.
// KsqlStatementException propagates as-is; any other KsqlException is re-wrapped with the
// masked statement text so secrets in the SQL are not leaked in the error.
@SuppressWarnings("unchecked") @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { if (!(statement.getStatement() instanceof CreateSource) && !(statement.getStatement() instanceof CreateAsSelect)) { return statement; } try { if (statement.getStatement() instanceof CreateSource) { final ConfiguredStatement<CreateSource> createStatement = (ConfiguredStatement<CreateSource>) statement; return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement); } else { final ConfiguredStatement<CreateAsSelect> createStatement = (ConfiguredStatement<CreateAsSelect>) statement; return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse( createStatement); } } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { throw new KsqlStatementException( ErrorMessageUtil.buildErrorMessage(e), statement.getMaskedStatementText(), e.getCause()); } }
// CREATE TABLE with header + value columns: injection must add the inferred key schema
// while preserving header and value elements, and the masked SQL must reflect all three.
@Test public void shouldInjectValuesAndMaintainKeysAndHeadersForCt() { // Given: givenKeyAndValueInferenceSupported(); when(ct.getElements()).thenReturn(HEADER_AND_VALUE); // When: final ConfiguredStatement<CreateTable> result = injector.inject(ctStatement); // Then: assertThat(result.getStatement().getElements(), is(combineElements(HEADER_ELEMENTS, INFERRED_KSQL_KEY_SCHEMA_TABLE, SOME_VALUE_ELEMENTS))); assertThat(result.getMaskedStatementText(), is( "CREATE TABLE `ct` (" + "`head` BYTES HEADER('header'), " + "`key` STRING PRIMARY KEY, " + "`bob` STRING) " + "WITH (KAFKA_TOPIC='some-topic', KEY_FORMAT='protobuf', VALUE_FORMAT='avro');" )); }
/**
 * Matches when any configured pattern finds a (possibly partial) match in the pickle's name.
 * Uses Matcher.find(), so unanchored patterns match substrings.
 */
@Override
public boolean test(Pickle pickle) {
    final String pickleName = pickle.getName();
    for (java.util.regex.Pattern candidate : patterns) {
        if (candidate.matcher(pickleName).find()) {
            return true;
        }
    }
    return false;
}
// An anchored pattern (^...$) must not match a name that merely contains the pattern's text.
@Test void anchored_name_pattern_does_not_match_part_of_name() { Pickle pickle = createPickleWithName("a pickle name with suffix"); NamePredicate predicate = new NamePredicate(singletonList(Pattern.compile("^a pickle name$"))); assertFalse(predicate.test(pickle)); }
// Maps a SeaTunnel column definition back to an Oracle column type definition.
//   BOOLEAN -> NUMBER(1); integral types -> INTEGER; FLOAT/DOUBLE -> BINARY_FLOAT/BINARY_DOUBLE.
//   DECIMAL -> NUMBER(p,s), with precision/scale clamped into Oracle's valid ranges
//   (each out-of-range case logs a warning describing the adjustment).
//   BYTES -> RAW(n) up to MAX_RAW_LENGTH, otherwise BLOB; unknown length -> BLOB.
//   STRING -> VARCHAR2(n) up to MAX_VARCHAR_LENGTH, otherwise CLOB; unknown length -> VARCHAR2(max).
//   DATE -> DATE; TIMESTAMP -> TIMESTAMP [(scale, clamped to MAX_TIMESTAMP_SCALE)] WITH LOCAL TIME ZONE.
// Any other SqlType raises a convert-to-connector-type error for the ORACLE dialect.
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(String.format("%s(%s)", ORACLE_NUMBER, 1)); builder.dataType(ORACLE_NUMBER); builder.length(1L); break; case TINYINT: case SMALLINT: case INT: case BIGINT: builder.columnType(ORACLE_INTEGER); builder.dataType(ORACLE_INTEGER); break; case FLOAT: builder.columnType(ORACLE_BINARY_FLOAT); builder.dataType(ORACLE_BINARY_FLOAT); break; case DOUBLE: builder.columnType(ORACLE_BINARY_DOUBLE); builder.dataType(ORACLE_BINARY_DOUBLE); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} 
type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", ORACLE_NUMBER, precision, scale)); builder.dataType(ORACLE_NUMBER); builder.precision(precision); builder.scale(scale); break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(ORACLE_BLOB); builder.dataType(ORACLE_BLOB); } else if (column.getColumnLength() <= MAX_RAW_LENGTH) { builder.columnType( String.format("%s(%s)", ORACLE_RAW, column.getColumnLength())); builder.dataType(ORACLE_RAW); } else { builder.columnType(ORACLE_BLOB); builder.dataType(ORACLE_BLOB); } break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType( String.format("%s(%s)", ORACLE_VARCHAR2, MAX_VARCHAR_LENGTH)); builder.dataType(ORACLE_VARCHAR2); } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", ORACLE_VARCHAR2, column.getColumnLength())); builder.dataType(ORACLE_VARCHAR2); } else { builder.columnType(ORACLE_CLOB); builder.dataType(ORACLE_CLOB); } break; case DATE: builder.columnType(ORACLE_DATE); builder.dataType(ORACLE_DATE); break; case TIMESTAMP: if (column.getScale() == null || column.getScale() <= 0) { builder.columnType(ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE); } else { int timestampScale = column.getScale(); if (column.getScale() > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType( String.format("TIMESTAMP(%s) WITH LOCAL TIME ZONE", timestampScale)); builder.scale(timestampScale); } 
builder.dataType(ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.ORACLE, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
// BOOLEAN columns must reconvert to Oracle NUMBER(1) with length 1.
@Test public void testReconvertBoolean() { Column column = PhysicalColumn.builder().name("test").dataType(BasicType.BOOLEAN_TYPE).build(); BasicTypeDefine typeDefine = OracleTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals( String.format("%s(%s)", OracleTypeConverter.ORACLE_NUMBER, 1), typeDefine.getColumnType()); Assertions.assertEquals(OracleTypeConverter.ORACLE_NUMBER, typeDefine.getDataType()); Assertions.assertEquals(1, typeDefine.getLength()); }
/**
 * Factory: picks the sorter implementation matching the configured sorter type.
 *
 * @param options sorter configuration, including the desired sorter type
 * @return a Hadoop-backed sorter for SorterType.HADOOP, otherwise the native sorter
 */
public static ExternalSorter create(Options options) {
    if (options.getSorterType() == Options.SorterType.HADOOP) {
        return HadoopExternalSorter.create(options);
    }
    return NativeExternalSorter.create(options);
}
// Smoke test: a freshly-created sorter (of the parameterized type) handles empty input.
@Test public void testEmpty() throws Exception { SorterTestUtils.testEmpty( ExternalSorter.create( new ExternalSorter.Options() .setTempLocation(getTmpLocation().toString()) .setSorterType(sorterType))); }
// Computes the deep size of the object graph rooted at obj.
// Iterative breadth-first traversal via mPending avoids stack overflow on long linked lists.
// visit() accumulates into mSize and enqueues unvisited referents; the loop drains the queue.
// The finally block resets all traversal state (mVisited/mPending/mSize) so this
// synchronized instance can be reused for subsequent calls even if visit() throws.
public synchronized long calculateObjectSize(Object obj) { // Breadth-first traversal instead of naive depth-first with recursive // implementation, so we don't blow the stack traversing long linked lists. boolean init = true; try { for (;;) { visit(obj, init); init = false; if (mPending.isEmpty()) { return mSize; } obj = mPending.removeFirst(); } } finally { mVisited.clear(); mPending.clear(); mSize = 0; } }
// A calculator configured to treat ConstantObject/Long as constants must report the same
// size as the default calculator for this object graph.
@Test public void testConstant() { CompositeObject [] test = new CompositeObject[4]; for (int i = 0; i < test.length; i++) { test[i] = new CompositeObject(); } long size = mObjectSizeCalculator.calculateObjectSize(test); Set<Class<?>> testSet = new HashSet<>(); testSet.add(ConstantObject.class); testSet.add(Long.class); ObjectSizeCalculator constantCalc = new ObjectSizeCalculator( MEMORY_LAYOUT_SPECIFICATION, testSet); assertEquals(size, constantCalc.calculateObjectSize(test)); }
// Asserts the map under test does not contain the given key (null-safe: checked via keySet).
public final void doesNotContainKey(@Nullable Object key) { check("keySet()").that(checkNotNull(actual).keySet()).doesNotContain(key); }
// doesNotContainKey(null) against a map holding a null key must fail with the expected
// "value of: map.keySet()" failure facts.
@Test public void doesNotContainNullKey() { Map<String, String> actual = Maps.newHashMap(); actual.put(null, "null"); expectFailureWhenTestingThat(actual).doesNotContainKey(null); assertFailureKeys("value of", "expected not to contain", "but was", "map was"); assertFailureValue("value of", "map.keySet()"); assertFailureValue("expected not to contain", "null"); assertFailureValue("but was", "[null]"); }
// True when the whole input is exactly one quoted token: QUOTE_CHAR, one or more
// non-quote characters, QUOTE_CHAR — nothing before or after (String.matches anchors both ends).
// Null/blank input is never considered quoted.
// NOTE(review): assumes QUOTE_CHAR is regex-safe when embedded in a character class — confirm.
protected static boolean isSingleQuoted(String input) { if (input == null || input.isBlank()) { return false; } return input.matches("(^" + QUOTE_CHAR + "{1}([^" + QUOTE_CHAR + "]+)" + QUOTE_CHAR + "{1})"); }
// A string containing interior quote characters is not a single quoted token.
@Test public void testSingleQuotedNegative2() { assertFalse(isSingleQuoted("\" \" space not allowed between quotes \"")); }
// Returns the instants that may conflict with currentInstant. Table-service operations
// (clustering or compaction) use the table-services candidate set; all other commits use
// the regular-commit candidate set. The active timeline is reloaded first so the decision
// reflects the latest on-storage state.
@Override public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant, Option<HoodieInstant> lastSuccessfulInstant) { HoodieActiveTimeline activeTimeline = metaClient.reloadActiveTimeline(); if (ClusteringUtils.isClusteringInstant(activeTimeline, currentInstant) || COMPACTION_ACTION.equals(currentInstant.getAction())) { return getCandidateInstantsForTableServicesCommits(activeTimeline, currentInstant); } else { return getCandidateInstantsForNonTableServicesCommits(activeTimeline, currentInstant); } }
// A merely *scheduled* compaction must not be treated as a conflict candidate for an
// in-flight ingestion commit (ingestion takes priority over scheduled table services).
@Test public void testConcurrentWritesWithInterleavingScheduledCompaction() throws Exception { createCommit(metaClient.createNewInstantTime(), metaClient); HoodieActiveTimeline timeline = metaClient.getActiveTimeline(); // consider commits before this are all successful Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant(); // writer 1 starts String currentWriterInstant = metaClient.createNewInstantTime(); createInflightCommit(currentWriterInstant, metaClient); // compaction 1 gets scheduled String newInstantTime = metaClient.createNewInstantTime(); createCompactionRequested(newInstantTime, metaClient); Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant)); PreferWriterConflictResolutionStrategy strategy = new PreferWriterConflictResolutionStrategy(); List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect( Collectors.toList()); // writer 1 does not have a conflict with scheduled compaction plan 1 // Since, scheduled compaction plan is given lower priority compared ingestion commit. Assertions.assertEquals(0, candidateInstants.size()); }
/**
 * Returns the smallest power of two that is {@code >= value}.
 *
 * <p>Non-positive inputs return 1. The original relied on Java's shift-count masking
 * ({@code 1 << 32 == 1}) to produce that result implicitly; the guard below makes the
 * contract explicit without changing behavior.
 *
 * <p>Note: inputs greater than 2^30 overflow to {@link Integer#MIN_VALUE}, same as before;
 * callers needing larger ranges should use the {@code long} overload.
 *
 * @param value the value to round up
 * @return the next power of two {@code >= value}, or 1 for {@code value <= 0}
 */
public static int nextPowerOfTwo(final int value) {
    if (value <= 0) {
        // Explicit contract for non-positive inputs (previously fell out of shift masking).
        return 1;
    }
    return 1 << (32 - Integer.numberOfLeadingZeros(value - 1));
}
// Covers non-positive inputs (all map to 1), exact powers of two (unchanged), and
// rounding up; long-literal arguments exercise the separate long overload.
@Test public void testNextPowerOfTwo() { assertEquals(1, QuickMath.nextPowerOfTwo(-9999999)); assertEquals(1, QuickMath.nextPowerOfTwo(-1)); assertEquals(1, QuickMath.nextPowerOfTwo(0)); assertEquals(1, QuickMath.nextPowerOfTwo(1)); assertEquals(2, QuickMath.nextPowerOfTwo(2)); assertEquals(1024, QuickMath.nextPowerOfTwo(999)); assertEquals(1 << 23, QuickMath.nextPowerOfTwo((1 << 23) - 1)); assertEquals(1 << 23, QuickMath.nextPowerOfTwo(1 << 23)); assertEquals(2048L, QuickMath.nextPowerOfTwo(2000L)); assertEquals(1L << 33, QuickMath.nextPowerOfTwo((1L << 33) - 3)); assertEquals(1L << 43, QuickMath.nextPowerOfTwo((1L << 43))); }
/**
 * Renders the event through the converter chain.
 * A layout that has not been started produces the empty string rather than failing.
 */
public String doLayout(ILoggingEvent event) {
    if (isStarted()) {
        return writeLoopOnConverters(event);
    }
    return CoreConstants.EMPTY_STRING;
}
// Escaped parentheses in a pattern must render literally around the converted message.
@Test public void testWithParenthesis() { pl.setPattern("\\(%msg:%msg\\) %msg"); pl.start(); le = makeLoggingEvent(aMessage, null); String val = pl.doLayout(le); assertEquals("(Some message:Some message) Some message", val); }
// Narrows the Measure to its default implementation and delegates to saveMeasure.
@Override public void store(Measure newMeasure) { saveMeasure(newMeasure.inputComponent(), (DefaultMeasure<?>) newMeasure); }
// Storing the same symbol table twice for one file must be rejected.
@Test(expected = UnsupportedOperationException.class) public void duplicateSymbolTable() throws Exception { InputFile inputFile = new TestInputFileBuilder("foo", "src/Foo.java") .setModuleBaseDir(temp.newFolder().toPath()).build(); DefaultSymbolTable st = new DefaultSymbolTable(null) .onFile(inputFile); underTest.store(st); underTest.store(st); }
/**
 * Constant-folds from_unixtime(epochSeconds): converts a unix timestamp (INT or BIGINT)
 * to a VARCHAR datetime rendered in the session time zone.
 *
 * @param unixTime epoch seconds as an INT or BIGINT constant
 * @return the formatted datetime as a VARCHAR constant
 * @throws AnalysisException if the value is negative or exceeds TimeUtils.MAX_UNIX_TIMESTAMP
 */
@ConstantFunction.List(list = { @ConstantFunction(name = "from_unixtime", argTypes = {INT}, returnType = VARCHAR), @ConstantFunction(name = "from_unixtime", argTypes = {BIGINT}, returnType = VARCHAR) })
public static ConstantOperator fromUnixTime(ConstantOperator unixTime) throws AnalysisException {
    // Widen either accepted integer type to long.
    final long value = unixTime.getType().isInt() ? unixTime.getInt() : unixTime.getBigint();
    if (value < 0 || value > TimeUtils.MAX_UNIX_TIMESTAMP) {
        // Fixed message: 0 is accepted, so the bound is inclusive at zero
        // (previous text wrongly claimed "larger than zero").
        throw new AnalysisException(
                "unixtime should be between 0 and " + TimeUtils.MAX_UNIX_TIMESTAMP);
    }
    ConstantOperator dl = ConstantOperator.createDatetime(
            LocalDateTime.ofInstant(Instant.ofEpochSecond(value), TimeUtils.getTimeZone().toZoneId()));
    return ConstantOperator.createVarchar(dl.toString());
}
// Epoch second 10 renders as 1970-01-01 08:00:10 in the (UTC+8) session time zone.
@Test public void fromUnixTime() throws AnalysisException { assertEquals("1970-01-01 08:00:10", ScalarOperatorFunctions.fromUnixTime(O_BI_10).getVarchar()); }
// Delegates to the shared cache updater, tagging the data with the APP_AUTH config group.
@Override protected boolean updateCacheIfNeed(final ConfigData<AppAuthData> result) { return updateCacheIfNeed(result, ConfigGroupEnum.APP_AUTH); }
// After updating the cache with fresh config data, the cached value must be that data.
@Test public void testUpdateCacheIfNeed() { final AppAuthDataRefresh appAuthDataRefresh = mockAppAuthDataRefresh; // update cache, then assert equals ConfigData<AppAuthData> expect = new ConfigData<>(); expect.setLastModifyTime(System.currentTimeMillis()); appAuthDataRefresh.updateCacheIfNeed(expect); assertThat(appAuthDataRefresh.cacheConfigData(), is(expect)); }
// Returns the next chunk of results, or null when exhausted.
// Termination: (1) the configured limit has been reached, or (2) the next search result
// yields no messages. A pending initialResult (from the first search) is consumed exactly
// once, then cleared. When a limit is set (!= -1), the final chunk is truncated so the
// total message count never exceeds it. chunkId increments per emitted chunk; chunk 0 is
// the first chunk.
@Override @Nullable public ResultChunk nextChunk() throws IOException { if (limitReached()) { LOG.debug("[{}] Reached limit for query {}", queryHash, getOriginalQuery()); return null; } final R result = this.initialResult != null ? this.initialResult : nextSearchResult(); this.lastSearchResponse = result; this.initialResult = null; final List<ResultMessage> resultMessages = result != null ? collectMessagesFromResult(result) : List.of(); if (resultMessages.isEmpty()) { // chunking exhausted LOG.debug("[{}] Reached end of {} results for query {}", queryHash, getChunkingMethodName(), getOriginalQuery()); return null; } final int remainingResultsForLimit = limit - resultCount; final List<ResultMessage> resultMessagesSlice = (limit != -1 && remainingResultsForLimit < resultMessages.size()) ? resultMessages.subList(0, remainingResultsForLimit) : resultMessages; resultCount += resultMessagesSlice.size(); return new ResultChunk(fields, chunkId++, resultMessagesSlice); }
// With limit 7 and chunk size 3 the chunks must be 3, 3, then 1 message (truncated to the
// limit), followed by null; only the first chunk reports isFirstChunk().
@Test void doesNotExceedLimit(ResultMessageFactory resultMessageFactory) throws Exception { toTest = new ServerlessChunkedQueryResultSimulation(resultMessageFactory, "Client", null, "", List.of("name"), 7, 3 ); ResultChunk resultChunk = toTest.nextChunk(); assertThat(resultChunk.isFirstChunk()).isTrue(); List<ResultMessage> messages = resultChunk.messages(); assertThat(messages) .isNotNull() .hasSize(3); verifyElementAt(messages, 0, BACKING_RESULT_LIST.get(0)); verifyElementAt(messages, 1, BACKING_RESULT_LIST.get(1)); verifyElementAt(messages, 2, BACKING_RESULT_LIST.get(2)); resultChunk = toTest.nextChunk(); assertThat(resultChunk.isFirstChunk()).isFalse(); messages = resultChunk.messages(); assertThat(messages) .isNotNull() .hasSize(3); verifyElementAt(messages, 0, BACKING_RESULT_LIST.get(3)); verifyElementAt(messages, 1, BACKING_RESULT_LIST.get(4)); verifyElementAt(messages, 2, BACKING_RESULT_LIST.get(5)); resultChunk = toTest.nextChunk(); assertThat(resultChunk.isFirstChunk()).isFalse(); messages = resultChunk.messages(); assertThat(messages) .isNotNull() .hasSize(1); verifyElementAt(messages, 0, BACKING_RESULT_LIST.get(6)); resultChunk = toTest.nextChunk(); assertThat(resultChunk).isNull(); }
// Appends actionable troubleshooting guidance to known MySQL CDC error messages:
// server-id conflicts, and missing-binlog-position / missing-transaction errors caused by
// binlog expiry. Unrecognized messages (and null) are returned unchanged.
public static String optimizeErrorMessage(String msg) { if (msg == null) { return null; } if (SERVER_ID_CONFLICT.matcher(msg).matches()) { // Optimize the error msg when server id conflict msg += "\nThe 'server-id' in the mysql cdc connector should be globally unique, but conflicts happen now.\n" + "The server id conflict may happen in the following situations: \n" + "1. The server id has been used by other mysql cdc table in the current job.\n" + "2. The server id has been used by the mysql cdc table in other jobs.\n" + "3. The server id has been used by other sync tools like canal, debezium and so on.\n"; } else if (MISSING_BINLOG_POSITION_WHEN_BINLOG_EXPIRE.matcher(msg).matches() || MISSING_TRANSACTION_WHEN_BINLOG_EXPIRE.matcher(msg).matches()) { // Optimize the error msg when binlog is unavailable msg += "\nThe required binary logs are no longer available on the server. This may happen in following situations:\n" + "1. The speed of CDC source reading is too slow to exceed the binlog expired period. You can consider increasing the binary log expiration period, you can also to check whether there is back pressure in the job and optimize your job.\n" + "2. The job runs normally, but something happens in the database and lead to the binlog cleanup. You can try to check why this cleanup happens from MySQL side."; } return msg; }
// A Debezium "binlog no longer available" message must be recognized and suffixed with
// the binlog-expiry troubleshooting guidance.
@Test public void testOptimizeErrorMessageWhenMissingTransaction() { assertEquals( "The connector is trying to read binlog starting at Struct{version=1.6.4.Final,connector=mysql,name=mysql_binlog_source,ts_ms=1670826084012,db=,server_id=0,file=mysql-bin.000005,pos=3845,row=0}, but this is no longer available on the server. Reconfigure the connector to use a snapshot when needed." + "\nThe required binary logs are no longer available on the server. This may happen in following situations:\n" + "1. The speed of CDC source reading is too slow to exceed the binlog expired period. You can consider increasing the binary log expiration period, you can also to check whether there is back pressure in the job and optimize your job.\n" + "2. The job runs normally, but something happens in the database and lead to the binlog cleanup. You can try to check why this cleanup happens from MySQL side.", ErrorMessageUtils.optimizeErrorMessage( "The connector is trying to read binlog starting at Struct{version=1.6.4.Final,connector=mysql,name=mysql_binlog_source,ts_ms=1670826084012,db=,server_id=0,file=mysql-bin.000005,pos=3845,row=0}, but this is no longer available on the server. Reconfigure the connector to use a snapshot when needed.")); }
// Completes the async update check; if a newer version is available, logs a yellow
// (ANSI \u001B[33m) lifecycle message with changelog and privacy-policy links.
// No-op when the check returned empty (already up to date or check disabled/failed).
static void finishUpdateChecker( ProjectProperties projectProperties, Future<Optional<String>> updateCheckFuture) { UpdateChecker.finishUpdateCheck(updateCheckFuture) .ifPresent( latestVersion -> { String changelogUrl = ProjectInfo.GITHUB_URL + "/blob/master/jib-gradle-plugin/CHANGELOG.md"; String privacyUrl = ProjectInfo.GITHUB_URL + "/blob/master/docs/privacy.md"; String message = String.format( "\n\u001B[33mA new version of %s (%s) is available (currently using %s). Update your" + " build configuration to use the latest features and fixes!\n%s\u001B[0m\n\nPlease see" + " %s for info on disabling this update check.\n", projectProperties.getToolName(), latestVersion, projectProperties.getToolVersion(), changelogUrl, privacyUrl); projectProperties.log(LogEvent.lifecycle(message)); }); }
// Pins the exact lifecycle log message emitted when the update check finds a newer version.
@Test public void testFinishUpdateChecker_correctMessageLogged() { when(mockProjectProperties.getToolName()).thenReturn("tool-name"); when(mockProjectProperties.getToolVersion()).thenReturn("2.0.0"); Future<Optional<String>> updateCheckFuture = Futures.immediateFuture(Optional.of("2.1.0")); TaskCommon.finishUpdateChecker(mockProjectProperties, updateCheckFuture); verify(mockProjectProperties) .log( LogEvent.lifecycle( "\n\u001B[33mA new version of tool-name (2.1.0) is available (currently using 2.0.0). " + "Update your build configuration to use the latest features and fixes!\n" + ProjectInfo.GITHUB_URL + "/blob/master/jib-gradle-plugin/CHANGELOG.md\u001B[0m\n\n" + "Please see " + ProjectInfo.GITHUB_URL + "/blob/master/docs/privacy.md for info on disabling this update check.\n")); }
// Delegates event ordering to the configured eventOrder comparator.
@Override public int compare(Event a, Event b) { return eventOrder.compare(a, b); }
// TestRunFinished must sort after every other event kind, and equal to itself.
@Test void verifyTestRunFinishedSortedCorrectly() { assertAll( () -> assertThat(comparator.compare(runFinished, runStarted), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, suggested), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, testRead), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, testParsed), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, feature1Case1Started), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, feature1Case2Started), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, feature1Case3Started), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, feature2Case1Started), greaterThan(EQUAL_TO)), () -> assertThat(comparator.compare(runFinished, runFinished), equalTo(EQUAL_TO))); }
// One-way "encryption": delegates to the configured digest algorithm (the SQL context is unused).
@Override public String encrypt(final Object plainValue, final AlgorithmSQLContext algorithmSQLContext) { return digestAlgorithm.digest(plainValue); }
// A null plaintext must digest to null rather than throwing.
@Test void assertEncryptWithNullPlaintext() { assertNull(encryptAlgorithm.encrypt(null, mock(AlgorithmSQLContext.class))); }
/**
 * Collects all registered instances currently flagged as needing redo.
 * Iteration over the shared registry happens inside its synchronization block
 * to avoid concurrent-modification issues; the returned set is a private snapshot.
 *
 * @return a new set of instances whose isNeedRedo() is true (possibly empty)
 */
public Set<InstanceRedoData> findInstanceRedoData() {
    Set<InstanceRedoData> pendingRedo = new HashSet<>();
    synchronized (registeredInstances) {
        registeredInstances.values().forEach(candidate -> {
            if (candidate.isNeedRedo()) {
                pendingRedo.add(candidate);
            }
        });
    }
    return pendingRedo;
}
// Redo data is present after caching, cleared once registration completes, and present
// again after a deregister request.
@Test void testFindInstanceRedoData() { redoService.cacheInstanceForRedo(SERVICE, GROUP, new Instance()); assertFalse(redoService.findInstanceRedoData().isEmpty()); redoService.instanceRegistered(SERVICE, GROUP); assertTrue(redoService.findInstanceRedoData().isEmpty()); redoService.instanceDeregister(SERVICE, GROUP); assertFalse(redoService.findInstanceRedoData().isEmpty()); }
/**
 * Reads the named field's value from {@code target} via reflection.
 *
 * @param target    the object to read from
 * @param fieldName the declared field name to look up (searched via findField)
 * @param <T>       the expected value type
 * @return the field's current value
 * @throws IllegalStateException if no such field exists on the target's class hierarchy
 */
public static <T> T getFieldValue(final Object target, final String fieldName) {
    Optional<Field> matched = findField(fieldName, target.getClass());
    if (!matched.isPresent()) {
        throw new IllegalStateException(String.format("Can not find field name `%s` in class %s.", fieldName, target.getClass()));
    }
    return getFieldValue(target, matched.get());
}
// Reflective read of a private field must match the getter's value.
@Test void assertGetFieldValue() { ReflectionFixture reflectionFixture = new ReflectionFixture("foo"); assertThat(AgentReflectionUtils.getFieldValue(reflectionFixture, "value"), is(reflectionFixture.getValue())); }
// Periodical task: deletes cluster events older than maxEventAge (cutoff computed in UTC
// millis) with journaled write concern. Any failure is logged and swallowed so the
// periodical keeps running on its schedule.
@Override public void doRun() { try { LOG.debug("Removing stale events from MongoDB collection \"{}\"", COLLECTION_NAME); final long timestamp = DateTime.now(DateTimeZone.UTC).getMillis() - maxEventAge; final DBQuery.Query query = DBQuery.lessThan("timestamp", timestamp); final WriteResult<ClusterEvent, String> writeResult = dbCollection.remove(query, WriteConcern.JOURNALED); LOG.debug("Removed {} stale events from \"{}\"", writeResult.getN(), COLLECTION_NAME); } catch (Exception e) { LOG.warn("Error while removing stale cluster events from MongoDB", e); } }
// Of four events (now, TIME, TIME - maxAge, TIME - 2*maxAge), the cleanup run must remove
// exactly the two older than the max event age.
@Test public void testDoRun() throws Exception { final DBCollection collection = mongoConnection.getDatabase().getCollection(ClusterEventPeriodical.COLLECTION_NAME); assertThat(insertEvent(collection, 0L)).isTrue(); assertThat(insertEvent(collection, TIME.getMillis())).isTrue(); assertThat(insertEvent(collection, TIME.minus(ClusterEventCleanupPeriodical.DEFAULT_MAX_EVENT_AGE).getMillis())).isTrue(); assertThat(insertEvent(collection, TIME.minus(2 * ClusterEventCleanupPeriodical.DEFAULT_MAX_EVENT_AGE).getMillis())).isTrue(); assertThat(collection.count()).isEqualTo(4L); clusterEventCleanupPeriodical.run(); assertThat(collection.count()).isEqualTo(2L); }
/**
 * Builds a new, fully configured Jackson ObjectMapper with all standard modules registered.
 * Each call returns a distinct instance.
 */
public static ObjectMapper createObjectMapper() {
    ObjectMapper mapper = new ObjectMapper();
    registerModules(mapper);
    return mapper;
}
// The factory must not cache: two calls yield two distinct mapper instances.
@Test void testCreateObjectMapperReturnDistinctMappers() { final ObjectMapper mapper1 = JacksonMapperFactory.createObjectMapper(); final ObjectMapper mapper2 = JacksonMapperFactory.createObjectMapper(); assertThat(mapper1).isNotSameAs(mapper2); }
// Deletes the tenant record for the namespace under the default KP.
// Always reports true; the atomic remove throws on failure rather than returning a status.
public Boolean removeNamespace(String namespaceId) { namespacePersistService.removeTenantInfoAtomic(DEFAULT_KP, namespaceId); return true; }
// Removing a namespace must delegate to the atomic tenant-info removal with the default KP.
@Test void testRemoveNamespace() { namespaceOperationService.removeNamespace(TEST_NAMESPACE_ID); verify(namespacePersistService).removeTenantInfoAtomic(DEFAULT_KP, TEST_NAMESPACE_ID); }
/**
 * Looks up the value stored for {@code key} in the off-heap map.
 *
 * @param key the key to look up
 * @return the stored long, or the configured {@code nullValue} sentinel when the key is absent
 */
@Override
public long get(long key) {
    final long slotAddress = hsa.get(key);
    if (slotAddress == NULL_ADDRESS) {
        return nullValue;
    }
    return mem.getLong(slotAddress);
}
// Round trip: a value stored under a key must be returned by get() for that key.
@Test public void testGet() { long key = newKey(); long value = newValue(); map.put(key, value); long currentValue = map.get(key); assertEqualsKV(value, currentValue, key, value); }
// Opens an output stream that uploads the file's content to the EUE service.
// Resolution of the upload target: if the transfer status carries no URL, either update the
// existing resource (status.isExists()) or create a new one under the parent, extracting
// the resource id from the creation response; otherwise reuse the URL and resource id the
// caller stashed in the status parameters (multipart case).
// The deferred HTTP entity callable appends x_cdash64 (chunk-list SHA-256 checksum),
// x_size, and — for segmented uploads — x_offset (part index * configured multipart size)
// as query parameters; segments go via PUT, whole files via POST. A 200 response yields a
// Chunk descriptor; anything else is buffered and mapped to a backend exception. Checksum
// hex-decoding failures surface as ChecksumException. The resolved resource id is cached
// on the file id provider before returning the stream.
@Override public HttpResponseOutputStream<Chunk> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final String uploadUri; final String resourceId; if(null == status.getUrl()) { if(status.isExists()) { resourceId = fileid.getFileId(file); uploadUri = EueUploadHelper.updateResource(session, resourceId, status, UploadType.SIMPLE).getUploadURI(); } else { final ResourceCreationResponseEntry uploadResourceCreationResponseEntry = EueUploadHelper .createResource(session, fileid.getFileId(file.getParent()), file.getName(), status, UploadType.SIMPLE); resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(uploadResourceCreationResponseEntry.getHeaders().getLocation()); uploadUri = uploadResourceCreationResponseEntry.getEntity().getUploadURI(); } } else { uploadUri = status.getUrl(); resourceId = status.getParameters().get(RESOURCE_ID); } final HttpResponseOutputStream<Chunk> stream = this.write(file, status, new DelayedHttpEntityCallable<Chunk>(file) { @Override public Chunk call(final HttpEntity entity) throws BackgroundException { try { final HttpResponse response; final StringBuilder uploadUriWithParameters = new StringBuilder(uploadUri); if(!Checksum.NONE.equals(status.getChecksum())) { uploadUriWithParameters.append(String.format("&x_cdash64=%s", new ChunkListSHA256ChecksumCompute().compute(status.getLength(), Hex.decodeHex(status.getChecksum().hash)))); } if(status.getLength() != -1) { uploadUriWithParameters.append(String.format("&x_size=%d", status.getLength())); } if(status.isSegment()) { // Chunked upload from large upload service uploadUriWithParameters.append(String.format("&x_offset=%d", new HostPreferences(session.getHost()).getLong("eue.upload.multipart.size") * (status.getPart() - 1))); final HttpPut request = new HttpPut(uploadUriWithParameters.toString()); request.setEntity(entity); response = session.getClient().execute(request); } else { final HttpPost request = new 
HttpPost(uploadUriWithParameters.toString()); request.setEntity(entity); request.setHeader(HttpHeaders.CONTENT_TYPE, MimeTypeService.DEFAULT_CONTENT_TYPE); response = session.getClient().execute(request); } try { if(response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { return new Chunk(resourceId, status.getPart(), status.getLength(), status.getChecksum()); } EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity())); throw new EueExceptionMappingService().map(response); } finally { EntityUtils.consume(response.getEntity()); } } catch(HttpResponseException e) { throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file); } catch(DecoderException e) { throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e); } } @Override public long getContentLength() { return status.getLength(); } } ); fileid.cache(file, resourceId); return stream; }
@Test
public void testMissingChecksum() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final EueWriteFeature feature = new EueWriteFeature(session, fileid);
    final Path file = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final byte[] content = RandomUtils.nextBytes(8235);
    // Explicitly clear the checksum so the upload must proceed without the x_cdash64 parameter.
    final TransferStatus status = new TransferStatus().withLength(content.length);
    status.withChecksum(Checksum.NONE);
    final HttpResponseOutputStream<EueWriteFeature.Chunk> out = feature.write(file, status, new DisabledConnectionCallback());
    final ByteArrayInputStream in = new ByteArrayInputStream(content);
    final TransferStatus progress = new TransferStatus();
    final BytecountStreamListener count = new BytecountStreamListener();
    new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out);
    // All bytes must have been sent despite the missing checksum.
    assertEquals(content.length, count.getSent());
    in.close();
    out.close();
    // Clean up the remote file created by the upload.
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Creates a {@code TableElements} instance from the supplied elements.
 *
 * @param elements table elements in declaration order
 * @return new instance wrapping an immutable copy of the elements
 */
public static TableElements of(final TableElement... elements) {
    final ImmutableList<TableElement> snapshot = ImmutableList.copyOf(elements);
    return new TableElements(snapshot);
}
@Test
public void shouldNotThrowOnNoKeyElements() {
    // Given: a single value column and no KEY / PRIMARY KEY columns.
    final List<TableElement> elements = ImmutableList.of(
        tableElement("v0", new Type(SqlTypes.INTEGER))
    );
    // When:
    TableElements.of(elements);
    // Then: did not throw.
}
/**
 * Scores a single position: one when the position satisfies the consecutive
 * criterion, zero otherwise.
 *
 * @param series   the bar series providing the numeric factory
 * @param position the position to evaluate
 * @return {@code series.one()} or {@code series.zero()}
 */
@Override
public Num calculate(BarSeries series, Position position) {
    if (isConsecutive(position)) {
        return series.one();
    }
    return series.zero();
}
@Test
public void calculateWithTwoShortPositions() {
    // Two short positions evaluated under the LOSS filter: expected score 0.
    MockBarSeries seriesLoss = new MockBarSeries(numFunction, 100, 90, 110, 120, 95, 105);
    TradingRecord tradingRecordLoss = new BaseTradingRecord(
        Trade.sellAt(0, seriesLoss), Trade.buyAt(1, seriesLoss),
        Trade.sellAt(3, seriesLoss), Trade.buyAt(5, seriesLoss));
    assertNumEquals(0, getCriterion(PositionFilter.LOSS).calculate(seriesLoss, tradingRecordLoss));
    // Two short positions evaluated under the PROFIT filter: likewise 0.
    MockBarSeries seriesProfit = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
    TradingRecord tradingRecordProfit = new BaseTradingRecord(
        Trade.sellAt(0, seriesProfit), Trade.buyAt(1, seriesProfit),
        Trade.sellAt(3, seriesProfit), Trade.buyAt(5, seriesProfit));
    assertNumEquals(0, getCriterion(PositionFilter.PROFIT).calculate(seriesProfit, tradingRecordProfit));
}
/**
 * Converts an integer into an IPv4 address.
 *
 * @param value the 32-bit integer value holding the address in network byte order
 * @return the IPv4 address
 */
public static Ip4Address valueOf(int value) {
    // Unpack the int into big-endian (network order) octets, most significant first.
    byte[] octets = new byte[INET_BYTE_LENGTH];
    octets[0] = (byte) (value >>> 24);
    octets[1] = (byte) (value >>> 16);
    octets[2] = (byte) (value >>> 8);
    octets[3] = (byte) value;
    return new Ip4Address(octets);
}
@Test
public void testValueOfForIntegerIPv4() {
    Ip4Address ipAddress;
    // Regular value.
    ipAddress = Ip4Address.valueOf(0x01020304);
    assertThat(ipAddress.toString(), is("1.2.3.4"));
    // All-zero boundary.
    ipAddress = Ip4Address.valueOf(0);
    assertThat(ipAddress.toString(), is("0.0.0.0"));
    // All-ones boundary (0xffffffff is -1 as a signed int).
    ipAddress = Ip4Address.valueOf(0xffffffff);
    assertThat(ipAddress.toString(), is("255.255.255.255"));
}
/**
 * Checks whether the given item path, private creator and tag identify the same
 * attribute as this selector. Item pointers with a negative item index are
 * compared ignoring the item index (wildcard match).
 *
 * @param itemPointers   item path of the candidate attribute
 * @param privateCreator private creator of the candidate attribute, may be null
 * @param tag            tag of the candidate attribute
 * @return true if the candidate matches this selector
 */
public boolean matches(List<ItemPointer> itemPointers, String privateCreator, int tag) {
    if (tag != this.tag) {
        return false;
    }
    if (!Objects.equals(privateCreator, this.privateCreator)) {
        return false;
    }
    final int level = level();
    if (itemPointers.size() != level) {
        return false;
    }
    for (int i = 0; i < level; i++) {
        final ItemPointer candidate = itemPointers.get(i);
        final ItemPointer reference = itemPointer(i);
        // A negative item index on either side acts as a wildcard for that index.
        final boolean wildcard = candidate.itemIndex < 0 || reference.itemIndex < 0;
        final boolean matched = wildcard
                ? candidate.equalsIgnoreItemIndex(reference)
                : candidate.equals(reference);
        if (!matched) {
            return false;
        }
    }
    return true;
}
@Test
public void testMatches() {
    // Selector for StudyInstanceUID nested under RequestAttributesSequence.
    ItemPointer ip = new ItemPointer(Tag.RequestAttributesSequence);
    AttributeSelector selector = new AttributeSelector(Tag.StudyInstanceUID, null, ip);
    // Identical item path, creator and tag must match.
    assertTrue(selector.matches(Collections.singletonList(ip), null, Tag.StudyInstanceUID));
}
/**
 * Returns the file mode (permission) bits of this file.
 * <p>
 * Base implementation: modes are not supported, so {@code -1} is returned.
 * Implementations backed by a real filesystem may override this.
 *
 * @return {@code -1}, meaning "mode not supported"
 * @throws IOException never thrown here; declared for overriding implementations
 */
public int mode() throws IOException {
    return -1;
}
@Test
public void testMode_AbstractBase() throws Exception {
    // This test checks the method's behavior in the abstract base class,
    // which generally does nothing: mode() must report -1 (unsupported).
    VirtualFile root = new VirtualFileMinimalImplementation();
    assertThat(root.mode(), is(-1));
}
/**
 * Preloads the segments of this partition (using validDocIds snapshots) before
 * normal segment loading starts, enabling fast upsert metadata recovery.
 * No-op unless preloading is enabled; guaranteed to run at most once.
 * Failures are swallowed so TableDataManager initialization can still complete.
 *
 * @param indexLoadingConfig config used to load the preloaded segments
 */
@Override
public void preloadSegments(IndexLoadingConfig indexLoadingConfig) {
    if (!_isPreloading) {
        return;
    }
    TableDataManager tableDataManager = _context.getTableDataManager();
    Preconditions.checkNotNull(tableDataManager, "Preloading segments requires tableDataManager");
    HelixManager helixManager = tableDataManager.getHelixManager();
    ExecutorService segmentPreloadExecutor = tableDataManager.getSegmentPreloadExecutor();
    // Preloading the segments with the snapshots of validDocIds for fast upsert metadata recovery.
    // Note that there is a waiting logic between the thread pool doing the segment preloading here and the
    // other helix threads about to process segment state transitions (e.g. taking segments from OFFLINE to ONLINE).
    // The thread doing the segment preloading here must complete before the other helix threads start to handle
    // segment state transitions. This is ensured by the lock here.
    _preloadLock.lock();
    try {
        // Check the flag again to ensure preloading happens only once.
        if (!_isPreloading) {
            return;
        }
        // From now on, the _isPreloading flag is true until the segments are preloaded.
        long startTime = System.currentTimeMillis();
        doPreloadSegments(tableDataManager, indexLoadingConfig, helixManager, segmentPreloadExecutor);
        long duration = System.currentTimeMillis() - startTime;
        _serverMetrics.addTimedTableValue(_tableNameWithType, ServerTimer.UPSERT_PRELOAD_TIME_MS, duration,
            TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        // Even if preloading fails, we should continue to complete the initialization, so that TableDataManager can be
        // created. Once TableDataManager is created, no more segment preloading would happen, and the normal segment
        // loading logic would be used. The segments not being preloaded successfully here would be loaded via the
        // normal segment loading logic, the one doing more costly checks on the upsert metadata.
        _logger.warn("Failed to preload segments from partition: {} of table: {}, skipping", _partitionId,
            _tableNameWithType, e);
        _serverMetrics.addMeteredTableValue(_tableNameWithType, ServerMeter.UPSERT_PRELOAD_FAILURE, 1);
        if (e instanceof InterruptedException) {
            // Restore the interrupted status in case the upper callers want to check.
            Thread.currentThread().interrupt();
        }
    } finally {
        // Preloading is finished (success or failure); later calls become no-ops.
        _isPreloading = false;
        _preloadLock.unlock();
    }
}
@Test
public void testPreloadSegments() throws Exception {
    String realtimeTableName = "testTable_REALTIME";
    String instanceId = "server01";
    Map<String, Map<String, String>> segmentAssignment = new HashMap<>();
    Map<String, SegmentZKMetadata> segmentMetadataMap = new HashMap<>();
    Set<String> preloadedSegments = new HashSet<>();
    AtomicBoolean wasPreloading = new AtomicBoolean(false);
    TableDataManager tableDataManager = mock(TableDataManager.class);
    UpsertContext upsertContext = mock(UpsertContext.class);
    when(upsertContext.isSnapshotEnabled()).thenReturn(true);
    when(upsertContext.isPreloadEnabled()).thenReturn(true);
    when(upsertContext.getTableDataManager()).thenReturn(tableDataManager);
    // Dummy manager records which segments were preloaded and whether the
    // isPreloading flag was still set while a segment was being preloaded.
    DummyPartitionUpsertMetadataManager upsertMetadataManager =
        new DummyPartitionUpsertMetadataManager(realtimeTableName, 0, upsertContext) {
            @Override
            Map<String, Map<String, String>> getSegmentAssignment(HelixManager helixManager) {
                return segmentAssignment;
            }

            @Override
            Map<String, SegmentZKMetadata> getSegmentsZKMetadata(HelixManager helixManager) {
                return segmentMetadataMap;
            }

            @Override
            void doPreloadSegmentWithSnapshot(TableDataManager tableDataManager, String segmentName,
                IndexLoadingConfig indexLoadingConfig, SegmentZKMetadata segmentZKMetadata) {
                wasPreloading.set(isPreloading());
                preloadedSegments.add(segmentName);
            }
        };
    // Setup mocks for TableConfig and Schema.
    TableConfig tableConfig = mock(TableConfig.class);
    UpsertConfig upsertConfig = new UpsertConfig();
    upsertConfig.setComparisonColumn("ts");
    upsertConfig.setEnablePreload(true);
    upsertConfig.setEnableSnapshot(true);
    when(tableConfig.getUpsertConfig()).thenReturn(upsertConfig);
    when(tableConfig.getTableName()).thenReturn(realtimeTableName);
    Schema schema = mock(Schema.class);
    when(schema.getPrimaryKeyColumns()).thenReturn(Collections.singletonList("pk"));
    IndexLoadingConfig indexLoadingConfig = mock(IndexLoadingConfig.class);
    when(indexLoadingConfig.getTableConfig()).thenReturn(tableConfig);
    // Setup mocks for HelixManager.
    HelixManager helixManager = mock(HelixManager.class);
    ZkHelixPropertyStore<ZNRecord> propertyStore = mock(ZkHelixPropertyStore.class);
    when(helixManager.getHelixPropertyStore()).thenReturn(propertyStore);
    // Setup segment assignment. Only ONLINE segments are preloaded.
    segmentAssignment.put("consuming_seg01", ImmutableMap.of(instanceId, "CONSUMING"));
    segmentAssignment.put("consuming_seg02", ImmutableMap.of(instanceId, "CONSUMING"));
    segmentAssignment.put("offline_seg01", ImmutableMap.of(instanceId, "OFFLINE"));
    segmentAssignment.put("offline_seg02", ImmutableMap.of(instanceId, "OFFLINE"));
    String seg01Name = "testTable__0__1__" + System.currentTimeMillis();
    segmentAssignment.put(seg01Name, ImmutableMap.of(instanceId, "ONLINE"));
    String seg02Name = "testTable__0__2__" + System.currentTimeMillis();
    segmentAssignment.put(seg02Name, ImmutableMap.of(instanceId, "ONLINE"));
    // This segment is skipped as it's not from partition 0.
    String seg03Name = "testTable__1__3__" + System.currentTimeMillis();
    segmentAssignment.put(seg03Name, ImmutableMap.of(instanceId, "ONLINE"));
    SegmentZKMetadata zkMetadata = new SegmentZKMetadata(seg01Name);
    zkMetadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
    segmentMetadataMap.put(seg01Name, zkMetadata);
    zkMetadata = new SegmentZKMetadata(seg02Name);
    zkMetadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
    segmentMetadataMap.put(seg02Name, zkMetadata);
    zkMetadata = new SegmentZKMetadata(seg03Name);
    zkMetadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
    segmentMetadataMap.put(seg03Name, zkMetadata);
    // Setup mocks to get file path to validDocIds snapshot.
    ExecutorService segmentPreloadExecutor = Executors.newFixedThreadPool(1);
    File tableDataDir = new File(TEMP_DIR, realtimeTableName);
    when(tableDataManager.getHelixManager()).thenReturn(helixManager);
    when(tableDataManager.getSegmentPreloadExecutor()).thenReturn(segmentPreloadExecutor);
    when(tableDataManager.getTableDataDir()).thenReturn(tableDataDir);
    InstanceDataManagerConfig instanceDataManagerConfig = mock(InstanceDataManagerConfig.class);
    when(instanceDataManagerConfig.getInstanceId()).thenReturn(instanceId);
    when(tableDataManager.getInstanceDataManagerConfig()).thenReturn(instanceDataManagerConfig);
    // No snapshot file for seg01, so it's skipped.
    File seg01IdxDir = new File(tableDataDir, seg01Name);
    FileUtils.forceMkdir(seg01IdxDir);
    when(tableDataManager.getSegmentDataDir(seg01Name, null, tableConfig)).thenReturn(seg01IdxDir);
    // seg02 gets a validDocIds snapshot file, so it is the only preload candidate.
    File seg02IdxDir = new File(tableDataDir, seg02Name);
    FileUtils.forceMkdir(seg02IdxDir);
    FileUtils.touch(new File(new File(seg02IdxDir, "v3"), V1Constants.VALID_DOC_IDS_SNAPSHOT_FILE_NAME));
    when(tableDataManager.getSegmentDataDir(seg02Name, null, tableConfig)).thenReturn(seg02IdxDir);
    try {
        // If preloading is enabled, the _isPreloading flag is true initially, until preloading is done.
        assertTrue(upsertMetadataManager.isPreloading());
        upsertMetadataManager.preloadSegments(indexLoadingConfig);
        assertEquals(preloadedSegments.size(), 1);
        assertTrue(preloadedSegments.contains(seg02Name));
        assertTrue(wasPreloading.get());
        assertFalse(upsertMetadataManager.isPreloading());
    } finally {
        segmentPreloadExecutor.shutdownNow();
    }
}
/**
 * Returns all tenant packages having the given status.
 *
 * @param status package status to filter by
 * @return matching packages; empty list when none match
 */
@Override
public List<TenantPackageDO> getTenantPackageListByStatus(Integer status) {
    return tenantPackageMapper.selectListByStatus(status);
}
@Test
public void testGetTenantPackageListByStatus() {
    // Mock data: one ENABLE-status package that should be returned.
    TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class,
        o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    tenantPackageMapper.insert(dbTenantPackage);
    // A record whose status does not match; it must be filtered out.
    tenantPackageMapper.insert(cloneIgnoreId(dbTenantPackage, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // Invoke.
    List<TenantPackageDO> list = tenantPackageService.getTenantPackageListByStatus(
        CommonStatusEnum.ENABLE.getStatus());
    // Only the ENABLE record is returned, and field-by-field equal to the inserted one.
    assertEquals(1, list.size());
    assertPojoEquals(dbTenantPackage, list.get(0));
}
/**
 * Returns reports for the cluster nodes matching the states requested.
 * When no states are given, nodes in every state are reported.
 *
 * @param request request optionally carrying a node-state filter
 * @return response holding one {@code NodeReport} per matching node
 * @throws YarnException on service failure
 */
@Override
public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request)
    throws YarnException {
    GetClusterNodesResponse response =
        recordFactory.newRecordInstance(GetClusterNodesResponse.class);
    EnumSet<NodeState> filterStates = request.getNodeStates();
    if (filterStates == null || filterStates.isEmpty()) {
        // No explicit filter: report nodes in every state.
        filterStates = EnumSet.allOf(NodeState.class);
    }
    Collection<RMNode> matchingNodes =
        RMServerUtils.queryRMNodes(rmContext, filterStates);
    List<NodeReport> reports = new ArrayList<NodeReport>(matchingNodes.size());
    for (RMNode matchingNode : matchingNodes) {
        reports.add(createNodeReports(matchingNode));
    }
    response.setNodeReports(reports);
    return response;
}
@Test
public void testGetClusterNodes() throws Exception {
    MockRM rm = new MockRM() {
        protected ClientRMService createClientRMService() {
            return new ClientRMService(this.rmContext, scheduler, this.rmAppManager,
                this.applicationACLsManager, this.queueACLsManager,
                this.getRMContext().getRMDelegationTokenSecretManager());
        };
    };
    resourceManager = rm;
    rm.start();
    RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
    labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
    // Add a healthy node with label = x
    MockNM node = rm.registerNode("host1:1234", 1024);
    Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>();
    map.put(node.getNodeId(), ImmutableSet.of("x"));
    labelsMgr.replaceLabelsOnNode(map);
    rm.sendNodeStarted(node);
    node.nodeHeartbeat(true);
    // Add and lose a node with label = y
    MockNM lostNode = rm.registerNode("host2:1235", 1024);
    rm.sendNodeStarted(lostNode);
    lostNode.nodeHeartbeat(true);
    rm.waitForState(lostNode.getNodeId(), NodeState.RUNNING);
    rm.sendNodeLost(lostNode);
    // Create a client.
    conf = new Configuration();
    rpc = YarnRPC.create(conf);
    InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
    LOG.info("Connecting to ResourceManager at " + rmAddress);
    client = (ApplicationClientProtocol) rpc.getProxy(
        ApplicationClientProtocol.class, rmAddress, conf);
    // Make call: only RUNNING nodes are requested, so the lost node is excluded.
    GetClusterNodesRequest request =
        GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING));
    List<NodeReport> nodeReports = client.getClusterNodes(request).getNodeReports();
    Assert.assertEquals(1, nodeReports.size());
    Assert.assertNotSame("Node is expected to be healthy!", NodeState.UNHEALTHY,
        nodeReports.get(0).getNodeState());
    // Check node's label = x
    Assert.assertTrue(nodeReports.get(0).getNodeLabels().contains("x"));
    Assert.assertNull(nodeReports.get(0).getDecommissioningTimeout());
    Assert.assertNull(nodeReports.get(0).getNodeUpdateType());
    // Now make the node unhealthy.
    node.nodeHeartbeat(false);
    rm.waitForState(node.getNodeId(), NodeState.UNHEALTHY);
    // Call again
    nodeReports = client.getClusterNodes(request).getNodeReports();
    Assert.assertEquals("Unhealthy nodes should not show up by default", 0,
        nodeReports.size());
    // Change label of host1 to y
    map = new HashMap<NodeId, Set<String>>();
    map.put(node.getNodeId(), ImmutableSet.of("y"));
    labelsMgr.replaceLabelsOnNode(map);
    // Now query for UNHEALTHY nodes
    request = GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.UNHEALTHY));
    nodeReports = client.getClusterNodes(request).getNodeReports();
    Assert.assertEquals(1, nodeReports.size());
    Assert.assertEquals("Node is expected to be unhealthy!", NodeState.UNHEALTHY,
        nodeReports.get(0).getNodeState());
    Assert.assertTrue(nodeReports.get(0).getNodeLabels().contains("y"));
    Assert.assertNull(nodeReports.get(0).getDecommissioningTimeout());
    Assert.assertNull(nodeReports.get(0).getNodeUpdateType());
    // Remove labels of host1
    map = new HashMap<NodeId, Set<String>>();
    map.put(node.getNodeId(), ImmutableSet.of("y"));
    labelsMgr.removeLabelsFromNode(map);
    // Query all states should return all nodes
    rm.registerNode("host3:1236", 1024);
    request = GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class));
    nodeReports = client.getClusterNodes(request).getNodeReports();
    Assert.assertEquals(3, nodeReports.size());
    // All host1-3's label should be empty (instead of null)
    for (NodeReport report : nodeReports) {
        Assert.assertTrue(report.getNodeLabels() != null
            && report.getNodeLabels().isEmpty());
        Assert.assertNull(report.getDecommissioningTimeout());
        Assert.assertNull(report.getNodeUpdateType());
    }
}
/**
 * Collects the worker metrics to report to the leading master,
 * logging at debug level how many metrics were gathered and how long it took.
 *
 * @return the worker metrics to report
 */
public static List<alluxio.grpc.Metric> reportWorkerMetrics() {
    final long startMs = System.currentTimeMillis();
    final List<alluxio.grpc.Metric> workerMetrics = reportMetrics(InstanceType.WORKER);
    final long elapsedMs = System.currentTimeMillis() - startMs;
    LOG.debug("Get the worker metrics list contains {} metrics to report to leading master in {}ms",
        workerMetrics.size(), elapsedMs);
    return workerMetrics;
}
@Test
public void testReportWorkerMetrics() {
    String metricName = "Worker.TestMetric";
    Counter counter = MetricsSystem.counter(metricName);
    // Register the metric key on first run so the counter is eligible for reporting.
    if (!MetricKey.isValid(metricName)) {
        MetricKey.register(new MetricKey.Builder(metricName)
            .setMetricType(MetricType.COUNTER).setIsClusterAggregated(true).build());
        MetricsSystem.initShouldReportMetrics(MetricsSystem.InstanceType.WORKER);
    }
    counter.inc();
    // First report carries the incremented value.
    assertEquals(1.0, MetricsSystem.reportWorkerMetrics().get(0).getValue(), 0);
    // An immediate second report is expected to be empty.
    assertEquals(0, MetricsSystem.reportWorkerMetrics().size());
    // Incrementing again makes the metric reportable once more.
    counter.inc();
    assertEquals(1.0, MetricsSystem.reportWorkerMetrics().get(0).getValue(), 0);
}
/**
 * Records the provenance of the most recently applied metadata image.
 *
 * @param lastAppliedProvenance provenance of the image that was just applied
 */
public void updateLastAppliedImageProvenance(MetadataProvenance lastAppliedProvenance) {
    this.lastAppliedProvenance.set(lastAppliedProvenance);
}
@Test
public void testUpdateLastAppliedImageProvenance() {
    MetricsRegistry registry = new MetricsRegistry();
    try (FakeMetadataLoaderMetrics fakeMetrics = new FakeMetadataLoaderMetrics(registry)) {
        // The update must be observable through the fake's provenance holder.
        MetadataProvenance provenance = new MetadataProvenance(1L, 2, 3L);
        fakeMetrics.metrics.updateLastAppliedImageProvenance(provenance);
        assertEquals(provenance, fakeMetrics.provenance.get());
    }
}
/**
 * Aggregates the subscribers of a service: the subscribers tracked locally,
 * plus those tracked by other cluster members when the cluster has more than
 * one node.
 *
 * @param namespaceId namespace of the service
 * @param serviceName name of the service
 * @return combined subscriber collection
 */
@Override
public Collection<Subscriber> getSubscribers(String namespaceId, String serviceName) {
    // Start with the subscribers known to this node.
    Collection<Subscriber> subscribers =
        new LinkedList<>(subscriberServiceLocal.getSubscribers(namespaceId, serviceName));
    // In a multi-node cluster, merge in subscribers tracked by the other members.
    boolean clustered = memberManager.getServerList().size() > 1;
    if (clustered) {
        getSubscribersFromRemotes(namespaceId, serviceName, subscribers);
    }
    return subscribers;
}
@Test
void testGetSubscribersByServiceWithLocal() {
    // Only the local subscriber is expected in the aggregated result.
    Collection<Subscriber> actual = aggregation.getSubscribers(service);
    assertEquals(1, actual.size());
    assertEquals("local", actual.iterator().next().getAddrStr());
}
/**
 * Applies coupons to a price: fixed-amount (discount) coupons first,
 * then percentage coupons on the already-reduced price.
 *
 * @param price             the original price
 * @param percentageCoupons percentage-based coupons, applied second
 * @param discountCoupons   fixed-amount coupons, applied first
 * @return the price after all coupons have been applied
 */
@Override
public int apply(
        final Integer price,
        final List<Coupon> percentageCoupons,
        final List<Coupon> discountCoupons
) {
    int discounted = price;
    // Fixed-amount coupons reduce the base price first.
    for (final Coupon coupon : discountCoupons) {
        discounted = coupon.discount(discounted);
    }
    // Percentage coupons then apply to the reduced price.
    for (final Coupon coupon : percentageCoupons) {
        discounted = coupon.discount(discounted);
    }
    return discounted;
}
@Test
void 값을_쿠폰_금액만큼_할인_후_퍼센트_할인을_진행한다() {
    // given: one 20% percentage coupon and one 10,000 fixed-amount coupon
    List<Coupon> percentageCoupons = List.of(쿠픈_생성_함께_사용_할인율_20_퍼센트());
    List<Coupon> discountCoupons = List.of(쿠픈_생성_함께_사용_할인금액_10000원());
    int price = 100000;
    // when
    int afterPrice = applyBasicPolicy.apply(price, percentageCoupons, discountCoupons);
    // then: amount discount first, then percentage: (100000 - 10000) * 0.8 = 72000
    assertThat(afterPrice).isEqualTo(72000);
}
/**
 * Registers the Ribbon {@link ServerList} backed by ServiceComb,
 * unless another {@code ServerList} bean has already been defined.
 *
 * @param clientConfig Ribbon client configuration used to initialize the list
 * @return the initialized ServiceComb server list
 */
@Bean
@ConditionalOnMissingBean
public ServerList<?> serverList(IClientConfig clientConfig) {
    final ServiceCombServiceList serviceCombServiceList = new ServiceCombServiceList();
    serviceCombServiceList.initWithNiwsConfig(clientConfig);
    return serviceCombServiceList;
}
@Test
public void serverList() {
    final IClientConfig clientConfig = Mockito.mock(IClientConfig.class);
    final ServiceCombRibbonConfiguration serviceCombRibbonConfiguration = new ServiceCombRibbonConfiguration();
    final ServerList<?> serverList = serviceCombRibbonConfiguration.serverList(clientConfig);
    // The bean must have stored the supplied client config in its private field.
    final Optional<Object> configOptional = ReflectUtils.getFieldValue(serverList, "clientConfig");
    Assert.assertTrue(configOptional.isPresent());
    Assert.assertTrue(configOptional.get() instanceof IClientConfig);
    Assert.assertEquals(clientConfig, configOptional.get());
}
/**
 * Returns the application's health check registry.
 *
 * @return the {@link HealthCheckRegistry}
 */
public HealthCheckRegistry getHealthCheckRegistry() {
    return healthCheckRegistry;
}
@Test
void hasHealthCheckRegistry() {
    // The bootstrap must always expose a non-null health check registry.
    assertThat(bootstrap.getHealthCheckRegistry())
        .isNotNull();
}
/**
 * Executes the Redis ECHO command, returning the given message unchanged.
 *
 * @param message raw bytes to echo
 * @return the bytes returned by the server (identical to the input)
 */
@Override
public byte[] echo(byte[] message) {
    return read(null, ByteArrayCodec.INSTANCE, ECHO, message);
}
@Test
public void testEcho() {
    // ECHO must return the payload byte-for-byte.
    assertThat(connection.echo("test".getBytes())).isEqualTo("test".getBytes());
}
/**
 * Inserts the value into the priority queue while keeping the backing Redis
 * list sorted. The insertion index is determined client-side via binary search
 * (under a lock) and the insert itself is performed atomically server-side
 * through a Lua script.
 *
 * @param value element to add
 * @return always {@code true} (duplicates are allowed)
 */
@Override
public boolean add(V value) {
    lock.lock();

    try {
        checkComparator();

        // Locate the sorted position of the new value.
        BinarySearchResult<V> res = binarySearch(value);
        int index = 0;
        if (res.getIndex() < 0) {
            // Not found: negative result encodes the insertion point.
            index = -(res.getIndex() + 1);
        } else {
            // Found: insert after the existing equal element.
            index = res.getIndex() + 1;
        }

        // Server-side: LINSERT before the pivot when index is within the list,
        // otherwise RPUSH to append at the tail.
        get(commandExecutor.evalWriteNoRetryAsync(getRawName(), codec, RedisCommands.EVAL_VOID,
                "local len = redis.call('llen', KEYS[1]);"
                + "if tonumber(ARGV[1]) < len then "
                    + "local pivot = redis.call('lindex', KEYS[1], ARGV[1]);"
                    + "redis.call('linsert', KEYS[1], 'before', pivot, ARGV[2]);"
                    + "return;"
                + "end;"
                + "redis.call('rpush', KEYS[1], ARGV[2]);",
                Arrays.asList(getRawName()), index, encode(value)));
        return true;
    } finally {
        lock.unlock();
    }
}
@Test
public void testSize() {
    RPriorityQueue<Integer> set = redisson.getPriorityQueue("set");
    // Duplicates are allowed, so every add counts towards the size.
    set.add(1);
    set.add(2);
    set.add(3);
    set.add(3);
    set.add(4);
    set.add(5);
    set.add(5);
    Assertions.assertEquals(7, set.size());
}
/**
 * Determines whether every quorum controller is ready for ZooKeeper metadata
 * migration.
 *
 * @param metadataVersion current metadata.version of the cluster
 * @param controllers     registrations keyed by controller node id
 * @return empty when all controllers are ready; otherwise the reason they are not
 */
public Optional<String> reasonAllControllersZkMigrationNotReady(
    MetadataVersion metadataVersion,
    Map<Integer, ControllerRegistration> controllers
) {
    if (!metadataVersion.isMigrationSupported()) {
        return Optional.of("The metadata.version too low at " + metadataVersion);
    }
    if (!metadataVersion.isControllerRegistrationSupported()) {
        // Registrations don't exist at this version; nothing further to verify.
        return Optional.empty();
    }
    for (int quorumNodeId : quorumNodeIds) {
        ControllerRegistration registration = controllers.get(quorumNodeId);
        if (registration == null) {
            return Optional.of("No registration found for controller " + quorumNodeId);
        }
        if (!registration.zkMigrationReady()) {
            return Optional.of("Controller " + quorumNodeId + " has not enabled " +
                "zookeeper.metadata.migration.enable");
        }
    }
    return Optional.empty();
}
@Test
public void testZkMigrationNotReadyIfMetadataVersionTooLow() {
    // A metadata.version below migration support must be reported as not ready.
    assertEquals(Optional.of("The metadata.version too low at 3.0-IV1"),
        QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady(
            MetadataVersion.IBP_3_0_IV1, Collections.emptyMap()));
}
/**
 * Loads the first SPI implementation of the given interface found on the
 * classpath.
 *
 * @param clazz the SPI interface type
 * @param <S>   the SPI type
 * @return the first provider discovered
 * @throws IllegalStateException when no implementation is registered
 */
public static <S> S loadFirst(final Class<S> clazz) {
    final ServiceLoader<S> loader = loadAll(clazz);
    for (final S implementation : loader) {
        // Return the first provider the loader discovers.
        return implementation;
    }
    throw new IllegalStateException(String.format(
        "No implementation defined in /META-INF/services/%s, please check whether the file exists and has the right implementation class!",
        clazz.getName()));
}
@Test
public void testLoadFirst() {
    // At least one SpiInterface implementation must be discoverable via SPI.
    assertNotNull(SpiLoadFactory.loadFirst(SpiInterface.class));
}
/**
 * Resolves public IPs for the given private IPs via the EC2
 * DescribeNetworkInterfaces API.
 * <p>
 * Degrades gracefully: on any failure each private address is mapped to
 * {@code null}, and a warning is logged only once per instance.
 *
 * @param privateAddresses private IPs of the ECS tasks; may be empty
 * @param credentials      AWS credentials used to sign the request
 * @return map from private IP to public IP ({@code null} values when unresolved)
 */
Map<String, String> describeNetworkInterfaces(List<String> privateAddresses, AwsCredentials credentials) {
    if (privateAddresses.isEmpty()) {
        return Collections.emptyMap();
    }
    try {
        Map<String, String> attributes = createAttributesDescribeNetworkInterfaces(privateAddresses);
        Map<String, String> headers = createHeaders(attributes, credentials);
        String response = callAwsService(attributes, headers);
        return parseDescribeNetworkInterfaces(response);
    } catch (Exception e) {
        LOGGER.finest(e);
        // Log warning only once.
        if (!isNoPublicIpAlreadyLogged) {
            LOGGER.warning("Cannot fetch the public IPs of ECS Tasks. You won't be able to use "
                + "Hazelcast Smart Client from outside of this VPC.");
            isNoPublicIpAlreadyLogged = true;
        }
        // Best-effort fallback: every private address maps to an unknown public IP.
        Map<String, String> map = new HashMap<>();
        privateAddresses.forEach(k -> map.put(k, null));
        return map;
    }
}
@Test
public void describeNetworkInterfacesEmptyPrivateAddressList() {
    // given
    List<String> privateAddresses = Collections.emptyList();
    // when
    Map<String, String> result = awsEc2Api.describeNetworkInterfaces(privateAddresses, CREDENTIALS);
    // then: empty input short-circuits, so no AWS call is made at all
    assertEquals(0, result.size());
    verify(exactly(0), getRequestedFor(urlEqualTo("/?Action=DescribeNetworkInterfaces&Version=2016-11-15")));
}
/**
 * Replaces ACL entries with those of the given ACL spec, recalculating mask
 * entries as needed. Replacement happens per scope: a scope (access/default)
 * present in the spec fully replaces the existing entries of that scope, while
 * a scope absent from the spec is carried over unchanged.
 *
 * @param existingAcl current ACL entries
 * @param inAclSpec   replacement ACL spec
 * @return new validated ACL
 * @throws AclException if the resulting ACL would be invalid
 */
public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
    ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
    ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
    // Replacement is done separately for each scope: access and default.
    EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class);
    EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
    EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
    for (AclEntry aclSpecEntry: aclSpec) {
        scopeDirty.add(aclSpecEntry.getScope());
        if (aclSpecEntry.getType() == MASK) {
            // Mask entries are held back so masks can be recalculated afterwards.
            providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
            maskDirty.add(aclSpecEntry.getScope());
        } else {
            aclBuilder.add(aclSpecEntry);
        }
    }
    // Copy existing entries if the scope was not replaced.
    for (AclEntry existingEntry: existingAcl) {
        if (!scopeDirty.contains(existingEntry.getScope())) {
            if (existingEntry.getType() == MASK) {
                providedMask.put(existingEntry.getScope(), existingEntry);
            } else {
                aclBuilder.add(existingEntry);
            }
        }
    }
    copyDefaultsIfNeeded(aclBuilder);
    calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
    return buildAndValidateAcl(aclBuilder);
}
@Test
public void testReplaceAclEntriesDefaultMaskPreserved() throws AclException {
    // Existing ACL has both access and default scope entries, including masks.
    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(ACCESS, USER, ALL))
        .add(aclEntry(ACCESS, USER, "bruce", READ))
        .add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
        .add(aclEntry(ACCESS, GROUP, READ))
        .add(aclEntry(ACCESS, MASK, READ_WRITE))
        .add(aclEntry(ACCESS, OTHER, READ))
        .add(aclEntry(DEFAULT, USER, ALL))
        .add(aclEntry(DEFAULT, USER, "diana", ALL))
        .add(aclEntry(DEFAULT, GROUP, READ))
        .add(aclEntry(DEFAULT, MASK, READ))
        .add(aclEntry(DEFAULT, OTHER, NONE))
        .build();
    // The spec only touches the ACCESS scope.
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(ACCESS, USER, ALL),
        aclEntry(ACCESS, USER, "bruce", READ),
        aclEntry(ACCESS, USER, "diana", READ_WRITE),
        aclEntry(ACCESS, GROUP, ALL),
        aclEntry(ACCESS, OTHER, READ));
    // Access entries replaced (with recalculated mask); default entries,
    // including the default mask, are carried over unchanged.
    List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(ACCESS, USER, ALL))
        .add(aclEntry(ACCESS, USER, "bruce", READ))
        .add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
        .add(aclEntry(ACCESS, GROUP, ALL))
        .add(aclEntry(ACCESS, MASK, ALL))
        .add(aclEntry(ACCESS, OTHER, READ))
        .add(aclEntry(DEFAULT, USER, ALL))
        .add(aclEntry(DEFAULT, USER, "diana", ALL))
        .add(aclEntry(DEFAULT, GROUP, READ))
        .add(aclEntry(DEFAULT, MASK, READ))
        .add(aclEntry(DEFAULT, OTHER, NONE))
        .build();
    assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
/**
 * Creates a folder on the DeepBox backend.
 * <p>
 * Folders directly below a box's documents root are created via the dedicated
 * endpoint; nested folders reference their parent node id. Fails with
 * {@link ConflictException} when the folder already exists.
 *
 * @param folder folder to create
 * @param status transfer status (unused)
 * @return the folder with attributes from the created node
 * @throws BackgroundException on conflict, API failure, or empty creation response
 */
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
    try {
        if(new DeepboxFindFeature(session, fileid).find(folder)) {
            throw new ConflictException(folder.getAbsolute());
        }
        final Folder upload = new Folder();
        upload.setName(folder.getName());
        upload.setI18n(Collections.emptyMap());
        final List<Folder> body = Collections.singletonList(upload);
        final String deepBoxNodeId = fileid.getDeepBoxNodeId(folder.getParent());
        final String boxNodeId = fileid.getBoxNodeId(folder.getParent());
        final List<FolderAdded> created;
        if(new DeepboxPathContainerService(session).isDocuments(folder.getParent())) {
            // Folder directly below the box documents root has no parent node id
            created = new PathRestControllerApi(session.getClient()).addFolders1(
                    body, deepBoxNodeId, boxNodeId
            );
        }
        else {
            final String parentNodeId = fileid.getFileId(folder.getParent());
            created = new PathRestControllerApi(session.getClient()).addFolders(
                    body, deepBoxNodeId, boxNodeId, parentNodeId
            );
        }
        final FolderAdded f = created.stream().findFirst().orElse(null);
        if(null == f) {
            // Fix: previously only the cache update was guarded while f.getNode() was
            // still dereferenced below, raising a NullPointerException when the
            // service returned an empty list. Fail with a mapped exception instead.
            throw new BackgroundException(String.format("Cannot create folder %s", folder.getAbsolute()),
                    "Empty response from server for folder creation");
        }
        fileid.cache(folder, f.getNode().getNodeId());
        return folder.withAttributes(new DeepboxAttributesFinderFeature(session, fileid).toAttributes(f.getNode()));
    }
    catch(ApiException e) {
        throw new DeepboxExceptionMappingService(fileid).map("Cannot create folder {0}", e, folder);
    }
}
@Test
public void testDocuments() throws Exception {
    final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
    final DeepboxDirectoryFeature directory = new DeepboxDirectoryFeature(session, nodeid);
    // Create a folder directly below the box Documents root.
    final Path parent = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents", EnumSet.of(Path.Type.directory));
    final Path folder = new Path(parent, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    directory.mkdir(folder, new TransferStatus());
    // New folder exists and is empty.
    assertTrue(new DeepboxFindFeature(session, nodeid).find(folder.withAttributes(new PathAttributes()), new DisabledListProgressListener()));
    assertEquals(0, new DeepboxListService(session, nodeid).list(folder, new DisabledListProgressListener()).size());
    // Delete and verify it is gone — the id lookup must fail afterwards.
    new DeepboxDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertThrows(NotfoundException.class, () -> nodeid.getFileId(folder.withAttributes(new PathAttributes())));
    assertFalse(new DeepboxFindFeature(session, nodeid).find(folder.withAttributes(new PathAttributes())));
}
/**
 * Creates a coder for the message type described by the given descriptor.
 *
 * @param protoMessageDescriptor descriptor of the proto message to encode/decode
 * @return coder for that message type
 */
public static DynamicProtoCoder of(Descriptors.Descriptor protoMessageDescriptor) {
    final ProtoDomain domain = ProtoDomain.buildFrom(protoMessageDescriptor);
    final String messageName = protoMessageDescriptor.getFullName();
    return new DynamicProtoCoder(domain, messageName, ImmutableSet.of());
}
@Test
public void testDynamicNestedRepeatedMessage() throws Exception {
    // Build a MessageA with a scalar field and two repeated nested MessageB values.
    DynamicMessage message =
        DynamicMessage.newBuilder(MessageA.getDescriptor())
            .setField(
                MessageA.getDescriptor().findFieldByNumber(MessageA.FIELD1_FIELD_NUMBER), "foo")
            .addRepeatedField(
                MessageA.getDescriptor().findFieldByNumber(MessageA.FIELD2_FIELD_NUMBER),
                DynamicMessage.newBuilder(MessageB.getDescriptor())
                    .setField(
                        MessageB.getDescriptor().findFieldByNumber(MessageB.FIELD1_FIELD_NUMBER),
                        true)
                    .build())
            .addRepeatedField(
                MessageA.getDescriptor().findFieldByNumber(MessageA.FIELD2_FIELD_NUMBER),
                DynamicMessage.newBuilder(MessageB.getDescriptor())
                    .setField(
                        MessageB.getDescriptor().findFieldByNumber(MessageB.FIELD1_FIELD_NUMBER),
                        false)
                    .build())
            .build();
    Coder<DynamicMessage> coder = DynamicProtoCoder.of(message.getDescriptorForType());

    // Special code to check the DynamicMessage equality (@see IsDynamicMessageEqual)
    for (Coder.Context context : ALL_CONTEXTS) {
        CoderProperties.coderDecodeEncodeInContext(
            coder, context, message, IsDynamicMessageEqual.equalTo(message));
    }
}
/**
 * Populates the given group config builder with this group's index, name,
 * optional partition spec, member nodes and aggregate capacity.
 *
 * @param builder target builder for this group's distribution config
 */
public void getConfig(StorDistributionConfig.Group.Builder builder) {
    // Fall back to the "invalid" placeholder when index/name were never assigned.
    builder.index(index != null ? index : "invalid");
    builder.name(name != null ? name : "invalid");
    partitions.ifPresent(builder::partitions);
    for (StorageNode node : nodes) {
        StorDistributionConfig.Group.Nodes.Builder nodeBuilder =
                new StorDistributionConfig.Group.Nodes.Builder();
        nodeBuilder.index(node.getDistributionKey());
        nodeBuilder.retired(node.isRetired());
        builder.nodes.add(nodeBuilder);
    }
    builder.capacity(getCapacity());
}
@Test
void testGroupCapacity() throws Exception {
    // Two subgroups with per-node capacities; group capacity must be the sum of its nodes.
    ContentCluster cluster = parse(
        "<content version=\"1.0\" id=\"storage\">\n" +
        "  <redundancy>2</redundancy>" +
        "  <documents/>" +
        "  <group>\n" +
        "    <distribution partitions=\"1|*\"/>\n" +
        "    <group distribution-key=\"0\" name=\"sub1\">\n" +
        "      <node hostalias=\"mockhost\" capacity=\"0.5\" distribution-key=\"0\"/>\n" +
        "      <node hostalias=\"mockhost\" capacity=\"1.5\" distribution-key=\"1\"/>\n" +
        "    </group>\n" +
        "    <group distribution-key=\"1\" name=\"sub2\">\n" +
        "      <node hostalias=\"mockhost\" capacity=\"2.0\" distribution-key=\"2\"/>\n" +
        "      <node hostalias=\"mockhost\" capacity=\"1.5\" distribution-key=\"3\"/>\n" +
        "    </group>\n" +
        "  </group>\n" +
        "</content>"
    );
    StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
    cluster.getConfig(builder);
    StorDistributionConfig config = new StorDistributionConfig(builder);
    // Root (5.5 = 0.5+1.5+2.0+1.5) plus the two subgroups (2.0 and 3.5).
    assertEquals(3, config.group().size());
    assertEquals(5.5, config.group(0).capacity(), 0.001);
    assertEquals(2, config.group(1).capacity(), 0.001);
    assertEquals(3.5, config.group(2).capacity(), 0.001);
    // The same capacities must be reflected in the DistributionConfig view.
    DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
    cluster.getConfig(distributionBuilder);
    DistributionConfig.Cluster clusterConfig = distributionBuilder.build().cluster("storage");
    assertEquals(3, clusterConfig.group().size());
    assertEquals(5.5, clusterConfig.group(0).capacity(), 0.001);
    assertEquals(2, clusterConfig.group(1).capacity(), 0.001);
    assertEquals(3.5, clusterConfig.group(2).capacity(), 0.001);
}
/**
 * Converts DistSQL rule segments into a readwrite-splitting rule configuration.
 * Segments without a load balancer get a null load-balancer name; segments with
 * one also register the algorithm under a name derived from the rule name.
 *
 * @param ruleSegments parsed rule segments to convert
 * @return configuration holding the data source groups and load-balancer algorithms
 */
public static ReadwriteSplittingRuleConfiguration convert(final Collection<ReadwriteSplittingRuleSegment> ruleSegments) {
    Collection<ReadwriteSplittingDataSourceGroupRuleConfiguration> dataSourceGroups = new LinkedList<>();
    Map<String, AlgorithmConfiguration> loadBalancers = new HashMap<>(ruleSegments.size(), 1F);
    for (ReadwriteSplittingRuleSegment segment : ruleSegments) {
        String loadBalancerName = null;
        if (segment.getLoadBalancer() != null) {
            loadBalancerName = getLoadBalancerName(segment.getName(), segment.getLoadBalancer().getName());
            loadBalancers.put(loadBalancerName, createLoadBalancer(segment));
        }
        dataSourceGroups.add(createDataSourceGroupRuleConfiguration(segment, loadBalancerName));
    }
    return new ReadwriteSplittingRuleConfiguration(dataSourceGroups, loadBalancers);
}
/**
 * Converting an empty collection of rule segments must yield a configuration
 * with no data source groups and no load balancers (not null collections).
 */
@Test
void assertEmptyRuleSegmentConvertResult() {
    ReadwriteSplittingRuleConfiguration actualEmptyRuleSegmentConvertResult = ReadwriteSplittingRuleStatementConverter
            .convert(Collections.emptyList());
    assertTrue(actualEmptyRuleSegmentConvertResult.getDataSourceGroups().isEmpty());
    assertTrue(actualEmptyRuleSegmentConvertResult.getLoadBalancers().isEmpty());
}
/**
 * Cancels every current execution attempt of this vertex and returns a future
 * that completes once all of their resources have been released.
 */
@Override
public CompletableFuture<?> cancel() {
    // Cancel each attempt first, then collect its release future; the returned
    // future combines all releases so the caller can await full cleanup.
    final List<CompletableFuture<?>> releaseFutures = new ArrayList<>(currentExecutions.size());
    for (Execution attempt : currentExecutions.values()) {
        attempt.cancel();
        releaseFutures.add(attempt.getReleaseFuture());
    }
    return FutureUtils.combineAll(releaseFutures);
}
/**
 * Cancelling a speculative execution vertex must cancel both the original
 * attempt and any speculative attempt created afterwards.
 */
@Test
void testCancel() throws Exception {
    final SpeculativeExecutionVertex ev = createSpeculativeExecutionVertex();
    final Execution e1 = ev.getCurrentExecutionAttempt();
    final Execution e2 = ev.createNewSpeculativeExecution(System.currentTimeMillis());

    ev.cancel();

    assertThat(e1.getState()).isSameAs(ExecutionState.CANCELED);
    assertThat(e2.getState()).isSameAs(ExecutionState.CANCELED);
}
/**
 * Applies a SET statement to a session variable, optionally persisting it
 * globally. Deprecated variables are silently ignored. For GLOBAL scope
 * (unless onlySetSessionVar), the default-session value is updated under the
 * write lock and an edit-log entry is written before the session copy is set.
 *
 * @param sessionVariable   the session whose variable field is updated
 * @param setVar            the parsed SET clause (name, type, value expression)
 * @param onlySetSessionVar if true, skip the global/edit-log path even for SET GLOBAL
 * @throws DdlException on unknown variable, wrong scope/type, or missing default
 */
public static void setSystemVariable(SessionVariable sessionVariable, SystemVariable setVar, boolean onlySetSessionVar)
        throws DdlException {
    // Deprecated variables are accepted but have no effect.
    if (SessionVariable.DEPRECATED_VARIABLES.stream().anyMatch(c -> c.equalsIgnoreCase(setVar.getVariable()))) {
        return;
    }

    checkSystemVariableExist(setVar);

    // VERBOSE is not a settable scope.
    if (setVar.getType() == SetType.VERBOSE) {
        ErrorReport.reportDdlException(ErrorCode.ERR_WRONG_TYPE_FOR_VAR, setVar.getVariable());
    }

    // Check variable attribute and setVar
    VarContext ctx = getVarContext(setVar.getVariable());
    checkUpdate(setVar, ctx.getFlag());

    // To modify to default value.
    VarAttr attr = ctx.getField().getAnnotation(VarAttr.class);
    String value;
    // If value is null, this is `set variable = DEFAULT`
    if (setVar.getResolvedExpression() != null) {
        value = setVar.getResolvedExpression().getStringValue();
    } else {
        value = ctx.getDefaultValue();
        if (value == null) {
            // Variable declares no default; DEFAULT is an error for it.
            ErrorReport.reportDdlException(ErrorCode.ERR_NO_DEFAULT, attr.name());
        }
    }

    if (!onlySetSessionVar && setVar.getType() == SetType.GLOBAL) {
        WLOCK.lock();
        try {
            // Update the global default under the lock, then persist the change
            // so replicas/restarts see the new global value.
            setValue(ctx.getObj(), ctx.getField(), value);
            // write edit log
            GlobalVarPersistInfo info =
                    new GlobalVarPersistInfo(DEFAULT_SESSION_VARIABLE, Lists.newArrayList(attr.name()));
            EditLog editLog = GlobalStateMgr.getCurrentState().getEditLog();
            editLog.logGlobalVariableV2(info);
        } finally {
            WLOCK.unlock();
        }
    }

    // set session variable
    setValue(sessionVariable, ctx.getField(), value);
}
/**
 * SET GLOBAL on the session-only 'warehouse' variable must be rejected with
 * a DdlException carrying the scope-violation message.
 */
@Test
public void testWarehouseVar() {
    SystemVariable systemVariable = new SystemVariable(SetType.GLOBAL, SessionVariable.WAREHOUSE_NAME,
            new StringLiteral("warehouse_1"));
    try {
        VariableMgr.setSystemVariable(null, systemVariable, false);
        // Bug fix: previously the test passed silently when no exception was
        // thrown, because the only assertion lived inside the catch block.
        Assert.fail("Expected DdlException when using SET GLOBAL on a SESSION-only variable");
    } catch (DdlException e) {
        Assert.assertEquals("Variable 'warehouse' is a SESSION variable and can't be used with SET GLOBAL",
                e.getMessage());
    }
}
/**
 * Handles a gRPC SendMessage request: validates the batch is non-empty and the
 * topic is legal, then delegates to the messaging processor and converts its
 * result into a SendMessageResponse. Any synchronous failure is surfaced by
 * completing the returned future exceptionally rather than throwing.
 *
 * @param ctx     proxy call context
 * @param request gRPC request carrying one or more messages
 * @return future completed with the response, or exceptionally on error
 */
public CompletableFuture<SendMessageResponse> sendMessage(ProxyContext ctx, SendMessageRequest request) {
    CompletableFuture<SendMessageResponse> future = new CompletableFuture<>();
    try {
        if (request.getMessagesCount() <= 0) {
            throw new GrpcProxyException(Code.MESSAGE_CORRUPTED, "no message to send");
        }
        List<apache.rocketmq.v2.Message> messageList = request.getMessagesList();
        // Topic/sysFlag are derived from the first message; all messages in the
        // batch are sent together via buildMessage.
        apache.rocketmq.v2.Message message = messageList.get(0);
        Resource topic = message.getTopic();
        validateTopic(topic);
        future = this.messagingProcessor.sendMessage(
            ctx,
            new SendMessageQueueSelector(request),
            topic.getName(),
            buildSysFlag(message),
            buildMessage(ctx, request.getMessagesList(), topic)
        ).thenApply(result -> convertToSendMessageResponse(ctx, request, result));
    } catch (Throwable t) {
        // Convert synchronous failures into an exceptional future so callers
        // have a single error-handling path.
        future.completeExceptionally(t);
    }
    return future;
}
/**
 * Happy-path send: with the processor stubbed to return SEND_OK, the activity
 * must answer Code.OK and echo the client-generated message id in the entry.
 */
@Test
public void sendMessage() throws Exception {
    String msgId = MessageClientIDSetter.createUniqID();

    SendResult sendResult = new SendResult();
    sendResult.setSendStatus(SendStatus.SEND_OK);
    sendResult.setMsgId(msgId);
    when(this.messagingProcessor.sendMessage(any(), any(), anyString(), anyInt(), any()))
        .thenReturn(CompletableFuture.completedFuture(Lists.newArrayList(sendResult)));

    SendMessageResponse response = this.sendMessageActivity.sendMessage(
        createContext(),
        SendMessageRequest.newBuilder()
            .addMessages(Message.newBuilder()
                .setTopic(Resource.newBuilder()
                    .setName(TOPIC)
                    .build())
                .setSystemProperties(SystemProperties.newBuilder()
                    .setMessageId(msgId)
                    .setQueueId(0)
                    .setMessageType(MessageType.NORMAL)
                    .setBornTimestamp(Timestamps.fromMillis(System.currentTimeMillis()))
                    .setBornHost(StringUtils.defaultString(NetworkUtil.getLocalAddress(), "127.0.0.1:1234"))
                    .build())
                .setBody(ByteString.copyFromUtf8("123"))
                .build())
            .build()
    ).get();

    assertEquals(Code.OK, response.getStatus().getCode());
    assertEquals(msgId, response.getEntries(0).getMessageId());
}
/**
 * Materializes this stream to the given topic using the stream's own
 * key/value serdes and no explicit partitioner (third Produced arg is null,
 * so the producer's default partitioning applies).
 */
@Override
public void to(final String topic) {
    to(topic, Produced.with(keySerde, valueSerde, null));
}
/**
 * to(topic, produced) must reject a null Produced with an NPE whose message
 * names the offending parameter.
 */
@Test
public void shouldNotAllowNullProducedOnToWithTopicName() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.to("topic", null));
    assertThat(exception.getMessage(), equalTo("produced can't be null"));
}
static String parseAccessToken(String responseBody) throws IOException { ObjectMapper mapper = new ObjectMapper(); JsonNode rootNode = mapper.readTree(responseBody); JsonNode accessTokenNode = rootNode.at("/access_token"); if (accessTokenNode == null) { // Only grab the first N characters so that if the response body is huge, we don't // blow up. String snippet = responseBody; if (snippet.length() > MAX_RESPONSE_BODY_LENGTH) { int actualLength = responseBody.length(); String s = responseBody.substring(0, MAX_RESPONSE_BODY_LENGTH); snippet = String.format("%s (trimmed to first %d characters out of %d total)", s, MAX_RESPONSE_BODY_LENGTH, actualLength); } throw new IOException(String.format("The token endpoint response did not contain an access_token value. Response: (%s)", snippet)); } return sanitizeString("the token endpoint response's access_token JSON attribute", accessTokenNode.textValue()); }
/**
 * A response body containing an access_token attribute must round-trip to the
 * same token string.
 */
@Test
public void testParseAccessToken() throws IOException {
    String expected = "abc";
    ObjectMapper mapper = new ObjectMapper();
    ObjectNode node = mapper.createObjectNode();
    node.put("access_token", expected);

    String actual = HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node));
    assertEquals(expected, actual);
}
// Test-only accessor exposing the configured decay period (milliseconds).
@VisibleForTesting
long getDecayPeriodMillis() {
    return decayPeriodMillis;
}
/**
 * The decay period must fall back to the default when unset, and honor a
 * port-specific ("ipc.52.") override even when the cost provider is configured
 * only at the port-less ("ipc.") level.
 */
@Test
@SuppressWarnings("deprecation")
public void testParsePeriodWithPortLessCostProvider() {
    // By default
    scheduler = new DecayRpcScheduler(1, "ipc.52", new Configuration());
    assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_DEFAULT,
        scheduler.getDecayPeriodMillis());

    // Custom
    Configuration conf = new Configuration();
    conf.setLong("ipc.52." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, 1058);
    // Remove the port-specific provider so the port-less one below is used.
    conf.unset("ipc.52." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY);
    conf.set("ipc." + CommonConfigurationKeys.IPC_COST_PROVIDER_KEY,
        "org.apache.hadoop.ipc.TestDecayRpcScheduler$TestCostProvider");
    scheduler = new DecayRpcScheduler(1, "ipc.52", conf);
    assertEquals(1058L, scheduler.getDecayPeriodMillis());
}
// Delegates database enumeration to the Delta Lake operations helper.
@Override
public List<String> listDbNames() {
    return deltaOps.getAllDatabaseNames();
}
/**
 * listDbNames must return the fixture's two databases in order.
 */
@Test
public void testListDbNames() {
    List<String> dbNames = deltaLakeMetadata.listDbNames();
    Assert.assertEquals(2, dbNames.size());
    Assert.assertEquals("db1", dbNames.get(0));
    Assert.assertEquals("db2", dbNames.get(1));
}
// Returns the configured user email attribute, or empty when unset.
Optional<String> getUserEmail() {
    return configuration.get(USER_EMAIL_ATTRIBUTE);
}
// Without the email setting configured, getUserEmail() must be empty.
@Test
public void return_empty_user_email_when_no_setting() {
    assertThat(underTest.getUserEmail()).isNotPresent();
}
/**
 * Implements ZSCAN as a Spring Data cursor. Each doScan round-trip issues one
 * ZSCAN command with the current cursor id plus optional MATCH/COUNT options,
 * and pins subsequent calls to the same Redis node (the {@code client} field)
 * so cursor state stays consistent in a cluster.
 *
 * @throws UnsupportedOperationException when called inside a pipeline or transaction
 */
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
    return new KeyBoundCursor<Tuple>(key, 0, options) {

        // Node that served the previous iteration; follow-up scans must go to it.
        private RedisClient client;

        @Override
        protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
            if (isQueueing() || isPipelined()) {
                throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
            }

            List<Object> args = new ArrayList<Object>();
            args.add(key);
            // Redis cursors are unsigned 64-bit values.
            args.add(Long.toUnsignedString(cursorId));
            if (options.getPattern() != null) {
                args.add("MATCH");
                args.add(options.getPattern());
            }
            if (options.getCount() != null) {
                args.add("COUNT");
                args.add(options.getCount());
            }

            RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
            ListScanResult<Tuple> res = syncFuture(f);
            client = res.getRedisClient();
            return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
        }
    }.open();
}
/**
 * zScan over a two-member sorted set must iterate both tuples in score order.
 */
@Test
public void testZScan() {
    connection.zAdd("key".getBytes(), 1, "value1".getBytes());
    connection.zAdd("key".getBytes(), 2, "value2".getBytes());

    Cursor<RedisZSetCommands.Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build());
    assertThat(t.hasNext()).isTrue();
    assertThat(t.next().getValue()).isEqualTo("value1".getBytes());
    assertThat(t.hasNext()).isTrue();
    assertThat(t.next().getValue()).isEqualTo("value2".getBytes());
}
/**
 * Stapler entry point for saving SCM file content. Reads the request body as
 * JSON, forces the GitHub save-file request class, binds it, and — if an SCM
 * content provider resolves for the item — delegates to the typed overload.
 *
 * @throws ServiceException.UnexpectedErrorException if the body cannot be read
 * @throws ServiceException.BadRequestException on bind failure or when no
 *         provider matches the pipeline
 */
@Override
public Object saveContent(@NonNull StaplerRequest staplerRequest, @NonNull Item item) {
    JSONObject body;
    try {
        body = JSONObject.fromObject(IOUtils.toString(staplerRequest.getReader()));
    } catch (IOException e) {
        throw new ServiceException.UnexpectedErrorException("Failed to read request body");
    }
    // Pin the concrete request class so Stapler binds to the GitHub implementation.
    body.put("$class", "io.jenkins.blueocean.blueocean_github_pipeline.GithubScmSaveFileRequest");
    GithubScmSaveFileRequest request = staplerRequest.bindJSON(GithubScmSaveFileRequest.class, body);
    if(request == null){
        throw new ServiceException.BadRequestException(new ErrorMessage(400, "Failed to bind request"));
    }

    // NOTE(review): the resolved provider is only used as an existence check;
    // the call below goes to this class's own overload, not to
    // scmContentProvider — confirm this is intentional.
    ScmContentProvider scmContentProvider = ScmContentProvider.resolve(item);
    if(scmContentProvider != null){
        return saveContent(request, item);
    }
    throw new ServiceException.BadRequestException("No save scm content provider found for pipeline: " + item.getFullName());
}
/**
 * Saving content to a GitHub Enterprise org folder without a usable credential
 * must fail with PreconditionRequired ("no credential found"); reaching the end
 * of the method without that exception fails the test.
 */
@Test
public void unauthorizedSaveContentToOrgFolderGHEShouldFail() throws UnirestException, IOException {
    User alice = User.get("alice");
    alice.setFullName("Alice Cooper");
    alice.addProperty(new Mailer.UserProperty("alice@jenkins-ci.org"));

    String aliceCredentialId = createGithubEnterpriseCredential(alice);

    StaplerRequest staplerRequest = mockStapler(GithubEnterpriseScm.ID);

    // NOTE(review): path "Jankinsfile" here vs "Jenkinsfile" in the JSON body
    // below — confirm the spelling difference is intentional.
    GitContent content = new GitContent.Builder().autoCreateBranch(true).base64Data("c2xlZXAgMTUKbm9kZSB7CiAgY2hlY2tvdXQgc2NtCiAgc2ggJ2xzIC1sJwp9\nCnNsZWVwIDE1Cg==\n")
            .branch("test1").message("another commit").owner("cloudbeers").path("Jankinsfile").repo("PR-demo").sha("e23b8ef5c2c4244889bf94db6c05cc08ea138aef").build();

    when(staplerRequest.bindJSON(Mockito.eq(GithubScmSaveFileRequest.class), Mockito.any(JSONObject.class))).thenReturn(new GithubScmSaveFileRequest(content));

    // NOTE(review): the mocked MBP is built with `user`, not `alice` — verify
    // which account is meant to own the credential domain here.
    MultiBranchProject mbp = mockMbp(aliceCredentialId, user, GithubEnterpriseScm.DOMAIN_NAME);

    String request = "{\n" +
            "  \"content\" : {\n" +
            "    \"message\" : \"first commit\",\n" +
            "    \"path\" : \"Jenkinsfile\",\n" +
            "    \"branch\" : \"test1\",\n" +
            "    \"repo\" : \"PR-demo\",\n" +
            "    \"sha\" : \"e23b8ef5c2c4244889bf94db6c05cc08ea138aef\",\n" +
            "    \"base64Data\" : "+"\"c2xlZXAgMTUKbm9kZSB7CiAgY2hlY2tvdXQgc2NtCiAgc2ggJ2xzIC1sJwp9\nCnNsZWVwIDE1Cg==\n\""+
            "  }\n" +
            "}";

    when(staplerRequest.getReader()).thenReturn(new BufferedReader(new StringReader(request), request.length()));

    try {
        //Bob trying to access content but his credential is not setup so should fail
        new GithubScmContentProvider().saveContent(staplerRequest, mbp);
    }catch (ServiceException.PreconditionRequired e){
        assertEquals("Can't access content from github: no credential found", e.getMessage());
        return;
    }
    fail("Should have failed with PreConditionException");
}
/**
 * Returns the shared never-matching matcher. The unchecked cast is safe
 * because the constant matcher ignores its input entirely.
 */
public static <P> Matcher<P> neverMatch() {
    return (Matcher<P>) Constants.NEVER_MATCH;
}
// neverMatch() must reject every input, including null.
@Test
void neverMatch_unmatched() {
    assertThat(neverMatch().matches(null)).isFalse();
}
// Builds a TupleIdentifier whose id is deterministically derived from the name.
static TupleIdentifier createTupleIdentifierByName(String name) {
    return new TupleIdentifier(generateIdFromName(name), name);
}
/**
 * createTupleIdentifierByName must echo the given name and generate a non-null id.
 */
@Test
void createTupleIdentifierByName() {
    String name = "name";
    TupleIdentifier retrieved = TupleIdentifier.createTupleIdentifierByName(name);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved.getName()).isEqualTo(name);
    assertThat(retrieved.getId()).isNotNull();
}
/**
 * Computes (and caches in the static {@code cacheDir} field) the naming cache
 * directory for a namespace:
 * {@code <base>/nacos[/<registryDir>]/naming/<namespace>}, where the base is
 * the JM snapshot path when set and non-blank, otherwise the user home.
 *
 * @param namespace  naming namespace appended as the last path segment
 * @param properties client properties supplying the path settings
 * @return the resolved cache directory
 */
public static String initCacheDir(String namespace, NacosClientProperties properties) {
    String jmSnapshotPath = properties.getProperty(JM_SNAPSHOT_PATH_PROPERTY);

    String namingCacheRegistryDir = "";
    if (properties.getProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR) != null) {
        namingCacheRegistryDir = File.separator + properties.getProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR);
    }

    // The two former branches differed only in the base directory; pick the
    // base once and build the (previously duplicated) suffix a single time.
    String baseDir = StringUtils.isBlank(jmSnapshotPath)
            ? properties.getProperty(USER_HOME_PROPERTY)
            : jmSnapshotPath;
    cacheDir = baseDir + File.separator + FILE_PATH_NACOS + namingCacheRegistryDir
            + File.separator + FILE_PATH_NAMING + File.separator + namespace;
    return cacheDir;
}
/**
 * With no snapshot path or registry dir configured, the cache dir must default
 * to {@code <user.home>/nacos/naming/<namespace>}.
 */
@Test
void testInitCacheDirWithDefaultRootAndWithoutCache() {
    System.setProperty("user.home", "/home/admin");
    String actual = CacheDirUtil.initCacheDir("test", NacosClientProperties.PROTOTYPE.derive());
    assertEquals("/home/admin/nacos/naming/test", actual);
}
// Marks JRaft support on the naming ability during server ability initialization.
@Override
public void initialize(ServerAbilities abilities) {
    abilities.getNamingAbility().setSupportJraft(true);
}
/**
 * initialize must flip supportJraft from its false default to true.
 */
@Test
void testInitialize() {
    NamingAbilityInitializer initializer = new NamingAbilityInitializer();
    ServerAbilities abilities = new ServerAbilities();
    assertFalse(abilities.getNamingAbility().isSupportJraft());
    initializer.initialize(abilities);
    assertTrue(abilities.getNamingAbility().isSupportJraft());
}
/**
 * Builds tri-state selections for resources across a set of agents: a resource
 * is "associated" with an agent when the agent already lists it, is keyed by
 * its name, and is always enabled for editing.
 *
 * @param resourceConfigs resources to offer as selections
 * @param agents          agents whose current resources determine the state
 * @return one selection per resource (checked/unchecked/indeterminate)
 */
public static List<TriStateSelection> forAgentsResources(Set<ResourceConfig> resourceConfigs, Agents agents) {
    return convert(resourceConfigs, agents, new Assigner<>() {
        @Override
        public boolean shouldAssociate(Agent agent, ResourceConfig resourceConfig) {
            return agent.getResourcesAsList().contains(resourceConfig.getName());
        }

        @Override
        public String identifier(ResourceConfig resourceConfig) {
            return resourceConfig.getName();
        }

        @Override
        public boolean shouldEnable(Agent agent, ResourceConfig resourceConfig) {
            // Resource checkboxes are never disabled per-agent.
            return true;
        }
    });
}
/**
 * When only some agents carry a resource, the selection for it must be
 * "nochange" (indeterminate) rather than checked or unchecked.
 */
@Test
public void shouldBeNoChangeIfAllAgentsHaveThatResource() {
    resourceConfigs.add(new ResourceConfig("some"));
    agents.add(new Agent("uuid1", "host1", "127.0.0.1", List.of("some")));
    agents.add(new Agent("uuid2", "host2", "127.0.0.2", emptyList()));
    List<TriStateSelection> selections = TriStateSelection.forAgentsResources(resourceConfigs, agents);
    assertThat(selections, hasItem(new TriStateSelection("some", TriStateSelection.Action.nochange)));
}
/**
 * Initializes operator state for partition tracking: creates the commit
 * predicate when partition-commit triggering is enabled, resets the in-memory
 * partition bookkeeping collections, then delegates to the superclass to
 * restore checkpointed state.
 */
@Override
public void initializeState(StateInitializationContext context) throws Exception {
    if (isPartitionCommitTriggerEnabled()) {
        partitionCommitPredicate =
                PartitionCommitPredicate.create(conf, getUserCodeClassloader(), partitionKeys);
    }

    // Fresh bookkeeping; checkpointed contents are restored by the super call.
    currentNewPartitions = new HashSet<>();
    newPartitions = new TreeMap<>();
    committablePartitions = new HashSet<>();
    inProgressPartitions = new HashMap<>();
    super.initializeState(context);
}
/**
 * End-to-end check of partition-time commit triggering with a 1-day commit
 * delay: files are committed only once the watermark passes partition-time +
 * delay. The harness is restarted twice from snapshots to verify the trigger
 * state survives failover, and endInput must flush all remaining partitions.
 */
@Test
void testCommitFileWhenPartitionIsCommittableByPartitionTime() throws Exception {
    // the rolling policy is not to roll file by filesize and roll file after one day,
    // it can ensure the file can be closed only when the partition is committable in this test.
    FileSystemTableSink.TableRollingPolicy tableRollingPolicy =
            new FileSystemTableSink.TableRollingPolicy(
                    false,
                    Long.MAX_VALUE,
                    Duration.ofDays(1).toMillis(),
                    Duration.ofDays(1).toMillis());
    List<String> partitionKeys = Collections.singletonList("d");
    // commit delay is 1 day with partition-time trigger
    Configuration conf = getPartitionCommitTriggerConf(Duration.ofDays(1).toMillis());

    long currentTimeMillis = System.currentTimeMillis();
    Date nextYear = new Date(currentTimeMillis + Duration.ofDays(365).toMillis());
    String nextYearPartition = "d=" + dateFormat.format(nextYear);
    Date yesterday = new Date(currentTimeMillis - Duration.ofDays(1).toMillis());
    String yesterdayPartition = "d=" + dateFormat.format(yesterday);
    Date today = new Date(currentTimeMillis);
    String todayPartition = "d=" + dateFormat.format(today);
    Date tomorrow = new Date(currentTimeMillis + Duration.ofDays(1).toMillis());
    String tomorrowPartition = "d=" + dateFormat.format(tomorrow);

    OperatorSubtaskState state;
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeEmptyState();
        harness.open();
        harness.processElement(row(yesterdayPartition), 0);
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(1, 1);
        harness.notifyOfCompletedCheckpoint(1);
        // assert yesterday partition file is committed
        assertThat(isPartitionFileCommitted(yesterdayPartition, 0, 0)).isTrue();
    }

    // first retry
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(tomorrowPartition), 0);
        harness.processElement(row(todayPartition), 0);

        // simulate waiting for 1 day
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        harness.snapshot(2, 2);
        harness.notifyOfCompletedCheckpoint(2);
        // assert today partition file is committed
        assertThat(isPartitionFileCommitted(todayPartition, 0, 2)).isTrue();
        // assert tomorrow partition file isn't committed
        assertThat(isPartitionFileCommitted(tomorrowPartition, 0, 1)).isFalse();

        // simulate waiting for 1 day again, now tomorrow partition is committable
        currentTimeMillis += Duration.ofDays(1).toMillis();
        harness.processWatermark(currentTimeMillis);
        state = harness.snapshot(3, 3);
        harness.notifyOfCompletedCheckpoint(3);
        assertThat(isPartitionFileCommitted(tomorrowPartition, 0, 1)).isTrue();

        harness.processElement(row(nextYearPartition), 0);
    }

    // second retry
    try (OneInputStreamOperatorTestHarness<RowData, PartitionCommitInfo> harness =
            create(tableRollingPolicy, partitionKeys, conf)) {
        harness.setup();
        harness.initializeState(state);
        harness.open();
        harness.processElement(row(nextYearPartition), 0);
        harness.processElement(row(tomorrowPartition), 0);
        harness.endInput();
        // assert files in all partition have been committed
        assertThat(isPartitionFileCommitted(tomorrowPartition, 0, 4)).isTrue();
        assertThat(isPartitionFileCommitted(nextYearPartition, 0, 3)).isTrue();
    }
}