Dataset columns (from the dataset viewer):
- focal_method: string, length 13 to 60.9k characters
- test_case: string, length 25 to 109k characters
/**
 * Merges the given entries into the CLI session variables.
 *
 * @param vars variables to add; existing keys with the same name are overwritten
 */
public void addSessionVariables(final Map<String, String> vars) {
  // Copy every entry into the session-variable map, replacing duplicates.
  vars.forEach(sessionVariables::put);
}
@Test public void testAddVariablesToCli() { // Given localCli.addSessionVariables(ImmutableMap.of("env", "qa")); // Then assertRunListCommand("variables", hasRows( row( "env", "qa" ) )); }
/**
 * Issues a new access token in exchange for a valid refresh token.
 *
 * <p>Validates the refresh token (presence, existence, expiry, owning client,
 * client's refresh permission), enforces that any requested scopes are a subset
 * of the originally granted ones, and rotates or reuses the refresh token per
 * client configuration before persisting the new access token.
 *
 * @param refreshTokenValue the presented refresh token value
 * @param authRequest       token request carrying the client id and requested scopes
 * @return the newly created and persisted access token
 * @throws AuthenticationException on any validation failure (invalid/expired token,
 *         wrong client, refresh disallowed, or an up-scoping attempt)
 */
@Override
@Transactional(value="defaultTransactionManager")
public OAuth2AccessTokenEntity refreshAccessToken(String refreshTokenValue, TokenRequest authRequest) throws AuthenticationException {
  if (Strings.isNullOrEmpty(refreshTokenValue)) {
    // throw an invalid token exception if there's no refresh token value at all
    throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
  }
  // Expired tokens are purged during lookup, so a stale token resolves to null here.
  OAuth2RefreshTokenEntity refreshToken = clearExpiredRefreshToken(tokenRepository.getRefreshTokenByValue(refreshTokenValue));
  if (refreshToken == null) {
    // throw an invalid token exception if we couldn't find the token
    throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
  }
  ClientDetailsEntity client = refreshToken.getClient();
  AuthenticationHolderEntity authHolder = refreshToken.getAuthenticationHolder();
  // make sure that the client requesting the token is the one who owns the refresh token
  ClientDetailsEntity requestingClient = clientDetailsService.loadClientByClientId(authRequest.getClientId());
  if (!client.getClientId().equals(requestingClient.getClientId())) {
    // Ownership mismatch is treated as compromise: revoke the token outright.
    tokenRepository.removeRefreshToken(refreshToken);
    throw new InvalidClientException("Client does not own the presented refresh token");
  }
  //Make sure this client allows access token refreshing
  if (!client.isAllowRefresh()) {
    throw new InvalidClientException("Client does not allow refreshing access token!");
  }
  // clear out any access tokens
  if (client.isClearAccessTokensOnRefresh()) {
    tokenRepository.clearAccessTokensForRefreshToken(refreshToken);
  }
  if (refreshToken.isExpired()) {
    tokenRepository.removeRefreshToken(refreshToken);
    throw new InvalidTokenException("Expired refresh token: " + refreshTokenValue);
  }
  OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity();
  // get the stored scopes from the authentication holder's authorization request; these are the scopes associated with the refresh token
  Set<String> refreshScopesRequested = new HashSet<>(refreshToken.getAuthenticationHolder().getAuthentication().getOAuth2Request().getScope());
  Set<SystemScope> refreshScopes = scopeService.fromStrings(refreshScopesRequested);
  // remove any of the special system scopes
  refreshScopes = scopeService.removeReservedScopes(refreshScopes);
  Set<String> scopeRequested = authRequest.getScope() == null ? new HashSet<String>() : new HashSet<>(authRequest.getScope());
  Set<SystemScope> scope = scopeService.fromStrings(scopeRequested);
  // remove any of the special system scopes
  scope = scopeService.removeReservedScopes(scope);
  if (scope != null && !scope.isEmpty()) {
    // ensure a proper subset of scopes
    if (refreshScopes != null && refreshScopes.containsAll(scope)) {
      // set the scope of the new access token if requested
      token.setScope(scopeService.toStrings(scope));
    } else {
      // Requesting scopes beyond the original grant is forbidden.
      String errorMsg = "Up-scoping is not allowed.";
      logger.error(errorMsg);
      throw new InvalidScopeException(errorMsg);
    }
  } else {
    // otherwise inherit the scope of the refresh token (if it's there -- this can return a null scope set)
    token.setScope(scopeService.toStrings(refreshScopes));
  }
  token.setClient(client);
  if (client.getAccessTokenValiditySeconds() != null) {
    Date expiration = new Date(System.currentTimeMillis() + (client.getAccessTokenValiditySeconds() * 1000L));
    token.setExpiration(expiration);
  }
  if (client.isReuseRefreshToken()) {
    // if the client re-uses refresh tokens, do that
    token.setRefreshToken(refreshToken);
  } else {
    // otherwise, make a new refresh token
    OAuth2RefreshTokenEntity newRefresh = createRefreshToken(client, authHolder);
    token.setRefreshToken(newRefresh);
    // clean up the old refresh token
    tokenRepository.removeRefreshToken(refreshToken);
  }
  token.setAuthenticationHolder(authHolder);
  // Allow configured enhancers to add claims before persisting.
  tokenEnhancer.enhance(token, authHolder.getAuthentication());
  tokenRepository.saveAccessToken(token);
  return token;
}
@Test(expected = InvalidTokenException.class)
public void refreshAccessToken_expired() {
  // Given: the stored refresh token reports itself as expired
  when(refreshToken.isExpired()).thenReturn(true);
  // When/Then: refreshing must be rejected with InvalidTokenException
  service.refreshAccessToken(refreshTokenValue, tokenRequest);
}
/**
 * Projects the fields named by {@code fieldAccessDescriptor} out of {@code input}
 * into a new {@link Row} conforming to {@code outputSchema}.
 *
 * <p>When the descriptor selects all fields, the input row is returned as-is.
 */
private static Row selectRow(
    Row input,
    FieldAccessDescriptor fieldAccessDescriptor,
    Schema inputSchema,
    Schema outputSchema) {
  // Fast path: selecting everything means the input row already has the right shape.
  if (fieldAccessDescriptor.getAllFields()) {
    return input;
  }
  // Otherwise copy only the selected fields into a builder for the output schema.
  Row.Builder builder = Row.withSchema(outputSchema);
  selectIntoRow(inputSchema, input, builder, fieldAccessDescriptor);
  return builder.build();
}
@Test
public void testSelectNullableNestedRowArray() {
  // Selecting a concrete sub-field of a null nested array yields a null value.
  FieldAccessDescriptor fieldAccessDescriptor1 =
      FieldAccessDescriptor.withFieldNames("nestedArray.field1").resolve(NESTED_NULLABLE_SCHEMA);
  // NOTE(review): this calls a three-argument selectRow(schema, descriptor, row) helper,
  // distinct from the four-argument private overload defined elsewhere — confirm.
  Row out1 = selectRow(NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor1, Row.nullRow(NESTED_NULLABLE_SCHEMA));
  assertNull(out1.getValue(0));
  // Wildcard selection over the null nested array expands to one null per sub-field.
  FieldAccessDescriptor fieldAccessDescriptor2 =
      FieldAccessDescriptor.withFieldNames("nestedArray.*").resolve(NESTED_NULLABLE_SCHEMA);
  Row out2 = selectRow(NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor2, Row.nullRow(NESTED_NULLABLE_SCHEMA));
  assertEquals(Collections.nCopies(4, null), out2.getValues());
}
/**
 * Returns whether this application file denotes a directory.
 *
 * @return {@code true} if this file is a directory, {@code false} otherwise
 *         (behavior for non-existent paths is implementation-defined — presumably
 *         {@code false}; confirm against concrete implementations)
 */
public abstract boolean isDirectory();
@Test
public void testApplicationFileIsDirectory() throws Exception {
  // Regular file -> not a directory.
  assertFalse(getApplicationFile(Path.fromString("vespa-services.xml")).isDirectory());
  // Real directory -> true.
  assertTrue(getApplicationFile(Path.fromString("searchdefinitions")).isDirectory());
  assertFalse(getApplicationFile(Path.fromString("searchdefinitions/sock.sd")).isDirectory());
  // Non-existent paths report false rather than throwing.
  assertFalse(getApplicationFile(Path.fromString("doesnotexist")).isDirectory());
}
/**
 * Switches this processor context to active mode, installing the owning task,
 * its record collector, and a fresh cache.
 *
 * @throws IllegalStateException if the backing state manager is not of ACTIVE type
 */
@Override
public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) {
  // Guard: only an ACTIVE-type state manager may back an active processor context.
  if (stateManager.taskType() != TaskType.ACTIVE) {
    throw new IllegalStateException("Tried to transition processor context to active but the state manager's " + "type was " + stateManager.taskType());
  }
  this.streamTask = streamTask;
  this.collector = recordCollector;
  this.cache = newCache;
  // Re-register existing flush listeners against the newly installed cache.
  addAllFlushListenersToNewCache();
}
@Test
public void shouldAddAndGetProcessorKeyValue() {
  foreachSetUp();
  // The context must be ACTIVE before it can transition and accept metadata.
  when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
  context = buildProcessorContextImpl(streamsConfig, stateManager);
  final StreamTask task = mock(StreamTask.class);
  context.transitionToActive(task, null, null);
  mockProcessorNodeWithLocalKeyValueStore();
  context.addProcessorMetadataKeyValue("key1", 100L);
  final Long value = context.processorMetadataForKey("key1");
  assertEquals(100L, value.longValue());
  // Unknown keys return null rather than throwing.
  final Long noValue = context.processorMetadataForKey("nokey");
  assertNull(noValue);
}
/**
 * Creates a Parquet read transform for the given Avro schema.
 * Beam-schema inference is off by default; callers opt in explicitly.
 */
public static Read read(Schema schema) {
  AutoValue_ParquetIO_Read.Builder builder = new AutoValue_ParquetIO_Read.Builder();
  return builder.setSchema(schema).setInferBeamSchema(false).build();
}
@Test
public void testReadDisplayData() {
  Configuration configuration = new Configuration();
  configuration.set("parquet.foo", "foo");
  DisplayData displayData =
      DisplayData.from(
          ParquetIO.read(SCHEMA)
              .from("foo.parquet")
              .withProjection(REQUESTED_SCHEMA, SCHEMA)
              .withAvroDataModel(GenericData.get())
              .withConfiguration(configuration));
  // Every configured option should surface in the transform's display data.
  assertThat(displayData, hasDisplayItem("filePattern", "foo.parquet"));
  assertThat(displayData, hasDisplayItem("schema", SCHEMA.toString()));
  assertThat(displayData, hasDisplayItem("inferBeamSchema", false));
  assertThat(displayData, hasDisplayItem("projectionSchema", REQUESTED_SCHEMA.toString()));
  assertThat(displayData, hasDisplayItem("avroDataModel", GenericData.get().toString()));
  // Arbitrary Hadoop configuration entries are surfaced too.
  assertThat(displayData, hasDisplayItem("parquet.foo", "foo"));
}
/**
 * Resolves a single property, converting it to {@code targetType}.
 *
 * @return the bound value, or {@link Optional#empty()} when the key is absent
 */
@Override
public <T> Optional<T> getProperty(String key, Class<T> targetType) {
  // Translate the external key into the binder's property namespace first.
  var boundKey = targetPropertyName(key);
  var bindResult = binder.bind(boundKey, Bindable.of(targetType));
  // An unbound result means the property is absent; surface that as empty.
  if (bindResult.isBound()) {
    return Optional.of(bindResult.get());
  }
  return Optional.empty();
}
@Test
void resolvedSingleValueProperties() {
  env.setProperty("prop.0.strProp", "testStr");
  env.setProperty("prop.0.intProp", "123");
  var resolver = new PropertyResolverImpl(env);
  assertThat(resolver.getProperty("prop.0.strProp", String.class))
      .hasValue("testStr");
  // String-typed source values should be converted to the requested target type.
  assertThat(resolver.getProperty("prop.0.intProp", Integer.class))
      .hasValue(123);
}
/**
 * Returns the hotel with the given id.
 *
 * @param id hotel identifier taken from the request path
 * @return 200 OK with the hotel returned by the repository
 */
@GetMapping("/{id}")
public ResponseEntity<Hotel> obtenerHotel(@PathVariable String id) {
  // Fixed: the local variable was named "Hotel", shadowing the class name and
  // violating lowerCamelCase naming for locals.
  Hotel hotel = hotelRepository.getHotel(id);
  return ResponseEntity.ok(hotel);
}
@Test
void testObtenerHotel() {
  // Given: a hotel known to the (mocked) service layer
  String hotelId = "1";
  Hotel hotel = new Hotel(hotelId, "Hotel Test", "Info Test", "Ubicacion Test");
  // NOTE(review): this stubs hotelService, while the controller shown elsewhere
  // reads hotelRepository directly — confirm the controller's wiring matches this stub.
  when(hotelService.getHotel(hotelId)).thenReturn(hotel);
  // When (Cuando)
  ResponseEntity<Hotel> responseEntity = hotelController.obtenerHotel(hotelId);
  // Then (Entonces)
  // Verify the ResponseEntity carries HTTP 200 (OK) and the expected hotel body.
  assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.OK);
  assertThat(responseEntity.getBody()).isEqualTo(hotel);
}
/**
 * Runs {@code toRun} inside a Sentinel resource entry; on rejection or failure
 * the {@code fallback} is invoked with the triggering throwable.
 *
 * @param toRun    the guarded business operation
 * @param fallback handler producing the result when the call is blocked or throws
 * @return the operation result, or the fallback's result on failure
 */
@Override
public <T> T run(Supplier<T> toRun, Function<Throwable, T> fallback) {
  Entry entry = null;
  try {
    entry = SphU.entry(resourceName, entryType);
    // If the SphU.entry() does not throw `BlockException`, it means that the
    // request can pass.
    return toRun.get();
  } catch (BlockException ex) {
    // SphU.entry() may throw BlockException which indicates that
    // the request was rejected (flow control or circuit breaking triggered).
    // So it should not be counted as the business exception.
    return fallback.apply(ex);
  } catch (Exception ex) {
    // For other kinds of exceptions, we'll trace the exception count via
    // Tracer.trace(ex).
    Tracer.trace(ex);
    return fallback.apply(ex);
  } finally {
    // Guarantee the invocation has been completed.
    if (entry != null) {
      entry.exit();
    }
  }
}
@Test
public void testCreateDirectlyThenRun() {
  // Create a circuit breaker without any circuit breaking rules.
  CircuitBreaker cb = new SentinelCircuitBreaker("testSentinelCreateDirectlyThenRunA");
  assertThat(cb.run(() -> "Sentinel")).isEqualTo("Sentinel");
  // No rules were supplied, so none should be registered with the rule manager.
  assertThat(DegradeRuleManager.hasConfig("testSentinelCreateDirectlyThenRunA"))
      .isFalse();
  CircuitBreaker cb2 =
      new SentinelCircuitBreaker(
          "testSentinelCreateDirectlyThenRunB",
          Collections.singletonList(
              new DegradeRule("testSentinelCreateDirectlyThenRunB")
                  .setCount(100).setTimeWindow(10)));
  assertThat(cb2.run(() -> "Sentinel")).isEqualTo("Sentinel");
  // Supplied degrade rules should now be registered with the rule manager.
  assertThat(DegradeRuleManager.hasConfig("testSentinelCreateDirectlyThenRunB"))
      .isTrue();
}
/**
 * Commits files produced by a Hive sink into the target table, deciding per
 * partition whether to append, overwrite, or create a new partition.
 *
 * @param dbName      target database
 * @param tableName   target table
 * @param commitInfos one entry per written file; empty means nothing to commit
 * @param branch      unused by this implementation's visible logic
 */
@Override
public void finishSink(String dbName, String tableName, List<TSinkCommitInfo> commitInfos, String branch) {
  if (commitInfos.isEmpty()) {
    // Nothing was written; there is nothing to commit.
    LOG.warn("No commit info on {}.{} after hive sink", dbName, tableName);
    return;
  }
  HiveTable table = (HiveTable) getTable(dbName, tableName);
  // Staging dir and overwrite flag are read from the first commit info only;
  // NOTE(review): this assumes all commit infos share those values — confirm.
  String stagingDir = commitInfos.get(0).getStaging_dir();
  boolean isOverwrite = commitInfos.get(0).isIs_overwrite();
  // Collapse per-file infos into one PartitionUpdate per partition.
  List<PartitionUpdate> partitionUpdates = commitInfos.stream()
      .map(TSinkCommitInfo::getHive_file_info)
      .map(fileInfo -> PartitionUpdate.get(fileInfo, stagingDir, table.getTableLocation()))
      .collect(Collectors.collectingAndThen(Collectors.toList(), PartitionUpdate::merge));
  List<String> partitionColNames = table.getPartitionColumnNames();
  for (PartitionUpdate partitionUpdate : partitionUpdates) {
    PartitionUpdate.UpdateMode mode;
    if (table.isUnPartitioned()) {
      // Unpartitioned tables have a single update; no need to inspect more entries.
      mode = isOverwrite ? UpdateMode.OVERWRITE : UpdateMode.APPEND;
      partitionUpdate.setUpdateMode(mode);
      break;
    } else {
      List<String> partitionValues = toPartitionValues(partitionUpdate.getName());
      Preconditions.checkState(partitionColNames.size() == partitionValues.size(),
          "Partition columns names size doesn't equal partition values size. %s vs %s",
          partitionColNames.size(), partitionValues.size());
      // Existing partitions get appended/overwritten; unknown ones are created.
      if (hmsOps.partitionExists(table, partitionValues)) {
        mode = isOverwrite ? UpdateMode.OVERWRITE : UpdateMode.APPEND;
      } else {
        mode = PartitionUpdate.UpdateMode.NEW;
      }
      partitionUpdate.setUpdateMode(mode);
    }
  }
  HiveCommitter committer = new HiveCommitter(
      hmsOps, fileOps, updateExecutor, refreshOthersFeExecutor, table, new Path(stagingDir));
  // Trace the commit so it shows up in external profiling scopes.
  try (Timer ignored = Tracers.watchScope(EXTERNAL, "HIVE.SINK.commit")) {
    committer.commit(partitionUpdates);
  }
}
@Test
public void testOverwritePartition() throws Exception {
  String stagingDir = "hdfs://127.0.0.1:10000/tmp/starrocks/queryid";
  THiveFileInfo fileInfo = new THiveFileInfo();
  fileInfo.setFile_name("myfile.parquet");
  fileInfo.setPartition_path("hdfs://127.0.0.1:10000/tmp/starrocks/queryid/col1=2");
  fileInfo.setRecord_count(10);
  fileInfo.setFile_size_in_bytes(100);
  TSinkCommitInfo tSinkCommitInfo = new TSinkCommitInfo();
  tSinkCommitInfo.setStaging_dir(stagingDir);
  // Overwrite mode should replace the existing partition contents.
  tSinkCommitInfo.setIs_overwrite(true);
  tSinkCommitInfo.setHive_file_info(fileInfo);
  // Stub out the filesystem rename so no real HDFS access happens.
  new MockUp<RemoteFileOperations>() {
    @Mock
    public void renameDirectory(Path source, Path target, Runnable runWhenPathNotExist) {
    }
  };
  AnalyzeTestUtil.init();
  hiveMetadata.finishSink("hive_db", "hive_table", Lists.newArrayList(tSinkCommitInfo), null);
}
/**
 * Builds the HTTP user-agent string, e.g. {@code "jib <version> <tool>"}, with an
 * optional upstream-client suffix. Returns an empty string when user-agent
 * reporting is disabled via system property.
 */
@VisibleForTesting
String makeUserAgent() {
  // Respect the opt-out: an empty user agent disables reporting entirely.
  if (!JibSystemProperties.isUserAgentEnabled()) {
    return "";
  }
  // Note: a null toolVersion/toolName renders as the literal "null".
  StringBuilder userAgent = new StringBuilder("jib");
  userAgent.append(" ").append(toolVersion);
  userAgent.append(" ").append(toolName);
  String upstreamClient = System.getProperty(JibSystemProperties.UPSTREAM_CLIENT);
  if (!Strings.isNullOrEmpty(upstreamClient)) {
    // Append the upstream client (e.g. a wrapping build tool) when one identified itself.
    userAgent.append(" ").append(upstreamClient);
  }
  return userAgent.toString();
}
@Test
public void testGetUserAgent_unset() throws CacheDirectoryCreationException {
  BuildContext buildContext = createBasicTestBuilder().build();
  String generatedUserAgent = buildContext.makeUserAgent();
  // An unset tool version appends as the literal "null"; presumably the default
  // tool name is "jib" — confirm against the builder's defaults.
  Assert.assertEquals("jib null jib", generatedUserAgent);
}
/**
 * KSQL UDF: formats an epoch-day count as a date string using a
 * {@link java.time.format.DateTimeFormatter} pattern.
 *
 * @param epochDays     days since 1970-01-01
 * @param formatPattern DateTimeFormatter-compatible pattern; {@code null} yields {@code null}
 * @return the formatted date, or {@code null} for a null pattern
 * @throws KsqlFunctionException if the pattern is invalid or formatting fails
 */
@Udf(description = "Converts the number of days since 1970-01-01 00:00:00 UTC/GMT to a date " + "string using the given format pattern. The format pattern should be in the format" + " expected by java.time.format.DateTimeFormatter")
public String dateToString(
    @UdfParameter(
        description = "The Epoch Day to convert," + " based on the epoch 1970-01-01") final int epochDays,
    @UdfParameter(
        description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // SQL semantics: a null pattern yields a null result rather than an error.
  if (formatPattern == null) {
    return null;
  }
  try {
    // Formatters are cached per pattern; the cache lookup may throw ExecutionException.
    final DateTimeFormatter formatter = formatters.get(formatPattern);
    return LocalDate.ofEpochDay(epochDays).format(formatter);
  } catch (final ExecutionException | RuntimeException e) {
    // Wrap every failure with the offending input and pattern for diagnosability.
    throw new KsqlFunctionException("Failed to format date " + epochDays + " with formatter '" + formatPattern + "': " + e.getMessage(), e);
  }
}
@Test
public void shouldThrowIfFormatInvalid() {
  // When: formatting with a pattern DateTimeFormatter cannot parse
  final Exception e = assertThrows(
      KsqlFunctionException.class,
      () -> udf.dateToString(44444, "invalid")
  );
  // Then: the error identifies both the input date and the bad pattern
  assertThat(e.getMessage(), containsString("Failed to format date 44444 with formatter 'invalid'"));
}
/**
 * Parses all schemas under {@code inputPath} (resolving references against
 * {@code resolverPath}) and writes one snapshot file per named schema into
 * {@code outputDir}.
 *
 * @throws IOException if parsing or writing a snapshot file fails
 */
public void export(String resolverPath, String inputPath, File outputDir) throws IOException {
  final List<DataSchema> schemas = parseDataSchema(resolverPath, inputPath);
  for (final DataSchema schema : schemas) {
    // Each parsed schema is expected to be named; its full name becomes the file name.
    writeSnapshotFile(outputDir, ((NamedDataSchema) schema).getFullName(), schema);
  }
}
@Test
public void testExportSnapshot() throws Exception {
  // The exporter should write exactly one snapshot file per named schema.
  String[] expectedFiles = new String[] { "BirthInfo.pdl", "FullName.pdl", "Date.pdl" };
  String inputDir = pegasusDir + "com/linkedin/restli/tools/pegasusSchemaSnapshotTest";
  PegasusSchemaSnapshotExporter exporter = new PegasusSchemaSnapshotExporter();
  exporter.export(pegasusDir, inputDir, outDir);
  Assert.assertEquals(outDir.list().length, expectedFiles.length);
  for (String file : expectedFiles) {
    // Compare each generated snapshot against its checked-in golden file.
    String actualFile = outDir + FS + file;
    String expectedFile = snapshotDir + FS + file;
    ExporterTestUtils.comparePegasusSchemaSnapshotFiles(actualFile, expectedFile);
  }
}
/**
 * Asserts that {@code expression} is {@code true}; otherwise throws the throwable
 * produced by {@code supplier}.
 *
 * @param expression the condition expected to hold
 * @param supplier   factory for the throwable raised on failure (invoked lazily,
 *                   only when the condition fails)
 * @param <X>        type of throwable thrown on failure
 * @throws X when {@code expression} is {@code false}
 */
public static <X extends Throwable> void isTrue(boolean expression, Supplier<? extends X> supplier) throws X {
    // Replaced the Yoda condition "false == expression" with the idiomatic negation.
    if (!expression) {
        throw supplier.get();
    }
}
@Test
public void isTrueTest3() {
  // The supplier-provided exception type should be thrown when the condition fails.
  Assertions.assertThrows(IndexOutOfBoundsException.class, () -> {
    int i = -1;
    //noinspection ConstantConditions
    Assert.isTrue(i > 0, () -> new IndexOutOfBoundsException("relation message to return"));
  });
}
/**
 * Builds and sends fetch requests for all nodes with fetchable partitions.
 *
 * @return the number of fetch requests prepared for sending
 */
public synchronized int sendFetches() {
  final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
  sendFetchesInternal(
      fetchRequests,
      (fetchTarget, data, clientResponse) -> {
        // Completion handlers may run on a different thread; re-acquire the fetcher
        // lock so response handling is serialized with other fetcher operations.
        // NOTE(review): confirm which thread invokes these callbacks.
        synchronized (Fetcher.this) {
          handleFetchSuccess(fetchTarget, data, clientResponse);
        }
      },
      (fetchTarget, data, error) -> {
        synchronized (Fetcher.this) {
          handleFetchFailure(fetchTarget, data, error);
        }
      });
  return fetchRequests.size();
}
@Test
public void testFetchDisconnected() {
  buildFetcher();
  assignFromUser(singleton(tp0));
  subscriptions.seek(tp0, 0);
  assertEquals(1, sendFetches());
  // Simulate a disconnect (second argument true) instead of a normal response.
  client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0), true);
  consumerClient.poll(time.timer(0));
  assertEmptyFetch("Should not return records or advance position on disconnect");
  // disconnects should have no effect on subscription state
  assertFalse(subscriptions.isOffsetResetNeeded(tp0));
  assertTrue(subscriptions.isFetchable(tp0));
  assertEquals(0, subscriptions.position(tp0).offset);
}
/** Creates a new transform instance with default settings. */
public static <T> Inner<T> create() {
  // Nothing to configure up front; callers customize via the fluent API.
  final Inner<T> transform = new Inner<>();
  return transform;
}
@Test
@Category(NeedsRunner.class)
public void renameNestedFields() {
  // Schema with a nested row field whose sub-fields will be renamed.
  Schema nestedSchema = Schema.builder().addStringField("field1").addInt32Field("field2").build();
  Schema schema = Schema.builder().addStringField("field1").addRowField("nested", nestedSchema).build();
  PCollection<Row> renamed =
      pipeline
          .apply(
              Create.of(
                      Row.withSchema(schema)
                          .addValues(
                              "one", Row.withSchema(nestedSchema).addValues("one", 1).build())
                          .build(),
                      Row.withSchema(schema)
                          .addValues(
                              "two", Row.withSchema(nestedSchema).addValues("two", 1).build())
                          .build())
                  .withRowSchema(schema))
          .apply(
              RenameFields.<Row>create()
                  .rename("nested.field1", "new1")
                  .rename("nested.field2", "new2"));
  // Only the nested sub-field names change; the outer field names stay the same.
  Schema expectedNestedSchema = Schema.builder().addStringField("new1").addInt32Field("new2").build();
  Schema expectedSchema =
      Schema.builder()
          .addStringField("field1")
          .addRowField("nested", expectedNestedSchema)
          .build();
  assertEquals(expectedSchema, renamed.getSchema());
  // Values must be untouched; only the schema's field names differ.
  List<Row> expectedRows =
      ImmutableList.of(
          Row.withSchema(expectedSchema)
              .addValues("one", Row.withSchema(expectedNestedSchema).addValues("one", 1).build())
              .build(),
          Row.withSchema(expectedSchema)
              .addValues("two", Row.withSchema(expectedNestedSchema).addValues("two", 1).build())
              .build());
  PAssert.that(renamed).containsInAnyOrder(expectedRows);
  pipeline.run();
}
/**
 * Converts an Avro {@code GenericRecord} into a Beam {@code Row} matching {@code schema}.
 *
 * <p>Fields present in the Beam schema but absent from the record's Avro schema are
 * converted from {@code null}. A failure on any single field is rethrown with that
 * field's context attached.
 */
public static Row toBeamRow(GenericRecord record, Schema schema, ConversionOptions options) {
  List<Object> valuesInOrder =
      schema.getFields().stream()
          .map(
              field -> {
                try {
                  // Look the field up by name in the record's own schema; a missing
                  // field converts from null instead of failing.
                  org.apache.avro.Schema.Field avroField = record.getSchema().getField(field.getName());
                  Object value = avroField != null ? record.get(avroField.pos()) : null;
                  return convertAvroFormat(field.getType(), value, options);
                } catch (Exception cause) {
                  // Re-wrap with the failing field so callers can pinpoint the bad column.
                  throw new IllegalArgumentException(
                      "Error converting field " + field + ": " + cause.getMessage(), cause);
                }
              })
          .collect(toList());
  return Row.withSchema(schema).addValues(valuesInOrder).build();
}
@Test
public void testToBeamRow_inlineArray() {
  // NOTE(review): exercises a two-argument toBeamRow overload (schema, table row),
  // distinct from the three-argument GenericRecord variant defined elsewhere.
  Row beamRow = BigQueryUtils.toBeamRow(ARRAY_TYPE, BQ_INLINE_ARRAY_ROW);
  assertEquals(ARRAY_ROW, beamRow);
}
/**
 * Converts {@code value} expressed in {@code unit} to whole seconds, truncating
 * any sub-second remainder toward zero.
 *
 * @param value the duration to convert
 * @param unit  the unit {@code value} is expressed in
 * @return the equivalent number of seconds, truncated
 */
public static long convertTimeUnitValueToSecond(long value, TimeUnit unit) {
    // TimeUnit.toSeconds(value) is the direct, idiomatic equivalent of
    // TimeUnit.SECONDS.convert(value, unit).
    return unit.toSeconds(value);
}
@Test
public void testConvertTimeUnitValuetoSecond() {
  long dayRes = TimeUtils.convertTimeUnitValueToSecond(2, TimeUnit.DAYS);
  long hourRes = TimeUtils.convertTimeUnitValueToSecond(2, TimeUnit.HOURS);
  long minuteRes = TimeUtils.convertTimeUnitValueToSecond(2, TimeUnit.MINUTES);
  long secondRes = TimeUtils.convertTimeUnitValueToSecond(2, TimeUnit.SECONDS);
  long milRes = TimeUtils.convertTimeUnitValueToSecond(2, TimeUnit.MILLISECONDS);
  long micRes = TimeUtils.convertTimeUnitValueToSecond(2, TimeUnit.MICROSECONDS);
  long nanoRes = TimeUtils.convertTimeUnitValueToSecond(2, TimeUnit.NANOSECONDS);
  Assert.assertEquals(dayRes, 2 * 24 * 60 * 60);
  Assert.assertEquals(hourRes, 2 * 60 * 60);
  Assert.assertEquals(minuteRes, 2 * 60);
  Assert.assertEquals(secondRes, 2);
  // Sub-second inputs truncate to zero; the 2 / 1000 expressions below are
  // integer division and all evaluate to 0, matching that truncation.
  Assert.assertEquals(milRes, 2 / 1000);
  Assert.assertEquals(micRes, 2 / 1000 / 1000);
  Assert.assertEquals(nanoRes, 2 / 1000 / 1000 / 1000);
}
/**
 * Evaluates {@code realData} against the given condition.
 *
 * <p>Returns {@code false} when the condition is null or lacks an operator, and
 * when {@code realData} is blank unless the resolved judge is the blank-aware
 * implementation.
 */
public static Boolean judge(final ConditionData conditionData, final String realData) {
  // A missing condition or operator can never match anything.
  if (Objects.isNull(conditionData) || StringUtils.isBlank(conditionData.getOperator())) {
    return false;
  }
  final PredicateJudge judge = newInstance(conditionData.getOperator());
  // Only BlankPredicateJudge knows how to handle blank input; all others reject it.
  final boolean blankInputUnsupported = !(judge instanceof BlankPredicateJudge) && StringUtils.isBlank(realData);
  if (blankInputUnsupported) {
    return false;
  }
  return judge.judge(conditionData, realData);
}
@Test
public void testConditionDataIsNull() {
  // A null condition can never match any real data.
  final Boolean result = PredicateJudgeFactory.judge(null, "testRealData");
  assertFalse(result);
}
/**
 * Builds the root {@code ResourceModel}s for the given Rest.li-annotated classes.
 *
 * @param restliAnnotatedClasses all classes annotated as Rest.li resources
 * @return map from root resource name to its model
 */
public static Map<String, ResourceModel> buildResourceModels(final Set<Class<?>> restliAnnotatedClasses) {
  final Map<String, ResourceModel> rootModels = new HashMap<>();
  final Map<Class<?>, ResourceModel> modelsByClass = new HashMap<>();
  for (final Class<?> resourceClass : restliAnnotatedClasses) {
    // processResourceInOrder handles any parent/child ordering internally while
    // populating both maps.
    processResourceInOrder(resourceClass, modelsByClass, rootModels);
  }
  return rootModels;
}
@Test(dataProvider = "resourcesWithNoClashingNamesDataProvider")
public void testResourceNameNoClash(Class<?>[] classes) {
  Set<Class<?>> resourceClasses = new HashSet<>(Arrays.asList(classes));
  Map<String, ResourceModel> resourceModels = RestLiApiBuilder.buildResourceModels(resourceClasses);
  // With no clashing names, each resource class must yield its own root model.
  Assert.assertEquals(resourceModels.size(), classes.length,
      "The number of ResourceModels generated does not match the number of resource classes.");
}
/**
 * Validates and caches the converter configuration.
 *
 * @param configs raw connector configuration map
 * @throws ConfigException if a string target type lacks a format pattern, or if
 *         the pattern is not a valid {@code SimpleDateFormat} pattern
 */
@Override
public void configure(Map<String, ?> configs) {
  final SimpleConfig simpleConfig = new SimpleConfig(CONFIG_DEF, configs);
  final String field = simpleConfig.getString(FIELD_CONFIG);
  final String type = simpleConfig.getString(TARGET_TYPE_CONFIG);
  String formatPattern = simpleConfig.getString(FORMAT_CONFIG);
  final String unixPrecision = simpleConfig.getString(UNIX_PRECISION_CONFIG);
  schemaUpdateCache = new SynchronizedCache<>(new LRUCache<>(16));
  replaceNullWithDefault = simpleConfig.getBoolean(REPLACE_NULL_WITH_DEFAULT_CONFIG);
  // String targets cannot be produced without a pattern, so fail fast at configure time.
  if (type.equals(TYPE_STRING) && Utils.isBlank(formatPattern)) {
    throw new ConfigException("TimestampConverter requires format option to be specified when using string timestamps");
  }
  SimpleDateFormat format = null;
  if (!Utils.isBlank(formatPattern)) {
    try {
      format = new SimpleDateFormat(formatPattern);
      // All conversions run in UTC regardless of the JVM default time zone.
      format.setTimeZone(UTC);
    } catch (IllegalArgumentException e) {
      throw new ConfigException("TimestampConverter requires a SimpleDateFormat-compatible pattern for string timestamps: " + formatPattern, e);
    }
  }
  config = new Config(field, type, format, unixPrecision);
}
@Test
public void testConfigInvalidFormat() {
  Map<String, String> config = new HashMap<>();
  config.put(TimestampConverter.TARGET_TYPE_CONFIG, "string");
  // A pattern SimpleDateFormat cannot parse must be rejected at configure time.
  config.put(TimestampConverter.FORMAT_CONFIG, "bad-format");
  assertThrows(ConfigException.class, () -> xformValue.configure(config));
}
/**
 * Fires a migration step on the state machine, recording any arguments for the
 * step's action and persisting the resulting context.
 *
 * @param step the step to fire
 * @param args optional action arguments (ignored when null or empty)
 * @return the resulting state plus next steps, carrying an error message when the step failed
 */
@Override
public CurrentStateInformation trigger(MigrationStep step, Map<String, Object> args) {
  context.setCurrentStep(step);
  if (Objects.nonNull(args) && !args.isEmpty()) {
    context.addActionArguments(step, args);
  }
  String errorMessage = null;
  try {
    stateMachine.fire(step);
  } catch (Exception e) {
    // Failures are captured and reported in the result rather than propagated,
    // falling back to toString() when the exception carries no message.
    errorMessage = Objects.nonNull(e.getMessage()) ? e.getMessage() : e.toString();
  }
  // Persist the context even when the step failed, so progress survives restarts.
  persistenceService.saveStateMachineContext(context);
  return new CurrentStateInformation(getState(), nextSteps(), errorMessage, context.getResponse());
}
@Test
public void smPassesArgumentsToAction() {
  // The action asserts that trigger arguments arrive typed and intact.
  StateMachine<MigrationState, MigrationStep> stateMachine = testStateMachineWithAction((context) -> {
    assertThat(context.getActionArgument("arg1", String.class)).isEqualTo("v1");
    assertThat(context.getActionArgument("arg2", Integer.class)).isEqualTo(2);
  });
  migrationStateMachine = new MigrationStateMachineImpl(stateMachine, persistenceService, context);
  CurrentStateInformation context = migrationStateMachine.trigger(MIGRATION_STEP, Map.of(
      "arg1", "v1",
      "arg2", 2
  ));
  // Any assertion failure inside the action would surface as an error here.
  assertThat(context.hasErrors()).isFalse();
}
/**
 * Periodic task: checks the certificate renewal policy and kicks off renewal
 * (automatic or manual flavor) when a new certificate is needed.
 * Skipped entirely while the server is still in preflight mode.
 */
@Override
public void doRun() {
  if (isServerInPreflightMode.get()) {
    // we don't want to automatically trigger CSRs during preflight, don't run it if the preflight is still not finished or skipped
    LOG.debug("Datanode still in preflight mode, skipping cert renewal task");
    return;
  }
  // always check if there are any certificates that we can accept
  getRenewalPolicy()
      .filter(this::needsNewCertificate)
      .ifPresent(renewalPolicy -> {
        // AUTOMATIC renews without operator action; MANUAL presumably requires
        // operator involvement — confirm against the renewal implementations.
        switch (renewalPolicy.mode()) {
          case AUTOMATIC -> automaticRenewal();
          case MANUAL -> manualRenewal();
        }
      });
}
@Test
void testAlreadyExpired() throws Exception {
  // A keystore whose certificate lifetime is effectively zero is already expired.
  final DatanodeKeystore datanodeKeystore = datanodeKeystore(Duration.ofNanos(1));
  final CsrRequester csrRequester = Mockito.mock(CsrRequester.class);
  final DataNodeCertRenewalPeriodical periodical = new DataNodeCertRenewalPeriodical(
      datanodeKeystore,
      autoRenewalPolicy("PT1M"),
      csrRequester,
      () -> false
  );
  periodical.doRun();
  // Expired cert + automatic policy must trigger exactly one signing request.
  Mockito.verify(csrRequester, Mockito.times(1)).triggerCertificateSigningRequest();
}
/**
 * Rewrites the given expression via the configured rewriter.
 *
 * @param expression expression to rewrite
 * @param context    rewrite context passed through to the rewriter
 * @return the rewritten expression, cast back to the caller's expression type
 */
@SuppressWarnings("unchecked")
public <T extends Expression> T rewrite(final T expression, final C context) {
  // Unchecked cast: the rewriter is expected to preserve the node's concrete type.
  return (T) rewriter.process(expression, context);
}
@Test
public void shouldRewriteArithmeticUnary() {
  // Given:
  final ArithmeticUnaryExpression parsed = parseExpression("-(1)");
  when(processor.apply(parsed.getValue(), context)).thenReturn(expr1);
  // When:
  final Expression rewritten = expressionRewriter.rewrite(parsed, context);
  // Then: the operand is replaced while location and sign are preserved.
  assertThat(
      rewritten,
      equalTo(new ArithmeticUnaryExpression(parsed.getLocation(), parsed.getSign(), expr1))
  );
}
/**
 * Parses raw PlantUML diagram lines into a {@code PlantUmlDiagram}.
 */
private PlantUmlDiagram createDiagram(List<String> rawDiagramLines) {
  // Strip comment lines first so component/dependency parsing only sees real content.
  final List<String> lines = filterOutComments(rawDiagramLines);
  final PlantUmlComponents plantUmlComponents = new PlantUmlComponents(parseComponents(lines));
  final List<ParsedDependency> dependencies = parseDependencies(plantUmlComponents, lines);
  return new PlantUmlDiagram.Builder(plantUmlComponents)
      .withDependencies(dependencies)
      .build();
}
@Test
public void ignores_components_that_are_not_yet_defined() {
  // A dependency between components that were never declared should be dropped,
  // not cause a parse failure.
  File file = TestDiagram.in(temporaryFolder)
      .dependencyFrom("[NotYetDefined]").to("[AlsoNotYetDefined]")
      .write();
  // NOTE(review): uses a createDiagram(File) overload, not the List<String> variant.
  PlantUmlDiagram diagram = createDiagram(file);
  assertThat(diagram.getComponentsWithAlias()).isEmpty();
}
/**
 * Stores {@code value} under {@code key} only when the key is absent.
 *
 * @return the null-sentinel when the key was absent (value stored), otherwise the
 *         existing value (map unchanged)
 */
@Override
public long putIfAbsent(long key, long value) {
  // The null-sentinel is reserved to mean "absent"; storing it would corrupt lookups.
  assert value != nullValue : "putIfAbsent() called with null-sentinel value " + nullValue;
  SlotAssignmentResult slot = hsa.ensure(key);
  if (slot.isNew()) {
    // Fresh slot: store the value and report that nothing was present before.
    mem.putLong(slot.address(), value);
    return nullValue;
  } else {
    // Existing slot: leave it untouched and return the current value.
    return mem.getLong(slot.address());
  }
}
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void test_putIfAbsent_invalidValue() {
  // Storing the null-sentinel value must trip the assert (requires -ea).
  map.putIfAbsent(newKey(), MISSING_VALUE);
}
/**
 * Lists the ids of all jobs (plans and workflows) matching the given name/status
 * filter, in ascending id order.
 *
 * @param options name and status filters; an empty status list matches per tracker semantics
 * @return sorted list of matching job ids
 */
public List<Long> list(ListAllPOptions options) {
  try (JobMasterAuditContext auditContext = createAuditContext("list")) {
    // Convert the proto status filter once instead of once per tracker
    // (the identical stream pipeline was previously duplicated).
    List<Status> statusFilter = options.getStatusList().stream()
        .map(status -> Status.valueOf(status.name()))
        .collect(Collectors.toList());
    List<Long> ids = new ArrayList<>();
    ids.addAll(mPlanTracker.findJobs(options.getName(), statusFilter));
    ids.addAll(mWorkflowTracker.findJobs(options.getName(), statusFilter));
    Collections.sort(ids);
    auditContext.setSucceeded(true);
    return ids;
  }
}
@Test
public void list() throws Exception {
  try (MockedStatic<PlanCoordinator> mockStaticPlanCoordinator = mockPlanCoordinator()) {
    TestPlanConfig jobConfig = new TestPlanConfig("/test");
    List<Long> jobIdList = new ArrayList<>();
    // Fill the master to capacity so list() has a full population to report.
    for (long i = 0; i < TEST_JOB_MASTER_JOB_CAPACITY; i++) {
      jobIdList.add(mJobMaster.run(jobConfig));
    }
    final List<Long> list = mJobMaster.list(ListAllPOptions.getDefaultInstance());
    // Ids come back sorted ascending, which this test assumes matches submission order.
    Assert.assertEquals(jobIdList, list);
    Assert.assertEquals(TEST_JOB_MASTER_JOB_CAPACITY,
        mJobMaster.list(ListAllPOptions.getDefaultInstance()).size());
  }
}
/**
 * Loads the transformation/job metadata referenced by this step, consulting the
 * transformation-level meta-file cache before hitting the repository or file
 * system. The lookup strategy depends on {@code specificationMethod}: by file
 * name, by repository name/directory, or by repository object reference.
 *
 * @param rep       repository to load from (may be null for file-based loads)
 * @param metaStore shared metastore handed through to the loaders
 * @param space     parent variable space used to resolve file names/paths
 * @return the loaded (and possibly cached) metadata
 * @throws KettleException when the referenced metadata cannot be loaded
 */
@Override
public T getMetaForStep( Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException {
  // Note - was a synchronized static method, but as no static variables are manipulated, this is entirely unnecessary
  // baseStepMeta.getParentStepMeta() or getParantTransMeta() is only null when running unit tests
  metaFileCache = baseStepMeta.getParentStepMeta() == null || baseStepMeta.getParentStepMeta().getParentTransMeta() == null
    ? null : baseStepMeta.getParentStepMeta().getParentTransMeta().getMetaFileCache();
  T theMeta = null;
  CurrentDirectoryResolver r = new CurrentDirectoryResolver();
  VariableSpace tmpSpace;
  if ( isTransMeta() ) {
    // send restricted parentVariables with several important options
    // Otherwise we destroy child variables and the option "Inherit all variables from the transformation" is enabled
    // always.
    tmpSpace = r.resolveCurrentDirectory( specificationMethod, getVarSpaceOnlyWithRequiredParentVars( space ), rep, baseStepMeta.getParentStepMeta(), filename );
  } else {
    tmpSpace = r.resolveCurrentDirectory( specificationMethod, space, rep, baseStepMeta.getParentStepMeta(), filename );
  }
  final String[] idContainer = new String[ 1 ]; //unigue portion of cache key passed though argument
  switch ( specificationMethod ) {
    case FILENAME:
      String realFilename = tmpSpace.environmentSubstitute( filename );
      if ( isTransMeta() && space != null ) {
        // This is a parent transformation and parent variable should work here. A child file name can be resolved
        // via parent space.
        realFilename = space.environmentSubstitute( realFilename );
      }
      theMeta = attemptCacheRead( realFilename ); //try to get from the cache first
      if ( theMeta == null ) {
        try {
          // OK, load the meta-data from file...
          // Don't set internal variables: they belong to the parent thread!
          if ( rep != null ) {
            theMeta = getMetaFromRepository2( realFilename, rep, r, idContainer );
          }
          if ( theMeta == null ) {
            theMeta = attemptLoadMeta( realFilename, rep, metaStore, null, tmpSpace, idContainer );
            LogChannel.GENERAL.logDetailed( "Loading " + friendlyMetaType + " from repository", friendlyMetaType + " was loaded from XML file [" + realFilename + "]" );
          }
        } catch ( Exception e ) {
          if ( isTransMeta() ) {
            throw new KettleException( BaseMessages.getString( persistentClass, "StepWithMappingMeta.Exception.UnableToLoadTrans" ), e );
          } else {
            throw new KettleException( BaseMessages.getString( persistentClass, "JobExecutorMeta.Exception.UnableToLoadJob" ), e );
          }
        }
      }
      break;
    case REPOSITORY_BY_NAME:
      String realMetaName = tmpSpace.environmentSubstitute( Const.NVL( metaName, "" ) );
      String realDirectory = tmpSpace.environmentSubstitute( Const.NVL( directory, "" ) );
      if ( isTransMeta() && space != null ) {
        // This is a parent transformation and parent variable should work here. A child file name can be
        // resolved via
        // parent space.
        realMetaName = space.environmentSubstitute( realMetaName );
        realDirectory = space.environmentSubstitute( realDirectory );
      }
      if ( Utils.isEmpty( realDirectory ) && !Utils.isEmpty( realMetaName ) ) {
        // The directory may be embedded in the name; split on the last slash.
        int index = realMetaName.lastIndexOf( '/' );
        String transPath = realMetaName;
        realMetaName = realMetaName.substring( index + 1 );
        realDirectory = transPath.substring( 0, index );
      }
      //We will use this key in cache no matter what the final successful path is so that we don't need to hit the
      // repo the next time it comes in. (ie: rep.findDirectory )
      String cacheKey = realDirectory + "/" + realMetaName;
      theMeta = attemptCacheRead( cacheKey ); //try to get from the cache first
      if ( theMeta == null ) {
        if ( rep != null ) {
          if ( !Utils.isEmpty( realMetaName ) && !Utils.isEmpty( realDirectory ) ) {
            realDirectory = r.normalizeSlashes( realDirectory );
            RepositoryDirectoryInterface repdir = rep.findDirectory( realDirectory );
            if ( repdir != null ) {
              try {
                // reads the last revision in the repository...
                theMeta = isTransMeta() ? (T) rep.loadTransformation( realMetaName, repdir, null, true, null )
                  : (T) rep.loadJob( realMetaName, repdir, null, null );
                if ( theMeta != null ) {
                  idContainer[ 0 ] = cacheKey;
                }
                LogChannel.GENERAL.logDetailed( "Loading " + friendlyMetaType + " from repository", "Executor " + friendlyMetaType + " [" + realMetaName + "] was loaded from the repository" );
              } catch ( Exception e ) {
                throw new KettleException( "Unable to load " + friendlyMetaType + " [" + realMetaName + "]", e );
              }
            }
          }
        } else {
          // rep is null, let's try loading by filename
          try {
            theMeta = attemptLoadMeta( cacheKey, rep, metaStore, null, tmpSpace, idContainer );
          } catch ( KettleException ke ) {
            try {
              // add .ktr extension and try again
              String extension = isTransMeta() ? Const.STRING_TRANS_DEFAULT_EXT : Const.STRING_JOB_DEFAULT_EXT;
              theMeta = attemptLoadMeta( cacheKey + "." + extension, rep, metaStore, null, tmpSpace, idContainer );
              if ( idContainer[ 0 ] != null ) {
                //It successfully read in the meta but we don't want to cache it with the extension so we override
                // it here
                idContainer[ 0 ] = cacheKey;
              }
            } catch ( KettleException ke2 ) {
              if ( isTransMeta() ) {
                throw new KettleException( BaseMessages.getString( persistentClass, "StepWithMappingMeta.Exception.UnableToLoadTrans", realMetaName ) + realDirectory );
              } else {
                throw new KettleException( BaseMessages.getString( persistentClass, "JobExecutorMeta.Exception.UnableToLoadJob", realMetaName ) + realDirectory );
              }
            }
          }
        }
      }
      break;
    case REPOSITORY_BY_REFERENCE:
      // Read the last revision by reference...
      theMeta = attemptCacheRead( metaObjectId.toString() );
      if ( theMeta == null ) {
        theMeta = isTransMeta() ? (T) rep.loadTransformation( metaObjectId, null ) : (T) rep.loadJob( metaObjectId, null );
        if ( theMeta != null ) {
          idContainer[ 0 ] = metaObjectId.toString(); //Only set when not found in cache
        }
      }
      break;
    default:
      break;
  }
  //If theMeta is present and idContainer[0] != null, ( meaning it read it from repo/file ), then cache it
  cacheMeta( idContainer[ 0 ], theMeta );
  return theMeta;
}
@Test //A Transformation getting the jobMeta from the filesystem public void getMetaForStepAsJobFromFileSystemTest() throws Exception { setupJobExecutorMeta(); specificationMethod = ObjectLocationSpecificationMethod.FILENAME; MetaFileLoaderImpl metaFileLoader = new MetaFileLoaderImpl<JobMeta>( baseStepMeta, specificationMethod ); JobMeta jobMeta = (JobMeta) metaFileLoader.getMetaForStep( repository, store, space ); validateFirstJobMetaAccess( jobMeta ); jobMeta = (JobMeta) metaFileLoader.getMetaForStep( repository, store, space ); validateSecondJobMetaAccess( jobMeta ); }
/**
 * JSON creator / static factory for {@code ModelId}.
 *
 * @param id the identifier value; must contain at least one non-whitespace character
 * @return a new {@code ModelId} wrapping the given value
 * @throws IllegalArgumentException if {@code id} is null, empty, or blank
 */
@JsonCreator
public static ModelId of(String id) {
    final boolean hasText = StringUtils.isNotBlank(id);
    Preconditions.checkArgument(hasText, "ID must not be blank");
    return new AutoValue_ModelId(id);
}
@Test
public void deserialize() {
    final ModelId modelId = ModelId.of("foobar");
    // A ModelId must serialize to a plain JSON string, not an object wrapper.
    final JsonNode jsonNode = objectMapper.convertValue(modelId, JsonNode.class);
    assertThat(jsonNode.isTextual()).isTrue();
    assertThat(jsonNode.asText()).isEqualTo("foobar");
}
/**
 * Loads namespace metadata by fetching the backing Glue database.
 *
 * <p>The result contains the Glue database parameters, plus — when present —
 * the location URI (trailing slash stripped) under
 * {@code IcebergToGlueConverter.GLUE_DB_LOCATION_KEY} and the description under
 * {@code IcebergToGlueConverter.GLUE_DESCRIPTION_KEY}.
 *
 * @param namespace the namespace to load
 * @return the namespace properties
 * @throws NoSuchNamespaceException if the name is invalid for Glue or the
 *     Glue database does not exist
 */
@Override
public Map<String, String> loadNamespaceMetadata(Namespace namespace) throws NoSuchNamespaceException {
  String databaseName =
      IcebergToGlueConverter.toDatabaseName(namespace, awsProperties.glueCatalogSkipNameValidation());
  try {
    Database database =
        glue.getDatabase(
                GetDatabaseRequest.builder()
                    .catalogId(awsProperties.glueCatalogId())
                    .name(databaseName)
                    .build())
            .database();
    // Copy into a mutable map so location/description can be merged into the parameters.
    Map<String, String> result = Maps.newHashMap(database.parameters());
    if (database.locationUri() != null) {
      result.put(
          IcebergToGlueConverter.GLUE_DB_LOCATION_KEY,
          LocationUtil.stripTrailingSlash(database.locationUri()));
    }
    if (database.description() != null) {
      result.put(IcebergToGlueConverter.GLUE_DESCRIPTION_KEY, database.description());
    }
    LOG.debug("Loaded metadata for namespace {} found {}", namespace, result);
    return result;
  } catch (InvalidInputException e) {
    // Glue rejected the database name itself; report as a missing namespace.
    throw new NoSuchNamespaceException(
        "invalid input for namespace %s, error message: %s", namespace, e.getMessage());
  } catch (EntityNotFoundException e) {
    throw new NoSuchNamespaceException(
        "fail to find Glue database for namespace %s, error message: %s",
        databaseName, e.getMessage());
  }
}
@Test
public void testLoadNamespaceMetadata() {
    Map<String, String> parameters = Maps.newHashMap();
    parameters.put("key", "val");
    // The expected location has the trailing slash stripped from the Glue locationUri.
    parameters.put(IcebergToGlueConverter.GLUE_DB_LOCATION_KEY, "s3://bucket2/db");
    Mockito.doReturn(
            GetDatabaseResponse.builder()
                .database(
                    Database.builder()
                        .name("db1")
                        .parameters(parameters)
                        .locationUri("s3://bucket2/db/")
                        .build())
                .build())
        .when(glue)
        .getDatabase(Mockito.any(GetDatabaseRequest.class));
    assertThat(glueCatalog.loadNamespaceMetadata(Namespace.of("db1"))).isEqualTo(parameters);
}
/**
 * Finds all records matching the given field values in this hash index.
 *
 * @param query one value per indexed match field, in field order; nulls are not supported
 * @return the matching records, or {@code null} if there are no matches
 * @throws IllegalStateException if the index has not been initialized
 * @throws IllegalArgumentException if any query value is null
 */
public HollowHashIndexResult findMatches(Object... query) {
    if (hashStateVolatile == null) {
        throw new IllegalStateException(this + " wasn't initialized");
    }
    // Combine the per-field hashes into a single bucket hash.
    int hashCode = 0;
    for(int i=0;i<query.length;i++) {
        if(query[i] == null)
            throw new IllegalArgumentException("querying by null unsupported; i=" + i);
        hashCode ^= HashCodes.hashInt(keyHashCode(query[i], i));
    }
    HollowHashIndexResult result;
    HollowHashIndexState hashState;
    // Optimistic-read loop: retry the whole probe if the index state was swapped
    // out (e.g. by a delta update) while we were reading it.
    do {
        result = null;
        hashState = hashStateVolatile;
        // Linear-probe the open-addressed match table starting at the hashed bucket.
        long bucket = hashCode & hashState.getMatchHashMask();
        long hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
        // An entry whose first traverser field is 0 marks an empty bucket.
        boolean bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
        while (!bucketIsEmpty) {
            if (matchIsEqual(hashState.getMatchHashTable(), hashBucketBit, query)) {
                // Bucket layout: [match key][select table size][select table pointer].
                int selectSize = (int) hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey(), hashState.getBitsPerSelectTableSize());
                long selectBucketPointer = hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey() + hashState.getBitsPerSelectTableSize(), hashState.getBitsPerSelectTablePointer());
                result = new HollowHashIndexResult(hashState, selectBucketPointer, selectSize);
                break;
            }
            // Probe the next bucket (wrapping via the mask).
            bucket = (bucket + 1) & hashState.getMatchHashMask();
            hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
            bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
        }
    } while (hashState != hashStateVolatile);
    return result;
}
@Test
public void testIndexingDoubleTypeFieldWithNullValues() throws Exception {
    // One record with a null Double value and one with -8.0.
    mapper.add(new TypeDouble(null));
    mapper.add(new TypeDouble(-8.0));
    roundTripSnapshot();
    HollowHashIndex index = new HollowHashIndex(readStateEngine, "TypeDouble", "", "data.value");
    // No record has value 2.0, so the lookup must return null.
    Assert.assertNull(index.findMatches(2.0));
    // Ordinal 1 is the record holding -8.0.
    assertIteratorContainsAll(index.findMatches(-8.0).iterator(), 1);
}
/**
 * Consumes (validates and removes) an OAuth2 authorization code.
 *
 * @param code the authorization code value presented by the client
 * @return the stored code record
 */
@Override
public OAuth2CodeDO consumeAuthorizationCode(String code) {
    final OAuth2CodeDO authorizationCode = oauth2CodeMapper.selectByCode(code);
    if (authorizationCode == null) {
        throw exception(OAUTH2_CODE_NOT_EXISTS);
    }
    if (DateUtils.isExpired(authorizationCode.getExpiresTime())) {
        throw exception(OAUTH2_CODE_EXPIRE);
    }
    // Authorization codes are single-use: delete the row before handing it back.
    oauth2CodeMapper.deleteById(authorizationCode.getId());
    return authorizationCode;
}
@Test
public void testConsumeAuthorizationCode_null() {
    // Call with an unknown code and assert the "code not exists" service exception.
    assertServiceException(() -> oauth2CodeService.consumeAuthorizationCode(randomString()),
        OAUTH2_CODE_NOT_EXISTS);
}
/**
 * Handles the expiry of an {@code EidSession} key in Redis by notifying the
 * relying party that the eID session timed out.
 *
 * @param event the Redis key-expired event; ignored unless its value is an {@code EidSession}
 */
@EventListener
public void handleRedisKeyExpiredEvent(RedisKeyExpiredEvent<EidSession> event) {
    final Object expiredValue = event.getValue();
    if (!(expiredValue instanceof EidSession)) {
        return;
    }
    EidSession session = (EidSession) expiredValue;
    // Report the timeout back to the caller's confirmation endpoint.
    confirmService.sendError(session.getReturnUrl(), session.getConfirmId(),
            session.getConfirmSecret(), "eid_timeout");
}
@Test
public void testHandleRedisKeyExpiredEvent() {
    // Build an expired session carrying all fields forwarded to the confirm service.
    EidSession session = new EidSession();
    session.setReturnUrl("http://localhost");
    session.setId("id");
    session.setConfirmId("app_session_id");
    session.setConfirmSecret("secret");
    Mockito.when(event.getValue()).thenReturn(session);
    timeoutService.handleRedisKeyExpiredEvent(event);
    // The timeout must be reported via sendError with the "eid_timeout" code.
    Mockito.verify(confirmService).sendError(session.getReturnUrl(), "app_session_id", "secret", "eid_timeout");
}
/**
 * Computes the null bitmap for an IN expression over the given block, following
 * three-valued SQL semantics: a row is NULL when the main expression is NULL,
 * or when it matches no value AND at least one candidate value is NULL.
 *
 * @param valueBlock the block of documents to evaluate
 * @return bitmap of row positions whose IN result is NULL (may be empty, never null here)
 */
@Nullable
@Override
public RoaringBitmap getNullBitmap(ValueBlock valueBlock) {
    int length = valueBlock.getNumDocs();
    RoaringBitmap result = new RoaringBitmap();
    // Rows where the main expression itself is NULL are always NULL.
    RoaringBitmap mainFunctionNullBitmap = _mainFunction.getNullBitmap(valueBlock);
    if (mainFunctionNullBitmap != null) {
        result.or(mainFunctionNullBitmap);
        // Short-circuit: every row is already NULL.
        if (result.getCardinality() == length) {
            return result;
        }
    }
    int[] intValuesSV = transformToIntValuesSV(valueBlock);
    if (_valueSet == null) {
        // Non-literal value list: evaluate each value function's null bitmap per row.
        RoaringBitmap valueFunctionsContainNull = new RoaringBitmap();
        RoaringBitmap[] valueFunctionNullBitmaps = new RoaringBitmap[_valueFunctions.length];
        for (int i = 0; i < _valueFunctions.length; i++) {
            valueFunctionNullBitmaps[i] = _valueFunctions[i].getNullBitmap(valueBlock);
        }
        // Mark rows where at least one candidate value is NULL.
        for (int i = 0; i < length; i++) {
            for (int j = 0; j < _valueFunctions.length; j++) {
                if (valueFunctionNullBitmaps[j] != null && valueFunctionNullBitmaps[j].contains(i)) {
                    valueFunctionsContainNull.add(i);
                    break;
                }
            }
        }
        // "x IN (...)" is NULL when x matches nothing and some candidate was NULL.
        for (int i = 0; i < length; i++) {
            if (mainFunctionNotContainedInValues(intValuesSV[i]) && valueFunctionsContainNull.contains(i)) {
                result.add(i);
            }
        }
    } else {
        // Literal value list: nullness of the candidates is a precomputed constant.
        for (int i = 0; i < length; i++) {
            if (mainFunctionNotContainedInValues(intValuesSV[i]) && _valueSetContainsNull) {
                result.add(i);
            }
        }
    }
    return result;
}
@Test
public void testInTransformFunctionIdentifierNotInAllLiteralValuesThatContainNullReturnsNull() {
    // "x IN (a, b, NULL)" where x matches neither a nor b must evaluate to NULL (SQL 3VL).
    String expressionStr =
        String.format("%s IN (%s, %s, NULL)", INT_SV_COLUMN, _intSVValues[0] + 1, _intSVValues[0] + 2);
    ExpressionContext expression = RequestContextUtils.getExpression(expressionStr);
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    // Row 0 must therefore be present in the null bitmap.
    assertTrue(transformFunction.getNullBitmap(_projectionBlock).contains(0));
}
/**
 * Validates that a notice with the given id exists.
 *
 * <p>A {@code null} id is treated as "nothing to validate" and passes silently.
 *
 * @param id the notice id, possibly {@code null}
 */
@VisibleForTesting
public void validateNoticeExists(Long id) {
    if (id == null) {
        return;
    }
    // Fail fast when no notice row exists for this id.
    if (noticeMapper.selectById(id) == null) {
        throw exception(NOTICE_NOT_FOUND);
    }
}
@Test
public void testValidateNoticeExists_success() {
    // Insert a notice row as the precondition.
    NoticeDO dbNotice = randomPojo(NoticeDO.class);
    noticeMapper.insert(dbNotice);
    // Validation of an existing id must complete without throwing.
    noticeService.validateNoticeExists(dbNotice.getId());
}
/**
 * Looks up a single mail log entry by its primary key.
 *
 * @param id the mail log id
 * @return the matching {@code MailLogDO}, or {@code null} if none exists
 */
@Override
public MailLogDO getMailLog(Long id) {
    return mailLogMapper.selectById(id);
}
@Test
public void testGetMailLog() {
    // Mock data: persist a random mail log row.
    MailLogDO dbMailLog = randomPojo(MailLogDO.class, o -> o.setTemplateParams(randomTemplateParams()));
    mailLogMapper.insert(dbMailLog);
    // Prepare the lookup parameter.
    Long id = dbMailLog.getId();
    // Invoke the service under test.
    MailLogDO mailLog = mailLogService.getMailLog(id);
    // Assert the loaded row equals what was stored.
    assertPojoEquals(dbMailLog, mailLog);
}
/**
 * Initializes the Kettle execution environment: loads kettle.properties into
 * system properties (with defaults filled in) and seeds placeholder values for
 * the internal cluster/slave/step variables.
 *
 * @throws KettleException if the properties cannot be read or applied
 */
public static void environmentInit() throws KettleException {
    // Workaround for a Mac OS/X Leopard issue where getContextClassLoader() is returning
    // null when run from the eclipse IDE
    // http://lists.apple.com/archives/java-dev/2007/Nov/msg00385.html - DM
    // Moving this hack to the first place where the NPE is triggered so all entrypoints can be debugged in Mac Eclipse
    if ( Thread.currentThread().getContextClassLoader() == null ) {
      Thread.currentThread().setContextClassLoader( ClassLoader.getSystemClassLoader() );
    }
    // Read kettle.properties, merge in defaults, then publish as system properties.
    Map<Object, Object> kettleProperties = EnvUtil.readProperties( Const.KETTLE_PROPERTIES );
    insertDefaultValues( kettleProperties );
    applyKettleProperties( kettleProperties );

    // Also put some default values for obscure environment variables in there...
    // Place-holders if you will.
    //
    System.getProperties().put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, "1" );
    System.getProperties().put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, "0" );
    System.getProperties().put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, "slave-trans-name" );

    System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_COPYNR, "0" );
    System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_NAME, "step-name" );
    System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_PARTITION_ID, "partition-id" );
    System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_PARTITION_NR, "0" );
    System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_UNIQUE_COUNT, "1" );
    System.getProperties().put( Const.INTERNAL_VARIABLE_STEP_UNIQUE_NUMBER, "0" );
}
@Test
public void vfsUserDirIsRoot_IsPublishedOnInitialisation() throws Exception {
    EnvUtil.environmentInit(); //See PDI-14522, PDI-14821
    // don't check the exact value, because the initialisation depends on local settings
    // instead, simply check the value exists
    assertNotNull( Variables.getADefaultVariableSpace().getVariable( Const.VFS_USER_DIR_IS_ROOT ) );
    assertNotNull( System.getProperty( Const.VFS_USER_DIR_IS_ROOT ) );
}
/**
 * Builds the records and the activation log message for a controller taking over
 * a non-empty metadata log. Handles aborting an in-flight metadata transaction
 * and reconciling the ZK migration state with the current configuration.
 *
 * <p>NOTE: unit tests assert the exact wording of the activation message; do not
 * change any of the appended strings without updating those tests.
 *
 * @param activationMessageConsumer receives the assembled activation log message
 * @param transactionStartOffset offset of an in-flight transaction, or -1 if none
 * @param zkMigrationEnabled whether 'zookeeper.metadata.migration.enable' is set
 * @param featureControl source of the replayed ZK migration state
 * @param metadataVersion the active metadata.version
 * @return an atomic result carrying any records that must be written on activation
 */
static ControllerResult<Void> recordsForNonEmptyLog(
    Consumer<String> activationMessageConsumer,
    long transactionStartOffset,
    boolean zkMigrationEnabled,
    FeatureControlManager featureControl,
    MetadataVersion metadataVersion
) {
    StringBuilder logMessageBuilder = new StringBuilder("Performing controller activation. ");

    // Logs have been replayed. We need to initialize some things here if upgrading from older KRaft versions
    List<ApiMessageAndVersion> records = new ArrayList<>();

    // Check for in-flight transaction
    if (transactionStartOffset != -1L) {
        if (!metadataVersion.isMetadataTransactionSupported()) {
            throw new RuntimeException("Detected in-progress transaction at offset " + transactionStartOffset +
                ", but the metadata.version " + metadataVersion +
                " does not support transactions. Cannot continue.");
        } else {
            // Abort the stale transaction left over from the previous controller.
            logMessageBuilder
                .append("Aborting in-progress metadata transaction at offset ")
                .append(transactionStartOffset)
                .append(". ");
            records.add(new ApiMessageAndVersion(
                new AbortTransactionRecord().setReason("Controller failover"),
                (short) 0));
        }
    }

    if (metadataVersion.equals(MetadataVersion.MINIMUM_KRAFT_VERSION)) {
        logMessageBuilder.append("No metadata.version feature level record was found in the log. ")
            .append("Treating the log as version ")
            .append(MetadataVersion.MINIMUM_KRAFT_VERSION)
            .append(". ");
    }

    if (zkMigrationEnabled && !metadataVersion.isMigrationSupported()) {
        throw new RuntimeException("Should not have ZK migrations enabled on a cluster running " +
            "metadata.version " + featureControl.metadataVersion());
    } else if (metadataVersion.isMigrationSupported()) {
        logMessageBuilder
            .append("Loaded ZK migration state of ")
            .append(featureControl.zkMigrationState())
            .append(". ");
        switch (featureControl.zkMigrationState()) {
            case NONE:
                // Since this is the default state there may or may not be an actual NONE in the log. Regardless,
                // it will eventually be persisted in a snapshot, so we don't need to explicitly write it here.
                if (zkMigrationEnabled) {
                    throw new RuntimeException("Should not have ZK migrations enabled on a cluster that was " +
                        "created in KRaft mode.");
                }
                logMessageBuilder.append("This is expected because this is a de-novo KRaft cluster.");
                break;
            case PRE_MIGRATION:
                if (!metadataVersion.isMetadataTransactionSupported()) {
                    logMessageBuilder
                        .append("Activating pre-migration controller without empty log. ")
                        .append("There may be a partial migration. ");
                }
                break;
            case MIGRATION:
                if (!zkMigrationEnabled) {
                    // This can happen if controller leadership transfers to a controller with migrations enabled
                    // after another controller had finalized the migration. For example, during a rolling restart
                    // of the controller quorum during which the migration config is being set to false.
                    logMessageBuilder
                        .append("Completing the ZK migration since this controller was configured with ")
                        .append("'zookeeper.metadata.migration.enable' set to 'false'. ");
                    records.add(ZkMigrationState.POST_MIGRATION.toRecord());
                } else {
                    // This log message is used in zookeeper_migration_test.py
                    logMessageBuilder
                        .append("Staying in ZK migration mode since 'zookeeper.metadata.migration.enable' ")
                        .append("is still 'true'. ");
                }
                break;
            case POST_MIGRATION:
                if (zkMigrationEnabled) {
                    logMessageBuilder
                        .append("Ignoring 'zookeeper.metadata.migration.enable' value of 'true' since ")
                        .append("the ZK migration has been completed. ");
                }
                break;
            default:
                throw new IllegalStateException("Unsupported ZkMigrationState " + featureControl.zkMigrationState());
        }
    }

    activationMessageConsumer.accept(logMessageBuilder.toString().trim());
    return ControllerResult.atomicOf(records, null);
}
@Test
public void testActivationMessageForNonEmptyLogNoMigrations() {
    ControllerResult<Void> result;

    // Minimum KRaft version: the "treating log as 3.0-IV1" notice must appear.
    result = ActivationRecordsGenerator.recordsForNonEmptyLog(
        logMsg -> assertEquals("Performing controller activation. No metadata.version feature level " +
            "record was found in the log. Treating the log as version 3.0-IV1.", logMsg),
        -1L,
        false,
        buildFeatureControl(MetadataVersion.MINIMUM_KRAFT_VERSION, Optional.empty()),
        MetadataVersion.MINIMUM_KRAFT_VERSION
    );
    assertTrue(result.isAtomic());
    assertEquals(0, result.records().size());

    // 3.3-IV0: no migration support and no version notice -> bare activation message.
    result = ActivationRecordsGenerator.recordsForNonEmptyLog(
        logMsg -> assertEquals("Performing controller activation.", logMsg),
        -1L,
        false,
        buildFeatureControl(MetadataVersion.IBP_3_3_IV0, Optional.empty()),
        MetadataVersion.IBP_3_3_IV0
    );
    assertTrue(result.isAtomic());
    assertEquals(0, result.records().size());

    // 3.4-IV0: migration supported; NONE state logs the de-novo KRaft explanation.
    result = ActivationRecordsGenerator.recordsForNonEmptyLog(
        logMsg -> assertEquals("Performing controller activation. Loaded ZK migration state of NONE. " +
            "This is expected because this is a de-novo KRaft cluster.", logMsg),
        -1L,
        false,
        buildFeatureControl(MetadataVersion.IBP_3_4_IV0, Optional.empty()),
        MetadataVersion.IBP_3_4_IV0
    );
    assertTrue(result.isAtomic());
    assertEquals(0, result.records().size());

    // 3.6-IV1 with an in-flight transaction: one abort record is produced.
    result = ActivationRecordsGenerator.recordsForNonEmptyLog(
        logMsg -> assertEquals("Performing controller activation. Aborting in-progress metadata " +
            "transaction at offset 42. Loaded ZK migration state of NONE. " +
            "This is expected because this is a de-novo KRaft cluster.", logMsg),
        42L,
        false,
        buildFeatureControl(MetadataVersion.IBP_3_6_IV1, Optional.empty()),
        MetadataVersion.IBP_3_6_IV1
    );
    assertTrue(result.isAtomic());
    assertEquals(1, result.records().size());

    // 3.6-IV0 does not support transactions: an in-flight transaction is fatal.
    assertEquals(
        "Detected in-progress transaction at offset 42, but the metadata.version 3.6-IV0 does not support " +
        "transactions. Cannot continue.",
        assertThrows(RuntimeException.class, () ->
            ActivationRecordsGenerator.recordsForNonEmptyLog(
                logMsg -> fail(),
                42L,
                false,
                buildFeatureControl(MetadataVersion.IBP_3_6_IV0, Optional.empty()),
                MetadataVersion.IBP_3_6_IV0
            )).getMessage()
    );
}
/**
 * Fails unless the multimap under test contains exactly the entries of
 * {@code expectedMultimap}, respecting per-key duplicate counts. Returns an
 * {@code Ordered} handle so callers can additionally assert entry order.
 *
 * @param expectedMultimap the expected entries; must not be null
 * @return an {@code Ordered} for an optional in-order follow-up assertion
 */
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
    checkNotNull(expectedMultimap, "expectedMultimap");
    checkNotNull(actual);
    // Symmetric difference: entries the subject lacks vs. entries it has extra.
    ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
    ListMultimap<?, ?> extra = difference(actual, expectedMultimap);

    // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
    // the subject but not enough times. Similarly for unexpected extra items.
    if (!missing.isEmpty()) {
        if (!extra.isEmpty()) {
            // When a missing and an extra entry print identically, add type info to
            // disambiguate the failure message.
            boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
            // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
            // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
            // show the actual and expected multimaps in the standard format).
            String missingDisplay =
                addTypeInfo
                    ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
                    : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
            String extraDisplay =
                addTypeInfo
                    ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
                    : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
            failWithActual(
                fact("missing", missingDisplay),
                fact("unexpected", extraDisplay),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        } else {
            // Only missing entries.
            failWithActual(
                fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        }
    } else if (!extra.isEmpty()) {
        // Only unexpected entries.
        failWithActual(
            fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
            simpleFact("---"),
            fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
        return ALREADY_FAILED;
    }
    // Entries match; hand back an Ordered that can verify ordering on demand.
    return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
}
@Test
public void containsExactlyRespectsDuplicatesFailure() {
    ImmutableListMultimap<Integer, String> actual =
        ImmutableListMultimap.of(3, "one", 3, "two", 3, "one", 4, "five", 4, "five");
    // Copying into a set multimap collapses duplicates, so the actual has extras.
    ImmutableSetMultimap<Integer, String> expected = ImmutableSetMultimap.copyOf(actual);
    expectFailureWhenTestingThat(actual).containsExactlyEntriesIn(expected);
    // The failure must report only the duplicate (unexpected) entries.
    assertFailureKeys("unexpected", "---", "expected", "but was");
    assertFailureValue("unexpected", "{3=[one], 4=[five]}");
    assertFailureValue("expected", "{3=[one, two], 4=[five]}");
    assertFailureValue("but was", "{3=[one, two, one], 4=[five, five]}");
}
/**
 * Resizes the frequency table to accommodate the given maximum cache size,
 * resetting all counters. No-op if the current table is already large enough.
 *
 * @param maximumSize the maximum size of the cache; must be non-negative
 */
public void ensureCapacity(@NonNegative long maximumSize) {
    requireArgument(maximumSize >= 0);
    // Clamp below Integer.MAX_VALUE/2 so ceilingPowerOfTwo cannot overflow int.
    int maximum = (int) Math.min(maximumSize, Integer.MAX_VALUE >>> 1);
    if ((table != null) && (table.length >= maximum)) {
        return;
    }

    // Table length is a power of two (minimum 8) so masking replaces modulo.
    table = new long[Math.max(Caffeine.ceilingPowerOfTwo(maximum), 8)];
    sampleSize = (maximumSize == 0) ? 10 : (10 * maximum);
    blockMask = (table.length >>> 3) - 1;
    // 10 * maximum may overflow int; fall back to the largest usable sample size.
    if (sampleSize <= 0) {
        sampleSize = Integer.MAX_VALUE;
    }
    size = 0;
}
@Test(dataProvider = "sketch", groups = "isolated")
public void ensureCapacity_maximum(FrequencySketch<Integer> sketch) {
    // Chosen so that 10 * maximum overflows int, triggering the MAX_VALUE fallback.
    int size = Integer.MAX_VALUE / 10 + 1;
    sketch.ensureCapacity(size);
    assertThat(sketch.sampleSize).isEqualTo(Integer.MAX_VALUE);
    assertThat(sketch.table).hasLength(Caffeine.ceilingPowerOfTwo(size));
    assertThat(sketch.blockMask).isEqualTo((sketch.table.length >> 3) - 1);
}
@Override public void asyncRequest(Request request, final RequestCallBack requestCallBack) throws NacosException { Payload grpcRequest = GrpcUtils.convert(request); ListenableFuture<Payload> requestFuture = grpcFutureServiceStub.request(grpcRequest); //set callback . Futures.addCallback(requestFuture, new FutureCallback<Payload>() { @Override public void onSuccess(@Nullable Payload grpcResponse) { Response response = (Response) GrpcUtils.parse(grpcResponse); if (response != null) { if (response instanceof ErrorResponse) { requestCallBack.onException(new NacosException(response.getErrorCode(), response.getMessage())); } else { requestCallBack.onResponse(response); } } else { requestCallBack.onException(new NacosException(ResponseCode.FAIL.getCode(), "response is null")); } } @Override public void onFailure(Throwable throwable) { if (throwable instanceof CancellationException) { requestCallBack.onException( new TimeoutException("Timeout after " + requestCallBack.getTimeout() + " milliseconds.")); } else { requestCallBack.onException(throwable); } } }, requestCallBack.getExecutor() != null ? requestCallBack.getExecutor() : this.executor); // set timeout future. ListenableFuture<Payload> payloadListenableFuture = Futures.withTimeout(requestFuture, requestCallBack.getTimeout(), TimeUnit.MILLISECONDS, RpcScheduledExecutor.TIMEOUT_SCHEDULER); }
@Test
void testAsyncRequestWithCancelException() throws NacosException, ExecutionException, InterruptedException {
    // Simulate the timeout wrapper cancelling the request future.
    when(future.get()).thenThrow(new CancellationException("test"));
    // Run the registered completion listener synchronously so the callback fires inline.
    doAnswer(invocationOnMock -> {
        ((Runnable) invocationOnMock.getArgument(0)).run();
        return null;
    }).when(future).addListener(any(Runnable.class), eq(executor));
    RequestCallBack requestCallBack = mock(RequestCallBack.class);
    connection.asyncRequest(new HealthCheckRequest(), requestCallBack);
    // Cancellation must be translated into a TimeoutException for the caller.
    verify(requestCallBack).onException(any(TimeoutException.class));
}
/**
 * Returns the state window mapped to the given in-flight window.
 *
 * @param window the (possibly merged) window to look up
 * @return the window under which this window's state is stored, or {@code null}
 *     if no mapping exists
 */
public W getStateWindow(W window) {
    return mapping.get(window);
}
@Test
void testRestoreFromState() throws Exception {
    // Back the MergingWindowSet with a mocked ListState holding two persisted mappings.
    @SuppressWarnings("unchecked")
    ListState<Tuple2<TimeWindow, TimeWindow>> mockState = mock(ListState.class);
    when(mockState.get())
        .thenReturn(
            Lists.newArrayList(
                new Tuple2<>(new TimeWindow(17, 42), new TimeWindow(42, 17)),
                new Tuple2<>(new TimeWindow(1, 2), new TimeWindow(3, 4))));

    MergingWindowSet<TimeWindow> windowSet =
        new MergingWindowSet<>(
            EventTimeSessionWindows.withGap(Time.milliseconds(3)), mockState);

    // Both persisted window -> state-window mappings must be restored verbatim.
    assertThat(windowSet.getStateWindow(new TimeWindow(17, 42)))
        .isEqualTo(new TimeWindow(42, 17));
    assertThat(windowSet.getStateWindow(new TimeWindow(1, 2))).isEqualTo(new TimeWindow(3, 4));
}
/**
 * Builds the joined stream for this join node.
 *
 * <p>Non-foreign-key joins require both sources to be co-partitioned, so their
 * topic partition counts are verified first; foreign-key joins skip that check.
 *
 * @param buildContext the plan build context
 * @return the {@code SchemaKStream} produced by the appropriate joiner
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    if (!joinKey.isForeignKey()) {
        ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient());
    }

    final JoinerFactory joinerFactory = new JoinerFactory(
        buildContext,
        this,
        buildContext.buildNodeContext(getId().toString()));

    // Select the joiner matching the (left, right) source types and execute it.
    return joinerFactory.getJoiner(left.getNodeOutputType(), right.getNodeOutputType()).join();
}
@Test
public void shouldPerformStreamToStreamRightJoinWithGracePeriod() {
    // Given:
    setupStream(left, leftSchemaKStream);
    setupStream(right, rightSchemaKStream);
    final JoinNode joinNode =
        new JoinNode(nodeId, RIGHT, joinKey, true, left, right, WITHIN_EXPRESSION_WITH_GRACE, "KAFKA");

    // When:
    joinNode.buildStream(planBuildContext);

    // Then: the right join is delegated with the grace-period window expression.
    verify(leftSchemaKStream).rightJoin(
        rightSchemaKStream,
        SYNTH_KEY,
        WITHIN_EXPRESSION_WITH_GRACE.get(),
        VALUE_FORMAT.getFormatInfo(),
        OTHER_FORMAT.getFormatInfo(),
        CONTEXT_STACKER
    );
}
/**
 * Derives the update requirements for replacing a view.
 *
 * <p>A replace must always be guarded by a UUID assertion against the base
 * metadata; further requirements are derived from the individual updates.
 *
 * @param base the current view metadata; must not be null
 * @param metadataUpdates the updates being applied; must not be null
 * @return the requirements that must hold for the replace to commit
 */
public static List<UpdateRequirement> forReplaceView(
    ViewMetadata base, List<MetadataUpdate> metadataUpdates) {
  Preconditions.checkArgument(null != base, "Invalid view metadata: null");
  Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");

  Builder builder = new Builder(null, false);
  builder.require(new UpdateRequirement.AssertViewUUID(base.uuid()));
  for (MetadataUpdate update : metadataUpdates) {
    builder.update(update);
  }
  return builder.build();
}
@Test
public void assignUUIDToView() {
    List<UpdateRequirement> requirements =
        UpdateRequirements.forReplaceView(
            viewMetadata,
            ImmutableList.of(
                new MetadataUpdate.AssignUUID(viewMetadata.uuid()),
                new MetadataUpdate.AssignUUID(UUID.randomUUID().toString()),
                new MetadataUpdate.AssignUUID(UUID.randomUUID().toString())));
    requirements.forEach(req -> req.validate(viewMetadata));

    // Multiple AssignUUID updates must collapse into a single AssertViewUUID requirement.
    assertThat(requirements)
        .hasSize(1)
        .hasOnlyElementsOfType(UpdateRequirement.AssertViewUUID.class);

    assertViewUUID(requirements);
}
/**
 * Reports whether the wrapped processor is cooperative, invoking it with the
 * processor's owning class loader installed on the current thread.
 *
 * @return the wrapped processor's cooperativeness
 */
@Override
public boolean isCooperative() {
    return doWithClassLoader(context.classLoader(), processor::isCooperative);
}
@Test
public void when_isCooperative_then_true() {
    // The tasklet must report the wrapped processor's cooperativeness (true by default).
    assertTrue(createTasklet().isCooperative());
}
/**
 * Reports a (manually decided) global status for the given transaction and then
 * fires the after-completion hooks.
 *
 * @param tx the global transaction to report on
 * @param globalStatus the status to report
 * @throws TransactionalExecutor.ExecutionException wrapping any
 *     {@code TransactionException} raised during reporting, with code
 *     {@code ReportFailure}
 */
@Override
public void reportTransaction(GlobalTransaction tx, GlobalStatus globalStatus)
        throws TransactionalExecutor.ExecutionException {
    try {
        tx.globalReport(globalStatus);
        triggerAfterCompletion(tx);
    } catch (TransactionException reportFailure) {
        // Wrap transport/TC failures so callers receive a typed failure code.
        throw new TransactionalExecutor.ExecutionException(tx, reportFailure,
                TransactionalExecutor.Code.ReportFailure);
    }
}
@Test
public void testReportTransaction() {
    MockGlobalTransaction mockGlobalTransaction = new MockGlobalTransaction();
    GlobalStatus globalStatus = GlobalStatus.Committed;
    // Reporting a committed status on a healthy mock must not raise.
    Assertions.assertDoesNotThrow(
        () -> sagaTransactionalTemplate.reportTransaction(mockGlobalTransaction, globalStatus));
}
/**
 * Parses the raw column definition string and registers the resulting column
 * with this template.
 *
 * @param columnString the raw column definition (may include "!" negation and a
 *     trailing condition expression)
 */
public void addColumn(String columnString) {
    this.columns.add(new DefaultTemplateColumn(templateContainer, columnString));
}
@Test
public void testAddColumn() {
    RuleTemplate rt = new RuleTemplate("rt1", getTemplateContainer());
    // Cover every parse shape: plain, negated, conditioned, negated+conditioned, indexed.
    rt.addColumn("StandardColumn");
    rt.addColumn("!NotColumn");
    rt.addColumn("ColumnCondition == \"test\"");
    rt.addColumn("!NotColumnCondition == \"test2\"");
    rt.addColumn("ArrayColumnCondition[0] == \"test2\"");

    List<TemplateColumn> columns = rt.getColumns();
    assertThat(columns.size()).isEqualTo(5);

    // Plain column: no negation, no condition.
    TemplateColumn column1 = columns.get(0);
    assertThat(column1.getName()).isEqualTo("StandardColumn");
    assertThat(column1.isNotCondition()).isFalse();
    assertThat(StringUtils.isEmpty(column1.getCondition())).isTrue();

    // Leading "!" sets the not-condition flag and is stripped from the name.
    TemplateColumn column2 = columns.get(1);
    assertThat(column2.getName()).isEqualTo("NotColumn");
    assertThat(column2.isNotCondition()).isTrue();
    assertThat(StringUtils.isEmpty(column2.getCondition())).isTrue();

    // Trailing expression is captured as the condition.
    TemplateColumn column3 = columns.get(2);
    assertThat(column3.getName()).isEqualTo("ColumnCondition");
    assertThat(column3.isNotCondition()).isFalse();
    assertThat(column3.getCondition()).isEqualTo("== \"test\"");

    // Negation and condition combined.
    TemplateColumn column4 = columns.get(3);
    assertThat(column4.getName()).isEqualTo("NotColumnCondition");
    assertThat(column4.isNotCondition()).isTrue();
    assertThat(column4.getCondition()).isEqualTo("== \"test2\"");
}
/**
 * Submits partition reassignments (or cancellations, for empty Optionals) to the
 * controller and returns per-partition futures for the outcome.
 *
 * <p>Partitions with unrepresentable topic names or negative indices fail their
 * futures immediately and are excluded from the request. On NOT_CONTROLLER the
 * call is retried against the new controller; any other top-level error is
 * fanned out to every partition in the response.
 *
 * @param reassignments target replica lists keyed by partition; an empty
 *     Optional cancels an in-progress reassignment
 * @param options request options (timeout)
 * @return per-partition result futures
 */
@Override
public AlterPartitionReassignmentsResult alterPartitionReassignments(
        Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments,
        AlterPartitionReassignmentsOptions options) {
    final Map<TopicPartition, KafkaFutureImpl<Void>> futures = new HashMap<>();
    // Group by topic (sorted) -> partition (sorted) for a deterministic request layout.
    final Map<String, Map<Integer, Optional<NewPartitionReassignment>>> topicsToReassignments = new TreeMap<>();
    for (Map.Entry<TopicPartition, Optional<NewPartitionReassignment>> entry : reassignments.entrySet()) {
        String topic = entry.getKey().topic();
        int partition = entry.getKey().partition();
        TopicPartition topicPartition = new TopicPartition(topic, partition);
        Optional<NewPartitionReassignment> reassignment = entry.getValue();
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        futures.put(topicPartition, future);
        if (topicNameIsUnrepresentable(topic)) {
            // Reject locally; this partition never reaches the broker.
            future.completeExceptionally(new InvalidTopicException("The given topic name '" +
                topic + "' cannot be represented in a request."));
        } else if (topicPartition.partition() < 0) {
            future.completeExceptionally(new InvalidTopicException("The given partition index " +
                topicPartition.partition() + " is not valid."));
        } else {
            Map<Integer, Optional<NewPartitionReassignment>> partitionReassignments =
                topicsToReassignments.get(topicPartition.topic());
            if (partitionReassignments == null) {
                partitionReassignments = new TreeMap<>();
                topicsToReassignments.put(topic, partitionReassignments);
            }
            partitionReassignments.put(partition, reassignment);
        }
    }

    final long now = time.milliseconds();
    Call call = new Call("alterPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()),
        new ControllerNodeProvider()) {

        @Override
        public AlterPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) {
            AlterPartitionReassignmentsRequestData data =
                new AlterPartitionReassignmentsRequestData();
            for (Map.Entry<String, Map<Integer, Optional<NewPartitionReassignment>>> entry :
                topicsToReassignments.entrySet()) {
                String topicName = entry.getKey();
                Map<Integer, Optional<NewPartitionReassignment>> partitionsToReassignments = entry.getValue();

                List<ReassignablePartition> reassignablePartitions = new ArrayList<>();
                for (Map.Entry<Integer, Optional<NewPartitionReassignment>> partitionEntry :
                    partitionsToReassignments.entrySet()) {
                    int partitionIndex = partitionEntry.getKey();
                    Optional<NewPartitionReassignment> reassignment = partitionEntry.getValue();

                    // An absent reassignment maps to null replicas = cancel the reassignment.
                    ReassignablePartition reassignablePartition = new ReassignablePartition()
                        .setPartitionIndex(partitionIndex)
                        .setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null));
                    reassignablePartitions.add(reassignablePartition);
                }

                ReassignableTopic reassignableTopic = new ReassignableTopic()
                    .setName(topicName)
                    .setPartitions(reassignablePartitions);
                data.topics().add(reassignableTopic);
            }
            data.setTimeoutMs(timeoutMs);
            return new AlterPartitionReassignmentsRequest.Builder(data);
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterPartitionReassignmentsResponse response = (AlterPartitionReassignmentsResponse) abstractResponse;
            Map<TopicPartition, ApiException> errors = new HashMap<>();
            int receivedResponsesCount = 0;

            Errors topLevelError = Errors.forCode(response.data().errorCode());
            switch (topLevelError) {
                case NONE:
                    receivedResponsesCount += validateTopicResponses(response.data().responses(), errors);
                    break;
                case NOT_CONTROLLER:
                    // Refresh controller metadata and retry against the new controller.
                    handleNotControllerError(topLevelError);
                    break;
                default:
                    // Top-level failure: fan the same error out to every partition.
                    for (ReassignableTopicResponse topicResponse : response.data().responses()) {
                        String topicName = topicResponse.name();
                        for (ReassignablePartitionResponse partition : topicResponse.partitions()) {
                            errors.put(
                                new TopicPartition(topicName, partition.partitionIndex()),
                                new ApiError(topLevelError, response.data().errorMessage()).exception()
                            );
                            receivedResponsesCount += 1;
                        }
                    }
                    break;
            }

            assertResponseCountMatch(errors, receivedResponsesCount);
            for (Map.Entry<TopicPartition, ApiException> entry : errors.entrySet()) {
                ApiException exception = entry.getValue();
                if (exception == null)
                    futures.get(entry.getKey()).complete(null);
                else
                    futures.get(entry.getKey()).completeExceptionally(exception);
            }
        }

        // Guards against a broker returning a different number of partition results
        // than were requested (only when no per-partition errors were reported).
        private void assertResponseCountMatch(Map<TopicPartition, ApiException> errors, int receivedResponsesCount) {
            int expectedResponsesCount = topicsToReassignments.values().stream().mapToInt(Map::size).sum();
            if (errors.values().stream().noneMatch(Objects::nonNull) && receivedResponsesCount != expectedResponsesCount) {
                String quantifier = receivedResponsesCount > expectedResponsesCount ? "many" : "less";
                throw new UnknownServerException("The server returned too " + quantifier + " results." +
                    "Expected " + expectedResponsesCount + " but received " + receivedResponsesCount);
            }
        }

        // Records the per-partition outcome (null = success) and counts responses seen.
        private int validateTopicResponses(List<ReassignableTopicResponse> topicResponses,
                                           Map<TopicPartition, ApiException> errors) {
            int receivedResponsesCount = 0;

            for (ReassignableTopicResponse topicResponse : topicResponses) {
                String topicName = topicResponse.name();
                for (ReassignablePartitionResponse partResponse : topicResponse.partitions()) {
                    Errors partitionError = Errors.forCode(partResponse.errorCode());

                    TopicPartition tp = new TopicPartition(topicName, partResponse.partitionIndex());
                    if (partitionError == Errors.NONE) {
                        errors.put(tp, null);
                    } else {
                        errors.put(tp, new ApiError(partitionError, partResponse.errorMessage()).exception());
                    }
                    receivedResponsesCount += 1;
                }
            }

            return receivedResponsesCount;
        }

        @Override
        void handleFailure(Throwable throwable) {
            for (KafkaFutureImpl<Void> future : futures.values()) {
                future.completeExceptionally(throwable);
            }
        }
    };
    // Only issue the RPC if at least one partition survived local validation.
    if (!topicsToReassignments.isEmpty()) {
        runnable.call(call, now);
    }
    return new AlterPartitionReassignmentsResult(new HashMap<>(futures));
}
@Test public void testAlterPartitionReassignments() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); TopicPartition tp1 = new TopicPartition("A", 0); TopicPartition tp2 = new TopicPartition("B", 0); Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments = new HashMap<>(); reassignments.put(tp1, Optional.empty()); reassignments.put(tp2, Optional.of(new NewPartitionReassignment(asList(1, 2, 3)))); // 1. server returns less responses than number of partitions we sent AlterPartitionReassignmentsResponseData responseData1 = new AlterPartitionReassignmentsResponseData(); ReassignablePartitionResponse normalPartitionResponse = new ReassignablePartitionResponse().setPartitionIndex(0); responseData1.setResponses(Collections.singletonList( new ReassignableTopicResponse() .setName("A") .setPartitions(Collections.singletonList(normalPartitionResponse)))); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(responseData1)); AlterPartitionReassignmentsResult result1 = env.adminClient().alterPartitionReassignments(reassignments); Future<Void> future1 = result1.all(); Future<Void> future2 = result1.values().get(tp1); TestUtils.assertFutureError(future1, UnknownServerException.class); TestUtils.assertFutureError(future2, UnknownServerException.class); // 2. 
NOT_CONTROLLER error handling AlterPartitionReassignmentsResponseData controllerErrResponseData = new AlterPartitionReassignmentsResponseData() .setErrorCode(Errors.NOT_CONTROLLER.code()) .setErrorMessage(Errors.NOT_CONTROLLER.message()) .setResponses(asList( new ReassignableTopicResponse() .setName("A") .setPartitions(Collections.singletonList(normalPartitionResponse)), new ReassignableTopicResponse() .setName("B") .setPartitions(Collections.singletonList(normalPartitionResponse))) ); MetadataResponse controllerNodeResponse = RequestTestUtils.metadataResponse(env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 1, Collections.emptyList()); AlterPartitionReassignmentsResponseData normalResponse = new AlterPartitionReassignmentsResponseData() .setResponses(asList( new ReassignableTopicResponse() .setName("A") .setPartitions(Collections.singletonList(normalPartitionResponse)), new ReassignableTopicResponse() .setName("B") .setPartitions(Collections.singletonList(normalPartitionResponse))) ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(controllerErrResponseData)); env.kafkaClient().prepareResponse(controllerNodeResponse); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(normalResponse)); AlterPartitionReassignmentsResult controllerErrResult = env.adminClient().alterPartitionReassignments(reassignments); controllerErrResult.all().get(); controllerErrResult.values().get(tp1).get(); controllerErrResult.values().get(tp2).get(); // 3. 
partition-level error AlterPartitionReassignmentsResponseData partitionLevelErrData = new AlterPartitionReassignmentsResponseData() .setResponses(asList( new ReassignableTopicResponse() .setName("A") .setPartitions(Collections.singletonList(new ReassignablePartitionResponse() .setPartitionIndex(0).setErrorMessage(Errors.INVALID_REPLICA_ASSIGNMENT.message()) .setErrorCode(Errors.INVALID_REPLICA_ASSIGNMENT.code()) )), new ReassignableTopicResponse() .setName("B") .setPartitions(Collections.singletonList(normalPartitionResponse))) ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(partitionLevelErrData)); AlterPartitionReassignmentsResult partitionLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments); TestUtils.assertFutureError(partitionLevelErrResult.values().get(tp1), Errors.INVALID_REPLICA_ASSIGNMENT.exception().getClass()); partitionLevelErrResult.values().get(tp2).get(); // 4. top-level error String errorMessage = "this is custom error message"; AlterPartitionReassignmentsResponseData topLevelErrResponseData = new AlterPartitionReassignmentsResponseData() .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code()) .setErrorMessage(errorMessage) .setResponses(asList( new ReassignableTopicResponse() .setName("A") .setPartitions(Collections.singletonList(normalPartitionResponse)), new ReassignableTopicResponse() .setName("B") .setPartitions(Collections.singletonList(normalPartitionResponse))) ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(topLevelErrResponseData)); AlterPartitionReassignmentsResult topLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments); assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.all(), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage()); assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp1), 
Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage()); assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp2), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage()); // 5. unrepresentable topic name error TopicPartition invalidTopicTP = new TopicPartition("", 0); TopicPartition invalidPartitionTP = new TopicPartition("ABC", -1); Map<TopicPartition, Optional<NewPartitionReassignment>> invalidTopicReassignments = new HashMap<>(); invalidTopicReassignments.put(invalidPartitionTP, Optional.of(new NewPartitionReassignment(asList(1, 2, 3)))); invalidTopicReassignments.put(invalidTopicTP, Optional.of(new NewPartitionReassignment(asList(1, 2, 3)))); invalidTopicReassignments.put(tp1, Optional.of(new NewPartitionReassignment(asList(1, 2, 3)))); AlterPartitionReassignmentsResponseData singlePartResponseData = new AlterPartitionReassignmentsResponseData() .setResponses(Collections.singletonList( new ReassignableTopicResponse() .setName("A") .setPartitions(Collections.singletonList(normalPartitionResponse))) ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(singlePartResponseData)); AlterPartitionReassignmentsResult unrepresentableTopicResult = env.adminClient().alterPartitionReassignments(invalidTopicReassignments); TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidTopicTP), InvalidTopicException.class); TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidPartitionTP), InvalidTopicException.class); unrepresentableTopicResult.values().get(tp1).get(); // Test success scenario AlterPartitionReassignmentsResponseData noErrResponseData = new AlterPartitionReassignmentsResponseData() .setErrorCode(Errors.NONE.code()) .setErrorMessage(Errors.NONE.message()) .setResponses(asList( new ReassignableTopicResponse() .setName("A") .setPartitions(Collections.singletonList(normalPartitionResponse)), new ReassignableTopicResponse() 
.setName("B") .setPartitions(Collections.singletonList(normalPartitionResponse))) ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(noErrResponseData)); AlterPartitionReassignmentsResult noErrResult = env.adminClient().alterPartitionReassignments(reassignments); noErrResult.all().get(); noErrResult.values().get(tp1).get(); noErrResult.values().get(tp2).get(); } }
@VisibleForTesting String upload(Configuration config, String artifactUriStr) throws IOException, URISyntaxException { final URI artifactUri = PackagedProgramUtils.resolveURI(artifactUriStr); if (!"local".equals(artifactUri.getScheme())) { return artifactUriStr; } final String targetDir = config.get(KubernetesConfigOptions.LOCAL_UPLOAD_TARGET); checkArgument( !StringUtils.isNullOrWhitespaceOnly(targetDir), String.format( "Setting '%s' to a valid remote path is required.", KubernetesConfigOptions.LOCAL_UPLOAD_TARGET.key())); final FileSystem.WriteMode writeMode = config.get(KubernetesConfigOptions.LOCAL_UPLOAD_OVERWRITE) ? FileSystem.WriteMode.OVERWRITE : FileSystem.WriteMode.NO_OVERWRITE; final File src = new File(artifactUri.getPath()); final Path target = new Path(targetDir, src.getName()); if (target.getFileSystem().exists(target) && writeMode == FileSystem.WriteMode.NO_OVERWRITE) { LOG.info( "Skip uploading artifact '{}', as it already exists." + " To overwrite existing artifacts, please set the '{}' config option.", target, KubernetesConfigOptions.LOCAL_UPLOAD_OVERWRITE.key()); } else { final long start = System.currentTimeMillis(); final FileSystem fs = target.getFileSystem(); try (FSDataOutputStream os = fs.create(target, writeMode)) { FileUtils.copyFile(src, os); } LOG.debug( "Copied file from {} to {}, cost {} ms", src, target, System.currentTimeMillis() - start); } return target.toString(); }
@Test void testMissingTargetConf() { config.removeConfig(KubernetesConfigOptions.LOCAL_UPLOAD_TARGET); assertThatThrownBy(() -> artifactUploader.upload(config, "local:///tmp/my-artifact.jar")) .isInstanceOf(IllegalArgumentException.class) .hasMessage( "Setting 'kubernetes.artifacts.local-upload-target' to a valid remote path is required."); }
public static ParameterizedType iterableOf(Type elementType) { return parameterizedType(Iterable.class, elementType); }
@Test public void createIterableType() { ParameterizedType type = Types.iterableOf(Person.class); assertThat(type.getRawType()).isEqualTo(Iterable.class); assertThat(type.getActualTypeArguments()).isEqualTo(new Type[] {Person.class}); }
@Override public CDCJobConfiguration swapToObject(final YamlCDCJobConfiguration yamlConfig) { List<JobDataNodeLine> jobShardingDataNodes = null == yamlConfig.getJobShardingDataNodes() ? Collections.emptyList() : yamlConfig.getJobShardingDataNodes().stream().map(JobDataNodeLine::unmarshal).collect(Collectors.toList()); YamlSinkConfiguration yamlSinkConfig = yamlConfig.getSinkConfig(); SinkConfiguration sinkConfig = new SinkConfiguration(CDCSinkType.valueOf(yamlSinkConfig.getSinkType()), yamlSinkConfig.getProps()); JobDataNodeLine tablesFirstDataNodes = null == yamlConfig.getTablesFirstDataNodes() ? null : JobDataNodeLine.unmarshal(yamlConfig.getTablesFirstDataNodes()); return new CDCJobConfiguration(yamlConfig.getJobId(), yamlConfig.getDatabaseName(), yamlConfig.getSchemaTableNames(), yamlConfig.isFull(), TypedSPILoader.getService(DatabaseType.class, yamlConfig.getSourceDatabaseType()), (ShardingSpherePipelineDataSourceConfiguration) dataSourceConfigSwapper.swapToObject(yamlConfig.getDataSourceConfiguration()), tablesFirstDataNodes, jobShardingDataNodes, yamlConfig.isDecodeWithTX(), sinkConfig, yamlConfig.getConcurrency(), yamlConfig.getRetryTimes()); }
@Test void assertSwapToObject() { YamlCDCJobConfiguration yamlJobConfig = new YamlCDCJobConfiguration(); yamlJobConfig.setJobId("j0302p00007a8bf46da145dc155ba25c710b550220"); yamlJobConfig.setDatabaseName("test_db"); yamlJobConfig.setSchemaTableNames(Arrays.asList("test.t_order", "t_order_item")); yamlJobConfig.setFull(true); yamlJobConfig.setSourceDatabaseType("MySQL"); YamlSinkConfiguration sinkConfig = new YamlSinkConfiguration(); sinkConfig.setSinkType(CDCSinkType.SOCKET.name()); yamlJobConfig.setSinkConfig(sinkConfig); CDCJobConfiguration actual = new YamlCDCJobConfigurationSwapper().swapToObject(yamlJobConfig); assertThat(actual.getJobId(), is("j0302p00007a8bf46da145dc155ba25c710b550220")); assertThat(actual.getDatabaseName(), is("test_db")); assertThat(actual.getSchemaTableNames(), is(Arrays.asList("test.t_order", "t_order_item"))); assertTrue(actual.isFull()); }
public SchemaMapping map(Schema arrowSchema, MessageType parquetSchema) { List<TypeMapping> children = map(arrowSchema.getFields(), parquetSchema.getFields()); return new SchemaMapping(arrowSchema, parquetSchema, children); }
@Test public void testRepeatedMap() throws IOException { SchemaMapping map = converter.map(paperArrowSchema, Paper.schema); Assert.assertEquals("p, s<r<p>, r<p>>, r<s<r<s<p, p>>, p>>", toSummaryString(map)); }
public Analysis analyze(Statement statement) { return analyze(statement, false); }
@Test public void testWindowFrameTypeRange() { assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE UNBOUNDED FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED FOLLOWING AND 2 FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED FOLLOWING AND CURRENT ROW) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED FOLLOWING AND 5 PRECEDING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED PRECEDING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED FOLLOWING AND UNBOUNDED FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE 2 FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 2 FOLLOWING AND CURRENT ROW) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 2 FOLLOWING AND 5 PRECEDING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 2 FOLLOWING AND UNBOUNDED PRECEDING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN CURRENT ROW AND 5 PRECEDING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN CURRENT ROW AND UNBOUNDED PRECEDING) FROM (VALUES 1) T(x)"); assertFails(INVALID_WINDOW_FRAME, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 5 PRECEDING AND UNBOUNDED PRECEDING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE UNBOUNDED PRECEDING) FROM (VALUES 1) T(x)"); 
analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED PRECEDING AND 5 PRECEDING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE 5 PRECEDING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 5 PRECEDING AND 10 PRECEDING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 5 PRECEDING AND 3 PRECEDING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 5 PRECEDING AND CURRENT ROW) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 5 PRECEDING AND 2 FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 5 PRECEDING AND UNBOUNDED FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE CURRENT ROW) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN CURRENT ROW AND 2 FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 2 FOLLOWING AND 1 FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 2 FOLLOWING AND 10 FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 2 FOLLOWING AND UNBOUNDED FOLLOWING) FROM (VALUES 1) T(x)"); // this should pass the analysis but fail during execution 
analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN -x PRECEDING AND 0 * x FOLLOWING) FROM (VALUES 1) T(x)"); analyze("SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN CAST(null AS BIGINT) PRECEDING AND CAST(null AS BIGINT) FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(MISSING_ORDER_BY, "SELECT array_agg(x) OVER (RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(INVALID_ORDER_BY, "SELECT array_agg(x) OVER (ORDER BY x DESC, x ASC RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(TYPE_MISMATCH, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM (VALUES 'a') T(x)"); assertFails(TYPE_MISMATCH, "SELECT array_agg(x) OVER (ORDER BY x RANGE BETWEEN 'a' PRECEDING AND 'z' FOLLOWING) FROM (VALUES 1) T(x)"); assertFails(TYPE_MISMATCH, "SELECT array_agg(x) OVER (ORDER BY x RANGE INTERVAL '1' day PRECEDING) FROM (VALUES INTERVAL '1' year) T(x)"); // window frame other than <expression> PRECEDING or <expression> FOLLOWING has no requirements regarding window ORDER BY clause // ORDER BY is not required analyze("SELECT array_agg(x) OVER (RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM (VALUES 1) T(x)"); // multiple sort keys and sort keys of types other than numeric or datetime are allowed analyze("SELECT array_agg(x) OVER (ORDER BY y, z RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) FROM (VALUES (1, 'text', true)) T(x, y, z)"); }
@Override public void execute(ComputationStep.Context context) { executeForBranch(treeRootHolder.getRoot()); }
@Test public void no_event_if_no_raw_measure() { when(measureRepository.getBaseMeasure(treeRootHolder.getRoot(), qualityProfileMetric)).thenReturn(Optional.of(newMeasure())); when(measureRepository.getRawMeasure(treeRootHolder.getRoot(), qualityProfileMetric)).thenReturn(Optional.empty()); underTest.execute(new TestComputationStepContext()); verifyNoMoreInteractions(eventRepository); }
@Override public void checkBeforeUpdate(final AlterReadwriteSplittingRuleStatement sqlStatement) { ReadwriteSplittingRuleStatementChecker.checkAlteration(database, sqlStatement.getRules(), rule.getConfiguration()); }
@Test void assertCheckSQLStatementWithDuplicateReadResourceNames() { ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getResourceMetaData()).thenReturn(resourceMetaData); ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class); when(rule.getConfiguration()).thenReturn(createCurrentRuleConfigurationWithMultipleRules()); executor.setRule(rule); assertThrows(DuplicateReadwriteSplittingActualDataSourceException.class, () -> executor.checkBeforeUpdate(createSQLStatement("readwrite_ds_1", "write_ds_1", Arrays.asList("read_ds_0_0", "read_ds_0_1"), "TEST"))); }
public static ListenableFuture<CustomerId> findEntityIdAsync(TbContext ctx, EntityId originator) { switch (originator.getEntityType()) { case CUSTOMER: return Futures.immediateFuture((CustomerId) originator); case USER: return toCustomerIdAsync(ctx, ctx.getUserService().findUserByIdAsync(ctx.getTenantId(), (UserId) originator)); case ASSET: return toCustomerIdAsync(ctx, ctx.getAssetService().findAssetByIdAsync(ctx.getTenantId(), (AssetId) originator)); case DEVICE: return toCustomerIdAsync(ctx, Futures.immediateFuture(ctx.getDeviceService().findDeviceById(ctx.getTenantId(), (DeviceId) originator))); default: return Futures.immediateFailedFuture(new TbNodeException("Unexpected originator EntityType: " + originator.getEntityType())); } }
@Test public void givenUnsupportedEntityTypes_whenFindEntityIdAsync_thenException() { for (var entityType : EntityType.values()) { if (!SUPPORTED_ENTITY_TYPES.contains(entityType)) { var entityId = EntityIdFactory.getByTypeAndUuid(entityType, UUID.randomUUID()); var expectedExceptionMsg = "org.thingsboard.rule.engine.api.TbNodeException: Unexpected originator EntityType: " + entityType; var exception = assertThrows(ExecutionException.class, () -> EntitiesCustomerIdAsyncLoader.findEntityIdAsync(ctxMock, entityId).get()); assertInstanceOf(TbNodeException.class, exception.getCause()); assertEquals(expectedExceptionMsg, exception.getMessage()); } } }
protected String getServiceConfig(final List<ServiceInstance> instances) { for (final ServiceInstance inst : instances) { final Map<String, String> metadata = inst.getMetadata(); if (metadata == null || metadata.isEmpty()) { continue; } final String metaValue = metadata.get(CLOUD_DISCOVERY_METADATA_SERVICE_CONFIG); if (metaValue != null && !metaValue.isEmpty()) { return metaValue; } } return null; }
@Test void testBrokenServiceConfig() { TestableListener listener = resolveServiceAndVerify("test2", "intentionally invalid service config"); NameResolver.ConfigOrError serviceConf = listener.getResult().getServiceConfig(); assertThat(serviceConf).isNotNull(); assertThat(serviceConf.getConfig()).isNull(); assertThat(serviceConf.getError()).extracting(Status::getCode).isEqualTo(Status.Code.UNKNOWN); }
protected boolean compareVersionRange(String targetVersion) { return compareVersions(this, targetVersion); }
@Test public void testCompareVersionRange() throws CpeValidationException { VulnerableSoftwareBuilder builder = new VulnerableSoftwareBuilder(); VulnerableSoftware instance = builder.version("2.0.0").build(); assertTrue(instance.compareVersionRange("2.0.0")); assertFalse(instance.compareVersionRange("2.0.1")); instance = builder.version(LogicalValue.ANY).build(); assertTrue(instance.compareVersionRange("2.0.1")); instance = builder.version(LogicalValue.NA).build(); assertFalse(instance.compareVersionRange("2.0.1")); instance = builder.version(LogicalValue.ANY).versionEndIncluding("2.0.1").build(); assertTrue(instance.compareVersionRange("2.0.1")); assertFalse(instance.compareVersionRange("2.0.2")); instance = builder.version(LogicalValue.ANY).versionEndExcluding("2.0.2").build(); assertTrue(instance.compareVersionRange("2.0.1")); assertFalse(instance.compareVersionRange("2.0.2")); instance = builder.version(LogicalValue.ANY).versionStartIncluding("1.0.1").build(); assertTrue(instance.compareVersionRange("1.0.1")); assertFalse(instance.compareVersionRange("1.0.0")); instance = builder.version(LogicalValue.ANY).versionStartExcluding("1.0.0").build(); assertTrue(instance.compareVersionRange("1.0.1")); assertFalse(instance.compareVersionRange("1.0.0")); }
@VisibleForTesting String getResources(ContainerInfo container) { Map<String, Long> allocatedResources = container.getAllocatedResources(); StringBuilder sb = new StringBuilder(); sb.append(getResourceAsString(ResourceInformation.MEMORY_URI, allocatedResources.get(ResourceInformation.MEMORY_URI))).append(", "); sb.append(getResourceAsString(ResourceInformation.VCORES_URI, allocatedResources.get(ResourceInformation.VCORES_URI))); if (container.hasCustomResources()) { container.getAllocatedResources().forEach((key, value) -> { if (!key.equals(ResourceInformation.MEMORY_URI) && !key.equals(ResourceInformation.VCORES_URI)) { sb.append(", "); sb.append(getResourceAsString(key, value)); } }); } return sb.toString(); }
@Test public void testRenderResourcesString() { CustomResourceTypesConfigurationProvider. initResourceTypes(ResourceInformation.GPU_URI); Resource resource = ResourceTypesTestHelper.newResource( DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, ImmutableMap.<String, String>builder() .put(ResourceInformation.GPU_URI, "5").build()); ContainerBlock block = new ContainerBlock( mock(ApplicationBaseProtocol.class), mock(View.ViewContext.class)); ContainerReport containerReport = createContainerReport(); containerReport.setAllocatedResource(resource); ContainerInfo containerInfo = new ContainerInfo(containerReport); String resources = block.getResources(containerInfo); Assert.assertEquals("8192 Memory, 4 VCores, 5 yarn.io/gpu", resources); }
@Override public int read(byte[] bytesBuffer, int offset, int length) throws IOException { return readInternal(new ByteArrayTargetBuffer(bytesBuffer, offset), offset, length, ReadType.READ_INTO_BYTE_ARRAY, mPosition, false); }
@Test public void readEmptyFileThroughReadByteBuffer() throws Exception { int fileSize = 0; byte[] fileData = BufferUtils.getIncreasingByteArray(fileSize); ByteArrayCacheManager manager = new ByteArrayCacheManager(); LocalCacheFileInStream stream = setupWithSingleFile(fileData, manager); byte[] readData = new byte[fileSize]; ByteBuffer buffer = ByteBuffer.wrap(readData); int totalBytesRead = stream.read(buffer, 0, fileSize + 1); Assert.assertEquals(-1, totalBytesRead); }
@CanIgnoreReturnValue public final Ordered containsAtLeast( @Nullable Object firstExpected, @Nullable Object secondExpected, @Nullable Object @Nullable ... restOfExpected) { return containsAtLeastElementsIn(accumulate(firstExpected, secondExpected, restOfExpected)); }
@Test public void iterableContainsAtLeast() { assertThat(asList(1, 2, 3)).containsAtLeast(1, 2); }
@Override public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { if (key == null) { return null; } return super.computeIfPresent(key, remappingFunction); }
@Test public void testComputeIfPresent() { Assert.assertEquals(VALUE, map.get(KEY)); Assert.assertEquals(null, map.computeIfAbsent(null, key -> "")); Assert.assertEquals("", map.computeIfPresent(KEY, (key, value) -> "")); Assert.assertEquals("", map.get(KEY)); }
public Path getParent() { if(this.isRoot()) { return this; } return parent; }
@Test public void testGetParent() { assertEquals(new Path("/b/t", EnumSet.of(Path.Type.directory)), new Path("/b/t/f.type", EnumSet.of(Path.Type.file)).getParent()); }
@Override protected Class<?> loadClass(final String name, final boolean resolve) throws ClassNotFoundException { synchronized (getClassLoadingLock(name)) { try { final Class<?> loadedClass = findLoadedClass(name); if (loadedClass != null) { return resolveIfNeeded(resolve, loadedClass); } if (isComponentFirstClass(name)) { return loadClassFromComponentFirst(name, resolve); } if (isOwnerFirstClass(name)) { return loadClassFromOwnerFirst(name, resolve); } // making this behavior configurable (component-only/component-first/owner-first) // would allow this class to subsume the FlinkUserCodeClassLoader (with an added // exception handler) return loadClassFromComponentOnly(name, resolve); } catch (ClassNotFoundException e) { // If we know the package of this class Optional<String> foundAssociatedModule = knownPackagePrefixesModuleAssociation.entrySet().stream() .filter(entry -> name.startsWith(entry.getKey())) .map(Map.Entry::getValue) .findFirst(); if (foundAssociatedModule.isPresent()) { throw new ClassNotFoundException( String.format( "Class '%s' not found. Perhaps you forgot to add the module '%s' to the classpath?", name, foundAssociatedModule.get()), e); } throw e; } } }
@Test void testComponentOnlyIsDefaultForClasses() throws Exception { assertThatExceptionOfType(ClassNotFoundException.class) .isThrownBy( () -> { TestUrlClassLoader owner = new TestUrlClassLoader( NON_EXISTENT_CLASS_NAME, CLASS_RETURNED_BY_OWNER); final ComponentClassLoader componentClassLoader = new ComponentClassLoader( new URL[0], owner, new String[0], new String[0], Collections.emptyMap()); componentClassLoader.loadClass(NON_EXISTENT_CLASS_NAME); }); }
@Bean("Configuration") public Configuration provide(Settings settings) { return new ServerConfigurationAdapter(settings); }
@Test @UseDataProvider("subsequentCommas1") public void getStringArray_on_unknown_or_non_multivalue_properties_ignores_subsequent_commas_as_Settings(String subsequentCommas) { settings.setProperty(nonDeclaredKey, subsequentCommas); settings.setProperty(nonMultivalueKey, subsequentCommas); Configuration configuration = underTest.provide(settings); getStringArrayBehaviorIsTheSame(configuration, nonDeclaredKey); getStringArrayBehaviorIsTheSame(configuration, nonMultivalueKey); }
@Override public void beforeJob(JobExecution jobExecution) { LOG.debug("sending before job execution event [{}]...", jobExecution); producerTemplate.sendBodyAndHeader(endpointUri, jobExecution, EventType.HEADER_KEY, EventType.BEFORE.name()); LOG.debug("sent before job execution event"); }
@Test public void shouldSendBeforeJobEvent() throws Exception { // When jobExecutionListener.beforeJob(jobExecution); // Then assertEquals(jobExecution, consumer().receiveBody("seda:eventQueue")); }
@Override public void register(ProviderConfig config) { String appName = config.getAppName(); if (!registryConfig.isRegister()) { if (LOGGER.isInfoEnabled(appName)) { LOGGER.infoWithApp(appName, LogCodes.getLog(LogCodes.INFO_REGISTRY_IGNORE)); } return; } if (!config.isRegister()) { return; } try { List<InstanceRegisterRequest> services = buildPolarisRegister(config); if (CommonUtils.isNotEmpty(services)) { if (LOGGER.isInfoEnabled(appName)) { LOGGER.infoWithApp(appName, LogCodes.getLog(LogCodes.INFO_ROUTE_REGISTRY_PUB_START, config.getInterfaceId())); } for (InstanceRegisterRequest service : services) { registerPolarisService(config, service); if (LOGGER.isInfoEnabled(appName)) { LOGGER.infoWithApp(appName, LogCodes.getLog(LogCodes.INFO_ROUTE_REGISTRY_PUB, config.getInterfaceId())); } } if (LOGGER.isInfoEnabled(appName)) { LOGGER.infoWithApp(appName, LogCodes.getLog(LogCodes.INFO_ROUTE_REGISTRY_PUB_OVER, config.getInterfaceId())); } } } catch (SofaRpcRuntimeException e) { throw e; } catch (Exception e) { throw new SofaRpcRuntimeException(LogCodes.getLog(LogCodes.ERROR_REG_PROVIDER, "polarisRegistry", config.buildKey()), e); } if (EventBus.isEnable(ProviderPubEvent.class)) { ProviderPubEvent event = new ProviderPubEvent(config); EventBus.post(event); } }
@Test public void testRegister() { polaris.getNamingService().addService(new ServiceKey(NAMESPACE, SERVICE)); //register ProviderConfig<?> providerConfig = providerConfig("polaris-test-1", 12200, 12201, 12202); registry.register(providerConfig); //check register ConsumerAPI consumerAPI = DiscoveryAPIFactory.createConsumerAPI(); GetAllInstancesRequest getAllInstancesRequest = new GetAllInstancesRequest(); getAllInstancesRequest.setNamespace(APPNAME); getAllInstancesRequest.setService(SERVICE); InstancesResponse allInstance = consumerAPI.getAllInstance(getAllInstancesRequest); Assert.assertEquals(3, allInstance.getInstances().length); //unregister registry.unRegister(providerConfig); //check unregister ,sleep to wait remove catch try { Thread.sleep(5000); } catch (InterruptedException e) { e.printStackTrace(); } //if no service will throw a exception try { consumerAPI.getAllInstance(getAllInstancesRequest); } catch (PolarisException e) { Assert.assertEquals(SERVER_USER_ERROR, e.getCode()); } }
public String[] getCNAMEs() { if(null == cnames) { return new String[]{}; } return cnames; }
@Test public void testCnames() { assertNotNull(new Distribution(Distribution.DOWNLOAD, false).getCNAMEs()); }
@Override public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { authorizationService.validate(); }
@Test(expected = LoginCanceledException.class) public void testProjectIdNoAuthorization() throws Exception { session.getHost().setCredentials( new Credentials("stellar-perigee-775", "") ); session.login(new DisabledLoginCallback() { @Override public Credentials prompt(final Host bookmark, final String username, final String title, final String reason, final LoginOptions options) { // OAuth2 return new Credentials("", ""); } }, new DisabledCancelCallback()); }
Map<String, Object> sourceProducerConfig(String role) { Map<String, Object> props = new HashMap<>(); props.putAll(originalsWithPrefix(SOURCE_CLUSTER_PREFIX)); props.keySet().retainAll(MirrorClientConfig.CLIENT_CONFIG_DEF.names()); props.putAll(originalsWithPrefix(PRODUCER_CLIENT_PREFIX)); props.putAll(originalsWithPrefix(SOURCE_PREFIX + PRODUCER_CLIENT_PREFIX)); addClientId(props, role); return props; }
@Test public void testSourceProducerConfig() { Map<String, String> connectorProps = makeProps( MirrorConnectorConfig.PRODUCER_CLIENT_PREFIX + "acks", "1" ); MirrorConnectorConfig config = new TestMirrorConnectorConfig(connectorProps); Map<String, Object> connectorProducerProps = config.sourceProducerConfig("test"); Map<String, Object> expectedProducerProps = new HashMap<>(); expectedProducerProps.put("acks", "1"); expectedProducerProps.put("client.id", "source1->target2|ConnectorName|test"); assertEquals(expectedProducerProps, connectorProducerProps, MirrorConnectorConfig.PRODUCER_CLIENT_PREFIX + " source product config not matching"); }
// Asserts that the actual string fully matches the given regex. On failure the
// message is specialized: an exact-equal regex suggests isEqualTo(), a partial
// match suggests containsMatch(), otherwise a plain mismatch is reported.
// A null actual value fails with the expected pattern shown.
public void matches(@Nullable String regex) { checkNotNull(regex); if (actual == null) { failWithActual("expected a string that matches", regex); } else if (!actual.matches(regex)) { if (regex.equals(actual)) { failWithoutActual( fact("expected to match", regex), fact("but was", actual), simpleFact("Looks like you want to use .isEqualTo() for an exact equality assertion.")); } else if (Platform.containsMatch(actual, regex)) { failWithoutActual( fact("expected to match", regex), fact("but was", actual), simpleFact("Did you mean to call containsMatch() instead of match()?")); } else { failWithActual("expected to match", regex); } } }
// Matching a Pattern against a null subject must fail with the
// "expected a string that matches" fact carrying the pattern text.
@Test @GwtIncompatible("Pattern") public void stringMatchesPatternFailNull() { expectFailureWhenTestingThat(null).matches(Pattern.compile(".*aaa.*")); assertFailureValue("expected a string that matches", ".*aaa.*"); }
// Queues the SETTINGS frame as outstanding, rejects ENABLE_PUSH when this side
// is a server (protocol error), then delegates the actual write.
// NOTE(review): the settings are enqueued before validation, so a rejected
// frame still sits in outstandingLocalSettingsQueue — confirm the ACK path
// tolerates this.
@Override public ChannelFuture writeSettings(ChannelHandlerContext ctx, Http2Settings settings, ChannelPromise promise) { outstandingLocalSettingsQueue.add(settings); try { Boolean pushEnabled = settings.pushEnabled(); if (pushEnabled != null && connection.isServer()) { throw connectionError(PROTOCOL_ERROR, "Server sending SETTINGS frame with ENABLE_PUSH specified"); } } catch (Throwable e) { return promise.setFailure(e); } return frameWriter.writeSettings(ctx, settings, promise); }
// Writing SETTINGS after a received GOAWAY must still be forwarded to the frame writer.
@Test public void settingsWriteAfterGoAwayShouldSucceed() throws Exception { goAwayReceived(0); ChannelPromise promise = newPromise(); encoder.writeSettings(ctx, new Http2Settings(), promise); verify(writer).writeSettings(eq(ctx), any(Http2Settings.class), eq(promise)); }
// Factory that fails fast if the caller is not serializable (required for
// distribution to Beam workers) before building the Call configuration.
static <RequestT, ResponseT> Call<RequestT, ResponseT> of( Caller<RequestT, ResponseT> caller, Coder<ResponseT> responseTCoder) { caller = SerializableUtils.ensureSerializable(caller); return new Call<>( Configuration.<RequestT, ResponseT>builder() .setCaller(caller) .setResponseCoder(responseTCoder) .build()); }
// A timeout from the caller must land in the failure PCollection as both a
// UserCodeExecutionException and a UserCodeTimeoutException (and not a quota error).
@Test public void givenCallerThrowsTimeoutException_emitsFailurePCollection() { Result<Response> result = pipeline .apply(Create.of(new Request("a"))) .apply(Call.of(new CallerThrowsTimeout(), NON_DETERMINISTIC_RESPONSE_CODER)); PCollection<ApiIOError> failures = result.getFailures(); PAssert.thatSingleton(countStackTracesOf(failures, UserCodeExecutionException.class)) .isEqualTo(1L); PAssert.thatSingleton(countStackTracesOf(failures, UserCodeQuotaException.class)).isEqualTo(0L); PAssert.thatSingleton(countStackTracesOf(failures, UserCodeTimeoutException.class)) .isEqualTo(1L); pipeline.run(); }
/**
 * Converts a scalar operator into a predicate via the visitor pattern.
 *
 * @param operator the operator to convert; may be null
 * @return the resulting predicate, or null when the operator is null
 */
public Predicate convert(ScalarOperator operator) {
    // Null input maps to a null predicate rather than an exception.
    return operator == null ? null : operator.accept(this, null);
}
// Converting a null operator must yield null, not throw.
@Test public void testNull() { Predicate result = CONVERTER.convert(null); Assert.assertNull(result); }
/**
 * Checks whether the given location string uses one of the object-storage URL
 * schemes treated as S3-compatible (oss, s3n, s3a, s3, cos, cosn, obs, ks3, tos).
 *
 * @param prefix the location string to inspect
 * @return true if it starts with any supported scheme
 */
public static boolean isS3Url(String prefix) {
    String[] schemes = {
            "oss://", "s3n://", "s3a://", "s3://",
            "cos://", "cosn://", "obs://", "ks3://", "tos://"
    };
    for (String scheme : schemes) {
        if (prefix.startsWith(scheme)) {
            return true;
        }
    }
    return false;
}
// FIX: the original test exercised only "obs://" out of the nine accepted
// schemes and had no negative case; cover a representative sample of schemes
// plus a rejected scheme.
@Test
public void testIsS3Url() {
    Assert.assertTrue(HiveWriteUtils.isS3Url("obs://"));
    Assert.assertTrue(HiveWriteUtils.isS3Url("s3://bucket/path"));
    Assert.assertTrue(HiveWriteUtils.isS3Url("s3a://bucket"));
    Assert.assertTrue(HiveWriteUtils.isS3Url("oss://bucket"));
    Assert.assertTrue(HiveWriteUtils.isS3Url("ks3://bucket"));
    Assert.assertTrue(HiveWriteUtils.isS3Url("tos://bucket"));
    Assert.assertFalse(HiveWriteUtils.isS3Url("hdfs://namenode:9000/path"));
}
// Pairwise copy of source files to destination resources. Creates missing
// destination parent directories (mkdirs-or-exists tolerates races), then
// copies each file replacing any existing destination and preserving attributes.
// The src/dest lists must be the same length.
@Override protected void copy(List<LocalResourceId> srcResourceIds, List<LocalResourceId> destResourceIds) throws IOException { checkArgument( srcResourceIds.size() == destResourceIds.size(), "Number of source files %s must equal number of destination files %s", srcResourceIds.size(), destResourceIds.size()); int numFiles = srcResourceIds.size(); for (int i = 0; i < numFiles; i++) { LocalResourceId src = srcResourceIds.get(i); LocalResourceId dst = destResourceIds.get(i); LOG.debug("Copying {} to {}", src, dst); File parent = dst.getCurrentDirectory().getPath().toFile(); if (!parent.exists()) { checkArgument( parent.mkdirs() || parent.exists(), "Unable to make output directory %s in order to copy into file %s", parent, dst.getPath()); } // Copy the source file, replacing the existing destination. // Paths.get(x) will not work on Windows OSes cause of the ":" after the drive letter. Files.copy( src.getPath(), dst.getPath(), StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES); } }
// Copying two existing files — one into a not-yet-existing directory — must
// create the directory and reproduce both contents at the destinations.
@Test public void testCopyWithExistingSrcFile() throws Exception { Path srcPath1 = temporaryFolder.newFile().toPath(); Path srcPath2 = temporaryFolder.newFile().toPath(); Path destPath1 = temporaryFolder.getRoot().toPath().resolve("nonexistentdir").resolve("dest1"); Path destPath2 = srcPath2.resolveSibling("dest2"); createFileWithContent(srcPath1, "content1"); createFileWithContent(srcPath2, "content2"); localFileSystem.copy( toLocalResourceIds(ImmutableList.of(srcPath1, srcPath2), false /* isDirectory */), toLocalResourceIds(ImmutableList.of(destPath1, destPath2), false /* isDirectory */)); assertContents( ImmutableList.of(destPath1, destPath2), ImmutableList.of("content1", "content2")); }
// Returns a fresh cursor over the hash slot array; each call creates an
// independent cursor instance.
@Override public HashSlotCursor16byteKey cursor() { return new CursorLongKey2(); }
// Reading valueAddress() before the first advance() must trip an internal
// assertion (requires -ea, hence @RequireAssertEnabled).
@Test(expected = AssertionError.class) @RequireAssertEnabled public void testCursor_valueAddress_withoutAdvance() { HashSlotCursor16byteKey cursor = hsa.cursor(); cursor.valueAddress(); }
// Convenience overload: prints the context's status messages starting from
// threshold/offset 0.
public static void print(Context context) { print(context, 0); }
// An InfoStatus added to the status manager must appear in the printed output
// with the "|-INFO in <origin class>" prefix.
@Test public void testBasic() { Context context = new ContextBase(); context.getStatusManager().add(new InfoStatus("test", this)); StatusPrinter.print(context); String result = outputStream.toString(); assertTrue(result.contains("|-INFO in "+this.getClass().getName())); }
/**
 * Removes the edge from {@code from} to {@code to}.
 *
 * @param from source vertex; must exist in the graph
 * @param to   target vertex; must exist in the graph
 * @throws IllegalArgumentException naming the first missing vertex
 */
public void removeEdge(V from, V to) {
    // Identify the first missing endpoint (from is checked before to,
    // matching the original validation order).
    V missing = !containsVertex(from) ? from : (!containsVertex(to) ? to : null);
    if (missing != null) {
        throw new IllegalArgumentException("Nonexistent vertex " + missing);
    }
    neighbors.get(from).remove(to);
}
// After removing edge B->F, B's neighbor list must contain only C.
@Test void removeEdge() { graph.removeEdge('B', 'F'); List<Character> result = graph.getNeighbors('B'); List<Character> expected = Arrays.asList('C'); assertEquals(expected, result); }
// Parses each non-blank p4 change line into a Modification by extracting the
// revision and fetching its description. Unparseable lines are logged (with
// secrets redacted) and skipped; runtime errors are rethrown after smudging
// secret info out of the exception.
public List<Modification> modifications(ConsoleResult result) { List<Modification> modifications = new ArrayList<>(); for (String change : result.output()) { if (!StringUtils.isBlank(change)) { String description = ""; try { long revision = revisionFromChange(change); description = p4Client.describe(revision); modifications.add(modificationFromDescription(description, result)); } catch (P4OutputParseException e) { LOG.error("Error parsing changes for {}", this); LOG.error("---- change ---------"); LOG.error(result.replaceSecretInfo(change)); LOG.error("---- description ----"); LOG.error(result.replaceSecretInfo(description)); LOG.error("---------------------"); } catch (RuntimeException e) { throw (RuntimeException) result.smudgedException(e); } } } return modifications; }
// An unparseable change description must produce no modifications and must be
// written to the log for diagnosis.
@Test void shouldIgnoreBadLinesAndLogThem() { try (LogFixture logging = logFixtureFor(P4OutputParser.class, Level.DEBUG)) { final String output = "Change 539921 on 2008/09/24 " + "by abc@SomeRefinery_abc_sa1-sgr-xyz-001 'more work in progress on MDC un'\n"; final String description = "Change that I cannot parse :-(\n"; when(p4Client.describe(any(Long.class))).thenReturn(description); List<Modification> modifications = parser.modifications(new ConsoleResult(0, List.of(output.split("\n")), new ArrayList<>(), new ArrayList<>(), new ArrayList<>())); assertThat(modifications.size()).isEqualTo(0); assertThat(logging.getLog()).contains(description); } }
/**
 * Pushes a value onto the evaluation stack.
 * Overflow is not checked here; exceeding capacity surfaces as an
 * ArrayIndexOutOfBoundsException from the array store.
 */
void push(SelType obj) {
    top = top + 1;
    stack[top] = obj;
}
// Pushing past the stack's capacity must raise ArrayIndexOutOfBoundsException.
// (Implies the state under test was sized for fewer than three entries.)
@Test(expected = ArrayIndexOutOfBoundsException.class) public void testPushStackOverflow() { state.push(SelString.of("foo")); state.push(SelString.of("foo")); state.push(SelString.of("foo")); }
/**
 * Wraps the stream with MD5 digest computation when a digest is supplied.
 *
 * @param in     upstream data
 * @param digest digest to feed, or null to disable checksum calculation
 * @return a DigestInputStream, or the superclass decoration when disabled
 */
@Override
protected InputStream decorate(final InputStream in, final MessageDigest digest) throws IOException {
    if (digest != null) {
        return new DigestInputStream(in, digest);
    }
    // No digest supplied — checksum verification is effectively off.
    log.warn("MD5 calculation disabled");
    return super.decorate(in, null);
}
// With a null digest the stream must pass through undecorated (same NullInputStream class).
@Test public void testDecorate() throws Exception { final NullInputStream n = new NullInputStream(1L); final S3Session session = new S3Session(new Host(new S3Protocol())); assertSame(NullInputStream.class, new S3SingleUploadService(session, new S3WriteFeature(session, new S3AccessControlListFeature(session))).decorate(n, null).getClass()); }
// Creates a config entry: enforces key uniqueness, converts the request VO,
// forces the type to CUSTOM, inserts it, and returns the generated id.
@Override public Long createConfig(ConfigSaveReqVO createReqVO) { // 校验参数配置 key 的唯一性 validateConfigKeyUnique(null, createReqVO.getKey()); // 插入参数配置 ConfigDO config = ConfigConvert.INSTANCE.convert(createReqVO); config.setType(ConfigTypeEnum.CUSTOM.getType()); configMapper.insert(config); return config.getId(); }
// Creating a config must return a generated id, persist all request fields
// (except id), and force the type to CUSTOM.
@Test public void testCreateConfig_success() { // 准备参数 ConfigSaveReqVO reqVO = randomPojo(ConfigSaveReqVO.class) .setId(null); // 防止 id 被赋值,导致唯一性校验失败 // 调用 Long configId = configService.createConfig(reqVO); // 断言 assertNotNull(configId); // 校验记录的属性是否正确 ConfigDO config = configMapper.selectById(configId); assertPojoEquals(reqVO, config, "id"); assertEquals(ConfigTypeEnum.CUSTOM.getType(), config.getType()); }
/**
 * Validates a Jenkins root URL using the project's customized URL validator.
 *
 * @param url the candidate root URL
 * @return true when the validator accepts it
 */
public static boolean isValidRootUrl(String url) {
    // A fresh validator per call, exactly as before — the validator holds no
    // state worth caching here.
    return new CustomUrlValidator().isValid(url);
}
@Test @Issue("JENKINS-51064") public void withCustomDomain() { assertTrue(UrlHelper.isValidRootUrl("http://my-server:8080/jenkins")); assertTrue(UrlHelper.isValidRootUrl("http://jenkins.internal/")); assertTrue(UrlHelper.isValidRootUrl("http://jenkins.otherDomain/")); assertTrue(UrlHelper.isValidRootUrl("http://my-server.domain:8080/jenkins")); assertTrue(UrlHelper.isValidRootUrl("http://my-ser_ver.do_m-ain:8080/jenkins")); assertTrue(UrlHelper.isValidRootUrl("http://my-ser_ver.do_m-ain:8080/jenkins")); // forbidden to start or end domain with - or . assertFalse(UrlHelper.isValidRootUrl("http://-jenkins.com")); assertFalse(UrlHelper.isValidRootUrl("http://jenkins.com-")); assertFalse(UrlHelper.isValidRootUrl("http://.jenkins.com")); // allowed to have multiple dots in chain assertTrue(UrlHelper.isValidRootUrl("http://jen..kins.com")); }
// Orderly shutdown of all naming-client components: update service, server
// list, HTTP and gRPC proxies, security proxy, then the executor pool.
// Begin/end is logged for diagnosability.
@Override public void shutdown() throws NacosException { String className = this.getClass().getName(); NAMING_LOGGER.info("{} do shutdown begin", className); serviceInfoUpdateService.shutdown(); serverListManager.shutdown(); httpClientProxy.shutdown(); grpcClientProxy.shutdown(); securityProxy.shutdown(); ThreadUtils.shutdownThreadPool(executorService, NAMING_LOGGER); NAMING_LOGGER.info("{} do shutdown stop", className); }
// Delegate shutdown must propagate to the underlying gRPC client exactly once.
@Test void testShutdown() throws NacosException { delegate.shutdown(); verify(mockGrpcClient, times(1)).shutdown(); }
// Batch source over an IMap's entries, named "mapSource(<name>)" and backed by
// the readMapP processor supplier.
@Nonnull public static <K, V> BatchSource<Entry<K, V>> map(@Nonnull String mapName) { return batchFromProcessor("mapSource(" + mapName + ')', readMapP(mapName)); }
// The same BatchSource instance may be attached to a pipeline only once; a
// second readFrom must throw IllegalStateException.
@Test(expected = IllegalStateException.class) public void when_batchSourceUsedTwice_then_throwException() { // Given BatchSource<Entry<Object, Object>> source = Sources.map(srcName); p.readFrom(source); // When-Then p.readFrom(source); }
// Subscribes this formatter to every Envelope event, funneling them into write().
@Override public void setEventPublisher(EventPublisher publisher) { publisher.registerHandlerFor(Envelope.class, this::write); }
// The HTML formatter must serialize only run-level messages: StepDefinition,
// Hook and ParameterType envelopes sent between TestRunStarted and
// TestRunFinished must not appear in the emitted CUCUMBER_MESSAGES array.
@Test void ignores_step_definitions() throws Throwable { ByteArrayOutputStream bytes = new ByteArrayOutputStream(); HtmlFormatter formatter = new HtmlFormatter(bytes); EventBus bus = new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID); formatter.setEventPublisher(bus); TestRunStarted testRunStarted = new TestRunStarted(new Timestamp(10L, 0L)); bus.send(Envelope.of(testRunStarted)); StepDefinition stepDefinition = new StepDefinition( "", new StepDefinitionPattern("", StepDefinitionPatternType.CUCUMBER_EXPRESSION), SourceReference.of("https://example.com")); bus.send(Envelope.of(stepDefinition)); Hook hook = new Hook("", null, SourceReference.of("https://example.com"), null); bus.send(Envelope.of(hook)); // public ParameterType(String name, List<String> regularExpressions, // Boolean preferForRegularExpressionMatch, Boolean useForSnippets, // String id) { ParameterType parameterType = new ParameterType( "", Collections.emptyList(), true, false, "", null); bus.send(Envelope.of(parameterType)); TestRunFinished testRunFinished = new TestRunFinished( null, true, new Timestamp(15L, 0L), null); bus.send(Envelope.of(testRunFinished)); assertThat(bytes, bytes(containsString("" + "window.CUCUMBER_MESSAGES = [" + "{\"testRunStarted\":{\"timestamp\":{\"seconds\":10,\"nanos\":0}}}," + "{\"testRunFinished\":{\"success\":true,\"timestamp\":{\"seconds\":15,\"nanos\":0}}}" + "];\n"))); }
/**
 * Looks up a dict type by id, tolerating a null id.
 *
 * @param id the dict type id; null short-circuits to null
 * @return the matching record, or null when id is null
 * @throws (service exception) DICT_TYPE_NOT_EXISTS when the id resolves to nothing
 */
@VisibleForTesting DictTypeDO validateDictTypeExists(Long id) {
    if (id == null) {
        return null;
    }
    DictTypeDO existing = dictTypeMapper.selectById(id);
    if (existing != null) {
        return existing;
    }
    throw exception(DICT_TYPE_NOT_EXISTS);
}
// Validating an id that was just inserted must complete without throwing.
@Test public void testValidateDictDataExists_success() { // mock 数据 DictTypeDO dbDictType = randomDictTypeDO(); dictTypeMapper.insert(dbDictType);// @Sql: 先插入出一条存在的数据 // 调用成功 dictTypeService.validateDictTypeExists(dbDictType.getId()); }
// S3 login: selects provider credentials in priority order — anonymous (null
// credentials), valid session tokens (AWSSessionCredentials), else basic
// access/secret key. Skips verification when a prior auth event succeeded;
// otherwise probes the home path's region to pin a default location, falling
// back to a plain home lookup on access/interoperability failures.
@Override public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { final Credentials credentials = authentication.get(); if(credentials.isAnonymousLogin()) { if(log.isDebugEnabled()) { log.debug(String.format("Connect with no credentials to %s", host)); } client.setProviderCredentials(null); } else { if(credentials.getTokens().validate()) { if(log.isDebugEnabled()) { log.debug(String.format("Connect with session credentials to %s", host)); } client.setProviderCredentials(new AWSSessionCredentials( credentials.getTokens().getAccessKeyId(), credentials.getTokens().getSecretAccessKey(), credentials.getTokens().getSessionToken())); } else { if(log.isDebugEnabled()) { log.debug(String.format("Connect with basic credentials to %s", host)); } client.setProviderCredentials(new AWSCredentials(credentials.getUsername(), credentials.getPassword())); } } if(host.getCredentials().isPassed()) { log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this)); return; } try { final Path home = new DelegatingHomeFeature(new DefaultPathHomeFeature(host)).find(); final Location.Name location = new S3LocationFeature(S3Session.this, regions).getLocation(home); if(log.isDebugEnabled()) { log.debug(String.format("Retrieved region %s", location)); } if(!Location.unknown.equals(location)) { if(log.isDebugEnabled()) { log.debug(String.format("Set default region to %s determined from %s", location, home)); } // host.setProperty("s3.location", location.getIdentifier()); } } catch(AccessDeniedException | InteroperabilityException e) { log.warn(String.format("Failure %s querying region", e)); final Path home = new DefaultHomeFinderService(this).find(); if(log.isDebugEnabled()) { log.debug(String.format("Retrieved %s", home)); } } }
// Interoperability check against the public MinIO playground with virtual-host
// bucket addressing disabled; open, login and close must all succeed.
@Test public void testInteroperabilityMinio() throws Exception { final Host host = new Host(new S3Protocol(), "play.min.io", new Credentials( "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" )) { @Override public String getProperty(final String key) { if("s3.bucket.virtualhost.disable".equals(key)) { return String.valueOf(true); } return super.getProperty(key); } }; final S3Session session = new S3Session(host); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); session.close(); }
// Three-pass state assignment during restore:
//  1) per job vertex, resolve each operator's state (preferring the
//     user-defined operator id when present, else the generated id), creating
//     empty OperatorState placeholders for unmatched operators, and build a
//     TaskStateAssignment (also indexed by produced dataset for consumers);
//  2) repartition state for assignments with non-finished state, or — per
//     FLINK-31963 — stateless tasks whose neighbors carry channel state;
//  3) assign the (possibly empty) task state to the execution vertices
//     whenever the task or its neighbors carry state, so downstream filtering
//     of stale channel state is triggered.
public void assignStates() { checkStateMappingCompleteness(allowNonRestoredState, operatorStates, tasks); Map<OperatorID, OperatorState> localOperators = new HashMap<>(operatorStates); // find the states of all operators belonging to this task and compute additional // information in first pass for (ExecutionJobVertex executionJobVertex : tasks) { List<OperatorIDPair> operatorIDPairs = executionJobVertex.getOperatorIDs(); Map<OperatorID, OperatorState> operatorStates = CollectionUtil.newHashMapWithExpectedSize(operatorIDPairs.size()); for (OperatorIDPair operatorIDPair : operatorIDPairs) { OperatorID operatorID = operatorIDPair .getUserDefinedOperatorID() .filter(localOperators::containsKey) .orElse(operatorIDPair.getGeneratedOperatorID()); OperatorState operatorState = localOperators.remove(operatorID); if (operatorState == null) { operatorState = new OperatorState( operatorID, executionJobVertex.getParallelism(), executionJobVertex.getMaxParallelism()); } operatorStates.put(operatorIDPair.getGeneratedOperatorID(), operatorState); } final TaskStateAssignment stateAssignment = new TaskStateAssignment( executionJobVertex, operatorStates, consumerAssignment, vertexAssignments); vertexAssignments.put(executionJobVertex, stateAssignment); for (final IntermediateResult producedDataSet : executionJobVertex.getInputs()) { consumerAssignment.put(producedDataSet.getId(), stateAssignment); } } // repartition state for (TaskStateAssignment stateAssignment : vertexAssignments.values()) { if (stateAssignment.hasNonFinishedState // FLINK-31963: We need to run repartitioning for stateless operators that have // upstream output or downstream input states.
|| stateAssignment.hasUpstreamOutputStates() || stateAssignment.hasDownstreamInputStates()) { assignAttemptState(stateAssignment); } } // actually assign the state for (TaskStateAssignment stateAssignment : vertexAssignments.values()) { // If upstream has output states or downstream has input states, even the empty task // state should be assigned for the current task in order to notify this task that the // old states will send to it which likely should be filtered. if (stateAssignment.hasNonFinishedState || stateAssignment.isFullyFinished || stateAssignment.hasUpstreamOutputStates() || stateAssignment.hasDownstreamInputStates()) { assignTaskStateToExecutionJobVertices(stateAssignment); } } }
// A vertex backed by a FullyFinishedOperatorState must restore every subtask
// as "deployed as finished", while a vertex with ordinary state must not.
@Test void testStateWithFullyFinishedOperators() throws JobException, JobExecutionException { List<OperatorID> operatorIds = buildOperatorIds(2); Map<OperatorID, OperatorState> states = buildOperatorStates(Collections.singletonList(operatorIds.get(1)), 3); // Create an operator state marked as finished OperatorState operatorState = new FullyFinishedOperatorState(operatorIds.get(0), 3, 256); states.put(operatorIds.get(0), operatorState); Map<OperatorID, ExecutionJobVertex> vertices = buildVertices(operatorIds, 2, RANGE, ROUND_ROBIN); new StateAssignmentOperation(0, new HashSet<>(vertices.values()), states, false) .assignStates(); // Check the job vertex with only finished operator. ExecutionJobVertex jobVertexWithFinishedOperator = vertices.get(operatorIds.get(0)); for (ExecutionVertex task : jobVertexWithFinishedOperator.getTaskVertices()) { JobManagerTaskRestore taskRestore = task.getCurrentExecutionAttempt().getTaskRestore(); assertThat(taskRestore.getTaskStateSnapshot().isTaskDeployedAsFinished()).isTrue(); } // Check the job vertex without finished operator. ExecutionJobVertex jobVertexWithoutFinishedOperator = vertices.get(operatorIds.get(1)); for (ExecutionVertex task : jobVertexWithoutFinishedOperator.getTaskVertices()) { JobManagerTaskRestore taskRestore = task.getCurrentExecutionAttempt().getTaskRestore(); assertThat(taskRestore.getTaskStateSnapshot().isTaskDeployedAsFinished()).isFalse(); } }
// Sends an AUTH_CREATE_USER command with the serialized UserInfo as body and
// blocks up to `millis`. Any non-SUCCESS response code becomes MQBrokerException.
// NOTE(review): the null-response guard is a bare `assert`, which is a no-op
// unless the JVM runs with -ea — consider an explicit null check.
public void createUser(String addr, UserInfo userInfo, long millis) throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException, MQBrokerException { CreateUserRequestHeader requestHeader = new CreateUserRequestHeader(userInfo.getUsername()); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_CREATE_USER, requestHeader); request.setBody(RemotingSerializable.encode(userInfo)); RemotingCommand response = this.remotingClient.invokeSync(addr, request, millis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { return; } default: break; } throw new MQBrokerException(response.getCode(), response.getRemark()); }
// With the mocked invokeSync returning success, createUser must complete without throwing.
@Test public void testCreateUser() throws RemotingException, InterruptedException, MQBrokerException { mockInvokeSync(); mqClientAPI.createUser(defaultBrokerAddr, new UserInfo(), defaultTimeout); }
// Unsupported-command fallback: always replies with a single FEATURE_NOT_SUPPORTED
// PostgreSQL error packet.
@Override public Collection<DatabasePacket> execute() { // TODO consider what severity and error code to use PostgreSQLErrorResponsePacket packet = PostgreSQLErrorResponsePacket.newBuilder(PostgreSQLMessageSeverityLevel.ERROR, PostgreSQLVendorError.FEATURE_NOT_SUPPORTED, PostgreSQLVendorError.FEATURE_NOT_SUPPORTED.getReason()).build(); return Collections.singleton(packet); }
// execute() must yield exactly one packet, and it must be an error response packet.
@Test void assertExecute() { PostgreSQLUnsupportedCommandExecutor commandExecutor = new PostgreSQLUnsupportedCommandExecutor(); Collection<DatabasePacket> actual = commandExecutor.execute(); assertThat(actual.size(), is(1)); assertThat(actual.iterator().next(), instanceOf(PostgreSQLErrorResponsePacket.class)); }
// Pure delegation to the wrapped servlet response's header names.
@Override public Collection<String> getHeaderNames() { return stream.response().getHeaderNames(); }
// getHeaderNames() must be forwarded to the underlying response.
@Test public void get_header_names() { underTest.getHeaderNames(); verify(response).getHeaderNames(); }
// Cluster version is not available from this client-side view; always unsupported.
@Override @Nonnull public Version getClusterVersion() { throw new UnsupportedOperationException(); }
// Asking a client's Cluster for the version must raise UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class) public void getClusterVersion() { client().getCluster().getClusterVersion(); }
// Deserializes the raw sketch bytes and seeds a fresh tuple Union (configured
// with the aggregator's nominal entries and summary mode) with it, so later
// values can be unioned in.
@Override public Object getInitialAggregatedValue(byte[] rawValue) { Sketch<IntegerSummary> initialValue = deserializeAggregatedValue(rawValue); Union tupleUnion = new Union<>(_nominalEntries, new IntegerSummarySetOperations(_mode, _mode)); tupleUnion.union(initialValue); return tupleUnion; }
// A serialized single-entry sketch must round-trip through
// getInitialAggregatedValue with an estimate of 1.0.
@Test public void initialShouldParseASketch() { IntegerTupleSketchValueAggregator agg = new IntegerTupleSketchValueAggregator(IntegerSummary.Mode.Sum); assertEquals(toSketch(agg.getInitialAggregatedValue(sketchContaining("hello world", 1))).getEstimate(), 1.0); }
// Element accessor: maps the (i, j) coordinate to the flat backing array via index().
@Override public float get(int i, int j) { return A[index(i, j)]; }
// Spot-checks stored and implicit-zero entries of the fixture matrix.
@Test public void testGet() { System.out.println("get"); assertEquals(0.9f, matrix.get(0, 0), 1E-6f); assertEquals(0.8f, matrix.get(2, 2), 1E-6f); assertEquals(0.5f, matrix.get(1, 1), 1E-6f); assertEquals(0.0f, matrix.get(2, 0), 1E-6f); assertEquals(0.0f, matrix.get(0, 2), 1E-6f); assertEquals(0.4f, matrix.get(0, 1), 1E-6f); }
// Delegates the view-permission decision to whichever config part carries the
// authorization definition.
@Override public boolean hasViewPermission(CaseInsensitiveString username, UserRoleMatcher userRoleMatcher, boolean everyoneIsAllowedToViewIfNoAuthIsDefined) { return this.getAuthorizationPart().hasViewPermission(username, userRoleMatcher, everyoneIsAllowedToViewIfNoAuthIsDefined); }
// With no authorization defined in either part, the result must follow the
// everyoneIsAllowed... flag passed in (true -> allowed, false -> denied).
@Test public void shouldUseDefaultPermissionsForViewPermissionIfAuthorizationIsNotDefined_When2ConfigParts() { BasicPipelineConfigs filePart = new BasicPipelineConfigs(); filePart.setOrigin(new FileConfigOrigin()); MergePipelineConfigs merge = new MergePipelineConfigs(new BasicPipelineConfigs(), filePart); assertThat(merge.hasViewPermission(new CaseInsensitiveString("anyone"), null, true), is(true)); assertThat(merge.hasViewPermission(new CaseInsensitiveString("anyone"), null, false), is(false)); }
// Converts a NetworkEndpoint proto into a Guava HostAndPort: IP-only and
// hostname-only variants map to host-only values; *_PORT variants carry the
// port. Hostname is preferred over IP when both are present (IP_HOSTNAME*).
// Unspecified/unrecognized types are programming errors (AssertionError).
public static HostAndPort toHostAndPort(NetworkEndpoint networkEndpoint) { switch (networkEndpoint.getType()) { case IP: return HostAndPort.fromHost(networkEndpoint.getIpAddress().getAddress()); case IP_PORT: return HostAndPort.fromParts( networkEndpoint.getIpAddress().getAddress(), networkEndpoint.getPort().getPortNumber()); case HOSTNAME: case IP_HOSTNAME: return HostAndPort.fromHost(networkEndpoint.getHostname().getName()); case HOSTNAME_PORT: case IP_HOSTNAME_PORT: return HostAndPort.fromParts( networkEndpoint.getHostname().getName(), networkEndpoint.getPort().getPortNumber()); case UNRECOGNIZED: case TYPE_UNSPECIFIED: throw new AssertionError("Type for NetworkEndpoint must be specified."); } throw new AssertionError( String.format( "Should never happen. Unchecked NetworkEndpoint type: %s", networkEndpoint.getType())); }
// A HOSTNAME endpoint must convert to a host-only HostAndPort.
@Test public void toHostAndPort_withHostname_returnsHostWithHostname() { NetworkEndpoint hostnameEndpoint = NetworkEndpoint.newBuilder() .setType(NetworkEndpoint.Type.HOSTNAME) .setHostname(Hostname.newBuilder().setName("localhost")) .build(); assertThat(NetworkEndpointUtils.toHostAndPort(hostnameEndpoint)) .isEqualTo(HostAndPort.fromHost("localhost")); }
// Retry policy: no retry past maxRetries; no retry when the exception's exact
// class is registered as non-retriable (fast path) or is a subclass of one
// (isInstance loop); no retry for cancelled requests. Otherwise retry only
// idempotent HTTP methods.
@Override public boolean retryRequest( HttpRequest request, IOException exception, int execCount, HttpContext context) { if (execCount > maxRetries) { // Do not retry if over max retries return false; } if (nonRetriableExceptions.contains(exception.getClass())) { return false; } else { for (Class<? extends IOException> rejectException : nonRetriableExceptions) { if (rejectException.isInstance(exception)) { return false; } } } if (request instanceof CancellableDependency && ((CancellableDependency) request).isCancelled()) { return false; } // Retry if the request is considered idempotent return Method.isIdempotent(request.getMethod()); }
// A plain IOException on a GET (idempotent, not cancelled, within retry budget)
// must be retried.
@Test public void retryOnNonAbortedRequests() { HttpGet request = new HttpGet("/"); assertThat(retryStrategy.retryRequest(request, new IOException(), 1, null)).isTrue(); }
// Factory method returning the shared singleton authenticator; the session is
// not used to build per-request state.
// NOTE(review): the "{}" placeholder implies an SLF4J-style logger — if this is
// a JBoss Logging Logger, use infof()/infov() formatting instead; verify which
// logger type `logger` is.
@Override public Authenticator create(KeycloakSession session) { logger.info("Trying to create {} via factory.", this.getClass().getSimpleName()); return SINGLETON; }
// The factory must produce an IndividualEdsLoginAuthenticator instance.
@Test public void testCreateMethod() { KeycloakSession session = mock(KeycloakSession.class); Authenticator authenticator = factory.create(session); assertTrue(authenticator instanceof IndividualEdsLoginAuthenticator, "The created Authenticator is not an instance of IndividualEdsLoginAuthenticator."); }