focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Simple accessor: reports whether this tracker client is excluded from load balancing.
@Override public boolean doNotLoadBalance() { return _doNotLoadBalance; }
// Verifies the doNotLoadBalance constructor flag is reflected verbatim by the accessor,
// for both true and false.
@Test public void testDoNotLoadBalance() { boolean doNotLoadBalance = true; _trackerClient = new TrackerClientImpl(URI.create("uri"), new HashMap<>(), null, SystemClock.instance(), 1000, (test) -> false, false, false, doNotLoadBalance); Assert.assertEquals(_trackerClient.doNotLoadBalance(), doNotLoadBalance); doNotLoadBalance = false; _trackerClient = new TrackerClientImpl(URI.create("uri"), new HashMap<>(), null, SystemClock.instance(), 1000, (test) -> false, false, false, doNotLoadBalance); Assert.assertEquals(_trackerClient.doNotLoadBalance(), doNotLoadBalance); }
// Filters the instance list by tag-matching rules for the target service, then delegates the
// (possibly reduced) list to the next handler in the chain. Returns the input unchanged when
// shouldHandle rejects it.
@Override public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) { if (!shouldHandle(instances)) { return instances; } List<Object> result = getTargetInstancesByRules(targetName, instances); return super.handle(targetName, result, requestData); }
// With global tag-match rules active and app metadata group=red, only the matching instance
// (version 1.0.1) must survive routing. Global rules are reset at the end of the test.
@Test public void testGetTargetInstancesByGlobalRules() { RuleInitializationUtils.initGlobalTagMatchRules(); List<Object> instances = new ArrayList<>(); ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0"); instances.add(instance1); ServiceInstance instance2 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.1"); instances.add(instance2); Map<String, String> metadata = new HashMap<>(); metadata.put("group", "red"); AppCache.INSTANCE.setMetadata(metadata); List<Object> targetInstances = tagRouteHandler.handle("foo", instances, new RequestData(null, null, null)); Assert.assertEquals(1, targetInstances.size()); Assert.assertEquals(instance2, targetInstances.get(0)); ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetGlobalRule(Collections.emptyList()); }
// Deletes a dictionary type after verifying it exists and has no dictionary data attached.
@Override
public void deleteDictType(Long id) {
    // Validate that the dictionary type exists
    DictTypeDO dictType = validateDictTypeExists(id);
    // Validate that it has no dictionary data
    if (dictDataService.getDictDataCountByDictType(dictType.getType()) > 0) {
        throw exception(DICT_TYPE_HAS_CHILDREN);
    }
    // Delete the dictionary type (soft delete with timestamp)
    dictTypeMapper.updateToDelete(id, LocalDateTime.now());
}
// Deleting an existing dictionary type with no attached dictionary data must remove the row.
@Test
public void testDeleteDictType_success() {
    // mock data
    DictTypeDO dbDictType = randomDictTypeDO();
    dictTypeMapper.insert(dbDictType);// @Sql: insert an existing row first
    // prepare parameter
    Long id = dbDictType.getId();
    // invoke
    dictTypeService.deleteDictType(id);
    // verify the row no longer exists
    assertNull(dictTypeMapper.selectById(id));
}
static <T> T getWildcardMappedObject(final Map<String, T> mapping, final String query) { T value = mapping.get(query); if (value == null) { for (String key : mapping.keySet()) { // Turn the search key into a regex, using all characters but the * as a literal. String regex = Arrays.stream(key.split("\\*")) // split in parts that do not have a wildcard in them .map(Pattern::quote) // each part should be used as a literal (not as a regex or partial regex) .collect(Collectors.joining(".*")); // join all literal parts with a regex representation on the wildcard. if (key.endsWith("*")) { // the 'split' will have removed any trailing wildcard characters. Correct for that. regex += ".*"; } if (query.matches(regex)) { value = mapping.get(key); break; } } } return value; }
// A '*' wildcard in the file-name position ("myplugin/baz/*.jsp") must match a concrete file
// in that directory.
@Test
public void testSubdirWildcardExtension() throws Exception {
    // Setup test fixture.
    final Map<String, Object> haystack = Map.of("myplugin/baz/*.jsp", new Object());
    // Execute system under test.
    final Object result = PluginServlet.getWildcardMappedObject(haystack, "myplugin/baz/foo.jsp");
    // Verify results.
    assertNotNull(result);
}
/**
 * Returns the configured mode, falling back to a repository-less Standalone mode when none
 * has been set.
 */
public ModeConfiguration getModeConfiguration() {
    if (null != modeConfig) {
        return modeConfig;
    }
    return new ModeConfiguration("Standalone", null);
}
// When an explicit mode configuration is supplied, getModeConfiguration must return it as-is
// (type "Cluster" with its repository), not the Standalone fallback.
@Test void assertGetModeConfiguration() { ModeConfiguration modeConfig = new ModeConfiguration("Cluster", mock(PersistRepositoryConfiguration.class)); ContextManagerBuilderParameter param = new ContextManagerBuilderParameter(modeConfig, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList(), new Properties(), null, null, false); assertThat(param.getModeConfiguration().getType(), is("Cluster")); assertNotNull(param.getModeConfiguration().getRepository()); }
// Builds a "SELECT\n  col1\n  ,col2 ... FROM schema.table;" statement, appending column and
// table comments as SQL line comments ("-- ..."). Quote characters are stripped from column
// comments so they cannot break the generated statement. Exact whitespace matters: the
// companion test pins the output byte-for-byte.
@Override public String getSqlSelect(Table table) { List<Column> columns = table.getColumns(); StringBuilder sb = new StringBuilder("SELECT\n"); for (int i = 0; i < columns.size(); i++) { sb.append("    "); if (i > 0) { sb.append(","); } String columnComment = columns.get(i).getComment(); if (Asserts.isNotNullString(columnComment)) { if (columnComment.contains("'") || columnComment.contains("\"")) { columnComment = columnComment.replaceAll("[\"']", ""); } sb.append(String.format("`%s`  --  %s \n", columns.get(i).getName(), columnComment)); } else { sb.append(String.format("`%s` \n", columns.get(i).getName())); } } if (Asserts.isNotNullString(table.getComment())) { sb.append(String.format(" FROM %s.%s; -- %s\n", table.getSchema(), table.getName(), table.getComment())); } else { sb.append(String.format(" FROM %s.%s;\n", table.getSchema(), table.getName())); } return sb.toString(); }
// Pins the exact SELECT text (indentation, comma placement, "--" comments) produced for a
// three-column table with comments.
@Test void getSqlSelect() { SubAbstractDriver ad = new SubAbstractDriver(); String result = ad.getSqlSelect(table); assertThat( result, equalTo("SELECT\n    `column1`  --  comment abc \n" + "    ,`column2`  --  comment abc \n" + "    ,`column3`  --  comment abc \n" + " FROM SchemaOrigin.TableNameOrigin;\n")); }
// Cost of traversing the link; the ResourceContext argument is intentionally unused.
@Override
public double cost(Link link, ResourceContext context) {
    // explicitly call a method not depending on LinkResourceService
    return cost(link);
}
// The latency constraint's cost for each link must equal that link's annotated latency value.
@Test public void testCost() { sut = new LatencyConstraint(Duration.of(10, ChronoUnit.NANOS)); assertThat(sut.cost(link1, resourceContext), is(closeTo(Double.parseDouble(LATENCY1), 1.0e-6))); assertThat(sut.cost(link2, resourceContext), is(closeTo(Double.parseDouble(LATENCY2), 1.0e-6))); }
/**
 * Derives the bucket id by formatting the current processing time with the configured
 * pattern and zone. The formatter is built lazily on first use and reused afterwards.
 */
@Override
public String getBucketId(IN element, BucketAssigner.Context context) {
    DateTimeFormatter formatter = dateTimeFormatter;
    if (formatter == null) {
        formatter = DateTimeFormatter.ofPattern(formatString).withZone(zoneId);
        dateTimeFormatter = formatter;
    }
    final Instant processingTime = Instant.ofEpochMilli(context.currentProcessingTime());
    return formatter.format(processingTime);
}
// A custom "yyyy-MM-dd-HH" pattern with an explicit zone must render the mocked processing
// time in that zone.
@Test void testGetBucketPathWithSpecifiedFormatString() { DateTimeBucketAssigner bucketAssigner = new DateTimeBucketAssigner("yyyy-MM-dd-HH", ZoneId.of("America/Los_Angeles")); assertThat(bucketAssigner.getBucketId(null, mockedContext)).isEqualTo("2018-08-03-23"); }
// Connects this keyed stream with another keyed stream through a two-input (non-broadcast)
// process function: rejects process functions declaring IDENTICAL-redistribution state,
// derives the output type from both inputs, registers the two-input transformation with the
// environment, and returns the resulting non-keyed stream wrapped with a configure handle.
@Override public <T_OTHER, OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> connectAndProcess( KeyedPartitionStream<K, T_OTHER> other, TwoInputNonBroadcastStreamProcessFunction<V, T_OTHER, OUT> processFunction) { validateStates( processFunction.usesStates(), new HashSet<>( Collections.singletonList(StateDeclaration.RedistributionMode.IDENTICAL))); TypeInformation<OUT> outTypeInfo = StreamUtils.getOutputTypeForTwoInputNonBroadcastProcessFunction( processFunction, getType(), ((KeyedPartitionStreamImpl<K, T_OTHER>) other).getType()); KeyedTwoInputNonBroadcastProcessOperator<K, V, T_OTHER, OUT> processOperator = new KeyedTwoInputNonBroadcastProcessOperator<>(processFunction); Transformation<OUT> outTransformation = StreamUtils.getTwoInputTransformation( "Keyed-TwoInput-Process", this, (KeyedPartitionStreamImpl<K, T_OTHER>) other, outTypeInfo, processOperator); environment.addOperator(outTransformation); return StreamUtils.wrapWithConfigureHandle( new NonKeyedPartitionStreamImpl<>(environment, outTransformation)); }
// Connecting with a process function that declares IDENTICAL-redistribution state must be
// rejected with IllegalRedistributionModeException.
@Test void testStateErrorWithConnectKeyedStream() throws Exception { ExecutionEnvironmentImpl env = StreamTestUtils.getEnv(); KeyedPartitionStream<Integer, Integer> stream = createKeyedStream(env); assertThatThrownBy( () -> stream.connectAndProcess( createKeyedStream( env, new TestingTransformation<>("t2", Types.LONG, 1), (KeySelector<Long, Integer>) Math::toIntExact), new StreamTestUtils .NoOpTwoInputNonBroadcastStreamProcessFunction( new HashSet<>( Collections.singletonList( modeIdenticalStateDeclaration))))) .isInstanceOf(IllegalRedistributionModeException.class); }
// Null-safe conversion of a scope set to strings: applies systemScopeToString to each element
// and drops nulls, preserving iteration order via LinkedHashSet. Returns null for null input.
@Override public Set<String> toStrings(Set<SystemScope> scope) { if (scope == null) { return null; } else { return new LinkedHashSet<>(Collections2.filter(Collections2.transform(scope, systemScopeToString), Predicates.notNull())); } }
// toStrings must pass null through and convert both scope fixtures to their string forms.
@Test
public void toStrings() {
    // check null condition
    assertThat(service.toStrings(null), is(nullValue()));
    assertThat(service.toStrings(allScopes), equalTo(allScopeStrings));
    assertThat(service.toStrings(allScopesWithValue), equalTo(allScopeStringsWithValue));
}
// Convenience overload: resolves the stream name and topic name from the configs, then
// delegates to the (name, topic) variant.
public static String processingLogStreamCreateStatement( final ProcessingLogConfig config, final KsqlConfig ksqlConfig ) { return processingLogStreamCreateStatement( config.getString(ProcessingLogConfig.STREAM_NAME), getTopicName(config, ksqlConfig) ); }
// Without an explicit topic setting, the generated CREATE STREAM DDL must reference the
// default processing-log topic name.
@Test
public void shouldBuildCorrectStreamCreateDDLWithDefaultTopicName() {
    // Given:
    serviceContext.getTopicClient().createTopic(DEFAULT_TOPIC, 1, (short) 1);
    // When:
    final String statement = ProcessingLogServerUtils.processingLogStreamCreateStatement(
        new ProcessingLogConfig(
            ImmutableMap.of(
                ProcessingLogConfig.STREAM_AUTO_CREATE, true,
                ProcessingLogConfig.STREAM_NAME, STREAM
            )
        ),
        ksqlConfig);
    // Then:
    assertThat(statement, containsString("KAFKA_TOPIC='ksql_cluster.ksql_processing_log'"));
}
// One-shot migration: assigns the default index set to every stream that lacks one, then
// records a MigrationCompleted marker so subsequent runs are no-ops.
@Override
public void upgrade() {
    // Only run this migration once.
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        LOG.debug("Migration already completed.");
        return;
    }
    final IndexSetConfig indexSetConfig = findDefaultIndexSet();
    final ImmutableSet.Builder<String> completedStreamIds = ImmutableSet.builder();
    final ImmutableSet.Builder<String> failedStreamIds = ImmutableSet.builder();
    // Assign the "default index set" to all existing streams. Until now, there was no way to manually create
    // index sets, so the only one that exists is the "default" one created by an earlier migration.
    for (Stream stream : streamService.loadAll()) {
        if (isNullOrEmpty(stream.getIndexSetId())) {
            LOG.info("Assigning index set <{}> ({}) to stream <{}> ({})", indexSetConfig.id(), indexSetConfig.title(), stream.getId(), stream.getTitle());
            stream.setIndexSetId(indexSetConfig.id());
            try {
                streamService.save(stream);
                completedStreamIds.add(stream.getId());
            } catch (ValidationException e) {
                // A failed save is recorded rather than aborting the whole migration.
                LOG.error("Unable to save stream <{}>", stream.getId(), e);
                failedStreamIds.add(stream.getId());
            }
        }
    }
    // Mark this migration as done.
    clusterConfigService.write(MigrationCompleted.create(indexSetConfig.id(), completedStreamIds.build(), failedStreamIds.build()));
}
// When the MigrationCompleted marker already exists, upgrade() must neither save any stream
// nor write a new marker.
@Test public void upgradeWhenAlreadyCompleted() throws Exception { final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class); when(indexSetService.findAll()).thenReturn(Collections.singletonList(indexSetConfig)); when(indexSetConfig.id()).thenReturn("abc123"); when(clusterConfigService.get(V20161122174500_AssignIndexSetsToStreamsMigration.MigrationCompleted.class)) .thenReturn(V20161122174500_AssignIndexSetsToStreamsMigration.MigrationCompleted.create("1", Collections.emptySet(), Collections.emptySet())); migration.upgrade(); verify(streamService, never()).save(any(Stream.class)); verify(clusterConfigService, never()).write(any(V20161122174500_AssignIndexSetsToStreamsMigration.MigrationCompleted.class)); }
/**
 * Returns a {@link PrefetchableIterable} over the given values. An empty input yields the
 * shared empty iterable; otherwise each {@code createIterator()} call produces a fresh
 * iterator over the same array.
 *
 * <p>The varargs array is only read, never written to, so generic varargs use is safe —
 * hence {@code @SafeVarargs}.
 */
@SafeVarargs
public static <T> PrefetchableIterable<T> fromArray(T... values) {
  if (values.length == 0) {
    return emptyIterable();
  }
  return new Default<T>() {
    @Override
    public PrefetchableIterator<T> createIterator() {
      return PrefetchableIterators.fromArray(values);
    }
  };
}
// A Default-based iterable's iterator must not be ready before prefetch() and must become
// ready after it.
@Test public void testDefaultPrefetch() { PrefetchableIterable<String> iterable = new Default<String>() { @Override protected PrefetchableIterator<String> createIterator() { return new ReadyAfterPrefetchUntilNext<>( PrefetchableIterators.fromArray("A", "B", "C")); } }; assertFalse(iterable.iterator().isReady()); iterable.prefetch(); assertTrue(iterable.iterator().isReady()); }
// Inserts a new destination filter rule for the given stream. The DTO must not carry an id
// (creation only); its stream id is overwritten with the given one. Posts a cluster-wide
// update event and returns the persisted rule.
public StreamDestinationFilterRuleDTO createForStream(String streamId, StreamDestinationFilterRuleDTO dto) {
    if (!isBlank(dto.id())) {
        throw new IllegalArgumentException("id must be blank");
    }
    // We don't want to allow the creation of a filter rule for a different stream, so we enforce the stream ID.
    final var dtoId = insertedId(collection.insertOne(dto.withStream(streamId)));
    clusterEventBus.post(StreamDestinationFilterUpdatedEvent.of(dtoId.toHexString()));
    return utils.getById(dtoId)
        .orElseThrow(() -> new IllegalArgumentException(f("Couldn't insert document: %s", dto)));
}
// Round-trip: creating a rule must persist all fields (id assigned, title, description,
// stream id, destination type, status, and the rule tree) and return them intact.
@Test void createForStream() { final var result = service.createForStream("stream-1", StreamDestinationFilterRuleDTO.builder() .title("Test") .description("A Test") .streamId("stream-1") .destinationType("indexer") .status(StreamDestinationFilterRuleDTO.Status.DISABLED) .rule(RuleBuilder.builder() .operator(RuleBuilderStep.Operator.AND) .conditions(List.of( RuleBuilderStep.builder() .function("has_field") .parameters(Map.of("field", "is_debug")) .build() )) .build()) .build()); assertThat(result.id()).isNotBlank(); assertThat(result.title()).isEqualTo("Test"); assertThat(result.description()).get().isEqualTo("A Test"); assertThat(result.streamId()).isEqualTo("stream-1"); assertThat(result.destinationType()).isEqualTo("indexer"); assertThat(result.status()).isEqualTo(StreamDestinationFilterRuleDTO.Status.DISABLED); assertThat(result.rule()).satisfies(rule -> { assertThat(rule.operator()).isEqualTo(RuleBuilderStep.Operator.AND); assertThat(rule.conditions()).hasSize(1); }); }
/**
 * Returns whether the given predicate proves, from dictionary pages alone, that the row group
 * described by {@code columns} can be dropped.
 *
 * @param pred         the filter predicate to evaluate; must not be null
 * @param columns      column chunk metadata for the row group; must not be null
 * @param dictionaries access to the row group's dictionary pages
 * @return true when the dictionaries show no record can match, false otherwise
 */
public static boolean canDrop(
    FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
  // Fixed typo in the null-check messages ("cannnot" -> "cannot").
  Objects.requireNonNull(pred, "pred cannot be null");
  Objects.requireNonNull(columns, "columns cannot be null");
  return pred.accept(new DictionaryFilter(columns, dictionaries));
}
// eq() on an INT96 column must never allow dropping a block, including for a null literal.
@Test
public void testEqInt96() throws Exception {
    BinaryColumn b = binaryColumn("int96_field");
    // INT96 ordering is undefined => no filtering shall be done
    assertFalse("Should not drop block for -2", canDrop(eq(b, toBinary("-2", 12)), ccmd, dictionaries));
    assertFalse("Should not drop block for -1", canDrop(eq(b, toBinary("-1", 12)), ccmd, dictionaries));
    assertFalse("Should not drop block for null", canDrop(eq(b, null), ccmd, dictionaries));
}
// Updates an extension resource: the request body's metadata name must be present and match
// the "name" path variable, otherwise the request is rejected with a ServerWebInputException.
// On success the updated resource is returned as a 200 JSON response.
@Override public Mono<ServerResponse> handle(ServerRequest request) { String name = request.pathVariable("name"); return request.bodyToMono(Unstructured.class) .filter(unstructured -> unstructured.getMetadata() != null && StringUtils.hasText(unstructured.getMetadata().getName()) && Objects.equals(unstructured.getMetadata().getName(), name)) .switchIfEmpty(Mono.error(() -> new ServerWebInputException( "Cannot read body to " + scheme.groupVersionKind()))) .flatMap(client::update) .flatMap(updated -> ServerResponse .ok() .contentType(MediaType.APPLICATION_JSON) .bodyValue(updated)); }
// Happy path: a request whose body name matches the path variable is delegated to
// client.update and returned as a 200 JSON entity response containing the updated resource.
@Test
void shouldHandleCorrectly() {
    final var fake = new FakeExtension();
    var metadata = new Metadata();
    metadata.setName("my-fake");
    fake.setMetadata(metadata);
    var unstructured = new Unstructured();
    unstructured.setMetadata(metadata);
    unstructured.setApiVersion("fake.halo.run/v1alpha1");
    unstructured.setKind("Fake");
    var serverRequest = MockServerRequest.builder()
        .pathVariable("name", "my-fake")
        .body(Mono.just(unstructured));
    // when(client.fetch(eq(FakeExtension.class), eq("my-fake"))).thenReturn(Mono.just(fake));
    when(client.update(eq(unstructured))).thenReturn(Mono.just(unstructured));
    var scheme = Scheme.buildFromType(FakeExtension.class);
    var updateHandler = new ExtensionUpdateHandler(scheme, client);
    var responseMono = updateHandler.handle(serverRequest);
    StepVerifier.create(responseMono)
        .assertNext(response -> {
            assertEquals(HttpStatus.OK, response.statusCode());
            assertEquals(MediaType.APPLICATION_JSON, response.headers().getContentType());
            assertTrue(response instanceof EntityResponse<?>);
            assertEquals(unstructured, ((EntityResponse<?>) response).entity());
        })
        .verifyComplete();
    // verify(client, times(1)).fetch(eq(FakeExtension.class), eq("my-fake"));
    verify(client, times(1)).update(eq(unstructured));
}
/**
 * Registers the given checkpoint and enqueues a start request for it.
 *
 * <p>At most {@code maxCheckpoints} checkpoints may be in-flight at once; exceeding the limit
 * fails with an {@link IllegalStateException}. Starting the same checkpoint id twice fails
 * with an {@link IllegalArgumentException}.
 */
@Override
public void start(long checkpointId, CheckpointOptions checkpointOptions) {
    LOG.debug("{} starting checkpoint {} ({})", taskName, checkpointId, checkpointOptions);
    ChannelStateWriteResult result = new ChannelStateWriteResult();
    ChannelStateWriteResult put =
            results.computeIfAbsent(
                    checkpointId,
                    id -> {
                        // The limit is only checked for ids not yet present in the map.
                        // Message fixed: at failure time results.size() >= maxCheckpoints,
                        // so report ">=" rather than ">".
                        Preconditions.checkState(
                                results.size() < maxCheckpoints,
                                String.format(
                                        "%s can't start %d, results.size() >= maxCheckpoints: %d >= %d",
                                        taskName, checkpointId, results.size(), maxCheckpoints));
                        enqueue(
                                new CheckpointStartRequest(
                                        jobVertexID,
                                        subtaskIndex,
                                        checkpointId,
                                        result,
                                        checkpointOptions.getTargetLocation()),
                                false);
                        return result;
                    });
    // computeIfAbsent returns the pre-existing value when the id was already started.
    Preconditions.checkArgument(
            put == result,
            taskName + " result future already present for checkpoint " + checkpointId);
}
// Starting more than maxCheckpoints concurrent checkpoints must fail with
// IllegalStateException.
@Test void testLimit() throws IOException { int maxCheckpoints = 3; try (ChannelStateWriterImpl writer = new ChannelStateWriterImpl( JOB_VERTEX_ID, TASK_NAME, SUBTASK_INDEX, () -> CHECKPOINT_STORAGE.createCheckpointStorage(JOB_ID), maxCheckpoints, new ChannelStateWriteRequestExecutorFactory(JOB_ID), 5)) { for (int i = 0; i < maxCheckpoints; i++) { writer.start(i, CheckpointOptions.forCheckpointWithDefaultLocation()); } assertThatThrownBy( () -> writer.start( maxCheckpoints, CheckpointOptions.forCheckpointWithDefaultLocation())) .isInstanceOf(IllegalStateException.class); } }
/**
 * Recursively cleans up a parameter definition map: MAP-typed parameters get their nested
 * values cleaned, optional parameters without a value or expression (or with an empty nested
 * map) are dropped, and required parameters missing both a value and an expression fail
 * validation.
 *
 * @param params parameter definitions to clean; may be null or empty (returned unchanged)
 * @return the cleaned map with intermediate metadata removed
 */
@SuppressWarnings({"SimplifyBooleanReturn"})
public static Map<String, ParamDefinition> cleanupParams(Map<String, ParamDefinition> params) {
  if (params == null || params.isEmpty()) {
    return params;
  }
  // First pass: rebuild MAP-typed params with recursively cleaned nested values.
  Map<String, ParamDefinition> mapped =
      params.entrySet().stream()
          .collect(
              MapHelper.toListMap(
                  Map.Entry::getKey,
                  p -> {
                    ParamDefinition param = p.getValue();
                    if (param.getType() == ParamType.MAP) {
                      MapParamDefinition mapParamDef = param.asMapParamDef();
                      // A value-less optional map is kept as-is; the filter pass decides
                      // whether it survives.
                      if (mapParamDef.getValue() == null
                          && (mapParamDef.getInternalMode() == InternalParamMode.OPTIONAL)) {
                        return mapParamDef;
                      }
                      // Rebuild with the nested values cleaned recursively.
                      // (Removed a duplicated .name(...) builder call from the original.)
                      return MapParamDefinition.builder()
                          .name(mapParamDef.getName())
                          .value(cleanupParams(mapParamDef.getValue()))
                          .expression(mapParamDef.getExpression())
                          .validator(mapParamDef.getValidator())
                          .tags(mapParamDef.getTags())
                          .mode(mapParamDef.getMode())
                          .meta(mapParamDef.getMeta())
                          .build();
                    } else {
                      return param;
                    }
                  }));
  // Second pass: drop empty optional params; fail on required params without value/expression.
  Map<String, ParamDefinition> filtered =
      mapped.entrySet().stream()
          .filter(
              p -> {
                ParamDefinition param = p.getValue();
                if (param.getInternalMode() == InternalParamMode.OPTIONAL) {
                  if (param.getValue() == null && param.getExpression() == null) {
                    return false;
                  } else if (param.getType() == ParamType.MAP
                      && param.asMapParamDef().getValue() != null
                      && param.asMapParamDef().getValue().isEmpty()) {
                    return false;
                  } else {
                    return true;
                  }
                } else {
                  Checks.checkTrue(
                      param.getValue() != null || param.getExpression() != null,
                      String.format(
                          "[%s] is a required parameter (type=[%s])",
                          p.getKey(), param.getType()));
                  return true;
                }
              })
          .collect(MapHelper.toListMap(Map.Entry::getKey, Map.Entry::getValue));
  return cleanIntermediateMetadata(filtered);
}
// An optional map whose only child is an optional empty map must be removed entirely.
@Test public void testCleanupOptionalEmptyNestedMap() throws JsonProcessingException { Map<String, ParamDefinition> allParams = parseParamDefMap( "{'map': {'type': 'MAP','value': {'nested': {'type': 'MAP','value': {}, 'internal_mode': 'OPTIONAL'}}, 'internal_mode': 'OPTIONAL'}}"); Map<String, ParamDefinition> cleanedParams = ParamsMergeHelper.cleanupParams(allParams); assertEquals(0, cleanedParams.size()); }
/**
 * Removes the entry for the given key, logging when an entry was actually evicted.
 *
 * @param key the key to remove
 * @return the previously cached value, or null when the key was absent
 */
public V remove(final K key) {
    final V evicted = cacheMap.remove(key);
    if (evicted == null) {
        return null;
    }
    LOG.debug("Removed cache entry for '{}'", key);
    return evicted;
}
// Removing the sole entry must return it, leave the cache empty, and make lookups return null.
@Test
public void testRemove() {
    LruCache<String, SimpleValue> cache = new LruCache<String, SimpleValue>(1);
    SimpleValue value = new SimpleValue(true, true);
    cache.put(DEFAULT_KEY, value);
    assertEquals(1, cache.size());
    assertEquals(value, cache.getCurrentValue(DEFAULT_KEY));
    // remove the only value
    assertEquals(value, cache.remove(DEFAULT_KEY));
    assertNull(cache.getCurrentValue(DEFAULT_KEY));
    assertEquals(0, cache.size());
}
// Deprecated shim: delegates to the four-argument overload with the boolean flag fixed to
// true and a fresh Hadoop Configuration.
// NOTE(review): the flag's meaning is defined by the delegate's signature — see its Javadoc.
@Deprecated public static MessageType convert(StructType struct, FieldProjectionFilter filter) { return convert(struct, filter, true, new Configuration()); }
// A Thrift binary string field annotated with the JSON logical type must convert to a
// required BINARY Parquet primitive carrying the same logical type and field id.
@Test public void testConvertLogicalBinaryType() { LogicalTypeAnnotation jsonLogicalType = LogicalTypeAnnotation.jsonType(); String fieldName = "logicalBinaryType"; Short fieldId = 0; ThriftType.StringType jsonBinaryType = new ThriftType.StringType(); jsonBinaryType.setBinary(true); jsonBinaryType.setLogicalTypeAnnotation(jsonLogicalType); StructType thriftStruct = buildOneFieldThriftStructType(fieldName, fieldId, jsonBinaryType); MessageType actual = ThriftSchemaConvertVisitor.convert(thriftStruct, FieldProjectionFilter.ALL_COLUMNS); Type expectedParquetField = Types.primitive(PrimitiveTypeName.BINARY, Repetition.REQUIRED) .as(jsonLogicalType) .named(fieldName) .withId(fieldId); MessageType expected = buildOneFieldParquetMessage(expectedParquetField); assertEquals(expected, actual); }
// Static factory: wraps the given Retry in a RetryTransformer.
public static <T> RetryTransformer<T> of(Retry retry) { return new RetryTransformer<>(retry); }
// When every attempt returns the retryable result, the transformer must exhaust maxAttempts
// (3 calls), then emit the final "retry" value and complete normally.
@Test public void retryOnResultFailAfterMaxAttemptsUsingFlowable() throws InterruptedException { RetryConfig config = RetryConfig.<String>custom() .retryOnResult("retry"::equals) .waitDuration(Duration.ofMillis(50)) .maxAttempts(3).build(); Retry retry = Retry.of("testName", config); given(helloWorldService.returnHelloWorld()) .willReturn("retry"); Flowable.fromCallable(helloWorldService::returnHelloWorld) .compose(RetryTransformer.of(retry)) .test() .await() .assertValueCount(1) .assertValue("retry") .assertComplete() .assertSubscribed(); then(helloWorldService).should(times(3)).returnHelloWorld(); }
// Validates that a config property may be set: it must pass the deny-list check and, when
// shared runtimes are enabled, must be a query-level property. Returns 200/true on success,
// a bad-request error response for KsqlExceptions, and rethrows anything else.
// NOTE(review): the KsqlException message formats the whole `properties` map into "%s"
// rather than the single property name — looks unintended; confirm.
public EndpointResponse isValidProperty(final String property) { try { final Map<String, Object> properties = new HashMap<>(); properties.put(property, ""); denyListPropertyValidator.validateAll(properties); final KsqlConfigResolver resolver = new KsqlConfigResolver(); final Optional<ConfigItem> resolvedItem = resolver.resolve(property, false); if (ksqlEngine.getKsqlConfig().getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED) && resolvedItem.isPresent()) { if (!PropertiesList.QueryLevelProperties.contains(resolvedItem.get().getPropertyName())) { throw new KsqlException(String.format("When shared runtimes are enabled, the" + " config %s can only be set for the entire cluster and all queries currently" + " running in it, and not configurable for individual queries." + " Please use ALTER SYSTEM to change this config for all queries.", properties)); } } return EndpointResponse.ok(true); } catch (final KsqlException e) { LOG.info("Processed unsuccessfully, reason: ", e); return errorHandler.generateResponse(e, Errors.badRequest(e)); } catch (final Exception e) { LOG.info("Processed unsuccessfully, reason: ", e); throw e; } }
// With shared runtimes enabled, validating a non-query-level property must yield HTTP 400.
@Test
public void shouldReturnBadRequestWhenIsValidatorIsCalledWithNonQueryLevelProps() {
    final Map<String, Object> properties = new HashMap<>();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "");
    givenKsqlConfigWith(ImmutableMap.of(
        KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED, true
    ));
    // When:
    final EndpointResponse response = ksqlResource.isValidProperty("ksql.service.id");
    // Then:
    assertThat(response.getStatus(), equalTo(400));
}
/**
 * Copies all remaining bytes from the input stream to the output stream using a 4 KiB buffer.
 * Neither stream is closed; any IOException from either stream propagates to the caller.
 */
static void copy(InputStream is, OutputStream os) throws IOException {
    final byte[] buffer = new byte[4096];
    for (int read = is.read(buffer); read != -1; read = is.read(buffer)) {
        os.write(buffer, 0, read);
    }
}
// An IOException raised while reading must propagate out of copy() unchanged.
@Test public void testCopyError() throws IOException { InputStream mockedIn = mock(InputStream.class); OutputStream out = new ByteArrayOutputStream(); when(mockedIn.read(any(byte[].class))).thenThrow(new IOException()); assertThrows(IOException.class, () -> HttpAccessTokenRetriever.copy(mockedIn, out)); }
/**
 * Compares a local and a remote file by checksum.
 *
 * @return {@code unknown} when either side lacks a checksum, {@code equal} when the checksums
 * match, {@code notequal} otherwise
 */
@Override
public Comparison compare(final Path.Type type, final PathAttributes local, final PathAttributes remote) {
    // Without a checksum on either side there is nothing to compare.
    if(Checksum.NONE == remote.getChecksum()) {
        log.warn(String.format("No remote checksum available for comparison %s", remote));
        return Comparison.unknown;
    }
    if(Checksum.NONE == local.getChecksum()) {
        log.warn(String.format("No local checksum available for comparison %s", local));
        return Comparison.unknown;
    }
    final boolean matches = remote.getChecksum().equals(local.getChecksum());
    if(matches) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Equal checksum %s", remote.getChecksum()));
        }
        return Comparison.equal;
    }
    if(log.isDebugEnabled()) {
        log.debug(String.format("Local checksum %s not equal remote %s", local.getChecksum(), remote.getChecksum()));
    }
    return Comparison.notequal;
}
// Matching md5 checksums must compare equal; differing ones must compare notequal.
@Test public void testCompare() { ComparisonService s = new ChecksumComparisonService(); assertEquals(Comparison.equal, s.compare(Path.Type.file, new PathAttributes() { @Override public Checksum getChecksum() { return new Checksum(HashAlgorithm.md5, "a"); } }, new PathAttributes().withChecksum(new Checksum(HashAlgorithm.md5, "a")) )); assertEquals(Comparison.notequal, s.compare(Path.Type.file, new PathAttributes() { @Override public Checksum getChecksum() { return new Checksum(HashAlgorithm.md5, "b"); } }, new PathAttributes().withChecksum(new Checksum(HashAlgorithm.md5, "a")))); }
// Maps a Calcite RelDataType (including ARRAY component types) onto the engine's
// ColumnDataType: integer-like types widen to INT/LONG, DECIMAL is resolved by precision
// and scale via resolveDecimal, date/time types collapse to TIMESTAMP, and unexpected
// scalar types fall back to OBJECT with a warning. Unsupported collection types throw.
public static ColumnDataType convertToColumnDataType(RelDataType relDataType) { SqlTypeName sqlTypeName = relDataType.getSqlTypeName(); if (sqlTypeName == SqlTypeName.NULL) { return ColumnDataType.UNKNOWN; } boolean isArray = (sqlTypeName == SqlTypeName.ARRAY); if (isArray) { assert relDataType.getComponentType() != null; sqlTypeName = relDataType.getComponentType().getSqlTypeName(); } switch (sqlTypeName) { case BOOLEAN: return isArray ? ColumnDataType.BOOLEAN_ARRAY : ColumnDataType.BOOLEAN; case TINYINT: case SMALLINT: case INTEGER: return isArray ? ColumnDataType.INT_ARRAY : ColumnDataType.INT; case BIGINT: return isArray ? ColumnDataType.LONG_ARRAY : ColumnDataType.LONG; case DECIMAL: return resolveDecimal(relDataType, isArray); case FLOAT: case REAL: return isArray ? ColumnDataType.FLOAT_ARRAY : ColumnDataType.FLOAT; case DOUBLE: return isArray ? ColumnDataType.DOUBLE_ARRAY : ColumnDataType.DOUBLE; case DATE: case TIME: case TIMESTAMP: return isArray ? ColumnDataType.TIMESTAMP_ARRAY : ColumnDataType.TIMESTAMP; case CHAR: case VARCHAR: return isArray ? ColumnDataType.STRING_ARRAY : ColumnDataType.STRING; case BINARY: case VARBINARY: return isArray ? ColumnDataType.BYTES_ARRAY : ColumnDataType.BYTES; case OTHER: case ANY: return ColumnDataType.OBJECT; default: if (relDataType.getComponentType() != null) { throw new IllegalArgumentException("Unsupported collection type: " + relDataType); } LOGGER.warn("Unexpected SQL type: {}, use OBJECT instead", sqlTypeName); return ColumnDataType.OBJECT; } }
// DECIMAL resolution by precision/scale: small scale-0 precisions map to INT, larger to
// LONG, very large to BIG_DECIMAL; with a scale, small precisions map to DOUBLE and large
// to BIG_DECIMAL.
@Test public void testBigDecimal() { Assert.assertEquals(RelToPlanNodeConverter.convertToColumnDataType( new BasicSqlType(RelDataTypeSystem.DEFAULT, SqlTypeName.DECIMAL, 10)), DataSchema.ColumnDataType.INT); Assert.assertEquals(RelToPlanNodeConverter.convertToColumnDataType( new BasicSqlType(RelDataTypeSystem.DEFAULT, SqlTypeName.DECIMAL, 38)), DataSchema.ColumnDataType.LONG); Assert.assertEquals(RelToPlanNodeConverter.convertToColumnDataType( new BasicSqlType(RelDataTypeSystem.DEFAULT, SqlTypeName.DECIMAL, 39)), DataSchema.ColumnDataType.BIG_DECIMAL); Assert.assertEquals(RelToPlanNodeConverter.convertToColumnDataType( new BasicSqlType(RelDataTypeSystem.DEFAULT, SqlTypeName.DECIMAL, 14, 10)), DataSchema.ColumnDataType.DOUBLE); Assert.assertEquals(RelToPlanNodeConverter.convertToColumnDataType( new BasicSqlType(RelDataTypeSystem.DEFAULT, SqlTypeName.DECIMAL, 30, 10)), DataSchema.ColumnDataType.DOUBLE); Assert.assertEquals(RelToPlanNodeConverter.convertToColumnDataType( new BasicSqlType(RelDataTypeSystem.DEFAULT, SqlTypeName.DECIMAL, 31, 10)), DataSchema.ColumnDataType.BIG_DECIMAL); }
public Optional<Violation> validate(IndexSetConfig newConfig) { // Don't validate prefix conflicts in case of an update if (Strings.isNullOrEmpty(newConfig.id())) { final Violation prefixViolation = validatePrefix(newConfig); if (prefixViolation != null) { return Optional.of(prefixViolation); } } final Violation fieldMappingViolation = validateMappingChangesAreLegal(newConfig); if (fieldMappingViolation != null) { return Optional.of(fieldMappingViolation); } Violation refreshIntervalViolation = validateSimpleIndexSetConfig(newConfig); if (refreshIntervalViolation != null){ return Optional.of(refreshIntervalViolation); } return Optional.empty(); }
// A field-type refresh interval below one second (999 ms) must produce a violation.
@Test public void validateWithInvalidFieldTypeRefreshInterval() { final Duration fieldTypeRefreshInterval = Duration.millis(999); final IndexSetConfig newConfig = mock(IndexSetConfig.class); final IndexSet indexSet = mock(IndexSet.class); when(indexSetRegistry.iterator()).thenReturn(Collections.singleton(indexSet).iterator()); when(indexSet.getIndexPrefix()).thenReturn("foo"); when(newConfig.indexPrefix()).thenReturn("graylog_index"); when(newConfig.fieldTypeRefreshInterval()).thenReturn(fieldTypeRefreshInterval); final Optional<IndexSetValidator.Violation> violation = validator.validate(newConfig); assertThat(violation).isPresent(); }
// Appends `length` bytes from src starting at srcIndex at the current writer index, growing
// the buffer if needed, then advances the writer index. Returns this for chaining.
@Override public ByteBuf writeBytes(byte[] src, int srcIndex, int length) { ensureWritable(length); setBytes(writerIndex, src, srcIndex, length); writerIndex += length; return this; }
// Writes 8 random bytes, then exercises concurrent gathering-channel reads against a slice of
// the buffer; the buffer is always released afterwards.
@Test public void testSliceReadGatheringByteChannelMultipleThreads() throws Exception { final byte[] bytes = new byte[8]; random.nextBytes(bytes); final ByteBuf buffer = newBuffer(8); buffer.writeBytes(bytes); try { testReadGatheringByteChannelMultipleThreads(buffer, bytes, true); } finally { buffer.release(); } }
/**
 * Rounds {@code value} to {@code precision} decimal places using HALF_UP rounding.
 *
 * <p>NOTE(review): non-finite inputs (NaN, infinities) cause BigDecimal.valueOf to throw a
 * NumberFormatException — confirm callers never pass them.
 */
public static double toFixed(double value, int precision) {
    final BigDecimal rounded = BigDecimal.valueOf(value).setScale(precision, RoundingMode.HALF_UP);
    return rounded.doubleValue();
}
// Rounding a float fixture to 3 decimals must change its value and equal 29.298f exactly.
@Test public void toFixedFloat() { float actualF = TbUtils.toFixed(floatVal, 3); Assertions.assertEquals(1, Float.compare(floatVal, actualF)); Assertions.assertEquals(0, Float.compare(29.298f, actualF)); }
/**
 * Returns a future that completes with the value of the first input future to complete
 * successfully. It completes exceptionally only after ALL input futures have failed, with the
 * first failure as the primary cause and later failures attached as suppressed exceptions.
 *
 * @param futures the futures to race; must be non-empty
 * @return the combined future (the single input itself when only one is given)
 * @throws IllegalArgumentException when {@code futures} is empty
 */
public static <T> CompletableFuture<T> firstOf(List<CompletableFuture<T>> futures) {
    class Combiner {
        final Object monitor = new Object();
        final CompletableFuture<T> combined = new CompletableFuture<>();
        final int futuresCount;
        Throwable error = null;
        int exceptionCount = 0;

        Combiner(int futuresCount) {
            this.futuresCount = futuresCount;
        }

        void onCompletion(T value, Throwable error) {
            if (combined.isDone()) return;
            T valueToComplete = null;
            Throwable exceptionToComplete = null;
            synchronized (monitor) {
                if (value != null) {
                    valueToComplete = value;
                } else {
                    // Keep the first error as primary; attach later ones as suppressed.
                    if (this.error == null) {
                        this.error = error;
                    } else {
                        this.error.addSuppressed(error);
                    }
                    if (++exceptionCount == futuresCount) {
                        exceptionToComplete = this.error;
                    }
                }
            }
            // Complete outside the lock so dependent stages never run while it is held.
            if (valueToComplete != null) {
                combined.complete(valueToComplete);
            } else if (exceptionToComplete != null) {
                combined.completeExceptionally(exceptionToComplete);
            }
        }
    }

    int size = futures.size();
    // Give the empty-input failure a message (was a bare IllegalArgumentException).
    if (size == 0) throw new IllegalArgumentException("futures must not be empty");
    if (size == 1) return futures.get(0);

    Combiner combiner = new Combiner(size);
    futures.forEach(future -> future.whenComplete(combiner::onCompletion));
    return combiner.combined;
}
// Two failures followed by one success: the combined future must complete normally with the
// successful value, not exceptionally.
@Test public void firstof_completes_if_any_futures_completes() { CompletableFuture<String> f1 = new CompletableFuture<>(); CompletableFuture<String> f2 = new CompletableFuture<>(); CompletableFuture<String> f3 = new CompletableFuture<>(); CompletableFuture<String> result = CompletableFutures.firstOf(List.of(f1, f2, f3)); f1.completeExceptionally(new Throwable("t1")); f2.completeExceptionally(new Throwable("t2")); f3.complete("success"); assertTrue(result.isDone()); assertFalse(result.isCompletedExceptionally()); assertEquals("success", result.join()); }
// Formats this Chinese (lunar) date as yyyy-MM-dd.
// NOTE(review): during a leap month the month number is decremented so the leap month shares
// its base month's number — presumably intended given ChineseDate's month encoding; confirm.
public String toStringNormal() { return String.format("%04d-%02d-%02d", this.year, isLeapMonth() ? this.month - 1 : this.month, this.day); }
// Gregorian 2020-03-01 corresponds to lunar 2020-02-08 in the normal string form.
@Test public void toStringNormalTest(){ ChineseDate date = new ChineseDate(DateUtil.parseDate("2020-03-1")); assertEquals("2020-02-08", date.toStringNormal()); }
// Lifecycle teardown: closes the factory first, then stops the managed data source.
@Override public void stop() throws Exception { factory.close(); dataSource.stop(); }
// Stopping the manager must propagate to the underlying data source.
@Test void stopsTheDataSourceOnStopping() throws Exception { manager.stop(); verify(dataSource).stop(); }
/**
 * Validates the server's permessage-deflate handshake response parameters.
 *
 * <p>Returns a configured {@code PermessageDeflateExtension} when every parameter is
 * acceptable, or {@code null} when the extension name does not match, a parameter is
 * unknown, a window size is out of range, the server granted something this client did
 * not allow, or the server failed to honor what this client requested
 * (no-context-takeover / a smaller server window).
 */
@Override
public WebSocketClientExtension handshakeExtension(WebSocketExtensionData extensionData) {
    if (!PERMESSAGE_DEFLATE_EXTENSION.equals(extensionData.name())) {
        return null;
    }
    boolean succeed = true;
    int clientWindowSize = MAX_WINDOW_SIZE;
    int serverWindowSize = MAX_WINDOW_SIZE;
    boolean serverNoContext = false;
    boolean clientNoContext = false;
    Iterator<Entry<String, String>> parametersIterator = extensionData.parameters().entrySet().iterator();
    while (succeed && parametersIterator.hasNext()) {
        Entry<String, String> parameter = parametersIterator.next();
        if (CLIENT_MAX_WINDOW.equalsIgnoreCase(parameter.getKey())) {
            // allowed client_window_size_bits
            if (allowClientWindowSize) {
                clientWindowSize = Integer.parseInt(parameter.getValue());
                if (clientWindowSize > MAX_WINDOW_SIZE || clientWindowSize < MIN_WINDOW_SIZE) {
                    succeed = false;
                }
            } else {
                succeed = false;
            }
        } else if (SERVER_MAX_WINDOW.equalsIgnoreCase(parameter.getKey())) {
            // acknowledged server_window_size_bits
            serverWindowSize = Integer.parseInt(parameter.getValue());
            if (serverWindowSize > MAX_WINDOW_SIZE || serverWindowSize < MIN_WINDOW_SIZE) {
                succeed = false;
            }
        } else if (CLIENT_NO_CONTEXT.equalsIgnoreCase(parameter.getKey())) {
            // allowed client_no_context_takeover
            if (allowClientNoContext) {
                clientNoContext = true;
            } else {
                succeed = false;
            }
        } else if (SERVER_NO_CONTEXT.equalsIgnoreCase(parameter.getKey())) {
            // acknowledged server_no_context_takeover
            serverNoContext = true;
        } else {
            // unknown parameter
            succeed = false;
        }
    }
    // Reject if the server ignored our requested no-context-takeover or offered a larger
    // server window than we asked for.
    if ((requestedServerNoContext && !serverNoContext) || requestedServerWindowSize < serverWindowSize) {
        succeed = false;
    }
    if (succeed) {
        return new PermessageDeflateExtension(serverNoContext, serverWindowSize, clientNoContext, clientWindowSize, extensionFilterProvider);
    } else {
        return null;
    }
}
/**
 * Handshakes permessage-deflate with client_no_context_takeover and decodes two captured
 * real-world compressed frames, verifying both decompress correctly.
 */
@Test
public void testDecoderNoClientContext() {
    PerMessageDeflateClientExtensionHandshaker handshaker =
        new PerMessageDeflateClientExtensionHandshaker(6, true, MAX_WINDOW_SIZE, true, false);
    // Captured deflate payloads of two real BitMEX websocket messages.
    byte[] firstPayload = new byte[] { 76, -50, -53, 10, -62, 48, 20, 4, -48, 95, 41, 89, -37, 36, 77, 90, 31, -39, 41, -72, 112, 33, -120, 20, 20, 119, -79, 70, 123, -95, 121, -48, 92, -116, 80, -6, -17, -58, -99, -37, -31, 12, 51, 19, 1, -9, -12, 68, -111, -117, 25, 58, 111, 77, -127, -66, -64, -34, 20, 59, -64, -29, -2, 90, -100, -115, 30, 16, 114, -68, 61, 29, 40, 89, -112, -73, 25, 35, 120, -105, -67, -32, -43, -70, -84, 120, -55, 69, 43, -124, 106, -92, 18, -110, 114, -50, 111, 25, -3, 10, 17, -75, 13, 127, -84, 106, 90, -66, 84, -75, 84, 53, -89, -75, 92, -3, -40, -61, 119, 49, -117, 30, 49, 68, -59, 88, 74, -119, -34, 1, -83, -7, -48, 124, -124, -23, 16, 88, -118, 121, 54, -53, 1, 44, 32, 81, 19, 25, -115, -43, -32, -64, -67, -120, -110, -101, 121, -2, 2 };
    byte[] secondPayload = new byte[] { -86, 86, 42, 46, 77, 78, 78, 45, 6, 26, 83, 82, 84, -102, -86, 3, -28, 38, 21, 39, 23, 101, 38, -91, 2, -51, -51, 47, 74, 73, 45, 114, -54, -49, -49, -10, 49, -78, -118, 112, 10, 9, 13, 118, 1, -102, 84, -108, 90, 88, 10, 116, 27, -56, -84, 124, -112, -13, 16, 26, 116, -108, 18, -117, -46, -127, 6, 69, 99, -45, 24, 91, 91, 11, 0 };
    Map<String, String> parameters = Collections.singletonMap(CLIENT_NO_CONTEXT, null);
    WebSocketClientExtension extension = handshaker.handshakeExtension(
        new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, parameters));
    assertNotNull(extension);
    EmbeddedChannel decoderChannel = new EmbeddedChannel(extension.newExtensionDecoder());
    assertTrue(
        decoderChannel.writeInbound(new TextWebSocketFrame(true, RSV1, Unpooled.copiedBuffer(firstPayload))));
    TextWebSocketFrame firstFrameDecompressed = decoderChannel.readInbound();
    assertTrue(
        decoderChannel.writeInbound(new TextWebSocketFrame(true, RSV1, Unpooled.copiedBuffer(secondPayload))));
    TextWebSocketFrame secondFrameDecompressed = decoderChannel.readInbound();
    assertNotNull(firstFrameDecompressed);
    assertNotNull(firstFrameDecompressed.content());
    assertTrue(firstFrameDecompressed instanceof TextWebSocketFrame);
    assertEquals(firstFrameDecompressed.text(),
        "{\"info\":\"Welcome to the BitMEX Realtime API.\",\"version\""
            + ":\"2018-10-02T22:53:23.000Z\",\"timestamp\":\"2018-10-15T06:43:40.437Z\","
            + "\"docs\":\"https://www.bitmex.com/app/wsAPI\",\"limit\":{\"remaining\":39}}");
    assertTrue(firstFrameDecompressed.release());
    assertNotNull(secondFrameDecompressed);
    assertNotNull(secondFrameDecompressed.content());
    assertTrue(secondFrameDecompressed instanceof TextWebSocketFrame);
    assertEquals(secondFrameDecompressed.text(),
        "{\"success\":true,\"subscribe\":\"orderBookL2:XBTUSD\","
            + "\"request\":{\"op\":\"subscribe\",\"args\":[\"orderBookL2:XBTUSD\"]}}");
    assertTrue(secondFrameDecompressed.release());
    assertFalse(decoderChannel.finish());
}
/**
 * Registers the root resources and (re)builds the JSR-330 injection adapter over every
 * resource class reachable from them (roots plus all child resources).
 */
@Override
public void setRootResources(final Map<String, ResourceModel> rootResources) {
    log.debug("Setting root resources");
    _rootResources = rootResources;
    Collection<Class<?>> allResourceClasses = new HashSet<>();
    for (ResourceModel resourceModel : _rootResources.values()) {
        // Recursively collects the resource class and all of its children's classes.
        processChildResource(resourceModel, allResourceClasses);
    }
    _jsr330Adapter = new Jsr330Adapter(allResourceClasses, _beanProvider);
}
/** An unresolvable named bean ("dep1" -> null) must fail with a RestLiInternalException. */
@Test
public void testMissingNamedDependency() {
    Map<String, ResourceModel> pathRootResourceMap = buildResourceModels(SomeResource1.class);
    BeanProvider ctx = EasyMock.createMock(BeanProvider.class);
    // "dep1" deliberately resolves to null to simulate the missing dependency.
    EasyMock.expect(ctx.getBean(EasyMock.eq("dep1"))).andReturn(null).anyTimes();
    EasyMock.expect(ctx.getBean(EasyMock.eq("dep3"))).andReturn(new SomeDependency1()).anyTimes();
    Map<String, SomeDependency2> map = new HashMap<>();
    map.put("someBeanName", new SomeDependency2());
    EasyMock.expect(ctx.getBeansOfType(EasyMock.eq(SomeDependency2.class)))
        .andReturn(map).anyTimes();
    EasyMock.replay(ctx);
    InjectResourceFactory factory = new InjectResourceFactory(ctx);
    try {
        factory.setRootResources(pathRootResourceMap);
        fail("Expected unresolvable bean exception");
    } catch (RestLiInternalException e) {
        assertTrue(e.getMessage().startsWith("Expected to find"));
    }
    EasyMock.verify(ctx);
}
/**
 * Opens a new Windmill GetData stream against the dispatcher's service stub, wired to the
 * shared GetData throttle timer.
 */
@Override
public GetDataStream getDataStream() {
    return windmillStreamFactory.createGetDataStream(
        dispatcherClient.getWindmillServiceStub(), throttleTimers.getDataThrottleTimer());
}
/**
 * End-to-end GetDataStream test: a mirror server echoes each keyed/global data request
 * (sometimes chunked, sometimes batched, with injected errors), and 200 concurrent client
 * requests of varying sizes must all receive the expected responses.
 */
@Test
@SuppressWarnings("FutureReturnValueIgnored")
public void testStreamingGetData() throws Exception {
    // This server responds to GetDataRequests with responses that mirror the requests.
    serviceRegistry.addService(
        new CloudWindmillServiceV1Alpha1ImplBase() {
            @Override
            public StreamObserver<StreamingGetDataRequest> getDataStream(
                StreamObserver<StreamingGetDataResponse> responseObserver) {
                return new StreamObserver<StreamingGetDataRequest>() {
                    final HashSet<Long> seenIds = new HashSet<>();
                    final ResponseErrorInjector injector = new ResponseErrorInjector(responseObserver);
                    final StreamingGetDataResponse.Builder responseBuilder = StreamingGetDataResponse.newBuilder();
                    boolean sawHeader = false;

                    @Override
                    public void onNext(StreamingGetDataRequest chunk) {
                        maybeInjectError(responseObserver);
                        try {
                            if (!sawHeader) {
                                LOG.info("Received header");
                                errorCollector.checkThat(
                                    chunk.getHeader(),
                                    Matchers.equalTo(
                                        JobHeader.newBuilder()
                                            .setJobId("job")
                                            .setProjectId("project")
                                            .setWorkerId("worker")
                                            .setClientId(clientId)
                                            .build()));
                                sawHeader = true;
                            } else {
                                LOG.info(
                                    "Received get data of {} global data, {} data requests",
                                    chunk.getGlobalDataRequestCount(),
                                    chunk.getStateRequestCount());
                                errorCollector.checkThat(
                                    chunk.getSerializedSize(), Matchers.lessThanOrEqualTo(STREAM_CHUNK_SIZE));
                                int i = 0;
                                // Request ids must be unique across the whole stream.
                                for (GlobalDataRequest request : chunk.getGlobalDataRequestList()) {
                                    long requestId = chunk.getRequestId(i++);
                                    errorCollector.checkThat(seenIds.add(requestId), Matchers.is(true));
                                    sendResponse(requestId, processGlobalDataRequest(request));
                                }
                                for (ComputationGetDataRequest request : chunk.getStateRequestList()) {
                                    long requestId = chunk.getRequestId(i++);
                                    errorCollector.checkThat(seenIds.add(requestId), Matchers.is(true));
                                    sendResponse(requestId, processStateRequest(request));
                                }
                                flushResponse();
                            }
                        } catch (Exception e) {
                            errorCollector.addError(e);
                        }
                    }

                    @Override
                    public void onError(Throwable throwable) {}

                    @Override
                    public void onCompleted() {
                        injector.cancel();
                        responseObserver.onCompleted();
                    }

                    private ByteString processGlobalDataRequest(GlobalDataRequest request) {
                        errorCollector.checkThat(request.getStateFamily(), Matchers.is("family"));
                        return GlobalData.newBuilder()
                            .setDataId(request.getDataId())
                            .setStateFamily("family")
                            .setData(ByteString.copyFromUtf8(request.getDataId().getTag()))
                            .build()
                            .toByteString();
                    }

                    private ByteString processStateRequest(ComputationGetDataRequest compRequest) {
                        errorCollector.checkThat(compRequest.getRequestsCount(), Matchers.is(1));
                        errorCollector.checkThat(
                            compRequest.getComputationId(), Matchers.is("computation"));
                        KeyedGetDataRequest request = compRequest.getRequests(0);
                        KeyedGetDataResponse response =
                            makeGetDataResponse(request.getValuesToFetch(0).getTag().toStringUtf8());
                        return response.toByteString();
                    }

                    // Randomly chunks ~25% of responses to exercise client-side reassembly.
                    private void sendResponse(long id, ByteString serializedResponse) {
                        if (ThreadLocalRandom.current().nextInt(4) == 0) {
                            sendChunkedResponse(id, serializedResponse);
                        } else {
                            responseBuilder.addRequestId(id).addSerializedResponse(serializedResponse);
                            if (responseBuilder.getRequestIdCount() > 10) {
                                flushResponse();
                            }
                        }
                    }

                    private void sendChunkedResponse(long id, ByteString serializedResponse) {
                        LOG.info("Sending response with {} chunks", (serializedResponse.size() / 10) + 1);
                        for (int i = 0; i < serializedResponse.size(); i += 10) {
                            int end = Math.min(serializedResponse.size(), i + 10);
                            try {
                                responseObserver.onNext(
                                    StreamingGetDataResponse.newBuilder()
                                        .addRequestId(id)
                                        .addSerializedResponse(serializedResponse.substring(i, end))
                                        .setRemainingBytesForResponse(serializedResponse.size() - end)
                                        .build());
                            } catch (IllegalStateException e) {
                                // Stream is already closed.
                            }
                        }
                    }

                    private void flushResponse() {
                        if (responseBuilder.getRequestIdCount() > 0) {
                            LOG.info(
                                "Sending batched response of {} ids", responseBuilder.getRequestIdCount());
                            try {
                                responseObserver.onNext(responseBuilder.build());
                            } catch (IllegalStateException e) {
                                // Stream is already closed.
                            }
                            responseBuilder.clear();
                        }
                    }
                };
            }
        });
    GetDataStream stream = client.getDataStream();
    // Make requests of varying sizes to test chunking, and verify the responses.
    ExecutorService executor = Executors.newFixedThreadPool(50);
    final CountDownLatch done = new CountDownLatch(200);
    for (int i = 0; i < 100; ++i) {
        final String key = "key" + i;
        final String s = i % 5 == 0 ? largeString(i) : "tag";
        executor.submit(
            () -> {
                errorCollector.checkThat(
                    stream.requestKeyedData("computation", makeGetDataRequest(key, s)),
                    Matchers.equalTo(makeGetDataResponse(s)));
                done.countDown();
            });
        executor.execute(
            () -> {
                errorCollector.checkThat(
                    stream.requestGlobalData(makeGlobalDataRequest(key)),
                    Matchers.equalTo(makeGlobalDataResponse(key)));
                done.countDown();
            });
    }
    done.await();
    stream.halfClose();
    assertTrue(stream.awaitTermination(60, TimeUnit.SECONDS));
    executor.shutdown();
}
/**
 * Formats the clock's current instant as an AWS-style UTC timestamp,
 * e.g. {@code 20200403T102518Z}.
 *
 * <p>Uses {@code java.time.format.DateTimeFormatter} instead of {@code SimpleDateFormat}:
 * the formatter is immutable and thread-safe, and it formats the {@code Instant} directly
 * instead of taking a lossy detour through epoch milliseconds.
 *
 * @param clock source of the current time (injectable for tests)
 * @return timestamp in {@code yyyyMMdd'T'HHmmss'Z'} format, always rendered in UTC
 */
static String currentTimestamp(Clock clock) {
    // Fully-qualified to avoid touching the file's import block; withZone(UTC) lets the
    // formatter render an Instant with date/time pattern fields.
    return java.time.format.DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss'Z'")
            .withZone(java.time.ZoneOffset.UTC)
            .format(Instant.now(clock));
}
/** currentTimestamp() must render the clock's instant as a UTC yyyyMMdd'T'HHmmss'Z' string. */
@Test
public void currentTimestamp() {
    // given
    Clock clock = Clock.fixed(Instant.ofEpochMilli(1585909518929L), ZoneId.systemDefault());
    // when
    String currentTimestamp = AwsRequestUtils.currentTimestamp(clock);
    // then
    assertEquals("20200403T102518Z", currentTimestamp);
}
/**
 * Returns the next JSON value as a string.
 *
 * <p>Accepts single- or double-quoted strings, unquoted literals (lenient mode), a
 * previously peeked/buffered string, and numbers — numbers are returned in their literal
 * textual form without parsing. Consumes the token and advances the JSON-path index.
 *
 * @throws IOException on underlying read errors
 */
public String nextString() throws IOException {
    int p = peeked;
    if (p == PEEKED_NONE) {
        p = doPeek();
    }
    String result;
    if (p == PEEKED_UNQUOTED) {
        result = nextUnquotedValue();
    } else if (p == PEEKED_SINGLE_QUOTED) {
        result = nextQuotedValue('\'');
    } else if (p == PEEKED_DOUBLE_QUOTED) {
        result = nextQuotedValue('"');
    } else if (p == PEEKED_BUFFERED) {
        result = peekedString;
        peekedString = null;
    } else if (p == PEEKED_LONG) {
        result = Long.toString(peekedLong);
    } else if (p == PEEKED_NUMBER) {
        // Return the number verbatim from the buffer without numeric conversion.
        result = new String(buffer, pos, peekedNumberLength);
        pos += peekedNumberLength;
    } else {
        throw unexpectedTokenError("a string");
    }
    peeked = PEEKED_NONE;
    // Track position within the current container for JSON-path reporting.
    pathIndices[stackSize - 1]++;
    return result;
}
/** Non-strict mode must accept an unescaped control character (tab) inside a string verbatim. */
@Test
public void testNonStrictModeParsesUnescapedControlCharacter() throws IOException {
    String json = "\"\t\"";
    JsonReader reader = new JsonReader(reader(json));
    assertThat(reader.nextString()).isEqualTo("\t");
}
/**
 * Builds a human-readable description of an Elasticsearch search request, appending the
 * target indices when the request names any.
 */
static String computeDetailsAsString(SearchRequest searchRequest) {
    String details = String.format("ES search request '%s'", searchRequest);
    String[] indices = searchRequest.indices();
    if (indices.length > 0) {
        details += String.format(ON_INDICES_MESSAGE, Arrays.toString(indices));
    }
    return details;
}
/** PutMappingRequest details must name the target indices. */
@Test
public void should_format_PutMappingRequest() {
    PutMappingRequest request = new PutMappingRequest("index-1");
    assertThat(EsRequestDetails.computeDetailsAsString(request))
        .isEqualTo("ES put mapping request on indices 'index-1'");
}
/** Returns the Dubbo service name for this call, derived from the invoker by DubboParser. */
@Override
public String service() {
    return DubboParser.service(invoker);
}
/** service() must resolve the interface name from the invoker's URL. */
@Test
void service() {
    when(invocation.getInvoker()).thenReturn(invoker);
    when(invoker.getUrl()).thenReturn(url);
    assertThat(request.service())
        .isEqualTo("brave.dubbo.GreeterService");
}
/**
 * Sets the partition replica index this operation targets, recording via the replica-index
 * flag whether it differs from the primary (index 0).
 *
 * @param replicaIndex replica index in {@code [0, InternalPartition.MAX_REPLICA_COUNT - 1]}
 * @return this operation, for call chaining
 * @throws IllegalArgumentException if the index is outside the valid range
 */
public final Operation setReplicaIndex(int replicaIndex) {
    int maxIndex = InternalPartition.MAX_REPLICA_COUNT - 1;
    if (replicaIndex < 0 || replicaIndex > maxIndex) {
        throw new IllegalArgumentException(
                "Replica index is out of range [0-" + maxIndex + "]: " + replicaIndex);
    }
    setFlag(replicaIndex != 0, BITMASK_REPLICA_INDEX_SET);
    this.replicaIndex = replicaIndex;
    return this;
}
/** A negative replica index must be rejected with IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void shouldThrowException_whenReplicaIndexInvalid() {
    Operation op = new DummyOperation();
    op.setReplicaIndex(-1);
}
/**
 * Builds a compiled execution with the boolean flag defaulted to {@code false} —
 * presumably the ordered-execution switch; confirm against buildExecution(boolean).
 */
public CompiledPipeline.CompiledExecution buildExecution() {
    return buildExecution(false);
}
/**
 * Compiling a second pipeline that differs only in plugin ids must not grow the generated
 * class cache — the assertion requires a delta of exactly zero.
 */
@Test
@SuppressWarnings({"unchecked"})
public void testCacheCompiledClassesWithDifferentId() throws IOException, InvalidIRException {
    final FixedPluginFactory pluginFactory = new FixedPluginFactory(
        () -> null, () -> IDENTITY_FILTER, mockOutputSupplier());
    final PipelineIR baselinePipeline = ConfigCompiler.configToPipelineIR(
        IRHelpers.toSourceWithMetadataFromPath("org/logstash/config/ir/cache/pipeline1.conf"), false, null);
    final CompiledPipeline cBaselinePipeline = new CompiledPipeline(baselinePipeline, pluginFactory);
    final ConfigVariableExpander cve = ConfigVariableExpander.withoutSecret(EnvironmentVariableProvider.defaultProvider());
    final PipelineIR pipelineWithDifferentId = ConfigCompiler.configToPipelineIR(
        IRHelpers.toSourceWithMetadataFromPath("org/logstash/config/ir/cache/pipeline2.conf"), false, cve);
    final CompiledPipeline cPipelineWithDifferentId = new CompiledPipeline(pipelineWithDifferentId, pluginFactory);
    // actual test: a pipeline differing only in ids must add no new cached classes.
    // NOTE(review): an earlier comment here mentioned "1 extra class" for an extra filter,
    // which contradicts the zero-delta assertion below — it appeared copied from a sibling test.
    ComputeStepSyntaxElement.cleanClassCache();
    cBaselinePipeline.buildExecution();
    final int cachedBefore = ComputeStepSyntaxElement.classCacheSize();
    cPipelineWithDifferentId.buildExecution();
    final int cachedAfter = ComputeStepSyntaxElement.classCacheSize();
    final String message = String.format("unexpected cache size, cachedAfter: %d, cachedBefore: %d", cachedAfter, cachedBefore);
    assertEquals(message, 0, cachedAfter - cachedBefore);
}
/**
 * CLI entry point: validates that command, password and input are present, configures the
 * encryptor (algorithm and salt/IV generators when supplied), then encrypts or decrypts
 * the input and prints the result.
 */
public void run(String[] args) {
    if (!parseArguments(args)) {
        showOptions();
        return;
    }
    if (command == null) {
        System.out.println("Error: Command is empty");
        System.out.println();
        showOptions();
        return;
    }
    if (password == null) {
        System.out.println("Error: Password is empty");
        System.out.println();
        showOptions();
        return;
    }
    if (input == null) {
        System.out.println("Error: Input is empty");
        System.out.println();
        showOptions();
        return;
    }
    encryptor.setPassword(password);
    if (algorithm != null) {
        encryptor.setAlgorithm(algorithm);
    }
    if (randomSaltGeneratorAlgorithm != null) {
        encryptor.setSaltGenerator(new RandomSaltGenerator(randomSaltGeneratorAlgorithm));
    }
    if (randomIvGeneratorAlgorithm != null) {
        encryptor.setIvGenerator(new RandomIvGenerator(randomIvGeneratorAlgorithm));
    }
    // Any command other than "encrypt" falls through to decrypt.
    if ("encrypt".equals(command)) {
        System.out.println("Encrypted text: " + encryptor.encrypt(input));
    } else {
        System.out.println("Decrypted text: " + encryptor.decrypt(input));
    }
}
/** Running the CLI in decrypt mode with a valid ciphertext must not throw. */
@Test
public void testMainDecrypt() {
    Main main = new Main();
    assertDoesNotThrow(() -> main.run("-c decrypt -p secret -i bsW9uV37gQ0QHFu7KO03Ww==".split(" ")));
}
/**
 * Toggles the suspended state and forwards the matching command to the twin thread,
 * keeping the GUI item and its worker thread in lock-step.
 */
@Override
public void click() {
    if (!isSuspended) {
        isSuspended = true;
        twin.suspendMe();
    } else {
        isSuspended = false;
        twin.resumeMe();
    }
}
/** Alternating clicks must alternate suspendMe()/resumeMe() on the twin thread, in order. */
@Test
void testClick() {
    final var ballThread = mock(BallThread.class);
    final var ballItem = new BallItem();
    ballItem.setTwin(ballThread);
    final var inOrder = inOrder(ballThread);
    IntStream.range(0, 10).forEach(i -> {
        ballItem.click();
        inOrder.verify(ballThread).suspendMe();
        ballItem.click();
        inOrder.verify(ballThread).resumeMe();
    });
    inOrder.verifyNoMoreInteractions();
}
/** Creates a FhirIO transform that reads FHIR resources (static factory for {@code Read}). */
public static Read readResources() {
    return new Read();
}
/** Invalid message ids must land in the failed-reads output, leaving resources empty. */
@Test
public void test_FhirIO_failedReads() {
    List<String> badMessageIDs = Arrays.asList("foo", "bar");
    FhirIO.Read.Result readResult =
        pipeline.apply(Create.of(badMessageIDs)).apply(FhirIO.readResources());
    PCollection<HealthcareIOError<String>> failed = readResult.getFailedReads();
    PCollection<String> resources = readResult.getResources();
    PCollection<String> failedMsgIds = failed.apply(
        MapElements.into(TypeDescriptors.strings()).via(HealthcareIOError::getDataResource));
    PAssert.that(failedMsgIds).containsInAnyOrder(badMessageIDs);
    PAssert.that(resources).empty();
    pipeline.run();
}
/**
 * Stores a single {@code key=value} pair into the user configuration file.
 *
 * <p>Returns 1 (error) when the argument contains no '=' separator. Note the property
 * file is created before the argument is validated.
 */
@Override
public Integer doCall() throws Exception {
    CommandLineHelper.createPropertyFile();
    // split("=").length == 1 also rejects "key=" (empty value) since trailing empty
    // strings are dropped by String.split.
    if (configuration.split("=").length == 1) {
        printer().println("Configuration parameter not in key=value format");
        return 1;
    }
    CommandLineHelper.loadProperties(properties -> {
        String key = StringHelper.before(configuration, "=").trim();
        String value = StringHelper.after(configuration, "=").trim();
        properties.put(key, value);
        CommandLineHelper.storeProperties(properties, printer());
    });
    return 0;
}
/** Setting "foo=bar" must persist exactly that property and print nothing. */
@Test
public void shouldSetConfig() throws Exception {
    UserConfigHelper.createUserConfig("");
    ConfigSet command = new ConfigSet(new CamelJBangMain().withPrinter(printer));
    command.configuration = "foo=bar";
    command.doCall();
    Assertions.assertEquals("", printer.getOutput());
    CommandLineHelper.loadProperties(properties -> {
        Assertions.assertEquals(1, properties.size());
        Assertions.assertEquals("bar", properties.get("foo"));
    });
}
/**
 * Converts a telemetry edge event into a {@code DownlinkMsg}.
 *
 * <p>Returns {@code null} — after firing an edge-communication-failure notification — when
 * the serialized event body exceeds {@code maxTelemetryMessageSize}; a limit of 0 or less
 * disables the size check.
 */
public DownlinkMsg convertTelemetryEventToDownlink(Edge edge, EdgeEvent edgeEvent) {
    if (edgeEvent.getBody() != null) {
        String bodyStr = edgeEvent.getBody().toString();
        if (maxTelemetryMessageSize > 0 && bodyStr.length() > maxTelemetryMessageSize) {
            String error = "Conversion to a DownlinkMsg telemetry event failed due to a size limit violation.";
            String message = String.format("%s Current size is %s, but the limit is %s", error, bodyStr.length(), maxTelemetryMessageSize);
            // Only the first 100 characters of the oversized body are logged.
            log.debug("[{}][{}][{}] {}. {}", edgeEvent.getTenantId(), edgeEvent.getEdgeId(), edgeEvent.getEntityId(), message, StringUtils.truncate(bodyStr, 100));
            notificationRuleProcessor.process(EdgeCommunicationFailureTrigger.builder().tenantId(edgeEvent.getTenantId())
                    .edgeId(edgeEvent.getEdgeId()).customerId(edge.getCustomerId()).edgeName(edge.getName()).failureMsg(message).error(error).build());
            return null;
        }
    }
    EntityType entityType = EntityType.valueOf(edgeEvent.getType().name());
    EntityDataProto entityDataProto = convertTelemetryEventToEntityDataProto(
            edgeEvent.getTenantId(), entityType, edgeEvent.getEntityId(), edgeEvent.getAction(), edgeEvent.getBody());
    return DownlinkMsg.newBuilder()
            .setDownlinkMsgId(EdgeUtils.nextPositiveInt())
            .addEntityData(entityDataProto)
            .build();
}
/** A body over the configured size limit must yield null and trigger a failure notification. */
@Test
public void testConvert_maxSizeLimit() {
    Edge edge = new Edge();
    EdgeEvent edgeEvent = new EdgeEvent();
    ObjectNode body = JacksonUtil.newObjectNode();
    body.put("value", StringUtils.randomAlphanumeric(1000));
    edgeEvent.setBody(body);
    DownlinkMsg downlinkMsg = telemetryEdgeProcessor.convertTelemetryEventToDownlink(edge, edgeEvent);
    Assert.assertNull(downlinkMsg);
    verify(notificationRuleProcessor, Mockito.times(1)).process(any());
}
/** Asserts that the subject equals {@code expected}; delegates to the standard equality check. */
public void isEqualTo(@Nullable Object expected) {
    standardIsEqualTo(expected);
}
/** A null subject compared to null must pass. */
@Test
public void isEqualToWithNulls() {
    Object o = null;
    assertThat(o).isEqualTo(null);
}
/**
 * (Re)opens the log file for appending, closing any previously open stream first.
 * Synchronized so concurrent open/close calls cannot interleave.
 *
 * @return the freshly opened append-mode stream
 * @throws RuntimeException wrapping FileNotFoundException if the file cannot be opened
 *         (e.g. the path is a directory)
 */
public synchronized OutputStream open() {
    try {
        close();
        fileOutputStream = new FileOutputStream(file, true);
    } catch (FileNotFoundException e) {
        throw new RuntimeException("Unable to open output stream", e);
    }
    return fileOutputStream;
}
/** Opening a path that is actually a directory must fail with the wrapping RuntimeException. */
@Test(expected = RuntimeException.class)
public void requireThatExceptionIsThrowIfFileNotFound() throws IOException {
    File file = new File("mydir1");
    file.delete();
    assertTrue(file.mkdir());
    new FileLogTarget(file).open();
}
/**
 * Builds a {@code ModelMBeanInfo} by merging attributes, operations and notifications
 * extracted from the default and (optionally) custom managed beans. Returns {@code null}
 * for missing beans/name or when the default bean is a JDK proxy. Custom-bean details
 * are extracted after (and so can extend) the default bean's; the custom bean also wins
 * for the MBean's name and description.
 */
public ModelMBeanInfo getMBeanInfo(Object defaultManagedBean, Object customManagedBean, String objectName) throws JMException {
    if ((defaultManagedBean == null && customManagedBean == null) || objectName == null) return null;
    // skip proxy classes
    if (defaultManagedBean != null && Proxy.isProxyClass(defaultManagedBean.getClass())) {
        LOGGER.trace("Skip creating ModelMBeanInfo due proxy class {}", defaultManagedBean.getClass());
        return null;
    }
    // maps and lists to contain information about attributes and operations
    Map<String, ManagedAttributeInfo> attributes = new LinkedHashMap<>();
    Set<ManagedOperationInfo> operations = new LinkedHashSet<>();
    Set<ModelMBeanAttributeInfo> mBeanAttributes = new LinkedHashSet<>();
    Set<ModelMBeanOperationInfo> mBeanOperations = new LinkedHashSet<>();
    Set<ModelMBeanNotificationInfo> mBeanNotifications = new LinkedHashSet<>();
    // extract details from default managed bean
    if (defaultManagedBean != null) {
        extractAttributesAndOperations(defaultManagedBean.getClass(), attributes, operations);
        extractMbeanAttributes(defaultManagedBean, attributes, mBeanAttributes, mBeanOperations);
        extractMbeanOperations(defaultManagedBean, operations, mBeanOperations);
        extractMbeanNotifications(defaultManagedBean, mBeanNotifications);
    }
    // extract details from custom managed bean
    if (customManagedBean != null) {
        extractAttributesAndOperations(customManagedBean.getClass(), attributes, operations);
        extractMbeanAttributes(customManagedBean, attributes, mBeanAttributes, mBeanOperations);
        extractMbeanOperations(customManagedBean, operations, mBeanOperations);
        extractMbeanNotifications(customManagedBean, mBeanNotifications);
    }
    // create the ModelMBeanInfo
    String name = getName(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName);
    String description = getDescription(customManagedBean != null ? customManagedBean : defaultManagedBean, objectName);
    ModelMBeanAttributeInfo[] arrayAttributes = mBeanAttributes.toArray(new ModelMBeanAttributeInfo[mBeanAttributes.size()]);
    ModelMBeanOperationInfo[] arrayOperations = mBeanOperations.toArray(new ModelMBeanOperationInfo[mBeanOperations.size()]);
    ModelMBeanNotificationInfo[] arrayNotifications = mBeanNotifications.toArray(new ModelMBeanNotificationInfo[mBeanNotifications.size()]);
    ModelMBeanInfo info = new ModelMBeanInfoSupport(name, description, arrayAttributes, null, arrayOperations, arrayNotifications);
    LOGGER.trace("Created ModelMBeanInfo {}", info);
    return info;
}
/** A managed attribute setter that returns a value must be rejected. */
@Test(expected = IllegalArgumentException.class)
public void testAttributeSetterHavingResult() throws JMException {
    mbeanInfoAssembler.getMBeanInfo(new BadAttributeSetterHavinReturn(), null, "someName");
}
/**
 * Returns whether the given value is one of this definition's configured choices.
 *
 * <p>NOTE(review): the unchecked cast means a non-{@code StringParameterValue} argument
 * fails with {@code ClassCastException} rather than returning {@code false} — behavior
 * pinned by the JENKINS-62889 test.
 */
@Override
public boolean isValid(ParameterValue value) {
    return choices.contains(((StringParameterValue) value).getValue());
}
/** Passing a non-string parameter value must fail with ClassCastException (JENKINS-62889). */
@Test
@Issue("JENKINS-62889")
public void checkValue_WrongValueType() {
    String stringValue = "single";
    String[] choices = new String[]{stringValue};
    ChoiceParameterDefinition parameterDefinition = new ChoiceParameterDefinition("name", choices, "description");
    BooleanParameterValue parameterValue = new BooleanParameterValue("choice", false);
    assertThrows(ClassCastException.class, () -> parameterDefinition.isValid(parameterValue));
}
/**
 * Returns a buffered reader over the request body stream.
 *
 * <p>NOTE(review): the {@code InputStreamReader} uses the platform default charset; the
 * servlet contract decodes the body using the request's character encoding — confirm
 * whether this wrapper should pass {@code getCharacterEncoding()} here.
 */
@Override
public BufferedReader getReader() throws IOException {
    return new BufferedReader(new InputStreamReader(getInputStream()));
}
/** getReader() on the reusable request must return a non-null reader. */
@Test
void testGetReader() throws IOException {
    BufferedReader reader = reuseHttpServletRequest.getReader();
    assertNotNull(reader);
}
/**
 * Allocates containers for the outstanding OPPORTUNISTIC requests, looping until no
 * scheduler key yields further allocations or the per-AM-heartbeat allocation cap
 * (when positive) is reached.
 */
@Override
public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
    List<ResourceRequest> oppResourceReqs, ApplicationAttemptId applicationAttemptId,
    OpportunisticContainerContext opportContext, long rmIdentifier, String appSubmitter)
    throws YarnException {
  // Update black list.
  updateBlacklist(blackList, opportContext);
  // Add OPPORTUNISTIC requests to the outstanding ones.
  opportContext.addToOutstandingReqs(oppResourceReqs);
  Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
  Set<String> allocatedNodes = new HashSet<>();
  List<Container> allocatedContainers = new ArrayList<>();
  // Satisfy the outstanding OPPORTUNISTIC requests.
  boolean continueLoop = true;
  while (continueLoop) {
    continueLoop = false;
    List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
    for (SchedulerRequestKey schedulerKey :
        opportContext.getOutstandingOpReqs().descendingKeySet()) {
      // Allocated containers :
      //   Key = Requested Capability,
      //   Value = List of Containers of given cap (the actual container size
      //           might be different than what is requested, which is why
      //           we need the requested capability (key) to match against
      //           the outstanding reqs)
      int remAllocs = -1;
      int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
      if (maxAllocationsPerAMHeartbeat > 0) {
        remAllocs = maxAllocationsPerAMHeartbeat - allocatedContainers.size()
            - getTotalAllocations(allocations);
        if (remAllocs <= 0) {
          LOG.info("Not allocating more containers as we have reached max "
              + "allocations per AM heartbeat {}", maxAllocationsPerAMHeartbeat);
          break;
        }
      }
      Map<Resource, List<Allocation>> allocation = allocate(
          rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
          appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
      if (allocation.size() > 0) {
        allocations.add(allocation);
        continueLoop = true;
      }
    }
    matchAllocation(allocations, allocatedContainers, opportContext);
  }
  return allocatedContainers;
}
/** One opportunistic request against one node must yield one container and drain the queue. */
@Test
public void testSimpleAllocation() throws Exception {
    ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance(
        new ArrayList<>(), new ArrayList<>());
    List<ResourceRequest> reqs = Arrays.asList(ResourceRequest.newInstance(PRIORITY_NORMAL,
        "*", CAPABILITY_1GB, 1, true, null, OPPORTUNISTIC_REQ));
    ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(0L, 1), 1);
    oppCntxt.updateNodeList(
        Arrays.asList(
            RemoteNode.newInstance(
                NodeId.newInstance("h1", 1234), "h1:1234", "/r1")));
    List<Container> containers = allocator.allocateContainers(
        blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser");
    Assert.assertEquals(1, containers.size());
    Assert.assertEquals(0, oppCntxt.getOutstandingOpReqs().size());
}
/** Compiles a SQL query string to a BrokerRequest via the Calcite-parsed PinotQuery. */
public static BrokerRequest compileToBrokerRequest(String query) {
    return convertToBrokerRequest(CalciteSqlParser.compileToPinotQuery(query));
}
/** Regression: a numeric literal beyond int range must compile without throwing (notably NPE). */
@Test
public void testSqlNumericalLiteralIntegerNPE() {
    CalciteSqlCompiler.compileToBrokerRequest("SELECT * FROM testTable WHERE floatColumn > " + Double.MAX_VALUE);
}
/**
 * Deserializes an offset partition key and records (or, for a tombstone value, removes)
 * the partition under its connector name in {@code connectorPartitions}. Keys that are
 * null, non-JSON, or not of the expected shape are logged and ignored.
 */
@SuppressWarnings("unchecked")
public static void processPartitionKey(byte[] partitionKey, byte[] offsetValue, Converter keyConverter,
    Map<String, Set<Map<String, Object>>> connectorPartitions) {
  // The key is expected to always be of the form [connectorName, partition] where connectorName is a
  // string value and partition is a Map<String, Object>
  if (partitionKey == null) {
    log.warn("Ignoring offset partition key with an unexpected null value");
    return;
  }
  // The topic parameter is irrelevant for the JsonConverter which is the internal converter used by
  // Connect workers.
  Object deserializedKey;
  try {
    deserializedKey = keyConverter.toConnectData("", partitionKey).value();
  } catch (DataException e) {
    log.warn("Ignoring offset partition key with unknown serialization. Expected json.", e);
    return;
  }
  if (!(deserializedKey instanceof List)) {
    log.warn("Ignoring offset partition key with an unexpected format. Expected type: {}, actual type: {}",
        List.class.getName(), className(deserializedKey));
    return;
  }
  List<Object> keyList = (List<Object>) deserializedKey;
  if (keyList.size() != 2) {
    log.warn("Ignoring offset partition key with an unexpected number of elements. Expected: 2, actual: {}",
        keyList.size());
    return;
  }
  if (!(keyList.get(0) instanceof String)) {
    log.warn("Ignoring offset partition key with an unexpected format for the first element in the partition key list. "
        + "Expected type: {}, actual type: {}", String.class.getName(), className(keyList.get(0)));
    return;
  }
  if (!(keyList.get(1) instanceof Map)) {
    // A null partition element is ignored silently; any other non-Map type is logged.
    if (keyList.get(1) != null) {
      log.warn("Ignoring offset partition key with an unexpected format for the second element in the partition key list. "
          + "Expected type: {}, actual type: {}", Map.class.getName(), className(keyList.get(1)));
    }
    return;
  }
  String connectorName = (String) keyList.get(0);
  Map<String, Object> partition = (Map<String, Object>) keyList.get(1);
  connectorPartitions.computeIfAbsent(connectorName, ignored -> new HashSet<>());
  if (offsetValue == null) {
    // A null offset value (tombstone) deletes the offset for this partition.
    connectorPartitions.get(connectorName).remove(partition);
  } else {
    connectorPartitions.get(connectorName).add(partition);
  }
}
/** A key whose partition element is null must be ignored silently — no entry, no warning. */
@Test
public void testProcessPartitionKeyNullPartition() {
    try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(OffsetUtils.class)) {
        Map<String, Set<Map<String, Object>>> connectorPartitions = new HashMap<>();
        OffsetUtils.processPartitionKey(serializePartitionKey(Arrays.asList("connector-name", null)), new byte[0], CONVERTER, connectorPartitions);
        assertEquals(Collections.emptyMap(), connectorPartitions);
        assertEquals(0, logCaptureAppender.getMessages().size());
    }
}
/** Returns the mutable path keys held by this request context. */
@Override
public MutablePathKeys getPathKeys() {
    return _pathKeys;
}
/** The path-keys map exposed by the context must reject mutation. */
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testUnmodifiablePathKeysMap() throws RestLiSyntaxException {
    final ResourceContextImpl context = new ResourceContextImpl();
    context.getPathKeys().getKeyMap().put("should", "puke");
}
/**
 * Messaging-span pre-processing: applies the base decorator's tags, then adds the
 * destination tag and — when the exchange carries one — the message id tag.
 */
@Override
public void pre(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
    super.pre(span, exchange, endpoint);
    span.setTag(TagConstants.MESSAGE_BUS_DESTINATION, getDestination(exchange, endpoint));
    String messageId = getMessageId(exchange);
    if (messageId != null) {
        span.setTag(TagConstants.MESSAGE_ID, messageId);
    }
}
/** pre() must tag the span with the message id supplied by the decorator. */
@Test
public void testPreMessageId() {
    String messageId = "abcd";
    Endpoint endpoint = Mockito.mock(Endpoint.class);
    Exchange exchange = Mockito.mock(Exchange.class);
    Mockito.when(endpoint.getEndpointUri()).thenReturn("test");
    // Anonymous decorator that always reports the fixed message id.
    SpanDecorator decorator = new AbstractMessagingSpanDecorator() {
        @Override
        public String getComponent() {
            return null;
        }
        @Override
        public String getComponentClassName() {
            return null;
        }
        @Override
        public String getMessageId(Exchange exchange) {
            return messageId;
        }
    };
    MockSpanAdapter span = new MockSpanAdapter();
    decorator.pre(span, exchange, endpoint);
    assertEquals(messageId, span.tags().get(TagConstants.MESSAGE_ID));
}
/**
 * Builds the platform-specific command to signal (or, for code 0, liveness-check) a
 * process: winutils on Windows, otherwise the bash-builtin kill — addressed to the
 * process group ("-- -pid") when setsid is available.
 */
public static String[] getSignalKillCommand(int code, String pid) {
    // Code == 0 means check alive
    if (Shell.WINDOWS) {
        if (0 == code) {
            return new String[] {Shell.getWinUtilsPath(), "task", "isAlive", pid };
        } else {
            return new String[] {Shell.getWinUtilsPath(), "task", "kill", pid };
        }
    }
    // Use the bash-builtin instead of the Unix kill command (usually
    // /bin/kill) as the bash-builtin supports "--" in all Hadoop supported
    // OSes.
    final String quotedPid = bashQuote(pid);
    if (isSetsidAvailable) {
        return new String[] { "bash", "-c", "kill -" + code + " -- -" + quotedPid };
    } else {
        return new String[] { "bash", "-c", "kill -" + code + " " + quotedPid };
    }
}
// Verifies the generated kill command per platform: winutils task kill on
// Windows, bash-builtin kill with "--" when setsid is available, plain kill otherwise.
@Test
public void testGetSignalKillCommand() throws Exception {
    String anyPid = "9999";
    int anySignal = 9;
    String[] checkProcessAliveCommand = getSignalKillCommand(anySignal, anyPid);
    String[] expectedCommand;
    if (Shell.WINDOWS) {
        expectedCommand = new String[]{getWinUtilsPath(), "task", "kill", anyPid };
    } else if (Shell.isSetsidAvailable) {
        expectedCommand = new String[] { "bash", "-c", "kill -9 -- -'" + anyPid + "'"};
    } else {
        expectedCommand = new String[]{ "bash", "-c", "kill -9 '" + anyPid + "'"};
    }
    Assert.assertArrayEquals(expectedCommand, checkProcessAliveCommand);
}
/**
 * Constructs a {@link DataflowRunner} from the given options after performing all
 * service-side validation: required options, GCS temp/staging locations, files to
 * stage, job name normalization, project id format, debug settings, and finally
 * the user-agent string. Validation failures surface as IllegalArgumentException.
 */
public static DataflowRunner fromOptions(PipelineOptions options) {
    DataflowPipelineOptions dataflowOptions =
        PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
    ArrayList<String> missing = new ArrayList<>();

    // Collect all missing required options so they can be reported in one message.
    if (dataflowOptions.getAppName() == null) {
        missing.add("appName");
    }
    if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
        && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
        missing.add("region");
    }
    if (missing.size() > 0) {
        throw new IllegalArgumentException(
            "Missing required pipeline options: " + Joiner.on(',').join(missing));
    }

    validateWorkerSettings(
        PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));

    PathValidator validator = dataflowOptions.getPathValidator();
    String gcpTempLocation;
    try {
        gcpTempLocation = dataflowOptions.getGcpTempLocation();
    } catch (Exception e) {
        throw new IllegalArgumentException(
            "DataflowRunner requires gcpTempLocation, "
                + "but failed to retrieve a value from PipelineOptions",
            e);
    }
    validator.validateOutputFilePrefixSupported(gcpTempLocation);

    String stagingLocation;
    try {
        stagingLocation = dataflowOptions.getStagingLocation();
    } catch (Exception e) {
        throw new IllegalArgumentException(
            "DataflowRunner requires stagingLocation, "
                + "but failed to retrieve a value from PipelineOptions",
            e);
    }
    validator.validateOutputFilePrefixSupported(stagingLocation);

    if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
        validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
    }

    if (dataflowOptions.getFilesToStage() != null) {
        // The user specifically requested these files, so fail now if they do not exist.
        // (automatically detected classpath elements are permitted to not exist, so later
        // staging will not fail on nonexistent files)
        dataflowOptions.getFilesToStage().stream()
            .forEach(
                stagedFileSpec -> {
                    File localFile;
                    if (stagedFileSpec.contains("=")) {
                        // "target=path" form: the local file is the part after '='.
                        String[] components = stagedFileSpec.split("=", 2);
                        localFile = new File(components[1]);
                    } else {
                        localFile = new File(stagedFileSpec);
                    }
                    if (!localFile.exists()) {
                        // should be FileNotFoundException, but for build-time backwards compatibility
                        // cannot add checked exception
                        throw new RuntimeException(
                            String.format("Non-existent files specified in filesToStage: %s", localFile));
                    }
                });
    } else {
        // No explicit list: default to staging everything on the classpath.
        dataflowOptions.setFilesToStage(
            detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
        if (dataflowOptions.getFilesToStage().isEmpty()) {
            throw new IllegalArgumentException("No files to stage has been found.");
        } else {
            LOG.info(
                "PipelineOptions.filesToStage was not specified. "
                    + "Defaulting to files from the classpath: will stage {} files. "
                    + "Enable logging at DEBUG level to see which files will be staged.",
                dataflowOptions.getFilesToStage().size());
            LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
        }
    }

    // Verify jobName according to service requirements, truncating converting to lowercase if
    // necessary.
    String jobName = dataflowOptions.getJobName().toLowerCase();
    checkArgument(
        jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
        "JobName invalid; the name must consist of only the characters "
            + "[-a-z0-9], starting with a letter and ending with a letter "
            + "or number");
    if (!jobName.equals(dataflowOptions.getJobName())) {
        LOG.info(
            "PipelineOptions.jobName did not match the service requirements. "
                + "Using {} instead of {}.",
            jobName,
            dataflowOptions.getJobName());
    }
    dataflowOptions.setJobName(jobName);

    // Verify project
    String project = dataflowOptions.getProject();
    if (project.matches("[0-9]*")) {
        // All-digit strings are project *numbers*, which the service does not accept here.
        throw new IllegalArgumentException(
            "Project ID '"
                + project
                + "' invalid. Please make sure you specified the Project ID, not project number.");
    } else if (!project.matches(PROJECT_ID_REGEXP)) {
        throw new IllegalArgumentException(
            "Project ID '"
                + project
                + "' invalid. Please make sure you specified the Project ID, not project"
                + " description.");
    }

    DataflowPipelineDebugOptions debugOptions =
        dataflowOptions.as(DataflowPipelineDebugOptions.class);
    // Verify the number of worker threads is a valid value
    if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
        throw new IllegalArgumentException(
            "Number of worker harness threads '"
                + debugOptions.getNumberOfWorkerHarnessThreads()
                + "' invalid. Please make sure the value is non-negative.");
    }

    // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
    if (dataflowOptions.getRecordJfrOnGcThrashing()
        && Environments.getJavaVersion() == Environments.JavaVersion.java8) {
        throw new IllegalArgumentException(
            "recordJfrOnGcThrashing is only supported on java 9 and up.");
    }

    if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
        dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
    }

    // Adding the Java version to the SDK name for user's and support convenience.
    String agentJavaVer = "(JRE 8 environment)";
    if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
        agentJavaVer =
            String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
    }

    DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
    String userAgentName = dataflowRunnerInfo.getName();
    Preconditions.checkArgument(
        !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
    String userAgentVersion = dataflowRunnerInfo.getVersion();
    Preconditions.checkArgument(
        !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
    // Spaces are not permitted in user-agent strings; substitute underscores.
    String userAgent =
        String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
    dataflowOptions.setUserAgent(userAgent);

    return new DataflowRunner(dataflowOptions);
}
// Domain-scoped project ids ("google.com:project") must pass project validation.
@Test
public void testProjectPrefix() throws IOException {
    DataflowPipelineOptions options = buildPipelineOptions();
    options.setProject("google.com:some-project-12345");
    DataflowRunner.fromOptions(options);
}
@Override public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) { for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) { Map<String, ?> sourceOffset = offsetEntry.getValue(); if (sourceOffset == null) { // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't // want to prevent users from being able to clean it up using the REST API continue; } Map<String, ?> sourcePartition = offsetEntry.getKey(); if (sourcePartition == null) { throw new ConnectException("Source partitions may not be null"); } MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_KEY); MirrorUtils.validateSourcePartitionString(sourcePartition, TOPIC_KEY); MirrorUtils.validateSourcePartitionPartition(sourcePartition); MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, false); } // We never commit offsets with our source consumer, so no additional effort is required beyond just validating // the format of the user-supplied offsets return true; }
// An offset map with an unrecognized key must be rejected with ConnectException.
@Test
public void testAlterOffsetsIncorrectOffsetKey() {
    MirrorSourceConnector connector = new MirrorSourceConnector();
    Map<Map<String, ?>, Map<String, ?>> offsets = Collections.singletonMap(
            sourcePartition("t1", 2, "backup"),
            Collections.singletonMap("unused_offset_key", 0)
    );
    assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets));
}
/**
 * Gets connections for the given data source. Thin wrapper that delegates
 * directly to {@code getConnections0}.
 *
 * @throws SQLException if obtaining the connections fails
 */
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName,
                                       final int connectionOffset, final int connectionSize,
                                       final ConnectionMode connectionMode) throws SQLException {
    return getConnections0(databaseName, dataSourceName, connectionOffset, connectionSize, connectionMode);
}
// Same offset must return the same connections (cached per offset);
// different offsets must return different connections.
@Test
void assertGetConnectionWithConnectionOffset() throws SQLException {
    assertThat(databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.MEMORY_STRICTLY),
            is(databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.MEMORY_STRICTLY)));
    assertThat(databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 1, 1, ConnectionMode.MEMORY_STRICTLY),
            is(databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 1, 1, ConnectionMode.MEMORY_STRICTLY)));
    assertThat(databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.MEMORY_STRICTLY),
            not(databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 1, 1, ConnectionMode.MEMORY_STRICTLY)));
}
/**
 * Returns the Pulsar consumers currently backing this source.
 */
public abstract List<Consumer<T>> getInputConsumers();
// When open() fails part-way through, close() must still leave the already
// created consumer tracked (no dangling, untracked subscriptions).
@Test
public void testDanglingSubscriptions() throws Exception {
    MultiConsumerPulsarSourceConfig pulsarConfig = getMultiConsumerPulsarConfigs(true);
    MultiConsumerPulsarSource<?> pulsarSource =
            new MultiConsumerPulsarSource<>(getPulsarClient(), pulsarConfig, new HashMap<>(),
                    Thread.currentThread().getContextClassLoader());
    try {
        pulsarSource.open(new HashMap<>(), Mockito.mock(SourceContext.class));
        fail();
    } catch (CompletionException e) {
        pulsarSource.close();
        assertEquals(pulsarSource.getInputConsumers().size(), 1);
    } catch (Exception e) {
        fail();
    }
}
/**
 * Stores the node list for the given group within the given cluster,
 * creating the per-cluster group map on first use.
 */
public void setNodes(String clusterName, String group, List<Node> nodes) {
    this.clusterNodes
            .computeIfAbsent(clusterName, name -> new ConcurrentHashMap<>())
            .put(group, nodes);
}
// setNodes must succeed for a previously unseen cluster/group pair.
@Test
public void testSetNodes() {
    Assertions.assertDoesNotThrow(() -> metadata.setNodes("cluster", "group", new ArrayList<>()));
}
/**
 * Returns a masked copy of the statement with sensitive literals hidden.
 * If parsing fails for any reason — including a StackOverflowError from
 * deeply nested input — a regex-based fallback masking is applied instead.
 */
public static String getMaskedStatement(final String query) {
    try {
        final ParseTree tree = DefaultKsqlParser.getParseTree(query);
        return new Visitor().visit(tree);
    } catch (final Exception | StackOverflowError e) {
        // Parsing is best-effort here; never let a malformed query escape unmasked.
        return fallbackMasking(query);
    }
}
// CREATE SOURCE CONNECTOR IF NOT EXISTS statements must have all string
// property values masked as '[string]' regardless of quoting style.
@Test
public void shouldMaskIfNotExistSourceConnector() {
    // Given:
    final String query = "CREATE SOURCE CONNECTOR IF NOT EXISTS testconnector WITH ("
            + " \"connector.class\" = 'PostgresSource', \n"
            + " 'connection.url' = 'jdbc:postgresql://localhost:5432/my.db',\n"
            + " `mode`='bulk',\n"
            + " \"topic.prefix\"='jdbc-',\n"
            + " \"table.whitelist\"='users',\n"
            + " \"key\"='username');";

    // When
    final String maskedQuery = QueryMask.getMaskedStatement(query);

    // Then
    final String expected = "CREATE SOURCE CONNECTOR IF NOT EXISTS testconnector WITH "
            + "(\"connector.class\"='PostgresSource', "
            + "'connection.url'='[string]', "
            + "`mode`='[string]', "
            + "\"topic.prefix\"='[string]', "
            + "\"table.whitelist\"='[string]', "
            + "\"key\"='[string]');";
    assertThat(maskedQuery, is(expected));
}
/**
 * Deserializes the raw config content, invokes the optional callback on the
 * editable config, then validates it and wraps both views in a holder.
 *
 * @param content  raw XML configuration
 * @param callback optional hook invoked with the config-for-edit before validation
 * @return holder pairing the validated config with its editable counterpart
 * @throws Exception if deserialization, the callback, or validation fails
 */
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
    LOGGER.debug("[Config Save] Loading config holder");
    final CruiseConfig configForEdit = deserializeConfig(content);
    if (callback != null) {
        callback.call(configForEdit);
    }
    final CruiseConfig validatedConfig = preprocessAndValidate(configForEdit);
    return new GoConfigHolder(validatedConfig, configForEdit);
}
// A config-repo with a nested <configuration> block must load with its
// properties accessible by key.
@Test
void shouldLoadConfigWithConfigRepoAndConfiguration() throws Exception {
    CruiseConfig cruiseConfig = xmlLoader.loadConfigHolder(configWithConfigRepos(
            """
                      <config-repos>
                        <config-repo id="id1" pluginId="gocd-xml">
                          <git url="https://github.com/tomzo/gocd-indep-config-part.git" />
                          <configuration>
                            <property>
                              <key>pattern</key>
                              <value>*.gocd.xml</value>
                            </property>
                          </configuration>
                        </config-repo >
                      </config-repos>
                    """
    )).config;
    assertThat(cruiseConfig.getConfigRepos().size()).isEqualTo(1);
    ConfigRepoConfig configRepo = cruiseConfig.getConfigRepos().get(0);
    assertThat(configRepo.getConfiguration().size()).isEqualTo(1);
    assertThat(configRepo.getConfiguration().getProperty("pattern").getValue()).isEqualTo("*.gocd.xml");
}
/**
 * Runs a docker command through the privileged operation executor and returns
 * its trimmed output.
 *
 * @param dockerCommand the docker command to run
 * @param containerId the container the command targets
 * @param env environment for the privileged operation
 * @param privilegedOperationExecutor executor used to run the operation
 * @param disableFailureLogging when true, suppress failure logging on the operation
 * @param nmContext node manager context used to prepare the operation
 * @return trimmed command output (may be null or empty)
 * @throws ContainerExecutionException if the privileged operation fails
 */
public static String executeDockerCommand(DockerCommand dockerCommand,
    String containerId, Map<String, String> env,
    PrivilegedOperationExecutor privilegedOperationExecutor,
    boolean disableFailureLogging, Context nmContext)
    throws ContainerExecutionException {
  PrivilegedOperation operation = dockerCommand.preparePrivilegedOperation(
      dockerCommand, containerId, env, nmContext);
  if (disableFailureLogging) {
    operation.disableFailureLogging();
  }
  LOG.debug("Running docker command: {}", dockerCommand);
  try {
    String output = privilegedOperationExecutor
        .executePrivilegedOperation(null, operation, null, env, true, false);
    // Trim only when there is something to trim; null/empty pass through unchanged.
    return (output == null || output.isEmpty()) ? output : output.trim();
  } catch (PrivilegedOperationException e) {
    throw new ContainerExecutionException("Docker operation failed",
        e.getExitCode(), e.getOutput(), e.getErrorOutput());
  }
}
// A docker kill with SIGKILL must produce exactly one RUN_DOCKER_CMD operation
// whose command file carries the kill command, container name, and signal.
@Test
public void testExecuteDockerKillSIGKILL() throws Exception {
    DockerKillCommand dockerKillCommand =
            new DockerKillCommand(MOCK_CONTAINER_ID)
                    .setSignal(ContainerExecutor.Signal.KILL.name());
    DockerCommandExecutor.executeDockerCommand(dockerKillCommand, MOCK_CONTAINER_ID, env, mockExecutor, false, nmContext);
    List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
            .capturePrivilegedOperations(mockExecutor, 1, true);
    List<String> dockerCommands = getValidatedDockerCommands(ops);
    assertEquals(1, ops.size());
    assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
            ops.get(0).getOperationType().name());
    assertEquals(4, dockerCommands.size());
    assertEquals("[docker-command-execution]", dockerCommands.get(0));
    assertEquals("  docker-command=kill", dockerCommands.get(1));
    assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
    assertEquals("  signal=" + ContainerExecutor.Signal.KILL.name(), dockerCommands.get(3));
}
/**
 * Applies stored cookies to the outgoing request when they match its origin
 * (subject to the duplicate-name policy), forwards the request, and captures
 * any cookies the response sets for use by later requests.
 */
public Response filter(FilterableRequestSpecification requestSpec,
                       FilterableResponseSpecification responseSpec, FilterContext ctx) {
    final CookieOrigin origin = cookieOriginFromUri(requestSpec.getURI());

    for (Cookie stored : cookieStore.getCookies()) {
        final boolean matchesOrigin = cookieSpec.match(stored, origin);
        if (matchesOrigin
                && allowMultipleCookiesWithTheSameNameOrCookieNotPreviouslyDefined(requestSpec, stored)) {
            requestSpec.cookie(stored.getName(), stored.getValue());
        }
    }

    final Response response = ctx.next(requestSpec, responseSpec);

    // Remember cookies set by this response for subsequent requests.
    final List<Cookie> responseCookies = extractResponseCookies(response, origin);
    cookieStore.addCookies(responseCookies.toArray(new Cookie[0]));
    return response;
}
// With duplicate-name support enabled, two filter passes must accumulate both
// same-named cookies on the request, mimicking browser behavior.
@Test
public void addDuplicateNameCookiesLikeInBrowser() {
    FilterableRequestSpecification reqOriginDomainDuplicate =
            (FilterableRequestSpecification) given().with().baseUri("https://foo.com/bar");
    DuplicateTestFilterContext duplicateTestFilterContext = new DuplicateTestFilterContext();
    final CookieFilter cookieFilterDuplicate = new CookieFilter(true);
    cookieFilterDuplicate.filter(reqOriginDomainDuplicate, response, duplicateTestFilterContext);
    cookieFilterDuplicate.filter(reqOriginDomainDuplicate, response, duplicateTestFilterContext);
    assertThat(reqOriginDomainDuplicate.getCookies().size(), Matchers.is(2));
    final List<Cookie> list = reqOriginDomainDuplicate.getCookies().getList("cookieName");
    assertThat(list.get(0).getName(), Matchers.is("cookieName"));
    assertThat(list.get(0).getValue(), Matchers.anyOf(Matchers.is("xxx"), Matchers.is("yyy")));
    assertThat(list.get(1).getName(), Matchers.is("cookieName"));
    assertThat(list.get(1).getValue(), Matchers.anyOf(Matchers.is("xxx"), Matchers.is("yyy")));
}
/**
 * Requests that the given process be killed by persisting trigger nodes and
 * waiting for the operation lock to be released. If the wait does not complete,
 * the trigger nodes are rolled back so no stale kill requests remain.
 */
@Override
public void killProcess(final String processId) {
    final Collection<String> triggerPaths = getKillProcessTriggerPaths(processId);
    boolean completed = false;
    try {
        for (String triggerPath : triggerPaths) {
            repository.persist(triggerPath, "");
        }
        completed = ProcessOperationLockRegistry.getInstance()
                .waitUntilReleaseReady(processId, () -> isReady(triggerPaths));
    } finally {
        // Clean up the triggers when the kill never completed (failure or timeout).
        if (!completed) {
            triggerPaths.forEach(repository::delete);
        }
    }
}
// With one online PROXY node, killing a process must persist exactly one trigger.
@Test
void assertKillProcess() {
    when(repository.getChildrenKeys(ComputeNode.getOnlineNodePath(InstanceType.JDBC))).thenReturn(Collections.emptyList());
    when(repository.getChildrenKeys(ComputeNode.getOnlineNodePath(InstanceType.PROXY))).thenReturn(Collections.singletonList("abc"));
    processPersistService.killProcess("foo_process_id");
    verify(repository).persist(any(), any());
}
/**
 * Starts the interactive shell session. Builds a JLine terminal whose INT/QUIT
 * handler interrupts the currently running command (when one is executing) or
 * exits the shell (when idle), then enters the REPL with a line reader wired
 * for completion, bracket insertion, and history.
 *
 * @throws Exception if the terminal or shell setup fails
 */
public void run() throws Exception {
    final Terminal terminal = TerminalBuilder.builder()
            .nativeSignals(true)
            .signalHandler(signal -> {
                if (signal == Terminal.Signal.INT || signal == Terminal.Signal.QUIT) {
                    if (execState == ExecState.RUNNING) {
                        // A command is in flight: abort it but keep the shell alive.
                        throw new InterruptShellException();
                    } else {
                        // Idle at the prompt: the signal ends the session.
                        exit(0);
                    }
                }
            })
            .build();
    run((providersMap) -> {
        String serviceUrl = "";
        String adminUrl = "";
        // The last provider that reports a URL wins.
        for (ShellCommandsProvider provider : providersMap.values()) {
            final String providerServiceUrl = provider.getServiceUrl();
            if (providerServiceUrl != null) {
                serviceUrl = providerServiceUrl;
            }
            final String providerAdminUrl = provider.getAdminUrl();
            if (providerAdminUrl != null) {
                adminUrl = providerAdminUrl;
            }
        }
        LineReaderBuilder readerBuilder = LineReaderBuilder.builder()
                .terminal(terminal)
                .parser(parser)
                .completer(systemRegistry.completer())
                .variable(LineReader.INDENTATION, 2)
                .option(LineReader.Option.INSERT_BRACKET, true);
        configureHistory(properties, readerBuilder);
        LineReader reader = readerBuilder.build();

        final String welcomeMessage = String.format("Welcome to Pulsar shell!\n %s: %s\n %s: %s\n\n"
                        + "Type %s to get started or try the autocompletion (TAB button).\n"
                        + "Type %s or %s to end the shell session.\n",
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("Service URL").toAnsi(),
                serviceUrl,
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("Admin URL").toAnsi(),
                adminUrl,
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("help").toAnsi(),
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("exit").toAnsi(),
                new AttributedStringBuilder().style(AttributedStyle.BOLD).append("quit").toAnsi());
        output(welcomeMessage, terminal);
        // Prompt shows "configName(host)" when a named config is active, plain host otherwise.
        String promptMessage;
        if (configShell.getCurrentConfig() != null) {
            promptMessage = String.format("%s(%s)", configShell.getCurrentConfig(), getHostFromUrl(serviceUrl));
        } else {
            promptMessage = getHostFromUrl(serviceUrl);
        }
        final String prompt = createPrompt(promptMessage);
        return new InteractiveLineReader() {
            @Override
            public String readLine() {
                return reader.readLine(prompt);
            }

            @Override
            public List<String> parseLine(String line) {
                return reader.getParser().parse(line, 0).words();
            }
        };
    }, () -> terminal);
}
// In file mode with --fail-on-error, the shell must exit with code 1 at the
// first failing command and skip subsequent commands in the script.
@Test
public void testFileModeExitOnError() throws Exception {
    Terminal terminal = TerminalBuilder.builder().build();
    final MockLineReader linereader = new MockLineReader(terminal);
    final Properties props = new Properties();
    props.setProperty("webServiceUrl", "http://localhost:8080");
    final String shellFile = Thread.currentThread()
            .getContextClassLoader().getResource("test-shell-file-error").getFile();
    final TestPulsarShell testPulsarShell =
            new TestPulsarShell(new String[]{"-f", shellFile, "--fail-on-error"}, props, pulsarAdmin);
    try {
        testPulsarShell.run((a) -> linereader, () -> terminal);
        fail();
    } catch (SystemExitCalledException ex) {
        assertEquals(ex.code, 1);
    }
    verify(topics).createNonPartitionedTopic(eq("persistent://public/default/my-topic"), any(Map.class));
    // The produce command after the failure must never run.
    verify(testPulsarShell.cmdProduceHolder.get(), times(0)).run();
}
/**
 * Writes the given bytes to the file at {@code path}, overwriting any existing
 * content, and returns the file system for chaining.
 *
 * @param fs the file system to write to; must not be null
 * @param path the destination file; must not be null
 * @param bytes the bytes to write; must not be null
 * @return the file system passed in, for fluent use
 * @throws IOException if creating or writing the file fails
 */
public static FileSystem write(final FileSystem fs, final Path path,
    final byte[] bytes) throws IOException {

  // Validate all arguments up front (previously only path and bytes were
  // checked, so a null fs surfaced as an unexplained NPE deeper in createFile).
  Objects.requireNonNull(fs, "fs");
  Objects.requireNonNull(path, "path");
  Objects.requireNonNull(bytes, "bytes");

  try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build()) {
    out.write(bytes);
  }

  return fs;
}
// Writing a collection of strings through a FileContext must be readable back
// line-by-line in the same order.
@Test
public void testWriteStringsFileContext() throws IOException {
    URI uri = tmp.toURI();
    Configuration conf = new Configuration();
    FileContext fc = FileContext.getFileContext(uri, conf);
    Path testPath = new Path(new Path(uri), "writestrings.out");
    Collection<String> write = Arrays.asList("over", "the", "lazy", "dog");
    FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8);
    List<String> read = FileUtils.readLines(new File(testPath.toUri()), StandardCharsets.UTF_8);
    assertEquals(write, read);
}
/**
 * Builds a query header for one result column. The logic table name is
 * resolved from the actual table name when both a table and a database are
 * available; the primary-key flag is derived from the schema's column
 * metadata, defaulting to false when anything along the lookup is missing.
 *
 * @throws SQLException if reading the result metadata fails
 */
@Override
public QueryHeader build(final QueryResultMetaData queryResultMetaData, final ShardingSphereDatabase database,
                         final String columnName, final String columnLabel, final int columnIndex) throws SQLException {
    final String schemaName = null == database ? "" : database.getName();
    final String actualTableName = queryResultMetaData.getTableName(columnIndex);
    final String tableName;
    final boolean primaryKey;
    if (null == actualTableName || null == database) {
        tableName = actualTableName;
        primaryKey = false;
    } else {
        tableName = getLogicTableName(database, actualTableName);
        final ShardingSphereSchema schema = database.getSchema(schemaName);
        primaryKey = null != schema
                && Optional.ofNullable(schema.getTable(tableName))
                        .map(table -> table.getColumn(columnName))
                        .map(ShardingSphereColumn::isPrimaryKey)
                        .orElse(false);
    }
    return new QueryHeader(schemaName, tableName, columnLabel, columnName,
            queryResultMetaData.getColumnType(columnIndex),
            queryResultMetaData.getColumnTypeName(columnIndex),
            queryResultMetaData.getColumnLength(columnIndex),
            queryResultMetaData.getDecimals(columnIndex),
            queryResultMetaData.isSigned(columnIndex),
            primaryKey,
            queryResultMetaData.isNotNull(columnIndex),
            queryResultMetaData.isAutoIncrement(columnIndex));
}
// When the database has no schemas, the header must still resolve the logic
// table name but report primaryKey=false.
@Test
void assertBuildWithNullSchema() throws SQLException {
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getSchemas()).thenReturn(Collections.emptyMap());
    DataNodeRuleAttribute ruleAttribute = mock(DataNodeRuleAttribute.class);
    when(ruleAttribute.findLogicTableByActualTable("t_order")).thenReturn(Optional.of("t_order"));
    when(database.getRuleMetaData().getAttributes(DataNodeRuleAttribute.class)).thenReturn(Collections.singleton(ruleAttribute));
    QueryResultMetaData queryResultMetaData = createQueryResultMetaData();
    QueryHeader actual = new MySQLQueryHeaderBuilder().build(queryResultMetaData, database,
            queryResultMetaData.getColumnName(1), queryResultMetaData.getColumnLabel(1), 1);
    assertFalse(actual.isPrimaryKey());
    assertThat(actual.getTable(), is("t_order"));
}
/**
 * Installs an application from the given archive stream: the application is
 * created in the store first, then registered with the security subsystem.
 *
 * @param appDescStream application archive stream; must not be null
 * @return the newly installed application
 */
@Override
public Application install(InputStream appDescStream) {
    checkNotNull(appDescStream, "Application archive stream cannot be null");
    final Application application = store.create(appDescStream);
    SecurityUtil.register(application.id());
    return application;
}
// Installing the bundled app.zip must register the features repo, track the
// app, and leave it in the INSTALLED state.
@Test
public void install() {
    InputStream stream = ApplicationArchive.class.getResourceAsStream("app.zip");
    Application app = mgr.install(stream);
    validate(app);
    assertEquals("incorrect features URI used", app.featuresRepo().get(),
            ((TestFeaturesService) mgr.featuresService).uri);
    assertEquals("incorrect app count", 1, mgr.getApplications().size());
    assertEquals("incorrect app", app, mgr.getApplication(APP_ID));
    assertEquals("incorrect app state", INSTALLED, mgr.getState(APP_ID));
    mgr.registerDeactivateHook(app.id(), this::deactivateHook);
}
/**
 * Materializes this stream to the given topic using the stream's configured
 * key and value serdes and no custom stream partitioner.
 */
@Override
public void to(final String topic) {
    to(topic, Produced.with(keySerde, valueSerde, null));
}
// Passing a null TopicNameExtractor to to(..., Produced) must fail fast with
// an NPE naming the offending parameter.
@Test
public void shouldNotAllowNullTopicChooserOnToWithProduced() {
    final NullPointerException exception = assertThrows(
            NullPointerException.class,
            () -> testStream.to((TopicNameExtractor<String, String>) null, Produced.as("to")));
    assertThat(exception.getMessage(), equalTo("topicExtractor can't be null"));
}
/**
 * Inserts a packet of frames into the term buffer at the given offset if the
 * slot is still unclaimed (its first word reads zero).
 * <p>
 * Ordering matters: the payload beyond the header is copied first, then the
 * header is written back-to-front in 8-byte words, finishing with an ordered
 * store of word 0. Per the {@code putLongOrdered} contract, a reader that
 * observes a non-zero first word is guaranteed to see the rest of the frame.
 * Do not reorder these statements.
 */
public static void insert(
    final UnsafeBuffer termBuffer,
    final int termOffset,
    final UnsafeBuffer packet,
    final int length) {
    if (0 == termBuffer.getInt(termOffset)) {
        // Body first, so it is in place before the header publishes the frame.
        termBuffer.putBytes(termOffset + HEADER_LENGTH, packet, HEADER_LENGTH, length - HEADER_LENGTH);

        termBuffer.putLong(termOffset + 24, packet.getLong(24));
        termBuffer.putLong(termOffset + 16, packet.getLong(16));
        termBuffer.putLong(termOffset + 8, packet.getLong(8));

        // Ordered store of the first word makes the whole frame visible.
        termBuffer.putLongOrdered(termOffset, packet.getLong(0));
    }
}
// Inserting a packet two frames past the start must copy the payload into the
// corresponding slot, skipping over the gap.
@Test
void shouldFillAfterAGap() {
    final int frameLength = 50;
    final int alignedFrameLength = BitUtil.align(frameLength, FRAME_ALIGNMENT);
    final int srcOffset = 0;
    final UnsafeBuffer packet = new UnsafeBuffer(ByteBuffer.allocate(alignedFrameLength));
    final int termOffset = alignedFrameLength * 2;

    TermRebuilder.insert(termBuffer, termOffset, packet, alignedFrameLength);

    verify(termBuffer).putBytes(
            (alignedFrameLength * 2) + HEADER_LENGTH,
            packet,
            srcOffset + HEADER_LENGTH,
            alignedFrameLength - HEADER_LENGTH);
}
@Override public void clearChanged() { changedEntries = false; changedHops = false; for ( int i = 0; i < nrJobEntries(); i++ ) { JobEntryCopy entry = getJobEntry( i ); entry.setChanged( false ); } for ( JobHopMeta hi : jobhops ) { // Look at all the hops hi.setChanged( false ); } super.clearChanged(); }
// The content-changed listener must be notified on setChanged, notified of
// contentSafe on clearChanged, and receive nothing once removed.
@Test
public void testContentChangeListener() throws Exception {
    jobMeta.setChanged();
    jobMeta.setChanged( true );
    verify( listener, times( 2 ) ).contentChanged( same( jobMeta ) );
    jobMeta.clearChanged();
    jobMeta.setChanged( false );
    verify( listener, times( 2 ) ).contentSafe( same( jobMeta ) );
    jobMeta.removeContentChangedListener( listener );
    jobMeta.setChanged();
    jobMeta.setChanged( true );
    verifyNoMoreInteractions( listener );
}
@Override public void start() { // we request a split only if we did not get splits during the checkpoint restore if (getNumberOfCurrentlyAssignedSplits() == 0) { context.sendSplitRequest(); } }
// A reader that already holds a restored split must not request another one on start.
@Test
void testNoSplitRequestWhenSplitRestored() throws Exception {
    final TestingReaderContext context = new TestingReaderContext();
    final FileSourceReader<String, FileSourceSplit> reader = createReader(context);
    reader.addSplits(Collections.singletonList(createTestFileSplit()));
    reader.start();
    reader.close();

    assertThat(context.getNumSplitRequests()).isEqualTo(0);
}
/**
 * Converts a SeaTunnel column definition back into an XuGu column type
 * definition. Out-of-range DECIMAL/TIME/TIMESTAMP precision and scale values
 * are clamped to the database limits with a warning rather than rejected;
 * BYTES/STRING columns fall back to BLOB/CLOB once they exceed the binary or
 * varchar length limits.
 *
 * @throws SeaTunnelRuntimeException (via CommonError) for SQL types XuGu does not support
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(XUGU_BOOLEAN);
            builder.dataType(XUGU_BOOLEAN);
            break;
        case TINYINT:
            builder.columnType(XUGU_TINYINT);
            builder.dataType(XUGU_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(XUGU_SMALLINT);
            builder.dataType(XUGU_SMALLINT);
            break;
        case INT:
            builder.columnType(XUGU_INTEGER);
            builder.dataType(XUGU_INTEGER);
            break;
        case BIGINT:
            builder.columnType(XUGU_BIGINT);
            builder.dataType(XUGU_BIGINT);
            break;
        case FLOAT:
            builder.columnType(XUGU_FLOAT);
            builder.dataType(XUGU_FLOAT);
            break;
        case DOUBLE:
            builder.columnType(XUGU_DOUBLE);
            builder.dataType(XUGU_DOUBLE);
            break;
        case DECIMAL:
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            // Clamp precision first, then scale, logging each adjustment.
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                // Shrink the scale by the same amount the precision is reduced.
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", XUGU_NUMERIC, precision, scale));
            builder.dataType(XUGU_NUMERIC);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            // Unknown/oversized lengths map to BLOB; bounded lengths to BINARY.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(XUGU_BLOB);
                builder.dataType(XUGU_BLOB);
            } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
                builder.columnType(XUGU_BINARY);
                builder.dataType(XUGU_BINARY);
            } else {
                builder.columnType(XUGU_BLOB);
                builder.dataType(XUGU_BLOB);
            }
            break;
        case STRING:
            // Unknown length defaults to the widest VARCHAR; oversized maps to CLOB.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(String.format("%s(%s)", XUGU_VARCHAR, MAX_VARCHAR_LENGTH));
                builder.dataType(XUGU_VARCHAR);
            } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", XUGU_VARCHAR, column.getColumnLength()));
                builder.dataType(XUGU_VARCHAR);
            } else {
                builder.columnType(XUGU_CLOB);
                builder.dataType(XUGU_CLOB);
            }
            break;
        case DATE:
            builder.columnType(XUGU_DATE);
            builder.dataType(XUGU_DATE);
            break;
        case TIME:
            builder.dataType(XUGU_TIME);
            if (column.getScale() != null && column.getScale() > 0) {
                Integer timeScale = column.getScale();
                if (timeScale > MAX_TIME_SCALE) {
                    timeScale = MAX_TIME_SCALE;
                    // NOTE(review): the message reports MAX_SCALE but the clamp uses
                    // MAX_TIME_SCALE — confirm which limit the warning should mention.
                    log.warn(
                            "The time column {} type time({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to time({})",
                            column.getName(),
                            column.getScale(),
                            MAX_SCALE,
                            timeScale);
                }
                builder.columnType(String.format("%s(%s)", XUGU_TIME, timeScale));
                builder.scale(timeScale);
            } else {
                builder.columnType(XUGU_TIME);
            }
            break;
        case TIMESTAMP:
            if (column.getScale() == null || column.getScale() <= 0) {
                builder.columnType(XUGU_TIMESTAMP);
            } else {
                int timestampScale = column.getScale();
                if (column.getScale() > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(String.format("TIMESTAMP(%s)", timestampScale));
                builder.scale(timestampScale);
            }
            builder.dataType(XUGU_TIMESTAMP);
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.XUGU,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
// A SeaTunnel INT column must reconvert to the XuGu INTEGER type.
@Test
public void testReconvertInt() {
    Column column = PhysicalColumn.builder().name("test").dataType(BasicType.INT_TYPE).build();

    BasicTypeDefine typeDefine = XuguTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(XuguTypeConverter.XUGU_INTEGER, typeDefine.getColumnType());
    Assertions.assertEquals(XuguTypeConverter.XUGU_INTEGER, typeDefine.getDataType());
}
/**
 * Returns true when this version is strictly newer than the other version;
 * equal versions yield false.
 */
public boolean isNewerThan(JavaSpecVersion otherVersion) {
    final int comparison = this.compareTo(otherVersion);
    return comparison > 0;
}
// Legacy "1.8" notation must compare as older than the modern "11" notation.
@Test
public void test8notNewerThan11() throws Exception {
    // Setup fixture.
    final JavaSpecVersion eight = new JavaSpecVersion( "1.8" );
    final JavaSpecVersion eleven = new JavaSpecVersion( "11" );

    // Execute system under test.
    final boolean result = eight.isNewerThan( eleven );

    // Verify results.
    assertFalse( result );
}
@Udf(description = "Returns a masked version of the input string. The last n characters" + " will be replaced according to the default masking rules.") @SuppressWarnings("MethodMayBeStatic") // Invoked via reflection public String mask( @UdfParameter("input STRING to be masked") final String input, @UdfParameter("number of characters to mask before the end") final int numChars ) { return doMask(new Masker(), input, numChars); }
@Test
public void shouldMaskAllCharsIfLengthTooLong() {
    // Asking to mask far more characters than the input holds masks the whole string.
    final String masked = udf.mask("AbCd#$123xy Z", 999);
    assertThat(masked, is("XxXx--nnnxx-X"));
}
@Override public Consumer createConsumer(Processor processor) throws Exception { // we need to have database and container set to consume events if (ObjectHelper.isEmpty(configuration.getDatabaseName()) || ObjectHelper.isEmpty(configuration.getContainerName())) { throw new IllegalArgumentException("Database name and container name must be set."); } final Consumer cosmosDbConsumer = new CosmosDbConsumer(this, processor); configureConsumer(cosmosDbConsumer); return cosmosDbConsumer; }
@Test
void testCreateConsumerWithInvalidConfig() throws Exception {
    // Consumers need both a database and a container. Exercise the malformed
    // "remaining" path variants that leave one (or both) of them unset.
    final String uri = "azure-cosmosdb://mydb/myContainer";
    String remaining = "mydb";
    final Map<String, Object> params = new HashMap<>();
    params.put("databaseEndpoint", "https://test.com:443");
    params.put("createDatabaseIfNotExists", "true");
    params.put("accountKey", "myKey");

    // Database only, no container -> consumer creation must be rejected.
    final CosmosDbEndpoint endpoint = (CosmosDbEndpoint) context.getComponent("azure-cosmosdb", CosmosDbComponent.class)
            .createEndpoint(uri, remaining, params);

    assertThrows(IllegalArgumentException.class, () -> endpoint.createConsumer(exchange -> {
    }));

    // Leading slash: presumably parsed as an empty database name — TODO confirm parsing.
    params.put("databaseEndpoint", "https://test.com:443");
    params.put("createDatabaseIfNotExists", "true");
    params.put("accountKey", "myKey");
    remaining = "/mydb";

    final CosmosDbEndpoint endpoint2 = (CosmosDbEndpoint) context.getComponent("azure-cosmosdb", CosmosDbComponent.class)
            .createEndpoint(uri, remaining, params);

    assertThrows(IllegalArgumentException.class, () -> endpoint2.createConsumer(exchange -> {
    }));

    // Trailing slash: database present but container name empty.
    params.put("databaseEndpoint", "https://test.com:443");
    params.put("createDatabaseIfNotExists", "true");
    params.put("accountKey", "myKey");
    remaining = "mydb/";

    final CosmosDbEndpoint endpoint3 = (CosmosDbEndpoint) context.getComponent("azure-cosmosdb", CosmosDbComponent.class)
            .createEndpoint(uri, remaining, params);

    assertThrows(IllegalArgumentException.class, () -> endpoint3.createConsumer(exchange -> {
    }));

    // Empty path: neither database nor container.
    params.put("databaseEndpoint", "https://test.com:443");
    params.put("createDatabaseIfNotExists", "true");
    params.put("accountKey", "myKey");
    remaining = "";

    final CosmosDbEndpoint endpoint4 = (CosmosDbEndpoint) context.getComponent("azure-cosmosdb", CosmosDbComponent.class)
            .createEndpoint(uri, remaining, params);

    assertThrows(IllegalArgumentException.class, () -> endpoint4.createConsumer(exchange -> {
    }));

    // Well-formed "database/container" path: consumer creation must succeed.
    params.put("databaseEndpoint", "https://test.com:443");
    params.put("createDatabaseIfNotExists", "true");
    params.put("accountKey", "myKey");
    remaining = "mydb/mycn";

    final CosmosDbEndpoint endpoint5 = (CosmosDbEndpoint) context.getComponent("azure-cosmosdb", CosmosDbComponent.class)
            .createEndpoint(uri, remaining, params);

    assertNotNull(endpoint5.createConsumer(exchange -> {
    }));
}
/**
 * Resolves a configuration value, preferring a non-empty runtime override and
 * falling back to the initial (startup) value otherwise.
 *
 * @param key the property key to look up
 * @return the effective value for the key
 */
public String getValue(String key) {
    final String override = configOverrides.getProperty(key);
    if (StringUtils.isEmpty(override)) {
        return getInitialValue(key);
    }
    return override;
}
@Test
void testLoadPropertiesFromInitFile() throws IOException {
    // Save the current system property so the test can restore it afterwards.
    String propBackUp = System.getProperty("spark.kubernetes.operator.basePropertyFileName");
    try {
        // Point the conf manager at the bundled test properties file.
        String propsFilePath = SparkOperatorConfManagerTest.class
                .getClassLoader()
                .getResource("spark-operator.properties")
                .getPath();
        System.setProperty("spark.kubernetes.operator.basePropertyFileName", propsFilePath);
        SparkOperatorConfManager confManager = new SparkOperatorConfManager();
        // The manager must pick up values from the base property file on construction.
        Assertions.assertEquals("bar", confManager.getValue("spark.kubernetes.operator.foo"));
    } finally {
        // Restore (or clear) the system property to avoid leaking state into other tests.
        if (StringUtils.isNotEmpty(propBackUp)) {
            System.setProperty("spark.kubernetes.operator.basePropertyFileName", propBackUp);
        } else {
            System.clearProperty("spark.kubernetes.operator.basePropertyFileName");
        }
    }
}
/**
 * Fuzzy (LIKE-style) paginated search for users by username.
 * Delegates directly to the user details service.
 *
 * @param pageNo   1-based page number — TODO confirm indexing convention
 * @param pageSize number of users per page
 * @param username partial username to match; defaults to "" (matches all)
 * @return a page of matching users
 */
@GetMapping(params = "search=blur")
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "users", action = ActionTypes.READ)
public Page<User> fuzzySearchUser(@RequestParam int pageNo, @RequestParam int pageSize,
        @RequestParam(name = "username", required = false, defaultValue = "") String username) {
    return userDetailsService.findUsersLike4Page(username, pageNo, pageSize);
}
@Test
void testFuzzySearchUser() {
    // Stub the service to hand back a fixed page for any search arguments.
    Page<User> expectedPage = new Page<>();
    when(userDetailsService.findUsersLike4Page(anyString(), anyInt(), anyInt())).thenReturn(expectedPage);

    Page<User> result = userController.fuzzySearchUser(1, 10, "nacos");

    // The controller must pass the service result through unchanged.
    assertEquals(expectedPage, result);
}
/**
 * Tells whether this task can handle the given endpoint URL.
 * A null URL is never accepted; otherwise the URL must match the task's
 * endpoint pattern.
 */
public static boolean acceptEndpoint(String endpointUrl) {
    if (endpointUrl == null) {
        return false;
    }
    return endpointUrl.matches(ENDPOINT_PATTERN_STRING);
}
@Test
public void testAcceptEndpointFailures() {
    // acceptEndpoint is static, so no task instance is needed. The previously
    // constructed AsyncTestSpecification / NATSMessageConsumptionTask fixtures
    // were unused and have been removed.
    // Wrong scheme: ssl:// is not a NATS endpoint.
    assertFalse(NATSMessageConsumptionTask.acceptEndpoint("ssl://localhost:1883/testTopic"));
    // Wrong scheme and no subject segment.
    assertFalse(NATSMessageConsumptionTask.acceptEndpoint("mqtt://localhost:1883"));
    // Correct scheme but non-numeric port.
    assertFalse(NATSMessageConsumptionTask.acceptEndpoint("nats://localhost:port/testTopic"));
}
/**
 * Registers a database product name the first time it is seen, unless it is
 * one of the officially supported databases.
 *
 * @param uppercaseProductName product name, already upper-cased by the caller
 * @return {@code true} only on the first sighting of an unsupported database
 */
static boolean isNewDatabase(String uppercaseProductName) {
    final boolean supported = SUPPORTED_DATABASE_NAMES.contains(uppercaseProductName);
    // Set#add reports whether the name was newly inserted, i.e. first sighting.
    return !supported && DETECTED_DATABASE_NAMES.add(uppercaseProductName);
}
@Test
public void testMySQL() {
    // MYSQL is among the officially supported databases, so it must never be
    // reported as a newly detected one.
    boolean reportedAsNew = SupportedDatabases.isNewDatabase("MYSQL");
    assertThat(reportedAsNew).isFalse();
}
/**
 * Splits plain-text lines into paragraphs of at most {@code maxTokensPerParagraph}
 * tokens. Oversized text is broken up line-by-line via {@code internalSplitLines}
 * using the plain-text split options (token semantics defined by the tokenizer
 * used internally — see internalSplitTextParagraphs).
 *
 * @param lines                 the input lines to group into paragraphs
 * @param maxTokensPerParagraph maximum token budget per paragraph
 * @return the resulting paragraphs
 */
public static List<String> splitPlainTextParagraphs(
    List<String> lines, int maxTokensPerParagraph) {
    return internalSplitTextParagraphs(
        lines,
        maxTokensPerParagraph,
        (text) -> internalSplitLines(
            text, maxTokensPerParagraph, false, s_plaintextSplitOptions));
}
@Test
public void canSplitTextParagraphsOnNewlines() {
    // Mixed line endings (\r\n, \n, \r) should all act as split points, and
    // small trailing fragments get merged into the preceding paragraph.
    List<String> input = Arrays.asList(
        "This is a test of the emergency broadcast system\r\nThis is only a test",
        "We repeat this is only a test\nA unit test",
        "A small note\n"
            + "And another\r\n"
            + "And once again\r"
            + "Seriously this is the end\n"
            + "We're finished\n"
            + "All set\n"
            + "Bye\n",
        "Done");

    // Expected output with a 15-token budget per paragraph.
    List<String> expected = Arrays.asList(
        "This is a test of the emergency broadcast system",
        "This is only a test",
        "We repeat this is only a test\nA unit test",
        "A small note\nAnd another\nAnd once again",
        "Seriously this is the end\nWe're finished\nAll set\nBye Done");

    List<String> result = TextChunker.splitPlainTextParagraphs(input, 15);

    Assertions.assertEquals(expected, result);
}
@Override public Long createRewardActivity(RewardActivityCreateReqVO createReqVO) { // 校验商品是否冲突 validateRewardActivitySpuConflicts(null, createReqVO.getProductSpuIds()); // 插入 RewardActivityDO rewardActivity = RewardActivityConvert.INSTANCE.convert(createReqVO) .setStatus(PromotionUtils.calculateActivityStatus(createReqVO.getEndTime())); rewardActivityMapper.insert(rewardActivity); // 返回 return rewardActivity.getId(); }
@Test
public void testCreateRewardActivity_success() {
    // 准备参数 — random request with valid enum values; the time window starts in
    // the future so the derived status is WAIT (not yet started).
    RewardActivityCreateReqVO reqVO = randomPojo(RewardActivityCreateReqVO.class, o -> {
        o.setConditionType(randomEle(PromotionConditionTypeEnum.values()).getType());
        o.setProductScope(randomEle(PromotionProductScopeEnum.values()).getScope());
        // Window in the future, used to trigger the "waiting" status.
        o.setStartTime(addTime(Duration.ofDays(1))).setEndTime(addTime(Duration.ofDays(2)));
    });

    // Invoke the service under test.
    Long rewardActivityId = rewardActivityService.createRewardActivity(reqVO);
    // The insert must produce a generated id.
    assertNotNull(rewardActivityId);

    // Verify persisted fields match the request ("rules" compared separately below).
    RewardActivityDO rewardActivity = rewardActivityMapper.selectById(rewardActivityId);
    assertPojoEquals(reqVO, rewardActivity, "rules");
    assertEquals(rewardActivity.getStatus(), PromotionActivityStatusEnum.WAIT.getStatus());
    for (int i = 0; i < reqVO.getRules().size(); i++) {
        assertPojoEquals(reqVO.getRules().get(i), rewardActivity.getRules().get(i));
    }
}
/**
 * Returns the southern (minimum) latitude of the tile containing the given
 * latitude. Tiles are LAT_DEGREE degrees tall and anchored at -90.
 */
int getMinLatForTile(double lat) {
    double tileIndex = Math.floor((90 + lat) / LAT_DEGREE);
    return (int) (tileIndex * LAT_DEGREE) - 90;
}
@Test
public void testMinLat() {
    // Each latitude snaps down to the southern edge of its tile. The expected
    // values are consistent with LAT_DEGREE == 20 — TODO confirm tile height.
    assertEquals(50, instance.getMinLatForTile(52.5));
    assertEquals(10, instance.getMinLatForTile(29.9));
    assertEquals(-70, instance.getMinLatForTile(-59.9));
}
/**
 * @return the node's current integer value (may be null if never set — TODO confirm)
 */
public Integer value() {
    return value;
}
@Test
void testSetValue() {
    IntegerNode node = new IntegerNode();
    // Non-numeric input is rejected.
    assertFalse(node.setValue("invalid"));
    // Numeric input is accepted and parsed into the node's value.
    assertTrue(node.setValue("10"));
    assertEquals(10, node.value().intValue());
}
void error() { if (state <= k * (n - 1)) { state += k; } else { state = k * n; } }
@Test
public void lotsOfErrors() {
    // Each error degrades the output by one step (500 -> 400 -> 300 -> 200 ...).
    assertOutput(500);
    degrader.error();
    assertOutput(400);
    degrader.error();
    assertOutput(300);
    degrader.error();
    assertOutput(200);
    // Hammering it with further errors must saturate at the floor of 100.
    for (int i = 0; i < OUTPUTS.length; i++) {
        degrader.error();
        assertOutput(100);
    }
}
/**
 * Builds a persistent query that runs in its own dedicated Kafka Streams
 * runtime (as opposed to a shared runtime). Resolves the query's schema and
 * formats from either the sink (normal case) or the single source
 * (CREATE_SOURCE), builds the topology, and wires up materialization and
 * scalable-push support.
 *
 * @param persistentQueryType  kind of persistent query (CREATE_AS, CREATE_SOURCE, ...)
 * @param sinkDataSource       the sink; empty for CREATE_SOURCE queries
 * @param sources              upstream data sources referenced by the query
 * @param physicalPlan         the physical execution plan to realize
 * @param allPersistentQueries supplier of currently running persistent queries
 * @return the metadata handle for the newly built (not yet started) query
 */
@SuppressWarnings("ParameterNumber")
PersistentQueryMetadata buildPersistentQueryInDedicatedRuntime(
    final KsqlConfig ksqlConfig,
    final KsqlConstants.PersistentQueryType persistentQueryType,
    final String statementText,
    final QueryId queryId,
    final Optional<DataSource> sinkDataSource,
    final Set<DataSource> sources,
    final ExecutionStep<?> physicalPlan,
    final String planSummary,
    final QueryMetadata.Listener listener,
    final Supplier<List<PersistentQueryMetadata>> allPersistentQueries,
    final StreamsBuilder streamsBuilder,
    final MetricCollectors metricCollectors) {
    // Dedicated runtimes get their own application id derived from the query id.
    final String applicationId = QueryApplicationId.build(ksqlConfig, true, queryId);

    final Map<String, Object> streamsProperties = buildStreamsProperties(
        applicationId,
        Optional.of(queryId),
        metricCollectors,
        config.getConfig(true),
        processingLogContext
    );

    final LogicalSchema logicalSchema;
    final KeyFormat keyFormat;
    final ValueFormat valueFormat;
    final KsqlTopic ksqlTopic;

    switch (persistentQueryType) {
      // CREATE_SOURCE does not have a sink, so the schema is obtained from the query source
      case CREATE_SOURCE:
        final DataSource dataSource = Iterables.getOnlyElement(sources);
        logicalSchema = dataSource.getSchema();
        keyFormat = dataSource.getKsqlTopic().getKeyFormat();
        valueFormat = dataSource.getKsqlTopic().getValueFormat();
        ksqlTopic = dataSource.getKsqlTopic();
        break;
      default:
        // All other persistent query types derive schema/formats from the sink.
        logicalSchema = sinkDataSource.get().getSchema();
        keyFormat = sinkDataSource.get().getKsqlTopic().getKeyFormat();
        valueFormat = sinkDataSource.get().getKsqlTopic().getValueFormat();
        ksqlTopic = sinkDataSource.get().getKsqlTopic();
        break;
    }

    final PhysicalSchema querySchema = PhysicalSchema.from(
        logicalSchema,
        keyFormat.getFeatures(),
        valueFormat.getFeatures()
    );

    final RuntimeBuildContext runtimeBuildContext = buildContext(
        applicationId,
        queryId,
        streamsBuilder
    );
    // Realize the physical plan into the streams builder, then build the topology.
    final Object result = buildQueryImplementation(physicalPlan, runtimeBuildContext);
    final Topology topology = streamsBuilder
        .build(PropertiesUtil.asProperties(streamsProperties));

    // Materialization is only available when the plan produced materialization info.
    final Optional<MaterializationProviderBuilderFactory.MaterializationProviderBuilder>
        materializationProviderBuilder = getMaterializationInfo(result).map(info ->
        materializationProviderBuilderFactory.materializationProviderBuilder(
            info,
            querySchema,
            keyFormat,
            streamsProperties,
            applicationId,
            queryId.toString()
        ));

    final Optional<ScalablePushRegistry> scalablePushRegistry = applyScalablePushProcessor(
        querySchema.logicalSchema(),
        result,
        allPersistentQueries,
        streamsProperties,
        applicationId,
        ksqlConfig,
        ksqlTopic,
        serviceContext
    );

    return new PersistentQueryMetadataImpl(
        persistentQueryType,
        statementText,
        querySchema,
        sources.stream().map(DataSource::getName).collect(Collectors.toSet()),
        sinkDataSource,
        planSummary,
        queryId,
        materializationProviderBuilder,
        applicationId,
        topology,
        kafkaStreamsBuilder,
        runtimeBuildContext.getSchemas(),
        streamsProperties,
        config.getOverrides(),
        ksqlConfig.getLong(KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG),
        getConfiguredQueryErrorClassifier(ksqlConfig, applicationId),
        physicalPlan,
        ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_ERROR_MAX_QUEUE_SIZE),
        getUncaughtExceptionProcessingLogger(queryId),
        ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_INITIAL_MS),
        ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_RETRY_BACKOFF_MAX_MS),
        listener,
        scalablePushRegistry,
        processingLogContext.getLoggerFactory()
    );
}
@Test
public void shouldBuildDedicatedCreateAsPersistentQueryWithSharedRuntimeCorrectly() {
    // Given: shared runtimes are enabled globally, but this builder call still
    // targets a dedicated runtime; stub the uncaught-exception logger lookup.
    when(ksqlConfig.getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED)).thenReturn(true);
    final ProcessingLogger uncaughtProcessingLogger = mock(ProcessingLogger.class);
    when(processingLoggerFactory.getLogger(
        QueryLoggerUtil.queryLoggerName(QUERY_ID, new QueryContext.Stacker()
            .push("ksql.logger.thread.exception.uncaught").getQueryContext()),
        Collections.singletonMap("query-id", QUERY_ID.toString()))
    ).thenReturn(uncaughtProcessingLogger);

    // When: build and initialize a CREATE_AS persistent query in a dedicated runtime.
    final PersistentQueryMetadata queryMetadata = queryBuilder.buildPersistentQueryInDedicatedRuntime(
        ksqlConfig,
        KsqlConstants.PersistentQueryType.CREATE_AS,
        STATEMENT_TEXT,
        QUERY_ID,
        Optional.of(sink),
        SOURCES,
        physicalPlan,
        SUMMARY,
        queryListener,
        ArrayList::new,
        streamsBuilder,
        new MetricCollectors()
    );
    queryMetadata.initialize();

    // Then: the metadata reflects the statement, sink, schema, sources, and topology.
    assertThat(queryMetadata.getStatementString(), equalTo(STATEMENT_TEXT));
    assertThat(queryMetadata.getQueryId(), equalTo(QUERY_ID));
    assertThat(queryMetadata.getSinkName().get(), equalTo(SINK_NAME));
    assertThat(queryMetadata.getPhysicalSchema(), equalTo(SINK_PHYSICAL_SCHEMA));
    assertThat(queryMetadata.getResultTopic(), is(Optional.of(ksqlTopic)));
    assertThat(queryMetadata.getSourceNames(), equalTo(SOURCES.stream()
        .map(DataSource::getName).collect(Collectors.toSet())));
    assertThat(queryMetadata.getDataSourceType().get(), equalTo(DataSourceType.KSTREAM));
    assertThat(queryMetadata.getExecutionPlan(), equalTo(SUMMARY));
    assertThat(queryMetadata.getTopology(), is(topology));
    assertThat(queryMetadata.getOverriddenProperties(), equalTo(OVERRIDES));
    assertThat(queryMetadata.getProcessingLogger(), equalTo(uncaughtProcessingLogger));
    assertThat(queryMetadata.getPersistentQueryType(), equalTo(KsqlConstants.PersistentQueryType.CREATE_AS));
    // queries in dedicated runtimes must not include alternative topic prefix
    assertThat(
        queryMetadata.getStreamsProperties().get(InternalConfig.TOPIC_PREFIX_ALTERNATIVE),
        is(nullValue())
    );
}
/**
 * Finds the first registered SERVICE_FINGERPRINT plugin whose declared service
 * names match the given network service.
 *
 * @return the matching fingerprinter bundled with the matched service, or empty
 */
public Optional<PluginMatchingResult<ServiceFingerprinter>> getServiceFingerprinter(
    NetworkService networkService) {
  return tsunamiPlugins.entrySet().stream()
      // A candidate must be a fingerprinter AND declare a matching service name.
      .filter(
          entry ->
              entry.getKey().type().equals(PluginType.SERVICE_FINGERPRINT)
                  && hasMatchingServiceName(networkService, entry.getKey()))
      .findFirst()
      .map(
          entry ->
              PluginMatchingResult.<ServiceFingerprinter>builder()
                  .setPluginDefinition(entry.getKey())
                  .setTsunamiPlugin((ServiceFingerprinter) entry.getValue().get())
                  .addMatchedService(networkService)
                  .build());
}
@Test
public void getServiceFingerprinter_whenForWebServiceAnnotationAndNonWebService_returnsEmpty() {
    // Two clearly non-web services on the same endpoint.
    NetworkService sshService =
        NetworkService.newBuilder()
            .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80))
            .setTransportProtocol(TransportProtocol.TCP)
            .setServiceName("ssh")
            .build();
    NetworkService rdpService =
        NetworkService.newBuilder()
            .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80))
            .setTransportProtocol(TransportProtocol.TCP)
            .setServiceName("rdp")
            .build();
    // Only a web-service fingerprinter is registered, so neither service matches.
    PluginManager pluginManager =
        Guice.createInjector(new FakePortScannerBootstrapModule(), FakeWebFingerprinter.getModule())
            .getInstance(PluginManager.class);

    assertThat(pluginManager.getServiceFingerprinter(sshService)).isEmpty();
    assertThat(pluginManager.getServiceFingerprinter(rdpService)).isEmpty();
}
/**
 * Three-way comparison of two primitive longs.
 *
 * @return -1 if {@code o1 < o2}, 0 if equal, 1 if {@code o1 > o2}
 */
public static int compareLong(long o1, long o2) {
    // Manual three-way compare; mirrors Long.compare's exact -1/0/1 contract.
    if (o1 == o2) {
        return 0;
    }
    return o1 < o2 ? -1 : 1;
}
@Test
public void testCompareLong() {
    // compareLong follows the Long.compare contract: 0 for equal values,
    // 1 when the first is greater, -1 when the first is smaller.
    Assert.assertEquals(0, NumberUtils.compareLong(0L, 0L));
    Assert.assertEquals(1, NumberUtils.compareLong(9L, 0L));
    Assert.assertEquals(-1, NumberUtils.compareLong(0L, 9L));
    // Negative operands follow the same ordering.
    Assert.assertEquals(-1, NumberUtils.compareLong(-9L, 0L));
    Assert.assertEquals(1, NumberUtils.compareLong(0L, -9L));
}
/**
 * Resolves the partitioning strategy for a map, in priority order:
 * (1) attribute-based strategy built from {@code attributeConfigs},
 * (2) a strategy instance set directly on the config,
 * (3) a strategy class name instantiated via the config class loader (cached),
 * otherwise {@code null}.
 *
 * @return the resolved strategy, or null when none is configured
 */
@SuppressWarnings("checkstyle:NestedIfDepth")
@Nullable
public PartitioningStrategy getPartitioningStrategy(
        String mapName,
        PartitioningStrategyConfig config,
        final List<PartitioningAttributeConfig> attributeConfigs
) {
    // Attribute configs take precedence; the built strategy is cached per map.
    if (attributeConfigs != null && !attributeConfigs.isEmpty()) {
        return cache.computeIfAbsent(mapName, k -> createAttributePartitionStrategy(attributeConfigs));
    }

    // A directly-supplied strategy instance is used as-is (not cached).
    if (config != null && config.getPartitioningStrategy() != null) {
        return config.getPartitioningStrategy();
    }

    if (config != null && config.getPartitioningStrategyClass() != null) {
        PartitioningStrategy<?> strategy = cache.get(mapName);
        if (strategy != null) {
            return strategy;
        }

        try {
            // We don't use computeIfAbsent intentionally so that the map isn't blocked if the instantiation takes a
            // long time - it's user code
            strategy = ClassLoaderUtil.newInstance(configClassLoader, config.getPartitioningStrategyClass());
        } catch (Exception e) {
            throw ExceptionUtil.rethrow(e);
        }

        // putIfAbsent: a concurrent caller may have cached another instance first;
        // the returned value here is still this thread's instance (benign race).
        cache.putIfAbsent(mapName, strategy);
        return strategy;
    }

    return null;
}
@Test
public void whenConfigNull_getPartitioningStrategy_returnsNull() {
    // With no config and no attribute configs, the factory has nothing to build
    // from and must return null.
    PartitioningStrategy partitioningStrategy
            = partitioningStrategyFactory.getPartitioningStrategy(mapName, null, null);
    assertNull(partitioningStrategy);
}
/**
 * Checks whether an IP address falls inside a CIDR range.
 * A bare address with no '/' is treated as an exact-match comparison.
 * The math operates on raw address bytes via BigInteger, so both IPv4 and
 * IPv6 CIDRs are handled (mixing an IPv4 address with an IPv6 CIDR is not
 * meaningful — callers are expected to pass matching families; TODO confirm).
 *
 * @param ip   the address to test, e.g. "192.168.1.10"
 * @param cidr the range, e.g. "192.168.1.0/24", or a bare address
 * @return true when {@code ip} lies within the range
 * @throws RuntimeException wrapping any parse/resolution failure
 */
public static boolean isIPInRange(String ip, String cidr) {
    try {
        String[] parts = cidr.split(SLASH);
        if (parts.length == 1) {
            // No prefix length: exact string equality.
            return StringUtils.equals(ip, cidr);
        }
        if (parts.length != 2) {
            return false;
        }
        InetAddress cidrIp = InetAddress.getByName(parts[0]);
        int prefixLength = Integer.parseInt(parts[1]);

        // Interpret both addresses as unsigned big-endian integers.
        BigInteger cidrIpBigInt = new BigInteger(1, cidrIp.getAddress());
        BigInteger ipBigInt = new BigInteger(1, InetAddress.getByName(ip).getAddress());

        // mask = all-ones network mask: -1 << hostBits keeps the network bits set.
        BigInteger mask = BigInteger.valueOf(-1).shiftLeft(cidrIp.getAddress().length * 8 - prefixLength);
        // Lower bound: network address; upper bound: network address with host bits all set.
        BigInteger cidrIpLower = cidrIpBigInt.and(mask);
        BigInteger cidrIpUpper = cidrIpLower.add(mask.not());

        return ipBigInt.compareTo(cidrIpLower) >= 0 && ipBigInt.compareTo(cidrIpUpper) <= 0;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
@Test
public void isIPInRange() {
    // IPv4 test: inside then outside a /24.
    String ipv4Address = "192.168.1.10";
    String ipv4Cidr = "192.168.1.0/24";
    assert IPAddressUtils.isIPInRange(ipv4Address, ipv4Cidr);

    ipv4Address = "192.168.2.10";
    assert !IPAddressUtils.isIPInRange(ipv4Address, ipv4Cidr);

    // IPv6 test: address inside a /48.
    String ipv6Address = "2001:0db8:85a3:0000:0000:8a2e:0370:7334";
    String ipv6Cidr = "2001:0db8:85a3::/48";
    assert IPAddressUtils.isIPInRange(ipv6Address, ipv6Cidr);
}
/**
 * Computes the RSV bits for an outgoing frame: data frames (text/binary) get
 * RSV1 set to signal per-message deflate; other frames keep their bits as-is.
 */
@Override
protected int rsv(WebSocketFrame msg) {
    final boolean dataFrame = msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame;
    if (dataFrame) {
        return msg.rsv() | WebSocketExtension.RSV1;
    }
    return msg.rsv();
}
@Test
public void testAlreadyCompressedFrame() {
    EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false));

    // initialize: a frame that already carries RSV1 (i.e. already compressed).
    byte[] payload = new byte[300];
    random.nextBytes(payload);
    BinaryWebSocketFrame frame =
            new BinaryWebSocketFrame(true, WebSocketExtension.RSV3 | WebSocketExtension.RSV1, Unpooled.wrappedBuffer(payload));

    // execute
    assertTrue(encoderChannel.writeOutbound(frame));
    BinaryWebSocketFrame newFrame = encoderChannel.readOutbound();

    // test: the encoder must pass the frame through untouched — same RSV bits,
    // same length, same bytes — rather than double-compressing it.
    assertNotNull(newFrame);
    assertNotNull(newFrame.content());
    assertEquals(WebSocketExtension.RSV3 | WebSocketExtension.RSV1, newFrame.rsv());
    assertEquals(300, newFrame.content().readableBytes());

    byte[] finalPayload = new byte[300];
    newFrame.content().readBytes(finalPayload);
    assertArrayEquals(finalPayload, payload);
    newFrame.release();
}
public void refreshStarted(long currentVersion, long requestedVersion) { updatePlanDetails = new ConsumerRefreshMetrics.UpdatePlanDetails(); refreshStartTimeNano = System.nanoTime(); refreshMetricsBuilder = new ConsumerRefreshMetrics.Builder(); refreshMetricsBuilder.setIsInitialLoad(currentVersion == VERSION_NONE); refreshMetricsBuilder.setUpdatePlanDetails(updatePlanDetails); cycleVersionStartTimes.clear(); // clear map to avoid accumulation over time }
@Test
public void testRefreshStartedWithInitialLoad() {
    // Starting from VERSION_NONE marks this refresh as the initial load.
    concreteRefreshMetricsListener.refreshStarted(VERSION_NONE, TEST_VERSION_HIGH);

    ConsumerRefreshMetrics metrics = concreteRefreshMetricsListener.refreshMetricsBuilder.build();

    assertEquals(true, metrics.getIsInitialLoad());
    assertNotNull(metrics.getUpdatePlanDetails());
}
/**
 * Moves files to the Google Drive trash (or deletes shared drives outright).
 * Placeholders are skipped; files already trashed are hard-deleted via
 * DriveDeleteFeature; everything else is flagged trashed via a files.update.
 * The cached file id is invalidated on success.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path f : files.keySet()) {
        // Placeholders have no server-side counterpart to trash.
        if(f.isPlaceholder()) {
            log.warn(String.format("Ignore placeholder %s", f));
            continue;
        }
        try {
            if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(f.getParent())) {
                // Top-level shared drives cannot be trashed — delete them directly.
                session.getClient().teamdrives().delete(fileid.getFileId(f)).execute();
            }
            else {
                if(f.attributes().isHidden()) {
                    // Hidden here indicates the file is already in the trash;
                    // fall back to a permanent delete instead of re-trashing.
                    log.warn(String.format("Delete file %s already in trash", f));
                    new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(f), prompt, callback);
                    continue;
                }
                callback.delete(f);
                // Trash by flipping the "trashed" flag via a metadata update.
                final File properties = new File();
                properties.setTrashed(true);
                session.getClient().files().update(fileid.getFileId(f), properties)
                    .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
            }
            // Invalidate the cached id now that the path no longer resolves.
            fileid.cache(f, null);
        }
        catch(IOException e) {
            throw new DriveExceptionMappingService(fileid).map("Cannot delete {0}", e, f);
        }
    }
}
@Test(expected = NotfoundException.class)
public void testDeleteNotFound() throws Exception {
    // A random name under "My Drive" that was never created server-side.
    final Path missing = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    // Trashing a nonexistent file must surface as NotfoundException.
    new DriveTrashFeature(session, fileid).delete(Collections.singletonList(missing), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Converts the DB-persisted issue locations into a list of flows.
 *
 * @param componentName  name of the component the issue belongs to
 * @param issueLocations persisted locations; may be null when the issue has none
 * @return the converted flows, or an empty list when there are no locations
 */
public List<Flow> convertFlows(String componentName, @Nullable DbIssues.Locations issueLocations) {
    if (issueLocations == null) {
        return Collections.emptyList();
    }

    List<Flow> flows = new LinkedList<>();
    issueLocations.getFlowList().forEach(sourceFlow -> flows.add(toFlow(componentName, sourceFlow)));
    return flows;
}
@Test
public void convertFlows_with2DbLocations_returns() {
    // One flow carrying two locations in different components.
    DbIssues.Location location1 = createDbLocation("comp_id_1");
    DbIssues.Location location2 = createDbLocation("comp_id_2");
    DbIssues.Locations issueLocations = DbIssues.Locations.newBuilder()
        .addFlow(createFlow(location1, location2))
        .build();

    List<Flow> flows = flowGenerator.convertFlows(COMPONENT_NAME, issueLocations);

    // Exactly one flow comes back, and it preserves both locations.
    assertThat(flows).hasSize(1);
    Flow singleFlow = flows.iterator().next();
    assertThat(singleFlow.getLocations()).hasSize(2);
    // Index by file path so each converted location can be matched to its source.
    Map<String, Location> pathToLocations = singleFlow.getLocations()
        .stream()
        .collect(toMap(Location::getFilePath, identity()));
    assertLocationMatches(pathToLocations.get("file_path_comp_id_1"), location1);
    assertLocationMatches(pathToLocations.get("file_path_comp_id_2"), location2);
}
/**
 * Handles an OffsetCommit request: validates it against the group, keeps the
 * classic-protocol session alive where applicable, and produces one offset
 * commit record per valid partition alongside the response.
 *
 * @return the records to append plus the OffsetCommit response
 * @throws ApiException when validation of the commit against the group fails
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);

    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }

    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    // Optional expiry, derived from the client-supplied retention time.
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);

    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);

        topic.partitions().forEach(partition -> {
            if (isMetadataInvalid(partition.committedMetadata())) {
                // Oversized metadata: reject this partition only; others still commit.
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());

                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));

                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );

                // One commit record per accepted partition.
                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });

    if (!records.isEmpty()) {
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }

    return new CoordinatorResult<>(records, response);
}
@Test
public void testGenericGroupOffsetCommitWhileInCompletingRebalanceState() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();

    // Create an empty group.
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "foo",
        true
    );

    // Add member.
    group.add(mkGenericMember("member", Optional.of("new-instance-id")));

    // Transition to next generation: the group is left mid-rebalance.
    group.transitionTo(ClassicGroupState.PREPARING_REBALANCE);
    group.initNextGeneration();
    assertEquals(1, group.generationId());

    // Verify that the request is rejected with the correct exception:
    // commits are not allowed while the rebalance is still in progress.
    assertThrows(RebalanceInProgressException.class, () -> context.commitOffset(
        new OffsetCommitRequestData()
            .setGroupId("foo")
            .setMemberId("member")
            .setGenerationIdOrMemberEpoch(1)
            .setTopics(Collections.singletonList(
                new OffsetCommitRequestData.OffsetCommitRequestTopic()
                    .setName("bar")
                    .setPartitions(Collections.singletonList(
                        new OffsetCommitRequestData.OffsetCommitRequestPartition()
                            .setPartitionIndex(0)
                            .setCommittedOffset(100L)
                    ))
            ))
        )
    );
}
/**
 * REST entry point preparing a PCA request in the NIK flow; delegates directly
 * to the NIK service.
 *
 * @param request validated APDU responses from the card
 * @return the prepared PCA response
 */
@Operation(summary = "prepare PCA for nik")
@PostMapping(value = Constants.URL_NIK_PREPARE_PCA, consumes = "application/json", produces = "application/json")
public PreparePcaResponse preparePcaRequestRestService(@Valid @RequestBody NikApduResponsesRequest request) {
    return nikService.preparePcaRequestRestService(request);
}
@Test
public void preparePcaRequestRestServiceTest() {
    // The controller is a thin delegate: the stubbed service response must be
    // returned unchanged.
    final PreparePcaResponse stubbedResponse = new PreparePcaResponse();
    when(nikServiceMock.preparePcaRequestRestService(any(NikApduResponsesRequest.class))).thenReturn(stubbedResponse);

    final PreparePcaResponse actualResponse = nikController.preparePcaRequestRestService(new NikApduResponsesRequest());

    assertEquals(stubbedResponse, actualResponse);
}
/**
 * Reads all lines from the stream into the given collection, decoding bytes
 * with the named charset.
 *
 * @param in          the stream to read from
 * @param charsetName name of the charset used for decoding
 * @param collection  the collection to append lines to
 * @return the same collection, for chaining
 * @throws IORuntimeException on I/O failure
 * @deprecated prefer the {@code Charset}-based overload; this variant only
 *             resolves the charset name and delegates
 */
@Deprecated
public static <T extends Collection<String>> T readLines(InputStream in, String charsetName, T collection) throws IORuntimeException {
    return readLines(in, CharsetUtil.charset(charsetName), collection);
}
@Test
public void readLinesTest() {
    // Every line handed to the handler must be non-null.
    try (BufferedReader reader = ResourceUtil.getUtf8Reader("test_lines.csv")) {
        IoUtil.readLines(reader, (LineHandler) Assertions::assertNotNull);
    } catch (IOException e) {
        throw new IORuntimeException(e);
    }
}
/**
 * Static factory creating a {@code RetryTransformer} bound to the given Retry.
 *
 * @param retry the retry policy instance to apply
 * @param <T>   the upstream element type
 * @return a transformer applying the retry policy
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
@Test
public void returnOnErrorUsingObservable() throws InterruptedException {
    RetryConfig config = retryConfig();
    Retry retry = Retry.of("testName", config);
    RetryTransformer<Object> retryTransformer = RetryTransformer.of(retry);
    // The service always fails, so each subscription exhausts its retry budget.
    given(helloWorldService.returnHelloWorld())
        .willThrow(new HelloWorldException());

    Observable.fromCallable(helloWorldService::returnHelloWorld)
        .compose(retryTransformer)
        .test()
        .await()
        .assertError(HelloWorldException.class)
        .assertNotComplete();
    Observable.fromCallable(helloWorldService::returnHelloWorld)
        .compose(retryTransformer)
        .test()
        .await()
        .assertError(HelloWorldException.class)
        .assertNotComplete();
    // Two subscriptions, each making 3 calls (1 attempt + retries) = 6 total;
    // consistent with the default retry budget — TODO confirm retryConfig().
    then(helloWorldService).should(times(6)).returnHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();
    // Both runs ended in failure despite retrying.
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(2);
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
}