focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static String maskHalfString(String str) { if (str == null) { return null; } else { // not null int halfLength = str.length() / 2; String masked = "*".repeat(halfLength) + str.substring(halfLength); return masked; } }
/** Verifies masking for even- and odd-length inputs. */
@Test
public void testMaskHalfString() {
    // Even length: exactly half the characters are masked.
    Assert.assertEquals("*****67890", StringUtils.maskHalfString("1234567890"));
    // Odd length: the shorter (floor) half is masked.
    Assert.assertEquals("****56789", StringUtils.maskHalfString("123456789"));
}
/**
 * Emits Camel REST DSL source for one OpenAPI operation when it passes the
 * configured filter: verb + path, id, description, consumes/produces media
 * types, parameters (operation-level then path-level), and the "to" URI.
 */
void visit(final PathItem.HttpMethod method, final Operation operation, final PathItem pathItem) {
    if (filter.accept(operation.getOperationId())) {
        final String methodName = method.name().toLowerCase();
        emitter.emit(methodName, path);
        emit("id", operation.getOperationId());
        emit("description", operation.getDescription());
        // Media types accepted by the request body, if any.
        Set<String> operationLevelConsumes = new LinkedHashSet<>();
        if (operation.getRequestBody() != null && operation.getRequestBody().getContent() != null) {
            operationLevelConsumes.addAll(operation.getRequestBody().getContent().keySet());
        }
        emit("consumes", operationLevelConsumes);
        // Media types produced across all responses, plus the DEFAULT response.
        Set<String> operationLevelProduces = new LinkedHashSet<>();
        if (operation.getResponses() != null) {
            for (ApiResponse response : operation.getResponses().values()) {
                if (response.getContent() != null) {
                    operationLevelProduces.addAll(response.getContent().keySet());
                }
            }
            // NOTE(review): DEFAULT may already be included by values() above;
            // duplicates are harmless because the set de-duplicates.
            ApiResponse response = operation.getResponses().get(ApiResponses.DEFAULT);
            if (response != null && response.getContent() != null) {
                operationLevelProduces.addAll(response.getContent().keySet());
            }
        }
        emit("produces", operationLevelProduces);
        // Operation-level parameters first, then parameters declared on the path item.
        if (ObjectHelper.isNotEmpty(operation.getParameters())) {
            operation.getParameters().forEach(this::emit);
        }
        if (ObjectHelper.isNotEmpty(pathItem.getParameters())) {
            pathItem.getParameters().forEach(this::emit);
        }
        emitOperation(operation);
        emitter.emit("to", destinationGenerator.generateDestinationFor(operation));
    }
}
/**
 * A parameter declared on the PathItem (not the Operation) must still be
 * emitted as a required path param in the generated route source.
 */
@Test
public void shouldEmitCodeForOas3ParameterInPath() {
    final Builder method = MethodSpec.methodBuilder("configure");
    final MethodBodySourceCodeEmitter emitter = new MethodBodySourceCodeEmitter(method);
    final OperationVisitor<?> visitor = new OperationVisitor<>(
        emitter, new OperationFilter(), "/path/{param}", new DefaultDestinationGenerator(), null);
    final Paths paths = new Paths();
    final PathItem path = new PathItem();
    paths.addPathItem("/path/{param}", path);
    final Operation operation = new Operation();
    final Parameter parameter = new Parameter();
    parameter.setName("param");
    parameter.setIn("path");
    // Attach the parameter to the path item, not the operation.
    path.addParametersItem(parameter);
    visitor.visit(PathItem.HttpMethod.GET, operation, path);
    assertThat(method.build().toString()).isEqualTo("void configure() {\n"
        + " get(\"/path/{param}\")\n"
        + " .param()\n"
        + " .name(\"param\")\n"
        + " .type(org.apache.camel.model.rest.RestParamType.path)\n"
        + " .required(true)\n"
        + " .endParam()\n"
        + " .to(\"direct:rest1\")\n"
        + " }\n");
}
public void completeDefaults(Props props) { // init string properties for (Map.Entry<Object, Object> entry : defaults().entrySet()) { props.setDefault(entry.getKey().toString(), entry.getValue().toString()); } boolean clusterEnabled = props.valueAsBoolean(CLUSTER_ENABLED.getKey(), false); if (!clusterEnabled) { props.setDefault(SEARCH_HOST.getKey(), InetAddress.getLoopbackAddress().getHostAddress()); props.setDefault(SEARCH_PORT.getKey(), "9001"); fixPortIfZero(props, Property.SEARCH_HOST.getKey(), SEARCH_PORT.getKey()); fixEsTransportPortIfNull(props); } }
@Test
public void completeDefaults_does_not_fall_back_to_default_if_transport_port_of_elasticsearch_set_in_standalone_mode() {
    Properties rawProperties = new Properties();
    rawProperties.setProperty("sonar.es.port", "9002");
    Props props = new Props(rawProperties);

    processProperties.completeDefaults(props);

    // An explicitly configured port must survive default completion.
    assertThat(props.valueAsInt("sonar.es.port")).isEqualTo(9002);
}
/**
 * Checks whether the configured bucket exists on the MinIO server.
 *
 * @return true when the bucket exists
 */
@SneakyThrows
public boolean isBucketExists() {
    BucketExistsArgs args = BucketExistsArgs.builder().bucket(bucketName).build();
    return minioClient.bucketExists(args);
}
@Test
void isBucketExists() {
    // The configured bucket is expected to be present on the test server.
    boolean exists = minioService.isBucketExists();
    System.out.println("bucketExists = " + exists);
    assert exists;
}
/**
 * Returns the monitoring period in milliseconds, defaulting to 5 seconds when
 * the property is not configured.
 */
@Override
public long getPeriod() {
    var configured = config.getLong(PERIOD_IN_MILISECONDS_PROPERTY);
    return configured.orElse(5_000L);
}
@Test
public void getPeriod_returnNumberFromConfig() {
    config.put("sonar.server.monitoring.webuptime.period", "100000");
    // The configured value wins over the built-in default.
    assertThat(underTest.getPeriod()).isEqualTo(100_000L);
}
/**
 * Replaces the entire source text with the given lines joined by '\n',
 * followed by a trailing newline.
 *
 * @param lines the new source lines
 */
public void replaceLines(List<String> lines) {
    // String.join replaces the former Guava Joiner call with the JDK equivalent.
    sourceBuilder.replace(0, sourceBuilder.length(), String.join("\n", lines) + "\n");
}
@Test
public void replaceLines() {
    sourceFile.replaceLines(Arrays.asList("Line1", "Line2"));
    // Lines are joined with '\n' and a trailing newline is appended.
    assertThat(sourceFile.getSourceText()).isEqualTo("Line1\nLine2\n");
}
/**
 * Runs a native git blame on the given file and parses its output.
 *
 * @param baseDir repository root; passed both as the git-dir argument and as
 *     a separate path argument (presumably a safe.directory override — the
 *     constants suggest it; confirm against their definitions)
 * @param fileName path of the file to blame, relative to baseDir
 * @return parsed blame lines, or an empty list when the file has uncommitted changes
 */
public List<BlameLine> blame(Path baseDir, String fileName) throws Exception {
    BlameOutputProcessor outputProcessor = new BlameOutputProcessor();
    try {
        this.processWrapperFactory.create(
            baseDir,
            outputProcessor::process,
            gitCommand,
            GIT_DIR_FLAG,
            String.format(GIT_DIR_ARGUMENT, baseDir),
            GIT_DIR_FORCE_FLAG,
            baseDir.toString(),
            BLAME_COMMAND,
            BLAME_LINE_PORCELAIN_FLAG,
            IGNORE_WHITESPACES,
            FILENAME_SEPARATOR_FLAG,
            fileName)
            .execute();
    } catch (UncommittedLineException e) {
        // Blame cannot attribute uncommitted lines; treat the file as not blamable.
        LOG.debug("Unable to blame file '{}' - it has uncommitted changes", fileName);
        return emptyList();
    }
    return outputProcessor.getBlameLines();
}
/**
 * Verifies that blame() forwards the repository path in the exact argument
 * positions for the git-dir related flags.
 */
@Test
public void git_blame_uses_safe_local_repository() throws Exception {
    File projectDir = createNewTempFolder();
    File baseDir = new File(projectDir, "dummy-git");
    ProcessWrapperFactory mockFactory = mock(ProcessWrapperFactory.class);
    ProcessWrapper mockProcess = mock(ProcessWrapper.class);
    String gitCommand = "git";
    // Stub every create() overload argument so the command never actually runs.
    when(mockFactory.create(any(), any(), anyString(), anyString(), anyString(), anyString(), anyString(), anyString(), anyString(), anyString(), anyString(), anyString()))
        .then(invocation -> mockProcess);
    NativeGitBlameCommand blameCommand = new NativeGitBlameCommand(gitCommand, System2.INSTANCE, mockFactory);
    blameCommand.blame(baseDir.toPath(), DUMMY_JAVA);
    verify(mockFactory).create(any(), any(),
        eq(gitCommand),
        eq(GIT_DIR_FLAG),
        eq(String.format(GIT_DIR_ARGUMENT, baseDir.toPath())),
        eq(GIT_DIR_FORCE_FLAG),
        eq(baseDir.toPath().toString()),
        eq(BLAME_COMMAND),
        anyString(), anyString(), anyString(),
        eq(DUMMY_JAVA));
}
/**
 * Delegates token creation to the currently active token manager.
 *
 * @param authentication the authentication to tokenize
 * @return the created token
 * @throws AccessException when the delegate rejects the request
 */
@Override
public String createToken(Authentication authentication) throws AccessException {
    return getExecuteTokenManager().createToken(authentication);
}
@Test
void testCreateToken1() throws AccessException {
    // The stubbed delegate is expected to return the fixed "token" value.
    String token = tokenManagerDelegate.createToken(authentication);
    assertEquals("token", token);
}
/** @return true once the assigned task count meets or exceeds this client's capacity. */
boolean reachedCapacity() {
    return !(assignedTaskCount() < capacity);
}
@Test
public void shouldHaveNotReachedCapacityWhenAssignedTasksLessThanCapacity() {
    // A client with fewer assigned tasks than capacity is not yet full.
    assertFalse(client.reachedCapacity());
}
/**
 * Sets the working directory used when executing this task.
 * No validation happens here — the path is presumably checked later during
 * config validation (see the template-validation tests); confirm.
 *
 * @param workingDir the working directory path
 */
public void setWorkingDirectory(String workingDir) {
    this.workingDirectory = workingDir;
}
/**
 * A template task whose working directory escapes the sandbox must surface a
 * validation error on the task after preprocessing.
 */
@Test
public void shouldErrorOutIfWorkingDirectoryIsOutsideTheCurrentWorkingDirectoryForTemplates() {
    CruiseConfig config = GoConfigMother.configWithPipelines("pipeline-blah");
    BuildTask task = new AntTask();
    // Absolute path — outside the agent's working directory.
    task.setWorkingDirectory("/blah");
    StageConfig stageConfig = StageConfigMother.manualStage("manualStage");
    stageConfig.getJobs().get(0).addTask(task);
    PipelineTemplateConfig template = new PipelineTemplateConfig(new CaseInsensitiveString("some-template"), stageConfig);
    config.addTemplate(template);
    List<ConfigErrors> errors = config.validateAfterPreprocess();
    assertThat(errors.size(), is(1));
    String message = "Task of job 'default' in stage 'manualStage' of template 'some-template' has path '/blah' which is outside the working directory.";
    assertThat(task.errors().on(BuildTask.WORKING_DIRECTORY), is(message));
}
/**
 * Sends this request synchronously over the configured web3j service.
 *
 * @return the deserialized response of {@code responseType}
 * @throws IOException on transport failure
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
@Test
public void testEthMaxPriorityFeePerGas() throws Exception {
    web3j.ethMaxPriorityFeePerGas().send();

    // The request must serialize to the canonical JSON-RPC payload.
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_maxPriorityFeePerGas\",\"params\":[],\"id\":1}");
}
/**
 * Aggregates values per window with the given initializer and aggregator,
 * materialized with the stream's key serde and no explicit value serde.
 */
@Override
public <VR> KTable<Windowed<K>, VR> aggregate(final Initializer<VR> initializer, final Aggregator<? super K, ? super V, VR> aggregator) {
    return aggregate(initializer, aggregator, Materialized.with(keySerde, null));
}
@Test
public void shouldThrowNullPointerOnAggregateIfInitializerIsNull() {
    // A null initializer must be rejected eagerly.
    assertThrows(
        NullPointerException.class,
        () -> windowedStream.aggregate(null, MockAggregator.TOSTRING_ADDER));
}
/**
 * Returns the partial-update mode of this load job.
 * RoutineLoad jobs only support row mode.
 */
public String getPartialUpdateMode() {
    return "row";
}
/** RoutineLoad jobs always report row-mode partial updates. */
@Test
public void testPartialUpdateMode(@Mocked GlobalStateMgr globalStateMgr) {
    RoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob();
    // JUnit convention: expected value first, actual second (was reversed).
    Assert.assertEquals("row", routineLoadJob.getPartialUpdateMode());
}
/**
 * Creates the given topics through the Kafka Admin API and partitions the
 * per-topic outcomes into successes and failures. A TopicExistsException is
 * treated as success — the next reconciliation verifies the configuration.
 *
 * @param kts topics to create
 * @return the per-topic results partitioned by error
 */
private PartitionedByError<ReconcilableTopic, Void> createTopics(List<ReconcilableTopic> kts) {
    var newTopics = kts.stream().map(reconcilableTopic -> {
        // Admin create
        return buildNewTopic(reconcilableTopic.kt(), reconcilableTopic.topicName());
    }).collect(Collectors.toSet());
    LOGGER.debugOp("Admin.createTopics({})", newTopics);
    var timerSample = TopicOperatorUtil.startExternalRequestTimer(metrics, enableAdditionalMetrics);
    CreateTopicsResult ctr = admin.createTopics(newTopics);
    ctr.all().whenComplete((i, e) -> {
        // Stop the external-request timer regardless of the outcome.
        TopicOperatorUtil.stopExternalRequestTimer(timerSample, metrics::createTopicsTimer, enableAdditionalMetrics, namespace);
        if (e != null) {
            LOGGER.traceOp("Admin.createTopics({}) failed with {}", newTopics, String.valueOf(e));
        } else {
            LOGGER.traceOp("Admin.createTopics({}) completed", newTopics);
        }
    });
    Map<String, KafkaFuture<Void>> values = ctr.values();
    return partitionedByError(kts.stream().map(reconcilableTopic -> {
        try {
            // Block on each topic's future; on success record the broker-assigned topic id.
            values.get(reconcilableTopic.topicName()).get();
            reconcilableTopic.kt().setStatus(new KafkaTopicStatusBuilder()
                .withTopicId(ctr.topicId(reconcilableTopic.topicName()).get().toString()).build());
            return new Pair<>(reconcilableTopic, Either.ofRight((null)));
        } catch (ExecutionException e) {
            if (e.getCause() != null && e.getCause() instanceof TopicExistsException) {
                // we treat this as a success, the next reconciliation checks the configuration
                return new Pair<>(reconcilableTopic, Either.ofRight((null)));
            } else {
                return new Pair<>(reconcilableTopic, Either.ofLeft(handleAdminException(e)));
            }
        } catch (InterruptedException e) {
            throw new UncheckedInterruptedException(e);
        }
    }));
}
/**
 * When listPartitionReassignments is interrupted, the operator update must
 * surface an InterruptedException rather than hang or swallow it.
 */
@Test
public void shouldHandleInterruptedExceptionFromListReassignments(
        @BrokerCluster(numBrokers = 2) KafkaCluster cluster) throws ExecutionException, InterruptedException {
    var topicName = "my-topic";
    kafkaAdminClient[0] = Admin.create(Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()));
    kafkaAdminClient[0].createTopics(List.of(new NewTopic(topicName, 2, (short) 2))).all().get();
    // Spy returns an already-interrupted future from reassignments().
    var kafkaAdminClientSpy = Mockito.spy(kafkaAdminClient[0]);
    var result = Mockito.mock(ListPartitionReassignmentsResult.class);
    Mockito.doReturn(interruptedFuture()).when(result).reassignments();
    Mockito.doReturn(result).when(kafkaAdminClientSpy).listPartitionReassignments(any(Set.class));
    KafkaTopic kafkaTopic = createKafkaTopic(topicName);
    assertOnUpdateThrowsInterruptedException(kafkaAdminClientSpy, kafkaTopic);
}
/**
 * Human-readable representation of this header; numeric fields are rendered
 * in decimal (bytes as signed values).
 */
@Override
public String toString() {
    return toStringHelper(getClass())
        .add("nextHeader", Byte.toString(nextHeader))
        .add("payloadLength", Byte.toString(payloadLength))
        .add("securityParamIndex", Integer.toString(securityParamIndex))
        .add("sequence", Integer.toString(sequence))
        .add("integrityCheck", Arrays.toString(integrityCheck))
        .toString();
}
@Test
public void testToStringAuthentication() throws Exception {
    Authentication auth = deserializer.deserialize(bytePacket, 0, bytePacket.length);
    String repr = auth.toString();

    // Every field is rendered in decimal by toString().
    assertTrue(StringUtils.contains(repr, "nextHeader=" + (byte) 0x11));
    assertTrue(StringUtils.contains(repr, "payloadLength=" + (byte) 0x02));
    assertTrue(StringUtils.contains(repr, "securityParamIndex=" + 0x13572468));
    assertTrue(StringUtils.contains(repr, "sequence=" + 0xffff00));
    assertTrue(StringUtils.contains(repr, "integrityCheck=" + Arrays.toString(icv)));
}
/**
 * Consumes the next token, which must open a JSON array, and pushes the
 * corresponding scope.
 *
 * @throws IOException if the next token is not BEGIN_ARRAY
 */
public void beginArray() throws IOException {
    int token = peeked;
    if (token == PEEKED_NONE) {
        token = doPeek();
    }
    if (token != PEEKED_BEGIN_ARRAY) {
        throw unexpectedTokenError("BEGIN_ARRAY");
    }
    push(JsonScope.EMPTY_ARRAY);
    // A fresh array starts indexing its elements at 0.
    pathIndices[stackSize - 1] = 0;
    peeked = PEEKED_NONE;
}
@Test
public void testStrictNonExecutePrefix() {
    JsonReader reader = new JsonReader(reader(")]}'\n []"));
    // Strict mode rejects the ")]}'" non-execute prefix.
    MalformedJsonException e = assertThrows(MalformedJsonException.class, reader::beginArray);
    assertStrictError(e, "line 1 column 1 path $");
}
/**
 * Creates a {@code Write} transform pre-configured with the default client
 * configuration.
 *
 * @param <T> element type of the publish requests
 */
public static <T> Write<T> write() {
    return new AutoValue_SnsIO_Write.Builder<T>()
        .setClientConfiguration(ClientConfiguration.builder().build())
        .build();
}
/**
 * With no topic validation configured, expand() must never call
 * GetTopicAttributes on the SNS client.
 */
@Test
public void testSkipTopicValidation() {
    PCollection<String> input = mock(PCollection.class);
    when(input.getPipeline()).thenReturn(p);
    when(input.apply(any(PTransform.class))).thenReturn(mock(PCollection.class));
    Write<String> snsWrite = SnsIO.<String>write().withPublishRequestBuilder(msg -> requestBuilder(msg, topicArn));
    snsWrite.expand(input);
    // Zero interactions: validation was skipped.
    verify(sns, times(0)).getTopicAttributes(any(Consumer.class));
}
/**
 * Down-samples this metric to hour granularity: identity fields are copied and
 * the current value carries over unchanged.
 */
@Override
public Metrics toHour() {
    final MaxFunction hourly = (MaxFunction) createNew();
    hourly.setEntityId(getEntityId());
    hourly.setTimeBucket(toTimeBucketInHour());
    hourly.setServiceId(getServiceId());
    hourly.setValue(getValue());
    return hourly;
}
/**
 * Down-sampling to hour granularity must preserve the maximum over the
 * accepted samples.
 */
@Test
public void testToHour() {
    function.setTimeBucket(TimeBucket.getMinuteTimeBucket(System.currentTimeMillis()));
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), LARGE_VALUE);
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), SMALL_VALUE);
    function.calculate();
    final MaxFunction hourFunction = (MaxFunction) function.toHour();
    hourFunction.calculate();
    assertThat(hourFunction.getValue()).isEqualTo(LARGE_VALUE);
}
/**
 * Tells whether the given call data targets the EIP-3668 CCIP-read interface.
 *
 * @param data hex call data, may be null
 * @return true when the leading 10 characters match the interface id
 */
public static boolean isEIP3668(String data) {
    if (data != null && data.length() >= 10) {
        String selector = data.substring(0, 10);
        return EnsUtils.EIP_3668_CCIP_INTERFACE_ID.equals(selector);
    }
    return false;
}
@Test
void isEIP3668WhenEmptyOrLessLength() {
    // Inputs shorter than 10 characters cannot contain the interface selector.
    assertFalse(EnsUtils.isEIP3668(""));
    assertFalse(EnsUtils.isEIP3668("123456789"));
}
/**
 * Converts a {@code FunctionConfig} into {@code FunctionDetails} without
 * validating against a function package.
 */
public static FunctionDetails convert(FunctionConfig functionConfig) {
    return convert(functionConfig, (ValidatableFunctionPackage) null);
}
@Test
public void testAutoAckConvertFailed() {
    FunctionConfig functionConfig = new FunctionConfig();
    functionConfig.setAutoAck(false);
    functionConfig.setProcessingGuarantees(FunctionConfig.ProcessingGuarantees.ATMOST_ONCE);

    // autoAck=false combined with ATMOST_ONCE is an invalid configuration.
    assertThrows(IllegalArgumentException.class, () -> FunctionConfigUtils.convert(functionConfig));
}
/**
 * Re-reads the component configuration and (de)registers packet intercepts
 * according to the refreshed requestInterceptsEnabled flag.
 */
@Modified
public void modified(ComponentContext context) {
    readComponentConfiguration(context);
    if (!requestInterceptsEnabled) {
        withdrawIntercepts();
    } else {
        requestIntercepts();
    }
}
/**
 * Removing a device must purge the host locations learned on it; the remove
 * counter grows with each device removal event.
 */
@Test
public void removeHostByDeviceRemove() {
    provider.modified(CTX_FOR_REMOVE);
    // Learn hosts on DEV1 (ARP) and DEV4 (IPv6 neighbor advertisement).
    testProcessor.process(new TestArpPacketContext(DEV1));
    testProcessor.process(new TestNaPacketContext(DEV4));
    Device device = new DefaultDevice(ProviderId.NONE, deviceId(DEV1), SWITCH, "m", "h", "s", "n", new ChassisId(0L));
    deviceService.listener.event(new DeviceEvent(DEVICE_REMOVED, device));
    assertEquals("incorrect remove count", 2, providerService.locationRemoveCount);
    device = new DefaultDevice(ProviderId.NONE, deviceId(DEV4), SWITCH, "m", "h", "s", "n", new ChassisId(0L));
    deviceService.listener.event(new DeviceEvent(DEVICE_REMOVED, device));
    assertEquals("incorrect remove count", 3, providerService.locationRemoveCount);
}
/**
 * Tags the request with a generated id — exposed through the MDC storage and
 * the "ID" request attribute — for the duration of the chain, when an id
 * generator component is available; otherwise just runs the chain.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
    var requestIdGenerator = platform.getContainer().getOptionalComponentByType(RequestIdGenerator.class);
    if (requestIdGenerator.isPresent()) {
        String requestId = requestIdGenerator.get().generate();
        // try-with-resources guarantees the MDC entry is cleared afterwards.
        try (RequestIdMDCStorage mdcStorage = new RequestIdMDCStorage(requestId)) {
            request.setAttribute("ID", requestId);
            chain.doFilter(request, response);
        }
    } else {
        chain.doFilter(request, response);
    }
}
/**
 * The generated request id must be visible in the MDC while the chain runs
 * and cleared once doFilter returns.
 */
@Test
public void filter_put_id_in_MDC_and_remove_it_after_chain_has_executed() throws IOException, ServletException {
    String requestId = "request id";
    when(requestIdGenerator.generate()).thenReturn(requestId);
    // Assert from inside the chain: the id is present mid-request.
    doAnswer(invocation -> assertThat(MDC.get("HTTP_REQUEST_ID")).isEqualTo(requestId))
        .when(filterChain)
        .doFilter(servletRequest, servletResponse);
    underTest.doFilter(servletRequest, servletResponse, filterChain);
    // After the filter completes, the MDC entry must be gone.
    assertThat(MDC.get("HTTP_REQUEST_ID")).isNull();
}
/**
 * Serializes the given column values as one CSV record.
 *
 * @param topic unused
 * @param data column values, may be null
 * @return UTF-8 bytes of the record without the trailing record separator,
 *     or null when data is null
 * @throws SerializationException wrapping any failure
 */
@Override
public byte[] serialize(final String topic, final List<?> data) {
    if (data == null) {
        return null;
    }
    try {
        final StringWriter stringWriter = new StringWriter();
        final CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat);
        csvPrinter.printRecord(() -> new FieldIterator(data, schema));
        final String result = stringWriter.toString();
        // Strip the trailing record separator appended by printRecord.
        // NOTE(review): assumes a 2-character separator (e.g. "\r\n", the
        // CSVFormat default) — confirm against the injected csvFormat.
        return result.substring(0, result.length() - 2).getBytes(StandardCharsets.UTF_8);
    } catch (final Exception e) {
        throw new SerializationException("Error serializing CSV message", e);
    }
}
@Test public void shouldSerializeOneHalfDecimalWithPaddedZeros() { // Given: givenSingleColumnSerializer(SqlTypes.decimal(4, 2)); final List<?> values = Collections.singletonList(new BigDecimal("0.50")); // When: final byte[] bytes = serializer.serialize("", values); // Then: assertThat(new String(bytes, StandardCharsets.UTF_8), is("0.50")); }
/**
 * Reads and dispatches one MySQL protocol command, finalizes the response,
 * and resets the command state to COM_SLEEP.
 *
 * @throws IOException on channel read failure
 */
public void processOnce() throws IOException {
    // set status of query to OK.
    ctx.getState().reset();
    executor = null;
    // reset sequence id of MySQL protocol
    final MysqlChannel channel = ctx.getMysqlChannel();
    channel.setSequenceId(0);
    // read packet from channel
    try {
        packetBuf = channel.fetchOnePacket();
        if (packetBuf == null) {
            throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
        }
    } catch (AsynchronousCloseException e) {
        // when this happened, timeout checker close this channel
        // killed flag in ctx has been already set, just return
        return;
    }
    // dispatch
    dispatch();
    // finalize
    finalizeCommand();
    ctx.setCommand(MysqlCommand.COM_SLEEP);
}
/**
 * COM_QUIT must produce an OK response packet and mark the connection killed.
 */
@Test
public void testQuit() throws IOException {
    ConnectContext ctx = initMockContext(mockChannel(quitPacket), GlobalStateMgr.getCurrentState());
    ConnectProcessor processor = new ConnectProcessor(ctx);
    processor.processOnce();
    Assert.assertEquals(MysqlCommand.COM_QUIT, myContext.getCommand());
    Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlOkPacket);
    Assert.assertTrue(myContext.isKilled());
}
@Override public Set<ServiceInstance> getServiceInstance(String serviceName) { CountDownLatch countDownLatch = new CountDownLatch(1); // first check the cache and return if service instance exists if (XdsDataCache.isContainsRequestObserver(serviceName)) { return XdsDataCache.getServiceInstance(serviceName); } // locking ensures that a service only creates one stream LOCK.lock(); try { // check the cache again after locking and return if service instance exists if (XdsDataCache.isContainsRequestObserver(serviceName)) { return XdsDataCache.getServiceInstance(serviceName); } edsHandler.subscribe(serviceName, countDownLatch); } finally { LOCK.unlock(); } try { countDownLatch.await(TIMEOUT, TimeUnit.SECONDS); } catch (InterruptedException e) { LOGGER.log(Level.WARNING, "Occur InterruptedException when wait server send message.", e); } return XdsDataCache.getServiceInstance(serviceName); }
/**
 * First lookup must create the EDS stream (request observer cached); once
 * instances are cached they are returned directly.
 */
@Test
public void getServiceInstance() {
    // clear data
    XdsDataCache.removeServiceInstance(serviceName);
    XdsDataCache.removeRequestObserver(serviceName);
    // no service instance in cache
    Set<ServiceInstance> result = xdsServiceDiscovery.getServiceInstance(serviceName);
    Assert.assertNotNull(XdsDataCache.getRequestObserver(serviceName));
    // service instance in cache
    Set<ServiceInstance> instances = new HashSet<>();
    instances.add(new XdsServiceInstance());
    XdsDataCache.updateServiceInstance(serviceName, instances);
    result = xdsServiceDiscovery.getServiceInstance(serviceName);
    Assert.assertEquals(1, result.size());
}
/** @return true when this entity's qualifier is an application or a project. */
public boolean isProjectOrApp() {
    if (Qualifiers.APP.equals(qualifier)) {
        return true;
    }
    return isProject();
}
@Test
void isProjectOrApp_whenQualifierIsProject_shouldReturnTrue() {
    ProjectDto projectDto = new ProjectDto();
    projectDto.setQualifier(Qualifiers.PROJECT);

    // A PROJECT qualifier satisfies the project-or-app check.
    assertThat(projectDto.isProjectOrApp()).isTrue();
}
/**
 * Validates and stores a new instance port, then logs its creation.
 *
 * @param instancePort port to create; must be non-null with a non-empty id
 */
@Override
public void createInstancePort(InstancePort instancePort) {
    checkNotNull(instancePort, ERR_NULL_INSTANCE_PORT);
    checkArgument(!Strings.isNullOrEmpty(instancePort.portId()), ERR_NULL_INSTANCE_PORT_ID);

    instancePortStore.createInstancePort(instancePort);
    log.info(String.format(MSG_INSTANCE_PORT, instancePort.portId(), MSG_CREATED));
}
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateInstancePort() {
    // Creating the same port twice must be rejected.
    target.createInstancePort(instancePort1);
    target.createInstancePort(instancePort1);
}
public static org.apache.flink.table.types.DataType toFlinkDataType(DataType type) { // ordered by type root definition List<DataType> children = type.getChildren(); int length = DataTypes.getLength(type).orElse(0); int precision = DataTypes.getPrecision(type).orElse(0); int scale = DataTypes.getScale(type).orElse(0); switch (type.getTypeRoot()) { case CHAR: return type.isNullable() ? org.apache.flink.table.api.DataTypes.CHAR(length) : org.apache.flink.table.api.DataTypes.CHAR(length).notNull(); case VARCHAR: return type.isNullable() ? org.apache.flink.table.api.DataTypes.VARCHAR(length) : org.apache.flink.table.api.DataTypes.VARCHAR(length).notNull(); case BOOLEAN: return type.isNullable() ? org.apache.flink.table.api.DataTypes.BOOLEAN() : org.apache.flink.table.api.DataTypes.BOOLEAN().notNull(); case BINARY: return type.isNullable() ? org.apache.flink.table.api.DataTypes.BINARY(length) : org.apache.flink.table.api.DataTypes.BINARY(length).notNull(); case VARBINARY: return type.isNullable() ? org.apache.flink.table.api.DataTypes.VARBINARY(length) : org.apache.flink.table.api.DataTypes.VARBINARY(length).notNull(); case DECIMAL: return type.isNullable() ? org.apache.flink.table.api.DataTypes.DECIMAL(precision, scale) : org.apache.flink.table.api.DataTypes.DECIMAL(precision, scale).notNull(); case TINYINT: return type.isNullable() ? org.apache.flink.table.api.DataTypes.TINYINT() : org.apache.flink.table.api.DataTypes.TINYINT().notNull(); case SMALLINT: return type.isNullable() ? org.apache.flink.table.api.DataTypes.SMALLINT() : org.apache.flink.table.api.DataTypes.SMALLINT().notNull(); case INTEGER: return type.isNullable() ? org.apache.flink.table.api.DataTypes.INT() : org.apache.flink.table.api.DataTypes.INT().notNull(); case DATE: return type.isNullable() ? org.apache.flink.table.api.DataTypes.DATE() : org.apache.flink.table.api.DataTypes.DATE().notNull(); case TIME_WITHOUT_TIME_ZONE: return type.isNullable() ? 
org.apache.flink.table.api.DataTypes.TIME(precision) : org.apache.flink.table.api.DataTypes.TIME(precision).notNull(); case BIGINT: return type.isNullable() ? org.apache.flink.table.api.DataTypes.BIGINT() : org.apache.flink.table.api.DataTypes.BIGINT().notNull(); case FLOAT: return type.isNullable() ? org.apache.flink.table.api.DataTypes.FLOAT() : org.apache.flink.table.api.DataTypes.FLOAT().notNull(); case DOUBLE: return type.isNullable() ? org.apache.flink.table.api.DataTypes.DOUBLE() : org.apache.flink.table.api.DataTypes.DOUBLE().notNull(); case TIMESTAMP_WITHOUT_TIME_ZONE: return type.isNullable() ? org.apache.flink.table.api.DataTypes.TIMESTAMP(precision) : org.apache.flink.table.api.DataTypes.TIMESTAMP(precision).notNull(); case TIMESTAMP_WITH_LOCAL_TIME_ZONE: return type.isNullable() ? org.apache.flink.table.api.DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE( precision) : org.apache.flink.table.api.DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE( precision) .notNull(); case TIMESTAMP_WITH_TIME_ZONE: return type.isNullable() ? org.apache.flink.table.api.DataTypes.TIMESTAMP_WITH_TIME_ZONE(precision) : org.apache.flink.table.api.DataTypes.TIMESTAMP_WITH_TIME_ZONE(precision) .notNull(); case ARRAY: Preconditions.checkState(children != null && !children.isEmpty()); return type.isNullable() ? org.apache.flink.table.api.DataTypes.ARRAY( toFlinkDataType(children.get(0))) : org.apache.flink.table.api.DataTypes.ARRAY( toFlinkDataType(children.get(0)).notNull()); case MAP: Preconditions.checkState(children != null && children.size() > 1); return type.isNullable() ? 
org.apache.flink.table.api.DataTypes.MAP( toFlinkDataType(children.get(0)), toFlinkDataType(children.get(1))) : org.apache.flink.table.api.DataTypes.MAP( toFlinkDataType(children.get(0)), toFlinkDataType(children.get(1)).notNull()); case ROW: Preconditions.checkState(!CollectionUtil.isNullOrEmpty(children)); RowType rowType = (RowType) type; List<org.apache.flink.table.api.DataTypes.Field> fields = rowType.getFields().stream() .map(DataField::toFlinkDataTypeField) .collect(Collectors.toList()); return type.isNullable() ? org.apache.flink.table.api.DataTypes.ROW(fields) : org.apache.flink.table.api.DataTypes.ROW(fields).notNull(); default: throw new IllegalArgumentException("Illegal type: " + type); } }
/**
 * Converts every supported Paimon type to its Flink counterpart and checks
 * the full row against the expected Flink DataType.
 */
@Test
void testToFlinkDataType() {
    List<DataField> list = IntStream.range(0, ALL_TYPES.length)
        .mapToObj(i -> DataTypes.FIELD("f" + i, ALL_TYPES[i]))
        .collect(Collectors.toList());
    org.apache.flink.table.types.DataType dataType = DataTypeUtils.toFlinkDataType(new RowType(list));
    org.apache.flink.table.types.DataType expectedDataType = ROW(
        FIELD("f0", BOOLEAN()),
        FIELD("f1", BYTES()),
        FIELD("f2", BINARY(10)),
        FIELD("f3", VARBINARY(10)),
        FIELD("f4", CHAR(10)),
        FIELD("f5", VARCHAR(10)),
        FIELD("f6", STRING()),
        FIELD("f7", INT()),
        FIELD("f8", TINYINT()),
        FIELD("f9", SMALLINT()),
        FIELD("f10", BIGINT()),
        FIELD("f11", DOUBLE()),
        FIELD("f12", FLOAT()),
        FIELD("f13", DECIMAL(6, 3)),
        FIELD("f14", DATE()),
        FIELD("f15", TIME()),
        FIELD("f16", TIME(6)),
        FIELD("f17", TIMESTAMP()),
        FIELD("f18", TIMESTAMP(6)),
        FIELD("f19", TIMESTAMP_LTZ()),
        FIELD("f20", TIMESTAMP_LTZ(6)),
        FIELD("f21", TIMESTAMP_WITH_TIME_ZONE()),
        FIELD("f22", TIMESTAMP_WITH_TIME_ZONE(6)),
        FIELD("f23", ARRAY(BIGINT())),
        FIELD("f24", MAP(SMALLINT(), STRING())),
        FIELD("f25", ROW(FIELD("f1", STRING()), FIELD("f2", STRING(), "desc"))),
        FIELD("f26", ROW(SMALLINT(), STRING())));
    assertThat(dataType).isEqualTo(expectedDataType);
}
/**
 * Generates request traffic by repeatedly PUTting and GETting greetings, with
 * a pause between iterations so the calls spread over time and produce metrics.
 *
 * @param numberOfGreetings number of loop iterations
 * @param timeBetweenSendsInMillis pause between iterations, in milliseconds
 * @throws InterruptedException if the sleep is interrupted
 */
public void sendGreetings(Integer numberOfGreetings, Long timeBetweenSendsInMillis) throws InterruptedException {
    final String greetingZeroId = "greeting-00";
    final Greeting greetingZero = new Greeting();
    greetingZero.setId(greetingZeroId);
    greetingZero.setName(greetingZeroId);
    for (int i = 0; i < numberOfGreetings; i++) {
        LOGGER.info("Sending greeting #{}", i + 1);
        // NOTE(review): "%2d" space-pads, producing ids like "greeting- 1" —
        // inconsistent with the zero-padded "greeting-00" above. "%02d" was
        // probably intended, but would collide with greetingZeroId at i=0;
        // confirm the intended id scheme before changing.
        final String greetingId = String.format("greeting-%2d", i);
        final Greeting greeting = new Greeting();
        greeting.setId(greetingId);
        greeting.setName(greetingId);
        putGreeting(greeting);
        putGreeting(greetingZero);
        getGreeting(greetingId);
        getGreeting(greetingZeroId);
        // Briefly pause the loop to extend the requests over a longer period to generate metrics
        // for this example.
        Thread.sleep(timeBetweenSendsInMillis);
    }
}
/**
 * Integration test: sends greeting traffic and asserts that ApiCallDuration
 * metrics arrive in a per-run CloudWatch namespace.
 */
@Test
@Tag("IntegrationTest")
void sendGreetings() {
    // Random numeric suffix keeps namespaces unique per run. The previous code
    // concatenated `new Random(1000)` itself, appending Random#toString
    // (e.g. "java.util.Random@1b2c3d") instead of a random number.
    String namespace = "DynamoDBMetricsExample" + "Text" + new Random().nextInt(1000);
    GreetingsSender greetingsSender = new GreetingsSender(REGION, Duration.ofSeconds(15L), namespace);
    greetingsSender.createTable();
    try {
        greetingsSender.sendGreetings(5, 2500L);
        // Give CloudWatch time to ingest the published metrics.
        Thread.sleep(Duration.ofSeconds(30L).toMillis());
        ListMetricsResponse listMetricsResponse = cloudWatchClient.listMetrics(b -> b
            .namespace(namespace)
            .metricName(CoreMetric.API_CALL_DURATION.name()));
        Assertions.assertFalse(listMetricsResponse.metrics().isEmpty(),
            "No ApiCallDuration metric received by CloudWatch");
    } catch (InterruptedException | SdkException e) {
        // Restore the interrupt flag in case the sleep was interrupted.
        Thread.currentThread().interrupt();
        LOGGER.error(e.getMessage(), e);
    } finally {
        greetingsSender.deleteTable();
        greetingsSender.close();
    }
}
/**
 * Creates a directory. A container path provisions a new bucket with the
 * requested location and storage class; any other path creates a placeholder
 * object marking the folder.
 *
 * @param folder directory to create
 * @param status transfer options (region, storage class, mime type)
 * @return the created path with volume/placeholder type and refreshed attributes
 * @throws BackgroundException wrapping any Google Storage API failure
 */
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
    try {
        if(containerService.isContainer(folder)) {
            final Storage.Buckets.Insert request = session.getClient().buckets().insert(session.getHost().getCredentials().getUsername(), new Bucket()
                .setLocation(status.getRegion())
                .setStorageClass(status.getStorageClass())
                .setName(containerService.getContainer(folder).getName()));
            final Bucket bucket = request.execute();
            final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
            type.add(Path.Type.volume);
            return folder.withType(type).withAttributes(new GoogleStorageAttributesFinderFeature(session).toAttributes(bucket));
        }
        else {
            final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
            type.add(Path.Type.placeholder);
            // Add placeholder object
            return new GoogleStorageTouchFeature(session).withWriter(writer).touch(folder.withType(type), status.withMime(MIMETYPE));
        }
    }
    catch(IOException e) {
        throw new GoogleStorageExceptionMappingService().map("Cannot create folder {0}", e, folder);
    }
}
/**
 * Creating a placeholder directory must make it visible through find/list,
 * and deleting it must remove it from all lookups.
 */
@Test
public void testCreatePlaceholderVersioningDelete() throws Exception {
    final Path bucket = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new GoogleStorageDirectoryFeature(session).mkdir(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(test.getType().contains(Path.Type.placeholder));
    assertTrue(new GoogleStorageFindFeature(session).find(test));
    assertTrue(new GoogleStorageObjectListService(session).list(bucket, new DisabledListProgressListener()).contains(test));
    new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    // After deletion, neither listing nor either find feature may see the path.
    assertFalse(new GoogleStorageObjectListService(session).list(bucket, new DisabledListProgressListener()).contains(test));
    assertFalse(new DefaultFindFeature(session).find(test));
    assertFalse(new GoogleStorageFindFeature(session).find(test));
}
/**
 * No-op: this implementation does not support cluster-scoped subscription.
 * NOTE(review): intentionally empty — confirm callers do not rely on
 * notifications from this overload.
 */
@Override
public void subscribe(String cluster, NamingListener listener) throws Exception {
}
/**
 * Subscribing to a vGroup must trigger a notification once the group is
 * created in the cluster; unsubscribe must succeed afterwards.
 */
@Test
public void testSubscribe() throws Exception {
    NamingserverRegistryServiceImpl registryService = NamingserverRegistryServiceImpl.getInstance();
    AtomicBoolean isNotified = new AtomicBoolean(false);
    //1.subscribe
    registryService.subscribe(vGroup -> {
        try {
            isNotified.set(true);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }, "group2");
    //2.register
    InetSocketAddress inetSocketAddress = new InetSocketAddress("127.0.0.1", 8088);
    registryService.register(inetSocketAddress);
    String namespace = FILE_CONFIG.getConfig("registry.namingserver.namespace");
    createGroupInCluster(namespace, "group2", "cluster1");
    //3.check
    // NOTE(review): arguments are (actual, expected) — reversed relative to
    // the JUnit convention, though the comparison still passes.
    assertEquals(isNotified.get(), true);
    registryService.unsubscribe("group2");
}
/**
 * Returns the number of entries in the stream, blocking on the async variant.
 */
@Override
public long size() {
    return get(sizeAsync());
}
/**
 * size() reflects the number of entries added. With trimNonStrict().noLimit()
 * trimming is deferred, so both adds remain visible despite maxLen(1).
 * NOTE(review): relies on approximate (~ MAXLEN) trimming semantics — confirm
 * against the Redis XADD documentation.
 */
@Test
public void testSize() {
    RStream<String, String> stream = redisson.getStream("test");
    assertThat(stream.size()).isEqualTo(0);
    Map<String, String> entries1 = new HashMap<>();
    entries1.put("1", "11");
    entries1.put("3", "31");
    stream.add(new StreamMessageId(1), StreamAddArgs.entries(entries1).trimNonStrict().maxLen(1).noLimit());
    assertThat(stream.size()).isEqualTo(1);
    Map<String, String> entries2 = new HashMap<>();
    entries2.put("5", "55");
    entries2.put("7", "77");
    stream.add(new StreamMessageId(2), StreamAddArgs.entries(entries2).trimNonStrict().maxLen(1).noLimit());
    assertThat(stream.size()).isEqualTo(2);
}
/**
 * Stores a new Kubernetes host after a null check, then logs its creation.
 *
 * @param host host to create; must not be null
 */
@Override
public void createHost(K8sHost host) {
    checkNotNull(host, ERR_NULL_HOST);

    hostStore.createHost(host);
    log.info(String.format(MSG_HOST, host.hostIp().toString(), MSG_CREATED));
}
@Test(expected = NullPointerException.class)
public void testCreateNullHost() {
    // Null hosts are rejected outright.
    target.createHost(null);
}
/**
 * Parses the SQL hint comment embedded in {@code sql} into a {@link HintValueContext}.
 * Returns an empty context when no hint is present.
 */
public static HintValueContext extractHint(final String sql) {
    if (!containsSQLHint(sql)) {
        return new HintValueContext();
    }
    HintValueContext result = new HintValueContext();
    // Extract the raw key=value text between the hint prefix and the comment suffix.
    int hintKeyValueBeginIndex = getHintKeyValueBeginIndex(sql);
    String hintKeyValueText = sql.substring(hintKeyValueBeginIndex, sql.indexOf(SQL_COMMENT_SUFFIX, hintKeyValueBeginIndex));
    Map<String, String> hintKeyValues = getSQLHintKeyValues(hintKeyValueText);
    // Well-known scalar hint keys.
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY)) {
        result.setDataSourceName(getHintValue(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)) {
        result.setWriteRouteOnly(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)) {
        result.setSkipSQLRewrite(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY)) {
        // Multi-valued: split by the project-defined splitter (see getSplitterSQLHintValue).
        String property = getHintValue(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY);
        result.getDisableAuditNames().addAll(getSplitterSQLHintValue(property));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)) {
        result.setShadow(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)));
    }
    // Sharding value hints carry arbitrary key suffixes, so every pair is scanned.
    // Values are converted (e.g. to numbers) where possible; non-Comparable results
    // fall back to their string representation. Keys are upper-cased for lookup.
    for (Entry<String, String> entry : hintKeyValues.entrySet()) {
        Object value = convert(entry.getValue());
        Comparable<?> comparable = value instanceof Comparable ?
            (Comparable<?>) value : Objects.toString(value);
        if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_DATABASE_VALUE_KEY)) {
            result.getShardingDatabaseValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
        }
        if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_TABLE_VALUE_KEY)) {
            result.getShardingTableValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
        }
    }
    return result;
}
@Test
void assertSQLHintShardingTableValue() {
    // A numeric sharding-table hint is converted to BigInteger, not kept as a String.
    HintValueContext actual = SQLHintUtils.extractHint("/* SHARDINGSPHERE_HINT: SHARDING_TABLE_VALUE=10 */");
    assertThat(actual.getHintShardingTableValue("t_order"), is(Collections.singletonList(new BigInteger("10"))));
}
/**
 * Resolves the pod security provider class from the environment variable value.
 * The shortcut names ("baseline"/"restricted") are matched case-insensitively and
 * mapped to their provider class names; anything else is returned verbatim as a
 * fully-qualified class name. A null input falls back to the configured default.
 */
static String parsePodSecurityProviderClass(String envVar) {
    String value = envVar != null ? envVar : POD_SECURITY_PROVIDER_CLASS.defaultValue();
    // Normalize once instead of lower-casing the value for every comparison.
    String normalized = value.toLowerCase(Locale.ENGLISH);
    if (POD_SECURITY_PROVIDER_BASELINE_SHORTCUT.defaultValue().equals(normalized)) {
        return POD_SECURITY_PROVIDER_BASELINE_CLASS.defaultValue();
    } else if (POD_SECURITY_PROVIDER_RESTRICTED_SHORTCUT.defaultValue().equals(normalized)) {
        return POD_SECURITY_PROVIDER_RESTRICTED_CLASS.defaultValue();
    } else {
        return value;
    }
}
@Test
public void testParsePodSecurityProviderClass() {
    // Shortcut names resolve case-insensitively to the provider classes;
    // any other value is returned verbatim as a class name.
    assertThat(ClusterOperatorConfig.parsePodSecurityProviderClass("Baseline"), is(ClusterOperatorConfig.POD_SECURITY_PROVIDER_BASELINE_CLASS.defaultValue()));
    assertThat(ClusterOperatorConfig.parsePodSecurityProviderClass("baseline"), is(ClusterOperatorConfig.POD_SECURITY_PROVIDER_BASELINE_CLASS.defaultValue()));
    assertThat(ClusterOperatorConfig.parsePodSecurityProviderClass("RESTRICTED"), is(ClusterOperatorConfig.POD_SECURITY_PROVIDER_RESTRICTED_CLASS.defaultValue()));
    assertThat(ClusterOperatorConfig.parsePodSecurityProviderClass("restricted"), is(ClusterOperatorConfig.POD_SECURITY_PROVIDER_RESTRICTED_CLASS.defaultValue()));
    assertThat(ClusterOperatorConfig.parsePodSecurityProviderClass("my.package.MyClass"), is("my.package.MyClass"));
}
/**
 * Maps a JSON message body onto the DynamoDB (Ddb2) headers expected by the
 * producer: the operation, the key map and (for writes) the item/update values.
 * Skips messages that already carry ITEM or KEY headers.
 */
@Override
public void transform(Message message, DataType fromType, DataType toType) {
    // Already transformed — leave the message untouched.
    if (message.getHeaders().containsKey(Ddb2Constants.ITEM) || message.getHeaders().containsKey(Ddb2Constants.KEY)) {
        return;
    }
    JsonNode jsonBody = getBodyAsJsonNode(message);
    // Operation precedence (lowest to highest): JSON "operation" field (default
    // PutItem) < exchange property "operation" < OPERATION message header.
    String operation = Optional.ofNullable(jsonBody.get("operation")).map(JsonNode::asText).orElse(Ddb2Operations.PutItem.name());
    if (message.getExchange().hasProperties() && message.getExchange().getProperty("operation", String.class) != null) {
        operation = message.getExchange().getProperty("operation", String.class);
    }
    if (message.getHeaders().containsKey(Ddb2Constants.OPERATION)) {
        operation = message.getHeader(Ddb2Constants.OPERATION, Ddb2Operations.class).name();
    }
    JsonNode key = jsonBody.get("key");
    JsonNode item = jsonBody.get("item");
    // The "key" node when present; otherwise the whole body acts as the key map.
    Map<String, Object> keyProps;
    if (key != null) {
        keyProps = dataFormat.getObjectMapper().convertValue(key, new TypeReference<>() {
        });
    } else {
        keyProps = dataFormat.getObjectMapper().convertValue(jsonBody, new TypeReference<>() {
        });
    }
    // The "item" node when present; otherwise fall back to the key properties.
    Map<String, Object> itemProps;
    if (item != null) {
        itemProps = dataFormat.getObjectMapper().convertValue(item, new TypeReference<>() {
        });
    } else {
        itemProps = keyProps;
    }
    final Map<String, AttributeValue> keyMap = getAttributeValueMap(keyProps);
    switch (Ddb2Operations.valueOf(operation)) {
        case PutItem:
            message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.PutItem);
            message.setHeader(Ddb2Constants.ITEM, getAttributeValueMap(itemProps));
            // Default RETURN_VALUES is only set when the caller did not choose one.
            setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
            break;
        case UpdateItem:
            message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.UpdateItem);
            message.setHeader(Ddb2Constants.KEY, keyMap);
            message.setHeader(Ddb2Constants.UPDATE_VALUES, getAttributeValueUpdateMap(itemProps));
            setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_NEW.toString(), message);
            break;
        case DeleteItem:
            message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.DeleteItem);
            message.setHeader(Ddb2Constants.KEY, keyMap);
            setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
            break;
        default:
            // Any other Ddb2Operations value is not handled by this transformer.
            throw new UnsupportedOperationException(String.format("Unsupported operation '%s'", operation));
    }
}
@Test
@SuppressWarnings("unchecked")
void shouldMapDeleteItemHeaders() throws Exception {
    // DeleteItem selected via exchange property: the transformer must set the
    // operation header, default RETURN_VALUES=ALL_OLD, and map the JSON "key"
    // node to a DynamoDB attribute-value key map.
    Exchange exchange = new DefaultExchange(camelContext);
    exchange.getMessage().setBody(Json.mapper().readTree("{\"key\": " + keyJson + "}"));
    exchange.setProperty("operation", Ddb2Operations.DeleteItem.name());
    transformer.transform(exchange.getMessage(), DataType.ANY, new DataType(AWS_2_DDB_APPLICATION_JSON_TRANSFORMER));
    Assertions.assertTrue(exchange.getMessage().hasHeaders());
    Assertions.assertEquals(Ddb2Operations.DeleteItem, exchange.getMessage().getHeader(Ddb2Constants.OPERATION));
    Assertions.assertEquals(ReturnValue.ALL_OLD.toString(), exchange.getMessage().getHeader(Ddb2Constants.RETURN_VALUES));
    Map<String, AttributeValue> attributeValueMap = exchange.getMessage().getHeader(Ddb2Constants.KEY, Map.class);
    Assertions.assertEquals(1L, attributeValueMap.size());
    Assertions.assertEquals(AttributeValue.builder().s("Rajesh Koothrappali").build(), attributeValueMap.get("name"));
}
/**
 * Returns a 200 response that renders the recorded access-log URIs as JSON:
 * {"entries":[{"url":...}, ...]}. The URI snapshot is taken eagerly so the
 * response is stable even if the keeper is updated before rendering.
 */
@Override
public HttpResponse handle(HttpRequest request) {
    final List<String> uris = circularArrayAccessLogKeeper.getUris();
    return new HttpResponse(200) {
        @Override
        public void render(OutputStream outputStream) throws IOException {
            // try-with-resources guarantees the generator is closed (and buffered
            // output flushed) even when a write throws; the original leaked the
            // generator on any exception before the final close().
            try (JsonGenerator generator = jsonFactory.createGenerator(outputStream)) {
                generator.writeStartObject();
                generator.writeArrayFieldStart("entries");
                for (String uri : uris) {
                    generator.writeStartObject();
                    generator.writeStringField("url", uri);
                    generator.writeEndObject();
                }
                generator.writeEndArray();
                generator.writeEndObject();
            }
        }
    };
}
@Test
void testManyLogLines() throws IOException {
    // Two recorded URIs must render as two JSON entries, in insertion order,
    // even when the URI value is identical.
    keeper.addUri("foo");
    keeper.addUri("foo");
    HttpResponse response = handler.handle(null);
    response.render(out);
    assertEquals("{\"entries\":[{\"url\":\"foo\"},{\"url\":\"foo\"}]}", out.toString());
}
/**
 * Builds the DDL command describing the sink stream of the given output node.
 * The two trailing flags are orReplace (taken from the node) and isSource=false.
 */
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
    return new CreateStreamCommand(
        outputNode.getSinkName().get(),
        outputNode.getSchema(),
        outputNode.getTimestampColumn(),
        outputNode.getKsqlTopic().getKafkaTopicName(),
        Formats.from(outputNode.getKsqlTopic()),
        outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
        Optional.of(outputNode.getOrReplace()),
        Optional.of(false)
    );
}
@Test
public void shouldThrowOnWindowEndValueColumn() {
    // WINDOWEND is a reserved system column; declaring it as a value column
    // in CREATE STREAM must be rejected.
    // Given:
    final CreateStream statement = new CreateStream(
        SOME_NAME,
        TableElements.of(tableElement(WINDOWEND_NAME.text(), new Type(BIGINT))),
        false,
        true,
        withProperties,
        false
    );
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> createSourceFactory.createStreamCommand(statement, ksqlConfig)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "'WINDOWEND' is a reserved column name."));
}
public Grok cachedGrokForPattern(String pattern) {
    // Convenience overload: delegates with the boolean flag set to false
    // (see the two-argument overload for the flag's meaning).
    return cachedGrokForPattern(pattern, false);
}
@Test
public void cachedGrokForPatternThrowsRuntimeException() {
    // A pattern whose definition is empty must surface as a RuntimeException
    // wrapping the underlying IllegalArgumentException, with the grok library's
    // diagnostic message intact.
    expectedException.expectMessage("No definition for key 'EMPTY' found, aborting");
    expectedException.expect(RuntimeException.class);
    expectedException.expectCause(Matchers.any(IllegalArgumentException.class));
    final Set<GrokPattern> newPatterns = Collections.singleton(GrokPattern.create("EMPTY", ""));
    when(grokPatternService.loadAll()).thenReturn(newPatterns);
    // Force the registry to reload the (broken) pattern set before the lookup.
    eventBus.post(GrokPatternsUpdatedEvent.create(Collections.singleton("EMPTY")));
    grokPatternRegistry.cachedGrokForPattern("%{EMPTY}");
}
public static Builder builder() {
    // Static factory: entry point for constructing instances via the fluent Builder.
    return new Builder();
}
@Test
void fail_if_no_qualifier_provided() {
    // Building a ComponentQuery without any qualifier is a programming error
    // and must fail with a descriptive IllegalArgumentException.
    assertThatThrownBy(() -> ComponentQuery.builder().build())
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("At least one qualifier must be provided");
}
/**
 * Builds the reverse mapping (target -> sources) of this source -> targets mapping.
 * Source indices are appended in ascending order because the outer loop iterates
 * sources in order.
 */
public RescaleMappings invert() {
    IntArrayList[] inverted = new IntArrayList[numberOfTargets];
    for (int source = 0; source < mappings.length; source++) {
        final int[] targets = mappings[source];
        for (int target : targets) {
            IntArrayList sources = inverted[target];
            if (sources == null) {
                // Lazily allocate; most targets have few sources, so start small.
                inverted[target] = sources = new IntArrayList(1);
            }
            sources.add(source);
        }
    }
    // Targets never referenced remain null here; toSortedArray presumably maps
    // those to an empty mapping — TODO confirm against RescaleMappings.toSortedArray.
    return of(Arrays.stream(inverted).map(RescaleMappings::toSortedArray), numberOfSources);
}
@Test
void testInvert() {
    // invert() flips source->targets to target->sources; applying it twice
    // must return the original mapping.
    RescaleMappings mapping = mappings(to(0), to(0), to(), to(2, 3), to(0, 5));
    RescaleMappings inverted = mapping.invert();
    RescaleMappings expected = mappings(to(0, 1, 4), to(), to(3), to(3), to(), to(4));
    assertThat(inverted).isEqualTo(expected);
    assertThat(inverted.invert()).isEqualTo(mapping);
}
/**
 * Groups exploded plugins into classloader definitions, one per base plugin.
 * A plugin that declares a base plugin shares the base's classloader and may
 * only contribute files; classloader metadata (self-first strategy, compatibility
 * warnings) is controlled solely by the base plugin.
 */
@VisibleForTesting
Collection<PluginClassLoaderDef> defineClassloaders(Map<String, ExplodedPlugin> pluginsByKey) {
    Map<String, PluginClassLoaderDef> classloadersByBasePlugin = new HashMap<>();
    for (ExplodedPlugin plugin : pluginsByKey.values()) {
        PluginInfo info = plugin.getPluginInfo();
        String baseKey = basePluginKey(info, pluginsByKey);
        PluginClassLoaderDef def = classloadersByBasePlugin.get(baseKey);
        if (def == null) {
            def = new PluginClassLoaderDef(baseKey);
            classloadersByBasePlugin.put(baseKey, def);
        }
        def.addFiles(singleton(plugin.getMain()));
        def.addFiles(plugin.getLibs());
        def.addMainClass(info.getKey(), info.getMainClass());
        // Export the conventional shared-resource paths for this plugin's key.
        for (String defaultSharedResource : DEFAULT_SHARED_RESOURCES) {
            def.getExportMask().include(String.format("%s/%s/api/", defaultSharedResource, info.getKey()));
        }
        // The plugins that extend other plugins can only add some files to classloader.
        // They can't change metadata like ordering strategy or compatibility mode.
        if (Strings.isNullOrEmpty(info.getBasePlugin())) {
            if (info.isUseChildFirstClassLoader()) {
                LoggerFactory.getLogger(getClass()).warn("Plugin {} [{}] uses a child first classloader which is deprecated", info.getName(), info.getKey());
            }
            def.setSelfFirstStrategy(info.isUseChildFirstClassLoader());
            Version minSonarPluginApiVersion = info.getMinimalSonarPluginApiVersion();
            // Plugins built against an API older than the compatibility-mode cutoff
            // only get a warning; the mode itself is no longer supported.
            boolean compatibilityMode = minSonarPluginApiVersion != null && minSonarPluginApiVersion.compareToIgnoreQualifier(COMPATIBILITY_MODE_MAX_VERSION) < 0;
            if (compatibilityMode) {
                LoggerFactory.getLogger(getClass()).warn("API compatibility mode is no longer supported. In case of error, plugin {} [{}] " + "should package its dependencies.", info.getName(), info.getKey());
            }
        }
    }
    return classloadersByBasePlugin.values();
}
@Test
public void define_classloader() throws Exception {
    // A single standalone plugin yields exactly one classloader definition
    // keyed by the plugin itself, with parent-first strategy by default.
    File jarFile = temp.newFile();
    PluginInfo plugin = new PluginInfo("foo")
        .setJarFile(jarFile)
        .setMainClass("org.foo.FooPlugin")
        .setMinimalSonarPluginApiVersion(Version.create("5.2"));
    ExplodedPlugin explodedPlugin = createExplodedPlugin(plugin);
    Collection<PluginClassLoaderDef> defs = underTest.defineClassloaders(
        ImmutableMap.of("foo", explodedPlugin));
    assertThat(defs).hasSize(1);
    PluginClassLoaderDef def = defs.iterator().next();
    assertThat(def.getBasePluginKey()).isEqualTo("foo");
    assertThat(def.isSelfFirstStrategy()).isFalse();
    assertThat(def.getFiles()).containsAll(explodedPlugin.getLibs());
    assertThat(def.getMainClassesByPluginKey()).containsOnly(MapEntry.entry("foo", "org.foo.FooPlugin"));
}
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
@Test
public void testFetchDuringEagerRebalance() {
    // An in-flight fetch that completes after an eager rebalance (which clears
    // and re-assigns positions) must be discarded instead of yielding records.
    buildFetcher();
    subscriptions.subscribe(singleton(topicName), Optional.empty());
    subscriptions.assignFromSubscribed(singleton(tp0));
    subscriptions.seek(tp0, 0);
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(
        1, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds));
    assertEquals(1, sendFetches());
    // Now the eager rebalance happens and fetch positions are cleared
    subscriptions.assignFromSubscribed(Collections.emptyList());
    subscriptions.assignFromSubscribed(singleton(tp0));
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    // The active fetch should be ignored since its position is no longer valid
    assertTrue(fetchRecords().isEmpty());
}
LocalResource getResource() {
    // Package-private accessor for the local resource this download operates on.
    return resource;
}
@Test
@Timeout(10000)
void testResourceTimestampChangeDuringDownload()
    throws IOException, InterruptedException {
    // Verifies that FSDownload fails with an IOException mentioning both the
    // expected and the actual timestamp when the file is modified on the
    // filesystem after the LocalResource metadata was captured.
    conf = new Configuration();
    FileContext files = FileContext.getLocalFSFileContext(conf);
    final Path basedir = files.makeQualified(
        new Path("target", TestFSDownload.class.getSimpleName()));
    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
    LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
    Path path = new Path(basedir, "test-file");
    Random rand = new Random();
    long sharedSeed = rand.nextLong();
    rand.setSeed(sharedSeed);
    int size = 512;
    LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
    LocalResource localResource = createFile(files, path, size, rand, vis);
    Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
    destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
    FSDownload fsDownload = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, localResource);
    // Store the original local resource timestamp used to set up the
    // FSDownload object just before (but before the download starts)
    // for comparison purposes later on.
    long origLRTimestamp = localResource.getTimestamp();
    // Modify the local resource's timestamp to yesterday on the Filesystem
    // just before FSDownload starts.
    final long msInADay = 86400 * 1000;
    long modifiedFSTimestamp = origLRTimestamp - msInADay;
    try {
        Path sourceFsPath = localResource.getResource().toPath();
        FileSystem sourceFs = sourceFsPath.getFileSystem(conf);
        sourceFs.setTimes(sourceFsPath, modifiedFSTimestamp, modifiedFSTimestamp);
    } catch (URISyntaxException use) {
        fail("No exception expected.");
    }
    // Execute the FSDownload operation.
    Map<LocalResource, Future<Path>> pending = new HashMap<>();
    ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
    pending.put(localResource, exec.submit(fsDownload));
    exec.shutdown();
    exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
    assertTrue(pending.get(localResource).isDone());
    // The download must fail: the timestamp seen on disk differs from the one
    // recorded in the LocalResource, and both values must appear in the message.
    try {
        for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
            p.getValue().get();
        }
        fail("Exception expected from timestamp update during download");
    } catch (ExecutionException ee) {
        assertTrue(ee.getCause() instanceof IOException);
        assertTrue(ee.getMessage().contains(Times.formatISO8601(origLRTimestamp)),
            "Exception contains original timestamp");
        assertTrue(ee.getMessage().contains(Times.formatISO8601(modifiedFSTimestamp)),
            "Exception contains modified timestamp");
    }
}
/**
 * Moves {@code source} to {@code renamed} by performing a server-side copy
 * followed by deletion of the source. Returns the path produced by the copy.
 */
@Override
public Path move(final Path source, final Path renamed, final TransferStatus status,
                 final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
    // Copy first so the source is only removed once the target exists.
    final Path target = proxy.copy(source, renamed, status, connectionCallback, new DisabledStreamListener());
    delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
    return target;
}
@Test
public void testMove() throws Exception {
    // A move must carry object metadata to the target, remove the source,
    // and report attributes consistent with a fresh lookup of the target.
    final Path bucket = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new GoogleStorageTouchFeature(session).touch(
        new Path(bucket, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)),
        new TransferStatus().withMetadata(Collections.singletonMap("cyberduck", "set")));
    assertTrue(new GoogleStorageFindFeature(session).find(test));
    assertFalse(new GoogleStorageMetadataFeature(session).getMetadata(test).isEmpty());
    final Path renamed = new Path(bucket, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
    final Path moved = new GoogleStorageMoveFeature(session).move(test, renamed, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertFalse(new GoogleStorageFindFeature(session).find(test));
    assertTrue(new GoogleStorageFindFeature(session).find(renamed));
    final PathAttributes targetAttr = new GoogleStorageAttributesFinderFeature(session).find(renamed);
    assertEquals(moved.attributes(), targetAttr);
    assertEquals(Comparison.equal, session.getHost().getProtocol().getFeature(ComparisonService.class).compare(Path.Type.file, moved.attributes(), targetAttr));
    assertEquals(1, new GoogleStorageObjectListService(session).list(bucket, new DisabledListProgressListener())
        .filter(new SearchFilter(renamed.getName())).size());
    // Custom metadata set at creation must survive the move.
    final Map<String, String> metadata = new GoogleStorageMetadataFeature(session).getMetadata(renamed);
    assertFalse(metadata.isEmpty());
    assertEquals("set", metadata.get("cyberduck"));
    new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(renamed), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Marshals the message body to the output stream: the payload is first coerced
 * to a {@link Calendar} through the Camel type-converter registry, then written
 * by the configured outputer.
 */
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception {
    final Calendar body = exchange.getContext().getTypeConverter().convertTo(Calendar.class, graph);
    outputer.output(body, stream);
}
@Test
public void testMarshal() throws Exception {
    Calendar testCalendar = createTestCalendar();
    MockEndpoint endpoint = getMockEndpoint("mock:result");
    // NOTE(review): the expectation compares against Calendar.toString(); this
    // only holds if the route converts the marshalled body to that exact string
    // before the mock endpoint — confirm against the route definition.
    endpoint.expectedBodiesReceived(testCalendar.toString());
    template.sendBody("direct:marshal", testCalendar);
    endpoint.assertIsSatisfied();
}
/**
 * Deprecated API kept for compatibility: adapts the value-only transformer
 * supplier to the key-aware variant and delegates with an empty processor name.
 */
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
                                               final String... stateStoreNames) {
    Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
    return doFlatTransformValues(
        toValueTransformerWithKeySupplier(valueTransformerSupplier),
        NamedInternal.empty(),
        stateStoreNames);
}
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullValueTransformerSupplierOnFlatTransformValuesWithNamedAndStores() {
    // A null supplier must be rejected eagerly with the documented message,
    // even when a name and state stores are provided.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.flatTransformValues(
            (org.apache.kafka.streams.kstream.ValueTransformerSupplier<Object, Iterable<Object>>) null,
            Named.as("flatValueTransformer"),
            "stateStore"));
    assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null"));
}
public static List<File> extractFilesInLibDirAndReturnFiles(File aJarFile, Predicate<JarEntry> extractFilter, File outputTmpDir) { List<File> newClassPath = new ArrayList<>(); try (JarFile jarFile = new JarFile(aJarFile)) { List<File> extractedJars = jarFile.stream() .filter(extractFilter) .map(jarEntry -> { String jarFileBaseName = FilenameUtils.getName(jarEntry.getName()); File targetFile = new File(outputTmpDir, jarFileBaseName); return extractJarEntry(jarFile, jarEntry, targetFile); }) .toList(); // add deps in dir specified by `libDirManifestKey` newClassPath.addAll(extractedJars); } catch (IOException e) { throw new RuntimeException(e); } return newClassPath; }
@Test
public void shouldExtractJars() throws Exception {
    // Extracting .class entries from the test agent jar must produce exactly the
    // two expected class files in the temporary folder, and nothing else.
    File sourceFile = new File(PATH_WITH_HASHES + "test-agent.jar");
    Set<File> files = new HashSet<>(JarUtil.extractFilesInLibDirAndReturnFiles(sourceFile, jarEntry -> jarEntry.getName().endsWith(".class"), temporaryFolder));
    try (Stream<Path> directoryStream = Files.list(temporaryFolder.toPath())) {
        Set<File> actualFiles = directoryStream.map(Path::toFile).collect(Collectors.toSet());
        // JUnit's assertEquals takes (expected, actual); the original had
        // several calls swapped, which yields misleading failure messages.
        assertEquals(files, actualFiles);
        assertEquals(2, files.size());
        Set<String> fileNames = files.stream().map(File::getName).collect(Collectors.toSet());
        assertEquals(Set.of("ArgPrintingMain.class", "HelloWorldStreamWriter.class"), fileNames);
    }
}
/**
 * Binds SAML session state — and, when a federation is named on the request,
 * federation session state — to the current HTTP session.
 */
public void initializeSession(AuthenticationRequest authenticationRequest, SAMLBindingContext bindingContext) throws SamlSessionException, SharedServiceClientException {
    final String httpSessionId = authenticationRequest.getRequest().getSession().getId();
    if (authenticationRequest.getFederationName() != null) {
        findOrInitializeFederationSession(authenticationRequest, httpSessionId);
    }
    findOrInitializeSamlSession(authenticationRequest, httpSessionId, bindingContext);
}
@Test
public void findSSOSessionHigherRequestedAuthLevelInitializeTest() throws SamlSessionException, SharedServiceClientException {
    // An existing federation session at auth level 10 must NOT satisfy a request
    // demanding level 20, so the request is not marked as a valid SSO session.
    FederationSession federationSession = new FederationSession(600);
    federationSession.setAuthLevel(10);
    authenticationRequest.setMinimumRequestedAuthLevel(20);
    authenticationRequest.setFederationName("federationName");
    Optional<FederationSession> optionalFederationSession = Optional.of(federationSession);
    when(federationSessionRepositoryMock.findByHttpSessionIdAndFederationName(anyString(), anyString())).thenReturn(optionalFederationSession);
    when(sharedServiceClientMock.getSSConfigLong(anyString())).thenReturn(10L);
    samlSessionService.initializeSession(authenticationRequest, bindingContext);
    assertFalse(authenticationRequest.isValidSsoSession());
}
/**
 * Returns the application-data directory for {@code appName}, creating it
 * (including parents) when missing. Creation failures are surfaced as a
 * RuntimeException wrapping the underlying IOException.
 */
public static Path get(String appName) {
    final Path dataDir = getPath(appName);
    try {
        // No-op when the directory already exists.
        Files.createDirectories(dataDir);
    } catch (IOException ioe) {
        throw new RuntimeException("Couldn't find/create AppDataDirectory", ioe);
    }
    return dataDir;
}
@Test(expected = RuntimeException.class)
public void throwsIOExceptionIfPathNotFound() {
    // Force directory creation to fail by using a character that is illegal in
    // file names on the current platform; get() must wrap the IOException in a
    // RuntimeException. Exactly one branch runs per platform.
    if (PlatformUtils.isWindows()) {
        AppDataDirectory.get(":"); // Illegal character for Windows
    }
    if (PlatformUtils.isMac()) {
        // NUL character
        AppDataDirectory.get("\0"); // Illegal character for Mac
    }
    if (PlatformUtils.isLinux()) {
        // NUL character
        AppDataDirectory.get("\0"); // Illegal character for Linux
    }
}
/**
 * Loads application settings from sonar.properties, allowing overrides from
 * environment variables. Loading happens in two passes because some property
 * keys (e.g. {@code ldap.*.url}) are only known once static properties are read.
 */
@Override
public AppSettings load() {
    Properties p = loadPropertiesFile(homeDir);
    // Keys that may be overridden from the environment: all known process
    // properties plus everything present in the properties file.
    Set<String> keysOverridableFromEnv = stream(ProcessProperties.Property.values()).map(ProcessProperties.Property::getKey)
        .collect(Collectors.toSet());
    keysOverridableFromEnv.addAll(p.stringPropertyNames());
    // 1st pass to load static properties
    Props staticProps = reloadProperties(keysOverridableFromEnv, p);
    keysOverridableFromEnv.addAll(getDynamicPropertiesKeys(staticProps));
    // 2nd pass to load dynamic properties like `ldap.*.url` or `ldap.*.baseDn` which keys depend on values of static
    // properties loaded in 1st step
    Props props = reloadProperties(keysOverridableFromEnv, p);
    new ProcessProperties(serviceLoaderWrapper).completeDefaults(props);
    // Let registered consumers observe/adjust the final properties.
    stream(consumers).forEach(c -> c.accept(props));
    return new AppSettingsImpl(props);
}
@Test
public void load_properties_from_env() throws Exception {
    // Environment variables (SONAR_FOO_BAR style) must override values from
    // sonar.properties and map back to their dotted/camel-cased property keys.
    when(system.getenv()).thenReturn(ImmutableMap.of(
        "SONAR_DASHED_PROPERTY", "2",
        "SONAR_JDBC_URL", "some_jdbc_url",
        "SONAR_EMBEDDEDDATABASE_PORT", "8765"));
    when(system.getenv("SONAR_DASHED_PROPERTY")).thenReturn("2");
    when(system.getenv("SONAR_JDBC_URL")).thenReturn("some_jdbc_url");
    when(system.getenv("SONAR_EMBEDDEDDATABASE_PORT")).thenReturn("8765");
    File homeDir = temp.newFolder();
    File propsFile = new File(homeDir, "conf/sonar.properties");
    FileUtils.write(propsFile, "sonar.dashed-property=1", UTF_8);
    AppSettingsLoaderImpl underTest = new AppSettingsLoaderImpl(system, new String[0], homeDir, serviceLoaderWrapper);
    AppSettings settings = underTest.load();
    assertThat(settings.getProps().rawProperties()).contains(
        entry("sonar.dashed-property", "2"),
        entry("sonar.jdbc.url", "some_jdbc_url"),
        entry("sonar.embeddedDatabase.port", "8765"));
}
/**
 * Scans the full releases table and returns an unmodifiable map of releases
 * grouped by platform, then keyed by version. Rows that cannot be parsed
 * (releaseFromItem yields null) are skipped via mapNotNull.
 * NOTE(review): blockOptional() makes this a synchronous, blocking call —
 * confirm callers are not on a reactive/event-loop thread.
 */
public Map<ClientPlatform, Map<Semver, ClientRelease>> getClientReleases() {
    return Collections.unmodifiableMap(
        Flux.from(dynamoDbAsyncClient.scanPaginator(ScanRequest.builder()
                .tableName(tableName)
                .build())
                .items())
            .mapNotNull(ClientReleases::releaseFromItem)
            .groupBy(ClientRelease::platform)
            .flatMap(groupedFlux -> groupedFlux.collectMap(ClientRelease::version)
                .map(releasesByVersion -> Tuples.of(groupedFlux.key(), releasesByVersion)))
            .collectMap(Tuple2::getT1, Tuple2::getT2)
            .blockOptional()
            .orElseGet(Collections::emptyMap));
}
@Test
void getClientReleases() {
    final Instant releaseTimestamp = Instant.now().truncatedTo(ChronoUnit.SECONDS);
    final Instant expiration = releaseTimestamp.plusSeconds(60);
    storeClientRelease("IOS", "1.2.3", releaseTimestamp, expiration);
    // Rows with unparseable versions or unknown platforms must be silently skipped.
    storeClientRelease("IOS", "not-a-valid-version", releaseTimestamp, expiration);
    storeClientRelease("ANDROID", "4.5.6", releaseTimestamp, expiration);
    storeClientRelease("UNRECOGNIZED_PLATFORM", "7.8.9", releaseTimestamp, expiration);
    final Map<ClientPlatform, Map<Semver, ClientRelease>> expectedVersions = Map.of(
        ClientPlatform.IOS,
        Map.of(new Semver("1.2.3"), new ClientRelease(ClientPlatform.IOS, new Semver("1.2.3"), releaseTimestamp, expiration)),
        ClientPlatform.ANDROID,
        Map.of(new Semver("4.5.6"), new ClientRelease(ClientPlatform.ANDROID, new Semver("4.5.6"), releaseTimestamp, expiration)));
    assertEquals(expectedVersions, clientReleases.getClientReleases());
}
/**
 * Reads a single byte from the under file system.
 * Returns -1 at end of file; otherwise advances the position and counts the
 * byte in the UFS read metric.
 */
@Override
public int read() throws IOException {
    if (mPosition == mLength) {
        // at end of file
        return -1;
    }
    // Lazily (re)establish the underlying UFS stream for the current position.
    updateStreamIfNeeded();
    int res = mUfsInStream.get().read();
    if (res == -1) {
        // Underlying stream exhausted even though mPosition < mLength:
        // surface EOF without advancing the position or the metric.
        return -1;
    }
    mPosition++;
    Metrics.BYTES_READ_FROM_UFS.inc(1);
    return res;
}
@Test
public void readOffsetByteBuffer() throws IOException, AlluxioException {
    // Reads into the middle of a ByteBuffer and checks the bytes land at the
    // requested offset with sequential values.
    AlluxioURI ufsPath = getUfsPath();
    createFile(ufsPath, CHUNK_SIZE); // 100
    int start = CHUNK_SIZE / 4; // 25
    int len = CHUNK_SIZE / 2; // 50 (the original comment claimed 75, which was wrong)
    try (FileInStream inStream = getStream(ufsPath)) {
        ByteBuffer buffer = ByteBuffer.allocate(CHUNK_SIZE);
        buffer.position(start);
        assertEquals(CHUNK_SIZE / 2, inStream.read(buffer, start, len));
        for (int i = start; i < start + len; i++) {
            assertEquals(i - start, buffer.get(i));
        }
    }
}
/**
 * Matches {@code text} against the pre-split pattern parts and extracts the
 * substring corresponding to each {@code ${var}} placeholder.
 * Returns an insertion-ordered map of variable name to captured value, or an
 * empty map when any literal part of the pattern cannot be found in order.
 */
public Map<String, String> match(String text) {
    final HashMap<String, String> result = MapUtil.newHashMap(true);
    int from = 0;
    String key = null;
    int to;
    for (String part : patterns) {
        if (StrUtil.isWrap(part, "${", "}")) {
            // A placeholder: remember its name; its value is delimited by the
            // surrounding literal parts.
            key = StrUtil.sub(part, 2, part.length() - 1);
        } else {
            to = text.indexOf(part, from);
            if (to < 0) {
                // A literal part did not match, so the whole pattern fails to
                // match; report it as an empty map.
                return MapUtil.empty();
            }
            if (null != key && to > from) {
                // There is text between the previous literal and this one:
                // that text is the value of the pending placeholder.
                result.put(key, text.substring(from, to));
            }
            // Continue scanning after the end of this literal part.
            from = to + part.length();
            key = null;
        }
    }
    if (null != key && from < text.length()) {
        // Pattern ends with a placeholder: it captures the remaining text.
        result.put(key, text.substring(from));
    }
    return result;
}
@Test
public void matcherTest3(){
    // When any literal part fails to match, the whole pattern is treated as a
    // non-match (translated from the original Chinese comment). Here both
    // literals match, so the two variables are captured.
    final StrMatcher strMatcher = new StrMatcher("${name}经过${year}年");
    final Map<String, String> match = strMatcher.match("小明经过20年,成长为一个大人。");
    //Console.log(match);
    assertEquals("小明", match.get("name"));
    assertEquals("20", match.get("year"));
}
/**
 * Applies the field-type profile to every given index set, optionally rotating
 * each index set immediately so the new mappings take effect. Validates the
 * profile and the index sets before mutating anything.
 */
public void setProfile(final Set<String> indexSetsIds, final String profileId, final boolean rotateImmediately) {
    checkProfile(profileId);
    checkAllIndicesSupportProfileChange(indexSetsIds);
    for (String indexSetId : indexSetsIds) {
        try {
            indexSetService.get(indexSetId).ifPresent(indexSetConfig -> {
                var updatedIndexSetConfig = setProfileForIndexSet(profileId, indexSetConfig);
                // Only cycle when the config actually changed (Optional is empty otherwise).
                if (rotateImmediately) {
                    updatedIndexSetConfig.ifPresent(this::cycleIndexSet);
                }
            });
        } catch (Exception ex) {
            // Log with the offending index set, then propagate to the caller.
            LOG.error("Failed to update field type in index set : " + indexSetId, ex);
            throw ex;
        }
    }
}
@Test
void testDoesNothingWhenAskedToSetProfileWhichWasAlreadySet() {
    // Setting a profile that the index set already uses must not persist
    // anything or touch the index set.
    existingIndexSet = existingIndexSet.toBuilder()
        .fieldTypeProfile("000000000000000000000007")
        .build();
    doReturn(Optional.of(existingIndexSet)).when(indexSetService).get("existing_index_set");
    final String profileId = "000000000000000000000007";
    IndexFieldTypeProfile profile = new IndexFieldTypeProfile(
        profileId,
        "Nice profile!",
        "Nice profile!",
        new CustomFieldMappings(List.of(new CustomFieldMapping("bubamara", "ip")))
    );
    doReturn(Optional.of(profile)).when(profileService).get(profileId);
    toTest.setProfile(Set.of(existingIndexSet.id()), profileId, false);
    verify(mongoIndexSetService, never()).save(any());
    verifyNoInteractions(existingMongoIndexSet);
}
/**
 * Creates a match criterion for the given IPv6 flow label.
 */
public static Criterion matchIPv6FlowLabel(int flowLabel) {
    return new IPv6FlowLabelCriterion(flowLabel);
}
@Test
public void testMatchIPv6FlowLabelMethod() {
    // The factory must produce an IPV6_FLABEL criterion carrying the given label.
    int flowLabel = 12;
    Criterion matchFlowLabel = Criteria.matchIPv6FlowLabel(flowLabel);
    IPv6FlowLabelCriterion flowLabelCriterion = checkAndConvert(matchFlowLabel, Criterion.Type.IPV6_FLABEL, IPv6FlowLabelCriterion.class);
    assertThat(flowLabelCriterion.flowLabel(), is(equalTo(flowLabel)));
}
/**
 * Delegates segment creation to the parent, then evicts any segments that
 * have expired relative to the observed stream time.
 */
@Override
public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId, final ProcessorContext context, final long streamTime) {
    final KeyValueSegment liveSegment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
    cleanupExpiredSegments(streamTime);
    return liveSegment;
}
@Test
public void shouldGetSegmentsWithinTimeRange() {
    // After creating five segments, a range query covering the first two segment
    // intervals must return exactly segments 0..2 in ascending order.
    updateStreamTimeAndCreateSegment(0);
    updateStreamTimeAndCreateSegment(1);
    updateStreamTimeAndCreateSegment(2);
    updateStreamTimeAndCreateSegment(3);
    final long streamTime = updateStreamTimeAndCreateSegment(4);
    segments.getOrCreateSegmentIfLive(0, context, streamTime);
    segments.getOrCreateSegmentIfLive(1, context, streamTime);
    segments.getOrCreateSegmentIfLive(2, context, streamTime);
    segments.getOrCreateSegmentIfLive(3, context, streamTime);
    segments.getOrCreateSegmentIfLive(4, context, streamTime);
    final List<KeyValueSegment> segments = this.segments.segments(0, 2 * SEGMENT_INTERVAL, true);
    assertEquals(3, segments.size());
    assertEquals(0, segments.get(0).id);
    assertEquals(1, segments.get(1).id);
    assertEquals(2, segments.get(2).id);
}
@Override
public int getWorkerCount() {
    // When a provider is configured, re-read (and cache) the count on every call
    // so runtime changes are picked up; otherwise return the last known value.
    if (workerCountProvider != null) {
        workerCount = readWorkerCount(workerCountProvider);
    }
    return workerCount;
}
@Test
public void getWorkerCount_returns_1_when_there_is_no_WorkerCountProvider() {
    // Without a provider the default worker count of 1 is returned.
    assertThat(new CeConfigurationImpl(EMPTY_CONFIGURATION).getWorkerCount()).isOne();
}
/**
 * Sorts the given array in the requested direction (ascending/descending,
 * matched case-insensitively), with nulls last. Returns null when either
 * argument is null or the direction token is unrecognized.
 * NOTE: sorts the caller's list in place and returns that same instance
 * (behavior preserved from the original implementation).
 */
@Udf
public <T extends Comparable<? super T>> List<T> arraySortWithDirection(@UdfParameter(
    description = "The array to sort") final List<T> input, @UdfParameter(
    // Fixed copy-pasted description — this parameter is the sort direction,
    // not a series end marker.
    description = "The sort direction, ascending or descending") final String direction) {
    if (input == null || direction == null) {
        return null;
    }
    // Normalize once with a fixed locale so matching is stable regardless of the
    // JVM default locale (e.g. Turkish dotless-i breaks bare toUpperCase()).
    final String normalizedDirection = direction.toUpperCase(java.util.Locale.ROOT);
    if (SORT_DIRECTION_ASC.contains(normalizedDirection)) {
        input.sort(nullsLast(naturalOrder()));
    } else if (SORT_DIRECTION_DESC.contains(normalizedDirection)) {
        input.sort(nullsLast(Collections.reverseOrder()));
    } else {
        // Unrecognized direction: signal "no result" rather than guessing.
        return null;
    }
    return input;
}
@Test public void shouldSortIntsDescending() { final List<Integer> input = Arrays.asList(1, 3, -2); final List<Integer> output = udf.arraySortWithDirection(input, "DEsc"); assertThat(output, contains(3, 1, -2)); }
@Override public int run() throws IOException { Preconditions.checkArgument(sourceFiles != null && !sourceFiles.isEmpty(), "Missing file name"); // Ensure all source files have the columns specified first Map<String, Schema> schemas = new HashMap<>(); for (String source : sourceFiles) { Schema schema = getAvroSchema(source); schemas.put(source, Expressions.filterSchema(schema, columns)); } for (String source : sourceFiles) { Schema projection = schemas.get(source); Iterable<Object> reader = openDataFile(source, projection); boolean threw = true; long count = 0; try { for (Object record : reader) { if (numRecords > 0 && count >= numRecords) { break; } if (columns == null || columns.size() != 1) { console.info(String.valueOf(record)); } else { console.info(String.valueOf(select(projection, record, columns.get(0)))); } count += 1; } threw = false; } catch (RuntimeException e) { throw new RuntimeException("Failed on record " + count + " in file " + source, e); } finally { if (reader instanceof Closeable) { Closeables.close((Closeable) reader, threw); } } } return 0; }
@Test(expected = IllegalArgumentException.class) public void testCatCommandWithInvalidColumn() throws IOException { File file = parquetFile(); CatCommand command = new CatCommand(createLogger(), 0); command.sourceFiles = Arrays.asList(file.getAbsolutePath()); command.columns = Arrays.asList("invalid_field"); command.setConf(new Configuration()); command.run(); }
@Override public Result reconcile(Request request) { String name = request.name(); if (!isSystemSetting(name)) { return new Result(false, null); } client.fetch(ConfigMap.class, name) .ifPresent(configMap -> { addFinalizerIfNecessary(configMap); routeRuleReconciler.reconcile(name); customizeSystem(name); }); return new Result(false, null); }
@Test void reconcileTagsRule() { ConfigMap configMap = systemConfigMapForRouteRule(rules -> { rules.setTags("tags-new"); return rules; }); when(environmentFetcher.getConfigMapBlocking()).thenReturn(Optional.of(configMap)); when(client.fetch(eq(ConfigMap.class), eq(SystemSetting.SYSTEM_CONFIG))) .thenReturn(Optional.of(configMap)); systemSettingReconciler.reconcile(new Reconciler.Request(SystemSetting.SYSTEM_CONFIG)); ArgumentCaptor<ConfigMap> captor = ArgumentCaptor.forClass(ConfigMap.class); verify(client, times(1)).update(captor.capture()); ConfigMap updatedConfigMap = captor.getValue(); assertThat(rulesFrom(updatedConfigMap).getTags()).isEqualTo("tags-new"); assertThat(oldRulesFromAnno(updatedConfigMap).getTags()).isEqualTo("tags-new"); verify(applicationContext, times(1)).publishEvent(any()); }
@Override public void reset() throws IOException { createDirectory(PATH_DATA.getKey()); createDirectory(PATH_WEB.getKey()); createDirectory(PATH_LOGS.getKey()); File tempDir = createOrCleanTempDirectory(PATH_TEMP.getKey()); try (AllProcessesCommands allProcessesCommands = new AllProcessesCommands(tempDir)) { allProcessesCommands.clean(); } }
@Test public void reset_creates_dirs_if_they_don_t_exist() throws Exception { assertThat(dataDir).doesNotExist(); underTest.reset(); assertThat(dataDir).exists().isDirectory(); assertThat(logsDir).exists().isDirectory(); assertThat(tempDir).exists().isDirectory(); assertThat(webDir).exists().isDirectory(); underTest.reset(); assertThat(dataDir).exists().isDirectory(); assertThat(logsDir).exists().isDirectory(); assertThat(tempDir).exists().isDirectory(); assertThat(webDir).exists().isDirectory(); }
public static Builder newBuilder() { return new AutoValue_DLPInspectText.Builder(); }
@Test public void throwsExceptionWhenDelimiterIsSetAndHeadersAreNot() { assertThrows( "Column headers should be supplied when delimiter is present.", IllegalArgumentException.class, () -> DLPInspectText.newBuilder() .setProjectId(PROJECT_ID) .setBatchSizeBytes(BATCH_SIZE_SMALL) .setInspectTemplateName(TEMPLATE_NAME) .setColumnDelimiter(DELIMITER) .build()); }
public static void sleep(final TimeUnit timeUnit, final int time) { try { timeUnit.sleep(time); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } }
@Test public void testSleep() throws InterruptedException { ThreadUtils.sleep(timeUnit, 1); verify(timeUnit, times(1)).sleep(eq(1L)); }
public Model parseWithoutDocTypeCleanup(File file) throws PomParseException { try (FileInputStream fis = new FileInputStream(file)) { return parseWithoutDocTypeCleanup(fis); } catch (IOException ex) { if (ex instanceof PomParseException) { throw (PomParseException) ex; } LOGGER.debug("", ex); throw new PomParseException(String.format("Unable to parse pom '%s'", file), ex); } }
@Test public void testParseWithoutDocTypeCleanup_InputStream() throws Exception { InputStream inputStream = BaseTest.getResourceAsStream(this, "pom/mailapi-1.4.3.pom"); PomParser instance = new PomParser(); String expVersion = "1.4.3"; Model result = instance.parseWithoutDocTypeCleanup(inputStream); assertEquals("Invalid version extracted", expVersion, result.getParentVersion()); }
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) { return invoke(n, BigDecimal.ZERO); }
@Test void invokeNull() { FunctionTestUtil.assertResultError(roundDownFunction.invoke(null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(roundDownFunction.invoke((BigDecimal) null, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(roundDownFunction.invoke(BigDecimal.ONE, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(roundDownFunction.invoke(null, BigDecimal.ONE), InvalidParametersEvent.class); }
public RingbufferConfig setAsyncBackupCount(int asyncBackupCount) { this.asyncBackupCount = checkAsyncBackupCount(backupCount, asyncBackupCount); return this; }
@Test(expected = IllegalArgumentException.class) public void setAsyncBackupCount_whenTooSmall() { RingbufferConfig config = new RingbufferConfig(NAME); config.setAsyncBackupCount(-1); }
@Deprecated(forRemoval=true, since = "13.0") public static byte[] convertTextToOctetStream(Object source, MediaType sourceType) { if (source == null) return null; if (sourceType == null) { throw new NullPointerException("MediaType cannot be null!"); } if (source instanceof byte[]) return (byte[]) source; return source.toString().getBytes(sourceType.getCharset()); }
@Test public void testTextToOctetStreamConversion() { String source = "Like our owl?"; byte[] result = StandardConversions.convertTextToOctetStream(source, TEXT_PLAIN); assertArrayEquals(source.getBytes(UTF_8), result); }
public static List<BlockParserFactory> calculateBlockParserFactories(List<BlockParserFactory> customBlockParserFactories, Set<Class<? extends Block>> enabledBlockTypes) { List<BlockParserFactory> list = new ArrayList<>(); // By having the custom factories come first, extensions are able to change behavior of core syntax. list.addAll(customBlockParserFactories); for (Class<? extends Block> blockType : enabledBlockTypes) { list.add(NODES_TO_CORE_FACTORIES.get(blockType)); } return list; }
@Test public void calculateBlockParserFactories_givenAFullListOfAllowedNodes_includesAllCoreFactories() { List<BlockParserFactory> customParserFactories = List.of(); var enabledBlockTypes = Set.of(BlockQuote.class, Heading.class, FencedCodeBlock.class, HtmlBlock.class, ThematicBreak.class, ListBlock.class, IndentedCodeBlock.class); List<BlockParserFactory> blockParserFactories = DocumentParser.calculateBlockParserFactories(customParserFactories, enabledBlockTypes); assertThat(blockParserFactories.size(), is(CORE_FACTORIES.size())); for (BlockParserFactory factory : CORE_FACTORIES) { assertTrue(hasInstance(blockParserFactories, factory.getClass())); } }
public void validateTabNameUniqueness(ArrayList<Tab> tabs) { for (Tab tab : tabs) { if(name.equals(tab.getName())){ this.addError(NAME, String.format("Tab name '%s' is not unique.", name)); tab.addError(NAME, String.format("Tab name '%s' is not unique.", name)); return; } } tabs.add(this); }
@Test public void shouldAddToListWhenNoErrorIsEncountered() { Tab tab = new Tab("foo1", "bar"); ArrayList<Tab> visitedTabs = new ArrayList<>(); Tab existingTab = new Tab("foo0", "bar"); visitedTabs.add(existingTab); tab.validateTabNameUniqueness(visitedTabs); assertThat(visitedTabs.size(), is(2)); }
public Map<String, Object> metadata() throws JsonProcessingException { Map<String, Object> response = new HashMap<>(); response.put("issuer", frontchannel); response.put("authorization_endpoint", frontchannel + "/authorization"); response.put("jwks_uri", frontchannel + "/jwks"); response.put("token_endpoint", backchannel + "/token"); response.put("scopes_supported", List.of("openid")); response.put("response_types_supported", List.of("code")); response.put("claims_parameter_supported", false); response.put("claims_supported", List.of("sub", "acr")); response.put("grant_types_supported", List.of("authorization_code")); response.put("subject_types_supported", List.of("public")); response.put("sub_id_types_supported", List.of("urn:nl-eid-gdi:1.0:id:legacy-BSN")); response.put("acr_values_supported", List.of(LevelOfAssurance.MIDDEN, LevelOfAssurance.SUBSTANTIEEL)); response.put("token_endpoint_auth_methods_supported", List.of("tls_client_auth")); response.put("id_token_signing_alg_values_supported", List.of("RS256")); response.put("id_token_encryption_alg_values_supported", List.of("RS256")); response.put("request_object_signing_alg_values_supported", Arrays.asList("RS256")); response.put("request_object_encryption_enc_values_supported", Arrays.asList("RS256")); response.put("request_uri_parameter_supported", false); response.put("signed_metadata", generateJWT(MAPPER.writeValueAsString(response))); return response; }
@Test void metadataTest() throws JsonProcessingException { var response = provider.metadata(); assertEquals(List.of("code"), response.get("response_types_supported")); }
@Override public void removeMappingEntries(Type type, MappingEntry... mappingEntries) { for (MappingEntry entry : mappingEntries) { store.removeMapping(type, entry); } }
@Test public void removeMappingEntries() { Mapping m1 = addMapping(MAP_DATABASE, 1); Mapping m2 = addMapping(MAP_DATABASE, 2); addMapping(MAP_DATABASE, 3); assertEquals("3 mappings should exist", 3, mappingCount(MAP_DATABASE)); MappingEntry me1 = new DefaultMappingEntry(m1); MappingEntry me2 = new DefaultMappingEntry(m2); adminService.removeMappingEntries(MAP_DATABASE, me1, me2); assertEquals("1 mappings should exist", 1, mappingCount(MAP_DATABASE)); }
public static SIB fit(SparseArray[] data, int k) { return fit(data, k, 100); }
@Test public void testParseNG20() throws Exception { System.out.println("NG20"); MathEx.setSeed(19650218); // to get repeatable results. SparseDataset<Integer> train = Read.libsvm(smile.util.Paths.getTestData("libsvm/news20.dat")); SparseDataset<Integer> test = Read.libsvm(smile.util.Paths.getTestData("libsvm/news20.t.dat")); SparseArray[] trainx = train.stream().map(SampleInstance::x).toArray(SparseArray[]::new); int[] y = train.stream().mapToInt(SampleInstance::y).toArray(); int[] testy = test.stream().mapToInt(SampleInstance::y).toArray(); SIB model = SIB.fit(trainx, 20); System.out.println(model); double r = RandIndex.of(y, model.y); double r2 = AdjustedRandIndex.of(y, model.y); System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2); assertEquals(0.8842, r, 1E-4); assertEquals(0.2327, r2, 1E-4); System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y)); System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y)); System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y)); System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y)); System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y)); System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y)); int[] p = new int[test.size()]; for (int i = 0; i < test.size(); i++) { p[i] = model.predict(test.get(i).x()); } r = RandIndex.of(testy, p); r2 = AdjustedRandIndex.of(testy, p); System.out.format("Testing rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2); assertEquals(0.8782, r, 1E-4); assertEquals(0.2287, r2, 1E-4); java.nio.file.Path temp = Write.object(model); Read.object(temp); }
@Override public List<ApiMessageAndVersion> next() { if (!hasNext()) throw new NoSuchElementException(); List<ApiMessageAndVersion> result = new ArrayList<>(10); for (int i = 0; i < maxRecordsInBatch; i++) { if (!iterator.hasNext()) break; StandardAclWithId aclWithId = iterator.next(); result.add(new ApiMessageAndVersion(aclWithId.toRecord(), (short) 0)); } return result; }
@Test public void testNoSuchElementException() { StandardAclRecordIterator iterator = new StandardAclRecordIterator(TEST_ACLS.iterator(), 2); iterator.next(); iterator.next(); iterator.next(); assertThrows(NoSuchElementException.class, iterator::next); }
@SuppressWarnings({"unchecked", "rawtypes"}) @Override public @Nullable <InputT> TransformEvaluator<InputT> forApplication( AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) { return createEvaluator((AppliedPTransform) application); }
@Test public void unboundedSourceInMemoryTransformEvaluatorProducesElements() throws Exception { when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle()); Collection<CommittedBundle<?>> initialInputs = new UnboundedReadEvaluatorFactory.InputProvider(context, p.getOptions()) .getInitialInputs(graph.getProducer(longs), 1); CommittedBundle<?> inputShards = Iterables.getOnlyElement(initialInputs); UnboundedSourceShard<Long, ?> inputShard = (UnboundedSourceShard<Long, ?>) Iterables.getOnlyElement(inputShards.getElements()).getValue(); TransformEvaluator<? super UnboundedSourceShard<Long, ?>> evaluator = factory.forApplication(graph.getProducer(longs), inputShards); evaluator.processElement((WindowedValue) Iterables.getOnlyElement(inputShards.getElements())); TransformResult<? super UnboundedSourceShard<Long, ?>> result = evaluator.finishBundle(); WindowedValue<? super UnboundedSourceShard<Long, ?>> residual = Iterables.getOnlyElement(result.getUnprocessedElements()); assertThat(residual.getTimestamp(), Matchers.lessThan(DateTime.now().toInstant())); UnboundedSourceShard<Long, ?> residualShard = (UnboundedSourceShard<Long, ?>) residual.getValue(); assertThat(residualShard.getSource(), equalTo(inputShard.getSource())); assertThat(residualShard.getCheckpoint(), not(nullValue())); assertThat( output.commit(Instant.now()).getElements(), containsInAnyOrder( tgw(1L), tgw(2L), tgw(4L), tgw(8L), tgw(9L), tgw(7L), tgw(6L), tgw(5L), tgw(3L), tgw(0L))); }
@Nullable static String service(Invoker<?> invoker) { URL url = invoker.getUrl(); if (url == null) return null; String service = url.getServiceInterface(); return service != null && !service.isEmpty() ? service : null; }
@Test void service_nullUrl() { assertThat(DubboParser.service(invoker)).isNull(); }
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final CloudBlob blob = session.getClient().getContainerReference(containerService.getContainer(file).getName()) .getBlobReferenceFromServer(containerService.getKey(file)); if(0L == blob.getProperties().getLength()) { return new NullInputStream(0L); } final BlobRequestOptions options = new BlobRequestOptions(); options.setConcurrentRequestCount(1); final BlobInputStream in = blob.openInputStream(AccessCondition.generateEmptyCondition(), options, context); if(status.isAppend()) { try { return StreamCopier.skip(in, status.getOffset()); } catch(IndexOutOfBoundsException e) { // If offset is invalid throw new DefaultExceptionMappingService().map("Download {0} failed", e, file); } } return new ProxyInputStream(in) { @Override protected void handleIOException(final IOException e) throws IOException { if(StringUtils.equals(SR.STREAM_CLOSED, e.getMessage())) { log.warn(String.format("Ignore failure %s", e)); return; } final Throwable cause = ExceptionUtils.getRootCause(e); if(cause instanceof StorageException) { throw new IOException(e.getMessage(), new AzureExceptionMappingService().map((StorageException) cause)); } throw e; } }; } catch(StorageException e) { throw new AzureExceptionMappingService().map("Download {0} failed", e, file); } catch(URISyntaxException e) { throw new NotfoundException(e.getMessage(), e); } }
@Test(expected = NotfoundException.class) public void testReadNotFound() throws Exception { final TransferStatus status = new TransferStatus(); final Path container = new Path("cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); new AzureReadFeature(session, null).read(new Path(container, "nosuchname", EnumSet.of(Path.Type.file)), status, new DisabledConnectionCallback()); }
static void cleanStackTrace(Throwable throwable) { new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet()); }
@Test public void mixedCausingAndSuppressThrowablesAreCleaned() { if (Platform.isAndroid()) { return; // suppressed exceptions aren't supported under Ice Cream Sandwich, where we test } Throwable suppressed1 = createThrowableWithStackTrace("com.example.Foo", "org.junit.FilterMe"); Throwable cause2 = createThrowableWithStackTrace("com.example.Bar", "org.junit.FilterMe"); Throwable cause1 = createThrowableWithStackTrace(cause2, "com.example.Car", "org.junit.FilterMe"); Throwable suppressed2 = createThrowableWithStackTrace(cause1, "com.example.Dar", "org.junit.FilterMe"); Throwable throwable = createThrowableWithStackTrace("com.example.Far", "org.junit.FilterMe"); throwable.addSuppressed(suppressed1); throwable.addSuppressed(suppressed2); StackTraceCleaner.cleanStackTrace(throwable); assertThat(throwable.getStackTrace()).isEqualTo(createStackTrace("com.example.Far")); assertThat(suppressed1.getStackTrace()).isEqualTo(createStackTrace("com.example.Foo")); assertThat(suppressed2.getStackTrace()).isEqualTo(createStackTrace("com.example.Dar")); assertThat(cause1.getStackTrace()).isEqualTo(createStackTrace("com.example.Car")); assertThat(cause2.getStackTrace()).isEqualTo(createStackTrace("com.example.Bar")); }
@Override public List<MetricSample> collect() { List<MetricSample> list = new ArrayList<>(); if (!isCollectEnabled()) { return list; } collectRequests(list); collectQPS(list); collectRT(list); return list; }
@Test void testP95AndP99() throws InterruptedException { metricsDispatcher.addListener(collector); ConfigManager configManager = applicationModel.getApplicationConfigManager(); MetricsConfig config = configManager.getMetrics().orElse(null); AggregationConfig aggregationConfig = new AggregationConfig(); aggregationConfig.setEnabled(true); config.setAggregation(aggregationConfig); List<Long> requestTimes = new ArrayList<>(10000); for (int i = 0; i < 300; i++) { requestTimes.add(Double.valueOf(1000 * Math.random()).longValue()); } Collections.sort(requestTimes); double p95Index = 0.95 * (requestTimes.size() - 1); double p99Index = 0.99 * (requestTimes.size() - 1); double manualP95 = requestTimes.get((int) Math.round(p95Index)); double manualP99 = requestTimes.get((int) Math.round(p99Index)); for (Long requestTime : requestTimes) { RequestEvent requestEvent = RequestEvent.toRequestEvent( applicationModel, null, null, null, invocation, MetricsSupport.getSide(invocation), MethodMetric.isServiceLevel(applicationModel)); TestRequestEvent testRequestEvent = new TestRequestEvent(requestEvent.getSource(), requestEvent.getTypeWrapper()); testRequestEvent.putAttachment(MetricsConstants.INVOCATION, invocation); testRequestEvent.putAttachment(ATTACHMENT_KEY_SERVICE, MetricsSupport.getInterfaceName(invocation)); testRequestEvent.putAttachment(MetricsConstants.INVOCATION_SIDE, MetricsSupport.getSide(invocation)); testRequestEvent.setRt(requestTime); MetricsEventBus.post(testRequestEvent, () -> null); } Thread.sleep(4000L); List<MetricSample> samples = collector.collect(); GaugeMetricSample<?> p95Sample = samples.stream() .filter(sample -> sample.getName().endsWith("p95")) .map(sample -> (GaugeMetricSample<?>) sample) .findFirst() .orElse(null); GaugeMetricSample<?> p99Sample = samples.stream() .filter(sample -> sample.getName().endsWith("p99")) .map(sample -> (GaugeMetricSample<?>) sample) .findFirst() .orElse(null); Assertions.assertNotNull(p95Sample); 
Assertions.assertNotNull(p99Sample); double p95 = p95Sample.applyAsDouble(); double p99 = p99Sample.applyAsDouble(); // An error of less than 5% is allowed System.out.println(Math.abs(1 - p95 / manualP95)); Assertions.assertTrue(Math.abs(1 - p95 / manualP95) < 0.05); Assertions.assertTrue(Math.abs(1 - p99 / manualP99) < 0.05); }
public static Builder newBuilder() { return new Builder(); }
@Test public void testBuilderThrowsExceptionWhenPartitionTokenMissing() { assertThrows( "partitionToken", IllegalStateException.class, () -> PartitionMetadata.newBuilder() .setParentTokens(Sets.newHashSet(PARENT_TOKEN)) .setStartTimestamp(START_TIMESTAMP) .setEndTimestamp(END_TIMESTAMP) .setHeartbeatMillis(10) .setState(State.CREATED) .setWatermark(WATERMARK) .setCreatedAt(CREATED_AT) .build()); }
public void initialize(Configuration config) throws YarnException { setConf(config); this.plugin.initPlugin(config); // Try to diagnose FPGA LOG.info("Trying to diagnose FPGA information ..."); if (!diagnose()) { LOG.warn("Failed to pass FPGA devices diagnose"); } }
@Test public void testExecutablePathWithCorrectConfig() throws IOException, YarnException { fakeBinary = new File(getTestParentFolder() + "/aocl"); conf.set(YarnConfiguration.NM_FPGA_PATH_TO_EXEC, getTestParentFolder() + "/aocl"); touchFile(fakeBinary); fpgaDiscoverer.initialize(conf); assertEquals("Correct configuration should return user setting", getTestParentFolder() + "/aocl", openclPlugin.getPathToExecutable()); }
public static void setStaticField(Field field, Object fieldNewValue) { try { if ((field.getModifiers() & Modifier.FINAL) == Modifier.FINAL) { throw new IllegalArgumentException("Cannot set the value of final field " + field); } field.setAccessible(true); field.set(null, fieldNewValue); } catch (Exception e) { throw new RuntimeException(e); } }
@Test public void setFinalStaticFieldReflectively_withFieldName_setsStaticFields() { RuntimeException thrown = assertThrows( RuntimeException.class, () -> ReflectionHelpers.setStaticField(ExampleWithFinalStatic.class, "FIELD", 101)); assertThat(thrown) .hasCauseThat() .hasMessageThat() .contains("Cannot set the value of final field"); }
public void saveDelta() throws IOException { Configuration conf = new Configuration(); conf.set("fs.defaultFS",config.fsDefaultFs); FileSystem fs = FileSystem.get(conf); Statement stmt = null; try { if (config.fromCommitTime == null) { config.fromCommitTime = inferCommitTime(fs); LOG.info("FromCommitTime inferred as " + config.fromCommitTime); } LOG.info("FromCommitTime - " + config.fromCommitTime); String sourceTableLocation = getTableLocation(config.sourceDb, config.sourceTable); String lastCommitTime = getLastCommitTimePulled(fs, sourceTableLocation); if (lastCommitTime == null) { LOG.info("Nothing to pull. However we will continue to create a empty table"); lastCommitTime = config.fromCommitTime; } Connection conn = getConnection(); stmt = conn.createStatement(); // drop the temp table if exists String tempDbTable = config.tmpDb + "." + config.targetTable + "__" + config.sourceTable; String tempDbTablePath = config.hoodieTmpDir + "/" + config.targetTable + "__" + config.sourceTable + "/" + lastCommitTime; executeStatement("drop table if exists " + tempDbTable, stmt); deleteHDFSPath(fs, tempDbTablePath); if (!ensureTempPathExists(fs, lastCommitTime)) { throw new IllegalStateException("Could not create target path at " + new Path(config.hoodieTmpDir, config.targetTable + "/" + lastCommitTime)); } initHiveBeelineProperties(stmt); executeIncrementalSQL(tempDbTable, tempDbTablePath, stmt); LOG.info("Finished HoodieReader execution"); } catch (SQLException e) { LOG.error("Exception when executing SQL", e); throw new IOException("Could not scan " + config.sourceTable + " incrementally", e); } finally { try { if (stmt != null) { stmt.close(); } } catch (SQLException e) { LOG.error("Could not close the resultSet opened ", e); } } }
@Test @EnabledIf(value = "org.apache.hudi.HoodieSparkUtils#isSpark2", disabledReason = "Disable due to hive not support avro 1.10.2.") public void testPuller() throws IOException, URISyntaxException { createTables(); HiveIncrementalPuller.Config cfg = getHivePullerConfig("select name from testdb.test1 where `_hoodie_commit_time` > '%s'"); HoodieHiveSyncClient hiveClient = new HoodieHiveSyncClient(new HiveSyncConfig(hiveSyncProps, HiveTestUtil.getHiveConf())); hiveClient.createDatabase(cfg.tmpDb); HiveIncrementalPuller puller = new HiveIncrementalPuller(cfg); puller.saveDelta(); HoodieHiveSyncClient assertingClient = new HoodieHiveSyncClient(new HiveSyncConfig(getAssertionSyncConfig(cfg.tmpDb), HiveTestUtil.getHiveConf())); String tmpTable = cfg.targetTable + "__" + cfg.sourceTable; assertTrue(assertingClient.tableExists(tmpTable)); }
@Override public Set<TransferItem> find(final CommandLine input, final TerminalAction action, final Path remote) { if(input.getOptionValues(action.name()).length == 2) { switch(action) { case download: return new DownloadTransferItemFinder().find(input, action, remote); case upload: case synchronize: return new UploadTransferItemFinder().find(input, action, remote); } } else { switch(action) { case upload: case synchronize: return Collections.emptySet(); } } // Relative to current working directory using prefix finder. return Collections.singleton( new TransferItem(remote, LocalFactory.get(prefixer.normalize(remote.getName()))) ); }
@Test public void testLocalInOptionsDownload() throws Exception { final CommandLineParser parser = new PosixParser(); final String temp = System.getProperty("java.io.tmpdir"); final CommandLine input = parser.parse(TerminalOptionsBuilder.options(), new String[]{"--download", "rackspace://cdn.cyberduck.ch/remote", String.format("%s/f", temp)}); final Set<TransferItem> found = new SingleTransferItemFinder().find(input, TerminalAction.download, new Path("/cdn.cyberduck.ch/remote", EnumSet.of(Path.Type.file))); assertFalse(found.isEmpty()); assertEquals(new TransferItem(new Path("/cdn.cyberduck.ch/remote", EnumSet.of(Path.Type.file)), LocalFactory.get(String.format("%s/f", temp))), found.iterator().next()); }
public static String getRequestPayload(PinotQueryGenerator.GeneratedPinotQuery pinotQuery) { ImmutableMap<String, String> pinotRequest = ImmutableMap.of(REQUEST_PAYLOAD_KEY, pinotQuery.getQuery()); try { return OBJECT_MAPPER.writeValueAsString(pinotRequest); } catch (JsonProcessingException e) { throw new PinotException( PINOT_REQUEST_GENERATOR_FAILURE, Optional.of(pinotQuery.getQuery()), "Unable to Jsonify request: " + Arrays.toString(pinotRequest.entrySet().toArray()), e); } }
@Test public void testPinotBrokerRequest() { PinotQueryGenerator.GeneratedPinotQuery generatedPinotQuery = new PinotQueryGenerator.GeneratedPinotQuery( pinotTable.getTableName(), "SELECT * FROM myTable", ImmutableList.of(), false, false); PinotBrokerPageSource pageSource = new PinotBrokerPageSource( pinotConfig, new TestingConnectorSession(ImmutableList.of( booleanProperty( "mark_data_fetch_exceptions_as_retriable", "Retry Pinot query on data fetch exceptions", pinotConfig.isMarkDataFetchExceptionsAsRetriable(), false))), generatedPinotQuery, ImmutableList.of(), ImmutableList.of(), new MockPinotClusterInfoFetcher(pinotConfig), objectMapper, PinotBrokerAuthenticationProvider.create(PinotEmptyAuthenticationProvider.instance())); assertEquals(PinotBrokerPageSource.getRequestPayload(generatedPinotQuery), "{\"sql\":\"SELECT * FROM myTable\"}"); generatedPinotQuery = new PinotQueryGenerator.GeneratedPinotQuery( pinotTable.getTableName(), "SELECT * FROM myTable WHERE jsonStr = '\"{\"abc\" : \"def\"}\"'", ImmutableList.of(), false, false); assertEquals(PinotBrokerPageSource.getRequestPayload(generatedPinotQuery), "{\"sql\":\"SELECT * FROM myTable WHERE jsonStr = '\\\"{\\\"abc\\\" : \\\"def\\\"}\\\"'\"}"); }
@Override public V chooseVolume(List<V> volumes, long replicaSize, String storageId) throws IOException { if (volumes.size() < 1) { throw new DiskOutOfSpaceException("No more available volumes"); } // As all the items in volumes are with the same storage type, // so only need to get the storage type index of the first item in volumes StorageType storageType = volumes.get(0).getStorageType(); int index = storageType != null ? storageType.ordinal() : StorageType.DEFAULT.ordinal(); synchronized (syncLocks[index]) { return doChooseVolume(volumes, replicaSize, storageId); } }
@Test(timeout=60000) public void testNotEnoughSpaceOnSelectedVolume() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy = ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null); List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>(); // First volume with 1MB free space volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L); // Second volume with 3MB free space, which is a difference of 2MB, more // than the threshold of 1MB. volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3); // All writes should be assigned to the volume with the least free space. // However, if the volume with the least free space doesn't have enough // space to accept the replica size, and another volume does have enough // free space, that should be chosen instead. initPolicy(policy, BALANCED_SPACE_THRESHOLD, 0.0f); Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 1024L * 1024L * 2, null)); }
@GetMapping(value = "/idInfo") public RestResult<Map<String, Map<Object, Object>>> idInfo() { Map<String, Map<Object, Object>> info = new HashMap<>(10); idGeneratorManager.getGeneratorMap().forEach((resource, idGenerator) -> info.put(resource, idGenerator.info())); return RestResultUtils.success(info); }
@Test void testIdInfo() { Map<String, IdGenerator> idGeneratorMap = new HashMap<>(); idGeneratorMap.put("1", new SnowFlowerIdGenerator()); Mockito.when(idGeneratorManager.getGeneratorMap()).thenReturn(idGeneratorMap); RestResult<Map<String, Map<Object, Object>>> res = coreOpsController.idInfo(); assertEquals(2, res.getData().get("1").size()); }
@Override public DistroData getData(DistroKey key, String targetServer) { Member member = memberManager.find(targetServer); if (checkTargetServerStatusUnhealthy(member)) { throw new DistroException( String.format("[DISTRO] Cancel get snapshot caused by target server %s unhealthy", targetServer)); } DistroDataRequest request = new DistroDataRequest(); DistroData distroData = new DistroData(); distroData.setDistroKey(key); distroData.setType(DataOperation.QUERY); request.setDistroData(distroData); request.setDataOperation(DataOperation.QUERY); try { Response response = clusterRpcClientProxy.sendRequest(member, request); if (checkResponse(response)) { return ((DistroDataResponse) response).getDistroData(); } else { throw new DistroException( String.format("[DISTRO-FAILED] Get data request to %s failed, code: %d, message: %s", targetServer, response.getErrorCode(), response.getMessage())); } } catch (NacosException e) { throw new DistroException("[DISTRO-FAILED] Get distro data failed! ", e); } }
@Test void testGetDataException() throws NacosException { assertThrows(DistroException.class, () -> { when(memberManager.find(member.getAddress())).thenReturn(member); member.setState(NodeState.UP); when(clusterRpcClientProxy.isRunning(member)).thenReturn(true); when(clusterRpcClientProxy.sendRequest(eq(member), any())).thenThrow(new NacosException()); transportAgent.getData(new DistroKey(), member.getAddress()); }); }
/**
 * Polls the storage provider for enqueued jobs and hands them to the background job
 * server for processing. Skipped entirely when the server is not running or another
 * thread already holds the poll lock.
 */
@Override
protected void runTask() {
    // tryLock is only attempted while the server is running (short-circuit preserved).
    if (!backgroundJobServer.isRunning() || !reentrantLock.tryLock()) {
        return;
    }
    try {
        LOGGER.trace("Looking for enqueued jobs... ");
        final AmountRequest pageRequest = workDistributionStrategy.getWorkPageRequest();
        // A non-positive limit means this server currently has no capacity for more work.
        if (pageRequest.getLimit() > 0) {
            final List<Job> jobs = storageProvider.getJobsToProcess(backgroundJobServer, pageRequest);
            jobs.forEach(backgroundJobServer::processJob);
            LOGGER.debug("Found {} enqueued jobs to process.", jobs.size());
        }
    } finally {
        reentrantLock.unlock();
    }
}
@Test
void testTask() {
    // Two enqueued jobs on the first poll, none afterwards.
    Job firstJob = anEnqueuedJob().build();
    Job secondJob = anEnqueuedJob().build();
    when(storageProvider.getJobsToProcess(eq(backgroundJobServer), any()))
            .thenReturn(asList(firstJob, secondJob), emptyJobList());

    runTask(task);

    // Every fetched job must be handed to the server for processing.
    verify(backgroundJobServer).processJob(firstJob);
    verify(backgroundJobServer).processJob(secondJob);
}
/**
 * Builds the Pekko {@link Config} for the given Flink configuration, using no bind
 * address override and the default fork-join executor configuration for remoting.
 *
 * @param configuration   Flink configuration to derive the Pekko config from
 * @param externalAddress externally reachable address, or {@code null}
 * @return the assembled Pekko configuration
 */
public static Config getConfig(
        Configuration configuration, @Nullable HostAndPort externalAddress) {
    final Config executorConfig =
            PekkoUtils.getForkJoinExecutorConfig(
                    ActorSystemBootstrapTools.getForkJoinExecutorConfiguration(configuration));
    return getConfig(configuration, externalAddress, null, executorConfig);
}
@Test
void getConfigDefaultsToRemoteForkJoinExecutor() {
    final Configuration flinkConfig = new Configuration();
    final Config config = PekkoUtils.getConfig(flinkConfig, new HostAndPort("localhost", 1234));
    // Without an explicit override, the remote dispatcher must use the fork-join executor.
    assertThat(config.getString("pekko.remote.default-remote-dispatcher.executor"))
            .isEqualTo("fork-join-executor");
}
/**
 * Parses the AFM input of this parser and returns the extracted font metrics.
 * Delegates to {@code parseFontMetric(false)}, i.e. without the reduced-dataset
 * mode enabled — presumably {@code false} means a full parse; confirm against
 * {@code parseFontMetric}'s definition.
 *
 * @return the parsed {@code FontMetrics}
 * @throws IOException if the input cannot be read or is not a valid AFM stream
 */
public FontMetrics parse() throws IOException {
    return parseFontMetric(false);
}
@Test void testStartFontMetrics() throws IOException { try { new AFMParser(new ByteArrayInputStream("huhu".getBytes(StandardCharsets.US_ASCII))) .parse(); fail("The AFMParser should have thrown an IOException because of a missing " + AFMParser.START_FONT_METRICS); } catch (IOException e) { // expected exception } }
/**
 * Parses the resolver options from the system resolver configuration file
 * (the file referenced by {@code ETC_RESOLV_CONF_FILE}).
 *
 * @return the parsed {@code UnixResolverOptions}
 * @throws IOException if the configuration file cannot be read
 */
static UnixResolverOptions parseEtcResolverOptions() throws IOException {
    return parseEtcResolverOptions(new File(ETC_RESOLV_CONF_FILE));
}
@Test
public void attemptsOptionIsParsedIfPresent(@TempDir Path tempDir) throws IOException {
    // "attempts:0" is honoured even at the minimum value.
    File resolvConf = buildFile(tempDir, "search localdomain\n" +
            "nameserver 127.0.0.11\n" +
            "options attempts:0\n");
    assertEquals(0, parseEtcResolverOptions(resolvConf).attempts());

    // The attempts option is still found when another option precedes it.
    resolvConf = buildFile(tempDir, "search localdomain\n" +
            "nameserver 127.0.0.11\n" +
            "options foo:bar attempts:12\n");
    assertEquals(12, parseEtcResolverOptions(resolvConf).attempts());
}
/**
 * Handles OAuth bearer token and SASL extensions callbacks, translating any
 * {@link KafkaException} raised by the handlers into an {@link IOException}.
 *
 * @param callbacks callbacks to satisfy; unsupported types are rejected
 * @throws IOException                  if a handler fails with a {@link KafkaException}
 * @throws UnsupportedCallbackException if a callback type is not supported
 * @throws IllegalStateException        if the handler has not been configured yet
 */
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    if (!configured()) {
        throw new IllegalStateException("Callback handler not configured");
    }
    // Braces added around each branch: the original brace-less if/try/else chain is
    // the classic dangling-else hazard; behavior is unchanged.
    for (Callback callback : callbacks) {
        if (callback instanceof OAuthBearerTokenCallback) {
            try {
                handleTokenCallback((OAuthBearerTokenCallback) callback);
            } catch (KafkaException e) {
                // Preserve the original message and cause for callers catching IOException.
                throw new IOException(e.getMessage(), e);
            }
        } else if (callback instanceof SaslExtensionsCallback) {
            try {
                handleExtensionsCallback((SaslExtensionsCallback) callback);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else {
            throw new UnsupportedCallbackException(callback);
        }
    }
}
@Test
public void throwsErrorOnInvalidExtensionValue() {
    // An extension value containing a non-ASCII character is expected to be rejected.
    Map<String, String> jaasOptions = new HashMap<>();
    jaasOptions.put("unsecuredLoginExtension_testId", "Çalifornia");
    OAuthBearerUnsecuredLoginCallbackHandler handler = createCallbackHandler(jaasOptions, new MockTime());

    SaslExtensionsCallback extensionsCallback = new SaslExtensionsCallback();
    assertThrows(IOException.class, () -> handler.handle(new Callback[] {extensionsCallback}));
}
@VisibleForTesting PlanNodeStatsEstimate calculateJoinComplementStats( Optional<RowExpression> filter, List<EquiJoinClause> criteria, PlanNodeStatsEstimate leftStats, PlanNodeStatsEstimate rightStats) { if (rightStats.getOutputRowCount() == 0) { // no left side rows are matched return leftStats; } if (criteria.isEmpty()) { // TODO: account for non-equi conditions if (filter.isPresent()) { return PlanNodeStatsEstimate.unknown(); } return normalizer.normalize(leftStats.mapOutputRowCount(rowCount -> 0.0)); } // TODO: add support for non-equality conditions (e.g: <=, !=, >) int numberOfFilterClauses = filter.map(expression -> extractConjuncts(expression).size()).orElse(0); // Heuristics: select the most selective criteria for join complement clause. // Principals behind this heuristics is the same as in computeInnerJoinStats: // select "driving join clause" that reduces matched rows the most. return criteria.stream() .map(drivingClause -> calculateJoinComplementStats(leftStats, rightStats, drivingClause, criteria.size() - 1 + numberOfFilterClauses)) .filter(estimate -> !estimate.isOutputRowCountUnknown()) .max(comparingDouble(PlanNodeStatsEstimate::getOutputRowCount)) .map(estimate -> normalizer.normalize(estimate)) .orElse(PlanNodeStatsEstimate.unknown()); }
@Test
public void testJoinComplementStats() {
    // Expected complement: left rows with NULL join keys plus a quarter of the
    // non-null rows (the unmatched fraction implied by the fixture statistics —
    // see the /4 factor below; confirm against the LEFT_/RIGHT_ fixture constants).
    PlanNodeStatsEstimate expected = planNodeStats(
            LEFT_ROWS_COUNT * (LEFT_JOIN_COLUMN_NULLS + LEFT_JOIN_COLUMN_NON_NULLS / 4),
            variableStatistics(
                    LEFT_JOIN_COLUMN,
                    0.0,
                    20.0,
                    LEFT_JOIN_COLUMN_NULLS / (LEFT_JOIN_COLUMN_NULLS + LEFT_JOIN_COLUMN_NON_NULLS / 4),
                    5),
            LEFT_OTHER_COLUMN_STATS);
    // Single equi-join clause, no residual filter.
    PlanNodeStatsEstimate actual = JOIN_STATS_RULE.calculateJoinComplementStats(
            Optional.empty(),
            ImmutableList.of(new EquiJoinClause(LEFT_JOIN_COLUMN, RIGHT_JOIN_COLUMN)),
            LEFT_STATS,
            RIGHT_STATS);
    assertEquals(actual, expected);
}
/**
 * Maps each replacement {@link PCollection} to the original output it replaces,
 * matching them by {@link TupleTag}. Every replacement tag must exist in
 * {@code original}, and every original tag must be replaced.
 *
 * @param original    original outputs keyed by tag
 * @param replacement replacement transform output expanded into tagged values
 * @return replacement collection → {@link ReplacementOutput} pairing original and replacement
 * @throws IllegalArgumentException if the tag sets of original and replacement differ
 */
public static Map<PCollection<?>, ReplacementOutput> tagged(
    Map<TupleTag<?>, PCollection<?>> original, POutput replacement) {
  // Index the original outputs by tag for constant-time lookup below.
  Map<TupleTag<?>, TaggedPValue> originalByTag = new HashMap<>();
  original.forEach((tag, value) -> originalByTag.put(tag, TaggedPValue.of(tag, value)));

  // Tags still awaiting a replacement; must be empty once all replacements are consumed.
  Map<TupleTag<?>, PCollection<?>> unmatchedOriginals = new HashMap<>(original);
  Map<TupleTag<?>, PCollection<?>> taggedReplacements = PValues.expandOutput(replacement);

  ImmutableMap.Builder<PCollection<?>, ReplacementOutput> result = ImmutableMap.builder();
  for (Map.Entry<TupleTag<?>, PCollection<?>> replacementEntry : taggedReplacements.entrySet()) {
    TupleTag<?> tag = replacementEntry.getKey();
    PCollection<?> replacementValue = replacementEntry.getValue();
    TaggedPValue originalValue = originalByTag.get(tag);
    checkArgument(
        originalValue != null,
        "Missing original output for Tag %s and Value %s Between original %s and replacement %s",
        tag,
        replacementValue,
        original,
        replacement.expand());
    result.put(
        replacementValue,
        ReplacementOutput.of(originalValue, TaggedPValue.of(tag, (PCollection<?>) replacementValue)));
    unmatchedOriginals.remove(tag);
  }
  checkArgument(
      unmatchedOriginals.isEmpty(),
      "Missing replacement for tagged values %s. Replacement was: %s",
      unmatchedOriginals,
      taggedReplacements);
  return result.build();
}
@Test
public void taggedSucceeds() {
  // Three tagged originals replaced by three tagged replacements, supplied in a
  // different order to prove matching is by tag, not by position.
  PCollectionTuple original =
      PCollectionTuple.of(intsTag, ints).and(strsTag, strs).and(moreIntsTag, moreInts);
  Map<PCollection<?>, ReplacementOutput> replacements =
      ReplacementOutputs.tagged(
          PValues.expandOutput((POutput) original),
          PCollectionTuple.of(strsTag, replacementStrs)
              .and(moreIntsTag, moreReplacementInts)
              .and(intsTag, replacementInts));
  // The result is keyed by the replacement collections.
  assertThat(
      replacements.keySet(),
      Matchers.containsInAnyOrder(replacementStrs, replacementInts, moreReplacementInts));
  ReplacementOutput intsReplacement = replacements.get(replacementInts);
  ReplacementOutput strsReplacement = replacements.get(replacementStrs);
  ReplacementOutput moreIntsReplacement = replacements.get(moreReplacementInts);
  // Each entry pairs the original tagged value with its same-tag replacement.
  assertThat(
      intsReplacement,
      equalTo(
          ReplacementOutput.of(
              TaggedPValue.of(intsTag, ints), TaggedPValue.of(intsTag, replacementInts))));
  assertThat(
      strsReplacement,
      equalTo(
          ReplacementOutput.of(
              TaggedPValue.of(strsTag, strs), TaggedPValue.of(strsTag, replacementStrs))));
  assertThat(
      moreIntsReplacement,
      equalTo(
          ReplacementOutput.of(
              TaggedPValue.of(moreIntsTag, moreInts),
              TaggedPValue.of(moreIntsTag, moreReplacementInts))));
}
@Override public void message(final String message) { if(StringUtils.isBlank(message)) { return; } final StringAppender appender = new StringAppender('…'); appender.append(message); // Clear the line and append message. Used instead of \r because the line width may vary console.printf("\r%s%s%s", Ansi.ansi() .fg(Ansi.Color.CYAN) .saveCursorPosition() .eraseLine(Ansi.Erase.ALL) .restoreCursorPosition(), appender.toString(), Ansi.ansi().reset()); }
@Test
public void testMessage() {
    // Smoke test: rendering a message to the terminal must not throw.
    final TerminalProgressListener listener = new TerminalProgressListener();
    listener.message("b");
}
/**
 * Creates a new {@code CloudStringReader} wrapping the given command input.
 *
 * @param commandInput the command input to wrap
 * @return a new reader backed by {@code commandInput}
 */
static @NonNull CloudStringReader of(final @NonNull CommandInput commandInput) {
    return new CloudStringReader(commandInput);
}
@Test void testBeginningAndMiddleRemoved() throws CommandSyntaxException { // Arrange final CommandInput commandInput = CommandInput.of("hello some worlds"); final StringReader stringReader = CloudStringReader.of(commandInput); // Act final String readString1 = stringReader.readString(); stringReader.skipWhitespace(); final String readString2 = stringReader.readString(); stringReader.skipWhitespace(); // Assert assertThat(readString1).isEqualTo("hello"); assertThat(readString2).isEqualTo("some"); assertThat(commandInput.remainingInput()).isEqualTo("worlds"); }
/**
 * Returns a window of a resource file's content for online viewing: {@code limit}
 * lines starting after {@code skipLineNum} skipped lines.
 *
 * @param loginUser   authenticated user taken from the session attribute
 * @param skipLineNum number of lines to skip from the top of the file
 * @param limit       maximum number of lines to return
 * @param fullName    full resource path (e.g. {@code tenant/1.png})
 * @param tenantCode  tenant owning the resource
 * @return result wrapping the requested file content, or an error result from the service
 */
@Operation(summary = "viewResource", description = "VIEW_RESOURCE_BY_ID_NOTES")
@Parameters({
        @Parameter(name = "fullName", description = "RESOURCE_FULL_NAME", required = true, schema = @Schema(implementation = String.class, example = "tenant/1.png")),
        @Parameter(name = "tenantCode", description = "TENANT_CODE", required = true, schema = @Schema(implementation = String.class)),
        @Parameter(name = "skipLineNum", description = "SKIP_LINE_NUM", required = true, schema = @Schema(implementation = int.class, example = "100")),
        @Parameter(name = "limit", description = "LIMIT", required = true, schema = @Schema(implementation = int.class, example = "100"))
})
@GetMapping(value = "/view")
@ApiException(VIEW_RESOURCE_FILE_ON_LINE_ERROR)
public Result viewResource(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                           @RequestParam(value = "skipLineNum") int skipLineNum,
                           @RequestParam(value = "limit") int limit,
                           @RequestParam(value = "fullName") String fullName,
                           @RequestParam(value = "tenantCode") String tenantCode) {
    return resourceService.readResource(loginUser, fullName, tenantCode, skipLineNum, limit);
}
@Test
public void testViewResource() throws Exception {
    // Service layer is stubbed to report that HDFS is not running.
    Result mockResult = new Result<>();
    mockResult.setCode(Status.HDFS_NOT_STARTUP.getCode());
    Mockito.when(resourcesService.readResource(Mockito.any(), Mockito.anyString(), Mockito.anyString(),
            Mockito.anyInt(), Mockito.anyInt()))
            .thenReturn(mockResult);

    MultiValueMap<String, String> params = new LinkedMultiValueMap<>();
    params.add("skipLineNum", "2");
    params.add("limit", "100");
    params.add("fullName", "dolphinscheduler/resourcePath");
    params.add("tenantCode", "123");

    MvcResult mvcResult = mockMvc.perform(get("/resources/view")
            .header(SESSION_ID, sessionId)
            .params(params))
            .andExpect(status().isOk())
            .andExpect(content().contentType(MediaType.APPLICATION_JSON))
            .andReturn();

    // The controller must propagate the service error code unchanged.
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertEquals(Status.HDFS_NOT_STARTUP.getCode(), result.getCode().intValue());
    logger.info(mvcResult.getResponse().getContentAsString());
}
/**
 * Wraps request handling with per-request setup/teardown: DB session caching and
 * thread-local settings are enabled before delegating and torn down afterwards
 * in reverse order, even when the delegate throws.
 */
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain)
        throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) servletRequest;
    HttpServletResponse httpResponse = (HttpServletResponse) servletResponse;

    // Resolve collaborators from the container; the user-session initializer is optional.
    DBSessions dbSessions = platform.getContainer().getComponentByType(DBSessions.class);
    ThreadLocalSettings settings = platform.getContainer().getComponentByType(ThreadLocalSettings.class);
    UserSessionInitializer userSessionInitializer =
            platform.getContainer().getOptionalComponentByType(UserSessionInitializer.class).orElse(null);

    LOG.trace("{} serves {}", Thread.currentThread(), httpRequest.getRequestURI());

    dbSessions.enableCaching();
    try {
        settings.load();
        try {
            doFilter(httpRequest, httpResponse, chain, userSessionInitializer);
        } finally {
            settings.unload();
        }
    } finally {
        dbSessions.disableCaching();
    }
}
@Test
public void cleanup_user_session_after_request_handling() throws IOException, ServletException {
    mockUserSessionInitializer(true);

    underTest.doFilter(request, response, chain);

    // The request must flow through the chain and the user session must be initialized.
    verify(chain).doFilter(request, response);
    verify(userSessionInitializer).initUserSession(any(JavaxHttpRequest.class), any(JavaxHttpResponse.class));
}
@SqlInvokedScalarFunction(value = "array_max_by", deterministic = true, calledOnNullInput = true)
@Description("Get the maximum value of array, by using a specific transformation function")
@TypeParameter("T")
@TypeParameter("U")
@SqlParameters({@SqlParameter(name = "input", type = "array(T)"), @SqlParameter(name = "f", type = "function(T, U)")})
@SqlType("T")
public static String arrayMaxBy() {
    // SQL body: transform each element with f, pair non-null transformed values with
    // their 1-based position, take the position ([2]) of the maximum pair, and index
    // the original array with it. A NULL transform propagates NULL for that element.
    final String indexOfMaxTransformed =
            "array_max(zip_with(transform(input, f), sequence(1, cardinality(input)), (x, y)->IF(x IS NULL, NULL, (x, y))))[2]";
    return "RETURN input[" + indexOfMaxTransformed + "]";
}
@Test
public void testArrayMaxBy() {
    // Identity transform: plain maximum.
    assertFunction("ARRAY_MAX_BY(ARRAY [double'1.0', double'2.0'], i -> i)", DOUBLE, 2.0d);
    // Non-monotonic transform: -3 maximizes i*i even though it is the smaller value.
    assertFunction("ARRAY_MAX_BY(ARRAY [double'-3.0', double'2.0'], i -> i*i)", DOUBLE, -3.0d);
    // Varchar input keyed by length; result keeps the element's original type.
    assertFunction("ARRAY_MAX_BY(ARRAY ['a', 'bb', 'c'], x -> LENGTH(x))", createVarcharType(2), "bb");
    assertFunction("ARRAY_MAX_BY(ARRAY [1, 2, 3], x -> 1-x)", INTEGER, 1);
    // Nested container element types: array, map, and row.
    assertFunction("ARRAY_MAX_BY(ARRAY [ARRAY['a'], ARRAY['b', 'b'], ARRAY['c']], x -> CARDINALITY(x))", new ArrayType(createVarcharType(1)), asList("b", "b"));
    assertFunction("ARRAY_MAX_BY(ARRAY [MAP(ARRAY['foo', 'bar'], ARRAY[1, 2]), MAP(ARRAY['foo', 'bar'], ARRAY[0, 3])], x -> x['foo'])", mapType(createVarcharType(3), INTEGER), ImmutableMap.of("foo", 1, "bar", 2));
    assertFunction("ARRAY_MAX_BY(ARRAY [CAST(ROW(0, 2.0) AS ROW(x BIGINT, y DOUBLE)), CAST(ROW(1, 3.0) AS ROW(x BIGINT, y DOUBLE))], r -> r.y).x", BIGINT, 1L);
    // NULL handling: a NULL element, an all-NULL array, and a NULL array all yield NULL.
    assertFunction("ARRAY_MAX_BY(ARRAY [null, double'1.0', double'2.0'], i -> i)", DOUBLE, null);
    assertFunction("ARRAY_MAX_BY(ARRAY [cast(null as double), cast(null as double)], i -> i)", DOUBLE, null);
    assertFunction("ARRAY_MAX_BY(cast(null as array(double)), i -> i)", DOUBLE, null);
}