focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/** Returns this endpoint's Event Hubs configuration. */
public EventHubsConfiguration getConfiguration() {
    return this.configuration;
}
// Verifies that every query parameter of an azure-eventhubs URI is parsed into the endpoint configuration.
@Test public void testCreateEndpointWithConfig() { final String uri = "azure-eventhubs:namespace/hubName?sharedAccessName=DummyAccessKeyName" + "&sharedAccessKey=DummyKey" + "&consumerGroupName=testConsumer&prefetchCount=100" + "&checkpointBatchSize=100&checkpointBatchTimeout=1000"; final EventHubsEndpoint endpoint = context.getEndpoint(uri, EventHubsEndpoint.class); assertEquals("namespace", endpoint.getConfiguration().getNamespace()); assertEquals("hubName", endpoint.getConfiguration().getEventHubName()); assertEquals("testConsumer", endpoint.getConfiguration().getConsumerGroupName()); assertEquals("DummyAccessKeyName", endpoint.getConfiguration().getSharedAccessName()); assertEquals("DummyKey", endpoint.getConfiguration().getSharedAccessKey()); assertEquals(100, endpoint.getConfiguration().getPrefetchCount()); assertEquals(100, endpoint.getConfiguration().getCheckpointBatchSize()); assertEquals(1000, endpoint.getConfiguration().getCheckpointBatchTimeout()); }
/**
 * Creates a successful import result wrapping {@code data}.
 *
 * @param data        the imported item; must not be {@code null}
 * @param sizeInBytes serialized size of the item; when non-null it must not be negative
 * @return a result with {@code Status.SUCCESS} and no error
 * @throws NullPointerException     if {@code data} is {@code null}
 * @throws IllegalArgumentException if {@code sizeInBytes} is negative
 */
public static <T extends Serializable> ItemImportResult<T> success(T data, Long sizeInBytes) {
    Preconditions.checkNotNull(data);
    // A negative size is a programming error; reject it explicitly rather than
    // storing a nonsensical value.
    Preconditions.checkArgument(sizeInBytes == null || sizeInBytes >= 0,
            "sizeInBytes must not be negative: %s", sizeInBytes);
    return new ItemImportResult<>(data, sizeInBytes, Status.SUCCESS, null);
}
// Verifies that success() rejects a negative byte size with IllegalArgumentException.
@Test public void testFailWithIncorrectBytes() { assertThrows(IllegalArgumentException.class, () -> { ItemImportResult.success("blabla", -1L); }); }
/**
 * Builds the RocketMQ inbound channel adapter for a consumer binding:
 * derives an anonymous group name when none is configured (rejected for DLQ
 * topics, whose name must be stable), merges binder-level properties into
 * the extended consumer properties, then wires either a retry template with
 * recovery callback (maxAttempts &gt; 1) or the binding's error channel.
 */
@Override protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group, ExtendedConsumerProperties<RocketMQConsumerProperties> extendedConsumerProperties) throws Exception { boolean anonymous = !StringUtils.hasLength(group); /*** * When using DLQ, at least the group property must be provided for proper naming of the DLQ destination * According to https://docs.spring.io/spring-cloud-stream/docs/3.2.1/reference/html/spring-cloud-stream.html#spring-cloud-stream-reference */ if (anonymous && NamespaceUtil.isDLQTopic(destination.getName())) { throw new RuntimeException( "group must be configured for DLQ" + destination.getName()); } group = anonymous ? RocketMQUtils.anonymousGroup(destination.getName()) : group; RocketMQUtils.mergeRocketMQProperties(binderConfigurationProperties, extendedConsumerProperties.getExtension()); extendedConsumerProperties.getExtension().setGroup(group); RocketMQInboundChannelAdapter inboundChannelAdapter = new RocketMQInboundChannelAdapter( destination.getName(), extendedConsumerProperties); ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, group, extendedConsumerProperties); if (extendedConsumerProperties.getMaxAttempts() > 1) { inboundChannelAdapter .setRetryTemplate(buildRetryTemplate(extendedConsumerProperties)); inboundChannelAdapter.setRecoveryCallback(errorInfrastructure.getRecoverer()); } else { inboundChannelAdapter.setErrorChannel(errorInfrastructure.getErrorChannel()); } return inboundChannelAdapter; }
// Verifies that a consumer endpoint is created (non-null) for a simple destination/group pair.
@Test public void createConsumerEndpoint() throws Exception { TestConsumerDestination destination = new TestConsumerDestination("test"); MessageProducer consumerEndpoint = binder.createConsumerEndpoint(destination, "test", new ExtendedConsumerProperties<>(new RocketMQConsumerProperties())); Assertions.assertThat(consumerEndpoint).isNotNull(); }
/**
 * Finds the enum constant of {@code enumClass} whose value for any declared
 * field equals {@code value} (CharSequence values are trimmed first).
 * Returns {@code null} when either argument is null or no constant matches.
 */
@SuppressWarnings("unchecked") public static <E extends Enum<E>> E likeValueOf(Class<E> enumClass, Object value) { if(null == enumClass || null == value){ return null; } if (value instanceof CharSequence) { value = value.toString().trim(); } final Field[] fields = ReflectUtil.getFields(enumClass); final Enum<?>[] enums = enumClass.getEnumConstants(); String fieldName; for (Field field : fields) { fieldName = field.getName(); if (field.getType().isEnum() || "ENUM$VALUES".equals(fieldName) || "ordinal".equals(fieldName)) { // skip enum-internal special fields
continue; } for (Enum<?> enumObj : enums) { if (ObjectUtil.equal(value, ReflectUtil.getFieldValue(enumObj, field))) { return (E) enumObj; } } } return null; }
// Verifies that a constant is resolved by the value of one of its fields ("type2" -> TEST2).
@Test public void likeValueOfTest() { TestEnum value = EnumUtil.likeValueOf(TestEnum.class, "type2"); assertEquals(TestEnum.TEST2, value); }
/**
 * Incrementally decodes SPDY frames from {@code buffer}, driving a state
 * machine that persists across calls: reads the common header, validates
 * version and frame fields, then dispatches per-frame-type reads to
 * {@code delegate}. Each state returns (without consuming) whenever too few
 * bytes are readable; FRAME_ERROR discards all remaining input. Data frames
 * are emitted in chunks of at most {@code maxChunkSize} bytes.
 */
public void decode(ByteBuf buffer) { boolean last; int statusCode; while (true) { switch(state) { case READ_COMMON_HEADER: if (buffer.readableBytes() < SPDY_HEADER_SIZE) { return; } int frameOffset = buffer.readerIndex(); int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET; int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET; buffer.skipBytes(SPDY_HEADER_SIZE); boolean control = (buffer.getByte(frameOffset) & 0x80) != 0; int version; int type; if (control) { // Decode control frame common header version = getUnsignedShort(buffer, frameOffset) & 0x7FFF; type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET); streamId = 0; // Default to session Stream-ID } else { // Decode data frame common header version = spdyVersion; // Default to expected version type = SPDY_DATA_FRAME; streamId = getUnsignedInt(buffer, frameOffset); } flags = buffer.getByte(flagsOffset); length = getUnsignedMedium(buffer, lengthOffset); // Check version first then validity if (version != spdyVersion) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SPDY Version"); } else if (!isValidFrameHeader(streamId, type, flags, length)) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid Frame Error"); } else { state = getNextState(type, length); } break; case READ_DATA_FRAME: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0)); break; } // Generate data frames that do not exceed maxChunkSize int dataLength = Math.min(maxChunkSize, length); // Wait until entire frame is readable if (buffer.readableBytes() < dataLength) { return; } ByteBuf data = buffer.alloc().buffer(dataLength); data.writeBytes(buffer, dataLength); length -= dataLength; if (length == 0) { state = State.READ_COMMON_HEADER; } last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN); delegate.readDataFrame(streamId, last, data); break; case READ_SYN_STREAM_FRAME: if (buffer.readableBytes() < 10) { 
return; } int offset = buffer.readerIndex(); streamId = getUnsignedInt(buffer, offset); int associatedToStreamId = getUnsignedInt(buffer, offset + 4); byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07); last = hasFlag(flags, SPDY_FLAG_FIN); boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL); buffer.skipBytes(10); length -= 10; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_STREAM Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); } break; case READ_SYN_REPLY_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_REPLY Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynReplyFrame(streamId, last); } break; case READ_RST_STREAM_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (streamId == 0 || statusCode == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid RST_STREAM Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readRstStreamFrame(streamId, statusCode); } break; case READ_SETTINGS_FRAME: if (buffer.readableBytes() < 4) { return; } boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR); numSettings = getUnsignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); length -= 4; // Validate frame length against number of entries. Each ID/Value entry is 8 bytes. 
if ((length & 0x07) != 0 || length >> 3 != numSettings) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SETTINGS Frame"); } else { state = State.READ_SETTING; delegate.readSettingsFrame(clear); } break; case READ_SETTING: if (numSettings == 0) { state = State.READ_COMMON_HEADER; delegate.readSettingsEnd(); break; } if (buffer.readableBytes() < 8) { return; } byte settingsFlags = buffer.getByte(buffer.readerIndex()); int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1); int value = getSignedInt(buffer, buffer.readerIndex() + 4); boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE); boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED); buffer.skipBytes(8); --numSettings; delegate.readSetting(id, value, persistValue, persisted); break; case READ_PING_FRAME: if (buffer.readableBytes() < 4) { return; } int pingId = getSignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); state = State.READ_COMMON_HEADER; delegate.readPingFrame(pingId); break; case READ_GOAWAY_FRAME: if (buffer.readableBytes() < 8) { return; } int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); state = State.READ_COMMON_HEADER; delegate.readGoAwayFrame(lastGoodStreamId, statusCode); break; case READ_HEADERS_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid HEADERS Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readHeadersFrame(streamId, last); } break; case READ_WINDOW_UPDATE_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (deltaWindowSize == 0) { state = 
State.FRAME_ERROR; delegate.readFrameError("Invalid WINDOW_UPDATE Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readWindowUpdateFrame(streamId, deltaWindowSize); } break; case READ_HEADER_BLOCK: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readHeaderBlockEnd(); break; } if (!buffer.isReadable()) { return; } int compressedBytes = Math.min(buffer.readableBytes(), length); ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes); headerBlock.writeBytes(buffer, compressedBytes); length -= compressedBytes; delegate.readHeaderBlock(headerBlock); break; case DISCARD_FRAME: int numBytes = Math.min(buffer.readableBytes(), length); buffer.skipBytes(numBytes); length -= numBytes; if (length == 0) { state = State.READ_COMMON_HEADER; break; } return; case FRAME_ERROR: buffer.skipBytes(buffer.readableBytes()); return; default: throw new Error("Shouldn't reach here."); } } }
// Verifies that a zero-length unknown control frame is silently discarded: no delegate calls, buffer fully consumed.
@Test public void testDiscardUnknownEmptyFrame() throws Exception { short type = 5; byte flags = (byte) 0xFF; int length = 0; ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); encodeControlFrameHeader(buf, type, flags, length); decoder.decode(buf); verifyZeroInteractions(delegate); assertFalse(buf.isReadable()); buf.release(); }
/**
 * Returns an unbounded counting source starting at 0 with stride 1 and no
 * rate limit, stamping each element with {@code timestampFn}.
 *
 * @deprecated retained for backward compatibility — confirm the current replacement API before new use
 */
@Deprecated public static UnboundedSource<Long, CounterMark> unboundedWithTimestampFn( SerializableFunction<Long, Instant> timestampFn) { return new UnboundedCountingSource(0, 1, 1L, Duration.ZERO, timestampFn); }
// Verifies that reading resumes at the correct element after a checkpoint mark is cloned and restored.
@Test public void testUnboundedSourceCheckpointMark() throws Exception { UnboundedSource<Long, CounterMark> source = CountingSource.unboundedWithTimestampFn(new ValueAsTimestampFn()); UnboundedReader<Long> reader = source.createReader(null, null); final long numToSkip = 3; assertTrue(reader.start()); // Advance the source numToSkip elements and manually save state. for (long l = 0; l < numToSkip; ++l) { reader.advance(); } // Confirm that we get the expected element in sequence before checkpointing. assertEquals(numToSkip, (long) reader.getCurrent()); assertEquals(numToSkip, reader.getCurrentTimestamp().getMillis()); // Checkpoint and restart, and confirm that the source continues correctly. CounterMark mark = CoderUtils.clone(source.getCheckpointMarkCoder(), (CounterMark) reader.getCheckpointMark()); reader = source.createReader(null, mark); assertTrue(reader.start()); // Confirm that we get the next element in sequence. assertEquals(numToSkip + 1, (long) reader.getCurrent()); assertEquals(numToSkip + 1, reader.getCurrentTimestamp().getMillis()); }
/** Pages notices matching the request VO; delegates filtering to the mapper. */
@Override public PageResult<NoticeDO> getNoticePage(NoticePageReqVO reqVO) { return noticeMapper.selectPage(reqVO); }
// Verifies that paging filters by both title and status, returning only the matching row.
@Test public void testGetNoticePage_success() {
// insert fixture data
NoticeDO dbNotice = randomPojo(NoticeDO.class, o -> { o.setTitle("尼古拉斯赵四来啦!"); o.setStatus(CommonStatusEnum.ENABLE.getStatus()); }); noticeMapper.insert(dbNotice);
// row with a non-matching title
noticeMapper.insert(cloneIgnoreId(dbNotice, o -> o.setTitle("尼古拉斯凯奇也来啦!")));
// row with a non-matching status
noticeMapper.insert(cloneIgnoreId(dbNotice, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
// build the request
NoticePageReqVO reqVO = new NoticePageReqVO(); reqVO.setTitle("尼古拉斯赵四来啦!"); reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
// invoke
PageResult<NoticeDO> pageResult = noticeService.getNoticePage(reqVO);
// assert only the matching row is returned
assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbNotice, pageResult.getList().get(0)); }
/**
 * Looks up the {@code PermissionFactory} registered for {@code serviceName}
 * and creates a permission for the given object name and actions.
 *
 * @throws IllegalArgumentException if no factory is registered for the service
 */
public static Permission getPermission(String name, String serviceName, String... actions) {
    final PermissionFactory factory = PERMISSION_FACTORY_MAP.get(serviceName);
    if (factory == null) {
        throw new IllegalArgumentException("No permissions found for service: " + serviceName);
    }
    return factory.create(name, actions);
}
// Verifies that the replicated-map service name maps to a ReplicatedMapPermission.
@Test public void getPermission_ReplicatedMap() { Permission permission = ActionConstants.getPermission("foo", ReplicatedMapService.SERVICE_NAME); assertNotNull(permission); assertTrue(permission instanceof ReplicatedMapPermission); }
/**
 * Lazily resolves and caches the original host from the request headers,
 * falling back to the server name; URI parse failures are rethrown as
 * IllegalArgumentException.
 */
@Override public String getOriginalHost() { try { if (originalHost == null) { originalHost = getOriginalHost(getHeaders(), getServerName()); } return originalHost; } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } }
// Verifies that with non-strict host validation an unbracketed IPv6 Host header falls back to the server name.
@Test void getOriginalHost_fallsBackOnUnbracketedIpv6Address_WithNonStrictValidation() { config.setProperty("zuul.HttpRequestMessage.host.header.strict.validation", false); HttpQueryParams queryParams = new HttpQueryParams(); Headers headers = new Headers(); headers.add("Host", "ba::dd"); request = new HttpRequestMessageImpl( new SessionContext(), "HTTP/1.1", "POST", "/some/where", queryParams, headers, "192.168.0.2", "https", 7002, "server"); assertEquals("server", request.getOriginalHost()); }
/** Eventually-visible write, delegating to {@code soVal} — presumably an ordered store mirroring AtomicLong.lazySet semantics; confirm against soVal's implementation. */
public void lazySet(long newValue) { soVal(newValue); }
// Verifies that a lazySet value is observable from the same thread.
@Test public void lazySet() { PaddedAtomicLong counter = new PaddedAtomicLong(); counter.lazySet(10); assertEquals(10L, counter.get()); }
/**
 * Runs the task body with the job id installed in the MDC for the duration
 * of the call; always completes {@code terminationFuture} with the final
 * execution state, even when {@code doRun()} throws.
 */
@Override public void run() { try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) { doRun(); } finally { terminationFuture.complete(executionState); } }
// Verifies that cleanup still runs when the invokable throws from invoke().
@Test public void testCleanupWhenInvokeFails() throws Exception { createTaskBuilder() .setInvokable(InvokableWithExceptionInInvoke.class) .build(Executors.directExecutor()) .run(); assertTrue(wasCleanedUp); }
/**
 * Returns a string of the form {@code left=L, top=T, right=R, bottom=B}.
 */
@Override
public String toString() {
    // A single concatenation expression compiles to the same StringBuilder
    // chain but is easier to read than manual append() calls.
    return "left=" + this.left
            + ", top=" + this.top
            + ", right=" + this.right
            + ", bottom=" + this.bottom;
}
// Verifies the rendered string for a fixed rectangle against the expected constant.
@Test public void toStringTest() { Rectangle rectangle = create(1, 2, 3, 4); Assert.assertEquals(RECTANGLE_TO_STRING, rectangle.toString()); }
/** Substitutes {@code ${...}} variables in {@code val} using {@code pc1} only (no secondary property container). */
public static String substVars(String val, PropertyContainer pc1) throws ScanException { return substVars(val, pc1, null); }
// Verifies that nested variable references (v2 -> ${v3}) are resolved recursively.
@Test public void testSubstVarsRecursive() throws ScanException { context.putProperty("v1", "if"); context.putProperty("v2", "${v3}"); context.putProperty("v3", "works"); String result = OptionHelper.substVars(text, context); assertEquals(expected, result); }
/**
 * Reads the common log header (capture length, buffer length, nanosecond
 * timestamp, all little-endian) from {@code buffer} at {@code offset},
 * appends "[ts] context: CODE [capture/total]" to {@code builder}, and
 * returns the number of header bytes consumed.
 */
static int dissectLogHeader( final String context, final Enum<?> code, final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = 0; final int captureLength = buffer.getInt(offset + encodedLength, LITTLE_ENDIAN); encodedLength += SIZE_OF_INT; final int bufferLength = buffer.getInt(offset + encodedLength, LITTLE_ENDIAN); encodedLength += SIZE_OF_INT; final long timestampNs = buffer.getLong(offset + encodedLength, LITTLE_ENDIAN); encodedLength += SIZE_OF_LONG; LogUtil.appendTimestamp(builder, timestampNs); builder.append(context) .append(": ") .append(code.name()) .append(" [") .append(captureLength) .append('/') .append(bufferLength) .append(']'); return encodedLength; }
// Verifies both the consumed length and the rendered header text for a fixed encoded header.
@Test void dissectLogHeader() { internalEncodeLogHeader(buffer, 0, 100, 222, () -> 1234567890); final int decodedLength = CommonEventDissector .dissectLogHeader("test ctx", ArchiveEventCode.CMD_OUT_RESPONSE, buffer, 0, builder); assertEquals(LOG_HEADER_LENGTH, decodedLength); assertEquals("[1.234567890] test ctx: CMD_OUT_RESPONSE [100/222]", builder.toString()); }
/** Looks up the node's host name through the cache; empty when unknown. */
public Optional<String> getHostName(String nodeId) { return hostNameCache.getUnchecked(nodeId); }
// Verifies that an unresolvable node id yields an empty Optional.
@Test public void getHostNameReturnsEmptyOptionalIfNodeIdIsInvalid() { when(cluster.nodeIdToHostName("node_id")).thenReturn(Optional.empty()); assertThat(nodeInfoCache.getHostName("node_id")).isEmpty(); }
/**
 * Resolves control-checksum query failures: when the failing query is the
 * control checksum and the error is EXCEEDED_TIME_LIMIT, marks the
 * verification as resolved with an explanatory message.
 */
@Override public Optional<String> resolveQueryFailure(QueryStats controlQueryStats, QueryException queryException, Optional<QueryObjectBundle> test) { return mapMatchingPrestoException(queryException, CONTROL_CHECKSUM, ImmutableSet.of(EXCEEDED_TIME_LIMIT), e -> Optional.of("Time limit exceeded when running control checksum query")); }
// Verifies that a control-checksum EXCEEDED_TIME_LIMIT failure resolves with the expected message.
@Test public void testResolved() { assertEquals( getFailureResolver().resolveQueryFailure( CONTROL_QUERY_STATS, new PrestoQueryException( new RuntimeException(), false, CONTROL_CHECKSUM, Optional.of(EXCEEDED_TIME_LIMIT), createQueryActionStats(CONTROL_CPU_TIME_MILLIS / 2, CONTROL_PEAK_TOTAL_MEMORY_BYTES)), Optional.empty()), Optional.of("Time limit exceeded when running control checksum query")); }
/**
 * CLI entry point: validates that the config file exists, loads the
 * migration config, then delegates to the overload with a ksqlClient
 * factory and the resolved migrations directory. Returns 1 on any
 * configuration error, otherwise the delegate's exit code.
 */
@Override protected int command() { if (!validateConfigFilePresent()) { return 1; } final MigrationConfig config; try { config = MigrationConfig.load(getConfigFile()); } catch (KsqlException | MigrationException e) { LOGGER.error(e.getMessage()); return 1; } return command( config, MigrationsUtil::getKsqlClient, getMigrationsDir(getConfigFile(), config) ); }
// Verifies that an empty migrations directory exits 0 and logs "no migrations" info messages.
@Test public void shouldPrintInfoForEmptyMigrationsDir() throws Exception { // Given: givenMigrations(ImmutableList.of(), ImmutableList.of(), ImmutableList.of(), ImmutableList.of()); // When: final int result = command.command(config, cfg -> ksqlClient, migrationsDir); // Then: assertThat(result, is(0)); verify(logAppender, atLeastOnce()).doAppend(logCaptor.capture()); final List<String> logMessages = logCaptor.getAllValues().stream() .map(LoggingEvent::getRenderedMessage) .collect(Collectors.toList()); assertThat(logMessages, hasItem(containsString("Current migration version: <none>"))); assertThat(logMessages, hasItem(containsString("No migrations files found"))); }
/** Pages codegen tables matching the request VO; delegates filtering to the mapper. */
@Override public PageResult<CodegenTableDO> getCodegenTablePage(CodegenTablePageReqVO pageReqVO) { return codegenTableMapper.selectPage(pageReqVO); }
// Verifies that paging filters by table name, comment, class name, and create-time range.
@Test public void testGetCodegenTablePage() {
// mock data
CodegenTableDO tableDO = randomPojo(CodegenTableDO.class, o -> { o.setTableName("t_yunai"); o.setTableComment("芋艿"); o.setClassName("SystemYunai"); o.setCreateTime(buildTime(2021, 3, 10)); }).setScene(CodegenSceneEnum.ADMIN.getScene()); codegenTableMapper.insert(tableDO);
// row with non-matching tableName
codegenTableMapper.insert(cloneIgnoreId(tableDO, o -> o.setTableName(randomString())));
// row with non-matching tableComment
codegenTableMapper.insert(cloneIgnoreId(tableDO, o -> o.setTableComment(randomString())));
// row with non-matching className
codegenTableMapper.insert(cloneIgnoreId(tableDO, o -> o.setClassName(randomString())));
// row with non-matching createTime
codegenTableMapper.insert(cloneIgnoreId(tableDO, logDO -> logDO.setCreateTime(buildTime(2021, 4, 10))));
// build the request
CodegenTablePageReqVO reqVO = new CodegenTablePageReqVO(); reqVO.setTableName("yunai"); reqVO.setTableComment("芋"); reqVO.setClassName("Yunai"); reqVO.setCreateTime(buildBetweenTime(2021, 3, 1, 2021, 3, 31));
// invoke
PageResult<CodegenTableDO> pageResult = codegenService.getCodegenTablePage(reqVO);
// assert only the single matching row is returned
assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(tableDO, pageResult.getList().get(0)); }
/**
 * Migrates every saved search into a view + search pair, persists both,
 * records the saved-search-id -&gt; view-id mapping, and marks the migration
 * completed. No-op when a previous run already succeeded.
 */
@Override public void upgrade() { if (hasBeenRunSuccessfully()) { LOG.debug("Migration already completed."); return; } final Map<String, String> savedSearchToViewsMap = new HashMap<>(); final Map<View, Search> newViews = this.savedSearchService.streamAll() .map(savedSearch -> { final Map.Entry<View, Search> newView = migrateSavedSearch(savedSearch); savedSearchToViewsMap.put(savedSearch.id(), newView.getKey().id()); return newView; }) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); newViews.forEach((view, search) -> { viewService.save(view); searchService.save(search); }); final MigrationCompleted migrationCompleted = MigrationCompleted.create(savedSearchToViewsMap); writeMigrationCompleted(migrationCompleted); }
// Verifies that a saved search with missing fields still migrates into the expected view and search documents.
@Test @MongoDBFixtures("sample_saved_search_with_missing_fields.json") public void migrateSavedSearchWithMissingFields() throws Exception { this.migration.upgrade(); final MigrationCompleted migrationCompleted = captureMigrationCompleted(); assertThat(migrationCompleted.savedSearchIds()) .containsExactly(new AbstractMap.SimpleEntry<>("5de660b7b2d44b5813c1d7f6", "000000020000000000000000")); assertViewServiceCreatedViews(1, resourceFile("sample_saved_search_with_missing_fields-expected_views.json")); assertSearchServiceCreated(1, resourceFile("sample_saved_search_with_missing_fields-expected_searches.json")); }
/** Returns the number of rows, {@code m}. */
@Override public int nrow() { return m; }
// Verifies the row count of the fixture matrix.
@Test public void testNrows() { System.out.println("nrow"); assertEquals(3, matrix.nrow()); }
/**
 * Recursively merges {@code node} into {@code root}. Sequences are
 * concatenated. Mappings merge entry-by-entry on scalar keys: when both
 * sides hold the same key, the values are merged recursively, and a
 * scalar/scalar collision (surfacing as ConfiguratorConflictException from
 * the default branch) lets the incoming value overwrite the existing one;
 * unmatched incoming entries are appended. Mismatched node kinds or
 * non-scalar mapping keys fail with ConfiguratorException.
 */
@Override public void merge(Node root, Node node, String source) throws ConfiguratorException { if (root.getNodeId() != node.getNodeId()) { // means one of those yaml file doesn't conform to JCasC schema throw new ConfiguratorException( String.format("Found incompatible configuration elements %s %s", source, node.getStartMark())); } switch (root.getNodeId()) { case sequence: SequenceNode seq = (SequenceNode) root; SequenceNode seq2 = (SequenceNode) node; seq.getValue().addAll(seq2.getValue()); return; case mapping: MappingNode map = (MappingNode) root; MappingNode map2 = (MappingNode) node; // merge common entries for (int i = 0; i < map2.getValue().size(); ) { NodeTuple t2 = map2.getValue().get(i); boolean found = false; for (NodeTuple tuple : map.getValue()) { final Node key = tuple.getKeyNode(); final Node key2 = t2.getKeyNode(); if (key.getNodeId() == NodeId.scalar) { // We dont support merge for more complex cases (yet) if (((ScalarNode) key).getValue().equals(((ScalarNode) key2).getValue())) { try { merge(tuple.getValueNode(), t2.getValueNode(), source); } catch (ConfiguratorConflictException e) { map.getValue().set(map.getValue().indexOf(tuple), t2); } map2.getValue().remove(i); found = true; break; } } else { throw new ConfiguratorException(String.format( "Found non-mergeable configuration keys %s %s)", source, node.getEndMark())); } } if (!found) { ++i; } } // .. and add others map.getValue().addAll(map2.getValue()); return; default: throw new ConfiguratorConflictException( String.format("Found conflicting configuration at %s %s", source, node.getStartMark())); } }
// Verifies that both conflict-free and conflicting YAML sources configure without error (conflicts overwrite).
@Test public void merge() throws ConfiguratorException { String normalSource = getClass().getResource("normal.yml").toExternalForm(); String overwriteSource = getClass().getResource("overwrite.yml").toExternalForm(); String conflictsSource = getClass().getResource("conflicts.yml").toExternalForm(); // merge without conflicts ConfigurationAsCode.get().configure(normalSource, overwriteSource); // merge with conflicts ConfigurationAsCode.get().configure(normalSource, conflictsSource); }
/** Extracts the {@code TypeInformation} for the given object instance. */
public static <X> TypeInformation<X> getForObject(X value) { return new TypeExtractor().privateGetForObject(value); }
// Verifies that a POJO with chainable setters is still recognized as PojoTypeInfo.
@Test void testMethodChainingPojo() { CustomChainingPojoType t = new CustomChainingPojoType(); t.setMyField1("World").setMyField2(1); TypeInformation<?> ti = TypeExtractor.getForObject(t); assertThat(ti.isBasicType()).isFalse(); assertThat(ti.isTupleType()).isFalse(); assertThat(ti).isInstanceOf(PojoTypeInfo.class); assertThat(ti.getTypeClass()).isEqualTo(CustomChainingPojoType.class); }
/**
 * Dissects a NAME_RESOLUTION_RESOLVE event: after the common log header it
 * reads the re-resolution flag, nanosecond duration, resolver and host
 * name strings (length-prefixed ASCII), then appends the decoded fields
 * and the encoded InetAddress to {@code builder}.
 */
static void dissectResolve( final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int absoluteOffset = offset; absoluteOffset += dissectLogHeader(CONTEXT, NAME_RESOLUTION_RESOLVE, buffer, absoluteOffset, builder); final boolean isReResolution = 1 == buffer.getByte(absoluteOffset); absoluteOffset += SIZE_OF_BYTE; final long durationNs = buffer.getLong(absoluteOffset, LITTLE_ENDIAN); absoluteOffset += SIZE_OF_LONG; builder.append(": resolver="); absoluteOffset += buffer.getStringAscii(absoluteOffset, builder); absoluteOffset += SIZE_OF_INT; builder.append(" durationNs=").append(durationNs); builder.append(" name="); absoluteOffset += buffer.getStringAscii(absoluteOffset, builder); absoluteOffset += SIZE_OF_INT; builder.append(" isReResolution=").append(isReResolution); builder.append(" address="); dissectInetAddress(buffer, absoluteOffset, builder); }
// Verifies that a null address dissects to "unknown-address" with all other fields rendered.
@Test void dissectResolveNullAddress() { final String resolver = "myResolver"; final long durationNs = -1; final String hostname = "some-host"; final boolean isReResolution = true; final InetAddress address = null; final int length = SIZE_OF_BOOLEAN + SIZE_OF_LONG + trailingStringLength(resolver, MAX_HOST_NAME_LENGTH) + trailingStringLength(hostname, MAX_HOST_NAME_LENGTH) + inetAddressLength(address); DriverEventEncoder.encodeResolve( buffer, 0, length, length, resolver, durationNs, hostname, isReResolution, address); final StringBuilder builder = new StringBuilder(); DriverEventDissector.dissectResolve(buffer, 0, builder); assertThat(builder.toString(), endsWith( "DRIVER: NAME_RESOLUTION_RESOLVE [40/40]: " + "resolver=myResolver durationNs=-1 name=some-host isReResolution=true address=unknown-address")); }
/**
 * Creates a {@code ValueReference} of type {@code ValueType.PARAMETER}.
 *
 * @param value the parameter name; must not be blank
 * @return a parameter value reference
 * @throws IllegalArgumentException if {@code value} is null, empty, or only whitespace
 */
public static ValueReference createParameter(String value) {
    // A blank parameter name can never be resolved; fail fast.
    if (value == null || value.trim().isEmpty()) {
        throw new IllegalArgumentException("Parameter must not be blank");
    }
    return ValueReference.builder()
            .valueType(ValueType.PARAMETER)
            .value(value)
            .build();
}
// Verifies that empty and whitespace-only parameter names are rejected with the expected message.
@Test public void createParameterFailsWithBlankParameter() { assertThatThrownBy(() -> ValueReference.createParameter("")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Parameter must not be blank"); assertThatThrownBy(() -> ValueReference.createParameter(" ")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Parameter must not be blank"); }
/** Prepares fetch requests and sends them, routing responses to the success/failure handlers. */
@Override public PollResult poll(long currentTimeMs) { return pollInternal( prepareFetchRequests(), this::handleFetchSuccess, this::handleFetchFailure ); }
// Verifies that an in-flight fetch survives a cooperative rebalance when the partition is not revoked.
@Test public void testFetchDuringCooperativeRebalance() { buildFetcher(); subscriptions.subscribe(singleton(topicName), Optional.empty()); subscriptions.assignFromSubscribed(singleton(tp0)); subscriptions.seek(tp0, 0); client.updateMetadata(RequestTestUtils.metadataUpdateWithIds( 1, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds)); assertEquals(1, sendFetches()); // Now the cooperative rebalance happens and fetch positions are NOT cleared for unrevoked partitions subscriptions.assignFromSubscribed(singleton(tp0)); client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0)); networkClientDelegate.poll(time.timer(0)); Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords(); // The active fetch should NOT be ignored since the position for tp0 is still valid assertEquals(1, fetchedRecords.size()); assertEquals(3, fetchedRecords.get(tp0).size()); }
/**
 * Computes staged predictions on {@code data}: {@code prediction[i][j]} is
 * the model output for row j using the first i+1 trees (intercept b plus
 * shrinkage-scaled tree outputs), allowing evaluation at each boosting
 * stage.
 */
public double[][] test(DataFrame data) { DataFrame x = formula.x(data); int n = x.nrow(); int ntrees = trees.length; double[][] prediction = new double[ntrees][n]; for (int j = 0; j < n; j++) { Tuple xj = x.get(j); double base = b; for (int i = 0; i < ntrees; i++) { base += shrinkage * trees[i].predict(xj); prediction[i][j] = base; } } return prediction; }
// Verifies GBM regression with Huber loss on the AutoMPG dataset against a reference error value.
@Test public void testAutoMPGHuber() { test(Loss.huber(0.9), "autoMPG", AutoMPG.formula, AutoMPG.data, 3.1155); }
/** Convenience overload that builds the table location param using the default warehouse. */
public static TOlapTableLocationParam createLocation(OlapTable table, TOlapTablePartitionParam partitionParam, boolean enableReplicatedStorage) throws UserException { return createLocation(table, partitionParam, enableReplicatedStorage, WarehouseManager.DEFAULT_WAREHOUSE_ID); }
// Builds a full in-memory OLAP table (columns, replicas, tablet, partition, index) and verifies that
// createLocation reports one tablet location spanning the three replica backends.
@Test public void testCreateLocationWithLocalTablet(@Mocked GlobalStateMgr globalStateMgr, @Mocked SystemInfoService systemInfoService) throws Exception { long dbId = 1L; long tableId = 2L; long partitionId = 3L; long indexId = 4L; long tabletId = 5L; long replicaId = 10L; long backendId = 20L; // Columns List<Column> columns = new ArrayList<Column>(); Column k1 = new Column("k1", Type.INT, true, null, "", ""); columns.add(k1); columns.add(new Column("k2", Type.BIGINT, true, null, "", "")); columns.add(new Column("v", Type.BIGINT, false, AggregateType.SUM, "0", "")); // Replica Replica replica1 = new Replica(replicaId, backendId, Replica.ReplicaState.NORMAL, 1, 0); Replica replica2 = new Replica(replicaId + 1, backendId + 1, Replica.ReplicaState.NORMAL, 1, 0); Replica replica3 = new Replica(replicaId + 2, backendId + 2, Replica.ReplicaState.NORMAL, 1, 0); // Tablet LocalTablet tablet = new LocalTablet(tabletId); tablet.addReplica(replica1); tablet.addReplica(replica2); tablet.addReplica(replica3); // Partition info and distribution info DistributionInfo distributionInfo = new HashDistributionInfo(1, Lists.newArrayList(k1)); PartitionInfo partitionInfo = new SinglePartitionInfo(); partitionInfo.setDataProperty(partitionId, new DataProperty(TStorageMedium.SSD)); partitionInfo.setIsInMemory(partitionId, false); partitionInfo.setTabletType(partitionId, TTabletType.TABLET_TYPE_DISK); partitionInfo.setReplicationNum(partitionId, (short) 3); // Index MaterializedIndex index = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL); TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, 0, TStorageMedium.SSD); index.addTablet(tablet, tabletMeta); // Partition Partition partition = new Partition(partitionId, "p1", index, distributionInfo); // Table OlapTable table = new OlapTable(tableId, "t1", columns, KeysType.AGG_KEYS, partitionInfo, distributionInfo); Deencapsulation.setField(table, "baseIndexId", indexId); table.addPartition(partition); 
table.setIndexMeta(indexId, "t1", columns, 0, 0, (short) 3, TStorageType.COLUMN, KeysType.AGG_KEYS); new Expectations() { { GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); result = systemInfoService; systemInfoService.checkExceedDiskCapacityLimit((Multimap<Long, Long>) any, anyBoolean); result = Status.OK; GlobalStateMgr.getCurrentState(); result = globalStateMgr; globalStateMgr.getNodeMgr().getClusterInfo(); result = systemInfoService; systemInfoService.checkBackendAlive(anyLong); result = true; } }; TOlapTablePartitionParam partitionParam = new TOlapTablePartitionParam(); TOlapTablePartition tPartition = new TOlapTablePartition(); tPartition.setId(partitionId); partitionParam.addToPartitions(tPartition); TOlapTableLocationParam param = OlapTableSink.createLocation( table, partitionParam, false); System.out.println(param); // Check List<TTabletLocation> locations = param.getTablets(); Assert.assertEquals(1, locations.size()); TTabletLocation location = locations.get(0); List<Long> nodes = location.getNode_ids(); Assert.assertEquals(3, nodes.size()); Collections.sort(nodes); Assert.assertEquals(Lists.newArrayList(backendId, backendId + 1, backendId + 2), nodes); }
// Decodes a variable-length integer (7 payload bits per byte) starting at {@code position}.
// The high bit of each byte is a continuation flag; a leading byte of exactly 0x80 is the
// encoding's null sentinel and cannot be read as an int.
public static int readVInt(ByteData arr, long position) {
    byte b = arr.get(position++);
    if(b == (byte) 0x80)
        throw new RuntimeException("Attempting to read null value as int");
    int value = b & 0x7F;
    while ((b & 0x80) != 0) {
        // Continuation bit set: shift in the next 7 payload bits.
        b = arr.get(position++);
        value <<= 7;
        value |= (b & 0x7F);
    }
    return value;
}
// Reading a var-int from a truncated blob input must surface EOFException rather than
// silently returning a partial value.
@Test(expected = EOFException.class)
public void testReadVIntTruncatedHollowBlobInput() throws IOException {
    HollowBlobInput hbi = HollowBlobInput.serial(BYTES_TRUNCATED);
    VarInt.readVInt(hbi);
}
// For the REST step every declared input field is treated as an input row meta,
// so this simply delegates to getInputFields().
@Override
protected Map<String, RowMetaInterface> getInputRowMetaInterfaces( RestMeta meta ) {
    return getInputFields( meta );
}
// getInputRowMetaInterfaces() must be a straight pass-through of getInputFields().
@Test
public void testGetInputRowMetaInterface() throws Exception {
    Map<String, RowMetaInterface> inputs = new HashMap<>();
    doReturn( inputs ).when( analyzer ).getInputFields( meta );
    Map<String, RowMetaInterface> inputRowMetaInterfaces = analyzer.getInputRowMetaInterfaces( meta );
    assertEquals( inputs, inputRowMetaInterfaces );
}
/**
 * Removes every key/value pair whose key matches the given predicate.
 * Note: despite the name, matching pairs are REMOVED, not retained.
 *
 * @param filter predicate selecting keys to remove
 * @return this instance, for chaining
 */
public OrderedProperties filter(Predicate<String> filter) {
    // removeIf on the entry-set view is the idiomatic replacement for the
    // explicit Iterator + it.remove() loop and preserves iteration order.
    pairs.entrySet().removeIf(entry -> filter.test(entry.getKey()));
    return this;
}
// filter() removes pairs whose key matches the predicate; the remaining pairs
// must keep their original order.
@Test
public void filter() {
    OrderedProperties pairs = createTestKeyValues();
    pairs.filter(k -> k.equalsIgnoreCase("second"));
    assertKeyOrder(pairs, "first", "third", "FOURTH");
}
/**
 * Whether a query that timed out should be retried over TCP.
 *
 * @return {@code true} if the TCP retry-on-timeout fallback is enabled
 */
public boolean isRetryTcpOnTimeout() {
    return retryTcpOnTimeout;
}
// The flag defaults to false and must be settable through the builder.
@Test
void retryTcpOnTimeout() {
    assertThat(builder.build().isRetryTcpOnTimeout()).isFalse();
    builder.retryTcpOnTimeout(true);
    assertThat(builder.build().isRetryTcpOnTimeout()).isTrue();
}
/**
 * Plans the next batch of splits. A null {@code lastPosition} means this is the
 * first discovery (initial splits); otherwise only splits added since that
 * position are discovered.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    // Pick up any snapshots committed since the previous plan before deciding.
    table.refresh();
    return lastPosition == null
            ? discoverInitialSplits()
            : discoverIncrementalSplits(lastPosition);
}
// With INCREMENTAL_FROM_EARLIEST_SNAPSHOT on an empty table, both the initial and the
// follow-up discovery must return no splits and null snapshot positions; later cycles
// then pick up newly produced snapshots.
@Test
public void testIncrementalFromEarliestSnapshotWithEmptyTable() throws Exception {
    ScanContext scanContext =
            ScanContext.builder()
                    .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
                    .build();
    ContinuousSplitPlannerImpl splitPlanner =
            new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
    ContinuousEnumerationResult emptyTableInitialDiscoveryResult = splitPlanner.planSplits(null);
    assertThat(emptyTableInitialDiscoveryResult.splits()).isEmpty();
    assertThat(emptyTableInitialDiscoveryResult.fromPosition()).isNull();
    assertThat(emptyTableInitialDiscoveryResult.toPosition().snapshotId()).isNull();
    assertThat(emptyTableInitialDiscoveryResult.toPosition().snapshotTimestampMs()).isNull();
    ContinuousEnumerationResult emptyTableSecondDiscoveryResult =
            splitPlanner.planSplits(emptyTableInitialDiscoveryResult.toPosition());
    assertThat(emptyTableSecondDiscoveryResult.splits()).isEmpty();
    assertThat(emptyTableSecondDiscoveryResult.fromPosition().snapshotId()).isNull();
    assertThat(emptyTableSecondDiscoveryResult.fromPosition().snapshotTimestampMs()).isNull();
    assertThat(emptyTableSecondDiscoveryResult.toPosition().snapshotId()).isNull();
    assertThat(emptyTableSecondDiscoveryResult.toPosition().snapshotTimestampMs()).isNull();
    // next 3 snapshots
    IcebergEnumeratorPosition lastPosition = emptyTableSecondDiscoveryResult.toPosition();
    for (int i = 0; i < 3; ++i) {
        lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
    }
}
// Returns the canonical "empty" instance registered for the given type's raw class
// (e.g. an empty collection), or null when no empty value is registered.
public static Object emptyValueOf(Type type) {
    return EMPTIES.getOrDefault(Types.getRawType(type), () -> null).get();
}
// Types without a registered empty value must fall back to null.
@Test
void emptyValueOf_nullForUndefined() throws Exception {
    assertThat(Util.emptyValueOf(Number.class)).isNull();
    assertThat(Util.emptyValueOf(Parameterized.class)).isNull();
}
public static long getNextScheduledTime(final String cronEntry, long currentTime) throws MessageFormatException { long result = 0; if (cronEntry == null || cronEntry.length() == 0) { return result; } // Handle the once per minute case "* * * * *" // starting the next event at the top of the minute. if (cronEntry.equals("* * * * *")) { result = currentTime + 60 * 1000; result = result / 60000 * 60000; return result; } List<String> list = tokenize(cronEntry); List<CronEntry> entries = buildCronEntries(list); Calendar working = Calendar.getInstance(); working.setTimeInMillis(currentTime); working.set(Calendar.SECOND, 0); CronEntry minutes = entries.get(MINUTES); CronEntry hours = entries.get(HOURS); CronEntry dayOfMonth = entries.get(DAY_OF_MONTH); CronEntry month = entries.get(MONTH); CronEntry dayOfWeek = entries.get(DAY_OF_WEEK); // Start at the top of the next minute, cron is only guaranteed to be // run on the minute. int timeToNextMinute = 60 - working.get(Calendar.SECOND); working.add(Calendar.SECOND, timeToNextMinute); // If its already to late in the day this will roll us over to tomorrow // so we'll need to check again when done updating month and day. int currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } int currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } // We can roll into the next month here which might violate the cron setting // rules so we check once then recheck again after applying the month settings. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Start by checking if we are in the right month, if not then calculations // need to start from the beginning of the month to ensure that we don't end // up on the wrong day. 
(Can happen when DAY_OF_WEEK is set and current time // is ahead of the day of the week to execute on). doUpdateCurrentMonth(working, month); // Now Check day of week and day of month together since they can be specified // together in one entry, if both "day of month" and "day of week" are restricted // (not "*"), then either the "day of month" field (3) or the "day of week" field // (5) must match the current day or the Calenday must be advanced. doUpdateCurrentDay(working, dayOfMonth, dayOfWeek); // Now we can chose the correct hour and minute of the day in question. currentHours = working.get(Calendar.HOUR_OF_DAY); if (!isCurrent(hours, currentHours)) { int nextHour = getNext(hours, currentHours, working); working.add(Calendar.HOUR_OF_DAY, nextHour); } currentMinutes = working.get(Calendar.MINUTE); if (!isCurrent(minutes, currentMinutes)) { int nextMinutes = getNext(minutes, currentMinutes, working); working.add(Calendar.MINUTE, nextMinutes); } result = working.getTimeInMillis(); if (result <= currentTime) { throw new ArithmeticException("Unable to compute next scheduled exection time."); } return result; }
// "30 * * * *" starting at minute 20 must schedule the next run at minute 30.
@Test
public void testgetNextTimeMinutes() throws MessageFormatException {
    String test = "30 * * * *";
    long current = 20*60*1000;
    Calendar calender = Calendar.getInstance();
    calender.setTimeInMillis(current);
    int startHours = calender.get(Calendar.HOUR_OF_DAY);
    int startMinutes = calender.get(Calendar.MINUTE);
    LOG.debug("start:" + calender.getTime());
    long next = CronParser.getNextScheduledTime(test, current);
    calender.setTimeInMillis(next);
    LOG.debug("next:" + calender.getTime());
    int nextMinutes=calender.get(Calendar.MINUTE);
    assertEquals(30,nextMinutes);
}
// Builds a Statement AST from the parse tree, resolving the statement's source
// relations first so they are available during construction.
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
// CREATE TABLE without the SOURCE keyword must yield a non-source table.
@Test
public void shouldCreateNormalTable() {
    // Given:
    final SingleStatementContext stmt = givenQuery("CREATE TABLE X WITH (kafka_topic='X');");
    // When:
    final CreateTable result = (CreateTable) builder.buildStatement(stmt);
    // Then:
    assertThat(result.isSource(), is(false));
}
// Discovers bundled, external and freshly-downloaded plugin JARs, failing fast on duplicate
// keys or clashes with built-in plugins, promotes downloaded plugins into the external
// directory (replacing older versions), and finally unloads plugins incompatible with this
// server version. External plugins shadow bundled ones in the final map (putAll order).
public Collection<ServerPluginInfo> loadPlugins() {
    Map<String, ServerPluginInfo> bundledPluginsByKey = new LinkedHashMap<>();
    for (ServerPluginInfo bundled : getBundledPluginsMetadata()) {
        // Two bundled JARs with the same key is a packaging error.
        failIfContains(bundledPluginsByKey, bundled,
            plugin -> MessageException.of(format("Found two versions of the plugin %s [%s] in the directory %s. Please remove one of %s or %s.",
                bundled.getName(), bundled.getKey(), getRelativeDir(fs.getInstalledBundledPluginsDir()),
                bundled.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
        bundledPluginsByKey.put(bundled.getKey(), bundled);
    }
    Map<String, ServerPluginInfo> externalPluginsByKey = new LinkedHashMap<>();
    for (ServerPluginInfo external : getExternalPluginsMetadata()) {
        // An external plugin may not reuse the key of a built-in feature...
        failIfContains(bundledPluginsByKey, external,
            plugin -> MessageException.of(format("Found a plugin '%s' in the directory '%s' with the same key [%s] as a built-in feature '%s'. Please remove '%s'.",
                external.getName(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getKey(), plugin.getName(),
                new File(getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName()))));
        // ...nor may two external JARs share a key.
        failIfContains(externalPluginsByKey, external,
            plugin -> MessageException.of(format("Found two versions of the plugin '%s' [%s] in the directory '%s'. Please remove %s or %s.",
                external.getName(), external.getKey(), getRelativeDir(fs.getInstalledExternalPluginsDir()),
                external.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
        externalPluginsByKey.put(external.getKey(), external);
    }
    for (PluginInfo downloaded : getDownloadedPluginsMetadata()) {
        failIfContains(bundledPluginsByKey, downloaded,
            plugin -> MessageException.of(format("Fail to update plugin: %s. Built-in feature with same key already exists: %s. Move or delete plugin from %s directory",
                plugin.getName(), plugin.getKey(), getRelativeDir(fs.getDownloadedPluginsDir()))));
        ServerPluginInfo installedPlugin;
        if (externalPluginsByKey.containsKey(downloaded.getKey())) {
            // Upgrade: drop the old JAR before moving the new one into place.
            deleteQuietly(externalPluginsByKey.get(downloaded.getKey()).getNonNullJarFile());
            installedPlugin = moveDownloadedPluginToExtensions(downloaded);
            LOG.info("Plugin {} [{}] updated to version {}", installedPlugin.getName(), installedPlugin.getKey(), installedPlugin.getVersion());
        } else {
            installedPlugin = moveDownloadedPluginToExtensions(downloaded);
            LOG.info("Plugin {} [{}] installed", installedPlugin.getName(), installedPlugin.getKey());
        }
        externalPluginsByKey.put(downloaded.getKey(), installedPlugin);
    }
    Map<String, ServerPluginInfo> plugins = new HashMap<>(externalPluginsByKey.size() + bundledPluginsByKey.size());
    plugins.putAll(externalPluginsByKey);
    plugins.putAll(bundledPluginsByKey);
    PluginRequirementsValidator.unloadIncompatiblePlugins(plugins);
    return plugins.values();
}
// Installing plugins this server version no longer supports must abort loading with a
// MessageException listing every incompatible plugin.
@Test
public void fail_when_incompatible_plugins_are_installed() throws Exception {
    createJar(fs.getInstalledExternalPluginsDir(), "sqale", "main", null);
    createJar(fs.getInstalledExternalPluginsDir(), "scmgit", "main", null);
    createJar(fs.getInstalledExternalPluginsDir(), "scmsvn", "main", null);
    assertThatThrownBy(() -> underTest.loadPlugins())
        .isInstanceOf(MessageException.class)
        .hasMessage("The following plugins are no longer compatible with this version of SonarQube: 'scmgit', 'scmsvn', 'sqale'");
}
// Assembles the Jetty server: thread pool, application and admin servlets routed through a
// RoutingHandler, then wrapped with gzip, request logging and a stats handler (outermost).
@Override
public Server build(Environment environment) {
    printBanner(environment.getName());
    final ThreadPool threadPool = createThreadPool(environment.metrics());
    final Server server = buildServer(environment.lifecycle(), threadPool);
    final Handler applicationHandler = createAppServlet(server,
        environment.jersey(),
        environment.getObjectMapper(),
        environment.getValidator(),
        environment.getApplicationContext(),
        environment.getJerseyServletContainer(),
        environment.metrics());
    final Handler adminHandler = createAdminServlet(server,
        environment.getAdminContext(),
        environment.metrics(),
        environment.healthChecks(),
        environment.admin());
    final RoutingHandler routingHandler = buildRoutingHandler(environment.metrics(),
        server,
        applicationHandler,
        adminHandler);
    final Handler gzipHandler = buildGzipHandler(routingHandler);
    // Handler chain (outermost first): stats -> request log -> gzip -> routing.
    server.setHandler(addStatsHandler(addRequestLog(server, gzipHandler, environment.getName())));
    return server;
}
// Building the server must register the default exception mapper binder exactly once.
@Test
void registersDefaultExceptionMappers() {
    assertThat(http.getRegisterDefaultExceptionMappers()).isTrue();
    http.build(environment);
    assertThat(environment.jersey().getResourceConfig().getSingletons())
        .filteredOn(x -> x instanceof ExceptionMapperBinder).hasSize(1);
}
/**
 * Checks the configured GitHub ALM settings: validates them when present,
 * otherwise flips the GitHub status metric to red.
 */
@Override
public void run() {
    try (DbSession dbSession = dbClient.openSession(false)) {
        List<AlmSettingDto> settings = dbClient.almSettingDao().selectByAlm(dbSession, ALM.GITHUB);
        if (settings.isEmpty()) {
            // Nothing configured for GitHub -> report red and stop.
            metrics.setGithubStatusToRed();
        } else {
            validateGithub(settings);
        }
    }
}
// A successful validation run must set the GitHub status metric to green exactly once
// and never to red.
@Test
public void run_githubValidatorDoesntThrowException_setGreenStatusInMetricsOnce() {
    List<AlmSettingDto> dtos = generateDtos(5, ALM.GITHUB);
    when(almSettingsDao.selectByAlm(any(), any())).thenReturn(dtos);
    underTest.run();
    verify(metrics, times(1)).setGithubStatusToGreen();
    verify(metrics, times(0)).setGithubStatusToRed();
}
public void loop() { while (!ctx.isKilled()) { try { processOnce(); } catch (RpcException rpce) { LOG.debug("Exception happened in one session(" + ctx + ").", rpce); ctx.setKilled(); break; } catch (Exception e) { // TODO(zhaochun): something wrong LOG.warn("Exception happened in one seesion(" + ctx + ").", e); ctx.setKilled(); break; } } }
// A COM_PING processed through the loop must produce a MySQL OK packet and leave the
// session alive.
@Test
public void testPingLoop() throws IOException {
    ConnectContext ctx = initMockContext(mockChannel(pingPacket), GlobalStateMgr.getCurrentState());
    ConnectProcessor processor = new ConnectProcessor(ctx);
    processor.loop();
    Assert.assertEquals(MysqlCommand.COM_PING, myContext.getCommand());
    Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlOkPacket);
    Assert.assertFalse(myContext.isKilled());
}
@Operation(summary = "get version", tags = { SwaggerConfig.SHARED }, operationId = "get_version", parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-V")}) @GetMapping(value = "version", produces = "application/json") @ResponseBody public AppVersionResponse getVersion(@Parameter(ref = "API-V") @RequestHeader("API-Version") String apiVersion, @Parameter(ref = "APP-V") @RequestHeader("App-Version") String appVersion, @Parameter(ref = "OS-T") @RequestHeader("OS-Type") String osType, @Parameter(ref = "OS-V") @RequestHeader("OS-Version") String osVersion, @Parameter(ref = "REL-T") @RequestHeader("Release-Type") String releaseType) throws SharedServiceClientException { var status = appVersionService.checkAppStatus(appVersion, osType, releaseType); return appVersionService.appVersionResponse(status, apiVersion, appVersion, osType, osVersion, releaseType); }
// getVersion must first check the app status and then build the response from it
// (status is null here because the service is mocked).
@Test
void validateIfCorrectProcessesAreCalledGetVersion() throws SharedServiceClientException {
    String apiVersion = "1";
    String appVersion = "1.0.0";
    String osType = "Android";
    String osVersion = "10";
    String releaseType = "Productie";
    configController.getVersion(apiVersion, appVersion, osType, osVersion, releaseType);
    verify(appVersionService, times(1)).checkAppStatus(appVersion, osType, releaseType);
    verify(appVersionService, times(1)).appVersionResponse(null, apiVersion, appVersion, osType, osVersion, releaseType);
}
// Overwrites the partition leader epoch field of the batch header in place.
// NOTE(review): the accompanying test asserts the batch stays valid after this mutation,
// which suggests the epoch is outside the CRC-covered region - confirm before relying on it.
@Override
public void setPartitionLeaderEpoch(int epoch) {
    buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch);
}
// Setting the partition leader epoch in place must be visible to readers of the same
// buffer and must not invalidate the batch checksum.
@Test
public void testSetPartitionLeaderEpoch() {
    MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, Compression.NONE, TimestampType.CREATE_TIME,
        new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
        new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
        new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
    int leaderEpoch = 500;
    DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer());
    batch.setPartitionLeaderEpoch(leaderEpoch);
    assertEquals(leaderEpoch, batch.partitionLeaderEpoch());
    assertTrue(batch.isValid());
    List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator());
    assertEquals(1, recordBatches.size());
    assertEquals(leaderEpoch, recordBatches.get(0).partitionLeaderEpoch());
}
// Commits the given offsets on behalf of a consumer group by driving an
// AlterConsumerGroupOffsetsHandler through the coordinator-aware admin API driver.
@Override
public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(
    String groupId,
    Map<TopicPartition, OffsetAndMetadata> offsets,
    AlterConsumerGroupOffsetsOptions options
) {
    SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> future =
        AlterConsumerGroupOffsetsHandler.newFuture(groupId);
    AlterConsumerGroupOffsetsHandler handler = new AlterConsumerGroupOffsetsHandler(groupId, offsets, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new AlterConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)));
}
// With retries=0, a retriable NOT_COORDINATOR error on commit must not be retried and
// the result future must fail with a TimeoutException.
@Test
public void testOffsetCommitNumRetries() throws Exception {
    final Cluster cluster = mockCluster(3, 0);
    final Time time = new MockTime();
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        final TopicPartition tp1 = new TopicPartition("foo", 0);
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(tp1, new OffsetAndMetadata(123L));
        final AlterConsumerGroupOffsetsResult result = env.adminClient().alterConsumerGroupOffsets(GROUP_ID, offsets);
        TestUtils.assertFutureError(result.all(), TimeoutException.class);
    }
}
// Builder setter: the protocol used by the metrics export service.
// Returns the builder for fluent chaining.
public MetricsBuilder exportServiceProtocol(String exportServiceProtocol) {
    this.exportServiceProtocol = exportServiceProtocol;
    return getThis();
}
// The builder must propagate exportServiceProtocol into the built configuration.
@Test
void exportServiceProtocol() {
    MetricsBuilder builder = MetricsBuilder.newBuilder();
    builder.exportServiceProtocol("tri");
    Assertions.assertEquals("tri", builder.build().getExportServiceProtocol());
}
// Returns a human-readable description of this topology; synchronized to guard the
// mutable internal builder against concurrent topology construction.
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
// A stream-stream join configured with StreamJoined.withStoreName("custom-name") must
// name both join window stores after the custom name in the described topology.
@Test
public void streamStreamJoinTopologyWithCustomStoresNames() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Integer, String> stream1;
    final KStream<Integer, String> stream2;
    stream1 = builder.stream("input-topic1");
    stream2 = builder.stream("input-topic2");
    stream1.join(
        stream2,
        MockValueJoiner.TOSTRING_JOINER,
        JoinWindows.of(ofMillis(100)),
        StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
            .withStoreName("custom-name"));
    final TopologyDescription describe = builder.build().describe();
    assertEquals(
        "Topologies:\n" +
            " Sub-topology: 0\n" +
            " Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
            " --> KSTREAM-WINDOWED-0000000002\n" +
            " Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
            " --> KSTREAM-WINDOWED-0000000003\n" +
            " Processor: KSTREAM-WINDOWED-0000000002 (stores: [custom-name-this-join-store])\n" +
            " --> KSTREAM-JOINTHIS-0000000004\n" +
            " <-- KSTREAM-SOURCE-0000000000\n" +
            " Processor: KSTREAM-WINDOWED-0000000003 (stores: [custom-name-other-join-store])\n" +
            " --> KSTREAM-JOINOTHER-0000000005\n" +
            " <-- KSTREAM-SOURCE-0000000001\n" +
            " Processor: KSTREAM-JOINOTHER-0000000005 (stores: [custom-name-this-join-store])\n" +
            " --> KSTREAM-MERGE-0000000006\n" +
            " <-- KSTREAM-WINDOWED-0000000003\n" +
            " Processor: KSTREAM-JOINTHIS-0000000004 (stores: [custom-name-other-join-store])\n" +
            " --> KSTREAM-MERGE-0000000006\n" +
            " <-- KSTREAM-WINDOWED-0000000002\n" +
            " Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
            " --> none\n" +
            " <-- KSTREAM-JOINTHIS-0000000004, KSTREAM-JOINOTHER-0000000005\n\n",
        describe.toString());
}
// Transparent decoration: wraps the query result unchanged because no merge logic
// is required for this rule.
@Override
public MergedResult decorate(final QueryResult queryResult, final SQLStatementContext sqlStatementContext, final ShardingSphereRule rule) {
    return new TransparentMergedResult(queryResult);
}
// The transparent decorator must delegate straight to the wrapped result.
@Test
void assertDecorateMergedResult() throws SQLException {
    MergedResult mergedResult = mock(MergedResult.class);
    when(mergedResult.next()).thenReturn(true);
    TransparentResultDecorator decorator = new TransparentResultDecorator();
    assertTrue(decorator.decorate(mergedResult, mock(SQLStatementContext.class), mock(ShardingSphereRule.class)).next());
}
// Computes the watermark throttling frame size for the plan: the GCD collected from
// window aggregations, capped by the join-derived maximum interval; when no window
// aggregation exists (gcd == 0), the join maximum is used directly.
public static long calculate(PhysicalRel rel, ExpressionEvalContext evalContext) {
    GcdCalculatorVisitor visitor = new GcdCalculatorVisitor(evalContext);
    visitor.go(rel);
    if (visitor.gcd == 0) {
        // there's no window aggr in the rel, return the value for joins, which is already capped at some reasonable value
        return visitor.maximumIntervalForJoins;
    }
    // if there's window aggr, cap it with the maximumIntervalForJoins
    return Math.min(visitor.gcd, visitor.maximumIntervalForJoins);
}
// A plan that must not execute (ShouldNotExecuteRel) must fall back to the default
// stream-to-stream join throttling interval and carry a diagnostic message.
@Test
public void when_shouldNotExecutePlan_then_returnDefault() {
    HazelcastTable table = streamGeneratorTable("_stream", 10);
    List<QueryDataType> parameterTypes = Collections.singletonList(INT);
    final String sql = "SELECT MAX(v) FROM " +
        "TABLE(HOP(" +
        " (SELECT * FROM TABLE(IMPOSE_ORDER((SELECT * FROM TABLE(GENERATE_STREAM(10))), DESCRIPTOR(v), 1)))" +
        " , DESCRIPTOR(v) , 6, 3))";
    PhysicalRel optimizedPhysicalRel = optimizePhysical(sql, parameterTypes, table).getPhysical();
    assertPlan(optimizedPhysicalRel, plan(planRow(0, ShouldNotExecuteRel.class)));
    assertThat(WatermarkThrottlingFrameSizeCalculator.calculate(optimizedPhysicalRel, MOCK_EEC))
        .isEqualTo(S2S_JOIN_MAX_THROTTLING_INTERVAL);
    ShouldNotExecuteRel sneRel = (ShouldNotExecuteRel) optimizedPhysicalRel;
    assertThat(sneRel.message()).contains("Streaming aggregation is supported only for window aggregation");
}
/**
 * Runs the tool: initializes state, processes the namespace and maps the outcome
 * to an exit status. The dispatcher is always shut down on exit.
 */
private ExitStatus run() {
    try {
        init();
        return new Processor().processNamespace().getExitStatus();
    } catch (IllegalArgumentException e) {
        System.out.println(e + ". Exiting ...");
        // Log this branch too, for consistency with the IOException handler below.
        LOG.error(e + ". Exiting ...");
        return ExitStatus.ILLEGAL_ARGUMENTS;
    } catch (IOException e) {
        System.out.println(e + ". Exiting ...");
        LOG.error(e + ". Exiting ...");
        return ExitStatus.IO_EXCEPTION;
    } finally {
        dispatcher.shutdownNow();
    }
}
// End-to-end: move a file's replica to ARCHIVE via the COLD storage policy, then unset
// the policy and verify the Mover brings the replica back to DISK.
@Test(timeout = 300000)
public void testMoverWhenStoragePolicyUnset() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storageTypes(
            new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE}})
        .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testMoverWhenStoragePolicyUnset";
        // write to DISK
        DFSTestUtil.createFile(dfs, new Path(file), 1L, (short) 1, 0L);
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(file), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(),
            new String[] {"-p", file.toString()});
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // Wait till namenode notified about the block location details
        waitForLocatedBlockWithArchiveStorageType(dfs, file, 1);
        // verify before unset policy
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        Assert.assertTrue(StorageType.ARCHIVE == (lb.getStorageTypes())[0]);
        // unset storage policy
        dfs.unsetStoragePolicy(new Path(file));
        rc = ToolRunner.run(conf, new Mover.Cli(),
            new String[] {"-p", file.toString()});
        Assert.assertEquals("Movement to DISK should be successful", 0, rc);
        lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        Assert.assertTrue(StorageType.DISK == (lb.getStorageTypes())[0]);
    } finally {
        cluster.shutdown();
    }
}
// Checks out the pluggable-SCM material at its latest modification into the material's
// working directory via the SCM plugin, reporting any failure to the build console
// before rethrowing.
@Override
public void prepare() {
    try {
        PluggableSCMMaterial material = (PluggableSCMMaterial) revision.getMaterial();
        Modification latestModification = revision.getLatestModification();
        SCMRevision scmRevision = new SCMRevision(latestModification.getRevision(), latestModification.getModifiedTime(), null, null, latestModification.getAdditionalDataMap(), null);
        File destinationFolder = material.workingDirectory(workingDirectory);
        Result result = scmExtension.checkout(material.getScmConfig().getPluginConfiguration().getId(), buildSCMPropertyConfigurations(material.getScmConfig()), destinationFolder.getAbsolutePath(), scmRevision);
        handleCheckoutResult(material, result);
    } catch (Exception e) {
        consumer.taggedErrOutput(PREP_ERR, String.format("Material %s checkout failed: %s", revision.getMaterial().getDisplayName(), e.getMessage()));
        throw e;
    }
}
// Success messages returned by the plugin's checkout result must be forwarded to stdout.
@Test
void shouldLogToStdOutWhenPluginSendsCheckoutResultWithSuccessMessages() {
    SCM scmConfig = SCMMother.create("scm-id", "scm-name", "pluginid", "version", new Configuration());
    PluggableSCMMaterial pluggableSCMMaterial = MaterialsMother.pluggableSCMMaterial();
    pluggableSCMMaterial.setFolder("destination-folder");
    pluggableSCMMaterial.setSCMConfig(scmConfig);
    Modification modification = ModificationsMother.oneModifiedFile("r1");
    Map<String, String> additionalData = new HashMap<>();
    additionalData.put("a1", "v1");
    additionalData.put("a2", "v2");
    modification.setAdditionalData(new Gson().toJson(additionalData));
    MaterialRevision revision = new MaterialRevision(pluggableSCMMaterial, modification);
    String pipelineFolder = new File(System.getProperty("java.io.tmpdir")).getAbsolutePath();
    String destinationFolder = new File(pipelineFolder, "destination-folder").getAbsolutePath();
    PluggableSCMMaterialAgent pluggableSCMMaterialAgent = new PluggableSCMMaterialAgent(scmExtension, revision, new File(pipelineFolder), consumer);
    when(scmExtension.checkout(eq("pluginid"), any(), eq(destinationFolder), any()))
        .thenReturn(new Result().withSuccessMessages("Material scm-name is updated."));
    pluggableSCMMaterialAgent.prepare();
    verify(consumer, times(1)).stdOutput("Material scm-name is updated.");
}
// Entry point for multiplexed REST requests: validates the POST method, parses and
// filters the individual requests, runs them in parallel via the engine, then aggregates
// the individual responses (and cookies) into a single RestResponse for the callback.
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, final Callback<RestResponse> callback) {
    if (HttpMethod.POST != HttpMethod.valueOf(request.getMethod())) {
        _log.error("POST is expected, but " + request.getMethod() + " received");
        callback.onError(RestException.forError(HttpStatus.S_405_METHOD_NOT_ALLOWED.getCode(), "Invalid method"));
        return;
    }
    // Disable server-side latency instrumentation for multiplexed requests
    requestContext.putLocalAttr(TimingContextUtil.TIMINGS_DISABLED_KEY_NAME, true);
    IndividualRequestMap individualRequests;
    try {
        individualRequests = extractIndividualRequests(request);
        if (_multiplexerSingletonFilter != null) {
            individualRequests = _multiplexerSingletonFilter.filterRequests(individualRequests);
        }
    } catch (RestException e) {
        // RestException already carries the proper HTTP status; pass it through as-is.
        _log.error("Invalid multiplexed request", e);
        callback.onError(e);
        return;
    } catch (Exception e) {
        _log.error("Invalid multiplexed request", e);
        callback.onError(RestException.forError(HttpStatus.S_400_BAD_REQUEST.getCode(), e));
        return;
    }
    // prepare the map of individual responses to be collected
    final IndividualResponseMap individualResponses = new IndividualResponseMap(individualRequests.size());
    final Map<String, HttpCookie> responseCookies = new HashMap<>();
    // all tasks are Void and side effect based, that will be useful when we add streaming
    Task<?> requestProcessingTask = createParallelRequestsTask(request, requestContext, individualRequests, individualResponses, responseCookies);
    Task<Void> responseAggregationTask = Task.action("send aggregated response", () -> {
        RestResponse aggregatedResponse = aggregateResponses(individualResponses, responseCookies);
        callback.onSuccess(aggregatedResponse);
    });
    _engine.run(requestProcessingTask.andThen(responseAggregationTask), MUX_PLAN_CLASS);
}
@Test(dataProvider = "multiplexerConfigurations") public void testHandleParallelRequests(MultiplexerRunMode multiplexerRunMode) throws Exception { SynchronousRequestHandler mockHandler = createMockHandler(); MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(mockHandler, multiplexerRunMode); RequestContext requestContext = new RequestContext(); RestRequest request = fakeMuxRestRequest(ImmutableMap.of("0", fakeIndRequest(FOO_URL), "1", fakeIndRequest(BAR_URL))); // set expectations expect(mockHandler.handleRequestSync(fakeIndRestRequest(FOO_URL), requestContext)).andReturn(fakeIndRestResponse(FOO_ENTITY)); expect(mockHandler.handleRequestSync(fakeIndRestRequest(BAR_URL), requestContext)).andReturn(fakeIndRestResponse(BAR_ENTITY)); // switch into replay mode replay(mockHandler); FutureCallback<RestResponse> callback = new FutureCallback<>(); multiplexer.handleRequest(request, requestContext, callback); RestResponse muxRestResponse = callback.get(); RestResponse expectedMuxRestResponse = fakeMuxRestResponse(ImmutableMap.of(0, fakeIndResponse(FOO_JSON_BODY), 1, fakeIndResponse(BAR_JSON_BODY))); assertEquals(muxRestResponse, expectedMuxRestResponse); verify(mockHandler); }
/**
 * Books the room with the given number.
 *
 * @throws Exception if the room does not exist or is already booked
 */
public void bookRoom(int roomNumber) throws Exception {
    var room = hotelDao.getById(roomNumber);
    // Guard clauses instead of nested branches: unknown room first.
    if (room.isEmpty()) {
        throw new Exception("Room number: " + roomNumber + " does not exist");
    }
    var roomToBook = room.get();
    if (roomToBook.isBooked()) {
        throw new Exception("Room already booked!");
    }
    roomToBook.setBooked(true);
    hotelDao.update(roomToBook);
}
// Booking an already-booked room must raise an exception ("Room already booked!").
@Test
@SneakyThrows
void bookingRoomAgainShouldRaiseException() {
    hotel.bookRoom(1);
    assertThrows(Exception.class, () -> hotel.bookRoom(1), "Room already booked!");
}
/**
 * Returns whether the two instants fall on the same calendar day, evaluated in the
 * JVM's default time zone: year, month and day-of-month must all agree.
 */
public static boolean isSameDay(Date date1, Date date2) {
    final Calendar first = Calendar.getInstance();
    first.setTime(date1);
    final Calendar second = Calendar.getInstance();
    second.setTime(date2);
    if (first.get(YEAR) != second.get(YEAR)) {
        return false;
    }
    if (first.get(MONTH) != second.get(MONTH)) {
        return false;
    }
    return first.get(DAY_OF_MONTH) == second.get(DAY_OF_MONTH);
}
// NOTE(review): the second assertion can flake when run near a DST transition or exactly
// at a midnight boundary, since now+24h is assumed to land on the next calendar day.
@Test
public void testSameDay() {
    Assert.assertTrue(isSameDay(new Date(), new Date()));
    Assert.assertFalse(isSameDay(new Date(), new Date(System.currentTimeMillis() + 24 * 60 * 60 * 1000)));
}
// Builds a CCITT-compressed PDImageXObject from a 1-bit black & white BufferedImage.
// Bits are inverted while packing so the image stream does not need /BlackIs1,
// and each row is padded to a byte boundary.
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
        throws IOException {
    if (image.getType() != BufferedImage.TYPE_BYTE_BINARY && image.getColorModel().getPixelSize() != 1) {
        throw new IllegalArgumentException("Only 1-bit b/w images supported");
    }
    int height = image.getHeight();
    int width = image.getWidth();
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (MemoryCacheImageOutputStream mcios = new MemoryCacheImageOutputStream(bos)) {
        for (int y = 0; y < height; ++y) {
            for (int x = 0; x < width; ++x) {
                // flip bit to avoid having to set /BlackIs1
                mcios.writeBits(~(image.getRGB(x, y) & 1), 1);
            }
            // Pad the row out to a whole byte.
            int bitOffset = mcios.getBitOffset();
            if (bitOffset != 0) {
                mcios.writeBits(0, 8 - bitOffset);
            }
        }
        mcios.flush();
    }
    return prepareImageXObject(document, bos.toByteArray(), width, height, PDDeviceGray.INSTANCE);
}
// Round-trip: a checkerboard bitmap whose width is not byte-aligned must survive CCITT
// encoding, render identically, and the saved PDF must load back with one page.
@Test
void testCreateFromBufferedChessImage() throws IOException {
    try (PDDocument document = new PDDocument()) {
        BufferedImage bim = new BufferedImage(343, 287, BufferedImage.TYPE_BYTE_BINARY);
        assertNotEquals((bim.getWidth() / 8) * 8, bim.getWidth()); // not mult of 8
        int col = 0;
        for (int x = 0; x < bim.getWidth(); ++x) {
            for (int y = 0; y < bim.getHeight(); ++y) {
                bim.setRGB(x, y, col & 0xFFFFFF);
                col = ~col;
            }
        }
        PDImageXObject ximage3 = CCITTFactory.createFromImage(document, bim);
        validate(ximage3, 1, 343, 287, "tiff", PDDeviceGray.INSTANCE.getName());
        checkIdent(bim, ximage3.getOpaqueImage(null, 1));
        PDPage page = new PDPage(PDRectangle.A4);
        document.addPage(page);
        try (PDPageContentStream contentStream = new PDPageContentStream(document, page, AppendMode.APPEND, false)) {
            contentStream.drawImage(ximage3, 0, 0, ximage3.getWidth(), ximage3.getHeight());
        }
        document.save(TESTRESULTSDIR + "/singletifffromchessbi.pdf");
    }
    try (PDDocument document = Loader.loadPDF(new File(TESTRESULTSDIR, "singletifffromchessbi.pdf"))) {
        assertEquals(1, document.getNumberOfPages());
    }
}
// Intentionally a no-op: this is the top-level command; all work happens in subcommands.
@Override
public void run() {
    // top-level command, do nothing
}
// "suspend <job-name>" must eventually transition the job to SUSPENDED.
@Test
public void test_suspendJob_byJobName() {
    // Given
    Job job = newJob();
    // When
    run("suspend", job.getName());
    // Then
    assertThat(job).eventuallyHasStatus(JobStatus.SUSPENDED);
}
// Fails the assertion when the actual iterable is empty.
// checkNotNull makes a null actual fail fast with an NPE rather than a subject failure.
public final void isNotEmpty() {
    if (Iterables.isEmpty(checkNotNull(actual))) {
        failWithoutActual(simpleFact("expected not to be empty"));
    }
}
// A one-element list must satisfy isNotEmpty().
@Test
public void iterableIsNotEmpty() {
    assertThat(asList("foo")).isNotEmpty();
}
/**
 * Maps a (possibly absent) AssignReplicasToDirs response to a short error label,
 * or {@code Optional.empty()} when the response is a well-formed success.
 * Checks are ordered from transport-level to payload-level failures.
 */
static Optional<String> globalResponseError(Optional<ClientResponse> response) {
    if (!response.isPresent()) {
        return Optional.of("Timeout");
    }
    if (response.get().authenticationException() != null) {
        return Optional.of("AuthenticationException");
    }
    if (response.get().wasTimedOut()) {
        // Fixed typo in the label: was "Disonnected[Timeout]".
        return Optional.of("Disconnected[Timeout]");
    }
    if (response.get().wasDisconnected()) {
        return Optional.of("Disconnected");
    }
    if (response.get().versionMismatch() != null) {
        return Optional.of("UnsupportedVersionException");
    }
    if (response.get().responseBody() == null) {
        return Optional.of("EmptyResponse");
    }
    if (!(response.get().responseBody() instanceof AssignReplicasToDirsResponse)) {
        return Optional.of("ClassCastException");
    }
    AssignReplicasToDirsResponseData data = ((AssignReplicasToDirsResponse)
        response.get().responseBody()).data();
    Errors error = Errors.forCode(data.errorCode());
    if (error != Errors.NONE) {
        return Optional.of("Response-level error: " + error.name());
    }
    return Optional.empty();
}
/** A response carrying an authentication exception must be labeled "AuthenticationException". */
@Test
public void testGlobalResponseErrorAuthenticationException() {
    assertEquals(Optional.of("AuthenticationException"),
        AssignmentsManager.globalResponseError(Optional.of(
            new ClientResponse(null, null, "", 0, 0, false, null,
                new AuthenticationException("failed"), null))));
}
/**
 * Views this resource's numeric value as the requested type.
 * Only {@code Object}, {@code double} and {@code Double} are supported; any other
 * type yields an empty Optional.
 */
@Override
public <T> Optional<T> valueAs(Class<T> type) {
    checkNotNull(type);
    boolean supported = type == Object.class || type == double.class || type == Double.class;
    if (!supported) {
        return Optional.empty();
    }
    @SuppressWarnings("unchecked")
    T result = (T) Double.valueOf(this.value);
    return Optional.of(result);
}
/** valueAs(Double.class) on a bandwidth resource must return the raw bps value. */
@Test
public void testValueAsObject() {
    ContinuousResource resource = Resources.continuous(D1, P1, Bandwidth.class)
            .resource(BW1.bps());
    Optional<Double> value = resource.valueAs(Double.class);
    assertThat(value.get(), is(BW1.bps()));
}
/**
 * Registers a source node that consumes the given topics.
 *
 * @param offsetReset        reset policy recorded per topic (earliest/latest), may be null
 * @param name               unique processor name; duplicate names are rejected
 * @param timestampExtractor per-source timestamp extractor, may be null
 * @param keyDeserializer    key deserializer, may be null (uses default)
 * @param valDeserializer    value deserializer, may be null (uses default)
 * @param topics             at least one non-null, not-yet-registered topic
 * @throws TopologyException on empty topic list, duplicate node name, or duplicate topic
 */
public final void addSource(final Topology.AutoOffsetReset offsetReset,
                            final String name,
                            final TimestampExtractor timestampExtractor,
                            final Deserializer<?> keyDeserializer,
                            final Deserializer<?> valDeserializer,
                            final String... topics) {
    if (topics.length == 0) {
        throw new TopologyException("You must provide at least one topic");
    }
    Objects.requireNonNull(name, "name must not be null");
    if (nodeFactories.containsKey(name)) {
        throw new TopologyException("Processor " + name + " is already added.");
    }
    // Validate and register each topic before creating the node factory.
    for (final String topic : topics) {
        Objects.requireNonNull(topic, "topic names cannot be null");
        validateTopicNotAlreadyRegistered(topic);
        maybeAddToResetList(earliestResetTopics, latestResetTopics, offsetReset, topic);
        rawSourceTopicNames.add(topic);
    }
    nodeFactories.put(name, new SourceNodeFactory<>(name, topics, null, timestampExtractor, keyDeserializer, valDeserializer));
    nodeToSourceTopics.put(name, Arrays.asList(topics));
    nodeGrouper.add(name);
    // Invalidate the cached grouping; it is rebuilt lazily.
    nodeGroups = null;
}
/** Adding two sources with the same processor name must raise a TopologyException. */
@Test
public void testAddSourceWithSameName() {
    builder.addSource(null, "source", null, null, null, "topic-1");
    try {
        builder.addSource(null, "source", null, null, null, "topic-2");
        fail("Should throw TopologyException with source name conflict");
    } catch (final TopologyException expected) {
        /* ok */
    }
}
/**
 * Enables or disables storage logging for the Azure account: when enabled, all logging
 * operation types are switched on; when disabled, none are. Other service properties
 * are preserved by downloading them first and re-uploading the modified set.
 */
@Override
public void setConfiguration(final Path container, final LoggingConfiguration configuration) throws BackgroundException {
    try {
        final ServiceProperties properties = session.getClient().downloadServiceProperties(null, context);
        final LoggingProperties l = new LoggingProperties();
        if(configuration.isEnabled()) {
            l.setLogOperationTypes(EnumSet.allOf(LoggingOperations.class));
        }
        else {
            l.setLogOperationTypes(EnumSet.noneOf(LoggingOperations.class));
        }
        properties.setLogging(l);
        session.getClient().uploadServiceProperties(properties, null, context);
    }
    catch(StorageException e) {
        throw new AzureExceptionMappingService().map("Failure to write attributes of {0}", e, container);
    }
}
/** Toggles logging off and on and verifies the read-back configuration reflects each state. */
@Test
public void testSetConfiguration() throws Exception {
    final Path container = new Path("/cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
    final AzureLoggingFeature feature = new AzureLoggingFeature(session, null);
    feature.setConfiguration(container, new LoggingConfiguration(false));
    assertFalse(feature.getConfiguration(container).isEnabled());
    feature.setConfiguration(container, new LoggingConfiguration(true));
    assertTrue(feature.getConfiguration(container).isEnabled());
}
/**
 * Resolves the tenant bound to the current context and passes it to the given handler.
 * Does nothing when the multi-tenancy feature is disabled.
 */
@Override
public void handleTenantInfo(TenantInfoHandler handler) {
    // Skip entirely when the tenant feature is disabled
    if (isTenantDisable()) {
        return;
    }
    // Load the tenant for the current context (tenant id must be present)
    TenantDO tenant = getTenant(TenantContextHolder.getRequiredTenantId());
    // Invoke the handler with the resolved tenant
    handler.handle(tenant);
}
/** When the tenant feature is disabled, the handler must never be invoked. */
@Test
public void testHandleTenantInfo_disable() {
    // Prepare arguments
    TenantInfoHandler handler = mock(TenantInfoHandler.class);
    // Mock the feature as disabled
    when(tenantProperties.getEnable()).thenReturn(false);
    // Call
    tenantService.handleTenantInfo(handler);
    // Assert: handler untouched
    verify(handler, never()).handle(any());
}
/**
 * Validates all user-supplied metadata fields of an extension version (version string,
 * target platform, display name, description, categories, keywords, license, URLs,
 * markdown/gallery/Q&amp;A settings) and collects every problem found.
 * The whole check runs inside a Micrometer Observation for tracing.
 *
 * @param extVersion the extension version whose metadata is checked
 * @return list of validation issues; empty when the metadata is valid
 */
public List<Issue> validateMetadata(ExtensionVersion extVersion) {
    return Observation.createNotStarted("ExtensionValidator#validateMetadata", observations).observe(() -> {
        var issues = new ArrayList<Issue>();
        checkVersion(extVersion.getVersion(), issues);
        checkTargetPlatform(extVersion.getTargetPlatform(), issues);
        checkCharacters(extVersion.getDisplayName(), "displayName", issues);
        checkFieldSize(extVersion.getDisplayName(), DEFAULT_STRING_SIZE, "displayName", issues);
        checkCharacters(extVersion.getDescription(), "description", issues);
        checkFieldSize(extVersion.getDescription(), DESCRIPTION_SIZE, "description", issues);
        checkCharacters(extVersion.getCategories(), "categories", issues);
        checkFieldSize(extVersion.getCategories(), DEFAULT_STRING_SIZE, "categories", issues);
        checkCharacters(extVersion.getTags(), "keywords", issues);
        checkFieldSize(extVersion.getTags(), DEFAULT_STRING_SIZE, "keywords", issues);
        checkCharacters(extVersion.getLicense(), "license", issues);
        checkFieldSize(extVersion.getLicense(), DEFAULT_STRING_SIZE, "license", issues);
        checkURL(extVersion.getHomepage(), "homepage", issues);
        checkFieldSize(extVersion.getHomepage(), DEFAULT_STRING_SIZE, "homepage", issues);
        checkURL(extVersion.getRepository(), "repository", issues);
        checkFieldSize(extVersion.getRepository(), DEFAULT_STRING_SIZE, "repository", issues);
        checkURL(extVersion.getBugs(), "bugs", issues);
        checkFieldSize(extVersion.getBugs(), DEFAULT_STRING_SIZE, "bugs", issues);
        // markdown / gallery fields are restricted to fixed enumerations
        checkInvalid(extVersion.getMarkdown(), s -> !MARKDOWN_VALUES.contains(s), "markdown", issues, MARKDOWN_VALUES.toString());
        checkCharacters(extVersion.getGalleryColor(), "galleryBanner.color", issues);
        checkFieldSize(extVersion.getGalleryColor(), GALLERY_COLOR_SIZE, "galleryBanner.color", issues);
        checkInvalid(extVersion.getGalleryTheme(), s -> !GALLERY_THEME_VALUES.contains(s), "galleryBanner.theme", issues, GALLERY_THEME_VALUES.toString());
        checkFieldSize(extVersion.getLocalizedLanguages(), DEFAULT_STRING_SIZE, "localizedLanguages", issues);
        // qna may be one of the fixed values or any valid URL
        checkInvalid(extVersion.getQna(), s -> !QNA_VALUES.contains(s) && isInvalidURL(s), "qna", issues, QNA_VALUES.toString() + " or a URL");
        checkFieldSize(extVersion.getQna(), DEFAULT_STRING_SIZE, "qna", issues);
        return issues;
    });
}
/** A non-URL repository value must produce exactly one "Invalid URL" issue. */
@Test
public void testInvalidURL() {
    var extension = new ExtensionVersion();
    extension.setTargetPlatform(TargetPlatform.NAME_UNIVERSAL);
    extension.setVersion("1.0.0");
    extension.setRepository("Foo and bar!");
    var issues = validator.validateMetadata(extension);
    assertThat(issues).hasSize(1);
    assertThat(issues.get(0))
            .isEqualTo(new ExtensionValidator.Issue("Invalid URL in field 'repository': Foo and bar!"));
}
/**
 * Reads the data set without bulk-data handling: the predicate passed to the
 * filtering overload rejects every attribute.
 *
 * @throws IOException if reading the stream fails
 */
public Attributes readDataset() throws IOException {
    return readDataset(attr -> false);
}
/**
 * Parses an explicit-VR-little-endian fragment whose element (0008,0068) uses the
 * unknown VR "UK" and verifies the value is still read as PresentationIntentType.
 */
@Test
public void correctVR() throws IOException {
    byte[] b = { 0x08, 0, 0x68, 0, 'U', 'K', 16, 0,
        'F', 'O', 'R', ' ', 'P', 'R', 'E', 'S', 'E', 'N', 'T', 'A', 'T', 'I', 'O', 'N' };
    Attributes attrs;
    try (DicomInputStream in = new DicomInputStream(new ByteArrayInputStream(b), UID.ExplicitVRLittleEndian)) {
        attrs = in.readDataset();
    }
    assertEquals("FOR PRESENTATION", attrs.getString(Tag.PresentationIntentType));
}
/** This implementation is unconditionally supported. */
@Override
public boolean isSupported() {
    return true;
}
/** CoolpadImpl must always report itself as supported. */
@Test
public void isSupported() {
    CoolpadImpl coolpad = new CoolpadImpl(mApplication);
    Assert.assertTrue(coolpad.isSupported());
}
/**
 * Records one block event on the stat logger.
 *
 * @param resource      blocked resource name
 * @param exceptionName name of the block exception
 * @param ruleLimitApp  the rule's limitApp value
 * @param origin        caller origin
 * @param ruleId        triggering rule id; logged as an empty string when null
 * @param count         number of occurrences to add
 */
public static void log(String resource, String exceptionName, String ruleLimitApp, String origin, Long ruleId, int count) {
    String ruleIdText = (ruleId == null) ? StringUtil.EMPTY : String.valueOf(ruleId);
    statLogger.stat(resource, exceptionName, ruleLimitApp, origin, ruleIdText).count(count);
}
/** Logging one event must asynchronously create the EagleEye log file on disk. */
@Test
public void testWriteLog() throws Exception {
    EagleEyeLogUtil.log("resourceName", "BlockException", "app1", "origin", 1L, 1);
    final File file = new File(LogBase.getLogBaseDir() + EagleEyeLogUtil.FILE_NAME);
    // The logger writes asynchronously, so poll until the file appears (max 2s).
    await().timeout(2, TimeUnit.SECONDS)
            .until(new Callable<File>() {
                @Override
                public File call() throws Exception {
                    return file;
                }
            }, FileMatchers.anExistingFile());
}
/** @return the transport client agent backing this worker. */
public ConfigTransportClient getAgent() {
    return this.agent;
}
/**
 * A ConfigChangeNotifyRequest for a cached group key must mark the matching CacheData
 * as out of sync with the server and flag that a change notification was received.
 */
@Test
void testHandleConfigChangeReqeust() throws Exception {
    Properties prop = new Properties();
    String tenant = "c";
    prop.put(NAMESPACE, tenant);
    ServerListManager agent = Mockito.mock(ServerListManager.class);
    final NacosClientProperties nacosClientProperties = NacosClientProperties.PROTOTYPE.derive(prop);
    ClientWorker clientWorker = new ClientWorker(null, agent, nacosClientProperties);
    // Replace the worker's private cacheMap with mocks via reflection.
    AtomicReference<Map<String, CacheData>> cacheMapMocked = Mockito.mock(AtomicReference.class);
    Field cacheMap = ClientWorker.class.getDeclaredField("cacheMap");
    cacheMap.setAccessible(true);
    cacheMap.set(clientWorker, cacheMapMocked);
    Map<String, CacheData> cacheDataMapMocked = Mockito.mock(Map.class);
    Mockito.when(cacheMapMocked.get()).thenReturn(cacheDataMapMocked);
    CacheData cacheDataMocked = Mockito.mock(CacheData.class);
    AtomicBoolean atomicBoolean = Mockito.mock(AtomicBoolean.class);
    Mockito.when(cacheDataMocked.getReceiveNotifyChanged()).thenReturn(atomicBoolean);
    String dataId = "a";
    String group = "b";
    Mockito.when(cacheDataMapMocked.get(GroupKey.getKeyTenant(dataId, group, tenant))).thenReturn(cacheDataMocked);
    ConfigChangeNotifyRequest configChangeNotifyRequest = ConfigChangeNotifyRequest.build(dataId, group, tenant);
    ((ClientWorker.ConfigRpcTransportClient) clientWorker.getAgent()).handleConfigChangeNotifyRequest(
            configChangeNotifyRequest, "testname");
    // The cache entry must be marked stale and the notify flag set exactly once.
    Mockito.verify(cacheDataMocked, times(1)).setConsistentWithServer(false);
    Mockito.verify(atomicBoolean, times(1)).set(true);
}
/**
 * Weighted-random provider selection: when providers carry different positive weights,
 * a provider is chosen with probability proportional to its weight; when all weights
 * are equal (or the total is zero) the choice is uniform.
 *
 * @param invocation    the request being routed (unused by this strategy)
 * @param providerInfos candidate providers; must be non-empty
 * @return the selected provider
 */
@Override
public ProviderInfo doSelect(SofaRequest invocation, List<ProviderInfo> providerInfos) {
    ProviderInfo providerInfo = null;
    int size = providerInfos.size(); // total number of candidates
    int totalWeight = 0; // sum of all weights
    boolean isWeightSame = true; // whether every weight is identical
    for (int i = 0; i < size; i++) {
        int weight = getWeight(providerInfos.get(i));
        totalWeight += weight; // accumulate the total weight
        if (isWeightSame && i > 0 && weight != getWeight(providerInfos.get(i - 1))) {
            isWeightSame = false; // detected two different weights
        }
    }
    if (totalWeight > 0 && !isWeightSame) {
        // Weights differ and the total is positive: draw within [0, totalWeight)
        int offset = random.nextInt(totalWeight);
        // Walk the segments to find which provider the offset falls into
        for (int i = 0; i < size; i++) {
            offset -= getWeight(providerInfos.get(i));
            if (offset < 0) {
                providerInfo = providerInfos.get(i);
                break;
            }
        }
    } else {
        // All weights equal (or total weight is zero): uniform random choice
        providerInfo = providerInfos.get(random.nextInt(size));
    }
    return providerInfo;
}
/**
 * Statistical check of RandomLoadBalancer: with equal weights the hit counts must be
 * near-uniform (within 10%); with weights proportional to the index, hit counts must be
 * proportional to the weight (within 15%) and a zero-weight provider never selected.
 */
@Test
public void doSelect() throws Exception {
    RandomLoadBalancer loadBalancer = new RandomLoadBalancer(null);
    Map<Integer, Integer> cnt = new HashMap<Integer, Integer>();
    int size = 20;
    int total = 100000;
    SofaRequest request = new SofaRequest();
    {
        // Case 1: all providers share the same weight.
        for (int i = 0; i < size; i++) {
            cnt.put(9000 + i, 0);
        }
        List<ProviderInfo> providers = buildSameWeightProviderList(size);
        long start = System.currentTimeMillis();
        for (int i = 0; i < total; i++) {
            ProviderInfo provider = loadBalancer.doSelect(request, providers);
            int port = provider.getPort();
            cnt.put(port, cnt.get(port) + 1);
        }
        long end = System.currentTimeMillis();
        LOGGER.info("elapsed" + (end - start) + "ms");
        LOGGER.info("avg " + (end - start) * 1000 * 1000 / total + "ns");
        int avg = total / size;
        for (int i = 0; i < size; i++) {
            Assert.assertTrue(avg * 0.9 < cnt.get(9000 + i)
                && avg * 1.1 > cnt.get(9000 + i)); // random deviation should stay within 10%
        }
    }
    {
        // Case 2: provider i carries weight proportional to i (provider 0 has weight 0).
        for (int i = 0; i < size; i++) {
            cnt.put(9000 + i, 0);
        }
        List<ProviderInfo> providers = buildDiffWeightProviderList(size);
        long start = System.currentTimeMillis();
        for (int i = 0; i < total; i++) {
            ProviderInfo provider = loadBalancer.doSelect(request, providers);
            int port = provider.getPort();
            cnt.put(port, cnt.get(port) + 1);
        }
        long end = System.currentTimeMillis();
        LOGGER.info("elapsed" + (end - start) + "ms");
        LOGGER.info("avg " + (end - start) * 1000 * 1000 / total + "ns");
        Assert.assertTrue(cnt.get(9000) == 0);
        int count = 0;
        for (int i = 0; i < size; i++) {
            count += i;
        }
        int per = total / count;
        for (int i = 1; i < size; i++) {
            Assert.assertTrue(per * i * 0.85 < cnt.get(9000 + i)
                && per * i * 1.15 > cnt.get(9000 + i)); // random deviation should stay within 15%
        }
    }
}
/**
 * Static factory for a request-key {@code Builder}.
 *
 * @param method HTTP method of the request
 * @param url    request URL
 * @return a new builder seeded with the given method and URL
 */
public static Builder builder(HttpMethod method, String url) {
    return new Builder(method, url);
}
/** The builder must carry method, URL, headers and charset through to the built key. */
@Test
void builder() throws Exception {
    assertThat(requestKey.getMethod()).isEqualTo(HttpMethod.GET);
    assertThat(requestKey.getUrl()).isEqualTo("a");
    assertThat(requestKey.getHeaders().size()).isEqualTo(1);
    assertThat(requestKey.getHeaders().fetch("my-header"))
            .isEqualTo(Arrays.asList("val"));
    assertThat(requestKey.getCharset()).isEqualTo(StandardCharsets.UTF_16);
}
/**
 * Null-safe emptiness check for a collection.
 *
 * @param collection the collection, may be null
 * @return true when the collection is null or has no elements
 */
public static boolean isEmpty(@SuppressWarnings("rawtypes") Collection collection) {
    if (collection == null) {
        return true;
    }
    return collection.isEmpty();
}
/** isEmpty must be true for null and empty lists, false for a one-element list. */
@Test
void collectionIsEmpty() {
    assertThat(CollectionUtil.isEmpty(null)).isTrue();
    assertThat(CollectionUtil.isEmpty(Collections.emptyList())).isTrue();
    assertThat(CollectionUtil.isEmpty(Collections.singletonList("test"))).isFalse();
}
/**
 * HPACK-decodes a header block into an {@code Http2Headers} instance and updates the
 * exponentially-weighted running estimate of header-list size. HTTP/2 exceptions
 * propagate unchanged; any other failure is wrapped in a COMPRESSION_ERROR
 * connection error.
 */
@Override
public Http2Headers decodeHeaders(int streamId, ByteBuf headerBlock) throws Http2Exception {
    try {
        final Http2Headers headers = newHeaders();
        hpackDecoder.decode(streamId, headerBlock, headers, validateHeaders);
        // Blend the latest header count into the historical moving average.
        headerArraySizeAccumulator = HEADERS_COUNT_WEIGHT_NEW * headers.size() +
                HEADERS_COUNT_WEIGHT_HISTORICAL * headerArraySizeAccumulator;
        return headers;
    } catch (Http2Exception e) {
        throw e;
    } catch (Throwable e) {
        // Default handler for any other types of errors that may have occurred. For example,
        // the Header builder throws IllegalArgumentException if the key or value was invalid
        // for any reason (e.g. the key was an invalid pseudo-header).
        throw connectionError(COMPRESSION_ERROR, e, "Error decoding headers: %s", e.getMessage());
    }
}
/** Decoding a valid header block must yield the encoded pseudo-header and custom headers. */
@Test
public void decodeShouldSucceed() throws Exception {
    ByteBuf buf = encode(b(":method"), b("GET"), b("akey"), b("avalue"), randomBytes(), randomBytes());
    try {
        Http2Headers headers = decoder.decodeHeaders(0, buf);
        assertEquals(3, headers.size());
        assertEquals("GET", headers.method().toString());
        assertEquals("avalue", headers.get(new AsciiString("akey")).toString());
    } finally {
        buf.release();
    }
}
/**
 * Factory for the matcher implementation corresponding to a stream-rule type.
 *
 * @param ruleType the rule type to build a matcher for
 * @return a fresh matcher instance
 * @throws InvalidStreamRuleTypeException when the type has no matcher implementation
 */
public static StreamRuleMatcher build(StreamRuleType ruleType) throws InvalidStreamRuleTypeException {
    if (ruleType == StreamRuleType.EXACT) {
        return new ExactMatcher();
    }
    if (ruleType == StreamRuleType.REGEX) {
        return new RegexMatcher();
    }
    if (ruleType == StreamRuleType.GREATER) {
        return new GreaterMatcher();
    }
    if (ruleType == StreamRuleType.SMALLER) {
        return new SmallerMatcher();
    }
    if (ruleType == StreamRuleType.PRESENCE) {
        return new FieldPresenceMatcher();
    }
    if (ruleType == StreamRuleType.CONTAINS) {
        return new ContainsMatcher();
    }
    if (ruleType == StreamRuleType.ALWAYS_MATCH) {
        return new AlwaysMatcher();
    }
    if (ruleType == StreamRuleType.MATCH_INPUT) {
        return new InputMatcher();
    }
    throw new InvalidStreamRuleTypeException();
}
/** Each rule type must map to its corresponding matcher implementation. */
@Test
public void buildReturnsCorrectStreamRuleMatcher() throws Exception {
    assertThat(StreamRuleMatcherFactory.build(StreamRuleType.EXACT)).isInstanceOf(ExactMatcher.class);
    assertThat(StreamRuleMatcherFactory.build(StreamRuleType.REGEX)).isInstanceOf(RegexMatcher.class);
    assertThat(StreamRuleMatcherFactory.build(StreamRuleType.GREATER)).isInstanceOf(GreaterMatcher.class);
    assertThat(StreamRuleMatcherFactory.build(StreamRuleType.SMALLER)).isInstanceOf(SmallerMatcher.class);
    assertThat(StreamRuleMatcherFactory.build(StreamRuleType.PRESENCE)).isInstanceOf(FieldPresenceMatcher.class);
    assertThat(StreamRuleMatcherFactory.build(StreamRuleType.ALWAYS_MATCH)).isInstanceOf(AlwaysMatcher.class);
    assertThat(StreamRuleMatcherFactory.build(StreamRuleType.MATCH_INPUT)).isInstanceOf(InputMatcher.class);
}
/**
 * Applies the selected file attributes from a source listing status onto the target path:
 * ACLs (with sticky-bit fixup), permissions, xattrs (raw-namespace xattrs always when
 * requested), replication (replicated files only), owner/group and timestamps.
 * Each attribute is only written when it actually differs from the target's current value.
 *
 * @param targetFS          target file system
 * @param path              target path to update
 * @param srcFileStatus     attributes captured from the source
 * @param attributes        which attributes to preserve (BLOCKSIZE/CHECKSUMTYPE are ignored);
 *                          NOTE: this set is mutated by the method
 * @param preserveRawXattrs whether raw-namespace xattrs are copied even without XATTR
 * @throws IOException on any file-system failure
 */
public static void preserve(FileSystem targetFS, Path path,
                            CopyListingFileStatus srcFileStatus,
                            EnumSet<FileAttribute> attributes,
                            boolean preserveRawXattrs) throws IOException {
    // strip out those attributes we don't need any more
    attributes.remove(FileAttribute.BLOCKSIZE);
    attributes.remove(FileAttribute.CHECKSUMTYPE);
    // If not preserving anything from FileStatus, don't bother fetching it.
    FileStatus targetFileStatus = attributes.isEmpty() ? null : targetFS.getFileStatus(path);
    String group = targetFileStatus == null ? null : targetFileStatus.getGroup();
    String user = targetFileStatus == null ? null : targetFileStatus.getOwner();
    boolean chown = false;
    if (attributes.contains(FileAttribute.ACL)) {
        List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
        List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
        if (!srcAcl.equals(targetAcl)) {
            targetFS.removeAcl(path);
            targetFS.setAcl(path, srcAcl);
        }
        // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
        if (srcFileStatus.getPermission().getStickyBit() !=
                targetFileStatus.getPermission().getStickyBit()) {
            targetFS.setPermission(path, srcFileStatus.getPermission());
        }
    } else if (attributes.contains(FileAttribute.PERMISSION) &&
            !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
        targetFS.setPermission(path, srcFileStatus.getPermission());
    }
    final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
    if (preserveXAttrs || preserveRawXattrs) {
        final String rawNS = StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
        Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
        Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
        if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
            for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
                String xattrName = entry.getKey();
                // raw.* xattrs are copied whenever preserveRawXattrs is set,
                // everything else only when XATTR preservation was requested.
                if (xattrName.startsWith(rawNS) || preserveXAttrs) {
                    targetFS.setXAttr(path, xattrName, entry.getValue());
                }
            }
        }
    }
    // The replication factor can only be preserved for replicated files.
    // It is ignored when either the source or target file are erasure coded.
    if (attributes.contains(FileAttribute.REPLICATION) && !targetFileStatus.isDirectory() &&
            !targetFileStatus.isErasureCoded() && !srcFileStatus.isErasureCoded() &&
            srcFileStatus.getReplication() != targetFileStatus.getReplication()) {
        targetFS.setReplication(path, srcFileStatus.getReplication());
    }
    if (attributes.contains(FileAttribute.GROUP) && !group.equals(srcFileStatus.getGroup())) {
        group = srcFileStatus.getGroup();
        chown = true;
    }
    if (attributes.contains(FileAttribute.USER) && !user.equals(srcFileStatus.getOwner())) {
        user = srcFileStatus.getOwner();
        chown = true;
    }
    if (chown) {
        // One combined owner/group update to avoid two RPCs.
        targetFS.setOwner(path, user, group);
    }
    if (attributes.contains(FileAttribute.TIMES)) {
        targetFS.setTimes(path, srcFileStatus.getModificationTime(), srcFileStatus.getAccessTime());
    }
}
/**
 * preserve() applied to one directory (d2) must change only that directory: its parent (d1),
 * sibling files and files inside it must keep their own attributes (no upward or downward
 * recursion).
 */
@Test
public void testPreserveOnDirectoryUpwardRecursion() throws IOException {
    FileSystem fs = FileSystem.get(config);
    EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
    // Remove ACL because tests run with dfs.namenode.acls.enabled false
    attributes.remove(FileAttribute.ACL);
    Path src = new Path("/tmp/src2");
    Path f0 = new Path("/f0");
    Path f1 = new Path("/d1/f1");
    Path f2 = new Path("/d1/d2/f2");
    Path d1 = new Path("/d1/");
    Path d2 = new Path("/d1/d2/");
    createFile(fs, src);
    createFile(fs, f0);
    createFile(fs, f1);
    createFile(fs, f2);
    // Give the source distinctive attributes and everything else a common, different set.
    fs.setPermission(src, almostFullPerm);
    fs.setOwner(src, "somebody", "somebody-group");
    fs.setTimes(src, 0, 0);
    fs.setReplication(src, (short) 1);
    fs.setPermission(d1, fullPerm);
    fs.setOwner(d1, "anybody", "anybody-group");
    fs.setTimes(d1, 400, 400);
    fs.setReplication(d1, (short) 3);
    fs.setPermission(d2, fullPerm);
    fs.setOwner(d2, "anybody", "anybody-group");
    fs.setTimes(d2, 300, 300);
    fs.setReplication(d2, (short) 3);
    fs.setPermission(f0, fullPerm);
    fs.setOwner(f0, "anybody", "anybody-group");
    fs.setTimes(f0, 200, 200);
    fs.setReplication(f0, (short) 3);
    fs.setPermission(f1, fullPerm);
    fs.setOwner(f1, "anybody", "anybody-group");
    fs.setTimes(f1, 200, 200);
    fs.setReplication(f1, (short) 3);
    fs.setPermission(f2, fullPerm);
    fs.setOwner(f2, "anybody", "anybody-group");
    fs.setTimes(f2, 200, 200);
    fs.setReplication(f2, (short) 3);
    CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
    DistCpUtils.preserve(fs, d2, srcStatus, attributes, false);
    cluster.triggerHeartbeats();
    // FileStatus.equals only compares path field, must explicitly compare all fields
    // attributes of src -> d2 ? should be yes
    CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
    Assert.assertTrue(srcStatus.getPermission().equals(d2Status.getPermission()));
    Assert.assertTrue(srcStatus.getOwner().equals(d2Status.getOwner()));
    Assert.assertTrue(srcStatus.getGroup().equals(d2Status.getGroup()));
    Assert.assertTrue(srcStatus.getAccessTime() == d2Status.getAccessTime());
    Assert.assertTrue(srcStatus.getModificationTime() == d2Status.getModificationTime());
    // Replication is never applied to directories, so it still differs.
    Assert.assertTrue(srcStatus.getReplication() != d2Status.getReplication());
    // attributes of src -> d1 ? should be no
    CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
    Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
    Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
    Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
    Assert.assertFalse(srcStatus.getAccessTime() == d1Status.getAccessTime());
    Assert.assertFalse(srcStatus.getModificationTime() == d1Status.getModificationTime());
    Assert.assertTrue(srcStatus.getReplication() != d1Status.getReplication());
    // attributes of src -> f2 ? should be no
    CopyListingFileStatus f2Status = new CopyListingFileStatus(fs.getFileStatus(f2));
    Assert.assertFalse(srcStatus.getPermission().equals(f2Status.getPermission()));
    Assert.assertFalse(srcStatus.getOwner().equals(f2Status.getOwner()));
    Assert.assertFalse(srcStatus.getGroup().equals(f2Status.getGroup()));
    Assert.assertFalse(srcStatus.getAccessTime() == f2Status.getAccessTime());
    Assert.assertFalse(srcStatus.getModificationTime() == f2Status.getModificationTime());
    Assert.assertFalse(srcStatus.getReplication() == f2Status.getReplication());
    // attributes of src -> f1 ? should be no
    CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
    Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
    Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
    Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
    Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
    Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
    Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());
    // attributes of src -> f0 ? should be no
    CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
    Assert.assertFalse(srcStatus.getPermission().equals(f0Status.getPermission()));
    Assert.assertFalse(srcStatus.getOwner().equals(f0Status.getOwner()));
    Assert.assertFalse(srcStatus.getGroup().equals(f0Status.getGroup()));
    Assert.assertFalse(srcStatus.getAccessTime() == f0Status.getAccessTime());
    Assert.assertFalse(srcStatus.getModificationTime() == f0Status.getModificationTime());
    Assert.assertFalse(srcStatus.getReplication() == f0Status.getReplication());
}
/** @return the per-log-directory metadata properties, keyed by log directory path. */
public Map<String, MetaProperties> logDirProps() {
    return logDirProps;
}
/** An empty ensemble must expose no log-directory property entries. */
@Test
public void testLogDirPropsForEmpty() {
    assertEquals(new HashSet<>(), EMPTY.logDirProps().keySet());
}
/**
 * Two-phase lookahead: during the read phase the next element is deserialized from the
 * in-memory block buffer; otherwise it is pulled from the backing input and written into
 * the buffer. An element that no longer fits the buffer is parked in {@code leftOverElement}
 * for the next block; input exhaustion sets {@code noMoreBlocks}.
 * IOExceptions from (de)serialization are rethrown as RuntimeException.
 */
@Override
public boolean hasNext() {
    try {
        if (this.nextElement == null) {
            if (this.readPhase) {
                // read phase, get next element from buffer
                T tmp = getNextRecord(this.reuseElement);
                if (tmp != null) {
                    this.nextElement = tmp;
                    return true;
                } else {
                    return false;
                }
            } else {
                if (this.input.hasNext()) {
                    final T next = this.input.next();
                    if (writeNextRecord(next)) {
                        this.nextElement = next;
                        return true;
                    } else {
                        // buffer full: remember the element for the next block
                        this.leftOverElement = next;
                        return false;
                    }
                } else {
                    this.noMoreBlocks = true;
                    return false;
                }
            }
        } else {
            // lookahead already holds an element
            return true;
        }
    } catch (IOException ioex) {
        throw new RuntimeException(
                "Error (de)serializing record in block resettable iterator.", ioex);
    }
}
/**
 * Walks the block-resettable iterator block by block, resetting each block five times,
 * and verifies the sequence of values is reproduced exactly on every pass.
 */
@Test
void testSerialBlockResettableIterator() throws Exception {
    final AbstractInvokable memOwner = new DummyInvokable();
    // create the resettable Iterator
    final ReusingBlockResettableIterator<Record> iterator =
            new ReusingBlockResettableIterator<Record>(
                    this.memman, this.reader, this.serializer, 1, memOwner);
    // open the iterator
    iterator.open();
    // now test walking through the iterator
    int lower = 0;
    int upper = 0;
    do {
        lower = upper;
        upper = lower;
        // find the upper bound
        while (iterator.hasNext()) {
            Record target = iterator.next();
            int val = target.getField(0, IntValue.class).getValue();
            assertThat(val).isEqualTo(upper++);
        }
        // now reset the buffer a few times
        for (int i = 0; i < 5; ++i) {
            iterator.reset();
            int count = 0;
            while (iterator.hasNext()) {
                Record target = iterator.next();
                int val = target.getField(0, IntValue.class).getValue();
                assertThat(val).isEqualTo(lower + (count++));
            }
            assertThat(count).isEqualTo(upper - lower);
        }
    } while (iterator.nextBlock());
    assertThat(upper).isEqualTo(NUM_VALUES);
    // close the iterator
    iterator.close();
}
/** Returns a duplicate whose reference count has already been incremented by one. */
@Override
public ByteBuf retainedDuplicate() {
    return duplicate().retain();
}
/** Slicing a retained duplicate after its buffer is released must fail. */
@Test
public void testSliceAfterReleaseRetainedDuplicate() {
    ByteBuf buf = newBuffer(1);
    ByteBuf buf2 = buf.retainedDuplicate();
    assertSliceFailAfterRelease(buf, buf2);
}
/**
 * Resolves the class that declares the implementation method behind a lambda /
 * method reference.
 *
 * @param func serializable functional reference
 * @param <R>  the expected declaring class type
 * @return the class loaded from the lambda's implementation-class name
 */
public static <R> Class<R> getRealClass(Func0<?> func) {
    SerializedLambda serialized = resolve(func);
    checkLambdaTypeCanGetClass(serialized.getImplMethodKind());
    String implClassName = serialized.getImplClass();
    return ClassUtil.loadClass(implClassName);
}
/**
 * Exercises getRealClass for every flavor of method reference and documents which
 * resolve to the exact subclass and which only resolve to the declaring supertype.
 */
@Test
public void getRealClassTest() {
    // Reference to an instance method of an arbitrary object of a particular type
    final Class<MyTeacher> functionClass = LambdaUtil.getRealClass(MyTeacher::getAge);
    assertEquals(MyTeacher.class, functionClass);
    // Enum case: no type erasure, the enum class itself is resolved
    final Class<LambdaKindEnum> enumFunctionClass = LambdaUtil.getRealClass(LambdaKindEnum::ordinal);
    assertEquals(LambdaKindEnum.class, enumFunctionClass);
    // Referencing a superclass method still resolves the concrete subclass
    final Class<MyTeacher> superFunctionClass = LambdaUtil.getRealClass(MyTeacher::getId);
    assertEquals(MyTeacher.class, superFunctionClass);
    final MyTeacher myTeacher = new MyTeacher();
    // Reference to an instance method of a particular object
    final Class<MyTeacher> supplierClass = LambdaUtil.getRealClass(myTeacher::getAge);
    assertEquals(MyTeacher.class, supplierClass);
    // Enum case on a bound instance: only the Enum base type is resolvable
    final Class<Enum<?>> enumSupplierClass = LambdaUtil.getRealClass(LambdaKindEnum.REF_NONE::ordinal);
    assertEquals(Enum.class, enumSupplierClass);
    // Bound reference to a superclass method: only the superclass type is resolvable
    final Class<Entity<?>> superSupplierClass = LambdaUtil.getRealClass(myTeacher::getId);
    assertEquals(Entity.class, superSupplierClass);
    // Static method with a parameter: resolves the declaring class correctly
    final Class<MyTeacher> staticFunctionClass = LambdaUtil.getRealClass(MyTeacher::takeAgeBy);
    assertEquals(MyTeacher.class, staticFunctionClass);
    // Static superclass method with a parameter: only the superclass is resolvable
    final Class<Entity<?>> staticSuperFunctionClass = LambdaUtil.getRealClass(MyTeacher::takeId);
    assertEquals(Entity.class, staticSuperFunctionClass);
    // Static no-arg method: resolves the declaring class correctly
    final Class<MyTeacher> staticSupplierClass = LambdaUtil.getRealClass(MyTeacher::takeAge);
    assertEquals(MyTeacher.class, staticSupplierClass);
    // Static superclass no-arg method declared on the subclass: resolves the subclass
    final Class<MyTeacher> staticSuperSupplierClass = LambdaUtil.getRealClass(MyTeacher::takeIdBy);
    assertEquals(MyTeacher.class, staticSuperSupplierClass);
}
/**
 * Evaluates {@code fromForeach} on the internal executor with a hard timeout
 * (TIMEOUT_IN_MILLIS). Any failure — timeout, interruption or evaluation error —
 * is wrapped in a MaestroInternalError carrying the lookup coordinates.
 */
Object getFromForeach(String foreachStepId, String stepId, String paramName) {
    try {
        return executor
                .submit(() -> fromForeach(foreachStepId, stepId, paramName))
                .get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        throw new MaestroInternalError(
                e,
                "getFromForeach throws an exception for foreachStepId=[%s], stepId=[%s], paramName=[%s]",
                foreachStepId, stepId, paramName);
    }
}
/**
 * A LONG-typed foreach param with one evaluated iteration ("12" at index 1) must come
 * back as a long[] with that value in slot 0 and zero-padding for the rest.
 */
@Test
public void testGetFromForeach() throws Exception {
    StepRuntimeSummary summary = loadObject(TEST_STEP_RUNTIME_SUMMARY, StepRuntimeSummary.class);
    when(allStepOutputData.get("foreach-job"))
            .thenReturn(Collections.singletonMap("maestro_step_runtime_summary", summary));
    when(stepInstanceDao.getForeachParamType(any(), any(), any())).thenReturn(ParamType.LONG);
    when(stepInstanceDao.getEvaluatedResultsFromForeach(any(), any(), any()))
            .thenReturn(Collections.singletonMap(1L, "12"));
    long[] res = (long[]) paramExtension.getFromForeach("foreach-job", "job1", "sleep_seconds");
    assertArrayEquals(new long[] {12, 0, 0, 0, 0, 0}, res);
}
/**
 * Resolves AWS credentials in priority order: explicit access/secret key from config,
 * then the EC2 IAM role, then ECS task credentials when running on ECS.
 *
 * @throws NoCredentialsException when no source can supply credentials
 */
AwsCredentials credentials() {
    String configuredAccessKey = awsConfig.getAccessKey();
    if (!StringUtil.isNullOrEmptyAfterTrim(configuredAccessKey)) {
        // Static credentials configured explicitly win over everything else.
        return AwsCredentials.builder()
                .setAccessKey(configuredAccessKey)
                .setSecretKey(awsConfig.getSecretKey())
                .build();
    }
    if (!StringUtil.isNullOrEmptyAfterTrim(ec2IamRole)) {
        return fetchCredentialsFromEc2();
    }
    if (environment.isRunningOnEcs()) {
        return fetchCredentialsFromEcs();
    }
    throw new NoCredentialsException();
}
/** With no configured keys, credentials must be fetched via the default EC2 IAM role. */
@Test
public void credentialsDefaultEc2IamRole() {
    // given
    String iamRole = "sample-iam-role";
    AwsConfig awsConfig = AwsConfig.builder().build();
    given(awsMetadataApi.defaultIamRoleEc2()).willReturn(iamRole);
    given(awsMetadataApi.credentialsEc2(iamRole)).willReturn(CREDENTIALS);
    given(environment.isRunningOnEcs()).willReturn(false);
    AwsCredentialsProvider credentialsProvider = new AwsCredentialsProvider(awsConfig, awsMetadataApi, environment);
    // when
    AwsCredentials credentials = credentialsProvider.credentials();
    // then
    assertEquals(CREDENTIALS, credentials);
}
/** Removes all entries from both the header map and the stored original response headers. */
public void clear() {
    header.clear();
    originalResponseHeader.clear();
}
/** clear() must empty both header maps and reset toString to the empty form. */
@Test
void testClear() {
    Header header = Header.newInstance();
    header.addOriginalResponseHeader("test", Collections.singletonList("test"));
    // newInstance pre-populates default headers, plus the one just added.
    assertEquals(3, header.getHeader().size());
    assertEquals(1, header.getOriginalResponseHeader().size());
    header.clear();
    assertEquals(0, header.getHeader().size());
    assertEquals(0, header.getOriginalResponseHeader().size());
    assertEquals("Header{headerToMap={}}", header.toString());
}
/**
 * Tags the given scoped span from the input. Both arguments must be non-null;
 * a noop span is skipped without parsing.
 */
public final void tag(I input, ScopedSpan span) {
    if (input == null) throw new NullPointerException("input == null");
    if (span == null) throw new NullPointerException("span == null");
    if (span.isNoop()) return;
    tag(span, input, span.context());
}
/** Tagging a live scoped span must parse exactly once and write exactly one tag. */
@Test
void tag_scopedSpan() {
    when(parseValue.apply(input, context)).thenReturn("value");
    tag.tag(input, scopedSpan);
    verify(scopedSpan).isNoop();
    verify(scopedSpan).context();
    verify(parseValue).apply(input, context);
    verifyNoMoreInteractions(parseValue); // doesn't parse twice
    verify(scopedSpan).tag("key", "value");
    verifyNoMoreInteractions(scopedSpan); // doesn't tag twice
}
/**
 * Builds a fully wired FEEL 1.1 parser for the given source: lexer/token stream,
 * custom error handling (console listener removed, events routed to the manager),
 * additional functions registered in the built-in scope, input variables pre-declared,
 * and an optional type registry installed on the helper.
 *
 * @return a parser ready to parse {@code source}
 */
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source,
        Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables,
        Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles,
        FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
    CommonTokenStream tokens = new CommonTokenStream( lexer );
    FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
    ParserHelper parserHelper = new ParserHelper(eventsManager);
    // Make the extra functions resolvable from the built-in scope.
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler( new FEELErrorHandler() );
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
    // pre-loads the parser with symbols
    defineVariables( inputVariableTypes, inputVariables, parser );
    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
/**
 * Parses a context containing a function definition and checks the resulting AST:
 * entry name, function text, formal parameter (with apostrophe), non-external flag
 * and infix-op body.
 */
@Test
void functionDefinition() {
    String inputExpression = "{ is minor : function( person's age ) person's age < 21 }";
    BaseNode ctxbase = parse( inputExpression );
    assertThat( ctxbase).isInstanceOf(ContextNode.class);
    assertThat( ctxbase.getText()).isEqualTo(inputExpression);
    ContextNode ctx = (ContextNode) ctxbase;
    assertThat( ctx.getEntries()).hasSize(1);
    ContextEntryNode entry = ctx.getEntries().get( 0 );
    assertThat( entry.getName()).isInstanceOf(NameDefNode.class);
    NameDefNode name = (NameDefNode) entry.getName();
    assertThat( name.getText()).isEqualTo("is minor");
    assertThat( entry.getValue()).isInstanceOf(FunctionDefNode.class);
    assertThat( entry.getValue().getText()).isEqualTo("function( person's age ) person's age < 21");
    FunctionDefNode isMinorFunc = (FunctionDefNode) entry.getValue();
    assertThat( isMinorFunc.getFormalParameters()).hasSize(1);
    assertThat( isMinorFunc.getFormalParameters().get( 0 ).getText()).isEqualTo( "person's age");
    assertThat( isMinorFunc.isExternal()).isEqualTo(false);
    assertThat( isMinorFunc.getBody()).isInstanceOf(InfixOpNode.class);
}
@Override
protected void set(String key, String value) {
    // Reject null key/value up front, then store the value with surrounding
    // whitespace trimmed away.
    String safeKey = requireNonNull(key, "key can't be null");
    String trimmedValue = requireNonNull(value, "value can't be null").trim();
    props.put(safeKey, trimmedValue);
}
@Test
public void set_accepts_empty_value_and_trims_it() {
    // A blank (whitespace-only) value must be accepted and trimmed down to the
    // empty string on read-back.
    MapSettings settings = new MapSettings();
    String key = randomAlphanumeric(3);
    settings.set(key, blank(new Random()));
    assertThat(settings.getString(key)).isEmpty();
}
@Override public int compareTo(Path other) { if (!(other instanceof GcsPath)) { throw new ClassCastException(); } GcsPath path = (GcsPath) other; int b = bucket.compareTo(path.bucket); if (b != 0) { return b; } // Compare a component at a time, so that the separator char doesn't // get compared against component contents. Eg, "a/b" < "a-1/b". Iterator<Path> left = iterator(); Iterator<Path> right = path.iterator(); while (left.hasNext() && right.hasNext()) { String leftStr = left.next().toString(); String rightStr = right.next().toString(); int c = leftStr.compareTo(rightStr); if (c != 0) { return c; } } if (!left.hasNext() && !right.hasNext()) { return 0; } else { return left.hasNext() ? 1 : -1; } }
@Test
public void testCompareTo() {
    // Same bucket, different objects.
    GcsPath a = GcsPath.fromComponents("bucket", "a");
    GcsPath b = GcsPath.fromComponents("bucket", "b");
    // Same object name, different bucket.
    GcsPath b2 = GcsPath.fromComponents("bucket2", "b");
    // Paths without a bucket component.
    GcsPath brel = GcsPath.fromComponents(null, "b");
    GcsPath a2 = GcsPath.fromComponents("bucket", "a");
    GcsPath arel = GcsPath.fromComponents(null, "a");

    // Object names order within a bucket; equal paths compare as 0 and share a
    // hash code, unequal paths do not.
    assertThat(a.compareTo(b), Matchers.lessThan(0));
    assertThat(b.compareTo(a), Matchers.greaterThan(0));
    assertThat(a.compareTo(a2), Matchers.equalTo(0));
    assertThat(a.hashCode(), Matchers.equalTo(a2.hashCode()));
    assertThat(a.hashCode(), Matchers.not(Matchers.equalTo(b.hashCode())));
    assertThat(b.hashCode(), Matchers.not(Matchers.equalTo(brel.hashCode())));

    // Bucket-less paths sort before bucketed paths with the same object name.
    assertThat(brel.compareTo(b), Matchers.lessThan(0));
    assertThat(b.compareTo(brel), Matchers.greaterThan(0));
    assertThat(arel.compareTo(brel), Matchers.lessThan(0));
    assertThat(brel.compareTo(arel), Matchers.greaterThan(0));

    // Buckets order first, before object names.
    assertThat(b.compareTo(b2), Matchers.lessThan(0));
    assertThat(b2.compareTo(b), Matchers.greaterThan(0));
}
/**
 * Returns a copy of this search in which every query lacking explicit streams
 * has the supplied default streams added to its filter. Returns this instance
 * unchanged when all queries already carry streams.
 *
 * @throws MissingStreamPermissionException when the default stream set is empty
 */
public Search addStreamsToQueriesWithoutStreams(Supplier<Set<String>> defaultStreamsSupplier) {
    // Fast path: nothing to patch.
    if (!hasQueriesWithoutStreams()) {
        return this;
    }

    final Set<Query> queriesWithStreams = queries().stream().filter(Query::hasStreams).collect(toSet());
    final Set<Query> queriesNeedingStreams = Sets.difference(queries(), queriesWithStreams);

    final Set<String> defaultStreams = defaultStreamsSupplier.get();
    if (defaultStreams.isEmpty()) {
        // Without access to any stream the search cannot be scoped safely.
        throw new MissingStreamPermissionException("User doesn't have access to any streams",
                Collections.emptySet());
    }

    final Set<Query> patchedQueries = queriesNeedingStreams.stream()
            .map(query -> query.addStreamsToFilter(defaultStreams))
            .collect(toSet());

    return toBuilder()
            .queries(Sets.union(queriesWithStreams, patchedQueries).immutableCopy())
            .build();
}
@Test
void doesNothingIfAllQueriesHaveDefinedStreams() {
    // When every query already carries streams, the defaults are never applied
    // and the original search instance is returned unchanged.
    Search search = searchWithQueriesWithStreams("a,b,c", "a,d,f");
    Search result = search.addStreamsToQueriesWithoutStreams(() -> ImmutableSet.of("one", "two", "three"));
    assertThat(search).isEqualTo(result);
}
/**
 * Wraps rendered HTML in a {@code <div class="markdown-body">} container so
 * markdown-specific CSS rules apply to it.
 *
 * @param html the rendered HTML fragment; a {@code null} argument is
 *             concatenated as the literal text "null" (unchanged behavior)
 * @return the fragment surrounded by the markdown-body div, with newlines
 *         separating the div tags from the content
 */
public static String wrapWithMarkdownClassDiv(String html) {
    // Plain concatenation is clearer than a StringBuilder for a fixed,
    // three-part join; the compiler produces equivalent code.
    return "<div class=\"markdown-body\">\n" + html + "\n</div>";
}
@Test
void testUnorderedList() {
    // Three unordered-list marker styles (*, -, +) on consecutive lines.
    String input = new StringBuilder()
            .append("* Unordered list can use asterisks\n")
            .append("- Or minuses\n")
            .append("+ Or pluses")
            .toString();
    // Each marker change starts a new <ul> rather than continuing the previous list.
    String expected = new StringBuilder()
            .append("<ul>\n")
            .append("<li>Unordered list can use asterisks</li>\n")
            .append("</ul>\n")
            .append("<ul>\n")
            .append("<li>Or minuses</li>\n")
            .append("</ul>\n")
            .append("<ul>\n")
            .append("<li>Or pluses</li>\n")
            .append("</ul>\n")
            .toString();
    InterpreterResult result = md.interpret(input, null);
    // The interpreter output is wrapped in the markdown-body div.
    assertEquals(wrapWithMarkdownClassDiv(expected), result.message().get(0).getData());
}
@Override public void importData(JsonReader reader) throws IOException { logger.info("Reading configuration for 1.0"); // this *HAS* to start as an object reader.beginObject(); while (reader.hasNext()) { JsonToken tok = reader.peek(); switch (tok) { case NAME: String name = reader.nextName(); // find out which member it is if (name.equals(CLIENTS)) { readClients(reader); } else if (name.equals(GRANTS)) { readGrants(reader); } else if (name.equals(WHITELISTEDSITES)) { readWhitelistedSites(reader); } else if (name.equals(BLACKLISTEDSITES)) { readBlacklistedSites(reader); } else if (name.equals(AUTHENTICATIONHOLDERS)) { readAuthenticationHolders(reader); } else if (name.equals(ACCESSTOKENS)) { readAccessTokens(reader); } else if (name.equals(REFRESHTOKENS)) { readRefreshTokens(reader); } else if (name.equals(SYSTEMSCOPES)) { readSystemScopes(reader); } else { for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { if (extension.supportsVersion(THIS_VERSION)) { extension.importExtensionData(name, reader); break; } } } // unknown token, skip it reader.skipValue(); } break; case END_OBJECT: // the object ended, we're done here reader.endObject(); continue; default: logger.debug("Found unexpected entry"); reader.skipValue(); continue; } } fixObjectReferences(); for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { extension.fixExtensionObjectReferences(maps); break; } } maps.clearAll(); }
@Test
public void testImportRefreshTokens() throws IOException, ParseException {
    // Expected token #1, mirroring the first entry of the JSON document below.
    Date expirationDate1 = formatter.parse("2014-09-10T22:49:44.090+00:00", Locale.ENGLISH);
    ClientDetailsEntity mockedClient1 = mock(ClientDetailsEntity.class);
    when(mockedClient1.getClientId()).thenReturn("mocked_client_1");
    AuthenticationHolderEntity mockedAuthHolder1 = mock(AuthenticationHolderEntity.class);
    when(mockedAuthHolder1.getId()).thenReturn(1L);
    OAuth2RefreshTokenEntity token1 = new OAuth2RefreshTokenEntity();
    token1.setId(1L);
    token1.setClient(mockedClient1);
    token1.setExpiration(expirationDate1);
    token1.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJmOTg4OWQyOS0xMTk1LTQ4ODEtODgwZC1lZjVlYzAwY2Y4NDIifQ."));
    token1.setAuthenticationHolder(mockedAuthHolder1);

    // Expected token #2, mirroring the second entry of the JSON document below.
    Date expirationDate2 = formatter.parse("2015-01-07T18:31:50.079+00:00", Locale.ENGLISH);
    ClientDetailsEntity mockedClient2 = mock(ClientDetailsEntity.class);
    when(mockedClient2.getClientId()).thenReturn("mocked_client_2");
    AuthenticationHolderEntity mockedAuthHolder2 = mock(AuthenticationHolderEntity.class);
    when(mockedAuthHolder2.getId()).thenReturn(2L);
    OAuth2RefreshTokenEntity token2 = new OAuth2RefreshTokenEntity();
    token2.setId(2L);
    token2.setClient(mockedClient2);
    token2.setExpiration(expirationDate2);
    token2.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJlYmEyYjc3My0xNjAzLTRmNDAtOWQ3MS1hMGIxZDg1OWE2MDAifQ."));
    token2.setAuthenticationHolder(mockedAuthHolder2);

    // Importable document: every section is empty except REFRESHTOKENS, which
    // carries the two tokens modeled above.
    String configJson = "{" +
            "\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
            "\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
            "\"" + MITREidDataService.CLIENTS + "\": [], " +
            "\"" + MITREidDataService.GRANTS + "\": [], " +
            "\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
            "\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
            "\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
            "\"" + MITREidDataService.REFRESHTOKENS + "\": [" +
            "{\"id\":1,\"clientId\":\"mocked_client_1\",\"expiration\":\"2014-09-10T22:49:44.090+00:00\"," +
            "\"authenticationHolderId\":1,\"value\":\"eyJhbGciOiJub25lIn0.eyJqdGkiOiJmOTg4OWQyOS0xMTk1LTQ4ODEtODgwZC1lZjVlYzAwY2Y4NDIifQ.\"}," +
            "{\"id\":2,\"clientId\":\"mocked_client_2\",\"expiration\":\"2015-01-07T18:31:50.079+00:00\"," +
            "\"authenticationHolderId\":2,\"value\":\"eyJhbGciOiJub25lIn0.eyJqdGkiOiJlYmEyYjc3My0xNjAzLTRmNDAtOWQ3MS1hMGIxZDg1OWE2MDAifQ.\"}" +
            " ]" +
            "}";

    System.err.println(configJson);
    JsonReader reader = new JsonReader(new StringReader(configJson));

    // In-memory stand-in for the token store: save assigns ids and records the
    // entity; lookups read it back.
    final Map<Long, OAuth2RefreshTokenEntity> fakeDb = new HashMap<>();
    when(tokenRepository.saveRefreshToken(isA(OAuth2RefreshTokenEntity.class))).thenAnswer(new Answer<OAuth2RefreshTokenEntity>() {
        Long id = 343L;
        @Override
        public OAuth2RefreshTokenEntity answer(InvocationOnMock invocation) throws Throwable {
            OAuth2RefreshTokenEntity _token = (OAuth2RefreshTokenEntity) invocation.getArguments()[0];
            if(_token.getId() == null) {
                _token.setId(id++);
            }
            fakeDb.put(_token.getId(), _token);
            return _token;
        }
    });
    when(tokenRepository.getRefreshTokenById(anyLong())).thenAnswer(new Answer<OAuth2RefreshTokenEntity>() {
        @Override
        public OAuth2RefreshTokenEntity answer(InvocationOnMock invocation) throws Throwable {
            Long _id = (Long) invocation.getArguments()[0];
            return fakeDb.get(_id);
        }
    });
    // Client lookups fabricate a mock carrying the requested client id.
    when(clientRepository.getClientByClientId(anyString())).thenAnswer(new Answer<ClientDetailsEntity>() {
        @Override
        public ClientDetailsEntity answer(InvocationOnMock invocation) throws Throwable {
            String _clientId = (String) invocation.getArguments()[0];
            ClientDetailsEntity _client = mock(ClientDetailsEntity.class);
            when(_client.getClientId()).thenReturn(_clientId);
            return _client;
        }
    });
    // Auth-holder lookups by null id fabricate mocks with fresh, increasing ids.
    when(authHolderRepository.getById(isNull(Long.class))).thenAnswer(new Answer<AuthenticationHolderEntity>() {
        Long id = 678L;
        @Override
        public AuthenticationHolderEntity answer(InvocationOnMock invocation) throws Throwable {
            AuthenticationHolderEntity _auth = mock(AuthenticationHolderEntity.class);
            when(_auth.getId()).thenReturn(id);
            id++;
            return _auth;
        }
    });

    dataService.importData(reader);

    //2 times for token, 2 times to update client, 2 times to update authHolder
    verify(tokenRepository, times(6)).saveRefreshToken(capturedRefreshTokens.capture());

    List<OAuth2RefreshTokenEntity> savedRefreshTokens = new ArrayList(fakeDb.values()); //capturedRefreshTokens.getAllValues();
    Collections.sort(savedRefreshTokens, new refreshTokenIdComparator());

    // Both tokens must round-trip with client id, expiration and JWT value intact.
    assertThat(savedRefreshTokens.size(), is(2));
    assertThat(savedRefreshTokens.get(0).getClient().getClientId(), equalTo(token1.getClient().getClientId()));
    assertThat(savedRefreshTokens.get(0).getExpiration(), equalTo(token1.getExpiration()));
    assertThat(savedRefreshTokens.get(0).getValue(), equalTo(token1.getValue()));
    assertThat(savedRefreshTokens.get(1).getClient().getClientId(), equalTo(token2.getClient().getClientId()));
    assertThat(savedRefreshTokens.get(1).getExpiration(), equalTo(token2.getExpiration()));
    assertThat(savedRefreshTokens.get(1).getValue(), equalTo(token2.getValue()));
}
@Override
public void write(T record) {
    recordConsumer.startMessage();
    try {
        messageWriter.writeTopLevelMessage(record);
    } catch (RuntimeException e) {
        // Resolve a builder to its concrete message so the log shows the
        // actual payload that failed; rethrow to preserve the failure.
        Message failed;
        if (record instanceof Message.Builder) {
            failed = ((Message.Builder) record).build();
        } else {
            failed = (Message) record;
        }
        LOG.error("Cannot write message {}: {}", e.getMessage(), failed);
        throw e;
    }
    recordConsumer.endMessage();
}
@Test
public void testRepeatedInnerMessageMessage_scalar() throws Exception {
    RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
    ProtoWriteSupport<TestProtobuf.TopMessage> instance = createReadConsumerInstance(TestProtobuf.TopMessage.class, readConsumerMock);

    // Two repeated inner messages, each setting a different scalar field.
    TestProtobuf.TopMessage.Builder msg = TestProtobuf.TopMessage.newBuilder();
    msg.addInnerBuilder().setOne("one");
    msg.addInnerBuilder().setTwo("two");

    instance.write(msg.build());

    // The repeated field "inner" must be written as a single field containing
    // two groups, in insertion order.
    InOrder inOrder = Mockito.inOrder(readConsumerMock);
    inOrder.verify(readConsumerMock).startMessage();
    inOrder.verify(readConsumerMock).startField("inner", 0);

    // first inner message
    inOrder.verify(readConsumerMock).startGroup();
    inOrder.verify(readConsumerMock).startField("one", 0);
    inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("one".getBytes()));
    inOrder.verify(readConsumerMock).endField("one", 0);
    inOrder.verify(readConsumerMock).endGroup();

    // second inner message
    inOrder.verify(readConsumerMock).startGroup();
    inOrder.verify(readConsumerMock).startField("two", 1);
    inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("two".getBytes()));
    inOrder.verify(readConsumerMock).endField("two", 1);
    inOrder.verify(readConsumerMock).endGroup();

    inOrder.verify(readConsumerMock).endField("inner", 0);
    inOrder.verify(readConsumerMock).endMessage();
    // No calls other than the ones verified above are allowed.
    Mockito.verifyNoMoreInteractions(readConsumerMock);
}
/**
 * Returns the effective call arguments of an invocation. For generic calls
 * ($invoke / $invokeAsync) the real arguments are packed as the third
 * parameter (methodName, parameterTypes, arguments) and are unwrapped here;
 * all other invocations return their arguments as-is.
 */
public static Object[] getArguments(Invocation invocation) {
    String methodName = invocation.getMethodName();
    boolean genericCall = $INVOKE.equals(methodName) || $INVOKE_ASYNC.equals(methodName);
    Object[] args = invocation.getArguments();
    if (genericCall && args != null && args.length > 2 && args[2] instanceof Object[]) {
        return (Object[]) args[2];
    }
    return args;
}
@Test
void testGet_$invokeAsync_Arguments() {
    // The real call arguments are packed as the third parameter of $invokeAsync.
    Object[] args = new Object[] {"hello", "dubbo", 520};
    Class<?> demoServiceClass = DemoService.class;
    String serviceName = demoServiceClass.getName();
    Invoker invoker = createMockInvoker();
    RpcInvocation inv = new RpcInvocation(
            "$invokeAsync",
            serviceName,
            "",
            new Class<?>[] {String.class, String[].class, Object[].class},
            new Object[] {"method", new String[] {}, args},
            null,
            invoker,
            null);
    // getArguments must unwrap the packed array instead of returning the raw
    // generic-invocation parameters; values and runtime types must match.
    Object[] arguments = RpcUtils.getArguments(inv);
    for (int i = 0; i < args.length; i++) {
        Assertions.assertNotNull(arguments[i]);
        Assertions.assertEquals(
                args[i].getClass().getName(), arguments[i].getClass().getName());
        Assertions.assertEquals(args[i], arguments[i]);
    }
}
/**
 * Checks whether any UTM key from {@code UTM_MAP} has a non-empty value in the
 * application's manifest meta-data.
 *
 * @param context Android context used to read the meta-data; may be null
 * @return true if at least one UTM meta-data value is present and non-empty
 */
public static boolean hasUtmByMetaData(Context context) {
    if (context == null) {
        return false;
    }
    // Only the keys are needed; iterating entrySet() with a per-entry null
    // check was redundant — entrySet() never yields null entries.
    for (String utmKey : UTM_MAP.keySet()) {
        String utmValue = getApplicationMetaData(context, utmKey);
        if (!TextUtils.isEmpty(utmValue)) {
            return true;
        }
    }
    return false;
}
@Test
public void hasUtmByMetaData() {
    // The test application declares no UTM meta-data, so the lookup must report false.
    Assert.assertFalse(ChannelUtils.hasUtmByMetaData(mApplication));
}
/**
 * Analyzes the given statement, delegating to {@link #analyze(Statement, boolean)}
 * with the boolean flag set to false.
 * NOTE(review): the flag's meaning is defined by the two-argument overload,
 * which is not visible here — confirm before documenting it further.
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
@Test
void testAggregationWithOrderBy() {
    // ORDER BY inside an aggregate is accepted when it references aggregate inputs.
    analyze("SELECT array_agg(DISTINCT x ORDER BY x) FROM (VALUES (1, 2), (3, 4)) t(x, y)");
    analyze("SELECT array_agg(x ORDER BY y) FROM (VALUES (1, 2), (3, 4)) t(x, y)");
    // DISTINCT aggregates may only order by the aggregated expression itself.
    assertFails(ORDER_BY_MUST_BE_IN_AGGREGATE, "SELECT array_agg(DISTINCT x ORDER BY y) FROM (VALUES (1, 2), (3, 4)) t(x, y)");
    // ORDER BY is only valid on aggregation functions (abs is scalar).
    assertFails(MUST_BE_AGGREGATION_FUNCTION, "SELECT abs(x ORDER BY y) FROM (VALUES (1, 2), (3, 4)) t(x, y)");
    // Ordering by a map column is a type error.
    assertFails(TYPE_MISMATCH, "SELECT array_agg(x ORDER BY x) FROM (VALUES MAP(ARRAY['a'], ARRAY['b'])) t(x)");
    // The ORDER BY expression must resolve against the input scope, not output aliases.
    assertFails(MISSING_ATTRIBUTE, "SELECT 1 as a, array_agg(x ORDER BY a) FROM (VALUES (1), (2), (3)) t(x)");
    // Output attributes are not visible inside an aggregation's ORDER BY.
    assertFails(REFERENCE_TO_OUTPUT_ATTRIBUTE_WITHIN_ORDER_BY_AGGREGATION, "SELECT 1 AS c FROM (VALUES (1), (2)) t(x) ORDER BY sum(x order by c)");
}
/**
 * Rewrites the given expression by running it through the configured rewriter.
 * The unchecked cast is safe only if the rewriter returns the same expression
 * subtype it was given — NOTE(review): confirm that invariant holds for all
 * visitor implementations.
 */
@SuppressWarnings("unchecked")
public <T extends Expression> T rewrite(final T expression, final C context) {
    return (T) rewriter.process(expression, context);
}
@Test
public void shouldRewriteQualifiedColumnReference() {
    // Given: a column reference qualified with its source name.
    final QualifiedColumnReferenceExp expression = new QualifiedColumnReferenceExp(
        SourceName.of("bar"),
        ColumnName.of("foo")
    );

    // When:
    final Expression rewritten = expressionRewriter.rewrite(expression, context);

    // Then: the rewriter produces an expression equal to the original.
    assertThat(rewritten, is(expression));
}
/**
 * Scans for resources addressed by the given URI. {@code classpath:} URIs are
 * resolved through the classpath scanner; any other scheme is treated as a
 * plain URI resource.
 *
 * @param classpathResourceUri the URI to resolve; must not be null
 * @return the matching resources
 */
public List<R> scanForResourcesUri(URI classpathResourceUri) {
    requireNonNull(classpathResourceUri, "classpathResourceUri must not be null");
    if (!CLASSPATH_SCHEME.equals(classpathResourceUri.getScheme())) {
        return findResourcesForUri(classpathResourceUri, DEFAULT_PACKAGE_NAME, NULL_FILTER, createUriResource());
    }
    return scanForClasspathResource(resourceName(classpathResourceUri), NULL_FILTER);
}
@Test
void scanForResourcesClasspathUri() {
    // A classpath: URI pointing at a single known resource must be found verbatim.
    URI uri = URI.create("classpath:io/cucumber/core/resource/test/resource.txt");
    List<URI> resources = resourceScanner.scanForResourcesUri(uri);
    assertThat(resources, contains(uri));
}
/**
 * Reads bytes from the channel's current position into the buffer, advancing
 * the position by the number of bytes read.
 *
 * <p>The read is performed under the file's read lock (acquired interruptibly)
 * and is bracketed by beginBlocking()/endBlocking() to implement the
 * interruptible-channel protocol. Reading also refreshes the file's
 * last-access time.
 *
 * @return the number of bytes read, 0 if interrupted or closed mid-operation,
 *         or -1 at end of file
 */
@Override
public int read(ByteBuffer dst) throws IOException {
    checkNotNull(dst);
    checkOpen();
    checkReadable();
    int read = 0; // will definitely either be assigned or an exception will be thrown
    synchronized (this) {
        boolean completed = false;
        try {
            // A false return means the channel was closed asynchronously.
            if (!beginBlocking()) {
                return 0; // AsynchronousCloseException will be thrown
            }
            file.readLock().lockInterruptibly();
            try {
                read = file.read(position, dst);
                if (read != -1) {
                    // Only advance past data actually read, never past EOF.
                    position += read;
                }
                file.setLastAccessTime(fileSystemState.now());
                completed = true;
            } finally {
                file.readLock().unlock();
            }
        } catch (InterruptedException e) {
            // Re-assert the interrupt; endBlocking(false) reports the aborted read.
            Thread.currentThread().interrupt();
        } finally {
            endBlocking(completed);
        }
    }
    return read;
}
@Test
public void testRead() throws IOException {
    RegularFile file = regularFile(20);
    FileChannel channel = channel(file, READ);
    assertEquals(0, channel.position());

    ByteBuffer buf = buffer("1234567890");
    ByteBuffer buf2 = buffer("123457890");

    // Single-buffer read advances the position by the bytes read.
    assertEquals(10, channel.read(buf));
    assertEquals(10, channel.position());

    buf.flip();
    // Scattering read across two buffers.
    assertEquals(10, channel.read(new ByteBuffer[] {buf, buf2}));
    assertEquals(20, channel.position());

    buf.flip();
    buf2.flip();
    // Extend the file, then do a scattering read with explicit offset/length.
    file.write(20, new byte[10], 0, 10);
    assertEquals(10, channel.read(new ByteBuffer[] {buf, buf2}, 0, 2));
    assertEquals(30, channel.position());

    buf.flip();
    // Positional read does not move the channel's position.
    assertEquals(10, channel.read(buf, 5));
    assertEquals(30, channel.position());

    buf.flip();
    // At end of file the read reports -1 and the position stays put.
    assertEquals(-1, channel.read(buf));
    assertEquals(30, channel.position());
}
/**
 * Returns all decorators that apply globally, i.e. those whose stream field is
 * either absent from the document or stored as an empty Optional.
 */
@Override
public List<Decorator> findForGlobal() {
    return toInterfaceList(coll.find(DBQuery.or(
            DBQuery.notExists(DecoratorImpl.FIELD_STREAM),
            DBQuery.is(DecoratorImpl.FIELD_STREAM, Optional.empty())
    )).toArray());
}
@Test
@MongoDBFixtures("DecoratorServiceImplTest.json")
public void findForGlobalReturnsDecoratorForGlobalStream() {
    // The fixture contains exactly one decorator that is not bound to a stream;
    // only that one should be reported as global.
    assertThat(decoratorService.findForGlobal())
            .hasSize(1)
            .extracting(Decorator::id)
            .containsOnly("588bcafebabedeadbeef0003");
}
@Udf
public <T> List<T> mapValues(final Map<String, T> input) {
    // A null map yields a null result rather than an empty list; otherwise the
    // map's values (including nulls) are copied into a fresh mutable list.
    return input == null ? null : Lists.newArrayList(input.values());
}
@Test
public void shouldReturnNullsFromMapWithNulls() {
    // Null keys and null values must survive the extraction: three entries in,
    // three values out (order is not significant).
    final Map<String, Integer> input = Maps.newHashMap();
    input.put("foo", 1);
    input.put(null, null);
    input.put("bar", null);
    List<Integer> result = udf.mapValues(input);
    assertThat(result, containsInAnyOrder(1, null, null));
}
static public boolean createMissingParentDirectories(File file) { File parent = file.getParentFile(); if (parent == null) { // Parent directory not specified, therefore it's a request to // create nothing. Done! ;) return true; } // File.mkdirs() creates the parent directories only if they don't // already exist; and it's okay if they do. parent.mkdirs(); return parent.exists(); }
@Test
public void createParentDirAcceptsNoParentSpecified() {
    // A bare file name has no parent directory component; creating "nothing"
    // counts as success.
    File orphan = new File("testing.txt");
    assertTrue(FileUtil.createMissingParentDirectories(orphan));
}
/**
 * Takes the next task to run, blocking until one is available.
 *
 * <p>When no scheduled task is pending, this blocks indefinitely on the plain
 * task queue. When a scheduled task is pending, the wait on the task queue is
 * bounded by that task's remaining delay so due scheduled work is not starved.
 *
 * @return the next task, or null when the bounded poll was interrupted
 */
Runnable takeTask() {
    BlockingQueue<Runnable> taskQueue = this.taskQueue;
    for (;;) {
        ScheduledFutureTask<?> scheduledTask = peekScheduledTask();
        if (scheduledTask == null) {
            // No scheduled work: block until a plain task arrives.
            Runnable task = null;
            try {
                task = taskQueue.take();
            } catch (InterruptedException e) {
                // Ignore
            }
            return task;
        } else {
            long delayNanos = scheduledTask.delayNanos();
            Runnable task = null;
            if (delayNanos > 0) {
                // Wait for a plain task, but only until the scheduled task is due.
                try {
                    task = taskQueue.poll(delayNanos, TimeUnit.NANOSECONDS);
                } catch (InterruptedException e) {
                    // Waken up.
                    return null;
                }
            }
            if (task == null) {
                // We need to fetch the scheduled tasks now as otherwise there may be a chance that
                // scheduled tasks are never executed if there is always one task in the taskQueue.
                // This is for example true for the read task of OIO Transport
                // See https://github.com/netty/netty/issues/1614
                fetchFromScheduledTaskQueue();
                task = taskQueue.poll();
            }
            if (task != null) {
                return task;
            }
        }
    }
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testTakeTask() throws Exception {
    //add task
    TestRunnable beforeTask = new TestRunnable(0);
    e.execute(beforeTask);

    //add scheduled task
    TestRunnable scheduledTask = new TestRunnable(0);
    ScheduledFuture<?> f = e.schedule(scheduledTask , 1500, TimeUnit.MILLISECONDS);

    //add task
    TestRunnable afterTask = new TestRunnable(0);
    e.execute(afterTask);

    // Wait for the scheduled task to complete.
    f.sync();

    // Both plain tasks and the scheduled task must have run — scheduled work
    // must not be starved by tasks queued before and after it.
    assertThat(beforeTask.ran.get(), is(true));
    assertThat(scheduledTask.ran.get(), is(true));
    assertThat(afterTask.ran.get(), is(true));
}
/**
 * Stores the value under the key with a time-to-live, blocking until the
 * asynchronous operation completes.
 *
 * @return the result of the async put — presumably the previous value mapped
 *         to the key, per the async counterpart's contract (TODO confirm
 *         against putAsync)
 */
@Override
public V put(K key, V value, Duration ttl) {
    return get(putAsync(key, value, ttl));
}
@Test
public void testRemoveValueFail() {
    RMapCacheNative<SimpleKey, SimpleValue> map = redisson.getMapCacheNative("simple");
    map.put(new SimpleKey("1"), new SimpleValue("2"));

    // remove(key, value) must fail when the key does not exist...
    boolean res = map.remove(new SimpleKey("2"), new SimpleValue("1"));
    Assertions.assertFalse(res);

    // ...and when the key exists but is mapped to a different value.
    boolean res1 = map.remove(new SimpleKey("1"), new SimpleValue("3"));
    Assertions.assertFalse(res1);

    // The original mapping must be left intact.
    SimpleValue val1 = map.get(new SimpleKey("1"));
    Assertions.assertEquals("2", val1.getValue());

    map.destroy();
}
/**
 * Database-backed extension search: filters all active extensions by namespace
 * exclusions, target platform, category and query text, sorts them by the
 * requested criterion, applies the sort order and paging, and wraps the result
 * in SearchHits. Results are cached per options; the average-review-rating
 * cache is evicted on each call.
 */
@Transactional
@Cacheable(CACHE_DATABASE_SEARCH)
@CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true)
public SearchHits<ExtensionSearch> search(ISearchService.Options options) {
    // grab all extensions
    var matchingExtensions = repositories.findAllActiveExtensions();

    // no extensions in the database
    if (matchingExtensions.isEmpty()) {
        return new SearchHitsImpl<>(0,TotalHitsRelation.OFF, 0f, null, null, Collections.emptyList(), null, null);
    }

    // exclude namespaces
    if(options.namespacesToExclude != null) {
        for(var namespaceToExclude : options.namespacesToExclude) {
            matchingExtensions = matchingExtensions.filter(extension -> !extension.getNamespace().getName().equals(namespaceToExclude));
        }
    }

    // filter target platform: keep extensions with at least one version for it
    if(TargetPlatform.isValid(options.targetPlatform)) {
        matchingExtensions = matchingExtensions.filter(extension -> extension.getVersions().stream().anyMatch(ev -> ev.getTargetPlatform().equals(options.targetPlatform)));
    }

    // filter category (case-insensitive, against the latest version's categories)
    if (options.category != null) {
        matchingExtensions = matchingExtensions.filter(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            return latest.getCategories().stream().anyMatch(category -> category.equalsIgnoreCase(options.category));
        });
    }

    // filter text: match name, namespace, description or display name
    if (options.queryString != null) {
        matchingExtensions = matchingExtensions.filter(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            return extension.getName().toLowerCase().contains(options.queryString.toLowerCase())
                    || extension.getNamespace().getName().contains(options.queryString.toLowerCase())
                    || (latest.getDescription() != null && latest.getDescription()
                            .toLowerCase().contains(options.queryString.toLowerCase()))
                    || (latest.getDisplayName() != null && latest.getDisplayName()
                            .toLowerCase().contains(options.queryString.toLowerCase()));
        });
    }

    // need to perform the sortBy ()
    // 'relevance' | 'timestamp' | 'rating' | 'downloadCount';
    Stream<ExtensionSearch> searchEntries;
    if("relevance".equals(options.sortBy) || "rating".equals(options.sortBy)) {
        // Relevance/rating entries carry extra scoring data.
        var searchStats = new SearchStats(repositories);
        searchEntries = matchingExtensions.stream().map(extension -> relevanceService.toSearchEntry(extension, searchStats));
    } else {
        searchEntries = matchingExtensions.stream().map(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            var targetPlatforms = repositories.findExtensionTargetPlatforms(extension);
            return extension.toSearch(latest, targetPlatforms);
        });
    }

    // One comparator per supported sort criterion; unknown criteria leave the
    // stream unsorted.
    var comparators = new HashMap<>(Map.of(
            "relevance", new RelevanceComparator(),
            "timestamp", new TimestampComparator(),
            "rating", new RatingComparator(),
            "downloadCount", new DownloadedCountComparator()
    ));

    var comparator = comparators.get(options.sortBy);
    if(comparator != null) {
        searchEntries = searchEntries.sorted(comparator);
    }

    var sortedExtensions = searchEntries.collect(Collectors.toList());

    // need to do sortOrder
    // 'asc' | 'desc';
    if ("desc".equals(options.sortOrder)) {
        // reverse the order
        Collections.reverse(sortedExtensions);
    }

    // Paging: clamp offset/size to the available result range.
    var totalHits = sortedExtensions.size();
    var endIndex = Math.min(sortedExtensions.size(), options.requestedOffset + options.requestedSize);
    var startIndex = Math.min(endIndex, options.requestedOffset);
    sortedExtensions = sortedExtensions.subList(startIndex, endIndex);

    List<SearchHit<ExtensionSearch>> searchHits;
    if (sortedExtensions.isEmpty()) {
        searchHits = Collections.emptyList();
    } else {
        // client is interested only in the extension IDs
        searchHits = sortedExtensions.stream().map(extensionSearch -> new SearchHit<>(null, null, null, 0.0f, null, null, null, null, null, null, extensionSearch)).collect(Collectors.toList());
    }

    return new SearchHitsImpl<>(totalHits, TotalHitsRelation.OFF, 0f, null, null, searchHits, null, null);
}
@Test
public void testReverse() {
    // Two extensions in the same category; a "desc" sort order must invert the
    // ordering produced by the chosen comparator.
    var ext1 = mockExtension("yaml", 3.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
    var ext2 = mockExtension("java", 4.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
    Mockito.when(repositories.findAllActiveExtensions()).thenReturn(Streamable.of(List.of(ext1, ext2)));
    var searchOptions = new ISearchService.Options(null, "Programming Languages", TargetPlatform.NAME_UNIVERSAL, 50, 0, "desc", null, false);
    var result = search.search(searchOptions);
    // should find two extensions
    assertThat(result.getTotalHits()).isEqualTo(2);
    // Descending order puts "java" before "yaml".
    var hits = result.getSearchHits();
    assertThat(getIdFromExtensionHits(hits, 0)).isEqualTo(getIdFromExtensionName("java"));
    assertThat(getIdFromExtensionHits(hits, 1)).isEqualTo(getIdFromExtensionName("yaml"));
}
/**
 * Routes the query to concrete data sources.
 *
 * <p>Resolution order: (1) an explicit data source hint wins outright; (2) the
 * first applicable EntranceSQLRouter creates the route context and every
 * DecorateSQLRouter refines it; (3) with no routes produced and exactly one
 * storage unit configured, route to that unit directly.
 */
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public RouteContext route(final ConnectionContext connectionContext, final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database) {
    RouteContext result = new RouteContext();
    // (1) A data source hint short-circuits all rule-based routing.
    Optional<String> dataSourceName = findDataSourceByHint(queryContext.getHintValueContext(), database.getResourceMetaData().getStorageUnits());
    if (dataSourceName.isPresent()) {
        result.getRouteUnits().add(new RouteUnit(new RouteMapper(dataSourceName.get(), dataSourceName.get()), Collections.emptyList()));
        return result;
    }
    // (2) Entrance routers create the context (only while it is still empty);
    // decorate routers then refine whatever exists.
    for (Entry<ShardingSphereRule, SQLRouter> entry : routers.entrySet()) {
        if (result.getRouteUnits().isEmpty() && entry.getValue() instanceof EntranceSQLRouter) {
            result = ((EntranceSQLRouter) entry.getValue()).createRouteContext(queryContext, globalRuleMetaData, database, entry.getKey(), props, connectionContext);
        } else if (entry.getValue() instanceof DecorateSQLRouter) {
            ((DecorateSQLRouter) entry.getValue()).decorateRouteContext(result, queryContext, database, entry.getKey(), props, connectionContext);
        }
    }
    // (3) Single-storage-unit fallback when no router produced a route.
    if (result.getRouteUnits().isEmpty() && 1 == database.getResourceMetaData().getStorageUnits().size()) {
        String singleDataSourceName = database.getResourceMetaData().getStorageUnits().keySet().iterator().next();
        result.getRouteUnits().add(new RouteUnit(new RouteMapper(singleDataSourceName, singleDataSourceName), Collections.emptyList()));
    }
    return result;
}
@Test
void assertRouteByHintManagerHint() {
    // A HintManager data source hint must route directly to the named source,
    // bypassing the rule-based routers.
    try (HintManager hintManager = HintManager.getInstance()) {
        hintManager.setDataSourceName("ds_1");
        QueryContext queryContext = new QueryContext(commonSQLStatementContext, "", Collections.emptyList(), new HintValueContext(), connectionContext, metaData);
        RouteContext routeContext = partialSQLRouteExecutor.route(connectionContext, queryContext, mock(RuleMetaData.class), database);
        assertThat(routeContext.getRouteUnits().size(), is(1));
        assertThat(routeContext.getRouteUnits().iterator().next().getDataSourceMapper().getActualName(), is("ds_1"));
    }
}
@VisibleForTesting public Account updateLastSeen(Account account, Device device) { // compute a non-negative integer between 0 and 86400. long n = Util.ensureNonNegativeLong(account.getUuid().getLeastSignificantBits()); final long lastSeenOffsetSeconds = n % ChronoUnit.DAYS.getDuration().toSeconds(); // produce a truncated timestamp which is either today at UTC midnight // or yesterday at UTC midnight, based on per-user randomized offset used. final long todayInMillisWithOffset = Util.todayInMillisGivenOffsetFromNow(clock, Duration.ofSeconds(lastSeenOffsetSeconds).negated()); // only update the device's last seen time when it falls behind the truncated timestamp. // this ensures a few things: // (1) each account will only update last-seen at most once per day // (2) these updates will occur throughout the day rather than all occurring at UTC midnight. if (device.getLastSeen() < todayInMillisWithOffset) { Metrics.summary(DAYS_SINCE_LAST_SEEN_DISTRIBUTION_NAME, IS_PRIMARY_DEVICE_TAG, String.valueOf(device.isPrimary())) .record(Duration.ofMillis(todayInMillisWithOffset - device.getLastSeen()).toDays()); return accountsManager.updateDeviceLastSeen(account, device, Util.todayInMillis(clock)); } return account; }
@Test
void testUpdateLastSeenStartOfDay() {
    // Pin "now" to exactly the start of today.
    clock.pin(Instant.ofEpochMilli(today));

    final Device device1 = acct1.getDevices().stream().findFirst().get();
    final Device device2 = acct2.getDevices().stream().findFirst().get();

    final Account updatedAcct1 = accountAuthenticator.updateLastSeen(acct1, device1);
    final Account updatedAcct2 = accountAuthenticator.updateLastSeen(acct2, device2);

    // No persistence calls are expected at this pinned time: the devices'
    // last-seen values stay at yesterday and remain unmodified.
    verify(accountsManager, never()).updateDeviceLastSeen(eq(acct1), any(), anyLong());
    verify(accountsManager, never()).updateDeviceLastSeen(eq(acct2), any(), anyLong());

    assertThat(device1.getLastSeen()).isEqualTo(yesterday);
    assertThat(device2.getLastSeen()).isEqualTo(yesterday);

    // Without an update, the same account instances are returned.
    assertThat(acct1).isSameAs(updatedAcct1);
    assertThat(acct2).isSameAs(updatedAcct2);
}
@Override
public ArtifactPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    final String pluginId = descriptor.id();
    // Assemble every metadata facet the artifact plugin exposes; argument
    // evaluation is left-to-right, preserving the original call order.
    return new ArtifactPluginInfo(
            descriptor,
            storeConfigMetadata(pluginId),
            publishArtifactMetadata(pluginId),
            fetchArtifactMetadata(pluginId),
            image(pluginId),
            getCapabilities(pluginId));
}
@Test
public void shouldBuildPluginInfoWithPublishArtifactConfigSettings() {
    GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
    // The extension reports one property ("FILENAME") plus a view template.
    List<PluginConfiguration> pluginConfigurations = List.of(
            new PluginConfiguration("FILENAME", new Metadata(true, false))
    );
    when(extension.getPublishArtifactMetadata(descriptor.id())).thenReturn(pluginConfigurations);
    when(extension.getPublishArtifactView(descriptor.id())).thenReturn("artifact_config");

    ArtifactPluginInfo pluginInfo = new ArtifactPluginInfoBuilder(extension).pluginInfoFor(descriptor);

    // The publish-artifact settings must combine the configurations with the view.
    assertThat(pluginInfo.getArtifactConfigSettings(), is(new PluggableInstanceSettings(pluginConfigurations, new PluginView("artifact_config"))));
}