focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/** Private constructor: this inspector is a singleton bound to Hive's DATE type info; obtain it via get(). */
private IcebergDateObjectInspector() { super(TypeInfoFactory.dateTypeInfo); }
// Verifies the singleton DATE inspector end-to-end: category/type metadata, null pass-through
// for copy/java/writable accessors, LocalDate -> java.sql.Date conversion (and its Writable
// wrapper), defensive copying (equal but not same instance), and that writables are not preferred.
@Test public void testIcebergDateObjectInspector() { DateObjectInspector oi = IcebergDateObjectInspector.get(); assertThat(oi.getCategory()).isEqualTo(ObjectInspector.Category.PRIMITIVE); assertThat(oi.getPrimitiveCategory()) .isEqualTo(PrimitiveObjectInspector.PrimitiveCategory.DATE); assertThat(oi.getTypeInfo()).isEqualTo(TypeInfoFactory.dateTypeInfo); assertThat(oi.getTypeName()).isEqualTo(TypeInfoFactory.dateTypeInfo.getTypeName()); assertThat(oi.getJavaPrimitiveClass()).isEqualTo(Date.class); assertThat(oi.getPrimitiveWritableClass()).isEqualTo(DateWritable.class); assertThat(oi.copyObject(null)).isNull(); assertThat(oi.getPrimitiveJavaObject(null)).isNull(); assertThat(oi.getPrimitiveWritableObject(null)).isNull(); LocalDate local = LocalDate.of(2020, 1, 1); Date date = Date.valueOf("2020-01-01"); assertThat(oi.getPrimitiveJavaObject(local)).isEqualTo(date); assertThat(oi.getPrimitiveWritableObject(local)).isEqualTo(new DateWritable(date)); Date copy = (Date) oi.copyObject(date); assertThat(copy).isEqualTo(date); assertThat(copy).isNotSameAs(date); assertThat(oi.preferWritable()).isFalse(); }
/**
 * Reports whether this product name has never been seen before.
 * Officially supported databases are never "new"; otherwise the name is
 * recorded in DETECTED_DATABASE_NAMES, and only the first sighting returns true.
 */
static boolean isNewDatabase(String uppercaseProductName) {
    boolean supported = SUPPORTED_DATABASE_NAMES.contains(uppercaseProductName);
    // Short-circuit keeps supported names out of the detected set, as before.
    return !supported && DETECTED_DATABASE_NAMES.add(uppercaseProductName);
}
@Test
public void testPostgreSQL() {
    // POSTGRESQL is in the supported list, so it must never be reported as a new database.
    boolean reportedAsNew = SupportedDatabases.isNewDatabase("POSTGRESQL");
    assertThat(reportedAsNew).isFalse();
}
/**
 * Estimates the Shannon entropy of the empirical distribution of values in {@code vector}.
 * The sum of -p*ln(p) is divided by LOG_BASE (presumably the natural log of the target
 * base, converting units — TODO confirm against LOG_BASE's definition).
 * Logs an informational note when the samples-per-state ratio is below SAMPLES_RATIO,
 * since the estimate is then unreliable.
 */
public static <T> double entropy(List<T> vector) {
    final double sampleCount = vector.size();
    final Map<T, Long> counts = calculateCountDist(vector);
    double nats = 0.0;
    for (Map.Entry<T, Long> entry : counts.entrySet()) {
        final double probability = entry.getValue() / sampleCount;
        nats -= probability * Math.log(probability);
    }
    final double entropy = nats / LOG_BASE;
    final double stateRatio = sampleCount / counts.size();
    if (stateRatio < SAMPLES_RATIO) {
        logger.log(Level.INFO, "Entropy estimate of {0} had samples/state ratio of {1}", new Object[]{entropy, stateRatio});
    }
    return entropy;
}
// Checks entropy() against two fixed 100-element samples over the alphabet {0..4};
// expected values are precomputed reference entropies, compared with a tight 1e-13 tolerance.
@Test void testEntropy() { List<Integer> a = Arrays.asList(0, 3, 2, 3, 4, 4, 4, 1, 3, 3, 4, 3, 2, 3, 2, 4, 2, 2, 1, 4, 1, 2, 0, 4, 4, 4, 3, 3, 2, 2, 0, 4, 0, 1, 3, 0, 4, 0, 0, 4, 0, 0, 2, 2, 2, 2, 0, 3, 0, 2, 2, 3, 1, 0, 1, 0, 3, 4, 4, 4, 0, 1, 1, 3, 3, 1, 3, 4, 0, 3, 4, 1, 0, 3, 2, 2, 2, 1, 1, 2, 3, 2, 1, 3, 0, 4, 4, 0, 4, 0, 2, 1, 4, 0, 3, 0, 1, 1, 1, 0); List<Integer> b = Arrays.asList(4, 2, 4, 0, 4, 4, 3, 3, 3, 2, 2, 0, 1, 3, 2, 1, 2, 0, 0, 4, 3, 3, 0, 1, 1, 1, 1, 4, 4, 4, 3, 1, 0, 0, 0, 1, 4, 1, 1, 1, 3, 3, 1, 2, 3, 0, 4, 0, 2, 3, 4, 2, 3, 2, 1, 0, 2, 4, 2, 2, 4, 1, 2, 4, 3, 1, 1, 1, 3, 0, 2, 3, 2, 0, 1, 0, 0, 4, 0, 3, 0, 0, 0, 1, 3, 2, 3, 4, 2, 4, 1, 0, 3, 3, 0, 2, 1, 0, 4, 1); assertEquals(2.3167546539234776, InformationTheory.entropy(a), 1e-13); assertEquals(2.316147658077609, InformationTheory.entropy(b), 1e-13); }
/**
 * Restores this star's state (type, age, mass) from a previously captured memento.
 * The memento is downcast to the internal representation that exposes the state.
 */
void setMemento(StarMemento memento) {
    StarMementoInternal internal = (StarMementoInternal) memento;
    type = internal.getType();
    ageYears = internal.getAgeYears();
    massTons = internal.getMassTons();
}
// Exercises the Memento pattern: captures snapshots at each life stage, advances the star
// (each timePasses doubles age and massively grows mass), then restores earlier snapshots
// and checks the star both reverts its state and evolves correctly again afterwards.
@Test void testSetMemento() { final var star = new Star(StarType.SUN, 1, 2); final var firstMemento = star.getMemento(); assertEquals("sun age: 1 years mass: 2 tons", star.toString()); star.timePasses(); final var secondMemento = star.getMemento(); assertEquals("red giant age: 2 years mass: 16 tons", star.toString()); star.timePasses(); final var thirdMemento = star.getMemento(); assertEquals("white dwarf age: 4 years mass: 128 tons", star.toString()); star.timePasses(); assertEquals("supernova age: 8 years mass: 1024 tons", star.toString()); star.setMemento(thirdMemento); assertEquals("white dwarf age: 4 years mass: 128 tons", star.toString()); star.timePasses(); assertEquals("supernova age: 8 years mass: 1024 tons", star.toString()); star.setMemento(secondMemento); assertEquals("red giant age: 2 years mass: 16 tons", star.toString()); star.setMemento(firstMemento); assertEquals("sun age: 1 years mass: 2 tons", star.toString()); }
// Thin delegate: paging, filtering, and sorting are implemented by the mapper's selectPage.
@Override public PageResult<NotifyMessageDO> getNotifyMessagePage(NotifyMessagePageReqVO pageReqVO) { return notifyMessageMapper.selectPage(pageReqVO); }
// Inserts one matching message plus five near-miss clones (each differing in exactly one
// filter field), then queries with all filters set and asserts only the matching row returns.
// Note: templateCode filter uses "est_01" (substring of "test_01") — presumably the query is
// a LIKE match; verify against the mapper's implementation.
@Test public void testGetNotifyMessagePage() { // mock data NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // will be matched by the query below o.setUserId(1L); o.setUserType(UserTypeEnum.ADMIN.getValue()); o.setTemplateCode("test_01"); o.setTemplateType(10); o.setCreateTime(buildTime(2022, 1, 2)); o.setTemplateParams(randomTemplateParams()); }); notifyMessageMapper.insert(dbNotifyMessage); // test: userId does not match notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L))); // test: userType does not match notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue()))); // test: templateCode does not match notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setTemplateCode("test_11"))); // test: templateType does not match notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setTemplateType(20))); // test: createTime does not match notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setCreateTime(buildTime(2022, 2, 1)))); // prepare parameters NotifyMessagePageReqVO reqVO = new NotifyMessagePageReqVO(); reqVO.setUserId(1L); reqVO.setUserType(UserTypeEnum.ADMIN.getValue()); reqVO.setTemplateCode("est_01"); reqVO.setTemplateType(10); reqVO.setCreateTime(buildBetweenTime(2022, 1, 1, 2022, 1, 10)); // invoke PageResult<NotifyMessageDO> pageResult = notifyMessageService.getNotifyMessagePage(reqVO); // assert assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbNotifyMessage, pageResult.getList().get(0)); }
/**
 * Returns the SObject type of the given object, taken from the name
 * recorded in its description metadata.
 */
static String typeOf(final AbstractDescribedSObjectBase object) {
    return object.description().getName();
}
// Spot-checks typeOf() for two generated SObject classes; the description name is expected
// to coincide with the simple class name.
@Test public void typeOfShouldBeBasedOnSimpleClassName() { assertEquals("Account", SObjectNode.typeOf(new Account()), "Type of Account should be 'Account'"); assertEquals("Contact", SObjectNode.typeOf(new Contact()), "Type of Contact should be 'Contact'"); }
/**
 * Wraps rendered HTML in a {@code <div class="markdown-body">} container so the
 * standard markdown stylesheet applies to it.
 *
 * @param html the rendered HTML fragment (a {@code null} argument is stringified
 *             as the text "null", matching the previous StringBuilder behavior)
 * @return the fragment surrounded by the markdown-body div, with newlines on
 *         either side of the content
 */
public static String wrapWithMarkdownClassDiv(String html) {
    // Plain concatenation of a fixed number of parts; the StringBuilder
    // boilerplate added nothing (the compiler produces equivalent code).
    return "<div class=\"markdown-body\">\n" + html + "\n</div>";
}
// Interprets markdown containing script and div tags and asserts the output HTML-escapes
// the tags (and strips the onclick attribute), then is wrapped in the markdown-body div.
@Test void testEscapeHtml() { String input = new StringBuilder() .append("This is\n") .append("<script type=\"text/javascript\">alert(1);</script>\n") .append("<div onclick='alert(2)'>this is div</div>\n") .toString(); String expected = new StringBuilder() .append("<p>This is</p>\n") .append("<p>&lt;script type=&quot;text/javascript&quot;&gt;" + "alert(1);&lt;/script&gt;</p>\n") .append("<p>&lt;div &gt;this is div&lt;/div&gt;</p>\n") .toString(); InterpreterResult result = md.interpret(input, null); assertEquals(wrapWithMarkdownClassDiv(expected), result.message().get(0).getData()); }
// Deserializes a JSON (or JSON-with-schema-registry) payload into the target type.
// null bytes map to null (tombstone-friendly). The raw JsonNode is read directly with
// MAPPER rather than the JsonSchemaConverter so BigDecimal floats are preserved, then
// coerced to the declared schema starting at path "$". On any failure the exception's
// location is cleared first so payload data never leaks into logs, and the error is
// rethrown as a SerializationException carrying the topic for context.
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
@Test public void shouldDeserializeToBytes() { // Given: final KsqlJsonDeserializer<ByteBuffer> deserializer = givenDeserializerForSchema(Schema.OPTIONAL_BYTES_SCHEMA, ByteBuffer.class); final byte[] bytes = serializeJson(ByteBuffer.wrap(new byte[] {123})); // When: final Object result = deserializer.deserialize(SOME_TOPIC, bytes); // Then: assertThat(result, is(ByteBuffer.wrap(new byte[] {123}))); }
// DELETE endpoint removing every revision of a content pack. Requires the
// CONTENT_PACK_DELETE permission for the given id, refuses (400) while any installation
// of the pack still exists, and otherwise deletes all revisions, logging the count.
@DELETE @Timed @ApiOperation(value = "Delete all revisions of a content pack") @ApiResponses(value = { @ApiResponse(code = 400, message = "Missing or invalid content pack"), @ApiResponse(code = 500, message = "Error while saving content pack") }) @AuditEvent(type = AuditEventTypes.CONTENT_PACK_DELETE) @Path("{contentPackId}") @JsonView(ContentPackView.HttpView.class) public void deleteContentPack( @ApiParam(name = "contentPackId", value = "Content Pack ID", required = true) @PathParam("contentPackId") final ModelId contentPackId) { checkPermission(RestPermissions.CONTENT_PACK_DELETE, contentPackId.toString()); if (!contentPackInstallationPersistenceService.findByContentPackId(contentPackId).isEmpty()) { throw new BadRequestException("Content pack " + contentPackId + " with all its revisions can't be deleted: There are still installations of this content pack"); } final int deleted = contentPackPersistenceService.deleteById(contentPackId); LOG.debug("Deleted {} content packs with id {}", deleted, contentPackId); }
// Verifies both delete overloads refuse to delete while installations exist:
// mocks a non-empty installation lookup, expects BadRequestException, and verifies the
// persistence delete is never invoked — first for delete-all-revisions, then for
// delete-by-revision. NOTE(review): the manual try/catch + flag pattern predates
// assertThrows; consider assertThatThrownBy/assertThrows in new code.
@Test public void notDeleteContentPack() throws Exception { final ModelId id = ModelId.of("1"); when(contentPackInstallations.size()).thenReturn(1); when(contentPackInstallationPersistenceService.findByContentPackId(id)).thenReturn(contentPackInstallations); boolean exceptionCalled = false; try { contentPackResource.deleteContentPack(id); } catch (BadRequestException e) { exceptionCalled = true; } assertThat(exceptionCalled).isEqualTo(true); verify(contentPackInstallationPersistenceService, times(1)).findByContentPackId(id); verify(contentPackPersistenceService, times(0)).deleteById(id); when(contentPackInstallations.size()).thenReturn(1); when(contentPackInstallationPersistenceService.findByContentPackIdAndRevision(id, 1)).thenReturn(contentPackInstallations); exceptionCalled = false; try { contentPackResource.deleteContentPack(id, 1); } catch (BadRequestException e) { exceptionCalled = true; } assertThat(exceptionCalled).isEqualTo(true); verify(contentPackInstallationPersistenceService, times(1)).findByContentPackIdAndRevision(id, 1); verify(contentPackPersistenceService, times(0)).deleteByIdAndRevision(id, 1); }
/**
 * Adds the element to the backing map if absent.
 * putIfAbsent returns null only when no mapping existed, i.e. the set changed.
 */
@Override
public boolean add(E e) {
    E previous = map.putIfAbsent(e, e);
    return previous == null;
}
@Test
public void testAddFailure() {
    // A set backed by a concurrent map: the first insert succeeds,
    // a duplicate insert must report no change.
    ExtendedSet<TestValue> set = new ExtendedSet<>(Maps.newConcurrentMap());
    TestValue value = new TestValue("foo", 1);
    assertTrue(set.add(value));
    assertFalse(set.add(value));
}
// Delegates to the underlying record's last-stored timestamp.
@Override public long getLastStoredTime() { return record.getLastStoredTime(); }
// A freshly created view is expected to report 0 (never stored).
@Test public void test_getLastStoredTime() { assertEquals(0, view.getLastStoredTime()); }
// Pure delegate: local-resource resolution is handled by the wrapped linuxContainerRuntime.
@Override public Map<String, LocalResource> getLocalResources(Container container) throws IOException { return linuxContainerRuntime.getLocalResources(container); }
// NOTE(review): this test calls getLocalResources on a Mockito mock and then verifies the
// mock received the call — it never exercises the real implementation's delegation to
// linuxContainerRuntime, so it is effectively tautological. Consider constructing a real
// executor with a mocked runtime and verifying the runtime is invoked instead.
@Test public void testGetLocalResources() throws Exception { Container container = mock(Container.class); LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class); lce.getLocalResources(container); verify(lce, times(1)).getLocalResources(container); }
// Simple accessor for the reset interval field (units not shown here — see field declaration).
public long getResetInterval() { return resetInterval; }
// Compares the parsed control-data reset interval against the expected fixture constant.
@Test public void testGetResetInterval() { assertEquals(TestParameters.VP_RESET_INTERVAL, chmLzxcControlData.getResetInterval()); }
/**
 * Validates every ParamInfo in the list, failing fast on the first format violation.
 * A null list is treated as trivially valid. On success, the returned response is the
 * last per-item check result (or a fresh response for a null/empty list) with its
 * success flag set — identical to the original contract.
 */
@Override
public ParamCheckResponse checkParamInfoList(List<ParamInfo> paramInfos) {
    ParamCheckResponse result = new ParamCheckResponse();
    if (paramInfos == null) {
        result.setSuccess(true);
        return result;
    }
    for (ParamInfo info : paramInfos) {
        result = checkParamInfoFormat(info);
        if (!result.isSuccess()) {
            // Fail fast: surface the first violation with its message intact.
            return result;
        }
    }
    result.setSuccess(true);
    return result;
}
// Exercises the 'group' param rules through checkParamInfoList: over-length (129 chars)
// fails with the length message, illegal characters fail with the pattern message,
// and a value using only the allowed character classes passes.
@Test void testCheckParamInfoForGroup() { ParamInfo paramInfo = new ParamInfo(); ArrayList<ParamInfo> paramInfos = new ArrayList<>(); paramInfos.add(paramInfo); // Max Length String group = buildStringLength(129); paramInfo.setGroup(group); ParamCheckResponse actual = paramChecker.checkParamInfoList(paramInfos); assertFalse(actual.isSuccess()); assertEquals("Param 'group' is illegal, the param length should not exceed 128.", actual.getMessage()); // Pattern paramInfo.setGroup("@hsbfkj$@@!#khdkad啊@@"); actual = paramChecker.checkParamInfoList(paramInfos); assertFalse(actual.isSuccess()); assertEquals("Param 'group' is illegal, illegal characters should not appear in the param.", actual.getMessage()); // Success paramInfo.setGroup("a-zA-Z0-9-_:."); actual = paramChecker.checkParamInfoList(paramInfos); assertTrue(actual.isSuccess()); }
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
// Builds magic-v0 gzip-compressed records (initially at offset 0), runs the validator with
// a large absolute starting offset, and checks every validated record is re-offset from there.
@Test public void testAbsoluteOffsetAssignmentCompressed() { Compression compression = Compression.gzip().build(); MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V0, RecordBatch.NO_TIMESTAMP, compression); long offset = 1234567; checkOffsets(records, 0); checkOffsets( new LogValidator( records, new TopicPartition("topic", 0), time, CompressionType.GZIP, compression, false, RecordBatch.MAGIC_VALUE_V0, TimestampType.CREATE_TIME, 1000L, 1000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, AppendOrigin.CLIENT, MetadataVersion.latestTesting() ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset ); }
int calculatePrice(Integer basePrice, Integer percent, Integer fixedPrice) { // 1. 优先使用固定佣金 if (fixedPrice != null && fixedPrice > 0) { return ObjectUtil.defaultIfNull(fixedPrice, 0); } // 2. 根据比例计算佣金 if (basePrice != null && basePrice > 0 && percent != null && percent > 0) { return MoneyUtils.calculateRatePriceFloor(basePrice, Double.valueOf(percent)); } return 0; }
// With fixedPrice absent (0 or null), the brokerage must come from the percentage rule:
// floor(payPrice * percent / 100), mirrored here with NumberUtil and RoundingMode.DOWN.
@Test public void testCalculatePrice_usePercent() { // mock data Integer payPrice = randomInteger(); Integer percent = randomInt(1, 101); Integer fixedPrice = randomEle(new Integer[]{0, null}); System.out.println("fixedPrice=" + fixedPrice); // invoke int brokerage = brokerageRecordService.calculatePrice(payPrice, percent, fixedPrice); // assert assertEquals(brokerage, NumberUtil.div(NumberUtil.mul(payPrice, percent), 100, 0, RoundingMode.DOWN).intValue()); }
/**
 * Disconnects and releases the underlying connection. Idempotent: the reference
 * is cleared even if disconnect() throws, so a second call is a no-op.
 */
@Override
public void close() {
    if (connection == null) {
        return; // already closed
    }
    try {
        connection.disconnect();
    } finally {
        connection = null;
    }
}
@Test
public void testClose() {
    // Closing a freshly opened resource connection must leave it reporting closed.
    HttpResourceConnection resource = new HttpResourceConnection(getSettings());
    resource.close();
    assertTrue(resource.isClosed());
}
// Counts the reported character span toward the progress/limit tracker (advance)
// before forwarding the SAX event to the wrapped handler.
@Override public void characters(char[] ch, int start, int length) throws SAXException { advance(length); super.characters(ch, start, length); }
@Test
public void testZeroCharactersPerByte() throws IOException {
    // Consuming many raw bytes with no character output must not trip the
    // characters-per-byte safety check when a character finally arrives.
    try {
        for (int i = 0; i < MANY_BYTES; i++) {
            stream.read();
        }
        handler.characters(new char[] {'x'}, 0, 1);
    } catch (SAXException e) {
        fail("Unexpected SAXException");
    }
}
/**
 * Resolves a Format from its FormatInfo and validates the supplied properties
 * against it.
 *
 * <p>Uses {@code Locale.ROOT} for the upper-casing: format names are ASCII
 * identifiers, and the previous locale-sensitive {@code toUpperCase()} would
 * mis-map e.g. "json" under the Turkish locale (dotless-I rules).
 *
 * @param formatInfo the format name plus its configuration properties
 * @return the resolved, property-validated Format
 */
public static Format of(final FormatInfo formatInfo) {
    final Format format = fromName(formatInfo.getFormat().toUpperCase(java.util.Locale.ROOT));
    format.validateProperties(formatInfo.getProperties());
    return format;
}
// The JSON format does not accept the fullSchemaName property; FormatFactory.of must
// reject it with a KsqlException naming the unsupported config.
@Test public void shouldThrowOnNonAvroWithAvroSchemaName() { // Given: final FormatInfo format = FormatInfo.of("JSON", ImmutableMap.of(ConnectProperties.FULL_SCHEMA_NAME, "foo")); // When: final Exception e = assertThrows( KsqlException.class, () -> FormatFactory.of(format) ); // Then: assertThat(e.getMessage(), containsString("JSON does not support the following configs: [fullSchemaName]")); }
/**
 * Decodes a hex-encoded private transaction, dispatching on the detected
 * transaction type: EIP-1559 payloads get the typed decoder, everything
 * else is treated as a legacy RLP transaction.
 */
public static RawPrivateTransaction decode(final String hexTransaction) {
    final byte[] encoded = Numeric.hexStringToByteArray(hexTransaction);
    return getPrivateTransactionType(encoded) == TransactionType.EIP1559
        ? decodePrivateTransaction1559(encoded)
        : decodeLegacyPrivateTransaction(encoded);
}
// Round-trips a legacy privacy-group transaction through encode -> hex -> decode and
// asserts every field (nonce, gas, to, data, enclave keys, privacy group, restriction)
// survives unchanged.
@Test public void testDecodingPrivacyGroup() { final BigInteger nonce = BigInteger.ZERO; final BigInteger gasPrice = BigInteger.ONE; final BigInteger gasLimit = BigInteger.TEN; final String to = "0x0add5355"; final RawPrivateTransaction rawTransaction = RawPrivateTransaction.createTransaction( nonce, gasPrice, gasLimit, to, "", MOCK_ENCLAVE_KEY, MOCK_ENCLAVE_KEY, RESTRICTED); byte[] encodedMessage = PrivateTransactionEncoder.encode(rawTransaction); final String hexMessage = Numeric.toHexString(encodedMessage); final RawPrivateTransaction result = PrivateTransactionDecoder.decode(hexMessage); assertNotNull(result); assertEquals(nonce, result.getNonce()); assertEquals(gasPrice, result.getGasPrice()); assertEquals(gasLimit, result.getGasLimit()); assertEquals(to, result.getTo()); assertEquals("", result.getData()); assertEquals(MOCK_ENCLAVE_KEY, result.getPrivateFrom()); assertEquals(MOCK_ENCLAVE_KEY, result.getPrivacyGroupId().get()); assertEquals(RESTRICTED, result.getRestriction()); }
// Maps a byte size onto its size-group bucket; the bucketing logic lives in getSizeGroup.
public int encode(int size) { return getSizeGroup(size); }
@Test
public void testEncode() {
    // For every power of two in range, the encoded group must equal the
    // size with its suffix bits shifted away.
    int sizeBits = PREFIX_BITS + SUFFIX_BITS;
    for (int shift = 0; shift < sizeBits; shift++) {
        int size = 1 << shift;
        assertEquals(mSizeEncoder.encode(size), (size >> SUFFIX_BITS));
    }
}
// XML well-formedness assertion: an empty response yields the "null result" outcome;
// otherwise the thread-local XMLReader parses the body, with SAX/IO failures recorded
// as error+failure carrying the parser message. A missing reader (failed init) is
// itself reported as an error pointing the user at jmeter.log.
@Override public AssertionResult getResult(SampleResult response) { // no error as default AssertionResult result = new AssertionResult(getName()); String resultData = response.getResponseDataAsString(); if (resultData.length() == 0) { return result.setResultForNull(); } result.setFailure(false); XMLReader builder = XML_READER.get(); if(builder != null) { try { builder.setErrorHandler(new LogErrorHandler()); builder.parse(new InputSource(new StringReader(resultData))); } catch (SAXException | IOException e) { result.setError(true); result.setFailure(true); result.setFailureMessage(e.getMessage()); } } else { result.setError(true); result.setFailureMessage("Cannot initialize XMLReader in element:"+getName()+", check jmeter.log file"); } return result; }
@Test
public void testNoXML() throws Exception {
    // Non-XML content must fail the assertion with the standard SAX prolog error.
    sampleResult.setResponseData(NO_XML, null);
    result = assertion.getResult(sampleResult);
    assertTrue(result.isFailure());
    assertTrue(result.isError());
    String failureMessage = result.getFailureMessage();
    assertNotNull(failureMessage);
    assertTrue(failureMessage.contains("Content is not allowed in prolog"));
}
// Default no-op: this listener does not consume gesture-typing completion events.
@Override public boolean onGestureTypingInputDone() { return false; }
// Completing gesture input must not touch the parent listener or the dismiss action.
// NOTE(review): verifyZeroInteractions is deprecated in recent Mockito in favor of
// verifyNoInteractions — consider migrating when the Mockito version allows.
@Test public void testOnGestureTypingInputDone() { mUnderTest.onGestureTypingInputDone(); Mockito.verifyZeroInteractions(mMockParentListener, mMockKeyboardDismissAction); }
// Serializes the incoming record under the buffer lock and appends it to the in-memory
// buffer, tracking total buffered bytes. A single record larger than maxBytesPerBatch is
// rejected outright with guidance to raise MAX_BATCH_SIZE; when adding would exceed the
// buffer size limit, the call blocks on bufferCanAddNextResultCondition until the consumer
// drains space. The lock is always released in the finally block.
@Override public void invoke(IN value, Context context) throws Exception { bufferLock.lock(); try { // TODO this implementation is not very effective, // optimize this with MemorySegment if needed ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputViewStreamWrapper wrapper = new DataOutputViewStreamWrapper(baos); serializer.serialize(value, wrapper); invokingRecordBytes = baos.size(); if (invokingRecordBytes > maxBytesPerBatch) { throw new RuntimeException( "Record size is too large for CollectSinkFunction. Record size is " + invokingRecordBytes + " bytes, " + "but max bytes per batch is only " + maxBytesPerBatch + " bytes. " + "Please consider increasing max bytes per batch value by setting " + CollectSinkOperatorFactory.MAX_BATCH_SIZE.key()); } if (currentBufferBytes + invokingRecordBytes > bufferSizeLimitBytes) { bufferCanAddNextResultCondition.await(); } buffer.add(baos.toByteArray()); currentBufferBytes += baos.size(); } finally { bufferLock.unlock(); } }
// Simulates a sink restart: buffers records, checkpoints some, crashes the function, then
// reopens from state and verifies only the checkpointed records (1..2) survive while
// uncheckpointed ones (3..5) are lost; new records after recovery (6..8) are served under
// a fresh version token. Statement order is load-bearing throughout.
@Test void testRestart() throws Exception { functionWrapper.openFunctionWithState(); for (int i = 0; i < 3; i++) { functionWrapper.invoke(i); } String version = initializeVersion(); functionWrapper.sendRequestAndGetResponse(version, 1); functionWrapper.checkpointFunction(1); functionWrapper.checkpointComplete(1); CollectCoordinationResponse response = functionWrapper.sendRequestAndGetResponse(version, 1); assertResponseEquals(response, version, 1, Arrays.asList(1, 2)); // these records are not checkpointed for (int i = 3; i < 6; i++) { functionWrapper.invoke(i); } response = functionWrapper.sendRequestAndGetResponse(version, 2); assertResponseEquals(response, version, 1, Arrays.asList(2, 3, 4)); functionWrapper.closeFunctionAbnormally(); functionWrapper.openFunctionWithState(); version = initializeVersion(); response = functionWrapper.sendRequestAndGetResponse(version, 1); assertResponseEquals(response, version, 1, Arrays.asList(1, 2)); for (int i = 6; i < 9; i++) { functionWrapper.invoke(i); } response = functionWrapper.sendRequestAndGetResponse(version, 2); assertResponseEquals(response, version, 1, Arrays.asList(2, 6, 7)); functionWrapper.closeFunctionNormally(); }
/**
 * Value equality over topic, expressionType and expression.
 * Uses an exact runtime-class check (not instanceof) so equality stays
 * symmetric in the presence of subclasses, matching the original contract.
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    SimpleSubscriptionData other = (SimpleSubscriptionData) o;
    return Objects.equals(topic, other.topic)
        && Objects.equals(expressionType, other.expressionType)
        && Objects.equals(expression, other.expression);
}
@Test
public void testSetEqual() {
    // Two distinct instances carrying equal field values must make their
    // singleton sets compare equal (exercises equals/hashCode together).
    String topic = "test-topic";
    String expressionType = "TAG";
    Set<SimpleSubscriptionData> first =
        Sets.newHashSet(new SimpleSubscriptionData(topic, expressionType, "test-expression-1", 1));
    Set<SimpleSubscriptionData> second =
        Sets.newHashSet(new SimpleSubscriptionData(topic, expressionType, "test-expression-1", 1));
    assertThat(first.equals(second)).isTrue();
}
// Entry point for the Kafka sink: builds a Write transform pre-wired with the default
// WriteRecords transform; everything else (servers, topic, serializers) is set via withXxx.
public static <K, V> Write<K, V> write() { return new AutoValue_KafkaIO_Write.Builder<K, V>() .setWriteRecordsTransform(writeRecords()) .build(); }
// Smoke test of the exactly-once sink path (no injected retries — direct runner cannot
// replay between stages): skipped when the Kafka client lacks transactions, otherwise
// pipes 1000 elements through KafkaIO.write with EOS enabled against a mock producer,
// then verifies the published records and the reported sink lineage.
@Test public void testExactlyOnceSink() { // testSink() with EOS enabled. // This does not actually inject retries in a stage to test exactly-once-semantics. // It mainly exercises the code in normal flow without retries. // Ideally we should test EOS Sink by triggering replays of a messages between stages. // It is not feasible to test such retries with direct runner. When DoFnTester supports // state, we can test ExactlyOnceWriter DoFn directly to ensure it handles retries correctly. if (!ProducerSpEL.supportsTransactions()) { LOG.warn( "testExactlyOnceSink() is disabled as Kafka client version does not support transactions."); return; } int numElements = 1000; try (MockProducerWrapper producerWrapper = new MockProducerWrapper(new LongSerializer())) { ProducerSendCompletionThread completionThread = new ProducerSendCompletionThread(producerWrapper.mockProducer).start(); String topic = "test-eos"; String bootStrapServer = "none"; p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata()) .apply( KafkaIO.<Integer, Long>write() .withBootstrapServers(bootStrapServer) .withTopic(topic) .withKeySerializer(IntegerSerializer.class) .withValueSerializer(LongSerializer.class) .withEOS(1, "test-eos") .withConsumerFactoryFn( new ConsumerFactoryFn( Lists.newArrayList(topic), 10, 10, OffsetResetStrategy.EARLIEST)) .withPublishTimestampFunction((e, ts) -> ts) .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))); PipelineResult result = p.run(); completionThread.shutdown(); verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false, true); assertThat( Lineage.query(result.metrics(), Lineage.Type.SINK), hasItem(String.format("kafka:%s.%s", bootStrapServer, topic))); } }
// Returns the Bot API method name for this request (the PATH constant).
@Override public String getMethod() { return PATH; }
@Test
public void testPromoteChatMemberWithEmptyUserId() {
    // A zero userId must be reported as empty by validate(), while the
    // method name itself is still resolvable.
    PromoteChatMember request = PromoteChatMember
            .builder()
            .chatId("12345")
            .userId(0L)
            .build();
    assertEquals("promoteChatMember", request.getMethod());
    Throwable thrown = assertThrows(TelegramApiValidationException.class, request::validate);
    assertEquals("UserId can't be empty", thrown.getMessage());
}
// Logs the deletion request and delegates to the distributed store; the store's boolean
// result (whether the MD existed) is returned as-is.
@Override public boolean deleteMaintenanceDomain(MdId mdName) throws CfmConfigException { log.info("Deleting MD {} from distributed store", mdName); return store.deleteMaintenanceDomain(mdName); }
// Deleting a known MD returns true; deleting an unknown MD returns false; neither case
// may throw CfmConfigException (a throw fails the test with the exception message).
@Test public void testDeleteMaintenanceDomain() { try { assertTrue(service.deleteMaintenanceDomain( MdIdCharStr.asMdId("test-md-1"))); } catch (CfmConfigException e) { fail("Should not have thrown exception: " + e.getMessage()); } //Now try an invalid name try { assertFalse(service.deleteMaintenanceDomain( MdIdCharStr.asMdId("test-md-3"))); } catch (CfmConfigException e) { fail("Should not have thrown exception: " + e.getMessage()); } }
// True when the request code requires admin permission, i.e. is in the ADMIN_CODE set.
public static boolean needAdminPerm(Integer code) { return ADMIN_CODE.contains(code); }
// Builds the expected set of admin-only request codes, then sweeps codes 0..399 and
// asserts that every code flagged by needAdminPerm is one of the expected admin codes.
@Test public void checkAdminCodeTest() { Set<Integer> code = new HashSet<>(); code.add(RequestCode.UPDATE_AND_CREATE_TOPIC); code.add(RequestCode.UPDATE_BROKER_CONFIG); code.add(RequestCode.DELETE_TOPIC_IN_BROKER); code.add(RequestCode.UPDATE_AND_CREATE_SUBSCRIPTIONGROUP); code.add(RequestCode.DELETE_SUBSCRIPTIONGROUP); code.add(RequestCode.UPDATE_AND_CREATE_STATIC_TOPIC); code.add(RequestCode.UPDATE_AND_CREATE_ACL_CONFIG); code.add(RequestCode.DELETE_ACL_CONFIG); code.add(RequestCode.GET_BROKER_CLUSTER_ACL_INFO); for (int i = 0; i < 400; i++) { boolean boo = Permission.needAdminPerm(i); if (boo) { Assert.assertTrue(code.contains(i)); } } }
// Delegates JSON encoding of the criterion to the per-type helper, which dispatches
// on the criterion's type internally.
@Override public ObjectNode encode(Criterion criterion, CodecContext context) { EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context); return encoder.encode(); }
@Test
public void matchIPEcnTest() {
    // An IP ECN criterion must round-trip through the codec and match itself.
    Criterion ecnCriterion = Criteria.matchIPEcn((byte) 3);
    ObjectNode json = criterionCodec.encode(ecnCriterion, context);
    assertThat(json, matchesCriterion(ecnCriterion));
}
/**
 * Finds the master node that owns the given hash slot, or null when no
 * master's slot range covers it.
 */
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
    for (RedisClusterNode node : clusterGetNodes()) {
        boolean ownsSlot = node.isMaster() && node.getSlotRange().contains(slot);
        if (ownsSlot) {
            return node;
        }
    }
    return null;
}
@Test
public void testClusterGetNodeForSlot() {
    // Slots 1 and 16000 belong to different masters in the test cluster,
    // so the resolved node ids must differ.
    RedisClusterNode lowSlotNode = connection.clusterGetNodeForSlot(1);
    RedisClusterNode highSlotNode = connection.clusterGetNodeForSlot(16000);
    assertThat(lowSlotNode.getId()).isNotEqualTo(highSlotNode.getId());
}
// Registers Shiro's lifecycle post-processor so init/destroy callbacks run on Shiro beans.
@Bean public LifecycleBeanPostProcessor lifecycleBeanPostProcessor() { return new LifecycleBeanPostProcessor(); }
@Test
public void testLifecycleBeanPostProcessor() {
    // The configuration must expose a non-null lifecycle post-processor bean.
    LifecycleBeanPostProcessor processor = shiroConfiguration.lifecycleBeanPostProcessor();
    assertNotNull(processor);
}
// Creates (or validates) a Kafka topic. If the topic already exists its partition count,
// replication factor and retention are validated and false is returned. A DEFAULT_REPLICAS
// sentinel resolves to the cluster default replication. Creation is retried on retryable
// errors; a concurrent-create TopicExistsException falls back to validation (another node
// won the race); authorization failures map to KsqlTopicAuthorizationException; everything
// else becomes KafkaResponseGetFailedException. InterruptedException re-interrupts the thread.
@Override public boolean createTopic( final String topic, final int numPartitions, final short replicationFactor, final Map<String, ?> configs, final CreateTopicsOptions createOptions ) { final Optional<Long> retentionMs = KafkaTopicClient.getRetentionMs(configs); if (isTopicExists(topic)) { validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs); return false; } final short resolvedReplicationFactor = replicationFactor == TopicProperties.DEFAULT_REPLICAS ? getDefaultClusterReplication() : replicationFactor; final NewTopic newTopic = new NewTopic(topic, numPartitions, resolvedReplicationFactor); newTopic.configs(toStringConfigs(configs)); try { LOG.info("Creating topic '{}' {}", topic, (createOptions.shouldValidateOnly()) ? "(ONLY VALIDATE)" : "" ); ExecutorUtil.executeWithRetries( () -> adminClient.get().createTopics( Collections.singleton(newTopic), createOptions ).all().get(), ExecutorUtil.RetryBehaviour.ON_RETRYABLE); return true; } catch (final InterruptedException e) { Thread.currentThread().interrupt(); throw new KafkaResponseGetFailedException( "Failed to guarantee existence of topic " + topic, e); } catch (final TopicExistsException e) { // if the topic already exists, it is most likely because another node just created it. // ensure that it matches the partition count, replication factor, and retention // before returning success validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs); return false; } catch (final TopicAuthorizationException e) { throw new KsqlTopicAuthorizationException( AclOperation.CREATE, Collections.singleton(topic)); } catch (final Exception e) { throw new KafkaResponseGetFailedException( "Failed to guarantee existence of topic " + topic, e); } }
// createTopic on an existing topic with a mismatched replication factor must fail
// validation with a KafkaTopicExistsException that names both requested and actual values.
@Test public void shouldThrowFromCreateTopicIfExistingHasDifferentReplicationFactor() { // Given: givenTopicExists("someTopic", 1, 1); givenTopicConfigs( "someTopic", overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "8640000000") ); // When: final Exception e = assertThrows( KafkaTopicExistsException.class, () -> kafkaTopicClient.createTopic("someTopic", 1, (short) 2, configs) ); // Then: assertThat(e.getMessage(), containsString( ", 2 replication factor (topic has 1)")); }
// Builds the TimeLimiterRegistry bean: creates the registry from the configuration
// properties (applying customizers and the registry event consumer), wires the event
// consumer registry for TimeLimiter events, then eagerly initializes configured instances.
@Bean public TimeLimiterRegistry timeLimiterRegistry( TimeLimiterConfigurationProperties timeLimiterConfigurationProperties, EventConsumerRegistry<TimeLimiterEvent> timeLimiterEventConsumerRegistry, RegistryEventConsumer<TimeLimiter> timeLimiterRegistryEventConsumer, @Qualifier("compositeTimeLimiterCustomizer") CompositeCustomizer<TimeLimiterConfigCustomizer> compositeTimeLimiterCustomizer) { TimeLimiterRegistry timeLimiterRegistry = createTimeLimiterRegistry(timeLimiterConfigurationProperties, timeLimiterRegistryEventConsumer, compositeTimeLimiterCustomizer); registerEventConsumer(timeLimiterRegistry, timeLimiterEventConsumerRegistry, timeLimiterConfigurationProperties); initTimeLimiterRegistry(timeLimiterRegistry, timeLimiterConfigurationProperties, compositeTimeLimiterCustomizer); return timeLimiterRegistry; }
// Verifies config inheritance in the registry: instances extending "default"
// or a named shared config inherit its values, override only what they set,
// and unknown instances fall back to the registry-wide default config.
@Test
public void testCreateTimeLimiterRegistryWithSharedConfigs() {
  // Given
  io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties defaultProperties =
      new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
  defaultProperties.setTimeoutDuration(Duration.ofSeconds(3));
  defaultProperties.setCancelRunningFuture(true);
  io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties sharedProperties =
      new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
  sharedProperties.setTimeoutDuration(Duration.ofSeconds(2));
  sharedProperties.setCancelRunningFuture(false);
  io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties backendWithDefaultConfig =
      new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
  backendWithDefaultConfig.setBaseConfig("default");
  backendWithDefaultConfig.setTimeoutDuration(Duration.ofSeconds(5));
  io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties backendWithSharedConfig =
      new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
  backendWithSharedConfig.setBaseConfig("sharedConfig");
  backendWithSharedConfig.setCancelRunningFuture(true);
  TimeLimiterConfigurationProperties timeLimiterConfigurationProperties = new TimeLimiterConfigurationProperties();
  timeLimiterConfigurationProperties.getConfigs().put("default", defaultProperties);
  timeLimiterConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties);
  timeLimiterConfigurationProperties.getInstances().put("backendWithDefaultConfig", backendWithDefaultConfig);
  timeLimiterConfigurationProperties.getInstances().put("backendWithSharedConfig", backendWithSharedConfig);
  TimeLimiterConfiguration timeLimiterConfiguration = new TimeLimiterConfiguration();
  DefaultEventConsumerRegistry<TimeLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();

  // When
  TimeLimiterRegistry timeLimiterRegistry =
      timeLimiterConfiguration.timeLimiterRegistry(timeLimiterConfigurationProperties, eventConsumerRegistry,
          new CompositeRegistryEventConsumer<>(emptyList()), compositeTimeLimiterCustomizerTestInstance());

  // Then
  assertThat(timeLimiterRegistry.getAllTimeLimiters().size()).isEqualTo(2);
  // Should get default config and overwrite timeout duration
  TimeLimiter timeLimiter1 = timeLimiterRegistry.timeLimiter("backendWithDefaultConfig");
  assertThat(timeLimiter1).isNotNull();
  assertThat(timeLimiter1.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(5));
  assertThat(timeLimiter1.getTimeLimiterConfig().shouldCancelRunningFuture()).isTrue();
  // Should get shared config and overwrite cancelRunningFuture
  TimeLimiter timeLimiter2 = timeLimiterRegistry.timeLimiter("backendWithSharedConfig");
  assertThat(timeLimiter2).isNotNull();
  assertThat(timeLimiter2.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(2));
  assertThat(timeLimiter2.getTimeLimiterConfig().shouldCancelRunningFuture()).isTrue();
  // Unknown backend should get default config of Registry
  TimeLimiter timeLimiter3 = timeLimiterRegistry.timeLimiter("unknownBackend");
  assertThat(timeLimiter3).isNotNull();
  assertThat(timeLimiter3.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(3));
  assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3);
}
/**
 * Creates a stream-scoped decorator of the given type with the supplied
 * configuration and ordering position.
 */
@Override
public Decorator create(String type, Map<String, Object> config, String stream, int order) {
  // Wrap the stream id and delegate construction to the concrete implementation.
  final Optional<String> streamId = Optional.of(stream);
  return DecoratorImpl.create(type, config, streamId, order);
}
// create(...) with a stream id yields an unsaved decorator (null id) carrying
// the type, order, config map, and the wrapped stream id.
@Test
public void createWithStreamCreatesDecorator() {
  final Decorator decorator = decoratorService.create("type", singletonMap("foo", "bar"), "000000000000000000000001", 42);
  assertThat(decorator.id()).isNull();
  assertThat(decorator.type()).isEqualTo("type");
  assertThat(decorator.order()).isEqualTo(42);
  assertThat(decorator.config())
      .hasSize(1)
      .containsEntry("foo", "bar");
  assertThat(decorator.stream())
      .isPresent()
      .contains("000000000000000000000001");
}
@Override public Long sendSingleSmsToMember(String mobile, Long userId, String templateCode, Map<String, Object> templateParams) { // 如果 mobile 为空,则加载用户编号对应的手机号 if (StrUtil.isEmpty(mobile)) { mobile = memberService.getMemberUserMobile(userId); } // 执行发送 return sendSingleSms(mobile, userId, UserTypeEnum.MEMBER.getValue(), templateCode, templateParams); }
// Passing a null mobile makes the service resolve it from the member service;
// verifies the SMS log is created and the send message is produced with the
// template parameters in declaration order.
@Test
public void testSendSingleSmsToUser() {
  // Prepare arguments
  Long userId = randomLongId();
  String templateCode = randomString();
  Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
      .put("op", "login").build();
  // Mock memberService behavior
  String mobile = "15601691300";
  when(memberService.getMemberUserMobile(eq(userId))).thenReturn(mobile);
  // Mock SmsTemplateService behavior
  SmsTemplateDO template = randomPojo(SmsTemplateDO.class, o -> {
    o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    o.setContent("验证码为{code}, 操作为{op}");
    o.setParams(Lists.newArrayList("code", "op"));
  });
  when(smsTemplateService.getSmsTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
  String content = randomString();
  when(smsTemplateService.formatSmsTemplateContent(eq(template.getContent()), eq(templateParams)))
      .thenReturn(content);
  // Mock SmsChannelService behavior
  SmsChannelDO smsChannel = randomPojo(SmsChannelDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
  when(smsChannelService.getSmsChannel(eq(template.getChannelId()))).thenReturn(smsChannel);
  // Mock SmsLogService behavior
  Long smsLogId = randomLongId();
  when(smsLogService.createSmsLog(eq(mobile), eq(userId), eq(UserTypeEnum.MEMBER.getValue()), eq(Boolean.TRUE),
      eq(template), eq(content), eq(templateParams))).thenReturn(smsLogId);
  // Invoke
  Long resultSmsLogId = smsSendService.sendSingleSmsToMember(null, userId, templateCode, templateParams);
  // Assert the returned log id
  assertEquals(smsLogId, resultSmsLogId);
  // Assert the producer was invoked with the expected payload
  verify(smsProducer).sendSmsSendMessage(eq(smsLogId), eq(mobile),
      eq(template.getChannelId()), eq(template.getApiTemplateId()),
      eq(Lists.newArrayList(new KeyValue<>("code", "1234"), new KeyValue<>("op", "login"))));
}
/**
 * Converts a class's package name to a classpath resource path,
 * e.g. {@code a.b.C} -> {@code "a/b"}. Returns the empty string for a
 * {@code null} class or a class in the default (unnamed) package.
 */
public static String classPackageAsResourcePath(Class<?> clazz) {
  if (clazz == null) {
    return "";
  }
  final String className = clazz.getName();
  final int lastSeparator = className.lastIndexOf(PACKAGE_SEPARATOR);
  if (lastSeparator < 0) {
    // Default package: nothing before the class name.
    return "";
  }
  // Translate package dots to path separators.
  return className.substring(0, lastSeparator).replace(PACKAGE_SEPARATOR, PATH_SEPARATOR);
}
// null and default-package classes map to "", while a packaged class maps to
// its slash-separated package path.
@Test
void testClassPackageAsResourcePath() throws ClassNotFoundException {
  Class noPackageClass = ClassUtils.forName("ClassUtilsTestMockClass", null);
  assertEquals("", ClassUtils.classPackageAsResourcePath(null));
  assertEquals("", ClassUtils.classPackageAsResourcePath(noPackageClass));
  assertEquals("com/alibaba/nacos/common/utils", ClassUtils.classPackageAsResourcePath(ClassUtilsTest.class));
}
/**
 * Entry point for fluent JSON assertions: parses the given JSON document and
 * returns an asserter bound to it.
 */
public static JsonAsserter with(String json) {
  // Parse eagerly so malformed JSON fails here rather than at assertion time.
  final Object parsedDocument = JsonPath.parse(json).json();
  return new JsonAsserterImpl(parsedDocument);
}
// assertNull passes for a JSON path whose value is explicitly null.
@Test
public void a_value_can_asserted_to_be_null() throws Exception {
  with(JSON).assertNull("$.store.bicycle.nullValue");
}
@Override public Collection<EfestoOutput> evaluateInput(EfestoRuntimeContext context, EfestoInput... toEvaluate) { if (toEvaluate.length == 1) { // minor optimization for the (most typical) case with 1 input return getOptionalOutput(context, toEvaluate[0]).map(Collections::singletonList).orElse(Collections.emptyList()); } Collection<EfestoOutput> toReturn = new ArrayList<>(); for (EfestoInput efestoInput : toEvaluate) { getOptionalOutput(context, efestoInput).ifPresent(toReturn::add); } return toReturn; }
// Only managed input types produce outputs: an extra unmanaged input
// (MockEfestoInputD) is silently dropped from the result.
@Test
void evaluateInputs() {
  List<EfestoInput> toProcess = new ArrayList<>();
  MANAGED_Efesto_INPUTS.forEach(managedInput -> {
    try {
      EfestoInput toAdd = managedInput.getDeclaredConstructor().newInstance();
      toProcess.add(toAdd);
    } catch (Exception e) {
      fail("Failed assertion on evaluateInput", e);
    }
  });
  toProcess.add(new MockEfestoInputD());
  Collection<EfestoOutput> retrieved =
      runtimeManager.evaluateInput(context, toProcess.toArray(new EfestoInput[0]));
  assertThat(retrieved).isNotNull().hasSize(MANAGED_Efesto_INPUTS.size());
}
/**
 * Copies the generic canary scope into a Wavefront-specific scope, deriving
 * the Wavefront granularity label from the scope's step size.
 */
@Override
public CanaryScope buildCanaryScope(CanaryScope scope) {
  final WavefrontCanaryScope wavefrontScope = new WavefrontCanaryScope();
  wavefrontScope.setScope(scope.getScope());
  wavefrontScope.setLocation(scope.getLocation());
  wavefrontScope.setStart(scope.getStart());
  wavefrontScope.setEnd(scope.getEnd());
  wavefrontScope.setStep(scope.getStep());
  // Granularity ("s"/"m"/"h"/...) is computed from the step, not copied.
  wavefrontScope.setGranularity(generateGranularity(scope.getStep()));
  wavefrontScope.setExtendedScopeParams(scope.getExtendedScopeParams());
  return wavefrontScope;
}
// A step equal to SECOND maps to the Wavefront granularity label "s".
@Test
public void testBuildCanaryScope_WithSecondGranularity() {
  CanaryScope canaryScope =
      new CanaryScope(
          "scope", "location", Instant.now(), Instant.now(), WavefrontCanaryScopeFactory.SECOND, null);
  CanaryScope generatedCanaryScope = queryBuilder.buildCanaryScope(canaryScope);
  WavefrontCanaryScope wavefrontCanaryScope = (WavefrontCanaryScope) generatedCanaryScope;
  assertThat(wavefrontCanaryScope.getGranularity()).isEqualTo("s");
}
// Wraps every registered MeasureComputer, validates their metric
// declarations, and publishes the (sorted) wrappers to the holder for use by
// later computation steps.
@Override
public void execute(Context context) {
  List<MeasureComputerWrapper> wrappers = Arrays.stream(measureComputers).map(ToMeasureWrapper.INSTANCE).toList();
  validateMetrics(wrappers);
  measureComputersHolder.setMeasureComputers(sortComputers(wrappers));
}
// Declaring an output metric no plugin defines must abort the step with an
// IllegalStateException naming the unknown metric.
@Test
public void fail_with_ISE_when_output_metric_is_not_define_by_plugin() {
  assertThatThrownBy(() -> {
    MeasureComputer[] computers = new MeasureComputer[] {newMeasureComputer(array(NEW_METRIC_4), array("unknown"))};
    ComputationStep underTest = new LoadMeasureComputersStep(holder, array(new TestMetrics()), computers);
    underTest.execute(new TestComputationStepContext());
  })
      .isInstanceOf(IllegalStateException.class)
      .hasMessage("Metric 'unknown' cannot be used as an output metric because no plugins declare this metric");
}
// Parses the request body, forces the GitHub-specific $class so Stapler binds
// the GithubScmSaveFileRequest variant, and delegates to the content-provider
// save path for the given pipeline item.
@Override
public Object saveContent(@NonNull StaplerRequest staplerRequest, @NonNull Item item) {
  JSONObject body;
  try {
    body = JSONObject.fromObject(IOUtils.toString(staplerRequest.getReader()));
  } catch (IOException e) {
    throw new ServiceException.UnexpectedErrorException("Failed to read request body");
  }
  // Pin the concrete request class so Stapler binds the GitHub variant.
  body.put("$class", "io.jenkins.blueocean.blueocean_github_pipeline.GithubScmSaveFileRequest");
  GithubScmSaveFileRequest request = staplerRequest.bindJSON(GithubScmSaveFileRequest.class, body);
  if(request == null){
    throw new ServiceException.BadRequestException(new ErrorMessage(400, "Failed to bind request"));
  }
  ScmContentProvider scmContentProvider = ScmContentProvider.resolve(item);
  if(scmContentProvider != null){
    // NOTE(review): the resolved provider is only used as an existence check;
    // the saveContent(request, item) overload presumably re-resolves it — confirm.
    return saveContent(request, item);
  }
  throw new ServiceException.BadRequestException("No save scm content provider found for pipeline: " + item.getFullName());
}
// Saving content through a multi-branch project must fail with
// PreconditionRequired when the requesting user has no GitHub credential set
// up; the test passes only when that exception (with the expected message)
// is thrown.
// NOTE(review): the builder uses path "Jankinsfile" while the JSON body uses
// "Jenkinsfile" — looks like a typo in the fixture; confirm intent.
@Test
public void unauthorizedSaveContentToMbpShouldFail() throws UnirestException, IOException {
  User alice = User.get("alice");
  alice.setFullName("Alice Cooper");
  alice.addProperty(new Mailer.UserProperty("alice@jenkins-ci.org"));

  String aliceCredentialId = createGithubCredential(alice);

  StaplerRequest staplerRequest = mockStapler();

  GitContent content = new GitContent.Builder().autoCreateBranch(true).base64Data("c2xlZXAgMTUKbm9kZSB7CiAgY2hlY2tvdXQgc2NtCiAgc2ggJ2xzIC1sJwp9\\nCnNsZWVwIDE1Cg==\\n")
      .branch("test1").message("another commit").owner("cloudbeers").path("Jankinsfile").repo("PR-demo").sha("e23b8ef5c2c4244889bf94db6c05cc08ea138aef").build();

  when(staplerRequest.bindJSON(Mockito.eq(GithubScmSaveFileRequest.class), Mockito.any(JSONObject.class))).thenReturn(new GithubScmSaveFileRequest(content));

  MultiBranchProject mbp = mockMbp(aliceCredentialId, user, GithubScm.DOMAIN_NAME);

  String request = "{\n" +
      "  \"content\" : {\n" +
      "    \"message\" : \"first commit\",\n" +
      "    \"path\" : \"Jenkinsfile\",\n" +
      "    \"branch\" : \"test1\",\n" +
      "    \"repo\" : \"PR-demo\",\n" +
      "    \"sha\" : \"e23b8ef5c2c4244889bf94db6c05cc08ea138aef\",\n" +
      "    \"base64Data\" : "+"\"c2xlZXAgMTUKbm9kZSB7CiAgY2hlY2tvdXQgc2NtCiAgc2ggJ2xzIC1sJwp9\\nCnNsZWVwIDE1Cg==\\n\""+
      "  }\n" +
      "}";

  when(staplerRequest.getReader()).thenReturn(new BufferedReader(new StringReader(request), request.length()));

  try {
    //Bob trying to access content but his credential is not setup so should fail
    new GithubScmContentProvider().saveContent(staplerRequest, mbp);
  }catch (ServiceException.PreconditionRequired e){
    assertEquals("Can't access content from github: no credential found", e.getMessage());
    return;
  }
  fail("Should have failed with PreConditionException");
}
/**
 * Builds a space-separated classpath manifest where each file's path is
 * relativized against {@code relativePathRoot}.
 *
 * <p>Replaces the original side-effecting StringBuilder loop (append each
 * path plus a trailing space, then trim) with the idiomatic stream +
 * {@code Collectors.joining}; the final {@code trim()} preserves the exact
 * original behavior for degenerate empty relative paths.
 *
 * @param relativePathRoot root directory the entries are made relative to
 * @param files            classpath entries to include, in order
 * @return space-separated relative paths; empty string for no files
 */
static String relativeClasspathManifest(File relativePathRoot, Iterable<File> files) {
  final java.nio.file.Path root = relativePathRoot.toPath();
  return java.util.stream.StreamSupport.stream(files.spliterator(), false)
      .map(f -> root.relativize(f.toPath()).toString())
      .collect(java.util.stream.Collectors.joining(" "))
      .trim();
}
// Files outside the root are expressed with "../"; files inside keep their
// bare names; entries stay in input order, space-separated.
// NOTE(review): the assertEquals argument order here is (actual, expected) —
// confirm this matches the assertion framework in use (TestNG vs JUnit).
@Test
public void testCreatesClasspath() {
  //setup
  File dir = new File("/tmp/foo");
  File subdir = new File(dir, "sub");
  subdir.mkdirs();
  File f1 = new File(dir, "foo.jar");
  //different directory
  File f2 = new File(subdir, "bar.jar");
  File f3 = new File(subdir, "aaa.jar");

  //when
  String cp = ClasspathManifest.relativeClasspathManifest(subdir, Arrays.asList(f1, f2, f3));

  //then
  assertEquals(cp, "../foo.jar bar.jar aaa.jar");
}
/**
 * Maps a source bean to a new instance of the target class; a {@code null}
 * source converts to {@code null} instead of throwing.
 */
public static <T, S> T convert(S source, Class<T> clazz) {
  if (source == null) {
    return null;
  }
  return BEAN_MAPPER_BUILDER.map(source, clazz);
}
// Converting a Map to a bean populates all matching properties by key name.
@Test
public void mapToBeanConvertTest() {
  // Map-to-bean conversion
  final HashMap<String, Object> map = new HashMap<>();
  map.put("name", "Hippo4j");
  map.put("age", 1);
  map.put("address", "hippo4j.cn");
  map.put("size", 999);
  final Person person = BeanUtil.convert(map, Person.class);
  Assert.assertEquals("Hippo4j", person.getName());
  Assert.assertEquals(1, person.getAge());
  Assert.assertEquals("hippo4j.cn", person.getAddress());
  Assert.assertEquals(999, (int) person.getSize());
}
/**
 * Plans splits for continuous enumeration. Refreshes table metadata first;
 * with no prior position this performs the initial discovery, otherwise it
 * discovers only splits added after {@code lastPosition}.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
  // Always plan against the latest table metadata.
  table.refresh();
  return lastPosition == null
      ? discoverInitialSplits()
      : discoverIncrementalSplits(lastPosition);
}
// TABLE_SCAN_THEN_INCREMENTAL on a non-empty table: the first plan has no
// from-position, targets the latest snapshot, and returns one split covering
// all existing data files; subsequent cycles proceed incrementally.
@Test
public void testTableScanThenIncrementalWithNonEmptyTable() throws Exception {
  appendTwoSnapshots();

  ScanContext scanContext =
      ScanContext.builder()
          .startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
          .build();
  ContinuousSplitPlannerImpl splitPlanner =
      new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);

  ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
  assertThat(initialResult.fromPosition()).isNull();
  assertThat(initialResult.toPosition().snapshotId().longValue())
      .isEqualTo(snapshot2.snapshotId());
  assertThat(initialResult.toPosition().snapshotTimestampMs().longValue())
      .isEqualTo(snapshot2.timestampMillis());
  assertThat(initialResult.splits()).hasSize(1);
  IcebergSourceSplit split = Iterables.getOnlyElement(initialResult.splits());
  assertThat(split.task().files()).hasSize(2);
  Set<String> discoveredFiles =
      split.task().files().stream()
          .map(fileScanTask -> fileScanTask.file().path().toString())
          .collect(Collectors.toSet());
  Set<String> expectedFiles =
      ImmutableSet.of(dataFile1.path().toString(), dataFile2.path().toString());
  assertThat(discoveredFiles).containsExactlyInAnyOrderElementsOf(expectedFiles);

  IcebergEnumeratorPosition lastPosition = initialResult.toPosition();
  for (int i = 0; i < 3; ++i) {
    lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
  }
}
// Enriches raw JobStats with derived/extended statistics. tryLock keeps the
// method non-blocking: if another thread is already enriching, this call just
// returns the last computed extended stats. The order of the set* calls
// matters — previous stats must be updated last so the next call compares
// against the stats used here.
// NOTE(review): unlock() is not in a finally block, so an exception in the
// set* calls would leave the lock held forever — confirm this is acceptable.
public JobStatsExtended enrich(JobStats jobStats) {
  JobStats latestJobStats = getLatestJobStats(jobStats, previousJobStats);
  if (lock.tryLock()) {
    setFirstRelevantJobStats(latestJobStats);
    setJobStatsExtended(latestJobStats);
    setPreviousJobStats(latestJobStats);
    lock.unlock();
  }
  return jobStatsExtended;
}
// With no previous stats and one enqueued job, the extended stats report
// nothing processed yet and no estimation is available.
@Test
void enrichGivenNoPreviousJobStatsAndWorkToDoProcessing() {
  JobStatsExtended extendedJobStats = jobStatsEnricher.enrich(getJobStats(0L, 1L, 0L, 0L));
  assertThat(extendedJobStats.getAmountSucceeded()).isZero();
  assertThat(extendedJobStats.getAmountFailed()).isZero();
  assertThat(extendedJobStats.getEstimation().isProcessingDone()).isFalse();
  assertThat(extendedJobStats.getEstimation().isEstimatedProcessingFinishedInstantAvailable()).isFalse();
}
/**
 * Static factory for a {@link RowCoder} encoding rows of the given schema.
 */
public static RowCoder of(Schema schema) {
  final RowCoder rowCoder = new RowCoder(schema);
  return rowCoder;
}
// A map field with a nullable value type must round-trip a null map value
// through encode/decode while staying equals-consistent.
@Test
public void testConsistentWithEqualsMapWithNull() throws Exception {
  Schema schema =
      Schema.builder()
          .addField(
              "a",
              Schema.FieldType.map(
                  Schema.FieldType.INT32, Schema.FieldType.INT32.withNullable(true)))
          .build();
  Row row = Row.withSchema(schema).addValue(Collections.singletonMap(1, null)).build();
  CoderProperties.coderDecodeEncodeEqual(RowCoder.of(schema), row);
}
// Parses a <chunk/> extension element: reads the streamId / nr / last
// attributes, then consumes child events until the matching </chunk>,
// collecting the base64 text content. Any unexpected tag or event aborts
// parsing with IllegalArgumentException.
// NOTE(review): a missing "nr" attribute makes Integer.parseInt throw an NPE
// rather than a parse error — confirm the attribute is mandatory upstream.
@Override
public Base64BinaryChunk parse(XmlPullParser parser, int initialDepth, XmlEnvironment xmlEnvironment)
    throws XmlPullParserException, IOException {
  String streamId = parser.getAttributeValue("", Base64BinaryChunk.ATTRIBUTE_STREAM_ID);
  String nrString = parser.getAttributeValue("", Base64BinaryChunk.ATTRIBUTE_NR);
  String lastString = parser.getAttributeValue("", Base64BinaryChunk.ATTRIBUTE_LAST);
  boolean last = false;
  int nr = Integer.parseInt(nrString);
  // "last" is optional and defaults to false.
  if (lastString != null) {
    last = Boolean.parseBoolean(lastString);
  }
  String text = null;
  boolean done = false;
  while (!done) {
    XmlPullParser.Event eventType = parser.next();
    if (eventType == XmlPullParser.Event.END_ELEMENT) {
      if (parser.getName().equals(Base64BinaryChunk.ELEMENT_CHUNK)) {
        // Reached the closing </chunk> tag.
        done = true;
      } else {
        throw new IllegalArgumentException("unexpected end tag of: " + parser.getName());
      }
    } else if (eventType == XmlPullParser.Event.TEXT_CHARACTERS) {
      // Base64 payload of the chunk.
      text = parser.getText();
    } else {
      throw new IllegalArgumentException("unexpected eventType: " + eventType);
    }
  }
  return new Base64BinaryChunk(text, streamId, nr, last);
}
// A chunk element without a "last" attribute parses with isLast() == false
// and preserves streamId, nr, and the base64 text verbatim.
@Test
public void isNonLatsChunkParsedCorrectly() throws Exception {
  String base64Text = "iVBORw0KGgoAAAANSUhEUgAAASwAAAGQCAYAA";
  String string = "<chunk xmlns='urn:xmpp:http' streamId='Stream0001' nr='0'>" + base64Text + "</chunk>";
  Base64BinaryChunkProvider provider = new Base64BinaryChunkProvider();
  XmlPullParser parser = PacketParserUtils.getParserFor(string);
  ExtensionElement extension = provider.parse(parser);
  assertTrue(extension instanceof Base64BinaryChunk);
  Base64BinaryChunk chunk = (Base64BinaryChunk) extension;
  assertEquals("Stream0001", chunk.getStreamId());
  assertFalse(chunk.isLast());
  assertEquals(base64Text, chunk.getText());
  assertEquals(0, chunk.getNr());
}
/**
 * Factory for the parameterless NATIVE_FORWARD mapping action.
 */
public static NativeForwardMappingAction nativeForward() {
  final NativeForwardMappingAction action = new NativeForwardMappingAction();
  return action;
}
// nativeForward() yields an action of type NATIVE_FORWARD with the expected
// concrete class.
@Test
public void testNativeForwardMethod() {
  MappingAction mappingAction = MappingActions.nativeForward();
  checkAndConvert(mappingAction,
      MappingAction.Type.NATIVE_FORWARD,
      NativeForwardMappingAction.class);
}
// gRPC handler for service queries: resolves the requested service, reads its
// cached ServiceInfo, then filters instances by cluster and healthy-only flag
// (with healthy-protection) before building the response. Guarded by TPS
// control and READ permission.
@Override
@TpsControl(pointName = "RemoteNamingServiceQuery", name = "RemoteNamingServiceQuery")
@Secured(action = ActionTypes.READ)
@ExtractorManager.Extractor(rpcExtractor = ServiceQueryRequestParamExtractor.class)
public QueryServiceResponse handle(ServiceQueryRequest request, RequestMeta meta) throws NacosException {
  String namespaceId = request.getNamespace();
  String groupName = request.getGroupName();
  String serviceName = request.getServiceName();
  Service service = Service.newService(namespaceId, groupName, serviceName);
  // Cluster filter is optional; empty string means "all clusters".
  String cluster = null == request.getCluster() ? "" : request.getCluster();
  boolean healthyOnly = request.isHealthyOnly();
  ServiceInfo result = serviceStorage.getData(service);
  ServiceMetadata serviceMetadata = metadataManager.getServiceMetadata(service).orElse(null);
  result = ServiceUtil.selectInstancesWithHealthyProtection(result, serviceMetadata, cluster, healthyOnly,
      true, NamingRequestUtil.getSourceIpForGrpcRequest(meta));
  return QueryServiceResponse.buildSuccessResponse(result);
}
// Happy-path query: a stored ServiceInfo is returned through the response.
// NOTE(review): setGroupName is called twice ("A" then "B") — the first call
// is dead; possibly one of them was meant to set a different field. Confirm.
@Test
void testHandle() throws NacosException {
  Instance instance = new Instance();
  instance.setIp("1.1.1.1");
  List<Instance> instances = Arrays.asList(instance);
  ServiceInfo serviceInfo = new ServiceInfo();
  serviceInfo.setGroupName("A");
  serviceInfo.setGroupName("B");
  serviceInfo.setName("C");
  serviceInfo.setHosts(instances);
  Mockito.when(serviceStorage.getData(Mockito.any())).thenReturn(serviceInfo);
  ServiceMetadata serviceMetadata = new ServiceMetadata();
  Mockito.when(metadataManager.getServiceMetadata(Mockito.any())).thenReturn(Optional.of(serviceMetadata));
  ServiceQueryRequest serviceQueryRequest = new ServiceQueryRequest();
  serviceQueryRequest.setNamespace("A");
  serviceQueryRequest.setGroupName("B");
  serviceQueryRequest.setServiceName("C");
  serviceQueryRequest.setHealthyOnly(false);
  QueryServiceResponse queryServiceResponse =
      serviceQueryRequestHandler.handle(serviceQueryRequest, new RequestMeta());
  assertEquals("C", queryServiceResponse.getServiceInfo().getName());
}
/**
 * Returns the raw {@code android:configChanges} attribute value for this
 * activity, or {@code null} when the attribute is absent.
 */
public String getConfigChanges() {
  final String namespacedKey = withXMLNS(CONFIG_CHANGES);
  return attrs.get(namespacedKey);
}
// getConfigChanges returns the android:configChanges value verbatim,
// including the pipe-separated flag list.
@Test
public void test_config_changes() {
  HashMap<String, String> attrs = new HashMap<>();
  attrs.put("android:configChanges", "mcc|screenLayout|orientation");
  ActivityData activityData = new ActivityData(attrs, new ArrayList<IntentFilterData>());
  assertThat(activityData.getConfigChanges()).isEqualTo("mcc|screenLayout|orientation");
}
// Hands out the next split, preferring data-local assignment: a request with
// a hostname first tries splits local to that host (lazily building a
// per-host chooser over the unassigned set); only when none exists does it
// fall back to a remote split. A request with no host always gets a remote
// split.
@Override
public Optional<FileSourceSplit> getNext(@Nullable String host) {
  // for a null host, we always return a remote split
  if (StringUtils.isNullOrWhitespaceOnly(host)) {
    final Optional<FileSourceSplit> split = getRemoteSplit();
    if (split.isPresent()) {
      LOG.info("Assigning split to non-localized request: {}", split);
    }
    return split;
  }
  host = normalizeHostName(host);

  // for any non-null host, we take the list of non-null splits
  final LocatableSplitChooser localSplits =
      localPerHost.computeIfAbsent(
          host, (theHost) -> buildChooserForHost(theHost, unassigned));

  final SplitWithInfo localSplit =
      localSplits.getNextUnassignedMinLocalCountSplit(unassigned);
  if (localSplit != null) {
    // The chooser may lag behind the shared unassigned set; removal must
    // succeed here or internal bookkeeping is inconsistent.
    checkState(
        unassigned.remove(localSplit),
        "Selected split has already been assigned. This should not happen!");
    LOG.info(
        "Assigning local split to requesting host '{}': {}", host, localSplit.getSplit());
    localAssignments.inc();
    return Optional.of(localSplit.getSplit());
  }

  // we did not find a local split, return a remote split
  final Optional<FileSourceSplit> remoteSplit = getRemoteSplit();
  if (remoteSplit.isPresent()) {
    LOG.info("Assigning remote split to requesting host '{}': {}", host, remoteSplit);
  }
  return remoteSplit;
}
// Randomized stress test: 1000 splits, each with three random host locations,
// requested from a mix of local and remote hosts. Every request must yield a
// distinct split, all splits must be handed out exactly once, and a request
// after exhaustion must return empty.
@Test
void testAssignmentOfManySplitsRandomly() {
  // Seed recorded so a failing run is reproducible.
  final long seed = Calendar.getInstance().getTimeInMillis();

  final int numSplits = 1000;
  final String[] splitHosts = new String[256];
  final String[] requestingHosts = new String[256];
  final Random rand = new Random(seed);

  for (int i = 0; i < splitHosts.length; i++) {
    splitHosts[i] = "localHost" + i;
  }

  // Half of the requesters overlap split hosts (local), half never do.
  for (int i = 0; i < requestingHosts.length; i++) {
    if (i % 2 == 0) {
      requestingHosts[i] = "localHost" + i;
    } else {
      requestingHosts[i] = "remoteHost" + i;
    }
  }

  String[] stringArray = {};
  Set<String> hosts = new HashSet<>();
  Set<FileSourceSplit> splits = new HashSet<>();
  for (int i = 0; i < numSplits; i++) {
    // Pick three distinct random hosts for each split's locality.
    while (hosts.size() < 3) {
      hosts.add(splitHosts[rand.nextInt(splitHosts.length)]);
    }
    splits.add(createSplit(i, hosts.toArray(stringArray)));
    hosts.clear();
  }

  final LocalityAwareSplitAssigner ia = new LocalityAwareSplitAssigner(splits);

  for (int i = 0; i < numSplits; i++) {
    final Optional<FileSourceSplit> split =
        ia.getNext(requestingHosts[rand.nextInt(requestingHosts.length)]);
    assertThat(split).isPresent();
    // Each split may be assigned at most once.
    assertThat(splits.remove(split.get())).isTrue();
  }

  assertThat(splits).isEmpty();
  assertThat(ia.getNext("testHost")).isNotPresent();
}
@Override public InterpreterResult interpret(String st, InterpreterContext context) throws InterpreterException { InterpreterOutput out = context.out; Matcher activateMatcher = PATTERN_COMMAND_ACTIVATE.matcher(st); Matcher createMatcher = PATTERN_COMMAND_CREATE.matcher(st); Matcher installMatcher = PATTERN_COMMAND_INSTALL.matcher(st); Matcher uninstallMatcher = PATTERN_COMMAND_UNINSTALL.matcher(st); Matcher envMatcher = PATTERN_COMMAND_ENV.matcher(st); try { if (PATTERN_COMMAND_ENV_LIST.matcher(st).matches()) { String result = runCondaEnvList(); return new InterpreterResult(Code.SUCCESS, Type.HTML, result); } else if (envMatcher.matches()) { // `envMatcher` should be used after `listEnvMatcher` String result = runCondaEnv(getRestArgsFromMatcher(envMatcher)); return new InterpreterResult(Code.SUCCESS, Type.HTML, result); } else if (PATTERN_COMMAND_LIST.matcher(st).matches()) { String result = runCondaList(); return new InterpreterResult(Code.SUCCESS, Type.HTML, result); } else if (createMatcher.matches()) { String result = runCondaCreate(getRestArgsFromMatcher(createMatcher)); return new InterpreterResult(Code.SUCCESS, Type.HTML, result); } else if (activateMatcher.matches()) { String envName = activateMatcher.group(1).trim(); return runCondaActivate(envName); } else if (PATTERN_COMMAND_DEACTIVATE.matcher(st).matches()) { return runCondaDeactivate(); } else if (installMatcher.matches()) { String result = runCondaInstall(getRestArgsFromMatcher(installMatcher)); return new InterpreterResult(Code.SUCCESS, Type.HTML, result); } else if (uninstallMatcher.matches()) { String result = runCondaUninstall(getRestArgsFromMatcher(uninstallMatcher)); return new InterpreterResult(Code.SUCCESS, Type.HTML, result); } else if (st == null || PATTERN_COMMAND_HELP.matcher(st).matches()) { runCondaHelp(out); return new InterpreterResult(Code.SUCCESS); } else if (PATTERN_COMMAND_INFO.matcher(st).matches()) { String result = runCondaInfo(); return new InterpreterResult(Code.SUCCESS, 
Type.HTML, result); } else { return new InterpreterResult(Code.ERROR, "Not supported command: " + st); } } catch (RuntimeException | IOException | InterruptedException e) { throw new InterpreterException(e); } }
// "env list" renders every mocked conda environment name and path in the
// HTML output.
@Test
void testListEnv() throws IOException, InterruptedException, InterpreterException {
  setMockCondaEnvList();
  // list available env
  InterpreterContext context = getInterpreterContext();
  InterpreterResult result = conda.interpret("env list", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertTrue(result.toString().contains(">env1<"));
  assertTrue(result.toString().contains("/path1<"));
  assertTrue(result.toString().contains(">env2<"));
  assertTrue(result.toString().contains("/path2<"));
}
@Override public void setSchema(Schema actual) { // if expected is unset and actual is a specific record, // then default expected to schema of currently loaded class if (getExpected() == null && actual != null && actual.getType() == Schema.Type.RECORD) { SpecificData data = getSpecificData(); Class c = data.getClass(actual); if (c != null && SpecificRecord.class.isAssignableFrom(c)) setExpected(data.getSchema(c)); } super.setSchema(actual); }
// Check that method newInstanceFromString from the SpecificDatumReader
// extension is called: decoding a plain string with the custom reader must
// produce a populated MyData instance (fast reader disabled).
@Test
void readMyData() throws IOException {
  final EncoderFactory e_factory = new EncoderFactory().configureBufferSize(30);
  final DecoderFactory factory = new DecoderFactory().configureDecoderBufferSize(30);
  final MyReader reader = new MyReader();
  reader.setExpected(Schema.create(Schema.Type.STRING));
  reader.setSchema(Schema.create(Schema.Type.STRING));

  final ByteArrayOutputStream out = new ByteArrayOutputStream(30);
  final BinaryEncoder encoder = e_factory.binaryEncoder(out, null);
  encoder.writeString(new Utf8("Hello"));
  encoder.flush();

  final BinaryDecoder decoder = factory.binaryDecoder(out.toByteArray(), null);
  reader.getData().setFastReaderEnabled(false);
  final MyData read = reader.read(null, decoder);
  Assertions.assertNotNull(read, "MyReader.newInstanceFromString was not called");
  Assertions.assertEquals("Hello", read.getContent());
}
// REST endpoint updating a YARN queue definition. Requires a logged-in user
// (SESSION_USER request attribute); returns 201 CREATED with the updated
// queue, and maps service failures to UPDATE_QUEUE_ERROR.
@Operation(summary = "updateQueue", description = "UPDATE_QUEUE_NOTES")
@Parameters({
    @Parameter(name = "id", description = "QUEUE_ID", required = true, schema = @Schema(implementation = int.class, example = "100")),
    @Parameter(name = "queue", description = "YARN_QUEUE_NAME", required = true, schema = @Schema(implementation = String.class)),
    @Parameter(name = "queueName", description = "QUEUE_NAME", required = true, schema = @Schema(implementation = String.class))
})
@PutMapping(value = "/{id}")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(UPDATE_QUEUE_ERROR)
public Result<Queue> updateQueue(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                 @PathVariable(value = "id") int id,
                                 @RequestParam(value = "queue") String queue,
                                 @RequestParam(value = "queueName") String queueName) {
  return Result.success(queueService.updateQueue(loginUser, id, queue, queueName));
}
// PUT /queues/{id} with valid params returns 201, JSON content, and a
// SUCCESS status code in the body.
@Test
public void testUpdateQueue() throws Exception {
  MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
  paramsMap.add("id", "1");
  paramsMap.add("queue", QUEUE_MODIFY_NAME);
  paramsMap.add("queueName", QUEUE_NAME_MODIFY_NAME);

  MvcResult mvcResult = mockMvc.perform(put("/queues/{id}", 1)
          .header(SESSION_ID, sessionId)
          .params(paramsMap))
      .andExpect(status().isCreated())
      .andExpect(content().contentType(MediaType.APPLICATION_JSON))
      .andReturn();

  Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
  Assertions.assertNotNull(result);
  Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
  logger.info("update queue return result:{}", mvcResult.getResponse().getContentAsString());
}
/**
 * Resolves the plugin id registered for the given bundle symbolic name and
 * extension class. Pure delegation to the plugin registry.
 */
@Override
public String pluginIDFor(String bundleSymbolicName, String extensionClassCannonicalName) {
  final String pluginId = pluginRegistry.pluginIDFor(bundleSymbolicName, extensionClassCannonicalName);
  return pluginId;
}
// The service delegates pluginIDFor straight to the registry and returns
// its answer unchanged.
@Test
void shouldGetPluginIDForAGivenBundleAndExtensionClass() {
  when(pluginRegistry.pluginIDFor("SYM_1", "com.path.to.MyClass")).thenReturn("plugin_1");
  assertThat(serviceDefault.pluginIDFor("SYM_1", "com.path.to.MyClass")).isEqualTo("plugin_1");
}
/**
 * Queries a single value of type {@code R} using this instance's default
 * JdbcTemplate; delegates to the overload taking an explicit template.
 */
@Override
public <R> R queryOne(String sql, Class<R> cls) {
  return this.queryOne(this.jdbcTemplate, sql, cls);
}
// queryOne(sql, cls) forwards to the JdbcTemplate and returns its value.
// NOTE(review): the assertEquals arguments are (actual, expected) — JUnit's
// convention is (expected, actual); harmless here but worth normalizing.
@Test
void testQueryOne1() {
  String sql = "SELECT 1";
  Class<Long> clazz = Long.class;
  Long num = 1L;
  when(jdbcTemplate.queryForObject(sql, clazz)).thenReturn(num);
  assertEquals(operate.queryOne(sql, clazz), (Long) 1L);
}
// Finds the pair of LinearNorms bracketing the given input value: index 0 is
// the last norm whose orig is <= input (scanning forward), index 1 is the
// first norm whose orig is >= input (scanning backward, stopping at the
// forward scan's position). linearNorms is assumed sorted ascending by orig.
// NOTE(review): an input below the first orig leaves element 0 null, and an
// input above the last orig leaves element 1 null — confirm callers handle
// null entries.
KiePMMLLinearNorm[] getLimitExpectedValue(final Number input) {
  int counter = 0;
  KiePMMLLinearNorm linearNorm = linearNorms.get(counter);
  KiePMMLLinearNorm startLinearNorm = null;
  // Forward scan: advance while orig <= input, remembering the last such norm.
  while (linearNorm.getOrig() <= input.doubleValue() && counter < linearNorms.size() -1) {
    startLinearNorm = linearNorm;
    counter ++;
    linearNorm = linearNorms.get(counter);
  }
  int startIndex = linearNorms.indexOf(startLinearNorm);
  counter = linearNorms.size() -1;
  linearNorm = linearNorms.get(counter);
  KiePMMLLinearNorm endLinearNorm = null;
  // Backward scan: retreat while orig >= input, remembering the last such
  // norm, without crossing the forward scan's start index.
  while (linearNorm.getOrig() >= input.doubleValue() && counter > startIndex) {
    endLinearNorm = linearNorm;
    counter --;
    linearNorm = linearNorms.get(counter);
  }
  return new KiePMMLLinearNorm[]{startLinearNorm, endLinearNorm};
}
// For inputs across each interval of the norm list, the returned pair must
// be the two norms bracketing the input (boundaries bracket with their
// neighbor on the appropriate side).
@Test
void getLimitExpectedValue() {
  KiePMMLNormContinuous kiePMMLNormContinuous = getKiePMMLNormContinuous(null, null, null);
  Number input = 24;
  KiePMMLLinearNorm[] retrieved = kiePMMLNormContinuous.getLimitExpectedValue(input);
  assertThat(retrieved[0]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(0));
  assertThat(retrieved[1]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(1));
  input = 28;
  retrieved = kiePMMLNormContinuous.getLimitExpectedValue(input);
  assertThat(retrieved[0]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(0));
  assertThat(retrieved[1]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(1));
  input = 30;
  retrieved = kiePMMLNormContinuous.getLimitExpectedValue(input);
  assertThat(retrieved[0]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(1));
  assertThat(retrieved[1]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(2));
  input = 31;
  retrieved = kiePMMLNormContinuous.getLimitExpectedValue(input);
  assertThat(retrieved[0]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(1));
  assertThat(retrieved[1]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(2));
  input = 36;
  retrieved = kiePMMLNormContinuous.getLimitExpectedValue(input);
  assertThat(retrieved[0]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(2));
  assertThat(retrieved[1]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(3));
  input = 37;
  retrieved = kiePMMLNormContinuous.getLimitExpectedValue(input);
  assertThat(retrieved[0]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(2));
  assertThat(retrieved[1]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(3));
  input = 40;
  retrieved = kiePMMLNormContinuous.getLimitExpectedValue(input);
  assertThat(retrieved[0]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(2));
  assertThat(retrieved[1]).isEqualTo(kiePMMLNormContinuous.linearNorms.get(3));
}
@VisibleForTesting
static boolean isVersion(String component) {
    // A component is a version segment only if the WHOLE string matches
    // VERSION_PART_PATTERN — matches() rejects partial hits such as "views".
    return VERSION_PART_PATTERN.matcher(component).matches();
}
@Test
public void testVersionRegex() {
    // Full-string matching: "v" + major digits accepted; anything else rejected.
    Assertions.assertTrue(MainApplicationConfig.isVersion("v9"));
    Assertions.assertTrue(MainApplicationConfig.isVersion("v45"));
    // temporarily don't support minor version. This case should fail when that support is in place
    Assertions.assertFalse(MainApplicationConfig.isVersion("v9.0"));
    Assertions.assertFalse(MainApplicationConfig.isVersion("views"));
}
/**
 * Reads one CSV record (one logical line) from the reader and returns its fields.
 * Quoted fields may contain the delimiter, EOL characters and doubled quote-chars
 * (which decode to a single quote). A quote-char inside a plain field, a single
 * quote-char inside a quoted field, or a missing closing quote raises IOException.
 *
 * @param infile source reader; must support mark(1) for CRLF handling
 * @param delim  field delimiter character
 * @return the fields of the record; empty array at EOF with no data
 * @throws IOException on malformed quoting or underlying read failure
 */
public static String[] csvReadFile(BufferedReader infile, char delim) throws IOException {
    int ch;
    ParserState state = ParserState.INITIAL;
    List<String> list = new ArrayList<>();
    // Accumulates the characters of the field currently being parsed.
    CharArrayWriter baos = new CharArrayWriter(200);
    // Set when the current field is complete and must be emitted into list.
    boolean push = false;
    while (-1 != (ch = infile.read())) {
        push = false;
        switch (state) {
        case INITIAL:
            if (ch == QUOTING_CHAR) {
                state = ParserState.QUOTED;
            } else if (isDelimOrEOL(delim, ch)) {
                push = true;
            } else {
                baos.write(ch);
                state = ParserState.PLAIN;
            }
            break;
        case PLAIN:
            if (ch == QUOTING_CHAR) {
                baos.write(ch);
                throw new IOException(
                        "Cannot have quote-char in plain field:["
                                + baos.toString() + "]");
            } else if (isDelimOrEOL(delim, ch)) {
                push = true;
                state = ParserState.INITIAL;
            } else {
                baos.write(ch);
            }
            break;
        case QUOTED:
            if (ch == QUOTING_CHAR) {
                // Could be the closing quote or the first of a doubled quote.
                state = ParserState.EMBEDDEDQUOTE;
            } else {
                baos.write(ch);
            }
            break;
        case EMBEDDEDQUOTE:
            if (ch == QUOTING_CHAR) {
                baos.write(QUOTING_CHAR); // doubled quote => quote
                state = ParserState.QUOTED;
            } else if (isDelimOrEOL(delim, ch)) {
                push = true;
                state = ParserState.INITIAL;
            } else {
                baos.write(QUOTING_CHAR);
                throw new IOException(
                        "Cannot have single quote-char in quoted field:["
                                + baos.toString() + "]");
            }
            break;
        } // switch(state)
        if (push) {
            if (ch == '\r') {// Remove following \n if present
                infile.mark(1);
                if (infile.read() != '\n') {
                    infile.reset(); // did not find \n, put the character
                                    // back
                }
            }
            String s = baos.toString();
            list.add(s);
            baos.reset();
        }
        // End of record — but an EOL inside a quoted field is field content.
        if ((ch == '\n' || ch == '\r') && state != ParserState.QUOTED) {
            break;
        }
    } // while not EOF
    if (ch == -1) {// EOF (or end of string) so collect any remaining data
        if (state == ParserState.QUOTED) {
            throw new IOException("Missing trailing quote-char in quoted field:[\""
                    + baos.toString() + "]");
        }
        // Do we have some data, or a trailing empty field?
        if (baos.size() > 0 // we have some data
                || push // we've started a field
                || state == ParserState.EMBEDDEDQUOTE // Just seen ""
        ) {
            list.add(baos.toString());
        }
    }
    return list.toArray(new String[list.size()]);
}
@Test
public void testEmptyFile() throws Exception {
    // Empty input yields zero fields and leaves the reader at EOF.
    BufferedReader br = new BufferedReader(new StringReader(""));
    String[] out = CSVSaveService.csvReadFile(br, ',');
    checkStrings(new String[]{}, out);
    assertEquals(-1, br.read(), "Expected to be at EOF");
}
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response createFloatingIp(InputStream input) throws IOException {
    log.trace(String.format(MESSAGE, "CREATE"));
    String inputStr = IOUtils.toString(input, REST_UTF8);
    // On a standby HA node with a configured active peer, forward the raw
    // request body to the active node instead of handling it locally.
    if (!haService.isActive()
            && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
        return syncPost(haService, FLOATING_IPS, inputStr);
    }
    final NeutronFloatingIP floatingIp = (NeutronFloatingIP)
            jsonToModelEntity(inputStr, NeutronFloatingIP.class);
    adminService.createFloatingIp(floatingIp);
    // 201 Created with a Location header pointing at the new floating IP.
    UriBuilder locationBuilder = uriInfo.getBaseUriBuilder()
            .path(FLOATING_IPS)
            .path(floatingIp.getId());
    return created(locationBuilder.build()).build();
}
@Test
public void testCreateFloatingIpWithDuplicatedIp() {
    // The admin service rejects the duplicate IP with IllegalArgumentException;
    // the resource must surface that as a 400 response.
    expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
    replay(mockOpenstackHaService);
    mockOpenstackRouterAdminService.createFloatingIp(anyObject());
    expectLastCall().andThrow(new IllegalArgumentException());
    replay(mockOpenstackRouterAdminService);
    final WebTarget wt = target();
    InputStream jsonStream = OpenstackFloatingIpWebResourceTest.class
            .getResourceAsStream("openstack-floatingip1.json");
    Response response = wt.path(PATH).request(MediaType.APPLICATION_JSON_TYPE)
            .post(Entity.json(jsonStream));
    final int status = response.getStatus();
    assertThat(status, is(400));
    verify(mockOpenstackRouterAdminService);
}
@Override
public String toString() {
    // Render as "<rounded value> <unit>", e.g. a 51 cm quantity prints "51 cm".
    return ROUNDING_FORMAT.format(value) + " " + unit;
}
@Test
public void convertToString() throws Exception {
    // toString() renders the rounded value followed by the unit symbol.
    Quantity<Metrics> quantity = new Quantity<Metrics>(51, Metrics.cm);
    assertEquals("51 cm", quantity.toString());
}
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    // Synchronously sends the request through the underlying API client and
    // returns its typed response.
    return api.send(request);
}
@Test
public void getCustomEmojiStickers() {
    // NOTE(review): integration test — relies on a live bot and on this custom
    // emoji id resolving to exactly one sticker.
    GetCustomEmojiStickersResponse response = bot.execute(new GetCustomEmojiStickers("5434144690511290129"));
    assertTrue(response.isOk());
    assertEquals(1, response.result().length);
}
@Override public void log(Level logLevel, String message) { if (!messageConsumers.containsKey(logLevel)) { return; } Consumer<String> messageConsumer = messageConsumers.get(logLevel); singleThreadedExecutor.execute( () -> { boolean didErase = eraseFooter(); // If a previous footer was erased, the message needs to go up a line. if (didErase) { if (enableTwoCursorUpJump) { messageConsumer.accept(String.format(CURSOR_UP_SEQUENCE_TEMPLATE, 2)); messageConsumer.accept(message); } else { messageConsumer.accept(CURSOR_UP_SEQUENCE + message); } } else { messageConsumer.accept(message); } printInBold(footerLines); }); }
@Test
public void testLog_ignoreIfNoMessageConsumer() {
    // Only LIFECYCLE has a registered consumer, so every other level must be
    // dropped silently by log().
    AnsiLoggerWithFooter testAnsiLoggerWithFooter =
        new AnsiLoggerWithFooter(
            ImmutableMap.of(Level.LIFECYCLE, createMessageConsumer(Level.LIFECYCLE)),
            singleThreadedExecutor,
            false);
    testAnsiLoggerWithFooter.log(Level.LIFECYCLE, "lifecycle");
    testAnsiLoggerWithFooter.log(Level.PROGRESS, "progress");
    testAnsiLoggerWithFooter.log(Level.INFO, "info");
    testAnsiLoggerWithFooter.log(Level.DEBUG, "debug");
    testAnsiLoggerWithFooter.log(Level.WARN, "warn");
    testAnsiLoggerWithFooter.log(Level.ERROR, "error");
    singleThreadedExecutor.shutDownAndAwaitTermination(SHUTDOWN_TIMEOUT);
    Assert.assertEquals(Collections.singletonList("lifecycle"), messages);
    Assert.assertEquals(Collections.singletonList(Level.LIFECYCLE), levels);
}
public static Queue<Consumer<byte[]>> stopConsumers(final Queue<Consumer<byte[]>> consumers) throws PulsarClientException { while (!consumers.isEmpty()) { Consumer<byte[]> consumer = consumers.poll(); if (consumer != null) { try { consumer.close(); } catch (PulsarClientException.AlreadyClosedException e) { // ignore during stopping } catch (Exception e) { LOG.debug("Error stopping consumer: {} due to {}. This exception is ignored", consumer, e.getMessage(), e); } } } return new ConcurrentLinkedQueue<>(); }
@Test
public void givenConsumerQueueIsNotEmptywhenIStopConsumersverifyCallToCloseAndUnsubscribeConsumer() throws PulsarClientException {
    // Each queued consumer must be closed when the queue is drained.
    // NOTE(review): the method name mentions "unsubscribe" but only close() is verified.
    Consumer<byte[]> consumer = mock(Consumer.class);
    Queue<Consumer<byte[]>> consumers = new ConcurrentLinkedQueue<>();
    consumers.add(consumer);
    PulsarUtils.stopConsumers(consumers);
    verify(consumer).close();
}
/**
 * Extracts the schema name from a statistics schema node path such as
 * {@code <root>/<database>/schemas/<schema>}.
 *
 * @param schemaPath node path to parse
 * @return the schema name, or empty when the path does not match or the
 *         schema segment is absent
 */
public static Optional<String> getSchemaNameBySchemaPath(final String schemaPath) {
    Pattern pattern = Pattern.compile(getShardingSphereDataNodePath() + "/([\\w\\-]+)/schemas/([\\w\\-]+)?", Pattern.CASE_INSENSITIVE);
    Matcher matcher = pattern.matcher(schemaPath);
    // Group 2 is optional in the pattern, so it can be null even when find()
    // succeeds (path ending at ".../schemas/"); ofNullable avoids the NPE that
    // Optional.of(null) would throw in that case.
    return matcher.find() ? Optional.ofNullable(matcher.group(2)) : Optional.empty();
}
@Test
void assertGetSchemaNameBySchemaPathHappyPath() {
    // The segment after ".../schemas/" is extracted as the schema name.
    assertThat(ShardingSphereDataNode.getSchemaNameBySchemaPath("/statistics/databases/db_name/schemas/db_schema"), is(Optional.of("db_schema")));
}
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    // Signature parsing is reflective and expensive, so it is computed at most
    // once per DoFn class and served from the cache afterwards.
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
@Test
public void testMultipleStartBundleElement() throws Exception {
    // Two @StartBundle methods on one DoFn must be rejected during signature
    // parsing, with both offending method names in the error message.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Found multiple methods annotated with @StartBundle");
    thrown.expectMessage("bar()");
    thrown.expectMessage("baz()");
    thrown.expectMessage(getClass().getName() + "$");
    DoFnSignatures.getSignature(
        new DoFn<String, String>() {
            @ProcessElement
            public void foo() {}

            @StartBundle
            public void bar() {}

            @StartBundle
            public void baz() {}
        }.getClass());
}
public static Parse parse() {
    // Entry point of the fluent chain: a Parse transform with all defaults, to
    // be configured via its setters (e.g. filepattern).
    return new AutoValue_TikaIO_Parse.Builder().build();
}
@Test
public void testParseDisplayData() {
    // The configured filepattern is the only display-data item registered.
    TikaIO.Parse parse = TikaIO.parse().filepattern("file.pdf");
    DisplayData displayData = DisplayData.from(parse);
    assertThat(displayData, hasDisplayItem("filePattern", "file.pdf"));
    assertEquals(1, displayData.items().size());
}
/**
 * Replaces the flow/tag/lane match rules registered for the given service with
 * the rules carried by {@code entireRules}. Existing rules for the service are
 * cleared first, so a kind absent from the input ends up with no rules.
 */
public void updateServiceRule(String serviceName, List<EntireRule> entireRules) {
    // Ensure the per-kind buckets exist, then drop any stale rules for this service.
    Map<String, List<Rule>> flowRules =
            rules.computeIfAbsent(RouterConstant.FLOW_MATCH_KIND, key -> new ConcurrentHashMap<>());
    Map<String, List<Rule>> tagRules =
            rules.computeIfAbsent(RouterConstant.TAG_MATCH_KIND, key -> new ConcurrentHashMap<>());
    Map<String, List<Rule>> laneRules =
            rules.computeIfAbsent(RouterConstant.LANE_MATCH_KIND, key -> new ConcurrentHashMap<>());
    flowRules.remove(serviceName);
    tagRules.remove(serviceName);
    laneRules.remove(serviceName);
    for (EntireRule entireRule : entireRules) {
        String kind = entireRule.getKind();
        if (RouterConstant.FLOW_MATCH_KIND.equals(kind)) {
            flowRules.putIfAbsent(serviceName, entireRule.getRules());
            LOGGER.info(String.format(Locale.ROOT, "Flow match rule for %s has been updated: %s ", serviceName,
                    JSONObject.toJSONString(entireRule.getRules())));
        } else if (RouterConstant.TAG_MATCH_KIND.equals(kind)) {
            tagRules.putIfAbsent(serviceName, entireRule.getRules());
            LOGGER.info(String.format(Locale.ROOT, "Tag match rule for %s has been updated: %s ", serviceName,
                    JSONObject.toJSONString(entireRule.getRules())));
        } else if (RouterConstant.LANE_MATCH_KIND.equals(kind)) {
            laneRules.putIfAbsent(serviceName, entireRule.getRules());
            LOGGER.info(String.format(Locale.ROOT, "Lane match rule for %s has been updated: %s ", serviceName,
                    JSONObject.toJSONString(entireRule.getRules())));
        }
    }
}
@Test
public void testUpdateServiceRule() {
    // Supplying one rule per kind (flow, tag, lane) results in all three rule
    // kinds being registered in the configuration.
    List<EntireRule> entireRules = new ArrayList<>();
    EntireRule flowMatchRule = new EntireRule();
    flowMatchRule.setKind(RouterConstant.FLOW_MATCH_KIND);
    flowMatchRule.setDescription("flow match rule");
    flowMatchRule.setRules(new ArrayList<>());
    EntireRule tagMatchRule = new EntireRule();
    tagMatchRule.setKind(RouterConstant.TAG_MATCH_KIND);
    tagMatchRule.setDescription("tag match rule");
    tagMatchRule.setRules(new ArrayList<>());
    EntireRule laneMatchRule = new EntireRule();
    laneMatchRule.setKind(RouterConstant.LANE_MATCH_KIND);
    laneMatchRule.setDescription("lane match rule");
    laneMatchRule.setRules(new ArrayList<>());
    entireRules.add(flowMatchRule);
    entireRules.add(tagMatchRule);
    entireRules.add(laneMatchRule);
    routerConfiguration.updateServiceRule("testService", entireRules);
    Assert.assertEquals(3, routerConfiguration.getRouteRule().size());
}
/**
 * Starts the event-processing loop on the executor. May only be called once;
 * a second call fails the state precondition.
 */
public void start() {
    Preconditions.checkState(state.compareAndSet(State.LATENT, State.STARTED),
            "Cannot be started more than once");
    // Value-returning lambda resolves to Callable<Object>, so checked
    // exceptions from processEvents() propagate through the returned Future.
    service.submit(() -> {
        processEvents();
        return null;
    });
}
@Test
@Tag(CuratorTestBase.zk36Group)
public void testConnectionStateRecoversFromUnexpectedExpiredConnection() throws Exception {
    // Simulates a session expiry whose watcher notification itself throws, and
    // verifies the state machine still walks CONNECTED -> SUSPENDED -> LOST ->
    // RECONNECTED instead of getting stuck.
    Timing2 timing = new Timing2();
    CuratorFramework client = CuratorFrameworkFactory.builder()
            .connectString(server.getConnectString())
            .connectionTimeoutMs(1_000)
            .sessionTimeoutMs(250) // try to aggressively expire the connection
            .retryPolicy(new RetryOneTime(1))
            .connectionStateErrorPolicy(new SessionConnectionStateErrorPolicy())
            .build();
    final BlockingQueue<ConnectionState> queue = Queues.newLinkedBlockingQueue();
    ConnectionStateListener listener = (client1, state) -> queue.add(state);
    client.getConnectionStateListenable().addListener(listener);
    client.start();
    try {
        ConnectionState polled = queue.poll(timing.forWaiting().seconds(), TimeUnit.SECONDS);
        assertEquals(polled, ConnectionState.CONNECTED);
        client.getZookeeperClient()
                .getZooKeeper()
                .getTestable()
                .queueEvent(new WatchedEvent(
                        Watcher.Event.EventType.None, Watcher.Event.KeeperState.Disconnected, null));
        polled = queue.poll(timing.forWaiting().seconds(), TimeUnit.SECONDS);
        assertEquals(polled, ConnectionState.SUSPENDED);
        assertThrows(RuntimeException.class, () -> client.getZookeeperClient()
                .getZooKeeper()
                .getTestable()
                .queueEvent(
                        new WatchedEvent(Watcher.Event.EventType.None, Watcher.Event.KeeperState.Expired, null) {
                            @Override
                            public String getPath() {
                                // exception will cause ZooKeeper to update current state but fail to notify
                                // watchers
                                throw new RuntimeException("Path doesn't exist!");
                            }
                        }));
        polled = queue.poll(timing.forWaiting().seconds(), TimeUnit.SECONDS);
        assertEquals(polled, ConnectionState.LOST);
        polled = queue.poll(timing.forWaiting().seconds(), TimeUnit.SECONDS);
        assertEquals(polled, ConnectionState.RECONNECTED);
    } finally {
        CloseableUtils.closeQuietly(client);
    }
}
/**
 * Handles one inbound mock-server request: answers CORS preflights, strips the
 * configured path prefix, then walks every registered mock feature looking for
 * the first scenario whose expression matches the request, executes it, and
 * builds the HTTP response from the scenario's response* variables. Falls back
 * to 404 when nothing matches. Synchronized: one request at a time, because
 * scenario globals are shared mutable state.
 */
@Override
public synchronized Response handle(Request req) { // note the [synchronized]
    // CORS preflight short-circuit: answer OPTIONS without touching scenarios.
    if (corsEnabled && "OPTIONS".equals(req.getMethod())) {
        Response response = new Response(200);
        response.setHeader("Allow", ALLOWED_METHODS);
        response.setHeader("Access-Control-Allow-Origin", "*");
        response.setHeader("Access-Control-Allow-Methods", ALLOWED_METHODS);
        List<String> requestHeaders = req.getHeaderValues("Access-Control-Request-Headers");
        if (requestHeaders != null) {
            response.setHeader("Access-Control-Allow-Headers", requestHeaders);
        }
        return response;
    }
    if (prefix != null && req.getPath().startsWith(prefix)) {
        req.setPath(req.getPath().substring(prefix.length()));
    }
    // rare case when http-client is active within same jvm
    // snapshot existing thread-local to restore
    ScenarioEngine prevEngine = ScenarioEngine.get();
    for (Map.Entry<Feature, ScenarioRuntime> entry : scenarioRuntimes.entrySet()) {
        Feature feature = entry.getKey();
        ScenarioRuntime runtime = entry.getValue();
        // important for graal to work properly
        Thread.currentThread().setContextClassLoader(runtime.featureRuntime.suite.classLoader);
        LOCAL_REQUEST.set(req);
        req.processBody();
        ScenarioEngine engine = initEngine(runtime, globals, req);
        for (FeatureSection fs : feature.getSections()) {
            if (fs.isOutline()) {
                runtime.logger.warn("skipping scenario outline - {}:{}", feature, fs.getScenarioOutline().getLine());
                break;
            }
            Scenario scenario = fs.getScenario();
            if (isMatchingScenario(scenario, engine)) {
                Map<String, Object> configureHeaders;
                Variable response, responseStatus, responseHeaders, responseDelay;
                ScenarioActions actions = new ScenarioActions(engine);
                Result result = executeScenarioSteps(feature, runtime, scenario, actions);
                engine.mockAfterScenario();
                configureHeaders = engine.mockConfigureHeaders();
                // Pull the response* variables out of the engine before the
                // remaining variables are persisted back into globals.
                response = engine.vars.remove(ScenarioEngine.RESPONSE);
                responseStatus = engine.vars.remove(ScenarioEngine.RESPONSE_STATUS);
                responseHeaders = engine.vars.remove(ScenarioEngine.RESPONSE_HEADERS);
                responseDelay = engine.vars.remove(RESPONSE_DELAY);
                globals.putAll(engine.shallowCloneVariables());
                Response res = new Response(200);
                if (result.isFailed()) {
                    // Scenario step failure maps to a 500 with the error text as body.
                    response = new Variable(result.getError().getMessage());
                    responseStatus = new Variable(500);
                } else {
                    if (corsEnabled) {
                        res.setHeader("Access-Control-Allow-Origin", "*");
                    }
                    res.setHeaders(configureHeaders);
                    if (responseHeaders != null && responseHeaders.isMap()) {
                        res.setHeaders(responseHeaders.getValue());
                    }
                    if (responseDelay != null) {
                        res.setDelay(responseDelay.getAsInt());
                    }
                }
                if (response != null && !response.isNull()) {
                    res.setBody(response.getAsByteArray());
                    if (res.getContentType() == null) {
                        // Infer the content type from the response value's shape.
                        ResourceType rt = ResourceType.fromObject(response.getValue());
                        if (rt != null) {
                            res.setContentType(rt.contentType);
                        }
                    }
                }
                if (responseStatus != null) {
                    res.setStatus(responseStatus.getAsInt());
                }
                if (prevEngine != null) {
                    ScenarioEngine.set(prevEngine);
                }
                if (mockInterceptor != null) {
                    mockInterceptor.intercept(req, res, scenario);
                }
                return res;
            }
        }
    }
    logger.warn("no scenarios matched, returning 404: {}", req); // NOTE: not logging with engine.logger
    if (prevEngine != null) {
        ScenarioEngine.set(prevEngine);
    }
    return new Response(404);
}
@Test
void testMultiPart() {
    // Simple multipart values surface via requestParams; file parts (with a
    // filename) surface via requestParts.
    background().scenario(
        "pathMatches('/hello')",
        "def foo = requestParams.foo[0]",
        "string bar = requestParts.bar[0].value",
        "def response = { foo: '#(foo)', bar: '#(bar)' }"
    );
    request.path("/hello")
        .multiPartJson("{ name: 'foo', value: 'hello world' }")
        .multiPartJson("{ name: 'bar', value: 'some bytes', filename: 'bar.txt' }")
        .method("POST");
    handle();
    match(response.getBodyConverted(), "{ foo: 'hello world', bar: 'some bytes' }");
}
/**
 * Collects the distinct field names present across all hit messages, excluding
 * the names in Message.FILTERED_FIELDS.
 */
@VisibleForTesting
Set<String> extractFields(List<ResultMessage> hits) {
    final Set<String> filteredFields = Sets.newHashSet();
    for (ResultMessage hit : hits) {
        final Message message = hit.getMessage();
        for (String field : message.getFieldNames()) {
            if (!Message.FILTERED_FIELDS.contains(field)) {
                filteredFields.add(field);
            }
        }
    }
    return filteredFields;
}
@Test
public void extractFieldsForEmptyResult() throws Exception {
    // No hits -> an empty (but non-null) field set.
    final Set<String> result = searchResult.extractFields(Collections.emptyList());
    assertThat(result)
            .isNotNull()
            .isEmpty();
}
static String getUnresolvedSchemaName(final Schema schema) { if (!isUnresolvedSchema(schema)) { throw new IllegalArgumentException("Not a unresolved schema: " + schema); } return schema.getProp(UR_SCHEMA_ATTR); }
@Test
void isUnresolvedSchemaError3() {
    assertThrows(IllegalArgumentException.class, () -> {
        // Namespace not "org.apache.avro.compiler".
        // The marker property alone is not enough: a schema with the wrong
        // name/namespace must be rejected as "not unresolved".
        Schema s = SchemaBuilder.record("UnresolvedSchema").prop("org.apache.avro.compiler.idl.unresolved.name", "x")
            .fields().endRecord();
        SchemaResolver.getUnresolvedSchemaName(s);
    });
}
/**
 * Detaches and returns the last node of the list, or null when the list is
 * empty. The removed node's previous-link is cleared; the list's first/last
 * pointers and size are updated accordingly.
 */
public T removeLast() {
    final T removed = this.lastNode;
    if ( removed == null ) {
        return null;
    }
    this.lastNode = removed.getPrevious();
    removed.setPrevious( null );
    if ( this.lastNode == null ) {
        // Removed the only node: list is now empty.
        this.firstNode = null;
    } else {
        this.lastNode.setNext( null );
    }
    this.size--;
    return removed;
}
@Test
public void testRemoveLast() {
    // Removing from the tail repeatedly walks last back through node2, node1,
    // and finally leaves an empty list where getLast() is null.
    this.list.add( this.node1 );
    this.list.add( this.node2 );
    this.list.add( this.node3 );
    assertThat(this.node3).as("Last node should be node3").isSameAs(this.list.getLast());
    this.list.removeLast();
    assertThat(this.node2).as("Last node should be node2").isSameAs(this.list.getLast());
    this.list.removeLast();
    assertThat(this.node1).as("Last node should be node1").isSameAs(this.list.getLast());
    this.list.removeLast();
    assertThat(this.list.getLast()).as("Empty list should return null on getLast()").isNull();
}
public static <InputT, OutputT> FlatMapElements<InputT, OutputT> via(
    InferableFunction<? super InputT, ? extends Iterable<OutputT>> fn) {
    // Recover the element type OutputT from the function's declared
    // Iterable<OutputT> output descriptor so the transform's output coder can
    // be inferred; the unchecked casts are safe by the method's type bounds.
    TypeDescriptor<OutputT> outputType =
        TypeDescriptors.extractFromTypeParameters(
            (TypeDescriptor<Iterable<OutputT>>) fn.getOutputTypeDescriptor(),
            Iterable.class,
            new TypeDescriptors.TypeVariableExtractor<Iterable<OutputT>, OutputT>() {});
    TypeDescriptor<InputT> inputType = (TypeDescriptor<InputT>) fn.getInputTypeDescriptor();
    return new FlatMapElements<>(fn, inputType, outputType);
}
@Test
public void testPolymorphicSimpleFunction() throws Exception {
    // Coder inference must propagate the generic type through a polymorphic
    // SimpleFunction; the trailing consumer forces that inference to run.
    pipeline.enableAbandonedNodeEnforcement(false);
    pipeline
        .apply(Create.of(1, 2, 3))
        // This is the function that needs to propagate the input T to output T
        .apply("Polymorphic Identity", MapElements.via(new PolymorphicSimpleFunction<>()))
        // This is a consumer to ensure that all coder inference logic is executed.
        .apply(
            "Test Consumer",
            MapElements.via(
                new SimpleFunction<Iterable<Integer>, Integer>() {
                    @Override
                    public Integer apply(Iterable<Integer> input) {
                        return 42;
                    }
                }));
}
ConvertedType convertToConvertedType(LogicalTypeAnnotation logicalTypeAnnotation) {
    // Maps a logical type annotation onto the legacy ConvertedType enum via the
    // visitor; returns null when no legacy equivalent exists (the visitor
    // yields an empty Optional, e.g. for NANOS precision or UUID).
    return logicalTypeAnnotation.accept(CONVERTED_TYPE_CONVERTER_VISITOR).orElse(null);
}
@Test
public void testLogicalToConvertedTypeConversion() {
    // Exhaustively checks the logical-type -> legacy ConvertedType mapping,
    // including the annotations that have NO legacy equivalent (NANOS
    // timestamps/times and UUID must map to null).
    ParquetMetadataConverter parquetMetadataConverter = new ParquetMetadataConverter();

    assertEquals(ConvertedType.UTF8, parquetMetadataConverter.convertToConvertedType(stringType()));
    assertEquals(ConvertedType.ENUM, parquetMetadataConverter.convertToConvertedType(enumType()));

    assertEquals(ConvertedType.INT_8, parquetMetadataConverter.convertToConvertedType(intType(8, true)));
    assertEquals(ConvertedType.INT_16, parquetMetadataConverter.convertToConvertedType(intType(16, true)));
    assertEquals(ConvertedType.INT_32, parquetMetadataConverter.convertToConvertedType(intType(32, true)));
    assertEquals(ConvertedType.INT_64, parquetMetadataConverter.convertToConvertedType(intType(64, true)));
    assertEquals(ConvertedType.UINT_8, parquetMetadataConverter.convertToConvertedType(intType(8, false)));
    assertEquals(ConvertedType.UINT_16, parquetMetadataConverter.convertToConvertedType(intType(16, false)));
    assertEquals(ConvertedType.UINT_32, parquetMetadataConverter.convertToConvertedType(intType(32, false)));
    assertEquals(ConvertedType.UINT_64, parquetMetadataConverter.convertToConvertedType(intType(64, false)));
    assertEquals(ConvertedType.DECIMAL, parquetMetadataConverter.convertToConvertedType(decimalType(8, 16)));

    assertEquals(
        ConvertedType.TIMESTAMP_MILLIS,
        parquetMetadataConverter.convertToConvertedType(timestampType(true, MILLIS)));
    assertEquals(
        ConvertedType.TIMESTAMP_MICROS,
        parquetMetadataConverter.convertToConvertedType(timestampType(true, MICROS)));
    assertNull(parquetMetadataConverter.convertToConvertedType(timestampType(true, NANOS)));
    assertEquals(
        ConvertedType.TIMESTAMP_MILLIS,
        parquetMetadataConverter.convertToConvertedType(timestampType(false, MILLIS)));
    assertEquals(
        ConvertedType.TIMESTAMP_MICROS,
        parquetMetadataConverter.convertToConvertedType(timestampType(false, MICROS)));
    assertNull(parquetMetadataConverter.convertToConvertedType(timestampType(false, NANOS)));

    assertEquals(
        ConvertedType.TIME_MILLIS,
        parquetMetadataConverter.convertToConvertedType(timeType(true, MILLIS)));
    assertEquals(
        ConvertedType.TIME_MICROS,
        parquetMetadataConverter.convertToConvertedType(timeType(true, MICROS)));
    assertNull(parquetMetadataConverter.convertToConvertedType(timeType(true, NANOS)));
    assertEquals(
        ConvertedType.TIME_MILLIS,
        parquetMetadataConverter.convertToConvertedType(timeType(false, MILLIS)));
    assertEquals(
        ConvertedType.TIME_MICROS,
        parquetMetadataConverter.convertToConvertedType(timeType(false, MICROS)));
    assertNull(parquetMetadataConverter.convertToConvertedType(timeType(false, NANOS)));

    assertEquals(ConvertedType.DATE, parquetMetadataConverter.convertToConvertedType(dateType()));
    assertEquals(
        ConvertedType.INTERVAL,
        parquetMetadataConverter.convertToConvertedType(
            LogicalTypeAnnotation.IntervalLogicalTypeAnnotation.getInstance()));
    assertEquals(ConvertedType.JSON, parquetMetadataConverter.convertToConvertedType(jsonType()));
    assertEquals(ConvertedType.BSON, parquetMetadataConverter.convertToConvertedType(bsonType()));
    assertNull(parquetMetadataConverter.convertToConvertedType(uuidType()));
    assertEquals(ConvertedType.LIST, parquetMetadataConverter.convertToConvertedType(listType()));
    assertEquals(ConvertedType.MAP, parquetMetadataConverter.convertToConvertedType(mapType()));
    assertEquals(
        ConvertedType.MAP_KEY_VALUE,
        parquetMetadataConverter.convertToConvertedType(
            LogicalTypeAnnotation.MapKeyValueTypeAnnotation.getInstance()));
}
/**
 * Creates a JMS topic or queue for the given name, tolerating an optional
 * scheme prefix ("topic://", "topic:", "queue://", "queue:") on the name.
 */
@Override
public Destination createDestination(final Session session, String name, final boolean topic)
        throws JMSException {
    if (topic) {
        // Strip either prefix form; stripPrefix is a no-op when absent.
        final String topicName =
                URISupport.stripPrefix(URISupport.stripPrefix(name, "topic://"), "topic:");
        return session.createTopic(topicName);
    }
    final String queueName =
            URISupport.stripPrefix(URISupport.stripPrefix(name, "queue://"), "queue:");
    return session.createQueue(queueName);
}
@Test
public void testQueueCreation() throws Exception {
    // All three spellings — "queue://test", "queue:test" and bare "test" —
    // must resolve to the same queue name.
    Queue destination = (Queue) strategy.createDestination(getSession(), "queue://test", false);
    assertNotNull(destination);
    assertEquals("test", destination.getQueueName());
    destination = (Queue) strategy.createDestination(getSession(), "queue:test", false);
    assertNotNull(destination);
    assertEquals("test", destination.getQueueName());
    destination = (Queue) strategy.createDestination(getSession(), "test", false);
    assertNotNull(destination);
    assertEquals("test", destination.getQueueName());
}
/**
 * Builds the default failure context: the id is wrapped in a single-element
 * list keyed by idKey, nested under "path_params".
 */
@Override
public Map<String, Object> create(final String idKey, final String id) {
    final Map<String, List<String>> pathParams = Map.of(idKey, List.of(id));
    return Map.of("path_params", pathParams);
}
@Test
void createsProperContext() {
    // The context nests the id as path_params -> idKey -> [id].
    final Map<String, Object> expected = Map.of("path_params", Map.of("streamId", List.of("00000000000000042")));
    DefaultFailureContextCreator toTest = new DefaultFailureContextCreator();
    assertThat(toTest.create("streamId", "00000000000000042")).isEqualTo(expected);
}
public static <T> Flattened<T> flattenedSchema() {
    // Flatten with the default naming policy: nested field names joined into
    // top-level names via CONCAT_FIELD_NAMES, with no per-field overrides.
    return new AutoValue_Select_Flattened.Builder<T>()
        .setNameFn(CONCAT_FIELD_NAMES)
        .setNameOverrides(Collections.emptyMap())
        .build();
}
@Test
@Category(NeedsRunner.class)
public void testClashingNamePolicyFlatten() {
    // keepMostNestedFieldName() makes the two identical nested rows produce
    // clashing top-level names, which must be rejected with
    // IllegalArgumentException at pipeline construction/run.
    List<Row> bottomRow =
        IntStream.rangeClosed(0, 2)
            .mapToObj(i -> Row.withSchema(SIMPLE_SCHEMA).addValues(i, Integer.toString(i)).build())
            .collect(Collectors.toList());
    thrown.expect(IllegalArgumentException.class);
    List<Row> rows =
        bottomRow.stream()
            .map(r -> Row.withSchema(NESTED_SCHEMA).addValues(r, r).build())
            .collect(Collectors.toList());
    pipeline
        .apply(Create.of(rows).withRowSchema(NESTED_SCHEMA))
        .apply(Select.<Row>flattenedSchema().keepMostNestedFieldName());
    pipeline.run();
}
@Bean
public ShenyuPlugin apacheDubboPlugin(final ObjectProvider<DubboParamResolveService> dubboParamResolveServices) {
    // Wires the Apache Dubbo plugin around its proxy service. getIfAvailable()
    // may return null when no DubboParamResolveService bean is registered —
    // presumably the proxy service tolerates that; TODO confirm.
    return new ApacheDubboPlugin(new ApacheDubboProxyService(dubboParamResolveServices.getIfAvailable()));
}
@Test
public void testApacheDubboPlugin() {
    // The auto-configured bean must exist and report the Dubbo plugin's
    // canonical name and order.
    applicationContextRunner.run(context -> {
            ShenyuPlugin plugin = context.getBean("apacheDubboPlugin", ShenyuPlugin.class);
            assertNotNull(plugin);
            assertThat(plugin.named(), is(PluginEnum.DUBBO.getName()));
            assertThat(plugin.getOrder(), is(PluginEnum.DUBBO.getCode()));
        }
    );
}
/**
 * Returns the job vertices in a topological order, starting from vertices with
 * no connected inputs. Repeatedly expands from already-sorted vertices; if the
 * sorted frontier is exhausted while vertices remain, the graph contains a
 * cycle and InvalidProgramException is thrown.
 */
public List<JobVertex> getVerticesSortedTopologicallyFromSources() throws InvalidProgramException {
    // early out on empty lists
    if (this.taskVertices.isEmpty()) {
        return Collections.emptyList();
    }

    List<JobVertex> sorted = new ArrayList<JobVertex>(this.taskVertices.size());
    Set<JobVertex> remaining = new LinkedHashSet<JobVertex>(this.taskVertices.values());

    // start by finding the vertices with no input edges
    // and the ones with disconnected inputs (that refer to some standalone data set)
    {
        Iterator<JobVertex> iter = remaining.iterator();
        while (iter.hasNext()) {
            JobVertex vertex = iter.next();
            if (vertex.hasNoConnectedInputs()) {
                sorted.add(vertex);
                iter.remove();
            }
        }
    }

    int startNodePos = 0;

    // traverse from the nodes that were added until we found all elements
    while (!remaining.isEmpty()) {
        // first check if we have more candidates to start traversing from. if not, then the
        // graph is cyclic, which is not permitted
        if (startNodePos >= sorted.size()) {
            throw new InvalidProgramException("The job graph is cyclic.");
        }
        JobVertex current = sorted.get(startNodePos++);
        addNodesThatHaveNoNewPredecessors(current, sorted, remaining);
    }

    return sorted;
}
@Test
public void testTopoSortCyclicGraphNoSources() {
    // A 4-vertex cycle with no source vertex at all must be detected as cyclic.
    try {
        JobVertex v1 = new JobVertex("1");
        JobVertex v2 = new JobVertex("2");
        JobVertex v3 = new JobVertex("3");
        JobVertex v4 = new JobVertex("4");
        v1.connectNewDataSetAsInput(
                v4, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        v2.connectNewDataSetAsInput(
                v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        v3.connectNewDataSetAsInput(
                v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        v4.connectNewDataSetAsInput(
                v3, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        JobGraph jg = JobGraphTestUtils.streamingJobGraph(v1, v2, v3, v4);
        try {
            jg.getVerticesSortedTopologicallyFromSources();
            fail("Failed to raise error on topologically sorting cyclic graph.");
        } catch (InvalidProgramException e) {
            // that what we wanted
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
/**
 * Builds a new Connect client for this request. The default auth header is
 * computed lazily exactly once (the method is synchronized to guard that
 * check-then-act); everything else — SSL context, custom headers, timeout —
 * is derived fresh from the current config on every call.
 */
@Override
public synchronized DefaultConnectClient get(
    final Optional<String> ksqlAuthHeader,
    final List<Entry<String, String>> incomingRequestHeaders,
    final Optional<KsqlPrincipal> userPrincipal
) {
    if (defaultConnectAuthHeader == null) {
        defaultConnectAuthHeader = buildDefaultAuthHeader();
    }

    // Config values scoped to the Connect client via the ksql.connect. prefix.
    final Map<String, Object> configWithPrefixOverrides =
        ksqlConfig.valuesWithPrefixOverride(KsqlConfig.KSQL_CONNECT_PREFIX);

    return new DefaultConnectClient(
        ksqlConfig.getString(KsqlConfig.CONNECT_URL_PROPERTY),
        buildAuthHeader(ksqlAuthHeader, incomingRequestHeaders),
        requestHeadersExtension
            .map(extension -> extension.getHeaders(userPrincipal))
            .orElse(Collections.emptyMap()),
        Optional.ofNullable(newSslContext(configWithPrefixOverrides)),
        shouldVerifySslHostname(configWithPrefixOverrides),
        ksqlConfig.getLong(KsqlConfig.CONNECT_REQUEST_TIMEOUT_MS)
    );
}
@Test
public void shouldBuildAuthHeaderOnlyOnce() throws Exception {
    // Given:
    givenCustomBasicAuthHeader();
    givenValidCredentialsFile();

    // When: get() is called twice
    connectClientFactory.get(Optional.empty(), Collections.emptyList(), Optional.empty());
    connectClientFactory.get(Optional.empty(), Collections.emptyList(), Optional.empty());

    // Then: only loaded the credentials once -- ideally we'd check the number of times the file
    // was read but this is an acceptable proxy for this unit test
    verify(config, times(1)).getString(KsqlConfig.CONNECT_BASIC_AUTH_CREDENTIALS_FILE_PROPERTY);
}
/**
 * Extracts the raw JWT from an Authorization header value by stripping the
 * leading scheme prefix. A header without the prefix is returned unchanged.
 *
 * @param authorizationHeader the Authorization header value
 * @return the header with a leading TOKEN_PREFIX removed
 */
public static String getJwt(final String authorizationHeader) {
    // Strip only a LEADING prefix. The previous replace(TOKEN_PREFIX, "")
    // removed every occurrence anywhere in the string, which would corrupt a
    // token that happens to contain the prefix as a substring.
    if (authorizationHeader.startsWith(TOKEN_PREFIX)) {
        return authorizationHeader.substring(TOKEN_PREFIX.length());
    }
    return authorizationHeader;
}
@Test
void testGetJwt_WithInvalidTokenFormat() {
    // A header without the expected prefix is returned unchanged.
    // Given
    String authorizationHeader = "sampleAccessToken";

    // When
    String jwt = Token.getJwt(authorizationHeader);

    // Then
    assertEquals("sampleAccessToken", jwt);
}
public static String toLowerHex(long v) {
    // Renders v as exactly 16 lower-case hex characters (zero-padded), writing
    // into a recycled parse buffer to avoid a per-call char[] allocation.
    char[] data = RecyclableBuffers.parseBuffer();
    writeHexLong(data, 0, v);
    return new String(data, 0, 16);
}
@Test
void toLowerHex_maxValue() {
    // Long.MAX_VALUE == 0x7fffffffffffffff. Renamed from toLowerHex_minValue:
    // the old name said "min" while the assertion exercises the maximum value.
    assertThat(toLowerHex(Long.MAX_VALUE)).isEqualTo("7fffffffffffffff");
}
@Nonnull
public <K, V> Consumer<K, V> newConsumer() {
    // Convenience overload: create a consumer with no additional properties.
    return newConsumer(EMPTY_PROPERTIES);
}
@Test
public void newConsumer_should_fail_with_shared_data_connection() {
    // KafkaConsumer is not thread-safe, so BOTH newConsumer overloads must be
    // rejected on a shared data connection with the same message.
    kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport);
    assertThatThrownBy(() -> kafkaDataConnection.newConsumer())
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("KafkaConsumer is not thread-safe and can't be used"
                    + " with shared DataConnection 'kafka-data-connection'");
    assertThatThrownBy(() -> kafkaDataConnection.newConsumer(new Properties()))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("KafkaConsumer is not thread-safe and can't be used"
                    + " with shared DataConnection 'kafka-data-connection'");
}
public <T> HttpRestResult<T> exchangeForm(String url, Header header, Query query, Map<String, String> bodyValues,
        String httpMethod, Type responseType) throws Exception {
    // Form-encoded exchange: forces the Content-Type to
    // application/x-www-form-urlencoded (note: this MUTATES the caller's
    // header object) and sends bodyValues as the form body.
    RequestHttpEntity requestHttpEntity = new RequestHttpEntity(
            header.setContentType(MediaType.APPLICATION_FORM_URLENCODED), query, bodyValues);
    return execute(url, httpMethod, requestHttpEntity, responseType);
}
@Test
void testExchangeForm() throws Exception {
    // exchangeForm must overwrite the caller's Content-Type (XML here) with
    // application/x-www-form-urlencoded and return the parsed body.
    when(requestClient.execute(any(), eq("PUT"), any())).thenReturn(mockResponse);
    when(mockResponse.getStatusCode()).thenReturn(200);
    when(mockResponse.getBody()).thenReturn(new ByteArrayInputStream("test".getBytes()));
    Header header = Header.newInstance().setContentType(MediaType.APPLICATION_XML);
    HttpRestResult<String> result = restTemplate.exchangeForm("http://127.0.0.1:8848/nacos/test", header,
            Query.EMPTY, new HashMap<>(), "PUT", String.class);
    assertTrue(result.ok());
    assertEquals(Header.EMPTY, result.getHeader());
    assertEquals("test", result.getData());
    assertEquals(MediaType.APPLICATION_FORM_URLENCODED, header.getValue(HttpHeaderConsts.CONTENT_TYPE));
}
/**
 * Renders the owner prefix for the given route unit.
 *
 * <p>When the owner refers to the logical table itself, the prefix is rewritten
 * to the routed (actual) table name; otherwise the plain owner rendering is used.
 *
 * @param routeUnit route unit providing logical-to-actual table mappings
 * @return the rendered owner prefix including the trailing dot
 */
@Override
public String toString(final RouteUnit routeUnit) {
    boolean ownerIsTable = null != ownerName && !Strings.isNullOrEmpty(ownerName.getValue())
            && tableName.getValue().equals(ownerName.getValue());
    if (!ownerIsTable) {
        return toString();
    }
    Set<String> actualTableNames = routeUnit.getActualTableNames(tableName.getValue());
    String actualTableName;
    if (actualTableNames.isEmpty()) {
        // No routing result for this table: fall back to the lower-cased logical name.
        actualTableName = tableName.getValue().toLowerCase();
    } else {
        actualTableName = actualTableNames.iterator().next();
    }
    return tableName.getQuoteCharacter().wrap(actualTableName) + ".";
}
// When the owner name equals the logical table name, the token must render the
// routed (actual) table name as the owner prefix.
@Test void assertOwnerTokenWithOwnerNameEqualsTableName() {
    OwnerToken ownerToken = new OwnerToken(0, 1, new IdentifierValue("t_user"), new IdentifierValue("t_user"));
    assertThat(ownerToken.toString(buildRouteUnit()), is("t_user_0."));
    assertTokenGrid(ownerToken);
}
/**
 * Reads a single thrift struct from {@code in} and writes it to {@code out}.
 *
 * <p>Delegates to {@code readOneStruct}, which performs the buffered
 * read-validate-write cycle.
 *
 * @throws TException if reading or writing the struct fails
 */
@Override public void readOne(TProtocol in, TProtocol out) throws TException { readOneStruct(in, out); }
// Writes a OneOfEach record, then tries to read it back against the AddressBook
// schema: the type mismatch must surface as a SkippableException wrapping a
// DecodingSchemaMismatchException, without invoking the error handler's
// missing-field / ignored-field callbacks.
@Test public void testIncompatibleSchemaRecord() throws Exception {
    // handler will rethrow the exception for verifying purpose
    CountingErrorHandler countingHandler = new CountingErrorHandler();
    BufferedProtocolReadToWrite p = new BufferedProtocolReadToWrite(ThriftSchemaConverter.toStructType(AddressBook.class), countingHandler);
    final ByteArrayOutputStream in = new ByteArrayOutputStream();
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    OneOfEach a = new OneOfEach(true, false, (byte) 8, (short) 16, (int) 32, (long) 64, (double) 1234, "string", "å", false, ByteBuffer.wrap("a".getBytes()), new ArrayList<Byte>(), new ArrayList<Short>(), new ArrayList<Long>());
    a.write(protocol(in));
    try {
        p.readOne(protocol(new ByteArrayInputStream(in.toByteArray())), protocol(out));
        fail("this should throw");
    } catch (SkippableException e) {
        // The root cause must identify the schema mismatch and the offending type.
        Throwable cause = e.getCause();
        assertTrue(cause instanceof DecodingSchemaMismatchException);
        assertTrue(cause.getMessage().contains("the data type does not match the expected thrift structure"));
        assertTrue(cause.getMessage().contains("got BOOL"));
    }
    // A hard mismatch must not be counted as a recoverable missing/ignored field.
    assertEquals(0, countingHandler.recordCountOfMissingFields);
    assertEquals(0, countingHandler.fieldIgnoredCount);
}
/**
 * Resolves a cgroup OOM condition by killing containers until the kernel no
 * longer reports the memory cgroup as under OOM.
 *
 * <p>Polls the {@code memory.oom_control} cgroup parameter in a loop; each
 * iteration that still reports under-OOM kills one container. Throws if the
 * OOM persists but no killable container can be found.
 */
@Override public void run() {
    try {
        // We kill containers until the kernel reports the OOM situation resolved
        // Note: If the kernel has a delay this may kill more than necessary
        while (true) {
            String status = cgroups.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL);
            if (!status.contains(CGroupsHandler.UNDER_OOM)) {
                break;
            }
            boolean containerKilled = killContainer();
            if (!containerKilled) {
                // This can happen, if SIGKILL did not clean up
                // non-PGID or containers or containers launched by other users
                // or if a process was put to the root YARN cgroup.
                throw new YarnRuntimeException("Could not find any containers but CGroups " + "reserved for containers ran out of memory. " + "I am giving up");
            }
        }
    } catch (ResourceHandlerException ex) {
        // Expected when the cgroup files disappear during NM shutdown.
        LOG.warn("Could not fetch OOM status. " + "This is expected at shutdown. Exiting.", ex);
    }
}
// With the cgroup reporting under_oom but no containers available to kill,
// the handler must give up with a YarnRuntimeException.
@Test(expected = YarnRuntimeException.class)
public void testExceptionThrownWithNoContainersToKill() throws Exception {
    Context context = mock(Context.class);
    // Empty container map: killContainer() will have nothing to kill.
    when(context.getContainers()).thenReturn(new ConcurrentHashMap<>(0));
    CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
    // First poll reports OOM, second would report resolved — the handler must
    // throw before reaching the second poll.
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL)).thenReturn("under_oom 1").thenReturn("under_oom 0");
    DefaultOOMHandler handler = new DefaultOOMHandler(context, false) {
        @Override protected CGroupsHandler getCGroupsHandler() { return cGroupsHandler; }
    };
    handler.run();
}
/**
 * Evaluates the wrapped expression against the given evaluation context.
 *
 * @param elContext evaluation context passed to the underlying expression
 * @return the evaluated value
 */
@Override public Object getValue(Object elContext) { return expression.getValue(elContext); }
// Evaluates a simple SpEL string-concat expression through SpringELExpression.
@Test public void testGetValue() {
    ExpressionParser parser = new SpelExpressionParser();
    Expression defaultExpression = parser.parseExpression("'Hello World'.concat('!')");
    String value = (String) new SpringELExpression(defaultExpression).getValue(null);
    // Fix: JUnit's assertEquals takes (expected, actual); the arguments were reversed,
    // which produces a misleading failure message when the assertion trips.
    Assertions.assertEquals("Hello World!", value);
}
/**
 * Returns the exception-handling strategy for the given log table.
 *
 * <p>Convenience overload that delegates with a {@code null} exception, i.e.
 * the strategy is chosen from the table alone.
 *
 * @param table log table whose database determines the strategy
 * @return the behaviour to apply when logging to this table fails
 */
public static LogExceptionBehaviourInterface getExceptionStrategy( LogTableCoreInterface table ) { return getExceptionStrategy( table, null ); }
// A MySQL PacketTooBigException wrapped in a KettleDatabaseException must map
// to the suppressable-with-short-message strategy (full packet dumps are noise).
@Test public void testExceptionStrategyWithPacketTooBigException() {
    DatabaseMeta databaseMeta = mock( DatabaseMeta.class );
    DatabaseInterface databaseInterface = new MySQLDatabaseMeta();
    PacketTooBigException e = new PacketTooBigException();
    when( logTable.getDatabaseMeta() ).thenReturn( databaseMeta );
    when( databaseMeta.getDatabaseInterface() ).thenReturn( databaseInterface );
    LogExceptionBehaviourInterface exceptionStrategy = DatabaseLogExceptionFactory.getExceptionStrategy( logTable, new KettleDatabaseException( e ) );
    String strategyName = exceptionStrategy.getClass().getName();
    assertEquals( SUPPRESSABLE_WITH_SHORT_MESSAGE, strategyName );
}
/**
 * Creates a temporary file in the given directory.
 *
 * <p>Delegates with prefix {@code "hutool"}, no suffix, and re-creation
 * enabled.
 *
 * @param dir directory to create the temp file in
 * @return the created temporary file
 * @throws IORuntimeException if file creation fails
 */
public static File createTempFile(File dir) throws IORuntimeException { return createTempFile("hutool", null, dir, true); }
// Exercises the createTempFile overloads: default (no dir), suffix-only, and
// prefix+suffix. Disabled because it writes real files to the temp directory.
@Test @Disabled public void createTempFileTest(){
    final File nullDirTempFile = FileUtil.createTempFile();
    assertTrue(nullDirTempFile.exists());
    final File suffixDirTempFile = FileUtil.createTempFile(".xlsx",true);
    // getSuffix returns the extension without the leading dot.
    assertEquals("xlsx", FileUtil.getSuffix(suffixDirTempFile));
    final File prefixDirTempFile = FileUtil.createTempFile("prefix",".xlsx",true);
    assertTrue(FileUtil.getPrefix(prefixDirTempFile).startsWith("prefix"));
}
/**
 * One-time initialization of the traffic-control bandwidth handler.
 *
 * <p>Mounts the {@code net_cls} cgroup controller, reads the network device
 * and bandwidth limits from configuration, derives the per-container soft
 * limit, and bootstraps the traffic controller.
 *
 * @param configuration NM configuration to read settings from
 * @return always {@code null}; no privileged operations are deferred
 * @throws ResourceHandlerException if cgroup or tc initialization fails
 */
@Override public List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException {
    conf = configuration;
    //We'll do this inline for the time being - since this is a one time
    //operation. At some point, LCE code can be refactored to batch mount
    //operations across multiple controllers - cpu, net_cls, blkio etc
    cGroupsHandler.initializeCGroupController(CGroupsHandler.CGroupController.NET_CLS);
    device = conf.get(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE, YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_INTERFACE);
    strictMode = configuration.getBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE);
    rootBandwidthMbit = conf.getInt(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT, YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT);
    // YARN's share defaults to the full root bandwidth when not configured.
    yarnBandwidthMbit = conf.getInt(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT, rootBandwidthMbit);
    // Per-container soft limit: YARN bandwidth divided evenly across the
    // maximum supported container count (ceiling so the limit is never 0).
    containerBandwidthMbit = (int) Math.ceil((double) yarnBandwidthMbit / MAX_CONTAINER_COUNT);
    StringBuilder logLine = new StringBuilder("strict mode is set to :").append(strictMode).append(System.lineSeparator());
    if (strictMode) {
        logLine.append("container bandwidth will be capped to soft limit.").append(System.lineSeparator());
    } else {
        logLine.append("containers will be allowed to use spare YARN bandwidth.").append(System.lineSeparator());
    }
    logLine.append("containerBandwidthMbit soft limit (in mbit/sec) is set to : ").append(containerBandwidthMbit);
    LOG.info(logLine.toString());
    trafficController.bootstrap(device, rootBandwidthMbit, yarnBandwidthMbit);
    return null;
}
// bootstrap() must initialize the net_cls cgroup controller and hand the
// configured device/bandwidths to the traffic controller — and nothing else.
@Test public void testBootstrap() {
    TrafficControlBandwidthHandlerImpl handlerImpl = new TrafficControlBandwidthHandlerImpl(privilegedOperationExecutorMock, cGroupsHandlerMock, trafficControllerMock);
    try {
        handlerImpl.bootstrap(conf);
        verify(cGroupsHandlerMock).initializeCGroupController(eq(CGroupsHandler.CGroupController.NET_CLS));
        verifyNoMoreInteractions(cGroupsHandlerMock);
        verify(trafficControllerMock).bootstrap(eq(device), eq(ROOT_BANDWIDTH_MBIT), eq(YARN_BANDWIDTH_MBIT));
        verifyNoMoreInteractions(trafficControllerMock);
    } catch (ResourceHandlerException e) {
        LOG.error("Unexpected exception: " + e);
        Assert.fail("Caught unexpected ResourceHandlerException!");
    }
}
/**
 * Submits an edit-log entry and blocks until the journal task completes.
 *
 * @param op       numeric opcode of the edit
 * @param writable payload to journal
 */
protected void logEdit(short op, Writable writable) { JournalTask task = submitLog(op, writable, -1); waitInfinity(task); }
// Fix: method name had a typo ("testtNormal"); JUnit discovers tests via the
// @Test annotation, so renaming is safe. The test spins up 20 producer threads
// each logging one edit, a consumer that marks every queued task succeeded, and
// asserts the queue drains completely.
@Test public void testNormal() throws Exception {
    BlockingQueue<JournalTask> logQueue = new ArrayBlockingQueue<>(100);
    short threadNum = 20;
    List<Thread> allThreads = new ArrayList<>();
    for (short i = 0; i != threadNum; i++) {
        final short n = i;
        allThreads.add(new Thread(new Runnable() {
            @Override public void run() {
                EditLog editLog = new EditLog(logQueue);
                editLog.logEdit(n, new Text("111"));
            }
        }));
    }
    // Consumer completes each task so the producers' blocking waits return.
    Thread consumer = new Thread(new Runnable() {
        @Override public void run() {
            for (int i = 0; i != threadNum; i++) {
                try {
                    JournalTask task = logQueue.take();
                    task.markSucceed();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    });
    consumer.start();
    for (Thread producer : allThreads) {
        producer.start();
    }
    for (Thread producer : allThreads) {
        producer.join();
    }
    consumer.join();
    Assert.assertEquals(0, logQueue.size());
}
/**
 * Serializes {@code instance} by writing each of its mapped fields to the
 * ASN.1 output stream.
 *
 * @throws IOException if writing to the stream fails
 */
@Override public void serialize(Asn1OutputStream out, Class<? extends Object> type, Object instance, Asn1ObjectMapper mapper) throws IOException { writeFields(mapper, out, type, instance); }
// Serializing Set(1, 2) must yield two context-tagged integers:
// tag 0x81 value 1, then tag 0x82 value 2 (each with length byte 1).
@Test public void shouldSerialize() {
    assertArrayEquals(
        new byte[] { (byte) 0x81, 1, 0x01, (byte) 0x82, 1, 0x02 },
        serialize(new SetConverter(), Set.class, new Set(1, 2))
    );
}
public static void main(String[] args) { // Getting the bar series BarSeries series = CsvTradesLoader.loadBitstampSeries(); // Building the trading strategy Strategy strategy = buildStrategy(series); // Running the strategy BarSeriesManager seriesManager = new BarSeriesManager(series); TradingRecord tradingRecord = seriesManager.run(strategy); System.out.println("Number of positions for the strategy: " + tradingRecord.getPositionCount()); // Analysis System.out.println("Total return for the strategy: " + new ReturnCriterion().calculate(series, tradingRecord)); }
// Smoke test: the example's main() must run end-to-end without throwing.
@Test public void test() { RSI2Strategy.main(null); }
/**
 * Returns the union of this resource set with {@code other}.
 *
 * <p>Returning {@code other} directly is correct only because this instance
 * represents the empty set — presumably this is the empty DiscreteResources
 * implementation; verify against the enclosing class.
 */
@Override public DiscreteResources add(DiscreteResources other) { return other; }
// Adding any set to the empty resource set must return that set unchanged.
@Test public void testAdd() {
    DiscreteResource res1 = Resources.discrete(DeviceId.deviceId("a")).resource();
    DiscreteResource res2 = Resources.discrete(DeviceId.deviceId("b")).resource();
    assertThat(sut.add(DiscreteResources.of(ImmutableSet.of(res1))), is(DiscreteResources.of(ImmutableSet.of(res1))));
    assertThat(sut.add(DiscreteResources.of(ImmutableSet.of(res2))), is(DiscreteResources.of(ImmutableSet.of(res2))));
}
@Override public void handle(final RoutingContext routingContext) { // We must set it to allow chunked encoding if we're using http1.1 if (routingContext.request().version() == HttpVersion.HTTP_1_1) { routingContext.response().putHeader(TRANSFER_ENCODING, CHUNKED_ENCODING); } else if (routingContext.request().version() == HttpVersion.HTTP_2) { // Nothing required } else { routingContext.fail(BAD_REQUEST.code(), new KsqlApiException("This endpoint is only available when using HTTP1.1 or HTTP2", ERROR_CODE_BAD_REQUEST)); } final CommonRequest request = getRequest(routingContext); if (request == null) { return; } final Optional<Boolean> internalRequest = ServerVerticle.isInternalRequest(routingContext); final MetricsCallbackHolder metricsCallbackHolder = new MetricsCallbackHolder(); final long startTimeNanos = Time.SYSTEM.nanoseconds(); endpoints.createQueryPublisher( request.sql, request.configOverrides, request.sessionProperties, request.requestProperties, context, server.getWorkerExecutor(), DefaultApiSecurityContext.create(routingContext, server), metricsCallbackHolder, internalRequest) .thenAccept(publisher -> { if (publisher instanceof BlockingPrintPublisher) { handlePrintPublisher( routingContext, (BlockingPrintPublisher) publisher); } else { handleQueryPublisher( routingContext, (QueryPublisher) publisher, metricsCallbackHolder, startTimeNanos); } }) .exceptionally(t -> ServerUtils.handleEndpointException(t, routingContext, "Failed to execute query")); }
// A scalable push query must attach a subscriber and close the publisher when
// the response ends.
@Test public void shouldSucceed_scalablePushQuery() {
    // Given:
    when(publisher.isPullQuery()).thenReturn(false);
    when(publisher.isScalablePushQuery()).thenReturn(true);
    final QueryStreamArgs req = new QueryStreamArgs("select * from foo emit changes;",
        Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
    givenRequest(req);
    // When:
    handler.handle(routingContext);
    // Simulate the HTTP response ending, which must trigger cleanup.
    endHandler.getValue().handle(null);
    // Then:
    assertThat(subscriber.getValue(), notNullValue());
    verify(publisher).close();
}
/**
 * Returns the configured secondary broker patterns.
 *
 * <p>NOTE(review): this exposes the internal list directly — callers can
 * mutate policy state. Consider returning an unmodifiable view; verify no
 * caller relies on mutating it before changing.
 */
@Override public List<String> getSecondaryBrokers() { return this.secondary; }
// The default policy fixture must expose exactly one secondary broker pattern.
@Test public void testGetSecondaryBrokers() throws Exception {
    List<String> secondaryBrokers = this.getDefaultPolicy().getSecondaryBrokers();
    assertEquals(secondaryBrokers.size(), 1);
    assertEquals(secondaryBrokers.get(0), "prod1-broker.*.use.example.com");
}
/**
 * Controls whether full exception details are included in error responses.
 *
 * @param sendFullErrorException {@code true} to include the exception message/stack
 *        in responses; {@code false} to send only a generic message
 */
public void setSendFullErrorException(boolean sendFullErrorException) { this.sendFullErrorException = sendFullErrorException; }
// With full-error reporting disabled, a FlowableForbiddenException must still
// map to 403 with the exception message, but without a stack trace field.
@Test void handleFlowableForbiddenExceptionWithoutSendFullErrorException() throws Exception {
    testController.exceptionSupplier = () -> new FlowableForbiddenException("no access to task");
    handlerAdvice.setSendFullErrorException(false);
    String body = mockMvc.perform(get("/"))
            .andExpect(status().isForbidden())
            .andReturn()
            .getResponse()
            .getContentAsString();
    assertThatJson(body)
            .isEqualTo("{"
                    + "  message: 'Forbidden',"
                    + "  exception: 'no access to task'"
                    + "}");
}