focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Splits a string into a list of substrings around matches of the given
 * regular expression. Returns null when either argument is null.
 */
@Udf(description = "Splits a string into an array of substrings based on a regexp.")
public List<String> regexpSplit(
    @UdfParameter(
        description = "The string to be split. If NULL, then function returns NULL.")
    final String string,
    @UdfParameter(
        description = "The regular expression to split the string by. "
            + "If NULL, then function returns NULL.")
    final String regexp) {
  if (string == null || regexp == null) {
    return null;
  }
  // Use Guava version to be compatible with other splitting functions.
  final Pattern p = getPattern(regexp);
  if (regexp.isEmpty() || p.matcher("").matches()) {
    // Guava's Splitter rejects patterns that can match the empty string, so
    // fall back to Pattern#split, which for such patterns splits between
    // every character (e.g. "" splits "x-y" into "x", "-", "y").
    return Arrays.asList(p.split(string));
  } else {
    return Splitter.on(p).splitToList(string);
  }
}
@Test public void shouldSplitAllCharactersByGivenAnEmptyRegexp() { assertThat(udf.regexpSplit("", ""), contains("")); assertThat(udf.regexpSplit("x-y", ""), contains("x", "-", "y")); assertThat(udf.regexpSplit("x", ""), contains("x")); }
/**
 * Deletes the given paths from the local filesystem, notifying the callback
 * for each one. Files and symbolic links are removed in a first pass so that
 * directories are empty when they are removed in the second pass.
 *
 * @throws BackgroundException when a path cannot be deleted
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    // First pass: plain files and symlinks.
    for(Path file : files.keySet()) {
        if(file.isFile() || file.isSymbolicLink()) {
            this.delete(file, callback);
        }
    }
    // Second pass: directories (now empty, assuming the map covered their contents).
    for(Path file : files.keySet()) {
        if(file.isDirectory() && !file.isSymbolicLink()) {
            this.delete(file, callback);
        }
    }
}

/**
 * Notifies the callback and removes a single path, mapping I/O failures to
 * a BackgroundException.
 */
private void delete(final Path file, final Callback callback) throws BackgroundException {
    callback.delete(file);
    try {
        Files.delete(session.toPath(file));
    }
    catch(IOException e) {
        throw new LocalExceptionMappingService().map("Cannot delete {0}", e, file);
    }
}
@Test public void testDelete() throws Exception { final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname())); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final Path file = new Path(new LocalHomeFinderFeature().find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); new LocalTouchFeature(session).touch(file, new TransferStatus()); final Path folder = new Path(new LocalHomeFinderFeature().find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)); new LocalDirectoryFeature(session).mkdir(folder, new TransferStatus()); new LocalDeleteFeature(session).delete(new ArrayList<>(Arrays.asList(file, folder)), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(Files.exists(session.toPath(file))); assertFalse(Files.exists(session.toPath(folder))); }
/**
 * Tokenizes a "k1=v1 k2=v2"-style string into a map of key/value pairs,
 * stripping surrounding quotes from keys and values. Null/empty input is
 * returned unchanged; input without '=' yields an empty map.
 */
@Override
public Object convert(String value) {
    if (isNullOrEmpty(value)) {
        return value;
    }
    if (value.contains("=")) {
        final Map<String, String> fields = new HashMap<>();
        Matcher m = PATTERN.matcher(value);
        while (m.find()) {
            // NOTE(review): Matcher#groupCount() reports the pattern's group
            // count, which is constant across matches — for a two-group
            // PATTERN this guard is dead code; confirm PATTERN's shape.
            if (m.groupCount() != 2) {
                continue;
            }
            fields.put(removeQuotes(m.group(1)), removeQuotes(m.group(2)));
        }
        return fields;
    } else {
        return Collections.emptyMap();
    }
}
@Test public void testFilterWithQuotedValue() { TokenizerConverter f = new TokenizerConverter(new HashMap<String, Object>()); @SuppressWarnings("unchecked") Map<String, String> result = (Map<String, String>) f.convert("otters in k1=\"v1\" more otters"); assertEquals(1, result.size()); assertEquals("v1", result.get("k1")); }
/**
 * Injects signing parameters (signature, sign data, access key, and
 * optionally an STS security token) into the login identity context.
 * Signing failures are logged and the request proceeds unsigned.
 */
@Override
public void doInject(RequestResource resource, RamContext context, LoginIdentityContext result) {
    if (context.validate()) {
        try {
            String accessKey = context.getAccessKey();
            String secretKey = context.getSecretKey();
            // STS temporary credentials take priority over plain AK/SK authentication.
            if (StsConfig.getInstance().isStsOn()) {
                StsCredential stsCredential = StsCredentialHolder.getInstance().getStsCredential();
                accessKey = stsCredential.getAccessKeyId();
                secretKey = stsCredential.getAccessKeySecret();
                result.setParameter(IdentifyConstants.SECURITY_TOKEN_HEADER, stsCredential.getSecurityToken());
            }
            // When a region is configured, derive a V4 signing key from the
            // secret and mark the signature version; otherwise sign with the
            // raw secret key.
            String signatureKey = secretKey;
            if (StringUtils.isNotEmpty(context.getRegionId())) {
                signatureKey = CalculateV4SigningKeyUtil
                        .finalSigningKeyStringWithDefaultInfo(secretKey, context.getRegionId());
                result.setParameter(RamConstants.SIGNATURE_VERSION, RamConstants.V4);
            }
            String signData = getSignData(getGroupedServiceName(resource));
            String signature = SignUtil.sign(signData, signatureKey);
            result.setParameter(SIGNATURE_FILED, signature);
            result.setParameter(DATA_FILED, signData);
            result.setParameter(AK_FILED, accessKey);
        } catch (Exception e) {
            // Best-effort: do not fail the request when signing breaks.
            NAMING_LOGGER.error("inject ak/sk failed.", e);
        }
    }
}
@Test void testDoInjectWithoutGroupForSts() throws Exception { prepareForSts(); resource = RequestResource.namingBuilder().setResource("aaa").setGroup("group").build(); LoginIdentityContext actual = new LoginIdentityContext(); namingResourceInjector.doInject(resource, ramContext, actual); assertEquals(4, actual.getAllKey().size()); assertEquals("test-sts-ak", actual.getParameter("ak")); assertTrue(actual.getParameter("data").endsWith("@@group@@aaa")); String expectSign = SignUtil.sign(actual.getParameter("data"), "test-sts-sk"); assertEquals(expectSign, actual.getParameter("signature")); }
@Override public Optional<ShardingConstraintReviser> getConstraintReviser(final ShardingRule rule, final String tableName) { return rule.findShardingTableByActualTable(tableName).map(ShardingConstraintReviser::new); }
@Test void assertGetConstraintReviser() { Optional<ShardingConstraintReviser> constraintReviser = reviseEntry.getConstraintReviser(rule, "t_order1"); assertTrue(constraintReviser.isPresent()); assertThat(constraintReviser.get().getClass(), is(ShardingConstraintReviser.class)); }
public Value like( Value v ) { String cmp = v.getString(); // Is cmp part of look? int idx = getString().indexOf( cmp ); if ( idx < 0 ) { setValue( false ); } else { setValue( true ); } return this; }
@Test public void testLike() { Value vs1 = new Value( "Name1", Value.VALUE_TYPE_STRING ); Value vs2 = new Value( "Name2", Value.VALUE_TYPE_STRING ); Value vs3 = new Value( "Name3", Value.VALUE_TYPE_STRING ); vs1.setValue( "This is a test" ); vs2.setValue( "is a" ); vs3.setValue( "not" ); assertEquals( true, ( vs1.Clone().like( vs2 ) ).getBoolean() ); assertEquals( true, ( vs1.Clone().like( vs1 ) ).getBoolean() ); assertEquals( false, ( vs1.Clone().like( vs3 ) ).getBoolean() ); assertEquals( false, ( vs3.Clone().like( vs1 ) ).getBoolean() ); }
static <T extends CompoundPredicate> T flattenCompound(Predicate predicateLeft, Predicate predicateRight, Class<T> klass) { // The following could have been achieved with {@link com.hazelcast.query.impl.predicates.FlatteningVisitor}, // however since we only care for 2-argument flattening, we can avoid constructing a visitor and its internals // for each token pass at the cost of the following explicit code. Predicate[] predicates; if (klass.isInstance(predicateLeft) || klass.isInstance(predicateRight)) { Predicate[] left = getSubPredicatesIfClass(predicateLeft, klass); Predicate[] right = getSubPredicatesIfClass(predicateRight, klass); predicates = new Predicate[left.length + right.length]; ArrayUtils.concat(left, right, predicates); } else { predicates = new Predicate[]{predicateLeft, predicateRight}; } try { T compoundPredicate = klass.getDeclaredConstructor().newInstance(); compoundPredicate.setPredicates(predicates); return compoundPredicate; } catch (ReflectiveOperationException e) { throw new IllegalArgumentException(String.format("%s must have a public default constructor", klass.getName())); } }
@Test public void testFlattenAnd_withAndORPredicates() { OrPredicate orPredicate = new OrPredicate(leftOfOr, rightOfOr); AndPredicate andPredicate = new AndPredicate(leftOfAnd, rightOfAnd); AndPredicate flattenedCompoundAnd = SqlPredicate.flattenCompound(andPredicate, orPredicate, AndPredicate.class); assertSame(leftOfAnd, flattenedCompoundAnd.getPredicates()[0]); assertSame(rightOfAnd, flattenedCompoundAnd.getPredicates()[1]); assertSame(orPredicate, flattenedCompoundAnd.getPredicates()[2]); }
/**
 * Reads a two-byte little-endian integer from the payload.
 *
 * <p>The two branches intentionally return different boxed types — an
 * Integer for the unsigned (widened) value and a Short for the signed one.
 * Do not merge them into a single ternary, which would promote both
 * operands to int and change the boxed type.
 */
@Override
public Object read(final MySQLPacketPayload payload, final boolean unsigned) {
    if (!unsigned) {
        return payload.getByteBuf().readShortLE();
    }
    return payload.getByteBuf().readUnsignedShortLE();
}
@Test void assertRead() { when(payload.getByteBuf()).thenReturn(Unpooled.wrappedBuffer(new byte[]{1, 0, 1, 0})); assertThat(new MySQLInt2BinaryProtocolValue().read(payload, false), is((short) 1)); assertThat(new MySQLInt2BinaryProtocolValue().read(payload, true), is(1)); }
public T send() throws IOException { return web3jService.send(this, responseType); }
@Test public void testEthGetBlockReceipts() throws Exception { web3j.ethGetBlockReceipts(DefaultBlockParameter.valueOf(BigInteger.valueOf(15455945))) .send(); verifyResult( "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockReceipts\",\"params\":[" + "\"0xebd6c9\"]," + "\"id\":1}"); }
public static <T extends Type> Type decodeIndexedValue( String rawInput, TypeReference<T> typeReference) { return decoder.decodeEventParameter(rawInput, typeReference); }
@Test public void testDecodeIndexedBytes32Value() { String rawInput = "0x1234567890123456789012345678901234567890123456789012345678901234"; byte[] rawInputBytes = Numeric.hexStringToByteArray(rawInput); assertEquals( FunctionReturnDecoder.decodeIndexedValue(rawInput, new TypeReference<Bytes32>() {}), (new Bytes32(rawInputBytes))); }
/**
 * Returns true when this Java specification version is strictly newer than
 * the given one (delegates to {@code compareTo}; equal versions are not
 * considered newer).
 */
public boolean isNewerThan(JavaSpecVersion otherVersion) {
    return this.compareTo(otherVersion) > 0;
}
@Test public void test11notNewerThan17() throws Exception { // Setup fixture. final JavaSpecVersion eleven = new JavaSpecVersion( "11" ); final JavaSpecVersion seventeen = new JavaSpecVersion( "17" ); // Execute system under test. final boolean result = eleven.isNewerThan( seventeen ); // Verify results. assertFalse( result ); }
@Override public LoggingConfiguration getConfiguration(final Path file) throws BackgroundException { final Path bucket = containerService.getContainer(file); if(file.getType().contains(Path.Type.upload)) { return LoggingConfiguration.empty(); } try { final StorageBucketLoggingStatus status = session.getClient().getBucketLoggingStatusImpl(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName()); if(null == status) { log.warn(String.format("Failure parsing logging status for %s", bucket)); return LoggingConfiguration.empty(); } final LoggingConfiguration configuration = new LoggingConfiguration(status.isLoggingEnabled(), status.getTargetBucketName()); if(bucket.isRoot()) { configuration.setContainers(Collections.singletonList( new Path(RequestEntityRestStorageService.findBucketInHostname(session.getHost()), EnumSet.of(Path.Type.volume, Path.Type.directory))) ); } else { try { configuration.setContainers(new S3BucketListService(session).list(Home.ROOT, new DisabledListProgressListener()).toList()); } catch(AccessDeniedException | InteroperabilityException e) { log.warn(String.format("Failure listing buckets. %s", e.getMessage())); configuration.setContainers(Collections.singletonList(bucket)); } } return configuration; } catch(ServiceException e) { try { throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file); } catch(AccessDeniedException | InteroperabilityException l) { log.warn(String.format("Missing permission to read logging configuration for %s %s", bucket.getName(), e.getMessage())); return LoggingConfiguration.empty(); } } }
@Test(expected = NotfoundException.class) public void testReadNotFound() throws Exception { new S3LoggingFeature(session).getConfiguration( new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)) ); }
/**
 * Builds the single result row describing the compute node mode: mode type,
 * repository type, and repository properties. The repository configuration
 * may be absent (e.g. no explicit repository configured); type and props
 * are then reported as null.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowComputeNodeModeStatement sqlStatement, final ContextManager contextManager) {
    PersistRepositoryConfiguration repository = contextManager.getComputeNodeInstanceContext().getModeConfiguration().getRepository();
    String mode = contextManager.getComputeNodeInstanceContext().getModeConfiguration().getType();
    String repositoryType;
    Properties repositoryProps;
    if (null == repository) {
        repositoryType = null;
        repositoryProps = null;
    } else {
        repositoryType = repository.getType();
        repositoryProps = repository.getProps();
    }
    return Collections.singleton(new LocalDataQueryResultRow(mode, repositoryType, repositoryProps));
}
@Test void assertExecute() { ShowComputeNodeModeExecutor executor = new ShowComputeNodeModeExecutor(); ContextManager contextManager = mock(ContextManager.class); ComputeNodeInstanceContext computeNodeInstanceContext = createInstanceContext(); when(contextManager.getComputeNodeInstanceContext()).thenReturn(computeNodeInstanceContext); Collection<LocalDataQueryResultRow> actual = executor.getRows(new ShowComputeNodeModeStatement(), contextManager); assertThat(actual.size(), is(1)); Iterator<LocalDataQueryResultRow> iterator = actual.iterator(); LocalDataQueryResultRow row = iterator.next(); assertThat(row.getCell(1), is("Cluster")); assertThat(row.getCell(2), is("ZooKeeper")); assertThat(row.getCell(3), is("{\"key\":\"value1,value2\"}")); }
/**
 * Returns the worker id configured under WORKER_ID_KEY, or the default when
 * none is configured. Values outside [0, MAX_WORKER_ID] are rejected.
 *
 * @throws WorkerIdAssignedException when the configured id is out of range
 */
@Override
public int generate(final Properties props) {
    // Properties (a Hashtable) cannot hold null values, so a null lookup is
    // equivalent to the key being absent.
    Object configured = props.get(WORKER_ID_KEY);
    if (null == configured) {
        return DEFAULT_WORKER_ID;
    }
    int workerId = Integer.parseInt(configured.toString());
    ShardingSpherePreconditions.checkState(workerId >= 0 && workerId <= MAX_WORKER_ID, WorkerIdAssignedException::new);
    return workerId;
}
@Test void assertGenerateWithEmptyProperties() { assertThat(new StandaloneWorkerIdGenerator().generate(new Properties()), is(0)); }
@Override protected void doInit() { IClientProfile profile = DefaultProfile.getProfile(ENDPOINT, properties.getApiKey(), properties.getApiSecret()); client = new DefaultAcsClient(profile); }
@Test
public void testDoInit() {
    // Prepare parameters.
    // Mock methods.
    // Invoke.
    smsClient.doInit();
    // Assert: a fresh ACS client instance was created by doInit.
    assertNotSame(client, ReflectUtil.getFieldValue(smsClient, "acsClient"));
}
public OverrideParameterRequestWrapper(HttpServletRequest request) { super(request); this.params.putAll(request.getParameterMap()); }
@Test void testOverrideParameterRequestWrapper() { MockHttpServletRequest httpServletRequest = new MockHttpServletRequest(); httpServletRequest.addParameter("test1", "value1"); OverrideParameterRequestWrapper wrapper = OverrideParameterRequestWrapper.buildRequest(httpServletRequest); String value1 = wrapper.getParameter("test1"); assertEquals("value1", value1); wrapper.addParameter("test2", "value2"); assertEquals("value2", wrapper.getParameter("test2")); }
/**
 * Truncates a value towards zero. A Long is already integral, so this
 * overload is the identity; it exists so TRUNC can be applied uniformly
 * across numeric types. Returns null for null input.
 */
@Udf
public Long trunc(@UdfParameter final Long val) {
  return val;
}
@Test public void shouldTruncateSimpleDoubleNegative() { assertThat(udf.trunc(-1.23d), is(-1L)); assertThat(udf.trunc(-1.0d), is(-1L)); assertThat(udf.trunc(-1.5d), is(-1L)); assertThat(udf.trunc(-1.75d), is(-1L)); assertThat(udf.trunc(-1.53e6d), is(-1530000L)); assertThat(udf.trunc(-10.01d), is(-10L)); assertThat(udf.trunc(-12345.5d), is(-12345L)); assertThat(udf.trunc(-9.99d), is(-9L)); assertThat(udf.trunc(-110.1), is(-110L)); assertThat(udf.trunc(-1530000.01d), is(-1530000L)); assertThat(udf.trunc(-9999999.99d), is(-9999999L)); }
/**
 * Replaces username and password placeholders in the given code with values
 * resolved from {@code creds}. Returns null for null input. Every resolved
 * password is also recorded in {@code passwords} (presumably for masking
 * elsewhere — confirm with callers).
 */
public String replaceCredentials(String code) {
    if (code == null) {
        return null;
    }
    String replaced = code;
    // Replace one occurrence at a time, re-creating the matcher after each
    // substitution because the underlying string has changed.
    Matcher matcher = userpattern.matcher(replaced);
    while (matcher.find()) {
        String key = matcher.group(1);
        UsernamePassword usernamePassword = creds.getUsernamePassword(key);
        if (usernamePassword != null) {
            String value = usernamePassword.getUsername();
            // quoteReplacement keeps '$' and '\' in the value from being
            // interpreted as group references by replaceFirst.
            String quotedValue = Matcher.quoteReplacement(value);
            replaced = matcher.replaceFirst(quotedValue);
            matcher = userpattern.matcher(replaced);
            // NOTE(review): a replacement value that itself matches
            // userpattern would keep this loop running — TODO confirm
            // credential values cannot contain placeholders.
        }
    }
    matcher = passwordpattern.matcher(replaced);
    while (matcher.find()) {
        String key = matcher.group(1);
        UsernamePassword usernamePassword = creds.getUsernamePassword(key);
        if (usernamePassword != null) {
            // Record the plain-text password before substituting it.
            passwords.add(usernamePassword.getPassword());
            String value = usernamePassword.getPassword();
            String quotedValue = Matcher.quoteReplacement(value);
            replaced = matcher.replaceFirst(quotedValue);
            matcher = passwordpattern.matcher(replaced);
        }
    }
    return replaced;
}
@Test void replaceCredentialNoTexts() { UserCredentials userCredentials = mock(UserCredentials.class); CredentialInjector testee = new CredentialInjector(userCredentials); String actual = testee.replaceCredentials(null); assertNull(actual); }
/**
 * Returns the inverse permutation of {@code arr}: for every index i,
 * result[arr[i]] == i. Positions never referenced by any value in arr stay
 * -1; when a value occurs more than once, the last index wins. Values must
 * lie in [0, arr.length) or an ArrayIndexOutOfBoundsException is thrown.
 */
public static int[] invert(int[] arr) {
  final int n = arr.length;
  int[] inverse = new int[n];
  Arrays.fill(inverse, -1);
  int index = 0;
  for (int value : arr) {
    inverse[value] = index++;
  }
  return inverse;
}
@Test public void testInvert() { assertEquals(from(-1, -1, -1, 3), from(ArrayUtil.invert(new int[]{3, 3, 3, 3}))); assertEquals(from(3, 2, 0, 1), from(ArrayUtil.invert(new int[]{2, 3, 1, 0}))); assertEquals(from(2, 3, 1, 0), from(ArrayUtil.invert(new int[]{3, 2, 0, 1}))); }
@PublicAPI(usage = ACCESS) public JavaClass get(Class<?> reflectedType) { return get(reflectedType.getName()); }
@Test public void get_returns_correct_JavaClass() { assertThat(ALL_CLASSES.get(SomeOtherClass.class)).isEqualTo(SOME_OTHER_CLASS); assertThat(ALL_CLASSES.get(SomeOtherClass.class.getName())).isEqualTo(SOME_OTHER_CLASS); }
/**
 * Returns the names of the built-in system schemas for this database type.
 * The returned collection is the shared constant — callers must not modify it.
 */
@Override
public Collection<String> getSystemSchemas() {
    return SYSTEM_SCHEMAS;
}
@Test void assertGetSystemSchemas() { assertThat(systemDatabase.getSystemSchemas(), is(new HashSet<>(Arrays.asList("information_schema", "pg_catalog", "blockchain", "cstore", "db4ai", "dbe_perf", "dbe_pldebugger", "gaussdb", "oracle", "pkg_service", "snapshot", "sqladvisor", "dbe_pldeveloper", "pg_toast", "pkg_util", "shardingsphere")))); }
@Override public ValidationResponse validate(ValidationRequest req) { if (req.isEmptyQuery()) { return ValidationResponse.ok(); } try { final ParsedQuery parsedQuery = luceneQueryParser.parse(req.rawQuery()); final ValidationContext context = ValidationContext.builder() .request(req) .query(parsedQuery) .availableFields(fields.fieldTypesByStreamIds(req.streams(), req.timerange())) .build(); final List<ValidationMessage> explanations = validators.stream() .flatMap(val -> val.validate(context).stream()) .collect(Collectors.toList()); return ValidationResponse.withDetectedStatus(explanations); } catch (Exception e) { return ValidationResponse.error(ValidationErrors.create(e)); } }
@Test void validateNoMessages() { // validator doesn't return any warnings or errors final QueryValidator queryValidator = context -> Collections.emptyList(); final QueryValidationServiceImpl service = new QueryValidationServiceImpl( LUCENE_QUERY_PARSER, FIELD_TYPES_SERVICE, Collections.singleton(queryValidator)); final ValidationResponse validationResponse = service.validate(req()); assertThat(validationResponse.status()).isEqualTo(ValidationStatus.OK); assertThat(validationResponse.explanations()).isEmpty(); }
public static Write write() { return new AutoValue_HCatalogIO_Write.Builder().setBatchSize(BATCH_SIZE).build(); }
@Test public void testWriteFailureTableDoesNotExist() { thrown.expectCause(isA(UserCodeException.class)); thrown.expectMessage(containsString("org.apache.hive.hcatalog.common.HCatException")); thrown.expectMessage(containsString("NoSuchObjectException")); defaultPipeline .apply(Create.of(buildHCatRecords(TEST_RECORDS_COUNT))) .apply( HCatalogIO.write() .withConfigProperties(getConfigPropertiesAsMap(service.getHiveConf())) .withTable("myowntable")); defaultPipeline.run(); }
/**
 * Returns the name of the Java module Hazelcast was loaded from, or null
 * when it cannot be determined (e.g. Hazelcast is on the classpath rather
 * than the module path). Failures are deliberately swallowed and only
 * logged at FINEST — this is informational, not critical.
 */
public static String getHazelcastModuleName() {
    try {
        return getName(hazelcastModule());
    } catch (Exception e) {
        LOGGER.finest("Getting Hazelcast module name failed", e);
        return null;
    }
}
@Test public void testHazelcastModuleName() { assertNull("Hazelcast module name should be null as the testsuite runs hazelcast on the classpath", ModularJavaUtils.getHazelcastModuleName()); }
/**
 * Parses the given string into a Joda-Time DateTime using the configured
 * pattern, locale and time zone. Returns null for null/empty input; parse
 * failures propagate from {@code DateTime.parse}.
 */
@Override
@Nullable
public Object convert(@Nullable String value) {
    if (isNullOrEmpty(value)) {
        return null;
    }
    LOG.debug("Trying to parse date <{}> with pattern <{}>, locale <{}>, and timezone <{}>.", value, dateFormat, locale, timeZone);
    final DateTimeFormatter formatter;
    if (containsTimeZone) {
        // The pattern itself carries a time-zone token, so no zone is forced
        // on the formatter — the zone parsed from the input wins.
        formatter = DateTimeFormat
                .forPattern(dateFormat)
                // Default the year to the current one (evaluated in the
                // configured zone) for patterns without a year, e.g. syslog.
                .withDefaultYear(YearMonth.now(timeZone).getYear())
                .withLocale(locale);
    } else {
        formatter = DateTimeFormat
                .forPattern(dateFormat)
                .withDefaultYear(YearMonth.now(timeZone).getYear())
                .withLocale(locale)
                .withZone(timeZone);
    }
    return DateTime.parse(value, formatter);
}
@Test(expected = ConfigurationException.class) public void testWithNullDateFormat() throws Exception { final DateConverter dateConverter = new DateConverter(config(null, null, null)); assertThat((DateTime) dateConverter.convert("foo")).isNull(); }
/**
 * Checks whether the given string is a valid IPv4 or IPv6 address.
 * A null string is never valid.
 */
public static boolean isValidIp(String ip) {
    if (ip == null) {
        return false;
    }
    if (parseV4(ip) != null) {
        return true;
    }
    return parseV6(ip) != null;
}
@Test public void testIsValidIp() { assertTrue(Hosts.isValidIp("127.0.0.1")); assertTrue(Hosts.isValidIp("0.0.0.0")); assertFalse(Hosts.isValidIp("324.0.0.1")); assertFalse(Hosts.isValidIp("127.0.0.")); assertFalse(Hosts.isValidIp("127.0.0")); assertFalse(Hosts.isValidIp("-1.0.0")); assertFalse(Hosts.isValidIp("")); assertFalse(Hosts.isValidIp(null)); }
/**
 * Asks every registered remoting parser for a service description of the
 * given bean. Returns null when no parser recognises the bean, the single
 * match when exactly one does, and fails when the bean is ambiguous.
 *
 * @throws FrameworkException when more than one parser matches the bean
 */
public RemotingDesc getServiceDesc(Object bean, String beanName) {
    List<RemotingDesc> matches = new ArrayList<>();
    for (RemotingParser parser : allRemotingParsers) {
        RemotingDesc desc = parser.getServiceDesc(bean, beanName);
        if (desc != null) {
            matches.add(desc);
        }
    }
    if (matches.isEmpty()) {
        return null;
    }
    if (matches.size() > 1) {
        throw new FrameworkException(String.format("More than one RemotingParser for bean: %s", beanName));
    }
    return matches.get(0);
}
@Test public void testGetServiceDescFail() { SimpleBean simpleBean = new SimpleBean(); assertNull(remotingParser.getServiceDesc(simpleBean, simpleBean.getClass().getName())); }
@Override
public void updateTag(MemberTagUpdateReqVO updateReqVO) {
    // Validate that the tag exists.
    validateTagExists(updateReqVO.getId());
    // Validate that the name is unique.
    validateTagNameUnique(updateReqVO.getId(), updateReqVO.getName());
    // Perform the update.
    MemberTagDO updateObj = MemberTagConvert.INSTANCE.convert(updateReqVO);
    memberTagMapper.updateById(updateObj);
}
@Test
public void testUpdateTag_success() {
    // Mock data: insert an existing record first.
    MemberTagDO dbTag = randomPojo(MemberTagDO.class);
    tagMapper.insert(dbTag);// insert an existing row up front (@Sql style)
    // Prepare parameters.
    MemberTagUpdateReqVO reqVO = randomPojo(MemberTagUpdateReqVO.class, o -> {
        o.setId(dbTag.getId()); // set the ID of the record to update
    });
    // Invoke.
    tagService.updateTag(reqVO);
    // Verify the update was applied correctly.
    MemberTagDO tag = tagMapper.selectById(reqVO.getId()); // fetch the latest state
    assertPojoEquals(reqVO, tag);
}
public static Condition parse(String field, Object expression) { return new Condition(field, expression); }
@Test public void parseTest(){ final Condition age = Condition.parse("age", "< 10"); assertEquals("age < ?", age.toString()); // issue I38LTM assertSame(BigDecimal.class, age.getValue().getClass()); }
@GetInitialRestriction public OffsetRange getInitialRestriction(@Element PulsarSourceDescriptor pulsarSource) { long startTimestamp = 0L; long endTimestamp = Long.MAX_VALUE; if (pulsarSource.getStartOffset() != null) { startTimestamp = pulsarSource.getStartOffset(); } if (pulsarSource.getEndOffset() != null) { endTimestamp = pulsarSource.getEndOffset(); } return new OffsetRange(startTimestamp, endTimestamp); }
@Test public void testInitialRestrictionWithConsumerPosition() throws Exception { long expectedStartOffset = Instant.now().getMillis(); OffsetRange result = dofnInstance.getInitialRestriction( PulsarSourceDescriptor.of( TOPIC, expectedStartOffset, null, null, SERVICE_URL, ADMIN_URL)); assertEquals(new OffsetRange(expectedStartOffset, Long.MAX_VALUE), result); }
public static <T> T retryUntilTimeout(Callable<T> callable, Supplier<String> description, Duration timeoutDuration, long retryBackoffMs) throws Exception { return retryUntilTimeout(callable, description, timeoutDuration, retryBackoffMs, Time.SYSTEM); }
@Test public void testBackoffMoreThanTimeoutWillOnlyExecuteOnce() throws Exception { Mockito.when(mockCallable.call()).thenThrow(new TimeoutException("timeout exception")); assertThrows(TimeoutException.class, () -> RetryUtil.retryUntilTimeout(mockCallable, testMsg, Duration.ofMillis(50), 100, mockTime)); Mockito.verify(mockCallable, Mockito.times(1)).call(); }
/**
 * Records a relationship between two trace builders; once the size cap is
 * reached, further relationships are silently dropped.
 */
public synchronized void addRelationship(final Relationship relationship, final ShallowTraceBuilder from, final ShallowTraceBuilder to) {
    // NOTE(review): the cap compares the relationship count against
    // _maxTraceBuildersPerTrace, whose name suggests it limits trace
    // builders — confirm the same limit is intended for relationships.
    if (_relationships.size() < _maxTraceBuildersPerTrace) {
        TraceRelationship rel = new TraceRelationship(from, to, relationship);
        _relationships.add(rel);
    }
}
@Test public void testAddRelationship() { final ShallowTraceBuilder trace1 = new ShallowTraceBuilder(IdGenerator.getNextId()).setName("task1").setResultType(ResultType.UNFINISHED); final ShallowTraceBuilder trace2 = new ShallowTraceBuilder(IdGenerator.getNextId()).setName("task2").setResultType(ResultType.UNFINISHED); final TraceBuilder builder = new TraceBuilder(1024, "test", 0L); builder.addRelationship(Relationship.SUCCESSOR_OF, trace1, trace2); Trace trace = builder.build(); assertEquals(trace1.build(), trace.getTraceMap().get(trace1.getNativeId())); assertEquals(trace2.build(), trace.getTraceMap().get(trace2.getNativeId())); assertEquals(1, trace.getRelationships().size()); assertTrue(trace.getRelationships() .contains(new TraceRelationship(trace1, trace2, Relationship.SUCCESSOR_OF))); }
/**
 * Builds the metadata node path of a specific table version, e.g.
 * {@code /metadata/<db>/schemas/<schema>/tables/<table>/versions/<version>}.
 */
public static String getTableVersionNode(final String databaseName, final String schemaName, final String tableName, final String version) {
    return getTableVersionsNode(databaseName, schemaName, tableName) + "/" + version;
}
@Test void assertGetTableVersionNode() { assertThat(TableMetaDataNode.getTableVersionNode("foo_db", "foo_schema", "foo_table", "0"), is("/metadata/foo_db/schemas/foo_schema/tables/foo_table/versions/0")); }
public static org.locationtech.jts.geom.Envelope getJtsEnvelope(OGCGeometry ogcGeometry, double radius) { Envelope esriEnvelope = getEnvelope(ogcGeometry); if (esriEnvelope.isEmpty()) { return new org.locationtech.jts.geom.Envelope(); } return new org.locationtech.jts.geom.Envelope( esriEnvelope.getXMin() - radius, esriEnvelope.getXMax() + radius, esriEnvelope.getYMin() - radius, esriEnvelope.getYMax() + radius); }
@Test public void testGetJtsEnvelope() { assertJtsEnvelope( "MULTIPOLYGON EMPTY", new Envelope()); assertJtsEnvelope( "POINT (-23.4 12.2)", new Envelope(-23.4, -23.4, 12.2, 12.2)); assertJtsEnvelope( "LINESTRING (-75.9375 23.6359, -75.9375 23.6364)", new Envelope(-75.9375, -75.9375, 23.6359, 23.6364)); assertJtsEnvelope( "GEOMETRYCOLLECTION (" + " LINESTRING (-75.9375 23.6359, -75.9375 23.6364)," + " MULTIPOLYGON (((-75.9375 23.45520, -75.9371 23.4554, -75.9375 23.46023325, -75.9375 23.45520)))" + ")", new Envelope(-75.9375, -75.9371, 23.4552, 23.6364)); }
public String convert(ILoggingEvent le) { long timestamp = le.getTimeStamp(); return cachingDateFormatter.format(timestamp); }
@Test public void convertsDateWithUtcWhenTimeZoneUnknown() { assertEquals(formatDate("UTC"), convert(_timestamp, DATETIME_PATTERN, "FakeTimeZone")); }
@Override public boolean syncVerifyData(DistroData verifyData, String targetServer) { if (isNoExistTarget(targetServer)) { return true; } // replace target server as self server so that can callback. verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress()); DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY); Member member = memberManager.find(targetServer); if (checkTargetServerStatusUnhealthy(member)) { Loggers.DISTRO .warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}", targetServer, verifyData.getDistroKey()); return false; } try { Response response = clusterRpcClientProxy.sendRequest(member, request); return checkResponse(response); } catch (NacosException e) { Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e); } return false; }
@Test void testSyncVerifyDataSuccess() throws NacosException { DistroData verifyData = new DistroData(); verifyData.setDistroKey(new DistroKey()); when(memberManager.hasMember(member.getAddress())).thenReturn(true); when(memberManager.find(member.getAddress())).thenReturn(member); member.setState(NodeState.UP); when(clusterRpcClientProxy.isRunning(member)).thenReturn(true); assertTrue(transportAgent.syncVerifyData(verifyData, member.getAddress())); }
/**
 * Maps an array of FileStatus entries to their paths, preserving order.
 * Deliberately returns null for null input — the two-argument overload
 * (and its callers) rely on that contract.
 */
public static Path[] stat2Paths(FileStatus[] stats) {
  if (stats == null) {
    return null;
  }
  Path[] paths = new Path[stats.length];
  for (int i = 0; i < paths.length; i++) {
    paths[i] = stats[i].getPath();
  }
  return paths;
}
@Test (timeout = 30000) public void testStat2Paths2() { Path defaultPath = new Path("file://default"); Path[] paths = FileUtil.stat2Paths(null, defaultPath); assertEquals(1, paths.length); assertEquals(defaultPath, paths[0]); paths = FileUtil.stat2Paths(null, null); assertTrue(paths != null); assertEquals(1, paths.length); assertEquals(null, paths[0]); Path path1 = new Path("file://foo"); Path path2 = new Path("file://moo"); FileStatus[] fileStatuses = new FileStatus[] { new FileStatus(3, false, 0, 0, 0, path1), new FileStatus(3, false, 0, 0, 0, path2) }; paths = FileUtil.stat2Paths(fileStatuses, defaultPath); assertEquals(2, paths.length); assertEquals(paths[0], path1); assertEquals(paths[1], path2); }
@BuildStep AdditionalBeanBuildItem produce(Capabilities capabilities, JobRunrBuildTimeConfiguration jobRunrBuildTimeConfiguration) { Set<Class<?>> additionalBeans = new HashSet<>(); additionalBeans.add(JobRunrProducer.class); additionalBeans.add(JobRunrStarter.class); additionalBeans.add(jsonMapper(capabilities)); additionalBeans.addAll(storageProvider(capabilities, jobRunrBuildTimeConfiguration)); return AdditionalBeanBuildItem.builder() .setUnremovable() .addBeanClasses(additionalBeans.toArray(new Class[0])) .build(); }
@Test void producesJobRunrProducer() { final AdditionalBeanBuildItem additionalBeanBuildItem = jobRunrExtensionProcessor.produce(capabilities, jobRunrBuildTimeConfiguration); assertThat(additionalBeanBuildItem.getBeanClasses()) .containsOnly( JobRunrProducer.class.getName(), JobRunrStarter.class.getName(), JobRunrInMemoryStorageProviderProducer.class.getName(), JobRunrProducer.JobRunrJsonBJsonMapperProducer.class.getName() ); }
public static boolean isValidRootUrl(String url) { UrlValidator validator = new CustomUrlValidator(); return validator.isValid(url); }
@Test public void ipv6Allowed() { assertTrue(UrlHelper.isValidRootUrl("http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]")); assertTrue(UrlHelper.isValidRootUrl("http://[FEDC:0000:0000:3210:FEDC:BA98:7654:3210]")); // 0000 can be reduced to 0 assertTrue(UrlHelper.isValidRootUrl("http://[FEDC:0:0:3210:FEDC:BA98:7654:3210]")); // an unique sequence of multiple fragments with 0's could be omitted completely assertTrue(UrlHelper.isValidRootUrl("http://[FEDC::3210:FEDC:BA98:7654:3210]")); // but only one sequence assertFalse(UrlHelper.isValidRootUrl("http://[2001::85a3::ac1f]")); // port and path are still allowed assertTrue(UrlHelper.isValidRootUrl("http://[FEDC:0:0:3210:FEDC:BA98:7654:3210]:8001/jenkins")); assertTrue(UrlHelper.isValidRootUrl("http://[FEDC:0:0:3210:FEDC:BA98:7654:3210]:8001")); assertTrue(UrlHelper.isValidRootUrl("http://[FEDC:0:0:3210:FEDC:BA98:7654:3210]/jenkins")); // dashes are not allowed inside ipv6 assertFalse(UrlHelper.isValidRootUrl("http://[FEDC:0:0:32-10:FEDC:BA98:7654:3210]:8001/jenkins")); assertFalse(UrlHelper.isValidRootUrl("http://[FEDC:0:0:3210:-FEDC:BA98:7654:3210]:8001/jenkins")); }
@Override public boolean addIfExists(double score, V object) { return get(addIfExistsAsync(score, object)); }
@Test public void testAddIfExists() { RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple"); assertThat(set.addIfExists(123.81, "1980")).isFalse(); assertThat(set.getScore("1980")).isNull(); set.add(111, "1980"); assertThat(set.addIfExists(32, "1980")).isTrue(); assertThat(set.getScore("1980")).isEqualTo(32); }
/**
 * Prefixes the URL host with "www." unless the host already starts with it
 * (case-insensitively). Only the first occurrence of the host in the URL is
 * rewritten.
 *
 * @return this normalizer, for chaining
 */
public URLNormalizer addWWW() {
    String host = toURL().getHost();
    // regionMatches(true, ...) is a locale-independent, case-insensitive
    // prefix check. The previous host.toLowerCase().startsWith("www.") used
    // the default locale, which can behave surprisingly under locale-
    // sensitive case mappings (e.g. Turkish dotted/dotless i).
    if (!host.regionMatches(true, 0, "www.", 0, 4)) {
        url = StringUtils.replaceOnce(url, host, "www." + host);
    }
    return this;
}
@Test public void testAddWWW() { s = "http://example.com/foo.html"; t = "http://www.example.com/foo.html"; assertEquals(t, n(s).addWWW().toString()); s = "http://wwwexample.com/foo.html"; t = "http://www.wwwexample.com/foo.html"; assertEquals(t, n(s).addWWW().toString()); s = "http://www.example.com/foo.html"; t = "http://www.example.com/foo.html"; assertEquals(t, n(s).addWWW().toString()); }
// Reads attributes for a single path. When enabled via "ftp.command.mlsd"
// and advertised in FEAT, a targeted MLST command is tried first; on missing
// support, or on interoperability/access/not-found failures, it falls back
// to DefaultAttributesFinderFeature (parent directory listing).
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        // The root itself carries no attributes.
        return PathAttributes.EMPTY;
    }
    try {
        try {
            final PreferencesReader preferences = new HostPreferences(session.getHost());
            if(preferences.getBoolean("ftp.command.mlsd")) {
                if(session.getClient().hasFeature(FTPCmd.MLST.getCommand())) {
                    if(!FTPReply.isPositiveCompletion(session.getClient().sendCommand(FTPCmd.MLST, file.getAbsolute()))) {
                        throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
                    }
                    // Parse the machine-readable MLST reply for this single path.
                    final FTPDataResponseReader reader = new FTPMlsdListResponseReader();
                    final AttributedList<Path> attributes = reader.read(file.getParent(), Arrays.asList(session.getClient().getReplyStrings()));
                    if(attributes.contains(file)) {
                        return attributes.get(attributes.indexOf(file)).attributes();
                    }
                }
                log.warn("No support for MLST in reply to FEAT");
            }
            // Fallback: derive attributes from a listing of the parent.
            return new DefaultAttributesFinderFeature(session).find(file, listener);
        }
        catch(IOException e) {
            throw new FTPExceptionMappingService().map("Failure to read attributes of {0}", e, file);
        }
    }
    catch(InteroperabilityException | AccessDeniedException | NotfoundException f) {
        // MLST path failed; retry once through the generic listing feature.
        log.warn(String.format("Failure reading attributes for %s. %s", file, f.getMessage()));
        return new DefaultAttributesFinderFeature(session).find(file, listener);
    }
}
// A freshly-touched file must report size 0; requesting a path declared with
// the wrong type (directory placeholder) must fail with NotfoundException or
// InteroperabilityException.
@Test
public void testAttributesWrongFiletype() throws Exception {
    final FTPAttributesFinderFeature f = new FTPAttributesFinderFeature(session);
    final Path file = new FTPTouchFeature(session).touch(new Path(new FTPWorkdirService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Attributes attributes = f.find(file);
    assertEquals(0L, attributes.getSize());
    // Test wrong type
    try {
        f.find(new Path(new FTPWorkdirService(session).find(), "test", EnumSet.of(Path.Type.directory)));
        fail();
    }
    catch(NotfoundException | InteroperabilityException e) {
        // Expected
    }
    new FTPDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
// CLI entry point. Usage: [-o all|prior|after|report] <input> [<output>].
// Parses the optional "-o" flag (default "all"), validates argument counts,
// then dispatches to the matching recovery routine.
// Returns 0 on success, 1 on usage errors or unknown options.
@Override
public int run(InputStream stdin, PrintStream out, PrintStream err, List<String> args) throws Exception {
    if (args.size() < 2) {
        printInfo(err);
        return 1;
    }
    int index = 0;
    String input = args.get(index);
    String option = "all";
    if ("-o".equals(input)) {
        option = args.get(1);
        index += 2;
    }
    if (!OPTIONS.contains(option) || (args.size() - index < 1)) {
        printInfo(err);
        return 1;
    }
    // First positional argument: the (possibly corrupt) input file.
    input = args.get(index++);
    if (!REPORT.equals(option)) {
        // Every mode except "report" also requires an output file argument.
        if (args.size() - index < 1) {
            printInfo(err);
            return 1;
        }
    }
    if (ALL.equals(option)) {
        return recoverAll(input, args.get(index), out, err);
    } else if (PRIOR.equals(option)) {
        return recoverPrior(input, args.get(index), out, err);
    } else if (AFTER.equals(option)) {
        return recoverAfter(input, args.get(index), out, err);
    } else if (REPORT.equals(option)) {
        return reportOnly(input, out, err);
    } else {
        return 1;
    }
}
// "prior" mode must keep only the records before the corrupt block and the
// tool must report the corrupt-block/record statistics on its output.
@Test
void repairPriorCorruptBlock() throws Exception {
    String output = run(new DataFileRepairTool(), "-o", "prior", corruptBlockFile.getPath(), repairedFile.getPath());
    assertTrue(output.contains("Number of blocks: 2 Number of corrupt blocks: 1"), output);
    assertTrue(output.contains("Number of records: 5 Number of corrupt records: 0"), output);
    checkFileContains(repairedFile, "apple", "banana", "celery");
}
/**
 * Builds an {@code ArrowReader} over the given Arrow root, pairing each
 * field vector with the logical type at the same position in
 * {@code rowType}.
 */
public static ArrowReader createArrowReader(VectorSchemaRoot root, RowType rowType) {
    List<FieldVector> fieldVectors = root.getFieldVectors();
    ColumnVector[] columns = new ColumnVector[fieldVectors.size()];
    for (int idx = 0; idx < columns.length; idx++) {
        columns[idx] = createColumnVector(fieldVectors.get(idx), rowType.getTypeAt(idx));
    }
    return new ArrowReader(columns);
}
// Each column vector produced by createArrowReader must be of the vector
// class expected for the corresponding test field.
@Test
void testCreateArrowReader() {
    VectorSchemaRoot root = VectorSchemaRoot.create(ArrowUtils.toArrowSchema(rowType), allocator);
    ArrowReader reader = ArrowUtils.createArrowReader(root, rowType);
    ColumnVector[] columnVectors = reader.getColumnVectors();
    for (int i = 0; i < columnVectors.length; i++) {
        assertThat(columnVectors[i].getClass()).isEqualTo(testFields.get(i).f4);
    }
}
// Issues CONFIG RESETSTAT against the given cluster node and blocks until
// the command completes.
@Override
public void resetConfigStats(RedisClusterNode node) {
    RedisClient entry = getEntry(node);
    RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
    syncFuture(f);
}
// Smoke test: resetting config stats on a master node must not throw.
@Test
public void testResetConfigStats() {
    testInCluster(connection -> {
        RedisClusterNode master = getFirstMaster(connection);
        connection.resetConfigStats(master);
    });
}
/**
 * Splits {@code text} on runs of non-word characters (anything outside
 * {@code [a-zA-Z0-9_]}).
 *
 * @param text the string to split; must not be null
 * @return the word tokens in their original order
 */
public static String[] splitWords(String text) {
    // Consecutive delimiters collapse into a single split point.
    final String delimiterPattern = "[\\W]+";
    return text.split(delimiterPattern);
}
// splitWords must break on '/' and return a single-element array when no
// delimiter is present.
@Test
public void testSplitWords() {
    String[] arr = splitWords("apiName/methodName");
    assertEquals(2, arr.length);
    assertEquals("apiName", arr[0]);
    assertEquals("methodName", arr[1]);
    arr = splitWords("hello");
    assertEquals(1, arr.length);
    assertEquals("hello", arr[0]);
}
// Prints the table of locally configured plugins; with --all, additionally
// prints the built-in plugin types that are not yet configured under a
// "Supported plugins:" heading. Always returns 0.
@Override
public Integer doCall() throws Exception {
    List<Row> rows = new ArrayList<>();
    JsonObject plugins = loadConfig().getMap("plugins");
    plugins.forEach((key, value) -> {
        JsonObject details = (JsonObject) value;
        // Optional keys fall back to derived defaults.
        String name = details.getStringOrDefault("name", key);
        String command = details.getStringOrDefault("command", name);
        String dependency = details.getStringOrDefault("dependency", "org.apache.camel:camel-jbang-plugin-%s".formatted(command));
        String description = details.getStringOrDefault("description", "Plugin %s called with command %s".formatted(name, command));
        rows.add(new Row(name, command, dependency, description));
    });
    printRows(rows);
    if (all) {
        rows.clear();
        // Collect supported plugin types not present in the local config.
        for (PluginType camelPlugin : PluginType.values()) {
            if (plugins.get(camelPlugin.getName()) == null) {
                String dependency = "org.apache.camel:camel-jbang-plugin-%s".formatted(camelPlugin.getCommand());
                rows.add(new Row(
                        camelPlugin.getName(), camelPlugin.getCommand(), dependency, camelPlugin.getDescription()));
            }
        }
        if (!rows.isEmpty()) {
            printer().println();
            printer().println("Supported plugins:");
            printer().println();
            printRows(rows);
        }
    }
    return 0;
}
// With --all, the output must list the configured plugin first and then the
// remaining built-in plugin types under a "Supported plugins:" section, in a
// fixed line layout.
@Test
public void shouldGetAllPlugins() throws Exception {
    JsonObject pluginConfig = PluginHelper.getOrCreatePluginConfig();
    JsonObject plugins = pluginConfig.getMap("plugins");
    JsonObject fooPlugin = new JsonObject();
    fooPlugin.put("name", "foo-plugin");
    fooPlugin.put("command", "foo");
    fooPlugin.put("dependency", "org.apache.camel:foo-plugin:1.0.0");
    plugins.put("foo-plugin", fooPlugin);
    PluginHelper.savePluginConfig(pluginConfig);
    PluginGet command = new PluginGet(new CamelJBangMain().withPrinter(printer));
    command.all = true;
    command.doCall();
    List<String> output = printer.getLines();
    Assertions.assertEquals(9, output.size());
    Assertions.assertEquals("NAME COMMAND DEPENDENCY DESCRIPTION", output.get(0));
    Assertions.assertEquals(
            "foo-plugin foo org.apache.camel:foo-plugin:1.0.0 Plugin foo-plugin called with command foo",
            output.get(1));
    Assertions.assertEquals("Supported plugins:", output.get(3));
    Assertions.assertEquals("NAME COMMAND DEPENDENCY DESCRIPTION", output.get(5));
    Assertions.assertEquals(
            "camel-k k org.apache.camel:camel-jbang-plugin-k %s"
                    .formatted(PluginType.CAMEL_K.getDescription()),
            output.get(6));
    Assertions.assertEquals(
            "kubernetes kubernetes org.apache.camel:camel-jbang-plugin-kubernetes %s"
                    .formatted(PluginType.KUBERNETES.getDescription()),
            output.get(7));
    Assertions.assertEquals(
            "generate generate org.apache.camel:camel-jbang-plugin-generate %s"
                    .formatted(PluginType.GENERATE.getDescription()),
            output.get(8));
}
// In-place union is not supported by this implementation.
@Override
public void union(PartitionIdSet other) {
    throw new UnsupportedOperationException();
}
// union must be rejected with UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class)
public void test_union() {
    set.union(new PartitionIdSet(11, asList(6, 7, 8)));
}
// Entry point of the federation admin CLI. Requires at least one argument
// and federation enabled in the configuration; dispatches to the
// help/subcluster/policy/application sub-handlers. Unknown commands print
// help but still return EXIT_SUCCESS; missing args or disabled federation
// return EXIT_ERROR.
@Override
public int run(String[] args) throws Exception {
    YarnConfiguration yarnConf = getConf() == null ? new YarnConfiguration() : new YarnConfiguration(getConf());
    boolean isFederationEnabled = yarnConf.getBoolean(YarnConfiguration.FEDERATION_ENABLED, YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
    if (args.length < 1 || !isFederationEnabled) {
        printUsage(CMD_EMPTY);
        return EXIT_ERROR;
    }
    String cmd = args[0];
    if (CMD_HELP.equals(cmd)) {
        if (args.length > 1) {
            // Command-specific usage text.
            printUsage(args[1]);
        } else {
            printHelp();
        }
        return EXIT_SUCCESS;
    } else if (CMD_SUBCLUSTER.equals(cmd)) {
        return handleSubCluster(args);
    } else if (CMD_POLICY.equals(cmd)) {
        return handlePolicy(args);
    } else if (CMD_APPLICATION.equals(cmd)) {
        return handleApplication(args);
    } else {
        System.out.println("No related commands found.");
        printHelp();
    }
    return EXIT_SUCCESS;
}
// All help invocations (plain "-help" and command-specific "-help <cmd>")
// must succeed with exit code 0 (see run(): the CMD_HELP branch always
// returns EXIT_SUCCESS).
// Fixes over the previous version: the same "-help" command was run twice
// (one call unchecked), the return codes of the command-specific help calls
// were never asserted, and the redirected System.out/err streams were never
// restored, leaking into subsequent tests.
@Test
public void testHelp() throws Exception {
    // Capture console output so help text does not pollute the test log.
    PrintStream originalOut = System.out;
    PrintStream originalErr = System.err;
    ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
    ByteArrayOutputStream dataErr = new ByteArrayOutputStream();
    System.setOut(new PrintStream(dataOut));
    System.setErr(new PrintStream(dataErr));
    try {
        String[] args = {"-help"};
        assertEquals(0, rmAdminCLI.run(args));
        args = new String[]{"-help", "-deregisterSubCluster"};
        assertEquals(0, rmAdminCLI.run(args));
        args = new String[]{"-help", "-policy"};
        assertEquals(0, rmAdminCLI.run(args));
    } finally {
        // Restore the original streams so later tests are unaffected.
        System.setOut(originalOut);
        System.setErr(originalErr);
    }
}
// Accessor for the intermediate projection schema computed at construction.
public LogicalSchema getIntermediateSchema() {
    return intermediateSchema;
}
@Test public void shouldBuildPullQueryIntermediateSchemaSelectKeyWindowed() { // Given: selects = ImmutableList.of(new SingleColumn(K_REF, Optional.of(ALIAS))); when(keyFormat.isWindowed()).thenReturn(true); when(analysis.getSelectColumnNames()).thenReturn(ImmutableSet.of(ColumnName.of("K"))); // When: final QueryProjectNode projectNode = new QueryProjectNode( NODE_ID, source, selects, metaStore, ksqlConfig, analysis, true, plannerOptions, false ); // Then: final LogicalSchema expectedSchema = QueryLogicalPlanUtil.buildIntermediateSchema( INPUT_SCHEMA, true, true);; assertThat(expectedSchema, is(projectNode.getIntermediateSchema())); }
/**
 * Deletes the given topics, dispatching on the concrete collection type:
 * id-based deletion for a {@code TopicIdCollection}, name-based deletion
 * for a {@code TopicNameCollection}. Any other subtype is rejected.
 *
 * @throws IllegalArgumentException if the collection type is unsupported
 */
@Override
public DeleteTopicsResult deleteTopics(final TopicCollection topics, final DeleteTopicsOptions options) {
    if (topics instanceof TopicIdCollection) {
        return DeleteTopicsResult.ofTopicIds(
            handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options));
    }
    if (topics instanceof TopicNameCollection) {
        return DeleteTopicsResult.ofTopicNames(
            handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options));
    }
    throw new IllegalArgumentException("The TopicCollection: " + topics
        + " provided did not match any supported classes for deleteTopics.");
}
// A partial delete-topics response must complete the future of the
// acknowledged topic and fail the missing one with ApiException — for both
// name-based and id-based deletion.
@Test
public void testDeleteTopicsPartialResponse() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("myTopic", "myOtherTopic"),
            prepareDeleteTopicsResponse(1000, deletableTopicResult("myTopic", Errors.NONE)));
        DeleteTopicsResult result = env.adminClient().deleteTopics(
            asList("myTopic", "myOtherTopic"), new DeleteTopicsOptions());
        result.topicNameValues().get("myTopic").get();
        TestUtils.assertFutureThrows(result.topicNameValues().get("myOtherTopic"), ApiException.class);
        // With topic IDs
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2),
            prepareDeleteTopicsResponse(1000, deletableTopicResultWithId(topicId1, Errors.NONE)));
        DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
            TopicCollection.ofTopicIds(asList(topicId1, topicId2)), new DeleteTopicsOptions());
        resultIds.topicIdValues().get(topicId1).get();
        TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), ApiException.class);
    }
}
// Blocking wrapper around the async counter map: returns the value stored
// for {@code key} before incrementing it.
@Override
public long getAndIncrement(K key) {
    return complete(asyncCounterMap.getAndIncrement(key));
}
// getAndIncrement must return the pre-increment value and leave the stored
// counter incremented by one.
@Test
public void testGetAndIncrement() {
    atomicCounterMap.put(KEY1, VALUE1);
    Long beforeIncrement = atomicCounterMap.getAndIncrement(KEY1);
    assertThat(beforeIncrement, is(VALUE1));
    Long afterIncrement = atomicCounterMap.get(KEY1);
    assertThat(afterIncrement, is(VALUE1 + 1));
}
// Performs a depth-1 PROPFIND on the directory, requesting the
// Nextcloud/ownCloud custom properties (file id, checksums, size) plus both
// last-modified timestamp variants alongside the standard DAV properties.
@Override
protected List<DavResource> list(final Path directory) throws IOException {
    return session.getClient().list(new DAVPathEncoder().encode(directory), 1, Stream.of(
        NextcloudAttributesFinderFeature.OC_FILEID_CUSTOM_NAMESPACE,
        NextcloudAttributesFinderFeature.OC_CHECKSUMS_CUSTOM_NAMESPACE,
        NextcloudAttributesFinderFeature.OC_SIZE_CUSTOM_NAMESPACE,
        DAVTimestampFeature.LAST_MODIFIED_CUSTOM_NAMESPACE,
        DAVTimestampFeature.LAST_MODIFIED_SERVER_CUSTOM_NAMESPACE).
        collect(Collectors.toSet()));
}
// Listing a directory that does not exist on the server must raise
// NotfoundException.
@Test(expected = NotfoundException.class)
public void testListNotfound() throws Exception {
    new NextcloudListService(session).list(new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new DisabledListProgressListener());
}
// Downloads an image/edits file from the given NameNode HTTP server into the
// provided local paths, delegating to doGetUrl.
// Returns the digest computed by doGetUrl (presumably only when getChecksum
// is true — confirm against doGetUrl).
static MD5Hash getFileClient(URL infoServer, String queryString, List<File> localPaths, Storage dstStorage, boolean getChecksum) throws IOException {
    URL url = new URL(infoServer, ImageServlet.PATH_SPEC + "?" + queryString);
    LOG.info("Opening connection to " + url);
    return doGetUrl(url, localPaths, dstStorage, getChecksum);
}
// When one of the local destination directories is unwritable, the error
// must be reported to NNStorage while the remaining valid path still
// receives the downloaded image.
@Test
public void testClientSideExceptionOnJustOneDir() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0).build();
    NNStorage mockStorage = Mockito.mock(NNStorage.class);
    List<File> localPaths = ImmutableList.of(
        new File("/xxxxx-does-not-exist/blah"),
        new File(TEST_DIR, "testfile")
    );
    try {
        URL fsName = DFSUtil.getInfoServer(
            cluster.getNameNode().getServiceRpcAddress(), conf,
            DFSUtil.getHttpClientScheme(conf)).toURL();
        String id = "getimage=1&txid=0";
        TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);
        Mockito.verify(mockStorage).reportErrorOnFile(localPaths.get(0));
        assertTrue("The valid local file should get saved properly",
            localPaths.get(1).length() > 0);
    } finally {
        cluster.shutdown();
    }
}
// Resolves the protobuf descriptor for the message class and converts it to
// a schema via the descriptor-based overload.
static Schema getSchema(Class<? extends Message> clazz) {
    return getSchema(ProtobufUtil.getDescriptorForClass(clazz));
}
// The translated schema of a proto2 message with optional nested fields must
// match the pre-built expectation.
@Test
public void testOptionalNestedSchema() {
    assertEquals(
        TestProtoSchemas.OPTIONAL_NESTED_SCHEMA,
        ProtoSchemaTranslator.getSchema(Proto2SchemaMessages.OptionalNested.class));
}
// Async put is not part of this adapter's supported surface; marked
// @MethodNotAvailable and always throws.
@Override
@MethodNotAvailable
public CompletionStage<V> putAsync(K key, V value) {
    throw new MethodNotAvailableException();
}
// putAsync with an expiry policy must throw MethodNotAvailableException on
// this adapter.
@Test(expected = MethodNotAvailableException.class)
public void testPutAsyncWithExpiryPolicy() {
    ExpiryPolicy expiryPolicy = new HazelcastExpiryPolicy(1, 1, 1, TimeUnit.MILLISECONDS);
    adapter.putAsync(42, "value", expiryPolicy);
}
/**
 * Merges this estimator with the given ones using the LogLog merge, then
 * wraps the combined register set in a fresh AdaptiveCounting instance.
 *
 * @throws LogLogMergeException if the estimators cannot be merged
 */
@Override
public ICardinality merge(ICardinality... estimators) throws LogLogMergeException {
    final LogLog combined = (LogLog) super.merge(estimators);
    return new AdaptiveCounting(combined.M);
}
// Merging ten estimators must approximate the combined cardinality within
// ~1% (via both the static and instance merge APIs) and agree exactly with a
// single baseline estimator that saw every value.
@Test
public void testMerge() throws CardinalityMergeException {
    int numToMerge = 10;
    int cardinality = 10000;
    AdaptiveCounting[] lcs = new AdaptiveCounting[numToMerge];
    AdaptiveCounting baseline = new AdaptiveCounting(16);
    for (int i = 0; i < numToMerge; i++) {
        lcs[i] = new AdaptiveCounting(16);
        for (int j = 0; j < cardinality; j++) {
            double val = Math.random();
            lcs[i].offer(val);
            baseline.offer(val);
        }
    }
    int expectedCardinality = numToMerge * cardinality;
    long mergedEstimate = AdaptiveCounting.mergeEstimators(lcs).cardinality();
    double error = Math.abs(mergedEstimate - expectedCardinality) / (double) expectedCardinality;
    assertEquals(0.01, error, 0.01);
    // Merge the remaining estimators into the first one via the instance API.
    AdaptiveCounting lc = lcs[0];
    lcs = Arrays.asList(lcs).subList(1, lcs.length).toArray(new AdaptiveCounting[0]);
    mergedEstimate = lc.merge(lcs).cardinality();
    error = Math.abs(mergedEstimate - expectedCardinality) / (double) expectedCardinality;
    assertEquals(0.01, error, 0.01);
    assertEquals(baseline.cardinality(), mergedEstimate);
}
// Decides which metadata version to use during the first phase of an
// upgrade: keep the current version when it is lower or equal to the
// metadata version of the Kafka version we upgrade from; fall back to the
// from-version's metadata version when none is currently set; fail fast
// (KafkaUpgradeException) when the current version is newer.
static String metadataVersionAtUpgrade(Reconciliation reconciliation, String currentMetadataVersion, KafkaVersion versionFrom) {
    if (currentMetadataVersion != null) {
        if (compareDottedIVVersions(currentMetadataVersion, versionFrom.metadataVersion()) > 0) {
            // The current metadata version is newer than the version we are upgrading from
            // => something went completely wrong, and we should just throw an error
            LOGGER.warnCr(reconciliation, "The current metadata version ({}) has to be lower or equal to the Kafka broker version we upgrade from ({})", currentMetadataVersion, versionFrom.version());
            throw new KafkaUpgradeException("The current metadata version (" + currentMetadataVersion + ") has to be lower or equal to the Kafka broker version we upgrade from (" + versionFrom.version() + ")");
        } else {
            // We stick with the current metadata version for the first phase of the upgrade
            // => it will be changed in the next phase (next reconciliation)
            LOGGER.infoCr(reconciliation, "The current metadata version {} will be used in the first phase of the upgrade", currentMetadataVersion);
            return currentMetadataVersion;
        }
    } else {
        // Current metadata version is missing. This should normally not happen in upgrade as it suggests
        // we are upgrading without the previous version being properly deployed. But in case it happens,
        // we use the metadata version from the older version first.
        LOGGER.warnCr(reconciliation, "The current metadata version seems to be missing during upgrade which is unexpected. The metadata version {} of the Kafka we are upgrading from will be used.", versionFrom.metadataVersion());
        return versionFrom.metadataVersion();
    }
}
// Covers all three branches: current version kept, null falling back to the
// from-version's metadata version, and a too-new current version raising
// KafkaUpgradeException with the expected message.
@Test
public void testMetadataVersionAtUpgrade() {
    assertThat(KRaftVersionChangeCreator.metadataVersionAtUpgrade(Reconciliation.DUMMY_RECONCILIATION, "3.6-IV2", VERSIONS.defaultVersion()), is("3.6-IV2"));
    assertThat(KRaftVersionChangeCreator.metadataVersionAtUpgrade(Reconciliation.DUMMY_RECONCILIATION, "3.4-IV2", VERSIONS.defaultVersion()), is("3.4-IV2"));
    assertThat(KRaftVersionChangeCreator.metadataVersionAtUpgrade(Reconciliation.DUMMY_RECONCILIATION, null, VERSIONS.defaultVersion()), is(VERSIONS.defaultVersion().metadataVersion()));
    KafkaUpgradeException ex = assertThrows(KafkaUpgradeException.class, () -> KRaftVersionChangeCreator.metadataVersionAtUpgrade(Reconciliation.DUMMY_RECONCILIATION, "5.11-IV2", VERSIONS.defaultVersion()));
    assertThat(ex.getMessage(), is("The current metadata version (5.11-IV2) has to be lower or equal to the Kafka broker version we upgrade from (" + VERSIONS.defaultVersion().version() + ")"));
}
// Validates this notification: the title must be non-empty and the typed
// config must pass its own validation. Validation errors from the config are
// merged into the result.
@JsonIgnore
public ValidationResult validate() {
    final ValidationResult validation = new ValidationResult();
    if (title().isEmpty()) {
        validation.addError(FIELD_TITLE, "Notification title cannot be empty.");
    }
    try {
        validation.addAll(config().validate());
    } catch (UnsupportedOperationException e) {
        // NOTE(review): presumably thrown when no concrete config type is
        // set — confirm against config().validate().
        validation.addError(FIELD_CONFIG, "Notification config type cannot be empty.");
    }
    return validation;
}
// A fully populated HTTP notification must validate without errors.
@Test
public void testValidHttpNotification() {
    final NotificationDto validNotification = getHttpNotification();
    final ValidationResult validationResult = validNotification.validate();
    assertThat(validationResult.failed()).isFalse();
    assertThat(validationResult.getErrors()).isEmpty();
}
// Returns only those entries of newAttributes that differ from the currently
// stored attributes — by value or by data type. When nothing is currently
// stored, newAttributes is returned unchanged.
List<AttributeKvEntry> filterChangedAttr(List<AttributeKvEntry> currentAttributes, List<AttributeKvEntry> newAttributes) {
    if (currentAttributes == null || currentAttributes.isEmpty()) {
        return newAttributes;
    }
    // Index current attributes by key; on duplicate keys the first one wins.
    Map<String, AttributeKvEntry> currentAttrMap = currentAttributes.stream()
        .collect(Collectors.toMap(AttributeKvEntry::getKey, Function.identity(), (existing, replacement) -> existing));
    return newAttributes.stream()
        .filter(item -> {
            AttributeKvEntry cacheAttr = currentAttrMap.get(item.getKey());
            return cacheAttr == null
                || !Objects.equals(item.getValue(), cacheAttr.getValue())
                //JSON and String can be equals by value, but different by type
                || !Objects.equals(item.getDataType(), cacheAttr.getDataType());
        })
        .collect(Collectors.toList());
}
// Mixed scenario: unchanged entries (even when reordered) must be filtered
// out, while entries changed by value or by data type must be kept.
@Test
void testFilterChangedAttr_whenCurrentAttributesContainsInAnyOrderNewAttributes_thenReturnExpectedList() {
    List<AttributeKvEntry> currentAttributes = List.of(
        new BaseAttributeKvEntry(1694000000L, new StringDataEntry("address", "Peremohy ave 1")),
        new BaseAttributeKvEntry(1694000000L, new BooleanDataEntry("valid", true)),
        new BaseAttributeKvEntry(1694000000L, new LongDataEntry("counter", 100L)),
        new BaseAttributeKvEntry(1694000000L, new DoubleDataEntry("temp", -18.35)),
        new BaseAttributeKvEntry(1694000000L, new JsonDataEntry("json", "{\"warning\":\"out of paper\"}"))
    );
    List<AttributeKvEntry> newAttributes = List.of(
        new BaseAttributeKvEntry(1694000999L, new JsonDataEntry("json", "{\"status\":\"OK\"}")), // value changed, reordered
        new BaseAttributeKvEntry(1694000999L, new StringDataEntry("valid", "true")), //type changed
        new BaseAttributeKvEntry(1694000999L, new LongDataEntry("counter", 101L)), //value changed
        new BaseAttributeKvEntry(1694000999L, new DoubleDataEntry("temp", -18.35)),
        new BaseAttributeKvEntry(1694000999L, new StringDataEntry("address", "Peremohy ave 1")) // reordered
    );
    List<AttributeKvEntry> expected = List.of(
        new BaseAttributeKvEntry(1694000999L, new StringDataEntry("valid", "true")),
        new BaseAttributeKvEntry(1694000999L, new LongDataEntry("counter", 101L)),
        new BaseAttributeKvEntry(1694000999L, new JsonDataEntry("json", "{\"status\":\"OK\"}"))
    );
    List<AttributeKvEntry> filtered = node.filterChangedAttr(currentAttributes, newAttributes);
    assertThat(filtered).containsExactlyInAnyOrderElementsOf(expected);
}
// Blocking poll of up to {@code count} tail elements across this queue and
// the named queues, waiting up to {@code duration}. Result is keyed by queue
// name (presumably this queue is considered first — confirm against
// pollLastFromAnyAsync). Propagates thread interruption.
@Override
public Map<String, List<V>> pollLastFromAny(Duration duration, int count, String... queueNames) throws InterruptedException {
    return commandExecutor.getInterrupted(pollLastFromAnyAsync(duration, count, queueNames));
}
// Polling must return the tail elements (6, 5) of the owning queue, keyed by
// that queue's name, even though additional queue names are supplied.
@Test
public void testPollLastFromAny() throws InterruptedException {
    RBlockingQueue<Integer> queue1 = redisson.getBlockingQueue("queue:pollany");
    RBlockingQueue<Integer> queue2 = redisson.getBlockingQueue("queue:pollany1");
    RBlockingQueue<Integer> queue3 = redisson.getBlockingQueue("queue:pollany2");
    queue3.put(1);
    queue3.put(2);
    queue3.put(3);
    queue1.put(4);
    queue1.put(5);
    queue1.put(6);
    queue2.put(7);
    queue2.put(8);
    queue2.put(9);
    Map<String, List<Integer>> res = queue1.pollLastFromAny(Duration.ofSeconds(4), 2, "queue:pollany1", "queue:pollany2");
    assertThat(res.get("queue:pollany")).containsExactly(6, 5);
}
// Reconciles the owned task set with a new assignment: locks affected tasks,
// sets aside tasks belonging to unknown named topologies, rectifies existing
// tasks (update/recycle/close), then creates the remaining new tasks.
// Failures collected along the way are rethrown after unlocking.
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
                             final Map<TaskId, Set<TopicPartition>> standbyTasks) {
    log.info("Handle new assignment with:\n" +
        "\tNew active tasks: {}\n" +
        "\tNew standby tasks: {}\n" +
        "\tExisting active tasks: {}\n" +
        "\tExisting standby tasks: {}",
        activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());
    topologyMetadata.addSubscribedTopicsFromAssignment(
        activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
        logPrefix
    );
    final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
    final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
    final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
    final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));
    // Only tasks we already own AND that remain assigned need to be locked.
    final Set<TaskId> tasksToLock = tasks.allTaskIds().stream()
        .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
        .collect(Collectors.toSet());
    maybeLockTasks(tasksToLock);
    // first put aside those unrecognized tasks because of unknown named-topologies
    tasks.clearPendingTasksToCreate();
    tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
    tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));
    // first rectify all existing tasks:
    // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
    // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
    // 3. otherwise, close them since they are no longer owned
    final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
    if (stateUpdater == null) {
        handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
    } else {
        handleTasksWithStateUpdater(
            activeTasksToCreate,
            standbyTasksToCreate,
            tasksToRecycle,
            tasksToCloseClean,
            failedTasks
        );
        failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
    }
    final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);
    maybeUnlockTasks(tasksToLock);
    failedTasks.putAll(taskCloseExceptions);
    maybeThrowTaskExceptions(failedTasks);
    createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
// Re-assigning a formerly active task as standby must close the active
// task's producer and recycle it into a standby task, without touching the
// consumer.
@Test
public void shouldConvertActiveTaskToStandbyTask() {
    final StreamTask activeTask = mock(StreamTask.class);
    when(activeTask.id()).thenReturn(taskId00);
    when(activeTask.inputPartitions()).thenReturn(taskId00Partitions);
    when(activeTask.isActive()).thenReturn(true);
    final StandbyTask standbyTask = mock(StandbyTask.class);
    when(standbyTask.id()).thenReturn(taskId00);
    when(activeTaskCreator.createTasks(any(), eq(taskId00Assignment))).thenReturn(singletonList(activeTask));
    when(standbyTaskCreator.createStandbyTaskFromActive(any(), eq(taskId00Partitions))).thenReturn(standbyTask);
    taskManager.handleAssignment(taskId00Assignment, Collections.emptyMap());
    taskManager.handleAssignment(Collections.emptyMap(), taskId00Assignment);
    verify(activeTaskCreator).closeAndRemoveTaskProducerIfNeeded(taskId00);
    verify(activeTaskCreator).createTasks(any(), eq(emptyMap()));
    verify(standbyTaskCreator, times(2)).createTasks(Collections.emptyMap());
    verifyNoInteractions(consumer);
}
// Returns the (memoized) signature for the given DoFn class; parsing happens
// at most once per class.
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
// An OutputReceiver<Row> process-element parameter must be recognized as a
// row receiver in the parsed signature.
@Test
public void testRowReceiver() {
    DoFnSignature sig =
        DoFnSignatures.getSignature(
            new DoFn<String, String>() {
                @ProcessElement
                public void process(OutputReceiver<Row> rowReceiver) {}
            }.getClass());
    assertThat(sig.processElement().getMainOutputReceiver().isRowReceiver(), is(true));
}
// Creates a fresh Builder instance.
public static Builder builder() {
    return new Builder();
}
// Adding/deleting the same file multiple times within a single overwrite
// must be de-duplicated: the snapshot summary counts each file exactly once.
@TestTemplate
public void overwriteWithDuplicates() {
    assertThat(listManifestFiles()).isEmpty();
    table.newFastAppend().appendFile(FILE_A).commit();
    table
        .newOverwrite()
        .deleteFile(FILE_A)
        .deleteFile(DataFiles.builder(SPEC).copy(FILE_A).build())
        .deleteFile(FILE_A)
        .addFile(FILE_C)
        .addFile(DataFiles.builder(SPEC).copy(FILE_C).build())
        .addFile(FILE_C)
        .commit();
    assertThat(table.currentSnapshot().summary())
        .hasSize(14)
        .containsEntry(SnapshotSummary.ADDED_FILES_PROP, "1")
        .containsEntry(SnapshotSummary.ADDED_FILE_SIZE_PROP, "10")
        .containsEntry(SnapshotSummary.ADDED_RECORDS_PROP, "1")
        .containsEntry(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP, "2")
        .containsEntry(SnapshotSummary.DELETED_FILES_PROP, "1")
        .containsEntry(SnapshotSummary.DELETED_RECORDS_PROP, "1")
        .containsEntry(SnapshotSummary.REMOVED_FILE_SIZE_PROP, "10")
        .containsEntry(SnapshotSummary.TOTAL_DATA_FILES_PROP, "1")
        .containsEntry(SnapshotSummary.TOTAL_DELETE_FILES_PROP, "0")
        .containsEntry(SnapshotSummary.TOTAL_EQ_DELETES_PROP, "0")
        .containsEntry(SnapshotSummary.TOTAL_POS_DELETES_PROP, "0")
        .containsEntry(SnapshotSummary.TOTAL_FILE_SIZE_PROP, "10")
        .containsEntry(SnapshotSummary.TOTAL_RECORDS_PROP, "1");
}
// mapKeysV3 overload without a sticky key: delegates with a null sticky key,
// so host ordering is not pinned to any particular key.
@Override
public <K> HostToKeyMapper<K> mapKeysV3(URI serviceUri, Collection<K> keys, int limitNumHostsPerPartition) throws ServiceUnavailableException {
    return getHostToKeyMapper(serviceUri, keys, limitNumHostsPerPartition, null);
}
// With the same sticky key, repeated mapKeysV3 calls must return an
// identical per-key host ordering every time (100 of 100 runs).
@Test(dataProvider = "ringFactories")
public void testMapKeysV3StickKey(RingFactory<URI> ringFactory) throws URISyntaxException, ServiceUnavailableException {
    int numHost = 2;
    URI serviceURI = new URI("d2://articles");
    ConsistentHashKeyMapper mapper = getConsistentHashKeyMapper(ringFactory);
    List<Integer> keys = Arrays.asList(1, 2, 3, 4, 9, 10, 13, 15, 16);
    String myStickyKey = "sticky";
    HostToKeyMapper<Integer> result = mapper.mapKeysV3(serviceURI, keys, numHost, myStickyKey);
    Map<Integer, List<URI>> originalOrderingOfHost = getOrderingOfHostsForEachKey(result, numHost);
    // repeat 100 times. The ordering of the hosts should always be the same because of sticky key
    int numOfMatch = 0;
    for (int i = 0; i < 100; i++) {
        result = mapper.mapKeysV3(serviceURI, keys, numHost, myStickyKey);
        Map<Integer, List<URI>> newOrderingOfHost = getOrderingOfHostsForEachKey(result, 2);
        if (newOrderingOfHost.equals(originalOrderingOfHost)) {
            numOfMatch++;
        }
    }
    Assert.assertEquals(100, numOfMatch);
}
// Merges two KLL histogram estimators for a column: when both are present
// and mergeable, the new sketch is folded into the old one (mutating
// oldEst); when only the new one exists it is returned as-is; otherwise the
// old one (possibly null) is returned.
// NOTE(review): when both estimators exist but cannot be merged, newEst is
// silently dropped and only a debug line is logged — confirm this is
// intended.
protected KllHistogramEstimator mergeHistogramEstimator(
        String columnName, KllHistogramEstimator oldEst, KllHistogramEstimator newEst) {
    if (oldEst != null && newEst != null) {
        if (oldEst.canMerge(newEst)) {
            LOG.trace("Merging old sketch {} with new sketch {}...", oldEst.getSketch(), newEst.getSketch());
            oldEst.mergeEstimators(newEst);
            LOG.trace("Resulting sketch is {}", oldEst.getSketch());
            return oldEst;
        }
        LOG.debug("Merging histograms of column {}", columnName);
    } else if (newEst != null) {
        LOG.trace("Old sketch is empty, the new sketch is used {}", newEst.getSketch());
        return newEst;
    }
    return oldEst;
}
// Merging two non-null estimators must yield the same sketch as building a
// single KLL over the concatenated value sets.
@Test
public void testMergeNonNullHistogramEstimators() {
    KllHistogramEstimator estimator1 = KllHistogramEstimatorFactory.getKllHistogramEstimator(KLL_1.toByteArray());
    KllHistogramEstimator estimator2 = KllHistogramEstimatorFactory.getKllHistogramEstimator(KLL_2.toByteArray());
    KllHistogramEstimator computedEstimator = MERGER.mergeHistogramEstimator("", estimator1, estimator2);
    KllFloatsSketch expectedKll = StatisticsTestUtils.createKll(Longs.concat(VALUES_1, VALUES_2));
    KllHistogramEstimator expectedEstimator = KllHistogramEstimatorFactory.getKllHistogramEstimator(expectedKll.toByteArray());
    assertEquals(expectedEstimator.getSketch().toString(), computedEstimator.getSketch().toString());
}
// Registered name of this transform function.
@Override
public String getName() {
    return FUNCTION_NAME;
}
// div(x, y) with a nullable divisor column: null rows are flagged in the
// bitmap; non-null rows (identical columns) must evaluate to 1.
@Test
public void testDivisionNullColumn() {
    ExpressionContext expression = RequestContextUtils.getExpression(String.format("div(%s,%s)", INT_SV_COLUMN, INT_SV_NULL_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof DivisionTransformFunction);
    Assert.assertEquals(transformFunction.getName(), DivisionTransformFunction.FUNCTION_NAME);
    double[] expectedValues = new double[NUM_ROWS];
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    for (int i = 0; i < NUM_ROWS; i++) {
        if (isNullRow(i)) {
            roaringBitmap.add(i);
        } else {
            expectedValues[i] = 1;
        }
    }
    testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
// Sets the remote host this receiver connects to.
public void setRemoteHost(String remoteHost) {
    this.remoteHost = remoteHost;
}
// Starting with a remote host but no port must fail: the receiver stays
// stopped and the most recent status message mentions the missing port.
@Test
public void testStartNoPort() throws Exception {
    receiver.setRemoteHost(TEST_HOST_NAME);
    receiver.start();
    assertFalse(receiver.isStarted());
    int count = lc.getStatusManager().getCount();
    Status status = lc.getStatusManager().getCopyOfStatusList().get(count - 1);
    assertTrue(status.getMessage().contains("port"));
}
// Validates a new or updated index-set config. Prefix conflicts are checked
// only for new configs (empty id); field-mapping legality and the simple
// config checks then run. The first violation found is returned.
public Optional<Violation> validate(IndexSetConfig newConfig) {
    // Don't validate prefix conflicts in case of an update
    if (Strings.isNullOrEmpty(newConfig.id())) {
        final Violation prefixViolation = validatePrefix(newConfig);
        if (prefixViolation != null) {
            return Optional.of(prefixViolation);
        }
    }
    final Violation fieldMappingViolation = validateMappingChangesAreLegal(newConfig);
    if (fieldMappingViolation != null) {
        return Optional.of(fieldMappingViolation);
    }
    Violation refreshIntervalViolation = validateSimpleIndexSetConfig(newConfig);
    if (refreshIntervalViolation != null){
        return Optional.of(refreshIntervalViolation);
    }
    return Optional.empty();
}
// Custom field mappings must be rejected for events/failures index sets but
// accepted (or empty) for the default template type.
@Test
public void testValidationOfCustomMappingsInIndexSetConfig() {
    final IndexSet indexSet = mock(IndexSet.class);
    when(indexSetRegistry.iterator()).thenReturn(Collections.singleton(indexSet).iterator());
    when(indexSet.getIndexPrefix()).thenReturn("foo");
    this.validator = new IndexSetValidator(indexSetRegistry, elasticsearchConfiguration, dataTieringOrchestrator, dataTieringChecker);
    IndexSetConfig config = testIndexSetConfig().toBuilder().indexTemplateType(EVENT_TEMPLATE_TYPE)
        .customFieldMappings(new CustomFieldMappings(List.of(new CustomFieldMapping("john", "long"))))
        .build();
    assertThat(validator.validate(config)).hasValueSatisfying(v ->
        assertThat(v.message()).contains("Custom field mappings cannot be set for events and failures index sets"));
    config = testIndexSetConfig().toBuilder().indexTemplateType("failures")
        .customFieldMappings(new CustomFieldMappings(List.of(new CustomFieldMapping("john", "long"))))
        .build();
    assertThat(validator.validate(config)).hasValueSatisfying(v ->
        assertThat(v.message()).contains("Custom field mappings cannot be set for events and failures index sets"));
    config = testIndexSetConfig().toBuilder().indexTemplateType(EVENT_TEMPLATE_TYPE)
        .customFieldMappings(new CustomFieldMappings())
        .build();
    assertThat(validator.validate(config)).isEmpty();
    config = testIndexSetConfig().toBuilder().indexTemplateType("failures")
        .customFieldMappings(new CustomFieldMappings())
        .build();
    assertThat(validator.validate(config)).isEmpty();
    config = testIndexSetConfig().toBuilder().indexTemplateType(DEFAULT_INDEX_TEMPLATE_TYPE)
        .customFieldMappings(new CustomFieldMappings(List.of(new CustomFieldMapping("john", "long"))))
        .build();
    assertThat(validator.validate(config)).isEmpty();
}
/**
 * Upper-bound binary search over the first {@code size} elements of {@code array}
 * (each element is a JournalObserver, ordered by target journal version).
 * Returns the index of the first element whose version is strictly greater than
 * {@code value} — equivalently, the number of leading elements with version <= value.
 */
public static int upperBound(Object[] array, int size, Long value) {
    // Canonical half-open search: invariant is that every index < lo holds a
    // version <= value and every index >= hi holds a version > value.
    int lo = 0;
    int hi = size;
    while (lo < hi) {
        final int mid = lo + ((hi - lo) >> 1);
        final Long midVersion = ((JournalObserver) array[mid]).getTargetJournalVersion();
        if (value >= midVersion) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    return lo;
}
// Exercises JournalObservable.upperBound over an ordered multiset: empty input, a single
// element, duplicate versions (insertion point lands after all duplicates), and removal
// of duplicates one by one.
// NOTE(review): the "odd elements" block actually holds 6 elements and the "even
// elements" block 7 — the labels appear swapped; the assertions themselves are consistent.
@Test
public void testUpperBound() {
    Multiset<JournalObserver> elements = TreeMultiset.create();
    JournalObserver ovserver2 = new JournalObserver(2L);
    JournalObserver ovserver4 = new JournalObserver(4L);
    JournalObserver ovserver41 = new JournalObserver(4L);
    JournalObserver ovserver42 = new JournalObserver(4L);
    JournalObserver ovserver6 = new JournalObserver(6L);

    // empty
    {
        Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), 0, 1L));
    }

    // one element
    {
        elements.add(ovserver2);
        int size = elements.size();
        Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L));
        Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 2L));
        Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 3L));
    }

    // same element
    {
        elements.clear();
        elements.add(ovserver2);
        elements.add(ovserver6);
        elements.add(ovserver4);
        elements.add(ovserver41);
        elements.add(ovserver42);
        for (JournalObserver journalObserver : elements) {
            System.out.println(journalObserver);
        }
        int size = elements.size();
        Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L));
        Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 2L));
        Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 3L));
        Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 4L));
        // Removing duplicates of version 4 shifts the bound back one step each time.
        elements.remove(ovserver41);
        Assert.assertEquals(3, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L));
        elements.remove(ovserver4);
        Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L));
        elements.remove(ovserver42);
        Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L));
    }

    // same element 2
    {
        elements.clear();
        elements.add(ovserver4);
        elements.add(ovserver41);
        int size = elements.size();
        Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 4L));
        elements.remove(ovserver41);
        Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L));
        elements.remove(ovserver4);
        Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L));
    }

    // odd elements
    {
        elements.clear();
        elements.add(ovserver2);
        elements.add(ovserver2);
        elements.add(ovserver4);
        elements.add(ovserver4);
        elements.add(ovserver6);
        elements.add(ovserver6);
        int size = elements.size();
        // System.out.println("size=" + size);
        // for(int i = 0; i < size; i ++) {
        // System.out.println("array " + i + " = " + ((MasterOpExecutor)elements.get(i)).getTargetJournalId());
        // }
        Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L));
        Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 2L));
        Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 3L));
        Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 4L));
        Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 5L));
        Assert.assertEquals(6, JournalObservable.upperBound(elements.toArray(), size, 6L));
        Assert.assertEquals(6, JournalObservable.upperBound(elements.toArray(), size, 7L));
    }

    // even elements
    {
        elements.clear();
        elements.add(ovserver2);
        elements.add(ovserver2);
        elements.add(ovserver4);
        elements.add(ovserver4);
        elements.add(ovserver4);
        elements.add(ovserver6);
        elements.add(ovserver6);
        int size = elements.size();
        Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L));
        Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 2L));
        Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 3L));
        Assert.assertEquals(5, JournalObservable.upperBound(elements.toArray(), size, 4L));
        Assert.assertEquals(5, JournalObservable.upperBound(elements.toArray(), size, 5L));
        Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 6L));
        Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 7L));
    }

    // NOTE(review): this latch block neither asserts nor relates to upperBound — it
    // only prints counts. It looks like leftover experimentation; confirm and remove.
    {
        CountDownLatch latch = new CountDownLatch(1);
        System.out.println(latch.getCount());
        latch.countDown();
        System.out.println(latch.getCount());
        latch.countDown();
        System.out.println(latch.getCount());
        latch.countDown();
        System.out.println(latch.getCount());
    }
    System.out.println("success");
}
/**
 * Dispatches to the visitor callback registered for the schema's type and returns
 * its result. The unchecked cast is safe because each registered handler returns
 * the visitor's declared result type.
 *
 * @throws UnsupportedOperationException when no handler exists for the schema type
 */
@SuppressWarnings("unchecked")
public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) {
    final BiFunction<Visitor<?, ?>, Schema, Object> typeHandler = HANDLER.get(schema.type());
    if (typeHandler != null) {
        return (S) typeHandler.apply(visitor, schema);
    }
    throw new UnsupportedOperationException("Unsupported schema type: " + schema.type());
}
// An OPTIONAL_FLOAT64 schema must be dispatched to visitFloat64 with the same schema
// instance, and the visitor's return value must be propagated.
@Test
public void shouldVisitFloat64() {
    // Given:
    final Schema schema = Schema.OPTIONAL_FLOAT64_SCHEMA;
    when(visitor.visitFloat64(any())).thenReturn("Expected");

    // When:
    final String result = SchemaWalker.visit(schema, visitor);

    // Then:
    verify(visitor).visitFloat64(same(schema));
    assertThat(result, is("Expected"));
}
public DataTableDiff calculateUnorderedDiffs() { List<SimpleEntry<List<String>, DiffType>> diffTableRows = new ArrayList<>(); // 1. add all "to" row in extra table // 2. iterate over "from", when a common row occurs, remove it from // extraRows // finally, only extra rows are kept and in same order that in "to". ArrayList<List<String>> extraRows = new ArrayList<>(to.cells()); for (List<String> row : from.cells()) { if (!extraRows.remove(row)) { diffTableRows.add( new SimpleEntry<>(row, DiffType.DELETE)); } else { diffTableRows.add( new SimpleEntry<>(row, DiffType.NONE)); } } for (List<String> cells : extraRows) { diffTableRows.add( new SimpleEntry<>(cells, DiffType.INSERT)); } return DataTableDiff.create(diffTableRows); }
// An unordered diff of a table against itself must report no changes.
@Test
void unordered_diff_with_itself() {
    assertTrue(new TableDiffer(table(), table()).calculateUnorderedDiffs().isEmpty());
}
/**
 * Looks up the {@link ServerConfiguration} registered for the given issuer.
 *
 * @param issuer the issuer identifier used as the map key
 * @return the configuration, or {@code null} when none is registered for the issuer
 */
@Override
public ServerConfiguration getServerConfiguration(String issuer) {
    return servers.get(issuer);
}
// Happy path: the configuration registered for the issuer is returned unchanged.
@Test
public void getServerConfiguration_success() {
    ServerConfiguration result = service.getServerConfiguration(issuer);
    // NOTE(review): this line asserts the fixture mock itself, not the result — the
    // equality check below is the assertion that actually exercises the service.
    assertThat(mockServerConfig, is(notNullValue()));
    assertEquals(mockServerConfig, result);
}
/**
 * Returns the destination function for this sink, cast to the caller's expected
 * type parameters. The unchecked cast is confined to this accessor.
 */
@SuppressWarnings("unchecked")
public DynamicDestinations<UserT, DestinationT, OutputT> getDynamicDestinations() {
    return (DynamicDestinations<UserT, DestinationT, OutputT>) dynamicDestinations;
}
// The filename policy must expand the shard (SSSSS) and shard-count (NNNNN) templates
// for 3 shards, 1 shard, and the degenerate 0-shard case.
@Test
public void testGenerateOutputFilenames() {
    List<ResourceId> expected;
    List<ResourceId> actual;
    ResourceId root = getBaseOutputDirectory();
    SimpleSink<Void> sink = SimpleSink.makeSimpleSink(
        root, "file", ".SSSSS.of.NNNNN", ".test", Compression.UNCOMPRESSED);
    FilenamePolicy policy = sink.getDynamicDestinations().getFilenamePolicy(null);

    expected = Arrays.asList(
        root.resolve("file.00000.of.00003.test", StandardResolveOptions.RESOLVE_FILE),
        root.resolve("file.00001.of.00003.test", StandardResolveOptions.RESOLVE_FILE),
        root.resolve("file.00002.of.00003.test", StandardResolveOptions.RESOLVE_FILE));
    actual = generateDestinationFilenames(policy, 3);
    assertEquals(expected, actual);

    expected = Collections.singletonList(
        root.resolve("file.00000.of.00001.test", StandardResolveOptions.RESOLVE_FILE));
    actual = generateDestinationFilenames(policy, 1);
    assertEquals(expected, actual);

    // Zero shards yields an empty filename list.
    expected = new ArrayList<>();
    actual = generateDestinationFilenames(policy, 0);
    assertEquals(expected, actual);
}
/**
 * Truncates {@code text} just before the n-th occurrence of {@code cutoff}.
 * If {@code n <= 0} or the text contains fewer than {@code n} occurrences,
 * the text is returned unchanged.
 */
@Nonnull
public static String cutOffAtNth(@Nonnull String text, char cutoff, int n) {
    // Advance through the first n occurrences; bail out with the full text as soon
    // as one is missing.
    int idx = -1;
    for (int remaining = n; remaining > 0; remaining--) {
        idx = text.indexOf(cutoff, idx + 1);
        if (idx < 0) {
            return text;
        }
    }
    // idx stays -1 only when the loop never ran (n <= 0).
    return idx < 0 ? text : text.substring(0, idx);
}
// Covers both the char and the String overloads of cutOffAtNth: non-positive n and
// too-few occurrences return the input unchanged; otherwise the text is cut just
// before the n-th occurrence (including overlapping multi-char matches for "bb").
@Test
void testCutOffAtNth() {
    // chars
    assertEquals("", StringUtil.cutOffAtNth("", 'a', -1));
    assertEquals("", StringUtil.cutOffAtNth("", 'a', 0));
    assertEquals("", StringUtil.cutOffAtNth("", 'a', 1));
    assertEquals("aaa", StringUtil.cutOffAtNth("aaa", 'a', -1));
    assertEquals("aaa", StringUtil.cutOffAtNth("aaa", 'a', 0));
    assertEquals("", StringUtil.cutOffAtNth("aaa", 'a', 1));
    assertEquals("a", StringUtil.cutOffAtNth("aaa", 'a', 2));
    assertEquals("aa", StringUtil.cutOffAtNth("aaa", 'a', 3));
    assertEquals("aaa", StringUtil.cutOffAtNth("aaa", 'a', 4));
    // strings
    assertEquals("", StringUtil.cutOffAtNth("", "a", -1));
    assertEquals("", StringUtil.cutOffAtNth("", "a", 0));
    assertEquals("", StringUtil.cutOffAtNth("", "a", 1));
    assertEquals("aaa", StringUtil.cutOffAtNth("aaa", "a", -1));
    assertEquals("aaa", StringUtil.cutOffAtNth("aaa", "a", 0));
    assertEquals("", StringUtil.cutOffAtNth("aaa", "a", 1));
    assertEquals("a", StringUtil.cutOffAtNth("aaa", "a", 2));
    assertEquals("aa", StringUtil.cutOffAtNth("aaa", "a", 3));
    assertEquals("aaa", StringUtil.cutOffAtNth("aaa", "a", 4));
    assertEquals("b", StringUtil.cutOffAtNth("bbbbbbbbbb", "bb", 2));
    assertEquals("bb", StringUtil.cutOffAtNth("bbbbbbbbbb", "bb", 3));
    assertEquals("bbb", StringUtil.cutOffAtNth("bbbbbbbbbb", "bb", 4));
}
/**
 * Aborts the job: for every output table, deletes any data files that tasks already
 * committed, then removes the per-job temporary locations via cleanup().
 * Table-level work runs on tableExecutor; per-file deletes run on fileExecutor.
 * Failures are logged and suppressed so that cleanup is best-effort.
 * NOTE(review): the {@code status} parameter is unused here — presumably kept for the
 * OutputCommitter contract; confirm.
 */
@Override
public void abortJob(JobContext originalContext, int status) throws IOException {
    JobContext jobContext = TezUtil.enrichContextWithVertexId(originalContext);
    JobConf jobConf = jobContext.getJobConf();
    LOG.info("Job {} is aborted. Data file cleaning started", jobContext.getJobID());
    Collection<String> outputs = HiveIcebergStorageHandler.outputTables(jobContext.getJobConf());
    // Concurrent queue: populated from parallel table tasks below.
    Collection<String> jobLocations = new ConcurrentLinkedQueue<>();
    ExecutorService fileExecutor = fileExecutor(jobConf);
    ExecutorService tableExecutor = tableExecutor(jobConf, outputs.size());
    try {
        // Cleans up the changes for the output tables in parallel
        Tasks.foreach(outputs)
            .suppressFailureWhenFinished()
            .executeWith(tableExecutor)
            .onFailure((output, exc) -> LOG.warn("Failed cleanup table {} on abort job", output, exc))
            .run(
                output -> {
                    LOG.info("Cleaning table {} with job id {}", output, jobContext.getJobID());
                    Table table = HiveIcebergStorageHandler.table(jobConf, output);
                    jobLocations.add(
                        generateJobLocation(table.location(), jobConf, jobContext.getJobID()));
                    Collection<DataFile> dataFiles =
                        dataFiles(fileExecutor, table.location(), jobContext, table.io(), false);

                    // Check if we have files already committed and remove data files if there are any
                    if (!dataFiles.isEmpty()) {
                        Tasks.foreach(dataFiles)
                            .retry(3)
                            .suppressFailureWhenFinished()
                            .executeWith(fileExecutor)
                            .onFailure(
                                (file, exc) ->
                                    LOG.warn(
                                        "Failed to remove data file {} on abort job", file.path(), exc))
                            .run(file -> table.io().deleteFile(file.path().toString()));
                    }
                });
    } finally {
        // Executors must be shut down even when cleanup fails.
        fileExecutor.shutdown();
        if (tableExecutor != null) {
            tableExecutor.shutdown();
        }
    }
    LOG.info("Job {} is aborted. Data file cleaning finished", jobContext.getJobID());
    cleanup(jobContext, jobLocations);
}
// Aborting a job after writing records must leave the table with no files and no data.
@Test
public void testAbortJob() throws IOException {
    HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter();
    Table table = table(temp.toFile().getPath(), false);
    JobConf conf = jobConf(table, 1);
    writeRecords(table.name(), 1, 0, true, false, conf);
    committer.abortJob(new JobContextImpl(conf, JOB_ID), JobStatus.State.FAILED);

    HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 0);
    HiveIcebergTestUtils.validateData(table, Collections.emptyList(), 0);
}
/**
 * Periodically sends a car registration request over RabbitMQ and handles the typed
 * reply asynchronously via a callback; the scheduled method itself returns immediately.
 */
@Scheduled(fixedDelay = 3000, initialDelay = 1500)
public void sendAsynchronouslyWithCallback() {
    CarDto carDto = CarDto.builder()
        .id(UUID.randomUUID())
        .color("black")
        .name("bmw")
        .build();
    RabbitConverterFuture<RegistrationDto> rabbitConverterFuture =
        asyncRabbitTemplate.convertSendAndReceiveAsType(
            directExchange.getName(), ROUTING_KEY, carDto, new ParameterizedTypeReference<>() {});
    // NOTE(review): ListenableFutureCallback is deprecated in recent Spring versions in
    // favor of CompletableFuture — confirm the project's Spring baseline before migrating.
    rabbitConverterFuture.addCallback(new ListenableFutureCallback<>() {
        @Override
        public void onFailure(Throwable ex) {
            LOGGER.error("Cannot get response for: {}", carDto.getId(), ex);
        }

        @Override
        public void onSuccess(RegistrationDto registrationDto) {
            LOGGER.info("Registration received {}", registrationDto);
        }
    });
}
// The scheduled sender must complete without throwing; the reply itself is handled
// asynchronously and is not asserted here.
@Test
void sendAsynchronouslyWithCallback() {
    // given
    // when
    ThrowableAssert.ThrowingCallable send = () -> statefulCallbackClient.sendAsynchronouslyWithCallback();
    // then
    assertThatCode(send).doesNotThrowAnyException();
}
/**
 * Creates or resumes the session state for an incoming SAML authentication request.
 * A federation session is only set up when the request carries a federation name;
 * the SAML session is always initialized.
 *
 * @throws SamlSessionException when session initialization fails
 * @throws SharedServiceClientException when a shared-service lookup fails
 */
public void initializeSession(AuthenticationRequest authenticationRequest, SAMLBindingContext bindingContext) throws SamlSessionException, SharedServiceClientException {
    final String httpSessionId = authenticationRequest.getRequest().getSession().getId();
    if (authenticationRequest.getFederationName() != null) {
        findOrInitializeFederationSession(authenticationRequest, httpSessionId);
    }
    findOrInitializeSamlSession(authenticationRequest, httpSessionId, bindingContext);
}
// A CombiConnect request without a federation name must only initialize the SAML
// session: no federation name is recorded and the protocol type is preserved.
@Test
public void initializeSessionCombiConnectTest() throws SamlSessionException, SharedServiceClientException {
    authenticationRequest.setProtocolType(SAML_COMBICONNECT);
    authenticationRequest.setFederationName(null);
    samlSessionService.initializeSession(authenticationRequest, bindingContext);
    assertNull(authenticationRequest.getSamlSession().getFederationName());
    assertEquals(SAML_COMBICONNECT, authenticationRequest.getSamlSession().getProtocolType());
}
/**
 * Runs the parameter-substitution state machine over the pre-resolved text,
 * character by character, and returns the handler's accumulated result.
 */
public String process(String preResolved, ParamHandler paramsHandler) {
    ReaderState state = ReaderState.NOT_IN_PATTERN;
    for (final char ch : preResolved.toCharArray()) {
        state = state.interpret(ch, paramsHandler);
    }
    // Give the handler a chance to react to the state left at end of input.
    paramsHandler.handleAfterResolution(state);
    return paramsHandler.getResult();
}
// After a complete "#{pattern}" token is processed, the shared IN_PATTERN buffer must
// be emptied and the handler notified exactly once.
@Test
public void shouldClearPatternWhenFound() throws Exception {
    ParamStateMachine stateMachine = new ParamStateMachine();
    stateMachine.process("#{pattern}", handler);
    // NOTE(review): asserting on ReaderState.IN_PATTERN.pattern relies on mutable
    // static enum state shared across tests — confirm tests are not run concurrently.
    assertThat(ParamStateMachine.ReaderState.IN_PATTERN.pattern.length(), is(0));
    verify(handler).handlePatternFound(any(StringBuilder.class));
}
/**
 * Returns the differential entropy of this distribution (precomputed and cached
 * in the {@code entropy} field).
 */
@Override
public double entropy() {
    return entropy;
}
// Entropy must match precomputed reference values for several (mu, sigma) pairs.
@Test
public void testEntropy() {
    System.out.println("entropy");
    GaussianDistribution instance = new GaussianDistribution(0.0, 1.0);
    // NOTE(review): rand() draws a sample whose result is unused — presumably it checks
    // that sampling does not disturb the cached entropy; confirm intent.
    instance.rand();
    assertEquals(1.418939, instance.entropy(), 1E-6);

    instance = new GaussianDistribution(1.0, 2.0);
    instance.rand();
    assertEquals(2.112086, instance.entropy(), 1E-6);

    instance = new GaussianDistribution(2.0, 0.5);
    instance.rand();
    assertEquals(0.7257914, instance.entropy(), 1E-6);

    instance = new GaussianDistribution(3.0, 3.8);
    instance.rand();
    assertEquals(2.753940, instance.entropy(), 1E-6);
}
/**
 * Builds a JMX reporter from the given properties; the optional port argument
 * (ARG_PORT) configures which port(s) the reporter may use, null meaning default.
 */
@Override
public JMXReporter createMetricReporter(Properties properties) {
    String portsConfig = properties.getProperty(ARG_PORT);
    return new JMXReporter(portsConfig);
}
// Without a port argument the factory must create a reporter with no JMX port set;
// the reporter is closed in finally to release the underlying JMX server.
@Test
void testWithoutArgument() {
    JMXReporter metricReporter = new JMXReporterFactory().createMetricReporter(new Properties());
    try {
        assertThat(metricReporter.getPort()).isEmpty();
    } finally {
        metricReporter.close();
    }
}
/**
 * Resolves the SQL type of the expression with no additional type mappings in
 * scope — convenience overload delegating with an empty map.
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
// A searched CASE whose THEN branches disagree on type (STRING then INTEGER) must be
// rejected with a message naming the offending clause and both types.
@Test
public void shouldFailOnInconsistentWhenResultType() {
    // Given:
    final Expression expression = new SearchedCaseExpression(
        ImmutableList.of(
            new WhenClause(
                new ComparisonExpression(Type.EQUAL, TestExpressions.COL0, new IntegerLiteral(100)),
                new StringLiteral("one-hundred")
            ),
            new WhenClause(
                new ComparisonExpression(Type.EQUAL, TestExpressions.COL0, new IntegerLiteral(10)),
                new IntegerLiteral(10)
            )
        ),
        Optional.empty()
    );

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> expressionTypeManager.getExpressionSqlType(expression)
    );

    // Then:
    assertThat(e.getMessage(), containsString(
        "Invalid Case expression. Type for all 'THEN' clauses should be the same."
            + System.lineSeparator()
            + "THEN expression 'WHEN (COL0 = 10) THEN 10' has type: INTEGER."
            + System.lineSeparator()
            + "Previous THEN expression(s) type: STRING."
    ));
}
/**
 * Records a watermark observed on one input queue.
 *
 * @param queueIndex index of the input queue the watermark arrived on
 * @param wmValue    the observed watermark value
 * @return the new coalesced watermark to forward, or a no-watermark sentinel
 *         (NO_NEW_WM, per call sites) when nothing should be emitted yet
 */
public abstract long observeWm(int queueIndex, long wmValue);
// When queue 0 reports idle, a watermark from the remaining active queue 1 must be
// forwarded immediately instead of waiting for queue 0.
@Test
public void when_i1_idle_i2_active_then_wmForwardedImmediately() {
    assertEquals(NO_NEW_WM, wc.observeWm(0, IDLE_MESSAGE.timestamp()));
    assertEquals(100, wc.observeWm(1, 100));
}
/**
 * Enables or disables this map store configuration.
 *
 * @param enabled true to enable the map store
 * @return this configuration instance, for fluent call chaining
 */
public MapStoreConfig setEnabled(boolean enabled) {
    this.enabled = enabled;
    return this;
}
// A config built with setEnabled(false) must report disabled and be equal to the
// pre-built disabled fixture (i.e. the flag participates in equals()).
@Test
public void setEnabled() {
    assertFalse(cfgNotEnabled.isEnabled());
    assertEquals(new MapStoreConfig().setEnabled(false), cfgNotEnabled);
}
/**
 * Validates a configuration content string.
 *
 * @param content the content to check
 * @throws NacosException with CLIENT_INVALID_PARAM when the content is null, empty,
 *                        or whitespace-only (per StringUtils.isBlank)
 */
public static void checkContent(String content) throws NacosException {
    if (StringUtils.isBlank(content)) {
        throw new NacosException(NacosException.CLIENT_INVALID_PARAM, CONTENT_INVALID_MSG);
    }
}
// Empty content must be rejected with a NacosException whose message mentions
// "content invalid".
@Test
void testCheckContentFail() throws NacosException {
    Throwable exception = assertThrows(NacosException.class, () -> {
        ParamUtils.checkContent("");
    });
    assertTrue(exception.getMessage().contains("content invalid"));
}
/**
 * Starts the telemetry daemon: records an opt-out marker the first time telemetry is
 * seen disabled, clears a stale marker when it is re-enabled, and schedules the
 * periodic upload task when telemetry is active.
 */
@Override
public void start() {
    // Bug fix: the failure message previously referenced SONAR_TELEMETRY_URL although
    // the setting actually read (and potentially missing) is SONAR_TELEMETRY_ENABLE.
    boolean isTelemetryActivated = config.getBoolean(SONAR_TELEMETRY_ENABLE.getKey())
      .orElseThrow(() -> new IllegalStateException(String.format("Setting '%s' must be provided.", SONAR_TELEMETRY_ENABLE.getKey())));
    boolean hasOptOut = internalProperties.read(I_PROP_OPT_OUT).isPresent();

    // First time telemetry is seen disabled: remember when the user opted out.
    if (!isTelemetryActivated && !hasOptOut) {
      optOut();
      internalProperties.write(I_PROP_OPT_OUT, String.valueOf(system2.now()));
      LOG.info("Sharing of SonarQube statistics is disabled.");
    }

    // Telemetry re-enabled: drop the stale opt-out marker.
    if (isTelemetryActivated && hasOptOut) {
      internalProperties.write(I_PROP_OPT_OUT, null);
    }

    if (!isTelemetryActivated) {
      return;
    }

    LOG.info("Sharing of SonarQube statistics is enabled.");
    int frequencyInSeconds = frequency();
    scheduleWithFixedDelay(telemetryCommand(), frequencyInSeconds, frequencyInSeconds, TimeUnit.SECONDS);
}
// No upload must happen when the last ping was less than a day ago (20 hours here),
// even after the 1-second scheduling frequency has elapsed (checked for 2 seconds).
@Test
void do_not_send_data_if_last_ping_earlier_than_one_day_ago() throws IOException {
    initTelemetrySettingsToDefaultValues();
    when(lockManager.tryLock(any(), anyInt())).thenReturn(true);
    settings.setProperty("sonar.telemetry.frequencyInSeconds", "1");
    long now = system2.now();
    long twentyHoursAgo = now - (ONE_HOUR * 20L);
    mockDataJsonWriterDoingSomething();
    internalProperties.write("telemetry.lastPing", String.valueOf(twentyHoursAgo));
    underTest.start();

    verify(client, after(2_000).never()).upload(anyString());
}
/**
 * Returns true when the given string is neither {@code null} nor empty.
 */
public static boolean isNotEmpty(@Nullable String string) {
    // Equivalent negated form of (null || empty).
    return !(string == null || string.isEmpty());
}
// A non-empty string must be reported as not empty.
// NOTE(review): the null and "" cases are not covered by this test.
@Test
public void testString() {
    assertThat(StringUtils.isNotEmpty("bla")).isTrue();
}
/**
 * Returns the application's greeting message.
 */
public String getGreeting() {
    return "Hello world.";
}
// The application must expose a non-null greeting.
@Test
public void testAppHasAGreeting() {
    App classUnderTest = new App();
    assertNotNull("app should have a greeting", classUnderTest.getGreeting());
}
/**
 * Logs a cluster RequestVote event into the event ring buffer. The write is dropped
 * (not blocked) when tryClaim cannot reserve space — a non-positive index.
 */
public void logOnRequestVote(
    final int memberId,
    final long logLeadershipTermId,
    final long logPosition,
    final long candidateTermId,
    final int candidateId,
    final int protocolVersion) {
    final int length = requestVoteLength();
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    final int index = ringBuffer.tryClaim(REQUEST_VOTE.toEventCodeId(), encodedLength);

    if (index > 0) {
        try {
            encodeOnRequestVote(
                (UnsafeBuffer)ringBuffer.buffer(),
                index,
                captureLength,
                length,
                memberId,
                logLeadershipTermId,
                logPosition,
                candidateTermId,
                candidateId,
                protocolVersion);
        } finally {
            // Always commit the claimed slot, even if encoding throws, so the ring
            // buffer is not left with an unreleased claim.
            ringBuffer.commit(index);
        }
    }
}
// Verifies both the binary layout written into the log buffer (field order: three
// longs, then candidateId, protocolVersion, memberId) and the dissected text form.
@Test
void logOnRequestVote() {
    final long logLeadershipTermId = 12;
    final long logPosition = 4723489263846823L;
    final long candidateTermId = -19;
    final int candidateId = 89;
    final int protocolVersion = SemanticVersion.compose(2, 5, 17);
    final int memberId = 3;
    final int offset = 8;
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);

    logger.logOnRequestVote(
        memberId, logLeadershipTermId, logPosition, candidateTermId, candidateId, protocolVersion);

    verifyLogHeader(logBuffer, offset, REQUEST_VOTE.toEventCodeId(), 36, 36);
    final int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
    assertEquals(logLeadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
    assertEquals(logPosition, logBuffer.getLong(index + SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(candidateTermId, logBuffer.getLong(index + 2 * SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(candidateId, logBuffer.getInt(index + 3 * SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(protocolVersion, logBuffer.getInt(index + 3 * SIZE_OF_LONG + SIZE_OF_INT, LITTLE_ENDIAN));
    assertEquals(memberId, logBuffer.getInt(index + 3 * SIZE_OF_LONG + 2 * SIZE_OF_INT, LITTLE_ENDIAN));

    final StringBuilder sb = new StringBuilder();
    ClusterEventDissector.dissectRequestVote(
        REQUEST_VOTE, logBuffer, encodedMsgOffset(offset), sb);

    final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: REQUEST_VOTE " +
        "\\[36/36]: memberId=3 logLeadershipTermId=12 logPosition=4723489263846823 candidateTermId=-19 " +
        "candidateId=89 protocolVersion=2.5.17";
    assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
}
/**
 * Loads all plugins in three phases: bundled (built-in) plugins, then external
 * plugins, then downloaded plugin updates (which replace/augment external plugins).
 * Duplicate keys within a phase, or an external plugin shadowing a bundled one,
 * abort with a MessageException. Finally, plugins incompatible with this SonarQube
 * version are unloaded before the surviving set is returned.
 */
public Collection<ServerPluginInfo> loadPlugins() {
    Map<String, ServerPluginInfo> bundledPluginsByKey = new LinkedHashMap<>();
    for (ServerPluginInfo bundled : getBundledPluginsMetadata()) {
        // Two jars providing the same bundled plugin key is a packaging error.
        failIfContains(bundledPluginsByKey, bundled,
            plugin -> MessageException.of(format("Found two versions of the plugin %s [%s] in the directory %s. Please remove one of %s or %s.",
                bundled.getName(), bundled.getKey(), getRelativeDir(fs.getInstalledBundledPluginsDir()), bundled.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
        bundledPluginsByKey.put(bundled.getKey(), bundled);
    }

    Map<String, ServerPluginInfo> externalPluginsByKey = new LinkedHashMap<>();
    for (ServerPluginInfo external : getExternalPluginsMetadata()) {
        // External plugins may not shadow a built-in feature...
        failIfContains(bundledPluginsByKey, external,
            plugin -> MessageException.of(format("Found a plugin '%s' in the directory '%s' with the same key [%s] as a built-in feature '%s'. Please remove '%s'.",
                external.getName(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getKey(), plugin.getName(),
                new File(getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName()))));
        // ...nor appear twice themselves.
        failIfContains(externalPluginsByKey, external,
            plugin -> MessageException.of(format("Found two versions of the plugin '%s' [%s] in the directory '%s'. Please remove %s or %s.",
                external.getName(), external.getKey(), getRelativeDir(fs.getInstalledExternalPluginsDir()), external.getNonNullJarFile().getName(), plugin.getNonNullJarFile().getName())));
        externalPluginsByKey.put(external.getKey(), external);
    }

    for (PluginInfo downloaded : getDownloadedPluginsMetadata()) {
        // A downloaded plugin may not replace a built-in feature either.
        failIfContains(bundledPluginsByKey, downloaded,
            plugin -> MessageException.of(format("Fail to update plugin: %s. Built-in feature with same key already exists: %s. Move or delete plugin from %s directory",
                plugin.getName(), plugin.getKey(), getRelativeDir(fs.getDownloadedPluginsDir()))));

        ServerPluginInfo installedPlugin;
        if (externalPluginsByKey.containsKey(downloaded.getKey())) {
            // Update case: drop the old jar before moving the new one into place.
            deleteQuietly(externalPluginsByKey.get(downloaded.getKey()).getNonNullJarFile());
            installedPlugin = moveDownloadedPluginToExtensions(downloaded);
            LOG.info("Plugin {} [{}] updated to version {}", installedPlugin.getName(), installedPlugin.getKey(), installedPlugin.getVersion());
        } else {
            installedPlugin = moveDownloadedPluginToExtensions(downloaded);
            LOG.info("Plugin {} [{}] installed", installedPlugin.getName(), installedPlugin.getKey());
        }
        externalPluginsByKey.put(downloaded.getKey(), installedPlugin);
    }

    // Bundled plugins win over external ones on key collision (putAll order).
    Map<String, ServerPluginInfo> plugins = new HashMap<>(externalPluginsByKey.size() + bundledPluginsByKey.size());
    plugins.putAll(externalPluginsByKey);
    plugins.putAll(bundledPluginsByKey);

    PluginRequirementsValidator.unloadIncompatiblePlugins(plugins);

    return plugins.values();
}
// Installing the obsolete 'sqale' plugin must abort loading with a clear
// compatibility error (presumably raised during incompatible-plugin unloading —
// confirm against PluginRequirementsValidator).
@Test
public void fail_when_sqale_plugin_is_installed() throws Exception {
    copyTestPluginTo("fake-sqale-plugin", fs.getInstalledExternalPluginsDir());

    assertThatThrownBy(() -> underTest.loadPlugins())
        .isInstanceOf(MessageException.class)
        .hasMessage("The following plugin is no longer compatible with this version of SonarQube: 'sqale'");
}
/**
 * Generates the code artifacts for the given codegen table and returns them as a
 * map of file path to file content. For master-table templates, the sub-tables and
 * their join columns are validated and passed to the engine as well.
 *
 * @param tableId id of the codegen table definition
 * @throws exception CODEGEN_TABLE_NOT_EXISTS / CODEGEN_COLUMN_NOT_EXISTS /
 *         CODEGEN_MASTER_GENERATION_FAIL_NO_SUB_TABLE / CODEGEN_SUB_COLUMN_NOT_EXISTS
 */
@Override
public Map<String, String> generationCodes(Long tableId) {
    // Validate that the table definition exists
    CodegenTableDO table = codegenTableMapper.selectById(tableId);
    if (table == null) {
        throw exception(CODEGEN_TABLE_NOT_EXISTS);
    }
    List<CodegenColumnDO> columns = codegenColumnMapper.selectListByTableId(tableId);
    if (CollUtil.isEmpty(columns)) {
        throw exception(CODEGEN_COLUMN_NOT_EXISTS);
    }

    // For a master-table template, load the corresponding sub-table information
    List<CodegenTableDO> subTables = null;
    List<List<CodegenColumnDO>> subColumnsList = null;
    if (CodegenTemplateTypeEnum.isMaster(table.getTemplateType())) {
        // Validate that sub-tables exist
        subTables = codegenTableMapper.selectListByTemplateTypeAndMasterTableId(
            CodegenTemplateTypeEnum.SUB.getType(), tableId);
        if (CollUtil.isEmpty(subTables)) {
            throw exception(CODEGEN_MASTER_GENERATION_FAIL_NO_SUB_TABLE);
        }
        // Validate that each sub-table's join column exists
        subColumnsList = new ArrayList<>();
        for (CodegenTableDO subTable : subTables) {
            List<CodegenColumnDO> subColumns = codegenColumnMapper.selectListByTableId(subTable.getId());
            if (CollUtil.findOne(subColumns, column -> column.getId().equals(subTable.getSubJoinColumnId())) == null) {
                throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, subTable.getId());
            }
            subColumnsList.add(subColumns);
        }
    }

    // Run the code generation
    return codegenEngine.execute(table, columns, subTables, subColumnsList);
}
// Single-table (ONE template) generation: the engine must receive the table and its
// two columns in insertion order, and the engine's result map is returned unchanged.
@Test
public void testGenerationCodes_one_success() {
    // mock data (CodegenTableDO)
    CodegenTableDO table = randomPojo(CodegenTableDO.class,
        o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())
            .setTemplateType(CodegenTemplateTypeEnum.ONE.getType()));
    codegenTableMapper.insert(table);
    // mock data (CodegenColumnDO)
    CodegenColumnDO column01 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId()));
    codegenColumnMapper.insert(column01);
    CodegenColumnDO column02 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId()));
    codegenColumnMapper.insert(column02);
    // mock the generation call
    Map<String, String> codes = MapUtil.of(randomString(), randomString());
    when(codegenEngine.execute(eq(table), argThat(columns -> {
        assertEquals(2, columns.size());
        assertEquals(column01, columns.get(0));
        assertEquals(column02, columns.get(1));
        return true;
    }), isNull(), isNull())).thenReturn(codes);
    // prepare parameters
    Long tableId = table.getId();
    // invoke
    Map<String, String> result = codegenService.generationCodes(tableId);
    // assert
    assertSame(codes, result);
}
/**
 * Returns the indices 0..objectCount-1 assigned to partition {@code index} when the
 * objects are dealt round-robin across {@code count} partitions: index, index+count,
 * index+2*count, ...
 *
 * @throws IllegalArgumentException on negative counts/indices or index >= count
 */
public static int[] roundRobinPart(int objectCount, int count, int index) {
    if (objectCount < 0 || index < 0 || count < 1 || index >= count) {
        throw new IllegalArgumentException("objectCount=" + objectCount + ", count=" + count + ", index=" + index);
    }
    // Each partition gets floor(objectCount/count) items, plus one extra when its
    // index falls within the remainder.
    final int partSize = objectCount / count + (index < objectCount % count ? 1 : 0);
    final int[] part = new int[partSize];
    int next = index;
    for (int i = 0; i < partSize; i++) {
        part[i] = next;
        next += count;
    }
    return part;
}
// Exhaustively checks round-robin partitioning for small object counts, including
// empty partitions and uneven splits where earlier partitions get the extra element.
@Test
public void test_roundRobinPart() {
    assertArrayEquals(new int[] {}, roundRobinPart(0, 2, 0));
    assertArrayEquals(new int[] {0}, roundRobinPart(1, 1, 0));
    assertArrayEquals(new int[] {0}, roundRobinPart(1, 2, 0));
    assertArrayEquals(new int[] {}, roundRobinPart(1, 2, 1));
    assertArrayEquals(new int[] {0, 1}, roundRobinPart(2, 1, 0));
    assertArrayEquals(new int[] {0}, roundRobinPart(2, 2, 0));
    assertArrayEquals(new int[] {1}, roundRobinPart(2, 2, 1));
    assertArrayEquals(new int[] {0, 2}, roundRobinPart(3, 2, 0));
    assertArrayEquals(new int[] {1}, roundRobinPart(3, 2, 1));
}
/**
 * Increments the counter by one; delegates to {@link #increment(long)}.
 */
@Override
public void increment() {
    // 1L: uppercase long-literal suffix — a lowercase 'l' is easily misread as '1'.
    increment(1L);
}
// A single increment must raise the counter exactly one above its initial value.
@Test
public void increment() {
    longCounter.increment();
    assertThat(longCounter.getValue()).isEqualTo(INITIAL_VALUE + 1);
}
/**
 * Returns whether this entry has at least one child entry.
 */
public boolean hasChildren() {
    return ! childEntries.isEmpty();
}
// An entry with one child must report hasChildren() == true.
// NOTE(review): the false case (an entry with no children) is not covered here.
@Test
public void hasChildren() {
    Entry firstStructureWithEntry = new Entry();
    final Entry firstEntry = new Entry();
    firstStructureWithEntry.addChild(firstEntry);
    assertThat(firstStructureWithEntry.hasChildren(), equalTo(true));
}
/**
 * FEEL substring-after: returns the part of {@code string} following the first
 * occurrence of {@code match}, or "" when there is no occurrence.
 * Null arguments yield an invalid-parameter error result.
 */
public FEELFnResult<String> invoke(@ParameterName( "string" ) String string, @ParameterName( "match" ) String match) {
    if ( string == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
    }
    if ( match == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "match", "cannot be null"));
    }
    final int matchStart = string.indexOf(match);
    return matchStart < 0
            ? FEELFnResult.ofResult("")
            : FEELFnResult.ofResult(string.substring(matchStart + match.length()));
}
// When the match string does not occur in the input, substring-after must yield an
// empty string rather than an error.
@Test
void invokeMatchNotExists() {
    FunctionTestUtil.assertResult(substringAfterFunction.invoke("foobar", "oook"), "");
}
/**
 * Publishes the recreate-and-sync duration as a millisecond gauge when metrics are
 * enabled; no-op otherwise.
 *
 * @param durationInNs measured duration in nanoseconds
 */
public void updateRecreateAndSyncDurationInMs(long durationInNs) {
    if (metricsConfig.isMetricsOn()) {
        long durationInMs = getDurationInMs(durationInNs);
        LOG.info("Sending recreate and sync metrics {}", durationInMs);
        metrics.registerGauge(getMetricsName(META_SYNC_ACTION, RECREATE_TABLE_DURATION_MS_METRIC), durationInMs);
    }
}
// After timing a short sleep, the recreate-table duration gauge must be registered
// under the expected name with a positive millisecond value.
@Test
void testUpdateRecreateAndSyncDurationInMs() throws InterruptedException {
    Timer.Context timerCtx = hoodieSyncMetrics.getRecreateAndSyncTimer();
    Thread.sleep(5);
    long durationInNs = timerCtx.stop();
    hoodieSyncMetrics.updateRecreateAndSyncDurationInMs(durationInNs);
    String metricName = hoodieSyncMetrics.getMetricsName("meta_sync", "recreate_table_duration_ms");
    long timeIsMs = (Long) metrics.getRegistry().getGauges().get(metricName).getValue();
    assertTrue(timeIsMs > 0, "recreate_table duration metric value should be > 0");
}
/**
 * Sanitizes the statement against the metastore — convenience overload delegating
 * with the boolean flag set to true (see the three-argument overload for the flag's
 * meaning; presumably enabling the stricter/default behavior — confirm there).
 */
public static Statement sanitize(
    final Statement node,
    final MetaStore metaStore) {
    return sanitize(node, metaStore, true);
}
// A column that exists only on the right side of a join must be resolved with that
// source's qualifier (TEST1) while keeping its simple name as the alias.
@Test
public void shouldAddQualifierForJoinColumnReferenceFromRight() {
    // Given:
    final Statement stmt = givenQuery(
        "SELECT COL5 FROM TEST2 JOIN TEST1 ON TEST2.COL0=TEST1.COL0;");

    // When:
    final Query result = (Query) AstSanitizer.sanitize(stmt, META_STORE);

    // Then:
    assertThat(result.getSelect(), is(new Select(ImmutableList.of(
        new SingleColumn(
            column(TEST1_NAME, "COL5"),
            Optional.of(ColumnName.of("COL5")))
    ))));
}
/**
 * Applies a provider's port status update, stamping it with the device's logical
 * clock. When the update produces an event, the merged description is broadcast to
 * peer nodes; stale or unusable updates return null.
 *
 * @return the resulting device event, or {@code null} when the update was ignored
 */
@Override
public synchronized DeviceEvent updatePortStatus(ProviderId providerId, DeviceId deviceId,
                                                 PortDescription portDescription) {
    final Timestamp newTimestamp;
    try {
        newTimestamp = deviceClockService.getTimestamp(deviceId);
    } catch (IllegalStateException e) {
        log.info("Timestamp was not available for device {}", deviceId);
        log.debug(" discarding {}", portDescription);
        // Failed to generate timestamp. Ignoring.
        // See updatePorts comment
        return null;
    }
    final Timestamped<PortDescription> deltaDesc = new Timestamped<>(portDescription, newTimestamp);
    final DeviceEvent event;
    Timestamped<PortDescription> mergedDesc;
    final Map<ProviderId, DeviceDescriptions> device = getOrCreateDeviceDescriptionsMap(deviceId);
    // Per-device lock: update and merged-description read must be atomic.
    synchronized (device) {
        event = updatePortStatusInternal(providerId, deviceId, deltaDesc);
        mergedDesc = device.get(providerId)
            .getPortDesc(portDescription.portNumber());
        //on delete the port is removed, thus using latest known description
        if (mergedDesc == null) {
            mergedDesc = new Timestamped<>(portDescription, newTimestamp);
        }
    }
    if (event != null) {
        log.debug("Notifying peers of a port status update topology event for providerId: {} and deviceId: {}",
                  providerId, deviceId);
        notifyPeers(new InternalPortStatusEvent(providerId, deviceId, mergedDesc));
    }
    return event;
}
// Disabling an enabled port must yield a PORT_UPDATED event with the new state and
// broadcast exactly one internal port-status message to the cluster.
@Test
public final void testUpdatePortStatus() {
    putDevice(DID1, SW1);
    List<PortDescription> pds = Arrays.asList(
        DefaultPortDescription.builder().withPortNumber(P1).isEnabled(true).build()
    );
    deviceStore.updatePorts(PID, DID1, pds);

    Capture<InternalPortStatusEvent> message = Capture.newInstance();
    Capture<MessageSubject> subject = Capture.newInstance();
    Capture<Function<InternalPortStatusEvent, byte[]>> encoder = Capture.newInstance();

    resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
    final DefaultPortDescription desc = DefaultPortDescription.builder().withPortNumber(P1)
        .isEnabled(false).build();
    DeviceEvent event = deviceStore.updatePortStatus(PID, DID1, desc);
    assertEquals(PORT_UPDATED, event.type());
    assertDevice(DID1, SW1, event.subject());
    assertEquals(P1, event.port().number());
    assertFalse("Port is disabled", event.port().isEnabled());
    verify(clusterCommunicator);
    assertInternalPortStatusEvent(NID1, DID1, PID, desc, NO_ANNOTATION, message, subject, encoder);
    assertTrue(message.hasCaptured());
}
/**
 * Feeds the readable bytes of {@code in} to the asynchronous XML stream reader
 * and appends one allocated XML event to {@code out} for every fully parsed
 * token. Incomplete tokens stay buffered inside the reader until more bytes
 * arrive on a subsequent call.
 *
 * @param ctx the channel handler context (unused here)
 * @param in  inbound buffer; fully consumed by this call
 * @param out list receiving the decoded XML events
 * @throws Exception if the byte stream is not well-formed XML; the remaining
 *                   inbound bytes are skipped before the exception propagates
 */
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    AsyncByteArrayFeeder streamFeeder = streamReader.getInputFeeder();
    // Per-buffer tracing belongs at DEBUG: INFO-level logging on every inbound
    // chunk floods the log under normal traffic.
    logger.debug("Decoding XMPP data.. ");
    byte[] buffer = new byte[in.readableBytes()];
    in.readBytes(buffer);
    // Parameterized form avoids string concatenation when DEBUG is disabled.
    logger.debug("Buffer length: {}", buffer.length);
    try {
        streamFeeder.feedInput(buffer, 0, buffer.length);
    } catch (XMLStreamException exception) {
        // Log the full exception (not just its message) so the cause and stack
        // trace are preserved, then discard the malformed remainder.
        logger.warn("Invalid XML input; skipping remaining bytes", exception);
        in.skipBytes(in.readableBytes());
        throw exception;
    }
    // Drain every event the feeder completed; EVENT_INCOMPLETE means the
    // reader needs more bytes before the next token is available.
    while (streamReader.hasNext() && streamReader.next() != AsyncXMLStreamReader.EVENT_INCOMPLETE) {
        out.add(allocator.allocate(streamReader));
    }
}
/**
 * Decodes a complete pubsub subscribe stanza and checks every resulting XML
 * event: the document start, the start/end elements with their attributes,
 * and the declared pubsub namespace.
 */
@Test
public void testDecodeXmppStanza() throws Exception {
    XmlStreamDecoder decoder = new XmlStreamDecoder();
    ByteBuf in = Unpooled.buffer();
    in.writeBytes(subscribeMsg.getBytes(Charsets.UTF_8));
    List<Object> events = Lists.newArrayList();

    decoder.decode(new ChannelHandlerContextAdapter(), in, events);

    // The stanza must decode into exactly ten XML events.
    assertThat(events.size(), is(10));
    for (Object event : events) {
        assertThat(event, is(instanceOf(XMLEvent.class)));
    }

    // Index 0: document start.
    assertThat(((XMLEvent) events.get(0)).isStartDocument(), is(true));

    // Index 1: <iq type='set' from='test@xmpp.org' to='xmpp.onosproject.org' id='sub1'>
    XMLEvent iqEvent = (XMLEvent) events.get(1);
    assertThat(iqEvent.isStartElement(), is(true));
    StartElement iqStart = (StartElement) iqEvent;
    assertThat(iqStart.getName().getLocalPart(), is("iq"));
    assertThat(Lists.newArrayList(iqStart.getAttributes()).size(), is(4));
    assertThat(iqStart.getAttributeByName(QName.valueOf("type")).getValue(), is("set"));
    assertThat(iqStart.getAttributeByName(QName.valueOf("from")).getValue(), is("test@xmpp.org"));
    assertThat(iqStart.getAttributeByName(QName.valueOf("to")).getValue(), is("xmpp.onosproject.org"));
    assertThat(iqStart.getAttributeByName(QName.valueOf("id")).getValue(), is("sub1"));

    // Index 3: <pubsub> carrying the pubsub namespace.
    XMLEvent pubsubEvent = (XMLEvent) events.get(3);
    assertThat(pubsubEvent.isStartElement(), is(true));
    StartElement pubsubStart = (StartElement) pubsubEvent;
    assertThat(pubsubStart.getName().getLocalPart(), is("pubsub"));
    assertThat(pubsubStart.getNamespaceURI(""), is("http://jabber.org/protocol/pubsub"));

    // Index 5: <subscribe node='test'>
    XMLEvent subscribeEvent = (XMLEvent) events.get(5);
    assertThat(subscribeEvent.isStartElement(), is(true));
    StartElement subscribeStart = (StartElement) subscribeEvent;
    assertThat(subscribeStart.getName().getLocalPart(), is("subscribe"));
    assertThat(subscribeStart.getAttributeByName(QName.valueOf("node")).getValue(), is("test"));

    // Index 6: </subscribe>
    XMLEvent subscribeEndEvent = (XMLEvent) events.get(6);
    assertThat(subscribeEndEvent.isEndElement(), is(true));
    assertThat(((EndElement) subscribeEndEvent).getName().getLocalPart(), is("subscribe"));

    // Index 8: </pubsub>
    XMLEvent pubsubEndEvent = (XMLEvent) events.get(8);
    assertThat(pubsubEndEvent.isEndElement(), is(true));
    assertThat(((EndElement) pubsubEndEvent).getName().getLocalPart(), is("pubsub"));

    // Index 9: </iq>
    XMLEvent iqEndEvent = (XMLEvent) events.get(9);
    assertThat(iqEndEvent.isEndElement(), is(true));
    assertThat(((EndElement) iqEndEvent).getName().getLocalPart(), is("iq"));
}
/**
 * Batch-deletes the roles identified by the given ids.
 *
 * @param ids non-empty list of role ids; each entry must be non-blank
 * @return a success result carrying the number of deleted rows
 */
@DeleteMapping("/batch")
@RequiresPermissions("system:role:delete")
public ShenyuAdminResult deleteRole(@RequestBody @NotEmpty final List<@NotBlank String> ids) {
    final int deletedCount = roleService.delete(ids);
    return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, deletedCount);
}
/**
 * Verifies that DELETE /role/batch returns HTTP 200 and the delete-success
 * message when the service reports one deleted role.
 */
@Test
public void testDeleteRole() throws Exception {
    // Stub the service to report one deleted row for these ids.
    List<String> idsToDelete = Collections.singletonList("test_id");
    given(roleService.delete(idsToDelete)).willReturn(1);

    this.mockMvc.perform(MockMvcRequestBuilders.delete("/role/batch")
            .contentType(MediaType.APPLICATION_JSON)
            .content(GsonUtils.getInstance().toJson(idsToDelete)))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DELETE_SUCCESS)))
            .andReturn();
}