focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Closes the wrapped zip stream.
 *
 * @throws IOException if no delegate stream is present ({@code INVALID_INPUT_MSG})
 *                     or if closing the underlying stream fails
 */
@Override
public void close() throws IOException {
    final ZipInputStream stream = (ZipInputStream) delegate;
    if (stream == null) {
        throw new IOException(INVALID_INPUT_MSG);
    }
    stream.close();
}
// Smoke test: close() on a freshly created zip stream must complete without throwing.
@Test public void testClose() throws IOException { createZIPInputStream().close(); }
// Maps a SortOrder onto a Permutor over FeedItem lists. Comparator-backed orders
// (title, pub date, duration, filename/link, feed title, size, completion date) are
// wrapped into a sorting permutor; RANDOM shuffles; the SMART_SHUFFLE variants delegate
// to smartShuffle. Unknown orders raise IllegalArgumentException.
// NOTE(review): COMPLETION_DATE_NEW_OLD dereferences getMedia() without a null check -
// presumably items always carry media for that order; confirm with callers.
@NonNull public static Permutor<FeedItem> getPermutor(@NonNull SortOrder sortOrder) { Comparator<FeedItem> comparator = null; Permutor<FeedItem> permutor = null; switch (sortOrder) { case EPISODE_TITLE_A_Z: comparator = (f1, f2) -> itemTitle(f1).compareTo(itemTitle(f2)); break; case EPISODE_TITLE_Z_A: comparator = (f1, f2) -> itemTitle(f2).compareTo(itemTitle(f1)); break; case DATE_OLD_NEW: comparator = (f1, f2) -> pubDate(f1).compareTo(pubDate(f2)); break; case DATE_NEW_OLD: comparator = (f1, f2) -> pubDate(f2).compareTo(pubDate(f1)); break; case DURATION_SHORT_LONG: comparator = (f1, f2) -> Integer.compare(duration(f1), duration(f2)); break; case DURATION_LONG_SHORT: comparator = (f1, f2) -> Integer.compare(duration(f2), duration(f1)); break; case EPISODE_FILENAME_A_Z: comparator = (f1, f2) -> itemLink(f1).compareTo(itemLink(f2)); break; case EPISODE_FILENAME_Z_A: comparator = (f1, f2) -> itemLink(f2).compareTo(itemLink(f1)); break; case FEED_TITLE_A_Z: comparator = (f1, f2) -> feedTitle(f1).compareTo(feedTitle(f2)); break; case FEED_TITLE_Z_A: comparator = (f1, f2) -> feedTitle(f2).compareTo(feedTitle(f1)); break; case RANDOM: permutor = Collections::shuffle; break; case SMART_SHUFFLE_OLD_NEW: permutor = (queue) -> smartShuffle(queue, true); break; case SMART_SHUFFLE_NEW_OLD: permutor = (queue) -> smartShuffle(queue, false); break; case SIZE_SMALL_LARGE: comparator = (f1, f2) -> Long.compare(size(f1), size(f2)); break; case SIZE_LARGE_SMALL: comparator = (f1, f2) -> Long.compare(size(f2), size(f1)); break; case COMPLETION_DATE_NEW_OLD: comparator = (f1, f2) -> f2.getMedia().getPlaybackCompletionDate() .compareTo(f1.getMedia().getPlaybackCompletionDate()); break; default: throw new IllegalArgumentException("Permutor not implemented"); } if (comparator != null) { final Comparator<FeedItem> comparator2 = comparator; permutor = (queue) -> Collections.sort(queue, comparator2); } return permutor; }

// DATE_NEW_OLD must reorder the fixture newest-first (ids 3, 2, 1).
@Test
public void testPermutorForRule_DATE_DESC() {
    Permutor<FeedItem> permutor = FeedItemPermutors.getPermutor(SortOrder.DATE_NEW_OLD);
    List<FeedItem> itemList = getTestList();
    assertTrue(checkIdOrder(itemList, 1, 3, 2)); // before sorting
    permutor.reorder(itemList);
    assertTrue(checkIdOrder(itemList, 3, 2, 1)); // after sorting
}
// Telnet "trace" command handler: resolves the target service (either the channel's
// current service set by "cd", or the first token of the message), an optional method
// name, and an optional repeat count, then registers a tracer on the matching exported
// invoker via TraceFilter.addTracer. Returns an error string on bad input, or null on
// success. Accepted message forms: "Service", "Service 10", "Service method",
// "Service method 10"; with a channel-scoped service: "method", "method 10".
@Override
public String telnet(Channel channel, String message) {
    String service = (String) channel.getAttribute(ChangeTelnetHandler.SERVICE_KEY);
    if ((StringUtils.isEmpty(service)) && (StringUtils.isEmpty(message))) {
        return "Please input service name, eg: \r\ntrace XxxService\r\ntrace XxxService xxxMethod\r\ntrace XxxService xxxMethod 10\r\nor \"cd XxxService\" firstly.";
    }
    String[] parts = message.split("\\s+");
    String method;
    String times;
    // message like : XxxService , XxxService 10 , XxxService xxxMethod , XxxService xxxMethod 10
    if (StringUtils.isEmpty(service)) {
        service = parts.length > 0 ? parts[0] : null;
        method = parts.length > 1 ? parts[1] : null;
        times = parts.length > 2 ? parts[2] : "1";
    } else {
        // message like : xxxMethod, xxxMethod 10
        method = parts.length > 0 ? parts[0] : null;
        times = parts.length > 1 ? parts[1] : "1";
    }
    // A purely numeric "method" token is actually the repeat count.
    if (StringUtils.isNumber(method)) {
        times = method;
        method = null;
    }
    if (!StringUtils.isNumber(times)) {
        return "Illegal times " + times + ", must be integer.";
    }
    // Match the service by simple name, fully-qualified name, or URL path.
    Invoker<?> invoker = null;
    for (Exporter<?> exporter : DubboProtocol.getDubboProtocol().getExporters()) {
        if (service.equals(exporter.getInvoker().getInterface().getSimpleName())
                || service.equals(exporter.getInvoker().getInterface().getName())
                || service.equals(exporter.getInvoker().getUrl().getPath())) {
            invoker = exporter.getInvoker();
            break;
        }
    }
    if (invoker != null) {
        // When a method was named, verify it exists on the service interface.
        if (StringUtils.isNotEmpty(method)) {
            boolean found = false;
            for (Method m : invoker.getInterface().getMethods()) {
                if (m.getName().equals(method)) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                return "No such method " + method + " in class " + invoker.getInterface().getName();
            }
        }
        TraceFilter.addTracer(invoker.getInterface(), method, channel, Integer.parseInt(times));
    } else {
        return "No such service " + service;
    }
    return null;
}

// Registers a tracer through the telnet handler and asserts the channel shows up in
// TraceFilter's private static TRACERS map (read reflectively; Field.get on a static
// field ignores its instance argument, so the throwaway map passed here is harmless).
@Test void testTraceTelnetAddTracer() throws Exception { String method = "sayHello"; String message = "org.apache.dubbo.qos.legacy.service.DemoService sayHello 1"; Class<?> type = DemoService.class; ExtensionLoader.getExtensionLoader(Protocol.class) .getExtension(DubboProtocol.NAME) .export(mockInvoker); handler.telnet(mockChannel, message); String key = type.getName() + "." + method; Field tracers = TraceFilter.class.getDeclaredField("TRACERS"); tracers.setAccessible(true); ConcurrentHashMap<String, Set<Channel>> map = (ConcurrentHashMap<String, Set<Channel>>) tracers.get(new ConcurrentHashMap<String, Set<Channel>>()); Set<Channel> channels = map.getOrDefault(key, null); Assertions.assertNotNull(channels); Assertions.assertTrue(channels.contains(mockChannel)); }
// Reports done as soon as the delegate future has completed: first forces the wrapped
// result to materialize (blocking effectively forever via Long.MAX_VALUE days), with
// execution/cancellation/timeout exceptions deliberately swallowed here - callers that
// care about the outcome will see them when they call get(). Otherwise defers to super.
@Override public boolean isDone() { if (delegate.isDone()) { try { ensureResultSet(Long.MAX_VALUE, TimeUnit.DAYS); } catch (ExecutionException | CancellationException | TimeoutException ignored) { ignore(ignored); } return true; } else { return super.isDone(); } }

// Completing the delegate must make both the delegate and the outer future report done.
@Test public void completeDelegate_bothDone_outerAskedFirst() { delegateFuture.run(); assertTrue(outerFuture.isDone()); assertTrue(delegateFuture.isDone()); }
/**
 * Merges this range with {@code other} and estimates the distinct-value count of the
 * union: the overlapping portion is counted once (using the larger of the two overlap
 * estimates) and the non-overlapping remainders of each side are added on top.
 */
public StatisticRange addAndCollapseDistinctValues(StatisticRange other) {
    final double thisOverlap = this.overlapPercentWith(other);
    final double otherOverlap = other.overlapPercentWith(this);
    // Distinct values each side contributes inside the overlapping region.
    final double sharedDistinct = max(thisOverlap * distinctValues, otherOverlap * other.distinctValues);
    // Distinct values unique to each side, outside the overlap.
    final double newDistinctValues = sharedDistinct
            + (1 - thisOverlap) * distinctValues
            + (1 - otherOverlap) * other.distinctValues;
    return expandRangeWithNewDistinct(newDistinctValues, other);
}
// Exercises distinct-value collapsing across NaN/unbounded, empty, adjacent and
// partially overlapping ranges.
@Test public void testAddAndCollapseDistinctValues() { assertEquals(unboundedRange(NaN).addAndCollapseDistinctValues(unboundedRange(NaN)), unboundedRange(NaN)); assertEquals(unboundedRange(NaN).addAndCollapseDistinctValues(unboundedRange(1)), unboundedRange(NaN)); assertEquals(unboundedRange(1).addAndCollapseDistinctValues(unboundedRange(NaN)), unboundedRange(NaN)); assertEquals(unboundedRange(1).addAndCollapseDistinctValues(unboundedRange(2)), unboundedRange(2)); assertEquals(StatisticRange.empty().addAndCollapseDistinctValues(StatisticRange.empty()), StatisticRange.empty()); assertEquals(range(0, 1, 1).addAndCollapseDistinctValues(StatisticRange.empty()), range(0, 1, 1)); assertEquals(range(0, 1, 1).addAndCollapseDistinctValues(range(1, 2, 1)), range(0, 2, 1)); assertEquals(range(0, 3, 3).addAndCollapseDistinctValues(range(2, 6, 4)), range(0, 6, 6)); }
// Resolves the transfer item for a single-item CLI transfer. With both remote and local
// arguments present (two option values), delegates to the download/upload finders;
// upload/synchronize without a local argument yields no items; any other case derives
// the local side from the remote file name relative to the working directory.
@Override
public Set<TransferItem> find(final CommandLine input, final TerminalAction action, final Path remote) {
    if (input.getOptionValues(action.name()).length == 2) {
        switch (action) {
            case download:
                return new DownloadTransferItemFinder().find(input, action, remote);
            case upload:
            case synchronize:
                return new UploadTransferItemFinder().find(input, action, remote);
        }
    } else {
        switch (action) {
            case upload:
            case synchronize:
                return Collections.emptySet();
        }
    }
    // Relative to current working directory using prefix finder.
    return Collections.singleton(
        new TransferItem(remote, LocalFactory.get(prefixer.normalize(remote.getName())))
    );
}

// Upload with an explicit local file: the remote name must be taken from the local file.
@Test public void testDeferUploadNameFromLocal() throws Exception { final CommandLineParser parser = new PosixParser(); final String temp = System.getProperty("java.io.tmpdir"); final CommandLine input = parser.parse(TerminalOptionsBuilder.options(), new String[]{"--upload", "ftps://test.cyberduck.ch/remote/", String.format("%s/f", temp)}); final Set<TransferItem> found = new SingleTransferItemFinder().find(input, TerminalAction.upload, new Path("/remote", EnumSet.of(Path.Type.directory))); assertFalse(found.isEmpty()); assertEquals(new TransferItem(new Path("/remote/f", EnumSet.of(Path.Type.file)), LocalFactory.get(String.format("%s/f", temp))), found.iterator().next()); }
static Object normalizeResult(Object result) { logger.trace("normalizeResult {}", result); // this is to normalize types returned by external functions if (result != null && result.getClass().isArray()) { List<Object> objs = new ArrayList<>(); for (int i = 0; i < Array.getLength(result); i++) { objs.add(NumberEvalHelper.coerceNumber(Array.get(result, i))); } return objs; } else { return NumberEvalHelper.coerceNumber(result); } }
// Arrays must come back as element-wise coerced Lists; scalars (numeric or not) must
// come back coerced directly.
@Test void normalizeResult() { List<Object> originalResult = List.of(3, "4", 56); Object result = originalResult.toArray(); Object retrieved = BaseFEELFunctionHelper.normalizeResult(result); assertNotNull(retrieved); assertInstanceOf(List.class, retrieved); List<Object> retrievedList = (List<Object>) retrieved; assertEquals(originalResult.size(), retrievedList.size()); for (int i = 0; i < originalResult.size(); i++) { assertEquals(NumberEvalHelper.coerceNumber(originalResult.get(i)), retrievedList.get(i)); } result = 23; retrieved = BaseFEELFunctionHelper.normalizeResult(result); assertNotNull(retrieved); assertEquals(NumberEvalHelper.coerceNumber(result), retrieved); result = "23"; retrieved = BaseFEELFunctionHelper.normalizeResult(result); assertNotNull(retrieved); assertEquals(NumberEvalHelper.coerceNumber(result), retrieved); }
/**
 * Lists the ODPS projects visible to the catalog owner as database names.
 * The owner is resolved lazily via a "whoami" security query on first use.
 * Falls back to the default project when the owner has no visible projects.
 *
 * @return the project names, never empty
 * @throws StarRocksConnectorException if the ODPS API call fails
 */
@Override
public List<String> listDbNames() {
    ImmutableList.Builder<String> builder = ImmutableList.builder();
    try {
        // Lazily resolve the catalog owner the first time projects are listed.
        if (StringUtils.isNullOrEmpty(catalogOwner)) {
            SecurityManager sm = odps.projects().get().getSecurityManager();
            String result = sm.runQuery("whoami", false);
            JsonObject js = JsonParser.parseString(result).getAsJsonObject();
            catalogOwner = js.get("DisplayName").getAsString();
        }
        Iterator<Project> iterator = odps.projects().iterator(catalogOwner);
        while (iterator.hasNext()) {
            builder.add(iterator.next().getName());
        }
    } catch (OdpsException e) {
        // Rethrow with the cause attached; do not printStackTrace(), which writes to
        // stderr and bypasses the logging framework while duplicating the cause.
        throw new StarRocksConnectorException("fail to list project names", e);
    }
    ImmutableList<String> databases = builder.build();
    if (databases.isEmpty()) {
        // No visible projects: expose the default project instead of an empty catalog.
        return ImmutableList.of(odps.getDefaultProject());
    }
    return databases;
}
// The mocked ODPS endpoint exposes exactly one project named "project".
@Test public void testListDbNames() { List<String> expectedDbNames = Collections.singletonList("project"); List<String> dbNames = odpsMetadata.listDbNames(); Assert.assertEquals(dbNames, expectedDbNames); }
/**
 * Fans the execution-start notification out to every registered SQL execution hook,
 * in registration order.
 */
@Override
public void start(final String dataSourceName, final String sql, final List<Object> params,
                  final ConnectionProperties connectionProps, final boolean isTrunkThread) {
    sqlExecutionHooks.forEach(hook -> hook.start(dataSourceName, sql, params, connectionProps, isTrunkThread));
}
// start() must propagate to the fixture hook, which records the "start" action.
@Test void assertStart() { spiSQLExecutionHook.start("ds", "SELECT 1", Collections.emptyList(), null, true); assertTrue(SQLExecutionHookFixture.containsAction("start")); }
// Returns every role row as-is; filtering and ordering are left to callers.
@Override public List<RoleDO> getRoleList() { return roleMapper.selectList(); }

// Inserts two roles (one enabled, one disabled) and expects both back in insertion order.
@Test
public void testGetRoleList() {
    // mock data
    RoleDO dbRole01 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    roleMapper.insert(dbRole01);
    RoleDO dbRole02 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
    roleMapper.insert(dbRole02);
    // invoke
    List<RoleDO> list = roleService.getRoleList();
    // assert
    assertEquals(2, list.size());
    assertPojoEquals(dbRole01, list.get(0));
    assertPojoEquals(dbRole02, list.get(1));
}
/**
 * Parses a string of {@code key=value} tokens (separated by commas and/or whitespace)
 * into an unmodifiable map. Keys and values are trimmed. A {@code null} input yields
 * an empty map.
 *
 * @param str the string to parse, may be {@code null}
 * @return an unmodifiable map of the parsed entries
 * @throws IllegalArgumentException if a token contains no {@code '='} separator
 */
public static Map<String, String> parseMap(String str) {
    if (str == null) {
        return Collections.emptyMap();
    }
    StringTokenizer tok = new StringTokenizer(str, ", \t\n\r");
    Map<String, String> map = new HashMap<>();
    while (tok.hasMoreTokens()) {
        String record = tok.nextToken();
        int endIndex = record.indexOf('=');
        if (endIndex == -1) {
            // Name the offending token so the caller can see what failed to parse.
            // IllegalArgumentException is a RuntimeException, so existing catch blocks still match.
            throw new IllegalArgumentException(
                    "Failed to parse Map from String: missing '=' in token '" + record + "'");
        }
        map.put(record.substring(0, endIndex).trim(), record.substring(endIndex + 1).trim());
    }
    return Collections.unmodifiableMap(map);
}
// A null input must produce an empty map, not an NPE.
@Test public void testParseMapEmptyString() { Map<String, String> m = parseMap(null); assertThat(m, aMapWithSize(0)); }
/**
 * Updates an existing mail account and evicts its cached entry.
 */
@Override
@CacheEvict(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#updateReqVO.id")
public void updateMailAccount(MailAccountSaveReqVO updateReqVO) {
    // validate that the account exists
    validateMailAccountExists(updateReqVO.getId());
    // update
    MailAccountDO updateObj = BeanUtils.toBean(updateReqVO, MailAccountDO.class);
    mailAccountMapper.updateById(updateObj);
}

// Successful update: the row fetched afterwards must match the request.
@Test
public void testUpdateMailAccount_success() {
    // mock data
    MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class);
    mailAccountMapper.insert(dbMailAccount); // @Sql: insert an existing row first
    // prepare arguments
    MailAccountSaveReqVO reqVO = randomPojo(MailAccountSaveReqVO.class, o -> {
        o.setId(dbMailAccount.getId()); // id of the row to update
        o.setMail(randomEmail());
    });
    // invoke
    mailAccountService.updateMailAccount(reqVO);
    // verify the update was applied
    MailAccountDO mailAccount = mailAccountMapper.selectById(reqVO.getId()); // fetch the latest
    assertPojoEquals(reqVO, mailAccount);
}
/**
 * Splits a SQL text into individual statements on semicolons, while tracking
 * single-quoted strings, double-quoted strings, single-line comments and multi-line
 * comments so that semicolons inside those regions do not split. A second pass folds
 * comment-only fragments into blank lines appended to the preceding statement so that
 * original line numbering is preserved in each refined query.
 */
public List<String> splitSql(String text) {
    List<String> queries = new ArrayList<>();
    StringBuilder query = new StringBuilder();
    char character;
    boolean multiLineComment = false;
    boolean singleLineComment = false;
    boolean singleQuoteString = false;
    boolean doubleQuoteString = false;
    for (int index = 0; index < text.length(); index++) {
        character = text.charAt(index);
        // end of single line comment
        if (singleLineComment && (character == '\n')) {
            singleLineComment = false;
            query.append(character);
            if (index == (text.length() - 1) && !query.toString().trim().isEmpty()) {
                // add query when it is the end of sql.
                queries.add(query.toString());
            }
            continue;
        }
        // end of multiple line comment
        if (multiLineComment && (index - 1) >= 0 && text.charAt(index - 1) == '/'
                && (index - 2) >= 0 && text.charAt(index - 2) == '*') {
            multiLineComment = false;
        }
        // toggle single-quote string state (ignored inside comments)
        if (character == '\'' && !(singleLineComment || multiLineComment)) {
            if (singleQuoteString) {
                singleQuoteString = false;
            } else if (!doubleQuoteString) {
                singleQuoteString = true;
            }
        }
        // toggle double-quote string state (ignored inside comments)
        if (character == '"' && !(singleLineComment || multiLineComment)) {
            if (doubleQuoteString && index > 0) {
                doubleQuoteString = false;
            } else if (!singleQuoteString) {
                doubleQuoteString = true;
            }
        }
        // detect comment starts; "/*+" is a hint, not a comment
        if (!singleQuoteString && !doubleQuoteString && !multiLineComment && !singleLineComment
                && text.length() > (index + 1)) {
            if (isSingleLineComment(text.charAt(index), text.charAt(index + 1))) {
                singleLineComment = true;
            } else if (text.charAt(index) == '/' && text.length() > (index + 2)
                    && text.charAt(index + 1) == '*' && text.charAt(index + 2) != '+') {
                multiLineComment = true;
            }
        }
        if (character == ';' && !singleQuoteString && !doubleQuoteString && !multiLineComment && !singleLineComment) {
            // meet the end of semicolon
            if (!query.toString().trim().isEmpty()) {
                queries.add(query.toString());
                query = new StringBuilder();
            }
        } else if (index == (text.length() - 1)) {
            // meet the last character
            if ((!singleLineComment && !multiLineComment)) {
                query.append(character);
            }
            if (!query.toString().trim().isEmpty()) {
                queries.add(query.toString());
                query = new StringBuilder();
            }
        } else if (!singleLineComment && !multiLineComment) {
            // normal case, not in single line comment and not in multiple line comment
            query.append(character);
        } else if (character == '\n') {
            // keep newlines from comments so line numbers stay aligned
            query.append(character);
        }
    }
    // Second pass: merge comment-only fragments into neighbours as blank lines.
    // NOTE(review): emptyLine indexes refinedQueries with the loop index of queries
    // (refinedQueries.get(i - 1)); the two lists can diverge in length when fragments
    // are skipped - looks safe for the covered inputs but worth confirming.
    List<String> refinedQueries = new ArrayList<>();
    for (int i = 0; i < queries.size(); ++i) {
        String emptyLine = "";
        if (i > 0) {
            emptyLine = createEmptyLine(refinedQueries.get(i - 1));
        }
        if (isSingleLineComment(queries.get(i)) || isMultipleLineComment(queries.get(i))) {
            // refine the last refinedQuery
            if (refinedQueries.size() > 0) {
                String lastRefinedQuery = refinedQueries.get(refinedQueries.size() - 1);
                refinedQueries.set(refinedQueries.size() - 1, lastRefinedQuery + createEmptyLine(queries.get(i)));
            }
        } else {
            String refinedQuery = emptyLine + queries.get(i);
            refinedQueries.add(refinedQuery);
        }
    }
    return refinedQueries;
}

// Unterminated quotes must not split; leading/trailing comments are folded, keeping
// the statement text (and any embedded semicolon inside the open quote) intact.
@Test void testInvalidSql() { SqlSplitter sqlSplitter = new SqlSplitter(); List<String> sqls = sqlSplitter.splitSql("select a from table_1 where a=' and b=1"); assertEquals(1, sqls.size()); assertEquals("select a from table_1 where a=' and b=1", sqls.get(0)); sqls = sqlSplitter.splitSql("--comment_1;\nselect a from table_1 where a=' and b=1"); assertEquals(1, sqls.size()); assertEquals("\nselect a from table_1 where a=' and b=1", sqls.get(0)); sqls = sqlSplitter.splitSql("select a from table_1 where a=' and b=1;\n--comment_1"); assertEquals(1, sqls.size()); assertEquals("select a from table_1 where a=' and b=1;\n--comment_1", sqls.get(0)); }
/**
 * Renders this listener set as {@code ListenerInfo(endpoint1, endpoint2, ...)},
 * joining the endpoints with ", " in map-iteration order.
 */
@Override
public String toString() {
    StringBuilder out = new StringBuilder("ListenerInfo(");
    boolean first = true;
    for (Endpoint endpoint : listeners.values()) {
        if (!first) {
            out.append(", ");
        }
        first = false;
        out.append(endpoint);
    }
    return out.append(")").toString();
}
// Both endpoints must appear, comma-separated, inside ListenerInfo(...).
@Test public void testToString() { ListenerInfo listenerInfo = ListenerInfo.create(Arrays.asList(EXTERNAL, SASL_PLAINTEXT)); assertEquals("ListenerInfo(Endpoint(listenerName='EXTERNAL', securityProtocol=SASL_SSL, host='example.com', port=9092), " + "Endpoint(listenerName='SASL_PLAINTEXT', securityProtocol=SASL_PLAINTEXT, host='example2.com', port=9094))", listenerInfo.toString()); }
// Scans the CVE's reference URLs in order and returns the first ecosystem the URL-based
// search identifies; returns null when the item, its CVE, its reference list, or any
// match is absent.
public String getEcosystem(DefCveItem cve) { final List<Reference> references = Optional.ofNullable(cve) .map(DefCveItem::getCve) .map(CveItem::getReferences) .orElse(null); if (Objects.nonNull(references)) { for (Reference r : references) { final Hit<String> ecosystem = search.findFirst(r.getUrl()); if (ecosystem != null) { return ecosystem.value; } } } return null; }
// A CVE item without references must yield null, not an NPE.
@Test
public void testGetEcosystemMustHandleNullCveReferences() {
    // Given
    UrlEcosystemMapper mapper = new UrlEcosystemMapper();
    CveItem cveItem = new CveItem();
    DefCveItem defCveItem = new DefCveItem(cveItem);
    // When
    String output = mapper.getEcosystem(defCveItem);
    // Then
    assertNull(output);
}
// Returns the reason ZK migration cannot proceed, or empty when all controllers are
// ready. Pre-registration metadata versions that support migration are trusted
// (empty); otherwise every quorum controller must be registered with
// zookeeper.metadata.migration.enable set.
public Optional<String> reasonAllControllersZkMigrationNotReady( MetadataVersion metadataVersion, Map<Integer, ControllerRegistration> controllers ) { if (!metadataVersion.isMigrationSupported()) { return Optional.of("The metadata.version too low at " + metadataVersion); } else if (!metadataVersion.isControllerRegistrationSupported()) { return Optional.empty(); } for (int quorumNodeId : quorumNodeIds) { ControllerRegistration registration = controllers.get(quorumNodeId); if (registration == null) { return Optional.of("No registration found for controller " + quorumNodeId); } else if (!registration.zkMigrationReady()) { return Optional.of("Controller " + quorumNodeId + " has not enabled " + "zookeeper.metadata.migration.enable"); } } return Optional.empty(); }

// A registered controller with zkMigrationReady=false must produce the "not enabled" reason.
@Test public void testZkMigrationNotReadyIfControllerNotReady() { assertEquals(Optional.of("Controller 0 has not enabled zookeeper.metadata.migration.enable"), QUORUM_FEATURES.reasonAllControllersZkMigrationNotReady( MetadataVersion.IBP_3_7_IV0, Collections.singletonMap(0, new ControllerRegistration.Builder(). setId(0). setZkMigrationReady(false). setIncarnationId(Uuid.fromString("kCBJaDGNQk6x3y5xbtQOpg")). setListeners(Collections.singletonMap("CONTROLLER", new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "localhost", 9093))). build()))); }
// Exposes both UTF8Buffer encodings (small and large variants) as a fixed-size list.
@Override public Collection<? extends PrimitiveTypeEncoding<UTF8Buffer>> getAllEncodings() { return Arrays.asList(smallBufferEncoding, largeBufferEncoding); }

// Exactly the two buffer encodings must be reported.
@Test public void testGetAllEncodings() { assertEquals(2, utf8BufferEncoding.getAllEncodings().size()); }
// Builds the set of VPLS configurations from the JSON config node. A missing
// "interface" entry is replaced by an empty array (mutating the node in place -
// presumably to normalize the stored config; confirm this side effect is intended).
// The encapsulation field is optional and defaults via enumFromString(null).
public Set<VplsConfig> vplss() { Set<VplsConfig> vplss = Sets.newHashSet(); JsonNode vplsNode = object.get(VPLS); if (vplsNode == null) { return vplss; } vplsNode.forEach(jsonNode -> { String name = jsonNode.get(NAME).asText(); Set<String> ifaces = Sets.newHashSet(); JsonNode vplsIfaces = jsonNode.path(INTERFACE); if (vplsIfaces.toString().isEmpty()) { vplsIfaces = ((ObjectNode) jsonNode).putArray(INTERFACE); } vplsIfaces.forEach(ifacesNode -> ifaces.add(ifacesNode.asText())); String encap = null; if (jsonNode.hasNonNull(ENCAPSULATION)) { encap = jsonNode.get(ENCAPSULATION).asText(); } vplss.add(new VplsConfig(name, ifaces, EncapsulationType.enumFromString(encap))); }); return vplss; }

// The parsed configuration must match the expected fixture set.
@Test public void vplss() { assertEquals("Cannot load VPLS configuration or unexpected configuration" + "loaded", vplss, vplsAppConfig.vplss()); }
/**
 * Parses the given POM file into a Maven model.
 *
 * @param file the pom.xml file to parse
 * @return the parsed model
 * @throws PomParseException if the file cannot be read or parsed
 */
public Model parse(File file) throws PomParseException {
    try (FileInputStream fis = new FileInputStream(file)) {
        return parse(fis);
    } catch (PomParseException ex) {
        // Already carries parse context - rethrow untouched instead of re-wrapping
        // (replaces the instanceof check inside a broad IOException catch).
        throw ex;
    } catch (IOException ex) {
        LOGGER.debug("", ex);
        throw new PomParseException(String.format("Unable to parse pom '%s'", file), ex);
    }
}
// Parsing a known pom from a stream must yield the expected artifactId.
@Test public void testParse_InputStream() throws Exception { InputStream inputStream = BaseTest.getResourceAsStream(this, "pom/plexus-utils-3.0.24.pom"); PomParser instance = new PomParser(); String expectedArtifactId = "plexus-utils"; Model result = instance.parse(inputStream); assertEquals("Invalid artifactId extracted", expectedArtifactId, result.getArtifactId()); }
/**
 * Builds the department-data-permission WHERE expression for the given table, or
 * {@code null} when no filtering is required for the current user.
 */
@Override
public Expression getExpression(String tableName, Alias tableAlias) {
    // Only apply data permissions when there is a logged-in user.
    LoginUser loginUser = SecurityFrameworkUtils.getLoginUser();
    if (loginUser == null) {
        return null;
    }
    // Only admin-type users are subject to data-permission processing.
    if (ObjectUtil.notEqual(loginUser.getUserType(), UserTypeEnum.ADMIN.getValue())) {
        return null;
    }
    // Obtain the data permission from the login context.
    DeptDataPermissionRespDTO deptDataPermission = loginUser.getContext(CONTEXT_KEY, DeptDataPermissionRespDTO.class);
    // Not cached in the context: resolve it through the permission API.
    if (deptDataPermission == null) {
        deptDataPermission = permissionApi.getDeptDataPermission(loginUser.getId()).getCheckedData();
        if (deptDataPermission == null) {
            log.error("[getExpression][LoginUser({}) 获取数据权限为 null]", JsonUtils.toJsonString(loginUser));
            throw new NullPointerException(String.format("LoginUser(%d) Table(%s/%s) 未返回数据权限",
                    loginUser.getId(), tableName, tableAlias.getName()));
        }
        // Cache it in the context to avoid recomputation.
        loginUser.setContext(CONTEXT_KEY, deptDataPermission);
    }
    // Case 1: ALL - everything is visible, so no condition is needed.
    if (deptDataPermission.getAll()) {
        return null;
    }
    // Case 2: neither departments nor self are visible - guaranteed zero rows.
    if (CollUtil.isEmpty(deptDataPermission.getDeptIds()) && Boolean.FALSE.equals(deptDataPermission.getSelf())) {
        return new EqualsTo(null, null); // WHERE null = null guarantees an empty result
    }
    // Case 3: build the Dept and User conditions, then combine them.
    Expression deptExpression = buildDeptExpression(tableName,tableAlias, deptDataPermission.getDeptIds());
    Expression userExpression = buildUserExpression(tableName, tableAlias, deptDataPermission.getSelf(), loginUser.getId());
    if (deptExpression == null && userExpression == null) {
        // TODO: when no condition can be built, do not throw for now; return no data instead.
        log.warn("[getExpression][LoginUser({}) Table({}/{}) DeptDataPermission({}) 构建的条件为空]",
                JsonUtils.toJsonString(loginUser), tableName, tableAlias, JsonUtils.toJsonString(deptDataPermission));
        // throw new NullPointerException(String.format("LoginUser(%d) Table(%s/%s) 构建的条件为空",
        //         loginUser.getId(), tableName, tableAlias.getName()));
        return EXPRESSION_NULL;
    }
    if (deptExpression == null) {
        return userExpression;
    }
    if (userExpression == null) {
        return deptExpression;
    }
    // With both department IDs and self-visibility, OR them: WHERE (dept_id IN ? OR user_id = ?)
    return new Parenthesis(new OrExpression(deptExpression, userExpression));
}

// When the permission API returns null, the method must throw an NPE whose message
// identifies the user and table (message asserted byte-for-byte, including the
// untranslated suffix).
@Test // when no dept data permission is available
public void testGetExpression_noDeptDataPermission() {
    try (MockedStatic<SecurityFrameworkUtils> securityFrameworkUtilsMock = mockStatic(SecurityFrameworkUtils.class)) {
        // prepare arguments
        String tableName = "t_user";
        Alias tableAlias = new Alias("u");
        // mock the logged-in admin user
        LoginUser loginUser = randomPojo(LoginUser.class, o -> o.setId(1L)
                .setUserType(UserTypeEnum.ADMIN.getValue()));
        securityFrameworkUtilsMock.when(SecurityFrameworkUtils::getLoginUser).thenReturn(loginUser);
        // mock the permission API to return null
        when(permissionApi.getDeptDataPermission(eq(loginUser.getId()))).thenReturn(success(null));
        // invoke
        NullPointerException exception = assertThrows(NullPointerException.class,
                () -> rule.getExpression(tableName, tableAlias));
        // assert
        assertEquals("LoginUser(1) Table(t_user/u) 未返回数据权限", exception.getMessage());
    }
}
/**
 * Locates, per partition, the ordered and filtered list of hosts that can serve a
 * pull query. Single-key single-column non-range lookups use key-targeted metadata;
 * everything else falls back to all-partition metadata. Throws when no metadata is
 * available (e.g. the persistent query is restarting).
 */
@Override
public List<KsqlPartitionLocation> locate(
    final List<KsqlKey> keys,
    final RoutingOptions routingOptions,
    final RoutingFilterFactory routingFilterFactory,
    final boolean isRangeScan
) {
    if (isRangeScan && keys.isEmpty()) {
        throw new IllegalStateException("Query is range scan but found no range keys.");
    }
    final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder();
    final Set<Integer> filterPartitions = routingOptions.getPartitions();
    final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty()
        : Optional.of(Sets.newHashSet(keys));
    // Depending on whether this is a key-based lookup, determine which metadata method to use.
    // If we don't have keys, find the metadata for all partitions since we'll run the query for
    // all partitions of the state store rather than a particular one.
    // For issue #7174. Temporarily turn off metadata finding for a partition with keys
    // if there are more than one key.
    final List<PartitionMetadata> metadata;
    if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) {
        metadata = getMetadataForKeys(keys, filterPartitions);
    } else {
        metadata = getMetadataForAllPartitions(filterPartitions, keySet);
    }
    if (metadata.isEmpty()) {
        final MaterializationException materializationException = new MaterializationException(
            "Cannot determine which host contains the required partitions to serve the pull query. \n"
                + "The underlying persistent query may be restarting (e.g. as a result of "
                + "ALTER SYSTEM) view the status of your by issuing <DESCRIBE foo>.");
        LOG.debug(materializationException.getMessage());
        throw materializationException;
    }
    // Go through the metadata and group them by partition.
    for (PartitionMetadata partitionMetadata : metadata) {
        LOG.debug("Handling pull query for partition {} of state store {}.",
            partitionMetadata.getPartition(), storeName);
        final HostInfo activeHost = partitionMetadata.getActiveHost();
        final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts();
        final int partition = partitionMetadata.getPartition();
        final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys();
        LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition);
        // For a given partition, find the ordered, filtered list of hosts to consider
        final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory,
            activeHost, standByHosts, partition);
        partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts));
    }
    return partitionLocations.build();
}

// With the active host and one standby excluded by the liveness filter, the only
// selected node must be the remaining standby.
@Test
public void shouldReturnOneStandByWhenActiveAndOtherStandByDown() {
    // Given:
    getActiveAndStandbyMetadata();
    when(livenessFilter.filter(eq(ACTIVE_HOST)))
        .thenReturn(Host.exclude(ACTIVE_HOST, "liveness"));
    when(livenessFilter.filter(eq(STANDBY_HOST1)))
        .thenReturn(Host.exclude(STANDBY_HOST1, "liveness"));
    // When:
    final List<KsqlPartitionLocation> result = locator.locate(ImmutableList.of(KEY), routingOptions,
        routingFilterFactoryStandby, false);
    // Then:
    List<KsqlNode> nodeList = result.get(0).getNodes().stream()
        .filter(node -> node.getHost().isSelected())
        .collect(Collectors.toList());
    assertThat(nodeList.size(), is(1));
    assertThat(nodeList.stream().findFirst().get(), is(standByNode2));
}
/**
 * Converts an INT value in radians to a value in degrees by delegating to the
 * {@code Double} overload; a {@code null} input stays {@code null}.
 */
@Udf(description = "Converts an INT value in radians to a value in degrees")
public Double degrees(
    @UdfParameter(
        value = "value",
        description = "The value in radians to convert to degrees."
    ) final Integer value
) {
    if (value == null) {
        return degrees((Double) null);
    }
    return degrees(value.doubleValue());
}
// Zero radians must be zero degrees for every numeric overload.
@Test public void shouldHandleZero() { assertThat(udf.degrees(0), closeTo(0.0, 0.000000000000001)); assertThat(udf.degrees(0L), closeTo(0.0, 0.000000000000001)); assertThat(udf.degrees(0.0), closeTo(0.0, 0.000000000000001)); }
public static String checkIPs(String... ips) { if (ips == null || ips.length == 0) { return CHECK_OK; } // illegal response StringBuilder illegalResponse = new StringBuilder(); for (String ip : ips) { if (InternetAddressUtil.isIP(ip)) { continue; } illegalResponse.append(ip).append(","); } if (illegalResponse.length() == 0) { return CHECK_OK; } return ILLEGAL_IP_PREFIX + illegalResponse.substring(0, illegalResponse.length() - 1); }
// Valid, empty, null and mixed inputs; only the invalid entries are reported.
// (The no-argument case is asserted twice - harmless duplication.)
@Test void testCheckIPs() { assertEquals("ok", InternetAddressUtil.checkIPs("127.0.0.1")); assertEquals("ok", InternetAddressUtil.checkIPs()); assertEquals("ok", InternetAddressUtil.checkIPs()); assertEquals("ok", InternetAddressUtil.checkIPs(null)); assertEquals("illegal ip: 127.100.19", InternetAddressUtil.checkIPs("127.100.19", "127.0.0.1")); }
/**
 * Legacy textual form: the subject alone when the message is blank, the message alone
 * when the subject is blank, otherwise {@code "subject - message"}.
 *
 * @deprecated retained for backward compatibility
 */
@Override
@Deprecated
public String toString() {
    final boolean blankMessage = StringUtils.isBlank(message);
    final boolean blankSubject = StringUtils.isBlank(subject);
    if (blankMessage) {
        return subject;
    }
    if (blankSubject) {
        return getMessage();
    }
    return String.format("%s - %s", subject, getMessage());
}
// With both subject and message present, toString() must join them with " - ".
@Test public void testToString() throws Exception { LogMessage msg = new LogMessage( "Log message", "Channel 01", LogLevel.DEBUG ); msg.setSubject( "Simple" ); assertEquals( "Simple - Log message", msg.toString( ) ); }
// Reports full one entry early: true once entries() reaches maxEntries() - 1, i.e. the
// final slot is never filled (the companion test expects full after maxEntries - 1
// appends, so this off-by-one is intentional).
@Override public boolean isFull() { return entries() >= maxEntries() - 1; }

// The index must flip to full exactly after maxEntries - 1 appended entries.
@Test public void testIsFull() { assertFalse(idx.isFull()); appendEntries(maxEntries - 1); assertTrue(idx.isFull()); }
// Evaluates the compiled term against a single row by wrapping it in a fresh
// evaluation context. Exposed for tests only.
@VisibleForTesting Object evaluate(final GenericRow row) { return term.getValue(new TermEvaluationContext(row)); }

// Lambdas of arity 1-3 must evaluate to working Function/BiFunction/TriFunction values.
@SuppressWarnings("unchecked")
@Test
public void shouldEvaluateLambda() {
    // Given:
    final Expression lambda1 = new LambdaFunctionCall(
        ImmutableList.of("X"),
        new ArithmeticBinaryExpression(
            Operator.ADD,
            new IntegerLiteral(1),
            new LambdaVariable("X")
        )
    );
    final Expression lambda2 = new LambdaFunctionCall(
        ImmutableList.of("X", "Y"),
        new ArithmeticBinaryExpression(
            Operator.ADD,
            new LambdaVariable("Y"),
            new LambdaVariable("X")
        )
    );
    final Expression lambda3 = new LambdaFunctionCall(
        ImmutableList.of("X", "Y", "Z"),
        new ArithmeticBinaryExpression(
            Operator.ADD,
            new LambdaVariable("X"),
            new ArithmeticBinaryExpression(
                Operator.ADD,
                new LambdaVariable("Y"),
                new LambdaVariable("Z")
            )
        )
    );
    final Context context1 = new Context(ImmutableMap.of("X", SqlTypes.INTEGER));
    final Context context2 = new Context(ImmutableMap.of("X", SqlTypes.INTEGER, "Y", SqlTypes.INTEGER));
    final Context context3 = new Context(ImmutableMap.of("X", SqlTypes.INTEGER, "Y", SqlTypes.INTEGER, "Z", SqlTypes.INTEGER));
    // When:
    InterpretedExpression interpreter1 = interpreter(lambda1, context1);
    InterpretedExpression interpreter2 = interpreter(lambda2, context2);
    InterpretedExpression interpreter3 = interpreter(lambda3, context3);
    // Then:
    final Function<Integer, Integer> func1 = (Function<Integer, Integer>) interpreter1.evaluate(ROW);
    assertThat(func1.apply(1), is(2));
    assertThat(func1.apply(2), is(3));
    final BiFunction<Integer, Integer, Integer> func2 = (BiFunction<Integer, Integer, Integer>) interpreter2.evaluate(ROW);
    assertThat(func2.apply(1, 2), is(3));
    assertThat(func2.apply(2, 4), is(6));
    final TriFunction<Integer, Integer, Integer, Integer> func3 = (TriFunction<Integer, Integer, Integer, Integer>) interpreter3.evaluate(ROW);
    assertThat(func3.apply(1, 2, 3), is(6));
    assertThat(func3.apply(2, 4, 6), is(12));
}
// The simple type name of this resource id is the stored name field.
@Override String simpleTypeName() { return name; }

// A bandwidth-typed continuous resource must report "Bandwidth".
@Test public void testSimpleTypeName() { ContinuousResourceId id1 = Resources.continuous(D1, P1, Bandwidth.class).resource(BW1.bps()).id(); assertThat(id1.simpleTypeName(), is("Bandwidth")); }
// Accessor for the NETCONF session identifier assigned at session establishment.
@Override public String getSessionId() { return sessionID; }

// get-config against both the running and candidate datastores must produce replies
// matching the expected rpc-reply pattern; a non-null session id is a precondition.
@Test public void testGetConfigRequest() { log.info("Starting get-config async"); assertNotNull("Incorrect sessionId", session1.getSessionId()); try { assertTrue("NETCONF get-config running command failed. ", GET_REPLY_PATTERN.matcher(session1.getConfig(RUNNING, SAMPLE_REQUEST)).matches()); assertTrue("NETCONF get-config candidate command failed. ", GET_REPLY_PATTERN.matcher(session1.getConfig(CANDIDATE, SAMPLE_REQUEST)).matches()); } catch (NetconfException e) { e.printStackTrace(); fail("NETCONF get-config test failed: " + e.getMessage()); } log.info("Finishing get-config async"); }
/**
 * Probability density function of the F distribution at {@code x}.
 * The density is evaluated in log space and exponentiated at the end to
 * avoid intermediate overflow/underflow for extreme arguments; {@code fac}
 * is the precomputed log normalization constant of this distribution.
 *
 * @param x the point to evaluate; must be strictly positive
 * @return the density value at {@code x}
 * @throws IllegalArgumentException if {@code x <= 0}
 */
@Override
public double p(double x) {
    if (x <= 0.0) {
        throw new IllegalArgumentException("Invalid x: " + x);
    }
    final double logDensity = (0.5 * nu1 - 1.0) * Math.log(x)
            - 0.5 * (nu1 + nu2) * Math.log(nu2 + nu1 * x)
            + fac;
    return Math.exp(logDensity);
}
// Checks the F(10, 20) density against reference values (e.g. R's df()) at
// several points spanning the tail, mode region and far tail.
@Test public void testP() { System.out.println("p"); FDistribution instance = new FDistribution(10, 20); instance.rand(); assertEquals(2.90264e-06, instance.p(0.01), 1E-10); assertEquals(0.01504682, instance.p(0.1), 1E-7); assertEquals(0.1198157, instance.p(0.2), 1E-7); assertEquals(0.687882, instance.p(0.5), 1E-6); assertEquals(0.7143568, instance.p(1), 1E-7); assertEquals(6.652967e-06, instance.p(10), 1E-10); }
/**
 * Returns the keys of the backing configuration visible through this
 * delegating view: with an empty prefix the backing key set is returned
 * directly; otherwise only keys starting with the prefix are included,
 * with the prefix stripped off.
 */
@Override
public Set<String> keySet() {
    if (prefix.isEmpty()) {
        // No prefix configured — the view is identical to the backing config.
        return backingConfig.keySet();
    }
    final Set<String> strippedKeys = new HashSet<>();
    final int prefixLength = prefix.length();
    for (final String candidate : backingConfig.keySet()) {
        if (candidate.startsWith(prefix)) {
            strippedKeys.add(candidate.substring(prefixLength));
        }
    }
    return strippedKeys;
}
// Verifies keySet() of a prefix-delegating configuration: a key carrying the
// prefix is exposed with the prefix stripped, and a non-matching key is hidden.
@Test void testDelegationConfigurationWithPrefix() { String prefix = "pref-"; String expectedKey = "key"; /* * Key matches the prefix */ Configuration backingConf = new Configuration(); backingConf.setValueInternal(prefix + expectedKey, "value", false); DelegatingConfiguration configuration = new DelegatingConfiguration(backingConf, prefix); Set<String> keySet = configuration.keySet(); assertThat(keySet).hasSize(1).containsExactly(expectedKey); /* * Key does not match the prefix */ backingConf = new Configuration(); backingConf.setValueInternal("test-key", "value", false); configuration = new DelegatingConfiguration(backingConf, prefix); assertThat(configuration.keySet()).isEmpty(); }
// Computes the column label of an aggregation projection: when a real
// (non-derived) alias is present, its identifier value is used; otherwise the
// label is derived from the aggregation function expression in a
// database-type-specific way (e.g. PostgreSQL lower-cases to the function name).
@Override public String getColumnLabel() { ProjectionIdentifierExtractEngine extractEngine = new ProjectionIdentifierExtractEngine(databaseType); return getAlias().isPresent() && !DerivedColumn.isDerivedColumnName(getAlias().get().getValueWithQuoteCharacters()) ? extractEngine.getIdentifierValue(getAlias().get()) : extractEngine.getColumnNameFromFunction(type.name(), expression); }
// Checks the alias-less column label per dialect: MySQL keeps the original
// expression text, PostgreSQL/openGauss collapse to the function name, and
// Oracle normalizes whitespace and casing of the expression.
@Test void assertGetColumnLabelWithoutAlias() { assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", null, TypedSPILoader.getService(DatabaseType.class, "MySQL")).getColumnLabel(), is("COUNT( A.\"DIRECTION\" )")); assertThat(new AggregationProjection(AggregationType.COUNT, "count( a.\"direction\" )", null, TypedSPILoader.getService(DatabaseType.class, "MySQL")).getColumnLabel(), is("count( a.\"direction\" )")); assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", null, TypedSPILoader.getService(DatabaseType.class, "PostgreSQL")).getColumnLabel(), is("count")); assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", null, TypedSPILoader.getService(DatabaseType.class, "openGauss")).getColumnLabel(), is("count")); assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( a.\"direction\" )", null, TypedSPILoader.getService(DatabaseType.class, "Oracle")).getColumnLabel(), is("COUNT(A.\"DIRECTION\")")); }
// Convenience overload: delegates to the two-argument variant with a fresh,
// empty RequestContext.
@Override public Future<RestResponse> restRequest(RestRequest request) { return restRequest(request, new RequestContext()); }
// Verifies the backup-requests stats consumer lifecycle as service properties
// change: no events without a config, add-events when configs for "get" and
// "batch_get" appear, then a remove/re-add for "get" (changed config) and a
// remove for "batch_get" (dropped config), with stats providers matched by identity.
@Test(dataProvider = "isD2Async") public void testStatsConsumerUpdateAndRemove(boolean isD2Async) throws Exception { AtomicReference<ServiceProperties> serviceProperties = new AtomicReference<>(); TestBackupRequestsStrategyStatsConsumer statsConsumer = new TestBackupRequestsStrategyStatsConsumer(); serviceProperties.set(createServiceProperties(null)); BackupRequestsClient client = createClient(serviceProperties::get, statsConsumer, isD2Async); URI uri = URI.create("d2://testService"); RestRequest restRequest = new RestRequestBuilder(uri).setEntity(CONTENT).build(); RequestContext requestContext = new RequestContext(); requestContext.putLocalAttr(R2Constants.OPERATION, "get"); Future<RestResponse> response = client.restRequest(restRequest, requestContext); assertEquals(response.get().getStatus(), 200); List<StatsConsumerEvent> events = statsConsumer.getEvents(); assertEquals(events.size(), 0); serviceProperties .set(createServiceProperties(Arrays.asList(createBackupRequestsConfiguration(5, "get"), createBackupRequestsConfiguration(1, "batch_get")))); requestContext = new RequestContext(); requestContext.putLocalAttr(R2Constants.OPERATION, "get"); response = client.restRequest(restRequest, requestContext); assertEquals(response.get().getStatus(), 200); events = statsConsumer.getEvents(); assertEquals(events.size(), 2); assertEquals(events.get(0).isEventAdd(), true); assertEquals(events.get(0).getService(), SERVICE_NAME); assertEquals(events.get(0).getOperation(), "get"); BackupRequestsStrategyStatsProvider statsProvider1 = events.get(0).getStatsProvider(); assertNotNull(statsProvider1); assertEquals(events.get(1).isEventAdd(), true); assertEquals(events.get(1).getService(), SERVICE_NAME); assertEquals(events.get(1).getOperation(), "batch_get"); BackupRequestsStrategyStatsProvider statsProvider2 = events.get(1).getStatsProvider(); assertNotNull(statsProvider2); serviceProperties .set(createServiceProperties(Arrays.asList(createBackupRequestsConfiguration(1, "get")))); 
requestContext = new RequestContext(); requestContext.putLocalAttr(R2Constants.OPERATION, "get"); response = client.restRequest(restRequest, requestContext); assertEquals(response.get().getStatus(), 200); events = statsConsumer.getEvents(); assertEquals(events.size(), 5); assertEquals(events.get(2).isEventAdd(), false); assertEquals(events.get(2).getService(), SERVICE_NAME); assertEquals(events.get(2).getOperation(), "get"); BackupRequestsStrategyStatsProvider removedStatsProvider = events.get(2).getStatsProvider(); assertNotNull(removedStatsProvider); assertSame(statsProvider1, removedStatsProvider); assertEquals(events.get(3).isEventAdd(), true); assertEquals(events.get(3).getService(), SERVICE_NAME); assertEquals(events.get(3).getOperation(), "get"); BackupRequestsStrategyStatsProvider statsProvider3 = events.get(3).getStatsProvider(); assertNotNull(statsProvider1); assertNotSame(statsProvider1, statsProvider3); assertEquals(events.get(4).isEventAdd(), false); assertEquals(events.get(4).getService(), SERVICE_NAME); assertEquals(events.get(4).getOperation(), "batch_get"); BackupRequestsStrategyStatsProvider removedStatsProvider2 = events.get(4).getStatsProvider(); assertNotNull(removedStatsProvider); assertSame(statsProvider2, removedStatsProvider2); }
/**
 * Enables or disables the dialog widgets according to the selected HTTP
 * method: body-related widgets are active only for body-carrying methods,
 * parameter widgets only for methods that accept query/matrix parameters.
 */
protected void setMethod() {
  final String httpMethod = wMethod.getText();
  final boolean bodyActive = RestMeta.isActiveBody( httpMethod );
  final boolean paramsActive = RestMeta.isActiveParameters( httpMethod );

  // Request-body widgets.
  wlBody.setEnabled( bodyActive );
  wBody.setEnabled( bodyActive );
  wApplicationType.setEnabled( bodyActive );

  // Query-parameter widgets.
  wlParameters.setEnabled( paramsActive );
  wParameters.setEnabled( paramsActive );
  wGet.setEnabled( paramsActive );

  // Matrix-parameter widgets.
  wlMatrixParameters.setEnabled( paramsActive );
  wMatrixParameters.setEnabled( paramsActive );
  wMatrixGet.setEnabled( paramsActive );
}
// For HTTP POST, both the body widgets and the parameter/matrix widgets must
// all be enabled exactly once.
@Test public void testSetMethod_POST() { doReturn( RestMeta.HTTP_METHOD_POST ).when( method ).getText(); dialog.setMethod(); verify( bodyl, times( 1 ) ).setEnabled( true ); verify( body, times( 1 ) ).setEnabled( true ); verify( type, times( 1 ) ).setEnabled( true ); verify( paramsl, times( 1 ) ).setEnabled( true ); verify( params, times( 1 ) ).setEnabled( true ); verify( paramsb, times( 1 ) ).setEnabled( true ); verify( matrixl, times( 1 ) ).setEnabled( true ); verify( matrix, times( 1 ) ).setEnabled( true ); verify( matrixb, times( 1 ) ).setEnabled( true ); }
// Static factory for a value-deduplicating transform using the default time
// domain and default deduplication duration.
public static <T> Deduplicate.Values<T> values() { return new Deduplicate.Values<>(DEFAULT_TIME_DOMAIN, DEFAULT_DURATION); }
// Deduplication is scoped per fixed window: the same value appearing in
// different 30s windows is emitted once per window, not once globally.
@Test @Category({NeedsRunner.class, UsesTestStream.class}) public void testInDifferentWindows() { Instant base = new Instant(0); TestStream<String> values = TestStream.create(StringUtf8Coder.of()) .advanceWatermarkTo(base) .addElements( TimestampedValue.of("k1", base), TimestampedValue.of("k2", base.plus(Duration.standardSeconds(10))), TimestampedValue.of("k3", base.plus(Duration.standardSeconds(20))), TimestampedValue.of("k1", base.plus(Duration.standardSeconds(30))), TimestampedValue.of("k2", base.plus(Duration.standardSeconds(40))), TimestampedValue.of("k3", base.plus(Duration.standardSeconds(50))), TimestampedValue.of("k4", base.plus(Duration.standardSeconds(60))), TimestampedValue.of("k5", base.plus(Duration.standardSeconds(70))), TimestampedValue.of("k6", base.plus(Duration.standardSeconds(80)))) .advanceWatermarkToInfinity(); PCollection<String> distinctValues = p.apply(values) .apply(Window.into(FixedWindows.of(Duration.standardSeconds(30)))) .apply(Deduplicate.values()); PAssert.that(distinctValues) .inWindow(new IntervalWindow(base, base.plus(Duration.standardSeconds(30)))) .containsInAnyOrder("k1", "k2", "k3"); PAssert.that(distinctValues) .inWindow( new IntervalWindow( base.plus(Duration.standardSeconds(30)), base.plus(Duration.standardSeconds(60)))) .containsInAnyOrder("k1", "k2", "k3"); PAssert.that(distinctValues) .inWindow( new IntervalWindow( base.plus(Duration.standardSeconds(60)), base.plus(Duration.standardSeconds(90)))) .containsInAnyOrder("k4", "k5", "k6"); p.run(); }
// Resolves the default vcore count for Yarn: reads the configured cpu cores
// (falling back to YarnConfigOptions.VCORES), rounds the fractional value up
// to at least 1 whole core (Yarn only accepts positive integers, logged when
// rounding happens), and rejects values exceeding Integer.MAX_VALUE.
@VisibleForTesting static CPUResource getDefaultCpus(final Configuration configuration) { int fallback = configuration.get(YarnConfigOptions.VCORES); double cpuCoresDouble = TaskExecutorProcessUtils.getCpuCoresWithFallback(configuration, fallback) .getValue() .doubleValue(); @SuppressWarnings("NumericCastThatLosesPrecision") long cpuCoresLong = Math.max((long) Math.ceil(cpuCoresDouble), 1L); //noinspection FloatingPointEquality if (cpuCoresLong != cpuCoresDouble) { LOG.info( "The amount of cpu cores must be a positive integer on Yarn. Rounding {} up to the closest positive integer {}.", cpuCoresDouble, cpuCoresLong); } if (cpuCoresLong > Integer.MAX_VALUE) { throw new IllegalConfigurationException( String.format( "The amount of cpu cores %d cannot exceed Integer.MAX_VALUE: %d", cpuCoresLong, Integer.MAX_VALUE)); } //noinspection NumericCastThatLosesPrecision return new CPUResource(cpuCoresLong); }
// The common TaskManagerOptions.CPU_CORES option takes precedence over the
// Yarn-specific VCORES fallback when both are configured.
@Test void testGetCpuCoresCommonOption() { final Configuration configuration = new Configuration(); configuration.set(TaskManagerOptions.CPU_CORES, 1.0); configuration.set(YarnConfigOptions.VCORES, 2); configuration.set(TaskManagerOptions.NUM_TASK_SLOTS, 3); assertThat(YarnWorkerResourceSpecFactory.getDefaultCpus(configuration)) .isEqualTo(new CPUResource(1.0)); }
// TPS limiter: when a positive rate is configured for the invoked method, the
// per-service StatItem (created lazily, rebuilt if rate/interval changed)
// decides admission; when no rate is configured, any stale StatItem is removed
// and the call is always allowed.
// NOTE(review): the get/putIfAbsent/get and the rebuild-on-change path are
// check-then-act sequences — presumably `stats` is a ConcurrentMap so the
// worst case is a briefly stale StatItem; confirm that is acceptable.
@Override public boolean isAllowable(URL url, Invocation invocation) { int rate = url.getMethodParameter(RpcUtils.getMethodName(invocation), TPS_LIMIT_RATE_KEY, -1); long interval = url.getMethodParameter( RpcUtils.getMethodName(invocation), TPS_LIMIT_INTERVAL_KEY, DEFAULT_TPS_LIMIT_INTERVAL); String serviceKey = url.getServiceKey(); if (rate > 0) { StatItem statItem = stats.get(serviceKey); if (statItem == null) { stats.putIfAbsent(serviceKey, new StatItem(serviceKey, rate, interval)); statItem = stats.get(serviceKey); } else { // rate or interval has changed, rebuild if (statItem.getRate() != rate || statItem.getInterval() != interval) { stats.put(serviceKey, new StatItem(serviceKey, rate, interval)); statItem = stats.get(serviceKey); } } return statItem.isAllowable(); } else { StatItem statItem = stats.get(serviceKey); if (statItem != null) { stats.remove(serviceKey); } } return true; }
// Within one interval the limiter admits exactly TEST_LIMIT_RATE calls and
// rejects the (rate+1)-th.
@Test void testIsNotAllowable() { Invocation invocation = new MockInvocation(); URL url = URL.valueOf("test://test"); url = url.addParameter(INTERFACE_KEY, "org.apache.dubbo.rpc.file.TpsService"); url = url.addParameter(TPS_LIMIT_RATE_KEY, TEST_LIMIT_RATE); url = url.addParameter(TPS_LIMIT_INTERVAL_KEY, 1000); for (int i = 1; i <= TEST_LIMIT_RATE + 1; i++) { if (i == TEST_LIMIT_RATE + 1) { Assertions.assertFalse(defaultTPSLimiter.isAllowable(url, invocation)); } else { Assertions.assertTrue(defaultTPSLimiter.isAllowable(url, invocation)); } } }
// CLI command: downloads the template export ZIP for the (optional) namespace
// from the server and writes it into the target directory; returns 0 on
// success, 1 on an HTTP error (delegated to the shared handler).
// NOTE(review): the createNewFile() return value is ignored and Files.write
// creates the file anyway — the call looks redundant; confirm and drop it.
@Override public Integer call() throws Exception { super.call(); try(DefaultHttpClient client = client()) { MutableHttpRequest<Object> request = HttpRequest .GET(apiUri("/templates/export/by-query") + (namespace != null ? "?namespace=" + namespace : "")) .accept(MediaType.APPLICATION_OCTET_STREAM); HttpResponse<byte[]> response = client.toBlocking().exchange(this.requestOptions(request), byte[].class); Path zipFile = Path.of(directory.toString(), DEFAULT_FILE_NAME); zipFile.toFile().createNewFile(); Files.write(zipFile, response.body()); stdOut("Exporting template(s) for namespace '" + namespace + "' successfully done !"); } catch (HttpClientResponseException e) { TemplateValidateCommand.handleHttpException(e, "template"); return 1; } return 0; }
// End-to-end CLI test: uploads 3 templates via the update command against an
// embedded server, exports them for the namespace, and checks the resulting
// /tmp/templates.zip contains 3 entries before cleaning up.
@Test void run() throws IOException { URL directory = TemplateExportCommandTest.class.getClassLoader().getResource("templates"); ByteArrayOutputStream out = new ByteArrayOutputStream(); System.setOut(new PrintStream(out)); try (ApplicationContext ctx = ApplicationContext.run(Map.of("kestra.templates.enabled", "true"), Environment.CLI, Environment.TEST)) { EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class); embeddedServer.start(); // we use the update command to add templates to extract String[] args = { "--server", embeddedServer.getURL().toString(), "--user", "myuser:pass:word", "io.kestra.tests", directory.getPath(), }; PicocliRunner.call(TemplateNamespaceUpdateCommand.class, ctx, args); assertThat(out.toString(), containsString("3 template(s)")); // then we export them String[] exportArgs = { "--server", embeddedServer.getURL().toString(), "--user", "myuser:pass:word", "--namespace", "io.kestra.tests", "/tmp", }; PicocliRunner.call(TemplateExportCommand.class, ctx, exportArgs); File file = new File("/tmp/templates.zip"); assertThat(file.exists(), is(true)); ZipFile zipFile = new ZipFile(file); assertThat(zipFile.stream().count(), is(3L)); file.delete(); } }
/**
 * Returns a new mutable, insertion-ordered {@link LinkedHashSet} containing
 * the supplied elements (duplicates collapse to their first occurrence).
 *
 * @param elements the elements to place in the set; may be an empty array and
 *                 may contain {@code null} elements, but must not itself be
 *                 a {@code null} array
 * @param <T>      the element type
 * @return a new {@link LinkedHashSet} of the given elements
 * @throws IllegalArgumentException if {@code elements} is a {@code null} array
 */
@SafeVarargs // the varargs array is only read, never stored — no heap pollution
public static <T> Set<T> set(T... elements) {
    if (elements == null) {
        throw new IllegalArgumentException("Expected an array of elements (or empty array) but received a null.");
    } else {
        // LinkedHashSet preserves the caller's argument order.
        return new LinkedHashSet<>(Arrays.asList(elements));
    }
}
// Covers the null-element, duplicate-free ordered, and empty-varargs cases of
// CollectionUtils.set (note: set(null, null, null) is three null elements, not
// a null array).
@Test void testSet() { Set<Object> set = new HashSet<>(); set.add(null); assertEquals(set, CollectionUtils.set(null, null, null)); assertEquals(new LinkedHashSet(Arrays.asList("", "a", "b")), CollectionUtils.set("", "a", "b")); assertEquals(new HashSet(), CollectionUtils.set()); }
public static UnixMountInfo parseMountInfo(String line) { // Example mount lines: // ramfs on /mnt/ramdisk type ramfs (rw,relatime,size=1gb) // map -hosts on /net (autofs, nosuid, automounted, nobrowse) UnixMountInfo.Builder builder = new UnixMountInfo.Builder(); // First get and remove the mount type if it's provided. Matcher matcher = Pattern.compile(".* (type \\w+ ).*").matcher(line); String lineWithoutType; if (matcher.matches()) { String match = matcher.group(1); builder.setFsType(match.replace("type", "").trim()); lineWithoutType = line.replace(match, ""); } else { lineWithoutType = line; } // Now parse the rest matcher = Pattern.compile("(.*) on (.*) \\((.*)\\)").matcher(lineWithoutType); if (!matcher.matches()) { LOG.warn("Unable to parse output of '{}': {}", MOUNT_COMMAND, line); return builder.build(); } builder.setDeviceSpec(matcher.group(1)); builder.setMountPoint(matcher.group(2)); builder.setOptions(parseUnixMountOptions(matcher.group(3))); return builder.build(); }
// OS X style mount lines carry no "type" token: device and mount point are
// parsed, fs type is absent, and the options carry no size entry.
@Test public void parseMountInfoWithoutType() throws Exception { // OS X mount info. UnixMountInfo info = ShellUtils.parseMountInfo("devfs on /dev (devfs, local, nobrowse)"); assertEquals(Optional.of("devfs"), info.getDeviceSpec()); assertEquals(Optional.of("/dev"), info.getMountPoint()); assertFalse(info.getFsType().isPresent()); assertFalse(info.getOptions().getSize().isPresent()); }
// Encodes an outgoing PostgreSQL/openGauss packet: identifier packets get a
// message header reserved up front; if writing the payload throws, the writer
// index is rewound and an ERROR response packet is emitted instead. The length
// field is patched in afterwards for any identifier packet (including the
// substituted error packet).
@Override public void encode(final ChannelHandlerContext context, final DatabasePacket message, final ByteBuf out) { boolean isIdentifierPacket = message instanceof PostgreSQLIdentifierPacket; if (isIdentifierPacket) { prepareMessageHeader(out, ((PostgreSQLIdentifierPacket) message).getIdentifier().getValue()); } PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(out, context.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get()); try { message.write(payload); // CHECKSTYLE:OFF } catch (final RuntimeException ex) { // CHECKSTYLE:ON payload.getByteBuf().resetWriterIndex(); // TODO consider what severity to use OpenGaussErrorResponsePacket errorResponsePacket = new OpenGaussErrorResponsePacket( PostgreSQLMessageSeverityLevel.ERROR, PostgreSQLVendorError.SYSTEM_ERROR.getSqlState().getValue(), ex.getMessage()); isIdentifierPacket = true; prepareMessageHeader(out, errorResponsePacket.getIdentifier().getValue()); errorResponsePacket.write(payload); } finally { if (isIdentifierPacket) { updateMessageLength(out); } } }
// When the packet's write() throws, the codec must rewind the buffer and emit
// an ERROR_RESPONSE packet with the patched length field.
@Test void assertEncodeOccursException() { PostgreSQLPacket packet = mock(PostgreSQLPacket.class); RuntimeException ex = mock(RuntimeException.class); when(ex.getMessage()).thenReturn("Error"); doThrow(ex).when(packet).write(any(PostgreSQLPacketPayload.class)); when(byteBuf.readableBytes()).thenReturn(9); new OpenGaussPacketCodecEngine().encode(context, packet, byteBuf); verify(byteBuf).resetWriterIndex(); verify(byteBuf).writeByte(PostgreSQLMessagePacketType.ERROR_RESPONSE.getValue()); verify(byteBuf).setInt(1, 8); }
/**
 * Opens an upload stream to the given path on Manta. The returned stream is
 * Void-typed because the underlying client exposes no per-upload status to
 * report back; {@code status} and {@code callback} are accepted for interface
 * conformance but not consulted here.
 */
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) {
    final OutputStream upload = session.getClient().putAsOutputStream(file.getAbsolute());
    return new VoidStatusOutputStream(upload);
}
// Round-trip test: writes 5 MiB of random bytes through the write feature,
// verifies the null status, existence, and byte-for-byte read-back, then
// deletes the test directory.
@Test public void testWrite() throws Exception { final MantaWriteFeature feature = new MantaWriteFeature(session); final Path container = new MantaDirectoryFeature(session).mkdir(randomDirectory(), new TransferStatus()); final byte[] content = RandomUtils.nextBytes(5 * 1024 * 1024); final TransferStatus status = new TransferStatus(); status.setLength(content.length); final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final StatusOutputStream<Void> out = feature.write(file, status, new DisabledConnectionCallback()); final ByteArrayInputStream in = new ByteArrayInputStream(content); final byte[] buffer = new byte[32 * 1024]; assertEquals(content.length, IOUtils.copyLarge(in, out, buffer)); in.close(); out.close(); assertNull(out.getStatus()); assertTrue(new DefaultFindFeature(session).find(file)); final byte[] compare = new byte[content.length]; final InputStream stream = new MantaReadFeature(session).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); IOUtils.readFully(stream, compare); stream.close(); assertArrayEquals(content, compare); new MantaDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// Seeds this overview from a previous run's overview and returns the previous
// max iteration id (0 when there is no previous detail). When toReset == 0
// (restart all incomplete) only SUCCEEDED iterations are kept; when a specific
// iteration is being restarted, all terminal-state iterations are kept. The
// checkpoint is set to the largest prefix 1..k of kept iteration ids that is
// contiguous (the ids are sorted; the first gap caps the checkpoint).
@JsonIgnore public long initiateAndGetByPrevMaxIterationId(ForeachStepOverview prev, long toReset) { if (prev == null || prev.details == null) { return 0; } long maxIterationId = prev.details.flatten(e -> true).values().stream() .flatMap(Collection::stream) .reduce(0L, Long::max); // We need to keep only succeeded instances as we will restart the remaining instances when // restarting all iterations from incomplete. When there is specific iteration to restart // we keep all terminal state instances. Map<WorkflowInstance.Status, List<Long>> toKeep = prev.details.flatten( toReset == 0 ? (e -> e == WorkflowInstance.Status.SUCCEEDED) : (WorkflowInstance.Status::isTerminal)); // checkpoint can be set to the first restartable instance id - 1 List<Long> instanceIds = toKeep.values().stream().flatMap(Collection::stream).sorted().collect(Collectors.toList()); checkpoint = instanceIds.size(); for (int i = 0; i < instanceIds.size(); ++i) { if (instanceIds.get(i) != i + 1) { checkpoint = i; break; } } toKeep.forEach((status, ids) -> ids.forEach(i -> addOne(i, status, null))); refreshDetail(); return maxIterationId; }
// Exercises seeding from a previous overview: SUCCEEDED-only retention when
// toReset == 0 vs. all-terminal retention for a specific reset id, null-prev
// short-circuit, and checkpoint computation for contiguous / gapped /
// interval-based iteration id sets.
@Test public void testInitiateAndGetByPrevMaxIterationId() throws Exception { ForeachStepOverview prev = loadObject( "fixtures/instances/sample-foreach-step-overview-with-failed.json", ForeachStepOverview.class); ForeachStepOverview overview = new ForeachStepOverview(); assertEquals(80121, overview.initiateAndGetByPrevMaxIterationId(prev, 0)); assertEquals( Collections.singletonMap( WorkflowInstance.Status.SUCCEEDED, Arrays.asList(80110L, 80112L, 80113L, 80114L, 80115L)), overview.getDetails().flatten(e -> true)); assertEquals(0, overview.getCheckpoint()); overview = new ForeachStepOverview(); assertEquals(80121, overview.initiateAndGetByPrevMaxIterationId(prev, 80115)); Map<WorkflowInstance.Status, List<Long>> detailsMap = new HashMap<>(); detailsMap.put( WorkflowInstance.Status.SUCCEEDED, Arrays.asList(80110L, 80112L, 80113L, 80114L, 80115L)); detailsMap.put(WorkflowInstance.Status.FAILED, Arrays.asList(80117L, 80119L, 80120L, 80121L)); assertEquals(detailsMap, overview.getDetails().flatten(e -> true)); assertEquals(0, overview.getCheckpoint()); overview = new ForeachStepOverview(); assertEquals(0, overview.initiateAndGetByPrevMaxIterationId(null, 80115)); assertEquals(0, overview.getCheckpoint()); overview = new ForeachStepOverview(); prev.getDetails().add(1, WorkflowInstance.Status.SUCCEEDED); prev.getDetails().add(2, WorkflowInstance.Status.STOPPED); prev.refreshDetail(); assertEquals(80121, overview.initiateAndGetByPrevMaxIterationId(prev, 80115)); assertEquals(2, overview.getCheckpoint()); overview = new ForeachStepOverview(); prev.getDetails().getInfo().clear(); prev.getDetails() .getInfo() .put( WorkflowInstance.Status.STOPPED, Collections.singletonList(new ForeachDetails.Interval(1, 10000))); assertEquals(10000, overview.initiateAndGetByPrevMaxIterationId(prev, 80115)); assertEquals(10000, overview.getCheckpoint()); overview = new ForeachStepOverview(); prev.getDetails().getInfo().clear(); assertEquals(0, overview.initiateAndGetByPrevMaxIterationId(prev, 
80115)); assertEquals(0, overview.getCheckpoint()); overview = new ForeachStepOverview(); prev.getDetails() .getInfo() .put( WorkflowInstance.Status.SUCCEEDED, Collections.singletonList(new ForeachDetails.Interval(1, 10000))); assertEquals(10000, overview.initiateAndGetByPrevMaxIterationId(prev, 80115)); assertEquals(10000, overview.getCheckpoint()); overview = new ForeachStepOverview(); prev.getDetails().getInfo().clear(); prev.getDetails().add(1, WorkflowInstance.Status.SUCCEEDED); prev.getDetails().add(2, WorkflowInstance.Status.STOPPED); prev.getDetails().add(3, WorkflowInstance.Status.SUCCEEDED); prev.refreshDetail(); assertEquals(3, overview.initiateAndGetByPrevMaxIterationId(prev, 80115)); assertEquals(3, overview.getCheckpoint()); overview = new ForeachStepOverview(); prev.getDetails().getInfo().clear(); prev.getDetails().add(1, WorkflowInstance.Status.SUCCEEDED); prev.getDetails().add(2, WorkflowInstance.Status.STOPPED); prev.refreshDetail(); assertEquals(2, overview.initiateAndGetByPrevMaxIterationId(prev, 80115)); assertEquals(2, overview.getCheckpoint()); }
// Applies an incremental config alteration for a single resource: collects the
// resulting records into a bounded list (capped per user operation) and wraps
// them with the per-resource ApiError in an atomic controller result.
ControllerResult<ApiError> incrementalAlterConfig( ConfigResource configResource, Map<String, Entry<OpType, String>> keyToOps, boolean newlyCreatedResource ) { List<ApiMessageAndVersion> outputRecords = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP); ApiError apiError = incrementalAlterConfigResource(configResource, keyToOps, newlyCreatedResource, outputRecords); return ControllerResult.atomicOf(outputRecords, apiError); }
// APPEND on an unset key produces a ConfigRecord with the appended value;
// after replaying it, DELETE on the same key produces a record with a null
// value (removal).
@Test public void testIncrementalAlterConfig() { ConfigurationControlManager manager = new ConfigurationControlManager.Builder(). setKafkaConfigSchema(SCHEMA). build(); Map<String, Entry<AlterConfigOp.OpType, String>> keyToOps = toMap(entry("abc", entry(APPEND, "123"))); ControllerResult<ApiError> result = manager. incrementalAlterConfig(MYTOPIC, keyToOps, true); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new ConfigRecord().setResourceType(TOPIC.id()).setResourceName("mytopic"). setName("abc").setValue("123"), CONFIG_RECORD.highestSupportedVersion())), ApiError.NONE), result); RecordTestUtils.replayAll(manager, result.records()); assertEquals(ControllerResult.atomicOf(Collections.singletonList(new ApiMessageAndVersion( new ConfigRecord().setResourceType(TOPIC.id()).setResourceName("mytopic"). setName("abc").setValue(null), CONFIG_RECORD.highestSupportedVersion())), ApiError.NONE), manager.incrementalAlterConfig(MYTOPIC, toMap(entry("abc", entry(DELETE, "xyz"))), true)); }
// Higher criterion values are better for this criterion (strictly greater wins).
@Override public boolean betterThan(Num criterionValue1, Num criterionValue2) { return criterionValue1.isGreaterThan(criterionValue2); }
// Larger value beats smaller; smaller does not beat larger.
@Test public void betterThan() { AnalysisCriterion criterion = getCriterion(); assertTrue(criterion.betterThan(numOf(3.5), numOf(2.2))); assertFalse(criterion.betterThan(numOf(1.5), numOf(2.7))); }
// Wraps an XAConnection with tracing via TracingXAConnection.create (which,
// per the test below, returns an already-wrapped connection unchanged).
public XAConnection xaConnection(XAConnection xaConnection) { return TracingXAConnection.create(xaConnection, this); }
// Wrapping an already-traced XAConnection must return the same instance
// (idempotent wrapping, no double decoration).
@Test void xaConnection_doesntDoubleWrap() { XAConnection wrapped = jmsTracing.xaConnection(mock(XAConnection.class)); assertThat(jmsTracing.xaConnection(wrapped)) .isSameAs(wrapped); }
/**
 * Returns the next share of the total in round-robin order, wrapping back to
 * the first entry after the last. No internal synchronization — external
 * coordination is required if instances are shared across threads.
 */
public long next() {
    final long share = parts[index];
    index = (index + 1) % parts.length;
    return share;
}
// The tracker must split 1000 into 5 parts that sum exactly to the total, and
// then repeat the same sequence indefinitely in round-robin order.
@Test public void check() { RandomizedRateTracker tracker = new RandomizedRateTracker(1000, 5); long p1 = tracker.next(); long p2 = tracker.next(); long p3 = tracker.next(); long p4 = tracker.next(); long p5 = tracker.next(); //total is divided up properly assertEquals(1000, p1 + p2 + p3 + p4 + p5); //returns same values in a round-robin fashion for (int i = 0; i < 5; i++) { assertEquals(p1, tracker.next()); assertEquals(p2, tracker.next()); assertEquals(p3, tracker.next()); assertEquals(p4, tracker.next()); assertEquals(p5, tracker.next()); } }
// Initializes the batch source executor: stores config/context, derives the
// intermediate topic name from tenant/namespace/source name, creates a
// single-threaded discovery executor, validates the batch-source config,
// initializes the underlying source and starts discovery.
@Override public void open(Map<String, Object> config, SourceContext sourceContext) throws Exception { this.config = config; this.sourceContext = sourceContext; this.intermediateTopicName = SourceConfigUtils.computeBatchSourceIntermediateTopicName(sourceContext.getTenant(), sourceContext.getNamespace(), sourceContext.getSourceName()).toString(); this.discoveryThread = Executors.newSingleThreadExecutor( new DefaultThreadFactory( String.format("%s-batch-source-discovery", FunctionCommon.getFullyQualifiedName( sourceContext.getTenant(), sourceContext.getNamespace(), sourceContext.getSourceName())))); this.getBatchSourceConfigs(config); this.initializeBatchSource(); this.start(); }
// open() must reject a config that is missing the required key with an
// IllegalArgumentException carrying the expected message.
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Bad config passed to TestBatchSource") public void testWithoutRightSourceConfig() throws Exception { config.remove("foo"); config.put("something", "else"); batchSourceExecutor.open(config, context); }
// Renders the resource names via the display-join helper (which, per the test
// below, sorts and separates with " | ").
@Override public String toString() { return joinForDisplay(resourceNames()); }
// The comma-separated convenience constructor parses "mou, fou" and toString
// renders the names sorted and pipe-separated.
@Test public void shouldHaveNiceConvenienceConstructorThatDoesSomeNiftyParsing() { ResourceConfigs actual = new ResourceConfigs("mou, fou"); assertThat(actual.toString(), is("fou | mou")); }
// Builds the base connector config for a source->target replication flow by
// layering maps in precedence order: raw top-level props (filtered to known
// config names), config-provider and replication.policy passthroughs, source-
// then target-cluster props (non-prefixed cluster attrs, then
// producer/consumer/admin-prefixed client attrs), defaulted identity props
// (name, class, aliases), and finally connector-level "a->b." overrides.
// Disabled by default; no transformation here — the worker transforms later.
public Map<String, String> connectorBaseConfig(SourceAndTarget sourceAndTarget, Class<?> connectorClass) { Map<String, String> props = new HashMap<>(); props.putAll(rawProperties); props.keySet().retainAll(allConfigNames()); props.putAll(stringsWithPrefix(CONFIG_PROVIDERS_CONFIG)); props.putAll(stringsWithPrefix("replication.policy")); Map<String, String> sourceClusterProps = clusterProps(sourceAndTarget.source()); // attrs non prefixed with producer|consumer|admin props.putAll(clusterConfigsWithPrefix(SOURCE_CLUSTER_PREFIX, sourceClusterProps)); // attrs prefixed with producer|consumer|admin props.putAll(clientConfigsWithPrefix(SOURCE_PREFIX, sourceClusterProps)); Map<String, String> targetClusterProps = clusterProps(sourceAndTarget.target()); props.putAll(clusterConfigsWithPrefix(TARGET_CLUSTER_PREFIX, targetClusterProps)); props.putAll(clientConfigsWithPrefix(TARGET_PREFIX, targetClusterProps)); props.putIfAbsent(NAME, connectorClass.getSimpleName()); props.putIfAbsent(CONNECTOR_CLASS, connectorClass.getName()); props.putIfAbsent(SOURCE_CLUSTER_ALIAS, sourceAndTarget.source()); props.putIfAbsent(TARGET_CLUSTER_ALIAS, sourceAndTarget.target()); // override with connector-level properties props.putAll(stringsWithPrefixStripped(sourceAndTarget.source() + "->" + sourceAndTarget.target() + ".")); // disabled by default props.putIfAbsent(MirrorConnectorConfig.ENABLED, "false"); // don't transform -- the worker will handle transformation of Connector and Task configs return props; }
// Per-cluster bootstrap servers land under the source./target. prefixes and
// shared settings like security.protocol pass through to the connector config.
@Test public void testClusterConfigProperties() { MirrorMakerConfig mirrorConfig = new MirrorMakerConfig(makeProps( "clusters", "a, b", "a.bootstrap.servers", "servers-one", "b.bootstrap.servers", "servers-two", "security.protocol", "SSL", "replication.factor", "4")); Map<String, String> connectorProps = mirrorConfig.connectorBaseConfig(new SourceAndTarget("a", "b"), MirrorSourceConnector.class); assertEquals("servers-one", connectorProps.get("source.cluster.bootstrap.servers"), "source.cluster.bootstrap.servers is set"); assertEquals("servers-two", connectorProps.get("target.cluster.bootstrap.servers"), "target.cluster.bootstrap.servers is set"); assertEquals("SSL", connectorProps.get("security.protocol"), "top-level security.protocol is passed through to connector config"); }
// Delegates clearing to the underlying map.
@Override public void clear() { map.clear(); }
// Clearing through the adapter must empty the backing map.
@Test public void testClear() { map.put(23, "foobar"); adapter.clear(); assertEquals(0, map.size()); }
// Authenticates a client via RFC 7521 private_key_jwt client assertion:
// rejects any other assertion type, verifies the JWS signature (RS256/ES256
// against the configured JWK source), enforces audience == this server's base
// URI and the presence of jti/exp/iss/sub claims, then derives the client id
// from the assertion. Parse and verification failures surface as
// AuthenticationException with the underlying cause preserved.
@NonNull public Client authenticate(@NonNull Request request) { // https://datatracker.ietf.org/doc/html/rfc7521#section-4.2 try { if (!CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT.equals(request.clientAssertionType())) { throw new AuthenticationException( "unsupported client_assertion_type='%s', expected '%s'" .formatted(request.clientAssertionType(), CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT)); } var processor = new DefaultJWTProcessor<>(); var keySelector = new JWSVerificationKeySelector<>( Set.of(JWSAlgorithm.RS256, JWSAlgorithm.ES256), jwkSource); processor.setJWSKeySelector(keySelector); processor.setJWTClaimsSetVerifier( new DefaultJWTClaimsVerifier<>( new JWTClaimsSet.Builder().audience(baseUri.toString()).build(), Set.of( JWTClaimNames.JWT_ID, JWTClaimNames.EXPIRATION_TIME, JWTClaimNames.ISSUER, JWTClaimNames.SUBJECT))); var claims = processor.process(request.clientAssertion(), null); var clientId = clientIdFromAssertion(request.clientId(), claims); return new Client(clientId); } catch (ParseException e) { throw new AuthenticationException("failed to parse client assertion", e); } catch (BadJOSEException | JOSEException e) { throw new AuthenticationException("failed to verify client assertion", e); } }
// A validly signed assertion whose subject does not match the requested client
// id must be rejected with AuthenticationException.
@Test void authenticate_badSubject_noMatch() throws JOSEException { var key = generateKey(); var jwkSource = new StaticJwkSource<>(key); var claims = new JWTClaimsSet.Builder() .audience(RP_ISSUER.toString()) .subject(CLIENT_ID) .issuer(CLIENT_ID) .expirationTime(Date.from(Instant.now().plusSeconds(60))) .jwtID(UUID.randomUUID().toString()) .build(); var signed = signJwt(claims, key); var authenticator = new ClientAuthenticator(jwkSource, RP_ISSUER); // when & then assertThrows( AuthenticationException.class, () -> authenticator.authenticate( new Request( "the wrong client", ClientAuthenticator.CLIENT_ASSERTION_TYPE_PRIVATE_KEY_JWT, signed))); }
// Delegates loadAll to the wrapped store while recording the elapsed time in
// the loadAll latency probe; the finally block ensures the latency is recorded
// even when the delegate throws.
@Override public Map<Long, T> loadAll(Collection<Long> keys) { long startNanos = Timer.nanos(); try { return delegate.loadAll(keys); } finally { loadAllProbe.recordValue(Timer.nanosElapsed(startNanos)); } }
// The instrumented store must return the delegate's values unchanged and
// record exactly one probe hit for "loadAll".
@Test public void loadAll() { Collection<Long> keys = Arrays.asList(1L, 2L); Map<Long, String> values = new HashMap<>(); values.put(1L, "value1"); values.put(2L, "value2"); when(delegate.loadAll(keys)).thenReturn(values); Map<Long, String> result = queueStore.loadAll(keys); assertEquals(values, result); assertProbeCalledOnce("loadAll"); }
// FEEL time(string) builtin: parses the literal and returns the narrowest matching type —
// OffsetTime when an explicit offset is present, LocalTime when no zone info at all,
// and a custom ZoneTime (tracking whether seconds were written) for zone-id times.
// Null input and parse failures are reported as FEEL error results, never thrown.
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) { if ( val == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } try { TemporalAccessor parsed = FEEL_TIME.parse(val); if (parsed.query(TemporalQueries.offset()) != null) { // it is an offset-zoned time, so I can know for certain an OffsetTime OffsetTime asOffSetTime = parsed.query(OffsetTime::from); return FEELFnResult.ofResult(asOffSetTime); } else if (parsed.query(TemporalQueries.zone()) == null) { // if it does not contain any zone information at all, then I know for certain is a local time. LocalTime asLocalTime = parsed.query(LocalTime::from); return FEELFnResult.ofResult(asLocalTime); } else if (parsed.query(TemporalQueries.zone()) != null) { boolean hasSeconds = timeStringWithSeconds(val); LocalTime asLocalTime = parsed.query(LocalTime::from); ZoneId zoneId = parsed.query(TemporalQueries.zone()); ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds); return FEELFnResult.ofResult(zoneTime); } return FEELFnResult.ofResult(parsed); } catch (DateTimeException e) { return manageDateTimeException(e, val); } }
// Verifies a malformed time literal yields an InvalidParametersEvent error result.
@Test void invokeStringParamTimeWrongFormat() { FunctionTestUtil.assertResultError(timeFunction.invoke("10-15:06"), InvalidParametersEvent.class); }
// Factory for a RocksDB-backed versioned key-value store supplier.
// Validates name non-null and historyRetention as a non-negative whole-millisecond duration
// (the explicit < 0 check backstops validateMillisecondDuration with a clearer message).
public static VersionedBytesStoreSupplier persistentVersionedKeyValueStore(final String name, final Duration historyRetention) { Objects.requireNonNull(name, "name cannot be null"); final String hrMsgPrefix = prepareMillisCheckFailMsgPrefix(historyRetention, "historyRetention"); final long historyRetentionMs = validateMillisecondDuration(historyRetention, hrMsgPrefix); if (historyRetentionMs < 0L) { throw new IllegalArgumentException("historyRetention cannot be negative"); } return new RocksDbVersionedKeyValueBytesStoreSupplier(name, historyRetentionMs); }
// Verifies the supplier builds a persistent VersionedBytesStore instance.
@Test public void shouldCreateRocksDbVersionedStore() { final KeyValueStore<Bytes, byte[]> store = Stores.persistentVersionedKeyValueStore("store", ofMillis(1)).get(); assertThat(store, instanceOf(VersionedBytesStore.class)); assertThat(store.persistent(), equalTo(true)); }
// Returns a usable connection to the given NameNode for this user/protocol, or null.
// Looks up the pool under a read lock, lazily creates it with double-checked locking under
// the write lock, advances the pool's client state id from the current RPC call, and — when
// the obtained connection is unusable — enqueues the pool for asynchronous connection
// creation (bounded by creatorQueueMaxSize) rather than creating one inline.
// Closed connections are discarded and reported as null.
public ConnectionContext getConnection(UserGroupInformation ugi, String nnAddress, Class<?> protocol, String nsId) throws IOException { // Check if the manager is shutdown if (!this.running) { LOG.error( "Cannot get a connection to {} because the manager isn't running", nnAddress); return null; } // Try to get the pool if created ConnectionPoolId connectionId = new ConnectionPoolId(ugi, nnAddress, protocol); ConnectionPool pool = null; readLock.lock(); try { pool = this.pools.get(connectionId); } finally { readLock.unlock(); } // Create the pool if not created before if (pool == null) { writeLock.lock(); try { pool = this.pools.get(connectionId); if (pool == null) { pool = new ConnectionPool( this.conf, nnAddress, ugi, this.minSize, this.maxSize, this.minActiveRatio, protocol, new PoolAlignmentContext(this.routerStateIdContext, nsId)); this.pools.put(connectionId, pool); } } finally { writeLock.unlock(); } } long clientStateId = RouterStateIdContext.getClientStateIdFromCurrentCall(nsId); pool.getPoolAlignmentContext().advanceClientStateId(clientStateId); ConnectionContext conn = pool.getConnection(); // Add a new connection to the pool if it wasn't usable if (conn == null || !conn.isUsable()) { if (!this.creatorQueue.contains(pool) && !this.creatorQueue.offer(pool)) { LOG.error("Cannot add more than {} connections at the same time", this.creatorQueueMaxSize); } } if (conn != null && conn.isClosed()) { LOG.error("We got a closed connection from {}", pool); conn = null; } return conn; }
// Verifies a pre-populated pool hands out its idle connections as usable, then keeps
// serving active connections once the idle ones are exhausted.
@Test public void testGetConnection() throws Exception { Map<ConnectionPoolId, ConnectionPool> poolMap = connManager.getPools(); final int totalConns = 10; int activeConns = 5; ConnectionPool pool = new ConnectionPool(conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10, 0.5f, ClientProtocol.class, null); addConnectionsToPool(pool, totalConns, activeConns); poolMap.put( new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS, ClientProtocol.class), pool); // All remaining connections should be usable final int remainingSlots = totalConns - activeConns; for (int i = 0; i < remainingSlots; i++) { ConnectionContext cc = pool.getConnection(); assertTrue(cc.isUsable()); cc.getClient(); activeConns++; } checkPoolConnections(TEST_USER1, totalConns, activeConns); // Ask for more and this returns an active connection ConnectionContext cc = pool.getConnection(); assertTrue(cc.isActive()); }
@ExceptionHandler(TokenAlreadyInvalidatedException.class)
protected ResponseEntity<Object> handleTokenAlreadyInvalidatedException(final TokenAlreadyInvalidatedException ex) {
    // Translate the domain exception into a 400 response carrying a structured API error body.
    final CustomError body = CustomError.builder()
            .httpStatus(HttpStatus.BAD_REQUEST)
            .header(CustomError.Header.API_ERROR.getName())
            .message(ex.getMessage())
            .build();
    return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(body);
}
// Verifies the handler returns HTTP 400 with the exception's message in the error body.
@Test void givenTokenAlreadyInvalidatedException_whenHandleTokenAlreadyInvalidatedException_thenRespondWithBadRequest() { // Given TokenAlreadyInvalidatedException ex = new TokenAlreadyInvalidatedException(); CustomError expectedError = CustomError.builder() .httpStatus(HttpStatus.BAD_REQUEST) .header(CustomError.Header.API_ERROR.getName()) .message("Token is already invalidated!\n") .build(); // When ResponseEntity<Object> responseEntity = globalExceptionHandler.handleTokenAlreadyInvalidatedException(ex); // Then assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST); CustomError actualError = (CustomError) responseEntity.getBody(); checkCustomError(expectedError, actualError); }
// Convenience overload: lists the directory using the chunk size configured in the
// host preferences ("sds.listing.chunksize").
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("sds.listing.chunksize")); }
// Integration test: files created in a room are listed back in lexicographic order.
@Test public void testListAlphanumeric() throws Exception { final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); assertTrue(new SDSListService(session, nodeid).list(room, new DisabledListProgressListener()).isEmpty()); new SDSTouchFeature(session, nodeid).touch(new Path(room, "aa", EnumSet.of(Path.Type.file)), new TransferStatus()); new SDSTouchFeature(session, nodeid).touch(new Path(room, "0a", EnumSet.of(Path.Type.file)), new TransferStatus()); new SDSTouchFeature(session, nodeid).touch(new Path(room, "a", EnumSet.of(Path.Type.file)), new TransferStatus()); final AttributedList<Path> list = new SDSListService(session, nodeid).list(room, new DisabledListProgressListener()); assertEquals(3, list.size()); assertEquals("0a", list.get(0).getName()); assertEquals("a", list.get(1).getName()); assertEquals("aa", list.get(2).getName()); new SDSDeleteFeature(session, nodeid).delete(Collections.<Path>singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// Builds a ConfigPayloadBuilder from a <config> XML element: validates the config name,
// then recursively parses each child element into the payload.
public ConfigPayloadBuilder build(Element configE) { parseConfigName(configE); ConfigPayloadBuilder payloadBuilder = new ConfigPayloadBuilder(configDefinition); for (Element child : XML.getChildren(configE)) { parseElement(child, payloadBuilder, null); } return payloadBuilder; }
// Verifies that the reserved element name <item> is rejected at the config root.
@Test void require_that_item_is_reserved_in_root() { assertThrows(IllegalArgumentException.class, () -> { Element configRoot = getDocument( "<config name=\"test.arraytypes\" version=\"1\">" + " <item>13</item>" + "</config>"); new DomConfigPayloadBuilder(null).build(configRoot); }); }
// This spill strategy never reacts to buffer consumption: always NO_ACTION.
@Override public Optional<Decision> onBufferConsumed(BufferIndexAndChannel consumedBuffer) { return Optional.of(Decision.NO_ACTION); }
// Verifies onBufferConsumed always yields NO_ACTION.
@Test void testOnBufferConsumed() { BufferIndexAndChannel bufferIndexAndChannel = new BufferIndexAndChannel(0, 0); Optional<Decision> bufferConsumedDecision = spillStrategy.onBufferConsumed(bufferIndexAndChannel); assertThat(bufferConsumedDecision).hasValue(Decision.NO_ACTION); }
// Synchronizes the inspector map with the given updates: keeps inspectors whose UUID key is
// still present, creates-and-adds inspectors for new items, and removes any inspector whose
// item no longer appears in updates (tracked via the originalItems leftover list).
public void update(final Collection<InspectedType> updates) { final ArrayList<UUIDKey> originalItems = new ArrayList<>(map.keySet()); for (final InspectedType updatable : updates) { final InspectorType inspector = map.get(updatable.getUuidKey()); if (inspector != null) { // Everything up to date. originalItems.remove(updatable.getUuidKey()); } else { final InspectorType newInspector = inspectorFactory.make(updatable); add(newInspector); map.put(updatable.getUuidKey(), newInspector); } } // Remove left overs, they were not in updates for (final UUIDKey originalItem : originalItems) { remove(map.remove(originalItem)); } }
// Verifies that repeated update calls accumulate newly added items without dropping old ones.
@Test void reAddNew() throws Exception { final ArrayList<Item> updates = new ArrayList<>(); updates.add(new Item()); list.update(updates); assertThat(list).hasSize(1); updates.add(new Item()); list.update(updates); assertThat(list).hasSize(2); }
// Resolves the cluster node owning the key by computing its hash slot first.
@Override public RedisClusterNode clusterGetNodeForKey(byte[] key) { int slot = executorService.getConnectionManager().calcSlot(key); return clusterGetNodeForSlot(slot); }
// Verifies a node is resolved for an arbitrary key.
@Test public void testClusterGetNodeForKey() { RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes()); assertThat(node).isNotNull(); }
// Reads a single JSON object from the reader and delegates to the JsonObject-based
// deserializer to reconstruct the AST Node.
public Node deserializeObject(JsonReader reader) { Log.info("Deserializing JSON to Node."); JsonObject jsonObject = reader.readObject(); return deserializeObject(jsonObject); }
// Round-trips a compilation unit with a Javadoc comment through serialize/deserialize and
// verifies the @param and @return block tags survive.
@Test void testJavaDocComment() { CompilationUnit cu = parse("public class X{ " + " /**\n" + " * Woke text.\n" + " * @param a blub\n" + " * @return true \n" + " */" + " public boolean test(int a) { return true; }\n" + "}"); String serialized = serialize(cu, false); CompilationUnit deserialized = (CompilationUnit) deserializer.deserializeObject(Json.createReader(new StringReader(serialized))); ClassOrInterfaceDeclaration classDeclaration = deserialized.getClassByName("X").get(); MethodDeclaration methodDeclaration = classDeclaration.getMethods().get(0); assertTrue(methodDeclaration.getJavadoc().isPresent()); Javadoc javadoc = methodDeclaration.getJavadoc().get(); JavadocBlockTag paramBlockTag = javadoc.getBlockTags().get(0); assertEquals("param", paramBlockTag.getTagName()); assertEquals("blub", paramBlockTag.getContent().toText()); JavadocBlockTag returnBlockTag = javadoc.getBlockTags().get(1); assertEquals("return", returnBlockTag.getTagName()); assertEquals("true", returnBlockTag.getContent().toText()); }
/**
 * Evaluates {@code fromSignal(signalName, paramName)} on the executor with a bounded wait.
 *
 * <p>Any failure (timeout, execution error, interruption) is wrapped in a
 * {@link MaestroInternalError} carrying the signal and param names. Fix over the original:
 * an {@link InterruptedException} now restores the thread's interrupt status before
 * wrapping, instead of being silently swallowed by the broad {@code catch (Exception e)}.
 */
Object getFromSignal(String signalName, String paramName) {
  try {
    return executor
        .submit(() -> fromSignal(signalName, paramName))
        .get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS);
  } catch (InterruptedException e) {
    // Preserve the interrupt so callers up the stack can still observe it.
    Thread.currentThread().interrupt();
    throw new MaestroInternalError(
        e,
        "getFromSignal throws an exception for signalName=[%s], paramName=[%s]",
        signalName,
        paramName);
  } catch (Exception e) {
    throw new MaestroInternalError(
        e,
        "getFromSignal throws an exception for signalName=[%s], paramName=[%s]",
        signalName,
        paramName);
  }
}
// Exercises every failure path of getFromSignal (wrong initiator type, null params, missing
// signal, wrong param type, missing param) and verifies each surfaces as MaestroInternalError.
@Test public void testInvalidGetFromSignal() { SignalInitiator initiator = Mockito.mock(SignalInitiator.class); when(instanceWrapper.getInitiator()).thenReturn(initiator); when(initiator.getType()).thenReturn(Initiator.Type.TIME); AssertHelper.assertThrows( "Cannot get a param from non signal initiator", MaestroInternalError.class, "getFromSignal throws an exception", () -> paramExtension.getFromSignal("signal-a", "param1")); when(initiator.getType()).thenReturn(Initiator.Type.SIGNAL); AssertHelper.assertThrows( "signal initiator's param is null", MaestroInternalError.class, "getFromSignal throws an exception", () -> paramExtension.getFromSignal("signal-a", "param1")); when(initiator.getParams()).thenReturn(Collections.emptyMap()); AssertHelper.assertThrows( "Cannot find signal", MaestroInternalError.class, "getFromSignal throws an exception", () -> paramExtension.getFromSignal("signal-not-existing", "param1")); when(initiator.getParams()) .thenReturn( singletonMap("signal-a", StringParameter.builder().evaluatedResult("foo").build())); AssertHelper.assertThrows( "Invalid param type, which must be MAP or STRING_MAP", MaestroInternalError.class, "getFromSignal throws an exception", () -> paramExtension.getFromSignal("signal-a", "param1")); when(initiator.getParams()) .thenReturn( twoItemMap( "signal-a", StringMapParameter.builder() .evaluatedResult(singletonMap("param1", "value1")) .build(), "signal-b", MapParameter.builder().evaluatedResult(singletonMap("param2", 123L)).build())); AssertHelper.assertThrows( "Cannot find param from the signal", MaestroInternalError.class, "getFromSignal throws an exception", () -> paramExtension.getFromSignal("signal-a", "param-not-existing")); AssertHelper.assertThrows( "Cannot find string param from the signal", MaestroInternalError.class, "getFromSignal throws an exception", () -> paramExtension.getFromSignal("signal-b", "param-not-existing")); }
@Override
public List<AdminUserDO> getUserList(Collection<Long> ids) {
    // Short-circuit an empty id set to an immutable empty list instead of querying the DB.
    return CollUtil.isEmpty(ids)
            ? Collections.emptyList()
            : userMapper.selectBatchIds(ids);
}
// Verifies getUserList returns only the users matching the requested ids.
@Test public void testGetUserList() { // mock 数据 AdminUserDO user = randomAdminUserDO(); userMapper.insert(user); // 测试 id 不匹配 userMapper.insert(randomAdminUserDO()); // 准备参数 Collection<Long> ids = singleton(user.getId()); // 调用 List<AdminUserDO> result = userService.getUserList(ids); // 断言 assertEquals(1, result.size()); assertEquals(user, result.get(0)); }
// Wraps a single lookup value as both the single value and a one-entry multi-value map
// keyed by SINGLE_VALUE_KEY, so consumers of either shape see the same data.
public static LookupResult single(final CharSequence singleValue) { return multi(singleValue, Collections.singletonMap(SINGLE_VALUE_KEY, singleValue)); }
// Verifies the JSON form exposes the value under both single_value and multi_value.value.
@Test public void serializeSingleNumber() { final LookupResult lookupResult = LookupResult.single(42); final JsonNode node = objectMapper.convertValue(lookupResult, JsonNode.class); assertThat(node.isNull()).isFalse(); assertThat(node.path("single_value").asInt()).isEqualTo(42); assertThat(node.path("multi_value").path("value").asInt()).isEqualTo(42); assertThat(node.path("ttl").asLong()).isEqualTo(Long.MAX_VALUE); }
// Builds a DoFnInvoker for the given DoFn via the ByteBuddy-generated invoker factory.
public static <InputT, OutputT> DoFnInvoker<InputT, OutputT> invokerFor( DoFn<InputT, OutputT> fn) { return ByteBuddyDoFnInvokerFactory.only().newByteBuddyInvoker(fn); }
// Verifies invokeOnTimer passes the current window through to an @OnTimer method that
// declares an IntervalWindow parameter.
@Test public void testOnTimerWithWindow() throws Exception { final String timerId = "my-timer-id"; final IntervalWindow testWindow = new IntervalWindow(new Instant(0), new Instant(15)); when(mockArgumentProvider.window()).thenReturn(testWindow); class SimpleTimerDoFn extends DoFn<String, String> { public IntervalWindow window = null; @TimerId(timerId) private final TimerSpec myTimer = TimerSpecs.timer(TimeDomain.PROCESSING_TIME); @ProcessElement public void process(ProcessContext c) {} @OnTimer(timerId) public void onMyTimer(IntervalWindow w) { window = w; } } SimpleTimerDoFn fn = new SimpleTimerDoFn(); DoFnInvoker<String, String> invoker = DoFnInvokers.invokerFor(fn); invoker.invokeOnTimer(TimerDeclaration.PREFIX + timerId, "", mockArgumentProvider); assertThat(fn.window, equalTo(testWindow)); }
// Registers a migration step under its number after validating all inputs; rejects
// duplicate migration numbers so the registry stays unambiguous. Returns this for chaining.
@Override public <T extends MigrationStep> MigrationStepRegistry add(long migrationNumber, String description, Class<T> stepClass) { validate(migrationNumber); requireNonNull(description, "description can't be null"); checkArgument(!description.isEmpty(), "description can't be empty"); requireNonNull(stepClass, "MigrationStep class can't be null"); checkState(!migrations.containsKey(migrationNumber), "A migration is already registered for migration number '%s'", migrationNumber); this.migrations.put(migrationNumber, new RegisteredMigrationStep(migrationNumber, description, stepClass)); return this; }
// Verifies a null step class is rejected with a descriptive NullPointerException.
@Test public void add_fails_with_NPE_is_migrationstep_class_is_null() { assertThatThrownBy(() -> { underTest.add(12, "sdsd", null); }) .isInstanceOf(NullPointerException.class) .hasMessage("MigrationStep class can't be null"); }
public static boolean isAnyBlank(final CharSequence... css) {
    // A null or zero-length varargs array counts as containing a blank.
    if (ArrayUtils.isEmpty(css)) {
        return true;
    }
    // Return as soon as the first blank element is found.
    for (int i = 0; i < css.length; i++) {
        if (isBlank(css[i])) {
            return true;
        }
    }
    return false;
}
// Covers null array, null/empty/whitespace elements, and the all-non-blank negative case.
@Test void isAnyBlank() { assertTrue(StringUtils.isAnyBlank(null)); assertTrue(StringUtils.isAnyBlank(null, "foo")); assertTrue(StringUtils.isAnyBlank(null, null)); assertTrue(StringUtils.isAnyBlank("", "bar")); assertTrue(StringUtils.isAnyBlank("bob", "")); assertTrue(StringUtils.isAnyBlank(" bob ", null)); assertTrue(StringUtils.isAnyBlank(" ", "bar")); assertFalse(StringUtils.isAnyBlank("foo", "bar")); }
// Prepares a resumable upload: for files where the target already has a partial upload
// (append.append) no longer than the local size, clears any rename/display-name from the
// base status and shifts offset/length so the transfer continues from the server's offset.
@Override public TransferStatus prepare(final Path file, final Local local, final TransferStatus parent, final ProgressListener progress) throws BackgroundException { final TransferStatus status = super.prepare(file, local, parent, progress); if(file.isFile()) { final Write.Append append = upload.append(file, status); if(append.append && append.offset <= status.getLength()) { // Append to existing file status.withRename((Path) null).withDisplayname((Path) null).setAppend(true); status.setLength(status.getLength() - append.offset); status.setOffset(append.offset); if(log.isDebugEnabled()) { log.debug(String.format("Resume file %s at offset %d and remaining length %d", file, status.getOffset(), status.getLength())); } } } return status; }
// Verifies prepare() enables append mode, drops the temporary rename target and resumes at
// the remote size (7 of 8 bytes already uploaded).
@Test public void testPrepareAppend() throws Exception { final Host host = new Host(new TestProtocol()); final ResumeFilter f = new ResumeFilter(new DisabledUploadSymlinkResolver(), new NullSession(host) { @Override public AttributedList<Path> list(final Path file, final ListProgressListener listener) { final Path f = new Path("t", EnumSet.of(Path.Type.file)); f.attributes().setSize(7L); return new AttributedList<>(Collections.singletonList(f)); } }, new UploadFilterOptions(host).withTemporary(true)); final Path t = new Path("t", EnumSet.of(Path.Type.file)); final TransferStatus status = f.prepare(t, new NullLocal("t") { @Override public LocalAttributes attributes() { return new LocalAttributes("t") { @Override public long getSize() { return 8L; } }; } @Override public boolean isFile() { return true; } }, new TransferStatus().exists(true), new DisabledProgressListener()); assertTrue(status.isAppend()); assertTrue(status.isExists()); // Temporary target assertNull(status.getRename().remote); assertEquals(7L, status.getOffset()); }
// Factory that builds an executor sharing the caller-provided counter set and state tracker.
public static IntrinsicMapTaskExecutor withSharedCounterSet( List<Operation> operations, CounterSet counters, ExecutionStateTracker executionStateTracker) { return new IntrinsicMapTaskExecutor(operations, counters, executionStateTracker); }
// Wires a 3-element read into a ParDo and verifies the per-element-processing-time
// distribution counter records one sample per element.
@Test public void testPerElementProcessingTimeCounters() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); options .as(DataflowPipelineDebugOptions.class) .setExperiments( Lists.newArrayList(DataflowElementExecutionTracker.TIME_PER_ELEMENT_EXPERIMENT)); DataflowExecutionStateTracker stateTracker = new DataflowExecutionStateTracker( ExecutionStateSampler.newForTest(), new TestDataflowExecutionState( NameContext.forStage("test-stage"), "other", null /* requestingStepName */, null /* sideInputIndex */, null /* metricsContainer */, NoopProfileScope.NOOP), counterSet, options, "test-work-item-id"); NameContext parDoName = nameForStep("s1"); // Wire a read operation with 3 elements to a ParDoOperation and assert that we count // the correct number of elements. ReadOperation read = ReadOperation.forTest( new TestReader("a", "b", "c"), new OutputReceiver(), TestOperationContext.create(counterSet, nameForStep("s0"), null, stateTracker)); ParDoOperation parDo = new ParDoOperation( new NoopParDoFn(), new OutputReceiver[0], TestOperationContext.create(counterSet, parDoName, null, stateTracker)); parDo.attachInput(read, 0); List<Operation> operations = Lists.newArrayList(read, parDo); try (IntrinsicMapTaskExecutor executor = IntrinsicMapTaskExecutor.withSharedCounterSet(operations, counterSet, stateTracker)) { executor.execute(); } CounterName counterName = CounterName.named("per-element-processing-time").withOriginalName(parDoName); Counter<Long, CounterDistribution> counter = (Counter<Long, CounterDistribution>) counterSet.getExistingCounter(counterName); assertThat(counter.getAggregate().getCount(), equalTo(3L)); }
// Processes a JSON-schema "required" array: for each named property that maps to a field,
// adds "(Required)" Javadoc and JSR-303/305 not-null annotations when enabled, and collects
// the getter/setter names so their Javadoc can be updated too. Empty entries and names with
// no matching field are skipped silently.
@Override public JDefinedClass apply(String nodeName, JsonNode node, JsonNode parent, JDefinedClass jclass, Schema schema) { List<String> requiredFieldMethods = new ArrayList<>(); JsonNode properties = schema.getContent().get("properties"); for (Iterator<JsonNode> iterator = node.elements(); iterator.hasNext(); ) { String requiredArrayItem = iterator.next().asText(); if (requiredArrayItem.isEmpty()) { continue; } JsonNode propertyNode = null; if (properties != null) { propertyNode = properties.findValue(requiredArrayItem); } String fieldName = ruleFactory.getNameHelper().getPropertyName(requiredArrayItem, propertyNode); JFieldVar field = jclass.fields().get(fieldName); if (field == null) { continue; } addJavaDoc(field); if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations()) { addNotNullAnnotation(field); } if (ruleFactory.getGenerationConfig().isIncludeJsr305Annotations()) { addNonnullAnnotation(field); } requiredFieldMethods.add(getGetterName(fieldName, field.type(), node)); requiredFieldMethods.add(getSetterName(fieldName, node)); } updateGetterSetterJavaDoc(jclass, requiredFieldMethods); return jclass; }
// Verifies only fields named in the required array receive the "(Required)" Javadoc line.
@Test public void shouldUpdateJavaDoc() throws JClassAlreadyExistsException { JDefinedClass jclass = new JCodeModel()._class(TARGET_CLASS_NAME); jclass.field(JMod.PRIVATE, jclass.owner().ref(String.class), "fooBar"); jclass.field(JMod.PRIVATE, jclass.owner().ref(String.class), "foo"); ObjectMapper mapper = new ObjectMapper(); ArrayNode requiredNode = mapper.createArrayNode().add("fooBar"); rule.apply("Class", requiredNode, null, jclass, new Schema(null, requiredNode, null)); JDocComment fooBarJavaDoc = jclass.fields().get("fooBar").javadoc(); JDocComment fooJavaDoc = jclass.fields().get("foo").javadoc(); assertThat(fooBarJavaDoc.size(), is(1)); assertThat((String) fooBarJavaDoc.get(0), is("\n(Required)")); assertThat(fooJavaDoc.size(), is(0)); }
// Looks up the area id for an IP via the shared SEARCHER (presumably an ip2region index —
// TODO confirm the search result is a bare numeric area code). Input is trimmed first;
// @SneakyThrows rethrows the searcher's checked exceptions unchecked.
@SneakyThrows public static Integer getAreaId(String ip) { return Integer.parseInt(SEARCHER.search(ip.trim())); }
// Verifies an IP inside the 120.202.4.0-255 range maps to area 420600.
@Test public void testGetAreaId_string() { // 120.202.4.0|120.202.4.255|420600 Integer areaId = IPUtils.getAreaId("120.202.4.50"); assertEquals(420600, areaId); }
// Produces checkpoint-task configs: throws RetriableException while the initial consumer
// group load is still pending (so the framework retries), returns no tasks when replication
// or checkpoint emission is disabled or no groups exist, and otherwise partitions the known
// groups across min(maxTasks, groupCount) tasks.
@Override public List<Map<String, String>> taskConfigs(int maxTasks) { if (knownConsumerGroups == null) { // If knownConsumerGroup is null, it means the initial loading has not finished. // An exception should be thrown to trigger the retry behavior in the framework. log.debug("Initial consumer loading has not yet completed"); throw new RetriableException("Timeout while loading consumer groups."); } // if the replication is disabled, known consumer group is empty, or checkpoint emission is // disabled by setting 'emit.checkpoints.enabled' to false, the interval of checkpoint emission // will be negative and no 'MirrorCheckpointTask' will be created if (!config.enabled() || knownConsumerGroups.isEmpty() || config.emitCheckpointsInterval().isNegative()) { return Collections.emptyList(); } int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); List<List<String>> groupsPartitioned = ConnectorUtils.groupPartitions(new ArrayList<>(knownConsumerGroups), numTasks); return IntStream.range(0, numTasks) .mapToObj(i -> config.taskConfigForConsumerGroups(groupsPartitioned.get(i), i)) .collect(Collectors.toList()); }
// Verifies taskConfigs throws RetriableException before the group load has completed.
@Test public void testConsumerGroupInitializeTimeout() { MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); MirrorCheckpointConnector connector = new MirrorCheckpointConnector(null, config); assertThrows( RetriableException.class, () -> connector.taskConfigs(1), "taskConfigs should throw exception when initial loading ConsumerGroup timeout" ); }
public AthenzDomain getParent() {
    // Drop the last dot-delimited component, e.g. "a.b.c" -> "a.b".
    final String parentName = name.substring(0, lastDot());
    return new AthenzDomain(parentName);
}
// Verifies the parent of "home.john.myapp" is "home.john".
@Test void parent_domain_is_without_name_suffix() { assertEquals(new AthenzDomain("home.john"), new AthenzDomain("home.john.myapp").getParent()); }
// Factory for the Flatten.PCollections transform that merges a PCollectionList into one.
public static <T> PCollections<T> pCollections() { return new PCollections<>(); }
// Verifies Flatten merges inputs whose coders differ (nullable big-endian vs varint longs).
@Test @Category({ValidatesRunner.class, FlattenWithHeterogeneousCoders.class}) public void testFlattenMultipleCoders() throws CannotProvideCoderException { PCollection<Long> bigEndianLongs = p.apply( "BigEndianLongs", Create.of(0L, 1L, 2L, 3L, null, 4L, 5L, null, 6L, 7L, 8L, null, 9L) .withCoder(NullableCoder.of(BigEndianLongCoder.of()))); PCollection<Long> varLongs = p.apply("VarLengthLongs", GenerateSequence.from(0).to(5)).setCoder(VarLongCoder.of()); PCollection<Long> flattened = PCollectionList.of(bigEndianLongs) .and(varLongs) .apply(Flatten.pCollections()) .setCoder(NullableCoder.of(VarLongCoder.of())); PAssert.that(flattened) .containsInAnyOrder( 0L, 0L, 1L, 1L, 2L, 3L, 2L, 4L, 5L, 3L, 6L, 7L, 4L, 8L, 9L, null, null, null); p.run(); }
// Lazily initializes the reader, then delegates; caller owns closing the returned iterator.
@Override public CloseableIterator<ScannerReport.SyntaxHighlightingRule> readComponentSyntaxHighlighting(int fileRef) { ensureInitialized(); return delegate.readComponentSyntaxHighlighting(fileRef); }
// Round-trips two highlighting rules through the writer and reads them back in order.
@Test public void verify_readComponentSyntaxHighlighting() { writer.writeComponentSyntaxHighlighting(COMPONENT_REF, of(SYNTAX_HIGHLIGHTING_1, SYNTAX_HIGHLIGHTING_2)); CloseableIterator<ScannerReport.SyntaxHighlightingRule> res = underTest.readComponentSyntaxHighlighting(COMPONENT_REF); assertThat(res).toIterable().containsExactly(SYNTAX_HIGHLIGHTING_1, SYNTAX_HIGHLIGHTING_2); res.close(); }
// Convenience constructor: a per-second rate over a WindowedSum plus a cumulative total.
public Meter(MetricName rateMetricName, MetricName totalMetricName) { this(TimeUnit.SECONDS, new WindowedSum(), rateMetricName, totalMetricName); }
// Records values across many windows and verifies the total is cumulative while the rate
// reflects only the samples inside the current window.
@Test public void testMeter() { Map<String, String> emptyTags = Collections.emptyMap(); MetricName rateMetricName = new MetricName("rate", "test", "", emptyTags); MetricName totalMetricName = new MetricName("total", "test", "", emptyTags); Meter meter = new Meter(rateMetricName, totalMetricName); List<NamedMeasurable> stats = meter.stats(); assertEquals(2, stats.size()); NamedMeasurable total = stats.get(0); NamedMeasurable rate = stats.get(1); assertEquals(rateMetricName, rate.name()); assertEquals(totalMetricName, total.name()); Rate rateStat = (Rate) rate.stat(); CumulativeSum totalStat = (CumulativeSum) total.stat(); MetricConfig config = new MetricConfig(); double nextValue = 0.0; double expectedTotal = 0.0; long now = 0; int intervalMs = 100; double delta = 5.0; // Record values in multiple windows and verify that rates are reported // for time windows and that the total is cumulative. for (int i = 1; i <= 100; i++) { for (; now < i * 1000; now += intervalMs, nextValue += delta) { expectedTotal += nextValue; meter.record(config, nextValue, now); } assertEquals(expectedTotal, totalStat.measure(config, now), EPS); long windowSizeMs = rateStat.windowSize(config, now); long windowStartMs = Math.max(now - windowSizeMs, 0); double sampledTotal = 0.0; double prevValue = nextValue - delta; for (long timeMs = now - 100; timeMs >= windowStartMs; timeMs -= intervalMs, prevValue -= delta) sampledTotal += prevValue; assertEquals(sampledTotal * 1000 / windowSizeMs, rateStat.measure(config, now), EPS); } }
@Override
public Set<KubevirtPort> ports(String networkId) {
    checkArgument(!Strings.isNullOrEmpty(networkId), ERR_NULL_PORT_NET_ID);
    // Snapshot the ports belonging to this network into an immutable view.
    final Set<KubevirtPort> matching = kubevirtPortStore.ports().stream()
            .filter(port -> port.networkId().equals(networkId))
            .collect(Collectors.toSet());
    return ImmutableSet.copyOf(matching);
}
// Verifies ports() filters by network id and returns empty for an unknown network.
@Test public void testGetPortsByNetworkId() { createBasicPorts(); assertEquals("Number of port did not match", 1, target.ports(NETWORK_ID).size()); assertEquals("Number of port did not match", 0, target.ports(UNKNOWN_ID).size()); }
public String convert(Object o) {
    // Walk the linked chain of converters from the head, concatenating each one's output.
    final StringBuilder result = new StringBuilder();
    for (Converter<Object> current = headTokenConverter; current != null; current = current.getNext()) {
        result.append(current.convert(o));
    }
    return result.toString();
}
// Verifies the %d pattern honors an explicit time zone (Australia/Perth = UTC+8).
@Test public void dateWithTimeZone() { TimeZone utc = TimeZone.getTimeZone("UTC"); Calendar cal = Calendar.getInstance(utc); cal.set(2003, 4, 20, 10, 55); FileNamePattern fnp = new FileNamePattern("foo%d{yyyy-MM-dd'T'HH:mm, Australia/Perth}", context); // Perth is 8 hours ahead of UTC assertEquals("foo2003-05-20T18:55", fnp.convert(cal.getTime())); }
/**
 * Returns a reference to the class's static NOT_FOUND sentinel field, creating it on
 * first use.
 *
 * <p>Fix: the field is now only declared when absent. The previous version added it
 * unconditionally, so a second call on the same class would hit JCodeModel's duplicate
 * field rejection instead of behaving like the "get or add" the name promises.
 */
JFieldRef getOrAddNotFoundVar(JDefinedClass jclass) {
    if (!jclass.fields().containsKey(NOT_FOUND_VALUE_FIELD)) {
        jclass.field(PROTECTED | STATIC | FINAL, Object.class, NOT_FOUND_VALUE_FIELD,
                _new(jclass.owner()._ref(Object.class)));
    }
    return jclass.staticRef(NOT_FOUND_VALUE_FIELD);
}
// Verifies the sentinel field reference is created and returned.
@Test public void shouldAddNotFoundField() { JFieldRef var = rule.getOrAddNotFoundVar(type); assertThat(var, notNullValue()); }
// Motan selectors carry no handle payload, so this always returns the empty string.
@Override protected String buildHandle(final List<URIRegisterDTO> uriList, final SelectorDO selectorDO) { return ""; }
// Verifies buildHandle yields the empty string regardless of input.
@Test public void testBuildHandle() { List<URIRegisterDTO> list = new ArrayList<>(); list.add(URIRegisterDTO.builder().build()); assertEquals(StringUtils.EMPTY, shenyuClientRegisterMotanService.buildHandle(list, SelectorDO.builder().build())); }
// Static factory wrapping the constructor; argument validation happens downstream.
public static PipelineMetricRegistry create(MetricRegistry metricRegistry, String pipelinesPrefix, String rulesPrefix) { return new PipelineMetricRegistry(metricRegistry, pipelinesPrefix, rulesPrefix); }
// Verifies create() rejects a null registry (NPE) and null/empty/blank prefixes (IAE).
@Test void validation() { final var metricRegistry = new MetricRegistry(); assertThatThrownBy(() -> PipelineMetricRegistry.create(null, "PIPELINE", "RULE")) .isInstanceOf(NullPointerException.class); assertThatThrownBy(() -> PipelineMetricRegistry.create(metricRegistry, null, "RULE")) .isInstanceOf(IllegalArgumentException.class); assertThatThrownBy(() -> PipelineMetricRegistry.create(metricRegistry, "", "RULE")) .isInstanceOf(IllegalArgumentException.class); assertThatThrownBy(() -> PipelineMetricRegistry.create(metricRegistry, " ", "RULE")) .isInstanceOf(IllegalArgumentException.class); assertThatThrownBy(() -> PipelineMetricRegistry.create(metricRegistry, "PIPELINE", null)) .isInstanceOf(IllegalArgumentException.class); assertThatThrownBy(() -> PipelineMetricRegistry.create(metricRegistry, "PIPELINE", "")) .isInstanceOf(IllegalArgumentException.class); assertThatThrownBy(() -> PipelineMetricRegistry.create(metricRegistry, "PIPELINE", " ")) .isInstanceOf(IllegalArgumentException.class); }
// Flushes the buffered local stream backing this output stream.
@Override public void flush() throws IOException { mLocalOutputStream.flush(); }
// Verifies flush() is forwarded to the mocked local stream and the content hash is exposed.
@Test @PrepareForTest(OSSOutputStream.class) public void testFlush() throws Exception { PowerMockito.whenNew(BufferedOutputStream.class) .withArguments(any(DigestOutputStream.class)).thenReturn(mLocalOutputStream); OSSOutputStream stream = new OSSOutputStream("testBucketName", "testKey", mOssClient, sConf .getList(PropertyKey.TMP_DIRS)); stream.flush(); stream.close(); assertEquals(mEtag, stream.getContentHash().get()); Mockito.verify(mLocalOutputStream).flush(); }
/**
 * Fetches a consumer's running info from the broker at {@code address}.
 *
 * <p>Completes the returned future with the decoded {@link ConsumerRunningInfo} on
 * SUCCESS, or exceptionally with an {@link MQClientException} on a non-success code.
 *
 * <p>Fix: the original chained {@code thenAccept}, so when the remoting invoke itself
 * failed (timeout, connection error) the returned future was never completed and callers
 * hung forever. {@code whenComplete} now propagates that failure.
 */
@Override
public CompletableFuture<ConsumerRunningInfo> getConsumerRunningInfo(String address, GetConsumerRunningInfoRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<ConsumerRunningInfo> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUMER_RUNNING_INFO, requestHeader);
    remotingClient.invoke(address, request, timeoutMillis).whenComplete((response, throwable) -> {
        if (throwable != null) {
            // Transport-level failure: propagate instead of leaving the future incomplete.
            future.completeExceptionally(throwable);
            return;
        }
        if (response.getCode() == ResponseCode.SUCCESS) {
            ConsumerRunningInfo info = ConsumerRunningInfo.decode(response.getBody(), ConsumerRunningInfo.class);
            future.complete(info);
        } else {
            log.warn("getConsumerRunningInfo getResponseCommand failed, {} {}", response.getCode(), response.getRemark());
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    });
    return future;
}
// Verifies a SUCCESS response is decoded into a ConsumerRunningInfo with empty properties.
@Test public void assertGetConsumerRunningInfoWithSuccess() throws Exception { ConsumerRunningInfo responseBody = new ConsumerRunningInfo(); setResponseSuccess(RemotingSerializable.encode(responseBody)); GetConsumerRunningInfoRequestHeader requestHeader = mock(GetConsumerRunningInfoRequestHeader.class); CompletableFuture<ConsumerRunningInfo> actual = mqClientAdminImpl.getConsumerRunningInfo(defaultBrokerAddr, requestHeader, defaultTimeout); ConsumerRunningInfo result = actual.get(); assertNotNull(result); assertEquals(0, result.getProperties().size()); }
// Hash is derived solely from name — equals must agree (same-name instances are equal).
@Override public int hashCode() { return name.hashCode(); }
// Objects equal by name (t1/t2) must share a hash; differing objects (t3, t4) should not collide here.
@Test public void hashcode() { assertEquals(t1.hashCode(), t2.hashCode()); assertNotEquals(t1.hashCode(), t3.hashCode()); assertNotEquals(t1.hashCode(), t4.hashCode()); }
/**
 * Returns whether this window overlaps the given window.
 *
 * @throws IllegalArgumentException if {@code other} is not a {@code TimeWindow}
 */
@Override
public boolean overlap(final Window other) throws IllegalArgumentException {
    // Only windows of the same concrete type are comparable.
    if (getClass() != other.getClass()) {
        throw new IllegalArgumentException("Cannot compare windows of different type. Other window has type " + other.getClass() + ".");
    }
    final TimeWindow that = (TimeWindow) other;
    // Two intervals overlap iff each one starts before the other ends.
    final boolean thisStartsBeforeThatEnds = startMs < that.endMs;
    final boolean thatStartsBeforeThisEnds = that.startMs < endMs;
    return thisStartsBeforeThatEnds && thatStartsBeforeThisEnds;
}
// overlap() must reject a window of a different concrete type with IllegalArgumentException.
@Test public void cannotCompareTimeWindowWithDifferentWindowType() { assertThrows(IllegalArgumentException.class, () -> window.overlap(sessionWindow)); }
/**
 * Reads a two-byte little-endian unsigned value from the underlying stream.
 *
 * @return the value in the range 0..65535
 * @throws EOFException if the stream ends before two bytes are read
 * @throws IOException  if the underlying stream fails
 */
public int readWPShort() throws IOException {
    final int lo = in.read();
    final int hi = in.read();
    // InputStream.read() returns -1 at end of stream.
    if (lo < 0 || hi < 0) {
        throw new EOFException();
    }
    // Little-endian: second byte is the high-order byte.
    return (hi << 8) | lo;
}
// Reading a short from an empty stream must raise EOFException rather than returning garbage.
@Test public void testReadShort() throws Exception { try (WPInputStream wpInputStream = emptyWPStream()) { wpInputStream.readWPShort(); fail("should have thrown EOF"); } catch (EOFException e) { //swallow } }
// Polls the server for work, runs it via a fresh JobRunner, and reports the attempt outcome.
// On UnregisteredAgentException it rebuilds the SSL infrastructure so the agent re-registers next loop.
// The runtime info is marked idle both before fetching work and in the finally block after the attempt.
private WorkAttempt doWork() { AgentIdentifier agentIdentifier = agentIdentifier(); LOG.debug("[Agent Loop] {} is checking for work from Go", agentIdentifier); try { getAgentRuntimeInfo().idle(); Work work = client.getWork(getAgentRuntimeInfo()); LOG.debug("[Agent Loop] Got work from server: [{}]", work.description()); runner = new JobRunner(); final AgentWorkContext agentWorkContext = new AgentWorkContext(agentIdentifier, client, manipulator, getAgentRuntimeInfo(), packageRepositoryExtension, scmExtension, taskExtension, artifactExtension, pluginRequestProcessorRegistry); runner.run(work, agentWorkContext); LOG.debug("[Agent Loop] Successfully executed work."); return WorkAttempt.fromWork(work); } catch (UnregisteredAgentException e) { LOG.warn("[Agent Loop] Agent is not registered. [{}] Registering with server on next iteration.", e.getMessage()); sslInfrastructureService.createSslInfrastructure(); return WorkAttempt.FAILED; } finally { getAgentRuntimeInfo().idle(); } }
// A registered agent with no cookie should fetch one, then perform the stubbed work successfully.
@Test void shouldRetrieveCookieIfNotPresent() { agentController = createAgentController(); agentController.init(); when(loopServer.getCookie(eq(agentController.getAgentRuntimeInfo()))).thenReturn("cookie"); when(sslInfrastructureService.isRegistered()).thenReturn(true); when(loopServer.getWork(agentController.getAgentRuntimeInfo())).thenReturn(work); when(agentRegistry.uuid()).thenReturn(agentUuid); when(pluginJarLocationMonitor.hasRunAtLeastOnce()).thenReturn(true); assertThat(agentController.performWork()).isEqualTo(WorkAttempt.OK); verify(work).doWork(any(), any()); }
// Convenience overload: maps the numeric code to the Code enum, then delegates.
public static TriRpcStatus fromCode(int code) { return fromCode(Code.fromCode(code)); }
// Known code 2 maps to UNKNOWN; an out-of-range code (1000) must throw.
@Test void fromCode() { Assertions.assertEquals(Code.UNKNOWN, TriRpcStatus.fromCode(2).code); try { TriRpcStatus.fromCode(1000); fail(); } catch (Throwable t) { // pass } }
/**
 * Combined time/distance pairing threshold.
 *
 * <p>7000 ms is used because radar hits are normally updated every 13 seconds or
 * less, so any two aircraft will have hits within 6.5 seconds of each other;
 * 6500 is rounded up to 7000.
 *
 * @return the weighted sum of the time term (ms) and the distance term (feet)
 */
public double pairingThreshold() {
    final double timeTerm = 7_000 * timeCoef();
    final double distanceTerm = distCoef() * trackPairingDistanceInNM() * Spherical.feetPerNM();
    return timeTerm + distanceTerm;
}
// With a zero time coefficient the threshold must be purely the distance term (10 NM in feet).
@Test public void testDerivedPairThresholdReflectsTimeCoef() { double TOLERANCE = 0.0001; PairingConfig noTimeProps = new PairingConfig(timeWindow, 10, 0, 1.0); assertEquals( noTimeProps.pairingThreshold(), 10.0 * Spherical.feetPerNM(), TOLERANCE ); }
/**
 * Returns whether {@code host} refers to the local host.
 *
 * @param host host name or IP, may be null
 * @return true if it matches the loopback IP pattern or equals "localhost" (case-insensitive)
 */
public static boolean isLocalHost(String host) {
    if (host == null) {
        return false;
    }
    return LOCAL_IP_PATTERN.matcher(host).matches() || host.equalsIgnoreCase(LOCALHOST_KEY);
}
// "localhost" and 127.x.x.x addresses are local; other addresses are not.
@Test void testIsLocalHost() { assertTrue(NetUtils.isLocalHost("localhost")); assertTrue(NetUtils.isLocalHost("127.1.2.3")); assertFalse(NetUtils.isLocalHost("128.1.2.3")); }
/**
 * Pads {@code string} on the right with {@code pattern} until it is at least
 * {@code len} characters long.
 *
 * @param len     minimum length of the result
 * @param pattern text appended repeatedly (if longer than one character the
 *                result may exceed {@code len})
 * @param string  the starting text; null is treated as empty
 * @return the padded string
 */
@Nonnull
public static String fillRight(int len, @Nonnull String pattern, @Nullable String string) {
    final StringBuilder result = new StringBuilder(string != null ? string : "");
    for (; result.length() < len; ) {
        result.append(pattern);
    }
    return result.toString();
}
// Pads on the right up to length 4; a null input string is treated as empty.
@Test void testFillRight() { assertEquals("baaa", StringUtil.fillRight(4, "a", "b")); assertEquals("aaaa", StringUtil.fillRight(4, "a", null)); }
/**
 * Returns the managed-instance provider name, or null when the instance is not
 * externally managed.
 */
public String getManagedInstanceProviderName() {
    return managedInstanceService.isInstanceExternallyManaged()
        ? managedInstanceService.getProviderName()
        : null;
}
// When the instance is not externally managed, the provider name must be null.
@Test public void getManagedInstanceProvider_whenInstanceNotManaged_shouldReturnNull() { mockIdentityProviders(List.of()); mockManagedInstance(false); assertThat(commonSystemInformation.getManagedInstanceProviderName()) .isNull(); }
// Computes table statistics from a bounded sample of partitions. Short-circuits to empty stats
// when statistics are disabled, and to zero stats when there are no partitions. Corrupted column
// statistics are tolerated (logged, empty returned) only when the ignore-corrupted session flag is set.
@Override public TableStatistics getTableStatistics( ConnectorSession session, SchemaTableName table, Map<String, ColumnHandle> columns, Map<String, Type> columnTypes, List<HivePartition> partitions) { if (!isStatisticsEnabled(session)) { return TableStatistics.empty(); } if (partitions.isEmpty()) { return createZeroStatistics(columns, columnTypes); } int sampleSize = getPartitionStatisticsSampleSize(session); List<HivePartition> partitionsSample = getPartitionsSample(partitions, sampleSize); try { Map<String, PartitionStatistics> statisticsSample = statisticsProvider.getPartitionsStatistics(session, table, partitionsSample); validatePartitionStatistics(table, statisticsSample); return getTableStatistics(columns, columnTypes, partitions, statisticsSample); } catch (PrestoException e) { if (e.getErrorCode().equals(HIVE_CORRUPTED_COLUMN_STATISTICS.toErrorCode()) && isIgnoreCorruptedStatistics(session)) { log.error(e); return TableStatistics.empty(); } throw e; } }
// With a sample size of 1, the statistics provider must be asked for exactly one of the two partitions.
@Test public void testGetTableStatisticsSampling() { MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider((session, table, hivePartitions) -> { assertEquals(table, TABLE); assertEquals(hivePartitions.size(), 1); return ImmutableMap.of(); }, quickStatsProvider); TestingConnectorSession session = new TestingConnectorSession(new HiveSessionProperties( new HiveClientConfig().setPartitionStatisticsSampleSize(1), new OrcFileWriterConfig(), new ParquetFileWriterConfig(), new CacheConfig()) .getSessionProperties()); statisticsProvider.getTableStatistics( session, TABLE, ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(partition("p1=string1/p2=1234"), partition("p1=string1/p2=1235"))); }
// Validates the requested replication factor against the usable brokers (non-positive count,
// zero unfenced brokers, and too-few total brokers each raise InvalidReplicationFactorException),
// then places each partition's replicas via the rack-aware RackList.
@Override public TopicAssignment place( PlacementSpec placement, ClusterDescriber cluster ) throws InvalidReplicationFactorException { RackList rackList = new RackList(random, cluster.usableBrokers()); throwInvalidReplicationFactorIfNonPositive(placement.numReplicas()); throwInvalidReplicationFactorIfZero(rackList.numUnfencedBrokers()); throwInvalidReplicationFactorIfTooFewBrokers(placement.numReplicas(), rackList.numTotalBrokers()); List<List<Integer>> placements = new ArrayList<>(placement.numPartitions()); for (int partition = 0; partition < placement.numPartitions(); partition++) { placements.add(rackList.place(placement.numReplicas())); } return new TopicAssignment( placements.stream().map(replicas -> new PartitionAssignment(replicas, cluster)).collect(Collectors.toList()) ); }
// Requesting replication factor 3 with only 2 registered brokers must fail with a descriptive message.
@Test public void testNotEnoughBrokers() { MockRandom random = new MockRandom(); StripedReplicaPlacer placer = new StripedReplicaPlacer(random); assertEquals("The target replication factor of 3 cannot be reached because only " + "2 broker(s) are registered.", assertThrows(InvalidReplicationFactorException.class, () -> place(placer, 0, 1, (short) 3, Arrays.asList( new UsableBroker(11, Optional.of("1"), false), new UsableBroker(10, Optional.of("1"), false)))).getMessage()); }
// Expands a glob expression into matching paths; a pattern with no glob metacharacters
// is returned as-is as a single-element list.
public static ImmutableList<String> glob(final String glob) { Path path = getGlobPath(glob); int globIndex = getGlobIndex(path); if (globIndex < 0) { return of(glob); } return doGlob(path, searchPath(path, globIndex)); }
// A literal path with no glob metacharacters should be returned unchanged.
@Test public void should_glob_direct_files() { ImmutableList<String> files = Globs.glob("src/test/resources/details/foo.json"); assertThat(files.contains("src/test/resources/details/foo.json"), is(true)); }
/**
 * Decides whether the Sentinel filter should be loaded for this invoker.
 *
 * <p>The per-interface parameter takes precedence; when it is blank the global
 * RPC config value is used, defaulting to enabled.
 */
@Override
public boolean needToLoad(FilterInvoker invoker) {
    AbstractInterfaceConfig<?, ?> config = invoker.getConfig();
    String flag = config.getParameter(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED);
    return StringUtils.isNotBlank(flag)
        ? Boolean.parseBoolean(flag)
        : RpcConfigs.getOrDefaultValue(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, true);
}
// Exercises the precedence order: default enabled, per-interface "false" disables, blank falls back
// to the global config, and a global "false" disables when the interface parameter is blank.
@Test public void testNeedToLoadProvider() { SentinelSofaRpcProviderFilter providerFilter = new SentinelSofaRpcProviderFilter(); ProviderConfig providerConfig = new ProviderConfig(); providerConfig.setInterfaceId(Serializer.class.getName()); providerConfig.setId("AAA"); FilterInvoker invoker = new FilterInvoker(null, null, providerConfig); assertTrue(providerFilter.needToLoad(invoker)); providerConfig.setParameter(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, "false"); assertFalse(providerFilter.needToLoad(invoker)); providerConfig.setParameter(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, ""); assertTrue(providerFilter.needToLoad(invoker)); RpcConfigs.putValue(SentinelConstants.SOFA_RPC_SENTINEL_ENABLED, "false"); assertFalse(providerFilter.needToLoad(invoker)); }
// This operation is never serialized; any attempt to obtain a class id is a programming error.
@Override public int getClassId() { throw new UnsupportedOperationException(getClass().getName() + " is only used locally!"); }
// getClassId() must always throw since the operation is local-only.
@Test(expected = UnsupportedOperationException.class) public void testGetId() { operation.getClassId(); }
// Convenience overload: shuts down the executor without a logger.
public static void shutdownThreadPool(ExecutorService executor) { shutdownThreadPool(executor, null); }
// When awaitTermination is interrupted, shutdownNow is retried; the mock expects four invocations.
@Test void testShutdownThreadPoolWithInterruptedException() throws InterruptedException { ExecutorService executor = mock(ExecutorService.class); when(executor.awaitTermination(100, TimeUnit.MILLISECONDS)).thenThrow(new InterruptedException()); ThreadUtils.shutdownThreadPool(executor); verify(executor, times(4)).shutdownNow(); }
// Returns a defensive immutable snapshot of the policies held by the store.
@Override public Set<NetworkPolicy> networkPolicies() { return ImmutableSet.copyOf(k8sNetworkPolicyStore.networkPolicies()); }
// After creating the fixture policies, exactly one policy must be visible through the service.
@Test public void testGetNetworkPolicies() { createBasicNetworkPolicies(); assertEquals("Number of network policies did not match", 1, target.networkPolicies().size()); }