focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns the canonical instance equal to {@code sample}, registering {@code sample}
 * itself as the canonical instance when none exists yet.
 */
@Override
public E intern(E sample) {
    // Fast path: an equal element is already interned.
    E existing = map.get(sample);
    if (existing == null) {
        // Slow path: attempt to register; putIfAbsent reports a concurrent winner, if any.
        var raced = map.putIfAbsent(sample, sample);
        existing = (raced == null) ? sample : raced;
    }
    return existing;
}
/** Interning must drain the weak cache's pending maintenance on both the insert and the read path. */
@Test
public void intern_weak_cleanup() {
    var interner = (WeakInterner<Int>) Interner.<Int>newWeakInterner();
    interner.cache.drainStatus = BoundedLocalCache.REQUIRED;
    var canonical = new Int(1);
    // First intern inserts the value and must reset the drain status to IDLE.
    var interned1 = interner.intern(canonical);
    assertThat(interned1).isSameInstanceAs(canonical);
    assertThat(interner.cache.drainStatus).isEqualTo(BoundedLocalCache.IDLE);
    interner.cache.drainStatus = BoundedLocalCache.REQUIRED;
    // Second intern hits the existing mapping and must also trigger a drain.
    var interned2 = interner.intern(canonical);
    assertThat(interned2).isSameInstanceAs(canonical);
    assertThat(interner.cache.drainStatus).isEqualTo(BoundedLocalCache.IDLE);
}
@Override @Transactional(rollbackFor = Exception.class) @CacheEvict(value = RedisKeyConstants.PERMISSION_MENU_ID_LIST, allEntries = true) // allEntries 清空所有缓存,因为此时不知道 id 对应的 permission 是多少。直接清理,简单有效 public void deleteMenu(Long id) { // 校验是否还有子菜单 if (menuMapper.selectCountByParentId(id) > 0) { throw exception(MENU_EXISTS_CHILDREN); } // 校验删除的菜单是否存在 if (menuMapper.selectById(id) == null) { throw exception(MENU_NOT_EXISTS); } // 标记删除 menuMapper.deleteById(id); // 删除授予给角色的权限 permissionService.processMenuDeleted(id); }
/** Deleting an existing leaf menu removes the row and cascades to role permissions. */
@Test
public void testDeleteMenu_success() {
    // mock data
    MenuDO menuDO = randomPojo(MenuDO.class);
    menuMapper.insert(menuDO);
    // prepare arguments
    Long id = menuDO.getId();
    // invoke
    menuService.deleteMenu(id);
    // assert: the row is gone and the permission cleanup hook was invoked
    MenuDO dbMenuDO = menuMapper.selectById(id);
    assertNull(dbMenuDO);
    verify(permissionService).processMenuDeleted(id);
}
/** Returns this object's name. */
@Override
public String getName() {
    return _name;
}
/** The ltrim/rtrim/trim scalar wrappers must undo the corresponding padding functions. */
@Test
public void testStringTrimTransformFunction() {
    // ltrim strips the left padding added by lpad.
    ExpressionContext expression = RequestContextUtils.getExpression(
        String.format("ltrim(lpad(%s, 50, ' '))", STRING_ALPHANUM_SV_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getName(), "ltrim");
    testTransformFunction(transformFunction, _stringAlphaNumericSVValues);
    // rtrim strips the right padding added by rpad.
    expression = RequestContextUtils.getExpression(
        String.format("rtrim(rpad(%s, 50, ' '))", STRING_ALPHANUM_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getName(), "rtrim");
    testTransformFunction(transformFunction, _stringAlphaNumericSVValues);
    // trim strips padding applied on both sides.
    expression = RequestContextUtils.getExpression(
        String.format("trim(rpad(lpad(%s, 50, ' '), 100, ' '))", STRING_ALPHANUM_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getName(), "trim");
    testTransformFunction(transformFunction, _stringAlphaNumericSVValues);
    // SQL-style trim with an explicit "leading <chars> from" clause.
    expression = RequestContextUtils.getExpression(
        String.format("trim(leading ' _&|' from lpad(%s, 10, '& |_'))", STRING_ALPHANUM_SV_COLUMN));
    transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getName(), "trim");
    testTransformFunction(transformFunction, _stringAlphaNumericSVValues);
}
/**
 * Splits {@code text} into tokens: every match of {@code regexExpression} becomes its own
 * token, and any non-empty literal text between (and after) matches is kept as a token too.
 */
public List<String> tokenize(String text) {
    List<String> tokens = new ArrayList<>();
    Matcher regexMatcher = regexExpression.matcher(text);
    int lastIndexOfPrevMatch = 0;
    while (regexMatcher.find(lastIndexOfPrevMatch))
    // this is where the magic happens:
    // the regexp is used to find a matching pattern for substitution
    {
        int beginIndexOfNextMatch = regexMatcher.start();
        // Emit the literal run between the previous match and this one, if non-empty.
        String prevToken = text.substring(lastIndexOfPrevMatch, beginIndexOfNextMatch);
        if (!prevToken.isEmpty()) {
            tokens.add(prevToken);
        }
        String currentMatch = regexMatcher.group();
        tokens.add(currentMatch);
        lastIndexOfPrevMatch = regexMatcher.end();
        if (lastIndexOfPrevMatch < text.length() && text.charAt(lastIndexOfPrevMatch) != '_') {
            // because it is sometimes positioned after the "_", but it should be positioned
            // before the "_"
            // NOTE(review): this rewinds one char so adjacent matches can overlap on the
            // shared delimiter — confirm the non-'_' condition is intended.
            --lastIndexOfPrevMatch;
        }
    }
    // Trailing literal after the last match.
    String tail = text.substring(lastIndexOfPrevMatch);
    if (!tail.isEmpty()) {
        tokens.add(tail);
    }
    return tokens;
}
/** Text containing none of the configured compound tokens comes back as a single token. */
@Test
void testTokenize_happyPath_10() {
    // given
    CompoundCharacterTokenizer tokenizer = new CompoundCharacterTokenizer(
        new HashSet<>(Arrays.asList("_201_", "_202_")));
    String text = "_100_101_102_103_104_";
    // when
    List<String> tokens = tokenizer.tokenize(text);
    // then
    assertEquals(Collections.singletonList("_100_101_102_103_104_"), tokens);
}
/**
 * Resets the command space of the given process: constructing DefaultProcessCommands in
 * "clean" mode (third argument {@code true}) does the reset; closing it releases the file.
 */
public static void reset(File directory, int processNumber) {
    try (DefaultProcessCommands processCommands = new DefaultProcessCommands(directory, processNumber, true)) {
        // nothing else to do than open file and reset the space of specified process
    }
}
/** A negative process number must be rejected with an IllegalArgumentException. */
@Test
public void reset_fails_if_processNumber_is_less_than_0() throws Exception {
    int processNumber = -2;
    expectProcessNumberNoValidIAE(() -> DefaultProcessCommands.reset(temp.newFolder(), processNumber), processNumber);
}
/** Closes this instance by delegating to the wrapped delegate. */
@Override
public void close() {
    delegate.close();
}
/** close() must be forwarded to the delegate exactly once. */
@Test
public void shouldCloseDelegate() {
    // When:
    serializer.close();
    // Then:
    verify(delegate).close();
}
@Override // mappedStatementId 参数,暂时没有用。以后,可以基于 mappedStatementId + DataPermission 进行缓存 public List<DataPermissionRule> getDataPermissionRule(String mappedStatementId) { // 1. 无数据权限 if (CollUtil.isEmpty(rules)) { return Collections.emptyList(); } // 2. 未配置,则默认开启 DataPermission dataPermission = DataPermissionContextHolder.get(); if (dataPermission == null) { return rules; } // 3. 已配置,但禁用 if (!dataPermission.enable()) { return Collections.emptyList(); } // 4. 已配置,只选择部分规则 if (ArrayUtil.isNotEmpty(dataPermission.includeRules())) { return rules.stream().filter(rule -> ArrayUtil.contains(dataPermission.includeRules(), rule.getClass())) .collect(Collectors.toList()); // 一般规则不会太多,所以不采用 HashSet 查询 } // 5. 已配置,只排除部分规则 if (ArrayUtil.isNotEmpty(dataPermission.excludeRules())) { return rules.stream().filter(rule -> !ArrayUtil.contains(dataPermission.excludeRules(), rule.getClass())) .collect(Collectors.toList()); // 一般规则不会太多,所以不采用 HashSet 查询 } // 6. 已配置,全部规则 return rules; }
/** An @DataPermission annotation with an include list keeps only the listed rule class. */
@Test
public void testGetDataPermissionRule_04() {
    // prepare arguments
    String mappedStatementId = randomString();
    // mock the annotation context from TestClass04
    DataPermissionContextHolder.add(AnnotationUtils.findAnnotation(TestClass04.class, DataPermission.class));
    // invoke
    List<DataPermissionRule> result = dataPermissionRuleFactory.getDataPermissionRule(mappedStatementId);
    // assert: only DataPermissionRule01 survives the include filter
    assertEquals(1, result.size());
    assertEquals(DataPermissionRule01.class, result.get(0).getClass());
}
/**
 * Applies the given field-type profile to every listed index set, optionally rotating each
 * updated index set immediately. Profile and index-set support are validated up front.
 */
public void setProfile(final Set<String> indexSetsIds, final String profileId, final boolean rotateImmediately) {
    checkProfile(profileId);
    checkAllIndicesSupportProfileChange(indexSetsIds);
    for (String indexSetId : indexSetsIds) {
        try {
            indexSetService.get(indexSetId).ifPresent(indexSetConfig -> {
                var updatedIndexSetConfig = setProfileForIndexSet(profileId, indexSetConfig);
                if (rotateImmediately) {
                    updatedIndexSetConfig.ifPresent(this::cycleIndexSet);
                }
            });
        } catch (Exception ex) {
            // Log with the failing index set's id for context, then rethrow to the caller.
            LOG.error("Failed to update field type in index set : " + indexSetId, ex);
            throw ex;
        }
    }
}
/** A profile mapping a reserved field (timestamp) must be rejected with BadRequestException. */
@Test
void testThrowsExceptionWhenTryingToSetProfileWithReservedFields() {
    IndexFieldTypeProfile profile = new IndexFieldTypeProfile(
        "000000000000000000000013",
        "Wrong!",
        "Profile with reserved fields",
        new CustomFieldMappings(List.of(new CustomFieldMapping(FIELD_TIMESTAMP, "ip"))));
    doReturn(Optional.of(profile)).when(profileService).get("000000000000000000000013");
    assertThrows(BadRequestException.class, () -> toTest.setProfile(Set.of(), "000000000000000000000013", false));
}
/**
 * Tokenizes {@code text}: optionally expands common English contractions, then pads the
 * configured delimiters with spaces and splits on whitespace, dropping empty tokens.
 */
@Override
public String[] split(String text) {
    if (splitContraction) {
        // Irregular contractions first (won't / shan't / ain't), then the generic patterns.
        text = WONT_CONTRACTION.matcher(text).replaceAll("$1ill not");
        text = SHANT_CONTRACTION.matcher(text).replaceAll("$1ll not");
        text = AINT_CONTRACTION.matcher(text).replaceAll("$1m not");
        for (Pattern regexp : NOT_CONTRACTIONS) {
            text = regexp.matcher(text).replaceAll("$1 not");
        }
        for (Pattern regexp : CONTRACTIONS2) {
            text = regexp.matcher(text).replaceAll("$1 $2");
        }
        for (Pattern regexp : CONTRACTIONS3) {
            text = regexp.matcher(text).replaceAll("$1 $2 $3");
        }
    }
    // Surround delimiters with spaces so the whitespace split isolates them as tokens.
    text = DELIMITERS[0].matcher(text).replaceAll(" $1 ");
    text = DELIMITERS[1].matcher(text).replaceAll(" $1");
    text = DELIMITERS[2].matcher(text).replaceAll(" $1");
    text = DELIMITERS[3].matcher(text).replaceAll(" . ");
    text = DELIMITERS[4].matcher(text).replaceAll(" $1 ");
    String[] words = WHITESPACE.split(text);
    // Re-attach a trailing period to a known abbreviation (e.g. "etc" -> "etc.").
    // NOTE(review): the standalone "." token itself is kept as well — confirm intended.
    if (words.length > 1 && words[words.length-1].equals(".")) {
        if (EnglishAbbreviations.contains(words[words.length-2])) {
            words[words.length-2] = words[words.length-2] + ".";
        }
    }
    // Drop empty tokens produced by the padding above.
    ArrayList<String> result = new ArrayList<>();
    for (String token : words) {
        if (!token.isEmpty()) {
            result.add(token);
        }
    }
    return result.toArray(new String[0]);
}
/** Splitting with contraction expansion enabled must produce the exact expected token stream. */
@Test
public void testSplitContraction() {
    System.out.println("tokenize contraction");
    String text = "Here are some examples of contractions: 'tis, " +
        "'twas, ain't, aren't, Can't, could've, couldn't, didn't, doesn't, " +
        "don't, hasn't, he'd, he'll, he's, how'd, how'll, how's, i'd, i'll, i'm, " +
        "i've, isn't, it's, might've, mightn't, must've, mustn't, Shan't, " +
        "she'd, she'll, she's, should've, shouldn't, that'll, that's, " +
        "there's, they'd, they'll, they're, they've, wasn't, we'd, we'll, " +
        "we're, weren't, what'd, what's, when'd, when'll, when's, " +
        "where'd, where'll, where's, who'd, who'll, who's, why'd, why'll, " +
        "why's, Won't, would've, wouldn't, you'd, you'll, you're, you've";
    // Expected tokens: "n't" forms expand to "not"; possessive-like forms keep the clitic.
    String[] expResult = {"Here", "are", "some", "examples", "of", "contractions", ":",
        "'t", "is", ",", "'t", "was", ",", "am", "not", ",", "are", "not", ",",
        "Can", "not", ",", "could", "'ve", ",", "could", "not", ",", "did", "not", ",",
        "does", "not", ",", "do", "not", ",", "has", "not", ",", "he", "'d", ",",
        "he", "'ll", ",", "he", "'s", ",", "how", "'d", ",", "how", "'ll", ",",
        "how", "'s", ",", "i", "'d", ",", "i", "'ll", ",", "i", "'m", ",",
        "i", "'ve", ",", "is", "not", ",", "it", "'s", ",", "might", "'ve", ",",
        "might", "not", ",", "must", "'ve", ",", "must", "not", ",", "Shall", "not", ",",
        "she", "'d", ",", "she", "'ll", ",", "she", "'s", ",", "should", "'ve", ",",
        "should", "not", ",", "that", "'ll", ",", "that", "'s", ",", "there", "'s", ",",
        "they", "'d", ",", "they", "'ll", ",", "they", "'re", ",", "they", "'ve", ",",
        "was", "not", ",", "we", "'d", ",", "we", "'ll", ",", "we", "'re", ",",
        "were", "not", ",", "what", "'d", ",", "what", "'s", ",", "when", "'d", ",",
        "when", "'ll", ",", "when", "'s", ",", "where", "'d", ",", "where", "'ll", ",",
        "where", "'s", ",", "who", "'d", ",", "who", "'ll", ",", "who", "'s", ",",
        "why", "'d", ",", "why", "'ll", ",", "why", "'s", ",", "Will", "not", ",",
        "would", "'ve", ",", "would", "not", ",", "you", "'d", ",", "you", "'ll",
        ",", "you", "'re", ",", "you", "'ve"};
    SimpleTokenizer instance = new SimpleTokenizer(true);
    String[] result = instance.split(text);
    assertEquals(expResult.length, result.length);
    for (int i = 0; i < result.length; i++) {
        assertEquals(expResult[i], result[i]);
    }
}
/**
 * Returns the configured region, or {@code null} when no region is set.
 */
public RegionId region() {
    // Missing key yields null rather than a parsed RegionId.
    String raw = get(REGION, null);
    if (raw == null) {
        return null;
    }
    return regionId(raw);
}
/** Region can be read, replaced, and cleared through the region accessors. */
@Test
public void setRegion() {
    loadLayout(L1);
    assertEquals("not region-1", R1, cfg.region());
    cfg.region(NEW_REGION);
    assertEquals("not new region", NEW_REGION, cfg.region());
    // Setting null clears the region.
    cfg.region(null);
    assertNull("region not cleared", cfg.region());
}
/** Exposes the locally cached entries by delegating to the local cache view. */
@Override
public Map<K, V> getCachedMap() {
    return localCacheView.getCachedMap();
}
/** replace(key, old, new) succeeds only while the current value matches, and populates the local cache. */
@Test
public void testReplaceOldValueSuccess() {
    RLocalCachedMap<SimpleKey, SimpleValue> map = redisson.getLocalCachedMap(LocalCachedMapOptions.name("test"));
    Map<SimpleKey, SimpleValue> cache = map.getCachedMap();
    map.put(new SimpleKey("1"), new SimpleValue("2"));
    // First replace matches the current value "2" and succeeds.
    boolean res = map.replace(new SimpleKey("1"), new SimpleValue("2"), new SimpleValue("3"));
    Assertions.assertTrue(res);
    // Second replace still expects "2" but the value is now "3", so it must fail.
    boolean res1 = map.replace(new SimpleKey("1"), new SimpleValue("2"), new SimpleValue("3"));
    Assertions.assertFalse(res1);
    SimpleValue val1 = map.get(new SimpleKey("1"));
    Assertions.assertEquals("3", val1.getValue());
    assertThat(cache.size()).isEqualTo(1);
}
/** Returns whether the remote file exists and is accessible via the session's client. */
@Override
public boolean find(final Path file, final ListProgressListener listener) {
    return session.getClient().existsAndIsAccessible(file.getAbsolute());
}
/** The account's private home directory must be reported as existing. */
@Test
public void testFindPrivate() throws Exception {
    final MantaFindFeature f = new MantaFindFeature(session);
    assertTrue(f.find(new Path(
        new MantaAccountHomeInfo(session.getHost().getCredentials().getUsername(),
            session.getHost().getDefaultPath()).getAccountRoot(),
        MantaAccountHomeInfo.HOME_PATH_PRIVATE, EnumSet.of(Path.Type.directory))));
}
/**
 * Sends an HTTP alert with the given title and content. Never throws: any failure is
 * captured in the returned AlertResult's success flag and message.
 */
public AlertResult send(String title, String content) {
    AlertResult alertResult = new AlertResult();
    // Reject a missing/unsupported request method up front.
    if (httpParams.getMethod() == null) {
        alertResult.setSuccess(false);
        alertResult.setMessage("Request types are not supported");
        return alertResult;
    }
    try {
        createHttpRequest(title, content);
        String resp = this.getResponseString(httpRequest);
        alertResult.setSuccess(true);
        alertResult.setMessage(resp);
    } catch (Exception e) {
        // Swallow and report via the result object; callers inspect getSuccess().
        logger.error("send http alert msg failed", e);
        alertResult.setSuccess(false);
        alertResult.setMessage("send http request alert fail.");
    }
    return alertResult;
}
/** A fully configured HTTP alert sends successfully end to end. */
@Test
public void sendTest() {
    HttpAlert httpAlert = new HttpAlert();
    AlertConfig alertConfig = new AlertConfig();
    alertConfig.setType(HttpConstants.TYPE);
    alertConfig.setParam(httpConfig);
    httpAlert.setConfig(alertConfig);
    AlertResult alertResult = httpAlert.send(AlertBaseConstant.ALERT_TEMPLATE_TITLE, AlertBaseConstant.ALERT_TEMPLATE_MSG);
    Assert.assertEquals(true, alertResult.getSuccess());
}
/** Static factory: builds an OSSOutputFile for the given OSS location string with null metrics. */
static OSSOutputFile fromLocation(
    OSS client, String location, AliyunProperties aliyunProperties) {
    return new OSSOutputFile(
        client, new OSSURI(location), aliyunProperties, MetricsContext.nullMetrics());
}
/** A null location must be rejected with an NPE that names the parameter. */
@Test
public void testFromLocation() {
    assertThatThrownBy(() -> OSSOutputFile.fromLocation(ossClient, null, aliyunProperties))
        .isInstanceOf(NullPointerException.class)
        .hasMessageContaining("location cannot be null");
}
/** Routes the message to the relation (connection) derived from its internal message type. */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    ctx.tellNext(msg, msg.getInternalType().getRuleNodeConnection());
}
/** Every TbMsgType must be routed to the connection derived from its internal type, with no failures. */
@Test
void givenAllTypes_whenOnMsg_then_allTypesSupported() throws TbNodeException {
    // GIVEN: one message per declared message type
    List<TbMsg> tbMsgList = new ArrayList<>();
    var tbMsgTypes = TbMsgType.values();
    for (var msgType : tbMsgTypes) {
        tbMsgList.add(getTbMsg(msgType));
    }
    // WHEN
    for (TbMsg tbMsg : tbMsgList) {
        node.onMsg(ctx, tbMsg);
    }
    // THEN: each message forwarded unchanged on its type's connection, none failed
    ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    ArgumentCaptor<String> nodeConnectionCapture = ArgumentCaptor.forClass(String.class);
    verify(ctx, times(tbMsgList.size())).tellNext(newMsgCaptor.capture(), nodeConnectionCapture.capture());
    verify(ctx, never()).tellFailure(any(), any());
    var resultMsgs = newMsgCaptor.getAllValues();
    var resultNodeConnections = nodeConnectionCapture.getAllValues();
    for (int i = 0; i < resultMsgs.size(); i++) {
        var msg = resultMsgs.get(i);
        assertThat(msg).isNotNull();
        assertThat(msg.getType()).isNotNull();
        assertThat(msg.getType()).isEqualTo(msg.getInternalType().name());
        assertThat(msg).isSameAs(tbMsgList.get(i));
        assertThat(resultNodeConnections.get(i))
            .isEqualTo(msg.getInternalType().getRuleNodeConnection());
    }
}
/** This map state is read-only; any bulk mutation attempt fails with the shared error. */
@Override
public void putAll(Map<K, V> map) {
    throw MODIFICATION_ATTEMPT_ERROR;
}
/** putAll on the read-only map state must throw while the existing entries stay readable. */
@Test
void testPutAll() throws Exception {
    // Existing entries are readable.
    assertThat(mapState.contains(1L)).isTrue();
    long value = mapState.get(1L);
    assertThat(value).isEqualTo(5L);
    assertThat(mapState.contains(2L)).isTrue();
    value = mapState.get(2L);
    assertThat(value).isEqualTo(5L);
    // Bulk mutation is rejected.
    Map<Long, Long> nMap = new HashMap<>();
    nMap.put(1L, 7L);
    nMap.put(2L, 7L);
    assertThatThrownBy(() -> mapState.putAll(nMap))
        .isInstanceOf(UnsupportedOperationException.class);
}
/** Counts records per key into the given materialized store, delegating with an empty name. */
@Override
public KTable<K, Long> count(final Materialized<K, Long, KeyValueStore<Bytes, byte[]>> materialized) {
    return count(NamedInternal.empty(), materialized);
}
/** count(null) must be rejected with a NullPointerException. */
@SuppressWarnings("unchecked")
@Test
public void shouldThrowNullPointOnCountWhenMaterializedIsNull() {
    assertThrows(NullPointerException.class, () -> groupedTable.count((Materialized) null));
}
/**
 * Converts a node-id to column-encoding map into protobuf column encodings. A node with
 * additional sequence encodings expands into one entry per sequence; otherwise a single
 * entry is emitted for the node itself.
 */
public static List<DwrfProto.ColumnEncoding> toColumnEncodings(Map<Integer, ColumnEncoding> columnEncodingsByNodeId) {
    ImmutableList.Builder<DwrfProto.ColumnEncoding> columnEncodings = ImmutableList.builder();
    for (Entry<Integer, ColumnEncoding> entry : columnEncodingsByNodeId.entrySet()) {
        int nodeId = entry.getKey();
        ColumnEncoding columnEncoding = entry.getValue();
        if (columnEncoding.getAdditionalSequenceEncodings().isPresent()) {
            // Expand each sequence into its own column encoding entry.
            Map<Integer, DwrfSequenceEncoding> sequences = columnEncoding.getAdditionalSequenceEncodings().get();
            for (Entry<Integer, DwrfSequenceEncoding> sequenceEntry : sequences.entrySet()) {
                int sequence = sequenceEntry.getKey();
                DwrfSequenceEncoding sequenceEncoding = sequenceEntry.getValue();
                columnEncodings.add(toColumnEncoding(nodeId, sequence, sequenceEncoding));
            }
        } else {
            columnEncodings.add(toColumnEncoding(nodeId, columnEncoding));
        }
    }
    return columnEncodings.build();
}
/** A column with two sequence encodings expands into two proto encodings carrying each sequence's data. */
@Test(dataProvider = "sequenceKeyProvider")
public void testToColumnEncodingsWithSequence(DwrfProto.KeyInfo key1, DwrfProto.KeyInfo key2) {
    int expectedDictionarySize1 = 5;
    int expectedSequenceId1 = 0;
    ColumnEncoding valueEncoding1 = new ColumnEncoding(DIRECT, expectedDictionarySize1);
    DwrfSequenceEncoding sequenceEncoding1 = new DwrfSequenceEncoding(key1, valueEncoding1);
    int expectedDictionarySize2 = 10;
    int expectedSequenceId2 = 5;
    ColumnEncoding valueEncoding2 = new ColumnEncoding(DICTIONARY, expectedDictionarySize2);
    DwrfSequenceEncoding sequenceEncoding2 = new DwrfSequenceEncoding(key2, valueEncoding2);
    ImmutableSortedMap<Integer, DwrfSequenceEncoding> additionalSequenceEncodings = ImmutableSortedMap.of(
        expectedSequenceId1, sequenceEncoding1,
        expectedSequenceId2, sequenceEncoding2);
    ColumnEncoding columnEncoding = new ColumnEncoding(DIRECT, 0, Optional.of(additionalSequenceEncodings));
    List<DwrfProto.ColumnEncoding> actual = toColumnEncodings(ImmutableMap.of(COLUMN_ID, columnEncoding));
    assertEquals(actual.size(), 2);
    // First expanded sequence encoding.
    DwrfProto.ColumnEncoding actualValueEncoding1 = actual.get(0);
    assertEquals(actualValueEncoding1.getColumn(), COLUMN_ID);
    assertEquals(actualValueEncoding1.getKind(), DwrfProto.ColumnEncoding.Kind.DIRECT);
    assertEquals(actualValueEncoding1.getDictionarySize(), expectedDictionarySize1);
    assertEquals(actualValueEncoding1.getSequence(), expectedSequenceId1);
    assertEquals(actualValueEncoding1.getKey(), key1);
    // Second expanded sequence encoding.
    DwrfProto.ColumnEncoding actualValueEncoding2 = actual.get(1);
    assertEquals(actualValueEncoding2.getColumn(), COLUMN_ID);
    assertEquals(actualValueEncoding2.getKind(), DwrfProto.ColumnEncoding.Kind.DICTIONARY);
    assertEquals(actualValueEncoding2.getDictionarySize(), expectedDictionarySize2);
    assertEquals(actualValueEncoding2.getSequence(), expectedSequenceId2);
    assertEquals(actualValueEncoding2.getKey(), key2);
}
/**
 * Returns {@code false} when the table has no instance assignment config map at all;
 * otherwise returns {@code true} iff every configured assignment (possibly none) is both
 * pool based and replica-group based. A null entry disqualifies the table.
 */
static boolean isTableUsingInstancePoolAndReplicaGroup(@Nonnull TableConfig tableConfig) {
    Map<String, InstanceAssignmentConfig> configs = tableConfig.getInstanceAssignmentConfigMap();
    if (configs == null) {
        return false;
    }
    boolean allQualify = true;
    // Deliberately iterate every entry (no early exit) so each config is inspected.
    for (InstanceAssignmentConfig config : configs.values()) {
        if (config == null) {
            allQualify = false;
        } else {
            allQualify &= (config.getTagPoolConfig().isPoolBased()
                && config.getReplicaGroupPartitionConfig().isReplicaGroupBased());
        }
    }
    return allQualify;
}
/** A realtime table whose tag/pool config is not pool based must not qualify. */
@Test
public void testNoPoolsRealtimeTable() {
    // Pool flag is false even though replica-group is enabled.
    InstanceAssignmentConfig config =
        new InstanceAssignmentConfig(new InstanceTagPoolConfig("DefaultTenant", false, 0, null), null,
            new InstanceReplicaGroupPartitionConfig(true, 0, 0, 0, 0, 0, false, null), null, false);
    TableConfig tableConfig =
        new TableConfig("table", TableType.REALTIME.name(), new SegmentsValidationAndRetentionConfig(),
            new TenantConfig("DefaultTenant", "DefaultTenant", null), new IndexingConfig(), new TableCustomConfig(null),
            null, null, null, null, Map.of("CONSUMING", config), null, null, null, null, null, null, false, null,
            null, null);
    Assert.assertFalse(TableConfigUtils.isTableUsingInstancePoolAndReplicaGroup(tableConfig));
}
/** Handles rule-change events: refreshes the rule cache, then notifies the subclass hook. No-op for empty change sets. */
@Override
public void onRuleChanged(final List<RuleData> changed, final DataEventTypeEnum eventType) {
    if (CollectionUtils.isEmpty(changed)) {
        return;
    }
    LOG.info("onRuleChanged, changed:{}, eventType:{}", JsonUtils.toJson(changed), JsonUtils.toJson(eventType));
    this.updateRuleCache();
    this.afterRuleChanged(changed, eventType);
}
/** An empty change list leaves the cache untouched; a non-empty one populates it. */
@Test
public void testOnRuleChanged() {
    List<RuleData> empty = Lists.newArrayList();
    DataEventTypeEnum eventType = mock(DataEventTypeEnum.class);
    listener.onRuleChanged(empty, eventType);
    assertFalse(listener.getCache().containsKey(ConfigGroupEnum.RULE.name()));
    List<RuleData> ruleDatas = Lists.newArrayList(mock(RuleData.class));
    listener.onRuleChanged(ruleDatas, eventType);
    assertTrue(listener.getCache().containsKey(ConfigGroupEnum.RULE.name()));
}
/** Resolves the request's selectors into the engine descriptor, applies the package filter, then prunes empty branches. */
void resolveSelectors(EngineDiscoveryRequest request, CucumberEngineDescriptor engineDescriptor) {
    Predicate<String> packageFilter = buildPackageFilter(request);
    resolve(request, engineDescriptor, packageFilter);
    filter(engineDescriptor, packageFilter);
    pruneTree(engineDescriptor);
}
/** A package selector discovers all six children under the engine package. */
@Test
void resolveRequestWithPackageSelector() {
    DiscoverySelector resource = selectPackage("io.cucumber.junit.platform.engine");
    EngineDiscoveryRequest discoveryRequest = new SelectorRequest(resource);
    resolver.resolveSelectors(discoveryRequest, testDescriptor);
    assertEquals(6, testDescriptor.getChildren().size());
}
/**
 * Static factory for a UriTemplate over the given template and charset.
 * The hard-coded boolean forwards to the UriTemplate constructor — presumably the
 * "encode" flag; confirm against the constructor's signature.
 */
public static UriTemplate create(String template, Charset charset) {
    return new UriTemplate(template, true, charset);
}
/** Nested expressions such as {foo{bar}} are treated as literals and percent-encoded on expansion. */
@Test
void nestedExpressionsAreLiterals() {
    /*
     * the template of {foo{bar}}, will be treated as literals as nested templates are ignored
     */
    String template = "https://www.example.com/{foo{bar}}/{baz}";
    UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
    // Only the well-formed {baz} counts as a variable.
    assertThat(uriTemplate.getVariables()).contains("baz").hasSize(1);
    Map<String, Object> variables = new LinkedHashMap<>();
    variables.put("baz", "stuff");
    String expandedTemplate = uriTemplate.expand(variables);
    assertThat(expandedTemplate)
        .isEqualToIgnoringCase("https://www.example.com/%7Bfoo%7Bbar%7D%7D/stuff");
    assertThat(URI.create(expandedTemplate)).isNotNull(); // this should fail, the result is not a
    // valid uri
}
/**
 * Best-effort factory: tries each known constructor shape in turn to build an instance of
 * {@code exceptionClass} with the given message and cause. Returns {@code null} when no
 * shape fits; rethrows unexpected construction failures wrapped in a RuntimeException.
 */
@SuppressWarnings("checkstyle:npathcomplexity")
public static <T extends Throwable> T tryCreateExceptionWithMessageAndCause(Class<T> exceptionClass, String message,
        @Nullable Throwable cause) {
    for (ConstructorMethod method : ConstructorMethod.values()) {
        try {
            return method.cloneException(exceptionClass, message, cause);
        } catch (ClassCastException | WrongMethodTypeException | IllegalAccessException | SecurityException
                | NoSuchMethodException | ClassNotFoundException ignored) {
            // Deliberately ignored: this constructor shape does not fit; try the next one.
        } catch (Throwable t) {
            throw new RuntimeException("Exception creation failed ", t);
        }
    }
    return null;
}
/** Creating via a message-only constructor that sets its own cause must not blow up. */
@Test
public void testCanCreateExceptionsWithMessageAndCauseWhenExceptionHasCauseSetImplicitlyByMessageConstructor() {
    ExceptionUtil.tryCreateExceptionWithMessageAndCause(
        ExceptionThatHasCauseImplicitlyByMessageConstructor.class, "", new RuntimeException());
}
/** Exposes the wrapped message's text for JSON serialization. */
@JsonProperty
public String getMessage() {
    return message.getMessage();
}
/** The summary must expose the same text as the wrapped message. */
@Test
public void testGetMessage() throws Exception {
    assertEquals(message.getMessage(), messageSummary.getMessage());
}
/** Logs at WARN only when that level is enabled, avoiding needless work otherwise. */
public static void printIfWarnEnabled(Logger logger, String s, Object... args) {
    if (logger.isWarnEnabled()) {
        logger.warn(s, args);
    }
}
/** With WARN enabled, the message and arguments are forwarded verbatim exactly once. */
@Test
void testPrintIfWarnEnabled() {
    Logger logger = Mockito.mock(Logger.class);
    Mockito.when(logger.isWarnEnabled()).thenReturn(true);
    LoggerUtils.printIfWarnEnabled(logger, "test", "arg1", "arg2", "arg3");
    Mockito.verify(logger, Mockito.times(1)).warn("test", "arg1", "arg2", "arg3");
}
/**
 * Builds the MySQL wire-protocol packet sequence for a query response:
 * field-count packet, one column-definition packet per query header, then an EOF packet.
 */
public static Collection<DatabasePacket> buildQueryResponsePackets(final QueryResponseHeader queryResponseHeader,
        final int characterSet, final int statusFlags) {
    Collection<DatabasePacket> result = new LinkedList<>();
    List<QueryHeader> queryHeaders = queryResponseHeader.getQueryHeaders();
    result.add(new MySQLFieldCountPacket(queryHeaders.size()));
    for (QueryHeader each : queryHeaders) {
        // NOTE(review): getTable() is passed for both the table and org_table positions.
        result.add(new MySQLColumnDefinition41Packet(characterSet, getColumnDefinitionFlag(each), each.getSchema(),
                each.getTable(), each.getTable(), each.getColumnLabel(), each.getColumnName(), each.getColumnLength(),
                MySQLBinaryColumnType.valueOfJDBCType(each.getColumnType()), each.getDecimals(), false));
    }
    result.add(new MySQLEofPacket(statusFlags));
    return result;
}
/** The built packet collection is non-empty and consists of the expected MySQL packet types. */
@Test
void assertBuildQueryResponsePackets() {
    QueryHeader queryHeader1 = new QueryHeader("schema1", "table1", "columnLabel1", "columnName1", 5, "VARCHAR", 4, 6, false, true, false, true);
    QueryHeader queryHeader2 = new QueryHeader("schema2", "table2", "columnLabel2", "columnName2", 8, "VARCHAR", 7, 9, false, true, true, true);
    List<QueryHeader> queryHeaders = Arrays.asList(queryHeader1, queryHeader2);
    QueryResponseHeader queryResponseHeader = new QueryResponseHeader(queryHeaders);
    Collection<DatabasePacket> actual = ResponsePacketBuilder.buildQueryResponsePackets(queryResponseHeader, 255, 0);
    assertTrue(actual.stream().findAny().isPresent());
    assertThat(actual.stream().findAny().get(), anyOf(instanceOf(MySQLFieldCountPacket.class),
        instanceOf(MySQLColumnDefinition41Packet.class), instanceOf(MySQLEofPacket.class)));
}
/** Extracts the XML node at {@code path} and converts the result to a Node. */
public Node getNode(String path) {
    return convertObjectTo(get(path), Node.class);
}
/** The Envelope node can be extracted from a namespaced SOAP document and traversed by path. */
@Test
public void xmlPathCanExtractNodeFromSoap() throws Exception {
    // Given
    String soap = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n" +
        "<env:Envelope \n" +
        " xmlns:soapenc=\"http://schemas.xmlsoap.org/soap/encoding/\" \n" +
        " xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" \n" +
        " xmlns:env=\"http://schemas.xmlsoap.org/soap/envelope/\" \n" +
        " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n" +
        " <env:Header/>\n" +
        "\n" +
        "<env:Body>\n" +
        " <n1:importProjectResponse \n" +
        " xmlns:n1=\"n1\" \n" +
        " xmlns:n2=\"n2\" \n" +
        " xsi:type=\"n2:ArrayOfProjectImportResultCode\">\n" +
        " <n2:ProjectImportResultCode>\n" +
        " <n2:code>1</n2:code>\n" +
        " <n2:message>Project 'test1' import was successful.</n2:message>\n" +
        " </n2:ProjectImportResultCode>\n" +
        " </n1:importProjectResponse>\n" +
        "</env:Body></env:Envelope>";
    // When
    XmlPath xmlPath = new XmlPath(soap);
    Node node = xmlPath.getNode("Envelope");
    // Then
    assertThat(node.<String>getPath("Body.importProjectResponse.ProjectImportResultCode.code"), equalTo("1"));
}
/**
 * Dynamic dispatch for the methods supported on this Days value:
 * getDays() with no args and daysBetween(start, end) with two datetime args.
 * Any other method/arity combination is unsupported.
 */
@Override
public SelType call(String methodName, SelType[] args) {
    if (args.length == 0 && "getDays".equals(methodName)) {
        return SelLong.of((long) val.getDays());
    } else if (args.length == 2 && "daysBetween".equals(methodName)) {
        return new SelJodaDateTimeDays(
            Days.daysBetween(
                ((SelJodaDateTime) args[0]).getInternalVal(),
                ((SelJodaDateTime) args[1]).getInternalVal()));
    }
    throw new UnsupportedOperationException(
        type() + " DO NOT support calling method: " + methodName + " with args: " + Arrays.toString(args));
}
/** Calling an unsupported method name must throw UnsupportedOperationException. */
@Test(expected = UnsupportedOperationException.class)
public void testInvalidCallMethod() {
    one.call("getWeeks", new SelType[] {});
}
/** Splits each element on commas (via the shared split helper) and collects the distinct results into a Set. */
public static Set<String> splitByComma(Set<String> values) {
    return split(values).collect(Collectors.toSet());
}
/** splitByComma tolerates null, empty sets, blanks and trailing commas, and de-duplicates results. */
@Test
public void testSplitByComma() {
    Assertions.assertThat(StringUtils.splitByComma((Set<String>) null))
        .isInstanceOf(Set.class)
        .isEmpty();
    Assertions.assertThat(StringUtils.splitByComma(Collections.emptySet()))
        .isInstanceOf(Set.class)
        .isEmpty();
    // Blank and empty fragments are dropped.
    Assertions.assertThat(StringUtils.splitByComma(List.of("one", "two,three,", "", " ")))
        .hasSize(3)
        .containsExactlyInAnyOrder("one", "two", "three");
    Assertions.assertThat(StringUtils.splitByComma(List.of("one", "two,three")))
        .hasSize(3)
        .containsExactlyInAnyOrder("one", "two", "three");
    Assertions.assertThat(StringUtils.splitByComma(List.of("one", "two,three")))
        .hasSize(3)
        .containsExactlyInAnyOrder("one", "two", "three");
}
/** Prepares fetch requests and polls, routing completions to the success/failure handlers. */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure);
}
/** A disconnected fetch response yields no records and leaves subscription state untouched. */
@Test
public void testFetchDisconnected() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, sendFetches());
    // The second argument marks the response as a disconnect.
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0), true);
    networkClientDelegate.poll(time.timer(0));
    assertEmptyFetch("Should not return records or advance position on disconnect");
    // disconnects should have no effect on subscription state
    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    assertTrue(subscriptions.isFetchable(tp0));
    assertEquals(0, subscriptions.position(tp0).offset);
}
/**
 * Deprecated value-transform overload: validates the supplier, adapts it to the with-key
 * variant, and delegates with no explicit name.
 */
@Override
@Deprecated
public <VR> KStream<K, VR> transformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, ? extends VR> valueTransformerSupplier,
        final String... stateStoreNames) {
    Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
    return doTransformValues(
        toValueTransformerWithKeySupplier(valueTransformerSupplier),
        NamedInternal.empty(),
        stateStoreNames);
}
/** transformValues(null) must throw an NPE with the documented message. */
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullValueTransformerSupplierOnTransformValues() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.transformValues((org.apache.kafka.streams.kstream.ValueTransformerSupplier<Object, Object>) null));
    assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null"));
}
static int readHeapBuffer(InputStream f, ByteBuffer buf) throws IOException { int bytesRead = f.read(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining()); if (bytesRead < 0) { // if this resulted in EOF, don't update position return bytesRead; } else { buf.position(buf.position() + bytesRead); return bytesRead; } }
/** readHeapBuffer must honor the buffer limit and return 0 once the buffer is full. */
@Test
public void testHeapLimit() throws Exception {
    ByteBuffer readBuffer = ByteBuffer.allocate(20);
    readBuffer.limit(8);
    MockInputStream stream = new MockInputStream(7);
    // First read returns the stream's 7 available bytes.
    int len = DelegatingSeekableInputStream.readHeapBuffer(stream, readBuffer);
    Assert.assertEquals(7, len);
    Assert.assertEquals(7, readBuffer.position());
    Assert.assertEquals(8, readBuffer.limit());
    // Second read can only fill up to the limit (one more byte).
    len = DelegatingSeekableInputStream.readHeapBuffer(stream, readBuffer);
    Assert.assertEquals(1, len);
    Assert.assertEquals(8, readBuffer.position());
    Assert.assertEquals(8, readBuffer.limit());
    // Buffer full: nothing more can be read.
    len = DelegatingSeekableInputStream.readHeapBuffer(stream, readBuffer);
    Assert.assertEquals(0, len);
    readBuffer.flip();
    Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY, 0, 8), readBuffer);
}
/** Renders the matchers as a comma-separated string. */
@Override
public String toString() {
    return StringUtils.join(matchers, ',');
}
/** toString joins the parsed matchers back with commas, round-tripping the input. */
@Test
void shouldReturnCommaSplittedString() {
    assertThat(new Matcher("JH,Pavan").toString()).isEqualTo("JH,Pavan");
}
/** Returns the cached (or newly computed) value for the given class. */
@Override
public R apply(Class<?> c) {
    return cache.get(c);
}
/** The cache computes once and returns the identical instance on repeat lookups. */
@Test
void basic() {
    ClassValueCache<String> cache = new ClassValueCache<>(Class::toString);
    String fromCache = cache.apply(String.class);
    assertThat(fromCache, is("class java.lang.String"));
    // Second lookup must hit the cache and return the very same instance.
    assertThat(cache.apply(String.class), sameInstance(fromCache));
}
/** Formats the event through the converter chain; returns the empty string while not started. */
public String doLayout(ILoggingEvent event) {
    if (!isStarted()) {
        return CoreConstants.EMPTY_STRING;
    }
    return writeLoopOnConverters(event);
}
/** Even without an explicit exception converter in the pattern, the throwable is still rendered. */
@Test
public void testNoExeptionHandler() {
    pl.setPattern("%m%n");
    pl.start();
    String val = pl.doLayout(makeLoggingEvent(aMessage, ex));
    assertTrue(val.contains("java.lang.Exception: Bogus exception"));
}
/** Binds asynchronously by starting the internally held server thread. */
@Override
public void bind() {
    thread.start();
}
/** bind() starts the server asynchronously; spin until active, assert, then shut down. */
@Test
public void bindTest() throws IOException {
    EventLoopGroup leader = new NioEventLoopGroup();
    EventLoopGroup worker = new NioEventLoopGroup();
    ServerTakeHandler handler = new ServerTakeHandler(instance, o -> 1);
    ServerConnection connection = new SimpleServerConnection(leader, worker, handler);
    RPCServer rpcServer = new RPCServer(connection, RandomPort::getSafeRandomPort);
    rpcServer.bind();
    // bind() only starts the thread, so poll until the server reports active.
    while (!rpcServer.isActive()) {
        LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(1L));
    }
    boolean active = rpcServer.isActive();
    Assert.assertTrue(active);
    rpcServer.close();
}
// Asserts the actual value is an instance of the given class. Fails with a
// descriptive message for a null actual, and — when class metadata is
// unavailable (-XdisableClassMetadata) — throws UnsupportedOperationException
// because the check cannot be decided there.
public void isInstanceOf(Class<?> clazz) { if (clazz == null) { throw new NullPointerException("clazz"); } if (actual == null) { failWithActual("expected instance of", clazz.getName()); return; } if (!isInstanceOfType(actual, clazz)) { if (Platform.classMetadataUnsupported()) { throw new UnsupportedOperationException( actualCustomStringRepresentation() + ", an instance of " + actual.getClass().getName() + ", may or may not be an instance of " + clazz.getName() + ". Under -XdisableClassMetadata, we do not have enough information to tell."); } failWithoutActual( fact("expected instance of", clazz.getName()), fact("but was instance of", actual.getClass().getName()), fact("with value", actualCustomStringRepresentation())); } }
// Verifies the three failure facts (expected/actual class, value) emitted
// when the actual value's class is unrelated to the expected class.
@Test public void isInstanceOfUnrelatedClass() { expectFailure.whenTesting().that(4.5).isInstanceOf(Long.class); assertFailureKeys("expected instance of", "but was instance of", "with value"); assertFailureValue("expected instance of", "java.lang.Long"); assertFailureValue("but was instance of", "java.lang.Double"); assertFailureValue("with value", "4.5"); }
public static AWSCredentialsProvider createAwsCredentialsProvider( UnderFileSystemConfiguration conf) { // Set the aws credential system properties based on Alluxio properties, if they are set; // otherwise, use the default credential provider. if (conf.isSet(PropertyKey.S3A_ACCESS_KEY) && conf.isSet(PropertyKey.S3A_SECRET_KEY)) { return new AWSStaticCredentialsProvider(new BasicAWSCredentials( conf.getString(PropertyKey.S3A_ACCESS_KEY), conf.getString(PropertyKey.S3A_SECRET_KEY))); } // Checks, in order, env variables, system properties, profile file, and instance profile. return new DefaultAWSCredentialsProviderChain(); }
// Verifies that configured access/secret keys yield a static credentials
// provider carrying exactly those keys.
@Test public void createCredentialsFromConf() throws Exception { Map<PropertyKey, Object> conf = new HashMap<>(); conf.put(PropertyKey.S3A_ACCESS_KEY, "key1"); conf.put(PropertyKey.S3A_SECRET_KEY, "key2"); try (Closeable c = new ConfigurationRule(conf, CONF).toResource()) { UnderFileSystemConfiguration ufsConf = UnderFileSystemConfiguration.defaults(CONF); AWSCredentialsProvider credentialsProvider = S3AUnderFileSystem.createAwsCredentialsProvider(ufsConf); Assert.assertEquals("key1", credentialsProvider.getCredentials().getAWSAccessKeyId()); Assert.assertEquals("key2", credentialsProvider.getCredentials().getAWSSecretKey()); Assert.assertTrue(credentialsProvider instanceof AWSStaticCredentialsProvider); } }
/**
 * Serializes a Connect value (with optional schema) to JSON bytes for the
 * given topic. When schemas are enabled the payload is wrapped in a
 * schema/payload envelope; otherwise the bare payload is emitted.
 *
 * @return the serialized bytes, or null when both schema and value are null
 * @throws DataException if the underlying JSON serializer fails
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (schema == null && value == null) {
        return null;
    }
    final JsonNode jsonValue;
    if (config.schemasEnabled()) {
        jsonValue = convertToJsonWithEnvelope(schema, value);
    } else {
        jsonValue = convertToJsonWithoutEnvelope(schema, value);
    }
    try {
        return serializer.serialize(topic, jsonValue);
    } catch (SerializationException e) {
        // Re-throw as Connect's DataException, preserving the cause.
        throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
    }
}
// Verifies a string value serializes into the envelope format with the
// expected schema node and payload text.
@Test public void stringToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, "test-string")); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"string\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals("test-string", converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).textValue()); }
// Default rendering: public key only (no private material), no encryption
// parameters, mainnet formatting.
@Override public String toString() { return toString(false, null, BitcoinNetwork.MAINNET); }
// Verifies toString omits private material while toStringWithPrivate
// includes the private key in HEX and WIF form.
@Test public void testToString() { ECKey key = ECKey.fromPrivate(BigInteger.TEN).decompress(); // An example private key. assertEquals("ECKey{pub HEX=04a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7893aba425419bc27a3b6c7e693a24c696f794c2ed877a1593cbee53b037368d7, isEncrypted=false, isPubKeyOnly=false}", key.toString()); assertEquals("ECKey{pub HEX=04a0434d9e47f3c86235477c7b1ae6ae5d3442d49b1943c2b752a68e2a47e247c7893aba425419bc27a3b6c7e693a24c696f794c2ed877a1593cbee53b037368d7, priv HEX=000000000000000000000000000000000000000000000000000000000000000a, priv WIF=5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreBoNWTw6, isEncrypted=false, isPubKeyOnly=false}", key.toStringWithPrivate(null, MAINNET)); }
/**
 * Structural equality for two flat-record object nodes.
 * Both null is equal; exactly one null is not; nodes with different schema
 * names are never equal. Otherwise the nodes are narrowed to their common
 * schema and compared field by field.
 */
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
    if (left == null) {
        return right == null;
    }
    if (right == null) {
        return false;
    }
    if (!left.getSchema().getName().equals(right.getSchema().getName())) {
        return false;
    }
    extractCommonObjectSchema(left, right);
    return compare(left, right);
}
// Verifies equality is false (in both argument orders) when the two records'
// map fields differ by one key ("D" vs "C").
@Test public void shouldFindThatRecordsAreNotEqualBecauseMapValuesDiffer() { TypeState1 typeState1 = new TypeState1(); typeState1.longField = 1L; Map<String, SubValue> map1 = new HashMap<>(); map1.put("A", new SubValue("A", "AA")); map1.put("B", new SubValue("B", "BB")); map1.put("D", new SubValue("D", "DD")); typeState1.simpleMapField = map1; writer1.reset(); mapper1.writeFlat(typeState1, writer1); FlatRecord rec1 = writer1.generateFlatRecord(); TypeState2 typeState2 = new TypeState2(); typeState2.longField = 1L; Map<String, SubValue> map2 = new HashMap<>(); map2.put("A", new SubValue("A", "AA")); map2.put("B", new SubValue("B", "BB")); map2.put("C", new SubValue("C", "CC")); typeState2.simpleMapField = map2; writer2.reset(); mapper2.writeFlat(typeState2, writer2); FlatRecord rec2 = writer2.generateFlatRecord(); FlatRecordTraversalObjectNode leftNode = new FlatRecordTraversalObjectNode(rec1); FlatRecordTraversalObjectNode rightNode = new FlatRecordTraversalObjectNode(rec2); assertThat(FlatRecordTraversalObjectNodeEquality.equals(leftNode, rightNode)).isFalse(); assertThat(FlatRecordTraversalObjectNodeEquality.equals(rightNode, leftNode)).isFalse(); }
// This analyzer never acts as an output step.
@Override public boolean isOutput() { return false; }
// Verifies the analyzer reports itself as not being an output step.
@Test public void testIsOutput() throws Exception { assertFalse( analyzer.isOutput() ); }
@Override final public boolean isTooSoon(long currentTime) { boolean maskMatch = ((invocationCounter++) & mask) == mask; if (maskMatch) { if (currentTime < this.lowerLimitForMaskMatch) { increaseMask(); } updateLimits(currentTime); } else { if (currentTime > this.upperLimitForNoMaskMatch) { decreaseMask(); updateLimits(currentTime); return false; } } return !maskMatch; }
// Smoke test: the very first invocation at time 0 is considered too soon.
@Test public void smoke() { long currentTime = 0; long minDelayThreshold = 4; long maxDelayThreshold = 8; DefaultInvocationGate gate = new DefaultInvocationGate(minDelayThreshold, maxDelayThreshold, currentTime); assertTrue(gate.isTooSoon(0)); }
// Increments the counter using opaque VarHandle memory semantics and returns
// the new value. NOTE(review): the read-modify-write is not atomic — opaque
// access only guarantees per-access coherence, so concurrent increments may
// be lost. Presumably this counter is intended for single-writer use;
// confirm with callers.
@Override public long inc() { long l = (long) VALUE.getOpaque(this) + 1; VALUE.setOpaque(this, l); return l; }
// Verifies inc(amount) adds positive, zero, and negative deltas and returns
// the running total each time.
@Test public void test_inc_withAmount() { assertEquals(10, counter.inc(10)); assertEquals(10, counter.inc(0)); assertEquals(0, counter.inc(-10)); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
// Verifies overload resolution picks the vararg candidate when a null
// argument sits in the vararg position between concrete arguments.
@Test public void shouldChooseMidVarargWithNullValues() { // Given: givenFunctions( function(OTHER, -1, INT, STRING), function(EXPECTED, 1, INT, STRING_VARARGS, STRING) ); // When: final KsqlScalarFunction fun = udfIndex.getFunction(Arrays.asList( SqlArgument.of(SqlTypes.INTEGER), null, SqlArgument.of(SqlTypes.STRING) )); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
// Entry point for schema resolution: applies the reader's aliases to the
// writer schema, then resolves with a fresh seen-map so recursive record
// schemas terminate.
public static Action resolve(Schema writer, Schema reader, GenericData data) { return resolve(Schema.applyAliases(writer, reader), reader, data, new HashMap<>()); }
// Verifies union-to-union resolution produces a WriterUnion wrapping a
// ReaderUnion whose inner action is a Promote carrying the logical type.
@Test void resolveUnion() { final Schema schema = new TimeConversions.TimeMicrosConversion().getRecommendedSchema(); final Schema writeSchema = Schema.createUnion(Schema.create(Schema.Type.INT)); final Schema readSchema = Schema.createUnion(schema); Resolver.Action action = Resolver.resolve(writeSchema, readSchema); Assertions.assertNotNull(action); Assertions.assertEquals(action.type, Resolver.Action.Type.WRITER_UNION); MatcherAssert.assertThat("Wrong class for action", action, Matchers.instanceOf(Resolver.WriterUnion.class)); Assertions.assertEquals(1, ((Resolver.WriterUnion) action).actions.length); Resolver.Action innerAction = ((Resolver.WriterUnion) action).actions[0]; MatcherAssert.assertThat("Wrong class for action", innerAction, Matchers.instanceOf(Resolver.ReaderUnion.class)); Resolver.ReaderUnion innerUnionAction = (Resolver.ReaderUnion) innerAction; Resolver.Action promoteAction = innerUnionAction.actualAction; Assertions.assertEquals(promoteAction.type, Resolver.Action.Type.PROMOTE); Assertions.assertNotNull(promoteAction.logicalType); }
/**
 * Registers a new OpenStack node in the store.
 * Non-controller nodes that arrive without an integration bridge get one
 * generated from the monotonically increasing device-id counter. In all
 * cases the node's integration bridge must be unique across hostnames.
 *
 * @param osNode the node to create; must not be null
 */
@Override
public void createNode(OpenstackNode osNode) {
    checkNotNull(osNode, ERR_NULL_NODE);

    OpenstackNode updatedNode = osNode;
    if (osNode.intgBridge() == null && osNode.type() != CONTROLLER) {
        String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
        checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
        updatedNode = osNode.updateIntbridge(DeviceId.deviceId(deviceIdStr));
    }
    // Hoisted out of the branches: both paths previously performed this
    // identical duplicate-bridge check.
    checkArgument(!hasIntgBridge(updatedNode.intgBridge(), updatedNode.hostname()),
            NOT_DUPLICATED_MSG, updatedNode.intgBridge());

    osNodeStore.createNode(updatedNode);
    log.info(String.format(MSG_NODE, osNode.hostname(), MSG_CREATED));
}
// Verifies createNode rejects a null node with NullPointerException.
@Test(expected = NullPointerException.class) public void testCreateNullNode() { target.createNode(null); }
/**
 * Persists a detached object graph with PERSIST cascade semantics.
 * A per-call identity map tracks objects already persisted so cyclic
 * references in the graph are handled exactly once.
 */
@Override
public <T> T persist(T detachedObject) {
    final Map<Object, Object> alreadyPersisted = new HashMap<>();
    return persist(detachedObject, alreadyPersisted, RCascadeType.PERSIST);
}
// Verifies the live-object service transparently replaces each plain JDK
// collection assigned to the field with the corresponding Redisson
// distributed type (HashMap -> RMap, TreeSet -> RSortedSet, etc.).
@Test public void testTransformation() { RLiveObjectService service = redisson.getLiveObjectService(); TestClass ts = new TestClass(); ts = service.persist(ts); HashMap<String, String> m = new HashMap<>(); ts.setContent(m); assertFalse(HashMap.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RMap.class.isAssignableFrom(ts.getContent().getClass())); HashSet<String> s = new HashSet<>(); ts.setContent(s); assertFalse(HashSet.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RSet.class.isAssignableFrom(ts.getContent().getClass())); TreeSet<String> ss = new TreeSet<>(); ts.setContent(ss); assertFalse(TreeSet.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RSortedSet.class.isAssignableFrom(ts.getContent().getClass())); ArrayList<String> al = new ArrayList<>(); ts.setContent(al); assertFalse(ArrayList.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RList.class.isAssignableFrom(ts.getContent().getClass())); ConcurrentHashMap<String, String> chm = new ConcurrentHashMap<>(); ts.setContent(chm); assertFalse(ConcurrentHashMap.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RMap.class.isAssignableFrom(ts.getContent().getClass())); ArrayBlockingQueue<String> abq = new ArrayBlockingQueue<>(10); ts.setContent(abq); assertFalse(ArrayBlockingQueue.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RBlockingQueue.class.isAssignableFrom(ts.getContent().getClass())); ConcurrentLinkedQueue<String> clq = new ConcurrentLinkedQueue<>(); ts.setContent(clq); assertFalse(ConcurrentLinkedQueue.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RQueue.class.isAssignableFrom(ts.getContent().getClass())); LinkedBlockingDeque<String> lbdq = new LinkedBlockingDeque<>(); ts.setContent(lbdq); assertFalse(LinkedBlockingDeque.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RBlockingDeque.class.isAssignableFrom(ts.getContent().getClass())); LinkedList<String> ll = new LinkedList<>(); 
ts.setContent(ll); assertFalse(LinkedList.class.isAssignableFrom(ts.getContent().getClass())); assertTrue(RDeque.class.isAssignableFrom(ts.getContent().getClass())); }
/**
 * Looks up the serializer factory registered for the given unmodifiable
 * wrapper class and builds a serializer from it.
 *
 * @throws IllegalArgumentException if no factory is registered for {@code cls}
 */
static Serializer createSerializer(Fury fury, Class<?> cls) {
    for (Tuple2<Class<?>, Function> factory : unmodifiableFactories()) {
        if (cls == factory.f0) {
            return createSerializer(fury, factory);
        }
    }
    throw new IllegalArgumentException("Unsupported type " + cls);
}
// Round-trips every unmodifiable-wrapper flavor through the serializer and
// checks the wrapped source collection/map survives, both via the direct
// serializer and via full Fury serde, and that the registered serializer
// class is one of the "Unmodifiable" specializations.
@SuppressWarnings("unchecked") @Test public void testWrite() throws Exception { Fury fury = Fury.builder().withLanguage(Language.JAVA).requireClassRegistration(false).build(); MemoryBuffer buffer = MemoryUtils.buffer(32); Object[] values = new Object[] { Collections.unmodifiableCollection(Collections.singletonList("abc")), Collections.unmodifiableCollection(Arrays.asList("abc", "def")), Collections.unmodifiableList(Arrays.asList("abc", "def")), Collections.unmodifiableList(new LinkedList<>(Arrays.asList("abc", "def"))), Collections.unmodifiableSet(new HashSet<>(Arrays.asList("abc", "def"))), Collections.unmodifiableSortedSet(new TreeSet<>(Arrays.asList("abc", "def"))), Collections.unmodifiableMap(ImmutableMap.of("k1", "v1")), Collections.unmodifiableSortedMap(new TreeMap<>(ImmutableMap.of("k1", "v1"))) }; for (Object value : values) { buffer.writerIndex(0); buffer.readerIndex(0); Serializer serializer = createSerializer(fury, value.getClass()); serializer.write(buffer, value); Object newObj = serializer.read(buffer); assertEquals(newObj.getClass(), value.getClass()); long sourceCollectionFieldOffset = Collection.class.isAssignableFrom(value.getClass()) ? SOURCE_COLLECTION_FIELD_OFFSET : SOURCE_MAP_FIELD_OFFSET; Object innerValue = Platform.getObject(value, sourceCollectionFieldOffset); Object newValue = Platform.getObject(newObj, sourceCollectionFieldOffset); assertEquals(innerValue, newValue); newObj = serDe(fury, value); innerValue = Platform.getObject(value, sourceCollectionFieldOffset); newValue = Platform.getObject(newObj, sourceCollectionFieldOffset); assertEquals(innerValue, newValue); assertTrue( fury.getClassResolver() .getSerializerClass(value.getClass()) .getName() .contains("Unmodifiable")); } }
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
// Verifies offsets are assigned starting at the supplied base offset when
// down-converting uncompressed V2 records to V1.
@Test public void testOffsetAssignmentAfterDownConversionV2ToV1NonCompressed() { long offset = 1234567; long now = System.currentTimeMillis(); MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, Compression.NONE); checkOffsets(records, 0); checkOffsets(new LogValidator( records, new TopicPartition("topic", 0), time, CompressionType.NONE, Compression.NONE, false, RecordBatch.MAGIC_VALUE_V1, TimestampType.CREATE_TIME, 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, AppendOrigin.CLIENT, MetadataVersion.latestTesting() ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); }
// Encodes a BufferedImage as a lossless PDF image XObject. Gray images take
// a dedicated path. Otherwise a predictor-based encoder is tried first; for
// small RGB images (<= 50x50 pixels, < 16 bits/component) a classic
// RGB-compressed encoding is also produced and the smaller of the two wins
// (the loser's COS stream is closed). If the predictor encoder declines,
// the image falls back to 8-bit sRGB export, which may lose color info.
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image) throws IOException { if (isGrayImage(image)) { return createFromGrayImage(image, document); } // We try to encode the image with predictor if (USE_PREDICTOR_ENCODER) { PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode(); if (pdImageXObject != null) { if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE && pdImageXObject.getBitsPerComponent() < 16 && image.getWidth() * image.getHeight() <= 50 * 50) { // also create classic compressed image, compare sizes PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document); if (pdImageXObjectClassic.getCOSObject().getLength() < pdImageXObject.getCOSObject().getLength()) { pdImageXObject.getCOSObject().close(); return pdImageXObjectClassic; } else { pdImageXObjectClassic.getCOSObject().close(); } } return pdImageXObject; } } // Fallback: We export the image as 8-bit sRGB and might lose color information return createFromRGBImage(image, document); }
// Verifies a TYPE_INT_RGB image encodes losslessly: 8 bits/component,
// DeviceRGB color space, and pixel-identical round trip.
@Test void testCreateLosslessFromImageINT_RGB() throws IOException { PDDocument document = new PDDocument(); BufferedImage image = ImageIO.read(this.getClass().getResourceAsStream("png.png")); BufferedImage imgRgb = new BufferedImage(image.getWidth(), image.getHeight(), BufferedImage.TYPE_INT_RGB); Graphics2D graphics = imgRgb.createGraphics(); graphics.drawImage(image, 0, 0, null); PDImageXObject ximage = LosslessFactory.createFromImage(document, imgRgb); validate(ximage, 8, imgRgb.getWidth(), imgRgb.getHeight(), "png", PDDeviceRGB.INSTANCE.getName()); checkIdent(image, ximage.getImage()); }
// Convenience overload: builds a junction tree from the cliques alone,
// delegating to the full overload with no pre-supplied structures.
public JunctionTree junctionTree(List<OpenBitSet> cliques, boolean init) { return junctionTree(null, null, null, cliques, init); }
// Builds a junction tree from four overlapping cliques and verifies the
// resulting parent/child topology and the separator bit-sets (clique
// intersections) along each edge after pruning.
@Test public void testJunctionWithPruning3() { Graph<BayesVariable> graph = new BayesNetwork(); GraphNode x0 = addNode(graph); GraphNode x1 = addNode(graph); GraphNode x2 = addNode(graph); GraphNode x3 = addNode(graph); GraphNode x4 = addNode(graph); GraphNode x5 = addNode(graph); GraphNode x6 = addNode(graph); GraphNode x7 = addNode(graph); List<OpenBitSet> list = new ArrayList<OpenBitSet>(); OpenBitSet OpenBitSet1 = bitSet("00001111"); OpenBitSet OpenBitSet2 = bitSet("00011110"); OpenBitSet OpenBitSet3 = bitSet("11100000"); OpenBitSet OpenBitSet4 = bitSet("01100001"); OpenBitSet intersect1And2 = OpenBitSet2.clone(); intersect1And2.and(OpenBitSet1); OpenBitSet intersect2And3 = OpenBitSet2.clone(); intersect2And3.and(OpenBitSet3); OpenBitSet intersect1And4 = OpenBitSet1.clone(); intersect1And4.and(OpenBitSet4); OpenBitSet intersect3And4 = OpenBitSet3.clone(); intersect3And4.and(OpenBitSet4); list.add(OpenBitSet1); list.add(OpenBitSet2); list.add(OpenBitSet3); list.add(OpenBitSet4); JunctionTreeBuilder jtBuilder = new JunctionTreeBuilder( graph ); JunctionTreeClique jtNode = jtBuilder.junctionTree(list, false).getRoot();; JunctionTreeClique root = jtNode; assertThat(root.getBitSet()).isEqualTo(OpenBitSet1); assertThat(root.getChildren().size()).isEqualTo(2); JunctionTreeSeparator sep = root.getChildren().get(0); assertThat(sep.getParent().getBitSet()).isEqualTo(OpenBitSet1); assertThat(sep.getChild().getBitSet()).isEqualTo(OpenBitSet2); assertThat(sep.getChild().getChildren().size()).isEqualTo(0); sep = root.getChildren().get(1); assertThat(sep.getParent().getBitSet()).isEqualTo(OpenBitSet1); assertThat(sep.getChild().getBitSet()).isEqualTo(OpenBitSet4); assertThat(sep.getBitSet()).isEqualTo(intersect1And4); assertThat(sep.getChild().getChildren().size()).isEqualTo(1); jtNode = sep.getChild(); assertThat(jtNode.getBitSet()).isEqualTo(OpenBitSet4); assertThat(jtNode.getChildren().size()).isEqualTo(1); sep = jtNode.getChildren().get(0); 
assertThat(sep.getParent().getBitSet()).isEqualTo(OpenBitSet4); assertThat(sep.getChild().getBitSet()).isEqualTo(OpenBitSet3); assertThat(sep.getBitSet()).isEqualTo(intersect3And4); assertThat(sep.getChild().getChildren().size()).isEqualTo(0); }
// Walks the component tree from the root with a path-aware crawler, applying
// the duplication formula to compute measures bottom-up.
@Override public void execute(ComputationStep.Context context) { new PathAwareCrawler<>( FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository) .buildFor(List.of(duplicationFormula))) .visit(treeRootHolder.getRoot()); }
// Verifies new-lines measures aggregate up the tree: 11 per file with new
// lines, 22 on the directory, 33 on the root, and nothing where no file has
// new lines.
@Test public void compute_new_lines() { setNewLines(FILE_1, FILE_2, FILE_4); underTest.execute(new TestComputationStepContext()); assertRawMeasureValue(FILE_1_REF, NEW_LINES_KEY, 11); assertRawMeasureValue(FILE_2_REF, NEW_LINES_KEY, 11); assertNoRawMeasure(FILE_3_REF, NEW_LINES_KEY); assertRawMeasureValue(FILE_4_REF, NEW_LINES_KEY, 11); assertRawMeasureValue(DIRECTORY_REF, NEW_LINES_KEY, 22); assertNoRawMeasure(DIRECTORY_2_REF, NEW_LINES_KEY); assertRawMeasureValue(ROOT_REF, NEW_LINES_KEY, 33); }
// Issues an upload descriptor for a temporary backup attachment. Requires
// the MEDIA backup level (throws otherwise), applies the backup-attachment
// rate limit asynchronously, then generates a random URL-safe attachment key
// and a signed TUS upload location.
public CompletableFuture<BackupUploadDescriptor> createTemporaryAttachmentUploadDescriptor( final AuthenticatedBackupUser backupUser) { checkBackupLevel(backupUser, BackupLevel.MEDIA); return rateLimiters.forDescriptor(RateLimiters.For.BACKUP_ATTACHMENT) .validateAsync(rateLimitKey(backupUser)).thenApply(ignored -> { final byte[] bytes = new byte[15]; secureRandom.nextBytes(bytes); final String attachmentKey = Base64.getUrlEncoder().encodeToString(bytes); final AttachmentGenerator.Descriptor descriptor = tusAttachmentGenerator.generateAttachment(attachmentKey); return new BackupUploadDescriptor(3, attachmentKey, descriptor.headers(), descriptor.signedUploadLocation()); }).toCompletableFuture(); }
// Verifies a MESSAGES-tier user is rejected with PERMISSION_DENIED when
// requesting a media attachment upload descriptor.
@Test public void createTemporaryMediaAttachmentWrongTier() { final AuthenticatedBackupUser backupUser = backupUser(TestRandomUtil.nextBytes(16), BackupLevel.MESSAGES); assertThatExceptionOfType(StatusRuntimeException.class) .isThrownBy(() -> backupManager.createTemporaryAttachmentUploadDescriptor(backupUser)) .extracting(StatusRuntimeException::getStatus) .extracting(Status::getCode) .isEqualTo(Status.Code.PERMISSION_DENIED); }
public static byte[] getBytesWithoutClosing(InputStream stream) throws IOException { if (stream instanceof ExposedByteArrayInputStream) { // Fast path for the exposed version. return ((ExposedByteArrayInputStream) stream).readAll(); } else if (stream instanceof ByteArrayInputStream) { // Fast path for ByteArrayInputStream. byte[] ret = new byte[stream.available()]; stream.read(ret); return ret; } // Falls back to normal stream copying. SoftReference<byte[]> refBuffer = threadLocalBuffer.get(); byte[] buffer = refBuffer == null ? null : refBuffer.get(); if (buffer == null) { buffer = new byte[BUF_SIZE]; threadLocalBuffer.set(new SoftReference<>(buffer)); } ByteArrayOutputStream outStream = new ByteArrayOutputStream(); while (true) { int r = stream.read(buffer); if (r == -1) { break; } outStream.write(buffer, 0, r); } return outStream.toByteArray(); }
// Verifies the exposed fast path returns the backing array itself (same
// reference, no copy) and fully consumes the stream.
@Test public void testGetBytesFromExposedByteArrayInputStream() throws IOException { InputStream stream = new ExposedByteArrayInputStream(testData); byte[] bytes = StreamUtils.getBytesWithoutClosing(stream); assertArrayEquals(testData, bytes); assertSame(testData, bytes); assertEquals(0, stream.available()); }
// Initiates factory shutdown. Under the mutex, marks the factory stopped,
// snapshots the outstanding-client count, and stashes the callback; the
// final shutdown runs immediately when no clients remain, otherwise it is
// deferred until the last outstanding client is released.
@Override public void shutdown(final Callback<None> callback) { final int count; synchronized (_mutex) { _running = false; count = _clientsOutstanding; _factoryShutdownCallback = callback; } if (count == 0) { finishShutdown(); } else { LOG.info("Awaiting shutdown of {} outstanding clients", count); } }
// Verifies factory shutdown completes promptly and leaves the scheduler and
// event loop free to terminate (no lingering timeout tasks holding them).
@Test public void testShutdownTimeoutDoesNotOccupyExecutors() throws InterruptedException, ExecutionException, TimeoutException { EventLoopGroup eventLoop = new NioEventLoopGroup(); ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); HttpClientFactory factory = getHttpClientFactory(eventLoop, false, scheduler, false); FutureCallback<None> callback = new FutureCallback<>(); factory.shutdown(callback, 60, TimeUnit.MINUTES); callback.get(60, TimeUnit.SECONDS); scheduler.shutdown(); eventLoop.shutdownGracefully(); Assert.assertTrue(scheduler.awaitTermination(60, TimeUnit.SECONDS)); Assert.assertTrue(eventLoop.awaitTermination(60, TimeUnit.SECONDS)); }
/**
 * Updates the AWS default configuration, overlaying the request's fields on
 * the stored configuration (or a fresh default). The secret key is optional
 * in the request; when present it is stored encrypted with the system
 * password secret.
 */
@PUT
@ApiOperation(value = "Updates the AWS default configuration.")
@RequiresPermissions({RestPermissions.CLUSTER_CONFIG_ENTRY_CREATE, RestPermissions.CLUSTER_CONFIG_ENTRY_EDIT})
@AuditEvent(type = AuditEventTypes.CLUSTER_CONFIGURATION_UPDATE)
public Response updateConfig(@Valid AWSPluginConfigurationUpdate update) {
    final AWSPluginConfiguration existingConfiguration = clusterConfigService.getOrDefault(
            AWSPluginConfiguration.class,
            AWSPluginConfiguration.createDefault()
    );
    AWSPluginConfiguration.Builder builder = existingConfiguration.toBuilder()
            .lookupsEnabled(update.lookupsEnabled())
            .lookupRegions(update.lookupRegions())
            .accessKey(update.accessKey())
            .proxyEnabled(update.proxyEnabled());
    if (update.secretKey().isPresent()) {
        builder = builder.secretKey(update.secretKey().get(), systemConfiguration.getPasswordSecret());
    }
    final AWSPluginConfiguration newConfiguration = builder.build();
    clusterConfigService.write(newConfiguration);
    return Response.accepted(newConfiguration).build();
}
// Verifies that a secret key supplied in the update is written (encrypted)
// even when the previous configuration had none.
@Test public void updatesSecretKeyAndSaltIfPresentForPreviouslyMissingSecretKey() { mockPreviousConfig(sampleConfigWithoutSecretKey()); final AWSPluginConfigurationUpdate update = AWSPluginConfigurationUpdate.create( true, "lookupRegions", "myAccessKey", "newSecretKey", true ); this.awsConfigurationResource.updateConfig(update); final AWSPluginConfiguration writtenConfig = captureWrittenConfig(); assertThat(writtenConfig.secretKey("verySecret123456")).isEqualTo("newSecretKey"); }
// Bootstraps FPGA support: re-initializes the vendor plugin (failing fast on
// error), discovers available devices, registers them with the allocator,
// and initializes the cgroups devices controller. Always returns null (no
// privileged operations needed at bootstrap).
@Override public List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException { // The plugin should be initilized by FpgaDiscoverer already if (!vendorPlugin.initPlugin(configuration)) { throw new ResourceHandlerException("FPGA plugin initialization failed"); } LOG.info("FPGA Plugin bootstrap success."); // Get avialable devices minor numbers from toolchain or static configuration List<FpgaDevice> fpgaDeviceList = fpgaDiscoverer.discover(); allocator.addFpgaDevices(vendorPlugin.getFpgaType(), fpgaDeviceList); this.cGroupsHandler.initializeCGroupController( CGroupsHandler.CGroupController.DEVICES); return null; }
// Exercises bootstrap with three allowed-devices settings: "auto" (all 5
// discovered devices), an explicit subset ("0,1,2"), and a list containing a
// nonexistent minor ("0,1,7" -> only 2 usable devices survive).
@Test public void testBootstrap() throws ResourceHandlerException { // Case 1. auto String allowed = "auto"; configuration.set(YarnConfiguration.NM_FPGA_ALLOWED_DEVICES, allowed); fpgaResourceHandler.bootstrap(configuration); // initPlugin() was also called in setup() verify(mockVendorPlugin, times(2)).initPlugin(configuration); verify(mockCGroupsHandler, times(1)).initializeCGroupController( CGroupsHandler.CGroupController.DEVICES); Assert.assertEquals(5, fpgaResourceHandler.getFpgaAllocator() .getAvailableFpgaCount()); Assert.assertEquals(5, fpgaResourceHandler.getFpgaAllocator() .getAllowedFpga().size()); // Case 2. subset of devices fpgaResourceHandler = new FpgaResourceHandlerImpl(mockContext, mockCGroupsHandler, mockPrivilegedExecutor, mockVendorPlugin, fpgaDiscoverer); allowed = "0,1,2"; configuration.set(YarnConfiguration.NM_FPGA_ALLOWED_DEVICES, allowed); fpgaResourceHandler.bootstrap(configuration); Assert.assertEquals(3, fpgaResourceHandler.getFpgaAllocator().getAllowedFpga().size()); List<FpgaDevice> allowedDevices = fpgaResourceHandler.getFpgaAllocator().getAllowedFpga(); for (String s : allowed.split(",")) { boolean check = false; for (FpgaDevice device : allowedDevices) { if (String.valueOf(device.getMinor()).equals(s)) { check = true; } } Assert.assertTrue("Minor:" + s +" found", check); } Assert.assertEquals(3, fpgaResourceHandler.getFpgaAllocator().getAvailableFpgaCount()); // Case 3. User configuration contains invalid minor device number fpgaResourceHandler = new FpgaResourceHandlerImpl(mockContext, mockCGroupsHandler, mockPrivilegedExecutor, mockVendorPlugin, fpgaDiscoverer); allowed = "0,1,7"; configuration.set(YarnConfiguration.NM_FPGA_ALLOWED_DEVICES, allowed); fpgaResourceHandler.bootstrap(configuration); Assert.assertEquals(2, fpgaResourceHandler.getFpgaAllocator().getAvailableFpgaCount()); Assert.assertEquals(2, fpgaResourceHandler.getFpgaAllocator().getAllowedFpga().size()); }
// Parses a PostgreSQL-style range string into a Range<ZonedDateTime>. When
// both bounds are present and carry different zones, the zone drift between
// the bounds must be fully explained by a daylight-saving transition in the
// system default zone; otherwise the range is rejected.
// NOTE(review): lowerZone/upperZone are cast to ZoneOffset below — a
// region-based ZoneId (e.g. "America/Chicago") would throw
// ClassCastException. Presumably the parser only ever yields offset-based
// zones; confirm before relying on this with arbitrary input.
public static Range<ZonedDateTime> zonedDateTimeRange(String rangeStr) { Range<ZonedDateTime> range = ofString(rangeStr, parseZonedDateTime().compose(unquote()), ZonedDateTime.class); if (range.hasLowerBound() && range.hasUpperBound() && !EMPTY.equals(rangeStr)) { ZoneId lowerZone = range.lower().getZone(); ZoneId upperZone = range.upper().getZone(); if (!lowerZone.equals(upperZone)) { Duration lowerDst = ZoneId.systemDefault().getRules().getDaylightSavings(range.lower().toInstant()); Duration upperDst = ZoneId.systemDefault().getRules().getDaylightSavings(range.upper().toInstant()); long dstSeconds = upperDst.minus(lowerDst).getSeconds(); if(dstSeconds < 0 ) { dstSeconds *= -1; } long zoneDriftSeconds = ((ZoneOffset) lowerZone).getTotalSeconds() - ((ZoneOffset) upperZone).getTotalSeconds(); if (zoneDriftSeconds < 0) { zoneDriftSeconds *= -1; } if (dstSeconds != zoneDriftSeconds) { throw new IllegalArgumentException("The upper and lower bounds must be in same time zone!"); } } } return range; }
// Verifies parsing accepts fractional seconds of 1..6 digits, both negative
// and positive offsets (including half-hour offsets), and an "infinity"
// upper bound.
@Test public void zonedDateTimeTest() { assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.1-06,)")); assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.12-06,)")); assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.123-06,)")); assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.1234-06,)")); assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.12345-06,)")); assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.123456-06,)")); assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.123456+05:30,)")); assertNotNull(Range.zonedDateTimeRange("[2019-03-27 16:33:10.123456-06,infinity)")); }
@Override public ResourceModel processResourceModel(ResourceModel model, Configuration config) { // Create new resource model. final ResourceModel.Builder resourceModelBuilder = new ResourceModel.Builder(false); for (final Resource resource : model.getResources()) { for (Class<?> handlerClass : resource.getHandlerClasses()) { final String packageName = handlerClass.getPackage().getName(); final Optional<String> packagePrefix = packagePrefixes.entrySet().stream() .filter(entry -> packageName.startsWith(entry.getKey())) .sorted((o1, o2) -> -o1.getKey().compareTo(o2.getKey())) .map(Map.Entry::getValue) .findFirst(); if (packagePrefix.isPresent()) { final String prefixedPath = prefixPath(packagePrefix.get(), resource.getPath()); final Resource newResource = Resource.builder(resource) .path(prefixedPath) .build(); resourceModelBuilder.addResource(newResource); } else { resourceModelBuilder.addResource(resource); } } } return resourceModelBuilder.build(); }
// Verifies both resources under the configured package get the "/test/prefix"
// prepended to their paths.
@Test public void processResourceModelAddsPrefixToResourceClassInCorrectPackage() throws Exception { final ImmutableMap<String, String> packagePrefixes = ImmutableMap.of(PACKAGE_NAME, "/test/prefix"); when(configuration.isCloud()).thenReturn(false); final PrefixAddingModelProcessor modelProcessor = new PrefixAddingModelProcessor(packagePrefixes); final ResourceModel originalResourceModel = new ResourceModel.Builder(false) .addResource(Resource.from(TestResource.class)) .addResource(Resource.from(HiddenTestResource.class)).build(); final ResourceModel resourceModel = modelProcessor.processResourceModel(originalResourceModel, new ResourceConfig()); assertThat(resourceModel.getResources()).hasSize(2); final Resource resource = resourceModel.getResources().get(0); assertThat(resource.getPath()).isEqualTo("/test/prefix/foobar/{test}"); final Resource resource2 = resourceModel.getResources().get(1); assertThat(resource2.getPath()).isEqualTo("/test/prefix/hide-cloud/{test}"); }
// Records whether the module being built is the default one; returns this
// builder (via getThis) for call chaining.
public ModuleBuilder isDefault(Boolean isDefault) { this.isDefault = isDefault; return getThis(); }
// Verifies the builder propagates the isDefault flag into the built module.
@Test void isDefault() { ModuleBuilder builder = ModuleBuilder.newBuilder(); builder.isDefault(true); Assertions.assertTrue(builder.build().isDefault()); }
// Decides whether this step should be skipped for a targeted restart. Uses
// the step-level restart config, falling back to the workflow-level one; the
// skip-step set only applies when the restart path has exactly one node
// (i.e. the restart targets this workflow directly). When the step is in the
// skip set, it is marked SKIPPED with a timeline entry explaining why.
@VisibleForTesting boolean isStepSkipped(WorkflowSummary workflowSummary, StepRuntimeSummary runtimeSummary) { RestartConfig stepRestartConfig = ObjectHelper.valueOrDefault( runtimeSummary.getRestartConfig(), workflowSummary.getRestartConfig()); if (stepRestartConfig != null && stepRestartConfig.getRestartPath() != null && stepRestartConfig.getRestartPath().size() == 1 && stepRestartConfig.getSkipSteps() != null) { boolean skipped = stepRestartConfig.getSkipSteps().contains(runtimeSummary.getStepId()); if (skipped) { LOG.info( "workflow {}'s step {} is skipped.", workflowSummary.getIdentity(), runtimeSummary.getIdentity()); runtimeSummary.markTerminated(StepInstance.Status.SKIPPED, tracingManager); runtimeSummary.addTimeline( TimelineLogEvent.info("Step is skipped because of a user skip action.")); } return skipped; } return false; }
// Exercises isStepSkipped across the negative cases (no restart config, empty config, restart
// path longer than one node, no skip-steps, skip-steps naming a different step) and the two
// positive cases (workflow-level and step-level restart config naming this step), asserting the
// SKIPPED status transition, timeline entry, pending update record, and UPSERT db operation.
@Test public void testIsStepSkipped() { WorkflowSummary summary = new WorkflowSummary(); summary.setWorkflowId("test-workflow"); summary.setWorkflowInstanceId(1L); summary.setWorkflowRunId(1L); StepRuntimeState runtimeState = new StepRuntimeState(); runtimeState.setStatus(StepInstance.Status.USER_FAILED); Timeline timeline = new Timeline(new ArrayList<>()); StepInstance.StepRetry stepRetry = new StepInstance.StepRetry(); stepRetry.setRetryable(true); StepRuntimeSummary runtimeSummary = StepRuntimeSummary.builder() .stepId("step1") .timeline(timeline) .runtimeState(runtimeState) .stepRetry(stepRetry) .build(); Assert.assertFalse(maestroTask.isStepSkipped(summary, runtimeSummary)); summary.setRestartConfig(RestartConfig.builder().build()); Assert.assertFalse(maestroTask.isStepSkipped(summary, runtimeSummary)); summary.setRestartConfig( RestartConfig.builder() .addRestartNode("test-workflow", 1, "step1") .addRestartNode("test-workflow", 2, "step1") .build()); Assert.assertFalse(maestroTask.isStepSkipped(summary, runtimeSummary)); summary.setRestartConfig( RestartConfig.builder() .addRestartNode("test-workflow", 1, "step1") .restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC) .build()); Assert.assertFalse(maestroTask.isStepSkipped(summary, runtimeSummary)); summary.setRestartConfig( RestartConfig.builder() .addRestartNode("test-workflow", 1, "step1") .restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC) .skipSteps(Collections.singleton("step2")) .stepRestartParams(Collections.singletonMap("step2", Collections.emptyMap())) .build()); Assert.assertFalse(maestroTask.isStepSkipped(summary, runtimeSummary)); summary.setRestartConfig( RestartConfig.builder() .addRestartNode("test-workflow", 1, "step1") .restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC) .skipSteps(Collections.singleton("step1")) .stepRestartParams(Collections.singletonMap("step1", Collections.emptyMap())) .build()); Assert.assertTrue(maestroTask.isStepSkipped(summary, runtimeSummary)); 
assertThat(timeline.getTimelineEvents()) .hasSize(1) .usingRecursiveFieldByFieldElementComparatorIgnoringFields("timestamp") .contains(TimelineLogEvent.info("Step is skipped because of a user skip action.")); Assert.assertTrue(stepRetry.isRetryable()); Assert.assertEquals(DbOperation.UPSERT, runtimeSummary.getDbOperation()); Assert.assertEquals(StepInstance.Status.SKIPPED, runtimeState.getStatus()); assertThat(runtimeSummary.getPendingRecords()) .hasSize(1) .usingRecursiveFieldByFieldElementComparatorIgnoringFields("eventTime") .contains( StepInstanceUpdateJobEvent.createRecord( StepInstance.Status.USER_FAILED, StepInstance.Status.SKIPPED, 0L)); summary.setRestartConfig(null); runtimeSummary.getPendingRecords().clear(); runtimeState.setStatus(StepInstance.Status.USER_FAILED); runtimeSummary.setRestartConfig( RestartConfig.builder() .addRestartNode("test-workflow", 1, "step1") .restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC) .skipSteps(Collections.singleton("step1")) .stepRestartParams(Collections.singletonMap("step1", Collections.emptyMap())) .build()); Assert.assertTrue(maestroTask.isStepSkipped(summary, runtimeSummary)); assertThat(timeline.getTimelineEvents()) .hasSize(1) .usingRecursiveFieldByFieldElementComparatorIgnoringFields("timestamp") .contains(TimelineLogEvent.info("Step is skipped because of a user skip action.")); Assert.assertTrue(stepRetry.isRetryable()); Assert.assertEquals(DbOperation.UPSERT, runtimeSummary.getDbOperation()); Assert.assertEquals(StepInstance.Status.SKIPPED, runtimeState.getStatus()); assertThat(runtimeSummary.getPendingRecords()) .hasSize(1) .usingRecursiveFieldByFieldElementComparatorIgnoringFields("eventTime") .contains( StepInstanceUpdateJobEvent.createRecord( StepInstance.Status.USER_FAILED, StepInstance.Status.SKIPPED, 0L)); }
// Builds a Statement from the parse tree, first extracting its data sources via getSources and
// passing them (wrapped in Optional) to the main build method.
public Statement buildStatement(final ParserRuleContext parseTree) { return build(Optional.of(getSources(parseTree)), parseTree); }
// Verifies that a SELECT column alias ("COL0 AS FOO") is preserved as the SingleColumn alias
// in the built Query's select list.
@Test public void shouldIncludeSelectAliasIfPresent() { // Given: final SingleStatementContext stmt = givenQuery("SELECT COL0 AS FOO FROM TEST1;"); // When: final Query result = (Query) builder.buildStatement(stmt); // Then: assertThat(result.getSelect(), is(new Select(ImmutableList.of( new SingleColumn( column("COL0"), Optional.of(ColumnName.of("FOO"))) )))); }
// Convenience overload: prepares the given SQL without requesting auto-generated keys
// (delegates to prepareSQL(sql, false)).
public PreparedStatement prepareSQL( String sql ) throws KettleDatabaseException { return prepareSQL( sql, false ); }
// Verifies that both prepareSQL overloads consult the database interface's
// supportsAutoGeneratedKeys() (once per call, hence times(2)).
@Test public void testPrepareSQL() throws Exception { doReturn( databaseInterface ).when( meta ).getDatabaseInterface(); Database db = new Database( log, meta ); db.setConnection( mockConnection( dbMetaData ) ); db.setCommit( 1 ); db.prepareSQL( "SELECT * FROM DUMMY" ); db.prepareSQL( "SELECT * FROM DUMMY", true ); verify( databaseInterface, times( 2 ) ).supportsAutoGeneratedKeys(); }
// Processes a queued command in RESTORE mode (replaying the command log): deserializes the
// command and its id, then dispatches through handleStatementWithTerminatedQueries with the
// restoreInProgress flag set to true. Throws if the executor has not been configured yet.
void handleRestore(final QueuedCommand queuedCommand) { throwIfNotConfigured(); handleStatementWithTerminatedQueries( queuedCommand.getAndDeserializeCommand(commandDeserializer), queuedCommand.getAndDeserializeCommandId(), queuedCommand.getStatus(), Mode.RESTORE, queuedCommand.getOffset(), true ); }
// Verifies that replaying a CSAS command via handleRestore advances the query id generator but
// does NOT start the resulting persistent query (queries are not started during log replay).
@Test public void shouldSkipStartWhenReplayingLog() { // Given: final QueryId queryId = new QueryId("csas-query-id"); final String name = "foo"; final PersistentQueryMetadata mockQuery = mockReplayCSAS(queryId); final Command command = new Command("CSAS", emptyMap(), emptyMap(), Optional.of(plan)); when(commandDeserializer.deserialize(any(), any())).thenReturn(command); // When: statementExecutorWithMocks.handleRestore( new QueuedCommand( new CommandId(Type.STREAM, name, Action.CREATE), command, Optional.empty(), 0L ) ); // Then: verify(mockQueryIdGenerator, times(1)).setNextId(anyLong()); verify(mockQuery, times(0)).start(); }
/**
 * Converts a latitude to its normalized vertical map coordinate in [0, 1].
 * When {@code wrapEnabled} is true the input latitude is first clipped to the
 * supported latitude range and the conversion result is clipped to [0, 1];
 * otherwise the raw conversion result is returned unchanged.
 */
public double getY01FromLatitude(double latitude, boolean wrapEnabled) {
    if (!wrapEnabled) {
        return getY01FromLatitude(latitude);
    }
    final double clippedLatitude = Clip(latitude, getMinLatitude(), getMaxLatitude());
    return Clip(getY01FromLatitude(clippedLatitude), 0, 1);
}
// Verifies the wrap-enabled conversion at the three boundary latitudes: max latitude maps to 0,
// the equator to 0.5, and min latitude to 1.
@Test public void testGetY01FromLatitude() { checkXY01(0, tileSystem.getY01FromLatitude(tileSystem.getMaxLatitude(), true)); checkXY01(.5, tileSystem.getY01FromLatitude(0, true)); checkXY01(1, tileSystem.getY01FromLatitude(tileSystem.getMinLatitude(), true)); }
// Builds the KafkaCluster model and runs the broker-removal safety check. If the check fails and
// tryToFixProblems is true, reverts the scale-down and role changes and recurses once with
// tryToFixProblems=false (so the retry cannot loop). If the check fails and fixing is disabled,
// fails the future with an InvalidResourceException listing the blocked scale-down nodes and/or
// blocked broker-role removals. On success, any warning conditions accumulated by the reverts
// are added to the KafkaStatus before the cluster model is returned.
public Future<KafkaCluster> prepareKafkaCluster( Kafka kafkaCr, List<KafkaNodePool> nodePools, Map<String, Storage> oldStorage, Map<String, List<String>> currentPods, KafkaVersionChange versionChange, KafkaStatus kafkaStatus, boolean tryToFixProblems) { return createKafkaCluster(kafkaCr, nodePools, oldStorage, currentPods, versionChange) .compose(kafka -> brokerRemovalCheck(kafkaCr, kafka)) .compose(kafka -> { if (checkFailed() && tryToFixProblems) { // We have a failure, and should try to fix issues // Once we fix it, we call this method again, but this time with tryToFixProblems set to false return revertScaleDown(kafka, kafkaCr, nodePools) .compose(kafkaAndNodePools -> revertRoleChange(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs())) .compose(kafkaAndNodePools -> prepareKafkaCluster(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs(), oldStorage, currentPods, versionChange, kafkaStatus, false)); } else if (checkFailed()) { // We have a failure, but we should not try to fix it List<String> errors = new ArrayList<>(); if (scaleDownCheckFailed) { errors.add("Cannot scale-down Kafka brokers " + kafka.removedNodes() + " because they have assigned partition-replicas."); } if (usedToBeBrokersCheckFailed) { errors.add("Cannot remove the broker role from nodes " + kafka.usedToBeBrokerNodes() + " because they have assigned partition-replicas."); } return Future.failedFuture(new InvalidResourceException("Following errors were found when processing the Kafka custom resource: " + errors)); } else { // If everything succeeded, we return the KafkaCluster object // If any warning conditions exist from the reverted changes, we add them to the status if (!warningConditions.isEmpty()) { kafkaStatus.addConditions(warningConditions); } return Future.succeededFuture(kafka); } }); }
// Verifies that when brokers targeted by a scale-down are still in use, prepareKafkaCluster
// reverts the broker-pool scale-down (back to 5 replicas for pool-a and pool-b), still removes
// the unused controller nodes, records two ScaleDownPreventionCheck warning conditions on the
// status, and performs the brokers-in-use check twice (once before and once after the revert).
@Test public void testRevertScaleDownWithKRaft(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock brokers-in-use check BrokersInUseCheck brokersInUseOps = supplier.brokersInUseCheck; when(brokersInUseOps.brokersInUse(any(), any(), any(), any())).thenReturn(Future.succeededFuture(Set.of(1000, 1001, 1002, 1003, 2004))); KafkaStatus kafkaStatus = new KafkaStatus(); KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier); Checkpoint async = context.checkpoint(); creator.prepareKafkaCluster(KAFKA, List.of(POOL_CONTROLLERS_WITH_STATUS_5_NODES, POOL_A_WITH_STATUS_5_NODES, POOL_B_WITH_STATUS_5_NODES), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true) .onComplete(context.succeeding(kc -> context.verify(() -> { // Kafka cluster is created assertThat(kc, is(notNullValue())); assertThat(kc.nodes().size(), is(13)); assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(1000, 1001, 1002, 1003, 1004, 2000, 2001, 2002, 2003, 2004, 3000, 3001, 3002))); assertThat(kc.removedNodes(), is(Set.of(3003, 3004))); // Controllers are not affected // Check the status conditions assertThat(kafkaStatus.getConditions().size(), is(2)); assertThat(kafkaStatus.getConditions().get(0).getStatus(), is("True")); assertThat(kafkaStatus.getConditions().get(0).getType(), is("Warning")); assertThat(kafkaStatus.getConditions().get(0).getReason(), is("ScaleDownPreventionCheck")); assertThat(kafkaStatus.getConditions().get(0).getMessage(), is("Reverting scale-down of KafkaNodePool pool-a by changing number of replicas to 5")); assertThat(kafkaStatus.getConditions().get(1).getStatus(), is("True")); assertThat(kafkaStatus.getConditions().get(1).getType(), is("Warning")); assertThat(kafkaStatus.getConditions().get(1).getReason(), is("ScaleDownPreventionCheck")); 
assertThat(kafkaStatus.getConditions().get(1).getMessage(), is("Reverting scale-down of KafkaNodePool pool-b by changing number of replicas to 5")); // Scale-down reverted => should be called twice as we still scale down controllers after the revert is done verify(supplier.brokersInUseCheck, times(2)).brokersInUse(any(), any(), any(), any()); async.flag(); }))); }
// Returns a fresh provenance object describing this demo label data source.
@Override public DataSourceProvenance getProvenance() { return new DemoLabelDataSourceProvenance(this); }
// Verifies ConcentricCirclesDataSource construction: invalid sample count, radius, and class
// proportion each throw PropertyException; a valid configuration yields 200 examples with the
// expected per-class counts, and its provenance round-trips through marshalling.
@Test public void testConcentricCircles() { // Check zero samples throws assertThrows(PropertyException.class, () -> new ConcentricCirclesDataSource(0, 1, 1, 0.5)); // Check invalid radius throws assertThrows(PropertyException.class, () -> new ConcentricCirclesDataSource(200, 1, -1, 0.5)); // Check invalid class proportion throws assertThrows(PropertyException.class, () -> new ConcentricCirclesDataSource(200, 1, 1, 0.0)); // Check valid parameters work ConcentricCirclesDataSource source = new ConcentricCirclesDataSource(200, 1, 1, 0.5); assertEquals(200, source.examples.size()); Dataset<Label> dataset = new MutableDataset<>(source); Map<String, Long> map = new HashMap<>(); dataset.getOutputInfo().outputCountsIterable().forEach((p) -> map.put(p.getA(), p.getB())); assertEquals(94, map.get("X")); assertEquals(106, map.get("O")); Helpers.testProvenanceMarshalling(source.getProvenance()); }
// Cleans the throwable's stack trace in place, using an identity-based set to track
// already-visited throwables (guards against cycles in the cause/suppressed graph).
static void cleanStackTrace(Throwable throwable) { new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet()); }
// Verifies that frames from a Truth-internal nested subject class are stripped, leaving only
// the user's test frame in the cleaned stack trace.
@Test public void classNestedInSubject() { Throwable throwable = createThrowableWithStackTrace( "com.google.common.truth.IterableSubject$UsingCorrespondence", "com.example.MyTest"); StackTraceCleaner.cleanStackTrace(throwable); assertThat(throwable.getStackTrace()) .isEqualTo( new StackTraceElement[] { createStackTraceElement("com.example.MyTest"), }); }
/**
 * Handles SASL OAUTHBEARER callbacks, dispatching token and extension callbacks to their
 * dedicated handlers.
 *
 * @param callbacks the callbacks to process; {@code OAuthBearerTokenCallback} and
 *     {@code SaslExtensionsCallback} are supported
 * @throws IOException if a dispatched handler throws a {@code KafkaException}; the original
 *     exception is preserved as the cause
 * @throws UnsupportedCallbackException for any unrecognized callback type
 * @throws IllegalStateException if the handler has not been configured yet
 */
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
    // Fixed: the original left every if/else branch and its nested try/catch unbraced,
    // which is fragile and violates standard Java brace style. Behavior is unchanged.
    if (!configured()) {
        throw new IllegalStateException("Callback handler not configured");
    }
    for (Callback callback : callbacks) {
        if (callback instanceof OAuthBearerTokenCallback) {
            try {
                handleTokenCallback((OAuthBearerTokenCallback) callback);
            } catch (KafkaException e) {
                // Wrap in IOException per the CallbackHandler contract, keeping the cause.
                throw new IOException(e.getMessage(), e);
            }
        } else if (callback instanceof SaslExtensionsCallback) {
            try {
                handleExtensionsCallback((SaslExtensionsCallback) callback);
            } catch (KafkaException e) {
                throw new IOException(e.getMessage(), e);
            }
        } else {
            throw new UnsupportedCallbackException(callback);
        }
    }
}
// Verifies that an extension option with an invalid name ("test.Id" contains a dot) causes
// handle() to fail with an IOException (the handler wraps the underlying KafkaException).
@Test public void throwsErrorOnInvalidExtensionName() { Map<String, String> options = new HashMap<>(); options.put("unsecuredLoginExtension_test.Id", "1"); OAuthBearerUnsecuredLoginCallbackHandler callbackHandler = createCallbackHandler(options, new MockTime()); SaslExtensionsCallback callback = new SaslExtensionsCallback(); assertThrows(IOException.class, () -> callbackHandler.handle(new Callback[] {callback})); }
// Resolves a numeric UID by running the POSIX "id -u [user]" command. Not supported on Windows.
// When a user name is given, a probe run of "id -u <user>" is first executed; if it exits
// non-zero the user argument is dropped so the final run falls back to "id -u" (current user).
// NOTE(review): the probe ProcessBuilder's output streams are never read and the final command's
// stderr is inherited — presumably intentional best-effort behavior; confirm before changing.
// Returns -1 if the command output cannot be parsed as an integer or the command cannot be run.
public static int getUserId(String user) { if (ServerUtils.IS_ON_WINDOWS) { throw new IllegalArgumentException("Not supported in Windows platform"); } List<String> cmdArgs = new ArrayList<>(); cmdArgs.add("id"); cmdArgs.add("-u"); if (user != null && !user.isEmpty()) { cmdArgs.add(user); int exitCode = 0; try { exitCode = new ProcessBuilder(cmdArgs).start().waitFor(); } catch (Exception e) { // Ignore } finally { if (exitCode != 0) { LOG.debug("CMD: '{}' returned exit code of {}", String.join(" ", cmdArgs), exitCode); cmdArgs.remove(user); } } } LOG.debug("CMD: {}", String.join(" ", cmdArgs)); ProcessBuilder pb = new ProcessBuilder(cmdArgs); pb.redirectError(ProcessBuilder.Redirect.INHERIT); try (BufferedReader in = new BufferedReader(new InputStreamReader(pb.start().getInputStream(), StandardCharsets.UTF_8))) { String line = in.readLine(); LOG.debug("CMD-LINE#1: {}", line); try { return Integer.parseInt(line.trim()); } catch (NumberFormatException ex) { LOG.error("Expecting UID integer but got {} in output of \"id -u {}\" command", line, user); return -1; } } catch (IOException ex) { LOG.error(String.format("Cannot read output of command \"%s\"", String.join(" ", cmdArgs)), ex); return -1; } }
// Verifies (on non-Windows platforms) that the current user's UID from getUserId(null) matches
// the owner UID of a file the test just created.
@Test public void testGetUserId() throws Exception { if (ServerUtils.IS_ON_WINDOWS) { return; // trivially succeed on Windows, since this test is not for Windows platform } int uid1 = ServerUtils.getUserId(null); Path p = Files.createTempFile("testGetUser", ".txt"); int uid2 = ServerUtils.getPathOwnerUid(p.toString()); if (!p.toFile().delete()) { LOG.warn("Could not delete temporary file {}", p); } assertEquals(uid1, uid2, "User UID " + uid1 + " is not same as file " + p + " owner UID of " + uid2); }
// Returns this transform function's name.
@Override public String getName() { return _name; }
// Verifies that a case-insensitively spelled "rEvErSe" expression resolves to the scalar
// "reverse" wrapper and produces the character-reversed value for every row.
@Test public void testStringReverseTransformFunction() { ExpressionContext expression = RequestContextUtils.getExpression(String.format("rEvErSe(%s)", STRING_ALPHANUM_SV_COLUMN)); TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap); assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper); assertEquals(transformFunction.getName(), "reverse"); String[] expectedValues = new String[NUM_ROWS]; for (int i = 0; i < NUM_ROWS; i++) { expectedValues[i] = new StringBuilder(_stringAlphaNumericSVValues[i]).reverse().toString(); } testTransformFunction(transformFunction, expectedValues); }
// Creates a JsonAsserter over the parsed representation of the given JSON string.
public static JsonAsserter with(String json) { return new JsonAsserterImpl(JsonPath.parse(json).json()); }
// Verifies that a wildcard path followed by a further path segment yields a list of all
// matching leaf values (here the winnerId of every winner).
@Test public void path_including_wildcard_path_followed_by_another_path_concatenates_results_to_list() throws Exception { with(getResourceAsStream("lotto.json")).assertThat("lotto.winners[*].winnerId", hasItems(23, 54)); }
// Synchronous poll: delegates to the async variant and blocks for its result.
@Override public List<V> poll(int limit) { return get(pollAsync(limit)); }
// Verifies limited polling: the first poll removes exactly the first 3 elements, a larger
// limit drains the remainder, and polling an empty queue returns an empty list.
@Test public void testPollLimited() { RQueue<Integer> queue = getQueue(); queue.addAll(Arrays.asList(1, 2, 3, 4, 5, 6, 7)); List<Integer> elements = queue.poll(3); assertThat(elements).containsExactly(1, 2, 3); List<Integer> elements2 = queue.poll(10); assertThat(elements2).containsExactly(4, 5, 6, 7); List<Integer> elements3 = queue.poll(5); assertThat(elements3).isEmpty(); }
// Incrementally updates the uid or gid name map for a single user/group name by shelling out to
// the platform-specific id-lookup command (getName2IdCmdNIX on Linux/SunOS/BSD with ":" as the
// field separator, getName2IdCmdMac on Mac with whitespace separators). A numeric group name
// triggers a full group-map reload instead. Static mappings are refreshed first and the name is
// shell-quoted before being embedded in the command. lastUpdateTime is only advanced when the
// map actually changed. No-op on unsupported platforms.
synchronized private void updateMapIncr(final String name, final boolean isGrp) throws IOException { if (!checkSupportedPlatform()) { return; } if (isInteger(name) && isGrp) { loadFullGroupMap(); return; } boolean updated = false; updateStaticMapping(); String name2 = bashQuote(name); if (OS.startsWith("Linux") || OS.equals("SunOS") || OS.contains("BSD")) { if (isGrp) { updated = updateMapInternal(gidNameMap, "group", getName2IdCmdNIX(name2, true), ":", staticMapping.gidMapping); } else { updated = updateMapInternal(uidNameMap, "user", getName2IdCmdNIX(name2, false), ":", staticMapping.uidMapping); } } else { // Mac if (isGrp) { updated = updateMapInternal(gidNameMap, "group", getName2IdCmdMac(name2, true), "\\s+", staticMapping.gidMapping); } else { updated = updateMapInternal(uidNameMap, "user", getName2IdCmdMac(name2, false), "\\s+", staticMapping.uidMapping); } } if (updated) { lastUpdateTime = Time.monotonicNow(); } }
// Verifies incremental map updates: starting from cleared name maps, each of getUserName,
// getUid, getGroupName, and getGid repopulates the incremental mapping entry-by-entry to match
// a fully-loaded reference mapping, both in values and in final map sizes.
@Test public void testUpdateMapIncr() throws IOException { Configuration conf = new Configuration(); conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 600000); ShellBasedIdMapping refIdMapping = new ShellBasedIdMapping(conf, true); ShellBasedIdMapping incrIdMapping = new ShellBasedIdMapping(conf); // Command such as "getent passwd <userName>" will return empty string if // <username> is numerical, remove them from the map for testing purpose. BiMap<Integer, String> uidNameMap = refIdMapping.getUidNameMap(); BiMap<Integer, String> gidNameMap = refIdMapping.getGidNameMap(); // Force empty map, to see effect of incremental map update of calling // getUserName() incrIdMapping.clearNameMaps(); uidNameMap = refIdMapping.getUidNameMap(); for (BiMap.Entry<Integer, String> me : uidNameMap.entrySet()) { Integer id = me.getKey(); String name = me.getValue(); String tname = incrIdMapping.getUserName(id, null); assertEquals(name, tname); } assertEquals(uidNameMap.size(), incrIdMapping.getUidNameMap().size()); // Force empty map, to see effect of incremental map update of calling // getUid() incrIdMapping.clearNameMaps(); for (BiMap.Entry<Integer, String> me : uidNameMap.entrySet()) { Integer id = me.getKey(); String name = me.getValue(); Integer tid = incrIdMapping.getUid(name); assertEquals(id, tid); } assertEquals(uidNameMap.size(), incrIdMapping.getUidNameMap().size()); // Force empty map, to see effect of incremental map update of calling // getGroupName() incrIdMapping.clearNameMaps(); gidNameMap = refIdMapping.getGidNameMap(); for (BiMap.Entry<Integer, String> me : gidNameMap.entrySet()) { Integer id = me.getKey(); String name = me.getValue(); String tname = incrIdMapping.getGroupName(id, null); assertEquals(name, tname); } assertEquals(gidNameMap.size(), incrIdMapping.getGidNameMap().size()); // Force empty map, to see effect of incremental map update of calling // getGid() incrIdMapping.clearNameMaps(); gidNameMap = refIdMapping.getGidNameMap(); for 
(BiMap.Entry<Integer, String> me : gidNameMap.entrySet()) { Integer id = me.getKey(); String name = me.getValue(); Integer tid = incrIdMapping.getGid(name); assertEquals(id, tid); } assertEquals(gidNameMap.size(), incrIdMapping.getGidNameMap().size()); }
/**
 * Computes the total size in bytes of the file tree rooted at {@code path}
 * by walking it with a size-accumulating visitor.
 *
 * @param path the file or directory to measure
 * @return the accumulated size of all visited files, in bytes
 * @throws IOException if walking the file tree fails
 */
public static long sizeOf(Path path) throws IOException {
    final SizeVisitor sizeAccumulator = new SizeVisitor();
    Files.walkFileTree(path, sizeAccumulator);
    return sizeAccumulator.size;
}
// Verifies that sizeOf on a directory equals the sum of its individual files' sizes (including
// nested ones) and matches commons-io's FileUtils.sizeOfDirectory as a sanity check.
@Test public void sizeOf_sums_sizes_of_all_files_in_directory() throws IOException { File dir = temporaryFolder.newFolder(); File child = new File(dir, "child.txt"); File grandChild1 = new File(dir, "grand/child1.txt"); File grandChild2 = new File(dir, "grand/child2.txt"); FileUtils.write(child, "foo", UTF_8); FileUtils.write(grandChild1, "bar", UTF_8); FileUtils.write(grandChild2, "baz", UTF_8); long childSize = FileUtils2.sizeOf(child.toPath()); assertThat(childSize).isPositive(); long grandChild1Size = FileUtils2.sizeOf(grandChild1.toPath()); assertThat(grandChild1Size).isPositive(); long grandChild2Size = FileUtils2.sizeOf(grandChild2.toPath()); assertThat(grandChild2Size).isPositive(); assertThat(FileUtils2.sizeOf(dir.toPath())) .isEqualTo(childSize + grandChild1Size + grandChild2Size); // sanity check by comparing commons-io assertThat(FileUtils2.sizeOf(dir.toPath())) .isEqualTo(FileUtils.sizeOfDirectory(dir)); }
// Dispatches authorization validation by statement type: Query, InsertInto, CreateAsSelect,
// PrintTopic, and CreateSource each route to a dedicated validator. Statements of any other
// type pass through without any authorization check.
@Override public void checkAuthorization( final KsqlSecurityContext securityContext, final MetaStore metaStore, final Statement statement ) { if (statement instanceof Query) { validateQuery(securityContext, metaStore, (Query)statement); } else if (statement instanceof InsertInto) { validateInsertInto(securityContext, metaStore, (InsertInto)statement); } else if (statement instanceof CreateAsSelect) { validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement); } else if (statement instanceof PrintTopic) { validatePrintTopic(securityContext, (PrintTopic)statement); } else if (statement instanceof CreateSource) { validateCreateSource(securityContext, (CreateSource)statement); } }
// Verifies that a CREATE STREAM ... AS SELECT with a DELIMITED value format (no schema
// inference) passes authorization even when write access to the value subject is denied.
@Test public void shouldNotThrowWhenCreateAsSelectOnNewTopicWithoutValueSchemaInferenceFormats() { // Given: givenSubjectAccessDenied("topic-value", AclOperation.WRITE); final Statement statement = givenStatement(String.format( "CREATE STREAM newStream WITH (kafka_topic='topic', value_format='DELIMITED') " + "AS SELECT * FROM %s;", KAFKA_STREAM_TOPIC) ); // Then/When: authorizationValidator.checkAuthorization(securityContext, metaStore, statement); }
// Validates an add-field or drop-field request against a base column. Checks, in order: the
// base column exists and has a valid type; every nested parent field name resolves and is a
// valid type; the final target type is a struct. For a drop, the named field must exist; for an
// add, it must not already exist, a type definition is required (zero-length string scalar
// types are widened to length 1), and the type definition and optional field position are
// analyzed. Each violation raises an AnalysisException with a specific message.
public void analyze(Column baseCol, boolean dropField) throws AnalysisException { if (baseCol == null) { throw new AnalysisException(String.format("Analyze add/drop field failed, modify column is not exist")); } Type targetFieldType = baseCol.getType(); if (!checkType(targetFieldType)) { throw new AnalysisException( String.format("column %s type %s is not Struct", baseCol.getName(), targetFieldType.toString())); } if (nestedParentFieldNames != null && !nestedParentFieldNames.isEmpty()) { for (String name : nestedParentFieldNames) { targetFieldType = getFieldType(targetFieldType, name); if (targetFieldType == null) { throw new AnalysisException( String.format("No field %s exist in column %s", name, baseCol.getName())); } if (!checkType(targetFieldType)) { throw new AnalysisException( String.format("Field %s type %s is not valid", name, targetFieldType.toString())); } } } if (!targetFieldType.isStructType()) { throw new AnalysisException("Target Field is not struct"); } StructField childField = ((StructType) targetFieldType).getField(fieldName); if (dropField) { if (childField == null) { throw new AnalysisException(String.format("Drop field %s is not found", fieldName)); } } else { if (childField != null) { throw new AnalysisException(String.format("Field %s is already exist", fieldName)); } if (typeDef == null) { throw new AnalysisException("No filed type in field definition"); } if (typeDef.getType().isScalarType()) { final ScalarType targetType = (ScalarType) typeDef.getType(); if (targetType.getPrimitiveType().isStringType()) { if (targetType.getLength() <= 0) { targetType.setLength(1); } } } typeDef.analyze(); if (fieldPos != null) { fieldPos.analyze(); } } }
// Exercises StructFieldDesc.analyze for both drop and add: null/non-struct base columns,
// missing nested parents, dropping a missing field, adding an existing field, and a missing
// type definition all throw AnalysisException; valid drop and add requests pass.
@Test public void testAnalyze() { StructField field1 = new StructField("v1", Type.INT); StructField field2 = new StructField("v2", Type.VARCHAR); StructField field3 = new StructField("v3", Type.INT); StructType subStructType = new StructType(Lists.newArrayList(field2, field3)); StructField field4 = new StructField("v4", subStructType); StructType type = new StructType(Lists.newArrayList(field1, field4)); Column structCol1 = new Column("structCol1", type); Type addType = ScalarType.createType(PrimitiveType.INT); TypeDef addTypeDef = new TypeDef(addType); Column intCol1 = new Column("intCol1", addType); StructFieldDesc dropFieldDesc1 = new StructFieldDesc("v2", Lists.newArrayList("v1"), null, null); // base column not exist; Assertions.assertThrows(AnalysisException.class, () -> dropFieldDesc1.analyze(null, true)); // base column is not struct column Assertions.assertThrows(AnalysisException.class, () -> dropFieldDesc1.analyze(intCol1, true)); // nested field is not struct Assertions.assertThrows(AnalysisException.class, () -> dropFieldDesc1.analyze(structCol1, true)); // drop field is not exist StructFieldDesc dropFieldDesc2 = new StructFieldDesc("v1", Lists.newArrayList("v6"), null, null); Assertions.assertThrows(AnalysisException.class, () -> dropFieldDesc2.analyze(structCol1, true)); // normal drop field StructFieldDesc dropFieldDesc3 = new StructFieldDesc("v2", Lists.newArrayList("v4"), null, null); Assertions.assertDoesNotThrow(() -> dropFieldDesc3.analyze(structCol1, true)); // add exist field StructFieldDesc addFieldDesc1 = new StructFieldDesc("v2", Lists.newArrayList("v4"), addTypeDef, null); Assertions.assertThrows(AnalysisException.class, () -> addFieldDesc1.analyze(structCol1, false)); // type not exist StructFieldDesc addFieldDesc2 = new StructFieldDesc("v5", Lists.newArrayList("v6"), null, null); Assertions.assertThrows(AnalysisException.class, () -> addFieldDesc2.analyze(structCol1, false)); // normal add field StructFieldDesc addFieldDesc3 = new 
StructFieldDesc("v5", Lists.newArrayList("v4"), addTypeDef, null); Assertions.assertDoesNotThrow(() -> addFieldDesc3.analyze(structCol1, false)); }
/**
 * Orders document paths lexicographically by their path elements, comparing
 * individual elements with the natural ordering of strings.
 */
@Override
public int compareTo(DocumentPath that) {
    final Comparator<String> elementOrder = Comparator.naturalOrder();
    return Comparators.lexicographical(elementOrder).compare(this.pathElements, that.pathElements);
}
// Verifies lexicographic path ordering: a prefix path sorts before its extension (and vice
// versa), element-wise comparison decides between diverging paths, and a path equals itself.
@SuppressWarnings("SelfComparison") @Test public void comparePaths() { DocumentPath one = path("root"); DocumentPath four = path("root.a.b.c.d"); DocumentPath difFour = path("root.e.c.b.a"); assertThat(one.compareTo(four), is(lessThan(0))); assertThat(four.compareTo(one), is(greaterThan(0))); assertThat(difFour.compareTo(four), is(greaterThan(0))); assertThat(difFour.compareTo(difFour), is(equalTo(0))); }
// Reads a file's bytes through the file client resolved for the given config id; asserts the
// client exists before delegating the read to it.
@Override public byte[] getFileContent(Long configId, String path) throws Exception { FileClient client = fileConfigService.getFileClient(configId); Assert.notNull(client, "客户端({}) 不能为空", configId); return client.getContent(path); }
// Verifies that getFileContent resolves the client for the config id and returns exactly the
// byte array produced by the client's getContent call.
@Test public void testGetFileContent() throws Exception { // 准备参数 Long configId = 10L; String path = "tudou.jpg"; // mock 方法 FileClient client = mock(FileClient.class); when(fileConfigService.getFileClient(eq(10L))).thenReturn(client); byte[] content = new byte[]{}; when(client.getContent(eq("tudou.jpg"))).thenReturn(content); // 调用 byte[] result = fileService.getFileContent(configId, path); // 断言 assertSame(result, content); }
/**
 * Returns an unmodifiable snapshot of all Hazelcast client instances currently
 * registered in the CLIENTS registry, resolving each pending instance future.
 */
public static Collection<HazelcastInstance> getAllHazelcastClients() {
    final Set<HazelcastInstance> clients = createHashSet(CLIENTS.size());
    CLIENTS.values().forEach(future -> clients.add(future.get()));
    return Collections.unmodifiableCollection(clients);
}
// Verifies that getAllHazelcastClients returns both a newly created client and one obtained via
// getOrCreateHazelcastClient, and nothing else.
@Test public void testGetAllHazelcastClients() { HazelcastInstance client1 = HazelcastClient.newHazelcastClient(new ClientConfig() .setInstanceName(randomString())); HazelcastInstance client2 = HazelcastClient.getOrCreateHazelcastClient(new ClientConfig() .setInstanceName(randomString())); Collection<HazelcastInstance> clients = HazelcastClient.getAllHazelcastClients(); assertEquals(2, clients.size()); assertContains(clients, client1); assertContains(clients, client2); }
// Deletes the persistent catalog record inside a transaction. Throws NoSuchObjectException if
// no catalog with the given name exists; the finally block rolls the transaction back whenever
// the commit did not happen (including the not-found and failure paths).
@Override public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { LOG.debug("Dropping catalog {}", catalogName); boolean committed = false; try { openTransaction(); MCatalog mCat = getMCatalog(catalogName); pm.retrieve(mCat); if (mCat == null) { throw new NoSuchObjectException("No catalog " + catalogName); } pm.deletePersistent(mCat); committed = commitTransaction(); } finally { if (!committed) { rollbackTransaction(); } } }
// Verifies that dropping a catalog that does not exist throws NoSuchObjectException.
@Test(expected = NoSuchObjectException.class) public void dropNoSuchCatalog() throws MetaException, NoSuchObjectException { objectStore.dropCatalog("no_such_catalog"); }
// Populates MirrorMaker2 connector config entries for one cluster under the given prefix:
// alias and bootstrap servers, TLS settings (via addTLSConfigToMirrorMaker2ConnectorConfig,
// which also determines the base security protocol), and authentication. TLS client auth adds
// a PKCS12 keystore with a file-provider password reference; PLAIN/SCRAM add the matching SASL
// mechanism and a JAAS config whose password is read from the connectors config file
// (SASL_SSL when TLS is configured, SASL_PLAINTEXT otherwise); OAuth adds the OAUTHBEARER
// mechanism, JAAS config, and login callback handler. Finally the security protocol, the
// cluster's prefixed config entries, and any additional properties are copied in.
static void addClusterToMirrorMaker2ConnectorConfig(Map<String, Object> config, KafkaMirrorMaker2ClusterSpec cluster, String configPrefix) { config.put(configPrefix + "alias", cluster.getAlias()); config.put(configPrefix + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); String securityProtocol = addTLSConfigToMirrorMaker2ConnectorConfig(config, cluster, configPrefix); if (cluster.getAuthentication() != null) { if (cluster.getAuthentication() instanceof KafkaClientAuthenticationTls) { config.put(configPrefix + SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12"); config.put(configPrefix + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, STORE_LOCATION_ROOT + cluster.getAlias() + KEYSTORE_SUFFIX); config.put(configPrefix + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "${file:" + CONNECTORS_CONFIG_FILE + ":" + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG + "}"); } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationPlain plainAuthentication) { securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT"; config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "PLAIN"); config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.plain.PlainLoginModule", Map.of("username", plainAuthentication.getUsername(), "password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}"))); } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationScram scramAuthentication) { securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT"; config.put(configPrefix + SaslConfigs.SASL_MECHANISM, scramAuthentication instanceof KafkaClientAuthenticationScramSha256 ? 
"SCRAM-SHA-256" : "SCRAM-SHA-512"); config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.scram.ScramLoginModule", Map.of("username", scramAuthentication.getUsername(), "password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}"))); } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationOAuth oauthAuthentication) { securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT"; config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "OAUTHBEARER"); config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, oauthJaasConfig(cluster, oauthAuthentication)); config.put(configPrefix + SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler"); } } // Security protocol config.put(configPrefix + AdminClientConfig.SECURITY_PROTOCOL_CONFIG, securityProtocol); config.putAll(cluster.getConfig().entrySet().stream() .collect(Collectors.toMap(entry -> configPrefix + entry.getKey(), Map.Entry::getValue))); config.putAll(cluster.getAdditionalProperties());  }
// Verifies that a cluster spec without TLS or authentication yields only the prefixed alias,
// bootstrap servers, and a PLAINTEXT security protocol.
@Test public void testAddClusterToMirrorMaker2ConnectorConfigWithoutAuth() { Map<String, Object> config = new HashMap<>(); KafkaMirrorMaker2ClusterSpec cluster = new KafkaMirrorMaker2ClusterSpecBuilder() .withAlias("sourceClusterAlias") .withBootstrapServers("sourceClusterAlias.sourceNamespace.svc:9092") .build(); KafkaMirrorMaker2Connectors.addClusterToMirrorMaker2ConnectorConfig(config, cluster, PREFIX); assertThat(new TreeMap<>(config), is(new TreeMap<>(Map.of("prefix.alias", "sourceClusterAlias", "prefix.security.protocol", "PLAINTEXT", "prefix.bootstrap.servers", "sourceClusterAlias.sourceNamespace.svc:9092")))); }
/**
 * Runs the garbage collector the number of times requested via the task's
 * "runs" parameter, writing a progress line (flushed immediately) before each
 * run and a final "Done!" line afterwards.
 */
@Override
@SuppressWarnings("CallToSystemGC")
public void execute(Map<String, List<String>> parameters, PrintWriter output) {
    final int runs = parseRuns(parameters);
    int completed = 0;
    while (completed < runs) {
        output.println("Running GC...");
        output.flush();
        runtime.gc();
        completed++;
    }
    output.println("Done!");
}
// Verifies that when multiple "runs" values are supplied, only the first one ("3") determines
// how many times runtime.gc() is invoked.
@Test void usesTheFirstRunsParameter() throws Exception { final Map<String, List<String>> parameters = Collections.singletonMap("runs", Arrays.asList("3", "2")); task.execute(parameters, output); verify(runtime, times(3)).gc(); }
@Override
public <T> Exporter<T> export(Invoker<T> invoker) throws RpcException {
    // Make sure the QoS server is running before delegating the actual export.
    URL url = invoker.getUrl();
    startQosServer(url, true);
    return protocol.export(invoker);
}
@Test
void testMultiProtocol() throws Exception {
    // tri protocol start first, acceptForeignIp = true
    triWrapper.export(triInvoker);
    // The QoS server must be up on the expected host/port and open to foreign IPs.
    assertThat(server.isStarted(), is(true));
    assertThat(server.getHost(), is("localhost"));
    assertThat(server.getPort(), is(12345));
    assertThat(server.isAcceptForeignIp(), is(true));
    verify(triProtocol).export(triInvoker);

    // next registry protocol server still acceptForeignIp=true even though wrapper invoker url set false
    wrapper.export(invoker);
    // First-started configuration wins: the already-running server keeps its settings.
    assertThat(server.isStarted(), is(true));
    assertThat(server.getHost(), is("localhost"));
    assertThat(server.getPort(), is(12345));
    assertThat(server.isAcceptForeignIp(), is(true));
    verify(protocol).export(invoker);
}
public ClusterNode getNode(String clusterId, UUID nodeId) {
    // Resolve the node through the driver's cluster metadata.
    CqlSession session = cqlSessionFactory.get(clusterId);
    Node found = session.getMetadata().getNodes().get(nodeId);
    if (found == null) {
        throw new ClusterNodeNotFoundException(String.format("not exists node(%s)", nodeId));
    }
    return ClusterNode.from(found);
}
@Test void not_exists_node_in_cluster() { // when & then assertThatThrownBy(() -> clusterNodeGetCommander.getNode(CLUSTER_ID, UUID.randomUUID())) .isInstanceOf(ClusterNodeNotFoundException.class); }
public static <T> Map<String, T> translateDeprecatedConfigs(Map<String, T> configs, String[][] aliasGroups) {
    // Each group lists the canonical key first, followed by its deprecated aliases;
    // expand that array form into the map shape the main overload expects.
    var aliasesByCanonicalName = Stream.of(aliasGroups)
            .collect(Collectors.toMap(
                    group -> group[0],
                    group -> Stream.of(group).skip(1).collect(Collectors.toList())));
    return translateDeprecatedConfigs(configs, aliasesByCanonicalName);
}
@Test
public void testNullMapEntriesWithoutAliasesDoNotThrowNPE() {
    // A null value under a key with no alias group must survive translation untouched.
    Map<String, String> original = new HashMap<>();
    original.put("other", null);

    Map<String, String> translated = ConfigUtils.translateDeprecatedConfigs(original, new String[][]{
        {"foo.bar", "foo.bar.deprecated"}
    });

    assertNotNull(translated);
    assertNull(translated.get("other"));
}
static boolean fieldMatchCaseInsensitive(Object repoObj, Object filterObj) {
    // Try the exact match first; only then fall back to the
    // case-insensitive comparison (which applies to Strings only).
    if (fieldMatch(repoObj, filterObj)) {
        return true;
    }
    return compareIgnoreCaseOnlyIfStringType(repoObj, filterObj);
}
@Test
public void testFieldMatchWithEqualObjectsIgnoreCaseShouldReturnTrue() {
    // Same letters, different case: the case-insensitive match must succeed.
    String repoValue = "repoObject";
    String filterValue = "REPOOBJECT";
    assertTrue(Utilities.fieldMatchCaseInsensitive(repoValue, filterValue));
}
/**
 * Decodes a {@code MultiPointToSinglePointIntent} from its JSON representation.
 * <p>
 * Fix: the original re-read {@code json.get(INGRESS_POINT)} a second time and
 * null-checked values that {@code nullIsIllegal} had already guaranteed to be
 * non-null (it throws when the member is missing), leaving dead branches.
 * Both redundancies are removed; behavior is unchanged.
 */
@Override
public MultiPointToSinglePointIntent decode(ObjectNode json, CodecContext context) {
    MultiPointToSinglePointIntent.Builder builder = MultiPointToSinglePointIntent.builder();
    IntentCodec.intentAttributes(json, context, builder);
    ConnectivityIntentCodec.intentAttributes(json, context, builder);

    // nullIsIllegal throws if the member is absent, so the result is always non-null.
    ArrayNode ingressJson = nullIsIllegal((ArrayNode) json.get(INGRESS_POINT),
            INGRESS_POINT + IntentCodec.MISSING_MEMBER_MESSAGE);
    final JsonCodec<ConnectPoint> connectPointCodec = context.codec(ConnectPoint.class);
    Set<FilteredConnectPoint> ingressCp = new HashSet<>();
    for (int i = 0; i < ingressJson.size(); i++) {
        ingressCp.add(new FilteredConnectPoint(
                connectPointCodec.decode(get(ingressJson, i), context)));
    }
    builder.filteredIngressPoints(ingressCp);

    ObjectNode egressJson = nullIsIllegal(get(json, EGRESS_POINT),
            EGRESS_POINT + IntentCodec.MISSING_MEMBER_MESSAGE);
    ConnectPoint egress = context.codec(ConnectPoint.class).decode(egressJson, context);
    builder.filteredEgressPoint(new FilteredConnectPoint(egress));

    return builder.build();
}
@Test
public void decodeMultiPointToSinglePointIntent() throws IOException {
    final JsonNodeFactory factory = JsonNodeFactory.instance;

    // Intent envelope.
    ObjectNode intentJson = factory.objectNode();
    intentJson.put("type", "MultiPointToSinglePointIntent");
    intentJson.put("id", "0x0");
    intentJson.put("appId", "foo");
    intentJson.put("priority", 100);

    // Two ingress points, added in a fixed order.
    ObjectNode firstIngress = factory.objectNode();
    firstIngress.put("port", "3");
    firstIngress.put("device", "333");
    ObjectNode secondIngress = factory.objectNode();
    secondIngress.put("port", "1");
    secondIngress.put("device", "111");
    ArrayNode ingressArray = factory.arrayNode();
    ingressArray.add(firstIngress);
    ingressArray.add(secondIngress);
    intentJson.set("ingressPoint", ingressArray);

    // Single egress point.
    ObjectNode egress = factory.objectNode();
    egress.put("port", "2");
    egress.put("device", "222");
    intentJson.set("egressPoint", egress);
    assertThat(intentJson, notNullValue());

    JsonCodec<MultiPointToSinglePointIntent> codec = context.codec(MultiPointToSinglePointIntent.class);
    assertThat(codec, notNullValue());

    final MultiPointToSinglePointIntent intent = codec.decode(intentJson, context);
    assertThat(intent.toString(), notNullValue());
    assertThat(intent, instanceOf(MultiPointToSinglePointIntent.class));
    assertThat(intent.priority(), is(100));
    assertThat(intent.ingressPoints().toString(), is("[333/3, 111/1]"));
    assertThat(intent.egressPoint().toString(), is("222/2"));
}
/**
 * Replaces an existing measure for the given component/metric pair.
 * <p>
 * Fix: {@code metric} is now explicitly null-checked up front, matching the
 * existing check on {@code component} (callers already observed an NPE for a
 * null metric, but only implicitly via the downstream dereference).
 *
 * @throws NullPointerException          if component or metric is null
 * @throws UnsupportedOperationException if no measure exists yet for the pair
 */
@Override
public void update(Component component, Metric metric, Measure measure) {
    requireNonNull(component);
    requireNonNull(metric);
    checkValueTypeConsistency(metric, measure);
    Optional<Measure> existingMeasure = find(component, metric);
    if (!existingMeasure.isPresent()) {
        throw new UnsupportedOperationException(
            format(
                "a measure can be updated only if one already exists for a specific Component (key=%s), Metric (key=%s). Use add method",
                component.getKey(), metric.getKey()));
    }
    // OVERRIDE policy: the new measure replaces the stored one.
    add(component, metric, measure, OverridePolicy.OVERRIDE);
}
@Test
public void update_throws_NPE_if_Component_metric_is_null() {
    // A null metric must be rejected with a NullPointerException, not accepted silently.
    assertThatThrownBy(() -> underTest.update(FILE_COMPONENT, null, SOME_MEASURE))
      .isInstanceOf(NullPointerException.class);
}
/**
 * Runs the pluggable task and reports its outcome to the publisher.
 * <p>
 * The task is executed through the task extension; any exception raised during
 * execution is logged (not rethrown from here), and the console-logger context
 * is always cleared afterwards. A null result or an unsuccessful result is
 * reported as an error on the publisher.
 */
@Override
public void build(final DefaultGoPublisher publisher, final EnvironmentVariableContext environmentVariableContext,
                  TaskExtension taskExtension, ArtifactExtension artifactExtension,
                  PluginRequestProcessorRegistry pluginRequestProcessorRegistry, Charset consoleLogCharset) {
    ExecutionResult executionResult = null;
    try {
        // Delegate the actual work to the plugin; executeTask is overridable (used by tests).
        executionResult = taskExtension.execute(pluginId,
            (task, pluginDescriptor) -> executeTask(task, publisher, environmentVariableContext, consoleLogCharset));
    } catch (Exception e) {
        logException(publisher, e);
    } finally {
        // Always detach the console logger context, even on failure.
        JobConsoleLoggerInternal.unsetContext();
    }
    if (executionResult == null) {
        // Plugin contract violation: it must return an explicit success/failure.
        logError(publisher, "ExecutionResult cannot be null. Please return a success or a failure response.");
    } else if (!executionResult.isSuccessful()) {
        // NOTE(review): logError appears to also surface the failure to the caller
        // (tests expect a RuntimeException carrying this message) — confirm.
        logError(publisher, executionResult.getMessagesForDisplay());
    }
}
@Test
public void shouldPublishErrorMessageIfPluginReturnsAFailureResponse() {
    // Builder whose task execution is stubbed to always fail with message "err".
    PluggableTaskBuilder taskBuilder = new PluggableTaskBuilder(runIfConfigs, cancelBuilder, pluggableTask, TEST_PLUGIN_ID, "test-directory") {
        @Override
        protected ExecutionResult executeTask(Task task, DefaultGoPublisher publisher, EnvironmentVariableContext environmentVariableContext, Charset consoleLogCharset) {
            return ExecutionResult.failure("err");
        }
    };

    // The failure surfaces as a thrown RuntimeException carrying the plugin's message...
    assertThatThrownBy(() -> taskBuilder.build(goPublisher, variableContext, taskExtension, null, null, UTF_8))
        .isInstanceOf(RuntimeException.class)
        .hasMessage("err");

    // ...and is also written to the console as an ERR-tagged line.
    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    verify(goPublisher).taggedConsumeLine(eq(DefaultGoPublisher.ERR), captor.capture());
    assertThat(captor.getValue()).isEqualTo("err");
}
public KsqlConfig getConfig(final boolean withOverridesApplied) {
    // Without overrides, hand back the shared system config instance as-is.
    if (withOverridesApplied) {
        return systemConfig.cloneWithPropertyOverwrite(overrides);
    }
    return systemConfig;
}
@Test public void shouldGetConfigWithoutOverrides() { // When: final KsqlConfig result = config.getConfig(false); // Then: assertThat(result, is(sameInstance(systemConfig))); }
/**
 * Builds the property bindings consumed by the interpreter pod template.
 * <p>
 * Collects k8s template properties, the environment-variable map, and (for
 * Spark) Spark-specific settings. Note that this method MUTATES {@code properties}:
 * it removes the driver java-options keys and writes back the resolved
 * {@code zeppelin.spark.uiWebUrl} / {@code zeppelin.k8s.spark.ingress.host}.
 *
 * @param userName the user the interpreter runs as (may be null; stringified)
 * @return the complete bindings, with interpreter properties layered on top
 */
@VisibleForTesting
Properties getTemplateBindings(String userName) {
  Properties k8sProperties = new Properties();

  // k8s template properties
  k8sProperties.put("zeppelin.k8s.interpreter.user", String.valueOf(userName).trim());
  k8sProperties.put("zeppelin.k8s.interpreter.namespace", getInterpreterNamespace());
  k8sProperties.put("zeppelin.k8s.interpreter.pod.name", getPodName());
  k8sProperties.put("zeppelin.k8s.interpreter.serviceAccount", getServiceAccount());
  k8sProperties.put("zeppelin.k8s.interpreter.container.name", interpreterGroupName.toLowerCase());
  k8sProperties.put("zeppelin.k8s.interpreter.container.image", containerImage);
  k8sProperties.put("zeppelin.k8s.interpreter.group.id", getInterpreterGroupId());
  k8sProperties.put("zeppelin.k8s.interpreter.group.name", interpreterGroupName);
  k8sProperties.put("zeppelin.k8s.interpreter.setting.name", getInterpreterSettingName());
  k8sProperties.put("zeppelin.k8s.interpreter.localRepo", getLocalRepoDir());
  k8sProperties.put("zeppelin.k8s.interpreter.rpc.portRange", getInterpreterPortRange());
  k8sProperties.put("zeppelin.k8s.server.rpc.service", intpEventServerHost);
  k8sProperties.put("zeppelin.k8s.server.rpc.portRange", intpEventServerPort);

  String serverNamespace = K8sUtils.getCurrentK8sNamespace();
  String interpreterNamespace = getInterpreterNamespace();
  // Set the owner reference (zeppelin-server pod) for garbage collection when zeppelin server
  // and the zeppelin interpreter is in the same namespace (Kubernetes cannot specify an owner
  // in different namespace).
  if (ownerUID() != null && ownerName() != null && StringUtils.equals(serverNamespace, interpreterNamespace)) {
    k8sProperties.put("zeppelin.k8s.server.uid", ownerUID());
    k8sProperties.put("zeppelin.k8s.server.pod.name", ownerName());
  }

  // environment variables: explicit env wins, then the process env, then the fallback domain
  Map<String, String> k8sEnv = new HashMap<>(getEnv());
  k8sEnv.put(ENV_SERVICE_DOMAIN, getEnv().getOrDefault(ENV_SERVICE_DOMAIN,
      System.getenv(ENV_SERVICE_DOMAIN) == null ?
          "local.zeppelin-project.org" : System.getenv(ENV_SERVICE_DOMAIN)));
  k8sEnv.put(ENV_ZEPPELIN_HOME, getEnv().getOrDefault(ENV_ZEPPELIN_HOME,
      System.getenv(ENV_ZEPPELIN_HOME)));

  if (isSpark()) {
    int webUiPort = 4040;
    k8sProperties.put("zeppelin.k8s.spark.container.image", sparkImage);

    // There is already initial value following --driver-java-options added in interpreter.sh
    // so we need to pass spark.driver.defaultJavaOptions and spark.driver.extraJavaOptions
    // as SPARK_DRIVER_EXTRAJAVAOPTIONS_CONF env variable to build spark-submit command correctly.
    StringJoiner driverExtraJavaOpts = new StringJoiner(" ");
    if (properties.containsKey(SPARK_DRIVER_DEFAULTJAVAOPTS)) {
      driverExtraJavaOpts.add((String) properties.remove(SPARK_DRIVER_DEFAULTJAVAOPTS));
    }
    if (properties.containsKey(SPARK_DRIVER_EXTRAJAVAOPTS)) {
      driverExtraJavaOpts.add((String) properties.remove(SPARK_DRIVER_EXTRAJAVAOPTS));
    }
    if (driverExtraJavaOpts.length() > 0) {
      k8sEnv.put("SPARK_DRIVER_EXTRAJAVAOPTIONS_CONF", driverExtraJavaOpts.toString());
    }

    if (isSparkOnKubernetes(properties)) {
      addSparkK8sProperties();
      k8sEnv.put("ZEPPELIN_SPARK_CONF", prepareZeppelinSparkConf(userName));
    }
    k8sEnv.put("SPARK_HOME", getEnv().getOrDefault("SPARK_HOME", "/spark"));

    // configure interpreter property "zeppelin.spark.uiWebUrl" if not defined,
    // to enable spark ui through reverse proxy
    String webUrl = (String) properties.get("zeppelin.spark.uiWebUrl");
    if (StringUtils.isBlank(webUrl)) {
      webUrl = "//{{PORT}}-{{SERVICE_NAME}}.{{SERVICE_DOMAIN}}";
    }
    properties.put("zeppelin.spark.uiWebUrl", sparkUiWebUrlFromTemplate(
        webUrl,
        webUiPort,
        getPodName(),
        k8sEnv.get(ENV_SERVICE_DOMAIN)
    ));

    // configure interpreter property "zeppelin.k8s.spark.ingress.host" if not defined,
    // to enable spark ui through ingress
    String ingressHost = (String) properties.get("zeppelin.k8s.spark.ingress.host");
    if (StringUtils.isBlank(ingressHost)) {
      ingressHost = "{{PORT}}-{{SERVICE_NAME}}.{{SERVICE_DOMAIN}}";
    }
    properties.put("zeppelin.k8s.spark.ingress.host", sparkUiWebUrlFromTemplate(
        ingressHost,
        webUiPort,
        getPodName(),
        k8sEnv.get(ENV_SERVICE_DOMAIN)
    ));

    // Resources of Interpreter Pod: derive pod memory from the spark driver
    // memory (+ explicit or default overhead), and pod cores from driver cores.
    if (properties.containsKey(SPARK_DRIVER_MEMORY)) {
      String memory;
      if (properties.containsKey(SPARK_DRIVER_MEMORY_OVERHEAD)) {
        memory = K8sUtils.calculateSparkMemory(properties.getProperty(SPARK_DRIVER_MEMORY),
            properties.getProperty(SPARK_DRIVER_MEMORY_OVERHEAD));
      } else {
        memory = K8sUtils.calculateMemoryWithDefaultOverhead(properties.getProperty(SPARK_DRIVER_MEMORY));
      }
      k8sProperties.put("zeppelin.k8s.interpreter.memory", memory);
    }
    if (properties.containsKey(SPARK_DRIVER_CORES)) {
      k8sProperties.put("zeppelin.k8s.interpreter.cores", properties.getProperty(SPARK_DRIVER_CORES));
    }
  }

  k8sProperties.put("zeppelin.k8s.envs", k8sEnv);

  // interpreter properties overrides the values
  k8sProperties.putAll(Maps.fromProperties(properties));
  return k8sProperties;
}
@Test
void testSparkPodResources() {
    // given: a spark interpreter configured with 1g driver memory and 1 driver core
    Properties properties = new Properties();
    properties.put("spark.driver.memory", "1g");
    properties.put("spark.driver.cores", "1");
    Map<String, String> envs = new HashMap<>();
    envs.put("SERVICE_DOMAIN", "mydomain");
    K8sRemoteInterpreterProcess intp = new K8sRemoteInterpreterProcess(
        client,
        "default",
        new File(".skip"),
        "interpreter-container:1.0",
        "shared_process",
        "spark",
        "myspark",
        properties,
        envs,
        "zeppelin.server.service",
        12320,
        false,
        "spark-container:1.0",
        10,
        10,
        false,
        false);
    // when
    Properties p = intp.getTemplateBindings(null);
    // then: pod cores mirror driver cores; 1g memory + default overhead resolves to 1408Mi
    assertEquals("1", p.get("zeppelin.k8s.interpreter.cores"));
    assertEquals("1408Mi", p.get("zeppelin.k8s.interpreter.memory"));
    intp.close();
}
@VisibleForTesting protected void handleException( String message, Exception exception, StringBuilder text, StringBuilder details ) { if ( exception instanceof KettleException ) { // Normal error KettleException ke = (KettleException) exception; Throwable cause = ke.getCause(); if ( cause != null && cause.getMessage() != null ) { text.append( cause.getMessage() ); } else { text.append( ke.getMessage() ); } } else if ( exception instanceof InvocationTargetException ) { // Error from somewhere else, what is the cause? Throwable cause = exception.getCause(); if ( cause instanceof KettleException ) { KettleException ke = (KettleException) cause; text.append( ke.getMessage() ); } else { text.append( Const.NVL( cause.getMessage(), cause.toString() ) ); while ( text.length() == 0 && cause != null ) { cause = cause.getCause(); if ( cause != null ) { text.append( Const.NVL( cause.getMessage(), cause.toString() ) ); } } } } else { // Error from somewhere else... if ( exception.getMessage() == null ) { text.append( message ); } else { text.append( exception.getMessage() ); } } StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter( sw ); exception.printStackTrace( pw ); details.append( sw.getBuffer() ); }
@Test
public void setErrorTextWithNoCauseException() {
  // A KettleException without a cause must surface its own message as the text.
  Exception kettleException = new KettleException( "kettleMessage" );
  StringBuilder messageText = new StringBuilder();
  StringBuilder stackTraceDetails = new StringBuilder();

  ErrorDialog dialog = mock( ErrorDialog.class );
  doCallRealMethod().when( dialog ).handleException( anyString(), any( Exception.class ),
      any( StringBuilder.class ), any( StringBuilder.class ) );

  dialog.handleException( "argMessage", kettleException, messageText, stackTraceDetails );

  assertEquals( messageText.toString(), kettleException.getMessage() );
}
/**
 * Generates column-substitution tokens that rewrite encrypted insert columns
 * to their cipher-column names.
 * <p>
 * For INSERT ... SELECT, validates that the select projection count matches the
 * insert column count and that corresponding columns use the same encryptor.
 *
 * @param insertStatementContext the insert statement context (must declare insert columns)
 * @return one SubstitutableColumnNameToken per encrypted insert column
 */
@Override
public Collection<SQLToken> generateSQLTokens(final InsertStatementContext insertStatementContext) {
    Optional<InsertColumnsSegment> insertColumnsSegment = insertStatementContext.getSqlStatement().getInsertColumns();
    // Callers must only invoke this generator when insert columns are present.
    Preconditions.checkState(insertColumnsSegment.isPresent());
    Collection<ColumnSegment> insertColumns = insertColumnsSegment.get().getColumns();
    if (null != insertStatementContext.getInsertSelectContext()) {
        // INSERT ... SELECT: cross-validate the select projections against the insert columns.
        Collection<Projection> projections = insertStatementContext.getInsertSelectContext().getSelectStatementContext().getProjectionsContext().getExpandProjections();
        ShardingSpherePreconditions.checkState(insertColumns.size() == projections.size(),
                () -> new UnsupportedSQLOperationException("Column count doesn't match value count."));
        ShardingSpherePreconditions.checkState(InsertSelectColumnsEncryptorComparator.isSame(insertColumns, projections, encryptRule),
                () -> new UnsupportedSQLOperationException("Can not use different encryptor in insert select columns"));
    }
    EncryptTable encryptTable = encryptRule.getEncryptTable(insertStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue());
    Collection<SQLToken> result = new LinkedList<>();
    for (ColumnSegment each : insertColumns) {
        String columnName = each.getIdentifier().getValue();
        if (encryptTable.isEncryptColumn(columnName)) {
            // Replace the plaintext column name with its cipher column over the original span.
            Collection<Projection> projections = Collections.singleton(new ColumnProjection(null,
                    encryptTable.getEncryptColumn(columnName).getCipher().getName(), null, insertStatementContext.getDatabaseType()));
            result.add(new SubstitutableColumnNameToken(each.getStartIndex(), each.getStopIndex(), projections, insertStatementContext.getDatabaseType()));
        }
    }
    return result;
}
@Test
void assertGenerateSQLTokensWhenInsertColumnsUseDifferentEncryptorWithSelectProjection() {
    // An INSERT ... SELECT whose columns use mismatched encryptors must be rejected.
    assertThrows(UnsupportedSQLOperationException.class,
            () -> generator.generateSQLTokens(EncryptGeneratorFixtureBuilder.createInsertSelectStatementContext(Collections.emptyList(), true)));
}
/**
 * Constructs a Gamma distribution with the given shape (k) and scale (theta).
 * <p>
 * Fixes: (1) the old {@code shape <= 0} / {@code scale <= 0} checks silently
 * accepted NaN (all NaN comparisons are false); {@code !(x > 0)} rejects NaN
 * as well. (2) the entropy formula recomputed {@code Math.log(theta)} and
 * {@code Gamma.lgamma(k)} that were already cached in fields.
 *
 * @param shape the shape parameter k, must be positive
 * @param scale the scale parameter theta, must be positive
 * @throws IllegalArgumentException if shape or scale is not strictly positive (or NaN)
 */
public GammaDistribution(double shape, double scale) {
    if (!(shape > 0)) {
        throw new IllegalArgumentException("Invalid shape: " + shape);
    }
    if (!(scale > 0)) {
        throw new IllegalArgumentException("Invalid scale: " + scale);
    }

    theta = scale;
    k = shape;

    logTheta = Math.log(theta);
    thetaGammaK = theta * Gamma.gamma(k);
    logGammaK = Gamma.lgamma(k);
    // Reuse the cached logTheta and logGammaK instead of recomputing them.
    entropy = k + logTheta + logGammaK + (1 - k) * Gamma.digamma(k);
}
@Test public void testGammaDistribution() { System.out.println("GammaDistribution"); MathEx.setSeed(19650218); // to get repeatable results. GammaDistribution instance = new GammaDistribution(3, 2.1); double[] data = instance.rand(1000); GammaDistribution est = GammaDistribution.fit(data); assertEquals(2.09, est.theta, 1E-2); assertEquals(2.95, est.k, 1E-2); }
@Override
public void addPermits(int permits) {
    // Blocking facade over the async variant: get() waits for the operation to complete.
    get(addPermitsAsync(permits));
}
@Test
public void testAddPermits() throws InterruptedException {
    RSemaphore semaphore = redisson.getSemaphore("test");
    semaphore.trySetPermits(10);

    // Drain all permits, then verify addPermits raises the available count.
    semaphore.acquire(10);
    assertThat(semaphore.availablePermits()).isEqualTo(0);
    semaphore.addPermits(4);
    assertThat(semaphore.availablePermits()).isEqualTo(4);

    // Releasing and re-acquiring should account for the extra permits.
    semaphore.release(10);
    assertThat(semaphore.availablePermits()).isEqualTo(14);
    semaphore.acquire(5);
    assertThat(semaphore.availablePermits()).isEqualTo(9);
}
public static String formatTimeSortable(long timeDiff) { StringBuilder buf = new StringBuilder(); long hours = timeDiff / (60 * 60 * 1000); long rem = (timeDiff % (60 * 60 * 1000)); long minutes = rem / (60 * 1000); rem = rem % (60 * 1000); long seconds = rem / 1000; // if hours is more than 99 hours, it will be set a max value format if (hours > 99) { hours = 99; minutes = 59; seconds = 59; } buf.append(String.format("%02d", hours)); buf.append("hrs, "); buf.append(String.format("%02d", minutes)); buf.append("mins, "); buf.append(String.format("%02d", seconds)); buf.append("sec"); return buf.toString(); }
@Test
public void testFormatTimeSortable() {
    // Anything beyond 99 hours is clamped to the maximum sortable value.
    long hugeDiffMillis = 523452311;
    String expected = "99hrs, 59mins, 59sec";
    assertEquals("Incorrect time diff string returned", expected,
        StringUtils.formatTimeSortable(hugeDiffMillis));
}
@Override
public boolean isSubTypeOf(Class<?> ancestor) {
    checkNotNull(ancestor);
    // Delegate the subtype check to the underlying resource identifier.
    return id.isSubTypeOf(ancestor);
}
@Test
public void testSubTypeOfObject() {
    // Every continuous resource type must report itself as a subtype of Object.
    ContinuousResource bandwidthResource =
            Resources.continuous(D1, P1, Bandwidth.class).resource(BW1.bps());

    assertThat(bandwidthResource.isSubTypeOf(Object.class), is(true));
}
@Override
public void to(final String topic) {
    // Delegate using the stream's own serdes with no explicit partitioner.
    // NOTE(review): the null-topic check appears to happen in the overload /
    // Produced ("topic can't be null") — confirm.
    to(topic, Produced.with(keySerde, valueSerde, null));
}
@Test
public void shouldNotAllowNullTopicOnTo() {
    // A null topic must be rejected with a descriptive NullPointerException.
    final NullPointerException npe = assertThrows(
        NullPointerException.class,
        () -> testStream.to((String) null));

    assertThat(npe.getMessage(), equalTo("topic can't be null"));
}