focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public boolean isAbilitySupportedByServer(AbilityKey abilityKey) { return rpcClient.getConnectionAbility(abilityKey) == AbilityStatus.SUPPORTED; }
@Test void testIsAbilitySupportedByServer4() { when(this.rpcClient.getConnectionAbility(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC)).thenReturn( null); assertFalse(client.isAbilitySupportedByServer(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC)); verify(this.rpcClient, times(1)).getConnectionAbility(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC); }
public static RowCoder of(Schema schema) { return new RowCoder(schema); }
@Test(expected = NonDeterministicException.class) public void testVerifyDeterministicNestedRow() throws NonDeterministicException { Schema schema = Schema.builder() .addField( "f1", FieldType.row( Schema.builder() .addField("a1", FieldType.DOUBLE) .addField("a2", FieldType.INT64) .build())) .build(); RowCoder coder = RowCoder.of(schema); coder.verifyDeterministic(); }
public static UTypeCast create(UTree<?> type, UExpression expression) { return new AutoValue_UTypeCast(type, expression); }
@Test public void equality() { new EqualsTester() .addEqualityGroup(UTypeCast.create(UPrimitiveTypeTree.BYTE, ULiteral.intLit(100))) .addEqualityGroup(UTypeCast.create(UPrimitiveTypeTree.BYTE, ULiteral.intLit(150))) .addEqualityGroup(UTypeCast.create(UPrimitiveTypeTree.CHAR, ULiteral.intLit(100))) .testEquals(); }
public static boolean isSupportPrometheus() { return isClassPresent("io.micrometer.prometheus.PrometheusConfig") && isClassPresent("io.prometheus.client.exporter.BasicAuthHttpConnectionFactory") && isClassPresent("io.prometheus.client.exporter.HttpConnectionFactory") && isClassPresent("io.prometheus.client.exporter.PushGateway"); }
@Test void isImportPrometheus() { MetricsConfig metricsConfig = new MetricsConfig(); metricsConfig.setProtocol("prometheus"); boolean importPrometheus = PROTOCOL_PROMETHEUS.equals(metricsConfig.getProtocol()) && !DefaultApplicationDeployer.isSupportPrometheus(); Assert.assertTrue(!importPrometheus, " should return false"); }
@Override public int getDefaultDatabasePort() { if ( getAccessType() == DatabaseMeta.TYPE_ACCESS_NATIVE ) { return 5439; } return -1; }
@Test public void testGetDefaultDatabasePort() throws Exception { assertEquals( 5439, dbMeta.getDefaultDatabasePort() ); dbMeta.setAccessType( DatabaseMeta.TYPE_ACCESS_JNDI ); assertEquals( -1, dbMeta.getDefaultDatabasePort() ); }
@Udf public <T> Boolean contains( @UdfParameter final String jsonArray, @UdfParameter final T val ) { try (JsonParser parser = PARSER_FACTORY.createParser(jsonArray)) { if (parser.nextToken() != START_ARRAY) { return false; } while (parser.nextToken() != null) { final JsonToken token = parser.currentToken(); if (token == null) { return val == null; } else if (token == END_ARRAY) { return false; } parser.skipChildren(); if (TOKEN_COMPAT.getOrDefault(token, foo -> false).test(val)) { if (token == VALUE_NULL || (val != null && Objects.equals(parser.readValueAs(val.getClass()), val))) { return true; } } } return false; } catch (final IOException e) { return false; } }
@Test public void shouldFindStringsInJsonArray() { assertEquals(true, jsonUdf.contains("[\"abc\"]", "abc")); assertEquals(true, jsonUdf.contains("[\"cbda\", \"abc\"]", "abc")); assertEquals(true, jsonUdf.contains("[{}, \"abc\", null, 1]", "abc")); assertEquals(true, jsonUdf.contains("[\"\"]", "")); assertEquals(false, jsonUdf.contains("[\"\"]", null)); assertEquals(false, jsonUdf.contains("[1,2,3]", "1")); assertEquals(false, jsonUdf.contains("[null]", "")); assertEquals(false, jsonUdf.contains("[\"abc\", \"dba\"]", "abd")); }
@Override public Checksum compute(final InputStream in, final TransferStatus status) throws BackgroundException { return new Checksum(HashAlgorithm.md5, this.digest("MD5", this.normalize(in, status), status)); }
@Test public void testNormalize() throws Exception { assertEquals("a43c1b0aa53a0c908810c06ab1ff3967", new MD5FastChecksumCompute().compute(IOUtils.toInputStream("input", Charset.defaultCharset()), new TransferStatus()).hash); assertEquals("a43c1b0aa53a0c908810c06ab1ff3967", new MD5FastChecksumCompute().compute(IOUtils.toInputStream("_input", Charset.defaultCharset()), new TransferStatus().withOffset(1)).hash); assertEquals("a43c1b0aa53a0c908810c06ab1ff3967", new MD5FastChecksumCompute().compute(IOUtils.toInputStream("_input_", Charset.defaultCharset()), new TransferStatus().withOffset(1).withLength(5)).hash); }
public AlertResult send(String content) { AlertResult alertResult = new AlertResult(); String url; try { if (WeChatType.APP.getValue().equals(wechatParams.getSendType())) { String token = getToken(); assert token != null; url = String.format(WeChatConstants.WECHAT_PUSH_URL, wechatParams.getSendUrl(), token); } else { url = wechatParams.getWebhook(); } return checkWeChatSendMsgResult(HttpUtils.post(url, content)); } catch (Exception e) { logger.error("send we chat alert msg exception : {}", e.getMessage()); alertResult.setMessage("send we chat alert fail"); alertResult.setSuccess(false); } return alertResult; }
@Ignore @Test public void testSendMarkDownMsg() { WeChatAlert weChatAlert = new WeChatAlert(); AlertConfig alertConfig = new AlertConfig(); weChatConfig.put(WeChatConstants.SEND_TYPE, WeChatType.CHAT.getValue()); alertConfig.setType("WeChat"); alertConfig.setParam(weChatConfig); weChatAlert.setConfig(alertConfig); AlertResult alertResult = weChatAlert.send(AlertBaseConstant.ALERT_TEMPLATE_TITLE, AlertBaseConstant.ALERT_TEMPLATE_MSG); Assert.assertEquals(true, alertResult.getSuccess()); }
@Transactional public AppNamespace createAppNamespaceInLocal(AppNamespace appNamespace) { return createAppNamespaceInLocal(appNamespace, true); }
@Test(expected = BadRequestException.class) @Sql(scripts = "/sql/appnamespaceservice/init-appnamespace.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD) @Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD) public void testCreatePublicAppNamespaceExistedAsPrivateAppNamespace() { AppNamespace appNamespace = assembleBaseAppNamespace(); appNamespace.setPublic(true); appNamespace.setName("private-01"); appNamespace.setFormat(ConfigFileFormat.Properties.getValue()); appNamespaceService.createAppNamespaceInLocal(appNamespace); }
@Override public double doubleValue() { return (double) lvVal(); }
@Test public void testDoubleValue() { PaddedAtomicLong counter = new PaddedAtomicLong(10); assertEquals(10d, counter.doubleValue(), 0.01); }
public static WhitelistConfig load() { return new WhitelistConfig(); }
@Test public void testConfigMapFormat() { WhitelistConfig config = WhitelistConfig.load("whitelist-map"); System.out.println(config); }
public Iterable<TimestampedValue<T>> read() { checkState( !isClosed, "OrderedList user state is no longer usable because it is closed for %s", requestTemplate.getStateKey()); return readRange(Instant.ofEpochMilli(Long.MIN_VALUE), Instant.ofEpochMilli(Long.MAX_VALUE)); }
@Test public void testRead() throws Exception { FakeBeamFnStateClient fakeClient = new FakeBeamFnStateClient( timestampedValueCoder, ImmutableMap.of(createOrderedListStateKey("A", 1), asList(A1, B1))); OrderedListUserState<String> userState = new OrderedListUserState<>( Caches.noop(), fakeClient, "instructionId", createOrderedListStateKey("A"), StringUtf8Coder.of()); assertArrayEquals( asList(A1, B1).toArray(), Iterables.toArray(userState.read(), TimestampedValue.class)); userState.asyncClose(); assertThrows(IllegalStateException.class, () -> userState.read()); }
public Protocol forNameOrDefault(final String identifier) { return forNameOrDefault(identifier, null); }
@Test public void testForNameOrDefaultMissing() throws Exception { final TestProtocol dav = new TestProtocol(Scheme.dav); final ProtocolFactory f = new ProtocolFactory(Collections.singleton(dav)); assertEquals(dav, f.forNameOrDefault("dav")); assertNull(f.forNameOrDefault("ftp")); }
@Override public Connection getConnection() throws SQLException { Connection connection = dataSource.getConnection(); return getConnectionProxy(connection); }
@Test public void testGetMariaXaConnection() throws SQLException, ClassNotFoundException { // Mock Driver driver = Mockito.mock(Driver.class); Class clazz = Class.forName("org.mariadb.jdbc.MariaDbConnection"); Connection connection = (Connection)(Mockito.mock(clazz)); Mockito.when(connection.getAutoCommit()).thenReturn(true); DatabaseMetaData metaData = Mockito.mock(DatabaseMetaData.class); Mockito.when(metaData.getURL()).thenReturn("jdbc:mariadb:xxx"); Mockito.when(connection.getMetaData()).thenReturn(metaData); Mockito.when(driver.connect(any(), any())).thenReturn(connection); DruidDataSource druidDataSource = new DruidDataSource(); druidDataSource.setDriver(driver); DataSourceProxyXA dataSourceProxyXA = new DataSourceProxyXA(druidDataSource); Connection connFromDataSourceProxyXA = dataSourceProxyXA.getConnection(); Assertions.assertFalse(connFromDataSourceProxyXA instanceof ConnectionProxyXA); RootContext.bind("test"); connFromDataSourceProxyXA = dataSourceProxyXA.getConnection(); Assertions.assertTrue(connFromDataSourceProxyXA instanceof ConnectionProxyXA); ConnectionProxyXA connectionProxyXA = (ConnectionProxyXA)dataSourceProxyXA.getConnection(); Connection wrappedConnection = connectionProxyXA.getWrappedConnection(); Assertions.assertTrue(wrappedConnection instanceof PooledConnection); Connection wrappedPhysicalConn = ((PooledConnection)wrappedConnection).getConnection(); Assertions.assertSame(wrappedPhysicalConn, connection); XAConnection xaConnection = connectionProxyXA.getWrappedXAConnection(); Connection connectionInXA = xaConnection.getConnection(); Assertions.assertEquals("org.mariadb.jdbc.MariaDbConnection", connectionInXA.getClass().getName()); tearDown(); }
@Override public RList<V> get(K key) { String keyHash = keyHash(key); String valuesName = getValuesName(keyHash); return new RedissonListMultimapValues<>(codec, commandExecutor, valuesName, getTimeoutSetName(), key); }
@Test public void testValues() { RMultimapCache<String, String> multimap = getMultimapCache("test"); multimap.put("1", "1"); multimap.put("1", "2"); multimap.put("1", "3"); multimap.put("1", "3"); assertThat(multimap.get("1").size()).isEqualTo(4); assertThat(multimap.get("1")).containsExactly("1", "2", "3", "3"); assertThat(multimap.get("1").remove("3")).isTrue(); assertThat(multimap.get("1").remove("3")).isTrue(); assertThat(multimap.get("1").remove("3")).isFalse(); assertThat(multimap.get("1").contains("3")).isFalse(); assertThat(multimap.get("1").contains("2")).isTrue(); assertThat(multimap.get("1").containsAll(Arrays.asList("1"))).isTrue(); assertThat(multimap.get("1").containsAll(Arrays.asList("1", "2"))).isTrue(); assertThat(multimap.get("1").retainAll(Arrays.asList("1"))).isTrue(); assertThat(multimap.get("1").removeAll(Arrays.asList("1"))).isTrue(); }
public static String readFile(String path) { StringBuilder builder = new StringBuilder(); File file = new File(path); if (!file.isFile()) { throw new BusException(StrUtil.format("File path {} is not a file.", path)); } try (InputStreamReader inputStreamReader = new InputStreamReader(Files.newInputStream(file.toPath()), StandardCharsets.UTF_8); BufferedReader bufferedReader = new BufferedReader(inputStreamReader)) { String content; while ((content = bufferedReader.readLine()) != null) { builder.append("\n"); builder.append(content); } } catch (Exception e) { e.printStackTrace(); } return builder.toString(); }
@Ignore @Test public void testReadRootLog() { String result = DirUtil.readFile(DirConstant.ROOT_LOG_PATH); Assertions.assertThat(result).isNotNull(); }
@Override public V pollFirst(long timeout, TimeUnit unit) throws InterruptedException { return commandExecutor.getInterrupted(pollFirstAsync(timeout, unit)); }
@Test public void testPollFirst() throws InterruptedException { RBlockingDeque<Integer> queue1 = redisson.getBlockingDeque("queue1"); queue1.put(1); queue1.put(2); queue1.put(3); assertThat(queue1.pollFirst(2, TimeUnit.SECONDS)).isEqualTo(1); assertThat(queue1.pollFirst(2, TimeUnit.SECONDS)).isEqualTo(2); assertThat(queue1.pollFirst(2, TimeUnit.SECONDS)).isEqualTo(3); long s = System.currentTimeMillis(); assertThat(queue1.pollFirst(5, TimeUnit.SECONDS)).isNull(); assertThat(System.currentTimeMillis() - s).isGreaterThan(5000); }
public static Optional<Long> getWarehouseIdByNodeId(SystemInfoService systemInfo, long nodeId) { ComputeNode node = systemInfo.getBackendOrComputeNode(nodeId); if (node == null) { LOG.warn("failed to get warehouse id by node id: {}", nodeId); return Optional.empty(); } return Optional.of(node.getWarehouseId()); }
@Test public void testGetWarehouseIdByNodeId() { SystemInfoService systemInfo = new SystemInfoService(); Backend b1 = new Backend(10001L, "192.168.0.1", 9050); b1.setBePort(9060); b1.setWarehouseId(10001L); Backend b2 = new Backend(10002L, "192.168.0.2", 9050); b2.setBePort(9060); b2.setWarehouseId(10002L); // add two backends to different warehouses systemInfo.addBackend(b1); systemInfo.addBackend(b2); // If the version of be is old, it may pass null. Assert.assertEquals(WarehouseManager.DEFAULT_WAREHOUSE_ID, Utils.getWarehouseIdByNodeId(systemInfo, 0).orElse(WarehouseManager.DEFAULT_WAREHOUSE_ID).longValue()); // pass a wrong tBackend Assert.assertEquals(WarehouseManager.DEFAULT_WAREHOUSE_ID, Utils.getWarehouseIdByNodeId(systemInfo, 10003).orElse(WarehouseManager.DEFAULT_WAREHOUSE_ID).longValue()); // pass a right tBackend Assert.assertEquals(10001L, Utils.getWarehouseIdByNodeId(systemInfo, 10001).get().longValue()); Assert.assertEquals(10002L, Utils.getWarehouseIdByNodeId(systemInfo, 10002).get().longValue()); }
@Override public String getName() { return ANALYZER_NAME; }
@Test public void testAnalyzePackageJson() throws Exception { try (Engine engine = new Engine(getSettings())) { final Dependency result = new Dependency(BaseTest.getResourceAsFile(this, "composer.lock")); //simulate normal operation when the composer.lock is already added to the engine as a dependency engine.addDependency(result); analyzer.analyze(result, engine); //make sure the redundant composer.lock is removed assertFalse(ArrayUtils.contains(engine.getDependencies(), result)); assertEquals(30, engine.getDependencies().length); boolean found = false; for (Dependency d : engine.getDependencies()) { if ("classpreloader".equals(d.getName())) { found = true; assertEquals("2.0.0", d.getVersion()); assertThat(d.getDisplayFileName(), equalTo("classpreloader:2.0.0")); assertEquals(ComposerLockAnalyzer.DEPENDENCY_ECOSYSTEM, d.getEcosystem()); } } assertTrue("Expeced to find classpreloader", found); } }
@Override public V move(DequeMoveArgs args) { return get(moveAsync(args)); }
@Test public void testMove() { RDeque<Integer> deque1 = redisson.getDeque("deque1"); RDeque<Integer> deque2 = redisson.getDeque("deque2"); deque1.add(1); deque1.add(2); deque1.add(3); deque2.add(4); deque2.add(5); deque2.add(6); Integer r1 = deque1.move(DequeMoveArgs.pollFirst().addLastTo(deque2.getName())); assertThat(r1).isEqualTo(1); assertThat(deque1).containsExactly(2, 3); assertThat(deque2).containsExactly(4, 5, 6, 1); Integer r2 = deque2.move(DequeMoveArgs.pollLast().addFirstTo(deque1.getName())); assertThat(r2).isEqualTo(1); assertThat(deque1).containsExactly(1, 2, 3); assertThat(deque2).containsExactly(4, 5, 6); }
public static boolean shutdownExecutorService(ExecutorService service) throws InterruptedException { return shutdownExecutorService(service, SHUTDOWN_WAIT_MS); }
@Test public void testShutdownThreadPool() throws InterruptedException { ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1); executor.execute(sampleRunnable); boolean ret = ShutdownThreadsHelper.shutdownExecutorService(executor); boolean isTerminated = executor.isTerminated(); assertEquals("Incorrect return value", ret, isTerminated); assertTrue("ExecutorService is not shutdown", isTerminated); }
@Override public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { /*----------------configure this parser by ParseContext Object---------------------*/ GeoParserConfig geoParserConfig = context.get(GeoParserConfig.class, defaultConfig); initialize(geoParserConfig); if (!isAvailable(geoParserConfig)) { return; } NameEntityExtractor extractor = null; try { extractor = new NameEntityExtractor(nameFinder); } catch (Exception e) { LOG.warn("Named Entity Extractor setup failed: {}", e.getMessage(), e); return; } /*----------------get locationNameEntities and best nameEntity for the input stream---------------------*/ extractor.getAllNameEntitiesfromInput(stream); extractor.getBestNameEntity(); ArrayList<String> locationNameEntities = extractor.locationNameEntities; String bestner = extractor.bestNameEntity; /*------------------------resolve geonames for each ner, store results in a hashmap---------------------*/ Map<String, List<Location>> resolvedGeonames = searchGeoNames(locationNameEntities); /*----------------store locationNameEntities and their geonames in a geotag, each input has one geotag---------------------*/ GeoTag geotag = new GeoTag(); geotag.toGeoTag(resolvedGeonames, bestner); /* add resolved entities in metadata */ metadata.add("Geographic_NAME", geotag.location.getName()); metadata.add("Geographic_LONGITUDE", geotag.location.getLongitude()); metadata.add("Geographic_LATITUDE", geotag.location.getLatitude()); for (int i = 0; i < geotag.alternatives.size(); ++i) { GeoTag alter = (GeoTag) geotag.alternatives.get(i); metadata.add("Optional_NAME" + (i + 1), alter.location.getName()); metadata.add("Optional_LONGITUDE" + (i + 1), alter.location.getLongitude()); metadata.add("Optional_LATITUDE" + (i + 1), alter.location.getLatitude()); } }
@Test public void testNulls() throws IOException, SAXException, TikaException { String text = ""; Metadata metadata = new Metadata(); ParseContext context = new ParseContext(); GeoParserConfig config = new GeoParserConfig(); context.set(GeoParserConfig.class, config); geoparser.parse(new ByteArrayInputStream(text.getBytes(UTF_8)), new BodyContentHandler(), metadata, context); assertNull(metadata.get("Geographic_NAME")); assertNull(metadata.get("Geographic_LONGITUDE")); assertNull(metadata.get("Geographic_LATITUDE")); }
@Override public List<RoleDO> getRoleListByStatus(Collection<Integer> statuses) { return roleMapper.selectListByStatus(statuses); }
@Test public void testGetRoleListByStatus() { // mock 数据 RoleDO dbRole01 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())); roleMapper.insert(dbRole01); RoleDO dbRole02 = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())); roleMapper.insert(dbRole02); // 调用 List<RoleDO> list = roleService.getRoleListByStatus( singleton(CommonStatusEnum.ENABLE.getStatus())); // 断言 assertEquals(1, list.size()); assertPojoEquals(dbRole01, list.get(0)); }
public void insert(Inode inode) { insert(inode, TtlBucket.DEFAULT_RETRY_ATTEMPTS); }
@Test public void insert() { // No bucket should expire. List<TtlBucket> expired = pollSortedExpiredBuckets(BUCKET1_START); Assert.assertTrue(expired.isEmpty()); mBucketList.insert(BUCKET1_FILE1); // The first bucket should expire. expired = pollSortedExpiredBuckets(BUCKET1_END); assertExpired(expired, 0, BUCKET1_FILE1); mBucketList.insert(BUCKET1_FILE1); mBucketList.insert(BUCKET1_FILE2); // Only the first bucket should expire. for (long end = BUCKET2_START; end < BUCKET2_END; end++) { expired = pollSortedExpiredBuckets(end); assertExpired(expired, 0, BUCKET1_FILE1, BUCKET1_FILE2); mBucketList.insert(BUCKET1_FILE1); mBucketList.insert(BUCKET1_FILE2); } mBucketList.insert(BUCKET2_FILE); // All buckets should expire. expired = pollSortedExpiredBuckets(BUCKET2_END); assertExpired(expired, 0, BUCKET1_FILE1, BUCKET1_FILE2); assertExpired(expired, 1, BUCKET2_FILE); }
public static String initNamespaceForNaming(NacosClientProperties properties) { String tmpNamespace = null; String isUseCloudNamespaceParsing = properties.getProperty(PropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING, properties.getProperty(SystemPropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING, String.valueOf(Constants.DEFAULT_USE_CLOUD_NAMESPACE_PARSING))); if (Boolean.parseBoolean(isUseCloudNamespaceParsing)) { tmpNamespace = TenantUtil.getUserTenantForAns(); LogUtils.NAMING_LOGGER.info("initializer namespace from ans.namespace attribute : {}", tmpNamespace); tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> { String namespace = properties.getProperty(PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_NAMESPACE); LogUtils.NAMING_LOGGER.info("initializer namespace from ALIBABA_ALIWARE_NAMESPACE attribute :" + namespace); return namespace; }); } tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> { String namespace = properties.getPropertyFrom(SourceType.JVM, PropertyKeyConst.NAMESPACE); LogUtils.NAMING_LOGGER.info("initializer namespace from namespace attribute :" + namespace); return namespace; }); if (StringUtils.isEmpty(tmpNamespace)) { tmpNamespace = properties.getProperty(PropertyKeyConst.NAMESPACE); } tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> UtilAndComs.DEFAULT_NAMESPACE_ID); return tmpNamespace; }
@Test void testInitNamespaceFromPropNamespaceWithoutCloudParsing() { System.setProperty(SystemPropertyKeyConst.ANS_NAMESPACE, "ans"); System.setProperty(SystemPropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING, "false"); final NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive(); String expect = "ns1"; properties.setProperty(PropertyKeyConst.NAMESPACE, expect); String ns = InitUtils.initNamespaceForNaming(properties); assertEquals(expect, ns); }
public static SFCertificatePanel sharedCertificatePanel() { return Rococoa.createClass("SFCertificatePanel", SFCertificatePanel._Class.class).sharedCertificatePanel(); }
@Test public void sharedCertificatePanel() { assertNotNull(SFCertificatePanel.sharedCertificatePanel()); }
static MemberMap cloneExcluding(MemberMap source, MemberImpl... excludeMembers) { if (source.size() == 0) { return source; } Map<Address, MemberImpl> addressMap = new LinkedHashMap<>(source.addressToMemberMap); Map<UUID, MemberImpl> uuidMap = new LinkedHashMap<>(source.uuidToMemberMap); for (MemberImpl member : excludeMembers) { MemberImpl removed = addressMap.remove(member.getAddress()); if (removed != null) { uuidMap.remove(removed.getUuid()); } removed = uuidMap.remove(member.getUuid()); if (removed != null) { addressMap.remove(removed.getAddress()); } } return new MemberMap(source.version + excludeMembers.length, addressMap, uuidMap); }
@Test public void cloneExcluding() { MemberImpl[] members = newMembers(6); MemberImpl exclude0 = members[0]; MemberImpl exclude1 = new MemberImpl.Builder(newAddress(6000)) .version(VERSION) .uuid(members[1].getUuid()) .build(); MemberImpl exclude2 = new MemberImpl.Builder(members[2].getAddress()) .version(VERSION) .uuid(newUnsecureUUID()) .build(); MemberMap map = MemberMap.cloneExcluding(MemberMap.createNew(members), exclude0, exclude1, exclude2); int numOfExcludedMembers = 3; assertEquals(members.length - numOfExcludedMembers, map.getMembers().size()); assertEquals(members.length - numOfExcludedMembers, map.getAddresses().size()); assertEquals(members.length - numOfExcludedMembers, map.size()); for (int i = 0; i < numOfExcludedMembers; i++) { MemberImpl member = members[i]; assertNotContains(map, member.getAddress()); assertNotContains(map, member.getUuid()); assertNull(map.getMember(member.getAddress())); assertNull(map.getMember(member.getUuid())); } for (int i = numOfExcludedMembers; i < members.length; i++) { MemberImpl member = members[i]; assertContains(map, member.getAddress()); assertContains(map, member.getUuid()); assertSame(member, map.getMember(member.getAddress())); assertSame(member, map.getMember(member.getUuid())); } assertMemberSet(map); }
@Override public int length() { return 2; }
@Test public void testLength() { System.out.println("length"); FDistribution instance = new FDistribution(10, 20); instance.rand(); assertEquals(2, instance.length()); }
public File getElasticsearchYml() { return new File(confDirectory, "elasticsearch.yml"); }
@Test public void getElasticsearchYml_is_in_es_conf_directory() throws IOException { File tempDir = temp.newFolder(); Props props = new Props(new Properties()); props.set(PATH_DATA.getKey(), temp.newFolder().getAbsolutePath()); props.set(PATH_HOME.getKey(), temp.newFolder().getAbsolutePath()); props.set(PATH_TEMP.getKey(), tempDir.getAbsolutePath()); props.set(PATH_LOGS.getKey(), temp.newFolder().getAbsolutePath()); EsInstallation underTest = new EsInstallation(props); assertThat(underTest.getElasticsearchYml()).isEqualTo(new File(tempDir, "conf/es/elasticsearch.yml")); }
@Override public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException { if (!kubernetesJobManagerParameters.isInternalServiceEnabled()) { return Collections.emptyList(); } final String serviceName = getInternalServiceName(kubernetesJobManagerParameters.getClusterId()); final Service headlessService = HeadlessClusterIPService.INSTANCE.buildUpInternalService( kubernetesJobManagerParameters); // Set job manager address to namespaced service name final String namespace = kubernetesJobManagerParameters.getNamespace(); kubernetesJobManagerParameters .getFlinkConfiguration() .set( JobManagerOptions.ADDRESS, getNamespacedInternalServiceName(serviceName, namespace)); return Collections.singletonList(headlessService); }
@Test void testBuildAccompanyingKubernetesResources() throws IOException { final List<HasMetadata> resources = this.internalServiceDecorator.buildAccompanyingKubernetesResources(); assertThat(resources).hasSize(1); assertThat(InternalServiceDecorator.getNamespacedInternalServiceName(CLUSTER_ID, NAMESPACE)) .isEqualTo(this.flinkConfig.get(JobManagerOptions.ADDRESS)); final Service internalService = (Service) resources.get(0); assertThat(internalService.getApiVersion()).isEqualTo(Constants.API_VERSION); assertThat(internalService.getMetadata().getName()) .isEqualTo(InternalServiceDecorator.getInternalServiceName(CLUSTER_ID)); final Map<String, String> expectedLabels = getCommonLabels(); assertThat(internalService.getMetadata().getLabels()).isEqualTo(expectedLabels); assertThat(internalService.getMetadata().getAnnotations()).isEqualTo(userAnnotations); assertThat(internalService.getSpec().getType()).isNull(); assertThat(internalService.getSpec().getClusterIP()).isEqualTo("None"); List<ServicePort> expectedServicePorts = Arrays.asList( new ServicePortBuilder() .withName(Constants.JOB_MANAGER_RPC_PORT_NAME) .withPort(RPC_PORT) .build(), new ServicePortBuilder() .withName(Constants.BLOB_SERVER_PORT_NAME) .withPort(BLOB_SERVER_PORT) .build()); assertThat(internalService.getSpec().getPorts()).isEqualTo(expectedServicePorts); expectedLabels.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_JOB_MANAGER); assertThat(internalService.getSpec().getSelector()).isEqualTo(expectedLabels); }
@Override public boolean equals(Object other) { if (this == other) { return true; } if (!(other instanceof AbsoluteUnixPath)) { return false; } AbsoluteUnixPath otherAbsoluteUnixPath = (AbsoluteUnixPath) other; return unixPath.equals(otherAbsoluteUnixPath.unixPath); }
@Test public void testEquals() { AbsoluteUnixPath absoluteUnixPath1 = AbsoluteUnixPath.get("/absolute/path"); AbsoluteUnixPath absoluteUnixPath2 = AbsoluteUnixPath.get("/absolute/path/"); AbsoluteUnixPath absoluteUnixPath3 = AbsoluteUnixPath.get("/another/path"); Assert.assertEquals(absoluteUnixPath1, absoluteUnixPath2); Assert.assertNotEquals(absoluteUnixPath1, absoluteUnixPath3); }
public static <K, V> Printed<K, V> toFile(final String filePath) { Objects.requireNonNull(filePath, "filePath can't be null"); if (Utils.isBlank(filePath)) { throw new TopologyException("filePath can't be an empty string"); } try { return new Printed<>(Files.newOutputStream(Paths.get(filePath))); } catch (final IOException e) { throw new TopologyException("Unable to write stream to file at [" + filePath + "] " + e.getMessage()); } }
@Test public void shouldThrowNullPointerExceptionIfFilePathIsNull() { assertThrows(NullPointerException.class, () -> Printed.toFile(null)); }
public static SchemaAndValue parseString(String value) { if (value == null) { return NULL_SCHEMA_AND_VALUE; } if (value.isEmpty()) { return new SchemaAndValue(Schema.STRING_SCHEMA, value); } ValueParser parser = new ValueParser(new Parser(value)); return parser.parse(false); }
@Test public void shouldNotConvertArrayValuesToDecimal() { List<Object> decimals = Arrays.asList("\"1.0\"", BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE), (byte) 1, (byte) 1); List<Object> expected = new ArrayList<>(decimals); // most values are directly reproduced with the same type expected.set(0, "1.0"); // The quotes are parsed away, but the value remains a string SchemaAndValue schemaAndValue = Values.parseString(decimals.toString()); Schema schema = schemaAndValue.schema(); assertEquals(Type.ARRAY, schema.type()); assertNull(schema.valueSchema()); assertEquals(expected, schemaAndValue.value()); }
public int writeVarUint64(long value) { // Var long encoding algorithm is based kryo UnsafeMemoryOutput.writeVarInt64. // var long are written using little endian byte order. ensure(writerIndex + 9); return _unsafeWriteVarUint64(value); }
@Test public void testWriteVarUint64() { MemoryBuffer buf = MemoryUtils.buffer(8); checkVarUint64(buf, -1, 9); for (int i = 0; i < 9; i++) { for (int j = 0; j < i; j++) { checkVarUint64(buf(i), -1, 9); checkVarUint64(buf(i), 1, 1); checkVarUint64(buf(i), 1L << 6, 1); checkVarUint64(buf(i), 1L << 7, 2); checkVarUint64(buf(i), -(2 << 5), 9); checkVarUint64(buf(i), -(2 << 6), 9); checkVarUint64(buf(i), 1L << 13, 2); checkVarUint64(buf(i), 1L << 14, 3); checkVarUint64(buf(i), -(2 << 12), 9); checkVarUint64(buf(i), -(2 << 13), 9); checkVarUint64(buf(i), 1L << 20, 3); checkVarUint64(buf(i), 1L << 21, 4); checkVarUint64(buf(i), -(2 << 19), 9); checkVarUint64(buf(i), -(2 << 20), 9); checkVarUint64(buf(i), 1L << 27, 4); checkVarUint64(buf(i), 1L << 28, 5); checkVarUint64(buf(i), -(2 << 26), 9); checkVarUint64(buf(i), -(2 << 27), 9); checkVarUint64(buf(i), 1L << 30, 5); checkVarUint64(buf(i), -(2L << 29), 9); checkVarUint64(buf(i), 1L << 30, 5); checkVarUint64(buf(i), -(2L << 30), 9); checkVarUint64(buf(i), 1L << 32, 5); checkVarUint64(buf(i), -(2L << 31), 9); checkVarUint64(buf(i), 1L << 34, 5); checkVarUint64(buf(i), -(2L << 33), 9); checkVarUint64(buf(i), 1L << 35, 6); checkVarUint64(buf(i), -(2L << 34), 9); checkVarUint64(buf(i), 1L << 41, 6); checkVarUint64(buf(i), -(2L << 40), 9); checkVarUint64(buf(i), 1L << 42, 7); checkVarUint64(buf(i), -(2L << 41), 9); checkVarUint64(buf(i), 1L << 48, 7); checkVarUint64(buf(i), -(2L << 47), 9); checkVarUint64(buf(i), 1L << 49, 8); checkVarUint64(buf(i), -(2L << 48), 9); checkVarUint64(buf(i), 1L << 55, 8); checkVarUint64(buf(i), -(2L << 54), 9); checkVarUint64(buf(i), 1L << 56, 9); checkVarUint64(buf(i), -(2L << 55), 9); checkVarUint64(buf(i), 1L << 62, 9); checkVarUint64(buf(i), -(2L << 62), 9); checkVarUint64(buf(i), 1L << 63 - 1, 9); checkVarUint64(buf(i), -(2L << 62), 9); checkVarUint64(buf(i), Long.MAX_VALUE, 9); checkVarUint64(buf(i), Long.MIN_VALUE, 9); } } }
protected RemotingCommand request(ChannelHandlerContext ctx, RemotingCommand request, ProxyContext context, long timeoutMillis) throws Exception { String brokerName; if (request.getCode() == RequestCode.SEND_MESSAGE_V2) { if (request.getExtFields().get(BROKER_NAME_FIELD_FOR_SEND_MESSAGE_V2) == null) { return RemotingCommand.buildErrorResponse(ResponseCode.VERSION_NOT_SUPPORTED, "Request doesn't have field bname"); } brokerName = request.getExtFields().get(BROKER_NAME_FIELD_FOR_SEND_MESSAGE_V2); } else { if (request.getExtFields().get(BROKER_NAME_FIELD) == null) { return RemotingCommand.buildErrorResponse(ResponseCode.VERSION_NOT_SUPPORTED, "Request doesn't have field bname"); } brokerName = request.getExtFields().get(BROKER_NAME_FIELD); } if (request.isOnewayRPC()) { messagingProcessor.requestOneway(context, brokerName, request, timeoutMillis); return null; } messagingProcessor.request(context, brokerName, request, timeoutMillis) .thenAccept(r -> writeResponse(ctx, context, request, r)) .exceptionally(t -> { writeErrResponse(ctx, context, request, t); return null; }); return null; }
/**
 * A oneway-flagged request must be forwarded via {@code requestOneway} with the
 * broker name taken from the ext field, and the activity must return null
 * (no response is written for oneway RPCs).
 */
@Test
public void testRequestOneway() throws Exception {
    String brokerName = "broker";
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.PULL_MESSAGE, null);
    request.markOnewayRPC();
    request.addExtField(AbstractRemotingActivity.BROKER_NAME_FIELD, brokerName);
    RemotingCommand remotingCommand = remotingActivity.request(ctx, request, null, 10000);
    assertThat(remotingCommand).isNull();
    verify(messagingProcessorMock, times(1)).requestOneway(any(), eq(brokerName), any(), anyLong());
}
/**
 * Emits the capacity-scheduler configuration derived from one fair-scheduler
 * queue, then recurses depth-first into its children so the entire subtree
 * rooted at {@code queue} is converted.
 */
public void convertQueueHierarchy(FSQueue queue) {
    final String name = queue.getName();
    final List<FSQueue> childQueues = queue.getChildQueues();

    emitChildQueues(name, childQueues);
    emitMaxAMShare(name, queue);
    emitMaxParallelApps(name, queue);
    emitMaxAllocations(name, queue);
    emitPreemptionDisabled(name, queue);
    emitChildCapacity(queue);
    emitMaximumCapacity(name, queue);
    emitSizeBasedWeight(name);
    emitOrderingPolicy(name, queue);
    checkMaxChildCapacitySetting(queue);
    emitDefaultUserLimitFactor(name, childQueues);

    childQueues.forEach(this::convertQueueHierarchy);
}
/**
 * With percentage mode on, only root.misc may have a zero capacity sum; every
 * other queue must have allow-zero-capacity-sum unset/false after conversion.
 */
@Test
public void testZeroSumCapacityValidation() {
    converter = builder.withPercentages(true).build();
    converter.convertQueueHierarchy(rootQueue);
    // All queues except root.misc must not allow a zero capacity sum.
    Set<String> noZeroSumAllowedQueues = Sets.difference(ALL_QUEUES, Sets.newHashSet("root.misc"));
    for (String queue : noZeroSumAllowedQueues) {
        key = PREFIX + queue + ".allow-zero-capacity-sum";
        assertEquals("Key " + key + " has different value", false,
            csConfig.getAllowZeroCapacitySum(new QueuePath(queue)));
    }
    assertTrue("root.misc allow zero capacities", csConfig.getAllowZeroCapacitySum(MISC));
}
/**
 * Returns one page of field types for an index set. The full, filtered and
 * sorted list is materialized first; pagination is then applied in memory.
 *
 * @param indexSetId     index set whose fields are listed
 * @param fieldNameQuery substring query on the field name
 * @param filters        additional attribute filters
 * @param page           1-based page number (values below 1 act like page 1)
 * @param perPage        page size
 * @param sort           attribute to sort by
 * @param order          sort direction
 */
public PageListResponse<IndexSetFieldType> getIndexSetFieldTypesListPage(final String indexSetId,
                                                                         final String fieldNameQuery,
                                                                         final List<String> filters,
                                                                         final int page,
                                                                         final int perPage,
                                                                         final String sort,
                                                                         final Sorting.Direction order) {
    final List<IndexSetFieldType> allMatching =
            getFilteredList(indexSetId, fieldNameQuery, filters, sort, order);
    final int totalCount = allMatching.size();

    // Clamp negative/zero page numbers to the first page before computing the offset.
    final long itemsToSkip = (long) Math.max(0, page - 1) * perPage;
    final List<IndexSetFieldType> pageElements = allMatching.stream()
            .skip(itemsToSkip)
            .limit(perPage)
            .toList();

    return PageListResponse.create("",
            PaginatedList.PaginationInfo.create(totalCount, pageElements.size(), page, perPage),
            totalCount,
            sort,
            order.toString().toLowerCase(Locale.ROOT),
            pageElements,
            IndexSetFieldType.ATTRIBUTES,
            IndexSetFieldType.ENTITY_DEFAULTS);
}
/**
 * An unknown index-set id must yield an empty page (total 0) without touching
 * the field-type or index-set-factory collaborators.
 */
@Test
void testReturnsEmptyPageOnWrongIndexId() {
    doReturn(Optional.empty()).when(indexSetService).get("I_do_not_exist!");
    final PageListResponse<IndexSetFieldType> response =
        toTest.getIndexSetFieldTypesListPage("I_do_not_exist!", "", List.of(), 0, 10, "index_set_id",
            Sorting.Direction.ASC);
    assertEquals(0, response.total());
    assertTrue(response.elements().isEmpty());
    verifyNoInteractions(indexFieldTypesService);
    verifyNoInteractions(indexSetFactory);
}
/**
 * Returns a copy of this rule whose failure report uses {@code newDescription}
 * instead of the auto-generated rule text; the rule logic is unchanged.
 */
@Override
@PublicAPI(usage = ACCESS)
public SliceRule as(String newDescription) {
    return copyWithTransformation(new As(newDescription));
}
/**
 * Applies a cycle-reporting limit configuration, evaluates the complete
 * seven-node test graph, and asserts the failure report contains exactly the
 * configured number of cycles.
 */
@Test
@UseDataProvider("cycle_limits")
public void limits_number_of_cycles_to_configured_limit(Runnable configureLimit, int expectedNumberOfReportedCycles) {
    configureLimit.run();

    String violations = getFailureReportForCyclesInRootPackageOf(CompleteSevenNodesGraphRoot.class);

    int numberOfDetectedCycles = countCyclesInMessage(violations);
    assertThat(numberOfDetectedCycles).as("number of cycles detected").isEqualTo(expectedNumberOfReportedCycles);
}
/**
 * Case-insensitive {@link Map#compute}: the key is lower-cased before
 * delegating to the interface default implementation, so it hits the same
 * slot as any other casing of the same key.
 *
 * <p>NOTE(review): {@code toLowerCase()} uses the default locale; under e.g.
 * the Turkish locale {@code "I"} does not map to {@code "i"}. Consider
 * {@code toLowerCase(Locale.ROOT)} if non-ASCII keys are possible — confirm
 * what the rest of this class does before changing. Also throws
 * {@link NullPointerException} on a {@code null} key.
 */
@Override
public V compute(String key, BiFunction<? super String, ? super V, ? extends V> remappingFunction) {
    return Map.super.compute(key.toLowerCase(), remappingFunction);
}
/**
 * Exercises the case-insensitive compute: remapping an existing key, removing
 * a key via a null-returning function, and inserting a brand-new key.
 */
@Test
void compute() {
    Map<String, Object> map = new LowerCaseLinkHashMap<>(lowerCaseLinkHashMap);
    Object result = map.compute("key", (key, value) -> key.toUpperCase());
    Assertions.assertEquals("KEY", result);
    Assertions.assertEquals("KEY", map.get("key"));

    // A null-returning remapping function removes the entry.
    result = map.compute("key", (key, value) -> null);
    Assertions.assertNull(result);
    Assertions.assertFalse(map.containsKey("key"));

    // Computing an absent key inserts it.
    result = map.compute("compute", (key, value) -> key.toUpperCase());
    Assertions.assertEquals("COMPUTE", result);
    Assertions.assertEquals("COMPUTE", map.get("compute"));
}
public void write( ByteBuffer record, TieredStorageSubpartitionId subpartitionId, Buffer.DataType dataType, boolean isBroadcast) throws IOException { if (isBroadcast && !isBroadcastOnly) { int currentPosition = record.position(); for (int i = 0; i < numSubpartitions; ++i) { // As the tiered storage subpartition ID is created only for broadcast records, // which are fewer than normal records, the performance impact of generating new // TieredStorageSubpartitionId objects is expected to be manageable. If the // performance is significantly affected, this logic will be optimized accordingly. bufferAccumulator.receive( record, new TieredStorageSubpartitionId(i), dataType, isBroadcast); record.position(currentPosition); } } else { bufferAccumulator.receive(record, subpartitionId, dataType, isBroadcast); } }
/**
 * Verifies record routing across a two-tier producer chain: tier 1 refuses to
 * start a new segment once it has accepted a buffer and then accepts only
 * every other offer, so all remaining buffers spill into tier 2. Asserts the
 * per-tier counts add up and that the total received bytes match what was
 * written (multiplied by the subpartition fan-out when broadcasting).
 */
@TestTemplate
void testWriteRecords() throws IOException {
    int numSubpartitions = 10;
    int numToWriteRecords = 20;
    int bufferSize = 1024;
    Random random = new Random();

    AtomicInteger numReceivedBuffers = new AtomicInteger(0);
    AtomicInteger numReceivedBytes = new AtomicInteger(0);
    AtomicInteger numReceivedBuffersInTier1 = new AtomicInteger(0);
    AtomicInteger numReceivedBuffersInTier2 = new AtomicInteger(0);

    // Tier 1: may only start a segment before it has received anything, and
    // accepts a buffer only when its received count is even.
    TestingTierProducerAgent tierProducerAgent1 =
            new TestingTierProducerAgent.Builder()
                    .setTryStartSegmentSupplier(
                            ((subpartitionId, integer) -> numReceivedBuffersInTier1.get() < 1))
                    .setTryWriterFunction(
                            ((subpartitionId, buffer) -> {
                                boolean isSuccess = numReceivedBuffersInTier1.get() % 2 == 0;
                                if (isSuccess) {
                                    numReceivedBuffers.incrementAndGet();
                                    numReceivedBuffersInTier1.incrementAndGet();
                                    numReceivedBytes.set(
                                            numReceivedBytes.get() + buffer.readableBytes());
                                }
                                return isSuccess;
                            }))
                    .build();
    // Tier 2: accepts everything tier 1 rejected.
    TestingTierProducerAgent tierProducerAgent2 =
            new TestingTierProducerAgent.Builder()
                    .setTryWriterFunction(
                            ((subpartitionId, buffer) -> {
                                numReceivedBuffers.incrementAndGet();
                                numReceivedBuffersInTier2.incrementAndGet();
                                numReceivedBytes.set(
                                        numReceivedBytes.get() + buffer.readableBytes());
                                return true;
                            }))
                    .build();
    List<TierProducerAgent> tierProducerAgents = new ArrayList<>();
    tierProducerAgents.add(tierProducerAgent1);
    tierProducerAgents.add(tierProducerAgent2);
    TieredStorageProducerClient tieredStorageProducerClient =
            createTieredStorageProducerClient(numSubpartitions, tierProducerAgents);

    TieredStorageSubpartitionId subpartitionId = new TieredStorageSubpartitionId(0);
    for (int i = 0; i < numToWriteRecords; i++) {
        tieredStorageProducerClient.write(
                generateRandomData(bufferSize, random),
                subpartitionId,
                Buffer.DataType.DATA_BUFFER,
                isBroadcast);
    }

    // Broadcast fans each record out to every subpartition.
    int numExpectedBytes =
            isBroadcast
                    ? numSubpartitions * numToWriteRecords * bufferSize
                    : numToWriteRecords * bufferSize;
    assertThat(numReceivedBuffersInTier1.get()).isEqualTo(1);
    assertThat(numReceivedBuffers.get())
            .isEqualTo(numReceivedBuffersInTier1.get() + numReceivedBuffersInTier2.get());
    assertThat(numReceivedBytes.get()).isEqualTo(numExpectedBytes);
}
/**
 * Runs the pipeline by delegating to the wrapped {@link PortableRunner},
 * after logging the environment type and job endpoint in use so a run can be
 * traced back to its configuration.
 */
@Override
public PipelineResult run(Pipeline pipeline) {
    LOG.info(
        "running Pipeline using {}: defaultEnvironmentType: {}, jobEndpoint: {}",
        PortableRunner.class.getName(),
        prismPipelineOptions.getDefaultEnvironmentType(),
        prismPipelineOptions.getJobEndpoint());
    return internal.run(pipeline);
}
/**
 * An unbounded source (PeriodicImpulse) must leave the pipeline in RUNNING
 * state after submission and transition to CANCELLED when cancel() is called.
 */
@Test
public void givenUnboundedSource_runsUntilCancel() throws IOException {
    Pipeline pipeline = Pipeline.create(options());
    pipeline.apply(PeriodicImpulse.create());
    PipelineResult result = pipeline.run();
    assertThat(result.getState()).isEqualTo(PipelineResult.State.RUNNING);
    PipelineResult.State state = result.cancel();
    assertThat(state).isEqualTo(PipelineResult.State.CANCELLED);
}
/** Returns the {@code ChannelHealthChecker} this pool was configured with. */
protected ChannelHealthChecker healthChecker() {
    return healthCheck;
}
/**
 * The pool must expose the exact health-checker instance passed at
 * construction; the pool is closed in finally to avoid leaking resources.
 */
@Test
public void testHealthChecker() {
    final ChannelHealthChecker healthChecker = ChannelHealthChecker.ACTIVE;
    final SimpleChannelPool pool = new SimpleChannelPool(
        new Bootstrap(),
        new CountingChannelPoolHandler(),
        healthChecker);
    try {
        assertSame(healthChecker, pool.healthChecker());
    } finally {
        pool.close();
    }
}
/**
 * Resolves the client IP of an HTTP request, honoring reverse-proxy headers.
 *
 * <p>The configured forwarding headers are scanned in priority order; a header
 * may carry several addresses separated by {@code ','} or {@code ';'}, and the
 * first entry that is non-blank and not the literal "unknown" wins. If no
 * header yields an address, the socket peer address is used; {@code UNKNOWN}
 * is returned when that is absent or unresolved.
 */
public static String getClientIp(ServerHttpRequest request) {
    for (String headerName : IP_HEADER_NAMES) {
        String headerValue = request.getHeaders().getFirst(headerName);
        if (!StringUtils.hasText(headerValue) || UNKNOWN.equalsIgnoreCase(headerValue)) {
            continue;
        }
        for (String candidate : headerValue.trim().split("[,;]")) {
            if (StringUtils.hasText(candidate) && !UNKNOWN.equalsIgnoreCase(candidate)) {
                return candidate;
            }
        }
    }

    // No forwarding header matched: fall back to the remote socket address.
    var remoteAddress = request.getRemoteAddress();
    if (remoteAddress == null || remoteAddress.isUnresolved()) {
        return UNKNOWN;
    }
    return remoteAddress.getAddress().getHostAddress();
}
/**
 * With no forwarding headers and an unresolved remote socket address, the
 * resolver must return the UNKNOWN sentinel rather than throwing.
 */
@Test
void testGetUnknownIPAddressWhenRemoteAddressIsUnresolved() {
    var request = MockServerHttpRequest.get("/")
        .remoteAddress(InetSocketAddress.createUnresolved("localhost", 8090))
        .build();
    var actual = IpAddressUtils.getClientIp(request);
    assertEquals(IpAddressUtils.UNKNOWN, actual);
}
public ProcessContinuation run( PartitionRecord partitionRecord, RestrictionTracker<StreamProgress, StreamProgress> tracker, OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator) throws IOException { BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator = new BytesThroughputEstimator<>(sizeEstimator, Instant.now()); // Lock the partition if (tracker.currentRestriction().isEmpty()) { boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord); // Clean up NewPartition on the first run regardless of locking result. If locking fails it // means this partition is being streamed, then cleaning up NewPartitions avoids lingering // NewPartitions. for (NewPartition newPartition : partitionRecord.getParentPartitions()) { metadataTableDao.deleteNewPartition(newPartition); } if (!lockedPartition) { LOG.info( "RCSP {} : Could not acquire lock with uid: {}, because this is a " + "duplicate and another worker is working on this partition already.", formatByteStringRange(partitionRecord.getPartition()), partitionRecord.getUuid()); StreamProgress streamProgress = new StreamProgress(); streamProgress.setFailToLock(true); metrics.decPartitionStreamCount(); tracker.tryClaim(streamProgress); return ProcessContinuation.stop(); } } else if (tracker.currentRestriction().getCloseStream() == null && !metadataTableDao.doHoldLock( partitionRecord.getPartition(), partitionRecord.getUuid())) { // We only verify the lock if we are not holding CloseStream because if this is a retry of // CloseStream we might have already cleaned up the lock in a previous attempt. // Failed correctness check on this worker holds the lock on this partition. This shouldn't // fail because there's a restriction tracker which means this worker has already acquired the // lock and once it has acquired the lock it shouldn't fail the lock check. LOG.warn( "RCSP {} : Subsequent run that doesn't hold the lock {}. 
This is not unexpected and " + "should probably be reviewed.", formatByteStringRange(partitionRecord.getPartition()), partitionRecord.getUuid()); StreamProgress streamProgress = new StreamProgress(); streamProgress.setFailToLock(true); metrics.decPartitionStreamCount(); tracker.tryClaim(streamProgress); return ProcessContinuation.stop(); } // Process CloseStream if it exists CloseStream closeStream = tracker.currentRestriction().getCloseStream(); if (closeStream != null) { LOG.debug("RCSP: Processing CloseStream"); metrics.decPartitionStreamCount(); if (closeStream.getStatus().getCode() == Status.Code.OK) { // We need to update watermark here. We're terminating this stream because we have reached // endTime. Instant.now is greater or equal to endTime. The goal here is // DNP will need to know this stream has passed the endTime so DNP can eventually terminate. Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE); Instant endTime = partitionRecord.getEndTime(); if (endTime != null) { terminatingWatermark = endTime; } watermarkEstimator.setWatermark(terminatingWatermark); metadataTableDao.updateWatermark( partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null); LOG.info( "RCSP {}: Reached end time, terminating...", formatByteStringRange(partitionRecord.getPartition())); return ProcessContinuation.stop(); } if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) { LOG.error( "RCSP {}: Reached unexpected terminal state: {}", formatByteStringRange(partitionRecord.getPartition()), closeStream.getStatus()); return ProcessContinuation.stop(); } // Release the lock only if the uuid matches. In normal operation this doesn't change // anything. However, it's possible for this RCSP to crash while processing CloseStream but // after the side effects of writing the new partitions to the metadata table. New partitions // can be created while this RCSP restarts from the previous checkpoint and processes the // CloseStream again. 
In certain race scenarios the child partitions may merge back to this // partition, but as a new RCSP. The new partition (same as this partition) would write the // exact same content to the metadata table but with a different uuid. We don't want to // accidentally delete the StreamPartition because it now belongs to the new RCSP. // If the uuid is the same (meaning this race scenario did not take place) we release the lock // and mark the StreamPartition to be deleted, so we can delete it after we have written the // NewPartitions. metadataTableDao.releaseStreamPartitionLockForDeletion( partitionRecord.getPartition(), partitionRecord.getUuid()); // The partitions in the continuation tokens must cover the same key space as this partition. // If there's only 1 token, then the token's partition is equals to this partition. // If there are more than 1 tokens, then the tokens form a continuous row range equals to this // partition. List<ByteStringRange> childPartitions = new ArrayList<>(); List<ByteStringRange> tokenPartitions = new ArrayList<>(); // Check if NewPartitions field exists, if not we default to using just the // ChangeStreamContinuationTokens. 
boolean useNewPartitionsField = closeStream.getNewPartitions().size() == closeStream.getChangeStreamContinuationTokens().size(); for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) { ByteStringRange childPartition; if (useNewPartitionsField) { childPartition = closeStream.getNewPartitions().get(i); } else { childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition(); } childPartitions.add(childPartition); ChangeStreamContinuationToken token = getTokenWithCorrectPartition( partitionRecord.getPartition(), closeStream.getChangeStreamContinuationTokens().get(i)); tokenPartitions.add(token.getPartition()); metadataTableDao.writeNewPartition( new NewPartition( childPartition, Collections.singletonList(token), watermarkEstimator.getState())); } LOG.info( "RCSP {}: Split/Merge into {}", formatByteStringRange(partitionRecord.getPartition()), partitionsToString(childPartitions)); if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) { LOG.warn( "RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace", formatByteStringRange(partitionRecord.getPartition()), partitionsToString(tokenPartitions)); } // Perform the real cleanup. This step is no op if the race mentioned above occurs (splits and // merges results back to this partition again) because when we register the "new" partition, // we unset the deletion bit. metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition()); return ProcessContinuation.stop(); } // Update the metadata table with the watermark metadataTableDao.updateWatermark( partitionRecord.getPartition(), watermarkEstimator.getState(), tracker.currentRestriction().getCurrentToken()); // Start to stream the partition. 
ServerStream<ChangeStreamRecord> stream = null; try { stream = changeStreamDao.readChangeStreamPartition( partitionRecord, tracker.currentRestriction(), partitionRecord.getEndTime(), heartbeatDuration); for (ChangeStreamRecord record : stream) { Optional<ProcessContinuation> result = changeStreamAction.run( partitionRecord, record, tracker, receiver, watermarkEstimator, throughputEstimator); // changeStreamAction will usually return Optional.empty() except for when a checkpoint // (either runner or pipeline initiated) is required. if (result.isPresent()) { return result.get(); } } } catch (Exception e) { throw e; } finally { if (stream != null) { stream.cancel(); } } return ProcessContinuation.resume(); }
/**
 * CloseStream handling for the legacy API shape where new_partitions is absent
 * and the continuation token itself names the child partition: the merge of AC
 * into AD must write a NewPartition for AD carrying a token re-keyed to this
 * partition, release the stream-partition lock for deletion, delete the row,
 * and terminate without claiming any restriction or streaming records.
 */
@Test
public void testCloseStreamMergeWithoutNewPartitionsField() throws IOException {
    // Force lock fail because CloseStream should not depend on locking
    when(metadataTableDao.doHoldLock(partition, uuid)).thenReturn(false);
    // This is testing the old version of the API without the NewPartitions field. This means the
    // ChangeStreamContinuationToken's partition represents the new partition.
    // AC merging into AD.
    ByteStringRange childPartitionAD = ByteStringRange.create("A", "D");
    // The partition in the token is different from the test above. The token is actually encoded
    // for partition AC, but in this version, the partition in the token represents the NEW (child)
    // partition. This has been replaced by the new_partitions field in CloseStream.
    ChangeStreamContinuationToken parentTokenAC =
        ChangeStreamContinuationToken.create(ByteStringRange.create("A", "D"), "AC");
    CloseStream mockCloseStream = Mockito.mock(CloseStream.class);
    Status statusProto = Status.newBuilder().setCode(11).build();
    Mockito.when(mockCloseStream.getStatus())
        .thenReturn(com.google.cloud.bigtable.common.Status.fromProto(statusProto));
    Mockito.when(mockCloseStream.getChangeStreamContinuationTokens())
        .thenReturn(Collections.singletonList(parentTokenAC));
    Mockito.when(mockCloseStream.getNewPartitions()).thenReturn(Collections.emptyList());
    when(restriction.getCloseStream()).thenReturn(mockCloseStream);
    when(restriction.isEmpty()).thenReturn(false);
    final DoFn.ProcessContinuation result =
        action.run(partitionRecord, tracker, receiver, watermarkEstimator);
    assertEquals(DoFn.ProcessContinuation.stop(), result);
    // Should terminate before reaching processing stream partition responses.
    verify(changeStreamAction, never()).run(any(), any(), any(), any(), any(), any());
    // Should not try claim any restriction when processing CloseStream
    verify(tracker, (never())).tryClaim(any());
    // Should decrement the metric on termination.
    verify(metrics).decPartitionStreamCount();
    // We have to correct the partition in the tokens if we don't have new_partitions field.
    ChangeStreamContinuationToken realTokenAC =
        getTokenWithCorrectPartition(partition, parentTokenAC);
    // Write the new partitions.
    NewPartition newPartitionAD =
        new NewPartition(
            childPartitionAD, Collections.singletonList(realTokenAC), watermarkEstimator.getState());
    verify(metadataTableDao).writeNewPartition(eq(newPartitionAD));
    verify(metadataTableDao, times(1))
        .releaseStreamPartitionLockForDeletion(
            partitionRecord.getPartition(), partitionRecord.getUuid());
    verify(metadataTableDao, times(1)).deleteStreamPartitionRow(partitionRecord.getPartition());
}
/**
 * Copies one complete struct record from {@code in} to {@code out} by
 * delegating to {@code readOneStruct}.
 */
@Override
public void readOne(TProtocol in, TProtocol out) throws TException {
    readOneStruct(in, out);
}
/**
 * Reading data that carries an extra field against a schema whose field ids
 * start at 4 must ignore the unknown field (id 3) exactly once and report one
 * record with missing fields, rather than failing.
 */
@Test
public void TestExtraFieldWhenFieldIndexIsNotStartFromZero() throws Exception {
    CountingErrorHandler countingHandler = new CountingErrorHandler() {
        @Override
        public void handleFieldIgnored(TField field) {
            // The ignored field must be the extra one with id 3.
            assertEquals(3, field.id);
            fieldIgnoredCount++;
        }
    };
    BufferedProtocolReadToWrite structForRead = new BufferedProtocolReadToWrite(
        ThriftSchemaConverter.toStructType(StructWithIndexStartsFrom4.class), countingHandler);

    // Data has an extra field of type struct
    final ByteArrayOutputStream in = new ByteArrayOutputStream();
    StructWithExtraField dataWithNewExtraField =
        new StructWithExtraField(new Phone("111", "222"), new Phone("333", "444"));
    dataWithNewExtraField.write(protocol(in));

    // read using the schema that doesn't have the extra field
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    structForRead.readOne(protocol(new ByteArrayInputStream(in.toByteArray())), protocol(out));

    assertEquals(1, countingHandler.recordCountOfMissingFields);
    assertEquals(1, countingHandler.fieldIgnoredCount);
}
/**
 * Builds a CORS allowed-methods configuration.
 *
 * <p>Every supplied method name is validated eagerly; the first invalid one is
 * reported by name, which the original generic "Invalid HTTP method" message
 * did not do, making configuration mistakes easier to diagnose.
 *
 * @param methods HTTP method names to allow
 * @return the methods configuration
 * @throws IllegalArgumentException if any name is not a valid HTTP method
 */
public static CorsConfig allowMethods(final String... methods) {
    for (String method : methods) {
        if (!MocoCors.isValidMethod(method)) {
            throw new IllegalArgumentException("Invalid HTTP method: " + method);
        }
    }
    return new CorsMethodsConfig(methods);
}
/** An unknown method name ("foo") must be rejected with IllegalArgumentException. */
@Test
public void should_not_allow_unknown_headers() {
    assertThrows(IllegalArgumentException.class, () -> allowMethods("foo"));
}
/**
 * Checks that {@code value} is one of the attribute's allowed values.
 *
 * <p>Fix: the exception message now includes the rejected value — the original
 * only printed the allowed set, which made failures hard to diagnose. The
 * exception type is kept as {@link RuntimeException} so existing callers'
 * catch blocks are unaffected.
 *
 * @param value candidate value to validate
 * @throws RuntimeException if the value is not in the allowed set
 */
@Override
public void verify(String value) {
    if (!this.universe.contains(value)) {
        throw new RuntimeException("value '" + value + "' is not in set: " + this.universe);
    }
}
/** All values belonging to the configured enum universe must pass verification. */
@Test
public void verify_ValidValue_NoExceptionThrown() {
    enumAttribute.verify("value1");
    enumAttribute.verify("value2");
    enumAttribute.verify("value3");
}
/**
 * Creates a persistent window-store supplier; delegates to the full overload
 * with the timestamped-store flag set to {@code false}.
 *
 * @throws IllegalArgumentException if the delegate rejects the arguments
 *         (e.g. negative retention or window size)
 */
public static WindowBytesStoreSupplier persistentWindowStore(final String name,
                                                             final Duration retentionPeriod,
                                                             final Duration windowSize,
                                                             final boolean retainDuplicates) throws IllegalArgumentException {
    return persistentWindowStore(name, retentionPeriod, windowSize, retainDuplicates, false);
}
/** A negative retention period must be rejected with a descriptive message. */
@Test
public void shouldThrowIfIPersistentWindowStoreRetentionPeriodIsNegative() {
    final Exception e = assertThrows(IllegalArgumentException.class,
        () -> Stores.persistentWindowStore("anyName", ofMillis(-1L), ZERO, false));
    assertEquals("retentionPeriod cannot be negative", e.getMessage());
}
/**
 * Verifies TFS connectivity by fetching the latest history entry for the
 * configured project path. An empty history is treated as a failure (invalid
 * path, no commits, or insufficient permissions).
 *
 * @throws RuntimeException wrapping the root cause, with URL/project/user
 *         context in the message for diagnostics
 */
@Override
public final void checkConnection() {
    LOGGER.info("[TFS] Checking Connection: Server {}, Domain {}, User {}, Project Path {}", url, domain, userName, projectPath);
    try {
        List<Modification> modifications = latestInHistory();
        if (modifications.isEmpty()) {
            throw new IllegalStateException("There might be no commits on the project path, project path might be invalid or user may have insufficient permissions.");
        }
    } catch (Exception e) {
        // Wrap with connection context so operators can see which settings failed.
        String message = String.format("Failed while checking connection using Url: %s, Project Path: %s, Username: %s, Domain: %s, Root Cause: %s",
            url, projectPath, userName, domain, e.getMessage());
        throw new RuntimeException(message, e);
    }
}
/** A URL containing a percent-encoded space ("abc%20def") must not break the connection check. */
@Test
public void shouldNotFailToCheckConnectionForUrlWithURLEncodedSpaceInIt() throws Exception {
    tfsCommand = tfsCommandFor(null, new UrlArgument("abc%20def"), domain, user, password, workspace, projectPath);
    tfsCommand.checkConnection();
}
/**
 * Refreshes the Bitbucket integration health metric: red when nothing is
 * configured or validation of any configured instance fails, green otherwise.
 */
@Override
public void run() {
    try (DbSession dbSession = dbClient.openSession(false)) {
        List<AlmSettingDto> serverConfigs =
            dbClient.almSettingDao().selectByAlm(dbSession, ALM.BITBUCKET);
        List<AlmSettingDto> cloudConfigs =
            dbClient.almSettingDao().selectByAlm(dbSession, ALM.BITBUCKET_CLOUD);

        // No Bitbucket integration of either flavor configured: report red.
        if (serverConfigs.isEmpty() && cloudConfigs.isEmpty()) {
            metrics.setBitbucketStatusToRed();
            return;
        }

        try {
            validate(serverConfigs, cloudConfigs);
            metrics.setBitbucketStatusToGreen();
        } catch (RuntimeException e) {
            // Any validation failure flips the status to red.
            metrics.setBitbucketStatusToRed();
        }
    }
}
/**
 * With a Bitbucket Server configuration present (and no Cloud one), the check
 * must set the status green exactly once and never red.
 */
@Test
public void run_bitbucketServerConfiguredBitbucketCloudNot_setGreenStatusInMetricsOnce() {
    when(almSettingsDao.selectByAlm(dbSession, ALM.BITBUCKET)).thenReturn(generateDtos(1, ALM.BITBUCKET));

    underTest.run();

    verify(metrics, times(1)).setBitbucketStatusToGreen();
    verify(metrics, times(0)).setBitbucketStatusToRed();
}
/** Resets the current-sentence tracking state and discards all learned next-word associations. */
public void clearData() {
    resetSentence();
    mNextWordMap.clear();
}
/**
 * After clearData(), single-word lookups still respond (fresh state) but the
 * previously learned word-to-word associations are gone — note the two-word
 * assertions are not repeated after clearing.
 */
@Test
public void testClearData() throws Exception {
    mNextWordDictionaryUnderTest.load();
    assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "hello");
    assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "menny");
    assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "hello", "menny");
    assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "menny", "hello");
    mNextWordDictionaryUnderTest.clearData();
    assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "hello");
    assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "menny");
    mNextWordDictionaryUnderTest.close();
}
/**
 * Flattens the request's custom header into its ext-field map and returns the
 * fields sorted by key (via TreeMap) so that request signing is deterministic.
 */
protected SortedMap<String, String> parseRequestContent(RemotingCommand request) {
    request.makeCustomHeaderToNet();
    Map<String, String> extFields = request.getExtFields();
    // Sort property
    return new TreeMap<>(extFields);
}
/**
 * The new content parsing must produce byte-identical signing input to the
 * legacy parsing path for a representative pull-message request.
 */
@Test
public void testParseRequestContent() {
    PullMessageRequestHeader requestHeader = new PullMessageRequestHeader();
    requestHeader.setConsumerGroup("group");
    requestHeader.setTopic("topic");
    requestHeader.setQueueId(1);
    requestHeader.setQueueOffset(2L);
    requestHeader.setMaxMsgNums(32);
    requestHeader.setSysFlag(0);
    requestHeader.setCommitOffset(0L);
    requestHeader.setSuspendTimeoutMillis(15000L);
    requestHeader.setSubVersion(0L);
    RemotingCommand testPullRemotingCommand =
        RemotingCommand.createRequestCommand(RequestCode.PULL_MESSAGE, requestHeader);
    // Legacy path computes the content before the access key is an ext field.
    SortedMap<String, String> oldContent =
        oldVersionParseRequestContent(testPullRemotingCommand, "ak", null);
    byte[] oldBytes = AclUtils.combineRequestContent(testPullRemotingCommand, oldContent);
    testPullRemotingCommand.addExtField(ACCESS_KEY, "ak");
    SortedMap<String, String> content = aclClientRPCHook.parseRequestContent(testPullRemotingCommand);
    byte[] newBytes = AclUtils.combineRequestContent(testPullRemotingCommand, content);
    assertThat(newBytes).isEqualTo(oldBytes);
}
/**
 * Script callback that recolors a chat line in place on the client's string
 * stack before rendering: chooses username/channel colors based on the
 * message type, friend status and chatbox transparency, then applies the
 * configured NORMAL chat color to the message body — rewriting any embedded
 * closing color tags so they continue the chosen color instead of resetting it.
 * Group-ironman messages carry a leading '|' that is stripped before
 * formatting and re-attached afterwards.
 */
@VisibleForTesting
void colorChatMessage()
{
    final int[] intStack = client.getIntStack();
    final String[] stringStack = client.getStringStack();
    final int size = client.getStringStackSize();
    final int isize = client.getIntStackSize();
    final int uid = intStack[isize - 1];
    final boolean splitpmbox = intStack[isize - 2] == 1;

    final MessageNode messageNode = client.getMessages().get(uid);
    assert messageNode != null : "chat message build for unknown message";

    String message = stringStack[size - 2];
    final String username = stringStack[size - 3];
    final String channel = stringStack[size - 4];
    final ChatMessageType chatMessageType = messageNode.getType();
    final boolean isChatboxTransparent = client.isResized() && client.getVarbitValue(Varbits.TRANSPARENT_CHATBOX) == 1;

    Color usernameColor = null;
    Color channelColor = null;

    switch (chatMessageType)
    {
        // username recoloring for MODPRIVATECHAT, PRIVATECHAT and PRIVATECHATOUT
        // ChatMessageTypes is handled in the script callback event
        case TRADEREQ:
        case AUTOTYPER:
        case PUBLICCHAT:
        case MODCHAT:
        {
            // Strip img/color tags and normalize non-breaking spaces before name comparison.
            String sanitizedUsername = Text.removeTags(username).replace('\u00A0', ' ');

            if (client.getLocalPlayer().getName().equals(sanitizedUsername))
            {
                usernameColor = isChatboxTransparent ? chatColorConfig.transparentPlayerUsername() : chatColorConfig.opaquePlayerUsername();
            }
            else if (client.isFriended(sanitizedUsername, true))
            {
                usernameColor = isChatboxTransparent ? chatColorConfig.transparentPublicFriendUsernames() : chatColorConfig.opaquePublicFriendUsernames();
            }
            else
            {
                usernameColor = isChatboxTransparent ? chatColorConfig.transparentUsername() : chatColorConfig.opaqueUsername();
            }
            break;
        }
        case FRIENDSCHAT:
        case FRIENDSCHATNOTIFICATION:
            usernameColor = isChatboxTransparent ? chatColorConfig.transparentFriendsChatUsernames() : chatColorConfig.opaqueFriendsChatUsernames();
            channelColor = isChatboxTransparent ? chatColorConfig.transparentFriendsChatChannelName() : chatColorConfig.opaqueFriendsChatChannelName();
            break;
        case CLAN_CHAT:
        case CLAN_MESSAGE:
        case CLAN_GIM_CHAT:
        case CLAN_GIM_MESSAGE:
            usernameColor = isChatboxTransparent ? chatColorConfig.transparentClanChatUsernames() : chatColorConfig.opaqueClanChatUsernames();
            channelColor = isChatboxTransparent ? chatColorConfig.transparentClanChannelName() : chatColorConfig.opaqueClanChannelName();
            break;
        case CLAN_GUEST_CHAT:
        case CLAN_GUEST_MESSAGE:
            usernameColor = isChatboxTransparent ? chatColorConfig.transparentClanChatGuestUsernames() : chatColorConfig.opaqueClanChatGuestUsernames();
            channelColor = isChatboxTransparent ? chatColorConfig.transparentClanChannelGuestName() : chatColorConfig.opaqueClanGuestChatChannelName();
            break;
    }

    if (usernameColor != null)
    {
        stringStack[size - 3] = ColorUtil.wrapWithColorTag(username, usernameColor);
    }

    if (channelColor != null && !Strings.isNullOrEmpty(channel))
    {
        stringStack[size - 4] = ColorUtil.wrapWithColorTag(channel, channelColor);
    }

    String prefix = "";
    if (chatMessageType == ChatMessageType.CLAN_GIM_CHAT || chatMessageType == ChatMessageType.CLAN_GIM_MESSAGE)
    {
        message = message.substring(1); // remove |
        prefix = "|";
    }

    if (messageNode.getRuneLiteFormatMessage() != null)
    {
        message = formatRuneLiteMessage(messageNode.getRuneLiteFormatMessage(), chatMessageType, splitpmbox);
    }

    final Collection<ChatColor> chatColors = colorCache.get(chatMessageType);
    for (ChatColor chatColor : chatColors)
    {
        if (chatColor.isTransparent() != isChatboxTransparent
            || chatColor.getType() != ChatColorType.NORMAL
            || chatColor.isDefault())
        {
            continue;
        }

        // Replace </col> tags in the message with the new color so embedded </col> won't reset the color
        final Color color = chatColor.getColor();
        message = ColorUtil.wrapWithColorTag(
            message.replace(ColorUtil.CLOSING_COLOR_TAG, ColorUtil.colorTag(color)),
            color);
        break;
    }

    stringStack[size - 2] = prefix + message;
}
/**
 * A public chat message from a friend (who is not the local player) must have
 * the username wrapped in the configured opaque friend-username color tag.
 */
@Test
public void testPublicFriendUsernameRecolouring() {
    final String localPlayerName = "RuneLite";
    final String friendName = "Zezima";

    when(chatColorConfig.opaquePublicFriendUsernames()).thenReturn(Color.decode("#b20000"));
    setupVm(ChatMessageType.PUBLICCHAT, friendName, "");

    // Setup friend checking
    Player localPlayer = mock(Player.class);
    when(client.isFriended(friendName, true)).thenReturn(true);
    when(client.getLocalPlayer()).thenReturn(localPlayer);
    when(localPlayer.getName()).thenReturn(localPlayerName);

    chatMessageManager.colorChatMessage();

    assertEquals("<col=b20000>" + friendName + "</col>", sstack[1]);
}
/**
 * Builds the resource-model tree for a set of Rest.li annotated classes.
 * Each class is processed in dependency order; root resources are collected
 * into the returned map keyed by resource name, while {@code modelsByClass}
 * acts as a shared cache so parents are only processed once.
 */
public static Map<String, ResourceModel> buildResourceModels(final Set<Class<?>> restliAnnotatedClasses) {
    final Map<String, ResourceModel> rootModels = new HashMap<>();
    final Map<Class<?>, ResourceModel> modelsByClass = new HashMap<>();
    restliAnnotatedClasses.forEach(
        annotatedClass -> processResourceInOrder(annotatedClass, modelsByClass, rootModels));
    return rootModels;
}
/**
 * Unstructured-data resource classes must be rejected with a descriptive
 * ResourceConfigException when they declare @Finder methods.
 */
@Test(dataProvider = "unsupportedFinderResourceTypeData",
    expectedExceptions = ResourceConfigException.class,
    expectedExceptionsMessageRegExp = "Class '.*' does not support @Finder methods, because it's an unstructured data resource")
public void testFinderUnsupportedResourceType(Class<?> resourceClass) {
    RestLiApiBuilder.buildResourceModels(Collections.singleton(resourceClass));
}
/**
 * Returns a fixed-width, human-readable status line: publisher name, shutdown
 * flag and queue fill (current/max).
 *
 * <p>The exact column widths ({@code %-30s}, {@code %5s}, {@code %7d/%-7d})
 * are part of the observable output — callers/tests compare the padded string
 * verbatim, so do not change the format.
 */
public String getStatus() {
    return String.format("Publisher %-30s: shutdown=%5s, queue=%7d/%-7d",
        publisherName, shutdown, currentEventSize(), queueMaxSize);
}
/**
 * Status string must reflect queue depth after publishing, after subscribers
 * drain the queue, and the shutdown flag after shutdown — padded exactly.
 */
@Test
void getStatus() throws NacosException {
    namingEventPublisher.publish(new TestEvent());
    namingEventPublisher.publish(new TestEvent.TestEvent1());
    namingEventPublisher.publish(new TestEvent.TestEvent2());
    String expectedStatus = "Publisher TestEvent                      : shutdown=false, queue=      3/8      ";
    assertThat(namingEventPublisher.getStatus(), is(expectedStatus));
    namingEventPublisher.addSubscriber(subscriber, TestEvent.TestEvent1.class);
    ThreadUtils.sleep(2000L);
    expectedStatus = "Publisher TestEvent                      : shutdown=false, queue=      0/8      ";
    assertThat(namingEventPublisher.getStatus(), is(expectedStatus));
    namingEventPublisher.shutdown();
    expectedStatus = "Publisher TestEvent                      : shutdown= true, queue=      0/8      ";
    assertThat(namingEventPublisher.getStatus(), is(expectedStatus));
}
/**
 * Builds a new Execution for {@code flow}, triggered by the {@code current}
 * execution of another flow.
 *
 * <p>The new execution inherits the flow's tenant, namespace, revision and
 * labels, and carries an ExecutionTrigger recording the source execution's id,
 * coordinates and state. Configured inputs are rendered with the source
 * execution's outputs exposed under {@code trigger.outputs} when present.
 *
 * @return the execution to start, or {@link Optional#empty()} when input
 *         rendering fails (logged as a warning, never thrown)
 */
public Optional<Execution> evaluate(RunContext runContext, io.kestra.core.models.flows.Flow flow, Execution current) {
    Logger logger = runContext.logger();

    Execution.ExecutionBuilder builder = Execution.builder()
        .id(IdUtils.create())
        .tenantId(flow.getTenantId())
        .namespace(flow.getNamespace())
        .flowId(flow.getId())
        .flowRevision(flow.getRevision())
        .labels(flow.getLabels())
        .state(new State())
        .trigger(ExecutionTrigger.of(
            this,
            Output.builder()
                .executionId(current.getId())
                .namespace(current.getNamespace())
                .flowId(current.getFlowId())
                .flowRevision(current.getFlowRevision())
                .state(current.getState().getCurrent())
                .build()
        ));

    try {
        if (this.inputs != null) {
            Map<String, Object> outputs = current.getOutputs();
            if (outputs != null && !outputs.isEmpty()) {
                // Expose the source execution's outputs as `trigger.outputs` for templating.
                builder.inputs(runContext.render(this.inputs, Map.of(TRIGGER_VAR, Map.of(OUTPUTS_VAR, outputs))));
            } else {
                builder.inputs(runContext.render(this.inputs));
            }
        } else {
            builder.inputs(new HashMap<>());
        }
        return Optional.of(builder.build());
    } catch (Exception e) {
        // Bad input templates must not break the triggering execution; skip this trigger.
        logger.warn(
            "Failed to trigger flow {}.{} for trigger {}, invalid inputs",
            flow.getNamespace(),
            flow.getId(),
            this.getId(),
            e
        );
        return Optional.empty();
    }
}
@Test void withTenant() { var flow = io.kestra.core.models.flows.Flow.builder() .id("flow-with-flow-trigger") .tenantId("tenantId") .namespace("io.kestra.unittest") .revision(1) .labels( List.of( new Label("flow-label-1", "flow-label-1"), new Label("flow-label-2", "flow-label-2") ) ) .tasks(Collections.singletonList(Return.builder() .id("test") .type(Return.class.getName()) .format("test") .build())) .build(); var execution = Execution.builder() .id(IdUtils.create()) .tenantId("tenantId") .namespace("io.kestra.unittest") .flowId("flow-with-flow-trigger") .flowRevision(1) .state(State.of(State.Type.RUNNING, Collections.emptyList())) .build(); var flowTrigger = Flow.builder() .id("flow") .type(Flow.class.getName()) .build(); Optional<Execution> evaluate = flowTrigger.evaluate( runContextFactory.of(), flow, execution ); assertThat(evaluate.isPresent(), is(true)); assertThat(evaluate.get().getFlowId(), is("flow-with-flow-trigger")); assertThat(evaluate.get().getTenantId(), is("tenantId")); assertThat(evaluate.get().getLabels(), hasItem(new Label("flow-label-1", "flow-label-1"))); assertThat(evaluate.get().getLabels(), hasItem(new Label("flow-label-2", "flow-label-2"))); }
@Override public Connection connect(String url, Properties info) throws SQLException { // calciteConnection is initialized with an empty Beam schema, // we need to populate it with pipeline options, load table providers, etc return JdbcConnection.initialize((CalciteConnection) super.connect(url, info)); }
@Test @Ignore("https://issues.apache.org/jira/browse/CALCITE-2394") public void testTimestampWithNonzeroTimezone() throws Exception { Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("Asia/Tokyo"), Locale.ROOT); TestTableProvider tableProvider = new TestTableProvider(); Connection connection = JdbcDriver.connect(tableProvider, PipelineOptionsFactory.create()); // A table with one TIMESTAMP column Schema schema = Schema.builder().addDateTimeField("ts").build(); connection .createStatement() .executeUpdate("CREATE EXTERNAL TABLE test (ts TIMESTAMP) TYPE 'test'"); ReadableInstant july1 = ISODateTimeFormat.dateTimeParser().parseDateTime("2018-07-01T01:02:03Z"); tableProvider.addRows("test", Row.withSchema(schema).addValue(july1).build()); ResultSet selectResult = connection.createStatement().executeQuery(String.format("SELECT ts FROM test")); selectResult.next(); Timestamp ts = selectResult.getTimestamp(1, cal); assertThat( String.format( "Wrote %s to a table, but got back %s", ISODateTimeFormat.basicDateTime().print(july1), ISODateTimeFormat.basicDateTime().print(ts.getTime())), ts.getTime(), equalTo(july1.getMillis())); }
static boolean shouldStoreMessage(final Message message) { // XEP-0334: Implement the <no-store/> hint to override offline storage if (message.getChildElement("no-store", "urn:xmpp:hints") != null) { return false; } // OF-2083: Prevent storing offline message that is already stored if (message.getChildElement("offline", "http://jabber.org/protocol/offline") != null) { return false; } switch (message.getType()) { case chat: // XEP-0160: Messages with a 'type' attribute whose value is "chat" SHOULD be stored offline, with the exception of messages that contain only Chat State Notifications (XEP-0085) [7] content // Iterate through the child elements to see if we can find anything that's not a chat state notification or // real time text notification Iterator<?> it = message.getElement().elementIterator(); while (it.hasNext()) { Object item = it.next(); if (item instanceof Element) { Element el = (Element) item; if (Namespace.NO_NAMESPACE.equals(el.getNamespace())) { continue; } if (!el.getNamespaceURI().equals("http://jabber.org/protocol/chatstates") && !(el.getQName().equals(QName.get("rtt", "urn:xmpp:rtt:0"))) ) { return true; } } } return message.getBody() != null && !message.getBody().isEmpty(); case groupchat: case headline: // XEP-0160: "groupchat" message types SHOULD NOT be stored offline // XEP-0160: "headline" message types SHOULD NOT be stored offline return false; case error: // XEP-0160: "error" message types SHOULD NOT be stored offline, // although a server MAY store advanced message processing errors offline if (message.getChildElement("amp", "http://jabber.org/protocol/amp") == null) { return false; } break; default: // XEP-0160: Messages with a 'type' attribute whose value is "normal" (or messages with no 'type' attribute) SHOULD be stored offline. break; } return true; }
@Test public void shouldStoreNormalMessages() { // XEP-0160: Messages with a 'type' attribute whose value is "normal" (or messages with no 'type' attribute) SHOULD be stored offline. Message message = new Message(); message.setType(Message.Type.normal); assertTrue(OfflineMessageStore.shouldStoreMessage(message)); Message message2 = new Message(); assertTrue(OfflineMessageStore.shouldStoreMessage(message2)); }
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception { return fromXmlPartial(toInputStream(partial, UTF_8), o); }
@Test void shouldThrowExceptionWhenXmlDoesNotMapToXmlPartial() { String stageXmlPartial = """ <stage name="mingle"> <jobs> <job name="functional"> <artifacts> <log src="artifact1.xml" dest="cruise-output" /> </artifacts> </job> </jobs> </stage> """; assertThatThrownBy(() -> xmlLoader.fromXmlPartial(stageXmlPartial, JobConfig.class)) .as("Should not be able to load stage into jobConfig") .hasMessage("Unable to parse element <stage> for class JobConfig"); }
private Mono<ServerResponse> listPosts(ServerRequest request) { PostPublicQuery query = new PostPublicQuery(request.exchange()); return postPublicQueryService.list(query.toListOptions(), query.toPageRequest()) .flatMap(result -> ServerResponse.ok().contentType(MediaType.APPLICATION_JSON) .bodyValue(result) ); }
@Test public void listPosts() { ListResult<ListedPostVo> result = new ListResult<>(List.of()); when(postPublicQueryService.list(any(), any(PageRequest.class))) .thenReturn(Mono.just(result)); webClient.get().uri("/posts") .exchange() .expectStatus().isOk() .expectHeader().contentType(MediaType.APPLICATION_JSON) .expectBody() .jsonPath("$.items").isArray(); verify(postPublicQueryService).list(any(), any(PageRequest.class)); }
@Override public void process(Tuple input) { String key = filterMapper.getKeyFromTuple(input); boolean found; JedisCommandsContainer jedisCommand = null; try { jedisCommand = getInstance(); switch (dataType) { case STRING: found = jedisCommand.exists(key); break; case SET: found = jedisCommand.sismember(additionalKey, key); break; case HASH: found = jedisCommand.hexists(additionalKey, key); break; case SORTED_SET: found = jedisCommand.zrank(additionalKey, key) != null; break; case HYPER_LOG_LOG: found = jedisCommand.pfcount(key) > 0; break; case GEO: List<GeoCoordinate> geopos = jedisCommand.geopos(additionalKey, key); if (geopos == null || geopos.isEmpty()) { found = false; } else { // If any entry is NOT null, then we have a match. found = geopos.stream() .anyMatch(Objects::nonNull); } break; default: throw new IllegalArgumentException("Cannot process such data type: " + dataType); } if (found) { collector.emit(input, input.getValues()); } collector.ack(input); } catch (Exception e) { this.collector.reportError(e); this.collector.fail(input); } }
@Test void smokeTest_geopos_notMember() { // Define input key final String geoKey = "ThisIsMyGeoKey"; final String inputKey = "ThisIsMyKey"; // Create an input tuple final Map<String, Object> values = new HashMap<>(); values.put("key", inputKey); values.put("value", "ThisIsMyValue"); final Tuple tuple = new StubTuple(values); final JedisPoolConfig config = configBuilder.build(); final TestMapper mapper = new TestMapper(GEO, geoKey); final RedisFilterBolt bolt = new RedisFilterBolt(config, mapper); bolt.prepare(new HashMap<>(), topologyContext, new OutputCollector(outputCollector)); bolt.process(tuple); // Verify the bolt filtered the input tuple. verifyTupleFiltered(); }
public static String toSocketAddressString(InetSocketAddress addr) { String port = String.valueOf(addr.getPort()); final StringBuilder sb; if (addr.isUnresolved()) { String hostname = getHostname(addr); sb = newSocketAddressStringBuilder(hostname, port, !isValidIpV6Address(hostname)); } else { InetAddress address = addr.getAddress(); String hostString = toAddressString(address); sb = newSocketAddressStringBuilder(hostString, port, address instanceof Inet4Address); } return sb.append(':').append(port).toString(); }
@Test public void testIp6InetSocketAddressToString() throws UnknownHostException { for (Entry<byte[], String> testEntry : ipv6ToAddressStrings.entrySet()) { assertEquals('[' + testEntry.getValue() + "]:9999", toSocketAddressString(new InetSocketAddress(InetAddress.getByAddress(testEntry.getKey()), 9999))); } }
@Override public ImportResult importItem( UUID jobId, IdempotentImportExecutor idempotentImportExecutor, TokensAndUrlAuthData authData, PhotosContainerResource resource) throws Exception { // Ensure credential is populated getOrCreateCredential(authData); monitor.debug( () -> String .format("%s: Importing %s albums and %s photos before transmogrification", jobId, resource.getAlbums().size(), resource.getPhotos().size())); // Make the data onedrive compatible resource.transmogrify(transmogrificationConfig); monitor.debug( () -> String.format("%s: Importing %s albums and %s photos after transmogrification", jobId, resource.getAlbums().size(), resource.getPhotos().size())); for (PhotoAlbum album : resource.getAlbums()) { // Create a OneDrive folder and then save the id with the mapping data idempotentImportExecutor.executeAndSwallowIOExceptions( album.getId(), album.getName(), () -> createOneDriveFolder(album)); } for (PhotoModel photoModel : resource.getPhotos()) { idempotentImportExecutor.executeAndSwallowIOExceptions( photoModel.getIdempotentId(), photoModel.getTitle(), () -> importSinglePhoto(photoModel, jobId, idempotentImportExecutor)); } return ImportResult.OK; }
@Test public void testCleanAlbumNames() throws Exception { List<PhotoAlbum> albums = ImmutableList.of(new PhotoAlbum("id1", "album1.", "This is a fake albumb")); PhotosContainerResource data = new PhotosContainerResource(albums, null); Call call = mock(Call.class); doReturn(call).when(client).newCall(argThat((Request r) -> { String body = ""; try { final Buffer buffer = new Buffer(); r.body().writeTo(buffer); body = buffer.readUtf8(); } catch (IOException e) { return false; } return r.url().toString().equals("https://www.baseurl.com/v1.0/me/drive/special/photos/children") && body.contains("album1_"); })); Response response = mock(Response.class); ResponseBody body = mock(ResponseBody.class); when(body.bytes()).thenReturn( ResponseBody.create(MediaType.parse("application/json"), "{\"id\": \"id1\"}").bytes()); when(body.string()).thenReturn( ResponseBody.create(MediaType.parse("application/json"), "{\"id\": \"id1\"}").string()); when(response.code()).thenReturn(200); when(response.body()).thenReturn(body); when(call.execute()).thenReturn(response); ImportResult result = importer.importItem(uuid, executor, authData, data); verify(client, times(1)).newCall(any()); assertThat(result).isEqualTo(ImportResult.OK); }
public static <FnT extends DoFn<?, ?>> DoFnSignature signatureForDoFn(FnT fn) { return getSignature(fn.getClass()); }
@Test public void testSimpleTimerIdNamedDoFn() throws Exception { class DoFnForTestSimpleTimerIdNamedDoFn extends DoFn<KV<String, Integer>, Long> { @TimerId("foo") private final TimerSpec bizzle = TimerSpecs.timer(TimeDomain.EVENT_TIME); @ProcessElement public void foo(ProcessContext context) {} @OnTimer("foo") public void onFoo() {} } // Test classes at the bottom of the file DoFnSignature sig = DoFnSignatures.signatureForDoFn(new DoFnForTestSimpleTimerIdNamedDoFn()); final String timerDeclarationId = TimerDeclaration.PREFIX + "foo"; assertThat(sig.timerDeclarations().size(), equalTo(1)); DoFnSignature.TimerDeclaration decl = sig.timerDeclarations().get(timerDeclarationId); assertThat(decl.id(), equalTo(timerDeclarationId)); assertThat( decl.field(), equalTo(DoFnForTestSimpleTimerIdNamedDoFn.class.getDeclaredField("bizzle"))); }
public boolean eval(ContentFile<?> file) { // TODO: detect the case where a column is missing from the file using file's max field id. return new MetricsEvalVisitor().eval(file); }
@Test public void testAllNulls() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("all_nulls")).eval(FILE); assertThat(shouldRead).as("Should skip: no non-null value in all null column").isFalse(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("all_nulls", "a")).eval(FILE); assertThat(shouldRead).as("Should skip: lessThan on all null column").isFalse(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("all_nulls", "a")).eval(FILE); assertThat(shouldRead).as("Should skip: lessThanOrEqual on all null column").isFalse(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("all_nulls", "a")).eval(FILE); assertThat(shouldRead).as("Should skip: greaterThan on all null column").isFalse(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("all_nulls", "a")).eval(FILE); assertThat(shouldRead).as("Should skip: greaterThanOrEqual on all null column").isFalse(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("all_nulls", "a")).eval(FILE); assertThat(shouldRead).as("Should skip: equal on all null column").isFalse(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, startsWith("all_nulls", "a")).eval(FILE); assertThat(shouldRead).as("Should skip: startsWith on all null column").isFalse(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notStartsWith("all_nulls", "a")).eval(FILE); assertThat(shouldRead).as("Should read: notStartsWith on all null column").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("some_nulls")).eval(FILE); assertThat(shouldRead) .as("Should read: column with some nulls contains a non-null value") .isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("no_nulls")).eval(FILE); assertThat(shouldRead).as("Should read: non-null column contains a non-null value").isTrue(); }
public static UnnamedFieldsMapping mapping( int fieldsCount ) { return new UnnamedFieldsMapping( fieldsCount ); }
@Test public void mapping() { UnnamedFieldsMapping mapping = UnnamedFieldsMapping.mapping( 2 ); assertEquals( 1, mapping.fieldMetaIndex( 1 ) ); }
public String format(Date then) { if (then == null) then = now(); Duration d = approximateDuration(then); return format(d); }
@Test public void testRightNow() throws Exception { PrettyTime t = new PrettyTime(); Assert.assertEquals("moments from now", t.format(new Date())); }
@Override public void deleteTenantPackage(Long id) { // 校验存在 validateTenantPackageExists(id); // 校验正在使用 validateTenantUsed(id); // 删除 tenantPackageMapper.deleteById(id); }
@Test public void testDeleteTenantPackage_success() { // mock 数据 TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class); tenantPackageMapper.insert(dbTenantPackage);// @Sql: 先插入出一条存在的数据 // 准备参数 Long id = dbTenantPackage.getId(); // mock 租户未使用该套餐 when(tenantService.getTenantCountByPackageId(eq(id))).thenReturn(0L); // 调用 tenantPackageService.deleteTenantPackage(id); // 校验数据不存在了 assertNull(tenantPackageMapper.selectById(id)); }
public static StatementExecutorResponse execute( final ConfiguredStatement<ListProperties> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { final KsqlConfigResolver resolver = new KsqlConfigResolver(); final Map<String, String> engineProperties = statement .getSessionConfig() .getConfig(false) .getAllConfigPropsWithSecretsObfuscated(); final List<Property> mergedProperties = mergedProperties(statement); final List<String> overwritten = mergedProperties .stream() .filter(property -> !Objects.equals( engineProperties.get(property.getName()), property.getValue())) .map(Property::getName) .collect(Collectors.toList()); final List<String> defaultProps = mergedProperties.stream() .filter(property -> resolver.resolve(property.getName(), false) .map(resolved -> resolved.isDefaultValue(property.getValue())) .orElse(false)) .map(Property::getName) .collect(Collectors.toList()); return StatementExecutorResponse.handled(Optional.of(new PropertiesList( statement.getMaskedStatementText(), mergedProperties, overwritten, defaultProps))); }
@Test public void shouldListProperties() { // When: final PropertiesList properties = (PropertiesList) CustomExecutors.LIST_PROPERTIES.execute( engine.configure("LIST PROPERTIES;"), mock(SessionProperties.class), engine.getEngine(), engine.getServiceContext() ).getEntity().orElseThrow(IllegalStateException::new); // Then: assertThat( toStringMap(properties), equalTo(engine.getKsqlConfig().getAllConfigPropsWithSecretsObfuscated())); assertThat(properties.getOverwrittenProperties(), is(empty())); }
@Override public Boolean authenticate(final Host bookmark, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { final Credentials credentials = bookmark.getCredentials(); if(credentials.isPublicKeyAuthentication()) { if(log.isDebugEnabled()) { log.debug(String.format("Login using public key authentication with credentials %s", credentials)); } final Local privKey = credentials.getIdentity(); final Local pubKey; final FileKeyProvider provider; final AtomicBoolean canceled = new AtomicBoolean(); try { final KeyFormat format = KeyProviderUtil.detectKeyFileFormat( new InputStreamReader(privKey.getInputStream(), StandardCharsets.UTF_8), true); if(log.isInfoEnabled()) { log.info(String.format("Reading private key %s with key format %s", privKey, format)); } switch(format) { case PKCS8: provider = new PKCS8KeyFile.Factory().create(); pubKey = null; break; case OpenSSH: { provider = new OpenSSHKeyFile.Factory().create(); final File f = OpenSSHKeyFileUtil.getPublicKeyFile(new File(privKey.getAbsolute())); if(f != null) { pubKey = LocalFactory.get(f.getAbsolutePath()); } else { pubKey = null; } break; } case OpenSSHv1: { provider = new OpenSSHKeyV1KeyFile.Factory().create(); final File f = OpenSSHKeyFileUtil.getPublicKeyFile(new File(privKey.getAbsolute())); if(f != null) { pubKey = LocalFactory.get(f.getAbsolutePath()); } else { pubKey = null; } break; } case PuTTY: provider = new PuTTYKeyFile.Factory().create(); pubKey = null; break; default: throw new InteroperabilityException(String.format("Unknown key format for file %s", privKey.getName())); } provider.init(new InputStreamReader(privKey.getInputStream(), StandardCharsets.UTF_8), pubKey != null ? 
new InputStreamReader(pubKey.getInputStream(), StandardCharsets.UTF_8) : null, new PasswordFinder() { @Override public char[] reqPassword(Resource<?> resource) { if(StringUtils.isEmpty(credentials.getIdentityPassphrase())) { try { // Use password prompt final Credentials input = prompt.prompt(bookmark, LocaleFactory.localizedString("Private key password protected", "Credentials"), String.format("%s (%s)", LocaleFactory.localizedString("Enter the passphrase for the private key file", "Credentials"), privKey.getAbbreviatedPath()), new LoginOptions() .icon(bookmark.getProtocol().disk()) .user(false).password(true) ); credentials.setSaved(input.isSaved()); credentials.setIdentityPassphrase(input.getPassword()); } catch(LoginCanceledException e) { canceled.set(true); // Return null if user cancels return StringUtils.EMPTY.toCharArray(); } } return credentials.getIdentityPassphrase().toCharArray(); } @Override public boolean shouldRetry(Resource<?> resource) { return false; } }); client.auth(credentials.getUsername(), new AuthPublickey(provider)); return client.isAuthenticated(); } catch(IOException e) { if(canceled.get()) { throw new LoginCanceledException(); } throw new SFTPExceptionMappingService().map(e); } } return false; }
@Test(expected = LoginCanceledException.class) public void testAuthenticatePuTTYKeyWithWrongPassword() throws Exception { final Local key = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString()); try { new DefaultLocalTouchFeature().touch(key); IOUtils.copy(new StringReader("PuTTY-User-Key-File-2: ssh-rsa\n" + "Encryption: aes256-cbc\n" + "Comment: rsa-key-20121215\n" + "Public-Lines: 4\n" + "AAAAB3NzaC1yc2EAAAABJQAAAIB7KdUyuvGb2ne9G9YDAjaYvX/Mq6Q6ppGjbEQo\n" + "bac66VUazxVpZsnAWikcdYAU7odkyt3jg7Nn1NgQS1a5mpXk/j77Ss5C9W4rymrU\n" + "p32cmbgB/KIV80DnOyZyOtDWDPM0M0RRXqQvAO6TsnmsNSnBa8puMLHqCtrhvvJD\n" + "KU+XEw==\n" + "Private-Lines: 8\n" + "4YMkPgLQJ9hOI1L1HsdOUnYi57tDy5h9DoPTHD55fhEYsn53h4WaHpxuZH8dTpbC\n" + "5TcV3vYTfhh+aFBY0p/FI8L1hKfABLRxhkqkkc7xMmOGlA6HejAc8oTA3VArgSeG\n" + "tRBuQRmBAC1Edtek/U+s8HzI2whzTw8tZoUUnT6844oc4tyCpWJUy5T8l+O3/03s\n" + "SceJ98DN2k+L358VY8AXgPxP6NJvHvIlwmIo+PtcMWsyZegfSHEnoXN2GN4N0ul6\n" + "298RzA9R+I3GSKKxsxUvWfOVibLq0dDM3+CTwcbmo4qvyM2xrRRLhObB2rVW07gL\n" + "7+FZpHxf44QoQQ8mVkDJNaT1faF+h/8tCp2j1Cj5yEPHMOHGTVMyaz7gqhoMw5RX\n" + "sfSP4ZaCGinLbouPrZN9Ue3ytwdEpmqU2MelmcZdcH6kWbLCqpWBswsxPfuhFdNt\n" + "oYhmT2+0DKBuBVCAM4qRdA==\n" + "Private-MAC: 40ccc8b9a7291ec64e5be0c99badbc8a012bf220\n" + "tp.key.openssh.rsa=-----BEGIN RSA PRIVATE KEY-----\n" + "Proc-Type: 4,ENCRYPTED\n" + "DEK-Info: AES-128-CBC,356A353DAFDC2E16E8BD0EE23A6201A2\n" + "\n" + "pvpC2RA1TteVsp584fSZ6RYFz97CF8tJXfyP4/8UdzpIVM8VXXlk4g3AnyI9/JD0\n" + "4/0dzNqGvg/84xUpDpdJp/w8fWJ8IzE7RXf1xDfg0xavr2iegp2aZBd48KVKImwU\n" + "yJlzy27VmVvIvWID2zPrNhOWzr4AdnP/NprLfhHPQDHV5gRcS92s6vFktZOPzNtQ\n" + "3O+j3O5MAyf/MpgPH4BTubOjcuZuZg3AJCjEPxLlwrRfxqXkRXXMB7XxDFdK7LQ/\n" + "fQnJzikcrYXFio8+DJhBg7OyOnlAmC0I85YomZJ+8C3A3bye9PakIxHJn/qNIQew\n" + "BujHxPVmnezjFzStr/SyfLE2a+RZu84Jm6u9+DuJYF5/Vo6yv6+zubsVaflXp5fS\n" + "SAogS0quWfoqoiUfhgCuOZlqv/aeo/BEetUEdHVi4KTdeSpcfrJa4CphXd8TwEPN\n" + "L4NFSc+8CeGayO45o5lXeQiKa4UH2oPEBaANHK4SQPKJ9NdyTlFN/O1c77kCvG4W\n" + 
"4thchQkUvwqwTYXwx9jNW3x7FBytJwmhi9DpzHMa/LFRrnedarFPDgep4E40NjRB\n" + "fy877Wd+KJTlrHjyQR13wgtlGZdcTO5QzLseztxqdaD14Dn7jPF/YJBDaj65Jw1N\n" + "+G6EB0zN70WL7Y3+2HnSLNZWEnLhletzfwbjVqr+Vg4XB2HQKH52gCyh+ITPEjqR\n" + "wU00oMJvGf518U+6awxzb3zwnoxMrFwcnaLqwsZNQ5CjmYVE/yERSK47OMYCNQl4\n" + "0Xxa9mWYBqWlfdMurkGCD6OuUWMx5t4mcpSg30UEQNBEVfrVk6t480iztgVJprEO\n" + "vhepM2nw326PH5VYAoXH+OmEezjI1AmHKqpbB/y9UQv6ZjEyUT70Tbs9JBtU4dze\n" + "Yha1Dc0+eYkUvZ5AjENQ/Bvfdyit4xxbDrU6TbFmyHpHwMPCNkcgO0u/Mgtc5Hmc\n" + "Gi6RaxUaxSZ2IlpJDNkqAzmv1Xr+M9TxbF2gZY+TJHUt/mc1rFpTl2qZ/tK/Ei1U\n" + "8TBVJHcNNwHiHtm/NpREYTmzu0s8X602JgXrkBxkM40NGVRqd08jaULhxdWcTmzW\n" + "pweib9WhIrvjTNZTAjjGku625qLihDt5jtbJxspM2dLGfcG4zgYgRr4u9HA+60oD\n" + "l1oNjz8IfBuJLJ3rwENI6oX9FW7huKc/XV1hP72/l2VhfuxtTufdjbaiwwiwObRA\n" + "O+zwB8NPWRG6UYj9IAWjASoPXOoyhk/f1fzvTH7xeO35QjkCICln095T+hNMZRiC\n" + "VpCCKsQGY2O30D9OJlnTpylBAq/Q/HXNL8Jj2f/rZRqDGzidj2No5mun/pZ3uwzr\n" + "CRrEpvfFuf8g1EnPQXmdlYRi/nmtBKsiQr0GWVzIOzNRi/tgsV0tyUgBT9QL4JKt\n" + "/z54PrlBK74I9SWcBv9EwCAfL9YdZ7mW0iWrmUUmcpuJcRUXnKvTynTpq/l6GE8+\n" + "Ld5saHMVWt7GlEbM3Fjqfvj7/dbtcy3TTmy0Vx4GbKzsaPytAb2jgLGn8bQfjQzp\n" + "hnPC1l+r7ebV7tBR216+6PmsXQu7atqgbGjb7Dh+GP8Ak73F8v6LPtyz+tAOYwpB\n" + "-----END RSA PRIVATE KEY-----\n"), key.getOutputStream(false), StandardCharsets.UTF_8); // Reconnect session.disconnect(); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); final AtomicBoolean p = new AtomicBoolean(); session.getHost().getCredentials().setIdentity(key); assertFalse(new SFTPPublicKeyAuthentication(session.getClient()).authenticate(session.getHost(), new DisabledLoginCallback() { @Override public Credentials prompt(final Host bookmark, String username, String title, String reason, LoginOptions options) throws LoginCanceledException { p.set(true); throw new LoginCanceledException(); } }, new DisabledCancelCallback())); assertTrue(p.get()); } finally { key.delete(); } }
public static byte[] jsonToAvro(String json, String avroSchema) throws AvroTypeException, IOException { return jsonToAvro(json, new Schema.Parser().parse(avroSchema)); }
@Test void testJsonToAvro() { String jsonText = "{\"name\":\"Laurent Broudoux\", \"email\":\"laurent@microcks.io\", \"age\":41}"; try { // Load schema from file. Schema schema = new Schema.Parser() .parse(new File("target/test-classes/io/github/microcks/util/user-signedup-bad.avsc")); // Convert back and forth to and from JSON. byte[] avroBinary = AvroUtil.jsonToAvro(jsonText, schema); System.err.println("binaryEncoding: \n" + new String(avroBinary, "UTF-8")); String jsonRepresentation = AvroUtil.avroToJson(avroBinary, schema); System.err.println("\njsonRepresentation: \n" + jsonRepresentation); assertTrue(jsonRepresentation.contains("\"Laurent Broudoux\"")); assertTrue(jsonRepresentation.contains("\"laurent@microcks.io\"")); assertTrue(jsonRepresentation.contains("41")); // Deserialize from binary encoding representation. DatumReader<GenericRecord> datumReader = new GenericDatumReader<GenericRecord>(schema); GenericRecord user = null; Decoder decoder = DecoderFactory.get().binaryDecoder(avroBinary, null); try { while (true) { user = datumReader.read(user, decoder); System.err.println("\nUser from binary representation: \n" + user.toString()); } } catch (EOFException eofException) { // Nothing to do here, just exit the while loop. } assertEquals("Laurent Broudoux", user.get("name").toString()); assertEquals("laurent@microcks.io", user.get("email").toString()); assertEquals(Integer.valueOf(41), (Integer) user.get("age")); } catch (Exception e) { fail("Exception should not be thrown"); } }
public static Optional<Object> getAdjacentValue(Type type, Object value, boolean isPrevious) { if (!type.isOrderable()) { throw new IllegalStateException("Type is not orderable: " + type); } requireNonNull(value, "value is null"); if (type.equals(BIGINT) || type instanceof TimestampType) { return getBigintAdjacentValue(value, isPrevious); } if (type.equals(INTEGER) || type.equals(DATE)) { return getIntegerAdjacentValue(value, isPrevious); } if (type.equals(SMALLINT)) { return getSmallIntAdjacentValue(value, isPrevious); } if (type.equals(TINYINT)) { return getTinyIntAdjacentValue(value, isPrevious); } if (type.equals(DOUBLE)) { return getDoubleAdjacentValue(value, isPrevious); } if (type.equals(REAL)) { return getRealAdjacentValue(value, isPrevious); } return Optional.empty(); }
@Test public void testPreviousValueForSmallInt() { long minValue = Short.MIN_VALUE; long maxValue = Short.MAX_VALUE; assertThat(getAdjacentValue(SMALLINT, minValue, true)) .isEqualTo(Optional.empty()); assertThat(getAdjacentValue(SMALLINT, minValue + 1, true)) .isEqualTo(Optional.of(minValue)); assertThat(getAdjacentValue(SMALLINT, 1234L, true)) .isEqualTo(Optional.of(1233L)); assertThat(getAdjacentValue(SMALLINT, maxValue - 1, true)) .isEqualTo(Optional.of(maxValue - 2)); assertThat(getAdjacentValue(SMALLINT, maxValue, true)) .isEqualTo(Optional.of(maxValue - 1)); }
@Override public Object read(final MySQLPacketPayload payload, final boolean unsigned) throws SQLException { int length = payload.readInt1(); payload.readInt1(); payload.readInt4(); switch (length) { case 0: return new Timestamp(0L); case 8: return getTimestamp(payload); case 12: Timestamp result = getTimestamp(payload); result.setNanos(payload.readInt4()); return result; default: throw new SQLFeatureNotSupportedException(String.format("Wrong length `%d` of MYSQL_TYPE_DATE", length)); } }
@Test void assertReadWithTwelveBytes() throws SQLException { when(payload.readInt1()).thenReturn(12, 0, 10, 59, 0); Calendar actual = Calendar.getInstance(); actual.setTimeInMillis(((Timestamp) new MySQLTimeBinaryProtocolValue().read(payload, false)).getTime()); assertThat(actual.get(Calendar.HOUR_OF_DAY), is(10)); assertThat(actual.get(Calendar.MINUTE), is(59)); assertThat(actual.get(Calendar.SECOND), is(0)); }
@Override public void serialize(Asn1OutputStream out, Class<? extends ASN1Primitive> type, ASN1Primitive value, Asn1ObjectMapper mapper) throws IOException { Asn1Utils.writeRawValue(value.getEncoded(), out); }
@Test public void shouldSerialize() { assertArrayEquals( new byte[] { 0x2a, 0x03, 0x04 }, serialize(new BouncyCastlePrimitiveConverter(), ASN1ObjectIdentifier.class, new ASN1ObjectIdentifier("1.2.3.4")) ); }
@Override public JType apply(String nodeName, JsonNode node, JsonNode parent, JType baseType, Schema schema) { Class<?> type = getType(node.asText()); if (type != null) { JType jtype = baseType.owner()._ref(type); if (ruleFactory.getGenerationConfig().isUsePrimitives()) { jtype = jtype.unboxify(); } return jtype; } else { return baseType; } }
@Test public void applyGeneratesTypeFromFormatValue() { TextNode formatNode = TextNode.valueOf(formatValue); JType result = rule.apply("fooBar", formatNode, null, new JCodeModel().ref(String.class), null); assertThat(result.fullName(), equalTo(expectedType.getName())); }
@Override public FileMergingCheckpointStateOutputStream createCheckpointStateOutputStream( CheckpointedStateScope scope) throws IOException { return fileMergingSnapshotManager.createCheckpointStateOutputStream( subtaskKey, checkpointId, scope); }
@Test public void testCheckpointStreamClosedExceptionally() throws Exception { try (FileMergingSnapshotManager snapshotManager = createFileMergingSnapshotManager()) { Path filePath1 = null; try (FileMergingCheckpointStateOutputStream stream1 = snapshotManager.createCheckpointStateOutputStream(SUBTASK_KEY, 1, EXCLUSIVE)) { stream1.flushToFile(); filePath1 = stream1.getFilePath(); assertPathNotNullAndCheckExistence(filePath1, true); throw new IOException(); } catch (IOException ignored) { } assertPathNotNullAndCheckExistence(filePath1, false); } }
@Deprecated @Restricted(DoNotUse.class) public static String resolve(ConfigurationContext context, String toInterpolate) { return context.getSecretSourceResolver().resolve(toInterpolate); }
@Test public void resolve_singleEntry() { environment.set("FOO", "hello"); assertThat(resolve("${FOO}"), equalTo("hello")); }
@Override public DdlCommand create( final String sqlExpression, final DdlStatement ddlStatement, final SessionConfig config ) { return FACTORIES .getOrDefault(ddlStatement.getClass(), (statement, cf, ci) -> { throw new KsqlException( "Unable to find ddl command factory for statement:" + statement.getClass() + " valid statements:" + FACTORIES.keySet() ); }) .handle( this, new CallInfo(sqlExpression, config), ddlStatement); }
@Test public void shouldCreateDropType() { // Given: final DropType dropType = new DropType(Optional.empty(), SOME_TYPE_NAME, false); // When: final DropTypeCommand cmd = (DropTypeCommand) commandFactories.create( "sqlExpression", dropType, SessionConfig.of(ksqlConfig, emptyMap()) ); // Then: assertThat(cmd, is(dropTypeCommand)); verify(dropTypeFactory).create(dropType); }
public LoggingConfiguration setVerbose(boolean verbose) { return setRootLevel(verbose ? LEVEL_ROOT_VERBOSE : LEVEL_ROOT_DEFAULT); }
@Test public void testSetVerbose() { assertThat(new LoggingConfiguration(null).setVerbose(true) .getSubstitutionVariable(LoggingConfiguration.PROPERTY_ROOT_LOGGER_LEVEL)).isEqualTo(LoggingConfiguration.LEVEL_ROOT_VERBOSE); assertThat(new LoggingConfiguration(null).setVerbose(false) .getSubstitutionVariable(LoggingConfiguration.PROPERTY_ROOT_LOGGER_LEVEL)).isEqualTo(LoggingConfiguration.LEVEL_ROOT_DEFAULT); assertThat(new LoggingConfiguration(null).setRootLevel("ERROR") .getSubstitutionVariable(LoggingConfiguration.PROPERTY_ROOT_LOGGER_LEVEL)).isEqualTo("ERROR"); }
@VisibleForTesting List<Path> getShipArchives() { return shipArchives; }
/**
 * SHIP_ARCHIVES must reject directories and non-archive files, accept local
 * archives, and also accept archives addressed through HDFS URIs.
 */
@Test
void testShipArchives() throws IOException {
    final File homeFolder =
            Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString()).toFile();
    File dir1 = new File(homeFolder.getPath(), "dir1");
    File file1 = new File(homeFolder.getPath(), "file1");
    File archive1 = new File(homeFolder.getPath(), "archive1.zip");
    File archive2 = new File(homeFolder.getPath(), "archive2.zip");
    assertThat(dir1.mkdirs()).isTrue();
    assertThat(file1.createNewFile()).isTrue();
    assertThat(archive1.createNewFile()).isTrue();
    assertThat(archive2.createNewFile()).isTrue();

    Configuration flinkConfiguration = new Configuration();
    // A directory in the list must be rejected.
    flinkConfiguration.set(
            YarnConfigOptions.SHIP_ARCHIVES,
            Arrays.asList(dir1.getAbsolutePath(), archive1.getAbsolutePath()));
    assertThrows(
            "Directories or non-archive files are included.",
            IllegalArgumentException.class,
            () -> createYarnClusterDescriptor(flinkConfiguration));

    // A plain (non-archive) file in the list must be rejected.
    flinkConfiguration.set(
            YarnConfigOptions.SHIP_ARCHIVES,
            Arrays.asList(file1.getAbsolutePath(), archive1.getAbsolutePath()));
    assertThrows(
            "Directories or non-archive files are included.",
            IllegalArgumentException.class,
            () -> createYarnClusterDescriptor(flinkConfiguration));

    // Only archives: accepted.
    flinkConfiguration.set(
            YarnConfigOptions.SHIP_ARCHIVES,
            Arrays.asList(archive1.getAbsolutePath(), archive2.getAbsolutePath()));
    createYarnClusterDescriptor(flinkConfiguration);

    // An archive on HDFS must also be accepted.
    String archive3 = "hdfs:///flink/archive3.zip";
    final org.apache.hadoop.conf.Configuration hdConf =
            new org.apache.hadoop.conf.Configuration();
    hdConf.set(
            MiniDFSCluster.HDFS_MINIDFS_BASEDIR, temporaryFolder.toAbsolutePath().toString());
    try (final MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(hdConf).build()) {
        final org.apache.hadoop.fs.Path hdfsRootPath =
                new org.apache.hadoop.fs.Path(hdfsCluster.getURI());
        hdfsCluster.getFileSystem().createNewFile(new org.apache.hadoop.fs.Path(archive3));

        flinkConfiguration.set(
                YarnConfigOptions.SHIP_ARCHIVES,
                Arrays.asList(archive1.getAbsolutePath(), archive3));
        final YarnConfiguration yarnConfig = new YarnConfiguration();
        yarnConfig.set(
                CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());
        YarnClusterDescriptor descriptor =
                createYarnClusterDescriptor(flinkConfiguration, yarnConfig);
        assertThat(descriptor.getShipArchives())
                .containsExactly(getPathFromLocalFile(archive1), new Path(archive3));
    }
}
/**
 * Closes the producer, waiting effectively indefinitely for completion by
 * delegating to the timed overload with the maximum representable duration.
 */
@Override
public void close() {
    final Duration effectivelyUnbounded = Duration.ofMillis(Long.MAX_VALUE);
    close(effectivelyUnbounded);
}
/** Calling close() twice on a producer must not throw. */
@Test
public void closeShouldBeIdempotent() {
    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    Producer<byte[], byte[]> producer =
        new KafkaProducer<>(producerProps, new ByteArraySerializer(), new ByteArraySerializer());
    producer.close();
    // Second close is a no-op, not an error.
    producer.close();
}
/**
 * GET endpoint returning overall node information, rendered as JSON or XML (UTF-8).
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodeInfo get() {
    return getNodeInfo();
}
/** The node resource must also answer when the path carries a trailing slash. */
@Test
public void testNodeSlash() throws JSONException, Exception {
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("node/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
        response.getType().toString());
    JSONObject json = response.getEntity(JSONObject.class);
    verifyNodeInfo(json);
}
/**
 * Looks up the Beam schema of a Hive table, including its partition key columns.
 *
 * @param db the database name
 * @param table the table name
 * @return the schema, or absent if the database/table does not exist
 * @throws RuntimeException wrapping any other metastore failure
 */
public Optional<Schema> getTableSchema(String db, String table) {
    try {
        org.apache.hadoop.hive.metastore.api.Table metastoreTable = metastore.getTable(db, table);
        List<FieldSchema> fields = Lists.newArrayList(metastoreTable.getSd().getCols());
        // Partition keys are not part of the storage descriptor; append them explicitly.
        fields.addAll(metastoreTable.getPartitionKeys());
        Schema schema = SchemaUtils.toBeamSchema(fields);
        return Optional.of(schema);
    } catch (NoSuchObjectException e) {
        // A missing table/database is an expected condition, not an error.
        return Optional.absent();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
/** The schema of the test table maps to the expected nullable Beam fields. */
@Test
public void testGetTableSchema() {
    HCatalogBeamSchema hcatSchema = HCatalogBeamSchema.create(service.getHiveConfAsMap());
    Schema schema = hcatSchema.getTableSchema(TEST_DATABASE, TEST_TABLE).get();
    Schema expectedSchema =
        Schema.builder()
            .addNullableField("mycol1", Schema.FieldType.STRING)
            .addNullableField("mycol2", Schema.FieldType.INT32)
            .build();
    assertEquals(expectedSchema, schema);
}
/**
 * Merges a source table schema with derived columns, watermark specs and a
 * primary key according to the given per-feature merging strategies.
 *
 * @param mergingStrategies strategy to apply per schema feature
 * @param sourceSchema the schema of the base table
 * @param derivedColumns columns declared on the derived table
 * @param derivedWatermarkSpecs watermark declarations on the derived table
 * @param derivedPrimaryKey primary key constraint of the derived table, may be null
 * @return the merged schema
 */
public Schema mergeTables(
        Map<FeatureOption, MergingStrategy> mergingStrategies,
        Schema sourceSchema,
        List<SqlNode> derivedColumns,
        List<SqlWatermark> derivedWatermarkSpecs,
        SqlTableConstraint derivedPrimaryKey) {
    SchemaBuilder schemaBuilder =
            new SchemaBuilder(
                    mergingStrategies,
                    sourceSchema,
                    (FlinkTypeFactory) validator.getTypeFactory(),
                    dataTypeFactory,
                    validator,
                    escapeExpression);
    // Order matters: columns first, then watermarks (which may reference the
    // appended columns), then the primary key.
    schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns);
    schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
    schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey);
    return schemaBuilder.build();
}
/** Merging fails when a derived metadata column duplicates one in the base table. */
@Test
void mergeIncludingMetadataColumnsFailsOnDuplicate() {
    Schema sourceSchema =
            Schema.newBuilder()
                    .column("one", DataTypes.INT())
                    .columnByMetadata("two", DataTypes.INT())
                    .build();
    List<SqlNode> derivedColumns =
            Collections.singletonList(metadataColumn("two", DataTypes.INT(), false));
    assertThatThrownBy(
                    () ->
                            util.mergeTables(
                                    getDefaultMergingStrategies(),
                                    sourceSchema,
                                    derivedColumns,
                                    Collections.emptyList(),
                                    null))
            .isInstanceOf(ValidationException.class)
            .hasMessage(
                    "A metadata column named 'two' already exists in the base table. You "
                            + "might want to specify EXCLUDING METADATA or "
                            + "OVERWRITING METADATA.");
}
/** @return the fallback properties, or {@code null} if none have been set */
public FallbackProperties getFallback() {
    return fallback;
}
/** Fallback properties default to null when not configured. */
@Test
public void testFallbackPropertiesInitialization() {
    Assert.assertNull(properties.getFallback());
}
/**
 * Checks whether this assignment is still applicable under the given metadata
 * image: the topic and partition must exist and the node must still be a replica.
 *
 * @param nodeId the broker id to check replica membership for
 * @param image the current metadata image
 * @return true if the assignment is still valid
 */
boolean valid(int nodeId, MetadataImage image) {
    TopicImage topicImage = image.topics().getTopic(topicIdPartition.topicId());
    if (topicImage == null) {
        return false; // The topic has been deleted.
    }
    PartitionRegistration partition = topicImage.partitions().get(topicIdPartition.partitionId());
    if (partition == null) {
        return false; // The partition no longer exists.
    }
    // Check if this broker is still a replica.
    return Replicas.contains(partition.replicas, nodeId);
}
/** An assignment referencing a partition absent from the image is invalid. */
@Test
public void testAssignmentForNonExistentPartitionIsNotValid() {
    assertFalse(new Assignment(
        new TopicIdPartition(Uuid.fromString("rTudty6ITOCcO_ldVyzZYg"), 2),
        Uuid.fromString("rzRT8XZaSbKsP6j238zogg"),
        0,
        NoOpRunnable.INSTANCE).valid(0, TEST_IMAGE));
}
public ConsumerGroup consumerGroup( String groupId, long committedOffset ) throws GroupIdNotFoundException { Group group = group(groupId, committedOffset); if (group.type() == CONSUMER) { return (ConsumerGroup) group; } else { // We don't support upgrading/downgrading between protocols at the moment so // we throw an exception if a group exists with the wrong type. throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group.", groupId)); } }
/**
 * When the record append for a classic-protocol join into a consumer group fails,
 * the state change is rolled back: no timeouts are scheduled and the new member
 * is not part of the group.
 */
@Test
public void testJoiningConsumerGroupFailingToPersistRecords() throws Exception {
    String groupId = "group-id";
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    String memberId = Uuid.randomUuid().toString();
    String newMemberId = Uuid.randomUuid().toString();

    // Assignor hands one partition to each of the two members.
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    assignor.prepareGroupAssignment(new GroupAssignment(
        new HashMap<String, MemberAssignment>() {
            {
                put(memberId, new MemberAssignmentImpl(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0)
                )));
                put(newMemberId, new MemberAssignmentImpl(mkAssignment(
                    mkTopicAssignment(fooTopicId, 1)
                )));
            }
        }
    ));

    // Existing consumer group with a single stable member owning both partitions.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 2)
            .addRacks()
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {
                {
                    put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
                }
            })
            .withMember(new ConsumerGroupMember.Builder(memberId)
                .setState(MemberState.STABLE)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(10)
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1)))
                .build())
            .withAssignment(memberId, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1)))
            .withAssignmentEpoch(10))
        .build();
    context.commit();

    JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
        .withGroupId(groupId)
        .withMemberId(newMemberId)
        .withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(
            Collections.singletonList(fooTopicName),
            Collections.emptyList()))
        .build();
    GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request);

    // Simulate a failed write to the log.
    joinResult.appendFuture.completeExceptionally(new NotLeaderOrFollowerException());
    context.rollback();
    // The rollback must undo all join side effects for the new member.
    context.assertNoSessionTimeout(groupId, newMemberId);
    context.assertNoSyncTimeout(groupId, newMemberId);
    assertFalse(context.groupMetadataManager.consumerGroup(groupId).hasMember(newMemberId));
}
/**
 * Converts a Beam Row to an Avro GenericRecord, delegating with a null Avro
 * schema (the overload presumably derives it from the row's schema — confirm
 * against the two-arg variant).
 */
public static GenericRecord toGenericRecord(Row row) {
    return toGenericRecord(row, null);
}
/** A Beam OneOfType field converts to an Avro union with the selected branch's value. */
@Test
public void testUnionFieldInBeamSchema() {
    OneOfType oneOfType =
        OneOfType.create(Field.of("int", FieldType.INT32), Field.of("string", FieldType.STRING));
    Schema beamSchema = Schema.builder().addLogicalTypeField("union", oneOfType).build();

    // Build the equivalent Avro schema: union = [int, string].
    List<org.apache.avro.Schema.Field> fields = Lists.newArrayList();
    List<org.apache.avro.Schema> unionFields = Lists.newArrayList();
    unionFields.add(org.apache.avro.Schema.create(Type.INT));
    unionFields.add(org.apache.avro.Schema.create(Type.STRING));
    fields.add(
        new org.apache.avro.Schema.Field(
            "union", org.apache.avro.Schema.createUnion(unionFields), "", (Object) null));
    org.apache.avro.Schema avroSchema =
        org.apache.avro.Schema.createRecord("topLevelRecord", null, null, false, fields);

    GenericRecord expectedGenericRecord =
        new GenericRecordBuilder(avroSchema).set("union", 23423).build();
    // Branch 0 (int) selected with value 23423.
    Row row = Row.withSchema(beamSchema).addValue(oneOfType.createValue(0, 23423)).build();
    assertEquals(expectedGenericRecord, AvroUtils.toGenericRecord(row, avroSchema));
}
/**
 * Logs a verbose-level message, if the backing logger supports verbose output.
 * Formatting is performed only when the message will actually be emitted, and
 * the message is also recorded in the in-memory log buffer.
 *
 * @param tag the log tag
 * @param text a format string
 * @param args arguments for the format string
 */
public static synchronized void v(final String tag, String text, Object... args) {
    if (msLogger.supportsV()) {
        String msg = getFormattedString(text, args);
        msLogger.v(tag, msg);
        addLog(LVL_V, tag, msg);
    }
}
/** Verbose logging formats arguments and passes plain text through unchanged. */
@Test
public void testV() throws Exception {
    Logger.v("mTag", "Text with %d digits", 0);
    Mockito.verify(mMockLog).v("mTag", "Text with 0 digits");
    Logger.v("mTag", "Text with no digits");
    Mockito.verify(mMockLog).v("mTag", "Text with no digits");
}
/**
 * Performs login by validating the authorization service credentials; the
 * prompt and cancel callbacks are not consulted here.
 */
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    authorizationService.validate();
}
/** Logging in with an invalid project id and empty secret must be canceled. */
@Test(expected = LoginCanceledException.class)
public void testInvalidProjectId() throws Exception {
    session.getHost().setCredentials(
        new Credentials("duck-1432", "")
    );
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
}
/**
 * Records a committed batch: advances the last committed offset/epoch, possibly
 * the last stable offset, and updates metrics. On standby (inactive) controllers
 * the last applied record offset tracks the committed offset directly.
 *
 * @param batch the batch that was committed to the log
 */
void handleCommitBatch(Batch<ApiMessageAndVersion> batch) {
    this.lastCommittedOffset = batch.lastOffset();
    this.lastCommittedEpoch = batch.epoch();
    maybeAdvanceLastStableOffset();
    metrics.setLastCommittedRecordOffset(batch.lastOffset());
    if (!active()) {
        // On standby controllers, the last applied record offset is equals to the last
        // committed offset.
        metrics.setLastAppliedRecordOffset(batch.lastOffset());
        metrics.setLastAppliedRecordTimestamp(batch.appendTimestamp());
    }
}
/** Committing a batch on an inactive manager advances offsets, epoch and metrics. */
@Test
public void testHandleCommitBatch() {
    OffsetControlManager offsetControl = new OffsetControlManager.Builder().build();
    offsetControl.handleCommitBatch(newFakeBatch(1000L, 200, 3000L));
    assertEquals(Collections.singletonList(1000L), offsetControl.snapshotRegistry().epochsList());
    assertEquals(1000L, offsetControl.lastCommittedOffset());
    assertEquals(200, offsetControl.lastCommittedEpoch());
    assertEquals(1000L, offsetControl.lastStableOffset());
    assertEquals(-1L, offsetControl.transactionStartOffset());
    assertEquals(-1L, offsetControl.nextWriteOffset());
    assertFalse(offsetControl.active());
    assertFalse(offsetControl.metrics().active());
    // Inactive manager mirrors committed offset into the applied-record metrics.
    assertEquals(1000L, offsetControl.metrics().lastAppliedRecordOffset());
    assertEquals(1000L, offsetControl.metrics().lastCommittedRecordOffset());
    assertEquals(3000L, offsetControl.metrics().lastAppliedRecordTimestamp());
}
@Bean("ScmChangedFiles") public ScmChangedFiles provide(ScmConfiguration scmConfiguration, BranchConfiguration branchConfiguration, DefaultInputProject project) { Path rootBaseDir = project.getBaseDir(); Set<ChangedFile> changedFiles = loadChangedFilesIfNeeded(scmConfiguration, branchConfiguration, rootBaseDir); if (changedFiles != null) { validatePaths(getAbsoluteFilePaths(changedFiles)); } return new ScmChangedFiles(changedFiles); }
/** For a pull request, the provider asks the SCM for files changed vs the target branch. */
@Test
public void testReturnChangedFiles() {
    when(branchConfiguration.targetBranchName()).thenReturn("target");
    when(branchConfiguration.isPullRequest()).thenReturn(true);
    when(scmConfiguration.provider()).thenReturn(scmProvider);
    when(scmProvider.branchChangedFiles("target", rootBaseDir))
        .thenReturn(Collections.singleton(Paths.get("changedFile").toAbsolutePath()));

    ScmChangedFiles scmChangedFiles = provider.provide(scmConfiguration, branchConfiguration, project);

    Path filePath = Paths.get("changedFile").toAbsolutePath();
    ChangedFile changedFile = ChangedFile.of(filePath);
    assertThat(scmChangedFiles.get()).containsOnly(changedFile);
    verify(scmProvider).branchChangedFiles("target", rootBaseDir);
}
/**
 * Convenience overload that forwards uncaught exceptions from the future to the
 * given handler, using {@code FatalExitExceptionHandler.INSTANCE} as the fallback
 * for failures of the handler itself.
 */
public static void handleUncaughtException(
        CompletableFuture<?> completableFuture,
        Thread.UncaughtExceptionHandler uncaughtExceptionHandler) {
    handleUncaughtException(
            completableFuture, uncaughtExceptionHandler, FatalExitExceptionHandler.INSTANCE);
}
/** A normally-completed future must not trigger the uncaught exception handler. */
@Test
void testHandleUncaughtExceptionWithNormalCompletion() {
    final CompletableFuture<String> future = new CompletableFuture<>();
    final TestingUncaughtExceptionHandler uncaughtExceptionHandler =
            new TestingUncaughtExceptionHandler();
    FutureUtils.handleUncaughtException(future, uncaughtExceptionHandler);
    future.complete("barfoo");
    assertThat(uncaughtExceptionHandler.hasBeenCalled()).isFalse();
}
/**
 * Handles uncaught throwables from any thread. During shutdown, errors are only
 * logged. Otherwise, {@link Error}s terminate the process (halting immediately
 * on OutOfMemoryError, since post-OOM behavior is undefined), while ordinary
 * exceptions are just logged.
 */
@Override
public void uncaughtException(Thread t, Throwable e) {
    if(ShutdownHookManager.get().isShutdownInProgress()) {
        LOG.error("Thread " + t + " threw an Throwable, but we are shutting " +
            "down, so ignoring this", e);
    } else if(e instanceof Error) {
        try {
            LOG.error(FATAL, "Thread " + t + " threw an Error.  Shutting down now...", e);
        } catch (Throwable err) {
            // We don't want to fail to exit because of an issue with logging.
        }
        if(e instanceof OutOfMemoryError) {
            // After catching an OOM java says it is undefined behavior, so don't
            // even try to clean up or we can get stuck on shutdown.
            try {
                System.err.println("Halting due to Out Of Memory Error...");
            } catch (Throwable err) {
                // Again, we don't want to fail to exit because of logging issues.
            }
            ExitUtil.halt(-1);
        } else {
            ExitUtil.terminate(-1);
        }
    } else {
        LOG.error("Thread " + t + " threw an Exception.", e);
    }
}
/** An Error thrown on a thread must reach the installed uncaught exception handler. */
@Test
void testUncaughtExceptionHandlerWithError() throws InterruptedException {
    // Prevent the handler from actually terminating the JVM.
    ExitUtil.disableSystemExit();
    final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler);
    final java.lang.Error error = new java.lang.Error("test-error");
    final Thread errorThread = new Thread(new Runnable() {
        @Override
        public void run() {
            throw error;
        }
    });
    errorThread.setUncaughtExceptionHandler(spyErrorHandler);
    assertSame(spyErrorHandler, errorThread.getUncaughtExceptionHandler());
    errorThread.start();
    errorThread.join();
    verify(spyErrorHandler).uncaughtException(errorThread, error);
}
/**
 * Validates a department's parent: the parent must not be the department itself,
 * must exist, and must not be a descendant of the department (no cycles).
 *
 * @param id the department being updated, or null when creating a new one
 * @param parentId the proposed parent department id
 */
@VisibleForTesting
void validateParentDept(Long id, Long parentId) {
    if (parentId == null || DeptDO.PARENT_ID_ROOT.equals(parentId)) {
        return;
    }
    // 1. A department cannot be its own parent.
    if (Objects.equals(id, parentId)) {
        throw exception(DEPT_PARENT_ERROR);
    }
    // 2. The parent department must exist.
    DeptDO parentDept = deptMapper.selectById(parentId);
    if (parentDept == null) {
        throw exception(DEPT_PARENT_NOT_EXITS);
    }
    // 3. Walk up the ancestor chain of the parent; if it passes through this
    // department, the change would create a cycle.
    if (id == null) { // id is null on creation, so no cycle is possible.
        return;
    }
    for (int i = 0; i < Short.MAX_VALUE; i++) { // Bounded loop guards against corrupt data.
        // 3.1 Cycle check against the next ancestor.
        parentId = parentDept.getParentId();
        if (Objects.equals(id, parentId)) {
            throw exception(DEPT_PARENT_IS_CHILD);
        }
        // 3.2 Continue with the next ancestor, stopping at the root or a gap.
        if (parentId == null || DeptDO.PARENT_ID_ROOT.equals(parentId)) {
            break;
        }
        parentDept = deptMapper.selectById(parentId);
        if (parentDept == null) {
            break;
        }
    }
}
/** Setting a department's own child as its parent must be rejected (cycle). */
@Test
public void testValidateParentDept_parentIsChild() {
    // Mock data (parent node).
    DeptDO parentDept = randomPojo(DeptDO.class);
    deptMapper.insert(parentDept);
    // Mock data (child node).
    DeptDO childDept = randomPojo(DeptDO.class, o -> {
        o.setParentId(parentDept.getId());
    });
    deptMapper.insert(childDept);
    // Prepare parameters: try to make the child the parent of its own parent.
    Long id = parentDept.getId();
    Long parentId = childDept.getId();
    // Call and assert the expected exception.
    assertServiceException(() -> deptService.validateParentDept(id, parentId),
        DEPT_PARENT_IS_CHILD);
}
/**
 * Extracts the external web-service resource described by a REST step for a
 * given input row: URL (static or from a field), headers, parameters, method
 * and body. Rows without a resolvable URL produce no resources.
 */
@Override
public Collection<IExternalResourceInfo> getResourcesFromRow( Rest step, RowMetaInterface rowMeta, Object[] row ) {
    Set<IExternalResourceInfo> resources = new HashSet<>();
    RestMeta meta = (RestMeta) step.getStepMetaInterface();
    if ( meta == null ) {
        // Fall back to the step's design-time metadata.
        meta = (RestMeta) step.getStepMeta().getStepMetaInterface();
    }
    if ( meta != null ) {
        String url;
        String method;
        String body;
        try {
            if ( meta.isUrlInField() ) {
                url = rowMeta.getString( row, meta.getUrlField(), null );
            } else {
                url = meta.getUrl();
            }
            if ( StringUtils.isNotEmpty( url ) ) {
                WebServiceResourceInfo resourceInfo = createResourceInfo( url, meta );
                // Header values come from row fields, labelled by the configured names.
                if ( ArrayUtils.isNotEmpty( meta.getHeaderField() ) ) {
                    for ( int i = 0; i < meta.getHeaderField().length; i++ ) {
                        String field = meta.getHeaderField()[ i ];
                        String label = meta.getHeaderName()[ i ];
                        resourceInfo.addHeader( label, rowMeta.getString( row, field, null ) );
                    }
                }
                // Same for query/request parameters.
                if ( ArrayUtils.isNotEmpty( meta.getParameterField() ) ) {
                    for ( int i = 0; i < meta.getParameterField().length; i++ ) {
                        String field = meta.getParameterField()[ i ];
                        String label = meta.getParameterName()[ i ];
                        resourceInfo.addParameter( label, rowMeta.getString( row, field, null ) );
                    }
                }
                // HTTP method may also be row-driven.
                if ( meta.isDynamicMethod() ) {
                    method = rowMeta.getString( row, meta.getMethodFieldName(), null );
                    resourceInfo.setMethod( method );
                }
                if ( StringUtils.isNotEmpty( meta.getBodyField() ) ) {
                    body = rowMeta.getString( row, meta.getBodyField(), null );
                    resourceInfo.setBody( body );
                }
                resources.add( resourceInfo );
            }
        } catch ( KettleValueException e ) {
            // could not find a url on this row
            log.debug( e.getMessage(), e );
        }
    }
    return resources;
}
/** A row with a URL field yields exactly one resource carrying header/param attributes. */
@Test
public void testGetResourcesFromRow() throws Exception {
    when( meta.isUrlInField() ).thenReturn( true );
    when( meta.getUrlField() ).thenReturn( "url" );
    when( meta.getHeaderField() ).thenReturn( headerFields );
    when( meta.getParameterField() ).thenReturn( paramFields );
    when( meta.getHeaderName() ).thenReturn( headerNames );
    when( meta.getParameterName() ).thenReturn( paramNames );
    when( rmi.getString( row, "header", null ) ).thenReturn( row[ 2 ].toString() );
    when( rmi.getString( row, "param", null ) ).thenReturn( row[ 2 ].toString() );
    Collection<IExternalResourceInfo> resourcesFromMeta = consumer.getResourcesFromRow( step, rmi, row );
    assertEquals( 1, resourcesFromMeta.size() );
    IExternalResourceInfo resourceInfo = resourcesFromMeta.toArray( new IExternalResourceInfo[ 1 ] )[ 0 ];
    assertEquals( row[ 0 ], resourceInfo.getName() );
    assertNotNull( resourceInfo.getAttributes() );
}
/**
 * Finalizes the snapshot: appends the footer, drains any buffered batches,
 * freezes the underlying snapshot and releases the accumulator.
 *
 * @return the size of the frozen snapshot in bytes
 */
@Override
public long freeze() {
    finalizeSnapshotWithFooter();
    appendBatches(accumulator.drain());
    snapshot.freeze();
    accumulator.close();
    return snapshot.sizeInBytes();
}
/**
 * A snapshot built with KRaft version 1 and a voter set must start with a
 * header, a kraft-version record and a voters record, and end with a footer
 * as the very last record.
 */
@Test
void testKBuilderRaftVersion1WithVoterSet() {
    OffsetAndEpoch snapshotId = new OffsetAndEpoch(100, 10);
    int maxBatchSize = 1024;
    VoterSet voterSet = VoterSetTest.voterSet(
        new HashMap<>(VoterSetTest.voterMap(IntStream.of(1, 2, 3), true))
    );
    AtomicReference<ByteBuffer> buffer = new AtomicReference<>(null);
    RecordsSnapshotWriter.Builder builder = new RecordsSnapshotWriter.Builder()
        .setKraftVersion(KRaftVersion.KRAFT_VERSION_1)
        .setVoterSet(Optional.of(voterSet))
        .setTime(new MockTime())
        .setMaxBatchSize(maxBatchSize)
        .setRawSnapshotWriter(
            new MockRawSnapshotWriter(snapshotId, buffer::set)
        );
    try (RecordsSnapshotWriter<String> snapshot = builder.build(STRING_SERDE)) {
        snapshot.freeze();
    }

    try (RecordsSnapshotReader<String> reader = RecordsSnapshotReader.of(
            new MockRawSnapshotReader(snapshotId, buffer.get()),
            STRING_SERDE,
            BufferSupplier.NO_CACHING,
            maxBatchSize,
            true
        )
    ) {
        // Consume the control record batch
        Batch<String> batch = reader.next();
        assertEquals(3, batch.controlRecords().size());

        // Check snapshot header control record
        assertEquals(ControlRecordType.SNAPSHOT_HEADER, batch.controlRecords().get(0).type());
        assertEquals(new SnapshotHeaderRecord(), batch.controlRecords().get(0).message());

        // Check kraft version control record
        assertEquals(ControlRecordType.KRAFT_VERSION, batch.controlRecords().get(1).type());
        assertEquals(new KRaftVersionRecord().setKRaftVersion((short) 1), batch.controlRecords().get(1).message());

        // Check the voters control record
        assertEquals(ControlRecordType.KRAFT_VOTERS, batch.controlRecords().get(2).type());
        assertEquals(voterSet.toVotersRecord((short) 0), batch.controlRecords().get(2).message());

        // Consume the reader until we find a control record
        do {
            batch = reader.next();
        } while (batch.controlRecords().isEmpty());

        // Check snapshot footer control record
        assertEquals(1, batch.controlRecords().size());
        assertEquals(ControlRecordType.SNAPSHOT_FOOTER, batch.controlRecords().get(0).type());
        assertEquals(new SnapshotFooterRecord(), batch.controlRecords().get(0).message());

        // Snapshot footer must be last record
        assertFalse(reader.hasNext());
    }
}
/**
 * Maps a rule (plus its parameters) to the REST representation. Standard fields
 * are copied from the DTO; markdown notes are rendered to HTML through the macro
 * interpreter; for ad-hoc rules, the ad-hoc name/description/severity/type
 * override the standard values when present.
 */
public RuleRestResponse toRuleRestResponse(RuleInformation ruleInformation) {
    RuleRestResponse.Builder builder = RuleRestResponse.Builder.builder();
    RuleDto ruleDto = ruleInformation.ruleDto();
    builder
        .setId(ruleDto.getUuid())
        .setKey(ruleDto.getKey().toString())
        .setRepositoryKey(ruleDto.getRepositoryKey())
        .setName(ruleDto.getName())
        .setSeverity(ruleDto.getSeverityString())
        .setType(RuleTypeRestEnum.from(RuleType.valueOf(ruleDto.getType())))
        .setImpacts(toImpactRestResponse(ruleDto.getDefaultImpacts()))
        .setCleanCodeAttribute(CleanCodeAttributeRestEnum.from(ruleDto.getCleanCodeAttribute()))
        // Category is derived from the attribute; both may be absent.
        .setCleanCodeAttributeCategory(ofNullable(ruleDto.getCleanCodeAttribute())
            .map(CleanCodeAttribute::getAttributeCategory)
            .map(CleanCodeAttributeCategoryRestEnum::from)
            .orElse(null))
        .setStatus(RuleStatusRestEnum.from(ruleDto.getStatus()))
        .setExternal(ruleDto.isExternal())
        .setCreatedAt(toDateTime(ruleDto.getCreatedAt()))
        .setGapDescription(ruleDto.getGapDescription())
        // Notes are stored as markdown; render to HTML and expand macros.
        .setHtmlNote(ofNullable(ruleDto.getNoteData()).map(n -> macroInterpreter.interpret(Markdown.convertToHtml(n))).orElse(null))
        .setMarkdownNote(ruleDto.getNoteData())
        .setEducationPrinciples(new ArrayList<>(ruleDto.getEducationPrinciples()))
        .setTemplate(ruleDto.isTemplate())
        .setTemplateId(ruleDto.getTemplateUuid())
        .setTags(new ArrayList<>(ruleDto.getTags()))
        .setSystemTags(new ArrayList<>(ruleDto.getSystemTags()))
        .setLanguageKey(ruleDto.getLanguage())
        .setLanguageName(getLanguageName(ruleDto.getLanguage()))
        .setParameters(toRuleParameterResponse(ruleInformation.params()));
    setDescriptionFields(builder, ruleDto);
    setRemediationFunctionFields(builder, ruleDto);
    // Ad-hoc rules may override name, description, severity and type.
    if (ruleDto.isAdHoc()) {
        ofNullable(ruleDto.getAdHocName()).ifPresent(builder::setName);
        ofNullable(ruleDto.getAdHocDescription())
            .map(this::toDescriptionSectionResponse)
            .ifPresent(section -> builder.setDescriptionSections(List.of(section)));
        ofNullable(ruleDto.getAdHocSeverity()).ifPresent(builder::setSeverity);
        ofNullable(ruleDto.getAdHocType()).ifPresent(type -> builder.setType(RuleTypeRestEnum.from(RuleType.valueOf(type))));
    }
    return builder.build();
}
/** All standard (non-ad-hoc) rule fields must be mapped 1:1 into the REST response. */
@Test
public void toRuleRestResponse_shouldReturnSameFieldForStandardMapping() {
    // Macro interpretation and HTML rendering are stubbed with traceable prefixes.
    when(macroInterpreter.interpret(Mockito.anyString())).thenAnswer(invocation -> "interpreted" + invocation.getArgument(0));
    when(ruleDescriptionFormatter.toHtml(any(), any())).thenAnswer(invocation -> "html" + ((RuleDescriptionSectionDto) invocation.getArgument(1)).getContent());
    RuleDto dto = RuleTesting.newRule();
    when(languages.get(dto.getLanguage())).thenReturn(LanguageTesting.newLanguage(dto.getLanguage(), "languageName"));

    RuleRestResponse ruleRestResponse = ruleRestResponseGenerator.toRuleRestResponse(new RuleInformation(dto, List.of()));

    assertThat(ruleRestResponse.id()).isEqualTo(dto.getUuid());
    assertThat(ruleRestResponse.key()).isEqualTo(dto.getKey().toString());
    assertThat(ruleRestResponse.repositoryKey()).isEqualTo(dto.getRepositoryKey());
    assertThat(ruleRestResponse.name()).isEqualTo(dto.getName());
    // Sections pass through both the HTML formatter and the macro interpreter.
    assertThat(ruleRestResponse.descriptionSections()).extracting(s -> s.key(), s -> s.content(), s -> s.context())
        .containsExactly(dto.getRuleDescriptionSectionDtos().stream().map(s -> tuple(s.getKey(), "interpreted" + "html" + s.getContent(), s.getContext())).toArray(Tuple[]::new));
    assertThat(ruleRestResponse.severity()).isEqualTo(dto.getSeverityString());
    assertThat(ruleRestResponse.type().name()).isEqualTo(RuleType.valueOf(dto.getType()).name());
    assertThat(ruleRestResponse.impacts()).extracting(r -> r.severity().name(), r -> r.softwareQuality().name())
        .containsExactly(dto.getDefaultImpacts().stream().map(e -> tuple(e.getSeverity().name(), e.getSoftwareQuality().name())).toArray(Tuple[]::new));
    assertThat(ruleRestResponse.cleanCodeAttribute().name()).isEqualTo(dto.getCleanCodeAttribute().name());
    assertThat(ruleRestResponse.cleanCodeAttributeCategory().name()).isEqualTo(dto.getCleanCodeAttribute().getAttributeCategory().name());
    assertThat(ruleRestResponse.status().name()).isEqualTo(dto.getStatus().name());
    assertThat(ruleRestResponse.external()).isEqualTo(dto.isExternal());
    assertThat(ruleRestResponse.createdAt()).isEqualTo(DateUtils.formatDateTime(dto.getCreatedAt()));
    assertThat(ruleRestResponse.gapDescription()).isEqualTo(dto.getGapDescription());
    assertThat(ruleRestResponse.markdownNote()).isEqualTo(dto.getNoteData());
    assertThat(ruleRestResponse.educationPrinciples()).containsExactlyElementsOf(dto.getEducationPrinciples());
    assertThat(ruleRestResponse.template()).isEqualTo(dto.isTemplate());
    assertThat(ruleRestResponse.templateId()).isEqualTo(dto.getTemplateUuid());
    assertThat(ruleRestResponse.tags()).containsExactlyElementsOf(dto.getTags());
    assertThat(ruleRestResponse.systemTags()).containsExactlyElementsOf(dto.getSystemTags());
    assertThat(ruleRestResponse.languageKey()).isEqualTo(dto.getLanguage());
    assertThat(ruleRestResponse.languageName()).isEqualTo("languageName");
    // Remediation function fields are validated through the canonical function object.
    DefaultDebtRemediationFunction function =
        new DefaultDebtRemediationFunction(DebtRemediationFunction.Type.valueOf(dto.getRemediationFunction().toUpperCase(Locale.ENGLISH)),
            dto.getRemediationGapMultiplier(), dto.getRemediationBaseEffort());
    assertThat(ruleRestResponse.remediationFunctionBaseEffort()).isEqualTo(function.baseEffort());
    assertThat(ruleRestResponse.remediationFunctionGapMultiplier()).isEqualTo(function.gapMultiplier());
    assertThat(ruleRestResponse.remediationFunctionType()).isEqualTo(dto.getRemediationFunction());
}
@Operation(summary = "verifyAlertInstanceName", description = "VERIFY_ALERT_INSTANCE_NAME_NOTES") @Parameters({ @Parameter(name = "alertInstanceName", description = "ALERT_INSTANCE_NAME", required = true, schema = @Schema(implementation = String.class)), }) @GetMapping(value = "/verify-name") @ResponseStatus(HttpStatus.OK) public Result verifyGroupName(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "alertInstanceName") String alertInstanceName) { boolean exist = alertPluginInstanceService.checkExistPluginInstanceName(alertInstanceName); if (exist) { log.error("alert plugin instance {} has exist, can't create again.", alertInstanceName); return Result.error(Status.PLUGIN_INSTANCE_ALREADY_EXISTS); } else { return Result.success(); } }
/** A name that does not exist yet passes verification with a success result. */
@Test
public void testVerifyGroupName() throws Exception {
    // Given
    final MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("pluginDefineId", String.valueOf(pluginDefineId));
    paramsMap.add("alertInstanceName", instanceName);
    when(alertPluginInstanceService.checkExistPluginInstanceName(eq(instanceName)))
        .thenReturn(false);
    Result expectResponseContent = JSONUtils.parseObject(
        "{\"code\":0,\"msg\":\"success\",\"data\":null,\"failed\":false,\"success\":true}",
        Result.class);

    // When
    final MvcResult mvcResult = mockMvc.perform(get("/alert-plugin-instances/verify-name")
            .header(SESSION_ID, sessionId)
            .params(paramsMap))
        .andExpect(status().isOk())
        .andExpect(content().contentType(MediaType.APPLICATION_JSON))
        .andReturn();

    // Then
    final Result actualResponseContent =
        JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    assertThat(actualResponseContent.getMsg()).isEqualTo(expectResponseContent.getMsg());
    assertThat(actualResponseContent.getCode()).isEqualTo(expectResponseContent.getCode());
}
/**
 * Computes the 64-bit hash of the given bytes using the default seed (1337).
 *
 * @param data the bytes to hash
 * @return the 64-bit hash value
 */
public static long hash64(byte[] data) {
    return hash64(data, 1337);
}
/** Pins the 64-bit MetroHash of a fixed UTF-8 string to its known value. */
@Test
public void metroHash64Test() {
    byte[] str = "我是一段测试123".getBytes(CharsetUtil.CHARSET_UTF_8);
    final long hash64 = MetroHash.hash64(str);
    assertEquals(62920234463891865L, hash64);
}