focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Static factory: builds an empty {@link Write} transform to be customized via
 * the fluent {@code withXxx} methods.
 *
 * @param <K> key type written to the CDAP sink
 * @param <V> value type written to the CDAP sink
 * @return a {@code Write} with all optional fields unset
 */
public static <K, V> Write<K, V> write() {
  return new AutoValue_CdapIO_Write.Builder<K, V>().build();
}
/**
 * Verifies that CdapIO.Write's fluent builder records the plugin (class, format,
 * format provider, type, context), the plugin config, key/value classes and the
 * locks directory path exactly as supplied.
 */
@Test public void testWriteBuildsCorrectly() { EmployeeConfig pluginConfig = new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build(); CdapIO.Write<String, String> write = CdapIO.<String, String>write() .withCdapPlugin( Plugin.createBatch( EmployeeBatchSink.class, EmployeeOutputFormat.class, EmployeeOutputFormatProvider.class)) .withPluginConfig(pluginConfig) .withKeyClass(String.class) .withValueClass(String.class) .withLocksDirPath(tmpFolder.getRoot().getAbsolutePath()); Plugin<String, String> cdapPlugin = write.getCdapPlugin(); assertNotNull(cdapPlugin); assertNotNull(write.getLocksDirPath()); assertEquals(EmployeeBatchSink.class, cdapPlugin.getPluginClass()); assertEquals(EmployeeOutputFormat.class, cdapPlugin.getFormatClass()); assertEquals(EmployeeOutputFormatProvider.class, cdapPlugin.getFormatProviderClass()); assertNotNull(cdapPlugin.getContext()); assertEquals(BatchSinkContextImpl.class, cdapPlugin.getContext().getClass()); assertEquals(PluginConstants.PluginType.SINK, cdapPlugin.getPluginType()); assertNotNull(cdapPlugin.getHadoopConfiguration()); assertEquals(pluginConfig, write.getPluginConfig()); assertEquals(String.class, write.getKeyClass()); assertEquals(String.class, write.getValueClass()); }
/**
 * Registers all four font styles — regular, italic, bold and bold-italic — by
 * delegating each one to {@code installStyle}.
 */
public static void install() {
	installStyle( STYLE_REGULAR );
	installStyle( STYLE_ITALIC );
	installStyle( STYLE_BOLD );
	installStyle( STYLE_BOLD_ITALIC );
}
/**
 * After installing FlatRobotoMonoFont, checks that the family resolves for the
 * plain, italic, bold and bold-italic style variants at size 13.
 */
@Test void testFont() { FlatRobotoMonoFont.install(); testFont( FlatRobotoMonoFont.FAMILY, Font.PLAIN, 13 ); testFont( FlatRobotoMonoFont.FAMILY, Font.ITALIC, 13 ); testFont( FlatRobotoMonoFont.FAMILY, Font.BOLD, 13 ); testFont( FlatRobotoMonoFont.FAMILY, Font.BOLD | Font.ITALIC, 13 ); }
/**
 * Converts an Internet address value to its numeric (big-endian) representation.
 *
 * @param object the value to convert; resolved via {@code getInternetAddress}
 * @return the address bytes folded into a long, or null when the value is null
 * @throws KettleValueException when the address has more than 8 bytes (a full
 *         IPv6 address cannot fit in a long)
 */
@Override public Long getInteger( Object object ) throws KettleValueException {
  InetAddress address = getInternetAddress( object );
  if ( address == null ) {
    return null;
  }
  byte[] addr = address.getAddress();
  if ( addr.length > 8 ) {
    throw new KettleValueException( "Unable to convert Internet Address v6 to an Integer: " + getString( object )
      + " (The precision is too high to be contained in a long integer value)" );
  }
  // Fold the big-endian address bytes with shifts instead of the original
  // Math.pow( 256, ... ) floating-point arithmetic: exact for all lengths up
  // to 8 bytes and avoids double<->long conversions in the loop.
  long total = 0L;
  for ( int i = 0; i < addr.length; i++ ) {
    total = ( total << 8 ) | ( addr[i] & 0xFF );
  }
  return total;
}
/**
 * Smoke test: getInteger must not throw for a representative set of IPv4
 * addresses (including 0.0.0.0 and broadcast-like values).
 */
@Test public void testGetInteger_Success() throws UnknownHostException, KettleValueException { ValueMetaInternetAddress vm = new ValueMetaInternetAddress(); String[] addresses = { // Some IPv4 addresses "192.168.10.0", "0.0.0.1", "0.0.0.0", "127.0.0.1", "255.255.0.10", "192.0.2.235" }; // No exception should be thrown in any of the following calls for ( String address : addresses ) { InetAddress addr = InetAddress.getByName( address ); vm.getInteger( addr ); } }
/**
 * Gamma probability density function: 0 for negative x, otherwise
 * (x/theta)^(k-1) * e^(-x/theta) divided by the precomputed normalizing
 * constant {@code thetaGammaK}.
 */
@Override
public double p(double x) {
    // Density is zero outside the support.
    if (x < 0) {
        return 0.0;
    }
    return Math.pow(x / theta, k - 1) * Math.exp(-x / theta) / thetaGammaK;
}
/**
 * Pins the Gamma(k=3, theta=2.1) density at several points (values match R's
 * dgamma), including the x<0 and x=0 boundary cases.
 */
@Test public void testP() { System.out.println("p"); GammaDistribution instance = new GammaDistribution(3, 2.1); instance.rand(); assertEquals(0.0, instance.p(-0.1), 1E-7); assertEquals(0.0, instance.p(0.0), 1E-7); assertEquals(0.0005147916, instance.p(0.1), 1E-7); assertEquals(0.03353553, instance.p(1.0), 1E-7); assertEquals(0.08332174, instance.p(2.0), 1E-7); assertEquals(0.1164485, instance.p(3.0), 1E-7); assertEquals(0.1285892, instance.p(4.0), 1E-7); assertEquals(0.04615759, instance.p(10), 1E-7); assertEquals(0.001578462, instance.p(20), 1E-7); assertEquals(3.036321e-05, instance.p(30), 1E-7); }
/** Private constructor: static-only holder, must never be instantiated. */
private HazelcastInstanceFactory() { }
/**
 * Creates three instances through TestHazelcastInstanceFactory and asserts they
 * eventually form a 3-node cluster; instances are always terminated in finally.
 */
@Test public void testTestHazelcastInstanceFactory() { TestHazelcastInstanceFactory instanceFactory = new TestHazelcastInstanceFactory(); try { final HazelcastInstance instance1 = instanceFactory.newHazelcastInstance(); final HazelcastInstance instance2 = instanceFactory.newHazelcastInstance(); final HazelcastInstance instance3 = instanceFactory.newHazelcastInstance(); assertClusterSizeEventually(3, instance1, instance2, instance3); } finally { instanceFactory.terminateAll(); } }
/**
 * Returns a metrics view for this rate limiter.
 * A new {@code AtomicRateLimiterMetrics} instance is created on every call.
 */
@Override
public Metrics getMetrics() {
    return new AtomicRateLimiterMetrics();
}
/** With a zero-duration setup, no threads should be reported as waiting. */
@Test public void metrics() { setup(Duration.ZERO); then(rateLimiter.getMetrics().getNumberOfWaitingThreads()).isZero(); }
/**
 * Exposes the registered workers as a read-only iterator; calling
 * {@code remove()} on it throws {@link UnsupportedOperationException}.
 */
@Override
public Iterator<WorkerInfo> iterator() {
  Iterator<WorkerInfo> backing = mWorkers.values().iterator();
  return Iterators.unmodifiableIterator(backing);
}
/**
 * The cluster-view iterator must yield elements but reject remove() with
 * UnsupportedOperationException at every position.
 */
@Test public void immutableIterator() { WorkerClusterView view = new WorkerClusterView(ImmutableList.of( new WorkerInfo().setIdentity(WorkerIdentityTestUtils.randomLegacyId()))); Iterator<WorkerInfo> iter = view.iterator(); assertTrue(iter.hasNext()); while (iter.hasNext()) { iter.next(); assertThrows(UnsupportedOperationException.class, iter::remove); } }
/**
 * Relays a "get consumer running info" reply back through the broker's remoting
 * server. The returned future is completed by the caller; on completion a
 * response command (opaque copied from the request, body set on SUCCESS) is
 * pushed through the Netty remoting layer via a synthetic SimpleChannel.
 * NOTE(review): the CompletionStage returned by thenAccept is discarded, so a
 * failure inside the callback is silently dropped — confirm errors are
 * observed upstream.
 */
@Override public CompletableFuture<ProxyRelayResult<ConsumerRunningInfo>> processGetConsumerRunningInfo( ProxyContext context, RemotingCommand command, GetConsumerRunningInfoRequestHeader header) { CompletableFuture<ProxyRelayResult<ConsumerRunningInfo>> future = new CompletableFuture<>(); future.thenAccept(proxyOutResult -> { RemotingServer remotingServer = this.brokerController.getRemotingServer(); if (remotingServer instanceof NettyRemotingAbstract) { NettyRemotingAbstract nettyRemotingAbstract = (NettyRemotingAbstract) remotingServer; RemotingCommand remotingCommand = RemotingCommand.createResponseCommand(null); remotingCommand.setOpaque(command.getOpaque()); remotingCommand.setCode(proxyOutResult.getCode()); remotingCommand.setRemark(proxyOutResult.getRemark()); if (proxyOutResult.getCode() == ResponseCode.SUCCESS && proxyOutResult.getResult() != null) { ConsumerRunningInfo consumerRunningInfo = proxyOutResult.getResult(); remotingCommand.setBody(consumerRunningInfo.encode()); } SimpleChannel simpleChannel = new SimpleChannel(context.getRemoteAddress(), context.getLocalAddress()); nettyRemotingAbstract.processResponseCommand(simpleChannel.getChannelHandlerContext(), remotingCommand); } }); return future; }
/**
 * Completes the relay future with a SUCCESS result and verifies the response
 * command forwarded to the remoting server carries the same code, remark and
 * encoded running-info body.
 */
@Test public void testProcessGetConsumerRunningInfo() { ConsumerRunningInfo runningInfo = new ConsumerRunningInfo(); runningInfo.setJstack("jstack"); String remark = "ok"; int opaque = 123; RemotingCommand remotingCommand = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUMER_RUNNING_INFO, null); remotingCommand.setOpaque(opaque); GetConsumerRunningInfoRequestHeader requestHeader = new GetConsumerRunningInfoRequestHeader(); requestHeader.setJstackEnable(true); ArgumentCaptor<RemotingCommand> argumentCaptor = ArgumentCaptor.forClass(RemotingCommand.class); CompletableFuture<ProxyRelayResult<ConsumerRunningInfo>> future = localProxyRelayService.processGetConsumerRunningInfo(ProxyContext.create(), remotingCommand, requestHeader); future.complete(new ProxyRelayResult<>(ResponseCode.SUCCESS, remark, runningInfo)); Mockito.verify(nettyRemotingServerMock, Mockito.times(1)) .processResponseCommand(Mockito.any(SimpleChannelHandlerContext.class), argumentCaptor.capture()); RemotingCommand remotingCommand1 = argumentCaptor.getValue(); assertThat(remotingCommand1.getCode()).isEqualTo(ResponseCode.SUCCESS); assertThat(remotingCommand1.getRemark()).isEqualTo(remark); assertThat(remotingCommand1.getBody()).isEqualTo(runningInfo.encode()); }
/**
 * Builds a function name from a sentence: strips illegal characters, trims,
 * splits on whitespace, camelCase boundaries and underscores, then joins the
 * resulting words via {@code joiner}.
 *
 * @throws IllegalArgumentException if the sentence is empty
 */
String generate(String sentence) {
    if (sentence.isEmpty()) {
        throw new IllegalArgumentException("Cannot create function name from empty sentence");
    }
    // Clean first, then fan out through the three split patterns.
    String cleaned = replaceIllegalCharacters(sentence).trim();
    List<String> words = SPLIT_WHITESPACE.splitAsStream(cleaned)
            .flatMap(SPLIT_CAMEL_CASE::splitAsStream)
            .flatMap(SPLIT_UNDERSCORE::splitAsStream)
            .collect(Collectors.toList());
    return joiner.concatenate(words);
}
/** An empty sentence must raise IllegalArgumentException with the exact documented message. */
@Test void testSanitizeEmptyFunctionName() { Executable testMethod = () -> snakeCase.generate(""); IllegalArgumentException expectedThrown = assertThrows(IllegalArgumentException.class, testMethod); assertThat(expectedThrown.getMessage(), is(equalTo("Cannot create function name from empty sentence"))); }
/**
 * JDBC URL prefixes recognized for ClickHouse: the short {@code jdbc:ch:}
 * alias and the full {@code jdbc:clickhouse:} form.
 */
@Override
public Collection<String> getJdbcUrlPrefixes() {
    return Arrays.asList("jdbc:ch:", "jdbc:clickhouse:");
}
/** The ClickHouse DatabaseType SPI must report exactly the two known URL prefixes, in order. */
@Test void assertGetJdbcUrlPrefixes() { assertThat(TypedSPILoader.getService(DatabaseType.class, "ClickHouse").getJdbcUrlPrefixes(), is(Arrays.asList("jdbc:ch:", "jdbc:clickhouse:"))); }
/**
 * Creates a Hudi catalog: "hms" mode backs it with the Hive metastore, "dfs"
 * mode with the filesystem; mode comparison is case-insensitive.
 * NOTE(review): if CatalogOptions.MODE has no default and the option is
 * absent, {@code mode.toLowerCase} would NPE before reaching the
 * HoodieCatalogException — confirm a default exists.
 *
 * @throws HoodieCatalogException for any unrecognized mode value
 */
@Override public Catalog createCatalog(Context context) { final FactoryUtil.CatalogFactoryHelper helper = FactoryUtil.createCatalogFactoryHelper(this, context); helper.validateExcept(HadoopConfigurations.HADOOP_PREFIX); String mode = helper.getOptions().get(CatalogOptions.MODE); switch (mode.toLowerCase(Locale.ROOT)) { case "hms": return new HoodieHiveCatalog( context.getName(), (Configuration) helper.getOptions()); case "dfs": return new HoodieCatalog( context.getName(), (Configuration) helper.getOptions()); default: throw new HoodieCatalogException(String.format("Invalid catalog mode: %s, supported modes: [hms, dfs].", mode)); } }
/**
 * Creating a catalog with mode=hms must produce a HoodieHiveCatalog whose Hive
 * conf picked up the metastore URI from the configured conf dir.
 */
@Test void testCreateHMSCatalog() { final String catalogName = "mycatalog"; final HoodieHiveCatalog expectedCatalog = HoodieCatalogTestUtils.createHiveCatalog(catalogName); final Map<String, String> options = new HashMap<>(); options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HoodieCatalogFactory.IDENTIFIER); options.put(CatalogOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath()); options.put(CatalogOptions.MODE.key(), "hms"); options.put(CatalogOptions.TABLE_EXTERNAL.key(), "false"); final Catalog actualCatalog = FactoryUtil.createCatalog( catalogName, options, null, Thread.currentThread().getContextClassLoader()); assertEquals( ((HoodieHiveCatalog) actualCatalog) .getHiveConf() .getVar(HiveConf.ConfVars.METASTOREURIS), "dummy-hms"); checkEquals(expectedCatalog, (HoodieHiveCatalog) actualCatalog); }
/**
 * Returns the distinct service names of all known Vespa services as a single
 * space-separated string, preserving encounter order.
 */
public String getAllVespaServices() { return vespaServices.getVespaServices().stream() .map(VespaService::getServiceName) .distinct() .collect(Collectors.joining(" ")); }
/** With a single configured dummy service, the joined service-name string is just "dummy". */
@Test public void getServices_returns_service_types() { assertEquals("dummy", metricsManager.getAllVespaServices()); }
/**
 * Creates a new OpenID session seeded from the client's authenticate request
 * plus server-side context, and assigns a freshly generated random
 * authorization code.
 *
 * @param params the validated authenticate request from the client
 * @param jwksUri JWKS endpoint associated with the session
 * @param legacyWebserviceId legacy webservice identifier to carry along
 * @param serviceName display/service name to carry along
 * @return the populated (not yet persisted here) session object
 */
public OpenIdSession startSession(@Valid AuthenticateRequest params, String jwksUri, Long legacyWebserviceId, String serviceName) {
    OpenIdSession openIdSession = new OpenIdSession();
    // Server-side context.
    openIdSession.setJwksUri(jwksUri);
    openIdSession.setLegacyWebserviceId(legacyWebserviceId);
    openIdSession.setServiceName(serviceName);
    // Values copied straight from the client's request.
    openIdSession.setClientId(params.getClientId());
    openIdSession.setResponseType(params.getResponseType());
    openIdSession.setScope(params.getScope());
    openIdSession.setRedirectUri(params.getRedirectUri());
    openIdSession.setState(params.getState());
    openIdSession.setNonce(params.getNonce());
    openIdSession.setCodeChallenge(params.getCodeChallenge());
    openIdSession.setCodeChallengeMethod(params.getCodeChallengeMethod());
    // One-time authorization code.
    openIdSession.setCode(UUID.randomUUID().toString());
    return openIdSession;
}
/** The session must echo back the server-side context values passed to startSession. */
@Test void startSessionTest() { AuthenticateRequest authenticateRequest = new AuthenticateRequest(); OpenIdSession openIdSession = openIdService.startSession(authenticateRequest, "jwksUri", 1L, "serviceName"); assertEquals("jwksUri", openIdSession.getJwksUri()); assertEquals("serviceName", openIdSession.getServiceName()); assertEquals(1L, openIdSession.getLegacyWebserviceId()); }
/**
 * Decodes the entire byte array using the primary (first) codec.
 */
public String decode(byte[] val) {
    final int length = val.length;
    return codecs[0].decode(val, 0, length);
}
/** Decoding the JIS X 0201 byte fixture with the aliased codec must reproduce the reference string. */
@Test public void testDecodeJapanesePersonNameJISX0201_withAlias() { assertEquals(JAPANESE_PERSON_NAME_JISX0201, jisX0201_withAlias().decode(JAPANESE_PERSON_NAME_JISX0201_BYTES)); }
/**
 * Parses a mode string, trying a numeric interpretation first and falling back
 * to symbolic parsing when the value is not a number.
 *
 * @throws IllegalArgumentException if the value is blank
 */
public static Mode parse(String value) { if (StringUtils.isBlank(value)) { throw new IllegalArgumentException(ExceptionMessage.INVALID_MODE.getMessage(value)); } try { return parseNumeric(value); } catch (NumberFormatException e) { // Treat as symbolic return parseSymbolic(value); } }
/** An empty mode string must be rejected with the INVALID_MODE message. */
@Test public void symbolicsBadEmpty() { mThrown.expect(IllegalArgumentException.class); mThrown.expectMessage(ExceptionMessage.INVALID_MODE.getMessage("")); ModeParser.parse(""); }
/**
 * Reads a little-endian unsigned 32-bit integer from the stream.
 * Always consumes four bytes (or however many are available before EOF).
 *
 * @return the value as a non-negative long in [0, 2^32-1]
 * @throws BufferUnderrunException if the stream ends before four bytes are read
 * @throws IOException on an underlying read failure
 */
public static long readUIntLE(InputStream stream) throws IOException, BufferUnderrunException {
    int ch1 = stream.read();
    int ch2 = stream.read();
    int ch3 = stream.read();
    int ch4 = stream.read();
    // read() returns -1 at EOF, so OR-ing the four results is negative iff any byte was missing.
    if ((ch1 | ch2 | ch3 | ch4) < 0) {
        throw new BufferUnderrunException();
    }
    // Compose in long arithmetic so the high byte never forms a negative int that
    // then needs masking; this also drops the original lowercase 'l' literal
    // suffix, which linters flag because it reads like the digit '1'.
    return ((long) ch4 << 24) | ((long) ch3 << 16) | ((long) ch2 << 8) | ch1;
}
/**
 * Covers a small value, the high-bit case (4294967280 must stay unsigned), and
 * a truncated 3-byte stream which must raise BufferUnderrunException.
 */
@Test public void testReadUIntLE() throws Exception { byte[] data = new byte[]{(byte) 0x08, (byte) 0x00, (byte) 0x00, (byte) 0x00}; assertEquals(8, EndianUtils.readUIntLE(new ByteArrayInputStream(data))); data = new byte[]{(byte) 0xF0, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}; assertEquals(4294967280L, EndianUtils.readUIntLE(new ByteArrayInputStream(data))); data = new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF}; try { EndianUtils.readUIntLE(new ByteArrayInputStream(data)); fail("Should have thrown exception"); } catch (EndianUtils.BufferUnderrunException e) { //swallow } }
/**
 * Begins a cache put for {@code key}: acquires the entry's write lock and
 * returns the temp file to write into, or null when a permanent file for the
 * key already exists.
 * NOTE(review): on the early null return the write lock acquired above is not
 * released in this method — confirm the caller (or a paired commit/abort path)
 * releases it. Also {@code entries.get(key)} is assumed non-null here; verify
 * entries are pre-created before beginPut is reachable.
 */
@Nullable public File beginPut(String key) { long startTime = getLogTime(); openIfNotOpen(); verifyCanaryOrClear(); EntryCache.Entry entry = entries.get(key); entry.acquireWriteLock(); File permanentFile = getCacheFile(key); if (fileSystem.exists(permanentFile)) { return null; } File result = getTempFile(key); if (LOG_VERBOSE) { Log.v(TAG, "Completed begin put in: " + getElapsedTime(startTime) + ", key: " + key); } return result; }
/** After a first beginPut, exactly one file (the canary) must exist in the cache dir. */
@Test public void beginPut_createsCanaryFile() { cache.beginPut("key"); assertThat(cacheDir.listFiles()).hasLength(1); }
/**
 * Sentinel resource name for interface-level rules: simply the request's
 * fully-qualified interface name.
 */
public static String getInterfaceResourceName(SofaRequest request) {
    return request.getInterfaceName();
}
/** The resource name must be exactly the interface name set on the request. */
@Test public void testGetInterfaceResourceName() { SofaRequest request = new SofaRequest(); request.setInterfaceName("com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService"); String interfaceResourceName = SofaRpcUtils.getInterfaceResourceName(request); assertEquals("com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService", interfaceResourceName); }
/**
 * Plans the minimal set of replica migrations (MOVE / COPY / SHIFT UP /
 * SHIFT DOWN) that transform {@code oldReplicas} into {@code newReplicas} for
 * one partition, reporting each step to {@code callback} while mutating the
 * shared {@code state} working array. Cyclic replica movements are broken up
 * front via {@code fixCycle}; the final assertion verifies {@code state}
 * converged to {@code newReplicas}. Logic is highly order-dependent — code
 * left byte-identical, documentation only.
 */
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"}) void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas, MigrationDecisionCallback callback) { assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: " + Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas); if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas)); logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas)); } initState(oldReplicas); assertNoDuplicate(partitionId, oldReplicas, newReplicas); // fix cyclic partition replica movements if (fixCycle(oldReplicas, newReplicas)) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId, Arrays.toString(newReplicas)); } } int currentIndex = 0; while (currentIndex < oldReplicas.length) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex, Arrays.toString(state)); } assertNoDuplicate(partitionId, oldReplicas, newReplicas); if (newReplicas[currentIndex] == null) { if (state[currentIndex] != null) { // replica owner is removed and no one will own this replica logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1); state[currentIndex] = null; } currentIndex++; continue; } if (state[currentIndex] == null) { int i = getReplicaIndex(state, newReplicas[currentIndex]); if (i == -1) { // fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex); 
state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (i > currentIndex) { // SHIFT UP replica from i to currentIndex, copy data from partition owner logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId, state[i], i, currentIndex); callback.migrate(null, -1, -1, state[i], i, currentIndex); state[currentIndex] = state[i]; state[i] = null; continue; } throw new AssertionError("partitionId=" + partitionId + "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas)); } if (newReplicas[currentIndex].equals(state[currentIndex])) { // no change, no action needed currentIndex++; continue; } if (getReplicaIndex(newReplicas, state[currentIndex]) == -1 && getReplicaIndex(state, newReplicas[currentIndex]) == -1) { // MOVE partition replica from its old owner to new owner logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) { int newIndex = getReplicaIndex(newReplicas, state[currentIndex]); assert newIndex > currentIndex : "partitionId=" + partitionId + ", Migration decision algorithm failed during SHIFT DOWN! 
INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); if (state[newIndex] == null) { // it is a SHIFT DOWN logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId, state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex); state[newIndex] = state[currentIndex]; } else { logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); } state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex); } assert Arrays.equals(state, newReplicas) : "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas) + " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); }
/**
 * When the SHIFT UP source replica is still owned, the planner must emit a
 * shift-up for index 0 plus a separate MOVE for index 1 rather than one step.
 */
@Test public void test_SHIFT_UP_nonNullSource_willGetAnotherMOVE() throws UnknownHostException { final PartitionReplica[] oldReplicas = { new PartitionReplica(new Address("localhost", 5701), uuids[0]), new PartitionReplica(new Address("localhost", 5702), uuids[1]), new PartitionReplica(new Address("localhost", 5703), uuids[2]), null, null, null, null, }; final PartitionReplica[] newReplicas = { new PartitionReplica(new Address("localhost", 5703), uuids[2]), new PartitionReplica(new Address("localhost", 5701), uuids[0]), null, null, null, null, null, }; migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5701), uuids[0]), 0, -1, new PartitionReplica(new Address("localhost", 5703), uuids[2]), 2, 0); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, -1, new PartitionReplica(new Address("localhost", 5701), uuids[0]), -1, 1); }
/**
 * Looks up the stored flow entry matching the given rule.
 * Requires the FLOWRULE_READ permission; the rule must be non-null.
 */
@Override
public FlowEntry getFlowEntry(FlowRule rule) {
    checkPermission(FLOWRULE_READ);
    checkNotNull(rule, FLOW_RULE_NULL);
    return store.getFlowEntry(rule);
}
/** After adding two rules, getFlowEntry must return entries equal to fresh DefaultFlowEntry wrappers. */
@Test public void getFlowEntry() { assertTrue("store should be empty", Sets.newHashSet(service.getFlowEntries(DID)).isEmpty()); FlowRule f1 = addFlowRule(1); FlowRule f2 = addFlowRule(2); FlowEntry fe1 = new DefaultFlowEntry(f1); FlowEntry fe2 = new DefaultFlowEntry(f2); assertEquals("2 rules should exist", 2, flowCount()); FlowEntry actual1 = service.getFlowEntry(f1); FlowEntry actual2 = service.getFlowEntry(f2); assertEquals(fe1, actual1); assertEquals(fe2, actual2); }
/**
 * Compare-and-set upsert for a tagged config entry: updates when a row for
 * (dataId, group, tenant, tag) already exists, otherwise inserts a new one.
 */
@Override
public ConfigOperateResult insertOrUpdateTagCas(final ConfigInfo configInfo, final String tag, final String srcIp,
        final String srcUser) {
    boolean exists =
            findConfigInfo4TagState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), tag) != null;
    if (exists) {
        return updateConfigInfo4TagCas(configInfo, tag, srcIp, srcUser);
    }
    return addConfigInfo4Tag(configInfo, tag, srcIp, srcUser);
}
/**
 * Exercises the three failure paths of insertOrUpdateTagCas: the state query
 * throwing, the insert path throwing (state null), and the update path
 * throwing (state present) — each CannotGetJdbcConnectionException must
 * propagate with its original message.
 */
@Test void testInsertOrUpdateTagCasOfException() { String dataId = "dataId111222"; String group = "group"; String tenant = "tenant"; String appName = "appname1234"; String content = "c12345"; ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content); configInfo.setEncryptedDataKey("key23456"); configInfo.setMd5("casMd5"); //mock query config state CannotGetJdbcConnectionException ConfigInfoStateWrapper configInfoStateWrapper = new ConfigInfoStateWrapper(); configInfoStateWrapper.setLastModified(System.currentTimeMillis()); configInfoStateWrapper.setId(234567890L); String tag = "tag123"; Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenThrow(new CannotGetJdbcConnectionException("state query throw exception")); String srcIp = "ip345678"; String srcUser = "user1234567"; try { externalConfigInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp, srcUser); assertTrue(false); } catch (Exception e) { assertEquals("state query throw exception", e.getMessage()); } //mock get state return null,and execute add throw CannotGetJdbcConnectionException Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(null); Mockito.when(jdbcTemplate.update(anyString(), eq(dataId), eq(group), eq(tenant), eq(tag), eq(appName), eq(configInfo.getContent()), eq(MD5Utils.md5Hex(configInfo.getContent(), Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser), any(Timestamp.class), any(Timestamp.class))).thenThrow(new CannotGetJdbcConnectionException("throw exception add config tag")); try { externalConfigInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp, srcUser); assertTrue(false); } catch (Exception e) { assertEquals("throw exception add config tag", e.getMessage()); } //mock get state return obj,and execute update throw CannotGetJdbcConnectionException 
Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, tag}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(configInfoStateWrapper); Mockito.when(jdbcTemplate.update(anyString(), eq(configInfo.getContent()), eq(MD5Utils.md5Hex(configInfo.getContent(), Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser), any(Timestamp.class), eq(appName), eq(dataId), eq(group), eq(tenant), eq(tag), eq(configInfo.getMd5()))) .thenThrow(new CannotGetJdbcConnectionException("throw exception update config tag")); try { externalConfigInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp, srcUser); assertTrue(false); } catch (Exception e) { assertEquals("throw exception update config tag", e.getMessage()); } }
/** Dispatches a SHOW statement to the global ShowExecutor's visitor and returns the result set. */
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) { return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context); }
/**
 * Creates two cache rules (one bare, one with predicate and properties) and
 * checks SHOW DATA CACHE RULES renders both rows with ids, scope, predicate
 * and property text.
 */
@Test public void testShowDataCacheRules() throws DdlException, AnalysisException { DataCacheMgr dataCacheMgr = DataCacheMgr.getInstance(); dataCacheMgr.createCacheRule(QualifiedName.of(ImmutableList.of("test1", "test1", "test1")), null, -1, null); Map<String, String> properties = new HashMap<>(); properties.put("hello", "world"); properties.put("ni", "hao"); StringLiteral stringLiteral = new StringLiteral("hello"); dataCacheMgr.createCacheRule(QualifiedName.of(ImmutableList.of("test2", "test2", "test2")), stringLiteral, -1, properties); ShowDataCacheRulesStmt stmt = new ShowDataCacheRulesStmt(NodePosition.ZERO); ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx); List<String> row1 = resultSet.getResultRows().get(0); List<String> row2 = resultSet.getResultRows().get(1); Assert.assertEquals("[0, test1, test1, test1, -1, NULL, NULL]", row1.toString()); Assert.assertEquals("[1, test2, test2, test2, -1, 'hello', \"hello\"=\"world\", \"ni\"=\"hao\"]", row2.toString()); }
/**
 * Returns the index of the last occurrence of {@code value} in {@code array},
 * or {@code INDEX_NOT_FOUND} when the array is empty (per {@code isEmpty}) or
 * the value is absent. Delegates the scan to the end-index overload.
 */
public static <T> int lastIndexOf(T[] array, Object value) {
    return isEmpty(array) ? INDEX_NOT_FOUND : lastIndexOf(array, value, array.length - 1);
}
/** The last 3 in {1,2,3,4,3,6} sits at index 4, for both the boxed and the long[] overload. */
@Test public void lastIndexOfTest() { Integer[] a = {1, 2, 3, 4, 3, 6}; int index = ArrayUtil.lastIndexOf(a, 3); assertEquals(4, index); long[] b = {1, 2, 3, 4, 3, 6}; int index2 = ArrayUtil.lastIndexOf(b, 3); assertEquals(4, index2); }
/**
 * Maps a Java TypeDescriptor onto a Beam schema FieldType: arrays and
 * Collections become array fields, Maps become map fields, other Iterables
 * become iterable fields, and remaining types are looked up in the primitive
 * mapping.
 *
 * @throws IllegalArgumentException for Row types, whose schema cannot be inferred
 * @throws RuntimeException when no primitive mapping exists for the type
 */
public static FieldType fieldTypeForJavaType(TypeDescriptor typeDescriptor) { // TODO: Convert for registered logical types. if (typeDescriptor.isArray() || typeDescriptor.isSubtypeOf(TypeDescriptor.of(Collection.class))) { return getArrayFieldType(typeDescriptor); } else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Map.class))) { return getMapFieldType(typeDescriptor); } else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Iterable.class))) { return getIterableFieldType(typeDescriptor); } else if (typeDescriptor.isSubtypeOf(TypeDescriptor.of(Row.class))) { throw new IllegalArgumentException( "Cannot automatically determine a field type from a Row class" + " as we cannot determine the schema. You should set a field type explicitly."); } else { TypeName typeName = PRIMITIVE_MAPPING.inverse().get(typeDescriptor); if (typeName == null) { throw new RuntimeException("Couldn't find field type for " + typeDescriptor); } return FieldType.of(typeName); } }
/** A Row TypeDescriptor must be rejected with IllegalArgumentException. */
@Test public void testRowTypeToFieldType() { thrown.expect(IllegalArgumentException.class); FieldTypeDescriptors.fieldTypeForJavaType(TypeDescriptors.rows()); }
/**
 * Resolves the private IP addresses of all tasks in the given ECS cluster.
 * Returns an empty list when the cluster has no tasks or describe yields none.
 */
List<String> listTaskPrivateAddresses(String cluster, AwsCredentials credentials) {
    LOGGER.fine("Listing tasks from cluster: '%s'", cluster);
    List<String> arns = listTasks(cluster, credentials);
    LOGGER.fine("AWS ECS ListTasks found the following tasks: %s", arns);
    if (arns.isEmpty()) {
        return emptyList();
    }
    List<Task> described = describeTasks(cluster, arns, credentials);
    if (described.isEmpty()) {
        return emptyList();
    }
    return described.stream().map(Task::getPrivateAddress).collect(Collectors.toList());
}
/**
 * With a tag filter configured, only the task whose ARN matches the tag value
 * should survive and its private IP be returned.
 */
@Test public void listTasksFilteredByTags() { // given String cluster = "arn:aws:ecs:eu-central-1:665466731577:cluster/rafal-test-cluster"; AwsConfig awsConfig = AwsConfig.builder() .setTagKey("tag-key") .setTagValue("51a01bdf-d00e-487e-ab14-7645330b6207") .build(); AwsEcsApi awsEcsApi = new AwsEcsApi(endpoint, awsConfig, requestSigner, CLOCK); stubListTasks("arn:aws:ecs:eu-central-1:665466731577:cluster/rafal-test-cluster", null); stubDescribeTasks(Map.of( "arn:aws:ecs:us-east-1:012345678910:task/0b69d5c0-d655-4695-98cd-5d2d526d9d5a", "10.0.1.16", "arn:aws:ecs:us-east-1:012345678910:task/51a01bdf-d00e-487e-ab14-7645330b6207", "10.0.1.219"), cluster); // when List<String> ips = awsEcsApi.listTaskPrivateAddresses(cluster, CREDENTIALS); // then assertEquals(1, ips.size()); assertThat(ips).contains("10.0.1.219"); }
/**
 * Sends an SMS verification code: resolves the scene configuration, creates a
 * code for the mobile number, then dispatches it through the SMS send service
 * using the scene's template.
 */
@Override
public void sendSmsCode(SmsCodeSendReqDTO reqDTO) {
    SmsSceneEnum sceneEnum = SmsSceneEnum.getCodeByScene(reqDTO.getScene());
    // Runtime assertion message intentionally left in its original language.
    Assert.notNull(sceneEnum, "验证码场景({}) 查找不到配置", reqDTO.getScene());
    // Create the verification code.
    String code = createSmsCode(reqDTO.getMobile(), reqDTO.getScene(), reqDTO.getCreateIp());
    // Send the verification code.
    smsSendService.sendSingleSms(reqDTO.getMobile(), null, null, sceneEnum.getTemplateCode(), MapUtil.of("code", code));
}
/**
 * With ten codes already sent today for the mobile, requesting another must
 * fail with SMS_CODE_EXCEED_SEND_MAXIMUM_QUANTITY_PER_DAY.
 */
@Test
public void sendSmsCode_exceedDay() {
    // Mock data.
    SmsCodeDO smsCodeDO = randomPojo(SmsCodeDO.class, o -> o.setMobile("15601691300").setTodayIndex(10).setCreateTime(LocalDateTime.now()));
    smsCodeMapper.insert(smsCodeDO);
    // Prepare parameters.
    SmsCodeSendReqDTO reqDTO = randomPojo(SmsCodeSendReqDTO.class, o -> { o.setMobile("15601691300"); o.setScene(SmsSceneEnum.MEMBER_LOGIN.getScene()); });
    // Mock methods.
    SqlConstants.init(DbType.MYSQL);
    when(smsCodeProperties.getSendFrequency()).thenReturn(Duration.ofMillis(0));
    // Invoke and assert the expected service exception.
    assertServiceException(() -> smsCodeService.sendSmsCode(reqDTO), SMS_CODE_EXCEED_SEND_MAXIMUM_QUANTITY_PER_DAY);
}
/**
 * Adds all headers from {@code headers} to this instance. Same-type instances
 * take the fast path of merging the underlying header storage directly;
 * anything else goes through the generic superclass copy.
 */
@Override
public HttpHeaders add(HttpHeaders headers) {
    if (!(headers instanceof DefaultHttpHeaders)) {
        return super.add(headers);
    }
    this.headers.add(((DefaultHttpHeaders) headers).headers);
    return this;
}
/** toString of a single-entry header set must render as "DefaultHttpHeaders[foo: bar]". */
@Test public void toStringOnSingleHeader() { assertEquals("DefaultHttpHeaders[foo: bar]", newDefaultDefaultHttpHeaders() .add("foo", "bar") .toString()); }
/**
 * Creates an SDS/DRACOON upload share for the given node and returns its
 * public URL. Default share options are applied when none are passed; the URL
 * path uses the legacy "#/public/shares-uploads" route for server versions
 * before 4.26 and "public/upload-shares" otherwise. The help text includes an
 * expiry note when the share carries an expiration date.
 *
 * @throws BackgroundException mapped from the underlying SDS ApiException
 */
@Override public DescriptiveUrl toUploadUrl(final Path file, final Sharee sharee, CreateUploadShareRequest options, final PasswordCallback callback) throws BackgroundException { try { if(log.isDebugEnabled()) { log.debug(String.format("Create upload share for %s", file)); } if(null == options) { options = new CreateUploadShareRequest(); log.warn(String.format("Use default share options %s", options)); } final Host bookmark = session.getHost(); final UploadShare share = new SharesApi(session.getClient()).createUploadShare( options.targetId(Long.parseLong(nodeid.getVersionId(file))), StringUtils.EMPTY, null); final String help; if(null == share.getExpireAt()) { help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3")); } else { final long expiry = share.getExpireAt().getMillis(); help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3")) + " (" + MessageFormat.format(LocaleFactory.localizedString("Expires {0}", "S3") + ")", UserDateFormatterFactory.get().getShortFormat(expiry * 1000) ); } final Matcher matcher = Pattern.compile(SDSSession.VERSION_REGEX).matcher(session.softwareVersion().getRestApiVersion()); if(matcher.matches()) { if(new Version(matcher.group(1)).compareTo(new Version("4.26")) < 0) { return new DescriptiveUrl(URI.create(String.format("%s://%s/#/public/shares-uploads/%s", bookmark.getProtocol().getScheme(), bookmark.getHostname(), share.getAccessKey())), DescriptiveUrl.Type.signed, help); } } return new DescriptiveUrl(URI.create(String.format("%s://%s/public/upload-shares/%s", bookmark.getProtocol().getScheme(), bookmark.getHostname(), share.getAccessKey())), DescriptiveUrl.Type.signed, help); } catch(ApiException e) { throw new SDSExceptionMappingService(nodeid).map(e); } }
/**
 * Creating an upload share on a sub-room directory must yield a signed,
 * non-empty URL on the modern "public/upload-shares" path; the room is
 * deleted afterwards.
 */
@Test public void testUploadAccountSubRoom() throws Exception { final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path test = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final DescriptiveUrl url = new SDSShareFeature(session, nodeid).toUploadUrl(test, Share.Sharee.world, new CreateUploadShareRequest() .name(new AlphanumericRandomStringService().random()) .expiration(new ObjectExpiration().enableExpiration(false)) .notifyCreator(false) .sendMail(false) .sendSms(false) .password(null) .mailRecipients(null) .mailSubject(null) .mailBody(null) .maxSize(null) .maxSlots(null) .notes(null) .filesExpiryPeriod(null), new DisabledPasswordCallback()); assertNotEquals(DescriptiveUrl.EMPTY, url); assertEquals(DescriptiveUrl.Type.signed, url.getType()); assertTrue(url.getUrl().startsWith("https://duck.dracoon.com/public/upload-shares/")); new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Builds the StreamGraph from the accumulated transformations: configures the
 * graph (including batch-vs-stream execution mode), transforms each
 * Transformation, attaches slot-sharing resources, exchange mode and lineage
 * info, disables unaligned checkpoints on all in-edges of any node that has at
 * least one edge requiring it, and finally clears the generator's per-build
 * state before returning the finished graph.
 */
public StreamGraph generate() { streamGraph = new StreamGraph( configuration, executionConfig, checkpointConfig, savepointRestoreSettings); shouldExecuteInBatchMode = shouldExecuteInBatchMode(); configureStreamGraph(streamGraph); alreadyTransformed = new IdentityHashMap<>(); for (Transformation<?> transformation : transformations) { transform(transformation); } streamGraph.setSlotSharingGroupResource(slotSharingGroupResources); setFineGrainedGlobalStreamExchangeMode(streamGraph); LineageGraph lineageGraph = LineageGraphUtils.convertToLineageGraph(transformations); streamGraph.setLineageGraph(lineageGraph); for (StreamNode node : streamGraph.getStreamNodes()) { if (node.getInEdges().stream().anyMatch(this::shouldDisableUnalignedCheckpointing)) { for (StreamEdge edge : node.getInEdges()) { edge.setSupportsUnalignedCheckpoints(false); } } } final StreamGraph builtStreamGraph = streamGraph; alreadyTransformed.clear(); alreadyTransformed = null; streamGraph = null; return builtStreamGraph; }
@Test void testEnableSlotSharing() { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); DataStream<Integer> sourceDataStream = env.fromData(1, 2, 3); DataStream<Integer> mapDataStream = sourceDataStream.map(x -> x + 1); final List<Transformation<?>> transformations = new ArrayList<>(); transformations.add(sourceDataStream.getTransformation()); transformations.add(mapDataStream.getTransformation()); // all stream nodes share default group by default StreamGraph streamGraph = new StreamGraphGenerator( transformations, env.getConfig(), env.getCheckpointConfig()) .generate(); Collection<StreamNode> streamNodes = streamGraph.getStreamNodes(); for (StreamNode streamNode : streamNodes) { assertThat(streamNode.getSlotSharingGroup()) .isEqualTo(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP); } }
@Override public void loginFailure(HttpRequest request, AuthenticationException e) { checkRequest(request); requireNonNull(e, "AuthenticationException can't be null"); if (!LOGGER.isDebugEnabled()) { return; } Source source = e.getSource(); LOGGER.debug("login failure [cause|{}][method|{}][provider|{}|{}][IP|{}|{}][login|{}]", emptyIfNull(e.getMessage()), source.getMethod(), source.getProvider(), source.getProviderName(), request.getRemoteAddr(), getAllIps(request), preventLogFlood(emptyIfNull(e.getLogin()))); }
@Test public void login_failure_logs_X_Forwarded_For_header_from_request() { AuthenticationException exception = newBuilder() .setSource(Source.realm(Method.EXTERNAL, "bar")) .setMessage("Hop la!") .setLogin("foo") .build(); HttpRequest request = mockRequest("1.2.3.4", List.of("2.3.4.5")); underTest.loginFailure(request, exception); verifyLog("login failure [cause|Hop la!][method|EXTERNAL][provider|REALM|bar][IP|1.2.3.4|2.3.4.5][login|foo]", Set.of("logout", "login success")); }
@Override public String builder(final String paramName, final ServerWebExchange exchange) { List<String> headers = exchange.getRequest().getHeaders().get(paramName); if (CollectionUtils.isEmpty(headers)) { return ""; } return headers.get(0); }
@Test public void testBuilder() { assertEquals("", parameterData.builder("invalidParamName", exchange)); assertEquals("shenyuHeader", parameterData.builder("shenyu", exchange)); }
public List<Pair<String, String>> getAll() { List<Pair<String, String>> result = new ArrayList<>(); Cursor cursor = db.rawQuery("SELECT * FROM " + TABLE_HOSTS + ";", null); try { while (cursor.moveToNext()) { String host = SqlUtils.getString(cursor, COLUMN_HOST, null); String ip = SqlUtils.getString(cursor, COLUMN_IP, null); InetAddress inetAddress = toInetAddress(host, ip); if (inetAddress == null) { continue; } result.add(new Pair<>(host, ip)); } } finally { cursor.close(); } return result; }
@Test public void testGetAll() { Hosts hosts = new Hosts(RuntimeEnvironment.application, "hosts.db"); List<Pair<String, String>> all = hosts.getAll(); assertEquals(0, all.size()); hosts.put("ni.hao", "127.0.0.1"); hosts.put("wo.hao", "127.0.0.2"); all = hosts.getAll(); assertEquals(2, all.size()); assertEquals("ni.hao", all.get(0).first); assertEquals("127.0.0.1", all.get(0).second); assertEquals("wo.hao", all.get(1).first); assertEquals("127.0.0.2", all.get(1).second); }
public List<QueuePath> getWildcardedQueuePaths(int maxAutoCreatedQueueDepth) { List<QueuePath> wildcardedPaths = new ArrayList<>(); // Start with the most explicit format (without wildcard) wildcardedPaths.add(this); String[] pathComponents = getPathComponents(); int supportedWildcardLevel = getSupportedWildcardLevel(maxAutoCreatedQueueDepth); // Collect all template entries for (int wildcardLevel = 1; wildcardLevel <= supportedWildcardLevel; wildcardLevel++) { int wildcardedComponentIndex = pathComponents.length - wildcardLevel; pathComponents[wildcardedComponentIndex] = WILDCARD_QUEUE; QueuePath wildcardedPath = createFromQueues(pathComponents); wildcardedPaths.add(wildcardedPath); } return wildcardedPaths; }
@Test public void testWildcardingWhenMaxACQDepthIsGreaterThanQueuePathDepth() { int maxAutoCreatedQueueDepth = 4; List<QueuePath> expectedPaths = new ArrayList<>(); expectedPaths.add(TEST_QUEUE_PATH); expectedPaths.add(ONE_LEVEL_WILDCARDED_TEST_PATH); expectedPaths.add(TWO_LEVEL_WILDCARDED_TEST_PATH); expectedPaths.add(THREE_LEVEL_WILDCARDED_TEST_PATH); List<QueuePath> wildcardedPaths = TEST_QUEUE_PATH .getWildcardedQueuePaths(maxAutoCreatedQueueDepth); Assert.assertEquals(expectedPaths, wildcardedPaths); }
@Deprecated public ScheduledExecutorService getPushJobExecutor() { return pushJobExecutor; }
@Test void testPushgateway() { PrometheusConfig prometheusConfig = new PrometheusConfig(); PrometheusConfig.Pushgateway pushgateway = new PrometheusConfig.Pushgateway(); pushgateway.setJob("mock"); pushgateway.setBaseUrl("localhost:9091"); pushgateway.setEnabled(true); pushgateway.setPushInterval(1); prometheusConfig.setPushgateway(pushgateway); metricsConfig.setPrometheus(prometheusConfig); PrometheusMetricsReporter reporter = new PrometheusMetricsReporter(metricsConfig.toUrl(), applicationModel); reporter.init(); ScheduledExecutorService executor = reporter.getPushJobExecutor(); Assertions.assertTrue(executor != null && !executor.isTerminated() && !executor.isShutdown()); reporter.destroy(); Assertions.assertTrue(executor.isTerminated() || executor.isShutdown()); }
@Override public String stringify(HollowRecord record) { return stringify(record.getTypeDataAccess().getDataAccess(), record.getSchema().getName(), record.getOrdinal()); }
@Test public void testStringifyIterator() throws IOException { HollowRecordJsonStringifier recordJsonStringifier = new HollowRecordJsonStringifier(false, false); HollowWriteStateEngine writeEngine = new HollowWriteStateEngine(); HollowObjectMapper mapper = new HollowObjectMapper(writeEngine); mapper.useDefaultHashKeys(); mapper.add(new TestTypeA(1, "one")); mapper.add(new TestTypeA(2, "two")); HollowReadStateEngine readEngine = StateEngineRoundTripper.roundTripSnapshot(writeEngine); Iterable<HollowRecord> genericHollowObjects = (Iterable) Arrays.asList(new GenericHollowObject(readEngine, "TestTypeA", 0), new GenericHollowObject(readEngine, "TestTypeA", 1)); StringWriter writer = new StringWriter(); recordJsonStringifier.stringify(writer, genericHollowObjects); Assert.assertEquals("Multiple records should be printed correctly", "[{\"id\": 1,\"name\": {\"value\": \"one\"}},{\"id\": 2,\"name\": {\"value\": \"two\"}}]", writer.toString()); }
@Override protected ReadableByteChannel open(ClassLoaderResourceId resourceId) throws IOException { ClassLoader classLoader = getClass().getClassLoader(); InputStream inputStream = classLoader.getResourceAsStream(resourceId.path.substring(PREFIX.length())); if (inputStream == null) { throw new IOException( "Unable to load " + resourceId.path + " with " + classLoader + " URL " + classLoader.getResource(resourceId.path.substring(PREFIX.length()))); } return Channels.newChannel(inputStream); }
@Test public void testOpen() throws IOException { ClassLoaderFileSystem filesystem = new ClassLoaderFileSystem(); ReadableByteChannel channel = filesystem.open(filesystem.matchNewResource(SOME_CLASS, false)); checkIsClass(channel); }
@Override public boolean registry(ServiceInstance serviceInstance) { return zkServiceManager.chooseService().registry(serviceInstance); }
@Test public void registry() { final DefaultServiceInstance instance = new DefaultServiceInstance("localhost", "127.0.0.1", 8080, Collections.emptyMap(), "zk"); Mockito.when(zkService34.registry(instance)).thenReturn(true); final boolean registry = zkDiscoveryClient.registry(instance); Assert.assertTrue(registry); }
public Set<String> getJsonStructure(String user) { fillPermissions(user); return _roles; }
@Test public void testGetUerPermissionsWhenNoSecurity() { setupUserPermissions(false); assertThrows("Unable to retrieve privilege information for an unsecure connection", UserRequestException.class, () -> _userPermissions.getJsonStructure("ANONYMOUS")); }
@Override public int compareTo(DateTimeStamp dateTimeStamp) { return comparator.compare(this,dateTimeStamp); }
@Test void testCompareGreaterTimeStamp() { DateTimeStamp smaller = new DateTimeStamp("2018-04-04T10:10:00.586-0100", 122); DateTimeStamp greater = new DateTimeStamp("2018-04-04T10:10:00.586-0100", 123); assertEquals(1, greater.compareTo(smaller)); }
@Override // mappedStatementId 参数,暂时没有用。以后,可以基于 mappedStatementId + DataPermission 进行缓存 public List<DataPermissionRule> getDataPermissionRule(String mappedStatementId) { // 1. 无数据权限 if (CollUtil.isEmpty(rules)) { return Collections.emptyList(); } // 2. 未配置,则默认开启 DataPermission dataPermission = DataPermissionContextHolder.get(); if (dataPermission == null) { return rules; } // 3. 已配置,但禁用 if (!dataPermission.enable()) { return Collections.emptyList(); } // 4. 已配置,只选择部分规则 if (ArrayUtil.isNotEmpty(dataPermission.includeRules())) { return rules.stream().filter(rule -> ArrayUtil.contains(dataPermission.includeRules(), rule.getClass())) .collect(Collectors.toList()); // 一般规则不会太多,所以不采用 HashSet 查询 } // 5. 已配置,只排除部分规则 if (ArrayUtil.isNotEmpty(dataPermission.excludeRules())) { return rules.stream().filter(rule -> !ArrayUtil.contains(dataPermission.excludeRules(), rule.getClass())) .collect(Collectors.toList()); // 一般规则不会太多,所以不采用 HashSet 查询 } // 6. 已配置,全部规则 return rules; }
@Test public void testGetDataPermissionRule_04() { // 准备参数 String mappedStatementId = randomString(); // mock 方法 DataPermissionContextHolder.add(AnnotationUtils.findAnnotation(TestClass04.class, DataPermission.class)); // 调用 List<DataPermissionRule> result = dataPermissionRuleFactory.getDataPermissionRule(mappedStatementId); // 断言 assertEquals(1, result.size()); assertEquals(DataPermissionRule01.class, result.get(0).getClass()); }
public boolean canUserViewTemplates(CaseInsensitiveString username, List<Role> roles, boolean isGroupAdministrator) { for (PipelineTemplateConfig templateConfig : this) { if (hasViewAccessToTemplate(templateConfig, username, roles, isGroupAdministrator)) { return true; } } return false; }
@Test public void shouldReturnTrueIfUserCanViewAtLeastOneTemplate() { CaseInsensitiveString templateViewUser = new CaseInsensitiveString("template-view"); TemplatesConfig templates = configForUserWhoCanViewATemplate(); templates.add(PipelineTemplateConfigMother.createTemplate("template200", new Authorization(new ViewConfig(new AdminUser(templateViewUser))), StageConfigMother.manualStage("stage-name"))); assertThat(templates.canUserViewTemplates(templateViewUser, null, false), is(true)); }
@Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { // if value == null ,HashMap will throw NPE if (key == null && value != null) { return null; } return super.merge(key, value, remappingFunction); }
@Test public void testMerge() { Assert.assertEquals(VALUE, map.get(KEY)); try { map.merge(null, null, (key, value) -> ""); Assert.fail(); } catch (NullPointerException npe) { //ignore } Assert.assertNull(map.merge(null, "value", (key, value) -> "")); Assert.assertEquals("value1", map.merge("key1", "value1", (v1, v2) -> v1 + "" + v2)); Assert.assertEquals(VALUE + VALUE, map.merge(KEY, VALUE, (v1, v2) -> v1 + "" + v2)); }
@Override public void run() { // top-level command, do nothing }
@Test public void test_submit() { run("submit", testJobJarFile.toString()); assertTrueEventually(() -> assertEquals(1, hz.getJet().getJobs().size())); Job job = hz.getJet().getJobs().get(0); assertThat(job).eventuallyHasStatus(JobStatus.RUNNING); assertNull(job.getName()); }
public static String initCacheDir(String namespace, NacosClientProperties properties) { String jmSnapshotPath = properties.getProperty(JM_SNAPSHOT_PATH_PROPERTY); String namingCacheRegistryDir = ""; if (properties.getProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR) != null) { namingCacheRegistryDir = File.separator + properties.getProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR); } if (!StringUtils.isBlank(jmSnapshotPath)) { cacheDir = jmSnapshotPath + File.separator + FILE_PATH_NACOS + namingCacheRegistryDir + File.separator + FILE_PATH_NAMING + File.separator + namespace; } else { cacheDir = properties.getProperty(USER_HOME_PROPERTY) + File.separator + FILE_PATH_NACOS + namingCacheRegistryDir + File.separator + FILE_PATH_NAMING + File.separator + namespace; } return cacheDir; }
@Test void testInitCacheDirWithJmSnapshotPathRootAndWithCache() { System.setProperty("user.home", "/home/snapshot"); NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive(); properties.setProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR, "custom"); String actual = CacheDirUtil.initCacheDir("test", properties); assertEquals("/home/snapshot/nacos/custom/naming/test", actual); }
public static void delete(final File file, final boolean ignoreFailures) { if (file.exists()) { if (file.isDirectory()) { final File[] files = file.listFiles(); if (null != files) { for (final File f : files) { delete(f, ignoreFailures); } } } if (!file.delete() && !ignoreFailures) { try { Files.delete(file.toPath()); } catch (final IOException ex) { LangUtil.rethrowUnchecked(ex); } } } }
@Test void deleteErrorHandlerShouldCatchExceptionIfDeleteOfADirectoryFails() { final ErrorHandler errorHandler = mock(ErrorHandler.class); final File dir = mock(File.class); when(dir.exists()).thenReturn(true); when(dir.isDirectory()).thenReturn(true); when(dir.delete()).thenReturn(false); IoUtil.delete(dir, errorHandler); verify(errorHandler).onError(isA(NullPointerException.class)); }
public static String getZodiac(Date date) { return getZodiac(DateUtil.calendar(date)); }
@Test public void getZodiacTest() { assertEquals("摩羯座", Zodiac.getZodiac(Month.JANUARY, 19)); assertEquals("水瓶座", Zodiac.getZodiac(Month.JANUARY, 20)); assertEquals("巨蟹座", Zodiac.getZodiac(6, 17)); final Calendar calendar = Calendar.getInstance(); calendar.set(2022, Calendar.JULY, 17); assertEquals("巨蟹座", Zodiac.getZodiac(calendar.getTime())); assertEquals("巨蟹座", Zodiac.getZodiac(calendar)); assertNull(Zodiac.getZodiac((Calendar) null)); }
@PostMapping @Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX + "namespaces", action = ActionTypes.WRITE, signType = SignType.CONSOLE) public Result<Boolean> createNamespace(NamespaceForm namespaceForm) throws NacosException { namespaceForm.validate(); String namespaceId = namespaceForm.getNamespaceId(); String namespaceName = namespaceForm.getNamespaceName(); String namespaceDesc = namespaceForm.getNamespaceDesc(); if (StringUtils.isBlank(namespaceId)) { namespaceId = UUID.randomUUID().toString(); } else { namespaceId = namespaceId.trim(); if (!namespaceIdCheckPattern.matcher(namespaceId).matches()) { throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE, "namespaceId [" + namespaceId + "] mismatch the pattern"); } if (namespaceId.length() > NAMESPACE_ID_MAX_LENGTH) { throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE, "too long namespaceId, over " + NAMESPACE_ID_MAX_LENGTH); } // check unique if (namespacePersistService.tenantInfoCountByTenantId(namespaceId) > 0) { throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE, "the namespaceId is existed, namespaceId: " + namespaceForm.getNamespaceId()); } } // contains illegal chars if (!namespaceNameCheckPattern.matcher(namespaceName).matches()) { throw new NacosApiException(HttpStatus.BAD_REQUEST.value(), ErrorCode.ILLEGAL_NAMESPACE, "namespaceName [" + namespaceName + "] contains illegal char"); } return Result.success(namespaceOperationService.createNamespace(namespaceId, namespaceName, namespaceDesc)); }
@Test void testCreateNamespaceWithIllegalName() { NamespaceForm form = new NamespaceForm(); form.setNamespaceDesc("testDesc"); form.setNamespaceName("test@Name"); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); form.setNamespaceName("test$Name"); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); form.setNamespaceName("test#Name"); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); form.setNamespaceName("test%Name"); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); form.setNamespaceName("test^Name"); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); form.setNamespaceName("test&Name"); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); form.setNamespaceName("test*Name"); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); form.setNamespaceName(""); assertThrows(NacosException.class, () -> namespaceControllerV2.createNamespace(form)); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) { // We guarantee that the callbacks for all commitAsync() will be invoked when // commitSync() completes, even if the user tries to commit empty offsets. return invokePendingAsyncCommits(timer); } long attempts = 0L; do { if (coordinatorUnknownAndUnreadySync(timer)) { return false; } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, timer); // We may have had in-flight offset commits when the synchronous commit began. If so, ensure that // the corresponding callbacks are invoked prior to returning in order to preserve the order that // the offset commits were applied. invokeCompletedOffsetCommitCallbacks(); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (future.failed() && !future.isRetriable()) throw future.exception(); timer.sleep(retryBackoff.backoff(attempts++)); } while (timer.notExpired()); return false; }
@Test public void shouldLoseAllOwnedPartitionsBeforeRejoiningAfterResettingGenerationId() { final List<TopicPartition> partitions = singletonList(t1p); try (ConsumerCoordinator coordinator = prepareCoordinatorForCloseTest(true, false, Optional.of("group-id"), true)) { final Time realTime = Time.SYSTEM; coordinator.ensureActiveGroup(); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.REBALANCE_IN_PROGRESS); assertThrows(RebalanceInProgressException.class, () -> coordinator.commitOffsetsSync( singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE))); int generationId = 42; String memberId = "consumer-42"; client.prepareResponse(joinGroupFollowerResponse(generationId, memberId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.ILLEGAL_GENERATION)); boolean res = coordinator.joinGroupIfNeeded(realTime.timer(1000)); assertFalse(res); assertEquals(AbstractCoordinator.Generation.NO_GENERATION.generationId, coordinator.generation().generationId); assertEquals(AbstractCoordinator.Generation.NO_GENERATION.protocolName, coordinator.generation().protocolName); // member ID should not be reset assertEquals(memberId, coordinator.generation().memberId); res = coordinator.joinGroupIfNeeded(realTime.timer(1000)); assertFalse(res); } Collection<TopicPartition> lost = getLost(partitions); assertEquals(lost.isEmpty() ? 0 : 1, rebalanceListener.lostCount); assertEquals(lost.isEmpty() ? null : lost, rebalanceListener.lost); }
@VisibleForTesting int getCapacity() { return capacity; }
@Test public void testCacheSizeConfigs() { // Assert the default configs. Configuration config = new Configuration(); cache = new JournaledEditsCache(config); assertEquals((int) (Runtime.getRuntime().maxMemory() * 0.5f), cache.getCapacity()); // Set dfs.journalnode.edit-cache-size.bytes. Configuration config1 = new Configuration(); config1.setInt(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY, 1); config1.setFloat(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY, 0.1f); cache = new JournaledEditsCache(config1); assertEquals(1, cache.getCapacity()); // Don't set dfs.journalnode.edit-cache-size.bytes. Configuration config2 = new Configuration(); config2.setFloat(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY, 0.1f); cache = new JournaledEditsCache(config2); assertEquals((int) (Runtime.getRuntime().maxMemory() * 0.1f), cache.getCapacity()); }
public static TableFactoryHelper createTableFactoryHelper( DynamicTableFactory factory, DynamicTableFactory.Context context) { return new TableFactoryHelper(factory, context); }
@Test void testFactoryHelperWithEmptyEnrichmentOptions() { final Map<String, String> options = new HashMap<>(); options.put(TestDynamicTableFactory.TARGET.key(), "abc"); options.put(TestDynamicTableFactory.BUFFER_SIZE.key(), "1000"); final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper( new TestDynamicTableFactory(), FactoryMocks.createTableContext(SCHEMA, options)); helper.validate(); assertThat(helper.getOptions().get(TestDynamicTableFactory.TARGET)).isEqualTo("abc"); assertThat(helper.getOptions().get(TestDynamicTableFactory.BUFFER_SIZE)).isEqualTo(1000); }
@Override public <R> HoodieData<HoodieRecord<R>> tagLocation( HoodieData<HoodieRecord<R>> records, HoodieEngineContext context, HoodieTable hoodieTable) { return HoodieJavaRDD.of(HoodieJavaRDD.getJavaRDD(records) .mapPartitionsWithIndex(locationTagFunction(hoodieTable.getMetaClient()), true)); }
@Test public void testHbaseTagLocationForArchivedCommits() throws Exception { // Load to memory Map<String, String> params = new HashMap<String, String>(); params.put(HoodieCleanConfig.CLEANER_COMMITS_RETAINED.key(), "1"); params.put(HoodieMetadataConfig.COMPACT_NUM_DELTA_COMMITS.key(), "3"); params.put(HoodieArchivalConfig.MAX_COMMITS_TO_KEEP.key(), "5"); params.put(HoodieArchivalConfig.MIN_COMMITS_TO_KEEP.key(), "4"); HoodieWriteConfig config = getConfigBuilder(100, false, false).withProps(params).build(); SparkHoodieHBaseIndex index = new SparkHoodieHBaseIndex(config); try (SparkRDDWriteClient writeClient = getHoodieWriteClient(config)) { // make first commit with 20 records JavaRDD<HoodieRecord> writeRecords1 = generateAndCommitRecords(writeClient, 20); metaClient = HoodieTableMetaClient.reload(metaClient); String commit1 = metaClient.getActiveTimeline().firstInstant().get().getTimestamp(); // Make 6 additional commits, so that first commit is archived for (int nCommit = 0; nCommit < 6; nCommit++) { generateAndCommitRecords(writeClient, 20); } // tagLocation for the first set of records (for the archived commit), hbaseIndex should tag them as valid metaClient = HoodieTableMetaClient.reload(metaClient); assertTrue(metaClient.getArchivedTimeline().containsInstant(commit1)); HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient); JavaRDD<HoodieRecord> javaRDD1 = tagLocation(index, writeRecords1, hoodieTable); assertEquals(20, javaRDD1.filter(HoodieRecord::isCurrentLocationKnown).collect().size()); } }
public static Optional<YamlShardingRuleConfiguration> findYamlShardingRuleConfiguration(final Collection<YamlRuleConfiguration> yamlRuleConfigs) { return yamlRuleConfigs.stream().filter(YamlShardingRuleConfiguration.class::isInstance).findFirst().map(YamlShardingRuleConfiguration.class::cast); }
@Test void assertFindYamlShardingRuleConfiguration() { Optional<YamlShardingRuleConfiguration> actual = ShardingRuleConfigurationConverter.findYamlShardingRuleConfiguration(yamlRuleConfig); assertTrue(actual.isPresent()); assertThat(actual.get().getTables().size(), is(1)); assertTrue(actual.get().getTables().containsKey("LOGIC_TABLE")); }
@Override public String getPath() { var fullPath = request.getRequestURI(); // it shouldn't be null, but in case it is, it's better to return empty string if (fullPath == null) { return Pac4jConstants.EMPTY_STRING; } // very strange use case if (fullPath.startsWith("//")) { fullPath = fullPath.substring(1); } val context = request.getContextPath(); // this one shouldn't be null either, but in case it is, then let's consider it is empty if (context != null) { return fullPath.substring(context.length()); } return fullPath; }
@Test public void testGetPathNullFullPath() { when(request.getRequestURI()).thenReturn(null); WebContext context = new JEEContext(request, response); assertEquals(Pac4jConstants.EMPTY_STRING, context.getPath()); }
@Override public Object handle(ProceedingJoinPoint proceedingJoinPoint, TimeLimiter timeLimiter, String methodName) throws Throwable { Object returnValue = proceedingJoinPoint.proceed(); if (Flux.class.isAssignableFrom(returnValue.getClass())) { Flux<?> fluxReturnValue = (Flux<?>) returnValue; return fluxReturnValue.transformDeferred(TimeLimiterOperator.of(timeLimiter)); } else if (Mono.class.isAssignableFrom(returnValue.getClass())) { Mono<?> monoReturnValue = (Mono<?>) returnValue; return monoReturnValue.transformDeferred(TimeLimiterOperator.of(timeLimiter)); } else { throw new IllegalReturnTypeException(returnValue.getClass(), methodName, "Reactor expects Mono/Flux."); } }
@Test public void shouldThrowIllegalArgumentExceptionWithNotReactorType() throws Throwable{ TimeLimiter timeLimiter = TimeLimiter.ofDefaults("test"); when(proceedingJoinPoint.proceed()).thenReturn("NOT REACTOR TYPE"); try { reactorTimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod"); fail("exception missed"); } catch (Throwable e) { assertThat(e).isInstanceOf(IllegalReturnTypeException.class) .hasMessage( "java.lang.String testMethod has unsupported by @TimeLimiter return type. Reactor expects Mono/Flux."); } }
public void updateBatchProcessingTimeNs(long elapsedNs) { batchProcessingTimeNsUpdater.accept(elapsedNs); }
@Test public void testUpdateBatchProcessingTimeNs() { MetricsRegistry registry = new MetricsRegistry(); try (FakeMetadataLoaderMetrics fakeMetrics = new FakeMetadataLoaderMetrics(registry)) { fakeMetrics.metrics.updateBatchProcessingTimeNs(123L); assertEquals(123L, fakeMetrics.batchProcessingTimeNs.get()); } }
public void putValue(String fieldName, @Nullable Object value) { _fieldToValueMap.put(fieldName, value); }
@Test public void testMapValuesSameSizeNotEqual() { GenericRow first = new GenericRow(); first.putValue("one", 1); HashMap<String, Object> firstData = new HashMap<String, Object>(); firstData.put("two", 2); GenericRow second = new GenericRow(); HashMap<String, Object> secondData = new HashMap<String, Object>(); secondData.put("two", "two"); second.putValue("one", secondData); Assert.assertNotEquals(first, second); }
static QueryId buildId( final Statement statement, final EngineContext engineContext, final QueryIdGenerator idGenerator, final OutputNode outputNode, final boolean createOrReplaceEnabled, final Optional<String> withQueryId) { if (withQueryId.isPresent()) { final String queryId = withQueryId.get().toUpperCase(); validateWithQueryId(queryId); return new QueryId(queryId); } if (statement instanceof CreateTable && ((CreateTable) statement).isSource()) { // Use the CST name as part of the QueryID final String suffix = ((CreateTable) statement).getName().text().toUpperCase() + "_" + idGenerator.getNext().toUpperCase(); return new QueryId(ReservedQueryIdsPrefixes.CST + suffix); } if (!outputNode.getSinkName().isPresent()) { final String prefix = "transient_" + outputNode.getSource().getLeftmostSourceNode().getAlias().text() + "_"; return new QueryId(prefix + Math.abs(ThreadLocalRandom.current().nextLong())); } final KsqlStructuredDataOutputNode structured = (KsqlStructuredDataOutputNode) outputNode; if (!structured.createInto()) { return new QueryId(ReservedQueryIdsPrefixes.INSERT + idGenerator.getNext()); } final SourceName sink = outputNode.getSinkName().get(); final Set<QueryId> queriesForSink = engineContext.getQueryRegistry().getQueriesWithSink(sink); if (queriesForSink.size() > 1) { throw new KsqlException("REPLACE for sink " + sink + " is not supported because there are " + "multiple queries writing into it: " + queriesForSink); } else if (!queriesForSink.isEmpty()) { if (!createOrReplaceEnabled) { final String type = outputNode.getNodeOutputType().getKsqlType().toLowerCase(); throw new UnsupportedOperationException( String.format( "Cannot add %s '%s': A %s with the same name already exists", type, sink.text(), type)); } return Iterables.getOnlyElement(queriesForSink); } final String suffix = outputNode.getId().toString().toUpperCase() + "_" + idGenerator.getNext().toUpperCase(); return new QueryId( outputNode.getNodeOutputType() == DataSourceType.KTABLE ? 
ReservedQueryIdsPrefixes.CTAS + suffix : ReservedQueryIdsPrefixes.CSAS + suffix ); }
@Test public void shouldComputeQueryIdCorrectlyForInsertInto() { // Given: when(plan.getSinkName()).thenReturn(Optional.of(SINK)); when(idGenerator.getNext()).thenReturn("1"); // When: final QueryId queryId = QueryIdUtil.buildId(statement, engineContext, idGenerator, plan, false, Optional.empty()); // Then: assertThat(queryId, is(new QueryId("INSERTQUERY_1"))); }
public static boolean isPropsActiveVersionPath(final String propsPath) { Pattern pattern = Pattern.compile(getPropsNode() + ACTIVE_VERSION_SUFFIX, Pattern.CASE_INSENSITIVE); Matcher matcher = pattern.matcher(propsPath); return matcher.find(); }
@Test void assertIsPropsActiveVersionPath() { assertTrue(GlobalNodePath.isPropsActiveVersionPath("/props/active_version")); }
public static List<String> splitToWhiteSpaceSeparatedTokens(String input) { if (input == null) { return new ArrayList<>(); } StringTokenizer tokenizer = new StringTokenizer(input.trim(), QUOTE_CHAR + WHITESPACE, true); List<String> tokens = new ArrayList<>(); StringBuilder quotedText = new StringBuilder(); while (tokenizer.hasMoreTokens()) { String token = tokenizer.nextToken(); if (QUOTE_CHAR.equals(token)) { // if we have a quote, add the next tokens to the quoted text // until the quoting has finished quotedText.append(QUOTE_CHAR); String buffer = quotedText.toString(); if (isSingleQuoted(buffer) || isDoubleQuoted(buffer)) { tokens.add(buffer.substring(1, buffer.length() - 1)); quotedText = new StringBuilder(); } } else if (WHITESPACE.equals(token)) { // a white space, if in quote, add the white space, otherwise // skip it if (quotedText.length() > 0) { quotedText.append(WHITESPACE); } } else { if (quotedText.length() > 0) { quotedText.append(token); } else { tokens.add(token); } } } if (quotedText.length() > 0) { throw new IllegalArgumentException("Invalid quoting found in args " + quotedText); } return tokens; }
@Test public void testWhiteSpaceSeparatedArgs() { List<String> args = splitToWhiteSpaceSeparatedTokens("arg0 arg1 arg2"); assertEquals("arg0", args.get(0)); assertEquals("arg1", args.get(1)); assertEquals("arg2", args.get(2)); }
@Override @CheckForNull public EmailMessage format(Notification notif) { if (!(notif instanceof ChangesOnMyIssuesNotification)) { return null; } ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif; if (notification.getChange() instanceof AnalysisChange) { checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty"); return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification); } return formatMultiProject(notification); }
@Test public void format_set_html_message_with_header_dealing_with_plural_security_hotspots_when_change_from_User() { Set<ChangedIssue> changedIssues = IntStream.range(0, 2 + new Random().nextInt(4)) .mapToObj(i -> newChangedIssue(i + "", randomValidStatus(), newProject("prj_" + i), newSecurityHotspotRule("rule_" + i))) .collect(toSet()); UserChange userChange = newUserChange(); EmailMessage singleIssueMessage = underTest.format(new ChangesOnMyIssuesNotification( userChange, changedIssues.stream().limit(1).collect(toSet()))); EmailMessage multiIssueMessage = underTest.format(new ChangesOnMyIssuesNotification(userChange, changedIssues)); HtmlFragmentAssert.assertThat(singleIssueMessage.getMessage()) .hasParagraph("Hi,") .withoutLink() .hasParagraph("A manual change has updated a hotspot assigned to you:") .withoutLink(); HtmlFragmentAssert.assertThat(multiIssueMessage.getMessage()) .hasParagraph("Hi,") .withoutLink() .hasParagraph("A manual change has updated hotspots assigned to you:") .withoutLink(); }
/**
 * Fetches the content at the given URL as binary data, without sending any
 * conditional-request headers (no last-modified date, no entity tag).
 *
 * @param url the URL to fetch
 * @return the HTTP result carrying the response body
 * @throws IOException on a network failure
 * @throws NotModifiedException if the server reports the content as unchanged
 */
public HttpResult getBinary(String url) throws IOException, NotModifiedException {
    return getBinary(url, null, null);
}
/** The client must throw NotModifiedException when the server answers 304 for a matching ETag. */
@Test
void eTagReturns304() {
    // Server returns 304 whenever the stored ETag is presented via If-None-Match.
    this.mockServerClient.when(HttpRequest.request().withMethod("GET").withHeader(HttpHeaders.IF_NONE_MATCH, "78910"))
        .respond(HttpResponse.response().withStatusCode(HttpStatus.SC_NOT_MODIFIED));
    Assertions.assertThrows(NotModifiedException.class, () -> getter.getBinary(this.feedUrl, null, "78910"));
}
public static String jsToString( Object value, String classType ) { if ( classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ ) || classType.equalsIgnoreCase( JS_UNDEFINED ) ) { // Is it a java Value class ? try { Value v = (Value) Context.jsToJava( value, Value.class ); return v.toString(); } catch ( Exception ev ) { // convert to a string should work in most cases... // return Context.toString( value ); } } else { // A String perhaps? return Context.toString( value ); } }
/** A NativeJavaObject wrapping an integer Value must stringify to its numeric text. */
@Test
public void jsToString_NativeJavaObject_Int() throws Exception {
    assertEquals( "1", JavaScriptUtils.jsToString( getIntValue(), JAVA_OBJECT ).trim() );
}
/**
 * Queues a merge task for the given configuration coordinate, but only when
 * this node is currently allowed to execute merges.
 *
 * @param dataId   the configuration data id
 * @param groupId  the configuration group
 * @param tenant   the tenant (namespace) of the configuration
 * @param clientIp the IP of the client that triggered the merge
 */
public void addMergeTask(String dataId, String groupId, String tenant, String clientIp) {
    if (canExecute()) {
        MergeDataTask mergeTask = new MergeDataTask(dataId, groupId, tenant, clientIp);
        mergeTasks.addTask(mergeTask.getId(), mergeTask);
    }
}
/** In external-storage mode a merge task must still be queued on the task manager. */
@Test
void testAddMergeTaskExternalModel() {
    String dataId = "dataId12345";
    String group = "group123";
    String tenant = "tenant1234";
    String clientIp = "127.0.0.1";
    // Switch to external (non-embedded) storage and swap in a mock task manager.
    DatasourceConfiguration.setEmbeddedStorage(false);
    TaskManager mockTasker = Mockito.mock(TaskManager.class);
    ReflectionTestUtils.setField(mergeDatumService, "mergeTasks", mockTasker);
    mergeDatumService.addMergeTask(dataId, group, tenant, clientIp);
    // Exactly one task must have been handed to the task manager.
    Mockito.verify(mockTasker, times(1)).addTask(anyString(), any(MergeDataTask.class));
}
/**
 * Renders the given key/value pairs as a URL query string, URL-encoding both
 * keys and values. Returns an empty string for an empty collection, otherwise
 * a string of the form {@code ?k1=v1&k2=v2} in iteration order.
 *
 * @param params the query parameters to encode
 * @return the encoded query string (possibly empty), never {@code null}
 */
public static String queryParamString(final Collection<Map.Entry<String, String>> params) {
    if (params.isEmpty()) {
        return "";
    }
    // StringJoiner supplies the leading "?" and the "&" separators.
    final StringJoiner joiner = new StringJoiner("&", "?", "");
    for (final Map.Entry<String, String> param : params) {
        final String key = URLEncoder.encode(param.getKey(), StandardCharsets.UTF_8);
        final String value = URLEncoder.encode(param.getValue(), StandardCharsets.UTF_8);
        joiner.add(key + "=" + value);
    }
    return joiner.toString();
}
/** Reserved URL characters in both key and value must be percent-encoded. */
@Test
public void queryParameterStringEncodesUnsafeChars() {
    final String result = HttpUtils.queryParamString(List.of(Map.entry("&k?e=y/!", "=v/a?l&u;e")));
    assertThat(result).isEqualTo("?%26k%3Fe%3Dy%2F%21=%3Dv%2Fa%3Fl%26u%3Be");
}
/**
 * Returns the currently running system jobs, keyed by job ID.
 * NOTE(review): this exposes the internal mutable map directly, so callers
 * could modify manager state — confirm whether an unmodifiable view is
 * intended instead.
 */
public Map<String, SystemJob> getRunningJobs() {
    return jobs;
}
/** Submitted jobs must be retrievable from the running-jobs map under their IDs. */
@Test
public void testGetRunningJobs() throws Exception {
    SystemJobManager manager = new SystemJobManager(systemMessageActivityWriter, new MetricRegistry());
    LongRunningJob job1 = new LongRunningJob(1);
    LongRunningJob job2 = new LongRunningJob(1);
    String jobID1 = manager.submit(job1);
    String jobID2 = manager.submit(job2);
    assertEquals(2, manager.getRunningJobs().size());
    assertTrue(manager.getRunningJobs().containsValue(job1));
    assertTrue(manager.getRunningJobs().containsValue(job2));
    // Each job must be stored under the ID returned by submit().
    assertEquals(jobID1, manager.getRunningJobs().get(jobID1).getId());
    assertEquals(jobID2, manager.getRunningJobs().get(jobID2).getId());
}
/**
 * Removes every mapping entry of the given type that was installed by the
 * supplied application.
 *
 * @param type  the mapping store type to purge
 * @param appId the application whose entries should be removed
 */
@Override
public void removeMappingEntriesByAppId(Type type, ApplicationId appId) {
    // Collect all entries owned by the application, then delete them in bulk.
    MappingEntry[] toRemove =
            Iterables.toArray(getMappingEntriesByAppId(type, appId), MappingEntry.class);
    removeMappingEntries(type, toRemove);
}
/** Removing by app id must delete every mapping that application installed. */
@Test
public void removeMappingEntriesByAppId() {
    addMapping(MAP_DATABASE, 1);
    addMapping(MAP_DATABASE, 2);
    adminService.removeMappingEntriesByAppId(MAP_DATABASE, appId);
    assertTrue("should not have any mappings", Lists.newLinkedList(
            service.getMappingEntriesByAppId(MAP_DATABASE, appId)).size() == 0);
}
public synchronized String createDataset(String region) throws BigQueryResourceManagerException { // Check to see if dataset already exists, and throw error if it does if (dataset != null) { throw new IllegalStateException( "Dataset " + datasetId + " already exists for project " + projectId + "."); } LOG.info("Creating dataset {} in project {}.", datasetId, projectId); // Send the dataset request to Google Cloud try { DatasetInfo datasetInfo = DatasetInfo.newBuilder(datasetId).setLocation(region).build(); LOG.info("Dataset {} created successfully", datasetId); dataset = bigQuery.create(datasetInfo); return datasetId; } catch (Exception e) { throw new BigQueryResourceManagerException("Failed to create dataset.", e); } }
/** Creating the same dataset twice must fail with IllegalStateException. */
@Test
public void testCreateDatasetShouldNotCreateDatasetWhenDatasetAlreadyExists() {
    testManager.createDataset(DATASET_ID);
    assertThrows(IllegalStateException.class, () -> testManager.createDataset(DATASET_ID));
}
/**
 * Returns a masked placeholder when the given configuration key matches any
 * of the configured sensitive-key patterns; otherwise returns the value
 * unchanged.
 *
 * @param key   the configuration key to check
 * @param value the raw value, possibly {@code null}
 * @return the masked value for sensitive keys, the original value otherwise
 */
@Nullable
public Object sanitize(String key, @Nullable Object value) {
    // anyMatch short-circuits on the first matching pattern.
    boolean sensitive = sanitizeKeysPatterns.stream()
            .anyMatch(pattern -> pattern.matcher(key).matches());
    return sensitive ? SANITIZED_VALUE : value;
}
/** All credential-bearing keys, including prefixed and AWS variants, must be masked. */
@Test
void obfuscateCredentials() {
    final var sanitizer = new KafkaConfigSanitizer(true, List.of());
    assertThat(sanitizer.sanitize("sasl.jaas.config", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("consumer.sasl.jaas.config", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("producer.sasl.jaas.config", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("main.consumer.sasl.jaas.config", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("database.password", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("basic.auth.user.info", "secret")).isEqualTo("******");
    // AWS variable sanitizing
    assertThat(sanitizer.sanitize("aws.access.key.id", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("aws.accessKeyId", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("aws.secret.access.key", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("aws.secretAccessKey", "secret")).isEqualTo("******");
    assertThat(sanitizer.sanitize("aws.sessionToken", "secret")).isEqualTo("******");
}
/**
 * Returns the shared pool-based scheduled executor, lazily initializing the
 * shared executors on first access. Repeated calls return the same instance.
 */
public static SharedScheduledExecutorService getPoolThreadExecutor() {
    setup();
    return poolThreadExecutor;
}
/** The pool executor must be non-null and returned as a singleton across calls. */
@Test
public void poolThread() {
    ScheduledExecutorService a = SharedScheduledExecutors.getPoolThreadExecutor();
    assertNotNull("ScheduledExecutorService must not be null", a);
    ScheduledExecutorService b = SharedScheduledExecutors.getPoolThreadExecutor();
    assertSame("factories should be same", a, b);
}
public static Builder forMagic(byte magic, ProduceRequestData data) { // Message format upgrades correspond with a bump in the produce request version. Older // message format versions are generally not supported by the produce request versions // following the bump. final short minVersion; final short maxVersion; if (magic < RecordBatch.MAGIC_VALUE_V2) { minVersion = 2; maxVersion = 2; } else { minVersion = 3; maxVersion = ApiKeys.PRODUCE.latestVersion(); } return new Builder(minVersion, maxVersion, data); }
/** A request mixing idempotent and non-idempotent partitions is idempotent but not transactional. */
@Test
public void testMixedIdempotentData() {
    final long producerId = 15L;
    final short producerEpoch = 5;
    final int sequence = 10;
    final MemoryRecords nonIdempotentRecords = MemoryRecords.withRecords(Compression.NONE,
        new SimpleRecord("foo".getBytes()));
    final MemoryRecords idempotentRecords = MemoryRecords.withIdempotentRecords(Compression.NONE,
        producerId, producerEpoch, sequence, new SimpleRecord("bar".getBytes()));
    // Build a request with one idempotent and one plain partition on the same topic.
    ProduceRequest.Builder builder = ProduceRequest.forMagic(RecordVersion.current().value,
        new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList(
                new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList(
                    new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(idempotentRecords))),
                new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList(
                    new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonIdempotentRecords))))
                .iterator()))
            .setAcks((short) -1)
            .setTimeoutMs(5000));
    final ProduceRequest request = builder.build();
    assertFalse(RequestUtils.hasTransactionalRecords(request));
    assertTrue(RequestTestUtils.hasIdempotentRecords(request));
}
/**
 * Fallback type converter bridging Saxon's NodeInfo/NodeOverNodeInfo values
 * to Camel's DOM-based converters. Returns {@code null} when no suitable
 * delegate converter is registered, per the fallback-converter contract.
 *
 * @param type     the desired target type
 * @param exchange the current exchange
 * @param value    the Saxon value to convert
 * @param registry registry used to locate delegate converters
 * @return the converted value, or {@code null} if not convertible here
 */
@Converter(fallback = true)
public static <T> T convertTo(Class<T> type, Exchange exchange, Object value, TypeConverterRegistry registry) {
    if (NodeInfo.class.isAssignableFrom(value.getClass())) {
        // use a fallback type converter so we can convert the embedded body if the value is NodeInfo
        NodeInfo ni = (NodeInfo) value;
        // first try to find a Converter for Node
        TypeConverter tc = registry.lookup(type, Node.class);
        if (tc != null) {
            Node node = NodeOverNodeInfo.wrap(ni);
            return tc.convertTo(type, exchange, node);
        }
        // if this does not exist we can also try NodeList (there are some type converters for that) as
        // the default Xerces Node implementation also implements NodeList.
        tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            List<NodeInfo> nil = new LinkedList<>();
            nil.add(ni);
            return tc.convertTo(type, exchange, toDOMNodeList(nil));
        }
    } else if (List.class.isAssignableFrom(value.getClass())) {
        TypeConverter tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            // Keep only the NodeInfo elements of the list before converting.
            List<NodeInfo> lion = new LinkedList<>();
            for (Object o : (List<?>) value) {
                if (o instanceof NodeInfo) {
                    lion.add((NodeInfo) o);
                }
            }
            if (!lion.isEmpty()) {
                NodeList nl = toDOMNodeList(lion);
                return tc.convertTo(type, exchange, nl);
            }
        }
    } else if (NodeOverNodeInfo.class.isAssignableFrom(value.getClass())) {
        // NodeOverNode info is a read-only Node implementation from Saxon. In contrast to the JDK
        // com.sun.org.apache.xerces.internal.dom.NodeImpl class it does not implement NodeList, but
        // many Camel type converters are based on that interface. Therefore we convert to NodeList and
        // try type conversion in the fallback type converter.
        TypeConverter tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            List<Node> domNodeList = new LinkedList<>();
            domNodeList.add((NodeOverNodeInfo) value);
            return tc.convertTo(type, exchange, new DOMNodeList(domNodeList));
        }
    }
    return null;
}
/** A NODESET XPath result must be convertible to a Document and back to its text content. */
@Test
public void convertSubNodeSetToDocument() throws XPathExpressionException {
    evaluator.setNamespaceContext(NS_CONTEXT);
    Object nodeObj = evaluator.evaluate("/ns1:a/ns1:b", doc, XPathConstants.NODESET);
    assertNotNull(nodeObj);
    Document document = context.getTypeConverter().convertTo(Document.class, exchange, nodeObj);
    assertNotNull(document);
    String string = context.getTypeConverter().convertTo(String.class, exchange, document);
    assertEquals(CONTENT_B, string);
}
/**
 * Parses the command-line arguments and runs the log-dirs query against the
 * cluster, creating (and automatically closing) an admin client for the
 * duration of the call.
 */
private static void execute(String... args) throws Exception {
    LogDirsCommandOptions options = new LogDirsCommandOptions(args);
    try (Admin adminClient = createAdminClient(options)) {
        execute(options, adminClient);
    }
}
/** When a broker list is given, only the specified broker's log dirs are described. */
@Test
@SuppressWarnings("unchecked")
public void shouldQuerySpecifiedBroker() throws JsonProcessingException {
    Node brokerOne = new Node(1, "hostname", 9092);
    Node brokerTwo = new Node(2, "hostname", 9092);
    try (MockAdminClient adminClient = new MockAdminClient(Arrays.asList(brokerOne, brokerTwo), brokerOne)) {
        String standardOutput = execute(fromArgsToOptions("--bootstrap-server", "EMPTY", "--broker-list", "1", "--describe"), adminClient);
        String[] standardOutputLines = standardOutput.split("\n");
        assertEquals(3, standardOutputLines.length);
        // The third output line contains the JSON payload of the describe result.
        Map<String, Object> information = new ObjectMapper().readValue(standardOutputLines[2], HashMap.class);
        List<Object> brokersInformation = (List<Object>) information.get("brokers");
        Integer brokerId = (Integer) ((HashMap<String, Object>) brokersInformation.get(0)).get("broker");
        assertEquals(1, brokersInformation.size());
        assertEquals(1, brokerId);
    }
}
@Override public void deleteFileConfig(Long id) { // 校验存在 FileConfigDO config = validateFileConfigExists(id); if (Boolean.TRUE.equals(config.getMaster())) { throw exception(FILE_CONFIG_DELETE_FAIL_MASTER); } // 删除 fileConfigMapper.deleteById(id); // 清空缓存 clearCache(id, null); }
/** Deleting the master file configuration must fail with FILE_CONFIG_DELETE_FAIL_MASTER. */
@Test
public void testDeleteFileConfig_master() {
    // Mock data: insert an existing record marked as master.
    FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(true);
    fileConfigMapper.insert(dbFileConfig);
    // Prepare parameters.
    Long id = dbFileConfig.getId();
    // Invoke and assert the expected service exception.
    assertServiceException(() -> fileConfigService.deleteFileConfig(id), FILE_CONFIG_DELETE_FAIL_MASTER);
}
/**
 * Extracts the bare service name from a grouped service name of the form
 * {@code group@@serviceName}.
 *
 * @param serviceNameWithGroup the grouped service name; may be blank
 * @return the service name without its group prefix; the input unchanged if
 *         it has no group separator; an empty string for blank input or a
 *         trailing separator with no service name
 */
public static String getServiceName(final String serviceNameWithGroup) {
    if (StringUtils.isBlank(serviceNameWithGroup)) {
        return StringUtils.EMPTY;
    }
    if (!serviceNameWithGroup.contains(Constants.SERVICE_INFO_SPLITER)) {
        return serviceNameWithGroup;
    }
    String[] parts = serviceNameWithGroup.split(Constants.SERVICE_INFO_SPLITER);
    // Bug fix: input like "group@@" splits into a single element; previously
    // indexing [1] unconditionally threw ArrayIndexOutOfBoundsException.
    return parts.length > 1 ? parts[1] : StringUtils.EMPTY;
}
/** getServiceName must strip the group prefix from "group@@serviceName". */
@Test
void testGetServiceName() {
    String validServiceName = "group@@serviceName";
    assertEquals("serviceName", NamingUtils.getServiceName(validServiceName));
}
/**
 * Reads a 32-bit value from the underlying byte array at the given offset and
 * returns it widened to a {@link Long}.
 *
 * @param formatType one of the 32-bit FORMAT_* constants (signed/unsigned,
 *                   little/big endian)
 * @param offset     index of the first byte of the value
 * @return the decoded value, or {@code null} if the data is too short or the
 *         format type is not a supported 32-bit format
 */
@Nullable
public Long getLongValue(@LongFormat final int formatType, @IntRange(from = 0) final int offset) {
    // Bail out when the requested window does not fit in the value.
    if ((offset + getTypeLen(formatType)) > size()) return null;
    final boolean littleEndian = formatType == FORMAT_UINT32_LE || formatType == FORMAT_SINT32_LE;
    final boolean signed = formatType == FORMAT_SINT32_LE || formatType == FORMAT_SINT32_BE;
    final boolean unsigned = formatType == FORMAT_UINT32_LE || formatType == FORMAT_UINT32_BE;
    // Any other format type is not handled by this accessor.
    if (!signed && !unsigned) return null;
    // Assemble the four bytes in the order dictated by endianness.
    final long raw = littleEndian
            ? unsignedBytesToLong(mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3])
            : unsignedBytesToLong(mValue[offset + 3], mValue[offset + 2], mValue[offset + 1], mValue[offset]);
    // Signed formats reinterpret the 32-bit pattern as two's complement.
    return signed ? unsignedToSigned(raw, 32) : raw;
}
/** A 4-byte little-endian unsigned value with the high bit set must decode without sign extension. */
@Test
public void getValue_UINT32_big() {
    final Data data = new Data(new byte[] { 0x01, 0x00, 0x00, (byte) 0xF0 });
    final long value = data.getLongValue(Data.FORMAT_UINT32_LE, 0);
    assertEquals(0xF0000001L, value);
}
/**
 * Removes and deletes all snapshot files with an offset strictly less than
 * the given offset.
 *
 * @param offset exclusive upper bound; snapshots at or beyond it are kept
 * @throws IOException if a snapshot file cannot be deleted
 */
public void deleteSnapshotsBefore(long offset) throws IOException {
    // subMap(0, offset) yields the [0, offset) range. NOTE(review): removal
    // while iterating is only safe if the snapshots map is a concurrent map —
    // with a plain TreeMap this would throw ConcurrentModificationException;
    // confirm against the field declaration.
    for (SnapshotFile snapshot : snapshots.subMap(0L, offset).values()) {
        removeAndDeleteSnapshot(snapshot.offset);
    }
}
/** Snapshots strictly before the given offset must be deleted; later ones kept. */
@Test
public void testDeleteSnapshotsBefore() throws IOException {
    appendClientEntry(stateManager, producerId, epoch, defaultSequence, 0L, false);
    appendClientEntry(stateManager, producerId, epoch, 1, 1L, false);
    stateManager.takeSnapshot();
    assertEquals(1, Objects.requireNonNull(logDir.listFiles()).length);
    assertEquals(singleton(2L), currentSnapshotOffsets());
    appendClientEntry(stateManager, producerId, epoch, 2, 2L, false);
    stateManager.takeSnapshot();
    assertEquals(2, Objects.requireNonNull(logDir.listFiles()).length);
    assertEquals(new HashSet<>(asList(2L, 3L)), currentSnapshotOffsets());
    // Deleting before offset 3 removes only the snapshot at offset 2.
    stateManager.deleteSnapshotsBefore(3L);
    assertEquals(1, Objects.requireNonNull(logDir.listFiles()).length);
    assertEquals(singleton(3L), currentSnapshotOffsets());
    // Deleting before offset 4 removes the remaining snapshot at offset 3.
    stateManager.deleteSnapshotsBefore(4L);
    assertEquals(0, Objects.requireNonNull(logDir.listFiles()).length);
    assertEquals(emptySet(), currentSnapshotOffsets());
}
/**
 * Extracts the configured property/field value from the given object,
 * delegating to AssertJ's property-or-field extraction support.
 *
 * @param input the object to read from
 * @return the value of the property or field named {@code propertyOrFieldName}
 */
@Override
public Object apply(Object input) {
    return PropertyOrFieldSupport.EXTRACTION.getValueOf(propertyOrFieldName, input);
}
/** An empty property/field name must be rejected with IllegalArgumentException. */
@Test
void should_throw_exception_when_given_name_is_empty() {
    // GIVEN
    ByNameSingleExtractor underTest = new ByNameSingleExtractor("");
    // WHEN
    Throwable thrown = catchThrowable(() -> underTest.apply(YODA));
    // THEN
    then(thrown).isInstanceOf(IllegalArgumentException.class)
        .hasMessage("The name of the property/field to read should not be empty");
}
/**
 * Opens the operator and creates its worker thread pool, named with the
 * operator's unique id so pools from different operators can be told apart.
 */
@Override
public void open() throws Exception {
    super.open();
    final String operatorID = getRuntimeContext().getOperatorUniqueID();
    this.workerPool = ThreadPools.newWorkerPool("iceberg-worker-pool-" + operatorID, workerPoolSize);
}
/**
 * Checkpoints without any data files must still advance the max committed
 * checkpoint id in the Iceberg snapshot so a later job failover can recover.
 */
@TestTemplate
public void testCommitTxnWithoutDataFiles() throws Exception {
    long checkpointId = 0;
    long timestamp = 0;
    JobID jobId = new JobID();
    OperatorID operatorId;
    try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) {
        harness.setup();
        harness.open();
        operatorId = harness.getOperator().getOperatorID();
        SimpleDataUtil.assertTableRows(table, Lists.newArrayList(), branch);
        assertSnapshotSize(0);
        assertMaxCommittedCheckpointId(jobId, operatorId, -1L);
        // It's better to advance the max-committed-checkpoint-id in the iceberg
        // snapshot, so that a future flink job failover won't fail.
        for (int i = 1; i <= 3; i++) {
            harness.snapshot(++checkpointId, ++timestamp);
            assertFlinkManifests(0);
            harness.notifyOfCompletedCheckpoint(checkpointId);
            assertFlinkManifests(0);
            assertSnapshotSize(i);
            assertMaxCommittedCheckpointId(jobId, operatorId, checkpointId);
        }
    }
}
/**
 * Performs bicubic spline interpolation at the point (x1p, x2p): first every
 * row spline is evaluated at x2p, then a column spline over those results is
 * evaluated at x1p.
 *
 * NOTE(review): this writes into the shared {@code yv} buffer, so concurrent
 * calls on the same instance are not safe — confirm single-threaded use.
 *
 * @param x1p the coordinate along the first axis
 * @param x2p the coordinate along the second axis
 * @return the interpolated value
 */
@Override
public double interpolate(double x1p, double x2p) {
    // Evaluate each row spline at x2p to form one column of samples.
    for (int i = 0; i < m; i++) {
        yv[i] = srp[i].interpolate(x2p);
    }
    // Interpolate down the resulting column at x1p.
    CubicSplineInterpolation1D scol = new CubicSplineInterpolation1D(x1, yv);
    return scol.interpolate(x1p);
}
/** Bicubic interpolation must reproduce known reference values on a 5x3 sample grid. */
@Test
public void testInterpolate() {
    System.out.println("interpolate");
    double[] x1 = {1950, 1960, 1970, 1980, 1990};
    double[] x2 = {10, 20, 30};
    double[][] y = {
        {150.697, 199.592, 187.625},
        {179.323, 195.072, 250.287},
        {203.212, 179.092, 322.767},
        {226.505, 153.706, 426.730},
        {249.633, 120.281, 598.243}
    };
    CubicSplineInterpolation2D instance = new CubicSplineInterpolation2D(x1, x2, y);
    assertEquals(167.9922755, instance.interpolate(1975, 15), 1E-7);
    assertEquals(167.5167746, instance.interpolate(1975, 20), 1E-7);
    assertEquals(244.3006193, instance.interpolate(1975, 25), 1E-7);
}
@Override public void close() throws BlockStoreException { try { buffer.force(); buffer = null; // Allow it to be GCd and the underlying file mapping to go away. fileLock.release(); randomAccessFile.close(); blockCache.clear(); } catch (IOException e) { throw new BlockStoreException(e); } }
/** A store may be closed and a new one opened on the same file sequentially. */
@Test
public void twoStores_butSequentially() throws Exception {
    SPVBlockStore store = new SPVBlockStore(TESTNET, blockStoreFile);
    store.close();
    // Reopening after close must succeed (file lock must have been released).
    store = new SPVBlockStore(TESTNET, blockStoreFile);
}
/**
 * Returns all vulnerability-detector plugins matched against the given
 * reconnaissance report. Each installed plugin that is a VulnDetector is
 * matched via {@code matchAllVulnDetectors}; a single plugin definition may
 * yield several matching results.
 */
public ImmutableList<PluginMatchingResult<VulnDetector>> getVulnDetectors(
    ReconnaissanceReport reconnaissanceReport) {
  return tsunamiPlugins.entrySet().stream()
      .filter(entry -> isVulnDetector(entry.getKey()))
      .map(entry -> matchAllVulnDetectors(entry.getKey(), entry.getValue(), reconnaissanceReport))
      .flatMap(Streams::stream)
      .collect(toImmutableList());
}
/**
 * A remote detector with a software filter must still be matched as a plugin,
 * but none of the scanned services may pass its filter when no service runs
 * the filtered software.
 */
@Test
public void getVulnDetectors_whenRemoteDetectorWithSoftwareFilterHasNoMatchingService_returnsNoServices() {
    NetworkService wordPressService = NetworkService.newBuilder()
        .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443))
        .setTransportProtocol(TransportProtocol.TCP)
        .setServiceName("https")
        .setSoftware(Software.newBuilder().setName("WordPress"))
        .build();
    ReconnaissanceReport fakeReconnaissanceReport = ReconnaissanceReport.newBuilder()
        .setTargetInfo(TargetInfo.getDefaultInstance())
        .addNetworkServices(wordPressService)
        .build();
    PluginManager pluginManager = Guice.createInjector(
            new FakePortScannerBootstrapModule(),
            new FakeServiceFingerprinterBootstrapModule(),
            FakeFilteringRemoteDetector.getModule())
        .getInstance(PluginManager.class);
    ImmutableList<PluginMatchingResult<VulnDetector>> vulnDetectors = pluginManager.getVulnDetectors(fakeReconnaissanceReport);
    assertThat(vulnDetectors).hasSize(1);
    ImmutableList<MatchedPlugin> matchedResult = ((FakeFilteringRemoteDetector) vulnDetectors.get(0).tsunamiPlugin()).getMatchedPlugins();
    assertThat(matchedResult).hasSize(4);
    // No service survived the software filter in any of the matched plugins.
    for (var mr : matchedResult) {
        assertThat(mr.getServicesCount()).isEqualTo(0);
    }
}
/**
 * Handles ALTER STORAGE UNIT: validates the statement and the new data source
 * properties, then persists the altered storage units.
 *
 * @throws StorageUnitsOperateException if the underlying alter operation fails
 */
@Override
public void executeUpdate(final AlterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
    checkBefore(sqlStatement);
    // Convert the DistSQL segments into concrete data source pool properties.
    Map<String, DataSourcePoolProperties> propsMap = DataSourceSegmentsConverter.convert(database.getProtocolType(), sqlStatement.getStorageUnits());
    validateHandler.validate(propsMap, getExpectedPrivileges(sqlStatement));
    try {
        contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().alterStorageUnits(database.getName(), propsMap);
    } catch (final SQLException | ShardingSphereExternalException ex) {
        // Wrap storage-layer failures into a unified operate exception.
        throw new StorageUnitsOperateException("alter", propsMap.keySet(), ex);
    }
}
/** Altering a storage unit that does not exist must raise MissingRequiredStorageUnitsException. */
@Test
void assertExecuteUpdateWithNotExistedStorageUnitNames() {
    assertThrows(MissingRequiredStorageUnitsException.class,
        () -> executor.executeUpdate(createAlterStorageUnitStatement("not_existed"), mockContextManager(mock(MetaDataContexts.class, RETURNS_DEEP_STUBS))));
}
/**
 * Proceeds with the join point and decorates its RxJava 3 return value with
 * the given bulkhead operator.
 *
 * @param proceedingJoinPoint the intercepted method invocation
 * @param bulkhead            the bulkhead protecting the invocation
 * @param methodName          the intercepted method's name (for diagnostics)
 * @return the reactive return value wrapped with the bulkhead operator
 */
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Bulkhead bulkhead, String methodName) throws Throwable {
    BulkheadOperator<?> bulkheadOperator = BulkheadOperator.of(bulkhead);
    Object returnValue = proceedingJoinPoint.proceed();
    return executeRxJava3Aspect(bulkheadOperator, returnValue);
}
/** The aspect must decorate both Single and Flowable return values. */
@Test
public void testRxTypes() throws Throwable {
    Bulkhead bulkhead = Bulkhead.ofDefaults("test");
    when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
    assertThat(rxJava3BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
        .isNotNull();
    when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
    assertThat(rxJava3BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
        .isNotNull();
}
/**
 * Returns the WeChat Mini App service for the given user type: an enabled
 * DB-configured social client takes precedence; otherwise the service built
 * from the application-*.yaml configuration is used.
 */
@VisibleForTesting
WxMaService getWxMaService(Integer userType) {
    // Step 1: look up the DB configuration entry and use its WxMaService if enabled.
    SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
            SocialTypeEnum.WECHAT_MINI_APP.getType(), userType);
    if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
        return wxMaServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
    }
    // Step 2: no usable DB entry — fall back to the yaml-configured WxMaService.
    return wxMaService;
}
/** An enabled DB client must take precedence over the yaml-configured service. */
@Test
public void testGetWxMaService_clientEnable() {
    // Prepare parameters.
    Integer userType = randomPojo(UserTypeEnum.class).getValue();
    // Mock data: an enabled mini-app client for this user type.
    SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())
            .setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MINI_APP.getType()));
    socialClientMapper.insert(client);
    // Mock collaborator behaviour.
    WxMaProperties.ConfigStorage configStorage = mock(WxMaProperties.ConfigStorage.class);
    when(wxMaProperties.getConfigStorage()).thenReturn(configStorage);
    // Invoke.
    WxMaService result = socialClientService.getWxMaService(userType);
    // Assert: a fresh service built from the DB client, not the yaml default.
    assertNotSame(wxMaService, result);
    assertEquals(client.getClientId(), result.getWxMaConfig().getAppid());
    assertEquals(client.getClientSecret(), result.getWxMaConfig().getSecret());
}
/**
 * Collects one round of saturation measurements: computes the maximum
 * saturation across all Kafka Streams runtimes, reports it, and drops cached
 * per-runtime stats for app ids that no longer have queries.
 */
@Override
public void run() {
    final Instant now = time.get();
    try {
        final Collection<PersistentQueryMetadata> queries = engine.getPersistentQueries();
        // Group queries by their shared Kafka Streams runtime and take the most
        // saturated runtime as the node-level saturation (0.0 when none).
        final Optional<Double> saturation = queries.stream()
            .collect(Collectors.groupingBy(PersistentQueryMetadata::getQueryApplicationId))
            .entrySet()
            .stream()
            .map(e -> measure(now, e.getKey(), e.getValue()))
            .max(PersistentQuerySaturationMetrics::compareSaturation)
            .orElse(Optional.of(0.0));
        saturation.ifPresent(s -> report(now, s));
        // Clean up metrics for runtimes whose queries have all gone away.
        final Set<String> appIds = queries.stream()
            .map(PersistentQueryMetadata::getQueryApplicationId)
            .collect(Collectors.toSet());
        for (final String appId : Sets.difference(new HashSet<>(perKafkaStreamsStats.keySet()), appIds)) {
            perKafkaStreamsStats.get(appId).cleanup(reporter);
            perKafkaStreamsStats.remove(appId);
        }
    } catch (final RuntimeException e) {
        // Log before rethrowing. NOTE(review): rethrowing from a scheduled task
        // cancels future runs — confirm that is the intended behavior.
        LOGGER.error("Error collecting saturation", e);
        throw e;
    }
}
/** Node saturation must be the maximum blocked-time ratio across runtimes over the window. */
@Test
public void shouldComputeSaturationForNode() {
    // Given: two runtimes with equal baseline blocked times at the window start ...
    final Instant start = Instant.now();
    when(clock.get()).thenReturn(start);
    givenMetrics(kafkaStreams1)
        .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t1", Duration.ofMinutes(2));
    givenMetrics(kafkaStreams2)
        .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t1", Duration.ofMinutes(2));
    collector.run();
    // ... and diverging blocked times one window later.
    when(clock.get()).thenReturn(start.plus(WINDOW));
    givenMetrics(kafkaStreams1)
        .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t1", Duration.ofMinutes(3));
    givenMetrics(kafkaStreams2)
        .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t1", Duration.ofMinutes(7));
    // When:
    collector.run();
    // Then: the node-level metric reflects the more saturated runtime.
    final DataPoint point = verifyAndGetLatestDataPoint(
        "max-node-query-saturation",
        Collections.emptyMap()
    );
    assertThat((Double) point.getValue(), closeTo(.9, .01));
}
/**
 * Returns up to {@code sampleCount} randomly sampled entries of the map.
 * Sampling is lazy: entries are drawn as the returned iterable is consumed.
 *
 * @param sampleCount number of samples to draw; must not be negative
 * @return an iterable over the sampled entries; empty when the map is empty
 *         or {@code sampleCount} is zero
 * @throws IllegalArgumentException if {@code sampleCount} is negative
 */
public <E extends SamplingEntry> Iterable<E> getRandomSamples(int sampleCount) {
    if (sampleCount < 0) {
        throw new IllegalArgumentException("Sample count cannot be a negative value.");
    }
    // Only sample when there is something to sample and a positive count.
    if (sampleCount > 0 && size() > 0) {
        return new LazySamplingEntryIterableIterator<>(sampleCount);
    }
    return Collections.emptyList();
}
/** The sample iterator's hasNext must be idempotent and next must yield unique entries. */
@Test
public void testIteratorContract() {
    final int entryCount = 100;
    final int sampleCount = 30;
    map = new SampleableConcurrentHashMap<>(100);
    for (int i = 0; i < entryCount; i++) {
        map.put(i, i);
    }
    Iterable<SampleableConcurrentHashMap.SamplingEntry<Integer, Integer>> samples = map.getRandomSamples(sampleCount);
    Iterator<SampleableConcurrentHashMap.SamplingEntry<Integer, Integer>> iterator = samples.iterator();
    // hasNext should not consume the items
    for (int i = 0; i < entryCount * 2; i++) {
        assertTrue(iterator.hasNext());
    }
    Set<Integer> set = new HashSet<>();
    // should return unique samples
    for (int i = 0; i < sampleCount; i++) {
        set.add(iterator.next().key);
    }
    assertEquals(30, set.size());
    assertFalse(iterator.hasNext());
}
/**
 * Builds a hardened XSLT {@link Transformer}: secure processing is enabled
 * and access to external DTDs and stylesheets is disabled (XXE mitigation).
 * The returned transformer indents its output with two spaces.
 *
 * @return a newly configured transformer
 * @throws TransformerConfigurationException if the factory cannot create one
 */
public static Transformer buildTransformer() throws TransformerConfigurationException {
    final TransformerFactory factory = TransformerFactory.newInstance();
    // Block external entity and stylesheet resolution to prevent XXE-style attacks.
    factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
    factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "");
    factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
    final Transformer transformer = factory.newTransformer();
    // Pretty-print the output with a two-space indent.
    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
    transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
    return transformer;
}
/** The hardened transformer factory must produce a non-null transformer. */
@Test
public void testBuildTransformer() throws Exception {
    assertNotNull(XmlHelper.buildTransformer());
}
/**
 * Validates the configuration: rejects site-property keys that may only be
 * set as JVM properties, warns about user-set deprecated keys, and runs the
 * grouped consistency checks (timeouts, buffer bytes, ZooKeeper, checkpoint
 * compression).
 */
@Override
public void validate() {
    // Validation can be switched off entirely via configuration.
    if (!getBoolean(PropertyKey.CONF_VALIDATION_ENABLED)) {
        return;
    }
    for (PropertyKey key : keySet()) {
        // Some keys are ignored in alluxio-site.properties and must be set as
        // JVM properties instead; reject them when sourced from the site file.
        checkState(
            getSource(key).getType() != Source.Type.SITE_PROPERTY || !key.isIgnoredSiteProperty(),
            "%s is not accepted in alluxio-site.properties, "
                + "and must be specified as a JVM property. "
                + "If no JVM property is present, Alluxio will use default value '%s'.",
            key.getName(), key.getDefaultValue());
        // Deprecated keys still work but trigger a warning when set by the user.
        if (PropertyKey.isDeprecated(key) && isSetByUser(key)) {
            LOG.warn("{} is deprecated. Please avoid using this key in the future. {}",
                key.getName(), PropertyKey.getDeprecationMessage(key));
        }
    }
    checkTimeouts();
    checkUserFileBufferBytes();
    checkZkConfiguration();
    checkCheckpointZipConfig();
}
/** Validation must not log deprecation warnings when no deprecated keys are set. */
@Test
public void testDeprecatedKeysNotLogged() {
    mConfiguration.validate();
    assertFalse(mLogger.wasLogged(" is deprecated"));
}
/**
 * Returns a provider that resolves, per entity class, the search-indexing
 * metadata for its fields; entities without an index descriptor get the
 * no-indexing placeholder.
 */
@Override
public IndexedFieldProvider<Class<?>> getIndexedFieldProvider() {
    return entityType -> {
        IndexDescriptor indexDescriptor = getIndexDescriptor(entityType);
        if (indexDescriptor == null) {
            // The entity is not indexed at all.
            return CLASS_NO_INDEXING;
        }
        return new SearchFieldIndexingMetadata(indexDescriptor);
    };
}
/** A field indexed without analysis must be reported as not analyzed. */
@Test
public void testRecognizeUnanalyzedField() {
    assertThat(propertyHelper.getIndexedFieldProvider().get(TestEntity.class).isAnalyzed(new String[]{"i"})).isFalse();
}
/**
 * Describes the KRaft metadata quorum by sending a DescribeQuorum request for
 * the single cluster-metadata partition and translating the response into a
 * {@link QuorumInfo}. The request is routed to the least-loaded broker or the
 * active KRaft controller.
 */
@Override
public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options) {
    NodeProvider provider = new LeastLoadedBrokerOrActiveKController();
    final KafkaFutureImpl<QuorumInfo> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call(
            "describeMetadataQuorum", calcDeadlineMs(now, options.timeoutMs()), provider) {

        // Converts one wire-format replica state into the public API type.
        private QuorumInfo.ReplicaState translateReplicaState(DescribeQuorumResponseData.ReplicaState replica) {
            return new QuorumInfo.ReplicaState(
                    replica.replicaId(),
                    replica.replicaDirectoryId() == null ? Uuid.ZERO_UUID : replica.replicaDirectoryId(),
                    replica.logEndOffset(),
                    // -1 is the wire sentinel for "no timestamp available".
                    replica.lastFetchTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastFetchTimestamp()),
                    replica.lastCaughtUpTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastCaughtUpTimestamp()));
        }

        // Builds the QuorumInfo result from the partition data and node listing.
        private QuorumInfo createQuorumResult(final DescribeQuorumResponseData.PartitionData partition,
                                              DescribeQuorumResponseData.NodeCollection nodeCollection) {
            List<QuorumInfo.ReplicaState> voters = partition.currentVoters().stream()
                    .map(this::translateReplicaState)
                    .collect(Collectors.toList());
            List<QuorumInfo.ReplicaState> observers = partition.observers().stream()
                    .map(this::translateReplicaState)
                    .collect(Collectors.toList());
            Map<Integer, QuorumInfo.Node> nodes = nodeCollection.stream().map(n -> {
                List<RaftVoterEndpoint> endpoints = n.listeners().stream()
                        .map(l -> new RaftVoterEndpoint(l.name(), l.host(), l.port()))
                        .collect(Collectors.toList());
                return new QuorumInfo.Node(n.nodeId(), endpoints);
            }).collect(Collectors.toMap(QuorumInfo.Node::nodeId, Function.identity()));
            return new QuorumInfo(
                    partition.leaderId(),
                    partition.leaderEpoch(),
                    partition.highWatermark(),
                    voters,
                    observers,
                    nodes
            );
        }

        @Override
        DescribeQuorumRequest.Builder createRequest(int timeoutMs) {
            // The metadata quorum lives in exactly one well-known partition.
            return new Builder(DescribeQuorumRequest.singletonRequest(
                    new TopicPartition(CLUSTER_METADATA_TOPIC_NAME, CLUSTER_METADATA_TOPIC_PARTITION.partition())));
        }

        @Override
        void handleResponse(AbstractResponse response) {
            final DescribeQuorumResponse quorumResponse = (DescribeQuorumResponse) response;
            // Surface a top-level error first.
            if (quorumResponse.data().errorCode() != Errors.NONE.code()) {
                throw Errors.forCode(quorumResponse.data().errorCode()).exception(quorumResponse.data().errorMessage());
            }
            // Sanity-check the response shape: exactly one topic ...
            if (quorumResponse.data().topics().size() != 1) {
                String msg = String.format("DescribeMetadataQuorum received %d topics when 1 was expected", quorumResponse.data().topics().size());
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            DescribeQuorumResponseData.TopicData topic = quorumResponse.data().topics().get(0);
            // ... carrying the cluster-metadata topic name ...
            if (!topic.topicName().equals(CLUSTER_METADATA_TOPIC_NAME)) {
                String msg = String.format("DescribeMetadataQuorum received a topic with name %s when %s was expected", topic.topicName(), CLUSTER_METADATA_TOPIC_NAME);
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            // ... with exactly one partition ...
            if (topic.partitions().size() != 1) {
                String msg = String.format("DescribeMetadataQuorum received a topic %s with %d partitions when 1 was expected", topic.topicName(), topic.partitions().size());
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            DescribeQuorumResponseData.PartitionData partition = topic.partitions().get(0);
            // ... at the expected partition index.
            if (partition.partitionIndex() != CLUSTER_METADATA_TOPIC_PARTITION.partition()) {
                String msg = String.format("DescribeMetadataQuorum received a single partition with index %d when %d was expected", partition.partitionIndex(), CLUSTER_METADATA_TOPIC_PARTITION.partition());
                log.debug(msg);
                throw new UnknownServerException(msg);
            }
            // Finally surface any partition-level error before completing.
            if (partition.errorCode() != Errors.NONE.code()) {
                throw Errors.forCode(partition.errorCode()).exception(partition.errorMessage());
            }
            future.complete(createQuorumResult(partition, quorumResponse.data().nodes()));
        }

        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new DescribeMetadataQuorumResult(future);
}
/** describeMetadataQuorum must parse responses both with and without optional fields. */
@Test
public void testDescribeMetadataQuorumSuccess() throws Exception {
    try (final AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.DESCRIBE_QUORUM.id,
            ApiKeys.DESCRIBE_QUORUM.oldestVersion(),
            ApiKeys.DESCRIBE_QUORUM.latestVersion()));
        // Test with optional fields set
        env.kafkaClient().prepareResponse(
            body -> body instanceof DescribeQuorumRequest,
            prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, false, false, false, false, false));
        KafkaFuture<QuorumInfo> future = env.adminClient().describeMetadataQuorum().quorumInfo();
        QuorumInfo quorumInfo = future.get();
        assertEquals(defaultQuorumInfo(false), quorumInfo);
        // Test with optional fields empty
        env.kafkaClient().prepareResponse(
            body -> body instanceof DescribeQuorumRequest,
            prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, false, false, false, false, true));
        future = env.adminClient().describeMetadataQuorum().quorumInfo();
        quorumInfo = future.get();
        assertEquals(defaultQuorumInfo(true), quorumInfo);
    }
}
/**
 * Returns a {@link PCollectionTuple} bound to the given {@link Pipeline} that
 * contains no tagged collections.
 */
public static PCollectionTuple empty(Pipeline pipeline) {
    // A freshly constructed tuple starts out with no tag-to-collection entries.
    return new PCollectionTuple(pipeline);
}
@Test
public void testEmpty() {
    // An empty tuple must not report membership for any tag, including a brand-new one.
    TupleTag<Object> unknownTag = new TupleTag<>();
    assertFalse(PCollectionTuple.empty(pipeline).has(unknownTag));
}
/**
 * Creates an unbounded {@link Read} of {@link JmsRecord}s with default settings:
 * no record cap, {@link SerializableCoder}, the default close timeout, deduping
 * disabled, and a mapper that converts each {@link TextMessage} into a
 * {@link JmsRecord} carrying the standard JMS headers plus all message properties.
 */
public static Read<JmsRecord> read() {
    // Default mapper: treat every incoming message as a TextMessage and copy
    // its JMS headers and custom properties into an immutable JmsRecord.
    MessageMapper<JmsRecord> defaultMapper =
        new MessageMapper<JmsRecord>() {
            @Override
            public JmsRecord mapMessage(Message message) throws Exception {
                TextMessage textMessage = (TextMessage) message;
                Map<String, Object> properties = new HashMap<>();
                @SuppressWarnings("rawtypes")
                Enumeration propertyNames = textMessage.getPropertyNames();
                while (propertyNames.hasMoreElements()) {
                    String propertyName = (String) propertyNames.nextElement();
                    properties.put(propertyName, textMessage.getObjectProperty(propertyName));
                }
                return new JmsRecord(
                    textMessage.getJMSMessageID(),
                    textMessage.getJMSTimestamp(),
                    textMessage.getJMSCorrelationID(),
                    textMessage.getJMSReplyTo(),
                    textMessage.getJMSDestination(),
                    textMessage.getJMSDeliveryMode(),
                    textMessage.getJMSRedelivered(),
                    textMessage.getJMSType(),
                    textMessage.getJMSExpiration(),
                    textMessage.getJMSPriority(),
                    properties,
                    textMessage.getText());
            }
        };

    return new AutoValue_JmsIO_Read.Builder<JmsRecord>()
        .setMaxNumRecords(Long.MAX_VALUE)
        .setCoder(SerializableCoder.of(JmsRecord.class))
        .setCloseTimeout(DEFAULT_CLOSE_TIMEOUT)
        .setRequiresDeduping(false)
        .setMessageMapper(defaultMapper)
        .build();
}
@Test
public void testAuthenticationWithBadPassword() {
    pipeline.apply(
        JmsIO.read()
            .withConnectionFactory(connectionFactory)
            .withQueue(QUEUE)
            .withUsername(USERNAME)
            .withPassword("BAD"));

    // The broker-specific failure message differs between ActiveMQ "classic"
    // and other connection factory implementations.
    String expectedError;
    if (this.connectionFactoryClass == ActiveMQConnectionFactory.class) {
        expectedError = "User name [" + USERNAME + "] or password is invalid.";
    } else {
        expectedError = "Client failed to authenticate using SASL: PLAIN";
    }
    runPipelineExpectingJmsConnectException(expectedError);
}
@Override
protected Result check() {
    final HttpHealthResponse response = httpCheck(url);
    // Guard clause: report the failure (with the raw response for debugging)
    // before falling through to the healthy case.
    if (!isHealthResponseValid(response)) {
        LOGGER.debug("Health check against url={} failed with response={}", url, response);
        return Result.unhealthy("Http health check against url=%s failed with response=%s", url, response);
    }
    LOGGER.debug("Health check against url={} successful", url);
    return Result.healthy();
}
@Test
void httpHealthCheckShouldConsiderA200ResponseHealthy() {
    // Serve a bare 200 with an empty body on the success path.
    httpServer.createContext(SUCCESS_PATH, httpExchange -> {
        try {
            httpExchange.sendResponseHeaders(200, 0);
        } finally {
            httpExchange.close();
        }
    });
    httpServer.start();

    String checkUrl = BASE_URI + httpServer.getAddress().getPort() + SUCCESS_PATH;
    final HttpHealthCheck underTest = new HttpHealthCheck(checkUrl);
    assertThat(underTest.check().isHealthy()).isTrue();
}
/**
 * Builds the operator configuration from the given environment-variable map,
 * resolving the Kafka version/image lookup from the image-list variables first.
 */
public static ClusterOperatorConfig buildFromMap(Map<String, String> map) {
    // Log warnings for any environment variables that are no longer supported.
    warningsForRemovedEndVars(map);

    KafkaVersion.Lookup versionLookup = parseKafkaVersions(
            map.get(STRIMZI_KAFKA_IMAGES),
            map.get(STRIMZI_KAFKA_CONNECT_IMAGES),
            map.get(STRIMZI_KAFKA_MIRROR_MAKER_IMAGES),
            map.get(STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES));

    return buildFromMap(map, versionLookup);
}
@Test public void testInvalidFeatureGate() { // We test that the configuration is really parsing the feature gates environment variable. We test it on // non-existing feature gate instead of a real one so that we do not have to change it when the FGs are promoted Map<String, String> envVars = new HashMap<>(ClusterOperatorConfigTest.ENV_VARS); envVars.put(ClusterOperatorConfig.FEATURE_GATES.key(), "-NonExistingGate"); InvalidConfigurationException e = assertThrows(InvalidConfigurationException.class, () -> ClusterOperatorConfig.buildFromMap(envVars, KafkaVersionTestUtils.getKafkaVersionLookup())); assertThat(e.getMessage(), containsString("Unknown feature gate NonExistingGate found in the configuration")); }
/**
 * Returns the process exit status byte, delegating to the {@code exitStatus}
 * collaborator that aggregates the outcome of executed test cases.
 */
public byte exitStatus() {
    return exitStatus.exitStatus();
}
@Test
void with_pending_scenarios() {
    Runtime underTest = createRuntime();
    // A pending scenario must surface as a non-zero (0x1) exit status.
    bus.send(testCaseFinishedWithStatus(Status.PENDING));
    assertThat(underTest.exitStatus(), is(equalTo((byte) 0x1)));
}
/**
 * Runs a single producer cycle with the given populator and no cycle header,
 * returning the resulting state version. Delegates to the two-argument overload
 * with a {@code null} header.
 */
public long runCycle(HollowProducer.Populator task) {
    return runCycle(null, task);
}
@Test public void testPopulateNoChangesVersion() { HollowProducer producer = createProducer(tmpFolder); long v1 = producer.runCycle(ws -> { ws.add(1); }); assertEquals(producer.getCycleCountWithPrimaryStatus(), 1); // Run cycle with no changes long v2 = producer.runCycle(ws -> { ws.add(1); }); assertEquals(producer.getCycleCountWithPrimaryStatus(), 2); long v3 = producer.runCycle(ws -> { ws.add(2); }); assertEquals(producer.getCycleCountWithPrimaryStatus(), 3); assertEquals(v1, v2); assertTrue(v3 > v2); }
@ApiOperation(value = "Create Or Update Widget Bundle (saveWidgetsBundle)",
        notes = "Create or update the Widget Bundle. " + WIDGET_BUNDLE_DESCRIPTION + " " +
                "When creating the bundle, platform generates Widget Bundle Id as " + UUID_WIKI_LINK +
                "The newly created Widget Bundle Id will be present in the response. " +
                "Specify existing Widget Bundle id to update the Widget Bundle. " +
                "Referencing non-existing Widget Bundle Id will cause 'Not Found' error." +
                "\n\nWidget Bundle alias is unique in the scope of tenant. " +
                "Special Tenant Id '13814000-1dd2-11b2-8080-808080808080' is automatically used if the create bundle request is sent by user with 'SYS_ADMIN' authority." +
                "Remove 'id', 'tenantId' from the request body example (below) to create new Widgets Bundle entity." +
                SYSTEM_OR_TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN')")
@RequestMapping(value = "/widgetsBundle", method = RequestMethod.POST)
@ResponseBody
public WidgetsBundle saveWidgetsBundle(
        @Parameter(description = "A JSON value representing the Widget Bundle.", required = true)
        @RequestBody WidgetsBundle widgetsBundle) throws Exception {
    var currentUser = getCurrentUser();
    // System administrators own bundles under the special system tenant;
    // everyone else stores them under their own tenant.
    TenantId ownerTenantId = Authority.SYS_ADMIN.equals(currentUser.getAuthority())
            ? TenantId.SYS_TENANT_ID
            : currentUser.getTenantId();
    widgetsBundle.setTenantId(ownerTenantId);
    checkEntity(widgetsBundle.getId(), widgetsBundle, Resource.WIDGETS_BUNDLE);
    return tbWidgetsBundleService.save(widgetsBundle, currentUser);
}
@Test
public void testSaveWidgetsBundle() throws Exception {
    WidgetsBundle bundleToCreate = new WidgetsBundle();
    bundleToCreate.setTitle("My widgets bundle");

    Mockito.reset(tbClusterService, auditLogService);

    // Create: the saved entity must be fully populated and an ADDED event emitted.
    WidgetsBundle saved = doPost("/api/widgetsBundle", bundleToCreate, WidgetsBundle.class);
    testNotifyEntityAllOneTime(saved, saved.getId(), saved.getId(),
            savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(),
            ActionType.ADDED);
    Mockito.reset(tbClusterService, auditLogService);

    Assert.assertNotNull(saved);
    Assert.assertNotNull(saved.getId());
    Assert.assertNotNull(saved.getAlias());
    Assert.assertTrue(saved.getCreatedTime() > 0);
    Assert.assertEquals(savedTenant.getId(), saved.getTenantId());
    Assert.assertEquals(bundleToCreate.getTitle(), saved.getTitle());

    // Update: changing the title must persist and emit an UPDATED event.
    saved.setTitle("My new widgets bundle");
    doPost("/api/widgetsBundle", saved, WidgetsBundle.class);
    WidgetsBundle found = doGet("/api/widgetsBundle/" + saved.getId().getId().toString(), WidgetsBundle.class);
    Assert.assertEquals(found.getTitle(), saved.getTitle());
    testNotifyEntityAllOneTime(saved, saved.getId(), saved.getId(),
            savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(),
            ActionType.UPDATED);
}
/**
 * Returns {@code true} only when a store is provided and its plugin id resolves
 * to registered plugin metadata.
 */
public boolean hasValidPluginAndStore(ArtifactStore artifactStore) {
    if (artifactStore == null) {
        return false;
    }
    // The store is only usable when its plugin has registered metadata.
    return ArtifactMetadataStore.instance().getPluginInfo(artifactStore.getPluginId()) != null;
}
@Test
public void hasValidPluginAndStore_shouldReturnFalseIfStoreDoesNotExist() {
    // A store whose plugin id has no registered metadata must be rejected.
    PluggableArtifactConfig config = new PluggableArtifactConfig("dist", "s3");
    assertFalse(config.hasValidPluginAndStore(new ArtifactStore("docker", "cd.go.docker")));
}
@Override public Stream<Path> getKShortestPaths(ElementId src, ElementId dst, LinkWeigher weigher) { checkNotNull(src, ELEMENT_ID_NULL); checkNotNull(dst, ELEMENT_ID_NULL); LinkWeigher internalWeigher = weigher != null ? weigher : DEFAULT_WEIGHER; // Get the source and destination edge locations EdgeLink srcEdge = getEdgeLink(src, true); EdgeLink dstEdge = getEdgeLink(dst, false); // If either edge is null, bail with no paths. if (srcEdge == null || dstEdge == null) { return Stream.empty(); } DeviceId srcDevice = srcEdge != NOT_HOST ? srcEdge.dst().deviceId() : (DeviceId) src; DeviceId dstDevice = dstEdge != NOT_HOST ? dstEdge.src().deviceId() : (DeviceId) dst; // If the source and destination are on the same edge device, there // is just one path, so build it and return it. if (srcDevice.equals(dstDevice)) { return Stream.of(edgeToEdgePath(srcEdge, dstEdge, null, internalWeigher)); } // Otherwise get all paths between the source and destination edge // devices. Topology topology = topologyService.currentTopology(); return topologyService.getKShortestPaths(topology, srcDevice, dstDevice, internalWeigher) .map(path -> edgeToEdgePath(srcEdge, dstEdge, path, internalWeigher)); }
@Test
public void testKShortestPath() {
    topoMgr.definePaths(ImmutableSet.of(path1));
    List<Path> result = service.getKShortestPaths(did("A"), did("C"), new TestWeigher())
            .collect(Collectors.toList());
    checkPaths(result);
}
@Override
public EvaluatedQualityGate evaluate(QualityGate gate, Measures measures, Configuration configuration) {
    EvaluatedQualityGate.Builder builder = EvaluatedQualityGate.newBuilder()
        .setQualityGate(gate);

    // Small changesets may be exempted from a subset of conditions,
    // controlled by a core property that defaults to enabled.
    boolean ignoreSmallChanges = configuration.getBoolean(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES).orElse(true);
    boolean smallChangeset = ignoreSmallChanges && isSmallChangeset(measures);

    gate.getConditions().forEach(condition -> {
        EvaluatedCondition evaluated = ConditionEvaluator.evaluate(condition, measures);
        boolean ignorable = smallChangeset
            && evaluated.getStatus() != EvaluationStatus.OK
            && METRICS_TO_IGNORE_ON_SMALL_CHANGESETS.contains(condition.getMetricKey());
        if (ignorable) {
            // Downgrade the failing condition to OK but record that it was ignored.
            builder.addEvaluatedCondition(
                new EvaluatedCondition(evaluated.getCondition(), EvaluationStatus.OK, evaluated.getValue().orElse(null)));
            builder.setIgnoredConditionsOnSmallChangeset(true);
        } else {
            builder.addEvaluatedCondition(evaluated);
        }
    });

    builder.setStatus(overallStatusOf(builder.getEvaluatedConditions()));
    return builder.build();
}
@Test
public void evaluate_is_ERROR() {
    // A measured value of 1 against a "> 0" threshold must trip the gate.
    Condition failingCondition = new Condition(NEW_MAINTAINABILITY_RATING_KEY, Condition.Operator.GREATER_THAN, "0");
    QualityGate gate = mock(QualityGate.class);
    when(gate.getConditions()).thenReturn(singleton(failingCondition));
    QualityGateEvaluator.Measures measures = key -> Optional.of(new FakeMeasure(1));

    assertThat(underTest.evaluate(gate, measures, configuration).getStatus())
        .isEqualTo(Metric.Level.ERROR);
}