Column: focal_method — string, length 13 to 60.9k characters
Column: test_case — string, length 25 to 109k characters
/**
 * Injects RAM authentication parameters (access key, signature headers and,
 * when STS is enabled, the security token) into the login identity context.
 */
@Override public void doInject(RequestResource resource, RamContext context, LoginIdentityContext result) {
    String accessKey = context.getAccessKey();
    String secretKey = context.getSecretKey();
    // STS temporary-credential authentication takes priority over AK/SK authentication
    if (StsConfig.getInstance().isStsOn()) {
        StsCredential stsCredential = StsCredentialHolder.getInstance().getStsCredential();
        accessKey = stsCredential.getAccessKeyId();
        secretKey = stsCredential.getAccessKeySecret();
        result.setParameter(IdentifyConstants.SECURITY_TOKEN_HEADER, stsCredential.getSecurityToken());
    }
    // NOTE(review): isNotEmpty vs isNotBlank is inconsistent between the two operands — confirm intended
    if (StringUtils.isNotEmpty(accessKey) && StringUtils.isNotBlank(secretKey)) {
        result.setParameter(ACCESS_KEY_HEADER, accessKey);
    }
    String signatureKey = secretKey;
    // A configured region id switches signing to the derived V4 key
    if (StringUtils.isNotEmpty(context.getRegionId())) {
        signatureKey = CalculateV4SigningKeyUtil.finalSigningKeyStringWithDefaultInfo(secretKey, context.getRegionId());
        result.setParameter(RamConstants.SIGNATURE_VERSION, RamConstants.V4);
    }
    Map<String, String> signHeaders = SpasAdapter.getSignHeaders(getResource(resource.getNamespace(), resource.getGroup()), signatureKey);
    result.setParameters(signHeaders);
}
// Verifies doInject with an empty group: exactly the access key, timestamp and
// signature parameters must be present in the resulting context.
@Test void testDoInjectWithTenant() throws Exception { resource.setGroup(""); LoginIdentityContext actual = new LoginIdentityContext(); configResourceInjector.doInject(resource, ramContext, actual); assertEquals(3, actual.getAllKey().size()); assertEquals(PropertyKeyConst.ACCESS_KEY, actual.getParameter("Spas-AccessKey")); assertTrue(actual.getAllKey().contains("Timestamp")); assertTrue(actual.getAllKey().contains("Spas-Signature")); }
// Bridge overload: adapts the key-less ValueJoiner to a ValueJoinerWithKey and
// delegates to the main leftJoin implementation.
@Override public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream, final ValueJoiner<? super V, ? super VO, ? extends VR> joiner, final JoinWindows windows) { return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows); }
@SuppressWarnings("deprecation") @Test public void shouldNotAllowNullValueJoinerOnLeftJoin() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.leftJoin(testStream, (ValueJoiner<? super String, ? super String, ?>) null, JoinWindows.of(ofMillis(10)))); assertThat(exception.getMessage(), equalTo("joiner can't be null")); }
/**
 * Authenticates the request: extracts the token, validates it, and returns the
 * principal role. A failed token extraction increments the INVALID_AUTH_DATA
 * failure metric before rethrowing; success records the success metric.
 */
@Override public String authenticate(AuthenticationDataSource authData) throws AuthenticationException {
    String token;
    try {
        // Get Token
        token = getToken(authData);
    } catch (AuthenticationException exception) {
        incrementFailureMetric(ErrorCode.INVALID_AUTH_DATA);
        throw exception;
    }
    // Parse Token by validating
    String role = getPrincipal(authenticateToken(token));
    AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
    return role;
}
// HTTP data with no Authorization header must fail with AuthenticationException.
@Test(expectedExceptions = AuthenticationException.class) public void testAuthenticateWhenAuthorizationHeaderNotExist() throws AuthenticationException { AuthenticationProviderToken provider = new AuthenticationProviderToken(); provider.authenticate(new AuthenticationDataSource() { @Override public String getHttpHeader(String name) { return null; } @Override public boolean hasDataFromHttp() { return true; } }); }
/**
 * Looks up the system proxy route for the target and converts it to a Proxy.
 * Falls back to Proxy.DIRECT when no route is configured, the route is not a
 * valid URI, or the URI scheme is not a supported Proxy.Type.
 */
@Override public Proxy find(final String target) {
    final String route = this.findNative(target);
    if(null == route) {
        if(log.isInfoEnabled()) {
            log.info(String.format("No proxy configuration found for target %s", target));
        }
        // Direct
        return Proxy.DIRECT;
    }
    final URI proxy;
    try {
        proxy = new URI(route);
        try {
            // User info is never populated. Would have to lookup in keychain but we are unaware of the username
            return new Proxy(Proxy.Type.valueOf(StringUtils.upperCase(proxy.getScheme())), proxy.getHost(), proxy.getPort());
        }
        catch(IllegalArgumentException e) {
            log.warn(String.format("Unsupported scheme for proxy %s", proxy));
        }
    }
    catch(URISyntaxException e) {
        log.warn(String.format("Invalid proxy configuration %s", route));
    }
    return Proxy.DIRECT;
}
// A host excluded from proxying must resolve to a DIRECT proxy type.
@Test public void testSimpleExcluded() { final SystemConfigurationProxy proxy = new SystemConfigurationProxy(); assertEquals(Proxy.Type.DIRECT, proxy.find("http://simple").getType()); }
/**
 * Advances the star by one step of its life cycle: age doubles, mass grows
 * eight-fold, and the star progresses to the next stage. A DEAD star doubles
 * its age a second time (net x4 per step) and loses all of its mass.
 */
public void timePasses() {
    ageYears *= 2;
    massTons *= 8;
    switch (type) {
        case SUN -> type = StarType.RED_GIANT;
        case RED_GIANT -> type = StarType.WHITE_DWARF;
        case WHITE_DWARF -> type = StarType.SUPERNOVA;
        case SUPERNOVA -> type = StarType.DEAD;
        case DEAD -> {
            // A dead star keeps aging (extra doubling) but has no mass left.
            ageYears *= 2;
            massTons = 0;
        }
        default -> { /* unknown stage: only the common aging above applies */ }
    }
}
// Walks a SUN star through every life-cycle stage, asserting the rendered
// age/mass/type string after each timePasses() call (DEAD quadruples age, zero mass).
@Test void testTimePasses() { final var star = new Star(StarType.SUN, 1, 2); assertEquals("sun age: 1 years mass: 2 tons", star.toString()); star.timePasses(); assertEquals("red giant age: 2 years mass: 16 tons", star.toString()); star.timePasses(); assertEquals("white dwarf age: 4 years mass: 128 tons", star.toString()); star.timePasses(); assertEquals("supernova age: 8 years mass: 1024 tons", star.toString()); star.timePasses(); assertEquals("dead star age: 16 years mass: 8192 tons", star.toString()); star.timePasses(); assertEquals("dead star age: 64 years mass: 0 tons", star.toString()); star.timePasses(); assertEquals("dead star age: 256 years mass: 0 tons", star.toString()); }
/**
 * Deletes the local state directory for the given named topology, wrapping any
 * IO failure in a StreamsException that reports the topology via a dummy TaskId.
 * NOTE(review): when the directory is missing only a debug line is logged and the
 * delete is still attempted — confirm Utils.delete tolerates a missing path.
 */
public void clearLocalStateForNamedTopology(final String topologyName) {
    final File namedTopologyDir = new File(stateDir, getNamedTopologyDirName(topologyName));
    if (!namedTopologyDir.exists() || !namedTopologyDir.isDirectory()) {
        log.debug("Tried to clear out the local state for NamedTopology {} but none was found", topologyName);
    }
    try {
        Utils.delete(namedTopologyDir);
    } catch (final IOException e) {
        log.error("Hit an unexpected error while clearing local state for topology " + topologyName, e);
        throw new StreamsException("Unable to delete state for the named topology " + topologyName,
            e, new TaskId(-1, -1, topologyName)); // use dummy taskid to report source topology for this error
    }
}
// An (empty) "__topology1__" directory must be removed by clearLocalStateForNamedTopology.
@Test public void shouldRemoveEmptyNamedTopologyDirsWhenCallingClearLocalStateForNamedTopology() throws IOException { initializeStateDirectory(true, true); final String topologyName = "topology1"; final File namedTopologyDir = new File(appDir, "__" + topologyName + "__"); assertThat(namedTopologyDir.mkdir(), is(true)); assertThat(namedTopologyDir.exists(), is(true)); directory.clearLocalStateForNamedTopology(topologyName); assertThat(namedTopologyDir.exists(), is(false)); }
// Selection-only result sets have no group-by key columns; calling this is a programming error.
@Override public String getGroupKeyColumnName(int groupKeyColumnIndex) { throw new AssertionError("No group key column name for selection results"); }
// Expects getGroupKeyColumnName on a selection result set to throw AssertionError.
@Test(expectedExceptions = AssertionError.class)
public void testGetGroupKeyColumnName() {
    // Run the test
    _selectionResultSetUnderTest.getGroupKeyColumnName(0);
}
/**
 * Starts the headless KSQL server: loads UDFs, ensures the processing-log topic,
 * applies the RocksDB config setter, executes the queries file, shows the welcome
 * message, and starts the version checker. Failures are logged with the query
 * file path and rethrown.
 */
public void startAsync() { try { udfLoader.load(); ProcessingLogServerUtils.maybeCreateProcessingLogTopic( serviceContext.getTopicClient(), processingLogConfig, ksqlConfig); if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) { log.warn("processing log auto-create is enabled, but this is not supported " + "for headless mode."); } rocksDBConfigSetterHandler.accept(ksqlConfig); processesQueryFile(readQueriesFile(queriesFile)); showWelcomeMessage(); final Properties properties = new Properties(); ksqlConfig.originals().forEach((key, value) -> { if (nonNull(value)) { properties.put(key, value.toString()); } }); versionChecker.start(KsqlModuleType.SERVER, properties); } catch (final Exception e) { log.error("Failed to start KSQL Server with query file: " + queriesFile, e); throw e; } }
// A missing query file must surface as a KsqlException with a descriptive message.
@Test
public void shouldThrowIfCanNotLoadQueryFile() {
    // Given:
    givenFileDoesNotExist();
    // When:
    final KsqlException e = assertThrows(
        KsqlException.class,
        () -> standaloneExecutor.startAsync()
    );
    // Then:
    assertThat(e.getMessage(), containsString("Could not read the query file"));
}
/**
 * Wraps a checked-exception-throwing supplier in a plain {@link Supplier}.
 * Any exception thrown by the delegate is rethrown unchecked via
 * throwAsUnchecked, so the returned lambda never completes normally after a
 * failure (the trailing null return is unreachable in practice).
 */
public static <T, E extends Exception> Supplier<T> rethrowSupplier(SupplierWithExceptions<T, E> supplier) throws E {
    return () -> {
        T value = null;
        try {
            value = supplier.get();
        } catch (Exception caught) {
            throwAsUnchecked(caught);
        }
        return value;
    };
}
// Compiles/runs a rethrowSupplier wrapping a checked UnsupportedEncodingException
// inside a Collector factory; must not require try/catch at the call site.
@Test public void test_Supplier_with_checked_exceptions() throws UnsupportedEncodingException { Collector.of( rethrowSupplier(() -> new StringJoiner(new String(new byte[]{77, 97, 114, 107}, "UTF-8"))), StringJoiner::add, StringJoiner::merge, StringJoiner::toString); }
/**
 * Returns the backlog of messages for the event, truncated to the configured
 * backlog size when that size is positive. A null backlog or a non-positive
 * size returns the backlog unchanged (possibly null).
 */
@VisibleForTesting List<MessageSummary> getMessageBacklog(EventNotificationContext ctx, SlackEventNotificationConfig config) {
    final List<MessageSummary> backlog = notificationCallbackService.getBacklogForEvent(ctx);
    final long limit = config.backlogSize();
    if (backlog == null || limit <= 0) {
        return backlog;
    }
    return backlog.stream().limit(limit).collect(Collectors.toList());
}
// A per-notification backlog override of 5 must cap the returned summaries at 5.
@Test
public void testBacklogMessageLimitWhenBacklogSizeIsFive() {
    SlackEventNotificationConfig slackConfig = SlackEventNotificationConfig.builder()
        .backlogSize(5)
        .build();
    //global setting is at N and the message override is 5 then the backlog size = 5
    List<MessageSummary> messageSummaries = slackEventNotification.getMessageBacklog(eventNotificationContext, slackConfig);
    assertThat(messageSummaries.size()).isEqualTo(5);
}
// Convenience overload: resolves the URL against the default connect address.
UriEndpoint createUriEndpoint(String url, boolean isWs) { return createUriEndpoint(url, isWs, connectAddress); }
// host:port/path without a scheme must resolve to https (ssl, non-ws) and wss (ws).
@Test void createUriEndpointRelativeNoLeadingSlash() { String test1 = this.builder.sslSupport().build() .createUriEndpoint("example.com:8443/bar", false) .toExternalForm(); String test2 = this.builder.build() .createUriEndpoint("example.com:8443/bar", true) .toExternalForm(); assertThat(test1).isEqualTo("https://example.com:8443/bar"); assertThat(test2).isEqualTo("wss://example.com:8443/bar"); }
// Two-arg overload: defaults the time zone to GMT and delegates to the three-arg variant.
@Udf(description = "Converts a string representation of a date in the given format" + " into the TIMESTAMP value." + " Single quotes in the timestamp format can be escaped with ''," + " for example: 'yyyy-MM-dd''T''HH:mm:ssX'.") public Timestamp parseTimestamp( @UdfParameter( description = "The string representation of a date.") final String formattedTimestamp, @UdfParameter( description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) { return parseTimestamp(formattedTimestamp, formatPattern, ZoneId.of("GMT").getId()); }
// A null time-zone argument must make parseTimestamp return null rather than throw.
@Test
public void shouldHandleNullTimeZone() {
    // When:
    final Object result = udf.parseTimestamp("2018-08-15 17:10:43", "yyyy-MM-dd HH:mm:ss", null);
    // Then:
    assertThat(result, is(nullValue()));
}
/**
 * Selects a delegation token from the credentials, preferring the configured
 * dtService and falling back to the canonical service name. Returns null when
 * neither service has a matching token.
 */
@InterfaceAudience.Private @Override public Token<?> selectDelegationToken(Credentials creds) {
    final Token<?> primary = selectDelegationToken(creds, dtService);
    return (primary != null) ? primary : selectDelegationToken(creds, canonicalService);
}
// Token selection must match only the exact provider-URI service, not null or stale names.
@Test public void testSelectDelegationToken() throws Exception { final Credentials creds = new Credentials(); creds.addToken(new Text(providerUriString), token); assertNull(KMSClientProvider.selectDelegationToken(creds, null)); assertNull(KMSClientProvider .selectDelegationToken(creds, new Text(oldTokenService))); assertEquals(token, KMSClientProvider .selectDelegationToken(creds, new Text(providerUriString))); }
/**
 * SQL-file transformer: registers the input dataset under a random temp view,
 * runs every ';'-separated statement from the configured SQL file (with the
 * source placeholder substituted by the view name), and returns the result of
 * the last statement. The temp view is always dropped in the finally block.
 */
@Override public Dataset<Row> apply( final JavaSparkContext jsc, final SparkSession sparkSession, final Dataset<Row> rowDataset, final TypedProperties props) {
    final String sqlFile = getStringWithAltKeys(props, SqlTransformerConfig.TRANSFORMER_SQL_FILE);
    final FileSystem fs = HadoopFSUtils.getFs(sqlFile, jsc.hadoopConfiguration(), true);
    // tmp table name doesn't like dashes
    final String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_"));
    LOG.info("Registering tmp table: {}", tmpTable);
    rowDataset.createOrReplaceTempView(tmpTable);
    try (final Scanner scanner = new Scanner(fs.open(new Path(sqlFile)), "UTF-8")) {
        Dataset<Row> rows = null;
        // each sql statement is separated with semicolon hence set that as delimiter.
        scanner.useDelimiter(";");
        LOG.info("SQL Query for transformation:");
        while (scanner.hasNext()) {
            String sqlStr = scanner.next();
            sqlStr = sqlStr.replaceAll(SRC_PATTERN, tmpTable).trim();
            if (!sqlStr.isEmpty()) {
                LOG.info(sqlStr);
                // overwrite the same dataset object until the last statement then return.
                rows = sparkSession.sql(sqlStr);
            }
        }
        return rows;
    } catch (final IOException ioe) {
        throw new HoodieTransformExecutionException("Error reading transformer SQL file.", ioe);
    } finally {
        sparkSession.catalog().dropTempView(tmpTable);
    }
}
// End-to-end check: output of the bundled transformer SQL must equal the
// distinct, col1-sorted input rows.
@Test
public void testSqlFileBasedTransformer() throws IOException {
    UtilitiesTestBase.Helpers.copyToDFS(
        "streamer-config/sql-file-transformer.sql",
        UtilitiesTestBase.storage,
        UtilitiesTestBase.basePath + "/sql-file-transformer.sql");
    // Test if the SQL file based transformer works as expected for the correct input.
    props.setProperty(
        "hoodie.streamer.transformer.sql.file",
        UtilitiesTestBase.basePath + "/sql-file-transformer.sql");
    Dataset<Row> transformedRow = sqlFileTransformer.apply(jsc, sparkSession, inputDatasetRows, props);
    // Called distinct() and sort() to match the transformation in this file:
    // hudi-utilities/src/test/resources/streamer-config/sql-file-transformer.sql
    String[] expectedRows = inputDatasetRows
        .distinct()
        .sort("col1")
        .as(Encoders.STRING())
        .collectAsList()
        .toArray(new String[0]);
    String[] actualRows = transformedRow.as(Encoders.STRING()).collectAsList().toArray(new String[0]);
    assertArrayEquals(expectedRows, actualRows);
}
/**
 * Records an observation of the given throwable. The distinct-observation
 * lookup/creation happens under the lock; the per-entry counter and last-seen
 * timestamp are then updated outside it. Returns false when the buffer has
 * insufficient space to encode a new distinct observation.
 */
public boolean record(final Throwable observation) { final long timestampMs; DistinctObservation distinctObservation; timestampMs = clock.time(); synchronized (this) { distinctObservation = find(distinctObservations, observation); if (null == distinctObservation) { distinctObservation = newObservation(timestampMs, observation); if (INSUFFICIENT_SPACE == distinctObservation) { return false; } } } final int offset = distinctObservation.offset; buffer.getAndAddInt(offset + OBSERVATION_COUNT_OFFSET, 1); buffer.putLongOrdered(offset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestampMs); return true; }
// Two different exception types must each get their own encoded record, with the
// second record placed at the aligned offset after the first.
@Test void shouldRecordTwoDistinctObservations() { final long timestampOne = 7; final long timestampTwo = 10; final int offset = 0; final RuntimeException errorOne = new RuntimeException("Test Error One"); final IllegalStateException errorTwo = new IllegalStateException("Test Error Two"); when(clock.time()).thenReturn(timestampOne).thenReturn(timestampTwo); assertTrue(log.record(errorOne)); assertTrue(log.record(errorTwo)); final ArgumentCaptor<Integer> lengthArg = ArgumentCaptor.forClass(Integer.class); final InOrder inOrder = inOrder(buffer); inOrder.verify(buffer).putBytes(eq(offset + ENCODED_ERROR_OFFSET), any(byte[].class)); inOrder.verify(buffer).putLong(offset + FIRST_OBSERVATION_TIMESTAMP_OFFSET, timestampOne); inOrder.verify(buffer).putIntOrdered(eq(offset + LENGTH_OFFSET), lengthArg.capture()); inOrder.verify(buffer).getAndAddInt(offset + OBSERVATION_COUNT_OFFSET, 1); inOrder.verify(buffer).putLongOrdered(offset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestampOne); final int recordTwoOffset = BitUtil.align(lengthArg.getValue(), RECORD_ALIGNMENT); inOrder.verify(buffer).putBytes(eq(recordTwoOffset + ENCODED_ERROR_OFFSET), any(byte[].class)); inOrder.verify(buffer).putLong(recordTwoOffset + FIRST_OBSERVATION_TIMESTAMP_OFFSET, timestampTwo); inOrder.verify(buffer).putIntOrdered(eq(recordTwoOffset + LENGTH_OFFSET), anyInt()); inOrder.verify(buffer).getAndAddInt(recordTwoOffset + OBSERVATION_COUNT_OFFSET, 1); inOrder.verify(buffer).putLongOrdered(recordTwoOffset + LAST_OBSERVATION_TIMESTAMP_OFFSET, timestampTwo); }
/**
 * Parses a maven-style version range such as "[1.0,2.0)" or "(,1.0]".
 * '[' / ']' endpoints are inclusive, '(' / ')' exclusive; a missing endpoint
 * becomes an exclusive minimum/maximum sentinel. The minimum must be strictly
 * less than the maximum.
 */
public static VersionRange parse(String rangeString) { validateRangeString(rangeString); Inclusiveness minVersionInclusiveness = rangeString.startsWith("[") ? Inclusiveness.INCLUSIVE : Inclusiveness.EXCLUSIVE; Inclusiveness maxVersionInclusiveness = rangeString.endsWith("]") ? Inclusiveness.INCLUSIVE : Inclusiveness.EXCLUSIVE; int commaIndex = rangeString.indexOf(','); String minVersionString = rangeString.substring(1, commaIndex).trim(); Version minVersion; if (minVersionString.isEmpty()) { minVersionInclusiveness = Inclusiveness.EXCLUSIVE; minVersion = Version.minimum(); } else { minVersion = Version.fromString(minVersionString); } String maxVersionString = rangeString.substring(commaIndex + 1, rangeString.length() - 1).trim(); Version maxVersion; if (maxVersionString.isEmpty()) { maxVersionInclusiveness = Inclusiveness.EXCLUSIVE; maxVersion = Version.maximum(); } else { maxVersion = Version.fromString(maxVersionString); } if (!minVersion.isLessThan(maxVersion)) { throw new IllegalArgumentException( String.format( "Min version in range must be less than max version in range, got '%s'", rangeString)); } return builder() .setMinVersion(minVersion) .setMinVersionInclusiveness(minVersionInclusiveness) .setMaxVersion(maxVersion) .setMaxVersionInclusiveness(maxVersionInclusiveness) .build(); }
// Extra brackets inside the range string must be rejected by validation.
@Test public void parse_withTooManyParenthesis_throwsIllegalArgumentException() { IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> VersionRange.parse("(,1.0]]")); assertThat(exception) .hasMessageThat() .isEqualTo("Parenthesis and/or brackets not allowed within version range, got '(,1.0]]'"); }
/**
 * Upgrades an HTTP request to a WebSocketResourceProvider, authenticating first
 * when an authenticator is configured. Invalid credentials send a 403 and return
 * null; authentication or IO errors send a 500 (best effort) and return null.
 */
@Override public Object createWebSocket(final JettyServerUpgradeRequest request, final JettyServerUpgradeResponse response) { try { Optional<WebSocketAuthenticator<T>> authenticator = Optional.ofNullable(environment.getAuthenticator()); final ReusableAuth<T> authenticated; if (authenticator.isPresent()) { authenticated = authenticator.get().authenticate(request); if (authenticated.invalidCredentialsProvided()) { response.sendForbidden("Unauthorized"); return null; } } else { authenticated = ReusableAuth.anonymous(); } return new WebSocketResourceProvider<>(getRemoteAddress(request), remoteAddressPropertyName, this.jerseyApplicationHandler, this.environment.getRequestLog(), authenticated, this.environment.getMessageFactory(), ofNullable(this.environment.getConnectListener()), this.environment.getIdleTimeout()); } catch (AuthenticationException | IOException e) { logger.warn("Authentication failure", e); try { response.sendError(500, "Failure"); } catch (IOException ignored) { } return null; } }
// Invalid credentials must yield a null connection and a 403 "Unauthorized" response.
@Test void testUnauthorized() throws AuthenticationException, IOException { when(environment.getAuthenticator()).thenReturn(authenticator); when(authenticator.authenticate(eq(request))).thenReturn(ReusableAuth.invalid()); when(environment.jersey()).thenReturn(jerseyEnvironment); WebSocketResourceProviderFactory<?> factory = new WebSocketResourceProviderFactory<>(environment, Account.class, mock(WebSocketConfiguration.class), REMOTE_ADDRESS_PROPERTY_NAME); Object connection = factory.createWebSocket(request, response); assertNull(connection); verify(response).sendForbidden(eq("Unauthorized")); verify(authenticator).authenticate(eq(request)); }
// Delegates to HexUtil for the unicode escape form of a single character.
public static String toUnicode(char c) { return HexUtil.toUnicodeHex(c); }
// Regression for issue I50MI6: a CJK character must escape to its \\uXXXX form.
@Test public void issueI50MI6Test(){ String s = UnicodeUtil.toUnicode("烟", true); assertEquals("\\u70df", s); }
/**
 * Returns the externally visible HTTP URI, falling back to the publish URI
 * when no external URI has been configured.
 */
public URI getHttpExternalUri() {
    if (httpExternalUri != null) {
        return httpExternalUri;
    }
    return getHttpPublishUri();
}
// A configured http_external_uri must be returned verbatim as an absolute URI.
@Test public void testHttpExternalUriIsAbsoluteURI() throws RepositoryException, ValidationException { jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_external_uri", "http://www.example.com:12900/foo/"))).addConfigurationBean(configuration).process(); assertThat(configuration.getHttpExternalUri()).isEqualTo(URI.create("http://www.example.com:12900/foo/")); }
/**
 * Attempts to establish an authenticated session for the token, reusing
 * currentSessionId when non-blank. Invalid credentials audit a failure and
 * return empty; an unavailable authentication service audits a failure and
 * rethrows so callers can distinguish outage from rejection.
 */
public Optional<Session> login(@Nullable String currentSessionId, String host, ActorAwareAuthenticationToken authToken) throws AuthenticationServiceUnavailableException { final String previousSessionId = StringUtils.defaultIfBlank(currentSessionId, null); final Subject subject = new Subject.Builder().sessionId(previousSessionId).host(host).buildSubject(); ThreadContext.bind(subject); try { final Session session = subject.getSession(); subject.login(authToken); return createSession(subject, session, host); } catch (AuthenticationServiceUnavailableException e) { log.info("Session creation failed due to authentication service being unavailable. Actor: \"{}\"", authToken.getActor().urn()); final Map<String, Object> auditEventContext = ImmutableMap.of( "remote_address", host, "message", "Authentication service unavailable: " + e.getMessage() ); auditEventSender.failure(authToken.getActor(), SESSION_CREATE, auditEventContext); throw e; } catch (AuthenticationException e) { log.info("Invalid credentials in session create request. Actor: \"{}\"", authToken.getActor().urn()); final Map<String, Object> auditEventContext = ImmutableMap.of( "remote_address", host ); auditEventSender.failure(authToken.getActor(), SESSION_CREATE, auditEventContext); return Optional.empty(); } }
// A realm that throws must not block authentication when a later realm succeeds.
@Test
public void throwingRealmDoesNotInhibitAuthentication() {
    setUpUserMock();
    assertFalse(SecurityUtils.getSubject().isAuthenticated());
    // Put a throwing realm in the first position. Authentication should still be successful, because the second
    // realm will find an account for the user
    final List<Realm> realms = new ArrayList<>(securityManager.getRealms());
    realms.add(0, throwingRealm());
    securityManager.setRealms(realms);
    assertThat(sessionCreator.login(null, "host", validToken)).isPresent();
    assertThat(SecurityUtils.getSubject().isAuthenticated()).isTrue();
    verify(auditEventSender).success(eq(AuditActor.user("username")), anyString(), anyMap());
}
// Cached lookup: parses the DoFn's signature once per class and memoizes it.
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) { return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature); }
// @AlwaysFetched on a non-ReadableState (MapState) parameter must be rejected
// during signature parsing.
@Test public void testStateParameterAlwaysFetchNonReadableState() { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("ReadableStates"); DoFnSignatures.getSignature( new DoFn<KV<String, Integer>, Long>() { @StateId("my-id") private final StateSpec<MapState<Integer, Integer>> myfield = StateSpecs.map(VarIntCoder.of(), VarIntCoder.of()); @ProcessElement public void myProcessElement( ProcessContext context, @AlwaysFetched @StateId("my-id") MapState<Integer, Integer> one) {} }.getClass()); }
// Convenience overload: routes with an empty parameter map.
public RouteResult<T> route(HttpMethod method, String path) { return route(method, path, Collections.emptyMap()); }
// Route precedence: literal segments ("new", "overview", nested "detailed") must
// win over the ":id" pattern, and unknown paths must hit the 404 target.
@Test void testOrder() { RouteResult<String> routed1 = router.route(GET, "/articles/new"); assertThat(routed1.target()).isEqualTo("new"); assertThat(routed1.pathParams()).isEmpty(); RouteResult<String> routed2 = router.route(GET, "/articles/123"); assertThat(routed2.target()).isEqualTo("show"); assertThat(routed2.pathParams()).hasSize(1); assertThat(routed2.pathParams().get("id")).isEqualTo("123"); RouteResult<String> routed3 = router.route(GET, "/notfound"); assertThat(routed3.target()).isEqualTo("404"); assertThat(routed3.pathParams()).isEmpty(); RouteResult<String> routed4 = router.route(GET, "/articles/overview"); assertThat(routed4.target()).isEqualTo("overview"); assertThat(routed4.pathParams()).isEmpty(); RouteResult<String> routed5 = router.route(GET, "/articles/overview/detailed"); assertThat(routed5.target()).isEqualTo("detailed"); assertThat(routed5.pathParams()).isEmpty(); }
/**
 * Validates a MapConfig against the global config and build edition: in-memory
 * format, bitmap-index restrictions, tiered-store availability, enterprise-only
 * native/tiered checks, eviction config, max-size policy and merge policy.
 */
public static void checkMapConfig(Config config, MapConfig mapConfig, SplitBrainMergePolicyProvider mergePolicyProvider) { checkNotNativeWhenOpenSource(mapConfig.getInMemoryFormat()); checkNotBitmapIndexWhenNativeMemory(mapConfig.getInMemoryFormat(), mapConfig.getIndexConfigs()); checkTSEnabledOnEnterpriseJar(mapConfig.getTieredStoreConfig()); if (getBuildInfo().isEnterprise()) { checkTieredStoreMapConfig(config, mapConfig); checkMapNativeConfig(mapConfig, config.getNativeMemoryConfig()); } checkMapEvictionConfig(mapConfig.getEvictionConfig()); checkMapMaxSizePolicyPerInMemoryFormat(mapConfig); checkMapMergePolicy(mapConfig, mapConfig.getMergePolicyConfig().getPolicy(), mergePolicyProvider); }
// A BINARY in-memory format map config must pass validation without throwing.
@Test public void checkMapConfig_BINARY() { checkMapConfig(new Config(), getMapConfig(BINARY), splitBrainMergePolicyProvider); }
/**
 * Returns the inverse of the given permutation: for every position p,
 * {@code result[input[p]] == p}. An empty input yields an empty array.
 * Assumes input is a valid permutation of 0..length-1 (not validated).
 */
public static int[] invertPermutation(int... input) {
    final int length = input.length;
    final int[] inverse = new int[length];
    for (int position = 0; position < length; position++) {
        inverse[input[position]] = position;
    }
    return inverse;
}
// The long[] overload must invert a 5-element permutation the same way as the int version.
@Test public void testInvertPermutationLong(){ assertArrayEquals( new long[]{ 2, 4, 3, 0, 1 }, ArrayUtil.invertPermutation(3L, 4L, 0L, 2L, 1L) ); }
/**
 * Returns the next page of up to maxCount items. Re-presenting the previous
 * cursor id replays the cached page (no progress, nothing to forget); any other
 * stale id is rejected. A successful advance issues a fresh cursor id and
 * reports the now-obsolete previous-previous id for the caller to forget.
 */
public IterationResult<T> iterate(@Nonnull UUID cursorId, int maxCount) {
    requireNonNull(cursorId);
    if (cursorId.equals(this.prevCursorId)) {
        access();
        // no progress, no need to forget a cursor id, so null
        return new IterationResult<>(this.page, this.cursorId, null);
    } else if (!cursorId.equals(this.cursorId)) {
        throw new IllegalStateException("The cursor id " + cursorId + " is not the current cursor id nor the previous cursor id.");
    }
    List<T> currentPage = new ArrayList<>(maxCount);
    while (currentPage.size() < maxCount && iterator.hasNext()) {
        currentPage.add(iterator.next());
    }
    UUID cursorIdToForget = this.prevCursorId;
    this.prevCursorId = this.cursorId;
    this.cursorId = UuidUtil.newUnsecureUUID();
    this.page = currentPage;
    access();
    return new IterationResult<>(this.page, this.cursorId, cursorIdToForget);
}
// A second iterate() with the fresh cursor id must continue from item 100.
@Test public void testIterate_ContinuesWhereLeftOff() { int pageSize = 100; UUID cursorId = iterator.iterate(initialCursorId, pageSize).getCursorId(); assertIterationResult(iterator.iterate(cursorId, 100), cursorId, initialCursorId, 100, pageSize); }
@VisibleForTesting static File checkHadoopHomeInner(String home) throws FileNotFoundException { // couldn't find either setting for hadoop's home directory if (home == null) { throw new FileNotFoundException(E_HADOOP_PROPS_UNSET); } // strip off leading and trailing double quotes while (home.startsWith("\"")) { home = home.substring(1); } while (home.endsWith("\"")) { home = home.substring(0, home.length() - 1); } // after stripping any quotes, check for home dir being non-empty if (home.isEmpty()) { throw new FileNotFoundException(E_HADOOP_PROPS_EMPTY); } // check that the hadoop home dir value // is an absolute reference to a directory File homedir = new File(home); if (!homedir.isAbsolute()) { throw new FileNotFoundException("Hadoop home directory " + homedir + " " + E_IS_RELATIVE); } if (!homedir.exists()) { throw new FileNotFoundException("Hadoop home directory " + homedir + " " + E_DOES_NOT_EXIST); } if (!homedir.isDirectory()) { throw new FileNotFoundException("Hadoop home directory " + homedir + " "+ E_NOT_DIRECTORY); } return homedir; }
// A valid existing directory must be accepted and returned unchanged.
@Test public void testHadoopHomeValid() throws Throwable { File f = checkHadoopHomeInner(rootTestDir.getCanonicalPath()); assertEquals(rootTestDir, f); }
/**
 * Refreshes an OAuth2 access token: validates the refresh token and its client,
 * revokes every access token issued from it (DB rows and Redis cache), rejects
 * and deletes an expired refresh token, then issues a new access token.
 */
@Override public OAuth2AccessTokenDO refreshAccessToken(String refreshToken, String clientId) {
    // Look up the refresh token
    OAuth2RefreshTokenDO refreshTokenDO = oauth2RefreshTokenMapper.selectByRefreshToken(refreshToken);
    if (refreshTokenDO == null) {
        throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "无效的刷新令牌");
    }
    // Verify the client matches
    OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
    if (ObjectUtil.notEqual(clientId, refreshTokenDO.getClientId())) {
        throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "刷新令牌的客户端编号不正确");
    }
    // Remove the related access tokens
    List<OAuth2AccessTokenDO> accessTokenDOs = oauth2AccessTokenMapper.selectListByRefreshToken(refreshToken);
    if (CollUtil.isNotEmpty(accessTokenDOs)) {
        oauth2AccessTokenMapper.deleteBatchIds(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getId));
        oauth2AccessTokenRedisDAO.deleteList(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getAccessToken));
    }
    // If already expired, delete the refresh token and reject
    if (DateUtils.isExpired(refreshTokenDO.getExpiresTime())) {
        oauth2RefreshTokenMapper.deleteById(refreshTokenDO.getId());
        throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "刷新令牌已过期");
    }
    // Create the new access token
    return createOAuth2AccessToken(refreshTokenDO, clientDO);
}
// Refreshing with an unknown refresh token must fail with error 400.
@Test
public void testRefreshAccessToken_null() {
    // Prepare parameters
    String refreshToken = randomString();
    String clientId = randomString();
    // Mock methods (none needed)
    // Invoke and assert the expected service exception
    assertServiceException(() -> oauth2TokenService.refreshAccessToken(refreshToken, clientId),
        new ErrorCode(400, "无效的刷新令牌"));
}
// Factory for the AsIterable side-input view transform.
public static <T> AsIterable<T> asIterable() { return new AsIterable<>(); }
// Windowed iterable side input: each main-input element must observe exactly the
// side-input values that fall into its own fixed window.
@Test @Category(ValidatesRunner.class) public void testWindowedIterableSideInput() { final PCollectionView<Iterable<Integer>> view = pipeline .apply( "CreateSideInput", Create.timestamped( TimestampedValue.of(11, new Instant(1)), TimestampedValue.of(13, new Instant(1)), TimestampedValue.of(17, new Instant(1)), TimestampedValue.of(23, new Instant(1)), TimestampedValue.of(31, new Instant(11)), TimestampedValue.of(33, new Instant(11)), TimestampedValue.of(37, new Instant(11)), TimestampedValue.of(43, new Instant(11)))) .apply("SideWindowInto", Window.into(FixedWindows.of(Duration.millis(10)))) .apply(View.asIterable()); PCollection<Integer> output = pipeline .apply( "CreateMainInput", Create.timestamped( TimestampedValue.of(29, new Instant(1)), TimestampedValue.of(35, new Instant(11)))) .apply("MainWindowInto", Window.into(FixedWindows.of(Duration.millis(10)))) .apply( "OutputSideInputs", ParDo.of( new DoFn<Integer, Integer>() { @ProcessElement public void processElement(ProcessContext c) { for (Integer i : c.sideInput(view)) { c.output(i); } } }) .withSideInputs(view)); PAssert.that(output).containsInAnyOrder(11, 13, 17, 23, 31, 33, 37, 43); pipeline.run(); }
// Delegates the read to the underlying gauge source.
@Override public long read() { return gaugeSource.read(); }
// A dynamic double metric read through a long gauge must round (41.65 -> 42),
// both after the initial collect and after the underlying value changes.
@Test
public void whenCreatedForDynamicDoubleMetricWithExtractedValue() {
    SomeObject someObject = new SomeObject();
    someObject.doubleField = 41.65D;
    metricsRegistry.registerDynamicMetricsProvider(someObject);
    LongGaugeImpl longGauge = metricsRegistry.newLongGauge("foo.doubleField");
    // needed to collect dynamic metrics and update the gauge created from them
    metricsRegistry.collect(mock(MetricsCollector.class));
    assertEquals(42, longGauge.read());
    someObject.doubleField = 42.65D;
    assertEquals(43, longGauge.read());
}
/**
 * Builds the JSON body of a GNPy path-computation request between the two
 * connect points. Returns the stream containing the serialized request; on an
 * IO failure during serialization the error is logged and whatever was written
 * so far is returned.
 *
 * Example shape of the produced document:
 * { "path-request": [ { "request-id": "...", "source": "...", "destination": "...",
 *   "src-tp-id": "...", "dst-tp-id": "...", "bidirectional": false,
 *   "path-constraints": { "te-bandwidth": { "technology": "flexi-grid",
 *     "trx_type": "...", "trx_mode": null, "effective-freq-slot": [{"N":"null","M":"null"}],
 *     "spacing": 5.0e10, "max-nb-of-channel": null, "output-power": null,
 *     "path_bandwidth": 1.0e11 } } } ] }
 */
protected ByteArrayOutputStream createGnpyRequest(ConnectPoint ingress, ConnectPoint egress, boolean bidirectional) {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    try {
        JsonGenerator generator = getJsonGenerator(stream);
        generator.writeStartObject();
        generator.writeArrayFieldStart("path-request");
        generator.writeStartObject();
        generator.writeStringField("request-id", "onos-" + counter.getAndIncrement());
        generator.writeStringField("source", ingress.deviceId().toString());
        generator.writeStringField("destination", egress.deviceId().toString());
        generator.writeStringField("src-tp-id", ingress.deviceId().toString());
        generator.writeStringField("dst-tp-id", egress.deviceId().toString());
        generator.writeBooleanField("bidirectional", bidirectional);
        generator.writeObjectFieldStart("path-constraints");
        generator.writeObjectFieldStart("te-bandwidth");
        generator.writeStringField("technology", "flexi-grid");
        generator.writeStringField("trx_type", "Cassini"); //TODO make variable
        generator.writeNullField("trx_mode");
        generator.writeArrayFieldStart("effective-freq-slot");
        generator.writeStartObject();
        generator.writeStringField("N", "null");
        generator.writeStringField("M", "null");
        generator.writeEndObject();
        generator.writeEndArray();
        generator.writeNumberField("spacing", 50000000000.0);
        generator.writeNullField("max-nb-of-channel");
        generator.writeNullField("output-power");
        generator.writeNumberField("path_bandwidth", 100000000000.0);
        generator.writeEndObject(); // closes te-bandwidth
        generator.writeEndObject(); // closes path-constraints
        generator.writeEndObject(); // closes the path-request element
        generator.writeEndArray();
        generator.writeEndObject();
        generator.close();
        return stream;
    } catch (IOException e) {
        // NOTE(review): message contains a typo ("Cant'") — runtime string intentionally unchanged here
        log.error("Cant' create json", e);
    }
    return stream;
}
// The serialized bidirectional request for the two netconf endpoints must match
// the expected REQUEST fixture byte-for-byte.
@Test public void testCreateGnpyRequest() { ConnectPoint ingress = ConnectPoint.fromString("netconf:10.0.254.93:830/1"); ConnectPoint egress = ConnectPoint.fromString("netconf:10.0.254.94:830/1"); String output = manager.createGnpyRequest(ingress, egress, true).toString(); System.out.println(output); assertEquals("Json to create network connectivity is wrong", REQUEST, output); }
private void handleAddSplitsEvent(AddSplitEvent<SplitT> event) { try { List<SplitT> newSplits = event.splits(splitSerializer); numSplits += newSplits.size(); if (operatingMode == OperatingMode.OUTPUT_NOT_INITIALIZED) { // For splits arrived before the main output is initialized, store them into the // pending list. Outputs of these splits will be created once the main output is // ready. splitsToInitializeOutput.addAll(newSplits); } else { // Create output directly for new splits if the main output is already initialized. createOutputForSplits(newSplits); } sourceReader.addSplits(newSplits); } catch (IOException e) { throw new FlinkRuntimeException("Failed to deserialize the splits.", e); } }
@Test void testHandleAddSplitsEvent() throws Exception { operator.initializeState(context.createStateContext()); operator.open(); MockSourceSplit newSplit = new MockSourceSplit((2)); operator.handleOperatorEvent( new AddSplitEvent<>( Collections.singletonList(newSplit), new MockSourceSplitSerializer())); // The source reader should have been assigned two splits. assertThat(mockSourceReader.getAssignedSplits()) .containsExactly(SourceOperatorTestContext.MOCK_SPLIT, newSplit); }
@Override public TenantDO getTenantByName(String name) { return tenantMapper.selectByName(name); }
@Test public void testGetTenantByName() { // mock 数据 TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setName("芋道")); tenantMapper.insert(dbTenant);// @Sql: 先插入出一条存在的数据 // 调用 TenantDO result = tenantService.getTenantByName("芋道"); // 校验存在 assertPojoEquals(result, dbTenant); }
@Override public Optional<IndexSetConfig> get(String id) { return get(new ObjectId(id)); }
@Test public void getReturnsAbsentOptionalIfIndexSetConfigDoesNotExist() throws Exception { final Optional<IndexSetConfig> indexSetConfig = indexSetService.get(new ObjectId("57f3d3f0a43c2d595eb0a348")); assertThat(indexSetConfig).isEmpty(); }
@Nullable static String getPropertyIfString(Message message, String name) { try { Object o = message.getObjectProperty(name); if (o instanceof String) return o.toString(); return null; } catch (Throwable t) { propagateIfFatal(t); log(t, "error getting property {0} from message {1}", name, message); return null; } }
@Test void getPropertyIfString_notString() throws Exception { message.setByteProperty("b3", (byte) 0); assertThat(MessageProperties.getPropertyIfString(message, "b3")) .isNull(); }
/**
 * Validates the given internal topics against the broker: partition counts
 * (via describeTopics) and cleanup policies (via describeConfigs).
 *
 * <p>Both checks are retried independently until each topic has been
 * validated or the retry deadline ({@code retryTimeoutMs} from now) expires,
 * in which case a timeout is raised by {@code maybeThrowTimeoutException}.
 *
 * @param topicConfigs expected configuration per internal topic name
 * @return the accumulated validation result (missing topics and misconfigurations)
 */
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());

    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;

    final ValidationResult validationResult = new ValidationResult();
    // Work lists: topics whose description / config still needs a successful check.
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        // Re-issue the admin requests only for topics that are still pending.
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet())
            );
            // Re-key by topic name so both maps are indexed the same way.
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }

        // Drain the in-flight futures; doValidateTopic removes completed
        // entries from the maps and, on success, from the pending sets.
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    descriptionsForTopic,
                    topicConfigs,
                    topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
                );
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    configsForTopic,
                    topicConfigs,
                    topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
                );
            }
            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
            );
            // Futures still outstanding: back off briefly before polling again.
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }

        // Some topics may need a fresh request (e.g. retriable errors);
        // sleep between retry rounds unless everything is validated.
        maybeSleep(
            Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
            deadline,
            "validated"
        );
    }

    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
@Test public void shouldOnlyRetryDescribeTopicsWhenDescribeTopicsThrowsLeaderNotAvailableExceptionDuringValidation() { final AdminClient admin = mock(AdminClient.class); final InternalTopicManager topicManager = new InternalTopicManager( time, admin, new StreamsConfig(config) ); final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>(); topicDescriptionFailFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!")); final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessfulFuture = new KafkaFutureImpl<>(); topicDescriptionSuccessfulFuture.complete(new TopicDescription( topic1, false, Collections.singletonList(new TopicPartitionInfo(0, broker1, cluster, Collections.emptyList())) )); when(admin.describeTopics(Collections.singleton(topic1))) .thenAnswer(answer -> new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionFailFuture)))) .thenAnswer(answer -> new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionSuccessfulFuture)))); final KafkaFutureImpl<Config> topicConfigSuccessfulFuture = new KafkaFutureImpl<>(); topicConfigSuccessfulFuture.complete( new Config(repartitionTopicConfig().entrySet().stream() .map(entry -> new ConfigEntry(entry.getKey(), entry.getValue())).collect(Collectors.toSet())) ); final ConfigResource topicResource = new ConfigResource(Type.TOPIC, topic1); when(admin.describeConfigs(Collections.singleton(topicResource))) .thenAnswer(answer -> new MockDescribeConfigsResult(mkMap(mkEntry(topicResource, topicConfigSuccessfulFuture)))); final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1); final ValidationResult validationResult = topicManager.validate(Collections.singletonMap(topic1, internalTopicConfig)); assertThat(validationResult.missingTopics(), empty()); assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap()); }
@Override public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context, Map<String, Long> recentlyUnloadedBundles, Map<String, Long> recentlyUnloadedBrokers) { final var conf = context.brokerConfiguration(); decisionCache.clear(); stats.clear(); Map<String, BrokerLookupData> availableBrokers; try { availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync() .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS); } catch (ExecutionException | InterruptedException | TimeoutException e) { counter.update(Failure, Unknown); log.warn("Failed to fetch available brokers. Stop unloading.", e); return decisionCache; } try { final var loadStore = context.brokerLoadDataStore(); stats.setLoadDataStore(loadStore); boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log); var skipReason = stats.update( context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf); if (skipReason.isPresent()) { if (debugMode) { log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. 
Reason:{}.", skipReason.get()); } counter.update(Skip, skipReason.get()); return decisionCache; } counter.updateLoadData(stats.avg, stats.std); if (debugMode) { log.info("brokers' load stats:{}", stats); } // skip metrics int numOfBrokersWithEmptyLoadData = 0; int numOfBrokersWithFewBundles = 0; final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd(); boolean transfer = conf.isLoadBalancerTransferEnabled(); if (stats.std() > targetStd || isUnderLoaded(context, stats.peekMinBroker(), stats) || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { unloadConditionHitCount++; } else { unloadConditionHitCount = 0; } if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Shedding condition hit count:{} is less than or equal to the threshold:{}.", unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold()); } counter.update(Skip, HitCount); return decisionCache; } while (true) { if (!stats.hasTransferableBrokers()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers."); } break; } UnloadDecision.Reason reason; if (stats.std() > targetStd) { reason = Overloaded; } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) { reason = Underloaded; if (debugMode) { log.info(String.format("broker:%s is underloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this underloaded broker.", stats.peekMinBroker(), context.brokerLoadDataStore().get(stats.peekMinBroker()).get(), stats.std(), targetStd)); } } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { reason = Overloaded; if (debugMode) { log.info(String.format("broker:%s is overloaded:%s although " + "load std:%.2f <= targetStd:%.2f. 
" + "Continuing unload for this overloaded broker.", stats.peekMaxBroker(), context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(), stats.std(), targetStd)); } } else { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + "The overall cluster load meets the target, std:{} <= targetStd:{}." + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.", stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker()); } break; } String maxBroker = stats.pollMaxBroker(); String minBroker = stats.peekMinBroker(); Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker); Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker); if (maxBrokerLoadData.isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } if (minBrokerLoadData.isEmpty()) { log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker); numOfBrokersWithEmptyLoadData++; continue; } double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA(); double minLoad = minBrokerLoadData.get().getWeightedMaxEMA(); double offload = (maxLoad - minLoad) / 2; BrokerLoadData brokerLoadData = maxBrokerLoadData.get(); double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn() + brokerLoadData.getMsgThroughputOut(); double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn() + minBrokerLoadData.get().getMsgThroughputOut(); double offloadThroughput = maxBrokerThroughput * offload / maxLoad; if (debugMode) { log.info(String.format( "Attempting to shed load from broker:%s%s, which has the max resource " + "usage:%.2f%%, targetStd:%.2f," + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.", maxBroker, transfer ? 
" to broker:" + minBroker : "", maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB )); } double trafficMarkedToOffload = 0; double trafficMarkedToGain = 0; Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker); if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData(); if (maxBrokerTopBundlesLoadData.size() == 1) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Sole namespace bundle:%s is overloading the broker. ", maxBroker, maxBrokerTopBundlesLoadData.iterator().next())); continue; } Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker); var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent() ? 
minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null; if (maxBrokerTopBundlesLoadData.isEmpty()) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Broker overloaded despite having no bundles", maxBroker)); continue; } int remainingTopBundles = maxBrokerTopBundlesLoadData.size(); for (var e : maxBrokerTopBundlesLoadData) { String bundle = e.bundleName(); if (channel != null && !channel.isOwner(bundle, maxBroker)) { if (debugMode) { log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " MaxBroker:%s is not the owner.", bundle, maxBroker)); } continue; } if (recentlyUnloadedBundles.containsKey(bundle)) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " Bundle has been recently unloaded at ts:%d.", bundle, recentlyUnloadedBundles.get(bundle))); } continue; } if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet " + "affinity(isolation) or anti-affinity group policies.", bundle)); } continue; } if (remainingTopBundles <= 1) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is" + " less than or equal to 1.", bundle, maxBroker)); } break; } var bundleData = e.stats(); double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut; boolean swap = false; List<Unload> minToMaxUnloads = new ArrayList<>(); double minBrokerBundleSwapThroughput = 0.0; if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) { // see if we can swap bundles from min to max broker to balance better. 
if (transfer && minBrokerTopBundlesLoadDataIter != null) { var maxBrokerNewThroughput = maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain - maxBrokerBundleThroughput; var minBrokerNewThroughput = minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput; while (minBrokerTopBundlesLoadDataIter.hasNext()) { var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next(); if (!isTransferable(context, availableBrokers, minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) { continue; } var minBrokerBundleThroughput = minBrokerBundleData.stats().msgThroughputIn + minBrokerBundleData.stats().msgThroughputOut; var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput; var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput; if (maxBrokerNewThroughputTmp < maxBrokerThroughput && minBrokerNewThroughputTmp < maxBrokerThroughput) { minToMaxUnloads.add(new Unload(minBroker, minBrokerBundleData.bundleName(), Optional.of(maxBroker))); maxBrokerNewThroughput = maxBrokerNewThroughputTmp; minBrokerNewThroughput = minBrokerNewThroughputTmp; minBrokerBundleSwapThroughput += minBrokerBundleThroughput; if (minBrokerNewThroughput <= maxBrokerNewThroughput && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) { swap = true; break; } } } } if (!swap) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is " + "greater than the target :%.2f KByte/s.", bundle, (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB, offloadThroughput / KB)); } break; } } Unload unload; if (transfer) { if (swap) { minToMaxUnloads.forEach(minToMaxUnload -> { if (debugMode) { log.info("Decided to gain bundle:{} from min broker:{}", minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker()); } var decision = 
new UnloadDecision(); decision.setUnload(minToMaxUnload); decision.succeed(reason); decisionCache.add(decision); }); if (debugMode) { log.info(String.format( "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.", minBrokerBundleSwapThroughput / KB, minBroker, maxBroker)); trafficMarkedToGain += minBrokerBundleSwapThroughput; } } unload = new Unload(maxBroker, bundle, Optional.of(minBroker)); } else { unload = new Unload(maxBroker, bundle); } var decision = new UnloadDecision(); decision.setUnload(unload); decision.succeed(reason); decisionCache.add(decision); trafficMarkedToOffload += maxBrokerBundleThroughput; remainingTopBundles--; if (debugMode) { log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s." + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s." + " Target:%.2f KByte/s.", bundle, maxBrokerBundleThroughput / KB, trafficMarkedToOffload / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain) / KB, offloadThroughput / KB)); } } if (trafficMarkedToOffload > 0) { var adjustedOffload = (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput; stats.offload(maxLoad, minLoad, adjustedOffload); if (debugMode) { log.info( String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}", stats, maxLoad, minLoad, adjustedOffload)); } } else { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " There is no bundle that can be unloaded in top bundles load data. 
" + "Consider splitting bundles owned by the broker " + "to make each bundle serve less traffic " + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport" + " to report more bundles in the top bundles load data.", maxBroker)); } } // while end if (debugMode) { log.info("decisionCache:{}", decisionCache); } if (decisionCache.isEmpty()) { UnloadDecision.Reason reason; if (numOfBrokersWithEmptyLoadData > 0) { reason = NoLoadData; } else if (numOfBrokersWithFewBundles > 0) { reason = NoBundles; } else { reason = HitCount; } counter.update(Skip, reason); } else { unloadConditionHitCount = 0; } } catch (Throwable e) { log.error("Failed to process unloading. ", e); this.counter.update(Failure, Unknown); } return decisionCache; }
@Test public void testUnloadBundlesGreaterThanTargetThroughputAfterSplit() throws IllegalAccessException { UnloadCounter counter = new UnloadCounter(); TransferShedder transferShedder = new TransferShedder(counter); var ctx = getContext(); var brokerRegistry = mock(BrokerRegistry.class); doReturn(brokerRegistry).when(ctx).brokerRegistry(); doReturn(CompletableFuture.completedFuture(Map.of( "broker1:8080", mock(BrokerLookupData.class), "broker2:8080", mock(BrokerLookupData.class) ))).when(brokerRegistry).getAvailableBrokerLookupDataAsync(); var topBundlesLoadDataStore = ctx.topBundleLoadDataStore(); topBundlesLoadDataStore.pushAsync("broker1:8080", getTopBundlesLoad("my-tenant/my-namespaceA", 2400000, 2400000)); topBundlesLoadDataStore.pushAsync("broker2:8080", getTopBundlesLoad("my-tenant/my-namespaceB", 5000000, 5000000)); var brokerLoadDataStore = ctx.brokerLoadDataStore(); brokerLoadDataStore.pushAsync("broker1:8080", getCpuLoad(ctx, 48, "broker1:8080")); brokerLoadDataStore.pushAsync("broker2:8080", getCpuLoad(ctx, 100, "broker2:8080")); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); var expected = new HashSet<UnloadDecision>(); expected.add(new UnloadDecision( new Unload("broker1:8080", res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker1:8080")).findFirst().get() .getUnload().serviceUnit(), Optional.of("broker2:8080")), Success, Overloaded)); expected.add(new UnloadDecision( new Unload("broker2:8080", res.stream().filter(x -> x.getUnload().sourceBroker().equals("broker2:8080")).findFirst().get() .getUnload().serviceUnit(), Optional.of("broker1:8080")), Success, Overloaded)); assertEquals(counter.getLoadAvg(), 0.74); assertEquals(counter.getLoadStd(), 0.26); assertEquals(res, expected); var stats = (TransferShedder.LoadStats) FieldUtils.readDeclaredField(transferShedder, "stats", true); assertEquals(stats.std(), 2.5809568279517847E-8); }
static Map<String, String> resolveVariables(String expression, String str) { if (expression == null || str == null) return Collections.emptyMap(); Map<String, String> resolvedVariables = new HashMap<>(); StringBuilder variableBuilder = new StringBuilder(); State state = State.TEXT; int j = 0; int expressionLength = expression.length(); for (int i = 0; i < expressionLength; i++) { char e = expression.charAt(i); switch (e) { case '{': if (state == END_VAR) return Collections.emptyMap(); state = VAR; break; case '}': if (state != VAR) return Collections.emptyMap(); state = END_VAR; if (i != expressionLength - 1) break; default: switch (state) { case VAR: variableBuilder.append(e); break; case END_VAR: String replacement; boolean ec = i == expressionLength - 1; if (ec) { replacement = str.substring(j); } else { int k = str.indexOf(e, j); if (k == -1) return Collections.emptyMap(); replacement = str.substring(j, str.indexOf(e, j)); } resolvedVariables.put(variableBuilder.toString(), replacement); j += replacement.length(); if (j == str.length() && ec) return resolvedVariables; variableBuilder.setLength(0); state = TEXT; case TEXT: if (str.charAt(j) != e) return Collections.emptyMap(); j++; } } } return resolvedVariables; }
@Test public void testMalformedExpression() { Map<String, String> res = resolveVariables("{counter {id}}", "whatever"); assertEquals(0, res.size()); }
/**
 * Parses DistCp command-line arguments into an immutable {@link DistCpOptions}.
 *
 * <p>Boolean switches are mapped straight onto the builder; valued switches
 * (bandwidth, thread counts, snapshots, paths, …) are validated and converted,
 * with a descriptive {@link IllegalArgumentException} on any malformed value.
 *
 * @param args the command-line arguments as passed to DistCp
 * @return the fully built options object
 * @throws IllegalArgumentException if the arguments cannot be parsed or a
 *         numeric/snapshot value is invalid
 */
public static DistCpOptions parse(String[] args)
    throws IllegalArgumentException {
  CommandLineParser parser = new CustomParser();

  CommandLine command;
  try {
    command = parser.parse(cliOptions, args, true);
  } catch (ParseException e) {
    throw new IllegalArgumentException("Unable to parse arguments. "
        + Arrays.toString(args), e);
  }

  DistCpOptions.Builder builder = parseSourceAndTargetPaths(command);
  builder
      .withAtomicCommit(
          command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch()))
      .withSyncFolder(
          command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch()))
      .withDeleteMissing(
          command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch()))
      .withIgnoreFailures(
          command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch()))
      .withOverwrite(
          command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch()))
      .withAppend(
          command.hasOption(DistCpOptionSwitch.APPEND.getSwitch()))
      .withSkipCRC(
          command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch()))
      // Note the negation: presence of the BLOCKING switch turns blocking OFF.
      .withBlocking(
          !command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch()))
      .withVerboseLog(
          command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch()))
      .withDirectWrite(
          command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch()))
      .withUseIterator(
          command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch()))
      .withUpdateRoot(
          command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch()));

  if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.DIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseDiff(snapshots[0], snapshots[1]);
  }
  if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) {
    String[] snapshots = getVals(command,
        DistCpOptionSwitch.RDIFF.getSwitch());
    checkSnapshotsArgs(snapshots);
    builder.withUseRdiff(snapshots[0], snapshots[1]);
  }

  if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
    builder.withFiltersFile(
        getVal(command, DistCpOptionSwitch.FILTERS.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
    builder.withLogPath(
        new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
    final String workPath = getVal(command,
        DistCpOptionSwitch.WORK_PATH.getSwitch());
    if (workPath != null && !workPath.isEmpty()) {
      builder.withAtomicWorkPath(new Path(workPath));
    }
  }
  if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) {
    builder.withTrackMissing(
        new Path(getVal(
            command,
            DistCpOptionSwitch.TRACK_MISSING.getSwitch())));
  }

  if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
    try {
      final float mapBandwidth = Float.parseFloat(
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()));
      builder.withMapBandwidth(mapBandwidth);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Bandwidth specified is invalid: " +
          getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
    }
  }

  if (command.hasOption(
      DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
    try {
      final int numThreads = Integer.parseInt(getVal(command,
          DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()));
      builder.withNumListstatusThreads(numThreads);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException(
          "Number of liststatus threads is invalid: " + getVal(command,
              DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
    try {
      final int maps = Integer.parseInt(
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()));
      builder.maxMaps(maps);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Number of maps is invalid: " +
          getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
    builder.withCopyStrategy(
        getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
    builder.preserve(
        getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
  }

  if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" +
        " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
    LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
        " option. Ignoring.");
  }

  if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) {
    // FIX: the stray ".trim()" was previously applied to getSwitch() (a no-op
    // on the whitespace-free switch constant) instead of anywhere useful.
    final String chunkSizeStr = getVal(command,
        DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch());
    try {
      int csize = Integer.parseInt(chunkSizeStr);
      csize = csize > 0 ? csize : 0;
      LOG.info("Set distcp blocksPerChunk to " + csize);
      builder.withBlocksPerChunk(csize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("blocksPerChunk is invalid: "
          + chunkSizeStr, e);
    }
  }

  if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) {
    // FIX: same misplaced ".trim()" removed here as well.
    final String copyBufferSizeStr = getVal(command,
        DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch());
    try {
      int copyBufferSize = Integer.parseInt(copyBufferSizeStr);
      builder.withCopyBufferSize(copyBufferSize);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("copyBufferSize is invalid: "
          + copyBufferSizeStr, e);
    }
  }

  return builder.build();
}
@Test public void testSourceListing() { DistCpOptions options = OptionsParser.parse(new String[] { "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); Assert.assertEquals(options.getSourceFileListing(), new Path("hdfs://localhost:8020/source/first")); }
@Override public MergeAppend appendFile(DataFile file) { add(file); return this; }
@TestTemplate public void testEmptyTableAppend() { assertThat(listManifestFiles()).isEmpty(); TableMetadata base = readMetadata(); assertThat(base.currentSnapshot()).isNull(); assertThat(base.lastSequenceNumber()).isEqualTo(0); Snapshot committedSnapshot = commit(table, table.newAppend().appendFile(FILE_A).appendFile(FILE_B), branch); assertThat(committedSnapshot).isNotNull(); V1Assert.assertEquals( "Last sequence number should be 0", 0, table.ops().current().lastSequenceNumber()); V2Assert.assertEquals( "Last sequence number should be 1", 1, table.ops().current().lastSequenceNumber()); assertThat(committedSnapshot.allManifests(table.io())).hasSize(1); long snapshotId = committedSnapshot.snapshotId(); validateManifest( committedSnapshot.allManifests(table.io()).get(0), dataSeqs(1L, 1L), fileSeqs(1L, 1L), ids(snapshotId, snapshotId), files(FILE_A, FILE_B), statuses(Status.ADDED, Status.ADDED)); }
public Class<?> getSerializedClass() { return serializedClass; }
@Test void testConstructorWithSerializedClass() { NacosSerializationException exception = new NacosSerializationException(NacosSerializationExceptionTest.class); assertEquals(Constants.Exception.SERIALIZE_ERROR_CODE, exception.getErrCode()); assertEquals(String.format("errCode: 100, errMsg: Nacos serialize for class [%s] failed. ", NacosSerializationExceptionTest.class.getName()), exception.getMessage()); assertEquals(NacosSerializationExceptionTest.class, exception.getSerializedClass()); }
public static Predicate parse(String expression) { final Stack<Predicate> predicateStack = new Stack<>(); final Stack<Character> operatorStack = new Stack<>(); final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll(""); final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true); boolean isTokenMode = true; while (true) { final Character operator; final String token; if (isTokenMode) { if (tokenizer.hasMoreTokens()) { token = tokenizer.nextToken(); } else { break; } if (OPERATORS.contains(token)) { operator = token.charAt(0); } else { operator = null; } } else { operator = operatorStack.pop(); token = null; } isTokenMode = true; if (operator == null) { try { predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance()); } catch (ClassCastException e) { throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e); } catch (Exception e) { throw new RuntimeException(e); } } else { if (operatorStack.empty() || operator == '(') { operatorStack.push(operator); } else if (operator == ')') { while (operatorStack.peek() != '(') { evaluate(predicateStack, operatorStack); } operatorStack.pop(); } else { if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek())) { evaluate(predicateStack, operatorStack); isTokenMode = false; } operatorStack.push(operator); } } } while (!operatorStack.empty()) { evaluate(predicateStack, operatorStack); } if (predicateStack.size() > 1) { throw new RuntimeException("Invalid logical expression"); } return predicateStack.pop(); }
@Test public void testDoubleNot() { final Predicate parsed = PredicateExpressionParser.parse("!!com.linkedin.data.it.AlwaysTruePredicate"); Assert.assertEquals(parsed.getClass(), NotPredicate.class); final Predicate intermediate1 = ((NotPredicate) parsed).getChildPredicate(); Assert.assertEquals(intermediate1.getClass(), NotPredicate.class); final Predicate intermediate2 = ((NotPredicate) intermediate1).getChildPredicate(); Assert.assertEquals(intermediate2.getClass(), AlwaysTruePredicate.class); }
/**
 * REST endpoint that creates a new key in the KMS key provider.
 * Expects a JSON body with name (required), and optional cipher, material
 * (base64), length, description and attributes fields. Enforces CREATE access
 * (and SET_KEY_MATERIAL access when material is supplied), audits the
 * operation, and strips key material from the response unless the caller has
 * GET access. Returns 201 Created with the key JSON and a Location header.
 */
@POST
@Path(KMSRESTConstants.KEYS_RESOURCE)
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
@SuppressWarnings("unchecked")
public Response createKey(Map jsonKey) throws Exception {
    try {
        LOG.trace("Entering createKey Method.");
        KMSWebApp.getAdminCallsMeter().mark();
        UserGroupInformation user = HttpUserGroupInformation.get();
        // Extract and validate request fields from the JSON body.
        final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
        checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
        assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
        String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
        final String material;
        material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
        int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD)) ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
        String description = (String) jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
        // NOTE(review): "cipher being used{}" is missing a space before the
        // placeholder in this log message — cosmetic, confirm before changing.
        LOG.debug("Creating key with name {}, cipher being used{}, " + "length of key {}, description of key {}", name, cipher, length, description);
        Map<String, String> attributes = (Map<String, String>) jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
        // Caller-supplied key material requires an extra ACL check.
        if (material != null) {
            assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user, KMSOp.CREATE_KEY, name);
        }
        final KeyProvider.Options options = new KeyProvider.Options(KMSWebApp.getConfiguration());
        if (cipher != null) {
            options.setCipher(cipher);
        }
        if (length != 0) {
            options.setBitLength(length);
        }
        options.setDescription(description);
        options.setAttributes(attributes);
        // Create (and flush) the key as the authenticated user.
        KeyProvider.KeyVersion keyVersion = user.doAs(
            new PrivilegedExceptionAction<KeyVersion>() {
                @Override
                public KeyVersion run() throws Exception {
                    KeyProvider.KeyVersion keyVersion = (material != null) ? provider.createKey(name, Base64.decodeBase64(material), options) : provider.createKey(name, options);
                    provider.flush();
                    return keyVersion;
                }
            }
        );
        kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" + (material != null) + " Description:" + description);
        // Hide the key material from callers lacking GET access.
        if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
            keyVersion = removeKeyMaterial(keyVersion);
        }
        Map json = KMSUtil.toJSON(keyVersion);
        // Build the Location header from the request URL, truncated at the keys resource.
        String requestURL = KMSMDCFilter.getURL();
        int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
        requestURL = requestURL.substring(0, idx);
        LOG.trace("Exiting createKey Method.");
        return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
            .type(MediaType.APPLICATION_JSON)
            .header("Location", getKeyURI(requestURL, name)).entity(json).build();
    } catch (Exception e) {
        LOG.debug("Exception in createKey.", e);
        throw e;
    }
}
// Verifies that a non-Kerberos user can access the KMS once it holds a
// delegation token obtained by a Kerberos-authenticated "client" user:
// direct access fails, token-based doAs access succeeds.
@Test
public void testDelegationTokenAccess() throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos");
    final File testDir = getTestDir();
    conf = createBaseKMSConf(testDir, conf);
    conf.set("hadoop.kms.authentication.type", "kerberos");
    conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath());
    conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
    conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
    final String keyA = "key_a";
    final String keyD = "key_d";
    // Open up both keys' ACLs so only authentication (not key ACLs) is under test.
    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + keyA + ".ALL", "*");
    conf.set(KeyAuthorizationKeyProvider.KEY_ACL + keyD + ".ALL", "*");
    writeConf(testDir, conf);
    runServer(null, null, testDir, new KMSCallable<Void>() {
        @Override
        public Void call() throws Exception {
            final Configuration conf = new Configuration();
            conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
            final URI uri = createKMSUri(getKMSUrl());
            final Credentials credentials = new Credentials();
            final UserGroupInformation nonKerberosUgi = UserGroupInformation.getCurrentUser();
            // Without Kerberos credentials or a token this create is expected to fail.
            try {
                KeyProvider kp = createProvider(uri, conf);
                kp.createKey(keyA, new KeyProvider.Options(conf));
            } catch (IOException ex) {
                System.out.println(ex.getMessage());
            }
            // Fetch a delegation token as the Kerberos-authenticated "client".
            doAs("client", new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    KeyProvider kp = createProvider(uri, conf);
                    KeyProviderDelegationTokenExtension kpdte = KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp);
                    kpdte.addDelegationTokens("foo", credentials);
                    return null;
                }
            });
            nonKerberosUgi.addCredentials(credentials);
            // Still outside a doAs, so the token is not used — expected to fail again.
            try {
                KeyProvider kp = createProvider(uri, conf);
                kp.createKey(keyA, new KeyProvider.Options(conf));
            } catch (IOException ex) {
                System.out.println(ex.getMessage());
            }
            // With the token attached to the UGI, the create should now succeed.
            nonKerberosUgi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    KeyProvider kp = createProvider(uri, conf);
                    kp.createKey(keyD, new KeyProvider.Options(conf));
                    return null;
                }
            });
            return null;
        }
    });
}
/**
 * Returns the textual representation of the subnet's end address,
 * which is its broadcast address.
 */
public String getBroadcastAddress() {
    return this.endAddress.getHostAddress();
}
// Verifies that the subnet reports the expected broadcast address string.
@Test
public void getBroadcastAddress() {
    assertThat(ipSubnet.getBroadcastAddress()).isEqualTo(broadcastAddress);
}
/**
 * Creates a member tag from the request VO and returns the generated id.
 *
 * @param createReqVO the tag creation request
 * @return the id of the newly inserted tag
 */
@Override
public Long createTag(MemberTagCreateReqVO createReqVO) {
    // Validate that the tag name is unique
    validateTagNameUnique(null, createReqVO.getName());
    // Convert and insert the new tag record
    MemberTagDO tag = MemberTagConvert.INSTANCE.convert(createReqVO);
    memberTagMapper.insert(tag);
    // Return the generated id
    return tag.getId();
}
// Verifies the happy path of tag creation: a non-null id is returned and the
// persisted record matches the request VO.
@Test
public void testCreateTag_success() {
    // Prepare parameters
    MemberTagCreateReqVO reqVO = randomPojo(MemberTagCreateReqVO.class);
    // Invoke
    Long tagId = tagService.createTag(reqVO);
    // Assert
    assertNotNull(tagId);
    // Verify the persisted record's attributes are correct
    MemberTagDO tag = tagMapper.selectById(tagId);
    assertPojoEquals(reqVO, tag);
}
/**
 * Thrift handler that creates partitions for a transaction.
 * Applies back-pressure: if in-flight partition requests reach a quarter of the
 * thrift worker thread limit, it rejects with SERVICE_UNAVAILABLE instead of
 * queueing. Any failure from the actual create is converted to a RUNTIME_ERROR
 * status rather than propagated as a thrift exception.
 */
@Override
public TCreatePartitionResult createPartition(TCreatePartitionRequest request) throws TException {
    LOG.info("Receive create partition: {}", request);
    TCreatePartitionResult result;
    try {
        // Back-pressure check: increment first, so the finally-block decrement
        // balances it on both the early-return and normal paths.
        if (partitionRequestNum.incrementAndGet() >= Config.thrift_server_max_worker_threads / 4) {
            result = new TCreatePartitionResult();
            TStatus errorStatus = new TStatus(SERVICE_UNAVAILABLE);
            errorStatus.setError_msgs(Lists.newArrayList(
                String.format("Too many create partition requests, please try again later txn_id=%d", request.getTxn_id())));
            result.setStatus(errorStatus);
            return result;
        }
        result = createPartitionProcess(request);
    } catch (Exception t) {
        // Convert failures into an error status so the thrift call still returns a result.
        LOG.warn(DebugUtil.getStackTrace(t));
        result = new TCreatePartitionResult();
        TStatus errorStatus = new TStatus(RUNTIME_ERROR);
        errorStatus.setError_msgs(Lists.newArrayList(String.format("txn_id=%d failed. %s", request.getTxn_id(), t.getMessage())));
        result.setStatus(errorStatus);
    } finally {
        partitionRequestNum.decrementAndGet();
    }
    return result;
}
// Verifies that automatic partition creation fails with RUNTIME_ERROR when the
// per-table partition limit (max_partition_number_per_table) would be exceeded.
@Test
public void testAutomaticPartitionLimitExceed() throws TException {
    // Force the limit low so two new partition values exceed it.
    Config.max_partition_number_per_table = 1;
    Database db = GlobalStateMgr.getCurrentState().getDb("test");
    Table table = db.getTable("site_access_slice");
    List<List<String>> partitionValues = Lists.newArrayList();
    List<String> values = Lists.newArrayList();
    values.add("1991-04-24");
    partitionValues.add(values);
    List<String> values2 = Lists.newArrayList();
    values2.add("1991-04-25");
    partitionValues.add(values2);
    FrontendServiceImpl impl = new FrontendServiceImpl(exeEnv);
    TCreatePartitionRequest request = new TCreatePartitionRequest();
    request.setDb_id(db.getId());
    request.setTable_id(table.getId());
    request.setPartition_values(partitionValues);
    TCreatePartitionResult partition = impl.createPartition(request);
    Assert.assertEquals(partition.getStatus().getStatus_code(), TStatusCode.RUNTIME_ERROR);
    Assert.assertTrue(partition.getStatus().getError_msgs().get(0).contains("max_partition_number_per_table"));
    // Restore the config so other tests are unaffected.
    Config.max_partition_number_per_table = 100000;
}
/**
 * Sends an email with a single file attachment over authenticated SMTP
 * (STARTTLS enabled), using host/port/credentials from the injected emailConfig.
 *
 * @param to       recipient address
 * @param subject  message subject
 * @param content  plain-text message body
 * @param filename path of the file to attach (also used as the attachment name)
 * @throws MessagingException if building or sending the message fails
 */
public void sendMailWithAttachment (String to, String subject, String content, String filename) throws MessagingException{
    // Build SMTP session properties from the injected configuration.
    Properties props = new Properties();
    props.put("mail.smtp.user", emailConfig.getUser());
    props.put("mail.smtp.host", emailConfig.getHost());
    props.put("mail.smtp.port", emailConfig.getPort());
    props.put("mail.smtp.starttls.enable","true");
    props.put("mail.smtp.debug", emailConfig.getDebug());
    props.put("mail.smtp.auth", emailConfig.getAuth());
    props.put("mail.smtp.ssl.trust", emailConfig.host);
    String pass = emailConfig.getPass();
    SMTPAuthenticator auth = new SMTPAuthenticator(emailConfig.getUser(), pass);
    Session session = Session.getInstance(props, auth);
    MimeMessage message = new MimeMessage(session);
    message.setFrom(new InternetAddress(emailConfig.getUser()));
    message.addRecipient(Message.RecipientType.TO, new InternetAddress(to));
    message.setSubject(subject);
    // Create the message part
    BodyPart messageBodyPart = new MimeBodyPart();
    // Now set the actual message
    messageBodyPart.setText(content);
    // Create a multipart message
    Multipart multipart = new MimeMultipart();
    // Set text message part
    multipart.addBodyPart(messageBodyPart);
    // Part two is the attachment
    messageBodyPart = new MimeBodyPart();
    DataSource source = new FileDataSource(filename);
    messageBodyPart.setDataHandler(new DataHandler(source));
    messageBodyPart.setFileName(filename);
    multipart.addBodyPart(messageBodyPart);
    // Attach the complete multipart content to the message
    message.setContent(multipart);
    // Send message
    Transport.send(message);
    if(logger.isInfoEnabled())
        logger.info("An email has been sent to {} with subject {}", to, subject);
}
// Manual smoke test for sending a real email with an attachment.
// @Ignore'd because it requires live SMTP credentials/connectivity.
@Test
@Ignore
public void testEmailWithAttachment() {
    EmailSender sender = new EmailSender();
    try {
        // Use the project's own pom.xml as a readily available attachment.
        File file = new File("pom.xml");
        String absolutePath = file.getAbsolutePath();
        sender.sendMailWithAttachment("stevehu@gmail.com", "with attachment", "This is message body", absolutePath);
    } catch (MessagingException e) {
        e.printStackTrace();
    }
}
/**
 * Visits the metadata tree rooted at the shell state, matching nodes against
 * this visitor's glob. Relative globs are resolved against the shell's working
 * directory. If nothing matches, the handler is invoked once with an empty
 * Optional so callers still observe a result.
 */
@Override
public void accept(MetadataShellState state) {
    // Resolve a relative glob against the current working directory.
    String fullGlob = glob.startsWith("/") ? glob : state.workingDirectory() + "/" + glob;
    // Normalize away "." / ".." components and split into per-level patterns.
    List<String> globComponents = CommandUtils.stripDotPathComponents(CommandUtils.splitPath(fullGlob));
    MetadataNode root = state.root();
    if (root == null) {
        throw new RuntimeException("Invalid null root");
    }
    // Recursive matcher returns false when no node matched; report "no match".
    if (!accept(globComponents, 0, root, new String[0])) {
        handler.accept(Optional.empty());
    }
}
// Verifies that a bare "*" glob matches all children of the current directory.
@Test
public void testStarGlob() {
    InfoConsumer consumer = new InfoConsumer();
    GlobVisitor visitor = new GlobVisitor("*", consumer);
    visitor.accept(DATA);
    assertEquals(Optional.of(Arrays.asList(
        new MetadataNodeInfo(new String[] {"foo", "a"}, DATA.root().child("foo").child("a")),
        new MetadataNodeInfo(new String[] {"foo", "beta"}, DATA.root().child("foo").child("beta")))),
        consumer.infos);
}
/**
 * Validates the given properties and returns the per-config results as a list.
 * Delegates the actual validation to {@code validateAll} and flattens its
 * value collection into a fresh, mutable list.
 */
public List<ConfigValue> validate(Map<String, String> props) {
    List<ConfigValue> results = new ArrayList<>();
    results.addAll(validateAll(props).values());
    return results;
}
// Verifies ConfigDef.validate(): configs "b" and "c" are required with no
// default, so their ConfigValues carry "missing required configuration" errors
// ("b" twice — once per dependent — and "c" once), while "a" and "d" validate
// cleanly with recommended values from the IntegerRecommender.
@Test
public void testValidate() {
    Map<String, ConfigValue> expected = new HashMap<>();
    String errorMessageB = "Missing required configuration \"b\" which has no default value.";
    String errorMessageC = "Missing required configuration \"c\" which has no default value.";
    ConfigValue configA = new ConfigValue("a", 1, Arrays.asList(1, 2, 3), Collections.emptyList());
    ConfigValue configB = new ConfigValue("b", null, Arrays.asList(4, 5), Arrays.asList(errorMessageB, errorMessageB));
    ConfigValue configC = new ConfigValue("c", null, Arrays.asList(4, 5), singletonList(errorMessageC));
    ConfigValue configD = new ConfigValue("d", 10, Arrays.asList(1, 2, 3), Collections.emptyList());
    expected.put("a", configA);
    expected.put("b", configB);
    expected.put("c", configC);
    expected.put("d", configD);
    // "a" depends on "b" and "c"; "d" depends on "b".
    ConfigDef def = new ConfigDef()
        .define("a", Type.INT, Importance.HIGH, "docs", "group", 1, Width.SHORT, "a", Arrays.asList("b", "c"), new IntegerRecommender(false))
        .define("b", Type.INT, Importance.HIGH, "docs", "group", 2, Width.SHORT, "b", new IntegerRecommender(true))
        .define("c", Type.INT, Importance.HIGH, "docs", "group", 3, Width.SHORT, "c", new IntegerRecommender(true))
        .define("d", Type.INT, Importance.HIGH, "docs", "group", 4, Width.SHORT, "d", singletonList("b"), new IntegerRecommender(false));
    Map<String, String> props = new HashMap<>();
    props.put("a", "1");
    props.put("d", "10");
    List<ConfigValue> configs = def.validate(props);
    // Compare each returned ConfigValue to its expected counterpart by name.
    for (ConfigValue config : configs) {
        String name = config.name();
        ConfigValue expectedConfig = expected.get(name);
        assertEquals(expectedConfig, config);
    }
}
/**
 * Wraps over-long string literals in {@code input} using the given formatter,
 * delegating to {@link StringWrapper#wrap} with the formatter's maximum line
 * length.
 *
 * @throws FormatterException if formatting the input fails
 */
public static String wrap(String input, Formatter formatter) throws FormatterException {
    return StringWrapper.wrap(Formatter.MAX_LINE_LENGTH, input, formatter);
}
// Verifies that wrapping a text block with mixed space/tab indentation
// normalizes the indentation. Requires Java 15+ (text blocks), hence the
// runtime-version assumption.
@Test
public void textBlockSpaceTabMix() throws Exception {
    assumeTrue(Runtime.version().feature() >= 15);
    String input = lines(
        "public class T {",
        "  String s =",
        "      \"\"\"",
        "      lorem",
        "      \tipsum",
        "      \"\"\";",
        "}");
    String expected = lines(
        "public class T {",
        "  String s =",
        "      \"\"\"",
        "      lorem",
        "      ipsum",
        "      \"\"\";",
        "}");
    String actual = StringWrapper.wrap(100, input, new Formatter());
    assertThat(actual).isEqualTo(expected);
}
/**
 * Factory for a {@link MappingAllocator} over the given broker-id mapping and
 * per-broker queue counts (current and pre-remapping snapshots).
 */
public static MappingAllocator buildMappingAllocator(Map<Integer, String> idToBroker, Map<String, Integer> brokerNumMap,
                                                     Map<String, Integer> brokerNumMapBeforeRemapping) {
    return new MappingAllocator(idToBroker, brokerNumMap, brokerNumMapBeforeRemapping);
}
// Verifies that allocating up to a target total leaves the allocator's broker
// count map equal to the pre-remapping snapshot across a range of sizes.
@Test
public void testRemappingAllocator() {
    for (int i = 0; i < 10; i++) {
        int num = (i + 2) * 2;
        Map<String, Integer> brokerNumMap = buildBrokerNumMap(num);
        Map<String, Integer> brokerNumMapBeforeRemapping = buildBrokerNumMap(num, num);
        TopicQueueMappingUtils.MappingAllocator allocator = TopicQueueMappingUtils.buildMappingAllocator(new HashMap<>(), brokerNumMap, brokerNumMapBeforeRemapping);
        allocator.upToNum(num * num + 1);
        Assert.assertEquals(brokerNumMapBeforeRemapping, allocator.getBrokerNumMap());
    }
}
/**
 * Pads a big-endian two's-complement byte array to {@code newLength} bytes,
 * sign-extending on the left: negative values (leading byte &lt; 0) are padded
 * with 0xFF, non-negative values (including an empty array, which represents
 * zero) with 0x00. Returns the input unchanged when it is already the
 * requested length.
 *
 * @throws IllegalArgumentException if the input is longer than newLength
 */
@VisibleForTesting
static byte[] padBigEndianBytes(byte[] bigEndianBytes, int newLength) {
    if (bigEndianBytes.length > newLength) {
        throw new IllegalArgumentException(
            String.format(
                "Buffer size of %d is larger than requested size of %d",
                bigEndianBytes.length, newLength));
    }
    if (bigEndianBytes.length == newLength) {
        return bigEndianBytes;
    }
    byte[] padded = new byte[newLength];
    int padCount = newLength - bigEndianBytes.length;
    // Sign-extend: a negative leading byte means the value is negative,
    // so the padding bytes must all be 0xFF; otherwise they stay 0x00.
    if (bigEndianBytes.length > 0 && bigEndianBytes[0] < 0) {
        Arrays.fill(padded, 0, padCount, (byte) 0xFF);
    }
    System.arraycopy(bigEndianBytes, 0, padded, padCount, bigEndianBytes.length);
    return padded;
}
// Verifies that padding the byte representation of zero — both BigInteger.ZERO's
// single-byte form and a zero-length array — yields a 16-byte buffer that still
// decodes to zero.
@Test
public void testPadBigEndianBytesZero() {
    byte[] bytes = BigInteger.ZERO.toByteArray();
    byte[] paddedBytes = DecimalVectorUtil.padBigEndianBytes(bytes, 16);
    assertThat(paddedBytes).hasSize(16);
    BigInteger result = new BigInteger(paddedBytes);
    assertThat(result).isEqualTo(BigInteger.ZERO);
    // Empty input is treated as zero as well.
    bytes = new byte[0];
    paddedBytes = DecimalVectorUtil.padBigEndianBytes(bytes, 16);
    assertThat(paddedBytes).hasSize(16);
    result = new BigInteger(paddedBytes);
    assertThat(result).isEqualTo(BigInteger.ZERO);
}
/**
 * Looks up a config info row by primary key.
 *
 * @param id the config row id
 * @return the matching ConfigInfo, or null when no row exists
 * @throws CannotGetJdbcConnectionException if the database is unreachable
 */
@Override
public ConfigInfo findConfigInfo(long id) {
    try {
        ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.CONFIG_INFO);
        return this.jt.queryForObject(configInfoMapper.select(
            Arrays.asList("id", "data_id", "group_id", "tenant_id", "app_name", "content"),
            Collections.singletonList("id")), new Object[] {id}, CONFIG_INFO_ROW_MAPPER);
    } catch (EmptyResultDataAccessException e) {
        // Indicates that the data does not exist, returns null.
        return null;
    } catch (CannotGetJdbcConnectionException e) {
        // Connection-level failures are fatal: log and propagate.
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
// Verifies that findConfigInfo returns null (instead of propagating
// EmptyResultDataAccessException) when no matching row exists.
@Test
void testFindConfigInfoByDataIdNull() {
    String dataId = "dataId4567";
    String group = "group3456789";
    String tenant = "tenant4567890";
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
            eq(CONFIG_INFO_WRAPPER_ROW_MAPPER)))
        .thenThrow(new EmptyResultDataAccessException(1));
    ConfigInfoWrapper configReturn = externalConfigInfoPersistService.findConfigInfo(dataId, group, tenant);
    assertNull(configReturn);
}
/**
 * Returns the maximum recorded value, or 0 for an empty snapshot.
 * Assumes {@code values} is sorted ascending so the last element is the
 * maximum — TODO confirm against where {@code values} is populated.
 */
@Override
public long getMax() {
    if (values.length == 0) {
        return 0;
    }
    return values[values.length - 1];
}
// Verifies the empty-snapshot edge case: getMax() reports 0 rather than failing.
@Test
public void calculatesAMaxOfZeroForAnEmptySnapshot() {
    final Snapshot emptySnapshot = new WeightedSnapshot(
        weightedArray(new long[]{}, new double[]{}));
    assertThat(emptySnapshot.getMax())
        .isZero();
}
/**
 * Resets a directory to an empty state: removes it (and its contents) if it
 * exists, then recreates it.
 *
 * @param path the directory path to reset
 * @throws IOException if deletion or creation fails
 */
public static void deleteDirThenMkdir(String path) throws IOException {
    deleteDirectory(path);
    forceMkdir(path);
}
// Verifies that deleteDirThenMkdir empties a populated directory and leaves
// it existing as an empty directory.
@Test
void deleteDirThenMkdir() throws IOException {
    Path path = Paths.get(TMP_PATH, UUID.randomUUID().toString());
    DiskUtils.forceMkdir(path.toString());
    // Seed the directory with files so the reset has something to remove.
    DiskUtils.createTmpFile(path.toString(), UUID.randomUUID().toString(), ".ut");
    DiskUtils.createTmpFile(path.toString(), UUID.randomUUID().toString(), ".ut");
    DiskUtils.deleteDirThenMkdir(path.toString());
    File file = path.toFile();
    assertTrue(file.exists());
    assertTrue(file.isDirectory());
    assertTrue(file.list() == null || file.list().length == 0);
    file.deleteOnExit();
}
/**
 * Creates a RobustActorSystem with the default fatal-exit uncaught-exception
 * handler, which terminates the process on unrecoverable errors.
 */
public static RobustActorSystem create(String name, Config applicationConfig) {
    return create(name, applicationConfig, FatalExitExceptionHandler.INSTANCE);
}
// Verifies that an Error thrown inside an actor reaches the actor system's
// uncaught-exception handler as-is (same instance, not wrapped).
@Test
void testUncaughtExceptionHandlerFromActor() {
    final Error error = new UnknownError();
    final ActorRef actor = robustActorSystem.actorOf(Props.create(UncaughtExceptionActor.class, error));
    actor.tell(new Failure(), null);
    final Throwable uncaughtException = testingUncaughtExceptionHandler.waitForUncaughtException();
    assertThat(uncaughtException).isSameAs(error);
}
/**
 * Joins the non-blank entries of {@code strings} with {@code separator}.
 * A null or empty input array joins to the empty string; blank entries are
 * skipped entirely (no separator is emitted for them).
 */
public static String join(String[] strings, String separator) {
    if (strings == null || strings.length == 0) {
        return EMPTY;
    }
    StringBuilder joined = new StringBuilder();
    boolean first = true;
    for (String candidate : strings) {
        if (isNotBlank(candidate)) {
            // Prefix the separator before every element except the first kept one.
            if (!first) {
                joined.append(separator);
            }
            joined.append(candidate);
            first = false;
        }
    }
    return joined.toString();
}
// Verifies join() with an empty separator, a comma separator, an empty array,
// and a null array (the last two both yield the empty string).
@Test
public void testJoin() throws Exception {
    String[] src = new String[] { "1", "2", "3" };
    String arrayString = StringUtils.join(src, "");
    Assert.assertEquals(arrayString, "123");
    arrayString = StringUtils.join(src, ",");
    Assert.assertEquals(arrayString, "1,2,3");
    arrayString = StringUtils.join(new String[] {}, ",");
    Assert.assertEquals(arrayString, "");
    arrayString = StringUtils.join(null, "");
    Assert.assertEquals(arrayString, "");
}
/** Returns the timestamp (millis) of the most recently recorded heartbeat. */
@Override
public long lastHeartbeat() {
    return lastHeartbeatMillis;
}
// Verifies that lastHeartbeat() echoes back the timestamp passed to heartbeat().
@Test
public void lastHeartbeat() {
    long timestamp = Clock.currentTimeMillis();
    failureDetector.heartbeat(timestamp);
    long lastHeartbeat = failureDetector.lastHeartbeat();
    assertEquals(timestamp, lastHeartbeat);
}
/**
 * Builds the argparse4j parser for the producer-performance tool.
 * Exactly one of --record-size / --payload-file / --payload-monotonic must be
 * given (mutually exclusive group); --topic, --num-records and --throughput are
 * required; the remaining options configure producer properties, metrics
 * printing and transactional behavior.
 */
static ArgumentParser argParser() {
    ArgumentParser parser = ArgumentParsers
        .newArgumentParser("producer-performance")
        .defaultHelp(true)
        .description("This tool is used to verify the producer performance. To enable transactions, " +
            "you can specify a transaction id or set a transaction duration using --transaction-duration-ms. " +
            "There are three ways to specify the transaction id: set transaction.id=<id> via --producer-props, " +
            "set transaction.id=<id> in the config file via --producer.config, or use --transaction-id <id>.");

    // Payload source: exactly one of the three options in this group.
    MutuallyExclusiveGroup payloadOptions = parser
        .addMutuallyExclusiveGroup()
        .required(true)
        .description("either --record-size or --payload-file must be specified but not both.");

    parser.addArgument("--topic")
        .action(store())
        .required(true)
        .type(String.class)
        .metavar("TOPIC")
        .help("produce messages to this topic");

    parser.addArgument("--num-records")
        .action(store())
        .required(true)
        .type(Long.class)
        .metavar("NUM-RECORDS")
        .dest("numRecords")
        .help("number of messages to produce");

    payloadOptions.addArgument("--record-size")
        .action(store())
        .required(false)
        .type(Integer.class)
        .metavar("RECORD-SIZE")
        .dest("recordSize")
        .help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file " +
            "or --payload-monotonic.");

    payloadOptions.addArgument("--payload-file")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-FILE")
        .dest("payloadFile")
        .help("file to read the message payloads from. This works only for UTF-8 encoded text files. " +
            "Payloads will be read from this file and a payload will be randomly selected when sending messages. " +
            "Note that you must provide exactly one of --record-size or --payload-file or --payload-monotonic.");

    payloadOptions.addArgument("--payload-monotonic")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("PAYLOAD-MONOTONIC")
        .dest("payloadMonotonic")
        .help("payload is monotonically increasing integer. Note that you must provide exactly one of --record-size " +
            "or --payload-file or --payload-monotonic.");

    parser.addArgument("--payload-delimiter")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("PAYLOAD-DELIMITER")
        .dest("payloadDelimiter")
        .setDefault("\\n")
        .help("provides delimiter to be used when --payload-file is provided. " +
            "Defaults to new line. " +
            "Note that this parameter will be ignored if --payload-file is not provided.");

    parser.addArgument("--throughput")
        .action(store())
        .required(true)
        .type(Double.class)
        .metavar("THROUGHPUT")
        .help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling.");

    // Producer configuration can come from inline props and/or a config file;
    // inline --producer-props take precedence.
    parser.addArgument("--producer-props")
        .nargs("+")
        .required(false)
        .metavar("PROP-NAME=PROP-VALUE")
        .type(String.class)
        .dest("producerConfig")
        .help("kafka producer related configuration properties like bootstrap.servers,client.id etc. " +
            "These configs take precedence over those passed via --producer.config.");

    parser.addArgument("--producer.config")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("CONFIG-FILE")
        .dest("producerConfigFile")
        .help("producer config properties file.");

    parser.addArgument("--print-metrics")
        .action(storeTrue())
        .type(Boolean.class)
        .metavar("PRINT-METRICS")
        .dest("printMetrics")
        .help("print out metrics at the end of the test.");

    // Transaction settings: --transactional-id overrides any transactional.id
    // supplied through the producer config sources.
    parser.addArgument("--transactional-id")
        .action(store())
        .required(false)
        .type(String.class)
        .metavar("TRANSACTIONAL-ID")
        .dest("transactionalId")
        .help("The transactional id to use. This config takes precedence over the transactional.id " +
            "specified via --producer.config or --producer-props. Note that if the transactional id " +
            "is not specified while --transaction-duration-ms is provided, the default value for the " +
            "transactional id will be performance-producer- followed by a random uuid.");

    parser.addArgument("--transaction-duration-ms")
        .action(store())
        .required(false)
        .type(Long.class)
        .metavar("TRANSACTION-DURATION")
        .dest("transactionDurationMs")
        .help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. " +
            "The value should be greater than 0. If the transactional id is specified via --producer-props, " +
            "--producer.config, or --transactional-id but --transaction-duration-ms is not specified, " +
            "the default value will be 3000.");

    return parser;
}
// Verifies post-processing rejections: missing producer configuration, and a
// non-positive --transaction-duration-ms, each with its specific error message.
@Test
public void testInvalidConfigPostProcessor() {
    ArgumentParser parser = ProducerPerformance.argParser();
    // Neither --producer-props nor --producer.config supplied.
    String[] invalidProducerProps = new String[]{
        "--topic", "Hello-Kafka", "--num-records", "5", "--throughput", "100", "--record-size", "100"};
    assertEquals("Either --producer-props or --producer.config must be specified.",
        assertThrows(ArgumentParserException.class,
            () -> new ProducerPerformance.ConfigPostProcessor(parser, invalidProducerProps)).getMessage());
    // Zero transaction duration is out of the valid (> 0) range.
    String[] invalidTransactionDurationMs = new String[]{
        "--topic", "Hello-Kafka", "--num-records", "5", "--throughput", "100", "--record-size", "100",
        "--producer-props", "bootstrap.servers=localhost:9000", "--transaction-duration-ms", "0"};
    assertEquals("--transaction-duration-ms should > 0",
        assertThrows(ArgumentParserException.class,
            () -> new ProducerPerformance.ConfigPostProcessor(parser, invalidTransactionDurationMs)).getMessage());
}
/**
 * Synchronizes discovery upstream changes into the database and broadcasts the
 * resulting full upstream list to the gateway. ADDED inserts upstreams that do
 * not yet exist for this handler (tolerating duplicate-key races), UPDATED
 * updates by handler id + url, DELETED removes by handler id + url; IGNORED
 * events are dropped. Runs in a transaction rolled back on any exception.
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void onChange(final DiscoveryDataChangedEvent event) {
    DiscoveryDataChangedEvent.Event currentEvent = event.getEvent();
    if (DiscoveryDataChangedEvent.Event.IGNORED.equals(currentEvent)) {
        return;
    }
    // Parse the event payload into the upstream list for this proxy selector.
    DiscoverySyncData discoverySyncData = buildProxySelectorData(event.getValue());
    final List<DiscoveryUpstreamData> upstreamDataList = discoverySyncData.getUpstreamDataList();
    if (CollectionUtils.isEmpty(upstreamDataList)) {
        LOGGER.warn("shenyu proxySelectorData#discoveryUpstreamList is empty");
        return;
    }
    switch (currentEvent) {
        case ADDED:
            upstreamDataList.forEach(d -> {
                try {
                    // Insert only if this (handler, url) pair is not already present.
                    DiscoveryUpstreamDO discoveryUpstreamDO = discoveryUpstreamMapper.selectByDiscoveryHandlerIdAndUrl(discoveryHandlerId, d.getUrl());
                    if (Objects.isNull(discoveryUpstreamDO)) {
                        d.setId(UUIDUtils.getInstance().generateShortUuid());
                        d.setDateCreated(new Timestamp(System.currentTimeMillis()));
                        d.setDateUpdated(new Timestamp(System.currentTimeMillis()));
                        discoveryUpstreamMapper.insert(DiscoveryTransfer.INSTANCE.mapToDo(d));
                        LOGGER.info("shenyu [DiscoveryDataChangedEventSyncListener] ADDED Upstream {}", d.getUrl());
                    }
                } catch (DuplicateKeyException ex) {
                    // Concurrent insert of the same upstream: treat as already present.
                    LOGGER.info("shenyu [DiscoveryDataChangedEventSyncListener] Upstream {} exist", d.getUrl());
                }
            });
            break;
        case UPDATED:
            upstreamDataList.stream().map(DiscoveryTransfer.INSTANCE::mapToDo).forEach(discoveryUpstreamDO -> {
                discoveryUpstreamDO.setDiscoveryHandlerId(discoveryHandlerId);
                int effect = discoveryUpstreamMapper.updateDiscoveryHandlerIdAndUrl(discoveryUpstreamDO);
                LOGGER.info("shenyu [DiscoveryDataChangedEventSyncListener] UPDATE Upstream {}, effect = {} ", discoveryUpstreamDO.getUrl(), effect);
            });
            break;
        case DELETED:
            if (CollectionUtils.isNotEmpty(upstreamDataList)) {
                upstreamDataList.forEach(up -> {
                    discoveryUpstreamMapper.deleteByUrl(discoveryHandlerId, up.getUrl());
                    LOGGER.info("shenyu [DiscoveryDataChangedEventSyncListener] DELETE Upstream {}", up.getUrl());
                });
            }
            break;
        default:
            throw new IllegalStateException("shenyu DiscoveryDataChangedEventSyncListener find IllegalState");
    }
    // Re-read the complete upstream set and push it to the gateway.
    fillFullyDiscoverySyncData(discoverySyncData);
    DataChangedEvent dataChangedEvent = new DataChangedEvent(ConfigGroupEnum.DISCOVER_UPSTREAM, DataEventTypeEnum.UPDATE, Collections.singletonList(discoverySyncData));
    eventPublisher.publishEvent(dataChangedEvent);
}
// Verifies the three event paths of onChange: ADDED inserts, UPDATED updates
// by handler id + url, and DELETED deletes by url.
@Test
public void testOnChange() {
    List<DiscoveryUpstreamData> discoveryUpstreamDTOS = new ArrayList<>();
    DiscoveryUpstreamData discoveryUpstreamData = new DiscoveryUpstreamData();
    discoveryUpstreamData.setProtocol("http");
    discoveryUpstreamData.setUrl("1111");
    discoveryUpstreamDTOS.add(discoveryUpstreamData);
    doNothing().when(eventPublisher).publishEvent(any(DataChangedEvent.class));
    when(keyValueParser.parseValue(anyString())).thenReturn(discoveryUpstreamDTOS);
    DiscoveryDataChangedEvent event = new DiscoveryDataChangedEvent("key", "value", DiscoveryDataChangedEvent.Event.ADDED);
    discoveryDataChangedEventSyncListener.onChange(event);
    verify(discoveryUpstreamMapper).insert(any(DiscoveryUpstreamDO.class));
    DiscoveryDataChangedEvent event2 = new DiscoveryDataChangedEvent("key", "value", DiscoveryDataChangedEvent.Event.UPDATED);
    discoveryDataChangedEventSyncListener.onChange(event2);
    verify(discoveryUpstreamMapper).updateDiscoveryHandlerIdAndUrl(any(DiscoveryUpstreamDO.class));
    DiscoveryDataChangedEvent event3 = new DiscoveryDataChangedEvent("key", "value", DiscoveryDataChangedEvent.Event.DELETED);
    discoveryDataChangedEventSyncListener.onChange(event3);
    verify(discoveryUpstreamMapper).deleteByUrl(anyString(), anyString());
}
/**
 * Releases the resource exactly once. The compareAndSet on {@code closed}
 * makes close idempotent: only the first caller runs the disposer.
 */
@Override
public void close() throws Exception {
    if (closed.compareAndSet(false, true)) {
        disposer.run();
    }
}
// Verifies that calling close() twice runs the disposer only once.
@Test
void testCloseIsIdempotent() throws Exception {
    final CountingCloseable disposer = new CountingCloseable();
    final OpaqueMemoryResource<Object> resource = new OpaqueMemoryResource<>(new Object(), 10, disposer);
    resource.close();
    resource.close();
    assertThat(disposer.count).isOne();
}
/**
 * Persists the incoming stream's contents to the configured checksum file,
 * overwriting any previous contents.
 */
@Override
public void handle(InputStream stream) throws IOException {
    FileUtils.copyInputStreamToFile(stream, checksumFile);
}
// Verifies that handle() writes the stream's bytes verbatim to the checksum file.
@Test
public void shouldStoreTheMd5ChecksumOnTheAgent() throws IOException {
    checksumFileHandler.handle(new ByteArrayInputStream("Hello World".getBytes()));
    assertThat(FileUtils.readFileToString(file, UTF_8), is("Hello World"));
}
/**
 * Aspect handler that decorates the intercepted method's RxJava2 return value
 * with the given bulkhead. Proceeds with the join point first, then wraps the
 * resulting reactive type via the bulkhead operator.
 */
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Bulkhead bulkhead, String methodName) throws Throwable {
    BulkheadOperator<?> bulkheadOperator = BulkheadOperator.of(bulkhead);
    Object returnValue = proceedingJoinPoint.proceed();
    return executeRxJava2Aspect(bulkheadOperator, returnValue);
}
// Verifies that the aspect handles both Single and Flowable return values,
// producing a non-null decorated result for each.
@Test
public void testReactorTypes() throws Throwable {
    Bulkhead bulkhead = Bulkhead.ofDefaults("test");
    when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
    assertThat(rxJava2BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
        .isNotNull();
    when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
    assertThat(rxJava2BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
        .isNotNull();
}
/** Returns the builder transformation that marks the request as an HTTP DELETE. */
@Override
Function<Request.Builder, Request.Builder> addVerbToBuilder() {
    return Request.Builder::delete;
}
// Verifies that the DELETE request supplies a non-null verb transformation.
@Test
public void addVerbToBuilder_shouldReturnNonNullResult() {
    assertThat(deleteRequest.addVerbToBuilder()).isNotNull();
}
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(SQLSERVER_BIT); builder.dataType(SQLSERVER_BIT); break; case TINYINT: builder.columnType(SQLSERVER_TINYINT); builder.dataType(SQLSERVER_TINYINT); break; case SMALLINT: builder.columnType(SQLSERVER_SMALLINT); builder.dataType(SQLSERVER_SMALLINT); break; case INT: builder.columnType(SQLSERVER_INT); builder.dataType(SQLSERVER_INT); break; case BIGINT: builder.columnType(SQLSERVER_BIGINT); builder.dataType(SQLSERVER_BIGINT); break; case FLOAT: builder.columnType(SQLSERVER_REAL); builder.dataType(SQLSERVER_REAL); break; case DOUBLE: builder.columnType(SQLSERVER_FLOAT); builder.dataType(SQLSERVER_FLOAT); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to 
decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", SQLSERVER_DECIMAL, precision, scale)); builder.dataType(SQLSERVER_DECIMAL); builder.precision(precision); builder.scale(scale); break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(MAX_NVARCHAR); builder.dataType(MAX_NVARCHAR); } else if (column.getColumnLength() <= MAX_NVARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", SQLSERVER_NVARCHAR, column.getColumnLength())); builder.dataType(SQLSERVER_NVARCHAR); builder.length(column.getColumnLength()); } else { builder.columnType(MAX_NVARCHAR); builder.dataType(MAX_NVARCHAR); builder.length(column.getColumnLength()); } break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(MAX_VARBINARY); builder.dataType(SQLSERVER_VARBINARY); } else if (column.getColumnLength() <= MAX_BINARY_LENGTH) { builder.columnType( String.format("%s(%s)", SQLSERVER_VARBINARY, column.getColumnLength())); builder.dataType(SQLSERVER_VARBINARY); builder.length(column.getColumnLength()); } else { builder.columnType(MAX_VARBINARY); builder.dataType(SQLSERVER_VARBINARY); builder.length(column.getColumnLength()); } break; case DATE: builder.columnType(SQLSERVER_DATE); builder.dataType(SQLSERVER_DATE); break; case TIME: if (column.getScale() != null && column.getScale() > 0) { int timeScale = column.getScale(); if (timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " 
+ "it will be converted to time({})", column.getName(), column.getScale(), MAX_SCALE, timeScale); } builder.columnType(String.format("%s(%s)", SQLSERVER_TIME, timeScale)); builder.scale(timeScale); } else { builder.columnType(SQLSERVER_TIME); } builder.dataType(SQLSERVER_TIME); break; case TIMESTAMP: if (column.getScale() != null && column.getScale() > 0) { int timestampScale = column.getScale(); if (timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType( String.format("%s(%s)", SQLSERVER_DATETIME2, timestampScale)); builder.scale(timestampScale); } else { builder.columnType(SQLSERVER_DATETIME2); } builder.dataType(SQLSERVER_DATETIME2); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.SQLSERVER, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
@Test
public void testReconvertDate() {
    // Verify that a SeaTunnel DATE column maps back to the SQL Server DATE type
    // for both the column type and the data type, preserving the column name.
    Column column =
            PhysicalColumn.builder()
                    .name("test")
                    .dataType(LocalTimeType.LOCAL_DATE_TYPE)
                    .build();

    BasicTypeDefine typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(SqlServerTypeConverter.SQLSERVER_DATE, typeDefine.getColumnType());
    Assertions.assertEquals(SqlServerTypeConverter.SQLSERVER_DATE, typeDefine.getDataType());
}
/**
 * Checks whether the given URL carries every key/value pair in {@code inputMap}
 * as an original service parameter (exact string equality per entry).
 *
 * @param url                the provider URL to inspect
 * @param protocolServiceKey the protocol service key used to scope the parameter lookup
 * @param inputMap           required labels; {@code null} or empty always matches
 * @return {@code true} if every entry of {@code inputMap} is present on the URL with an
 *         equal value, {@code false} otherwise
 */
protected static boolean isLabelMatch(URL url, String protocolServiceKey, Map<String, String> inputMap) {
    // An absent or empty selector matches everything.
    if (inputMap == null || inputMap.isEmpty()) {
        return true;
    }
    for (Map.Entry<String, String> entry : inputMap.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        String originMapValue = url.getOriginalServiceParameter(protocolServiceKey, key);
        // NOTE(review): a null value in inputMap would NPE here — presumably the
        // rule parser never produces null values; confirm upstream.
        if (!value.equals(originMapValue)) {
            return false;
        }
    }
    return true;
}
@Test
void containMapKeyValue() {
    // isLabelMatch must succeed whenever every entry of the input map is present on
    // the URL with an equal value, regardless of extra parameters the URL exposes.
    URL url = mock(URL.class);
    when(url.getOriginalServiceParameter("test", "key1")).thenReturn("value1");
    when(url.getOriginalServiceParameter("test", "key2")).thenReturn("value2");
    when(url.getOriginalServiceParameter("test", "key3")).thenReturn("value3");
    when(url.getOriginalServiceParameter("test", "key4")).thenReturn("value4");

    // NOTE(review): originMap is built but never used in this test — candidate for cleanup.
    Map<String, String> originMap = new HashMap<>();
    originMap.put("key1", "value1");
    originMap.put("key2", "value2");
    originMap.put("key3", "value3");

    Map<String, String> inputMap = new HashMap<>();
    inputMap.put("key1", "value1");
    inputMap.put("key2", "value2");
    assertTrue(MeshRuleCache.isLabelMatch(url, "test", inputMap));

    inputMap.put("key4", "value4");
    assertTrue(MeshRuleCache.isLabelMatch(url, "test", inputMap));
}
/**
 * Lists the contents of a directory on the SMB share.
 *
 * <p>Opens the share for the directory, enumerates its entries, and converts each entry
 * (except the "." and ".." pseudo-entries) into a {@code Path} with basic attributes
 * (timestamps, size, display name). The share is released even when enumeration fails.
 * Entries are classified as directory or file from the FILE_ATTRIBUTE_DIRECTORY bit.
 *
 * @param directory the directory to list
 * @param listener  notified with the partial result after each entry is added
 * @return the directory's children
 * @throws BackgroundException when the underlying SMB listing fails
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final AttributedList<Path> result = new AttributedList<>();
    try {
        final SMBSession.DiskShareWrapper share = session.openShare(directory);
        final List<FileIdBothDirectoryInformation> info;
        try {
            info = share.get().list(new SMBPathContainerService(session).getKey(directory));
        }
        finally {
            // Always return the share to the session, even on failure.
            session.releaseShare(share);
        }
        for(FileIdBothDirectoryInformation f : info) {
            final String filename = f.getFileName();
            if(filename.equals(".") || filename.equals("..")) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Skip %s", f.getFileName()));
                }
                continue;
            }
            final EnumSet<Type> type = EnumSet.noneOf(Type.class);
            long fileAttributes = f.getFileAttributes();
            // check for all relevant file types and add them to the EnumSet
            if((fileAttributes & FileAttributes.FILE_ATTRIBUTE_DIRECTORY.getValue()) != 0) {
                type.add(Type.directory);
            }
            else {
                type.add(Type.file);
            }
            final PathAttributes attr = new PathAttributes();
            attr.setAccessedDate(f.getLastAccessTime().toEpochMillis());
            attr.setModificationDate(f.getLastWriteTime().toEpochMillis());
            attr.setCreationDate(f.getCreationTime().toEpochMillis());
            attr.setSize(f.getEndOfFile());
            attr.setDisplayname(f.getFileName());
            result.add(new Path(directory, filename, type, attr));
            // Report incrementally so callers can render partial listings.
            listener.chunk(directory, result);
        }
    }
    catch(SMBRuntimeException e) {
        throw new SMBExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    return result;
}
@Test
public void testList() throws Exception {
    // Create a folder containing one file and one subfolder, verify listing returns
    // exactly those two entries, then clean up the remote fixtures.
    final Path home = new DefaultHomeFinderService(session).find();
    final Path testFolder = new SMBDirectoryFeature(session).mkdir(
            new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path testFile = new SMBTouchFeature(session).touch(new Path(testFolder,
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path innerFolder = new SMBDirectoryFeature(session).mkdir(
            new Path(testFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final AttributedList<Path> result = new SMBListService(session).list(testFolder, new DisabledListProgressListener());
    assertEquals(2, result.size());
    assertTrue(result.contains(testFile));
    assertTrue(result.contains(innerFolder));
    new SMBDeleteFeature(session).delete(Arrays.asList(innerFolder, testFile, testFolder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns a copy of the row's column values, so callers cannot mutate the
 * row's internal state through the returned array.
 */
@Override
public KsqlArray values() {
    return values.copy();
}
@Test
public void shouldGetValues() {
    // values() must return a KsqlArray equal to the row's backing values.
    assertThat(row.values(), is(new KsqlArray(VALUES.getList())));
}
/**
 * Returns the registered transform function name matching this instance's
 * distinct-result mode: IS_DISTINCT_FROM when {@code _distinctResult == 1},
 * IS_NOT_DISTINCT_FROM otherwise.
 */
@Override
public String getName() {
    TransformFunctionType functionType =
            _distinctResult == 1
                    ? TransformFunctionType.IS_DISTINCT_FROM
                    : TransformFunctionType.IS_NOT_DISTINCT_FROM;
    return functionType.getName();
}
@Test
public void testDistinctFromLeftLiteralRightIdentifier() throws Exception {
    // NULL literal vs. nullable column: rows where the column is NULL are NOT distinct
    // from NULL; all other rows are distinct. Expected values flip with the tested mode.
    ExpressionContext expression =
        RequestContextUtils.getExpression(String.format(_expression, "NULL", INT_SV_NULL_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertEquals(transformFunction.getName(),
        _isDistinctFrom ? "is_distinct_from" : "is_not_distinct_from");
    boolean[] expectedIntValues = new boolean[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
      if (isNullRow(i)) {
        expectedIntValues[i] = !_isDistinctFrom;
      } else {
        expectedIntValues[i] = _isDistinctFrom;
      }
    }
    testTransformFunction(expression, expectedIntValues, _projectionBlock, _dataSourceMap);
}
/**
 * Sets the remote IP and port on the span from the Mongo server address.
 *
 * <p>Intentionally a no-op in this base implementation: the comment indicates it
 * defaults to doing nothing rather than crashing if a future driver version drifts.
 * NOTE(review): the accompanying unit test verifies {@code span.remoteIpAndPort(...)}
 * is invoked via {@code MongoDBDriver.get()}, so a concrete override presumably
 * supplies the real behavior — confirm against the driver-specific subclass.
 */
void setRemoteIpAndPort(Span span, ServerAddress address) {
    // default to no-op instead of crash on future drift
}
@Test
void setRemoteIpAndPort() {
    // The resolved driver adapter must propagate host/port from the server address
    // to the span.
    when(serverAddress.getHost()).thenReturn("127.0.0.1");
    when(serverAddress.getPort()).thenReturn(27017);

    MongoDBDriver.get().setRemoteIpAndPort(span, serverAddress);

    verify(span).remoteIpAndPort("127.0.0.1", 27017);
}
/**
 * Parses a sort order from its JSON representation and binds it to the given
 * schema, resolving source field ids against that schema.
 */
public static SortOrder fromJson(Schema schema, String json) {
    return fromJson(json).bind(schema);
}
@TestTemplate
public void invalidSortDirection() {
    // Parsing must fail fast with a clear message on an unrecognized "direction" value.
    String jsonString =
        "{\n"
            + " \"order-id\" : 10,\n"
            + " \"fields\" : [ {\n"
            + " \"transform\" : \"custom_transform\",\n"
            + " \"source-id\" : 2,\n"
            + " \"direction\" : \"invalid\",\n"
            + " \"null-order\" : \"nulls-first\"\n"
            + " } ]\n"
            + "}";

    assertThatThrownBy(() -> SortOrderParser.fromJson(table.schema(), jsonString))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Invalid sort direction: invalid");
}
/** Returns the inode-id collector this handler uses to traverse pending re-encryption work. */
public ReencryptionPendingInodeIdCollector getTraverser() {
    return traverser;
}
@Test
public void testThrottleNoOp() throws Exception {
    // With handler ratio 0.5 and locked/total time = 10s/30s (~0.33), throttle()
    // should return almost immediately instead of sleeping.
    final Configuration conf = new Configuration();
    conf.setDouble(DFS_NAMENODE_REENCRYPT_THROTTLE_LIMIT_HANDLER_RATIO_KEY, 0.5);
    final ReencryptionHandler rh = mockReencryptionhandler(conf);

    // mock StopWatches so all = 30s, locked = 10s. With ratio = .5, throttle
    // should not happen.
    StopWatch mockAll = Mockito.mock(StopWatch.class);
    Mockito.when(mockAll.now()).thenReturn(new Long(30000));
    Mockito.when(mockAll.reset()).thenReturn(mockAll);
    StopWatch mockLocked = Mockito.mock(StopWatch.class);
    Mockito.when(mockLocked.now()).thenReturn(new Long(10000));
    Mockito.when(mockLocked.reset()).thenReturn(mockLocked);
    final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
    Whitebox.setInternalState(rh, "throttleTimerAll", mockAll);
    Whitebox.setInternalState(rh, "throttleTimerLocked", mockLocked);
    Whitebox.setInternalState(rh, "taskQueue", queue);
    final Map<Long, ReencryptionUpdater.ZoneSubmissionTracker> submissions = new HashMap<>();
    Whitebox.setInternalState(rh, "submissions", submissions);
    StopWatch sw = new StopWatch().start();
    rh.getTraverser().throttle();
    sw.stop();
    assertTrue("should not have throttled",
        sw.now(TimeUnit.MILLISECONDS) < 1000);
}
/**
 * Non-blocking row fetch; this implementation simply delegates to {@link #getRow()}.
 */
@Override
public Object[] getRowImmediate() {
    return getRow();
}
@Test
public void testGetRowImmediate() throws Exception {
    // The immediate variant must return the exact row object previously put.
    rowSet.putRow( new RowMeta(), row );
    assertSame( row, rowSet.getRowImmediate() );
}
/**
 * Cumulative distribution function of the F distribution, computed via the
 * regularized incomplete beta function with parameters nu1/2 and nu2/2 at
 * the point nu1*x / (nu2 + nu1*x).
 *
 * @param x evaluation point; must be non-negative
 * @throws IllegalArgumentException if {@code x} is negative
 */
@Override
public double cdf(double x) {
    if (x < 0.0) {
        throw new IllegalArgumentException("Invalid x: " + x);
    }
    double a = 0.5 * nu1;
    double b = 0.5 * nu2;
    double t = nu1 * x / (nu2 + nu1 * x);
    return Beta.regularizedIncompleteBetaFunction(a, b, t);
}
@Test
public void testCdf() {
    // Expected values look like output of R's pf(x, 10, 20) — TODO confirm source.
    System.out.println("cdf");
    FDistribution instance = new FDistribution(10, 20);
    instance.rand();
    assertEquals(5.878315e-09, instance.cdf(0.01), 1E-15);
    assertEquals(0.0003410974, instance.cdf(0.1), 1E-10);
    assertEquals(0.006161513, instance.cdf(0.2), 1E-9);
    assertEquals(0.1298396, instance.cdf(0.5), 1E-6);
    assertEquals(0.5244995, instance.cdf(1), 1E-7);
    assertEquals(0.9999914, instance.cdf(10), 1E-7);
}
/**
 * Resets the given pane to start at {@code startInMs}, clearing its accumulated
 * value, and returns the (reused) pane instance.
 *
 * @param pane      the pane to reset in place
 * @param startInMs new window start time, in epoch milliseconds
 * @return the reset pane
 */
protected abstract Pane<T> resetPaneTo(final Pane<T> pane, long startInMs);
@Test
void testResetPaneTo() {
    // After reset the pane's accumulator must read zero and accept new additions.
    Pane<LongAdder> currentPane = window.currentPane();
    currentPane.getValue().add(2);
    currentPane.getValue().add(1);
    assertEquals(3, currentPane.getValue().sum());
    window.resetPaneTo(currentPane, System.currentTimeMillis());
    assertEquals(0, currentPane.getValue().sum());
    currentPane.getValue().add(1);
    assertEquals(1, currentPane.getValue().sum());
}
/** Returns the current pool size as tracked by the atomic counter. */
@Override
public int getPoolSize() {
    return poolSize.get();
}
@Test
void testPoolSizeCheck() throws Exception {
    // The spilling strategy must observe the pool size after redistribution
    // (requiredBuffers), not the initial maximum.
    final int requiredBuffers = 10;
    final int maxBuffers = 100;
    CompletableFuture<Void> triggerGlobalDecision = new CompletableFuture<>();

    NetworkBufferPool networkBufferPool = new NetworkBufferPool(maxBuffers, bufferSize);
    BufferPool bufferPool = networkBufferPool.createBufferPool(requiredBuffers, maxBuffers);
    assertThat(bufferPool.getNumBuffers()).isEqualTo(maxBuffers);

    HsSpillingStrategy spillingStrategy =
            TestingSpillingStrategy.builder()
                    .setDecideActionWithGlobalInfoFunction(
                            (spillingInfoProvider) -> {
                                assertThat(spillingInfoProvider.getPoolSize())
                                        .isEqualTo(requiredBuffers);
                                triggerGlobalDecision.complete(null);
                                return Decision.NO_ACTION;
                            })
                    .build();
    createMemoryDataManager(spillingStrategy, bufferPool);
    // Claim the remaining buffers so the first pool shrinks to requiredBuffers.
    networkBufferPool.createBufferPool(maxBuffers - requiredBuffers, maxBuffers);
    assertThat(bufferPool.getNumBuffers()).isEqualTo(requiredBuffers);

    assertThatFuture(triggerGlobalDecision).eventuallySucceeds();
}
/** Loads a social user record by primary key, or null if it does not exist. */
@Override
public SocialUserDO getSocialUser(Long id) {
    return socialUserMapper.selectById(id);
}
@Test
public void testGetSocialUser_id() {
    // Seed data
    SocialUserDO socialUserDO = randomPojo(SocialUserDO.class);
    socialUserMapper.insert(socialUserDO);
    // Prepare parameters
    Long id = socialUserDO.getId();
    // Invoke
    SocialUserDO dbSocialUserDO = socialUserService.getSocialUser(id);
    // Assert field-by-field equality with the inserted record
    assertPojoEquals(socialUserDO, dbSocialUserDO);
}
/**
 * Reorders modifiers across the entire given source text by delegating to the
 * range-based overload with a single range covering the whole input.
 */
static JavaInput reorderModifiers(String text) throws FormatterException {
    return reorderModifiers(
        new JavaInput(text), ImmutableList.of(Range.closedOpen(0, text.length())));
}
@Test
public void everything() throws FormatterException {
    // All modifiers supplied in reverse order must come back in canonical order.
    assertThat(
            ModifierOrderer.reorderModifiers(
                    "strictfp native synchronized volatile transient final static abstract"
                        + " private protected public")
                .getText())
        .isEqualTo(
            "public protected private abstract static final transient volatile synchronized"
                + " native strictfp");
}
/**
 * Injects inferred schemas into CREATE statements. Statements that are neither
 * {@code CREATE SOURCE} nor {@code CREATE ... AS SELECT} pass through untouched;
 * otherwise the statement is rewritten when inference produces a result, falling
 * back to the original statement when it does not.
 *
 * <p>{@code KsqlStatementException}s propagate unchanged; any other
 * {@code KsqlException} is wrapped with the statement's masked text.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  final Statement stmt = statement.getStatement();
  final boolean isCreateSource = stmt instanceof CreateSource;
  if (!isCreateSource && !(stmt instanceof CreateAsSelect)) {
    return statement;
  }

  try {
    if (isCreateSource) {
      final ConfiguredStatement<CreateSource> cs =
          (ConfiguredStatement<CreateSource>) statement;
      return (ConfiguredStatement<T>) forCreateStatement(cs).orElse(cs);
    }
    final ConfiguredStatement<CreateAsSelect> cas =
        (ConfiguredStatement<CreateAsSelect>) statement;
    return (ConfiguredStatement<T>) forCreateAsStatement(cas).orElse(cas);
  } catch (final KsqlStatementException e) {
    throw e;
  } catch (final KsqlException e) {
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
}
@Test
public void shouldReturnStatementUnchangedIfCtAlreadyHasSchemas() {
    // Given: a CREATE TABLE whose elements already carry key and value schemas.
    givenKeyAndValueInferenceSupported();
    when(ct.getElements()).thenReturn(SOME_KEY_AND_VALUE_ELEMENTS_TABLE);

    // When:
    final ConfiguredStatement<?> result = injector.inject(ctStatement);

    // Then: injection is a no-op and returns the very same instance.
    assertThat(result, is(sameInstance(ctStatement)));
}
/**
 * Applies a topic-list refresh result to a pattern consumer's subscription set.
 *
 * <p>Always records the new topics hash. If the broker reported no change, completes
 * immediately. Otherwise computes the topics added and removed relative to
 * {@code oldTopics} and notifies the listener of both sets; when the broker did not
 * filter by pattern server-side, the pattern is applied client-side before diffing.
 *
 * @return a future completing when both the added and removed callbacks finish
 */
static CompletableFuture<Void> updateSubscriptions(Pattern topicsPattern,
                                                   java.util.function.Consumer<String> topicsHashSetter,
                                                   GetTopicsResult getTopicsResult,
                                                   TopicsChangedListener topicsChangedListener,
                                                   List<String> oldTopics,
                                                   String subscriptionForLog) {
    topicsHashSetter.accept(getTopicsResult.getTopicsHash());
    if (!getTopicsResult.isChanged()) {
        return CompletableFuture.completedFuture(null);
    }
    List<String> newTopics;
    if (getTopicsResult.isFiltered()) {
        newTopics = getTopicsResult.getNonPartitionedOrPartitionTopics();
    } else {
        // Broker returned an unfiltered list; apply the pattern locally.
        newTopics = getTopicsResult.filterTopics(topicsPattern).getNonPartitionedOrPartitionTopics();
    }
    final List<CompletableFuture<?>> listenersCallback = new ArrayList<>(2);
    Set<String> topicsAdded = TopicList.minus(newTopics, oldTopics);
    Set<String> topicsRemoved = TopicList.minus(oldTopics, newTopics);
    if (log.isDebugEnabled()) {
        log.debug("Pattern consumer [{}] Recheck pattern consumer's topics. topicsAdded: {}, topicsRemoved: {}",
                subscriptionForLog, topicsAdded, topicsRemoved);
    }
    listenersCallback.add(topicsChangedListener.onTopicsAdded(topicsAdded));
    listenersCallback.add(topicsChangedListener.onTopicsRemoved(topicsRemoved));
    return FutureUtil.waitForAll(Collections.unmodifiableList(listenersCallback));
}
@Test
public void testChangedUnfilteredResponse() {
    // Broker returns an unfiltered, changed topic list (hash = null): the pattern is
    // applied client-side, matching topics are reported as added, none removed.
    PatternMultiTopicsConsumerImpl.updateSubscriptions(
            Pattern.compile("tenant/my-ns/name-.*"),
            mockTopicsHashSetter,
            new GetTopicsResult(Arrays.asList(
                    "persistent://tenant/my-ns/name-1",
                    "persistent://tenant/my-ns/name-2",
                    "persistent://tenant/my-ns/non-matching"),
                    null, false, true),
            mockListener,
            Collections.emptyList(),
            "");
    verify(mockListener).onTopicsAdded(Sets.newHashSet(
            "persistent://tenant/my-ns/name-1",
            "persistent://tenant/my-ns/name-2"));
    verify(mockListener).onTopicsRemoved(Collections.emptySet());
    verify(mockTopicsHashSetter).accept(null);
}
/**
 * Returns the latency probe for the given service/data-structure/method triple,
 * lazily creating the per-service probe holder on first use. Repeated calls with
 * the same arguments return the same probe instance.
 */
public LatencyProbe newProbe(String serviceName, String dataStructureName, String methodName) {
    ServiceProbes serviceProbes = getOrPutIfAbsent(
        metricsPerServiceMap, serviceName, metricsPerServiceConstructorFunction);
    return serviceProbes.newProbe(dataStructureName, methodName);
}
@Test
public void getProbe_whenSameProbeRequestedMoreThanOnce() {
    // Requesting the same triple twice must yield the identical probe instance.
    LatencyProbe probe1 = plugin.newProbe("foo", "queue", "somemethod");
    LatencyProbe probe2 = plugin.newProbe("foo", "queue", "somemethod");
    assertSame(probe1, probe2);
}
/** Delegates ring construction to the configured underlying ring factory. */
@Override
public Ring<T> createRing(Map<T, Integer> pointsMap) {
    return _ringFactory.createRing(pointsMap);
}
@Test(groups = { "small", "back-end" })
public void testFactoryWithPointBased() {
    // A "pointBased" hash config must produce a consistent-hash ring implementation.
    RingFactory<String> factory = new DelegatingRingFactory<>(configBuilder("pointBased", "uriRegex"));
    Ring<String> ring = factory.createRing(buildPointsMap(10));
    assertTrue(ring instanceof ConsistentHashRing);
}
/**
 * Remaps an existing static topic's logical queues onto a new set of target brokers.
 *
 * <p>Validates the current per-broker mapping, allocates the topic's queues across
 * {@code targetBrokers} (keeping a queue on its current leader when that broker still
 * has capacity), then rewrites the mapping details: each moved queue gets a new
 * mapping item generation on its map-in broker, and both the map-in and map-out
 * brokers record the same item list. All broker configs receive the new epoch and
 * total queue count, and the result is re-validated before returning.
 *
 * @param topic           the static topic to remap
 * @param brokerConfigMap per-broker topic config and queue mapping; mutated in place
 * @param targetBrokers   brokers that should host the queues after remapping
 * @return a wrapper describing the remapping (new epoch, brokers mapped in/out)
 */
public static TopicRemappingDetailWrapper remappingStaticTopic(String topic, Map<String, TopicConfigAndQueueMapping> brokerConfigMap, Set<String> targetBrokers) {
    Map.Entry<Long, Integer> maxEpochAndNum = TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
    Map<Integer, TopicQueueMappingOne> globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(getMappingDetailFromConfig(brokerConfigMap.values()), false, true);
    TopicQueueMappingUtils.checkPhysicalQueueConsistence(brokerConfigMap);
    TopicQueueMappingUtils.checkIfReusePhysicalQueue(globalIdMap.values());

    //the check is ok, now do the mapping allocation
    int maxNum = maxEpochAndNum.getValue();
    Map<String, Integer> brokerNumMap = new HashMap<>();
    for (String broker: targetBrokers) {
        brokerNumMap.put(broker, 0);
    }
    // Count how many queues each broker currently leads.
    Map<String, Integer> brokerNumMapBeforeRemapping = new HashMap<>();
    for (TopicQueueMappingOne mappingOne: globalIdMap.values()) {
        if (brokerNumMapBeforeRemapping.containsKey(mappingOne.bname)) {
            brokerNumMapBeforeRemapping.put(mappingOne.bname, brokerNumMapBeforeRemapping.get(mappingOne.bname) + 1);
        } else {
            brokerNumMapBeforeRemapping.put(mappingOne.bname, 1);
        }
    }

    TopicQueueMappingUtils.MappingAllocator allocator = TopicQueueMappingUtils.buildMappingAllocator(new HashMap<>(), brokerNumMap, brokerNumMapBeforeRemapping);
    allocator.upToNum(maxNum);
    Map<String, Integer> expectedBrokerNumMap = allocator.getBrokerNumMap();
    Queue<Integer> waitAssignQueues = new ArrayDeque<>();
    //cannot directly use the idBrokerMap from allocator, for the number of globalId maybe not in the natural order
    Map<Integer, String> expectedIdToBroker = new HashMap<>();
    //the following logic will make sure that, for one broker, either "map in" or "map out"
    //It can't both, map in some queues but also map out some queues.
    for (Map.Entry<Integer, TopicQueueMappingOne> entry : globalIdMap.entrySet()) {
        Integer queueId = entry.getKey();
        TopicQueueMappingOne mappingOne = entry.getValue();
        String leaderBroker = mappingOne.getBname();
        if (expectedBrokerNumMap.containsKey(leaderBroker)) {
            if (expectedBrokerNumMap.get(leaderBroker) > 0) {
                // Keep the queue on its current leader and consume one slot.
                expectedIdToBroker.put(queueId, leaderBroker);
                expectedBrokerNumMap.put(leaderBroker, expectedBrokerNumMap.get(leaderBroker) - 1);
            } else {
                waitAssignQueues.add(queueId);
                expectedBrokerNumMap.remove(leaderBroker);
            }
        } else {
            waitAssignQueues.add(queueId);
        }
    }
    // Distribute the remaining queues over the brokers with leftover capacity.
    for (Map.Entry<String, Integer> entry: expectedBrokerNumMap.entrySet()) {
        String broker = entry.getKey();
        Integer queueNum = entry.getValue();
        for (int i = 0; i < queueNum; i++) {
            Integer queueId = waitAssignQueues.poll();
            assert queueId != null;
            expectedIdToBroker.put(queueId, broker);
        }
    }

    // New epoch must strictly grow even if the clock is behind the previous epoch.
    long newEpoch = Math.max(maxEpochAndNum.getKey() + 1000, System.currentTimeMillis());

    //Now construct the remapping info
    Set<String> brokersToMapOut = new HashSet<>();
    Set<String> brokersToMapIn = new HashSet<>();
    for (Map.Entry<Integer, String> mapEntry : expectedIdToBroker.entrySet()) {
        Integer queueId = mapEntry.getKey();
        String broker = mapEntry.getValue();
        TopicQueueMappingOne topicQueueMappingOne = globalIdMap.get(queueId);
        assert topicQueueMappingOne != null;
        if (topicQueueMappingOne.getBname().equals(broker)) {
            // Queue stays where it is — nothing to remap.
            continue;
        }
        //remapping
        final String mapInBroker = broker;
        final String mapOutBroker = topicQueueMappingOne.getBname();
        brokersToMapIn.add(mapInBroker);
        brokersToMapOut.add(mapOutBroker);
        TopicConfigAndQueueMapping mapInConfig = brokerConfigMap.get(mapInBroker);
        TopicConfigAndQueueMapping mapOutConfig = brokerConfigMap.get(mapOutBroker);
        if (mapInConfig == null) {
            // First queue mapped into this broker: bootstrap an empty config.
            mapInConfig = new TopicConfigAndQueueMapping(new TopicConfig(topic, 0, 0), new TopicQueueMappingDetail(topic, maxNum, mapInBroker, newEpoch));
            brokerConfigMap.put(mapInBroker, mapInConfig);
        }
        mapInConfig.setWriteQueueNums(mapInConfig.getWriteQueueNums() + 1);
        mapInConfig.setReadQueueNums(mapInConfig.getReadQueueNums() + 1);
        List<LogicQueueMappingItem> items = new ArrayList<>(topicQueueMappingOne.getItems());
        LogicQueueMappingItem last = items.get(items.size() - 1);
        // Append a new-generation item pointing at the freshly added physical queue.
        items.add(new LogicQueueMappingItem(last.getGen() + 1, mapInConfig.getWriteQueueNums() - 1, mapInBroker, -1, 0, -1, -1, -1));

        //Use the same object
        TopicQueueMappingDetail.putMappingInfo(mapInConfig.getMappingDetail(), queueId, items);
        TopicQueueMappingDetail.putMappingInfo(mapOutConfig.getMappingDetail(), queueId, items);
    }

    // Stamp every broker config with the new epoch and total queue count.
    for (Map.Entry<String, TopicConfigAndQueueMapping> entry : brokerConfigMap.entrySet()) {
        TopicConfigAndQueueMapping configMapping = entry.getValue();
        configMapping.getMappingDetail().setEpoch(newEpoch);
        configMapping.getMappingDetail().setTotalQueues(maxNum);
    }
    //double check
    {
        TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
        globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(getMappingDetailFromConfig(brokerConfigMap.values()), false, true);
        TopicQueueMappingUtils.checkPhysicalQueueConsistence(brokerConfigMap);
        TopicQueueMappingUtils.checkIfReusePhysicalQueue(globalIdMap.values());
        TopicQueueMappingUtils.checkLeaderInTargetBrokers(globalIdMap.values(), targetBrokers);
    }
    return new TopicRemappingDetailWrapper(topic, TopicRemappingDetailWrapper.TYPE_REMAPPING, newEpoch, brokerConfigMap, brokersToMapIn, brokersToMapOut);
}
@Test
public void testRemappingStaticTopic() {
    // Create a static topic on 2 brokers, then remap it onto a fresh 2-broker set
    // 10 times, re-validating the mapping invariants after every remap.
    String topic = "static";
    int queueNum = 7;
    Map<String, TopicConfigAndQueueMapping> brokerConfigMap = new HashMap<>();
    Set<String> originalBrokers = buildTargetBrokers(2);
    TopicRemappingDetailWrapper wrapper = TopicQueueMappingUtils.createTopicConfigMapping(topic, queueNum, originalBrokers, brokerConfigMap);
    Assert.assertEquals(wrapper.getBrokerConfigMap(), brokerConfigMap);
    Assert.assertEquals(2, brokerConfigMap.size());

    {
        //do the check manually
        TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
        TopicQueueMappingUtils.checkPhysicalQueueConsistence(brokerConfigMap);
        Map<Integer, TopicQueueMappingOne> globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(new ArrayList<>(TopicQueueMappingUtils.getMappingDetailFromConfig(brokerConfigMap.values())), false, true);
        TopicQueueMappingUtils.checkIfReusePhysicalQueue(globalIdMap.values());
    }

    for (int i = 0; i < 10; i++) {
        Set<String> targetBrokers = buildTargetBrokers(2, "test" + i);
        TopicQueueMappingUtils.remappingStaticTopic(topic, brokerConfigMap, targetBrokers);
        //do the check manually
        TopicQueueMappingUtils.checkNameEpochNumConsistence(topic, brokerConfigMap);
        TopicQueueMappingUtils.checkPhysicalQueueConsistence(brokerConfigMap);
        Map<Integer, TopicQueueMappingOne> globalIdMap = TopicQueueMappingUtils.checkAndBuildMappingItems(new ArrayList<>(TopicQueueMappingUtils.getMappingDetailFromConfig(brokerConfigMap.values())), false, true);
        TopicQueueMappingUtils.checkIfReusePhysicalQueue(globalIdMap.values());
        TopicQueueMappingUtils.checkLeaderInTargetBrokers(globalIdMap.values(), targetBrokers);
        // Each remap adds 2 fresh brokers to the config map.
        Assert.assertEquals((i + 2) * 2, brokerConfigMap.size());
        //check and complete the logicOffset
        for (Map.Entry<String, TopicConfigAndQueueMapping> entry : brokerConfigMap.entrySet()) {
            TopicConfigAndQueueMapping configMapping = entry.getValue();
            if (!targetBrokers.contains(configMapping.getMappingDetail().bname)) {
                continue;
            }
            for (List<LogicQueueMappingItem> items: configMapping.getMappingDetail().getHostedQueues().values()) {
                // Every remap appends one mapping item generation per queue.
                Assert.assertEquals(i + 2, items.size());
                items.get(items.size() - 1).setLogicOffset(i + 1);
            }
        }
    }
}
/**
 * Converts the in-memory realtime (mutable) segment into an immutable on-disk segment.
 *
 * <p>Builds a {@code SegmentGeneratorConfig} from the table config and schema, carries
 * over index settings (inverted, var-length dictionary, text, FST), partition config,
 * null handling and ZK props, then drives segment creation either row-by-row or
 * column-major depending on configuration. Time-range validation is disabled so that
 * out-of-range timestamps cannot abort realtime consumption.
 *
 * @param segmentVersion target segment format version, or null for the default
 * @param serverMetrics  sink for per-column partition-count gauges; only consulted
 *                       when a segment partition config is present
 * @throws Exception if segment creation fails
 */
public void build(@Nullable SegmentVersion segmentVersion, ServerMetrics serverMetrics)
    throws Exception {
  SegmentGeneratorConfig genConfig = new SegmentGeneratorConfig(_tableConfig, _dataSchema);
  // The segment generation code in SegmentColumnarIndexCreator will throw
  // exception if start and end time in time column are not in acceptable
  // range. We don't want the realtime consumption to stop (if an exception
  // is thrown) and thus the time validity check is explicitly disabled for
  // realtime segment generation
  genConfig.setSegmentTimeValueCheck(false);
  if (_columnIndicesForRealtimeTable.getInvertedIndexColumns() != null) {
    genConfig.setIndexOn(StandardIndexes.inverted(), IndexConfig.ENABLED,
        _columnIndicesForRealtimeTable.getInvertedIndexColumns());
  }
  if (_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns() != null) {
    genConfig.setVarLengthDictionaryColumns(_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns());
  }
  if (segmentVersion != null) {
    genConfig.setSegmentVersion(segmentVersion);
  }
  genConfig.setTableName(_tableName);
  genConfig.setOutDir(_outputPath);
  genConfig.setSegmentName(_segmentName);
  addIndexOrDefault(genConfig, StandardIndexes.text(), _columnIndicesForRealtimeTable.getTextIndexColumns(),
      new TextIndexConfigBuilder(genConfig.getFSTIndexType()).build());
  addIndexOrDefault(genConfig, StandardIndexes.fst(), _columnIndicesForRealtimeTable.getFstIndexColumns(),
      new FstIndexConfig(genConfig.getFSTIndexType()));
  SegmentPartitionConfig segmentPartitionConfig = _realtimeSegmentImpl.getSegmentPartitionConfig();
  genConfig.setSegmentPartitionConfig(segmentPartitionConfig);
  genConfig.setNullHandlingEnabled(_nullHandlingEnabled);
  genConfig.setSegmentZKPropsConfig(_segmentZKPropsConfig);
  // flush any artifacts to disk to improve mutable to immutable segment conversion
  _realtimeSegmentImpl.commit();
  SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
  try (PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader()) {
    // When a sorted column is configured, iterate docs in its sorted order.
    int[] sortedDocIds = _columnIndicesForRealtimeTable.getSortedColumn() != null
        ? _realtimeSegmentImpl.getSortedDocIdIterationOrderWithSortedColumn(
        _columnIndicesForRealtimeTable.getSortedColumn()) : null;
    recordReader.init(_realtimeSegmentImpl, sortedDocIds);
    RealtimeSegmentSegmentCreationDataSource dataSource =
        new RealtimeSegmentSegmentCreationDataSource(_realtimeSegmentImpl, recordReader);
    driver.init(genConfig, dataSource, RecordEnricherPipeline.getPassThroughPipeline(),
        TransformPipeline.getPassThroughPipeline());
    if (!_enableColumnMajor) {
      driver.build();
    } else {
      driver.buildByColumn(_realtimeSegmentImpl);
    }
  }
  if (segmentPartitionConfig != null) {
    // Publish the observed number of partitions per partitioned column.
    Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
    for (String columnName : columnPartitionMap.keySet()) {
      int numPartitions = driver.getSegmentStats().getColumnProfileFor(columnName).getPartitions().size();
      serverMetrics.addValueToTableGauge(_tableName, ServerGauge.REALTIME_SEGMENT_NUM_PARTITIONS, numPartitions);
    }
  }
}
@Test(dataProvider = "reuseParams")
public void testSegmentBuilderWithReuse(boolean columnMajorSegmentBuilder, String sortedColumn,
    boolean reuseMutableIndex, int luceneNRTCachingDirectoryMaxBufferSizeMB)
    throws Exception {
  // End-to-end conversion test: index rows into a mutable segment, convert it to an
  // immutable v3 segment (optionally column-major / reusing the mutable text index),
  // then verify metadata, forward-index contents and text-index docId conversion —
  // accounting for doc reordering when a sorted column is configured.
  File tmpDir = new File(TMP_DIR, "tmp_" + System.currentTimeMillis());
  FieldConfig textIndexFieldConfig =
      new FieldConfig.Builder(STRING_COLUMN1).withEncodingType(FieldConfig.EncodingType.RAW)
          .withIndexTypes(Collections.singletonList(FieldConfig.IndexType.TEXT)).build();
  List<FieldConfig> fieldConfigList = Collections.singletonList(textIndexFieldConfig);
  TableConfig tableConfig =
      new TableConfigBuilder(TableType.REALTIME).setTableName("testTable").setTimeColumnName(DATE_TIME_COLUMN)
          .setInvertedIndexColumns(Lists.newArrayList(STRING_COLUMN1))
          .setSortedColumn(sortedColumn).setColumnMajorSegmentBuilderEnabled(columnMajorSegmentBuilder)
          .setFieldConfigList(fieldConfigList).build();
  Schema schema = new Schema.SchemaBuilder().addSingleValueDimension(STRING_COLUMN1, FieldSpec.DataType.STRING)
      .addDateTime(DATE_TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
  String tableNameWithType = tableConfig.getTableName();
  String segmentName = "testTable__0__0__123456";
  IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
  TextIndexConfig textIndexConfig =
      new TextIndexConfigBuilder()
          .withUseANDForMultiTermQueries(false)
          .withReuseMutableIndex(reuseMutableIndex)
          .withLuceneNRTCachingDirectoryMaxBufferSizeMB(luceneNRTCachingDirectoryMaxBufferSizeMB)
          .build();
  RealtimeSegmentConfig.Builder realtimeSegmentConfigBuilder =
      new RealtimeSegmentConfig.Builder().setTableNameWithType(tableNameWithType).setSegmentName(segmentName)
          .setStreamName(tableNameWithType).setSchema(schema).setTimeColumnName(DATE_TIME_COLUMN).setCapacity(1000)
          .setIndex(Sets.newHashSet(STRING_COLUMN1), StandardIndexes.inverted(), IndexConfig.ENABLED)
          .setIndex(Sets.newHashSet(STRING_COLUMN1), StandardIndexes.text(), textIndexConfig)
          .setFieldConfigList(fieldConfigList).setSegmentZKMetadata(getSegmentZKMetadata(segmentName))
          .setOffHeap(true).setMemoryManager(new DirectMemoryManager(segmentName))
          .setStatsHistory(RealtimeSegmentStatsHistory.deserialzeFrom(new File(tmpDir, "stats")))
          .setConsumerDir(new File(tmpDir, "consumerDir").getAbsolutePath());
  // create mutable segment impl
  RealtimeLuceneTextIndexSearcherPool.init(1);
  RealtimeLuceneIndexRefreshManager.init(1, 10);
  MutableSegmentImpl mutableSegmentImpl = new MutableSegmentImpl(realtimeSegmentConfigBuilder.build(), null);
  List<GenericRow> rows = generateTestDataForReusePath();
  for (GenericRow row : rows) {
    mutableSegmentImpl.index(row, null);
  }
  // build converted segment
  File outputDir = new File(new File(tmpDir, segmentName), "tmp-" + segmentName + "-" + System.currentTimeMillis());
  SegmentZKPropsConfig segmentZKPropsConfig = new SegmentZKPropsConfig();
  segmentZKPropsConfig.setStartOffset("1");
  segmentZKPropsConfig.setEndOffset("100");
  ColumnIndicesForRealtimeTable cdc = new ColumnIndicesForRealtimeTable(sortedColumn,
      indexingConfig.getInvertedIndexColumns(), Collections.singletonList(STRING_COLUMN1), null,
      indexingConfig.getNoDictionaryColumns(), indexingConfig.getVarLengthDictionaryColumns());
  RealtimeSegmentConverter converter =
      new RealtimeSegmentConverter(mutableSegmentImpl, segmentZKPropsConfig, outputDir.getAbsolutePath(), schema,
          tableNameWithType, tableConfig, segmentName, cdc, false);
  converter.build(SegmentVersion.v3, null);
  File indexDir = new File(outputDir, segmentName);
  SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir);
  assertEquals(segmentMetadata.getVersion(), SegmentVersion.v3);
  assertEquals(segmentMetadata.getTotalDocs(), rows.size());
  assertEquals(segmentMetadata.getTimeColumn(), DATE_TIME_COLUMN);
  assertEquals(segmentMetadata.getTimeUnit(), TimeUnit.MILLISECONDS);
  long expectedStartTime = (long) rows.get(0).getValue(DATE_TIME_COLUMN);
  assertEquals(segmentMetadata.getStartTime(), expectedStartTime);
  long expectedEndTime = (long) rows.get(rows.size() - 1).getValue(DATE_TIME_COLUMN);
  assertEquals(segmentMetadata.getEndTime(), expectedEndTime);
  assertTrue(segmentMetadata.getAllColumns().containsAll(schema.getColumnNames()));
  assertEquals(segmentMetadata.getStartOffset(), "1");
  assertEquals(segmentMetadata.getEndOffset(), "100");
  // read converted segment
  SegmentLocalFSDirectory segmentDir = new SegmentLocalFSDirectory(indexDir, segmentMetadata, ReadMode.mmap);
  SegmentDirectory.Reader segmentReader = segmentDir.createReader();
  Map<String, ColumnIndexContainer> indexContainerMap = new HashMap<>();
  Map<String, ColumnMetadata> columnMetadataMap = segmentMetadata.getColumnMetadataMap();
  IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, tableConfig);
  for (Map.Entry<String, ColumnMetadata> entry : columnMetadataMap.entrySet()) {
    indexContainerMap.put(entry.getKey(),
        new PhysicalColumnIndexContainer(segmentReader, entry.getValue(), indexLoadingConfig));
  }
  ImmutableSegmentImpl segmentFile = new ImmutableSegmentImpl(segmentDir, segmentMetadata, indexContainerMap, null);
  // test forward index contents
  GenericRow readRow = new GenericRow();
  int docId = 0;
  for (int i = 0; i < rows.size(); i++) {
    GenericRow row;
    if (sortedColumn == null) {
      row = rows.get(i);
    } else {
      // Sorting reverses the doc order for this fixture's data.
      row = rows.get(rows.size() - i - 1);
    }
    segmentFile.getRecord(docId, readRow);
    assertEquals(readRow.getValue(STRING_COLUMN1), row.getValue(STRING_COLUMN1));
    assertEquals(readRow.getValue(DATE_TIME_COLUMN), row.getValue(DATE_TIME_COLUMN));
    docId += 1;
  }
  // test docId conversion
  TextIndexReader textIndexReader = segmentFile.getIndex(STRING_COLUMN1, StandardIndexes.text());
  if (sortedColumn == null) {
    assertEquals(textIndexReader.getDocIds("str-8"), ImmutableRoaringBitmap.bitmapOf(0));
    assertEquals(textIndexReader.getDocIds("str-4"), ImmutableRoaringBitmap.bitmapOf(4));
  } else {
    assertEquals(textIndexReader.getDocIds("str-8"), ImmutableRoaringBitmap.bitmapOf(7));
    assertEquals(textIndexReader.getDocIds("str-4"), ImmutableRoaringBitmap.bitmapOf(3));
  }
}
/**
 * Builds an iterator over the changelog that first drops carry-over rows, then
 * collapses DELETE/INSERT pairs sharing the given identifier fields into
 * UPDATE_BEFORE/UPDATE_AFTER rows, filtering out any null placeholders the
 * update iterator emits.
 */
public static Iterator<Row> computeUpdates(
    Iterator<Row> rowIterator, StructType rowType, String[] identifierFields) {
  ChangelogIterator updateIterator =
      new ComputeUpdateIterator(removeCarryovers(rowIterator, rowType), rowType, identifierFields);
  return Iterators.filter(updateIterator, Objects::nonNull);
}
@Test
public void testRowsWithNullValue() {
    // Null handling: unpaired rows pass through unchanged; an identical DELETE/INSERT
    // pair (id 4) is removed as a carry-over; a pair differing only in non-identifier
    // columns (id 5) becomes UPDATE_BEFORE/UPDATE_AFTER; a pair differing in an
    // identifier column (id 6) stays DELETE + INSERT.
    final List<Row> rowsWithNull =
        Lists.newArrayList(
            new GenericRowWithSchema(new Object[] {2, null, null, DELETE, 0, 0}, null),
            new GenericRowWithSchema(new Object[] {3, null, null, INSERT, 0, 0}, null),
            new GenericRowWithSchema(new Object[] {4, null, null, DELETE, 0, 0}, null),
            new GenericRowWithSchema(new Object[] {4, null, null, INSERT, 0, 0}, null),
            // mixed null and non-null value in non-identifier columns
            new GenericRowWithSchema(new Object[] {5, null, null, DELETE, 0, 0}, null),
            new GenericRowWithSchema(new Object[] {5, null, "data", INSERT, 0, 0}, null),
            // mixed null and non-null value in identifier columns
            new GenericRowWithSchema(new Object[] {6, null, null, DELETE, 0, 0}, null),
            new GenericRowWithSchema(new Object[] {6, "name", null, INSERT, 0, 0}, null));

    Iterator<Row> iterator =
        ChangelogIterator.computeUpdates(rowsWithNull.iterator(), SCHEMA, IDENTIFIER_FIELDS);
    List<Row> result = Lists.newArrayList(iterator);

    assertEquals(
        "Rows should match",
        Lists.newArrayList(
            new Object[] {2, null, null, DELETE, 0, 0},
            new Object[] {3, null, null, INSERT, 0, 0},
            new Object[] {5, null, null, UPDATE_BEFORE, 0, 0},
            new Object[] {5, null, "data", UPDATE_AFTER, 0, 0},
            new Object[] {6, null, null, DELETE, 0, 0},
            new Object[] {6, "name", null, INSERT, 0, 0}),
        rowsToJava(result));
}
@Override public void importData(JsonReader reader) throws IOException { logger.info("Reading configuration for 1.0"); // this *HAS* to start as an object reader.beginObject(); while (reader.hasNext()) { JsonToken tok = reader.peek(); switch (tok) { case NAME: String name = reader.nextName(); // find out which member it is if (name.equals(CLIENTS)) { readClients(reader); } else if (name.equals(GRANTS)) { readGrants(reader); } else if (name.equals(WHITELISTEDSITES)) { readWhitelistedSites(reader); } else if (name.equals(BLACKLISTEDSITES)) { readBlacklistedSites(reader); } else if (name.equals(AUTHENTICATIONHOLDERS)) { readAuthenticationHolders(reader); } else if (name.equals(ACCESSTOKENS)) { readAccessTokens(reader); } else if (name.equals(REFRESHTOKENS)) { readRefreshTokens(reader); } else if (name.equals(SYSTEMSCOPES)) { readSystemScopes(reader); } else { for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { if (extension.supportsVersion(THIS_VERSION)) { extension.importExtensionData(name, reader); break; } } } // unknown token, skip it reader.skipValue(); } break; case END_OBJECT: // the object ended, we're done here reader.endObject(); continue; default: logger.debug("Found unexpected entry"); reader.skipValue(); continue; } } fixObjectReferences(); for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { extension.fixExtensionObjectReferences(maps); break; } } maps.clearAll(); }
/**
 * Imports a config JSON containing two access tokens and verifies that the
 * saved entities carry the clients, expirations and JWT values from the JSON.
 * Repositories are faked with in-memory maps so multi-pass saves (token,
 * client update, auth-holder update, refresh-token link) converge on two
 * final entities.
 */
@Test
public void testImportAccessTokens() throws IOException, ParseException {
	// --- expected entity 1: bare token without refresh token ---
	Date expirationDate1 = formatter.parse("2014-09-10T22:49:44.090+00:00", Locale.ENGLISH);

	ClientDetailsEntity mockedClient1 = mock(ClientDetailsEntity.class);
	when(mockedClient1.getClientId()).thenReturn("mocked_client_1");

	AuthenticationHolderEntity mockedAuthHolder1 = mock(AuthenticationHolderEntity.class);
	when(mockedAuthHolder1.getId()).thenReturn(1L);

	OAuth2AccessTokenEntity token1 = new OAuth2AccessTokenEntity();
	token1.setId(1L);
	token1.setClient(mockedClient1);
	token1.setExpiration(expirationDate1);
	token1.setJwt(JWTParser.parse("eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3ODk5NjgsInN1YiI6IjkwMzQyLkFTREZKV0ZBIiwiYXRfaGFzaCI6InptTmt1QmNRSmNYQktNaVpFODZqY0EiLCJhdWQiOlsiY2xpZW50Il0sImlzcyI6Imh0dHA6XC9cL2xvY2FsaG9zdDo4MDgwXC9vcGVuaWQtY29ubmVjdC1zZXJ2ZXItd2ViYXBwXC8iLCJpYXQiOjE0MTI3ODkzNjh9.xkEJ9IMXpH7qybWXomfq9WOOlpGYnrvGPgey9UQ4GLzbQx7JC0XgJK83PmrmBZosvFPCmota7FzI_BtwoZLgAZfFiH6w3WIlxuogoH-TxmYbxEpTHoTsszZppkq9mNgOlArV4jrR9y3TPo4MovsH71dDhS_ck-CvAlJunHlqhs0"));
	token1.setAuthenticationHolder(mockedAuthHolder1);
	token1.setScope(ImmutableSet.of("id-token"));
	token1.setTokenType("Bearer");

	// --- expected entity 2: token with refresh token and id token links ---
	String expiration2 = "2015-01-07T18:31:50.079+00:00";
	Date expirationDate2 = formatter.parse(expiration2, Locale.ENGLISH);

	ClientDetailsEntity mockedClient2 = mock(ClientDetailsEntity.class);
	when(mockedClient2.getClientId()).thenReturn("mocked_client_2");

	AuthenticationHolderEntity mockedAuthHolder2 = mock(AuthenticationHolderEntity.class);
	when(mockedAuthHolder2.getId()).thenReturn(2L);

	OAuth2RefreshTokenEntity mockRefreshToken2 = mock(OAuth2RefreshTokenEntity.class);
	when(mockRefreshToken2.getId()).thenReturn(1L);

	OAuth2AccessTokenEntity token2 = new OAuth2AccessTokenEntity();
	token2.setId(2L);
	token2.setClient(mockedClient2);
	token2.setExpiration(expirationDate2);
	token2.setJwt(JWTParser.parse("eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3OTI5NjgsImF1ZCI6WyJjbGllbnQiXSwiaXNzIjoiaHR0cDpcL1wvbG9jYWxob3N0OjgwODBcL29wZW5pZC1jb25uZWN0LXNlcnZlci13ZWJhcHBcLyIsImp0aSI6IjBmZGE5ZmRiLTYyYzItNGIzZS05OTdiLWU0M2VhMDUwMzNiOSIsImlhdCI6MTQxMjc4OTM2OH0.xgaVpRLYE5MzbgXfE0tZt823tjAm6Oh3_kdR1P2I9jRLR6gnTlBQFlYi3Y_0pWNnZSerbAE8Tn6SJHZ9k-curVG0-ByKichV7CNvgsE5X_2wpEaUzejvKf8eZ-BammRY-ie6yxSkAarcUGMvGGOLbkFcz5CtrBpZhfd75J49BIQ"));
	token2.setAuthenticationHolder(mockedAuthHolder2);
	token2.setRefreshToken(mockRefreshToken2);
	token2.setScope(ImmutableSet.of("openid", "offline_access", "email", "profile"));
	token2.setTokenType("Bearer");

	// Serialized form mirroring the two tokens above; all other sections empty.
	String configJson = "{" +
			"\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
			"\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
			"\"" + MITREidDataService.CLIENTS + "\": [], " +
			"\"" + MITREidDataService.GRANTS + "\": [], " +
			"\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
			"\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
			"\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
			"\"" + MITREidDataService.ACCESSTOKENS + "\": [" +
			"{\"id\":1,\"clientId\":\"mocked_client_1\",\"expiration\":\"2014-09-10T22:49:44.090+00:00\"," +
			"\"refreshTokenId\":null,\"idTokenId\":null,\"scope\":[\"id-token\"],\"type\":\"Bearer\"," +
			"\"authenticationHolderId\":1,\"value\":\"eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3ODk5NjgsInN1YiI6IjkwMzQyLkFTREZKV0ZBIiwiYXRfaGFzaCI6InptTmt1QmNRSmNYQktNaVpFODZqY0EiLCJhdWQiOlsiY2xpZW50Il0sImlzcyI6Imh0dHA6XC9cL2xvY2FsaG9zdDo4MDgwXC9vcGVuaWQtY29ubmVjdC1zZXJ2ZXItd2ViYXBwXC8iLCJpYXQiOjE0MTI3ODkzNjh9.xkEJ9IMXpH7qybWXomfq9WOOlpGYnrvGPgey9UQ4GLzbQx7JC0XgJK83PmrmBZosvFPCmota7FzI_BtwoZLgAZfFiH6w3WIlxuogoH-TxmYbxEpTHoTsszZppkq9mNgOlArV4jrR9y3TPo4MovsH71dDhS_ck-CvAlJunHlqhs0\"}," +
			"{\"id\":2,\"clientId\":\"mocked_client_2\",\"expiration\":\"2015-01-07T18:31:50.079+00:00\"," +
			"\"refreshTokenId\":1,\"idTokenId\":1,\"scope\":[\"openid\",\"offline_access\",\"email\",\"profile\"],\"type\":\"Bearer\"," +
			"\"authenticationHolderId\":2,\"value\":\"eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3OTI5NjgsImF1ZCI6WyJjbGllbnQiXSwiaXNzIjoiaHR0cDpcL1wvbG9jYWxob3N0OjgwODBcL29wZW5pZC1jb25uZWN0LXNlcnZlci13ZWJhcHBcLyIsImp0aSI6IjBmZGE5ZmRiLTYyYzItNGIzZS05OTdiLWU0M2VhMDUwMzNiOSIsImlhdCI6MTQxMjc4OTM2OH0.xgaVpRLYE5MzbgXfE0tZt823tjAm6Oh3_kdR1P2I9jRLR6gnTlBQFlYi3Y_0pWNnZSerbAE8Tn6SJHZ9k-curVG0-ByKichV7CNvgsE5X_2wpEaUzejvKf8eZ-BammRY-ie6yxSkAarcUGMvGGOLbkFcz5CtrBpZhfd75J49BIQ\"}" +
			"  ]" +
			"}";

	System.err.println(configJson);

	JsonReader reader = new JsonReader(new StringReader(configJson));

	// In-memory stand-in for the token repository; assigns ids from 343 upward
	// when the incoming token has none.
	final Map<Long, OAuth2AccessTokenEntity> fakeDb = new HashMap<>();
	when(tokenRepository.saveAccessToken(isA(OAuth2AccessTokenEntity.class))).thenAnswer(new Answer<OAuth2AccessTokenEntity>() {
		Long id = 343L;
		@Override
		public OAuth2AccessTokenEntity answer(InvocationOnMock invocation) throws Throwable {
			OAuth2AccessTokenEntity _token = (OAuth2AccessTokenEntity) invocation.getArguments()[0];
			if(_token.getId() == null) {
				_token.setId(id++);
			}
			fakeDb.put(_token.getId(), _token);
			return _token;
		}
	});
	when(tokenRepository.getAccessTokenById(anyLong())).thenAnswer(new Answer<OAuth2AccessTokenEntity>() {
		@Override
		public OAuth2AccessTokenEntity answer(InvocationOnMock invocation) throws Throwable {
			Long _id = (Long) invocation.getArguments()[0];
			return fakeDb.get(_id);
		}
	});
	// Client lookups return a fresh mock echoing the requested client id.
	when(clientRepository.getClientByClientId(anyString())).thenAnswer(new Answer<ClientDetailsEntity>() {
		@Override
		public ClientDetailsEntity answer(InvocationOnMock invocation) throws Throwable {
			String _clientId = (String) invocation.getArguments()[0];
			ClientDetailsEntity _client = mock(ClientDetailsEntity.class);
			when(_client.getClientId()).thenReturn(_clientId);
			return _client;
		}
	});
	// Auth-holder lookups by null id mint mocks with ids from 234 upward.
	when(authHolderRepository.getById(isNull(Long.class))).thenAnswer(new Answer<AuthenticationHolderEntity>() {
		Long id = 234L;
		@Override
		public AuthenticationHolderEntity answer(InvocationOnMock invocation) throws Throwable {
			AuthenticationHolderEntity _auth = mock(AuthenticationHolderEntity.class);
			when(_auth.getId()).thenReturn(id);
			id++;
			return _auth;
		}
	});

	dataService.importData(reader);

	//2 times for token, 2 times to update client, 2 times to update authHolder, 1 times to update refresh token
	verify(tokenRepository, times(7)).saveAccessToken(capturedAccessTokens.capture());

	List<OAuth2AccessTokenEntity> savedAccessTokens = new ArrayList(fakeDb.values()); //capturedAccessTokens.getAllValues();
	Collections.sort(savedAccessTokens, new accessTokenIdComparator());

	assertThat(savedAccessTokens.size(), is(2));

	assertThat(savedAccessTokens.get(0).getClient().getClientId(), equalTo(token1.getClient().getClientId()));
	assertThat(savedAccessTokens.get(0).getExpiration(), equalTo(token1.getExpiration()));
	assertThat(savedAccessTokens.get(0).getValue(), equalTo(token1.getValue()));

	assertThat(savedAccessTokens.get(1).getClient().getClientId(), equalTo(token2.getClient().getClientId()));
	assertThat(savedAccessTokens.get(1).getExpiration(), equalTo(token2.getExpiration()));
	assertThat(savedAccessTokens.get(1).getValue(), equalTo(token2.getValue()));
}
/**
 * Fills a newly created bean from the given map, matching map keys to bean
 * property names case-insensitively.
 *
 * @param <T>           the bean type
 * @param map           the source map whose keys are matched ignoring case
 * @param beanClass     the bean class to instantiate via reflection
 * @param isIgnoreError whether property-injection errors are ignored
 * @return the populated bean instance
 * @deprecated superseded by a non-deprecated equivalent (tests use
 *             {@code BeanUtil.toBeanIgnoreCase}) — TODO confirm the intended replacement
 */
@Deprecated
public static <T> T mapToBeanIgnoreCase(Map<?, ?> map, Class<T> beanClass, boolean isIgnoreError) {
	return fillBeanWithMapIgnoreCase(map, ReflectUtil.newInstanceIfPossible(beanClass), isIgnoreError);
}
@Test
public void mapToBeanIgnoreCaseTest() {
	// Keys deliberately use mixed case to exercise case-insensitive matching.
	final HashMap<String, Object> source = MapUtil.newHashMap();
	source.put("Name", "Joe");
	source.put("aGe", 12);

	final Person bean = BeanUtil.toBeanIgnoreCase(source, Person.class, false);

	assertEquals("Joe", bean.getName());
	assertEquals(12, bean.getAge());
}
/**
 * Starts a builder whose maximum crawl depth is expressed as a report
 * {@code Component.Type}.
 *
 * @param reportMaxDepth the deepest report component type to visit
 * @return a new {@code Builder} seeded with that depth limit
 */
public static Builder reportMaxDepth(Component.Type reportMaxDepth) {
  return new Builder(reportMaxDepth);
}
/**
 * Every report component type supplied by the data provider must be accepted
 * without throwing.
 */
@Test
@UseDataProvider("reportTypes")
public void reportMaxDepth_accepts_type_if_report_type(Type reportType) {
  CrawlerDepthLimit.reportMaxDepth(reportType);
}
/**
 * Processes one JSON line describing a {@code BugPatternInstance}: remaps its
 * severity, records it in {@code result}, and writes a Markdown wiki page for
 * the check into {@code outputDir} (one file per pattern, spaces in the name
 * replaced with underscores).
 *
 * @param line a single JSON-encoded bug pattern record
 * @return always {@code true}, signalling the caller to keep processing lines
 * @throws IOException if the side-car explanation or output page cannot be read or written
 */
@Override
public boolean processLine(String line) throws IOException {
  BugPatternInstance pattern = new Gson().fromJson(line, BugPatternInstance.class);
  pattern.severity = severityRemapper.apply(pattern);
  result.add(pattern);
  // replace spaces in filename with underscores
  Path checkPath = Paths.get(pattern.name.replace(' ', '_') + ".md");
  try (Writer writer = Files.newBufferedWriter(outputDir.resolve(checkPath), UTF_8)) {
    // load side-car explanation file, if it exists
    Path sidecarExplanation = explanationDir.resolve(checkPath);
    if (Files.exists(sidecarExplanation)) {
      // a pattern may document its explanation in exactly one place
      if (!pattern.explanation.isEmpty()) {
        throw new AssertionError(
            String.format(
                "%s specifies an explanation via @BugPattern and side-car", pattern.name));
      }
      pattern.explanation = new String(Files.readAllBytes(sidecarExplanation), UTF_8).trim();
    }
    // Construct an appropriate page for this {@code BugPattern}. Include altNames if
    // there are any, and explain the correct way to suppress.
    ImmutableMap.Builder<String, Object> templateData =
        ImmutableMap.<String, Object>builder()
            .put("tags", Joiner.on(", ").join(pattern.tags))
            .put("severity", pattern.severity)
            .put("name", pattern.name)
            .put("className", pattern.className)
            .put("summary", pattern.summary.trim())
            .put("altNames", Joiner.on(", ").join(pattern.altNames))
            .put("explanation", pattern.explanation.trim());
    if (baseUrl != null) {
      templateData.put("baseUrl", baseUrl);
    }
    if (generateFrontMatter) {
      // Emit a YAML front-matter block ("--- ... ---") ahead of the page body.
      ImmutableMap<String, String> frontmatterData =
          ImmutableMap.<String, String>builder()
              .put("title", pattern.name)
              .put("summary", pattern.summary)
              .put("layout", "bugpattern")
              .put("tags", Joiner.on(", ").join(pattern.tags))
              .put("severity", pattern.severity.toString())
              .buildOrThrow();
      DumperOptions options = new DumperOptions();
      options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
      Yaml yaml =
          new Yaml(
              new SafeConstructor(new LoaderOptions()),
              new Representer(new DumperOptions()),
              options);
      Writer yamlWriter = new StringWriter();
      yamlWriter.write("---\n");
      yaml.dump(frontmatterData, yamlWriter);
      yamlWriter.write("---\n");
      templateData.put("frontmatter", yamlWriter.toString());
    }
    if (pattern.documentSuppression) {
      String suppressionString;
      if (pattern.suppressionAnnotations.length == 0) {
        suppressionString = "This check may not be suppressed.";
      } else {
        // Singular vs. plural phrasing depending on how many annotations apply.
        suppressionString =
            pattern.suppressionAnnotations.length == 1
                ? "Suppress false positives by adding the suppression annotation %s to the "
                    + "enclosing element."
                : "Suppress false positives by adding one of these suppression annotations to "
                    + "the enclosing element: %s";
        suppressionString =
            String.format(
                suppressionString,
                Arrays.stream(pattern.suppressionAnnotations)
                    .map((String anno) -> standardizeAnnotation(anno, pattern.name))
                    .collect(Collectors.joining(", ")));
      }
      templateData.put("suppression", suppressionString);
    }
    MustacheFactory mf = new DefaultMustacheFactory();
    Mustache mustache = mf.compile("com/google/errorprone/resources/bugpattern.mustache");
    mustache.execute(writer, templateData.buildOrThrow());
  }
  return true;
}
/**
 * A pattern whose JSON record carries no explanation must pick it up from the
 * side-car Markdown file, and the generated page (no front matter) must match
 * the golden file.
 */
@Test
public void regressionTest_sidecar() throws Exception {
  BugPatternFileGenerator generator =
      new BugPatternFileGenerator(
          wikiDir, explanationDirBase, false, null, input -> input.severity);
  // Side-car explanation placed where the generator looks it up by check name.
  Files.write(
      explanationDirBase.resolve("DeadException.md"),
      Arrays.asList(
          "The exception is created with new, but is not thrown, and the reference is lost."),
      UTF_8);
  generator.processLine(BUGPATTERN_LINE_SIDECAR);
  String expected =
      CharStreams.toString(
          new InputStreamReader(
              getClass().getResourceAsStream("testdata/DeadException_nofrontmatter_gfm.md"),
              UTF_8));
  String actual = new String(Files.readAllBytes(wikiDir.resolve("DeadException.md")), UTF_8);
  assertThat(actual.trim()).isEqualTo(expected.trim());
}
/**
 * Applies this filter's policy to the given metadata, modifying it in place.
 *
 * @param metadata the metadata to filter; mutated by the implementation
 * @throws TikaException if filtering fails
 */
public abstract void filter(Metadata metadata) throws TikaException;
/**
 * With the TIKA-4261 "clear-by-embedded-type" config, metadata of INLINE
 * embedded resources is cleared entirely, while other embedded-resource types
 * (here ALTERNATE_FORMAT_CHUNK) keep their fields.
 */
@Test
public void testAttachmentTypeMetadataFilter() throws Exception {
    TikaConfig config = getConfig("TIKA-4261-clear-by-embedded-type.xml");
    Metadata metadata = new Metadata();
    metadata.set(TikaCoreProperties.EMBEDDED_RESOURCE_TYPE,
            TikaCoreProperties.EmbeddedResourceType.INLINE.name());
    metadata.set(Metadata.CONTENT_TYPE, "text/html; charset=UTF-8");
    MetadataFilter filter = config.getMetadataFilter();
    filter.filter(metadata);
    // INLINE resources: everything stripped
    assertEquals(0, metadata.names().length);

    metadata = new Metadata();
    metadata.set(TikaCoreProperties.EMBEDDED_RESOURCE_TYPE,
            TikaCoreProperties.EmbeddedResourceType.ALTERNATE_FORMAT_CHUNK.name());
    metadata.set(Metadata.CONTENT_TYPE, "text/html; charset=UTF-8");
    filter.filter(metadata);
    // other resource types: both fields survive
    assertEquals(2, metadata.names().length);
}
@Override
public int hashCode() {
    // Fold the two 64-bit halves together, then hash the resulting long.
    // Long.hashCode(v) == (int) (v ^ (v >>> 32)), which produces exactly the
    // same int as the original high-word-XOR-low-word computation.
    return Long.hashCode(mostSignificantBits ^ leastSignificantBits);
}
/**
 * Uuids built from the same bit pairs must be equal and share a hash code;
 * distinct bit pairs must differ in both.
 */
@Test
public void testUuidEquality() {
    Uuid id1 = new Uuid(12L, 13L);
    Uuid id2 = new Uuid(12L, 13L); // same bits as id1, different instance
    Uuid id3 = new Uuid(24L, 38L);

    assertEquals(Uuid.ZERO_UUID, Uuid.ZERO_UUID);
    assertEquals(id1, id2);
    assertNotEquals(id1, id3);

    assertEquals(Uuid.ZERO_UUID.hashCode(), Uuid.ZERO_UUID.hashCode());
    assertEquals(id1.hashCode(), id2.hashCode());
    // NOTE(review): unequal hash codes are not guaranteed by the general
    // contract, but hold for these specific values.
    assertNotEquals(id1.hashCode(), id3.hashCode());
}
/**
 * Executes a single-key lookup against the materialized state store on the
 * given partition, optionally bounded to at least the supplied position.
 *
 * @param key the key to look up
 * @param partition the partition to query
 * @param position if present, the minimum store position the query must observe
 * @return an iterator-backed result holding zero or one rows plus the store position
 * @throws MaterializationException if the query fails
 */
@Override
public KsMaterializedQueryResult<Row> get(
    final GenericKey key,
    final int partition,
    final Optional<Position> position
) {
  try {
    final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);

    StateQueryRequest<ValueAndTimestamp<GenericRow>> request =
        inStore(stateStore.getStateStoreName())
            .withQuery(query)
            .withPartitions(ImmutableSet.of(partition));
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }

    final StateQueryResult<ValueAndTimestamp<GenericRow>> result =
        stateStore.getKafkaStreams().query(request);

    final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
        result.getPartitionResults().get(partition);

    // Some of these failures are retriable, and in the future, we may want to retry
    // locally before throwing.
    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    } else if (queryResult.getResult() == null) {
      // key not present: empty iterator, but still report the position observed
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          Collections.emptyIterator(), queryResult.getPosition());
    } else {
      final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
              .iterator(),
          queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    // already the right exception types for callers; rethrow untouched
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
/**
 * A two-key lookup must translate into a Kafka Streams RangeQuery whose lower
 * and upper bounds are the two keys, in order.
 */
@Test
public void shouldRangeQueryWithCorrectParams_bothBounds() {
  // Given:
  when(kafkaStreams.query(any())).thenReturn(getIteratorResult());

  // When:
  table.get(PARTITION, A_KEY, A_KEY2);

  // Then:
  verify(kafkaStreams).query(queryTypeCaptor.capture());
  StateQueryRequest request = queryTypeCaptor.getValue();
  assertThat(request.getQuery(), instanceOf(RangeQuery.class));
  RangeQuery rangeQuery = (RangeQuery) request.getQuery();
  assertThat(rangeQuery.getLowerBound(), is(Optional.of(A_KEY)));
  assertThat(rangeQuery.getUpperBound(), is(Optional.of(A_KEY2)));
}
/**
 * Returns the names of all system schemas, i.e. the key set of the
 * system database/schema map.
 */
@Override
public Collection<String> getSystemSchemas() {
    return SYSTEM_DATABASE_SCHEMA_MAP.keySet();
}
@Test
void assertGetSystemSchemas() {
    // Expected set built separately for readability; order is irrelevant.
    HashSet<String> expected = new HashSet<>(
            Arrays.asList("information_schema", "performance_schema", "mysql", "sys", "shardingsphere"));
    assertThat(systemDatabase.getSystemSchemas(), is(expected));
}
/**
 * Parses an expression from its JSON representation, delegating to the
 * two-argument overload with a null second argument.
 *
 * @param json the JSON string to parse
 * @return the parsed expression
 */
public static Expression fromJson(String json) {
  return fromJson(json, null);
}
/**
 * Unknown operation type names must be rejected with an IllegalArgumentException
 * naming the offending type, whether they appear in a nested child (lowercase)
 * or at the top level (uppercase).
 */
@Test
public void invalidOperationType() {
  // invalid type nested inside a valid "not" wrapper
  assertThatThrownBy(
          () ->
              ExpressionParser.fromJson(
                  "{\n"
                      + "  \"type\" : \"not\",\n"
                      + "  \"child\" : {\n"
                      + "    \"type\" : \"illegal\",\n"
                      + "    \"term\" : \"column-name\",\n"
                      + "    \"values\" : [ \"a\" ]\n"
                      + "  }\n"
                      + "}"))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("Invalid operation type: illegal");

  // invalid type at the top level, with a valid child
  assertThatThrownBy(
          () ->
              ExpressionParser.fromJson(
                  "{\n"
                      + "  \"type\" : \"ILLEGAL\",\n"
                      + "  \"child\" : {\n"
                      + "    \"type\" : \"lt\",\n"
                      + "    \"term\" : \"column-name\",\n"
                      + "    \"values\" : [ \"a\" ]\n"
                      + "  }\n"
                      + "}"))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("Invalid operation type: ILLEGAL");
}
// Handles the WEB3_CLIENT_VERSION operation: sends the web3_clientVersion
// request and, when the response carries no error, places the reported client
// version string in the message body.
@InvokeOnHeader(Web3jConstants.WEB3_CLIENT_VERSION)
void web3ClientVersion(Message message) throws IOException {
    Request<?, Web3ClientVersion> request = web3j.web3ClientVersion();
    setRequestId(message, request);
    Web3ClientVersion response = request.send();
    if (checkForError(message, response)) {
        // error case: leave the body untouched, matching the original behavior
        return;
    }
    message.setBody(response.getWeb3ClientVersion());
}
/**
 * The WEB3_CLIENT_VERSION operation must place the mocked client-version
 * string ("Geth-123") in the exchange body on success.
 */
@Test
public void clientVersionTest() throws Exception {
    Web3ClientVersion response = Mockito.mock(Web3ClientVersion.class);

    Mockito.when(mockWeb3j.web3ClientVersion()).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getWeb3ClientVersion()).thenReturn("Geth-123");

    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.WEB3_CLIENT_VERSION);
    template.send(exchange);
    String body = exchange.getIn().getBody(String.class);
    assertTrue(body.startsWith("Geth"));
}
/**
 * Builds an array of key/value entry structs from a String-to-Double map,
 * delegating to the shared {@code entries} helper with the DOUBLE struct schema.
 */
@Udf(schema = "ARRAY<STRUCT<K STRING, V DOUBLE>>")
public List<Struct> entriesDouble(
    @UdfParameter(description = "The map to create entries from") final Map<String, Double> map,
    @UdfParameter(description = "If true then the resulting entries are sorted by key")
    final boolean sorted
) {
  return entries(map, DOUBLE_STRUCT_SCHEMA, sorted);
}
/**
 * A null input map must yield a null entry list, not an exception.
 */
@Test
public void shouldReturnNullListForNullMapDouble() {
  assertNull(entriesUdf.entriesDouble(null, false));
}
/**
 * Validates that the execution context may be executed, throwing a
 * {@code TableModifyInTransactionException} naming the offending table when
 * the prerequisites are not met.
 *
 * @param executionContext the execution context to validate
 */
public void checkExecutePrerequisites(final ExecutionContext executionContext) {
    ShardingSpherePreconditions.checkState(isValidExecutePrerequisites(executionContext),
            () -> new TableModifyInTransactionException(getTableName(executionContext)));
}
/**
 * An INSERT (DML) inside a local PostgreSQL transaction must pass the
 * prerequisite check without throwing.
 */
@Test
void assertCheckExecutePrerequisitesWhenExecuteDMLInPostgreSQLTransaction() {
    when(transactionRule.getDefaultType()).thenReturn(TransactionType.LOCAL);
    ExecutionContext executionContext = new ExecutionContext(
            new QueryContext(createPostgreSQLInsertStatementContext(), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)),
            Collections.emptyList(), mock(RouteContext.class));
    new ProxySQLExecutor(JDBCDriverType.STATEMENT, databaseConnectionManager, mock(DatabaseConnector.class), mockQueryContext()).checkExecutePrerequisites(executionContext);
}
/**
 * Discovers upstream clusters by scanning topic names: every heartbeat topic
 * is decomposed into the source clusters it names, and the union of those
 * sources is returned.
 *
 * @return the set of all source-cluster names found in heartbeat topics
 * @throws InterruptedException if listing topics is interrupted
 */
public Set<String> upstreamClusters() throws InterruptedException {
    Set<String> clusters = listTopics().stream()
            .filter(this::isHeartbeatTopic)
            .flatMap(heartbeatTopic -> allSources(heartbeatTopic).stream())
            .collect(Collectors.toSet());
    return clusters;
}
@Test
public void upstreamClustersTest() throws InterruptedException {
    // Heartbeat topics encode chains of source clusters in their dotted prefixes.
    MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats",
        "source1.heartbeats", "source1.source2.heartbeats", "source3.source4.source5.heartbeats"));

    Set<String> sources = client.upstreamClusters();

    // every cluster named anywhere in a heartbeat-topic chain is reported
    for (String expected : Arrays.asList("source1", "source2", "source3", "source4", "source5")) {
        assertTrue(sources.contains(expected));
    }
    // nothing else sneaks in
    assertFalse(sources.contains("sourceX"));
    assertFalse(sources.contains(""));
    assertFalse(sources.contains(null));
}
/**
 * Creates a sharded key from the given key and shard id bytes.
 *
 * @param <K>     the key type
 * @param key     the user key; must not be null
 * @param shardId the shard identifier bytes; must not be null
 * @return a new {@code ShardedKey} wrapping both
 */
public static <K> ShardedKey<K> of(K key, byte[] shardId) {
  checkArgument(key != null, "Key should not be null!");
  checkArgument(shardId != null, "Shard id should not be null!");
  return new ShardedKey<K>(key, shardId);
}
/**
 * Sharded keys built from equal keys and equal shard-id byte content must be
 * equal — including when the byte arrays are distinct instances, so shard ids
 * are compared by content rather than reference.
 */
@Test
public void testEquality() {
  assertEquals(ShardedKey.of("key", new byte[0]), ShardedKey.of("key", new byte[0]));
  // each getBytes() call creates a fresh array instance
  assertEquals(
      ShardedKey.of("key", "shard_id".getBytes(UTF_8)),
      ShardedKey.of("key", "shard_id".getBytes(UTF_8)));
}
/**
 * Reacts to an idle channel by destroying its route information.
 *
 * @param remoteAddr the remote address of the idle channel (not used here;
 *     the channel itself identifies the routes to remove)
 * @param channel    the idle channel whose route info is torn down
 */
@Override
public void onChannelIdle(String remoteAddr, Channel channel) {
    this.namesrvController.getRouteInfoManager().onChannelDestroy(channel);
}
/**
 * Smoke test: the idle-channel callback must tolerate a null channel without
 * throwing.
 */
@Test
public void testOnChannelIdle() {
    // Fix: this test previously invoked onChannelException, leaving the
    // onChannelIdle callback uncovered despite the test's name.
    brokerHousekeepingService.onChannelIdle("127.0.0.1:9876", null);
}
/**
 * Sets the property value, enforcing the maximum length when non-null.
 * A null value clears the property.
 *
 * @param value the new value, or null to clear
 * @return this, for chaining
 * @throws IllegalArgumentException if the value exceeds {@code MAX_VALUE_LENGTH}
 */
public InternalComponentPropertyDto setValue(@Nullable String value) {
  if (value != null) {
    int length = value.length();
    checkArgument(
      length <= MAX_VALUE_LENGTH,
      "value length (%s) is longer than the maximum authorized (%s). '%s' was provided",
      length, MAX_VALUE_LENGTH, value);
  }
  this.value = value;
  return this;
}
/**
 * A value one character over the 4000-character limit must be rejected with
 * an IllegalArgumentException whose message reports both lengths and the value.
 */
@Test
void setValue_throws_IAE_if_value_is_too_long() {
  // 4001 characters: one past the maximum
  String veryLongValue = StringUtils.repeat("a", 4001);

  assertThatThrownBy(() -> new InternalComponentPropertyDto().setValue(veryLongValue))
    .isInstanceOf(IllegalArgumentException.class)
    .hasMessage(String.format("value length (4001) is longer than the maximum authorized (4000). '%s' was provided", veryLongValue));
}