focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Parses the configuration at {@code path} into the target type.
 *
 * @throws ConfigurationParsingException if the source is empty or malformed
 */
@Override
public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException {
    try (InputStream stream = provider.open(requireNonNull(path))) {
        final JsonNode tree = mapper.readTree(createParser(stream));
        // readTree returns null for an empty input; reject that explicitly.
        if (tree == null) {
            throw ConfigurationParsingException
                    .builder("Configuration at " + path + " must not be empty")
                    .build(path);
        }
        return build(tree, path);
    } catch (JsonParseException e) {
        // Surface parser failures with their location so the user can fix the file.
        throw ConfigurationParsingException
                .builder("Malformed " + formatName)
                .setCause(e)
                .setLocation(e.getLocation())
                .setDetail(e.getMessage())
                .build(path);
    }
}
// Test: a system-property override containing an escaped comma ("overr\,idden")
// must be treated as a single array element with the escape removed.
@Test void handlesArrayOverrideEscaped() throws Exception { System.setProperty("dw.type", "coder,wizard,overr\\,idden"); final Example example = factory.build(configurationSourceProvider, validFile); assertThat(example.getType()) .hasSize(3) .element(2) .isEqualTo("overr,idden"); }
// Returns an empty Autoscaling with a blank status/id string.
public static Autoscaling empty() { return empty(""); }
// Test: even in the dev environment, autoscaling must act when the capacity is
// marked "required" — scaling to concrete resources despite unspecified inputs.
@Test public void test_autoscaling_in_dev_with_required_unspecified_resources_preprovisioned() { var requiredCapacity = Capacity.from(new ClusterResources(1, 1, NodeResources.unspecified()), new ClusterResources(3, 1, NodeResources.unspecified()), IntRange.empty(), true, true, Optional.empty(), ClusterInfo.empty()); var fixture = DynamicProvisioningTester.fixture() .hostCount(5) .capacity(requiredCapacity) .zone(new Zone(Environment.dev, RegionName.from("us-east"))) .build(); fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0, 0, 0), 200); fixture.tester().assertResources("We scale even in dev because resources are required", 3, 1, 1.5, 8, 50, fixture.autoscale()); }
// Logs an authentication failure at DEBUG level in a fixed bracketed format:
// [cause|…][method|…][provider|…|…][IP|…|…][login|…]. Skips all formatting work
// when DEBUG is disabled; the login value is length-limited to avoid log flooding.
@Override public void loginFailure(HttpRequest request, AuthenticationException e) { checkRequest(request); requireNonNull(e, "AuthenticationException can't be null"); if (!LOGGER.isDebugEnabled()) { return; } Source source = e.getSource(); LOGGER.debug("login failure [cause|{}][method|{}][provider|{}|{}][IP|{}|{}][login|{}]", emptyIfNull(e.getMessage()), source.getMethod(), source.getProvider(), source.getProviderName(), request.getRemoteAddr(), getAllIps(request), preventLogFlood(emptyIfNull(e.getLogin()))); }
// Test: when the AuthenticationException carries no message, the DEBUG log's
// [cause|] segment must be empty rather than "null".
@Test public void login_failure_creates_DEBUG_log_with_empty_cause_if_AuthenticationException_has_no_message() { AuthenticationException exception = newBuilder().setSource(Source.sso()).setLogin("FoO").build(); underTest.loginFailure(mockRequest(), exception); verifyLog("login failure [cause|][method|SSO][provider|SSO|sso][IP||][login|FoO]", Set.of("logout", "login success")); }
/** Resolves the {@code HttpTracing} component from the given application context. */
abstract HttpTracing httpTracing(ApplicationContext ctx);
// Test: the WebMvc31 strategy must look up HttpTracing by type from the context
// and perform no other context interactions.
@Test void WebMvc31_httpTracing_byType() { ApplicationContext context = mock(ApplicationContext.class); new WebMvc31().httpTracing(context); verify(context).getBean(HttpTracing.class); verifyNoMoreInteractions(context); }
@Deprecated public static String getJwt(JwtClaims claims) throws JoseException { String jwt; RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey( jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName()); // A JWT is a JWS and/or a JWE with JSON claims as the payload. // In this example it is a JWS nested inside a JWE // So we first create a JsonWebSignature object. JsonWebSignature jws = new JsonWebSignature(); // The payload of the JWS is JSON content of the JWT Claims jws.setPayload(claims.toJson()); // The JWT is signed using the sender's private key jws.setKey(privateKey); // Get provider from security config file, it should be two digit // And the provider id will set as prefix for keyid in the token header, for example: 05100 // if there is no provider id, we use "00" for the default value String provider_id = ""; if (jwtConfig.getProviderId() != null) { provider_id = jwtConfig.getProviderId(); if (provider_id.length() == 1) { provider_id = "0" + provider_id; } else if (provider_id.length() > 2) { logger.error("provider_id defined in the security.yml file is invalid; the length should be 2"); provider_id = provider_id.substring(0, 2); } } jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid()); // Set the signature algorithm on the JWT/JWS that will integrity protect the claims jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256); // Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS // representation, which is a string consisting of three dot ('.') separated // base64url-encoded parts in the form Header.Payload.Signature jwt = jws.getCompactSerialization(); return jwt; }
// Test: issues a long-lived token whose group claim cannot be mapped to roles,
// printing it for use in manual/integration access-control checks.
@Test public void GroupToRoleAccessControlWrong() throws Exception { JwtClaims claims = ClaimsUtil.getTestClaimsGroup("stevehu", "EMPLOYEE", "f7d42348-c647-4efb-a52d-4c5787421e72", Arrays.asList("portal.r", "portal.w"), "User_API_Wrong"); claims.setExpirationTimeMinutesInTheFuture(5256000); String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA)); System.out.println("***Long lived token Authorization code customer with a wrong controller groups that cannot be converted to roles ***: " + jwt); }
/**
 * Marks the invocation as a Dubbo service call and, when running as a CSE base
 * SDK, populates the flow-control service metadata from Dubbo system properties.
 */
@Override
public ExecuteContext before(ExecuteContext context) throws Exception {
    final FlowControlConfig pluginConfig = PluginConfigManager.getPluginConfig(FlowControlConfig.class);
    final FlowControlServiceMeta meta = FlowControlServiceMeta.getInstance();
    meta.setDubboService(true);
    // Metadata below is only relevant for CSE-rule base-SDK deployments.
    if (!pluginConfig.isUseCseRule() || !pluginConfig.isBaseSdk()) {
        return context;
    }
    meta.setVersion(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty(
            CseConstants.KEY_DUBBO_VERSION, CseConstants.DEFAULT_DUBBO_VERSION));
    meta.setProject(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty(
            CseConstants.KEY_DUBBO_KIE_PROJECT, CseConstants.DEFAULT_PROJECT));
    meta.setServiceName(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty(
            CseConstants.KEY_DUBBO_SERVICE_NAME, CseConstants.DEFAULT_DUBBO_SERVICE_NAME));
    meta.setEnvironment(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty(
            CseConstants.KEY_DUBBO_ENVIRONMENT, CseConstants.DEFAULT_DUBBO_ENVIRONMENT));
    meta.setApp(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty(
            CseConstants.KEY_DUBBO_APP_NAME, CseConstants.DEFAULT_DUBBO_APP_NAME));
    meta.setCustomLabel(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty(
            CseConstants.KEY_DUBBO_CUSTOM_LABEL, CseConstants.DEFAULT_CUSTOM_LABEL));
    meta.setCustomLabelValue(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty(
            CseConstants.KEY_DUBBO_CUSTOM_LABEL_VALUE, CseConstants.DEFAULT_CUSTOM_LABEL_VALUE));
    return context;
}
// Test: with CSE rules disabled (default), before() must not populate metadata,
// so the version stays null.
@Test public void testClose() throws Exception { final AbstractInterceptor interceptor = getInterceptor(); interceptor.before(buildContext()); assertNull(FlowControlServiceMeta.getInstance().getVersion()); }
// Computes numPartitions-1 range boundary keys from the sampled sort keys.
// Sorts the samples, then picks every step-th element as a candidate boundary,
// linearly probing past duplicates so returned boundaries are distinct.
// The returned array may contain trailing nulls if fewer than numPartitions-1
// distinct candidates exist among the samples.
static SortKey[] rangeBounds( int numPartitions, Comparator<StructLike> comparator, SortKey[] samples) { // sort the keys first Arrays.sort(samples, comparator); int numCandidates = numPartitions - 1; SortKey[] candidates = new SortKey[numCandidates]; int step = (int) Math.ceil((double) samples.length / numPartitions); int position = step - 1; int numChosen = 0; while (position < samples.length && numChosen < numCandidates) { SortKey candidate = samples[position]; // skip duplicate values if (numChosen > 0 && candidate.equals(candidates[numChosen - 1])) { // linear probe for the next distinct value position += 1; } else { candidates[numChosen] = candidate; position += step; numChosen += 1; } } return candidates; }
// Test: with 6 samples evenly divisible into 3 partitions, the two boundary
// keys are the 2nd and 4th sorted samples ("b" and "d").
@Test public void testRangeBoundsDivisible() { assertThat( SketchUtil.rangeBounds( 3, SORT_ORDER_COMPARTOR, new SortKey[] { CHAR_KEYS.get("a"), CHAR_KEYS.get("b"), CHAR_KEYS.get("c"), CHAR_KEYS.get("d"), CHAR_KEYS.get("e"), CHAR_KEYS.get("f") })) .containsExactly(CHAR_KEYS.get("b"), CHAR_KEYS.get("d")); }
/**
 * Executes the given SQL against Pinot, capping the result at {@code _maxRows}
 * when the query has no LIMIT of its own, and applying connection query options.
 *
 * @throws SQLException wrapping any client-side failure
 */
@Override
public ResultSet executeQuery(String sql) throws SQLException {
    validateState();
    try {
        if (!DriverUtils.queryContainsLimitStatement(sql)) {
            // Cap the result size when the caller supplied no LIMIT clause.
            sql += " " + LIMIT_STATEMENT + " " + _maxRows;
        }
        String enabledSql = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions());
        ResultSetGroup group = _session.execute(enabledSql);
        // No result sets at all means an empty result, not an error.
        _resultSet = group.getResultSetCount() == 0
                ? PinotResultSet.empty()
                : new PinotResultSet(group.getResultSet(0));
        return _resultSet;
    } catch (PinotClientException e) {
        throw new SQLException(String.format("Failed to execute query : %s", sql), e);
    }
}
// Test: the useMultistageEngine connection property must be translated into a
// SET query-option prefix on every executed query.
@Test public void testSetUseMultistageEngine() throws Exception { Properties props = new Properties(); props.put(QueryOptionKey.USE_MULTISTAGE_ENGINE, "true"); PinotConnection pinotConnection = new PinotConnection(props, "dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport); Statement statement = pinotConnection.createStatement(); Assert.assertNotNull(statement); statement.executeQuery(BASIC_TEST_QUERY); String expectedSql = DriverUtils.createSetQueryOptionString(QueryOptionKey.USE_MULTISTAGE_ENGINE, true) + BASIC_TEST_QUERY; Assert.assertEquals(_dummyPinotClientTransport.getLastQuery().substring(0, expectedSql.length()), expectedSql); }
// Records a pending compost action when the player clicks a compost-related menu
// option on a farming patch. Matches the clicked object's varbit against known
// patches at the click location, excluding compost/big-compost bins themselves,
// and queues a PendingCompost entry that expires after COMPOST_ACTION_TIMEOUT.
@Subscribe public void onMenuOptionClicked(MenuOptionClicked e) { if (!isCompostAction(e)) { return; } ObjectComposition patchDef = client.getObjectDefinition(e.getId()); WorldPoint actionLocation = WorldPoint.fromScene(client, e.getParam0(), e.getParam1(), client.getPlane()); FarmingPatch targetPatch = farmingWorld.getRegionsForLocation(actionLocation) .stream() .flatMap(fr -> Arrays.stream(fr.getPatches())) .filter(fp -> fp.getVarbit() == patchDef.getVarbitId()) .filter(fp -> fp.getImplementation() != PatchImplementation.COMPOST && fp.getImplementation() != PatchImplementation.BIG_COMPOST) .findFirst() .orElse(null); if (targetPatch == null) { return; } log.debug("Storing pending compost action for patch [{}]", targetPatch); PendingCompost pc = new PendingCompost( Instant.now().plus(COMPOST_ACTION_TIMEOUT), actionLocation, targetPatch ); pendingCompostActions.put(targetPatch, pc); }
// Test: an "Inspect" click on a patch object must queue a PendingCompost entry
// keyed by the resolved farming patch, with the click's world location.
@Test public void onMenuOptionClicked_queuesPendingCompostForInspectActions() { MenuOptionClicked inspectPatchAction = mock(MenuOptionClicked.class); when(inspectPatchAction.getMenuAction()).thenReturn(MenuAction.GAME_OBJECT_SECOND_OPTION); when(inspectPatchAction.getMenuOption()).thenReturn("Inspect"); when(inspectPatchAction.getId()).thenReturn(PATCH_ID); when(inspectPatchAction.getParam0()).thenReturn(1); when(inspectPatchAction.getParam1()).thenReturn(2); compostTracker.onMenuOptionClicked(inspectPatchAction); CompostTracker.PendingCompost actual = compostTracker.pendingCompostActions.get(farmingPatch); assertThat(actual.getFarmingPatch(), is(farmingPatch)); assertThat(actual.getPatchLocation(), is(new WorldPoint(1, 2, 0))); }
// Splits the request path into PathItems and resolves it against the resource
// tree. A leading '/' contributes the ROOT item; each non-empty segment between
// slashes is URL-decoded into a StringPathItem; `s` tracks the start of the
// current segment and the final check flushes a trailing segment with no
// terminating slash. Empty segments (consecutive slashes) are skipped.
@Override public LookupResult lookupResource(Method method, String path, String action) { List<PathItem> pathItems = new ArrayList<>(); final int l = path.length(); int s = 0, pos = 0; while (pos < l) { char ch = path.charAt(pos); if (ch == '/') { if (pos == 0) { pathItems.add(ROOT); } else if (pos > s) { pathItems.add(new StringPathItem(QueryStringDecoder.decodeComponent(path.substring(s, pos)))); } s = ++pos; } else { ++pos; } } if (s < pos) { pathItems.add(new StringPathItem(QueryStringDecoder.decodeComponent(path.substring(s)))); } return resourceTree.find(method, pathItems, action); }
// Test: end-to-end routing over literal and templated path registrations —
// exact matches, template matches, and NOT_FOUND for unregistered depths/paths.
@Test public void testLookupHandler() { registerHandler("root", "CompaniesHandler", "/companies", "/companies/{company}", "/companies/{company}/{id}"); registerHandler("root", "StocksHandler", "/stocks/{stock}", "/stocks/{stock}/{currency}"); registerHandler("root", "DirectorsHandler", "/directors", "/directors/director", "/directors/director/{personId}"); registerHandler("root", "InfoHandler", "/info", "/info/jvm", "/info/jvm/{format}", "/info/{format}/{encoding}"); assertEquals(NOT_FOUND, resourceManager.lookupResource(GET, "/root/dummy").getStatus()); assertEquals(NOT_FOUND, resourceManager.lookupResource(GET, "/fake/").getStatus()); assertEquals(NOT_FOUND, resourceManager.lookupResource(GET, "/").getStatus()); assertEquals(NOT_FOUND, resourceManager.lookupResource(GET, "/root/stocks").getStatus()); assertEquals(NOT_FOUND, resourceManager.lookupResource(GET, "/root/stocks/2/USD/1").getStatus()); assertInvocation(resourceManager.lookupResource(GET, "/root/stocks/2"), "StocksHandler"); assertInvocation(resourceManager.lookupResource(GET, "/root/stocks/2/USD"), "StocksHandler"); assertInvocation(resourceManager.lookupResource(GET, "/root/directors"), "DirectorsHandler"); assertInvocation(resourceManager.lookupResource(GET, "/root/directors/director"), "DirectorsHandler"); assertInvocation(resourceManager.lookupResource(GET, "/root/directors/director/John"), "DirectorsHandler"); assertEquals(NOT_FOUND, resourceManager.lookupResource(GET, "/root/directors/1345").getStatus()); assertEquals(NOT_FOUND, resourceManager.lookupResource(GET, "/root/directors/director/Tim/123").getStatus()); assertInvocation(resourceManager.lookupResource(GET, "/root/companies"), "CompaniesHandler"); assertInvocation(resourceManager.lookupResource(GET, "/root/info"), "InfoHandler"); assertInvocation(resourceManager.lookupResource(GET, "/root/info/jvm"), "InfoHandler"); assertInvocation(resourceManager.lookupResource(GET, "/root/info/jvm/json"), "InfoHandler"); 
assertInvocation(resourceManager.lookupResource(GET, "/root/info/json/zip"), "InfoHandler"); }
/**
 * Returns the gRPC port offset, honoring a system-property override and falling
 * back to the SDK default when none is set.
 */
@Override
public int rpcPortOffset() {
    String configured = System.getProperty(GrpcConstants.NACOS_SERVER_GRPC_PORT_OFFSET_KEY,
            String.valueOf(Constants.SDK_GRPC_PORT_DEFAULT_OFFSET));
    return Integer.parseInt(configured);
}
// Test: the system-property override for the gRPC port offset takes effect.
@Test void testRpcPortOffsetFromSystemProperty() { System.setProperty(GrpcConstants.NACOS_SERVER_GRPC_PORT_OFFSET_KEY, "10000"); grpcSdkClient = new GrpcSdkClient("test", 8, 8, Collections.emptyMap()); assertEquals(10000, grpcSdkClient.rpcPortOffset()); }
// Returns the timeline instants that may conflict with the current operation.
// Table-service operations (clustering/compaction) and regular commits get
// different candidate sets; the active timeline is reloaded first so the check
// sees the latest state.
@Override public Stream<HoodieInstant> getCandidateInstants(HoodieTableMetaClient metaClient, HoodieInstant currentInstant, Option<HoodieInstant> lastSuccessfulInstant) { HoodieActiveTimeline activeTimeline = metaClient.reloadActiveTimeline(); if (ClusteringUtils.isClusteringInstant(activeTimeline, currentInstant) || COMPACTION_ACTION.equals(currentInstant.getAction())) { return getCandidateInstantsForTableServicesCommits(activeTimeline, currentInstant); } else { return getCandidateInstantsForNonTableServicesCommits(activeTimeline, currentInstant); } }
// Test: with PreferWriterConflictResolutionStrategy, an in-flight ingestion
// commit must see no conflict candidates from an interleaved scheduled
// clustering (ingestion is preferred over clustering).
@Test public void testConcurrentWritesWithInterleavingScheduledCluster() throws Exception { createCommit(metaClient.createNewInstantTime(), metaClient); HoodieActiveTimeline timeline = metaClient.getActiveTimeline(); // consider commits before this are all successful Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant(); // writer 1 starts String currentWriterInstant = metaClient.createNewInstantTime(); createInflightCommit(currentWriterInstant, metaClient); // clustering 1 gets scheduled String newInstantTime = metaClient.createNewInstantTime(); createClusterRequested(newInstantTime, metaClient); createClusterInflight(newInstantTime, WriteOperationType.CLUSTER, metaClient); Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant)); PreferWriterConflictResolutionStrategy strategy = new PreferWriterConflictResolutionStrategy(); List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect( Collectors.toList()); // Since we give preference to ingestion over clustering, there won't be a conflict with replacecommit. Assertions.assertEquals(0, candidateInstants.size()); }
/**
 * Fuzzes the request's GET parameters with {@code payload}, injecting
 * {@code defaultParameter} only when the request has no parameters of its own.
 * Convenience overload of {@code fuzzGetParameters} with no skipped parameters.
 */
public static ImmutableList<HttpRequest> fuzzGetParametersWithDefaultParameter( HttpRequest request, String payload, String defaultParameter) { return fuzzGetParameters(request, payload, Optional.of(defaultParameter), ImmutableSet.of()); }
// Test: when the request already has GET parameters, the default parameter must
// NOT be injected into the fuzzed variants.
@Test public void fuzzGetParametersWithDefaultParameter_whenGetParameters_doesNotAddDefaultParameter() { HttpRequest requestWithDefaultParameter = HttpRequest.get("https://google.com?default=<payload>").withEmptyHeaders().build(); assertThat( FuzzingUtils.fuzzGetParametersWithDefaultParameter( REQUEST_WITH_GET_PARAMETERS, "<payload>", "default")) .doesNotContain(requestWithDefaultParameter); }
// Intentionally a no-op: this is the top-level CLI command, which only exists to
// host subcommands.
@Override public void run() { // top-level command, do nothing }
// Test: list-jobs output must include job names verbatim even when they contain
// control characters (newline, tab, NUL), along with id and status.
@Test public void test_listJobs_dirtyName() { // Given String jobName = "job\n\tname\u0000"; Job job = newJob(jobName); // When run("list-jobs"); // Then String actual = captureOut(); assertContains(actual, jobName); assertContains(actual, job.getIdString()); assertContains(actual, job.getStatus().toString()); }
/**
 * Adds the value to every underlying Bloom filter (no short-circuiting) and
 * reports whether any filter actually changed state.
 */
@Override
public boolean add(String str) {
    boolean changed = false;
    for (BloomFilter filter : filters) {
        // Each filter must observe the value even if an earlier one changed.
        if (filter.add(str)) {
            changed = true;
        }
    }
    return changed;
}
// Manual/exploratory test (disabled): fills a LongMap with 0..63, removes 30,
// then prints membership of each value for visual inspection.
@Test @Disabled public void testLongMap(){ LongMap longMap = new LongMap(); for (int i = 0 ; i < 64; i++) { longMap.add(i); } longMap.remove(30); for (int i = 0; i < 64; i++) { System.out.println(i + "是否存在-->" + longMap.contains(i)); } }
// Converts this pre-join config back to a plain CacheConfig via a non-resolving
// copy (the 'false' flag — class names are not resolved at copy time).
CacheConfig<K, V> asCacheConfig() { return this.copy(new CacheConfig<>(), false); }
// Test: a PreJoinCacheConfig must round-trip through serialization even when its
// value class name cannot be resolved; only resolving the value type afterwards
// fails, with ClassNotFoundException as the cause.
@Test public void serializationSucceeds_whenValueTypeNotResolvable() { PreJoinCacheConfig preJoinCacheConfig = new PreJoinCacheConfig(newDefaultCacheConfig("test")); preJoinCacheConfig.setKeyClassName("java.lang.String"); preJoinCacheConfig.setValueClassName("some.inexistent.Class"); Data data = serializationService.toData(preJoinCacheConfig); PreJoinCacheConfig deserialized = serializationService.toObject(data); assertEquals(deserialized, preJoinCacheConfig); try { Class klass = deserialized.asCacheConfig().getValueType(); fail("Getting the value type on deserialized CacheConfig should fail because the value type cannot be resolved"); } catch (HazelcastException e) { if (!(e.getCause() instanceof ClassNotFoundException)) { fail("Unexpected exception: " + e.getCause()); } } }
// Routes a block to the sending mailboxes. Error blocks are broadcast to all
// mailboxes; a successful EOS block sends the query stats/metadata to one
// randomly chosen mailbox and plain EOS blocks to the rest. Data blocks are
// routed normally unless every mailbox has signaled early termination, in which
// case the block is dropped and true is returned so the caller can stop sending.
public boolean send(TransferableBlock block) throws Exception { if (block.isErrorBlock()) { // Send error block to all mailboxes to propagate the error for (SendingMailbox sendingMailbox : _sendingMailboxes) { sendBlock(sendingMailbox, block); } return false; } if (block.isSuccessfulEndOfStreamBlock()) { // Send metadata to only one randomly picked mailbox, and empty EOS block to other mailboxes int numMailboxes = _sendingMailboxes.size(); int mailboxIdToSendMetadata = ThreadLocalRandom.current().nextInt(numMailboxes); assert block.getQueryStats() != null; for (int i = 0; i < numMailboxes; i++) { SendingMailbox sendingMailbox = _sendingMailboxes.get(i); TransferableBlock blockToSend = i == mailboxIdToSendMetadata ? block : TransferableBlockUtils.getEndOfStreamTransferableBlock(); sendBlock(sendingMailbox, blockToSend); } return false; } assert block.isDataBlock(); boolean isEarlyTerminated = true; for (SendingMailbox sendingMailbox : _sendingMailboxes) { if (!sendingMailbox.isEarlyTerminated()) { isEarlyTerminated = false; break; } } if (!isEarlyTerminated) { route(_sendingMailboxes, block); } return isEarlyTerminated; }
// Test: send() returns false while any mailbox is still active, and true only
// once every mailbox reports early termination.
@Test public void shouldSignalEarlyTerminationProperly() throws Exception { // Given: List<SendingMailbox> destinations = ImmutableList.of(_mailbox1, _mailbox2); BlockExchange exchange = new TestBlockExchange(destinations); TransferableBlock block = new TransferableBlock(ImmutableList.of(new Object[]{"val"}), new DataSchema(new String[]{"foo"}, new ColumnDataType[]{ColumnDataType.STRING}), DataBlock.Type.ROW); // When send normal block and some mailbox has terminated when(_mailbox1.isEarlyTerminated()).thenReturn(true); boolean isEarlyTerminated = exchange.send(block); // Then: Assert.assertFalse(isEarlyTerminated); // When send normal block and both terminated when(_mailbox2.isTerminated()).thenReturn(true); isEarlyTerminated = exchange.send(block); // Then: Assert.assertFalse(isEarlyTerminated); // When send metadata block when(_mailbox2.isEarlyTerminated()).thenReturn(true); isEarlyTerminated = exchange.send(block); // Then: Assert.assertTrue(isEarlyTerminated); }
// Formats a byte count as a human-readable unit string using '.' as the decimal
// separator (locale-independent).
public static String printUnitFromBytesDot(long bytes) { return printUnitFromBytes(bytes, '.'); }
// Test: unit formatting with a dot separator across B/kB/MB boundaries,
// including the 999999 edge that stays in kB ("1000.0 kB").
@Test public void testPrintUnitFromBytesDot() { char decimalSeparator = '.'; assertEquals("999 B", printUnitFromBytes(999)); assertEquals("1" + decimalSeparator + "0 kB", printUnitFromBytesDot(1000)); assertEquals("1" + decimalSeparator + "0 kB", printUnitFromBytesDot(1001)); assertEquals("1" + decimalSeparator + "2 kB", printUnitFromBytesDot(1201)); assertEquals("1000" + decimalSeparator + "0 kB", printUnitFromBytesDot(999999)); assertEquals("1" + decimalSeparator + "0 MB", printUnitFromBytesDot(1000000)); assertEquals("1" + decimalSeparator + "0 MB", printUnitFromBytesDot(1000001)); assertEquals("1" + decimalSeparator + "5 MB", printUnitFromBytesDot(1500001)); }
/**
 * Verifies that the given class can be instantiated via its default mechanism.
 *
 * @throws RuntimeException with a diagnostic message if the class is not instantiable
 */
public static void checkForInstantiation(Class<?> clazz) {
    final String errorMessage = checkForInstantiationError(clazz);
    // A null message means the class passed all instantiation checks.
    if (errorMessage == null) {
        return;
    }
    throw new RuntimeException(
            "The class '" + clazz.getName() + "' is not instantiable: " + errorMessage);
}
// Test: checking a private (non-instantiable) class must raise RuntimeException.
@Test void testCheckForInstantiationOfPrivateClass() { assertThatThrownBy(() -> InstantiationUtil.checkForInstantiation(TestClass.class)) .isInstanceOf(RuntimeException.class); }
/**
 * Dispatches a prepared statement: Query statements are executed here, anything
 * else is reported as unhandled so another executor can take it.
 */
@SuppressWarnings("unchecked")
public QueryMetadataHolder handleStatement(
    final ServiceContext serviceContext,
    final Map<String, Object> configOverrides,
    final Map<String, Object> requestProperties,
    final PreparedStatement<?> statement,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context,
    final boolean excludeTombstones
) {
    // Guard clause: only Query statements are handled by this executor.
    if (!(statement.getStatement() instanceof Query)) {
        return QueryMetadataHolder.unhandled();
    }
    return handleQuery(
        serviceContext,
        (PreparedStatement<Query>) statement,
        isInternalRequest,
        metricsCallbackHolder,
        configOverrides,
        requestProperties,
        context,
        excludeTombstones
    );
}
// Test: the first pull query passes; the second must be rejected by the 1.0 qps
// rate limit with the expected KsqlException message.
@Test public void shouldRateLimit() { when(ksqlEngine.executeTablePullQuery(any(), any(), any(), any(), any(), any(), any(), anyBoolean(), any())) .thenReturn(pullQueryResult); when(mockDataSource.getDataSourceType()).thenReturn(DataSourceType.KTABLE); // When: queryExecutor.handleStatement(serviceContext, ImmutableMap.of(), ImmutableMap.of(), pullQuery, Optional.empty(), metricsCallbackHolder, context, false); Exception e = assertThrows(KsqlException.class, () -> queryExecutor.handleStatement(serviceContext, ImmutableMap.of(), ImmutableMap.of(), pullQuery, Optional.empty(), metricsCallbackHolder, context, false)); // Then: assertThat(e.getMessage(), is("Host is at rate limit for pull queries. Currently set to 1.0 qps.")); }
// Lists the segment objects backing a Swift large object. Returns an empty list
// when the object is not a large object (null segment map). For each segment,
// size, modification date (tolerating non-ISO-8601 timestamps with a warning)
// and MD5 checksum are populated. Swift/IO failures are mapped to
// BackgroundException via the respective mapping services.
public List<Path> list(final Path file) throws BackgroundException { try { final Path container = containerService.getContainer(file); final Map<String, List<StorageObject>> segments = session.getClient().listObjectSegments(regionService.lookup(container), container.getName(), containerService.getKey(file)); if(null == segments) { // Not a large object return Collections.emptyList(); } final List<Path> objects = new ArrayList<>(); for(final String containerName : segments.keySet()) { final Path containerPath = new Path(containerName, container.getType(), container.attributes()); for(StorageObject s : segments.get(containerName)) { final Path segment = new Path(containerPath, s.getName(), EnumSet.of(Path.Type.file)); segment.attributes().setSize(s.getSize()); try { segment.attributes().setModificationDate(dateParser.parse(s.getLastModified()).getTime()); } catch(InvalidDateException e) { log.warn(String.format("%s is not ISO 8601 format %s", s.getLastModified(), e.getMessage())); } if(StringUtils.isNotBlank(s.getMd5sum())) { segment.attributes().setChecksum(Checksum.parse(s.getMd5sum())); } objects.add(segment); } } return objects; } catch(GenericException e) { throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file); } }
// Test: listing segments of a random (non-large) object must yield an empty list.
@Test public void testList() throws Exception { final Path container = new Path("/test.cyberduck.ch", EnumSet.of(Path.Type.volume, Path.Type.directory)); container.attributes().setRegion("IAD"); assertTrue(new SwiftSegmentService(session).list(new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file))).isEmpty()); }
// Maps one change-stream result row to records, dialect-aware: PostgreSQL
// returns a single JSONB payload per row, while GoogleSQL returns an array of
// structs that each may expand to multiple records.
public List<ChangeStreamRecord> toChangeStreamRecords( PartitionMetadata partition, ChangeStreamResultSet resultSet, ChangeStreamResultSetMetadata resultSetMetadata) { if (this.isPostgres()) { // In PostgresQL, change stream records are returned as JsonB. return Collections.singletonList( toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata)); } // In GoogleSQL, change stream records are returned as an array of structs. return resultSet.getCurrentRowAsStruct().getStructList(0).stream() .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata)) .collect(Collectors.toList()); }
// Test: a GoogleSQL struct row encoding an INSERT must map back to the expected
// DataChangeRecord, including column types, mods, and transaction metadata.
@Test public void testMappingInsertStructRowToDataChangeRecord() { final DataChangeRecord dataChangeRecord = new DataChangeRecord( "partitionToken", Timestamp.ofTimeSecondsAndNanos(10L, 20), "transactionId", false, "1", "tableName", Arrays.asList( new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L), new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)), Collections.singletonList( new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")), ModType.INSERT, ValueCaptureType.OLD_AND_NEW_VALUES, 10L, 2L, "transactionTag", true, null); final Struct jsonFieldsStruct = recordsToStructWithJson(dataChangeRecord); ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class); when(resultSet.getCurrentRowAsStruct()).thenReturn(jsonFieldsStruct); assertEquals( Collections.singletonList(dataChangeRecord), mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata)); }
/**
 * Starts a client span as a child of the explicit {@code parent} context (a null
 * parent starts a new trace) and delegates to the standard send handling.
 *
 * @throws NullPointerException if request is null
 */
public Span handleSendWithParent(HttpClientRequest request, @Nullable TraceContext parent) {
    if (request == null) throw new NullPointerException("request == null");
    Span nextSpan = tracer.nextSpanWithParent(httpSampler, request, parent);
    return handleSend(request, nextSpan);
}
// Test: an explicit parent overrides a null current context — the created span
// must be a child of the supplied context.
@Test void handleSendWithParent_overrideNull() { try (CurrentTraceContext.Scope scope = httpTracing.tracing.currentTraceContext().newScope(null)) { brave.Span span = handler.handleSendWithParent(request, context); // If the overwrite was successful, we have a child span. assertThat(span.context().parentIdAsLong()).isEqualTo(context.spanId()); } }
static void filterProperties(Message message, Set<String> namesToClear) { List<Object> retainedProperties = messagePropertiesBuffer(); try { filterProperties(message, namesToClear, retainedProperties); } finally { retainedProperties.clear(); // ensure no object references are held due to any exception } }
// Test: a provider that throws from setObjectProperty (e.g. SQS's reserved
// JMS_SQS_DeduplicationId) must not make property filtering throw.
@Test void filterProperties_message_handlesOnSetException() throws JMSException { Message message = mock(Message.class); when(message.getPropertyNames()).thenReturn( Collections.enumeration(Collections.singletonList("JMS_SQS_DeduplicationId"))); when(message.getObjectProperty("JMS_SQS_DeduplicationId")).thenReturn(""); doThrow(new IllegalArgumentException()).when(message).setObjectProperty(anyString(), eq("")); assertThatCode(() -> PropertyFilter.filterProperties(message, Collections.singleton("b3"))).doesNotThrowAnyException(); }
// Returns the precomputed canonical string form (e.g. "text/plain; charset=UTF-8").
public String toString() { return string; }
// Test: toString renders "type/subtype" with "; key=value" parameter suffixes in
// insertion order; an empty parameter map adds nothing.
@Test public void testBasics() { assertEquals("application/octet-stream", new MediaType("application", "octet-stream").toString()); assertEquals("text/plain", new MediaType("text", "plain").toString()); Map<String, String> parameters = new HashMap<>(); assertEquals("text/plain", new MediaType("text", "plain", parameters).toString()); parameters.put("charset", "UTF-8"); assertEquals("text/plain; charset=UTF-8", new MediaType("text", "plain", parameters).toString()); parameters.put("x-eol-style", "crlf"); assertEquals("text/plain; charset=UTF-8; x-eol-style=crlf", new MediaType("text", "plain", parameters).toString()); }
/**
 * Returns the interned {@link TimeZoneKey} for the given numeric key.
 *
 * @throws IllegalArgumentException if the key is negative, out of range, or unassigned
 */
@JsonCreator
public static TimeZoneKey getTimeZoneKey(short timeZoneKey)
{
    // Bug fixes: (1) negative keys previously slipped past the upper-bound check
    // and threw ArrayIndexOutOfBoundsException from the array access inside the
    // condition — add the lower-bound check so they fail as IllegalArgumentException.
    // (2) use %s instead of %d: Guava-style checkArgument templates substitute
    // only %s, so %d would appear literally; %s also works under String.format.
    checkArgument(timeZoneKey >= 0 && timeZoneKey < TIME_ZONE_KEYS.length && TIME_ZONE_KEYS[timeZoneKey] != null,
            "Invalid time zone key %s", timeZoneKey);
    return TIME_ZONE_KEYS[timeZoneKey];
}
// Test: exhaustive coverage of hour-offset zone aliases (GMT/UT/UTC, with and
// without Etc/ prefix). Signed forms resolve to UTC or ±7 keys; unsigned forms
// ("GMT7", "Etc/UTC7", ...) are unsupported. Note the POSIX inversion: Etc/GMT+7
// maps to the MINUS_7 key and Etc/GMT-7 to PLUS_7.
@Test public void testHourOffsetZone() { assertSame(TimeZoneKey.getTimeZoneKey("GMT0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("GMT+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("GMT-0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("GMT+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("GMT-0"), UTC_KEY); assertTimeZoneNotSupported("GMT7"); assertSame(TimeZoneKey.getTimeZoneKey("GMT+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("GMT-7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("GMT+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("GMT-7"), MINUS_7_KEY); assertTimeZoneNotSupported("UT0"); assertSame(TimeZoneKey.getTimeZoneKey("UT+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UT-0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UT+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UT-0"), UTC_KEY); assertTimeZoneNotSupported("UT7"); assertSame(TimeZoneKey.getTimeZoneKey("UT+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UT-7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UT+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UT-7"), MINUS_7_KEY); assertTimeZoneNotSupported("UTC0"); assertSame(TimeZoneKey.getTimeZoneKey("UTC+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UTC-0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UTC+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UTC-0"), UTC_KEY); assertTimeZoneNotSupported("UTC7"); assertSame(TimeZoneKey.getTimeZoneKey("UTC+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UTC-7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UTC+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("UTC-7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT-0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT+0"), UTC_KEY); 
assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT-0"), UTC_KEY); assertTimeZoneNotSupported("Etc/GMT7"); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT+7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT-7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT+7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/GMT-7"), PLUS_7_KEY); assertTimeZoneNotSupported("Etc/UT0"); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT-0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT-0"), UTC_KEY); assertTimeZoneNotSupported("Etc/UT7"); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT-7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UT-7"), MINUS_7_KEY); assertTimeZoneNotSupported("Etc/UTC0"); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC-0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC+0"), UTC_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC-0"), UTC_KEY); assertTimeZoneNotSupported("Etc/UTC7"); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC-7"), MINUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC+7"), PLUS_7_KEY); assertSame(TimeZoneKey.getTimeZoneKey("Etc/UTC-7"), MINUS_7_KEY); }
@Override public Optional<SimpleLock> lock(LockConfiguration lockConfiguration) { boolean lockObtained = doLock(lockConfiguration); if (lockObtained) { return Optional.of(new StorageLock(lockConfiguration, storageAccessor)); } else { return Optional.empty(); } }
@Test void shouldRethrowExceptionFromInsert() { when(storageAccessor.insertRecord(LOCK_CONFIGURATION)).thenThrow(LOCK_EXCEPTION); assertThatThrownBy(() -> lockProvider.lock(LOCK_CONFIGURATION)).isSameAs(LOCK_EXCEPTION); }
@Override public Object invoke(MethodInvocation methodInvocation) throws Throwable { // 入栈 DataPermission dataPermission = this.findAnnotation(methodInvocation); if (dataPermission != null) { DataPermissionContextHolder.add(dataPermission); } try { // 执行逻辑 return methodInvocation.proceed(); } finally { // 出栈 if (dataPermission != null) { DataPermissionContextHolder.remove(); } } }
@Test // 在 Method 上有 @DataPermission 注解 public void testInvoke_method() throws Throwable { // 参数 mockMethodInvocation(TestMethod.class); // 调用 Object result = interceptor.invoke(methodInvocation); // 断言 assertEquals("method", result); assertEquals(1, interceptor.getDataPermissionCache().size()); assertFalse(CollUtil.getFirst(interceptor.getDataPermissionCache().values()).enable()); }
@Override protected Optional<ErrorResponse> filter(DiscFilterRequest request) { return request.getClientCertificateChain().isEmpty() ? Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden to access this path")) : Optional.empty(); }
@Test void testFilter() { assertSuccess(createRequest(List.of(createCertificate()))); assertForbidden(createRequest(List.of())); }
public final Logger getLogger(final Class<?> clazz) { return getLogger(clazz.getName()); }
@Test public void testLoggerMultipleChildren() { assertEquals(1, instanceCount()); Logger xy0 = lc.getLogger("x.y0"); LoggerTestHelper.assertNameEquals(xy0, "x.y0"); Logger xy1 = lc.getLogger("x.y1"); LoggerTestHelper.assertNameEquals(xy1, "x.y1"); LoggerTestHelper.assertLevels(null, xy0, Level.DEBUG); LoggerTestHelper.assertLevels(null, xy1, Level.DEBUG); assertEquals(4, instanceCount()); for (int i = 0; i < 100; i++) { Logger xy_i = lc.getLogger("x.y" + i); LoggerTestHelper.assertNameEquals(xy_i, "x.y" + i); LoggerTestHelper.assertLevels(null, xy_i, Level.DEBUG); } assertEquals(102, instanceCount()); }
@GET @Produces(MediaType.APPLICATION_JSON) public List<AppEntry> getList() { AppCatalogSolrClient sc = new AppCatalogSolrClient(); return sc.listAppEntries(); }
@Test void testGetList() throws Exception { AppListController ac = Mockito.mock(AppListController.class); List<AppEntry> actual = new ArrayList<AppEntry>(); when(ac.getList()).thenReturn(actual); final List<AppEntry> result = ac.getList(); assertEquals(result, actual); }
@Nullable public static Method findPropertySetter( @Nonnull Class<?> clazz, @Nonnull String propertyName, @Nonnull Class<?> propertyType ) { String setterName = "set" + toUpperCase(propertyName.charAt(0)) + propertyName.substring(1); Method method; try { method = clazz.getMethod(setterName, propertyType); } catch (NoSuchMethodException e) { return null; } if (!Modifier.isPublic(method.getModifiers())) { return null; } if (Modifier.isStatic(method.getModifiers())) { return null; } Class<?> returnType = method.getReturnType(); if (returnType != void.class && returnType != Void.class && returnType != clazz) { return null; } return method; }
@Test public void when_findPropertySetter_public_then_returnsIt() { assertNotNull(findPropertySetter(JavaProperties.class, "publicField", int.class)); }
public <T> T create(Class<T> clazz) { return create(clazz, new Class<?>[]{}, new Object[]{}); }
@Test void testCanBeConfiguredWithACustomAspect() { final SessionDao sessionDao = new SessionDao(sessionFactory); final UnitOfWorkAwareProxyFactory unitOfWorkAwareProxyFactory = new UnitOfWorkAwareProxyFactory("default", sessionFactory) { @Override public UnitOfWorkAspect newAspect(Map<String, SessionFactory> sessionFactories) { return new CustomAspect(sessionFactories); } }; final OAuthAuthenticator oAuthAuthenticator = unitOfWorkAwareProxyFactory .create(OAuthAuthenticator.class, SessionDao.class, sessionDao); assertThat(oAuthAuthenticator.authenticate("gr6f9y0")).isTrue(); }
@Override public Optional<GaugeMetricFamilyMetricsCollector> export(final String pluginType) { GaugeMetricFamilyMetricsCollector result = MetricsCollectorRegistry.get(config, pluginType); result.cleanMetrics(); for (Entry<String, ShardingSphereDataSourceContext> entry : ShardingSphereDataSourceContextHolder.getShardingSphereDataSourceContexts().entrySet()) { Optional.ofNullable(entry.getValue().getContextManager().getDatabase(entry.getValue().getDatabaseName())) .ifPresent(optional -> result.addMetric(Arrays.asList(entry.getKey(), optional.getName()), entry.getValue().getContextManager().getComputeNodeInstanceContext().getInstance().getState().getCurrentState().ordinal())); } return Optional.of(result); }
@Test void assertExport() { Optional<GaugeMetricFamilyMetricsCollector> collector = new JDBCStateExporter().export("FIXTURE"); assertTrue(collector.isPresent()); assertThat(collector.get().toString(), containsString(instanceId)); assertThat(collector.get().toString(), containsString(databaseName)); }
@Override public FetchContext planFetchForProcessing(IndexSegment indexSegment, QueryContext queryContext) { return new FetchContext(UUID.randomUUID(), indexSegment.getSegmentName(), getColumns(indexSegment, queryContext)); }
@Test public void testPlanFetchForProcessing() { DefaultFetchPlanner planner = new DefaultFetchPlanner(); IndexSegment indexSegment = mock(IndexSegment.class); when(indexSegment.getSegmentName()).thenReturn("s0"); when(indexSegment.getColumnNames()).thenReturn(ImmutableSet.of("c0", "c1", "c2")); String query = "SELECT COUNT(*) FROM testTable WHERE c0 = 0 OR (c1 < 10 AND c2 IN (1, 2))"; QueryContext queryContext = QueryContextConverterUtils.getQueryContext(query); FetchContext fetchContext = planner.planFetchForProcessing(indexSegment, queryContext); assertEquals(fetchContext.getSegmentName(), "s0"); Map<String, List<IndexType<?, ?, ?>>> columns = fetchContext.getColumnToIndexList(); assertEquals(columns.size(), 3); // null means to get all index types created for the column. assertNull(columns.get("c0")); assertNull(columns.get("c1")); assertNull(columns.get("c2")); }
public static String getIpByHost(String hostName) { try { return InetAddress.getByName(hostName).getHostAddress(); } catch (UnknownHostException e) { return hostName; } }
@Test void testGetIpByHost() { assertThat(NetUtils.getIpByHost("localhost"), equalTo("127.0.0.1")); assertThat(NetUtils.getIpByHost("dubbo.local"), equalTo("dubbo.local")); }
@VisibleForTesting String upload(Configuration config, String artifactUriStr) throws IOException, URISyntaxException { final URI artifactUri = PackagedProgramUtils.resolveURI(artifactUriStr); if (!"local".equals(artifactUri.getScheme())) { return artifactUriStr; } final String targetDir = config.get(KubernetesConfigOptions.LOCAL_UPLOAD_TARGET); checkArgument( !StringUtils.isNullOrWhitespaceOnly(targetDir), String.format( "Setting '%s' to a valid remote path is required.", KubernetesConfigOptions.LOCAL_UPLOAD_TARGET.key())); final FileSystem.WriteMode writeMode = config.get(KubernetesConfigOptions.LOCAL_UPLOAD_OVERWRITE) ? FileSystem.WriteMode.OVERWRITE : FileSystem.WriteMode.NO_OVERWRITE; final File src = new File(artifactUri.getPath()); final Path target = new Path(targetDir, src.getName()); if (target.getFileSystem().exists(target) && writeMode == FileSystem.WriteMode.NO_OVERWRITE) { LOG.info( "Skip uploading artifact '{}', as it already exists." + " To overwrite existing artifacts, please set the '{}' config option.", target, KubernetesConfigOptions.LOCAL_UPLOAD_OVERWRITE.key()); } else { final long start = System.currentTimeMillis(); final FileSystem fs = target.getFileSystem(); try (FSDataOutputStream os = fs.create(target, writeMode)) { FileUtils.copyFile(src, os); } LOG.debug( "Copied file from {} to {}, cost {} ms", src, target, System.currentTimeMillis() - start); } return target.toString(); }
@Test void testUpload() throws Exception { File jar = getFlinkKubernetesJar(); String localUri = "local://" + jar.getAbsolutePath(); String expectedUri = "dummyfs:" + tmpDir.resolve(jar.getName()); String resultUri = artifactUploader.upload(config, localUri); assertThat(resultUri).isEqualTo(expectedUri); }
public static String insertedIdAsString(@Nonnull InsertOneResult result) { return insertedId(result).toHexString(); }
@Test void testInsertedIdAsString() { final var id = "6627add0ee216425dd6df37c"; final var a = new DTO(id, "a"); assertThat(insertedIdAsString(collection.insertOne(a))).isEqualTo(id); assertThat(insertedIdAsString(collection.insertOne(new DTO(null, "b")))).isNotBlank(); }
@Override public Flux<ReactiveRedisConnection.BooleanResponse<RenameCommand>> renameNX(Publisher<RenameCommand> commands) { return execute(commands, command -> { Assert.notNull(command.getKey(), "Key must not be null!"); Assert.notNull(command.getNewName(), "New name must not be null!"); byte[] keyBuf = toByteArray(command.getKey()); byte[] newKeyBuf = toByteArray(command.getNewName()); if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) { return super.renameNX(commands); } return exists(command.getNewName()) .zipWith(read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf)) .filter(newKeyExistsAndDump -> !newKeyExistsAndDump.getT1() && Objects.nonNull(newKeyExistsAndDump.getT2())) .map(Tuple2::getT2) .zipWhen(value -> pTtl(command.getKey()) .filter(Objects::nonNull) .map(ttl -> Math.max(0, ttl)) .switchIfEmpty(Mono.just(0L)) ) .flatMap(valueAndTtl -> write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1()) .then(Mono.just(true))) .switchIfEmpty(Mono.just(false)) .doOnSuccess(didRename -> { if (didRename) { del(command.getKey()); } }) .map(didRename -> new BooleanResponse<>(command, didRename)); }); }
@Test public void testRenameNX() { connection.stringCommands().set(originalKey, value).block(); if (hasTtl) { connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block(); } Integer originalSlot = getSlotForKey(originalKey); newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot)); Boolean result = connection.keyCommands().renameNX(originalKey, newKey).block(); assertThat(result).isTrue(); assertThat(connection.stringCommands().get(newKey).block()).isEqualTo(value); if (hasTtl) { assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0); } else { assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1); } connection.stringCommands().set(originalKey, value).block(); result = connection.keyCommands().renameNX(originalKey, newKey).block(); assertThat(result).isFalse(); }
@Override public GetClusterMetricsResponse getClusterMetrics( GetClusterMetricsRequest request) throws YarnException { GetClusterMetricsResponse response = recordFactory .newRecordInstance(GetClusterMetricsResponse.class); YarnClusterMetrics ymetrics = recordFactory .newRecordInstance(YarnClusterMetrics.class); ymetrics.setNumNodeManagers(this.rmContext.getRMNodes().size()); ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); ymetrics.setNumDecommissioningNodeManagers(clusterMetrics.getNumDecommissioningNMs()); ymetrics.setNumDecommissionedNodeManagers(clusterMetrics .getNumDecommisionedNMs()); ymetrics.setNumActiveNodeManagers(clusterMetrics.getNumActiveNMs()); ymetrics.setNumLostNodeManagers(clusterMetrics.getNumLostNMs()); ymetrics.setNumUnhealthyNodeManagers(clusterMetrics.getUnhealthyNMs()); ymetrics.setNumRebootedNodeManagers(clusterMetrics.getNumRebootedNMs()); ymetrics.setNumShutdownNodeManagers(clusterMetrics.getNumShutdownNMs()); response.setClusterMetrics(ymetrics); return response; }
@Test public void testGetClusterMetrics() throws Exception { MockRM rm = new MockRM() { protected ClientRMService createClientRMService() { return new ClientRMService(this.rmContext, scheduler, this.rmAppManager, this.applicationACLsManager, this.queueACLsManager, this.getRMContext().getRMDelegationTokenSecretManager()); }; }; resourceManager = rm; rm.start(); ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); clusterMetrics.incrDecommissioningNMs(); repeat(2, clusterMetrics::incrDecommisionedNMs); repeat(3, clusterMetrics::incrNumActiveNodes); repeat(4, clusterMetrics::incrNumLostNMs); repeat(5, clusterMetrics::incrNumUnhealthyNMs); repeat(6, clusterMetrics::incrNumRebootedNMs); repeat(7, clusterMetrics::incrNumShutdownNMs); // Create a client. conf = new Configuration(); rpc = YarnRPC.create(conf); InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress(); LOG.info("Connecting to ResourceManager at " + rmAddress); client = (ApplicationClientProtocol) rpc.getProxy( ApplicationClientProtocol.class, rmAddress, conf); YarnClusterMetrics ymetrics = client.getClusterMetrics( GetClusterMetricsRequest.newInstance()).getClusterMetrics(); Assert.assertEquals(0, ymetrics.getNumNodeManagers()); Assert.assertEquals(1, ymetrics.getNumDecommissioningNodeManagers()); Assert.assertEquals(2, ymetrics.getNumDecommissionedNodeManagers()); Assert.assertEquals(3, ymetrics.getNumActiveNodeManagers()); Assert.assertEquals(4, ymetrics.getNumLostNodeManagers()); Assert.assertEquals(5, ymetrics.getNumUnhealthyNodeManagers()); Assert.assertEquals(6, ymetrics.getNumRebootedNodeManagers()); Assert.assertEquals(7, ymetrics.getNumShutdownNodeManagers()); }
@Override public String named() { return PluginEnum.REDIRECT.getName(); }
@Test public void testNamed() { final String result = redirectPlugin.named(); assertThat(PluginEnum.REDIRECT.getName(), Matchers.is(result)); }
public static Optional<Object> getAdjacentValue(Type type, Object value, boolean isPrevious) { if (!type.isOrderable()) { throw new IllegalStateException("Type is not orderable: " + type); } requireNonNull(value, "value is null"); if (type.equals(BIGINT) || type instanceof TimestampType) { return getBigintAdjacentValue(value, isPrevious); } if (type.equals(INTEGER) || type.equals(DATE)) { return getIntegerAdjacentValue(value, isPrevious); } if (type.equals(SMALLINT)) { return getSmallIntAdjacentValue(value, isPrevious); } if (type.equals(TINYINT)) { return getTinyIntAdjacentValue(value, isPrevious); } if (type.equals(DOUBLE)) { return getDoubleAdjacentValue(value, isPrevious); } if (type.equals(REAL)) { return getRealAdjacentValue(value, isPrevious); } return Optional.empty(); }
@Test public void testNextValueForIntegerAndDate() { long minValue = Integer.MIN_VALUE; long maxValue = Integer.MAX_VALUE; assertThat(getAdjacentValue(INTEGER, minValue, false)) .isEqualTo(Optional.of(minValue + 1)); assertThat(getAdjacentValue(INTEGER, minValue + 1, false)) .isEqualTo(Optional.of(minValue + 2)); assertThat(getAdjacentValue(DATE, minValue, false)) .isEqualTo(Optional.of(minValue + 1)); assertThat(getAdjacentValue(DATE, minValue + 1, false)) .isEqualTo(Optional.of(minValue + 2)); assertThat(getAdjacentValue(INTEGER, 1234L, false)) .isEqualTo(Optional.of(1235L)); assertThat(getAdjacentValue(DATE, 1234L, false)) .isEqualTo(Optional.of(1235L)); assertThat(getAdjacentValue(INTEGER, maxValue - 1, false)) .isEqualTo(Optional.of(maxValue)); assertThat(getAdjacentValue(INTEGER, maxValue, false)) .isEqualTo(Optional.empty()); assertThat(getAdjacentValue(DATE, maxValue - 1, false)) .isEqualTo(Optional.of(maxValue)); assertThat(getAdjacentValue(DATE, maxValue, false)) .isEqualTo(Optional.empty()); }
@Override public <R> R evalSha(Mode mode, String shaDigest, ReturnType returnType) { return evalSha(null, mode, shaDigest, returnType, Collections.emptyList()); }
@Test public void testEvalSha() { RScript s = redisson.getScript(); String res = s.scriptLoad("return redis.call('get', 'foo')"); Assertions.assertEquals("282297a0228f48cd3fc6a55de6316f31422f5d17", res); redisson.getBucket("foo").set("bar"); String r1 = s.evalSha(Mode.READ_ONLY, "282297a0228f48cd3fc6a55de6316f31422f5d17", RScript.ReturnType.VALUE, Collections.emptyList()); Assertions.assertEquals("bar", r1); }
/**
 * Renders an Apache "combined" access-log line for a Lambda-proxied request:
 * %h %l %u %t "%r" %>s %b "%{Referer}i" "%{User-agent}i".
 * Request time prefers the API Gateway v1 epoch, then the HTTP API v2 epoch,
 * then the injected clock.
 *
 * NOTE(review): the %l field appends BOTH the principal name (or "-") AND the
 * gateway user ARN (or "-") with no separator, so unauthenticated requests
 * render "--"; and the literal token "combined" is appended to every line.
 * Both look unintentional but are kept as-is — confirm against consumers of
 * this log format before changing.
 */
@Override
@SuppressFBWarnings({ "SERVLET_HEADER_REFERER", "SERVLET_HEADER_USER_AGENT" })
public String format(ContainerRequestType servletRequest, ContainerResponseType servletResponse, SecurityContext ctx) {
    //LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
    StringBuilder logLineBuilder = new StringBuilder();
    AwsProxyRequestContext gatewayContext = (AwsProxyRequestContext)servletRequest.getAttribute(API_GATEWAY_CONTEXT_PROPERTY);
    HttpApiV2ProxyRequestContext httpApiContext = (HttpApiV2ProxyRequestContext)servletRequest.getAttribute(HTTP_API_CONTEXT_PROPERTY);

    // %h — remote address
    logLineBuilder.append(servletRequest.getRemoteAddr());
    logLineBuilder.append(" ");

    // %l — identd; here: principal name or "-", plus gateway user ARN or "-"
    if (servletRequest.getUserPrincipal() != null) {
        logLineBuilder.append(servletRequest.getUserPrincipal().getName());
    } else {
        logLineBuilder.append("-");
    }
    if (gatewayContext != null && gatewayContext.getIdentity() != null && gatewayContext.getIdentity().getUserArn() != null) {
        logLineBuilder.append(gatewayContext.getIdentity().getUserArn());
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append(" ");

    // %u — authenticated user
    if (servletRequest.getUserPrincipal() != null) {
        logLineBuilder.append(servletRequest.getUserPrincipal().getName());
    }
    logLineBuilder.append(" ");

    // %t — request time: v1 epoch (ms), else v2 epoch (ms), else current clock
    long timeEpoch = ZonedDateTime.now(clock).toEpochSecond();
    if (gatewayContext != null && gatewayContext.getRequestTimeEpoch() > 0) {
        timeEpoch = gatewayContext.getRequestTimeEpoch() / 1000;
    } else if (httpApiContext != null && httpApiContext.getTimeEpoch() > 0) {
        timeEpoch = httpApiContext.getTimeEpoch() / 1000;
    }
    logLineBuilder.append(
        dateFormat.format(ZonedDateTime.of(
            LocalDateTime.ofEpochSecond(timeEpoch, 0, ZoneOffset.UTC), clock.getZone())
        ));
    logLineBuilder.append(" ");

    // %r — request line
    logLineBuilder.append("\"");
    logLineBuilder.append(servletRequest.getMethod().toUpperCase(Locale.ENGLISH));
    logLineBuilder.append(" ");
    logLineBuilder.append(servletRequest.getRequestURI());
    logLineBuilder.append(" ");
    logLineBuilder.append(servletRequest.getProtocol());
    logLineBuilder.append("\" ");

    // %>s — status
    logLineBuilder.append(servletResponse.getStatus());
    logLineBuilder.append(" ");

    // %b — response size in bytes, "-" when empty or size unknown
    if (servletResponse instanceof AwsHttpServletResponse) {
        AwsHttpServletResponse awsResponse = (AwsHttpServletResponse)servletResponse;
        if (awsResponse.getAwsResponseBodyBytes().length > 0) {
            logLineBuilder.append(awsResponse.getAwsResponseBodyBytes().length);
        } else {
            logLineBuilder.append("-");
        }
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append(" ");

    // \"%{Referer}i\"
    logLineBuilder.append("\"");
    if (servletRequest.getHeader("referer") != null) {
        logLineBuilder.append(servletRequest.getHeader("referer"));
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append("\" ");

    // \"%{User-agent}i\"
    logLineBuilder.append("\"");
    if (servletRequest.getHeader("user-agent") != null) {
        logLineBuilder.append(servletRequest.getHeader("user-agent"));
    } else {
        logLineBuilder.append("-");
    }
    logLineBuilder.append("\" ");

    logLineBuilder.append("combined");

    return logLineBuilder.toString();
}
@Test void logsCurrentTimeWhenRequestTimeZero() { // given context.setRequestTimeEpoch(0); // when String actual = sut.format(mockServletRequest, mockServletResponse, null); // then assertThat(actual, containsString("[07/02/1991:01:02:03Z]")); }
@Override public ConfigOperateResult insertOrUpdateTagCas(final ConfigInfo configInfo, final String tag, final String srcIp, final String srcUser) { if (findConfigInfo4TagState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), tag) == null) { return addConfigInfo4Tag(configInfo, tag, srcIp, srcUser); } else { return updateConfigInfo4TagCas(configInfo, tag, srcIp, srcUser); } }
@Test void testInsertOrUpdateTagCasOfUpdate() { String dataId = "dataId111222"; String group = "group"; String tenant = "tenant"; String appName = "appname1234"; String content = "c12345"; ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content); configInfo.setEncryptedDataKey("key23456"); configInfo.setMd5("casMd5"); //mock query config state and return obj after update ConfigInfoStateWrapper configInfoStateWrapper = new ConfigInfoStateWrapper(); configInfoStateWrapper.setLastModified(System.currentTimeMillis()); configInfoStateWrapper.setId(234567890L); String tag = "tag123"; Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant, tag}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(new ConfigInfoStateWrapper()).thenReturn(configInfoStateWrapper); String srcIp = "ip345678"; String srcUser = "user1234567"; //mock cas update return 1 Mockito.when(databaseOperate.blockUpdate()).thenReturn(true); ConfigOperateResult configOperateResult = embeddedConfigInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp, srcUser); //verify update to be invoked embeddedStorageContextHolderMockedStatic.verify(() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(content), eq(MD5Utils.md5Hex(content, Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser), any(Timestamp.class), eq(appName), eq(dataId), eq(group), eq(tenant), eq(tag), eq(configInfo.getMd5())), times(1)); assertEquals(configInfoStateWrapper.getId(), configOperateResult.getId()); assertEquals(configInfoStateWrapper.getLastModified(), configOperateResult.getLastModified()); }
/**
 * Reconstructs a Transfer from its serialized dictionary form.
 * Returns null (with a warning) on any structural problem: missing/invalid
 * host, no transfer items, unknown type, or an inconsistent copy mapping.
 * Supports both the current "Items" list and the legacy "Roots"/"Kind"/"Local"
 * keys for backward compatibility with old bookmark files.
 */
public Transfer deserialize(final T serialized) {
    final Deserializer<T> dict = factory.create(serialized);
    final T hostObj = dict.objectForKey("Host");
    if(null == hostObj) {
        log.warn("Missing host in transfer");
        return null;
    }
    final Host host = new HostDictionary<>(protocols, factory).deserialize(hostObj);
    if(null == host) {
        log.warn("Invalid host in transfer");
        return null;
    }
    host.setWorkdir(null);
    // Current format: "Items" list of TransferItem dictionaries.
    final List<T> itemsObj = dict.listForKey("Items");
    final List<TransferItem> roots = new ArrayList<>();
    if(itemsObj != null) {
        for(T rootDict : itemsObj) {
            final TransferItem item = new TransferItemDictionary<>(factory).deserialize(rootDict);
            if(null == item) {
                log.warn("Invalid item in transfer");
                continue;
            }
            roots.add(item);
        }
    }
    // Legacy format: "Roots" list of path dictionaries with optional local part.
    final List<T> rootsObj = dict.listForKey("Roots");
    if(rootsObj != null) {
        for(T rootDict : rootsObj) {
            final Path remote = new PathDictionary<>(factory).deserialize(rootDict);
            if(null == remote) {
                log.warn("Invalid remote in transfer");
                continue;
            }
            final TransferItem item = new TransferItem(remote);
            // Legacy: plain string local path.
            final String localObjDeprecated = factory.create(rootDict).stringForKey("Local");
            if(localObjDeprecated != null) {
                Local local = LocalFactory.get(localObjDeprecated);
                item.setLocal(local);
            }
            // Newer nested dictionary form takes precedence when present.
            final T localObj = factory.create(rootDict).objectForKey("Local Dictionary");
            if(localObj != null) {
                Local local = new LocalDictionary<>(factory).deserialize(localObj);
                if(null == local) {
                    log.warn("Invalid local in transfer item");
                    continue;
                }
                item.setLocal(local);
            }
            roots.add(item);
        }
    }
    if(roots.isEmpty()) {
        log.warn("No files in transfer");
        return null;
    }
    final Transfer transfer;
    Transfer.Type type = null;
    // Legacy numeric "Kind" is superseded by the named "Type" when both exist.
    final String kindObj = dict.stringForKey("Kind");
    if(kindObj != null) {
        // Legacy
        type = Transfer.Type.values()[Integer.parseInt(kindObj)];
    }
    final String typeObj = dict.stringForKey("Type");
    if(typeObj != null) {
        type = Transfer.Type.valueOf(typeObj);
    }
    if(null == type) {
        log.warn("Missing transfer type");
        return null;
    }
    switch(type) {
        case download:
        case upload:
        case sync:
            // Verify we have valid items
            for(TransferItem item : roots) {
                if(null == item.remote) {
                    log.warn(String.format("Missing remote in transfer item %s", item));
                    return null;
                }
                if(null == item.local) {
                    log.warn(String.format("Missing local in transfer item %s", item));
                    return null;
                }
            }
    }
    switch(type) {
        case download:
            transfer = new DownloadTransfer(host, roots);
            break;
        case upload:
            transfer = new UploadTransfer(host, roots);
            break;
        case sync:
            // Sync transfers carry a single root and an optional saved action.
            final String actionObj = dict.stringForKey("Action");
            if(null == actionObj) {
                transfer = new SyncTransfer(host, roots.iterator().next());
            }
            else {
                transfer = new SyncTransfer(host, roots.iterator().next(), TransferAction.forName(actionObj));
            }
            break;
        case copy:
            final T destinationObj = dict.objectForKey("Destination");
            if(null == destinationObj) {
                log.warn("Missing destination for copy transfer");
                return null;
            }
            // NOTE(review): listForKey may return null here (as it can for
            // "Items"/"Roots" above), which would NPE on isEmpty() — confirm
            // the serializer always writes "Destinations" for copy transfers.
            final List<T> destinations = dict.listForKey("Destinations");
            if(destinations.isEmpty()) {
                log.warn("No destinations in copy transfer");
                return null;
            }
            // Source roots and destinations must pair up one-to-one.
            if(roots.size() == destinations.size()) {
                final Map<Path, Path> files = new HashMap<>();
                for(int i = 0; i < roots.size(); i++) {
                    final Path target = new PathDictionary<>(factory).deserialize(destinations.get(i));
                    if(null == target) {
                        continue;
                    }
                    files.put(roots.get(i).remote, target);
                }
                final Host target = new HostDictionary<>(protocols, factory).deserialize(destinationObj);
                if(null == target) {
                    log.warn("Missing target host in copy transfer");
                    return null;
                }
                transfer = new CopyTransfer(host, target, files);
            }
            else {
                log.warn("Invalid file mapping for copy transfer");
                return null;
            }
            break;
        default:
            log.warn(String.format("Unknown transfer type %s", kindObj));
            return null;
    }
    // Optional progress/identity attributes.
    final Object uuidObj = dict.stringForKey("UUID");
    if(uuidObj != null) {
        transfer.setUuid(uuidObj.toString());
    }
    final Object sizeObj = dict.stringForKey("Size");
    if(sizeObj != null) {
        transfer.setSize((long) Double.parseDouble(sizeObj.toString()));
    }
    final Object timestampObj = dict.stringForKey("Timestamp");
    if(timestampObj != null) {
        transfer.setTimestamp(new Date(Long.parseLong(timestampObj.toString())));
    }
    final Object currentObj = dict.stringForKey("Current");
    if(currentObj != null) {
        transfer.setTransferred((long) Double.parseDouble(currentObj.toString()));
    }
    final Object bandwidthObj = dict.stringForKey("Bandwidth");
    if(bandwidthObj != null) {
        transfer.getBandwidth().setRate(Float.parseFloat(bandwidthObj.toString()));
    }
    return transfer;
}
@Test public void testSerializeComplete() throws Exception { // Test transfer to complete with existing directory final Host host = new Host(new TestProtocol()); final Transfer t = new DownloadTransfer(host, new Path("/t", EnumSet.of(Path.Type.directory)), new NullLocal("t") { @Override public boolean exists() { return true; } @Override public AttributedList<Local> list(final Filter<String> filter) { return AttributedList.emptyList(); } @Override public boolean isFile() { return false; } @Override public boolean isDirectory() { return true; } }); final NullSession session = new NullTransferSession(host); new SingleTransferWorker(session, session, t, new TransferOptions(), new TransferSpeedometer(t), new DisabledTransferPrompt() { @Override public TransferAction prompt(final TransferItem file) { return TransferAction.overwrite; } }, new DisabledTransferErrorCallback(), new DisabledProgressListener(), new DisabledStreamListener(), new DisabledLoginCallback(), new DisabledNotificationService()).run(session); assertTrue(t.isComplete()); final Transfer serialized = new TransferDictionary<>().deserialize(t.serialize(SerializerFactory.get())); assertNotSame(t, serialized); assertTrue(serialized.isComplete()); }
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext, final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) { SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext(); SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof TCLStatement) { return new ShardingDatabaseBroadcastRoutingEngine(); } if (sqlStatement instanceof DDLStatement) { if (sqlStatementContext instanceof CursorAvailable) { return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props); } return getDDLRoutingEngine(shardingRule, database, sqlStatementContext); } if (sqlStatement instanceof DALStatement) { return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext); } if (sqlStatement instanceof DCLStatement) { return getDCLRoutingEngine(shardingRule, database, sqlStatementContext); } return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext); }
@Test void assertNewInstanceForCursorStatementWithSingleTable() { CursorStatementContext cursorStatementContext = mock(CursorStatementContext.class, RETURNS_DEEP_STUBS); OpenGaussCursorStatement cursorStatement = mock(OpenGaussCursorStatement.class); when(cursorStatementContext.getSqlStatement()).thenReturn(cursorStatement); Collection<SimpleTableSegment> tableSegments = createSimpleTableSegments(); when(cursorStatementContext.getTablesContext().getSimpleTables()).thenReturn(tableSegments); when(cursorStatementContext.getTablesContext().getDatabaseName()).thenReturn(Optional.empty()); QueryContext queryContext = new QueryContext(cursorStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)); ShardingRouteEngine actual = ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet)); assertThat(actual, instanceOf(ShardingIgnoreRoutingEngine.class)); }
public Future<?> scheduleWithFixedDelay(Runnable task, long initialDelay, long delay, TimeUnit unit) { Preconditions.checkState(isOpen.get(), "CloseableExecutorService is closed"); ScheduledFuture<?> scheduledFuture = scheduledExecutorService.scheduleWithFixedDelay(task, initialDelay, delay, unit); return new InternalScheduledFutureTask(scheduledFuture); }
@Test public void testCloseableScheduleWithFixedDelayAndAdditionalTasks() throws InterruptedException { final AtomicInteger outerCounter = new AtomicInteger(0); Runnable command = new Runnable() { @Override public void run() { outerCounter.incrementAndGet(); } }; executorService.scheduleWithFixedDelay(command, DELAY_MS, DELAY_MS, TimeUnit.MILLISECONDS); CloseableScheduledExecutorService service = new CloseableScheduledExecutorService(executorService); final AtomicInteger innerCounter = new AtomicInteger(0); service.scheduleWithFixedDelay( new Runnable() { @Override public void run() { innerCounter.incrementAndGet(); } }, DELAY_MS, DELAY_MS, TimeUnit.MILLISECONDS); Thread.sleep(DELAY_MS * 4); service.close(); Thread.sleep(DELAY_MS * 2); int innerValue = innerCounter.get(); assertTrue(innerValue > 0); int value = outerCounter.get(); Thread.sleep(DELAY_MS * 2); int newValue = outerCounter.get(); assertTrue(newValue > value); assertEquals(innerValue, innerCounter.get()); value = newValue; Thread.sleep(DELAY_MS * 2); newValue = outerCounter.get(); assertTrue(newValue > value); assertEquals(innerValue, innerCounter.get()); }
@Description("Recursively flattens GeometryCollections") @ScalarFunction("flatten_geometry_collections") @SqlType("array(" + GEOMETRY_TYPE_NAME + ")") public static Block flattenGeometryCollections(@SqlType(GEOMETRY_TYPE_NAME) Slice input) { OGCGeometry geometry = EsriGeometrySerde.deserialize(input); List<OGCGeometry> components = Streams.stream( flattenCollection(geometry)).collect(toImmutableList()); BlockBuilder blockBuilder = GEOMETRY.createBlockBuilder(null, components.size()); for (OGCGeometry component : components) { GEOMETRY.writeSlice(blockBuilder, EsriGeometrySerde.serialize(component)); } return blockBuilder.build(); }
@Test public void testFlattenGeometryCollections() { assertFlattenGeometryCollections("POINT (0 0)", "POINT (0 0)"); assertFlattenGeometryCollections("MULTIPOINT ((0 0), (1 1))", "MULTIPOINT ((0 0), (1 1))"); assertFlattenGeometryCollections("GEOMETRYCOLLECTION EMPTY"); assertFlattenGeometryCollections("GEOMETRYCOLLECTION (POINT EMPTY)", "POINT EMPTY"); assertFlattenGeometryCollections("GEOMETRYCOLLECTION (MULTIPOLYGON EMPTY)", "MULTIPOLYGON EMPTY"); assertFlattenGeometryCollections("GEOMETRYCOLLECTION (POINT (0 0))", "POINT (0 0)"); assertFlattenGeometryCollections( "GEOMETRYCOLLECTION (POINT (0 0), GEOMETRYCOLLECTION (POINT (1 1)))", "POINT (1 1)", "POINT (0 0)"); }
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress)
    throws NotEnoughValidWindowsException {
  // Convenience overload: aggregate from the beginning of time (-1L) up to `now`,
  // requiring one valid window, 0% monitored partitions, and not including all topics.
  return aggregate(cluster, -1L, now, new ModelCompletenessRequirements(1, 0.0, false), operationProgress);
}
@Test
public void testAggregateWithUpdatedCluster() throws NotEnoughValidWindowsException {
    // Verifies aggregation behavior when the cluster gains a partition that has no samples:
    // by default the new partition is excluded; with includeAllTopics it is included with
    // NO_VALID_EXTRAPOLATION in every window.
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    Metadata metadata = getMetadata(Collections.singleton(TP));
    KafkaPartitionMetricSampleAggregator metricSampleAggregator =
        new KafkaPartitionMetricSampleAggregator(config, metadata);
    // Populate samples only for TP; tp1 (added below) never receives any.
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
    TopicPartition tp1 = new TopicPartition(TOPIC0 + "1", 0);
    Cluster cluster = getCluster(Arrays.asList(TP, tp1));
    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(2);
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0, false,
        Collections.singletonList(new MetadataResponse.PartitionMetadata(
            Errors.NONE, TP, Optional.of(NODE_0.id()),
            Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
            nodeIds(), nodeIds(), Collections.emptyList()))));
    topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0 + "1", false,
        Collections.singletonList(new MetadataResponse.PartitionMetadata(
            Errors.NONE, tp1, Optional.of(NODE_0.id()),
            Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
            nodeIds(), nodeIds(), Collections.emptyList()))));
    MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(),
        cluster.clusterResource().clusterId(),
        MetadataResponse.NO_CONTROLLER_ID,
        topicMetadata);
    metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, false, 1);
    Map<PartitionEntity, ValuesAndExtrapolations> aggregateResult =
        metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress()).valuesAndExtrapolations();
    // Partition "topic-0" should be valid in all NUM_WINDOW windows and Partition "topic1-0" should not since
    // there is no sample for it.
    assertEquals(1, aggregateResult.size());
    assertEquals(NUM_WINDOWS, aggregateResult.get(PE).windows().size());
    // With includeAllTopics = true, the sample-less partition must also be reported.
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, true);
    MetricSampleAggregationResult<String, PartitionEntity> result =
        metricSampleAggregator.aggregate(cluster, -1, Long.MAX_VALUE, requirements, new OperationProgress());
    aggregateResult = result.valuesAndExtrapolations();
    assertNotNull("tp1 should be included because includeAllTopics is set to true",
        aggregateResult.get(new PartitionEntity(tp1)));
    Map<Integer, Extrapolation> extrapolations = aggregateResult.get(new PartitionEntity(tp1)).extrapolations();
    assertEquals(NUM_WINDOWS, extrapolations.size());
    // Every window for tp1 must be marked as having no valid extrapolation.
    for (int i = 0; i < NUM_WINDOWS; i++) {
        assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i));
    }
}
/**
 * Reads a system property, mapping the sentinel {@code NULL_PROPERTY_VALUE} to {@code null}.
 *
 * @param propertyName name of the system property to read.
 * @return the property value, or {@code null} if unset or set to the null sentinel.
 */
public static String getProperty(final String propertyName)
{
    final String value = System.getProperty(propertyName);
    if (NULL_PROPERTY_VALUE.equals(value))
    {
        return null;
    }

    return value;
}
@Test
void shouldGetNullProperty()
{
    // A property explicitly set to the "@null" sentinel must be reported as null.
    final String propertyName = "org.agrona.test.case";
    System.setProperty(propertyName, "@null");
    try
    {
        assertNull(SystemUtil.getProperty(propertyName));
    }
    finally
    {
        // Always restore global state so other tests are unaffected.
        System.clearProperty(propertyName);
    }
}
/**
 * Looks up a protocol by identifier, delegating to the two-argument overload with
 * no explicit fallback (the overload's own default resolution applies).
 */
public Protocol forNameOrDefault(final String identifier) {
    return forNameOrDefault(identifier, null);
}
@Test
public void testForNameOrDefault() throws Exception {
    final TestProtocol ftpProtocol = new TestProtocol(Scheme.ftp);
    final TestProtocol davProtocol = new TestProtocol(Scheme.dav);
    final ProtocolFactory factory
        = new ProtocolFactory(new LinkedHashSet<>(Arrays.asList(ftpProtocol, davProtocol)));
    // Exact identifier matches resolve to the corresponding protocol.
    assertEquals(davProtocol, factory.forNameOrDefault("dav"));
    // An unknown identifier falls back to the first registered protocol.
    assertEquals(ftpProtocol, factory.forNameOrDefault("invalid"));
    assertEquals(ftpProtocol, factory.forNameOrDefault("ftp"));
}
/**
 * Registers a {@link TransactionHook} for the current thread.
 *
 * <p>The hook list is stored in the thread-local {@code LOCAL_HOOKS} and is lazily
 * created on first registration.
 *
 * @param transactionHook the hook to register, must not be {@code null}
 * @throws NullPointerException if {@code transactionHook} is {@code null}
 */
public static void registerHook(TransactionHook transactionHook) {
    if (transactionHook == null) {
        throw new NullPointerException("transactionHook must not be null");
    }
    // Fetch the thread-local list once instead of re-reading it after set(),
    // avoiding the redundant double ThreadLocal lookup of the original code.
    List<TransactionHook> transactionHooks = LOCAL_HOOKS.get();
    if (transactionHooks == null) {
        transactionHooks = new ArrayList<>();
        LOCAL_HOOKS.set(transactionHooks);
    }
    transactionHooks.add(transactionHook);
}
@Test
public void testRegisterHook() {
    // Registering a hook must make it visible (in registration order) via getHooks().
    TransactionHookAdapter transactionHookAdapter = new TransactionHookAdapter();
    TransactionHookManager.registerHook(transactionHookAdapter);
    List<TransactionHook> hooks = TransactionHookManager.getHooks();
    assertThat(hooks).isNotEmpty();
    assertThat(hooks.get(0)).isEqualTo(transactionHookAdapter);
}
/**
 * Creates an admin executor for the given SQL statement context by delegating
 * to the wrapped creator; empty when no admin handling applies.
 */
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
    return delegated.create(sqlStatementContext);
}
@Test
void assertCreateExecutorForSelectTables() {
    // A SELECT against the pg_tables system catalog must be routed to the
    // OpenGauss system-catalog admin query executor.
    SelectStatementContext selectStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS);
    when(selectStatementContext.getTablesContext().getTableNames()).thenReturn(Collections.singletonList("pg_tables"));
    Optional<DatabaseAdminExecutor> actual = new OpenGaussAdminExecutorCreator()
        .create(selectStatementContext,
            "select schemaname, tablename from pg_tables where schemaname = 'sharding_db'",
            "postgres", Collections.emptyList());
    assertTrue(actual.isPresent());
    assertThat(actual.get(), instanceOf(OpenGaussSystemCatalogAdminQueryExecutor.class));
}
/**
 * Validates an update request against an existing function config and returns the merged config.
 *
 * <p>Identity fields (tenant/namespace/name), input topic set and regex-ness, output serde/schema,
 * processing guarantees, ordering flags, runtime, auto-ack and subscription name are immutable and
 * cause an {@link IllegalArgumentException} when changed. All other non-empty fields of
 * {@code newConfig} overwrite the corresponding fields of the merged copy.
 *
 * @param existingConfig the currently stored function config
 * @param newConfig      the requested update (mutated in place to normalize input specs)
 * @return a merged config based on {@code existingConfig} with allowed updates applied
 * @throws IllegalArgumentException if an immutable property differs
 */
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
    FunctionConfig mergedConfig = existingConfig.toBuilder().build();
    // Identity can never change.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getJar())) {
        mergedConfig.setJar(newConfig.getJar());
    }
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    // Normalize the different input declarations (inputs, topicsPattern, custom serde/schema
    // inputs) into newConfig.inputSpecs so they can be validated uniformly below.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(), ConsumerConfig.builder()
                .isRegexPattern(true)
                .build());
    }
    if (newConfig.getCustomSerdeInputs() != null) {
        newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder()
                    .serdeClassName(serdeClassName)
                    .isRegexPattern(false)
                    .build());
        });
    }
    if (newConfig.getCustomSchemaInputs() != null) {
        newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder()
                    .schemaType(schemaClassname)
                    .isRegexPattern(false)
                    .build());
        });
    }
    // Input topics may only be re-configured, never added, and a topic cannot switch
    // between regex and non-regex.
    if (!newConfig.getInputSpecs().isEmpty()) {
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            mergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
            .equals(existingConfig.getOutputSerdeClassName())) {
        throw new IllegalArgumentException("Output Serde mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
            .equals(existingConfig.getOutputSchemaType())) {
        throw new IllegalArgumentException("Output Schema mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (!StringUtils.isEmpty(newConfig.getOutput())) {
        mergedConfig.setOutput(newConfig.getOutput());
    }
    if (newConfig.getUserConfig() != null) {
        mergedConfig.setUserConfig(newConfig.getUserConfig());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
        throw new IllegalArgumentException("Runtime cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getMaxMessageRetries() != null) {
        mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
    }
    if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
        mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
    }
    if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
            .equals(existingConfig.getSubName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getResources() != null) {
        // Resources are merged field-by-field rather than replaced wholesale.
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getWindowConfig() != null) {
        mergedConfig.setWindowConfig(newConfig.getWindowConfig());
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
// Changing the output schema type in an update must be rejected with "Output Schema mismatch".
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Output Schema mismatch")
public void testMergeDifferentOutputSchemaTypes() {
    FunctionConfig functionConfig = createFunctionConfig();
    FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("outputSchemaType", "avro");
    FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
}
/**
 * Blocks on the given latch until it reaches zero; if interrupted, restores the
 * thread's interrupt status instead of propagating the exception.
 */
public static void latchAwait(CountDownLatch latch) {
    try {
        latch.await();
    } catch (InterruptedException e) {
        // Preserve the interrupt flag for callers that inspect it.
        Thread.currentThread().interrupt();
    }
}
@Test
void testLatchAwait() {
    final CountDownLatch latch = new CountDownLatch(1);
    final long startMillis = System.currentTimeMillis();
    // The worker releases the latch only after sleeping 100ms.
    executorService.execute(() -> {
        ThreadUtils.sleep(100);
        ThreadUtils.countDown(latch);
    });
    ThreadUtils.latchAwait(latch);
    // latchAwait must have blocked at least until the worker counted down.
    assertTrue(System.currentTimeMillis() - startMillis >= 100);
}
/**
 * Extracts the database name from a registry node path of the form
 * {@code <root>/<databaseName>} (case-insensitive match on the root).
 *
 * @param nodePath registry node path.
 * @return the database name, or empty when the path does not match.
 */
public static Optional<String> getDatabaseName(final String nodePath) {
    Matcher matcher = Pattern.compile(getRootNodePath() + "/(\\w+)$", Pattern.CASE_INSENSITIVE).matcher(nodePath);
    if (matcher.find()) {
        return Optional.of(matcher.group(1));
    }
    return Optional.empty();
}
@Test
void assertGetDatabaseName() {
    // The last path segment under the listener-assisted root is the database name.
    Optional<String> actual = ListenerAssistedNodePath.getDatabaseName("/listener_assisted/foo_db");
    assertTrue(actual.isPresent());
    assertThat(actual.get(), Matchers.is("foo_db"));
}
/**
 * Fetches a batch of messages from the Kafka partition starting at the given offset.
 *
 * <p>Seeks only when the requested start offset does not directly follow the last
 * fetched offset, avoiding redundant seeks on sequential consumption. Tombstone
 * (null-value) records are filtered out of the returned batch but still counted
 * in the unfiltered message count and metadata.
 */
@Override
public synchronized KafkaMessageBatch fetchMessages(StreamPartitionMsgOffset startMsgOffset, int timeoutMs) {
    long startOffset = ((LongMsgOffset) startMsgOffset).getOffset();
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Polling partition: {}, startOffset: {}, timeout: {}ms", _topicPartition, startOffset, timeoutMs);
    }
    // Seek only on non-sequential reads (first fetch or a gap/rewind).
    if (_lastFetchedOffset < 0 || _lastFetchedOffset != startOffset - 1) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Seeking to offset: {}", startOffset);
        }
        _consumer.seek(_topicPartition, startOffset);
    }
    ConsumerRecords<String, Bytes> consumerRecords = _consumer.poll(Duration.ofMillis(timeoutMs));
    List<ConsumerRecord<String, Bytes>> records = consumerRecords.records(_topicPartition);
    List<BytesStreamMessage> filteredRecords = new ArrayList<>(records.size());
    long firstOffset = -1;
    long offsetOfNextBatch = startOffset;
    StreamMessageMetadata lastMessageMetadata = null;
    if (!records.isEmpty()) {
        firstOffset = records.get(0).offset();
        _lastFetchedOffset = records.get(records.size() - 1).offset();
        offsetOfNextBatch = _lastFetchedOffset + 1;
        for (ConsumerRecord<String, Bytes> record : records) {
            StreamMessageMetadata messageMetadata = extractMessageMetadata(record);
            Bytes message = record.value();
            if (message != null) {
                String key = record.key();
                byte[] keyBytes = key != null ? key.getBytes(StandardCharsets.UTF_8) : null;
                filteredRecords.add(new BytesStreamMessage(keyBytes, message.get(), messageMetadata));
            } else if (LOGGER.isDebugEnabled()) {
                // Null value = tombstone; excluded from the batch payload.
                LOGGER.debug("Tombstone message at offset: {}", record.offset());
            }
            lastMessageMetadata = messageMetadata;
        }
    }
    // Final flag marks whether offsets were skipped (first returned offset > requested start).
    return new KafkaMessageBatch(filteredRecords, records.size(), offsetOfNextBatch, firstOffset, lastMessageMetadata,
        firstOffset > startOffset);
}
@Test
public void testFetchMessages() {
    // Smoke test: build a partition-level consumer from a minimal stream config and
    // verify that fetching from an arbitrary offset does not throw.
    String streamType = "kafka";
    String streamKafkaTopicName = "theTopic";
    String streamKafkaBrokerList = _kafkaBrokerAddress;
    String streamKafkaConsumerType = "simple";
    String clientId = "clientId";
    String tableNameWithType = "tableName_REALTIME";
    Map<String, String> streamConfigMap = new HashMap<>();
    streamConfigMap.put("streamType", streamType);
    streamConfigMap.put("stream.kafka.topic.name", streamKafkaTopicName);
    streamConfigMap.put("stream.kafka.broker.list", streamKafkaBrokerList);
    streamConfigMap.put("stream.kafka.consumer.type", streamKafkaConsumerType);
    streamConfigMap.put("stream.kafka.consumer.factory.class.name", getKafkaConsumerFactoryName());
    streamConfigMap.put("stream.kafka.decoder.class.name", "decoderClass");
    StreamConfig streamConfig = new StreamConfig(tableNameWithType, streamConfigMap);
    int partition = 0;
    KafkaPartitionLevelConsumer kafkaSimpleStreamConsumer =
        new KafkaPartitionLevelConsumer(clientId, streamConfig, partition);
    kafkaSimpleStreamConsumer.fetchMessages(new LongMsgOffset(12345L), 10000);
}
/**
 * Returns the wrapped value when this wrapper is valid; otherwise returns the
 * value produced by the supplier (evaluated lazily, only on the invalid path).
 */
public T orElseGet(Supplier<T> defaultSupplier) {
    if (valid) {
        return value;
    }
    return defaultSupplier.get();
}
@Test
public void orElseGet() {
    // Valid wrapper: returns its own value, ignoring the supplier.
    assertThat(ValueWrapper.of(1).orElseGet(() -> 3)).isEqualTo((Integer) 1);
    // Invalid wrapper: falls back to the supplier's value.
    assertThat(ValueWrapper.errorWithValidValue(null, null).orElseGet(() -> 3)).isEqualTo(3);
    // A valid wrapper around null still returns null (validity, not nullness, decides).
    assertThat(ValueWrapper.of(null).orElseGet(() -> 3)).isNull();
}
@VisibleForTesting void validateMobileUnique(Long id, String mobile) { if (StrUtil.isBlank(mobile)) { return; } AdminUserDO user = userMapper.selectByMobile(mobile); if (user == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的用户 if (id == null) { throw exception(USER_MOBILE_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_MOBILE_EXISTS); } }
@Test
public void testValidateMobileUnique_mobileExistsForCreate() {
    // Prepare parameters
    String mobile = randomString();
    // Mock data: another user already owns this mobile number
    userMapper.insert(randomAdminUserDO(o -> o.setMobile(mobile)));
    // Invoke with null id (create path) and expect the duplicate-mobile exception
    assertServiceException(() -> userService.validateMobileUnique(null, mobile), USER_MOBILE_EXISTS);
}
/**
 * Registers a watchable path, takes an initial snapshot of its state for change
 * detection, and lazily starts the polling loop on first registration.
 *
 * @throws IOException if the path cannot be snapshotted
 */
@Override
@CanIgnoreReturnValue
public Key register(Watchable watchable, Iterable<? extends WatchEvent.Kind<?>> eventTypes)
    throws IOException {
    JimfsPath path = checkWatchable(watchable);
    Key key = super.register(path, eventTypes);
    // Snapshot BEFORE publishing the key so the first poll has a baseline to diff against.
    Snapshot snapshot = takeSnapshot(path);
    synchronized (this) {
        snapshots.put(key, snapshot);
        if (pollingFuture == null) {
            // First registered key: start the background polling task.
            startPolling();
        }
    }
    return key;
}
@Test
public void testRegister() throws IOException {
    // Registering a directory yields a valid key and starts the polling loop.
    Key key = watcher.register(createDirectory(), ImmutableList.of(ENTRY_CREATE));
    assertThat(key.isValid()).isTrue();
    assertThat(watcher.isPolling()).isTrue();
}
/**
 * Decrypts a Base64-encoded cipher value into its UTF-8 plain text.
 *
 * @param cipherValue Base64 text of the encrypted payload; {@code null} passes through as {@code null}.
 * @return decrypted plain text, or {@code null} for a {@code null} input.
 */
@SneakyThrows(GeneralSecurityException.class)
@Override
public Object decrypt(final Object cipherValue) {
    if (null == cipherValue) {
        return null;
    }
    final byte[] encoded = Base64.getDecoder().decode(cipherValue.toString().trim());
    final byte[] plain = getCipher(Cipher.DECRYPT_MODE).doFinal(encoded);
    return new String(plain, StandardCharsets.UTF_8);
}
@Test
void assertDecryptNullValue() {
    // Null cipher text must decrypt to null rather than throwing.
    assertNull(cryptographicAlgorithm.decrypt(null));
}
/**
 * Returns the number of entries, delegating to the backing hash-slot array.
 */
@Override
public long size() {
    return hsa.size();
}
@Test
public void testSize() {
    // A fresh map is empty.
    assertEquals(0, map.size());
    final int count = 100;
    // Insert `count` distinct keys; size must track them exactly.
    for (long key = 0; key < count; key++) {
        map.put(key, newValue());
    }
    assertEquals(map.toString(), count, map.size());
}
/**
 * Removes the mapping for the given key from this open-addressed hash map.
 *
 * <p>Probes linearly from the key's hash slot until an empty slot ({@code missingValue})
 * terminates the chain; on a match the slot is cleared and the probe chain is compacted
 * so later lookups remain correct.
 *
 * @param key key whose mapping is to be removed.
 * @return the removed value, or {@code missingValue} if the key was absent.
 */
public long removeKey(final K key) {
    final int mask = values.length - 1;
    int index = Hashing.hash(key, mask);

    long value;
    // A slot holding missingValue marks the end of the probe chain: key is absent.
    while (missingValue != (value = values[index])) {
        if (key.equals(keys[index])) {
            keys[index] = null;
            values[index] = missingValue;
            --size;
            // Re-seat any entries displaced past the freed slot.
            compactChain(index);
            break;
        }

        // Linear probing with power-of-two wrap-around.
        index = ++index & mask;
    }

    return value;
}
@Test
public void removeShouldReturnMissing() {
    // Removing an absent key must return the map's sentinel missing value.
    assertEquals(MISSING_VALUE, map.removeKey("1"));
}
/**
 * Applies this extractor to a message: checks the configured condition on the source
 * field, runs the extraction, writes result fields, optionally cuts the matched spans
 * out of the source field, and finally runs the configured converters.
 * All phases are metered with the corresponding timers/counters.
 */
public void runExtractor(Message msg) {
    try(final Timer.Context ignored = completeTimer.time()) {
        final String field;
        try (final Timer.Context ignored2 = conditionTimer.time()) {
            // We can only work on Strings.
            if (!(msg.getField(sourceField) instanceof String)) {
                conditionMissesCounter.inc();
                return;
            }
            field = (String) msg.getField(sourceField);
            // Decide if to extract at all.
            if (conditionType.equals(ConditionType.STRING)) {
                if (field.contains(conditionValue)) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            } else if (conditionType.equals(ConditionType.REGEX)) {
                if (regexConditionPattern.matcher(field).find()) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            }
        }
        try (final Timer.Context ignored2 = executionTimer.time()) {
            Result[] results;
            try {
                results = run(field);
            } catch (ExtractorException e) {
                // Extraction failure is recorded on the message, not thrown to the caller.
                final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
                msg.addProcessingError(new Message.ProcessingError(
                        ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
                return;
            }
            // Bail out when nothing was extracted or any extracted value is null.
            if (results == null || results.length == 0
                    || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
                return;
            } else if (results.length == 1 && results[0].target == null) {
                // results[0].target is null if this extractor cannot produce multiple fields use targetField in that case
                msg.addField(targetField, results[0].getValue());
            } else {
                for (final Result result : results) {
                    msg.addField(result.getTarget(), result.getValue());
                }
            }
            // Remove original from message?
            if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField)
                    && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
                final StringBuilder sb = new StringBuilder(field);
                final List<Result> reverseList = Arrays.stream(results)
                        .sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
                        .collect(Collectors.toList());
                // remove all from reverse so that the indices still match
                for (final Result result : reverseList) {
                    sb.delete(result.getBeginIndex(), result.getEndIndex());
                }
                final String builtString = sb.toString();
                final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
                msg.removeField(sourceField);
                // TODO don't add an empty field back, or rather don't add fullyCutByExtractor
                msg.addField(sourceField, finalResult);
            }
            runConverters(msg);
        }
    }
}
@Test public void testCursorStrategyCopy() throws Exception { final TestExtractor extractor = new TestExtractor.Builder() .cursorStrategy(COPY) .sourceField("msg") .callback(new Callable<Result[]>() { @Override public Result[] call() throws Exception { return new Result[] { new Result("the", 0, 3) }; } }) .build(); final Message msg = createMessage("message"); msg.addField("msg", "the hello"); extractor.runExtractor(msg); // With the copy strategy, the source field should not be modified. assertThat(msg.getField("msg")).isEqualTo("the hello"); }
/**
 * Returns the rollover periodicity derived from the configured date pattern.
 */
public PeriodicityType getPeriodicityType() {
    return periodicityType;
}
@Test public void testMillisecondPeriodicity() { // The length of the 'S' pattern letter matters on different platforms, // and can render different results on different versions of Android. // This test verifies that the periodicity is correct for different // pattern lengths. { RollingCalendar rc = new RollingCalendar("yyyy-MM-dd-S"); assertEquals(PeriodicityType.TOP_OF_MILLISECOND, rc.getPeriodicityType()); } { RollingCalendar rc = new RollingCalendar("yyyy-MM-dd-SS"); assertEquals(PeriodicityType.TOP_OF_MILLISECOND, rc.getPeriodicityType()); } { RollingCalendar rc = new RollingCalendar("yyyy-MM-dd-SSS"); assertEquals(PeriodicityType.TOP_OF_MILLISECOND, rc.getPeriodicityType()); } }
/**
 * Computes the set of bundle unload/transfer decisions for this shedding cycle.
 *
 * <p>High-level flow: refresh broker load stats; skip unless the shedding condition has
 * held for more than the configured hit-count threshold; then repeatedly pick the most
 * loaded broker and move its top bundles toward the least loaded broker until the load
 * standard deviation meets the target or no transferable brokers/bundles remain.
 *
 * @param context                 load manager context (broker registry, load stores, config)
 * @param recentlyUnloadedBundles bundles unloaded recently (excluded from shedding)
 * @param recentlyUnloadedBrokers brokers unloaded recently (excluded from stats)
 * @return the cached set of unload decisions for this cycle (possibly empty)
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        // Without the broker list we cannot make safe decisions; abort this cycle.
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }
        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;
        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Require the shedding condition to hold for several consecutive cycles
        // (hysteresis) before acting, to avoid reacting to transient spikes.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }
        // Main shedding loop: repeatedly offload from the current max-loaded broker.
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                            + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }
            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Aim to move half of the weighted-load gap between max and min broker,
            // translated into a throughput budget proportional to the max broker's traffic.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100, targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }
            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;
            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                // A single bundle cannot be moved without just relocating the hotspot.
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent()
                    ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null;
            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }
            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    // Always leave at least one bundle on the max broker.
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                + " less than or equal to 1.", bundle, maxBroker));
                    }
                    break;
                }
                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                    + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                    + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }
                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                            // NOTE(review): trafficMarkedToGain is only accumulated inside this
                            // debugMode block, so the swap accounting differs between debug and
                            // non-debug runs — looks like a misplaced brace; verify intent.
                            trafficMarkedToGain += minBrokerBundleSwapThroughput;
                        }
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                            + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                            + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB, trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Reflect the planned offload in the in-memory stats so the next loop
                // iteration works against the projected (post-move) load distribution.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end
        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }
        if (decisionCache.isEmpty()) {
            // Report the most specific reason why nothing was shed this cycle.
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
// Verifies that bundles whose individual throughput exceeds the offload
// threshold can still be chosen for unloading, and that the resulting decision
// transfers a bundle off the overloaded broker.
@Test
public void testBundleThroughputLargerThanOffloadThreshold() {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    var ctx = setupContext();
    var topBundlesLoadDataStore = ctx.topBundleLoadDataStore();
    // Push two very large (1e9 byte/s) single-bundle loads for broker4 and broker5.
    topBundlesLoadDataStore.pushAsync("broker4:8080",
            getTopBundlesLoad("my-tenant/my-namespaceD", 1000000000, 1000000000));
    topBundlesLoadDataStore.pushAsync("broker5:8080",
            getTopBundlesLoad("my-tenant/my-namespaceE", 1000000000, 1000000000));
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    // Expect exactly one transfer decision: a bundle moved from broker3 to broker1.
    var expected = new HashSet<UnloadDecision>();
    expected.add(new UnloadDecision(
            new Unload("broker3:8080", "my-tenant/my-namespaceC/0x00000000_0x0FFFFFFF",
                    Optional.of("broker1:8080")),
            Success, Overloaded));
    assertEquals(res, expected);
    // The counter must reflect the load average / std-dev captured during setup.
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);
}
/**
 * Tells whether this referral record is marked as incomplete.
 *
 * @return {@code true} if the record is incomplete
 */
@Override
public boolean isIncomplete() {
    return this.incomplete;
}
// Checks every accessor of a fully populated referral record against the
// values it was constructed with.
@Test
public void testConstruction() {
    LispReferralRecord referralRecord = record1;
    LispIpv4Address expectedEidPrefix = new LispIpv4Address(IpAddress.valueOf(IP_ADDRESS1));
    assertThat(referralRecord.getRecordTtl(), is(100));
    assertThat(referralRecord.isAuthoritative(), is(true));
    assertThat(referralRecord.isIncomplete(), is(false));
    assertThat(referralRecord.getMapVersionNumber(), is((short) 1));
    assertThat(referralRecord.getMaskLength(), is((byte) 0x01));
    assertThat(referralRecord.getAction(), is(LispMapReplyAction.NativelyForward));
    assertThat(referralRecord.getEidPrefixAfi(), is(expectedEidPrefix));
}
/**
 * Computes the 16-character (middle-segment) hex MD5 digest of the given text.
 *
 * @param data    text to digest
 * @param charset charset used to encode the text before hashing
 * @return 16-character hex digest
 */
public String digestHex16(String data, Charset charset) {
    // Compute the full 32-character digest first, then shorten it.
    final String fullHex = digestHex(data, charset);
    return DigestUtil.md5HexTo16(fullHex);
}
// The 16-character MD5 form must be the documented middle segment of the full digest.
@Test
public void md5To16Test() {
    final String shortHex = new MD5().digestHex16("中国");
    assertEquals(16, shortHex.length());
    assertEquals("cb143acd6c929826", shortHex);
}
/**
 * Adds every element of the given collection that is not already present.
 * <p>
 * Implemented as a plain loop instead of the previous side-effecting stream
 * pipeline ({@code map} performing mutation), which is an anti-pattern: the
 * loop makes the mutation explicit and guarantees every element is attempted.
 *
 * @param c elements to add
 * @return {@code true} if at least one element was newly added
 */
@Override
public boolean addAll(Collection<? extends E> c) {
    boolean changed = false;
    for (E e : c) {
        // putIfAbsent returns null only when the element was absent before the call.
        if (map.putIfAbsent(e, e) == null) {
            changed = true;
        }
    }
    return changed;
}
// addAll must report a change when at least one element is new, and must
// tolerate elements that are already present.
@Test
public void testAddAll() {
    ExtendedSet<TestValue> populated = new ExtendedSet<>(Maps.newConcurrentMap());
    TestValue first = new TestValue("foo", 1);
    assertTrue(populated.add(first));
    TestValue second = new TestValue("goo", 2);
    TestValue third = new TestValue("shoo", 3);
    TestValue fourth = new TestValue("who", 4);
    assertTrue(populated.add(fourth));
    ArrayList<TestValue> batch = new ArrayList<>();
    batch.add(second);
    batch.add(third);
    batch.add(fourth);
    // "fourth" already exists, but "second"/"third" are new, so a change is reported.
    assertTrue(populated.addAll(batch));
    assertTrue(populated.contains(second));
    assertTrue(populated.contains(third));
    // Adding into an empty set must also report a change and retain all elements.
    ExtendedSet<TestValue> fresh = new ExtendedSet<>(Maps.newConcurrentMap());
    batch = new ArrayList<>();
    batch.add(first);
    batch.add(second);
    batch.add(third);
    assertTrue(fresh.addAll(batch));
    assertTrue(fresh.contains(first));
    assertTrue(fresh.contains(second));
    assertTrue(fresh.contains(third));
}
/**
 * Ensures {@code value} does not exceed {@code maxValue}.
 *
 * @param paramName name used in the error message
 * @param value     value under validation
 * @param maxValue  inclusive upper bound
 * @throws IllegalArgumentException if the value is greater than the allowed maximum
 */
public static void maxValueCheck(String paramName, long value, long maxValue) {
    if (value <= maxValue) {
        return;
    }
    throw new IllegalArgumentException(paramName + " cannot be bigger than <" + maxValue + ">!");
}
// Verifies maxValueCheck: values above the maximum are rejected, values at or
// below the maximum pass without throwing.
@Test
public void testMaxValueCheck() {
    // 11 > 10: must throw IllegalArgumentException.
    assertThrows(IllegalArgumentException.class, () -> ValueValidationUtil.maxValueCheck("param1", 11L, 10L));
    // Boundary (equal) and below-boundary values are accepted silently.
    ValueValidationUtil.maxValueCheck("param2", 10L, 10L);
    ValueValidationUtil.maxValueCheck("param3", 9L, 10L);
}
/**
 * Returns the remote-config list for the authenticated user: every stored
 * remote config resolved against the caller (bucketed via SHA1 over the user
 * UUID), followed by the static global config entries (prefixed, always enabled).
 */
@GET
@Produces(MediaType.APPLICATION_JSON)
public UserRemoteConfigList getAll(@ReadOnly @Auth AuthenticatedDevice auth) {
    try {
        MessageDigest digest = MessageDigest.getInstance("SHA1");
        // Global config entries are always enabled and exposed with the global prefix.
        final Stream<UserRemoteConfig> globalConfigStream = globalConfig.entrySet().stream()
                .map(entry -> new UserRemoteConfig(GLOBAL_CONFIG_PREFIX + entry.getKey(), true, entry.getValue()));
        return new UserRemoteConfigList(Stream.concat(remoteConfigsManager.getAll().stream().map(config -> {
            // Bucketing key: the config's explicit hash key when present, otherwise its name.
            final byte[] hashKey = config.getHashKey() != null
                    ? config.getHashKey().getBytes(StandardCharsets.UTF_8)
                    : config.getName().getBytes(StandardCharsets.UTF_8);
            boolean inBucket = isInBucket(digest, auth.getAccount().getUuid(), hashKey, config.getPercentage(), config.getUuids());
            // Callers outside the bucket receive the default value instead of the experiment value.
            return new UserRemoteConfig(config.getName(), inBucket, inBucket ? config.getValue() : config.getDefaultValue());
        }), globalConfigStream).collect(Collectors.toList()), clock.instant());
    } catch (NoSuchAlgorithmException e) {
        // SHA1 is required of every JDK, so this branch is unreachable in practice.
        throw new AssertionError(e);
    }
}
// Fetches /v1/config/ as an authenticated user and checks that all configured
// remote configs come back in insertion order (with bucket-resolved values),
// followed by the global config entry. The order of the asserted indices
// mirrors the stubbed configuration.
@Test
void testRetrieveConfig() {
    UserRemoteConfigList configuration = resources.getJerseyTest()
            .target("/v1/config/")
            .request()
            .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
            .get(UserRemoteConfigList.class);

    // Exactly one fetch of the stored configs.
    verify(remoteConfigsManager, times(1)).getAll();

    // 10 stubbed remote configs + 1 global entry.
    assertThat(configuration.getConfig()).hasSize(11);
    assertThat(configuration.getConfig().get(0).getName()).isEqualTo("android.stickers");
    assertThat(configuration.getConfig().get(1).getName()).isEqualTo("ios.stickers");
    // Boolean flags: enabled with no value payload.
    assertThat(configuration.getConfig().get(2).getName()).isEqualTo("always.true");
    assertThat(configuration.getConfig().get(2).isEnabled()).isEqualTo(true);
    assertThat(configuration.getConfig().get(2).getValue()).isNull();
    assertThat(configuration.getConfig().get(3).getName()).isEqualTo("only.special");
    assertThat(configuration.getConfig().get(3).isEnabled()).isEqualTo(true);
    assertThat(configuration.getConfig().get(2).getValue()).isNull();
    // Value-carrying configs: enabled entries expose the experiment value.
    assertThat(configuration.getConfig().get(4).getName()).isEqualTo("value.always.true");
    assertThat(configuration.getConfig().get(4).isEnabled()).isEqualTo(true);
    assertThat(configuration.getConfig().get(4).getValue()).isEqualTo("bar");
    assertThat(configuration.getConfig().get(5).getName()).isEqualTo("value.only.special");
    assertThat(configuration.getConfig().get(5).isEnabled()).isEqualTo(true);
    assertThat(configuration.getConfig().get(5).getValue()).isEqualTo("xyz");
    // Disabled value config: the default value is returned instead.
    assertThat(configuration.getConfig().get(6).getName()).isEqualTo("value.always.false");
    assertThat(configuration.getConfig().get(6).isEnabled()).isEqualTo(false);
    assertThat(configuration.getConfig().get(6).getValue()).isEqualTo("red");
    assertThat(configuration.getConfig().get(7).getName()).isEqualTo("linked.config.0");
    assertThat(configuration.getConfig().get(8).getName()).isEqualTo("linked.config.1");
    assertThat(configuration.getConfig().get(9).getName()).isEqualTo("unlinked.config");
    // The single global config entry is appended last.
    assertThat(configuration.getConfig().get(10).getName()).isEqualTo("global.maxGroupSize");
}
/**
 * Exposes the configured placement rules for tests.
 *
 * @return the live list of placement rules (not a copy)
 */
@VisibleForTesting
public List<PlacementRule> getPlacementRules() {
    return this.rules;
}
// Verifies that explicitly configuring a placement rule prevents the scheduler
// from appending the default user-group rule: exactly the configured rule must
// be active after the RM starts.
@Test
public void testPlacementRuleUpdationOrder() throws Exception {
    List<QueueMapping> queueMappings = new ArrayList<>();
    QueueMapping userQueueMapping = QueueMappingBuilder.create()
            .type(MappingType.USER).source(USER1)
            .queue(getQueueMapping(PARENT_QUEUE, USER1)).build();
    CSMappingPlacementRule ugRule = new CSMappingPlacementRule();
    // Explicitly configure the rule before the RM is created.
    conf.set(YarnConfiguration.QUEUE_PLACEMENT_RULES, ugRule.getName());
    queueMappings.add(userQueueMapping);
    conf.setQueueMappings(queueMappings);
    mockRM = new MockRM(conf);
    CapacityScheduler cs = (CapacityScheduler) mockRM.getResourceScheduler();
    mockRM.start();
    PlacementManager pm = cs.getRMContext().getQueuePlacementManager();

    // As we are setting placement rule, It shouldn't update default
    // placement rule ie user-group. Number of placement rules should be 1.
    Assert.assertEquals(1, pm.getPlacementRules().size());
    // Verifying if placement rule set is same as the one we configured
    Assert.assertEquals(ugRule.getName(), pm.getPlacementRules().get(0).getName());
}
/**
 * Casts a smallint value to tinyint, raising a Presto out-of-range error when
 * the value does not fit into a signed byte.
 */
@ScalarOperator(CAST)
@SqlType(StandardTypes.TINYINT)
public static long castToTinyint(@SqlType(StandardTypes.SMALLINT) long value) {
    try {
        // checkedCast rejects anything outside [-128, 127].
        return SignedBytes.checkedCast(value);
    } catch (IllegalArgumentException cause) {
        throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, "Out of range for tinyint: " + value, cause);
    }
}
// Smallint values inside the signed-byte range must cast to tinyint losslessly.
@Test
public void testCastToTinyint() {
    assertFunction("cast(SMALLINT'37' as tinyint)", TINYINT, (byte) 37);
    assertFunction("cast(SMALLINT'17' as tinyint)", TINYINT, (byte) 17);
}
/**
 * Returns the transition style name, defaulting to {@link PDTransitionStyle#R}
 * when no style entry is present in the dictionary.
 */
public String getStyle() {
    final String defaultStyle = PDTransitionStyle.R.name();
    return getCOSObject().getNameAsString(COSName.S, defaultStyle);
}
// A freshly constructed transition must carry the Trans type and default to style R.
@Test
void defaultStyle() {
    PDTransition freshTransition = new PDTransition();
    assertEquals(COSName.TRANS, freshTransition.getCOSObject().getCOSName(COSName.TYPE));
    assertEquals(PDTransitionStyle.R.name(), freshTransition.getStyle());
}
/**
 * Renames, inside each {@code containerNodeName} element, every child whose
 * node name equals {@code childNodeNameToReplace} to
 * {@code childNodeNameReplacement}, then serializes the document.
 *
 * @return the XML of the (possibly modified) document as a string
 * @throws TransformerException if serialization fails
 */
public static String replaceNodeName(Document document, String containerNodeName,
                                     String childNodeNameToReplace,
                                     String childNodeNameReplacement) throws TransformerException {
    final NodeList containers = document.getElementsByTagName(containerNodeName);
    if (containers != null) {
        for (int containerIndex = 0; containerIndex < containers.getLength(); containerIndex++) {
            final NodeList children = containers.item(containerIndex).getChildNodes();
            for (int childIndex = 0; childIndex < children.getLength(); childIndex++) {
                final Node child = children.item(childIndex);
                if (Objects.equals(child.getNodeName(), childNodeNameToReplace)) {
                    // Rename in place; the live NodeList keeps its size, so iteration is safe.
                    document.renameNode(child, null, childNodeNameReplacement);
                }
            }
        }
    }
    return getString(document);
}
// After renaming, the container must hold exactly one child carrying the new name.
@Test
public void replaceNodeName() throws Exception {
    final String newNodeName = "replacement";
    Document document = DOMParserUtil.getDocument(XML);
    DOMParserUtil.replaceNodeName(document, MAIN_NODE, TEST_NODE, newNodeName);
    final Map<Node, List<Node>> childrenByContainer = DOMParserUtil.getChildrenNodesMap(document, MAIN_NODE, newNodeName);
    assertThat(childrenByContainer).isNotNull();
    assertThat(childrenByContainer).hasSize(1);
    List<Node> renamedNodes = childrenByContainer.values().iterator().next();
    assertThat(renamedNodes).isNotNull();
    assertThat(renamedNodes).hasSize(1);
    assertThat(renamedNodes.get(0).getNodeName()).isEqualTo("replacement");
}
/**
 * Locks the given file via the Dropbox lock-file batch API. Only files inside
 * shared folders can be locked.
 *
 * @return the literal string "true" when the lock was acquired, or {@code null}
 *         if the batch produced no success/failure entry
 * @throws UnsupportedException if the file is not inside a shared folder
 * @throws BackgroundException  if the batch reports a failure or the API call fails
 */
@Override
public String lock(final Path file) throws BackgroundException {
    if (!containerService.getContainer(file).getType().contains(Path.Type.shared)) {
        log.warn(String.format("Skip attempting to lock file %s not in shared folder", file));
        throw new UnsupportedException();
    }
    try {
        // Single-file batch: at most one result entry is expected.
        for (LockFileResultEntry result : new DbxUserFilesRequests(session.getClient(file)).lockFileBatch(Collections.singletonList(
                new LockFileArg(containerService.getKey(file)))).getEntries()) {
            if (result.isFailure()) {
                // Translate the per-entry failure into a BackgroundException.
                throw this.failure(result);
            }
            if (result.isSuccess()) {
                if (log.isDebugEnabled()) {
                    log.debug(String.format("Locked file %s with result %s", file, result.getSuccessValue()));
                }
                // NOTE(review): Dropbox returns no lock token; "true" serves as the lock id.
                return String.valueOf(true);
            }
        }
        return null;
    } catch (DbxException e) {
        throw new DropboxExceptionMappingService().map("Failure to write attributes of {0}", e, file);
    }
}
// Locking a file that is not inside a shared folder must fail with
// UnsupportedException; the temporary file is removed afterwards.
@Test
public void testLockNotShared() throws Exception {
    final DropboxTouchFeature touch = new DropboxTouchFeature(session);
    final Path file = touch.touch(new Path(new DefaultHomeFinderService(session).find(),
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final DropboxLockFeature f = new DropboxLockFeature(session);
    try {
        // Must throw before returning a lock token.
        final String lock = f.lock(file);
        fail();
    } catch (UnsupportedException e) {
        // Expected: file is outside a shared folder.
    }
    // Clean up the temporary file regardless of the lock outcome.
    new DropboxDeleteFeature(session).delete(Collections.singletonList(file),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Parses a natural-language date expression relative to the current time.
 *
 * @param string expression such as "last hour"
 * @return the parsed time range
 * @throws DateNotParsableException if the expression cannot be interpreted
 */
public Result parse(final String string) throws DateNotParsableException {
    final Date referenceDate = new Date();
    return parse(string, referenceDate);
}
// Parsed ranges must always be chronologically ordered: from strictly before to.
@Test
public void testTemporalOrder() throws Exception {
    NaturalDateParser.Result lastHour = naturalDateParserUtc.parse("last hour");
    assertThat(lastHour.getFrom()).as("from should be before to in").isBefore(lastHour.getTo());
    NaturalDateParser.Result lastOneHour = naturalDateParserUtc.parse("last one hour");
    assertThat(lastOneHour.getFrom()).as("from should be before to in").isBefore(lastOneHour.getTo());
}
/**
 * Closes the gateway for the given checkpoint if that checkpoint has been marked.
 *
 * @param checkpointId checkpoint to close the gateway for
 * @return whether the gateway was closed
 */
boolean tryCloseGateway(long checkpointId) {
    checkRunsInMainThread();
    if (!currentMarkedCheckpointIds.contains(checkpointId)) {
        return false;
    }
    // Start buffering events for this checkpoint, keeping any existing buffer.
    blockedEventsMap.putIfAbsent(checkpointId, new LinkedList<>());
    return true;
}
// A gateway must refuse to close for a checkpoint that was never marked.
@Test
void notClosingUnmarkedGateway() {
    final EventReceivingTasks tasks = EventReceivingTasks.createForRunningTasks();
    final SubtaskGatewayImpl subtaskGateway =
            new SubtaskGatewayImpl(
                    getUniqueElement(tasks.getAccessesForSubtask(11)),
                    ComponentMainThreadExecutorServiceAdapter.forMainThread(),
                    new IncompleteFuturesTracker());
    final boolean closed = subtaskGateway.tryCloseGateway(123L);
    assertThat(closed).isFalse();
}
/**
 * Converts a header value. Headers share the record-value wire format, so this
 * simply delegates to {@code toConnectData}; the header key does not influence
 * deserialization.
 */
@Override
public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) {
    return toConnectData(topic, value);
}
// A JSON envelope with a string schema must convert to a STRING SchemaAndValue.
@Test
public void stringHeaderToConnect() {
    final byte[] serialized =
            "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes();
    assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"),
            converter.toConnectHeader(TOPIC, "headerName", serialized));
}
/**
 * Converts the YAML mode configuration into a {@code ModeConfiguration},
 * delegating repository conversion to the SPI swapper matched by the mode type
 * when repository settings are present.
 */
@Override
public ModeConfiguration swapToObject(final YamlModeConfiguration yamlConfig) {
    if (null == yamlConfig.getRepository()) {
        // No repository settings: the mode carries only its type.
        return new ModeConfiguration(yamlConfig.getType(), null);
    }
    YamlPersistRepositoryConfigurationSwapper<PersistRepositoryConfiguration> repositorySwapper =
            TypedSPILoader.getService(YamlPersistRepositoryConfigurationSwapper.class, yamlConfig.getType());
    return new ModeConfiguration(yamlConfig.getType(), repositorySwapper.swapToObject(yamlConfig.getRepository()));
}
// A YAML config without repository settings swaps to a mode carrying only the type.
@Test
void assertSwapToObject() {
    YamlModeConfiguration input = new YamlModeConfiguration();
    input.setType(TEST_TYPE);
    ModeConfiguration result = swapper.swapToObject(input);
    assertThat(result.getType(), is(TEST_TYPE));
}
/**
 * Clears the changed flag on this transformation and on all of its parts:
 * steps (including their partitioning metadata), hops, partition schemas and
 * cluster schemas.
 */
@Override
public void clearChanged() {
    changed_steps = false;
    changed_hops = false;
    for (int stepNr = 0; stepNr < nrSteps(); stepNr++) {
        getStep(stepNr).setChanged(false);
        if (getStep(stepNr).getStepPartitioningMeta() != null) {
            getStep(stepNr).getStepPartitioningMeta().hasChanged(false);
        }
    }
    for (int hopNr = 0; hopNr < nrTransHops(); hopNr++) {
        getTransHop(hopNr).setChanged(false);
    }
    for (int schemaNr = 0; schemaNr < partitionSchemas.size(); schemaNr++) {
        partitionSchemas.get(schemaNr).setChanged(false);
    }
    for (int schemaNr = 0; schemaNr < clusterSchemas.size(); schemaNr++) {
        clusterSchemas.get(schemaNr).setChanged(false);
    }
    // Let the base class clear its own flags and notify listeners.
    super.clearChanged();
}
// Verifies listener lifecycle: registered listeners receive contentChanged /
// contentSafe callbacks, and removed listeners receive nothing further.
// NOTE: Mockito verification counts are cumulative, so statement order matters.
@Test
public void testContentChangeListener() {
    ContentChangedListener listener = mock(ContentChangedListener.class);
    transMeta.addContentChangedListener(listener);

    // Both setChanged() and setChanged(true) fire contentChanged.
    transMeta.setChanged();
    transMeta.setChanged(true);
    verify(listener, times(2)).contentChanged(same(transMeta));

    // Both clearChanged() and setChanged(false) fire contentSafe.
    transMeta.clearChanged();
    transMeta.setChanged(false);
    verify(listener, times(2)).contentSafe(same(transMeta));

    // After removal the listener must not be notified again.
    transMeta.removeContentChangedListener(listener);
    transMeta.setChanged();
    transMeta.setChanged(true);
    verifyNoMoreInteractions(listener);
}
/**
 * Returns whether the given string exactly matches the name of one of the
 * valid KSQL module types.
 *
 * @param moduleType candidate module type name (must not be null)
 * @return {@code true} if the name matches a valid module type
 */
public static boolean isValidKsqlModuleType(final String moduleType) {
    for (KSqlValidModuleType candidate : KSqlValidModuleType.values()) {
        // Compare from moduleType so a null argument fails fast, as before.
        if (moduleType.equals(candidate.name())) {
            return true;
        }
    }
    return false;
}
// Every known KSQL deployment mode name must be accepted as a valid module type.
@Test
public void testValidKsqlDeploymentMode() {
    List<String> deploymentModes = Arrays.asList("LOCAL_CLI", "REMOTE_CLI", "SERVER", "EMBEDDED", "CLI");
    for (String deploymentMode : deploymentModes) {
        assertTrue("Expected KSQL deployment mode '" + deploymentMode + "' to be valid",
                MetricsValidation.isValidKsqlModuleType(deploymentMode));
    }
}
/**
 * Converts a raw byte count into a human-readable (value, unit) pair using
 * binary thresholds: B, KB, MB, GB, TB.
 *
 * @param value byte count
 * @return the scaled value together with its unit string
 */
public static Pair<Double, String> getByteUint(long value) {
    double scaled = (double) value;
    final String unit;
    if (value >= TERABYTE) {
        unit = "TB";
        scaled /= TERABYTE;
    } else if (value >= GIGABYTE) {
        unit = "GB";
        scaled /= GIGABYTE;
    } else if (value >= MEGABYTE) {
        unit = "MB";
        scaled /= MEGABYTE;
    } else if (value >= KILOBYTE) {
        unit = "KB";
        scaled /= KILOBYTE;
    } else {
        // Below one kilobyte the raw count is reported as-is.
        unit = "B";
    }
    return Pair.create(scaled, unit);
}
@Test public void testGetByteUint() { Pair<Double, String> result; result = DebugUtil.getByteUint(0); Assert.assertEquals(result.first, Double.valueOf(0.0)); Assert.assertEquals(result.second, "B"); result = DebugUtil.getByteUint(123); // B Assert.assertEquals(result.first, Double.valueOf(123.0)); Assert.assertEquals(result.second, "B"); result = DebugUtil.getByteUint(123456); // K Assert.assertEquals(result.first, Double.valueOf(120.5625)); Assert.assertEquals(result.second, "KB"); result = DebugUtil.getByteUint(1234567); // M Assert.assertEquals(result.first, Double.valueOf(1.1773748397827148)); Assert.assertEquals(result.second, "MB"); result = DebugUtil.getByteUint(1234567890L); // G Assert.assertEquals(result.first, Double.valueOf(1.1497809458523989)); Assert.assertEquals(result.second, "GB"); }
/**
 * Creates the sharding route engine matching the type of the SQL statement.
 * Dispatch order: TCL (broadcast to every database) → DDL (cursor-aware or
 * plain DDL routing) → DAL → DCL → everything else treated as DQL.
 */
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database,
                                              final QueryContext queryContext, final ShardingConditions shardingConditions,
                                              final ConfigurationProperties props, final ConnectionContext connectionContext) {
    SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
    SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
    // Transaction control statements must reach every database.
    if (sqlStatement instanceof TCLStatement) {
        return new ShardingDatabaseBroadcastRoutingEngine();
    }
    if (sqlStatement instanceof DDLStatement) {
        // Cursor-capable DDL statements get dedicated cursor routing.
        if (sqlStatementContext instanceof CursorAvailable) {
            return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
        }
        return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    if (sqlStatement instanceof DALStatement) {
        return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
    }
    if (sqlStatement instanceof DCLStatement) {
        return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    // Anything not matched above is treated as a data query (DQL).
    return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
// A select on a single fully sharded table (with sharding conditions available)
// must be routed by the standard sharding routing engine.
@Test
void assertNewInstanceForSubqueryWithSameConditions() {
    SelectStatementContext sqlStatementContext = mock(SelectStatementContext.class, RETURNS_DEEP_STUBS);
    tableNames.add("t_order");
    when(sqlStatementContext.getTablesContext().getTableNames()).thenReturn(tableNames);
    when(sqlStatementContext.getTablesContext().getDatabaseName()).thenReturn(Optional.empty());
    // Sharding rule: t_order is sharded over two data sources.
    ShardingRule shardingRule = mock(ShardingRule.class, RETURNS_DEEP_STUBS);
    when(shardingRule.getShardingLogicTableNames(tableNames)).thenReturn(tableNames);
    when(shardingRule.getShardingTable("t_order").getActualDataSourceNames()).thenReturn(Arrays.asList("ds_0", "ds_1"));
    when(shardingRule.isAllShardingTables(Collections.singletonList("t_order"))).thenReturn(true);
    QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
    ShardingRouteEngine actual = ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, mock(ConfigurationProperties.class), new ConnectionContext(Collections::emptySet));
    assertThat(actual, instanceOf(ShardingStandardRoutingEngine.class));
}
/**
 * Reads long values for the given document ids from {@code column} into
 * {@code outValues}.
 *
 * @param column    column name; must exist in the reader map — TODO confirm a
 *                  missing column is a caller error (would NPE here)
 * @param inDocIds  document ids to read
 * @param length    number of ids to read from {@code inDocIds}
 * @param outValues output buffer, must hold at least {@code length} values
 */
public void fetchLongValues(String column, int[] inDocIds, int length, long[] outValues) {
    _columnValueReaderMap.get(column).readLongValues(inDocIds, length, outValues);
}
// Exercises fetchLongValues against every column type readable as long, both
// dictionary-encoded and raw (no-dictionary) variants.
@Test
public void testFetchLongValues() {
    // Dictionary-encoded columns.
    testFetchLongValues(INT_COLUMN);
    testFetchLongValues(LONG_COLUMN);
    testFetchLongValues(FLOAT_COLUMN);
    testFetchLongValues(DOUBLE_COLUMN);
    testFetchLongValues(BIG_DECIMAL_COLUMN);
    testFetchLongValues(STRING_COLUMN);
    // Raw (no-dictionary) columns.
    testFetchLongValues(NO_DICT_INT_COLUMN);
    testFetchLongValues(NO_DICT_LONG_COLUMN);
    testFetchLongValues(NO_DICT_FLOAT_COLUMN);
    testFetchLongValues(NO_DICT_DOUBLE_COLUMN);
    testFetchLongValues(NO_DICT_BIG_DECIMAL_COLUMN);
    testFetchLongValues(NO_DICT_STRING_COLUMN);
}
/**
 * Validates that this tab's name is unique among the tabs visited so far.
 * On a clash (case-sensitive comparison) the same error is recorded on both
 * this tab and the clashing one; otherwise this tab is appended to the list.
 *
 * @param tabs tabs already visited; mutated by adding this tab when unique
 */
public void validateTabNameUniqueness(ArrayList<Tab> tabs) {
    for (Tab tab : tabs) {
        if (name.equals(tab.getName())) {
            // Build the message once instead of duplicating the format call.
            String message = String.format("Tab name '%s' is not unique.", name);
            this.addError(NAME, message);
            tab.addError(NAME, message);
            return;
        }
    }
    tabs.add(this);
}
// Tab-name uniqueness is case-sensitive: "foO" does not clash with "foo",
// so the new tab is appended without errors.
@Test
public void shouldNotErrorOutWhenNamesAreOfDifferentCase() {
    Tab newTab = new Tab("foO", "bar");
    ArrayList<Tab> visitedTabs = new ArrayList<>();
    visitedTabs.add(new Tab("foo", "bar"));
    newTab.validateTabNameUniqueness(visitedTabs);
    assertThat(visitedTabs.size(), is(2));
}
public boolean initWithCommittedOffsetsIfNeeded(Timer timer) { final Set<TopicPartition> initializingPartitions = subscriptions.initializingPartitions(); final Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(initializingPartitions, timer); // "offsets" will be null if the offset fetch requests did not receive responses within the given timeout if (offsets == null) return false; refreshCommittedOffsets(offsets, this.metadata, this.subscriptions); return true; }
// When the broker reports no committed offset (-1), initialization completes
// but the partition remains in the initializing state: no position is set and
// the partition is not fetchable.
@Test
public void testRefreshOffsetWithNoFetchableOffsets() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

    subscriptions.assignFromUser(singleton(t1p));
    // Offset -1 signals "no committed offset" for the partition.
    client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", -1L));
    coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));

    assertEquals(Collections.singleton(t1p), subscriptions.initializingPartitions());
    assertEquals(Collections.emptySet(), subscriptions.partitionsNeedingReset(time.milliseconds()));
    assertFalse(subscriptions.hasAllFetchPositions());
    assertNull(subscriptions.position(t1p));
}
public static void incrementIpCountWithBatchRegister(InstancePublishInfo old, BatchInstancePublishInfo instancePublishInfo) { int newSize = instancePublishInfo.getInstancePublishInfos().size(); if (null == old) { // First time increment batchPublishInfo, add all into metrics. getIpCountMonitor().addAndGet(newSize); } else if (old instanceof BatchInstancePublishInfo) { // Not first time increment batchPublishInfo, calculate the diff, and add the diff to metrics, the diff may be negative. int oldSize = ((BatchInstancePublishInfo) old).getInstancePublishInfos().size(); getIpCountMonitor().addAndGet(newSize - oldSize); } else { // Not first time increment batchPublishInfo and the old one is not batch, also diff it. getIpCountMonitor().addAndGet(newSize - 1); } }
// Exercises the gauge deltas: first registration adds the batch size, and
// replacing a batch adds (possibly negative) size differences.
@Test
void testIncrementIpCountWithBatchRegister() {
    // One-instance batch registered for the first time: gauge 0 -> 1.
    BatchInstancePublishInfo singleBatch = new BatchInstancePublishInfo();
    List<InstancePublishInfo> singleInstances = new LinkedList<>();
    singleInstances.add(new InstancePublishInfo());
    singleBatch.setInstancePublishInfos(singleInstances);
    assertEquals(0, MetricsMonitor.getIpCountMonitor().get());
    MetricsMonitor.incrementIpCountWithBatchRegister(null, singleBatch);
    assertEquals(1, MetricsMonitor.getIpCountMonitor().get());
    // Replace with a two-instance batch: gauge 1 -> 2.
    BatchInstancePublishInfo doubleBatch = new BatchInstancePublishInfo();
    List<InstancePublishInfo> doubleInstances = new LinkedList<>();
    doubleInstances.add(new InstancePublishInfo());
    doubleInstances.add(new InstancePublishInfo());
    doubleBatch.setInstancePublishInfos(doubleInstances);
    MetricsMonitor.incrementIpCountWithBatchRegister(singleBatch, doubleBatch);
    assertEquals(2, MetricsMonitor.getIpCountMonitor().get());
    // Replace back with the smaller batch: negative delta, gauge 2 -> 1.
    MetricsMonitor.incrementIpCountWithBatchRegister(doubleBatch, singleBatch);
    assertEquals(1, MetricsMonitor.getIpCountMonitor().get());
}
/**
 * Resolves the Chinese zodiac animal for the given date.
 *
 * @param date date to resolve
 * @return the zodiac animal name
 */
public static String getChineseZodiac(Date date) {
    final Calendar calendar = DateUtil.calendar(date);
    return getChineseZodiac(calendar);
}
// Year, Date and Calendar overloads must agree; pre-1900 years and null yield null.
@Test
public void getChineseZodiacTest() {
    assertEquals("狗", Zodiac.getChineseZodiac(1994));
    assertEquals("狗", Zodiac.getChineseZodiac(2018));
    assertEquals("猪", Zodiac.getChineseZodiac(2019));
    final Calendar summer2022 = Calendar.getInstance();
    summer2022.set(2022, Calendar.JULY, 17);
    // The Date and Calendar overloads must resolve the same animal.
    assertEquals("虎", Zodiac.getChineseZodiac(summer2022.getTime()));
    assertEquals("虎", Zodiac.getChineseZodiac(summer2022));
    // Out-of-range year and null input both return null.
    assertNull(Zodiac.getChineseZodiac(1899));
    assertNull(Zodiac.getChineseZodiac((Calendar) null));
}
/**
 * Fetches all subscription group configurations from the given broker.
 *
 * @param brokerAddr    broker address to query
 * @param timeoutMillis RPC timeout
 * @return the decoded subscription group wrapper
 * @throws MQBrokerException if the broker responds with a non-success code
 */
public SubscriptionGroupWrapper getAllSubscriptionGroup(final String brokerAddr,
    long timeoutMillis) throws InterruptedException, RemotingTimeoutException,
    RemotingSendRequestException, RemotingConnectException, MQBrokerException {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_SUBSCRIPTIONGROUP_CONFIG, null);
    // Route through the VIP channel when the client config enables it.
    String targetAddr = MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), brokerAddr);
    RemotingCommand response = this.remotingClient.invokeSync(targetAddr, request, timeoutMillis);
    assert response != null;
    if (response.getCode() == ResponseCode.SUCCESS) {
        return SubscriptionGroupWrapper.decode(response.getBody(), SubscriptionGroupWrapper.class);
    }
    throw new MQBrokerException(response.getCode(), response.getRemark(), brokerAddr);
}
// Stubs invokeSync to return a wrapper containing one subscription group and
// verifies that the response body is decoded correctly, including the default
// data version (state version 0).
@Test
public void assertGetAllSubscriptionGroupForSubscriptionGroupWrapper() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    SubscriptionGroupWrapper responseBody = new SubscriptionGroupWrapper();
    responseBody.getSubscriptionGroupTable().put("key", new SubscriptionGroupConfig());
    setResponseBody(responseBody);
    SubscriptionGroupWrapper actual = mqClientAPI.getAllSubscriptionGroup(defaultBrokerAddr, defaultTimeout);
    assertNotNull(actual);
    assertEquals(1, actual.getSubscriptionGroupTable().size());
    assertNotNull(actual.getDataVersion());
    assertEquals(0, actual.getDataVersion().getStateVersion());
}
/**
 * Returns all grants held by the given grantee.
 *
 * @param grantee GRN of the grantee to look up
 * @return immutable set of matching grants (empty when none exist)
 */
public ImmutableSet<GrantDTO> getForGrantee(GRN grantee) {
    return streamQuery(DBQuery.is(GrantDTO.FIELD_GRANTEE, grantee))
            .collect(ImmutableSet.toImmutableSet());
}
// Against the grants fixture, each grantee must see exactly their own grants.
@Test
@MongoDBFixtures("grants.json")
public void getForGrantee() {
    final GRN granteeJane = grnRegistry.newGRN("user", "jane");
    final GRN granteeJohn = grnRegistry.newGRN("user", "john");
    assertThat(dbService.getForGrantee(granteeJane)).hasSize(3);
    assertThat(dbService.getForGrantee(granteeJohn)).hasSize(2);
}
/**
 * Runs the version-change reconciliation pipeline: read the controller's
 * version, list the pods, detect the from/to versions and prepare the change.
 *
 * @return future completing with the prepared version change
 */
public Future<KafkaVersionChange> reconcile() {
    return getVersionFromController()
            .compose(ignored -> getPods())
            .compose(pods -> detectToAndFromVersions(pods))
            .compose(ignored -> prepareVersionChange());
}
// Cluster and CR both run the default Kafka version, but the CR pins older
// inter-broker-protocol / log-message-format versions: no version change is
// needed, the explicit versions pass through as null (not operator-managed),
// and the metadata version stays at the default.
@Test
public void testNoopWithNewProtocolVersion(VertxTestContext context) {
    String kafkaVersion = VERSIONS.defaultVersion().version();
    String interBrokerProtocolVersion = "3.2";
    String logMessageFormatVersion = "2.8";

    VersionChangeCreator vcc = mockVersionChangeCreator(
            mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion),
            mockNewCluster(
                    null,
                    mockSps(kafkaVersion),
                    mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)
            )
    );

    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        // From/to are identical: nothing to upgrade or downgrade.
        assertThat(c.from(), is(VERSIONS.defaultVersion()));
        assertThat(c.to(), is(VERSIONS.defaultVersion()));
        // Explicitly configured versions are left alone (null = unmanaged).
        assertThat(c.interBrokerProtocolVersion(), nullValue());
        assertThat(c.logMessageFormatVersion(), nullValue());
        assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
        async.flag();
    })));
}
/**
 * Gets or creates a sensor with the given unique name, recorded at INFO level.
 *
 * @param name unique sensor name
 * @return the sensor
 */
public Sensor sensor(String name) {
    return sensor(name, Sensor.RecordingLevel.INFO);
}
// Registering a sensor whose two parents share a common ancestor must be rejected.
@Test
public void testBadSensorHierarchy() {
    Sensor parent = metrics.sensor("parent");
    Sensor firstChild = metrics.sensor("child1", parent);
    Sensor secondChild = metrics.sensor("child2", parent);
    assertThrows(IllegalArgumentException.class, () -> metrics.sensor("gc", firstChild, secondChild));
}
/**
 * Creates a {@link PackageMatcher} for the supplied package identifier.
 *
 * @param packageIdentifier the identifier pattern to match packages against
 * @return a new matcher for the given identifier
 */
@PublicAPI(usage = ACCESS)
public static PackageMatcher of(String packageIdentifier) {
    return new PackageMatcher(packageIdentifier);
}
// Data-provider test: each supplied identifier nests groups inside other
// groups, which PackageMatcher.of must reject with the documented message.
@Test
@UseDataProvider
public void test_reject_nesting_of_groups(String packageIdentifier) {
    assertThatThrownBy(() -> PackageMatcher.of(packageIdentifier))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("Package Identifier does not support nesting '()' or '[]' within other '()' or '[]'");
}
/**
 * Checks whether the given string denotes a valid IP address (v4 or v6),
 * converting host names to addresses first when necessary.
 *
 * @param ip               candidate address or host name
 * @param validLocalAndAny whether loopback/wildcard hosts count as valid
 * @return {@code true} if the (converted) address is well-formed and allowed
 */
public static boolean isValidIp(String ip, boolean validLocalAndAny) {
    if (ip == null) {
        return false;
    }
    final String normalized = convertIpIfNecessary(ip);
    if (validLocalAndAny) {
        return isValidIPv4(normalized) || isValidIPv6(normalized);
    }
    // Loopback / wildcard hosts are rejected unless explicitly allowed above.
    if (FORBIDDEN_HOSTS.contains(normalized)) {
        return false;
    }
    return isValidIPv4(normalized) || isValidIPv6(normalized);
}
// Covers loopback handling, public addresses, resolvable host names and the
// wrapped UnknownHostException for unresolvable names.
@Test
public void testIsValidIp() {
    String loopback = "127.0.0.1";
    String publicIp = "8.210.212.91";
    String resolvableHost = "seata.io";
    String unresolvableHost = "knownHost";
    // Loopback is only valid when local/any addresses are allowed.
    assertThat(NetUtil.isValidIp(loopback, true)).isTrue();
    assertThat(NetUtil.isValidIp(loopback, false)).isFalse();
    // Public addresses are valid either way.
    assertThat(NetUtil.isValidIp(publicIp, true)).isTrue();
    assertThat(NetUtil.isValidIp(publicIp, false)).isTrue();
    // Resolvable host names are converted to their address first.
    assertThat(NetUtil.isValidIp(resolvableHost, true)).isTrue();
    assertThat(NetUtil.isValidIp(resolvableHost, false)).isTrue();
    // An unresolvable name surfaces as a RuntimeException wrapping UnknownHostException.
    assertThatThrownBy(() -> {
        NetUtil.isValidIp(unresolvableHost, false);
    }).isInstanceOf(RuntimeException.class).hasMessageContaining("UnknownHostException");
}
/**
 * Returns the human-readable name of this analyzer.
 *
 * @return the analyzer name constant
 */
@Override
public String getName() {
    return ANALYZER_NAME;
}
// Analyzes a pinned bazel maven_install.json (v2 format): the lock file itself
// must be consumed (removed from the dependency list) and replaced by the 113
// pinned artifacts, one of which is checked in detail.
@Test
public void testAnalyzePinnedInstallJsonV2() throws Exception {
    try (Engine engine = new Engine(getSettings())) {
        final Dependency result = new Dependency(BaseTest.getResourceAsFile(this, "maven_install_v2.json"));
        engine.addDependency(result);
        analyzer.analyze(result, engine);
        // The lock file is consumed, not kept as a dependency.
        assertFalse(ArrayUtils.contains(engine.getDependencies(), result));
        assertEquals(113, engine.getDependencies().length);
        boolean found = false;
        for (Dependency d : engine.getDependencies()) {
            if ("io.grpc:grpc-protobuf".equals(d.getName())) {
                found = true;
                assertEquals("1.48.1", d.getVersion());
                assertEquals(Ecosystem.JAVA, d.getEcosystem());
            }
        }
        // Fixed failure message: it previously named an unrelated artifact
        // (com.google.errorprone:error_prone_annotations) although the flag
        // tracks io.grpc:grpc-protobuf.
        assertTrue("Expected to find io.grpc:grpc-protobuf:1.48.1", found);
    }
}
/**
 * Executes the given Telegram Bot API request synchronously.
 *
 * @param request request to send
 * @return the typed API response
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
// Sends a video note by file id and validates the returned VideoNote payload.
@Test
public void sendVideoNote() {
    SendVideoNote request = new SendVideoNote(chatId, "DQADAgADmQADYgwpSbum1JrxPsbmAg");
    SendResponse response = bot.execute(request);
    VideoNoteCheck.check(response.message().videoNote());
}
/**
 * Reads a MySQL length-coded binary integer starting at {@code index}.
 * Prefix 251 encodes NULL; 252, 253 and 254 select 2-, 3- and 8-byte
 * little-endian values respectively; any prefix below 251 is the value itself.
 *
 * @param data  packet bytes
 * @param index offset of the length prefix
 * @return the decoded value, or {@code NULL_LENGTH} for the NULL marker
 * @throws IOException if the underlying little-endian read fails
 */
public static long readLengthCodedBinary(byte[] data, int index) throws IOException {
    final int prefix = data[index] & 0xFF;
    if (prefix < 251) {
        // One-byte encoding: the prefix is the value.
        return prefix;
    }
    switch (prefix) {
        case 251:
            return NULL_LENGTH;
        case 252:
            return readUnsignedShortLittleEndian(data, index + 1);
        case 253:
            return readUnsignedMediumLittleEndian(data, index + 1);
        case 254:
            return readUnsignedLongLittleEndian(data, index + 1);
        default:
            // 255 is unused by the encoding; fall back to the raw byte as before.
            return prefix;
    }
}
// Covers the one-byte form, the NULL marker (251 -> -1) and the 2-/3-byte
// little-endian length encodings.
@Test
public void testReadLengthCodedBinary() throws IOException {
    // Prefix below 251: the byte itself is the value.
    Assert.assertEquals(0L, ByteHelper.readLengthCodedBinary(new byte[] {0}, 0));
    // Prefix 251 (-5 as signed byte) is the NULL marker.
    Assert.assertEquals(-1L, ByteHelper.readLengthCodedBinary(new byte[] {-5, -1, -7, 4, -7}, 0));
    // Prefix 253 (-3): three-byte little-endian value.
    Assert.assertEquals(65_021L, ByteHelper.readLengthCodedBinary(new byte[] {-3, -3, -3, 0, -3}, 0));
    // Prefix 252 (-4): two-byte little-endian value.
    Assert.assertEquals(37_119L, ByteHelper.readLengthCodedBinary(new byte[] {-4, -1, -112, 1, -112}, 0));
}
/**
 * Creates an {@code OpensslCipher} for the given transformation using the
 * default (null) engine.
 *
 * @param transformation cipher transformation, e.g. "AES/CTR/NoPadding"
 * @throws NoSuchAlgorithmException if the algorithm is unknown
 * @throws NoSuchPaddingException   if the padding is unknown
 */
public static OpensslCipher getInstance(String transformation)
        throws NoSuchAlgorithmException, NoSuchPaddingException {
    return getInstance(transformation, null);
}
@Test(timeout=120000) public void testGetInstance() throws Exception { Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null); OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding"); Assert.assertTrue(cipher != null); try { cipher = OpensslCipher.getInstance("AES2/CTR/NoPadding"); Assert.fail("Should specify correct algorithm."); } catch (NoSuchAlgorithmException e) { // Expect NoSuchAlgorithmException } try { cipher = OpensslCipher.getInstance("AES/CTR/NoPadding2"); Assert.fail("Should specify correct padding."); } catch (NoSuchPaddingException e) { // Expect NoSuchPaddingException } }