focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public HealthCheckResponse check(HealthCheckRequest request) { HealthCheckResponse.ServingStatus status = statusMap.get(request.getService()); if (status != null) { return HealthCheckResponse.newBuilder().setStatus(status).build(); } throw TriRpcStatus.NOT_FOUND .withDescription("unknown service " + request.getService()) .asException(); }
@Test void testCheck() { TriHealthImpl triHealth = new TriHealthImpl(); HealthCheckRequest request = HealthCheckRequest.newBuilder().build(); HealthCheckResponse response = triHealth.check(request); Assertions.assertEquals(response.getStatus(), HealthCheckResponse.ServingStatus.SERVING); HealthCheckRequest badRequest = HealthCheckRequest.newBuilder().setService("test").build(); Assertions.assertThrows(RpcException.class, () -> triHealth.check(badRequest)); }
public static ComposeCombineFnBuilder compose() { return new ComposeCombineFnBuilder(); }
@Test @Category({ValidatesRunner.class, UsesSideInputs.class}) public void testComposedCombineWithContext() { p.getCoderRegistry().registerCoderForClass(UserString.class, UserStringCoder.of()); PCollectionView<String> view = p.apply(Create.of("I")).apply(View.asSingleton()); PCollection<KV<String, KV<Integer, UserString>>> perKeyInput = p.apply( Create.timestamped( Arrays.asList( KV.of("a", KV.of(1, UserString.of("1"))), KV.of("a", KV.of(1, UserString.of("1"))), KV.of("a", KV.of(4, UserString.of("4"))), KV.of("b", KV.of(1, UserString.of("1"))), KV.of("b", KV.of(13, UserString.of("13")))), Arrays.asList(0L, 4L, 7L, 10L, 16L)) .withCoder( KvCoder.of( StringUtf8Coder.of(), KvCoder.of(BigEndianIntegerCoder.of(), UserStringCoder.of())))); TupleTag<Integer> maxIntTag = new TupleTag<>(); TupleTag<UserString> concatStringTag = new TupleTag<>(); PCollection<KV<String, KV<Integer, String>>> combineGlobally = perKeyInput .apply(Values.create()) .apply( Combine.globally( CombineFns.compose() .with(new GetIntegerFunction(), Max.ofIntegers(), maxIntTag) .with( new GetUserStringFunction(), new ConcatStringWithContext(view), concatStringTag)) .withoutDefaults() .withSideInputs(ImmutableList.of(view))) .apply(WithKeys.of("global")) .apply( "ExtractGloballyResult", ParDo.of(new ExtractResultDoFn(maxIntTag, concatStringTag))); PCollection<KV<String, KV<Integer, String>>> combinePerKey = perKeyInput .apply( Combine.<String, KV<Integer, UserString>, CoCombineResult>perKey( CombineFns.compose() .with(new GetIntegerFunction(), Max.ofIntegers(), maxIntTag) .with( new GetUserStringFunction(), new ConcatStringWithContext(view), concatStringTag)) .withSideInputs(ImmutableList.of(view))) .apply( "ExtractPerKeyResult", ParDo.of(new ExtractResultDoFn(maxIntTag, concatStringTag))); PAssert.that(combineGlobally).containsInAnyOrder(KV.of("global", KV.of(13, "111134I"))); PAssert.that(combinePerKey) .containsInAnyOrder(KV.of("a", KV.of(4, "114I")), KV.of("b", KV.of(13, "113I"))); p.run(); }
public EsIndexBolt(EsConfig esConfig) { this(esConfig, new DefaultEsTupleMapper()); }
@Test public void testEsIndexBolt() throws IOException { Tuple tuple = createTestTuple(index, type); bolt.execute(tuple); verify(outputCollector).ack(tuple); RestHighLevelClient client = EsTestUtil.getRestHighLevelClient(node); RefreshRequest request = new RefreshRequest(index); client.indices().refresh(request, RequestOptions.DEFAULT); SearchRequest searchRequest = new SearchRequest(index); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(new TermQueryBuilder("_type", type)); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); SearchResponse resp = client.search(searchRequest, RequestOptions.DEFAULT); assertEquals(1, resp.getHits().getTotalHits()); }
@UdafFactory(description = "collect values of a field into a single Array") public static <T> TableUdaf<T, List<T>, List<T>> createCollectListT() { return new Collect<>(); }
@Test public void shouldCollectInts() { final TableUdaf<Integer, List<Integer>, List<Integer>> udaf = CollectListUdaf.createCollectListT(); final Integer[] values = new Integer[] {3, 4, 5, 3}; List<Integer> runningList = udaf.initialize(); for (final Integer i : values) { runningList = udaf.aggregate(i, runningList); } assertThat(runningList, contains(3, 4, 5, 3)); }
@Override public SQLRecognizer getUpdateRecognizer(String sql, SQLStatement ast) { return new SqlServerUpdateRecognizer(sql, ast); }
@Test public void getUpdateRecognizerTest() { String sql = "UPDATE t1 SET name = 'name1' WHERE id = 'id1'"; SQLStatement sqlStatement = getSQLStatement(sql); Assertions.assertNotNull(new SqlServerOperateRecognizerHolder().getUpdateRecognizer(sql, sqlStatement)); }
public static <R> Iterator<R> limit(final Iterator<R> iterator, final int limit) { return new Iterator<>() { private int iterated; @Override public boolean hasNext() { return iterated < limit && iterator.hasNext(); } @Override public R next() { iterated++; return iterator.next(); } @Override public void remove() { iterator.remove(); } }; }
@Test public void testUpToNElement_whenIteratorLimited() { Iterator<Integer> limitedIterator = IterableUtil.limit(numbers.iterator(), 2); assertEquals(Integer.valueOf(1), limitedIterator.next()); assertEquals(Integer.valueOf(2), limitedIterator.next()); assertFalse(limitedIterator.hasNext()); }
@Override public Set<TopicAnomaly> topicAnomalies() { LOG.info("Start to detect topic replication factor anomaly."); Cluster cluster = _kafkaCruiseControl.kafkaCluster(); Set<String> topicsToCheck; if (_topicExcludedFromCheck.pattern().isEmpty()) { topicsToCheck = new HashSet<>(cluster.topics()); } else { topicsToCheck = new HashSet<>(); cluster.topics().stream().filter(topic -> !_topicExcludedFromCheck.matcher(topic).matches()).forEach(topicsToCheck::add); } refreshTopicMinISRCache(); if (!topicsToCheck.isEmpty()) { maybeRetrieveAndCacheTopicMinISR(topicsToCheck); Map<Short, Set<TopicReplicationFactorAnomalyEntry>> badTopicsByDesiredRF = populateBadTopicsByDesiredRF(topicsToCheck, cluster); if (!badTopicsByDesiredRF.isEmpty()) { return Collections.singleton(createTopicReplicationFactorAnomaly(badTopicsByDesiredRF, _targetReplicationFactor)); } } return Collections.emptySet(); }
@Test public void testAdjustTopicWithLargeMinISR() throws InterruptedException, ExecutionException, TimeoutException { KafkaCruiseControl mockKafkaCruiseControl = mockKafkaCruiseControl(); short expectedMinISR = 2; AdminClient mockAdminClient = mockAdminClient(expectedMinISR); TopicReplicationFactorAnomalyFinder anomalyFinder = new TopicReplicationFactorAnomalyFinder(mockKafkaCruiseControl, TARGET_TOPIC_REPLICATION_FACTOR, TOPIC_REPLICATION_FACTOR_MARGIN, mockAdminClient); Set<TopicAnomaly> topicAnomalies = anomalyFinder.topicAnomalies(); assertEquals(1, topicAnomalies.size()); TopicReplicationFactorAnomaly topicReplicationFactorAnomaly = (TopicReplicationFactorAnomaly) topicAnomalies.iterator().next(); assertEquals(1, topicReplicationFactorAnomaly.badTopicsByDesiredRF().size()); // We expect the desired replication factor to be 4 (i.e. TOPIC_REPLICATION_FACTOR_MARGIN + expectedMinISR) assertEquals(TOPIC, topicReplicationFactorAnomaly.badTopicsByDesiredRF() .get((short) (TOPIC_REPLICATION_FACTOR_MARGIN + expectedMinISR)) .iterator().next().topicName()); // We expect 1 out of 2 partitions of the topic to violate the target RF. assertEquals(0.5, topicReplicationFactorAnomaly.badTopicsByDesiredRF() .get((short) (TOPIC_REPLICATION_FACTOR_MARGIN + expectedMinISR)) .iterator().next().violationRatio(), DELTA); EasyMock.verify(mockKafkaCruiseControl, mockAdminClient); }
public static Optional<String> findMatchedTargetName(final Collection<String> availableTargetNames, final String suffix, final DataNodeInfo dataNodeInfo) { String targetName = dataNodeInfo.getPrefix() + Strings.padStart(suffix, dataNodeInfo.getSuffixMinLength(), dataNodeInfo.getPaddingChar()); return availableTargetNames.contains(targetName) ? Optional.of(targetName) : Optional.empty(); }
@Test void assertFindMatchedTargetNameWhenTableNotExist() { Optional<String> output = ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, "3", dataNodeInfo); assertFalse(output.isPresent()); }
public static String getJwt(final String authorizationHeader) { return authorizationHeader.replace(TOKEN_PREFIX, ""); }
@Test void testGetJwt_WithEmptyHeader() { // Given String authorizationHeader = ""; // When String jwt = Token.getJwt(authorizationHeader); // Then assertEquals("", jwt); }
public synchronized static void clear(){ fallbackProviderCache.clear(); }
@Test public void clear() { MyNullResponseFallBackProvider myNullResponseFallBackProvider = new MyNullResponseFallBackProvider(); ZuulBlockFallbackManager.registerProvider(myNullResponseFallBackProvider); Assert.assertEquals(myNullResponseFallBackProvider.getRoute(), ROUTE); ZuulBlockFallbackManager.clear(); Assert.assertEquals(ZuulBlockFallbackManager.getFallbackProvider(ROUTE).getRoute(), DEFAULT_ROUTE); }
public void resetPositionsIfNeeded() { Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp(); if (offsetResetTimestamps.isEmpty()) return; resetPositionsAsync(offsetResetTimestamps); }
@Test public void testChangeResetWithInFlightReset() { buildFetcher(); assignFromUser(singleton(tp0)); subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); // Send the ListOffsets request to reset the position offsetFetcher.resetPositionsIfNeeded(); consumerClient.pollNoWakeup(); assertFalse(subscriptions.hasValidPosition(tp0)); assertTrue(client.hasInFlightRequests()); // Now we get a seek from the user subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.EARLIEST); // The response returns and is discarded client.respond(listOffsetResponse(Errors.NONE, 1L, 5L)); consumerClient.pollNoWakeup(); assertFalse(client.hasPendingResponses()); assertFalse(client.hasInFlightRequests()); assertTrue(subscriptions.isOffsetResetNeeded(tp0)); assertEquals(OffsetResetStrategy.EARLIEST, subscriptions.resetStrategy(tp0)); }
@VisibleForTesting List<Container> getContainersFromPreviousAttemptsUnsafe(final Object response) { if (getContainersFromPreviousAttemptsMethod.isPresent() && response != null) { try { @SuppressWarnings("unchecked") final List<Container> containers = (List<Container>) getContainersFromPreviousAttemptsMethod.get().invoke(response); if (containers != null && !containers.isEmpty()) { return containers; } } catch (Exception t) { logger.error("Error invoking 'getContainersFromPreviousAttempts()'", t); } } return Collections.emptyList(); }
@Test void testDoesntCallGetContainersFromPreviousAttemptsMethodIfAbsent() { final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector = new RegisterApplicationMasterResponseReflector(LOG, HasMethod.class); final List<Container> containersFromPreviousAttemptsUnsafe = registerApplicationMasterResponseReflector.getContainersFromPreviousAttemptsUnsafe( new Object()); assertThat(containersFromPreviousAttemptsUnsafe).isEmpty(); }
public static InfluxDBSinkConfig load(String yamlFile) throws IOException { ObjectMapper mapper = new ObjectMapper(new YAMLFactory()); return mapper.readValue(new File(yamlFile), InfluxDBSinkConfig.class); }
@Test public final void loadFromMapCredentialFromSecretTest() throws IOException { Map<String, Object> map = new HashMap<>(); map.put("influxdbUrl", "http://localhost:8086"); map.put("database", "test_db"); map.put("consistencyLevel", "ONE"); map.put("logLevel", "NONE"); map.put("retentionPolicy", "autogen"); map.put("gzipEnable", "false"); map.put("batchTimeMs", "1000"); map.put("batchSize", "100"); SinkContext sinkContext = Mockito.mock(SinkContext.class); Mockito.when(sinkContext.getSecret("username")) .thenReturn("admin"); Mockito.when(sinkContext.getSecret("password")) .thenReturn("admin"); InfluxDBSinkConfig config = InfluxDBSinkConfig.load(map, sinkContext); assertNotNull(config); assertEquals("http://localhost:8086", config.getInfluxdbUrl()); assertEquals("test_db", config.getDatabase()); assertEquals("ONE", config.getConsistencyLevel()); assertEquals("NONE", config.getLogLevel()); assertEquals("autogen", config.getRetentionPolicy()); assertEquals(Boolean.parseBoolean("false"), config.isGzipEnable()); assertEquals(Long.parseLong("1000"), config.getBatchTimeMs()); assertEquals(Integer.parseInt("100"), config.getBatchSize()); assertEquals("admin", config.getUsername()); assertEquals("admin", config.getPassword()); }
@Override public ProtobufSystemInfo.Section toProtobuf() { ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder(); protobuf.setName("System"); setAttribute(protobuf, "Version", server.getVersion()); setAttribute(protobuf, "Official Distribution", officialDistribution.check()); setAttribute(protobuf, "Home Dir", config.get(PATH_HOME.getKey()).orElse(null)); setAttribute(protobuf, "Data Dir", config.get(PATH_DATA.getKey()).orElse(null)); setAttribute(protobuf, "Temp Dir", config.get(PATH_TEMP.getKey()).orElse(null)); setAttribute(protobuf, "Processors", Runtime.getRuntime().availableProcessors()); return protobuf.build(); }
@Test public void return_dir_paths() { settings.setProperty(PATH_HOME.getKey(), "/home"); settings.setProperty(PATH_DATA.getKey(), "/data"); settings.setProperty(PATH_TEMP.getKey(), "/temp"); settings.setProperty(PATH_LOGS.getKey(), "/logs"); settings.setProperty(PATH_WEB.getKey(), "/web"); ProtobufSystemInfo.Section section = underTest.toProtobuf(); assertThatAttributeIs(section, "Home Dir", "/home"); assertThatAttributeIs(section, "Data Dir", "/data"); assertThatAttributeIs(section, "Temp Dir", "/temp"); // logs dir is part of LoggingSection assertThat(attribute(section, "Logs Dir")).isNull(); // for internal usage assertThat(attribute(section, "Web Dir")).isNull(); }
public WebsocketConfig getWebsocket() { return websocket; }
@Test public void testWebsocketConfig() { ShenyuConfig.WebsocketConfig websocket = config.getWebsocket(); websocket.setMaxFramePayloadSize(5); websocket.setEnableProxyPing(true); assertEquals(5, (int) websocket.getMaxFramePayloadSize()); assertEquals(true, websocket.getEnableProxyPing()); }
@Override public Num calculate(BarSeries series, Position position) { Num bars = numberOfBars.calculate(series, position); // If a simple division was used (grossreturn/bars), compounding would not be // considered, leading to inaccuracies in the calculation. // Therefore we need to use "pow" to accurately capture the compounding effect. return bars.isZero() ? series.one() : grossReturn.calculate(series, position).pow(series.one().dividedBy(bars)); }
@Test public void calculateOnlyWithGainPositions() { series = new MockBarSeries(numFunction, 100d, 105d, 110d, 100d, 95d, 105d); TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series), Trade.buyAt(3, series), Trade.sellAt(5, series)); AnalysisCriterion averageProfit = getCriterion(); assertNumEquals(1.0243, averageProfit.calculate(series, tradingRecord)); }
public Optional<Projection> createProjection(final ProjectionSegment projectionSegment) { if (projectionSegment instanceof ShorthandProjectionSegment) { return Optional.of(createProjection((ShorthandProjectionSegment) projectionSegment)); } if (projectionSegment instanceof ColumnProjectionSegment) { return Optional.of(createProjection((ColumnProjectionSegment) projectionSegment)); } if (projectionSegment instanceof ExpressionProjectionSegment) { return Optional.of(createProjection((ExpressionProjectionSegment) projectionSegment)); } if (projectionSegment instanceof AggregationDistinctProjectionSegment) { return Optional.of(createProjection((AggregationDistinctProjectionSegment) projectionSegment)); } if (projectionSegment instanceof AggregationProjectionSegment) { return Optional.of(createProjection((AggregationProjectionSegment) projectionSegment)); } if (projectionSegment instanceof SubqueryProjectionSegment) { return Optional.of(createProjection((SubqueryProjectionSegment) projectionSegment)); } if (projectionSegment instanceof ParameterMarkerExpressionSegment) { return Optional.of(createProjection((ParameterMarkerExpressionSegment) projectionSegment)); } return Optional.empty(); }
@Test void assertCreateProjectionWhenProjectionSegmentInstanceOfAggregationProjectionSegmentAndAggregationTypeIsAvg() { AggregationProjectionSegment aggregationProjectionSegment = new AggregationProjectionSegment(0, 10, AggregationType.AVG, "AVG(1)"); Optional<Projection> actual = new ProjectionEngine(databaseType).createProjection(aggregationProjectionSegment); assertTrue(actual.isPresent()); assertThat(actual.get(), instanceOf(AggregationProjection.class)); }
@Override public void open(Map<String, Object> config, SourceContext sourceContext) throws Exception { log.info("Open MongoDB Source"); mongoSourceConfig = MongoSourceConfig.load(config, sourceContext); mongoSourceConfig.validate(); if (clientProvider != null) { mongoClient = clientProvider.get(); } else { mongoClient = MongoClients.create(mongoSourceConfig.getMongoUri()); } String mongoDatabase = mongoSourceConfig.getDatabase(); if (StringUtils.isEmpty(mongoDatabase)) { // Watch all databases log.info("Watch all databases"); stream = mongoClient.watch(); } else { final MongoDatabase db = mongoClient.getDatabase(mongoDatabase); String mongoCollection = mongoSourceConfig.getCollection(); if (StringUtils.isEmpty(mongoCollection)) { // Watch all collections in a database log.info("Watch db: {}", db.getName()); stream = db.watch(); } else { // Watch a collection final MongoCollection<Document> collection = db.getCollection(mongoCollection); log.info("Watch collection: {}.{}", db.getName(), mongoCollection); stream = collection.watch(); } } stream.batchSize(mongoSourceConfig.getBatchSize()) .fullDocument(FullDocument.UPDATE_LOOKUP); if (mongoSourceConfig.getSyncType() == SyncType.FULL_SYNC) { // sync currently existing messages // startAtOperationTime is the starting point for the change stream // setting startAtOperationTime to 0 means the start point is the earliest // see https://www.mongodb.com/docs/v4.2/reference/method/db.collection.watch/ for more information stream.startAtOperationTime(new BsonTimestamp(0L)); } stream.subscribe(new Subscriber<ChangeStreamDocument<Document>>() { private ObjectMapper mapper = new ObjectMapper(); private Subscription subscription; @Override public void onSubscribe(Subscription subscription) { this.subscription = subscription; this.subscription.request(Integer.MAX_VALUE); } @Override public void onNext(ChangeStreamDocument<Document> doc) { try { log.info("New change doc: {}", doc); BsonDocument documentKey = doc.getDocumentKey(); if 
(documentKey == null) { log.warn("The document key is null"); return; } // Build a record with the essential information final Map<String, Object> recordValue = new HashMap<>(); recordValue.put("fullDocument", doc.getFullDocument()); recordValue.put("ns", doc.getNamespace()); recordValue.put("operation", doc.getOperationType()); consume(new DocRecord( Optional.of(documentKey.toJson()), mapper.writeValueAsString(recordValue).getBytes(StandardCharsets.UTF_8))); } catch (JsonProcessingException e) { log.error("Processing doc from mongo", e); } } @Override public void onError(Throwable error) { log.error("Subscriber error", error); } @Override public void onComplete() { log.info("Subscriber complete"); } }); }
@Test public void testWriteBadMessage() throws Exception { source.open(map, mockSourceContext); subscriber.onNext(new ChangeStreamDocument<>( OperationType.INSERT, BsonDocument.parse("{token: true}"), BsonDocument.parse("{db: \"hello\", coll: \"pulsar\"}"), BsonDocument.parse("{db: \"hello2\", coll: \"pulsar2\"}"), new Document("hello", "pulsar"), BsonDocument.parse("{_id: 1}"), new BsonTimestamp(1234, 2), null, new BsonInt64(1), BsonDocument.parse("{id: 1, uid: 1}"))); Record<byte[]> record = source.read(); assertEquals(new String(record.getValue()), "{\"fullDocument\":{\"hello\":\"pulsar\"}," + "\"ns\":{\"databaseName\":\"hello\",\"collectionName\":\"pulsar\",\"fullName\":\"hello.pulsar\"}," + "\"operation\":\"INSERT\"}"); }
public final void containsNoneOf( @Nullable Object firstExcluded, @Nullable Object secondExcluded, @Nullable Object @Nullable ... restOfExcluded) { containsNoneIn(accumulate(firstExcluded, secondExcluded, restOfExcluded)); }
@Test public void iterableContainsNoneOfFailure() { expectFailureWhenTestingThat(asList(1, 2, 3)).containsNoneOf(1, 2, 4); assertFailureKeys("expected not to contain any of", "but contained", "full contents"); assertFailureValue("expected not to contain any of", "[1, 2, 4]"); assertFailureValue("but contained", "[1, 2]"); assertFailureValue("full contents", "[1, 2, 3]"); }
@Override public void setConfigAttributes(Object attributes) { if (attributes == null) { return; } super.setConfigAttributes(attributes); Map map = (Map) attributes; if (map.containsKey(URL)) { this.url = new UrlArgument((String) map.get(URL)); } if (map.containsKey(USERNAME)) { this.userName = (String) map.get(USERNAME); } if (map.containsKey(PASSWORD_CHANGED) && "1".equals(map.get(PASSWORD_CHANGED))) { String passwordToSet = (String) map.get(PASSWORD); resetPassword(passwordToSet); } this.checkExternals = "true".equals(map.get(CHECK_EXTERNALS)); }
@Test void setConfigAttributes_shouldUpdatePasswordWhenPasswordChangedBooleanChanged() throws Exception { SvnMaterialConfig svnMaterial = svn("", "", "notSoSecret", false); Map<String, String> map = new HashMap<>(); map.put(SvnMaterialConfig.PASSWORD, "secret"); map.put(SvnMaterialConfig.PASSWORD_CHANGED, "1"); svnMaterial.setConfigAttributes(map); assertThat((String) ReflectionUtil.getField(svnMaterial, "password")).isNull(); assertThat(svnMaterial.getPassword()).isEqualTo("secret"); assertThat(svnMaterial.getEncryptedPassword()).isEqualTo(new GoCipher().encrypt("secret")); //Dont change map.put(SvnMaterialConfig.PASSWORD, "Hehehe"); map.put(SvnMaterialConfig.PASSWORD_CHANGED, "0"); svnMaterial.setConfigAttributes(map); assertThat((String) ReflectionUtil.getField(svnMaterial, "password")).isNull(); assertThat(svnMaterial.getPassword()).isEqualTo("secret"); assertThat(svnMaterial.getEncryptedPassword()).isEqualTo(new GoCipher().encrypt("secret")); map.put(SvnMaterialConfig.PASSWORD, ""); map.put(SvnMaterialConfig.PASSWORD_CHANGED, "1"); svnMaterial.setConfigAttributes(map); assertThat(svnMaterial.getPassword()).isNull(); assertThat(svnMaterial.getEncryptedPassword()).isNull(); }
@Override public void applyFlowRule(NetworkId networkId, FlowRule... flowRules) { for (FlowRule flowRule : flowRules) { devirtualize(networkId, flowRule).forEach( r -> flowRuleService.applyFlowRules(r)); } }
@Test public void devirtualizeFlowRuleWithoutInPort() { TrafficSelector ts = DefaultTrafficSelector.builder().build(); TrafficTreatment tr = DefaultTrafficTreatment.builder() .setOutput(PORT_NUM2).build(); FlowRule r1 = DefaultFlowRule.builder() .forDevice(VDID) .withSelector(ts) .withTreatment(tr) .withPriority(10) .fromApp(vAppId) .makeTemporary(TIMEOUT) .build(); virtualProvider.applyFlowRule(VNET_ID, r1); assertEquals("3 rules should exist", 3, virtualProvider.flowRuleService.getFlowRuleCount()); FlowRule inFromDID1 = null; FlowRule inFromDID2 = null; FlowRule out = null; Set<FlowEntry> phyRules = new HashSet<>(); for (FlowEntry i : virtualProvider.flowRuleService.getFlowEntries(DID1)) { phyRules.add(i); } for (FlowEntry i : virtualProvider.flowRuleService.getFlowEntries(DID2)) { phyRules.add(i); } for (FlowRule rule : phyRules) { for (Instruction inst : rule.treatment().allInstructions()) { if (inst.type() == Instruction.Type.L2MODIFICATION) { L2ModificationInstruction i = (L2ModificationInstruction) inst; if (i.subtype() == L2ModificationInstruction.L2SubType.VLAN_PUSH) { inFromDID1 = rule; break; } else { out = rule; break; } } else { inFromDID2 = rule; break; } } } assertEquals(DID1, inFromDID1.deviceId()); assertEquals(DID2, inFromDID2.deviceId()); assertEquals(DID2, out.deviceId()); }
public String getPath() { return null == parentNode ? String.join("/", type) : String.join("/", parentNode, type); }
@Test void assertGetPathWithParentNode() { UniqueRuleItemNodePath uniqueRuleItemNodePath = new UniqueRuleItemNodePath(new RuleRootNodePath("foo"), "test_parent", "test_path"); assertThat(uniqueRuleItemNodePath.getPath(), is("test_parent/test_path")); }
public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() { Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(); return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved); }
@Test public void testWithRealData() throws IOException { CapacityCheckerTester tester = new CapacityCheckerTester(); String path = "./src/test/resources/zookeeper_dump.json"; tester.populateNodeRepositoryFromJsonFile(Paths.get(path)); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList().containsAll(failurePath.get().hostsCausingFailure)); assertEquals(5, failurePath.get().hostsCausingFailure.size()); }
public Set<Long> findCmdIds(List<Status> statusList) throws JobDoesNotExistException { Set<Long> set = new HashSet<>(); for (Map.Entry<Long, CmdInfo> x : mInfoMap.entrySet()) { if (statusList.isEmpty() || statusList.contains(getCmdStatus( x.getValue().getJobControlId()))) { Long key = x.getKey(); set.add(key); } } return set; }
@Test public void testFindCmdIdsForRunning() throws Exception { long runningId = generateMigrateCommandForStatus(Status.RUNNING); mSearchingCriteria.add(Status.RUNNING); Set<Long> runningCmdIds = mCmdJobTracker.findCmdIds(mSearchingCriteria); Assert.assertEquals(runningCmdIds.size(), 1); Assert.assertTrue(runningCmdIds.contains(runningId)); }
@Override public void report(SortedMap<MetricName, Gauge> gauges, SortedMap<MetricName, Counter> counters, SortedMap<MetricName, Histogram> histograms, SortedMap<MetricName, Meter> meters, SortedMap<MetricName, Timer> timers) { if (loggerProxy.isEnabled(marker)) { for (Entry<MetricName, Gauge> entry : gauges.entrySet()) { logGauge(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Counter> entry : counters.entrySet()) { logCounter(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Histogram> entry : histograms.entrySet()) { logHistogram(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Meter> entry : meters.entrySet()) { logMeter(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Timer> entry : timers.entrySet()) { logTimer(entry.getKey(), entry.getValue()); } } }
@Test public void reportsCounterValuesAtError() throws Exception { final Counter counter = mock(Counter.class); when(counter.getCount()).thenReturn(100L); when(logger.isErrorEnabled(marker)).thenReturn(true); errorReporter.report(this.map(), map("test.counter", counter), this.map(), this.map(), this.map()); verify(logger).error(marker, "type={}, name={}, count={}", "COUNTER", "test.counter", 100L); }
@Override public OAuth2AccessTokenDO grantClientCredentials(String clientId, List<String> scopes) { // TODO 芋艿:项目中使用 OAuth2 解决的是三方应用的授权,内部的 SSO 等问题,所以暂时不考虑 client_credentials 这个场景 throw new UnsupportedOperationException("暂时不支持 client_credentials 授权模式"); }
@Test public void testGrantClientCredentials() { assertThrows(UnsupportedOperationException.class, () -> oauth2GrantService.grantClientCredentials(randomString(), emptyList()), "暂时不支持 client_credentials 授权模式"); }
@Override public Iterable<Link> getLinks() { return links.values(); }
@Test public final void testGetLinks() { assertEquals("initialy empty", 0, Iterables.size(linkStore.getLinks())); LinkKey linkId1 = LinkKey.linkKey(new ConnectPoint(DID1, P1), new ConnectPoint(DID2, P2)); LinkKey linkId2 = LinkKey.linkKey(new ConnectPoint(DID2, P2), new ConnectPoint(DID1, P1)); putLink(linkId1, DIRECT); putLink(linkId2, DIRECT); putLink(linkId1, DIRECT); assertEquals("expecting 2 unique link", 2, Iterables.size(linkStore.getLinks())); Map<LinkKey, Link> links = new HashMap<>(); for (Link link : linkStore.getLinks()) { links.put(LinkKey.linkKey(link), link); } assertLink(linkId1, DIRECT, links.get(linkId1)); assertLink(linkId2, DIRECT, links.get(linkId2)); }
@Override public ObjectNode encode(Instruction instruction, CodecContext context) { checkNotNull(instruction, "Instruction cannot be null"); return new EncodeInstructionCodecHelper(instruction, context).encode(); }
@Test public void modIPSrcInstructionTest() { final Ip4Address ip = Ip4Address.valueOf("1.2.3.4"); final L3ModificationInstruction.ModIPInstruction instruction = (L3ModificationInstruction.ModIPInstruction) Instructions.modL3Src(ip); final ObjectNode instructionJson = instructionCodec.encode(instruction, context); assertThat(instructionJson, matchesInstruction(instruction)); }
@Override public long computeLocalQuota(long confUsage, long myUsage, long[] allUsages) throws PulsarAdminException { // ToDo: work out the initial conditions: we may allow a small number of "first few iterations" to go // unchecked as we get some history of usage, or follow some other "TBD" method. if (confUsage < 0) { // This can happen if the RG is not configured with this particular limit (message or byte count) yet. val retVal = -1; if (log.isDebugEnabled()) { log.debug("Configured usage ({}) is not set; returning a special value ({}) for calculated quota", confUsage, retVal); } return retVal; } long totalUsage = 0; for (long usage : allUsages) { totalUsage += usage; } if (myUsage < 0 || totalUsage < 0) { String errMesg = String.format("Local usage (%d) or total usage (%d) is negative", myUsage, totalUsage); log.error(errMesg); throw new PulsarAdminException(errMesg); } // If the total usage is zero (which may happen during initial transients), just return the configured value. // The caller is expected to check the value returned, or not call here with a zero global usage. // [This avoids a division by zero when calculating the local share.] if (totalUsage == 0) { if (log.isDebugEnabled()) { log.debug("computeLocalQuota: totalUsage is zero; " + "returning the configured usage ({}) as new local quota", confUsage); } return confUsage; } if (myUsage > totalUsage) { String errMesg = String.format("Local usage (%d) is greater than total usage (%d)", myUsage, totalUsage); // Log as a warning [in case this can happen transiently (?)]. log.warn(errMesg); } // How much unused capacity is left over? float residual = confUsage - totalUsage; // New quota is the old usage incremented by any residual as a ratio of the local usage to the total usage. // This should result in the calculatedQuota increasing proportionately if total usage is less than the // configured usage, and reducing proportionately if the total usage is greater than the configured usage. 
// Capped to 1, to prevent negative or zero setting of quota. // the rate limiter code assumes that rate value of 0 or less to mean that no rate limit should be applied float myUsageFraction = (float) myUsage / totalUsage; float calculatedQuota = max(myUsage + residual * myUsageFraction, 1); val longCalculatedQuota = (long) calculatedQuota; if (log.isDebugEnabled()) { log.debug("computeLocalQuota: myUsage={}, totalUsage={}, myFraction={}; newQuota returned={} [long: {}]", myUsage, totalUsage, myUsageFraction, calculatedQuota, longCalculatedQuota); } return longCalculatedQuota; }
@Test public void testRQCalcProportionalIncrementTest() throws PulsarAdminException { final long config = 100; final long[] allUsage = { 60 }; final long localUsed1 = 20; final long localUsed2 = 40; final float initialUsageRatio = (float) localUsed1 / localUsed2; final long newQuota1 = this.rqCalc.computeLocalQuota(config, localUsed1, allUsage); final long newQuota2 = this.rqCalc.computeLocalQuota(config, localUsed2, allUsage); final float proposedUsageRatio = (float) newQuota1 / newQuota2; Assert.assertEquals(initialUsageRatio, proposedUsageRatio); }
// Convenience overload: wraps the Mono as an outbound (EntryType.OUT) resource entry.
public static <R> Mono<R> entryWith(String resourceName, Mono<R> actual) {
    return entryWith(resourceName, EntryType.OUT, actual);
}
@Test
public void testReactorEntryWithCommon() {
    String resourceName = createResourceName("testReactorEntryWithCommon");
    // The mapped value must flow through the entry-wrapped Mono unchanged (60 * 3 = 180).
    StepVerifier.create(ReactorSphU.entryWith(resourceName, Mono.just(60))
            .subscribeOn(Schedulers.elastic())
            .map(e -> e * 3))
        .expectNext(180)
        .verifyComplete();

    // Exactly one pass should have been recorded against the resource's cluster node.
    ClusterNode cn = ClusterBuilderSlot.getClusterNode(resourceName);
    assertNotNull(cn);
    assertEquals(1, cn.passQps(), 0.01);
}
/**
 * Schedules the runnable while propagating both the registered context propagators and
 * the submitting thread's MDC (logging context) onto the worker thread.
 */
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
    // Capture the caller's MDC now so it can be installed on the pool thread at run time.
    Map<String, String> mdcContextMap = getMdcContextMap();
    return super.schedule(ContextPropagator.decorateRunnable(contextPropagators, () -> {
        try {
            setMDCContext(mdcContextMap);
            command.run();
        } finally {
            // NOTE(review): clears the worker thread's entire MDC rather than restoring its
            // previous contents — fine for dedicated pool threads; confirm none is shared.
            MDC.clear();
        }
    }), delay, unit);
}
@Test
public void testScheduleCallableWithDelayPropagatesContext() {
    // Value placed in the caller's thread-local must be visible inside the scheduled task.
    TestThreadLocalContextHolder.put("ValueShouldCrossThreadBoundary");

    final ScheduledFuture<?> schedule =
        schedulerService.schedule(() -> TestThreadLocalContextHolder.get().orElse(null), 100, TimeUnit.MILLISECONDS);
    waitAtMost(200, TimeUnit.MILLISECONDS)
        .until(matches(() -> assertThat(schedule.get()).isEqualTo("ValueShouldCrossThreadBoundary")));
}
/**
 * Lists a B2 directory, paging through the server results. Uses the versions listing
 * when versioning is enabled, otherwise plain file names. Throws NotfoundException
 * when neither a placeholder nor any children exist for the directory.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    try {
        final AttributedList<Path> objects = new AttributedList<>();
        // Pagination cursor; advanced after each page until no continuation is returned.
        Marker marker = new Marker(null, null);
        final String containerId = fileid.getVersionId(containerService.getContainer(directory));
        // Seen placeholders
        final Map<String, Long> revisions = new HashMap<>();
        // A bucket itself needs no placeholder object to be considered existing.
        boolean hasDirectoryPlaceholder = containerService.isContainer(directory);
        do {
            if(log.isDebugEnabled()) {
                log.debug(String.format("List directory %s with marker %s", directory, marker));
            }
            final B2ListFilesResponse response;
            if(versioning.isEnabled()) {
                // In alphabetical order by file name, and by reverse of date/time uploaded for
                // versions of files with the same name.
                response = session.getClient().listFileVersions(containerId,
                    marker.nextFilename, marker.nextFileId,
                    chunksize,
                    this.createPrefix(directory), String.valueOf(Path.DELIMITER));
            }
            else {
                response = session.getClient().listFileNames(containerId,
                    marker.nextFilename,
                    chunksize,
                    this.createPrefix(directory), String.valueOf(Path.DELIMITER));
            }
            marker = this.parse(directory, objects, response, revisions);
            if(null == marker.nextFileId) {
                // Any file returned under this prefix implies the directory exists.
                if(!response.getFiles().isEmpty()) {
                    hasDirectoryPlaceholder = true;
                }
            }
            listener.chunk(directory, objects);
        }
        while(marker.hasNext());
        // No placeholder and no children: treat the directory as missing.
        if(!hasDirectoryPlaceholder && objects.isEmpty()) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("No placeholder found for directory %s", directory));
            }
            throw new NotfoundException(directory.getAbsolute());
        }
        return objects;
    }
    catch(B2ApiException e) {
        throw new B2ExceptionMappingService(fileid).map("Listing directory {0} failed", e, directory);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
// Listing a folder with neither a placeholder nor any children must fail with NotfoundException.
@Test(expected = NotfoundException.class)
public void testListNotFoundFolder() throws Exception {
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    new B2ObjectListService(session, fileid)
        .list(new Path(bucket, "notfound", EnumSet.of(Path.Type.directory)), new DisabledListProgressListener());
}
/**
 * HTML-escapes the raw input first, then converts its markdown markup to HTML.
 * Escaping up front keeps any user-provided markup from being rendered verbatim.
 */
public static String convertToHtml(String input) {
    final String escaped = StringEscapeUtils.escapeHtml4(input);
    return new Markdown().convert(escaped);
}
// Markdown links must render with target="_blank" and rel="noopener noreferrer".
@Test
public void shouldDecorateDocumentedLink() {
    assertThat(Markdown.convertToHtml("For more details, please [check online documentation](http://docs.sonarsource.com/sonarqube/display/SONAR)."))
        .isEqualTo("For more details, please <a href=\"http://docs.sonarsource.com/sonarqube/display/SONAR\" target=\"_blank\" rel=\"noopener noreferrer\">check online documentation</a>.");
}
/**
 * Resolves the last type parameter of {@code supertype} within {@code genericContext}.
 *
 * @deprecated thin delegate kept for backward compatibility; use
 *             {@link Types#resolveLastTypeParameter(Type, Class)} directly.
 */
@Deprecated
public static Type resolveLastTypeParameter(Type genericContext, Class<?> supertype) throws IllegalStateException {
    return Types.resolveLastTypeParameter(genericContext, supertype);
}
// A parameterized subtype fixes the decoder's type argument; it must resolve to List<String>.
@Test
void resolveLastTypeParameterWhenParameterizedSubtype() throws Exception {
    Type context = LastTypeParameter.class.getDeclaredField("PARAMETERIZED_DECODER_LIST_STRING")
        .getGenericType();
    Type listStringType = LastTypeParameter.class.getDeclaredField("LIST_STRING").getGenericType();
    Type last = resolveLastTypeParameter(context, ParameterizedDecoder.class);
    assertThat(last).isEqualTo(listStringType);
}
// Keeps elements x with x.compareTo(value) >= 0 (inclusive lower bound);
// the description string is attached for display purposes.
public static <T extends Comparable<T>> Filter<T> greaterThanEq(final T value) {
    return by((ProcessFunction<T, Boolean>) input -> input.compareTo(value) >= 0)
        .described(String.format("x ≥ %s", value));
}
@Test
@Category(NeedsRunner.class)
public void testFilterGreaterThanEq() {
    // Inclusive lower bound: the threshold element 4 itself must be kept.
    PCollection<Integer> output =
        p.apply(Create.of(1, 2, 3, 4, 5, 6, 7)).apply(Filter.greaterThanEq(4));
    PAssert.that(output).containsInAnyOrder(4, 5, 6, 7);
    p.run();
}
/**
 * Renders as {@code [isSuccess:<flag>]}, appending {@code , msg:<message>} only when
 * a non-blank message is present.
 */
@Override
public String toString() {
    final StringBuilder out = new StringBuilder("[").append("isSuccess:").append(isSuccess);
    if (StringUtils.isNotBlank(message)) {
        out.append(", msg:").append(message);
    }
    return out.append("]").toString();
}
// With a blank message only the success flag is rendered, no msg segment.
@Test
public void testToStringEmptyMessage() {
    assertEquals("[isSuccess:false]", result.toString());
}
/**
 * Returns the accepted issuers of the currently loaded trust manager, or an empty
 * array while no trust manager has been (re)loaded yet.
 */
@Override
public X509Certificate[] getAcceptedIssuers() {
    final X509TrustManager current = trustManagerRef.get();
    return current == null ? EMPTY : current.getAcceptedIssuers();
}
@Test (timeout = 30000)
public void testReloadCorruptTrustStore() throws Exception {
    KeyPair kp = generateKeyPair("RSA");
    cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
    cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
    String truststoreLocation = BASEDIR + "/testcorrupt.jks";
    createTrustStore(truststoreLocation, "password", "cert1", cert1);

    long reloadInterval = 10;
    Timer fileMonitoringTimer =
        new Timer(FileBasedKeyStoresFactory.SSL_MONITORING_THREAD_NAME, true);
    ReloadingX509TrustManager tm =
        new ReloadingX509TrustManager("jks", truststoreLocation, "password");
    try {
        // Poll the truststore file on a short interval so a reload is attempted quickly.
        fileMonitoringTimer.schedule(new FileMonitoringTimerTask(
            Paths.get(truststoreLocation), tm::loadFrom,null), reloadInterval, reloadInterval);
        assertEquals(1, tm.getAcceptedIssuers().length);
        final X509Certificate cert = tm.getAcceptedIssuers()[0];

        // Wait so that the file modification time is different
        Thread.sleep((reloadInterval + 1000));
        assertFalse(reloaderLog.getOutput().contains(
            FileMonitoringTimerTask.PROCESS_ERROR_MESSAGE));

        // Corrupt the truststore on disk: subsequent reload attempts must fail...
        OutputStream os = new FileOutputStream(truststoreLocation);
        os.write(1);
        os.close();

        waitForFailedReloadAtLeastOnce((int) reloadInterval);

        // ...while the previously loaded (valid) certificate stays in effect.
        assertEquals(1, tm.getAcceptedIssuers().length);
        assertEquals(cert, tm.getAcceptedIssuers()[0]);
    } finally {
        reloaderLog.stopCapturing();
        fileMonitoringTimer.cancel();
    }
}
/** Static factory for a new, empty {@code Inner} instance. */
public static <T> Inner<T> create() {
    return new Inner<>();
}
// Filtering on a field name absent from the schema must be rejected with IllegalArgumentException.
@Test
@Category(NeedsRunner.class)
public void testMissingFieldName() {
    thrown.expect(IllegalArgumentException.class);
    pipeline
        .apply(Create.of(new AutoValue_FilterTest_Simple("pass", 52, 2)))
        .apply(Filter.<AutoValue_FilterTest_Simple>create().whereFieldName("missing", f -> true));
    pipeline.run();
}
// Schema migration is explicitly disabled for this dialect.
// NOTE(review): presumably because it is an embedded/test-only database — confirm
// against the enclosing Dialect contract.
@Override
public boolean supportsMigration() {
    return false;
}
// The H2 dialect must report that it does not support migrations.
@Test
public void h2_does_not_supportMigration() {
    assertThat(underTest.supportsMigration()).isFalse();
}
/**
 * Returns field-value setters for the given type/schema pair.
 * Results are memoized per (type, schema) key since setter generation is expensive.
 */
public static List<FieldValueSetter> getSetters(
    TypeDescriptor<?> typeDescriptor,
    Schema schema,
    FieldValueTypeSupplier fieldValueTypeSupplier,
    TypeConversionsFactory typeConversionsFactory) {
  // Return the setters, ordered by their position in the schema.
  return CACHED_SETTERS.computeIfAbsent(
      TypeDescriptorWithSchema.create(typeDescriptor, schema),
      c -> {
        List<FieldValueTypeInformation> types = fieldValueTypeSupplier.get(typeDescriptor, schema);
        return types.stream()
            .map(t -> createSetter(t, typeConversionsFactory))
            .collect(Collectors.toList());
      });
}
@Test
public void testGeneratedSimpleSetters() {
    SimplePOJO simplePojo = new SimplePOJO();
    List<FieldValueSetter> setters =
        POJOUtils.getSetters(
            new TypeDescriptor<SimplePOJO>() {},
            SIMPLE_POJO_SCHEMA,
            JavaFieldTypeSupplier.INSTANCE,
            new DefaultTypeConversionsFactory());
    // One setter per schema field, in schema order.
    assertEquals(12, setters.size());

    // Drive each generated setter, then verify the value landed on the matching POJO field.
    setters.get(0).set(simplePojo, "field1");
    setters.get(1).set(simplePojo, (byte) 41);
    setters.get(2).set(simplePojo, (short) 42);
    setters.get(3).set(simplePojo, (int) 43);
    setters.get(4).set(simplePojo, (long) 44);
    setters.get(5).set(simplePojo, true);
    setters.get(6).set(simplePojo, DATE.toInstant());
    setters.get(7).set(simplePojo, INSTANT);
    setters.get(8).set(simplePojo, BYTE_ARRAY);
    setters.get(9).set(simplePojo, BYTE_BUFFER.array());
    setters.get(10).set(simplePojo, new BigDecimal(42));
    setters.get(11).set(simplePojo, "stringBuilder");

    assertEquals("field1", simplePojo.str);
    assertEquals((byte) 41, simplePojo.aByte);
    assertEquals((short) 42, simplePojo.aShort);
    assertEquals((int) 43, simplePojo.anInt);
    assertEquals((long) 44, simplePojo.aLong);
    assertTrue(simplePojo.aBoolean);
    assertEquals(DATE, simplePojo.dateTime);
    assertEquals(INSTANT, simplePojo.instant);
    assertArrayEquals("Unexpected bytes", BYTE_ARRAY, simplePojo.bytes);
    assertEquals(BYTE_BUFFER, simplePojo.byteBuffer);
    assertEquals(new BigDecimal(42), simplePojo.bigDecimal);
    assertEquals("stringBuilder", simplePojo.stringBuilder.toString());
}
/**
 * Selects the sharding route engine for the statement. Dispatch order matters:
 * TCL → DDL (with a cursor-aware special case) → DAL → DCL, falling through to DQL routing.
 */
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
                                              final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
    SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
    SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
    if (sqlStatement instanceof TCLStatement) {
        // Transaction control is broadcast to every database.
        return new ShardingDatabaseBroadcastRoutingEngine();
    }
    if (sqlStatement instanceof DDLStatement) {
        if (sqlStatementContext instanceof CursorAvailable) {
            // Cursor-related DDL gets its own routing decision.
            return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
        }
        return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    if (sqlStatement instanceof DALStatement) {
        return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
    }
    if (sqlStatement instanceof DCLStatement) {
        return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
@Test
void assertNewInstanceForCursorStatementWithShardingTable() {
    // A cursor statement whose tables are all sharding tables must get standard sharding routing.
    CursorStatementContext cursorStatementContext = mock(CursorStatementContext.class, RETURNS_DEEP_STUBS);
    OpenGaussCursorStatement cursorStatement = mock(OpenGaussCursorStatement.class);
    when(cursorStatementContext.getSqlStatement()).thenReturn(cursorStatement);
    Collection<SimpleTableSegment> tableSegments = createSimpleTableSegments();
    Collection<String> tableNames = tableSegments.stream().map(each -> each.getTableName().getIdentifier().getValue()).collect(Collectors.toSet());
    when(cursorStatementContext.getTablesContext().getSimpleTables()).thenReturn(tableSegments);
    when(cursorStatementContext.getTablesContext().getTableNames()).thenReturn(tableNames);
    when(cursorStatementContext.getTablesContext().getDatabaseName()).thenReturn(Optional.empty());
    when(shardingRule.isAllShardingTables(tableNames)).thenReturn(true);
    when(shardingRule.getShardingRuleTableNames(tableNames)).thenReturn(tableNames);
    when(shardingRule.getShardingLogicTableNames(tableNames)).thenReturn(tableNames);
    QueryContext queryContext = new QueryContext(cursorStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
    ShardingRouteEngine actual =
        ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
    assertThat(actual, instanceOf(ShardingStandardRoutingEngine.class));
}
/**
 * Widens a {@code short} to an {@code int}, treating its 16 bits as unsigned
 * (result range 0..65535).
 */
public static int toUnsignedInt(short value) {
    // Equivalent to Short.toUnsignedInt: mask off the sign-extended upper bits.
    return value & 0xFFFF;
}
@Test
public void testShortToUnsignedInt() {
    // Cross-check the current implementation against the previous hand-rolled one for each sample.
    getShortTestData().forEach(
        val -> assertEquals(val.toString(), toUnsignedIntPreviousImplementation(val), BitmapUtils.toUnsignedInt(val)));
}
/**
 * Returns the override registered for the app's package, falling back to the wrapped
 * provider when no override exists.
 */
@Override
public OverlayData createOverlayData(ComponentName remoteApp) {
    final String packageName = remoteApp.getPackageName();
    // containsKey (not a plain get) is kept deliberately so an explicit null override
    // is honored rather than falling through to the original provider.
    if (!mOverrides.containsKey(packageName)) {
        return mOriginal.createOverlayData(remoteApp);
    }
    return mOverrides.get(packageName);
}
@Test
public void testReturnsOriginalIfNotInMap() {
    // A package without an override must fall through to the wrapped provider.
    Assert.assertEquals(
        Color.GRAY,
        mUnderTest
            .createOverlayData(new ComponentName("com.example4", "Activity"))
            .getPrimaryColor());
    Mockito.verify(mOriginal).createOverlayData(new ComponentName("com.example4", "Activity"));
}
// Package-private accessor for the catalog's current capacity value.
long capacity() {
    return capacity;
}
@Test
void shouldAllowMultipleInstancesForSameStream() {
    // A second catalog over the same archive directory must see the same capacity
    // while handing out a distinct recording id.
    try (Catalog catalog = new Catalog(archiveDir, clock)) {
        assertEquals(CAPACITY, catalog.capacity());

        final long newRecordingId = newRecording();
        assertNotEquals(recordingOneId, newRecordingId);
    }
}
/**
 * Evaluates a SASL/PLAIN client response: parses authzid/authcid/passwd, verifies the
 * credentials through the configured callback handler, and records the authorization id.
 * Returns an empty challenge on success; throws SaslAuthenticationException otherwise.
 */
@Override
public byte[] evaluateResponse(byte[] responseBytes) throws SaslAuthenticationException {
    /*
     * Message format (from https://tools.ietf.org/html/rfc4616):
     *
     * message   = [authzid] UTF8NUL authcid UTF8NUL passwd
     * authcid   = 1*SAFE ; MUST accept up to 255 octets
     * authzid   = 1*SAFE ; MUST accept up to 255 octets
     * passwd    = 1*SAFE ; MUST accept up to 255 octets
     * UTF8NUL   = %x00 ; UTF-8 encoded NUL character
     *
     * SAFE      = UTF1 / UTF2 / UTF3 / UTF4
     *                ;; any UTF-8 encoded Unicode character except NUL
     */
    String response = new String(responseBytes, StandardCharsets.UTF_8);
    List<String> tokens = extractTokens(response);
    String authorizationIdFromClient = tokens.get(0);
    String username = tokens.get(1);
    String password = tokens.get(2);

    if (username.isEmpty()) {
        throw new SaslAuthenticationException("Authentication failed: username not specified");
    }
    if (password.isEmpty()) {
        throw new SaslAuthenticationException("Authentication failed: password not specified");
    }

    NameCallback nameCallback = new NameCallback("username", username);
    PlainAuthenticateCallback authenticateCallback = new PlainAuthenticateCallback(password.toCharArray());
    try {
        // Delegate the actual credential check to the configured callback handler.
        callbackHandler.handle(new Callback[]{nameCallback, authenticateCallback});
    } catch (Throwable e) {
        // NOTE(review): catching Throwable also swallows Errors; confirm the breadth is intended.
        throw new SaslAuthenticationException("Authentication failed: credentials for user could not be verified", e);
    }
    if (!authenticateCallback.authenticated())
        throw new SaslAuthenticationException("Authentication failed: Invalid username or password");
    // An authzid, when present, must equal the authenticated user: impersonation is unsupported.
    if (!authorizationIdFromClient.isEmpty() && !authorizationIdFromClient.equals(username))
        throw new SaslAuthenticationException("Authentication failed: Client requested an authorization id that is different from username");

    this.authorizationId = username;

    complete = true;
    return new byte[0];
}
// PLAIN must reject an authzid that differs from the authenticated username.
@Test
public void authorizationIdNotEqualsAuthenticationId() {
    assertThrows(SaslAuthenticationException.class,
        () -> saslServer.evaluateResponse(saslMessage(USER_B, USER_A, PASSWORD_A)));
}
/**
 * Classifies the track at {@code time} by majority vote over the k nearest points.
 * Only a strict IFR majority yields IFR; ties and everything else yield VFR.
 *
 * @throws IllegalArgumentException if the track does not span {@code time}
 */
public <T> IfrVfrStatus statusOf(Track<T> track, Instant time) {
    checkNotNull(track);
    checkNotNull(time);
    checkArgument(
        track.asTimeWindow().contains(time),
        "This track does not exist at this moment in time"
    );

    // Tally IFR vs. VFR votes among the nearest points; other statuses do not count.
    int ifrVotes = 0;
    int vfrVotes = 0;
    for (Point<T> nearby : track.kNearestPoints(time, numPointsToConsider)) {
        IfrVfrStatus vote = statusOf(nearby);
        if (vote == IFR) {
            ifrVotes++;
        } else if (vote == VFR) {
            vfrVotes++;
        }
    }
    return (ifrVotes > vfrVotes) ? IFR : VFR;
}
@Test
public void testStatusOfPoint_zeroBeaconNoCallsign() {
    //this raw source was manually edited to remove the flight rules (VFR/IFR) field
    String rawNop = "[RH],STARS,A11_B,03/31/2018,23:05:47.612,,,,0000,000,140,315,061.42058,-149.52365,2766,0000,14.2051,14.6263,,,,A11,,,,,,ACT,,,00000,,,,,,,1,,1,{RH}";

    Point<NopHit> point = NopHit.from(rawNop);

    // Sanity-check the fixture: no beacon, no flight-rules field, no callsign.
    assertEquals(0, point.rawData().beaconActualAsInt());
    assertFalse(point.rawData().hasFlightRules());
    assertFalse(point.hasValidCallsign());

    IfrVfrAssigner assigner = new IfrVfrAssigner();

    assertEquals(
        VFR,
        assigner.statusOf(point),
        "When the beacon is 0000 the aircraft should be VFR"
    );
}
/**
 * Deserializes a ContainerEndpoint from slime. clusterId, scope and names are mandatory
 * (IllegalStateException when missing); weight, routing method and auth method are
 * optional and fall back to defaults.
 */
public static ContainerEndpoint endpointFromSlime(Inspector inspector) {
    String clusterId = inspector.field(clusterIdField).asString();
    String scope = inspector.field(scopeField).asString();
    Inspector namesInspector = inspector.field(namesField);
    OptionalInt weight = SlimeUtils.optionalInteger(inspector.field(weightField));
    // assign default routingmethod. Remove when 7.507 is latest version
    // Cannot be used before all endpoints are assigned explicit routing method (from controller)
    ApplicationClusterEndpoint.RoutingMethod routingMethod = SlimeUtils.optionalString(inspector.field(routingMethodField))
            .map(ContainerEndpointSerializer::routingMethodFrom)
            .orElse(ApplicationClusterEndpoint.RoutingMethod.sharedLayer4);
    ApplicationClusterEndpoint.AuthMethod authMethod = SlimeUtils.optionalString(inspector.field(authMethodField))
            .map(ContainerEndpointSerializer::authMethodFrom)
            .orElse(ApplicationClusterEndpoint.AuthMethod.mtls);

    if (clusterId.isEmpty()) {
        throw new IllegalStateException("'clusterId' missing on serialized ContainerEndpoint");
    }
    if (scope.isEmpty()) {
        throw new IllegalStateException("'scope' missing on serialized ContainerEndpoint");
    }
    if (!namesInspector.valid()) {
        throw new IllegalStateException("'names' missing on serialized ContainerEndpoint");
    }

    // Collect the endpoint names in serialized order.
    List<String> names = new ArrayList<>();
    namesInspector.traverse((ArrayTraverser) (idx, nameInspector) -> {
        final var containerName = nameInspector.asString();
        names.add(containerName);
    });

    return new ContainerEndpoint(clusterId, scopeFrom(scope), names, weight, routingMethod, authMethod);
}
@Test
public void readSingleEndpoint() {
    final var slime = new Slime();
    final var entry = slime.setObject();

    entry.setString("clusterId", "foobar");
    entry.setString("scope", "application");
    final var entryNames = entry.setArray("names");
    entryNames.addString("a");
    entryNames.addString("b");

    // Only the mandatory fields are serialized; optional ones should fall back to defaults.
    final var endpoint = ContainerEndpointSerializer.endpointFromSlime(slime.get());
    assertEquals("foobar", endpoint.clusterId());
    assertEquals(ApplicationClusterEndpoint.Scope.application, endpoint.scope());
    assertEquals(List.of("a", "b"), endpoint.names());
}
// Forwards the fatal error to the handler injected by the runtime.
@Override
public void fatal(Throwable t) {
    fatalHandler.accept(t);
}
@Test
public void testFatal() {
    Throwable fatalException = new Exception("test-fatal-exception");
    AtomicBoolean fatalInvoked = new AtomicBoolean(false);
    // Wire the context with a handler that records the exact throwable it receives.
    context = new ContextImpl(
        config,
        logger,
        client,
        new EnvironmentBasedSecretsProvider(),
        FunctionCollectorRegistry.getDefaultImplementation(),
        new String[0],
        FunctionDetails.ComponentType.FUNCTION,
        null,
        new InstanceStateManager(),
        pulsarAdmin,
        clientBuilder,
        t -> {
            assertEquals(t, fatalException);
            fatalInvoked.set(true);
        },
        producerCache);
    context.fatal(fatalException);
    // fatal() must forward the same throwable to the handler synchronously.
    assertTrue(fatalInvoked.get());
}
/**
 * Runs every known crash-log {@link Rule} pattern against the log text and collects a
 * {@link Result} for each rule that matches.
 *
 * @param log the raw crash-report/log text
 * @return the (possibly empty) set of matched results
 */
public static Set<Result> analyze(String log) {
    Set<Result> results = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (matcher.find()) {
            results.add(new Result(rule, log, matcher));
        }
    }
    return results;
}

/**
 * @deprecated misspelled name kept for backward compatibility; use {@link #analyze(String)}.
 */
@Deprecated
public static Set<Result> anaylze(String log) {
    return analyze(log);
}
@Test
public void incompleteForgeInstallation() throws IOException {
    // NOTE(review): 'result' is never asserted on — presumably findResultByRule fails the test
    // internally when the rule is absent; confirm, or add an explicit assertion here.
    CrashReportAnalyzer.Result result = findResultByRule(
            CrashReportAnalyzer.anaylze(loadLog("/logs/incomplete_forge_installation.txt")),
            CrashReportAnalyzer.Rule.INCOMPLETE_FORGE_INSTALLATION);
}
/**
 * Handles the "subscribe" control action: builds the subscription either from a control
 * message in the body or from the message headers, then places the new filter id in the
 * body and completes the exchange synchronously.
 */
@InvokeOnHeader(CONTROL_ACTION_SUBSCRIBE)
public void performSubscribe(final Message message, AsyncCallback callback) {
    final boolean bodyIsControlMessage = message.getBody() instanceof DynamicRouterControlMessage;
    final String filterId = bodyIsControlMessage
            ? subscribeFromMessage(dynamicRouterControlService, message, false)
            : subscribeFromHeaders(dynamicRouterControlService, message, false);
    message.setBody(filterId);
    callback.done(false);
}
@Test
void performSubscribeActionWithMessageInBodyWithEmptyExpression() {
    // An empty predicate expression must surface as a missing predicate bean.
    String subscribeChannel = "testChannel";
    DynamicRouterControlMessage subMsg = DynamicRouterControlMessage.Builder.newBuilder()
        .subscribeChannel(subscribeChannel)
        .subscriptionId("testId")
        .destinationUri("mock://test")
        .priority(10)
        .predicate("")
        .expressionLanguage("simple")
        .build();
    when(message.getBody()).thenReturn(subMsg);
    when(message.getBody(DynamicRouterControlMessage.class)).thenReturn(subMsg);
    Exception ex = assertThrows(IllegalStateException.class, () -> producer.performSubscribe(message, callback));
    assertEquals("Predicate bean could not be found", ex.getMessage());
}
/**
 * Renders a parsed config value back to its string form: null stays null, a null type
 * falls back to toString(), lists are comma-joined, classes use their binary name,
 * and all scalar types use toString().
 */
public static String convertToString(Object parsedValue, Type type) {
    if (parsedValue == null) {
        return null;
    }
    if (type == null) {
        return parsedValue.toString();
    }
    switch (type) {
        case LIST:
            // Comma-joined element strings, mirroring the parse format.
            StringBuilder joined = new StringBuilder();
            for (Object element : (List<?>) parsedValue) {
                if (joined.length() > 0) {
                    joined.append(',');
                }
                joined.append(element.toString());
            }
            return joined.toString();
        case CLASS:
            return ((Class<?>) parsedValue).getName();
        case BOOLEAN:
        case SHORT:
        case INT:
        case LONG:
        case DOUBLE:
        case STRING:
        case PASSWORD:
            return parsedValue.toString();
        default:
            throw new IllegalStateException("Unknown type.");
    }
}
@Test
public void testConvertValueToStringClass() throws ClassNotFoundException {
    String actual = ConfigDef.convertToString(ConfigDefTest.class, Type.CLASS);
    assertEquals("org.apache.kafka.common.config.ConfigDefTest", actual);

    // Additionally validate that we can look up this class by this name
    assertEquals(ConfigDefTest.class, Class.forName(actual));

    // A null parsed value must convert to null regardless of type.
    assertNull(ConfigDef.convertToString(null, Type.CLASS));
}
/**
 * Runs all migration steps in order, notifying the listener after each completed step.
 * On both success and failure the global profiler is closed with the matching outcome
 * and migration telemetry (duration, completed steps, success flag) is recorded.
 */
@Override
public void execute(List<RegisteredMigrationStep> steps, MigrationStatusListener listener) {
    Profiler globalProfiler = Profiler.create(LOGGER);
    globalProfiler.startInfo(GLOBAL_START_MESSAGE, databaseMigrationState.getTotalMigrations());
    boolean allStepsExecuted = false;
    try {
        for (RegisteredMigrationStep step : steps) {
            this.execute(step);
            listener.onMigrationStepCompleted();
        }
        allStepsExecuted = true;
    } finally {
        // Removed the dead initial assignment (= 0L); both branches always produce a value.
        long dbMigrationDuration = allStepsExecuted
            ? globalProfiler.stopInfo(GLOBAL_END_MESSAGE, databaseMigrationState.getCompletedMigrations(),
                databaseMigrationState.getTotalMigrations(), "success")
            : globalProfiler.stopError(GLOBAL_END_MESSAGE, databaseMigrationState.getCompletedMigrations(),
                databaseMigrationState.getTotalMigrations(), "failure");
        telemetryDbMigrationTotalTimeProvider.setDbMigrationTotalTime(dbMigrationDuration);
        telemetryDbMigrationStepsProvider.setDbMigrationCompletedSteps(databaseMigrationState.getCompletedMigrations());
        telemetryDbMigrationSuccessProvider.setDbMigrationSuccess(allStepsExecuted);
    }
}
@Test
void whenExecute_TelemetryDataIsProperlyAdded() {
    migrationContainer.add(MigrationStep2.class, MigrationStep1.class, MigrationStep3.class);
    when(databaseMigrationState.getCompletedMigrations()).thenReturn(3);
    List<RegisteredMigrationStep> steps = asList(
        registeredStepOf(1, MigrationStep2.class),
        registeredStepOf(2, MigrationStep1.class),
        registeredStepOf(3, MigrationStep3.class));
    ((SpringComponentContainer) migrationContainer).startComponents();

    underTest.execute(steps, migrationStatusListener);

    // A successful run must report a positive duration, the completed-step count, and success=true.
    assertThat(telemetryDbMigrationTotalTimeProvider.getValue().get()).isPositive();
    assertThat(telemetryDbMigrationStepsProvider.getValue()).hasValue(3);
    assertThat(telemetryDbMigrationSuccessProvider.getValue()).hasValue(true);
}
public static Builder forCurrentMagic(ProduceRequestData data) { return forMagic(RecordBatch.CURRENT_MAGIC_VALUE, data); }
@Test
public void testV6AndBelowCannotUseZStdCompression() {
    // Build a ZSTD-compressed record batch to embed in the produce request.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, Compression.zstd().build(),
            TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());

    ProduceRequestData produceData = new ProduceRequestData()
        .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList(
            new ProduceRequestData.TopicProduceData()
                .setName("test")
                .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData()
                    .setIndex(0)
                    .setRecords(builder.build()))))
            .iterator()))
        .setAcks((short) 1)
        .setTimeoutMs(1000);
    // Can't create ProduceRequest instance with version within [3, 7)
    for (short version = 3; version < 7; version++) {
        ProduceRequest.Builder requestBuilder = new ProduceRequest.Builder(version, version, produceData);
        assertThrowsForAllVersions(requestBuilder, UnsupportedCompressionTypeException.class);
    }

    // Works fine with current version (>= 7)
    ProduceRequest.forCurrentMagic(produceData);
}
/**
 * Builds the lowercase elevation tile file name (e.g. "n42e011") for the tile
 * containing the given coordinate.
 */
String getFileName(double lat, double lon) {
    final int tileLon = getMinLonForTile(lon);
    final int tileLat = getMinLatForTile(lat);
    final String tileName = getNorthString(tileLat) + getPaddedLatString(tileLat)
            + getEastString(tileLon) + getPaddedLonString(tileLon);
    return toLowerCase(tileName);
}
// NOTE(review): disabled — presumably needs the elevation tile data locally; confirm before enabling.
@Disabled
@Test
public void testGetEleVerticalBorder() {
    // Border between the tiles n42e011 and n43e011
    assertEquals("n42e011", instance.getFileName(42.999999, 11.48));
    assertEquals(420, instance.getEle(42.999999, 11.48), precision);
    assertEquals("n43e011", instance.getFileName(43.000001, 11.48));
    assertEquals(420, instance.getEle(43.000001, 11.48), precision);
}
// Test-only accessor exposing the manager's parsed selectors.
@VisibleForTesting
public List<ResourceGroupSelector> getSelectors() {
    return selectors;
}
@Test
public void testQueryTypeConfiguration() throws IOException {
    FileResourceGroupConfigurationManager manager = parse("resource_groups_config_query_type.json");
    List<ResourceGroupSelector> selectors = manager.getSelectors();
    // Each known query type must route to its dedicated group; anything else falls through
    // to "other", and the final criteria with an extra tag selects the refined "other-2".
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("select"), Optional.empty(), Optional.empty(), Optional.empty()), "global.select");
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("explain"), Optional.empty(), Optional.empty(), Optional.empty()), "global.explain");
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("insert"), Optional.empty(), Optional.empty(), Optional.empty()), "global.insert");
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("delete"), Optional.empty(), Optional.empty(), Optional.empty()), "global.delete");
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("describe"), Optional.empty(), Optional.empty(), Optional.empty()), "global.describe");
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("data_definition"), Optional.empty(), Optional.empty(), Optional.empty()), "global.data_definition");
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("sth_else"), Optional.empty(), Optional.empty(), Optional.empty()), "global.other");
    assertMatch(selectors, new SelectionCriteria(true, "test_user", Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.of("sth_else"), Optional.of("client2_34"), Optional.empty(), Optional.empty()), "global.other-2");
}
// Element-wise square root over this indicator's values.
public NumericIndicator sqrt() {
    return NumericIndicator.of(UnaryOperation.sqrt(this));
}
@Test
public void sqrt() {
    final NumericIndicator numericIndicator = NumericIndicator.of(cp1);
    final NumericIndicator dynamicOp = numericIndicator.sqrt();
    // Spot-check a few indices; presumably cp1 holds 1 at index 0, 2 at index 1 and 9 at index 8.
    assertNumEquals(1, dynamicOp.getValue(0));
    assertNumEquals(Math.sqrt(2.0), dynamicOp.getValue(1));
    assertNumEquals(3, dynamicOp.getValue(8));
}
/**
 * Creates one input split per key-group range, with the split index taken from the
 * range's position in the sorted list.
 */
@Override
public KeyGroupRangeInputSplit[] createInputSplits(int minNumSplits) throws IOException {
    final int maxParallelism = operatorState.getMaxParallelism();
    final List<KeyGroupRange> keyGroups = sortedKeyGroupRanges(minNumSplits, maxParallelism);

    final KeyGroupRangeInputSplit[] splits = new KeyGroupRangeInputSplit[keyGroups.size()];
    for (int index = 0; index < splits.length; index++) {
        splits[index] =
                createKeyGroupRangeInputSplit(
                        operatorState, maxParallelism, keyGroups.get(index), index);
    }
    return splits;
}
@Test
public void testReadTime() throws Exception {
    OperatorID operatorID = OperatorIDGenerator.fromUid("uid");
    OperatorSubtaskState state =
        createOperatorSubtaskState(new KeyedProcessOperator<>(new StatefulFunctionWithTime()));
    OperatorState operatorState = new OperatorState(operatorID, 1, 128);
    operatorState.putState(0, state);
    KeyedStateInputFormat<?, ?, ?> format =
        new KeyedStateInputFormat<>(
            operatorState,
            new MemoryStateBackend(),
            new Configuration(),
            new KeyedStateReaderOperator<>(new TimerReaderFunction(), Types.INT),
            new ExecutionConfig());
    KeyGroupRangeInputSplit split = format.createInputSplits(1)[0];

    KeyedStateReaderFunction<Integer, Integer> userFunction = new TimerReaderFunction();

    List<Integer> data = readInputSplit(split, userFunction);

    // Each key appears twice — presumably once per registered timer type; confirm
    // against StatefulFunctionWithTime.
    Assert.assertEquals(
        "Incorrect data read from input split", Arrays.asList(1, 1, 2, 2, 3, 3), data);
}
/**
 * Provides the computation (CE) temp folder: ensures {@code <serverTemp>/ce} exists,
 * then hands out a fresh, self-deleting sub-directory inside it.
 */
@Bean("ComputationTempFolder")
public TempFolder provide(ServerFileSystem fs) {
    final File ceTempDir = new File(fs.getTempDir(), "ce");
    try {
        // Creates the whole path if missing; no-op when it already exists.
        FileUtils.forceMkdir(ceTempDir);
    } catch (IOException e) {
        throw new IllegalStateException("Unable to create computation temp directory " + ceTempDir, e);
    }
    final File computationDir = new DefaultTempFolder(ceTempDir).newDir();
    return new DefaultTempFolder(computationDir, true);
}
@Test
public void create_temp_dir_if_missing() throws Exception {
    ServerFileSystem fs = mock(ServerFileSystem.class);
    File tmpDir = temp.newFolder();
    when(fs.getTempDir()).thenReturn(tmpDir);
    // Delete the directory so the provider is forced to recreate the full path itself.
    FileUtils.forceDelete(tmpDir);

    TempFolder folder = underTest.provide(fs);
    assertThat(folder).isNotNull();
    File newDir = folder.newDir();
    assertThat(newDir).exists().isDirectory();
    // The created directory must live underneath the server temp dir.
    assertThat(newDir.getParentFile().getCanonicalPath()).startsWith(tmpDir.getCanonicalPath());
}
/**
 * Recursively populates {@code toReturn} from the given JSON object node.
 * Scalars are evaluated as literals against the declared field type, arrays become
 * lists, nested objects are instantiated and recursed into; empty text nodes are skipped.
 */
protected Object createAndFillObject(ObjectNode json, Object toReturn, String className, List<String> genericClasses) {
    Iterator<Map.Entry<String, JsonNode>> fields = json.fields();
    while (fields.hasNext()) {
        Map.Entry<String, JsonNode> element = fields.next();
        String key = element.getKey();
        JsonNode jsonNode = element.getValue();
        if (isSimpleTypeNode(jsonNode)) {
            // Scalar: convert the literal text using the field's declared type.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            setField(toReturn, key, internalLiteralEvaluation(getSimpleTypeNodeTextValue(jsonNode), fieldDescriptor.getKey()));
        } else if (jsonNode.isArray()) {
            List<Object> nestedList = new ArrayList<>();
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            List<Object> returnedList = createAndFillList((ArrayNode) jsonNode, nestedList, fieldDescriptor.getKey(), fieldDescriptor.getValue());
            setField(toReturn, key, returnedList);
        } else if (jsonNode.isObject()) {
            // Nested object: create the field's declared type, then recurse into it.
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            Object nestedObject = createObject(fieldDescriptor.getKey(), fieldDescriptor.getValue());
            Object returnedObject = createAndFillObject((ObjectNode) jsonNode, nestedObject, fieldDescriptor.getKey(), fieldDescriptor.getValue());
            setField(toReturn, key, returnedObject);
        } else if (!isEmptyText(jsonNode)) {
            Map.Entry<String, List<String>> fieldDescriptor = getFieldClassNameAndGenerics(toReturn, key, className, genericClasses);
            setField(toReturn, key, internalLiteralEvaluation(jsonNode.textValue(), fieldDescriptor.getKey()));
        } else {
            // empty strings are skipped
        }
    }
    return toReturn;
}
@Test
public void convertObject_nestedObject() {
    ObjectNode objectNode = new ObjectNode(factory);
    ObjectNode nestedObject = new ObjectNode(factory);
    objectNode.set("nested", nestedObject);
    nestedObject.put("field", "fieldValue");
    // With a Map target, nested JSON objects must materialize as nested maps.
    Object result = expressionEvaluator.createAndFillObject(objectNode,
                                                            new HashMap<>(),
                                                            String.class.getCanonicalName(),
                                                            List.of());
    assertThat(result).isInstanceOf(Map.class);
    Map<String, Object> resultMap = (Map<String, Object>) result;
    assertThat(resultMap).hasSize(1);
    Map<String, Object> nested = (Map<String, Object>) resultMap.get("nested");
    assertThat(nested).hasSize(1).containsEntry("field", "fieldValue");
}
/**
 * FEEL "started by"-style range check: true when the range's low boundary is closed and
 * the given point equals the range's low endpoint.
 *
 * @param range the range to test; must not be null
 * @param point the candidate point; must not be null and must be comparable to the
 *              range's endpoints
 * @return a boolean result, or an {@code InvalidParametersEvent} error when an argument
 *         is null or the point cannot be compared to the range
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "range" ) Range range, @ParameterName( "point" ) Comparable point) {
    if ( point == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
    }
    if ( range == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
    }
    try {
        boolean result = ( range.getLowBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getLowEndPoint() ) == 0 );
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
    }
}
/** A null range and a null point must each produce an InvalidParametersEvent error. */
@Test
void invokeParamIsNull() {
    FunctionTestUtil.assertResultError(startedByFunction.invoke(null, "b"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(startedByFunction.invoke(new RangeImpl(), (Comparable) null), InvalidParametersEvent.class);
}
/**
 * Creates a view over the requested subpartitions. A single-index set delegates to the
 * per-subpartition factory directly; a multi-index set wraps all member views in a
 * {@code UnionResultSubpartitionView}. If creating any member view fails, the resources
 * already acquired by the union view are released before the exception is rethrown.
 *
 * @param indexSet             the subpartition indexes to read
 * @param availabilityListener notified when data becomes available
 * @throws IOException if a member subpartition view cannot be created
 */
@Override
public ResultSubpartitionView createSubpartitionView(
        ResultSubpartitionIndexSet indexSet,
        BufferAvailabilityListener availabilityListener)
        throws IOException {
    if (indexSet.size() == 1) {
        return createSubpartitionView(
                indexSet.values().iterator().next(), availabilityListener);
    } else {
        UnionResultSubpartitionView unionView =
                new UnionResultSubpartitionView(availabilityListener, indexSet.size());
        try {
            for (int i : indexSet.values()) {
                // Member views report availability to the union view, not the caller.
                ResultSubpartitionView view = createSubpartitionView(i, unionView);
                unionView.notifyViewCreated(i, view);
            }
            return unionView;
        } catch (Exception e) {
            // Don't leak partially-created member views on failure.
            unionView.releaseAllResources();
            throw e;
        }
    }
}
/**
 * When creating a union view fails partway (index set larger than the available views),
 * every member view created so far must have been released.
 */
@Test
void testReleaseAllResourcesAtFailure() {
    final int maxNumSubpartitions = 4;
    final ResultSubpartitionIndexSet indexSet =
            new ResultSubpartitionIndexSet(0, maxNumSubpartitions);
    final BufferAvailabilityListener availabilityListener =
            (ResultSubpartitionView view) -> {};
    for (int numSubpartitions = 1; numSubpartitions < maxNumSubpartitions; numSubpartitions++) {
        List<ResultSubpartitionView> views = new ArrayList<>();
        for (int i = 0; i < numSubpartitions; i++) {
            views.add(new NoOpResultSubpartitionViewWithReleaseListener());
        }
        ResultPartition partition =
                TestingResultPartition.newBuilder()
                        .setCreateSubpartitionViewFunction(
                                (index, listener) -> views.get(index))
                        .build();
        // The index set always exceeds the number of views, forcing a mid-loop failure.
        assertThatThrownBy(
                        () -> partition.createSubpartitionView(indexSet, availabilityListener))
                .isInstanceOf(IndexOutOfBoundsException.class);
        assertThat(views).allMatch(ResultSubpartitionView::isReleased);
    }
}
/**
 * Caches the parsed {@code RequestHandle} of the given rule under the rule's cache key.
 * Rules without a handle payload are ignored.
 *
 * @param ruleData the rule whose handle should be cached
 */
@Override
public void handlerRule(final RuleData ruleData) {
    final String rawHandle = ruleData.getHandle();
    if (rawHandle != null) {
        final RequestHandle parsedHandle = GsonUtils.getInstance().fromJson(rawHandle, RequestHandle.class);
        CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), parsedHandle);
    }
}
/** After handling a rule, its handle must be retrievable under the rule's cache key. */
@Test
public void testHandlerRule() {
    this.requestPluginHandler.handlerRule(this.ruleData);
    assertNotNull(RequestPluginHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(this.ruleData)));
}
/**
 * Adds the call to the queue for its priority level, allowing spill-over into other
 * queues. On overflow, throws a {@code CallQueueOverflowException} whose flavor depends
 * on configuration: FAILOVER when server fail-over is enabled, DISCONNECT when the
 * caller is at the lowest priority level, otherwise KEEPALIVE.
 *
 * @param e the schedulable call to enqueue
 * @return true when the call was accepted
 * @throws CallQueueOverflowException when every eligible queue is full
 */
@Override
public boolean add(E e) {
    final int priorityLevel = e.getPriorityLevel();
    // try offering to all queues.
    if (!offerQueues(priorityLevel, e, true)) {
        CallQueueOverflowException ex;
        if (serverFailOverEnabled) {
            // Signal clients to failover and try a separate server.
            ex = CallQueueOverflowException.FAILOVER;
        } else if (priorityLevel == queues.size() - 1){
            // only disconnect the lowest priority users that overflow the queue.
            ex = CallQueueOverflowException.DISCONNECT;
        } else {
            ex = CallQueueOverflowException.KEEPALIVE;
        }
        throw ex;
    }
    return true;
}
@SuppressWarnings("unchecked") @Test public void testInsertionWithFailover() { Configuration conf = new Configuration(); // Config for server to throw StandbyException instead of the // regular RetriableException if call queue is full. // 3 queues, 2 slots each. fcq = Mockito.spy(new FairCallQueue<>(3, 6, "ns", true, conf)); Schedulable p0 = mockCall("a", 0); Schedulable p1 = mockCall("b", 1); // add to first queue. addToQueueAndVerify(p0, 1, 0, 0); // 0:x- 1:-- 2:-- // add to second queue. addToQueueAndVerify(p1, 0, 1, 0); // 0:x- 1:x- 2:-- // add to first queue. addToQueueAndVerify(p0, 1, 0, 0); // 0:xx 1:x- 2:-- // add to first full queue spills over to second. addToQueueAndVerify(p0, 1, 1, 0); // 0:xx 1:xx 2:-- // add to second full queue spills over to third. addToQueueAndVerify(p1, 0, 1, 1); // 0:xx 1:xx 2:x- // add to first and second full queue spills over to third. addToQueueAndVerify(p0, 1, 1, 1); // 0:xx 1:xx 2:xx // adding non-lowest priority with all queues full throws a // standby exception for client to try another server. Mockito.reset(fcq); try { fcq.add(p0); fail("didn't fail"); } catch (IllegalStateException ise) { checkOverflowException(ise, RpcStatusProto.FATAL, true); } }
/** Returns the shared JSON {@link ObjectMapper} instance. */
public static ObjectMapper ofJson() {
    return MAPPER;
}
/**
 * Round-trips a Pojo through the JSON mapper (with the Europe/Athens default time zone
 * applied to a copy of the mapper) and checks the deserialized value matches the original.
 */
@Test
@DefaultTimeZone("Europe/Athens")
void json() throws IOException {
    // copy() so the shared mapper's configuration is not mutated by the test.
    ObjectMapper mapper = JacksonMapper
        .ofJson()
        .copy()
        .setTimeZone(TimeZone.getDefault());
    Pojo original = pojo();
    String s = mapper.writeValueAsString(original);
    Pojo deserialize = mapper.readValue(s, Pojo.class);
    test(original, deserialize);
}
/** Returns the extent this location belongs to. */
public Extent getExtent() {
    return extent;
}
/** A location constructed from a world must report that world as its extent. */
@Test
public void testGetWorld() throws Exception {
    World world = mock(World.class);
    Location location = new Location(world);
    assertEquals(world, location.getExtent());
}
/**
 * FEEL list replace(list, position, newItem): returns a copy of {@code list} with the
 * element at {@code position} replaced by {@code newItem}. Positive positions are
 * 1-based from the start; negative positions count back from the end (-1 = last element).
 * The input list is never mutated.
 *
 * @param list     the source list; must not be null
 * @param position 1-based, non-zero index with |position| <= list.size()
 * @param newItem  the replacement value; numeric values are coerced via NumberEvalHelper
 * @return the new list, or an {@code InvalidParametersEvent} error for null or
 *         out-of-range arguments
 */
public FEELFnResult<List> invoke(@ParameterName("list") List list, @ParameterName("position") BigDecimal position, @ParameterName("newItem") Object newItem) {
    if (list == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", CANNOT_BE_NULL));
    }
    if (position == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", CANNOT_BE_NULL));
    }
    int intPosition = position.intValue();
    if (intPosition == 0 || Math.abs(intPosition) > list.size()) {
        String paramProblem = String.format("%s outside valid boundaries (1-%s)", intPosition, list.size());
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", paramProblem));
    }
    Object coercedItem = NumberEvalHelper.coerceNumber(newItem);
    // Copy first so the caller's list stays untouched (raw types avoided internally).
    List<Object> toReturn = new ArrayList<>(list);
    // Map the 1-based (possibly negative) FEEL position onto a 0-based Java index.
    int replacementPosition = intPosition > 0 ? intPosition - 1 : list.size() - Math.abs(intPosition);
    toReturn.set(replacementPosition, coercedItem);
    return FEELFnResult.ofResult(toReturn);
}
/**
 * Match-function overload: every element satisfying "item < newItem" is replaced by 5,
 * so in [2, 4, 7, 8] the first two elements become 5.
 */
@Test
void invokeReplaceByMatchWithNotNull() {
    String validMatchFunction = "function(item, newItem) item < newItem";
    Object expressionObject = parseCodegenCompileEvaluate(validMatchFunction);
    assertThat(expressionObject).isInstanceOf(AbstractCustomFEELFunction.class);
    List list = Arrays.asList(BigDecimal.valueOf(2), BigDecimal.valueOf(4), BigDecimal.valueOf(7), BigDecimal.valueOf(8));
    List expected = new ArrayList<>(list);
    expected.set(0, BigDecimal.valueOf(5));
    expected.set(1, BigDecimal.valueOf(5));
    FunctionTestUtil.assertResult(listReplaceFunction.invoke(list, (AbstractCustomFEELFunction)expressionObject, 5), expected);
}
/**
 * Runs the statements contained in a KSQL script file through the interactive
 * {@code handleLine} path, printing any error to the terminal.
 *
 * @param scriptFile path of the script file to execute
 * @return {@code NO_ERROR} on success, {@code ERROR} if reading or executing fails
 */
public int runScript(final String scriptFile) {
    int errorCode = NO_ERROR;
    RemoteServerSpecificCommand.validateClient(terminal.writer(), restClient);
    try {
        // RUN SCRIPT calls the `makeKsqlRequest` directly, which does not support PRINT/SELECT.
        //
        // To avoid interfere with the RUN SCRIPT behavior, this code loads the content of the
        // script and execute it with the 'handleLine', which supports PRINT/SELECT statements.
        //
        // RUN SCRIPT should be fixed to support PRINT/SELECT, but also should prevent override
        // variables and properties from the CLI session.
        final String content = Files.readAllLines(Paths.get(scriptFile), StandardCharsets.UTF_8)
            .stream().collect(Collectors.joining(System.lineSeparator()));
        handleLine(content);
    } catch (final Exception exception) {
        errorCode = ERROR;
        LOGGER.error("An error occurred while running a script file. Error = "
            + exception.getMessage(), exception);
        terminal.printError(ErrorMessageUtil.buildErrorMessage(exception), exception.toString());
    }
    // Flush regardless of outcome so partial output is not lost.
    terminal.flush();
    return errorCode;
}
/**
 * When the REST client cannot reach the server, runScript must print a hint asking the
 * user to verify the server URL.
 */
@Test
public void shouldPrintErrorIfCantConnectToRestServerOnRunScript() throws Exception {
    // Given:
    final KsqlRestClient mockRestClient = givenMockRestClient();
    when(mockRestClient.getServerInfo())
        .thenThrow(new KsqlRestClientException("Boom", new IOException("")));
    new Cli(1L, 1L, mockRestClient, console)
        .runScript("script_file_ignored");
    assertThat(terminal.getOutputString(),
        containsString("Please ensure that the URL provided is for an active KSQL server."));
}
/**
 * Starts the executor thread. Calling start on an already-started executor is a no-op:
 * the thread and the shutdown gate are created only on the first invocation.
 */
@Override
public void start() {
    if (taskExecutorThread != null) {
        // Already started; nothing to do.
        return;
    }
    taskExecutorThread = new TaskExecutorThread(name);
    taskExecutorThread.start();
    shutdownGate = new CountDownLatch(1);
}
/** A task that throws during processing must be unassigned without ever being flushed. */
@Test
public void shouldNotFlushOnException() {
    final StreamsException exception = mock(StreamsException.class);
    when(task.process(anyLong())).thenThrow(exception);
    when(taskManager.hasUncaughtException(task.id())).thenReturn(true);
    taskExecutor.start();
    verify(taskManager, timeout(VERIFICATION_TIMEOUT)).unassignTask(task, taskExecutor);
    verify(task, never()).flush();
}
/**
 * Handles START TRANSACTION: validates the client supports transactions and that no
 * transaction is already active, then begins a new transaction with the requested (or
 * default) isolation level and read-only mode, recording its id on the state machine.
 *
 * @return an immediately-completed future (the work is synchronous)
 * @throws PrestoException INCOMPATIBLE_CLIENT or NOT_SUPPORTED on precondition failure
 */
@Override
public ListenableFuture<?> execute(StartTransaction statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters) {
    Session session = stateMachine.getSession();
    if (!session.isClientTransactionSupport()) {
        throw new PrestoException(StandardErrorCode.INCOMPATIBLE_CLIENT, "Client does not support transactions");
    }
    if (session.getTransactionId().isPresent()) {
        throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, "Nested transactions not supported");
    }
    Optional<IsolationLevel> isolationLevel = extractIsolationLevel(statement);
    Optional<Boolean> readOnly = extractReadOnly(statement);
    TransactionId transactionId = transactionManager.beginTransaction(
            isolationLevel.orElse(TransactionManager.DEFAULT_ISOLATION),
            readOnly.orElse(TransactionManager.DEFAULT_READ_ONLY),
            false);
    stateMachine.setStartedTransactionId(transactionId);
    // Since the current session does not contain this new transaction ID, we need to manually mark it as inactive
    // when this statement completes.
    transactionManager.trySetInactive(transactionId);
    return immediateFuture(null);
}
/**
 * START TRANSACTION with explicit SERIALIZABLE isolation and READ ONLY mode must begin a
 * transaction carrying those settings, outside of an auto-commit context.
 */
@Test
public void testStartTransactionExplicitModes() {
    Session session = sessionBuilder()
        .setClientTransactionSupport()
        .build();
    TransactionManager transactionManager = createTestTransactionManager();
    QueryStateMachine stateMachine = createQueryStateMachine("START TRANSACTION", session, true, transactionManager, executor, metadata);
    assertFalse(stateMachine.getSession().getTransactionId().isPresent());
    StartTransactionTask startTransactionTask = new StartTransactionTask();
    getFutureValue(startTransactionTask.execute(
        new StartTransaction(ImmutableList.of(new Isolation(Isolation.Level.SERIALIZABLE), new TransactionAccessMode(true))),
        transactionManager,
        metadata,
        new AllowAllAccessControl(),
        stateMachine,
        emptyList()));
    assertFalse(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId());
    assertTrue(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().isPresent());
    assertEquals(transactionManager.getAllTransactionInfos().size(), 1);
    TransactionInfo transactionInfo = transactionManager.getTransactionInfo(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().get());
    assertEquals(transactionInfo.getIsolationLevel(), IsolationLevel.SERIALIZABLE);
    assertTrue(transactionInfo.isReadOnly());
    assertFalse(transactionInfo.isAutoCommitContext());
}
/**
 * Builds the aggregated view for a workflow instance. A null instance yields an empty
 * aggregate. When {@code statusKnown} is true (or no prior aggregated info exists) the
 * instance's own status is used verbatim; otherwise the aggregated status is computed.
 *
 * @param workflowInstance the instance to aggregate, may be null
 * @param statusKnown      whether the instance status can be taken as-is
 * @return the aggregated view, never null
 */
public static WorkflowInstanceAggregatedInfo computeAggregatedView(
    WorkflowInstance workflowInstance, boolean statusKnown) {
    if (workflowInstance == null) {
        // returning empty object since cannot access state of the current instance run
        return new WorkflowInstanceAggregatedInfo();
    }
    WorkflowInstanceAggregatedInfo instanceAggregated =
        computeAggregatedViewNoStatus(workflowInstance);
    if (statusKnown || workflowInstance.getAggregatedInfo() == null) {
        instanceAggregated.setWorkflowInstanceStatus(workflowInstance.getStatus());
    } else {
        computeAndSetAggregatedInstanceStatus(workflowInstance, instanceAggregated);
    }
    return instanceAggregated;
}
/**
 * With no runtime overview, every runtime step must be reported as NOT_CREATED while the
 * known instance status (STOPPED) is used verbatim because statusKnown is true.
 */
@Test
public void testComputeAggregatedViewWithoutRuntimeOverview() {
    WorkflowInstance instance =
        getGenericWorkflowInstance(
            2,
            WorkflowInstance.Status.STOPPED,
            RunPolicy.RESTART_FROM_BEGINNING,
            RestartPolicy.RESTART_FROM_BEGINNING);
    instance.setAggregatedInfo(new WorkflowInstanceAggregatedInfo());
    instance.getAggregatedInfo().setStepAggregatedViews(Collections.emptyMap());
    instance.getAggregatedInfo().setWorkflowInstanceStatus(WorkflowInstance.Status.SUCCEEDED);
    Workflow runtimeWorkflow = mock(Workflow.class);
    instance.setRuntimeWorkflow(runtimeWorkflow);
    Step step1 = mock(Step.class);
    when(step1.getId()).thenReturn("step1");
    Step step2 = mock(Step.class);
    when(step2.getId()).thenReturn("step2");
    Step step3 = mock(Step.class);
    when(step3.getId()).thenReturn("step3");
    when(runtimeWorkflow.getSteps()).thenReturn(Arrays.asList(step1, step2, step3));
    WorkflowInstanceAggregatedInfo aggregated =
        AggregatedViewHelper.computeAggregatedView(instance, true);
    assertEquals(
        StepInstance.Status.NOT_CREATED,
        aggregated.getStepAggregatedViews().get("step1").getStatus());
    assertEquals(
        StepInstance.Status.NOT_CREATED,
        aggregated.getStepAggregatedViews().get("step2").getStatus());
    assertEquals(
        StepInstance.Status.NOT_CREATED,
        aggregated.getStepAggregatedViews().get("step3").getStatus());
    assertEquals(WorkflowInstance.Status.STOPPED, aggregated.getWorkflowInstanceStatus());
}
/**
 * Returns the local file backing the given permanent blob of a job, delegating to
 * {@code getFileInternal} (retrieval/validation details live there).
 *
 * @throws IOException if the blob cannot be provided
 */
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
    checkNotNull(jobId);
    return getFileInternal(jobId, key);
}
/**
 * A blob whose on-disk content was tampered with after writing must be rejected with an
 * IOException when the cache serves it.
 */
@Test
void permanentBlobCacheChecksForCorruptedBlobsAtStart(@TempDir Path storageDirectory)
        throws IOException {
    final JobID jobId = new JobID();
    final PermanentBlobKey blobKey =
            TestingBlobUtils.writePermanentBlob(
                    storageDirectory, jobId, new byte[] {1, 2, 3, 4});
    final File blobFile =
            new File(
                    BlobUtils.getStorageLocationPath(
                            storageDirectory.toString(), jobId, blobKey));
    // Corrupt the stored bytes so they no longer match the blob key.
    FileUtils.writeByteArrayToFile(blobFile, new byte[] {4, 3, 2, 1});
    try (PermanentBlobCache permanentBlobCache =
            new PermanentBlobCache(
                    new Configuration(),
                    storageDirectory.toFile(),
                    new VoidBlobStore(),
                    null)) {
        assertThatThrownBy(() -> permanentBlobCache.getFile(jobId, blobKey))
                .isInstanceOf(IOException.class);
    }
}
/**
 * Builds the request mapping for a plugin controller method. When the handler type is
 * annotated with {@code ApiVersion}, the mapping is combined with a plugin-scoped path
 * prefix built from the plugin id and declared API version; otherwise the plain mapping
 * is returned unchanged.
 *
 * @param pluginId    id of the plugin owning the handler
 * @param method      the handler method
 * @param handlerType the controller class declaring the method
 * @return the (possibly prefixed) mapping, or null if the method has no mapping
 */
protected RequestMappingInfo getPluginMappingForMethod(String pluginId, Method method, Class<?> handlerType) {
    RequestMappingInfo info = super.getMappingForMethod(method, handlerType);
    if (info != null) {
        ApiVersion apiVersion = handlerType.getAnnotation(ApiVersion.class);
        if (apiVersion == null) {
            // No version annotation: leave the default mapping untouched.
            return info;
        }
        info = RequestMappingInfo.paths(buildPrefix(pluginId, apiVersion.value())).build()
            .combine(info);
    }
    return info;
}
/** A handler annotated with an API version gets the plugin-scoped path prefix. */
@Test
public void shouldAddPathPrefixWhenExistingApiVersion() throws Exception {
    Method method = UserController.class.getMethod("getUser");
    RequestMappingInfo info =
        this.handlerMapping.getPluginMappingForMethod("fakePlugin", method, UserController.class);
    assertThat(info).isNotNull();
    assertThat(info.getPatternsCondition().getPatterns()).isEqualTo(
        Collections.singleton(
            new PathPatternParser().parse(
                "/apis/api.plugin.halo.run/v1alpha1/plugins/fakePlugin/user/{id}")));
}
/** Dispatches this visitor over the given expression (visitor-pattern double dispatch). */
private <T> T accept(Expression<T> expr) {
    return expr.accept(this);
}
/**
 * Exercises the full OR truth table (T|T, T|F, F|T, F|F) plus an OR condition loaded
 * from JSON, all evaluated through the visitor.
 */
@Test
public void testOr() throws Exception {
    final Expr.Greater trueExpr = Expr.Greater.create(Expr.NumberValue.create(2), Expr.NumberValue.create(1));
    final Expr.Greater falseExpr = Expr.Greater.create(Expr.NumberValue.create(1), Expr.NumberValue.create(2));
    assertThat(Expr.Or.create(trueExpr, trueExpr).accept(new BooleanNumberConditionsVisitor()))
        .isTrue();
    assertThat(Expr.Or.create(trueExpr, falseExpr).accept(new BooleanNumberConditionsVisitor()))
        .isTrue();
    assertThat(Expr.Or.create(falseExpr, trueExpr).accept(new BooleanNumberConditionsVisitor()))
        .isTrue();
    assertThat(Expr.Or.create(falseExpr, falseExpr).accept(new BooleanNumberConditionsVisitor()))
        .isFalse();
    assertThat(loadCondition("condition-or.json").accept(new BooleanNumberConditionsVisitor()))
        .isTrue();
}
/**
 * Returns the configured application whitelist for a remote region, or null when no
 * whitelist property is set. A null region name selects the "global" whitelist.
 * Region names are normalized by trimming and lower-casing with {@code Locale.ROOT} so
 * the property lookup key is stable regardless of the JVM's default locale (avoids the
 * Turkish dotless-i problem with plain {@code toLowerCase()}).
 *
 * @param regionName the remote region, or null for the global whitelist
 * @return the set of whitelisted application names, or null when the property is absent
 */
@Nullable
@Override
public Set<String> getRemoteRegionAppWhitelist(@Nullable String regionName) {
    if (null == regionName) {
        regionName = "global";
    } else {
        regionName = regionName.trim().toLowerCase(java.util.Locale.ROOT);
    }
    DynamicStringProperty appWhiteListProp = configInstance.getStringProperty(
            namespace + "remoteRegion." + regionName + ".appWhiteList", null);
    if (null == appWhiteListProp || null == appWhiteListProp.get()) {
        return null;
    } else {
        String appWhiteListStr = appWhiteListProp.get();
        String[] whitelistEntries = appWhiteListStr.split(",");
        return new HashSet<>(Arrays.asList(whitelistEntries));
    }
}
/** A null region name must resolve to the "global" whitelist property. */
@Test
public void testGetRegionAppWhiteList() throws Exception {
    String globalWhiteListApp = "myapp";
    String regionWhiteListApp = "myapp";
    ConfigurationManager.getConfigInstance().setProperty("eureka.remoteRegion.global.appWhiteList", globalWhiteListApp);
    ConfigurationManager.getConfigInstance().setProperty("eureka.remoteRegion.region1.appWhiteList", regionWhiteListApp);
    DefaultEurekaServerConfig config = new DefaultEurekaServerConfig();
    Set<String> regionList = config.getRemoteRegionAppWhitelist(null);
    Assert.assertNotNull("Region whitelist is null.", regionList);
    Assert.assertEquals("Region whitelist not as expected.", 1, regionList.size());
    Assert.assertEquals("Region whitelist not as expected.", regionWhiteListApp, regionList.iterator().next());
}
/**
 * Parses a date-time string with the given pattern and converts it to a legacy
 * {@link Date} using the system default time zone.
 *
 * @param time    the date-time text to parse
 * @param pattern the {@link DateTimeFormatter} pattern describing {@code time}
 * @return the parsed instant as a {@code java.util.Date}
 */
public static Date toDateTime(String time, String pattern) {
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern);
    LocalDateTime parsed = LocalDateTime.parse(time, formatter);
    return Date.from(parsed.atZone(ZoneId.systemDefault()).toInstant());
}
/** Smoke test: the date/date-time conversion helpers return non-null for valid input. */
@Test
public void testToDateTime() {
    Date date = DateKit.toDate("2017-09-09", "yyyy-MM-dd");
    Assert.assertNotNull(date);
    date = DateKit.toDate(time);
    Assert.assertNotNull(date);
    Date dateTime = DateKit.toDateTime("2017-09-09 11:22:33", "yyyy-MM-dd HH:mm:ss");
    Assert.assertNotNull(dateTime);
    LocalDate localDate = DateKit.toLocalDate("2017-09-09", "yyyy-MM-dd");
    Assert.assertNotNull(localDate);
    LocalDateTime localDateTime = DateKit.toLocalDateTime("2017-09-09 11:22:33", "yyyy-MM-dd HH:mm:ss");
    Assert.assertNotNull(localDateTime);
}
/**
 * Lists all top-level namespaces of this catalog. Namespaces are gathered from both the
 * table storage and the namespace-properties storage, truncated to their first level,
 * and de-duplicated.
 */
@Override
public List<Namespace> listNamespaces() {
    List<Namespace> namespaces = Lists.newArrayList();
    namespaces.addAll(
        fetch(
            row -> JdbcUtil.stringToNamespace(row.getString(JdbcUtil.TABLE_NAMESPACE)),
            JdbcUtil.LIST_ALL_NAMESPACES_SQL,
            catalogName));
    namespaces.addAll(
        fetch(
            row -> JdbcUtil.stringToNamespace(row.getString(JdbcUtil.NAMESPACE_NAME)),
            JdbcUtil.LIST_ALL_PROPERTY_NAMESPACES_SQL,
            catalogName));
    namespaces =
        namespaces.stream()
            // drop the empty (zero-level) namespace
            .filter(n -> n.levels().length >= 1)
            // keep only the first level of each namespace
            .map(n -> Namespace.of(Arrays.stream(n.levels()).limit(1).toArray(String[]::new)))
            // remove duplicates
            .distinct()
            .collect(Collectors.toList());
    return namespaces;
}
/**
 * Listing namespaces at various depths: top-level listing returns first-level names
 * only (including SQL-wildcard-like names "d_" and "d%"), nested listings return their
 * children, and listing a nonexistent namespace throws NoSuchNamespaceException.
 */
@Test
public void testListNamespace() {
    TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
    TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
    TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
    TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
    TableIdentifier tbl5 = TableIdentifier.of("db2", "metadata");
    TableIdentifier tbl6 = TableIdentifier.of("tbl6");
    TableIdentifier tbl7 = TableIdentifier.of("db2", "ns4", "tbl5");
    TableIdentifier tbl8 = TableIdentifier.of("d_", "ns5", "tbl6");
    TableIdentifier tbl9 = TableIdentifier.of("d%", "ns6", "tbl7");
    Lists.newArrayList(tbl1, tbl2, tbl3, tbl4, tbl5, tbl6, tbl7, tbl8, tbl9)
        .forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
    List<Namespace> nsp1 = catalog.listNamespaces(Namespace.of("db"));
    assertThat(nsp1).hasSize(3);
    Set<String> tblSet = Sets.newHashSet(nsp1.stream().map(Namespace::toString).iterator());
    assertThat(tblSet).hasSize(3).contains("db.ns1", "db.ns2", "db.ns3");
    List<Namespace> nsp2 = catalog.listNamespaces(Namespace.of("db", "ns1"));
    assertThat(nsp2).hasSize(1);
    assertThat(nsp2.get(0)).hasToString("db.ns1.ns2");
    List<Namespace> nsp3 = catalog.listNamespaces();
    Set<String> tblSet2 = Sets.newHashSet(nsp3.stream().map(Namespace::toString).iterator());
    assertThat(tblSet2).hasSize(5).contains("db", "db2", "d_", "d%", "");
    List<Namespace> nsp4 = catalog.listNamespaces();
    Set<String> tblSet3 = Sets.newHashSet(nsp4.stream().map(Namespace::toString).iterator());
    assertThat(tblSet3).hasSize(5).contains("db", "db2", "d_", "d%", "");
    List<Namespace> nsp5 = catalog.listNamespaces(Namespace.of("d_"));
    assertThat(nsp5).hasSize(1);
    assertThat(nsp5.get(0)).hasToString("d_.ns5");
    List<Namespace> nsp6 = catalog.listNamespaces(Namespace.of("d%"));
    assertThat(nsp6).hasSize(1);
    assertThat(nsp6.get(0)).hasToString("d%.ns6");
    assertThatThrownBy(() -> catalog.listNamespaces(Namespace.of("db", "db2", "ns2")))
        .isInstanceOf(NoSuchNamespaceException.class)
        .hasMessage("Namespace does not exist: db.db2.ns2");
}
/**
 * Deserializes one delimited (CSV-style) record into a list of column values.
 * Null payloads deserialize to null. Empty string fields become null values; every
 * other field is parsed with the column's configured parser. Column-count mismatches
 * and any parse failure surface as a SerializationException.
 *
 * @param topic the topic the record came from (used in error reporting)
 * @param bytes the raw record payload, UTF-8 encoded
 * @return the parsed column values, or null for a null payload
 */
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
    if (bytes == null) {
        return null;
    }
    try {
        final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
        final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
            .getRecords();
        if (csvRecords.isEmpty()) {
            throw new SerializationException("No fields in record");
        }
        // Only the first CSV record of the payload is deserialized.
        final CSVRecord csvRecord = csvRecords.get(0);
        if (csvRecord == null || csvRecord.size() == 0) {
            throw new SerializationException("No fields in record.");
        }
        SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
        final List<Object> values = new ArrayList<>(parsers.size());
        final Iterator<Parser> pIt = parsers.iterator();
        for (int i = 0; i < csvRecord.size(); i++) {
            final String value = csvRecord.get(i);
            final Parser parser = pIt.next();
            // Empty fields map to null rather than being handed to the parser.
            final Object parsed = value == null || value.isEmpty()
                ? null
                : parser.parse(value);
            values.add(parsed);
        }
        return values;
    } catch (final Exception e) {
        throw new SerializationException("Error deserializing delimited", e);
    }
}
/** A record using '|' as delimiter must parse into the full typed column list. */
@Test
public void shouldDeserializeDelimitedCorrectlyWithBarDelimiter() {
    // Given:
    final byte[] bytes = "1511897796092|1|item_1|10.0|10.10|100|10|100|ew==\r\n".getBytes(StandardCharsets.UTF_8);
    final KsqlDelimitedDeserializer deserializer =
        new KsqlDelimitedDeserializer(ORDER_SCHEMA, CSVFormat.DEFAULT.withDelimiter('|'));
    // When:
    final List<?> result = deserializer.deserialize("", bytes);
    // Then:
    assertThat(result, contains(1511897796092L, 1L, "item_1", 10.0d, new BigDecimal("10.10"),
        new Time(100), new Date(864000000), new Timestamp(100), ByteBuffer.wrap(new byte[] {123})));
}
/**
 * Observer callback for node events: applies ADD/DELETE events to the pool while holding
 * the update lock. Notifications that do not come from a {@code NodeListener}, do not
 * carry a {@code NodeEvent[]} payload, or carry an empty payload are ignored.
 *
 * @param o   the observable that fired; only {@code NodeListener} sources are handled
 * @param arg the event payload; expected to be a non-empty {@code NodeEvent[]}
 */
@Override
public void update(Observable o, Object arg) {
    if (!(o instanceof NodeListener)) {
        return;
    }
    // instanceof is false for null, so a separate null check is redundant.
    if (!(arg instanceof NodeEvent[])) {
        return;
    }
    NodeEvent[] events = (NodeEvent[]) arg;
    if (events.length == 0) {
        return;
    }
    LOG.info("Waiting for Lock to start processing NodeEvents.");
    lock.lock();
    try {
        LOG.info("Start processing the NodeEvent[" + events.length + "].");
        for (NodeEvent e : events) {
            if (e.getType() == NodeEventTypeEnum.ADD) {
                addNode(e);
            } else if (e.getType() == NodeEventTypeEnum.DELETE) {
                deleteNode(e);
            }
        }
    } catch (Exception e) {
        // Best-effort update: log and keep the observer alive for future events.
        LOG.error("Exception occurred while updating Pool.", e);
    } finally {
        lock.unlock();
    }
}
/** DELETE then ADD events for the same node must blacklist it and then restore it. */
@Test
public void testUpdate() {
    updater.init();
    assertTrue(updater.isInited());
    haDataSource.getDataSourceMap().put("foo", new MockDataSource("foo"));
    haDataSource.getDataSourceMap().put("bar", new MockDataSource("bar"));
    NodeEvent event = new NodeEvent();
    event.setNodeName("foo");
    event.setType(NodeEventTypeEnum.DELETE);
    updater.update(new FileNodeListener(), new NodeEvent[]{event});
    validateDeleteNode();
    event = new NodeEvent();
    event.setNodeName("foo");
    event.setType(NodeEventTypeEnum.ADD);
    updater.update(new FileNodeListener(), new NodeEvent[]{event});
    assertFalse(updater.getNodesToDel().contains("foo"));
    assertFalse(haDataSource.isInBlackList("foo"));
}
/** Triggers statistics estimation by visiting the operator of the bound expression context. */
public void estimatorStats() {
    expressionContext.getOp().accept(this, expressionContext);
}
/**
 * Partition pruning must scale the estimated row count and clamp the partition column's
 * min/max statistics: pruning to p1 yields 1000 * (1/10) = 100 rows; pruning to p2/p3
 * with a range predicate yields ~1281 rows with the min/max narrowed accordingly.
 */
@Test
public void testLogicalOlapTableScanPartitionPrune1(@Mocked CachedStatisticStorage cachedStatisticStorage)
        throws Exception {
    FeConstants.runningUnitTest = true;
    ColumnRefOperator idDate = columnRefFactory.create("id_date", Type.DATE, true);
    GlobalStateMgr globalStateMgr = connectContext.getGlobalStateMgr();
    Table table = globalStateMgr.getDb("statistics_test").getTable("test_all_type");
    new Expectations() {
        {
            cachedStatisticStorage.getColumnStatistics(table, Lists.newArrayList("id_date"));
            result = new ColumnStatistic(0, Utils.getLongFromDateTime(LocalDateTime.of(2014, 12, 01, 0, 0, 0)), 0, 0, 30);
            minTimes = 0;
            cachedStatisticStorage.getColumnStatistic(table, "id_date");
            result = new ColumnStatistic(0, Utils.getLongFromDateTime(LocalDateTime.of(2014, 12, 01, 0, 0, 0)), 0, 0, 30);
            minTimes = 0;
        }
    };
    Collection<Partition> partitions = ((OlapTable) table).getPartitions();
    // select partition p1
    List<Long> partitionIds = partitions.stream().filter(partition -> partition.getName().equalsIgnoreCase("p1")).
        mapToLong(Partition::getId).boxed().collect(Collectors.toList());
    for (Partition partition : partitions) {
        partition.getBaseIndex().setRowCount(1000);
    }
    LogicalOlapScanOperator olapScanOperator =
        new LogicalOlapScanOperator(table,
            ImmutableMap.of(idDate, new Column("id_date", Type.DATE, true)),
            ImmutableMap.of(new Column("id_date", Type.DATE, true), idDate),
            null, -1,
            new BinaryPredicateOperator(BinaryType.EQ, idDate,
                ConstantOperator.createDate(LocalDateTime.of(2013, 12, 30, 0, 0, 0))),
            ((OlapTable) table).getBaseIndexId(), partitionIds, null, false,
            Lists.newArrayList(), Lists.newArrayList(), Lists.newArrayList(), false);
    GroupExpression groupExpression = new GroupExpression(olapScanOperator, Lists.newArrayList());
    groupExpression.setGroup(new Group(0));
    ExpressionContext expressionContext = new ExpressionContext(groupExpression);
    StatisticsCalculator statisticsCalculator = new StatisticsCalculator(expressionContext,
        columnRefFactory, optimizerContext);
    statisticsCalculator.estimatorStats();
    // partition column count distinct values is 30 in table level, after partition prune,
    // the column statistic distinct values is 10, so the estimate row count is 1000 * (1/10)
    Assert.assertEquals(100, expressionContext.getStatistics().getOutputRowCount(), 0.001);
    ColumnStatistic columnStatistic = expressionContext.getStatistics().getColumnStatistic(idDate);
    Assert.assertEquals(Utils.getLongFromDateTime(LocalDateTime.of(2013, 12, 30, 0, 0, 0)),
        columnStatistic.getMaxValue(), 0.001);
    // select partition p2, p3
    partitionIds.clear();
    partitionIds = partitions.stream().filter(partition -> !(partition.getName().equalsIgnoreCase("p1"))).
        mapToLong(Partition::getId).boxed().collect(Collectors.toList());
    olapScanOperator =
        new LogicalOlapScanOperator(table,
            ImmutableMap.of(idDate, new Column("id_date", Type.DATE, true)),
            ImmutableMap.of(new Column("id_date", Type.DATE, true), idDate),
            null, -1, null,
            ((OlapTable) table).getBaseIndexId(), partitionIds, null, false,
            Lists.newArrayList(), Lists.newArrayList(), Lists.newArrayList(), false);
    olapScanOperator.setPredicate(new BinaryPredicateOperator(BinaryType.GE, idDate,
        ConstantOperator.createDate(LocalDateTime.of(2014, 5, 1, 0, 0, 0))));
    groupExpression = new GroupExpression(olapScanOperator, Lists.newArrayList());
    groupExpression.setGroup(new Group(0));
    expressionContext = new ExpressionContext(groupExpression);
    statisticsCalculator = new StatisticsCalculator(expressionContext, columnRefFactory, optimizerContext);
    statisticsCalculator.estimatorStats();
    columnStatistic = expressionContext.getStatistics().getColumnStatistic(idDate);
    Assert.assertEquals(1281.4371, expressionContext.getStatistics().getOutputRowCount(), 0.001);
    Assert.assertEquals(Utils.getLongFromDateTime(LocalDateTime.of(2014, 5, 1, 0, 0, 0)),
        columnStatistic.getMinValue(), 0.001);
    Assert.assertEquals(Utils.getLongFromDateTime(LocalDateTime.of(2014, 12, 1, 0, 0, 0)),
        columnStatistic.getMaxValue(), 0.001);
    Assert.assertEquals(20, columnStatistic.getDistinctValuesCount(), 0.001);
    FeConstants.runningUnitTest = false;
}
/**
 * Parses a Blood Pressure Measurement packet. Layout: 1 flags byte; systolic, diastolic
 * and mean arterial pressure as three SFLOATs (6 bytes); then optional timestamp
 * (7 bytes), pulse rate (SFLOAT, 2 bytes), user id (1 byte) and measurement status
 * (UINT16 LE, 2 bytes), each present per the corresponding flag bit. Packets shorter
 * than their flags require are reported via {@code onInvalidDataReceived}.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);
    if (data.size() < 7) {
        onInvalidDataReceived(device, data);
        return;
    }
    // First byte: flags
    int offset = 0;
    final int flags = data.getIntValue(Data.FORMAT_UINT8, offset++);
    // See UNIT_* for unit options
    final int unit = (flags & 0x01) == UNIT_mmHg ? UNIT_mmHg : UNIT_kPa;
    final boolean timestampPresent = (flags & 0x02) != 0;
    final boolean pulseRatePresent = (flags & 0x04) != 0;
    final boolean userIdPresent = (flags & 0x08) != 0;
    final boolean measurementStatusPresent = (flags & 0x10) != 0;
    // Re-validate length now that the optional-field sizes are known from the flags.
    if (data.size() < 7
            + (timestampPresent ? 7 : 0) + (pulseRatePresent ? 2 : 0)
            + (userIdPresent ? 1 : 0) + (measurementStatusPresent ? 2 : 0)) {
        onInvalidDataReceived(device, data);
        return;
    }
    // Following bytes - systolic, diastolic and mean arterial pressure
    final float systolic = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
    final float diastolic = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 2);
    final float meanArterialPressure = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 4);
    offset += 6;
    // Parse timestamp if present
    Calendar calendar = null;
    if (timestampPresent) {
        calendar = DateTimeDataCallback.readDateTime(data, offset);
        offset += 7;
    }
    // Parse pulse rate if present
    Float pulseRate = null;
    if (pulseRatePresent) {
        pulseRate = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
        offset += 2;
    }
    // Read user id if present
    Integer userId = null;
    if (userIdPresent) {
        userId = data.getIntValue(Data.FORMAT_UINT8, offset);
        offset += 1;
    }
    // Read measurement status if present
    BPMStatus status = null;
    if (measurementStatusPresent) {
        final int measurementStatus = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
        // offset += 2;
        status = new BPMStatus(measurementStatus);
    }
    onBloodPressureMeasurementReceived(device, systolic, diastolic, meanArterialPressure, unit,
            pulseRate, userId, status, calendar);
}
/**
 * Packet with flags 0b10101 (kPa unit, pulse rate and measurement status present,
 * no timestamp, no user id) must decode all fields and flag bits correctly.
 */
@Test
public void onBloodPressureMeasurementReceived_some() {
    final DataReceivedCallback callback = new BloodPressureMeasurementDataCallback() {
        @Override
        public void onBloodPressureMeasurementReceived(@NonNull final BluetoothDevice device,
                                                       final float systolic, final float diastolic,
                                                       final float meanArterialPressure, final int unit,
                                                       @Nullable final Float pulseRate,
                                                       @Nullable final Integer userID,
                                                       @Nullable final BPMStatus status,
                                                       @Nullable final Calendar calendar) {
            assertEquals("Systolic", 18.9, systolic, 0.01);
            assertEquals("Diastolic", 11.0, diastolic, 0);
            assertEquals("Mean AP", 15.9, meanArterialPressure, 0.01);
            assertEquals("Unit: kPa", 1, unit);
            assertNotNull("Pulse rate set", pulseRate);
            assertEquals("Pulse rate", 60.0, pulseRate, 0);
            assertNull("User ID not set", userID);
            assertNotNull("Status set", status);
            assertFalse(status.bodyMovementDetected);
            assertTrue(status.cuffTooLose);
            assertFalse(status.irregularPulseDetected);
            assertFalse(status.pulseRateInRange);
            assertFalse(status.pulseRateExceedsUpperLimit);
            assertTrue(status.pulseRateIsLessThenLowerLimit);
            assertFalse(status.improperMeasurementPosition);
            assertNull("Calendar not set", calendar);
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device,
                                          @NonNull final Data data) {
            assertEquals("Correct BPM reported as invalid", 1, 2);
        }
    };
    final MutableData data = new MutableData(new byte[11]);
    // Flags
    data.setByte((byte) 0b10101, 0);
    // Systolic, diastolic and mean AP (kPa per flags bit 0)
    data.setValue(189, -1, Data.FORMAT_SFLOAT, 1);
    data.setValue(11, 0, Data.FORMAT_SFLOAT, 3);
    data.setValue(159, -1, Data.FORMAT_SFLOAT, 5);
    // Pulse rate
    data.setValue(60, 0, Data.FORMAT_SFLOAT, 7);
    // Measurement status
    data.setValue(0b010010, Data.FORMAT_UINT16_LE, 9);
    assertArrayEquals(
            new byte[] { 0x15, (byte) 0xBD, (byte) 0xF0, 0xB, 0x0, (byte) 0x9F, (byte) 0xF0, 0x3C, 0x0, 0x12, 0x0 },
            data.getValue()
    );
    callback.onDataReceived(null, data);
}
/**
 * Writes a JSON error payload describing {@code ex} to the given servlet response.
 * The payload nests message, simple class name, and fully-qualified class name
 * under a single error key, with insertion order preserved for stable output.
 *
 * @param response servlet response to populate
 * @param status   HTTP status code to set on the response
 * @param ex       exception whose details are serialized
 * @throws IOException if writing the response body fails
 */
public static void createServletExceptionResponse(
    HttpServletResponse response, int status, Throwable ex) throws IOException {
  response.setStatus(status);
  response.setContentType(APPLICATION_JSON_MIME);

  // LinkedHashMap keeps key order deterministic for clients and tests; diamond
  // operator replaces the redundant explicit type arguments.
  Map<String, Object> json = new LinkedHashMap<>();
  json.put(ERROR_MESSAGE_JSON, getOneLineMessage(ex));
  json.put(ERROR_EXCEPTION_JSON, ex.getClass().getSimpleName());
  json.put(ERROR_CLASSNAME_JSON, ex.getClass().getName());
  Map<String, Object> jsonResponse = Collections.singletonMap(ERROR_JSON, json);

  // Flush but do not close: the servlet container owns the writer's lifecycle.
  Writer writer = response.getWriter();
  JsonSerialization.writer().writeValue(writer, jsonResponse);
  writer.flush();
}
/**
 * Verifies that createServletExceptionResponse sets the status and JSON content type
 * and writes the exception's message, simple name, and class name into the payload.
 */
@Test
public void testCreateServletException() throws IOException {
    StringWriter writer = new StringWriter();
    PrintWriter printWriter = new PrintWriter(writer);
    // Mock the servlet response so the JSON body lands in our StringWriter.
    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
    Mockito.when(response.getWriter()).thenReturn(printWriter);
    int status = HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
    Exception ex = new IOException("Hello IOEX");
    HttpExceptionUtils.createServletExceptionResponse(response, status, ex);
    Mockito.verify(response).setStatus(status);
    Mockito.verify(response).setContentType(Mockito.eq("application/json"));
    // Parse the written body and inspect the nested error object.
    ObjectMapper mapper = new ObjectMapper();
    Map json = mapper.readValue(writer.toString(), Map.class);
    json = (Map) json.get(HttpExceptionUtils.ERROR_JSON);
    Assert.assertEquals(IOException.class.getName(), json.get(HttpExceptionUtils.ERROR_CLASSNAME_JSON));
    Assert.assertEquals(IOException.class.getSimpleName(), json.get(HttpExceptionUtils.ERROR_EXCEPTION_JSON));
    Assert.assertEquals("Hello IOEX", json.get(HttpExceptionUtils.ERROR_MESSAGE_JSON));
}
/**
 * Parses a timestamp string via {@link java.sql.Timestamp#valueOf}; on failure,
 * delegates to the shared parse-error handler with the expected format attached.
 */
static Timestamp parseTimeStamp(final String value) {
    try {
        // JDK format in Timestamp.valueOf is compatible with TIMESTAMP_FORMAT
        return Timestamp.valueOf(value);
    } catch (IllegalArgumentException e) {
        // Wrap as a ParseException so the error path is uniform with other parsers.
        return throwRuntimeParseException(value, new ParseException(e.getMessage(), 0), TIMESTAMP_FORMAT);
    }
}
/**
 * Round-trip check: a timestamp whose fields all carry leading zeros must parse
 * back from its own toString() representation unchanged.
 */
@Test
public void testTimestampWithLeadingZeros() throws Exception {
    // Given
    Timestamp expectedTimestamp = new Timestamp(new SimpleDateFormat(TIMESTAMP_FORMAT)
            .parse("2000-01-02 03:04:05.006")
            .getTime());
    // When
    Timestamp actualTimestamp = DateHelper.parseTimeStamp(expectedTimestamp.toString());
    // Then
    assertTimestampsEqual(expectedTimestamp, actualTimestamp);
}
/**
 * Executes the pipeline on the direct runner: snapshots options via a JSON
 * round-trip, rewrites the pipeline, builds the execution graph and evaluation
 * context, starts the executor, and (optionally) blocks until completion.
 * Metrics support is enabled for the duration of the run and disabled after.
 */
@Override
public DirectPipelineResult run(Pipeline pipeline) {
    try {
        // JSON round-trip makes a defensive, serializable copy of the options.
        options = MAPPER
            .readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class)
            .as(DirectOptions.class);
    } catch (IOException e) {
        throw new IllegalArgumentException(
            "PipelineOptions specified failed to serialize to JSON.", e);
    }

    performRewrites(pipeline);
    MetricsEnvironment.setMetricsSupported(true);
    try {
        // Traverse the pipeline to build the direct-runner graph and track keyed values.
        DirectGraphVisitor graphVisitor = new DirectGraphVisitor();
        pipeline.traverseTopologically(graphVisitor);

        @SuppressWarnings("rawtypes")
        KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create();
        pipeline.traverseTopologically(keyedPValueVisitor);

        DisplayDataValidator.validatePipeline(pipeline);
        DisplayDataValidator.validateOptions(options);

        ExecutorService metricsPool =
            Executors.newCachedThreadPool(
                new ThreadFactoryBuilder()
                    .setThreadFactory(MoreExecutors.platformThreadFactory())
                    .setDaemon(false) // otherwise you say you want to leak, please don't!
                    .setNameFormat("direct-metrics-counter-committer")
                    .build());

        DirectGraph graph = graphVisitor.getGraph();
        EvaluationContext context =
            EvaluationContext.create(
                clockSupplier.get(),
                Enforcement.bundleFactoryFor(enabledEnforcements, graph),
                graph,
                keyedPValueVisitor.getKeyedPValues(),
                metricsPool);

        TransformEvaluatorRegistry registry =
            TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options);
        PipelineExecutor executor =
            ExecutorServiceParallelExecutor.create(
                options.getTargetParallelism(),
                registry,
                Enforcement.defaultModelEnforcements(enabledEnforcements),
                context,
                metricsPool);
        executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options));

        DirectPipelineResult result = new DirectPipelineResult(executor, context);
        if (options.isBlockOnRun()) {
            try {
                result.waitUntilFinish();
            } catch (UserCodeException userException) {
                // Surface user-code failures with their underlying cause.
                throw new PipelineExecutionException(userException.getCause());
            } catch (Throwable t) {
                // Rethrow runtime exceptions as-is; wrap anything else.
                if (t instanceof RuntimeException) {
                    throw (RuntimeException) t;
                }
                throw new RuntimeException(t);
            }
        }
        return result;
    } finally {
        MetricsEnvironment.setMetricsSupported(false);
    }
}
/**
 * With immutability enforcement disabled, a DoFn that mutates and re-outputs an
 * element must not make the pipeline fail.
 */
@Test
public void testMutatingOutputWithEnforcementDisabledSucceeds() throws Exception {
    PipelineOptions options = PipelineOptionsFactory.create();
    options.setRunner(DirectRunner.class);
    options.as(DirectOptions.class).setEnforceImmutability(false);
    Pipeline pipeline = Pipeline.create(options);
    pipeline
        .apply(Create.of(42))
        .apply(
            ParDo.of(
                new DoFn<Integer, List<Integer>>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) {
                        List<Integer> outputList = Arrays.asList(1, 2, 3, 4);
                        c.output(outputList);
                        // Mutation after output — normally forbidden, allowed here.
                        outputList.set(0, 37);
                        c.output(outputList);
                    }
                }));
    pipeline.run();
}
/**
 * Translates a throwable into an HTTP endpoint response. A {@code KsqlRestException}
 * already carries the exact response to return; any other throwable is wrapped in a
 * generic JSON error payload with a 500 status.
 */
public static EndpointResponse mapException(final Throwable exception) {
  // KsqlRestException embeds its own pre-built response — return it directly.
  if (exception instanceof KsqlRestException) {
    return ((KsqlRestException) exception).getResponse();
  }

  final KsqlErrorMessage errorBody =
      new KsqlErrorMessage(Errors.ERROR_CODE_SERVER_ERROR, exception);
  return EndpointResponse.create()
      .status(INTERNAL_SERVER_ERROR.code())
      .type("application/json")
      .entity(errorBody)
      .build();
}
/**
 * mapException must return the very same response instance that a
 * KsqlRestException carries — no copying or re-wrapping.
 */
@Test
public void shouldReturnEmbeddedResponseForKsqlRestException() {
    final EndpointResponse response = EndpointResponse.failed(400);
    assertThat(
        OldApiUtils.mapException(new KsqlRestException(response)),
        sameInstance(response));
}
/**
 * Registers a listener for job-storage change notifications and ensures the
 * timer that delivers those updates is running.
 */
@Override
public void addJobStorageOnChangeListener(StorageProviderChangeListener listener) {
    onChangeListeners.add(listener);
    // Idempotent in intent: the timer feeds all registered listeners.
    startTimerToSendUpdates();
}
/**
 * A listener registered for a saved job must eventually be notified of it
 * (the notification is asynchronous, hence the await).
 */
@Test
void JobChangeListenersAreNotifiedOfJobs() {
    final Job job = anEnqueuedJob().build();
    storageProvider.save(job);
    final JobChangeListenerForTest changeListener = new JobChangeListenerForTest(new JobId(job.getId()));
    storageProvider.addJobStorageOnChangeListener(changeListener);
    await()
        .untilAsserted(() -> assertThat(changeListener).has(jobNotNull));
}
/**
 * Cleans framework frames from the throwable's stack trace in place.
 * The identity set tracks already-visited throwables to avoid cycles.
 */
static void cleanStackTrace(Throwable throwable) {
    new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
}
/**
 * Cleaning a throwable must also strip filtered frames from every suppressed
 * throwable attached to it, not just the primary stack trace.
 */
@Test
public void suppressedThrowablesAreAlsoCleaned() {
    if (Platform.isAndroid()) {
        return; // suppressed exceptions aren't supported under Ice Cream Sandwich, where we test
    }
    Throwable throwable = createThrowableWithStackTrace("com.example.Foo", "org.junit.FilterMe");
    Throwable suppressed1 = createThrowableWithStackTrace("com.example.Bar", "org.junit.FilterMe");
    Throwable suppressed2 = createThrowableWithStackTrace("com.example.Car", "org.junit.FilterMe");
    throwable.addSuppressed(suppressed1);
    throwable.addSuppressed(suppressed2);

    StackTraceCleaner.cleanStackTrace(throwable);

    // The org.junit.FilterMe frames must be gone everywhere.
    assertThat(throwable.getStackTrace()).isEqualTo(createStackTrace("com.example.Foo"));
    assertThat(suppressed1.getStackTrace()).isEqualTo(createStackTrace("com.example.Bar"));
    assertThat(suppressed2.getStackTrace()).isEqualTo(createStackTrace("com.example.Car"));
}
/**
 * Validates content for publish/delete operations.
 *
 * @param content the content to validate
 * @throws IllegalArgumentException if the content is null/empty, contains CR/LF,
 *         or contains the reserved word-separator character
 */
public static void verifyIncrementPubContent(String content) {
    if (content == null || content.isEmpty()) {
        throw new IllegalArgumentException("publish/delete content can not be null");
    }
    // Hoist the separator lookup: it is loop-invariant.
    final char wordSeparator = Constants.WORD_SEPARATOR.charAt(0);
    for (int i = 0; i < content.length(); i++) {
        final char c = content.charAt(i);
        if (c == '\r' || c == '\n') {
            throw new IllegalArgumentException("publish/delete content can not contain return and linefeed");
        }
        if (c == wordSeparator) {
            throw new IllegalArgumentException("publish/delete content can not contain(char)2");
        }
    }
}
/** Content containing a carriage return must be rejected with the CR/LF error message. */
@Test
void testVerifyIncrementPubContentFail2() {
    Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
        String content = "aa\rbbb";
        ContentUtils.verifyIncrementPubContent(content);
    });
    assertTrue(exception.getMessage().contains("publish/delete content can not contain return and linefeed"));
}
/**
 * Upserts a beta config entry with compare-and-swap semantics: inserts when no
 * beta record exists yet, otherwise performs a CAS update on the existing one.
 */
@Override
public ConfigOperateResult insertOrUpdateBetaCas(final ConfigInfo configInfo, final String betaIps,
        final String srcIp, final String srcUser) {
    // Probe for an existing beta record to pick the insert vs. CAS-update path.
    final boolean betaExists = findConfigInfo4BetaState(
            configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant()) != null;
    return betaExists
            ? updateConfigInfo4BetaCas(configInfo, betaIps, srcIp, srcUser)
            : addConfigInfo4Beta(configInfo, betaIps, srcIp, srcUser);
}
/**
 * When no beta record exists, insertOrUpdateBetaCas must take the add path:
 * the first state query returns null (no record), the second returns the
 * freshly stored state used to build the operate result.
 */
@Test
void testInsertOrUpdateBetaCasOfAdd() {
    String dataId = "betaDataId113";
    String group = "group113";
    String tenant = "tenant113";
    //mock exist beta
    ConfigInfoStateWrapper mockedConfigInfoStateWrapper = new ConfigInfoStateWrapper();
    mockedConfigInfoStateWrapper.setDataId(dataId);
    mockedConfigInfoStateWrapper.setGroup(group);
    mockedConfigInfoStateWrapper.setTenant(tenant);
    mockedConfigInfoStateWrapper.setId(123456L);
    mockedConfigInfoStateWrapper.setLastModified(System.currentTimeMillis());
    // First call: no record (forces the add branch). Second call: post-insert state.
    Mockito.when(
            databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}),
                    eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER)))
            .thenReturn(null).thenReturn(mockedConfigInfoStateWrapper);

    String betaIps = "betaips...";
    String srcIp = "srcUp...";
    String srcUser = "srcUser...";
    String appName = "appname";
    String content = "content111";
    ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
    configInfo.setEncryptedDataKey("key34567");

    //execute
    ConfigOperateResult configOperateResult =
            embeddedConfigInfoBetaPersistService.insertOrUpdateBetaCas(configInfo, betaIps, srcIp, srcUser);

    //expect return obj
    assertEquals(mockedConfigInfoStateWrapper.getId(), configOperateResult.getId());
    assertEquals(mockedConfigInfoStateWrapper.getLastModified(), configOperateResult.getLastModified());

    //verify add to be invoked
    embeddedStorageContextHolderMockedStatic.verify(
            () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(dataId), eq(group), eq(tenant),
                    eq(configInfo.getAppName()), eq(configInfo.getContent()), eq(configInfo.getMd5()), eq(betaIps),
                    eq(srcIp), eq(srcUser), eq(configInfo.getEncryptedDataKey())), times(1));
}
/**
 * Loads data storage and then block-pool slice storage for the given locations,
 * using a bounded thread pool for parallel volume loading. Returns the storage
 * directories that loaded successfully; an empty list if none did.
 * The executor is always shut down, even on failure.
 */
@VisibleForTesting
synchronized List<StorageDirectory> addStorageLocations(DataNode datanode,
        NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
        StartupOption startOpt) throws IOException {
    final int numThreads = getParallelVolumeLoadThreadsNum(
            dataDirs.size(), datanode.getConf());
    final ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    try {
        final List<StorageLocation> successLocations = loadDataStorage(
                datanode, nsInfo, dataDirs, startOpt, executor);
        // Nothing loaded — skip the block-pool phase entirely.
        if (successLocations.isEmpty()) {
            return Lists.newArrayList();
        }
        return loadBlockPoolSliceStorage(
                datanode, nsInfo, successLocations, startOpt, executor);
    } finally {
        executor.shutdown();
    }
}
/**
 * After a simulated restart with a different cluster ID, previously-loaded
 * storage locations must fail to load (zero successes), because the on-disk
 * cluster ID no longer matches the namespace info.
 */
@Test
public void testAddStorageDirectoriesFailure() throws IOException {
    final int numLocations = 1;
    List<StorageLocation> locations = createStorageLocations(numLocations);
    assertEquals(numLocations, locations.size());

    NamespaceInfo namespaceInfo = new NamespaceInfo(0, CLUSTER_ID,
            DEFAULT_BPID, CTIME, BUILD_VERSION, SOFTWARE_VERSION);
    List<StorageDirectory> successLocations = storage.addStorageLocations(
            mockDN, namespaceInfo, locations, START_OPT);
    assertEquals(1, successLocations.size());

    // After the DataNode restarts, the value of the clusterId is different
    // from the value before the restart.
    storage.unlockAll();
    DataNode newMockDN = Mockito.mock(DataNode.class);
    Mockito.when(newMockDN.getConf()).thenReturn(new HdfsConfiguration());
    DataStorage newStorage = new DataStorage();
    NamespaceInfo newNamespaceInfo = new NamespaceInfo(0, CLUSTER_ID2,
            DEFAULT_BPID, CTIME, BUILD_VERSION, SOFTWARE_VERSION);
    successLocations = newStorage.addStorageLocations(
            newMockDN, newNamespaceInfo, locations, START_OPT);
    // Mismatched cluster ID: no location should load.
    assertEquals(0, successLocations.size());
    newStorage.unlockAll();
    newMockDN.shutdown();
}
/**
 * Returns a pooled connection after use. The connection is either closed and
 * discarded (flagged to close, lifetime expired, circuit breaker tripped, or
 * already inactive) or handed back to its per-server pool. Returns true only
 * when the connection was actually accepted back into a pool.
 */
@Override
public boolean release(final PooledConnection conn) {
    conn.stopRequestTimer();
    releaseConnCounter.increment();
    connsInUse.decrementAndGet();

    final DiscoveryResult discoveryResult = conn.getServer();
    updateServerStatsOnRelease(conn);

    boolean released = false;
    if (conn.isShouldClose()) {
        // Close and discard the connection, as it has been flagged (possibly due to receiving a non-channel error
        // like a 503).
        conn.setInPool(false);
        conn.close();
        LOG.debug(
            "[{}] closing conn flagged to be closed",
            conn.getChannel().id());
    } else if(isConnectionExpired(conn.getUsageCount())) {
        // Connection has served its configured maximum number of requests.
        conn.setInPool(false);
        conn.close();
        closeExpiredConnLifetimeCounter.increment();
        LOG.debug(
            "[{}] closing conn lifetime expired, usage: {}",
            conn.getChannel().id(),
            conn.getUsageCount());
    } else if (connPoolConfig.isCloseOnCircuitBreakerEnabled() && discoveryResult.isCircuitBreakerTripped()) {
        LOG.debug(
            "[{}] closing conn, server circuit breaker tripped",
            conn.getChannel().id());
        circuitBreakerClose.increment();
        // Don't put conns for currently circuit-tripped servers back into the pool.
        conn.setInPool(false);
        conn.close();
    } else if (!conn.isActive()) {
        LOG.debug("[{}] conn inactive, cleaning up", conn.getChannel().id());
        // Connection is already closed, so discard.
        alreadyClosedCounter.increment();
        // make sure to decrement OpenConnectionCounts
        conn.updateServerStats();
        conn.setInPool(false);
    } else {
        releaseHandlers(conn);

        // Attempt to return connection to the pool.
        IConnectionPool pool = perServerPools.get(discoveryResult);
        if (pool != null) {
            released = pool.release(conn);
        } else {
            // The pool for this server no longer exists (maybe due to it falling out of
            // discovery).
            conn.setInPool(false);
            released = false;
            conn.close();
        }
        LOG.debug("PooledConnection released: {}", conn);
    }
    return released;
}
/**
 * A connection whose server circuit breaker has tripped must be closed and
 * removed from the pool on release (release returns false).
 */
@Test
void closeOnCircuitBreaker() {
    final OriginName originName = OriginName.fromVipAndApp("whatever", "whatever");
    // Subclass to neutralize server-stat updates, which need real discovery state.
    DefaultClientChannelManager manager = new DefaultClientChannelManager(
            originName, new DefaultClientConfigImpl(), Mockito.mock(DynamicServerResolver.class), new NoopRegistry()) {
        @Override
        protected void updateServerStatsOnRelease(PooledConnection conn) {}
    };
    PooledConnection connection = mock(PooledConnection.class);
    DiscoveryResult discoveryResult = mock(DiscoveryResult.class);
    doReturn(discoveryResult).when(connection).getServer();
    doReturn(true).when(discoveryResult).isCircuitBreakerTripped();
    doReturn(new EmbeddedChannel()).when(connection).getChannel();

    Truth.assertThat(manager.release(connection)).isFalse();
    verify(connection).setInPool(false);
    verify(connection).close();
}
/**
 * Wraps the given string in single quotes, doubling any embedded single quotes
 * (standard SQL string-literal escaping).
 */
public static String quoteStringLiteral(String string) {
    final StringBuilder literal = new StringBuilder(string.length() + 2);
    literal.append('\'');
    literal.append(string.replace("'", "''"));
    literal.append('\'');
    return literal.toString();
}
/** Verifies SQL-style quoting: plain strings wrapped, embedded quotes doubled. */
@Test
public void testQuote() {
    assertEquals("'foo'", quoteStringLiteral("foo"));
    assertEquals("'Presto''s'", quoteStringLiteral("Presto's"));
}
/**
 * Finishes the compressed stream, releases the native deflater, and closes the
 * underlying stream.
 * <p>NOTE(review): a failure from {@code finish()} is deliberately swallowed so
 * that resource cleanup ({@code deflater.end()} and {@code super.close()}) always
 * runs; the original finish error is lost to callers — confirm this is intended.
 */
public void close() throws IOException {
    try {
        finish();
    } catch (IOException ignored) {
    }
    if (deflater != null) {
        // Free the deflater's native/off-heap resources.
        deflater.end();
    }
    super.close();
}
/**
 * Round-trips a dataset through Java serialization and verifies that bulk data
 * references (BulkData, Fragments, BulkDataWithPrefix) survive intact.
 */
@Test
public void testSerializeDataset() throws Exception {
    ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(file));
    try {
        out.writeObject(dataset());
        out.writeUTF("END");
    } finally {
        out.close();
    }
    Attributes dataset = deserializeAttributes();
    assertTrue(dataset.getValue(Tag.PixelData) instanceof BulkData);
    // Private tag holding fragment data must deserialize as Fragments,
    // with the third fragment carrying its prefix.
    Object fragments = dataset.getValue("DicomOutputStreamTest", 0x99990010);
    assertTrue(fragments instanceof Fragments);
    assertTrue(((Fragments) fragments).get(2) instanceof BulkDataWithPrefix);
}
/**
 * Returns the configured notification channels.
 * <p>The backing array is cloned before wrapping: {@code Arrays.asList} over the
 * internal array would hand callers a write-through view (List.set would mutate
 * this manager's state).
 *
 * @return a list of the notification channels; mutations by callers do not
 *         affect this manager's internal array
 */
@VisibleForTesting
protected List<NotificationChannel> getChannels() {
  return Arrays.asList(notificationChannels.clone());
}
/** getChannels must reflect exactly the channels the manager was constructed with. */
@Test
public void shouldProvideChannelList() {
    assertThat(underTest.getChannels()).containsOnly(emailChannel, twitterChannel);

    // An empty channel array yields an empty list.
    underTest = new DefaultNotificationManager(new NotificationChannel[] {}, dbClient);
    assertThat(underTest.getChannels()).isEmpty();
}
/**
 * Parses a formatted date string into a SQL DATE using a cached formatter for the
 * given pattern. Returns null on null input; rejects patterns that resolve to a
 * value containing a time-of-day component.
 */
@Udf(description = "Converts a string representation of a date in the given format"
    + " into a DATE value.")
public Date parseDate(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedDate,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.text.SimpleDateFormat.") final String formatPattern) {
  if (formattedDate == null || formatPattern == null) {
    return null;
  }
  try {
    final long time = formatters.get(formatPattern).parse(formattedDate).getTime();
    // A DATE must be a whole number of days; any time-of-day remainder means
    // the pattern parsed time fields, which is not allowed here.
    if (time % MILLIS_IN_DAY != 0) {
      throw new KsqlFunctionException("Date format contains time field.");
    }
    return new Date(time);
  } catch (final ExecutionException | RuntimeException | ParseException e) {
    throw new KsqlFunctionException("Failed to parse date '" + formattedDate
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
/** A year-only pattern parses to midnight Jan 1 of that year (epoch millis for 2022-01-01 UTC). */
@Test
public void shouldConvertYearToDate() {
    // When:
    final Date result = udf.parseDate("2022", "yyyy");
    // Then:
    assertThat(result.getTime(), is(1640995200000L));
}
/**
 * Static factory for member versions. The all-zero triple is reserved as the
 * {@code UNKNOWN} sentinel; any other combination yields a new instance.
 */
public static MemberVersion of(int major, int minor, int patch) {
    final boolean isUnknown = major == 0 && minor == 0 && patch == 0;
    return isUnknown ? MemberVersion.UNKNOWN : new MemberVersion(major, minor, patch);
}
/** Beta suffixes in version strings must be ignored when parsing major/minor/patch. */
@Test
public void testVersionOf_whenVersionStringIsBeta() {
    assertEquals(MemberVersion.of(3, 8, 0), MemberVersion.of(VERSION_3_8_BETA_2_STRING));
    assertEquals(MemberVersion.of(3, 8, 1), MemberVersion.of(VERSION_3_8_1_BETA_1_STRING));
}
/**
 * Resets all renderer state so this instance can be reused for a new response.
 */
@Override
public void init() {
    // Fresh bookkeeping objects for this rendering session.
    singleThreaded = new Object();
    dataListListenerStack = new ArrayDeque<>();

    // Clear mode/status flags, then mark the renderer as ready for use.
    beforeHandoverMode = false;
    clientClosed = false;
    networkIsInitialized = false;
    isInitialized = true;
}
/**
 * A sublist added to the data list twice must be rendered twice — reuse of the
 * same list instance must not collapse or skip the second occurrence.
 */
@Test
void testRenderingOfListsAddedTwice() throws Exception {
    StringDataList dataList = createDataListWithReusedSublist();
    TestRenderer renderer = new TestRenderer();
    renderer.init();
    assertEquals(" beginResponse beginList[[[]], []] beginList[[]] beginList[] endList[] endList[[]] beginList[] endList[] endList[[[]], []] endResponse",
            render(renderer, dataList));
}
/**
 * Computes the return-over-maximum-drawdown ratio for a single position.
 * Returns NaN when the maximum drawdown is zero, since the ratio is undefined.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    final Num drawdown = maxDrawdownCriterion.calculate(series, position);
    // Guard clause: a zero drawdown would mean division by zero.
    if (drawdown.isZero()) {
        return NaN.NaN;
    }
    return grossReturnCriterion.calculate(series, position).dividedBy(drawdown);
}
/**
 * A trading record with no drawdown must yield NaN (the ratio's divisor is zero).
 */
@Test
public void testNoDrawDownForTradingRecord() {
    final MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 95, 100, 90, 95, 80, 120);
    final TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(1, series),
            Trade.buyAt(2, series), Trade.sellAt(3, series));
    final Num result = rrc.calculate(series, tradingRecord);
    assertNumEquals(NaN.NaN, result);
}
/** Returns the enhancement category set on this bean. */
public NeedEnhanceEnum getNeedEnhanceEnum() {
    return needEnhanceEnum;
}
/** Simple getter/setter round-trip for the enhancement category. */
@Test
public void testGetNeedEnhanceEnum() {
    enhanceBean.setNeedEnhanceEnum(NeedEnhanceEnum.SERVICE_BEAN);
    assertEquals(NeedEnhanceEnum.SERVICE_BEAN, enhanceBean.getNeedEnhanceEnum());
}
boolean shouldRetry(GetQueryExecutionResponse getQueryExecutionResponse) { String stateChangeReason = getQueryExecutionResponse.queryExecution().status().stateChangeReason(); if (this.retry.contains("never")) { LOG.trace("AWS Athena start query execution detected error ({}), marked as not retryable", stateChangeReason); return false; } if (this.retry.contains("always")) { LOG.trace("AWS Athena start query execution detected error ({}), marked as retryable", stateChangeReason); return true; } // Generic errors happen sometimes in Athena. It's possible that a retry will fix the problem. if (stateChangeReason != null && stateChangeReason.contains("GENERIC_INTERNAL_ERROR") && (this.retry.contains("generic") || this.retry.contains("retryable"))) { LOG.trace("AWS Athena start query execution detected generic error ({}), marked as retryable", stateChangeReason); return true; } // Resource exhaustion happens sometimes in Athena. It's possible that a retry will fix the problem. if (stateChangeReason != null && stateChangeReason.contains("exhausted resources at this scale factor") && (this.retry.contains("exhausted") || this.retry.contains("retryable"))) { LOG.trace("AWS Athena start query execution detected resource exhaustion error ({}), marked as retryable", stateChangeReason); return true; } return false; }
/**
 * With the "retryable" policy, an error reason matching neither the generic nor
 * the resource-exhaustion class must not be retried.
 */
@Test
public void shouldRetryReturnsFalseForUnexpectedError() {
    Athena2QueryHelper helper = athena2QueryHelperWithRetry("retryable");
    assertFalse(helper.shouldRetry(newGetQueryExecutionResponse(QueryExecutionState.FAILED, "unexpected")));
}
/**
 * Builds the logging configuration for the APP process: resets the root context,
 * wires JUL propagation, installs console and file appenders, and applies the
 * root log level with hazelcast pinned to WARN.
 */
public LoggerContext configure() {
    LoggerContext ctx = helper.getRootContext();
    // Start from a clean context so repeated configuration doesn't stack appenders.
    ctx.reset();
    helper.enableJulChangePropagation(ctx);
    configureConsole(ctx);
    configureWithLogbackWritingToFile(ctx);
    helper.apply(
        LogLevelConfig.newBuilder(helper.getRootLoggerName())
            .rootLevelFor(ProcessId.APP)
            // Hazelcast is chatty at INFO; cap it at WARN regardless of settings.
            .immutableLevel("com.hazelcast", Level.toLevel("WARN"))
            .build(),
        appSettings.getProps());
    return ctx;
}
/**
 * In command-line mode the root logger must have exactly two appenders
 * (formatted console + sonar.log file), and no other logger except the startup
 * logger may write to sonar.log.
 */
@Test
public void root_logger_writes_to_console_with_formatting_and_to_sonar_log_file_when_running_from_command_line() {
    emulateRunFromCommandLine(false);

    LoggerContext ctx = underTest.configure();

    Logger rootLogger = ctx.getLogger(ROOT_LOGGER_NAME);
    verifyAppConsoleAppender(rootLogger.getAppender("APP_CONSOLE"));
    verifySonarLogFileAppender(rootLogger.getAppender("file_sonar"));
    assertThat(rootLogger.iteratorForAppenders()).toIterable().hasSize(2);

    // verify no other logger except startup logger writes to sonar.log
    ctx.getLoggerList()
        .stream()
        .filter(logger -> !ROOT_LOGGER_NAME.equals(logger.getName()) && !LOGGER_STARTUP.equals(logger.getName()))
        .forEach(AppLoggingTest::verifyNoFileAppender);
}