focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Rebuilds the flat rule list from the typed global-rule fields: removes every existing
// YAML global rule configuration, then re-adds each non-null field in fixed declaration
// order (authority, sqlParser, transaction, globalClock, sqlFederation, sqlTranslator, logging).
public void rebuild() { rules.removeIf(YamlGlobalRuleConfiguration.class::isInstance); if (null != authority) { rules.add(authority); } if (null != sqlParser) { rules.add(sqlParser); } if (null != transaction) { rules.add(transaction); } if (null != globalClock) { rules.add(globalClock); } if (null != sqlFederation) { rules.add(sqlFederation); } if (null != sqlTranslator) { rules.add(sqlTranslator); } if (null != logging) { rules.add(logging); } }
// Verifies rebuild() collects all seven configured global rules into getRules()
// in their declaration order.
@Test void assertRebuild() { YamlJDBCConfiguration actual = new YamlJDBCConfiguration(); YamlAuthorityRuleConfiguration authorityRuleConfig = new YamlAuthorityRuleConfiguration(); actual.setAuthority(authorityRuleConfig); YamlSQLParserRuleConfiguration sqlParserRuleConfig = new YamlSQLParserRuleConfiguration(); actual.setSqlParser(sqlParserRuleConfig); YamlTransactionRuleConfiguration transactionRuleConfig = new YamlTransactionRuleConfiguration(); actual.setTransaction(transactionRuleConfig); YamlGlobalClockRuleConfiguration globalClockRuleConfig = new YamlGlobalClockRuleConfiguration(); actual.setGlobalClock(globalClockRuleConfig); YamlSQLFederationRuleConfiguration sqlFederationRuleConfig = new YamlSQLFederationRuleConfiguration(); actual.setSqlFederation(sqlFederationRuleConfig); YamlSQLTranslatorRuleConfiguration sqlTranslatorRuleConfig = new YamlSQLTranslatorRuleConfiguration(); actual.setSqlTranslator(sqlTranslatorRuleConfig); YamlLoggingRuleConfiguration loggingRuleConfig = new YamlLoggingRuleConfiguration(); actual.setLogging(loggingRuleConfig); actual.rebuild(); assertThat(actual.getRules(), is(Arrays.asList( authorityRuleConfig, sqlParserRuleConfig, transactionRuleConfig, globalClockRuleConfig, sqlFederationRuleConfig, sqlTranslatorRuleConfig, loggingRuleConfig))); }
// UDF overload of round() for a primitive long: an integral value is already rounded,
// so this is the identity (boxed to Long by the return type).
@Udf public Long round(@UdfParameter final long val) { return val; }
// Exercises the BigDecimal overload of round() (not the long identity overload above)
// on negative inputs; expectations encode half-up-style rounding toward the expected scale.
@Test public void shouldRoundSimpleBigDecimalNegative() { assertThat(udf.round(new BigDecimal("-1.23")), is(new BigDecimal("-1"))); assertThat(udf.round(new BigDecimal("-1.0")), is(new BigDecimal("-1"))); assertThat(udf.round(new BigDecimal("-1.5")), is(new BigDecimal("-1"))); assertThat(udf.round(new BigDecimal("-1530000")), is(new BigDecimal("-1530000"))); assertThat(udf.round(new BigDecimal("-10.1")), is(new BigDecimal("-10"))); assertThat(udf.round(new BigDecimal("-12345.5")), is(new BigDecimal("-12345"))); assertThat(udf.round(new BigDecimal("-9.99")), is(new BigDecimal("-10"))); assertThat(udf.round(new BigDecimal("-110.1")), is(new BigDecimal("-110"))); assertThat(udf.round(new BigDecimal("-1530000.01")), is(new BigDecimal("-1530000"))); assertThat(udf.round(new BigDecimal("-9999999.99")), is(new BigDecimal("-10000000"))); }
// Decides whether a method should be published: the method must pass the include list
// (StringUtils.ALL acts as a wildcard admitting everything) and must not appear in the
// exclude list. Defaults to publishing when neither list rejects it.
protected boolean inList(String includeMethods, String excludeMethods, String methodName) {
    // include (white) list check
    if (!StringUtils.ALL.equals(includeMethods)) {
        if (!inMethodConfigs(includeMethods, methodName)) {
            return false;
        }
    }
    // exclude (black) list check
    if (inMethodConfigs(excludeMethods, methodName)) {
        return false;
    }
    // publish by default
    return true;
}
// Verifies inList() returns false for a method name ("hello3") that is not in the
// include list ("hello1") — the exclude list ("hello2") is irrelevant here.
@Test public void notInListTest() { ProviderConfig providerConfig = new ProviderConfig(); DefaultProviderBootstrap defaultProviderBootstra = new DefaultProviderBootstrap(providerConfig); boolean result = defaultProviderBootstra.inList("hello1", "hello2", "hello3"); Assert.assertTrue(!result); }
// Delegates a flow-control window increment for the given stream to the monitor;
// the assertion guards that this runs on the channel's event loop (or before ctx is set).
@Override public void incrementWindowSize(Http2Stream stream, int delta) throws Http2Exception { assert ctx == null || ctx.executor().inEventLoop(); monitor.incrementWindowSize(state(stream), delta); }
// Verifies that incrementing one stream's window changes only that stream's window;
// the connection window and sibling streams stay at the default, and no listener fires.
@Test public void windowUpdateShouldChangeStreamWindow() throws Http2Exception { incrementWindowSize(STREAM_A, 100); assertEquals(DEFAULT_WINDOW_SIZE, window(CONNECTION_STREAM_ID)); assertEquals(DEFAULT_WINDOW_SIZE + 100, window(STREAM_A)); assertEquals(DEFAULT_WINDOW_SIZE, window(STREAM_B)); assertEquals(DEFAULT_WINDOW_SIZE, window(STREAM_C)); assertEquals(DEFAULT_WINDOW_SIZE, window(STREAM_D)); verifyZeroInteractions(listener); }
@Udf
public String rpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  // Contract: any null argument, an empty padding string, or a negative target
  // length yields null. Otherwise the input is right-padded with repetitions of
  // `padding` and truncated to exactly `targetLen` characters (so an input longer
  // than the target is cut down to the target length).
  if (input == null
      || padding == null || padding.isEmpty()
      || targetLen == null || targetLen < 0) {
    return null;
  }
  final StringBuilder padded = new StringBuilder(targetLen + padding.length());
  padded.append(input);
  // Append whole copies of the padding until the target is covered, then trim.
  while (padded.length() < targetLen) {
    padded.append(padding);
  }
  padded.setLength(targetLen);
  return padded.toString();
}
// Verifies an empty input is padded to the target length; the 3-char padding "foo"
// overshoots length 4 and is truncated to "foof".
@Test public void shouldPadEmptyInputString() { final String result = udf.rpad("", 4, "foo"); assertThat(result, is("foof")); }
// Lazily resolves and caches the Kafka cluster id; lookupKafkaClusterId is invoked at
// most once per instance on the happy path. NOTE(review): plain lazy init with no
// synchronization — presumably only called from a single thread; confirm with callers.
public String kafkaClusterId() { if (kafkaClusterId == null) { kafkaClusterId = lookupKafkaClusterId(this); } return kafkaClusterId; }
// Verifies kafkaClusterId() resolves the id via the static lookup exactly once and
// serves subsequent calls from the cached value (mocked static still at 1 invocation).
@Test
public void testKafkaClusterId() {
    Map<String, String> props = baseProps();
    WorkerConfig config = new WorkerConfig(WorkerConfig.baseConfigDef(), props);
    assertEquals(CLUSTER_ID, config.kafkaClusterId());
    workerConfigMockedStatic.verify(() -> WorkerConfig.lookupKafkaClusterId(any(WorkerConfig.class)), times(1));
    // next calls hit the cache
    assertEquals(CLUSTER_ID, config.kafkaClusterId());
    workerConfigMockedStatic.verify(() -> WorkerConfig.lookupKafkaClusterId(any(WorkerConfig.class)), times(1));
}
// Returns the current processing time as reported by the injected clock,
// making "now" controllable in tests.
@Override public Instant currentProcessingTime() { return processingTimeClock.now(); }
// Verifies currentProcessingTime() tracks the test clock: it matches clock.now() both
// before and after advancing the clock by 12 hours.
@Test public void getProcessingTimeIsClockNow() { assertThat(internals.currentProcessingTime(), equalTo(clock.now())); Instant oldProcessingTime = internals.currentProcessingTime(); clock.advance(Duration.standardHours(12)); assertThat(internals.currentProcessingTime(), equalTo(clock.now())); assertThat( internals.currentProcessingTime(), equalTo(oldProcessingTime.plus(Duration.standardHours(12)))); }
// Returns a thumbnail of the content: the first THUMBNAIL_LINE_HEIGHT lines
// (split and re-joined on LINE_BREAK), dropping the rest.
public String getThumbnailContent() { return Arrays.stream(content.split(LINE_BREAK)) .limit(THUMBNAIL_LINE_HEIGHT) .collect(Collectors.joining(LINE_BREAK)); }
// Verifies getThumbnailContent() keeps only the first five of nine lines.
// (Korean @DisplayName: "success: verify 5-line thumbnail extraction works".)
@Test
@DisplayName("성공: 썸네일(5줄) 추출 잘 되는지 확인")
void getThumbnailContent() {
    // given
    Member member = MemberFixture.getFirstMember();
    Category category = CategoryFixture.getFirstCategory();
    Template template = new Template(member, "title", "description", category);
    SourceCode sourceCode = new SourceCode( 1L, template, "file", "1\n2\n3\n4\n5\n6\n7\n8\n9\n", 1 );
    // when
    String thumbnail = sourceCode.getThumbnailContent();
    // then
    assertThat(thumbnail).isEqualTo("1\n2\n3\n4\n5");
}
// Submits an SMS over an established SMPP session: ensures the session is open, builds a
// SubmitSM with configured service type, source/destination addresses and coding scheme,
// sends it synchronously, and returns the number of message segments.
// NOTE(review): the method declares `throws SmsException` but wraps every failure in an
// unchecked RuntimeException instead — confirm whether callers expect SmsException here.
@Override public int sendSms(String numberTo, String message) throws SmsException { try { checkSmppSession(); SubmitSM request = new SubmitSM(); if (StringUtils.isNotEmpty(config.getServiceType())) { request.setServiceType(config.getServiceType()); } if (StringUtils.isNotEmpty(config.getSourceAddress())) { request.setSourceAddr(new Address(config.getSourceTon(), config.getSourceNpi(), config.getSourceAddress())); } request.setDestAddr(new Address(config.getDestinationTon(), config.getDestinationNpi(), prepareNumber(numberTo))); request.setShortMessage(message); request.setDataCoding(Optional.ofNullable(config.getCodingScheme()).orElse((byte) 0)); request.setReplaceIfPresentFlag((byte) 0); request.setEsmClass((byte) 0); request.setProtocolId((byte) 0); request.setPriorityFlag((byte) 0); request.setRegisteredDelivery((byte) 0); request.setSmDefaultMsgId((byte) 0); SubmitSMResp response = smppSession.submit(request); log.debug("SMPP submit command status: {}", response.getCommandStatus()); } catch (Exception e) { throw new RuntimeException(e); } return countMessageSegments(message); }
// Verifies sendSms() reuses the already-open SMPP session (no re-init) and submits a
// request whose message, addresses, TON/NPI values, coding scheme and flag bytes all
// match the configured SMPP settings.
@Test public void testSendSms() throws Exception { when(smppSession.isOpened()).thenReturn(true); when(smppSession.submit(any())).thenReturn(new SubmitSMResp()); setDefaultSmppConfig(); String number = "123545"; String message = "message"; smppSmsSender.sendSms(number, message); verify(smppSmsSender, never()).initSmppSession(); verify(smppSession).submit(argThat(submitRequest -> { try { return submitRequest.getShortMessage().equals(message) && submitRequest.getDestAddr().getAddress().equals(number) && submitRequest.getServiceType().equals(smppConfig.getServiceType()) && (StringUtils.isEmpty(smppConfig.getSourceAddress()) ? submitRequest.getSourceAddr().getAddress().equals("") : submitRequest.getSourceAddr().getAddress().equals(smppConfig.getSourceAddress()) && submitRequest.getSourceAddr().getTon() == smppConfig.getSourceTon() && submitRequest.getSourceAddr().getNpi() == smppConfig.getSourceNpi()) && submitRequest.getDestAddr().getTon() == smppConfig.getDestinationTon() && submitRequest.getDestAddr().getNpi() == smppConfig.getDestinationNpi() && submitRequest.getDataCoding() == smppConfig.getCodingScheme() && submitRequest.getReplaceIfPresentFlag() == 0 && submitRequest.getEsmClass() == 0 && submitRequest.getProtocolId() == 0 && submitRequest.getPriorityFlag() == 0 && submitRequest.getRegisteredDelivery() == 0 && submitRequest.getSmDefaultMsgId() == 0; } catch (Exception e) { fail(e.getMessage()); return false; } })); }
// Returns the raw command-line arguments string configured on this endpoint.
public String getArgs() { return args; }
// Verifies that URI-encoded args (including embedded quotes and spaces) round-trip
// unchanged through endpoint creation and getArgs().
@Test @DirtiesContext public void testCreateEndpointWithArgs2() throws Exception { String args = "arg1 \"arg2 \" arg3"; ExecEndpoint e = createExecEndpoint("exec:test?args=" + UnsafeUriCharactersEncoder.encode(args)); assertEquals(args, e.getArgs()); }
// Identifies this algorithm's rate-limiter key: the leaky-bucket enum's key name.
@Override protected String getKeyName() { return RateLimitEnum.LEAKY_BUCKET.getKeyName(); }
// Verifies the leaky-bucket algorithm reports the expected key name.
@Test public void getKeyNameTest() { assertThat("request_leaky_rate_limiter", is(leakyBucketRateLimiterAlgorithm.getKeyName())); }
// Marks this component as done; subclasses/tests use it to signal completion.
protected void setDone() { done = true; }
// Verifies that a subscription whose poll() immediately signals done produces an empty,
// error-free element stream, closes the subscription, and leaves no pending executor tasks.
@Test public void testEmpty() throws Exception { final TestSubscriber<String> testSubscriber = new TestSubscriber<>(); final TestPublisher testPublisher = new TestPublisher() { @Override TestPollingSubscription createSubscription( final Subscriber<String> subscriber ) { return new TestPollingSubscription(subscriber, exec) { @Override String poll() { setDone(); return null; } }; } }; testPublisher.subscribe(testSubscriber); assertTrue(testSubscriber.await()); assertTrue(exec.shutdownNow().isEmpty()); assertTrue(testPublisher.subscription.closed); assertNull(testSubscriber.getError()); assertEquals(ImmutableList.of(), testSubscriber.getElements()); }
// Convenience overload: sorts the whole array with the given comparator by delegating
// to the range variant over [0, array.length).
public static void sort(short[] array, ShortComparator comparator) { sort(array, 0, array.length, comparator); }
// Property test with a fixed seed: for 10000 random arrays, the custom sort with the
// natural comparator must match Arrays.sort exactly.
@Test void sorting_random_arrays_should_produce_identical_result_as_java_sort() { Random r = new Random(4234); for (int i = 0; i < 10000; i++) { short[] original = makeRandomArray(r); short[] javaSorted = Arrays.copyOf(original, original.length); short[] customSorted = Arrays.copyOf(original, original.length); PrimitiveArraySorter.sort(customSorted, Short::compare); Arrays.sort(javaSorted); String errorMsg = String.format("%s != %s (before sorting: %s)", Arrays.toString(customSorted), Arrays.toString(javaSorted), Arrays.toString(original)); assertArrayEquals(customSorted, javaSorted, errorMsg); } }
// Factory for a FieldProbe matching the field's probe type: double-mapped types get a
// DoubleFieldProbe, long-mapped types a LongFieldProbe; any other (or unknown) type is
// rejected with IllegalArgumentException.
static <S> FieldProbe createFieldProbe(Field field, Probe probe, SourceMetadata sourceMetadata) { ProbeType type = getType(field.getType()); if (type == null) { throw new IllegalArgumentException(format("@Probe field '%s' is of an unhandled type", field)); } if (type.getMapsTo() == double.class) { return new DoubleFieldProbe<S>(field, probe, type, sourceMetadata); } else if (type.getMapsTo() == long.class) { return new LongFieldProbe<S>(field, probe, type, sourceMetadata); } else { throw new IllegalArgumentException(type.toString()); } }
// Verifies createFieldProbe() rejects a field of an unhandled type with
// IllegalArgumentException (expected via the @Test annotation).
@Test(expected = IllegalArgumentException.class) public void whenUnknownType() throws NoSuchFieldException { UnknownFieldType unknownFieldType = new UnknownFieldType(); Field field = unknownFieldType.getClass().getDeclaredField("field"); Probe probe = field.getAnnotation(Probe.class); SourceMetadata ignoredSourceMetadata = new SourceMetadata(Object.class); createFieldProbe(field, probe, ignoredSourceMetadata); }
// Parses a glue path into a classpath URI. Empty input and the bare classpath scheme
// prefix both map to the root package; non-standard path separators are normalized and
// dotted package names are converted to resource paths before parsing.
public static URI parse(String gluePath) {
    requireNonNull(gluePath, "gluePath may not be null");
    if (gluePath.isEmpty()) {
        return rootPackageUri();
    }
    // Legacy from the Cucumber Eclipse plugin
    // Older versions of Cucumber allowed it.
    if (CLASSPATH_SCHEME_PREFIX.equals(gluePath)) {
        return rootPackageUri();
    }
    if (nonStandardPathSeparatorInUse(gluePath)) {
        String standardized = replaceNonStandardPathSeparator(gluePath);
        return parseAssumeClasspathScheme(standardized);
    }
    if (isProbablyPackage(gluePath)) {
        String path = resourceNameOfPackageName(gluePath);
        return parseAssumeClasspathScheme(path);
    }
    return parseAssumeClasspathScheme(gluePath);
}
// Verifies an explicit classpath: URI parses with scheme "classpath" and the
// path preserved as the scheme-specific part.
@Test void can_parse_classpath_form() { URI uri = GluePath.parse("classpath:com/example/app"); assertAll( () -> assertThat(uri.getScheme(), is("classpath")), () -> assertThat(uri.getSchemeSpecificPart(), is("com/example/app"))); }
// Unregisters a metrics source by name: stops and removes it from the active sources,
// and clears its bookkeeping entries. Safe to call for names that were never registered.
// Improvement: Map.remove is already a no-op for absent keys, so the redundant
// containsKey-then-remove guards on allSources and namedCallbacks are dropped.
@Override public synchronized void unregisterSource(String name) {
    if (sources.containsKey(name)) {
        // Stop the adapter before dropping it so it releases its resources.
        sources.get(name).stop();
        sources.remove(name);
    }
    allSources.remove(name);
    namedCallbacks.remove(name);
    DefaultMetricsSystem.removeSourceName(name);
}
// Verifies unregisterSource() removes only the named source (ts1) while leaving the
// other (ts2) registered, and works even before the metrics system is started.
@Test
public void testUnregisterSource() {
    MetricsSystem ms = new MetricsSystemImpl();
    TestSource ts1 = new TestSource("ts1");
    TestSource ts2 = new TestSource("ts2");
    ms.register("ts1", "", ts1);
    ms.register("ts2", "", ts2);
    MetricsSource s1 = ms.getSource("ts1");
    assertNotNull(s1);
    // should work when metrics system is not started
    ms.unregisterSource("ts1");
    s1 = ms.getSource("ts1");
    assertNull(s1);
    MetricsSource s2 = ms.getSource("ts2");
    assertNotNull(s2);
    ms.shutdown();
}
// Evaluates any configured disrupt context for this request, then forwards the call
// to the wrapped client unchanged.
@Override public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext) { doEvaluateDisruptContext(request, requestContext); return _client.sendRequest(request, requestContext); }
// Verifies the multiplexed callback-style sendRequest overload forwards the request,
// a request context, and the callback to the underlying client exactly once.
@Test public void testSendRequest12() { when(_controller.getDisruptContext(any(String.class))).thenReturn(_disrupt); _client.sendRequest(_multiplexed, _multiplexedCallback); verify(_underlying, times(1)).sendRequest(eq(_multiplexed), any(RequestContext.class), eq(_multiplexedCallback)); }
// Fetches each requested document id over a synchronous message-bus session and prints
// the replies to stdout. When JSON output is enabled (and not printing ids only), the
// replies are wrapped in a JSON array with comma separators between entries. The route
// is resolved from the cluster name when one is given; trace level is applied if > 0.
// NOTE(review): documentAccess/session are stored in fields and not closed here —
// presumably cleaned up elsewhere; confirm ownership.
public void retrieveDocuments() throws DocumentRetrieverException { boolean first = true; String route = params.cluster.isEmpty() ? params.route : resolveClusterRoute(params.cluster); MessageBusParams messageBusParams = createMessageBusParams(params.configId, params.timeout, route); documentAccess = documentAccessFactory.createDocumentAccess(messageBusParams); session = documentAccess.createSyncSession(new SyncParameters.Builder().build()); int trace = params.traceLevel; if (trace > 0) { session.setTraceLevel(trace); } Iterator<String> iter = params.documentIds; if (params.jsonOutput && !params.printIdsOnly) { System.out.println('['); } while (iter.hasNext()) { if (params.jsonOutput && !params.printIdsOnly) { if (!first) { System.out.println(','); } else { first = false; } } String docid = iter.next(); Message msg = createDocumentRequest(docid); Reply reply = session.syncSend(msg); printReply(reply); } if (params.jsonOutput && !params.printIdsOnly) { System.out.println(']'); } }
// Verifies that naming a cluster when no content clusters are declared makes
// retrieveDocuments() fail with a descriptive DocumentRetrieverException.
@Test void testEmptyClusterList() throws DocumentRetrieverException { Throwable exception = assertThrows(DocumentRetrieverException.class, () -> { ClientParameters params = createParameters() .setCluster("invalidclustername") .build(); DocumentRetriever documentRetriever = createDocumentRetriever(params); documentRetriever.retrieveDocuments(); }); assertTrue(exception.getMessage().contains("The Vespa cluster does not have any content clusters declared.")); }
// Collects Zuul filter class names from two config sources: explicitly listed classes
// ("zuul.filters.classes") and classpath scans of listed packages ("zuul.filters.packages"),
// keeping only top-level classes assignable to ZuulFilter. Logs the resulting names.
@VisibleForTesting
String[] findClassNames(AbstractConfiguration config) {
    // Find individually-specified filter classes.
    String[] filterClassNamesStrArray = config.getStringArray("zuul.filters.classes");
    Stream<String> classNameStream = Arrays.stream(filterClassNamesStrArray).map(String::trim).filter(blank.negate());
    // Find filter classes in specified packages.
    String[] packageNamesStrArray = config.getStringArray("zuul.filters.packages");
    ClassPath cp;
    try {
        cp = ClassPath.from(this.getClass().getClassLoader());
    } catch (IOException e) {
        throw new RuntimeException("Error attempting to read classpath to find filters!", e);
    }
    Stream<String> packageStream = Arrays.stream(packageNamesStrArray)
            .map(String::trim)
            .filter(blank.negate())
            .flatMap(packageName -> cp.getTopLevelClasses(packageName).stream())
            .map(ClassPath.ClassInfo::load)
            .filter(ZuulFilter.class::isAssignableFrom)
            .map(Class::getCanonicalName);
    String[] filterClassNames = Stream.concat(classNameStream, packageStream).toArray(String[]::new);
    if (filterClassNames.length != 0) {
        LOG.info("Using filter classnames: ");
        for (String location : filterClassNames) {
            LOG.info(" {}", location);
        }
    }
    return filterClassNames;
}
// Verifies package scanning across two packages yields exactly the two expected
// ZuulFilter classes, in package order.
@Test void testMultiPackages() { Class<?> expectedClass1 = TestZuulFilter.class; Class<?> expectedClass2 = TestZuulFilter2.class; Mockito.when(configuration.getStringArray("zuul.filters.classes")).thenReturn(new String[0]); Mockito.when(configuration.getStringArray("zuul.filters.packages")) .thenReturn(new String[] {"com.netflix.zuul.init", "com.netflix.zuul.init2"}); String[] classNames = module.findClassNames(configuration); assertThat(classNames.length, equalTo(2)); assertThat(classNames[0], equalTo(expectedClass1.getCanonicalName())); assertThat(classNames[1], equalTo(expectedClass2.getCanonicalName())); }
// Renders this parameter as its raw string value, unmodified.
@Override public String toString() { return parameterValue; }
// Verifies toString() echoes the constructor's raw value (a timestamp-with-zone literal).
@Test void assertToString() { assertThat(new PostgreSQLTypeUnspecifiedSQLParameter("2020-08-23 15:57:03+08").toString(), is("2020-08-23 15:57:03+08")); }
// Requests the replicator identified by id to stop by setting the ESTOP error code;
// always reports success. NOTE(review): presumably setError triggers the actual
// shutdown via the id's error handler — confirm in ThreadId/Replicator.
public static boolean stop(final ThreadId id) { id.setError(RaftError.ESTOP.getNumber()); return true; }
// Verifies stop() tears the replicator down: its id, heartbeat timer and in-flight RPC
// are cleared after the stop request.
@Test public void testStop() { final Replicator r = getReplicator(); this.id.unlock(); assertNotNull(r.getHeartbeatTimer()); assertNotNull(r.getRpcInFly()); Replicator.stop(this.id); assertNull(r.id); assertNull(r.getHeartbeatTimer()); assertNull(r.getRpcInFly()); }
// Builds a metric name from a class by delegating to the String overload with the
// class's simple name plus the given parts.
public static String name(Class<?> clazz, String... parts) { return name(clazz.getSimpleName(), parts); }
// Verifies the class-based name() overload produces dot-joined names prefixed with
// the fixed "chat" namespace and the class's simple name.
@Test void name() { assertEquals("chat.MetricsUtilTest.metric", MetricsUtil.name(MetricsUtilTest.class, "metric")); assertEquals("chat.MetricsUtilTest.namespace.metric", MetricsUtil.name(MetricsUtilTest.class, "namespace", "metric")); }
// Returns a logger named after the given class; the class must be non-null.
public static ILogger getLogger(@Nonnull Class<?> clazz) { checkNotNull(clazz, "class must not be null"); return getLoggerInternal(clazz.getName()); }
// Verifies that an invalid logging-type configuration falls back to the standard logger.
@Test public void getLogger_whenInvalidConfiguration_thenCreateStandardLogger() { isolatedLoggingRule.setLoggingType("invalid"); assertInstanceOf(StandardLoggerFactory.StandardLogger.class, Logger.getLogger(getClass())); }
// Infers the field schema of a MongoDB collection. Prefers the collection's JSON-schema
// validator properties; when absent, falls back to sampling a single document and mapping
// Java value types to BSON types. In stream mode, field keys are prefixed with
// "fullDocument." and change-stream metadata fields are appended. Throws
// IllegalArgumentException when the collection does not exist (listing alternatives) and
// IllegalStateException when sampling finds no documents.
Map<String, DocumentField> readFields(String[] externalNames, String dataConnectionName, Map<String, String> options, boolean stream) {
    String collectionName = externalNames.length == 2 ? externalNames[1] : externalNames[0];
    String databaseName = Options.getDatabaseName(nodeEngine, externalNames, dataConnectionName);
    Map<String, DocumentField> fields = new LinkedHashMap<>();
    try (MongoClient client = connect(dataConnectionName, options)) {
        requireNonNull(client);
        ResourceChecks resourceChecks = readExistenceChecksFlag(options);
        if (resourceChecks.isEverPerformed()) {
            checkDatabaseAndCollectionExists(client, databaseName, collectionName);
        }
        MongoDatabase database = client.getDatabase(databaseName);
        List<Document> collections = database.listCollections()
                .filter(eq("name", collectionName))
                .into(new ArrayList<>());
        if (collections.isEmpty()) {
            ArrayList<String> list = database.listCollectionNames().into(new ArrayList<>());
            throw new IllegalArgumentException("collection " + collectionName + " was not found, maybe you mean: " + list);
        }
        Document collectionInfo = collections.get(0);
        Document properties = getIgnoringNulls(collectionInfo, "options", "validator", "$jsonSchema", "properties");
        if (properties != null) {
            for (Entry<String, Object> property : properties.entrySet()) {
                Document props = (Document) property.getValue();
                BsonType bsonType = getBsonType(props);
                String key = property.getKey();
                if (stream) {
                    key = "fullDocument." + key;
                }
                fields.put(key, new DocumentField(bsonType, key));
            }
        } else {
            // fall back to sampling
            ArrayList<Document> samples = database.getCollection(collectionName).find().limit(1).into(new ArrayList<>());
            if (samples.isEmpty()) {
                throw new IllegalStateException("Cannot infer schema of collection " + collectionName + ", no documents found");
            }
            Document sample = samples.get(0);
            for (Entry<String, Object> entry : sample.entrySet()) {
                if (entry.getValue() == null) {
                    continue;
                }
                String key = entry.getKey();
                if (stream) {
                    key = "fullDocument." + key;
                }
                DocumentField field = new DocumentField(resolveTypeFromJava(entry.getValue()), key);
                fields.put(key, field);
            }
        }
        if (stream) {
            fields.put("operationType", new DocumentField(BsonType.STRING, "operationType"));
            fields.put("resumeToken", new DocumentField(BsonType.STRING, "resumeToken"));
            fields.put("wallTime", new DocumentField(BsonType.DATE_TIME, "wallTime"));
            fields.put("ts", new DocumentField(BsonType.TIMESTAMP, "ts"));
            fields.put("clusterTime", new DocumentField(BsonType.TIMESTAMP, "clusterTime"));
        }
    }
    return fields;
}
// Integration test: with no schema validator present, readFields() infers the schema by
// sampling one inserted document and maps Java types (String, int, Set, List) to BSON types.
@Test public void testResolvesFieldsViaSample() { try (MongoClient client = MongoClients.create(mongoContainer.getConnectionString())) { String databaseName = "testDatabase"; String collectionName = "people_2"; MongoDatabase testDatabase = client.getDatabase(databaseName); MongoCollection<Document> collection = testDatabase.getCollection(collectionName); collection.insertOne(new Document("firstName", "Tomasz") .append("lastName", "Gawęda") .append("birthYear", 1992) .append("citizenship", new HashSet<>(singletonList("Polish"))) .append("citizenshipButList", singletonList("Polish")) ); FieldResolver resolver = new FieldResolver(null); Map<String, String> readOpts = new HashMap<>(); readOpts.put("connectionString", mongoContainer.getConnectionString()); readOpts.put("database", databaseName); Map<String, DocumentField> fields = resolver.readFields(new String[]{databaseName, collectionName}, null, readOpts, false); assertThat(fields).containsOnlyKeys("_id", "firstName", "lastName", "birthYear", "citizenship", "citizenshipButList"); assertThat(fields.get("lastName").columnType).isEqualTo(BsonType.STRING); assertThat(fields.get("birthYear").columnType).isEqualTo(BsonType.INT32); assertThat(fields.get("citizenship").columnType).isEqualTo(BsonType.ARRAY); assertThat(fields.get("citizenshipButList").columnType).isEqualTo(BsonType.ARRAY); } }
// Reports whether a Content-Encoding header value indicates a compressed payload
// by substring-matching any of gzip, deflate, br, or compress.
@VisibleForTesting static boolean isCompressed(String contentEncoding) { return contentEncoding.contains(HttpHeaderValues.GZIP.toString()) || contentEncoding.contains(HttpHeaderValues.DEFLATE.toString()) || contentEncoding.contains(HttpHeaderValues.BR.toString()) || contentEncoding.contains(HttpHeaderValues.COMPRESS.toString()); }
// Verifies "identity" (no compression) is not treated as a compressed encoding.
@Test void detectsNonGzip() { assertFalse(HttpUtils.isCompressed("identity")); }
// Convenience overload: resolves the inter-node listener using the class's default logger.
public URL getInterNodeListener( final Function<URL, Integer> portResolver ) { return getInterNodeListener(portResolver, LOGGER); }
// Verifies the inter-node listener is resolved from the FIRST configured listener,
// with port 0 (auto-assign) replaced by the resolver's port and the trailing slash dropped,
// and that the resolution is logged exactly once.
@Test
public void shouldResolveInterNodeListenerToFirstListenerWithAutoPortAssignmentAndTrailingSlash() {
    // Given:
    final URL autoPort = url("https://example.com:0/");
    when(portResolver.apply(any())).thenReturn(2222);
    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
            .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
            .put(LISTENERS_CONFIG, autoPort.toString() + ",http://localhost:2589/")
            .build()
    );
    // When:
    final URL actual = config.getInterNodeListener(portResolver, logger);
    // Then:
    final URL expected = url("https://example.com:2222");
    assertThat(actual, is(expected));
    verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG);
    verifyNoMoreInteractions(logger);
}
// Combines two plan-node statistics, summing distinct value counts via the
// ADD_AND_SUM_DISTINCT range-addition strategy.
public PlanNodeStatsEstimate addStatsAndSumDistinctValues(PlanNodeStatsEstimate left, PlanNodeStatsEstimate right) { return addStats(left, right, RangeAdditionStrategy.ADD_AND_SUM_DISTINCT); }
// Verifies row-count addition: any unknown operand yields unknown stats, while two
// known row counts (10 + 20) sum to 30.
@Test public void testAddRowCount() { PlanNodeStatsEstimate unknownStats = statistics(NaN, NaN, NaN, NaN, StatisticRange.empty()); PlanNodeStatsEstimate first = statistics(10, NaN, NaN, NaN, StatisticRange.empty()); PlanNodeStatsEstimate second = statistics(20, NaN, NaN, NaN, StatisticRange.empty()); assertEquals(calculator.addStatsAndSumDistinctValues(unknownStats, unknownStats), PlanNodeStatsEstimate.unknown()); assertEquals(calculator.addStatsAndSumDistinctValues(first, unknownStats), PlanNodeStatsEstimate.unknown()); assertEquals(calculator.addStatsAndSumDistinctValues(unknownStats, second), PlanNodeStatsEstimate.unknown()); assertEquals(calculator.addStatsAndSumDistinctValues(first, second).getOutputRowCount(), 30.0); }
// Fixed creation timestamp of this migration (UTC instant, parsed from a constant).
@Override public ZonedDateTime createdAt() { return ZonedDateTime.parse("2017-01-10T15:01:00Z"); }
// Verifies the migration reports its fixed creation timestamp.
@Test public void createdAt() throws Exception { assertThat(migration.createdAt()).isEqualTo(ZonedDateTime.parse("2017-01-10T15:01:00Z")); }
// Builds a Finder matching the given query while excluding any match that also
// contains one of the exclude queries (chained via Finder.not).
public static Finder filteredFinder(String query, String... excludeQueries) { var finder = Finder.contains(query); for (String q : excludeQueries) { finder = finder.not(Finder.contains(q)); } return finder; }
// Verifies the combined finder matches lines containing " was " but excludes those
// also containing "many" or "child", leaving exactly one expected line.
@Test void filteredFinderTest() { var res = filteredFinder(" was ", "many", "child").find(text()); assertEquals(1, res.size()); assertEquals("But we loved with a love that was more than love-", res.get(0)); }
// Convenience overload: sends a message without retry/topic-fallback context by delegating
// to the full sendMessage variant with null callback/topic info and zero retries.
public SendResult sendMessage( final String addr, final String brokerName, final Message msg, final SendMessageRequestHeader requestHeader, final long timeoutMillis, final CommunicationMode communicationMode, final SendMessageContext context, final DefaultMQProducerImpl producer ) throws RemotingException, MQBrokerException, InterruptedException { return sendMessage(addr, brokerName, msg, requestHeader, timeoutMillis, communicationMode, null, null, null, 0, context, producer); }
// Verifies a SYSTEM_ERROR broker response to a synchronous send surfaces as an
// MQBrokerException carrying the broker's remark ("Broker is broken.").
@Test public void testSendMessageSync_WithException() throws InterruptedException, RemotingException { doAnswer(mock -> { RemotingCommand request = mock.getArgument(1); RemotingCommand response = RemotingCommand.createResponseCommand(SendMessageResponseHeader.class); response.setCode(ResponseCode.SYSTEM_ERROR); response.setOpaque(request.getOpaque()); response.setRemark("Broker is broken."); return response; }).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong()); SendMessageRequestHeader requestHeader = createSendMessageRequestHeader(); try { mqClientAPI.sendMessage(brokerAddr, brokerName, msg, requestHeader, 3 * 1000, CommunicationMode.SYNC, new SendMessageContext(), defaultMQProducerImpl); failBecauseExceptionWasNotThrown(MQBrokerException.class); } catch (MQBrokerException e) { assertThat(e).hasMessageContaining("Broker is broken."); } }
// Picks a network address for a scan: prefer the requested node if alive and not
// blacklisted; otherwise try the other scan-range locations; in shared-data mode,
// finally fall back to any alive node chosen deterministically by nodeId modulo the
// candidate count. The chosen node id is reported via nodeIdRef; returns null when
// nothing is available.
@Nullable
public static TNetworkAddress getHost(long nodeId, List<TScanRangeLocation> locations,
        ImmutableMap<Long, ComputeNode> computeNodes,
        Reference<Long> nodeIdRef) {
    if (locations == null || computeNodes == null) {
        return null;
    }
    LOG.debug("getHost nodeID={}, nodeSize={}", nodeId, computeNodes.size());
    ComputeNode node = computeNodes.get(nodeId);
    if (node != null && node.isAlive() && !HOST_BLACKLIST.contains(nodeId)) {
        nodeIdRef.setRef(nodeId);
        return new TNetworkAddress(node.getHost(), node.getBePort());
    } else {
        for (TScanRangeLocation location : locations) {
            if (location.backend_id == nodeId) {
                continue;
            }
            // choose the first alive backend(in analysis stage, the locations are random)
            ComputeNode candidateNode = computeNodes.get(location.backend_id);
            if (candidateNode != null && candidateNode.isAlive() && !HOST_BLACKLIST.contains(location.backend_id)) {
                nodeIdRef.setRef(location.backend_id);
                return new TNetworkAddress(candidateNode.getHost(), candidateNode.getBePort());
            }
        }
        // In shared data mode, we can select any alive node to replace the original dead node for query
        if (RunMode.isSharedDataMode()) {
            List<ComputeNode> allNodes = new ArrayList<>(computeNodes.size());
            allNodes.addAll(computeNodes.values());
            List<ComputeNode> candidateNodes = allNodes.stream()
                    .filter(x -> x.getId() != nodeId && x.isAlive() && !HOST_BLACKLIST.contains(x.getId())).collect(Collectors.toList());
            if (!candidateNodes.isEmpty()) {
                // use modulo operation to ensure that the same node is selected for the dead node
                ComputeNode candidateNode = candidateNodes.get((int) (nodeId % candidateNodes.size()));
                nodeIdRef.setRef(candidateNode.getId());
                return new TNetworkAddress(candidateNode.getHost(), candidateNode.getBePort());
            }
        }
    }
    // no backend or compute node returned
    return null;
}
// Verifies getHost() in shared-data mode: the requested backend is used while alive,
// and when both it and the other listed backend die, an arbitrary alive node (here
// backendC) is selected as replacement.
@Test
public void testGetHostWithBackendIdInSharedDataMode() {
    // locations
    List<TScanRangeLocation> locations = new ArrayList<TScanRangeLocation>();
    TScanRangeLocation locationA = new TScanRangeLocation();
    locationA.setBackend_id(0);
    locations.add(locationA);
    // backends
    Backend backendA = new Backend(0, "addressA", 0);
    backendA.updateOnce(0, 0, 0);
    Backend backendB = new Backend(1, "addressB", 0);
    backendB.updateOnce(0, 0, 0);
    Backend backendC = new Backend(2, "addressC", 0);
    backendC.updateOnce(0, 0, 0);
    Map<Long, Backend> backends = Maps.newHashMap();
    backends.put((long) 0, backendA);
    backends.put((long) 1, backendB);
    backends.put((long) 2, backendC);
    new MockUp<RunMode>() {
        @Mock
        public RunMode getCurrentRunMode() {
            return RunMode.SHARED_DATA;
        }
    };
    ImmutableMap<Long, ComputeNode> immutableBackends = null;
    {
        // backendA in locations is alive
        backendA.setAlive(true);
        backendB.setAlive(true);
        backendC.setAlive(true);
        immutableBackends = ImmutableMap.copyOf(backends);
        Assert.assertEquals( SimpleScheduler.getHost(0, locations, immutableBackends, ref).hostname, "addressA");
    }
    {
        // backendA in locations is not alive
        backendA.setAlive(false);
        backendB.setAlive(false);
        backendC.setAlive(true);
        immutableBackends = ImmutableMap.copyOf(backends);
        Assert.assertEquals( SimpleScheduler.getHost(0, locations, immutableBackends, ref).hostname, "addressC");
    }
}
// Returns the parser's default Tesseract OCR configuration.
public TesseractOCRConfig getDefaultConfig() { return defaultConfig; }
// Verifies that arbitrary Tesseract parameters from a Tika XML config land in the
// parser's default config under otherTesseractConfig.
@Test public void testArbitraryParams() throws Exception { try (InputStream is = getResourceAsStream( "/test-configs/tika-config-tesseract-arbitrary.xml")) { TikaConfig config = new TikaConfig(is); Parser p = config.getParser(); Parser tesseractOCRParser = findParser(p, org.apache.tika.parser.ocr.TesseractOCRParser.class); assertNotNull(tesseractOCRParser); TesseractOCRConfig tesseractOCRConfig = ((TesseractOCRParser) tesseractOCRParser).getDefaultConfig(); assertEquals("0.75", tesseractOCRConfig.getOtherTesseractConfig().get("textord_initialx_ile")); assertEquals("0.15625", tesseractOCRConfig.getOtherTesseractConfig().get("textord_noise_hfract")); } }
// Reports whether the string's first character is upper-case.
// An empty string has no first character and yields false.
static boolean firstIsCapitalized(String string) {
  return !string.isEmpty() && Character.isUpperCase(string.charAt(0));
}
// Verifies the empty string, an all-caps word, and a lowercase word are classified correctly.
@Test public void testFirstIsCapitalized() { assertFalse(MessageGenerator.firstIsCapitalized("")); assertTrue(MessageGenerator.firstIsCapitalized("FORTRAN")); assertFalse(MessageGenerator.firstIsCapitalized("java")); }
// REST endpoint: returns application info as UTF-8 JSON or XML.
@GET @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) public AppInfo get() { return getAppInfo(); }
// Verifies the /ws/v1/mapreduce/info/ endpoint (with trailing slash) returns UTF-8 JSON
// containing a single "info" object describing the AM.
@Test public void testInfoSlash() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("mapreduce") .path("info/").accept(MediaType.APPLICATION_JSON) .get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); JSONObject json = response.getEntity(JSONObject.class); assertEquals("incorrect number of elements", 1, json.length()); verifyAMInfo(json.getJSONObject("info"), appContext); }
// Polymorphic JSON deserialization: dispatches to the concrete CRMaterial implementation
// based on the TYPE (and artifact-origin) discriminator fields.
@Override public CRMaterial deserialize(JsonElement json, Type type, JsonDeserializationContext context) throws JsonParseException { return determineJsonElementForDistinguishingImplementers(json, context, TYPE, ARTIFACT_ORIGIN); }
// A "package"-typed JSON object must be delegated to CRPackageMaterial deserialization.
@Test
public void shouldDeserializePackageMaterialType() {
    JsonObject jsonObject = new JsonObject();
    jsonObject.addProperty("type", "package");
    materialTypeAdapter.deserialize(jsonObject, type, jsonDeserializationContext);
    verify(jsonDeserializationContext).deserialize(jsonObject, CRPackageMaterial.class);
}
/**
 * Adds a single route unit for one data source chosen from the broadcast rule's
 * data sources, then returns the (mutated) route context.
 */
@Override
public RouteContext route(final RouteContext routeContext, final BroadcastRule broadcastRule) {
    RouteMapper dataSourceMapper = getDataSourceRouteMapper(broadcastRule.getDataSourceNames());
    RouteUnit routeUnit = new RouteUnit(dataSourceMapper, createTableRouteMappers());
    routeContext.getRouteUnits().add(routeUnit);
    return routeContext;
}
// CREATE VIEW over a broadcast table must route to exactly one data source (ds_0 here).
@Test
void assertRouteWithCreateViewStatementContext() {
    CreateViewStatementContext sqlStatementContext = mock(CreateViewStatementContext.class);
    Collection<String> logicTables = Collections.singleton("t_address");
    ConnectionContext connectionContext = mock(ConnectionContext.class);
    BroadcastUnicastRoutingEngine engine = new BroadcastUnicastRoutingEngine(sqlStatementContext, logicTables, connectionContext);
    RouteContext routeContext = engine.route(new RouteContext(), broadcastRule);
    assertThat(routeContext.getRouteUnits().size(), is(1));
    RouteMapper dataSourceRouteMapper = routeContext.getRouteUnits().iterator().next().getDataSourceMapper();
    assertThat(dataSourceRouteMapper.getLogicName(), is("ds_0"));
    assertTableRouteMapper(routeContext);
}
/**
 * "server" CLI command: with no argument, prints the current server address;
 * with one argument, switches the REST client to that address, announces the
 * change, resets the CLI state, and validates connectivity to the new server.
 */
@Override
public void execute(final List<String> args, final PrintWriter terminal) {
    CliCmdUtil.ensureArgCountBounds(args, 0, 1, HELP);
    // No argument: just report the current address.
    if (args.isEmpty()) {
        terminal.println(restClient.getServerAddress());
        return;
    }
    final String serverAddress = args.get(0);
    restClient.setServerAddress(serverAddress);
    terminal.println("Server now: " + serverAddress);
    resetCliForNewServer.fire();
    validateClient(terminal, restClient);
}
// A connection failure while fetching server info must surface the friendly
// "does not appear to be a valid KSQL server" message rather than a raw error.
@Test
public void shouldReportErrorIfFailedToGetRemoteKsqlServerInfo() {
    // Given:
    reset(restClient);
    givenServerAddressHandling();
    when(restClient.getServerInfo()).thenThrow(genericConnectionIssue());
    // When:
    command.execute(ImmutableList.of(VALID_SERVER_ADDRESS), terminal);
    // Then:
    assertThat(out.toString(), containsString(
        "Remote server at " + VALID_SERVER_ADDRESS + " does not appear to be a valid KSQL"
            + System.lineSeparator()
            + "server. Please ensure that the URL provided is for an active KSQL server."));
}
/** Convenience overload: tags the request header's value under the header's own name. */
public static Tag<HttpRequest> requestHeader(String headerName) {
    return requestHeader(headerName, headerName);
}
// The single-argument overload must tag the span under the header name itself.
@Test
void requestHeader() {
    when(request.header("User-Agent")).thenReturn("Mozilla/5.0");
    HttpTags.requestHeader("User-Agent").tag(request, span);
    verify(span).tag("User-Agent", "Mozilla/5.0");
}
/** Sends the "unsupported WebSocket version" response using a fresh promise from the channel. */
public static ChannelFuture sendUnsupportedVersionResponse(Channel channel) {
    return sendUnsupportedVersionResponse(channel, channel.newPromise());
}
// An unsupported WebSocket version must yield 426 Upgrade Required, advertise the
// supported version via Sec-WebSocket-Version, and carry an explicit zero Content-Length.
@Test
public void testUnsupportedVersion() throws Exception {
    EmbeddedChannel ch = new EmbeddedChannel();
    WebSocketServerHandshakerFactory.sendUnsupportedVersionResponse(ch);
    ch.runPendingTasks();
    Object msg = ch.readOutbound();
    if (!(msg instanceof FullHttpResponse)) {
        fail("Got wrong response " + msg);
    }
    FullHttpResponse response = (FullHttpResponse) msg;
    assertEquals(HttpResponseStatus.UPGRADE_REQUIRED, response.status());
    assertEquals(WebSocketVersion.V13.toHttpHeaderValue(),
        response.headers().get(HttpHeaderNames.SEC_WEBSOCKET_VERSION));
    assertTrue(HttpUtil.isContentLengthSet(response));
    assertEquals(0, HttpUtil.getContentLength(response));
    // Release the retained response buffer; finish() must report no leftover messages.
    ReferenceCountUtil.release(response);
    assertFalse(ch.finish());
}
/**
 * Case-insensitive lookup: String keys are lower-cased before delegating to the
 * default Map implementation; non-String keys never match and yield the default.
 */
@Override
public V getOrDefault(Object key, V defaultValue) {
    if (key instanceof String) {
        // NOTE(review): toLowerCase() is default-locale-sensitive (e.g. Turkish dotless i);
        // consider toLowerCase(Locale.ROOT) if insertion lower-cases with a fixed locale — TODO confirm.
        return Map.super.getOrDefault(((String) key).toLowerCase(), defaultValue);
    }
    return defaultValue;
}
// Lookups must be case-insensitive for String keys; unknown keys fall back to the default.
@Test
void getOrDefault() {
    Assertions.assertEquals("Value", lowerCaseLinkHashMap.getOrDefault("Key", "abc"));
    Assertions.assertEquals("Value", lowerCaseLinkHashMap.getOrDefault("key", "abc"));
    Assertions.assertEquals("abc", lowerCaseLinkHashMap.getOrDefault("default", "abc"));
}
/**
 * Builds the serializable attribute map for this package material: the material
 * type, the plugin id, and the repository/package configuration maps. Secure
 * configuration values are included only when {@code addSecureFields} is true.
 */
@Override
public Map<String, Object> getAttributes(boolean addSecureFields) {
    Map<String, Object> attributes = new HashMap<>();
    attributes.put("type", "package");
    attributes.put("plugin-id", getPluginId());
    attributes.put("repository-configuration",
            packageDefinition.getRepository().getConfiguration().getConfigurationAsMap(addSecureFields));
    attributes.put("package-configuration",
            packageDefinition.getConfiguration().getConfigurationAsMap(addSecureFields));
    return attributes;
}
// With addSecureFields=false, secure entries (k2, k4) must be omitted from both maps.
@Test
void shouldGetAttributesWithoutSecureFields() {
    PackageMaterial material = createPackageMaterialWithSecureConfiguration();
    Map<String, Object> attributes = material.getAttributes(false);
    assertThat(attributes.get("type")).isEqualTo("package");
    assertThat(attributes.get("plugin-id")).isEqualTo("pluginid");
    Map<String, Object> repositoryConfiguration = (Map<String, Object>) attributes.get("repository-configuration");
    assertThat(repositoryConfiguration.get("k1")).isEqualTo("repo-v1");
    assertThat(repositoryConfiguration.get("k2")).isNull();
    Map<String, Object> packageConfiguration = (Map<String, Object>) attributes.get("package-configuration");
    assertThat(packageConfiguration.get("k3")).isEqualTo("package-v1");
    assertThat(packageConfiguration.get("k4")).isNull();
}
public void addComplexProperty(String name, Object complexProperty) { Method adderMethod = aggregationAssessor.findAdderMethod(name); // first let us use the addXXX method if (adderMethod != null) { Class<?>[] paramTypes = adderMethod.getParameterTypes(); if (!isSanityCheckSuccessful(name, adderMethod, paramTypes, complexProperty)) { return; } invokeMethodWithSingleParameterOnThisObject(adderMethod, complexProperty); } else { addError("Could not find method [" + "add" + name + "] in class [" + objClass.getName() + "]."); } }
// Two addComplexProperty("window", ...) calls must append both windows, in order.
@Test
public void testComplexCollection() {
    Window w1 = new Window();
    w1.handle = 10;
    Window w2 = new Window();
    w2.handle = 20;
    setter.addComplexProperty("window", w1);
    setter.addComplexProperty("window", w2);
    assertEquals(2, house.windowList.size());
    assertEquals(10, house.windowList.get(0).handle);
    assertEquals(20, house.windowList.get(1).handle);
}
public boolean isUserAllowed(UserGroupInformation ugi) { return isUserInList(ugi); }
// A proxied user is denied under a plain real-user ACL, but granted when the ACL
// uses the USE_REAL_ACLS prefix (which authorizes via the real user's identity).
@Test
public void testUseRealUserAclsForProxiedUser() {
    String realUser = "realUser";
    AccessControlList acl = new AccessControlList(realUser);
    UserGroupInformation realUserUgi = UserGroupInformation.createRemoteUser(realUser);
    UserGroupInformation user1 = UserGroupInformation.createProxyUserForTesting("regularJane",
        realUserUgi, new String [] {"group1"});
    assertFalse("User " + user1 + " should not have been granted access.",
        acl.isUserAllowed(user1));
    acl = new AccessControlList(AccessControlList.USE_REAL_ACLS + realUser);
    assertTrue("User " + user1 + " should have access but was denied.",
        acl.isUserAllowed(user1));
}
/** Returns the registry of producer futures held by this instance. */
public ConcurrentLongHashMap<CompletableFuture<Producer>> getProducers() {
    return producers;
}
// When the broker enforces encryption-required (even though the namespace policy does
// not), an unencrypted producer must be rejected with a MetadataError and no producer
// must remain registered on the topic.
@Test(timeOut = 30000)
public void testProducerFailureOnEncryptionRequiredOnBroker() throws Exception {
    // (a) Set encryption-required at broker level
    pulsar.getConfig().setEncryptionRequireOnProducer(true);
    resetChannel();
    setChannelConnected();

    // (b) Set encryption_required to false on policy
    Policies policies = mock(Policies.class);
    // Namespace policy doesn't require encryption
    policies.encryption_required = false;
    policies.topicDispatchRate = new HashMap<>();
    policies.clusterSubscribeRate = new HashMap<>();
    // add `clusterDispatchRate` otherwise there will be a NPE
    policies.clusterDispatchRate = new HashMap<>();
    // add `clusterDispatchRate` otherwise there will be a NPE
    // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceSubscriptionDispatchRate`
    policies.subscriptionDispatchRate = new HashMap<>();
    // add `clusterDispatchRate` otherwise there will be a NPE
    // `org.apache.pulsar.broker.service.AbstractTopic.updateNamespaceReplicatorDispatchRate`
    policies.replicatorDispatchRate = new HashMap<>();
    pulsarTestContext.getPulsarResources().getNamespaceResources()
        .createPolicies(TopicName.get(encryptionRequiredTopicName).getNamespaceObject(), policies);

    // test failure case: unencrypted producer cannot connect
    ByteBuf clientCommand = Commands.newProducer(encryptionRequiredTopicName, 2 /* producer id */,
        2 /* request id */, "unencrypted-producer", false, Collections.emptyMap(), false);
    channel.writeInbound(clientCommand);
    Object response = getResponse();
    assertEquals(response.getClass(), CommandError.class);
    CommandError errorResponse = (CommandError) response;
    assertEquals(errorResponse.getError(), ServerError.MetadataError);
    PersistentTopic topicRef = (PersistentTopic) brokerService.getTopicReference(encryptionRequiredTopicName).get();
    assertNotNull(topicRef);
    // The rejected producer must not have been registered on the topic.
    assertEquals(topicRef.getProducers().size(), 0);
    channel.finish();
}
/** Mean of the distribution, computed as r * (1 - p) / p from the fields r and p. */
@Override
public double mean() {
    return r * (1 - p) / p;
}
// Mean of NegativeBinomial(r=3, p=0.3) is r(1-p)/p = 3 * 0.7 / 0.3 = 7.
@Test
public void testMean() {
    System.out.println("mean");
    NegativeBinomialDistribution instance = new NegativeBinomialDistribution(3, 0.3);
    // rand() is invoked only to exercise the sampler; mean() must be unaffected by it.
    instance.rand();
    assertEquals(7, instance.mean(), 1E-7);
}
/**
 * Creates the concrete MySQL command packet for the given command type.
 * Types without a dedicated implementation yield MySQLUnsupportedCommandPacket.
 */
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload, final ConnectionSession connectionSession) {
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitPacket();
        case COM_INIT_DB:
            return new MySQLComInitDbPacket(payload);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacket(payload);
        case COM_QUERY:
            return new MySQLComQueryPacket(payload);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPreparePacket(payload);
        case COM_STMT_EXECUTE:
            // Peek the statement id (little-endian int at the current reader index) WITHOUT
            // consuming it, so the packet constructor can still read the full payload.
            MySQLServerPreparedStatement serverPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
            return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataPacket(payload);
        case COM_STMT_RESET:
            return new MySQLComStmtResetPacket(payload);
        case COM_STMT_CLOSE:
            return new MySQLComStmtClosePacket(payload);
        case COM_SET_OPTION:
            return new MySQLComSetOptionPacket(payload);
        case COM_PING:
            return new MySQLComPingPacket();
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionPacket();
        default:
            return new MySQLUnsupportedCommandPacket(commandPacketType);
    }
}
// COM_STMT_PREPARE must produce a MySQLComStmtPreparePacket from the payload.
@Test
void assertNewInstanceWithComStmtPreparePacket() {
    when(payload.readStringEOF()).thenReturn("");
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_STMT_PREPARE, payload, connectionSession),
        instanceOf(MySQLComStmtPreparePacket.class));
}
/**
 * Cleans the throwable's stack trace in place. The identity-based set passed to
 * clean() tracks already-visited throwables, guarding against cyclic cause chains.
 */
static void cleanStackTrace(Throwable throwable) {
    new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
}
// Cleaning the root throwable must also strip filtered frames from every cause in the chain.
@Test
public void causingThrowablesAreAlsoCleaned() {
    Throwable cause2 = createThrowableWithStackTrace("com.example.Foo", "org.junit.FilterMe");
    Throwable cause1 = createThrowableWithStackTrace(cause2, "com.example.Bar", "org.junit.FilterMe");
    Throwable rootThrowable = createThrowableWithStackTrace(cause1, "com.example.Car", "org.junit.FilterMe");
    StackTraceCleaner.cleanStackTrace(rootThrowable);
    assertThat(rootThrowable.getStackTrace()).isEqualTo(createStackTrace("com.example.Car"));
    assertThat(cause1.getStackTrace()).isEqualTo(createStackTrace("com.example.Bar"));
    assertThat(cause2.getStackTrace()).isEqualTo(createStackTrace("com.example.Foo"));
}
/** Loads a dictionary-type row by id, delegating to the mapper's selectById. */
@Override
public DictTypeDO getDictType(Long id) {
    return dictTypeMapper.selectById(id);
}
// Inserting a row and fetching it by id must return an equal DictTypeDO.
@Test
public void testGetDictType_id() {
    // mock data
    DictTypeDO dbDictType = randomDictTypeDO();
    dictTypeMapper.insert(dbDictType);
    // prepare parameter
    Long id = dbDictType.getId();
    // invoke
    DictTypeDO dictType = dictTypeService.getDictType(id);
    // assert
    assertNotNull(dictType);
    assertPojoEquals(dbDictType, dictType);
}
/** Three-argument convenience overload: delegates with the final boolean flag set to true. */
public static SchemaAnnotationProcessResult process(List<SchemaAnnotationHandler> handlers, DataSchema dataSchema, AnnotationProcessOption options) {
    return process(handlers, dataSchema, options, true);
}
// A handler whose resolve() throws must cause processing to fail with an
// IllegalStateException naming the handler's annotation namespace.
//
// BUG FIX: the original test (a) passed silently when no exception was thrown,
// and (b) discarded the boolean result of e.getMessage().equals(...), so the
// message was never actually verified.
@Test
public void testHandlerResolveException() throws Exception {
    String failureMessage = "Intentional failure";
    SchemaAnnotationHandler testHandler = new SchemaAnnotationHandler() {
        @Override
        public ResolutionResult resolve(List<Pair<String, Object>> propertiesOverrides, ResolutionMetaData resolutionMetadata) {
            // Simulate a handler blowing up during resolution.
            throw new RuntimeException(failureMessage);
        }
        @Override
        public String getAnnotationNamespace() {
            return TEST_ANNOTATION_LABEL;
        }
        @Override
        public AnnotationValidationResult validate(Map<String, Object> resolvedProperties, ValidationMetaData metaData) {
            return new AnnotationValidationResult();
        }
    };
    RecordDataSchema testSchema = (RecordDataSchema) TestUtil.dataSchemaFromPdlString(simpleTestSchema);
    try {
        SchemaAnnotationProcessor.process(Arrays.asList(testHandler), testSchema,
            new SchemaAnnotationProcessor.AnnotationProcessOption());
        throw new AssertionError("Expected IllegalStateException from annotation processing");
    } catch (IllegalStateException e) {
        String expected = String.format(
            "Annotation processing failed when resolving annotations in the schema using the handler for "
                + "annotation namespace \"%s\"", TEST_ANNOTATION_LABEL);
        if (!e.getMessage().contains(expected)) {
            throw new AssertionError("Unexpected failure message: " + e.getMessage());
        }
    }
}
/**
 * Creates the given topics asynchronously by delegating the blocking variant to
 * CompletableFuture.runAsync (default executor: ForkJoinPool.commonPool).
 */
CompletableFuture<Void> create(List<TopicInfo> topicInfos) {
    return CompletableFuture.runAsync(() -> createBlocking(topicInfos));
}
// create() must call Admin.createTopics exactly once with the expected NewTopic set.
@Test
void created() {
    KafkaFuture<Void> future = KafkaFuture.completedFuture(null);
    when(createTopicsResult.values()).thenReturn(singletonMap(topic, future));
    topicCreator.create(singletonList(topicInfo)).join();
    verify(admin).createTopics(captor.capture());
    List<List<NewTopic>> allValues = captor.getAllValues();
    assertThat(allValues).hasSize(1);
    assertNewTopics(allValues.get(0));
}
/**
 * Resolves every registered polymorphic choice against the bound type variables
 * and packages the resolved choices into a BuiltInScalarFunctionImplementation.
 */
@Override
public BuiltInScalarFunctionImplementation specialize(BoundVariables boundVariables, int arity, FunctionAndTypeManager functionAndTypeManager) {
    ImmutableList.Builder<ScalarFunctionImplementationChoice> resolvedChoices = ImmutableList.builder();
    for (PolymorphicScalarFunctionChoice candidate : choices) {
        resolvedChoices.add(getScalarFunctionImplementationChoice(boundVariables, functionAndTypeManager, candidate));
    }
    return new BuiltInScalarFunctionImplementation(resolvedChoices.build());
}
// Builds an ADD operator function and specializes it.
// NOTE(review): despite its name, this test asserts nothing about the hidden flag —
// `functionImplementation` is unused and the test only verifies that specialize()
// does not throw. Consider adding an explicit assertion on the hidden property.
@Test
public void testSetsHiddenToTrueForOperators() {
    Signature signature = SignatureBuilder.builder()
        .operatorType(ADD)
        .kind(SCALAR)
        .returnType(parseTypeSignature("varchar(x)", ImmutableSet.of("x")))
        .argumentTypes(parseTypeSignature("varchar(x)", ImmutableSet.of("x")))
        .build();
    SqlScalarFunction function = SqlScalarFunction.builder(TestMethods.class, ADD)
        .signature(signature)
        .deterministic(true)
        .choice(choice -> choice
            .implementation(methodsGroup -> methodsGroup.methods("varcharToVarchar")))
        .build();
    BuiltInScalarFunctionImplementation functionImplementation = function.specialize(BOUND_VARIABLES, 1, FUNCTION_AND_TYPE_MANAGER);
}
/**
 * Converts a BranchRollbackRequest model into its protobuf wire form, nesting the
 * message-type header and the abstract transaction/branch-end layers.
 * Null applicationData/resourceId are coerced to "" (presumably because the proto
 * string setters reject null — TODO confirm).
 */
@Override
public BranchRollbackRequestProto convert2Proto(BranchRollbackRequest branchRollbackRequest) {
    final short typeCode = branchRollbackRequest.getTypeCode();
    final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType(
        MessageTypeProto.forNumber(typeCode)).build();
    final AbstractTransactionRequestProto abstractTransactionRequestProto = AbstractTransactionRequestProto
        .newBuilder().setAbstractMessage(abstractMessage).build();
    final String applicationData = branchRollbackRequest.getApplicationData();
    final String resourceId = branchRollbackRequest.getResourceId();
    final AbstractBranchEndRequestProto abstractBranchEndRequestProto = AbstractBranchEndRequestProto.newBuilder().
        setAbstractTransactionRequest(abstractTransactionRequestProto).setXid(branchRollbackRequest.getXid())
        .setBranchId(branchRollbackRequest.getBranchId()).setBranchType(
            BranchTypeProto.valueOf(branchRollbackRequest.getBranchType().name())).setApplicationData(
            applicationData == null ? "" : applicationData).setResourceId(resourceId == null ? "" : resourceId)
        .build();
    BranchRollbackRequestProto result = BranchRollbackRequestProto.newBuilder().setAbstractBranchEndRequest(
        abstractBranchEndRequestProto).build();
    return result;
}
// Round-trips a BranchRollbackRequest through the protobuf convertor and checks
// that every field survives model -> proto -> model conversion.
@Test
public void convert2Proto() {
    BranchRollbackRequest original = new BranchRollbackRequest();
    original.setApplicationData("data");
    original.setBranchType(BranchType.AT);
    original.setResourceId("resourceId");
    original.setXid("xid");
    original.setBranchId(123);
    BranchRollbackRequestConvertor convertor = new BranchRollbackRequestConvertor();
    BranchRollbackRequestProto proto = convertor.convert2Proto(original);
    BranchRollbackRequest real = convertor.convert2Model(proto);
    assertThat(real.getTypeCode()).isEqualTo(original.getTypeCode());
    assertThat(real.getApplicationData()).isEqualTo(original.getApplicationData());
    assertThat(real.getBranchType()).isEqualTo(original.getBranchType());
    assertThat(real.getXid()).isEqualTo(original.getXid());
    assertThat(real.getResourceId()).isEqualTo(original.getResourceId());
    assertThat(real.getBranchId()).isEqualTo(original.getBranchId());
}
/** Creates a new builder for assembling a congestion-control rate-limiting strategy. */
@PublicEvolving
public static CongestionControlRateLimitingStrategyBuilder builder() {
    return new CongestionControlRateLimitingStrategyBuilder();
}
// Building without a scaling strategy must fail fast with a clear NullPointerException.
@Test
void testInvalidAimdStrategy() {
    assertThatExceptionOfType(NullPointerException.class)
        .isThrownBy(
            () -> CongestionControlRateLimitingStrategy.builder()
                .setMaxInFlightRequests(10)
                .setInitialMaxInFlightMessages(10)
                .build())
        .withMessageContaining("scalingStrategy must be provided.");
}
/**
 * Builds the global-phase setup for a rank profile, or returns null when the
 * profile declares no global-phase ranking expression.
 *
 * Scans the profile's feature-engine properties to collect: the rerank count
 * (default 100 when unset), the global-phase evaluator, match features, feature
 * renames (encoded as alternating name/alias property entries), and names to hide.
 * Normalizers referenced by the global-phase inputs are wired either to a dummy
 * evaluator (when their input is already a match feature / rename target) or to a
 * model-backed evaluator.
 */
static GlobalPhaseSetup maybeMakeSetup(RankProfilesConfig.Rankprofile rp, RankProfilesEvaluator modelEvaluator) {
    var model = modelEvaluator.modelForRankProfile(rp.name());
    Map<String, RankProfilesConfig.Rankprofile.Normalizer> availableNormalizers = new HashMap<>();
    for (var n : rp.normalizer()) {
        availableNormalizers.put(n.name(), n);
    }
    Supplier<FunctionEvaluator> functionEvaluatorSource = null;
    int rerankCount = -1;
    Set<String> namesToHide = new HashSet<>();
    Set<String> matchFeatures = new HashSet<>();
    Map<String, String> renameFeatures = new HashMap<>();
    // Holds the first half of a rename pair until its alias arrives.
    String toRename = null;
    for (var prop : rp.fef().property()) {
        if (prop.name().equals("vespa.globalphase.rerankcount")) {
            rerankCount = Integer.parseInt(prop.value());
        }
        if (prop.name().equals("vespa.rank.globalphase")) {
            functionEvaluatorSource = () -> model.evaluatorOf("globalphase");
        }
        if (prop.name().equals("vespa.hidden.matchfeature")) {
            namesToHide.add(prop.value());
        }
        if (prop.name().equals("vespa.match.feature")) {
            matchFeatures.add(prop.value());
        }
        if (prop.name().equals("vespa.feature.rename")) {
            // Rename pairs arrive as two consecutive "vespa.feature.rename" properties.
            if (toRename == null) {
                toRename = prop.value();
            } else {
                renameFeatures.put(toRename, prop.value());
                toRename = null;
            }
        }
    }
    if (rerankCount < 0) {
        rerankCount = 100;
    }
    if (functionEvaluatorSource != null) {
        var mainResolver = new InputResolver(matchFeatures, renameFeatures, availableNormalizers.keySet());
        var evaluator = functionEvaluatorSource.get();
        var allInputs = List.copyOf(evaluator.function().arguments());
        mainResolver.resolve(allInputs);
        List<NormalizerSetup> normalizers = new ArrayList<>();
        for (var input : mainResolver.usedNormalizers) {
            var cfg = availableNormalizers.get(input);
            String normInput = cfg.input();
            if (matchFeatures.contains(normInput) || renameFeatures.containsValue(normInput)) {
                // Input is already produced as a match feature: no real evaluation needed.
                Supplier<Evaluator> normSource = () -> new DummyEvaluator(normInput);
                normalizers.add(makeNormalizerSetup(cfg, matchFeatures, renameFeatures, normSource, List.of(normInput), rerankCount));
            } else {
                // Input must be computed by the model itself.
                Supplier<FunctionEvaluator> normSource = () -> model.evaluatorOf(normInput);
                var normInputs = List.copyOf(normSource.get().function().arguments());
                var normSupplier = SimpleEvaluator.wrap(normSource);
                normalizers.add(makeNormalizerSetup(cfg, matchFeatures, renameFeatures, normSupplier, normInputs, rerankCount));
            }
        }
        Supplier<Evaluator> supplier = SimpleEvaluator.wrap(functionEvaluatorSource);
        var gfun = new FunEvalSpec(supplier, mainResolver.fromQuery, mainResolver.fromMF);
        var defaultValues = extraDefaultQueryFeatureValues(rp, mainResolver.fromQuery, normalizers);
        return new GlobalPhaseSetup(gfun, rerankCount, namesToHide, normalizers, defaultValues);
    }
    return null;
}
// Query features declared without explicit defaults must get zero-valued defaults of the
// right tensor type; declared defaults must be preserved verbatim.
@Test
void queryFeaturesWithDefaults() {
    RankProfilesConfig rpCfg = readConfig("qf_defaults");
    assertEquals(1, rpCfg.rankprofile().size());
    RankProfilesEvaluator rpEvaluator = createEvaluator(rpCfg);
    var setup = GlobalPhaseSetup.maybeMakeSetup(rpCfg.rankprofile().get(0), rpEvaluator);
    assertNotNull(setup);
    assertEquals(0, setup.normalizers.size());
    assertEquals(0, setup.matchFeaturesToHide.size());
    assertEquals(5, setup.globalPhaseEvalSpec.fromQuery().size());
    assertEquals(2, setup.globalPhaseEvalSpec.fromMF().size());
    assertEquals(5, setup.defaultValues.size());
    assertEquals(Tensor.from(0.0), setup.defaultValues.get("query(w_no_def)"));
    assertEquals(Tensor.from(1.0), setup.defaultValues.get("query(w_has_def)"));
    assertEquals(Tensor.from("tensor(m{}):{}"), setup.defaultValues.get("query(m_no_def)"));
    assertEquals(Tensor.from("tensor(v[3]):[0,0,0]"), setup.defaultValues.get("query(v_no_def)"));
    assertEquals(Tensor.from("tensor(v[3]):[2,0.25,1.5]"), setup.defaultValues.get("query(v_has_def)"));
}
/**
 * Resolves the interceptor for the given application id and forwards the state
 * request to it. Interceptor-resolution failures are logged and — like a missing
 * interceptor — degrade to an empty AppState rather than propagating.
 */
@Override
public AppState getAppState(HttpServletRequest hsr, String appId) throws AuthorizationException {
    try {
        DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByAppId(appId);
        if (interceptor != null) {
            return interceptor.getAppState(hsr, appId);
        }
    } catch (YarnException | IllegalArgumentException e) {
        LOG.error("getHomeSubClusterInfoByAppId error, applicationId = {}.", appId, e);
    }
    // Fallback: empty state instead of null when no interceptor could serve the request.
    return new AppState();
}
// Querying the state of an application id that was never submitted must return null.
// NOTE(review): this relies on the resolved interceptor reporting null for unknown
// apps (the fallback path returns an empty AppState, not null) — TODO confirm.
@Test
public void testGetApplicationStateNotExists() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    AppState response = interceptor.getAppState(null, appId.toString());
    Assert.assertNull(response);
}
/**
 * Runs every registered migration step in order, notifying the listener after each,
 * and records overall duration, completed-step count and success/failure telemetry.
 * The finally block logs success only when all steps ran; any step failure makes the
 * global profiler log an error instead.
 */
@Override
public void execute(List<RegisteredMigrationStep> steps, MigrationStatusListener listener) {
    Profiler globalProfiler = Profiler.create(LOGGER);
    globalProfiler.startInfo(GLOBAL_START_MESSAGE, databaseMigrationState.getTotalMigrations());
    boolean allStepsExecuted = false;
    try {
        for (RegisteredMigrationStep step : steps) {
            this.execute(step);
            listener.onMigrationStepCompleted();
        }
        allStepsExecuted = true;
    } finally {
        // Initialized here so telemetry always receives a value, even on failure paths.
        long dbMigrationDuration = 0L;
        if (allStepsExecuted) {
            dbMigrationDuration = globalProfiler.stopInfo(GLOBAL_END_MESSAGE,
                databaseMigrationState.getCompletedMigrations(), databaseMigrationState.getTotalMigrations(),
                "success");
        } else {
            dbMigrationDuration = globalProfiler.stopError(GLOBAL_END_MESSAGE,
                databaseMigrationState.getCompletedMigrations(), databaseMigrationState.getTotalMigrations(),
                "failure");
        }
        telemetryDbMigrationTotalTimeProvider.setDbMigrationTotalTime(dbMigrationDuration);
        telemetryDbMigrationStepsProvider.setDbMigrationCompletedSteps(databaseMigrationState.getCompletedMigrations());
        telemetryDbMigrationSuccessProvider.setDbMigrationSuccess(allStepsExecuted);
    }
}
// With no steps: the listener is never invoked, and exactly the start/stop INFO lines appear.
@Test
void execute_does_not_fail_when_stream_is_empty_and_log_start_stop_INFO() {
    underTest.execute(Collections.emptyList(), migrationStatusListener);
    verifyNoInteractions(migrationStatusListener);
    assertThat(logTester.logs()).hasSize(2);
    assertLogLevel(Level.INFO, "Executing 5 DB migrations...", "Executed 2/5 DB migrations: success | time=");
}
/**
 * Renders one source line as HTML, applying syntax highlighting and symbol
 * references when their serialized data is non-blank.
 *
 * @return null for a null input line (or when decoration yields null),
 *         "" when decoration produces an empty result, otherwise the decorated line
 */
@CheckForNull
public String getDecoratedSourceAsHtml(@Nullable String sourceLine, @Nullable String highlighting, @Nullable String symbols) {
    if (sourceLine == null) {
        return null;
    }
    DecorationDataHolder decorationDataHolder = new DecorationDataHolder();
    if (StringUtils.isNotBlank(highlighting)) {
        decorationDataHolder.loadSyntaxHighlightingData(highlighting);
    }
    if (StringUtils.isNotBlank(symbols)) {
        decorationDataHolder.loadLineSymbolReferences(symbols);
    }
    List<String> decoratedSource = new HtmlTextDecorator().decorateTextWithHtml(sourceLine, decorationDataHolder, 1, 1);
    if (decoratedSource == null) {
        return null;
    }
    return decoratedSource.isEmpty() ? "" : decoratedSource.get(0);
}
// Highlighting span 0-7 ("package") and symbol span 8-17 ("org.polop", symbol 42)
// must be wrapped in the corresponding <span> elements.
@Test
public void should_decorate_single_line() {
    String sourceLine = "package org.polop;";
    String highlighting = "0,7,k";
    String symbols = "8,17,42";
    assertThat(sourceDecorator.getDecoratedSourceAsHtml(sourceLine, highlighting, symbols)).isEqualTo(
        "<span class=\"k\">package</span> <span class=\"sym-42 sym\">org.polop</span>;");
}
/**
 * Opens the device's app store with a search for "AnySoftKeyboard <marketKeyword>".
 * FLAG_ACTIVITY_NEW_TASK is set so the launch works from any context.
 *
 * @return false when launching fails for any reason (the failure is logged), true otherwise
 */
protected static boolean startMarketActivity(
        @NonNull Context context, @NonNull String marketKeyword) {
    try {
        Intent search = new Intent(Intent.ACTION_VIEW);
        Uri uri = new Uri.Builder()
            .scheme("market")
            .authority("search")
            .appendQueryParameter("q", "AnySoftKeyboard " + marketKeyword)
            .build();
        search.setData(uri);
        search.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
        context.startActivity(search);
    } catch (Exception ex) {
        Logger.e(TAG, "Could not launch Store search!", ex);
        return false;
    }
    return true;
}
// If startActivity throws, startMarketActivity must swallow the failure and return false.
@Test
public void testUtilityStart() {
    Application context = ApplicationProvider.getApplicationContext();
    Application spy = Mockito.spy(context);
    Mockito.doThrow(new RuntimeException()).when(spy).startActivity(Mockito.any());
    Assert.assertFalse(AddOnStoreSearchController.startMarketActivity(spy, "play"));
}
/**
 * Returns the globally smallest remaining element across all merged streams,
 * or null when every stream is exhausted. The {@code reuse} object is handed to
 * the winning stream so the NEXT value can be deserialized into it (option 3 below).
 */
@Override
public E next(E reuse) throws IOException {
    /* There are three ways to handle object reuse:
     * 1) reuse and return the given object
     * 2) ignore the given object and return a new object
     * 3) exchange the given object for an existing object
     *
     * The first option is not available here as the return value has
     * already been deserialized from the heap's top iterator. The second
     * option avoids object reuse. The third option is implemented below
     * by passing the given object to the heap's top iterator into which
     * the next value will be deserialized.
     */
    if (this.heap.size() > 0) {
        // get the smallest element
        final HeadStream<E> top = this.heap.peek();
        E result = top.getHead();
        // read an element; drop the stream when it is exhausted, otherwise re-sift the heap
        if (!top.nextHead(reuse)) {
            this.heap.poll();
        } else {
            this.heap.adjustTop();
        }
        return result;
    } else {
        return null;
    }
}
@Test void testInvalidMerge() throws Exception { // iterators List<MutableObjectIterator<Tuple2<Integer, String>>> iterators = new ArrayList<>(); iterators.add( newIterator(new int[] {1, 2, 17, 23, 23}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {2, 6, 7, 8, 9}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {4, 10, 11, 11, 12}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {3, 6, 10, 7, 12}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {7, 10, 15, 19, 44}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {6, 6, 11, 17, 18}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {1, 2, 4, 5, 10}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {5, 10, 19, 23, 29}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {9, 9, 9, 9, 9}, new String[] {"A", "B", "C", "D", "E"})); iterators.add( newIterator(new int[] {8, 8, 14, 14, 15}, new String[] {"A", "B", "C", "D", "E"})); // comparator TypeComparator<Integer> comparator = new IntComparator(true); // merge iterator MutableObjectIterator<Tuple2<Integer, String>> iterator = new MergeIterator<>(iterators, this.comparator); boolean violationFound = false; // check expected order Tuple2<Integer, String> rec1 = new Tuple2<>(); Tuple2<Integer, String> rec2 = new Tuple2<>(); assertThat(rec1 = iterator.next(rec1)).isNotNull(); while ((rec2 = iterator.next(rec2)) != null) { if (comparator.compare(rec1.f0, rec2.f0) > 0) { violationFound = true; break; } Tuple2<Integer, String> tmp = rec1; rec1 = rec2; rec2 = tmp; } assertThat(violationFound) .withFailMessage("Merge must have returned a wrong result") .isTrue(); }
/**
 * Calculates the consecutive path legs of a round trip through the given snaps.
 * Intermediate legs start/end at the base tower node of each snap's closest edge
 * (only the round-trip origin uses its closest — possibly virtual — node), and the
 * visited way points plus per-leg visited-node counts are accumulated in the Result.
 */
public static Result calcPaths(List<Snap> snaps, FlexiblePathCalculator pathCalculator) {
    RoundTripCalculator roundTripCalculator = new RoundTripCalculator(pathCalculator);
    Result result = new Result(snaps.size() - 1);
    Snap start = snaps.get(0);
    for (int snapIndex = 1; snapIndex < snaps.size(); snapIndex++) {
        // instead getClosestNode (which might be a virtual one and introducing unnecessary tails of the route)
        // use next tower node -> getBaseNode or getAdjNode
        // Later: remove potential route tail, maybe we can just enforce the heading at the start and when coming
        // back, and for tower nodes it does not matter anyway
        Snap startSnap = snaps.get(snapIndex - 1);
        int startNode = (startSnap == start) ? startSnap.getClosestNode() : startSnap.getClosestEdge().getBaseNode();
        Snap endSnap = snaps.get(snapIndex);
        int endNode = (endSnap == start) ? endSnap.getClosestNode() : endSnap.getClosestEdge().getBaseNode();
        Path path = roundTripCalculator.calcPath(startNode, endNode);
        if (snapIndex == 1) {
            // Lazily created on the first leg so the 3D flag can come from the graph.
            result.wayPoints = new PointList(snaps.size(), path.graph.getNodeAccess().is3D());
            result.wayPoints.add(path.graph.getNodeAccess(), startNode);
        }
        result.wayPoints.add(path.graph.getNodeAccess(), endNode);
        result.visitedNodes += pathCalculator.getVisitedNodes();
        result.paths.add(path);
    }
    return result;
}
// Round trips between snapped points must produce two legs (out and back) whose
// node sequences form a loop; verified for two different start/turn-point pairs.
@Test
public void testCalcRoundTrip() {
    BaseGraph g = createTestGraph();
    LocationIndex locationIndex = new LocationIndexTree(g, new RAMDirectory()).prepareIndex();
    Snap snap4 = locationIndex.findClosest(0.05, 0.25, EdgeFilter.ALL_EDGES);
    assertEquals(4, snap4.getClosestNode());
    Snap snap5 = locationIndex.findClosest(0.00, 0.05, EdgeFilter.ALL_EDGES);
    assertEquals(5, snap5.getClosestNode());
    Snap snap6 = locationIndex.findClosest(0.00, 0.10, EdgeFilter.ALL_EDGES);
    assertEquals(6, snap6.getClosestNode());
    QueryGraph qGraph = QueryGraph.create(g, Arrays.asList(snap4, snap5));
    FlexiblePathCalculator pathCalculator = new FlexiblePathCalculator(
        qGraph, new RoutingAlgorithmFactorySimple(), weighting,
        new AlgorithmOptions().setAlgorithm(DIJKSTRA_BI).setTraversalMode(tMode));
    List<Path> paths = RoundTripRouting.calcPaths(Arrays.asList(snap5, snap4, snap5), pathCalculator).paths;
    assertEquals(2, paths.size());
    assertEquals(IntArrayList.from(5, 6, 3), paths.get(0).calcNodes());
    assertEquals(IntArrayList.from(3, 2, 9, 1, 5), paths.get(1).calcNodes());
    // Second round trip with a different start point (node 6).
    qGraph = QueryGraph.create(g, Arrays.asList(snap4, snap6));
    pathCalculator = new FlexiblePathCalculator(
        qGraph, new RoutingAlgorithmFactorySimple(), weighting,
        new AlgorithmOptions().setAlgorithm(DIJKSTRA_BI).setTraversalMode(tMode));
    paths = RoundTripRouting.calcPaths(Arrays.asList(snap6, snap4, snap6), pathCalculator).paths;
    assertEquals(2, paths.size());
    assertEquals(IntArrayList.from(6, 3), paths.get(0).calcNodes());
    assertEquals(IntArrayList.from(3, 4, 8, 7, 6), paths.get(1).calcNodes());
}
/**
 * Executes SHOW QUERIES [EXTENDED]: builds a remote-host executor to fan the
 * statement out to peer servers, then dispatches to the extended (detailed
 * descriptors) or simple path based on the statement's showExtended flag.
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<ListQueries> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final RemoteHostExecutor remoteHostExecutor = RemoteHostExecutor.create(
        statement,
        sessionProperties,
        executionContext,
        serviceContext.getKsqlClient()
    );
    return statement.getStatement().getShowExtended()
        ? executeExtended(statement, sessionProperties, executionContext, remoteHostExecutor)
        : executeSimple(statement, executionContext, remoteHostExecutor);
}
@Test
public void shouldScatterGatherAndMergeShowQueriesExtended() {
    // Given: an external (non-internal) request, so the executor fans out to remote hosts.
    when(sessionProperties.getInternalRequest()).thenReturn(false);
    final ConfiguredStatement<?> showQueries = engine.configure("SHOW QUERIES EXTENDED;");
    final StreamsTaskMetadata localTaskMetadata = mock(StreamsTaskMetadata.class);
    final StreamsTaskMetadata remoteTaskMetadata = mock(StreamsTaskMetadata.class);
    // The same query id exists on both hosts: RUNNING locally, ERROR remotely.
    final PersistentQueryMetadata localMetadata =
        givenPersistentQuery("id", RUNNING_QUERY_STATE, Collections.singleton(localTaskMetadata));
    final PersistentQueryMetadata remoteMetadata =
        givenPersistentQuery("id", ERROR_QUERY_STATE, Collections.singleton(remoteTaskMetadata));
    when(mockKsqlEngine.getAllLiveQueries()).thenReturn(ImmutableList.of(localMetadata));
    when(mockKsqlEngine.getPersistentQueries()).thenReturn(ImmutableList.of(localMetadata));

    // Stub the remote host's response: one description of the same query in ERROR state.
    final List<QueryDescription> remoteQueryDescriptions = Collections.singletonList(
        QueryDescriptionFactory.forQueryMetadata(
            remoteMetadata,
            Collections.singletonMap(REMOTE_KSQL_HOST_INFO_ENTITY, KsqlQueryStatus.ERROR))
    );
    when(remoteQueryDescriptionList.getQueryDescriptions()).thenReturn(remoteQueryDescriptions);
    when(ksqlEntityList.get(anyInt())).thenReturn(remoteQueryDescriptionList);
    when(response.getResponse()).thenReturn(ksqlEntityList);

    // Expected per-host status map after merging local and remote results.
    final Map<KsqlHostInfoEntity, KsqlQueryStatus> mergedMap = new HashMap<>();
    mergedMap.put(REMOTE_KSQL_HOST_INFO_ENTITY, KsqlQueryStatus.ERROR);
    mergedMap.put(LOCAL_KSQL_HOST_INFO_ENTITY, KsqlQueryStatus.RUNNING);

    // When
    final QueryDescriptionList queries =
        (QueryDescriptionList) CustomExecutors.LIST_QUERIES.execute(
            showQueries,
            sessionProperties,
            mockKsqlEngine,
            serviceContext
        ).getEntity().orElseThrow(IllegalStateException::new);

    // Then: the two per-host descriptions are merged into a single entry carrying the
    // combined status map and the task metadata from both hosts.
    final QueryDescription mergedQueryDescription =
        QueryDescriptionFactory.forQueryMetadata(localMetadata, mergedMap);
    mergedQueryDescription.updateTaskMetadata(Collections.singleton(remoteTaskMetadata));
    assertThat(queries.getQueryDescriptions(),
        containsInAnyOrder(mergedQueryDescription));
}
/**
 * Divides the token ring described by {@code ringTokens} into approximately
 * {@code totalSplitCount} splits, sized proportionally to each token range's span,
 * then coalesces them into groups of roughly the target split size.
 *
 * @param ringTokens ordered ring tokens; consecutive tokens (wrapping around) form ranges
 * @throws RuntimeException if a token is outside the partitioner's range, two nodes
 *         share a token, or the generated splits do not cover the full ring
 */
List<List<RingRange>> generateSplits(long totalSplitCount, List<BigInteger> ringTokens) {
  int tokenRangeCount = ringTokens.size();
  List<RingRange> splits = new ArrayList<>();
  for (int i = 0; i < tokenRangeCount; i++) {
    BigInteger start = ringTokens.get(i);
    // The range wraps: the last token pairs with the first.
    BigInteger stop = ringTokens.get((i + 1) % tokenRangeCount);
    if (!isInRange(start) || !isInRange(stop)) {
      throw new RuntimeException(
          String.format("Tokens (%s,%s) not in range of %s", start, stop, partitioner));
    }
    if (start.equals(stop) && tokenRangeCount != 1) {
      throw new RuntimeException(
          String.format("Tokens (%s,%s): two nodes have the same token", start, stop));
    }
    BigInteger rs = stop.subtract(start);
    if (rs.compareTo(BigInteger.ZERO) <= 0) {
      // wrap around case: the range crosses the ring's min/max boundary
      rs = rs.add(rangeSize);
    }
    // the below, in essence, does this:
    // splitCount = ceiling((rs / rangeSize) * totalSplitCount)
    BigInteger[] splitCountAndRemainder =
        rs.multiply(BigInteger.valueOf(totalSplitCount)).divideAndRemainder(rangeSize);
    int splitCount = splitCountAndRemainder[0].intValue()
        + (splitCountAndRemainder[1].equals(BigInteger.ZERO) ? 0 : 1);
    LOG.debug("Dividing token range [{},{}) into {} splits", start, stop, splitCount);
    // Build the list of all endpoints for the splits, including both start and stop
    List<BigInteger> endpointTokens = new ArrayList<>();
    for (int j = 0; j <= splitCount; j++) {
      BigInteger offset = rs.multiply(BigInteger.valueOf(j)).divide(BigInteger.valueOf(splitCount));
      BigInteger token = start.add(offset);
      if (token.compareTo(rangeMax) > 0) {
        // Wrap back into the valid token range.
        token = token.subtract(rangeSize);
      }
      // Long.MIN_VALUE is not a valid token and has to be silently incremented.
      // See https://issues.apache.org/jira/browse/CASSANDRA-14684
      endpointTokens.add(
          token.equals(BigInteger.valueOf(Long.MIN_VALUE)) ? token.add(BigInteger.ONE) : token);
    }
    // Append the splits between consecutive endpoints
    for (int j = 0; j < splitCount; j++) {
      splits.add(RingRange.of(endpointTokens.get(j), endpointTokens.get(j + 1)));
      LOG.debug("Split #{}: [{},{})", j + 1, endpointTokens.get(j), endpointTokens.get(j + 1));
    }
  }
  // Sanity check: the splits must together cover exactly the whole ring.
  BigInteger total = BigInteger.ZERO;
  for (RingRange split : splits) {
    BigInteger size = split.span(rangeSize);
    total = total.add(size);
  }
  if (!total.equals(rangeSize)) {
    throw new RuntimeException(
        "Some tokens are missing from the splits. " + "This should not happen.");
  }
  return coalesceSplits(getTargetSplitSize(totalSplitCount), splits);
}
@Test
public void testGenerateSegments() {
    // Six ring tokens for the RandomPartitioner; adjacent pairs form tiny and huge ranges.
    List<BigInteger> ringTokens = Stream.of(
            "0",
            "1",
            "56713727820156410577229101238628035242",
            "56713727820156410577229101238628035243",
            "113427455640312821154458202477256070484",
            "113427455640312821154458202477256070485")
        .map(BigInteger::new)
        .collect(Collectors.toList());
    SplitGenerator splitGenerator = new SplitGenerator("foo.bar.RandomPartitioner");

    List<List<RingRange>> splits = splitGenerator.generateSplits(10, ringTokens);
    assertEquals(12, splits.size());
    assertEquals("[(0,1], (1,14178431955039102644307275309657008811]]", splits.get(0).toString());
    assertEquals(
        "[(14178431955039102644307275309657008811,28356863910078205288614550619314017621]]",
        splits.get(1).toString());
    assertEquals(
        "[(70892159775195513221536376548285044053,85070591730234615865843651857942052863]]",
        splits.get(5).toString());

    // Same topology shifted by 5: boundaries shift accordingly.
    ringTokens = Stream.of(
            "5",
            "6",
            "56713727820156410577229101238628035242",
            "56713727820156410577229101238628035243",
            "113427455640312821154458202477256070484",
            "113427455640312821154458202477256070485")
        .map(BigInteger::new)
        .collect(Collectors.toList());
    splits = splitGenerator.generateSplits(10, ringTokens);
    assertEquals(12, splits.size());
    assertEquals("[(5,6], (6,14178431955039102644307275309657008815]]", splits.get(0).toString());
    assertEquals(
        "[(70892159775195513221536376548285044053,85070591730234615865843651857942052863]]",
        splits.get(5).toString());
    assertEquals(
        "[(141784319550391026443072753096570088109,155962751505430129087380028406227096921]]",
        splits.get(10).toString());
}
@Udf(description = "Converts a string representation of a time in the given format"
    + " into the TIME value.")
public Time parseTime(
    @UdfParameter(
        description = "The string representation of a time.") final String formattedTime,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // Null in, null out. Fixed: the original used the non-short-circuit bitwise '|'
  // instead of the logical '||'.
  if (formattedTime == null || formatPattern == null) {
    return null;
  }
  try {
    // formatters caches compiled DateTimeFormatter instances per pattern;
    // get() may throw the checked ExecutionException handled below.
    final TemporalAccessor ta = formatters.get(formatPattern).parse(formattedTime);
    // A TIME value carries no date component: reject any pattern that parsed one.
    // (anyMatch replaces the original findFirst()/isPresent() pair; same semantics.)
    final boolean hasDateField = Arrays.stream(ChronoField.values())
        .filter(ChronoField::isDateBased)
        .anyMatch(ta::isSupported);
    if (hasDateField) {
      // This KsqlFunctionException is a RuntimeException: it is caught below and
      // re-wrapped with the input/pattern context, matching the original behavior.
      throw new KsqlFunctionException("Time format contains date field.");
    }
    // TIME is millisecond precision; truncate the parsed nanos-of-day.
    return new Time(TimeUnit.NANOSECONDS.toMillis(LocalTime.from(ta).toNanoOfDay()));
  } catch (ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to parse time '" + formattedTime
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
@Test public void shouldThrowOnEmptyString() { // When: final Exception e = assertThrows( KsqlFunctionException.class, () -> udf.parseTime("", "HHmmss") ); // Then: assertThat(e.getMessage(), containsString("Failed to parse time '' with formatter 'HHmmss'")); }
@VisibleForTesting /*package*/ ExpansionApi.ExpansionResponse expand(ExpansionApi.ExpansionRequest request) { LOG.info( "Expanding '{}' with URN '{}'", request.getTransform().getUniqueName(), request.getTransform().getSpec().getUrn()); LOG.debug("Full transform: {}", request.getTransform()); Set<String> existingTransformIds = request.getComponents().getTransformsMap().keySet(); Pipeline pipeline = createPipeline(PipelineOptionsTranslation.fromProto(request.getPipelineOptions())); boolean isUseDeprecatedRead = ExperimentalOptions.hasExperiment(pipelineOptions, "use_deprecated_read") || ExperimentalOptions.hasExperiment( pipelineOptions, "beam_fn_api_use_deprecated_read"); if (!isUseDeprecatedRead) { ExperimentalOptions.addExperiment( pipeline.getOptions().as(ExperimentalOptions.class), "beam_fn_api"); // TODO(https://github.com/apache/beam/issues/20530): Remove this when we address performance // issue. ExperimentalOptions.addExperiment( pipeline.getOptions().as(ExperimentalOptions.class), "use_sdf_read"); } else { LOG.warn( "Using use_depreacted_read in portable runners is runner-dependent. 
The " + "ExpansionService will respect that, but if your runner does not have support for " + "native Read transform, your Pipeline will fail during Pipeline submission."); } RehydratedComponents rehydratedComponents = RehydratedComponents.forComponents(request.getComponents()).withPipeline(pipeline); Map<String, PCollection<?>> inputs = request.getTransform().getInputsMap().entrySet().stream() .collect( Collectors.toMap( Map.Entry::getKey, input -> { try { return rehydratedComponents.getPCollection(input.getValue()); } catch (IOException exn) { throw new RuntimeException(exn); } })); String urn = request.getTransform().getSpec().getUrn(); TransformProvider transformProvider = getRegisteredTransforms().get(urn); if (transformProvider == null) { if (getUrn(ExpansionMethods.Enum.JAVA_CLASS_LOOKUP).equals(urn)) { AllowList allowList = pipelineOptions.as(ExpansionServiceOptions.class).getJavaClassLookupAllowlist(); assert allowList != null; transformProvider = new JavaClassLookupTransformProvider(allowList); } else if (getUrn(SCHEMA_TRANSFORM).equals(urn)) { try { String underlyingIdentifier = ExternalTransforms.SchemaTransformPayload.parseFrom( request.getTransform().getSpec().getPayload()) .getIdentifier(); transformProvider = getRegisteredTransforms().get(underlyingIdentifier); } catch (InvalidProtocolBufferException e) { throw new RuntimeException(e); } transformProvider = transformProvider != null ? 
transformProvider : ExpansionServiceSchemaTransformProvider.of(); } else { throw new UnsupportedOperationException( "Unknown urn: " + request.getTransform().getSpec().getUrn()); } } List<String> classpathResources = transformProvider.getDependencies(request.getTransform().getSpec(), pipeline.getOptions()); pipeline.getOptions().as(PortablePipelineOptions.class).setFilesToStage(classpathResources); Map<String, PCollection<?>> outputs = transformProvider.apply( pipeline, request.getTransform().getUniqueName(), request.getTransform().getSpec(), inputs); // Needed to find which transform was new... SdkComponents sdkComponents = rehydratedComponents .getSdkComponents(request.getRequirementsList()) .withNewIdPrefix(request.getNamespace()); RunnerApi.Environment defaultEnvironment = Environments.createOrGetDefaultEnvironment( pipeline.getOptions().as(PortablePipelineOptions.class)); if (pipelineOptions.as(ExpansionServiceOptions.class).getAlsoStartLoopbackWorker()) { PortablePipelineOptions externalOptions = PipelineOptionsFactory.create().as(PortablePipelineOptions.class); externalOptions.setDefaultEnvironmentType(Environments.ENVIRONMENT_EXTERNAL); externalOptions.setDefaultEnvironmentConfig(loopbackAddress); defaultEnvironment = Environments.createAnyOfEnvironment( defaultEnvironment, Environments.createOrGetDefaultEnvironment(externalOptions)); } sdkComponents.registerEnvironment(defaultEnvironment); Map<String, String> outputMap = outputs.entrySet().stream() .collect( Collectors.toMap( Map.Entry::getKey, output -> { try { return sdkComponents.registerPCollection(output.getValue()); } catch (IOException exn) { throw new RuntimeException(exn); } })); if (isUseDeprecatedRead) { SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(pipeline); } RunnerApi.Pipeline pipelineProto = PipelineTranslation.toProto(pipeline, sdkComponents); String expandedTransformId = Iterables.getOnlyElement( pipelineProto.getRootTransformIdsList().stream() .filter(id -> 
!existingTransformIds.contains(id)) .collect(Collectors.toList())); RunnerApi.Components components = pipelineProto.getComponents(); RunnerApi.PTransform expandedTransform = components .getTransformsOrThrow(expandedTransformId) .toBuilder() .setUniqueName(expandedTransformId) .clearOutputs() .putAllOutputs(outputMap) .build(); LOG.debug("Expanded to {}", expandedTransform); return ExpansionApi.ExpansionResponse.newBuilder() .setComponents(components.toBuilder().removeTransforms(expandedTransformId)) .setTransform(expandedTransform) .addAllRequirements(pipelineProto.getRequirementsList()) .build(); }
@Test
public void testConstructGenerateSequenceWithRegistration() {
    // Encode the GenerateSequence configuration row (start=0, stop=1) as the payload.
    ExternalTransforms.ExternalConfigurationPayload configPayload =
        encodeRowIntoExternalConfigurationPayload(
            Row.withSchema(
                    Schema.of(
                        Field.of("start", FieldType.INT64),
                        Field.nullable("stop", FieldType.INT64)))
                .withFieldValue("start", 0L)
                .withFieldValue("stop", 1L)
                .build());

    // An empty pipeline supplies the baseline components for the request.
    Pipeline emptyPipeline = Pipeline.create();
    RunnerApi.Pipeline emptyPipelineProto = PipelineTranslation.toProto(emptyPipeline);

    ExpansionApi.ExpansionRequest expansionRequest =
        ExpansionApi.ExpansionRequest.newBuilder()
            .setComponents(emptyPipelineProto.getComponents())
            .setTransform(
                RunnerApi.PTransform.newBuilder()
                    .setUniqueName(TEST_NAME)
                    .setSpec(
                        RunnerApi.FunctionSpec.newBuilder()
                            .setUrn(GenerateSequence.External.URN)
                            .setPayload(configPayload.toByteString())))
            .setNamespace(TEST_NAMESPACE)
            .build();

    ExpansionApi.ExpansionResponse expansionResponse = expansionService.expand(expansionRequest);
    RunnerApi.PTransform result = expansionResponse.getTransform();

    // The expanded transform is namespaced, a pure source (no inputs, one output),
    // and has at least one sub-transform.
    assertEquals(TEST_NAMESPACE + TEST_NAME, result.getUniqueName());
    assertThat(result.getInputsCount(), Matchers.is(0));
    assertThat(result.getOutputsCount(), Matchers.is(1));
    assertThat(result.getSubtransformsCount(), greaterThan(0));
}
/**
 * Returns only the path component of a Hadoop {@link Path}, dropping any scheme and
 * authority (e.g. {@code hdfs://host/foo/bar} becomes {@code /foo/bar}).
 *
 * @param path the possibly fully-qualified path
 * @return the raw path string as reported by the path's URI
 */
public static String getPathWithoutScheme(Path path) {
    return path.toUri().getPath();
}
@Test
public void testGetPathWithoutSchema() {
    // A scheme-less path must pass through unchanged.
    final Path schemelessPath = new Path("/foo/bar/baz");
    final String stripped = HadoopUtils.getPathWithoutScheme(schemelessPath);
    assertEquals("/foo/bar/baz", stripped);
}
/**
 * Uploads the given segment file, delegating to the timeout-aware overload with the
 * instance's configured default timeout ({@code _timeoutInMs}).
 */
@Override
public URI uploadSegment(File segmentFile, LLCSegmentName segmentName) {
    return uploadSegment(segmentFile, segmentName, _timeoutInMs);
}
@Test
public void testSuccessfulUpload() {
    SegmentUploader uploader = new PinotFSSegmentUploader("hdfs://root", TIMEOUT_IN_MS, _serverMetrics);
    URI uploadedUri = uploader.uploadSegment(_file, _llcSegmentName);
    // The destination URI must be rooted at <scheme root>/<table>/<segment>.
    String expectedPrefix = StringUtil.join(File.separator,
        "hdfs://root", _llcSegmentName.getTableName(), _llcSegmentName.getSegmentName());
    Assert.assertTrue(uploadedUri.toString().startsWith(expectedPrefix));
}
/**
 * Sets the egress VLAN in this configuration, or clears it when {@code vlanId} is null.
 *
 * @param vlanId the VLAN id to store, or null to remove the entry
 * @return this config, for chaining
 */
public McastConfig setEgressVlan(VlanId vlanId) {
    if (vlanId != null) {
        object.put(EGRESS_VLAN, vlanId.toString());
    } else {
        object.remove(EGRESS_VLAN);
    }
    return this;
}
@Test
public void setEgressVlan() throws Exception {
    // Writing a VLAN must be readable back unchanged.
    config.setEgressVlan(EGRESS_VLAN_2);
    VlanId actual = config.egressVlan();
    assertNotNull("egressVlan should not be null", actual);
    assertThat(actual, is(EGRESS_VLAN_2));
}
/**
 * Restores a batch of changelog records into the versioned store via the restore
 * write buffer, then flushes the buffer in one shot.
 *
 * <p>Records older than the store's grace period (relative to the advancing observed
 * stream time) are skipped, since they were never written to the store originally.
 */
void restoreBatch(final Collection<ConsumerRecord<byte[], byte[]>> records) {
    // compute the observed stream time at the end of the restore batch, in order to speed up
    // restore by not bothering to read from/write to segments which will have expired by the
    // time the restoration process is complete.
    long endOfBatchStreamTime = observedStreamTime;
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        endOfBatchStreamTime = Math.max(endOfBatchStreamTime, record.timestamp());
    }

    final VersionedStoreClient<?> restoreClient = restoreWriteBuffer.getClient();

    // note: there is increased risk for hitting an out-of-memory during this restore loop,
    // compared to for non-versioned key-value stores, because this versioned store
    // implementation stores multiple records (for the same key) together in a single RocksDB
    // "segment" entry -- restoring a single changelog entry could require loading multiple
    // records into memory. how high this memory amplification will be is very much dependent
    // on the specific workload and the value of the "segment interval" parameter.
    synchronized (position) {
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            if (record.timestamp() < observedStreamTime - gracePeriod) {
                // record is older than grace period and was therefore never written to the store
                continue;
            }
            // advance observed stream time as usual, for use in deciding whether records have
            // exceeded the store's grace period and should be dropped.
            observedStreamTime = Math.max(observedStreamTime, record.timestamp());

            // validate consistency metadata and track the record's position
            ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(
                record,
                consistencyEnabled,
                position
            );

            // put records to write buffer; endOfBatchStreamTime lets doPut skip segments
            // that will already have expired once the whole batch is restored
            doPut(
                restoreClient,
                endOfBatchStreamTime,
                new Bytes(record.key()),
                record.value(),
                record.timestamp()
            );
        }

        try {
            restoreWriteBuffer.flush();
        } catch (final RocksDBException e) {
            throw new ProcessorStateException("Error restoring batch to store " + name, e);
        }
    }
}
@Test
public void shouldRestoreWithNullsAndRepeatTimestamps() {
    // Fixture: repeated timestamps deliberately overwrite earlier values (null over
    // non-null and vice versa), with timestamps chosen on both sides of SEGMENT_INTERVAL
    // so overwrites span segment boundaries.
    final List<DataRecord> records = new ArrayList<>();
    records.add(new DataRecord("k", "to_be_replaced", SEGMENT_INTERVAL + 20));
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL - 10));
    records.add(new DataRecord("k", "to_be_replaced", SEGMENT_INTERVAL - 10)); // replaces existing null with non-null, with timestamps spanning segments
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL - 10)); // replaces existing non-null with null
    records.add(new DataRecord("k", "to_be_replaced", SEGMENT_INTERVAL - 1));
    records.add(new DataRecord("k", "to_be_replaced", SEGMENT_INTERVAL + 1));
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL - 1)); // replaces existing non-null with null
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL + 1)); // replaces existing non-null with null, with timestamps spanning segments
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL + 10));
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL + 5));
    records.add(new DataRecord("k", "vp5", SEGMENT_INTERVAL + 5)); // replaces existing null with non-null
    records.add(new DataRecord("k", "to_be_replaced", SEGMENT_INTERVAL - 5));
    records.add(new DataRecord("k", "vn5", SEGMENT_INTERVAL - 5)); // replaces existing non-null with non-null
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL + 20)); // replaces existing non-null (latest value) with null
    records.add(new DataRecord("k", null, SEGMENT_INTERVAL + 20)); // replaces existing null with null
    records.add(new DataRecord("k", "vn6", SEGMENT_INTERVAL - 6));

    store.restoreBatch(getChangelogRecords(records));

    // Latest value is a tombstone; point-in-time lookups must see only the surviving
    // values ("vp5", "vn5", "vn6") in their respective validity windows.
    verifyGetNullFromStore("k");
    verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL + 30);
    verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL + 15);
    verifyTimestampedGetValueFromStore("k", SEGMENT_INTERVAL + 6, "vp5", SEGMENT_INTERVAL + 5, SEGMENT_INTERVAL + 10);
    verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL + 2);
    verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL);
    verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL - 1);
    verifyTimestampedGetValueFromStore("k", SEGMENT_INTERVAL - 5, "vn5", SEGMENT_INTERVAL - 5, SEGMENT_INTERVAL - 1);
    verifyTimestampedGetValueFromStore("k", SEGMENT_INTERVAL - 6, "vn6", SEGMENT_INTERVAL - 6, SEGMENT_INTERVAL - 5);
    verifyTimestampedGetNullFromStore("k", SEGMENT_INTERVAL - 8);
}
/**
 * Analyzes the given statement with description mode disabled; convenience overload
 * for {@code analyze(statement, false)}.
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
@Test
public void testViewWithUppercaseColumn() {
    // Analysis of a SELECT * over view v4 (which has an uppercase column) must not fail.
    analyze("SELECT * FROM v4");
}
/**
 * Builds the physical-plan source step for a data source, dispatching on the source
 * type (stream vs. table) and on whether its key format is windowed.
 *
 * @throws UnsupportedOperationException for unrecognized source types
 */
public static SchemaKStream<?> buildSource(
    final PlanBuildContext buildContext,
    final DataSource dataSource,
    final QueryContext.Stacker contextStacker
) {
  final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
  switch (dataSource.getDataSourceType()) {
    case KSTREAM:
      if (windowed) {
        return buildWindowedStream(buildContext, dataSource, contextStacker);
      }
      return buildStream(buildContext, dataSource, contextStacker);
    case KTABLE:
      if (windowed) {
        return buildWindowedTable(buildContext, dataSource, contextStacker);
      }
      return buildTable(buildContext, dataSource, contextStacker);
    default:
      throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType());
  }
}
@Test public void shouldBuildCorrectFormatsForV2NonWindowedTable() { // Given: givenNonWindowedTable(); // When: final SchemaKStream<?> result = SchemaKSourceFactory.buildSource( buildContext, dataSource, contextStacker ); // Then: assertThat(((TableSource) result.getSourceStep()).getStateStoreFormats(), is( InternalFormats.of( keyFormat, valueFormatInfo ) )); }
/**
 * Lists a directory in a versioned S3 bucket, returning every object version and
 * delete marker (as duplicates), and resolving common prefixes to folders in parallel
 * on a background thread pool.
 *
 * @param directory directory to list
 * @param listener  notified after each chunk of results
 * @throws NotfoundException   if neither a placeholder nor any contents exist
 * @throws BackgroundException mapped from the underlying S3 service failure
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("list", concurrency);
    try {
        final String prefix = this.createPrefix(directory);
        if(log.isDebugEnabled()) {
            log.debug(String.format("List with prefix %s", prefix));
        }
        final Path bucket = containerService.getContainer(directory);
        final AttributedList<Path> objects = new AttributedList<>();
        String priorLastKey = null;
        String priorLastVersionId = null;
        // Revision counter per key; reset whenever the key changes.
        long revision = 0L;
        String lastKey = null;
        // Bucket roots need no explicit placeholder object.
        boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory);
        do {
            final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
                bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER),
                new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"),
                priorLastKey, priorLastVersionId, false);
            // Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first.
            for(BaseVersionOrDeleteMarker marker : chunk.getItems()) {
                final String key = URIEncoder.decode(marker.getKey());
                if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) {
                    // The directory's own placeholder object: note it and skip.
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Skip placeholder key %s", key));
                    }
                    hasDirectoryPlaceholder = true;
                    continue;
                }
                final PathAttributes attr = new PathAttributes();
                attr.setVersionId(marker.getVersionId());
                if(!StringUtils.equals(lastKey, key)) {
                    // Reset revision for next file
                    revision = 0L;
                }
                attr.setRevision(++revision);
                // A latest delete marker or any non-latest entry is a duplicate (prior version).
                attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest());
                if(marker.isDeleteMarker()) {
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true)));
                }
                attr.setModificationDate(marker.getLastModified().getTime());
                attr.setRegion(bucket.attributes().getRegion());
                if(marker instanceof S3Version) {
                    final S3Version object = (S3Version) marker;
                    attr.setSize(object.getSize());
                    if(StringUtils.isNotBlank(object.getEtag())) {
                        attr.setETag(StringUtils.remove(object.getEtag(), "\""));
                        // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted
                        // using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is
                        // not the MD5 of the object data.
                        attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\"")));
                    }
                    if(StringUtils.isNotBlank(object.getStorageClass())) {
                        attr.setStorageClass(object.getStorageClass());
                    }
                }
                final Path f = new Path(directory.isDirectory() ? directory : directory.getParent(),
                    PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr);
                if(metadata) {
                    f.withAttributes(attributes.find(f));
                }
                objects.add(f);
                lastKey = key;
            }
            // Resolve common prefixes (sub-folders) concurrently on the thread pool.
            final String[] prefixes = chunk.getCommonPrefixes();
            final List<Future<Path>> folders = new ArrayList<>();
            for(String common : prefixes) {
                if(new SimplePathPredicate(PathNormalizer.compose(bucket, URIEncoder.decode(common))).test(directory)) {
                    continue;
                }
                folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common)));
            }
            for(Future<Path> f : folders) {
                try {
                    objects.add(Uninterruptibles.getUninterruptibly(f));
                }
                catch(ExecutionException e) {
                    log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage()));
                    // Re-throw the first BackgroundException in the cause chain, if any.
                    for(Throwable cause : ExceptionUtils.getThrowableList(e)) {
                        Throwables.throwIfInstanceOf(cause, BackgroundException.class);
                    }
                    throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e));
                }
            }
            // Carry the pagination markers into the next iteration.
            priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null;
            priorLastVersionId = chunk.getNextVersionIdMarker();
            listener.chunk(directory, objects);
        }
        while(priorLastKey != null);
        if(!hasDirectoryPlaceholder && objects.isEmpty()) {
            // Only for AWS
            if(S3Session.isAwsHostname(session.getHost().getHostname())) {
                if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) {
                    if(log.isWarnEnabled()) {
                        log.warn(String.format("No placeholder found for directory %s", directory));
                    }
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
            else {
                // Handle missing prefix for directory placeholders in Minio
                final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(),
                    String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()),
                    String.valueOf(Path.DELIMITER), 1, null, null, false);
                if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) {
                    throw new NotfoundException(directory.getAbsolute());
                }
            }
        }
        return objects;
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
}
@Test
public void testListFileDot() {
    // A file literally named "." must survive a round trip: create, list, delete.
    final Path bucket = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final S3AccessControlListFeature aclFeature = new S3AccessControlListFeature(session);
    final Path dotFile = new S3TouchFeature(session, aclFeature).touch(
        new Path(bucket, ".", EnumSet.of(Path.Type.file)), new TransferStatus());
    assertEquals(".", dotFile.getName());
    assertEquals(bucket, dotFile.getParent());
    dotFile.withAttributes(new S3AttributesFinderFeature(session, aclFeature).find(dotFile));
    assertNotNull(new S3VersionedObjectListService(session, new S3AccessControlListFeature(session))
        .list(bucket, new DisabledListProgressListener())
        .find(new SimplePathPredicate(dotFile)));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(dotFile),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Derives the composite file id for a drive item.
 *
 * <p>Plain items yield {@code <driveId>/<itemId>}. Items shared from another drive
 * (carrying remote-item metadata) append the remote drive and item ids; when no local
 * parent reference exists only the remote pair is returned.
 */
@Override
public String getFileId(final DriveItem.Metadata metadata) {
    final ItemReference parent = metadata.getParentReference();
    if(metadata.getRemoteItem() == null) {
        // Regular item within the user's own drive.
        return String.join(String.valueOf(Path.DELIMITER), parent.getDriveId(), metadata.getId());
    }
    final DriveItem.Metadata remoteMetadata = metadata.getRemoteItem();
    final ItemReference remoteParent = remoteMetadata.getParentReference();
    if(parent == null) {
        // Shared item with no local parent: identify it purely by the remote pair.
        return String.join(String.valueOf(Path.DELIMITER), remoteParent.getDriveId(), remoteMetadata.getId());
    }
    // Shared item referenced locally: local pair followed by remote pair.
    return String.join(String.valueOf(Path.DELIMITER),
        parent.getDriveId(), metadata.getId(), remoteParent.getDriveId(), remoteMetadata.getId());
}
@Test
public void testRealConsumerFileIdResponseSharedWithMe() throws Exception {
    // Parse a captured "shared with me" item response from the fixture resource.
    final DriveItem.Metadata sharedItemMetadata;
    try (final InputStream fixture = getClass().getResourceAsStream("/RealConsumerFileIdResponseSharedWithMe.json")) {
        final InputStreamReader fixtureReader = new InputStreamReader(fixture);
        sharedItemMetadata = DriveItem.parseJson(session.getClient(), (JsonObject) Json.parse(fixtureReader));
    }
    // Expect the composite local-pair/remote-pair id.
    assertEquals("A/A!0/B/B!1", session.getFileId(sharedItemMetadata));
}
/**
 * Builds a FEEL range value from its string representation, e.g. {@code "[1..10)"}.
 *
 * <p>Validates the boundary characters, splits the two endpoint literals on {@code ..},
 * parses and evaluates each endpoint, and checks the endpoints are of equivalent types.
 * Any validation failure is reported as an {@link InvalidParametersEvent} error result.
 *
 * <p>Cleanup: {@code String.isBlank()} is true for empty strings, so the original's
 * paired {@code isEmpty() || isBlank()} checks were redundant and are collapsed here;
 * behavior is unchanged.
 */
public FEELFnResult<Range> invoke(@ParameterName("from") String from) {
    if (from == null || from.isBlank()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "cannot be null"));
    }
    // In FEEL range syntax both '(' and ']' open the lower bound.
    Range.RangeBoundary startBoundary;
    if (from.startsWith("(") || from.startsWith("]")) {
        startBoundary = RangeBoundary.OPEN;
    } else if (from.startsWith("[")) {
        startBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not start with a valid character"));
    }
    // Both ')' and '[' open the upper bound.
    Range.RangeBoundary endBoundary;
    if (from.endsWith(")") || from.endsWith("[")) {
        endBoundary = RangeBoundary.OPEN;
    } else if (from.endsWith("]")) {
        endBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not end with a valid character"));
    }
    String[] split = from.split("\\.\\.");
    if (split.length != 2) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not include two literals separated by `..` two dots characters"));
    }
    // Strip the boundary characters off the endpoint literals.
    String leftString = split[0].substring(1);
    String rightString = split[1].substring(0, split[1].length() - 1);
    if (leftString.isBlank() && rightString.isBlank()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "at least one endpoint must not be null"));
    }
    BaseNode leftNode = parse(leftString);
    if (!nodeIsAllowed(leftNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a recognised valid literal"));
    }
    BaseNode rightNode = parse(rightString);
    if (!nodeIsAllowed(rightNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a recognised valid literal"));
    }
    // NOTE(review): if evaluate(...) can return null here, left.getClass()/right.getClass()
    // in the error messages below would NPE -- confirm nodeValueIsAllowed rejects null.
    Object left = leftNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(left)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a valid value " + left.getClass()));
    }
    Object right = rightNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a valid value " + right.getClass()));
    }
    if (!nodesReturnsSameType(left, right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "endpoints must be of equivalent types"));
    }
    return FEELFnResult.ofResult(new RangeImpl(startBoundary, (Comparable) left, (Comparable) right, endBoundary));
}
@Test
void evaluateWithInvalidFunctionInvocationNode() {
    // Every invalid endpoint literal must yield an error (left) result.
    final Object[][] data = invalidFunctionInvocationNodeData();
    for (Object[] objects : data) {
        String expression = String.format("[%1$s..%1$s]", objects[0]);
        FEELFnResult<Range> retrieved = rangeFunction.invoke(expression);
        assertThat(retrieved.isLeft())
                .withFailMessage(() -> String.format("Expected 'retrieved.isLeft()' from, %s", expression))
                .isTrue();
    }
}
/**
 * Resolves the SQL type of the expression with no lambda-parameter type mappings;
 * convenience overload for {@code getExpressionSqlType(expression, Collections.emptyMap())}.
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
@Test
public void shouldEvaluateTypeForMultiParamUdaf() {
    // Given: a registered UDAF "TEST_ETM" returning STRING.
    givenUdafWithNameAndReturnType("TEST_ETM", SqlTypes.STRING, aggregateFactory, aggregateFunction);
    // Call with a column, a second column, and a literal init argument.
    final Expression expression = new FunctionCall(
        FunctionName.of("TEST_ETM"),
        ImmutableList.of(
            COL7,
            new UnqualifiedColumnReferenceExp(ColumnName.of("COL4")),
            new StringLiteral("hello world")
        )
    );

    // When:
    final SqlType exprType = expressionTypeManager.getExpressionSqlType(expression);

    // Then: the UDAF's declared return type is used, the function was resolved with the
    // argument types (INT, ARRAY<DOUBLE>, STRING), and the literal was passed to the creator.
    assertThat(exprType, is(SqlTypes.STRING));
    verify(aggregateFactory).getFunction(ImmutableList.of(SqlTypes.INTEGER, SqlArray.of(SqlTypes.DOUBLE), SqlTypes.STRING));
    verify(aggCreator).apply("hello world");
    verify(aggregateFunction).returnType();
}
/**
 * Returns the media types this scope accepts as input.
 *
 * <p>The returned set is the unmodifiable view held in
 * {@code mSupportedMediaTypesUnmodifiable}; callers must not attempt to mutate it.
 */
protected Set<MediaType> getSupportedMediaTypesForInput() {
    return mSupportedMediaTypesUnmodifiable;
}
// Mutating the exposed media-type set must fail: the getter returns an unmodifiable view.
@Test(expected = UnsupportedOperationException.class)
public void testMediaTypesIsUnmodifiable() {
    mPackageScope.getSupportedMediaTypesForInput().add(MediaType.Image);
}
/**
 * Looks up the MIME type for a filename by its extension (case-insensitive),
 * falling back to {@code DEFAULT} when the extension is absent or unknown.
 */
public static String getByFilename(String filename) {
    final String extension = FilenameUtils.getExtension(filename);
    if (isNullOrEmpty(extension)) {
        return DEFAULT;
    }
    final String mime = MAP.get(extension.toLowerCase(Locale.ENGLISH));
    return mime == null ? DEFAULT : mime;
}
@Test
public void getByFilename() {
    // Lookup keys off the extension only; any leading directory path is ignored.
    assertThat(MediaTypes.getByFilename("static/sqale/sqale.css")).isEqualTo("text/css");
    assertThat(MediaTypes.getByFilename("sqale.css")).isEqualTo("text/css");
}
/**
 * Rewrites a coder cloud-object so that coders unknown to the runner are wrapped
 * in (or, when {@code replaceWithByteArrayCoder} is set, replaced by) a
 * length-prefixed byte-array coder, recursing into component encodings of
 * well-known coder types.
 */
@VisibleForTesting
static Map<String, Object> forCodec(
    Map<String, Object> codec, boolean replaceWithByteArrayCoder) {
  String coderType = (String) codec.get(PropertyNames.OBJECT_TYPE_NAME);
  // Handle well known coders.
  if (LENGTH_PREFIX_CODER_TYPE.equals(coderType)) {
    // Already length-prefixed: optionally collapse to the byte-array form, else keep as-is.
    if (replaceWithByteArrayCoder) {
      return CloudObjects.asCloudObject(
          LENGTH_PREFIXED_BYTE_ARRAY_CODER, /*sdkComponents=*/ null);
    }
    return codec;
  } else if (WELL_KNOWN_CODER_TYPES.contains(coderType)) {
    // The runner knows about these types and can instantiate them so handle their
    // component encodings.
    Map<String, Object> prefixedCodec = new HashMap<>(codec);
    // Recursively replace component encodings
    if (codec.containsKey(PropertyNames.COMPONENT_ENCODINGS)) {
      List<Map<String, Object>> prefixedComponents = new ArrayList<>();
      for (Map<String, Object> component :
          (Iterable<Map<String, Object>>) codec.get(PropertyNames.COMPONENT_ENCODINGS)) {
        prefixedComponents.add(forCodec(component, replaceWithByteArrayCoder));
      }
      prefixedCodec.put(PropertyNames.COMPONENT_ENCODINGS, prefixedComponents);
    }
    return prefixedCodec;
  }
  // Wrap unknown coders with length prefix coder.
  if (replaceWithByteArrayCoder) {
    return CloudObjects.asCloudObject(LENGTH_PREFIXED_BYTE_ARRAY_CODER, /*sdkComponents=*/ null);
  } else {
    // Unknown coder kept as the single component of a new length-prefix wrapper.
    Map<String, Object> prefixedCodec = new HashMap<>();
    prefixedCodec.put(PropertyNames.OBJECT_TYPE_NAME, LENGTH_PREFIX_CODER_TYPE);
    prefixedCodec.put(PropertyNames.COMPONENT_ENCODINGS, ImmutableList.of(codec));
    return prefixedCodec;
  }
}
// An already length-prefixed component coder must be left untouched when
// replaceWithByteArrayCoder is false.
@Test
public void testLengthPrefixForLengthPrefixCoder() throws Exception {
    Coder<WindowedValue<KV<String, Integer>>> windowedValueCoder = WindowedValue.getFullCoder(
        KvCoder.of(StringUtf8Coder.of(), LengthPrefixCoder.of(VarIntCoder.of())), GlobalWindow.Coder.INSTANCE);
    Map<String, Object> lengthPrefixedCoderCloudObject =
        forCodec(CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null), false);
    Coder<WindowedValue<KV<String, Integer>>> expectedCoder = WindowedValue.getFullCoder(
        KvCoder.of(StringUtf8Coder.of(), LengthPrefixCoder.of(VarIntCoder.of())), GlobalWindow.Coder.INSTANCE);
    // Compared as JSON, so map ordering differences are irrelevant.
    assertEqualsAsJson(
        CloudObjects.asCloudObject(expectedCoder, /*sdkComponents=*/ null), lengthPrefixedCoderCloudObject);
}
/**
 * Runs the task with the job id installed in the logging MDC; always completes
 * {@code terminationFuture} with the final execution state, even when doRun fails.
 */
@Override
public void run() {
    try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
        doRun();
    } finally {
        terminationFuture.complete(executionState);
    }
}
// A failure in the invokable's restore phase must still trigger task cleanup.
@Test
public void testCleanupWhenRestoreFails() throws Exception {
    createTaskBuilder()
        .setInvokable(InvokableWithExceptionInRestore.class)
        .build(Executors.directExecutor())
        .run();
    assertTrue(wasCleanedUp);
}
/**
 * Reads a float by reassembling it from its raw IEEE-754 bit pattern read as an int.
 *
 * @throws EOFException when not enough bytes remain (propagated from readInt)
 */
@Override
public float readFloat() throws EOFException {
    return Float.intBitsToFloat(readInt());
}
// Reads a little-endian float at position 2 and compares against a manually
// decoded value. NOTE(review): locals are typed double but hold float values —
// the widening is lossless, so the zero-delta comparison remains exact.
@Test
public void testReadFloatForPositionByteOrder() throws Exception {
    double readFloat = in.readFloat(2, LITTLE_ENDIAN);
    int intB = Bits.readIntL(INIT_DATA, 2);
    double aFloat = Float.intBitsToFloat(intB);
    assertEquals(aFloat, readFloat, 0);
}
/** Returns this segment's weight. */
public double getWeight() {
    return weight;
}
// Default weight is 1.0; the builder's withWeight must override it.
// NOTE(review): reassigns the shared KIE_PMML_SEGMENT field — test-order sensitive.
@Test
void getWeight() {
    final double weight = 33.45;
    assertThat(KIE_PMML_SEGMENT.getWeight()).isCloseTo(1.0, Offset.offset(0.0));
    KIE_PMML_SEGMENT = BUILDER.withWeight(weight).build();
    assertThat(KIE_PMML_SEGMENT.getWeight()).isCloseTo(weight, Offset.offset(0.0));
}
/**
 * Creates an IPv6 prefix from a raw address byte array and a prefix length.
 * Validation of the bytes and the prefix length is delegated to the callees.
 */
public static Ip6Prefix valueOf(byte[] address, int prefixLength) {
    return new Ip6Prefix(Ip6Address.valueOf(address), prefixLength);
}
// A negative prefix length must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfAddressNegativePrefixLengthIPv6() {
    Ip6Address ipAddress;
    Ip6Prefix ipPrefix;
    ipAddress = Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:8888");
    // The call below is expected to throw; ipPrefix is never read.
    ipPrefix = Ip6Prefix.valueOf(ipAddress, -1);
}
/**
 * Creates an empty cache; the per-topic index map is concurrent so multiple
 * producer threads can consult it safely.
 */
public StickyPartitionCache() {
    this.indexCache = new ConcurrentHashMap<>();
}
// Sticky partitioning: the chosen partition stays stable until nextPartition is
// invoked with the currently sticky partition; a single-partition topic always
// resolves to that partition.
@Test
public void testStickyPartitionCache() {
    List<PartitionInfo> allPartitions = asList(new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES),
        new PartitionInfo(TOPIC_A, 1, NODES[1], NODES, NODES), new PartitionInfo(TOPIC_A, 2, NODES[2], NODES, NODES),
        new PartitionInfo(TOPIC_B, 0, NODES[0], NODES, NODES)
    );
    Cluster testCluster = new Cluster("clusterId", asList(NODES), allPartitions, Collections.emptySet(), Collections.emptySet());
    StickyPartitionCache stickyPartitionCache = new StickyPartitionCache();
    int partA = stickyPartitionCache.partition(TOPIC_A, testCluster);
    assertEquals(partA, stickyPartitionCache.partition(TOPIC_A, testCluster));
    int partB = stickyPartitionCache.partition(TOPIC_B, testCluster);
    assertEquals(partB, stickyPartitionCache.partition(TOPIC_B, testCluster));
    int changedPartA = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA);
    assertEquals(changedPartA, stickyPartitionCache.partition(TOPIC_A, testCluster));
    assertNotEquals(partA, changedPartA);
    int changedPartA2 = stickyPartitionCache.partition(TOPIC_A, testCluster);
    assertEquals(changedPartA2, changedPartA);
    // We do not want to change partitions because the previous partition does not match the current sticky one.
    int changedPartA3 = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA);
    assertEquals(changedPartA3, changedPartA2);
    // Check that we can still use the partitioner when there is only one partition
    int changedPartB = stickyPartitionCache.nextPartition(TOPIC_B, testCluster, partB);
    assertEquals(changedPartB, stickyPartitionCache.partition(TOPIC_B, testCluster));
}
/**
 * Returns a collector indexing elements into an ImmutableListMultimap keyed by
 * {@code keyFunction}, keeping each element itself as the value.
 * Null-checking of the key function is expected to happen in the two-arg overload.
 */
public static <K, E> Collector<E, ImmutableListMultimap.Builder<K, E>, ImmutableListMultimap<K, E>> index(Function<? super E, K> keyFunction) {
    return index(keyFunction, Function.identity());
}
// A null key function must be rejected eagerly with a descriptive NPE.
@Test
public void index_fails_if_key_function_is_null() {
    assertThatThrownBy(() -> index(null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Key function can't be null");
}
/**
 * Completes restoration of this active task: a no-op when already RUNNING;
 * from RESTORING it (re)initializes offsets/metadata, topology and the
 * processor context, optionally checkpoints, and transitions to RUNNING.
 *
 * @param offsetResetter consumer invoked for partitions whose offsets need resetting
 * @throws IllegalStateException when called from CREATED/SUSPENDED/CLOSED or an unknown state
 */
@Override
public void completeRestoration(final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
    switch (state()) {
        case RUNNING:
            // Already running: nothing to do.
            return;
        case RESTORING:
            resetOffsetsIfNeededAndInitializeMetadata(offsetResetter);
            initializeTopology();
            processorContext.initialize();
            // Without EOS, force a checkpoint so the restored state is persisted.
            if (!eosEnabled) {
                maybeCheckpoint(true);
            }
            transitionTo(State.RUNNING);
            log.info("Restored and ready to run");
            break;
        case CREATED:
        case SUSPENDED:
        case CLOSED:
            throw new IllegalStateException("Illegal state " + state() + " while completing restoration for active task " + id);
        default:
            throw new IllegalStateException("Unknown state " + state() + " while completing restoration for active task " + id);
    }
}
// A KafkaException thrown by Consumer#committed while completing restoration
// must surface as a StreamsException.
@Test
public void shouldThrowStreamsExceptionWhenFetchCommittedFailed() {
    when(stateManager.taskId()).thenReturn(taskId);
    when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
    final Consumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            throw new KafkaException("KABOOM!");
        }
    };
    task = createOptimizedStatefulTask(createConfig("100"), consumer);
    task.transitionTo(RESTORING);
    assertThrows(StreamsException.class, () -> task.completeRestoration(noOpResetter -> { }));
}
/**
 * Checks whether every configured storage type equals the protocol type.
 * An empty storage-type map yields {@code true} (vacuous truth); a null
 * storage type yields {@code false}.
 */
public boolean isSameProtocolAndStorageTypes() {
    for (final Object eachStorageType : storageTypes.values()) {
        if (!protocolType.equals(eachStorageType)) {
            return false;
        }
    }
    return true;
}
// A null storage type never equals the protocol type, so the material must report mixed types.
@Test
void assertIsDifferentProtocolAndStorageTypes() {
    GenericSchemaBuilderMaterial material = new GenericSchemaBuilderMaterial(TypedSPILoader.getService(DatabaseType.class, "FIXTURE"),
            Collections.singletonMap("foo", null), Collections.emptyMap(), Collections.emptyList(), new ConfigurationProperties(new Properties()), "");
    assertFalse(material.isSameProtocolAndStorageTypes());
}
/**
 * Loads and initializes a custom FileIO implementation by fully-qualified class name.
 *
 * @throws IllegalArgumentException when no suitable constructor exists or the
 *         class does not implement FileIO
 */
public static FileIO loadFileIO(String impl, Map<String, String> properties, Object hadoopConf) {
    LOG.info("Loading custom FileIO implementation: {}", impl);
    DynConstructors.Ctor<FileIO> ctor;
    try {
        ctor = DynConstructors.builder(FileIO.class)
            .loader(CatalogUtil.class.getClassLoader())
            .impl(impl)
            .buildChecked();
    } catch (NoSuchMethodException e) {
        throw new IllegalArgumentException(
            String.format("Cannot initialize FileIO implementation %s: %s", impl, e.getMessage()), e);
    }
    FileIO fileIO;
    try {
        fileIO = ctor.newInstance();
    } catch (ClassCastException e) {
        // The checked ctor casts to FileIO; a non-conforming class surfaces here.
        throw new IllegalArgumentException(
            String.format("Cannot initialize FileIO, %s does not implement FileIO.", impl), e);
    }
    // Hadoop conf is applied before initialize(properties) — order matters to callers relying on it.
    configureHadoopConf(fileIO, hadoopConf);
    fileIO.initialize(properties);
    return fileIO;
}
// The Configuration passed at load time must be applied to Hadoop-aware FileIO implementations.
@Test
public void loadCustomFileIO_hadoopConfigConstructor() {
    Configuration configuration = new Configuration();
    configuration.set("key", "val");
    FileIO fileIO = CatalogUtil.loadFileIO(HadoopFileIO.class.getName(), Maps.newHashMap(), configuration);
    assertThat(fileIO).isInstanceOf(HadoopFileIO.class);
    assertThat(((HadoopFileIO) fileIO).conf().get("key")).isEqualTo("val");
}
/**
 * Reads OpenAPI annotations from a class using defaults for all other scanning
 * parameters (resolved application path, no parent context, fresh collections).
 */
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
// Verifies that SchemaProperties / additionalProperties annotations are rendered
// into the expected OpenAPI YAML (compared structurally, not textually).
@Test(description = "Test SchemaProperties and additionalProperties annotations")
public void testSchemaProperties() {
    Reader reader = new Reader(new OpenAPI());
    OpenAPI openAPI = reader.read(SchemaPropertiesResource.class);
    // Expected serialization of the scanned resource.
    String yaml = "openapi: 3.0.1\n" + "paths:\n" + " /:\n" + " get:\n" + " summary: Simple get operation\n" +
        " description: Defines a simple get operation with no inputs and a complex output\n" + " object\n" +
        " operationId: getWithPayloadResponse\n" + " responses:\n" + " \"200\":\n" + " description: voila!\n" +
        " content:\n" + " application/json:\n" + " schema:\n" + " type: object\n" + " properties:\n" + " foo:\n" +
        " maximum: 1\n" + " type: integer\n" + " default:\n" + " description: boo\n" + " content:\n" +
        " application/json:\n" + " schema:\n" + " maxProperties: 3\n" + " type: object\n" + " properties:\n" +
        " foo:\n" + " maximum: 1\n" + " type: integer\n" + " description: various properties\n" + " \"400\":\n" +
        " description: additionalProperties schema\n" + " content:\n" + " application/json:\n" + " schema:\n" +
        " maxProperties: 2\n" + " type: object\n" + " additionalProperties:\n" + " type: string\n" + " \"401\":\n" +
        " description: additionalProperties boolean\n" + " content:\n" + " application/json:\n" + " schema:\n" +
        " maxProperties: 2\n" + " type: object\n" + " additionalProperties: false\n" + " deprecated: true\n" +
        " /one:\n" + " get:\n" + " operationId: requestBodySchemaPropertyNoSchema\n" + " requestBody:\n" +
        " content:\n" + " application/yaml:\n" + " schema:\n" + " type: object\n" + " properties:\n" + " foo:\n" +
        " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" +
        " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/MultipleBaseBean'\n" + " /two:\n" +
        " get:\n" + " operationId: requestBodySchemaPropertySchema\n" + " requestBody:\n" + " content:\n" +
        " application/yaml:\n" + " schema:\n" + " required:\n" + " - foo\n" + " type: object\n" + " properties:\n" +
        " foo:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" +
        " content:\n" + " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/MultipleBaseBean'\n" +
        " /three:\n" + " get:\n" + " operationId: requestBodySchemaPropertySchemaArray\n" + " requestBody:\n" +
        " content:\n" + " application/yaml:\n" + " schema:\n" + " type: array\n" + " items:\n" + " required:\n" +
        " - foo\n" + " type: object\n" + " properties:\n" + " foo:\n" + " type: string\n" + " responses:\n" +
        " default:\n" + " description: default response\n" + " content:\n" + " application/json:\n" + " schema:\n" +
        " $ref: '#/components/schemas/MultipleBaseBean'\n" + "components:\n" + " schemas:\n" + " MultipleBaseBean:\n" +
        " type: object\n" + " properties:\n" + " beanType:\n" + " type: string\n" + " a:\n" + " type: integer\n" +
        " format: int32\n" + " b:\n" + " type: string\n" + " description: MultipleBaseBean\n" +
        " MultipleSub1Bean:\n" + " type: object\n" + " description: MultipleSub1Bean\n" + " allOf:\n" +
        " - $ref: '#/components/schemas/MultipleBaseBean'\n" + " - type: object\n" + " properties:\n" + " c:\n" +
        " type: integer\n" + " format: int32\n" + " MultipleSub2Bean:\n" + " type: object\n" +
        " description: MultipleSub2Bean\n" + " allOf:\n" + " - $ref: '#/components/schemas/MultipleBaseBean'\n" +
        " - type: object\n" + " properties:\n" + " d:\n" + " type: integer\n" + " format: int32\n";
    SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
/** Single-argument overload: delegates to the two-arg form with a scale of zero. */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
    return invoke(n, BigDecimal.ZERO);
}
// ceiling(-10.2) == -10 (rounds toward positive infinity).
@Test
void invokeNegative() {
    FunctionTestUtil.assertResultBigDecimal(ceilingFunction.invoke(BigDecimal.valueOf(-10.2)), BigDecimal.valueOf(-10));
}
/**
 * Builds the CPD block index over all main Java input files, skipping files
 * flagged as excluded from duplication detection; no-op when nothing matches.
 */
@Override
public void execute(SensorContext context) {
    FilePredicates p = context.fileSystem().predicates();
    List<InputFile> sourceFiles = StreamSupport.stream(
        context.fileSystem().inputFiles(
            p.and(
                p.hasType(InputFile.Type.MAIN),
                p.hasLanguage("java")
            )
        ).spliterator(), false)
        // isExcludedForDuplication is only available on the default implementation, hence the cast.
        .filter(f -> !((DefaultInputFile) f).isExcludedForDuplication())
        .toList();
    if (sourceFiles.isEmpty()) {
        return;
    }
    createIndex(sourceFiles);
}
// Files excluded for duplication must never reach the CPD index.
@Test
public void testExclusions() {
    file.setExcludedForDuplication(true);
    new JavaCpdBlockIndexerSensor(index).execute(context);
    verifyNoInteractions(index);
}
/**
 * Processes one shared-cache resource directory: deletes leftover renamed
 * (stale) directories, cleans dead application references, evicts evictable
 * resources, and records the outcome (DELETED/PROCESSED/ERROR) in the metrics.
 */
void processSingleResource(FileStatus resource) {
    Path path = resource.getPath();
    // indicates the processing status of the resource
    ResourceStatus resourceStatus = ResourceStatus.INIT;
    // first, if the path ends with the renamed suffix, it indicates the
    // directory was moved (as stale) but somehow not deleted (probably due to
    // SCM failure); delete the directory
    if (path.toString().endsWith(RENAMED_SUFFIX)) {
        LOG.info("Found a renamed directory that was left undeleted at " +
            path.toString() + ". Deleting.");
        try {
            if (fs.delete(path, true)) {
                resourceStatus = ResourceStatus.DELETED;
            }
        } catch (IOException e) {
            LOG.error("Error while processing a shared cache resource: " + path, e);
        }
    } else {
        // this is the path to the cache resource directory
        // the directory name is the resource key (i.e. a unique identifier)
        String key = path.getName();
        try {
            store.cleanResourceReferences(key);
        } catch (YarnException e) {
            LOG.error("Exception thrown while removing dead appIds.", e);
        }
        if (store.isResourceEvictable(key, resource)) {
            try {
                /*
                 * TODO See YARN-2663: There is a race condition between
                 * store.removeResource(key) and
                 * removeResourceFromCacheFileSystem(path) operations because they do
                 * not happen atomically and resources can be uploaded with different
                 * file names by the node managers.
                 */
                // remove the resource from scm (checks for appIds as well)
                if (store.removeResource(key)) {
                    // remove the resource from the file system
                    boolean deleted = removeResourceFromCacheFileSystem(path);
                    if (deleted) {
                        resourceStatus = ResourceStatus.DELETED;
                    } else {
                        LOG.error("Failed to remove path from the file system." +
                            " Skipping this resource: " + path);
                        resourceStatus = ResourceStatus.ERROR;
                    }
                } else {
                    // we did not delete the resource because it contained application
                    // ids
                    resourceStatus = ResourceStatus.PROCESSED;
                }
            } catch (IOException e) {
                LOG.error("Failed to remove path from the file system. Skipping this resource: " + path, e);
                resourceStatus = ResourceStatus.ERROR;
            }
        } else {
            resourceStatus = ResourceStatus.PROCESSED;
        }
    }
    // record the processing
    switch (resourceStatus) {
    case DELETED:
        metrics.reportAFileDelete();
        break;
    case PROCESSED:
        metrics.reportAFileProcess();
        break;
    case ERROR:
        metrics.reportAFileError();
        break;
    default:
        LOG.error("Cleaner encountered an invalid status (" + resourceStatus +
            ") while processing resource: " + path.getName());
    }
}
// An evictable resource must be removed from the store, renamed, deleted, and
// recorded as a file deletion in the metrics.
@Test
void testProcessEvictableResource() throws Exception {
    FileSystem fs = mock(FileSystem.class);
    CleanerMetrics metrics = mock(CleanerMetrics.class);
    SCMStore store = mock(SCMStore.class);
    CleanerTask task = createSpiedTask(fs, store, metrics, new ReentrantLock());
    // mock an evictable resource
    when(store.isResourceEvictable(isA(String.class), isA(FileStatus.class)))
        .thenReturn(true);
    FileStatus status = mock(FileStatus.class);
    when(status.getPath()).thenReturn(new Path(ROOT + "/a/b/c/abc"));
    when(store.removeResource(isA(String.class))).thenReturn(true);
    // rename succeeds
    when(fs.rename(isA(Path.class), isA(Path.class))).thenReturn(true);
    // delete returns true
    when(fs.delete(isA(Path.class), anyBoolean())).thenReturn(true);
    // process the resource
    task.processSingleResource(status);
    // the directory should be renamed
    verify(fs).rename(eq(status.getPath()), isA(Path.class));
    // metrics should record a deleted file
    verify(metrics).reportAFileDelete();
    verify(metrics, never()).reportAFileProcess();
}
/**
 * Leniently parses a string into a double.
 * Blank input yields 0; when strict parsing fails, falls back to the more
 * permissive parseNumber (e.g. values with trailing garbage).
 */
public static double parseDouble(String number) {
    if (StrUtil.isBlank(number)) {
        return 0D;
    }
    try {
        return Double.parseDouble(number);
    } catch (NumberFormatException e) {
        return parseNumber(number).doubleValue();
    }
}
// Lenient parsing contract: unparseable input yields the supplied default
// (null allowed), blank input yields the default, and values with trailing
// garbage or a bare leading dot still parse.
@Test
public void parseDoubleTest() {
    // -------------------------- Parse failed -----------------------
    assertNull(NumberUtil.parseDouble("abc", null));
    assertNull(NumberUtil.parseDouble("a123.33", null));
    assertNull(NumberUtil.parseDouble("..123", null));
    assertEquals(1233D, NumberUtil.parseDouble(StrUtil.EMPTY, 1233D));
    // -------------------------- Parse success -----------------------
    assertEquals(123.33D, NumberUtil.parseDouble("123.33a", null));
    assertEquals(0.123D, NumberUtil.parseDouble(".123", null));
}
/** Builds a fresh S3 client builder configured from the given pipeline options. */
@Override
public S3ClientBuilder createBuilder(S3Options s3Options) {
    return createBuilder(S3Client.builder(), s3Options);
}
// Only the region from the options should be applied to the builder — nothing else.
@Test
public void testSetRegion() {
    when(s3Options.getAwsRegion()).thenReturn(Region.US_WEST_1);
    DefaultS3ClientBuilderFactory.createBuilder(builder, s3Options);
    verify(builder).region(Region.US_WEST_1);
    verifyNoMoreInteractions(builder);
}
/**
 * Unregisters a plugin by id.
 * NOTE(review): the body is empty — as written this is a no-op, yet the
 * accompanying test expects the manager to be empty after unregistering.
 * Confirm whether removal is handled elsewhere or this stub needs implementing.
 */
@Override
public void unregister(String pluginId) {
}
// After unregistering the only registered plugin, the manager must be empty.
@Test
public void testUnregister() {
    manager.register(new TestPlugin());
    manager.unregister(TestPlugin.class.getSimpleName());
    Assert.assertTrue(isEmpty(manager));
}
/**
 * Configures Gson to exclude fields carrying any of the given
 * {@link java.lang.reflect.Modifier} flags, replacing the previous modifier set.
 *
 * @throws NullPointerException if {@code modifiers} is null
 */
@CanIgnoreReturnValue
public GsonBuilder excludeFieldsWithModifiers(int... modifiers) {
    Objects.requireNonNull(modifiers);
    excluder = excluder.withModifiers(modifiers);
    return this;
}
// volatile and private fields are excluded; only the remaining field "d" is serialized.
@Test
public void testExcludeFieldsWithModifiers() {
    Gson gson = new GsonBuilder().excludeFieldsWithModifiers(Modifier.VOLATILE, Modifier.PRIVATE).create();
    assertThat(gson.toJson(new HasModifiers())).isEqualTo("{\"d\":\"d\"}");
}
/**
 * Creates a notification template and returns its generated id.
 * Rejects duplicate template codes before inserting.
 */
@Override
public Long createNotifyTemplate(NotifyTemplateSaveReqVO createReqVO) {
    // Validate that the template code is not already in use.
    validateNotifyTemplateCodeDuplicate(null, createReqVO.getCode());
    // Insert: map the request VO to the DO and derive placeholder params from the content.
    NotifyTemplateDO notifyTemplate = BeanUtils.toBean(createReqVO, NotifyTemplateDO.class);
    notifyTemplate.setParams(parseTemplateContentParams(notifyTemplate.getContent()));
    notifyTemplateMapper.insert(notifyTemplate);
    return notifyTemplate.getId();
}
// Creating a template must return a generated id and persist all request fields.
@Test
public void testCreateNotifyTemplate_success() {
    // Prepare arguments
    NotifyTemplateSaveReqVO reqVO = randomPojo(NotifyTemplateSaveReqVO.class, o -> o.setStatus(randomCommonStatus()))
            .setId(null); // prevent id from being pre-assigned
    // Invoke
    Long notifyTemplateId = notifyTemplateService.createNotifyTemplate(reqVO);
    // Assert
    assertNotNull(notifyTemplateId);
    // Verify the persisted record matches the request (id excluded)
    NotifyTemplateDO notifyTemplate = notifyTemplateMapper.selectById(notifyTemplateId);
    assertPojoEquals(reqVO, notifyTemplate, "id");
}
/**
 * Hash code derived from username and password.
 * NOTE(review): must stay consistent with equals — confirm equals compares the same two fields.
 */
@Override
public int hashCode() {
    return Objects.hash(username, password);
}
// hashCode must agree with equals: same hash for identical credentials,
// different hash when either field differs.
@Test
void hasAWorkingHashCode() {
    assertThat(credentials.hashCode())
        .hasSameHashCodeAs(new BasicCredentials("u", "p"))
        .isNotEqualTo(new BasicCredentials("u1", "p").hashCode())
        .isNotEqualTo(new BasicCredentials("u", "p1").hashCode());
}
/**
 * Compiles a Janino expression against the given parameter names/types and the
 * expected result type.
 *
 * @throws InvalidProgramException when the expression fails to compile
 *         (treated as an internal bug, hence the "file an issue" message)
 */
public static ExpressionEvaluator compileExpression(
        String expression,
        List<String> argumentNames,
        List<Class<?>> argumentClasses,
        Class<?> returnClass) {
    ExpressionEvaluator expressionEvaluator = new ExpressionEvaluator();
    expressionEvaluator.setParameters(
            argumentNames.toArray(new String[0]), argumentClasses.toArray(new Class[0]));
    expressionEvaluator.setExpressionType(returnClass);
    try {
        expressionEvaluator.cook(expression);
        return expressionEvaluator;
    } catch (CompileException e) {
        throw new InvalidProgramException(
                "Expression cannot be compiled. This is a bug. Please file an issue.\nExpression: "
                        + expression,
                e);
    }
}
// A char converted via String.valueOf must compare equal to a matching String column value.
@Test
public void testJaninoCharCompare() throws InvocationTargetException {
    String expression = "String.valueOf('2').equals(col1)";
    List<String> columnNames = Arrays.asList("col1");
    List<Class<?>> paramTypes = Arrays.asList(String.class);
    List<Object> params = Arrays.asList("2");
    ExpressionEvaluator expressionEvaluator =
        JaninoCompiler.compileExpression(expression, columnNames, paramTypes, Boolean.class);
    Object evaluate = expressionEvaluator.evaluate(params.toArray());
    Assert.assertEquals(true, evaluate);
}
@Override public void copyParametersFrom( NamedParams aParam ) { if ( params != null && aParam != null ) { params.clear(); String[] keys = aParam.listParameters(); for ( int idx = 0; idx < keys.length; idx++ ) { String desc; try { desc = aParam.getParameterDescription( keys[idx] ); } catch ( UnknownParamException e ) { desc = ""; } String defValue; try { defValue = aParam.getParameterDefault( keys[idx] ); } catch ( UnknownParamException e ) { defValue = ""; } String value; try { value = aParam.getParameterValue( keys[idx] ); } catch ( UnknownParamException e ) { value = ""; } try { addParameterDefinition( keys[idx], defValue, desc ); } catch ( DuplicateParamException e ) { // Do nothing, just overwrite. } setParameterValue( keys[idx], value ); } } }
// Copied parameters must retain their current values, not only their definitions.
@Test
public void testCopyParametersFrom() throws Exception {
    NamedParams namedParams2 = new NamedParamsDefault();
    namedParams2.addParameterDefinition( "key", "default value", "description" );
    namedParams2.setParameterValue( "key", "value" );
    assertNull( namedParams.getParameterValue( "key" ) );
    namedParams.copyParametersFrom( namedParams2 );
    assertEquals( "value", namedParams.getParameterValue( "key" ) );
}
/**
 * Sends this request synchronously over the underlying web3j service and
 * deserializes the reply into {@code responseType}.
 *
 * @throws IOException on transport failure (propagated from the service)
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
// The request must serialize to the exact JSON-RPC payload for eth_newBlockFilter.
@Test
public void testEthNewBlockFilter() throws Exception {
    web3j.ethNewBlockFilter().send();
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_newBlockFilter\","
            + "\"params\":[],\"id\":1}");
}
/**
 * Builds a RetryRegistry from a commons-configuration source, creating one
 * RetryConfig per configured instance, each customized via the given customizer.
 */
public static RetryRegistry of(Configuration configuration, CompositeCustomizer<RetryConfigCustomizer> customizer) {
    CommonRetryConfigurationProperties retryConfiguration = CommonsConfigurationRetryConfiguration.of(configuration);
    // One RetryConfig per configured instance name.
    Map<String, RetryConfig> retryConfigMap = retryConfiguration.getInstances()
        .entrySet().stream()
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            entry -> retryConfiguration.createRetryConfig(entry.getValue(), customizer, entry.getKey())));
    return RetryRegistry.of(retryConfigMap);
}
// A registry built from a YAML configuration must expose one retry per configured instance.
@Test
public void testRetryRegistryFromYamlFile() throws ConfigurationException {
    Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class, TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME);
    RetryRegistry registry = CommonsConfigurationRetryRegistry.of(config, new CompositeCustomizer<>(List.of()));
    Assertions.assertThat(registry.retry(TestConstants.BACKEND_A).getName()).isEqualTo(TestConstants.BACKEND_A);
    Assertions.assertThat(registry.retry(TestConstants.BACKEND_B).getName()).isEqualTo(TestConstants.BACKEND_B);
}