Dataset columns:
- focal_method: string (length 13 to 60.9k characters)
- test_case: string (length 25 to 109k characters)
/**
 * Derives a Java class name from the OpenAPI document title.
 * Falls back to {@code DEFAULT_CLASS_NAME} when the document has no info, no
 * title, or the filtered title is not a valid Java identifier.
 *
 * @param document the OpenAPI document to read the title from
 * @return a valid Java class name derived from the title, or the default
 */
static String generateClassName(final OpenAPI document) {
    final Info info = document.getInfo();
    if (info == null) {
        return DEFAULT_CLASS_NAME;
    }
    final String title = info.getTitle();
    if (title == null) {
        return DEFAULT_CLASS_NAME;
    }
    // Keep only ASCII Java-identifier characters from the title. The upper bound
    // must be inclusive: the original `c < 'z'` was an off-by-one that silently
    // dropped every lowercase 'z' from generated class names.
    final String className = title.chars()
            .filter(Character::isJavaIdentifierPart)
            .filter(c -> c <= 'z')
            .boxed()
            .collect(Collector.of(StringBuilder::new, StringBuilder::appendCodePoint,
                    StringBuilder::append, StringBuilder::toString));
    if (className.isEmpty() || !Character.isJavaIdentifierStart(className.charAt(0))) {
        return DEFAULT_CLASS_NAME;
    }
    return className;
}
/** The generated class name is the title with all non-identifier characters removed. */
@Test
public void shouldGenerateClassNameFromTitle() {
    final Info info = new Info();
    info.setTitle("Example API");

    final OpenAPI openapi = new OpenAPI();
    openapi.setInfo(info);

    assertThat(RestDslSourceCodeGenerator.generateClassName(openapi)).isEqualTo("ExampleAPI");
}
/**
 * Rounds {@code number} to {@code scale} decimal places using banker's rounding,
 * first converting the argument to a {@link BigDecimal}.
 *
 * @param number the value to round
 * @param scale  the number of decimal places to keep
 * @return the rounded value
 */
public static BigDecimal roundHalfEven(Number number, int scale) {
    final BigDecimal decimal = toBigDecimal(number);
    return roundHalfEven(decimal, scale);
}
/**
 * Verifies HALF_EVEN (banker's) rounding at scale 2: a trailing 5 rounds toward
 * the nearest even digit, and anything past the 5 forces rounding up.
 * The repeated assert pairs of the original are collapsed into a data table.
 */
@Test
public void roundHalfEvenTest() {
    // Each entry: {input value, expected HALF_EVEN result at scale 2}.
    final Object[][] cases = {
            {4.245, "4.24"}, {4.2450, "4.24"}, {4.2451, "4.25"}, {4.2250, "4.22"},
            {1.2050, "1.20"}, {1.2150, "1.22"}, {1.2250, "1.22"}, {1.2350, "1.24"},
            {1.2450, "1.24"}, {1.2550, "1.26"}, {1.2650, "1.26"}, {1.2750, "1.28"},
            {1.2850, "1.28"}, {1.2950, "1.30"},
    };
    for (Object[] c : cases) {
        String roundStr = NumberUtil.roundHalfEven((double) c[0], 2).toString();
        assertEquals(roundStr, (String) c[1]);
    }
}
/**
 * Tests whether {@code text} matches the configured value, honouring the
 * regex and case-sensitivity flags.
 *
 * @param text the candidate text; {@code null} never matches
 * @return {@code true} when the text matches the configured value
 */
public boolean matches(String text) {
    // A null subject can never match any configured value.
    if (text == null) {
        return false;
    }
    if (regex) {
        // NOTE: the pattern is recompiled on every call because `value` is mutable state.
        int flags = caseSensitive ? 0 : Pattern.CASE_INSENSITIVE;
        return Pattern.compile(value, flags).matcher(text).matches();
    }
    return caseSensitive ? value.equals(text) : value.equalsIgnoreCase(text);
}
/** Exercises all four combinations of the regex and case-sensitivity flags. */
@Test
public void testMatches() {
    final String subject = "Simple";
    final PropertyType instance = new PropertyType();

    // Plain comparison, case-insensitive by default.
    instance.setValue("simple");
    assertTrue(instance.matches(subject));

    // Case-sensitive plain comparison no longer matches.
    instance.setCaseSensitive(true);
    assertFalse(instance.matches(subject));

    // Case-sensitive regex against a differently-cased subject fails.
    instance.setValue("s.*le");
    instance.setRegex(true);
    assertFalse(instance.matches(subject));

    // Case-insensitive regex matches again.
    instance.setCaseSensitive(false);
    assertTrue(instance.matches(subject));
}
/**
 * Returns an unmodifiable view of all add-ons, lazily loading them on first
 * access. Synchronized so concurrent callers cannot trigger a double load.
 *
 * @return an unmodifiable list of the loaded add-ons
 */
public final synchronized List<E> getAllAddOns() {
    Logger.d(mTag, "getAllAddOns has %d add on for %s", mAddOns.size(), getClass().getName());
    // isEmpty() is the idiomatic emptiness check (and cheaper for some collections).
    if (mAddOns.isEmpty()) {
        loadAddOns();
    }
    Logger.d(
        mTag, "getAllAddOns will return %d add on for %s", mAddOns.size(), getClass().getName());
    return unmodifiableList(mAddOns);
}
/** Mutating the list returned by getAllAddOns() must fail fast. */
@Test(expected = UnsupportedOperationException.class)
public void testGetAllAddOnsReturnsUnmodifiableList() throws Exception {
    final TestableAddOnsFactory factory = new TestableAddOnsFactory(true);
    factory.getAllAddOns().remove(0);
}
/**
 * Writes the first {@code length} bytes of {@code data} to {@code out},
 * delegating to {@code writeFixedLengthBytes} with a start offset of 0.
 *
 * @param data   the source bytes (semantics of a null/short array are defined by the delegate)
 * @param length the number of bytes to write
 * @param out    the destination stream
 */
public static void writeFixedLengthBytesFromStart(byte[] data, int length, ByteArrayOutputStream out) {
    writeFixedLengthBytes(data, 0, length, out);
}
// Smoke test: a null source array with zero length and a null output stream must be
// tolerated without throwing. NOTE(review): there are no assertions — the test only
// guards against regressions that introduce an exception on this degenerate input.
@Test
public void testWriteFixedLengthBytesFromStart() {
    ByteHelper.writeFixedLengthBytesFromStart(null, 0, null);
}
/**
 * Returns whether {@code o} is a selected member of this collection.
 * Only members accepted by the selector are considered.
 *
 * @param o the candidate element; {@code null} is never contained
 * @return {@code true} if a selected member equals {@code o}
 */
@Override
public boolean contains(Object o) {
    // Guard: the original `o.equals(member)` threw NPE for contains(null);
    // the Collection contract permits returning false instead, which is safer.
    if (o == null) {
        return false;
    }
    for (M member : members) {
        if (selector.select(member) && o.equals(member)) {
            return true;
        }
    }
    return false;
}
/** thisMember is not a data member, so a DATA_MEMBER_SELECTOR view must not contain it. */
@Test
public void testDoesNotContainThisMemberWhenDataMembersSelected() {
    final Collection<MemberImpl> dataMembers =
            new MemberSelectingCollection<>(members, DATA_MEMBER_SELECTOR);

    assertFalse(dataMembers.contains(thisMember));
}
/**
 * Encodes the common log-entry header into {@code encodingBuffer} at {@code offset}.
 * Header layout (little endian): capture length (int), total length (int), timestamp (long).
 *
 * @return the number of bytes written
 * @throws IllegalArgumentException if captureLength is negative, exceeds length,
 *         or exceeds MAX_CAPTURE_LENGTH
 */
static int internalEncodeLogHeader(
    final MutableDirectBuffer encodingBuffer,
    final int offset,
    final int captureLength,
    final int length,
    final NanoClock nanoClock)
{
    if (captureLength < 0 || captureLength > length || captureLength > MAX_CAPTURE_LENGTH)
    {
        throw new IllegalArgumentException(
            "invalid input: captureLength=" + captureLength + ", length=" + length);
    }

    int relativeOffset = 0;

    encodingBuffer.putInt(offset + relativeOffset, captureLength, LITTLE_ENDIAN);
    relativeOffset += SIZE_OF_INT;

    encodingBuffer.putInt(offset + relativeOffset, length, LITTLE_ENDIAN);
    relativeOffset += SIZE_OF_INT;

    encodingBuffer.putLong(offset + relativeOffset, nanoClock.nanoTime(), LITTLE_ENDIAN);
    relativeOffset += SIZE_OF_LONG;

    return relativeOffset;
}
/** A negative capture length is invalid input and must be rejected up front. */
@Test
void encodeLogHeaderThrowsIllegalArgumentExceptionIfCaptureLengthIsNegative() {
    final NanoClock stoppedClock = () -> 0;

    assertThrows(IllegalArgumentException.class,
        () -> internalEncodeLogHeader(buffer, 0, -1, Integer.MAX_VALUE, stoppedClock));
}
/**
 * Builds the joined stream for this join node: verifies co-partitioning for
 * primary-key joins, then delegates to the joiner matching the output types of
 * the two sides.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    if (!joinKey.isForeignKey()) {
        // Co-partitioning is only required for non-foreign-key joins.
        ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient());
    }

    final JoinerFactory joinerFactory = new JoinerFactory(
        buildContext,
        this,
        buildContext.buildNodeContext(getId().toString()));

    return joinerFactory
        .getJoiner(left.getNodeOutputType(), right.getNodeOutputType())
        .join();
}
@Test public void shouldPerformStreamToStreamRightJoin() { // Given: setupStream(left, leftSchemaKStream); setupStream(right, rightSchemaKStream); final JoinNode joinNode = new JoinNode(nodeId, RIGHT, joinKey, true, left, right, WITHIN_EXPRESSION, "KAFKA"); // When: joinNode.buildStream(planBuildContext); // Then: verify(leftSchemaKStream).rightJoin( rightSchemaKStream, SYNTH_KEY, WITHIN_EXPRESSION.get(), VALUE_FORMAT.getFormatInfo(), OTHER_FORMAT.getFormatInfo(), CONTEXT_STACKER ); }
/**
 * Copies an XML element onto the target SObject: a leaf element becomes a plain
 * field, an element with children becomes a nested SObject built recursively.
 *
 * @param sobjPass the SObject receiving the field
 * @param element  the XML element to copy
 */
void setFieldInSObject( SObject sobjPass, XmlObject element ) {
    final String name = element.getName().getLocalPart();
    final Iterator<XmlObject> children = element.getChildren();
    if ( children.hasNext() ) {
        // Nested element: build a child SObject and recurse into every child node.
        final SObject nested = new SObject();
        nested.setName( new QName( name ) );
        while ( children.hasNext() ) {
            setFieldInSObject( nested, children.next() );
        }
        sobjPass.setSObjectField( name, nested );
    } else {
        // Leaf element: copy the scalar value straight onto the target object.
        sobjPass.setSObjectField( name, element.getValue() );
    }
}
/**
 * Verifies that setFieldInSObject handles both a leaf element (copied as a plain
 * value) and an element carrying a child field (converted into a nested SObject,
 * i.e. a foreign-key style record).
 */
@Test
public void testSetFieldInSObjectForeignKey() throws Exception {
    SalesforceUpsert salesforceUpsert =
        new SalesforceUpsert( smh.stepMeta, smh.stepDataInterface, 0, smh.transMeta, smh.trans );
    SObject sobjPass = new SObject();
    // Leaf element: a simple name/value pair with no children.
    XmlObject parentObject = new XmlObject();
    String parentParam = "parentParam";
    String parentValue = "parentValue";
    parentObject.setName( new QName( parentParam ) );
    parentObject.setValue( parentValue );
    // Element with a child field - expected to become a nested SObject.
    String child = "child";
    String childParam = "childParam";
    String childValue = "childValue";
    XmlObject childObject = new XmlObject();
    childObject.setName( new QName( child ) );
    childObject.setField( childParam, childValue );
    salesforceUpsert.setFieldInSObject( sobjPass, parentObject );
    salesforceUpsert.setFieldInSObject( sobjPass, childObject );
    // The leaf is stored directly; the child is stored as a nested SObject.
    Assert.assertEquals( parentValue, sobjPass.getField( parentParam ) );
    Assert.assertEquals( childValue, ( (SObject) sobjPass.getField( child ) ).getField( childParam ) );
}
/**
 * Registers a plugin entry in the JBang plugin configuration and persists it.
 * Missing command/description/firstVersion values fall back to the matching
 * built-in plugin type (when the name is known), then to the plugin name and
 * the supplied version.
 *
 * @return 0 on success
 */
@Override
public Integer doCall() throws Exception {
    JsonObject pluginConfig = loadConfig();
    JsonObject plugins = pluginConfig.getMap("plugins");

    // Known built-in plugin names provide defaults for command/description/firstVersion.
    Optional<PluginType> camelPlugin = PluginType.findByName(name);
    if (camelPlugin.isPresent()) {
        if (command == null) {
            command = camelPlugin.get().getCommand();
        }
        if (description == null) {
            description = camelPlugin.get().getDescription();
        }
        if (firstVersion == null) {
            firstVersion = camelPlugin.get().getFirstVersion();
        }
    }
    if (command == null) {
        // use plugin name as command
        command = name;
    }
    if (firstVersion == null) {
        // fallback to version specified
        firstVersion = version;
    }

    JsonObject plugin = new JsonObject();
    plugin.put("name", name);
    plugin.put("command", command);
    if (firstVersion != null) {
        plugin.put("firstVersion", firstVersion);
    }
    plugin.put("description",
            description != null ? description : "Plugin %s called with command %s".formatted(name, command));

    // A full GAV can be derived from groupId/artifactId, defaulting the version
    // to the current catalog version when none was given.
    if (gav == null && (groupId != null && artifactId != null)) {
        if (version == null) {
            CamelCatalog catalog = new DefaultCamelCatalog();
            version = catalog.getCatalogVersion();
        }
        gav = "%s:%s:%s".formatted(groupId, artifactId, version);
    }
    if (gav != null) {
        plugin.put("dependency", gav);
    }

    plugins.put(name, plugin);
    saveConfig(pluginConfig);
    return 0;
}
/**
 * When groupId/artifactId/version are given explicitly, the stored plugin entry
 * must carry the derived "dependency" GAV, and the version doubles as the
 * firstVersion fallback.
 */
@Test
public void shouldUseArtifactIdAndVersion() throws Exception {
    PluginAdd command = new PluginAdd(new CamelJBangMain().withPrinter(printer));
    command.name = "foo-plugin";
    command.command = "foo";
    command.groupId = "org.foo";
    command.artifactId = "foo-bar";
    command.version = "1.0.0";
    command.doCall();
    // Nothing is printed on success.
    Assertions.assertEquals("", printer.getOutput());
    // The persisted config contains the full plugin entry, including the derived GAV.
    Assertions.assertEquals("{\"plugins\":{\"foo-plugin\":{\"name\":\"foo-plugin\",\"command\":\"foo\"," +
            "\"firstVersion\":\"1.0.0\",\"description\":\"Plugin foo-plugin called with command foo\"" +
            ",\"dependency\":\"org.foo:foo-bar:1.0.0\"}}}",
            PluginHelper.getOrCreatePluginConfig().toJson());
}
/**
 * Combines config key definitions with validated values into a ConfigInfos result.
 * Values without a matching key definition are reported with null key metadata;
 * every defined key is reported, paired with its value when one exists. The error
 * count sums the error messages across all reported values.
 */
public static ConfigInfos generateResult(String connType, Map<String, ConfigKey> configKeys,
                                         List<ConfigValue> configValues, List<String> groups) {
    int errorCount = 0;
    List<ConfigInfo> infos = new LinkedList<>();
    Map<String, ConfigValue> valuesByName = new HashMap<>();

    // First pass: index values by name; report values with no key definition.
    for (ConfigValue value : configValues) {
        String valueName = value.name();
        valuesByName.put(valueName, value);
        if (!configKeys.containsKey(valueName)) {
            infos.add(new ConfigInfo(null, convertConfigValue(value, null)));
            errorCount += value.errorMessages().size();
        }
    }

    // Second pass: report every defined key, attaching its validated value when present.
    for (Map.Entry<String, ConfigKey> entry : configKeys.entrySet()) {
        ConfigKeyInfo keyInfo = convertConfigKey(entry.getValue());
        ConfigValueInfo valueInfo = null;
        ConfigValue value = valuesByName.get(entry.getKey());
        if (value != null) {
            valueInfo = convertConfigValue(value, entry.getValue().type);
            errorCount += value.errorMessages().size();
        }
        infos.add(new ConfigInfo(keyInfo, valueInfo));
    }

    return new ConfigInfos(connType, errorCount, groups, infos);
}
/**
 * Every value matches a defined config key and carries no error messages, so the
 * result must mirror the keys and values exactly with a zero error count.
 */
@Test
public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithNoErrors() {
    String name = "com.acme.connector.MyConnector";
    Map<String, ConfigDef.ConfigKey> keys = new HashMap<>();
    addConfigKey(keys, "config.a1", null);
    addConfigKey(keys, "config.b1", "group B");
    addConfigKey(keys, "config.b2", "group B");
    addConfigKey(keys, "config.c1", "group C");
    // NOTE(review): "groupB" (no space) differs from the key group "group B";
    // groups are passed through verbatim so this only checks pass-through — confirm intended.
    List<String> groups = Arrays.asList("groupB", "group C");
    List<ConfigValue> values = new ArrayList<>();
    addValue(values, "config.a1", "value.a1");
    addValue(values, "config.b1", "value.b1");
    addValue(values, "config.b2", "value.b2");
    addValue(values, "config.c1", "value.c1");
    ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups);
    assertEquals(name, infos.name());
    assertEquals(groups, infos.groups());
    assertEquals(values.size(), infos.values().size());
    assertEquals(0, infos.errorCount());
    assertInfoKey(infos, "config.a1", null);
    assertInfoKey(infos, "config.b1", "group B");
    assertInfoKey(infos, "config.b2", "group B");
    assertInfoKey(infos, "config.c1", "group C");
    assertInfoValue(infos, "config.a1", "value.a1");
    assertInfoValue(infos, "config.b1", "value.b1");
    assertInfoValue(infos, "config.b2", "value.b2");
    assertInfoValue(infos, "config.c1", "value.c1");
}
/**
 * Trims fields from {@code recordTemplate} according to the projection mask,
 * delegating to the data/schema overload.
 *
 * @param recordTemplate the record to trim in place
 * @param override       the projection mask describing which fields to keep
 * @param failOnMismatch whether a mismatch between the data and the mask raises an error
 */
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch) {
    trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch);
}
/**
 * Trimming a record containing a typeref'd nested array must strip fields that
 * are not part of the schema ("evil"/"evil2") and restore the original record.
 */
@Test
public void testRecordRefArrayTrim() throws CloneNotSupportedException {
    TyperefTest test = new TyperefTest();
    RecordBarArrayArray recordBarArrayArray = new RecordBarArrayArray();
    RecordBarArray recordBarArray = new RecordBarArray();
    RecordBar recordBar = new RecordBar();
    recordBar.setLocation("mountain view");
    recordBarArray.add(recordBar);
    RecordBar recordBar2 = new RecordBar();
    recordBar2.setLocation("palo alto");
    recordBarArray.add(recordBar2);
    recordBarArrayArray.add(recordBarArray);
    test.setRecordRefArray(recordBarArrayArray);

    // Generate expected copy.
    TyperefTest expected = test.copy();

    // Introduce bad elements.
    test.getRecordRefArray().get(0).get(0).data().put("evil", "bar");
    test.getRecordRefArray().get(0).get(0).data().put("evil2", "bar");
    test.getRecordRefArray().get(0).get(1).data().put("evil", "foo");
    test.getRecordRefArray().get(0).get(1).data().put("evil2", "foo");

    // Sanity check: each nested record now carries location + 2 bad keys.
    Assert.assertEquals(test.getRecordRefArray().get(0).get(0).data().size(), 3);
    Assert.assertEquals(test.getRecordRefArray().get(0).get(1).data().size(), 3);

    RestUtils.trimRecordTemplate(test, false);
    Assert.assertEquals(test, expected);
}
/**
 * Creates the {@code ShareFetchContext} for a share fetch request.
 * Full requests (INITIAL_EPOCH / FINAL_EPOCH) tear down any existing session;
 * INITIAL additionally starts a new one. Non-full requests update the existing
 * session under the cache lock, validating the session epoch first.
 *
 * @throws org.apache.kafka.common.errors.ApiException via {@code Errors.*.exception()}
 *         for invalid requests, missing sessions, or epoch mismatches
 */
public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData,
                                    List<TopicIdPartition> toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) {
    ShareFetchContext context;
    // TopicPartition with maxBytes as 0 should not be added in the cachedPartitions
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchDataWithMaxBytes = new HashMap<>();
    shareFetchData.forEach((tp, sharePartitionData) -> {
        if (sharePartitionData.maxBytes > 0) shareFetchDataWithMaxBytes.put(tp, sharePartitionData);
    });
    // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a
    // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases.
    if (reqMetadata.isFull()) {
        ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
        if (reqMetadata.epoch() == ShareRequestMetadata.FINAL_EPOCH) {
            // If the epoch is FINAL_EPOCH, don't try to create a new session.
            // A final request must not carry fetchable partitions.
            if (!shareFetchDataWithMaxBytes.isEmpty()) {
                throw Errors.INVALID_REQUEST.exception();
            }
            if (cache.remove(key) == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            } else {
                log.debug("Removed share session with key " + key);
            }
            context = new FinalContext();
        } else {
            // INITIAL_EPOCH: acknowledgements are not allowed on the very first fetch.
            if (isAcknowledgeDataPresent) {
                log.error("Acknowledge data present in Initial Fetch Request for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.INVALID_REQUEST.exception();
            }
            // Drop any stale session for this member before creating a fresh one.
            if (cache.remove(key) != null) {
                log.debug("Removed share session with key {}", key);
            }
            ImplicitLinkedHashCollection<CachedSharePartition> cachedSharePartitions =
                new ImplicitLinkedHashCollection<>(shareFetchDataWithMaxBytes.size());
            shareFetchDataWithMaxBytes.forEach((topicIdPartition, reqData) ->
                cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, reqData, false)));
            ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(),
                time.milliseconds(), cachedSharePartitions);
            if (responseShareSessionKey == null) {
                // The cache can refuse to create a session (e.g. at capacity).
                log.error("Could not create a share session for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            context = new ShareSessionContext(reqMetadata, shareFetchDataWithMaxBytes);
            log.debug("Created a new ShareSessionContext with key {} isSubsequent {} returning {}. A new share " +
                "session will be started.", responseShareSessionKey, false,
                partitionsToLogString(shareFetchDataWithMaxBytes.keySet()));
        }
    } else {
        // We update the already existing share session.
        synchronized (cache) {
            ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
            ShareSession shareSession = cache.get(key);
            if (shareSession == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            // The request epoch must match the cached session epoch exactly.
            if (shareSession.epoch != reqMetadata.epoch()) {
                log.debug("Share session error for {}: expected epoch {}, but got {} instead", key,
                    shareSession.epoch, reqMetadata.epoch());
                throw Errors.INVALID_SHARE_SESSION_EPOCH.exception();
            }
            Map<ShareSession.ModifiedTopicIdPartitionType, List<TopicIdPartition>> modifiedTopicIdPartitions = shareSession.update(
                shareFetchDataWithMaxBytes, toForget);
            cache.touch(shareSession, time.milliseconds());
            shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch);
            log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " +
                    "added {}, updated {}, removed {}", shareSession.key(), shareSession.epoch,
                partitionsToLogString(modifiedTopicIdPartitions.get(
                    ShareSession.ModifiedTopicIdPartitionType.ADDED)),
                partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.UPDATED)),
                partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.REMOVED))
            );
            context = new ShareSessionContext(reqMetadata, shareSession);
        }
    }
    return context;
}
/**
 * A FINAL_EPOCH request that still carries fetchable partitions (non-zero
 * maxBytes) is invalid and must be rejected with InvalidRequestException.
 */
@Test
public void testNewContextReturnsFinalContextError() {
    Time time = new MockTime();
    ShareSessionCache cache = new ShareSessionCache(10, 1000);
    SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
        .withCache(cache).withTime(time).build();
    Uuid tpId0 = Uuid.randomUuid();
    Uuid tpId1 = Uuid.randomUuid();
    TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0));
    TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1));
    String groupId = "grp";
    Uuid memberId = Uuid.randomUuid();

    // Create a new share session with an initial share fetch request
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> reqData1 = new LinkedHashMap<>();
    reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES));
    reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES));
    ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH);
    ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false);
    assertEquals(ShareSessionContext.class, context1.getClass());
    assertFalse(((ShareSessionContext) context1).isSubsequent());

    ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH);
    // shareFetchData is not empty and the maxBytes of topic partition is not 0, which means this is trying to fetch on a Final request.
    // New context should throw an error
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> reqData3 = Collections.singletonMap(
        new TopicIdPartition(tpId1, new TopicPartition("foo", 0)),
        new ShareFetchRequest.SharePartitionData(tpId1, PARTITION_MAX_BYTES));
    assertThrows(InvalidRequestException.class,
        () -> sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true));
}
/**
 * Registers include patterns for the filter. Each regex is compiled once and
 * stored alongside its source string; a null or empty array is ignored.
 * NOTE(review): repeated calls append to INCPATTERNS without clearing the
 * previously compiled patterns — confirm that accumulation is intended.
 *
 * @param regexp the include patterns to compile and register
 */
@Override
public void includePattern(String[] regexp) {
    if (regexp != null && regexp.length > 0) {
        INCPTRN = regexp;
        this.PTRNFILTER = true;
        // now we create the compiled pattern and
        // add it to the arraylist
        for (String includePattern : INCPTRN) {
            this.INCPATTERNS.add(this.createPattern(includePattern));
            this.includePatternStrings.add(includePattern);
        }
    }
}
/**
 * For every recorded test file, isFiltered/filter must agree with the expected
 * include-pattern verdict stored in TESTDATA.
 */
@Test
public void testIncludePattern() {
    testf.includePattern(PATTERNS);
    for (TestData td : TESTDATA) {
        String theFile = td.file;
        boolean expect = td.inclpatt;
        // isFiltered is the inverse of "accepted by an include pattern".
        assertPrimitiveEquals(!expect, testf.isFiltered(theFile, null));
        // filter() returns the line when accepted, null when rejected.
        String line = testf.filter(theFile);
        if (line != null) {
            assertTrue(expect, "Expect to accept " + theFile);
        } else {
            assertFalse(expect, "Expect to reject " + theFile);
        }
    }
}
/**
 * Creates a Jetty server-side SSL context factory backed by the given Pulsar
 * SSL factory. All configuration is delegated to the Jetty-specific subclass.
 */
public static SslContextFactory.Server createSslContextFactory(String sslProviderString,
                                                               PulsarSslFactory pulsarSslFactory,
                                                               boolean requireTrustedClientCertOnConnect,
                                                               Set<String> ciphers,
                                                               Set<String> protocols) {
    final JettySslContextFactory.Server serverFactory = new JettySslContextFactory.Server(
            sslProviderString, pulsarSslFactory, requireTrustedClientCertOnConnect, ciphers, protocols);
    return serverFactory;
}
/**
 * A server restricted to TLSv1.3 must refuse a client that only offers TLSv1.2:
 * the HTTPS request is expected to fail with an SSLHandshakeException.
 */
@Test(expectedExceptions = SSLHandshakeException.class)
public void testJettyTlsServerInvalidTlsProtocol() throws Exception {
    @Cleanup("stop")
    Server server = new Server();
    List<ServerConnector> connectors = new ArrayList<>();
    // Server side: TLSv1.3 only, client certs required.
    PulsarSslConfiguration sslConfiguration = PulsarSslConfiguration.builder()
            .tlsProtocols(new HashSet<String>() {
                {
                    this.add("TLSv1.3");
                }
            })
            .tlsTrustCertsFilePath(Resources.getResource("ssl/my-ca/ca.pem").getPath())
            .tlsCertificateFilePath(Resources.getResource("ssl/my-ca/server-ca.pem").getPath())
            .tlsKeyFilePath(Resources.getResource("ssl/my-ca/server-key.pem").getPath())
            .allowInsecureConnection(false)
            .requireTrustedClientCertOnConnect(true)
            .tlsEnabledWithKeystore(false)
            .isHttps(true)
            .build();
    PulsarSslFactory sslFactory = new DefaultPulsarSslFactory();
    sslFactory.initialize(sslConfiguration);
    sslFactory.createInternalSslContext();
    SslContextFactory factory = JettySslContextFactory.createSslContextFactory(null,
            sslFactory, true, null,
            new HashSet<String>() {
                {
                    this.add("TLSv1.3");
                }
            });
    factory.setHostnameVerifier((s, sslSession) -> true);
    ServerConnector connector = new ServerConnector(server, factory);
    connector.setPort(0); // ephemeral port
    connectors.add(connector);
    server.setConnectors(connectors.toArray(new ServerConnector[0]));
    server.start();
    // client connect — deliberately limited to TLSv1.2, which the server rejects.
    HttpClientBuilder httpClientBuilder = HttpClients.custom();
    RegistryBuilder<ConnectionSocketFactory> registryBuilder = RegistryBuilder.create();
    registryBuilder.register("https", new SSLConnectionSocketFactory(getClientSslContext(),
            new String[]{"TLSv1.2"}, null, new NoopHostnameVerifier()));
    PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(registryBuilder.build());
    httpClientBuilder.setConnectionManager(cm);
    @Cleanup
    CloseableHttpClient httpClient = httpClientBuilder.build();
    HttpGet httpGet = new HttpGet("https://localhost:" + connector.getLocalPort());
    httpClient.execute(httpGet);
}
/**
 * Delegates the bulk load and records its latency on the loadAll probe,
 * including the failure path.
 */
@Override
public Map<K, V> loadAll(Collection<K> keys) {
    final long begin = Timer.nanos();
    try {
        return delegate.loadAll(keys);
    } finally {
        // Latency is recorded even when the delegate throws.
        loadAllProbe.recordValue(Timer.nanosElapsed(begin));
    }
}
/** The wrapper passes the delegate's result through untouched and records one probe hit. */
@Test
public void loadAll() {
    final Collection<String> keys = asList("key1", "key2");
    final Map<String, String> expected = new HashMap<>();
    expected.put("key1", "value1");
    expected.put("key2", "value2");
    when(delegate.loadAll(keys)).thenReturn(expected);

    final Map<String, String> actual = cacheLoader.loadAll(keys);

    assertSame(expected, actual);
    assertProbeCalledOnce("loadAll");
}
/** @return the backing URI, exposed for serialization as the JSON property {@code uri} */
@JsonProperty
public URI getUri() {
    return uri;
}
/**
 * A metric name containing characters that need URL encoding ("up+now") must
 * survive the round trip into the split's query URI unchanged.
 */
@Test
public void testQueryWithTableNameNeedingURLEncodeInSplits() throws URISyntaxException {
    // Fixed clock so the computed chunk boundaries are deterministic.
    Instant now = LocalDateTime.of(2019, 10, 2, 7, 26, 56, 0).toInstant(UTC);
    PrometheusConnectorConfig config = getCommonConfig(prometheusHttpServer.resolve("/prometheus-data/prom-metrics-non-standard-name.json"));
    PrometheusClient client = new PrometheusClient(config, METRIC_CODEC, TYPE_MANAGER);
    PrometheusTable table = client.getTable("default", "up+now");
    PrometheusTableHandle tableHandle = new PrometheusTableHandle("default", table.getName());
    TupleDomain<ColumnHandle> columnConstraints = TupleDomain.withColumnDomains(
            ImmutableMap.of(
                    new PrometheusColumnHandle("value", BIGINT, 1), Domain.all(VARCHAR),
                    new PrometheusColumnHandle("text", createUnboundedVarcharType(), 0), Domain.all(VARCHAR)));
    PrometheusTableLayoutHandle tableLayoutHandle = new PrometheusTableLayoutHandle(tableHandle, columnConstraints);
    PrometheusSplitManager splitManager = new PrometheusSplitManager(client, fixedClockAt(now), config);
    ConnectorSplitSource splits = splitManager.getSplits(
            null,
            null,
            tableLayoutHandle,
            null);
    PrometheusSplit split = (PrometheusSplit) splits.getNextBatch(NOT_PARTITIONED, 1).getNow(null).getSplits().get(0);
    String queryInSplit = split.getUri().getQuery();
    // Expected time for the first chunk: range start plus one chunk, minus the per-split offset.
    String timeShouldBe = decimalSecondString(now.toEpochMilli() -
            config.getMaxQueryRangeDuration().toMillis() +
            config.getQueryChunkSizeDuration().toMillis() -
            OFFSET_MILLIS * 20);
    assertEquals(queryInSplit,
            new URI("http://doesnotmatter.example:9090/api/v1/query?query=up+now[" +
                    getQueryChunkSizeDurationAsPrometheusCompatibleDurationString(config) + "]" +
                    "&time=" + timeShouldBe).getQuery());
}
/**
 * Counts the indices in {@code indexSet} that are eligible for retention —
 * not reopened, not the current write target, and past the configured age
 * limits — and, when any qualify, runs the given retention action on them.
 *
 * @param indexSet   the index set to examine
 * @param config     min/max index lifetimes used to compute the age cutoffs
 * @param action     the retention action to execute
 * @param actionName human-readable name of the action (for logging/auditing)
 */
public void retain(IndexSet indexSet, IndexLifetimeConfig config, RetentionExecutor.RetentionAction action, String actionName) {
    final Map<String, Set<String>> deflectorIndices = indexSet.getAllIndexAliases();

    // Account for DST and time zones in determining age
    final DateTime now = clock.nowUTC();
    final long cutoffSoft = now.minus(config.indexLifetimeMin()).getMillis();
    final long cutoffHard = now.minus(config.indexLifetimeMax()).getMillis();
    final int removeCount = (int) deflectorIndices.keySet()
            .stream()
            .filter(indexName -> !indices.isReopened(indexName))
            .filter(indexName -> !hasCurrentWriteAlias(indexSet, deflectorIndices, indexName))
            .filter(indexName -> exceedsAgeLimit(indexName, cutoffSoft, cutoffHard))
            .count();

    if (LOG.isDebugEnabled()) {
        // Dump the per-index decision inputs to ease debugging of retention runs.
        var debug = deflectorIndices.keySet().stream()
                .collect(Collectors.toMap(k -> k, k -> Map.of(
                        "isReopened", indices.isReopened(k),
                        "hasCurrentWriteAlias", hasCurrentWriteAlias(indexSet, deflectorIndices, k),
                        "exceedsAgeLimit", exceedsAgeLimit(k, cutoffSoft, cutoffHard),
                        "closingDate", indices.indexClosingDate(k),
                        "creationDate", indices.indexCreationDate(k)
                )));
        Joiner.MapJoiner mapJoiner = Joiner.on("\n").withKeyValueSeparator("=");
        LOG.debug("Debug info retain for indexSet <{}>: (min {}, max {}) removeCount: {} details: <{}>",
                indexSet.getIndexPrefix(), config.indexLifetimeMin(), config.indexLifetimeMax(),
                removeCount, mapJoiner.join(debug));
    }

    if (removeCount > 0) {
        final String msg = "Running retention for " + removeCount + " aged-out indices.";
        LOG.info(msg);
        activityWriter.write(new Activity(msg, TimeBasedRetentionExecutor.class));
        retentionExecutor.runRetention(indexSet, removeCount, action, actionName);
    }
}
/** With lifetimes that make no index eligible, the retention action must not run. */
@Test
public void retainTimeBasedNothing() {
    underTest.retain(indexSet, getIndexLifetimeConfig(20, 30), action, "action");
    verify(action, times(0)).retain(any(), any());
}
/**
 * Loads an additional servlet from its NAR archive.
 *
 * @param metadata               servlet metadata including the archive path
 * @param narExtractionDirectory directory the NAR is extracted into
 * @return the servlet paired with the class loader that created it
 * @throws IOException if the definition names no servlet class, the class does
 *         not implement {@code AdditionalServlet}, or instantiation fails
 */
public AdditionalServletWithClassLoader load(
        AdditionalServletMetadata metadata, String narExtractionDirectory) throws IOException {
    final File narFile = metadata.getArchivePath().toAbsolutePath().toFile();
    NarClassLoader ncl = NarClassLoaderBuilder.builder()
            .narFile(narFile)
            .parentClassLoader(AdditionalServlet.class.getClassLoader())
            .extractionDirectory(narExtractionDirectory)
            .build();

    AdditionalServletDefinition def = getAdditionalServletDefinition(ncl);
    if (StringUtils.isBlank(def.getAdditionalServletClass())) {
        throw new IOException("Additional servlets `" + def.getName() + "` does NOT provide an "
                + "additional servlets implementation");
    }

    try {
        // Use Class<?> rather than the raw Class type (avoids unchecked-warning noise).
        Class<?> additionalServletClass = ncl.loadClass(def.getAdditionalServletClass());
        Object additionalServlet = additionalServletClass.getDeclaredConstructor().newInstance();
        if (!(additionalServlet instanceof AdditionalServlet)) {
            throw new IOException("Class " + def.getAdditionalServletClass()
                    + " does not implement additional servlet interface");
        }
        AdditionalServlet servlet = (AdditionalServlet) additionalServlet;
        return new AdditionalServletWithClassLoader(servlet, ncl);
    } catch (Throwable t) {
        // Normalizes any failure into an IOException; rethrowIOException always throws.
        rethrowIOException(t);
        return null;
    }
}
/**
 * A servlet definition with a blank servlet class must make load() fail with
 * IOException. The NAR class loader and its builder are fully mocked so no real
 * archive is touched.
 */
@Test(expectedExceptions = IOException.class)
public void testLoadEventListenerWithBlankListenerClass() throws Exception {
    // Definition deliberately leaves additionalServletClass unset (blank).
    AdditionalServletDefinition def = new AdditionalServletDefinition();
    def.setDescription("test-proxy-listener");
    String archivePath = "/path/to/proxy/listener/nar";
    AdditionalServletMetadata metadata = new AdditionalServletMetadata();
    metadata.setDefinition(def);
    metadata.setArchivePath(Paths.get(archivePath));
    // Mocked NAR loader serves the YAML-serialized definition above.
    NarClassLoader mockLoader = mock(NarClassLoader.class);
    when(mockLoader.getServiceDefinition(eq(AdditionalServletUtils.ADDITIONAL_SERVLET_FILE)))
        .thenReturn(ObjectMapperFactory.getYamlMapper().writer().writeValueAsString(def));
    Class listenerClass = MockAdditionalServlet.class;
    when(mockLoader.loadClass(eq(MockAdditionalServlet.class.getName())))
        .thenReturn(listenerClass);
    final NarClassLoaderBuilder mockedBuilder = mock(NarClassLoaderBuilder.class, RETURNS_SELF);
    when(mockedBuilder.build()).thenReturn(mockLoader);
    // Intercept the static builder factory for the duration of the call.
    try (MockedStatic<NarClassLoaderBuilder> builder = Mockito.mockStatic(NarClassLoaderBuilder.class)) {
        builder.when(() -> NarClassLoaderBuilder.builder()).thenReturn(mockedBuilder);
        AdditionalServletUtils.load(metadata, "");
    }
}
/**
 * Computes the Redis cluster hash slot for {@code key} by issuing KEYSLOT and
 * blocking until the async call completes.
 */
@Override
public Integer clusterGetSlotForKey(byte[] key) {
    final RFuture<Integer> future =
            executorService.readAsync((String) null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, key);
    return syncFuture(future);
}
/** KEYSLOT must resolve to a non-null slot for any key. */
@Test
public void testClusterGetSlotForKey() {
    final byte[] key = "123".getBytes();

    final Integer slot = connection.clusterGetSlotForKey(key);

    assertThat(slot).isNotNull();
}
/**
 * Renames keys in either the message metadata or the message data (JSON object)
 * according to {@code renameKeysMapping}, then forwards the message. When no key
 * matched, the original message instance is forwarded unchanged.
 * NOTE(review): renames are applied in map iteration order, so a new key name
 * that collides with a later current key can be overwritten — confirm intended.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException {
    TbMsgMetaData metaDataCopy = msg.getMetaData().copy();
    String data = msg.getData();
    boolean msgChanged = false;
    switch (renameIn) {
        case METADATA:
            // Rename within the metadata map.
            Map<String, String> metaDataMap = metaDataCopy.getData();
            for (Map.Entry<String, String> entry : renameKeysMapping.entrySet()) {
                String currentKeyName = entry.getKey();
                String newKeyName = entry.getValue();
                if (metaDataMap.containsKey(currentKeyName)) {
                    msgChanged = true;
                    String value = metaDataMap.get(currentKeyName);
                    metaDataMap.put(newKeyName, value);
                    metaDataMap.remove(currentKeyName);
                }
            }
            metaDataCopy = new TbMsgMetaData(metaDataMap);
            break;
        case DATA:
            // Rename within the JSON payload, but only when it is a JSON object.
            JsonNode dataNode = JacksonUtil.toJsonNode(data);
            if (dataNode.isObject()) {
                ObjectNode msgData = (ObjectNode) dataNode;
                for (Map.Entry<String, String> entry : renameKeysMapping.entrySet()) {
                    String currentKeyName = entry.getKey();
                    String newKeyName = entry.getValue();
                    if (msgData.has(currentKeyName)) {
                        msgChanged = true;
                        JsonNode value = msgData.get(currentKeyName);
                        msgData.set(newKeyName, value);
                        msgData.remove(currentKeyName);
                    }
                }
                data = JacksonUtil.toString(msgData);
            }
            break;
        default:
            log.debug("Unexpected RenameIn value: {}. Allowed values: {}", renameIn, TbMsgSource.values());
    }
    // Only build a transformed message when something actually changed.
    ctx.tellSuccess(msgChanged ? TbMsg.transformMsg(msg, metaDataCopy, data) : msg);
}
/** A JSON-array payload is not an object, so the message must pass through untouched. */
@Test
void givenMsgDataNotJSONObject_whenOnMsg_thenVerifyOutput() throws Exception {
    TbMsg msg = getTbMsg(deviceId, TbMsg.EMPTY_JSON_ARRAY);

    node.onMsg(ctx, msg);

    ArgumentCaptor<TbMsg> captor = ArgumentCaptor.forClass(TbMsg.class);
    verify(ctx, times(1)).tellSuccess(captor.capture());
    verify(ctx, never()).tellFailure(any(), any());

    TbMsg outbound = captor.getValue();
    assertThat(outbound).isNotNull();
    // Unchanged messages are forwarded as the very same instance.
    assertThat(outbound).isSameAs(msg);
}
/**
 * Converts a code access into its set of dependencies: always the component-type
 * dependencies of the target, plus the access itself unless it is a
 * self-reference or targets a primitive.
 */
static Set<Dependency> tryCreateFromAccess(JavaAccess<?> access) {
    final JavaClass originOwner = access.getOriginOwner();
    final JavaClass targetOwner = access.getTargetOwner();

    final ImmutableSet.Builder<Dependency> result = ImmutableSet.<Dependency>builder()
            .addAll(createComponentTypeDependencies(
                    originOwner, access.getOrigin().getDescription(), targetOwner, access.getSourceCodeLocation()));

    // Self-references and primitive targets are not meaningful class dependencies.
    final boolean meaningfulTarget = !originOwner.equals(targetOwner) && !targetOwner.isPrimitive();
    if (meaningfulTarget) {
        result.add(new Dependency.FromAccess(access));
    }
    return result.build();
}
/**
 * A method-call access converts to exactly one Dependency, which must be
 * convertible back to both the generic JavaAccess and the concrete JavaMethodCall.
 */
@Test
public void convert_dependency_from_access() {
    JavaMethodCall call = simulateCall().from(getClass(), "toString").to(Object.class, "toString");

    Dependency dependency = getOnlyElement(Dependency.tryCreateFromAccess(call));

    assertThatConversionOf(dependency)
            .satisfiesStandardConventions()
            .isPossibleToSingleElement(JavaAccess.class, it -> assertThat(it).isEqualTo(call))
            .isPossibleToSingleElement(JavaMethodCall.class, it -> assertThat(it).isEqualTo(call));
}
/**
 * Returns a matcher that matches HTTP requests whose method equals {@code method}.
 *
 * @throws NullPointerException if {@code method} is null or empty
 */
public static Matcher<HttpRequest> methodEquals(String method) {
  if (method == null) {
    throw new NullPointerException("method == null");
  }
  if (method.isEmpty()) {
    // Kept as NullPointerException (not IllegalArgumentException) for
    // compatibility with existing callers of this API.
    throw new NullPointerException("method is empty");
  }
  return new MethodEquals(method);
}
// Verifies the happy path: a request whose method is "GET" matches methodEquals("GET").
@Test void methodEquals_matched() { when(httpRequest.method()).thenReturn("GET"); assertThat(methodEquals("GET").matches(httpRequest)).isTrue(); }
/**
 * Executes one frontend command: decodes the packet payload, dispatches it to the
 * command executor, and translates any Exception/Error into an error response.
 * The finally block is order-sensitive: it clears the query context, closes
 * execution resources (collecting SQLExceptions), flushes only when the executor
 * requested it, reports close failures, signals write completion, and finally
 * releases the inbound ByteBuf (composite components first, then the buffer itself).
 * SQL-show MDC logging context is installed/cleared around the whole execution.
 */
@Override public void run() { boolean isNeedFlush = false; boolean sqlShowEnabled = ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getProps().getValue(ConfigurationPropertyKey.SQL_SHOW); try { if (sqlShowEnabled) { fillLogMDC(); } isNeedFlush = executeCommand(context, databaseProtocolFrontendEngine.getCodecEngine().createPacketPayload((ByteBuf) message, context.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get())); // CHECKSTYLE:OFF } catch (final Exception ex) { // CHECKSTYLE:ON processException(ex); // CHECKSTYLE:OFF } catch (final Error error) { // CHECKSTYLE:ON processException(new RuntimeException(error)); } finally { connectionSession.clearQueryContext(); Collection<SQLException> exceptions = Collections.emptyList(); try { connectionSession.getDatabaseConnectionManager().closeExecutionResources(); } catch (final BackendConnectionException ex) { exceptions = ex.getExceptions().stream().filter(SQLException.class::isInstance).map(SQLException.class::cast).collect(Collectors.toList()); } if (isNeedFlush) { context.flush(); } processClosedExceptions(exceptions); context.pipeline().fireUserEventTriggered(new WriteCompleteEvent()); if (sqlShowEnabled) { clearLogMDC(); } if (message instanceof CompositeByteBuf) { releaseCompositeByteBuf((CompositeByteBuf) message); } ((ByteBuf) message).release(); } }
// Verifies that an OutOfMemoryError thrown by the command executor is wrapped into a
// RuntimeException and answered with an error packet plus the "other" packet (two
// writes), that the channel is flushed, and that execution resources are closed.
@Test void assertRunWithOOMError() throws BackendConnectionException, SQLException { doThrow(OutOfMemoryError.class).when(commandExecutor).execute(); when(engine.getCodecEngine().createPacketPayload(message, StandardCharsets.UTF_8)).thenReturn(payload); when(engine.getCommandExecuteEngine().getCommandPacket(payload, commandPacketType, connectionSession)).thenReturn(commandPacket); when(engine.getCommandExecuteEngine().getCommandPacketType(payload)).thenReturn(commandPacketType); when(engine.getCommandExecuteEngine().getCommandExecutor(commandPacketType, commandPacket, connectionSession)).thenReturn(commandExecutor); when(engine.getCommandExecuteEngine().getErrorPacket(any(RuntimeException.class))).thenReturn(databasePacket); when(engine.getCommandExecuteEngine().getOtherPacket(connectionSession)).thenReturn(Optional.of(databasePacket)); CommandExecutorTask actual = new CommandExecutorTask(engine, connectionSession, handlerContext, message); actual.run(); verify(handlerContext, times(2)).write(databasePacket); verify(handlerContext).flush(); verify(databaseConnectionManager).closeExecutionResources(); }
/**
 * Validates a single rule-builder action step.
 * Only the {@code set_field} function currently has dedicated validation; every
 * other function is treated as valid, i.e. a non-failed {@link ValidationResult}.
 */
@Override
public ValidationResult validate(RuleBuilderStep step) {
    final RuleFragment ruleFragment = actions.get(step.function());
    // Guard against functions unknown to the actions map: the previous code
    // dereferenced a null fragment here and threw a NullPointerException.
    if (ruleFragment == null) {
        return new ValidationResult(false, "");
    }
    final FunctionDescriptor<?> functionDescriptor = ruleFragment.descriptor();
    final String functionName = functionDescriptor.name();
    // Constant-first comparison is null-safe should a descriptor lack a name.
    if (SetField.NAME.equals(functionName)) {
        return validateSetField(step.parameters());
    }
    // No dedicated validation for this function: report "not failed".
    return new ValidationResult(false, "");
}
// Verifies that functions without dedicated validation (here an arbitrary string
// function) are skipped: validation must not report a failure even though the
// field parameter contains spaces.
@Test void validateOtherFunctionsAreSkipped() { HashMap<String, Object> parameters = new HashMap<>(); parameters.put(FIELD_PARAM, WITH_SPACES); RuleBuilderStep randomOtherFunction = RuleBuilderStep .builder() .parameters(parameters) .function(ValidVariablesTest.STRING_FUNCTION) .build(); ValidationResult result = classUnderTest.validate(randomOtherFunction); assertThat(result.failed()).isFalse(); }
/**
 * Factory for an indexed, time-ordered RocksDB window store supplier.
 * Validates that retention and window size are non-negative millisecond durations
 * and that the window size does not exceed the retention period. The segment
 * interval defaults to max(retention/2, 60s).
 *
 * @throws IllegalArgumentException on negative durations or windowSize > retention
 */
public static RocksDbIndexedTimeOrderedWindowBytesStoreSupplier create(final String name,
                                                                       final Duration retentionPeriod,
                                                                       final Duration windowSize,
                                                                       final boolean retainDuplicates,
                                                                       final boolean hasIndex) {
    Objects.requireNonNull(name, "name cannot be null");
    final String rpMsgPrefix = prepareMillisCheckFailMsgPrefix(retentionPeriod, "retentionPeriod");
    final long retentionMs = validateMillisecondDuration(retentionPeriod, rpMsgPrefix);
    final String wsMsgPrefix = prepareMillisCheckFailMsgPrefix(windowSize, "windowSize");
    final long windowSizeMs = validateMillisecondDuration(windowSize, wsMsgPrefix);
    final long defaultSegmentInterval = Math.max(retentionMs / 2, 60_000L);
    if (retentionMs < 0L) {
        throw new IllegalArgumentException("retentionPeriod cannot be negative");
    }
    if (windowSizeMs < 0L) {
        throw new IllegalArgumentException("windowSize cannot be negative");
    }
    // NOTE(review): with max(..., 60_000L) above this branch is unreachable;
    // kept as a defensive check.
    if (defaultSegmentInterval < 1L) {
        throw new IllegalArgumentException("segmentInterval cannot be zero or negative");
    }
    if (windowSizeMs > retentionMs) {
        throw new IllegalArgumentException("The retention period of the window store " + name + " must be no smaller than its window size. Got size=[" + windowSizeMs + "], retention=[" + retentionMs + "]");
    }
    return new RocksDbIndexedTimeOrderedWindowBytesStoreSupplier(name, retentionMs, defaultSegmentInterval, windowSizeMs, retainDuplicates, hasIndex);
}
// Verifies that a negative window size is rejected with the exact error message.
@Test public void shouldThrowIfWindowSizeIsNegative() { final Exception e = assertThrows(IllegalArgumentException.class, () -> RocksDbIndexedTimeOrderedWindowBytesStoreSupplier.create("anyName", ofMillis(0L), ofMillis(-1L), false, false)); assertEquals("windowSize cannot be negative", e.getMessage()); }
/**
 * Fetches the cluster size, delegating to {@code fetchClusterSize} wrapped in the
 * network retry policy under the operation name "fetchClusterSize".
 */
@Override public int getClusterSize() { return networkRetry.run("fetchClusterSize", this::fetchClusterSize); }
// Verifies that an empty cluster reports size 0.
// NOTE(review): argument order (actual, expected) matches TestNG's assertEquals;
// confirm the test framework — under JUnit the arguments would be reversed.
@Test public void testNodeResource() { assertEquals(client.getClusterSize(), 0); }
/**
 * Throws the supplied exception when the given string is null or empty.
 * Uses a plain null/empty check instead of Guava's {@code Strings.isNullOrEmpty},
 * removing an unnecessary third-party dependency with identical semantics.
 *
 * @param value string value to check
 * @param exceptionSupplierIfUnexpected supplier of the exception to throw
 * @param <T> type of exception thrown when the check fails
 * @throws T when {@code value} is null or empty
 */
public static <T extends Throwable> void checkNotEmpty(final String value, final Supplier<T> exceptionSupplierIfUnexpected) throws T {
    if (null == value || value.isEmpty()) {
        throw exceptionSupplierIfUnexpected.get();
    }
}
// Verifies that a non-empty string passes the check without throwing.
@Test void assertCheckNotEmptyWithStringToNotThrowException() { assertDoesNotThrow(() -> ShardingSpherePreconditions.checkNotEmpty("foo", SQLException::new)); }
/**
 * Parses the {@code createflag} query parameter into an {@link EnumSet} of
 * {@link CreateFlag}. A missing parameter yields an empty set; URL-encoded
 * values are decoded as UTF-8 before parsing by {@link CreateFlagParam}.
 */
public EnumSet<CreateFlag> createFlag() {
    String cf = "";
    if (param(CreateFlagParam.NAME) != null) {
        // Decode percent-encoding in the raw parameter value.
        QueryStringDecoder decoder = new QueryStringDecoder(
            param(CreateFlagParam.NAME), StandardCharsets.UTF_8);
        cf = decoder.path();
    }
    return new CreateFlagParam(cf).getValue();
}
// Exercises createflag parsing: comma-separated flags, a single flag, a missing
// parameter, an empty value (both yield an empty set), and malformed values with
// trailing/leading commas, which must fail with "No enum constant".
@Test public void testCreateFlag() { final String path = "/test1?createflag=append,sync_block"; Configuration conf = new Configuration(); QueryStringDecoder decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + path); ParameterParser testParser = new ParameterParser(decoder, conf); EnumSet<CreateFlag> actual = testParser.createFlag(); EnumSet<CreateFlag> expected = EnumSet.of(CreateFlag.APPEND, CreateFlag.SYNC_BLOCK); Assert.assertEquals(expected.toString(), actual.toString()); final String path1 = "/test1?createflag=append"; decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + path1); testParser = new ParameterParser(decoder, conf); actual = testParser.createFlag(); expected = EnumSet.of(CreateFlag.APPEND); Assert.assertEquals(expected, actual); final String path2 = "/test1"; decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + path2); testParser = new ParameterParser(decoder, conf); actual = testParser.createFlag(); Assert.assertEquals(0, actual.size()); final String path3 = "/test1?createflag=create,overwrite"; decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + path3); testParser = new ParameterParser(decoder, conf); actual = testParser.createFlag(); expected = EnumSet.of(CreateFlag.CREATE, CreateFlag .OVERWRITE); Assert.assertEquals(expected.toString(), actual.toString()); final String path4 = "/test1?createflag="; decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + path4); testParser = new ParameterParser(decoder, conf); actual = testParser.createFlag(); Assert.assertEquals(0, actual.size()); //Incorrect value passed to createflag try { final String path5 = "/test1?createflag=overwrite,"; decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + path5); testParser = new ParameterParser(decoder, conf); actual = testParser.createFlag(); fail("It should throw Illegal Argument Exception"); } catch (Exception e) { GenericTestUtils .assertExceptionContains("No enum constant", e); } 
//Incorrect value passed to createflag try { final String path6 = "/test1?createflag=,"; decoder = new QueryStringDecoder( WebHdfsHandler.WEBHDFS_PREFIX + path6); testParser = new ParameterParser(decoder, conf); actual = testParser.createFlag(); fail("It should throw Illegal Argument Exception"); } catch (Exception e) { GenericTestUtils .assertExceptionContains("No enum constant", e); } }
/**
 * Blocking poll of the tail element, waiting up to the given timeout.
 * Delegates to the async variant and blocks interruptibly on its result;
 * returns null if no element became available within the timeout.
 */
@Override public V pollLast(long timeout, TimeUnit unit) throws InterruptedException { return commandExecutor.getInterrupted(pollLastAsync(timeout, unit)); }
// Verifies pollLast returns elements from the tail in priority order (3, 2, 1),
// then blocks for the full 5s timeout and returns null when the deque is empty.
@Test public void testPollLast() throws InterruptedException { RBlockingDeque<Integer> queue1 = redisson.getPriorityBlockingDeque("queue1"); queue1.add(3); queue1.add(1); queue1.add(2); assertThat(queue1.pollLast(2, TimeUnit.SECONDS)).isEqualTo(3); assertThat(queue1.pollLast(2, TimeUnit.SECONDS)).isEqualTo(2); assertThat(queue1.pollLast(2, TimeUnit.SECONDS)).isEqualTo(1); long s = System.currentTimeMillis(); assertThat(queue1.pollLast(5, TimeUnit.SECONDS)).isNull(); assertThat(System.currentTimeMillis() - s).isGreaterThanOrEqualTo(5000); }
/**
 * Computes and stores the next Kafka metadata state by dispatching the current
 * state to its per-state handler (driven by the strimzi.io/kraft annotation).
 * Logs at info level on a transition and at debug level otherwise, then returns
 * the (possibly unchanged) new state.
 */
public KafkaMetadataState computeNextMetadataState(KafkaStatus kafkaStatus) {
    KafkaMetadataState currentState = metadataState;
    // Each handler decides the successor state from the status and annotation.
    metadataState = switch (currentState) {
        case KRaft -> onKRaft(kafkaStatus);
        case ZooKeeper -> onZooKeeper(kafkaStatus);
        case KRaftMigration -> onKRaftMigration(kafkaStatus);
        case KRaftDualWriting -> onKRaftDualWriting(kafkaStatus);
        case KRaftPostMigration -> onKRaftPostMigration(kafkaStatus);
        case PreKRaft -> onPreKRaft(kafkaStatus);
    };
    if (metadataState != currentState) {
        LOGGER.infoCr(reconciliation, "Transitioning metadata state from [{}] to [{}] with strimzi.io/kraft annotation [{}]", currentState, metadataState, kraftAnno);
    } else {
        LOGGER.debugCr(reconciliation, "Metadata state [{}] with strimzi.io/kraft annotation [{}]", metadataState, kraftAnno);
    }
    return metadataState;
}
// Verifies the ZooKeeper -> KRaftMigration transition with the "migration"
// annotation, both when the status carries no metadata state and when it is
// explicitly ZooKeeper.
// NOTE(review): assertEquals is called (actual, expected) — JUnit expects the
// expected value first; confirm the intended framework convention.
@Test public void testFromZookeeperToKRaftMigration() { // test with no metadata state set Kafka kafka = new KafkaBuilder(KAFKA) .editMetadata() .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "migration") .endMetadata() .build(); KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftMigration); // test with ZooKeeper metadata state set kafka = new KafkaBuilder(KAFKA) .editMetadata() .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "migration") .endMetadata() .withNewStatus() .withKafkaMetadataState(ZooKeeper) .endStatus() .build(); kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka); assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), KRaftMigration); }
/**
 * Returns true if this version is at least the given version.
 * Same version type: plain major/minor comparison. Mixed types: the comparison
 * goes through the CP-to-standalone versionMapping table; unknown mappings
 * conservatively return false.
 */
public boolean isAtLeast(final KsqlVersion version) {
    // Same flavor (both CP or both standalone): compare directly.
    if (versionType == version.versionType) {
        return isAtLeastVersion(this, version.majorVersion, version.minorVersion);
    }
    // This is standalone, other is CP: map the other onto standalone coordinates.
    if (versionType == VersionType.KSQLDB_STANDALONE) {
        final KsqlVersion otherStandalone = versionMapping.get(version);
        return otherStandalone != null
            && isAtLeastVersion(
            this,
            otherStandalone.majorVersion,
            otherStandalone.minorVersion
        );
    }
    // This is CP, other is standalone: map this onto standalone coordinates.
    final KsqlVersion standalone = versionMapping.get(this);
    return standalone != null && isAtLeastVersion(standalone, version.majorVersion, version.minorVersion);
}
// Verifies cross-type comparisons: known CP<->standalone mappings compare in both
// directions, and versions absent from the mapping table conservatively return false.
@Test public void shouldCompareCpVersionToStandaloneVersionIsAtLeast() { // known mappings assertThat(new KsqlVersion("6.0.").isAtLeast(new KsqlVersion("0.10.")), is(true)); assertThat(new KsqlVersion("0.10.").isAtLeast(new KsqlVersion("6.0.")), is(true)); assertThat(new KsqlVersion("6.1.").isAtLeast(new KsqlVersion("0.10.")), is(true)); assertThat(new KsqlVersion("6.1.").isAtLeast(new KsqlVersion("0.14.")), is(true)); assertThat(new KsqlVersion("0.14.").isAtLeast(new KsqlVersion("6.0.")), is(true)); assertThat(new KsqlVersion("0.14.").isAtLeast(new KsqlVersion("6.1.")), is(true)); assertThat(new KsqlVersion("6.0.").isAtLeast(new KsqlVersion("0.14.")), is(false)); assertThat(new KsqlVersion("0.10.").isAtLeast(new KsqlVersion("6.1.")), is(false)); // unknown mappings assertThat(new KsqlVersion("6.2.").isAtLeast(new KsqlVersion("0.10.")), is(false)); assertThat(new KsqlVersion("0.10.").isAtLeast(new KsqlVersion("6.2.")), is(false)); }
/**
 * Formats a duration given in seconds as a zero-padded "MM:SS" string,
 * e.g. 165 -> "02:45".
 *
 * @param timerInSeconds non-negative duration in seconds
 * @return the duration rendered as minutes:seconds, each two digits
 */
public static String formatTimer(long timerInSeconds) {
    final long min = TimeUnit.SECONDS.toMinutes(timerInSeconds);
    // Remaining seconds once whole minutes are removed. The original wrapped this
    // in TimeUnit.SECONDS.toSeconds(...), which is an identity conversion and
    // has been dropped.
    final long sec = timerInSeconds - TimeUnit.MINUTES.toSeconds(min);
    return String.format("%02d:%02d", min, sec);
}
// Verifies MM:SS formatting for zero, sub-minute, multi-minute, and >30-minute inputs.
@Test public void testFormatTimer() { assertEquals("10:00", formatTimer(600)); assertEquals("00:00", formatTimer(0)); assertEquals("00:45", formatTimer(45)); assertEquals("02:45", formatTimer(165)); assertEquals("30:33", formatTimer(1833)); }
/**
 * Validates that admitting this transient query keeps the aggregate cache-buffer
 * usage of all running transient queries within the configured limit
 * (KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING_TRANSIENT).
 */
@Override
public void validateTransientQuery(
    final SessionConfig config,
    final ExecutionPlan executionPlan,
    final Collection<QueryMetadata> runningQueries
) {
    validateCacheBytesUsage(
        // Only transient queries count against the transient cache limit.
        runningQueries.stream()
            .filter(q -> q instanceof TransientQueryMetadata)
            .collect(Collectors.toList()),
        config,
        config.getConfig(false)
            .getLong(KsqlConfig.KSQL_TOTAL_CACHE_MAX_BYTES_BUFFERING_TRANSIENT)
    );
}
// Verifies that when no transient buffer-cache limit is configured, validation
// passes even with a very large configured buffering value (no exception thrown).
@Test public void shouldIgnoreBufferCacheLimitIfNotSetForTransientQueries() { // Given: final SessionConfig config = configWithLimits(100000000000L, OptionalLong.empty()); // When/Then (no throw) queryValidator.validateTransientQuery(config, plan, queries); }
/**
 * Submits a reservation to a federated sub-cluster, retrying up to
 * {@code numSubmitRetries} times. Each attempt: (1) picks a home sub-cluster via
 * the policy facade, (2) records/updates the reservation-to-subcluster mapping
 * in the state store (adds on the first attempt or when absent, updates on later
 * retries), and (3) forwards the request to that sub-cluster's RM. Success and
 * failure are both audited and reflected in router metrics; a null/incomplete
 * request or exhaustion of all retries raises a YarnException.
 */
@Override public ReservationSubmissionResponse submitReservation( ReservationSubmissionRequest request) throws YarnException, IOException { if (request == null || request.getReservationId() == null || request.getReservationDefinition() == null || request.getQueue() == null) { routerMetrics.incrSubmitReservationFailedRetrieved(); String msg = "Missing submitReservation request or reservationId " + "or reservation definition or queue."; RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_RESERVATION, UNKNOWN, TARGET_CLIENT_RM_SERVICE, msg); RouterServerUtil.logAndThrowException(msg, null); } long startTime = clock.getTime(); ReservationId reservationId = request.getReservationId(); for (int i = 0; i < numSubmitRetries; i++) { try { // First, Get SubClusterId according to specific strategy. SubClusterId subClusterId = policyFacade.getReservationHomeSubCluster(request); LOG.info("submitReservation ReservationId {} try #{} on SubCluster {}.", reservationId, i, subClusterId); ReservationHomeSubCluster reservationHomeSubCluster = ReservationHomeSubCluster.newInstance(reservationId, subClusterId); // Second, determine whether the current ReservationId has a corresponding subCluster. // If it does not exist, add it. If it exists, update it. 
Boolean exists = existsReservationHomeSubCluster(reservationId); // We may encounter the situation of repeated submission of Reservation, // at this time we should try to use the reservation that has been allocated // !exists indicates that the reservation does not exist and needs to be added // i==0, mainly to consider repeated submissions, // so the first time to apply for reservation, try to use the original reservation if (!exists || i == 0) { addReservationHomeSubCluster(reservationId, reservationHomeSubCluster); } else { updateReservationHomeSubCluster(subClusterId, reservationId, reservationHomeSubCluster); } // Third, Submit a Reservation request to the subCluster ApplicationClientProtocol clientRMProxy = getClientRMProxyForSubCluster(subClusterId); ReservationSubmissionResponse response = clientRMProxy.submitReservation(request); if (response != null) { LOG.info("Reservation {} submitted on subCluster {}.", reservationId, subClusterId); long stopTime = clock.getTime(); routerMetrics.succeededSubmitReservationRetrieved(stopTime - startTime); RouterAuditLogger.logSuccess(user.getShortUserName(), SUBMIT_RESERVATION, TARGET_CLIENT_RM_SERVICE); return response; } } catch (Exception e) { LOG.warn("Unable to submit(try #{}) the Reservation {}.", i, reservationId, e); } } routerMetrics.incrSubmitReservationFailedRetrieved(); String msg = String.format("Reservation %s failed to be submitted.", reservationId); RouterAuditLogger.logFailure(user.getShortUserName(), SUBMIT_RESERVATION, UNKNOWN, TARGET_CLIENT_RM_SERVICE, msg); throw new YarnException(msg); }
// End-to-end submit flow through the federation interceptor: obtain a new
// reservation id, submit a reservation, and verify the state store recorded a
// home sub-cluster that belongs to the known sub-cluster set.
@Test public void testSubmitReservation() throws Exception { LOG.info("Test FederationClientInterceptor : SubmitReservation request."); // get new reservationId GetNewReservationRequest request = GetNewReservationRequest.newInstance(); GetNewReservationResponse response = interceptor.getNewReservation(request); Assert.assertNotNull(response); // Submit Reservation ReservationId reservationId = response.getReservationId(); ReservationDefinition rDefinition = createReservationDefinition(1024, 1); ReservationSubmissionRequest rSubmissionRequest = ReservationSubmissionRequest.newInstance( rDefinition, "decided", reservationId); ReservationSubmissionResponse submissionResponse = interceptor.submitReservation(rSubmissionRequest); Assert.assertNotNull(submissionResponse); SubClusterId subClusterId = stateStoreUtil.queryReservationHomeSC(reservationId); Assert.assertNotNull(subClusterId); Assert.assertTrue(subClusters.contains(subClusterId)); }
/**
 * Returns all sensitive-word records, delegating directly to the mapper's
 * unfiltered select.
 */
@Override public List<SensitiveWordDO> getSensitiveWordList() { return sensitiveWordMapper.selectList(); }
// Inserts two random sensitive words and verifies the service returns both,
// in insertion order.
@Test public void testGetSensitiveWordList() { // mock data SensitiveWordDO sensitiveWord01 = randomPojo(SensitiveWordDO.class); sensitiveWordMapper.insert(sensitiveWord01); SensitiveWordDO sensitiveWord02 = randomPojo(SensitiveWordDO.class); sensitiveWordMapper.insert(sensitiveWord02); // invoke List<SensitiveWordDO> list = sensitiveWordService.getSensitiveWordList(); // assert both rows are returned in insertion order assertEquals(2, list.size()); assertEquals(sensitiveWord01, list.get(0)); assertEquals(sensitiveWord02, list.get(1)); }
/**
 * Starts or stops a named timer. The action comes from the HEADER_TIMER_ACTION
 * message header, falling back to the endpoint's configured action; anything
 * else (including a null resolved action) is logged as a warning and ignored.
 * Note: the if/else chain (rather than a switch) deliberately tolerates a null
 * finalAction by falling through to the warning branch.
 */
@Override protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName) throws Exception { Message in = exchange.getIn(); MetricsTimerAction action = endpoint.getAction(); MetricsTimerAction finalAction = in.getHeader(HEADER_TIMER_ACTION, action, MetricsTimerAction.class); if (finalAction == MetricsTimerAction.start) { handleStart(exchange, registry, metricsName); } else if (finalAction == MetricsTimerAction.stop) { handleStop(exchange, metricsName); } else { LOG.warn("No action provided for timer \"{}\"", metricsName); } }
// Verifies that the header-level action (stop) overrides the endpoint-level
// action (start): the stored timer context is stopped and removed, and the exact
// interaction order is enforced via inOrder.
@Test public void testProcessStartWithOverride() throws Exception { when(endpoint.getAction()).thenReturn(MetricsTimerAction.start); when(in.getHeader(HEADER_TIMER_ACTION, MetricsTimerAction.start, MetricsTimerAction.class)) .thenReturn(MetricsTimerAction.stop); when(exchange.getProperty(PROPERTY_NAME, Timer.Context.class)).thenReturn(context); producer.doProcess(exchange, endpoint, registry, METRICS_NAME); inOrder.verify(exchange, times(1)).getIn(); inOrder.verify(endpoint, times(1)).getAction(); inOrder.verify(in, times(1)).getHeader(HEADER_TIMER_ACTION, MetricsTimerAction.start, MetricsTimerAction.class); inOrder.verify(exchange, times(1)).getProperty(PROPERTY_NAME, Timer.Context.class); inOrder.verify(context, times(1)).stop(); inOrder.verify(exchange, times(1)).removeProperty(PROPERTY_NAME); inOrder.verifyNoMoreInteractions(); }
/**
 * One iteration of the sender loop. With a transaction manager present it first
 * resolves in-flight sequences, short-circuits on fatal errors (aborting batches
 * and polling), bails out early on handled abortable authorization errors, bumps
 * the idempotent epoch / requests a new producer id when needed, and returns if a
 * transactional request was sent. Authentication failures are recorded on the
 * transaction manager. Finally it sends accumulated produce data and polls the
 * network client with the computed timeout.
 */
void runOnce() { if (transactionManager != null) { try { transactionManager.maybeResolveSequences(); RuntimeException lastError = transactionManager.lastError(); // do not continue sending if the transaction manager is in a failed state if (transactionManager.hasFatalError()) { if (lastError != null) maybeAbortBatches(lastError); client.poll(retryBackoffMs, time.milliseconds()); return; } if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) { return; } // Check whether we need a new producerId. If so, we will enqueue an InitProducerId // request which will be sent below transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); if (maybeSendAndPollTransactionalRequest()) { return; } } catch (AuthenticationException e) { // This is already logged as error, but propagated here to perform any clean ups. log.trace("Authentication exception while processing transactional request", e); transactionManager.authenticationFailed(e); } } long currentTimeMs = time.milliseconds(); long pollTimeout = sendProducerData(currentTimeMs); client.poll(pollTimeout, currentTimeMs); }
// Verifies idempotent-producer sequencing when broker responses arrive out of
// order: after an OUT_OF_ORDER_SEQUENCE on request 2 and NOT_LEADER_OR_FOLLOWER
// on request 1, both batches are requeued in base-sequence order (0 then 1),
// retried one-at-a-time, and acknowledged in order with the expected offsets.
@Test public void testCorrectHandlingOfOutOfOrderResponses() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = createTransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); assertEquals(0, transactionManager.sequenceNumber(tp0)); // Send first ProduceRequest Future<RecordMetadata> request1 = appendToAccumulator(tp0); sender.runOnce(); String nodeId = client.requests().peek().destination(); Node node = new Node(Integer.parseInt(nodeId), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertEquals(1, transactionManager.sequenceNumber(tp0)); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); // Send second ProduceRequest Future<RecordMetadata> request2 = appendToAccumulator(tp0); sender.runOnce(); assertEquals(2, client.inFlightRequestCount()); assertEquals(2, transactionManager.sequenceNumber(tp0)); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); assertFalse(request1.isDone()); assertFalse(request2.isDone()); assertTrue(client.isReady(node, time.milliseconds())); ClientRequest firstClientRequest = client.requests().peek(); ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1]; client.respondToRequest(secondClientRequest, produceResponse(tp0, -1, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1)); sender.runOnce(); // receive response 1 Deque<ProducerBatch> queuedBatches = accumulator.getDeque(tp0); // Make sure that we are queueing the second batch first. 
assertEquals(1, queuedBatches.size()); assertEquals(1, queuedBatches.peekFirst().baseSequence()); assertEquals(1, client.inFlightRequestCount()); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); client.respondToRequest(firstClientRequest, produceResponse(tp0, -1, Errors.NOT_LEADER_OR_FOLLOWER, -1)); sender.runOnce(); // receive response 0 // Make sure we requeued both batches in the correct order. assertEquals(2, queuedBatches.size()); assertEquals(0, queuedBatches.peekFirst().baseSequence()); assertEquals(1, queuedBatches.peekLast().baseSequence()); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); assertEquals(0, client.inFlightRequestCount()); assertFalse(request1.isDone()); assertFalse(request2.isDone()); sender.runOnce(); // send request 0 assertEquals(1, client.inFlightRequestCount()); sender.runOnce(); // don't do anything, only one inflight allowed once we are retrying. assertEquals(1, client.inFlightRequestCount()); assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0)); // Make sure that the requests are sent in order, even though the previous responses were not in order. sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L); sender.runOnce(); // receive response 0 assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0)); assertEquals(0, client.inFlightRequestCount()); assertTrue(request1.isDone()); assertEquals(0, request1.get().offset()); sender.runOnce(); // send request 1 assertEquals(1, client.inFlightRequestCount()); sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1L); sender.runOnce(); // receive response 1 assertFalse(client.hasInFlightRequests()); assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0)); assertTrue(request2.isDone()); assertEquals(1, request2.get().offset()); }
/**
 * Builds an {@link Application} from the given builder.
 * Marked {@code @Beta}: thin convenience wrapper over {@code builder.build()}.
 */
@Beta public static Application fromBuilder(Builder builder) throws Exception { return builder.build(); }
// Verifies that a custom renderer registered under "mock" is selected via the
// format=mock query parameter and produces the expected body with HTTP 200.
@Test void renderer() throws Exception { try ( ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .renderer("mock", MockRenderer.class)))) ) { Request request = new Request("http://localhost:" + getDefaults().vespaWebServicePort() + "/search/?format=mock"); Response response = app.handleRequest(request); assertNotNull(response); assertEquals(response.getStatus(), 200); assertEquals(response.getBodyAsString(), "<mock hits=\"0\" />"); } }
/**
 * Updates a data-source configuration: verifies the record exists, checks the new
 * connection settings are reachable, then persists the update.
 */
@Override
public void updateDataSourceConfig(DataSourceConfigSaveReqVO updateReqVO) {
    // Fails fast when the target record does not exist.
    validateDataSourceConfigExists(updateReqVO.getId());
    DataSourceConfigDO updateObj = BeanUtils.toBean(updateReqVO, DataSourceConfigDO.class);
    // Rejects the update when the new connection settings cannot connect.
    validateConnectionOK(updateObj);
    // Persists the update.
    dataSourceConfigMapper.updateById(updateObj);
}
// Verifies that updating a non-existent data-source config raises the
// DATA_SOURCE_CONFIG_NOT_EXISTS service exception.
@Test public void testUpdateDataSourceConfig_notExists() { // prepare a request whose random id matches no record DataSourceConfigSaveReqVO reqVO = randomPojo(DataSourceConfigSaveReqVO.class); // invoke and assert the expected service exception assertServiceException(() -> dataSourceConfigService.updateDataSourceConfig(reqVO), DATA_SOURCE_CONFIG_NOT_EXISTS); }
/**
 * Generates a {@code java.util.Arrays.asList(new Interval(...), ...)} expression
 * for the given intervals, mapping each interval to an object-creation argument
 * of the generated asList call.
 */
static Expression createIntervalsExpression(List<Interval> intervals) {
    // Start from a template "Arrays.asList()" expression statement.
    ExpressionStmt arraysAsListStmt = CommonCodegenUtils.createArraysAsListExpression();
    MethodCallExpr arraysCallExpression = arraysAsListStmt.getExpression().asMethodCallExpr();
    NodeList<Expression> arguments = new NodeList<>();
    // One "new Interval(left, right)" argument per interval, in order.
    intervals.forEach(value -> arguments.add(getObjectCreationExprFromInterval(value)));
    arraysCallExpression.setArguments(arguments);
    arraysAsListStmt.setExpression(arraysCallExpression);
    return arraysAsListStmt.getExpression();
}
// Generates three random intervals and verifies the produced expression is a
// java.util.Arrays.asList call whose arguments are Interval constructions
// matching each interval's left/right margins.
@Test void createIntervalsExpression() { List<Interval> intervals = IntStream.range(0, 3) .mapToObj(i -> { int leftMargin = new Random().nextInt(40); int rightMargin = leftMargin + 13; return new Interval(leftMargin, rightMargin); }) .collect(Collectors.toList()); Expression retrieved = org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.createIntervalsExpression(intervals); assertThat(retrieved).isNotNull(); assertThat(retrieved).isInstanceOf(MethodCallExpr.class); MethodCallExpr mtdExp = (MethodCallExpr) retrieved; String expected = "java.util.Arrays"; assertThat(mtdExp.getScope().get().asNameExpr().toString()).isEqualTo(expected); expected = "asList"; assertThat(mtdExp.getName().asString()).isEqualTo(expected); NodeList<Expression> arguments = mtdExp.getArguments(); assertThat(arguments).hasSameSizeAs(intervals); arguments.forEach(argument -> { assertThat(argument).isInstanceOf(ObjectCreationExpr.class); ObjectCreationExpr objCrt = (ObjectCreationExpr) argument; assertThat(objCrt.getType().asString()).isEqualTo(Interval.class.getCanonicalName()); Optional<Interval> intervalOpt = intervals.stream() .filter(interval -> String.valueOf(interval.getLeftMargin()).equals(objCrt.getArgument(0).asNameExpr().toString()) && String.valueOf(interval.getRightMargin()).equals(objCrt.getArgument(1).asNameExpr().toString())) .findFirst(); assertThat(intervalOpt).isPresent(); }); }
/**
 * Resolves and validates Avro mapping fields. Rejects top-level field paths,
 * forbids combining an inline schema with a schema-registry URL, and then either
 * (a) derives fields from the inline schema when the user supplied none
 * (erroring when neither is available), or (b) validates the user fields against
 * the inline schema (when present) and returns them as given.
 */
@Override public Stream<MappingField> resolveAndValidateFields( boolean isKey, List<MappingField> userFields, Map<String, String> options, InternalSerializationService serializationService ) { Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey); for (QueryPath path : fieldsByPath.keySet()) { if (path.isTopLevel()) { throw QueryException.error("Cannot use the '" + path + "' field with Avro serialization"); } } Schema schema = getSchema(fieldsByPath, options, isKey); if (schema != null && options.containsKey("schema.registry.url")) { throw new IllegalArgumentException("Inline schema cannot be used with schema registry"); } if (userFields.isEmpty()) { if (schema == null) { throw QueryException.error( "Either a column list or an inline schema is required to create Avro-based mapping"); } return resolveFields(schema, (name, type) -> new MappingField(name, type, new QueryPath(name, isKey).toString())); } else { if (schema != null) { validate(schema, getFields(fieldsByPath).collect(toList())); } return fieldsByPath.values().stream(); } }
// Verifies that field resolution rejects every unsupported Avro schema type
// (bytes, array, map, enum, fixed) with an "Unsupported schema type" message.
@Test public void when_schemaHasUnsupportedType_then_fieldResolutionFails() { List<Schema> unsupportedSchemaTypes = List.of( Schema.create(Schema.Type.BYTES), SchemaBuilder.array().items(Schema.create(Schema.Type.INT)), SchemaBuilder.map().values(Schema.create(Schema.Type.INT)), SchemaBuilder.enumeration("enum").symbols("symbol"), SchemaBuilder.fixed("fixed").size(0) ); for (Schema schema : unsupportedSchemaTypes) { assertThatThrownBy(() -> INSTANCE.resolveAndValidateFields( isKey, emptyList(), Map.of(isKey ? OPTION_KEY_AVRO_SCHEMA : OPTION_VALUE_AVRO_SCHEMA, SchemaBuilder.record("jet.sql").fields() .name("field").type(schema).noDefault() .endRecord().toString()), null ).toArray()).hasMessage("Unsupported schema type: " + schema.getType()); } }
/**
 * Creates an XML-backed configuration for the given logger context and source;
 * delegates all initialization to the superclass.
 */
public XmlConfiguration(final LoggerContext loggerContext, final ConfigurationSource configSource) { super(loggerContext, configSource); }
// Loads log4j2-test.xml from the config path and verifies the parsed
// configuration carries the expected name after initialization.
@Test public void testXmlConfiguration() throws IOException { Path path = getConfigPath().resolve("log4j2-test.xml"); try (InputStream is = Files.newInputStream(path)) { ConfigurationSource configurationSource = new ConfigurationSource(is, path.toFile()); LoggerContext context = new LoggerContext("test"); XmlConfiguration xmlConfiguration = new XmlConfiguration(context, configurationSource); xmlConfiguration.initialize(); assertEquals("InfinispanTestConfig", xmlConfiguration.getName()); } }
/**
 * Converts a {@link RestLiResponseException} into a {@link StreamException},
 * copying headers/cookies/status (defaulting to 500 when absent), encoding the
 * response data map via the given codec, and preserving the original cause.
 *
 * @deprecated retained for compatibility; see the non-deprecated overloads.
 */
@Deprecated public static StreamException buildStreamException(RestLiResponseException restLiResponseException, StreamDataCodec codec) { RestLiResponse restLiResponse = restLiResponseException.getRestLiResponse(); StreamResponseBuilder responseBuilder = new StreamResponseBuilder() .setHeaders(restLiResponse.getHeaders()) .setCookies(CookieUtil.encodeSetCookies(restLiResponse.getCookies())) .setStatus(restLiResponse.getStatus() == null ? HttpStatus.S_500_INTERNAL_SERVER_ERROR.getCode() : restLiResponse.getStatus().getCode()); EntityStream<ByteString> entityStream = codec.encodeMap(restLiResponse.getDataMap()); StreamResponse response = responseBuilder.build(EntityStreamAdapters.fromGenericEntityStream(entityStream)); return new StreamException(response, restLiResponseException.getCause()); }
// Verifies that a StreamException built with the PROTOBUF2 codec carries the
// matching Content-Type header on its response.
@Test public void testContentTypeHeaderForStreamException() { RestLiResponseException restLiResponseException = new RestLiResponseException( new RuntimeException("this is a test"), new RestLiResponse.Builder() .status(HttpStatus.S_500_INTERNAL_SERVER_ERROR) .entity(new TestRecord()) .headers(Collections.emptyMap()) .cookies(Collections.emptyList()) .build()); StreamException streamException = ResponseUtils.buildStreamException(restLiResponseException, ContentType.PROTOBUF2); Assert.assertEquals(streamException.getResponse().getHeader(RestConstants.HEADER_CONTENT_TYPE), ContentType.PROTOBUF2.getHeaderKey()); }
/**
 * Wraps an array as a {@link RemoteIterator} over its elements, in index order.
 *
 * @param array source array (not copied; elements are iterated in place)
 * @param <T> element type
 * @return a remote iterator over the array's elements
 */
public static <T> RemoteIterator<T> remoteIteratorFromArray(T[] array) {
    // Arrays.asList provides a fixed-size view; its iterator walks the array in
    // order, exactly like Arrays.stream(array).iterator() would.
    return new WrappedJavaIterator<>(Arrays.asList(array).iterator());
}
// Verifies that iterating an array-backed remote iterator visits exactly
// DATA.length elements, logging each one.
@Test public void testIterateArray() throws Throwable { verifyInvoked(remoteIteratorFromArray(DATA), DATA.length, (s) -> LOG.info(s)); }
/**
 * Updates the content of the comment with the given id on behalf of a member.
 * NOTE(review): comment.update takes (newContent, memberId) — presumably the
 * entity checks the member's permission internally; confirm against Comment.
 */
public void patchCommentById(
        final Long memberId,
        final Long commentId,
        final CommentPatchRequest request
) {
    Comment comment = findComment(commentId);
    comment.update(request.comment(), memberId);
}
// Verifies that patching a comment updates its content.
// Uses the persisted comment's actual id instead of a hard-coded 1L, so the test
// does not break when identifier generation does not start at 1.
@Test void 댓글을_수정한다() { // given Comment saved = commentRepository.save(댓글_생성()); String text = "edit"; CommentPatchRequest req = new CommentPatchRequest(text); // when commentService.patchCommentById(1L, saved.getId(), req); // then assertThat(saved.getContent()).isEqualTo(text); }
/**
 * Serves the response from cache when both the cached body and content type are
 * present; otherwise executes the rest of the chain with a response decorator
 * that records the body for caching. The cache-miss/no-cache fallback, which was
 * duplicated in the original, is extracted into {@code executeAndCache}, and
 * {@code flatMap(data -> Mono.just(Optional.of(data)))} is simplified to
 * {@code map(Optional::of)} (identical semantics).
 */
@Override
public Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
    ICache cache = CacheUtils.getCache();
    if (Objects.isNull(cache)) {
        // No cache backend configured: execute the chain and capture the response.
        return executeAndCache(exchange, chain, rule);
    }
    return cache.getData(CacheUtils.dataKey(exchange))
            .zipWith(cache.getData(CacheUtils.contentTypeKey(exchange)))
            .map(Optional::of)
            .defaultIfEmpty(Optional.empty())
            .flatMap(dataFlg -> {
                if (dataFlg.isPresent()) {
                    // Cache hit: replay the stored body and content type.
                    byte[] bytes = dataFlg.get().getT1();
                    byte[] contentTypeBytes = dataFlg.get().getT2();
                    cache.setContentType(exchange, contentTypeBytes);
                    return exchange.getResponse().writeWith(Mono.just(exchange.getResponse().bufferFactory().wrap(bytes))
                            .doOnNext(data -> exchange.getResponse().getHeaders().setContentLength(data.readableByteCount())));
                }
                // Cache miss: execute the chain and capture the response.
                return executeAndCache(exchange, chain, rule);
            });
}

/**
 * Executes the remaining chain with a {@code CacheHttpResponse} decorator so the
 * produced response body is recorded for future cache hits.
 */
private Mono<Void> executeAndCache(final ServerWebExchange exchange, final ShenyuPluginChain chain, final RuleData rule) {
    CacheRuleHandle cacheRuleHandle = buildRuleHandle(rule);
    return chain.execute(exchange.mutate().response(new CacheHttpResponse(exchange, cacheRuleHandle)).build());
}
/**
 * Exercises CachePlugin.doExecute through three paths: no cache configured,
 * cache configured but empty (miss), and cache populated (hit).
 */
@Test
public void pluginTest() {
    ServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("localhost").build());
    final CachePlugin cachePlugin = new CachePlugin();
    final ShenyuPluginChain shenyuPluginChain = mock(ShenyuPluginChain.class);
    final RuleData ruleData = new RuleData();
    CachePluginDataHandler.CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), new CacheRuleHandle());
    Mockito.when(shenyuPluginChain.execute(any())).thenReturn(Mono.empty());
    // path 1: no ICache registered yet
    final Mono<Void> result = cachePlugin.doExecute(exchange, shenyuPluginChain, null, ruleData);
    StepVerifier.create(result).expectSubscription().verifyComplete();
    // path 2: cache registered but empty — miss
    final MemoryCache memoryCache = new MemoryCache();
    Singleton.INST.single(ICache.class, memoryCache);
    final Mono<Void> result2 = cachePlugin.doExecute(exchange, shenyuPluginChain, null, ruleData);
    StepVerifier.create(result2).expectSubscription().verifyComplete();
    // path 3: cache populated with body and content type — hit
    memoryCache.cacheData(CacheUtils.dataKey(exchange), MediaType.APPLICATION_JSON_VALUE.getBytes(StandardCharsets.UTF_8), 60L).subscribeOn(Schedulers.boundedElastic()).subscribe();
    memoryCache.cacheData(CacheUtils.contentTypeKey(exchange), MediaType.APPLICATION_JSON_VALUE.getBytes(StandardCharsets.UTF_8), 60L).subscribeOn(Schedulers.boundedElastic()).subscribe();
    final Mono<Void> result3 = cachePlugin.doExecute(exchange, shenyuPluginChain, null, ruleData);
    StepVerifier.create(result3).expectSubscription().verifyComplete();
}
/**
 * Decides whether the given address is trusted: null addresses are never trusted,
 * an empty trusted-interface list trusts everything, otherwise the host must match
 * one of the configured interfaces.
 */
@Override
public boolean isTrusted(Address address) {
    if (address == null) {
        return false;
    }
    // no configured interfaces means "trust all"
    if (trustedInterfaces.isEmpty()) {
        return true;
    }
    String host = address.getHost();
    if (matchAnyInterface(host, trustedInterfaces)) {
        return true;
    } else {
        if (logger.isFineEnabled()) {
            logger.fine(
                    "Address %s doesn't match any trusted interface", host);
        }
        return false;
    }
}
/** With no trusted interfaces configured, every address must be trusted. */
@Test
public void givenNoInterfaceIsConfigured_whenMessageArrives_thenTrust() throws UnknownHostException {
    AddressCheckerImpl joinMessageTrustChecker = new AddressCheckerImpl(emptySet(), logger);
    Address address = createAddress("127.0.0.1");
    assertTrue(joinMessageTrustChecker.isTrusted(address));
}
@SuppressWarnings("unchecked") public <IN, OUT> AvroDatumConverter<IN, OUT> create(Class<IN> inputClass) { boolean isMapOnly = ((JobConf) getConf()).getNumReduceTasks() == 0; if (AvroKey.class.isAssignableFrom(inputClass)) { Schema schema; if (isMapOnly) { schema = AvroJob.getMapOutputKeySchema(getConf()); if (null == schema) { schema = AvroJob.getOutputKeySchema(getConf()); } } else { schema = AvroJob.getOutputKeySchema(getConf()); } if (null == schema) { throw new IllegalStateException("Writer schema for output key was not set. Use AvroJob.setOutputKeySchema()."); } return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema); } if (AvroValue.class.isAssignableFrom(inputClass)) { Schema schema; if (isMapOnly) { schema = AvroJob.getMapOutputValueSchema(getConf()); if (null == schema) { schema = AvroJob.getOutputValueSchema(getConf()); } } else { schema = AvroJob.getOutputValueSchema(getConf()); } if (null == schema) { throw new IllegalStateException( "Writer schema for output value was not set. 
Use AvroJob.setOutputValueSchema()."); } return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema); } if (BooleanWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new BooleanWritableConverter(); } if (BytesWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new BytesWritableConverter(); } if (ByteWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new ByteWritableConverter(); } if (DoubleWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new DoubleWritableConverter(); } if (FloatWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new FloatWritableConverter(); } if (IntWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new IntWritableConverter(); } if (LongWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new LongWritableConverter(); } if (NullWritable.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new NullWritableConverter(); } if (Text.class.isAssignableFrom(inputClass)) { return (AvroDatumConverter<IN, OUT>) new TextConverter(); } throw new UnsupportedOperationException("Unsupported input type: " + inputClass.getName()); }
/** A ByteWritable converter must yield a GenericFixed whose single byte equals the input. */
@Test
void convertByteWritable() {
    AvroDatumConverter<ByteWritable, GenericFixed> converter = mFactory.create(ByteWritable.class);
    assertEquals(42, converter.convert(new ByteWritable((byte) 42)).bytes()[0]);
}
/**
 * Triggers an asynchronous position reset for any partitions that currently need one;
 * no-op when no partition requires a reset.
 */
public void resetPositionsIfNeeded() {
    Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();

    if (offsetResetTimestamps.isEmpty())
        return;

    resetPositionsAsync(offsetResetTimestamps);
}
/**
 * When metadata carries no leader epoch, a ListOffsets response containing an epoch
 * must be ignored: the reset is satisfied without requesting a metadata update or
 * recording an epoch.
 */
@Test
public void testListOffsetNoUpdateMissingEpoch() {
    buildFetcher();

    // Set up metadata with no leader epoch
    subscriptions.assignFromUser(singleton(tp0));
    MetadataResponse metadataWithNoLeaderEpochs = RequestTestUtils.metadataUpdateWithIds(
            "kafka-cluster", 1, Collections.emptyMap(), singletonMap(topicName, 4), tp -> null, topicIds);
    client.updateMetadata(metadataWithNoLeaderEpochs);

    // Return a ListOffsets response with leaderEpoch=1, we should ignore it
    subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);
    client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP),
            listOffsetResponse(tp0, Errors.NONE, 1L, 5L, 1));
    offsetFetcher.resetPositionsIfNeeded();
    consumerClient.pollNoWakeup();

    // Reset should be satisfied and no metadata update requested
    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    assertFalse(metadata.updateRequested());
    assertFalse(metadata.lastSeenLeaderEpoch(tp0).isPresent());
}
/** Binds {@code key} to {@code obj} in the symbol table, overwriting any existing binding. */
void put(String key, SelType obj) {
    symtab.put(key, obj);
}
/** A value stored via put must be retrievable via get with the same type and content. */
@Test
public void put() {
    state.put("foo", SelString.of("bar"));
    SelType res = state.get("foo");
    assertEquals("STRING: bar", res.type() + ": " + res);
}
/**
 * Peeks the first byte to detect HTTP vs. telnet and rewires the pipeline accordingly;
 * this handler removes itself once the protocol-specific handlers are installed.
 * The first byte is only inspected (not consumed), so the chosen codec sees the full stream.
 */
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    if (in.readableBytes() < 1) {
        return;
    }

    // read one byte to guess protocol
    final int magic = in.getByte(in.readerIndex());

    ChannelPipeline p = ctx.pipeline();
    p.addLast(new ForeignHostPermitHandler(qosConfiguration));
    if (isHttp(magic)) {
        // no welcome output for http protocol
        if (welcomeFuture != null && welcomeFuture.isCancellable()) {
            welcomeFuture.cancel(false);
        }
        p.addLast(new HttpServerCodec());
        p.addLast(new HttpObjectAggregator(1048576));
        p.addLast(new HttpProcessHandler(frameworkModel, qosConfiguration));
        p.remove(this);
    } else {
        p.addLast(new LineBasedFrameDecoder(2048));
        p.addLast(new StringDecoder(CharsetUtil.UTF_8));
        p.addLast(new StringEncoder(CharsetUtil.UTF_8));
        // close idle telnet sessions after 5 minutes of inactivity
        p.addLast(new IdleStateHandler(0, 0, 5 * 60));
        p.addLast(new TelnetIdleEventHandler());
        p.addLast(new TelnetProcessHandler(frameworkModel, qosConfiguration));
        p.remove(this);
    }
}
/** A non-HTTP first byte ('A') must install the telnet pipeline and remove the sniffing handler. */
@Test
void testDecodeTelnet() throws Exception {
    ByteBuf buf = Unpooled.wrappedBuffer(new byte[] {'A'});
    ChannelHandlerContext context = Mockito.mock(ChannelHandlerContext.class);
    ChannelPipeline pipeline = Mockito.mock(ChannelPipeline.class);
    Mockito.when(context.pipeline()).thenReturn(pipeline);
    QosProcessHandler handler = new QosProcessHandler(
            FrameworkModel.defaultModel(),
            QosConfiguration.builder()
                    .welcome("welcome")
                    .acceptForeignIp(false)
                    .acceptForeignIpWhitelist(StringUtils.EMPTY_STRING)
                    .build());
    handler.decode(context, buf, Collections.emptyList());
    verify(pipeline).addLast(any(LineBasedFrameDecoder.class));
    verify(pipeline).addLast(any(StringDecoder.class));
    verify(pipeline).addLast(any(StringEncoder.class));
    verify(pipeline).addLast(any(TelnetProcessHandler.class));
    verify(pipeline).remove(handler);
}
/**
 * Inserts or replaces the mapping for {@code key}, returning the previous value
 * (or null if absent). Delegates to the internal variant with the configured expiry
 * and onlyIfAbsent=false.
 */
@Override
public @Nullable V put(K key, V value) {
    return put(key, value, expiry(), /* onlyIfAbsent */ false);
}
/**
 * Verifies the expire-after-write tolerance optimization: an update within the
 * tolerance window is recorded as a read (read buffer), while an update beyond it
 * is recorded as a write (write buffer grows).
 */
@Test(dataProvider = "caches")
@CacheSpec(compute = Compute.SYNC, population = Population.EMPTY,
    initialCapacity = InitialCapacity.FULL, expireAfterWrite = Expire.ONE_MINUTE)
public void put_expireTolerance_expireAfterWrite(
        BoundedLocalCache<Int, Int> cache, CacheContext context) {
    // read-buffer bookkeeping is only observable with strong keys/values and an enabled buffer
    boolean mayCheckReads = context.isStrongKeys() && context.isStrongValues()
        && (cache.readBuffer != Buffer.<Node<Int, Int>>disabled());

    var initialValue = cache.put(Int.valueOf(1), Int.valueOf(1));
    assertThat(initialValue).isNull();
    assertThat(cache.writeBuffer.producerIndex).isEqualTo(2);

    // If within the tolerance, treat the update as a read
    var oldValue = cache.put(Int.valueOf(1), Int.valueOf(2));
    assertThat(oldValue).isEqualTo(1);
    if (mayCheckReads) {
        assertThat(cache.readBuffer.reads()).isEqualTo(0);
        assertThat(cache.readBuffer.writes()).isEqualTo(1);
    }
    assertThat(cache.writeBuffer.producerIndex).isEqualTo(2);

    // If exceeds the tolerance, treat the update as a write
    context.ticker().advance(Duration.ofNanos(EXPIRE_WRITE_TOLERANCE + 1));
    var lastValue = cache.put(Int.valueOf(1), Int.valueOf(3));
    assertThat(lastValue).isEqualTo(2);
    if (mayCheckReads) {
        assertThat(cache.readBuffer.reads()).isEqualTo(1);
        assertThat(cache.readBuffer.writes()).isEqualTo(1);
    }
    assertThat(cache.writeBuffer.producerIndex).isEqualTo(4);
}
/**
 * Validates and persists a brand-new client registration, then resets the stats cache.
 *
 * @param client a client that has never been saved (id must be null)
 * @return the persisted client entity
 * @throws IllegalArgumentException if the client already has an id, or a redirect URI is blacklisted
 */
@Override
public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) {
    if (client.getId() != null) { // if it's not null, it's already been saved, this is an error
        throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId());
    }

    if (client.getRegisteredRedirectUri() != null) {
        for (String uri : client.getRegisteredRedirectUri()) {
            if (blacklistedSiteService.isBlacklisted(uri)) {
                throw new IllegalArgumentException("Client URI is blacklisted: " + uri);
            }
        }
    }

    // assign a random clientid if it's empty
    // NOTE: don't assign a random client secret without asking, since public clients have no secret
    if (Strings.isNullOrEmpty(client.getClientId())) {
        client = generateClientId(client);
    }

    // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa
    ensureRefreshTokenConsistency(client);

    // make sure we don't have both a JWKS and a JWKS URI
    ensureKeyConsistency(client);

    // check consistency when using HEART mode
    checkHeartMode(client);

    // timestamp this to right now
    client.setCreatedAt(new Date());

    // check the sector URI
    checkSectorIdentifierUri(client);

    ensureNoReservedScopes(client);

    ClientDetailsEntity c = clientRepository.saveClient(client);

    statsService.resetCache();

    return c;
}
/** A freshly saved client must have reserved scopes stripped and no offline_access scope. */
@Test
public void saveNewClient_noOfflineAccess() {
    ClientDetailsEntity client = new ClientDetailsEntity();

    client = service.saveNewClient(client);

    Mockito.verify(scopeService, Mockito.atLeastOnce()).removeReservedScopes(Matchers.anySet());

    assertThat(client.getScope().contains(SystemScopeService.OFFLINE_ACCESS), is(equalTo(false)));
}
/**
 * Builds a route context for broadcast routing by delegating to the engine produced
 * by BroadcastRouteEngineFactory for this query and rule.
 */
@Override
public RouteContext createRouteContext(final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database,
                                       final BroadcastRule rule, final ConfigurationProperties props, final ConnectionContext connectionContext) {
    RouteContext result = new RouteContext();
    BroadcastRouteEngineFactory.newInstance(rule, database, queryContext, connectionContext).route(result, rule);
    return result;
}
/**
 * With two data sources, broadcast routing must produce one route unit per data source,
 * each mapping the broadcast table t_order to itself.
 */
@Test
void assertCreateBroadcastRouteContextWithMultiDataSource() throws SQLException {
    BroadcastRuleConfiguration currentConfig = mock(BroadcastRuleConfiguration.class);
    when(currentConfig.getTables()).thenReturn(Collections.singleton("t_order"));
    BroadcastRule broadcastRule = new BroadcastRule(currentConfig, DefaultDatabase.LOGIC_NAME, createMultiDataSourceMap(), Collections.emptyList());
    RouteContext routeContext = new BroadcastSQLRouter().createRouteContext(createQueryContext(),
            mock(RuleMetaData.class), mockDatabaseWithMultipleResources(), broadcastRule, new ConfigurationProperties(new Properties()), new ConnectionContext(Collections::emptySet));
    List<RouteUnit> routeUnits = new ArrayList<>(routeContext.getRouteUnits());
    assertThat(routeContext.getRouteUnits().size(), is(2));
    assertThat(routeUnits.get(0).getDataSourceMapper().getLogicName(), is(routeUnits.get(0).getDataSourceMapper().getActualName()));
    assertThat(routeUnits.get(0).getTableMappers().size(), is(1));
    RouteMapper tableMapper = routeUnits.get(0).getTableMappers().iterator().next();
    assertThat(tableMapper.getActualName(), is("t_order"));
    assertThat(tableMapper.getLogicName(), is("t_order"));
}
/** Resolves a ParamType for a varargs parameter using the varargs-specific type mapping. */
public static ParamType getVarArgsSchemaFromType(final Type type) {
    return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
/** A variadic List parameter must resolve to an ArrayType with a DOUBLE element type. */
@Test
public void shouldGetArraySchemaFromListClassVariadic() throws NoSuchMethodException {
    final Type type = getClass().getDeclaredMethod("listType", List.class)
        .getGenericParameterTypes()[0];

    final ParamType schema = UdfUtil.getVarArgsSchemaFromType(type);

    assertThat(schema, instanceOf(ArrayType.class));
    assertThat(((ArrayType) schema).element(), equalTo(ParamTypes.DOUBLE));
}
/**
 * Returns the maximum element of the array under natural ordering;
 * delegates to the comparator overload with a null comparator.
 */
public static <T extends Comparable<? super T>> T max(T[] numberArray) {
    return max(numberArray, null);
}
/** Covers max/min over primitives and BigDecimal with a custom (scale-based) comparator. */
@Test
public void maxTest() {
    int max = ArrayUtil.max(1, 2, 13, 4, 5);
    assertEquals(13, max);

    long maxLong = ArrayUtil.max(1L, 2L, 13L, 4L, 5L);
    assertEquals(13, maxLong);

    double maxDouble = ArrayUtil.max(1D, 2.4D, 13.0D, 4.55D, 5D);
    assertEquals(13.0, maxDouble, 0);

    BigDecimal one = new BigDecimal("1.00");
    BigDecimal two = new BigDecimal("2.0");
    BigDecimal three = new BigDecimal("3");
    BigDecimal[] bigDecimals = {two, one, three};

    // comparing by scale: "3" has the smallest scale, "1.00" the largest
    BigDecimal minAccuracy = ArrayUtil.min(bigDecimals, Comparator.comparingInt(BigDecimal::scale));
    assertEquals(minAccuracy, three);

    BigDecimal maxAccuracy = ArrayUtil.max(bigDecimals, Comparator.comparingInt(BigDecimal::scale));
    assertEquals(maxAccuracy, one);
}
public static boolean isStorageSpaceError(final Throwable error) { Throwable cause = error; while (null != cause) { if (cause instanceof IOException) { final String msg = cause.getMessage(); if ("No space left on device".equals(msg) || "There is not enough space on the disk".equals(msg)) { return true; } } cause = cause.getCause(); } return false; }
/** A matching message on a non-IOException must NOT be classified as a storage-space error. */
@Test
void isStorageSpaceErrorReturnsFalseIfNotIOException() {
    assertFalse(isStorageSpaceError(new IllegalArgumentException("No space left on device")));
}
/**
 * SQL function ST_IsSimple: deserializes the geometry and asks JTS whether it is simple.
 * A TopologyException-caused PrestoException is treated as "not simple" rather than an error.
 */
@Description("Returns TRUE if this Geometry has no anomalous geometric points, such as self intersection or self tangency")
@ScalarFunction("ST_IsSimple")
@SqlType(BOOLEAN)
public static boolean stIsSimple(@SqlType(GEOMETRY_TYPE_NAME) Slice input) {
    try {
        return deserialize(input).isSimple();
    } catch (PrestoException e) {
        // degenerate topology means the geometry cannot be simple
        if (e.getCause() instanceof TopologyException) {
            return false;
        }
        throw e;
    }
}
/** Covers simple and non-simple cases for each geometry kind (point, line, polygon, multi-*). */
@Test
public void testSTIsSimple() {
    assertSimpleGeometry("POINT (1.5 2.5)");
    assertSimpleGeometry("MULTIPOINT (1 2, 2 4, 3 6, 4 8)");
    assertNotSimpleGeometry("MULTIPOINT (1 2, 2 4, 3 6, 1 2)");
    assertSimpleGeometry("LINESTRING (8 4, 5 7)");
    assertSimpleGeometry("LINESTRING (1 1, 2 2, 1 3, 1 1)");
    assertNotSimpleGeometry("LINESTRING (0 0, 1 1, 1 0, 0 1)");
    assertSimpleGeometry("MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))");
    assertNotSimpleGeometry("MULTILINESTRING ((1 1, 5 1), (2 4, 4 0))");
    assertSimpleGeometry("POLYGON EMPTY");
    assertSimpleGeometry("POLYGON ((2 0, 2 1, 3 1, 2 0))");
    assertSimpleGeometry("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((2 4, 2 6, 6 6, 6 4, 2 4)))");
    assertNotSimpleGeometry("LINESTRING (0 0, -1 0.5, 0 1, 1 1, 1 0, 0 1, 0 0)");
    assertNotSimpleGeometry("MULTIPOINT ((0 0), (0 1), (1 1), (0 1))");
    assertNotSimpleGeometry("LINESTRING (0 0, -1 0.5, 0 1, 1 1, 1 0, 0 1, 0 0)");
}
/** Delegates to the wrapped client's local socket address. */
@Override
public InetSocketAddress getLocalAddress() {
    return client.getLocalAddress();
}
/**
 * With shared connections, two invokers in the same consumer/provider process pair
 * must reuse the exact same reference-counted client list and local address.
 */
@Test
void test_multi_share_connect() {
    // here a three shared connection is established between a consumer process and a provider process.
    final int shareConnectionNum = 3;

    init(0, shareConnectionNum);

    List<ReferenceCountExchangeClient> helloReferenceClientList = getReferenceClientList(helloServiceInvoker);
    Assertions.assertEquals(shareConnectionNum, helloReferenceClientList.size());

    List<ReferenceCountExchangeClient> demoReferenceClientList = getReferenceClientList(demoServiceInvoker);
    Assertions.assertEquals(shareConnectionNum, demoReferenceClientList.size());

    // because helloServiceInvoker and demoServiceInvoker use share connect, so client list must be equal
    Assertions.assertEquals(helloReferenceClientList, demoReferenceClientList);

    Assertions.assertEquals(demoClient.getLocalAddress(), helloClient.getLocalAddress());
    Assertions.assertEquals(demoClient, helloClient);

    destroy();
}
/** Evicts the cached handle for this rule, if the rule carries a handle payload. */
@Override
public void removeRule(final RuleData ruleData) {
    Optional.ofNullable(ruleData.getHandle()).ifPresent(s -> CACHED_HANDLE.get().removeHandle(CacheKeyUtils.INST.getKey(ruleData)));
}
/**
 * After removeRule, the cached handle for the rule must be gone.
 * NOTE(review): method name says "Selector" but the behavior under test is removeRule —
 * consider renaming to removeRuleTest for clarity.
 */
@Test
public void removeSelectorTest() {
    modifyResponsePluginDataHandler.removeRule(ruleData);
    ModifyResponseRuleHandle modifyResponseRuleHandle = ModifyResponsePluginDataHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(ruleData));
    assertNull(modifyResponseRuleHandle);
}
/** Appends the protocol to the end of the event queue. */
public void add(TProtocol p) {
    events.addLast(p);
}
/** Round-trip validation of an object holding a Set field. */
@Test
public void testSet() throws TException {
    final Set<String> names = new HashSet<String>();
    names.add("John");
    names.add("Jack");
    final TestNameSet o = new TestNameSet("name", names);
    validate(o);
}
/** Returns an empty string — this registry service supplies no rule handle payload. */
@Override
protected String ruleHandler() {
    return "";
}
/** The gRPC register service must report an empty rule handler. */
@Test
public void testRuleHandler() {
    assertEquals(StringUtils.EMPTY, shenyuClientRegisterGrpcService.ruleHandler());
}
/**
 * Updates a combination (group-buy) activity and its products inside one transaction.
 * Validates existence, status, product conflicts and product existence before writing.
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void updateCombinationActivity(CombinationActivityUpdateReqVO updateReqVO) {
    // verify the activity exists
    CombinationActivityDO activityDO = validateCombinationActivityExists(updateReqVO.getId());
    // verify status: a disabled activity must not be updated
    if (ObjectUtil.equal(activityDO.getStatus(), CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(COMBINATION_ACTIVITY_STATUS_DISABLE_NOT_UPDATE);
    }
    // verify the SPU is not used by another conflicting activity
    validateProductConflict(updateReqVO.getSpuId(), updateReqVO.getId());
    // verify the products exist
    validateProductExists(updateReqVO.getSpuId(), updateReqVO.getProducts());

    // update the activity
    CombinationActivityDO updateObj = CombinationActivityConvert.INSTANCE.convert(updateReqVO);
    combinationActivityMapper.updateById(updateObj);
    // update the products
    updateCombinationProduct(updateObj, updateReqVO.getProducts());
}
/** Updating a non-existent activity must fail with COMBINATION_ACTIVITY_NOT_EXISTS. */
@Test
public void testUpdateCombinationActivity_notExists() {
    // prepare parameters
    CombinationActivityUpdateReqVO reqVO = randomPojo(CombinationActivityUpdateReqVO.class);

    // invoke and assert the expected service exception
    assertServiceException(() -> combinationActivityService.updateCombinationActivity(reqVO),
        COMBINATION_ACTIVITY_NOT_EXISTS);
}
/** Delegates the stream/time-range index lookup to the underlying index lookup service. */
@Override
public Set<IndexRange> indexRangesForStreamsInTimeRange(Set<String> streamIds, TimeRange timeRange) {
    return indexLookup.indexRangesForStreamsInTimeRange(streamIds, timeRange);
}
/**
 * Explains a search with a message list (narrow, recent range) and a pivot (wide range):
 * the message list must hit one index, the pivot three (including a warm-tier index),
 * and both generated queries must contain the search term.
 */
@Test
public void testExplain() {
    // time ranges starting before 2024 resolve to three indices (one warm), otherwise one
    when(indexLookup.indexRangesForStreamsInTimeRange(anySet(), any())).thenAnswer(a -> {
        if (a.getArgument(1, TimeRange.class).getFrom().getYear() < 2024) {
            return Set.of(
                    MongoIndexRange.create("graylog_0", nowUTC(), nowUTC(), nowUTC(), 0),
                    MongoIndexRange.create("graylog_1", nowUTC(), nowUTC(), nowUTC(), 0),
                    MongoIndexRange.create("graylog_warm_2", nowUTC(), nowUTC(), nowUTC(), 0)
            );
        }
        return Set.of(MongoIndexRange.create("graylog_0", nowUTC(), nowUTC(), nowUTC(), 0));
    });

    final Query query = Query.builder()
            .id("query1")
            .query(ElasticsearchQueryString.of("needle"))
            .searchTypes(Set.of(
                    MessageList.builder()
                            .id("messagelist-1")
                            .build(),
                    Pivot.builder()
                            .id("pivot-1")
                            .rowGroups(Time.builder().field("source").interval(AutoInterval.create()).build())
                            .timerange(AbsoluteRange.create(DateTime.parse("2016-05-19T00:00:00.000Z"), DateTime.parse("2022-01-09T00:00:00.000Z")))
                            .series()
                            .rollup(false)
                            .build()
                    )
            )
            .timerange(RelativeRange.create(300))
            .build();
    final Search search = Search.builder().queries(ImmutableSet.of(query)).build();
    final SearchJob job = new SearchJob("deadbeef", search, "admin", "test-node-id");
    final GeneratedQueryContext generatedQueryContext = createContext(query);

    var explainResult = backend.explain(job, query, generatedQueryContext);

    assertThat(explainResult.searchTypes()).isNotNull();
    assertThat(explainResult.searchTypes().get("messagelist-1")).satisfies(ml -> {
        assertThat(ml).isNotNull();
        assertThat(ml.searchedIndexRanges()).hasSize(1);
        assertThat(ml.searchedIndexRanges()).allMatch(r -> r.indexName().equals("graylog_0"));
        var ctx = JsonPath.parse(ml.queryString());
        JsonPathAssert.assertThat(ctx).jsonPathAsString("$.query.bool.must[0].bool.filter[0].query_string.query").isEqualTo("needle");
    });
    assertThat(explainResult.searchTypes().get("pivot-1")).satisfies(ml -> {
        assertThat(ml).isNotNull();
        assertThat(ml.searchedIndexRanges()).hasSize(3);
        assertThat(ml.searchedIndexRanges()).anyMatch(r -> r.indexName().equals("graylog_0") && !r.isWarmTiered());
        assertThat(ml.searchedIndexRanges()).anyMatch(r -> r.indexName().equals("graylog_warm_2") && r.isWarmTiered());
        var ctx = JsonPath.parse(ml.queryString());
        JsonPathAssert.assertThat(ctx).jsonPathAsString("$.query.bool.must[0].bool.filter[0].query_string.query").isEqualTo("needle");
        JsonPathAssert.assertThat(ctx).jsonPathAsString("$.aggregations.agg.date_histogram.field").isEqualTo("source");
    });
}
/**
 * Integer overload of the cosine UDF: widens to Double (null-safe) and delegates
 * to the Double overload.
 */
@Udf(description = "Returns the cosine of an INT value")
public Double cos(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the cosine of."
    ) final Integer value
) {
    return cos(value == null ? null : value.doubleValue());
}
/** Cosine of negative radians across double/long overloads must match the expected values. */
@Test
public void shouldHandleNegative() {
    assertThat(udf.cos(-0.43), closeTo(0.9089657496748851, 0.000000000000001));
    assertThat(udf.cos(-Math.PI), closeTo(-1.0, 0.000000000000001));
    assertThat(udf.cos(-2 * Math.PI), closeTo(1.0, 0.000000000000001));
    assertThat(udf.cos(-6), closeTo(0.960170286650366, 0.000000000000001));
    assertThat(udf.cos(-6L), closeTo(0.960170286650366, 0.000000000000001));
}
/**
 * ABI-encodes a dynamic array as: element count, then the per-element offsets,
 * then the encoded element values, concatenated in that order.
 */
static <T extends Type> String encodeDynamicArray(DynamicArray<T> value) {
    final int elementCount = value.getValue().size();
    final String encodedCount = encode(new Uint(BigInteger.valueOf(elementCount)));
    final String encodedOffsets = encodeArrayValuesOffsets(value);
    final String encodedElements = encodeArrayValues(value);
    return encodedCount + encodedOffsets + encodedElements;
}
/**
 * A dynamic array of dynamic arrays of static structs must encode as:
 * outer length, two outer offsets, then each inner array (length + struct fields).
 */
@Test
public void testDynamicArrayOfDynamicArraysOfStaticStructs() {
    DynamicArray<DynamicArray<Bar>> array =
            new DynamicArray(
                    DynamicArray.class,
                    Arrays.asList(
                            new DynamicArray(
                                    Bar.class,
                                    new Bar(
                                            new Uint256(BigInteger.ZERO),
                                            new Uint256(BigInteger.ZERO))),
                            new DynamicArray(
                                    Bar.class,
                                    new Bar(
                                            new Uint256(BigInteger.ONE),
                                            new Uint256(BigInteger.ZERO)))));

    assertEquals(
            ("0000000000000000000000000000000000000000000000000000000000000002"
                    + "0000000000000000000000000000000000000000000000000000000000000040"
                    + "00000000000000000000000000000000000000000000000000000000000000a0"
                    + "0000000000000000000000000000000000000000000000000000000000000001"
                    + "0000000000000000000000000000000000000000000000000000000000000000"
                    + "0000000000000000000000000000000000000000000000000000000000000000"
                    + "0000000000000000000000000000000000000000000000000000000000000001"
                    + "0000000000000000000000000000000000000000000000000000000000000001"
                    + "0000000000000000000000000000000000000000000000000000000000000000"),
            TypeEncoder.encodeDynamicArray(array));
}
/** Returns the plain access config list as-is; may be null if never set. */
public List<PlainAccessConfig> getPlainAccessConfigs() {
    return plainAccessConfigs;
}
/** A freshly constructed AclConfig exposes a null plain-access-config list. */
@Test
public void testGetPlainAccessConfigsWhenNull() {
    AclConfig aclConfig = new AclConfig();
    Assert.assertNull("The plainAccessConfigs should return null", aclConfig.getPlainAccessConfigs());
}
/**
 * Adds a reference to an already-cached resource and refreshes its access time.
 * Returns the cached file name, or null when the key is not mapped.
 *
 * NOTE(review): this synchronizes on the interned key string to serialize per-key
 * access; the JVM-wide intern pool is shared, so unrelated code locking the same
 * string literal would contend on the same monitor.
 */
@Override
public String addResourceReference(String key, SharedCacheResourceReference ref) {
    String interned = intern(key);
    synchronized (interned) {
        SharedCacheResource resource = cachedResources.get(interned);
        if (resource == null) {
            // it's not mapped
            return null;
        }
        resource.addReference(ref);
        resource.updateAccessTime();
        return resource.getFileName();
    }
}
/** Referencing a key that was never added must return null (no implicit creation). */
@Test
void testAddResourceRefNonExistentResource() throws Exception {
    startEmptyStore();
    String key = "key1";
    ApplicationId id = createAppId(1, 1L);
    // try adding an app id without adding the key first
    assertNull(store.addResourceReference(key,
        new SharedCacheResourceReference(id, "user")));
}
/**
 * Closes the underlying ONNX session, normalizing both runtime failures and
 * double-close into IllegalStateException with a cause.
 */
@Override
public void close() throws IllegalStateException {
    try {
        session.close();
    } catch (UncheckedOrtException e) {
        throw new IllegalStateException("Failed to close ONNX session", e);
    } catch (IllegalStateException e) {
        throw new IllegalStateException("Already closed", e);
    }
}
/**
 * Requesting a GPU device without CUDA must log exactly one INFO message announcing
 * the CPU fallback; captured via a custom log handler.
 */
@Test
public void testLoggingMessages() throws IOException {
    assumeTrue(OnnxRuntime.isRuntimeAvailable());
    Logger logger = Logger.getLogger(OnnxEvaluator.class.getName());
    CustomLogHandler logHandler = new CustomLogHandler();
    logger.addHandler(logHandler);
    var runtime = new OnnxRuntime();
    var model = Files.readAllBytes(Paths.get("src/test/models/onnx/simple/simple.onnx"));
    OnnxEvaluatorOptions options = new OnnxEvaluatorOptions();
    options.setGpuDevice(0);
    var evaluator = runtime.evaluatorOf(model,options);
    evaluator.close();
    List<LogRecord> records = logHandler.getLogRecords();
    assertEquals(1,records.size());
    assertEquals(Level.INFO,records.get(0).getLevel());
    String message = records.get(0).getMessage();
    assertEquals("Failed to create session with CUDA using GPU device 0. " +
            "Falling back to CPU. Reason: Error code - ORT_EP_FAIL - message:" +
            " Failed to find CUDA shared provider", message);
    logger.removeHandler(logHandler);
}
/**
 * Builds a KiePMMLRegressionTable from a PMML RegressionTable: per-predictor
 * evaluation functions (numeric, categorical, predictor-term), the normalization
 * result-updater, intercept, target field and target category.
 */
public static KiePMMLRegressionTable getRegressionTable(final RegressionTable regressionTable,
                                                        final RegressionCompilationDTO compilationDTO) {
    logger.trace("getRegressionTable {}", regressionTable);

    final Map<String, SerializableFunction<Double, Double>> numericPredictorsMap =
            getNumericPredictorsMap(regressionTable.getNumericPredictors());
    final Map<String, SerializableFunction<String, Double>> categoricalPredictorsMap =
            getCategoricalPredictorsMap(regressionTable.getCategoricalPredictors());
    final Map<String, SerializableFunction<Map<String, Object>, Double>> predictorTermFunctionMap =
            getPredictorTermsMap(regressionTable.getPredictorTerms());
    final SerializableFunction<Double, Double> resultUpdater =
            getResultUpdaterFunction(compilationDTO.getDefaultNormalizationMethod());
    // intercept may legitimately be absent in the PMML model
    final Double intercept = regressionTable.getIntercept() != null ? regressionTable.getIntercept().doubleValue() : null;
    return KiePMMLRegressionTable.builder(UUID.randomUUID().toString(), Collections.emptyList())
            .withNumericFunctionMap(numericPredictorsMap)
            .withCategoricalFunctionMap(categoricalPredictorsMap)
            .withPredictorTermsFunctionMap(predictorTermFunctionMap)
            .withResultUpdater(resultUpdater)
            .withIntercept(intercept)
            .withTargetField(compilationDTO.getTargetFieldName())
            .withTargetCategory(regressionTable.getTargetCategory())
            .build();
}
/**
 * Builds a full PMML model (categorical target, CAUCHIT normalization) around a
 * regression table and verifies the generated KiePMMLRegressionTable matches it.
 */
@Test
void getRegressionTable() {
    regressionTable = getRegressionTable(3.5, "professional");
    RegressionModel regressionModel = new RegressionModel();
    regressionModel.setNormalizationMethod(RegressionModel.NormalizationMethod.CAUCHIT);
    regressionModel.addRegressionTables(regressionTable);
    regressionModel.setModelName(getGeneratedClassName("RegressionModel"));
    String targetField = "targetField";
    DataField dataField = new DataField();
    dataField.setName(targetField);
    dataField.setOpType(OpType.CATEGORICAL);
    DataDictionary dataDictionary = new DataDictionary();
    dataDictionary.addDataFields(dataField);
    MiningField miningField = new MiningField();
    miningField.setUsageType(MiningField.UsageType.TARGET);
    miningField.setName(dataField.getName());
    MiningSchema miningSchema = new MiningSchema();
    miningSchema.addMiningFields(miningField);
    regressionModel.setMiningSchema(miningSchema);
    PMML pmml = new PMML();
    pmml.setDataDictionary(dataDictionary);
    pmml.addModels(regressionModel);
    final CommonCompilationDTO<RegressionModel> source =
            CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, regressionModel,
                                                                   new PMMLCompilationContextMock(), "FILENAME");
    final RegressionCompilationDTO compilationDTO =
            RegressionCompilationDTO.fromCompilationDTORegressionTablesAndNormalizationMethod(source,
                                                                                              new ArrayList<>(),
                                                                                              regressionModel.getNormalizationMethod());
    KiePMMLRegressionTable retrieved = KiePMMLRegressionTableFactory.getRegressionTable(regressionTable, compilationDTO);
    assertThat(retrieved).isNotNull();
    commonEvaluateRegressionTable(retrieved, regressionTable);
}
/**
 * Produces a "sane" fallback result for statements that failed: parse errors get
 * nothing; SELECT gets a synthesized query result; SHOW-like statements get a
 * canned query result; SET gets an empty update result; anything else gets nothing.
 */
@Override
public Optional<ExecuteResult> getSaneQueryResult(final SQLStatement sqlStatement, final SQLException ex) {
    // parse errors cannot be recovered with a fallback result
    if (ER_PARSE_ERROR == ex.getErrorCode()) {
        return Optional.empty();
    }
    if (sqlStatement instanceof SelectStatement) {
        return createQueryResult((SelectStatement) sqlStatement);
    }
    if (sqlStatement instanceof MySQLShowOtherStatement) {
        return Optional.of(createQueryResult());
    }
    if (sqlStatement instanceof MySQLSetStatement) {
        return Optional.of(new UpdateResult(0, 0L));
    }
    return Optional.empty();
}
/** SHOW-like statements must yield a one-row raw memory result containing "1". */
@Test
void assertGetSaneQueryResultForShowOtherStatement() {
    Optional<ExecuteResult> actual = new MySQLDialectSaneQueryResultEngine().getSaneQueryResult(new MySQLShowOtherStatement(), new SQLException(""));
    assertTrue(actual.isPresent());
    assertThat(actual.get(), instanceOf(RawMemoryQueryResult.class));
    RawMemoryQueryResult actualResult = (RawMemoryQueryResult) actual.get();
    assertThat(actualResult.getRowCount(), is(1L));
    assertTrue(actualResult.next());
    assertThat(actualResult.getValue(1, String.class), is("1"));
    assertFalse(actualResult.next());
}
/**
 * Posts the payload to the webhook URL and records the delivery outcome.
 * Never throws: any failure (invalid URL, I/O error) is captured on the delivery
 * builder as an error, and the duration is always measured.
 */
@Override
public WebhookDelivery call(Webhook webhook, WebhookPayload payload) {
    WebhookDelivery.Builder builder = new WebhookDelivery.Builder();
    long startedAt = system.now();
    builder
        .setAt(startedAt)
        .setPayload(payload)
        .setWebhook(webhook);

    try {
        HttpUrl url = HttpUrl.parse(webhook.getUrl());
        if (url == null) {
            throw new IllegalArgumentException("Webhook URL is not valid: " + webhook.getUrl());
        }
        // never expose credentials in the recorded effective URL
        builder.setEffectiveUrl(HttpUrlHelper.obfuscateCredentials(webhook.getUrl(), url));
        Request request = buildHttpRequest(url, webhook, payload);
        try (Response response = execute(request)) {
            builder.setHttpStatus(response.code());
        }
    } catch (Exception e) {
        builder.setError(e);
    }

    return builder
        .setDurationInMs((int) (system.now() - startedAt))
        .build();
}
/**
 * A successful POST must record status/timing on the delivery and send the payload
 * with the expected headers (user agent, content type, project key, no HMAC when
 * no secret is configured).
 */
@Test
public void post_payload_to_http_server() throws Exception {
    Webhook webhook = new Webhook(WEBHOOK_UUID, PROJECT_UUID, CE_TASK_UUID,
        randomAlphanumeric(40), "my-webhook", server.url("/ping").toString(), null);

    server.enqueue(new MockResponse().setBody("pong").setResponseCode(201));
    WebhookDelivery delivery = newSender(false).call(webhook, PAYLOAD);

    assertThat(delivery.getHttpStatus()).hasValue(201);
    assertThat(delivery.getWebhook().getUuid()).isEqualTo(WEBHOOK_UUID);
    assertThat(delivery.getDurationInMs().get()).isNotNegative();
    assertThat(delivery.getError()).isEmpty();
    assertThat(delivery.getAt()).isEqualTo(NOW);
    assertThat(delivery.getWebhook()).isSameAs(webhook);
    assertThat(delivery.getPayload()).isSameAs(PAYLOAD);

    RecordedRequest recordedRequest = server.takeRequest();
    assertThat(recordedRequest.getMethod()).isEqualTo("POST");
    assertThat(recordedRequest.getPath()).isEqualTo("/ping");
    assertThat(recordedRequest.getBody().readUtf8()).isEqualTo(PAYLOAD.getJson());
    assertThat(recordedRequest.getHeader("User-Agent")).isEqualTo("SonarQube/6.2");
    assertThat(recordedRequest.getHeader("Content-Type")).isEqualTo("application/json; charset=utf-8");
    assertThat(recordedRequest.getHeader("X-SonarQube-Project")).isEqualTo(PAYLOAD.getProjectKey());
    assertThat(recordedRequest.getHeader("X-Sonar-Webhook-HMAC-SHA256")).isNull();
}
/**
 * Computes a short content hash for a Secret by concatenating its data entries
 * (sorted by key, as key followed by value) and hashing the result.
 *
 * @param secret the secret to hash
 * @return the hash stub of the concatenated, key-sorted data
 * @throws RuntimeException if the secret is null, or its data map is null or empty
 */
public static String hashSecretContent(Secret secret) {
    if (secret == null) {
        throw new RuntimeException("Secret not found");
    }
    if (secret.getData() == null || secret.getData().isEmpty()) {
        throw new RuntimeException("Empty secret");
    }
    StringBuilder concatenated = new StringBuilder();
    secret.getData().entrySet().stream()
            .sorted(Map.Entry.comparingByKey())
            .forEachOrdered(e -> concatenated.append(e.getKey()).append(e.getValue()));
    return Util.hashStub(concatenated.toString());
}
// A secret without any data entries must be rejected with "Empty secret".
@Test
public void testHashSecretContentWithNoData() {
    Secret secret = new SecretBuilder().build();
    RuntimeException ex = assertThrows(RuntimeException.class,
        () -> ReconcilerUtils.hashSecretContent(secret));
    assertThat(ex.getMessage(), is("Empty secret"));
}
/**
 * Looks up a bean by name from the underlying bean factory.
 *
 * @param name the bean name
 * @param <T>  the expected bean type; the cast is unchecked, so a type
 *             mismatch surfaces as a ClassCastException at the call site
 * @return the bean registered under {@code name}
 */
@SuppressWarnings("unchecked")
public static <T> T getBean(String name) {
    return (T) getBeanFactory().getBean(name);
}
// Resolves a generic Map bean via TypeReference and checks its contents.
@Test
public void getBeanWithTypeReferenceTest() {
    Map<String, Object> mapBean = SpringUtil.getBean(new TypeReference<Map<String, Object>>() {});
    assertNotNull(mapBean);
    assertEquals("value1", mapBean.get("key1"));
    assertEquals("value2", mapBean.get("key2"));
}
/**
 * Converts a DM (Dameng) database column definition into a SeaTunnel
 * {@link Column}, mapping the DM data type to the corresponding SeaTunnel
 * type, source-type string, column length and scale.
 *
 * @param typeDefine the DM column definition (name, type, length, scale, ...)
 * @return the converted physical column
 * @throws RuntimeException (via {@code CommonError}) for unsupported DM types
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
        PhysicalColumn.builder()
            .name(typeDefine.getName())
            .nullable(typeDefine.isNullable())
            .defaultValue(typeDefine.getDefaultValue())
            .comment(typeDefine.getComment());
    String dmType = typeDefine.getDataType().toUpperCase();
    switch (dmType) {
        case DM_BIT:
            builder.sourceType(DM_BIT);
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case DM_TINYINT:
            builder.sourceType(DM_TINYINT);
            builder.dataType(BasicType.BYTE_TYPE);
            break;
        case DM_BYTE:
            builder.sourceType(DM_BYTE);
            builder.dataType(BasicType.BYTE_TYPE);
            break;
        case DM_SMALLINT:
            builder.sourceType(DM_SMALLINT);
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case DM_INT:
            builder.sourceType(DM_INT);
            builder.dataType(BasicType.INT_TYPE);
            break;
        case DM_INTEGER:
            builder.sourceType(DM_INTEGER);
            builder.dataType(BasicType.INT_TYPE);
            break;
        case DM_PLS_INTEGER:
            builder.sourceType(DM_PLS_INTEGER);
            builder.dataType(BasicType.INT_TYPE);
            break;
        case DM_BIGINT:
            builder.sourceType(DM_BIGINT);
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case DM_REAL:
            builder.sourceType(DM_REAL);
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case DM_FLOAT:
            builder.sourceType(DM_FLOAT);
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case DM_DOUBLE:
            builder.sourceType(DM_DOUBLE);
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case DM_DOUBLE_PRECISION:
            builder.sourceType(DM_DOUBLE_PRECISION);
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case DM_NUMERIC:
        case DM_NUMBER:
        case DM_DECIMAL:
        case DM_DEC:
            // All exact-numeric aliases map to a DECIMAL; fall back to
            // default precision/scale when none is declared.
            DecimalType decimalType;
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
                decimalType =
                    new DecimalType(
                        typeDefine.getPrecision().intValue(), typeDefine.getScale());
            } else {
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            }
            builder.sourceType(
                String.format(
                    "%s(%s,%s)",
                    DM_DECIMAL,
                    decimalType.getPrecision(),
                    decimalType.getScale()));
            builder.dataType(decimalType);
            builder.columnLength((long) decimalType.getPrecision());
            builder.scale(decimalType.getScale());
            break;
        case DM_CHAR:
        case DM_CHARACTER:
            builder.sourceType(String.format("%s(%s)", DM_CHAR, typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            // Length is expressed in characters; convert to a 4-byte budget.
            builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            break;
        case DM_VARCHAR:
        case DM_VARCHAR2:
            builder.sourceType(String.format("%s(%s)", DM_VARCHAR2, typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            break;
        case DM_TEXT:
            builder.sourceType(DM_TEXT);
            builder.dataType(BasicType.STRING_TYPE);
            // dm text max length is 2147483647
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_LONG:
            builder.sourceType(DM_LONG);
            builder.dataType(BasicType.STRING_TYPE);
            // dm long max length is 2147483647
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_LONGVARCHAR:
            builder.sourceType(DM_LONGVARCHAR);
            builder.dataType(BasicType.STRING_TYPE);
            // dm longvarchar max length is 2147483647
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_CLOB:
            builder.sourceType(DM_CLOB);
            builder.dataType(BasicType.STRING_TYPE);
            // dm clob max length is 2147483647
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_BINARY:
            builder.sourceType(String.format("%s(%s)", DM_BINARY, typeDefine.getLength()));
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_VARBINARY:
            builder.sourceType(String.format("%s(%s)", DM_VARBINARY, typeDefine.getLength()));
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_LONGVARBINARY:
            builder.sourceType(DM_LONGVARBINARY);
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_IMAGE:
            builder.sourceType(DM_IMAGE);
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_BLOB:
            builder.sourceType(DM_BLOB);
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_BFILE:
            builder.sourceType(DM_BFILE);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(typeDefine.getLength());
            break;
        case DM_DATE:
            builder.sourceType(DM_DATE);
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case DM_TIME:
            // Temporal types keep the declared fractional-second scale when present.
            if (typeDefine.getScale() == null) {
                builder.sourceType(DM_TIME);
            } else {
                builder.sourceType(String.format("%s(%s)", DM_TIME, typeDefine.getScale()));
            }
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case DM_TIME_WITH_TIME_ZONE:
            if (typeDefine.getScale() == null) {
                builder.sourceType(DM_TIME_WITH_TIME_ZONE);
            } else {
                builder.sourceType(
                    String.format("TIME(%s) WITH TIME ZONE", typeDefine.getScale()));
            }
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case DM_TIMESTAMP:
            if (typeDefine.getScale() == null) {
                builder.sourceType(DM_TIMESTAMP);
            } else {
                builder.sourceType(
                    String.format("%s(%s)", DM_TIMESTAMP, typeDefine.getScale()));
            }
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case DM_DATETIME:
            if (typeDefine.getScale() == null) {
                builder.sourceType(DM_DATETIME);
            } else {
                builder.sourceType(String.format("%s(%s)", DM_DATETIME, typeDefine.getScale()));
            }
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case DM_DATETIME_WITH_TIME_ZONE:
            if (typeDefine.getScale() == null) {
                builder.sourceType(DM_DATETIME_WITH_TIME_ZONE);
            } else {
                builder.sourceType(
                    String.format("DATETIME(%s) WITH TIME ZONE", typeDefine.getScale()));
            }
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                DatabaseIdentifier.DAMENG, typeDefine.getDataType(), typeDefine.getName());
    }
    return builder.build();
}
// DM BLOB maps to a byte-array type, preserving name, length and source type.
@Test
public void testConvertBlob() {
    BasicTypeDefine<Object> typeDefine =
        BasicTypeDefine.builder()
            .name("test")
            .columnType("blob")
            .dataType("blob")
            .length(2147483647L)
            .build();
    Column column = DmdbTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
    Assertions.assertEquals(2147483647L, column.getColumnLength());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
}
/**
 * Queues newly discovered splits for assignment by delegating to
 * {@code addSplits}.
 *
 * @param splits splits discovered by the enumerator
 */
@Override
public void onDiscoveredSplits(Collection<IcebergSourceSplit> splits) {
    addSplits(splits);
}
// 4 files packed 2-per-split yield exactly two assignable splits.
@Test
public void testMultipleFilesInASplit() throws Exception {
    SplitAssigner assigner = splitAssigner();
    assigner.onDiscoveredSplits(
        SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 4, 2));
    assertGetNext(assigner, GetSplitResult.Status.AVAILABLE);
    assertSnapshot(assigner, 1);
    assertGetNext(assigner, GetSplitResult.Status.AVAILABLE);
    assertGetNext(assigner, GetSplitResult.Status.UNAVAILABLE);
    assertSnapshot(assigner, 0);
}
/**
 * Returns the registered streaming service with the given id.
 *
 * @param serviceId the id of the service to look up
 * @return the matching service
 * @throws ExtractionException if no registered service has that id
 */
public static StreamingService getService(final int serviceId) throws ExtractionException {
    for (final StreamingService candidate : ServiceList.all()) {
        if (candidate.getServiceId() == serviceId) {
            return candidate;
        }
    }
    throw new ExtractionException(
            "There's no service with the id = \"" + serviceId + "\"");
}
// Looking up YouTube's own service id returns the YouTube service instance.
@Test
public void getServiceWithId() throws Exception {
    assertEquals(NewPipe.getService(YouTube.getServiceId()), YouTube);
}
/**
 * Handles a method-call command: reads the target object id, the method name
 * and the argument list from the protocol stream, invokes the method, and
 * writes the protocol-encoded result back to the client.
 *
 * @param commandName the command character that triggered this handler
 * @param reader      protocol input stream
 * @param writer      protocol output stream
 * @throws Py4JException on protocol-level failures
 * @throws IOException   on stream failures
 */
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
        throws Py4JException, IOException {
    String targetObjectId = reader.readLine();
    String methodName = reader.readLine();
    List<Object> arguments = getArguments(reader);
    ReturnObject returnObject = invokeMethod(methodName, targetObjectId, arguments);
    String returnCommand = Protocol.getOutputCommand(returnObject);
    logger.finest("Returning command: " + returnCommand);
    writer.write(returnCommand);
    // Flush so the client sees the response immediately.
    writer.flush();
}
// Calling a non-existent method must produce a Py4JException error response.
@Test
public void testReflectionException() {
    String inputCommand = "z:java.lang.String\nvalueOf2\ni123\ne\n";
    try {
        command.execute("c", new BufferedReader(new StringReader(inputCommand)), writer);
        assertTrue(sWriter.toString().startsWith("!xspy4j.Py4JException: "));
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
/**
 * Looks up the index entry for the given log index under the read lock.
 * Returns an empty entry when the index precedes this file's first log index.
 *
 * @param logIndex absolute log index to resolve
 * @return the parsed entry, or an empty {@code IndexEntry} when out of range
 */
public IndexEntry lookupIndex(final long logIndex) {
    // Ensure the file is memory-mapped before reading.
    mapInIfNecessary();
    this.readLock.lock();
    try {
        final ByteBuffer byteBuffer = sliceByteBuffer();
        // Entries are stored densely, starting at the header's first log index.
        final int slot = (int) (logIndex - this.header.getFirstLogIndex());
        if (slot < 0) {
            return IndexEntry.newInstance();
        } else {
            // NOTE(review): there is no upper-bound check on slot — presumably
            // callers only request indexes covered by this file; confirm.
            return parseEntry(byteBuffer, slot);
        }
    } finally {
        this.readLock.unlock();
    }
}
// Each appended entry can be looked up again by its offset.
@Test
public void testLooUp() {
    testAppendIndex();
    final IndexEntry entry0 = this.offsetIndex.lookupIndex(appendEntry0.getOffset());
    assertEquals(appendEntry0.getOffset(), entry0.getOffset());
    final IndexEntry entry1 = this.offsetIndex.lookupIndex(appendEntry1.getOffset());
    assertEquals(appendEntry1.getOffset(), entry1.getOffset());
    final IndexEntry entry2 = this.offsetIndex.lookupIndex(appendEntry2.getOffset());
    assertEquals(appendEntry2.getOffset(), entry2.getOffset());
}
/**
 * Returns the mapped field types for the requested streams, restricted to
 * the streams the calling user can read (all readable streams when the
 * request lists none). The time range defaults to "all time".
 */
@POST
@ApiOperation(value = "Retrieve the field list of a given set of streams")
@NoAuditEvent("This is not changing any data")
public Set<MappedFieldTypeDTO> byStreams(@ApiParam(name = "JSON body", required = true) @Valid @NotNull FieldTypesForStreamsRequest request,
                                         @Context SearchUser searchUser) {
    final ImmutableSet<String> streams = searchUser.streams().readableOrAllIfEmpty(request.streams());
    return mappedFieldTypesService.fieldTypesByStreamIds(streams, request.timerange().orElse(RelativeRange.allTime()));
}
// The resource passes the requested stream ids and an all-time range to the
// service and returns its result unchanged.
@Test
public void byStreamReturnsTypesFromMappedFieldTypesService() {
    final SearchUser searchUser = TestSearchUser.builder()
        .allowStream("2323")
        .allowStream("4242")
        .build();
    // Stub service that verifies the arguments it receives.
    final MappedFieldTypesService fieldTypesService = (streamIds, timeRange) -> {
        if (ImmutableSet.of("2323", "4242").equals(streamIds) && timeRange.equals(RelativeRange.allTime())) {
            final FieldTypes.Type fieldType = FieldTypes.Type.createType("long", ImmutableSet.of("numeric", "enumerable"));
            final MappedFieldTypeDTO field = MappedFieldTypeDTO.create("foobar", fieldType);
            return Collections.singleton(field);
        } else {
            throw new AssertionError("Expected allTime range and 2323, 4242 stream IDs");
        }
    };
    final FieldTypesForStreamsRequest request = FieldTypesForStreamsRequest.Builder.builder()
        .streams(ImmutableSet.of("2323", "4242"))
        .build();
    final Set<MappedFieldTypeDTO> result = new FieldTypesResource(fieldTypesService, mock(IndexFieldTypePollerPeriodical.class)).byStreams(
        request,
        searchUser
    );
    assertThat(result)
        .hasSize(1)
        .extracting(MappedFieldTypeDTO::name)
        .containsOnly("foobar");
}
/**
 * Creates a persistent window store supplier with timestamped values
 * disabled, delegating to the five-argument overload.
 *
 * @throws IllegalArgumentException if the retention period or window size is invalid
 */
public static WindowBytesStoreSupplier persistentWindowStore(final String name,
                                                             final Duration retentionPeriod,
                                                             final Duration windowSize,
                                                             final boolean retainDuplicates) throws IllegalArgumentException {
    return persistentWindowStore(name, retentionPeriod, windowSize, retainDuplicates, false);
}
// A negative window size must be rejected with a descriptive message.
@Test
public void shouldThrowIfIPersistentWindowStoreIfWindowSizeIsNegative() {
    final Exception e = assertThrows(IllegalArgumentException.class,
        () -> Stores.persistentWindowStore("anyName", ofMillis(0L), ofMillis(-1L), false));
    assertEquals("windowSize cannot be negative", e.getMessage());
}
/**
 * Builds the schema of a sort key by applying each sort field's transform to
 * its source column. Field ids are the transform's position in the sort
 * order and names are {@code sourceName_index}, so repeated transforms on
 * the same source column cannot collide.
 *
 * @param schema    the table schema containing the source columns
 * @param sortOrder the sort order whose fields define the key
 * @return the transformed sort-key schema
 * @throws IllegalArgumentException if a sort field references a missing column
 */
static Schema sortKeySchema(Schema schema, SortOrder sortOrder) {
    List<SortField> sortFields = sortOrder.fields();
    int size = sortFields.size();
    List<Types.NestedField> transformedFields = Lists.newArrayListWithCapacity(size);
    for (int i = 0; i < size; ++i) {
        int sourceFieldId = sortFields.get(i).sourceId();
        Types.NestedField sourceField = schema.findField(sourceFieldId);
        Preconditions.checkArgument(
            sourceField != null, "Cannot find source field: %s", sourceFieldId);
        Type transformedType = sortFields.get(i).transform().getResultType(sourceField.type());
        // There could be multiple transformations on the same source column, like in the PartitionKey
        // case. To resolve the collision, field id is set to transform index and field name is set to
        // sourceFieldName_transformIndex
        Types.NestedField transformedField =
            Types.NestedField.of(
                i,
                sourceField.isOptional(),
                sourceField.name() + '_' + i,
                transformedType,
                sourceField.doc());
        transformedFields.add(transformedField);
    }
    return new Schema(transformedFields);
}
// Nested source fields with identity/hour/bucket/truncate transforms produce
// positional ids and sourceName_index names in the sort-key schema.
@Test
public void testResultSchema() {
    Schema schema =
        new Schema(
            Types.NestedField.required(1, "id", Types.StringType.get()),
            Types.NestedField.required(2, "ratio", Types.DoubleType.get()),
            Types.NestedField.optional(
                3,
                "user",
                Types.StructType.of(
                    Types.NestedField.required(11, "name", Types.StringType.get()),
                    Types.NestedField.required(12, "ts", Types.TimestampType.withoutZone()),
                    Types.NestedField.optional(13, "device_id", Types.UUIDType.get()),
                    Types.NestedField.optional(
                        14,
                        "location",
                        Types.StructType.of(
                            Types.NestedField.required(101, "lat", Types.FloatType.get()),
                            Types.NestedField.required(102, "long", Types.FloatType.get()),
                            Types.NestedField.required(103, "blob", Types.BinaryType.get()))))));
    SortOrder sortOrder =
        SortOrder.builderFor(schema)
            .asc("ratio")
            .sortBy(Expressions.hour("user.ts"), SortDirection.ASC, NullOrder.NULLS_FIRST)
            .sortBy(
                Expressions.bucket("user.device_id", 16), SortDirection.ASC, NullOrder.NULLS_FIRST)
            .sortBy(
                Expressions.truncate("user.location.blob", 16),
                SortDirection.ASC,
                NullOrder.NULLS_FIRST)
            .build();
    assertThat(SortKeyUtil.sortKeySchema(schema, sortOrder).asStruct())
        .isEqualTo(
            Types.StructType.of(
                Types.NestedField.required(0, "ratio_0", Types.DoubleType.get()),
                Types.NestedField.required(1, "ts_1", Types.IntegerType.get()),
                Types.NestedField.optional(2, "device_id_2", Types.IntegerType.get()),
                Types.NestedField.required(3, "blob_3", Types.BinaryType.get())));
}
/**
 * Forwards an HTTP/2 priority update to the byte distributor. Arguments are
 * only sanity-checked with assertions because callers (e.g. the frame
 * reader) are expected to have validated them already.
 */
@Override
public void updateDependencyTree(int childStreamId, int parentStreamId, short weight, boolean exclusive) {
    // It is assumed there are all validated at a higher level. For example in the Http2FrameReader.
    assert weight >= MIN_WEIGHT && weight <= MAX_WEIGHT : "Invalid weight";
    assert childStreamId != parentStreamId : "A stream cannot depend on itself";
    assert childStreamId > 0 && parentStreamId >= 0 : "childStreamId must be > 0. parentStreamId must be >= 0.";
    streamByteDistributor.updateDependencyTree(childStreamId, parentStreamId, weight, exclusive);
}
// With assertions enabled, a weight above MAX_WEIGHT trips the assert.
@Test
public void invalidWeightTooBigThrows() {
    assertThrows(AssertionError.class, new Executable() {
        @Override
        public void execute() throws Throwable {
            controller.updateDependencyTree(STREAM_A, STREAM_D, (short) (MAX_WEIGHT + 1), true);
        }
    });
}
/**
 * Returns whether the metric key is known and may be sent in the report.
 * An unknown key is logged once (at debug level) and then remembered so
 * repeated validations do not spam the log.
 */
@Override
public boolean validate(String metricKey) {
    Metric metric = metricByKey.get(metricKey);
    if (metric == null) {
        if (!alreadyLoggedMetricKeys.contains(metricKey)) {
            LOG.debug("The metric '{}' is ignored and should not be send in the batch report", metricKey);
            alreadyLoggedMetricKeys.add(metricKey);
        }
        return false;
    }
    return true;
}
// The "ignored metric" warning is emitted only once per metric key.
@Test
public void not_generate_new_log_when_validating_twice_the_same_metric() {
    when(scannerMetrics.getMetrics()).thenReturn(Collections.emptySet());
    ReportMetricValidator validator = new ReportMetricValidatorImpl(scannerMetrics);
    assertThat(validator.validate(METRIC_KEY)).isFalse();
    assertThat(logTester.logs()).filteredOn(expectedLog::equals).hasSize(1);
    assertThat(validator.validate(METRIC_KEY)).isFalse();
    assertThat(logTester.logs()).filteredOn(expectedLog::equals).hasSize(1);
}
/**
 * Incremental SPDY frame decoder. Consumes as many complete frames (or
 * frame fragments, for DATA and header blocks) as the buffer holds and
 * dispatches them to the delegate. When fewer bytes than a state needs are
 * readable, the method returns and resumes from the same state on the next
 * call; {@code state}, {@code streamId}, {@code flags}, {@code length} and
 * {@code numSettings} carry the decoder state across calls.
 *
 * @param buffer the accumulated input; consumed as frames are decoded
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch (state) {
            case READ_COMMON_HEADER:
                if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                    return;
                }
                int frameOffset = buffer.readerIndex();
                int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
                int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
                buffer.skipBytes(SPDY_HEADER_SIZE);
                // High bit of the first byte distinguishes control from data frames.
                boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
                int version;
                int type;
                if (control) {
                    // Decode control frame common header
                    version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                    type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                    streamId = 0; // Default to session Stream-ID
                } else {
                    // Decode data frame common header
                    version = spdyVersion; // Default to expected version
                    type = SPDY_DATA_FRAME;
                    streamId = getUnsignedInt(buffer, frameOffset);
                }
                flags = buffer.getByte(flagsOffset);
                length = getUnsignedMedium(buffer, lengthOffset);
                // Check version first then validity
                if (version != spdyVersion) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SPDY Version");
                } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid Frame Error");
                } else {
                    state = getNextState(type, length);
                }
                break;
            case READ_DATA_FRAME:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                    break;
                }
                // Generate data frames that do not exceed maxChunkSize
                int dataLength = Math.min(maxChunkSize, length);
                // Wait until entire frame is readable
                if (buffer.readableBytes() < dataLength) {
                    return;
                }
                ByteBuf data = buffer.alloc().buffer(dataLength);
                data.writeBytes(buffer, dataLength);
                length -= dataLength;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                }
                // FIN is only reported on the final chunk of the frame.
                last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
                delegate.readDataFrame(streamId, last, data);
                break;
            case READ_SYN_STREAM_FRAME:
                if (buffer.readableBytes() < 10) {
                    return;
                }
                int offset = buffer.readerIndex();
                streamId = getUnsignedInt(buffer, offset);
                int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
                // Priority is the top 3 bits of the 9th byte.
                byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
                last = hasFlag(flags, SPDY_FLAG_FIN);
                boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
                buffer.skipBytes(10);
                length -= 10;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_STREAM Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
                }
                break;
            case READ_SYN_REPLY_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_REPLY Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynReplyFrame(streamId, last);
                }
                break;
            case READ_RST_STREAM_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (streamId == 0 || statusCode == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid RST_STREAM Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readRstStreamFrame(streamId, statusCode);
                }
                break;
            case READ_SETTINGS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
                numSettings = getUnsignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                length -= 4;
                // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
                if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SETTINGS Frame");
                } else {
                    state = State.READ_SETTING;
                    delegate.readSettingsFrame(clear);
                }
                break;
            case READ_SETTING:
                if (numSettings == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readSettingsEnd();
                    break;
                }
                if (buffer.readableBytes() < 8) {
                    return;
                }
                byte settingsFlags = buffer.getByte(buffer.readerIndex());
                int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
                int value = getSignedInt(buffer, buffer.readerIndex() + 4);
                boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
                boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
                buffer.skipBytes(8);
                --numSettings;
                delegate.readSetting(id, value, persistValue, persisted);
                break;
            case READ_PING_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                int pingId = getSignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                state = State.READ_COMMON_HEADER;
                delegate.readPingFrame(pingId);
                break;
            case READ_GOAWAY_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                state = State.READ_COMMON_HEADER;
                delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
                break;
            case READ_HEADERS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid HEADERS Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readHeadersFrame(streamId, last);
                }
                break;
            case READ_WINDOW_UPDATE_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (deltaWindowSize == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
                }
                break;
            case READ_HEADER_BLOCK:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readHeaderBlockEnd();
                    break;
                }
                if (!buffer.isReadable()) {
                    return;
                }
                // Header blocks may be delivered in multiple compressed chunks.
                int compressedBytes = Math.min(buffer.readableBytes(), length);
                ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
                headerBlock.writeBytes(buffer, compressedBytes);
                length -= compressedBytes;
                delegate.readHeaderBlock(headerBlock);
                break;
            case DISCARD_FRAME:
                // Skip over the remainder of a frame we chose to ignore.
                int numBytes = Math.min(buffer.readableBytes(), length);
                buffer.skipBytes(numBytes);
                length -= numBytes;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    break;
                }
                return;
            case FRAME_ERROR:
                // Once in error, all remaining input is discarded.
                buffer.skipBytes(buffer.readableBytes());
                return;
            default:
                throw new Error("Shouldn't reach here.");
        }
    }
}
// A well-formed WINDOW_UPDATE frame is fully consumed and dispatched.
@Test
public void testSpdyWindowUpdateFrame() throws Exception {
    short type = 9;
    byte flags = 0;
    int length = 8;
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF;
    // Delta window size must be non-zero, hence the forced low bit.
    int deltaWindowSize = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(streamId);
    buf.writeInt(deltaWindowSize);
    decoder.decode(buf);
    verify(delegate).readWindowUpdateFrame(streamId, deltaWindowSize);
    assertFalse(buf.isReadable());
}
/**
 * Satisfied when either of the two combined rules is satisfied at the given
 * index. Evaluation short-circuits: the second rule is only consulted when
 * the first one is not satisfied. The result is traced before returning.
 */
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    boolean satisfied = rule1.isSatisfied(index, tradingRecord);
    if (!satisfied) {
        satisfied = rule2.isSatisfied(index, tradingRecord);
    }
    traceIsSatisfied(index, satisfied);
    return satisfied;
}
// OR truth table: true whenever at least one operand is satisfied,
// regardless of operand order.
@Test
public void isSatisfied() {
    assertTrue(satisfiedRule.or(BooleanRule.FALSE).isSatisfied(0));
    assertTrue(BooleanRule.FALSE.or(satisfiedRule).isSatisfied(0));
    assertFalse(unsatisfiedRule.or(BooleanRule.FALSE).isSatisfied(0));
    assertFalse(BooleanRule.FALSE.or(unsatisfiedRule).isSatisfied(0));
    assertTrue(satisfiedRule.or(BooleanRule.TRUE).isSatisfied(10));
    assertTrue(BooleanRule.TRUE.or(satisfiedRule).isSatisfied(10));
    assertTrue(unsatisfiedRule.or(BooleanRule.TRUE).isSatisfied(10));
    assertTrue(BooleanRule.TRUE.or(unsatisfiedRule).isSatisfied(10));
}
/**
 * Selects the SDKs a test should run against: those configured via
 * {@code @Config} and the manifest's sdk constraints, intersected with the
 * globally enabled SDKs when that filter is set. The result is sorted
 * (TreeSet ordering) and deduplicated.
 */
@Override
@Nonnull
public List<Sdk> selectSdks(Configuration configuration, UsesSdk usesSdk) {
    Config config = configuration.get(Config.class);
    Set<Sdk> sdks = new TreeSet<>(configuredSdks(config, usesSdk));
    if (enabledSdks != null) {
        sdks = Sets.intersection(sdks, enabledSdks);
    }
    return Lists.newArrayList(sdks);
}
// targetSdkVersion above maxSdkVersion is a configuration error.
@Test
public void withTargetSdkGreaterThanMaxSdk_shouldThrowError() throws Exception {
    when(usesSdk.getMaxSdkVersion()).thenReturn(21);
    when(usesSdk.getTargetSdkVersion()).thenReturn(22);
    try {
        sdkPicker.selectSdks(buildConfig(new Config.Builder()), usesSdk);
        fail();
    } catch (IllegalArgumentException e) {
        assertThat(e).hasMessageThat().contains("Package targetSdkVersion=22 > maxSdkVersion=21");
    }
}
/**
 * Finds the model with the given id inside a list of diff payloads.
 * Each payload either wraps a single model or a map of models keyed by id.
 *
 * @param payloads change payloads; every element is expected to be a {@code DiffPayload}
 * @param modelId  the id to look up
 * @return the matching model, or {@code null} if none of the payloads contains it
 */
@Nullable
public static EpoxyModel<?> getModelFromPayload(List<Object> payloads, long modelId) {
    if (payloads.isEmpty()) {
        return null;
    }
    for (Object payload : payloads) {
        DiffPayload diffPayload = (DiffPayload) payload;
        if (diffPayload.singleModel != null) {
            // Single-model payload: compare ids directly.
            if (diffPayload.singleModel.id() == modelId) {
                return diffPayload.singleModel;
            }
        } else {
            // Multi-model payload: look up by id.
            EpoxyModel<?> modelForId = diffPayload.modelsById.get(modelId);
            if (modelForId != null) {
                return modelForId;
            }
        }
    }
    return null;
}
// A single-model payload resolves to that model by id.
@Test
public void getSingleModelFromPayload() {
    TestModel model = new TestModel();
    List<Object> payloads = payloadsWithChangedModels(model);
    EpoxyModel<?> modelFromPayload = getModelFromPayload(payloads, model.id());
    assertEquals(model, modelFromPayload);
}
/**
 * Returns the Kubernetes service account configured for the JobManager.
 */
public String getServiceAccount() {
    return flinkConfig.get(KubernetesConfigOptions.JOB_MANAGER_SERVICE_ACCOUNT);
}
// The configured JobManager service account is returned verbatim.
@Test
void testGetServiceAccount() {
    flinkConfig.set(KubernetesConfigOptions.JOB_MANAGER_SERVICE_ACCOUNT, "flink");
    assertThat(kubernetesJobManagerParameters.getServiceAccount()).isEqualTo("flink");
}
/**
 * Resolves a model parameter (id / url / path) to a {@code ModelReference}.
 * In hosted Vespa a model id is translated to a download URL (validated
 * against the required tags); outside hosted, an id alone is rejected since
 * there is nothing to resolve it against.
 *
 * @throws IllegalArgumentException if only a model id is given outside hosted Vespa
 */
public static ModelReference resolveToModelReference(
        String paramName, Optional<String> id, Optional<String> url, Optional<String> path,
        Set<String> requiredTags, DeployState state) {
    if (id.isEmpty()) return createModelReference(Optional.empty(), url, path, state);
    else if (state.isHosted())
        return createModelReference(id, Optional.of(modelIdToUrl(paramName, id.get(), requiredTags)), Optional.empty(), state);
    else if (url.isEmpty() && path.isEmpty()) throw onlyModelIdInHostedException(paramName);
    else return createModelReference(id, url, path, state);
}
// A known model id resolves only when the caller requires tags the model
// actually carries; missing required tags produce a descriptive error.
@Test
void throws_on_known_model_with_missing_tags() {
    var state = new DeployState.Builder().properties(new TestProperties().setHostedVespa(true)).build();
    var e = assertThrows(IllegalArgumentException.class,
        () -> ModelIdResolver.resolveToModelReference(
            "param", Optional.of("minilm-l6-v2"), Optional.empty(), Optional.empty(),
            Set.of(HF_TOKENIZER), state));
    var expectedMsg = "Model 'minilm-l6-v2' on 'param' has tags [onnx-model] but are missing required tags [huggingface-tokenizer]";
    assertEquals(expectedMsg, e.getMessage());
    assertDoesNotThrow(
        () -> ModelIdResolver.resolveToModelReference(
            "param", Optional.of("minilm-l6-v2"), Optional.empty(), Optional.empty(),
            Set.of(ONNX_MODEL), state));
}
/**
 * FEEL {@code insert before} function: returns a new list with
 * {@code newItem} inserted before the 1-based {@code position}. A negative
 * position counts from the end of the list (-1 is the last element).
 *
 * @return the new list, or an error event when the list/position is null,
 *         the position is zero, or the position exceeds the list size
 */
public FEELFnResult<List> invoke(@ParameterName( "list" ) List list, @ParameterName( "position" ) BigDecimal position, @ParameterName( "newItem" ) Object newItem) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    if ( position == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be null"));
    }
    // NOTE(review): position.intValue() truncates very large BigDecimals —
    // presumably positions always fit in an int here; confirm.
    if ( position.intValue() == 0 ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be zero (parameter 'position' is 1-based)"));
    }
    if ( position.abs().intValue() > list.size() ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "inconsistent with 'list' size"));
    }
    // spec requires us to return a new list
    final List<Object> result = new ArrayList<>( list );
    if( position.intValue() > 0 ) {
        result.add( position.intValue() - 1, newItem );
    } else {
        // Negative positions index from the end of the list.
        result.add( list.size() + position.intValue(), newItem );
    }
    return FEELFnResult.ofResult( result );
}
// A null list (with a null position) is reported as an invalid-parameters error.
@Test
void invokeListPositionNull() {
    FunctionTestUtil.assertResultError(insertBeforeFunction.invoke(null, null, new Object()),
        InvalidParametersEvent.class);
}
/**
 * Executes this request synchronously via the configured web3j service.
 *
 * @return the deserialized response of this request's response type
 * @throws IOException on transport failure
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
// The request serializes to the expected eth_getUncleByBlockNumberAndIndex
// JSON-RPC payload.
@Test
public void testEthGetUncleByBlockNumberAndIndex() throws Exception {
    web3j.ethGetUncleByBlockNumberAndIndex(
            DefaultBlockParameter.valueOf(Numeric.toBigInt("0x29c")), BigInteger.ZERO)
        .send();
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getUncleByBlockNumberAndIndex\","
            + "\"params\":[\"0x29c\",\"0x0\"],\"id\":1}");
}
/**
 * Returns the framework error code carried by this exception.
 */
public FrameworkErrorCode getErrcode() {
    return errcode;
}
// The thrown FrameworkException exposes both the message and the error code.
@Test
public void testGetErrcode() {
    Throwable throwable = Assertions.assertThrows(FrameworkException.class, () -> {
        message.print4();
    });
    assertThat(throwable).hasMessage(FrameworkErrorCode.UnknownAppError.getErrMessage());
    assertThat(((FrameworkException) throwable).getErrcode()).isEqualTo(FrameworkErrorCode.UnknownAppError);
}
/**
 * Executes a query synchronously by blocking on the async variant with the
 * configured broker read timeout. Any failure (timeout, I/O, parse error)
 * is wrapped in a {@link PinotClientException}.
 */
@Override
public BrokerResponse executeQuery(String brokerAddress, String query) throws PinotClientException {
    try {
        return executeQueryAsync(brokerAddress, query).get(_brokerReadTimeout, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        throw new PinotClientException(e);
    }
}
// A truncated JSON body from the broker surfaces as a PinotClientException
// whose root cause is Jackson's EOF error.
@Test
public void invalidJsonResponseTriggersPinotClientException() {
    _responseJson = "{";
    JsonAsyncHttpPinotClientTransportFactory factory = new JsonAsyncHttpPinotClientTransportFactory();
    JsonAsyncHttpPinotClientTransport transport =
        (JsonAsyncHttpPinotClientTransport) factory.buildTransport();
    try {
        transport.executeQuery("localhost:" + _dummyServer.getAddress().getPort(), "select * from planets");
        fail("expected exception was not thrown");
    } catch (PinotClientException exception) {
        Throwable cause = ExceptionUtils.getRootCause(exception);
        assertEquals(cause.getClass().getName(), "com.fasterxml.jackson.core.io.JsonEOFException");
    }
}
/**
 * Returns the number of columns of this matrix.
 */
@Override
public int ncol() {
    return n;
}
// The sparse test matrix has three columns.
@Test
public void testNcols() {
    System.out.println("ncol");
    assertEquals(3, sparse.ncol());
}
/**
 * Resets the coordinator (and its subtask gateways) to the given checkpoint.
 * May be invoked before the main-thread executor exists (initial savepoint
 * restore during execution-graph construction), in which case gateway setup
 * is deferred to {@code lazyInitialize}.
 *
 * @param checkpointId   id of the checkpoint to restore
 * @param checkpointData serialized coordinator state, or null
 */
@Override
public void resetToCheckpoint(long checkpointId, @Nullable byte[] checkpointData) throws Exception {
    // the first time this method is called is early during execution graph construction,
    // before the main thread executor is set. hence this conditional check.
    if (mainThreadExecutor != null) {
        mainThreadExecutor.assertRunningInMainThread();
    }
    // Re-open all gateways and drop any checkpoint markers so events can flow again.
    subtaskGatewayMap.values().forEach(SubtaskGatewayImpl::openGatewayAndUnmarkAllCheckpoint);
    context.resetFailed();
    // when initial savepoints are restored, this call comes before the mainThreadExecutor
    // is available, which is needed to set up these gateways. So during the initial restore,
    // we ignore this, and instead the gateways are set up in the "lazyInitialize" method, which
    // is called when the scheduler is properly set up.
    // this is a bit clumsy, but it is caused by the non-straightforward initialization of the
    // ExecutionGraph and Scheduler.
    if (mainThreadExecutor != null) {
        setupAllSubtaskGateways();
    }
    coordinator.resetToCheckpoint(checkpointId, checkpointData);
}
// After a reset, the subtask gateways are open and events reach the tasks.
@Test
void restoreOpensGatewayEvents() throws Exception {
    final EventReceivingTasks tasks = EventReceivingTasks.createForRunningTasks();
    final OperatorCoordinatorHolder holder =
        createCoordinatorHolder(tasks, TestingOperatorCoordinator::new);
    triggerAndCompleteCheckpoint(holder, 1000L);
    holder.resetToCheckpoint(1L, new byte[0]);
    getCoordinator(holder).getSubtaskGateway(1).sendEvent(new TestOperatorEvent(999));
    assertThat(tasks.getSentEventsForSubtask(1)).containsExactly(new TestOperatorEvent(999));
}
/**
 * Logs a deprecation message at WARN level on the underlying logger.
 *
 * @param message the parameterized log message
 * @param params  substitution arguments for the message placeholders
 */
@Override
public void deprecated(String message, Object... params) {
    logger.warn(message, params);
}
// A deprecation message ends up in the dedicated deprecation log file.
@Test
public void testDeprecationLoggerWriteOut_root() throws IOException {
    final DefaultDeprecationLogger deprecationLogger =
        new DefaultDeprecationLogger(LogManager.getLogger("test"));
    // Exercise
    deprecationLogger.deprecated("Simple deprecation message");
    String logs = LogTestUtils.loadLogFileContent("logstash-deprecation.log");
    assertTrue("Deprecation logs MUST contains the out line",
        logs.matches(".*\\[deprecation\\.test.*\\].*Simple deprecation message"));
}
/**
 * Evaluates this define-function against the given argument values.
 *
 * <p>Each declared parameter field is bound, in order, to the corresponding entry of
 * {@code paramValues} and registered on the {@code processingDTO}; the wrapped expression
 * is then evaluated and its result converted via {@code commonEvaluate}.
 *
 * @param processingDTO evaluation context the bound name/value pairs are added to
 * @param paramValues   actual argument values; must supply at least one value per parameter
 * @return the evaluated (and type-coerced) result of the inner expression
 * @throws IllegalArgumentException if fewer values than declared parameters are supplied
 */
public Object evaluate(final ProcessingDTO processingDTO, final List<Object> paramValues) {
    if (parameterFields != null) {
        // guard: every declared parameter needs a value (extra values are ignored)
        if (paramValues == null || paramValues.size() < parameterFields.size()) {
            throw new IllegalArgumentException("Expected at least " + parameterFields.size() +
                    " arguments for " + name + " DefineFunction");
        }
        // bind each parameter name to its positional argument directly on the DTO
        for (int index = 0; index < parameterFields.size(); index++) {
            processingDTO.addKiePMMLNameValue(
                    new KiePMMLNameValue(parameterFields.get(index).getName(),
                                         paramValues.get(index)));
        }
    }
    return commonEvaluate(kiePMMLExpression.evaluate(processingDTO), dataType);
}
@Test void evaluateFromApply() { // <DefineFunction name="CUSTOM_FUNCTION" optype="continuous" dataType="double"> // <ParameterField name="PARAM_1"/> // <ParameterField field="PARAM_2"/> // <Apply function="/"> // <FieldRef>PARAM_1</FieldRef> // <FieldRef>PARAM_2</FieldRef> // </Apply> // </DefineFunction> final KiePMMLFieldRef kiePMMLFieldRef1 = new KiePMMLFieldRef(PARAM_1, Collections.emptyList(), null); final KiePMMLFieldRef kiePMMLFieldRef2 = new KiePMMLFieldRef(PARAM_2, Collections.emptyList(), null); final KiePMMLApply kiePMMLApply = KiePMMLApply.builder("NAME", Collections.emptyList(), "/") .withKiePMMLExpressions(Arrays.asList(kiePMMLFieldRef1, kiePMMLFieldRef2)) .build(); final KiePMMLParameterField parameterField1 = KiePMMLParameterField.builder(PARAM_1, Collections.emptyList()).build(); final KiePMMLParameterField parameterField2 = KiePMMLParameterField.builder(PARAM_2, Collections.emptyList()).build(); final KiePMMLDefineFunction defineFunction = new KiePMMLDefineFunction(CUSTOM_FUNCTION, Collections.emptyList(), DATA_TYPE.DOUBLE, OP_TYPE.CONTINUOUS, Arrays.asList(parameterField1, parameterField2), kiePMMLApply); ProcessingDTO processingDTO = getProcessingDTO(new ArrayList<>()); Object retrieved = defineFunction.evaluate(processingDTO, Arrays.asList(value1, value2)); Object expected = value1 / value2; assertThat(retrieved).isEqualTo(expected); }
/**
 * Fluent setter for the order status.
 *
 * @param status the new status value to assign to this order
 * @return this {@code Order} instance, enabling method chaining
 */
public Order status(StatusEnum status) {
    this.status = status;
    return this;
}
/**
 * Placeholder for a test of the {@code status} property.
 *
 * <p>NOTE(review): presumably an auto-generated stub (e.g. by a code generator) that was
 * never filled in — confirm and either implement assertions or remove the stub.
 */
@Test
public void statusTest() {
    // TODO: test status
}