focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Builds a Calcite {@link RelDataType} row type mirroring the given Pinot schema.
 * Each field spec becomes one named field in declaration order; nullability of each
 * field follows the schema's column-based null handling flag.
 *
 * @param schema Pinot schema to convert
 * @return row type with one field per schema column
 */
public RelDataType createRelDataTypeFromSchema(Schema schema) {
  Builder builder = new Builder(this);
  boolean enableNullHandling = schema.isEnableColumnBasedNullHandling();
  schema.getFieldSpecMap()
      .forEach((columnName, fieldSpec) -> builder.add(columnName, toRelDataType(fieldSpec, enableNullHandling)));
  return builder.build();
}
/**
 * Verifies schema-to-RelDataType conversion for every supported Pinot column type:
 * scalar columns map to the corresponding SQL types (INT->INTEGER, LONG->BIGINT,
 * FLOAT->REAL, DOUBLE->DOUBLE, STRING/JSON->VARCHAR with UTF-8 collation,
 * BYTES->VARBINARY) and multi-value columns map to the matching ArraySqlType.
 */
@Test
public void testRelDataTypeConversion() {
    TypeFactory typeFactory = new TypeFactory();
    // Schema covering all single-value and multi-value dimension types under test.
    Schema testSchema = new Schema.SchemaBuilder().addSingleValueDimension("INT_COL", FieldSpec.DataType.INT)
        .addSingleValueDimension("LONG_COL", FieldSpec.DataType.LONG)
        .addSingleValueDimension("FLOAT_COL", FieldSpec.DataType.FLOAT)
        .addSingleValueDimension("DOUBLE_COL", FieldSpec.DataType.DOUBLE)
        .addSingleValueDimension("STRING_COL", FieldSpec.DataType.STRING)
        .addSingleValueDimension("BYTES_COL", FieldSpec.DataType.BYTES)
        .addSingleValueDimension("JSON_COL", FieldSpec.DataType.JSON)
        .addMultiValueDimension("INT_ARRAY_COL", FieldSpec.DataType.INT)
        .addMultiValueDimension("LONG_ARRAY_COL", FieldSpec.DataType.LONG)
        .addMultiValueDimension("FLOAT_ARRAY_COL", FieldSpec.DataType.FLOAT)
        .addMultiValueDimension("DOUBLE_ARRAY_COL", FieldSpec.DataType.DOUBLE)
        .addMultiValueDimension("STRING_ARRAY_COL", FieldSpec.DataType.STRING)
        .addMultiValueDimension("BYTES_ARRAY_COL", FieldSpec.DataType.BYTES)
        .build();
    RelDataType relDataTypeFromSchema = typeFactory.createRelDataTypeFromSchema(testSchema);
    List<RelDataTypeField> fieldList = relDataTypeFromSchema.getFieldList();
    // Each field is checked by name; any unexpected field fails the test.
    for (RelDataTypeField field : fieldList) {
        switch (field.getName()) {
            case "INT_COL":
                BasicSqlType intBasicSqlType = new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.INTEGER);
                Assert.assertEquals(field.getType(), intBasicSqlType);
                checkPrecisionScale(field, intBasicSqlType);
                break;
            case "LONG_COL":
                BasicSqlType bigIntBasicSqlType = new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.BIGINT);
                Assert.assertEquals(field.getType(), bigIntBasicSqlType);
                checkPrecisionScale(field, bigIntBasicSqlType);
                break;
            case "FLOAT_COL":
                BasicSqlType realBasicSqlType = new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.REAL);
                Assert.assertEquals(field.getType(), realBasicSqlType);
                checkPrecisionScale(field, realBasicSqlType);
                break;
            case "DOUBLE_COL":
                BasicSqlType doubleBasicSqlType = new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.DOUBLE);
                Assert.assertEquals(field.getType(), doubleBasicSqlType);
                checkPrecisionScale(field, doubleBasicSqlType);
                break;
            case "STRING_COL":
            case "JSON_COL":
                // Both STRING and JSON become VARCHAR with explicit UTF-8 charset/collation.
                Assert.assertEquals(field.getType(),
                    TYPE_FACTORY.createTypeWithCharsetAndCollation(new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.VARCHAR),
                        StandardCharsets.UTF_8, SqlCollation.IMPLICIT));
                break;
            case "BYTES_COL":
                Assert.assertEquals(field.getType(), new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.VARBINARY));
                break;
            case "INT_ARRAY_COL":
                Assert.assertEquals(field.getType(),
                    new ArraySqlType(new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.INTEGER), false));
                break;
            case "LONG_ARRAY_COL":
                Assert.assertEquals(field.getType(),
                    new ArraySqlType(new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.BIGINT), false));
                break;
            case "FLOAT_ARRAY_COL":
                Assert.assertEquals(field.getType(),
                    new ArraySqlType(new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.REAL), false));
                break;
            case "DOUBLE_ARRAY_COL":
                Assert.assertEquals(field.getType(),
                    new ArraySqlType(new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.DOUBLE), false));
                break;
            case "STRING_ARRAY_COL":
                Assert.assertEquals(field.getType(), new ArraySqlType(
                    TYPE_FACTORY.createTypeWithCharsetAndCollation(new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.VARCHAR),
                        StandardCharsets.UTF_8, SqlCollation.IMPLICIT), false));
                break;
            case "BYTES_ARRAY_COL":
                Assert.assertEquals(field.getType(),
                    new ArraySqlType(new BasicSqlType(TypeSystem.INSTANCE, SqlTypeName.VARBINARY), false));
                break;
            default:
                Assert.fail("Unexpected column name: " + field.getName());
                break;
        }
    }
}
/**
 * Removes the Kubevirt API config registered for the given endpoint and logs the removal.
 *
 * @param endpoint API endpoint key; must be non-null and non-empty
 * @return whatever the backing store returns for the removed endpoint
 */
@Override
public KubevirtApiConfig removeApiConfig(String endpoint) {
    checkArgument(!Strings.isNullOrEmpty(endpoint), ERR_NULL_ENDPOINT);
    KubevirtApiConfig removed = configStore.removeApiConfig(endpoint);
    log.info(String.format(MSG_CONFIG, endpoint, MSG_REMOVED));
    return removed;
}
// Removing a config with a null endpoint must be rejected up front with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testRemoveNullConfig() {
    target.removeApiConfig(null);
}
/**
 * Servlet filter entry point: wraps the downstream filter chain with DB-session
 * caching and thread-local settings, guaranteeing both are torn down in reverse
 * order even when the chain throws.
 */
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain chain)
    throws IOException, ServletException {
    HttpServletRequest request = (HttpServletRequest) servletRequest;
    HttpServletResponse response = (HttpServletResponse) servletResponse;
    // Components are resolved per request from the platform container.
    DBSessions dbSessions = platform.getContainer().getComponentByType(DBSessions.class);
    ThreadLocalSettings settings = platform.getContainer().getComponentByType(ThreadLocalSettings.class);
    // The session initializer is optional; null means no user-session setup is performed downstream.
    UserSessionInitializer userSessionInitializer =
        platform.getContainer().getOptionalComponentByType(UserSessionInitializer.class).orElse(null);
    LOG.trace("{} serves {}", Thread.currentThread(), request.getRequestURI());
    // Nested try/finally: caching is enabled before settings are loaded and
    // disabled after they are unloaded, mirroring acquisition order.
    dbSessions.enableCaching();
    try {
        settings.load();
        try {
            doFilter(request, response, chain, userSessionInitializer);
        } finally {
            settings.unload();
        }
    } finally {
        dbSessions.disableCaching();
    }
}
// Verifies the caching lifecycle: enableCaching before the chain, disableCaching after,
// in that exact order, with no other interactions on the DbSessions mock.
@Test
public void doFilter_enables_and_disables_caching_in_DbSessions() throws Exception {
    underTest.doFilter(request, response, chain);
    InOrder inOrder = inOrder(dbSessions);
    inOrder.verify(dbSessions).enableCaching();
    inOrder.verify(dbSessions).disableCaching();
    inOrder.verifyNoMoreInteractions();
}
/**
 * Checks that the deployed contract's on-chain bytecode matches this wrapper's binary.
 * The Solidity metadata hash trailer is stripped before comparison, and the check is a
 * containment test because the binary may bundle multiple contracts.
 *
 * @return false if the node reports an error or the (trimmed) on-chain code is empty or
 *         not contained in the wrapper's binary
 * @throws UnsupportedOperationException if the wrapper has no binary or no contract address
 * @throws IOException on transport failure fetching the code
 */
public boolean isValid() throws IOException {
    if (contractBinary.equals(BIN_NOT_PROVIDED)) {
        throw new UnsupportedOperationException(
            "Contract binary not present in contract wrapper, "
                + "please generate your wrapper using -abiFile=<file>");
    }
    // NOTE(review): this branch checks the contract *address*, but the message talks about
    // the binary — looks like a copy-paste in the original message text; confirm upstream.
    if (contractAddress.equals("")) {
        throw new UnsupportedOperationException(
            "Contract binary not present, you will need to regenerate your smart "
                + "contract wrapper with web3j v2.2.0+");
    }
    EthGetCode ethGetCode = transactionManager.getCode(contractAddress, DefaultBlockParameterName.LATEST);
    if (ethGetCode.hasError()) {
        return false;
    }
    String code = cleanHexPrefix(ethGetCode.getCode());
    // Strip everything from the first recognized metadata-hash marker onwards.
    int metadataIndex = -1;
    for (String metadataIndicator : METADATA_HASH_INDICATORS) {
        metadataIndex = code.indexOf(metadataIndicator);
        if (metadataIndex != -1) {
            code = code.substring(0, metadataIndex);
            break;
        }
    }
    // There may be multiple contracts in the Solidity bytecode, hence we only check for a
    // match with a subset
    return !code.isEmpty() && contractBinary.contains(code);
}
// On-chain code carrying a bzzr1 metadata trailer must still validate: the trailer
// (starting at the "a265627a7a7231..." marker) is stripped before comparison.
@Test
public void testIsValidSkipMetadataBzzr1() throws Exception {
    prepareEthGetCode(
        TEST_CONTRACT_BINARY
            + "a265627a7a72315820"
            + "a9bc86938894dc250f6ea25dd823d4472fad6087edcda429a3504e3713a9fc880029");
    Contract contract = deployContract(createTransactionReceipt());
    assertTrue(contract.isValid());
}
/**
 * Replaces the current state with an error state.
 *
 * @param code error code
 * @param fmt  printf-style message format; a null format becomes the literal "null"
 * @param args format arguments
 */
public void setError(int code, String fmt, Object... args) {
    String message = String.format(String.valueOf(fmt), args);
    this.state = new State(code, message);
}
// setError must record both the numeric code (mapped back to the RaftError enum)
// and the fully formatted message, and flip the status to not-ok.
@Test
public void testSetError() {
    Status s = new Status();
    s.setError(RaftError.EACCES.getNumber(), "test %s %d", "world", 100);
    assertEquals(RaftError.EACCES.getNumber(), s.getCode());
    assertEquals(RaftError.EACCES, s.getRaftError());
    assertEquals("test world 100", s.getErrorMsg());
    assertFalse(s.isOk());
}
/**
 * Current value of the metadata error counter (read from an atomic field,
 * so safe to call from any thread).
 */
public int metadataErrorCount() {
    return this.metadataErrorCount.get();
}
// The MetadataErrorCount gauge registered under KafkaController must start at 0
// and track incrementMetadataErrorCount(); registry is shut down regardless of outcome.
@Test
public void testMetadataErrorCount() {
    MetricsRegistry registry = new MetricsRegistry();
    try (ControllerMetadataMetrics metrics = new ControllerMetadataMetrics(Optional.of(registry))) {
        @SuppressWarnings("unchecked")
        Gauge<Integer> metadataErrorCount = (Gauge<Integer>) registry
            .allMetrics()
            .get(metricName("KafkaController", "MetadataErrorCount"));
        assertEquals(0, metadataErrorCount.value());
        metrics.incrementMetadataErrorCount();
        assertEquals(1, metadataErrorCount.value());
    } finally {
        registry.shutdown();
    }
}
/**
 * Handles an explicit post request by delegating straight to {@code postNotification}.
 *
 * @param notificationId requested notification id (may be null — handling is delegated)
 * @return the id actually used for the posted notification
 */
@Override
public int onPostRequest(Integer notificationId) {
    return postNotification(notificationId);
}
// An explicit post request with no id must still post a notification, and must NOT
// emit a NOTIFICATION_RECEIVED JS event (events are reserved for incoming pushes).
@Test
public void onPostRequest_withValidDataButNoId_postNotifications() throws Exception {
    // Arrange
    setUpForegroundApp();
    // Act
    final PushNotification uut = createUUT();
    uut.onPostRequest(null);
    // Assert
    ArgumentCaptor<Notification> notificationCaptor = ArgumentCaptor.forClass(Notification.class);
    verify(mNotificationManager).notify(anyInt(), notificationCaptor.capture());
    verifyNotification(notificationCaptor.getValue());
    // Shouldn't notify an event on an explicit call to notification posting
    verify(mJsIOHelper, never()).sendEventToJS(eq(NOTIFICATION_RECEIVED_EVENT_NAME), any(Bundle.class), any(ReactContext.class));
}
/**
 * Delivers each updated selector to the plugin data subscriber, preserving list order.
 *
 * @param dataList updated selector data entries
 */
@Override
protected void doUpdate(final List<SelectorData> dataList) {
    for (SelectorData selectorData : dataList) {
        pluginDataSubscriber.onSelectorSubscribe(selectorData);
    }
}
// Every selector in the batch must be forwarded to the subscriber exactly once.
@Test
public void testDoUpdate() {
    List<SelectorData> selectorDataList = createFakeSelectorDataObjects(4);
    selectorDataHandler.doUpdate(selectorDataList);
    selectorDataList.forEach(verify(subscriber)::onSelectorSubscribe);
}
/**
 * Renders this component change as a JSON-like audit string. String-valued fields are
 * quoted (third argument true); boolean fields are emitted unquoted, with null booleans
 * rendered as an empty value via Objects.toString.
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("{");
    addField(sb, "\"componentUuid\": ", this.componentUuid, true);
    addField(sb, "\"componentKey\": ", this.componentKey, true);
    addField(sb, "\"componentName\": ", this.componentName, true);
    // Qualifier code is translated to a human-readable label before rendering.
    addField(sb, "\"qualifier\": ", getQualifier(qualifier), true);
    addField(sb, "\"description\": ", this.description, true);
    addField(sb, "\"path\": ", this.path, true);
    addField(sb, "\"isPrivate\": ", Objects.toString(this.isPrivate, ""), false);
    addField(sb, "\"isEnabled\": ", Objects.toString(this.isEnabled, ""), false);
    endString(sb);
    return sb.toString();
}
// The audit string must contain every populated field, with the "TRK" qualifier
// rendered as its readable form "project" and the boolean unquoted.
@Test
void toString_project_uuid_and_name_and_key_and_isPrivate_and_description() {
    ComponentNewValue newValue = new ComponentNewValue("uuid", true, "name", "key", "description", "TRK");
    assertThat(newValue.toString())
        .contains("\"componentUuid\": \"uuid\"")
        .contains("\"componentName\": \"name\"")
        .contains("\"qualifier\": \"project\"")
        .contains("\"isPrivate\": true")
        .contains("\"componentKey\": \"key\"")
        .contains("\"description\": \"description\"");
}
/**
 * Validates Streams' internal topics against the broker: partition counts (via topic
 * descriptions) and cleanup policies (via topic configs). Both checks retry until the
 * per-call futures drain or the deadline derived from {@code retryTimeoutMs} expires.
 *
 * @param topicConfigs expected configuration per internal topic
 * @return accumulated validation findings (missing topics, misconfigurations)
 */
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;
    final ValidationResult validationResult = new ValidationResult();
    // Two independent work sets: topics still needing a description check and
    // topics still needing a config check.
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet())
            );
            // Re-key the result from ConfigResource to plain topic name.
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }
        // Inner loop: drain the futures of this admin round, polling every 100ms,
        // failing hard once the deadline passes.
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    descriptionsForTopic,
                    topicConfigs,
                    topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
                );
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    configsForTopic,
                    topicConfigs,
                    topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
                );
            }
            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
            );
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }
        // Back off between admin rounds while work remains and the deadline allows.
        maybeSleep(
            Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
            deadline,
            "validated"
        );
    }
    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
/**
 * Changelog topics must not use the "delete" cleanup policy: topic1 (delete only) and
 * topic2 (compact,delete) must each be flagged with one misconfiguration, while
 * topic3 (correct config) must not be flagged; no topic is reported missing.
 */
@Test
public void shouldReportMisconfigurationsOfCleanupPolicyForUnwindowedUnversionedChangelogTopics() {
    final Map<String, String> unwindowedUnversionedChangelogConfigWithDeleteCleanupPolicy = unwindowedUnversionedChangelogConfig();
    unwindowedUnversionedChangelogConfigWithDeleteCleanupPolicy.put(
        TopicConfig.CLEANUP_POLICY_CONFIG,
        TopicConfig.CLEANUP_POLICY_DELETE
    );
    setupTopicInMockAdminClient(topic1, unwindowedUnversionedChangelogConfigWithDeleteCleanupPolicy);
    final Map<String, String> unwindowedUnversionedChangelogConfigWithDeleteCompactCleanupPolicy = unwindowedUnversionedChangelogConfig();
    unwindowedUnversionedChangelogConfigWithDeleteCompactCleanupPolicy.put(
        TopicConfig.CLEANUP_POLICY_CONFIG,
        TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE
    );
    setupTopicInMockAdminClient(topic2, unwindowedUnversionedChangelogConfigWithDeleteCompactCleanupPolicy);
    // topic3 keeps the default (valid) changelog config.
    setupTopicInMockAdminClient(topic3, unwindowedUnversionedChangelogConfig());
    final InternalTopicConfig internalTopicConfig1 = setupUnwindowedUnversionedChangelogTopicConfig(topic1, 1);
    final InternalTopicConfig internalTopicConfig2 = setupUnwindowedUnversionedChangelogTopicConfig(topic2, 1);
    final InternalTopicConfig internalTopicConfig3 = setupUnwindowedUnversionedChangelogTopicConfig(topic3, 1);
    final ValidationResult validationResult = internalTopicManager.validate(mkMap(
        mkEntry(topic1, internalTopicConfig1),
        mkEntry(topic2, internalTopicConfig2),
        mkEntry(topic3, internalTopicConfig3)
    ));
    final Map<String, List<String>> misconfigurationsForTopics = validationResult.misconfigurationsForTopics();
    assertThat(validationResult.missingTopics(), empty());
    assertThat(misconfigurationsForTopics.size(), is(2));
    assertThat(misconfigurationsForTopics, hasKey(topic1));
    assertThat(misconfigurationsForTopics.get(topic1).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic1).get(0),
        is("Cleanup policy (" + TopicConfig.CLEANUP_POLICY_CONFIG + ") of existing internal topic "
            + topic1 + " should not contain \"" + TopicConfig.CLEANUP_POLICY_DELETE + "\".")
    );
    assertThat(misconfigurationsForTopics, hasKey(topic2));
    assertThat(misconfigurationsForTopics.get(topic2).size(), is(1));
    assertThat(
        misconfigurationsForTopics.get(topic2).get(0),
        is("Cleanup policy (" + TopicConfig.CLEANUP_POLICY_CONFIG + ") of existing internal topic "
            + topic2 + " should not contain \"" + TopicConfig.CLEANUP_POLICY_DELETE + "\".")
    );
    assertThat(misconfigurationsForTopics, not(hasKey(topic3)));
}
/**
 * Returns a fresh cursor over this hash slot array's 8-byte keys.
 * Each call creates a new, independent cursor instance.
 */
@Override
public HashSlotCursor8byteKey cursor() {
    return new Cursor();
}
// Reading a key through a cursor after the backing array was disposed must trip
// an internal assertion (requires -ea, hence @RequireAssertEnabled).
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testCursor_key_whenDisposed() {
    HashSlotCursor8byteKey cursor = hsa.cursor();
    hsa.dispose();
    cursor.key();
}
/**
 * Parses a config entry of the form "name", "name &gt; qty" or "name &lt; qty" into an
 * ItemThreshold. Scanning runs right-to-left: trailing digits/whitespace are skipped,
 * and the first other character decides the outcome. Defaults are qty=0, MORE_THAN.
 *
 * @param entry raw config text; null/empty yields null
 * @return parsed threshold, or null for empty input
 */
static ItemThreshold fromConfigEntry(String entry) {
    if (Strings.isNullOrEmpty(entry)) {
        return null;
    }
    Inequality operator = Inequality.MORE_THAN;
    int qty = 0;
    // Walk backwards past the (optional) trailing quantity.
    for (int i = entry.length() - 1; i >= 0; i--) {
        char c = entry.charAt(i);
        if (c >= '0' && c <= '9' || Character.isWhitespace(c)) {
            continue;
        }
        switch (c) {
            case '<':
                operator = Inequality.LESS_THAN;
                // fallthrough — '<' and '>' share the quantity-parsing logic below;
                // only the operator assignment above differs.
            case '>':
                if (i + 1 < entry.length()) {
                    try {
                        qty = Integer.parseInt(entry.substring(i + 1).trim());
                    } catch (NumberFormatException e) {
                        // Unparsable quantity: revert to the defaults and keep the full name.
                        qty = 0;
                        operator = Inequality.MORE_THAN;
                    }
                    // Strip the operator and quantity from the item name.
                    entry = entry.substring(0, i);
                }
        }
        // Deliberate: the first non-digit, non-space character (whatever it is)
        // terminates the backwards scan.
        break;
    }
    return new ItemThreshold(entry.trim(), qty, operator);
}
// Parsing cases: a bare trailing number is part of the name (no operator);
// "<qty" splits name/qty; spaces around '>' are tolerated; a quantity broken by an
// inner space ("10 0") only strips the operator clause per the right-to-left scan.
@Test
public void test() {
    Assert.assertEquals(ItemThreshold.fromConfigEntry("Dharok's platebody 100"),
        new ItemThreshold("Dharok's platebody 100", 0, MORE_THAN));
    Assert.assertEquals(ItemThreshold.fromConfigEntry("Dharok's platebody 100<100"),
        new ItemThreshold("Dharok's platebody 100", 100, LESS_THAN));
    Assert.assertEquals(ItemThreshold.fromConfigEntry("Dharok's platebody > 100"),
        new ItemThreshold("Dharok's platebody", 100, MORE_THAN));
    Assert.assertEquals(ItemThreshold.fromConfigEntry("Dharok's platebody < 10 0"),
        new ItemThreshold("Dharok's platebody", 0, MORE_THAN));
}
/**
 * Google Storage authenticates via Project ID or Number, so no password field
 * is exposed in the connection UI.
 */
@Override
public boolean isPasswordConfigurable() {
    // Only provide Project ID or Number
    return false;
}
// The Google Storage protocol must never advertise a configurable password.
@Test
public void testPassword() {
    assertFalse(new GoogleStorageProtocol().isPasswordConfigurable());
}
/**
 * Decides whether a device-reported burst value is "close enough" to the ONOS-side
 * burst: the acceptance window is derived from onosBurst via the divider/multiplier
 * constants, using integer division to round down.
 *
 * @param onosBurst   burst configured by ONOS
 * @param deviceBurst burst reported by the device
 * @return true if deviceBurst falls within [lowerEnd, upperEnd]
 */
private boolean isBurstSimilar(long onosBurst, long deviceBurst) {
    // Rundown removing the decimal part
    // NOTE(review): assumes BURST_LOWER_DIVIDER > BURST_UPPER_DIVIDER so that
    // lowerEnd <= upperEnd — confirm against the constant definitions.
    long lowerEnd = (onosBurst / BURST_LOWER_DIVIDER) * BURST_MULTIPLIER;
    long upperEnd = (onosBurst / BURST_UPPER_DIVIDER) * BURST_MULTIPLIER;
    if (log.isDebugEnabled()) {
        log.debug("isBurstSimilar {} in [{}, {}]", deviceBurst, lowerEnd, upperEnd);
    }
    return deviceBurst >= lowerEnd && deviceBurst <= upperEnd;
}
// For every (onosBurst, deviceBurst) pair in the BURSTS fixture, a meter whose
// committed band carries the device burst must be judged similar to the ONOS meter.
// A zero PEAK band is added to both so the meters are structurally comparable.
@Test
public void testIsBurstSimilar() {
    PiMeterBand onosMeterBand;
    PiMeterBand deviceMeterBand;
    PiMeterCellConfig onosMeter;
    PiMeterCellConfig deviceMeter;
    for (Map.Entry<Long, Long> entry : BURSTS.entrySet()) {
        onosMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, 0, entry.getKey());
        deviceMeterBand = new PiMeterBand(PiMeterBandType.COMMITTED, 0, entry.getValue());
        onosMeter = PiMeterCellConfig.builder()
            .withMeterCellId(meterCellId)
            .withMeterBand(onosMeterBand)
            .withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
            .build();
        deviceMeter = PiMeterCellConfig.builder()
            .withMeterCellId(meterCellId)
            .withMeterBand(deviceMeterBand)
            .withMeterBand(new PiMeterBand(PiMeterBandType.PEAK, 0, 0))
            .build();
        assertTrue(meterProgrammable.isSimilar(onosMeter, deviceMeter));
    }
}
/**
 * Attaches the given body to the request as an HTTP entity, using the Content-Type
 * from the header to build the entity's content type. A null body is a no-op, and
 * requests that cannot carry an entity are left untouched.
 *
 * @param requestBase target request; only HttpEntityEnclosingRequest instances get an entity
 * @param body        raw bytes, a String, or any object (serialized to JSON)
 * @param header      carries the Content-Type value
 *                    NOTE(review): dereferenced without a null check — a non-null body
 *                    with a null header would NPE; confirm callers always pass a header.
 * @throws Exception propagated from media-type parsing / serialization
 */
public static void initRequestEntity(HttpRequestBase requestBase, Object body, Header header) throws Exception {
    if (body == null) {
        return;
    }
    if (requestBase instanceof HttpEntityEnclosingRequest) {
        HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestBase;
        MediaType mediaType = MediaType.valueOf(header.getValue(HttpHeaderConsts.CONTENT_TYPE));
        ContentType contentType = ContentType.create(mediaType.getType(), mediaType.getCharset());
        HttpEntity entity;
        if (body instanceof byte[]) {
            entity = new ByteArrayEntity((byte[]) body, contentType);
        } else {
            // Strings are used verbatim; any other object is serialized to JSON.
            entity = new StringEntity(body instanceof String ? (String) body : JacksonUtils.toJson(body), contentType);
        }
        request.setEntity(entity);
    }
}
// With a null body the method must return immediately: the request keeps its
// method and headers, and the null header argument is never dereferenced.
@Test
void testInitRequestEntity5() throws Exception {
    HttpDelete httpDelete = new HttpDelete("");
    HttpUtils.initRequestEntity(httpDelete, null, null);
    // nothing change
    assertEquals(new HttpDelete("").getMethod(), httpDelete.getMethod());
    assertArrayEquals(new HttpDelete("").getAllHeaders(), httpDelete.getAllHeaders());
}
/**
 * Extracts trait options from properties whose keys start with
 * {@code camel.jbang.trait.}, returning them as {@code name=value} pairs
 * (e.g. {@code container.port=8080}). Duplicate pairs are collapsed via a set,
 * so ordering of the result is unspecified.
 *
 * @param properties source properties; null or empty yields an empty array
 * @return trait options, never null
 */
public static String[] extractTraitsFromProperties(Properties properties) {
    if (properties == null || properties.isEmpty()) {
        return new String[0];
    }
    final String traitPrefix = "camel.jbang.trait.";
    return properties.entrySet().stream()
            // Require the full prefix including the trailing dot: a key such as
            // "camel.jbang.traitX" previously slipped past the filter and produced
            // a bogus "null=value" entry because StringHelper.after found no match.
            .filter(property -> property.getKey().toString().startsWith(traitPrefix))
            // Use the entry's own value instead of a redundant map lookup by key.
            .map(property -> StringHelper.after(property.getKey().toString(), traitPrefix) + "="
                    + property.getValue().toString())
            .collect(Collectors.toSet())
            .toArray(String[]::new);
}
/**
 * Trait extraction must pick up exactly the two camel.jbang.trait.* entries
 * (ignoring other camel.jbang.* keys) and must return an empty — never null —
 * array for empty or null input. Previously only one of the two traits and
 * none of the empty-result lengths were asserted.
 */
@Test
public void extractTraitsFromPropertiesTest() {
    Properties properties = new Properties();
    properties.setProperty("camel.jbang.trait.container.port", "8080");
    properties.setProperty("camel.jbang.trait.container.port-name", "custom");
    properties.setProperty("camel.jbang.name", "MyRoute"); // not a trait, must be ignored
    String[] result = TraitHelper.extractTraitsFromProperties(properties);
    Assertions.assertNotNull(result);
    Assertions.assertEquals(2, result.length);
    Assertions.assertTrue(Arrays.asList(result).contains("container.port=8080"));
    Assertions.assertTrue(Arrays.asList(result).contains("container.port-name=custom"));
    String[] resultEmpty = TraitHelper.extractTraitsFromProperties(new Properties());
    Assertions.assertNotNull(resultEmpty);
    Assertions.assertEquals(0, resultEmpty.length);
    String[] resultNull = TraitHelper.extractTraitsFromProperties(null);
    Assertions.assertNotNull(resultNull);
    Assertions.assertEquals(0, resultNull.length);
}
/**
 * Validates a Bitbucket server URL by issuing an unauthenticated GET against its
 * repository listing endpoint and parsing the response; any failure (bad URL,
 * unreachable host, unparsable body) surfaces as an exception from the helpers.
 */
public void validateUrl(String serverUrl) {
    HttpUrl url = buildUrl(serverUrl, "/rest/api/1.0/repos");
    doGet("", url, body -> buildGson().fromJson(body, RepositoryList.class));
}
// A URL without an http/https scheme must be rejected before any network call,
// with a precise IllegalArgumentException message.
@Test
public void validate_url_fail_when_not_starting_with_protocol() {
    assertThatThrownBy(() -> underTest.validateUrl("any_url_not_starting_with_http.com"))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("url must start with http:// or https://");
}
/**
 * Returns all stored words starting with the given prefix (case sensitive).
 *
 * @param key prefix to search for; empty input yields an empty list
 * @return matching words, never null
 */
public List<String> prefixSearch(String key) {
    List<String> results = new ArrayList<>();
    if (StringUtil.isEmpty(key)) {
        return results;
    }
    char first = key.charAt(0);
    // Map the leading character onto its child-slot index; upper- and lower-case
    // letters occupy separate slot ranges.
    int index = Character.isUpperCase(first) ? first - UPPERCASE_STAR : first - LOWERCASE_STAR;
    if (root.children == null || root.children[index] == null) {
        return results;
    }
    return query(root.children[index], results, key.substring(1), String.valueOf(first));
}
@Test public void prefixSearch() throws Exception { TrieTree trieTree = new TrieTree(); trieTree.insert("abc"); trieTree.insert("abd"); trieTree.insert("ABe"); List<String> ab = trieTree.prefixSearch("AB"); for (String s : ab) { System.out.println(s); } System.out.println("========"); //char[] chars = new char[3] ; //for (int i = 0; i < 3; i++) { // int a = 97 + i ; // chars[i] = (char) a ; //} // //String s = String.valueOf(chars); //System.out.println(s); }
/**
 * Collects non-fatal warnings for a flow: use of the reserved system namespace,
 * and Flow triggers declared without conditions (which would fire on every
 * execution of every flow).
 *
 * @param flow flow to inspect; null yields an empty list
 * @return warnings in check order, possibly empty, never null
 */
public List<String> warnings(Flow flow) {
    if (flow == null) {
        return Collections.emptyList();
    }
    List<String> warnings = new ArrayList<>();
    if (flow.getNamespace() != null && flow.getNamespace().equals(systemFlowNamespace)) {
        warnings.add("The system namespace is reserved for background workflows intended to perform routine tasks such as sending alerts and purging logs. Please use another namespace name.");
    }
    List<AbstractTrigger> triggers = flow.getTriggers();
    // A Flow trigger with null or empty conditions matches everything.
    if (
        triggers != null && triggers.stream().anyMatch(trigger -> {
            if (trigger instanceof io.kestra.plugin.core.trigger.Flow flowTrigger) {
                return Optional.ofNullable(flowTrigger.getConditions()).map(List::isEmpty).orElse(true);
            }
            return false;
        })
    ) {
        warnings.add("This flow will be triggered for EVERY execution of EVERY flow on your instance. We recommend adding the conditions property to the Flow trigger.");
    }
    return warnings;
}
// A flow in the "system" namespace carrying a condition-less Flow trigger must
// raise both warnings, in any order.
@Test
void warnings() {
    Flow flow = create("test", "test", 1).toBuilder()
        .namespace("system")
        .triggers(List.of(
            io.kestra.plugin.core.trigger.Flow.builder()
                .id("flow-trigger")
                .type(io.kestra.plugin.core.trigger.Flow.class.getName())
                .build()
        ))
        .build();
    List<String> warnings = flowService.warnings(flow);
    assertThat(warnings.size(), is(2));
    assertThat(warnings, containsInAnyOrder(
        "The system namespace is reserved for background workflows intended to perform routine tasks such as sending alerts and purging logs. Please use another namespace name.",
        "This flow will be triggered for EVERY execution of EVERY flow on your instance. We recommend adding the conditions property to the Flow trigger."
    ));
}
/**
 * Mahalanobis distance between x and y: sqrt((x - y)' * Sigma^-1 * (x - y)).
 *
 * @param x first vector; length must equal Sigma's dimension
 * @param y second vector; length must equal Sigma's dimension
 * @return the Mahalanobis distance
 * @throws IllegalArgumentException if either vector's length differs from Sigma's
 */
@Override
public double d(double[] x, double[] y) {
    if (x.length != sigma.nrow()) {
        throw new IllegalArgumentException(String.format("Array x[%d] has different dimension with Sigma[%d][%d].", x.length, sigma.nrow(), sigma.ncol()));
    }
    if (y.length != sigma.nrow()) {
        throw new IllegalArgumentException(String.format("Array y[%d] has different dimension with Sigma[%d][%d].", y.length, sigma.nrow(), sigma.ncol()));
    }
    // Difference vector, then the quadratic form diff' * Sigma^-1 * diff.
    double[] diff = new double[x.length];
    for (int i = 0; i < diff.length; i++) {
        diff[i] = x[i] - y[i];
    }
    return Math.sqrt(sigmaInv.xAx(diff));
}
// Pins the Mahalanobis distance of two fixed 3-vectors under the shared sigma
// fixture to a precomputed reference value, within 1e-6.
@Test
public void testDistance() {
    System.out.println("distance");
    double[] x = {1.2793, -0.1029, -1.5852};
    double[] y = {-0.2676, -0.1717, -1.8695};
    MahalanobisDistance instance = new MahalanobisDistance(sigma);
    assertEquals(2.703861, instance.d(x, y), 1E-6);
}
/**
 * Overload shedding strategy: for every broker whose weighted max resource usage is at
 * or above the configured overload threshold, selects its highest-throughput bundles
 * (excluding recently unloaded ones) until enough traffic is marked to bring the broker
 * back under the threshold plus a safety margin.
 *
 * @param loadData cluster load snapshot
 * @param conf     service configuration supplying threshold and resource weights
 * @return broker -> bundles selected for unloading (reuses a cleared instance cache)
 */
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
    selectedBundlesCache.clear();
    final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    // Check every broker and select
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        // Weighted maximum over CPU, direct memory and bandwidth in/out.
        final double currentUsage = localData.getMaxResourceUsageWithWeight(
            conf.getLoadBalancerCPUResourceWeight(),
            conf.getLoadBalancerDirectMemoryResourceWeight(),
            conf.getLoadBalancerBandwidthInResourceWeight(),
            conf.getLoadBalancerBandwidthOutResourceWeight());
        if (currentUsage < overloadThreshold) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Broker is not overloaded, ignoring at this point ({})", broker,
                    localData.printResourceUsage());
            }
            return;
        }
        // We want to offload enough traffic such that this broker will go below the overload threshold
        // Also, add a small margin so that this broker won't be very close to the threshold edge.
        double percentOfTrafficToOffload = currentUsage - overloadThreshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
        log.info(
            "Attempting to shed load on {}, which has resource usage {}% above threshold {}%"
                + " -- Offloading at least {} MByte/s of traffic ({})",
            broker, 100 * currentUsage, 100 * overloadThreshold,
            minimumThroughputToOffload / 1024 / 1024, localData.printResourceUsage());
        MutableDouble trafficMarkedToOffload = new MutableDouble(0);
        MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false);
        if (localData.getBundles().size() > 1) {
            // Sort bundles by throughput, then pick the biggest N which combined
            // make up for at least the minimum throughput to offload
            loadData.getBundleDataForLoadShedding().entrySet().stream()
                .filter(e -> localData.getBundles().contains(e.getKey()))
                .map((e) -> {
                    // Map to throughput value
                    // Consider short-term byte rate to address system resource burden
                    String bundle = e.getKey();
                    BundleData bundleData = e.getValue();
                    TimeAverageMessageData shortTermData = bundleData.getShortTermData();
                    double throughput = shortTermData.getMsgThroughputIn() + shortTermData
                        .getMsgThroughputOut();
                    return Pair.of(bundle, throughput);
                }).filter(e -> {
                    // Only consider bundles that were not already unloaded recently
                    return !recentlyUnloadedBundles.containsKey(e.getLeft());
                }).sorted((e1, e2) -> {
                    // Sort by throughput in reverse order
                    return Double.compare(e2.getRight(), e1.getRight());
                }).forEach(e -> {
                    // Always take at least one bundle, then keep taking until the
                    // marked traffic reaches the offload target.
                    if (trafficMarkedToOffload.doubleValue() < minimumThroughputToOffload
                        || atLeastOneBundleSelected.isFalse()) {
                        selectedBundlesCache.put(broker, e.getLeft());
                        trafficMarkedToOffload.add(e.getRight());
                        atLeastOneBundleSelected.setTrue();
                    }
                });
        } else if (localData.getBundles().size() == 1) {
            // A single bundle cannot be split off this broker — warn instead of shedding.
            log.warn(
                "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                    + "No Load Shedding will be done on this broker",
                localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });
    return selectedBundlesCache;
}
// A broker saturated on bandwidth but owning no bundles has nothing to shed:
// the strategy must return an empty selection (and only warn).
@Test
public void testBrokersWithNoBundles() {
    LoadData loadData = new LoadData();
    LocalBrokerData broker1 = new LocalBrokerData();
    broker1.setBandwidthIn(new ResourceUsage(999, 1000));
    broker1.setBandwidthOut(new ResourceUsage(999, 1000));
    loadData.getBrokerData().put("broker-1", new BrokerData(broker1));
    assertTrue(os.findBundlesForUnloading(loadData, conf).isEmpty());
}
/**
 * Pre-update validation for a LOAD SINGLE TABLE statement: the referenced storage
 * units must exist, no table may already be loaded, and the physical tables must
 * exist in the default schema of the database's protocol type.
 */
@Override
public void checkBeforeUpdate(final LoadSingleTableStatement sqlStatement) {
    checkStorageUnits(sqlStatement);
    String schemaName = new DatabaseTypeRegistry(database.getProtocolType()).getDefaultSchemaName(database.getName());
    checkDuplicatedTables(sqlStatement, schemaName);
    checkActualTableExist(sqlStatement, schemaName);
}
// Loading a single table that the schema already contains must be rejected
// with TableExistsException (storage unit "ds_0" itself is valid here).
@Test
void assertCheckWithDuplicatedTables() {
    when(database.getName()).thenReturn("foo_db");
    when(schema.containsTable("foo")).thenReturn(true);
    when(database.getResourceMetaData().getNotExistedDataSources(any())).thenReturn(Collections.emptyList());
    LoadSingleTableStatement sqlStatement = new LoadSingleTableStatement(Collections.singleton(new SingleTableSegment("ds_0", null, "foo")));
    executor.setDatabase(database);
    assertThrows(TableExistsException.class, () -> executor.checkBeforeUpdate(sqlStatement));
}
/**
 * Rejects a join request whose member version (major.minor) differs from the cluster
 * version. The rejection message accumulates every applicable reason: major mismatch,
 * minor downgrade, and missing Enterprise support for rolling upgrades.
 *
 * @throws VersionMismatchException when the versions are not equal
 */
@Override
public void validateJoinRequest(JoinMessage joinMessage) {
    // check joining member's major.minor version is same as current cluster version's major.minor numbers
    MemberVersion memberVersion = joinMessage.getMemberVersion();
    Version clusterVersion = node.getClusterService().getClusterVersion();
    if (!memberVersion.asVersion().equals(clusterVersion)) {
        String msg = "Joining node's version " + memberVersion + " is not compatible with cluster version " + clusterVersion;
        if (clusterVersion.getMajor() != memberVersion.getMajor()) {
            msg += " (Rolling Member Upgrades are only supported for the same major version)";
        }
        // Only a joining member with a HIGHER minor than the cluster is tolerated elsewhere;
        // a lower minor is called out here.
        if (clusterVersion.getMinor() > memberVersion.getMinor()) {
            msg += " (Rolling Member Upgrades are only supported for the next minor version)";
        }
        if (!BuildInfoProvider.getBuildInfo().isEnterprise()) {
            msg += " (Rolling Member Upgrades are only supported in Hazelcast Enterprise)";
        }
        throw new VersionMismatchException(msg);
    }
}
// A join from the next MAJOR version must fail, and (on open source) the message
// must mention both the major-version restriction and the Enterprise requirement.
@Test
public void test_joinRequestFails_whenNextMajorVersion() {
    MemberVersion nextMajorVersion =
        MemberVersion.of(nodeVersion.getMajor() + 1, nodeVersion.getMinor(), nodeVersion.getPatch());
    JoinRequest joinRequest = new JoinRequest(Packet.VERSION, buildNumber, nextMajorVersion, joinAddress,
        newUnsecureUUID(), false, null, null, null, null, null);
    assertThatThrownBy(() -> nodeExtension.validateJoinRequest(joinRequest))
        .isInstanceOf(VersionMismatchException.class)
        .hasMessageContaining("Rolling Member Upgrades are only supported in Hazelcast Enterprise")
        .hasMessageContaining("Rolling Member Upgrades are only supported for the same major version");
}
/**
 * Creates a Distinct transform that deduplicates by the representative value
 * the given function extracts from each element. The representative type
 * descriptor is left null here; callers supply it via withRepresentativeType.
 *
 * @param fn extracts the deduplication key from an element
 */
public static <T, IdT> WithRepresentativeValues<T, IdT> withRepresentativeValueFn(
    SerializableFunction<T, IdT> fn) {
    return new WithRepresentativeValues<>(fn, null);
}
// Deduplicating by KV key must keep one element per distinct key: the two "k1"
// records collapse to one, leaving one "k1" and one "k2" entry (checked by Checker).
@Test
@Category(NeedsRunner.class)
public void testDistinctWithRepresentativeValue() {
    List<KV<String, String>> strings = Arrays.asList(KV.of("k1", "v1"), KV.of("k1", "v2"), KV.of("k2", "v1"));
    PCollection<KV<String, String>> input = p.apply(Create.of(strings));
    PCollection<KV<String, String>> output = input.apply(
        Distinct.withRepresentativeValueFn(new Keys<String>())
            .withRepresentativeType(TypeDescriptor.of(String.class)));
    PAssert.that(output).satisfies(new Checker());
    p.run();
}
/**
 * Maps an array-related JVM opcode to the element {@link Type} it operates on.
 * byte/char/short/int loads and stores (and ARRAYLENGTH) all compute with ints.
 *
 * @param opcode an array load/store opcode or ARRAYLENGTH
 * @return the element type, or null when the opcode is not array-related
 */
@Nullable
public static Type fromArrayOpcode(int opcode) {
    switch (opcode) {
        case Opcodes.ARRAYLENGTH:
        case Opcodes.BALOAD:
        case Opcodes.CALOAD:
        case Opcodes.SALOAD:
        case Opcodes.IALOAD:
        case Opcodes.BASTORE:
        case Opcodes.CASTORE:
        case Opcodes.SASTORE:
        case Opcodes.IASTORE:
            return Type.INT_TYPE;
        case Opcodes.AALOAD:
        case Opcodes.AASTORE:
            return Types.OBJECT_TYPE;
        case Opcodes.FALOAD:
        case Opcodes.FASTORE:
            return Type.FLOAT_TYPE;
        case Opcodes.DALOAD:
        case Opcodes.DASTORE:
            return Type.DOUBLE_TYPE;
        case Opcodes.LALOAD:
        case Opcodes.LASTORE:
            return Type.LONG_TYPE;
        default:
            return null;
    }
}
// Every array opcode group must map to its element type (same canonical instance,
// hence assertSame), and unknown opcodes must map to null.
@Test
void testFromArrayOpcode() {
    int[] ints = {Opcodes.ARRAYLENGTH, Opcodes.BALOAD, Opcodes.CALOAD, Opcodes.SALOAD, Opcodes.IALOAD,
        Opcodes.BASTORE, Opcodes.CASTORE, Opcodes.SASTORE, Opcodes.IASTORE};
    int[] floats = {Opcodes.FALOAD, Opcodes.FASTORE};
    int[] doubles = {Opcodes.DALOAD, Opcodes.DASTORE};
    int[] longs = {Opcodes.LALOAD, Opcodes.LASTORE};
    int[] objects = {Opcodes.AALOAD, Opcodes.AASTORE};
    for (int v : ints) assertSame(Type.INT_TYPE, Types.fromArrayOpcode(v));
    for (int v : floats) assertSame(Type.FLOAT_TYPE, Types.fromArrayOpcode(v));
    for (int v : doubles) assertSame(Type.DOUBLE_TYPE, Types.fromArrayOpcode(v));
    for (int v : longs) assertSame(Type.LONG_TYPE, Types.fromArrayOpcode(v));
    for (int v : objects) assertSame(Types.OBJECT_TYPE, Types.fromArrayOpcode(v));
    assertNull(Types.fromArrayOpcode(-1));
}
// Test-only accessor for the configured decay period in milliseconds.
@VisibleForTesting long getDecayPeriodMillis() { return decayPeriodMillis; }
// Verifies decay-period resolution: the default when unset, and a custom period from a port-qualified key while the identity provider comes from the port-less key.
@Test @SuppressWarnings("deprecation") public void testParsePeriodWithPortLessIdentityProvider() { // By default scheduler = new DecayRpcScheduler(1, "ipc.50", new Configuration()); assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_PERIOD_DEFAULT, scheduler.getDecayPeriodMillis()); // Custom Configuration conf = new Configuration(); conf.setLong("ipc.51." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_PERIOD_KEY, 1058); conf.unset("ipc.51." + CommonConfigurationKeys.IPC_IDENTITY_PROVIDER_KEY); conf.set("ipc." + CommonConfigurationKeys.IPC_IDENTITY_PROVIDER_KEY, "org.apache.hadoop.ipc.TestDecayRpcScheduler$TestIdentityProvider"); scheduler = new DecayRpcScheduler(1, "ipc.51", conf); assertEquals(1058L, scheduler.getDecayPeriodMillis()); }
// Looks up a Kubernetes port by id; rejects null/empty ids and delegates to the network store.
@Override public K8sPort port(String portId) { checkArgument(!Strings.isNullOrEmpty(portId), ERR_NULL_PORT_ID); return k8sNetworkStore.port(portId); }
// Verifies port lookup returns a value for a known id and null for an unknown one.
@Test public void testGetPortById() { createBasicNetworks(); assertNotNull("Port did not match", target.port(PORT_ID)); assertNull("Port did not match", target.port(UNKNOWN_ID)); }
// Returns the property value parsed as a float; fails fast when the declared type is not FLOAT.
public float asFloat() { checkState(type == Type.FLOAT, "Value is not a float"); return Float.parseFloat(value); }
// Verifies a FLOAT property's value is retrievable via both asFloat and asDouble.
@Test public void asFloat() { ConfigProperty p = defineProperty("foo", FLOAT, "123.0", "Foo Prop"); validate(p, "foo", FLOAT, "123.0", "123.0"); assertEquals("incorrect value", 123.0, p.asFloat(), 0.01); assertEquals("incorrect value", 123.0, p.asDouble(), 0.01); }
// Reads the current counter value and wraps it in an AtomicValue snapshot.
@Override public AtomicValue<Long> get() throws Exception { return new AtomicLong(value.get()); }
// Verifies a corrupted (non-numeric) znode payload surfaces as a RuntimeException rather than a Buffer{Under,Over}flowException.
@Test public void testCorruptedValue() throws Exception { final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1)); client.start(); try { client.create().forPath("/counter", "foo".getBytes()); DistributedAtomicLong dal = new DistributedAtomicLong(client, "/counter", new RetryOneTime(1)); try { dal.get().postValue(); } catch (BufferUnderflowException e) { fail("", e); } catch (BufferOverflowException e) { fail("", e); } catch (RuntimeException e) { // correct } } finally { client.close(); } }
// Expands knowledge-helper macros (e.g. "update(x)" -> "drools.update(x)") in the raw text; null/blank input is returned unchanged.
public static String fix(final String raw) { if ( raw == null || "".equals( raw.trim() )) { return raw; } MacroProcessor macroProcessor = new MacroProcessor(); macroProcessor.setMacros( macros ); return macroProcessor.parse( raw ); }
// Verifies that every macro occurrence on a line is expanded, including repeated and whitespace-varied calls.
@Test public void testMultipleMatches() { String result = KnowledgeHelperFixerTest.fixer.fix( "update(myObject); update(myObject );" ); assertEqualsIgnoreWhitespace( "drools.update(myObject); drools.update(myObject );", result ); result = KnowledgeHelperFixerTest.fixer.fix( "xxx update(myObject ); update( myObject ); update( yourObject ); yyy" ); assertEqualsIgnoreWhitespace( "xxx drools.update(myObject ); drools.update( myObject ); drools.update( yourObject ); yyy", result ); }
// Serializes an object as block-style YAML (2-space indent, pretty flow) to the given writer.
public static void dump(Object object, Writer writer) { final DumperOptions options = new DumperOptions(); options.setIndent(2); options.setPrettyFlow(true); options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); dump(object, writer, options); }
// Manual (disabled) test: dumps a Dict to a local file for visual inspection; relies on a developer-specific path.
@Test @Disabled public void dumpTest() { final Dict dict = Dict.create() .set("name", "hutool") .set("count", 1000); YamlUtil.dump( dict , FileUtil.getWriter("d:/test/dump.yaml", CharsetUtil.CHARSET_UTF_8, false)); }
// Asynchronously installs the filtering objective on the device via the shared executor.
@Override public void filter(DeviceId deviceId, FilteringObjective filteringObjective) { executorService.execute(new ObjectiveInstaller(deviceId, filteringObjective)); }
// Verifies a deny filtering objective installs exactly one flow rule in vnet1 and none in vnet2 (assertions run in the success callback).
@Test public void filteringObjective() { TrafficTreatment treatment = DefaultTrafficTreatment.emptyTreatment(); FilteringObjective filter = DefaultFilteringObjective.builder() .fromApp(NetTestTools.APP_ID) .withMeta(treatment) .makePermanent() .deny() .addCondition(Criteria.matchEthType(12)) .add(new ObjectiveContext() { @Override public void onSuccess(Objective objective) { assertEquals("1 flowrule entry expected", 1, flowRuleStore.getFlowRuleCount(vnet1.id())); assertEquals("0 flowrule entry expected", 0, flowRuleStore.getFlowRuleCount(vnet2.id())); } }); service1.filter(VDID1, filter); }
// Decides whether the animated progress footer is shown: requires interactive mode, no "plain" console override, and ANSI support.
@VisibleForTesting static boolean isProgressFooterEnabled(MavenSession session) { if (!session.getRequest().isInteractiveMode()) { return false; } if ("plain".equals(System.getProperty(PropertyNames.CONSOLE))) { return false; } // Enables progress footer when ANSI is supported (Windows or System.console() not null and TERM // not 'dumb'). if (Os.isFamily(Os.FAMILY_WINDOWS)) { return true; } return System.console() != null && !"dumb".equals(System.getenv("TERM")); }
// Verifies the progress footer is disabled for non-interactive (batch) builds.
@Test public void isProgressFooterEnabled() { when(mockMavenRequest.isInteractiveMode()).thenReturn(false); assertThat(MavenProjectProperties.isProgressFooterEnabled(mockMavenSession)).isFalse(); }
// Returns a transform computing the distinct set intersection of its input with rightCollection.
public static <T> PTransform<PCollection<T>, PCollection<T>> intersectDistinct( PCollection<T> rightCollection) { checkNotNull(rightCollection, "rightCollection argument is null"); return new SetImpl<>(rightCollection, intersectDistinct()); }
// Verifies intersectDistinct over a PCollectionList for both String and Row collections, including output schema preservation.
@Test @Category(NeedsRunner.class) public void testIntersectionCollectionList() { PCollection<String> third = p.apply("third", Create.of(Arrays.asList("b", "b", "c", "f"))); PCollection<Row> thirdRows = p.apply("thirdRows", Create.of(toRows("b", "b", "c", "f"))); PAssert.that( PCollectionList.of(first) .and(second) .and(third) .apply("stringsCols", Sets.intersectDistinct())) .containsInAnyOrder("b", "c"); PCollection<Row> results = PCollectionList.of(firstRows) .and(secondRows) .and(thirdRows) .apply("rowCols", Sets.intersectDistinct()); PAssert.that(results).containsInAnyOrder(toRows("b", "c")); assertEquals(schema, results.getSchema()); p.run(); }
// Decodes a MySQL binlog JSON value: the first byte is the type tag, the remainder is the value payload rendered into a JSON string.
public static Serializable decode(final ByteBuf byteBuf) { int valueType = byteBuf.readUnsignedByte() & 0xff; StringBuilder result = new StringBuilder(); decodeValue(valueType, 1, byteBuf, result); return result.toString(); }
// Verifies decoding of a small JSON array containing int16 boundary values (32767 and -32768).
@Test void assertDecodeSmallJsonArray() { List<JsonEntry> jsonEntries = new LinkedList<>(); jsonEntries.add(new JsonEntry(JsonValueTypes.INT16, null, 0x00007fff)); jsonEntries.add(new JsonEntry(JsonValueTypes.INT16, null, 0x00008000)); ByteBuf payload = mockJsonArrayByteBuf(jsonEntries, true); String actual = (String) MySQLJsonValueDecoder.decode(payload); assertThat(actual, is("[32767,-32768]")); }
/**
 * FEEL "nn all" built-in: conjunction over a list of Booleans, ignoring null elements.
 * A null list yields true (vacuous truth); a non-Boolean, non-null element is an error.
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(true);
    }
    boolean conjunction = true;
    for (final Object item : list) {
        if (item == null) {
            // "nn" (not-null) variant: nulls are simply skipped
            continue;
        }
        if (!(item instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean"));
        }
        conjunction &= (Boolean) item;
    }
    return FEELFnResult.ofResult(conjunction);
}
// Verifies "nn all" returns false whenever any element is false, with null elements ignored.
@Test void invokeArrayParamReturnFalse() { FunctionTestUtil.assertResult(nnAllFunction.invoke(new Object[]{Boolean.TRUE, Boolean.FALSE}), false); FunctionTestUtil.assertResult(nnAllFunction.invoke(new Object[]{Boolean.TRUE, null, Boolean.FALSE}), false); }
// Convenience overload: commits a cluster state change with the boolean flag defaulted to false.
void commitClusterState(ClusterStateChange newState, Address initiator, UUID txnId) { commitClusterState(newState, initiator, txnId, false); }
// Verifies a null transaction id is rejected with a NullPointerException.
@Test(expected = NullPointerException.class) public void test_changeLocalClusterState_nullTransactionId() throws Exception { clusterStateManager.commitClusterState(ClusterStateChange.from(FROZEN), newAddress(), null); }
// Asynchronous pipeline: read controller version, list pods, detect from/to versions, then compute the version change to apply.
public Future<KafkaVersionChange> reconcile() { return getVersionFromController() .compose(i -> getPods()) .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
// Verifies upgrade detection when only the Kafka version is bumped: from/to versions are resolved and the old protocol/format versions are carried over.
@Test public void testUpgradeWithKafkaVersionOnly(VertxTestContext context) { String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; String kafkaVersion = VERSIONS.defaultVersion().version(); VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(kafkaVersion, null, null), mockNewCluster( null, mockSps(oldKafkaVersion), mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) ) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); assertThat(c.to(), is(VERSIONS.defaultVersion())); assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion)); assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion)); async.flag(); }))); }
// Deep-clones by copying each array field so the clone shares no array state with the original; CloneNotSupportedException cannot normally occur here.
@Override public Object clone() { try { BaseFileInputFiles cloned = (BaseFileInputFiles) super.clone(); cloned.fileName = Arrays.copyOf( fileName, fileName.length ); cloned.fileMask = Arrays.copyOf( fileMask, fileMask.length ); cloned.excludeFileMask = Arrays.copyOf( excludeFileMask, excludeFileMask.length ); cloned.fileRequired = Arrays.copyOf( fileRequired, fileRequired.length ); cloned.includeSubFolders = Arrays.copyOf( includeSubFolders, includeSubFolders.length ); return cloned; } catch ( CloneNotSupportedException ex ) { throw new IllegalArgumentException( "Clone not supported for " + this.getClass().getName() ); } }
// Verifies clone() yields equal-content but distinct array instances for every array field.
@Test public void testClone() { BaseFileInputFiles orig = new BaseFileInputFiles(); orig.fileName = new String[] { "1", "2" }; orig.fileMask = new String[] { "3", "4" }; orig.excludeFileMask = new String[] { "5", "6" }; orig.fileRequired = new String[] { "7", "8" }; orig.includeSubFolders = new String[] { "9", "0" }; BaseFileInputFiles clone = (BaseFileInputFiles) orig.clone(); assertNotEquals( orig.fileName, clone.fileName ); assertTrue( Arrays.equals( orig.fileName, clone.fileName ) ); assertNotEquals( orig.fileMask, clone.fileMask ); assertTrue( Arrays.equals( orig.fileMask, clone.fileMask ) ); assertNotEquals( orig.excludeFileMask, clone.excludeFileMask ); assertTrue( Arrays.equals( orig.excludeFileMask, clone.excludeFileMask ) ); assertNotEquals( orig.fileRequired, clone.fileRequired ); assertTrue( Arrays.equals( orig.fileRequired, clone.fileRequired ) ); assertNotEquals( orig.includeSubFolders, clone.includeSubFolders ); assertTrue( Arrays.equals( orig.includeSubFolders, clone.includeSubFolders ) ); }
// Splits group keys into new-protocol (ConsumerGroupDescribe) and classic (DescribeGroups) request batches based on the fallback set; rejects non-GROUP keys.
@Override public Collection<RequestAndKeys<CoordinatorKey>> buildRequest(int coordinatorId, Set<CoordinatorKey> keys) { Set<CoordinatorKey> newConsumerGroupKeys = new HashSet<>(); Set<CoordinatorKey> oldConsumerGroupKeys = new HashSet<>(); List<String> newConsumerGroupIds = new ArrayList<>(); List<String> oldConsumerGroupIds = new ArrayList<>(); keys.forEach(key -> { if (key.type != FindCoordinatorRequest.CoordinatorType.GROUP) { throw new IllegalArgumentException("Invalid group coordinator key " + key + " when building `DescribeGroups` request"); } // By default, we always try using the new consumer group describe API. // If it fails, we fail back to using the classic group API. if (useClassicGroupApi.contains(key.idValue)) { oldConsumerGroupKeys.add(key); oldConsumerGroupIds.add(key.idValue); } else { newConsumerGroupKeys.add(key); newConsumerGroupIds.add(key.idValue); } }); List<RequestAndKeys<CoordinatorKey>> requests = new ArrayList<>(); if (!newConsumerGroupKeys.isEmpty()) { ConsumerGroupDescribeRequestData data = new ConsumerGroupDescribeRequestData() .setGroupIds(newConsumerGroupIds) .setIncludeAuthorizedOperations(includeAuthorizedOperations); requests.add(new RequestAndKeys<>(new ConsumerGroupDescribeRequest.Builder(data), newConsumerGroupKeys)); } if (!oldConsumerGroupKeys.isEmpty()) { DescribeGroupsRequestData data = new DescribeGroupsRequestData() .setGroups(oldConsumerGroupIds) .setIncludeAuthorizedOperations(includeAuthorizedOperations); requests.add(new RequestAndKeys<>(new DescribeGroupsRequest.Builder(data), oldConsumerGroupKeys)); } return requests; }
// Verifies a non-GROUP coordinator key (transactional id) is rejected with IllegalArgumentException.
@Test public void testInvalidBuildRequest() { DescribeConsumerGroupsHandler handler = new DescribeConsumerGroupsHandler(false, logContext); assertThrows(IllegalArgumentException.class, () -> handler.buildRequest(1, singleton(CoordinatorKey.byTransactionalId("tId")))); }
// Blocking poll of up to `count` head elements from the first non-empty queue among queueNames, waiting up to `duration`.
@Override public Map<String, List<V>> pollFirstFromAny(Duration duration, int count, String... queueNames) throws InterruptedException { return commandExecutor.getInterrupted(pollFirstFromAnyAsync(duration, count, queueNames)); }
// Verifies pollFirstFromAny drains the calling queue first and returns null when no elements arrive within the timeout.
@Test public void testPollFirstFromAny() throws InterruptedException { RBlockingQueue<Integer> queue1 = redisson.getBlockingQueue("queue:pollany"); RBlockingQueue<Integer> queue2 = redisson.getBlockingQueue("queue:pollany1"); RBlockingQueue<Integer> queue3 = redisson.getBlockingQueue("queue:pollany2"); Assertions.assertDoesNotThrow(() -> { queue3.put(1); queue3.put(2); queue3.put(3); queue1.put(4); queue1.put(5); queue1.put(6); queue2.put(7); queue2.put(8); queue2.put(9); }); Map<String, List<Integer>> res = queue1.pollFirstFromAny(Duration.ofSeconds(4), 2, "queue:pollany1", "queue:pollany2"); assertThat(res.get("queue:pollany")).containsExactly(4, 5); queue1.clear(); Map<String, List<Integer>> res2 = queue1.pollFirstFromAny(Duration.ofSeconds(4), 2); assertThat(res2).isNull(); }
// True when the key is currently present in the idempotent-repository cache.
@Override public boolean confirm(String key) { return cache.asMap().containsKey(key); }
// Verifies confirm() is true for a previously added key and false for an absent one.
@Test void testConfirm() { // add first key and confirm assertTrue(repo.add(key01)); assertTrue(repo.confirm(key01)); // try to confirm a key that isn't there assertFalse(repo.confirm(key02)); }
// Gamma distribution CDF via the regularized incomplete gamma function P(k, x/theta); 0 for negative x.
@Override public double cdf(double x) { if (x < 0) { return 0.0; } else { return Gamma.regularizedIncompleteGamma(k, x / theta); } }
// Checks Gamma(3, 2.1) CDF values against precomputed reference numbers at several points.
@Test public void testCdf() { System.out.println("cdf"); GammaDistribution instance = new GammaDistribution(3, 2.1); instance.rand(); assertEquals(0.0, instance.cdf(-0.1), 1E-7); assertEquals(0.0, instance.cdf(0.0), 1E-7); assertEquals(0.01264681, instance.cdf(1.0), 1E-7); assertEquals(0.07175418, instance.cdf(2.0), 1E-7); assertEquals(0.1734485, instance.cdf(3.0), 1E-7); assertEquals(0.2975654, instance.cdf(4.0), 1E-7); assertEquals(0.8538087, instance.cdf(10.0), 1E-7); assertEquals(0.995916, instance.cdf(20.0), 1E-7); assertEquals(0.9999267, instance.cdf(30.0), 1E-7); }
// Visitor-pattern helper: dispatches the expression back to this visitor instance.
private <T> T accept(Expression<T> expr) { return expr.accept(this); }
// Verifies Equal-expression evaluation for equal and unequal operands, plus a condition loaded from JSON.
@Test public void testEqual() throws Exception { assertThat(Expr.Equal.create( Expr.NumberValue.create(2), Expr.NumberValue.create(2) ).accept(new BooleanNumberConditionsVisitor())) .isTrue(); assertThat(Expr.Equal.create( Expr.NumberValue.create(1), Expr.NumberValue.create(2) ).accept(new BooleanNumberConditionsVisitor())) .isFalse(); assertThat(Expr.Equal.create( Expr.NumberValue.create(2), Expr.NumberValue.create(1) ).accept(new BooleanNumberConditionsVisitor())) .isFalse(); assertThat(loadCondition("condition-equal.json").accept(new BooleanNumberConditionsVisitor())) .isTrue(); }
// Fetches notifications for an authenticated, activated app session; logs the request remotely and returns a NOK "no_session" response otherwise.
@Override public AppResponse process(Flow flow, MijnDigidSessionRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException, SharedServiceClientException { appSession = appSessionService.getSession(request.getMijnDigidSessionId()); appAuthenticator = appAuthenticatorService.findByUserAppId(appSession.getUserAppId()); checkSwitchesEnabled(); digidClient.remoteLog("1468", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName(), lowerUnderscore(HUMAN_PROCESS), "get_notifications", lowerUnderscore(APP_CODE), appAuthenticator.getAppCode())); if (!isAppSessionAuthenticated(appSession) || !isAppAuthenticatorActivated(appAuthenticator)){ return new NokResponse("no_session"); } return nsClient.getNotifications(appAuthenticator.getAccountId()); }
// Verifies an unauthenticated session yields a NokResponse carrying the "no_session" error.
@Test public void notAuthenticatedTest() throws FlowNotDefinedException, SharedServiceClientException, IOException, NoSuchAlgorithmException { //given mockedAppSession.setState("NOT-AUTHENTICATED"); when(appSessionService.getSession(any())).thenReturn(mockedAppSession); when(appAuthenticatorService.findByUserAppId(any())).thenReturn(mockedAppAuthenticator); when(switchService.digidAppSwitchEnabled()).thenReturn(true); //when AppResponse appResponse = notificationsGet.process(mockedFlow, mockedRequest); //then assertTrue(appResponse instanceof NokResponse); assertEquals("no_session", ((NokResponse) appResponse).getError()); }
// Metered backward window fetch: rejects null keys and wraps the inner store's iterator with fetch/duration sensors and open-iterator tracking.
@Override public WindowStoreIterator<V> backwardFetch(final K key, final long timeFrom, final long timeTo) { Objects.requireNonNull(key, "key cannot be null"); return new MeteredWindowStoreIterator<>( wrapped().backwardFetch(keyBytes(key), timeFrom, timeTo), fetchSensor, iteratorDurationSensor, streamsMetrics, serdes::valueFrom, time, numOpenIterators, openIterators ); }
// Verifies backwardFetch rejects a null key with NullPointerException.
@Test public void shouldThrowNullPointerOnBackwardFetchIfKeyIsNull() { assertThrows(NullPointerException.class, () -> store.backwardFetch(null, 0L, 1L)); }
// Builds an owner-UUID -> partition-id-set map; optionally fails on unassigned partitions, and rejects clusters with mixed member versions when localMemberVersion is given.
public static Map<UUID, PartitionIdSet> createPartitionMap( NodeEngine nodeEngine, @Nullable MemberVersion localMemberVersion, boolean failOnUnassignedPartition ) { Collection<Partition> parts = nodeEngine.getHazelcastInstance().getPartitionService().getPartitions(); int partCnt = parts.size(); Map<UUID, PartitionIdSet> partMap = new LinkedHashMap<>(); for (Partition part : parts) { Member owner = part.getOwner(); if (owner == null) { if (failOnUnassignedPartition) { throw QueryException.error( SqlErrorCode.PARTITION_DISTRIBUTION, "Partition is not assigned to any member: " + part.getPartitionId() ); } else { continue; } } if (localMemberVersion != null) { if (!localMemberVersion.equals(owner.getVersion())) { UUID localMemberId = nodeEngine.getLocalMember().getUuid(); throw QueryException.error("Cannot execute SQL query when members have different versions " + "(make sure that all members have the same version) {localMemberId=" + localMemberId + ", localMemberVersion=" + localMemberVersion + ", remoteMemberId=" + owner.getUuid() + ", remoteMemberVersion=" + owner.getVersion() + "}"); } } partMap.computeIfAbsent(owner.getUuid(), (key) -> new PartitionIdSet(partCnt)).add(part.getPartitionId()); } return partMap; }
// Verifies a member-version mismatch yields a GENERIC QueryException with a fully descriptive message.
@Test public void testVersionMismatch() { HazelcastInstance member = factory.newHazelcastInstance(); NodeEngine nodeEngine = Accessors.getNodeEngineImpl(member); String memberId = nodeEngine.getLocalMember().getUuid().toString(); String memberVersion = nodeEngine.getLocalMember().getVersion().toString(); try { QueryUtils.createPartitionMap(nodeEngine, new MemberVersion(0, 0, 0), false); fail("Must fail"); } catch (QueryException e) { assertEquals(SqlErrorCode.GENERIC, e.getCode()); assertEquals("Cannot execute SQL query when members have different versions (make sure that all members " + "have the same version) {localMemberId=" + memberId + ", localMemberVersion=0.0.0, remoteMemberId=" + memberId + ", remoteMemberVersion=" + memberVersion + "}", e.getMessage()); } }
// No-op optimizer: returns the predicate unchanged regardless of available indexes.
@Override public <K, V> Predicate<K, V> optimize(Predicate<K, V> predicate, IndexRegistry indexes) { return predicate; }
// Verifies the empty optimizer returns the very same predicate instance it was given.
@Test public void optimize_returnsOriginalPredicate() { EmptyOptimizer emptyOptimizer = new EmptyOptimizer(); Predicate predicate = mock(Predicate.class); IndexRegistry indexes = mock(IndexRegistry.class); Predicate result = emptyOptimizer.optimize(predicate, indexes); assertSame(predicate, result); }
// REST endpoint returning history-service info as UTF-8 JSON or XML.
@GET @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) public HistoryInfo get() { return getHistoryInfo(); }
// Verifies the history endpoint returns XML with the expected UTF-8 content type and well-formed payload.
@Test public void testHSXML() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history") .accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8, response.getType().toString()); String xml = response.getEntity(String.class); verifyHSInfoXML(xml, appContext); }
// Serializes a Connect Boolean value; rejects non-BOOLEAN schemas and non-Boolean values with DataException.
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { if (schema != null && schema.type() != Type.BOOLEAN) throw new DataException("Invalid schema type for BooleanConverter: " + schema.type().toString()); try { return serializer.serialize(topic, (Boolean) value); } catch (ClassCastException e) { throw new DataException("BooleanConverter is not compatible with objects of type " + value.getClass()); } }
// Verifies a non-BOOLEAN schema is rejected with a DataException.
@Test public void testFromConnectWrongSchema() { assertThrows(DataException.class, () -> converter.fromConnectData(TOPIC, Schema.INT32_SCHEMA, Boolean.FALSE)); }
// True when the logic table name is configured as a sharding table.
public boolean isShardingTable(final String logicTableName) { return shardingTables.containsKey(logicTableName); }
// Verifies a configured logic table is recognized as a sharding table.
@Test void assertIsShardingTable() { assertTrue(createMaximumShardingRule().isShardingTable("LOGIC_TABLE")); }
// Builds a per-key approximate-count-distinct transform using the default HLL precision.
public static <K, V> PerKey<K, V> perKey() { return new AutoValue_ApproximateCountDistinct_PerKey.Builder<K, V>() .setPrecision(HllCount.DEFAULT_PRECISION) .build(); }
// Verifies per-key approximate distinct counts for Long values are the same estimate across all keys.
@Test @Category(NeedsRunner.class) public void testStandardTypesPerKeyForLong() { List<KV<Integer, Long>> longs = new ArrayList<>(); for (int i = 0; i < 3; i++) { for (int k : INTS1) { longs.add(KV.of(i, (long) k)); } } PCollection<KV<Integer, Long>> result = p.apply("Long", Create.of(longs)).apply("LongHLL", ApproximateCountDistinct.perKey()); PAssert.that(result) .containsInAnyOrder( ImmutableList.of( KV.of(0, INTS1_ESTIMATE), KV.of(1, INTS1_ESTIMATE), KV.of(2, INTS1_ESTIMATE))); p.run(); }
// FTP reports directory timestamps implicitly (no explicit modification-date support).
@Override public DirectoryTimestamp getDirectoryTimestamp() { return DirectoryTimestamp.implicit; }
// Verifies FTP protocol features: case sensitivity and implicit directory timestamps.
@Test public void testFeatures() { assertEquals(Protocol.Case.sensitive, new FTPProtocol().getCaseSensitivity()); assertEquals(Protocol.DirectoryTimestamp.implicit, new FTPProtocol().getDirectoryTimestamp()); }
// Factory for the "item not exists" bad-request error, formatting the offending itemId into the message.
public static BadRequestException itemNotExists(long itemId) { return new BadRequestException("item not exists for itemId:%s", itemId); }
// Verifies the formatted message of itemNotExists includes the item id.
@Test public void testItemNotExists() { BadRequestException itemNotExists = BadRequestException.itemNotExists(1001); assertEquals("item not exists for itemId:1001", itemNotExists.getMessage()); }
// CLI entry point: concatenates the input Avro files (honoring --offset/--limit/--samplerate) into one output,
// preserving the first input's codec and non-reserved metadata. Returns 0 on success, 1 on invalid options.
// NOTE(review): Util.close(out) at the end appears to close the supplied PrintStream (possibly System.out) — confirm intended.
@Override public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception { OptionParser optParser = new OptionParser(); OptionSpec<Long> offsetOpt = optParser.accepts("offset", "offset for reading input").withRequiredArg() .ofType(Long.class).defaultsTo(Long.valueOf(0)); OptionSpec<Long> limitOpt = optParser.accepts("limit", "maximum number of records in the outputfile") .withRequiredArg().ofType(Long.class).defaultsTo(Long.MAX_VALUE); OptionSpec<Double> fracOpt = optParser.accepts("samplerate", "rate at which records will be collected") .withRequiredArg().ofType(Double.class).defaultsTo(Double.valueOf(1)); OptionSet opts = optParser.parse(args.toArray(new String[0])); List<String> nargs = (List<String>) opts.nonOptionArguments(); if (nargs.size() < 2) { printHelp(out); return 0; } inFiles = Util.getFiles(nargs.subList(0, nargs.size() - 1)); System.out.println("List of input files:"); for (Path p : inFiles) { System.out.println(p); } currentInput = -1; nextInput(); OutputStream output = out; String lastArg = nargs.get(nargs.size() - 1); if (nargs.size() > 1 && !lastArg.equals("-")) { output = Util.createFromFS(lastArg); } writer = new DataFileWriter<>(new GenericDatumWriter<>()); String codecName = reader.getMetaString(DataFileConstants.CODEC); CodecFactory codec = (codecName == null) ? 
CodecFactory.fromString(DataFileConstants.NULL_CODEC) : CodecFactory.fromString(codecName); writer.setCodec(codec); for (String key : reader.getMetaKeys()) { if (!DataFileWriter.isReservedMeta(key)) { writer.setMeta(key, reader.getMeta(key)); } } writer.create(schema, output); long offset = opts.valueOf(offsetOpt); long limit = opts.valueOf(limitOpt); double samplerate = opts.valueOf(fracOpt); sampleCounter = 1; totalCopied = 0; reuse = null; if (limit < 0) { System.out.println("limit has to be non-negative"); this.printHelp(out); return 1; } if (offset < 0) { System.out.println("offset has to be non-negative"); this.printHelp(out); return 1; } if (samplerate < 0 || samplerate > 1) { System.out.println("samplerate has to be a number between 0 and 1"); this.printHelp(out); return 1; } skip(offset); writeRecords(limit, samplerate); System.out.println(totalCopied + " records written."); writer.flush(); writer.close(); Util.close(out); return 0; }
// Verifies CatTool over file, folder, and glob inputs with offset/limit/samplerate: return code 0 and expected output row count.
@Test void cat() throws Exception { Map<String, String> metadata = new HashMap<>(); metadata.put("myMetaKey", "myMetaValue"); File input1 = generateData("input1.avro", Type.INT, metadata, DEFLATE); File input2 = generateData("input2.avro", Type.INT, metadata, SNAPPY); File input3 = generateData("input3.avro", Type.INT, metadata, DEFLATE); File output = new File(DIR, name.getMethodName() + ".avro"); output.deleteOnExit(); // file input List<String> args = asList(input1.getAbsolutePath(), input2.getAbsolutePath(), input3.getAbsolutePath(), "--offset", String.valueOf(OFFSET), "--limit", String.valueOf(LIMIT_WITHIN_INPUT_BOUNDS), "--samplerate", String.valueOf(SAMPLERATE), output.getAbsolutePath()); int returnCode = new CatTool().run(System.in, System.out, System.err, args); assertEquals(0, returnCode); assertEquals(LIMIT_WITHIN_INPUT_BOUNDS, numRowsInFile(output)); // folder input args = asList(input1.getParentFile().getAbsolutePath(), output.getAbsolutePath(), "--offset", String.valueOf(OFFSET), "--limit", String.valueOf(LIMIT_WITHIN_INPUT_BOUNDS)); returnCode = new CatTool().run(System.in, System.out, System.err, args); assertEquals(0, returnCode); assertEquals(LIMIT_WITHIN_INPUT_BOUNDS, numRowsInFile(output)); // glob input args = asList(new File(input1.getParentFile(), "/*").getAbsolutePath(), output.getAbsolutePath(), "--offset", String.valueOf(OFFSET), "--limit", String.valueOf(LIMIT_WITHIN_INPUT_BOUNDS)); returnCode = new CatTool().run(System.in, System.out, System.err, args); assertEquals(0, returnCode); assertEquals(LIMIT_WITHIN_INPUT_BOUNDS, numRowsInFile(output)); }
// Returns the object to its pool stack. False for NOOP handles or handles owned by another recycler; throws on double-recycle or handle/object mismatch.
public final boolean recycle(T o, Handle handle) { if (handle == NOOP_HANDLE) { return false; } DefaultHandle h = (DefaultHandle) handle; final Stack<?> stack = h.stack; if (h.lastRecycledId != h.recycleId || stack == null) { throw new IllegalStateException("recycled already"); } if (stack.parent != this) { return false; } if (o != h.value) { throw new IllegalArgumentException("o does not belong to handle"); } h.recycle(); return true; }
// Verifies a recycled object is handed back as the same instance on the next get().
@Test public void testRecycle() { final Recyclers<RecyclableObject> recyclers = newRecyclers(16); final RecyclableObject object = recyclers.get(); recyclers.recycle(object, object.handle); final RecyclableObject object2 = recyclers.get(); Assert.assertSame(object, object2); recyclers.recycle(object2, object2.handle); }
// Atomically increments the index and returns it masked to non-negative (Integer.MIN_VALUE wraps to 0).
public final int incrementAndGet() { return INDEX_UPDATER.incrementAndGet(this) & Integer.MAX_VALUE; }
// Verifies increment results, including the wraparound masking that keeps values non-negative.
@Test void testIncrementAndGet() { int get = i1.incrementAndGet(); assertEquals(1, get); assertEquals(1, i1.get()); get = i2.incrementAndGet(); assertEquals(128, get); assertEquals(128, i2.get()); get = i3.incrementAndGet(); assertEquals(0, get); assertEquals(0, i3.get()); }
// Truth assertion: subject contains exactly the given elements; a null varargs array is treated as a single null element, and a lone Iterable argument triggers the misuse-warning path.
@CanIgnoreReturnValue public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) { List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs); return containsExactlyElementsIn( expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable); }
// Verifies containsExactly accepts a trailing null element.
@Test public void iterableContainsExactlyWithNullThird() { assertThat(asList(1, 2, null)).containsExactly(1, 2, null); }
// Filters network interfaces whose addresses fall within the given subnet, using the default shim implementation.
public static NetworkInterface[] filterBySubnet(final InetAddress address, final int subnetPrefix) throws SocketException { return filterBySubnet(NetworkInterfaceShim.DEFAULT, address, subnetPrefix); }
// Verifies subnet filtering returns every matching interface, ordered by longest prefix match first.
@Test void shouldFilterBySubnetAndFindMultipleResultsOrderedByMatchLength() throws Exception { final NetworkInterfaceStub stub = new NetworkInterfaceStub(); stub.add("10.0.0.2/8"); final NetworkInterface ifc1 = stub.add("192.0.0.0/8"); final NetworkInterface ifc2 = stub.add("192.168.1.1/24"); final NetworkInterface ifc3 = stub.add("192.168.0.0/16"); final NetworkInterface[] filteredBySubnet = filterBySubnet(stub, getByName("192.0.0.0"), 8); assertEquals(3, filteredBySubnet.length); assertThat(filteredBySubnet[0], sameInstance(ifc2)); assertThat(filteredBySubnet[1], sameInstance(ifc3)); assertThat(filteredBySubnet[2], sameInstance(ifc1)); }
// Validates that the required mask-algorithm property exists and is exactly one character long.
public static void checkSingleChar(final Properties props, final String propKey, final MaskAlgorithm<?, ?> algorithm) { checkRequired(props, propKey, algorithm); ShardingSpherePreconditions.checkState(1 == props.getProperty(propKey).length(), () -> new AlgorithmInitializationException(algorithm, "%s's length must be one", propKey)); }
// Verifies a single-character property value passes validation without throwing.
@Test void assertCheckSingleCharSuccess() { Properties props = PropertiesBuilder.build(new Property("key", "1")); assertDoesNotThrow(() -> MaskAlgorithmPropertiesChecker.checkSingleChar(props, "key", mock(MaskAlgorithm.class))); }
// Returns the text answer for the given question id, or throws when no answer exists for it.
public TextAnswer getAnswerByQuestionId(long questionId) { if (!textAnswers.containsKey(questionId)) { throw new MissingTextAnswerForQuestionException(questionId); } return textAnswers.get(questionId); }
// Verifies the stored text answer is returned when looked up by its question id. (Test name in Korean: "returns the text answer by question id".)
@Test void 질문_ID로_서술형_답변을_반환한다() { // given TextAnswers textAnswers = new TextAnswers(List.of(new TextAnswer(1, "답".repeat(20)))); // when TextAnswer actual = textAnswers.getAnswerByQuestionId(1); // then assertThat(actual.getContent()).isEqualTo("답".repeat(20)); }
// Resolves the request body stream: raw InputStream, marshalled DTO, or UTF-8 bytes of a String body; otherwise fails with SalesforceException.
@Override protected InputStream getRequestStream(Exchange exchange) throws SalesforceException { InputStream request; Message in = exchange.getIn(); request = in.getBody(InputStream.class); if (request == null) { AbstractDTOBase dto = in.getBody(AbstractDTOBase.class); if (dto != null) { // marshall the DTO request = getRequestStream(in, dto); } else { // if all else fails, get body as String final String body = in.getBody(String.class); if (null == body) { String msg = "Unsupported request message body " + (in.getBody() == null ? null : in.getBody().getClass()); throw new SalesforceException(msg, null); } else { request = new ByteArrayInputStream(body.getBytes(StandardCharsets.UTF_8)); } } } return request; }
// Verifies fields listed in fieldsToNull are serialized as explicit JSON nulls in the request body.
@Test public void shouldSerializeNullValues() throws SalesforceException, IOException { final SalesforceComponent salesforce = new SalesforceComponent(); final SalesforceEndpointConfig configuration = new SalesforceEndpointConfig(); final SalesforceEndpoint endpoint = new SalesforceEndpoint("", salesforce, configuration, OperationName.UPDATE_SOBJECT, ""); final JsonRestProcessor jsonProcessor = new JsonRestProcessor(endpoint); final Message in = new DefaultMessage(new DefaultCamelContext()); TestObject testObject = new TestObject(); testObject.getFieldsToNull().add("creationDate"); try (InputStream stream = jsonProcessor.getRequestStream(in, testObject); InputStreamReader reader = new InputStreamReader(stream, StandardCharsets.UTF_8)) { final String json = IOUtils.toString(reader); assertThat(json) .isEqualTo("{\"creationDate\":null,\"attributes\":{\"referenceId\":null,\"type\":null,\"url\":null}}"); } }
// Test-only accessor for the configured decay factor.
@VisibleForTesting double getDecayFactor() { return decayFactor; }
// Verifies decay-factor resolution: the default when unset and a custom value from configuration.
@Test @SuppressWarnings("deprecation") public void testParseFactor() { // Default scheduler = new DecayRpcScheduler(1, "ipc.3", new Configuration()); assertEquals(DecayRpcScheduler.IPC_SCHEDULER_DECAYSCHEDULER_FACTOR_DEFAULT, scheduler.getDecayFactor(), 0.00001); // Custom Configuration conf = new Configuration(); conf.set("ipc.4." + DecayRpcScheduler.IPC_FCQ_DECAYSCHEDULER_FACTOR_KEY, "0.125"); scheduler = new DecayRpcScheduler(1, "ipc.4", conf); assertEquals(0.125, scheduler.getDecayFactor(), 0.00001); }
// Skips null records (e.g. commented or filtered lines) by reading until a record is produced or the end of input is reached.
@Override public OUT nextRecord(OUT record) throws IOException { OUT returnRecord = null; do { returnRecord = super.nextRecord(record); } while (returnRecord == null && !reachedEnd()); return returnRecord; }
// Verifies lines starting with the multi-char comment prefix "//" are skipped by the CSV input format.
@Test void ignoreMultiCharPrefixComments() { try { final String fileContent = "//description of the data\n" + "//successive commented line\n" + "this is|1|2.0|\n" + "a test|3|4.0|\n" + "//next|5|6.0|\n"; final FileInputSplit split = createTempFile(fileContent); final TupleTypeInfo<Tuple3<String, Integer, Double>> typeInfo = TupleTypeInfo.getBasicTupleTypeInfo(String.class, Integer.class, Double.class); final CsvInputFormat<Tuple3<String, Integer, Double>> format = new TupleCsvInputFormat<Tuple3<String, Integer, Double>>( PATH, "\n", "|", typeInfo); format.setCommentPrefix("//"); final Configuration parameters = new Configuration(); format.configure(parameters); format.open(split); Tuple3<String, Integer, Double> result = new Tuple3<String, Integer, Double>(); result = format.nextRecord(result); assertThat(result.f0).isEqualTo("this is"); assertThat(result.f1).isOne(); assertThat(result.f2).isEqualTo(new Double(2.0)); result = format.nextRecord(result); assertThat(result.f0).isEqualTo("a test"); assertThat(result.f1).isEqualTo(Integer.valueOf(3)); assertThat(result.f2).isEqualTo(new Double(4.0)); result = format.nextRecord(result); assertThat(result).isNull(); } catch (Exception ex) { ex.printStackTrace(); fail("Test failed due to a " + ex.getClass().getName() + ": " + ex.getMessage()); } }
/**
 * Loads a {@link PrivateKey} from a (potentially password-protected) key file.
 * Delegates to the three-argument overload with its final flag fixed to
 * {@code true} (presumably an "attempt fallback provider/format" switch —
 * confirm against the overload's definition, which is outside this view).
 *
 * @param keyFile file holding the encoded key
 * @param keyPassword password for an encrypted key, or {@code null} if unencrypted
 */
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword)
        throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException,
        InvalidAlgorithmParameterException, KeyException, IOException {
    return toPrivateKey(keyFile, keyPassword, true);
}
@Test
public void testPkcs1Des3EncryptedDsa() throws Exception {
    // A PKCS#1 DSA key encrypted with DES-EDE3 and the password "example"
    // must decode into a non-null PrivateKey.
    PrivateKey key = SslContext.toPrivateKey(
            new File(getClass().getResource("dsa_pkcs1_des3_encrypted.key").getFile()), "example");
    assertNotNull(key);
}
@Override
public Optional<CeTaskProcessor> getForCeTask(CeTask ceTask) {
    // Lookup is keyed purely on the task's type; no other task attribute
    // participates in processor resolution.
    CeTaskProcessor processor = taskProcessorByCeTaskType.get(ceTask.getType());
    return Optional.ofNullable(processor);
}
@Test
public void getForTask_returns_TaskProcessor_based_on_CeTask_type_only() {
    CeTaskProcessor taskProcessor = createCeTaskProcessor(SOME_CE_TASK_TYPE);
    CeTaskProcessorRepositoryImpl underTest = new CeTaskProcessorRepositoryImpl(new CeTaskProcessor[] {taskProcessor});

    // Same task type with different component keys must resolve to the same
    // processor instance: the component key is irrelevant to the lookup.
    assertThat(underTest.getForCeTask(createCeTask(SOME_CE_TASK_TYPE, SOME_COMPONENT_KEY))).containsSame(taskProcessor);
    assertThat(underTest.getForCeTask(createCeTask(SOME_CE_TASK_TYPE, SOME_COMPONENT_KEY + "2"))).containsSame(taskProcessor);
}
@Override
public Map<String, Integer> getCounts(UUID jobId) {
    // Lazily register a thread-safe counts map the first time a job is seen,
    // then always hand back that same live map on subsequent calls.
    return counts.computeIfAbsent(jobId, unused -> new ConcurrentHashMap<>());
}
@Test
public void canAddNewKeysToTheCurrentCountsTest() {
    // A first increment for a previously unseen key must create the entry
    // with a count of exactly one.
    addItemToJobStoreCounts(ITEM_NAME);
    final Map<String, Integer> counts = localJobStore.getCounts(jobId);
    Truth.assertThat(counts.size()).isEqualTo(1);
    Truth.assertThat(counts.get(ITEM_NAME)).isEqualTo(1);
}
/**
 * Serializes an outbound packet into the channel buffer. Identifier packets
 * are framed with the PostgreSQL-style message header (type byte followed by
 * a length placeholder that is back-filled once the body size is known).
 * If writing the payload throws, the partially written body is rolled back
 * and an error-response packet is written in its place.
 */
@Override
public void encode(final ChannelHandlerContext context, final DatabasePacket message, final ByteBuf out) {
    boolean isIdentifierPacket = message instanceof PostgreSQLIdentifierPacket;
    if (isIdentifierPacket) {
        // Reserve the header (type byte + 4-byte length placeholder) up front.
        prepareMessageHeader(out, ((PostgreSQLIdentifierPacket) message).getIdentifier().getValue());
    }
    PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(out, context.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get());
    try {
        message.write(payload);
        // CHECKSTYLE:OFF
    } catch (final RuntimeException ex) {
        // CHECKSTYLE:ON
        // Drop whatever was partially written, then emit an error packet instead.
        payload.getByteBuf().resetWriterIndex();
        // TODO consider what severity to use
        OpenGaussErrorResponsePacket errorResponsePacket = new OpenGaussErrorResponsePacket(
                PostgreSQLMessageSeverityLevel.ERROR, PostgreSQLVendorError.SYSTEM_ERROR.getSqlState().getValue(), ex.getMessage());
        // The error packet is itself an identifier packet, so the header/length
        // bookkeeping in the finally block must run for it.
        isIdentifierPacket = true;
        prepareMessageHeader(out, errorResponsePacket.getIdentifier().getValue());
        errorResponsePacket.write(payload);
    } finally {
        if (isIdentifierPacket) {
            // Back-fill the real message length now that the body is complete.
            updateMessageLength(out);
        }
    }
}
@Test
void assertEncodePostgreSQLIdentifierPacket() {
    PostgreSQLIdentifierPacket packet = mock(PostgreSQLIdentifierPacket.class);
    when(packet.getIdentifier()).thenReturn(PostgreSQLMessagePacketType.AUTHENTICATION_REQUEST);
    when(byteBuf.readableBytes()).thenReturn(9);
    new OpenGaussPacketCodecEngine().encode(context, packet, byteBuf);
    // Expected sequence: type byte, 4-byte length placeholder (0), payload,
    // then the length back-filled at offset 1 with 8 (9 readable bytes minus
    // the leading type byte).
    verify(byteBuf).writeByte(PostgreSQLMessagePacketType.AUTHENTICATION_REQUEST.getValue());
    verify(byteBuf).writeInt(0);
    verify(packet).write(any(PostgreSQLPacketPayload.class));
    verify(byteBuf).setInt(1, 8);
}
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
                   SortedMap<String, Counter> counters,
                   SortedMap<String, Histogram> histograms,
                   SortedMap<String, Meter> meters,
                   SortedMap<String, Timer> timers) {
    // All metric families share one timestamp (seconds) per report cycle.
    final long timestamp = TimeUnit.MILLISECONDS.toSeconds(clock.getTime());

    // SortedMap.forEach iterates in the same (key-sorted) order the original
    // entrySet loops did, so output ordering is unchanged.
    gauges.forEach((name, gauge) -> reportGauge(timestamp, name, gauge));
    counters.forEach((name, counter) -> reportCounter(timestamp, name, counter));
    histograms.forEach((name, histogram) -> reportHistogram(timestamp, name, histogram));
    meters.forEach((name, meter) -> reportMeter(timestamp, name, meter));
    timers.forEach((name, timer) -> reportTimer(timestamp, name, timer));
}
@Test
public void reportsGaugeValues() throws Exception {
    final Gauge<Integer> gauge = () -> 1;

    reporter.report(map("gauge", gauge), map(), map(), map(), map());

    // One CSV file per metric, named after the gauge, containing a header
    // row and one timestamped value row.
    assertThat(fileContents("gauge.csv"))
            .isEqualTo(csv(
                    "t,value",
                    "19910191,1"
            ));
}
/**
 * Tells whether the given target path participates in entropy injection on
 * the given file system: the file system must be entropy-injecting, expose a
 * non-null injection key, and the path must contain that key.
 */
public static boolean isEntropyInjecting(FileSystem fs, Path target) {
    final EntropyInjectingFileSystem entropyFs = getEntropyFs(fs);
    if (entropyFs == null || entropyFs.getEntropyInjectionKey() == null) {
        return false;
    }
    return target.getPath().contains(entropyFs.getEntropyInjectionKey());
}
@Test
void testIsEntropyFsPathDoesNotIncludeEntropyKey() throws Exception {
    // The file system injects entropy, but the path never mentions the key
    // marker, so injection must not be reported for it.
    final String entropyKey = "_test_";
    final FileSystem efs = new TestEntropyInjectingFs(entropyKey, "ignored");
    final File folder = TempDirUtils.newFolder(tempFolder);
    final Path path = new Path(Path.fromLocalFile(folder), "path");

    // no entropy key
    assertThat(EntropyInjector.isEntropyInjecting(efs, path)).isFalse();
}
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    // Refresh first so planning always sees the table's latest snapshot state.
    table.refresh();
    // A missing position means this is the very first discovery pass.
    return lastPosition == null
        ? discoverInitialSplits()
        : discoverIncrementalSplits(lastPosition);
}
@Test
public void testIncrementalFromLatestSnapshotWithNonEmptyTable() throws Exception {
    appendTwoSnapshots();

    ScanContext scanContext =
        ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_LATEST_SNAPSHOT)
            .build();
    ContinuousSplitPlannerImpl splitPlanner =
        new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);

    // First plan: no prior position, so no splits are discovered yet.
    ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
    assertThat(initialResult.fromPosition()).isNull();
    // For inclusive behavior, the initial result should point to snapshot1
    // Then the next incremental scan shall discover files from latest snapshot2 (inclusive)
    assertThat(initialResult.toPosition().snapshotId().longValue())
        .isEqualTo(snapshot1.snapshotId());
    assertThat(initialResult.toPosition().snapshotTimestampMs().longValue())
        .isEqualTo(snapshot1.timestampMillis());
    assertThat(initialResult.splits()).isEmpty();

    // Second plan: advances from snapshot1 (exclusive) to snapshot2 (inclusive).
    ContinuousEnumerationResult secondResult = splitPlanner.planSplits(initialResult.toPosition());
    assertThat(secondResult.fromPosition().snapshotId().longValue())
        .isEqualTo(snapshot1.snapshotId());
    assertThat(secondResult.fromPosition().snapshotTimestampMs().longValue())
        .isEqualTo(snapshot1.timestampMillis());
    assertThat(secondResult.toPosition().snapshotId().longValue())
        .isEqualTo(snapshot2.snapshotId());
    assertThat(secondResult.toPosition().snapshotTimestampMs().longValue())
        .isEqualTo(snapshot2.timestampMillis());
    IcebergSourceSplit split = Iterables.getOnlyElement(secondResult.splits());
    assertThat(split.task().files()).hasSize(1);
    Set<String> discoveredFiles =
        split.task().files().stream()
            .map(fileScanTask -> fileScanTask.file().path().toString())
            .collect(Collectors.toSet());
    // should discover dataFile2 appended in snapshot2
    Set<String> expectedFiles = ImmutableSet.of(dataFile2.path().toString());
    assertThat(discoveredFiles).containsExactlyElementsOf(expectedFiles);

    // Subsequent cycles keep advancing one snapshot at a time.
    IcebergEnumeratorPosition lastPosition = secondResult.toPosition();
    for (int i = 0; i < 3; ++i) {
        lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
    }
}
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    // Pick up any snapshots committed since the previous planning cycle.
    table.refresh();
    if (lastPosition == null) {
        // First invocation: derive the starting position from the scan context.
        return discoverInitialSplits();
    }
    return discoverIncrementalSplits(lastPosition);
}
@Test
public void testIncrementalFromSnapshotIdWithInvalidIds() throws Exception {
    appendTwoSnapshots();

    // find an invalid snapshotId
    long invalidSnapshotId = 0L;
    while (invalidSnapshotId == snapshot1.snapshotId()
        || invalidSnapshotId == snapshot2.snapshotId()) {
        invalidSnapshotId++;
    }

    ScanContext scanContextWithInvalidSnapshotId =
        ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_ID)
            .startSnapshotId(invalidSnapshotId)
            .build();

    ContinuousSplitPlannerImpl splitPlanner =
        new ContinuousSplitPlannerImpl(
            TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null);

    // Planning must fail fast when the configured start snapshot is unknown.
    assertThatThrownBy(() -> splitPlanner.planSplits(null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Start snapshot id not found in history: " + invalidSnapshotId);
}
/**
 * REST endpoint: creates a new model from the (all-optional) request fields
 * and returns its REST representation with HTTP 201 Created.
 */
@ApiOperation(value = "Create a model", tags = { "Models" }, notes = "All request values are optional. For example, you can only include the name attribute in the request body JSON-object, only setting the name of the model, leaving all other fields null.", code = 201)
@ApiResponses(value = { @ApiResponse(code = 201, message = "Indicates the model was created.") })
@PostMapping(value = "/repository/models", produces = "application/json")
@ResponseStatus(HttpStatus.CREATED)
public ModelResponse createModel(@RequestBody ModelRequest modelRequest) {
    Model model = repositoryService.newModel();
    // Copy every request field onto the new model; absent fields stay null.
    model.setCategory(modelRequest.getCategory());
    model.setDeploymentId(modelRequest.getDeploymentId());
    model.setKey(modelRequest.getKey());
    model.setMetaInfo(modelRequest.getMetaInfo());
    model.setName(modelRequest.getName());
    model.setVersion(modelRequest.getVersion());
    model.setTenantId(modelRequest.getTenantId());

    // Give the optional interceptor a chance to adjust/veto before persisting.
    if (restApiInterceptor != null) {
        restApiInterceptor.createModel(model, modelRequest);
    }

    repositoryService.saveModel(model);
    return restResponseFactory.createModelResponse(model);
}
@Test
@Deployment(resources = { "org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml" })
public void testCreateModel() throws Exception {
    Model model = null;
    try {
        // Freeze the engine clock so create/update timestamps are predictable.
        Calendar createTime = Calendar.getInstance();
        createTime.set(Calendar.MILLISECOND, 0);
        processEngineConfiguration.getClock().setCurrentTime(createTime.getTime());

        // Create create request
        ObjectNode requestNode = objectMapper.createObjectNode();
        requestNode.put("name", "Model name");
        requestNode.put("category", "Model category");
        requestNode.put("key", "Model key");
        requestNode.put("metaInfo", "Model metainfo");
        requestNode.put("deploymentId", deploymentId);
        requestNode.put("version", 2);
        requestNode.put("tenantId", "myTenant");

        HttpPost httpPost = new HttpPost(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_MODEL_COLLECTION));
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        CloseableHttpResponse response = executeRequest(httpPost, HttpStatus.SC_CREATED);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);
        assertThat(responseNode).isNotNull();
        // The REST payload must echo every submitted field plus derived URLs
        // and the (frozen) create/update timestamps.
        assertThatJson(responseNode)
                .when(Option.IGNORING_EXTRA_FIELDS)
                .isEqualTo("{"
                        + "name: 'Model name',"
                        + "key: 'Model key',"
                        + "category: 'Model category',"
                        + "version: 2,"
                        + "metaInfo: 'Model metainfo',"
                        + "deploymentId: '" + deploymentId + "',"
                        + "tenantId: 'myTenant',"
                        + "url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_MODEL, responseNode.get("id").textValue()) + "',"
                        + "deploymentUrl: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT, deploymentId) + "',"
                        + "createTime: " + new TextNode(getISODateStringWithTZ(createTime.getTime())) + ","
                        + "lastUpdateTime: " + new TextNode(getISODateStringWithTZ(createTime.getTime()))
                        + "}");

        model = repositoryService.createModelQuery().modelId(responseNode.get("id").textValue()).singleResult();
        assertThat(model).isNotNull();
        // The persisted entity must match the request field-for-field.
        assertThat(model.getCategory()).isEqualTo("Model category");
        assertThat(model.getName()).isEqualTo("Model name");
        assertThat(model.getKey()).isEqualTo("Model key");
        assertThat(model.getDeploymentId()).isEqualTo(deploymentId);
        assertThat(model.getMetaInfo()).isEqualTo("Model metainfo");
        assertThat(model.getTenantId()).isEqualTo("myTenant");
        assertThat(model.getVersion().intValue()).isEqualTo(2);
    } finally {
        // Best-effort cleanup of the model created through the REST call.
        if (model != null) {
            try {
                repositoryService.deleteModel(model.getId());
            } catch (Throwable ignore) {
            }
        }
    }
}
/**
 * Validates that a combined service name follows the
 * {@code groupName@@serviceName} convention, rejecting inputs with no
 * splitter or an empty group part.
 *
 * @throws IllegalArgumentException if the format is invalid
 */
public static void checkServiceNameFormat(String combineServiceName) {
    final String[] parts = combineServiceName.split(Constants.SERVICE_INFO_SPLITER);
    if (parts.length <= 1) {
        throw new IllegalArgumentException(
                "Param 'serviceName' is illegal, it should be format as 'groupName@@serviceName'");
    }
    if (parts[0].isEmpty()) {
        throw new IllegalArgumentException("Param 'serviceName' is illegal, groupName can't be empty");
    }
}
@Test
void testCheckServiceNameFormatWithoutGroupAndService() {
    // "@@" alone contains the splitter but neither a group nor a service
    // name, so validation must reject it.
    assertThrows(IllegalArgumentException.class,
            () -> NamingUtils.checkServiceNameFormat("@@"));
}
/**
 * Returns the availability zone reported for this EC2 instance by the
 * instance metadata API.
 */
@Override
public String getAvailabilityZone() {
    return awsMetadataApi.availabilityZoneEc2();
}
@Test
public void getAvailabilityZone() {
    // given
    String expectedResult = "us-east-1a";
    given(awsMetadataApi.availabilityZoneEc2()).willReturn(expectedResult);

    // when
    String result = awsEc2Client.getAvailabilityZone();

    // then: the client passes the metadata API's answer through unchanged
    assertEquals(expectedResult, result);
}
/**
 * Joins this stream with {@code otherStream} over the given windows.
 * Adapts the key-less {@link ValueJoiner} to the key-aware variant and
 * delegates to the primary join overload.
 */
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
                                    final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                    final JoinWindows windows) {
    return join(otherStream, toValueJoinerWithKey(joiner), windows);
}
@SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullValueJoinerWithKeyOnJoinWithStreamJoined() {
    // A null joiner must be rejected eagerly with a descriptive NPE, before
    // any topology is built.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.join(
            testStream,
            (ValueJoinerWithKey<? super String, ? super String, ? super String, ?>) null,
            JoinWindows.of(ofMillis(10)),
            StreamJoined.as("name")));
    assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
/**
 * Clears all values from this ordered-list state. Pre-existing iterables
 * remain readable: the old pending structures are replaced rather than
 * mutated in place, so reads snapshotted before the clear are unaffected.
 *
 * @throws IllegalStateException if the state has already been closed
 */
public void clear() {
    checkState(
        !isClosed,
        "OrderedList user state is no longer usable because it is closed for %s",
        requestTemplate.getStateKey());
    isCleared = true;
    // Create a new object for pendingRemoves and clear the mappings in pendingAdds.
    // The entire tree range set of pendingRemoves and the old values in the pendingAdds are kept,
    // so that they will still be accessible in pre-existing iterables even after the state is
    // cleared.
    pendingRemoves = TreeRangeSet.create();
    // Mark the full timestamp domain [MIN_VALUE, MAX_VALUE) as removed.
    pendingRemoves.add(
        Range.range(
            Instant.ofEpochMilli(Long.MIN_VALUE),
            BoundType.CLOSED,
            Instant.ofEpochMilli(Long.MAX_VALUE),
            BoundType.OPEN));
    pendingAdds.clear();
}
@Test
public void testClear() throws Exception {
    FakeBeamFnStateClient fakeClient =
        new FakeBeamFnStateClient(
            timestampedValueCoder,
            ImmutableMap.of(
                createOrderedListStateKey("A", 1), asList(A1, B1),
                createOrderedListStateKey("A", 4), Collections.singletonList(A4),
                createOrderedListStateKey("A", 2), asList(A2, B2),
                createOrderedListStateKey("A", 3), Collections.singletonList(A3)));
    OrderedListUserState<String> userState =
        new OrderedListUserState<>(
            Caches.noop(),
            fakeClient,
            "instructionId",
            createOrderedListStateKey("A"),
            StringUtf8Coder.of());

    // An iterable obtained before clear() must keep serving the old contents.
    Iterable<TimestampedValue<String>> stateBeforeClear = userState.read();
    userState.clear();

    assertArrayEquals(
        asList(A1, B1, A2, B2, A3, A4).toArray(),
        Iterables.toArray(stateBeforeClear, TimestampedValue.class));
    // A fresh read after clear() must observe an empty state.
    assertThat(userState.read(), is(emptyIterable()));

    // A closed state rejects further mutations.
    userState.asyncClose();
    assertThrows(IllegalStateException.class, () -> userState.clear());
}
/**
 * Fuzzy structural equality for two flat-record object nodes: both null is
 * equal, one null is not, schema names must match, and field comparison is
 * restricted to the schema intersection computed by
 * {@code extractCommonObjectSchema}.
 */
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
    if (left == null || right == null) {
        // Equal only when both references are null.
        return left == right;
    }
    String leftSchemaName = left.getSchema().getName();
    if (!leftSchemaName.equals(right.getSchema().getName())) {
        return false;
    }
    extractCommonObjectSchema(left, right);
    return compare(left, right);
}
@Test
public void shouldFindRecordsEqualOnDifferentDataModelsWithDifferentValues() {
    RecordWithSubObject1 left = new RecordWithSubObject1();
    left.id = "ID";
    left.intField = 1;
    left.subObject = new RecordSubObject();
    left.subObject.stringField = "A";
    left.subObject.intField = 1;
    writer1.reset();
    mapper1.writeFlat(left, writer1);
    FlatRecord leftRec = writer1.generateFlatRecord();

    // RecordWithSubObject2 does not have a subObject field
    RecordWithSubObject2 right = new RecordWithSubObject2();
    right.id = "ID";
    right.intField = 1;
    writer2.reset();
    mapper2.writeFlat(right, writer2);
    FlatRecord rightRec = writer2.generateFlatRecord();

    // With fuzzy matching, the records are equal if the intersection of the schemas have the same fields.
    // In this case, `RecordWithSubObject2` does not know about `subObject` so it's not considered in the
    // equality check. The check must also be symmetric (left/right swapped).
    Assertions.assertThat(FlatRecordTraversalObjectNodeEquality.equals(new FlatRecordTraversalObjectNode(leftRec), new FlatRecordTraversalObjectNode(rightRec))).isTrue();
    Assertions.assertThat(FlatRecordTraversalObjectNodeEquality.equals(new FlatRecordTraversalObjectNode(rightRec), new FlatRecordTraversalObjectNode(leftRec))).isTrue();
}
@Override
public ExecutorService newThreadPool(ThreadPoolProfile profile, ThreadFactory threadFactory) {
    // Build the real pool first, then wrap it so submitted/running/completed/
    // duration metrics are recorded under the profile's id.
    final ExecutorService delegate = threadPoolFactory.newThreadPool(profile, threadFactory);
    return new InstrumentedExecutorService(delegate, metricRegistry, profile.getId());
}
@Test
public void testNewThreadPool() {
    final ExecutorService executorService = instrumentedThreadPoolFactory.newThreadPool(profile, threadFactory);

    assertThat(executorService, is(notNullValue()));
    assertThat(executorService, is(instanceOf(InstrumentedExecutorService.class)));

    // Wrapping must register the four standard executor metrics, in this order.
    inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "submitted" }));
    inOrder.verify(registry, times(1)).counter(MetricRegistry.name(METRICS_NAME, new String[] { "running" }));
    inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "completed" }));
    inOrder.verify(registry, times(1)).timer(MetricRegistry.name(METRICS_NAME, new String[] { "duration" }));
}
/** Creates an empty null-value vector backed by a thread-safe roaring bitmap. */
public MutableNullValueVector() {
    _nullBitmap = new ThreadSafeMutableRoaringBitmap();
}
@Test
public void testMutableNullValueVector() {
    // Mark a set of random doc ids (duplicates possible) as null.
    int[] docIds = new int[NUM_DOCS];
    for (int i = 0; i < NUM_DOCS; i++) {
        int docId = RANDOM.nextInt(MAX_DOC_ID);
        _nullValueVector.setNull(docId);
        docIds[i] = docId;
    }
    // Every doc id marked above must read back as null.
    for (int i = 0; i < NUM_DOCS; i++) {
        Assert.assertTrue(_nullValueVector.isNull(docIds[i]));
    }
}
/**
 * Lists the private addresses of all ECS tasks in the given cluster,
 * returning an empty list when no tasks are found or described.
 */
List<String> listTaskPrivateAddresses(String cluster, AwsCredentials credentials) {
    LOGGER.fine("Listing tasks from cluster: '%s'", cluster);
    List<String> taskArns = listTasks(cluster, credentials);
    LOGGER.fine("AWS ECS ListTasks found the following tasks: %s", taskArns);

    // Guard clauses replace the original nested conditionals; behavior is identical.
    if (taskArns.isEmpty()) {
        return emptyList();
    }
    List<Task> tasks = describeTasks(cluster, taskArns, credentials);
    if (tasks.isEmpty()) {
        return emptyList();
    }
    return tasks.stream().map(Task::getPrivateAddress).collect(Collectors.toList());
}
@Test
public void awsError() {
    // given
    int errorCode = HttpURLConnection.HTTP_UNAUTHORIZED;
    String errorMessage = "Error message retrieved from AWS";
    stubFor(post(urlMatching("/.*"))
        .willReturn(aResponse().withStatus(errorCode).withBody(errorMessage)));

    // when
    Exception exception = assertThrows(
        Exception.class,
        () -> awsEcsApi.listTaskPrivateAddresses("cluster-arn", CREDENTIALS));

    // then: both the HTTP status and the AWS error body must surface in the message
    assertTrue(exception.getMessage().contains(Integer.toString(errorCode)));
    assertTrue(exception.getMessage().contains(errorMessage));
}
/**
 * Applies a regex replacement to {@code value} and, when at least one match
 * was replaced, pushes the rewritten string through {@code setter}.
 *
 * @return the number of replacements performed (0 for blank input or no match)
 */
public static int replaceValue(String regex, String replaceBy, boolean caseSensitive, String value,
        Consumer<? super String> setter) {
    // Blank (null/empty/whitespace-only) values are reported as zero
    // replacements; neither the regex nor the setter is ever touched.
    if (StringUtils.isBlank(value)) {
        return 0;
    }
    final Object[] result = replaceAllWithRegex(value, regex, replaceBy, caseSensitive);
    final int totalReplaced = (Integer) result[1];
    if (totalReplaced > 0) {
        setter.accept((String) result[0]);
        return totalReplaced;
    }
    return 0;
}
@Test
public void testReplaceValueWithNullValue() {
    // A null value counts as "blank": no replacement happens and none of the
    // other (also null) arguments are touched, so this must not throw.
    assertThat(JOrphanUtils.replaceValue(null, null, false, null, null), CoreMatchers.is(0));
}
/**
 * Resolves the target data node for a precise (single-value) sharding value
 * by evaluating the configured inline expression with the column value bound.
 *
 * @throws NullShardingValueException if the sharding value is null
 * @throws MismatchedInlineShardingAlgorithmExpressionAndColumnException if the
 *         expression does not reference the sharding column (checked up front,
 *         and again when expression evaluation fails on a missing method)
 */
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
    ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
    String columnName = shardingValue.getColumnName();
    ShardingSpherePreconditions.checkState(algorithmExpression.contains(columnName),
            () -> new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName));
    try {
        // Evaluate the inline expression with the column bound to the value.
        return InlineExpressionParserFactory.newInstance(algorithmExpression).evaluateWithArgs(Collections.singletonMap(columnName, shardingValue.getValue()));
    } catch (final MissingMethodException ignored) {
        // A missing-method failure indicates the expression and column do not match.
        throw new MismatchedInlineShardingAlgorithmExpressionAndColumnException(algorithmExpression, columnName);
    }
}
@Test
void assertDoShardingWithNegative() {
    List<String> availableTargetNames = Lists.newArrayList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
    // Negative order ids must still map into the table range: -1 -> t_order_1
    // and -4 -> t_order_0 (presumably absolute-value based — confirm against
    // the algorithm's configured expression).
    assertThat(negativeNumberInlineShardingAlgorithm.doSharding(availableTargetNames,
            new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, -1)), is("t_order_1"));
    assertThat(negativeNumberInlineShardingAlgorithm.doSharding(availableTargetNames,
            new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, -4)), is("t_order_0"));
}
/**
 * Moves every element matching {@code predicate} from
 * {@code targetCollection} into {@code resultCollection}.
 *
 * @param targetCollection collection to remove matching elements from
 * @param resultCollection collection receiving the removed elements
 * @param predicate non-null test selecting which elements to move
 * @return {@code resultCollection}, for convenience
 */
public static <T extends Collection<E>, E> T removeWithAddIf(T targetCollection, T resultCollection, Predicate<? super E> predicate) {
    Objects.requireNonNull(predicate);
    // Iterator-based removal is mandatory: removing through the collection
    // itself while iterating would throw ConcurrentModificationException.
    for (final Iterator<E> iterator = targetCollection.iterator(); iterator.hasNext(); ) {
        final E element = iterator.next();
        if (predicate.test(element)) {
            resultCollection.add(element);
            iterator.remove();
        }
    }
    return resultCollection;
}
@Test
public void testRemoveWithAddIf() {
    ArrayList<Integer> list = CollUtil.newArrayList(1, 2, 3);
    final ArrayList<Integer> exceptRemovedList = CollUtil.newArrayList(2, 3);
    final ArrayList<Integer> exceptResultList = CollUtil.newArrayList(1);

    // Two-arg variant: removed elements come back in a list created by the helper.
    List<Integer> resultList = CollUtil.removeWithAddIf(list, ele -> 1 == ele);
    assertEquals(list, exceptRemovedList);
    assertEquals(resultList, exceptResultList);

    // Three-arg variant: removed elements are added to the caller-supplied list.
    list = CollUtil.newArrayList(1, 2, 3);
    resultList = new ArrayList<>();
    CollUtil.removeWithAddIf(list, resultList, ele -> 1 == ele);
    assertEquals(list, exceptRemovedList);
    assertEquals(resultList, exceptResultList);
}
/**
 * Adds another grouped stream plus its aggregator to this cogrouping.
 * The value type is erased so heterogeneous grouped streams can share one
 * pattern map; each aggregator stays paired with its own stream.
 *
 * @return this cogrouped stream, for chaining
 */
@SuppressWarnings("unchecked")
@Override
public <VIn> CogroupedKStream<K, VOut> cogroup(final KGroupedStream<K, VIn> groupedStream,
                                               final Aggregator<? super K, ? super VIn, VOut> aggregator) {
    Objects.requireNonNull(groupedStream, "groupedStream can't be null");
    Objects.requireNonNull(aggregator, "aggregator can't be null");
    final KGroupedStreamImpl<K, ?> erasedStream = (KGroupedStreamImpl<K, ?>) groupedStream;
    final Aggregator<? super K, ? super Object, VOut> erasedAggregator =
            (Aggregator<? super K, ? super Object, VOut>) aggregator;
    groupPatterns.put(erasedStream, erasedAggregator);
    return this;
}
@Test
public void shouldNotHaveNullAggregatorOnCogroup() {
    // A null aggregator must be rejected eagerly with an NPE.
    assertThrows(NullPointerException.class, () -> cogroupedStream.cogroup(groupedStream, null));
}
/**
 * Callback for the user tapping the notification: hands the notification
 * payload to {@code digestNotification()} for processing.
 */
@Override
public void onOpened() {
    digestNotification();
}
@Test
public void onOpened_noReactContext_setAsInitialNotification() throws Exception {
    // React is not initialized yet, so the notification cannot be processed
    // immediately and must be stashed as the app's initial notification.
    when(mAppLifecycleFacade.isReactInitialized()).thenReturn(false);
    Activity currentActivity = mock(Activity.class);
    when(mReactContext.getCurrentActivity()).thenReturn(currentActivity);

    final PushNotification uut = createUUT();
    uut.onOpened();

    verify(InitialNotificationHolder.getInstance()).set(any(PushNotificationProps.class));
}
/**
 * Sets the client value on this builder (presumably the transport client
 * implementation to use — confirm against the built config's contract).
 *
 * @return this builder, for chaining
 */
public ProviderBuilder client(String client) {
    this.client = client;
    return getThis();
}
@Test
void client() {
    ProviderBuilder builder = ProviderBuilder.newBuilder();
    builder.client("client");
    // The configured value must survive into the built config unchanged.
    Assertions.assertEquals("client", builder.build().getClient());
}
/**
 * Parses a feature identifier (URI or file path) into a normalized URI.
 * Legacy and OS-specific forms are handled before falling back to treating
 * the input as a file path; the branch order below is significant.
 *
 * @throws IllegalArgumentException if the identifier is empty
 */
public static URI parse(String featureIdentifier) {
    requireNonNull(featureIdentifier, "featureIdentifier may not be null");
    if (featureIdentifier.isEmpty()) {
        throw new IllegalArgumentException("featureIdentifier may not be empty");
    }

    // Legacy from the Cucumber Eclipse plugin
    // Older versions of Cucumber allowed it.
    if (CLASSPATH_SCHEME_PREFIX.equals(featureIdentifier)) {
        return rootPackageUri();
    }

    // Non-standard separators are normalized first so they are not mistaken
    // for URI syntax further down.
    if (nonStandardPathSeparatorInUse(featureIdentifier)) {
        String standardized = replaceNonStandardPathSeparator(featureIdentifier);
        return parseAssumeFileScheme(standardized);
    }

    // "C:\..."-style identifiers would otherwise look like a "C:" URI scheme.
    if (isWindowsOS() && pathContainsWindowsDrivePattern(featureIdentifier)) {
        return parseAssumeFileScheme(featureIdentifier);
    }

    if (probablyURI(featureIdentifier)) {
        return parseProbableURI(featureIdentifier);
    }

    return parseAssumeFileScheme(featureIdentifier);
}
@Test
void can_parse_relative_path_form() {
    URI uri = FeaturePath.parse("path/to/file.feature");

    // A relative path becomes an absolute file-scheme URI whose
    // scheme-specific part still ends with the original path.
    assertAll(
        () -> assertThat(uri.getScheme(), is("file")),
        () -> assertThat(uri.getSchemeSpecificPart(), endsWith("path/to/file.feature")));
}
/**
 * Exposes the live CQL session backing this instance, e.g. for executing
 * statements that callers have prepared and bound themselves.
 */
@Override
public CqlSession currentSession() {
    return cqlSession;
}
@Test
public void testQuery(CassandraParams params) {
    // Seed a single-row table to query against.
    params.execute("create table test_table(id int, value varchar, primary key (id));\n");
    params.execute("insert into test_table(id, value) values (1,'test1');\n");

    record Entity(Integer id, String value) {}

    var qctx = new QueryContext(
        "SELECT id, value FROM test_table WHERE value = :value allow filtering",
        "SELECT id, value FROM test_table WHERE value = ? allow filtering"
    );
    withDb(params, db -> {
        var result = db.query(qctx, stmt -> {
            var s = stmt.bind("test1");
            return db.currentSession().execute(s).map(row -> {
                // The null check is only needed for the primitive-backed column.
                var __id = row.isNull("id") ? null : row.getInt("id");
                var __value = row.getString("value");
                return new Entity(__id, __value);
            });
        });
        Assertions.assertThat(result)
            .hasSize(1)
            .first()
            .isEqualTo(new Entity(1, "test1"));
    });
}
/**
 * Average loss = gross loss divided by the number of losing positions.
 * Both guard clauses keep the division well-defined and return zero when
 * there are no losing positions or no gross loss.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    final Num losingPositions = numberOfLosingPositionsCriterion.calculate(series, position);
    if (losingPositions.isZero()) {
        return series.zero();
    }
    final Num grossLoss = grossLossCriterion.calculate(series, position);
    return grossLoss.isZero() ? series.zero() : grossLoss.dividedBy(losingPositions);
}
@Test
public void calculateOnlyWithProfitPositions() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
            Trade.buyAt(3, series), Trade.sellAt(5, series));
    AnalysisCriterion avgLoss = getCriterion();
    // Both positions close with a profit, so there are no losses to average.
    assertNumEquals(0, avgLoss.calculate(series, tradingRecord));
}
/**
 * Removes the purchase row linking the given customer to the given product.
 * Deleting a non-existent pairing is a no-op.
 */
@Override
public void deleteProduct(Product product, Customer customer) throws SQLException {
    var sql = "delete from PURCHASES where product_name = ? and customer_name = ?";
    // try-with-resources guarantees both the connection and statement close.
    try (var connection = dataSource.getConnection();
         var statement = connection.prepareStatement(sql)) {
        statement.setString(1, product.getName());
        statement.setString(2, customer.getName());
        statement.executeUpdate();
    }
}
@Test
void shouldDeleteProductFromPurchases() throws SQLException {
    // Seed a customer, a product, and the purchase row linking them.
    TestUtils.executeSQL(INSERT_CUSTOMER_SQL, dataSource);
    TestUtils.executeSQL(ProductDaoImplTest.INSERT_PRODUCT_SQL, dataSource);
    TestUtils.executeSQL(INSERT_PURCHASES_SQL, dataSource);

    customerDao.deleteProduct(product, customer);

    // After deletion the purchases table must be empty.
    try (var connection = dataSource.getConnection();
         var statement = connection.createStatement();
         ResultSet rs = statement.executeQuery(SELECT_PURCHASES_SQL)) {
        assertFalse(rs.next());
    }
}
/**
 * Scans every {@code .class} resource under {@code pkg} (recursively) and
 * returns the classes assignable to {@code requestClass}. A failed scan is
 * logged and yields the (possibly partial) results gathered so far.
 *
 * @param pkg package name to scan, e.g. {@code com.example.plugins}
 * @param requestClass supertype (class or interface) to filter by
 * @return matching classes; never null, possibly empty
 */
@Override
public <T> Set<Class<T>> getSubTypesOf(String pkg, Class<T> requestClass) {
    Set<Class<T>> set = new HashSet<>(16);
    String packageSearchPath = ResourcePatternResolver.CLASSPATH_ALL_URL_PREFIX
            + ClassUtils.convertClassNameToResourcePath(pkg) + '/' + "**/*.class";
    try {
        Resource[] resources = resourcePatternResolver.getResources(packageSearchPath);
        for (Resource resource : resources) {
            Class<?> scanClass = getClassByResource(resource);
            if (requestClass.isAssignableFrom(scanClass)) {
                // Safe: isAssignableFrom proves scanClass is a subtype of T,
                // so suppress the unavoidable unchecked cast at minimal scope.
                @SuppressWarnings("unchecked")
                Class<T> subType = (Class<T>) scanClass;
                set.add(subType);
            }
        }
    } catch (IOException | ClassNotFoundException e) {
        // Best effort by design: log the failure instead of propagating.
        LOGGER.error("scan path: {} failed", packageSearchPath, e);
    }
    return set;
}
@Test
void testGetSubTypesOfWithException() throws NoSuchFieldException, IllegalAccessException, IOException {
    setResolver();
    String path = AnnotationClass.class.getPackage().getName();
    // A failing resource scan must be swallowed and reported as "no matches".
    when(pathMatchingResourcePatternResolver.getResources(anyString())).thenThrow(new IOException("test"));
    Set<Class<MockClass>> subTypesOf = packageScan.getSubTypesOf(path, MockClass.class);
    assertTrue(subTypesOf.isEmpty());
}
/**
 * Validates the configured (space-separated) e-mail addresses, optionally
 * performing an SMTP check. The result starts out as failed (one error) and
 * is only flipped to success when every address validates; validation stops
 * at the first invalid address.
 *
 * @param previousResult result object from the previous entry, mutated and returned
 * @param nr entry number within the job (unused here)
 * @return the (same) result object, flagged with success/error counts
 */
public Result execute( Result previousResult, int nr ) {
    // Pessimistic default: assume failure until all addresses validate.
    Result result = previousResult;
    result.setNrErrors( 1 );
    result.setResult( false );

    String realEmailAddress = environmentSubstitute( emailAddress );
    if ( Utils.isEmpty( realEmailAddress ) ) {
        logError( BaseMessages.getString( PKG, "JobEntryMailValidator.Error.EmailEmpty" ) );
        return result;
    }
    String realSender = environmentSubstitute( emailSender );
    if ( smtpCheck ) {
        // The SMTP probe needs a sender address; fail early without one.
        if ( Utils.isEmpty( realSender ) ) {
            logError( BaseMessages.getString( PKG, "JobEntryMailValidator.Error.EmailSenderEmpty" ) );
            return result;
        }
    }

    String realDefaultSMTP = environmentSubstitute( defaultSMTP );
    int timeOut = Const.toInt( environmentSubstitute( timeout ), 0 );

    // Split the mail-address: separated by space
    String[] mailsCheck = realEmailAddress.split( " " );
    boolean exitloop = false;
    boolean mailIsValid = false;
    String MailError = null;
    for ( int i = 0; i < mailsCheck.length && !exitloop; i++ ) {
        String email = mailsCheck[i];
        if ( log.isDetailed() ) {
            logDetailed( BaseMessages.getString( PKG, "JobEntryMailValidator.CheckingMail", email ) );
        }

        // Check if address is valid
        MailValidationResult resultValidator =
            MailValidation.isAddressValid( log, email, realSender, realDefaultSMTP, timeOut, smtpCheck );

        mailIsValid = resultValidator.isValide();
        MailError = resultValidator.getErrorMessage();

        if ( log.isDetailed() ) {
            if ( mailIsValid ) {
                logDetailed( BaseMessages.getString( PKG, "JobEntryMailValidator.MailValid", email ) );
            } else {
                logDetailed( BaseMessages.getString( PKG, "JobEntryMailValidator.MailNotValid", email ) );
                logDetailed( MailError );
            }
        }
        // invalid mail? exit loop
        if ( !resultValidator.isValide() ) {
            exitloop = true;
        }
    }

    // The overall result reflects the last address checked (all valid, or
    // the first invalid one encountered).
    result.setResult( mailIsValid );
    if ( mailIsValid ) {
        result.setNrErrors( 0 );
    }

    // return result
    return result;
}
@Test
public void testExecute() {
    KettleLogStore.init();
    Result previousResult = new Result();
    JobEntryMailValidator validator = new JobEntryMailValidator();
    // With no address configured, execute must still return a Result object
    // (flagged as failed internally) rather than throwing.
    Result result = validator.execute( previousResult, 0 );
    assertNotNull( result );
}
/**
 * Drives group membership and offset-commit housekeeping for one poll cycle.
 * Returns {@code false} when the caller should retry (coordinator unknown or
 * the (re)join did not complete within the timer), {@code true} otherwise.
 *
 * @param timer bounds how long this call may block
 * @param waitForJoinGroup whether to block on the join-group round-trip
 */
public boolean poll(Timer timer, boolean waitForJoinGroup) {
    maybeUpdateSubscriptionMetadata();

    invokeCompletedOffsetCommitCallbacks();

    if (subscriptions.hasAutoAssignedPartitions()) {
        if (protocol == null) {
            throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG +
                " to empty while trying to subscribe for group protocol to auto assign partitions");
        }
        // Always update the heartbeat last poll time so that the heartbeat thread does not leave the
        // group proactively due to application inactivity even if (say) the coordinator cannot be found.
        pollHeartbeat(timer.currentTimeMs());
        if (coordinatorUnknownAndUnreadySync(timer)) {
            return false;
        }

        if (rejoinNeededOrPending()) {
            // due to a race condition between the initial metadata fetch and the initial rebalance,
            // we need to ensure that the metadata is fresh before joining initially. This ensures
            // that we have matched the pattern against the cluster's topics at least once before joining.
            if (subscriptions.hasPatternSubscription()) {
                // For consumer group that uses pattern-based subscription, after a topic is created,
                // any consumer that discovers the topic after metadata refresh can trigger rebalance
                // across the entire consumer group. Multiple rebalances can be triggered after one topic
                // creation if consumers refresh metadata at vastly different times. We can significantly
                // reduce the number of rebalances caused by single topic creation by asking consumer to
                // refresh metadata before re-joining the group as long as the refresh backoff time has
                // passed.
                if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
                    this.metadata.requestUpdate(true);
                }

                if (!client.ensureFreshMetadata(timer)) {
                    return false;
                }

                maybeUpdateSubscriptionMetadata();
            }

            // if not wait for join group, we would just use a timer of 0
            if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
                // since we may use a different timer in the callee, we'd still need
                // to update the original timer's current time after the call
                timer.update(time.milliseconds());

                return false;
            }
        }
    } else {
        // For manually assigned partitions, we do not try to pro-actively lookup coordinator;
        // instead we only try to refresh metadata when necessary.
        // If connections to all nodes fail, wakeups triggered while attempting to send fetch
        // requests result in polls returning immediately, causing a tight loop of polls. Without
        // the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
        // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
        if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
            client.awaitMetadataUpdate(timer);
        }

        // if there is pending coordinator requests, ensure they have a chance to be transmitted.
        client.pollNoWakeup();
    }

    maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
    return true;
}
@Test
public void testUpdateLastHeartbeatPollWhenCoordinatorUnknown() throws Exception {
    // If we are part of an active group and we cannot find the coordinator, we should nevertheless
    // continue to update the last poll time so that we do not expire the consumer
    subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // Join the group, but signal a coordinator change after the first heartbeat
    client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
    client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
    client.prepareResponse(heartbeatResponse(Errors.NOT_COORDINATOR));
    coordinator.poll(time.timer(Long.MAX_VALUE));
    // Advance past the heartbeat interval so the background thread sends a heartbeat,
    // which receives NOT_COORDINATOR and marks the coordinator unknown.
    time.sleep(heartbeatIntervalMs);
    // Await the first heartbeat which forces us to find a new coordinator
    TestUtils.waitForCondition(() -> !client.hasPendingResponses(), "Failed to observe expected heartbeat from background thread");
    assertTrue(coordinator.coordinatorUnknown());
    // poll() returns false (no coordinator) but must still record the poll time.
    assertFalse(coordinator.poll(time.timer(0)));
    assertEquals(time.milliseconds(), coordinator.heartbeat().lastPollTime());
    // Just under the rebalance timeout, the poll timeout must not have expired.
    time.sleep(rebalanceTimeoutMs - 1);
    assertFalse(coordinator.heartbeat().pollTimeoutExpired(time.milliseconds()));
}
/**
 * Performs an HTTP POST (form-encoded) against the current config server, failing over
 * through the server list until success, the retry budget is exhausted, or the overall
 * deadline ({@code now + readTimeoutMs}) passes.
 *
 * @param path          request path appended to the server address
 * @param headers       optional extra request headers; may be null
 * @param paramValues   form parameters to post
 * @param encode        NOTE(review): not referenced anywhere in this body — appears unused here
 * @param readTimeoutMs per-request read timeout, also used as the overall failover deadline
 * @return the first successful (non-failing) HTTP result
 * @throws ConnectException when retries are exhausted or no server responds before the deadline
 * @throws Exception       any unexpected error from the underlying REST call is rethrown as-is
 */
@Override
public HttpRestResult<String> httpPost(String path, Map<String, String> headers, Map<String, String> paramValues, String encode, long readTimeoutMs) throws Exception {
    // Absolute deadline for the whole failover loop, not just a single request.
    final long endTime = System.currentTimeMillis() + readTimeoutMs;
    String currentServerAddr = serverListMgr.getCurrentServerAddr();
    int maxRetry = this.maxRetry;
    HttpClientConfig httpConfig = HttpClientConfig.builder()
            .setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue())
            .setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(3000)).build();
    do {
        try {
            Header newHeaders = Header.newInstance();
            if (headers != null) {
                newHeaders.addAll(headers);
            }
            HttpRestResult<String> result = nacosRestTemplate.postForm(getUrl(currentServerAddr, path), httpConfig, newHeaders, paramValues, String.class);
            if (isFail(result)) {
                // Server responded but with a failing HTTP code; log and fail over below.
                LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}", currentServerAddr, result.getCode());
            } else {
                // Update the currently available server addr
                serverListMgr.updateCurrentServerAddr(currentServerAddr);
                return result;
            }
        } catch (ConnectException connectException) {
            // Connection-level failures are retriable: log and try the next server.
            LOGGER.error("[NACOS ConnectException httpPost] currentServerAddr: {}, err : {}", currentServerAddr, connectException.getMessage());
        } catch (SocketTimeoutException socketTimeoutException) {
            // Timeouts are likewise retriable against another server.
            LOGGER.error("[NACOS SocketTimeoutException httpPost] currentServerAddr: {}, err : {}", currentServerAddr, socketTimeoutException.getMessage());
        } catch (Exception ex) {
            // Any other error is considered non-retriable and propagated to the caller.
            LOGGER.error("[NACOS Exception httpPost] currentServerAddr: " + currentServerAddr, ex);
            throw ex;
        }
        // Fail over: walk the remaining servers first; once the iterator is exhausted,
        // spend one unit of the retry budget and refresh back to the head of the list.
        if (serverListMgr.getIterator().hasNext()) {
            currentServerAddr = serverListMgr.getIterator().next();
        } else {
            maxRetry--;
            if (maxRetry < 0) {
                throw new ConnectException(
                        "[NACOS HTTP-POST] The maximum number of tolerable server reconnection errors has been reached");
            }
            serverListMgr.refreshCurrentServerAddr();
        }
    } while (System.currentTimeMillis() <= endTime);
    LOGGER.error("no available server, currentServerAddr : {}", currentServerAddr);
    throw new ConnectException("no available server, currentServerAddr : " + currentServerAddr);
}
@Test
void testHttpPostWithRequestException() throws Exception {
    // Stub the REST template so successive calls fail with a connect error, then a
    // timeout, then a NacosException (the first non-retriable failure).
    when(nacosRestTemplate.<String>postForm(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class),
            any(Header.class), anyMap(), eq(String.class)))
            .thenThrow(new ConnectException(), new SocketTimeoutException(), new NacosException());
    // The agent retries through the retriable failures and surfaces the NacosException.
    assertThrows(NacosException.class,
            () -> serverHttpAgent.httpPost("/test", Collections.emptyMap(), Collections.emptyMap(), "UTF-8", 1000));
}
/**
 * Adds a new header with the specified {@code name} and {@code value}.
 *
 * @param name  the header name
 * @param value the header value; conversion of the object to its textual form is
 *              implementation-defined
 * @return {@code this}, allowing calls to be chained
 */
public abstract HttpHeaders add(String name, Object value);
/**
 * Adding a headers object to itself must be rejected with
 * {@link IllegalArgumentException} (it would otherwise iterate itself while mutating).
 */
@Test
public void testAddSelf() {
    final HttpHeaders headers = new DefaultHttpHeaders(false);
    // Lambda replaces the verbose anonymous Executable; behavior is identical.
    assertThrows(IllegalArgumentException.class, () -> headers.add(headers));
}
/**
 * Reacts to a change in the set of servers reporting a too-small poll-interval time box.
 * Work is skipped when the cached snapshot already has the same number of entries.
 */
@Override
public void onChange(List<JobRunrMetadata> metadataList) {
    // Guard: nothing to do if we already processed a list of this size.
    List<JobRunrMetadata> cached = this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList;
    if (cached != null && cached.size() == metadataList.size()) {
        return;
    }
    // Replace any previously-raised problem of this type with the current state.
    problems.removeProblemsOfType(PollIntervalInSecondsTimeBoxIsTooSmallProblem.PROBLEM_TYPE);
    // Only report when servers are affected and the CPU-allocation problem is not
    // already raised (that condition supersedes this one).
    boolean shouldReport = !metadataList.isEmpty()
            && !problems.containsProblemOfType(CpuAllocationIrregularityProblem.PROBLEM_TYPE);
    if (shouldReport) {
        problems.addProblem(new PollIntervalInSecondsTimeBoxIsTooSmallProblem(metadataList));
    }
    this.serversWithPollIntervalInSecondsTimeBoxTooSmallMetadataList = metadataList;
}
@Test
void ifChangesOnPollIntervalInSecondsTimeBoxIsTooSmallDetectedThenProblemCreated() {
    // One server reporting a too-small poll-interval time box.
    final JobRunrMetadata metadata = new JobRunrMetadata(
            PollIntervalInSecondsTimeBoxIsTooSmallNotification.class.getSimpleName(),
            "BackgroundJobServer " + UUID.randomUUID(), "23");

    pollIntervalInSecondsTimeBoxIsTooSmallProblemHandler.onChange(asList(metadata));

    // Exactly one problem of the expected type must be raised, carrying the metadata.
    verify(problems).addProblem(problemArgumentCaptor.capture());
    assertThat(problemArgumentCaptor.getValue())
            .isInstanceOf(PollIntervalInSecondsTimeBoxIsTooSmallProblem.class)
            .hasFieldOrPropertyWithValue("pollIntervalInSecondsTimeBoxIsTooSmallMetadataSet", asList(metadata));
}
/**
 * Verifies that {@code k} bytes are readable starting at {@code pos}.
 *
 * @param pos read position; must be non-negative
 * @param k   number of bytes required from {@code pos}
 * @throws IllegalArgumentException if {@code pos} is negative
 * @throws EOFException             if fewer than {@code k} bytes remain
 */
final void checkAvailable(int pos, int k) throws EOFException {
    if (pos < 0) {
        throw new IllegalArgumentException("Negative pos! -> " + pos);
    }
    final int remaining = size - pos;
    if (remaining < k) {
        throw new EOFException("Cannot read " + k + " bytes!");
    }
}
// Requesting one byte more than the backing data holds must raise EOFException.
@Test(expected = EOFException.class)
public void testCheckAvailable_EOF() throws Exception {
    in.checkAvailable(0, INIT_DATA.length + 1);
}
/**
 * Dispatches an admin request to its handler based on the request code.
 * Every case delegates to a dedicated handler method; unknown codes fall through to
 * {@link #getUnknownCmdResponse}.
 *
 * @param ctx     the Netty channel context of the incoming command
 * @param request the remoting command to process
 * @return the response command produced by the matching handler
 * @throws RemotingCommandException if a handler fails to decode/process the command
 */
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    switch (request.getCode()) {
        // ---- Topic management ----
        case RequestCode.UPDATE_AND_CREATE_TOPIC:
            return this.updateAndCreateTopic(ctx, request);
        case RequestCode.UPDATE_AND_CREATE_TOPIC_LIST:
            return this.updateAndCreateTopicList(ctx, request);
        case RequestCode.DELETE_TOPIC_IN_BROKER:
            return this.deleteTopic(ctx, request);
        case RequestCode.GET_ALL_TOPIC_CONFIG:
            return this.getAllTopicConfig(ctx, request);
        // ---- Timer message state ----
        case RequestCode.GET_TIMER_CHECK_POINT:
            return this.getTimerCheckPoint(ctx, request);
        case RequestCode.GET_TIMER_METRICS:
            return this.getTimerMetrics(ctx, request);
        // ---- Broker configuration ----
        case RequestCode.UPDATE_BROKER_CONFIG:
            return this.updateBrokerConfig(ctx, request);
        case RequestCode.GET_BROKER_CONFIG:
            return this.getBrokerConfig(ctx, request);
        // ---- Cold-data flow control ----
        case RequestCode.UPDATE_COLD_DATA_FLOW_CTR_CONFIG:
            return this.updateColdDataFlowCtrGroupConfig(ctx, request);
        case RequestCode.REMOVE_COLD_DATA_FLOW_CTR_CONFIG:
            return this.removeColdDataFlowCtrGroupConfig(ctx, request);
        case RequestCode.GET_COLD_DATA_FLOW_CTR_INFO:
            return this.getColdDataFlowCtrInfo(ctx);
        case RequestCode.SET_COMMITLOG_READ_MODE:
            return this.setCommitLogReadaheadMode(ctx, request);
        // ---- Offset / store queries ----
        case RequestCode.SEARCH_OFFSET_BY_TIMESTAMP:
            return this.searchOffsetByTimestamp(ctx, request);
        case RequestCode.GET_MAX_OFFSET:
            return this.getMaxOffset(ctx, request);
        case RequestCode.GET_MIN_OFFSET:
            return this.getMinOffset(ctx, request);
        case RequestCode.GET_EARLIEST_MSG_STORETIME:
            return this.getEarliestMsgStoretime(ctx, request);
        case RequestCode.GET_BROKER_RUNTIME_INFO:
            return this.getBrokerRuntimeInfo(ctx, request);
        // ---- Message-queue locking ----
        case RequestCode.LOCK_BATCH_MQ:
            return this.lockBatchMQ(ctx, request);
        case RequestCode.UNLOCK_BATCH_MQ:
            return this.unlockBatchMQ(ctx, request);
        // ---- Subscription groups ----
        case RequestCode.UPDATE_AND_CREATE_SUBSCRIPTIONGROUP:
            return this.updateAndCreateSubscriptionGroup(ctx, request);
        case RequestCode.GET_ALL_SUBSCRIPTIONGROUP_CONFIG:
            return this.getAllSubscriptionGroup(ctx, request);
        case RequestCode.DELETE_SUBSCRIPTIONGROUP:
            return this.deleteSubscriptionGroup(ctx, request);
        // ---- Stats and connection queries ----
        case RequestCode.GET_TOPIC_STATS_INFO:
            return this.getTopicStatsInfo(ctx, request);
        case RequestCode.GET_CONSUMER_CONNECTION_LIST:
            return this.getConsumerConnectionList(ctx, request);
        case RequestCode.GET_PRODUCER_CONNECTION_LIST:
            return this.getProducerConnectionList(ctx, request);
        case RequestCode.GET_ALL_PRODUCER_INFO:
            return this.getAllProducerInfo(ctx, request);
        case RequestCode.GET_CONSUME_STATS:
            return this.getConsumeStats(ctx, request);
        case RequestCode.GET_ALL_CONSUMER_OFFSET:
            return this.getAllConsumerOffset(ctx, request);
        case RequestCode.GET_ALL_DELAY_OFFSET:
            return this.getAllDelayOffset(ctx, request);
        case RequestCode.GET_ALL_MESSAGE_REQUEST_MODE:
            return this.getAllMessageRequestMode(ctx, request);
        // ---- Consumer offset / status control ----
        case RequestCode.INVOKE_BROKER_TO_RESET_OFFSET:
            return this.resetOffset(ctx, request);
        case RequestCode.INVOKE_BROKER_TO_GET_CONSUMER_STATUS:
            return this.getConsumerStatus(ctx, request);
        case RequestCode.QUERY_TOPIC_CONSUME_BY_WHO:
            return this.queryTopicConsumeByWho(ctx, request);
        case RequestCode.QUERY_TOPICS_BY_CONSUMER:
            return this.queryTopicsByConsumer(ctx, request);
        case RequestCode.QUERY_SUBSCRIPTION_BY_CONSUMER:
            return this.querySubscriptionByConsumer(ctx, request);
        case RequestCode.QUERY_CONSUME_TIME_SPAN:
            return this.queryConsumeTimeSpan(ctx, request);
        case RequestCode.GET_SYSTEM_TOPIC_LIST_FROM_BROKER:
            return this.getSystemTopicListFromBroker(ctx, request);
        // ---- Maintenance (no request payload used) ----
        case RequestCode.CLEAN_EXPIRED_CONSUMEQUEUE:
            return this.cleanExpiredConsumeQueue();
        case RequestCode.DELETE_EXPIRED_COMMITLOG:
            return this.deleteExpiredCommitLog();
        case RequestCode.CLEAN_UNUSED_TOPIC:
            return this.cleanUnusedTopic();
        // ---- Diagnostics ----
        case RequestCode.GET_CONSUMER_RUNNING_INFO:
            return this.getConsumerRunningInfo(ctx, request);
        case RequestCode.QUERY_CORRECTION_OFFSET:
            return this.queryCorrectionOffset(ctx, request);
        case RequestCode.CONSUME_MESSAGE_DIRECTLY:
            return this.consumeMessageDirectly(ctx, request);
        case RequestCode.CLONE_GROUP_OFFSET:
            return this.cloneGroupOffset(ctx, request);
        case RequestCode.VIEW_BROKER_STATS_DATA:
            return ViewBrokerStatsData(ctx, request);
        case RequestCode.GET_BROKER_CONSUME_STATS:
            return fetchAllConsumeStatsInBroker(ctx, request);
        case RequestCode.QUERY_CONSUME_QUEUE:
            return queryConsumeQueue(ctx, request);
        case RequestCode.UPDATE_AND_GET_GROUP_FORBIDDEN:
            return this.updateAndGetGroupForbidden(ctx, request);
        case RequestCode.GET_SUBSCRIPTIONGROUP_CONFIG:
            return this.getSubscriptionGroup(ctx, request);
        // ---- ACL configuration ----
        case RequestCode.UPDATE_AND_CREATE_ACL_CONFIG:
            return updateAndCreateAccessConfig(ctx, request);
        case RequestCode.DELETE_ACL_CONFIG:
            return deleteAccessConfig(ctx, request);
        case RequestCode.GET_BROKER_CLUSTER_ACL_INFO:
            return getBrokerAclConfigVersion(ctx, request);
        case RequestCode.UPDATE_GLOBAL_WHITE_ADDRS_CONFIG:
            return updateGlobalWhiteAddrsConfig(ctx, request);
        case RequestCode.RESUME_CHECK_HALF_MESSAGE:
            return resumeCheckHalfMessage(ctx, request);
        case RequestCode.GET_TOPIC_CONFIG:
            return getTopicConfig(ctx, request);
        case RequestCode.UPDATE_AND_CREATE_STATIC_TOPIC:
            return this.updateAndCreateStaticTopic(ctx, request);
        // ---- HA / controller coordination ----
        case RequestCode.NOTIFY_MIN_BROKER_ID_CHANGE:
            return this.notifyMinBrokerIdChange(ctx, request);
        case RequestCode.EXCHANGE_BROKER_HA_INFO:
            return this.updateBrokerHaInfo(ctx, request);
        case RequestCode.GET_BROKER_HA_STATUS:
            return this.getBrokerHaStatus(ctx, request);
        case RequestCode.RESET_MASTER_FLUSH_OFFSET:
            return this.resetMasterFlushOffset(ctx, request);
        case RequestCode.GET_BROKER_EPOCH_CACHE:
            return this.getBrokerEpochCache(ctx, request);
        case RequestCode.NOTIFY_BROKER_ROLE_CHANGED:
            return this.notifyBrokerRoleChanged(ctx, request);
        // ---- Auth: users ----
        case RequestCode.AUTH_CREATE_USER:
            return this.createUser(ctx, request);
        case RequestCode.AUTH_UPDATE_USER:
            return this.updateUser(ctx, request);
        case RequestCode.AUTH_DELETE_USER:
            return this.deleteUser(ctx, request);
        case RequestCode.AUTH_GET_USER:
            return this.getUser(ctx, request);
        case RequestCode.AUTH_LIST_USER:
            return this.listUser(ctx, request);
        // ---- Auth: ACLs ----
        case RequestCode.AUTH_CREATE_ACL:
            return this.createAcl(ctx, request);
        case RequestCode.AUTH_UPDATE_ACL:
            return this.updateAcl(ctx, request);
        case RequestCode.AUTH_DELETE_ACL:
            return this.deleteAcl(ctx, request);
        case RequestCode.AUTH_GET_ACL:
            return this.getAcl(ctx, request);
        case RequestCode.AUTH_LIST_ACL:
            return this.listAcl(ctx, request);
        default:
            return getUnknownCmdResponse(ctx, request);
    }
}
@Test
public void testQueryTopicByConsumer() throws RemotingCommandException {
    // The processor consults the consumer offset manager to resolve topics by group.
    when(brokerController.getConsumerOffsetManager()).thenReturn(consumerOffsetManager);

    // Build a QUERY_TOPICS_BY_CONSUMER request for group "group".
    QueryTopicsByConsumerRequestHeader header = new QueryTopicsByConsumerRequestHeader();
    header.setGroup("group");
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_TOPICS_BY_CONSUMER, header);
    request.makeCustomHeaderToNet();

    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Returns the external storage directory path when the storage is mounted
 * (read-write or read-only), or {@code null} in any other storage state.
 */
public String getMountedExternalStorageDirectoryPath() {
    final String state = Environment.getExternalStorageState();
    final boolean mounted = Environment.MEDIA_MOUNTED.equals(state)
            || Environment.MEDIA_MOUNTED_READ_ONLY.equals(state);
    return mounted ? getExternalStorageDirectoryPath() : null;
}
// While the media is still being checked (not mounted), no path must be returned.
@Test
public void getMountedExternalStorageDirectoryPathReturnsNullWhenChecking() {
    ShadowEnvironment.setExternalStorageState(Environment.MEDIA_CHECKING);
    assertThat(contextUtil.getMountedExternalStorageDirectoryPath(), is(nullValue()));
}