Dataset columns:
- focal_method: string (length 13 to 60.9k characters)
- test_case: string (length 25 to 109k characters)
/**
 * Tells whether the given endpoint URL is one this consumer can handle,
 * i.e. it is non-null and matches {@code ENDPOINT_PATTERN_STRING}.
 */
public static boolean acceptEndpoint(String endpointUrl) {
    if (endpointUrl == null) {
        return false;
    }
    return endpointUrl.matches(ENDPOINT_PATTERN_STRING);
}
/** Every malformed or non-mqtt endpoint form must be rejected. */
@Test
public void testAcceptEndpointFailures() {
    AsyncTestSpecification specification = new AsyncTestSpecification();
    MQTTMessageConsumptionTask task = new MQTTMessageConsumptionTask(specification);
    // Same five rejection cases as before, driven from a table.
    String[] invalidEndpoints = {
        "localhost:1883/testTopic",
        "ssl://localhost:1883/testTopic",
        "mqtt://localhost",
        "mqtt://localhost:1883",
        "mqtt://localhost:port/testTopic"
    };
    for (String endpoint : invalidEndpoints) {
        assertFalse(MQTTMessageConsumptionTask.acceptEndpoint(endpoint));
    }
}
/**
 * Deserializes a raw batch response data map into a typed {@link BatchEntityResponse}.
 * Returns {@code null} when the server supplied no data map (e.g. empty body).
 *
 * @throws InstantiationException, IllegalAccessException, InvocationTargetException,
 *         NoSuchMethodException propagated from reflective construction of entities
 * @throws IOException propagated from response decoding
 */
@Override
public BatchKVResponse<K, EntityResponse<V>> wrapResponse(DataMap dataMap, Map<String, String> headers, ProtocolVersion version)
    throws InstantiationException, IllegalAccessException, InvocationTargetException, NoSuchMethodException, IOException {
    // No payload to decode — propagate null rather than building an empty response.
    if (dataMap == null) {
        return null;
    }
    // NOTE(review): headers are intentionally unused here — presumably handled by callers; confirm.
    return new BatchEntityResponse<>(dataMap, _keyType, _entityType, _keyParts, _complexKeyType, version);
}
/** Decoding a null data map must yield a null response rather than throwing. */
@Test(dataProvider = TestConstants.RESTLI_PROTOCOL_1_2_PREFIX + "batchEntityResponseDataProvider")
public void testDecodingWithEmptyDataMap(List<String> keys, ProtocolVersion protocolVersion)
    throws InstantiationException, IllegalAccessException, InvocationTargetException, NoSuchMethodException, IOException {
    final BatchEntityResponseDecoder<String, TestRecord> decoder =
        new BatchEntityResponseDecoder<>(new TypeSpec<>(TestRecord.class), new TypeSpec<>(String.class),
            Collections.<String, CompoundKey.TypeInfo>emptyMap(), null);
    // wrapResponse returns null for a null payload — see the focal method's contract.
    final BatchKVResponse<String, EntityResponse<TestRecord>> response =
        decoder.wrapResponse(null, Collections.<String, String>emptyMap(), protocolVersion);
    Assert.assertNull(response);
}
/**
 * Runs {@code toRun} inside a Sentinel resource entry. Flow-control rejections
 * (BlockException) and business exceptions both fall through to {@code fallback},
 * but only business exceptions are traced as errors.
 */
@Override
public <T> T run(Supplier<T> toRun, Function<Throwable, T> fallback) {
    Entry entry = null;
    try {
        entry = SphU.entry(resourceName, entryType);
        // If the SphU.entry() does not throw `BlockException`, it means that the
        // request can pass.
        return toRun.get();
    } catch (BlockException ex) {
        // SphU.entry() may throw BlockException which indicates that
        // the request was rejected (flow control or circuit breaking triggered).
        // So it should not be counted as the business exception.
        return fallback.apply(ex);
    } catch (Exception ex) {
        // For other kinds of exceptions, we'll trace the exception count via
        // Tracer.trace(ex).
        Tracer.trace(ex);
        return fallback.apply(ex);
    } finally {
        // Guarantee the invocation has been completed (exit pairs with a successful entry).
        if (entry != null) {
            entry.exit();
        }
    }
}
/** A circuit breaker fresh from the factory should pass calls straight through. */
@Test
public void testCreateFromFactoryThenRun() {
    CircuitBreaker cb = new SentinelCircuitBreakerFactory().create("testSentinelRun");
    assertThat(cb.run(() -> "foobar")).isEqualTo("foobar");
}
/**
 * Zips the two lists positionally into a map. Extra keys (beyond the values list)
 * map to null; extra values are ignored; a null input list yields a null map.
 */
@Udf
public final <T> Map<String, T> asMap(
    @UdfParameter final List<String> keys,
    @UdfParameter final List<T> values) {
    if (keys == null || values == null) {
        return null;
    }
    final Map<String, T> result = new HashMap<>(keys.size());
    int index = 0;
    for (final String key : keys) {
        // Pad with null once we run out of values.
        result.put(key, index < values.size() ? values.get(index) : null);
        index++;
    }
    return result;
}
/** Keys are zipped with values positionally; both entries must land in the map. */
@Test
public void shouldCreateMapWithIntegerValues() {
    // Given:
    final List<String> keys = Lists.newArrayList("1", "2");
    final List<Integer> values = Lists.newArrayList(1, 2);
    // When:
    final Map<String, Integer> map = new AsMap().asMap(keys, values);
    // Then:
    assertThat(map, hasEntry("1", 1));
    assertThat(map, hasEntry("2", 2));
}
/**
 * Removes {@code transactionState} from the final-status deque, but only when it is
 * the oldest (front) element; for any other element this is a silent no-op.
 * Runs under the write lock to keep deque and transaction state consistent.
 */
public void deleteTransaction(TransactionState transactionState) {
    writeLock();
    try {
        // here we only delete the oldest element, so if element exist in finalStatusTransactionStateDeque,
        // it must at the front of the finalStatusTransactionStateDeque
        if (!finalStatusTransactionStateDeque.isEmpty()
                && transactionState.getTransactionId() == finalStatusTransactionStateDeque.getFirst().getTransactionId()) {
            finalStatusTransactionStateDeque.pop();
            clearTransactionState(transactionState);
        }
    } finally {
        writeUnlock();
    }
}
/**
 * Deleting a finished transaction removes it from the finished set and the label
 * index while leaving running transaction counters untouched.
 */
@Test
public void testDeleteTransaction() throws AnalysisException {
    DatabaseTransactionMgr masterDbTransMgr = masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1);
    long txnId = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable1);
    TransactionState transactionState = masterDbTransMgr.getTransactionState(txnId);
    masterDbTransMgr.deleteTransaction(transactionState);
    assertEquals(6, masterDbTransMgr.getRunningTxnNums());
    assertEquals(1, masterDbTransMgr.getRunningRoutineLoadTxnNums());
    assertEquals(0, masterDbTransMgr.getFinishedTxnNums());
    assertEquals(7, masterDbTransMgr.getTransactionNum());
    // The label mapping must be gone once the transaction is deleted.
    assertNull(masterDbTransMgr.unprotectedGetTxnIdsByLabel(GlobalStateMgrTestUtil.testTxnLable1));
}
/**
 * Formats a SQL {@link Time} as an ISO-8601 local time string (e.g. "00:00:01").
 * Sub-second precision is truncated, not rounded.
 */
public static String formatTime(final Time time) {
    final long wholeSeconds = time.getTime() / 1000;
    return LocalTime.ofSecondOfDay(wholeSeconds).toString();
}
/** Sub-second precision is truncated, not rounded: 1005 ms still formats as one second. */
@Test
public void shouldFormatTime() {
    assertThat(SqlTimeTypes.formatTime(new Time(1000)), is("00:00:01"));
    assertThat(SqlTimeTypes.formatTime(new Time(1005)), is("00:00:01"));
}
/**
 * A device is available iff it is currently known to this service.
 * NOTE(review): a null id results in a NullPointerException — presumably thrown
 * by getDevice's argument check (the companion test expects NPE); confirm.
 */
@Override
public boolean isAvailable(DeviceId deviceId) {
    return getDevice(deviceId) != null;
}
/** Passing a null device id must be rejected with a NullPointerException. */
@Test(expected = NullPointerException.class)
public void testIsAvailableByNullId() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class);
    // test the isAvailable() method with null device id value.
    deviceService.isAvailable(null);
}
/**
 * Returns true when {@code input} parses as a JSON document. Null input, parse
 * failures (KsqlFunctionException) and "missing" nodes all yield false.
 */
@Udf
public boolean check(@UdfParameter(description = "The input JSON string") final String input) {
    if (input == null) {
        return false;
    }
    try {
        // The parser may report empty-ish input as a missing node instead of throwing.
        return !UdfJsonMapper.parseJson(input).isMissingNode();
    } catch (KsqlFunctionException e) {
        return false;
    }
}
/** A bare, unquoted word is not a valid JSON document, so check() must reject it. */
@Test
public void shouldNotInterpretUnquotedString() {
    assertFalse(udf.check("abc"));
}
/**
 * Applies a FlowRuleIntent operation context: computes the minimal add/modify/remove
 * delta between the rules to uninstall and the rules to install, submits it to the
 * FlowRuleService, and reports success or failure back to the install coordinator.
 */
@Override
public void apply(IntentOperationContext<FlowRuleIntent> context) {
    Optional<IntentData> toUninstall = context.toUninstall();
    Optional<IntentData> toInstall = context.toInstall();
    if (toInstall.isPresent() && toUninstall.isPresent()) {
        Intent intentToInstall = toInstall.get().intent();
        // Non-disruptive replacement of an installed intent takes a dedicated path.
        if (requireNonDisruptive(intentToInstall) && INSTALLED.equals(toUninstall.get().state())) {
            reallocate(context);
            return;
        }
    }
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        // Nothing to do.
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }
    List<FlowRuleIntent> uninstallIntents = context.intentsToUninstall();
    List<FlowRuleIntent> installIntents = context.intentsToInstall();
    List<FlowRule> flowRulesToUninstall;
    List<FlowRule> flowRulesToInstall;
    if (toUninstall.isPresent()) {
        // Remove tracked resource from both Intent and installable Intents.
        trackIntentResources(toUninstall.get(), uninstallIntents, REMOVE);
        // Retrieves all flow rules from all flow rule Intents.
        // Only rules that actually exist in the data plane need removing.
        flowRulesToUninstall = uninstallIntents.stream()
                .map(FlowRuleIntent::flowRules)
                .flatMap(Collection::stream)
                .filter(flowRule -> flowRuleService.getFlowEntry(flowRule) != null)
                .collect(Collectors.toList());
    } else {
        // No flow rules to be uninstalled.
        flowRulesToUninstall = Collections.emptyList();
    }
    if (toInstall.isPresent()) {
        // Track resource from both Intent and installable Intents.
        trackIntentResources(toInstall.get(), installIntents, ADD);
        // Retrieves all flow rules from all flow rule Intents.
        flowRulesToInstall = installIntents.stream()
                .map(FlowRuleIntent::flowRules)
                .flatMap(Collection::stream)
                .collect(Collectors.toList());
    } else {
        // No flow rules to be installed.
        flowRulesToInstall = Collections.emptyList();
    }
    List<FlowRule> flowRuleToModify;
    List<FlowRule> dontTouch;
    // If both uninstall/install list contained equal (=match conditions are equal) FlowRules,
    // omit it from remove list, since it will/should be overwritten by install
    flowRuleToModify = flowRulesToInstall.stream()
            .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::equals))
            .collect(Collectors.toList());
    // If both contained exactMatch-ing FlowRules, remove from both list,
    // since it will result in no-op.
    dontTouch = flowRulesToInstall.stream()
            .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::exactMatch))
            .collect(Collectors.toList());
    flowRulesToUninstall.removeAll(flowRuleToModify);
    flowRulesToUninstall.removeAll(dontTouch);
    flowRulesToInstall.removeAll(flowRuleToModify);
    flowRulesToInstall.removeAll(dontTouch);
    flowRuleToModify.removeAll(dontTouch);
    if (flowRulesToInstall.isEmpty() && flowRulesToUninstall.isEmpty() && flowRuleToModify.isEmpty()) {
        // There is no flow rules to install/uninstall
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }
    FlowRuleOperations.Builder builder = FlowRuleOperations.builder();
    // Add flows
    flowRulesToInstall.forEach(builder::add);
    // Modify flows
    flowRuleToModify.forEach(builder::modify);
    // Remove flows
    flowRulesToUninstall.forEach(builder::remove);
    FlowRuleOperationsContext flowRuleOperationsContext = new FlowRuleOperationsContext() {
        @Override
        public void onSuccess(FlowRuleOperations ops) {
            intentInstallCoordinator.intentInstallSuccess(context);
        }

        @Override
        public void onError(FlowRuleOperations ops) {
            intentInstallCoordinator.intentInstallFailed(context);
        }
    };
    FlowRuleOperations operations = builder.build(flowRuleOperationsContext);
    log.debug("applying intent {} -> {} with {} rules: {}",
            toUninstall.map(x -> x.key().toString()).orElse("<empty>"),
            toInstall.map(x -> x.key().toString()).orElse("<empty>"),
            operations.stages().stream().mapToLong(Set::size).sum(),
            operations.stages());
    flowRuleService.apply(operations);
}
/**
 * When the uninstall side's rules are absent from the data plane, nothing must be
 * removed, but every rule of the new intent must still be installed.
 */
@Test
public void testUninstallAndInstallMissing() {
    List<Intent> intentsToInstall = createAnotherFlowRuleIntents();
    List<Intent> intentsToUninstall = createFlowRuleIntents();
    IntentData toInstall = new IntentData(createP2PIntent(), IntentState.INSTALLING, new WallClockTimestamp());
    toInstall = IntentData.compiled(toInstall, intentsToInstall);
    IntentData toUninstall = new IntentData(createP2PIntent(), IntentState.INSTALLED, new WallClockTimestamp());
    toUninstall = IntentData.compiled(toUninstall, intentsToUninstall);
    IntentOperationContext<FlowRuleIntent> operationContext;
    IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
    operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);
    installer.apply(operationContext);
    IntentOperationContext successContext = intentInstallCoordinator.successContext;
    assertEquals(successContext, operationContext);
    // No rule existed in the (mock) store, so the remove set must be empty...
    Set<FlowRule> expectedFlowRules = Sets.newHashSet();
    assertEquals(expectedFlowRules, flowRuleService.flowRulesRemove);
    // ...while every rule of the intents to install is added.
    expectedFlowRules = intentsToInstall.stream()
            .map(intent -> (FlowRuleIntent) intent)
            .map(FlowRuleIntent::flowRules)
            .flatMap(Collection::stream)
            .collect(Collectors.toSet());
    assertEquals(expectedFlowRules, flowRuleService.flowRulesAdd);
}
/**
 * Compares two fields for equality: both must share a (case-insensitive) name and
 * type, and hold deep-equal values. Nulls are equal only to nulls. Failure results
 * carry a human-readable reason.
 */
public static Result<Boolean> isFieldEquals(Field f0, Field f1) {
    // Null handling: equal only when both sides are null.
    if (f0 == null) {
        return Result.build(f1 == null);
    }
    if (f1 == null) {
        return Result.build(false);
    }
    boolean sameMeta = StringUtils.equalsIgnoreCase(f0.getName(), f1.getName())
            && f0.getType() == f1.getType();
    if (!sameMeta) {
        return Result.buildWithParams(false, "Field not equals, old name {} type {}, new name {} type {}",
                f0.getName(), f0.getType(), f1.getName(), f1.getType());
    }
    if (f0.getValue() == null) {
        return Result.build(f1.getValue() == null);
    }
    if (f1.getValue() == null) {
        return Result.buildWithParams(false, "Field not equals, name {}, new value is null", f0.getName());
    }
    // fastjson may deserialize values with a different runtime type; align them first.
    String currentSerializer = AbstractUndoLogManager.getCurrentSerializer();
    if (StringUtils.equals(currentSerializer, FastjsonUndoLogParser.NAME)) {
        convertType(f0, f1);
    }
    if (Objects.deepEquals(f0.getValue(), f1.getValue())) {
        return Result.ok();
    }
    return Result.buildWithParams(false, "Field not equals, name {}, old value {}, new value {}",
            f0.getName(), f0.getValue(), f1.getValue());
}
/**
 * Field equality requires same (case-insensitive) name, same type and deep-equal
 * values; BLOB byte arrays are compared by content, not identity.
 */
@Test
public void isFieldEquals() {
    Field field0 = new Field("name", 0, "111");
    Field field1 = new Field("name", 1, "111");
    Field field2 = new Field("name", 0, "222");
    Field field3 = new Field("age", 0, "222");
    Field field4 = new Field("name", 0, null);
    Assertions.assertFalse(DataCompareUtils.isFieldEquals(field0, null).getResult());
    Assertions.assertFalse(DataCompareUtils.isFieldEquals(null, field0).getResult());
    Assertions.assertFalse(DataCompareUtils.isFieldEquals(field0, field1).getResult());
    Assertions.assertFalse(DataCompareUtils.isFieldEquals(field0, field2).getResult());
    Assertions.assertFalse(DataCompareUtils.isFieldEquals(field0, field3).getResult());
    Assertions.assertFalse(DataCompareUtils.isFieldEquals(field0, field4).getResult());
    // Names are compared case-insensitively.
    Field field10 = new Field("Name", 0, "111");
    Field field11 = new Field("Name", 0, null);
    Assertions.assertTrue(DataCompareUtils.isFieldEquals(field0, field10).getResult());
    Assertions.assertTrue(DataCompareUtils.isFieldEquals(field4, field11).getResult());
    // Byte arrays (BLOBs) must compare by content (deepEquals).
    Field field12 = new Field("information", JDBCType.BLOB.getVendorTypeNumber(), "hello world".getBytes());
    Field field13 = new Field("information", JDBCType.BLOB.getVendorTypeNumber(), "hello world".getBytes());
    Assertions.assertTrue(DataCompareUtils.isFieldEquals(field12, field13).getResult());
}
/**
 * Builds a failure message of the form "#&lt;index&gt;[ &lt;description&gt;]: &lt;error&gt;[ (&lt;file&gt;)]".
 * The description is included only when non-null and non-empty; the file name
 * only when non-null.
 */
public static String getIndexedScenarioMessage(String assertionError, int index,
                                               String scenarioDescription, String fileName) {
    final StringBuilder sb = new StringBuilder("#").append(index);
    final boolean hasDescription = scenarioDescription != null && !scenarioDescription.isEmpty();
    if (hasDescription) {
        sb.append(' ').append(scenarioDescription);
    }
    sb.append(": ").append(assertionError);
    if (fileName != null) {
        sb.append(" (").append(fileName).append(')');
    }
    return sb.toString();
}
/** Description and file name are each optional; null and empty description behave alike. */
@Test
public void getIndexedScenarioMessage_manyCases() {
    String failureMessage = "Failure message";
    String scenarioDescription = "First Case";
    String fileName = "ScesimTest";
    String testResult = getIndexedScenarioMessage(failureMessage, 1, scenarioDescription, fileName);
    assertThat(testResult).isEqualTo("#1 First Case: Failure message (ScesimTest)");
    testResult = getIndexedScenarioMessage(failureMessage, 1, scenarioDescription, null);
    assertThat(testResult).isEqualTo("#1 First Case: Failure message");
    testResult = getIndexedScenarioMessage(failureMessage, 1, "", fileName);
    assertThat(testResult).isEqualTo("#1: Failure message (ScesimTest)");
    testResult = getIndexedScenarioMessage(failureMessage, 1, null, fileName);
    assertThat(testResult).isEqualTo("#1: Failure message (ScesimTest)");
}
/**
 * Splits {@code text} into tokens: substrings matched by {@code regexExpression}
 * plus the unmatched segments between/around matches (empty gaps are dropped).
 *
 * @param text the string to tokenize
 * @return ordered list of matched and unmatched segments
 */
public List<String> tokenize(String text) {
    List<String> tokens = new ArrayList<>();
    Matcher regexMatcher = regexExpression.matcher(text);
    int lastIndexOfPrevMatch = 0;
    while (regexMatcher.find(lastIndexOfPrevMatch))
    // this is where the magic happens:
    // the regexp is used to find a matching pattern for substitution
    {
        int beginIndexOfNextMatch = regexMatcher.start();
        // Text between the previous match and this one becomes its own token.
        String prevToken = text.substring(lastIndexOfPrevMatch, beginIndexOfNextMatch);
        if (!prevToken.isEmpty()) {
            tokens.add(prevToken);
        }
        String currentMatch = regexMatcher.group();
        tokens.add(currentMatch);
        lastIndexOfPrevMatch = regexMatcher.end();
        if (lastIndexOfPrevMatch < text.length() && text.charAt(lastIndexOfPrevMatch) != '_') {
            // because it is sometimes positioned after the "_", but it should be positioned
            // before the "_"
            // NOTE(review): this one-character backtrack lets adjacent "_nn_" tokens share
            // their "_" delimiter — presumably intentional for underscore-delimited codes; confirm.
            --lastIndexOfPrevMatch;
        }
    }
    // Whatever remains after the final match is the tail token.
    String tail = text.substring(lastIndexOfPrevMatch);
    if (!tail.isEmpty()) {
        tokens.add(tail);
    }
    return tokens;
}
/** When no configured token matches, the whole input comes back as a single token. */
@Test
void testTokenize_happyPath_5() {
    // given
    CompoundCharacterTokenizer tokenizer = new CompoundCharacterTokenizer(
        new HashSet<>(Arrays.asList(new String[] { "_67_112_", "_76_112_" })));
    String text = "_94_167_112_91_103_";
    // when
    List<String> tokens = tokenizer.tokenize(text);
    // then
    assertEquals(Arrays.asList("_94_167_112_91_103_"), tokens);
}
/**
 * Convenience overload: substitutes ${...} variables in {@code val} using only
 * {@code pc1} as the property source (no secondary container).
 *
 * @throws ScanException if the value cannot be parsed
 */
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
    return substVars(val, pc1, null);
}
/** "${v2:-toto}" must fall back to the default "toto" when v2 is undefined. */
@Test
public void testSubstVarsWithDefault() throws ScanException {
    context.putProperty("v1", "if");
    String textWithDefault = "Testing ${v1} variable substitution ${v2:-toto}";
    String resultWithDefault = "Testing if variable substitution toto";
    String result = OptionHelper.substVars(textWithDefault, context);
    assertEquals(resultWithDefault, result);
}
/**
 * Stores the raw expression, delegates interpretation to the subclass hook
 * {@code doParse}, and returns this selector for chaining.
 *
 * @throws NacosException if the subclass fails to parse the expression
 */
@Override
public Selector<List<T>, CmdbContext<T>, String> parse(String expression) throws NacosException {
    this.expression = expression;
    doParse(expression);
    return this;
}
/** parse() must store the expression and invoke the doParse hook exactly once. */
@Test
void testParse() throws NacosException {
    MockCmdbSelector cmdbSelector = new MockCmdbSelector();
    cmdbSelector.parse("test");
    assertEquals("test", cmdbSelector.getExpression());
    assertEquals(1, counter.get());
}
/** Entry point for the fluent configuration builder. */
public static Builder custom() {
    return new Builder();
}
/** A negative max wait duration must be rejected at build time. */
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalMaxWait() {
    BulkheadConfig.custom()
        .maxWaitDuration(Duration.ofMillis(-1))
        .build();
}
/**
 * Scans consecutive bars and flags each NEXT bar that is anomalous relative to its
 * predecessor: either the previous bar ends after the next one begins (overlap), or
 * the next bar begins later than beginTime + period (a gap — despite the method name,
 * gap bars are also reported, as the companion test relies on).
 *
 * NOTE(review): the period is taken from the first bar only; series with mixed bar
 * durations may be misclassified — confirm with callers.
 */
public static List<Bar> findOverlappingBars(BarSeries barSeries) {
    List<Bar> bars = barSeries.getBarData();
    if (bars == null || bars.isEmpty())
        return new ArrayList<>();
    Duration period = bars.iterator().next().getTimePeriod();
    List<Bar> overlappingBars = new ArrayList<>();
    for (int i = 0; i < bars.size(); i++) {
        Bar bar = bars.get(i);
        Bar nextBar = i + 1 < bars.size() ? bars.get(i + 1) : null;
        if (nextBar != null) {
            // Overlap: this bar ends after the next begins.
            // Gap: the next bar begins after this bar's begin + expected period.
            if (bar.getEndTime().isAfter(nextBar.getBeginTime())
                    || bar.getBeginTime().plus(period).isBefore(nextBar.getBeginTime())) {
                overlappingBars.add(nextBar);
            }
        }
    }
    return overlappingBars;
}
/** bar1 shares bar0's begin time, so it must be reported as overlapping. */
@Test
public void findOverlappingBarsTest() {
    final List<Bar> bars = new ArrayList<>();
    time = ZonedDateTime.of(2019, 6, 1, 1, 1, 0, 0, ZoneId.systemDefault());
    // bar0 and bar1 start at the same instant — an overlap.
    final Bar bar0 = new MockBar(time, 1d, 2d, 3d, 4d, 5d, 0d, 7, numFunction);
    final Bar bar1 = new MockBar(time, 1d, 1d, 1d, 1d, 1d, 1d, 1, numFunction);
    // bar8 sits far in the future (a gap, not an overlap).
    Bar bar8 = BaseBar.builder(DoubleNum::valueOf, Double.class)
        .timePeriod(Duration.ofDays(1))
        .endTime(time.plusDays(8))
        .openPrice(NaN.NaN)
        .highPrice(NaN.NaN)
        .lowPrice(NaN.NaN)
        .closePrice(NaN.NaN)
        .volume(NaN.NaN)
        .build();
    bars.add(bar0);
    bars.add(bar1);
    bars.add(bar8);
    series = new BaseBarSeriesBuilder().withNumTypeOf(numFunction).withName("Series Name").withBars(bars).build();
    List<Bar> overlappingBars = BarSeriesUtils.findOverlappingBars(series);
    // there must be 1 overlapping bars (bar1)
    assertEquals(overlappingBars.get(0).getBeginTime(), bar1.getBeginTime());
}
/**
 * Converts an HTTP response into a typed RestResponse: 200 OK bodies are mapped
 * with {@code mapper}; every other status becomes an error response for {@code path}.
 */
public static <T> RestResponse<T> toRestResponse(
    final ResponseWithBody resp,
    final String path,
    final Function<ResponseWithBody, T> mapper
) {
    final int statusCode = resp.getResponse().statusCode();
    if (statusCode == OK.code()) {
        return RestResponse.successful(statusCode, mapper.apply(resp));
    }
    return createErrorResponse(path, resp);
}
/** A 403 must map to an erroneous RestResponse that preserves the status code. */
@Test
public void shouldCreateRestResponseFromForbiddenResponse() {
    // Given:
    when(httpClientResponse.statusCode()).thenReturn(FORBIDDEN.code());
    // When:
    final RestResponse<KsqlEntityList> restResponse = KsqlClientUtil.toRestResponse(response, PATH, mapper);
    // Then:
    assertThat("is erroneous", restResponse.isErroneous());
    assertThat(restResponse.getStatusCode(), is(FORBIDDEN.code()));
    assertThat(restResponse.getErrorMessage().getMessage(),
        containsString("You are forbidden from using this cluster"));
}
/**
 * Bridges the public API event type to the concrete {@link Event} implementation
 * before delegating to the internal evaluator.
 *
 * @throws IllegalStateException if the event is not the expected concrete class
 * @throws JsonProcessingException propagated from the delegated evaluation
 */
public static String evaluate(final co.elastic.logstash.api.Event event, final String template) throws JsonProcessingException {
    if (event instanceof Event) {
        return evaluate((Event) event, template);
    } else {
        throw new IllegalStateException("Unknown event concrete class: " + event.getClass().getName());
    }
}
/** A field holding a hash must interpolate as its JSON serialization. */
@Test
public void TestValueIsHash() throws IOException {
    Event event = getTestEvent();
    String path = "%{j}";
    assertEquals("{\"k1\":\"v\"}", StringInterpolation.evaluate(event, path));
}
/** Returns the delegate's target host, transformed by the configured decorator function. */
@Override
public HttpHost getTarget() {
    return hostDecorateFunc.apply(httpAsyncRequestProducer.getTarget());
}
/** getTarget() must route the delegate's host through the decorator function. */
@Test
public void getTarget() {
    final HttpAsyncRequestProducer delegate = Mockito.mock(HttpAsyncRequestProducer.class);
    AtomicBoolean isExecute = new AtomicBoolean();
    final HttpHost host = Mockito.mock(HttpHost.class);
    // The decorator function records that it ran and substitutes the mock host.
    final Function<HttpHost, HttpHost> function = httpRequest -> {
        isExecute.set(true);
        return host;
    };
    final HttpAsyncRequestProducerDecorator decorator = new HttpAsyncRequestProducerDecorator(
        delegate, null, function);
    final HttpHost httpRequest = decorator.getTarget();
    Assert.assertEquals(httpRequest, host);
    Assert.assertTrue(isExecute.get());
}
/**
 * Coordinates a pull query: locates the hosts for each required partition, fails fast
 * when any partition has no selectable host, filters the candidate hosts, and runs
 * the routing rounds asynchronously on the coordinator executor.
 *
 * @return a future completed when all rounds finish, or exceptionally on any failure
 * @throws MaterializationException (synchronously) if some partition has no valid host
 */
public CompletableFuture<Void> handlePullQuery(
    final ServiceContext serviceContext,
    final PullPhysicalPlan pullPhysicalPlan,
    final ConfiguredStatement<Query> statement,
    final RoutingOptions routingOptions,
    final PullQueryWriteStream pullQueryQueue,
    final CompletableFuture<Void> shouldCancelRequests
) {
    final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator()
        .locate(
            pullPhysicalPlan.getKeys(),
            routingOptions,
            routingFilterFactory,
            pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN
        );
    // Partitions where no host passed the routing filters — these make the query unservable.
    final Map<Integer, List<Host>> emptyPartitions = allLocations.stream()
        .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected()))
        .collect(Collectors.toMap(
            KsqlPartitionLocation::getPartition,
            loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList())));
    if (!emptyPartitions.isEmpty()) {
        final MaterializationException materializationException = new MaterializationException(
            "Unable to execute pull query. " + emptyPartitions.entrySet()
                .stream()
                .map(kv -> String.format(
                    "Partition %s failed to find valid host. Hosts scanned: %s",
                    kv.getKey(), kv.getValue()))
                .collect(Collectors.joining(", ", "[", "]")));
        LOG.debug(materializationException.getMessage());
        throw materializationException;
    }
    // at this point we should filter out the hosts that we should not route to
    final List<KsqlPartitionLocation> locations = allLocations
        .stream()
        .map(KsqlPartitionLocation::removeFilteredHosts)
        .collect(Collectors.toList());
    final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
    // Routing rounds run off-thread; the returned future mirrors their outcome.
    coordinatorExecutorService.submit(() -> {
        try {
            executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions, locations,
                pullQueryQueue, shouldCancelRequests);
            completableFuture.complete(null);
        } catch (Throwable t) {
            completableFuture.completeExceptionally(t);
        }
    });
    return completableFuture;
}
/**
 * A forwarded request that returns no rows (not even a header) must fail the query
 * future with an "empty response" error and leave the queue empty.
 */
@Test
public void forwardingError_noRows() {
    // Given:
    locate(location4);
    when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any(), any()))
        .thenAnswer(i -> {
            Map<String, ?> requestProperties = i.getArgument(3);
            WriteStream<List<StreamedRow>> rowConsumer = i.getArgument(4);
            // The forwarded request must carry the partition being queried.
            assertThat(requestProperties.get(KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS), is("4"));
            // Simulate a remote node answering with zero rows — no header row at all.
            rowConsumer.write(ImmutableList.of());
            return RestResponse.successful(200, 0);
        });
    // When:
    CompletableFuture<Void> future = haRouting.handlePullQuery(
        serviceContext, pullPhysicalPlan, statement, routingOptions, pullQueryQueue, disconnect);
    final Exception e = assertThrows(
        ExecutionException.class,
        future::get
    );
    // Then:
    assertThat(pullQueryQueue.size(), is(0));
    assertThat(Throwables.getRootCause(e).getMessage(),
        containsString("empty response from forwarding call, expected a header row"));
}
/**
 * Wraps {@code taskFunction} in a retriable task governed by {@code policy}; the
 * returned wrapper is tagged WITH_RETRY for trace visualization.
 *
 * @param name base task name (the inner task gets " retriableTask" appended)
 * @param policy retry/termination/error-classification policy
 * @param taskFunction produces the task for a given attempt number
 */
public static <U> Task<U> withRetryPolicy(String name, RetryPolicy policy, Function1<Integer, Task<U>> taskFunction) {
    RetriableTask<U> retriableTask = new RetriableTask<>(name, policy, taskFunction);
    Task<U> retryTaskWrapper = Task.async(name + " retriableTask", retriableTask::run);
    retryTaskWrapper.getShallowTraceBuilder().setTaskType(TaskType.WITH_RETRY.getName());
    return retryTaskWrapper;
}
/**
 * Recoverable errors (TimeoutException) are retried until the 3-attempt limit —
 * the surfaced failure carries attempt index 2 — while unrecoverable ones
 * (IllegalArgumentException) fail immediately on attempt 0.
 */
@Test
public void testErrorClassification() {
    Function<Throwable, ErrorClassification> errorClassifier = error ->
        error instanceof TimeoutException ? ErrorClassification.RECOVERABLE : ErrorClassification.UNRECOVERABLE;
    RetryPolicy retryPolicy = new RetryPolicyBuilder().
        setTerminationPolicy(TerminationPolicy.limitAttempts(3)).
        setErrorClassifier(errorClassifier).
        build();
    assertEquals(retryPolicy.getName(), "RetryPolicy.LimitAttempts");
    Task<Void> task1 = withRetryPolicy("testErrorClassification", retryPolicy,
        attempt -> Task.failure(new TimeoutException("current attempt: " + attempt)));
    runAndWaitException(task1, TimeoutException.class);
    assertTrue(task1.isDone());
    assertEquals(task1.getError().getMessage(), "current attempt: 2");
    Task<Void> task2 = withRetryPolicy("testErrorClassification", retryPolicy,
        attempt -> Task.failure(new IllegalArgumentException("current attempt: " + attempt)));
    runAndWaitException(task2, IllegalArgumentException.class);
    assertTrue(task2.isDone());
    assertEquals(task2.getError().getMessage(), "current attempt: 0");
}
/**
 * Prepares Jetty's working directory: defaults jetty.home to the current working
 * directory, aligns jetty.base with it, wipes any stale "work" directory under an
 * existing home, and recreates it. Failure to delete the old directory is reported
 * as a validation error.
 */
@Override
public Validation validate(Validation val) {
    // Default jetty.home to the process working directory when unset.
    if (StringUtils.isBlank(systemEnvironment.getPropertyImpl("jetty.home"))) {
        systemEnvironment.setProperty("jetty.home", systemEnvironment.getPropertyImpl("user.dir"));
    }
    systemEnvironment.setProperty("jetty.base", systemEnvironment.getPropertyImpl("jetty.home"));
    File jettyHome = new File(systemEnvironment.getPropertyImpl("jetty.home"));
    File workDir = new File(jettyHome, "work");
    if (jettyHome.exists()) {
        if (workDir.exists()) {
            // Stale working state must be removed before Jetty starts.
            try {
                FileUtils.deleteDirectory(workDir);
            } catch (IOException e) {
                String message = format("Error trying to remove Jetty working directory {0}: {1}",
                        workDir.getAbsolutePath(), e);
                return val.addError(new RuntimeException(message));
            }
        }
        workDir.mkdir();
    }
    return Validation.SUCCESS;
}
/** validate() must create the "work" directory under an existing jetty.home. */
@Test
public void shouldCreateWorkDirIfItDoesNotExist() {
    when(systemEnvironment.getPropertyImpl("jetty.home")).thenReturn(homeDir.getAbsolutePath());
    Validation val = new Validation();
    jettyWorkDirValidator.validate(val);
    assertThat(val.isSuccessful(), is(true));
    File work = new File(homeDir, "work");
    assertThat(work.exists(), is(true));
}
/**
 * Computes the optimal Bloom-filter hash count k = round(m/n * ln 2), clamped to a
 * minimum of one hash function for degenerate inputs.
 */
static int optimalNumOfHashFunctions(long expectEntries, long bitSize) {
    final double bitsPerEntry = (double) bitSize / expectEntries;
    final int rounded = (int) Math.round(bitsPerEntry * Math.log(2));
    return Math.max(1, rounded);
}
/** k = max(1, round(bits/entries * ln 2)); degenerate and tiny ratios clamp to 1. */
@Test
void testBloomFilterNumHashFunctions() {
    assertThat(BloomFilter.optimalNumOfHashFunctions(-1, -1)).isOne();
    assertThat(BloomFilter.optimalNumOfHashFunctions(0, 0)).isOne();
    assertThat(BloomFilter.optimalNumOfHashFunctions(10, 0)).isOne();
    assertThat(BloomFilter.optimalNumOfHashFunctions(10, 10)).isOne();
    // 100 bits / 10 entries * ln 2 ≈ 6.93 → rounds to 7.
    assertThat(BloomFilter.optimalNumOfHashFunctions(10, 100)).isEqualTo(7);
    assertThat(BloomFilter.optimalNumOfHashFunctions(100, 100)).isOne();
    assertThat(BloomFilter.optimalNumOfHashFunctions(1000, 100)).isOne();
    assertThat(BloomFilter.optimalNumOfHashFunctions(10000, 100)).isOne();
    assertThat(BloomFilter.optimalNumOfHashFunctions(100000, 100)).isOne();
    assertThat(BloomFilter.optimalNumOfHashFunctions(1000000, 100)).isOne();
}
/**
 * Streams every rule from the repository into the dump under RULES, logging the
 * exported count; any failure is rethrown as IllegalStateException carrying how
 * many rules were written before the error.
 */
@Override
public void execute(Context context) {
    long count = 0;
    try (StreamWriter<ProjectDump.Rule> writer = dumpWriter.newStreamWriter(DumpElement.RULES)) {
        // Builder instance is shared across iterations — presumably reset inside
        // toRuleMessage; confirm.
        ProjectDump.Rule.Builder ruleBuilder = ProjectDump.Rule.newBuilder();
        for (Rule rule : ruleRepository.getAll()) {
            ProjectDump.Rule ruleMessage = toRuleMessage(ruleBuilder, rule);
            writer.write(ruleMessage);
            count++;
        }
        LoggerFactory.getLogger(getClass()).debug("{} rules exported", count);
    } catch (Exception e) {
        throw new IllegalStateException(format("Rule Export failed after processing %d rules successfully", count), e);
    }
}
/** With an empty rule repository the step must write nothing under RULES. */
@Test
public void execute_writes_no_rules_when_repository_is_empty() {
    underTest.execute(new TestComputationStepContext());
    assertThat(dumpWriter.getWrittenMessagesOf(DumpElement.RULES)).isEmpty();
}
/**
 * Counts records across the given files — or stdin when the sole argument is "-" —
 * and prints the total. With no arguments, prints help to stderr and exits 0.
 *
 * @return 0 on success (I/O failures propagate as exceptions)
 */
@Override
public int run(InputStream stdin, PrintStream out, PrintStream err, List<String> args) throws Exception {
    OptionParser optionParser = new OptionParser();
    OptionSet optionSet = optionParser.parse(args.toArray(new String[0]));
    List<String> nargs = (List<String>) optionSet.nonOptionArguments();
    if (nargs.isEmpty()) {
        // No input given: show usage, still a "successful" invocation.
        printHelp(err);
        err.println();
        optionParser.printHelpOn(err);
        return 0;
    }
    long count = 0L;
    if (ImmutableList.of("-").equals(nargs)) {
        count = countRecords(stdin);
    } else {
        // Sum record counts over every resolved input file.
        for (Path file : Util.getFiles(nargs)) {
            try (final InputStream inStream = Util.openFromFS(file)) {
                count += countRecords(inStream);
            }
        }
    }
    out.println(count);
    out.flush();
    return 0;
}
/**
 * A non-existing input file must surface as a FileNotFoundException from run().
 * Fix: the original placed assertEquals(1, returnCode) AFTER the run() call inside
 * the assertThrows lambda — that assertion was unreachable (run() throws first) and
 * contradicted the expected behavior, so it has been removed.
 */
@Test
void fileDoesNotExist() throws Exception {
    List<String> args = Collections.singletonList(
        new File(temporaryFolder, "nonExistingFile").getAbsolutePath());
    assertThrows(FileNotFoundException.class,
        () -> new RecordCountTool().run(System.in, System.out, System.err, args));
}
/**
 * Infers the hash algorithm from the hex digest length (8→CRC32, 32→MD5, 40→SHA-1,
 * 64→SHA-256, 128→SHA-512). Blank input, non-hex input, and unknown lengths all
 * yield {@link Checksum#NONE}; unknown lengths additionally log a warning.
 */
public static Checksum parse(final String hash) {
    if(StringUtils.isBlank(hash)) {
        return Checksum.NONE;
    }
    final HashAlgorithm algorithm;
    switch(hash.length()) {
        case 8:
            algorithm = HashAlgorithm.crc32;
            break;
        case 32:
            algorithm = HashAlgorithm.md5;
            break;
        case 40:
            algorithm = HashAlgorithm.sha1;
            break;
        case 64:
            algorithm = HashAlgorithm.sha256;
            break;
        case 128:
            algorithm = HashAlgorithm.sha512;
            break;
        default:
            log.warn(String.format("Failure to detect algorithm for checksum %s", hash));
            return Checksum.NONE;
    }
    // The length is already fixed by the switch, so a plain hex check suffices.
    return hash.matches("[a-fA-F0-9]+") ? new Checksum(algorithm, hash) : Checksum.NONE;
}
/** Algorithm detection is purely by hex-digest length; non-hex or odd formats map to NONE. */
@Test
public void testParse() {
    assertEquals(new Checksum(HashAlgorithm.md5, "d41d8cd98f00b204e9800998ecf8427e"),
        Checksum.parse("d41d8cd98f00b204e9800998ecf8427e"));
    assertEquals(new Checksum(HashAlgorithm.sha1, "da39a3ee5e6b4b0d3255bfef95601890afd80709"),
        Checksum.parse("da39a3ee5e6b4b0d3255bfef95601890afd80709"));
    assertEquals(new Checksum(HashAlgorithm.sha256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
        Checksum.parse("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"));
    // A suffix ("-2") makes the string non-hex for its length bucket.
    assertEquals(Checksum.NONE, Checksum.parse("da39a3ee5e6b4b0d3255bfef95601890afd80709-2"));
    assertEquals(Checksum.NONE, Checksum.parse(""));
    assertEquals(Checksum.NONE, Checksum.parse(null));
    assertEquals(new Checksum(HashAlgorithm.sha512, "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"),
        Checksum.parse("cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"));
    assertEquals(new Checksum(HashAlgorithm.crc32, "d202ef8d"), Checksum.parse("d202ef8d"));
}
/**
 * Maps an instant to its tick slot index relative to {@code bucketBaseTime}, where
 * each slot spans TICK_INTERVAL nanoseconds.
 * NOTE(review): instants before bucketBaseTime yield negative indices — confirm
 * callers guard against that.
 */
int calculateIndexOfTick(Instant tickTime) {
    return (int) (Duration.between(bucketBaseTime, tickTime).toNanos() / TICK_INTERVAL);
}
/** Tick index is the whole number of tick intervals elapsed since the bucket base time. */
@Test
public void calculateIndexOfTick() {
    SlidingTimeWindowMovingAverages stwm = new SlidingTimeWindowMovingAverages(clock);
    assertThat(stwm.calculateIndexOfTick(Instant.ofEpochSecond(0L))).isEqualTo(0);
    assertThat(stwm.calculateIndexOfTick(Instant.ofEpochSecond(1L))).isEqualTo(1);
}
/**
 * Determines whether each cached archive and file is publicly visible and records
 * the results (as comma-separated boolean flags, in URI order) in the job
 * configuration via setArchiveVisibilities / setFileVisibilities.
 *
 * Fix: the archive and file branches duplicated the same string-building loop;
 * extracted into a single private helper with identical output.
 *
 * @param job       the job configuration to read cache entries from and write flags to
 * @param statCache cache of FileStatus lookups shared across isPublic calls
 * @throws IOException if a filesystem permission check fails
 */
public static void determineCacheVisibilities(Configuration job,
    Map<URI, FileStatus> statCache) throws IOException {
  URI[] tarchives = JobContextImpl.getCacheArchives(job);
  if (tarchives != null) {
    setArchiveVisibilities(job, buildVisibilityFlags(job, tarchives, statCache));
  }
  URI[] tfiles = JobContextImpl.getCacheFiles(job);
  if (tfiles != null) {
    setFileVisibilities(job, buildVisibilityFlags(job, tfiles, statCache));
  }
}

/**
 * Builds the comma-separated "true"/"false" visibility string for the given URIs,
 * preserving their order. Assumes {@code uris} is non-empty (callers check null).
 */
private static String buildVisibilityFlags(Configuration job, URI[] uris,
    Map<URI, FileStatus> statCache) throws IOException {
  StringBuilder visibilities =
      new StringBuilder(String.valueOf(isPublic(job, uris[0], statCache)));
  for (int i = 1; i < uris.length; i++) {
    visibilities.append(",");
    visibilities.append(String.valueOf(isPublic(job, uris[i], statCache)));
  }
  return visibilities.toString();
}
// Exercises determineCacheVisibilities() in four configurations: explicit and
// wildcard cache files under a world-readable parent (expect "true" flags),
// then the same two cases after the parent is made private (expect "false").
// Uses jobConf.get() rather than getBoolean() to distinguish a wrong value
// from a missing key.
@Test public void testDetermineCacheVisibilities() throws IOException { fs.setPermission(TEST_VISIBILITY_PARENT_DIR, new FsPermission((short)00777)); fs.setPermission(TEST_VISIBILITY_CHILD_DIR, new FsPermission((short)00777)); fs.setWorkingDirectory(TEST_VISIBILITY_CHILD_DIR); Job job = Job.getInstance(conf); Path relativePath = new Path(SECOND_CACHE_FILE); Path wildcardPath = new Path("*"); Map<URI, FileStatus> statCache = new HashMap<>(); Configuration jobConf; job.addCacheFile(firstCacheFile.toUri()); job.addCacheFile(relativePath.toUri()); jobConf = job.getConfiguration(); // skip test if scratch dir is not PUBLIC assumeTrue(TEST_VISIBILITY_PARENT_DIR + " is not public", ClientDistributedCacheManager.isPublic( jobConf, TEST_VISIBILITY_PARENT_DIR.toUri(), statCache)); ClientDistributedCacheManager.determineCacheVisibilities(jobConf, statCache); // We use get() instead of getBoolean() so we can tell the difference // between wrong and missing assertEquals("The file paths were not found to be publicly visible " + "even though the full path is publicly accessible", "true,true", jobConf.get(MRJobConfig.CACHE_FILE_VISIBILITIES)); checkCacheEntries(statCache, null, firstCacheFile, relativePath); job = Job.getInstance(conf); job.addCacheFile(wildcardPath.toUri()); jobConf = job.getConfiguration(); statCache.clear(); ClientDistributedCacheManager.determineCacheVisibilities(jobConf, statCache); // We use get() instead of getBoolean() so we can tell the difference // between wrong and missing assertEquals("The file path was not found to be publicly visible " + "even though the full path is publicly accessible", "true", jobConf.get(MRJobConfig.CACHE_FILE_VISIBILITIES)); checkCacheEntries(statCache, null, wildcardPath.getParent()); Path qualifiedParent = fs.makeQualified(TEST_VISIBILITY_PARENT_DIR); fs.setPermission(TEST_VISIBILITY_PARENT_DIR, new FsPermission((short)00700)); job = Job.getInstance(conf); job.addCacheFile(firstCacheFile.toUri()); 
job.addCacheFile(relativePath.toUri()); jobConf = job.getConfiguration(); statCache.clear(); ClientDistributedCacheManager.determineCacheVisibilities(jobConf, statCache); // We use get() instead of getBoolean() so we can tell the difference // between wrong and missing assertEquals("The file paths were found to be publicly visible " + "even though the parent directory is not publicly accessible", "false,false", jobConf.get(MRJobConfig.CACHE_FILE_VISIBILITIES)); checkCacheEntries(statCache, qualifiedParent, firstCacheFile, relativePath); job = Job.getInstance(conf); job.addCacheFile(wildcardPath.toUri()); jobConf = job.getConfiguration(); statCache.clear(); ClientDistributedCacheManager.determineCacheVisibilities(jobConf, statCache); // We use get() instead of getBoolean() so we can tell the difference // between wrong and missing assertEquals("The file path was found to be publicly visible " + "even though the parent directory is not publicly accessible", "false", jobConf.get(MRJobConfig.CACHE_FILE_VISIBILITIES)); checkCacheEntries(statCache, qualifiedParent, wildcardPath.getParent()); }
@Override public String getFileId(final Path file) throws BackgroundException { try { if(StringUtils.isNotBlank(file.attributes().getFileId())) { return file.attributes().getFileId(); } final String cached = super.getFileId(file); if(cached != null) { if(log.isDebugEnabled()) { log.debug(String.format("Return cached fileid %s for file %s", cached, file)); } return cached; } if(file.isRoot()) { return ROOT; } int offset = 0; UiFsModel fsModel; final int chunksize = new HostPreferences(session.getHost()).getInteger("eue.listing.chunksize"); do { final String parentResourceId = this.getFileId(file.getParent()); switch(parentResourceId) { case EueResourceIdProvider.ROOT: case EueResourceIdProvider.TRASH: fsModel = new ListResourceAliasApi(new EueApiClient(session)).resourceAliasAliasGet(parentResourceId, null, null, null, null, chunksize, offset, null, null); break; default: fsModel = new ListResourceApi(new EueApiClient(session)).resourceResourceIdGet(parentResourceId, null, null, null, null, chunksize, offset, null, null); } for(Children child : fsModel.getUifs().getChildren()) { // Case insensitive if(child.getUifs().getName().equalsIgnoreCase(normalizer.normalize(file.getName()).toString())) { return getResourceIdFromResourceUri(child.getUifs().getResourceURI()); } } offset += chunksize; } while(fsModel.getUifs().getChildren().size() == chunksize); throw new NotfoundException(file.getAbsolute()); } catch(ApiException e) { throw new EueExceptionMappingService().map("Failure to read attributes of {0}", e, file); } }
// Verifies that resource-id lookup matches path names case-insensitively for
// both directories and files, and that a lookup after clearing the cache
// still resolves via the remote listing.
@Test public void testFindCaseInsensitive() throws Exception { final EueResourceIdProvider fileid = new EueResourceIdProvider(session); final Path folder = new EueDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus()); folder.withAttributes(new EueAttributesFinderFeature(session, fileid).find(folder)); assertEquals(folder.attributes().getFileId(), fileid.getFileId(folder)); assertEquals(folder.attributes().getFileId(), fileid.getFileId(new Path(StringUtils.lowerCase(folder.getAbsolute()), folder.getType()))); assertEquals(folder.attributes().getFileId(), fileid.getFileId(new Path(StringUtils.upperCase(folder.getAbsolute()), folder.getType()))); final Path file = createFile(fileid, new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), RandomUtils.nextBytes(124)); fileid.clear(); assertEquals(file.attributes().getFileId(), fileid.getFileId(file)); assertEquals(file.attributes().getFileId(), fileid.getFileId(new Path(StringUtils.lowerCase(file.getAbsolute()), file.getType()))); assertEquals(file.attributes().getFileId(), fileid.getFileId(new Path(StringUtils.upperCase(file.getAbsolute()), file.getType()))); new EueDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Parses a date string with the given {@link DateFormat}.
 *
 * @param dateStr    the text to parse
 * @param dateFormat the format describing {@code dateStr}
 * @return the parsed {@link DateTime}
 */
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) { return new DateTime(dateStr, dateFormat); }
@Test
public void parseTest8() {
    // ISO-8601 UTC timestamp should parse and render in the default pattern.
    final String input = "2020-06-28T02:14:13.000Z";
    final DateTime parsed = DateUtil.parse(input);
    assert parsed != null;
    assertEquals("2020-06-28 02:14:13", parsed.toString());
}
// Scans the pattern string character by character through a small state
// machine (literal / format-modifier / option / keyword / right-parenthesis),
// delegating each state to its handler. After the loop, flushes whatever the
// final state left buffered; ending inside a format modifier or an option is
// an error because those constructs are incomplete without a terminator.
List<Token> tokenize() throws ScanException { List<Token> tokenList = new ArrayList<Token>(); StringBuffer buf = new StringBuffer(); while (pointer < patternLength) { char c = pattern.charAt(pointer); pointer++; switch (state) { case LITERAL_STATE: handleLiteralState(c, tokenList, buf); break; case FORMAT_MODIFIER_STATE: handleFormatModifierState(c, tokenList, buf); break; case OPTION_STATE: processOption(c, tokenList, buf); break; case KEYWORD_STATE: handleKeywordState(c, tokenList, buf); break; case RIGHT_PARENTHESIS_STATE: handleRightParenthesisState(c, tokenList, buf); break; default: } } // EOS switch (state) { case LITERAL_STATE: addValuedToken(Token.LITERAL, buf, tokenList); break; case KEYWORD_STATE: tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString())); break; case RIGHT_PARENTHESIS_STATE: tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN); break; case FORMAT_MODIFIER_STATE: case OPTION_STATE: throw new ScanException("Unexpected end of pattern string"); } return tokenList; }
@Test
public void testEmptyP() throws ScanException {
    // A bare "()" has no keyword before it: "(" stays a literal and ")" becomes
    // the right-parenthesis token.
    final List<Token> tokens = new TokenStream("()").tokenize();
    final List<Token> expected = new ArrayList<Token>();
    expected.add(new Token(Token.LITERAL, "("));
    expected.add(Token.RIGHT_PARENTHESIS_TOKEN);
    assertEquals(expected, tokens);
}
/** Returns a new {@code PCollections<T>} transform instance. */
public static <T> PCollections<T> pCollections() { return new PCollections<>(); }
// Flattening an empty PCollectionList of a type with no registered coder must
// fail with "Unable to return a default Coder" (at construction time).
@Test @Category(NeedsRunner.class) public void testFlattenNoListsNoCoder() { // not ValidatesRunner because it should fail at pipeline construction time anyhow. thrown.expect(IllegalStateException.class); thrown.expectMessage("Unable to return a default Coder"); PCollectionList.<ClassWithoutCoder>empty(p).apply(Flatten.pCollections()); p.run(); }
public boolean matchStage(StageConfigIdentifier stageIdentifier, StageEvent event) {
    // A stage matches only when the event type is covered by this filter AND
    // the pipeline/stage pair is one this filter applies to. The event check
    // is kept first to preserve the original short-circuit order.
    if (!this.event.include(event)) {
        return false;
    }
    return appliesTo(stageIdentifier.getPipelineName(), stageIdentifier.getStageName());
}
@Test
void specificStageShouldMatchWithinAnyPipeline() {
    final NotificationFilter filter =
            new NotificationFilter(GoConstants.ANY_PIPELINE, "dev", StageEvent.Breaks, false);
    // With ANY_PIPELINE, the "dev" stage matches in every pipeline...
    assertThat(filter.matchStage(new StageConfigIdentifier("cruise1", "dev"), StageEvent.Breaks)).isTrue();
    assertThat(filter.matchStage(new StageConfigIdentifier("cruise2", "dev"), StageEvent.Breaks)).isTrue();
    // ...but other stage names never do.
    assertThat(filter.matchStage(new StageConfigIdentifier("cruise2", "not-dev"), StageEvent.Breaks)).isFalse();
}
/**
 * Converts the integer representation (milliseconds since midnight) into a
 * {@link java.util.Date} carrying only the time-of-day component.
 *
 * @param schema must be the Time logical-type schema (checked by name)
 * @param value  milliseconds since midnight; accepted range is
 *               0..MILLIS_PER_DAY inclusive
 * @throws DataException if the schema does not match or the value is out of range
 */
public static java.util.Date toLogical(Schema schema, int value) {
    if (!(LOGICAL_NAME.equals(schema.name())))
        // Message fixed: this is the Time converter, not Date (copy-paste error).
        throw new DataException("Requested conversion of Time object but the schema does not match.");
    if (value < 0 || value > MILLIS_PER_DAY)
        // Message aligned with the actual check: 0 and 86400000 are both accepted.
        throw new DataException("Time values must use number of milliseconds between 0 and 86400000 (inclusive)");
    return new java.util.Date(value);
}
@Test
public void testToLogical() {
    // 0 ms maps to midnight (the epoch); 10 000 ms maps 1:1 onto the Date value.
    assertEquals(EPOCH.getTime(), Time.toLogical(Time.SCHEMA, 0));
    assertEquals(EPOCH_PLUS_TEN_THOUSAND_MILLIS.getTime(), Time.toLogical(Time.SCHEMA, 10000));
}
// Lazily resolves and caches the ZkService implementation matching the
// configured ZooKeeper server version. Only the 3.4.x client is wired up here;
// any other version leaves zkService null and triggers the exception below.
// NOTE(review): this is double-checked locking — the unsynchronized reads of
// zkService are only safe if the field is declared volatile; confirm at the
// field declaration.
public ZkService chooseService() { if (zkService != null) { return zkService; } synchronized (this) { if (zkService == null) { final String version = lbConfig.getZkServerVersion(); if (version.startsWith(VERSION_34_PREFIX)) { zkService = PluginServiceManager.getPluginService(ZkService34.class); } } } if (zkService == null) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, "Can not get target zookeeper client version(%s) service", lbConfig.getZkServerVersion())); } return zkService; }
// A 3.4.x version string must resolve to the ZkService34 plugin instance
// supplied by the (mocked) PluginServiceManager.
@Test public void chooseServiceWithVersion() { lbConfig.setZkServerVersion("3.4.14"); final ZkService34 service34 = Mockito.mock(ZkService34.class); try (final MockedStatic<PluginServiceManager> pluginServiceManagerMockedStatic = Mockito.mockStatic(PluginServiceManager.class)){ pluginServiceManagerMockedStatic.when(() -> PluginServiceManager.getPluginService(ZkService34.class)) .thenReturn(service34); final ZkServiceManager zkServiceManager = new ZkServiceManager(); Assert.assertEquals(zkServiceManager.chooseService(), service34); } }
/**
 * Joins the given parts with the separator character, silently skipping
 * {@code null} parts. Returns an empty string when nothing remains to join.
 */
public static String fromPartsAndSeparator(char separator, String... parts) {
    StringBuilder joined = new StringBuilder();
    for (String part : parts) {
        if (part == null) {
            continue; // null parts are skipped, not rendered as "null"
        }
        if (joined.length() > 0) {
            joined.append(separator);
        }
        joined.append(part);
    }
    return joined.toString();
}
@Test
void fromPartsAndSeparator() {
    String joined = IdUtils.fromPartsAndSeparator('|', "namespace", "flow");
    assertThat(joined, notNullValue());
    assertThat(joined, is("namespace|flow"));

    // A null part must be skipped rather than rendered into the id.
    String joinedWithNull = IdUtils.fromPartsAndSeparator('|', null, "namespace", "flow");
    assertThat(joinedWithNull, notNullValue());
    assertThat(joinedWithNull, is("namespace|flow"));
}
@Override public void recursiveDownloadFolder(String sourcePath, File destDir) { String fileName; String srcFile; File destFile; for (FTPFile ftpFile : lsFiles(sourcePath, null)) { fileName = ftpFile.getName(); srcFile = StrUtil.format("{}/{}", sourcePath, fileName); destFile = FileUtil.file(destDir, fileName); if (false == ftpFile.isDirectory()) { // 本地不存在文件或者ftp上文件有修改则下载 if (false == FileUtil.exist(destFile) || (ftpFile.getTimestamp().getTimeInMillis() > destFile.lastModified())) { download(srcFile, destFile); } } else { // 服务端依旧是目录,继续递归 FileUtil.mkdir(destFile); recursiveDownloadFolder(srcFile, destFile); } } }
// Manual (disabled) smoke test: mirrors the FTP root into a local directory.
// Requires a reachable host named "looly.centos".
@Test @Disabled public void recursiveDownloadFolder() { final Ftp ftp = new Ftp("looly.centos"); ftp.recursiveDownloadFolder("/",FileUtil.file("d:/test/download")); IoUtil.close(ftp); }
/** Executes the currently configured dragon-slaying strategy. */
public void goToBattle() { strategy.execute(); }
@Test
void testGoToBattle() {
    final var slayingStrategy = mock(DragonSlayingStrategy.class);
    final var slayer = new DragonSlayer(slayingStrategy);

    slayer.goToBattle();

    // goToBattle() must delegate exactly once to the configured strategy.
    verify(slayingStrategy).execute();
    verifyNoMoreInteractions(slayingStrategy);
}
@Override
public ProtobufSystemInfo.Section toProtobuf() {
    // Always report a section named "Search State"; on failure, degrade to a
    // single "State" attribute carrying the error message.
    ProtobufSystemInfo.Section.Builder section = ProtobufSystemInfo.Section.newBuilder();
    section.setName("Search State");
    try {
        setAttribute(section, "State", getStateAsEnum().name());
        completeNodeAttributes(section);
    } catch (Exception e) {
        LoggerFactory.getLogger(EsStateSection.class).warn("Failed to retrieve ES attributes. There will be only a single \"state\" attribute.", e);
        // Prefer the Elasticsearch cause's message when present.
        Throwable cause = e.getCause();
        String message = cause instanceof ElasticsearchException ? cause.getMessage() : e.getMessage();
        setAttribute(section, "State", message);
    }
    return section.build();
}
// A healthy cluster must surface its health status as the "State" attribute.
@Test public void es_state() { assertThatAttributeIs(underTest.toProtobuf(), "State", ClusterHealthStatus.GREEN.name()); }
/**
 * Decodes the QR code image read from the given stream.
 * The decoded image buffer is flushed in all cases (including failures) to
 * release its resources.
 *
 * @param qrCodeInputStream stream containing the QR code image
 * @return the decoded content
 */
public static String decode(InputStream qrCodeInputStream) { BufferedImage image = null; try{ image = ImgUtil.read(qrCodeInputStream); return decode(image); } finally { ImgUtil.flush(image); } }
// Manual (disabled) smoke test: decodes a local QR image file.
// NOTE(review): the result is never asserted — the Console.log is commented
// out, so this only checks that decode() does not throw.
@Test @Disabled public void decodeTest() { final String decode = QrCodeUtil.decode(FileUtil.file("d:/test/pic/qr.png")); //Console.log(decode); }
@Nonnull public static String[] splitNewlineSkipEmpty(@Nonnull String input) { String[] split = input.split("[\r\n]+"); // If the first line of the file is a newline split will still have // one blank entry at the start. if (split.length > 1 && split[0].isEmpty()) return Arrays.copyOfRange(split, 1, split.length); return split; }
@Test
void testSplitNewlineSkipEmpty() {
    // Empty and single-token inputs each yield exactly one entry.
    assertEquals(1, StringUtil.splitNewlineSkipEmpty("").length);
    assertEquals(1, StringUtil.splitNewlineSkipEmpty("a").length);
    // Any run of \r and \n characters collapses into a single separator.
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\nb").length);
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\n\rb").length);
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\r\nb").length);
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\n\nb").length);
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\n\r\r\nb").length);
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\r\n\r\nb").length);
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\r\n\n\rb").length);
    assertEquals(2, StringUtil.splitNewlineSkipEmpty("a\r\n\n\n\n\n\rb").length);
}
@Override public Result invoke(Invocation invocation) throws RpcException { Result result; String value = getUrl().getMethodParameter( RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString()) .trim(); if (ConfigUtils.isEmpty(value)) { // no mock result = this.invoker.invoke(invocation); } else if (value.startsWith(FORCE_KEY)) { if (logger.isWarnEnabled()) { logger.warn( CLUSTER_FAILED_MOCK_REQUEST, "force mock", "", "force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : " + getUrl()); } // force:direct mock result = doMockInvoke(invocation, null); } else { // fail-mock try { result = this.invoker.invoke(invocation); // fix:#4585 if (result.getException() != null && result.getException() instanceof RpcException) { RpcException rpcException = (RpcException) result.getException(); if (rpcException.isBiz()) { throw rpcException; } else { result = doMockInvoke(invocation, rpcException); } } } catch (RpcException e) { if (e.isBiz()) { throw e; } if (logger.isWarnEnabled()) { logger.warn( CLUSTER_FAILED_MOCK_REQUEST, "failed to mock invoke", "", "fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : " + getUrl(), e); } result = doMockInvoke(invocation, e); } } return result; }
// With force-mock configured for getUsers and the real invoker set to error,
// invoke() must return the mocked list of pojos without hitting the remote.
@SuppressWarnings("unchecked") @Test void testMockInvokerFromOverride_Invoke_check_ListPojo() { URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName()) .addParameter( REFER_KEY, URL.encode(PATH_KEY + "=" + IHelloService.class.getName() + "&" + "getUsers.mock=force:return [{id:1, name:\"hi1\"}, {id:2, name:\"hi2\"}]")) .addParameter("invoke_return_error", "true"); Invoker<IHelloService> cluster = getClusterInvoker(url); // Configured with mock RpcInvocation invocation = new RpcInvocation(); invocation.setMethodName("getUsers"); Result ret = cluster.invoke(invocation); List<User> rl = (List<User>) ret.getValue(); System.out.println(rl); Assertions.assertEquals(2, rl.size()); Assertions.assertEquals("hi1", rl.get(0).getName()); }
/**
 * Sends the given request through the underlying API client and returns its
 * typed response.
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test
public void sendContact() {
    String phone = "000111";
    String firstName = "first";
    String surname = "last";
    String vcard = "ok vcard";
    Contact contact = bot.execute(new SendContact(chatId, phone, firstName).lastName(surname).vcard(vcard))
            .message().contact();
    assertEquals(phone, contact.phoneNumber());
    assertEquals(firstName, contact.firstName());
    assertEquals(surname, contact.lastName());
    assertEquals(vcard, contact.vcard());
    // userId is not populated for this contact.
    assertNull(contact.userId());
}
public synchronized boolean tryReadLock() {
    // A read lock can be taken whenever no writer holds the lock; each
    // successful acquisition bumps the reader count kept in `status`.
    if (!isWriteLocked()) {
        status++;
        return true;
    }
    return false;
}
@Test
public void singleTryReadLockTest() {
    SimpleReadWriteLock lock = new SimpleReadWriteLock();
    // With no writer active, the first read-lock attempt must succeed.
    boolean acquired = lock.tryReadLock();
    Assert.isTrue(acquired);
}
public static String reformatParam(@Nullable Object param) {
    // Null gets a fixed placeholder so it is distinguishable in logs.
    if (param == null) {
        return PARAM_NULL;
    }
    // Truncate long values, then escape newlines so the value stays on one log line.
    String truncated = abbreviate(param.toString(), PARAM_MAX_WIDTH);
    return NEWLINE_PATTERN.matcher(truncated).replaceAll("\\\\n");
}
@Test
public void reformatParam() {
    // Null is rendered as a placeholder; ordinary strings pass through,
    // including interior and trailing whitespace.
    assertThat(SqlLogFormatter.reformatParam(null)).isEqualTo("[null]");
    assertThat(SqlLogFormatter.reformatParam("")).isEmpty();
    assertThat(SqlLogFormatter.reformatParam("foo")).isEqualTo("foo");
    assertThat(SqlLogFormatter.reformatParam("foo bar ")).isEqualTo("foo bar ");
}
// Decompresses the uploaded application into a temp directory and runs the
// prepare-and-activate flow, recording thread-lock statistics for the whole
// deployment. The temp directory is cleaned up and recording stopped in the
// finally block regardless of success or failure.
public PrepareAndActivateResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) { DeployHandlerLogger logger = DeployHandlerLogger.forPrepareParams(prepareParams); File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); ThreadLockStats threadLockStats = LockStats.getForCurrentThread(); PrepareAndActivateResult result; try { threadLockStats.startRecording("deploy of " + prepareParams.getApplicationId().serializedForm()); result = deploy(decompressApplication(in, tempDir), prepareParams, logger); } finally { threadLockStats.stopRecording(); cleanupTempDirectory(tempDir, logger); } return result; }
// Deploys at the current Vespa version and checks the config resolves both at
// that version and at an unrelated version (3.2.1).
@Test public void testResolveMultipleVersions() { Version vespaVersion = VespaModelFactory.createTestFactory().version(); applicationRepository.deploy(app1, new PrepareParams.Builder() .applicationId(applicationId()) .vespaVersion(vespaVersion) .build()); SimpletypesConfig config = resolve(applicationId(), vespaVersion); assertEquals(1337, config.intval()); // TODO: Revisit this test, I cannot see that we create a model for version 3.2.1 config = resolve(applicationId(), new Version(3, 2, 1)); assertEquals(1337, config.intval()); }
/**
 * Substitutes ${...} variable references in {@code val} using properties from
 * {@code pc1}. Convenience overload delegating to the two-container variant
 * with no secondary container.
 *
 * @throws ScanException if the value cannot be parsed
 */
public static String substVars(String val, PropertyContainer pc1) throws ScanException { return substVars(val, pc1, null); }
@Test
public void testSubstVarsTwoLevelsDeep() throws ScanException {
    // v2 resolves through a chain of variable references: v2 -> v3 -> v4.
    context.putProperty("v1", "if");
    context.putProperty("v2", "${v3}");
    context.putProperty("v3", "${v4}");
    context.putProperty("v4", "works");

    String actual = OptionHelper.substVars(text, context);
    assertEquals(expected, actual);
}
// Returns the configuration entries for the given topic by delegating to
// topicConfig. NOTE(review): the meaning of the boolean flag is defined by
// topicConfig and is not visible here — confirm at the helper.
@Override public Map<String, String> getTopicConfig(final String topicName) { return topicConfig(topicName, true); }
// A TopicAuthorizationException from describeConfigs must be mapped to
// KsqlTopicAuthorizationException with the topic named in the message.
@Test public void shouldThrowKsqlTopicAuthorizationExceptionFromGetTopicConfig() { // Given: final String topicName = "foobar"; when(adminClient.describeConfigs(ImmutableList.of(topicResource(topicName)))) .thenAnswer(describeConfigsResult(new TopicAuthorizationException(ImmutableSet.of(topicName)))); // When: final Exception e = assertThrows( KsqlTopicAuthorizationException.class, () -> kafkaTopicClient.getTopicConfig(topicName) ); // Then: assertThat(e.getMessage(), containsString( "Authorization denied to Describe_configs on topic(s): [" + topicName + "]")); }
/**
 * Validates {@code index} and {@code range} (both must be non-negative) and
 * delegates to {@code calculateIp} to compute the reverse-zone network
 * address.
 *
 * @throws IllegalArgumentException if {@code index} or {@code range} is negative
 * @throws UnknownHostException if {@code baseIp} cannot be resolved
 */
protected static String getReverseZoneNetworkAddress(String baseIp, int range, int index)
    throws UnknownHostException {
  if (index < 0) {
    // Message aligned with the actual check (and with the range message
    // below): zero is a valid index, only negatives are rejected.
    throw new IllegalArgumentException(
        String.format("Invalid index provided, cannot be negative: %d", index));
  }
  if (range < 0) {
    throw new IllegalArgumentException(
        String.format("Invalid range provided, cannot be negative: %d", range));
  }
  return calculateIp(baseIp, range, index);
}
@Test
public void testThrowUnknownHostExceptionIfIpIsInvalid() throws Exception {
    exception.expect(UnknownHostException.class);
    // Octets are far out of range, so the address cannot be parsed.
    ReverseZoneUtils.getReverseZoneNetworkAddress("213124.21231.14123.13", RANGE, INDEX);
}
/**
 * Broadcast partitioning sends every record to all channels, so selecting a
 * single channel is meaningless.
 *
 * @throws UnsupportedOperationException always
 */
@Override public int selectChannel(SerializationDelegate<StreamRecord<T>> record) { throw new UnsupportedOperationException( "Broadcast partitioner does not support select channels."); }
// selectChannel() must always reject with UnsupportedOperationException.
@Test void testSelectChannels() { assertThatThrownBy(() -> streamPartitioner.selectChannel(serializationDelegate)) .as("Broadcast selector does not support select channels.") .isInstanceOf(UnsupportedOperationException.class); }
// Drives a scenario to completion step by step. The try/catch/finally shape is
// deliberate: afterRun() must run even when a step or the runner itself
// crashes, otherwise countdown latches used by the parallel runner would hang.
// nextStepIndex() is re-evaluated each iteration so debug step-back works, and
// a null currentStepResult (debug step-back or a hook skip) is not recorded.
@Override public void run() { try { // make sure we call afterRun() even on crashes // and operate countdown latches, else we may hang the parallel runner if (steps == null) { beforeRun(); } if (skipped) { return; } int count = steps.size(); int index = 0; while ((index = nextStepIndex()) < count) { currentStep = steps.get(index); execute(currentStep); if (currentStepResult != null) { // can be null if debug step-back or hook skip result.addStepResult(currentStepResult); } } } catch (Exception e) { if (currentStepResult != null) { result.addStepResult(currentStepResult); } logError("scenario [run] failed\n" + StringUtils.throwableToString(e)); currentStepResult = result.addFakeStepResult("scenario [run] failed", e); } finally { if (!skipped) { afterRun(); if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) { featureRuntime.suite.abort(); } } if (caller.isNone()) { logAppender.close(); // reclaim memory } } }
// Values and functions defined in karate-config.js (via karate.config.dir)
// must be visible both in this scenario and in a called feature.
// NOTE(review): the tear-down clears karate.env (never set here) as well as
// karate.config.dir — harmless, but presumably defensive; confirm intent.
@Test void testFunctionsFromGlobalConfig() { System.setProperty("karate.config.dir", "src/test/java/com/intuit/karate/core"); run( "def foo = configUtilsJs.someText", "def bar = configUtilsJs.someFun()", "def res = call read('called2.feature')" ); matchVar("foo", "hello world"); matchVar("bar", "hello world"); Match.that(get("res")).contains("{ calledBar: 'hello world' }"); System.clearProperty("karate.env"); System.clearProperty("karate.config.dir"); }
// Atomically replaces the state stored under `key` in the ConfigMap. The new
// state is written to external storage first; the ConfigMap entry is then
// swapped via an optimistic update. Exactly one of the two state handles is
// discarded in the finally block: the old one on success, the new one on
// failure — except when a PossibleInconsistentStateException leaves it unclear
// whether the metadata was written, in which case nothing is discarded.
@Override public void replace(String key, StringResourceVersion resourceVersion, T state) throws Exception { checkNotNull(key, "Key in ConfigMap."); checkNotNull(state, "State."); final RetrievableStateHandle<T> newStateHandle = storage.store(state); final byte[] serializedStateHandle = serializeOrDiscard(new StateHandleWithDeleteMarker<>(newStateHandle)); // initialize flags to serve the failure case boolean discardOldState = false; boolean discardNewState = true; // We don't want to greedily pull the old state handle as we have to do that anyway in // replaceEntry method for check of delete markers. final AtomicReference<RetrievableStateHandle<T>> oldStateHandleRef = new AtomicReference<>(); try { final boolean success = updateConfigMap( cm -> { try { return replaceEntry( cm, key, serializedStateHandle, oldStateHandleRef); } catch (NotExistException e) { throw new CompletionException(e); } }) .get(); // swap subject for deletion in case of success discardOldState = success; discardNewState = !success; } catch (Exception ex) { final Optional<PossibleInconsistentStateException> possibleInconsistentStateException = ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class); if (possibleInconsistentStateException.isPresent()) { // it's unclear whether the state handle metadata was written to the ConfigMap - // hence, we don't discard any data discardNewState = false; throw possibleInconsistentStateException.get(); } throw ExceptionUtils.findThrowable(ex, NotExistException.class).orElseThrow(() -> ex); } finally { if (discardNewState) { newStateHandle.discardState(); } if (discardOldState) { Objects.requireNonNull( oldStateHandleRef.get(), "state handle should have been set on success") .discardState(); } } }
// After replace(), the store must still contain exactly one entry and the key
// must resolve to the new state.
@Test void testReplace() throws Exception { new Context() { { runTest( () -> { leaderCallbackGrantLeadership(); final KubernetesStateHandleStore< TestingLongStateHandleHelper.LongStateHandle> store = new KubernetesStateHandleStore<>( flinkKubeClient, LEADER_CONFIGMAP_NAME, longStateStorage, filter, LOCK_IDENTITY); store.addAndLock(key, state); final TestingLongStateHandleHelper.LongStateHandle newState = new TestingLongStateHandleHelper.LongStateHandle(23456L); final StringResourceVersion resourceVersion = store.exists(key); store.replace(key, resourceVersion, newState); assertThat(store.getAllAndLock()).hasSize(1); assertThat(store.getAndLock(key).retrieveState()).isEqualTo(newState); }); } }; }
// Looks up offsets by timestamp for the given partitions. The partitions'
// topics are registered as transient metadata topics for the duration of the
// lookup and always cleared afterwards (finally), so the consumer's metadata
// is not permanently widened by this call.
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Timer timer) { metadata.addTransientTopics(topicsForPartitions(timestampsToSearch.keySet())); try { Map<TopicPartition, ListOffsetData> fetchedOffsets = fetchOffsetsByTimes(timestampsToSearch, timer, true).fetchedOffsets; return buildOffsetsForTimesResult(timestampsToSearch, fetchedOffsets); } finally { metadata.clearTransientTopics(); } }
// Covers offsetsForTimes: the empty-map fast path, an unknown offset, and a
// matrix of per-partition error combinations with their expected offsets.
@Test public void testGetOffsetsForTimes() { buildFetcher(); // Empty map assertTrue(offsetFetcher.offsetsForTimes(new HashMap<>(), time.timer(100L)).isEmpty()); // Unknown Offset testGetOffsetsForTimesWithUnknownOffset(); // Error code none with unknown offset testGetOffsetsForTimesWithError(Errors.NONE, Errors.NONE, -1L, null); // Error code none with known offset testGetOffsetsForTimesWithError(Errors.NONE, Errors.NONE, 10L, 10L); // Test both of partition has error. testGetOffsetsForTimesWithError(Errors.NOT_LEADER_OR_FOLLOWER, Errors.INVALID_REQUEST, 10L, 10L); // Test the second partition has error. testGetOffsetsForTimesWithError(Errors.NONE, Errors.NOT_LEADER_OR_FOLLOWER, 10L, 10L); // Test different errors. testGetOffsetsForTimesWithError(Errors.NOT_LEADER_OR_FOLLOWER, Errors.NONE, 10L, 10L); testGetOffsetsForTimesWithError(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.NONE, 10L, 10L); testGetOffsetsForTimesWithError(Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, Errors.NONE, 10L, null); testGetOffsetsForTimesWithError(Errors.BROKER_NOT_AVAILABLE, Errors.NONE, 10L, 10L); }
/** This query is always a push query. */
@Override public KsqlQueryType getQueryType() { return KsqlQueryType.PUSH; }
// The default query type reported must be PUSH.
@Test public void shouldReturnPushQueryTypeByDefault() { assertThat(query.getQueryType(), is(KsqlQueryType.PUSH)); }
/**
 * Aggregates with the given initializer, delegating to the two-argument
 * overload with {@code Materialized.with(null, null)} (no explicit serdes).
 */
@Override public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) { return aggregate(initializer, Materialized.with(null, null)); }
// Passing a null Materialized to the two-argument aggregate must be rejected
// with a NullPointerException.
@Test public void shouldNotHaveNullMaterializedOnTwoOptionAggregate() { assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT, (Materialized<String, String, WindowStore<Bytes, byte[]>>) null)); }
/**
 * Coerces an evaluated param result into a {@code double[]}. Accepts a
 * {@code BigDecimal[]}, an existing {@code double[]} (returned as-is), or a
 * {@code List} whose elements are normalised through their string form via
 * {@link BigDecimal}. Anything else — or a non-numeric element — raises a
 * {@code MaestroInternalError}.
 */
public static double[] toDoubleArray(String name, Object value) {
  try {
    if (value instanceof BigDecimal[]) {
      BigDecimal[] decimals = (BigDecimal[]) value;
      return Arrays.stream(decimals).mapToDouble(BigDecimal::doubleValue).toArray();
    }
    if (value instanceof double[]) {
      return (double[]) value;
    }
    if (value instanceof List) {
      // Elements may be numbers or numeric strings; String.valueOf + BigDecimal
      // handles both uniformly.
      return ((List<?>) value)
          .stream()
          .mapToDouble(element -> new BigDecimal(String.valueOf(element)).doubleValue())
          .toArray();
    }
    throw new MaestroInternalError(
        "Param [%s] has an invalid evaluated result [%s]", name, toTruncateString(value));
  } catch (NumberFormatException nfe) {
    throw new MaestroInternalError(
        nfe,
        "Invalid number format for evaluated result: %s for param [%s]",
        toTruncateString(value),
        name);
  }
}
@Test
public void testListToDoubleArray() {
    // Mixed element types (BigDecimal, String, Double) all coerce to double.
    Object input = Arrays.asList(new BigDecimal("1.2"), "3.4", 5.6);
    double[] result = ParamHelper.toDoubleArray("foo", input);
    assertEquals(1.2, result[0], 0.00000000);
    assertEquals(3.4, result[1], 0.00000000);
    assertEquals(5.6, result[2], 0.00000000);
}
/**
 * Writes the application's sysfs spec to the NM-private location
 * nmPrivate/&lt;appId&gt;/sysfs/app.json (replacing any existing file), then
 * runs the privileged SYNC_YARN_SYSFS container-executor operation over the
 * NM local dirs.
 *
 * @param ctx   node manager context (part of the interface contract; not read here)
 * @param user  user the application runs as
 * @param appId application id owning the sysfs
 * @param spec  JSON spec to publish (written as UTF-8)
 * @throws IOException if the file cannot be written or the privileged
 *                     operation fails
 */
@Override
public synchronized void updateYarnSysFS(Context ctx, String user,
    String appId, String spec) throws IOException {
  LocalDirsHandlerService dirsHandler = nmContext.getLocalDirsHandler();
  Path sysFSPath = dirsHandler.getLocalPathForWrite(
      "nmPrivate/" + appId + "/sysfs/app.json");
  File file = new File(sysFSPath.toString());
  List<String> localDirs = dirsHandler.getLocalDirs();
  if (file.exists()) {
    if (!file.delete()) {
      LOG.warn("Unable to delete {}", sysFSPath);
    }
  }
  if (file.createNewFile()) {
    // try-with-resources closes the stream even if write() throws,
    // replacing the manual try/finally of the previous version.
    try (FileOutputStream output = new FileOutputStream(file)) {
      output.write(spec.getBytes(StandardCharsets.UTF_8));
    }
  }
  PrivilegedOperation privOp = new PrivilegedOperation(
      PrivilegedOperation.OperationType.SYNC_YARN_SYSFS);
  String runAsUser = getRunAsUser(user);
  privOp.appendArgs(runAsUser, user,
      Integer.toString(PrivilegedOperation.RunAsUserCommand
          .SYNC_YARN_SYSFS.getValue()),
      appId, StringUtils.join(PrivilegedOperation
          .LINUX_FILE_PATH_SEPARATOR, localDirs));
  privOp.disableFailureLogging();
  PrivilegedOperationExecutor privilegedOperationExecutor =
      PrivilegedOperationExecutor.getInstance(nmContext.getConf());
  try {
    privilegedOperationExecutor.executePrivilegedOperation(null, privOp,
        null, null, false, false);
  } catch (PrivilegedOperationException e) {
    throw new IOException(e);
  }
}
// NOTE(review): this test calls updateYarnSysFS on a Mockito mock and then
// verifies the call on the same mock — it exercises Mockito, not the real
// LinuxContainerExecutor implementation. Consider using a spy or a real
// executor with mocked collaborators so the method body is actually covered.
@Test public void testUpdateYarnSysFS() throws Exception { String user = System.getProperty("user.name"); String appId="app-1"; String spec=""; Context ctx = mock(Context.class); LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class); lce.updateYarnSysFS(ctx, user, appId, spec); verify(lce, times(1)).updateYarnSysFS(ctx, user, appId, spec); }
/**
 * Returns an implementation instance of the given mapper interface, locating
 * the generated class through the class loaders collected from {@code clazz}.
 *
 * @throws RuntimeException wrapping the ClassNotFoundException or
 *         NoSuchMethodException when no implementation can be loaded
 */
public static <T> T getMapper(Class<T> clazz) { try { List<ClassLoader> classLoaders = collectClassLoaders( clazz.getClassLoader() ); return getMapper( clazz, classLoaders ); } catch ( ClassNotFoundException | NoSuchMethodException e ) { throw new RuntimeException( e ); } }
// Mappers.getMapper must be able to instantiate an implementation even when
// the mapper interface is package-private.
@Test public void shouldReturnPackagePrivateImplementationInstance() { assertThat( Mappers.getMapper( PackagePrivateMapper.class ) ).isNotNull(); }
// Stores the entry with the given time-to-live by blocking on the async put.
// NOTE(review): the return value is whatever putAsync yields — presumably the
// previously mapped value, per Map.put semantics; confirm at putAsync.
@Override public V put(K key, V value, Duration ttl) { return get(putAsync(key, value, ttl)); }
// getAll must return only the entries whose keys exist in the map ("E" is
// absent and must not appear in the result).
@Test public void testGetAllWithStringKeys() { RMapCacheNative<String, Integer> map = redisson.getMapCacheNative("getAllStrings"); map.put("A", 100); map.put("B", 200); map.put("C", 300); map.put("D", 400); Map<String, Integer> filtered = map.getAll(new HashSet<String>(Arrays.asList("B", "C", "E"))); Map<String, Integer> expectedMap = new HashMap<String, Integer>(); expectedMap.put("B", 200); expectedMap.put("C", 300); Assertions.assertEquals(expectedMap, filtered); map.destroy(); }
public static DecryptionResultHandler getHandler(@NonNull ReactApplicationContext reactContext,
                                                 @NonNull final CipherStorage storage,
                                                 @NonNull final BiometricPrompt.PromptInfo promptInfo) {
    if (!storage.isBiometrySupported()) {
        // No biometry available: use the handler that never prompts the user.
        return new DecryptionResultHandlerNonInteractive();
    }
    // Devices affected by the OnePlus biometric bug need the manual-retry variant.
    return hasOnePlusBiometricBug()
            ? new DecryptionResultHandlerInteractiveBiometricManualRetry(reactContext, storage, promptInfo)
            : new DecryptionResultHandlerInteractiveBiometric(reactContext, storage, promptInfo);
}
// A OnePlus 6 (not on the buggy-device list) with biometry supported must get
// the plain interactive handler, not the manual-retry workaround.
@Test @Config(sdk = Build.VERSION_CODES.M) public void testBiometryWithoutBug() { // GIVEN ReflectionHelpers.setStaticField(android.os.Build.class, "BRAND", "OnePlus"); ReflectionHelpers.setStaticField(android.os.Build.class, "MODEL", "ONEPLUS A6000"); // OnePlus 6 final ReactApplicationContext mockContext = mock(ReactApplicationContext.class); final CipherStorage storage = mock(CipherStorageBase.class); when(storage.isBiometrySupported()).thenReturn(true); final BiometricPrompt.PromptInfo promptInfo = mock(BiometricPrompt.PromptInfo.class); // WHEN DecryptionResultHandler handler = DecryptionResultHandlerProvider.getHandler(mockContext, storage, promptInfo); //THEN assertThat(handler, instanceOf(DecryptionResultHandlerInteractiveBiometric.class)); }
/** Test-visible accessor for the configured rolling monitor interval. */
@VisibleForTesting public long getRollingMonitorInterval() { return rollingMonitorInterval; }
@Test
public void testRollingMonitorIntervalDefault() {
    LogAggregationService service = new LogAggregationService(dispatcher,
        this.context, this.delSrvc, super.dirsHandler);
    service.init(this.conf);
    // With no explicit configuration the interval defaults to -1.
    long interval = service.getRollingMonitorInterval();
    assertEquals(-1L, interval);
}
/**
 * Adds a global-table source node to the topology for the given topic and
 * returns the resulting {@link GlobalKTable}. Versioned stores are rejected,
 * changelogging is disabled, and the store is always materialized under the
 * configured store name.
 */
public <K, V> GlobalKTable<K, V> globalTable(final String topic,
                                             final ConsumedInternal<K, V> consumed,
                                             final MaterializedInternal<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
    Objects.requireNonNull(consumed, "consumed can't be null");
    Objects.requireNonNull(materialized, "materialized can't be null");
    // Global tables do not support versioned state stores.
    if (materialized.storeSupplier() instanceof VersionedBytesStoreSupplier) {
        throw new TopologyException("GlobalTables cannot be versioned.");
    }
    // explicitly disable logging for global stores
    materialized.withLoggingDisabled();
    // Derive source/processor names from the user-supplied name, or generate them.
    final NamedInternal named = new NamedInternal(consumed.name());
    final String sourceName = named
        .suffixWithOrElseGet(TABLE_SOURCE_SUFFIX, this, KStreamImpl.SOURCE_NAME);
    final String processorName = named
        .orElseGenerateWithPrefix(this, KTableImpl.SOURCE_NAME);
    // enforce store name as queryable name to always materialize global table stores
    final String storeName = materialized.storeName();
    final KTableSource<K, V> tableSource = new KTableSource<>(storeName, storeName);
    final ProcessorParameters<K, V, ?, ?> processorParameters = new ProcessorParameters<>(tableSource, processorName);
    // Assemble the source node and attach it directly under the topology root.
    final TableSourceNode<K, V> tableSourceNode = TableSourceNode.<K, V>tableSourceNodeBuilder()
        .withTopic(topic)
        .isGlobalKTable(true)
        .withSourceName(sourceName)
        .withConsumedInternal(consumed)
        .withMaterializedInternal(materialized)
        .withProcessorParameters(processorParameters)
        .build();
    addGraphNode(root, tableSourceNode);
    return new GlobalKTableImpl<>(new KTableSourceValueGetterSupplier<>(storeName), materialized.queryableStoreName());
}
// NOTE(review): "QueryaIble" in the method name looks like a typo for "Queryable".
@Test
public void shouldBuildGlobalTableWithQueryaIbleStoreName() {
    // Materializing with an explicit store name should surface that name
    // as the global table's queryable store name.
    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materializedInternal =
        new MaterializedInternal<>(Materialized.as("globalTable"), builder, storePrefix);
    final GlobalKTable<String, String> table1 = builder.globalTable("topic2", consumed, materializedInternal);
    assertEquals("globalTable", table1.queryableStoreName());
}
/**
 * Validates a service invocation with Bean Validation. Validation groups are,
 * in order: Default, the service interface itself, the per-method group class
 * (if generated), and any groups declared via @MethodValidated. Both a
 * synthesized parameter bean and each individual argument are validated.
 *
 * @throws ConstraintViolationException if any constraint is violated
 */
@Override
public void validate(String methodName, Class<?>[] parameterTypes, Object[] arguments) throws Exception {
    List<Class<?>> groups = new ArrayList<>();
    // Per-method group class generated for this method, if one exists.
    Class<?> methodClass = methodClass(methodName);
    if (methodClass != null) {
        groups.add(methodClass);
    }
    Method method = clazz.getMethod(methodName, parameterTypes);
    Class<?>[] methodClasses;
    if (method.isAnnotationPresent(MethodValidated.class)) {
        methodClasses = method.getAnnotation(MethodValidated.class).value();
        groups.addAll(Arrays.asList(methodClasses));
    }
    // add into default group — note Default and the interface are forced to the front
    groups.add(0, Default.class);
    groups.add(1, clazz);
    // convert list to array
    Class<?>[] classGroups = groups.toArray(new Class[0]);

    Set<ConstraintViolation<?>> violations = new HashSet<>();
    // Validate constraints placed directly on method parameters via a bean
    // synthesized from the argument values, when one can be built.
    Object parameterBean = getMethodParameterBean(clazz, method, arguments);
    if (parameterBean != null) {
        violations.addAll(validator.validate(parameterBean, classGroups));
    }
    // Also validate each argument object itself.
    for (Object arg : arguments) {
        validate(violations, arg, classGroups);
    }

    if (!violations.isEmpty()) {
        logger.info("Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations);
        throw new ConstraintViolationException(
            "Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations, violations);
    }
}
@Test
void testItWithPartialParameterValidation() {
    URL url = URL.valueOf("test://test:11/org.apache.dubbo.validation.support.jvalidation.mock.JValidatorTestTarget");
    JValidator jValidator = new JValidator(url);
    try {
        // All three arguments are invalid candidates; the method's constraints
        // are expected to reject exactly two of them.
        jValidator.validate("someMethod6", new Class<?>[] {Integer.class, String.class, Long.class},
            new Object[] {null, "", null});
        Assertions.fail();
    } catch (Exception e) {
        assertThat(e, instanceOf(ConstraintViolationException.class));
        ConstraintViolationException e1 = (ConstraintViolationException) e;
        // Only the constrained parameters produce violations.
        assertThat(e1.getConstraintViolations().size(), is(2));
    }
}
/**
 * Returns a new {@link DenseVector} holding the element-wise sum of this
 * vector and {@code other}. Neither operand is modified.
 *
 * @throws IllegalArgumentException if the dimensions differ
 */
@Override
public DenseVector add(SGDVector other) {
    if (other.size() != elements.length) {
        throw new IllegalArgumentException("Can't add two vectors of different dimension, this = " + elements.length + ", other = " + other.size());
    }
    // Start from a copy of this vector, then accumulate the (possibly sparse)
    // entries of the other operand.
    double[] summed = toArray();
    for (VectorTuple entry : other) {
        summed[entry.index] += entry.value;
    }
    return new DenseVector(summed);
}
@Test
public void add() {
    DenseVector a = generateVectorA();
    DenseVector b = generateVectorB();
    DenseVector c = generateVectorC();
    DenseVector empty = generateEmptyVector();

    // The empty vector is the additive identity.
    assertEquals(a, a.add(empty), "A + empty");
    assertEquals(b, b.add(empty), "B + empty");
    assertEquals(c, c.add(empty), "C + empty");

    // Adding a vector to itself doubles it.
    assertEquals(scale(a, 2.0), a.add(a), "A * 2");
    assertEquals(scale(b, 2.0), b.add(b), "B * 2");
    assertEquals(scale(c, 2.0), c.add(c), "C * 2");

    // Pairwise sums, checked in both orders to confirm commutativity.
    DenseVector sumAB = generateVectorAAddB();
    DenseVector sumAC = generateVectorAAddC();
    DenseVector sumBC = generateVectorBAddC();
    assertEquals(sumAB, a.add(b), "A + B");
    assertEquals(sumAC, a.add(c), "A + C");
    assertEquals(sumAB, b.add(a), "B + A");
    assertEquals(sumBC, b.add(c), "B + C");
    assertEquals(sumAC, c.add(a), "C + A");
    assertEquals(sumBC, c.add(b), "C + B");
}
/**
 * Runs the mover over its namespace and maps failures onto exit statuses.
 * The dispatcher's executor is always torn down, even on failure.
 */
private ExitStatus run() {
    try {
        init();
        return new Processor().processNamespace().getExitStatus();
    } catch (IllegalArgumentException e) {
        // Bad arguments: report to stdout only.
        System.out.println(e + ". Exiting ...");
        return ExitStatus.ILLEGAL_ARGUMENTS;
    } catch (IOException e) {
        // I/O failure: report to stdout and the log.
        System.out.println(e + ". Exiting ...");
        LOG.error(e + ". Exiting ...");
        return ExitStatus.IO_EXCEPTION;
    } finally {
        // Stop any in-flight block moves before returning.
        dispatcher.shutdownNow();
    }
}
@Test(timeout=100000)
public void testMoverMetrics() throws Exception {
    // 10 MB blocks; a 4-block file is written so 4 block moves are expected.
    long blockSize = 10*1024*1024;
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.setInt(DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY, 1);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize);
    // Two datanodes: one DISK-only, one ARCHIVE-only, so a COLD policy
    // forces every replica to move between nodes.
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .storageTypes(
            new StorageType[][] {{StorageType.DISK, StorageType.DISK},
                {StorageType.ARCHIVE, StorageType.ARCHIVE}})
        .build();
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    final String file = "/testMaxIterationTime.dat";
    final Path path = new Path(file);
    short repFactor = 1;
    int seed = 0xFAFAFA;
    // write to DISK
    DFSTestUtil.createFile(fs, path, 4L * blockSize, repFactor, seed);
    // move to ARCHIVE
    fs.setStoragePolicy(new Path(file), "COLD");
    Map<URI, List<Path>> nnWithPath = new HashMap<>();
    List<Path> paths = new ArrayList<>();
    paths.add(path);
    nnWithPath.put(DFSUtil.getInternalNsRpcUris(conf).iterator().next(), paths);
    Mover.run(nnWithPath, conf);
    // The mover registers a metrics source named after the block pool.
    final String moverMetricsName = "Mover-" + cluster.getNameNode(0).getNamesystem().getBlockPoolId();
    MetricsSource moverMetrics = DefaultMetricsSystem.instance().getSource(moverMetricsName);
    assertNotNull(moverMetrics);
    MetricsRecordBuilder rb = MetricsAsserts.getMetrics(moverMetricsName);
    // Check metrics: 4 blocks scheduled/moved, 1 file, 4 * 10 MB = 41943040 bytes.
    assertEquals(4, MetricsAsserts.getLongCounter("BlocksScheduled", rb));
    assertEquals(1, MetricsAsserts.getLongCounter("FilesProcessed", rb));
    assertEquals(41943040, MetricsAsserts.getLongGauge("BytesMoved", rb));
    assertEquals(4, MetricsAsserts.getLongGauge("BlocksMoved", rb));
    assertEquals(0, MetricsAsserts.getLongGauge("BlocksFailed", rb));
}
/**
 * Queries the Nacos server for a page of service names, optionally filtered
 * by a label selector, and returns the total count plus the page data.
 */
@Override
public ListView<String> getServiceList(int pageNo, int pageSize, String groupName, AbstractSelector selector)
        throws NacosException {
    // Paging plus namespace/group parameters for the service-list endpoint.
    Map<String, String> params = new HashMap<>(16);
    params.put("pageNo", String.valueOf(pageNo));
    params.put("pageSize", String.valueOf(pageSize));
    params.put(CommonParams.NAMESPACE_ID, namespaceId);
    params.put(CommonParams.GROUP_NAME, groupName);

    // Only label selectors contribute an extra request parameter; other
    // selector types are ignored. valueOf still validates the type string,
    // preserving the original failure mode for unknown types.
    if (selector != null && SelectorType.valueOf(selector.getType()) == SelectorType.label) {
        ExpressionSelector expressionSelector = (ExpressionSelector) selector;
        params.put(SELECTOR_PARAM, JacksonUtils.toJson(expressionSelector));
    }

    String result = reqApi(UtilAndComs.nacosUrlBase + "/service/list", params, HttpMethod.GET);
    JsonNode json = JacksonUtils.toObj(result);

    // Response shape: {"count": <total>, "doms": [<service names>]}.
    ListView<String> listView = new ListView<>();
    listView.setCount(json.get("count").asInt());
    listView.setData(JacksonUtils.toObj(json.get("doms").toString(), new TypeReference<List<String>>() {
    }));
    return listView;
}
@Test
void testGetServiceListWithLabelSelector() throws Exception {
    //given: a rest template stubbed to return a two-service page
    NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
    HttpRestResult<Object> a = new HttpRestResult<Object>();
    a.setData("{\"count\":2,\"doms\":[\"aaa\",\"bbb\"]}");
    a.setCode(200);
    when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenReturn(a);
    // Inject the mock into the proxy's private field via reflection.
    final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
    nacosRestTemplateField.setAccessible(true);
    nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
    String groupName = "group1";

    //when: listing services with a label (expression) selector
    ListView<String> serviceList = clientProxy.getServiceList(1, 10, groupName, new ExpressionSelector());

    //then: one GET to /service/list, and the stubbed payload is parsed
    verify(nacosRestTemplate, times(1)).exchangeForm(endsWith("/service/list"), any(), any(), any(), eq(HttpMethod.GET), any());
    assertEquals(2, serviceList.getCount());
    assertEquals("aaa", serviceList.getData().get(0));
    assertEquals("bbb", serviceList.getData().get(1));
}
/**
 * Computes how many cluster nodes could run the AM, given its resource
 * requests and (optionally) its node-label expression. Strict (non-relaxed)
 * locality requests and node labels each narrow the candidate set; when both
 * apply, the result is their intersection. With neither, the whole cluster
 * counts.
 */
public static int getApplicableNodeCountForAM(RMContext rmContext, Configuration conf,
                                              List<ResourceRequest> amReqs) {
    // Determine the list of nodes that are eligible based on the strict
    // resource requests
    Set<NodeId> nodesForReqs = new HashSet<>();
    for (ResourceRequest amReq : amReqs) {
        // Only named (non-ANY) requests with relaxed locality contribute nodes.
        if (amReq.getRelaxLocality() && !amReq.getResourceName().equals(ResourceRequest.ANY)) {
            nodesForReqs.addAll(rmContext.getScheduler().getNodeIds(amReq.getResourceName()));
        }
    }

    if (YarnConfiguration.areNodeLabelsEnabled(conf)) {
        // Determine the list of nodes that are eligible based on the node label
        String amNodeLabelExpression = amReqs.get(0).getNodeLabelExpression();
        Set<NodeId> nodesForLabels = getNodeIdsForLabel(rmContext, amNodeLabelExpression);
        if (nodesForLabels != null && !nodesForLabels.isEmpty()) {
            // If only node labels, strip out any wildcard NodeIds and return
            if (nodesForReqs.isEmpty()) {
                for (Iterator<NodeId> it = nodesForLabels.iterator(); it.hasNext();) {
                    // Port 0 marks a wildcard NodeId, which is not a real node.
                    if (it.next().getPort() == 0) {
                        it.remove();
                    }
                }
                return nodesForLabels.size();
            } else {
                // The NodeIds common to both the strict resource requests and the
                // node label is the eligible set
                return Sets.intersection(nodesForReqs, nodesForLabels).size();
            }
        }
    }

    // If no strict resource request NodeIds nor node label NodeIds, then just
    // return the entire cluster
    if (nodesForReqs.isEmpty()) {
        return rmContext.getScheduler().getNumClusterNodes();
    }
    // No node label NodeIds, so return the strict resource request NodeIds
    return nodesForReqs.size();
}
@Test
public void testGetApplicableNodeCountForAMLocality() throws Exception {
    // A rack with 29 anonymous hosts plus node2; node1 sits outside the rack.
    List<NodeId> rack1Nodes = new ArrayList<>();
    for (int i = 0; i < 29; i++) {
        rack1Nodes.add(NodeId.newInstance("host" + i, 1234));
    }
    NodeId node1 = NodeId.newInstance("node1", 1234);
    NodeId node2 = NodeId.newInstance("node2", 1234);
    rack1Nodes.add(node2);

    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false);
    // Mocked 100-node cluster whose scheduler resolves the rack and nodes above.
    ResourceScheduler scheduler = Mockito.mock(ResourceScheduler.class);
    Mockito.when(scheduler.getNumClusterNodes()).thenReturn(100);
    Mockito.when(scheduler.getNodeIds("/rack1")).thenReturn(rack1Nodes);
    Mockito.when(scheduler.getNodeIds("node1"))
        .thenReturn(Collections.singletonList(node1));
    Mockito.when(scheduler.getNodeIds("node2"))
        .thenReturn(Collections.singletonList(node2));
    RMContext rmContext = Mockito.mock(RMContext.class);
    Mockito.when(rmContext.getScheduler()).thenReturn(scheduler);

    // ANY with relaxed locality contributes no strict nodes -> whole cluster.
    ResourceRequest anyReq = createResourceRequest(ResourceRequest.ANY, true, null);
    List<ResourceRequest> reqs = new ArrayList<>();
    reqs.add(anyReq);
    Assert.assertEquals(100,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));

    // A relaxed rack request limits candidates to the rack's 30 nodes.
    ResourceRequest rackReq = createResourceRequest("/rack1", true, null);
    reqs.add(rackReq);
    Assert.assertEquals(30,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    // Relaxing ANY off has no effect: ANY never contributes named nodes.
    anyReq.setRelaxLocality(false);
    Assert.assertEquals(30,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    // With the rack also strict, no request contributes -> whole cluster again.
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(100,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));

    // A strict node request contributes nothing until relaxed.
    ResourceRequest node1Req = createResourceRequest("node1", false, null);
    reqs.add(node1Req);
    Assert.assertEquals(100,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(true);
    Assert.assertEquals(1,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    // Rack relaxed again: 30 rack nodes plus node1 (outside the rack) = 31.
    rackReq.setRelaxLocality(true);
    Assert.assertEquals(31,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));

    // node2 is already inside the rack, so adding it changes nothing ...
    ResourceRequest node2Req = createResourceRequest("node2", false, null);
    reqs.add(node2Req);
    Assert.assertEquals(31,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(true);
    Assert.assertEquals(31,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    // ... until the rack is dropped: then only node1 and node2 remain.
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(2,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(false);
    Assert.assertEquals(1,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    // With every request strict, fall back to the full cluster.
    node2Req.setRelaxLocality(false);
    Assert.assertEquals(100,
        RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
}
/**
 * Looks up a single key in the materialized state store on the given
 * partition, optionally bounded by a consistency {@link Position}. Returns an
 * empty iterator when the key is absent; wraps unexpected failures in
 * {@link MaterializationException}.
 */
@Override
public KsMaterializedQueryResult<Row> get(
    final GenericKey key,
    final int partition,
    final Optional<Position> position
) {
  try {
    final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
    StateQueryRequest<ValueAndTimestamp<GenericRow>> request =
        inStore(stateStore.getStateStoreName())
            .withQuery(query)
            .withPartitions(ImmutableSet.of(partition));
    // A position bound makes the query wait for (or reject) stale replicas.
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }
    final StateQueryResult<ValueAndTimestamp<GenericRow>> result =
        stateStore.getKafkaStreams().query(request);

    final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
        result.getPartitionResults().get(partition);

    // Some of these failures are retriable, and in the future, we may want to retry
    // locally before throwing.
    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    } else if (queryResult.getResult() == null) {
      // Key not present: empty iterator, but still report the read position.
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          Collections.emptyIterator(), queryResult.getPosition());
    } else {
      final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
              .iterator(),
          queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    // Already the right exception types — rethrow untouched.
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
@Test
public void shouldReturnValuesBothBound() {
    // Given: the store query yields a two-row iterator result
    when(kafkaStreams.query(any())).thenReturn(getIteratorResult());

    // When: fetching the key range [A_KEY, A_KEY2] on the partition
    final KsMaterializedQueryResult<Row> result = table.get(PARTITION, A_KEY, A_KEY2);

    // Then: both rows come back in order, and the read position is reported
    Iterator<Row> rowIterator = result.getRowIterator();
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY, ROW1, TIME1)));
    assertThat(rowIterator.next(), is(Row.of(SCHEMA, A_KEY2, ROW2, TIME2)));
    assertThat(rowIterator.hasNext(), is(false));
    assertThat(result.getPosition(), not(Optional.empty()));
    assertThat(result.getPosition().get(), is(POSITION));
}
/**
 * Creates a builder for an {@code InflatableSet} pre-sized for the expected
 * number of elements.
 *
 * @param initialCapacity expected number of elements
 * @param <T> element type
 * @return a new builder
 */
public static <T> Builder<T> newBuilder(int initialCapacity) {
    return new Builder<>(initialCapacity);
}
@Test
public void serialization_whenInInitialLoadingAndEmpty() throws Exception {
    // An empty, freshly built set must survive a Java-serialization round trip
    // and compare equal to the original.
    InflatableSet<Object> original = InflatableSet.newBuilder(0).build();
    InflatableSet<Object> roundTripped = TestJavaSerializationUtils.serializeAndDeserialize(original);
    assertEquals(original, roundTripped);
}
/**
 * Injects schema information into CREATE (CS) and CREATE ... AS SELECT (CAS)
 * statements; all other statement types pass through unchanged. KsqlExceptions
 * raised during injection are rewrapped as statement exceptions carrying the
 * masked statement text.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  // Only CS and CAS statements need schema injection.
  if (!(statement.getStatement() instanceof CreateSource)
      && !(statement.getStatement() instanceof CreateAsSelect)) {
    return statement;
  }

  try {
    if (statement.getStatement() instanceof CreateSource) {
      final ConfiguredStatement<CreateSource> createStatement =
          (ConfiguredStatement<CreateSource>) statement;
      // Fall back to the original statement when no injection applies.
      return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
    } else {
      final ConfiguredStatement<CreateAsSelect> createStatement =
          (ConfiguredStatement<CreateAsSelect>) statement;
      return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(createStatement);
    }
  } catch (final KsqlStatementException e) {
    // Already carries statement context — rethrow untouched.
    throw e;
  } catch (final KsqlException e) {
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
}
@Test
public void shouldThrowIfCtasKeyTableElementsNotCompatibleExtraKey() {
    // Given: a CTAS pinned to a registry schema whose key columns do not
    // match the statement's (extra key column in the DDL schema).
    givenFormatsAndProps("protobuf", null, ImmutableMap.of("KEY_SCHEMA_ID", new IntegerLiteral(42)));
    givenDDLSchemaAndFormats(LOGICAL_SCHEMA_EXTRA_KEY, "protobuf", "avro",
        SerdeFeature.WRAP_SINGLES, SerdeFeature.UNWRAP_SINGLES);

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> injector.inject(ctasStatement)
    );

    // Then:
    assertThat(e.getMessage(), containsString("The following key columns are changed, missing or reordered: "
        + "[`key1` STRING KEY]. Schema from schema registry is [`key` STRING KEY]"));
}
/**
 * Executes a CREATE TABLE statement: resolves the target catalog, builds the
 * column list from column definitions and LIKE clauses, collects and
 * validates constraints, checks access control, and finally asks the metadata
 * layer to create the table. Honors IF NOT EXISTS both before and after the
 * create call (connectors may not handle the flag themselves).
 */
@VisibleForTesting
public ListenableFuture<?> internalExecute(CreateTable statement, Metadata metadata,
        AccessControl accessControl, Session session, List<Expression> parameters,
        WarningCollector warningCollector)
{
    checkArgument(!statement.getElements().isEmpty(), "no columns for table");

    Map<NodeRef<Parameter>, Expression> parameterLookup = parameterExtractor(statement, parameters);
    QualifiedObjectName tableName = createQualifiedObjectName(session, statement, statement.getName());
    Optional<TableHandle> tableHandle = metadata.getMetadataResolver(session).getTableHandle(tableName);
    // Pre-check for an existing table: fail unless IF NOT EXISTS was given.
    if (tableHandle.isPresent()) {
        if (!statement.isNotExists()) {
            throw new SemanticException(TABLE_ALREADY_EXISTS, statement, "Table '%s' already exists", tableName);
        }
        return immediateFuture(null);
    }

    ConnectorId connectorId = metadata.getCatalogHandle(session, tableName.getCatalogName())
            .orElseThrow(() -> new PrestoException(NOT_FOUND, "Catalog does not exist: " + tableName.getCatalogName()));

    // LinkedHashMap preserves the declared column order; keys are lower-cased names.
    LinkedHashMap<String, ColumnMetadata> columns = new LinkedHashMap<>();
    Map<String, Object> inheritedProperties = ImmutableMap.of();
    boolean includingProperties = false;
    List<TableConstraint<String>> constraints = new ArrayList<>();
    for (TableElement element : statement.getElements()) {
        if (element instanceof ColumnDefinition) {
            ColumnDefinition column = (ColumnDefinition) element;
            String name = column.getName().getValue().toLowerCase(Locale.ENGLISH);
            Type type;
            try {
                type = metadata.getType(parseTypeSignature(column.getType()));
            }
            catch (IllegalArgumentException e) {
                throw new SemanticException(TYPE_MISMATCH, element, "Unknown type '%s' for column '%s'", column.getType(), column.getName());
            }
            if (type.equals(UNKNOWN)) {
                throw new SemanticException(TYPE_MISMATCH, element, "Unknown type '%s' for column '%s'", column.getType(), column.getName());
            }
            if (columns.containsKey(name)) {
                throw new SemanticException(DUPLICATE_COLUMN_NAME, column, "Column name '%s' specified more than once", column.getName());
            }
            // NOT NULL columns require connector support.
            if (!column.isNullable() && !metadata.getConnectorCapabilities(session, connectorId).contains(NOT_NULL_COLUMN_CONSTRAINT)) {
                throw new SemanticException(NOT_SUPPORTED, column, "Catalog '%s' does not support non-null column for column name '%s'", connectorId.getCatalogName(), column.getName());
            }
            Map<String, Expression> sqlProperties = mapFromProperties(column.getProperties());
            Map<String, Object> columnProperties = metadata.getColumnPropertyManager().getProperties(
                    connectorId,
                    tableName.getCatalogName(),
                    sqlProperties,
                    session,
                    metadata,
                    parameterLookup);
            columns.put(name, new ColumnMetadata(
                    name,
                    type,
                    column.isNullable(),
                    column.getComment().orElse(null),
                    null,
                    false,
                    columnProperties));
        }
        else if (element instanceof LikeClause) {
            // Copy visible columns (and optionally properties) from another table.
            LikeClause likeClause = (LikeClause) element;
            QualifiedObjectName likeTableName = createQualifiedObjectName(session, statement, likeClause.getTableName());
            if (!metadata.getCatalogHandle(session, likeTableName.getCatalogName()).isPresent()) {
                throw new SemanticException(MISSING_CATALOG, statement, "LIKE table catalog '%s' does not exist", likeTableName.getCatalogName());
            }
            if (!tableName.getCatalogName().equals(likeTableName.getCatalogName())) {
                throw new SemanticException(NOT_SUPPORTED, statement, "LIKE table across catalogs is not supported");
            }
            TableHandle likeTable = metadata.getMetadataResolver(session).getTableHandle(likeTableName)
                    .orElseThrow(() -> new SemanticException(MISSING_TABLE, statement, "LIKE table '%s' does not exist", likeTableName));
            TableMetadata likeTableMetadata = metadata.getTableMetadata(session, likeTable);

            Optional<LikeClause.PropertiesOption> propertiesOption = likeClause.getPropertiesOption();
            if (propertiesOption.isPresent() && propertiesOption.get().equals(LikeClause.PropertiesOption.INCLUDING)) {
                // At most one LIKE clause may carry INCLUDING PROPERTIES.
                if (includingProperties) {
                    throw new SemanticException(NOT_SUPPORTED, statement, "Only one LIKE clause can specify INCLUDING PROPERTIES");
                }
                includingProperties = true;
                inheritedProperties = likeTableMetadata.getMetadata().getProperties();
            }

            likeTableMetadata.getColumns().stream()
                    .filter(column -> !column.isHidden())
                    .forEach(column -> {
                        if (columns.containsKey(column.getName().toLowerCase(Locale.ENGLISH))) {
                            throw new SemanticException(DUPLICATE_COLUMN_NAME, element, "Column name '%s' specified more than once", column.getName());
                        }
                        columns.put(column.getName().toLowerCase(Locale.ENGLISH), column);
                    });
        }
        else if (element instanceof ConstraintSpecification) {
            // Adding constraints requires its own access-control check.
            accessControl.checkCanAddConstraints(session.getRequiredTransactionId(), session.getIdentity(), session.getAccessControlContext(), tableName);
            constraints.add(convertToTableConstraint(metadata, session, connectorId, (ConstraintSpecification) element, warningCollector));
        }
        else {
            throw new PrestoException(GENERIC_INTERNAL_ERROR, "Invalid TableElement: " + element.getClass().getName());
        }
    }

    accessControl.checkCanCreateTable(session.getRequiredTransactionId(), session.getIdentity(), session.getAccessControlContext(), tableName);

    // Named constraints must be unique.
    constraints.stream()
            .filter(c -> c.getName().isPresent())
            .collect(Collectors.groupingBy(c -> c.getName().get(), Collectors.counting()))
            .forEach((constraintName, count) -> {
                if (count > 1) {
                    throw new PrestoException(SYNTAX_ERROR, format("Constraint name '%s' specified more than once", constraintName));
                }
            });

    // Only a single primary key constraint is allowed.
    if (constraints.stream()
            .filter(PrimaryKeyConstraint.class::isInstance)
            .collect(Collectors.groupingBy(c -> c.getName().orElse(""), Collectors.counting()))
            .size() > 1) {
        throw new PrestoException(SYNTAX_ERROR, "Multiple primary key constraints are not allowed");
    }

    Map<String, Expression> sqlProperties = mapFromProperties(statement.getProperties());
    Map<String, Object> properties = metadata.getTablePropertyManager().getProperties(
            connectorId,
            tableName.getCatalogName(),
            sqlProperties,
            session,
            metadata,
            parameterLookup);
    // Explicit properties win over those inherited via INCLUDING PROPERTIES.
    Map<String, Object> finalProperties = combineProperties(sqlProperties.keySet(), properties, inheritedProperties);

    ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(toSchemaTableName(tableName), ImmutableList.copyOf(columns.values()), finalProperties, statement.getComment(), constraints, Collections.emptyMap());
    try {
        metadata.createTable(session, tableName.getCatalogName(), tableMetadata, statement.isNotExists());
    }
    catch (PrestoException e) {
        // connectors are not required to handle the ignoreExisting flag
        if (!e.getErrorCode().equals(ALREADY_EXISTS.toErrorCode()) || !statement.isNotExists()) {
            throw e;
        }
    }
    return immediateFuture(null);
}
@Test
public void testCreateTableNotExistsFalse() {
    // IF NOT EXISTS is false, so creating an already-existing table must fail.
    CreateTable statement = new CreateTable(QualifiedName.of("test_table"),
        ImmutableList.of(new ColumnDefinition(identifier("a"), "BIGINT", true, emptyList(), Optional.empty())),
        false,
        ImmutableList.of(),
        Optional.empty());

    try {
        getFutureValue(new CreateTableTask().internalExecute(statement, metadata, new AllowAllAccessControl(), testSession, emptyList(), warningCollector));
        fail("expected exception");
    } catch (RuntimeException e) {
        // Expected: an ALREADY_EXISTS PrestoException surfaced by the task.
        assertTrue(e instanceof PrestoException);
        PrestoException prestoException = (PrestoException) e;
        assertEquals(prestoException.getErrorCode(), ALREADY_EXISTS.toErrorCode());
    }
    // The connector's createTable was still invoked exactly once.
    assertEquals(metadata.getCreateTableCallCount(), 1);
}
/**
 * Maps a Java {@link Type} to its UDF parameter type using the default
 * Java-to-argument-type mapping.
 */
public static ParamType getSchemaFromType(final Type type) {
    return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
@Test
public void shouldGetLongSchemaForLongPrimitiveClass() {
    // The primitive long class must map to the LONG parameter type.
    final ParamType resolved = UdfUtil.getSchemaFromType(long.class);
    assertThat(resolved, equalTo(ParamTypes.LONG));
}
/**
 * Parses a resource-config qualifier string (e.g. {@code "sw320dp"}) into
 * {@code out}. Delegates to the three-argument overload with its boolean
 * flag set to true — presumably "apply version-for-compat defaulting" or
 * similar; TODO confirm against the overload's definition.
 *
 * @return whether the string was successfully parsed
 */
public static boolean parse(final String str, ResTable_config out) {
    return parse(str, out, true);
}
@Test
public void parse_smallestScreenWidth() {
    // "sw320dp" must populate the smallest-screen-width field with 320.
    ResTable_config parsed = new ResTable_config();
    ConfigDescription.parse("sw320dp", parsed);
    assertThat(parsed.smallestScreenWidthDp).isEqualTo(320);
}
/**
 * @return the RPC type this register service handles: {@code tars}
 */
@Override
public String rpcType() {
    return RpcTypeEnum.TARS.getName();
}
@Test
public void testRpcType() {
    // The service must report the TARS rpc type name.
    String expected = RpcTypeEnum.TARS.getName();
    assertEquals(expected, shenyuClientRegisterTarsService.rpcType());
}
/**
 * Schedules a periodic update task for the given service if one is not
 * already scheduled. No-op when async query of subscribed services is
 * disabled.
 */
public void scheduleUpdateIfAbsent(String serviceName, String groupName, String clusters) {
    if (!asyncQuerySubscribeService) {
        return;
    }
    String serviceKey = ServiceInfo.getKey(NamingUtils.getGroupedName(serviceName, groupName), clusters);
    // Fast path: unsynchronized read avoids locking when a task already exists.
    if (futureMap.get(serviceKey) != null) {
        return;
    }
    synchronized (futureMap) {
        // Double-checked inside the lock so at most one task is scheduled per key.
        if (futureMap.get(serviceKey) != null) {
            return;
        }
        ScheduledFuture<?> future = addTask(new UpdateTask(serviceName, groupName, clusters));
        futureMap.put(serviceKey, future);
    }
}
@Test
void testScheduleUpdateIfAbsentUpdateOlder() throws InterruptedException, NacosException {
    // The cached entry is old enough that the scheduled task should re-query.
    info.setCacheMillis(10000L);
    nacosClientProperties.setProperty(PropertyKeyConst.NAMING_ASYNC_QUERY_SUBSCRIBE_SERVICE, "true");
    serviceInfoUpdateService = new ServiceInfoUpdateService(nacosClientProperties, holder, proxy, notifier);
    serviceInfoUpdateService.scheduleUpdateIfAbsent(serviceName, group, clusters);
    Map<String, ServiceInfo> map = new HashMap<>();
    map.put(ServiceInfo.getKey(group + "@@" + serviceName, clusters), info);
    when(holder.getServiceInfoMap()).thenReturn(map);
    // Give the scheduled update task time to fire at least once.
    TimeUnit.MILLISECONDS.sleep(1500);
    Mockito.verify(proxy).queryInstancesOfService(serviceName, group, clusters, false);
}
/**
 * Creates (or validates) a Kafka topic. Returns true if the topic was
 * created, false if it already exists with compatible properties; throws
 * when the existing topic's partition count, replication factor, or
 * retention conflicts with the request.
 */
@Override
public boolean createTopic(
    final String topic,
    final int numPartitions,
    final short replicationFactor,
    final Map<String, ?> configs,
    final CreateTopicsOptions createOptions
) {
  final Optional<Long> retentionMs = KafkaTopicClient.getRetentionMs(configs);

  // Existing topic: accept only if its properties match the request.
  if (isTopicExists(topic)) {
    validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs);
    return false;
  }

  // DEFAULT_REPLICAS means "use the cluster's default replication factor".
  final short resolvedReplicationFactor = replicationFactor == TopicProperties.DEFAULT_REPLICAS
      ? getDefaultClusterReplication()
      : replicationFactor;

  final NewTopic newTopic = new NewTopic(topic, numPartitions, resolvedReplicationFactor);
  newTopic.configs(toStringConfigs(configs));

  try {
    LOG.info("Creating topic '{}' {}",
        topic,
        (createOptions.shouldValidateOnly()) ? "(ONLY VALIDATE)" : ""
    );
    // Retry transient admin-client failures.
    ExecutorUtil.executeWithRetries(
        () -> adminClient.get().createTopics(
            Collections.singleton(newTopic),
            createOptions
        ).all().get(),
        ExecutorUtil.RetryBehaviour.ON_RETRYABLE);
    return true;
  } catch (final InterruptedException e) {
    // Restore the interrupt flag before surfacing the failure.
    Thread.currentThread().interrupt();
    throw new KafkaResponseGetFailedException(
        "Failed to guarantee existence of topic " + topic, e);
  } catch (final TopicExistsException e) {
    // if the topic already exists, it is most likely because another node just created it.
    // ensure that it matches the partition count, replication factor, and retention
    // before returning success
    validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs);
    return false;
  } catch (final TopicAuthorizationException e) {
    throw new KsqlTopicAuthorizationException(
        AclOperation.CREATE, Collections.singleton(topic));
  } catch (final Exception e) {
    throw new KafkaResponseGetFailedException(
        "Failed to guarantee existence of topic " + topic, e);
  }
}
@Test
public void shouldSetTopicCleanupPolicyToCompact() {
    // Given: topic configs requesting compaction and a retention period
    final Map<String, String> configs = ImmutableMap.of(
        "cleanup.policy", "compact",
        TopicConfig.RETENTION_MS_CONFIG, "5000");

    // When:
    kafkaTopicClient.createTopic("topic-name", 1, (short) 2, configs);

    // Then: the configs are passed through to the admin client unchanged
    verify(adminClient).createTopics(
        eq(ImmutableSet.of(newTopic("topic-name", 1, 2, configs))),
        any()
    );
}
/**
 * Looks up the preset read-with-partitions helper registered for the given
 * column type; returns null when no helper exists for that type.
 */
static <T> @Nullable JdbcReadWithPartitionsHelper<T> getPartitionsHelper(TypeDescriptor<T> type) {
  // This cast is unchecked, thus this is a small type-checking risk. We just need
  // to make sure that all preset helpers in `JdbcUtil.PRESET_HELPERS` are matched
  // in type from their Key and their Value.
  return (JdbcReadWithPartitionsHelper<T>) PRESET_HELPERS.get(type.getRawType());
}
@Test
public void testLongPartitioningNotEnoughRanges() {
    JdbcReadWithPartitionsHelper<Long> helper = JdbcUtil.getPartitionsHelper(TypeDescriptors.longs());
    // The minimum stride is one, which is what causes this sort of partitioning.
    // Asking for 10 partitions over [12, 20] can only yield 4 ranges of stride 2.
    List<KV<Long, Long>> expectedRanges =
        Lists.newArrayList(KV.of(12L, 14L), KV.of(14L, 16L), KV.of(16L, 18L), KV.of(18L, 21L));
    List<KV<Long, Long>> ranges = Lists.newArrayList(helper.calculateRanges(12L, 20L, 10L));
    // The ranges go from the current lowerBound to ONE ELEMENT AFTER the upper bound.
    // Because the query's filter statement is : WHERE column >= lowerBound AND column < upperBound.
    assertEquals(4, ranges.size());
    assertArrayEquals(expectedRanges.toArray(), ranges.toArray());
}
/**
 * Resolves the state cell for the given namespace and address from this work
 * item's state table, using a null state context.
 */
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
  return workItemState.get(namespace, address, StateContexts.nullContext());
}
@Test
public void testMapLocalRemoveOverridesStorage() throws Exception {
    StateTag<MapState<String, Integer>> addr =
        StateTags.map("map", StringUtf8Coder.of(), VarIntCoder.of());
    MapState<String, Integer> mapState = underTest.state(NAMESPACE, addr);

    final String tag1 = "tag1";
    final String tag2 = "tag2";
    // Backing storage claims tag1 -> 1 and also returns both tags on a
    // prefix scan; both futures resolve asynchronously after 50 ms.
    SettableFuture<Integer> future = SettableFuture.create();
    when(mockReader.valueFuture(
        protoKeyFromUserKey(tag1, StringUtf8Coder.of()), STATE_FAMILY, VarIntCoder.of()))
        .thenReturn(future);
    SettableFuture<Iterable<Map.Entry<ByteString, Integer>>> prefixFuture = SettableFuture.create();
    when(mockReader.valuePrefixFuture(
        protoKeyFromUserKey(null, StringUtf8Coder.of()), STATE_FAMILY, VarIntCoder.of()))
        .thenReturn(prefixFuture);
    waitAndSet(future, 1, 50);
    waitAndSet(
        prefixFuture,
        ImmutableList.of(
            new AbstractMap.SimpleEntry<>(protoKeyFromUserKey(tag1, StringUtf8Coder.of()), 1),
            new AbstractMap.SimpleEntry<>(protoKeyFromUserKey(tag2, StringUtf8Coder.of()), 2)),
        50);

    // A local remove must shadow whatever storage returns for that key.
    mapState.remove(tag1);
    assertNull(mapState.get(tag1).read());
    assertThat(
        mapState.entries().read(),
        Matchers.containsInAnyOrder(new AbstractMap.SimpleEntry<>(tag2, 2)));
    // Removing the remaining key leaves the map observably empty.
    mapState.remove(tag2);
    assertTrue(mapState.isEmpty().read());
}
/**
 * Scans the execution graph for running tasks whose executions exceed the
 * per-vertex execution-time baseline, returning the slow execution attempts
 * grouped by execution vertex. Terminal vertices are skipped.
 */
@VisibleForTesting
Map<ExecutionVertexID, Collection<ExecutionAttemptID>> findSlowTasks(
        final ExecutionGraph executionGraph) {
    final long currentTimeMillis = System.currentTimeMillis();
    final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> slowTasks = new HashMap<>();

    final List<ExecutionJobVertex> jobVerticesToCheck = getJobVerticesToCheck(executionGraph);

    for (ExecutionJobVertex ejv : jobVerticesToCheck) {
        // Baseline is computed once per job vertex for the current instant.
        final ExecutionTimeWithInputBytes baseline = getBaseline(ejv, currentTimeMillis);

        for (ExecutionVertex ev : ejv.getTaskVertices()) {
            // Finished/failed/canceled vertices cannot be slow.
            if (ev.getExecutionState().isTerminal()) {
                continue;
            }

            final List<ExecutionAttemptID> slowExecutions =
                    findExecutionsExceedingBaseline(
                            ev.getCurrentExecutions(), baseline, currentTimeMillis);

            if (!slowExecutions.isEmpty()) {
                slowTasks.put(ev.getID(), slowExecutions);
            }
        }
    }

    return slowTasks;
}
@Test void testLargeLowerBound() throws Exception { final int parallelism = 3; final JobVertex jobVertex = createNoOpVertex(parallelism); final ExecutionGraph executionGraph = createExecutionGraph(jobVertex); final ExecutionTimeBasedSlowTaskDetector slowTaskDetector = createSlowTaskDetector(0.3, 1, Integer.MAX_VALUE); final ExecutionVertex ev3 = executionGraph.getJobVertex(jobVertex.getID()).getTaskVertices()[2]; ev3.getCurrentExecutionAttempt().markFinished(); final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> slowTasks = slowTaskDetector.findSlowTasks(executionGraph); // no task can exceed the large baseline assertThat(slowTasks).isEmpty(); }
FormattingNode COMPOSITE(String keyword) throws ScanException {
    // Parses a composite conversion word: the child pattern between the
    // parentheses, the mandatory closing parenthesis, and an optional
    // trailing option list.
    CompositeNode compositeNode = new CompositeNode(keyword);
    compositeNode.setChildNode(E());

    Token token = getNextToken();
    if (token == null || token.getType() != Token.RIGHT_PARENTHESIS) {
        String msg = "Expecting RIGHT_PARENTHESIS token but got " + token;
        addError(msg);
        addError("See also " + MISSING_RIGHT_PARENTHESIS);
        throw new ScanException(msg);
    }

    // An OPTION token directly after the closing parenthesis attaches
    // options to the composite node.
    Token optionToken = getCurentToken();
    if (optionToken != null && optionToken.getType() == Token.OPTION) {
        compositeNode.setOptions(optionToken.getOptionsList());
        advanceTokenPointer();
    }
    return compositeNode;
}
@Test
public void testComposite() throws Exception {
    // Each sub-case parses a pattern containing a bare composite "%(...)"
    // and compares the resulting node chain against a hand-built witness.

    // Simplest case: composite with a single keyword child.
    {
        Parser<Object> p = new Parser<>("hello%(%child)");
        Node t = p.parse();
        Node witness = new Node(Node.LITERAL, "hello");
        CompositeNode composite = new CompositeNode(BARE);
        Node child = new SimpleKeywordNode("child");
        composite.setChildNode(child);
        witness.next = composite;
        // System.out.println("w:" + witness);
        // System.out.println(t);
        Assertions.assertEquals(witness, t);
    }

    // System.out.println("testRecursive part 2");
    // Keyword child followed by a trailing literal space inside the composite.
    {
        Parser<Object> p = new Parser<>("hello%(%child )");
        Node t = p.parse();
        Node witness = new Node(Node.LITERAL, "hello");
        CompositeNode composite = new CompositeNode(BARE);
        Node child = new SimpleKeywordNode("child");
        composite.setChildNode(child);
        witness.next = composite;
        child.next = new Node(Node.LITERAL, " ");
        Assertions.assertEquals(witness, t);
    }

    // Two keywords inside the composite, separated by a literal space.
    {
        Parser<Object> p = new Parser<>("hello%(%child %h)");
        Node t = p.parse();
        Node witness = new Node(Node.LITERAL, "hello");
        CompositeNode composite = new CompositeNode(BARE);
        Node child = new SimpleKeywordNode("child");
        composite.setChildNode(child);
        child.next = new Node(Node.LITERAL, " ");
        child.next.next = new SimpleKeywordNode("h");
        witness.next = composite;
        Assertions.assertEquals(witness, t);
    }

    // Pattern continues after the composite with " %m".
    {
        Parser<Object> p = new Parser<>("hello%(%child %h) %m");
        Node t = p.parse();
        Node witness = new Node(Node.LITERAL, "hello");
        CompositeNode composite = new CompositeNode(BARE);
        Node child = new SimpleKeywordNode("child");
        composite.setChildNode(child);
        child.next = new Node(Node.LITERAL, " ");
        child.next.next = new SimpleKeywordNode("h");
        witness.next = composite;
        composite.next = new Node(Node.LITERAL, " ");
        composite.next.next = new SimpleKeywordNode("m");
        Assertions.assertEquals(witness, t);
    }

    // Escaped parentheses "\\(" and "\\)" become part of the literal nodes
    // inside the composite rather than terminating it.
    {
        Parser<Object> p = new Parser<>("hello%( %child \\(%h\\) ) %m");
        Node t = p.parse();
        Node witness = new Node(Node.LITERAL, "hello");
        CompositeNode composite = new CompositeNode(BARE);
        Node child = new Node(Node.LITERAL, " ");
        composite.setChildNode(child);

        Node c = child;
        c = c.next = new SimpleKeywordNode("child");
        c = c.next = new Node(Node.LITERAL, " (");
        c = c.next = new SimpleKeywordNode("h");
        c = c.next = new Node(Node.LITERAL, ") ");
        witness.next = composite;
        composite.next = new Node(Node.LITERAL, " ");
        composite.next.next = new SimpleKeywordNode("m");
        Assertions.assertEquals(witness, t);
    }
}
protected ValueWrapper getResultWrapper(String className,
                                        FactMappingValue expectedResult,
                                        ExpressionEvaluator expressionEvaluator,
                                        Object expectedResultRaw,
                                        Object resultRaw,
                                        Class<?> resultClass) {
    // Evaluates the expected unary expression against the actual result and
    // wraps the outcome. On an evaluation exception, the message is recorded
    // on the mapping value and returned as an error wrapper.
    try {
        ExpressionEvaluatorResult evaluationResult =
                expressionEvaluator.evaluateUnaryExpression(
                        (String) expectedResultRaw, resultRaw, resultClass);
        if (evaluationResult.isSuccessful()) {
            return of(resultRaw);
        }
        if (isCollectionOrMap(className)) {
            // For collections/maps, point the error at the offending element.
            return errorWithCollectionPathToValue(
                    evaluationResult.getWrongValue(),
                    evaluationResult.getPathToWrongValue());
        }
        return errorWithValidValue(resultRaw, expectedResultRaw);
    } catch (Exception e) {
        expectedResult.setExceptionMessage(e.getMessage());
        return errorWithMessage(e.getMessage());
    }
}
@Test
public void getResultWrapper() {
    // Exercises getResultWrapper across: success, plain failure,
    // collection/map failures with and without a wrong value, and a generic
    // evaluation exception.
    ExpressionEvaluator expressionEvaluatorMock = mock(ExpressionEvaluator.class);
    Object resultRaw = "test";
    Object expectedResultRaw = "";
    String collectionWrongValue = "value";
    String collectionValuePath = "Item #: 1";
    String genericErrorMessage = "errorMessage";

    // case 1: succeed
    when(expressionEvaluatorMock.evaluateUnaryExpression(any(), any(), any(Class.class))).thenReturn(ExpressionEvaluatorResult.ofSuccessful());
    ValueWrapper<Object> valueWrapper = abstractRunnerHelper.getResultWrapper(String.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, String.class);
    assertThat(valueWrapper.isValid()).isTrue();
    assertThat(valueWrapper.getCollectionPathToValue()).isNull();

    // case 2: failed with actual value
    when(expressionEvaluatorMock.evaluateUnaryExpression(any(), any(), any(Class.class))).thenReturn(ExpressionEvaluatorResult.ofFailed());
    valueWrapper = abstractRunnerHelper.getResultWrapper(String.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, String.class);
    assertThat(valueWrapper.isValid()).isFalse();
    assertThat(valueWrapper.getValue()).isEqualTo(resultRaw);
    assertThat(valueWrapper.getCollectionPathToValue()).isNull();

    // case 3: failed without actual value (list) — empty path, no value
    valueWrapper = abstractRunnerHelper.getResultWrapper(List.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, List.class);
    assertThat(valueWrapper.getErrorMessage()).isNotPresent();
    assertThat(valueWrapper.getCollectionPathToValue()).isEmpty();
    assertThat(valueWrapper.getValue()).isNull();

    // case 4: failed without actual value (map) — empty path, no value
    valueWrapper = abstractRunnerHelper.getResultWrapper(Map.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, Map.class);
    assertThat(valueWrapper.getErrorMessage()).isNotPresent();
    assertThat(valueWrapper.getCollectionPathToValue()).isEmpty();
    assertThat(valueWrapper.getValue()).isNull();

    // case 5: failed with wrong value (list) — path and wrong value reported
    ExpressionEvaluatorResult result = ExpressionEvaluatorResult.ofFailed(collectionWrongValue, collectionValuePath);
    when(expressionEvaluatorMock.evaluateUnaryExpression(any(), any(), any(Class.class))).thenReturn(result);
    valueWrapper = abstractRunnerHelper.getResultWrapper(List.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, List.class);
    assertThat(valueWrapper.getErrorMessage()).isNotPresent();
    assertThat(valueWrapper.getCollectionPathToValue()).hasSize(1);
    assertThat(valueWrapper.getValue()).isEqualTo(collectionWrongValue);

    // case 6: failed with wrong value (map) — path and wrong value reported
    valueWrapper = abstractRunnerHelper.getResultWrapper(Map.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, Map.class);
    assertThat(valueWrapper.getErrorMessage()).isNotPresent();
    assertThat(valueWrapper.getCollectionPathToValue()).hasSize(1);
    assertThat(valueWrapper.getValue()).isEqualTo(collectionWrongValue);

    // case 7: failed without wrong value (list) — path only, null value
    result = ExpressionEvaluatorResult.ofFailed(null, collectionValuePath);
    when(expressionEvaluatorMock.evaluateUnaryExpression(any(), any(), any(Class.class))).thenReturn(result);
    valueWrapper = abstractRunnerHelper.getResultWrapper(List.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, List.class);
    assertThat(valueWrapper.getErrorMessage()).isNotPresent();
    assertThat(valueWrapper.getCollectionPathToValue()).hasSize(1);
    assertThat(valueWrapper.getValue()).isNull();

    // case 8: failed without wrong value (map) — path only, null value
    valueWrapper = abstractRunnerHelper.getResultWrapper(Map.class.getCanonicalName(), new FactMappingValue(), expressionEvaluatorMock, expectedResultRaw, resultRaw, Map.class);
    assertThat(valueWrapper.getErrorMessage()).isNotPresent();
    assertThat(valueWrapper.getCollectionPathToValue()).hasSize(1);
    assertThat(valueWrapper.getValue()).isNull();

    // case 9: failed with generic exception — message recorded on the
    // FactMappingValue and surfaced as the wrapper's error message
    when(expressionEvaluatorMock.evaluateUnaryExpression(any(), any(), any(Class.class))).thenThrow(new IllegalArgumentException(genericErrorMessage));
    FactMappingValue expectedResult5 = new FactMappingValue();
    valueWrapper = abstractRunnerHelper.getResultWrapper(Map.class.getCanonicalName(), expectedResult5, expressionEvaluatorMock, expectedResultRaw, resultRaw, Map.class);
    assertThat(valueWrapper.getErrorMessage().get()).isEqualTo(genericErrorMessage);
    assertThat(expectedResult5.getExceptionMessage()).isEqualTo(genericErrorMessage);
}
public static Future<MetricsAndLogging> metricsAndLogging(Reconciliation reconciliation,
                                                          ConfigMapOperator configMapOperations,
                                                          LoggingModel logging,
                                                          MetricsModel metrics) {
    // Fetch the metrics and logging ConfigMaps in parallel and combine the
    // two results into a single MetricsAndLogging holder.
    Future<ConfigMap> metricsCmFuture = metricsConfigMap(reconciliation, configMapOperations, metrics);
    Future<ConfigMap> loggingCmFuture = loggingConfigMap(reconciliation, configMapOperations, logging);
    return Future.join(metricsCmFuture, loggingCmFuture)
            .map(result -> new MetricsAndLogging(result.resultAt(0), result.resultAt(1)));
}
@Test
public void testNoMetricsAndExternalLogging(VertxTestContext context) {
    // Logging references an external ConfigMap; metrics are not configured.
    LoggingModel logging = new LoggingModel(new KafkaConnectSpecBuilder().withLogging(new ExternalLoggingBuilder().withNewValueFrom().withConfigMapKeyRef(new ConfigMapKeySelector("log4j.properties", "logging-cm", false)).endValueFrom().build()).build(), "KafkaConnectCluster", false, true);
    MetricsModel metrics = new MetricsModel(new KafkaConnectSpecBuilder().build());

    // Mock the Kubernetes API to return the external logging ConfigMap.
    ConfigMapOperator mockCmOps = mock(ConfigMapOperator.class);
    when(mockCmOps.getAsync(any(), eq("logging-cm"))).thenReturn(Future.succeededFuture(new ConfigMapBuilder().withNewMetadata().withName("logging-cm").endMetadata().withData(Map.of()).build()));

    Checkpoint async = context.checkpoint();
    MetricsAndLoggingUtils.metricsAndLogging(Reconciliation.DUMMY_RECONCILIATION, mockCmOps, logging, metrics)
            .onComplete(context.succeeding(v -> context.verify(() -> {
                // The logging ConfigMap is fetched, metrics stays null, and
                // only a single API call is made overall.
                assertThat(v.loggingCm(), is(notNullValue()));
                assertThat(v.loggingCm().getMetadata().getName(), is("logging-cm"));
                assertThat(v.metricsCm(), is(nullValue()));

                verify(mockCmOps, times(1)).getAsync(any(), any());

                async.flag();
            })));
}
private Set<TimelineEntity> getEntities(Path dir, String entityType, TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve) throws IOException { // First sort the selected entities based on created/start time. Map<Long, Set<TimelineEntity>> sortedEntities = new TreeMap<>( new Comparator<Long>() { @Override public int compare(Long l1, Long l2) { return l2.compareTo(l1); } } ); dir = getNormalPath(dir); if (dir != null) { RemoteIterator<LocatedFileStatus> fileStatuses = fs.listFiles(dir, false); if (fileStatuses != null) { while (fileStatuses.hasNext()) { LocatedFileStatus locatedFileStatus = fileStatuses.next(); Path entityFile = locatedFileStatus.getPath(); if (!entityFile.getName() .contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) { continue; } try (BufferedReader reader = new BufferedReader( new InputStreamReader(fs.open(entityFile), StandardCharsets.UTF_8))) { TimelineEntity entity = readEntityFromFile(reader); if (!entity.getType().equals(entityType)) { continue; } if (!isTimeInRange(entity.getCreatedTime(), filters.getCreatedTimeBegin(), filters.getCreatedTimeEnd())) { continue; } if (filters.getRelatesTo() != null && !filters.getRelatesTo().getFilterList().isEmpty() && !TimelineStorageUtils.matchRelatesTo(entity, filters.getRelatesTo())) { continue; } if (filters.getIsRelatedTo() != null && !filters.getIsRelatedTo().getFilterList().isEmpty() && !TimelineStorageUtils.matchIsRelatedTo(entity, filters.getIsRelatedTo())) { continue; } if (filters.getInfoFilters() != null && !filters.getInfoFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchInfoFilters(entity, filters.getInfoFilters())) { continue; } if (filters.getConfigFilters() != null && !filters.getConfigFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchConfigFilters(entity, filters.getConfigFilters())) { continue; } if (filters.getMetricFilters() != null && !filters.getMetricFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchMetricFilters(entity, 
filters.getMetricFilters())) { continue; } if (filters.getEventFilters() != null && !filters.getEventFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchEventFilters(entity, filters.getEventFilters())) { continue; } TimelineEntity entityToBeReturned = createEntityToBeReturned( entity, dataToRetrieve.getFieldsToRetrieve()); Set<TimelineEntity> entitiesCreatedAtSameTime = sortedEntities.get(entityToBeReturned.getCreatedTime()); if (entitiesCreatedAtSameTime == null) { entitiesCreatedAtSameTime = new HashSet<TimelineEntity>(); } entitiesCreatedAtSameTime.add(entityToBeReturned); sortedEntities.put(entityToBeReturned.getCreatedTime(), entitiesCreatedAtSameTime); } } } } Set<TimelineEntity> entities = new HashSet<TimelineEntity>(); long entitiesAdded = 0; for (Set<TimelineEntity> entitySet : sortedEntities.values()) { for (TimelineEntity entity : entitySet) { entities.add(entity); ++entitiesAdded; if (entitiesAdded >= filters.getLimit()) { return entities; } } } return entities; }
@Test
void testGetEntitiesByTimeWindows() throws Exception {
  // Get entities based on created time start and end time range.
  Set<TimelineEntity> result = reader.getEntities(
      new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
      "app", null),
      new TimelineEntityFilters.Builder().createdTimeBegin(1425016502030L)
          .createTimeEnd(1425016502060L).build(),
      new TimelineDataToRetrieve());
  assertEquals(1, result.size());
  // Only one entity with ID id_4 should be returned.
  for (TimelineEntity entity : result) {
    if (!entity.getId().equals("id_4")) {
      fail("Incorrect filtering based on created time range");
    }
  }

  // Get entities if only created time end is specified: everything created
  // at or before the end time matches (id_4 is excluded).
  result = reader.getEntities(
      new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
      "app", null),
      new TimelineEntityFilters.Builder().createTimeEnd(1425016502010L)
          .build(),
      new TimelineDataToRetrieve());
  assertEquals(3, result.size());
  for (TimelineEntity entity : result) {
    if (entity.getId().equals("id_4")) {
      fail("Incorrect filtering based on created time range");
    }
  }

  // Get entities if only created time start is specified: only id_4 was
  // created at or after this start time.
  result = reader.getEntities(
      new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
      "app", null),
      new TimelineEntityFilters.Builder().createdTimeBegin(1425016502010L)
          .build(),
      new TimelineDataToRetrieve());
  assertEquals(1, result.size());
  for (TimelineEntity entity : result) {
    if (!entity.getId().equals("id_4")) {
      fail("Incorrect filtering based on created time range");
    }
  }
}
/**
 * Builds an {@link Application} from the given builder.
 *
 * @param builder the builder describing the application to construct
 * @return the built application
 * @throws Exception if building the application fails
 */
@Beta
public static Application fromBuilder(Builder builder) throws Exception {
    return builder.build();
}
@Test
void component() throws Exception {
    // A component class registered on a container must be resolvable by its
    // class name through the application facade.
    Application.Builder builder = new Application.Builder()
            .container("default", new Application.Builder.Container()
                    .component(MockSearcher.class));
    try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(builder))) {
        Component component = app.getComponentById(MockSearcher.class.getName());
        assertNotNull(component);
    }
}
public static List<Criterion> parse(String filter) {
  // Split the filter string on the criteria separator and parse each piece
  // into a Criterion, returning them as an unmodifiable list.
  Iterable<String> rawCriteria = CRITERIA_SPLITTER.split(filter);
  return StreamSupport.stream(rawCriteria.spliterator(), false)
    .map(criterion -> parseCriterion(criterion))
    .toList();
}
@Test
public void parse_filter_having_all_operators() {
  // A filter combining every supported operator (<, <=, >, >=, =, in) must
  // produce one Criterion per clause with the right operator and value(s).
  List<Criterion> criterion = FilterParser.parse("ncloc < 10 and coverage <= 80 and debt > 50 and duplication >= 56.5 and security_rating = 1 and language in (java,js)");

  assertThat(criterion)
    .extracting(Criterion::getKey, Criterion::getOperator, Criterion::getValue, Criterion::getValues)
    .containsOnly(
      tuple("ncloc", LT, "10", emptyList()),
      tuple("coverage", LTE, "80", emptyList()),
      tuple("debt", GT, "50", emptyList()),
      tuple("duplication", GTE, "56.5", emptyList()),
      tuple("security_rating", EQ, "1", emptyList()),
      // "in" clauses carry their operands in getValues(), not getValue().
      tuple("language", IN, null, asList("java", "js")));
}
public String transform() throws ScanException {
    // Compiles the node tree into its string form: delegates to compileNode
    // with a fresh buffer and an empty node stack.
    StringBuilder buffer = new StringBuilder();
    compileNode(node, buffer, new Stack<Node>());
    return buffer.toString();
}
@Test
public void literalVariableLiteral() throws ScanException {
    // "${k0}" resolves to "v0" via propertyContainer0; the surrounding
    // literals are preserved as-is.
    Node node = makeNode("a${k0}c");
    NodeToStringTransformer transformer = new NodeToStringTransformer(node, propertyContainer0);
    assertEquals("av0c", transformer.transform());
}
@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    // Builds the processor that receives subscription requests keyed by
    // foreign key, persists them in the subscription store, and forwards
    // a Change keyed by CombinedKey(foreignKey, primaryKey) downstream.
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        // Subscription store; also read by the prefix scanner in
        // ForeignTableJoinProcessorSupplier (see inferBasedOnState below).
        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;
        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
                Thread.currentThread().getName(),
                internalProcessorContext.taskId().toString(),
                internalProcessorContext.metrics()
            );
            store = internalProcessorContext.getStateStore(storeName);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            // A null foreign key cannot be subscribed to: drop the record
            // unless the instruction explicitly asks to propagate the null.
            if (record.key() == null && !SubscriptionWrapper.Instruction.PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE.equals(record.value().getInstruction())) {
                dropRecord();
                return;
            }
            if (record.value().getVersion() > SubscriptionWrapper.CURRENT_VERSION) {
                //Guard against modifications to SubscriptionWrapper. Need to ensure that there is compatibility
                //with previous versions to enable rolling upgrades. Must develop a strategy for upgrading
                //from older SubscriptionWrapper versions to newer versions.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            context().forward(
                record.withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey()))
                    .withValue(inferChange(record))
                    .withTimestamp(record.timestamp())
            );
        }

        // Null keys carry no prior state; otherwise consult the store to
        // find the previous subscription value for this (FK, PK) pair.
        private Change<ValueAndTimestamp<SubscriptionWrapper<K>>> inferChange(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                return new Change<>(ValueAndTimestamp.make(record.value(), record.timestamp()), null);
            } else {
                return inferBasedOnState(record);
            }
        }

        // Reads the old store value, then deletes (for DELETE_KEY_* 
        // instructions) or overwrites the entry, and returns Change(new, old).
        private Change<ValueAndTimestamp<SubscriptionWrapper<K>>> inferBasedOnState(final Record<KO, SubscriptionWrapper<K>> record) {
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());

            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);

            //This store is used by the prefix scanner in ForeignTableJoinProcessorSupplier
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE)
                || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            return new Change<>(newValue, oldValue);
        }

        // Records a dropped-record metric; logs topic/partition/offset when
        // the record metadata is available.
        private void dropRecord() {
            if (context().recordMetadata().isPresent()) {
                final RecordMetadata recordMetadata = context().recordMetadata().get();
                LOG.warn(
                    "Skipping record due to null foreign key. "
                        + "topic=[{}] partition=[{}] offset=[{}]",
                    recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset()
                );
            } else {
                LOG.warn(
                    "Skipping record due to null foreign key. Topic, partition, and offset not known."
                );
            }
            droppedRecordsSensor.record();
        }
    };
}
@Test
public void shouldPropagateOnlyIfFKValAvailableV0() {
    // Pre-populates the subscription store with an older V0 wrapper under
    // the key (FK, PK1), then processes a newer V0 wrapper with the
    // PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE instruction and verifies the store
    // is overwritten and a single Change(new, old) is forwarded.
    final StoreBuilder<TimestampedKeyValueStore<Bytes, SubscriptionWrapper<String>>> storeBuilder = storeBuilder();
    final SubscriptionReceiveProcessorSupplier<String, String> supplier = supplier(storeBuilder);
    final Processor<String, SubscriptionWrapper<String>, CombinedKey<String, String>, Change<ValueAndTimestamp<SubscriptionWrapper<String>>>> processor = supplier.get();
    stateStore = storeBuilder.build();
    context.addStateStore(stateStore);
    stateStore.init((StateStoreContext) context, stateStore);

    // Existing state: old wrapper stored at timestamp 0.
    final SubscriptionWrapper<String> oldWrapper = new SubscriptionWrapper<>(
        new long[]{1L, 2L},
        Instruction.PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE,
        PK2,
        SubscriptionWrapper.VERSION_0,
        null
    );
    final ValueAndTimestamp<SubscriptionWrapper<String>> oldValue = ValueAndTimestamp.make(oldWrapper, 0);
    final Bytes key = COMBINED_KEY_SCHEMA.toBytes(FK, PK1);
    stateStore.put(key, oldValue);
    processor.init(context);

    // New record for the same foreign key, arriving at timestamp 1.
    final SubscriptionWrapper<String> newWrapper = new SubscriptionWrapper<>(
        new long[]{1L, 2L},
        Instruction.PROPAGATE_ONLY_IF_FK_VAL_AVAILABLE,
        PK1,
        SubscriptionWrapper.VERSION_0,
        null
    );
    final ValueAndTimestamp<SubscriptionWrapper<String>> newValue = ValueAndTimestamp.make(
        newWrapper, 1L);
    final Record<String, SubscriptionWrapper<String>> record = new Record<>(
        FK,
        newWrapper,
        1L
    );
    processor.process(record);
    final List<CapturedForward<? extends CombinedKey<String, String>, ? extends Change<ValueAndTimestamp<SubscriptionWrapper<String>>>>> forwarded = context.forwarded();

    // The store now holds the new value...
    assertEquals(newValue, stateStore.get(key));
    // ...and exactly one Change(new, old) was forwarded under
    // CombinedKey(FK, PK1).
    assertEquals(1, forwarded.size());
    assertEquals(
        record.withKey(new CombinedKey<>(FK, PK1))
            .withValue(new Change<>(newValue, oldValue)),
        forwarded.get(0).record()
    );
}
@Override
public String pathPattern() {
    // Extension collection path pattern plus the resource-name variable.
    String basePattern = buildExtensionPathPattern(scheme);
    return basePattern + "/{name}";
}
@Test
void shouldBuildPathPatternCorrectly() {
    // The delete handler exposes "<extension pattern>/{name}".
    var fakeScheme = Scheme.buildFromType(FakeExtension.class);
    var handler = new ExtensionDeleteHandler(fakeScheme, client);

    assertEquals("/apis/fake.halo.run/v1alpha1/fakes/{name}", handler.pathPattern());
}
/**
 * Resolves the tenant identifier for the current context.
 *
 * @return always {@code null} — this implementation does not resolve any
 *         tenant
 */
public String resolveTenant() {
    return null;
}
@Test
void test() {
    // The service resolves no tenant, so the result is always null.
    assertThat(tenantService.resolveTenant(), nullValue());
}
/**
 * Redeems a backup receipt for the given account: verifies the zk receipt
 * presentation, rejects expired receipts and unknown levels, records the
 * receipt serial (rejecting double redemption), and stores the resulting
 * backup voucher on the account.
 *
 * @param account the account redeeming the receipt
 * @param receiptCredentialPresentation the zk receipt presentation to redeem
 * @return a future that completes when the voucher has been stored
 * @throws io.grpc.StatusRuntimeException with INVALID_ARGUMENT if
 *         verification fails, the receipt is expired, the level is
 *         unrecognized, or the serial was already redeemed
 */
public CompletableFuture<Void> redeemReceipt(
    final Account account,
    final ReceiptCredentialPresentation receiptCredentialPresentation) {
  try {
    serverZkReceiptOperations.verifyReceiptCredentialPresentation(receiptCredentialPresentation);
  } catch (VerificationFailedException e) {
    throw Status.INVALID_ARGUMENT
        .withDescription("receipt credential presentation verification failed")
        .asRuntimeException();
  }

  final ReceiptSerial receiptSerial = receiptCredentialPresentation.getReceiptSerial();
  final Instant receiptExpiration = Instant.ofEpochSecond(receiptCredentialPresentation.getReceiptExpirationTime());
  if (clock.instant().isAfter(receiptExpiration)) {
    throw Status.INVALID_ARGUMENT.withDescription("receipt is already expired").asRuntimeException();
  }

  final long receiptLevel = receiptCredentialPresentation.getReceiptLevel();
  if (BackupLevelUtil.fromReceiptLevel(receiptLevel) != BackupLevel.MEDIA) {
    throw Status.INVALID_ARGUMENT
        .withDescription("server does not recognize the requested receipt level")
        .asRuntimeException();
  }

  return redeemedReceiptsManager
      .put(receiptSerial, receiptExpiration.getEpochSecond(), receiptLevel, account.getUuid())
      .thenCompose(receiptAllowed -> {
        if (!receiptAllowed) {
          throw Status.INVALID_ARGUMENT
              .withDescription("receipt serial is already redeemed")
              .asRuntimeException();
        }

        return accountsManager.updateAsync(account, a -> {
          final Account.BackupVoucher newPayment = new Account.BackupVoucher(receiptLevel, receiptExpiration);
          final Account.BackupVoucher existingPayment = a.getBackupVoucher();
          // Fix: mutate the account instance supplied to the updater ("a"),
          // not the captured outer "account" — the updater may be re-run
          // against a freshly read copy, and the original code read the
          // existing voucher from "a" but wrote it to the stale "account".
          a.setBackupVoucher(merge(existingPayment, newPayment));
        });
      })
      .thenRun(Util.NOOP);
}
@Test
void redeemInvalidPresentation() throws InvalidInputException, VerificationFailedException {
    // A presentation built from freshly generated server params cannot pass
    // zk verification and must be rejected with INVALID_ARGUMENT.
    final BackupAuthManager authManager = create(BackupLevel.MESSAGES, false);
    final ReceiptCredentialPresentation invalidPresentation =
        receiptPresentation(ServerSecretParams.generate(), 3L, Instant.EPOCH);

    Assertions.assertThatExceptionOfType(StatusRuntimeException.class)
        .isThrownBy(() -> authManager.redeemReceipt(mock(Account.class), invalidPresentation).join())
        .extracting(ex -> ex.getStatus().getCode())
        .isEqualTo(Status.Code.INVALID_ARGUMENT);

    // Nothing may be persisted when verification fails.
    verifyNoInteractions(accountsManager);
    verifyNoInteractions(redeemedReceiptsManager);
}
/**
 * Creates a new menu entry and evicts the cached permission-to-menu-id
 * mapping for its permission (if one is set).
 *
 * @param createReqVO the menu to create
 * @return the id of the newly created menu
 */
@Override
@CacheEvict(value = RedisKeyConstants.PERMISSION_MENU_ID_LIST,
        key = "#createReqVO.permission",
        condition = "#createReqVO.permission != null")
public Long createMenu(MenuSaveVO createReqVO) {
    // Validate that the parent menu exists
    validateParentMenu(createReqVO.getParentId(), null);
    // Validate the menu itself
    validateMenu(createReqVO.getParentId(), createReqVO.getName(), null);

    // Insert into the database
    MenuDO menu = BeanUtils.toBean(createReqVO, MenuDO.class);
    initMenuProperty(menu);
    menuMapper.insert(menu);
    // Return the generated id
    return menu.getId();
}
@Test
public void testCreateMenu_success() {
    // Mock data (create a parent menu first)
    MenuDO menuDO = buildMenuDO(MenuTypeEnum.MENU, "parent", 0L);
    menuMapper.insert(menuDO);
    Long parentId = menuDO.getId();
    // Prepare parameters
    MenuSaveVO reqVO = randomPojo(MenuSaveVO.class, o -> {
        o.setParentId(parentId);
        o.setName("testSonName");
        o.setType(MenuTypeEnum.MENU.getType());
    }).setId(null); // prevent the id from being pre-assigned

    Long menuId = menuService.createMenu(reqVO);
    // Verify the persisted record's properties match the request (except id)
    MenuDO dbMenu = menuMapper.selectById(menuId);
    assertPojoEquals(reqVO, dbMenu, "id");
}
/**
 * Encodes an {@link Address} by converting it to its uint representation
 * and encoding that numeric value.
 *
 * @param address the address to encode
 * @return the ABI-encoded address
 */
static String encodeAddress(Address address) {
    return encodeNumeric(address.toUint());
}
@Test
public void testLongAddress() {
    // A 256-bit address reports type "address" and encodes without throwing.
    Address address = new Address(
            256, "0xa04462684b510796c186d19abfa6929742f79394583d6efb1243bbb473f21d9f");

    assertEquals("address", address.getTypeAsString());
    TypeEncoder.encodeAddress(address);
}
static Future<Secret> getValidatedSecret(SecretOperator secretOperator, String namespace, String name, String... items) {
    // Look up the Secret asynchronously, then validate that it contains the
    // required entries before handing it to the caller.
    Future<Secret> secretFuture = secretOperator.getAsync(namespace, name);
    return secretFuture.compose(secret -> validatedSecret(namespace, name, secret, items));
}
@Test
void testGetValidateSecretMissingSecret()   {
    String namespace = "ns";
    String secretName = "my-secret";

    // The lookup returns null => the Secret does not exist.
    SecretOperator secretOps = mock(SecretOperator.class);
    when(secretOps.getAsync(eq(namespace), eq(secretName))).thenReturn(Future.succeededFuture(null));

    VertxUtil.getValidatedSecret(secretOps, namespace, secretName, "key1", "key2")
            .onComplete(r -> {
                // Validation must fail with a descriptive "not found" message.
                assertThat(r.succeeded(), is(false));
                assertThat(r.cause().getMessage(), is("Secret my-secret not found in namespace ns"));
            });
}
/**
 * Returns all hosts, delegating to the backing host store.
 */
@Override
public Set<K8sHost> hosts() {
    return hostStore.hosts();
}
@Test
public void testGetAllHosts() {
    // Exactly the two expected hosts must be present.
    var hosts = target.hosts();
    assertEquals(ERR_SIZE, 2, hosts.size());
    assertTrue(ERR_NOT_FOUND, hosts.contains(HOST_2));
    assertTrue(ERR_NOT_FOUND, hosts.contains(HOST_3));
}
@Override
public ProtobufSystemInfo.Section toProtobuf() {
    ProtobufSystemInfo.Section.Builder section = ProtobufSystemInfo.Section.newBuilder();
    section.setName("Compute Engine Tasks");

    try (DbSession dbSession = dbClient.openSession(false)) {
        // Queue counters per task status.
        setAttribute(section, "Total Pending",
            dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.PENDING));
        setAttribute(section, "Total In Progress",
            dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.IN_PROGRESS));
        // Fall back to the default worker count when no provider is set.
        setAttribute(section, "Max Workers per Node",
            workerCountProvider == null ? DEFAULT_NB_OF_WORKERS : workerCountProvider.get());
        // Workers are reported paused only when the internal property is
        // exactly the string "true".
        String pauseProperty = dbClient.internalPropertiesDao()
            .selectByKey(dbSession, InternalProperties.COMPUTE_ENGINE_PAUSE)
            .orElse(null);
        setAttribute(section, "Workers Paused", "true".equals(pauseProperty));
    }
    return section.build();
}
@Test
public void test_workers_not_paused() {
    // "Workers Paused" must be reported as false in this setup.
    ProtobufSystemInfo.Section section =
        new CeQueueGlobalSection(dbClient, workerCountProvider).toProtobuf();

    assertThatAttributeIs(section, "Workers Paused", false);
}
public boolean allowsLock(UUID ownerId) {
    // Locking is allowed when nothing effectively holds the lock (the lease
    // expired or it is not locked), or when this owner could unlock it.
    Preconditions.checkNotNull(ownerId);
    if (isLeaseExpired() || !isLocked()) {
        return true;
    }
    return allowsUnlock(ownerId);
}
@Test
public void testAllowsLock_success() {
    // An unlocked guard allows any owner to acquire the lock.
    assertTrue(LockGuard.NOT_LOCKED.allowsLock(TXN));
}
/**
 * Returns a factory that, given the KSQL config, instantiates the UDF
 * class, configures it (if it implements {@link Configurable}) and wraps
 * it for invocation — with metrics collection when metrics are enabled.
 */
private Function<KsqlConfig, Kudf> getUdfFactory(
    final Method method,
    final UdfDescription udfDescriptionAnnotation,
    final String functionName,
    final FunctionInvoker invoker,
    final String sensorName
) {
  return ksqlConfig -> {
    final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
        method.getDeclaringClass(), udfDescriptionAnnotation.name());
    if (actualUdf instanceof Configurable) {
      // Run user-supplied configure() under the extension security manager.
      ExtensionSecurityManager.INSTANCE.pushInUdf();
      try {
        ((Configurable) actualUdf)
            .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
      } finally {
        // Always pop the security-manager scope, even if configure() throws.
        ExtensionSecurityManager.INSTANCE.popOutUdf();
      }
    }
    final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
    // When metrics are enabled, wrap the UDF so invocations are recorded
    // against the named sensor; otherwise return the plain UDF.
    return metrics.<Kudf>map(m -> new UdfMetricProducer(
        m.getSensor(sensorName),
        theUdf,
        Time.SYSTEM
    )).orElse(theUdf);
  };
}
@Test
public void shouldLoadFunctionsInKsqlEngine() {
    // The "substring" UDF must be resolvable from the registry and dispatch
    // to the correct overload based on the argument types.
    final UdfFactory function = FUNC_REG.getUdfFactory(FunctionName.of("substring"));
    assertThat(function, not(nullValue()));

    // Two-argument overload: substring(str, pos).
    final Kudf substring1 = function.getFunction(
        Arrays.asList(SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.INTEGER))).newInstance(ksqlConfig);
    assertThat(substring1.evaluate("foo", 2), equalTo("oo"));

    // Three-argument overload: substring(str, pos, length).
    final Kudf substring2 = function.getFunction(
        Arrays.asList(SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.INTEGER), SqlArgument.of(SqlTypes.INTEGER))).newInstance(ksqlConfig);
    assertThat(substring2.evaluate("foo", 2, 1), equalTo("o"));
}
/**
 * Registers the periodic server status updater with the global executor
 * once this bean has been constructed.
 */
@PostConstruct
public void init() {
    GlobalExecutor.registerServerStatusUpdater(new ServerStatusUpdater());
}
@Test
void testInit() {
    // Statically mock GlobalExecutor so init() does not schedule real work,
    // then verify that a status updater gets registered.
    // Fix: parameterize MockedStatic — the original used the raw type.
    try (MockedStatic<GlobalExecutor> mocked = mockStatic(GlobalExecutor.class)) {
        ServerStatusManager serverStatusManager = new ServerStatusManager(protocolManager, switchDomain);
        serverStatusManager.init();
        mocked.verify(() -> GlobalExecutor.registerServerStatusUpdater(any()));
    }
}
/**
 * Resolves the {@link SegmentAssignmentStrategy} for a table.
 *
 * <p>Resolution order:
 * <ol>
 *   <li>DIM tables always get {@code AllServersSegmentAssignmentStrategy} (OFFLINE only).</li>
 *   <li>Otherwise, an explicit strategy from the table's segment-assignment config map
 *       (keyed by upper-cased {@code assignmentType}) wins.</li>
 *   <li>Otherwise the strategy is inferred from the instance partitions: a single
 *       replica group with a single partition means balanced assignment, anything
 *       else means replica-group assignment.</li>
 * </ol>
 *
 * @param helixManager       Helix manager passed to the strategy's {@code init}
 * @param tableConfig        table configuration to resolve the strategy for
 * @param assignmentType     instance-partitions type name (e.g. "OFFLINE"); used as the
 *                           config-map lookup key after upper-casing
 * @param instancePartitions instance partitions; required (non-null) only when no explicit
 *                           strategy is configured
 * @return an initialized segment assignment strategy (never null)
 */
public static SegmentAssignmentStrategy getSegmentAssignmentStrategy(HelixManager helixManager,
    TableConfig tableConfig, String assignmentType, InstancePartitions instancePartitions) {
  String assignmentStrategy = null;
  TableType currentTableType = tableConfig.getTableType();
  // TODO: Handle segment assignment strategy in future for CONSUMING segments in follow up PR
  // See https://github.com/apache/pinot/issues/9047
  // Accommodate new changes for assignment strategy
  Map<String, SegmentAssignmentConfig> segmentAssignmentConfigMap = tableConfig.getSegmentAssignmentConfigMap();
  if (tableConfig.isDimTable()) {
    // Segment Assignment Strategy for DIM tables: replicate segments to all servers.
    Preconditions.checkState(currentTableType == TableType.OFFLINE,
        "All Servers Segment assignment Strategy is only applicable to Dim OfflineTables");
    SegmentAssignmentStrategy segmentAssignmentStrategy = new AllServersSegmentAssignmentStrategy();
    segmentAssignmentStrategy.init(helixManager, tableConfig);
    return segmentAssignmentStrategy;
  } else {
    // Try to determine segment assignment strategy from table config
    if (segmentAssignmentConfigMap != null) {
      SegmentAssignmentConfig segmentAssignmentConfig;
      // Use the pre defined segment assignment strategy
      segmentAssignmentConfig = segmentAssignmentConfigMap.get(assignmentType.toUpperCase());
      // Segment assignment config is only applicable to offline tables and completed segments of real time tables
      if (segmentAssignmentConfig != null) {
        // Lower-cased so the switch below matches the AssignmentStrategy constants.
        assignmentStrategy = segmentAssignmentConfig.getAssignmentStrategy().toLowerCase();
      }
    }
  }
  // Use the existing information to determine segment assignment strategy
  SegmentAssignmentStrategy segmentAssignmentStrategy;
  if (assignmentStrategy == null) {
    // Calculate numReplicaGroups and numPartitions to determine segment assignment strategy
    Preconditions
        .checkState(instancePartitions != null, "Failed to find instance partitions for segment assignment strategy");
    int numReplicaGroups = instancePartitions.getNumReplicaGroups();
    int numPartitions = instancePartitions.getNumPartitions();
    if (numReplicaGroups == 1 && numPartitions == 1) {
      // Single replica group and partition: plain balanced assignment suffices.
      segmentAssignmentStrategy = new BalancedNumSegmentAssignmentStrategy();
    } else {
      segmentAssignmentStrategy = new ReplicaGroupSegmentAssignmentStrategy();
    }
  } else {
    // Set segment assignment strategy depending on strategy set in table config
    switch (assignmentStrategy) {
      case AssignmentStrategy.REPLICA_GROUP_SEGMENT_ASSIGNMENT_STRATEGY:
        segmentAssignmentStrategy = new ReplicaGroupSegmentAssignmentStrategy();
        break;
      case AssignmentStrategy.BALANCE_NUM_SEGMENT_ASSIGNMENT_STRATEGY:
      default:
        // Unknown strategy names fall back to balanced assignment.
        segmentAssignmentStrategy = new BalancedNumSegmentAssignmentStrategy();
        break;
    }
  }
  segmentAssignmentStrategy.init(helixManager, tableConfig);
  return segmentAssignmentStrategy;
}
@Test public void testReplicaGroupSegmentAssignmentStrategyForBackwardCompatibility() { int numInstancesPerReplicaGroup = NUM_INSTANCES / NUM_REPLICAS; int numInstancesPerPartition = numInstancesPerReplicaGroup / NUM_REPLICAS; ReplicaGroupStrategyConfig replicaGroupStrategyConfig = new ReplicaGroupStrategyConfig(PARTITION_COLUMN, numInstancesPerPartition); TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME_WITH_PARTITION) .setNumReplicas(NUM_REPLICAS).setSegmentAssignmentStrategy("ReplicaGroup") .setReplicaGroupStrategyConfig(replicaGroupStrategyConfig).build(); // { // 0_0=[instance_0, instance_1], 1_0=[instance_2, instance_3], 2_0=[instance_4, instance_5], // 0_1=[instance_6, instance_7], 1_1=[instance_8, instance_9], 2_1=[instance_10, instance_11], // 0_2=[instance_12, instance_13], 1_2=[instance_14, instance_15], 2_2=[instance_16, instance_17] // } InstancePartitions instancePartitions = new InstancePartitions(INSTANCE_PARTITIONS_NAME_WITH_PARTITION); int instanceIdToAdd = 0; for (int replicaGroupId = 0; replicaGroupId < NUM_REPLICAS; replicaGroupId++) { for (int partitionId = 0; partitionId < NUM_PARTITIONS; partitionId++) { List<String> instancesForPartition = new ArrayList<>(numInstancesPerPartition); for (int i = 0; i < numInstancesPerPartition; i++) { instancesForPartition.add(INSTANCES.get(instanceIdToAdd++)); } instancePartitions.setInstances(partitionId, replicaGroupId, instancesForPartition); } } SegmentAssignmentStrategy segmentAssignmentStrategy = SegmentAssignmentStrategyFactory .getSegmentAssignmentStrategy(null, tableConfig, InstancePartitionsType.OFFLINE.toString(), instancePartitions); Assert.assertNotNull(segmentAssignmentStrategy); Assert.assertTrue(segmentAssignmentStrategy instanceof ReplicaGroupSegmentAssignmentStrategy); }
/**
 * Adds the given configuration properties to this task's configuration.
 * Properties whose key is a known plugin configuration are rebuilt via the
 * builder (so the plugin-defined SECURE option is applied); unknown keys are
 * added unchanged.
 */
public void addConfigurations(List<ConfigurationProperty> configurations) {
    ConfigurationPropertyBuilder propertyBuilder = new ConfigurationPropertyBuilder();
    for (ConfigurationProperty candidate : configurations) {
        String key = candidate.getConfigKeyName();
        if (!isValidPluginConfiguration(key)) {
            // Not declared by the plugin: keep the property exactly as supplied.
            configuration.add(candidate);
            continue;
        }
        // Rebuild known properties so the plugin's SECURE flag takes effect.
        configuration.add(propertyBuilder.create(key, candidate.getConfigValue(), candidate.getEncryptedValue(),
                pluginConfigurationFor(key).getOption(Property.SECURE)));
    }
}
/**
 * addConfigurations must accept both a plugin-known property ("key") and an
 * unknown/empty one, adding both to the task's configuration.
 */
@Test
public void shouldAddConfigurationProperties() {
    List<ConfigurationProperty> configurationProperties =
        List.of(ConfigurationPropertyMother.create("key", "value", "encValue"), new ConfigurationProperty());
    PluginConfiguration pluginConfiguration = new PluginConfiguration("github.pr", "1.1");
    TaskPreference taskPreference = mock(TaskPreference.class);
    TaskConfig taskConfig = new TaskConfig();
    Configuration configuration = new Configuration();
    PluggableTaskConfigStore.store().setPreferenceFor(pluginConfiguration.getId(), taskPreference);
    // Register "key" as a plugin-declared property so addConfigurations treats it
    // as a valid plugin configuration and looks up its SECURE option.
    // (Removed a dead local: the original built a `Property("key")` with
    // SECURE=false but never used it.)
    taskConfig.addProperty("key");
    when(taskPreference.getConfig()).thenReturn(taskConfig);

    PluggableTask pluggableTask = new PluggableTask(pluginConfiguration, configuration);
    pluggableTask.addConfigurations(configurationProperties);

    // Both the known and the unknown property end up in the configuration.
    assertThat(configuration.size(), is(2));
}