Dataset column metadata:
- focal_method: string column, lengths ranging from 13 to 60.9k characters
- test_case: string column, lengths ranging from 25 to 109k characters
// Asynchronously removes and returns the head element, blocking server-side
// until one is available (BLPOP with timeout 0 = wait forever).
@Override
public RFuture<V> takeAsync() {
    return commandExecutor.writeAsync(getRawName(), codec, RedisCommands.BLPOP_VALUE, getRawName(), 0);
}
/**
 * Verifies that cancelling pending blocking takeAsync() futures releases the
 * underlying connection: with a pool of exactly one connection, ten cancelled
 * takes must not exhaust the pool, and subsequent add()/size() calls succeed.
 */
@Test
public void testTakeAsyncCancel() {
    Config config = createConfig();
    // A single-connection pool makes any leaked blocking connection immediately visible.
    config.useSingleServer().setConnectionMinimumIdleSize(1).setConnectionPoolSize(1);
    RedissonClient redisson = Redisson.create(config);
    RBlockingQueue<Integer> queue1 = getQueue(redisson);
    for (int i = 0; i < 10; i++) {
        RFuture<Integer> f = queue1.takeAsync();
        f.cancel(true);
    }
    assertThat(queue1.add(1)).isTrue();
    assertThat(queue1.add(2)).isTrue();
    assertThat(queue1.size()).isEqualTo(2);
    redisson.shutdown();
}
protected static VplsOperation getOptimizedVplsOperation(Deque<VplsOperation> operations) { if (operations.isEmpty()) { return null; } // no need to optimize if the queue contains only one operation if (operations.size() == 1) { return operations.getFirst(); } final VplsOperation firstOperation = operations.peekFirst(); final VplsOperation lastOperation = operations.peekLast(); final VplsOperation.Operation firstOp = firstOperation.op(); final VplsOperation.Operation lastOp = lastOperation.op(); if (firstOp.equals(VplsOperation.Operation.REMOVE)) { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 1: both first and last operation are REMOVE; do remove return firstOperation; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 2: if first is REMOVE, and last is ADD; do update return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } else { // case 3: first is REMOVE, last is UPDATE; do update return lastOperation; } } else if (firstOp.equals(VplsOperation.Operation.ADD)) { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 4: first is ADD, last is REMOVE; nothing to do return null; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 5: both first and last are ADD, do add return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.ADD); } else { // case 6: first is ADD and last is update, do add return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.ADD); } } else { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 7: last is remove, do remove return lastOperation; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 8: do update only return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } else { // case 9: from UPDATE to UPDATE // only need last UPDATE operation return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } } }
/**
 * ADD followed by UPDATE must optimize to a single ADD carrying the data of
 * the later UPDATE (the VPLS does not yet exist, so the intent is still ADD).
 */
@Test
public void testOptimizeOperationsAToU() {
    Deque<VplsOperation> operations = new ArrayDeque<>();
    VplsData vplsData = VplsData.of(VPLS1);
    vplsData.addInterfaces(ImmutableSet.of(V100H1));
    VplsOperation vplsOperation = VplsOperation.of(vplsData, VplsOperation.Operation.ADD);
    operations.add(vplsOperation);
    // The later UPDATE carries a different encapsulation and one more interface.
    vplsData = VplsData.of(VPLS1, EncapsulationType.VLAN);
    vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
    vplsOperation = VplsOperation.of(vplsData, VplsOperation.Operation.UPDATE);
    operations.add(vplsOperation);
    vplsOperation = VplsOperationManager.getOptimizedVplsOperation(operations);
    // Expect the newest data combined with the original ADD intent.
    assertEquals(VplsOperation.of(vplsData, VplsOperation.Operation.ADD), vplsOperation);
}
// No-arg constructor for dependency injection; the provider starts with no cached stats.
@Inject
public FragmentStatsProvider() {}
/**
 * Exercises put/get/invalidate on FragmentStatsProvider: stats are keyed by
 * (queryId, fragmentId), unknown keys return PlanNodeStatsEstimate.unknown(),
 * and invalidating a query removes only that query's entries.
 */
@Test
public void testFragmentStatsProvider() {
    FragmentStatsProvider fragmentStatsProvider = new FragmentStatsProvider();
    QueryId queryId1 = new QueryId("queryid1");
    QueryId queryId2 = new QueryId("queryid2");
    PlanFragmentId planFragmentId1 = new PlanFragmentId(1);
    PlanFragmentId planFragmentId2 = new PlanFragmentId(2);
    PlanNodeStatsEstimate planNodeStatsEstimate1 = new PlanNodeStatsEstimate(NaN, 10, FACT, ImmutableMap.of(),
            JoinNodeStatsEstimate.unknown(), TableWriterNodeStatsEstimate.unknown(), PartialAggregationStatsEstimate.unknown());
    PlanNodeStatsEstimate planNodeStatsEstimate2 = new PlanNodeStatsEstimate(NaN, 100, FACT, ImmutableMap.of(),
            JoinNodeStatsEstimate.unknown(), TableWriterNodeStatsEstimate.unknown(), PartialAggregationStatsEstimate.unknown());
    // Before any put, everything is unknown.
    assertEquals(fragmentStatsProvider.getStats(queryId1, planFragmentId1), PlanNodeStatsEstimate.unknown());
    fragmentStatsProvider.putStats(queryId1, planFragmentId1, planNodeStatsEstimate1);
    // queryId1, fragmentId1 stats are available, other stats are unknown
    assertEquals(fragmentStatsProvider.getStats(queryId1, planFragmentId1), planNodeStatsEstimate1);
    assertEquals(fragmentStatsProvider.getStats(queryId2, planFragmentId1), PlanNodeStatsEstimate.unknown());
    assertEquals(fragmentStatsProvider.getStats(queryId1, planFragmentId2), PlanNodeStatsEstimate.unknown());
    // queryid1, fragmentid2 stats are available
    fragmentStatsProvider.putStats(queryId1, planFragmentId2, planNodeStatsEstimate2);
    assertEquals(fragmentStatsProvider.getStats(queryId1, planFragmentId2), planNodeStatsEstimate2);
    // queryId2, fragmentId1 stats are available
    fragmentStatsProvider.putStats(queryId2, planFragmentId1, planNodeStatsEstimate1);
    assertEquals(fragmentStatsProvider.getStats(queryId2, planFragmentId1), planNodeStatsEstimate1);
    // invalidate query1, query1 stats are no longer available, query 2 stats are still available
    fragmentStatsProvider.invalidateStats(queryId1, 2);
    assertEquals(fragmentStatsProvider.getStats(queryId1, planFragmentId1), PlanNodeStatsEstimate.unknown());
    assertEquals(fragmentStatsProvider.getStats(queryId1, planFragmentId2), PlanNodeStatsEstimate.unknown());
    assertEquals(fragmentStatsProvider.getStats(queryId2, planFragmentId1), planNodeStatsEstimate1);
}
// The number of unstable (not yet confirmable) bars equals the configured
// count of surrounding higher bars used to confirm a swing point.
@Override
public int getUnstableBars() {
    return surroundingHigherBars;
}
/** getUnstableBars() must echo back the surrounding-bars count given at construction. */
@Test
public void testGetUnstableBars_whenSetSurroundingBars_ReturnsSameValue() {
    int surroundingBars = 2;
    RecentSwingLowIndicator swingLowIndicator = new RecentSwingLowIndicator(series, surroundingBars);
    assertEquals(surroundingBars, swingLowIndicator.getUnstableBars());
}
/**
 * Collector that yields the at-most-one element of the stream.
 *
 * @return an Optional holding the single element, or empty for no elements
 * @throws IllegalArgumentException if the stream produced more than one element
 */
public static <T> Collector<T, ?, Optional<T>> singleton() {
    return Collectors.collectingAndThen(
            Collectors.toList(),
            collected -> {
                if (collected.size() > 1) {
                    throw new IllegalArgumentException("More than one element");
                }
                // Zero or one element at this point.
                return collected.isEmpty() ? Optional.empty() : Optional.of(collected.get(0));
            });
}
/** Two elements surviving the filter must make the singleton collector throw. */
@Test
public void singleton_collector_throws_when_multiple() {
    List<String> items = List.of("foo1", "bar", "foo2");
    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
            () -> items.stream().filter(s -> s.startsWith("foo")).collect(CustomCollectors.singleton()));
    assertEquals("More than one element", exception.getMessage());
}
/**
 * Converts a raw string column value into the Java object matching the given
 * {@link java.sql.Types} code. Empty strings map to null except for textual
 * types (CHAR/VARCHAR/LONGVARCHAR or MySQL text types), where "" is preserved.
 * On any conversion failure the raw string is returned and an error logged
 * (best-effort conversion — do not turn this into a throw).
 *
 * @param tableName  table name, used only for error logging
 * @param columnName column name, used only for error logging
 * @param value      raw string value; may be null
 * @param sqlType    java.sql.Types code driving the conversion
 * @param mysqlType  MySQL-specific type name (distinguishes unsigned bigint, text)
 * @return converted value, null for SQL NULL / zero-dates, or the raw string on failure
 */
public static Object typeConvert(String tableName, String columnName, String value, int sqlType, String mysqlType) {
    if (value == null
            || (value.equals("") && !(isText(mysqlType) || sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.LONGVARCHAR))) {
        return null;
    }
    try {
        Object res;
        switch (sqlType) {
            case Types.INTEGER:
                res = Integer.parseInt(value);
                break;
            case Types.SMALLINT:
                res = Short.parseShort(value);
                break;
            case Types.BIT:
            case Types.TINYINT:
                res = Byte.parseByte(value);
                break;
            case Types.BIGINT:
                // unsigned bigint can exceed Long.MAX_VALUE, so use BigInteger.
                if (mysqlType.startsWith("bigint") && mysqlType.endsWith("unsigned")) {
                    res = new BigInteger(value);
                } else {
                    res = Long.parseLong(value);
                }
                break;
            // case Types.BIT:  // BIT intentionally handled above with TINYINT, not as BOOLEAN
            case Types.BOOLEAN:
                res = !"0".equals(value);
                break;
            case Types.DOUBLE:
            case Types.FLOAT:
                res = Double.parseDouble(value);
                break;
            case Types.REAL:
                res = Float.parseFloat(value);
                break;
            case Types.DECIMAL:
            case Types.NUMERIC:
                res = new BigDecimal(value);
                break;
            case Types.BINARY:
            case Types.VARBINARY:
            case Types.LONGVARBINARY:
            case Types.BLOB:
                // ISO-8859-1 maps each char back to its original byte, preserving binary data.
                res = value.getBytes("ISO-8859-1");
                break;
            case Types.DATE:
                // MySQL zero-date sentinel "0000-00-00" is unrepresentable — map to null.
                if (!value.startsWith("0000-00-00")) {
                    java.util.Date date = Util.parseDate(value);
                    if (date != null) {
                        res = new Date(date.getTime());
                    } else {
                        res = null;
                    }
                } else {
                    res = null;
                }
                break;
            case Types.TIME: {
                java.util.Date date = Util.parseDate(value);
                if (date != null) {
                    res = new Time(date.getTime());
                } else {
                    res = null;
                }
                break;
            }
            case Types.TIMESTAMP:
                // Same zero-date sentinel handling as DATE.
                if (!value.startsWith("0000-00-00")) {
                    java.util.Date date = Util.parseDate(value);
                    if (date != null) {
                        res = new Timestamp(date.getTime());
                    } else {
                        res = null;
                    }
                } else {
                    res = null;
                }
                break;
            case Types.CLOB:
            default:
                // Unknown/textual types pass through unchanged.
                res = value;
                break;
        }
        return res;
    } catch (Exception e) {
        // Best-effort: log and fall back to the raw string rather than failing the row.
        logger.error("table: {} column: {}, failed convert type {} to {}", tableName, columnName, value, sqlType);
        return value;
    }
}
/** sqlType 4 (Types.INTEGER) with value "3" must convert to the Integer 3. */
@Test
public void typeConvertInputNotNullNotNullNotNullPositiveNotNullOutputPositive() {
    // Arrange
    final String tableName = "foo";
    final String columnName = "foo";
    final String value = "3";
    final int sqlType = 4; // java.sql.Types.INTEGER
    final String mysqlType = "foo";
    // Act
    final Object actual = JdbcTypeUtil.typeConvert(tableName, columnName, value, sqlType, mysqlType);
    // Assert result
    Assert.assertEquals(3, actual);
}
public void register(GracefulShutdownHook shutdownHook) { if (isShuttingDown.get()) { // Avoid any changes to the shutdown hooks set when the shutdown is already in progress throw new IllegalStateException("Couldn't register shutdown hook because shutdown is already in progress"); } shutdownHooks.add(requireNonNull(shutdownHook, "shutdownHook cannot be null")); }
/**
 * A hook that throws during shutdown must not prevent the other hooks from
 * running: hook1 still fires; hook2's flag stays false because its lambda throws
 * before setting it.
 */
@Test
public void withExceptionOnShutdown() throws Exception {
    final AtomicBoolean hook1Called = new AtomicBoolean(false);
    final AtomicBoolean hook2Called = new AtomicBoolean(false);
    shutdownService.register(() -> hook1Called.set(true));
    shutdownService.register(() -> {
        throw new Exception("eek");
    });
    stop(shutdownService);
    assertThat(hook1Called).isTrue();
    assertThat(hook2Called).isFalse();
}
/**
 * Creates a Filter transform that keeps only the elements for which the given
 * predicate returns true.
 *
 * @param predicate per-element predicate deciding whether an element is kept
 */
public static <T, PredicateT extends ProcessFunction<T, Boolean>> Filter<T> by(
        PredicateT predicate) {
    return new Filter<>(predicate);
}
/** A predicate that is always false must produce an empty output collection. */
@Test
@Category(NeedsRunner.class)
public void testNoFilterByPredicate() {
    PCollection<Integer> output = p.apply(Create.of(1, 2, 4, 5)).apply(Filter.by(new TrivialFn(false)));
    PAssert.that(output).empty();
    p.run();
}
/**
 * Resolves a property name to a ConfigItem by prefix, in priority order:
 * request-config prefix first, then the KSQL prefix (excluding the streams
 * sub-prefix), otherwise falling through to Kafka Streams config resolution.
 *
 * @param strict forwarded only to the streams-config resolution
 */
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
    if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
        return resolveRequestConfig(propertyName);
    } else if (propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
            && !propertyName.startsWith(KSQL_STREAMS_PREFIX)) {
        // "ksql.streams.*" deliberately falls through to the streams resolver below.
        return resolveKsqlConfig(propertyName);
    }
    return resolveStreamsConfig(propertyName, strict);
}
/**
 * A "ksql.streams.topic.*" property must reach the streams resolver (with the
 * ksql.streams. prefix stripped) and come back as an unresolved item.
 */
@Test
public void shouldReturnUnresolvedForTopicPrefixedStreamsConfig() {
    final String prop = StreamsConfig.TOPIC_PREFIX + TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG;
    assertThat(resolver.resolve(KsqlConfig.KSQL_STREAMS_PREFIX + prop, true), is(unresolvedItem(prop)));
}
/**
 * Serializes the given config to XML on the output stream, optionally running
 * preprocessing and validators first. Rejects merged (non-local) configs, and
 * always performs XSD + DOM validation on the generated document before writing.
 *
 * @param configForEdit config to serialize; must have a local origin
 * @param output        destination stream for the XML
 * @param skipPreprocessingAndValidation true to bypass preprocess/validate step
 * @throws GoConfigInvalidException if the config origin is not local
 * @throws Exception on validation or serialization failure
 */
public void write(CruiseConfig configForEdit, OutputStream output, boolean skipPreprocessingAndValidation) throws Exception {
    LOGGER.debug("[Serializing Config] Starting to write. Validation skipped? {}", skipPreprocessingAndValidation);
    MagicalGoConfigXmlLoader loader = new MagicalGoConfigXmlLoader(configCache, registry);
    // Merged configs contain remote partials and must never be written back.
    if (!configForEdit.getOrigin().isLocal()) {
        throw new GoConfigInvalidException(configForEdit, "Attempted to save merged configuration with partials");
    }
    if (!skipPreprocessingAndValidation) {
        loader.preprocessAndValidate(configForEdit);
        LOGGER.debug("[Serializing Config] Done with cruise config validators.");
    }
    Document document = createEmptyCruiseConfigDocument();
    write(configForEdit, document.getRootElement(), configCache, registry);
    // Validate the generated XML itself before emitting it.
    LOGGER.debug("[Serializing Config] XSD and DOM validation.");
    verifyXsdValid(document);
    MagicalGoConfigXmlLoader.validateDom(document.getRootElement(), registry);
    LOGGER.info("[Serializing Config] Generating config partial.");
    XmlUtils.writeXml(document, output);
    LOGGER.debug("[Serializing Config] Finished writing config partial.");
}
/** Saving a config whose tracking tool has an empty regex must fail validation. */
@Test
public void shouldNotDefineATrackingToolWithoutARegex() {
    CruiseConfig cruiseConfig = ConfigMigrator.load(ConfigFileFixture.ONE_PIPELINE);
    PipelineConfig pipelineConfig = cruiseConfig.pipelineConfigByName(new CaseInsensitiveString("pipeline1"));
    pipelineConfig.setTrackingTool(new TrackingTool("link", ""));
    try {
        xmlWriter.write(cruiseConfig, output, false);
        fail("should not save a trackingtool without a regex");
    } catch (Exception e) {
        assertThat(e.getMessage(), containsString("Regex should be populated"));
    }
}
/**
 * Builds the lower-case elevation tile file name (e.g. "n42e011", "s52w058")
 * for the tile containing the given coordinate.
 */
String getFileName(double lat, double lon) {
    final int tileLon = getMinLonForTile(lon);
    final int tileLat = getMinLatForTile(lat);
    final String name = getNorthString(tileLat) + getPaddedLatString(tileLat)
            + getEastString(tileLon) + getPaddedLonString(tileLon);
    return toLowerCase(name);
}
/** Spot-checks tile names across all four hemisphere combinations. */
@Test
public void testGetFileName() {
    assertEquals("n42e011", instance.getFileName(42.940339, 11.953125));
    assertEquals("n38w078", instance.getFileName(38.548165, -77.167969));
    assertEquals("n14w005", instance.getFileName(14.116047, -4.277344));
    assertEquals("s52w058", instance.getFileName(-51.015725, -57.621094));
    assertEquals("n24e120", instance.getFileName(24.590108, 120.640625));
    assertEquals("s42w063", instance.getFileName(-41.015725, -62.949219));
}
/**
 * Logs diagnostic context (query id, SQL, plan) for a query cancelled by a
 * backend crash or RPC failure — only on the first retry, only when query
 * detail collection is off, and only when the config flag enables it.
 */
private static void handleRpcException(RpcException e, RetryContext context) {
    // When enable_collect_query_detail_info is set to true, the plan will be recorded in the query detail,
    // and hence there is no need to log it here.
    ConnectContext connectContext = context.connectContext;
    if (context.retryTime == 0 && connectContext.getQueryDetail() == null && Config.log_plan_cancelled_by_crash_be) {
        LOGGER.warn(
                "Query cancelled by crash of backends or RpcException, [QueryId={}] [SQL={}] [Plan={}]",
                DebugUtil.printId(connectContext.getExecutionId()),
                context.parsedStmt.getOrigStmt() == null ? "" : context.parsedStmt.getOrigStmt().originStmt,
                context.execPlan == null ? "" : context.execPlan.getExplainString(TExplainLevel.COSTS),
                e);
    }
}
/** An RpcException at retryTime 0 must be handled (logged) without rethrowing. */
@Test
public void testHandleRpcException() throws Exception {
    String sql = "select * from t0";
    StatementBase statementBase = SqlParser.parse(sql, connectContext.getSessionVariable()).get(0);
    ExecPlan execPlan = getExecPlan(sql);
    ExecuteExceptionHandler.RetryContext retryContext =
            new ExecuteExceptionHandler.RetryContext(0, execPlan, connectContext, statementBase);
    try {
        ExecuteExceptionHandler.handle(new RpcException("mock"), retryContext);
    } catch (Exception e) {
        fail("should not throw any exception");
    }
}
/**
 * Writes the remaining bytes of the buffer to the underlying stream.
 * Array-backed buffers are written directly from the backing array (no copy);
 * direct buffers are copied into a temporary array first. The buffer's
 * position is not advanced for the array-backed path.
 *
 * @throws RuntimeException wrapping any IOException from the stream
 */
@Override
public void writeByteBuffer(ByteBuffer buf) {
    try {
        if (buf.hasArray()) {
            // Zero-copy path: write straight out of the backing array.
            out.write(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
        } else {
            out.write(Utils.toArray(buf));
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * A sliced buffer has a non-zero arrayOffset; writing it must emit only the
 * slice's bytes (2, 3), not the start of the original backing array.
 */
@Test
public void testWritingSlicedByteBuffer() {
    byte[] expectedArray = new byte[]{2, 3, 0, 0};
    ByteBuffer sourceBuffer = ByteBuffer.wrap(new byte[]{0, 1, 2, 3});
    ByteBuffer resultBuffer = ByteBuffer.allocate(4);
    // Move position forward to ensure slice is not whole buffer
    sourceBuffer.position(2);
    ByteBuffer slicedBuffer = sourceBuffer.slice();
    Writable writable = new DataOutputStreamWritable(
            new DataOutputStream(new ByteBufferOutputStream(resultBuffer)));
    writable.writeByteBuffer(slicedBuffer);
    assertEquals(2, resultBuffer.position(), "Writing to the buffer moves the position forward");
    assertArrayEquals(expectedArray, resultBuffer.array(), "Result buffer should have expected elements");
}
/**
 * Handles a job-wide failure: records the failure cause with a timestamp,
 * asks the failure handler for a global fail-over decision, and restarts
 * tasks accordingly.
 */
@Override
public void handleGlobalFailure(final Throwable error) {
    final long timestamp = System.currentTimeMillis();
    setGlobalFailureCause(error, timestamp);
    log.info("Trying to recover from a global failure.", error);
    final FailureHandlingResult failureHandlingResult =
            executionFailureHandler.getGlobalFailureHandlingResult(error, timestamp);
    maybeRestartTasks(failureHandlingResult);
}
/**
 * A local task failure arriving while a global fail-over is in flight must not
 * disturb the global restart: after all tasks cancel and the restart fires,
 * both vertices are redeployed (initial deploy + global fail-over redeploy).
 */
@Test
void handleGlobalFailureWithLocalFailure() {
    final JobGraph jobGraph = singleJobVertexJobGraph(2);
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    enableCheckpointing(jobGraph);
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
    // Collect the attempt ids of all currently-running executions.
    final List<ExecutionAttemptID> attemptIds =
            StreamSupport.stream(
                            scheduler
                                    .requestJob()
                                    .getArchivedExecutionGraph()
                                    .getAllExecutionVertices()
                                    .spliterator(),
                            false)
                    .map(ArchivedExecutionVertex::getCurrentExecutionAttempt)
                    .map(ArchivedExecution::getAttemptId)
                    .collect(Collectors.toList());
    final ExecutionAttemptID localFailureAttemptId = attemptIds.get(0);
    scheduler.handleGlobalFailure(new Exception("global failure"));
    // the local failure shouldn't affect the global fail-over
    scheduler.updateTaskExecutionState(
            new TaskExecutionState(
                    localFailureAttemptId, ExecutionState.FAILED, new Exception("local failure")));
    for (ExecutionAttemptID attemptId : attemptIds) {
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(attemptId, ExecutionState.CANCELED));
    }
    taskRestartExecutor.triggerScheduledTasks();
    final ExecutionVertexID executionVertexId0 = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    final ExecutionVertexID executionVertexId1 = new ExecutionVertexID(onlyJobVertex.getID(), 1);
    assertThat(testExecutionOperations.getDeployedVertices())
            .withFailMessage(
                    "The "
                            + "execution vertices should be deployed in a specific order reflecting the "
                            + "scheduling start and the global fail-over afterwards.")
            .contains(
                    executionVertexId0, executionVertexId1, executionVertexId0, executionVertexId1);
}
// Package-private constructor: captures the client and schema and starts with
// a default TableView configuration to be customized by the builder methods.
TableViewBuilderImpl(PulsarClientImpl client, Schema<T> schema) {
    this.client = client;
    this.schema = schema;
    this.conf = new TableViewConfigurationData();
}
/** The builder's fluent chain with every optional setting must yield a TableView. */
@Test
public void testTableViewBuilderImpl() throws PulsarClientException {
    TableView tableView = tableViewBuilderImpl.topic(TOPIC_NAME)
            .autoUpdatePartitionsInterval(5, TimeUnit.SECONDS)
            .subscriptionName("testSubscriptionName")
            .cryptoKeyReader(mock(CryptoKeyReader.class))
            .cryptoFailureAction(ConsumerCryptoFailureAction.DISCARD)
            .create();
    assertNotNull(tableView);
}
// Looks up (or lazily creates) the state cell for the given namespace/tag in
// the per-work-item state table, with a null state context.
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
/**
 * entries()/keys() must merge three sources: a key cached complete locally
 * (key1, with a local put), a key present only in Windmill (key2), and a key
 * present only in the local cache (key3). Cached key1 values take precedence
 * alongside the Windmill copy, yielding 8 entries in total.
 */
@Test
public void testMultimapCachedPartialEntry() {
    final String tag = "multimap";
    StateTag<MultimapState<byte[], Integer>> addr =
            StateTags.multimap(tag, ByteArrayCoder.of(), VarIntCoder.of());
    MultimapState<byte[], Integer> multimapState = underTest.state(NAMESPACE, addr);
    final byte[] key1 = "key1".getBytes(StandardCharsets.UTF_8);
    final byte[] key2 = "key2".getBytes(StandardCharsets.UTF_8);
    final byte[] key3 = "key3".getBytes(StandardCharsets.UTF_8);
    SettableFuture<Iterable<Integer>> entryFuture = SettableFuture.create();
    when(mockReader.multimapFetchSingleEntryFuture(
            encodeWithCoder(key1, ByteArrayCoder.of()),
            key(NAMESPACE, tag),
            STATE_FAMILY,
            VarIntCoder.of()))
            .thenReturn(entryFuture);
    // to set up the entry key1 as cache complete and add some local changes
    waitAndSet(entryFuture, weightedList(1, 2, 3), 30);
    multimapState.get(key1).read();
    multimapState.put(key1, 2);
    multimapState.put(key3, 20);
    SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> entriesFuture =
            SettableFuture.create();
    when(mockReader.multimapFetchAllFuture(
            false, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of()))
            .thenReturn(entriesFuture);
    // windmill contains extra entry key2
    waitAndSet(
            entriesFuture,
            weightedList(multimapEntry(key1, 1, 2, 3), multimapEntry(key2, 4, 5, 6)),
            30);
    // key1 exist in both cache and windmill; key2 exists only in windmill; key3 exists only in
    // cache. They should all be merged.
    Iterable<Map.Entry<byte[], Integer>> entries = multimapState.entries().read();
    assertEquals(8, Iterables.size(entries));
    assertThat(
            entries,
            Matchers.containsInAnyOrder(
                    multimapEntryMatcher(key1, 1),
                    multimapEntryMatcher(key1, 2),
                    multimapEntryMatcher(key1, 2),
                    multimapEntryMatcher(key1, 3),
                    multimapEntryMatcher(key2, 4),
                    multimapEntryMatcher(key2, 5),
                    multimapEntryMatcher(key2, 6),
                    multimapEntryMatcher(key3, 20)));
    assertThat(multimapState.keys().read(), Matchers.containsInAnyOrder(key1, key2, key3));
}
/**
 * Fetches the JWK set by first discovering the jwks_uri from the discovery
 * document, then downloading the JWKS from that URL.
 *
 * NOTE(review): the refreshEvaluator and currentTime parameters are not
 * forwarded — the delegate is called with (null, 0, context). Presumably
 * caching/refresh is handled by an outer layer; confirm this is intentional.
 *
 * @throws RemoteKeySourceException if closing/fetching over HTTP fails
 */
@Override
public JWKSet getJWKSet(JWKSetCacheRefreshEvaluator refreshEvaluator, long currentTime, T context)
        throws KeySourceException {
    var jwksUrl = discoverJwksUrl();
    // try-with-resources ensures the underlying source is closed after the fetch.
    try (var jwkSetSource = new URLBasedJWKSetSource<>(jwksUrl, new HttpRetriever(httpClient))) {
        return jwkSetSource.getJWKSet(null, 0, context);
    } catch (IOException e) {
        throw new RemoteKeySourceException(
                "failed to fetch jwks from discovery document '%s'".formatted(discoveryUrl), e);
    }
}
/**
 * Happy path against WireMock: discovery document advertises a jwks_uri, the
 * JWKS endpoint serves one EC key, and the source must return exactly that key.
 */
@Test
void getJWKSet_success(WireMockRuntimeInfo wm) throws KeySourceException {
    var kid = "test";
    var jwks =
            """
            { "keys": [ { "kty": "EC", "use": "sig", "crv": "P-256", "kid": "%s", "x": "PFjgWFHOCAtnw47F3bT99fmWOKcDARN45JGEPgB8yKs", "y": "sFR6D_6Pa1vRRc5OfQNsetnN8EkXNliEipaip2L2OBg" } ] }
            """
                    .formatted(kid);
    var discoveryUrl = URI.create(wm.getHttpBaseUrl()).resolve(DISCOVERY_PATH);
    var jwksUrl = URI.create(wm.getHttpBaseUrl()).resolve(JWKS_PATH);
    stubFor(get(DISCOVERY_PATH).willReturn(okJson("{\"jwks_uri\": \"%s\"}".formatted(jwksUrl))));
    stubFor(get(JWKS_PATH).willReturn(okJson(jwks)));
    var sut = new DiscoveryJwkSetSource<>(HttpClient.newHttpClient(), discoveryUrl);
    var gotJwks = sut.getJWKSet(null, 0, null);
    assertNotNull(gotJwks.getKeyByKeyId(kid));
    assertEquals(1, gotJwks.getKeys().size());
}
/**
 * Dispatches source construction by data-source type (stream vs table) and
 * key-format windowing, returning the appropriate SchemaKStream variant.
 *
 * @throws UnsupportedOperationException for unknown data source types
 */
public static SchemaKStream<?> buildSource(
    final PlanBuildContext buildContext,
    final DataSource dataSource,
    final QueryContext.Stacker contextStacker
) {
  final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
  switch (dataSource.getDataSourceType()) {
    case KSTREAM:
      if (windowed) {
        return buildWindowedStream(buildContext, dataSource, contextStacker);
      }
      return buildStream(buildContext, dataSource, contextStacker);
    case KTABLE:
      if (windowed) {
        return buildWindowedTable(buildContext, dataSource, contextStacker);
      }
      return buildTable(buildContext, dataSource, contextStacker);
    default:
      throw new UnsupportedOperationException("Source type:" + dataSource.getDataSourceType());
  }
}
/**
 * Replacing a query built with the legacy TableSourceV1 must keep the legacy
 * pseudo-column version on the new source step, preserving schema compatibility.
 */
@Test
public void shouldReplaceTableSourceV1WithSame() {
    // Given:
    givenNonWindowedTable();
    givenExistingQueryWithOldPseudoColumnVersion(tableSourceV1);
    // When:
    final SchemaKStream<?> result = SchemaKSourceFactory.buildSource(
            buildContext,
            dataSource,
            contextStacker
    );
    // Then:
    assertThat(((TableSourceV1) result.getSourceStep()).getPseudoColumnVersion(),
            equalTo(LEGACY_PSEUDOCOLUMN_VERSION_NUMBER));
    assertValidSchema(result);
}
/**
 * Registers each metadata entry with the register service matching its RPC
 * type. Entries with no matching service are skipped. Registration is
 * serialized per service instance via the synchronized block.
 */
@Override
public void executor(final Collection<MetaDataRegisterDTO> metaDataRegisterDTOList) {
    for (final MetaDataRegisterDTO meta : metaDataRegisterDTOList) {
        final ShenyuClientRegisterService registerService = this.shenyuClientRegisterService.get(meta.getRpcType());
        if (registerService == null) {
            // No service registered for this RPC type — skip the entry.
            continue;
        }
        synchronized (registerService) {
            registerService.register(meta);
        }
    }
}
/**
 * An empty list is a no-op; once an entry exists and a service matches its
 * RPC type, register() must be invoked on that service.
 */
@Test
public void testExecutor() {
    List<MetaDataRegisterDTO> list = new ArrayList<>();
    metadataExecutorSubscriber.executor(list);
    Assertions.assertTrue(list.isEmpty());
    list.add(MetaDataRegisterDTO.builder().appName("test").build());
    ShenyuClientRegisterService service = mock(ShenyuClientRegisterService.class);
    when(shenyuClientRegisterService.get(any())).thenReturn(service);
    metadataExecutorSubscriber.executor(list);
    verify(service).register(any());
}
/**
 * Converts a string to a Short, mapping null/empty input to null.
 *
 * @throws NumberFormatException if the source is non-empty but not numeric
 */
@Override
public Short convert(String source) {
    if (isNotEmpty(source)) {
        return valueOf(source);
    }
    return null;
}
/** Valid numeric string converts; null maps to null; garbage throws NumberFormatException. */
@Test
void testConvert() {
    assertEquals(Short.valueOf("1"), converter.convert("1"));
    assertNull(converter.convert(null));
    assertThrows(NumberFormatException.class, () -> {
        converter.convert("ttt");
    });
}
/**
 * Resolves the naming namespace using a fallback chain, in priority order:
 * 1) cloud (ANS) namespace, then ALIBABA_ALIWARE_NAMESPACE env — only when
 *    cloud namespace parsing is enabled;
 * 2) the JVM system property "namespace";
 * 3) the "namespace" entry from the supplied properties;
 * 4) the hard-coded default namespace id.
 * The fallback ordering is load-bearing; do not reorder the steps.
 */
public static String initNamespaceForNaming(NacosClientProperties properties) {
    String tmpNamespace = null;
    String isUseCloudNamespaceParsing = properties.getProperty(PropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING,
            properties.getProperty(SystemPropertyKeyConst.IS_USE_CLOUD_NAMESPACE_PARSING,
                    String.valueOf(Constants.DEFAULT_USE_CLOUD_NAMESPACE_PARSING)));
    if (Boolean.parseBoolean(isUseCloudNamespaceParsing)) {
        tmpNamespace = TenantUtil.getUserTenantForAns();
        LogUtils.NAMING_LOGGER.info("initializer namespace from ans.namespace attribute : {}", tmpNamespace);
        tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> {
            String namespace = properties.getProperty(PropertyKeyConst.SystemEnv.ALIBABA_ALIWARE_NAMESPACE);
            LogUtils.NAMING_LOGGER.info("initializer namespace from ALIBABA_ALIWARE_NAMESPACE attribute :" + namespace);
            return namespace;
        });
    }
    tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> {
        String namespace = properties.getPropertyFrom(SourceType.JVM, PropertyKeyConst.NAMESPACE);
        LogUtils.NAMING_LOGGER.info("initializer namespace from namespace attribute :" + namespace);
        return namespace;
    });
    if (StringUtils.isEmpty(tmpNamespace)) {
        tmpNamespace = properties.getProperty(PropertyKeyConst.NAMESPACE);
    }
    // Final fallback: the default namespace id.
    tmpNamespace = TemplateUtils.stringEmptyAndThenExecute(tmpNamespace, () -> UtilAndComs.DEFAULT_NAMESPACE_ID);
    return tmpNamespace;
}
/**
 * With cloud parsing yielding nothing, the explicitly-set "namespace" property
 * must win in the fallback chain.
 */
@Test
void testInitNamespaceFromPropNamespaceWithCloudParsing() {
    final NacosClientProperties properties = NacosClientProperties.PROTOTYPE.derive();
    String expect = "ns1";
    properties.setProperty(PropertyKeyConst.NAMESPACE, expect);
    String ns = InitUtils.initNamespaceForNaming(properties);
    assertEquals(expect, ns);
}
// True iff this counter's name matches the reserved per-job counter name.
public boolean isJobCounter() {
    return JOB_COUNTER_NAME.equals(name);
}
/** Only the counter literally named "job" qualifies as the job counter. */
@Test
public void testJobCounter() {
    assertFalse("jobCounter", new Counter("spring", null).isJobCounter());
    assertTrue("jobCounter", new Counter("job", null).isJobCounter());
}
// Convenience accessor: the application name of the underlying ApplicationModel.
public String getApplicationName() {
    return getApplicationModel().getApplicationName();
}
/** The metric must report the application name configured on its ApplicationModel. */
@Test
void getApplicationName() {
    ApplicationModel applicationModel = ApplicationModel.defaultModel();
    String mockMetrics = "MockMetrics";
    applicationModel
            .getApplicationConfigManager()
            .setApplication(new org.apache.dubbo.config.ApplicationConfig(mockMetrics));
    ApplicationMetric applicationMetric = new ApplicationMetric(applicationModel);
    Assertions.assertNotNull(applicationMetric);
    Assertions.assertEquals(mockMetrics, applicationMetric.getApplicationName());
}
/**
 * Deletes the given groups, fanning out one write operation per owning
 * coordinator partition and combining the per-partition results into a single
 * result collection. Null group ids (legacy empty-group-id support) get an
 * immediate INVALID_GROUP_ID result; a shut-down coordinator returns
 * COORDINATOR_NOT_AVAILABLE for all groups.
 */
@Override
public CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection> deleteGroups(
    RequestContext context,
    List<String> groupIds,
    BufferSupplier bufferSupplier
) {
    if (!isActive.get()) {
        return CompletableFuture.completedFuture(DeleteGroupsRequest.getErrorResultCollection(
            groupIds,
            Errors.COORDINATOR_NOT_AVAILABLE
        ));
    }
    final List<CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection>> futures =
        new ArrayList<>(groupIds.size());
    // Group ids bucketed by the coordinator partition that owns them.
    final Map<TopicPartition, List<String>> groupsByTopicPartition = new HashMap<>();
    groupIds.forEach(groupId -> {
        // For backwards compatibility, we support DeleteGroups for the empty group id.
        if (groupId == null) {
            futures.add(CompletableFuture.completedFuture(DeleteGroupsRequest.getErrorResultCollection(
                Collections.singletonList(null),
                Errors.INVALID_GROUP_ID
            )));
        } else {
            final TopicPartition topicPartition = topicPartitionFor(groupId);
            groupsByTopicPartition
                .computeIfAbsent(topicPartition, __ -> new ArrayList<>())
                .add(groupId);
        }
    });
    // One write operation per owning partition; failures map to per-group errors.
    groupsByTopicPartition.forEach((topicPartition, groupList) -> {
        CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection> future =
            runtime.scheduleWriteOperation(
                "delete-groups",
                topicPartition,
                Duration.ofMillis(config.offsetCommitTimeoutMs()),
                coordinator -> coordinator.deleteGroups(context, groupList)
            ).exceptionally(exception -> handleOperationException(
                "delete-groups",
                groupList,
                exception,
                (error, __) -> DeleteGroupsRequest.getErrorResultCollection(groupList, error)
            ));
        futures.add(future);
    });
    return FutureUtils.combineFutures(futures, DeleteGroupsResponseData.DeletableGroupResultCollection::new,
        // We don't use res.addAll(future.join()) because DeletableGroupResultCollection is an ImplicitLinkedHashMultiCollection,
        // which has requirements for adding elements (see ImplicitLinkedHashCollection.java#add).
        (accumulator, newResults) -> newResults.forEach(result -> accumulator.add(result.duplicate())));
}
/**
 * End-to-end deleteGroups over three partitions: one completes immediately,
 * one completes later (the combined future must wait for it), and one fails
 * with COORDINATOR_LOAD_IN_PROGRESS, mapping to a per-group error result.
 * A null group id yields INVALID_GROUP_ID without touching the runtime.
 */
@Test
public void testDeleteGroups() throws Exception {
    CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
    GroupCoordinatorService service = new GroupCoordinatorService(
        new LogContext(),
        createConfig(),
        runtime,
        mock(GroupCoordinatorMetrics.class),
        createConfigManager()
    );
    service.startup(() -> 3);
    DeleteGroupsResponseData.DeletableGroupResultCollection resultCollection1 =
        new DeleteGroupsResponseData.DeletableGroupResultCollection();
    DeleteGroupsResponseData.DeletableGroupResult result1 = new DeleteGroupsResponseData.DeletableGroupResult()
        .setGroupId("group-id-1");
    resultCollection1.add(result1);
    DeleteGroupsResponseData.DeletableGroupResultCollection resultCollection2 =
        new DeleteGroupsResponseData.DeletableGroupResultCollection();
    DeleteGroupsResponseData.DeletableGroupResult result2 = new DeleteGroupsResponseData.DeletableGroupResult()
        .setGroupId("group-id-2");
    resultCollection2.add(result2);
    DeleteGroupsResponseData.DeletableGroupResult result3 = new DeleteGroupsResponseData.DeletableGroupResult()
        .setGroupId("group-id-3")
        .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code());
    DeleteGroupsResponseData.DeletableGroupResultCollection expectedResultCollection =
        new DeleteGroupsResponseData.DeletableGroupResultCollection();
    expectedResultCollection.addAll(Arrays.asList(
        new DeleteGroupsResponseData.DeletableGroupResult().setGroupId(null).setErrorCode(Errors.INVALID_GROUP_ID.code()),
        result2.duplicate(),
        result3.duplicate(),
        result1.duplicate()
    ));
    // Partition 2 completes immediately.
    when(runtime.scheduleWriteOperation(
        ArgumentMatchers.eq("delete-groups"),
        ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 2)),
        ArgumentMatchers.eq(Duration.ofMillis(5000)),
        ArgumentMatchers.any()
    )).thenReturn(CompletableFuture.completedFuture(resultCollection1));
    // Partition 0 completes only when we complete this future below.
    CompletableFuture<Object> resultCollectionFuture = new CompletableFuture<>();
    when(runtime.scheduleWriteOperation(
        ArgumentMatchers.eq("delete-groups"),
        ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 0)),
        ArgumentMatchers.eq(Duration.ofMillis(5000)),
        ArgumentMatchers.any()
    )).thenReturn(resultCollectionFuture);
    // Partition 1 fails; the error must surface as a per-group error code.
    when(runtime.scheduleWriteOperation(
        ArgumentMatchers.eq("delete-groups"),
        ArgumentMatchers.eq(new TopicPartition("__consumer_offsets", 1)),
        ArgumentMatchers.eq(Duration.ofMillis(5000)),
        ArgumentMatchers.any()
    )).thenReturn(FutureUtils.failedFuture(Errors.COORDINATOR_LOAD_IN_PROGRESS.exception()));
    List<String> groupIds = Arrays.asList("group-id-1", "group-id-2", "group-id-3", null);
    CompletableFuture<DeleteGroupsResponseData.DeletableGroupResultCollection> future =
        service.deleteGroups(requestContext(ApiKeys.DELETE_GROUPS), groupIds, BufferSupplier.NO_CACHING);
    assertFalse(future.isDone());
    resultCollectionFuture.complete(resultCollection2);
    assertTrue(future.isDone());
    assertEquals(expectedResultCollection, future.get());
}
/**
 * Looks up the PostgreSQL type OID for the given column type name.
 *
 * @param columnTypeName column type name, e.g. "_int4"
 * @return the PostgreSQL OID for the type
 * @throws IllegalArgumentException if the type name is not mapped
 */
public static int getTypeOid(final String columnTypeName) {
    if (!COLUMN_TYPE_NAME_OID_MAP.containsKey(columnTypeName)) {
        throw new IllegalArgumentException(
                String.format("Cannot find PostgreSQL type oid for columnTypeName '%s'", columnTypeName));
    }
    return COLUMN_TYPE_NAME_OID_MAP.get(columnTypeName);
}
/** "_int4" (int4 array) must map to the well-known PostgreSQL OID 1007. */
@Test
void assertGetTypeOidSuccess() {
    assertThat(PostgreSQLArrayColumnType.getTypeOid("_int4"), is(1007));
}
// The wizard step is complete once this keyboard is the device's default IME.
@Override
protected boolean isStepCompleted(@NonNull Context context) {
    return SetupSupport.isThisKeyboardSetAsDefaultIME(context);
}
/**
 * While the keyboard is not the default IME the step is incomplete, the state
 * icon shows the "switch off" drawable, and the icon remains clickable.
 */
@Test
public void testKeyboardNotEnabled() {
    WizardPageSwitchToKeyboardFragment fragment = startFragment();
    Assert.assertFalse(fragment.isStepCompleted(getApplicationContext()));
    ImageView stateIcon = fragment.getView().findViewById(R.id.step_state_icon);
    Assert.assertNotNull(stateIcon);
    Assert.assertEquals(
            R.drawable.ic_wizard_switch_off,
            Shadows.shadowOf(stateIcon.getDrawable()).getCreatedFromResId());
    Assert.assertTrue(stateIcon.isClickable());
}
// Reports this node's cluster health as the name of its current member state.
@GetMapping("/health")
@Secured(resource = Commons.NACOS_CORE_CONTEXT + "/cluster", action = ActionTypes.READ, signType = SignType.CONSOLE)
public RestResult<String> getHealth() {
    return RestResultUtils.success(memberManager.getSelf().getState().name());
}
/** The health endpoint must echo the member manager's self state name ("UP"). */
@Test
void testGetHealth() {
    Member self = new Member();
    self.setState(NodeState.UP);
    Mockito.when(serverMemberManager.getSelf()).thenReturn(self);
    RestResult<String> result = nacosClusterController.getHealth();
    assertEquals(NodeState.UP.name(), result.getData());
}
/**
 * Creates a {@link Write} transform with a default connection configuration
 * and the APPEND write method; callers customize it via {@code with...} calls.
 */
public static Write write() {
    final AutoValue_RedisIO_Write.Builder builder = new AutoValue_RedisIO_Write.Builder();
    return builder
        .setConnectionConfiguration(RedisConnectionConfiguration.create())
        .setMethod(Write.Method.APPEND)
        .build();
}
@Test
public void testWriteWithMethodSAdd() {
    String key = "testWriteWithMethodSAdd";
    // Duplicates ("0", "2") must collapse because SADD writes into a set.
    List<String> values = Arrays.asList("0", "1", "2", "3", "2", "4", "0", "5");
    List<KV<String, String>> data = buildConstantKeyList(key, values);
    PCollection<KV<String, String>> write = p.apply(Create.of(data));
    write.apply(RedisIO.write().withEndpoint(REDIS_HOST, port).withMethod(Method.SADD));
    p.run();
    Set<String> expected = new HashSet<>(values);
    Set<String> members = client.smembers(key);
    assertEquals(expected, members);
}
/**
 * Returns the external storage directory path when external storage is
 * mounted (read-write or read-only); returns {@code null} otherwise.
 */
public String getMountedExternalStorageDirectoryPath() {
    final String storageState = Environment.getExternalStorageState();
    final boolean mounted = Environment.MEDIA_MOUNTED.equals(storageState)
            || Environment.MEDIA_MOUNTED_READ_ONLY.equals(storageState);
    return mounted ? getExternalStorageDirectoryPath() : null;
}
@Test
public void getMountedExternalStorageDirectoryPathReturnsNullWhenNoFs() {
    // MEDIA_NOFS is neither MOUNTED nor MOUNTED_READ_ONLY -> expect null.
    ShadowEnvironment.setExternalStorageState(Environment.MEDIA_NOFS);
    assertThat(contextUtil.getMountedExternalStorageDirectoryPath(), is(nullValue()));
}
/**
 * Reads a message property and returns it only when it is a {@link String};
 * returns {@code null} for non-string values. Any lookup failure is logged
 * and swallowed (fatal errors are re-thrown) so tracing never breaks messaging.
 */
@Nullable
static String getPropertyIfString(Message message, String name) {
    try {
        Object value = message.getObjectProperty(name);
        return value instanceof String ? (String) value : null;
    } catch (Throwable t) {
        propagateIfFatal(t); // rethrow VM-level errors
        log(t, "error getting property {0} from message {1}", name, message);
        return null;
    }
}
@Test
void getPropertyIfString_notString() throws Exception {
    // A byte-typed property is not a String, so the helper must return null.
    message.setByteProperty("b3", (byte) 0);
    assertThat(MessageProperties.getPropertyIfString(message, "b3"))
        .isNull();
}
/**
 * Converts realtime segments into offline segments for the mapped OFFLINE
 * table: builds a SegmentProcessorConfig (time handling, partitioning, merge
 * type, aggregations, segment config) from the task configs, reads all input
 * segments, runs the SegmentProcessorFramework, and wraps each produced
 * segment directory in a SegmentConversionResult.
 *
 * @param pinotTaskConfig task configuration (table name, merge settings, ...)
 * @param segmentDirs     untarred input segment directories
 * @param workingDir      scratch directory for generated segments
 * @return one conversion result per generated output segment
 * @throws Exception on any conversion failure
 */
@Override
protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs,
    File workingDir)
    throws Exception {
  int numInputSegments = segmentDirs.size();
  _eventObserver.notifyProgress(pinotTaskConfig, "Converting segments: " + numInputSegments);
  String taskType = pinotTaskConfig.getTaskType();
  Map<String, String> configs = pinotTaskConfig.getConfigs();
  LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
  long startMillis = System.currentTimeMillis();

  // Derive the OFFLINE table name from the realtime table in the configs.
  String realtimeTableName = configs.get(MinionConstants.TABLE_NAME_KEY);
  String rawTableName = TableNameBuilder.extractRawTableName(realtimeTableName);
  String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName);
  TableConfig tableConfig = getTableConfig(offlineTableName);
  Schema schema = getSchema(offlineTableName);

  SegmentProcessorConfig.Builder segmentProcessorConfigBuilder =
      new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);

  // Time handler config
  segmentProcessorConfigBuilder
      .setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs));

  // Partitioner config
  segmentProcessorConfigBuilder
      .setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs));

  // Merge type
  MergeType mergeType = MergeTaskUtils.getMergeType(configs);
  // Handle legacy key (COLLECTOR_TYPE_KEY) when the new merge-type key is absent.
  if (mergeType == null) {
    String legacyMergeTypeStr = configs.get(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY);
    if (legacyMergeTypeStr != null) {
      mergeType = MergeType.valueOf(legacyMergeTypeStr.toUpperCase());
    }
  }
  segmentProcessorConfigBuilder.setMergeType(mergeType);

  // Aggregation types
  segmentProcessorConfigBuilder.setAggregationTypes(MergeTaskUtils.getAggregationTypes(configs));

  // Segment config
  segmentProcessorConfigBuilder.setSegmentConfig(MergeTaskUtils.getSegmentConfig(configs));

  // Progress observer
  segmentProcessorConfigBuilder.setProgressObserver(p -> _eventObserver.notifyProgress(_pinotTaskConfig, p));

  SegmentProcessorConfig segmentProcessorConfig = segmentProcessorConfigBuilder.build();

  // One record reader per input segment.
  List<RecordReader> recordReaders = new ArrayList<>(numInputSegments);
  int count = 1;
  for (File segmentDir : segmentDirs) {
    _eventObserver.notifyProgress(_pinotTaskConfig,
        String.format("Creating RecordReader for: %s (%d out of %d)", segmentDir, count++, numInputSegments));
    PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader();
    // NOTE: Do not fill null field with default value to be consistent with other record readers
    recordReader.init(segmentDir, null, null, true);
    recordReaders.add(recordReader);
  }
  List<File> outputSegmentDirs;
  try {
    _eventObserver.notifyProgress(_pinotTaskConfig, "Generating segments");
    outputSegmentDirs = new SegmentProcessorFramework(recordReaders, segmentProcessorConfig, workingDir).process();
  } finally {
    // Always release the readers, even when processing fails.
    for (RecordReader recordReader : recordReaders) {
      recordReader.close();
    }
  }

  long endMillis = System.currentTimeMillis();
  LOGGER.info("Finished task: {} with configs: {}. Total time: {}ms", taskType, configs, (endMillis - startMillis));
  List<SegmentConversionResult> results = new ArrayList<>();
  for (File outputSegmentDir : outputSegmentDirs) {
    String outputSegmentName = outputSegmentDir.getName();
    results.add(new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName)
        .setTableNameWithType(offlineTableName).build());
  }
  return results;
}
@Test
public void testRollupWithTimeTransformation()
    throws Exception {
  FileUtils.deleteQuietly(WORKING_DIR);

  RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
      new RealtimeToOfflineSegmentsTaskExecutor(null, null);
  realtimeToOfflineSegmentsTaskExecutor.setMinionEventObserver(new MinionProgressObserver());
  // One-day window with 1d rounding plus rollup merge type.
  Map<String, String> configs = new HashMap<>();
  configs.put(MinionConstants.TABLE_NAME_KEY, TABLE_NAME);
  configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
  configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
  configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY, "1d");
  configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, "rollup");
  PinotTaskConfig pinotTaskConfig =
      new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);

  List<SegmentConversionResult> conversionResults =
      realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);

  // Rollup collapses the rows; rounding leaves a single distinct time value.
  assertEquals(conversionResults.size(), 1);
  File resultingSegment = conversionResults.get(0).getFile();
  SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(resultingSegment);
  assertEquals(segmentMetadata.getTotalDocs(), 2);
  ColumnMetadata columnMetadataForT = segmentMetadata.getColumnMetadataFor(T);
  assertEquals(columnMetadataForT.getCardinality(), 1);
  assertEquals((long) columnMetadataForT.getMinValue(), 1600473600000L);
}
/**
 * Builds the stream for a StreamSelect step. When the step selects a proper
 * subset of the source key columns, the key is re-built from the selected
 * key indices via a full transform; otherwise only the values are mapped.
 *
 * @param stream       the upstream stream holder
 * @param step         the select step (key columns, selected keys, expressions)
 * @param buildContext runtime context (config, function registry, logging)
 * @return a stream holder carrying the projected stream and schema
 */
public static <K> KStreamHolder<K> build(
    final KStreamHolder<K> stream,
    final StreamSelect<K> step,
    final RuntimeBuildContext buildContext
) {
  final QueryContext queryContext = step.getProperties().getQueryContext();

  final LogicalSchema sourceSchema = stream.getSchema();
  final Optional<ImmutableList<ColumnName>> selectedKeys = step.getSelectedKeys();

  final Selection<K> selection = Selection.of(
      sourceSchema,
      step.getKeyColumnNames(),
      selectedKeys,
      step.getSelectExpressions(),
      buildContext.getKsqlConfig(),
      buildContext.getFunctionRegistry()
  );

  // Map each selected key column name to its position in the source key.
  final ImmutableList.Builder<Integer> keyIndexBuilder = ImmutableList.builder();
  if (selectedKeys.isPresent()) {
    final ImmutableList<ColumnName> keyNames = sourceSchema.key().stream()
        .map(Column::name)
        .collect(ImmutableList.toImmutableList());

    for (final ColumnName keyName : selectedKeys.get()) {
      keyIndexBuilder.add(keyNames.indexOf(keyName));
    }
  }
  final ImmutableList<Integer> keyIndices = keyIndexBuilder.build();

  final SelectValueMapper<K> selectMapper = selection.getMapper();
  final ProcessingLogger logger = buildContext.getProcessingLogger(queryContext);
  final Named selectName = Named.as(StreamsUtil.buildOpName(queryContext));

  // Key changes only when some source key columns are dropped by the select.
  if (selectedKeys.isPresent() && !selectedKeys.get().containsAll(
      sourceSchema.key().stream().map(Column::name).collect(ImmutableList.toImmutableList())
  )) {
    return stream.withStream(
        stream.getStream().transform(
            () -> new KsTransformer<>(
                (readOnlyKey, value, ctx) -> {
                  if (keyIndices.isEmpty()) {
                    return null;
                  }

                  if (readOnlyKey instanceof GenericKey) {
                    // Rebuild the key from the retained key positions only.
                    final GenericKey keys = (GenericKey) readOnlyKey;
                    final Builder resultKeys = GenericKey.builder(keyIndices.size());

                    for (final int keyIndex : keyIndices) {
                      resultKeys.append(keys.get(keyIndex));
                    }

                    return (K) resultKeys.build();
                  } else {
                    // Only GenericKey keys are supported for key trimming.
                    throw new UnsupportedOperationException();
                  }
                },
                selectMapper.getTransformer(logger)
            ),
            selectName
        ),
        selection.getSchema()
    );
  } else {
    // Key unchanged: a value-only transformation suffices.
    return stream.withStream(
        stream.getStream().transformValues(
            () -> new KsValueTransformer<>(selectMapper.getTransformer(logger)),
            selectName
        ),
        selection.getSchema()
    );
  }
}
@Test
public void shouldReturnResultKStream() {
  // When:
  final KStreamHolder<Struct> result = step.build(planBuilder, planInfo);

  // Then: the holder carries the projected stream and key factory.
  assertThat(result.getStream(), is(resultKStream));
  assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory));
}
/**
 * Dot product of two sparse binary vectors, each represented by the sorted
 * (ascending) array of indices of its nonzero entries. The result equals the
 * number of indices the two arrays have in common.
 *
 * @param a sorted nonzero indices of the first vector
 * @param b sorted nonzero indices of the second vector
 * @return the size of the intersection of the two index sets
 */
public static int dot(int[] a, int[] b) {
    int matches = 0;
    int i = 0;
    int j = 0;
    // Classic two-pointer merge over the sorted index arrays.
    while (i < a.length && j < b.length) {
        if (a[i] == b[j]) {
            matches++;
            i++;
            j++;
        } else if (a[i] < b[j]) {
            i++;
        } else {
            j++;
        }
    }
    return matches;
}
@Test
public void testDot_doubleArr_doubleArr() {
    System.out.println("dot");
    // Dense double-vector overload: expected value is the hand-computed sum of products.
    double[] x = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515};
    double[] y = {-1.7781325, -0.6659839, 0.9526148, -0.9460919, -0.3925300};
    assertEquals(3.350726, MathEx.dot(x, y), 1E-6);
}
/**
 * Prepares the plugin download directory on startup: creates it if missing
 * and removes temp files left over from previously interrupted downloads.
 */
@Override
public void start() {
    try {
        forceMkdir(downloadDir);
        for (File tempFile : listTempFile(this.downloadDir)) {
            deleteQuietly(tempFile);
        }
    } catch (IOException e) {
        // forceMkdir also fails when the path already exists as a regular file.
        throw new IllegalStateException("Fail to create the directory: " + downloadDir, e);
    }
}
@Test
public void throw_exception_if_download_dir_is_invalid() throws Exception {
    ServerFileSystem fs = mock(ServerFileSystem.class);
    // download dir is a file instead of being a directory
    File downloadDir = testFolder.newFile();
    when(fs.getDownloadedPluginsDir()).thenReturn(downloadDir);
    pluginDownloader = new PluginDownloader(updateCenterMatrixFactory, httpDownloader, fs);
    try {
        pluginDownloader.start();
        fail();
    } catch (IllegalStateException e) {
        // ok
    }
}
/**
 * Static factory: builds a ServiceInfo from its serialized key
 * (e.g. "group@@name" or "group@@name@@clusters" — see ServiceInfo ctor).
 */
public static ServiceInfo fromKey(final String key) {
    return new ServiceInfo(key);
}
@Test
void testFromKey() {
    // Round-trip: a key with and without the optional clusters part.
    String key1 = "group@@name";
    String key2 = "group@@name@@c2";
    ServiceInfo s1 = ServiceInfo.fromKey(key1);
    ServiceInfo s2 = ServiceInfo.fromKey(key2);
    assertEquals(key1, s1.getKey());
    assertEquals(key2, s2.getKey());
}
// Extracts the parameter names from a template's content, i.e. every first
// capture group matched by PATTERN_PARAMS (the {placeholder} names).
@VisibleForTesting
List<String> parseTemplateContentParams(String content) {
    return ReUtil.findAllGroup1(PATTERN_PARAMS, content);
}
@Test
public void testParseTemplateContentParams() {
    // Prepare parameters: a template containing two {placeholder} params
    String content = "正在进行登录操作{operation},您的验证码是{code}";
    // No mocking needed
    // Invoke
    List<String> params = smsTemplateService.parseTemplateContentParams(content);
    // Assert: placeholder names are extracted in order
    assertEquals(Lists.newArrayList("operation", "code"), params);
}
/**
 * Re-applies the NUMA memory/CPU assignments of a recovered container to the
 * per-node bookkeeping. Exactly one assigned NUMA resource entry is expected;
 * any other count is logged as an error and the recovery is skipped.
 */
public synchronized void recoverNumaResource(ContainerId containerId) {
    Container container = context.getContainers().get(containerId);
    ResourceMappings resourceMappings = container.getResourceMappings();
    List<Serializable> assignedResources = resourceMappings
        .getAssignedResources(NUMA_RESOURCE_TYPE);
    if (assignedResources.size() == 1) {
        NumaResourceAllocation numaResourceAllocation =
            (NumaResourceAllocation) assignedResources.get(0);
        // Re-charge the recovered memory per NUMA node id.
        for (Entry<String, Long> nodeAndMemory : numaResourceAllocation
            .getNodeVsMemory().entrySet()) {
            numaNodeIdVsResource.get(nodeAndMemory.getKey())
                .recoverMemory(containerId, nodeAndMemory.getValue());
        }
        // Re-charge the recovered CPUs per NUMA node id.
        for (Entry<String, Integer> nodeAndCpus : numaResourceAllocation
            .getNodeVsCpus().entrySet()) {
            numaNodeIdVsResource.get(nodeAndCpus.getKey()).recoverCpus(containerId,
                nodeAndCpus.getValue());
        }
    } else {
        LOG.error("Unexpected number:" + assignedResources.size()
            + " of assigned numa resources for " + containerId
            + " while recovering.");
    }
}
@Test
public void testRecoverNumaResource() throws Exception {
    // Mock a context whose only container carries a recorded NUMA assignment
    // of 70000 memory and 4 cpus on node "0".
    @SuppressWarnings("unchecked")
    ConcurrentHashMap<ContainerId, Container> mockContainers = mock(
        ConcurrentHashMap.class);
    Context mockContext = mock(Context.class);
    Container mockContainer = mock(Container.class);
    ResourceMappings value = new ResourceMappings();
    AssignedResources assignedResources = new AssignedResources();
    assignedResources.updateAssignedResources(
        Arrays.asList(new NumaResourceAllocation("0", 70000, "0", 4)));
    value.addAssignedResources("numa", assignedResources);
    when(mockContainer.getResourceMappings()).thenReturn(value);
    when(mockContainers.get(any())).thenReturn(mockContainer);
    when(mockContext.getContainers()).thenReturn(mockContainers);
    NMStateStoreService mock = mock(NMStateStoreService.class);
    when(mockContext.getNMStateStore()).thenReturn(mock);
    numaResourceAllocator = new NumaResourceAllocator(mockContext);
    numaResourceAllocator.init(conf);
    // Recover the resources
    numaResourceAllocator.recoverNumaResource(
        ContainerId.fromString("container_1481156246874_0001_01_000001"));
    // Request resources based on the availability: node 0 is charged, so the
    // next allocation must land on node 1.
    NumaResourceAllocation numaNode = numaResourceAllocator
        .allocateNumaNodes(getContainer(
            ContainerId.fromString("container_1481156246874_0001_01_000005"),
            Resource.newInstance(2048, 1)));
    assertEquals("1", String.join(",", numaNode.getMemNodes()));
    assertEquals("1", String.join(",", numaNode.getCpuNodes()));
    // Request resources more than the available
    numaNode = numaResourceAllocator.allocateNumaNodes(getContainer(
        ContainerId.fromString("container_1481156246874_0001_01_000006"),
        Resource.newInstance(2048, 4)));
    assertNull(numaNode);
}
@Override public final void unsubscribe(URL url, NotifyListener listener) { if (!shouldSubscribe(url)) { // Should Not Subscribe return; } url = addRegistryClusterKey(url); doUnsubscribe(url, listener); }
@Test
void testUnsubscribe() {
    // do subscribe to prepare for unsubscribe verification
    Set<String> multiApps = new TreeSet<>();
    multiApps.add(APP_NAME1);
    multiApps.add(APP_NAME2);
    NotifyListener testServiceListener2 = mock(NotifyListener.class);
    URL url2 = URL.valueOf("consumer://127.0.0.1/TestService2?interface=TestService1&check=false&protocol=tri");
    when(testServiceListener2.getConsumerUrl()).thenReturn(url2);
    serviceDiscoveryRegistry.subscribeURLs(url, testServiceListener, multiApps);
    serviceDiscoveryRegistry.subscribeURLs(url2, testServiceListener2, multiApps);
    assertEquals(1, serviceDiscoveryRegistry.getServiceListeners().size());

    // do unsubscribe: removing one of two listeners keeps the shared
    // instances-changed listener alive ...
    when(mapping.getMapping(url2)).thenReturn(multiApps);
    serviceDiscoveryRegistry.doUnsubscribe(url2, testServiceListener2);
    assertEquals(1, serviceDiscoveryRegistry.getServiceListeners().size());
    ServiceInstancesChangedListener instancesChangedListener = serviceDiscoveryRegistry
        .getServiceListeners()
        .entrySet()
        .iterator()
        .next()
        .getValue();
    assertTrue(instancesChangedListener.hasListeners());

    // ... while removing the last listener tears everything down.
    when(mapping.getMapping(url)).thenReturn(multiApps);
    serviceDiscoveryRegistry.doUnsubscribe(url, testServiceListener);
    assertEquals(0, serviceDiscoveryRegistry.getServiceListeners().size());
    assertFalse(instancesChangedListener.hasListeners());
}
// Outer joins are reported as unsupported by this metadata implementation.
@Override
public boolean supportsOuterJoins() {
    return false;
}
@Test
void assertSupportsOuterJoins() {
    // The metadata hard-codes "no outer join support".
    assertFalse(metaData.supportsOuterJoins());
}
// Registers this filter to run in Zuul's "error" phase.
@Override
public String filterType() {
    return ZuulConstant.ERROR_TYPE;
}
@Test
public void testFilterType() throws Exception {
    SentinelZuulErrorFilter sentinelZuulErrorFilter = new SentinelZuulErrorFilter();
    // The error filter must declare the error phase as its type.
    Assert.assertEquals(sentinelZuulErrorFilter.filterType(), ERROR_TYPE);
}
/**
 * Maps a SeaTunnel column definition back to an IRIS column type definition.
 * Length-bounded types (STRING, BYTES) fall back to their LONG variants when
 * the length is unset or exceeds the IRIS maximum; DECIMAL precision/scale and
 * TIME scale are clamped into the valid IRIS ranges with a warning.
 *
 * @param column the SeaTunnel column to convert
 * @return the IRIS type definition
 * @throws RuntimeException (via CommonError) for unsupported SQL types
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .precision(column.getColumnLength())
                    .length(column.getColumnLength())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .scale(column.getScale())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case NULL:
            builder.columnType(IRIS_NULL);
            builder.dataType(IRIS_NULL);
            break;
        case STRING:
            // Unset/invalid length -> max VARCHAR; too long -> LONG VARCHAR.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(String.format("%s(%s)", IRIS_VARCHAR, MAX_VARCHAR_LENGTH));
                builder.dataType(IRIS_VARCHAR);
            } else if (column.getColumnLength() < MAX_VARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", IRIS_VARCHAR, column.getColumnLength()));
                builder.dataType(IRIS_VARCHAR);
            } else {
                builder.columnType(IRIS_LONG_VARCHAR);
                builder.dataType(IRIS_LONG_VARCHAR);
            }
            break;
        case BOOLEAN:
            builder.columnType(IRIS_BIT);
            builder.dataType(IRIS_BIT);
            break;
        case TINYINT:
            builder.columnType(IRIS_TINYINT);
            builder.dataType(IRIS_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(IRIS_SMALLINT);
            builder.dataType(IRIS_SMALLINT);
            break;
        case INT:
            builder.columnType(IRIS_INTEGER);
            builder.dataType(IRIS_INTEGER);
            break;
        case BIGINT:
            builder.columnType(IRIS_BIGINT);
            builder.dataType(IRIS_BIGINT);
            break;
        case FLOAT:
            builder.columnType(IRIS_FLOAT);
            builder.dataType(IRIS_FLOAT);
            break;
        case DOUBLE:
            builder.columnType(IRIS_DOUBLE);
            builder.dataType(IRIS_DOUBLE);
            break;
        case DECIMAL:
            // Clamp scale to [0, MAX_SCALE] and precision to (0, MAX_PRECISION],
            // keeping precision >= scale; log every adjustment.
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            if (precision < scale) {
                precision = scale;
            }
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                scale = MAX_SCALE;
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", IRIS_DECIMAL, precision, scale));
            builder.dataType(IRIS_DECIMAL);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            // Unset/invalid or oversized length -> LONG BINARY.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(IRIS_LONG_BINARY);
                builder.dataType(IRIS_LONG_BINARY);
            } else if (column.getColumnLength() < MAX_BINARY_LENGTH) {
                builder.dataType(IRIS_BINARY);
                builder.columnType(
                        String.format("%s(%s)", IRIS_BINARY, column.getColumnLength()));
            } else {
                builder.columnType(IRIS_LONG_BINARY);
                builder.dataType(IRIS_LONG_BINARY);
            }
            break;
        case DATE:
            builder.columnType(IRIS_DATE);
            builder.dataType(IRIS_DATE);
            break;
        case TIME:
            builder.dataType(IRIS_TIME);
            if (Objects.nonNull(column.getScale()) && column.getScale() > 0) {
                Integer timeScale = column.getScale();
                if (timeScale > MAX_TIME_SCALE) {
                    timeScale = MAX_TIME_SCALE;
                    log.warn(
                            "The time column {} type time({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to time({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIME_SCALE,
                            timeScale);
                }
                builder.columnType(String.format("%s(%s)", IRIS_TIME, timeScale));
                builder.scale(timeScale);
            } else {
                builder.columnType(IRIS_TIME);
            }
            break;
        case TIMESTAMP:
            builder.columnType(IRIS_TIMESTAMP2);
            builder.dataType(IRIS_TIMESTAMP2);
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.IRIS,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
@Test
public void testReconvertDouble() {
    // DOUBLE maps one-to-one onto IRIS_DOUBLE for both column and data type.
    Column column = PhysicalColumn.builder().name("test").dataType(BasicType.DOUBLE_TYPE).build();
    BasicTypeDefine typeDefine = IrisTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(IrisTypeConverter.IRIS_DOUBLE, typeDefine.getColumnType());
    Assertions.assertEquals(IrisTypeConverter.IRIS_DOUBLE, typeDefine.getDataType());
}
// Delegates to the SizeDeterminer, which invokes the callback with the view's
// dimensions (immediately if known, otherwise once layout/pre-draw resolves them).
@Override
public final void getSize(@NonNull SizeReadyCallback cb) {
    sizeDeterminer.getSize(cb);
}
@Test
public void testSizeCallbacksAreCalledInOrderPreDraw() {
    // Register 25 pending callbacks before the view has a size.
    SizeReadyCallback[] cbs = new SizeReadyCallback[25];
    for (int i = 0; i < cbs.length; i++) {
        cbs[i] = mock(SizeReadyCallback.class);
        target.getSize(cbs[i]);
    }

    int width = 100;
    int height = 111;
    parent.getLayoutParams().width = width;
    parent.getLayoutParams().height = height;
    activity.visible();
    // Pre-draw resolves the size and must flush callbacks in FIFO order.
    view.getViewTreeObserver().dispatchOnPreDraw();

    InOrder order = inOrder((Object[]) cbs);
    for (SizeReadyCallback cb : cbs) {
        order.verify(cb).onSizeReady(eq(width), eq(height));
    }
}
/**
 * Serializes this position as "type,beginValue,endValue"; a null bound is
 * rendered as an empty string.
 */
@Override
public String toString() {
    final Object begin = beginValue == null ? "" : beginValue;
    final Object end = endValue == null ? "" : endValue;
    return String.format("%s,%s,%s", getType(), begin, end);
}
@Test
void assertToString() {
    // "s" is the string-key type marker, followed by begin and end values.
    assertThat(new StringPrimaryKeyIngestPosition("hi", "jk").toString(), is("s,hi,jk"));
}
// Key name identifying the sliding-window rate limit algorithm.
@Override
protected String getKeyName() {
    return RateLimitEnum.SLIDING_WINDOW.getKeyName();
}
@Test
public void getKeyNameTest() {
    // The algorithm must expose the sliding-window key name verbatim.
    MatcherAssert.assertThat("sliding_window_request_rate_limiter", is(slidingWindowRateLimiterAlgorithm.getKeyName()));
}
/**
 * Creates a {@link StoreBuilder} for a {@link SessionStore} backed by the
 * given supplier, using system time.
 *
 * @param supplier   the session-bytes store supplier; must not be null
 * @param keySerde   serde for keys (null handling is up to SessionStoreBuilder — confirm)
 * @param valueSerde serde for values
 * @return a new session store builder
 * @throws NullPointerException if supplier is null
 */
public static <K, V> StoreBuilder<SessionStore<K, V>> sessionStoreBuilder(final SessionBytesStoreSupplier supplier,
                                                                          final Serde<K> keySerde,
                                                                          final Serde<V> valueSerde) {
    Objects.requireNonNull(supplier, "supplier cannot be null");
    return new SessionStoreBuilder<>(supplier, keySerde, valueSerde, Time.SYSTEM);
}
@Test
public void shouldThrowIfSupplierIsNullForSessionStoreBuilder() {
    // A null supplier must fail fast with the documented message.
    final Exception e = assertThrows(NullPointerException.class,
        () -> Stores.sessionStoreBuilder(null, Serdes.ByteArray(), Serdes.ByteArray()));
    assertEquals("supplier cannot be null", e.getMessage());
}
/**
 * Parses the parameters of a Content-Type header value, e.g.
 * "application/json; charset=UTF-8" yields { charset -> "UTF-8" }.
 * Returns {@code null} when the value carries no parameters. Segments without
 * '=' are skipped; keys and values are trimmed; insertion order is preserved.
 */
public static Map<String, String> parseContentTypeParams(String mimeType) {
    List<String> items = StringUtils.split(mimeType, ';', false);
    int count = items.size();
    if (count <= 1) {
        return null;
    }
    Map<String, String> params = new LinkedHashMap<>(count - 1);
    // First item is the media type itself; the rest are key=value parameters.
    for (String item : items.subList(1, count)) {
        int eq = item.indexOf('=');
        if (eq == -1) {
            continue;
        }
        params.put(item.substring(0, eq).trim(), item.substring(eq + 1).trim());
    }
    return params;
}
@Test
void testParseContentTypeParams() {
    // No parameters at all -> null.
    Map<String, String> map = HttpUtils.parseContentTypeParams("application/json");
    assertNull(map);
    map = HttpUtils.parseContentTypeParams("application/json; charset=UTF-8");
    match(map, "{ charset: 'UTF-8' }");
    // Whitespace around '=' and at the ends must be trimmed.
    map = HttpUtils.parseContentTypeParams("application/json; charset = UTF-8 ");
    match(map, "{ charset: 'UTF-8' }");
    map = HttpUtils.parseContentTypeParams("application/json; charset=UTF-8; version=1.2.3");
    match(map, "{ charset: 'UTF-8', version: '1.2.3' }");
    map = HttpUtils.parseContentTypeParams("application/json; charset = UTF-8 ; version=1.2.3");
    match(map, "{ charset: 'UTF-8', version: '1.2.3' }");
    // Vendor media type without a space before the parameter.
    map = HttpUtils.parseContentTypeParams("application/vnd.app.test+json;ton-version=1");
    match(map, "{ 'ton-version': '1' }");
}
// Converts the adjacency representation to a Matrix (delegates to Matrix.of).
@Override
public Matrix toMatrix() {
    return Matrix.of(graph);
}
@Test
public void testToMatrix() {
    System.out.println("toMatrix digraph = false");
    // Build an undirected 8-node graph and verify the matrix is symmetric.
    AdjacencyMatrix graph = new AdjacencyMatrix(8, false);
    graph.addEdge(0, 2);
    graph.addEdge(1, 7);
    graph.addEdge(2, 6);
    graph.addEdge(7, 4);
    graph.addEdge(3, 4);
    graph.addEdge(3, 5);
    graph.addEdge(5, 4);
    Matrix matrix = graph.toMatrix();
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            System.out.print(matrix.get(i, j) + " ");
        }
        System.out.println();
    }
    assertEquals(1.0, matrix.get(0, 2), 1E-10);
    assertEquals(1.0, matrix.get(1, 7), 1E-10);
    assertEquals(1.0, matrix.get(2, 6), 1E-10);
    assertEquals(1.0, matrix.get(7, 4), 1E-10);
    assertEquals(1.0, matrix.get(3, 4), 1E-10);
    assertEquals(1.0, matrix.get(3, 5), 1E-10);
    assertEquals(1.0, matrix.get(5, 4), 1E-10);
    // Graph is undirected.
    assertEquals(1.0, matrix.get(2, 0), 1E-10);
    assertEquals(1.0, matrix.get(7, 1), 1E-10);
    assertEquals(1.0, matrix.get(6, 2), 1E-10);
    assertEquals(1.0, matrix.get(4, 7), 1E-10);
    assertEquals(1.0, matrix.get(4, 3), 1E-10);
    assertEquals(1.0, matrix.get(5, 3), 1E-10);
    assertEquals(1.0, matrix.get(4, 5), 1E-10);
}
/**
 * Randomly picks a node carrying the given storage type, under the topology
 * read lock. A scope starting with "~" means "anywhere except that scope";
 * otherwise the search is restricted to the scope itself.
 */
public Node chooseRandomWithStorageType(final String scope,
    final Collection<Node> excludedNodes, StorageType type) {
  netlock.readLock().lock();
  try {
    final boolean isExcludedScope = scope.startsWith("~");
    return isExcludedScope
        ? chooseRandomWithStorageType(NodeBase.ROOT, scope.substring(1), excludedNodes, type)
        : chooseRandomWithStorageType(scope, null, excludedNodes, type);
  } finally {
    netlock.readLock().unlock();
  }
}
@Test
public void testChooseRandomWithStorageTypeWrapper() throws Exception {
    Node n;
    DatanodeDescriptor dd;
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r4", null, null,
        StorageType.ARCHIVE);
    HashSet<Node> excluded = new HashSet<>();
    // exclude the host on r4 (since there is only one host, no randomness here)
    excluded.add(n);

    // search with given scope being desired scope
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType(
            "/l2/d3", null, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host13") ||
            dd.getHostName().equals("host14"));
    }

    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType(
            "/l2/d3", excluded, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host14"));
    }

    // search with given scope being exclude scope
    // a total of 4 ramdisk nodes:
    // /l1/d2/r3/host7, /l2/d3/r2/host10, /l2/d4/r1/host7 and /l2/d4/r1/host10
    // so if we exclude /l2/d4/r1, if should be always either host7 or host10
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType(
            "~/l2/d4", null, StorageType.RAM_DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host7") ||
            dd.getHostName().equals("host11"));
    }

    // similar to above, except that we also exclude host10 here. so it should
    // always be host7
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r2", null, null,
        StorageType.RAM_DISK);
    // add host10 to exclude
    excluded.add(n);
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType(
            "~/l2/d4", excluded, StorageType.RAM_DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host7"));
    }
}
/**
 * REST endpoint: reads the answers of the 3 APDUs and the PIP/PP to send to
 * DigiD X; the actual work is delegated to the NIK service.
 */
@Operation(summary = "Read the answers of the 3 apdus and read the pip/pp to send to digid x")
@PostMapping(value = Constants.URL_NIK_POLYMORPHICDATA, consumes = "application/json", produces = "application/json")
public PolyDataResponse getPolymorphicDataRestService(@Valid @RequestBody NikApduResponsesRequest request) {
    return nikService.getPolymorphicDataRestService(request);
}
@Test
public void getPolymorphicDataRestServiceTest() {
    // The controller is a thin delegate: the service's response is returned untouched.
    PolyDataResponse expectedResponse = new PolyDataResponse();
    when(nikServiceMock.getPolymorphicDataRestService(any(NikApduResponsesRequest.class))).thenReturn(expectedResponse);

    PolyDataResponse actualResponse = nikController.getPolymorphicDataRestService(new NikApduResponsesRequest());

    assertEquals(expectedResponse, actualResponse);
}
/**
 * Demonstrates each Singleton variant by fetching the instance twice and
 * logging both references (identical output confirms a single instance).
 *
 * @param args command line arguments (unused)
 */
public static void main(String[] args) {
    // eagerly initialized singleton
    var ivoryTower1 = IvoryTower.getInstance();
    var ivoryTower2 = IvoryTower.getInstance();
    LOGGER.info("ivoryTower1={}", ivoryTower1);
    LOGGER.info("ivoryTower2={}", ivoryTower2);

    // lazily initialized singleton
    var threadSafeIvoryTower1 = ThreadSafeLazyLoadedIvoryTower.getInstance();
    var threadSafeIvoryTower2 = ThreadSafeLazyLoadedIvoryTower.getInstance();
    LOGGER.info("threadSafeIvoryTower1={}", threadSafeIvoryTower1);
    LOGGER.info("threadSafeIvoryTower2={}", threadSafeIvoryTower2);

    // enum singleton
    var enumIvoryTower1 = EnumIvoryTower.INSTANCE;
    var enumIvoryTower2 = EnumIvoryTower.INSTANCE;
    LOGGER.info("enumIvoryTower1={}", enumIvoryTower1);
    LOGGER.info("enumIvoryTower2={}", enumIvoryTower2);

    // double-checked locking
    var dcl1 = ThreadSafeDoubleCheckLocking.getInstance();
    LOGGER.info(dcl1.toString());
    var dcl2 = ThreadSafeDoubleCheckLocking.getInstance();
    LOGGER.info(dcl2.toString());

    // initialize on demand holder idiom
    var demandHolderIdiom = InitializingOnDemandHolderIdiom.getInstance();
    LOGGER.info(demandHolderIdiom.toString());
    var demandHolderIdiom2 = InitializingOnDemandHolderIdiom.getInstance();
    LOGGER.info(demandHolderIdiom2.toString());

    // initialize singleton using Bill Pugh's implementation
    var billPughSingleton = BillPughImplementation.getInstance();
    LOGGER.info(billPughSingleton.toString());
    var billPughSingleton2 = BillPughImplementation.getInstance();
    LOGGER.info(billPughSingleton2.toString());
}
@Test
void shouldExecuteWithoutException() {
    // Smoke test: the demo main must run to completion.
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
// Convenience overload: delegates with the files-to-stage list (empty when
// unset) and the Flink configuration directory from the pipeline options.
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
  return createStreamExecutionEnvironment(
      options,
      MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
      options.getFlinkConfDir());
}
@Test
public void shouldAllowPortOmissionForRemoteEnvironmentStreaming() {
    // A master address without a port must fall back to the REST default port.
    FlinkPipelineOptions options = getDefaultPipelineOptions();
    options.setRunner(FlinkRunner.class);
    options.setFlinkMaster("host");

    StreamExecutionEnvironment sev =
        FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);

    assertThat(sev, instanceOf(RemoteStreamEnvironment.class));
    checkHostAndPort(sev, "host", RestOptions.PORT.defaultValue());
}
/**
 * Handles a "get notifications" app request: resolves the app session and its
 * authenticator, logs the action, and returns the account's notifications.
 * Responds NOK ("no_session") when the session is not authenticated or the
 * authenticator is not activated.
 */
@Override
public AppResponse process(Flow flow, MijnDigidSessionRequest request) throws FlowNotDefinedException, IOException, NoSuchAlgorithmException, SharedServiceClientException {
    appSession = appSessionService.getSession(request.getMijnDigidSessionId());
    appAuthenticator = appAuthenticatorService.findByUserAppId(appSession.getUserAppId());

    checkSwitchesEnabled();

    // Remote audit log entry 1468 for the get_notifications action.
    digidClient.remoteLog("1468", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(),
        lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName(),
        lowerUnderscore(HUMAN_PROCESS), "get_notifications",
        lowerUnderscore(APP_CODE), appAuthenticator.getAppCode()));

    if (!isAppSessionAuthenticated(appSession) || !isAppAuthenticatorActivated(appAuthenticator)){
        return new NokResponse("no_session");
    }

    return nsClient.getNotifications(appAuthenticator.getAccountId());
}
@Test
public void getEmptyListNotificationsTest() throws FlowNotDefinedException, SharedServiceClientException, IOException, NoSuchAlgorithmException {
    //given: an authenticated session whose account has no notifications
    when(appSessionService.getSession(any())).thenReturn(mockedAppSession);
    when(appAuthenticatorService.findByUserAppId(any())).thenReturn(mockedAppAuthenticator);
    when(nsClient.getNotifications(anyLong())).thenReturn(new NotificationResponse("OK", List.of()));
    when(switchService.digidAppSwitchEnabled()).thenReturn(true);
    //when
    NotificationResponse appResponse = (NotificationResponse) notificationsGet.process(mockedFlow, mockedRequest);
    //then: OK status with an empty notification list
    assertEquals("OK", appResponse.getStatus());
    assertEquals(0, appResponse.getNotifications().size());
}
/**
 * Registers a metric only when it is one of the tracked throughput metrics
 * reported at the topic level; everything else is ignored.
 */
@Override
public void metricChange(final KafkaMetric metric) {
    final boolean relevant = THROUGHPUT_METRIC_NAMES.contains(metric.metricName().name())
            && StreamsMetricsImpl.TOPIC_LEVEL_GROUP.equals(metric.metricName().group());
    if (relevant) {
        addMetric(
            metric,
            getQueryId(metric),
            getTopic(metric)
        );
    }
}
@Test
public void shouldThrowWhenTopicNameTagIsMissing() {
    // When: the metric tags lack "topic", extracting it must fail loudly.
    assertThrows(
        KsqlException.class,
        () -> listener.metricChange(mockMetric(
            BYTES_CONSUMED_TOTAL,
            2D,
            ImmutableMap.of(
                "thread-id", THREAD_ID,
                "task-id", TASK_ID_1,
                "processor-node-id", PROCESSOR_NODE_ID))
        )
    );
}
// Collects all Flink config entries prefixed with the containerized-master-env prefix,
// returning them (prefix stripped) as the JobManager container's environment variables.
@Override public Map<String, String> getEnvironments() { return ConfigurationUtils.getPrefixedKeyValuePairs( ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX, flinkConfig); }
// Round-trip: prefixed env entries written to the config come back, prefix-stripped, from getEnvironments().
@Test void testGetEnvironments() { final Map<String, String> expectedEnvironments = new HashMap<>(); expectedEnvironments.put("k1", "v1"); expectedEnvironments.put("k2", "v2"); expectedEnvironments.forEach( (k, v) -> flinkConfig.setString( ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX + k, v)); final Map<String, String> resultEnvironments = kubernetesJobManagerParameters.getEnvironments(); assertThat(resultEnvironments).isEqualTo(expectedEnvironments); }
// Decodes one MySQL packet: reads the 3-byte little-endian payload length, waits for the full
// packet (sequence byte + payload) before consuming, and aggregates multi-packet payloads —
// a payload of exactly MAX_PACKET_LENGTH signals a continuation packet that is buffered
// (sequence byte skipped) until the terminating shorter packet arrives.
@Override public void decode(final ChannelHandlerContext context, final ByteBuf in, final List<Object> out) { int payloadLength = in.markReaderIndex().readUnsignedMediumLE(); int remainPayloadLength = SEQUENCE_LENGTH + payloadLength; if (in.readableBytes() < remainPayloadLength) { in.resetReaderIndex(); return; } ByteBuf message = in.readRetainedSlice(remainPayloadLength); if (MAX_PACKET_LENGTH == payloadLength) { pendingMessages.add(message.skipBytes(SEQUENCE_LENGTH)); } else if (pendingMessages.isEmpty()) { out.add(message); } else { aggregateMessages(context, message, out); } }
// A packet with payload length 0 (only the sequence byte remains readable) still produces one
// decoded message.
@Test void assertDecodeWithEmptyPacket() { when(byteBuf.markReaderIndex()).thenReturn(byteBuf); when(byteBuf.readableBytes()).thenReturn(1); when(byteBuf.readUnsignedMediumLE()).thenReturn(0); List<Object> out = new LinkedList<>(); new MySQLPacketCodecEngine().decode(context, byteBuf, out); assertThat(out.size(), is(1)); }
/**
 * Initializes the Compute Engine on first server start only; subsequent invocations
 * are no-ops so that medium tests may run the startup sequence repeatedly.
 */
@Override
public void onServerStart(Server server) {
    // Guard clause: initialization runs at most once.
    if (done) {
        return;
    }
    initCe();
    this.done = true;
}
// Second call to onServerStart must not re-trigger the schedulers (idempotent startup).
@Test public void onServerStart_has_no_effect_if_called_twice_to_support_medium_test_doing_startup_tasks_multiple_times() { underTest.onServerStart(server); reset(processingScheduler, cleaningScheduler); underTest.onServerStart(server); verifyNoInteractions(processingScheduler, cleaningScheduler); }
public Future<Void> closeAsync() { // Execute close asynchronously in case this is being invoked on an eventloop to avoid blocking return GlobalEventExecutor.INSTANCE.submit(new Callable<Void>() { @Override public Void call() throws Exception { close(); return null; } }); }
// End-to-end: channels acquired/released from a SimpleChannelPool are open before closeAsync()
// and closed after it completes.
@Test public void testCloseAsync() throws Exception { final LocalAddress addr = new LocalAddress(getLocalAddrId()); final EventLoopGroup group = new DefaultEventLoopGroup(); // Start server final ServerBootstrap sb = new ServerBootstrap() .group(group) .channel(LocalServerChannel.class) .childHandler(new ChannelInitializer<LocalChannel>() { @Override protected void initChannel(LocalChannel ch) throws Exception { ch.pipeline().addLast(new ChannelInboundHandlerAdapter()); } }); final Channel sc = sb.bind(addr).syncUninterruptibly().channel(); // Create pool, acquire and return channels final Bootstrap bootstrap = new Bootstrap() .channel(LocalChannel.class).group(group).remoteAddress(addr); final SimpleChannelPool pool = new SimpleChannelPool(bootstrap, new CountingChannelPoolHandler()); Channel ch1 = pool.acquire().syncUninterruptibly().getNow(); Channel ch2 = pool.acquire().syncUninterruptibly().getNow(); pool.release(ch1).get(1, TimeUnit.SECONDS); pool.release(ch2).get(1, TimeUnit.SECONDS); // Assert that returned channels are open before close assertTrue(ch1.isOpen()); assertTrue(ch2.isOpen()); // Close asynchronously with timeout pool.closeAsync().get(1, TimeUnit.SECONDS); // Assert channels were indeed closed assertFalse(ch1.isOpen()); assertFalse(ch2.isOpen()); sc.close().sync(); pool.close(); group.shutdownGracefully(); }
// Initializes the source node: creates the process-at-source sensor BEFORE super.init() (see
// inline comment — backwards-compatibility quirk), then prepares key/value deserializers,
// wrapping any ConfigException/StreamsException in a StreamsException that names the node.
@Override public void init(final InternalProcessorContext<KIn, VIn> context) { // It is important to first create the sensor before calling init on the // parent object. Otherwise due to backwards compatibility an empty sensor // without parent is created with the same name. // Once the backwards compatibility is not needed anymore it might be possible to // change this. processAtSourceSensor = ProcessorNodeMetrics.processAtSourceSensor( Thread.currentThread().getName(), context.taskId().toString(), context.currentNode().name(), context.metrics() ); super.init(context); this.context = context; try { keyDeserializer = prepareKeyDeserializer(keyDeserializer, context, name()); } catch (final ConfigException | StreamsException e) { throw new StreamsException(String.format("Failed to initialize key serdes for source node %s", name()), e, context.taskId()); } try { valDeserializer = prepareValueDeserializer(valDeserializer, context, name()); } catch (final ConfigException | StreamsException e) { throw new StreamsException(String.format("Failed to initialize value serdes for source node %s", name()), e, context.taskId()); } }
// A deserializer-preparation failure during init must surface as a StreamsException whose
// message names the failing source node.
@Test public void shouldThrowStreamsExceptionWithExplicitErrorMessage() { final InternalMockProcessorContext<String, String> context = new InternalMockProcessorContext<>(); final SourceNode<String, String> node = new SourceNode<>(context.currentNode().name(), new TheDeserializer(), new TheDeserializer()); utilsMock.when(() -> WrappingNullableUtils.prepareKeyDeserializer(any(), any(), any())).thenThrow(new StreamsException("")); final Throwable exception = assertThrows(StreamsException.class, () -> node.init(context)); assertThat(exception.getMessage(), equalTo("Failed to initialize key serdes for source node TESTING_NODE")); }
// Validates a CREATE READWRITE_SPLITTING RULE statement against the current rule configuration
// (null when no rule exists yet) before the update is applied.
@Override public void checkBeforeUpdate(final CreateReadwriteSplittingRuleStatement sqlStatement) { ReadwriteSplittingRuleStatementChecker.checkCreation(database, sqlStatement.getRules(), null == rule ? null : rule.getConfiguration(), sqlStatement.isIfNotExists()); }
// Creating a rule whose logic name collides with an existing data-source mapper resource must
// fail with InvalidRuleConfigurationException.
@Test void assertCheckSQLStatementWithDuplicateLogicResource() { DataSourceMapperRuleAttribute ruleAttribute = mock(DataSourceMapperRuleAttribute.class); when(ruleAttribute.getDataSourceMapper()).thenReturn(Collections.singletonMap("duplicate_ds", Collections.singleton("ds_0"))); when(database.getRuleMetaData().getAttributes(DataSourceMapperRuleAttribute.class)).thenReturn(Collections.singleton(ruleAttribute)); ReadwriteSplittingRuleSegment ruleSegment = new ReadwriteSplittingRuleSegment("duplicate_ds", "write_ds_0", Arrays.asList("read_ds_0", "read_ds_1"), new AlgorithmSegment(null, new Properties())); executor.setDatabase(database); assertThrows(InvalidRuleConfigurationException.class, () -> executor.checkBeforeUpdate(createSQLStatement(false, ruleSegment))); }
// Returns the ConfigMap's resource version when the key exists with a live state handle.
// Entries marked for deletion, or whose serialized handle cannot be read (a later add/replace
// will remove them), are reported as not existing. Throws when the ConfigMap itself is absent.
@Override public StringResourceVersion exists(String key) throws Exception { checkNotNull(key, "Key in ConfigMap."); return kubeClient .getConfigMap(configMapName) .map( configMap -> { final String content = configMap.getData().get(key); if (content != null) { try { final StateHandleWithDeleteMarker<T> stateHandle = deserializeStateHandle(content); if (stateHandle.isMarkedForDeletion()) { return StringResourceVersion.notExisting(); } } catch (IOException e) { // Any calls to add or replace will try to remove this resource, // so we can simply treat it as non-existent. return StringResourceVersion.notExisting(); } return StringResourceVersion.valueOf( configMap.getResourceVersion()); } return StringResourceVersion.notExisting(); }) .orElseThrow(this::getConfigMapNotExistException); }
// An entry marked for deletion is reported as non-existing, and exists() itself does not
// pro-actively discard the underlying state.
@Test void testExistsWithDeletingEntry() throws Exception { new Context() { { runTest( () -> { leaderCallbackGrantLeadership(); final TestingLongStateHandleHelper.LongStateHandle state = addDeletingEntry(getLeaderConfigMap(), key, 1337L); final KubernetesStateHandleStore< TestingLongStateHandleHelper.LongStateHandle> store = new KubernetesStateHandleStore<>( flinkKubeClient, LEADER_CONFIGMAP_NAME, longStateStorage, filter, LOCK_IDENTITY); final StringResourceVersion resourceVersion = store.exists(key); assertThat(resourceVersion.isExisting()).isFalse(); // We don't try to pro-actively remove the entry here. assertThat(state.isDiscarded()).isFalse(); }); } }; }
/**
 * Writes the given numeric value to the payload as an 8-byte MySQL binary integer.
 * Accepts BigDecimal, Integer, BigInteger, or Long; each is narrowed/unboxed to a
 * primitive long before the single write.
 */
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
    final long longValue;
    if (value instanceof BigDecimal) {
        longValue = ((BigDecimal) value).longValue();
    } else if (value instanceof Integer) {
        longValue = ((Integer) value).longValue();
    } else if (value instanceof BigInteger) {
        longValue = ((BigInteger) value).longValue();
    } else {
        // Fallback: the protocol contract expects a Long here.
        longValue = (Long) value;
    }
    payload.writeInt8(longValue);
}
// A BigDecimal value is narrowed to its long value before being written as int8.
@Test void assertWriteWithBigDecimal() { new MySQLInt8BinaryProtocolValue().write(payload, new BigDecimal(1L)); verify(payload).writeInt8(1L); }
// Returns a description of this topology's structure; synchronized because the underlying
// builder is mutated by other synchronized topology operations.
public synchronized TopologyDescription describe() { return internalTopologyBuilder.describe(); }
// An anonymous Materialized with only a StoreType must not change the topology description,
// and the built topology must use RocksDB (persistent) for the source KTable but in-memory
// (non-persistent) for the count store.
@Test public void tableAnonymousStoreTypedMaterializedCountShouldPreserveTopologyStructure() { final StreamsBuilder builder = new StreamsBuilder(); builder.table("input-topic") .groupBy((key, value) -> null) .count(Materialized.as(Materialized.StoreType.IN_MEMORY)); final Topology topology = builder.build(); final TopologyDescription describe = topology.describe(); assertEquals( "Topologies:\n" + " Sub-topology: 0\n" + " Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic])\n" + " --> KTABLE-SOURCE-0000000002\n" + " Processor: KTABLE-SOURCE-0000000002 (stores: [input-topic-STATE-STORE-0000000000])\n" + " --> KTABLE-SELECT-0000000003\n" + " <-- KSTREAM-SOURCE-0000000001\n" + " Processor: KTABLE-SELECT-0000000003 (stores: [])\n" + " --> KSTREAM-SINK-0000000005\n" + " <-- KTABLE-SOURCE-0000000002\n" + " Sink: KSTREAM-SINK-0000000005 (topic: KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition)\n" + " <-- KTABLE-SELECT-0000000003\n" + "\n" + " Sub-topology: 1\n" + " Source: KSTREAM-SOURCE-0000000006 (topics: [KTABLE-AGGREGATE-STATE-STORE-0000000004-repartition])\n" + " --> KTABLE-AGGREGATE-0000000007\n" + " Processor: KTABLE-AGGREGATE-0000000007 (stores: [KTABLE-AGGREGATE-STATE-STORE-0000000004])\n" + " --> none\n" + " <-- KSTREAM-SOURCE-0000000006\n" + "\n", describe.toString() ); topology.internalTopologyBuilder.setStreamsConfig(streamsConfig); final ProcessorTopology processorTopology = topology.internalTopologyBuilder.setApplicationId("test").buildTopology(); // one for ktable, and one for count operation assertThat(processorTopology.stateStores().size(), is(2)); // ktable store is rocksDB (default) assertThat(processorTopology.stateStores().get(0).persistent(), is(true)); // count store is in-memory assertThat(processorTopology.stateStores().get(1).persistent(), is(false)); }
// Task endpoint: triggers a resend of all previously failed/unsent afnemersindicatie messages.
// NOTE(review): a GET performing a mutating action is unconventional — presumably invoked by an
// internal scheduler; confirm before exposing publicly.
@GetMapping("/task/resend-failed-messages") @Operation(summary = "Resend all failed messages") public void resend() { afnemersindicatieService.resendUnsentMessages(); }
// Resending via the scheduled task path: each SEND_FAILED bericht is re-sent to DGL exactly once
// and then deleted from the repository.
@Test void testResend(){ when(afnemersbericht.getType()).thenReturn(Afnemersbericht.Type.Ap01); List<Afnemersbericht> afnemersberichten = Arrays.asList(afnemersbericht); when(afnemersberichtRepository.findByStatus(Afnemersbericht.Status.SEND_FAILED)).thenReturn(afnemersberichten); classUnderTest.performScheduleTask(new ScheduledTask(RESENT_TASK_NAME)); verify(dglSendServiceMock, times(1)).sendAfnemersBerichtAanDGL(any(), any()); verify(afnemersberichtRepository, times(1)).delete(afnemersbericht); }
public static Configuration getTimelineServiceHBaseConf(Configuration conf) throws IOException { if (conf == null) { throw new NullPointerException(); } Configuration hbaseConf; String timelineServiceHBaseConfFilePath = conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE); if (timelineServiceHBaseConfFilePath != null && timelineServiceHBaseConfFilePath.length() > 0) { LOG.info("Using hbase configuration at " + timelineServiceHBaseConfFilePath); // create a clone so that we don't mess with out input one hbaseConf = new Configuration(conf); Configuration plainHBaseConf = new Configuration(false); Path hbaseConfigPath = new Path(timelineServiceHBaseConfFilePath); try (FileSystem fs = FileSystem.newInstance(hbaseConfigPath.toUri(), conf); FSDataInputStream in = fs.open(hbaseConfigPath)) { plainHBaseConf.addResource(in); HBaseConfiguration.merge(hbaseConf, plainHBaseConf); } } else { // default to what is on the classpath hbaseConf = HBaseConfiguration.create(conf); } return hbaseConf; }
// Copies the HBase config file into a MiniDFSCluster and verifies it can be read back from HDFS
// through getTimelineServiceHBaseConf.
@Test void testWithHbaseConfAtHdfsFileSystem() throws IOException { MiniDFSCluster hdfsCluster = null; try { HdfsConfiguration hdfsConfig = new HdfsConfiguration(); hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) .numDataNodes(1).build(); FileSystem fs = hdfsCluster.getFileSystem(); Path path = new Path("/tmp/hdfs-site.xml"); fs.copyFromLocalFile(new Path(hbaseConfigPath), path); // Verifying With Hbase Conf from HDFS FileSystem Configuration conf = new Configuration(hdfsConfig); conf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE, path.toString()); Configuration hbaseConfFromHdfs = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf); assertEquals("test", hbaseConfFromHdfs.get("input"), "Failed to read hbase config from Hdfs FileSystem"); } finally { if (hdfsCluster != null) { hdfsCluster.shutdown(); } } }
// Validates HDFS-specific properties for the UFS path: skips non-HDFS schemes, aborts if the
// HDFS config files cannot be loaded, then checks for config conflicts and finally the
// nameservice setup. Returns the first non-OK result encountered.
@Override public ValidationTaskResult validateImpl(Map<String, String> optionsMap) { if (!ValidationUtils.isHdfsScheme(mPath)) { mMsg.append(String.format( "UFS path %s is not HDFS. Skipping validation for HDFS properties.%n", mPath)); return new ValidationTaskResult(ValidationUtils.State.SKIPPED, getName(), mMsg.toString(), mAdvice.toString()); } ValidationTaskResult loadConfig = loadHdfsConfig(); if (loadConfig.getState() != ValidationUtils.State.OK) { // If failed to load config files, abort return loadConfig; } // no conflicts between these two ValidationTaskResult last = checkConflicts(); if (last.getState() == ValidationUtils.State.OK) { last = checkNameservice(); } return last; }
// Non-conflicting hdfs-site.xml and core-site.xml (only key1 shared, with equal values) must
// validate as OK.
@Test public void validConf() { String hdfsSite = Paths.get(sTestDir.toPath().toString(), "hdfs-site.xml").toString(); ValidationTestUtils.writeXML(hdfsSite, ImmutableMap.of("key1", "value1", "key3", "value3")); String coreSite = Paths.get(sTestDir.toPath().toString(), "core-site.xml").toString(); ValidationTestUtils.writeXML(coreSite, ImmutableMap.of("key1", "value1", "key4", "value4")); CONF.set(PropertyKey.UNDERFS_HDFS_CONFIGURATION, hdfsSite + HdfsConfValidationTask.SEPARATOR + coreSite); HdfsConfValidationTask task = new HdfsConfValidationTask("hdfs://namenode:9000/alluxio", CONF); ValidationTaskResult result = task.validateImpl(ImmutableMap.of()); assertEquals(ValidationUtils.State.OK, result.getState()); }
// Constant folding for date_trunc(fmt, date): truncates a DATE (day/month/year/week/quarter)
// or DATETIME (additionally second/minute/hour) to the start of the requested unit.
// Weeks start on Monday; quarters on the first month of the quarter. Unknown format strings
// raise IllegalArgumentException. NOTE(review): for a DATE before Monday of its ISO week,
// `with(DayOfWeek.MONDAY)` adjusts within the current week — relies on java.time adjuster
// semantics.
@ConstantFunction.List(list = { @ConstantFunction(name = "date_trunc", argTypes = {VARCHAR, DATETIME}, returnType = DATETIME, isMonotonic = true), @ConstantFunction(name = "date_trunc", argTypes = {VARCHAR, DATE}, returnType = DATE, isMonotonic = true) }) public static ConstantOperator dateTrunc(ConstantOperator fmt, ConstantOperator date) { if (date.getType().isDate()) { switch (fmt.getVarchar()) { case "day": return ConstantOperator.createDateOrNull(date.getDate().truncatedTo(ChronoUnit.DAYS)); case "month": return ConstantOperator.createDateOrNull( date.getDate().with(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS)); case "year": return ConstantOperator.createDateOrNull( date.getDate().with(TemporalAdjusters.firstDayOfYear()).truncatedTo(ChronoUnit.DAYS)); case "week": return ConstantOperator.createDateOrNull( date.getDate().with(DayOfWeek.MONDAY).truncatedTo(ChronoUnit.DAYS)); case "quarter": int year = date.getDate().getYear(); int month = date.getDate().getMonthValue(); int quarterMonth = (month - 1) / 3 * 3 + 1; LocalDateTime quarterDate = LocalDateTime.of(year, quarterMonth, 1, 0, 0); return ConstantOperator.createDateOrNull(quarterDate); default: throw new IllegalArgumentException(fmt + " not supported in date_trunc format string"); } } else { switch (fmt.getVarchar()) { case "second": return ConstantOperator.createDatetimeOrNull(date.getDatetime().truncatedTo(ChronoUnit.SECONDS)); case "minute": return ConstantOperator.createDatetimeOrNull(date.getDatetime().truncatedTo(ChronoUnit.MINUTES)); case "hour": return ConstantOperator.createDatetimeOrNull(date.getDatetime().truncatedTo(ChronoUnit.HOURS)); case "day": return ConstantOperator.createDatetimeOrNull(date.getDatetime().truncatedTo(ChronoUnit.DAYS)); case "month": return ConstantOperator.createDatetimeOrNull( date.getDatetime().with(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS)); case "year": return ConstantOperator.createDatetimeOrNull( date.getDatetime().with(TemporalAdjusters.firstDayOfYear()).truncatedTo(ChronoUnit.DAYS)); case "week": return ConstantOperator.createDatetimeOrNull( date.getDatetime().with(DayOfWeek.MONDAY).truncatedTo(ChronoUnit.DAYS)); case "quarter": int year = date.getDatetime().getYear(); int month = date.getDatetime().getMonthValue(); int quarterMonth = (month - 1) / 3 * 3 + 1; LocalDateTime quarterDate = LocalDateTime.of(year, quarterMonth, 1, 0, 0); return ConstantOperator.createDatetimeOrNull(quarterDate); default: throw new IllegalArgumentException(fmt + " not supported in date_trunc format string"); } } }
// Table-driven check of date_trunc over all supported units (second/minute/hour/day/month/year/
// week/quarter), including week boundaries, leap-year dates, and quarter starts; also asserts
// that an unknown format string raises IllegalArgumentException.
@Test public void dateTrunc() { String[][] testCases = { {"second", "2015-03-23 09:23:55", "2015-03-23T09:23:55"}, {"minute", "2015-03-23 09:23:55", "2015-03-23T09:23"}, {"hour", "2015-03-23 09:23:55", "2015-03-23T09:00"}, {"day", "2015-03-23 09:23:55", "2015-03-23T00:00"}, {"month", "2015-03-23 09:23:55", "2015-03-01T00:00"}, {"year", "2015-03-23 09:23:55", "2015-01-01T00:00"}, {"week", "2015-01-01 09:23:55", "2014-12-29T00:00"}, {"week", "2015-03-22 09:23:55", "2015-03-16T00:00"}, {"week", "2015-03-23 09:23:55", "2015-03-23T00:00"}, {"week", "2015-03-24 09:23:55", "2015-03-23T00:00"}, {"week", "2020-02-29 09:23:55", "2020-02-24T00:00"}, {"quarter", "2015-01-01 09:23:55", "2015-01-01T00:00"}, {"quarter", "2015-03-23 09:23:55", "2015-01-01T00:00"}, {"quarter", "2015-04-01 09:23:55", "2015-04-01T00:00"}, {"quarter", "2015-05-23 09:23:55", "2015-04-01T00:00"}, {"quarter", "2015-07-01 09:23:55", "2015-07-01T00:00"}, {"quarter", "2015-07-23 09:23:55", "2015-07-01T00:00"}, {"quarter", "2015-10-01 09:23:55", "2015-10-01T00:00"}, {"quarter", "2015-11-23 09:23:55", "2015-10-01T00:00"}, // The following cases are migrated from BE UT. {"day", "2020-01-01 09:23:55", "2020-01-01T00:00"}, {"day", "2020-02-02 09:23:55", "2020-02-02T00:00"}, {"day", "2020-03-06 09:23:55", "2020-03-06T00:00"}, {"day", "2020-04-08 09:23:55", "2020-04-08T00:00"}, {"day", "2020-05-09 09:23:55", "2020-05-09T00:00"}, {"day", "2020-11-03 09:23:55", "2020-11-03T00:00"}, {"month", "2020-01-01 09:23:55", "2020-01-01T00:00"}, {"month", "2020-02-02 09:23:55", "2020-02-01T00:00"}, {"month", "2020-03-06 09:23:55", "2020-03-01T00:00"}, {"month", "2020-04-08 09:23:55", "2020-04-01T00:00"}, {"month", "2020-05-09 09:23:55", "2020-05-01T00:00"}, {"month", "2020-11-03 09:23:55", "2020-11-01T00:00"}, {"year", "2020-01-01 09:23:55", "2020-01-01T00:00"}, {"year", "2020-02-02 09:23:55", "2020-01-01T00:00"}, {"year", "2020-03-06 09:23:55", "2020-01-01T00:00"}, {"year", "2020-04-08 09:23:55", "2020-01-01T00:00"}, {"year", "2020-05-09 09:23:55", "2020-01-01T00:00"}, {"year", "2020-11-03 09:23:55", "2020-01-01T00:00"}, {"week", "2020-01-01 09:23:55", "2019-12-30T00:00"}, {"week", "2020-02-02 09:23:55", "2020-01-27T00:00"}, {"week", "2020-03-06 09:23:55", "2020-03-02T00:00"}, {"week", "2020-04-08 09:23:55", "2020-04-06T00:00"}, {"week", "2020-05-09 09:23:55", "2020-05-04T00:00"}, {"week", "2020-11-03 09:23:55", "2020-11-02T00:00"}, {"quarter", "2020-01-01 09:23:55", "2020-01-01T00:00"}, {"quarter", "2020-02-02 09:23:55", "2020-01-01T00:00"}, {"quarter", "2020-03-06 09:23:55", "2020-01-01T00:00"}, {"quarter", "2020-04-08 09:23:55", "2020-04-01T00:00"}, {"quarter", "2020-05-09 09:23:55", "2020-04-01T00:00"}, {"quarter", "2020-11-03 09:23:55", "2020-10-01T00:00"}, }; DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); for (String[] tc : testCases) { ConstantOperator fmt = ConstantOperator.createVarchar(tc[0]); ConstantOperator date = ConstantOperator.createDatetime(LocalDateTime.parse(tc[1], formatter)); assertEquals(tc[2], ScalarOperatorFunctions.dateTrunc(fmt, date).getDatetime().toString()); } Assert.assertThrows("<ERROR> not supported in date_trunc format string", IllegalArgumentException.class, () -> ScalarOperatorFunctions.dateTrunc(ConstantOperator.createVarchar("<ERROR>"), O_DT_20150323_092355) .getVarchar()); }
// Saves the namespace image, but skips the save (returning false) when throttling is requested
// (timeWindow/txGap > 0) and the latest checkpoint is both recent enough (within timeWindow
// seconds) and close enough in transactions (within txGap of the last applied/written txid).
// Returns true when a new image was actually saved.
public synchronized boolean saveNamespace(long timeWindow, long txGap, FSNamesystem source) throws IOException { if (timeWindow > 0 || txGap > 0) { final FSImageStorageInspector inspector = storage.readAndInspectDirs( EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK), StartupOption.REGULAR); FSImageFile image = inspector.getLatestImages().get(0); File imageFile = image.getFile(); final long checkpointTxId = image.getCheckpointTxId(); final long checkpointAge = Time.now() - imageFile.lastModified(); if (checkpointAge <= timeWindow * 1000 && checkpointTxId >= this.getCorrectLastAppliedOrWrittenTxId() - txGap) { return false; } } saveNamespace(source, NameNodeFile.IMAGE, null); return true; }
// After adding a block with a negative (striped-range) ID, snapshotting, truncating, saving the
// namespace and restarting, the BlockManager must still report hasNonEcBlockUsingStripedID().
@Test public void testHasNonEcBlockUsingStripedIDForLoadSnapshot() throws IOException{ // start a cluster Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9) .build(); cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); FSNamesystem fns = cluster.getNamesystem(); String testDir = "/test_block_manager"; String testFile = "testfile_loadSnapshot"; String testFilePath = testDir + "/" + testFile; String clientName = "testUser_loadSnapshot"; String clientMachine = "testMachine_loadSnapshot"; long blkId = -1; long blkNumBytes = 1024; long timestamp = 1426222918; Path d = new Path(testDir); fs.mkdir(d, new FsPermission("755")); fs.allowSnapshot(d); Path p = new Path(testFilePath); DFSTestUtil.createFile(fs, p, 0, (short) 1, 1); BlockInfoContiguous cBlk = new BlockInfoContiguous( new Block(blkId, blkNumBytes, timestamp), (short)3); INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath); file.toUnderConstruction(clientName, clientMachine); file.addBlock(cBlk); TestINodeFile.toCompleteFile(file); fs.createSnapshot(d,"testHasNonEcBlockUsingStripeID"); fs.truncate(p,0); fns.enterSafeMode(false); fns.saveNamespace(0, 0); cluster.restartNameNodes(); cluster.waitActive(); fns = cluster.getNamesystem(); assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID()); cluster.shutdown(); cluster = null; } finally { if (cluster != null) { cluster.shutdown(); } } }
// Resolves the catalog and identifier from a (possibly multi-part) table name, loads the Spark
// table from that catalog, and unwraps it to the underlying Iceberg table.
public static org.apache.iceberg.Table loadIcebergTable(SparkSession spark, String name) throws ParseException, NoSuchTableException { CatalogAndIdentifier catalogAndIdentifier = catalogAndIdentifier(spark, name); TableCatalog catalog = asTableCatalog(catalogAndIdentifier.catalog); Table sparkTable = catalog.loadTable(catalogAndIdentifier.identifier); return toIcebergTable(sparkTable); }
// A table created through a configured Hive SparkCatalog can be loaded back as an Iceberg table
// under its fully-qualified name.
@Test public void testLoadIcebergTable() throws Exception { spark.conf().set("spark.sql.catalog.hive", SparkCatalog.class.getName()); spark.conf().set("spark.sql.catalog.hive.type", "hive"); spark.conf().set("spark.sql.catalog.hive.default-namespace", "default"); String tableFullName = "hive.default.tbl"; sql("CREATE TABLE %s (c1 bigint, c2 string, c3 string) USING iceberg", tableFullName); Table table = Spark3Util.loadIcebergTable(spark, tableFullName); assertThat(table.name()).isEqualTo(tableFullName); }
// Returns the set of checked volumes. NOTE(review): despite the name, this adds EVERY checked
// volume without consulting available space — as visible here the "low on space" filtering (if
// any) must happen elsewhere, or volumes only enter the map when low; confirm against the
// CheckedVolume bookkeeping before relying on the name.
@VisibleForTesting Collection<String> getVolumesLowOnSpace() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Going to check the following volumes disk space: " + volumes); } Collection<String> lowVolumes = new ArrayList<String>(); for (CheckedVolume volume : volumes.values()) { lowVolumes.add(volume.getVolume()); } return lowVolumes; }
// When the edits dir and the extra checked-volumes setting point at the SAME directory, the
// checker must deduplicate and report the volume only once.
@Test public void testCheckingExtraVolumes() throws IOException { Configuration conf = new Configuration(); File nameDir = new File(BASE_DIR, "name-dir"); nameDir.mkdirs(); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath()); conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE); NameNodeResourceChecker nb = new NameNodeResourceChecker(conf); assertEquals("Should not check the same volume more than once.", 1, nb.getVolumesLowOnSpace().size()); }
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? 
"Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 
1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); 
setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
// A "(new personal best)" fight-duration message followed by the Grotesque Guardians kc message
// must store both the pb (seconds, including the fractional-precision variant) and the kc.
@Test public void testGgNewPb() { ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Fight duration: <col=ff0000>1:36</col> (new personal best)", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Grotesque Guardians kill count is: <col=ff0000>179</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "grotesque guardians", 96.0); verify(configManager).setRSProfileConfiguration("killcount", "grotesque guardians", 179); // Precise times chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Fight duration: <col=ff0000>1:36.40</col> (new personal best)", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Grotesque Guardians kill count is: <col=ff0000>179</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "grotesque guardians", 96.4); }
/**
 * Drops blank (null/empty/whitespace-only) entries from the filter array.
 * Returns null when the input is null or empty, matching the original contract.
 */
String[] cleanFilters( String[] filters ) {
  if ( ArrayUtils.isEmpty( filters ) ) {
    return null;
  }
  return Arrays.stream( filters )
    .filter( f -> !StringUtils.isBlank( f ) )
    .toArray( String[]::new );
}
// cleanFilters: null/empty input -> null; blank entries are stripped; non-blank entries survive
// in order.
@Test public void testCleanFilters() { String[] EMPTY_ARRAY = new String[]{}; assertNull( testInstance.cleanFilters( null ) ); assertNull( testInstance.cleanFilters( new String[]{} ) ); assertArrayEquals( EMPTY_ARRAY, testInstance.cleanFilters( new String[]{ null } ) ); assertArrayEquals( EMPTY_ARRAY, testInstance.cleanFilters( new String[]{ null, null, null } ) ); assertArrayEquals( EMPTY_ARRAY, testInstance.cleanFilters( new String[]{ " ", null, "" } ) ); assertArrayEquals( new String[]{ "TXT", "CSV" }, testInstance.cleanFilters( new String[]{ FilterType.TXT.toString(), null, FilterType.CSV.toString(), "" } ) ); assertArrayEquals( new String[]{ "TXT", "CSV" }, testInstance.cleanFilters( new String[]{ FilterType.TXT.toString(), FilterType.CSV.toString() } ) ); }
// Number of fields in this record, delegated to the backing contents list.
@Override public int size() { return contents.size(); }
// Copies binary/map/list/struct fields from one HCatRecord into a fresh one via the typed
// getters/setters and asserts field-by-field equality.
@Test public void testGetSetByType2() throws HCatException { HCatRecord inpRec = getGetSet2InpRec(); HCatRecord newRec = new DefaultHCatRecord(inpRec.size()); HCatSchema hsch = HCatSchemaUtils.getHCatSchema("a:binary,b:map<string,string>,c:array<int>,d:struct<i:int>"); newRec.setByteArray("a", hsch, inpRec.getByteArray("a", hsch)); newRec.setMap("b", hsch, inpRec.getMap("b", hsch)); newRec.setList("c", hsch, inpRec.getList("c", hsch)); newRec.setStruct("d", hsch, inpRec.getStruct("d", hsch)); Assert.assertTrue(HCatDataCheckUtil.recordsEqual(newRec, inpRec)); }
/**
 * Removes the given URL's node from ZooKeeper, wrapping any failure
 * (including a destroyed-registry state) in an {@link RpcException}.
 */
@Override
public void doUnregister(URL url) {
    try {
        // Reject the call outright if this registry has been destroyed.
        checkDestroyed();
        zkClient.delete(toUrlPath(url));
    } catch (Throwable t) {
        String message =
                "Failed to unregister " + url + " to zookeeper " + getUrl() + ", cause: " + t.getMessage();
        throw new RpcException(message, t);
    }
}
/**
 * A URL the ZooKeeper registry cannot handle (wrong protocol) must surface as
 * an RpcException from doUnregister rather than failing silently.
 */
@Test
void testDoUnregisterWithException() {
    Assertions.assertThrows(RpcException.class, () -> {
        URL errorUrl = URL.valueOf("multicast://0.0.0.0/");
        zookeeperRegistry.doUnregister(errorUrl);
    });
}
/**
 * Fails if the subject contains the given key; delegates to a collection
 * assertion over the subject's {@code keySet()}.
 *
 * @param key the key that must be absent (may be {@code null})
 */
public final void doesNotContainKey(@Nullable Object key) {
  check("keySet()").that(checkNotNull(actual).keySet()).doesNotContain(key);
}
/**
 * doesNotContainKey passes both for a key that is absent and for a null key.
 */
@Test
public void doesNotContainKey() {
  ImmutableMultimap<String, String> multimap = ImmutableMultimap.of("kurt", "kluever");
  assertThat(multimap).doesNotContainKey("daniel");
  // null keys are valid arguments and simply not present here
  assertThat(multimap).doesNotContainKey(null);
}
/**
 * Integer overload of the arc-tangent UDF: widens the value to a double and
 * delegates to the double overload. A null input yields a null result.
 */
@Udf(description = "Returns the inverse (arc) tangent of an INT value")
public Double atan(
    @UdfParameter(
        value = "value",
        description = "The value to get the inverse tangent of."
    ) final Integer value
) {
  final Double widened = value == null ? null : value.doubleValue();
  return atan(widened);
}
/**
 * atan returns the expected inverse tangent for positive inputs across the
 * double, int and long overloads.
 */
@Test
public void shouldHandlePositive() {
  assertThat(udf.atan(0.43), closeTo(0.40609805831761564, 0.000000000000001));
  assertThat(udf.atan(0.5), closeTo(0.4636476090008061, 0.000000000000001));
  assertThat(udf.atan(1.0), closeTo(0.7853981633974483, 0.000000000000001));
  // int and long overloads must agree with the double overload
  assertThat(udf.atan(1), closeTo(0.7853981633974483, 0.000000000000001));
  assertThat(udf.atan(1L), closeTo(0.7853981633974483, 0.000000000000001));
}
/**
 * Closes immediately, equivalent to {@code close(Duration.ZERO)} — no time is
 * allowed for pending work to complete.
 */
@Override
public void close() {
    // Duration.ZERO is the canonical zero-length duration; Duration.ofMillis(0)
    // expressed the same value less directly.
    close(Duration.ZERO);
}
/**
 * Flushing after the producer has been closed must fail with an
 * IllegalStateException.
 */
@Test
public void shouldThrowOnFlushProducerIfProducerIsClosed() {
    buildMockProducer(true);
    producer.close();
    assertThrows(IllegalStateException.class, producer::flush);
}
/**
 * Registers a stream-specific listener by mapping its interface to the
 * corresponding Redis keyspace-notification event channel; falls back to the
 * superclass for listener types not handled here.
 *
 * NOTE: the instanceof checks are evaluated in order, so a listener
 * implementing several of these interfaces is registered only for the first
 * matching event.
 *
 * @param listener the listener to register
 * @return the registration id usable for later removal
 */
@Override
public int addListener(ObjectListener listener) {
    // XADD — entry added to the stream
    if (listener instanceof StreamAddListener) {
        return addListener("__keyevent@*:xadd", (StreamAddListener) listener, StreamAddListener::onAdd);
    }
    // XDEL — entry removed from the stream
    if (listener instanceof StreamRemoveListener) {
        return addListener("__keyevent@*:xdel", (StreamRemoveListener) listener, StreamRemoveListener::onRemove);
    }
    // XGROUP CREATECONSUMER
    if (listener instanceof StreamCreateConsumerListener) {
        return addListener("__keyevent@*:xgroup-createconsumer", (StreamCreateConsumerListener) listener, StreamCreateConsumerListener::onCreateConsumer);
    }
    // XGROUP DELCONSUMER
    if (listener instanceof StreamRemoveConsumerListener) {
        return addListener("__keyevent@*:xgroup-delconsumer", (StreamRemoveConsumerListener) listener, StreamRemoveConsumerListener::onRemoveConsumer);
    }
    // XGROUP CREATE
    if (listener instanceof StreamCreateGroupListener) {
        return addListener("__keyevent@*:xgroup-create", (StreamCreateGroupListener) listener, StreamCreateGroupListener::onCreateGroup);
    }
    // XGROUP DESTROY
    if (listener instanceof StreamRemoveGroupListener) {
        return addListener("__keyevent@*:xgroup-destroy", (StreamRemoveGroupListener) listener, StreamRemoveGroupListener::onRemoveGroup);
    }
    // XTRIM — stream trimmed
    if (listener instanceof StreamTrimListener) {
        return addListener("__keyevent@*:xtrim", (StreamTrimListener) listener, StreamTrimListener::onTrim);
    }
    // Client-side caching invalidation tracking
    if (listener instanceof TrackingListener) {
        return addTrackingListener((TrackingListener) listener);
    }
    // Generic listeners (expiry, delete, ...) are handled by the base class.
    return super.addListener(listener);
}
/**
 * A StreamAddListener registered on a stream must fire (via the "Et" keyspace
 * notification config) when an entry is added.
 */
@Test
public void testAddListener() {
    testWithParams(redisson -> {
        RStream<String, String> ss = redisson.getStream("test");
        ss.createGroup(StreamCreateGroupArgs.name("test-group").makeStream());

        CountDownLatch latch = new CountDownLatch(1);
        ss.addListener(new StreamAddListener() {
            @Override
            public void onAdd(String name) {
                latch.countDown();
            }
        });

        ss.add(StreamAddArgs.entry("test1", "test2"));
        try {
            // listener must be invoked within one second of the XADD
            assertThat(latch.await(1, TimeUnit.SECONDS)).isTrue();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }, NOTIFY_KEYSPACE_EVENTS, "Et");
}
/**
 * Creates a checkpoint (commit) in the local repository and then pushes the
 * result to the remote stream.
 *
 * @return the revision produced by the local checkpoint
 * @throws IOException if the underlying checkpoint fails
 */
@Override
public Revision checkpoint(String noteId, String notePath, String commitMessage, AuthenticationInfo subject)
        throws IOException {
    final Revision localRevision = super.checkpoint(noteId, notePath, commitMessage, subject);
    updateRemoteStream();
    return localRevision;
}
/**
 * Test the case when the check-pointing (add new files and commit) pushes the
 * local commits to the remote repository: after a checkpoint, the remote log's
 * newest commit must be the local checkpoint revision, followed by the earlier
 * first commit.
 *
 * NOTE(review): this test uses bare {@code assert} statements, which only run
 * with {@code -ea}; consider JUnit assertions — confirm with the build setup.
 */
@Test
void pushLocalChangesToRemoteRepositoryOnCheckpointing() throws IOException, GitAPIException {
    // Add a new paragraph to the local repository
    addParagraphToNotebook();
    // Commit and push the changes to remote repository
    NotebookRepoWithVersionControl.Revision secondCommitRevision = gitHubNotebookRepo.checkpoint(
            TEST_NOTE_ID, TEST_NOTE_PATH, "Second commit from local repository", null);
    // Check all the commits as seen from the remote repository. The commits are ordered
    // chronologically. The last commit is the first in the commit logs.
    Iterator<RevCommit> revisions = remoteGit.log().all().call().iterator();
    assert(secondCommitRevision.id.equals(revisions.next().getName())); // The local commit after adding the paragraph
    // The first commit done on the remote repository
    assert(firstCommitRevision.getName().equals(revisions.next().getName()));
}
/**
 * Creates a new statement bound to this connection.
 *
 * @return a fresh {@link PinotStatement}
 * @throws SQLException if the connection is in an invalid (e.g. closed) state
 */
@Override
public Statement createStatement() throws SQLException {
    // Fail fast when the connection cannot be used.
    validateState();
    final Statement statement = new PinotStatement(this);
    return statement;
}
/**
 * A connection built with a controller transport that has no user agent must
 * still be able to create statements.
 */
@Test
public void unsetUserAgentTest() throws Exception {
    DummyPinotControllerTransport userAgentPinotControllerTransport = DummyPinotControllerTransport
            .create(null);
    PinotConnection pinotConnection =
            new PinotConnection("dummy", _dummyPinotClientTransport, "dummy", userAgentPinotControllerTransport);

    Statement statement = pinotConnection.createStatement();
    Assert.assertNotNull(statement);
}
/**
 * Returns the configuration properties backing this instance.
 *
 * @return the properties object (not a defensive copy)
 */
public Properties getProperties() {
    return this.properties;
}
/**
 * Application-level properties are loaded into the singleton configuration:
 * an unknown key resolves to null while a defined key keeps its value.
 */
@Test
public void testApplicationProperties() {
    assertNull(Configuration.INSTANCE.getProperties().getProperty("hibernate.types.app.props.no"));
    assertEquals("true", Configuration.INSTANCE.getProperties().getProperty("hibernate.types.app.props"));
}
/**
 * Parses a single "field:value" filter expression into a {@link Filter},
 * producing a {@link RangeFilter} for range-typed values ("from&lt;&gt;to",
 * with either bound optional) and a {@link SingleValueFilter} otherwise.
 *
 * @param filterExpression the raw expression; must contain the field/value separator
 * @param attributes the attribute metadata used to resolve the field and its type
 * @return the parsed filter
 * @throws IllegalArgumentException if the expression lacks a separator, field or value
 */
public Filter parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
    if (!filterExpression.contains(FIELD_AND_VALUE_SEPARATOR)) {
        throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
    }
    // Split only on the first separator so values may themselves contain it
    // (e.g. timestamps such as "12:12:12").
    final String[] split = filterExpression.split(FIELD_AND_VALUE_SEPARATOR, 2);
    final String fieldPart = split[0];
    if (fieldPart == null || fieldPart.isEmpty()) {
        throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
    }
    final String valuePart = split[1];
    if (valuePart == null || valuePart.isEmpty()) {
        throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
    }
    final EntityAttribute attributeMetaData = getAttributeMetaData(attributes, fieldPart);
    final SearchQueryField.Type fieldType = attributeMetaData.type();
    if (isRangeValueExpression(valuePart, fieldType)) {
        if (valuePart.startsWith(RANGE_VALUES_SEPARATOR)) {
            // "<>to" — open lower bound
            return new RangeFilter(attributeMetaData.id(),
                    null,
                    extractValue(fieldType, valuePart.substring(RANGE_VALUES_SEPARATOR.length()))
            );
        } else if (valuePart.endsWith(RANGE_VALUES_SEPARATOR)) {
            // "from<>" — open upper bound
            return new RangeFilter(attributeMetaData.id(),
                    extractValue(fieldType, valuePart.substring(0, valuePart.length() - RANGE_VALUES_SEPARATOR.length())),
                    null
            );
        } else {
            // "from<>to" — both bounds present
            final String[] ranges = valuePart.split(RANGE_VALUES_SEPARATOR);
            return new RangeFilter(attributeMetaData.id(),
                    extractValue(fieldType, ranges[0]),
                    extractValue(fieldType, ranges[1])
            );
        }
    } else {
        return new SingleValueFilter(attributeMetaData.id(), extractValue(fieldType, valuePart));
    }
}
/**
 * A DATE-typed attribute's filter expression is parsed into a single-value
 * filter holding the parsed UTC date, even though the value itself contains
 * the ':' separator.
 */
@Test
void parsesFilterExpressionCorrectlyForDateType() {
    assertEquals(new SingleValueFilter("created_at", new DateTime(2012, 12, 12, 12, 12, 12, DateTimeZone.UTC).toDate()),
            toTest.parseSingleExpression("created_at:2012-12-12 12:12:12",
                    List.of(EntityAttribute.builder()
                            .id("created_at")
                            .title("Creation Date")
                            .type(SearchQueryField.Type.DATE)
                            .filterable(true)
                            .build())
            ));
}
/**
 * Fails if the subject is not ordered according to the natural ordering of
 * its elements (equal neighbours are allowed).
 */
public void isInOrder() {
  isInOrder(Ordering.natural());
}
/**
 * isInOrder must fail for a list with more than one out-of-order pair
 * (3>2 and 4>0 here).
 */
@Test
public void isInOrderMultipleFailures() {
  expectFailureWhenTestingThat(asList(1, 3, 2, 4, 0)).isInOrder();
}
/**
 * Resolves the named field of the given class by delegating to
 * {@code internalGetField} with the class's canonical name.
 *
 * NOTE(review): per the accompanying test, lookup appears to include
 * superclass fields and a missing field raises ScenarioException — confirm
 * against internalGetField, which is defined elsewhere.
 *
 * @param clazz the class to inspect
 * @param fieldName the field to resolve
 * @return the resolved {@link Field}
 */
public static Field getField(Class<?> clazz, String fieldName) {
    return internalGetField(clazz.getCanonicalName(), clazz, fieldName);
}
/**
 * getField finds fields declared on the class itself and those inherited by a
 * subclass, and raises ScenarioException for a field that does not exist.
 */
@Test
public void getFieldTest() {
    assertThat(getField(Person.class, "firstName")).isNotNull();
    // inherited field is visible through the subclass
    assertThat(getField(SubPerson.class, "firstName")).isNotNull();
    assertThat(getField(SubPerson.class, "additionalField")).isNotNull();

    assertThatThrownBy(() -> getField(Person.class, "notExistingField"))
            .isInstanceOf(ScenarioException.class)
            .hasMessageStartingWith("Impossible to find field with name ");
}
/**
 * Creates a builder for the resource manager, validating the required
 * identifiers up front.
 *
 * @param projectId the GCP project id; must be non-null and non-empty
 * @param location the resource location; must be non-null and non-empty
 * @param credentialsProvider credentials to use (validation of this argument,
 *        if any, happens later)
 * @return a new {@code Builder}
 * @throws IllegalArgumentException if projectId or location is null or empty
 */
public static Builder builder(
    String projectId, String location, CredentialsProvider credentialsProvider) {
  // Validate in this order so callers see the projectId error first when both
  // arguments are invalid; messages are relied on by tests.
  final boolean hasProjectId = !Strings.isNullOrEmpty(projectId);
  checkArgument(hasProjectId, "projectID can not be null or empty");
  final boolean hasLocation = !Strings.isNullOrEmpty(location);
  checkArgument(hasLocation, "location can not be null or empty");
  return new Builder(projectId, location, credentialsProvider);
}
/**
 * Building with an empty location must fail with an IllegalArgumentException
 * carrying the location-specific message.
 */
@Test
public void testBuilderWithInvalidLocationShouldFail() {
    IllegalArgumentException exception =
            assertThrows(
                    IllegalArgumentException.class,
                    () -> DatastreamResourceManager.builder(PROJECT_ID, "", null));
    assertThat(exception).hasMessageThat().contains("location can not be null or empty");
}
/**
 * Returns the score/value entries in the given index range in descending
 * score order; blocks on the async variant.
 *
 * @param startIndex inclusive start index (negative indexes count from the end)
 * @param endIndex inclusive end index
 */
@Override
public Collection<ScoredEntry<V>> entryRangeReversed(int startIndex, int endIndex) {
    return get(entryRangeReversedAsync(startIndex, endIndex));
}
/**
 * entryRangeReversed(0, -1) returns every entry, highest score first.
 */
@Test
public void testEntryRangeReversed() {
    RScoredSortedSet<Integer> set = redisson.getScoredSortedSet("simple");
    set.add(10, 1);
    set.add(20, 2);
    set.add(30, 3);
    set.add(40, 4);
    set.add(50, 5);

    Collection<ScoredEntry<Integer>> vals = set.entryRangeReversed(0, -1);
    assertThat(vals).containsExactly(
            new ScoredEntry<>(50D, 5),
            new ScoredEntry<>(40D, 4),
            new ScoredEntry<>(30D, 3),
            new ScoredEntry<>(20D, 2),
            new ScoredEntry<>(10D, 1)
    );
}
/**
 * Queries the sentinel for the master nodes it monitors.
 *
 * @return the monitored masters converted to {@link RedisServer} instances
 */
@Override
public Collection<RedisServer> masters() {
    final List<Map<String, String>> rawMasters =
            connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS);
    return toRedisServersList(rawMasters);
}
/**
 * The sentinel connection reports exactly one monitored master in this
 * test topology.
 */
@Test
public void testMasters() {
    Collection<RedisServer> masters = connection.masters();
    assertThat(masters).hasSize(1);
}
/**
 * Attempts to claim the record at {@code groupStart} for this range.
 * Enforces that group positions are returned monotonically, that the first
 * group and every new position are split points, and rejects (returns false
 * for) split points at or past the stop position. State (last group position,
 * split-point count) is updated only on a successful claim.
 *
 * @param isAtSplitPoint whether this group starts a new position
 * @param groupStart the shuffle position of the group being returned
 * @return true if the record is within range and may be returned
 * @throws IllegalStateException on any ordering/split-point violation
 */
@Override
public synchronized boolean tryReturnRecordAt(
    boolean isAtSplitPoint, @Nullable ShufflePosition groupStart) {
  // The very first returned group must begin a split point.
  if (lastGroupStart == null && !isAtSplitPoint) {
    throw new IllegalStateException(
        String.format("The first group [at %s] must be at a split point", groupStart.toString()));
  }

  // Groups may never precede the tracker's start position.
  if (this.startPosition != null && groupStart.compareTo(this.startPosition) < 0) {
    throw new IllegalStateException(
        String.format(
            "Trying to return record at %s which is before the starting position at %s",
            groupStart, this.startPosition));
  }

  int comparedToLast = (lastGroupStart == null) ? 1 : groupStart.compareTo(this.lastGroupStart);
  // Positions must be non-decreasing across calls.
  if (comparedToLast < 0) {
    throw new IllegalStateException(
        String.format(
            "Trying to return group at %s which is before the last-returned group at %s",
            groupStart, this.lastGroupStart));
  }

  if (isAtSplitPoint) {
    splitPointsSeen++;
    // A new split point must advance past the previous group's position.
    if (comparedToLast == 0) {
      throw new IllegalStateException(
          String.format(
              "Trying to return a group at a split point with same position as the "
                  + "previous group: both at %s, last group was %s",
              groupStart,
              lastGroupWasAtSplitPoint ? "at a split point." : "not at a split point."));
    }
    // Reaching the stop position ends the range: refuse the record.
    if (stopPosition != null && groupStart.compareTo(stopPosition) >= 0) {
      return false;
    }
  } else {
    checkState(
        comparedToLast == 0,
        // This case is not a violation of general RangeTracker semantics, but it is
        // contrary to how GroupingShuffleReader in particular works. Hitting it would
        // mean it's behaving unexpectedly.
        "Trying to return a group not at a split point, but with a different position "
            + "than the previous group: last group was %s at %s, current at %s",
        lastGroupWasAtSplitPoint ? "a split point" : "a non-split point",
        lastGroupStart,
        groupStart);
  }

  this.lastGroupStart = groupStart;
  this.lastGroupWasAtSplitPoint = isAtSplitPoint;

  return true;
}
/**
 * Returning a split-point group at a position earlier than the previously
 * returned group must raise IllegalStateException.
 */
@Test
public void testTryReturnNonMonotonic() throws Exception {
    GroupingShuffleRangeTracker tracker =
            new GroupingShuffleRangeTracker(ofBytes(3, 0, 0), ofBytes(5, 0, 0));
    tracker.tryReturnRecordAt(true, ofBytes(3, 4, 5));
    tracker.tryReturnRecordAt(true, ofBytes(3, 4, 6));
    expected.expect(IllegalStateException.class);
    // position regresses from (3,4,6) to (3,2,1)
    tracker.tryReturnRecordAt(true, ofBytes(3, 2, 1));
}
/**
 * Ensures the dictionary type exists and is enabled.
 *
 * @param type the dictionary type identifier to check
 * @throws 异常 — DICT_TYPE_NOT_EXISTS when the type is unknown,
 *         DICT_TYPE_NOT_ENABLE when it exists but is disabled
 */
@VisibleForTesting
public void validateDictTypeExists(String type) {
    DictTypeDO dictType = dictTypeService.getDictType(type);
    // Unknown type: fail first with the more specific error.
    if (dictType == null) {
        throw exception(DICT_TYPE_NOT_EXISTS);
    }
    // Known type that has been disabled may not be used either.
    boolean enabled = CommonStatusEnum.ENABLE.getStatus().equals(dictType.getStatus());
    if (!enabled) {
        throw exception(DICT_TYPE_NOT_ENABLE);
    }
}
/**
 * Validating a random, unknown dictionary type must raise the
 * DICT_TYPE_NOT_EXISTS service exception.
 */
@Test
public void testValidateDictTypeExists_notExists() {
    assertServiceException(() -> dictDataService.validateDictTypeExists(randomString()),
            DICT_TYPE_NOT_EXISTS);
}
/**
 * Dispatches an add-receive-destination request to the handler matching the
 * channel kind: IPC, spy, or (by default) network.
 */
void onAddRcvDestination(final long registrationId, final String destinationChannel, final long correlationId)
{
    if (destinationChannel.startsWith(IPC_CHANNEL))
    {
        onAddRcvIpcDestination(registrationId, destinationChannel, correlationId);
        return;
    }

    if (destinationChannel.startsWith(SPY_QUALIFIER))
    {
        onAddRcvSpyDestination(registrationId, destinationChannel, correlationId);
        return;
    }

    // Anything that is neither IPC nor spy is treated as a network destination.
    onAddRcvNetworkDestination(registrationId, destinationChannel, correlationId);
}
/**
 * Adding a receive destination whose channel specifies control-mode=response
 * must be rejected with an InvalidChannelException carrying the exact error
 * message.
 */
@Test
void shouldThrowExceptionWhenRcvDestinationHasControlModeResponseSet()
{
    final Exception exception = assertThrowsExactly(InvalidChannelException.class,
        () -> driverConductor.onAddRcvDestination(5, "aeron:udp?control-mode=response", 7)
    );

    assertEquals(
        "ERROR - destinations may not specify " + MDC_CONTROL_MODE_PARAM_NAME + "=" + CONTROL_MODE_RESPONSE,
        exception.getMessage());
}
/**
 * Walks the cause chain starting at {@code t} and returns the first throwable
 * matching {@code condition}. Cyclic cause chains are detected and terminate
 * the walk instead of looping forever.
 *
 * @param condition the predicate a throwable in the chain must satisfy
 * @param t the head of the cause chain; may be {@code null}
 * @return the first matching throwable, or {@link Optional#empty()} if none
 */
public static Optional<Throwable> findThrowableInChain(Predicate<Throwable> condition, @Nullable Throwable t) {
    final Set<Throwable> seen = new HashSet<>();

    // Set#add returns false once a throwable repeats, so a single call both
    // records the visit and detects cycles — no separate contains() lookup.
    while (t != null && seen.add(t)) {
        if (condition.test(t)) {
            return Optional.of(t);
        }
        t = t.getCause();
    }

    return Optional.empty();
}
/**
 * A single-element chain: a matching predicate returns the throwable itself,
 * and an always-false predicate yields empty.
 */
@Test
void findsSimpleException() {
    final RuntimeException e = new RuntimeException();
    assertThat(findThrowableInChain(t -> t instanceof RuntimeException, e)).contains(e);
    assertThat(findThrowableInChain(t -> false, e)).isEmpty();
}
/**
 * Divides {@code n1} by {@code n2} using integer division.
 *
 * @param n1 the dividend
 * @param n2 the divisor
 * @return the integer quotient {@code n1 / n2}
 * @throws Exception declared broadly by the interface; in practice an
 *         {@link ArithmeticException} is raised when {@code n2} is zero
 */
@Override
public int division(int n1, int n2) throws Exception {
    // The previous intermediate local added nothing; return the quotient directly.
    return n1 / n2;
}
/**
 * Division by zero must propagate an exception (the ArithmeticException
 * raised by integer division satisfies the declared Exception).
 */
@Test(expected = Exception.class)
public void testDivisionByZero() throws Exception {
    Controlador controlador = new Controlador();
    controlador.division(5, 0);
}
/**
 * Returns a transform that materializes a {@code PCollection<T>} as an
 * {@code Iterable<T>} view for use as a side input.
 *
 * @param <T> the element type of the collection being viewed
 */
public static <T> AsIterable<T> asIterable() {
  return new AsIterable<>();
}
/**
 * A side input may itself contain an iterable side input: view2 is an
 * Iterable of Iterables (built by re-emitting view1), and flattening it in a
 * downstream DoFn must yield the single value (17) produced at the start.
 */
@Test
@Category(ValidatesRunner.class)
public void testSideInputWithNestedIterables() {
  // Stage 1: a single integer, viewed as an Iterable side input.
  final PCollectionView<Iterable<Integer>> view1 =
      pipeline
          .apply("CreateVoid1", Create.of((Void) null).withCoder(VoidCoder.of()))
          .apply(
              "OutputOneInteger",
              ParDo.of(
                  new DoFn<Void, Integer>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) {
                      c.output(17);
                    }
                  }))
          .apply("View1", View.asIterable());

  // Stage 2: re-emit view1 as an element, producing an Iterable-of-Iterables view.
  final PCollectionView<Iterable<Iterable<Integer>>> view2 =
      pipeline
          .apply("CreateVoid2", Create.of((Void) null).withCoder(VoidCoder.of()))
          .apply(
              "OutputSideInput",
              ParDo.of(
                  new DoFn<Void, Iterable<Integer>>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) {
                      c.output(c.sideInput(view1));
                    }
                  })
              .withSideInputs(view1))
          .apply("View2", View.asIterable());

  // Stage 3: flatten the nested iterable side input back into integers.
  PCollection<Integer> output =
      pipeline
          .apply("CreateVoid3", Create.of((Void) null).withCoder(VoidCoder.of()))
          .apply(
              "ReadIterableSideInput",
              ParDo.of(
                  new DoFn<Void, Integer>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) {
                      for (Iterable<Integer> input : c.sideInput(view2)) {
                        for (Integer i : input) {
                          c.output(i);
                        }
                      }
                    }
                  })
              .withSideInputs(view2));

  PAssert.that(output).containsInAnyOrder(17);
  pipeline.run();
}
/**
 * Checks that a transformation carries a description of at least
 * {@code minLength} characters.
 *
 * @param subject the object under validation; only {@link TransMeta} instances
 *        are examined
 * @return a single APPROVAL or ERROR feedback item, or an empty list when the
 *         rule is disabled or the subject is not a transformation
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
  List<ImportValidationFeedback> feedback = new ArrayList<>();

  // Disabled rules and non-transformation subjects produce no feedback at all.
  if ( !isEnabled() || !( subject instanceof TransMeta ) ) {
    return feedback;
  }

  String description = ( (TransMeta) subject ).getDescription();
  boolean longEnough = description != null && description.length() >= minLength;

  if ( longEnough ) {
    feedback.add( new ImportValidationFeedback(
      this, ImportValidationResultType.APPROVAL, "A description is present" ) );
  } else {
    feedback.add( new ImportValidationFeedback(
      this, ImportValidationResultType.ERROR, "A description is not present or is too short." ) );
  }

  return feedback;
}
/**
 * An enabled rule with minLength 10 must report ERROR feedback for a
 * transformation whose description is empty.
 */
@Test
public void testVerifyRule_EmptyDescription_EnabledRule() {
    TransformationHasDescriptionImportRule importRule = getImportRule( 10, true );
    TransMeta transMeta = new TransMeta();
    transMeta.setDescription( "" );

    List<ImportValidationFeedback> feedbackList = importRule.verifyRule( transMeta );
    assertNotNull( feedbackList );
    assertFalse( feedbackList.isEmpty() );
    ImportValidationFeedback feedback = feedbackList.get( 0 );
    assertNotNull( feedback );
    assertEquals( ImportValidationResultType.ERROR, feedback.getResultType() );
    assertTrue( feedback.isError() );
}
/**
 * Determines whether a message stanza should be carbon-copied to a user's
 * other connected clients (XEP-0280 Message Carbons). MUC-related stanzas are
 * special-cased first; then type "groupchat" and &lt;private/&gt; carbons are
 * excluded; finally chat-type, bodied-normal, and IM-payload messages qualify.
 *
 * NOTE: rule order matters — the MUC checks must run before the generic
 * type-based rules.
 *
 * @param stanza the message to classify
 * @return true when the stanza is eligible for carbons delivery
 */
public static boolean isEligibleForCarbonsDelivery(final Message stanza)
{
    // To properly handle messages exchanged with a MUC (or similar service), the server must be able to identify MUC-related messages.
    // This can be accomplished by tracking the clients' presence in MUCs, or by checking for the <x xmlns="http://jabber.org/protocol/muc#user">
    // element in messages. The following rules apply to MUC-related messages:
    if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user") != null) {
        // A <message/> containing a Direct MUC Invitations (XEP-0249) SHOULD be carbon-copied.
        if (containsChildElement(stanza, Set.of("x"), "jabber:x:conference")) {
            return true;
        }

        // A <message/> containing a Mediated Invitation SHOULD be carbon-copied.
        if (stanza.getChildElement("x", "http://jabber.org/protocol/muc#user") != null
            && stanza.getChildElement("x", "http://jabber.org/protocol/muc#user").element("invite") != null) {
            return true;
        }

        // A private <message/> from a local user to a MUC participant (sent to a full JID) SHOULD be carbon-copied
        // The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC, and MAY
        // inject the <x/> element into such carbon copies. Clients can not respond to carbon-copies of MUC-PMs
        // related to a MUC they are not joined to. Therefore, they SHOULD either ignore such carbon copies, or
        // provide a way for the user to join the MUC before answering.
        if (stanza.getTo() != null && stanza.getTo().getResource() != null
            && stanza.getFrom() != null && stanza.getFrom().getNode() != null
            && XMPPServer.getInstance().isLocal(stanza.getFrom())) {
            return true; // TODO The server SHOULD limit carbon-copying to the clients sharing a Multi-Session Nick in that MUC (OF-2780).
        }

        // A private <message/> from a MUC participant (received from a full JID) to a local user SHOULD NOT be
        // carbon-copied (these messages are already replicated by the MUC service to all joined client instances).
        if (stanza.getFrom() != null && stanza.getFrom().getResource() != null
            && stanza.getTo() != null && stanza.getTo().getNode() != null
            && XMPPServer.getInstance().isLocal(stanza.getTo())) {
            return false;
        }
    }

    // A <message/> of type "groupchat" SHOULD NOT be carbon-copied.
    if (stanza.getType() == Message.Type.groupchat) {
        return false;
    }

    // A <message/> is eligible for carbons delivery if it does not contain a <private/> child element...
    if (containsChildElement(stanza, Set.of("private", "received"), "urn:xmpp:carbons")) {
        return false;
    }

    // and if at least one of the following is true:
    // ... it is of type "chat".
    if (stanza.getType() == Message.Type.chat) {
        return true;
    }

    // ... it is of type "normal" and contains a <body> element.
    if ((stanza.getType() == null || stanza.getType() == Message.Type.normal) && stanza.getBody() != null) {
        return true;
    }

    // ... it contains payload elements typically used in IM
    if (containsChildElement(stanza, Set.of("request", "received"), "urn:xmpp:receipts") // Message Delivery Receipts (XEP-0184)
        || containsChildElement(stanza, Set.of("active", "inactive", "gone", "composing", "paused"), "http://jabber.org/protocol/chatstates") // Chat State Notifications (XEP-0085)
        || (containsChildElement(stanza, Set.of("markable", "received", "displayed", "acknowledged"), "urn:xmpp:chat-markers")) // Chat Markers (XEP-0333)).
    ) {
        return true;
    }

    // ... it is of type "error" and it was sent in response to a <message/> that was eligible for carbons delivery.
    // TODO implement me (OF-2779)

    return false;
}
/**
 * A message of type "groupchat" is not eligible for carbons delivery.
 */
@Test
public void testMucGroupChat() throws Exception
{
    // Setup test fixture.
    final Message input = new Message();
    input.setType(Message.Type.groupchat);

    // Execute system under test.
    final boolean result = Forwarded.isEligibleForCarbonsDelivery(input);

    // Verify results.
    assertFalse(result);
}
@Override public List<SyntheticBoundedSource> split(long desiredBundleSizeBytes, PipelineOptions options) throws Exception { // Choose number of bundles either based on explicit parameter, // or based on size and hints. int desiredNumBundles = (sourceOptions.forceNumInitialBundles == null) ? ((int) Math.ceil(1.0 * getEstimatedSizeBytes(options) / desiredBundleSizeBytes)) : sourceOptions.forceNumInitialBundles; List<SyntheticBoundedSource> res = bundleSplitter.getBundleSizes(desiredNumBundles, this.getStartOffset(), this.getEndOffset()) .stream() .map(offsetRange -> createSourceForSubrange(offsetRange.getFrom(), offsetRange.getTo())) .collect(Collectors.toList()); LOG.info("Split into {} bundles of sizes: {}", res.size(), res); return res; }
/**
 * split honors various bundle-size parameters and, when
 * forceNumInitialBundles is set, produces exactly that many bundles
 * regardless of the desired bundle size.
 */
@Test
public void testSplitIntoBundles() throws Exception {
    testSplitIntoBundlesP(1);
    testSplitIntoBundlesP(-1);
    testSplitIntoBundlesP(5);
    PipelineOptions options = PipelineOptionsFactory.create();
    // explicit override wins over the size-derived bundle count
    testSourceOptions.forceNumInitialBundles = 37;
    assertEquals(37, new SyntheticBoundedSource(testSourceOptions).split(42, options).size());
}
/**
 * Returns the detail of a single dictionary entry. The {@code @Existed}
 * constraint rejects unknown ids before the service is invoked, and the
 * endpoint requires the "system:dict:edit" permission.
 *
 * @param id the dictionary entry id; must exist per ShenyuDictMapper
 * @return a success result wrapping the dictionary detail
 */
@GetMapping("/{id}")
@RequiresPermissions("system:dict:edit")
public ShenyuAdminResult detail(@PathVariable("id") @Valid
                                @Existed(provider = ShenyuDictMapper.class,
                                        message = "dict is not existed") final String id) {
    return ShenyuAdminResult.success(ShenyuResultMessage.DETAIL_SUCCESS, shenyuDictService.findById(id));
}
/**
 * GET /shenyu-dict/{id} returns 200 with the detail-success message and the
 * dictionary VO supplied by the service.
 */
@Test
public void testDetail() throws Exception {
    given(this.shenyuDictService.findById("123")).willReturn(shenyuDictVO);
    this.mockMvc.perform(MockMvcRequestBuilders.get("/shenyu-dict/{id}", "123"))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DETAIL_SUCCESS)))
            .andExpect(jsonPath("$.data.id", is(shenyuDictVO.getId())))
            .andReturn();
}
/**
 * Re-encodes the given content from one charset to another, returning either
 * the raw bytes or a String decoded with the destination charset.
 *
 * @param content the payload to convert
 * @param contentCharset the charset the content is currently encoded in
 * @param destinationCharset the charset to convert to
 * @param outputAsString whether to decode the converted bytes into a String
 * @return a byte[] in the destination charset, or a String when requested
 */
private Object convertCharset(Object content, Charset contentCharset, Charset destinationCharset,
                              boolean outputAsString) {
   final byte[] converted = StandardConversions.convertCharset(content, contentCharset, destinationCharset);
   if (outputAsString) {
      return new String(converted, destinationCharset);
   }
   return converted;
}
/**
 * Transcoding between media types with explicit charsets (UTF-8 vs EUC-KR)
 * preserves the payload: every combination of source/destination media type,
 * charset parameter, and String-vs-byte[] class type round-trips the Korean
 * JSON content correctly.
 */
@Test
public void testCharset() {
   Charset korean = Charset.forName("EUC-KR");
   MediaType textPlainKorean = TEXT_PLAIN.withCharset(korean);
   MediaType jsonKorean = APPLICATION_JSON.withCharset(korean);
   MediaType textPlainAsString = TEXT_PLAIN.withClassType(String.class);
   MediaType jsonAsString = APPLICATION_JSON.withClassType(String.class);
   String content = "{\"city_kr\":\"서울\"}";
   byte[] contentUTF = content.getBytes(UTF_8);
   byte[] contentKorean = convertCharset(contentUTF, UTF_8, korean);

   // Korean JSON -> plain text (default charset): bytes become UTF-8
   byte[] result = (byte[]) transcoder.transcode(contentKorean, jsonKorean, TEXT_PLAIN);
   assertArrayEquals(result, contentUTF);

   // Korean JSON -> plain text as String
   String strResult = (String) transcoder.transcode(contentKorean, jsonKorean, textPlainAsString);
   assertEquals(strResult, content);

   // same charset on both sides: bytes unchanged
   result = (byte[]) transcoder.transcode(contentKorean, jsonKorean, textPlainKorean);
   assertArrayEquals(result, contentKorean);

   // Korean plain text -> JSON (default charset): bytes become UTF-8
   result = (byte[]) transcoder.transcode(contentKorean, textPlainKorean, APPLICATION_JSON);
   assertArrayEquals(result, contentUTF);

   // Korean JSON -> JSON as String
   strResult = (String) transcoder.transcode(contentKorean, jsonKorean, jsonAsString);
   assertEquals(strResult, content);

   // Korean plain text -> Korean JSON: bytes unchanged
   result = (byte[]) transcoder.transcode(contentKorean, textPlainKorean, jsonKorean);
   assertArrayEquals(result, contentKorean);

   // UTF plain text -> Korean JSON
   result = (byte[]) transcoder.transcode(contentUTF, TEXT_PLAIN, jsonKorean);
   assertArrayEquals(result, contentKorean);

   // String payload -> Korean JSON
   result = (byte[]) transcoder.transcode(content, textPlainAsString, jsonKorean);
   assertArrayEquals(result, contentKorean);

   // UTF JSON -> plain text (default charset): unchanged
   result = (byte[]) transcoder.transcode(contentUTF, APPLICATION_JSON, TEXT_PLAIN);
   assertArrayEquals(result, contentUTF);

   // UTF JSON -> Korean plain text
   result = (byte[]) transcoder.transcode(contentUTF, APPLICATION_JSON, textPlainKorean);
   assertArrayEquals(result, contentKorean);
}
/**
 * Populates the DMN context from a JSON-derived map: keys matching an
 * InputData node or a Decision node are processed with type information,
 * anything else is set on the context as-is.
 *
 * @param json the key/value pairs to inject into the context
 * @return the populated context (same instance held by this builder)
 */
public DMNContext populateContextWith(Map<String, Object> json) {
    for (Entry<String, Object> entry : json.entrySet()) {
        final String key = entry.getKey();

        // Prefer matching a model InputData node for this key.
        InputDataNode inputNode = model.getInputByName(key);
        if (inputNode != null) {
            processInputDataNode(entry, inputNode);
            continue;
        }

        // Otherwise the key may override a Decision node.
        DecisionNode decisionNode = model.getDecisionByName(key);
        if (decisionNode != null) {
            processDecisionNode(entry, decisionNode);
            continue;
        }

        // Unknown key: pass the raw value through unchanged.
        LOG.debug("The key {} was not a InputData nor a Decision to override, setting it as-is.", key);
        context.set(key, entry.getValue());
    }
    return context;
}
/**
 * Populating the context dynamically from JSON (including an extra attribute
 * not declared in the model) evaluates the Traffic Violation model to the
 * expected suspended-driver outcome.
 */
@Test
void trafficViolationAll() throws Exception {
    final DMNRuntime runtime = createRuntimeWithAdditionalResources("Traffic Violation.dmn", DMNRuntimeTypesTest.class);
    final DMNModel dmnModel = runtime.getModel("https://github.com/kiegroup/drools/kie-dmn/_A4BCA8B8-CF08-433F-93B2-A2598F19ECFF", "Traffic Violation");
    assertThat(dmnModel).isNotNull();
    assertThat(dmnModel.hasErrors()).as(DMNRuntimeUtil.formatMessages(dmnModel.getMessages())).isFalse();

    DMNContext context = runtime.newContext();
    final String JSON = "{\n" +
            "    \"Driver\": {\n" +
            "        \"Name\": \"John Doe\",\n" +
            "        \"Age\": 47,\n" +
            "        \"State\": \"Italy\",\n" +
            "        \"City\": \"Milan\",\n" +
            "        \"Points\": 15,\n" +
            "        \"additional\": \"NO5\"\n" + // intentional additional attribute
            "    },\n" +
            "    \"Violation\": {\n" +
            "        \"Code\": \"s\",\n" +
            "        \"Date\": \"2020-10-12\",\n" +
            "        \"Type\": \"speed\",\n" +
            "        \"Actual Speed\": 135,\n" +
            "        \"Speed Limit\": 100\n" +
            "    }\n" +
            "}";
    new DynamicDMNContextBuilder(context, dmnModel).populateContextWith(readJSON(JSON));

    assertTrafficViolationSuspendedCase(runtime, dmnModel, context);
}
/**
 * Sends a record through the wrapped producer, first (re)starting a
 * transaction when needed. KafkaExceptions are re-mapped so Streams can react
 * appropriately: recoverable (fencing) failures become TaskMigratedException
 * wrapping the original cause; everything else becomes a StreamsException.
 *
 * @param record the serialized record to send
 * @param callback completion callback passed to the underlying producer
 * @return the producer's send future
 */
Future<RecordMetadata> send(final ProducerRecord<byte[], byte[]> record, final Callback callback) {
    maybeBeginTransaction();
    try {
        return producer.send(record, callback);
    } catch (final KafkaException uncaughtException) {
        if (isRecoverable(uncaughtException)) {
            // producer.send() call may throw a KafkaException which wraps a FencedException,
            // in this case we should throw its wrapped inner cause so that it can be
            // captured and re-wrapped as TaskMigratedException
            throw new TaskMigratedException(
                formatException("Producer got fenced trying to send a record"),
                uncaughtException.getCause()
            );
        } else {
            throw new StreamsException(
                formatException(String.format("Error encountered trying to send record to topic %s", record.topic())),
                uncaughtException
            );
        }
    }
}
/**
 * With EOS-beta config, sending before transactions are initialized must
 * fail in maybeBeginTransaction with the mock producer's
 * IllegalStateException.
 */
@Test
public void shouldFailOnMaybeBeginTransactionIfTransactionsNotInitializedForExactlyOnceBeta() {
    final StreamsProducer streamsProducer =
        new StreamsProducer(
            eosBetaConfig,
            "threadId-StreamThread-0",
            eosBetaMockClientSupplier,
            null,
            UUID.randomUUID(),
            logContext,
            mockTime
        );

    final IllegalStateException thrown = assertThrows(
        IllegalStateException.class,
        () -> streamsProducer.send(record, null)
    );

    assertThat(thrown.getMessage(), is("MockProducer hasn't been initialized for transactions."));
}
/**
 * Returns whether {@code dependentType} (transitively) references
 * {@code dependencyType} by recursively following reference fields, element
 * types, and map key/value types. A type is considered dependent on itself.
 *
 * NOTE: no cycle detection is performed here — assumes the schema graph of
 * reference fields is acyclic (TODO confirm for schemas with self-references).
 *
 * @param stateEngine the engine holding the schemas
 * @param dependentType the type whose dependencies are inspected
 * @param dependencyType the type being looked for
 * @return true when a (possibly indirect) dependency exists
 */
public static boolean typeIsTransitivelyDependent(HollowStateEngine stateEngine, String dependentType, String dependencyType) {
    // Trivial case: every type depends on itself.
    if(dependentType.equals(dependencyType))
        return true;

    HollowSchema dependentTypeSchema = stateEngine.getSchema(dependentType);

    // Unknown type: cannot depend on anything.
    if(dependentTypeSchema == null)
        return false;

    switch(dependentTypeSchema.getSchemaType()) {
    case OBJECT:
        // Recurse into each REFERENCE field of the object schema.
        HollowObjectSchema objectSchema = (HollowObjectSchema)dependentTypeSchema;
        for(int i=0;i<objectSchema.numFields();i++) {
            if(objectSchema.getFieldType(i) == FieldType.REFERENCE) {
                if(typeIsTransitivelyDependent(stateEngine, objectSchema.getReferencedType(i), dependencyType))
                    return true;
            }
        }
        break;
    case LIST:
    case SET:
        // Collections depend via their element type (intentional fall-through from LIST to SET).
        return typeIsTransitivelyDependent(stateEngine, ((HollowCollectionSchema)dependentTypeSchema).getElementType(), dependencyType);
    case MAP:
        // Maps depend via either the key type or the value type.
        return typeIsTransitivelyDependent(stateEngine, ((HollowMapSchema)dependentTypeSchema).getKeyType(), dependencyType)
            || typeIsTransitivelyDependent(stateEngine, ((HollowMapSchema)dependentTypeSchema).getValueType(), dependencyType);
    }

    return false;
}
/**
 * For a linear reference chain A->B->C->D, dependency holds downstream
 * (A->B, A->C, B->C) and never upstream (C->B, B->A, C->A).
 */
@Test
public void determinesIfSchemasAreTransitivelyDependent() throws IOException {
    String schemasText =
            "TypeA { TypeB b; }" +
            "TypeB { TypeC c; }" +
            "TypeC { TypeD d; }";

    List<HollowSchema> schemas = HollowSchemaParser.parseCollectionOfSchemas(schemasText);
    HollowWriteStateEngine stateEngine = new HollowWriteStateEngine();
    HollowWriteStateCreator.populateStateEngineWithTypeWriteStates(stateEngine, schemas);

    Assert.assertTrue(HollowSchemaSorter.typeIsTransitivelyDependent(stateEngine, "TypeA", "TypeB"));
    Assert.assertTrue(HollowSchemaSorter.typeIsTransitivelyDependent(stateEngine, "TypeA", "TypeC"));
    Assert.assertTrue(HollowSchemaSorter.typeIsTransitivelyDependent(stateEngine, "TypeB", "TypeC"));

    Assert.assertFalse(HollowSchemaSorter.typeIsTransitivelyDependent(stateEngine, "TypeC", "TypeB"));
    Assert.assertFalse(HollowSchemaSorter.typeIsTransitivelyDependent(stateEngine, "TypeB", "TypeA"));
    Assert.assertFalse(HollowSchemaSorter.typeIsTransitivelyDependent(stateEngine, "TypeC", "TypeA"));
}