focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public void profileIncrement(Map<String, ? extends Number> properties) { }
@Test public void testProfileIncrement() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); Map<String, Integer> properties = new HashMap<>(); properties.put("n1", 111); properties.put("n2", 1121); mSensorsAPI.profileIncrement(properties); }
@Override public DictDataDO getDictData(Long id) { return dictDataMapper.selectById(id); }
@Test public void testGetDictData_dictType() { // mock 数据 DictDataDO dictDataDO = randomDictDataDO().setDictType("yunai").setValue("1"); dictDataMapper.insert(dictDataDO); DictDataDO dictDataDO02 = randomDictDataDO().setDictType("yunai").setValue("2"); dictDataMapper.insert(dictDataDO02); // 准备参数 String dictType = "yunai"; String value = "1"; // 调用 DictDataDO dbDictData = dictDataService.getDictData(dictType, value); // 断言 assertEquals(dictDataDO, dbDictData); }
@Nullable public static TNetworkAddress getComputeNodeHost(ImmutableMap<Long, ComputeNode> computeNodes, Reference<Long> computeNodeIdRef) { ComputeNode node = getComputeNode(computeNodes); if (node != null) { computeNodeIdRef.setRef(node.getId()); return new TNetworkAddress(node.getHost(), node.getBePort()); } return null; }
@Test public void testEmptyComputeNodeList() { Reference<Long> idRef = new Reference<>(); TNetworkAddress address = SimpleScheduler.getComputeNodeHost(null, idRef); Assert.assertNull(address); ImmutableMap.Builder<Long, ComputeNode> builder = ImmutableMap.builder(); address = SimpleScheduler.getComputeNodeHost(builder.build(), idRef); Assert.assertNull(address); }
@Deprecated @Restricted(DoNotUse.class) public static String resolve(ConfigurationContext context, String toInterpolate) { return context.getSecretSourceResolver().resolve(toInterpolate); }
@Test public void resolve_mixedMultipleEntriesWithDefault() { environment.set("FOO", "www.foo.io"); environment.set("protocol", "http"); assertThat(resolve("${protocol:-https}://${FOO:-www.bar.io}"), equalTo("http://www.foo.io")); }
@Override public ObjectNode encode(Criterion criterion, CodecContext context) { EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context); return encoder.encode(); }
@Test public void matchIPv6FlowLabelTest() { Criterion criterion = Criteria.matchIPv6FlowLabel(0xffffe); ObjectNode result = criterionCodec.encode(criterion, context); assertThat(result, matchesCriterion(criterion)); }
@Override public void start() { this.all = registry.meter(name(getName(), "all")); this.trace = registry.meter(name(getName(), "trace")); this.debug = registry.meter(name(getName(), "debug")); this.info = registry.meter(name(getName(), "info")); this.warn = registry.meter(name(getName(), "warn")); this.error = registry.meter(name(getName(), "error")); super.start(); }
@Test public void usesDefaultRegistry() { SharedMetricRegistries.add(InstrumentedAppender.DEFAULT_REGISTRY, registry); final InstrumentedAppender shared = new InstrumentedAppender(); shared.start(); when(event.getLevel()).thenReturn(Level.INFO); shared.doAppend(event); assertThat(SharedMetricRegistries.names()).contains(InstrumentedAppender.DEFAULT_REGISTRY); assertThat(registry.meter(METRIC_NAME_PREFIX + ".info").getCount()) .isEqualTo(1); }
@Override public Iterable<RedisClusterNode> clusterGetNodes() { return read(null, StringCodec.INSTANCE, CLUSTER_NODES); }
@Test public void testClusterGetNodes() { Iterable<RedisClusterNode> nodes = connection.clusterGetNodes(); assertThat(nodes).hasSize(6); for (RedisClusterNode redisClusterNode : nodes) { assertThat(redisClusterNode.getLinkState()).isNotNull(); assertThat(redisClusterNode.getFlags()).isNotEmpty(); assertThat(redisClusterNode.getHost()).isNotNull(); assertThat(redisClusterNode.getPort()).isNotNull(); assertThat(redisClusterNode.getId()).isNotNull(); assertThat(redisClusterNode.getType()).isNotNull(); if (redisClusterNode.getType() == NodeType.MASTER) { assertThat(redisClusterNode.getSlotRange().getSlots()).isNotEmpty(); } else { assertThat(redisClusterNode.getMasterId()).isNotNull(); } } }
public static <S> ServiceLoader<S> loadAll(final Class<S> clazz) { return ServiceLoader.load(clazz); }
@Test public void testLoadAll() { assertNotNull(SpiLoadFactory.loadAll(SpiInterface.class)); }
public static Schema schemaFromJavaBeanClass( TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) { return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier); }
@Test public void testNullable() { Schema schema = JavaBeanUtils.schemaFromJavaBeanClass( new TypeDescriptor<NullableBean>() {}, GetterTypeSupplier.INSTANCE); assertTrue(schema.getField("str").getType().getNullable()); assertFalse(schema.getField("anInt").getType().getNullable()); }
@VisibleForTesting static void processIterativeServerResponse(StreamingReducer reducer, ExecutorService executorService, Map<ServerRoutingInstance, Iterator<Server.ServerResponse>> serverResponseMap, long reduceTimeOutMs, ExecutionStatsAggregator aggregator) throws Exception { int cnt = 0; CompletableFuture<Void>[] futures = new CompletableFuture[serverResponseMap.size()]; // based on ideas from on https://stackoverflow.com/questions/19348248/waiting-on-a-list-of-future // and https://stackoverflow.com/questions/23301598/transform-java-future-into-a-completablefuture // Future created via ExecutorService.submit() can be created by CompletableFuture.supplyAsync() for (Map.Entry<ServerRoutingInstance, Iterator<Server.ServerResponse>> entry : serverResponseMap.entrySet()) { futures[cnt++] = CompletableFuture.runAsync(() -> { Iterator<Server.ServerResponse> streamingResponses = entry.getValue(); try { while (streamingResponses.hasNext()) { Server.ServerResponse streamingResponse = streamingResponses.next(); DataTable dataTable = DataTableFactory.getDataTable(streamingResponse.getPayload().asReadOnlyByteBuffer()); // null dataSchema is a metadata-only block. if (dataTable.getDataSchema() != null) { reducer.reduce(entry.getKey(), dataTable); } else { aggregator.aggregate(entry.getKey(), dataTable); } } } catch (Exception e) { LOGGER.error("Unable to process streaming response. Failure occurred!", e); throw new RuntimeException("Unable to process streaming response. Failure occurred!", e); } }, executorService); } CompletableFuture<Void> syncWaitPoint = CompletableFuture.allOf(futures); try { syncWaitPoint.get(reduceTimeOutMs, TimeUnit.MILLISECONDS); } catch (Exception ex) { syncWaitPoint.cancel(true); throw ex; } }
@Test public void testThreadExceptionTransfer() { // simulate a thread exception in gRPC call and verify that the thread can transfer the exception Iterator<Server.ServerResponse> mockedResponse = (Iterator<Server.ServerResponse>) mock(Iterator.class); when(mockedResponse.hasNext()).thenReturn(true); String exceptionMessage = "Some exception"; RuntimeException innerException = new RuntimeException(exceptionMessage); when(mockedResponse.next()).thenThrow(innerException); ExecutorService threadPoolService = Executors.newFixedThreadPool(1); ServerRoutingInstance routingInstance = new ServerRoutingInstance("localhost", 9527, TableType.OFFLINE); // supposedly we can use TestNG's annotation like @Test(expectedExceptions = { IOException.class }) to verify // here we hope to verify deeper to make sure the thrown exception is nested inside the exception assertTrue(verifyException(() -> { StreamingReduceService.processIterativeServerResponse(mock(StreamingReducer.class), threadPoolService, ImmutableMap.of(routingInstance, mockedResponse), 1000, mock(ExecutionStatsAggregator.class)); return null; }, cause -> cause.getMessage().contains(exceptionMessage)) ); }
@Override public void send(Object message) throws RemotingException { send(message, false); }
@Test void sendTest02() throws RemotingException { boolean sent = true; int message = 1; header.send(message, sent); List<Object> objects = channel.getSentObjects(); Assertions.assertEquals(objects.get(0).getClass(), Request.class); Request request = (Request) objects.get(0); Assertions.assertEquals(request.getVersion(), "2.0.2"); }
@Override protected List<MatchResult> match(List<String> specs) { ImmutableList.Builder<MatchResult> resultsBuilder = ImmutableList.builder(); for (String spec : specs) { try { final Set<Metadata> metadata = new HashSet<>(); if (spec.contains("**")) { // recursive glob int index = spec.indexOf("**"); metadata.addAll( matchRecursiveGlob(spec.substring(0, index + 1), spec.substring(index + 1))); } else { // normal glob final Path path = new Path(spec); final FileStatus[] fileStatuses = path.getFileSystem(configuration).globStatus(path); if (fileStatuses != null) { for (FileStatus fileStatus : fileStatuses) { metadata.add(toMetadata(fileStatus)); } } } if (metadata.isEmpty()) { resultsBuilder.add(MatchResult.create(Status.NOT_FOUND, Collections.emptyList())); } else { resultsBuilder.add(MatchResult.create(Status.OK, new ArrayList<>(metadata))); } } catch (IOException e) { resultsBuilder.add(MatchResult.create(Status.ERROR, e)); } } return resultsBuilder.build(); }
@Test public void testMatch() throws Exception { create("testFileAA", "testDataAA".getBytes(StandardCharsets.UTF_8)); create("testFileA", "testDataA".getBytes(StandardCharsets.UTF_8)); create("testFileB", "testDataB".getBytes(StandardCharsets.UTF_8)); // ensure files exist assertArrayEquals("testDataAA".getBytes(StandardCharsets.UTF_8), read("testFileAA", 0)); assertArrayEquals("testDataA".getBytes(StandardCharsets.UTF_8), read("testFileA", 0)); assertArrayEquals("testDataB".getBytes(StandardCharsets.UTF_8), read("testFileB", 0)); List<MatchResult> results = fileSystem.match(ImmutableList.of(testPath("testFileA*").toString())); assertEquals(Status.OK, Iterables.getOnlyElement(results).status()); assertThat( Iterables.getOnlyElement(results).metadata(), containsInAnyOrder( Metadata.builder() .setResourceId(testPath("testFileAA")) .setIsReadSeekEfficient(true) .setSizeBytes("testDataAA".getBytes(StandardCharsets.UTF_8).length) .setLastModifiedMillis(lastModified("testFileAA")) .build(), Metadata.builder() .setResourceId(testPath("testFileA")) .setIsReadSeekEfficient(true) .setSizeBytes("testDataA".getBytes(StandardCharsets.UTF_8).length) .setLastModifiedMillis(lastModified("testFileA")) .build())); }
@Override public ProcResult fetchResult() throws AnalysisException { Preconditions.checkNotNull(backend); BaseProcResult result = new BaseProcResult(); result.setNames(TITLE_NAMES); for (Map.Entry<String, DiskInfo> entry : backend.getDisks().entrySet()) { DiskInfo diskInfo = entry.getValue(); long dataUsedB = diskInfo.getDataUsedCapacityB(); long availB = diskInfo.getAvailableCapacityB(); long totalB = diskInfo.getTotalCapacityB(); long dataTotalB = diskInfo.getDataTotalCapacityB(); long otherUsedB = totalB - availB - dataUsedB; List<String> info = Lists.newArrayList(); // path info.add(entry.getKey()); // data used Pair<Double, String> dataUsedUnitPair = DebugUtil.getByteUint(dataUsedB); info.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(dataUsedUnitPair.first) + " " + dataUsedUnitPair.second); // other used Pair<Double, String> otherUsedUnitPair = DebugUtil.getByteUint(otherUsedB); info.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(otherUsedUnitPair.first) + " " + otherUsedUnitPair.second); // avail Pair<Double, String> availUnitPair = DebugUtil.getByteUint(availB); info.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(availUnitPair.first) + " " + availUnitPair.second); // total Pair<Double, String> totalUnitPair = DebugUtil.getByteUint(totalB); info.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(totalUnitPair.first) + " " + totalUnitPair.second); // total used percent double used = 0.0; if (totalB <= 0) { used = 0.0; } else { used = (double) (totalB - availB) * 100 / totalB; } info.add(String.format("%.2f", used) + " %"); // state info.add(diskInfo.getState().name()); // path hash info.add(String.valueOf(diskInfo.getPathHash())); // medium TStorageMedium medium = diskInfo.getStorageMedium(); if (medium == null) { info.add("N/A"); } else { info.add(medium.name()); } // tablet num info.add(String.valueOf(GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getTabletNumByBackendIdAndPathHash( backend.getId(), diskInfo.getPathHash()))); // data total Pair<Double, String> 
dataTotalUnitPair = DebugUtil.getByteUint(dataTotalB); info.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(dataTotalUnitPair.first) + " " + dataTotalUnitPair.second); // data used percent double dataUsed = 0.0; if (dataTotalB <= 0) { dataUsed = 0.0; } else { dataUsed = (double) dataUsedB * 100 / dataTotalB; } info.add(String.format("%.2f", dataUsed) + " %"); result.addRow(info); } return result; }
@Test public void testResultNormal() throws AnalysisException { BackendProcNode node = new BackendProcNode(b1); ProcResult result; // fetch result result = node.fetchResult(); Assert.assertNotNull(result); Assert.assertTrue(result instanceof BaseProcResult); Assert.assertTrue(result.getRows().size() >= 1); Assert.assertEquals( Lists.newArrayList("RootPath", "DataUsedCapacity", "OtherUsedCapacity", "AvailCapacity", "TotalCapacity", "TotalUsedPct", "State", "PathHash", "StorageMedium", "TabletNum", "DataTotalCapacity", "DataUsedPct"), result.getColumnNames()); }
public static String getNamespace(final String originalFilename) { checkThreePart(originalFilename); final String[] threeParts = getThreePart(originalFilename); final String suffix = threeParts[2]; if (!suffix.contains(".")) { throw new BadRequestException(originalFilename + " namespace and format is invalid!"); } final int lastDotIndex = suffix.lastIndexOf("."); final String namespace = suffix.substring(0, lastDotIndex); // format after last character '.' final String format = suffix.substring(lastDotIndex + 1); if (!ConfigFileFormat.isValidFormat(format)) { throw new BadRequestException(originalFilename + " format is invalid!"); } ConfigFileFormat configFileFormat = ConfigFileFormat.fromString(format); if (configFileFormat.equals(ConfigFileFormat.Properties)) { return namespace; } else { // compatibility of other format return namespace + "." + format; } }
@Test public void getNamespace() { final String application = ConfigFileUtils.getNamespace("234+default+application.properties"); assertEquals("application", application); final String applicationYml = ConfigFileUtils.getNamespace("abc+default+application.yml"); assertEquals("application.yml", applicationYml); }
@Override public DataInputStatus emitNext(DataOutput<T> output) throws Exception { if (sortedInput != null) { return emitNextSortedRecord(output); } DataInputStatus inputStatus = wrappedInput.emitNext(forwardingDataOutput); if (inputStatus == DataInputStatus.END_OF_DATA) { endSorting(); return emitNextSortedRecord(output); } return inputStatus; }
@Test void simpleVariableLengthKeySorting() throws Exception { CollectingDataOutput<Integer> collectingDataOutput = new CollectingDataOutput<>(); CollectionDataInput<Integer> input = new CollectionDataInput<>( Arrays.asList( new StreamRecord<>(1, 3), new StreamRecord<>(1, 1), new StreamRecord<>(2, 1), new StreamRecord<>(2, 3), new StreamRecord<>(1, 2), new StreamRecord<>(2, 2))); MockEnvironment environment = MockEnvironment.builder().build(); SortingDataInput<Integer, String> sortingDataInput = new SortingDataInput<>( input, new IntSerializer(), new StringSerializer(), (KeySelector<Integer, String>) value -> "" + value, environment.getMemoryManager(), environment.getIOManager(), true, 1.0, new Configuration(), new DummyInvokable(), new ExecutionConfig()); DataInputStatus inputStatus; do { inputStatus = sortingDataInput.emitNext(collectingDataOutput); } while (inputStatus != DataInputStatus.END_OF_INPUT); assertThat(collectingDataOutput.events) .containsExactly( new StreamRecord<>(1, 1), new StreamRecord<>(1, 2), new StreamRecord<>(1, 3), new StreamRecord<>(2, 1), new StreamRecord<>(2, 2), new StreamRecord<>(2, 3)); }
public static Result find(List<Path> files, Consumer<LogEvent> logger) { List<String> mainClasses = new ArrayList<>(); for (Path file : files) { // Makes sure classFile is valid. if (!Files.exists(file)) { logger.accept(LogEvent.debug("MainClassFinder: " + file + " does not exist; ignoring")); continue; } if (!Files.isRegularFile(file)) { logger.accept( LogEvent.debug("MainClassFinder: " + file + " is not a regular file; skipping")); continue; } if (!file.toString().endsWith(".class")) { logger.accept( LogEvent.debug("MainClassFinder: " + file + " is not a class file; skipping")); continue; } MainClassVisitor mainClassVisitor = new MainClassVisitor(); try (InputStream classFileInputStream = Files.newInputStream(file)) { ClassReader reader = new ClassReader(classFileInputStream); reader.accept(mainClassVisitor, 0); if (mainClassVisitor.visitedMainClass) { mainClasses.add(reader.getClassName().replace('/', '.')); } } catch (IllegalArgumentException ex) { throw new UnsupportedOperationException( "Check the full stace trace, and if the root cause is from ASM ClassReader about " + "unsupported class file version, see " + "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md" + "#i-am-seeing-unsupported-class-file-major-version-when-building", ex); } catch (ArrayIndexOutOfBoundsException ignored) { // Not a valid class file (thrown by ClassReader if it reads an invalid format) logger.accept(LogEvent.warn("Invalid class file found: " + file)); } catch (IOException ignored) { // Could not read class file. logger.accept(LogEvent.warn("Could not read file: " + file)); } } if (mainClasses.size() == 1) { // Valid class found. return Result.success(mainClasses.get(0)); } if (mainClasses.isEmpty()) { // No main class found anywhere. return Result.mainClassNotFound(); } // More than one main class found. return Result.multipleMainClasses(mainClasses); }
@Test public void testFindMainClass_subdirectories() throws URISyntaxException, IOException { Path rootDirectory = Paths.get(Resources.getResource("core/class-finder-tests/subdirectories").toURI()); MainClassFinder.Result mainClassFinderResult = MainClassFinder.find(new DirectoryWalker(rootDirectory).walk(), logEventConsumer); Assert.assertSame(Result.Type.MAIN_CLASS_FOUND, mainClassFinderResult.getType()); MatcherAssert.assertThat( mainClassFinderResult.getFoundMainClass(), CoreMatchers.containsString("multi.layered.HelloWorld")); }
public static TimestampRange of(Timestamp from, Timestamp to) { return new TimestampRange(from, to); }
@Test public void testTimestampRangeWhenFromIsLessThanTo() { assertEquals( new TimestampRange(Timestamp.ofTimeMicroseconds(10L), Timestamp.ofTimeMicroseconds(11L)), TimestampRange.of(Timestamp.ofTimeMicroseconds(10L), Timestamp.ofTimeMicroseconds(11L))); }
@Override public void executeWithLock(Runnable task, LockConfiguration lockConfig) { try { executeWithLock((Task) task::run, lockConfig); } catch (RuntimeException | Error e) { throw e; } catch (Throwable throwable) { // Should not happen throw new IllegalStateException(throwable); } }
@Test void shouldExecuteWithResult() throws Throwable { mockLockFor(lockConfig); TaskResult<String> result = executor.executeWithLock(() -> "result", lockConfig); assertThat(result.wasExecuted()).isTrue(); assertThat(result.getResult()).isEqualTo("result"); }
private VlanId() { super(UNTAGGED); }
@Test public void testEquality() { VlanId vlan1 = VlanId.vlanId("None"); VlanId vlan2 = VlanId.vlanId((short) -10); VlanId vlan3 = VlanId.vlanId((short) 100); VlanId vlan4 = VlanId.vlanId((short) 200); new EqualsTester().addEqualityGroup(VlanId.vlanId(), vlan1) .addEqualityGroup(vlan2) .addEqualityGroup(vlan3) .addEqualityGroup(vlan4) .addEqualityGroup(VlanId.vlanId((short) 10)) .testEquals(); }
public boolean isAbilitySupportedByServer(AbilityKey abilityKey) { return rpcClient.getConnectionAbility(abilityKey) == AbilityStatus.SUPPORTED; }
@Test void testIsAbilitySupportedByServer1() { when(this.rpcClient.getConnectionAbility(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC)).thenReturn( AbilityStatus.SUPPORTED); assertTrue(client.isAbilitySupportedByServer(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC)); verify(this.rpcClient, times(1)).getConnectionAbility(AbilityKey.SERVER_SUPPORT_PERSISTENT_INSTANCE_BY_GRPC); }
public static Map<String, List<String>> queryString(DataMap dataMap){ Map<String, List<String>> result = new HashMap<>(); DataMap processedDataMap = processProjections(dataMap, result); iterate("", processedDataMap, result); return result; }
@Test /** * Test query string representation of a DataMap representing a compound key */ public void testCompoundKeyDataMapQueryString() throws Exception { DataMap dataMap = new DataMap(); dataMap.put("memberID", 2); dataMap.put("groupID", 1); Map<String, List<String>> result = QueryParamsDataMap.queryString(dataMap); Assert.assertEquals("2", result.get("memberID").get(0)); Assert.assertEquals("1", result.get("groupID").get(0)); }
@Override public void addJobStorageOnChangeListener(StorageProviderChangeListener listener) { onChangeListeners.add(listener); startTimerToSendUpdates(); }
@Test void jobStatsChangeListenersAreNotifiedOfJobStats() { final JobStatsChangeListenerForTest changeListener = new JobStatsChangeListenerForTest(); storageProvider.addJobStorageOnChangeListener(changeListener); await() .untilAsserted(() -> assertThat(changeListener.jobStats).isNotNull()); }
@JsonIgnore public void updateForRestart( long iterationId, WorkflowInstance.Status newStatus, WorkflowInstance.Status oldStatus, WorkflowRollupOverview oldOverview) { Checks.checkTrue( isForeachIterationRestartable(iterationId) && stats != null && stats.get(oldStatus) > 0, "Invalid: pending action tries to restart a non-restartable iteration: " + iterationId); long cnt = stats.get(oldStatus); if (cnt > 1) { stats.put(oldStatus, stats.get(oldStatus) - 1); } else { stats.remove(oldStatus); } addRunningOne(newStatus, null); rollup.segregate(oldOverview); details.resetIterationDetail(iterationId, newStatus, oldStatus); if (restartInfo == null) { restartInfo = new HashSet<>(); } restartInfo.add(iterationId); }
@Test public void testUpdateForRestart() throws Exception { ForeachStepOverview overview = loadObject( "fixtures/instances/sample-foreach-step-overview.json", ForeachStepOverview.class); AssertHelper.assertThrows( "should throw exception for invalid restart", IllegalArgumentException.class, "Invalid: pending action tries to restart a non-restartable iteration", () -> overview.updateForRestart( 123L, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED, null)); overview.addOne(123L, WorkflowInstance.Status.FAILED, null); overview.refreshDetail(); assertEquals(1L, overview.getStats().get(WorkflowInstance.Status.FAILED).longValue()); assertFalse(overview.getStats().containsKey(WorkflowInstance.Status.CREATED)); assertEquals( Collections.singletonList(new ForeachDetails.Interval(123L, 123L)), overview.getDetails().getInfo().get(WorkflowInstance.Status.FAILED)); assertFalse(overview.getDetails().getInfo().containsKey(WorkflowInstance.Status.CREATED)); WorkflowRollupOverview rollup = new WorkflowRollupOverview(); rollup.setTotalLeafCount(1); overview.updateForRestart( 123L, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED, rollup); assertFalse(overview.isForeachIterationRestartable(123L)); assertFalse(overview.getStats().containsKey(WorkflowInstance.Status.FAILED)); assertEquals(127L, overview.getRunningStats().get(WorkflowInstance.Status.CREATED).longValue()); assertEquals( Collections.singletonList(new ForeachDetails.Interval(123L, 123L)), overview.getDetails().getPendingInfo().get(WorkflowInstance.Status.CREATED)); assertFalse(overview.getDetails().getInfo().containsKey(WorkflowInstance.Status.FAILED)); assertEquals(Collections.singleton(123L), overview.getRestartInfo()); assertEquals(160305, overview.getRollup().getTotalLeafCount()); }
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) { return Optional.ofNullable(HANDLERS.get(step.getClass())) .map(h -> h.handle(this, schema, step)) .orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass())); }
@Test public void shouldResolveSchemaForStreamSource() { final StreamSource step = new StreamSource( PROPERTIES, "foo", formats, Optional.empty(), SCHEMA, OptionalInt.of(SystemColumns.CURRENT_PSEUDOCOLUMN_VERSION_NUMBER) ); // When: final LogicalSchema result = resolver.resolve(step, SCHEMA); // Then: assertThat(result, is(SCHEMA.withPseudoAndKeyColsInValue(false))); }
public static Future<Void> reconcileJmxSecret(Reconciliation reconciliation, SecretOperator secretOperator, SupportsJmx cluster) { return secretOperator.getAsync(reconciliation.namespace(), cluster.jmx().secretName()) .compose(currentJmxSecret -> { Secret desiredJmxSecret = cluster.jmx().jmxSecret(currentJmxSecret); if (desiredJmxSecret != null) { // Desired secret is not null => should be updated return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), desiredJmxSecret) .map((Void) null); } else if (currentJmxSecret != null) { // Desired secret is null but current is not => we should delete the secret return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), null) .map((Void) null); } else { // Both current and desired secret are null => nothing to do return Future.succeededFuture(); } }); }
@Test public void testEnabledJmxWithoutAuthWithMissingSecret(VertxTestContext context) { KafkaClusterSpec spec = new KafkaClusterSpecBuilder().withNewJmxOptions().endJmxOptions().build(); JmxModel jmx = new JmxModel(NAMESPACE, NAME, LABELS, OWNER_REFERENCE, spec); SecretOperator mockSecretOps = mock(SecretOperator.class); when(mockSecretOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); ReconcilerUtils.reconcileJmxSecret(Reconciliation.DUMMY_RECONCILIATION, mockSecretOps, new MockJmxCluster(jmx)) .onComplete(context.succeeding(v -> context.verify(() -> { verify(mockSecretOps, never()).reconcile(any(), any(), any(), any()); async.flag(); }))); }
public static void main(String[] args) { try { FSConfigToCSConfigArgumentHandler fsConfigConversionArgumentHandler = new FSConfigToCSConfigArgumentHandler(); int exitCode = fsConfigConversionArgumentHandler.parseAndConvert(args); if (exitCode != 0) { LOG.error(FATAL, "Error while starting FS configuration conversion, " + "see previous error messages for details!"); } exitFunction.accept(exitCode); } catch (Throwable t) { LOG.error(FATAL, "Error while starting FS configuration conversion!", t); exitFunction.accept(-1); } }
@Test public void testNegativeReturnValueOnError() { FSConfigToCSConfigConverterMain.main(new String[] { "--print", "--yarnsiteconfig"}); assertEquals("Exit code", -1, exitFunc.exitCode); }
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { final ThreadPool pool = ThreadPoolFactory.get("list", concurrency); try { final String prefix = this.createPrefix(directory); if(log.isDebugEnabled()) { log.debug(String.format("List with prefix %s", prefix)); } final Path bucket = containerService.getContainer(directory); final AttributedList<Path> objects = new AttributedList<>(); String priorLastKey = null; String priorLastVersionId = null; long revision = 0L; String lastKey = null; boolean hasDirectoryPlaceholder = bucket.isRoot() || containerService.isContainer(directory); do { final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked( bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), prefix, String.valueOf(Path.DELIMITER), new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"), priorLastKey, priorLastVersionId, false); // Amazon S3 returns object versions in the order in which they were stored, with the most recently stored returned first. 
for(BaseVersionOrDeleteMarker marker : chunk.getItems()) { final String key = URIEncoder.decode(marker.getKey()); if(new SimplePathPredicate(PathNormalizer.compose(bucket, key)).test(directory)) { if(log.isDebugEnabled()) { log.debug(String.format("Skip placeholder key %s", key)); } hasDirectoryPlaceholder = true; continue; } final PathAttributes attr = new PathAttributes(); attr.setVersionId(marker.getVersionId()); if(!StringUtils.equals(lastKey, key)) { // Reset revision for next file revision = 0L; } attr.setRevision(++revision); attr.setDuplicate(marker.isDeleteMarker() && marker.isLatest() || !marker.isLatest()); if(marker.isDeleteMarker()) { attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, String.valueOf(true))); } attr.setModificationDate(marker.getLastModified().getTime()); attr.setRegion(bucket.attributes().getRegion()); if(marker instanceof S3Version) { final S3Version object = (S3Version) marker; attr.setSize(object.getSize()); if(StringUtils.isNotBlank(object.getEtag())) { attr.setETag(StringUtils.remove(object.getEtag(), "\"")); // The ETag will only be the MD5 of the object data when the object is stored as plaintext or encrypted // using SSE-S3. If the object is encrypted using another method (such as SSE-C or SSE-KMS) the ETag is // not the MD5 of the object data. attr.setChecksum(Checksum.parse(StringUtils.remove(object.getEtag(), "\""))); } if(StringUtils.isNotBlank(object.getStorageClass())) { attr.setStorageClass(object.getStorageClass()); } } final Path f = new Path(directory.isDirectory() ? 
directory : directory.getParent(), PathNormalizer.name(key), EnumSet.of(Path.Type.file), attr); if(metadata) { f.withAttributes(attributes.find(f)); } objects.add(f); lastKey = key; } final String[] prefixes = chunk.getCommonPrefixes(); final List<Future<Path>> folders = new ArrayList<>(); for(String common : prefixes) { if(new SimplePathPredicate(PathNormalizer.compose(bucket, URIEncoder.decode(common))).test(directory)) { continue; } folders.add(this.submit(pool, bucket, directory, URIEncoder.decode(common))); } for(Future<Path> f : folders) { try { objects.add(Uninterruptibles.getUninterruptibly(f)); } catch(ExecutionException e) { log.warn(String.format("Listing versioned objects failed with execution failure %s", e.getMessage())); for(Throwable cause : ExceptionUtils.getThrowableList(e)) { Throwables.throwIfInstanceOf(cause, BackgroundException.class); } throw new DefaultExceptionMappingService().map(Throwables.getRootCause(e)); } } priorLastKey = null != chunk.getNextKeyMarker() ? URIEncoder.decode(chunk.getNextKeyMarker()) : null; priorLastVersionId = chunk.getNextVersionIdMarker(); listener.chunk(directory, objects); } while(priorLastKey != null); if(!hasDirectoryPlaceholder && objects.isEmpty()) { // Only for AWS if(S3Session.isAwsHostname(session.getHost().getHostname())) { if(StringUtils.isEmpty(RequestEntityRestStorageService.findBucketInHostname(session.getHost()))) { if(log.isWarnEnabled()) { log.warn(String.format("No placeholder found for directory %s", directory)); } throw new NotfoundException(directory.getAbsolute()); } } else { // Handle missing prefix for directory placeholders in Minio final VersionOrDeleteMarkersChunk chunk = session.getClient().listVersionedObjectsChunked( bucket.isRoot() ? 
StringUtils.EMPTY : bucket.getName(), String.format("%s%s", this.createPrefix(directory.getParent()), directory.getName()), String.valueOf(Path.DELIMITER), 1, null, null, false); if(Arrays.stream(chunk.getCommonPrefixes()).map(URIEncoder::decode).noneMatch(common -> common.equals(prefix))) { throw new NotfoundException(directory.getAbsolute()); } } } return objects; } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Listing directory {0} failed", e, directory); } finally { // Cancel future tasks pool.shutdown(false); } }
@Test public void testDirectory() throws Exception { final Path bucket = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); final Path directory = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final S3VersionedObjectListService feature = new S3VersionedObjectListService(session, acl); assertTrue(feature.list(bucket, new DisabledListProgressListener()).contains(directory)); assertTrue(feature.list(directory, new DisabledListProgressListener()).isEmpty()); new S3DefaultDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(feature.list(bucket, new DisabledListProgressListener()).contains(directory)); try { feature.list(directory, new DisabledListProgressListener()); fail(); } catch(NotfoundException e) { } }
public boolean shouldSample(String service, int sample, int duration) { SamplingPolicy samplingPolicy = this.samplingPolicySettings.get().get(service); if (samplingPolicy == null) { return shouldSampleByDefault(sample, duration); } return shouldSampleService(samplingPolicy, sample, duration); }
@Test @Timeout(20) public void testTraceLatencyThresholdDynamicUpdate() throws InterruptedException { ConfigWatcherRegister register = new TraceLatencyThresholdMockConfigWatcherRegister(3); TraceSamplingPolicyWatcher watcher = new TraceSamplingPolicyWatcher(moduleConfig, provider); register.registerConfigChangeWatcher(watcher); register.start(); // Default duration is -1, so 3000 must not be sampled,until updating to 3000 while (!watcher.shouldSample("", 10000, 3000)) { Thread.sleep(2000); } Assertions.assertTrue(watcher.shouldSample("", 10000, 3001)); }
@Override public int run(String[] args) throws Exception { YarnConfiguration yarnConf = getConf() == null ? new YarnConfiguration() : new YarnConfiguration( getConf()); boolean isHAEnabled = yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED, YarnConfiguration.DEFAULT_RM_HA_ENABLED); if (args.length < 1) { printUsage("", isHAEnabled); return -1; } int exitCode = -1; int i = 0; String cmd = args[i++]; exitCode = 0; if ("-help".equals(cmd)) { if (i < args.length) { printUsage(args[i], isHAEnabled); } else { printHelp("", isHAEnabled); } return exitCode; } if (USAGE.containsKey(cmd)) { if (isHAEnabled) { return super.run(args); } System.out.println("Cannot run " + cmd + " when ResourceManager HA is not enabled"); return -1; } // // verify that we have enough command line parameters // String subClusterId = StringUtils.EMPTY; if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) || "-refreshNodesResources".equals(cmd) || "-refreshServiceAcl".equals(cmd) || "-refreshUserToGroupsMappings".equals(cmd) || "-refreshSuperUserGroupsConfiguration".equals(cmd) || "-refreshClusterMaxPriority".equals(cmd)) { subClusterId = parseSubClusterId(args, isHAEnabled); // If we enable Federation mode, the number of args may be either one or three. // Example: -refreshQueues or -refreshQueues -subClusterId SC-1 if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) { printUsage(cmd, isHAEnabled); return exitCode; } else if (!isYarnFederationEnabled(getConf()) && args.length != 1) { // If Federation mode is not enabled, then the number of args can only be one. 
// Example: -refreshQueues printUsage(cmd, isHAEnabled); return exitCode; } } // If it is federation mode, we will print federation mode information if (isYarnFederationEnabled(getConf())) { System.out.println("Using YARN Federation mode."); } try { if ("-refreshQueues".equals(cmd)) { exitCode = refreshQueues(subClusterId); } else if ("-refreshNodes".equals(cmd)) { exitCode = handleRefreshNodes(args, cmd, isHAEnabled); } else if ("-refreshNodesResources".equals(cmd)) { exitCode = refreshNodesResources(subClusterId); } else if ("-refreshUserToGroupsMappings".equals(cmd)) { exitCode = refreshUserToGroupsMappings(subClusterId); } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) { exitCode = refreshSuperUserGroupsConfiguration(subClusterId); } else if ("-refreshAdminAcls".equals(cmd)) { exitCode = refreshAdminAcls(subClusterId); } else if ("-refreshServiceAcl".equals(cmd)) { exitCode = refreshServiceAcls(subClusterId); } else if ("-refreshClusterMaxPriority".equals(cmd)) { exitCode = refreshClusterMaxPriority(subClusterId); } else if ("-getGroups".equals(cmd)) { String[] usernames = Arrays.copyOfRange(args, i, args.length); exitCode = getGroups(usernames); } else if ("-updateNodeResource".equals(cmd)) { exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId); } else if ("-addToClusterNodeLabels".equals(cmd)) { exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled); } else if ("-removeFromClusterNodeLabels".equals(cmd)) { exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled); } else if ("-replaceLabelsOnNode".equals(cmd)) { exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled); } else { exitCode = -1; System.err.println(cmd.substring(1) + ": Unknown command"); printUsage("", isHAEnabled); } } catch (IllegalArgumentException arge) { exitCode = -1; System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); printUsage(cmd, isHAEnabled); } catch (RemoteException e) { // // This is a error returned 
by hadoop server. Print // out the first line of the error message, ignore the stack trace. exitCode = -1; try { String[] content; content = e.getLocalizedMessage().split("\n"); System.err.println(cmd.substring(1) + ": " + content[0]); } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); } } catch (Exception e) { exitCode = -1; System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage()); } if (null != localNodeLabelsManager) { localNodeLabelsManager.stop(); } return exitCode; }
@Test public void testTransitionToActive() throws Exception { String[] args = {"-transitionToActive", "rm1"}; // RM HA is disabled. // transitionToActive should not be executed assertEquals(-1, rmAdminCLI.run(args)); verify(haadmin, never()).transitionToActive( any(HAServiceProtocol.StateChangeRequestInfo.class)); // Now RM HA is enabled. // transitionToActive should be executed assertEquals(0, rmAdminCLIWithHAEnabled.run(args)); verify(haadmin).transitionToActive( any(HAServiceProtocol.StateChangeRequestInfo.class)); // HAAdmin#isOtherTargetNodeActive should check state of non-target node. verify(haadmin, times(1)).getServiceStatus(); }
@Override public void unRegister() { stop(); }
@Test public void unRegister() { registerCenterService.unRegister(); final Optional<Object> isStopped = ReflectUtils.getFieldValue(registerCenterService, "isStopped"); Assert.assertTrue(isStopped.isPresent()); Assert.assertTrue(isStopped.get() instanceof AtomicBoolean); Assert.assertTrue(((AtomicBoolean) isStopped.get()).get()); }
public static String getHostAddress() throws SocketException, UnknownHostException { boolean isIPv6Preferred = Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses")); DatagramSocket ds = new DatagramSocket(); try { ds.connect(isIPv6Preferred ? Inet6Address.getByName(DUMMY_OUT_IPV6) : Inet4Address.getByName(DUMMY_OUT_IPV4), HTTP_PORT); } catch (java.io.UncheckedIOException e) { LOGGER.warn(e.getMessage()); if (isIPv6Preferred) { LOGGER.warn("No IPv6 route available on host, falling back to IPv4"); ds.connect(Inet4Address.getByName(DUMMY_OUT_IPV4), HTTP_PORT); } else { LOGGER.warn("No IPv4 route available on host, falling back to IPv6"); ds.connect(Inet6Address.getByName(DUMMY_OUT_IPV6), HTTP_PORT); } } InetAddress localAddress = ds.getLocalAddress(); if (localAddress.isAnyLocalAddress()) { localAddress = isIPv6Preferred ? getLocalIPv6Address() : InetAddress.getLocalHost(); } return localAddress.getHostAddress(); }
@Test(description = "Test getHostAddress with preferIPv6Addresses=true in IPv4 only environment") public void testGetHostAddressIPv4EnvIPv6Preferred() { System.setProperty("java.net.preferIPv6Addresses", "true"); InetAddress mockInetAddress = mock(InetAddress.class); when(mockInetAddress.isAnyLocalAddress()).thenReturn(false); when(mockInetAddress.getHostAddress()).thenReturn(LOCAL_ADDRESS_IPV4); try (MockedConstruction<DatagramSocket> mockedConstructionDatagramSocket = mockConstruction(DatagramSocket.class, initDatagramSocket(mockInetAddress, NetworkEnv.IPV4))) { String hostAddress = NetUtils.getHostAddress(); DatagramSocket mockDatagramSocket = mockedConstructionDatagramSocket.constructed().get(0); assertEquals(LOCAL_ADDRESS_IPV4, hostAddress); assertEquals(1, mockedConstructionDatagramSocket.constructed().size()); verify(mockDatagramSocket, times(2)).connect(any(), anyInt()); } catch (SocketException | UnknownHostException e) { Assert.fail("Should not throw: " + e.getMessage()); } }
public ServerStatus getServerStatus() { return serverStatus; }
@Test void testGetServerStatus() { ServerStatusManager serverStatusManager = new ServerStatusManager(protocolManager, switchDomain); ServerStatus serverStatus = serverStatusManager.getServerStatus(); assertEquals(ServerStatus.STARTING, serverStatus); }
@Override public boolean canPass(Node node, int acquireCount) { return canPass(node, acquireCount, false); }
@Test public void testPaceCanNotPass() throws InterruptedException { WarmUpRateLimiterController controller = new WarmUpRateLimiterController(10, 10, 10, 3); Node node = mock(Node.class); when(node.passQps()).thenReturn(100d); when(node.previousPassQps()).thenReturn(100d); assertTrue(controller.canPass(node, 1)); assertFalse(controller.canPass(node, 1)); }
public static BigDecimal cast(final Integer value, final int precision, final int scale) { if (value == null) { return null; } return cast(value.longValue(), precision, scale); }
@Test public void shouldCastInt() { // When: final BigDecimal decimal = DecimalUtil.cast((Integer)1, 2, 1); // Then: assertThat(decimal, is(new BigDecimal("1.0"))); }
@Override public Class<?>[] getParameterClasses() { return parameterClasses; }
@Test void getParameterClasses() { Assertions.assertArrayEquals(new Class[] {String.class}, method.getParameterClasses()); }
@Override public String scriptLoad(String luaScript) { return commandExecutor.get(scriptLoadAsync(luaScript)); }
@Test public void testScriptLoad() { redisson.getBucket("foo").set("bar"); String r = redisson.getScript().scriptLoad("return redis.call('get', 'foo')"); Assertions.assertEquals("282297a0228f48cd3fc6a55de6316f31422f5d17", r); String r1 = redisson.getScript().evalSha(Mode.READ_ONLY, "282297a0228f48cd3fc6a55de6316f31422f5d17", RScript.ReturnType.VALUE, Collections.emptyList()); Assertions.assertEquals("bar", r1); }
@Override public int drainTo(Collection<? super E> c) { throw new UnsupportedOperationException(); }
@Test(expected = UnsupportedOperationException.class) public void drainMaxItems() { queue.drainTo(new LinkedList<>(), 10); }
@Override public void notify(Metrics metrics) { WithMetadata withMetadata = (WithMetadata) metrics; MetricsMetaInfo meta = withMetadata.getMeta(); int scope = meta.getScope(); if (!DefaultScopeDefine.inServiceCatalog(scope) && !DefaultScopeDefine.inServiceInstanceCatalog(scope) && !DefaultScopeDefine.inEndpointCatalog(scope) && !DefaultScopeDefine.inServiceRelationCatalog(scope) && !DefaultScopeDefine.inServiceInstanceRelationCatalog(scope) && !DefaultScopeDefine.inEndpointRelationCatalog(scope)) { return; } MetaInAlarm metaInAlarm; if (DefaultScopeDefine.inServiceCatalog(scope)) { final String serviceId = meta.getId(); final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId( serviceId); ServiceMetaInAlarm serviceMetaInAlarm = new ServiceMetaInAlarm(); serviceMetaInAlarm.setMetricsName(meta.getMetricsName()); serviceMetaInAlarm.setId(serviceId); serviceMetaInAlarm.setName(serviceIDDefinition.getName()); metaInAlarm = serviceMetaInAlarm; } else if (DefaultScopeDefine.inServiceInstanceCatalog(scope)) { final String instanceId = meta.getId(); final IDManager.ServiceInstanceID.InstanceIDDefinition instanceIDDefinition = IDManager.ServiceInstanceID.analysisId( instanceId); final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = IDManager.ServiceID.analysisId( instanceIDDefinition.getServiceId()); ServiceInstanceMetaInAlarm instanceMetaInAlarm = new ServiceInstanceMetaInAlarm(); instanceMetaInAlarm.setMetricsName(meta.getMetricsName()); instanceMetaInAlarm.setId(instanceId); instanceMetaInAlarm.setName(instanceIDDefinition.getName() + " of " + serviceIDDefinition.getName()); metaInAlarm = instanceMetaInAlarm; } else if (DefaultScopeDefine.inEndpointCatalog(scope)) { final String endpointId = meta.getId(); final IDManager.EndpointID.EndpointIDDefinition endpointIDDefinition = IDManager.EndpointID.analysisId( endpointId); final IDManager.ServiceID.ServiceIDDefinition serviceIDDefinition = 
IDManager.ServiceID.analysisId( endpointIDDefinition.getServiceId()); EndpointMetaInAlarm endpointMetaInAlarm = new EndpointMetaInAlarm(); endpointMetaInAlarm.setMetricsName(meta.getMetricsName()); endpointMetaInAlarm.setId(meta.getId()); endpointMetaInAlarm.setName( endpointIDDefinition.getEndpointName() + " in " + serviceIDDefinition.getName()); metaInAlarm = endpointMetaInAlarm; } else if (DefaultScopeDefine.inServiceRelationCatalog(scope)) { final String serviceRelationId = meta.getId(); final IDManager.ServiceID.ServiceRelationDefine serviceRelationDefine = IDManager.ServiceID.analysisRelationId( serviceRelationId); final IDManager.ServiceID.ServiceIDDefinition sourceIdDefinition = IDManager.ServiceID.analysisId( serviceRelationDefine.getSourceId()); final IDManager.ServiceID.ServiceIDDefinition destIdDefinition = IDManager.ServiceID.analysisId( serviceRelationDefine.getDestId()); ServiceRelationMetaInAlarm serviceRelationMetaInAlarm = new ServiceRelationMetaInAlarm(); serviceRelationMetaInAlarm.setMetricsName(meta.getMetricsName()); serviceRelationMetaInAlarm.setId(serviceRelationId); serviceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " to " + destIdDefinition.getName()); metaInAlarm = serviceRelationMetaInAlarm; } else if (DefaultScopeDefine.inServiceInstanceRelationCatalog(scope)) { final String instanceRelationId = meta.getId(); final IDManager.ServiceInstanceID.ServiceInstanceRelationDefine serviceRelationDefine = IDManager.ServiceInstanceID.analysisRelationId( instanceRelationId); final IDManager.ServiceInstanceID.InstanceIDDefinition sourceIdDefinition = IDManager.ServiceInstanceID.analysisId( serviceRelationDefine.getSourceId()); final IDManager.ServiceID.ServiceIDDefinition sourceServiceId = IDManager.ServiceID.analysisId( sourceIdDefinition.getServiceId()); final IDManager.ServiceInstanceID.InstanceIDDefinition destIdDefinition = IDManager.ServiceInstanceID.analysisId( serviceRelationDefine.getDestId()); final 
IDManager.ServiceID.ServiceIDDefinition destServiceId = IDManager.ServiceID.analysisId( destIdDefinition.getServiceId()); ServiceInstanceRelationMetaInAlarm instanceRelationMetaInAlarm = new ServiceInstanceRelationMetaInAlarm(); instanceRelationMetaInAlarm.setMetricsName(meta.getMetricsName()); instanceRelationMetaInAlarm.setId(instanceRelationId); instanceRelationMetaInAlarm.setName(sourceIdDefinition.getName() + " of " + sourceServiceId.getName() + " to " + destIdDefinition.getName() + " of " + destServiceId.getName()); metaInAlarm = instanceRelationMetaInAlarm; } else if (DefaultScopeDefine.inEndpointRelationCatalog(scope)) { final String endpointRelationId = meta.getId(); final IDManager.EndpointID.EndpointRelationDefine endpointRelationDefine = IDManager.EndpointID.analysisRelationId( endpointRelationId); final IDManager.ServiceID.ServiceIDDefinition sourceService = IDManager.ServiceID.analysisId( endpointRelationDefine.getSourceServiceId()); final IDManager.ServiceID.ServiceIDDefinition destService = IDManager.ServiceID.analysisId( endpointRelationDefine.getDestServiceId()); EndpointRelationMetaInAlarm endpointRelationMetaInAlarm = new EndpointRelationMetaInAlarm(); endpointRelationMetaInAlarm.setMetricsName(meta.getMetricsName()); endpointRelationMetaInAlarm.setId(endpointRelationId); endpointRelationMetaInAlarm.setName(endpointRelationDefine.getSource() + " in " + sourceService.getName() + " to " + endpointRelationDefine.getDest() + " in " + destService.getName()); metaInAlarm = endpointRelationMetaInAlarm; } else { return; } List<RunningRule> runningRules = core.findRunningRule(meta.getMetricsName()); if (runningRules == null) { return; } runningRules.forEach(rule -> rule.in(metaInAlarm, metrics)); }
@Test public void testNotifyWithServiceInstanceRelationCatalog() { String metricsName = "service-instance-relation-metrics"; when(metadata.getMetricsName()).thenReturn(metricsName); when(DefaultScopeDefine.inServiceInstanceRelationCatalog(0)).thenReturn(true); final String serviceInstanceRelationId = IDManager.ServiceInstanceID.buildRelationId(new IDManager.ServiceInstanceID.ServiceInstanceRelationDefine( IDManager.ServiceInstanceID.buildId(IDManager.ServiceID.buildId("from-service", true), "from-service-instance"), IDManager.ServiceInstanceID.buildId(IDManager.ServiceID.buildId("dest-service", true), "dest-service-instance") )); when(metadata.getId()).thenReturn(serviceInstanceRelationId); ArgumentCaptor<MetaInAlarm> metaCaptor = ArgumentCaptor.forClass(MetaInAlarm.class); notifyHandler.notify(metrics); verify(rule).in(metaCaptor.capture(), any()); MetaInAlarm metaInAlarm = metaCaptor.getValue(); assertTrue(metaInAlarm instanceof ServiceInstanceRelationMetaInAlarm); assertEquals("ZnJvbS1zZXJ2aWNl.1_ZnJvbS1zZXJ2aWNlLWluc3RhbmNl", metaInAlarm.getId0()); assertEquals("ZGVzdC1zZXJ2aWNl.1_ZGVzdC1zZXJ2aWNlLWluc3RhbmNl", metaInAlarm.getId1()); assertEquals(DefaultScopeDefine.SERVICE_INSTANCE_RELATION_CATALOG_NAME, metaInAlarm.getScope()); assertEquals("from-service-instance of from-service to dest-service-instance of dest-service", metaInAlarm.getName()); assertEquals(DefaultScopeDefine.SERVICE_INSTANCE_RELATION, metaInAlarm.getScopeId()); }
@Override public final void getSize(@NonNull SizeReadyCallback cb) { sizeDeterminer.getSize(cb); }
@Test public void getSize_withBothWrapContent_usesDisplayDimens() { LayoutParams layoutParams = new FrameLayout.LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT); view.setLayoutParams(layoutParams); setDisplayDimens(200, 300); activity.visible(); view.layout(0, 0, 0, 0); target.getSize(cb); verify(cb).onSizeReady(300, 300); }
public static CallRoutingTable fromTsv(final Reader inputReader) throws IOException { try (final BufferedReader reader = new BufferedReader(inputReader)) { // use maps to silently dedupe CidrBlocks Map<CidrBlock.IpV4CidrBlock, List<String>> ipv4Map = new HashMap<>(); Map<CidrBlock.IpV6CidrBlock, List<String>> ipv6Map = new HashMap<>(); Map<CallRoutingTable.GeoKey, List<String>> ipGeoTable = new HashMap<>(); String line; while((line = reader.readLine()) != null) { if(line.isBlank()) { continue; } List<String> splits = Arrays.stream(line.split(WHITESPACE_REGEX)).filter(s -> !s.isBlank()).toList(); if (splits.size() < 2) { throw new IllegalStateException("Invalid row, expected some key and list of values"); } List<String> datacenters = splits.subList(1, splits.size()); switch (guessLineType(splits)) { case v4 -> { CidrBlock cidrBlock = CidrBlock.parseCidrBlock(splits.getFirst()); if(!(cidrBlock instanceof CidrBlock.IpV4CidrBlock)) { throw new IllegalArgumentException("Expected an ipv4 cidr block"); } ipv4Map.put((CidrBlock.IpV4CidrBlock) cidrBlock, datacenters); } case v6 -> { CidrBlock cidrBlock = CidrBlock.parseCidrBlock(splits.getFirst()); if(!(cidrBlock instanceof CidrBlock.IpV6CidrBlock)) { throw new IllegalArgumentException("Expected an ipv6 cidr block"); } ipv6Map.put((CidrBlock.IpV6CidrBlock) cidrBlock, datacenters); } case Geo -> { String[] geo = splits.getFirst().split("-"); if(geo.length < 3) { throw new IllegalStateException("Geo row key invalid, expected atleast continent, country, and protocol"); } String continent = geo[0]; String country = geo[1]; Optional<String> subdivision = geo.length > 3 ? Optional.of(geo[2]) : Optional.empty(); CallRoutingTable.Protocol protocol = CallRoutingTable.Protocol.valueOf(geo[geo.length - 1].toLowerCase()); CallRoutingTable.GeoKey tableKey = new CallRoutingTable.GeoKey( continent, country, subdivision, protocol ); ipGeoTable.put(tableKey, datacenters); } } } return new CallRoutingTable( ipv4Map, ipv6Map, ipGeoTable ); } }
@Test public void testParserMixedSections() throws IOException { var input = """ 1.123.123.0/24\t datacenter-1 2001:db8:b0aa::/48\t \tdatacenter-1 2001:db8:b0ab::/48 \tdatacenter-3\tdatacenter-1 datacenter-2 2001:db8:b0ac::/48\tdatacenter-2\tdatacenter-1 192.1.12.0/24\t \tdatacenter-1\t\t datacenter-2 datacenter-3 193.123.123.0/24\tdatacenter-1\tdatacenter-2 SA-SR-v4 datacenter-3 SA-UY-v4\tdatacenter-3\tdatacenter-1\tdatacenter-2 NA-US-VA-v6 datacenter-2 \tdatacenter-1 """; var actual = CallRoutingTableParser.fromTsv(new StringReader(input)); var expected = new CallRoutingTable( Map.of( (CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("1.123.123.0/24"), List.of("datacenter-1"), (CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("192.1.12.0/24"), List.of("datacenter-1", "datacenter-2", "datacenter-3"), (CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("193.123.123.0/24"), List.of("datacenter-1", "datacenter-2") ), Map.of( (CidrBlock.IpV6CidrBlock) CidrBlock.parseCidrBlock("2001:db8:b0aa::/48"), List.of("datacenter-1"), (CidrBlock.IpV6CidrBlock) CidrBlock.parseCidrBlock("2001:db8:b0ab::/48"), List.of("datacenter-3", "datacenter-1", "datacenter-2"), (CidrBlock.IpV6CidrBlock) CidrBlock.parseCidrBlock("2001:db8:b0ac::/48"), List.of("datacenter-2", "datacenter-1") ), Map.of( new CallRoutingTable.GeoKey("SA", "SR", Optional.empty(), CallRoutingTable.Protocol.v4), List.of("datacenter-3"), new CallRoutingTable.GeoKey("SA", "UY", Optional.empty(), CallRoutingTable.Protocol.v4), List.of("datacenter-3", "datacenter-1", "datacenter-2"), new CallRoutingTable.GeoKey("NA", "US", Optional.of("VA"), CallRoutingTable.Protocol.v6), List.of("datacenter-2", "datacenter-1") ) ); assertThat(actual).isEqualTo(expected); }
public static void optimize(Pipeline pipeline) { // Compute which Schema fields are (or conversely, are not) accessed in a pipeline. FieldAccessVisitor fieldAccessVisitor = new FieldAccessVisitor(); pipeline.traverseTopologically(fieldAccessVisitor); // Find transforms in this pipeline which both: 1. support projection pushdown and 2. output // unused fields. ProjectionProducerVisitor pushdownProjectorVisitor = new ProjectionProducerVisitor(fieldAccessVisitor.getPCollectionFieldAccess()); pipeline.traverseTopologically(pushdownProjectorVisitor); Map<ProjectionProducer<PTransform<?, ?>>, Map<PCollection<?>, FieldAccessDescriptor>> pushdownOpportunities = pushdownProjectorVisitor.getPushdownOpportunities(); // Translate target PCollections to their output TupleTags. PCollectionOutputTagVisitor outputTagVisitor = new PCollectionOutputTagVisitor(pushdownOpportunities); pipeline.traverseTopologically(outputTagVisitor); Map<ProjectionProducer<PTransform<?, ?>>, Map<TupleTag<?>, FieldAccessDescriptor>> taggedFieldAccess = outputTagVisitor.getTaggedFieldAccess(); // For each eligible transform, replace it with a modified transform that omits the unused // fields. for (Entry<ProjectionProducer<PTransform<?, ?>>, Map<TupleTag<?>, FieldAccessDescriptor>> entry : taggedFieldAccess.entrySet()) { for (Entry<TupleTag<?>, FieldAccessDescriptor> outputFields : entry.getValue().entrySet()) { LOG.info( "Optimizing transform {}: output {} will contain reduced field set {}", entry.getKey(), outputFields.getKey(), outputFields.getValue().fieldNamesAccessed()); } PTransformMatcher matcher = application -> application.getTransform() == entry.getKey(); PushdownOverrideFactory<?, ?> overrideFactory = new PushdownOverrideFactory<>(entry.getValue()); pipeline.replaceAll(ImmutableList.of(PTransformOverride.of(matcher, overrideFactory))); } }
@Test public void testIntermediateProducer() { Pipeline p = Pipeline.create(); SimpleSource source = new SimpleSource(FieldAccessDescriptor.withFieldNames("foo", "bar", "baz")); IntermediateTransformWithPushdown originalT = new IntermediateTransformWithPushdown( FieldAccessDescriptor.withFieldNames("foo", "bar", "baz")); FieldAccessDescriptor downstreamFieldAccess = FieldAccessDescriptor.withFieldNames("foo", "bar"); p.apply(source).apply(originalT).apply(new FieldAccessTransform(downstreamFieldAccess)); // TODO(https://github.com/apache/beam/issues/21359) Support pushdown on intermediate // transforms. For now, test that the pushdown optimizer ignores immediate transforms. ProjectionPushdownOptimizer.optimize(p); Assert.assertTrue(pipelineHasTransform(p, originalT)); }
protected boolean useRackAwareAssignment(Set<String> consumerRacks, Set<String> partitionRacks, Map<TopicPartition, Set<String>> racksPerPartition) { if (consumerRacks.isEmpty() || Collections.disjoint(consumerRacks, partitionRacks)) return false; else if (preferRackAwareLogic) return true; else { return !racksPerPartition.values().stream().allMatch(partitionRacks::equals); } }
@Test public void testUseRackAwareAssignment() { AbstractPartitionAssignor assignor = new RangeAssignor(); String[] racks = new String[] {"a", "b", "c"}; Set<String> allRacks = Utils.mkSet(racks); Set<String> twoRacks = Utils.mkSet("a", "b"); Map<TopicPartition, Set<String>> partitionsOnAllRacks = new HashMap<>(); Map<TopicPartition, Set<String>> partitionsOnSubsetOfRacks = new HashMap<>(); for (int i = 0; i < 10; i++) { TopicPartition tp = new TopicPartition("topic", i); partitionsOnAllRacks.put(tp, allRacks); partitionsOnSubsetOfRacks.put(tp, Utils.mkSet(racks[i % racks.length])); } assertFalse(assignor.useRackAwareAssignment(Collections.emptySet(), Collections.emptySet(), partitionsOnAllRacks)); assertFalse(assignor.useRackAwareAssignment(Collections.emptySet(), allRacks, partitionsOnAllRacks)); assertFalse(assignor.useRackAwareAssignment(allRacks, Collections.emptySet(), Collections.emptyMap())); assertFalse(assignor.useRackAwareAssignment(Utils.mkSet("d"), allRacks, partitionsOnAllRacks)); assertFalse(assignor.useRackAwareAssignment(allRacks, allRacks, partitionsOnAllRacks)); assertFalse(assignor.useRackAwareAssignment(twoRacks, allRacks, partitionsOnAllRacks)); assertFalse(assignor.useRackAwareAssignment(Utils.mkSet("a", "d"), allRacks, partitionsOnAllRacks)); assertTrue(assignor.useRackAwareAssignment(allRacks, allRacks, partitionsOnSubsetOfRacks)); assertTrue(assignor.useRackAwareAssignment(twoRacks, allRacks, partitionsOnSubsetOfRacks)); assertTrue(assignor.useRackAwareAssignment(Utils.mkSet("a", "d"), allRacks, partitionsOnSubsetOfRacks)); assignor.preferRackAwareLogic = true; assertFalse(assignor.useRackAwareAssignment(Collections.emptySet(), Collections.emptySet(), partitionsOnAllRacks)); assertFalse(assignor.useRackAwareAssignment(Collections.emptySet(), allRacks, partitionsOnAllRacks)); assertFalse(assignor.useRackAwareAssignment(allRacks, Collections.emptySet(), Collections.emptyMap())); assertFalse(assignor.useRackAwareAssignment(Utils.mkSet("d"), 
allRacks, partitionsOnAllRacks)); assertTrue(assignor.useRackAwareAssignment(allRacks, allRacks, partitionsOnAllRacks)); assertTrue(assignor.useRackAwareAssignment(twoRacks, allRacks, partitionsOnAllRacks)); assertTrue(assignor.useRackAwareAssignment(allRacks, allRacks, partitionsOnSubsetOfRacks)); assertTrue(assignor.useRackAwareAssignment(twoRacks, allRacks, partitionsOnSubsetOfRacks)); }
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
@Test public void shouldFailDropTableWhenAnotherTableIsReadingTheTable() { // Given: setupKsqlEngineWithSharedRuntimeEnabled(); KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "create table bar as select * from test2;" + "create table foo as select * from bar;", ksqlConfig, Collections.emptyMap() ); // When: final KsqlStatementException e = assertThrows( KsqlStatementException.class, () -> KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "drop table bar;", ksqlConfig, Collections.emptyMap() ) ); // Then: assertThat(e, rawMessage(is( "Cannot drop BAR.\n" + "The following streams and/or tables read from this source: [FOO].\n" + "You need to drop them before dropping BAR."))); assertThat(e, statementText(is("drop table bar;"))); }
@Nonnull public <T> T getInstance(@Nonnull Class<T> type) { return getInstance(new Key<>(type)); }
@Test public void whenDefaultSpecified_shouldProvideInstance() throws Exception { injector = builder.bindDefault(Umm.class, MyUmm.class).build(); assertThat(injector.getInstance(Umm.class)).isInstanceOf(MyUmm.class); }
@Transactional(rollbackFor = Exception.class) @Override public Long createDiyTemplate(DiyTemplateCreateReqVO createReqVO) { // 校验名称唯一 validateNameUnique(null, createReqVO.getName()); // 插入 DiyTemplateDO diyTemplate = DiyTemplateConvert.INSTANCE.convert(createReqVO); diyTemplate.setProperty("{}"); diyTemplateMapper.insert(diyTemplate); // 创建默认页面 createDefaultPage(diyTemplate); // 返回 return diyTemplate.getId(); }
@Test public void testCreateDiyTemplate_success() { // 准备参数 DiyTemplateCreateReqVO reqVO = randomPojo(DiyTemplateCreateReqVO.class); // 调用 Long diyTemplateId = diyTemplateService.createDiyTemplate(reqVO); // 断言 assertNotNull(diyTemplateId); // 校验记录的属性是否正确 DiyTemplateDO diyTemplate = diyTemplateMapper.selectById(diyTemplateId); assertPojoEquals(reqVO, diyTemplate); }
abstract public long[] getBlockListAsLongs();
@Test public void testEmptyReport() { BlockListAsLongs blocks = checkReport(); assertArrayEquals( new long[] { 0, 0, -1, -1, -1 }, blocks.getBlockListAsLongs()); }
public void put(final String clientId, final PushConnection pushConnection) { pushConnection.setSecureToken(mintNewSecureToken()); clientPushConnectionMap.put(clientId, pushConnection); }
@Test void testPutAssignsTokenToConnection() { pushConnectionRegistry.put("clientId1", pushConnection); verify(pushConnection).setSecureToken(anyString()); }
Record convert(Object data) { return convert(data, null); }
@Test @SuppressWarnings("unchecked") public void testMapToString() throws Exception { Table table = mock(Table.class); when(table.schema()).thenReturn(SIMPLE_SCHEMA); RecordConverter converter = new RecordConverter(table, config); Map<String, Object> nestedData = createNestedMapData(); Record record = converter.convert(nestedData); String str = (String) record.getField("st"); Map<String, Object> map = (Map<String, Object>) MAPPER.readValue(str, Map.class); assertThat(map).hasSize(MAPPED_CNT); }
@Override public @NotNull Iterator<Integer> iterator() { return new HashSetIterator<Integer>() { @Override public Integer next() { return nextEntry().key; } }; }
@Test public void iterator() { final Set<Integer> tested = new IntHashSet(); final Set<Integer> set = new java.util.HashSet<>(); for (int i = 0; i < 10000; ++i) { tested.add(i); set.add(i); } for (Integer key : tested) { Assert.assertTrue(set.remove(key)); } Assert.assertEquals(0, set.size()); }
public void updateMetrics() { recordMessagesConsumed(metricCollectors.currentConsumptionRate()); recordTotalMessagesConsumed(metricCollectors.totalMessageConsumption()); recordTotalBytesConsumed(metricCollectors.totalBytesConsumption()); recordMessagesProduced(metricCollectors.currentProductionRate()); recordMessageConsumptionByQueryStats(metricCollectors.currentConsumptionRateByQuery()); recordErrorRate(metricCollectors.currentErrorRate()); }
// Verifies produced messages show up in the messages-produced-per-sec metric under
// both the current and the legacy metric group names.
@Test public void shouldRecordMessagesProduced() { final int numMessagesProduced = 500; produceMessages(numMessagesProduced); engineMetrics.updateMetrics(); final double value = getMetricValue("messages-produced-per-sec"); final double legacyValue = getMetricValueLegacy("messages-produced-per-sec"); assertThat(Math.floor(value), closeTo(numMessagesProduced / 100, 0.01)); assertThat(Math.floor(legacyValue), closeTo(numMessagesProduced / 100, 0.01)); }
// Buffers each polled raw record at the tail of the queue (preserving arrival
// order), refreshes the head record, and reports the resulting queue size.
int addRawRecords(final Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) {
    rawRecords.forEach(fifoQueue::addLast);
    // Recompute the head entry now that new data is available.
    updateHead();
    return size();
}
// Verifies that a record carrying a negative timestamp is rejected by the
// FailOnInvalidTimestamp extractor with the full descriptive StreamsException message.
@Test public void shouldThrowOnNegativeTimestamp() { final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList( new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME, 0, 0, recordKey, recordValue, new RecordHeaders(), Optional.empty())); final RecordQueue queue = new RecordQueue( new TopicPartition("topic", 1), mockSourceNodeWithMetrics, new FailOnInvalidTimestamp(), new LogAndContinueExceptionHandler(), new InternalMockProcessorContext(), new LogContext()); final StreamsException exception = assertThrows( StreamsException.class, () -> queue.addRawRecords(records) ); assertThat(exception.getMessage(), equalTo("Input record ConsumerRecord(topic = topic, partition = 1, " + "leaderEpoch = null, offset = 1, CreateTime = -1, deliveryCount = null, serialized key size = 0, " + "serialized value size = 0, headers = RecordHeaders(headers = [], isReadOnly = false), key = 1, value = 10) " + "has invalid (negative) timestamp. Possibly because a pre-0.10 producer client was used to write this record " + "to Kafka without embedding a timestamp, or because the input topic was created before upgrading the Kafka " + "cluster to 0.10+. Use a different TimestampExtractor to process this data.")); }
// Materializes this scan into a SparkBatch, passing the grouping key type, the
// planned task groups, the expected schema, and this scan's hashCode.
@Override public Batch toBatch() { return new SparkBatch( sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode()); }
// Verifies that a pushed-down bucket(5, data) <= 2 predicate prunes the planned
// input partitions, and that its negation selects the complementary set.
@Test public void testPartitionedBucketString() throws Exception { createPartitionedTable(spark, tableName, "bucket(5, data)"); SparkScanBuilder builder = scanBuilder(); BucketFunction.BucketString function = new BucketFunction.BucketString(); UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(5), fieldRef("data"))); Predicate predicate = new Predicate("<=", expressions(udf, intLit(2))); pushFilters(builder, predicate); Batch scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(6); // NOT LTEQ builder = scanBuilder(); predicate = new Not(predicate); pushFilters(builder, predicate); scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(4); }
// Static factory returning a fresh Iterables<T> transform instance.
public static <T> Iterables<T> iterables() { return new Iterables<>(); }
// Verifies that flattening an empty input of iterables produces no elements.
@Test @Category(ValidatesRunner.class) public void testFlattenIterablesEmpty() { PCollection<Iterable<String>> input = p.apply( Create.<Iterable<String>>of(NO_LINES) .withCoder(IterableCoder.of(StringUtf8Coder.of()))); PCollection<String> output = input.apply(Flatten.iterables()); PAssert.that(output).containsInAnyOrder(NO_LINES_ARRAY); p.run(); }
// Updates a product brand after validating the id exists and the new name is unique.
@Override public void updateBrand(ProductBrandUpdateReqVO updateReqVO) { // 校验存在 (validate existence)
 validateBrandExists(updateReqVO.getId()); validateBrandNameUnique(updateReqVO.getId(), updateReqVO.getName()); // 更新 (perform update)
 ProductBrandDO updateObj = ProductBrandConvert.INSTANCE.convert(updateReqVO); brandMapper.updateById(updateObj); }
// Verifies updating an unknown brand id fails with BRAND_NOT_EXISTS.
@Test public void testUpdateBrand_notExists() { // 准备参数 (prepare request)
 ProductBrandUpdateReqVO reqVO = randomPojo(ProductBrandUpdateReqVO.class); // 调用, 并断言异常 (invoke and assert the service exception)
 assertServiceException(() -> brandService.updateBrand(reqVO), BRAND_NOT_EXISTS); }
// Delegates formatting of a Kafka authorization failure to the configured errorMessages.
public String kafkaAuthorizationErrorMessage(final Exception e) { return errorMessages.kafkaAuthorizationErrorMessage(e); }
// Verifies the handler surfaces the stubbed message for an authorization failure.
@Test public void shouldReturnForbiddenKafkaErrorMessageString() { final String error = errorHandler.kafkaAuthorizationErrorMessage(exception); assertThat(error, is(SOME_KAFKA_ERROR)); }
// Creates a checkpoint state output stream for the given scope. Entropy-injecting
// file systems and SHARED scope force absolute paths; the stream's final flag is
// !absolutePath (presumably "allow relative paths" — confirm against
// FsCheckpointStateOutputStream). Buffer size is at least the in-memory threshold.
@Override public FsCheckpointStateOutputStream createCheckpointStateOutputStream( CheckpointedStateScope scope) throws IOException { Path target = getTargetPath(scope); int bufferSize = Math.max(writeBufferSize, fileStateThreshold); // Whether the file system dynamically injects entropy into the file paths. final boolean entropyInjecting = EntropyInjector.isEntropyInjecting(filesystem, target); final boolean absolutePath = entropyInjecting || scope == CheckpointedStateScope.SHARED; return new FsCheckpointStateOutputStream( target, filesystem, bufferSize, fileStateThreshold, !absolutePath); }
// Verifies that a write at the file-size threshold is flushed to disk, while
// subsequent smaller writes remain buffered (file size unchanged).
@Test @SuppressWarnings("ConstantConditions") void testWriteFlushesIfAboveThreshold() throws IOException { int fileSizeThreshold = 100; final FsCheckpointStreamFactory factory = createFactory( FileSystem.getLocalFileSystem(), fileSizeThreshold, fileSizeThreshold); final FsCheckpointStreamFactory.FsCheckpointStateOutputStream stream = factory.createCheckpointStateOutputStream(CheckpointedStateScope.EXCLUSIVE); stream.write(new byte[fileSizeThreshold]); File[] files = new File(exclusiveStateDir.toUri()).listFiles(); assertThat(files).hasSize(1); File file = files[0]; assertThat(file).hasSize(fileSizeThreshold); stream.write(new byte[fileSizeThreshold - 1]); // should buffer without flushing stream.write(127); // should buffer without flushing assertThat(file).hasSize(fileSizeThreshold); }
// Convenience overload: resolves the destination URL using this plan's configured
// src and dest values.
public String destinationURL(File rootPath, File file) { return destinationURL(rootPath, file, getSrc(), getDest()); }
// Verifies the destination URL resolves to the configured dest folder ("logs")
// for this src/file combination.
@Test public void shouldProvideAppendFilePathToDestWhenPathProvidedAreSame() { ArtifactPlan artifactPlan = new ArtifactPlan(ArtifactPlanType.file, "test/a/b/a.log", "logs"); assertThat(artifactPlan.destinationURL(new File("pipelines/pipelineA"), new File("pipelines/pipelineA/test/b/a.log"))).isEqualTo("logs"); }
// Computes the next instant at which to notify the account: the preferred local time
// in the account's inferred zone offset, or — if no zone can be inferred — the local
// time of day at which the account was created (system zone). If today's candidate
// has already passed, the same time tomorrow is returned.
public static Instant getNextRecommendedNotificationTime(final Account account, final LocalTime preferredTime, final Clock clock) { final ZonedDateTime candidateNotificationTime = getZoneOffset(account, clock) .map(zoneOffset -> ZonedDateTime.now(zoneOffset).with(preferredTime)) .orElseGet(() -> { // We couldn't find a reasonable timezone for the account for some reason, so make an educated guess at a // reasonable time to send a notification based on the account's creation time. final Instant accountCreation = Instant.ofEpochMilli(account.getPrimaryDevice().getCreated()); final LocalTime accountCreationLocalTime = LocalTime.ofInstant(accountCreation, ZoneId.systemDefault()); return ZonedDateTime.now(ZoneId.systemDefault()).with(accountCreationLocalTime); }); if (candidateNotificationTime.toInstant().isBefore(clock.instant())) { // We've missed our opportunity today, so go for the same time tomorrow return candidateNotificationTime.plusDays(1).toInstant(); } else { // The best time to send a notification hasn't happened yet today return candidateNotificationTime.toInstant(); } }
// Exercises both branches: a phone number resolvable to a timezone (Berlin), and an
// unresolvable number falling back to the account-creation local time — each checked
// both before and after the candidate notification time.
@Test void getNextRecommendedNotificationTime() { { final Account account = mock(Account.class); // The account has a phone number that can be resolved to a region with known timezones when(account.getNumber()).thenReturn(PhoneNumberUtil.getInstance().format( PhoneNumberUtil.getInstance().getExampleNumber("DE"), PhoneNumberUtil.PhoneNumberFormat.E164)); final ZoneId berlinZoneId = ZoneId.of("Europe/Berlin"); final ZonedDateTime beforeNotificationTime = ZonedDateTime.now(berlinZoneId).with(LocalTime.of(13, 0)); assertEquals( beforeNotificationTime.with(LocalTime.of(14, 0)).toInstant(), SchedulingUtil.getNextRecommendedNotificationTime(account, LocalTime.of(14, 0), Clock.fixed(beforeNotificationTime.toInstant(), berlinZoneId))); final ZonedDateTime afterNotificationTime = ZonedDateTime.now(berlinZoneId).with(LocalTime.of(15, 0)); assertEquals( afterNotificationTime.with(LocalTime.of(14, 0)).plusDays(1).toInstant(), SchedulingUtil.getNextRecommendedNotificationTime(account, LocalTime.of(14, 0), Clock.fixed(afterNotificationTime.toInstant(), berlinZoneId))); } { final Account account = mock(Account.class); final Device primaryDevice = mock(Device.class); // The account does not have a phone number that can be connected to a region/time zone when(account.getNumber()).thenReturn("Not a parseable number"); when(account.getPrimaryDevice()).thenReturn(primaryDevice); when(primaryDevice.getCreated()) .thenReturn(ZonedDateTime.of(2024, 7, 10, 9, 53, 12, 0, ZoneId.systemDefault()).toInstant().toEpochMilli()); final ZonedDateTime beforeNotificationTime = ZonedDateTime.now(ZoneId.systemDefault()).with(LocalTime.of(9, 0)); assertEquals( beforeNotificationTime.with(LocalTime.of(9, 53, 12)).toInstant(), SchedulingUtil.getNextRecommendedNotificationTime(account, LocalTime.of(14, 0), Clock.fixed(beforeNotificationTime.toInstant(), ZoneId.systemDefault()))); final ZonedDateTime afterNotificationTime = ZonedDateTime.now(ZoneId.systemDefault()).with(LocalTime.of(10, 0)); assertEquals( 
afterNotificationTime.with(LocalTime.of(9, 53, 12)).plusDays(1).toInstant(), SchedulingUtil.getNextRecommendedNotificationTime(account, LocalTime.of(14, 0), Clock.fixed(afterNotificationTime.toInstant(), ZoneId.systemDefault()))); } }
// Northbound removal: checks the FLOWRULE_WRITE permission, then applies a
// remove-operation batch built from the given rules.
@Override public void removeFlowRules(FlowRule... flowRules) { checkPermission(FLOWRULE_WRITE); apply(buildFlowRuleOperations(false, null, flowRules)); }
// Verifies that removing rules from the northbound API marks them PENDING_REMOVE
// (rules stay counted until the provider confirms), and removing an already-pending
// rule is a no-op.
@Test public void removeFlowRules() { FlowRule f1 = addFlowRule(1); FlowRule f2 = addFlowRule(2); FlowRule f3 = addFlowRule(3); assertEquals("3 rules should exist", 3, flowCount()); FlowEntry fe1 = new DefaultFlowEntry(f1); FlowEntry fe2 = new DefaultFlowEntry(f2); FlowEntry fe3 = new DefaultFlowEntry(f3); providerService.pushFlowMetrics(DID, ImmutableList.of(fe1, fe2, fe3)); validateEvents(RULE_ADD_REQUESTED, RULE_ADD_REQUESTED, RULE_ADD_REQUESTED, RULE_ADDED, RULE_ADDED, RULE_ADDED); mgr.removeFlowRules(f1, f2); //removing from north, so no events generated validateEvents(RULE_REMOVE_REQUESTED, RULE_REMOVE_REQUESTED); assertEquals("3 rule should exist", 3, flowCount()); assertTrue("Entries should be pending remove.", validateState(ImmutableMap.of( f1, FlowEntryState.PENDING_REMOVE, f2, FlowEntryState.PENDING_REMOVE, f3, FlowEntryState.ADDED))); mgr.removeFlowRules(f1); assertEquals("3 rule should still exist", 3, flowCount()); }
// HDFS-compat append(): appending to an existing Alluxio path is unsupported and
// throws IOException; for a new path the file is created recursively. Alluxio
// exceptions are translated to the HDFS-facing exception types.
@Override public FSDataOutputStream append(Path path, int bufferSize, Progressable progress) throws IOException { LOG.debug("append({}, {}, {})", path, bufferSize, progress); if (mStatistics != null) { mStatistics.incrementWriteOps(1); } AlluxioURI uri = getAlluxioPath(path); try { if (mFileSystem.exists(uri)) { throw new IOException( "append() to existing Alluxio path is currently not supported: " + uri); } return new FSDataOutputStream( mFileSystem.createFile(uri, CreateFilePOptions.newBuilder().setRecursive(true).build()), mStatistics); } catch (InvalidArgumentRuntimeException e) { throw new IllegalArgumentException(e); } catch (AlluxioRuntimeException e) { throw toHdfsIOException(e); } catch (AlluxioException e) { throw new IOException(e); } }
// Verifies append() on an existing path fails with the documented IOException message.
@Test public void appendExistingNotSupported() throws Exception { Path path = new Path("/file"); alluxio.client.file.FileSystem alluxioFs = mock(alluxio.client.file.FileSystem.class); when(alluxioFs.exists(new AlluxioURI(HadoopUtils.getPathWithoutScheme(path)))) .thenReturn(true); try (FileSystem alluxioHadoopFs = new FileSystem(alluxioFs)) { alluxioHadoopFs.append(path, 100); fail("append() of existing file is expected to fail"); } catch (IOException e) { assertEquals("append() to existing Alluxio path is currently not supported: " + path, e.getMessage()); } }
// Arithmetic mean of the snapshot values; an empty snapshot has a mean of zero.
@Override
public double getMean() {
    if (values.length == 0) {
        return 0;
    }
    double total = 0;
    for (int i = 0; i < values.length; i++) {
        total += values[i];
    }
    return total / values.length;
}
// Verifies the documented edge case: an empty snapshot reports a mean of zero.
@Test public void calculatesAMeanOfZeroForAnEmptySnapshot() throws Exception { final Snapshot emptySnapshot = new UniformSnapshot(new long[]{ }); assertThat(emptySnapshot.getMean()) .isZero(); }
// Asynchronously fetches a single message at (topic, queueId, offset) from the local
// store for the named broker, falling back to a remote fetch when no local store
// exists. Returns (message-or-null, error text, needRetry); retry is only suggested
// for OFFSET_FOUND_NULL results coming from a TieredMessageStore.
public CompletableFuture<Triple<MessageExt, String, Boolean>> getMessageAsync(String topic, long offset, int queueId, String brokerName, boolean deCompressBody) { MessageStore messageStore = brokerController.getMessageStoreByBrokerName(brokerName); if (messageStore != null) { return messageStore.getMessageAsync(innerConsumerGroupName, topic, queueId, offset, 1, null) .thenApply(result -> { if (result == null) { LOG.warn("getMessageResult is null , innerConsumerGroupName {}, topic {}, offset {}, queueId {}", innerConsumerGroupName, topic, offset, queueId); return Triple.of(null, "getMessageResult is null", false); // local store, so no retry } List<MessageExt> list = decodeMsgList(result, deCompressBody); if (list == null || list.isEmpty()) { // OFFSET_FOUND_NULL returned by TieredMessageStore indicates exception occurred boolean needRetry = GetMessageStatus.OFFSET_FOUND_NULL.equals(result.getStatus()) && messageStore instanceof TieredMessageStore; LOG.warn("Can not get msg , topic {}, offset {}, queueId {}, needRetry {}, result is {}", topic, offset, queueId, needRetry, result); return Triple.of(null, "Can not get msg", needRetry); } return Triple.of(list.get(0), "", false); }); } else { return getMessageFromRemoteAsync(topic, offset, queueId, brokerName); } }
// Smoke test: put + getMessageAsync complete without throwing against a mocked store.
@Test public void getMessageAsyncTest() { when(brokerController.peekMasterBroker()).thenReturn(brokerController); when(brokerController.getMessageStoreByBrokerName(any())).thenReturn(defaultMessageStore); Assertions.assertThatCode(() -> escapeBridge.putMessage(messageExtBrokerInner)).doesNotThrowAnyException(); Assertions.assertThatCode(() -> escapeBridge.getMessageAsync(TEST_TOPIC, 0, DEFAULT_QUEUE_ID, BROKER_NAME, false)).doesNotThrowAnyException(); }
// Reads two big-endian bytes from the buffer and widens them to an unsigned
// 16-bit value in the range [0, 65535]. Advances the buffer position by two.
public static int readUint16BE(ByteBuffer buf) throws BufferUnderflowException {
    return buf.order(ByteOrder.BIG_ENDIAN).getShort() & 0xFFFF;
}
// Verifies big-endian unsigned-16 decoding at several offsets. NOTE(review): this
// exercises the byte[]+offset overload, not the ByteBuffer overload shown above.
@Test public void testReadUint16BE() { assertEquals(258L, ByteUtils.readUint16BE(new byte[]{1, 2}, 0)); assertEquals(258L, ByteUtils.readUint16BE(new byte[]{1, 2, 3, 4}, 0)); assertEquals(772L, ByteUtils.readUint16BE(new byte[]{0, 0, 3, 4}, 2)); }
// Creates the backend handler for a parsed statement: empty statements are skipped;
// DistSQL gets its own statement context while regular SQL is bound against the
// current database metadata; the resulting query context is stored on the session
// before delegating to the queryContext-based overload.
public static ProxyBackendHandler newInstance(final DatabaseType databaseType, final String sql, final SQLStatement sqlStatement, final ConnectionSession connectionSession, final HintValueContext hintValueContext) throws SQLException { if (sqlStatement instanceof EmptyStatement) { return new SkipBackendHandler(sqlStatement); } SQLStatementContext sqlStatementContext = sqlStatement instanceof DistSQLStatement ? new DistSQLStatementContext((DistSQLStatement) sqlStatement) : new SQLBindEngine(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData(), connectionSession.getCurrentDatabaseName(), hintValueContext).bind(sqlStatement, Collections.emptyList()); QueryContext queryContext = new QueryContext(sqlStatementContext, sql, Collections.emptyList(), hintValueContext, connectionSession.getConnectionContext(), ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData()); connectionSession.setQueryContext(queryContext); return newInstance(databaseType, queryContext, connectionSession, false); }
// Verifies DistSQL statements are routed to the update/query DistSQL handlers.
@Test void assertNewInstanceWithDistSQL() throws SQLException { String sql = "set dist variable sql_show='true'"; SQLStatement sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession); ProxyBackendHandler actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext()); assertThat(actual, instanceOf(DistSQLUpdateBackendHandler.class)); sql = "show dist variable where name = sql_show"; sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession); actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext()); assertThat(actual, instanceOf(DistSQLQueryBackendHandler.class)); sql = "show dist variables"; sqlStatement = ProxySQLComQueryParser.parse(sql, databaseType, connectionSession); actual = ProxyBackendHandlerFactory.newInstance(databaseType, sql, sqlStatement, connectionSession, new HintValueContext()); assertThat(actual, instanceOf(DistSQLQueryBackendHandler.class)); }
// Maps transport-layer exceptions onto SofaRpcException error codes, distinguishing
// client vs server side for (de)serialization failures; anything unrecognized is
// wrapped as CLIENT_UNDECLARED_ERROR. Note InvokeServerException unwraps getCause().
protected SofaRpcException convertToRpcException(Exception e) { SofaRpcException exception; if (e instanceof SofaRpcException) { exception = (SofaRpcException) e; } // 超时 (timeout)
 else if (e instanceof InvokeTimeoutException) { exception = new SofaTimeOutException(e); } // 服务器忙 (server busy)
 else if (e instanceof InvokeServerBusyException) { exception = new SofaRpcException(RpcErrorType.SERVER_BUSY, e); } // 序列化 (serialization)
 else if (e instanceof SerializationException) { boolean isServer = ((SerializationException) e).isServerSide(); exception = isServer ? new SofaRpcException(RpcErrorType.SERVER_SERIALIZE, e) : new SofaRpcException(RpcErrorType.CLIENT_SERIALIZE, e); } // 反序列化 (deserialization)
 else if (e instanceof DeserializationException) { boolean isServer = ((DeserializationException) e).isServerSide(); exception = isServer ? new SofaRpcException(RpcErrorType.SERVER_DESERIALIZE, e) : new SofaRpcException(RpcErrorType.CLIENT_DESERIALIZE, e); } // 长连接断连 (connection closed)
 else if (e instanceof ConnectionClosedException) { exception = new SofaRpcException(RpcErrorType.CLIENT_NETWORK, e); } // 客户端发送失败 (client send failed)
 else if (e instanceof InvokeSendFailedException) { exception = new SofaRpcException(RpcErrorType.CLIENT_NETWORK, e); } // 服务端未知异常 (unknown server error)
 else if (e instanceof InvokeServerException) { exception = new SofaRpcException(RpcErrorType.SERVER_UNDECLARED_ERROR, e.getCause()); } // 客户端未知 (unknown client error)
 else { exception = new SofaRpcException(RpcErrorType.CLIENT_UNDECLARED_ERROR, e); } return exception; }
// Exercises every mapping branch of convertToRpcException, including both sides of
// the (de)serialization cases and the unrecognized-exception fallback.
@Test public void testConvertToRpcException() { ClientTransportConfig config1 = new ClientTransportConfig(); config1.setProviderInfo(new ProviderInfo().setHost("127.0.0.1").setPort(12222)) .setContainer("bolt"); BoltClientTransport transport = new BoltClientTransport(config1); Assert.assertTrue(transport .convertToRpcException(new SofaRpcException(RpcErrorType.CLIENT_UNDECLARED_ERROR, "")) instanceof SofaRpcException); Assert.assertTrue(transport.convertToRpcException(new InvokeTimeoutException()) instanceof SofaTimeOutException); Assert.assertTrue(transport.convertToRpcException(new InvokeServerBusyException()) .getErrorType() == RpcErrorType.SERVER_BUSY); Assert.assertTrue(transport.convertToRpcException(new SerializationException("xx", true)) .getErrorType() == RpcErrorType.SERVER_SERIALIZE); Assert.assertTrue(transport.convertToRpcException(new SerializationException("xx", false)) .getErrorType() == RpcErrorType.CLIENT_SERIALIZE); Assert.assertTrue(transport.convertToRpcException(new DeserializationException("xx", true)) .getErrorType() == RpcErrorType.SERVER_DESERIALIZE); Assert.assertTrue(transport.convertToRpcException(new DeserializationException("xx", false)) .getErrorType() == RpcErrorType.CLIENT_DESERIALIZE); Assert.assertTrue(transport.convertToRpcException(new ConnectionClosedException()) .getErrorType() == RpcErrorType.CLIENT_NETWORK); Assert.assertTrue(transport.convertToRpcException(new InvokeSendFailedException()) .getErrorType() == RpcErrorType.CLIENT_NETWORK); Assert.assertTrue(transport.convertToRpcException(new InvokeServerException()) .getErrorType() == RpcErrorType.SERVER_UNDECLARED_ERROR); Assert.assertTrue(transport.convertToRpcException(new UnsupportedOperationException()) .getErrorType() == RpcErrorType.CLIENT_UNDECLARED_ERROR); }
// Marks the deployment of the given execution as complete by dropping it from the
// pending set; an unknown id is a silent no-op (Map.remove semantics).
@Override public void completeDeploymentOf(ExecutionAttemptID executionAttemptId) { pendingDeployments.remove(executionAttemptId); }
// Verifies completing a never-tracked execution id does not throw.
@Test void testCompleteDeploymentUnknownExecutionDoesNotThrowException() { final DefaultExecutionDeploymentTracker tracker = new DefaultExecutionDeploymentTracker(); tracker.completeDeploymentOf(createExecutionAttemptId()); }
// Filters plugins whose keywords match every lowercased term produced by SPLITTER
// (splitter definition not shown here — confirm its delimiter set), then sorts with
// the query-specific comparator.
public static <T extends SearchablePlugin> List<T> search(Collection<T> searchablePlugins, String query) { return searchablePlugins.stream() .filter(plugin -> Text.matchesSearchTerms(SPLITTER.split(query.toLowerCase()), plugin.getKeywords())) .sorted(comparator(query)) .collect(Collectors.toList()); }
// NOTE(review): despite the test name, containsInAnyOrder does not verify ordering —
// this only checks that a blank query returns all plugins.
@Test public void emptyQueryReturnsPluginsInAlphabeticalOrderWithPinnedItemsFirst() { List<SearchablePlugin> results = PluginSearch.search(plugins.values(), " "); assertThat(results, containsInAnyOrder(plugins.values().toArray(new SearchablePlugin[] {}))); }
// Reads a PostgreSQL int8 (64-bit) binary value from the payload.
// NOTE(review): parameterValueLength is ignored — presumably always 8 for this type.
@Override public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) { return payload.readInt8(); }
// Verifies sequential int8 reads decode MIN_VALUE, -1, 0 and MAX_VALUE from the buffer.
@Test void assertRead() { byte[] input = new byte[]{ (byte) 0x80, 0, 0, 0, 0, 0, 0, 0, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, (byte) 0x7F, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}; PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(Unpooled.wrappedBuffer(input), StandardCharsets.UTF_8); assertThat(new PostgreSQLInt8BinaryProtocolValue().read(payload, 8), is(Long.MIN_VALUE)); assertThat(new PostgreSQLInt8BinaryProtocolValue().read(payload, 8), is(-1L)); assertThat(new PostgreSQLInt8BinaryProtocolValue().read(payload, 8), is(0L)); assertThat(new PostgreSQLInt8BinaryProtocolValue().read(payload, 8), is(Long.MAX_VALUE)); }
// Reads the broker external view from ZooKeeper (unpacking the znode if needed) and
// maps each table name (OFFLINE/REALTIME suffix stripped) to host:port of brokers
// whose entry is ONLINE. Any read/parse error is logged and yields whatever was
// accumulated so far (possibly an empty map).
public Map<String, List<String>> getTableToBrokersMap() { Map<String, Set<String>> brokerUrlsMap = new HashMap<>(); try { byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true); brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData); JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData)); JsonNode brokerResourceNode = jsonObject.get("mapFields"); Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields(); while (resourceEntries.hasNext()) { Entry<String, JsonNode> resourceEntry = resourceEntries.next(); String resourceName = resourceEntry.getKey(); String tableName = resourceName.replace(OFFLINE_SUFFIX, "").replace(REALTIME_SUFFIX, ""); Set<String> brokerUrls = brokerUrlsMap.computeIfAbsent(tableName, k -> new HashSet<>()); JsonNode resource = resourceEntry.getValue(); Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields(); while (brokerEntries.hasNext()) { Entry<String, JsonNode> brokerEntry = brokerEntries.next(); String brokerName = brokerEntry.getKey(); if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) { brokerUrls.add(getHostPort(brokerName)); } } } } catch (Exception e) { LOGGER.warn("Exception while reading External view from zookeeper", e); // ignore } Map<String, List<String>> tableToBrokersMap = new HashMap<>(); for (Entry<String, Set<String>> entry : brokerUrlsMap.entrySet()) { tableToBrokersMap.put(entry.getKey(), new ArrayList<>(entry.getValue())); } return tableToBrokersMap; }
// Verifies the TLS-configured instance maps "field1" to the expected broker address.
@Test public void testGetBrokersMapByInstanceConfigTlsDefault() { configureData(_instanceConfigTls, false); final Map<String, List<String>> result = _externalViewReaderUnderTest.getTableToBrokersMap(); final Map<String, List<String>> expectedResult = ImmutableMap.of("field1", Arrays.asList("first.pug-pinot-broker-headless:8099")); // Verify the results assertEquals(expectedResult, result); }
// Renders the calling thread's current stack, one frame per tab-indented line,
// each preceded by a newline (so the result starts with "\n\t").
public static String currentStackTrace() {
    final StringBuilder trace = new StringBuilder();
    final StackTraceElement[] frames = Thread.currentThread().getStackTrace();
    for (int i = 0; i < frames.length; i++) {
        trace.append("\n\t").append(frames[i]);
    }
    return trace.toString();
}
// Verifies the rendered stack contains both the utility frame and this test's frame.
@Test public void testCurrentStackTrace() { String currentStackTrace = UtilAll.currentStackTrace(); assertThat(currentStackTrace).contains("UtilAll.currentStackTrace"); assertThat(currentStackTrace).contains("UtilAllTest.testCurrentStackTrace("); }
// Validates cross-field CP subsystem constraints: group size <= member count,
// session TTL > heartbeat interval, TTL <= missing-member auto-removal window
// (when enabled), and persistence requires at least one CP member.
public static void checkCPSubsystemConfig(CPSubsystemConfig config) { checkTrue(config.getGroupSize() <= config.getCPMemberCount(), "The group size parameter cannot be bigger than the number of the CP member count"); checkTrue(config.getSessionTimeToLiveSeconds() > config.getSessionHeartbeatIntervalSeconds(), "Session TTL must be greater than session heartbeat interval!"); checkTrue(config.getMissingCPMemberAutoRemovalSeconds() == 0 || config.getSessionTimeToLiveSeconds() <= config.getMissingCPMemberAutoRemovalSeconds(), "Session TTL must be smaller than or equal to missing CP member auto-removal seconds!"); checkTrue(!config.isPersistenceEnabled() || config.getCPMemberCount() > 0, "CP member count must be greater than 0 to use CP persistence feature!"); }
// Verifies TTL exceeding the missing-member auto-removal window is rejected.
@Test(expected = IllegalArgumentException.class) public void testValidationFails_whenSessionTTLGreaterThanMissingCPMemberAutoRemovalSeconds() { CPSubsystemConfig config = new CPSubsystemConfig(); config.setMissingCPMemberAutoRemovalSeconds(5); config.setSessionTimeToLiveSeconds(10); checkCPSubsystemConfig(config); }
// Probes each route unit in order and returns the first table mapper found for the
// given logical data source / actual table pair, or empty when none matches.
public Optional<RouteMapper> findTableMapper(final String logicDataSourceName, final String actualTableName) {
    return routeUnits.stream()
            .map(each -> each.findTableMapper(logicDataSourceName, actualTableName))
            .filter(Optional::isPresent)
            .findFirst()
            .orElseGet(Optional::empty);
}
// Verifies a known data source / actual table pair resolves to the expected mapper.
@Test void assertFindTableMapper() { Optional<RouteMapper> actual = multiRouteContext.findTableMapper(DATASOURCE_NAME_1, ACTUAL_TABLE); assertTrue(actual.isPresent()); assertThat(actual.get(), is(new RouteMapper(LOGIC_TABLE, ACTUAL_TABLE))); }
// Number of requests currently recorded by this counter.
public int getRequestsCount() { return requests.size(); }
// Verifies the count matches the size of the underlying request list after one add.
@Test public void testGetRequestsCount() { counter.addRequest("test requests count", 100, 50, 50, false, 1000); assertEquals("requests count", counter.getRequests().size(), counter.getRequestsCount()); }
// Resolves theme messages for a locale by merging resource bundles from least to
// most specific (e.g. *_gl before *_gl_ES) so more-specific values overwrite
// less-specific ones; missing files are silently skipped and an immutable map is
// returned, or the shared EMPTY_MESSAGES constant when nothing was found.
public static Map<String, String> resolveMessagesForTemplate(final Locale locale, ThemeContext theme) { // Compute all the resource names we should use: *_gl_ES-gheada.properties, *_gl_ES // .properties, _gl.properties... // The order here is important: as we will let values from more specific files // overwrite those in less specific, // (e.g. a value for gl_ES will have more precedence than a value for gl). So we will // iterate these resource // names from less specific to more specific. final List<String> messageResourceNames = computeMessageResourceNamesFromBase(locale); // Build the combined messages Map<String, String> combinedMessages = null; for (final String messageResourceName : messageResourceNames) { try { final Reader messageResourceReader = messageReader(messageResourceName, theme); if (messageResourceReader != null) { final Properties messageProperties = readMessagesResource(messageResourceReader); if (messageProperties != null && !messageProperties.isEmpty()) { if (combinedMessages == null) { combinedMessages = new HashMap<>(20); } for (final Map.Entry<Object, Object> propertyEntry : messageProperties.entrySet()) { combinedMessages.put((String) propertyEntry.getKey(), (String) propertyEntry.getValue()); } } } } catch (final IOException ignored) { // File might not exist, simply try the next one } } if (combinedMessages == null) { return EMPTY_MESSAGES; } return Collections.unmodifiableMap(combinedMessages); }
// Verifies the English bundle resolves to exactly one known key/value entry.
@Test void resolveMessagesForTemplateForEnglish() throws URISyntaxException { Map<String, String> properties = ThemeMessageResolutionUtils.resolveMessagesForTemplate(Locale.ENGLISH, getTheme()); assertThat(properties).hasSize(1); assertThat(properties).containsEntry("index.welcome", "Welcome to the index"); }
/**
 * Looks up a config value by path and renders it as a string.
 * Returns {@code null} when the key is absent or any lookup error occurs
 * (errors are logged, never propagated — callers rely on the null contract).
 *
 * @param path dotted config path, e.g. "registry.type"; may be null (treated as absent)
 * @return the string form of the value, or {@code null}
 */
@Override
public String getString(String path) {
    try {
        Object value = configMap.get(path);
        return value == null ? null : String.valueOf(value);
    } catch (Exception e) {
        // Parameterized logging: keeps the path separated from the message and
        // preserves the stack trace as the trailing throwable argument.
        LOGGER.warn("get config data error, path: {}", path, e);
        return null;
    }
}
// Verifies a bad file path throws, known keys resolve, and missing keys / a null
// path all return null rather than throwing.
@Test void getString() throws IOException { Assertions.assertThrows(IllegalArgumentException.class, () -> { YamlFileConfig config = new YamlFileConfig(new File("registry-test-yaml.yml"), ""); config.getString("registry.type"); }); YamlFileConfig config = new YamlFileConfig(new File("src/test/resources/registry-test-yaml.yml"), ""); Assertions.assertEquals("file", config.getString("registry.type")); Assertions.assertEquals("file.conf", config.getString("registry.file.name")); // not exist Assertions.assertNull(config.getString("registry.null.name")); Assertions.assertNull(config.getString("null")); // inner exception Assertions.assertNull(config.getString(null)); }
// Renders an AST back to SQL text via the Formatter visitor, stripping any
// trailing newlines from the result.
public static String formatSql(final AstNode root) { final StringBuilder builder = new StringBuilder(); new Formatter(builder).process(root, 0); return StringUtils.stripEnd(builder.toString(), "\n"); }
// Verifies formatting a persistent query adds an explicit EMIT CHANGES clause.
@Test public void shouldSupportImplicitEmitChangesOnPersistentQuery() { // Given: final Statement statement = parseSingle("CREATE STREAM X AS SELECT ITEMID FROM ORDERS;"); // When: final String result = SqlFormatter.formatSql(statement); // Then: assertThat(result, is("CREATE STREAM X AS SELECT ITEMID\n" + "FROM ORDERS ORDERS\n" + "EMIT CHANGES")); }
// Parses a duration in either the compact '1hr20s' form (via TimeUtils) or the
// ISO-8601 'PT1H20S' form; anything else raises IllegalArgumentException with the
// last parse failure as cause. The first catch is intentionally swallowed so the
// second format can be tried.
@VisibleForTesting static Duration parseDuration(String durStr) { try { // try format like '1hr20s' return Duration.ofMillis(TimeUtils.convertPeriodToMillis(durStr)); } catch (Exception ignore) { } try { // try format like 'PT1H20S' return Duration.parse(durStr); } catch (Exception e) { throw new IllegalArgumentException( String.format("Invalid time duration '%s', for examples '1hr20s' or 'PT1H20S'", durStr), e); } }
// Verifies both accepted formats parse equivalently, then that a bare number
// ("10") is rejected — the expected exception comes from the final call.
@Test(expectedExceptions = IllegalArgumentException.class) public void testParseDuration() { Assert.assertEquals(S3Config.parseDuration("P1DT2H30S"), S3Config.parseDuration("1d2h30s")); S3Config.parseDuration("10"); }
// Deletes records for the selected partitions of a topic up to the offsets computed
// by offsetsForDeletion. withExistingTopic presumably fails the Mono when the topic
// is missing — TODO confirm against its definition.
public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName, List<Integer> partitionsToInclude) { return withExistingTopic(cluster, topicName) .flatMap(td -> offsetsForDeletion(cluster, topicName, partitionsToInclude) .flatMap(offsets -> adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets)))); }
// Verifies deleting messages of a non-existing topic errors with TopicNotFoundException.
@Test void deleteTopicMessagesReturnsExceptionWhenTopicNotFound() { StepVerifier.create(messagesService.deleteTopicMessages(cluster, NON_EXISTING_TOPIC, List.of())) .expectError(TopicNotFoundException.class) .verify(); }
// Computes a tile's cache expiration time with this precedence: configured override
// duration; else Cache-Control max-age (seconds) plus the extension; else the HTTP
// Expires header plus the extension; else the default maximum cached file age plus
// the extension.
public long computeExpirationTime(final String pHttpExpiresHeader, final String pHttpCacheControlHeader, final long pNow) { final Long override = Configuration.getInstance().getExpirationOverrideDuration(); if (override != null) { return pNow + override; } final long extension = Configuration.getInstance().getExpirationExtendedDuration(); final Long cacheControlDuration = getHttpCacheControlDuration(pHttpCacheControlHeader); if (cacheControlDuration != null) { return pNow + cacheControlDuration * 1000 + extension; } final Long httpExpiresTime = getHttpExpiresTime(pHttpExpiresHeader); if (httpExpiresTime != null) { return httpExpiresTime + extension; } return pNow + OpenStreetMapTileProviderConstants.DEFAULT_MAXIMUM_CACHED_FILE_AGE + extension; }
// Verifies a subclass override of computeExpirationTime takes full precedence: the
// fixed 20-minute policy is returned for every combination of valid and invalid
// Cache-Control / Expires header inputs.
@Test public void testCustomExpirationTimeWithValues() { final long twentyMinutesInMillis = 20 * 60 * 1000; final TileSourcePolicy tileSourcePolicy = new TileSourcePolicy() { @Override public long computeExpirationTime(String pHttpExpiresHeader, String pHttpCacheControlHeader, long pNow) { return pNow + twentyMinutesInMillis; } }; final long now = System.currentTimeMillis(); final long expected = now + twentyMinutesInMillis; for (final String cacheControlString : mCacheControlStringOK) { for (final String expiresString : mExpiresStringOK) { Assert.assertEquals( expected, tileSourcePolicy.computeExpirationTime(expiresString, cacheControlString, now)); } for (final String expiresString : mExpiresStringKO) { Assert.assertEquals( expected, tileSourcePolicy.computeExpirationTime(expiresString, cacheControlString, now)); } } for (final String cacheControlString : mCacheControlStringKO) { for (final String expiresString : mExpiresStringOK) { Assert.assertEquals( expected, tileSourcePolicy.computeExpirationTime(expiresString, cacheControlString, now)); } for (final String expiresString : mExpiresStringKO) { Assert.assertEquals( expected, tileSourcePolicy.computeExpirationTime(expiresString, cacheControlString, now)); } } }
/**
 * Creates a pre-configured {@code ProducerBuilder} for function output: batching enabled,
 * blocking on a full queue, infinite send timeout, and custom routing via
 * {@code FunctionResultRouter}. Optional overrides (compression, pending-message limits,
 * encryption, batch builder) are applied from {@code producerConfig} when present.
 *
 * @param topic        destination topic for the producer
 * @param schema       schema used to serialize messages
 * @param producerName producer name to set, or {@code null} to let the client pick one
 * @return the configured builder (not yet created/connected)
 */
public <T> ProducerBuilder<T> createProducerBuilder(String topic, Schema<T> schema, String producerName) {
    ProducerBuilder<T> builder = client.newProducer(schema);
    // Apply caller-supplied defaults first so the settings below take precedence.
    if (defaultConfigurer != null) {
        defaultConfigurer.accept(builder);
    }
    builder.blockIfQueueFull(true)
            .enableBatching(true)
            .batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS)
            .hashingScheme(HashingScheme.Murmur3_32Hash) //
            .messageRoutingMode(MessageRoutingMode.CustomPartition)
            .messageRouter(FunctionResultRouter.of())
            // set send timeout to be infinity to prevent potential deadlock with consumer
            // that might happen when consumer is blocked due to unacked messages
            .sendTimeout(0, TimeUnit.SECONDS)
            .topic(topic);
    if (producerName != null) {
        builder.producerName(producerName);
    }
    if (producerConfig != null) {
        if (producerConfig.getCompressionType() != null) {
            builder.compressionType(producerConfig.getCompressionType());
        } else {
            // TODO: address this inconsistency.
            // PR https://github.com/apache/pulsar/pull/19470 removed the default compression type of LZ4
            // from the top level. This default is only used if producer config is provided.
            builder.compressionType(CompressionType.LZ4);
        }
        // A limit of 0 means "use the client default", so it is treated like null here.
        if (producerConfig.getMaxPendingMessages() != null && producerConfig.getMaxPendingMessages() != 0) {
            builder.maxPendingMessages(producerConfig.getMaxPendingMessages());
        }
        if (producerConfig.getMaxPendingMessagesAcrossPartitions() != null
                && producerConfig.getMaxPendingMessagesAcrossPartitions() != 0) {
            builder.maxPendingMessagesAcrossPartitions(producerConfig.getMaxPendingMessagesAcrossPartitions());
        }
        // Encryption settings come from the pre-built crypto holder, not from the config directly.
        if (producerConfig.getCryptoConfig() != null) {
            builder.cryptoKeyReader(crypto.keyReader);
            builder.cryptoFailureAction(crypto.failureAction);
            for (String encryptionKeyName : crypto.getEncryptionKeys()) {
                builder.addEncryptionKey(encryptionKeyName);
            }
        }
        // Only "KEY_BASED" is recognized; any other non-null value falls back to DEFAULT.
        if (producerConfig.getBatchBuilder() != null) {
            if (producerConfig.getBatchBuilder().equals("KEY_BASED")) {
                builder.batcherBuilder(BatcherBuilder.KEY_BASED);
            } else {
                builder.batcherBuilder(BatcherBuilder.DEFAULT);
            }
        }
    }
    return builder;
}
// Verifies that every advanced ProducerConfig option (compression, key-based batching,
// pending-message limits, and crypto settings) is forwarded to the builder exactly once.
@Test
public void testCreateProducerBuilderWithAdvancedProducerConfig() {
    ProducerConfig producerConfig = new ProducerConfig();
    producerConfig.setBatchBuilder("KEY_BASED");
    producerConfig.setCompressionType(CompressionType.SNAPPY);
    producerConfig.setMaxPendingMessages(5000);
    producerConfig.setMaxPendingMessagesAcrossPartitions(50000);
    CryptoConfig cryptoConfig = new CryptoConfig();
    cryptoConfig.setProducerCryptoFailureAction(ProducerCryptoFailureAction.FAIL);
    cryptoConfig.setEncryptionKeys(new String[]{"key1", "key2"});
    cryptoConfig.setCryptoKeyReaderConfig(Map.of("key", "value"));
    cryptoConfig.setCryptoKeyReaderClassName(TestCryptoKeyReader.class.getName());
    producerConfig.setCryptoConfig(cryptoConfig);
    ProducerBuilderFactory builderFactory = new ProducerBuilderFactory(pulsarClient, producerConfig, null, null);
    builderFactory.createProducerBuilder("topic", Schema.STRING, "producerName");
    verifyCommon();
    verify(producerBuilder).compressionType(CompressionType.SNAPPY);
    verify(producerBuilder).batcherBuilder(BatcherBuilder.KEY_BASED);
    verify(producerBuilder).maxPendingMessages(5000);
    verify(producerBuilder).maxPendingMessagesAcrossPartitions(50000);
    // The factory must have instantiated the configured key reader class reflectively
    // and passed the reader config through to it.
    TestCryptoKeyReader lastInstance = TestCryptoKeyReader.LAST_INSTANCE;
    assertNotNull(lastInstance);
    assertEquals(lastInstance.configs, cryptoConfig.getCryptoKeyReaderConfig());
    verify(producerBuilder).cryptoKeyReader(lastInstance);
    verify(producerBuilder).cryptoFailureAction(ProducerCryptoFailureAction.FAIL);
    verify(producerBuilder).addEncryptionKey("key1");
    verify(producerBuilder).addEncryptionKey("key2");
    // No settings beyond the ones asserted above may have been applied.
    verifyNoMoreInteractions(producerBuilder);
}
/**
 * Returns a combine function that computes the sum of its {@code double} inputs.
 */
public static Combine.BinaryCombineDoubleFn ofDoubles() {
    return new SumDoubleFn();
}
// Summing any finite values together with +Infinity must yield +Infinity
// (IEEE-754 semantics are preserved by the combine fn).
@Test
public void testSumDoubleFnPositiveInfinity() {
    testCombineFn(
            Sum.ofDoubles(),
            Lists.newArrayList(1.0, 2.0, 3.0, Double.POSITIVE_INFINITY),
            Double.POSITIVE_INFINITY);
}
/**
 * Returns display names for every config resource in the image: default resources
 * are shown as their bare type name, all others as {@code TYPE:name}.
 */
@Override
public Collection<String> childNames() {
    final Collection<String> names = new ArrayList<>();
    for (final ConfigResource resource : image.resourceData().keySet()) {
        final String typeName = resource.type().name();
        names.add(resource.isDefault() ? typeName : typeName + ":" + resource.name());
    }
    return names;
}
// Child names must cover default resources (bare type), named resources
// (TYPE:name), and names that themselves contain colons or start with ':'.
@Test
public void testNodeChildNames() {
    List<String> childNames = new ArrayList<>(NODE.childNames());
    // Sort locally so the assertion is independent of map iteration order.
    childNames.sort(String::compareTo);
    assertEquals(Arrays.asList(
            "BROKER",
            "BROKER:0",
            "TOPIC",
            "TOPIC::colons:",
            "TOPIC:__internal",
            "TOPIC:foobar"), childNames);
}
/**
 * Polls the WID (document) status for this app session and, once documents have
 * been received, starts an RDA scanning session with the returned documents.
 * Side effects: updates and (partially) persists the app session, and emits
 * remote log entries 867/868/873.
 *
 * @param flow   current flow (unused here beyond the framework contract)
 * @param params request carrying the client IP address for the RDA session
 * @return a status/NOK response while polling, or an RdaResponse once scanning can start
 */
@Override
public AppResponse process(Flow flow, AppRequest params) {
    var result = digidClient.getWidstatus(appSession.getWidRequestId());
    switch (result.get("status").toString()) {
        case "NO_DOCUMENTS":
            // Terminal state: persist it so later polls do not re-query.
            appSession.setRdaSessionStatus("NO_DOCUMENTS");
            appSession.setBrpIdentifier(result.get("brp_identifier").toString());
            appSessionService.save(appSession);
            return new StatusResponse("NO_DOCUMENTS");
        case "PENDING":
            setValid(false); // Do not progress to next state
            return new StatusResponse("PENDING");
        case "NOK":
            return new NokResponse();
    }
    // Any other status is treated as "documents received".
    digidClient.remoteLog("867", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
    appSession.setRdaSessionStatus("DOCUMENTS_RECEIVED");
    Map<String, String> rdaSession = rdaClient.startSession(returnUrl + "/iapi/rda/confirm",
            appSession.getId(),
            params.getIpAddress(),
            result.get("travel_documents"),
            result.get("driving_licences"));
    // An empty map means the RDA backend refused/failed to create a session.
    if (rdaSession.isEmpty()) {
        digidClient.remoteLog("873", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
        return new NokResponse();
    }
    // NOTE(review): the session fields below are mutated but not explicitly saved
    // here — presumably persisted by the surrounding flow; verify against caller.
    appSession.setConfirmSecret(rdaSession.get("confirmSecret"));
    appSession.setUrl(rdaSession.get("url"));
    appSession.setRdaSessionId(rdaSession.get("sessionId"));
    appSession.setRdaSessionTimeoutInSeconds(rdaSession.get("expiration"));
    appSession.setRdaSessionStatus("SCANNING");
    digidClient.remoteLog("868", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true));
    return new RdaResponse(appSession.getUrl(), appSession.getRdaSessionId());
}
// A NO_DOCUMENTS wid-status must short-circuit to a StatusResponse("NO_DOCUMENTS")
// without starting an RDA session.
@Test
void processWidstatusNoDocuments() {
    when(digidClientMock.getWidstatus(mockedAppSession.getWidRequestId())).thenReturn(invalidDigidClientResponseNoDoc);
    AppResponse appResponse = rdaPolling.process(mockedFlow, mockedAbstractAppRequest);
    assertEquals("NO_DOCUMENTS", ((StatusResponse) appResponse).getStatus());
}
/**
 * Operator lifecycle hook: registers the maintenance metrics (rate-limiter,
 * concurrency-throttle, nothing-to-trigger counters plus one trigger counter per
 * task name), initializes the keyed/operator state handles used across restarts,
 * and opens the table loader.
 *
 * @param parameters Flink configuration (unused here beyond the framework contract)
 * @throws Exception if state initialization or table loading fails
 */
@Override
public void open(Configuration parameters) throws Exception {
    // Shared metric group: GROUP_KEY -> default group value.
    this.rateLimiterTriggeredCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
    this.concurrentRunThrottledCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
    this.nothingToTriggerCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
    // One TRIGGERED counter per maintenance task, grouped by the task name.
    this.triggerCounters =
        taskNames.stream()
            .map(
                name ->
                    getRuntimeContext()
                        .getMetricGroup()
                        .addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
                        .counter(TableMaintenanceMetrics.TRIGGERED))
            .collect(Collectors.toList());
    // Checkpointed state: next evaluation time, accumulated table changes,
    // and the last trigger time per task.
    this.nextEvaluationTimeState =
        getRuntimeContext()
            .getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
    this.accumulatedChangesState =
        getRuntimeContext()
            .getListState(
                new ListStateDescriptor<>(
                    "triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
    this.lastTriggerTimesState =
        getRuntimeContext()
            .getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
    tableLoader.open();
}
// With a pos-delete-file threshold of 3, triggers fire whenever the accumulated
// count reaches 3 and the accumulator resets after each trigger.
@Test
void testPosDeleteFileCount() throws Exception {
    TriggerManager manager =
        manager(
            sql.tableLoader(TABLE_NAME),
            new TriggerEvaluator.Builder().posDeleteFileCount(3).build());
    try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
        harness(manager)) {
        testHarness.open();
        // 1 accumulated -> below threshold; +2 reaches 3 -> first trigger; etc.
        addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 0);
        addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(2).build(), 1);
        addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(3).build(), 2);
        addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(10).build(), 3);

        // No trigger in this case
        addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 3);
        addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 3);
        addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 4);
    }
}
/**
 * Validates that the supplied old password matches the stored password of the
 * given admin user.
 *
 * @param id          id of the user being checked
 * @param oldPassword plaintext password supplied by the caller
 * @throws com.baomidou... USER_NOT_EXISTS when no user with the id exists,
 *         USER_PASSWORD_FAILED when the password does not match
 */
@VisibleForTesting
void validateOldPassword(Long id, String oldPassword) {
    // The user must exist before its password can be compared.
    AdminUserDO currentUser = userMapper.selectById(id);
    if (currentUser == null) {
        throw exception(USER_NOT_EXISTS);
    }
    boolean matches = isPasswordMatch(oldPassword, currentUser.getPassword());
    if (!matches) {
        throw exception(USER_PASSWORD_FAILED);
    }
}
// Validating the old password for a non-existent user id must raise USER_NOT_EXISTS.
@Test
public void testValidateOldPassword_notExists() {
    assertServiceException(() -> userService.validateOldPassword(randomLongId(), randomString()),
            USER_NOT_EXISTS);
}
/**
 * Marks the aggregate meter for every log event, plus the per-level meter for
 * the event's standard level. Events at non-standard levels only count toward
 * the aggregate.
 */
@Override
public void append(LogEvent event) {
    all.mark();
    switch (event.getLevel().getStandardLevel()) {
        case TRACE:
            trace.mark();
            break;
        case DEBUG:
            debug.mark();
            break;
        case INFO:
            info.mark();
            break;
        case WARN:
            warn.mark();
            break;
        case ERROR:
            error.mark();
            break;
        case FATAL:
            fatal.mark();
            break;
        default:
            // Intentionally no per-level meter for OFF/ALL or custom levels.
            break;
    }
}
// An INFO-level event increments both the aggregate "all" meter and the "info" meter.
@Test
public void metersInfoEvents() {
    when(event.getLevel()).thenReturn(Level.INFO);

    appender.append(event);

    assertThat(registry.meter(METRIC_NAME_PREFIX + ".all").getCount())
            .isEqualTo(1);
    assertThat(registry.meter(METRIC_NAME_PREFIX + ".info").getCount())
            .isEqualTo(1);
}
/**
 * Runs one duty cycle of the conductor: timers, client commands, the internal
 * command queue, stream-position tracking, name resolution, and end-of-life
 * resource cleanup.
 *
 * @return the amount of work done in this cycle (0 lets the idle strategy back off)
 */
public int doWork() {
    final long nowNs = nanoClock.nanoTime();
    trackTime(nowNs);

    int workCount = 0;
    workCount += processTimers(nowNs);
    // Client commands are deferred while an async command is still in flight.
    if (!asyncClientCommandInFlight) {
        workCount += clientCommandAdapter.receive();
    }
    workCount += drainCommandQueue();
    workCount += trackStreamPositions(workCount, nowNs);
    workCount += nameResolver.doWork(cachedEpochClock.time());
    // Free a bounded number of end-of-life resources per cycle to cap latency.
    workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());

    return workCount;
}
// Two manual-control-mode subscriptions on the same channel/stream must each get
// their own receive channel endpoint (registered twice, closed twice), with no errors.
@Test
void shouldUseUniqueChannelEndpointOnAddSubscriptionWithNoDistinguishingCharacteristics() {
    final long id1 = driverProxy.addSubscription(CHANNEL_SUB_CONTROL_MODE_MANUAL, STREAM_ID_1);
    final long id2 = driverProxy.addSubscription(CHANNEL_SUB_CONTROL_MODE_MANUAL, STREAM_ID_1);

    // One doWork() per queued command.
    driverConductor.doWork();
    driverConductor.doWork();

    verify(receiverProxy, times(2)).registerReceiveChannelEndpoint(any());

    driverProxy.removeSubscription(id1);
    driverProxy.removeSubscription(id2);

    driverConductor.doWork();
    driverConductor.doWork();

    verify(receiverProxy, times(2)).closeReceiveChannelEndpoint(any());
    verify(mockErrorHandler, never()).onError(any());
}
/**
 * Parses every {@code <config>} child of the given producer element and collects
 * the resulting payload builders, keyed by config definition, into a
 * {@code UserConfigRepo}.
 *
 * @param producerSpec          XML element whose config children are parsed
 * @param configDefinitionStore store used to resolve config definitions
 * @param deployLogger          logger for deploy-time diagnostics
 * @return repo of the parsed user configs (possibly empty)
 */
public static UserConfigRepo build(Element producerSpec, ConfigDefinitionStore configDefinitionStore, DeployLogger deployLogger) {
    log.log(Level.FINE, () -> "getUserConfigs for " + producerSpec);
    // LinkedHashMap keeps configs in document order.
    final Map<ConfigDefinitionKey, ConfigPayloadBuilder> builders = new LinkedHashMap<>();
    for (final Element configElement : XML.getChildren(producerSpec, "config")) {
        buildElement(configElement, builders, configDefinitionStore, deployLogger);
    }
    return new UserConfigRepo(builders);
}
// Two <config> elements for the same definition must merge: the resolved config
// carries values from both (intval from the first, stringval from the second).
@Test
void require_that_simple_config_is_resolved() {
    Element configRoot = getDocument("<config name=\"test.simpletypes\">" +
            "    <intval>13</intval>" +
            "</config>" +
            "<config name=\"test.simpletypes\" version=\"1\">" +
            "    <stringval>foolio</stringval>" +
            "</config>");
    UserConfigRepo map = UserConfigBuilder.build(configRoot, configDefinitionStore, new BaseDeployLogger());
    assertFalse(map.isEmpty());
    ConfigDefinitionKey key = new ConfigDefinitionKey("simpletypes", "test");
    assertNotNull(map.get(key));
    SimpletypesConfig config = createConfig(SimpletypesConfig.class, map.get(key));
    assertEquals(13, config.intval());
    assertEquals("foolio", config.stringval());
}
/**
 * Creates a GCS under-filesystem for the given path. Version 2 uses the native
 * GCS client and does not require the legacy credential check; version 1
 * requires valid credentials in the configuration.
 *
 * @param path the GCS URI to mount (must not be null)
 * @param conf UFS configuration carrying version and credentials
 * @return the created under-filesystem
 * @throws InvalidArgumentRuntimeException when neither v2 nor valid v1 credentials apply
 */
@Override
public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) {
    Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:"
        + " URI path should not be null");

    if (conf.getInt(PropertyKey.UNDERFS_GCS_VERSION) == GCS_VERSION_TWO) {
        try {
            return GCSV2UnderFileSystem.createInstance(new AlluxioURI(path), conf);
        } catch (IOException e) {
            LOG.error("Failed to create GCSV2UnderFileSystem.", e);
            // Re-thrown unchecked; the original cause is preserved.
            throw Throwables.propagate(e);
        }
    } else {
        if (checkGCSCredentials(conf)) {
            try {
                return GCSUnderFileSystem.createInstance(new AlluxioURI(path), conf);
            } catch (ServiceException e) {
                LOG.error("Failed to create GCSUnderFileSystem.", e);
                throw Throwables.propagate(e);
            }
        }
    }

    // v1 without credentials falls through to here.
    String err = "GCS credentials or version not available, cannot create GCS Under File System.";
    throw new InvalidArgumentRuntimeException(err);
}
// A null path must fail fast with an NPE carrying the explanatory message.
@Test
public void createInstanceWithNullPath() {
    Exception e = Assert.assertThrows(NullPointerException.class, () -> mFactory.create(
            null, mConf));
    Assert.assertTrue(e.getMessage().contains("Unable to create UnderFileSystem instance: URI "
            + "path should not be null"));
}
/**
 * Federated getAttributesToNodes: fans the request out to all sub-clusters
 * concurrently and merges the per-cluster responses. Records router metrics and
 * audit log entries for both success and failure paths.
 *
 * @param request the request; must be non-null and carry nodeAttributes
 * @return the merged attributes-to-nodes response
 * @throws YarnException on a missing/invalid request or any sub-cluster failure
 * @throws IOException   declared by the RPC contract
 */
@Override
public GetAttributesToNodesResponse getAttributesToNodes(
    GetAttributesToNodesRequest request) throws YarnException, IOException {
    // Validate before doing any remote work.
    if (request == null || request.getNodeAttributes() == null) {
        routerMetrics.incrGetAttributesToNodesFailedRetrieved();
        String msg = "Missing getAttributesToNodes request or nodeAttributes.";
        RouterAuditLogger.logFailure(user.getShortUserName(), GET_ATTRIBUTESTONODES, UNKNOWN,
            TARGET_CLIENT_RM_SERVICE, msg);
        RouterServerUtil.logAndThrowException(msg, null);
    }
    long startTime = clock.getTime();
    ClientMethod remoteMethod = new ClientMethod("getAttributesToNodes",
        new Class[] {GetAttributesToNodesRequest.class}, new Object[] {request});
    Collection<GetAttributesToNodesResponse> attributesToNodesResponses = null;
    try {
        // Invoke the same method against every sub-cluster in parallel.
        attributesToNodesResponses =
            invokeConcurrent(remoteMethod, GetAttributesToNodesResponse.class);
    } catch (Exception ex) {
        routerMetrics.incrGetAttributesToNodesFailedRetrieved();
        String msg = "Unable to get attributes to nodes due to exception.";
        RouterAuditLogger.logFailure(user.getShortUserName(), GET_ATTRIBUTESTONODES, UNKNOWN,
            TARGET_CLIENT_RM_SERVICE, msg);
        RouterServerUtil.logAndThrowException(msg, ex);
    }
    long stopTime = clock.getTime();
    routerMetrics.succeededGetAttributesToNodesRetrieved(stopTime - startTime);
    RouterAuditLogger.logSuccess(user.getShortUserName(), GET_ATTRIBUTESTONODES,
        TARGET_CLIENT_RM_SERVICE);
    return RouterYarnClientUtils.mergeAttributesToNodesResponse(attributesToNodesResponses);
}
// Null requests are rejected; a normal request merges attributes across
// sub-clusters and includes the centralized GPU attribute for host "0-host1".
@Test
public void testGetAttributesToNodes() throws Exception {
    LOG.info("Test FederationClientInterceptor : Get AttributesToNodes request.");

    // null request
    LambdaTestUtils.intercept(YarnException.class,
        "Missing getAttributesToNodes request " +
        "or nodeAttributes.", () -> interceptor.getAttributesToNodes(null));

    // normal request
    GetAttributesToNodesResponse response =
        interceptor.getAttributesToNodes(GetAttributesToNodesRequest.newInstance());
    Assert.assertNotNull(response);

    Map<NodeAttributeKey, List<NodeToAttributeValue>> attrs = response.getAttributesToNodes();
    Assert.assertNotNull(attrs);
    Assert.assertEquals(4, attrs.size());

    NodeAttribute gpu = NodeAttribute.newInstance(NodeAttribute.PREFIX_CENTRALIZED, "GPU",
        NodeAttributeType.STRING, "nvidia");
    NodeToAttributeValue attributeValue1 =
        NodeToAttributeValue.newInstance("0-host1", gpu.getAttributeValue());
    NodeAttributeKey gpuKey = gpu.getAttributeKey();
    Assert.assertTrue(attrs.get(gpuKey).contains(attributeValue1));
}
/**
 * Liveness probe: the DB connection, web server, and Compute Engine checks must
 * all be GREEN; the Elasticsearch check (when present) merely must not be RED,
 * so a YELLOW cluster is still considered live.
 */
public boolean liveness() {
    // Short-circuiting keeps the original evaluation order (db, web, ce).
    final boolean coreChecksGreen =
        Health.Status.GREEN.equals(dbConnectionNodeCheck.check().getStatus())
            && Health.Status.GREEN.equals(webServerStatusNodeCheck.check().getStatus())
            && Health.Status.GREEN.equals(ceStatusNodeCheck.check().getStatus());
    if (!coreChecksGreen) {
        return false;
    }
    // Elasticsearch is optional and only a hard failure when RED.
    return esStatusNodeCheck == null || !Health.Status.RED.equals(esStatusNodeCheck.check().getStatus());
}
// A RED DB connection check alone must make the liveness probe fail.
@Test
public void fail_when_db_connection_check_fail() {
    when(dbConnectionNodeCheck.check()).thenReturn(RED);

    Assertions.assertThat(underTest.liveness()).isFalse();
}
/**
 * Creates one active stream task per entry of {@code tasksToBeCreated}, wiring up
 * the task's subtopology, state manager, and processor context.
 *
 * @param ctx... consumer          the consumer the tasks will read from
 * @param tasksToBeCreated mapping from task id to its assigned topic partitions
 * @return the newly created active tasks, in map iteration order
 */
public Collection<Task> createTasks(final Consumer<byte[], byte[]> consumer,
                                    final Map<TaskId, Set<TopicPartition>> tasksToBeCreated) {
    final List<Task> createdTasks = new ArrayList<>();

    for (final Map.Entry<TaskId, Set<TopicPartition>> newTaskAndPartitions : tasksToBeCreated.entrySet()) {
        final TaskId taskId = newTaskAndPartitions.getKey();
        final LogContext logContext = getLogContext(taskId);
        final Set<TopicPartition> partitions = newTaskAndPartitions.getValue();
        // Each task gets the subtopology matching its task id.
        final ProcessorTopology topology = topologyMetadata.buildSubtopology(taskId);

        final ProcessorStateManager stateManager = new ProcessorStateManager(
            taskId,
            Task.TaskType.ACTIVE,
            eosEnabled(applicationConfig),
            logContext,
            stateDirectory,
            storeChangelogReader,
            topology.storeToChangelogTopic(),
            partitions,
            stateUpdaterEnabled);

        final InternalProcessorContext<Object, Object> context = new ProcessorContextImpl(
            taskId,
            applicationConfig,
            stateManager,
            streamsMetrics,
            cache
        );

        createdTasks.add(
            createActiveTask(
                taskId,
                partitions,
                consumer,
                logContext,
                topology,
                stateManager,
                context
            )
        );
    }
    return createdTasks;
}
// Under the deprecated EXACTLY_ONCE (alpha) guarantee there is no shared thread
// producer, so asking for one must throw IllegalStateException.
@SuppressWarnings("deprecation")
@Test
public void shouldFailOnGetThreadProducerIfEosAlphaEnabled() {
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    mockClientSupplier.setApplicationIdForProducer("appId");
    createTasks();

    final IllegalStateException thrown = assertThrows(
        IllegalStateException.class,
        activeTaskCreator::threadProducer
    );

    assertThat(thrown.getMessage(), is("Expected AT_LEAST_ONCE or EXACTLY_ONCE_V2 to be enabled, but the processing mode was EXACTLY_ONCE_ALPHA"));
}
/**
 * Sends one or more messages through the in-process broker's SendMessageProcessor.
 * Multiple messages are wrapped into a single MessageBatch. The processor's
 * response is routed back via an invocation channel; its response code is mapped
 * onto a {@code SendStatus}.
 *
 * @param ctx           proxy context (carries language and client info)
 * @param messageQueue  target queue (unused by this local implementation)
 * @param msgList       messages to send; batched when more than one
 * @param requestHeader header carrying topic/queue id, mutated to batch mode when batching
 * @param timeoutMillis send timeout (unused by this local implementation)
 * @return future with a single-element SendResult list, completed exceptionally on failure
 */
@Override
public CompletableFuture<List<SendResult>> sendMessage(ProxyContext ctx, AddressableMessageQueue messageQueue,
    List<Message> msgList, SendMessageRequestHeader requestHeader, long timeoutMillis) {
    byte[] body;
    String messageId;
    if (msgList.size() > 1) {
        // Batch path: encode the batch and use the batch's unique id.
        requestHeader.setBatch(true);
        MessageBatch msgBatch = MessageBatch.generateFromList(msgList);
        MessageClientIDSetter.setUniqID(msgBatch);
        body = msgBatch.encode();
        msgBatch.setBody(body);
        messageId = MessageClientIDSetter.getUniqID(msgBatch);
    } else {
        Message message = msgList.get(0);
        body = message.getBody();
        messageId = MessageClientIDSetter.getUniqID(message);
    }
    RemotingCommand request = LocalRemotingCommand.createRequestCommand(RequestCode.SEND_MESSAGE, requestHeader,
        ctx.getLanguage());
    request.setBody(body);
    CompletableFuture<RemotingCommand> future = new CompletableFuture<>();
    // The invocation channel correlates the processor's async response by opaque id.
    SimpleChannel channel = channelManager.createInvocationChannel(ctx);
    InvocationContext invocationContext = new InvocationContext(future);
    channel.registerInvocationContext(request.getOpaque(), invocationContext);
    ChannelHandlerContext simpleChannelHandlerContext = channel.getChannelHandlerContext();
    try {
        RemotingCommand response =
            brokerController.getSendMessageProcessor().processRequest(simpleChannelHandlerContext, request);
        // A non-null response means the processor answered synchronously.
        if (response != null) {
            invocationContext.handle(response);
            channel.eraseInvocationContext(request.getOpaque());
        }
    } catch (Exception e) {
        future.completeExceptionally(e);
        channel.eraseInvocationContext(request.getOpaque());
        log.error("Failed to process sendMessage command", e);
    }
    return future.thenApply(r -> {
        SendResult sendResult = new SendResult();
        SendMessageResponseHeader responseHeader = (SendMessageResponseHeader) r.readCustomHeader();
        // Map broker response codes onto client-visible send statuses.
        SendStatus sendStatus;
        switch (r.getCode()) {
            case ResponseCode.FLUSH_DISK_TIMEOUT: {
                sendStatus = SendStatus.FLUSH_DISK_TIMEOUT;
                break;
            }
            case ResponseCode.FLUSH_SLAVE_TIMEOUT: {
                sendStatus = SendStatus.FLUSH_SLAVE_TIMEOUT;
                break;
            }
            case ResponseCode.SLAVE_NOT_AVAILABLE: {
                sendStatus = SendStatus.SLAVE_NOT_AVAILABLE;
                break;
            }
            case ResponseCode.SUCCESS: {
                sendStatus = SendStatus.SEND_OK;
                break;
            }
            default: {
                // Any other code is surfaced as a proxy-side internal error.
                throw new ProxyException(ProxyExceptionCode.INTERNAL_SERVER_ERROR, r.getRemark());
            }
        }
        sendResult.setSendStatus(sendStatus);
        sendResult.setMsgId(messageId);
        sendResult.setMessageQueue(new MessageQueue(requestHeader.getTopic(),
            brokerController.getBrokerConfig().getBrokerName(), requestHeader.getQueueId()));
        sendResult.setQueueOffset(responseHeader.getQueueOffset());
        sendResult.setTransactionId(responseHeader.getTransactionId());
        sendResult.setOffsetMsgId(responseHeader.getMsgId());
        return Collections.singletonList(sendResult);
    });
}
// End-to-end local send: the mocked processor answers asynchronously via
// writeAndFlush on the invocation channel, and the resulting SendResult must
// echo all response-header fields (offset, msgId, transactionId, queue).
@Test
public void testSendMessageWriteAndFlush() throws Exception {
    Message message = new Message(topic, "body".getBytes(StandardCharsets.UTF_8));
    MessageClientIDSetter.setUniqID(message);
    List<Message> messagesList = Collections.singletonList(message);
    SendMessageRequestHeader requestHeader = new SendMessageRequestHeader();
    requestHeader.setTopic(topic);
    requestHeader.setQueueId(queueId);
    Mockito.when(sendMessageProcessorMock.processRequest(Mockito.any(SimpleChannelHandlerContext.class),
        Mockito.argThat(argument -> {
            // NOTE(review): non-short-circuit '&' — both checks always evaluate.
            boolean first = argument.getCode() == RequestCode.SEND_MESSAGE;
            boolean second = Arrays.equals(argument.getBody(), message.getBody());
            return first & second;
        }))).thenAnswer(invocation -> {
        SimpleChannelHandlerContext simpleChannelHandlerContext = invocation.getArgument(0);
        RemotingCommand request = invocation.getArgument(1);
        RemotingCommand response = RemotingCommand.createResponseCommand(SendMessageResponseHeader.class);
        response.setOpaque(request.getOpaque());
        response.setCode(ResponseCode.SUCCESS);
        response.setBody(message.getBody());
        SendMessageResponseHeader sendMessageResponseHeader = (SendMessageResponseHeader) response.readCustomHeader();
        sendMessageResponseHeader.setQueueId(queueId);
        sendMessageResponseHeader.setQueueOffset(queueOffset);
        sendMessageResponseHeader.setMsgId(offsetMessageId);
        sendMessageResponseHeader.setTransactionId(transactionId);
        // Deliver the response through the channel instead of returning it,
        // exercising the async completion path of sendMessage.
        simpleChannelHandlerContext.writeAndFlush(response);
        return null;
    });
    CompletableFuture<List<SendResult>> future =
        localMessageService.sendMessage(proxyContext, null, messagesList, requestHeader, 1000L);
    SendResult sendResult = future.get().get(0);
    assertThat(sendResult.getSendStatus()).isEqualTo(SendStatus.SEND_OK);
    assertThat(sendResult.getMsgId()).isEqualTo(MessageClientIDSetter.getUniqID(message));
    assertThat(sendResult.getMessageQueue())
        .isEqualTo(new MessageQueue(topic, brokerControllerMock.getBrokerConfig().getBrokerName(), queueId));
    assertThat(sendResult.getQueueOffset()).isEqualTo(queueOffset);
    assertThat(sendResult.getTransactionId()).isEqualTo(transactionId);
    assertThat(sendResult.getOffsetMsgId()).isEqualTo(offsetMessageId);
}
/**
 * Adds (or, if already owned, replaces) the given job graph in the state handle
 * store. Retries the add/replace under the lock until it succeeds, handling the
 * race where the entry appears or disappears between the existence check and
 * the write.
 *
 * @param jobGraph the job graph to persist (must not be null)
 * @throws IllegalStateException when an existing entry was not put/recovered by
 *         this store instance
 * @throws Exception on store failures
 */
@Override
public void putJobGraph(JobGraph jobGraph) throws Exception {
    checkNotNull(jobGraph, "Job graph");

    final JobID jobID = jobGraph.getJobID();
    final String name = jobGraphStoreUtil.jobIDToName(jobID);

    LOG.debug("Adding job graph {} to {}.", jobID, jobGraphStateHandleStore);

    boolean success = false;
    while (!success) {
        synchronized (lock) {
            verifyIsRunning();

            final R currentVersion = jobGraphStateHandleStore.exists(name);

            if (!currentVersion.isExisting()) {
                try {
                    jobGraphStateHandleStore.addAndLock(name, jobGraph);
                    addedJobGraphs.add(jobID);
                    success = true;
                } catch (StateHandleStore.AlreadyExistException ignored) {
                    // Lost the race to another writer; loop and retry as a replace.
                    LOG.warn("{} already exists in {}.", jobGraph, jobGraphStateHandleStore);
                }
            } else if (addedJobGraphs.contains(jobID)) {
                try {
                    jobGraphStateHandleStore.replace(name, currentVersion, jobGraph);
                    LOG.info("Updated {} in {}.", jobGraph, getClass().getSimpleName());
                    success = true;
                } catch (StateHandleStore.NotExistException ignored) {
                    // Entry vanished between exists() and replace(); retry as an add.
                    LOG.warn("{} does not exists in {}.", jobGraph, jobGraphStateHandleStore);
                }
            } else {
                throw new IllegalStateException(
                    "Trying to update a graph you didn't "
                        + "#getAllSubmittedJobGraphs() or #putJobGraph() yourself before.");
            }
        }
    }

    LOG.info("Added {} to {}.", jobGraph, jobGraphStateHandleStore);
}
// The second putJobGraph for the same job must go down the replace path,
// passing the current resource version and the same job id to the store.
@Test
public void testPutJobGraphWhenAlreadyExist() throws Exception {
    final CompletableFuture<Tuple3<String, IntegerResourceVersion, JobGraph>> replaceFuture =
        new CompletableFuture<>();
    final int resourceVersion = 100;
    final AtomicBoolean alreadyExist = new AtomicBoolean(false);
    final TestingStateHandleStore<JobGraph> stateHandleStore =
        builder.setExistsFunction(
                ignore -> {
                    // First call: not existing (add path); subsequent calls: existing.
                    if (alreadyExist.get()) {
                        return IntegerResourceVersion.valueOf(resourceVersion);
                    } else {
                        alreadyExist.set(true);
                        return IntegerResourceVersion.notExisting();
                    }
                })
            .setAddFunction((ignore, state) -> jobGraphStorageHelper.store(state))
            .setReplaceConsumer(replaceFuture::complete)
            .build();

    final JobGraphStore jobGraphStore = createAndStartJobGraphStore(stateHandleStore);
    jobGraphStore.putJobGraph(testingJobGraph);

    // Replace
    jobGraphStore.putJobGraph(testingJobGraph);
    final Tuple3<String, IntegerResourceVersion, JobGraph> actual =
        replaceFuture.get(timeout, TimeUnit.MILLISECONDS);
    assertThat(actual.f0, is(testingJobGraph.getJobID().toString()));
    assertThat(actual.f1, is(IntegerResourceVersion.valueOf(resourceVersion)));
    assertThat(actual.f2.getJobID(), is(testingJobGraph.getJobID()));
}
/**
 * Drains up to {@code maxRecords} acquired records from this completed fetch into a
 * {@code ShareInFlightBatch}. Acquired offsets with no corresponding record are
 * reported as gaps. Deserialization and CRC failures are surfaced on the batch;
 * when a failure occurs mid-batch, the exception is cached and re-raised as an
 * empty batch on the next call so already-parsed records are not lost.
 *
 * @param deserializers key/value deserializers
 * @param maxRecords    maximum number of records to return in this batch
 * @param checkCrcs     whether to verify record CRCs while iterating
 * @return the batch of records, gaps, acknowledgements and/or an exception
 */
<K, V> ShareInFlightBatch<K, V> fetchRecords(final Deserializers<K, V> deserializers,
                                             final int maxRecords,
                                             final boolean checkCrcs) {
    // Creating an empty ShareInFlightBatch
    ShareInFlightBatch<K, V> inFlightBatch = new ShareInFlightBatch<>(partition);

    // Replay a batch-level failure cached by the previous call.
    if (cachedBatchException != null) {
        // If the event that a CRC check fails, reject the entire record batch because it is corrupt.
        rejectRecordBatch(inFlightBatch, currentBatch);
        inFlightBatch.setException(cachedBatchException);
        cachedBatchException = null;
        return inFlightBatch;
    }

    // Replay a record-level failure cached by the previous call: release the bad record.
    if (cachedRecordException != null) {
        inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
        inFlightBatch.setException(cachedRecordException);
        cachedRecordException = null;
        return inFlightBatch;
    }

    if (isConsumed)
        return inFlightBatch;

    initializeNextAcquired();

    try {
        int recordsInBatch = 0;

        while (recordsInBatch < maxRecords) {
            lastRecord = nextFetchedRecord(checkCrcs);
            if (lastRecord == null) {
                // Any remaining acquired records are gaps
                while (nextAcquired != null) {
                    inFlightBatch.addGap(nextAcquired.offset);
                    nextAcquired = nextAcquiredRecord();
                }
                break;
            }

            // Advance the acquired-offset cursor until it aligns with (or passes) this record.
            while (nextAcquired != null) {
                if (lastRecord.offset() == nextAcquired.offset) {
                    // It's acquired, so we parse it and add it to the batch
                    Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
                    TimestampType timestampType = currentBatch.timestampType();
                    ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch,
                        timestampType, lastRecord, nextAcquired.deliveryCount);
                    inFlightBatch.addRecord(record);

                    recordsRead++;
                    bytesRead += lastRecord.sizeInBytes();
                    recordsInBatch++;
                    nextAcquired = nextAcquiredRecord();
                    break;
                } else if (lastRecord.offset() < nextAcquired.offset) {
                    // It's not acquired, so we skip it
                    break;
                } else {
                    // It's acquired, but there's no non-control record at this offset, so it's a gap
                    inFlightBatch.addGap(nextAcquired.offset);
                }

                nextAcquired = nextAcquiredRecord();
            }
        }
    } catch (SerializationException se) {
        nextAcquired = nextAcquiredRecord();
        if (inFlightBatch.isEmpty()) {
            // Nothing parsed yet: release the bad record and raise now.
            inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
            inFlightBatch.setException(se);
        } else {
            // Preserve parsed records; re-raise on the next call.
            cachedRecordException = se;
            inFlightBatch.setHasCachedException(true);
        }
    } catch (CorruptRecordException e) {
        if (inFlightBatch.isEmpty()) {
            // If the event that a CRC check fails, reject the entire record batch because it is corrupt.
            rejectRecordBatch(inFlightBatch, currentBatch);
            inFlightBatch.setException(e);
        } else {
            cachedBatchException = e;
            inFlightBatch.setHasCachedException(true);
        }
    }

    return inFlightBatch;
}
// Builds good / bad-key / bad-value / good records, then verifies the one-record-
// per-call behavior around deserialization failures: bad records are RELEASEd
// with a RecordDeserializationException carrying the failing key/value buffers,
// and good records resume afterwards.
@Test
public void testCorruptedMessage() {
    // Create one good record and then two "corrupted" records and then another good record.
    try (final MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
            Compression.NONE, TimestampType.CREATE_TIME, 0);
         final UUIDSerializer serializer = new UUIDSerializer()) {
        builder.append(new SimpleRecord(serializer.serialize(TOPIC_NAME, UUID.randomUUID())));
        builder.append(0L, "key".getBytes(), "value".getBytes());
        Headers headers = new RecordHeaders();
        headers.add("hkey", "hvalue".getBytes());
        builder.append(10L, serializer.serialize("key", UUID.randomUUID()), "otherValue".getBytes(), headers.toArray());
        builder.append(new SimpleRecord(serializer.serialize(TOPIC_NAME, UUID.randomUUID())));
        Records records = builder.build();

        ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData()
                .setPartitionIndex(0)
                .setRecords(records)
                .setAcquiredRecords(acquiredRecords(0L, 4));

        try (final Deserializers<UUID, UUID> deserializers = newUuidDeserializers()) {
            ShareCompletedFetch completedFetch = newShareCompletedFetch(partitionData);

            // Record 0 is returned by itself because record 1 fails to deserialize
            ShareInFlightBatch<UUID, UUID> batch = completedFetch.fetchRecords(deserializers, 10, false);
            assertNull(batch.getException());
            List<ConsumerRecord<UUID, UUID>> fetchedRecords = batch.getInFlightRecords();
            assertEquals(1, fetchedRecords.size());
            assertEquals(0L, fetchedRecords.get(0).offset());
            Acknowledgements acknowledgements = batch.getAcknowledgements();
            assertEquals(0, acknowledgements.size());

            // Record 1 then results in an empty batch
            batch = completedFetch.fetchRecords(deserializers, 10, false);
            assertEquals(RecordDeserializationException.class, batch.getException().getClass());
            RecordDeserializationException thrown = (RecordDeserializationException) batch.getException();
            assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.KEY, thrown.origin());
            assertEquals(1, thrown.offset());
            assertEquals(TOPIC_NAME, thrown.topicPartition().topic());
            assertEquals(0, thrown.topicPartition().partition());
            assertEquals(0, thrown.timestamp());
            // The exception must expose the raw key/value bytes of the failing record.
            assertArrayEquals("key".getBytes(), org.apache.kafka.common.utils.Utils.toNullableArray(thrown.keyBuffer()));
            assertArrayEquals("value".getBytes(), Utils.toNullableArray(thrown.valueBuffer()));
            assertEquals(0, thrown.headers().toArray().length);
            fetchedRecords = batch.getInFlightRecords();
            assertEquals(0, fetchedRecords.size());
            acknowledgements = batch.getAcknowledgements();
            assertEquals(1, acknowledgements.size());
            assertEquals(AcknowledgeType.RELEASE, acknowledgements.get(1L));

            // Record 2 then results in an empty batch, because record 1 has now been skipped
            batch = completedFetch.fetchRecords(deserializers, 10, false);
            assertEquals(RecordDeserializationException.class, batch.getException().getClass());
            thrown = (RecordDeserializationException) batch.getException();
            assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.VALUE, thrown.origin());
            assertEquals(2L, thrown.offset());
            assertEquals(TOPIC_NAME, thrown.topicPartition().topic());
            assertEquals(0, thrown.topicPartition().partition());
            assertEquals(10L, thrown.timestamp());
            assertNotNull(thrown.keyBuffer());
            assertArrayEquals("otherValue".getBytes(), Utils.toNullableArray(thrown.valueBuffer()));
            fetchedRecords = batch.getInFlightRecords();
            assertEquals(0, fetchedRecords.size());
            acknowledgements = batch.getAcknowledgements();
            assertEquals(1, acknowledgements.size());
            assertEquals(AcknowledgeType.RELEASE, acknowledgements.get(2L));

            // Record 3 is returned in the next batch, because record 2 has now been skipped
            batch = completedFetch.fetchRecords(deserializers, 10, false);
            assertNull(batch.getException());
            fetchedRecords = batch.getInFlightRecords();
            assertEquals(1, fetchedRecords.size());
            assertEquals(3L, fetchedRecords.get(0).offset());
            acknowledgements = batch.getAcknowledgements();
            assertEquals(0, acknowledgements.size());
        }
    }
}
/**
 * Extracts the (URL-decoded) query parameter names from a URI and joins them
 * with {@code " && "} in their order of appearance.
 *
 * @param uri the request URI, possibly carrying a query string
 * @return the joined parameter names, or an empty string when the URI has no
 *         {@code ?} or no {@code =} anywhere in it
 */
public static String extractParamsFromURI(String uri) {
    // Only URIs containing both '?' and '=' are considered to have parameters.
    if (!(uri.contains("?") && uri.contains("="))) {
        return "";
    }
    String query = uri.substring(uri.indexOf("?") + 1);
    StringBuilder keys = new StringBuilder();
    for (String pair : query.split("&")) {
        String decodedKey = URLDecoder.decode(pair.split("=")[0], StandardCharsets.UTF_8);
        if (keys.length() > 0) {
            keys.append(" && ");
        }
        keys.append(decodedKey);
    }
    return keys.toString();
}
@Test void testExtractParamsFromURI() { // Check with parameters. String requestPath = "/v2/pet/findByStatus?user_key=998bac0775b1d5f588e0a6ca7c11b852&status=available"; // Dispatch string params are sorted. String dispatchCriteria = DispatchCriteriaHelper.extractParamsFromURI(requestPath); assertEquals("user_key && status", dispatchCriteria); }
/**
 * Initializes and returns the naming cache directory for the given namespace.
 * The layout is {@code <root>/nacos[/<registryDir>]/naming/<namespace>}, where the root
 * is the JM snapshot path when configured, otherwise the user home directory.
 *
 * @param namespace  the namespace the cache belongs to
 * @param properties client properties supplying the optional overrides
 * @return the resolved cache directory (also stored in the static {@code cacheDir} field)
 */
public static String initCacheDir(String namespace, NacosClientProperties properties) {
    // Optional extra sub-directory inserted between "nacos" and "naming".
    String registrySubDir = "";
    String customRegistryDir = properties.getProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR);
    if (customRegistryDir != null) {
        registrySubDir = File.separator + customRegistryDir;
    }
    // The JM snapshot path, when set and non-blank, wins over the user home as cache root.
    String jmSnapshotPath = properties.getProperty(JM_SNAPSHOT_PATH_PROPERTY);
    String rootDir = StringUtils.isBlank(jmSnapshotPath)
            ? properties.getProperty(USER_HOME_PROPERTY)
            : jmSnapshotPath;
    cacheDir = rootDir + File.separator + FILE_PATH_NACOS + registrySubDir + File.separator
            + FILE_PATH_NAMING + File.separator + namespace;
    return cacheDir;
}
@Test
void testInitCacheDirWithDefaultRootAndWithCache() {
    // With no JM snapshot path configured, the cache root falls back to user.home.
    System.setProperty("user.home", "/home/admin");
    NacosClientProperties clientProperties = NacosClientProperties.PROTOTYPE.derive();
    // A custom registry dir is expected to be inserted between "nacos" and "naming".
    clientProperties.setProperty(PropertyKeyConst.NAMING_CACHE_REGISTRY_DIR, "custom");
    assertEquals("/home/admin/nacos/custom/naming/test",
            CacheDirUtil.initCacheDir("test", clientProperties));
}
/**
 * Encodes the given VLAN ID into its integer representation.
 *
 * @param resource VLAN ID to encode
 * @return the numeric VLAN ID, widened losslessly from short to int
 */
@Override public int encode(VlanId resource) { return resource.toShort(); }
// Encoding a VLAN ID yields its plain numeric value widened to int.
@Test public void testEncode() { assertThat(sut.encode(VlanId.vlanId((short) 100)), is(100)); }
/**
 * Unescapes the specified escaped CSV field.
 * A quoted field (wrapped in double-quotes) is returned with the wrapping quotes removed
 * and each escaped {@code ""} pair collapsed to a single {@code "}. An unquoted field is
 * returned as-is after format validation.
 *
 * @param value the escaped CSV field to unescape
 * @return the unescaped field content
 * @throws IllegalArgumentException if the field is not a valid escaped CSV field
 *         (e.g. a stray or unpaired double-quote)
 */
public static CharSequence unescapeCsv(CharSequence value) { int length = checkNotNull(value, "value").length(); if (length == 0) { return value; } int last = length - 1; // A single '"' (length == 1) is NOT considered quoted: first and last char are the same quote. boolean quoted = isDoubleQuote(value.charAt(0)) && isDoubleQuote(value.charAt(last)) && length != 1; if (!quoted) { validateCsvFormat(value); return value; } StringBuilder unescaped = InternalThreadLocalMap.get().stringBuilder(); // Walk the content between the wrapping quotes (indices 1 .. last-1). for (int i = 1; i < last; i++) { char current = value.charAt(i); if (current == DOUBLE_QUOTE) { if (isDoubleQuote(value.charAt(i + 1)) && (i + 1) != last) { // Followed by a double-quote but not the last character // Just skip the next double-quote } else { // Not followed by a double-quote or the following double-quote is the last character throw newInvalidEscapedCsvFieldException(value, i); } } unescaped.append(current); } return unescaped.toString(); }
@Test
public void unescapeCsvWithSingleQuote() {
    // A lone double-quote is neither a complete quoted field nor valid plain text,
    // so unescaping must reject it.
    assertThrows(IllegalArgumentException.class, () -> unescapeCsv("\""));
}
@Override public SmsSendRespDTO sendSms(Long sendLogId, String mobile, String apiTemplateId, List<KeyValue<String, Object>> templateParams) throws Throwable { Assert.notBlank(properties.getSignature(), "短信签名不能为空"); // 1. 执行请求 // 参考链接 https://api.aliyun.com/document/Dysmsapi/2017-05-25/SendSms TreeMap<String, Object> queryParam = new TreeMap<>(); queryParam.put("PhoneNumbers", mobile); queryParam.put("SignName", properties.getSignature()); queryParam.put("TemplateCode", apiTemplateId); queryParam.put("TemplateParam", JsonUtils.toJsonString(MapUtils.convertMap(templateParams))); queryParam.put("OutId", sendLogId); JSONObject response = request("SendSms", queryParam); // 2. 解析请求 return new SmsSendRespDTO() .setSuccess(Objects.equals(response.getStr("Code"), RESPONSE_CODE_SUCCESS)) .setSerialNo(response.getStr("BizId")) .setApiRequestId(response.getStr("RequestId")) .setApiCode(response.getStr("Code")) .setApiMsg(response.getStr("Message")); }
@Test public void tesSendSms_fail() throws Throwable { try (MockedStatic<HttpUtils> httpUtilsMockedStatic = mockStatic(HttpUtils.class)) { // 准备参数 Long sendLogId = randomLongId(); String mobile = randomString(); String apiTemplateId = randomString(); List<KeyValue<String, Object>> templateParams = Lists.newArrayList( new KeyValue<>("code", 1234), new KeyValue<>("op", "login")); // mock 方法 httpUtilsMockedStatic.when(() -> HttpUtils.post(anyString(), anyMap(), anyString())) .thenReturn("{\"Message\":\"手机号码格式错误\",\"RequestId\":\"B7700B8E-227E-5886-9564-26036172F01F\",\"Code\":\"isv.MOBILE_NUMBER_ILLEGAL\"}"); // 调用 SmsSendRespDTO result = smsClient.sendSms(sendLogId, mobile, apiTemplateId, templateParams); // 断言 assertFalse(result.getSuccess()); assertEquals("B7700B8E-227E-5886-9564-26036172F01F", result.getApiRequestId()); assertEquals("isv.MOBILE_NUMBER_ILLEGAL", result.getApiCode()); assertEquals("手机号码格式错误", result.getApiMsg()); assertNull(result.getSerialNo()); } }