focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Converts a Flink filter expression into an equivalent Iceberg {@link Expression}.
 * Returns {@code Optional.empty()} when the expression (or any of its operands)
 * has no supported Iceberg counterpart.
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
  // Only function-call expressions can map onto Iceberg predicates.
  if (!(flinkExpression instanceof CallExpression)) {
    return Optional.empty();
  }
  CallExpression call = (CallExpression) flinkExpression;
  // FILTERS maps Flink built-in function definitions to Iceberg operations;
  // unknown functions fall through to Optional.empty() below.
  Operation op = FILTERS.get(call.getFunctionDefinition());
  if (op != null) {
    switch (op) {
      case IS_NULL:
        // IS NULL requires exactly one field-reference child.
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::isNull);
      case NOT_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::notNull);
      case LT:
        // Second converter handles the literal-first form (e.g. 3 < x  =>  x > 3).
        return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
      case LT_EQ:
        return convertFieldAndLiteral(
            Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
      case GT:
        return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
      case GT_EQ:
        return convertFieldAndLiteral(
            Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
      case EQ:
        // NaN never compares equal, so x = NaN is rewritten as isNaN(x).
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.isNaN(ref);
              } else {
                return Expressions.equal(ref, lit);
              }
            },
            call);
      case NOT_EQ:
        // Symmetric NaN handling for the negated comparison.
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.notNaN(ref);
              } else {
                return Expressions.notEqual(ref, lit);
              }
            },
            call);
      case NOT:
        // Recursively convert the single negated child expression.
        return onlyChildAs(call, CallExpression.class)
            .flatMap(FlinkFilters::convert)
            .map(Expressions::not);
      case AND:
        return convertLogicExpression(Expressions::and, call);
      case OR:
        return convertLogicExpression(Expressions::or, call);
      case STARTS_WITH:
        return convertLike(call);
    }
  }
  return Optional.empty();
}
@Test
public void testAnd() {
  // Build the Flink expression: field1 = 1 AND field2 = 2L.
  Expression flinkExpr =
      resolve(
          Expressions.$("field1")
              .isEqual(Expressions.lit(1))
              .and(Expressions.$("field2").isEqual(Expressions.lit(2L))));

  // Conversion must succeed and yield an Iceberg AND predicate.
  Optional<org.apache.iceberg.expressions.Expression> converted = FlinkFilters.convert(flinkExpr);
  assertThat(converted).isPresent();
  And actualAnd = (And) converted.get();

  // Expected Iceberg expression built directly from the Iceberg API.
  And expectedAnd =
      (And)
          org.apache.iceberg.expressions.Expressions.and(
              org.apache.iceberg.expressions.Expressions.equal("field1", 1),
              org.apache.iceberg.expressions.Expressions.equal("field2", 2L));

  // Compare both sides of the conjunction predicate-by-predicate.
  assertPredicatesMatch(expectedAnd.left(), actualAnd.left());
  assertPredicatesMatch(expectedAnd.right(), actualAnd.right());
}
/**
 * Constant-folding implementation of months_sub(datetime, n): subtracts n months
 * from the given datetime constant.
 */
@ConstantFunction(name = "months_sub", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true)
public static ConstantOperator monthsSub(ConstantOperator date, ConstantOperator month) {
    final int monthsToSubtract = month.getInt();
    return ConstantOperator.createDatetimeOrNull(date.getDatetime().minusMonths(monthsToSubtract));
}
@Test
public void monthsSub() {
    // 2015-03-23T09:23:55 minus 10 months -> 2014-05-23T09:23:55.
    assertEquals("2014-05-23T09:23:55",
            ScalarOperatorFunctions.monthsSub(O_DT_20150323_092355, O_INT_10).getDatetime().toString());
}
@Override public Long createSmsLog(String mobile, Long userId, Integer userType, Boolean isSend, SmsTemplateDO template, String templateContent, Map<String, Object> templateParams) { SmsLogDO.SmsLogDOBuilder logBuilder = SmsLogDO.builder(); // 根据是否要发送,设置状态 logBuilder.sendStatus(Objects.equals(isSend, true) ? SmsSendStatusEnum.INIT.getStatus() : SmsSendStatusEnum.IGNORE.getStatus()); // 设置手机相关字段 logBuilder.mobile(mobile).userId(userId).userType(userType); // 设置模板相关字段 logBuilder.templateId(template.getId()).templateCode(template.getCode()).templateType(template.getType()); logBuilder.templateContent(templateContent).templateParams(templateParams) .apiTemplateId(template.getApiTemplateId()); // 设置渠道相关字段 logBuilder.channelId(template.getChannelId()).channelCode(template.getChannelCode()); // 设置接收相关字段 logBuilder.receiveStatus(SmsReceiveStatusEnum.INIT.getStatus()); // 插入数据库 SmsLogDO logDO = logBuilder.build(); smsLogMapper.insert(logDO); return logDO.getId(); }
@Test
public void testCreateSmsLog() {
    // Prepare randomized inputs.
    String mobile = randomString();
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    Boolean isSend = randomBoolean();
    SmsTemplateDO templateDO = randomPojo(SmsTemplateDO.class,
            o -> o.setType(randomEle(SmsTemplateTypeEnum.values()).getType()));
    String templateContent = randomString();
    Map<String, Object> templateParams = randomTemplateParams();
    // No mocking required for this path.
    // Invoke the service under test.
    Long logId = smsLogService.createSmsLog(mobile, userId, userType, isSend,
            templateDO, templateContent, templateParams);
    // Load the persisted row and verify every field was copied through.
    SmsLogDO logDO = smsLogMapper.selectById(logId);
    // Send status must reflect the isSend flag: INIT when sending, IGNORE otherwise.
    assertEquals(isSend ? SmsSendStatusEnum.INIT.getStatus() : SmsSendStatusEnum.IGNORE.getStatus(),
            logDO.getSendStatus());
    assertEquals(mobile, logDO.getMobile());
    assertEquals(userType, logDO.getUserType());
    assertEquals(userId, logDO.getUserId());
    assertEquals(templateDO.getId(), logDO.getTemplateId());
    assertEquals(templateDO.getCode(), logDO.getTemplateCode());
    assertEquals(templateDO.getType(), logDO.getTemplateType());
    assertEquals(templateDO.getChannelId(), logDO.getChannelId());
    assertEquals(templateDO.getChannelCode(), logDO.getChannelCode());
    assertEquals(templateContent, logDO.getTemplateContent());
    assertEquals(templateParams, logDO.getTemplateParams());
    // Receive status always starts at INIT.
    assertEquals(SmsReceiveStatusEnum.INIT.getStatus(), logDO.getReceiveStatus());
}
/**
 * SQL DOUBLE multiplication operator: returns {@code left * right}.
 */
@ScalarOperator(MULTIPLY)
@SqlType(StandardTypes.DOUBLE)
public static double multiply(@SqlType(StandardTypes.DOUBLE) double left,
        @SqlType(StandardTypes.DOUBLE) double right)
{
    final double product = left * right;
    return product;
}
@Test
public void testMultiply() {
    // Compare SQL-evaluated products against Java double arithmetic,
    // covering both operand orders to confirm commutativity.
    assertFunction("37.7E0 * 37.7E0", DOUBLE, 37.7 * 37.7);
    assertFunction("37.7E0 * 17.1E0", DOUBLE, 37.7 * 17.1);
    assertFunction("17.1E0 * 37.7E0", DOUBLE, 17.1 * 37.7);
    assertFunction("17.1E0 * 17.1E0", DOUBLE, 17.1 * 17.1);
}
// Delegates to the underlying directory for the service interface type.
@Override
public Class<T> getInterface() {
    return directory.getInterface();
}
@Test
void testGetMenuSuccessfully() {
    // setup: configure the merger key and stub the invocation for getMenu().
    url = url.addParameter(MERGER_KEY, ".merge");
    given(invocation.getMethodName()).willReturn("getMenu");
    given(invocation.getParameterTypes()).willReturn(new Class<?>[] {});
    given(invocation.getArguments()).willReturn(new Object[] {});
    given(invocation.getObjectAttachments()).willReturn(new HashMap<>());
    given(invocation.getInvoker()).willReturn(firstInvoker);
    // Dynamic-proxy invoker for group "first": answers getUrl/getInterface/invoke.
    firstInvoker = (Invoker) Proxy.newProxyInstance(
            getClass().getClassLoader(),
            new Class<?>[] {Invoker.class},
            (proxy, method, args) -> {
                if ("getUrl".equals(method.getName())) {
                    return url.addParameter(GROUP_KEY, "first");
                }
                if ("getInterface".equals(method.getName())) {
                    return MenuService.class;
                }
                if ("invoke".equals(method.getName())) {
                    return AsyncRpcResult.newDefaultAsyncResult(firstMenu, invocation);
                }
                return null;
            });
    // Dynamic-proxy invoker for group "second", returning the second menu.
    secondInvoker = (Invoker) Proxy.newProxyInstance(
            getClass().getClassLoader(),
            new Class<?>[] {Invoker.class},
            (proxy, method, args) -> {
                if ("getUrl".equals(method.getName())) {
                    return url.addParameter(GROUP_KEY, "second");
                }
                if ("getInterface".equals(method.getName())) {
                    return MenuService.class;
                }
                if ("invoke".equals(method.getName())) {
                    return AsyncRpcResult.newDefaultAsyncResult(secondMenu, invocation);
                }
                return null;
            });
    // Directory lists both invokers so their results get merged.
    given(directory.list(invocation)).willReturn(new ArrayList() {
        {
            add(firstInvoker);
            add(secondInvoker);
        }
    });
    given(directory.getUrl()).willReturn(url);
    given(directory.getConsumerUrl()).willReturn(url);
    // NOTE(review): the line below duplicates the stub above — harmless but redundant.
    given(directory.getConsumerUrl()).willReturn(url);
    given(directory.getInterface()).willReturn(MenuService.class);
    mergeableClusterInvoker = new MergeableClusterInvoker<MenuService>(directory);
    // invoke: the cluster invoker should merge both group results into one Menu.
    Result result = mergeableClusterInvoker.invoke(invocation);
    assertTrue(result.getValue() instanceof Menu);
    Menu menu = (Menu) result.getValue();
    // Expected map is the merge of both per-group menu maps.
    Map<String, List<String>> expected = new HashMap<>();
    merge(expected, firstMenuMap);
    merge(expected, secondMenuMap);
    assertEquals(expected.keySet(), menu.getMenus().keySet());
    for (Map.Entry<String, List<String>> entry : expected.entrySet()) {
        // FIXME: cannot guarantee the sequence of the merge result, check implementation in
        // MergeableClusterInvoker#invoke
        List<String> values1 = new ArrayList<>(entry.getValue());
        List<String> values2 = new ArrayList<>(menu.getMenus().get(entry.getKey()));
        // Sort before comparing because merge order is unspecified (see FIXME above).
        Collections.sort(values1);
        Collections.sort(values2);
        assertEquals(values1, values2);
    }
}
/**
 * Scans the classpath and every configured plugin location for Connect plugins,
 * handles one row per discovered plugin, and finishes the command with the
 * accumulated workspace and per-location rows. Any failure is routed to failCommand.
 */
public static void runCommand(Config config) throws TerseException {
    try {
        ManifestWorkspace workspace = new ManifestWorkspace(config.out);
        ClassLoader parent = ConnectPluginPath.class.getClassLoader();
        ServiceLoaderScanner serviceLoaderScanner = new ServiceLoaderScanner();
        ReflectionScanner reflectionScanner = new ReflectionScanner();
        // Scan the application classpath first; its rows are keyed by a null location.
        PluginSource classpathSource = PluginUtils.classpathPluginSource(parent);
        ManifestWorkspace.SourceWorkspace<?> classpathWorkspace = workspace.forSource(classpathSource);
        PluginScanResult classpathPlugins = discoverPlugins(classpathSource, reflectionScanner, serviceLoaderScanner);
        Map<Path, Set<Row>> rowsByLocation = new LinkedHashMap<>();
        Set<Row> classpathRows = enumerateRows(classpathWorkspace, classpathPlugins);
        rowsByLocation.put(null, classpathRows);
        ClassLoaderFactory factory = new ClassLoaderFactory();
        // The delegating class loader isolates each plugin location; closed automatically.
        try (DelegatingClassLoader delegatingClassLoader = factory.newDelegatingClassLoader(parent)) {
            beginCommand(config);
            for (Path pluginLocation : config.locations) {
                PluginSource source = PluginUtils.isolatedPluginSource(pluginLocation, delegatingClassLoader, factory);
                ManifestWorkspace.SourceWorkspace<?> pluginWorkspace = workspace.forSource(source);
                PluginScanResult plugins = discoverPlugins(source, reflectionScanner, serviceLoaderScanner);
                Set<Row> rows = enumerateRows(pluginWorkspace, plugins);
                rowsByLocation.put(pluginLocation, rows);
                // Handle each discovered plugin row eagerly, per location.
                for (Row row : rows) {
                    handlePlugin(config, row);
                }
            }
            endCommand(config, workspace, rowsByLocation);
        }
    } catch (Throwable e) {
        // All failures (including Errors) are reported through the command's failure path.
        failCommand(config, e);
    }
}
@Test
public void testSyncManifestsDryRunReadOnlyManifest() {
    // Set up a plugin location whose manifest file is made read-only.
    PluginLocationType type = PluginLocationType.CLASS_HIERARCHY;
    PluginLocation locationA = setupLocation(workspace.resolve("location-a"), type,
            TestPlugins.TestPlugin.NON_MIGRATED_MULTI_PLUGIN);
    String subPath = "META-INF/services/"
            + PluginType.CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY.superClass().getName();
    assertTrue(locationA.path.resolve(subPath).toFile().setReadOnly());
    // A dry-run sync against the read-only manifest should fail.
    CommandResult res = runCommand(
            "sync-manifests",
            "--plugin-location",
            locationA,
            "--dry-run"
    );
    // Return code 2 signals the command-level failure.
    assertEquals(2, res.returnCode);
}
/**
 * Returns the cached OkHttpClient configured for the given keep-alive and
 * TLS-verification flags, building one on first access.
 */
public OkHttpClient get(boolean keepAlive, boolean skipTLSVerify) {
    final Parameters cacheKey = Parameters.fromBoolean(keepAlive, skipTLSVerify);
    try {
        return cache.get(cacheKey);
    } catch (ExecutionException e) {
        // Cache loader failures are not recoverable here; surface them unchecked.
        throw new RuntimeException(e);
    }
}
@Test
public void testWithSkipTlsVerifyNoKeepAlive()
        throws IOException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException {
    final ParameterizedHttpClientProvider provider = new ParameterizedHttpClientProvider(client(null));
    // keepAlive=false, skipTLSVerify=true.
    final OkHttpClient okHttpClient = provider.get(false, true);
    // Sockets created by the client's SSL factory must have SO_KEEPALIVE disabled.
    assertThat(okHttpClient.sslSocketFactory().createSocket().getOption(StandardSocketOptions.SO_KEEPALIVE)).isFalse();
    // The client should still complete a round-trip against the test server.
    try (Response response = okHttpClient.newCall(new Request.Builder().url(server.url("/")).get().build()).execute()) {
        assertThat(response.isSuccessful()).isTrue();
    }
}
/**
 * Reads a char field by name. A field missing from the class definition yields
 * the zero char instead of an error; existing fields are type-checked first.
 */
@Override
public char readChar(@Nonnull String fieldName) throws IOException {
    final FieldDefinition definition = cd.getField(fieldName);
    if (definition == null) {
        // Unknown field name: fall back to the default char value.
        return 0;
    }
    validateTypeCompatibility(definition, CHAR);
    return super.readChar(fieldName);
}
@Test
public void testReadChar() throws Exception {
    // Existing field: the stored char (code point 2) is returned.
    char aChar = reader.readChar("char");
    assertEquals(2, aChar);
    // Missing field: the reader falls back to 0 instead of throwing.
    assertEquals(0, reader.readChar("NO SUCH FIELD"));
}
// Computes the effective predicate of a plan node by visiting it with the
// domain-translating visitor; no initial context is needed (null).
public RowExpression extract(PlanNode node) {
    return node.accept(new Visitor(domainTranslator, functionAndTypeManager), null);
}
@Test
public void testTableScan() {
    // Effective predicate is True if there is no effective predicate
    Map<VariableReferenceExpression, ColumnHandle> assignments =
            Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(AV, BV, CV, DV)));
    // Case 1: no constraint -> TRUE.
    PlanNode node = new TableScanNode(
            Optional.empty(),
            newId(),
            DUAL_TABLE_HANDLE,
            ImmutableList.copyOf(assignments.keySet()),
            assignments,
            TupleDomain.all(),
            TupleDomain.all());
    RowExpression effectivePredicate = effectivePredicateExtractor.extract(node);
    assertEquals(effectivePredicate, TRUE_CONSTANT);
    // Case 2: unsatisfiable constraint (TupleDomain.none()) -> FALSE.
    node = new TableScanNode(
            Optional.empty(),
            newId(),
            DUAL_TABLE_HANDLE_WITH_LAYOUT,
            ImmutableList.copyOf(assignments.keySet()),
            assignments,
            TupleDomain.none(),
            TupleDomain.all());
    effectivePredicate = effectivePredicateExtractor.extract(node);
    assertEquals(effectivePredicate, FALSE_CONSTANT);
    // Case 3: single-column single-value domain -> equality conjunct on that column.
    node = new TableScanNode(
            Optional.empty(),
            newId(),
            DUAL_TABLE_HANDLE_WITH_LAYOUT,
            ImmutableList.copyOf(assignments.keySet()),
            assignments,
            TupleDomain.withColumnDomains(ImmutableMap.of(scanAssignments.get(AV), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.all());
    effectivePredicate = effectivePredicateExtractor.extract(node);
    assertEquals(normalizeConjuncts(effectivePredicate), normalizeConjuncts(equals(bigintLiteral(1L), AV)));
    // Case 4: two single-value domains -> a conjunct per constrained column.
    node = new TableScanNode(
            Optional.empty(),
            newId(),
            DUAL_TABLE_HANDLE_WITH_LAYOUT,
            ImmutableList.copyOf(assignments.keySet()),
            assignments,
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    scanAssignments.get(AV), Domain.singleValue(BIGINT, 1L),
                    scanAssignments.get(BV), Domain.singleValue(BIGINT, 2L))),
            TupleDomain.all());
    effectivePredicate = effectivePredicateExtractor.extract(node);
    assertEquals(normalizeConjuncts(effectivePredicate),
            normalizeConjuncts(equals(bigintLiteral(2L), BV), equals(bigintLiteral(1L), AV)));
    // Case 5: back to the unconstrained handle -> TRUE again.
    node = new TableScanNode(
            Optional.empty(),
            newId(),
            DUAL_TABLE_HANDLE,
            ImmutableList.copyOf(assignments.keySet()),
            assignments,
            TupleDomain.all(),
            TupleDomain.all());
    effectivePredicate = effectivePredicateExtractor.extract(node);
    assertEquals(effectivePredicate, TRUE_CONSTANT);
}
@Override public void invoke(IN value, Context context) throws Exception { bufferLock.lock(); try { // TODO this implementation is not very effective, // optimize this with MemorySegment if needed ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputViewStreamWrapper wrapper = new DataOutputViewStreamWrapper(baos); serializer.serialize(value, wrapper); invokingRecordBytes = baos.size(); if (invokingRecordBytes > maxBytesPerBatch) { throw new RuntimeException( "Record size is too large for CollectSinkFunction. Record size is " + invokingRecordBytes + " bytes, " + "but max bytes per batch is only " + maxBytesPerBatch + " bytes. " + "Please consider increasing max bytes per batch value by setting " + CollectSinkOperatorFactory.MAX_BATCH_SIZE.key()); } if (currentBufferBytes + invokingRecordBytes > bufferSizeLimitBytes) { bufferCanAddNextResultCondition.await(); } buffer.add(baos.toByteArray()); currentBufferBytes += baos.size(); } finally { bufferLock.unlock(); } }
@Test
void testInvalidVersion() throws Exception {
    functionWrapper.openFunction();
    // Buffer a few records so there would be data to return for a valid version.
    for (int i = 0; i < 6; i++) {
        functionWrapper.invoke(i);
    }
    String version = initializeVersion();
    // invalid version: the sink must answer with the current version and no results.
    CollectCoordinationResponse response = functionWrapper.sendRequestAndGetResponse("invalid version", 0);
    assertResponseEquals(response, version, 0, Collections.emptyList());
    functionWrapper.closeFunctionNormally();
}
// POST /resend_sms: re-triggers the activation SMS for the current session by
// delegating to the shared flow service with the RESEND_SMS action.
@Operation(summary = "start new activation session with username/password", tags = { SwaggerConfig.ACTIVATE_WEBSITE,
        SwaggerConfig.ACTIVATE_SMS }, operationId = "resend_sms",
        parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"),
                @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")})
@PostMapping(value = "resend_sms", produces = "application/json")
@ResponseBody
public AppResponse resendSms(@Valid @RequestBody ResendSmsRequest request)
        throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException,
        SharedServiceClientException {
    return service.processAction(ActivationFlowFactory.TYPE, Action.RESEND_SMS, request);
}
@Test
void validateIfCorrectProcessesAreCalledResendSms()
        throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException,
        SharedServiceClientException {
    ResendSmsRequest request = new ResendSmsRequest();
    activationController.resendSms(request);
    // The controller must delegate exactly once to the flow service.
    verify(flowService, times(1)).processAction(anyString(), any(Action.class), any(ResendSmsRequest.class));
}
protected static VplsOperation getOptimizedVplsOperation(Deque<VplsOperation> operations) { if (operations.isEmpty()) { return null; } // no need to optimize if the queue contains only one operation if (operations.size() == 1) { return operations.getFirst(); } final VplsOperation firstOperation = operations.peekFirst(); final VplsOperation lastOperation = operations.peekLast(); final VplsOperation.Operation firstOp = firstOperation.op(); final VplsOperation.Operation lastOp = lastOperation.op(); if (firstOp.equals(VplsOperation.Operation.REMOVE)) { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 1: both first and last operation are REMOVE; do remove return firstOperation; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 2: if first is REMOVE, and last is ADD; do update return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } else { // case 3: first is REMOVE, last is UPDATE; do update return lastOperation; } } else if (firstOp.equals(VplsOperation.Operation.ADD)) { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 4: first is ADD, last is REMOVE; nothing to do return null; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 5: both first and last are ADD, do add return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.ADD); } else { // case 6: first is ADD and last is update, do add return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.ADD); } } else { if (lastOp.equals(VplsOperation.Operation.REMOVE)) { // case 7: last is remove, do remove return lastOperation; } else if (lastOp.equals(VplsOperation.Operation.ADD)) { // case 8: do update only return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } else { // case 9: from UPDATE to UPDATE // only need last UPDATE operation return VplsOperation.of(lastOperation.vpls(), VplsOperation.Operation.UPDATE); } } }
@Test
public void testOptimizeOperationsUToA() {
    // Queue an UPDATE followed by an ADD for the same VPLS.
    Deque<VplsOperation> operations = new ArrayDeque<>();
    VplsData vplsData = VplsData.of(VPLS1);
    vplsData.addInterfaces(ImmutableSet.of(V100H1));
    VplsOperation vplsOperation = VplsOperation.of(vplsData, VplsOperation.Operation.UPDATE);
    operations.add(vplsOperation);
    vplsData = VplsData.of(VPLS1, EncapsulationType.VLAN);
    vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
    vplsOperation = VplsOperation.of(vplsData, VplsOperation.Operation.ADD);
    operations.add(vplsOperation);
    vplsOperation = VplsOperationManager.getOptimizedVplsOperation(operations);
    // UPDATE .. ADD collapses to a single UPDATE carrying the latest VPLS data.
    assertEquals(VplsOperation.of(vplsData, VplsOperation.Operation.UPDATE), vplsOperation);
}
// Creates a NewIssuesNotification backed by the given assignee lookup.
// verifyAssigneesByUuid rejects invalid (e.g. null) maps up front.
public NewIssuesNotification newNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
    verifyAssigneesByUuid(assigneesByUuid);
    return new NewIssuesNotification(new DetailsSupplierImpl(assigneesByUuid));
}
@Test
public void newNewIssuesNotification_throws_NPE_if_assigneesByUuid_is_null() {
    // A null assignee map must be rejected with a descriptive NPE.
    assertThatThrownBy(() -> underTest.newNewIssuesNotification(null))
            .isInstanceOf(NullPointerException.class)
            .hasMessage("assigneesByUuid can't be null");
}
/**
 * Wraps the task produced by {@code taskFunction} in a retriable task governed
 * by the given retry policy, tagging the wrapper with the WITH_RETRY task type.
 */
public static <U> Task<U> withRetryPolicy(String name, RetryPolicy policy,
        Function1<Integer, Task<U>> taskFunction) {
    final RetriableTask<U> retriable = new RetriableTask<>(name, policy, taskFunction);
    final Task<U> wrapper = Task.async(name + " retriableTask", retriable::run);
    wrapper.getShallowTraceBuilder().setTaskType(TaskType.WITH_RETRY.getName());
    return wrapper;
}
@Test
public void testFailingTaskSupplier() {
    // The task supplier itself throws (rather than returning a failed task);
    // the retry wrapper must surface that exception after exhausting attempts.
    Task<Void> task = withRetryPolicy("testFailingTaskSupplier", RetryPolicy.attempts(3, 0), attempt -> {
        throw new IOException("ups");
    });
    runAndWaitException(task, IOException.class);
    assertTrue(task.isDone());
}
/**
 * Asserts the given string is neither null nor empty, delegating to the
 * two-argument overload with a default failure message.
 */
public static void notNullOrEmpty(String string) {
    final String message = String.format("string [%s] is null or empty", string);
    notNullOrEmpty(string, message);
}
@Test
public void testNotNull1NotEmpty4() {
    // A whitespace-only string is rejected — the underlying check apparently
    // treats blank strings as empty (implementation not visible here; confirm).
    assertThrows(IllegalArgumentException.class, () -> Precondition.notNullOrEmpty(" "));
}
/**
 * Maps an AgentMetadataDTO onto the domain AgentMetadata, copying the four
 * state fields one-to-one.
 */
@Override
public AgentMetadata fromDTO(AgentMetadataDTO agentMetadataDTO) {
    return new AgentMetadata(
            agentMetadataDTO.elasticAgentId(),
            agentMetadataDTO.agentState(),
            agentMetadataDTO.buildState(),
            agentMetadataDTO.configState());
}
@Test
public void fromDTO_shouldConvertToAgentMetadataFromAgentMetadataDTO() {
    final AgentMetadataDTO agentMetadataDTO = new AgentMetadataDTO("agent-id", "Idle", "Building", "Enabled");
    final AgentMetadata agentMetadata = new AgentMetadataConverterV4().fromDTO(agentMetadataDTO);
    // Each of the four fields must be copied through unchanged.
    assertThat(agentMetadata.elasticAgentId(), is("agent-id"));
    assertThat(agentMetadata.agentState(), is("Idle"));
    assertThat(agentMetadata.buildState(), is("Building"));
    assertThat(agentMetadata.configState(), is("Enabled"));
}
// Factory for day-based calendar windows of the given length, anchored at the
// default start date and using the UTC time zone.
public static DaysWindows days(int number) {
    return new DaysWindows(number, DEFAULT_START_DATE, DateTimeZone.UTC);
}
@Test
public void testDaysCompatibility() throws IncompatibleWindowException {
    CalendarWindows.DaysWindows daysWindows = CalendarWindows.days(10);
    // Same window length: compatible.
    daysWindows.verifyCompatibility(CalendarWindows.days(10));
    // Different window length: must raise IncompatibleWindowException.
    thrown.expect(IncompatibleWindowException.class);
    daysWindows.verifyCompatibility(CalendarWindows.days(9));
}
private void createScanRangeLocations(ParamCreateContext context, List<TBrokerFileStatus> fileStatuses) throws UserException { Preconditions.checkState(locationsHeap.isEmpty(), "Locations heap is not empty"); long totalBytes = 0; for (TBrokerFileStatus fileStatus : fileStatuses) { totalBytes += fileStatus.size; } long numInstances = bytesPerInstance == 0 ? 1 : (totalBytes + bytesPerInstance - 1) / bytesPerInstance; // totalBytes may be 0, so numInstances may be 0 numInstances = Math.max(numInstances, (long) 1); for (int i = 0; i < numInstances; ++i) { locationsHeap.add(Pair.create(newLocations(context.params, brokerDesc.getName(), brokerDesc.hasBroker()), 0L)); } }
@Test public void testCreateScanRangeLocations(@Mocked GlobalStateMgr globalStateMgr, @Mocked SystemInfoService systemInfoService, @Injectable Database db, @Injectable OlapTable table) throws UserException { // table schema List<Column> columns = Lists.newArrayList(); Column c1 = new Column("c1", Type.BIGINT, true); columns.add(c1); Column c2 = new Column("c2", Type.BIGINT, true); columns.add(c2); List<String> columnNames = Lists.newArrayList("c1", "c2"); new Expectations() { { GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); result = systemInfoService; systemInfoService.getIdToBackend(); result = idToBackend; table.getBaseSchema(); result = columns; table.getFullSchema(); result = columns; table.getPartitions(); minTimes = 0; result = Arrays.asList(partition); partition.getId(); minTimes = 0; result = 0; table.getColumn("c1"); result = columns.get(0); table.getColumn("c2"); result = columns.get(1); } }; // case 0 // 2 csv files: file1 512M+, file2 256M- // result: 3 ranges. 
file1 3 ranges, file2 1 range // file groups List<BrokerFileGroup> fileGroups = Lists.newArrayList(); List<String> files = Lists.newArrayList("hdfs://127.0.0.1:9001/file1", "hdfs://127.0.0.1:9001/file2"); DataDescription desc = new DataDescription("testTable", null, files, columnNames, null, null, null, false, null); BrokerFileGroup brokerFileGroup = new BrokerFileGroup(desc); Deencapsulation.setField(brokerFileGroup, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup, "rowDelimiter", "\n"); fileGroups.add(brokerFileGroup); // file status List<List<TBrokerFileStatus>> fileStatusesList = Lists.newArrayList(); List<TBrokerFileStatus> fileStatusList = Lists.newArrayList(); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file1", false, 536870968, true)); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file2", false, 268435400, true)); fileStatusesList.add(fileStatusList); Analyzer analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext()); DescriptorTable descTable = analyzer.getDescTbl(); TupleDescriptor tupleDesc = descTable.createTupleDescriptor("DestTableTuple"); FileScanNode scanNode = new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList, 2, WarehouseManager.DEFAULT_WAREHOUSE_ID); scanNode.setLoadInfo(jobId, txnId, table, brokerDesc, fileGroups, true, loadParallelInstanceNum); scanNode.init(analyzer); scanNode.finalizeStats(analyzer); // check List<TScanRangeLocations> locationsList = scanNode.getScanRangeLocations(0); System.out.println(locationsList); Assert.assertEquals(3, locationsList.size()); int file1RangesNum = 0; int file2RangesNum = 0; Set<Long> file1StartOffsetResult = Sets.newHashSet(); Set<Long> file1RangeSizeResult = Sets.newHashSet(); for (TScanRangeLocations locations : locationsList) { for (TBrokerRangeDesc rangeDesc : locations.scan_range.broker_scan_range.ranges) { long start = rangeDesc.start_offset; long size = rangeDesc.size; if 
(rangeDesc.path.endsWith("file1")) { ++file1RangesNum; file1StartOffsetResult.add(start); file1RangeSizeResult.add(size); } else if (rangeDesc.path.endsWith("file2")) { ++file2RangesNum; Assert.assertTrue(start == 0); Assert.assertTrue(size == 268435400); } } } Assert.assertEquals(Sets.newHashSet(0L, 268435456L, 536870912L), file1StartOffsetResult); Assert.assertEquals(Sets.newHashSet(56L, 268435456L), file1RangeSizeResult); Assert.assertEquals(3, file1RangesNum); Assert.assertEquals(1, file2RangesNum); // case 1 // 4 parquet files // result: 3 ranges. 2 files in one range and 1 file in every other range // file groups fileGroups = Lists.newArrayList(); files = Lists.newArrayList("hdfs://127.0.0.1:9001/file1", "hdfs://127.0.0.1:9001/file2", "hdfs://127.0.0.1:9001/file3", "hdfs://127.0.0.1:9001/file4"); desc = new DataDescription("testTable", null, files, columnNames, null, null, "parquet", false, null); brokerFileGroup = new BrokerFileGroup(desc); Deencapsulation.setField(brokerFileGroup, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup, "rowDelimiter", "\n"); Deencapsulation.setField(brokerFileGroup, "fileFormat", "parquet"); fileGroups.add(brokerFileGroup); // file status fileStatusesList = Lists.newArrayList(); fileStatusList = Lists.newArrayList(); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file1", false, 268435454, false)); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file2", false, 268435453, false)); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file3", false, 268435452, false)); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file4", false, 268435451, false)); fileStatusesList.add(fileStatusList); analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext()); descTable = analyzer.getDescTbl(); tupleDesc = descTable.createTupleDescriptor("DestTableTuple"); scanNode = new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList, 4, 
WarehouseManager.DEFAULT_WAREHOUSE_ID); scanNode.setLoadInfo(jobId, txnId, table, brokerDesc, fileGroups, true, loadParallelInstanceNum); scanNode.init(analyzer); scanNode.finalizeStats(analyzer); // check locationsList = scanNode.getScanRangeLocations(0); Assert.assertEquals(3, locationsList.size()); for (TScanRangeLocations locations : locationsList) { List<TBrokerRangeDesc> rangeDescs = locations.scan_range.broker_scan_range.ranges; Assert.assertTrue(rangeDescs.size() == 1 || rangeDescs.size() == 2); } // case 2 // 2 file groups // result: 4 ranges. group1 3 ranges, group2 1 range // file groups fileGroups = Lists.newArrayList(); files = Lists.newArrayList("hdfs://127.0.0.1:9001/file1", "hdfs://127.0.0.1:9001/file2", "hdfs://127.0.0.1:9001/file3"); desc = new DataDescription("testTable", null, files, columnNames, null, null, "parquet", false, null); brokerFileGroup = new BrokerFileGroup(desc); Deencapsulation.setField(brokerFileGroup, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup, "rowDelimiter", "\n"); Deencapsulation.setField(brokerFileGroup, "fileFormat", "parquet"); fileGroups.add(brokerFileGroup); List<String> files2 = Lists.newArrayList("hdfs://127.0.0.1:9001/file4", "hdfs://127.0.0.1:9001/file5"); DataDescription desc2 = new DataDescription("testTable", null, files2, columnNames, null, null, null, false, null); BrokerFileGroup brokerFileGroup2 = new BrokerFileGroup(desc2); Deencapsulation.setField(brokerFileGroup2, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup2, "rowDelimiter", "\n"); fileGroups.add(brokerFileGroup2); // file status fileStatusesList = Lists.newArrayList(); fileStatusList = Lists.newArrayList(); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file1", false, 268435456, false)); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file2", false, 10, false)); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file3", false, 10, false)); 
fileStatusesList.add(fileStatusList); List<TBrokerFileStatus> fileStatusList2 = Lists.newArrayList(); fileStatusList2.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file4", false, 10, true)); fileStatusList2.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file5", false, 10, true)); fileStatusesList.add(fileStatusList2); analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext()); descTable = analyzer.getDescTbl(); tupleDesc = descTable.createTupleDescriptor("DestTableTuple"); scanNode = new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList, 5, WarehouseManager.DEFAULT_WAREHOUSE_ID); scanNode.setLoadInfo(jobId, txnId, table, brokerDesc, fileGroups, true, loadParallelInstanceNum); scanNode.init(analyzer); scanNode.finalizeStats(analyzer); // check locationsList = scanNode.getScanRangeLocations(0); Assert.assertEquals(4, locationsList.size()); int group1RangesNum = 0; int group2RangesNum = 0; for (TScanRangeLocations locations : locationsList) { List<TBrokerRangeDesc> rangeDescs = locations.scan_range.broker_scan_range.ranges; String path = rangeDescs.get(0).path; if (path.endsWith("file1") || path.endsWith("file2") || path.endsWith("file3")) { Assert.assertEquals(1, rangeDescs.size()); ++group1RangesNum; } else if (path.endsWith("file4") || path.endsWith("file5")) { Assert.assertEquals(2, rangeDescs.size()); ++group2RangesNum; } } Assert.assertEquals(3, group1RangesNum); Assert.assertEquals(1, group2RangesNum); // case 4 // 2 parquet file and one is very large // result: 2 ranges // file groups fileGroups = Lists.newArrayList(); files = Lists.newArrayList("hdfs://127.0.0.1:9001/file1", "hdfs://127.0.0.1:9001/file2"); desc = new DataDescription("testTable", null, files, columnNames, null, null, "parquet", false, null); brokerFileGroup = new BrokerFileGroup(desc); Deencapsulation.setField(brokerFileGroup, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup, "rowDelimiter", "\n"); 
Deencapsulation.setField(brokerFileGroup, "fileFormat", "parquet"); fileGroups.add(brokerFileGroup); // file status fileStatusesList = Lists.newArrayList(); fileStatusList = Lists.newArrayList(); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file1", false, 268435456000L, false)); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file2", false, 10, false)); fileStatusesList.add(fileStatusList); analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext()); descTable = analyzer.getDescTbl(); tupleDesc = descTable.createTupleDescriptor("DestTableTuple"); scanNode = new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList, 2, WarehouseManager.DEFAULT_WAREHOUSE_ID); scanNode.setLoadInfo(jobId, txnId, table, brokerDesc, fileGroups, true, loadParallelInstanceNum); scanNode.init(analyzer); scanNode.finalizeStats(analyzer); // check locationsList = scanNode.getScanRangeLocations(0); System.out.println(locationsList); Assert.assertEquals(2, locationsList.size()); // case 5 // 1 file which size is 0 // result: 1 range // file groups fileGroups = Lists.newArrayList(); files = Lists.newArrayList("hdfs://127.0.0.1:9001/file1"); desc = new DataDescription("testTable", null, files, columnNames, null, null, "parquet", false, null); brokerFileGroup = new BrokerFileGroup(desc); Deencapsulation.setField(brokerFileGroup, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup, "rowDelimiter", "\n"); Deencapsulation.setField(brokerFileGroup, "fileFormat", "parquet"); fileGroups.add(brokerFileGroup); // file status fileStatusesList = Lists.newArrayList(); fileStatusList = Lists.newArrayList(); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file1", false, 0, false)); fileStatusesList.add(fileStatusList); analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext()); descTable = analyzer.getDescTbl(); tupleDesc = descTable.createTupleDescriptor("DestTableTuple"); scanNode 
= new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList, 1, WarehouseManager.DEFAULT_WAREHOUSE_ID); scanNode.setLoadInfo(jobId, txnId, table, brokerDesc, fileGroups, true, loadParallelInstanceNum); scanNode.init(analyzer); scanNode.finalizeStats(analyzer); // check locationsList = scanNode.getScanRangeLocations(0); System.out.println(locationsList); Assert.assertEquals(1, locationsList.size()); List<TBrokerRangeDesc> rangeDescs = locationsList.get(0).scan_range.broker_scan_range.ranges; Assert.assertEquals(1, rangeDescs.size()); Assert.assertEquals(0, rangeDescs.get(0).size); // case 5 // 1 file which size is 0 in json format // result: 1 range // file groups fileGroups = Lists.newArrayList(); files = Lists.newArrayList("hdfs://127.0.0.1:9001/file1"); desc = new DataDescription("testTable", null, files, columnNames, null, null, "json", false, null); brokerFileGroup = new BrokerFileGroup(desc); Deencapsulation.setField(brokerFileGroup, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup, "rowDelimiter", "\n"); Deencapsulation.setField(brokerFileGroup, "fileFormat", "json"); fileGroups.add(brokerFileGroup); // file status fileStatusesList = Lists.newArrayList(); fileStatusList = Lists.newArrayList(); fileStatusList.add(new TBrokerFileStatus("hdfs://127.0.0.1:9001/file1", false, 0, false)); fileStatusesList.add(fileStatusList); analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext()); descTable = analyzer.getDescTbl(); tupleDesc = descTable.createTupleDescriptor("DestTableTuple"); scanNode = new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList, 1, WarehouseManager.DEFAULT_WAREHOUSE_ID); scanNode.setLoadInfo(jobId, txnId, table, brokerDesc, fileGroups, true, loadParallelInstanceNum); scanNode.init(analyzer); scanNode.finalizeStats(analyzer); // check locationsList = scanNode.getScanRangeLocations(0); System.out.println(locationsList); Assert.assertEquals(1, locationsList.size()); 
rangeDescs = locationsList.get(0).scan_range.broker_scan_range.ranges; Assert.assertEquals(1, rangeDescs.size()); Assert.assertEquals(0, rangeDescs.get(0).size); // case 6 // csv file compression type // result: CSV_PLAIN, CSV_GZ, CSV_BZ2, CSV_LZ4, CSV_DFLATE, CSV_ZSTD // file groups fileGroups = Lists.newArrayList(); files = Lists.newArrayList("hdfs://127.0.0.1:9001/file1", "hdfs://127.0.0.1:9001/file2.csv", "hdfs://127.0.0.1:9001/file3.gz", "hdfs://127.0.0.1:9001/file4.bz2", "hdfs://127.0.0.1:9001/file5.lz4", "hdfs://127.0.0.1:9001/file6.deflate", "hdfs://127.0.0.1:9001/file7.zst"); desc = new DataDescription("testTable", null, files, columnNames, null, null, "csv", false, null); brokerFileGroup = new BrokerFileGroup(desc); Deencapsulation.setField(brokerFileGroup, "columnSeparator", "\t"); Deencapsulation.setField(brokerFileGroup, "rowDelimiter", "\n"); Deencapsulation.setField(brokerFileGroup, "fileFormat", "csv"); fileGroups.add(brokerFileGroup); // file status fileStatusesList = Lists.newArrayList(); fileStatusList = Lists.newArrayList(); for (String file : files) { fileStatusList.add(new TBrokerFileStatus(file, false, 1024, true)); } fileStatusesList.add(fileStatusList); analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext()); descTable = analyzer.getDescTbl(); tupleDesc = descTable.createTupleDescriptor("DestTableTuple"); scanNode = new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList, 2, WarehouseManager.DEFAULT_WAREHOUSE_ID); scanNode.setLoadInfo(jobId, txnId, table, brokerDesc, fileGroups, true, loadParallelInstanceNum); scanNode.init(analyzer); scanNode.finalizeStats(analyzer); // check locationsList = scanNode.getScanRangeLocations(0); Assert.assertEquals(1, locationsList.size()); Assert.assertEquals(7, locationsList.get(0).scan_range.broker_scan_range.ranges.size()); Assert.assertEquals(TFileFormatType.FORMAT_CSV_PLAIN, locationsList.get(0).scan_range.broker_scan_range.ranges.get(0).format_type); 
Assert.assertEquals(TFileFormatType.FORMAT_CSV_PLAIN, locationsList.get(0).scan_range.broker_scan_range.ranges.get(1).format_type); Assert.assertEquals(TFileFormatType.FORMAT_CSV_GZ, locationsList.get(0).scan_range.broker_scan_range.ranges.get(2).format_type); Assert.assertEquals(TFileFormatType.FORMAT_CSV_BZ2, locationsList.get(0).scan_range.broker_scan_range.ranges.get(3).format_type); Assert.assertEquals(TFileFormatType.FORMAT_CSV_LZ4_FRAME, locationsList.get(0).scan_range.broker_scan_range.ranges.get(4).format_type); Assert.assertEquals(TFileFormatType.FORMAT_CSV_DEFLATE, locationsList.get(0).scan_range.broker_scan_range.ranges.get(5).format_type); Assert.assertEquals(TFileFormatType.FORMAT_CSV_ZSTD, locationsList.get(0).scan_range.broker_scan_range.ranges.get(6).format_type); }
/**
 * Builds the publish view (the list of writable {@link MessageQueue}s) for a topic from
 * its route data. Three mutually exclusive cases are handled, in order:
 * order topics (queue layout taken from the order-topic conf string), static topics
 * (queue/broker mapping supplied by the route), and plain topics (one queue per
 * writable write-queue on each master broker).
 */
public static TopicPublishInfo topicRouteData2TopicPublishInfo(final String topic, final TopicRouteData route) {
    TopicPublishInfo info = new TopicPublishInfo();
    // TO DO should check the usage of raw route, it is better to remove such field
    info.setTopicRouteData(route);
    if (route.getOrderTopicConf() != null && route.getOrderTopicConf().length() > 0) {
        // Order topic: conf format is "brokerName:queueNums;brokerName:queueNums;..."
        String[] brokers = route.getOrderTopicConf().split(";");
        for (String broker : brokers) {
            String[] item = broker.split(":");
            int nums = Integer.parseInt(item[1]);
            for (int i = 0; i < nums; i++) {
                MessageQueue mq = new MessageQueue(topic, item[0], i);
                info.getMessageQueueList().add(mq);
            }
        }
        info.setOrderTopic(true);
    } else if (route.getOrderTopicConf() == null && route.getTopicQueueMappingByBroker() != null
        && !route.getTopicQueueMappingByBroker().isEmpty()) {
        // Static topic: queues come from the broker mapping; sort by queue id for a stable order.
        info.setOrderTopic(false);
        ConcurrentMap<MessageQueue, String> mqEndPoints = topicRouteData2EndpointsForStaticTopic(topic, route);
        info.getMessageQueueList().addAll(mqEndPoints.keySet());
        info.getMessageQueueList().sort((mq1, mq2) -> MixAll.compareInteger(mq1.getQueueId(), mq2.getQueueId()));
    } else {
        // Plain topic: derive queues from the route's queue data.
        List<QueueData> qds = route.getQueueDatas();
        // Sort so queues are produced in a deterministic broker order.
        Collections.sort(qds);
        for (QueueData qd : qds) {
            if (PermName.isWriteable(qd.getPerm())) {
                // Find the broker data matching this queue's broker name.
                BrokerData brokerData = null;
                for (BrokerData bd : route.getBrokerDatas()) {
                    if (bd.getBrokerName().equals(qd.getBrokerName())) {
                        brokerData = bd;
                        break;
                    }
                }
                if (null == brokerData) {
                    continue;
                }
                // Skip brokers that currently have no master address registered.
                if (!brokerData.getBrokerAddrs().containsKey(MixAll.MASTER_ID)) {
                    continue;
                }
                for (int i = 0; i < qd.getWriteQueueNums(); i++) {
                    MessageQueue mq = new MessageQueue(topic, qd.getBrokerName(), i);
                    info.getMessageQueueList().add(mq);
                }
            }
        }
        info.setOrderTopic(false);
    }
    return info;
}
@Test
public void testTopicRouteData2TopicPublishInfo() {
    // Convert a fixture route and verify the derived publish view.
    final TopicPublishInfo publishInfo =
        MQClientInstance.topicRouteData2TopicPublishInfo(topic, createTopicRouteData());
    assertThat(publishInfo.isHaveTopicRouterInfo()).isFalse();
    assertThat(publishInfo.getMessageQueueList().size()).isEqualTo(4);
}
/**
 * Derives the 16-byte IPv6 link-local address (fe80::/64 prefix bytes supplied by the
 * {@code LINK_LOCAL_*} constants) from a 6-byte MAC address using the modified EUI-64
 * scheme: flip the universal/local bit of the first MAC byte and insert 0xFF 0xFE in
 * the middle of the interface identifier.
 *
 * @param macAddress the 6-byte MAC address
 * @return the 16-byte link-local IPv6 address
 */
public static byte[] getLinkLocalAddress(byte[] macAddress) {
    checkArgument(macAddress.length == MacAddress.MAC_ADDRESS_LENGTH);
    final byte[] address = new byte[16];
    address[0] = LINK_LOCAL_0;
    address[1] = LINK_LOCAL_1;
    // Bytes 2..7 stay zero (the rest of the fe80::/64 prefix).
    address[8] = (byte) (macAddress[0] ^ (1 << 1)); // flip the universal/local bit
    address[9] = macAddress[1];
    address[10] = macAddress[2];
    address[11] = (byte) 0xff;
    address[12] = (byte) 0xfe;
    address[13] = macAddress[3];
    address[14] = macAddress[4];
    address[15] = macAddress[5];
    return address;
}
@Test
public void testLinkLocalAddress() {
    // Each MAC must map onto its precomputed fe80:: link-local address.
    assertArrayEquals(LINK_LOCAL_ADDRESS_1, getLinkLocalAddress(MAC_ADDRESS_1));
    assertArrayEquals(LINK_LOCAL_ADDRESS_2, getLinkLocalAddress(MAC_ADDRESS_2));
}
/**
 * Renders a byte array as its hexadecimal string, two digits per byte, using the
 * digit alphabet in {@code HEXES}.
 *
 * @param raw the bytes to render; may be {@code null}
 * @return the hex string, or {@code null} when {@code raw} is {@code null}
 */
public static String getHex(byte[] raw) {
    if (raw == null) {
        return null;
    }
    final StringBuilder hex = new StringBuilder(raw.length * 2);
    for (int i = 0; i < raw.length; i++) {
        final int unsigned = raw[i] & 0xFF;
        hex.append(HEXES.charAt(unsigned >>> 4));
        hex.append(HEXES.charAt(unsigned & 0x0F));
    }
    return hex.toString();
}
@Test public void testGetHex() { byte[] raw = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; //String expResult = "000102030405060708090A0B0C0D0E0F10"; String expResult = "000102030405060708090a0b0c0d0e0f10"; String result = Checksum.getHex(raw); assertEquals(expResult, result); }
/**
 * Prepares fetch requests for the assigned partitions and hands them off for sending.
 *
 * @return the number of fetch requests that were prepared (one per target node)
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                // NOTE(review): the handlers re-synchronize on the fetcher, which suggests
                // they can run outside the lock held by this method — confirm before
                // removing the nested synchronized blocks.
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
@Test
public void testHeaders() {
    buildFetcher();

    // Build a batch of three records: no headers, one header, and two headers
    // sharing the same key (so lastHeader must return the second one).
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());

    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);

    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);

    MemoryRecords memoryRecords = builder.build();

    List<ConsumerRecord<byte[], byte[]>> records;
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);

    // Serve the prepared batch as the fetch response and drive one poll cycle.
    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, memoryRecords, Errors.NONE, 100L, 0));

    assertEquals(1, sendFetches());
    consumerClient.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchRecords();
    records = recordsByPartition.get(tp0);

    assertEquals(3, records.size());

    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();

    // Record 1: no headers were appended.
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));

    // Record 2: the single header round-trips intact.
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());

    // Record 3: with duplicate keys, lastHeader returns the last appended value.
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
/**
 * Creates a {@link SecureRandom} for the configured algorithm, using the configured
 * provider when one is set.
 *
 * @return a new {@link SecureRandom} instance
 * @throws NoSuchProviderException  if the configured provider is unknown
 * @throws NoSuchAlgorithmException if the configured algorithm is unknown
 */
public SecureRandom createSecureRandom() throws NoSuchProviderException, NoSuchAlgorithmException {
    try {
        return getProvider() != null
                ? SecureRandom.getInstance(getAlgorithm(), getProvider())
                : SecureRandom.getInstance(getAlgorithm());
    } catch (NoSuchProviderException ex) {
        // Fix: preserve the original exception as the cause instead of dropping it.
        // NoSuchProviderException has no (String, Throwable) constructor, so use initCause.
        final NoSuchProviderException wrapped =
                new NoSuchProviderException("no such secure random provider: " + getProvider());
        wrapped.initCause(ex);
        throw wrapped;
    } catch (NoSuchAlgorithmException ex) {
        // Fix: preserve the original exception as the cause instead of dropping it.
        throw new NoSuchAlgorithmException("no such secure random algorithm: " + getAlgorithm(), ex);
    }
}
@Test
public void testDefaults() {
    // The factory bean in its default configuration must produce a SecureRandom.
    assertNotNull(factoryBean.createSecureRandom());
}
/**
 * Builds the human-readable change-log context for this plugin-handle event.
 * When there is no "before" snapshot only the event type is reported; otherwise
 * the field-level contrast is appended.
 */
@Override
public String buildContext() {
    final PluginHandleDO after = (PluginHandleDO) getAfter();
    final String field = after.getField();
    final String eventType = StringUtils.lowerCase(getType().getType().toString());
    if (Objects.isNull(getBefore())) {
        return String.format("the plugin-handle [%s] is %s", field, eventType);
    }
    return String.format("the plugin-handle [%s] is %s : %s", field, eventType, contrast());
}
@Test
public void updatePluginHandleBuildContextTest() {
    String eventTypeStr = StringUtils.lowerCase(EventTypeEnum.PLUGIN_HANDLE_UPDATE.getType().toString());

    // Same DO as before and after: contrast collapses to the "no change" message.
    PluginHandleChangedEvent pluginHandleUpdateEventWithoutChange =
            new PluginHandleChangedEvent(pluginHandleDO, pluginHandleDO, EventTypeEnum.PLUGIN_HANDLE_UPDATE, "test-operator");
    String withoutChangeContrast = "it no change";
    String context = String.format("the plugin-handle [%s] is %s : %s",
            pluginHandleDO.getField(), eventTypeStr, withoutChangeContrast);
    assertEquals(context, pluginHandleUpdateEventWithoutChange.buildContext());

    // Different DO instances with identical field values: still "no change".
    PluginHandleChangedEvent pluginHandleUpdateEventNotSameDO =
            new PluginHandleChangedEvent(withoutChangePluginHandleDO, pluginHandleDO, EventTypeEnum.PLUGIN_HANDLE_UPDATE, "test-operator");
    assertEquals(context, pluginHandleUpdateEventNotSameDO.buildContext());

    // Changed DO: context must list every changed attribute as "name[old => new]".
    final StringBuilder contrast = new StringBuilder();
    contrast.append(String.format("field[%s => %s] ", pluginHandleDO.getField(), changePluginHandleDO.getField()));
    contrast.append(String.format("label[%s => %s] ", pluginHandleDO.getLabel(), changePluginHandleDO.getLabel()));
    contrast.append(String.format("type[%s => %s] ", pluginHandleDO.getType(), changePluginHandleDO.getType()));
    contrast.append(String.format("dataType[%s => %s] ", pluginHandleDO.getDataType(), changePluginHandleDO.getDataType()));
    contrast.append(String.format("sort[%s => %s] ", pluginHandleDO.getSort(), changePluginHandleDO.getSort()));
    String changeContext = String.format("the plugin-handle [%s] is %s : %s",
            changePluginHandleDO.getField(), eventTypeStr, contrast);
    PluginHandleChangedEvent pluginHandleUpdateEventChange =
            new PluginHandleChangedEvent(changePluginHandleDO, pluginHandleDO, EventTypeEnum.PLUGIN_HANDLE_UPDATE, "test-operator");
    assertEquals(changeContext, pluginHandleUpdateEventChange.buildContext());
}
/**
 * Serializes the given cookie and appends it to the response as a
 * {@code Set-Cookie} header.
 *
 * @param cookie the cookie to add
 */
@Override
public void addResponseCookie(Cookie cookie) {
    final String header = WebContextHelper.createCookieHeader(cookie);
    this.response.addHeader("Set-Cookie", header);
}
@Test
public void testCookieSecureStrict() {
    final HttpServletResponse mockResponse = new MockHttpServletResponse();
    final WebContext context = new JEEContext(request, mockResponse);
    final Cookie cookie = new Cookie("thename", "thevalue");
    cookie.setSameSitePolicy("strict");
    cookie.setSecure(true);

    context.addResponseCookie(cookie);

    // Secure and SameSite=Strict must both survive serialization.
    assertEquals("thename=thevalue; Path=/; Secure; SameSite=Strict", mockResponse.getHeader("Set-Cookie"));
}
/**
 * Decodes a complete DER-encoded object identifier into its dotted string form.
 * Convenience overload covering the whole array.
 *
 * @param data the encoded OID bytes
 * @return the dotted-decimal OID string
 */
public static String decodeObjectIdentifier(byte[] data) {
    final int length = data.length;
    return decodeObjectIdentifier(data, 0, length);
}
@Test
public void decodeObjectIdentifierWithDoubleFirst() {
    // Multi-byte first subidentifier: 0x81 0x05 -> 133 -> first arc 2, second 133 - 2*40 = 53.
    final byte[] encoded = { (byte) 0x81, 5 };
    assertEquals("2.53", Asn1Utils.decodeObjectIdentifier(encoded));
}
/**
 * Packages a browsed page as an immutable result map: the original URL plus the
 * page content converted to Markdown and trimmed to the configured length limit.
 *
 * @param url     the page URL
 * @param content the raw HTML content
 * @return an immutable map with "url" and "content" entries
 */
protected Map<String, String> formatResult(String url, String content) {
    final String markdown = convertHtmlToMd(content);
    final String trimmed = trimContent(markdown);
    return Map.of("url", url, "content", trimmed);
}
@Test
void testFormatResultWithLongContent() {
    final String url = "http://example.com";
    final StringBuilder html = new StringBuilder("<html><body>");
    for (int i = 0; i < 1000; i++) {
        html.append("<p>Paragraph ").append(i).append("</p>");
    }
    html.append("</body></html>");

    final Map<String, String> result = markdownBrowserAction.formatResult(url, html.toString());

    assertEquals(url, result.get("url"));
    // Content longer than the limit must be trimmed to exactly 2500 characters.
    assertEquals(2500, result.get("content").length());
}
/**
 * Two {@code Statistics} are equal when all identifying fields (application, client,
 * group, method, server, service, version) are equal, with {@code null} fields
 * comparing equal to {@code null}.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    Statistics other = (Statistics) obj;
    // Objects.equals replaces the verbose, error-prone null-guard chains while
    // preserving the exact null semantics of the original.
    return java.util.Objects.equals(application, other.application)
            && java.util.Objects.equals(client, other.client)
            && java.util.Objects.equals(group, other.group)
            && java.util.Objects.equals(method, other.method)
            && java.util.Objects.equals(server, other.server)
            && java.util.Objects.equals(service, other.service)
            && java.util.Objects.equals(version, other.version);
}
@Test
void testEquals() {
    // Build one URL carrying every statistic field, then derive two identical Statistics.
    URL statistics = new URLBuilder(DUBBO_PROTOCOL, "10.20.153.10", 0)
            .addParameter(APPLICATION_KEY, "morgan")
            .addParameter(INTERFACE_KEY, "MemberService")
            .addParameter(METHOD_KEY, "findPerson")
            .addParameter(CONSUMER, "10.20.153.11")
            .addParameter(SUCCESS_KEY, 1)
            .addParameter(FAILURE_KEY, 0)
            .addParameter(ELAPSED_KEY, 3)
            .addParameter(MAX_ELAPSED_KEY, 3)
            .addParameter(CONCURRENT_KEY, 1)
            .addParameter(MAX_CONCURRENT_KEY, 1)
            .build();
    Statistics statistics1 = new Statistics(statistics);
    Statistics statistics2 = new Statistics(statistics);

    // Reflexivity and equality of identically-built instances.
    MatcherAssert.assertThat(statistics1, equalTo(statistics1));
    MatcherAssert.assertThat(statistics1, equalTo(statistics2));

    // Mutating each identifying field must break both equals and hashCode agreement.
    statistics1.setVersion("2");
    MatcherAssert.assertThat(statistics1, not(equalTo(statistics2)));
    MatcherAssert.assertThat(statistics1.hashCode(), not(equalTo(statistics2.hashCode())));
    statistics1.setMethod("anotherMethod");
    MatcherAssert.assertThat(statistics1, not(equalTo(statistics2)));
    MatcherAssert.assertThat(statistics1.hashCode(), not(equalTo(statistics2.hashCode())));
    statistics1.setClient("anotherClient");
    MatcherAssert.assertThat(statistics1, not(equalTo(statistics2)));
    MatcherAssert.assertThat(statistics1.hashCode(), not(equalTo(statistics2.hashCode())));
}
/**
 * Verifies and persists an X.509 certificate.
 *
 * The very first CSCA certificate stored for a document type cannot be verified
 * against an existing chain, so it is only self-verified and stored untrusted
 * (the trusted flag must then be set manually). Later certificates are verified
 * against the existing chain; when {@code allowAddingExpired} is set, expired
 * certificates are accepted by verifying at their own notAfter instant.
 *
 * @param cert the certificate to add
 * @return the persisted entity
 * @throws RuntimeException    if the certificate cannot be DER-encoded
 * @throws BadRequestException if the certificate cannot be verified
 */
public Certificate add(X509Certificate cert) {
    final Certificate db;
    try {
        db = Certificate.from(cert);
    } catch (CertificateEncodingException e) {
        logger.error("Encoding error in certificate", e);
        throw new RuntimeException("Encoding error in certificate", e);
    }
    try {
        // Special case for first CSCA certificate for this document type
        if (repository.countByDocumentType(db.getDocumentType()) == 0) {
            // Self-signature check only; there is no chain to verify against yet.
            cert.verify(cert.getPublicKey());
            logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType());
        } else {
            // Passing the cert's own notAfter effectively disables the expiry check.
            verify(cert, allowAddingExpired ? cert.getNotAfter() : null);
        }
    } catch (GeneralSecurityException | VerificationException e) {
        logger.error(
                String.format("Could not verify certificate of %s issued by %s",
                        cert.getSubjectX500Principal(),
                        cert.getIssuerX500Principal()
                ), e
        );
        throw new BadRequestException("Could not verify certificate", e);
    }
    return repository.saveAndFlush(db);
}
@Test
public void shouldAllowToAddCertificateIfTrustedByExistingEvenIfExpiredIfAllowed() throws Exception {
    // A trusted issuer is already present in the repository.
    certificateRepo.saveAndFlush(loadCertificate("rvig/01.cer", true));
    final X509Certificate cert = readCertificate("rvig/02-01.cer");
    // Permit adding certificates that have already expired.
    ReflectionTestUtils.setField(service, "allowAddingExpired", true);

    final Certificate dbCert = service.add(cert);

    assertEquals(X509Factory.toCanonical(cert.getSubjectX500Principal()), dbCert.getSubject());
    // Newly added certificates are never trusted automatically.
    assertFalse(dbCert.isTrusted());
}
/**
 * Polls SQS once: resets per-poll state, fetches messages, marks the consumer
 * ready, and dispatches the resulting exchanges as one batch.
 *
 * @return the number of exchanges processed in this batch
 */
@Override
protected int poll() throws Exception {
    // must reset for each poll
    shutdownRunningTask = null;
    pendingExchanges = 0;

    List<software.amazon.awssdk.services.sqs.model.Message> messages = pollingTask.call();

    // okay we have some response from aws so lets mark the consumer as ready
    forceConsumerAsReady();

    Queue<Exchange> exchanges = createExchanges(messages);
    return processBatch(CastUtils.cast(exchanges));
}
@Test
void shouldIgnoreAddingSortAttributeWhenAllAttributesAreRequested() throws Exception {
    // given: the consumer is configured to request every message attribute
    configuration.setAttributeNames("All");
    try (var tested = createConsumer(-1)) {
        // when
        var polledMessagesCount = tested.poll();

        // then: the receive request must carry only ALL — no extra sort attribute
        // should have been appended alongside it
        var expectedRequest = expectedReceiveRequestBuilder()
                .messageSystemAttributeNames(List.of(ALL))
                .maxNumberOfMessages(1)
                .build();
        assertThat(polledMessagesCount).isZero();
        assertThat(receivedExchanges).isEmpty();
        assertThat(sqsClientMock.getReceiveRequests()).containsExactlyInAnyOrder(expectedRequest);
        assertThat(sqsClientMock.getQueues()).isEmpty();
    }
}
/**
 * Converts one Fair Scheduler queue (and, recursively, all of its descendants)
 * into the corresponding Capacity Scheduler configuration entries.
 *
 * @param queue the queue whose subtree should be converted
 */
public void convertQueueHierarchy(FSQueue queue) {
    final List<FSQueue> childQueues = queue.getChildQueues();
    final String name = queue.getName();

    // Emit every per-queue setting for this node.
    emitChildQueues(name, childQueues);
    emitMaxAMShare(name, queue);
    emitMaxParallelApps(name, queue);
    emitMaxAllocations(name, queue);
    emitPreemptionDisabled(name, queue);
    emitChildCapacity(queue);
    emitMaximumCapacity(name, queue);
    emitSizeBasedWeight(name);
    emitOrderingPolicy(name, queue);
    checkMaxChildCapacitySetting(queue);
    emitDefaultUserLimitFactor(name, childQueues);

    // Depth-first recursion over the children.
    for (FSQueue child : childQueues) {
        convertQueueHierarchy(child);
    }
}
@Test
public void testQueueSizeBasedWeightEnabled() {
    converter = builder.withSizeBasedWeight(true).build();

    converter.convertQueueHierarchy(rootQueue);

    // Every queue must carry the size-based-weight flag in the converted config.
    for (String queueName : ALL_QUEUES) {
        key = PREFIX + queueName + ".ordering-policy.fair.enable-size-based-weight";
        assertTrue("Key " + key + " has different value", csConfig.getBoolean(key, false));
    }
}
/**
 * Returns the value of the given query parameter from the request, or {@code null}
 * when the parameter is absent (delegates with a {@code null} default value).
 *
 * @throws RestHandlerException propagated from the delegate; per its usage this
 *     appears to cover the repeated-value case — see the corresponding test
 */
public static <
                X, P extends MessageQueryParameter<X>, R extends RequestBody, M extends MessageParameters>
        X getQueryParameter(final HandlerRequest<R> request, final Class<P> queryParameterClass)
                throws RestHandlerException {
    return getQueryParameter(request, queryParameterClass, null);
}
/**
 * A repeated query parameter value must be rejected with a RestHandlerException
 * whose message mentions "Expected only one value".
 */
@Test
void testGetQueryParameterRepeated() throws Exception {
    try {
        HandlerRequestUtils.getQueryParameter(
                HandlerRequest.resolveParametersAndCreate(
                        EmptyRequestBody.getInstance(),
                        new TestMessageParameters(),
                        Collections.emptyMap(),
                        Collections.singletonMap("key", Arrays.asList("true", "false")),
                        Collections.emptyList()),
                TestBooleanQueryParameter.class);
        // Fix: without this fail() the test silently passed when no exception was thrown.
        org.assertj.core.api.Assertions.fail("Expected RestHandlerException for a repeated query parameter");
    } catch (final RestHandlerException e) {
        assertThat(e.getMessage()).contains("Expected only one value");
    }
}
/**
 * Returns the primitive double currently held by this result holder
 * (the configured default until an aggregation updates it).
 */
@Override
public double getDoubleResult() {
    return _value;
}
@Test
void testDefaultValue() {
    // A freshly constructed holder must report exactly the default it was given.
    final AggregationResultHolder holder = new DoubleAggregationResultHolder(DEFAULT_VALUE);
    final double actual = holder.getDoubleResult();
    Assert.assertEquals(actual, DEFAULT_VALUE,
        "Default Value mismatch: Actual: " + actual + " Expected: " + DEFAULT_VALUE + " Random seed: " + RANDOM_SEED);
}
/**
 * Determines whether a path exists in S3.
 *
 * Buckets are probed via an accessibility check; files and placeholders via an
 * attributes lookup; directories via a common-prefix listing limited to one entry.
 * NotfoundException maps to {@code false}; a plain AccessDeniedException maps to
 * {@code true} (the object exists but is not readable by this user), while a
 * retriable access denial is rethrown so it surfaces as a server error.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        // The service root always exists.
        return true;
    }
    try {
        if(containerService.isContainer(file)) {
            try {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Test if bucket %s is accessible", file));
                }
                return session.getClient().isBucketAccessible(containerService.getContainer(file).getName());
            }
            catch(ServiceException e) {
                throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            }
        }
        if(file.isFile() || file.isPlaceholder()) {
            // Throws NotfoundException when the object is missing (caught below).
            attributes.find(file, listener);
            return true;
        }
        else {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // Check for common prefix
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
                return true;
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return true;
            }
            catch(NotfoundException e) {
                throw e;
            }
        }
    }
    catch(NotfoundException e) {
        return false;
    }
    catch(RetriableAccessDeniedException e) {
        // Must fail with server error
        throw e;
    }
    catch(AccessDeniedException e) {
        // Object is inaccessible to current user, but does exist.
        return true;
    }
}
@Test
public void testFindRoot() throws Exception {
    // The service root must always be reported as existing.
    final Path root = new Path("/", EnumSet.of(Path.Type.directory));
    final S3FindFeature feature = new S3FindFeature(
            new S3Session(new Host(new S3Protocol())), new S3AccessControlListFeature(session));
    assertTrue(feature.find(root));
}
/** Fails the assertion if the subject (which must be non-null) is empty. */
public final void isNotEmpty() {
    final boolean subjectIsEmpty = checkNotNull(actual).isEmpty();
    if (subjectIsEmpty) {
        failWithoutActual(simpleFact("expected not to be empty"));
    }
}
@Test
public void isNotEmptyWithFailure() {
    // An empty map must trip the isNotEmpty assertion with the expected fact key.
    final ImmutableMap<Integer, Integer> emptyMap = ImmutableMap.of();
    expectFailureWhenTestingThat(emptyMap).isNotEmpty();
    assertFailureKeys("expected not to be empty");
}
/**
 * Returns the current cluster state as an atomic snapshot of the backing reference.
 */
public ClusterState getClusterState() {
    return clusterState.get();
}
@Test
void assertGetCurrentClusterState() {
    // A fresh context must report the OK state.
    final ClusterState current = stateContext.getClusterState();
    assertThat(current, is(ClusterState.OK));
}
/**
 * Diffs a Grand Exchange offer against the previously saved snapshot for the slot
 * and submits the resulting trade event (new offer, cancellation, or incremental
 * fill) to the GE client. Desynchronized or unchanged offers are ignored.
 *
 * @param slot  the GE slot index the offer occupies
 * @param offer the current offer state reported by the client
 */
@VisibleForTesting
void submitTrade(int slot, GrandExchangeOffer offer) {
    GrandExchangeOfferState state = offer.getState();

    // Only active or just-cancelled offers are of interest.
    if (state != GrandExchangeOfferState.CANCELLED_BUY && state != GrandExchangeOfferState.CANCELLED_SELL
            && state != GrandExchangeOfferState.BUYING && state != GrandExchangeOfferState.SELLING) {
        return;
    }

    SavedOffer savedOffer = getOffer(slot);
    // Offers observed shortly after login are flagged, since they may be stale replays.
    boolean login = client.getTickCount() <= lastLoginTick + GE_LOGIN_BURST_WINDOW;
    if (savedOffer == null && (state == GrandExchangeOfferState.BUYING || state == GrandExchangeOfferState.SELLING)
            && offer.getQuantitySold() == 0) {
        // new offer
        GrandExchangeTrade grandExchangeTrade = new GrandExchangeTrade();
        grandExchangeTrade.setBuy(state == GrandExchangeOfferState.BUYING);
        grandExchangeTrade.setItemId(offer.getItemId());
        grandExchangeTrade.setTotal(offer.getTotalQuantity());
        grandExchangeTrade.setOffer(offer.getPrice());
        grandExchangeTrade.setSlot(slot);
        grandExchangeTrade.setWorldType(getGeWorldType());
        grandExchangeTrade.setLogin(login);
        grandExchangeTrade.setSeq(tradeSeq++);
        grandExchangeTrade.setResetTime(getLimitResetTime(offer.getItemId()));

        log.debug("Submitting new trade: {}", grandExchangeTrade);
        grandExchangeClient.submit(grandExchangeTrade);
        return;
    }

    // The saved snapshot must match the offer's identity, or the diff is meaningless.
    if (savedOffer == null || savedOffer.getItemId() != offer.getItemId()
            || savedOffer.getPrice() != offer.getPrice()
            || savedOffer.getTotalQuantity() != offer.getTotalQuantity()) {
        // desync
        return;
    }

    if (savedOffer.getState() == offer.getState() && savedOffer.getQuantitySold() == offer.getQuantitySold()) {
        // no change
        return;
    }

    if (state == GrandExchangeOfferState.CANCELLED_BUY || state == GrandExchangeOfferState.CANCELLED_SELL) {
        GrandExchangeTrade grandExchangeTrade = new GrandExchangeTrade();
        grandExchangeTrade.setBuy(state == GrandExchangeOfferState.CANCELLED_BUY);
        grandExchangeTrade.setCancel(true);
        grandExchangeTrade.setItemId(offer.getItemId());
        grandExchangeTrade.setQty(offer.getQuantitySold());
        grandExchangeTrade.setTotal(offer.getTotalQuantity());
        grandExchangeTrade.setSpent(offer.getSpent());
        grandExchangeTrade.setOffer(offer.getPrice());
        grandExchangeTrade.setSlot(slot);
        grandExchangeTrade.setWorldType(getGeWorldType());
        grandExchangeTrade.setLogin(login);
        grandExchangeTrade.setSeq(tradeSeq++);
        grandExchangeTrade.setResetTime(getLimitResetTime(offer.getItemId()));

        log.debug("Submitting cancelled: {}", grandExchangeTrade);
        grandExchangeClient.submit(grandExchangeTrade);
        saveTrade(grandExchangeTrade);
        return;
    }

    // Incremental fill: deltas relative to the saved snapshot.
    final int qty = offer.getQuantitySold() - savedOffer.getQuantitySold();
    final int dspent = offer.getSpent() - savedOffer.getSpent();
    if (qty <= 0 || dspent <= 0) {
        return;
    }

    GrandExchangeTrade grandExchangeTrade = new GrandExchangeTrade();
    grandExchangeTrade.setBuy(state == GrandExchangeOfferState.BUYING);
    grandExchangeTrade.setItemId(offer.getItemId());
    grandExchangeTrade.setQty(offer.getQuantitySold());
    grandExchangeTrade.setDqty(qty);
    grandExchangeTrade.setTotal(offer.getTotalQuantity());
    grandExchangeTrade.setDspent(dspent);
    grandExchangeTrade.setSpent(offer.getSpent());
    grandExchangeTrade.setOffer(offer.getPrice());
    grandExchangeTrade.setSlot(slot);
    grandExchangeTrade.setWorldType(getGeWorldType());
    grandExchangeTrade.setLogin(login);
    grandExchangeTrade.setSeq(tradeSeq++);
    grandExchangeTrade.setResetTime(getLimitResetTime(offer.getItemId()));

    log.debug("Submitting trade: {}", grandExchangeTrade);
    grandExchangeClient.submit(grandExchangeTrade);
    saveTrade(grandExchangeTrade);
}
@Test
public void testSubmitTrade() {
    // Saved snapshot: 1 item bought for 25 total. ("1 @ 25")
    SavedOffer savedOffer = new SavedOffer();
    savedOffer.setItemId(ItemID.ABYSSAL_WHIP);
    savedOffer.setQuantitySold(1);
    savedOffer.setTotalQuantity(10);
    savedOffer.setPrice(1000);
    savedOffer.setSpent(25);
    savedOffer.setState(GrandExchangeOfferState.BUYING);
    when(configManager.getRSProfileConfiguration("geoffer", "0")).thenReturn(gson.toJson(savedOffer));

    // Current offer: 2 more bought at 10 gp each. ("buy 2 @ 10/ea")
    GrandExchangeOffer grandExchangeOffer = mock(GrandExchangeOffer.class);
    when(grandExchangeOffer.getQuantitySold()).thenReturn(1 + 2);
    when(grandExchangeOffer.getItemId()).thenReturn(ItemID.ABYSSAL_WHIP);
    when(grandExchangeOffer.getTotalQuantity()).thenReturn(10);
    when(grandExchangeOffer.getPrice()).thenReturn(1000);
    when(grandExchangeOffer.getSpent()).thenReturn(25 + 10 * 2);
    when(grandExchangeOffer.getState()).thenReturn(GrandExchangeOfferState.BUYING);

    grandExchangePlugin.submitTrade(0, grandExchangeOffer);

    // The submitted trade must carry the deltas relative to the saved snapshot.
    ArgumentCaptor<GrandExchangeTrade> captor = ArgumentCaptor.forClass(GrandExchangeTrade.class);
    verify(grandExchangeClient).submit(captor.capture());
    GrandExchangeTrade trade = captor.getValue();
    assertTrue(trade.isBuy());
    assertEquals(ItemID.ABYSSAL_WHIP, trade.getItemId());
    assertEquals(2, trade.getDqty());      // delta quantity
    assertEquals(10, trade.getTotal());
    assertEquals(45, trade.getSpent());
    assertEquals(20, trade.getDspent());   // delta spent
}
/**
 * Sends the request asynchronously and exposes the eventual response through a
 * {@link ResponseFuture} backed by an internal callback.
 *
 * @param request        the request to send
 * @param requestContext the per-request context
 * @return a future that completes with the response
 */
@Override
public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext) {
    final FutureCallback<Response<T>> futureCallback = new FutureCallback<>();
    sendRequest(request, requestContext, futureCallback);
    return new ResponseFutureImpl<>(futureCallback);
}
@Test(dataProvider = TestConstants.RESTLI_PROTOCOL_1_2_PREFIX + "sendRequestOptions")
public void testRestLiRemoteInvocationException(SendRequestOption option,
                                                TimeoutOption timeoutOption,
                                                ProtocolVersionOption versionOption,
                                                ProtocolVersion protocolVersion,
                                                String errorResponseHeaderName,
                                                ContentType contentType)
        throws ExecutionException, TimeoutException, InterruptedException, RestLiDecodingException {
    final int HTTP_CODE = 404;
    final String ERR_MSG = "WHOOPS!";

    // Client is stubbed to answer every request with a 404 carrying ERR_MSG.
    RestClient client = mockClient(HTTP_CODE, ERR_MSG, protocolVersion);
    Request<EmptyRecord> request = mockRequest(EmptyRecord.class, versionOption, contentType);
    RequestBuilder<Request<EmptyRecord>> requestBuilder = mockRequestBuilder(request);

    FutureCallback<Response<EmptyRecord>> callback = new FutureCallback<>();
    try {
        sendRequest(option, client, request, requestBuilder, callback);
        Long l = timeoutOption._l;
        TimeUnit timeUnit = timeoutOption._timeUnit;
        // Wait with or without a timeout depending on the data-provider option.
        Response<EmptyRecord> response = l == null ? callback.get() : callback.get(l, timeUnit);
        Assert.fail("Should have thrown");
    } catch (ExecutionException e) {
        // Expected shape: ExecutionException -> RemoteInvocationException -> RestException.
        Throwable cause = e.getCause();
        Assert.assertTrue(cause instanceof RemoteInvocationException,
                "Expected RemoteInvocationException not " + cause.getClass().getName());
        RemoteInvocationException rlre = (RemoteInvocationException) cause;
        Assert.assertTrue(rlre.getMessage().startsWith("Received error " + HTTP_CODE + " from server"));
        Throwable rlCause = rlre.getCause();
        Assert.assertTrue(rlCause instanceof RestException,
                "Expected RestException not " + rlCause.getClass().getName());
        RestException rle = (RestException) rlCause;
        Assert.assertEquals(ERR_MSG, rle.getResponse().getEntity().asString("UTF-8"));
        Assert.assertEquals(HTTP_CODE, rle.getResponse().getStatus());
    }
}
@Override
public String getSchema() {
    // Delegates to the dialect-specific metadata so each database vendor can
    // derive the schema from the connection in its own way.
    return dialectDatabaseMetaData.getSchema(connection);
}
@Test
void assertGetSchemaByMySQLSPI() throws SQLException {
    // The wrapper must surface the schema reported by the underlying connection.
    when(this.connection.getSchema()).thenReturn(TEST_SCHEMA);
    final MetaDataLoaderConnection loaderConnection = new MetaDataLoaderConnection(databaseType, this.connection);
    assertThat(loaderConnection.getSchema(), is(TEST_SCHEMA));
}
/**
 * Produces a mutable copy of this tree, recording the current root loggable as
 * expired (it will be superseded once the copy is saved).
 */
@Override
@NotNull
public BTreeMutable getMutableCopy() {
    final BTreeMutable mutableCopy = new BTreeMutable(this);
    mutableCopy.addExpiredLoggable(rootLoggable);
    return mutableCopy;
}
/**
 * Inserts 10000 leaves in random order (forcing splits), then checks the tree
 * before and after save/reopen.
 */
@Test
public void testSplitRandom() {
    int s = 10000;
    // Fix: use a seeded Random instead of Math.random() so any failure in the
    // split logic is reproducible across runs.
    final java.util.Random random = new java.util.Random(239);
    List<INode> lns = createLNs(s);
    tm = new BTreeEmpty(log, createTestSplittingPolicy(), true, 1).getMutableCopy();
    while (!lns.isEmpty()) {
        final int index = random.nextInt(lns.size());
        INode ln = lns.get(index);
        getTreeMutable().put(ln);
        lns.remove(index);
    }

    checkTree(getTreeMutable(), s).run();

    long rootAddress = saveTree();
    checkTree(getTreeMutable(), s).run();

    // Re-read the persisted tree and verify it matches.
    reopen();
    t = new BTree(log, rootAddress, true, 1);
    checkTree(getTree(), s).run();
}
/**
 * Appends one line to the file-level comment block emitted at the top of the
 * generated file.
 *
 * @param commentLine the comment line to append
 */
@Override
public void addFileCommentLine(String commentLine) {
    fileCommentLines.add(commentLine);
}
@Test
void testAddFileCommentLine() {
    final Interface userInterface = new Interface("com.foo.UserInterface");
    userInterface.addFileCommentLine("test");

    // The added line must be the sole entry, stored verbatim.
    assertNotNull(userInterface.getFileCommentLines());
    assertEquals(1, userInterface.getFileCommentLines().size());
    assertEquals("test", userInterface.getFileCommentLines().get(0));
}
/**
 * Parses the RSC Feature characteristic. The payload is a fixed 16-bit little-endian
 * bit field; any other length is reported as invalid data.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    if (data.size() == 2) {
        final int bitField = data.getIntValue(Data.FORMAT_UINT16_LE, 0);
        onRunningSpeedAndCadenceFeaturesReceived(device, new RSCFeatures(bitField));
    } else {
        onInvalidDataReceived(device, data);
    }
}
// Feeds the 2-byte value 0b01101 into the data callback and verifies each decoded feature
// flag, the raw feature value, and that the response is marked valid.
@Test public void onRunningSpeedAndCadenceFeaturesReceived() { final ProfileReadResponse callback = new RunningSpeedAndCadenceFeatureDataCallback() { @Override public void onRunningSpeedAndCadenceFeaturesReceived(@NonNull final BluetoothDevice device, @NonNull final RSCFeatures features) { called = true; assertNotNull(features); assertTrue("Instantaneous Stride Length Measurement supported", features.instantaneousStrideLengthMeasurementSupported); assertFalse("Total Distance Measurement supported", features.totalDistanceMeasurementSupported); assertTrue("Walking Or Running Status supported", features.walkingOrRunningStatusSupported); assertTrue("Calibration Procedure supported", features.calibrationProcedureSupported); assertFalse("Multiple Sensor Locations supported", features.multipleSensorLocationsSupported); assertEquals("Feature value", 0b01101, features.value); } }; called = false; final Data data = new Data(new byte[] { 0b01101, 0x00 }); callback.onDataReceived(null, data); assertTrue(called); assertTrue(callback.isValid()); }
// Returns an output stream that uploads the file as a Swift large object: writes are
// buffered into segments of at least the configured minimum size by
// MemorySegementingOutputStream, and getStatus() exposes the proxy's final response.
@Override public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) { final LargeUploadOutputStream proxy = new LargeUploadOutputStream(file, status); return new HttpResponseOutputStream<StorageObject>(new MemorySegementingOutputStream(proxy, new HostPreferences(session.getHost()).getInteger("openstack.upload.largeobject.size.minimum")), new SwiftAttributesFinderFeature(session, regionService), status) { @Override public StorageObject getStatus() { return proxy.getResponse(); } }; }
// Uploads a 6 MiB random payload twice to the same Swift path via the large-upload writer
// (unknown length, status -1), each time verifying the byte count, reported object size,
// existence, and a read-back comparison; finally deletes the file. The two braced blocks
// are intentionally identical to exercise overwriting an existing large object.
@Test public void testWriteUploadLargeBuffer() throws Exception { final SwiftRegionService regionService = new SwiftRegionService(session); final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("IAD"); final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); { final TransferStatus status = new TransferStatus(); status.setLength(-1L); final HttpResponseOutputStream<StorageObject> out = new SwiftLargeUploadWriteFeature(session, regionService, new SwiftSegmentService(session, ".segments-test/")).write(file, status, new DisabledConnectionCallback()); final byte[] content = RandomUtils.nextBytes(6 * 1024 * 1024); final ByteArrayInputStream in = new ByteArrayInputStream(content); final TransferStatus progress = new TransferStatus(); final BytecountStreamListener count = new BytecountStreamListener(); new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out); assertEquals(content.length, count.getSent()); assertEquals(content.length, out.getStatus().getSize(), 0L); assertTrue(new SwiftFindFeature(session).find(file)); final byte[] compare = new byte[content.length]; final InputStream stream = new SwiftReadFeature(session, regionService).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); IOUtils.readFully(stream, compare); stream.close(); assertArrayEquals(content, compare); } { final TransferStatus status = new TransferStatus(); status.setLength(-1L); final HttpResponseOutputStream<StorageObject> out = new SwiftLargeUploadWriteFeature(session, regionService, new SwiftSegmentService(session, ".segments-test/")).write(file, status, new DisabledConnectionCallback()); final byte[] content = RandomUtils.nextBytes(6 * 1024 * 1024); final ByteArrayInputStream in = new ByteArrayInputStream(content); final TransferStatus progress = new TransferStatus(); final BytecountStreamListener count = new BytecountStreamListener(); new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out); assertEquals(content.length, count.getSent()); assertEquals(content.length, out.getStatus().getSize(), 0L); assertTrue(new SwiftFindFeature(session).find(file)); final byte[] compare = new byte[content.length]; final InputStream stream = new SwiftReadFeature(session, regionService).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); IOUtils.readFully(stream, compare); stream.close(); assertArrayEquals(content, compare); } new SwiftDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// URL-decodes the given string with the class-level CHARSET. Rejects null input and wraps
// the (practically impossible for standard charsets) UnsupportedEncodingException in the
// library's OAuthException so callers deal with a single exception type.
public static String decode(String encoded) { Preconditions.checkNotNull(encoded, "Cannot decode null object"); try { return URLDecoder.decode(encoded, CHARSET); } catch (UnsupportedEncodingException uee) { throw new OAuthException("Charset not found while decoding string: " + CHARSET, uee); } }
// Verifies that '+' becomes a space and percent-escapes are decoded.
@Test public void shouldFormURLDecodeString() { final String encoded = "this+is+a+test+%26%5E"; final String plain = "this is a test &^"; assertEquals(plain, OAuthEncoder.decode(encoded)); }
// Returns the wrapped primitive value as a float.
@Override public float floatValue() { return value; }
// Delegates to the shared FloatValueTester test suite.
@Override @Test void testFloatValue() { new FloatValueTester().runTests(); }
// Convenience overload: resolves the logger by the class's fully-qualified name.
public final Logger getLogger(final Class<?> clazz) { return getLogger(clazz.getName()); }
// Verifies that the root logger defaults to DEBUG for both its own and effective level.
@Test public void testRootGetLogger() { Logger root = lc.getLogger(Logger.ROOT_LOGGER_NAME); assertEquals(Level.DEBUG, root.getLevel()); assertEquals(Level.DEBUG, root.getEffectiveLevel()); }
// Derives the initial receiver window length from the congestion-control setup outcome.
public int initialWindowLength() { return CongestionControl.receiverWindowLength(ccOutcome); }
// Verifies that with an explicit rcv-wnd channel param larger than half the term length,
// the initial window is clamped to termLength / 2 rather than the context default.
@Test void shouldSetWindowLengthFromTermLength() { final UdpChannel channelWithWindow = UdpChannel.parse("aeron:udp?endpoint=127.0.0.1:9999|rcv-wnd=8192"); final MediaDriver.Context context = new MediaDriver.Context().initialWindowLength(16536); final int termLength = 8192; final StaticWindowCongestionControl staticWindowCongestionControl = new StaticWindowCongestionControl( 0, channelWithWindow, 0, 0, termLength, 0, null, null, null, context, null); assertEquals(termLength / 2, staticWindowCongestionControl.initialWindowLength()); }
// Flattens the given expression tree into a collection of AndPredicates (disjunctive
// normal form extraction delegated to extractAndPredicates).
public static Collection<AndPredicate> getAndPredicates(final ExpressionSegment expression) { Collection<AndPredicate> result = new LinkedList<>(); extractAndPredicates(result, expression); return result; }
// For "status=? OR status=? AND count=?", expects two AndPredicates: one with a single
// predicate (the left OR operand) and one with two (the AND pair).
@Test void assertExtractAndPredicatesOrAndCondition() { ColumnSegment statusColumn = new ColumnSegment(0, 0, new IdentifierValue("status")); ParameterMarkerExpressionSegment statusParameterExpression = new ParameterMarkerExpressionSegment(0, 0, 0); ExpressionSegment leftExpression = new BinaryOperationExpression(0, 0, statusColumn, statusParameterExpression, "=", "status=?"); ColumnSegment countColumn = new ColumnSegment(0, 0, new IdentifierValue("count")); ParameterMarkerExpressionSegment countParameterExpression = new ParameterMarkerExpressionSegment(0, 0, 1); ExpressionSegment subLeftExpression = new BinaryOperationExpression(0, 0, statusColumn, statusParameterExpression, "=", "status=?"); ExpressionSegment subRightExpression = new BinaryOperationExpression(0, 0, countColumn, countParameterExpression, "=", "count=?"); BinaryOperationExpression rightExpression = new BinaryOperationExpression(0, 0, subLeftExpression, subRightExpression, "AND", "status=? AND count=?"); BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, leftExpression, rightExpression, "OR", "status=? OR status=? AND count=?"); Collection<AndPredicate> actual = ExpressionExtractUtils.getAndPredicates(expression); assertThat(actual.size(), is(2)); Iterator<AndPredicate> iterator = actual.iterator(); AndPredicate andPredicate1 = iterator.next(); AndPredicate andPredicate2 = iterator.next(); assertThat(andPredicate1.getPredicates().size(), is(1)); assertThat(andPredicate2.getPredicates().size(), is(2)); }
/**
 * Builds the command-line arguments for this Ant task: an optional
 * {@code -f "<buildFile>"} flag (path normalized to forward slashes and quoted)
 * followed by the optional target name, joined with single spaces.
 */
@Override
public String arguments() {
    final List<String> args = new ArrayList<>();
    if (buildFile != null) {
        args.add("-f \"" + FilenameUtils.separatorsToUnix(buildFile) + "\"");
    }
    if (target != null) {
        args.add(target);
    }
    return String.join(" ", args);
}
// With no build file set, arguments() should consist solely of the target name.
@Test public void shouldSetTargetOnBuilderWhenAvailable() { String target = "target"; antTask.setTarget(target); assertThat(antTask.arguments(), is(target)); }
// Extends the base sink-connector validation with a check that the connector's consumer
// group id does not clash with the Connect worker's own group id.
@Override protected Map<String, ConfigValue> validateSinkConnectorConfig(SinkConnector connector, ConfigDef configDef, Map<String, String> config) { Map<String, ConfigValue> result = super.validateSinkConnectorConfig(connector, configDef, config); validateSinkConnectorGroupId(config, result); return result; }
// Overriding the consumer group id to the worker's group id must produce a validation
// error on that property only; the connector name stays error-free.
@Test public void testConnectorGroupIdConflictsWithWorkerGroupId() { String overriddenGroupId = CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + GROUP_ID_CONFIG; Map<String, String> config = new HashMap<>(CONN2_CONFIG); config.put(overriddenGroupId, "connect-test-group"); SinkConnector connectorMock = mock(SinkConnector.class); // CONN2 creation should fail because the worker group id (connect-test-group) conflicts with // the consumer group id we would use for this sink Map<String, ConfigValue> validatedConfigs = herder.validateSinkConnectorConfig( connectorMock, SinkConnectorConfig.configDef(), config); ConfigValue overriddenGroupIdConfig = validatedConfigs.get(overriddenGroupId); assertEquals( Collections.singletonList("Consumer group connect-test-group conflicts with Connect worker group connect-test-group"), overriddenGroupIdConfig.errorMessages()); ConfigValue nameConfig = validatedConfigs.get(ConnectorConfig.NAME_CONFIG); assertEquals( Collections.emptyList(), nameConfig.errorMessages() ); }
/**
 * Starts the heartbeat senders after construction. The node heartbeat is mandatory;
 * the resource-runtime heartbeat is only started when the optional sender is configured.
 */
@PostConstruct
public void init() {
    nodeHeartbeatSender.start();
    resourceRuntimeHeartbeatSender.ifPresent(sender -> sender.start());
}
// After starting the sender and sleeping, the recorded heartbeat count must fall within
// +/- 50% of the target rate (a tolerance band, since timing is inherently jittery).
@Test(timeOut = 2_000) public void testNodeStatus() throws Exception { sender.init(); Thread.sleep(SLEEP_DURATION); int nodeHeartbeats = resourceManagerClient.getNodeHeartbeats(); assertTrue(nodeHeartbeats > TARGET_HEARTBEATS * 0.5 && nodeHeartbeats <= TARGET_HEARTBEATS * 1.5, format("Expect number of heartbeats to fall within target range (%s), +/- 50%%. Was: %s", TARGET_HEARTBEATS, nodeHeartbeats)); }
/**
 * Executes CLIENT LIST against the given cluster node and converts the raw
 * response lines into {@link RedisClientInfo} objects.
 */
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
    final RedisClient client = getEntry(node);
    final RFuture<List<String>> future =
            executorService.readAsync(client, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
    final List<String> lines = syncFuture(future);
    // Zero-length array form lets the JVM allocate the correctly sized array.
    return CONVERTER.convert(lines.toArray(new String[0]));
}
// Smoke test: the first master should report more than 10 connected clients.
@Test public void testGetClientList() { RedisClusterNode master = getFirstMaster(); List<RedisClientInfo> list = connection.getClientList(master); assertThat(list.size()).isGreaterThan(10); }
// Runs all provider tasks for the request against a session filtered to the given type.
// In parallel mode tasks are submitted to the common ForkJoinPool and joined; otherwise
// they run sequentially on the calling thread. Results are collected by the session.
@Override public <T> List<SearchResult<T>> search(SearchRequest request, Class<T> typeFilter) { SearchSession<T> session = new SearchSession<>(request, Collections.singleton(typeFilter)); if (request.inParallel()) { ForkJoinPool commonPool = ForkJoinPool.commonPool(); getProviderTasks(request, session).stream().map(commonPool::submit).forEach(ForkJoinTask::join); } else { getProviderTasks(request, session).forEach(Runnable::run); } return session.getResults(); }
// Verifies asynchronous search notifies the listener of start and of completion with
// exactly two results, using Awaitility to poll until the callbacks have fired.
@Test public void testAsync() { GraphGenerator generator = GraphGenerator.build().generateTinyGraph(); SearchRequest request = buildRequest(GraphGenerator.FIRST_NODE, generator); controller.search(request, searchListener); Awaitility.await().untilAsserted(() -> { Mockito.verify(searchListener).started(request); Mockito.verify(searchListener).finished(Mockito.eq(request), Mockito.argThat(list -> list.size() == 2)); }); }
// Commits the current EOS transaction after attaching the consumed offsets.
// Throws IllegalStateException when EOS is disabled. Fencing-style failures are rethrown
// as TaskMigratedException, timeouts propagate (to trigger task.timeout.ms), and any
// other KafkaException becomes a fatal StreamsException.
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets, final ConsumerGroupMetadata consumerGroupMetadata) { if (!eosEnabled()) { throw new IllegalStateException(formatException("Exactly-once is not enabled")); } maybeBeginTransaction(); try { // EOS-v2 assumes brokers are on version 2.5+ and thus can understand the full set of consumer group metadata // Thus if we are using EOS-v1 and can't make this assumption, we must downgrade the request to include only the group id metadata final ConsumerGroupMetadata maybeDowngradedGroupMetadata = processingMode == EXACTLY_ONCE_V2 ? consumerGroupMetadata : new ConsumerGroupMetadata(consumerGroupMetadata.groupId()); producer.sendOffsetsToTransaction(offsets, maybeDowngradedGroupMetadata); producer.commitTransaction(); transactionInFlight = false; } catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException | InvalidPidMappingException error) { throw new TaskMigratedException( formatException("Producer got fenced trying to commit a transaction"), error ); } catch (final TimeoutException timeoutException) { // re-throw to trigger `task.timeout.ms` throw timeoutException; } catch (final KafkaException error) { throw new StreamsException( formatException("Error encountered trying to commit a transaction"), error ); } }
// Committing on a non-EOS producer must fail fast with IllegalStateException.
@Test public void shouldFailOnCommitIfEosDisabled() { final IllegalStateException thrown = assertThrows( IllegalStateException.class, () -> nonEosStreamsProducer.commitTransaction(null, new ConsumerGroupMetadata("appId")) ); assertThat(thrown.getMessage(), is("Exactly-once is not enabled [test]")); }
// Containment comparator used with SortedListsUtils#contains: after ordering by resource
// id, returns 0 when part1 fully contains part2 (using the externally supplied lengths
// l1/l2), -1 to continue searching, and 1 to stop. Deliberately not a symmetric
// total-order comparator - it encodes search-control semantics.
@Override public int compare(ClonePart part1, ClonePart part2) { int c = RESOURCE_ID_COMPARATOR.compare(part1, part2); if (c == 0) { if (part1.getUnitStart() <= part2.getUnitStart()) { if (part2.getUnitStart() + l2 <= part1.getUnitStart() + l1) { // part1 contains part2 return 0; } else { // SortedListsUtils#contains should continue search return -1; } } else { // unitStart of part1 is less than unitStart of part2 - SortedListsUtils#contains should stop search return 1; } } else { return c; } }
// Verifies the resource-id comparator orders by id and returns 0 on equal ids.
@Test public void shouldCompareByResourceId() { Comparator<ClonePart> comparator = ContainsInComparator.RESOURCE_ID_COMPARATOR; assertThat(comparator.compare(newClonePart("a", 0), newClonePart("b", 0)), is(-1)); assertThat(comparator.compare(newClonePart("b", 0), newClonePart("a", 0)), is(1)); assertThat(comparator.compare(newClonePart("a", 0), newClonePart("a", 0)), is(0)); }
public static long getSessionAgeSum() { if (!instanceCreated) { return -1; } final long now = System.currentTimeMillis(); long result = 0; for (final HttpSession session : SESSION_MAP_BY_ID.values()) { try { result += now - session.getCreationTime(); } catch (final Exception e) { // Tomcat can throw "java.lang.IllegalStateException: getCreationTime: Session already invalidated" continue; } } return result; }
// After creating one session, the age sum must be non-negative.
@Test public void testGetSessionAgeSum() { sessionListener.sessionCreated(createSessionEvent()); if (SessionListener.getSessionAgeSum() < 0) { fail("getSessionAgeSum"); } }
// Resolves a field from the workflow instance on a separate executor thread with a
// timeout, mapping a null result to the empty string (param values disallow null) and
// wrapping any failure (timeout, interruption, lookup error) in MaestroInternalError.
Object getFromInstance(String fieldName) { try { Object ret = executor .submit(() -> fromInstance(fieldName)) .get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS); // As param does not allow null, using empty space indicates unset for now. return ret == null ? "" : ret; } catch (Exception e) { throw new MaestroInternalError( e, "getFromInstance throws an exception for fieldName=[%s]", fieldName); } }
// Stubs the instance wrapper and verifies that each supported field name resolves to the
// expected value, including the initiator-type change after swapping in a validation
// initiator.
@Test public void testGetFromInstance() { when(instanceWrapper.getWorkflowId()).thenReturn("test-workflow-id"); when(instanceWrapper.getWorkflowInstanceId()).thenReturn(2L); when(instanceWrapper.isWorkflowParam()).thenReturn(true); when(instanceWrapper.getInitiatorTimeZone()).thenReturn("US/Pacific"); Initiator initiator = new ManualInitiator(); initiator.setCaller(User.create("tester")); when(instanceWrapper.getInitiator()).thenReturn(initiator); when(instanceWrapper.getRunPolicy()).thenReturn("START_FRESH_NEW_RUN"); when(instanceWrapper.getWorkflowOwner()).thenReturn("tester"); when(instanceWrapper.getFirstTimeTriggerTimeZone()).thenReturn("UTC"); assertEquals("US/Pacific", paramExtension.getFromInstance(Constants.INITIATOR_TIMEZONE_PARAM)); assertEquals("MANUAL", paramExtension.getFromInstance(Constants.INITIATOR_TYPE_PARAM)); assertEquals("tester", paramExtension.getFromInstance(Constants.INITIATOR_RUNNER_NAME)); assertEquals( "START_FRESH_NEW_RUN", paramExtension.getFromInstance(Constants.WORKFLOW_RUN_POLICY_PARAM)); assertEquals("tester", paramExtension.getFromInstance(Constants.WORKFLOW_OWNER_PARAM)); assertEquals( "UTC", paramExtension.getFromInstance(Constants.FIRST_TIME_TRIGGER_TIMEZONE_PARAM)); assertEquals("test-workflow-id", paramExtension.getFromInstance(Constants.WORKFLOW_ID_PARAM)); assertEquals(2L, paramExtension.getFromInstance(Constants.WORKFLOW_INSTANCE_ID_PARAM)); initiator = new DryRunValidator.ValidationInitiator(); when(instanceWrapper.getInitiator()).thenReturn(initiator); assertEquals("VALIDATION", paramExtension.getFromInstance(Constants.INITIATOR_TYPE_PARAM)); }
// Applies worker logging configuration after initialize(): sets the root JUL level from
// SdkHarnessOptions (deprecated DataflowWorkerLoggingOptions take precedence when set to
// a non-default), applies per-logger level overrides (deprecated overrides win over
// harness ones), rebinds System.out/err to JUL-backed streams when message levels are
// configured, and warns once if any deprecated option was used.
public static synchronized void configure(DataflowWorkerLoggingOptions options) { if (!initialized) { throw new RuntimeException("configure() called before initialize()"); } // For compatibility reason, we do not call SdkHarnessOptions.getConfiguredLoggerFromOptions // to config the logging for legacy worker, instead replicate the config steps used for // DataflowWorkerLoggingOptions for default log level and log level overrides. SdkHarnessOptions harnessOptions = options.as(SdkHarnessOptions.class); boolean usedDeprecated = false; // default value for both DefaultSdkHarnessLogLevel and DefaultWorkerLogLevel are INFO Level overrideLevel = getJulLevel(harnessOptions.getDefaultSdkHarnessLogLevel()); if (options.getDefaultWorkerLogLevel() != null && options.getDefaultWorkerLogLevel() != INFO) { overrideLevel = getJulLevel(options.getDefaultWorkerLogLevel()); usedDeprecated = true; } LogManager.getLogManager().getLogger(ROOT_LOGGER_NAME).setLevel(overrideLevel); if (options.getWorkerLogLevelOverrides() != null) { for (Map.Entry<String, DataflowWorkerLoggingOptions.Level> loggerOverride : options.getWorkerLogLevelOverrides().entrySet()) { Logger logger = Logger.getLogger(loggerOverride.getKey()); logger.setLevel(getJulLevel(loggerOverride.getValue())); configuredLoggers.add(logger); } usedDeprecated = true; } else if (harnessOptions.getSdkHarnessLogLevelOverrides() != null) { for (Map.Entry<String, SdkHarnessOptions.LogLevel> loggerOverride : harnessOptions.getSdkHarnessLogLevelOverrides().entrySet()) { Logger logger = Logger.getLogger(loggerOverride.getKey()); logger.setLevel(getJulLevel(loggerOverride.getValue())); configuredLoggers.add(logger); } } // If the options specify a level for messages logged to System.out/err, we need to reconfigure // the corresponding stream adapter. if (options.getWorkerSystemOutMessageLevel() != null) { System.out.close(); System.setOut( JulHandlerPrintStreamAdapterFactory.create( loggingHandler, SYSTEM_OUT_LOG_NAME, getJulLevel(options.getWorkerSystemOutMessageLevel()), Charset.defaultCharset())); } if (options.getWorkerSystemErrMessageLevel() != null) { System.err.close(); System.setErr( JulHandlerPrintStreamAdapterFactory.create( loggingHandler, SYSTEM_ERR_LOG_NAME, getJulLevel(options.getWorkerSystemErrMessageLevel()), Charset.defaultCharset())); } if (usedDeprecated) { LOG.warn( "Deprecated DataflowWorkerLoggingOptions are used for log level settings." + "Consider using options defined in SdkHarnessOptions for forward compatibility."); } }
// With System.out overridden to ERROR, a println must not appear in captured log lines.
@Test public void testSystemOutCustomLogLevel() throws IOException { DataflowWorkerLoggingOptions options = PipelineOptionsFactory.as(DataflowWorkerLoggingOptions.class); options.setWorkerLogLevelOverrides( new WorkerLogLevelOverrides() .addOverrideForName("System.out", DataflowWorkerLoggingOptions.Level.ERROR)); DataflowWorkerLoggingInitializer.configure(options); System.out.println("sys.out"); List<String> actualLines = retrieveLogLines(); // N.B.: It's not safe to assert that actualLines is "empty" since the logging framework is // global and logs may be concurrently written by other infrastructure. assertThat(actualLines, not(hasItem(containsString("sys.out")))); }
// Lazily creates and caches the named Array, validating the name against the config
// definition on first access.
public Array getArray(String name) { Array a = arrayMap.get(name); if (a == null) { validateArray(name); a = new Array(configDefinition, name); arrayMap.put(name, a); } return a; }
// Appends three strings to an array payload and verifies order and count both through
// the builder and through the serialized slime cursor.
@Test public void require_that_arrays_can_be_appended_simple_values() { ConfigPayloadBuilder builder = new ConfigPayloadBuilder(); ConfigPayloadBuilder.Array array = builder.getArray("foo"); array.append("bar"); array.append("baz"); array.append("bim"); assertEquals(3, array.getElements().size()); Cursor root = createSlime(builder); Cursor a = root.field("foo"); assertEquals("bar", a.entry(0).asString()); assertEquals("baz", a.entry(1).asString()); assertEquals("bim", a.entry(2).asString()); }
/**
 * Merges the base (source) table's distribution with the one declared on the derived
 * table. Declaring a distribution on the derived table while the base table already has
 * one is rejected unless the DISTRIBUTION feature uses the EXCLUDING strategy; otherwise
 * the derived table's distribution, when present, takes precedence over the source's.
 */
public Optional<TableDistribution> mergeDistribution(
        MergingStrategy mergingStrategy,
        Optional<TableDistribution> sourceTableDistribution,
        Optional<TableDistribution> derivedTableDistribution) {
    final boolean bothDefined =
            derivedTableDistribution.isPresent() && sourceTableDistribution.isPresent();
    if (bothDefined && mergingStrategy != MergingStrategy.EXCLUDING) {
        throw new ValidationException(
                "The base table already has a distribution defined. You might want to specify "
                        + "EXCLUDING DISTRIBUTION.");
    }
    // Derived wins when present; otherwise fall back to the source table's distribution.
    return derivedTableDistribution.isPresent() ? derivedTableDistribution : sourceTableDistribution;
}
// With no source distribution, the derived table's distribution must pass through as-is.
@Test void mergeDistributionFromDerivedTable() { Optional<TableDistribution> derivedDistribution = Optional.of(TableDistribution.ofHash(Collections.singletonList("a"), 3)); Optional<TableDistribution> mergePartitions = util.mergeDistribution( getDefaultMergingStrategies().get(FeatureOption.DISTRIBUTION), Optional.empty(), derivedDistribution); assertThat(mergePartitions).isEqualTo(derivedDistribution); }
// Static factory: returns a fresh Read transform with default settings.
public static Read read() { return Read.create(); }
// Verifies that retry options set through BigtableOptions survive withBigtableOptions()
// and are reflected in the configured read.
@Test public void testReadWithBigTableOptionsSetsRetryOptions() { final int initialBackoffMillis = -1; BigtableOptions.Builder optionsBuilder = BIGTABLE_OPTIONS.toBuilder(); RetryOptions.Builder retryOptionsBuilder = RetryOptions.builder(); retryOptionsBuilder.setInitialBackoffMillis(initialBackoffMillis); optionsBuilder.setRetryOptions(retryOptionsBuilder.build()); BigtableIO.Read read = BigtableIO.read().withBigtableOptions(optionsBuilder.build()); BigtableOptions options = read.getBigtableOptions(); assertEquals(initialBackoffMillis, options.getRetryOptions().getInitialBackoffMillis()); assertThat(options.getRetryOptions(), Matchers.equalTo(retryOptionsBuilder.build())); }
/**
 * Encodes the configured columns of the tuple into feature indices by adding each
 * column's per-column offset from {@code base} to its integer value.
 */
@Override
public int[] apply(Tuple x) {
    final int n = columns.length;
    final int[] encoded = new int[n];
    for (int i = 0; i < n; i++) {
        encoded[i] = base[i] + x.getInt(columns[i]);
    }
    return encoded;
}
// Encodes the nominal weather dataset's four columns and compares every row against the
// expected index matrix.
@Test public void test() { System.out.println("Binary Encoder"); int[][] result = { {0, 3, 6, 9}, {0, 3, 6, 8}, {1, 3, 6, 9}, {2, 4, 6, 9}, {2, 5, 7, 9}, {2, 5, 7, 8}, {1, 5, 7, 8}, {0, 4, 6, 9}, {0, 5, 7, 9}, {2, 4, 7, 9}, {0, 4, 7, 8}, {1, 4, 6, 8}, {1, 3, 7, 9}, {2, 4, 6, 8} }; DataFrame data = WeatherNominal.data; BinaryEncoder encoder = new BinaryEncoder(data.schema(), "outlook", "temperature", "humidity", "windy"); int[][] onehot = encoder.apply(data); for (int i = 0; i < data.size(); i++) { for (int j = 0; j < result[i].length; j++) { assertEquals(result[i][j], onehot[i][j]); } } }
/**
 * Removes the built-in role with the given name (looked up case-insensitively via the
 * lower-cased name field) and pulls its id from every user's roles list. Does nothing
 * when the role does not exist.
 */
void removeBuiltinRole(final String roleName) {
    final Bson roleFindingFilter = Filters.eq(RoleServiceImpl.NAME_LOWER, roleName.toLowerCase(Locale.ENGLISH));
    final MongoDatabase mongoDatabase = mongoConnection.getMongoDatabase();
    final MongoCollection<Document> rolesCollection = mongoDatabase.getCollection(RoleServiceImpl.ROLES_COLLECTION_NAME);
    // Only the id is needed to unlink the role from users.
    final Document role = rolesCollection.find(roleFindingFilter)
            .projection(include("_id"))
            .first();
    if (role != null) {
        final ObjectId roleToBeRemovedId = role.getObjectId("_id");
        final MongoCollection<Document> usersCollection = mongoDatabase.getCollection(UserImpl.COLLECTION_NAME);
        // Unlink the role from all users before deleting the role document itself.
        final UpdateResult updateResult = usersCollection.updateMany(Filters.empty(), Updates.pull(UserImpl.ROLES, roleToBeRemovedId));
        if (updateResult.getModifiedCount() > 0) {
            LOG.info(StringUtils.f("Removed role %s from %d users", roleName, updateResult.getModifiedCount()));
        }
        final DeleteResult deleteResult = rolesCollection.deleteOne(roleFindingFilter);
        if (deleteResult.getDeletedCount() > 0) {
            LOG.info(StringUtils.f("Removed role %s ", roleName));
        } else {
            // Message fixed: was the garbled "Failed to remove role %s migration!".
            LOG.warn(StringUtils.f("Failed to remove role %s during migration!", roleName));
        }
    }
}
// After removing "Field Type Mappings Manager", only the admin role remains, the removed
// role document is gone, and both users' role lists no longer reference it.
@Test public void testRemovesRoleAndItsUsageInUsersCollection() { toTest.removeBuiltinRole("Field Type Mappings Manager"); //only one role remain in DB assertEquals(1, rolesCollection.countDocuments()); //Field Type Mappings Manager is gone assertNull(rolesCollection.find(Filters.eq("_id", FTM_MANAGER_ROLE)).first()); //both users are changed, they do not reference Field Type Mappings Manager anymore final Document adminUser = usersCollection.find(Filters.eq(UserImpl.USERNAME, TEST_ADMIN_USER_WITH_BOTH_ROLES)).first(); List<ObjectId> roles = adminUser.getList(UserImpl.ROLES, ObjectId.class); assertEquals(1, roles.size()); assertTrue(roles.contains(new ObjectId(ADMIN_ROLE))); assertFalse(roles.contains(new ObjectId(FTM_MANAGER_ROLE))); final Document testUser = usersCollection.find(Filters.eq(UserImpl.USERNAME, TEST_USER_WITH_FTM_MANAGER_ROLE_ONLY)).first(); roles = testUser.getList(UserImpl.ROLES, ObjectId.class); assertTrue(roles.isEmpty()); }
// Returns true when every element of the JSON array is itself "empty" per isNodeEmpty;
// an array with no elements is therefore also considered empty.
protected boolean isListEmpty(ArrayNode json) { for (JsonNode node : json) { if (!isNodeEmpty(node)) { return false; } } return true; }
// An array containing a single empty object node counts as empty.
@Test public void isListEmpty_emptyNode() { ArrayNode json = new ArrayNode(factory); ObjectNode nestedNode = new ObjectNode(factory); json.add(nestedNode); assertThat(expressionEvaluator.isListEmpty(json)).isTrue(); }
// Issues CONFIG SET param value on the given cluster node and waits for completion.
@Override public void setConfig(RedisClusterNode node, String param, String value) { RedisClient entry = getEntry(node); RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value); syncFuture(f); }
// Smoke test: setting "timeout" on the first master must not throw.
@Test public void testSetConfig() { RedisClusterNode master = getFirstMaster(); connection.setConfig(master, "timeout", "10"); }
// Validates a URL against the class-level URL_PATTERN and accepts the host part
// (capture group 2 - presumably the hostname; confirm against URL_PATTERN) when it is
// either an IP literal or a syntactically valid domain name.
@VisibleForTesting static boolean isValidUrlFormat(String url) { Matcher matcher = URL_PATTERN.matcher(url); if (matcher.find()) { String host = matcher.group(2); return InetAddresses.isInetAddress(host) || InternetDomainName.isValid(host); } return false; }
// A plain http URL with a hyphenated host must be accepted.
@Test public void testValidURL() { assertTrue(SplunkEventWriter.isValidUrlFormat("http://test-url")); }
// Accessor for the configured exit-values string (comma-separated, as set on the endpoint).
public String getExitValues() { return exitValues; }
// The exitValues URI option must be stored verbatim on the created endpoint.
@Test @DirtiesContext public void testCreateEndpointWithExitValues() throws Exception { ExecEndpoint e = createExecEndpoint("exec:test?exitValues=1,2,3"); assertEquals("1,2,3", e.getExitValues()); }
// Extracts the points whose timestamps fall inside subsetWindow. Builds a synthetic
// probe point at the window's midpoint (lat/long 0,0 is a placeholder - only the time
// matters for the set's ordering), anchors on the nearest real point via floor/ceiling,
// then scans backwards to the window start and forwards to the window end, collecting
// contained points. Scanning one point past each boundary before breaking is deliberate.
public static <T> TreeSet<Point<T>> subset(TimeWindow subsetWindow, NavigableSet<Point<T>> points) { checkNotNull(subsetWindow); checkNotNull(points); //if the input collection is empty the output collection will be empty to if (points.isEmpty()) { return newTreeSet(); } Point<T> midPoint = Point.<T>builder() .time(subsetWindow.instantWithin(.5)) .latLong(0.0, 0.0) .build(); /* * Find exactly one point in the actual Track, ideally this point will be in the middle of * the time window */ Point<T> aPointInTrack = points.floor(midPoint); if (aPointInTrack == null) { aPointInTrack = points.ceiling(midPoint); } TreeSet<Point<T>> outputSubset = newTreeSet(); //given a starting point....go up until you hit startTime. NavigableSet<Point<T>> headset = points.headSet(aPointInTrack, true); Iterator<Point<T>> iter = headset.descendingIterator(); while (iter.hasNext()) { Point<T> pt = iter.next(); if (subsetWindow.contains(pt.time())) { outputSubset.add(pt); } if (pt.time().isBefore(subsetWindow.start())) { break; } } //given a starting point....go down until you hit endTime. NavigableSet<Point<T>> tailSet = points.tailSet(aPointInTrack, true); iter = tailSet.iterator(); while (iter.hasNext()) { Point<T> pt = iter.next(); if (subsetWindow.contains(pt.time())) { outputSubset.add(pt); } if (pt.time().isAfter(subsetWindow.end())) { break; } } return outputSubset; }
// The subset must be inclusive of both window endpoints and contain exactly the 8 points
// inside the chosen window of Track1.
@Test public void subset_reflectsStartAndEndTimes() { Track<NopHit> t1 = createTrackFromFile(new File("src/test/resources/Track1.txt")); Instant startTime = parseNopTime("07/08/2017", "14:10:45.534"); Instant endTime = parseNopTime("07/08/2017", "14:11:17.854"); TimeWindow extractionWindow = TimeWindow.of(startTime, endTime); TreeSet<Point<NopHit>> subset = subset(extractionWindow, t1.points()); assertThat(subset, hasSize(8)); assertThat(subset.first().time(), is(startTime)); assertThat(subset.last().time(), is(endTime)); }
// Plain setter for the JMS destination type (e.g. queue/topic class name); null allowed.
public void setDestinationType(String destinationType) { this.destinationType = destinationType; }
// A null destination type must make the activation spec invalid, flagging exactly the
// destinationType property.
@Test(timeout = 60000) public void testNoDestinationTypeFailure() { activationSpec.setDestinationType(null); PropertyDescriptor[] expected = {destinationTypeProperty}; assertActivationSpecInvalid(expected); }
// Package-private accessor exposing the parsed consumer properties.
Properties consumerProps() { return consumerProps; }
// Without an explicit client id option, the console consumer must default client.id to
// "console-consumer".
@Test public void testDefaultClientId() throws IOException { String[] args = new String[]{ "--bootstrap-server", "localhost:9092", "--topic", "test", "--from-beginning" }; ConsoleConsumerOptions config = new ConsoleConsumerOptions(args); Properties consumerProperties = config.consumerProps(); assertEquals("console-consumer", consumerProperties.getProperty(ConsumerConfig.CLIENT_ID_CONFIG)); }
// Returns the suffix of registered steps starting at the first step whose number is
// closest to (per lookupIndexOfClosestTo) the given migration number, or an empty list
// when no such step exists. Validates the number first.
@Override public List<RegisteredMigrationStep> readFrom(long migrationNumber) { validate(migrationNumber); int startingIndex = lookupIndexOfClosestTo(migrationNumber); if (startingIndex < 0) { return Collections.emptyList(); } return steps.subList(startingIndex, steps.size()); }
// For registered steps {1, 2, 8}, readFrom(n) yields the steps with numbers >= n.
@Test public void readFrom_returns_stream_of_sublist_from_the_first_migration_with_number_greater_or_equal_to_argument() { verifyContainsNumbers(underTest.readFrom(1), 1L, 2L, 8L); verifyContainsNumbers(underTest.readFrom(2), 2L, 8L); verifyContainsNumbers(underTest.readFrom(3), 8L); verifyContainsNumbers(underTest.readFrom(4), 8L); verifyContainsNumbers(underTest.readFrom(5), 8L); verifyContainsNumbers(underTest.readFrom(6), 8L); verifyContainsNumbers(underTest.readFrom(7), 8L); verifyContainsNumbers(underTest.readFrom(8), 8L); }
// Recursively checks whether any scan under the given plan node lacks column statistics.
// OLAP scans consult the statistic storage (only key columns for AGG_KEYS tables);
// Hive/Hudi scans defer to the operator's own flag when hive column stats are enabled,
// otherwise they are treated as unknown; Iceberg/DeltaLake use their operator flags;
// any other scan type is conservatively considered unknown.
public static boolean hasUnknownColumnsStats(OptExpression root) { Operator operator = root.getOp(); if (operator instanceof LogicalScanOperator) { LogicalScanOperator scanOperator = (LogicalScanOperator) operator; List<String> colNames = scanOperator.getColRefToColumnMetaMap().values().stream().map(Column::getName).collect( Collectors.toList()); if (operator instanceof LogicalOlapScanOperator) { Table table = scanOperator.getTable(); if (table instanceof OlapTable) { if (KeysType.AGG_KEYS.equals(((OlapTable) table).getKeysType())) { List<String> keyColumnNames = scanOperator.getColRefToColumnMetaMap().values().stream().filter(Column::isKey) .map(Column::getName) .collect(Collectors.toList()); List<ColumnStatistic> keyColumnStatisticList = GlobalStateMgr.getCurrentState().getStatisticStorage().getColumnStatistics(table, keyColumnNames); return keyColumnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown); } } List<ColumnStatistic> columnStatisticList = GlobalStateMgr.getCurrentState().getStatisticStorage().getColumnStatistics(table, colNames); return columnStatisticList.stream().anyMatch(ColumnStatistic::isUnknown); } else if (operator instanceof LogicalHiveScanOperator || operator instanceof LogicalHudiScanOperator) { if (ConnectContext.get().getSessionVariable().enableHiveColumnStats()) { if (operator instanceof LogicalHiveScanOperator) { return ((LogicalHiveScanOperator) operator).hasUnknownColumn(); } else { return ((LogicalHudiScanOperator) operator).hasUnknownColumn(); } } return true; } else if (operator instanceof LogicalIcebergScanOperator) { return ((LogicalIcebergScanOperator) operator).hasUnknownColumn(); } else if (operator instanceof LogicalDeltaLakeScanOperator) { return ((LogicalDeltaLakeScanOperator) operator).hasUnknownColumn(); } else { // For other scan operators, we do not know the column statistics. return true; } } return root.getInputs().stream().anyMatch(Utils::hasUnknownColumnsStats); }
// An OLAP scan over table t1 should report known column statistics.
@Test public void unknownStats2() { GlobalStateMgr globalStateMgr = connectContext.getGlobalStateMgr(); OlapTable t1 = (OlapTable) globalStateMgr.getDb("test").getTable("t1"); OptExpression opt = new OptExpression( new LogicalOlapScanOperator(t1, Maps.newHashMap(), Maps.newHashMap(), null, -1, null)); Assert.assertFalse(Utils.hasUnknownColumnsStats(opt)); }
/**
 * Assigns Elasticsearch shards to cluster member addresses, preferring members
 * co-located with a shard's node and balancing by assignment-list size.
 *
 * @param shards    shards to distribute; each carries the IP of the node hosting it
 * @param addresses candidate member addresses (multiple members may share a host)
 * @return mapping from member address to the shards it should read
 * @throws IllegalStateException if a shard has no candidate host among the members
 */
static Map<Address, List<Shard>> assignShards(Collection<Shard> shards, Collection<Address> addresses) {
    // host -> [indexShard...]
    Map<String, List<String>> assignment = addresses.stream()
            .map(Address::getHost).distinct().collect(toMap(identity(), a -> new ArrayList<>()));
    // indexShard -> [host...]
    Map<String, List<String>> nodeCandidates = shards.stream()
            .collect(groupingBy(Shard::indexShard, mapping(Shard::getIp, toList())));

    // Make the assignment: each shard goes to the candidate host with the
    // fewest shards assigned so far (greedy balancing).
    nodeCandidates.forEach((indexShard, hosts) -> hosts.stream()
            .map(assignment::get)
            .filter(Objects::nonNull)
            .min(comparingInt(List::size))
            .orElseThrow(() -> new IllegalStateException("Selected members do not contain shard '" + indexShard + "'"))
            .add(indexShard));

    // Transform the results: fan host-level assignments out to the individual
    // member addresses on that host, splitting the shard list into even chunks.
    Map<String, List<Address>> addressMap = addresses.stream().collect(groupingBy(Address::getHost, toList()));
    // "indexShard@ip" uniquely identifies a shard replica on a host.
    Map<String, Shard> shardMap = shards.stream().collect(toMap(s -> s.indexShard() + "@" + s.getIp(), identity()));
    return assignment.entrySet().stream()
            .flatMap(e -> {
                List<Address> a = addressMap.get(e.getKey());
                List<Shard> s = e.getValue().stream()
                        .map(indexShard -> shardMap.get(indexShard + "@" + e.getKey())).toList();
                // Chunk size: ceiling division so every shard is covered.
                int c = (int) Math.ceil((double) s.size() / a.size());
                return IntStream.range(0, a.size())
                        .mapToObj(i -> entry(a.get(i), List.copyOf(s.subList(i * c, Math.min((i + 1) * c, s.size())))));
            }).collect(toMap(Entry::getKey, Entry::getValue));
}
// A shard whose host has no matching member address must be reported, not
// silently dropped.
@Test
public void given_noMatchingNode_when_assignShards_thenThrowException() {
    List<Shard> shards = List.of(
            new Shard("elastic-index", 0, Prirep.p, 10, "STARTED", "10.0.0.1", "10.0.0.1:9200", "node1")
    );
    List<Address> addresses = addresses("10.0.0.2");

    assertThatThrownBy(() -> ElasticSourcePMetaSupplier.assignShards(shards, addresses))
            .hasMessage("Selected members do not contain shard 'elastic-index-0'");
}
/**
 * Formats the expression with default options (no identifier escaping).
 *
 * @param expression the expression to render
 * @return the SQL-like textual form of the expression
 */
public static String formatExpression(final Expression expression) {
    return formatExpression(expression, FormatOptions.of(s -> false));
}
// An interval unit renders as its bare time-unit name.
@Test
public void shouldFormatIntervalExpression() {
    assertThat(ExpressionFormatter.formatExpression(new IntervalUnit(TimeUnit.DAYS)), equalTo("DAYS"));
}
/**
 * Evaluates the registered requirements against the template in ascending
 * priority order and returns the first unfulfilled result, or a fulfilled
 * result with an empty message when every requirement passes. Evaluation is
 * lazy: requirements after the first failure are not checked.
 */
public Result check(IndexSetTemplate indexSetTemplate) {
    return indexSetTemplateRequirements.stream()
            .sorted(Comparator.comparing(IndexSetTemplateRequirement::priority))
            .map(requirement -> requirement.check(indexSetTemplate))
            .filter(checkResult -> !checkResult.fulfilled())
            .findFirst()
            .orElseGet(() -> new Result(true, ""));
}
// When the first (highest-priority) requirement fails, its result is returned
// and later requirements are never evaluated.
@Test
void testRequirement1NotFulfilled() {
    Result expectedResult = new Result(false, "r1");
    when(requirement1.check(any())).thenReturn(expectedResult);
    Result result = underTest.check(indexSetTemplate);
    assertThat(result).isEqualTo(expectedResult);
    requirements.verify(requirement1).check(any());
    requirements.verify(requirement2, never()).check(any());
}
/**
 * Returns the (possibly cached) table for the given database/table name.
 * For interactive queries (COM_QUERY) the access time is recorded so the
 * background refresher can prioritize recently used tables.
 */
@Override
public Table getTable(String dbName, String tableName) {
    if (ConnectContext.get() != null && ConnectContext.get().getCommand() == MysqlCommand.COM_QUERY) {
        DatabaseTableName databaseTableName = DatabaseTableName.of(dbName, tableName);
        // Track last access for query-driven cache refresh.
        lastAccessTimeMap.put(databaseTableName, System.currentTimeMillis());
    }
    return get(tableCache, DatabaseTableName.of(dbName, tableName));
}
// The caching metastore must surface the Delta table converted by DeltaUtils,
// preserving database/table name and location.
@Test
public void testGetTable() {
    new MockUp<DeltaUtils>() {
        @mockit.Mock
        public DeltaLakeTable convertDeltaToSRTable(String catalog, String dbName, String tblName, String path,
                                                    Engine deltaEngine, long createTime) {
            return new DeltaLakeTable(1, "delta0", "db1", "table1", Lists.newArrayList(),
                    Lists.newArrayList("ts"), null, "s3://bucket/path/to/table", null, 0);
        }
    };
    CachingDeltaLakeMetastore cachingDeltaLakeMetastore = CachingDeltaLakeMetastore.createCatalogLevelInstance(
            metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 100);
    Table table = cachingDeltaLakeMetastore.getTable("db1", "table1");
    Assert.assertTrue(table instanceof DeltaLakeTable);
    DeltaLakeTable deltaLakeTable = (DeltaLakeTable) table;
    Assert.assertEquals("db1", deltaLakeTable.getDbName());
    Assert.assertEquals("table1", deltaLakeTable.getTableName());
    Assert.assertEquals("s3://bucket/path/to/table", deltaLakeTable.getTableLocation());
}
/**
 * Acquires a read/write lock on the given inode.
 *
 * @param inode      the inode to lock
 * @param mode       READ or WRITE lock mode
 * @param useTryLock whether to acquire via tryLock rather than blocking lock
 * @return a resource that releases the lock when closed
 */
public RWLockResource lockInode(InodeView inode, LockMode mode, boolean useTryLock) {
    return mInodeLocks.get(inode.getId(), mode, useTryLock);
}
// Lock compatibility matrix: any combination involving WRITE must conflict;
// READ+READ must be compatible.
@Test(timeout = 10000)
public void lockInode() throws Exception {
    inodeLockTest(LockMode.WRITE, LockMode.READ, true);
    inodeLockTest(LockMode.READ, LockMode.WRITE, true);
    inodeLockTest(LockMode.WRITE, LockMode.WRITE, true);
    inodeLockTest(LockMode.READ, LockMode.READ, false);
}
/**
 * Validates that this agent's UUID is one of the known UUIDs. When it is not
 * (or the known set is empty), records an error against the UUID field.
 * Returns whether this config is error-free after validation.
 */
public boolean validateUuidPresent(CaseInsensitiveString name, Set<String> uuids) {
    final boolean uuidIsKnown = !isEmpty(uuids) && uuids.contains(uuid);
    if (!uuidIsKnown) {
        this.addError(UUID, format("Environment '%s' has an invalid agent uuid '%s'", name, uuid));
    }
    return errors().isEmpty();
}
// A UUID contained in the known set validates cleanly.
@Test
void shouldValidateToTrueIfTheUUIDAssociatedWithEnvironmentIsPresentInTheSystem() {
    String uuidThatWillBeValidated = "uuid2";
    EnvironmentAgentConfig envAgentConf = new EnvironmentAgentConfig(uuidThatWillBeValidated);
    Set<String> setOfUUIDs = Set.of("uuid1", uuidThatWillBeValidated, "uuid3");

    boolean isPresent = envAgentConf.validateUuidPresent(new CaseInsensitiveString("env1"), setOfUUIDs);
    assertTrue(isPresent);
}
/**
 * Returns the step-meta types this analyzer supports; only the REST step.
 * A fresh mutable set is returned on each call, matching the base contract.
 */
@Override
public Set<Class<? extends BaseStepMeta>> getSupportedSteps() {
    final Set<Class<? extends BaseStepMeta>> steps = new HashSet<>();
    steps.add( RestMeta.class );
    return steps;
}
// Exactly one supported step type is reported, and it is RestMeta.
@Test
public void testGetSupportedSteps() throws Exception {
    Set<Class<? extends BaseStepMeta>> types = analyzer.getSupportedSteps();
    assertNotNull( types );
    assertEquals( types.size(), 1 );
    assertTrue( types.contains( RestMeta.class ) );
}
/**
 * Fetches the system-schema row describing one table in the given cluster.
 *
 * @param clusterId cluster whose CQL session should be used
 * @param args      keyspace and table name to look up
 * @return the single schema row plus its column headers
 * @throws ClusterTableException.ClusterTableNotFoundException if the table does not exist
 */
public CqlSessionSelectResult tableDetail(String clusterId, TableDTO.ClusterTableGetArgs args) {
    CqlSession session = cqlSessionFactory.get(clusterId);
    // At most one row can match (keyspace, table) in system_schema.tables.
    int limit = 1;

    SimpleStatement statement = ClusterUtils.getSchemaTables(session, args.getKeyspace())
            .all()
            .whereColumn(CassandraSystemTablesColumn.TABLES_KEYSPACE_NAME.getColumnName()).isEqualTo(bindMarker())
            .whereColumn(CassandraSystemTablesColumn.TABLES_TABLE_NAME.getColumnName()).isEqualTo(bindMarker())
            .limit(limit)
            .build(args.getKeyspace(), args.getTable())
            .setPageSize(limit)
            .setTimeout(Duration.ofSeconds(3));

    ResultSet resultSet = session.execute(statement);
    ColumnDefinitions definitions = resultSet.getColumnDefinitions();

    Row row = resultSet.one();
    if (row == null) {
        throw new ClusterTableException.ClusterTableNotFoundException(
                String.format("not found table(%s)", args.getTable()));
    }

    return CqlSessionSelectResult.builder()
            .row(convertRow(session.getContext().getCodecRegistry(), definitions, row))
            .rowHeader(CassdioColumnDefinition.makes(definitions))
            .build();
}
// Requesting a non-existent table surfaces the dedicated not-found exception.
@Test
void when_get_not_exists_table_throw_not_exists_exception() {
    // given
    TableDTO.ClusterTableGetArgs args = TableDTO.ClusterTableGetArgs.builder()
            .keyspace(keyspaceName)
            .table("not_exists_table")
            .build();

    // when & then
    assertThatThrownBy(() -> clusterTableGetCommander.tableDetail(CLUSTER_ID, args))
            .isInstanceOf(ClusterTableNotFoundException.class);
}
/**
 * Enriches AES nodes via the AES-specific overload; any other node type is
 * returned unchanged.
 */
@NotNull
@Override
public INode enrich(@NotNull INode node) {
    if (node instanceof AES) {
        final AES aesNode = (AES) node;
        return enrich(aesNode);
    }
    return node;
}
// JCA-detected AES without an explicit key length is enriched with the JCA
// default of 128 bits.
@Test
void defaultKeyLengthForJca() {
    DetectionLocation testDetectionLocation =
            new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "Jca");

    final AES aes = new AES(testDetectionLocation);
    this.logBefore(aes);

    final AESEnricher aesEnricher = new AESEnricher();
    final INode enriched = aesEnricher.enrich(aes);
    this.logAfter(enriched);

    assertThat(enriched).isInstanceOf(AES.class);
    final AES enrichedAES = (AES) enriched;
    assertThat(enrichedAES.getKeyLength()).isPresent();
    assertThat(enrichedAES.getKeyLength().get().asString()).isEqualTo("128");
}
/**
 * Updates a registered instance via the naming server HTTP API (PUT).
 *
 * @param serviceName service the instance belongs to
 * @param groupName   group of the service
 * @param instance    instance carrying the new ip/port/weight/metadata values
 * @throws NacosException if the server request fails
 */
@Override
public void updateInstance(String serviceName, String groupName, Instance instance) throws NacosException {
    NAMING_LOGGER.info("[UPDATE-SERVICE] {} update service {} with instance: {}", namespaceId, serviceName,
            instance);

    // Flatten the instance into the form parameters expected by the server.
    final Map<String, String> params = new HashMap<>(32);
    params.put(CommonParams.NAMESPACE_ID, namespaceId);
    params.put(CommonParams.SERVICE_NAME, serviceName);
    params.put(CommonParams.GROUP_NAME, groupName);
    params.put(CommonParams.CLUSTER_NAME, instance.getClusterName());
    params.put(IP_PARAM, instance.getIp());
    params.put(PORT_PARAM, String.valueOf(instance.getPort()));
    params.put(WEIGHT_PARAM, String.valueOf(instance.getWeight()));
    params.put(ENABLE_PARAM, String.valueOf(instance.isEnabled()));
    params.put(EPHEMERAL_PARAM, String.valueOf(instance.isEphemeral()));
    params.put(META_PARAM, JacksonUtils.toJson(instance.getMetadata()));

    reqApi(UtilAndComs.nacosUrlInstance, params, HttpMethod.PUT);
}
// updateInstance must issue exactly one HTTP PUT through the rest template.
@Test
void testUpdateInstance() throws Exception {
    //given
    NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
    HttpRestResult<Object> a = new HttpRestResult<Object>();
    a.setData("127.0.0.1:8848");
    a.setCode(200);
    when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenReturn(a);
    // Inject the mocked template into the proxy under test.
    final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
    nacosRestTemplateField.setAccessible(true);
    nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
    String serviceName = "service1";
    String groupName = "group1";
    Instance instance = new Instance();
    //when
    clientProxy.updateInstance(serviceName, groupName, instance);
    //then
    verify(nacosRestTemplate, times(1)).exchangeForm(any(), any(), any(), any(), eq(HttpMethod.PUT), any());
}
/**
 * Builds and compiles the adaptive-RAG state graph: the question is routed to
 * web search or vector-store retrieval, retrieved documents are graded, and
 * generation is retried or re-queried until it is judged useful.
 *
 * @return the compiled, executable graph
 * @throws Exception if graph construction or compilation fails
 */
public CompiledGraph<State> buildGraph() throws Exception {
    return new StateGraph<>(State::new)
            // Define the nodes
            .addNode("web_search", node_async(this::webSearch) )        // web search
            .addNode("retrieve", node_async(this::retrieve) )           // retrieve
            .addNode("grade_documents",  node_async(this::gradeDocuments) ) // grade documents
            .addNode("generate", node_async(this::generate) )           // generate
            .addNode("transform_query", node_async(this::transformQuery)) // transform_query
            // Build graph: entry routing decides between web search and retrieval.
            .addConditionalEdges(START,
                    edge_async(this::routeQuestion),
                    mapOf(
                        "web_search", "web_search",
                        "vectorstore", "retrieve"
                    ))
            .addEdge("web_search", "generate")
            .addEdge("retrieve", "grade_documents")
            // After grading, either rewrite the query or proceed to generation.
            .addConditionalEdges(
                    "grade_documents",
                    edge_async(this::decideToGenerate),
                    mapOf(
                        "transform_query","transform_query",
                        "generate", "generate"
                    ))
            .addEdge("transform_query", "retrieve")
            // Generation is self-correcting: retry, finish, or re-query.
            .addConditionalEdges(
                    "generate",
                    edge_async(this::gradeGeneration_v_documentsAndQuestion),
                    mapOf(
                        "not supported", "generate",
                        "useful", END,
                        "not useful", "transform_query"
                    ))
            .compile();
}
// Smoke test: the graph compiles and both PlantUML and Mermaid renderings can
// be produced (output is printed for manual inspection, not asserted).
@Test
public void getGraphTest() throws Exception {
    AdaptiveRag adaptiveRag = new AdaptiveRag(getOpenAiKey(), getTavilyApiKey());

    var graph = adaptiveRag.buildGraph();

    var plantUml = graph.getGraph( GraphRepresentation.Type.PLANTUML, "Adaptive RAG" );
    System.out.println( plantUml.getContent() );

    var mermaid = graph.getGraph( GraphRepresentation.Type.MERMAID, "Adaptive RAG" );
    System.out.println( mermaid.getContent() );
}
public static String getName(final String path) { if (path == null || path.length() == 0) { return StringUtils.EMPTY; } int unix = path.lastIndexOf("/"); int windows = path.lastIndexOf("\\"); //some macintosh file names are stored with : as the delimiter //also necessary to properly handle C:somefilename int colon = path.lastIndexOf(":"); String cand = path.substring(Math.max(colon, Math.max(unix, windows)) + 1); if (cand.equals("..") || cand.equals(".")) { return StringUtils.EMPTY; } return cand; }
// Exercises separator handling ('/', '\', ':'), trailing separators,
// "."/".." rejection, and null-like edge cases.
@Test
public void testGetName() throws Exception {
    testFilenameEquality("quick.ppt", "C:\\the\\quick.ppt");
    testFilenameEquality("quick.ppt", "/the/quick.ppt");
    testFilenameEquality("", "/the/quick/");
    testFilenameEquality("", "~/the/quick////\\\\//");
    testFilenameEquality("~~quick", "~~quick");
    testFilenameEquality("quick.ppt", "quick.ppt");
    testFilenameEquality("", "////");
    testFilenameEquality("", "C:////");
    testFilenameEquality("", "..");
    testFilenameEquality("quick", "C:////../the/D:/quick");
    testFilenameEquality("file.ppt", "path:to:file.ppt");
    testFilenameEquality("HW.txt", "_1457338542/HW.txt");
}
/**
 * Returns the string form of this path data, preserving the scheme that was
 * inferred from the original path string rather than Path's own rendering.
 */
@Override
public String toString() {
    return uriToString(uri, inferredSchemeFromPath);
}
// PathData.toString() must return the string it was constructed with, even
// though Path itself "crunches" a null-authority URI (file:///tmp -> file:/tmp).
@Test (timeout = 30000)
public void testWithStringAndConfForBuggyPath() throws Exception {
    String dirString = "file:///tmp";
    Path tmpDir = new Path(dirString);
    PathData item = new PathData(dirString, conf);
    // this may fail some day if Path is fixed to not crunch the uri
    // if the authority is null, however we need to test that the PathData
    // toString() returns the given string, while Path toString() does
    // the crunching
    assertEquals("file:/tmp", tmpDir.toString());
    checkPathData(dirString, item);
}
/**
 * Transactionally moves the given partitions to the SCHEDULED state.
 *
 * @param partitionTokens tokens of the partitions to schedule
 * @return the commit timestamp of the transaction
 */
public Timestamp updateToScheduled(List<String> partitionTokens) {
    final TransactionResult<Void> transactionResult =
        runInTransaction(
            transaction -> transaction.updateToScheduled(partitionTokens), "updateToScheduled");
    return transactionResult.getCommitTimestamp();
}
// The transaction context buffers exactly one mutation that moves the
// partition row to the SCHEDULED state.
@Test
public void testInTransactionContextUpdateToScheduled() {
    System.out.println(" update to scheduled");
    ResultSet resultSet = mock(ResultSet.class);
    when(transaction.executeQuery(any(), anyObject())).thenReturn(resultSet);
    when(resultSet.next()).thenReturn(true).thenReturn(false);
    when(resultSet.getString(any())).thenReturn(PARTITION_TOKEN);
    when(resultSet.getCurrentRowAsStruct()).thenReturn(Struct.newBuilder().build());
    // Capture the buffered mutations so their contents can be asserted.
    ArgumentCaptor<ImmutableList<Mutation>> mutations = ArgumentCaptor.forClass(ImmutableList.class);
    doNothing().when(transaction).buffer(mutations.capture());

    assertNull(inTransactionContext.updateToScheduled(Collections.singletonList(PARTITION_TOKEN)));

    assertEquals(1, mutations.getValue().size());
    Map<String, Value> mutationValueMap = mutations.getValue().iterator().next().asMap();
    assertEquals(
        PARTITION_TOKEN,
        mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_PARTITION_TOKEN).getString());
    assertEquals(
        PartitionMetadata.State.SCHEDULED.toString(),
        mutationValueMap.get(PartitionMetadataAdminDao.COLUMN_STATE).getString());
}
/**
 * Applies a domain-intent operation context: untracks resources for intents
 * being removed, tracks resources for intents being installed, then submits
 * the combined add/remove batch to the domain intent service. Success or
 * failure is reported back through the install coordinator.
 *
 * @param context the operation context holding the intents to (un)install
 */
@Override
public void apply(IntentOperationContext<DomainIntent> context) {
    Optional<IntentData> toUninstall = context.toUninstall();
    Optional<IntentData> toInstall = context.toInstall();
    List<DomainIntent> uninstallIntents = context.intentsToUninstall();
    List<DomainIntent> installIntents = context.intentsToInstall();

    // Nothing to do: report immediate success.
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }

    if (toUninstall.isPresent()) {
        IntentData intentData = toUninstall.get();
        trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
        uninstallIntents.forEach(installable ->
                trackerService.removeTrackedResources(intentData.intent().key(),
                        installable.resources()));
    }

    if (toInstall.isPresent()) {
        IntentData intentData = toInstall.get();
        trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
        installIntents.forEach(installable ->
                trackerService.addTrackedResources(intentData.key(),
                        installable.resources()));
    }

    // Generate domain Intent operations
    DomainIntentOperations.Builder builder = DomainIntentOperations.builder();
    DomainIntentOperationsContext domainOperationsContext;

    uninstallIntents.forEach(builder::remove);
    installIntents.forEach(builder::add);

    // Callback context that relays the batch outcome to the coordinator.
    domainOperationsContext = new DomainIntentOperationsContext() {
        @Override
        public void onSuccess(DomainIntentOperations idops) {
            intentInstallCoordinator.intentInstallSuccess(context);
        }

        @Override
        public void onError(DomainIntentOperations idos) {
            intentInstallCoordinator.intentInstallFailed(context);
        }
    };
    log.debug("submitting domain intent {} -> {}",
              toUninstall.map(x -> x.key().toString()).orElse("<empty>"),
              toInstall.map(x -> x.key().toString()).orElse("<empty>"));

    // Submit domain Intent operations with the domain context.
    // NOTE(review): "sumbit" is the service's published method name and
    // cannot be renamed here without touching its interface.
    domainIntentService.sumbit(builder.build(domainOperationsContext));
}
// A combined uninstall+install context must complete through the success path
// of the install coordinator.
@Test
public void testUninstallAndInstall() {
    List<Intent> intentsToUninstall = createDomainIntents();
    List<Intent> intentsToInstall = createAnotherDomainIntents();
    IntentData toUninstall = new IntentData(createP2PIntent(),
                                            IntentState.INSTALLED,
                                            new WallClockTimestamp());
    toUninstall = IntentData.compiled(toUninstall, intentsToUninstall);
    IntentData toInstall = new IntentData(createP2PIntent(),
                                          IntentState.INSTALLING,
                                          new WallClockTimestamp());
    toInstall = IntentData.compiled(toInstall, intentsToInstall);

    IntentOperationContext<DomainIntent> operationContext;
    IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
    operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);

    installer.apply(operationContext);
    assertEquals(intentInstallCoordinator.successContext, operationContext);
}
/**
 * Selects instances from the CMDB context by delegating to the
 * subclass-provided {@code doSelect} implementation.
 */
@Override
public List<T> select(CmdbContext<T> context) {
    return doSelect(context);
}
// The mock selector returns empty instances while marking every provider and
// consumer entity/instance with an "afterSelect" label as a side effect.
@Test
void testSelect() {
    CmdbContext<Instance> context = new CmdbContext<>();
    CmdbContext.CmdbInstance<Instance> provider = new CmdbContext.CmdbInstance<>();
    provider.setInstance(new Instance());
    provider.setEntity(new Entity());
    context.setProviders(Collections.singletonList(provider));
    CmdbContext.CmdbInstance<Instance> consumer = new CmdbContext.CmdbInstance<>();
    consumer.setInstance(new Instance());
    consumer.setEntity(new Entity());
    context.setConsumer(consumer);

    List<Instance> actual = new MockCmdbSelector().select(context);

    assertNull(actual.get(0).getIp());
    assertTrue(actual.get(0).getMetadata().isEmpty());
    assertEquals("true", provider.getInstance().getMetadata().get("afterSelect"));
    assertEquals("true", provider.getEntity().getLabels().get("afterSelect"));
    assertEquals("true", consumer.getInstance().getMetadata().get("afterSelect"));
    assertEquals("true", consumer.getEntity().getLabels().get("afterSelect"));
}
/**
 * Returns an immutable snapshot of all nodes currently in the COMPLETE state.
 */
@Override
public Set<OpenstackNode> completeNodes() {
    ImmutableSet.Builder<OpenstackNode> complete = ImmutableSet.builder();
    for (OpenstackNode osNode : osNodeStore.nodes()) {
        if (Objects.equals(osNode.state(), COMPLETE)) {
            complete.add(osNode);
        }
    }
    return complete.build();
}
// Exactly the two COMPLETE nodes (COMPUTE_3, GATEWAY_1) are reported.
@Test
public void testGetCompleteNodes() {
    assertEquals(ERR_SIZE, 2, target.completeNodes().size());
    assertTrue(ERR_NOT_FOUND, target.completeNodes().contains(COMPUTE_3));
    assertTrue(ERR_NOT_FOUND, target.completeNodes().contains(GATEWAY_1));
}
/**
 * Registers the "number of deletes in immutable memtables" RocksDB gauge
 * metric with the streams metrics registry.
 *
 * @param streamsMetrics the metrics registry to register with
 * @param metricContext  identifies the RocksDB instance the metric belongs to
 * @param valueProvider  supplies the current metric value
 */
public static void addNumDeletesImmMemTablesMetric(final StreamsMetricsImpl streamsMetrics,
                                                   final RocksDBMetricContext metricContext,
                                                   final Gauge<BigInteger> valueProvider) {
    addMutableMetric(
        streamsMetrics,
        metricContext,
        valueProvider,
        NUMBER_OF_DELETES_IMMUTABLE_MEMTABLES,
        NUMBER_OF_DELETES_IMMUTABLE_MEMTABLES_DESCRIPTION
    );
}
// The gauge is registered under the expected metric name and description.
@Test
public void shouldAddNumDeletesImmutableMemTablesMetric() {
    final String name = "num-deletes-imm-mem-tables";
    final String description = "Total number of delete entries in the unflushed immutable memtables";
    runAndVerifyMutableMetric(
        name,
        description,
        () -> RocksDBMetrics.addNumDeletesImmMemTablesMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
    );
}
/**
 * Exchanges an OAuth authorization code for the social platform's user info.
 *
 * @param socialType social platform type (see SocialTypeEnum)
 * @param userType   the kind of local user performing the login
 * @param code       OAuth authorization code returned by the platform
 * @param state      OAuth state parameter for CSRF protection
 * @return the authenticated social user
 * @throws com.... exception(SOCIAL_USER_AUTH_FAILURE) when the platform rejects the login
 */
@Override
public AuthUser getAuthUser(Integer socialType, Integer userType, String code, String state) {
    // Build the request.
    AuthRequest authRequest = buildAuthRequest(socialType, userType);
    AuthCallback authCallback = AuthCallback.builder().code(code).state(state).build();
    // Execute the request.
    AuthResponse<?> authResponse = authRequest.login(authCallback);
    log.info("[getAuthUser][请求社交平台 type({}) request({}) response({})]", socialType,
            toJsonString(authCallback), toJsonString(authResponse));
    if (!authResponse.ok()) {
        throw exception(SOCIAL_USER_AUTH_FAILURE, authResponse.getMsg());
    }
    return (AuthUser) authResponse.getData();
}
// A successful platform login (code 2000) returns the platform's AuthUser
// unchanged, and the callback carries the provided code/state.
@Test
public void testAuthSocialUser_success() {
    // Prepare parameters.
    Integer socialType = SocialTypeEnum.WECHAT_MP.getType();
    Integer userType = randomPojo(UserTypeEnum.class).getValue();
    String code = randomString();
    String state = randomString();
    // Mock the AuthRequest.
    AuthRequest authRequest = mock(AuthRequest.class);
    when(authRequestFactory.get(eq("WECHAT_MP"))).thenReturn(authRequest);
    // Mock the AuthResponse.
    AuthUser authUser = randomPojo(AuthUser.class);
    AuthResponse<?> authResponse = new AuthResponse<>(2000, null, authUser);
    when(authRequest.login(argThat(authCallback -> {
        assertEquals(code, authCallback.getCode());
        assertEquals(state, authCallback.getState());
        return true;
    }))).thenReturn(authResponse);

    // Invoke.
    AuthUser result = socialClientService.getAuthUser(socialType, userType, code, state);
    // Assert.
    assertSame(authUser, result);
}
/**
 * Adds a one-year {@code Cache-Control} header to responses for cacheable
 * resource requests and strips any legacy {@code Expires} header, then
 * continues the filter chain.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {

    // Check the request path and set the cache-control header if we find
    // it matches what we're looking for.
    if (request instanceof HttpServletRequest) {
        if (isCacheableResourceRequest((HttpServletRequest)request)) {
            HttpServletResponse httpServletResponse = (HttpServletResponse) response;

            //
            // Set the expiry to one year.
            //
            // Note that this does NOT mean that the browser will never send a request
            // for these resources. If you click reload in the browser (def in Chrome) it will
            // send an If-Modified-Since request to the server (at a minimum), which means you at
            // least have the request overhead even if it results in a 304 response. Setting the
            // Cache-Control header helps for normal browsing (clicking on links, bookmarks etc),
            // in which case the local cache is fully used (no If-Modified-Since requests for
            // non-stale resources).
            //
            httpServletResponse.setHeader("Cache-Control", "public, max-age=31536000");

            response = new HttpServletResponseWrapper(httpServletResponse) {
                @Override
                public void setHeader(String name, String value) {
                    // Block the setting of the legacy HTTP/1.0 "Expires" header.
                    // Note that, strictly speaking, this should not be required because
                    // the HTTP spec dictates that the Cache-Control header takes priority.
                    // Lets eliminate it anyway in case a browser/intermediary doesn't comply.
                    if (!name.equalsIgnoreCase("Expires")) {
                        super.setHeader(name, value);
                    }
                }
            };
        }
    }

    // continue to execute the filer chain as normal
    chain.doFilter(request, response);
}
// A cacheable resource path (*.js) must get the one-year Cache-Control header.
@Test
public void test_cache_control_set() throws IOException, ServletException {
    Mockito.when(servletRequest.getPathInfo()).thenReturn("/a/b/c.js");
    resourceCacheControl.doFilter(servletRequest, servletResponse, filterChain);
    Mockito.verify(servletResponse).setHeader("Cache-Control", "public, max-age=31536000");
}
/**
 * Wires the application: registers {@link MyResource} with Jersey.
 */
@Override
public void run(MyConfiguration configuration, Environment environment) {
    environment.jersey().register(MyResource.class);
}
// run() must register MyResource with the Jersey environment.
@Test
void buildsMyResource() throws Exception {
    application.run(config, environment);
    verify(jersey).register(eq(MyResource.class));
}
/**
 * FEEL all() function: returns false if any item is false, true if all items
 * are true, and null if the list mixes true values with nulls. A null list or
 * a non-Boolean element is an error. An empty list yields true.
 *
 * @param list the values to test; elements may be Boolean or null
 * @return the three-valued result, or an invalid-parameters error
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    boolean result = true;
    boolean containsNull = false;
    // Spec. definition: return false if any item is false, else true if all items are true, else null
    // Note the whole list is validated for non-Boolean elements even after the
    // result is already determined, so the error takes precedence.
    for ( final Object element : list ) {
        if (element != null && !(element instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean"));
        } else {
            if (element != null) {
                result &= (Boolean) element;
            } else if (!containsNull) {
                containsNull = true;
            }
        }
    }
    if (containsNull && result) {
        // All seen booleans were true but at least one null was present: unknown.
        return FEELFnResult.ofResult( null );
    } else {
        return FEELFnResult.ofResult( result );
    }
}
// all() over an empty list is vacuously true.
@Test
void invokeListParamEmptyList() {
    FunctionTestUtil.assertResult(allFunction.invoke(Collections.emptyList()), true);
}
/**
 * Schedules a one-shot task to run after the given delay (converted to
 * milliseconds) on the controllable queue.
 *
 * @param command the task to run
 * @param delay   delay before execution
 * @param unit    time unit of the delay
 * @return a future representing the scheduled task
 */
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
    ScheduledTask<Void> task = new ScheduledTask<Void>(command);
    mQueue.add(TimeUnit.MILLISECONDS.convert(delay, unit), task);
    return task;
}
// A task scheduled 5 hours out does not run until virtual time is advanced
// past its delay, after which it runs exactly once.
@Test
public void schedule() throws Exception {
    CountTask task = new CountTask();
    ControllableScheduler scheduler = new ControllableScheduler();
    scheduler.schedule(task, 5, TimeUnit.HOURS);
    assertTrue(scheduler.schedulerIsIdle());
    scheduler.jumpAndExecute(5, TimeUnit.HOURS);
    assertEquals(1, task.runTimes());
}
/**
 * Loads a notification message by its id, or null if it does not exist.
 */
@Override
public NotifyMessageDO getNotifyMessage(Long id) {
    return notifyMessageMapper.selectById(id);
}
// A persisted message is retrievable by id with all fields intact.
@Test
public void testGetNotifyMessage() {
    // Mock data.
    NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class,
            o -> o.setTemplateParams(randomTemplateParams()));
    notifyMessageMapper.insert(dbNotifyMessage);
    // Prepare parameters.
    Long id = dbNotifyMessage.getId();

    // Invoke.
    NotifyMessageDO notifyMessage = notifyMessageService.getNotifyMessage(id);
    assertPojoEquals(dbNotifyMessage, notifyMessage);
}
/**
 * Resolves this machine's host name via {@link InetAddress#getLocalHost()},
 * falling back to "Unknown" when resolution fails.
 */
@SuppressForbidden("Deliberate invocation")
public static String getLocalHostname() {
    try {
        return InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException ex) {
        return "Unknown";
    }
}
// The hostname is never empty: either the real name or the "Unknown" fallback.
@Test
public void testGetLocalHostname() {
    String hostname = Tools.getLocalHostname();
    assertFalse(hostname.isEmpty());
}
/**
 * Registers the scalar UDF factory if absent and returns the registered
 * instance. Rejects names already registered as aggregate or table functions
 * and factories incompatible with an existing registration for the same name.
 *
 * @param factory the factory to register
 * @return the previously registered compatible factory, or {@code factory}
 * @throws KsqlException on name clashes or incompatible re-registration
 */
@Override
public synchronized UdfFactory ensureFunctionFactory(final UdfFactory factory) {
    validateFunctionName(factory.getName());

    // Function lookup is case-insensitive: keys are stored upper-cased.
    final String functionName = factory.getName().toUpperCase();
    if (udafs.containsKey(functionName)) {
        throw new KsqlException("UdfFactory already registered as aggregate: " + functionName);
    }
    if (udtfs.containsKey(functionName)) {
        throw new KsqlException("UdfFactory already registered as table function: " + functionName);
    }

    final UdfFactory existing = udfs.putIfAbsent(functionName, factory);
    if (existing != null && !existing.matches(factory)) {
        throw new KsqlException("UdfFactory not compatible with existing factory."
            + " function: " + functionName
            + " existing: " + existing
            + ", factory: " + factory);
    }

    return existing == null ? factory : existing;
}
// Registration of a factory with an invalid (whitespace-containing) name must
// be rejected by name validation.
@Test
public void shouldThrowOnInvalidUdfFunctionName() {
    // Given:
    when(udfFactory.getName()).thenReturn("i am invalid");

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> functionRegistry.ensureFunctionFactory(udfFactory)
    );

    // Then:
    assertThat(e.getMessage(), containsString("is not a valid function name"));
}
/**
 * Shuts down both backing stores; the supplied callback fires only after
 * both shutdowns complete (fanned in via a two-count MultiCallback).
 */
@Override
public void shutdown(Callback<None> shutdown) {
    MultiCallback multiCallback = new MultiCallback(shutdown, 2);
    _fsStore.shutdown(multiCallback);
    _zkAwareStore.shutdown(multiCallback);
}
// Lifecycle of the last-seen store: data written while ZK is up must be served
// from the local file cache while ZK is down, and refreshed once ZK returns.
// (The fail() message below was split across two lines by extraction and has
// been rejoined into a single legal string literal.)
@Test
public void testLastSeenLifeCycle()
        throws InterruptedException, ExecutionException, TimeoutException, IOException, PropertyStoreException {
    createZKServer();

    // Fill the store with data
    File dataPath = ZKTestUtil.createTempDir("randomFileDataPath");
    LastSeenZKStore<String> store = ZkStoreTestOnlyUtil.getLastSeenZKStore(dataPath.getPath(), PORT);
    ZooKeeperEphemeralStore<String> storeWriter = ZkStoreTestOnlyUtil.getZooKeeperEphemeralStore(PORT);
    storeWriter.put(TEST_ZK_PROP_NAME, "randomData");
    PropertyEventBusImpl<String> propertyEventBus = new PropertyEventBusImpl<>(Executors.newSingleThreadExecutor());
    propertyEventBus.setPublisher(store);
    CountDownLatch initializedLatch = new CountDownLatch(1);
    propertyEventBus.register(Collections.singleton(TEST_ZK_PROP_NAME),
            new LatchSubscriber(initializedLatch, null));
    initializedLatch.await(5, TimeUnit.SECONDS);
    if (initializedLatch.getCount() != 0) {
        fail("Initialized not received");
    }

    // stopping ZK without removing data. This make ZK unreachable
    _zkServer.shutdown(false);

    // create new last seen, without ZK Connection, and see if it fetches from the server
    store = ZkStoreTestOnlyUtil.getLastSeenZKStore(dataPath.getPath(), PORT);
    propertyEventBus = new PropertyEventBusImpl<>(Executors.newSingleThreadExecutor());
    propertyEventBus.setPublisher(store);
    CountDownLatch initializedLatch2 = new CountDownLatch(1);
    CountDownLatch addLatch2 = new CountDownLatch(1);
    propertyEventBus.register(Collections.singleton(TEST_ZK_PROP_NAME),
            new LatchSubscriber(initializedLatch2, addLatch2));
    initializedLatch2.await(5, TimeUnit.SECONDS);
    if (initializedLatch2.getCount() != 0) {
        fail("Initialized not received");
    }
    if (addLatch2.getCount() != 1) {
        fail("The add latch should have not been invoked yet");
    }

    // restart ZK and see if it reads the most updated value, the most updated value in this case is identical
    _zkServer.restart();
    addLatch2.await(50, TimeUnit.SECONDS);
    if (addLatch2.getCount() != 0) {
        fail("When ZK restarted we didn't read the most updated value from ZK");
    }

    // shutting everything down
    final FutureCallback<None> shutdownCallback = new FutureCallback<>();
    store.shutdown(shutdownCallback);
    shutdownCallback.get(5, TimeUnit.SECONDS);

    final FutureCallback<None> shutdownCallback2 = new FutureCallback<>();
    storeWriter.shutdown(shutdownCallback2);
    shutdownCallback2.get(5, TimeUnit.SECONDS);
    _zkServer.shutdown();
}
/**
 * Returns the first (lowest) contiguous range in the set as an open-closed
 * range, or null when the set is empty.
 */
@Override
public Range<T> firstRange() {
    if (rangeBitSetMap.isEmpty()) {
        return null;
    }
    Entry<Long, BitSet> firstSet = rangeBitSetMap.firstEntry();
    int lower = firstSet.getValue().nextSetBit(0);
    // upper = end of the first contiguous run of set bits, clamped to lower.
    int upper = Math.max(lower, firstSet.getValue().nextClearBit(lower) - 1);
    // NOTE(review): if the first BitSet has no set bit, nextSetBit returns -1
    // and lower - 1 is -2 — presumably empty BitSets are never stored here;
    // verify against the add/remove paths.
    return Range.openClosed(consumer.apply(firstSet.getKey(), lower - 1),
            consumer.apply(firstSet.getKey(), upper));
}
// firstRange(): empty set yields null; overlapping additions merge into one
// range; a strictly lower range becomes the new first range.
@Test
public void testFirstRange() {
    OpenLongPairRangeSet<LongPair> set = new OpenLongPairRangeSet<>(consumer);
    assertNull(set.firstRange());
    Range<LongPair> range = Range.openClosed(new LongPair(0, 97), new LongPair(0, 99));
    set.add(range);
    assertEquals(set.firstRange(), range);
    assertEquals(set.size(), 1);
    // Overlapping range merges with the existing one.
    range = Range.openClosed(new LongPair(0, 98), new LongPair(0, 105));
    set.add(range);
    assertEquals(set.firstRange(), Range.openClosed(new LongPair(0, 97), new LongPair(0, 105)));
    assertEquals(set.size(), 1);
    // A disjoint, lower range becomes the new first range.
    range = Range.openClosed(new LongPair(0, 5), new LongPair(0, 75));
    set.add(range);
    assertEquals(set.firstRange(), range);
    assertEquals(set.size(), 2);
}