focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Executes the given SQL and returns its result set.
 *
 * @param sql the statement to execute; must produce a query result
 * @return the result set wrapping the query's rows
 * @throws SQLException if the statement does not produce a query result
 */
@Override
public ResultSet executeQuery(String sql) throws SQLException {
    final StatementResult statementResult = executeInternal(sql);
    if (statementResult.isQueryResult()) {
        currentResults = new FlinkResultSet(this, statementResult);
        hasResults = true;
        return currentResults;
    }
    // Not a query: release the result before reporting the error
    statementResult.close();
    throw new SQLException(String.format("Statement[%s] is not a query.", sql));
}
// End-to-end check of FlinkStatement#executeQuery: DDL/DML routing, the
// job-id result set returned by INSERT, time-zone sensitive reads of
// timestamp_ltz columns, and SHOW JOBS bookkeeping.
@Test
@Timeout(value = 60)
public void testExecuteQuery() throws Exception {
    try (FlinkConnection connection = new FlinkConnection(getDriverUri())) {
        try (Statement statement = connection.createStatement()) {
            // CREATE TABLE is not a query and has no results
            assertFalse(
                    statement.execute(
                            String.format(
                                    "CREATE TABLE test_table(id bigint, val int, str string, timestamp1 timestamp(0), timestamp2 timestamp_ltz(3), time_data time, date_data date) "
                                            + "with ("
                                            + "'connector'='filesystem',\n"
                                            + "'format'='csv',\n"
                                            + "'path'='%s')",
                                    tempDir)));
            assertEquals(0, statement.getUpdateCount());
            // INSERT TABLE returns job id
            assertTrue(
                    statement.execute(
                            "INSERT INTO test_table VALUES "
                                    + "(1, 11, '111', TIMESTAMP '2021-04-15 23:18:36', TO_TIMESTAMP_LTZ(400000000000, 3), TIME '12:32:00', DATE '2023-11-02'), "
                                    + "(3, 33, '333', TIMESTAMP '2021-04-16 23:18:36', TO_TIMESTAMP_LTZ(500000000000, 3), TIME '13:32:00', DATE '2023-12-02'), "
                                    + "(2, 22, '222', TIMESTAMP '2021-04-17 23:18:36', TO_TIMESTAMP_LTZ(600000000000, 3), TIME '14:32:00', DATE '2023-01-02'), "
                                    + "(4, 44, '444', TIMESTAMP '2021-04-18 23:18:36', TO_TIMESTAMP_LTZ(700000000000, 3), TIME '15:32:00', DATE '2023-02-02')"));
            // INSERT is treated as a query (job-id result), so update count is unsupported
            assertThatThrownBy(statement::getUpdateCount)
                    .isInstanceOf(SQLFeatureNotSupportedException.class)
                    .hasMessage("FlinkStatement#getUpdateCount is not supported for query");
            String jobId;
            try (ResultSet resultSet = statement.getResultSet()) {
                assertTrue(resultSet.next());
                assertEquals(1, resultSet.getMetaData().getColumnCount());
                jobId = resultSet.getString("job id");
                assertEquals(jobId, resultSet.getString(1));
                assertFalse(resultSet.next());
            }
            assertNotNull(jobId);
            // Wait job finished
            boolean jobFinished = false;
            while (!jobFinished) {
                assertTrue(statement.execute("SHOW JOBS"));
                try (ResultSet resultSet = statement.getResultSet()) {
                    while (resultSet.next()) {
                        if (resultSet.getString(1).equals(jobId)) {
                            if (resultSet.getString(3).equals("FINISHED")) {
                                jobFinished = true;
                                break;
                            }
                        }
                    }
                }
            }
            // SELECT all data from test_table
            statement.execute("SET 'table.local-time-zone' = 'UTC'");
            try (ResultSet resultSet = statement.executeQuery("SELECT * FROM test_table")) {
                assertEquals(7, resultSet.getMetaData().getColumnCount());
                List<String> resultList = new ArrayList<>();
                while (resultSet.next()) {
                    // Index-based and label-based accessors must agree for every column
                    assertEquals(resultSet.getLong("id"), resultSet.getLong(1));
                    assertEquals(resultSet.getInt("val"), resultSet.getInt(2));
                    assertEquals(resultSet.getString("str"), resultSet.getString(3));
                    assertEquals(resultSet.getTimestamp("timestamp1"), resultSet.getObject(4));
                    assertEquals(resultSet.getObject("timestamp2"), resultSet.getTimestamp(5));
                    assertEquals(resultSet.getObject("time_data"), resultSet.getTime(6));
                    assertEquals(resultSet.getObject("date_data"), resultSet.getDate(7));
                    resultList.add(
                            String.format(
                                    "%s,%s,%s,%s,%s,%s,%s",
                                    resultSet.getLong("id"),
                                    resultSet.getInt("val"),
                                    resultSet.getString("str"),
                                    resultSet.getTimestamp("timestamp1"),
                                    resultSet.getTimestamp("timestamp2"),
                                    resultSet.getTime("time_data"),
                                    resultSet.getDate("date_data")));
                }
                assertThat(resultList)
                        .containsExactlyInAnyOrder(
                                "1,11,111,2021-04-15 23:18:36.0,1982-09-04 15:06:40.0,12:32:00,2023-11-02",
                                "3,33,333,2021-04-16 23:18:36.0,1985-11-05 00:53:20.0,13:32:00,2023-12-02",
                                "2,22,222,2021-04-17 23:18:36.0,1989-01-05 10:40:00.0,14:32:00,2023-01-02",
                                "4,44,444,2021-04-18 23:18:36.0,1992-03-07 20:26:40.0,15:32:00,2023-02-02");
            }
            // SELECT all data from test_table with local time zone
            statement.execute("SET 'table.local-time-zone' = 'Asia/Shanghai'");
            try (ResultSet resultSet = statement.executeQuery("SELECT * FROM test_table")) {
                assertEquals(7, resultSet.getMetaData().getColumnCount());
                List<String> resultList = new ArrayList<>();
                while (resultSet.next()) {
                    resultList.add(
                            String.format(
                                    "%s,%s",
                                    resultSet.getTimestamp("timestamp1"),
                                    resultSet.getTimestamp("timestamp2")));
                }
                // timestamp_ltz values shift with the session time zone; plain timestamps do not
                assertThat(resultList)
                        .containsExactlyInAnyOrder(
                                "2021-04-15 23:18:36.0,1982-09-04 23:06:40.0",
                                "2021-04-16 23:18:36.0,1985-11-05 08:53:20.0",
                                "2021-04-17 23:18:36.0,1989-01-05 18:40:00.0",
                                "2021-04-18 23:18:36.0,1992-03-08 04:26:40.0");
            }
            assertTrue(statement.execute("SHOW JOBS"));
            try (ResultSet resultSet = statement.getResultSet()) {
                // Check all three jobs (one INSERT and two SELECTs) have finished.
                int count = 0;
                while (resultSet.next()) {
                    assertEquals("FINISHED", resultSet.getString(3));
                    count++;
                }
                assertEquals(3, count);
            }
        }
    }
}
public Optional<String> validate(MonitoringInfo monitoringInfo) { if (monitoringInfo.getUrn().isEmpty() || monitoringInfo.getType().isEmpty()) { return Optional.of( String.format( "MonitoringInfo requires both urn %s and type %s to be specified.", monitoringInfo.getUrn(), monitoringInfo.getType())); } // Skip checking unknown MonitoringInfos Map<String, Set<String>> typeToRequiredLabels = REQUIRED_LABELS.get(monitoringInfo.getUrn()); if (typeToRequiredLabels == null) { return Optional.empty(); } Set<String> requiredLabels = typeToRequiredLabels.get(monitoringInfo.getType()); if (requiredLabels == null) { return Optional.empty(); } // TODO(ajamato): Tighten this restriction to use set equality, to catch unused if (!monitoringInfo.getLabelsMap().keySet().containsAll(requiredLabels)) { return Optional.of( String.format( "MonitoringInfo with urn: %s should have labels: %s, actual: %s", monitoringInfo.getUrn(), requiredLabels, monitoringInfo.getLabelsMap())); } return Optional.empty(); }
// A MonitoringInfo whose required label carries an unexpected value set-up
// must still be reported as invalid by the validator.
@Test
public void validateReturnsErrorOnInvalidMonitoringInfoLabels() {
    SpecMonitoringInfoValidator validator = new SpecMonitoringInfoValidator();
    MonitoringInfo input =
            MonitoringInfo.newBuilder()
                    .setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT)
                    .setType(TypeUrns.SUM_INT64_TYPE)
                    .putLabels(MonitoringInfoConstants.Labels.PTRANSFORM, "unexpectedLabel")
                    .build();
    assertTrue(validator.validate(input).isPresent());
}
/**
 * Parses an OAuth error payload into an {@link ErrorResponse}.
 *
 * @param code the HTTP status code of the response
 * @param json the raw JSON error body
 * @return the parsed error response
 */
public static ErrorResponse fromJson(int code, String json) {
    return JsonUtil.parse(json, errorNode -> OAuthErrorResponseParser.fromJson(code, errorNode));
}
// Parses a standard OAuth2 error document and checks the mapped response.
@Test
public void testOAuthErrorResponseFromJson() {
    String type = OAuth2Properties.INVALID_CLIENT_ERROR;
    String message = "Credentials given were invalid";
    String errorUri = "http://iceberg.apache.org";
    int responseCode = 400;
    String json =
            String.format(
                    "{\"error\":\"%s\",\"error_description\":\"%s\",\"error_uri\":\"%s\"}",
                    type, message, errorUri);
    // Note: the expected response is built without the uri field
    ErrorResponse expected =
            ErrorResponse.builder()
                    .responseCode(responseCode)
                    .withType(type)
                    .withMessage(message)
                    .build();
    assertEquals(expected, OAuthErrorResponseParser.fromJson(responseCode, json));
}
/**
 * Formats a notification into an email message, or returns {@code null}
 * when the notification is not about changes on the recipient's issues.
 * Analysis-triggered changes get the single-project layout; other changes
 * get the multi-project layout.
 */
@Override
@CheckForNull
public EmailMessage format(Notification notif) {
    if (notif instanceof ChangesOnMyIssuesNotification) {
        ChangesOnMyIssuesNotification changesNotification = (ChangesOnMyIssuesNotification) notif;
        if (!(changesNotification.getChange() instanceof AnalysisChange)) {
            return formatMultiProject(changesNotification);
        }
        checkState(!changesNotification.getChangedIssues().isEmpty(), "changedIssues can't be empty");
        return formatAnalysisNotification(
            changesNotification.getChangedIssues().keySet().iterator().next(), changesNotification);
    }
    return null;
}
@Test public void format_set_html_message_with_issue_status_title_handles_plural_when_change_from_analysis() { Project project = newProject("foo"); Rule rule = newRandomNotAHotspotRule("bar"); Set<ChangedIssue> closedIssues = IntStream.range(0, 2 + new Random().nextInt(5)) .mapToObj(status -> newChangedIssue(status + "", STATUS_CLOSED, project, rule)) .collect(toSet()); Set<ChangedIssue> openIssues = IntStream.range(0, 2 + new Random().nextInt(5)) .mapToObj(status -> newChangedIssue(status + "", STATUS_OPEN, project, rule)) .collect(toSet()); AnalysisChange analysisChange = newAnalysisChange(); EmailMessage closedIssuesMessage = underTest.format(new ChangesOnMyIssuesNotification(analysisChange, closedIssues)); EmailMessage openIssuesMessage = underTest.format(new ChangesOnMyIssuesNotification(analysisChange, openIssues)); HtmlListAssert htmlListAssert = HtmlFragmentAssert.assertThat(closedIssuesMessage.getMessage()) .hasParagraph().hasParagraph() // skip header .hasParagraph("Closed issues:") .hasList(); verifyEnd(htmlListAssert); htmlListAssert = HtmlFragmentAssert.assertThat(openIssuesMessage.getMessage()) .hasParagraph().hasParagraph() // skip header .hasParagraph("Open issues:") .hasList(); verifyEnd(htmlListAssert); }
/**
 * Re-reads the NameNode (and lifeline) RPC addresses from the given
 * configuration and refreshes the block pool services accordingly.
 *
 * @param conf configuration holding the nameservice address settings
 * @throws IOException if no NameNode service addresses can be resolved
 */
void refreshNamenodes(Configuration conf) throws IOException {
  LOG.info("Refresh request received for nameservices: " + conf.get
          (DFSConfigKeys.DFS_NAMESERVICES));

  Map<String, Map<String, InetSocketAddress>> newAddressMap = null;
  Map<String, Map<String, InetSocketAddress>> newLifelineAddressMap = null;
  IOException addressFailure = null;

  try {
    newAddressMap =
        DFSUtil.getNNServiceRpcAddressesForCluster(conf);
    newLifelineAddressMap =
        DFSUtil.getNNLifelineRpcAddressesForCluster(conf);
  } catch (IOException ioe) {
    LOG.warn("Unable to get NameNode addresses.", ioe);
    addressFailure = ioe;
  }

  if (newAddressMap == null || newAddressMap.isEmpty()) {
    // Chain the original failure (if any) so callers see the root cause
    // instead of only the generic message.
    throw new IOException("No services to connect, missing NameNode "
        + "address.", addressFailure);
  }

  synchronized (refreshNamenodesLock) {
    doRefreshNamenodes(newAddressMap, newLifelineAddressMap);
  }
}
// A single default-FS nameservice must result in exactly one BPOS creation.
@Test
public void testSimpleSingleNS() throws Exception {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://mock1:8020");

    bpm.refreshNamenodes(conf);

    assertEquals("create #1\n", log.toString());
}
/**
 * Converts a command-line parameter value into a {@link URL}.
 *
 * @param value the raw parameter value
 * @return the parsed URL
 * @throws ParameterException if the value is blank or not a valid URL
 */
public URL convert(String value) {
    if (isBlank(value)) {
        throw new ParameterException(getErrorString("a blank value", "a valid URL"));
    }
    try {
        return URLUtil.parseURL(value);
    }
    catch (IllegalArgumentException ignored) {
        // Re-reported as a parameter error; the parse failure itself is not chained.
        throw new ParameterException(getErrorString(value, "a valid URL"));
    }
}
@Test public void urlIsCreatedFromFilePath() { URL url = converter.convert("/path/to/something"); // on *ux the path part of the URL is equal to the given path // on Windows C: is prepended, which is expected assertThat(url.getPath(), endsWith("/path/to/something")); }
/**
 * Handles the telnet "select" command: picks one of the candidate methods
 * listed by a preceding "invoke" command and re-runs the invocation.
 *
 * @param commandContext the current telnet session context
 * @param args args[0] is the 1-based index of the method to invoke
 * @return the invocation result, or a usage/error message
 */
@Override
public String execute(CommandContext commandContext, String[] args) {
    if (ArrayUtils.isEmpty(args)) {
        return "Please input the index of the method you want to invoke, eg: \r\n select 1";
    }
    Channel channel = commandContext.getRemote();
    String message = args[0];
    List<Method> methodList = channel.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY).get();
    if (CollectionUtils.isEmpty(methodList)) {
        return "Please use the invoke command first.";
    }
    if (!StringUtils.isNumber(message)) {
        return "Illegal index ,please input select 1~" + methodList.size();
    }
    // Parse once instead of three times; index is 1-based for the user.
    int index = Integer.parseInt(message);
    if (index < 1 || index > methodList.size()) {
        return "Illegal index ,please input select 1~" + methodList.size();
    }
    Method method = methodList.get(index - 1);
    channel.attr(SELECT_METHOD_KEY).set(method);
    channel.attr(SELECT_KEY).set(Boolean.TRUE);
    // Replay the original invoke message against the selected method.
    String invokeMessage = channel.attr(InvokeTelnet.INVOKE_MESSAGE_KEY).get();
    return invokeTelnet.execute(commandContext, new String[] {invokeMessage});
}
// With SERVICE_KEY present but no INVOKE_METHOD_LIST_KEY on the channel,
// "select" must tell the user to run "invoke" first.
@Test
void testInvokeWithoutMethodList() throws RemotingException {
    defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).set(DemoService.class.getName());
    defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY).set(null);
    given(mockChannel.attr(ChangeTelnet.SERVICE_KEY))
            .willReturn(defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY));
    given(mockChannel.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY))
            .willReturn(defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY));
    registerProvider(DemoService.class.getName(), new DemoServiceImpl(), DemoService.class);

    String result = select.execute(mockCommandContext, new String[] {"1"});
    assertTrue(result.contains("Please use the invoke command first."));

    // Clean up shared attribute state so other tests start from scratch.
    defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).remove();
    defaultAttributeMap.attr(InvokeTelnet.INVOKE_METHOD_LIST_KEY).remove();
}
/**
 * Applies a batch of feature-level updates, producing one ApiError per
 * feature. When validateOnly is set, the generated records are discarded.
 *
 * @param updates requested feature name to new level
 * @param upgradeTypes per-feature upgrade type; defaults to UPGRADE
 * @param validateOnly if true, validate without emitting records
 * @return per-feature errors, plus the records when not validating only
 */
ControllerResult<Map<String, ApiError>> updateFeatures(
    Map<String, Short> updates,
    Map<String, FeatureUpdate.UpgradeType> upgradeTypes,
    boolean validateOnly
) {
    TreeMap<String, ApiError> results = new TreeMap<>();
    List<ApiMessageAndVersion> records =
        BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);
    for (Entry<String, Short> update : updates.entrySet()) {
        String feature = update.getKey();
        FeatureUpdate.UpgradeType upgradeType =
            upgradeTypes.getOrDefault(feature, FeatureUpdate.UpgradeType.UPGRADE);
        results.put(feature, updateFeature(feature, update.getValue(), upgradeType, records));
    }
    return validateOnly
        ? ControllerResult.of(Collections.emptyList(), results)
        : ControllerResult.atomicOf(records, results);
}
// An "unsafe downgrade" whose target is actually a higher level must be
// rejected with INVALID_UPDATE_VERSION and produce no records.
@Test
public void testCannotUnsafeDowngradeToHigherVersion() {
    FeatureControlManager manager = TEST_MANAGER_BUILDER1.build();
    ApiError expectedError = new ApiError(Errors.INVALID_UPDATE_VERSION,
        "Invalid update version 7 for feature metadata.version. Can't downgrade to a " +
        "newer version.");
    ControllerResult<Map<String, ApiError>> result = manager.updateFeatures(
        singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV3.featureLevel()),
        singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE),
        true);
    assertEquals(ControllerResult.of(Collections.emptyList(),
        singletonMap(MetadataVersion.FEATURE_NAME, expectedError)), result);
}
public static WorkflowInstanceAggregatedInfo computeAggregatedView( WorkflowInstance workflowInstance, boolean statusKnown) { if (workflowInstance == null) { // returning empty object since cannot access state of the current instance run return new WorkflowInstanceAggregatedInfo(); } WorkflowInstanceAggregatedInfo instanceAggregated = computeAggregatedViewNoStatus(workflowInstance); if (statusKnown || workflowInstance.getAggregatedInfo() == null) { instanceAggregated.setWorkflowInstanceStatus(workflowInstance.getStatus()); } else { computeAndSetAggregatedInstanceStatus(workflowInstance, instanceAggregated); } return instanceAggregated; }
// Aggregation across two runs: run2 restarts only step3 from an incomplete
// run1, so step1/step2 are inherited from run1's aggregated info while
// step3 reflects run2's successful re-execution.
@Test
public void testAggregatedViewFailed() {
    WorkflowInstance run1 = getGenericWorkflowInstance(
        1, WorkflowInstance.Status.FAILED, RunPolicy.START_FRESH_NEW_RUN, null);
    Workflow runtimeWorkflow = mock(Workflow.class);
    Map<String, StepRuntimeState> decodedOverview = new LinkedHashMap<>();
    decodedOverview.put("step1", generateStepState(StepInstance.Status.SUCCEEDED, 1L, 2L));
    decodedOverview.put("step2", generateStepState(StepInstance.Status.SUCCEEDED, 3L, 4L));
    decodedOverview.put("step3", generateStepState(StepInstance.Status.FATALLY_FAILED, 5L, 6L));
    WorkflowRuntimeOverview wro = mock(WorkflowRuntimeOverview.class);
    doReturn(decodedOverview).when(wro).decodeStepOverview(run1.getRuntimeDag());
    run1.setRuntimeOverview(wro);
    run1.setRuntimeWorkflow(runtimeWorkflow);

    // Run 1: aggregated status follows the failed run.
    WorkflowInstanceAggregatedInfo aggregated =
        AggregatedViewHelper.computeAggregatedView(run1, false);
    assertEquals(1L, aggregated.getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(3L, aggregated.getStepAggregatedViews().get("step2").getStartTime().longValue());
    assertEquals(5L, aggregated.getStepAggregatedViews().get("step3").getStartTime().longValue());
    assertEquals(WorkflowInstance.Status.FAILED, aggregated.getWorkflowInstanceStatus());

    // Run 2: restart of step3 only, from run1's incomplete state.
    WorkflowInstance run2 = getGenericWorkflowInstance(
        2, WorkflowInstance.Status.SUCCEEDED, RunPolicy.RESTART_FROM_INCOMPLETE,
        RestartPolicy.RESTART_FROM_INCOMPLETE);
    Map<String, StepRuntimeState> decodedOverview2 = new LinkedHashMap<>();
    decodedOverview2.put("step3", generateStepState(StepInstance.Status.SUCCEEDED, 11L, 12L));
    Map<String, StepTransition> run2Dag = new LinkedHashMap<>();
    run2Dag.put("step3", new StepTransition());
    run2.setRuntimeDag(run2Dag);
    doReturn(run1)
        .when(workflowInstanceDao)
        .getWorkflowInstanceRun(run2.getWorkflowId(), run2.getWorkflowInstanceId(), 1L);
    run2.setAggregatedInfo(AggregatedViewHelper.computeAggregatedView(run1, false));
    assertEquals(3, run2.getAggregatedInfo().getStepAggregatedViews().size());
    assertEquals(
        1L, run2.getAggregatedInfo().getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(
        3L, run2.getAggregatedInfo().getStepAggregatedViews().get("step2").getStartTime().longValue());
    assertEquals(
        5L, run2.getAggregatedInfo().getStepAggregatedViews().get("step3").getStartTime().longValue());
    WorkflowRuntimeOverview wro2 = mock(WorkflowRuntimeOverview.class);
    doReturn(decodedOverview2).when(wro2).decodeStepOverview(run2.getRuntimeDag());
    run2.setRuntimeOverview(wro2);
    run2.setRuntimeWorkflow(runtimeWorkflow);

    WorkflowInstanceAggregatedInfo aggregated2 =
        AggregatedViewHelper.computeAggregatedView(run2, false);
    assertEquals(3, aggregated2.getStepAggregatedViews().size());
    assertEquals(
        StepInstance.Status.SUCCEEDED, aggregated2.getStepAggregatedViews().get("step1").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED, aggregated2.getStepAggregatedViews().get("step2").getStatus());
    assertEquals(
        StepInstance.Status.SUCCEEDED, aggregated2.getStepAggregatedViews().get("step3").getStatus());
    assertEquals(1L, aggregated2.getStepAggregatedViews().get("step1").getStartTime().longValue());
    assertEquals(3L, aggregated2.getStepAggregatedViews().get("step2").getStartTime().longValue());
    // step3's start time comes from run2's re-execution (11L), not run1's (5L).
    assertEquals(11L, aggregated2.getStepAggregatedViews().get("step3").getStartTime().longValue());
    assertEquals(WorkflowInstance.Status.SUCCEEDED, aggregated2.getWorkflowInstanceStatus());
}
/**
 * Returns a Hazelcast client for this data connection: the shared,
 * reference-counted instance when the config is shared, otherwise a new
 * client per call.
 *
 * @return a Hazelcast client instance; never null
 */
@Nonnull
public HazelcastInstance getClient() {
    if (!getConfig().isShared()) {
        return HazelcastClient.newHazelcastClient(clientConfig);
    }
    // Shared: bump the reference count before handing out the proxy.
    retain();
    return proxy.get();
}
// A shared data connection must hand out the same underlying client.
@Test
public void shared_client_should_return_same_instance() {
    DataConnectionConfig config = sharedDataConnectionConfig(clusterName);
    hazelcastDataConnection = new HazelcastDataConnection(config);

    HazelcastInstance first = hazelcastDataConnection.getClient();
    HazelcastInstance second = hazelcastDataConnection.getClient();
    try {
        assertThat(first).isSameAs(second);
    } finally {
        first.shutdown();
        second.shutdown();
    }
}
@Override public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException { if(containerService.isContainer(folder)) { final S3BucketCreateService service = new S3BucketCreateService(session); service.create(folder, StringUtils.isBlank(status.getRegion()) ? new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getDefault().getIdentifier() : status.getRegion()); return folder; } else { final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType()); type.add(Path.Type.placeholder); return new S3TouchFeature(session, acl).withWriter(writer).touch(folder .withType(type), status // Add placeholder object .withMime(MIMETYPE) .withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status))); } }
// A placeholder directory created on a virtual-host-style endpoint must be
// findable by both the S3-specific and the generic find features, list as
// empty, and be deletable.
@Test
public void testCreatePlaceholderVirtualHost() throws Exception {
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(virtualhost);
    final Path directory = new S3DirectoryFeature(virtualhost, new S3WriteFeature(virtualhost, acl), acl).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)),
        new TransferStatus());
    assertTrue(new S3FindFeature(virtualhost, acl).find(directory));
    assertTrue(new DefaultFindFeature(virtualhost).find(directory));
    assertTrue(new S3ObjectListService(virtualhost, acl).list(directory, new DisabledListProgressListener()).isEmpty());
    new S3DefaultDeleteFeature(virtualhost).delete(
        Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Formats a millisecond duration as a zero-padded "HH:mm:ss" string.
 * Non-positive durations yield "00:00:00".
 *
 * @param duration duration in milliseconds
 * @return the formatted hours:minutes:seconds string
 */
public static String getDurationStringLong(int duration) {
    if (duration <= 0) {
        return "00:00:00";
    }
    final int[] hms = millisecondsToHms(duration);
    return String.format(Locale.getDefault(), "%02d:%02d:%02d", hms[0], hms[1], hms[2]);
}
@Test
public void testGetDurationStringLong() {
    // 47,110,000 ms == 13 h 5 min 10 s
    assertEquals("13:05:10", Converter.getDurationStringLong(47110000));
}
/**
 * Stores a new OpenStack network together with its augmented (typed)
 * representation, rejecting null input, a missing network id, or a
 * duplicate id.
 *
 * @param osNet the network to store; must have a non-empty id
 */
@Override
public void createNetwork(Network osNet) {
    checkNotNull(osNet, ERR_NULL_NETWORK);
    final String networkId = osNet.getId();
    checkArgument(!Strings.isNullOrEmpty(networkId), ERR_NULL_NETWORK_ID);

    osNetworkStore.createNetwork(osNet);
    final OpenstackNetwork augmented = buildAugmentedNetworkFromType(osNet);
    augmentedNetworkMap.compute(networkId, (id, existing) -> {
        // Duplicate ids are rejected atomically inside the map update.
        checkArgument(existing == null, networkId + ERR_DUPLICATE);
        return augmented;
    });

    log.info(String.format(MSG_NETWORK, deriveResourceName(osNet), MSG_CREATED));
}
// A network built without an id must be rejected up front.
@Test(expected = IllegalArgumentException.class)
public void testCreateNetworkWithNullId() {
    target.createNetwork(NeutronNetwork.builder().build());
}
/**
 * Wraps the values of the given applications map into an
 * {@link Applications} aggregate with refreshed metadata.
 *
 * @param applicationMap applications keyed by name
 * @return the populated, metadata-updated aggregate
 */
public static Applications toApplications(Map<String, Application> applicationMap) {
    Applications applications = new Applications();
    applicationMap.values().forEach(applications::addApplication);
    return updateMeta(applications);
}
// Converting a map of applications yields an aggregate of the same size.
@Test
public void testToApplicationsIfNotNullReturnApplicationsFromMapOfApplication() {
    Map<String, Application> byName = new HashMap<>();
    for (String name : new String[] {"foo", "bar", "baz"}) {
        byName.put(name, new Application(name));
    }
    Applications expected = createApplications(
            new Application("foo"), new Application("bar"), new Application("baz"));
    Assert.assertEquals(expected.size(), EurekaEntityFunctions.toApplications(byName).size());
}
@Override public void chunk(final Path directory, final AttributedList<Path> list) throws ListCanceledException { if(directory.isRoot()) { if(list.size() >= container) { // Allow another chunk until limit is reached again container += preferences.getInteger("browser.list.limit.container"); throw new ListCanceledException(list); } } if(list.size() >= this.directory) { // Allow another chunk until limit is reached again this.directory += preferences.getInteger("browser.list.limit.directory"); throw new ListCanceledException(list); } }
// A listing reporting 10000 entries must trip the folder chunk limit.
@Test(expected = ListCanceledException.class)
public void testChunkLimitFolder() throws Exception {
    final AttributedList<Path> oversized = new AttributedList<Path>() {
        @Override
        public int size() {
            return 10000;
        }
    };
    new LimitedListProgressListener(new DisabledProgressListener())
        .chunk(new Path("/container", EnumSet.of(Path.Type.volume, Path.Type.directory)), oversized);
}
/**
 * Converts a duration into its human-readable bucket by scanning the rules
 * in order, falling back to the "over two years" conversion when no rule's
 * bound covers it.
 *
 * @param duration the duration to convert
 * @return the converted, human-readable time
 */
public ConvertedTime getConvertedTime(long duration) {
    for (Seconds bound : RULES.keySet()) {
        if (duration <= bound.getSeconds()) {
            return RULES.get(bound).getConvertedTime(duration);
        }
    }
    return new TimeConverter.OverTwoYears().getConvertedTime(duration);
}
@Test
public void testShouldReportAbout1HourFor44Minutes30Seconds() throws Exception {
    // 44 minutes 30 seconds rounds to the "about 1 hour" bucket.
    long fortyFourMinutesThirtySeconds = 44 * 60 + 30;
    assertEquals(TimeConverter.ABOUT_1_HOUR_AGO, timeConverter.getConvertedTime(fortyFourMinutesThirtySeconds));
}
/**
 * @return the configured period of this component in milliseconds
 */
@Override
public long getPeriodMillis() {
    return periodMillis;
}
@Test
public void testGetPeriodMillis() {
    // The plugin under test is configured with a one-second period.
    assertEquals(SECONDS.toMillis(1), plugin.getPeriodMillis());
}
/**
 * Translates a raw HTTP error response into a BackgroundException, copying
 * the status code and MinIO-specific error headers onto the intermediate
 * service exception before delegating to the exception mapper.
 *
 * @param response the HTTP response to translate
 * @return the mapped background exception
 * @throws IOException if the response entity cannot be buffered or read
 */
public BackgroundException map(HttpResponse response) throws IOException {
    final String reason = response.getStatusLine().getReasonPhrase();
    final S3ServiceException failure;
    if(null == response.getEntity()) {
        failure = new S3ServiceException(reason);
    }
    else {
        // Buffer the entity so the error body can be read as the XML message
        EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
        failure = new S3ServiceException(reason, EntityUtils.toString(response.getEntity()));
    }
    failure.setResponseCode(response.getStatusLine().getStatusCode());
    if(response.containsHeader(MINIO_ERROR_CODE)) {
        failure.setErrorCode(response.getFirstHeader(MINIO_ERROR_CODE).getValue());
    }
    if(response.containsHeader(MINIO_ERROR_DESCRIPTION)) {
        failure.setErrorMessage(response.getFirstHeader(MINIO_ERROR_DESCRIPTION).getValue());
    }
    return this.map(failure);
}
// An HTTP 400 service failure maps to an InteroperabilityException.
@Test
public void testBadRequest() {
    final ServiceException failure = new ServiceException("m", "<null/>");
    failure.setErrorMessage("m");
    failure.setResponseCode(400);
    assertTrue(new S3ExceptionMappingService().map(failure) instanceof InteroperabilityException);
}
/**
 * Parses an LLC segment name composed of four SEPARATOR-delimited parts:
 * table name, partition group id, sequence number and creation time
 * (e.g. "myTable__4__27__20160617T2150Z").
 *
 * @param segmentName the segment name to parse
 * @throws IllegalArgumentException if the name does not have exactly four parts
 */
public LLCSegmentName(String segmentName) {
    String[] tokens = StringUtils.splitByWholeSeparator(segmentName, SEPARATOR);
    Preconditions.checkArgument(tokens.length == 4, "Invalid LLC segment name: %s", segmentName);
    _tableName = tokens[0];
    _partitionGroupId = Integer.parseInt(tokens[1]);
    _sequenceNumber = Integer.parseInt(tokens[2]);
    _creationTime = tokens[3];
    _segmentName = segmentName;
}
// Round-trips an LLC segment name through build/parse and checks compareTo
// ordering across partition group id, sequence number, and table name.
@Test
public void testLLCSegmentName() {
    String tableName = "myTable";
    final int partitionGroupId = 4;
    final int sequenceNumber = 27;
    final long msSinceEpoch = 1466200248000L;
    final String creationTime = "20160617T2150Z";
    final long creationTimeInMs = 1466200200000L;
    final String segmentName = "myTable__4__27__" + creationTime;

    // Built from components: all accessors must round-trip.
    LLCSegmentName segName1 = new LLCSegmentName(tableName, partitionGroupId, sequenceNumber, msSinceEpoch);
    Assert.assertEquals(segName1.getSegmentName(), segmentName);
    Assert.assertEquals(segName1.getPartitionGroupId(), partitionGroupId);
    Assert.assertEquals(segName1.getCreationTime(), creationTime);
    Assert.assertEquals(segName1.getCreationTimeMs(), creationTimeInMs);
    Assert.assertEquals(segName1.getSequenceNumber(), sequenceNumber);
    Assert.assertEquals(segName1.getTableName(), tableName);

    // Parsed back from the generated string: must be equal to the original.
    LLCSegmentName segName2 = new LLCSegmentName(segmentName);
    Assert.assertEquals(segName2.getSegmentName(), segmentName);
    Assert.assertEquals(segName2.getPartitionGroupId(), partitionGroupId);
    Assert.assertEquals(segName2.getCreationTime(), creationTime);
    Assert.assertEquals(segName2.getCreationTimeMs(), creationTimeInMs);
    Assert.assertEquals(segName2.getSequenceNumber(), sequenceNumber);
    Assert.assertEquals(segName2.getTableName(), tableName);
    Assert.assertEquals(segName1, segName2);

    // Ordering: partition group id dominates sequence number.
    LLCSegmentName segName3 = new LLCSegmentName(tableName, partitionGroupId + 1, sequenceNumber - 1, msSinceEpoch);
    Assert.assertTrue(segName1.compareTo(segName3) < 0);
    LLCSegmentName segName4 = new LLCSegmentName(tableName, partitionGroupId + 1, sequenceNumber + 1, msSinceEpoch);
    Assert.assertTrue(segName1.compareTo(segName4) < 0);
    LLCSegmentName segName5 = new LLCSegmentName(tableName, partitionGroupId - 1, sequenceNumber + 1, msSinceEpoch);
    Assert.assertTrue(segName1.compareTo(segName5) > 0);
    LLCSegmentName segName6 = new LLCSegmentName(tableName, partitionGroupId, sequenceNumber + 1, msSinceEpoch);
    Assert.assertTrue(segName1.compareTo(segName6) < 0);

    // Comparing segments from different tables must fail.
    LLCSegmentName segName7 =
        new LLCSegmentName(tableName + "NotGood", partitionGroupId, sequenceNumber + 1, msSinceEpoch);
    try {
        segName1.compareTo(segName7);
        Assert.fail("Not failing when comparing " + segName1.getSegmentName() + " and " + segName7.getSegmentName());
    } catch (Exception e) {
        // expected
    }

    LLCSegmentName[] testSorted = new LLCSegmentName[]{segName3, segName1, segName4, segName5, segName6};
    Arrays.sort(testSorted);
    Assert.assertEquals(testSorted, new LLCSegmentName[]{segName5, segName1, segName6, segName3, segName4});
}
/**
 * Persists the given connection and flushes the change immediately.
 *
 * @param connection the entity to save
 * @return the saved entity
 */
public Connection create(Connection connection) {
    final Connection saved = connectionRepository.saveAndFlush(connection);
    return saved;
}
// Creating a connection delegates to the repository exactly once and
// returns its result.
@Test
void createConnection() {
    Connection persisted = new Connection();
    when(connectionRepositoryMock.saveAndFlush(any(Connection.class))).thenReturn(persisted);

    Connection result = connectionServiceMock.create(new Connection());

    verify(connectionRepositoryMock, times(1)).saveAndFlush(any(Connection.class));
    assertNotNull(result);
}
/**
 * Reads as many complete HTTP/2 frames as are available in {@code input},
 * dispatching each to the listener. A partial frame is left in the buffer
 * until more data arrives. After a connection-level error, all further
 * input is discarded.
 */
@Override
public void readFrame(ChannelHandlerContext ctx, ByteBuf input, Http2FrameListener listener)
        throws Http2Exception {
    if (readError) {
        // A previous connection error poisoned this reader; drain and ignore.
        input.skipBytes(input.readableBytes());
        return;
    }
    try {
        do {
            if (readingHeaders && !preProcessFrame(input)) {
                return;
            }
            // The header is complete, fall into the next case to process the payload.
            // This is to ensure the proper handling of zero-length payloads. In this
            // case, we don't want to loop around because there may be no more data
            // available, causing us to exit the loop. Instead, we just want to perform
            // the first pass at payload processing now.

            // Wait until the entire payload has been read.
            if (input.readableBytes() < payloadLength) {
                return;
            }

            // Slice to work only on the frame being read
            ByteBuf framePayload = input.readSlice(payloadLength);
            // We have consumed the data for this frame, next time we read,
            // we will be expecting to read a new frame header.
            readingHeaders = true;
            verifyFrameState();
            processPayloadState(ctx, framePayload, listener);
        } while (input.isReadable());
    } catch (Http2Exception e) {
        // Stream errors are recoverable; any other HTTP/2 error poisons the reader.
        readError = !Http2Exception.isStreamError(e);
        throw e;
    } catch (RuntimeException e) {
        readError = true;
        throw e;
    } catch (Throwable cause) {
        readError = true;
        PlatformDependent.throwException(cause);
    }
}
// A HEADERS frame without END_HEADERS followed by its CONTINUATION frame
// must be delivered to the listener as a single merged headers callback.
@Test
public void readHeaderFrameAndContinuationFrame() throws Http2Exception {
    final int streamId = 1;
    final ByteBuf input = Unpooled.buffer();
    try {
        final Http2Headers headers = new DefaultHttp2Headers()
            .authority("foo")
            .method("get")
            .path("/")
            .scheme("https");
        writeHeaderFrame(input, streamId, headers,
            new Http2Flags().endOfHeaders(false).endOfStream(true));
        writeContinuationFrame(input, streamId, new DefaultHttp2Headers().add("foo", "bar"),
            new Http2Flags().endOfHeaders(true));

        frameReader.readFrame(ctx, input, listener);

        verify(listener).onHeadersRead(ctx, 1, headers.add("foo", "bar"), 0, true);
    } finally {
        input.release();
    }
}
/**
 * Lists all topic names in the cluster, retrying retryable failures.
 *
 * @return the set of topic names
 * @throws KafkaResponseGetFailedException if the names cannot be retrieved
 */
@Override
public Set<String> listTopicNames() {
    try {
        return ExecutorUtil.executeWithRetries(
            () -> adminClient.get().listTopics().names().get(),
            ExecutorUtil.RetryBehaviour.ON_RETRYABLE);
    } catch (final Exception cause) {
        // Wrap any failure so callers get a single, typed exception.
        throw new KafkaResponseGetFailedException("Failed to retrieve Kafka Topic names", cause);
    }
}
@Test
public void shouldListTopicNames() {
    // Given:
    givenTopicExists("topicA", 1, 1);
    givenTopicExists("topicB", 1, 2);
    when(adminClient.listTopics())
        .thenAnswer(listTopicResult());

    // When:
    final Set<String> names = kafkaTopicClient.listTopicNames();

    // Then:
    assertThat(names, is(ImmutableSet.of("topicA", "topicB")));
}
/**
 * Sets the protocol identifier of the current request.
 *
 * @param requestProtocol the protocol identifier (e.g. HTTP or gRPC —
 *     see the context's protocol constants)
 */
public void setRequestProtocol(String requestProtocol) {
    this.requestProtocol = requestProtocol;
}
// The protocol starts unset and reflects whichever value was last set.
@Test
void testSetRequestProtocol() {
    assertNull(basicContext.getRequestProtocol());
    for (String protocol : new String[] {BasicContext.HTTP_PROTOCOL, BasicContext.GRPC_PROTOCOL}) {
        basicContext.setRequestProtocol(protocol);
        assertEquals(protocol, basicContext.getRequestProtocol());
    }
}
/**
 * This dispatcher exposes no metadata.
 *
 * @return always {@link Optional#empty()}
 */
@Override
public Optional<NotificationDispatcherMetadata> getMetadata() {
    return Optional.empty();
}
// The dispatcher under test never exposes metadata.
@Test
public void getMetadata_returns_empty() {
    assertThat(underTest.getMetadata()).isEmpty();
}
/**
 * Detects duplications for a component: indexes the origin and cross-file
 * blocks, runs suffix-tree clone detection over the origin blocks, filters
 * the detected groups with the per-language unit-count predicate, and
 * records the surviving duplications.
 */
public void computeCpd(Component component, Collection<Block> originBlocks, Collection<Block> duplicationBlocks) {
    CloneIndex index = new PackedMemoryCloneIndex();
    populateIndex(index, originBlocks);
    populateIndex(index, duplicationBlocks);

    String languageKey = component.getFileAttributes().getLanguageKey();
    Iterable<CloneGroup> filtered = SuffixTreeCloneDetectionAlgorithm.detect(index, originBlocks)
        .stream()
        .filter(getNumberOfUnitsNotLessThan(languageKey))
        .toList();
    addDuplications(component, filtered);
}
// When a single block is duplicated in more than 100 places, only the first
// 100 references are kept and a warning is logged.
@Test
public void do_not_compute_more_than_one_hundred_duplications_when_too_many_duplicated_references() {
    Collection<Block> originBlocks = new ArrayList<>();
    Collection<Block> duplicatedBlocks = new ArrayList<>();
    Block.Builder blockBuilder = new Block.Builder()
        .setResourceId(ORIGIN_FILE_KEY)
        .setBlockHash(new ByteArray("a8998353e96320ec"))
        .setIndexInFile(0)
        .setLines(30, 45)
        .setUnit(0, 100);
    originBlocks.add(blockBuilder.build());
    // Generate more than 100 duplications of the same block
    for (int i = 0; i < 110; i++) {
        duplicatedBlocks.add(
            blockBuilder
                .setResourceId(randomAlphanumeric(16))
                .build());
    }

    underTest.computeCpd(ORIGIN_FILE, originBlocks, duplicatedBlocks);

    assertThat(logTester.logs(Level.WARN)).containsOnly(
        "Too many duplication references on file " + ORIGIN_FILE_KEY + " for block at line 30. Keeping only the first 100 references.");
    Iterable<Duplication> duplications = duplicationRepository.getDuplications(ORIGIN_FILE);
    assertThat(duplications).hasSize(1);
    // The duplicate list is capped at exactly 100 entries.
    assertThat(duplications.iterator().next().getDuplicates()).hasSize(100);
}
/**
 * Builds a PublicationParams by reading every publication-related URI parameter
 * from the channel. The three positional parameters (initial-term-id, term-id,
 * term-offset) must be supplied as a complete set or not at all; when present
 * they are validated against term length, range and frame alignment.
 */
static PublicationParams getPublicationParams(
    final ChannelUri channelUri,
    final MediaDriver.Context ctx,
    final DriverConductor driverConductor,
    final boolean isIpc)
{
    final PublicationParams params = new PublicationParams(ctx, isIpc);

    // Each getter parses its parameter from the URI and stores it on params.
    params.getEntityTag(channelUri, driverConductor);
    params.getSessionId(channelUri, driverConductor);
    params.getTermBufferLength(channelUri);
    params.getMtuLength(channelUri);
    params.getLingerTimeoutNs(channelUri);
    params.getEos(channelUri);
    params.getSparse(channelUri, ctx);
    params.getSpiesSimulateConnection(channelUri, ctx);
    params.getUntetheredWindowLimitTimeout(channelUri, ctx);
    params.getUntetheredRestingTimeout(channelUri, ctx);
    params.getMaxResend(channelUri);

    // Count how many of the three positional parameters were supplied.
    int count = 0;

    final String initialTermIdStr = channelUri.get(INITIAL_TERM_ID_PARAM_NAME);
    count = initialTermIdStr != null ? count + 1 : count;

    final String termIdStr = channelUri.get(TERM_ID_PARAM_NAME);
    count = termIdStr != null ? count + 1 : count;

    final String termOffsetStr = channelUri.get(TERM_OFFSET_PARAM_NAME);
    count = termOffsetStr != null ? count + 1 : count;

    if (count > 0)
    {
        // All-or-nothing: a partial set of positional params is an error.
        if (count < 3)
        {
            throw new IllegalArgumentException("params must be used as a complete set: " +
                INITIAL_TERM_ID_PARAM_NAME + " " + TERM_ID_PARAM_NAME + " " + TERM_OFFSET_PARAM_NAME +
                " channel=" + channelUri);
        }

        params.initialTermId = Integer.parseInt(initialTermIdStr);
        params.termId = Integer.parseInt(termIdStr);
        params.termOffset = Integer.parseInt(termOffsetStr);

        if (params.termOffset > params.termLength)
        {
            throw new IllegalArgumentException(
                TERM_OFFSET_PARAM_NAME + "=" + params.termOffset + " > " +
                TERM_LENGTH_PARAM_NAME + "=" + params.termLength + ": channel=" + channelUri);
        }

        if (params.termOffset < 0 || params.termOffset > LogBufferDescriptor.TERM_MAX_LENGTH)
        {
            throw new IllegalArgumentException(
                TERM_OFFSET_PARAM_NAME + "=" + params.termOffset + " out of range: channel=" + channelUri);
        }

        // Term offset must land on a frame boundary.
        if ((params.termOffset & (FrameDescriptor.FRAME_ALIGNMENT - 1)) != 0)
        {
            throw new IllegalArgumentException(
                TERM_OFFSET_PARAM_NAME + "=" + params.termOffset +
                " must be a multiple of FRAME_ALIGNMENT: channel=" + channelUri);
        }

        // A negative difference here means the ids wrapped past 2^31 - 1 apart.
        if (params.termId - params.initialTermId < 0)
        {
            throw new IllegalStateException(
                "difference greater than 2^31 - 1: " + INITIAL_TERM_ID_PARAM_NAME + "=" +
                params.initialTermId + " when " + TERM_ID_PARAM_NAME + "=" + params.termId +
                " channel=" + channelUri);
        }

        params.hasPosition = true;
    }

    params.isResponse = CONTROL_MODE_RESPONSE.equals(channelUri.get(MDC_CONTROL_MODE_PARAM_NAME));
    // Defaults to -1 when no response correlation id is present on the URI.
    params.responseCorrelationId = Long.parseLong(channelUri.get(RESPONSE_CORRELATION_ID_PARAM_NAME, "-1"));

    return params;
}
// Smoke test: a plain UDP URI without max-resend configuration parses cleanly
// and leaves hasMaxResend unset.
@Test
void basicParse() {
    final ChannelUri uri = ChannelUri.parse("aeron:udp?endpoint=localhost:1010");
    final PublicationParams params = PublicationParams.getPublicationParams(uri, ctx, conductor, false);
    assertFalse(params.hasMaxResend);
}
/**
 * Inserts elements into a copy of {@code buffer} at the given index by
 * delegating to the Object-based overload. The unchecked cast is safe because
 * the delegate returns an array with the same component type as {@code buffer}.
 */
@SuppressWarnings("unchecked")
public static <T> T[] insert(T[] buffer, int index, T... newElements) {
    return (T[]) insert((Object) buffer, index, newElements);
}
// Verifies the Object-based insert overload handles every primitive array type
// (varargs are autoboxed, the result must still be the primitive array type).
@Test
public void testInsertPrimitive() {
    final boolean[] booleans = new boolean[10];
    final byte[] bytes = new byte[10];
    final char[] chars = new char[10];
    final short[] shorts = new short[10];
    final int[] ints = new int[10];
    final long[] longs = new long[10];
    final float[] floats = new float[10];
    final double[] doubles = new double[10];

    final boolean[] insert1 = (boolean[]) ArrayUtil.insert(booleans, 0, 0, 1, 2);
    assertNotNull(insert1);
    final byte[] insert2 = (byte[]) ArrayUtil.insert(bytes, 0, 1, 2, 3);
    assertNotNull(insert2);
    final char[] insert3 = (char[]) ArrayUtil.insert(chars, 0, 1, 2, 3);
    assertNotNull(insert3);
    final short[] insert4 = (short[]) ArrayUtil.insert(shorts, 0, 1, 2, 3);
    assertNotNull(insert4);
    final int[] insert5 = (int[]) ArrayUtil.insert(ints, 0, 1, 2, 3);
    assertNotNull(insert5);
    final long[] insert6 = (long[]) ArrayUtil.insert(longs, 0, 1, 2, 3);
    assertNotNull(insert6);
    final float[] insert7 = (float[]) ArrayUtil.insert(floats, 0, 1, 2, 3);
    assertNotNull(insert7);
    final double[] insert8 = (double[]) ArrayUtil.insert(doubles, 0, 1, 2, 3);
    assertNotNull(insert8);
}
public static void copyBody(Message source, Message target) { // Preserve the DataType if both messages are DataTypeAware if (source.hasTrait(MessageTrait.DATA_AWARE)) { target.setBody(source.getBody()); target.setPayloadForTrait(MessageTrait.DATA_AWARE, source.getPayloadForTrait(MessageTrait.DATA_AWARE)); return; } target.setBody(source.getBody()); }
// Copying between two DataTypeAware messages must transfer both the body
// instance and the DataType payload.
@Test
void shouldCopyBodyIfBothDataTypeAwareWithDataTypeSet() {
    Object body = new Object();
    DataType type = new DataType("foo");
    DefaultMessage m1 = new DefaultMessage((Exchange) null);
    m1.setBody(body, type);
    DefaultMessage m2 = new DefaultMessage((Exchange) null);
    copyBody(m1, m2);
    assertSame(body, m2.getBody());
    assertSame(type, m2.getDataType());
}
/**
 * Resolves the permission menu for the user identified by the given token.
 * Returns a success result carrying the menu, or an error result when the
 * permission service yields nothing for the token.
 */
@GetMapping("/getUserPermissionByToken")
public ShenyuAdminResult getUserPermissionByToken(@RequestParam(name = "token") final String token) {
    PermissionMenuVO permissionMenuVO = permissionService.getPermissionMenu(token);
    return Optional.ofNullable(permissionMenuVO)
        .map(item -> ShenyuAdminResult.success(ShenyuResultMessage.MENU_SUCCESS, item))
        .orElseGet(() -> ShenyuAdminResult.error(ShenyuResultMessage.MENU_FAILED));
}
// A null permission menu from the service must map to an error result.
@Test
public void testGetUserPermissionByTokenNull() {
    when(mockPermissionService.getPermissionMenu("token")).thenReturn(null);
    final ShenyuAdminResult result = permissionController.getUserPermissionByToken("token");
    assertThat(result.getCode(), is(CommonErrorCode.ERROR));
    assertThat(result.getMessage(), is(ShenyuResultMessage.MENU_FAILED));
}
/**
 * Returns this resource when it already denotes a directory; otherwise the
 * containing directory (the key up to and including the last '/'). A key with
 * no '/' yields an empty key (bucket root) since lastIndexOf returns -1.
 */
@Override
public ResourceId getCurrentDirectory() {
    if (isDirectory()) {
        return this;
    }
    return fromComponents(scheme, getBucket(), key.substring(0, key.lastIndexOf('/') + 1));
}
// Covers directory identity, unicode keys, bucket-only URIs and plain files.
@Test
public void testGetCurrentDirectory() {
    // Tests s3 paths.
    assertEquals(
        S3ResourceId.fromUri("s3://my_bucket/tmp dir/"),
        S3ResourceId.fromUri("s3://my_bucket/tmp dir/").getCurrentDirectory());

    // Tests path with unicode.
    assertEquals(
        S3ResourceId.fromUri("s3://my_bucket/输出 目录/"),
        S3ResourceId.fromUri("s3://my_bucket/输出 目录/文件01.txt").getCurrentDirectory());

    // Tests bucket with no ending '/'.
    assertEquals(
        S3ResourceId.fromUri("s3://my_bucket/"),
        S3ResourceId.fromUri("s3://my_bucket").getCurrentDirectory());
    assertEquals(
        S3ResourceId.fromUri("s3://my_bucket/"),
        S3ResourceId.fromUri("s3://my_bucket/not-directory").getCurrentDirectory());
}
/**
 * Attempts to resolve icon metadata from a best-guess identifier, trying in
 * order: exact class spec, filtered class names, normalized icon file name,
 * and absolute URL. Returns null for Jenkins Symbols (no metadata exists) or
 * when every lookup strategy fails.
 */
@Restricted(NoExternalUse.class)
public static Icon tryGetIcon(String iconGuess) {
    // Jenkins Symbols don't have metadata so return null
    if (iconGuess == null || iconGuess.startsWith("symbol-")) {
        return null;
    }

    Icon iconMetadata = IconSet.icons.getIconByClassSpec(iconGuess);

    // `iconGuess` must be class names if it contains a whitespace.
    // It may contains extra css classes unrelated to icons.
    // Filter classes with `icon-` prefix.
    if (iconMetadata == null && iconGuess.contains(" ")) {
        iconMetadata = IconSet.icons.getIconByClassSpec(filterIconNameClasses(iconGuess));
    }

    if (iconMetadata == null) {
        // Icon could be provided as a simple iconFileName e.g. "help.svg"
        iconMetadata = IconSet.icons.getIconByClassSpec(IconSet.toNormalizedIconNameClass(iconGuess) + " icon-md");
    }

    if (iconMetadata == null) {
        // Icon could be provided as an absolute iconFileName e.g. "/plugin/foo/abc.png"
        iconMetadata = IconSet.icons.getIconByUrl(iconGuess);
    }

    return iconMetadata;
}
// An exact icon class spec must resolve to non-null metadata.
@Test
public void tryGetIcon_shouldReturnMetadataForExactSpec() throws Exception {
    assertThat(Functions.tryGetIcon("icon-help icon-sm"), is(not(nullValue())));
}
/**
 * Creates a scheduled pool via the delegate factory and wraps it so task
 * metrics are recorded in the shared registry under the profile id.
 */
@Override
public ScheduledExecutorService newScheduledThreadPool(ThreadPoolProfile profile, ThreadFactory threadFactory) {
    return new InstrumentedScheduledExecutorService(
        threadPoolFactory.newScheduledThreadPool(profile, threadFactory),
        metricRegistry,
        profile.getId());
}
// The factory must return an instrumented pool and register all expected
// metrics (meters, counters, timer, histogram) in order on construction.
@Test
public void testNewScheduledThreadPool() {
    final ScheduledExecutorService scheduledExecutorService =
        instrumentedThreadPoolFactory.newScheduledThreadPool(profile, threadFactory);
    assertThat(scheduledExecutorService, is(notNullValue()));
    assertThat(scheduledExecutorService, is(instanceOf(InstrumentedScheduledExecutorService.class)));
    inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "submitted" }));
    inOrder.verify(registry, times(1)).counter(MetricRegistry.name(METRICS_NAME, new String[] { "running" }));
    inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "completed" }));
    inOrder.verify(registry, times(1)).timer(MetricRegistry.name(METRICS_NAME, new String[] { "duration" }));
    inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.once" }));
    inOrder.verify(registry, times(1)).meter(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.repetitively" }));
    inOrder.verify(registry, times(1)).counter(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.overrun" }));
    inOrder.verify(registry, times(1))
        .histogram(MetricRegistry.name(METRICS_NAME, new String[] { "scheduled.percent-of-period" }));
}
/**
 * Reports health for the background job server: UP when the server is
 * disabled (not an error) or enabled-and-running, DOWN when it is enabled
 * but stopped. Details describe the enabled flag and run state.
 */
@Override
public Health health() {
    final Health.Builder health = Health.unknown();
    if (!jobRunrProperties.getBackgroundJobServer().isEnabled()) {
        // A deliberately disabled server is still considered healthy.
        health
            .up()
            .withDetail("backgroundJobServer", "disabled");
    } else {
        final BackgroundJobServer backgroundJobServer = backgroundJobServerProvider.getObject();
        if (backgroundJobServer.isRunning()) {
            health
                .up()
                .withDetail("backgroundJobServer", "enabled")
                .withDetail("backgroundJobServerStatus", "running");
        } else {
            health
                .down()
                .withDetail("backgroundJobServer", "enabled")
                .withDetail("backgroundJobServerStatus", "stopped");
        }
    }
    return health.build();
}
// Enabled + running server must report UP.
@Test
void givenEnabledBackgroundJobServerAndBackgroundJobServerRunning_ThenHealthIsUp() {
    when(backgroundJobServerProperties.isEnabled()).thenReturn(true);
    when(backgroundJobServer.isRunning()).thenReturn(true);

    assertThat(jobRunrHealthIndicator.health().getStatus()).isEqualTo(Status.UP);
}
/**
 * Shell command that prints the details of the compaction plan for a specific
 * compaction instant, honoring the usual limit/sort/order/header/partition
 * display options.
 *
 * @throws Exception if the compaction plan cannot be read or deserialized
 */
@ShellMethod(key = "compaction show", value = "Shows compaction details for a specific compaction instant")
public String compactionShow(
    // Fixed: the help text previously said "Base path for the target hoodie
    // table", a copy-paste from an unrelated option.
    @ShellOption(value = "--instant",
        help = "Instant time of the compaction to show") final String compactionInstantTime,
    @ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit,
    @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField,
    @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending,
    @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly,
    @ShellOption(value = {"--partition"}, help = "Partition value", defaultValue = ShellOption.NULL) final String partition)
    throws Exception {
    HoodieTableMetaClient client = checkAndGetMetaClient();
    HoodieActiveTimeline activeTimeline = client.getActiveTimeline();
    // Read the requested compaction plan bytes from the timeline and deserialize.
    HoodieCompactionPlan compactionPlan = TimelineMetadataUtils.deserializeCompactionPlan(
        activeTimeline.readCompactionPlanAsBytes(
            HoodieTimeline.getCompactionRequestedInstant(compactionInstantTime)).get());

    return printCompaction(compactionPlan, sortByField, descending, limit, headerOnly, partition);
}
@Test public void testCompactionShow() throws IOException { // create MOR table. new TableCommand().createTable( tablePath, tableName, HoodieTableType.MERGE_ON_READ.name(), "", TimelineLayoutVersion.VERSION_1, HoodieAvroPayload.class.getName()); CompactionTestUtils.setupAndValidateCompactionOperations(HoodieCLI.getTableMetaClient(), false, 3, 4, 3, 3); HoodieCLI.getTableMetaClient().reloadActiveTimeline(); Object result = shell.evaluate(() -> "compaction show --instant 001"); System.out.println(result.toString()); }
/**
 * Drops a partition in the underlying metastore and, whether the drop
 * succeeds or throws, invalidates the cached partition entry so stale data
 * is never served afterwards.
 */
@Override
public void dropPartition(String dbName, String tableName, List<String> partValues, boolean deleteData) {
    // Resolve partition column names before the drop, while the table is intact.
    List<String> partitionColNames = getTable(dbName, tableName).getPartitionColumnNames();
    try {
        metastore.dropPartition(dbName, tableName, partValues, deleteData);
    } finally {
        // Invalidate even on failure: the metastore state is uncertain after an error.
        String partitionName = PartitionUtil.toHivePartitionName(partitionColNames, partValues);
        HivePartitionName hivePartitionName = HivePartitionName.of(dbName, tableName, partitionName);
        invalidatePartition(hivePartitionName);
    }
}
// Smoke test: dropping a partition through the caching layer must not throw.
@Test
public void testDropPartition() {
    CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
        metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
    cachingHiveMetastore.dropPartition("db", "table", Lists.newArrayList("1"), false);
}
/**
 * Returns the email status for the account bound to the MijnDigiD session
 * identified by the session header.
 */
@GetMapping("/status")
public EmailStatusResult getEmailStatus(@RequestHeader(MijnDigidSession.MIJN_DIGID_SESSION_HEADER) String mijnDigiDsessionId){
    MijnDigidSession mijnDigiDSession = retrieveMijnDigiDSession(mijnDigiDsessionId);
    return accountService.getEmailStatus(mijnDigiDSession.getAccountId());
}
// The controller must pass the service's NOT_VERIFIED result through unchanged.
@Test
public void validEmailStatusNotVerified() {
    EmailStatusResult result = new EmailStatusResult();
    result.setStatus(Status.OK);
    result.setError("error");
    result.setEmailStatus(EmailStatus.NOT_VERIFIED);
    result.setEmailAddress("address");
    result.setActionNeeded(true);

    when(accountService.getEmailStatus(anyLong())).thenReturn(result);

    EmailStatusResult emailStatus = emailController.getEmailStatus(mijnDigiDSession.getId());

    assertEquals(Status.OK, emailStatus.getStatus());
    assertEquals("error", emailStatus.getError());
    assertEquals(EmailStatus.NOT_VERIFIED, emailStatus.getEmailStatus());
    assertEquals("address", emailStatus.getEmailAddress());
}
/**
 * Parses runtime options from a property map; thin adapter over the
 * function-based overload using the map's {@code get} as the lookup.
 */
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
    return parse(properties::get);
}
// An unparsable object-factory value must surface as a CucumberException
// naming the offending property and value.
@Test
void should_throw_when_fails_to_parse() {
    properties.put(Constants.OBJECT_FACTORY_PROPERTY_NAME, "garbage");
    CucumberException exception = assertThrows(
        CucumberException.class,
        () -> cucumberPropertiesParser.parse(properties).build());
    assertThat(exception.getMessage(), equalTo("Failed to parse 'cucumber.object-factory' with value 'garbage'"));
}
/**
 * Maps a GitHub user (plus optional email and team memberships) onto a
 * UserIdentity. Groups are set only when a team list is supplied; a null
 * team list leaves groups untouched.
 */
@Override
public UserIdentity create(GsonUser user, @Nullable String email, @Nullable List<GsonTeam> teams) {
    UserIdentity.Builder builder = UserIdentity.builder()
        .setProviderId(user.getId())
        .setProviderLogin(user.getLogin())
        .setName(generateName(user))
        .setEmail(email);
    if (teams != null) {
        builder.setGroups(teams.stream()
            .map(GithubTeamConverter::toGroupName)
            .collect(Collectors.toSet()));
    }
    return builder.build();
}
// A GitHub user without a display name must fall back to the login.
@Test
public void null_name_is_replaced_by_provider_login() {
    GsonUser gson = new GsonUser("ABCD", "octocat", null, "octocat@github.com");

    UserIdentity identity = underTest.create(gson, null, null);
    assertThat(identity.getName()).isEqualTo("octocat");
}
// Delegates to the full-location helper. NOTE(review): the doubled
// "LocationLocation" in the helper name looks like a typo — confirm upstream.
@Override
public final String getLocation() {
    return getFullLocationLocation();
}
// The location of a glue definition must start with its fully-qualified
// method signature.
@Test
void test() throws NoSuchMethodException {
    Method method = AbstractGlueDefinitionTest.class.getMethod("method");
    AbstractGlueDefinition definition = new AbstractGlueDefinition(method, lookup) {
    };
    assertThat(definition.getLocation(), startsWith("io.cucumber.java.AbstractGlueDefinitionTest.method()"));
}
/**
 * Assigns a bucket for an incoming insert record in the given partition.
 * Preference order: pack into an existing small file, reuse the partition's
 * current new-file assignment while it has capacity, otherwise open a fresh
 * INSERT bucket and remember its assignment state for this partition.
 */
public BucketInfo addInsert(String partitionPath) {
    // for new inserts, compute buckets depending on how many records we have for each partition
    SmallFileAssign smallFileAssign = getSmallFileAssign(partitionPath);

    // first try packing this into one of the smallFiles
    if (smallFileAssign != null && smallFileAssign.assign()) {
        return new BucketInfo(BucketType.UPDATE, smallFileAssign.getFileId(), partitionPath);
    }

    // if we have anything more, create new insert buckets, like normal
    if (newFileAssignStates.containsKey(partitionPath)) {
        NewFileAssignState newFileAssignState = newFileAssignStates.get(partitionPath);
        if (newFileAssignState.canAssign()) {
            newFileAssignState.assign();
            final String key = StreamerUtil.generateBucketKey(partitionPath, newFileAssignState.fileId);
            if (bucketInfoMap.containsKey(key)) {
                // the newFileAssignStates is cleaned asynchronously when received the checkpoint success notification,
                // the records processed within the time range:
                // (start checkpoint, checkpoint success(and instant committed))
                // should still be assigned to the small buckets of last checkpoint instead of new one.
                // the bucketInfoMap is cleaned when checkpoint starts.

                // A promotion: when the HoodieRecord can record whether it is an UPDATE or INSERT,
                // we can always return an UPDATE BucketInfo here, and there is no need to record the
                // UPDATE bucket through calling #addUpdate.
                return bucketInfoMap.get(key);
            }
            return new BucketInfo(BucketType.UPDATE, newFileAssignState.fileId, partitionPath);
        }
    }

    // No reusable bucket: open a brand new INSERT bucket and track its state.
    BucketInfo bucketInfo = new BucketInfo(BucketType.INSERT, createFileIdOfThisTask(), partitionPath);
    final String key = StreamerUtil.generateBucketKey(partitionPath, bucketInfo.getFileIdPrefix());
    bucketInfoMap.put(key, bucketInfo);
    NewFileAssignState newFileAssignState = new NewFileAssignState(bucketInfo.getFileIdPrefix(), writeProfile.getRecordsPerBucket());
    newFileAssignState.assign();
    newFileAssignStates.put(partitionPath, newFileAssignState);
    return bucketInfo;
}
// Inserts must be packed into small files with capacity (UPDATE buckets) and
// only fall back to new INSERT buckets for partitions without small files.
@Test
public void testInsertWithSmallFiles() {
    SmallFile f0 = new SmallFile();
    f0.location = new HoodieRecordLocation("t0", "f0");
    f0.sizeBytes = 12;

    SmallFile f1 = new SmallFile();
    f1.location = new HoodieRecordLocation("t0", "f1");
    f1.sizeBytes = 122879; // no left space to append new records to this bucket

    SmallFile f2 = new SmallFile();
    f2.location = new HoodieRecordLocation("t0", "f2");
    f2.sizeBytes = 56;

    Map<String, List<SmallFile>> smallFilesMap = new HashMap<>();
    smallFilesMap.put("par1", Arrays.asList(f0, f1));
    smallFilesMap.put("par2", Collections.singletonList(f2));

    MockBucketAssigner mockBucketAssigner = new MockBucketAssigner(context, writeConfig, smallFilesMap);
    BucketInfo bucketInfo = mockBucketAssigner.addInsert("par1");
    assertBucketEquals(bucketInfo, "par1", BucketType.UPDATE, "f0");

    mockBucketAssigner.addInsert("par1");
    bucketInfo = mockBucketAssigner.addInsert("par1");
    assertBucketEquals(bucketInfo, "par1", BucketType.UPDATE, "f0");

    mockBucketAssigner.addInsert("par2");
    bucketInfo = mockBucketAssigner.addInsert("par2");
    assertBucketEquals(bucketInfo, "par2", BucketType.UPDATE, "f2");

    // A partition without small files gets fresh INSERT buckets.
    bucketInfo = mockBucketAssigner.addInsert("par3");
    assertBucketEquals(bucketInfo, "par3", BucketType.INSERT);

    bucketInfo = mockBucketAssigner.addInsert("par3");
    assertBucketEquals(bucketInfo, "par3", BucketType.INSERT);
}
/**
 * Returns the host of the master cluster controller for the given content
 * cluster — by convention the first (lowest-index) controller instance.
 *
 * @throws IllegalArgumentException when the application has no cluster controllers
 */
public static HostName getControllerHostName(ApplicationInstance application, ClusterId contentClusterId) {
    // It happens that the master Cluster Controller is the one with the lowest index, if up.
    return getClusterControllerInstancesInOrder(application, contentClusterId).stream()
        .findFirst()
        .orElseThrow(() -> new IllegalArgumentException("No cluster controllers found in application " + application));
}
// The lowest-index controller's host must be returned.
@Test
public void testGetControllerHostName() {
    HostName host = VespaModelUtil.getControllerHostName(application, CONTENT_CLUSTER_ID);
    assertEquals(controller0Host, host);
}
/**
 * Encodes vehicle access for an OSM way. Precedence: emergency-access service
 * ways may be skipped entirely; an explicit non-temporal restriction blocks
 * access; barrier-edge node tags may block access; ferries get bidirectional
 * access under specific conditions; otherwise oneway/roundabout semantics
 * decide direction(s).
 */
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
    String highwayValue = way.getTag("highway");
    if (skipEmergency && "service".equals(highwayValue) && "emergency_access".equals(way.getTag("service")))
        return;
    // First matching restriction key wins; temporal restrictions do not block.
    int firstIndex = way.getFirstIndex(restrictionKeys);
    String firstValue = firstIndex < 0 ? "" : way.getTag(restrictionKeys.get(firstIndex), "");
    if (restrictedValues.contains(firstValue) && !hasTemporalRestriction(way, firstIndex, restrictionKeys))
        return;

    if (way.hasTag("gh:barrier_edge") && way.hasTag("node_tags")) {
        List<Map<String, Object>> nodeTags = way.getTag("node_tags", null);
        Map<String, Object> firstNodeTags = nodeTags.get(0); // a barrier edge has the restriction in both nodes and the tags are the same -> get(0)
        firstValue = getFirstPriorityNodeTag(firstNodeTags, restrictionKeys);
        String barrierValue = firstNodeTags.containsKey("barrier") ? (String) firstNodeTags.get("barrier") : "";
        if (restrictedValues.contains(firstValue) || barriers.contains(barrierValue)
            || "yes".equals(firstNodeTags.get("locked")) && !INTENDED.contains(firstValue))
            return;
    }

    if (FerrySpeedCalculator.isFerry(way)) {
        boolean isCar = restrictionKeys.contains("motorcar");
        if (INTENDED.contains(firstValue)
            // implied default is allowed only if foot and bicycle is not specified:
            || isCar && firstValue.isEmpty() && !way.hasTag("foot") && !way.hasTag("bicycle")
            // if hgv is allowed then smaller trucks and cars are allowed too even if not specified
            || isCar && way.hasTag("hgv", "yes")) {
            accessEnc.setBool(false, edgeId, edgeIntAccess, true);
            accessEnc.setBool(true, edgeId, edgeIntAccess, true);
        }
    } else {
        boolean isRoundabout = roundaboutEnc.getBool(false, edgeId, edgeIntAccess);
        boolean ignoreOneway = "no".equals(way.getFirstValue(ignoreOnewayKeys));
        boolean isBwd = isBackwardOneway(way);
        if (!ignoreOneway && (isBwd || isRoundabout || isForwardOneway(way))) {
            // One-directional access only.
            accessEnc.setBool(isBwd, edgeId, edgeIntAccess, true);
        } else {
            accessEnc.setBool(false, edgeId, edgeIntAccess, true);
            accessEnc.setBool(true, edgeId, edgeIntAccess, true);
        }
    }
}
// Conditional (temporal) restrictions must not block access, while a plain
// restriction with only a conditional exception still blocks it.
@Test
public void temporalAccess() {
    int edgeId = 0;
    ArrayEdgeIntAccess access = new ArrayEdgeIntAccess(1);
    ReaderWay way = new ReaderWay(1);
    way.setTag("highway", "primary");
    way.setTag("access:conditional", "no @ (May - June)");
    parser.handleWayTags(edgeId, access, way, null);
    assertTrue(busAccessEnc.getBool(false, edgeId, access));

    access = new ArrayEdgeIntAccess(1);
    way = new ReaderWay(1);
    way.setTag("highway", "primary");
    way.setTag("psv:conditional", "no @ (May - June)");
    parser.handleWayTags(edgeId, access, way, null);
    assertTrue(busAccessEnc.getBool(false, edgeId, access));

    access = new ArrayEdgeIntAccess(1);
    way = new ReaderWay(1);
    way.setTag("highway", "primary");
    way.setTag("psv", "no");
    way.setTag("access:conditional", "yes @ (May - June)");
    parser.handleWayTags(edgeId, access, way, null);
    assertFalse(busAccessEnc.getBool(false, edgeId, access));

    access = new ArrayEdgeIntAccess(1);
    way = new ReaderWay(1);
    way.setTag("highway", "primary");
    way.setTag("access", "no");
    way.setTag("psv:conditional", "yes @ (May - June)");
    parser.handleWayTags(edgeId, access, way, null);
    assertTrue(busAccessEnc.getBool(false, edgeId, access));
}
/**
 * Computes the mutual information between two sets of example rows by
 * adapting each set to a row list and delegating to the list-based overload.
 */
public static <T1,T2> double mi(Set<List<T1>> first, Set<List<T2>> second) {
    List<Row<T1>> firstList = new RowList<>(first);
    List<Row<T2>> secondList = new RowList<>(second);
    return mi(firstList,secondList);
}
// Pins the MI value of two fixed discrete samples to 13 decimal places.
@Test
public void testMi() {
    List<Integer> a = Arrays.asList(0, 3, 2, 3, 4, 4, 4, 1, 3, 3, 4, 3, 2, 3, 2, 4, 2, 2, 1, 4, 1, 2, 0, 4, 4, 4, 3, 3, 2, 2, 0, 4, 0, 1, 3, 0, 4, 0, 0, 4, 0, 0, 2, 2, 2, 2, 0, 3, 0, 2, 2, 3, 1, 0, 1, 0, 3, 4, 4, 4, 0, 1, 1, 3, 3, 1, 3, 4, 0, 3, 4, 1, 0, 3, 2, 2, 2, 1, 1, 2, 3, 2, 1, 3, 0, 4, 4, 0, 4, 0, 2, 1, 4, 0, 3, 0, 1, 1, 1, 0);
    List<Integer> b = Arrays.asList(4, 2, 4, 0, 4, 4, 3, 3, 3, 2, 2, 0, 1, 3, 2, 1, 2, 0, 0, 4, 3, 3, 0, 1, 1, 1, 1, 4, 4, 4, 3, 1, 0, 0, 0, 1, 4, 1, 1, 1, 3, 3, 1, 2, 3, 0, 4, 0, 2, 3, 4, 2, 3, 2, 1, 0, 2, 4, 2, 2, 4, 1, 2, 4, 3, 1, 1, 1, 3, 0, 2, 3, 2, 0, 1, 0, 0, 4, 0, 3, 0, 0, 0, 1, 3, 2, 3, 4, 2, 4, 1, 0, 3, 3, 0, 2, 1, 0, 4, 1);
    assertEquals(0.15688780624148022, InformationTheory.mi(a,b),1e-13);
}
/**
 * Sets the delay, in seconds, before map store writes are applied.
 *
 * @return this config, for chaining
 */
public MapStoreConfig setWriteDelaySeconds(int writeDelaySeconds) {
    this.writeDelaySeconds = writeDelaySeconds;
    return this;
}
// Setter must store the value and participate in equals().
@Test
public void setWriteDelaySeconds() {
    assertEquals(DEFAULT_WRITE_DELAY_SECONDS + 1, cfgNonDefaultWriteDelaySeconds.getWriteDelaySeconds());
    assertEquals(new MapStoreConfig().setWriteDelaySeconds(DEFAULT_WRITE_DELAY_SECONDS + 1), cfgNonDefaultWriteDelaySeconds);
}
/**
 * Returns the element at {@code index}. The raw storage holds Objects; this
 * cast is the single point where the element type parameter is reintroduced.
 */
@SuppressWarnings("unchecked")
public E get(int index) {
    return (E) storage.get(index);
}
// Exercises the array storage across dense/sparse representations, gaps,
// clearing, and signed-boundary indices, verifying contents after each step.
@Test
public void testGet() {
    // try empty array
    verify(0);
    verify(1000);
    verify(-1);
    verify();

    // try dense
    for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
        set(i);
        verify(i);
        verify();
        set(i, 100 + i);
        verify(i);
        verify();
    }

    // go sparse
    for (int i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SPARSE_SIZE; ++i) {
        set(i);
        verify(i);
        verify();
        set(i, 100 + i);
        verify(i);
        verify();
    }

    // clear everything we have added
    for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
        clear(i);
        verify(i);
        verify();
    }
    for (int i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SPARSE_SIZE; ++i) {
        clear(i);
        verify(i);
        verify();
    }

    // test empty again
    verify(0);
    verify(1000);
    verify(-1);
    verify();

    // try gaps
    for (int i = 0; i < 1000; ++i) {
        set(i * i);
        verify(i * i);
        verify();
        set(i * i, 100 + i * i);
        verify(i * i);
        verify();
    }

    // try larger gaps
    for (int i = (int) Math.sqrt(Integer.MAX_VALUE) - 1000; i < (int) Math.sqrt(Integer.MAX_VALUE); ++i) {
        set(i * i);
        verify(i * i);
        verify();
        set(i * i, 100 + i * i);
        verify(i * i);
        verify();
    }

    // try some edge cases
    for (int i = -2; i <= 2; ++i) {
        set(i);
        verify(i);
        verify();
    }
    for (int i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
        set(i);
        verify(i);
        verify();
    }
    for (int i = Short.MIN_VALUE - 2; i <= Short.MIN_VALUE + 2; ++i) {
        set(i);
        verify(i);
        verify();
    }
    // long loop variables avoid int overflow when stepping past Integer.MAX/MIN.
    for (long i = (long) Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
        set((int) i);
        verify((int) i);
        verify();
    }
    for (long i = (long) Integer.MIN_VALUE - 2; i <= (long) Integer.MIN_VALUE + 2; ++i) {
        set((int) i);
        verify((int) i);
        verify();
    }
}
/**
 * Coerces the left/right typed expressions of a comparison to compatible
 * types. Same-class and unification expressions pass through unchanged;
 * incompatible classes throw. Otherwise the right side is rewritten by the
 * first applicable rule (literal-number coercion, string coercion,
 * narrowing cast, long/int widening, date/time parsing, map cast, boolean
 * coercion) and the left side may be stringified for Character comparisons.
 * The order of the else-if chain is significant.
 */
public CoercedExpressionResult coerce() {
    final Class<?> leftClass = left.getRawClass();
    final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
    final Class<?> rightClass = right.getRawClass();
    final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);

    boolean sameClass = leftClass == rightClass;
    boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;

    if (sameClass || isUnificationExpression) {
        // Nothing to coerce.
        return new CoercedExpressionResult(left, right);
    }

    if (!canCoerce()) {
        throw new CoercedExpressionException(new InvalidExpressionErrorResult("Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
    }

    // int/long vs double: widen the left side to double instead of touching the right.
    if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
        CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
        return new CoercedExpressionResult(
            new TypedExpression(castExpression, double.class, left.getType()),
            right,
            false);
    }

    final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom( leftClass );
    final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);

    boolean rightAsStaticField = false;
    final Expression rightExpression = right.getExpression();
    final TypedExpression coercedRight;
    if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
        final Expression coercedLiteralNumberExprToType = coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
        coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
        coercedRight.setType( leftClass );
    } else if (shouldCoerceBToString(left, right)) {
        coercedRight = coerceToString(right);
    } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
        coercedRight = castToClass(leftClass);
    } else if (leftClass == long.class && rightClass == int.class) {
        coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
    } else if (leftClass == Date.class && rightClass == String.class) {
        // Date/time parses happen once and are hoisted to a static field.
        coercedRight = coerceToDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDate.class && rightClass == String.class) {
        coercedRight = coerceToLocalDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDateTime.class && rightClass == String.class) {
        coercedRight = coerceToLocalDateTime(right);
        rightAsStaticField = true;
    } else if (shouldCoerceBToMap()) {
        coercedRight = castToClass(toNonPrimitiveType(leftClass));
    } else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
        coercedRight = coerceBoolean(right);
    } else {
        coercedRight = right;
    }

    final TypedExpression coercedLeft;
    if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
        // Character vs String comparisons stringify the left side too.
        coercedLeft = coerceToString(left);
    } else {
        coercedLeft = left;
    }

    return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
}
// When the left side is an int, a String literal on the right must be
// coerced to an int literal rather than the int being stringified.
@Test
public void avoidCoercingStrings2() {
    final TypedExpression left = expr(THIS_PLACEHOLDER + ".getAge()", int.class);
    final TypedExpression right = expr("\"50\"", String.class);
    final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce();
    assertThat(coerce.getCoercedRight()).isEqualTo(expr("50", int.class));
}
/**
 * Iteratively visits a (possibly recursive) schema graph without using the
 * call stack. Non-terminal schemas (records, arrays, maps, unions) push an
 * after-visit callback plus their children; already-seen schemas are visited
 * as terminals, which makes cyclic schemas safe. Visitor actions control
 * sibling skipping and early termination.
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
    // Set of Visited Schemas
    IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
    // Stack that contains the Schemas to process and afterVisitNonTerminal
    // functions.
    // Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
    // Using Either<...> has a cost we want to avoid...
    Deque<Object> dq = new ArrayDeque<>();
    dq.push(start);
    Object current;
    while ((current = dq.poll()) != null) {
        if (current instanceof Supplier) {
            // We are executing a non-terminal post visit.
            SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
            switch (action) {
            case CONTINUE:
                break;
            case SKIP_SIBLINGS:
                // Drop pending sibling schemas; stop at the next post-visit callback.
                while (dq.peek() instanceof Schema) {
                    dq.remove();
                }
                break;
            case TERMINATE:
                return visitor.get();
            case SKIP_SUBTREE:
            default:
                throw new UnsupportedOperationException("Invalid action " + action);
            }
        } else {
            Schema schema = (Schema) current;
            boolean terminate;
            if (visited.containsKey(schema)) {
                // Revisits of a known schema are treated as terminals (cycle guard).
                terminate = visitTerminal(visitor, schema, dq);
            } else {
                Schema.Type type = schema.getType();
                switch (type) {
                case ARRAY:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
                    visited.put(schema, schema);
                    break;
                case RECORD:
                    // Fields are pushed in reverse so they pop in declaration order.
                    terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
                        .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
                    visited.put(schema, schema);
                    break;
                case UNION:
                    terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
                    visited.put(schema, schema);
                    break;
                case MAP:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
                    visited.put(schema, schema);
                    break;
                default:
                    terminate = visitTerminal(visitor, schema, dq);
                    break;
                }
            }
            if (terminate) {
                return visitor.get();
            }
        }
    }
    return visitor.get();
}
// A record with one int field must be visited as record-then-field.
@Test
public void testVisit2() {
    String s2 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": ["
        + "{\"name\": \"f1\", \"type\": \"int\"}" + "]}";
    Assert.assertEquals("c1.\"int\"!", Schemas.visit(new Schema.Parser().parse(s2), new TestVisitor()));
}
// Factory hook: each call returns a fresh, independent rule configuration.
@Override
public RuleConfig newRuleConfig() {
    return new RuleConfigImpl();
}
// Listeners registered on the RuleConfig must all fire during rule execution,
// in the expected order, for agenda, runtime and rule events.
@Test
public void addEventListeners() {
    TestAgendaEventListener testAgendaEventListener = new TestAgendaEventListener();
    TestRuleRuntimeEventListener testRuleRuntimeEventListener = new TestRuleRuntimeEventListener();
    TestRuleEventListener testRuleEventListener = new TestRuleEventListener();

    RuleConfig ruleConfig = RuleUnitProvider.get().newRuleConfig();
    ruleConfig.getAgendaEventListeners().add(testAgendaEventListener);
    ruleConfig.getRuleRuntimeListeners().add(testRuleRuntimeEventListener);
    ruleConfig.getRuleEventListeners().add(testRuleEventListener);

    HelloWorldUnit unit = new HelloWorldUnit();
    unit.getStrings().add("Hello World");

    try (RuleUnitInstance<HelloWorldUnit> unitInstance = RuleUnitProvider.get().createRuleUnitInstance(unit, ruleConfig)) {
        assertThat(unitInstance.fire()).isEqualTo(1);
        assertThat(unit.getResults()).containsExactly("it worked!");

        assertThat(testAgendaEventListener.getResults()).containsExactly("matchCreated : HelloWorld",
            "beforeMatchFired : HelloWorld", "afterMatchFired : HelloWorld");
        assertThat(testRuleRuntimeEventListener.getResults()).containsExactly("objectInserted : Hello World");
        assertThat(testRuleEventListener.getResults()).containsExactly("onBeforeMatchFire : HelloWorld",
            "onAfterMatchFire : HelloWorld");
    }
}
/** Delegates to the wrapped expression; a missing expression is never an action. */
@Override
public boolean isAction() {
    return expression != null && expression.isAction();
}
// Verifies isAction() delegates to the wrapped expression on every call
// (stubbed to return true then false) and performs no other interactions.
@Test
public void isAction() {
    when(expr.isAction()).thenReturn(true).thenReturn(false);
    assertTrue(test.isAction());
    assertFalse(test.isAction());
    verify(expr, times(2)).isAction();
    verifyNoMoreInteractions(expr);
}
/**
 * Builds an {@link ScmInfo} from report changesets by materializing the
 * changeset of every line index into a flat array.
 */
public static ScmInfo create(ScannerReport.Changesets changesets) {
    requireNonNull(changesets);
    final int lineCount = changesets.getChangesetIndexByLineCount();
    final LineIndexToChangeset indexToChangeset = new LineIndexToChangeset(changesets);
    final Changeset[] changesetsByLine = new Changeset[lineCount];
    for (int line = 0; line < lineCount; line++) {
        changesetsByLine[line] = indexToChangeset.apply(line);
    }
    return new ScmInfoImpl(changesetsByLine);
}
// An empty Changesets message must be rejected (ScmInfoImpl refuses empty data).
@Test
public void fail_with_ISE_when_no_changeset() {
    assertThatThrownBy(() -> ReportScmInfo.create(ScannerReport.Changesets.newBuilder().build()))
        .isInstanceOf(IllegalStateException.class)
        .hasMessageContaining("ScmInfo cannot be empty");
}
/** Resource-bundle prefix: this class's package name followed by ".Dropbox". */
@Override
public String getPrefix() {
    final String packageName = this.getClass().getPackage().getName();
    return packageName + ".Dropbox";
}
// Pins the exact prefix string derived from the protocol's package name.
@Test
public void testPrefix() {
    assertEquals("ch.cyberduck.core.dropbox.Dropbox", new DropboxProtocol().getPrefix());
}
/**
 * Parses an XEP-0420 &lt;content/&gt; element: known affix children (to, from,
 * time, rpad) and the payload are dispatched to dedicated handlers; any other
 * start tag is treated as a custom affix. Stops when the closing tag at the
 * element's own depth is reached.
 */
@Override
public ContentElement parse(XmlPullParser parser, int initialDepth, XmlEnvironment xmlEnvironment)
        throws XmlPullParserException, IOException, ParseException, SmackParsingException {
    ContentElement.Builder builder = ContentElement.builder();
    while (true) {
        XmlPullParser.Event tag = parser.next();
        if (tag == XmlPullParser.Event.START_ELEMENT) {
            String name = parser.getName();
            switch (name) {
                case ToAffixElement.ELEMENT:
                    parseToAffix(parser, builder);
                    break;
                case FromAffixElement.ELEMENT:
                    parseFromAffix(parser, builder);
                    break;
                case TimestampAffixElement.ELEMENT:
                    parseTimestampAffix(parser, builder);
                    break;
                case RandomPaddingAffixElement.ELEMENT:
                    parseRPadAffix(parser, builder);
                    break;
                case PayloadElement.ELEMENT:
                    parsePayload(parser, xmlEnvironment, builder);
                    break;
                default:
                    // Unknown element names are preserved as custom affixes.
                    parseCustomAffix(parser, xmlEnvironment, builder);
                    break;
            }
        } else if (tag == XmlPullParser.Event.END_ELEMENT) {
            // Only the end tag of the <content/> element itself terminates the loop;
            // nested end tags are at a greater depth.
            if (parser.getDepth() == initialDepth) {
                break;
            }
        }
    }
    return builder.build();
}
// Parses a full <content/> stanza and verifies all four affixes plus both
// payload items (message body and an OOB extension) survive the round trip.
@Test
public void testParsing() throws XmlPullParserException, IOException, SmackParsingException, ParseException {
    String xml = "" +
            "<content xmlns='urn:xmpp:sce:0'>\n" +
            "  <payload>\n" +
            "    <body xmlns='jabber:client'>Have you seen that new movie?</body>\n" +
            "    <x xmlns='jabber:x:oob'>\n" +
            "      <url>https://en.wikipedia.org/wiki/Fight_Club#Plot</url>\n" +
            "    </x>\n" +
            "  </payload>\n" +
            "  <from jid='ladymacbeth@shakespear.lit/castle'/>\n" +
            "  <to jid='doctor@shakespeare.lit/pda'/>\n" +
            "  <time stamp='1993-10-12T03:13:10.000+00:00'/>\n" +
            "  <rpad>A98D7KJF1ASDVG232sdff341</rpad>\n" +
            "</content>";
    ContentElementProvider provider = new ContentElementProvider();
    ContentElement contentElement = provider.parse(TestUtils.getParser(xml));
    assertNotNull(contentElement);
    assertEquals(4, contentElement.getAffixElements().size());
    assertTrue(contentElement.getAffixElements().contains(
            new FromAffixElement(JidCreate.from("ladymacbeth@shakespear.lit/castle"))));
    assertTrue(contentElement.getAffixElements().contains(
            new ToAffixElement(JidCreate.from("doctor@shakespeare.lit/pda"))));
    assertTrue(contentElement.getAffixElements().contains(
            new TimestampAffixElement(ParserUtils.getDateFromXep82String("1993-10-12T03:13:10.000+00:00"))));
    assertTrue(contentElement.getAffixElements().contains(
            new RandomPaddingAffixElement("A98D7KJF1ASDVG232sdff341")));
    assertEquals(2, contentElement.getPayload().getItems().size());
    assertTrue(contentElement.getPayload().getItems().get(0) instanceof Message.Body);
    Message.Body body = (Message.Body) contentElement.getPayload().getItems().get(0);
    assertEquals("Have you seen that new movie?", body.getMessage());
    StandardExtensionElement oob = (StandardExtensionElement) contentElement.getPayload().getItems().get(1);
    assertEquals("x", oob.getElementName());
    assertEquals("jabber:x:oob", oob.getNamespace());
    assertEquals("https://en.wikipedia.org/wiki/Fight_Club#Plot", oob.getFirstElement("url").getText());
}
/**
 * Lists all files under {@code directory}, following symbolic links, that match
 * {@code fileFilter}.
 *
 * @param directory directory to scan; must exist and be a directory
 * @param fileFilter predicate selecting the files to return
 * @return the matching file paths
 * @throws IOException if the file tree walk fails
 * @throws IllegalArgumentException if {@code directory} does not exist or is not a directory
 */
public static Collection<java.nio.file.Path> listFilesInDirectory(
        final java.nio.file.Path directory, final Predicate<java.nio.file.Path> fileFilter)
        throws IOException {
    checkNotNull(directory, "directory");
    checkNotNull(fileFilter, "fileFilter");
    if (!Files.exists(directory)) {
        // Fixed typo in the error message: "dose" -> "does".
        throw new IllegalArgumentException(
                String.format("The directory %s does not exist.", directory));
    }
    if (!Files.isDirectory(directory)) {
        throw new IllegalArgumentException(
                String.format("The %s is not a directory.", directory));
    }
    final FilterFileVisitor filterFileVisitor = new FilterFileVisitor(fileFilter);
    // FOLLOW_LINKS so symlinked directories are traversed as well.
    Files.walkFileTree(
            directory,
            EnumSet.of(FileVisitOption.FOLLOW_LINKS),
            Integer.MAX_VALUE,
            filterFileVisitor);
    return filterFileVisitor.getFiles();
}
// Listing through a symlinked directory must follow the link and report paths
// rooted at the link, not at the link target.
@Test
void testFollowSymbolicDirectoryLink() throws IOException {
    final File directory = TempDirUtils.newFolder(temporaryFolder, "a");
    final File file = new File(directory, "a.jar");
    assertThat(file.createNewFile()).isTrue();
    final File otherDirectory = TempDirUtils.newFolder(temporaryFolder);
    java.nio.file.Path linkPath = Paths.get(otherDirectory.getPath(), "a.lnk");
    Files.createSymbolicLink(linkPath, directory.toPath());
    Collection<java.nio.file.Path> paths = FileUtils.listFilesInDirectory(linkPath, FileUtils::isJarFile);
    assertThat(paths).containsExactlyInAnyOrder(linkPath.resolve(file.getName()));
}
/**
 * Supplies a new left-join processor wired to the right-hand table's value
 * getter; a fresh getter is obtained per processor instance.
 */
@Override
public Processor<K, Change<V1>, K, Change<VOut>> get() {
    return new KTableKTableLeftJoinProcessor(valueGetterSupplier2.get());
}
// A record with a null key must be skipped by the left-join processor and a
// warning containing the record's topic/partition/offset must be logged.
@Test
public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() {
    final StreamsBuilder builder = new StreamsBuilder();
    @SuppressWarnings("unchecked")
    final Processor<String, Change<String>, String, Change<Object>> join = new KTableKTableLeftJoin<>(
            (KTableImpl<String, String, String>) builder.table("left", Consumed.with(Serdes.String(), Serdes.String())),
            (KTableImpl<String, String, String>) builder.table("right", Consumed.with(Serdes.String(), Serdes.String())),
            null
    ).get();
    final MockProcessorContext<String, Change<Object>> context = new MockProcessorContext<>(props);
    context.setRecordMetadata("left", -1, -2);
    join.init(context);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableLeftJoin.class)) {
        join.process(new Record<>(null, new Change<>("new", "old"), 0));
        assertThat(
                appender.getMessages(),
                hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]")
        );
    }
}
/**
 * Validates the batch and assigns offsets, choosing the cheapest path:
 * uncompressed input and output with a matching magic can be validated
 * in place; a magic mismatch forces a rewrite; anything involving
 * compression goes through the compressed path.
 */
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
                                                         MetricsRecorder metricsRecorder,
                                                         BufferSupplier bufferSupplier) {
    if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
        // check the magic value
        if (!records.hasMatchingMagic(toMagic))
            return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
        else
            // Do in-place validation, offset assignment and maybe set timestamp
            return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
    } else
        return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
// Uncompressed V0 records start at relative offset 0 and must be reassigned to
// the absolute base offset handed to the validator.
@Test
public void testAbsoluteOffsetAssignmentNonCompressed() {
    MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V0, RecordBatch.NO_TIMESTAMP, Compression.NONE);
    long offset = 1234567;
    checkOffsets(records, 0);
    checkOffsets(
        new LogValidator(
            records,
            topicPartition,
            time,
            CompressionType.NONE,
            Compression.NONE,
            false,
            RecordBatch.MAGIC_VALUE_V0,
            TimestampType.CREATE_TIME,
            1000L,
            1000L,
            RecordBatch.NO_PARTITION_LEADER_EPOCH,
            AppendOrigin.CLIENT,
            MetadataVersion.latestTesting()
        ).validateMessagesAndAssignOffsets(
            PrimitiveRef.ofLong(offset),
            metricsRecorder,
            RequestLocal.withThreadConfinedCaching().bufferSupplier()
        ).validatedRecords,
        offset
    );
}
@SuppressWarnings("unchecked") public Output run(RunContext runContext) throws Exception { Logger logger = runContext.logger(); try (HttpClient client = this.client(runContext, this.method)) { HttpRequest<String> request = this.request(runContext); HttpResponse<String> response; try { response = client .toBlocking() .exchange(request, Argument.STRING, Argument.STRING); // check that the string is a valid Unicode string if (response.getBody().isPresent()) { OptionalInt illegalChar = response.body().chars().filter(c -> !Character.isDefined(c)).findFirst(); if (illegalChar.isPresent()) { throw new IllegalArgumentException("Illegal unicode code point in request body: " + illegalChar.getAsInt() + ", the Request task only support valid Unicode strings as body.\n" + "You can try using the Download task instead."); } } } catch (HttpClientResponseException e) { if (!allowFailed) { throw e; } //noinspection unchecked response = (HttpResponse<String>) e.getResponse(); } logger.debug("Request '{}' with the response code '{}'", request.getUri(), response.getStatus().getCode()); return this.output(runContext, request, response); } }
// Posts a multipart form with a text field plus a file fetched from internal
// storage, and verifies the server echoes both parts back.
@Test
void multipart() throws Exception {
    File file = new File(Objects.requireNonNull(RequestTest.class.getClassLoader().getResource("application-test.yml")).toURI());
    URI fileStorage = storageInterface.put(
        null,
        new URI("/" + FriendlyId.createFriendlyId()),
        new FileInputStream(file)
    );
    try (
        ApplicationContext applicationContext = ApplicationContext.run();
        EmbeddedServer server = applicationContext.getBean(EmbeddedServer.class).start();
    ) {
        Request task = Request.builder()
            .id(RequestTest.class.getSimpleName())
            .type(RequestTest.class.getName())
            .method(HttpMethod.POST)
            .contentType(MediaType.MULTIPART_FORM_DATA)
            .uri(server.getURL().toString() + "/post/multipart")
            .formData(ImmutableMap.of("hello", "world", "file", fileStorage.toString()))
            .build();
        RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, task, ImmutableMap.of());
        Request.Output output = task.run(runContext);
        assertThat(output.getBody(), is("world > " + IOUtils.toString(new FileInputStream(file), Charsets.UTF_8)));
        assertThat(output.getCode(), is(200));
    }
}
/** Logs the incoming statement, then runs it and returns the SQL result. */
@Override
public InterpreterResult interpret(String sql, InterpreterContext contextInterpreter) {
    logger.info("Run SQL command '{}'", sql);
    final InterpreterResult result = executeSql(sql);
    return result;
}
// A valid query must succeed and yield a TABLE-typed result message.
@Test
void sqlSuccess() {
    InterpreterResult ret = bqInterpreter.interpret(constants.getOne(), context);
    assertEquals(InterpreterResult.Code.SUCCESS, ret.code());
    assertEquals(InterpreterResult.Type.TABLE, ret.message().get(0).getType());
}
/** Static utility class; private constructor prevents instantiation. */
private ArrayAccess() {
}
// Array access in ksqlDB is 1-indexed: index 1 returns the first element.
@Test
public void shouldBeOneIndexed() {
    // Given:
    final List<Integer> list = ImmutableList.of(1, 2);
    // When:
    final Integer access = ArrayAccess.arrayAccess(list, 1);
    // Then:
    assertThat(access, is(1));
}
/**
 * Deletes topics addressed either by id or by name, dispatching on the concrete
 * {@link TopicCollection} subtype; any other subtype is rejected.
 */
@Override
public DeleteTopicsResult deleteTopics(final TopicCollection topics, final DeleteTopicsOptions options) {
    if (topics instanceof TopicIdCollection)
        return DeleteTopicsResult.ofTopicIds(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options));
    else if (topics instanceof TopicNameCollection)
        return DeleteTopicsResult.ofTopicNames(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options));
    else
        throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for deleteTopics.");
}
// Throttled deletions are retried while retryOnQuotaViolation is enabled, but the
// retry must give up with ThrottlingQuotaExceededException once the default API
// timeout elapses. Exercised for both name-based and id-based deletion.
@Test
public void testDeleteTopicsRetryThrottlingExceptionWhenEnabledUntilRequestTimeOut() throws Exception {
    long defaultApiTimeout = 60000;
    MockTime time = new MockTime();
    try (AdminClientUnitTestEnv env = mockClientEnv(time, AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, String.valueOf(defaultApiTimeout))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic1", "topic2", "topic3"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic1", Errors.NONE),
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic2"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED)));
        DeleteTopicsResult result = env.adminClient().deleteTopics(
            asList("topic1", "topic2", "topic3"),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));
        // Wait until the prepared attempts have consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0,
            "Failed awaiting DeleteTopics requests");
        // Wait until the next request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1,
            "Failed awaiting next DeleteTopics request");
        // Advance time past the default api timeout to time out the inflight request
        time.sleep(defaultApiTimeout + 1);
        assertNull(result.topicNameValues().get("topic1").get());
        ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.topicNameValues().get("topic2"),
            ThrottlingQuotaExceededException.class);
        assertEquals(0, e.throttleTimeMs());
        TestUtils.assertFutureThrows(result.topicNameValues().get("topic3"),
            TopicExistsException.class);
        // With topic IDs
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        Uuid topicId3 = Uuid.randomUuid();
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2, topicId3),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId1, Errors.NONE),
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResultWithId(topicId3, Errors.UNKNOWN_TOPIC_ID)));
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId2),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED)));
        DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
            TopicCollection.ofTopicIds(asList(topicId1, topicId2, topicId3)),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));
        // Wait until the prepared attempts have consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0,
            "Failed awaiting DeleteTopics requests");
        // Wait until the next request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1,
            "Failed awaiting next DeleteTopics request");
        // Advance time past the default api timeout to time out the inflight request
        time.sleep(defaultApiTimeout + 1);
        assertNull(resultIds.topicIdValues().get(topicId1).get());
        e = TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2),
            ThrottlingQuotaExceededException.class);
        assertEquals(0, e.throttleTimeMs());
        TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId3),
            UnknownTopicIdException.class);
    }
}
/**
 * Parses the bundled "test2.doc" resource into plain-text chunks of at most
 * MAXIMUM_TEXT_CHUNK_SIZE characters, splitting at SAX character-event
 * boundaries (so a chunk may end mid-sentence but never mid-event).
 */
public List<String> parseToPlainTextChunks() throws IOException, SAXException, TikaException {
    final List<String> chunks = new ArrayList<>();
    // Seed with an empty chunk so the handler can always append to the last entry.
    chunks.add("");
    ContentHandlerDecorator handler = new ContentHandlerDecorator() {
        @Override
        public void characters(char[] ch, int start, int length) {
            String lastChunk = chunks.get(chunks.size() - 1);
            String thisStr = new String(ch, start, length);
            // Start a new chunk when appending would exceed the size budget.
            if (lastChunk.length() + length > MAXIMUM_TEXT_CHUNK_SIZE) {
                chunks.add(thisStr);
            } else {
                chunks.set(chunks.size() - 1, lastChunk + thisStr);
            }
        }
    };
    AutoDetectParser parser = new AutoDetectParser();
    Metadata metadata = new Metadata();
    try (InputStream stream = ContentHandlerExample.class.getResourceAsStream("test2.doc")) {
        parser.parse(stream, handler, metadata);
        return chunks;
    }
}
// The sample document must split into exactly three chunks, each within the
// size limit and containing its expected section of the document.
@Test
public void testParseToPlainTextChunks() throws IOException, SAXException, TikaException {
    List<String> result = example.parseToPlainTextChunks();
    assertEquals(3, result.size());
    for (String chunk : result) {
        assertTrue(chunk.length() <= example.MAXIMUM_TEXT_CHUNK_SIZE, "Chunk under max size");
    }
    assertContains("This is in the header", result.get(0));
    assertContains("Test Document", result.get(0));
    assertContains("Testing", result.get(1));
    assertContains("1 2 3", result.get(1));
    assertContains("TestTable", result.get(1));
    assertContains("Testing 123", result.get(2));
}
/**
 * Replays an offset-commit record. A non-null value stores the offset either
 * directly (no producer id) or in the pending-transaction store; a null value
 * (tombstone) removes the committed offset and any matching pending
 * transactional offsets for the group/topic/partition.
 */
public void replay(
    long recordOffset,
    long producerId,
    OffsetCommitKey key,
    OffsetCommitValue value
) {
    final String groupId = key.group();
    final String topic = key.topic();
    final int partition = key.partition();
    if (value != null) {
        // The classic or consumer group should exist when offsets are committed or
        // replayed. However, it won't if the consumer commits offsets but does not
        // use the membership functionality. In this case, we automatically create
        // a so-called "simple consumer group". This is an empty classic group
        // without a protocol type.
        try {
            groupMetadataManager.group(groupId);
        } catch (GroupIdNotFoundException ex) {
            groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, true);
        }
        if (producerId == RecordBatch.NO_PRODUCER_ID) {
            log.debug("Replaying offset commit with key {}, value {}", key, value);
            // If the offset is not part of a transaction, it is directly stored
            // in the offsets store.
            OffsetAndMetadata previousValue = offsets.put(
                groupId,
                topic,
                partition,
                OffsetAndMetadata.fromRecord(recordOffset, value)
            );
            if (previousValue == null) {
                metrics.incrementNumOffsets();
            }
        } else {
            log.debug("Replaying transactional offset commit with producer id {}, key {}, value {}", producerId, key, value);
            // Otherwise, the transaction offset is stored in the pending transactional
            // offsets store. Pending offsets there are moved to the main store when
            // the transaction is committed; or removed when the transaction is aborted.
            pendingTransactionalOffsets
                .computeIfAbsent(producerId, __ -> new Offsets())
                .put(
                    groupId,
                    topic,
                    partition,
                    OffsetAndMetadata.fromRecord(recordOffset, value)
                );
            openTransactionsByGroup
                .computeIfAbsent(groupId, __ -> new TimelineHashSet<>(snapshotRegistry, 1))
                .add(producerId);
        }
    } else {
        if (offsets.remove(groupId, topic, partition) != null) {
            metrics.decrementNumOffsets();
        }
        // Remove all the pending offset commits related to the tombstone.
        TimelineHashSet<Long> openTransactions = openTransactionsByGroup.get(groupId);
        if (openTransactions != null) {
            openTransactions.forEach(openProducerId -> {
                Offsets pendingOffsets = pendingTransactionalOffsets.get(openProducerId);
                if (pendingOffsets != null) {
                    pendingOffsets.remove(groupId, topic, partition);
                }
            });
        }
    }
}
// A tombstone must clear both the committed offset and any pending
// transactional offsets for the same group/topic/partition.
@Test
public void testReplayWithTombstoneAndPendingTransactionalOffsets() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
    // Add the offsets.
    verifyReplay(context, "foo", "bar", 0, new OffsetAndMetadata(
        0L,
        100L,
        OptionalInt.empty(),
        "small",
        context.time.milliseconds(),
        OptionalLong.empty()
    ));
    verifyTransactionalReplay(context, 10L, "foo", "bar", 0, new OffsetAndMetadata(
        1L,
        100L,
        OptionalInt.empty(),
        "small",
        context.time.milliseconds(),
        OptionalLong.empty()
    ));
    verifyTransactionalReplay(context, 10L, "foo", "bar", 1, new OffsetAndMetadata(
        2L,
        100L,
        OptionalInt.empty(),
        "small",
        context.time.milliseconds(),
        OptionalLong.empty()
    ));
    // Delete the offsets.
    context.replay(GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(
        "foo",
        "bar",
        0
    ));
    context.replay(GroupCoordinatorRecordHelpers.newOffsetCommitTombstoneRecord(
        "foo",
        "bar",
        1
    ));
    // Verify that the offset is gone.
    assertFalse(context.hasOffset("foo", "bar", 0));
    assertFalse(context.hasOffset("foo", "bar", 1));
}
/**
 * Extracts the scale parameter from a Connect Decimal schema.
 * Throws KsqlException when the parameter is missing or not an integer.
 */
public static int scale(final Schema schema) {
    requireDecimal(schema);
    final String rawScale =
        schema.parameters().get(org.apache.kafka.connect.data.Decimal.SCALE_FIELD);
    if (rawScale == null) {
        throw new KsqlException("Invalid Decimal schema: scale parameter not found.");
    }
    try {
        return Integer.parseInt(rawScale);
    } catch (final NumberFormatException e) {
        throw new KsqlException("Invalid scale parameter found in Decimal schema: ", e);
    }
}
// The scale declared on the shared decimal test schema must be read back.
@Test
public void shouldExtractScaleFromDecimalSchema() {
    // When:
    final int scale = DecimalUtil.scale(DECIMAL_SCHEMA);
    // Then:
    assertThat(scale, is(1));
}
/** Pages product brands by delegating the query straight to the mapper. */
@Override
public PageResult<ProductBrandDO> getBrandPage(ProductBrandPageReqVO pageReqVO) {
    final PageResult<ProductBrandDO> page = brandMapper.selectPage(pageReqVO);
    return page;
}
// Inserts one matching brand plus three near-misses (wrong name/status/createTime)
// and verifies the page query returns only the matching row.
@Test
public void testGetBrandPage() {
    // mock data
    ProductBrandDO dbBrand = randomPojo(ProductBrandDO.class, o -> { // row that should be found
        o.setName("芋道源码");
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setCreateTime(buildTime(2022, 2, 1));
    });
    brandMapper.insert(dbBrand);
    // name does not match
    brandMapper.insert(cloneIgnoreId(dbBrand, o -> o.setName("源码")));
    // status does not match
    brandMapper.insert(cloneIgnoreId(dbBrand, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // createTime does not match
    brandMapper.insert(cloneIgnoreId(dbBrand, o -> o.setCreateTime(buildTime(2022, 3, 1))));
    // prepare parameters
    ProductBrandPageReqVO reqVO = new ProductBrandPageReqVO();
    reqVO.setName("芋道");
    reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
    reqVO.setCreateTime((new LocalDateTime[]{buildTime(2022, 1, 1), buildTime(2022, 2, 25)}));
    // invoke
    PageResult<ProductBrandDO> pageResult = brandService.getBrandPage(reqVO);
    // assert
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbBrand, pageResult.getList().get(0));
}
/**
 * Returns true when an extension with the given name is registered.
 * A lookup failure of any kind (missing extension, loader error) yields false.
 */
public boolean hasCommand(String commandName) {
    try {
        // The comparison itself cannot throw, so folding it into the try
        // preserves the original semantics exactly.
        return frameworkModel.getExtensionLoader(BaseCommand.class).getExtension(commandName) != null;
    } catch (Throwable throwable) {
        return false;
    }
}
// Known command name is reported present; an unknown one is not.
@Test
void testHasCommand() {
    assertTrue(commandHelper.hasCommand("greeting"));
    assertFalse(commandHelper.hasCommand("not-exiting"));
}
/**
 * Asserts the subject contains exactly the given varargs elements. A null array
 * (a single null literal at the call site) is treated as a one-element list
 * containing null. The trailing flag records the "single Iterable passed to a
 * varargs method" case so the failure message can warn about the likely mistake.
 */
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
    List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
    return containsExactlyElementsIn(
        expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
// Failure rendering: duplicated multi-line elements must be reported with
// copy counts and per-line indexed values.
@Test
public void iterableContainsExactlyWithDuplicatesMissingAndExtraItemsWithNewlineFailure() {
    expectFailureWhenTestingThat(asList("a\nb", "a\nb")).containsExactly("foo\nbar", "foo\nbar");
    assertFailureKeys(
        "missing (2)",
        "#1 [2 copies]",
        "",
        "unexpected (2)",
        "#1 [2 copies]",
        "---",
        "expected",
        "but was");
    assertFailureValueIndexed("#1 [2 copies]", 0, "foo\nbar");
    assertFailureValueIndexed("#1 [2 copies]", 1, "a\nb");
}
/**
 * Formats a TIME value with a DateTimeFormatter pattern; returns null if either
 * argument is null. Formatters are cached by pattern in {@code formatters}.
 * The millis-of-day in the Time value is converted to a LocalTime via
 * nano-of-day before formatting.
 */
@Udf(description = "Converts a TIME value into the"
    + " string representation of the time in the given format."
    + " The format pattern should be in the format expected"
    + " by java.time.format.DateTimeFormatter")
public String formatTime(
    @UdfParameter(
        description = "TIME value.") final Time time,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
    if (time == null || formatPattern == null) {
        return null;
    }
    try {
        final DateTimeFormatter formatter = formatters.get(formatPattern);
        return LocalTime.ofNanoOfDay(TimeUnit.MILLISECONDS.toNanos(time.getTime())).format(formatter);
    } catch (ExecutionException | RuntimeException e) {
        // Re-derive the LocalTime for the message (1_000_000 ns per ms).
        throw new KsqlFunctionException("Failed to format time "
            + LocalTime.ofNanoOfDay(time.getTime() * 1000000)
            + " with formatter '" + formatPattern
            + "': " + e.getMessage(), e);
    }
}
// A null TIME input must yield a null result rather than an exception.
@Test
public void shoudlReturnNull() {
    // When:
    final Object result = udf.formatTime(null, "HH:mm:ss.SSS");
    // Then:
    assertNull(result);
}
/**
 * Resolves a setting: a JVM system property wins over the configured
 * properties, which in turn win over the supplied default.
 */
public String getString(@NotNull final String key, @Nullable final String defaultValue) {
    final String configured = props.getProperty(key, defaultValue);
    return System.getProperty(key, configured);
}
// The default datafeed validity setting must end with the expected value.
@Test
public void testGetString() {
    String key = Settings.KEYS.NVD_API_DATAFEED_VALID_FOR_DAYS;
    String expResult = "7";
    String result = getSettings().getString(key);
    Assert.assertTrue(result.endsWith(expResult));
}
/** Tallies the single top-level error code of this response into a count map. */
@Override
public Map<Errors, Integer> errorCounts() {
    final HashMap<Errors, Integer> tally = new HashMap<>();
    final Errors error = Errors.forCode(data.errorCode());
    updateErrorCounts(tally, error);
    return tally;
}
// A response carrying NONE must report exactly one NONE in its error counts.
@Test
public void testErrorCountsReturnsNoneWhenNoErrors() {
    GetTelemetrySubscriptionsResponseData data = new GetTelemetrySubscriptionsResponseData()
        .setErrorCode(Errors.NONE.code());
    GetTelemetrySubscriptionsResponse response = new GetTelemetrySubscriptionsResponse(data);
    assertEquals(Collections.singletonMap(Errors.NONE, 1), response.errorCounts());
}
/**
 * Enriches a connector ConfigDef with the configs of every Transformation and
 * Predicate referenced in {@code props}. Transformations additionally get the
 * implicit {@code predicate}/{@code negate} parameters, which mask any
 * identically-named configs declared by the transformation itself; a
 * {@code negate} without a matching {@code predicate} is rejected.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
public static ConfigDef enrich(Plugins plugins, ConfigDef baseConfigDef, Map<String, String> props, boolean requireFullConfig) {
    ConfigDef newDef = new ConfigDef(baseConfigDef);
    new EnrichablePlugin<Transformation<?>>("Transformation", TRANSFORMS_CONFIG, TRANSFORMS_GROUP,
            (Class) Transformation.class, props, requireFullConfig) {
        @Override
        protected Set<PluginDesc<Transformation<?>>> plugins() {
            return plugins.transformations();
        }
        @Override
        protected ConfigDef initialConfigDef() {
            // All Transformations get these config parameters implicitly
            return super.initialConfigDef()
                    .define(TransformationStage.PREDICATE_CONFIG, Type.STRING, null, Importance.MEDIUM,
                            "The alias of a predicate used to determine whether to apply this transformation.")
                    .define(TransformationStage.NEGATE_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM,
                            "Whether the configured predicate should be negated.");
        }
        @Override
        protected Stream<Map.Entry<String, ConfigDef.ConfigKey>> configDefsForClass(String typeConfig) {
            return super.configDefsForClass(typeConfig)
                    .filter(entry -> {
                        // The implicit parameters mask any from the transformer with the same name
                        if (TransformationStage.PREDICATE_CONFIG.equals(entry.getKey())
                                || TransformationStage.NEGATE_CONFIG.equals(entry.getKey())) {
                            log.warn("Transformer config {} is masked by implicit config of that name", entry.getKey());
                            return false;
                        } else {
                            return true;
                        }
                    });
        }
        @Override
        protected ConfigDef config(Transformation<?> transformation) {
            return transformation.config();
        }
        @Override
        protected void validateProps(String prefix) {
            String prefixedNegate = prefix + TransformationStage.NEGATE_CONFIG;
            String prefixedPredicate = prefix + TransformationStage.PREDICATE_CONFIG;
            if (props.containsKey(prefixedNegate) && !props.containsKey(prefixedPredicate)) {
                throw new ConfigException("Config '" + prefixedNegate + "' was provided "
                        + "but there is no config '" + prefixedPredicate + "' defining a predicate to be negated.");
            }
        }
    }.enrich(newDef);
    // Predicates need no implicit parameters or extra validation.
    new EnrichablePlugin<Predicate<?>>("Predicate", PREDICATES_CONFIG, PREDICATES_GROUP,
            (Class) Predicate.class, props, requireFullConfig) {
        @Override
        protected Set<PluginDesc<Predicate<?>>> plugins() {
            return plugins.predicates();
        }
        @Override
        protected ConfigDef config(Predicate<?> predicate) {
            return predicate.config();
        }
    }.enrich(newDef);
    return newDef;
}
// Enrichment must expose the transformation's own config plus the implicit
// predicate/negate parameters under the aliased prefix.
@Test
public void testEnrichedConfigDef() {
    String alias = "hdt";
    String prefix = ConnectorConfig.TRANSFORMS_CONFIG + "." + alias + ".";
    Map<String, String> props = new HashMap<>();
    props.put(ConnectorConfig.TRANSFORMS_CONFIG, alias);
    props.put(prefix + "type", HasDuplicateConfigTransformation.class.getName());
    ConfigDef def = ConnectorConfig.enrich(MOCK_PLUGINS, new ConfigDef(), props, false);
    assertEnrichedConfigDef(def, prefix, HasDuplicateConfigTransformation.MUST_EXIST_KEY, ConfigDef.Type.BOOLEAN);
    assertEnrichedConfigDef(def, prefix, TransformationStage.PREDICATE_CONFIG, ConfigDef.Type.STRING);
    assertEnrichedConfigDef(def, prefix, TransformationStage.NEGATE_CONFIG, ConfigDef.Type.BOOLEAN);
}
/**
 * Builds and starts a RecipientList processor for the dynamic router, wiring in
 * its configuration, executor service and shutdown behavior. The supplier
 * indirection exists so tests can inject a mock RecipientList.
 */
public static Processor createProcessor(
        CamelContext camelContext, DynamicRouterConfiguration cfg,
        BiFunction<CamelContext, Expression, RecipientList> recipientListSupplier) {
    RecipientList recipientList = recipientListSupplier.apply(camelContext, RECIPIENT_LIST_EXPRESSION);
    setPropertiesForRecipientList(recipientList, camelContext, cfg);
    ExecutorService threadPool = getConfiguredExecutorService(camelContext, "RecipientList", cfg, cfg.isParallelProcessing());
    recipientList.setExecutorService(threadPool);
    // Only shut the pool down with the processor if it was created here
    // (rather than provided by configuration).
    recipientList.setShutdownExecutorService(
            willCreateNewThreadPool(camelContext, cfg, cfg.isParallelProcessing()));
    recipientList.start();
    return recipientList;
}
// The helper must return the processor produced by the injected supplier.
@Test
void testCreateProcessor() {
    when(camelContext.getExecutorServiceManager()).thenReturn(manager);
    when(mockRecipientListSupplier.apply(eq(camelContext), any(Expression.class))).thenReturn(recipientList);
    Processor processor = DynamicRouterRecipientListHelper.createProcessor(camelContext, mockConfig, mockRecipientListSupplier);
    Assertions.assertNotNull(processor);
}
/**
 * Resolves the meter type from the endpoint's remaining part: the text before
 * the first ':' names the type; when absent, the component default is used.
 */
Meter.Type getMetricsType(String remaining) {
    final String typeName = StringHelper.before(remaining, ":");
    if (typeName == null) {
        return DEFAULT_METER_TYPE;
    }
    return MicrometerUtils.getByName(typeName);
}
// Without a type segment before ':', the component default type is returned.
@Test
public void testGetMetricsTypeNotSet() {
    assertThat(component.getMetricsType("no-metrics-type"), is(MicrometerComponent.DEFAULT_METER_TYPE));
}
/**
 * Polls the backing file for a new config generation. Returns true when either
 * a user-forced reload or a file modification produced a new config; otherwise
 * sleeps for {@code timeout} ms and returns false.
 */
@Override
public boolean nextConfig(long timeout) {
    file.validateFile();
    if (checkReloaded()) {
        log.log(FINE, () -> "User forced config reload at " + System.currentTimeMillis());
        // User forced reload
        setConfigIfChanged(updateConfig());
        ConfigState<T> configState = getConfigState();
        log.log(FINE, () -> "Config updated at " + System.currentTimeMillis() + ", changed: " + configState.isConfigChanged());
        log.log(FINE, () -> "Config: " + configState.getConfig().toString());
        return true;
    }
    if (file.getLastModified() != ts) {
        // File changed on disk since last read: bump generation.
        setConfigIncGen(updateConfig());
        return true;
    }
    try {
        // No change: block for the requested timeout before reporting "no new config".
        Thread.sleep(timeout);
    } catch (InterruptedException e) {
        throw new ConfigInterruptedException(e);
    }
    return false;
}
// Deleting the config file between subscription and polling must surface as
// an IllegalArgumentException from nextConfig.
@Test(expected = IllegalArgumentException.class)
public void require_that_bad_file_throws_exception() throws IOException {
    // A little trick to ensure that we can create the subscriber, but that we get an error when reading.
    writeConfig("intval", "23");
    ConfigSubscription<SimpletypesConfig> sub = new FileConfigSubscription<>(
            new ConfigKey<>(SimpletypesConfig.class, ""),
            new FileSource(TEST_TYPES_FILE));
    sub.reload(1);
    Files.delete(TEST_TYPES_FILE.toPath()); // delete file so the below statement throws exception
    sub.nextConfig(0);
}
/**
 * Queues the incoming input split and schedules the split-processing mail so
 * records are read via the mailbox rather than inline.
 */
@Override
public void processElement(StreamRecord<FlinkInputSplit> element) {
    splits.add(element.getValue());
    enqueueProcessSplits();
}
// A checkpoint requested while split0 is being read must be processed between
// splits, without losing or reordering any records.
@TestTemplate
public void testTriggerCheckpoint() throws Exception {
    // Received emitted splits: split1, split2, split3, checkpoint request is triggered when reading
    // records from
    // split1.
    List<List<Record>> expectedRecords = generateRecordsAndCommitTxn(3);
    List<FlinkInputSplit> splits = generateSplits();
    assertThat(splits).hasSize(3);
    long timestamp = 0;
    try (OneInputStreamOperatorTestHarness<FlinkInputSplit, RowData> harness = createReader()) {
        harness.setup();
        harness.open();
        SteppingMailboxProcessor processor = createLocalMailbox(harness);
        harness.processElement(splits.get(0), ++timestamp);
        harness.processElement(splits.get(1), ++timestamp);
        harness.processElement(splits.get(2), ++timestamp);
        // Trigger snapshot state, it will start to work once all records from split0 are read.
        processor.getMainMailboxExecutor().execute(() -> harness.snapshot(1, 3), "Trigger snapshot");
        assertThat(processor.runMailboxStep()).as("Should have processed the split0").isTrue();
        assertThat(processor.runMailboxStep())
            .as("Should have processed the snapshot state action")
            .isTrue();
        TestHelpers.assertRecords(readOutputValues(harness), expectedRecords.get(0), SCHEMA);
        // Read records from split1.
        assertThat(processor.runMailboxStep()).as("Should have processed the split1").isTrue();
        // Read records from split2.
        assertThat(processor.runMailboxStep()).as("Should have processed the split2").isTrue();
        TestHelpers.assertRecords(
            readOutputValues(harness), Lists.newArrayList(Iterables.concat(expectedRecords)), SCHEMA);
    }
}
/**
 * Decodes an Aztec symbol: extracts the raw bits from the detected bit matrix,
 * applies Reed-Solomon error correction, and decodes the corrected bit stream
 * into text. The result also carries the EC level (as a percentage string) and
 * the number of corrected errors.
 */
public DecoderResult decode(AztecDetectorResult detectorResult) throws FormatException {
    ddata = detectorResult;
    BitMatrix matrix = detectorResult.getBits();
    boolean[] rawbits = extractBits(matrix);
    CorrectedBitsResult correctedBits = correctBits(rawbits);
    byte[] rawBytes = convertBoolArrayToByteArray(correctedBits.correctBits);
    String result = getEncodedData(correctedBits.correctBits);
    DecoderResult decoderResult = new DecoderResult(rawBytes, result, null, String.format("%d%%", correctedBits.ecLevel));
    decoderResult.setNumBits(correctedBits.correctBits.length);
    decoderResult.setErrorsCorrected(correctedBits.errorsCorrected);
    return decoderResult;
}
// Decodes a hand-encoded Aztec symbol containing an ECI (extended channel
// interpretation) segment and verifies the non-ASCII text decodes correctly.
// The matrix string uses "X " for a set module and "  " for an unset one.
@Test
public void testAztecResultECI() throws FormatException {
  BitMatrix matrix = BitMatrix.parse( "      X     X X X   X           X     \n" + "    X X   X X   X X X X X X X   X     \n" + "    X X                         X   X \n" + "  X X X X X X X X X X X X X X X X X   \n" + "      X                       X       \n" + "      X   X X X X X X X X X   X   X   \n" + "  X X X   X           X   X X X X X X \n" + "  X   X   X   X X X   X   X         X \n" + "      X   X   X   X   X   X   X       \n" + "  X   X   X   X X X   X   X   X X   X \n" + "  X X X   X           X   X X   X     \n" + "X X   X   X X X X X X X X X   X     X \n" + "    X X                       X X X   \n" + "  X X X X X X X X X X X X X X X       \n" + "  X                       X     X   X \n" + "X     X X X X X X X X X X X X X X   X \n" + "X X X X                 X         X   \n" + "      X X X X X X X X X     X X X X  \n" + "X X                 X           X     \n", "X ", "  ");
  // false = compact symbol; 15 data blocks, 1 layer.
  AztecDetectorResult r = new AztecDetectorResult(matrix, NO_POINTS, false, 15, 1);
  DecoderResult result = new Decoder().decode(r);
  assertEquals("Français", result.getText());
}
/**
 * Returns a new source that delegates to {@code source}, applying this
 * {@code AsyncTimeout} to each {@code read} call and to {@code close}.
 *
 * <p>The {@code throwOnTimeout} flag distinguishes the two exit paths: when
 * the delegate completes normally the flag is set so that {@code exit(boolean)}
 * may surface a timeout; when the delegate throws, {@code exit(IOException)}
 * converts the exception instead.
 */
public final Source source(final Source source) {
  return new Source() {
    @Override
    public long read(Buffer sink, long byteCount) throws IOException {
      boolean throwOnTimeout = false;
      // Start the timeout watchdog for the duration of this read.
      enter();
      try {
        long result = source.read(sink, byteCount);
        // Read completed normally; allow exit() to throw if the timeout fired.
        throwOnTimeout = true;
        return result;
      } catch (IOException e) {
        // Map the delegate's exception through the timeout (may rethrow as a
        // timeout-flavored exception if the deadline elapsed).
        throw exit(e);
      } finally {
        exit(throwOnTimeout);
      }
    }

    @Override
    public void close() throws IOException {
      boolean throwOnTimeout = false;
      // NOTE: close() does not call enter(); only the exit bookkeeping applies here.
      try {
        source.close();
        throwOnTimeout = true;
      } catch (IOException e) {
        throw exit(e);
      } finally {
        exit(throwOnTimeout);
      }
    }

    @Override
    public Timeout timeout() {
      // The wrapper's timeout is this AsyncTimeout itself.
      return AsyncTimeout.this;
    }

    @Override
    public String toString() {
      return "AsyncTimeout.source(" + source + ")";
    }
  };
}
// A delegate read that blocks for 500ms must be interrupted by a 250ms
// AsyncTimeout and surface as InterruptedIOException.
@Test
public void wrappedSourceTimesOut() throws Exception {
  Source source = new ForwardingSource(new Buffer()) {
    @Override
    public long read(Buffer sink, long byteCount) throws IOException {
      try {
        // Deliberately exceed the 250ms timeout configured below.
        Thread.sleep(500);
        return -1;
      } catch (InterruptedException e) {
        throw new AssertionError();
      }
    }
  };
  AsyncTimeout timeout = new AsyncTimeout();
  timeout.timeout(250, TimeUnit.MILLISECONDS);
  Source timeoutSource = timeout.source(source);
  try {
    // null sink is deliberate: the fake read never touches it.
    timeoutSource.read(null, 0);
    fail();
  } catch (InterruptedIOException expected) {
  }
}
/**
 * Registers the Sentinel rule handler as a Spring bean so plugin rule data
 * for the Sentinel plugin is processed.
 *
 * @return a freshly constructed {@link SentinelRuleHandle}
 */
@Bean
public PluginDataHandler sentinelRuleHandle() {
    final SentinelRuleHandle sentinelRuleHandle = new SentinelRuleHandle();
    return sentinelRuleHandle;
}
// Verifies the Spring context exposes the sentinelRuleHandle bean with the
// expected name and type.
@Test
public void testSentinelRuleHandle() {
    applicationContextRunner.run(context -> {
            PluginDataHandler handler = context.getBean("sentinelRuleHandle", PluginDataHandler.class);
            assertNotNull(handler);
        }
    );
}
/**
 * Fetches all windows for {@code key} in the given partition whose window
 * start and end both fall inside the supplied bounds.
 *
 * <p>The store is queried with a single [lower, upper] fetch range derived
 * from both bounds, then each returned window is re-checked against the exact
 * bounds (the store range is a superset of what the bounds allow).
 *
 * @param key the key to look up
 * @param partition the store partition to query
 * @param windowStartBounds acceptable range for the window start instant
 * @param windowEndBounds acceptable range for the window end instant
 * @param position unused here; consistency position from the caller
 * @return an iterator-backed result of matching {@link WindowedRow}s
 * @throws MaterializationException if the underlying store access fails
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
  try {
    final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
        .store(QueryableStoreTypes.timestampedWindowStore(), partition);

    final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
    final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);

    // Bypass the Streams cache so reads reflect the committed store contents.
    try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
        = cacheBypassFetcher.fetch(store, key, lower, upper)) {

      final Builder<WindowedRow> builder = ImmutableList.builder();

      while (it.hasNext()) {
        final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();

        final Instant windowStart = Instant.ofEpochMilli(next.key);
        // The fetch range is broader than the bounds; filter precisely here.
        if (!windowStartBounds.contains(windowStart)) {
          continue;
        }

        final Instant windowEnd = windowStart.plus(windowSize);
        if (!windowEndBounds.contains(windowEnd)) {
          continue;
        }

        final TimeWindow window =
            new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());

        final WindowedRow row = WindowedRow.of(
            stateStore.schema(),
            new Windowed<>(key, window),
            next.value.value(),
            next.value.timestamp()
        );

        builder.add(row);
      }

      return KsMaterializedQueryResult.rowIterator(builder.build().iterator());
    }
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
// Unbounded start/end ranges must translate into the widest possible store
// fetch window: epoch 0 through Long.MAX_VALUE millis.
@Test
public void shouldFetchWithNoBounds() {
  // When:
  table.get(A_KEY, PARTITION, Range.all(), Range.all());

  // Then:
  verify(cacheBypassFetcher).fetch(
      eq(tableStore),
      any(),
      eq(Instant.ofEpochMilli(0)),
      eq(Instant.ofEpochMilli(Long.MAX_VALUE))
  );
}
/**
 * Hashes an arbitrary object by dispatching on its runtime type.
 *
 * <p>Numbers are hashed via their 64-bit representation; strings and byte
 * arrays via the byte-array hash; anything else falls back to hashing its
 * {@code toString()} value. {@code null} hashes to 0.
 *
 * NOTE(review): {@code String.getBytes()} uses the platform default charset,
 * so string hashes can differ across JVMs with different default encodings —
 * confirm whether callers rely on cross-platform stability before changing.
 */
public static int hash(Object o) {
    if (o == null) {
        return 0;
    }
    if (o instanceof Long) {
        return hashLong((Long) o);
    }
    if (o instanceof Integer) {
        // Widened to long so ints and longs with equal values hash alike.
        return hashLong((Integer) o);
    }
    if (o instanceof Double) {
        // Raw bits preserve distinctions like -0.0 vs 0.0 and NaN payloads.
        return hashLong(Double.doubleToRawLongBits((Double) o));
    }
    if (o instanceof Float) {
        return hashLong(Float.floatToRawIntBits((Float) o));
    }
    if (o instanceof String) {
        return hash(((String) o).getBytes());
    }
    if (o instanceof byte[]) {
        return hash((byte[]) o);
    }
    // Fallback for all other types: hash the string form.
    return hash(o.toString());
}
// Pins the MurmurHash value of a known string against a precomputed constant,
// guarding against accidental algorithm changes.
@Test
public void testHash() throws Exception {
    final long actualHash = MurmurHash.hash("hashthis");
    final long expectedHash = -1974946086L;
    assertEquals("MurmurHash.hash(String) returns wrong hash value", expectedHash, actualHash);
}
/**
 * Asserts that the subject is a double other than {@link Double#NaN}.
 *
 * <p>A {@code null} subject fails immediately; any non-null value is checked
 * by delegating to {@code isNotEqualTo(NaN)}.
 */
public final void isNotNaN() {
  if (actual != null) {
    isNotEqualTo(NaN);
  } else {
    failWithActual(simpleFact("expected a double other than NaN"));
  }
}
// All finite values and both infinities are "not NaN".
@Test
public void isNotNaN() {
  assertThat(1.23).isNotNaN();
  assertThat(Double.MAX_VALUE).isNotNaN();
  assertThat(-1.0 * Double.MIN_VALUE).isNotNaN();
  assertThat(Double.POSITIVE_INFINITY).isNotNaN();
  assertThat(Double.NEGATIVE_INFINITY).isNotNaN();
}
/**
 * Returns the structural GUID: the identifier derived from the query's
 * anonymized (structural) form rather than its literal text.
 */
public String getStructuralGuid() {
  return structuralGuid;
}
// Two different queries that anonymize to the same structural form must
// produce the same structural GUID.
// NOTE(review): the anonQuery literal appears to have unbalanced parentheses;
// since both GUIDs are derived from the same literal the equality check still
// holds — confirm whether the literal was meant to be well-formed SQL.
@Test
public void queriesWithSameAnonFormShouldGetSameStructurallySimilarId() {
  // Given:
  final String query1 = "CREATE STREAM my_stream (profileId VARCHAR, latitude DOUBLE) "
      + "WITH (kafka_topic='locations', value_format='json', partitions=1);";
  final String query2 = "CREATE STREAM my_stream (userId VARCHAR, performance DOUBLE) "
      + "WITH (kafka_topic='user_performance', value_format='json', partitions=2);";
  final String anonQuery = "CREATE STREAM stream1 (column1 VARCHAR, column2 DOUBLE) WITH "
      + "(kafka_topic=['string'], value_format=['string'], partitions='0';";

  // When:
  final String id1 = new QueryGuid(TEST_NAMESPACE, query1, anonQuery).getStructuralGuid();
  final String id2 = new QueryGuid(TEST_NAMESPACE, query2, anonQuery).getStructuralGuid();

  // Then:
  Assert.assertEquals(id1, id2);
}
/**
 * Matches a message field against the rule's regex.
 *
 * <p>A missing field matches only when the rule is inverted. The compiled
 * pattern is looked up in a cache, and the field value is wrapped in an
 * {@code InterruptibleCharSequence} so runaway regexes can be aborted.
 *
 * @return XOR of the rule's inverted flag and whether the pattern is found;
 *     {@code false} if the pattern cache lookup fails
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    // Absent field: inverted rules match, normal rules don't.
    if (msg.getField(rule.getField()) == null)
        return rule.getInverted();

    try {
        final Pattern pattern = patternCache.get(rule.getValue());
        final CharSequence charSequence = new InterruptibleCharSequence(msg.getField(rule.getField()).toString());
        // XOR flips the find() result when the rule is inverted.
        return rule.getInverted() ^ pattern.matcher(charSequence).find();
    } catch (ExecutionException e) {
        LOG.error("Unable to get pattern from regex cache: ", e);
    }

    // Cache failure: treat as no match rather than propagating.
    return false;
}
// An inverted rule must match when the target field is present but null,
// exercising the null-field short-circuit in the matcher.
@Test
public void testInvertedNullFieldShouldMatch() throws Exception {
    final String fieldName = "nullfield";
    final StreamRule rule = getSampleRule();
    rule.setField(fieldName);
    rule.setValue("^foo");
    rule.setInverted(true);

    final Message msg = getSampleMessage();
    msg.addField(fieldName, null);

    final StreamRuleMatcher matcher = getMatcher(rule);
    assertTrue(matcher.match(msg, rule));
}
/**
 * Hash combines the referenced storage's hash with the (nullable) name,
 * consistent with the standard 31-multiplier scheme used by equals().
 */
@Override
public int hashCode() {
    final int nameHash = (name == null) ? 0 : name.hashCode();
    return 31 * storageRef.get().hashCode() + nameHash;
}
// hashCode contract: stable across calls, equal for equal attributes, and
// (where the runtime produces distinct hashes) different for different
// storage or name.
@Test
public void testHashCode() {
    assertEquals(recordStore.hashCode(), recordStore.hashCode());
    assertEquals(recordStoreSameAttributes.hashCode(), recordStore.hashCode());

    // Skip inequality checks on JVMs where hash collisions are expected.
    assumeDifferentHashCodes();
    assertNotEquals(recordStoreOtherStorage.hashCode(), recordStore.hashCode());
    assertNotEquals(recordStoreOtherName.hashCode(), recordStore.hashCode());
}
/**
 * Forwards each incoming record's value to the underlying writer.
 * Output (data files) is emitted elsewhere, on checkpoint boundaries.
 */
@Override
public void processElement(StreamRecord<T> element) throws Exception {
  writer.write(element.getValue());
}
// Without a checkpoint the writer emits nothing; closing it after writing a
// record leaves exactly one orphan data file on disk.
@TestTemplate
public void testTableWithoutSnapshot() throws Exception {
  try (OneInputStreamOperatorTestHarness<RowData, WriteResult> testHarness =
      createIcebergStreamWriter()) {
    assertThat(testHarness.extractOutputValues()).isEmpty();
  }
  // Even if we closed the iceberg stream writer, there's no orphan data file.
  assertThat(scanDataFiles()).isEmpty();

  try (OneInputStreamOperatorTestHarness<RowData, WriteResult> testHarness =
      createIcebergStreamWriter()) {
    testHarness.processElement(SimpleDataUtil.createRowData(1, "hello"), 1);
    // Still not emit the data file yet, because there is no checkpoint.
    assertThat(testHarness.extractOutputValues()).isEmpty();
  }
  // Once we closed the iceberg stream writer, there will left an orphan data file.
  assertThat(scanDataFiles()).hasSize(1);
}
/**
 * Index names for this dialect are unique per schema (not per table).
 */
@Override
public UniquenessLevel getIndexUniquenessLevel() {
    return UniquenessLevel.SCHEMA_LEVEL;
}
// The provider must report schema-level index uniqueness.
@Test
void assertGetIndexUniquenessLevel() {
    assertThat(uniquenessLevelProvider.getIndexUniquenessLevel(), is(UniquenessLevel.SCHEMA_LEVEL));
}
/**
 * Creates a read-only trailers view over the given name/value pairs.
 * Trailers carry no pseudo-headers, hence the empty pseudo-header array.
 *
 * @param validateHeaders whether header names/values are validated
 * @param otherHeaders alternating name/value {@link AsciiString}s
 */
public static ReadOnlyHttp2Headers trailers(boolean validateHeaders, AsciiString... otherHeaders) {
    return new ReadOnlyHttp2Headers(validateHeaders, EMPTY_ASCII_STRINGS, otherHeaders);
}
// With validation disabled, null header names/values must be accepted without
// throwing at construction time.
@Test
public void nullHeaderNameNotChecked() {
    ReadOnlyHttp2Headers.trailers(false, null, null);
}
/**
 * Reads the Unix permissions of all selected files and folds them into a
 * single tri-state overwrite: for each permission bit, TRUE when every file
 * has it, FALSE when none does, and null (undetermined) when files disagree.
 *
 * @param session session providing the {@link UnixPermission} feature
 * @return the aggregated tri-state permission overwrite
 * @throws ConnectionCanceledException if the worker is cancelled mid-read
 */
@Override
public PermissionOverwrite run(final Session<?> session) throws BackgroundException {
    final UnixPermission feature = session.getFeature(UnixPermission.class);
    if(log.isDebugEnabled()) {
        log.debug(String.format("Run with feature %s", feature));
    }
    final List<Permission> permissions = new ArrayList<>();
    for(Path next : files) {
        // Honor cancellation between per-file permission reads.
        if(this.isCanceled()) {
            throw new ConnectionCanceledException();
        }
        permissions.add(feature.getUnixPermission(next));
    }
    final PermissionOverwrite overwrite = new PermissionOverwrite();
    // Supplier so each of the nine resolveOverwrite calls gets a fresh stream.
    final Supplier<Stream<Permission>> supplier = permissions::stream;
    overwrite.user.read = resolveOverwrite(map(supplier, Permission::getUser, Permission.Action.read));
    overwrite.user.write = resolveOverwrite(map(supplier, Permission::getUser, Permission.Action.write));
    overwrite.user.execute = resolveOverwrite(map(supplier, Permission::getUser, Permission.Action.execute));
    overwrite.group.read = resolveOverwrite(map(supplier, Permission::getGroup, Permission.Action.read));
    overwrite.group.write = resolveOverwrite(map(supplier, Permission::getGroup, Permission.Action.write));
    overwrite.group.execute = resolveOverwrite(map(supplier, Permission::getGroup, Permission.Action.execute));
    overwrite.other.read = resolveOverwrite(map(supplier, Permission::getOther, Permission.Action.read));
    overwrite.other.write = resolveOverwrite(map(supplier, Permission::getOther, Permission.Action.write));
    overwrite.other.execute = resolveOverwrite(map(supplier, Permission::getOther, Permission.Action.execute));
    return overwrite;
}
// Two files with differing permissions: agreeing bits resolve to TRUE/FALSE,
// disagreeing bits resolve to null (see the truth table comment below).
@Test
public void testRun() throws Exception {
    final ReadPermissionWorker worker = new ReadPermissionWorker(
        Arrays.asList(
            new Path("/a", EnumSet.of(Path.Type.file),
                new TestPermissionAttributes(Permission.Action.all, Permission.Action.all, Permission.Action.none)),
            new Path("/b", EnumSet.of(Path.Type.file),
                new TestPermissionAttributes(Permission.Action.all, Permission.Action.read_write, Permission.Action.read)))) {
        @Override
        public void cleanup(final PermissionOverwrite result) {
            //
        }
    };
    PermissionOverwrite overwrite = worker.run(new NullSession(new Host(new TestProtocol())) {
        @Override
        @SuppressWarnings("unchecked")
        public <T> T _getFeature(final Class<T> type) {
            if(type == UnixPermission.class) {
                // Stub feature that answers with each path's own attributes.
                return (T) new UnixPermission() {
                    @Override
                    public void setUnixOwner(final Path file, final String owner) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public void setUnixGroup(final Path file, final String group) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public Permission getUnixPermission(final Path file) {
                        return file.attributes().getPermission();
                    }

                    @Override
                    public void setUnixPermission(final Path file, final TransferStatus status) throws BackgroundException {
                        throw new UnsupportedOperationException();
                    }
                };
            }
            return super._getFeature(type);
        }
    });
    /*
        +-----+-----+-----+
        | rwx | rwx | --- |
        | rwx | rw- | r-- |
        +=====+=====+=====+
        | rwx | rw? | ?-- |
        +-----+-----+-----+
     */
    assertEquals(Boolean.TRUE, overwrite.user.read);
    assertEquals(Boolean.TRUE, overwrite.user.write);
    assertEquals(Boolean.TRUE, overwrite.user.execute);

    assertEquals(Boolean.TRUE, overwrite.group.read);
    assertEquals(Boolean.TRUE, overwrite.group.write);
    assertNull(overwrite.group.execute);

    assertNull(overwrite.other.read);
    assertEquals(Boolean.FALSE, overwrite.other.write);
    assertEquals(Boolean.FALSE, overwrite.other.execute);

    assertEquals("Getting permission of a… (Multiple files) (2)", worker.getActivity());
}
/**
 * REST endpoint returning cluster information as JSON or XML (UTF-8).
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public ClusterInfo get() {
  return getClusterInfo();
}
// The scheduler endpoint must also resolve with a trailing slash and return
// the FIFO scheduler info as UTF-8 JSON.
@Test
public void testClusterSchedulerFifoSlash() throws JSONException, Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path("scheduler/").accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
      response.getType().toString());
  JSONObject json = response.getEntity(JSONObject.class);
  verifyClusterSchedulerFifo(json);
}
/**
 * Attempts to resolve each concurrently-updated job from the exception.
 *
 * <p>Jobs that cannot be resolved are collected; if any remain, an
 * {@link UnresolvableConcurrentJobModificationException} wrapping those
 * failures (and the original exception) is thrown.
 */
@Override
public void resolve(ConcurrentJobModificationException e) {
    final List<ConcurrentJobModificationResolveResult> failedToResolve =
            e.getConcurrentUpdatedJobs().stream()
                    .map(concurrentUpdatedJob -> resolve(concurrentUpdatedJob))
                    .filter(resolveResult -> resolveResult.failed())
                    .collect(toList());

    if (failedToResolve.isEmpty()) {
        return;
    }
    throw new UnresolvableConcurrentJobModificationException(failedToResolve, e);
}
// A job picked up by another server (higher version, processing again) must
// resolve by interrupting the local worker thread rather than failing.
@Test
void concurrentStateChangeForJobThatIsPerformedOnOtherBackgroundJobServerIsAllowed() {
    final Job jobInProgress = aJobInProgress().build();
    Job localJob = aCopyOf(jobInProgress).withVersion(3).build();
    // Storage copy is ahead: failed -> scheduled -> enqueued -> processing.
    Job storageProviderJob = aCopyOf(jobInProgress).withVersion(6).withFailedState().withScheduledState().withEnqueuedState(now()).withProcessingState().build();

    final Thread jobThread = mock(Thread.class);
    when(storageProvider.getJobById(localJob.getId())).thenReturn(storageProviderJob);
    lenient().when(jobSteward.getThreadProcessingJob(localJob)).thenReturn(jobThread);

    concurrentJobModificationResolver.resolve(new ConcurrentJobModificationException(localJob));

    // Local processing must be stopped since the other server owns the job now.
    verify(jobThread).interrupt();
}
/**
 * Compiles a single-point-to-multi-point intent into one LinkCollectionIntent.
 *
 * <p>For each egress point a path from the ingress device is computed (same
 * device needs no path, only availability). Depending on whether the intent
 * allows partial failure, missing paths either reduce the egress set or abort
 * compilation. Bandwidth is allocated on all ingress/egress/path connect
 * points when a bandwidth constraint is present.
 *
 * @param intent the intent to compile
 * @param installable unused previous installables
 * @return a single-element list holding the compiled link collection intent
 * @throws IntentException if no path exists at all, or if any path is missing
 *     while partial failure is not allowed
 */
@Override
public List<Intent> compile(SinglePointToMultiPointIntent intent, List<Intent> installable) {
    Set<Link> links = new HashSet<>();

    final boolean allowMissingPaths = intentAllowsPartialFailure(intent);
    boolean hasPaths = false;
    boolean missingSomePaths = false;

    for (ConnectPoint egressPoint : intent.egressPoints()) {
        if (egressPoint.deviceId().equals(intent.ingressPoint().deviceId())) {
            // Do not need to look for paths, since ingress and egress
            // devices are the same.
            if (deviceService.isAvailable(egressPoint.deviceId())) {
                hasPaths = true;
            } else {
                missingSomePaths = true;
            }
            continue;
        }

        Path path = getPath(intent, intent.ingressPoint().deviceId(), egressPoint.deviceId());

        if (path != null) {
            hasPaths = true;
            links.addAll(path.links());
        } else {
            missingSomePaths = true;
        }
    }

    // Allocate bandwidth if a bandwidth constraint is set
    ConnectPoint ingressCP = intent.filteredIngressPoint().connectPoint();
    List<ConnectPoint> egressCPs = intent.filteredEgressPoints().stream()
            .map(fcp -> fcp.connectPoint())
            .collect(Collectors.toList());

    // Connect points touched by the computed paths, plus ingress and egresses.
    List<ConnectPoint> pathCPs = links.stream()
            .flatMap(l -> Stream.of(l.src(), l.dst()))
            .collect(Collectors.toList());

    pathCPs.add(ingressCP);
    pathCPs.addAll(egressCPs);

    allocateBandwidth(intent, pathCPs);

    if (!hasPaths) {
        throw new IntentException("Cannot find any path between ingress and egress points.");
    } else if (!allowMissingPaths && missingSomePaths) {
        throw new IntentException("Missing some paths between ingress and egress points.");
    }

    Intent result = LinkCollectionIntent.builder()
            .appId(intent.appId())
            .key(intent.key())
            .selector(intent.selector())
            .treatment(intent.treatment())
            .links(links)
            .filteredIngressPoints(ImmutableSet.of(intent.filteredIngressPoint()))
            .filteredEgressPoints(intent.filteredEgressPoints())
            .priority(intent.priority())
            .applyTreatmentOnEgress(true)
            .constraints(intent.constraints())
            .resourceGroup(intent.resourceGroup())
            .build();

    return Collections.singletonList(result);
}
// Ingress and both egresses on the same device: compilation must succeed with
// a LinkCollectionIntent containing zero links, inheriting the original key.
@Test
public void testSameDeviceCompilation() {
    FilteredConnectPoint ingress = new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_1));

    Set<FilteredConnectPoint> egress = Sets.newHashSet(new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_2)),
            new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_3)));

    SinglePointToMultiPointIntent intent = makeIntent(ingress, egress);
    assertThat(intent, is(notNullValue()));

    // No intermediate hops are needed for a same-device intent.
    final String[] hops = {};

    SinglePointToMultiPointIntentCompiler compiler = makeCompiler(hops);
    assertThat(compiler, is(notNullValue()));

    List<Intent> result = compiler.compile(intent, null);

    assertThat(result, is(notNullValue()));
    assertThat(result, hasSize(1));

    Intent resultIntent = result.get(0);
    assertThat(resultIntent, instanceOf(LinkCollectionIntent.class));

    if (resultIntent instanceof LinkCollectionIntent) {
        LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent;
        assertThat(linkIntent.links(), hasSize(0));
    }
    assertThat("key is inherited", resultIntent.key(), is(intent.key()));
}
/**
 * Queries audit-log data influences for a specific field of an entity.
 *
 * @param entityName name of the influenced entity
 * @param entityId id of the influenced entity
 * @param fieldName field whose influence records are requested
 * @param page zero-based page index
 * @param size page size
 * @return the matching data-influence DTOs for the requested page
 */
@GetMapping("/logs/dataInfluences/field")
@PreAuthorize(value = "@apolloAuditLogQueryApiPreAuthorizer.hasQueryPermission()")
public List<ApolloAuditLogDataInfluenceDTO> findDataInfluencesByField(
    @RequestParam String entityName, @RequestParam String entityId,
    @RequestParam String fieldName, int page, int size) {
  // Pure delegation to the audit query API; no controller-level transformation.
  return api.queryDataInfluencesByField(entityName, entityId, fieldName, page, size);
}
// End-to-end MockMvc test: the endpoint forwards the query parameters to the
// API and serializes the returned DTO list as a JSON array of the mocked size.
@Test
public void testFindDataInfluencesByField() throws Exception {
  final String entityName = "query-entity-name";
  final String entityId = "query-entity-id";
  final String fieldName = "query-field-name";

  {
    // Stub the API with a list whose entries echo the query parameters.
    List<ApolloAuditLogDataInfluenceDTO> mockDataInfluenceDTOList = MockBeanFactory.mockDataInfluenceDTOListByLength(
        size);
    mockDataInfluenceDTOList.forEach(e -> {
      e.setInfluenceEntityName(entityName);
      e.setInfluenceEntityId(entityId);
      e.setFieldName(fieldName);
    });
    Mockito.when(api.queryDataInfluencesByField(Mockito.eq(entityName), Mockito.eq(entityId),
            Mockito.eq(fieldName), Mockito.eq(page), Mockito.eq(size)))
        .thenReturn(mockDataInfluenceDTOList);
  }

  mockMvc.perform(MockMvcRequestBuilders.get("/apollo/audit/logs/dataInfluences/field")
          .param("entityName", entityName)
          .param("entityId", entityId)
          .param("fieldName", fieldName)
          .param("page", String.valueOf(page))
          .param("size", String.valueOf(size)))
      .andExpect(MockMvcResultMatchers.status().isOk())
      .andExpect(MockMvcResultMatchers.jsonPath("$").isArray())
      .andExpect(MockMvcResultMatchers.jsonPath("$.length()").value(size));

  Mockito.verify(api, Mockito.times(1))
      .queryDataInfluencesByField(Mockito.eq(entityName), Mockito.eq(entityId),
          Mockito.eq(fieldName), Mockito.eq(page), Mockito.eq(size));
}
/**
 * Dispatches inbox processing by ordinal: ordinals 0-4 go to their dedicated
 * handlers, anything else to the generic handler. Checked exceptions from the
 * handlers are rethrown unchecked via sneakyThrow.
 */
@Override
@SuppressWarnings("checkstyle:magicnumber")
public void process(int ordinal, @Nonnull Inbox inbox) {
    try {
        switch (ordinal) {
            case 0:
                process0(inbox);
                break;
            case 1:
                process1(inbox);
                break;
            case 2:
                process2(inbox);
                break;
            case 3:
                process3(inbox);
                break;
            case 4:
                process4(inbox);
                break;
            default:
                // Ordinals beyond the dedicated range use the generic path.
                processAny(ordinal, inbox);
        }
    } catch (Exception e) {
        throw sneakyThrow(e);
    }
}
// Processing ordinal 1 must route the inbox item to the tryProcess1 handler.
@Test
public void when_processInbox1_then_tryProcess1Called() {
    // When
    tryProcessP.process(ORDINAL_1, inbox);

    // Then
    tryProcessP.validateReceptionOfItem(ORDINAL_1, MOCK_ITEM);
}
/**
 * Parses a job id string (e.g. "job_1234_0001") into a YARN JobId.
 * Delegates validation to JobID.forName, which rejects malformed ids.
 */
public static JobId toJobID(String jid) {
  return TypeConverter.toYarn(JobID.forName(jid));
}
// A job id with too many components must be rejected with
// IllegalArgumentException by the underlying JobID parser.
@Test
@Timeout(120000)
public void testJobIDShort() {
  assertThrows(IllegalArgumentException.class, () -> {
    MRApps.toJobID("job_0_0_0");
  });
}
/**
 * Checks config-generation convergence for every service of the application,
 * with no host exclusions and the given per-service timeout.
 */
public ServiceListResponse checkConvergenceForAllServices(Application application, Duration timeoutPerService) {
    return checkConvergence(application, timeoutPerService, new HostsToCheck(Set.of()));
}
// Convergence is reported per service: a single host at the wanted generation
// converges; with two hosts on different generations the response reports the
// minimum current generation and converged=false.
@Test
public void service_list_convergence() {
    {
        wireMock.stubFor(get(urlEqualTo("/state/v1/config")).willReturn(okJson("{\"config\":{\"generation\":3}}")));
        ServiceListResponse response = checker.checkConvergenceForAllServices(application, clientTimeout);
        assertEquals(3, response.wantedGeneration);
        assertEquals(3, response.currentGeneration);
        assertTrue(response.converged);

        List<ServiceListResponse.Service> services = response.services;
        assertEquals(1, services.size());
        assertService(this.service, services.get(0), 3);
    }

    {
        // Model with two hosts on different generations
        MockModel model = new MockModel(List.of(
                MockModel.createContainerHost(service.getHost(), service.getPort()),
                MockModel.createContainerHost(service2.getHost(), service2.getPort()))
        );
        Application application = new Application(model, new ServerCache(), 4, new Version(0, 0, 0),
                MetricUpdater.createTestUpdater(), appId);

        wireMock.stubFor(get(urlEqualTo("/state/v1/config")).willReturn(okJson("{\"config\":{\"generation\":4}}")));
        wireMock2.stubFor(get(urlEqualTo("/state/v1/config")).willReturn(okJson("{\"config\":{\"generation\":3}}")));
        // NOTE(review): requestUrl is computed but never used — confirm whether
        // this line is leftover scaffolding.
        URI requestUrl = testServer().resolve("/serviceconverge");

        ServiceListResponse response = checker.checkConvergenceForAllServices(application, clientTimeout);
        assertEquals(4, response.wantedGeneration);
        assertEquals(3, response.currentGeneration);
        assertFalse(response.converged);
        List<ServiceListResponse.Service> services = response.services;
        assertEquals(2, services.size());
        assertService(this.service, services.get(0), 4);
        assertService(this.service2, services.get(1), 3);
    }
}
/**
 * Wraps an already-computed value.
 *
 * <p>A {@code null} argument maps to the shared null sentinel; any other
 * value is wrapped in a new instance marked as present.
 */
public static <V> DeferredValue<V> withValue(V value) {
    if (value != null) {
        DeferredValue<V> wrapped = new DeferredValue<>();
        wrapped.value = value;
        wrapped.valueExists = true;
        return wrapped;
    }
    return NULL_VALUE;
}
// Two wrappers around the same value must be equal.
@Test
public void testEquals_WithValue() {
    DeferredValue<String> v1 = withValue(expected);
    DeferredValue<String> v2 = withValue(expected);
    assertEquals(v1, v2);
}
/**
 * Triggers a sentinel-initiated failover of the given master by name,
 * executed synchronously on the sentinel connection.
 */
@Override
public void failover(NamedNode master) {
    connection.sync(RedisCommands.SENTINEL_FAILOVER, master.getName());
}
// After a triggered failover, the reported master must change (checked by
// comparing ports before and after).
// NOTE(review): the fixed 10s sleep is both slow and potentially flaky — a
// polling wait on connection.masters() would be more robust; confirm timing
// assumptions of the sentinel setup before changing.
@Test
public void testFailover() throws InterruptedException {
    Collection<RedisServer> masters = connection.masters();
    connection.failover(masters.iterator().next());

    Thread.sleep(10000);

    RedisServer newMaster = connection.masters().iterator().next();
    assertThat(masters.iterator().next().getPort()).isNotEqualTo(newMaster.getPort());
}
@Override // Exposes internal mutable reference by design - Spotbugs is right to warn that this is dangerous public synchronized byte[] toByteArray() { // Note: count == buf.length is not a correct criteria to "return buf;", because the internal // buf may be reused after reset(). if (!isFallback && count > 0) { return buf; } else { return super.toByteArray(); } }
// On the fast path a single array write must be exposed by reference:
// toByteArray() returns the very same array instance, not a copy.
@Test
public void testWriteSingleArrayFast() throws IOException {
    writeToBothFast(TEST_DATA);
    assertStreamContentsEquals(stream, exposedStream);
    assertSame(TEST_DATA, exposedStream.toByteArray());
}
/**
 * Loads a (possibly password-protected) private key from a file.
 * Delegates to the three-argument overload with BouncyCastle parsing enabled.
 *
 * @param keyFile the key file (PKCS#1/PKCS#8, PEM)
 * @param keyPassword password for encrypted keys, or {@code null}
 */
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
        NoSuchPaddingException, InvalidKeySpecException,
        InvalidAlgorithmParameterException,
        KeyException, IOException {
    return toPrivateKey(keyFile, keyPassword, true);
}
// Decrypting an AES-encrypted PKCS#1 key with the wrong password must fail
// with an IOException rather than yielding a bogus key.
@Test
public void testPkcs1AesEncryptedRsaWrongPassword() throws Exception {
    assertThrows(IOException.class, new Executable() {
        @Override
        public void execute() throws Throwable {
            SslContext.toPrivateKey(new File(getClass().getResource("rsa_pkcs1_aes_encrypted.key")
                    .getFile()), "wrong");
        }
    });
}
/**
 * Users may enter a custom server hostname for this protocol.
 */
@Override
public boolean isHostnameConfigurable() {
    return true;
}
// Swift connections must allow custom hostname and port configuration.
@Test
public void testConfigurable() {
    assertTrue(new SwiftProtocol().isHostnameConfigurable());
    assertTrue(new SwiftProtocol().isPortConfigurable());
}
/**
 * Strips test-infrastructure frames from the throwable's stack trace
 * (and, via the identity set, from its causes without revisiting cycles).
 */
static void cleanStackTrace(Throwable throwable) {
  new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
}
// Every frame at or below the JUnit Statement frame — and Truth's own frames
// above the test — must be removed, leaving only the test frame itself.
@Test
public void allFramesBelowJUnitStatementCleaned() {
  Throwable throwable =
      createThrowableWithStackTrace(
          "com.google.common.truth.StringSubject",
          "com.google.example.SomeTest",
          SomeStatement.class.getName(),
          "com.google.example.SomeClass");

  StackTraceCleaner.cleanStackTrace(throwable);

  assertThat(throwable.getStackTrace())
      .isEqualTo(
          new StackTraceElement[] {
            createStackTraceElement("com.google.example.SomeTest"),
          });
}
/**
 * Validates that a tenant package exists and is enabled.
 *
 * @param id the tenant package id
 * @return the tenant package when it exists and is enabled
 * @throws RuntimeException TENANT_PACKAGE_NOT_EXISTS when no package has the
 *     given id; TENANT_PACKAGE_DISABLE when the package is disabled
 */
@Override
public TenantPackageDO validTenantPackage(Long id) {
    TenantPackageDO tenantPackage = tenantPackageMapper.selectById(id);
    if (tenantPackage == null) {
        throw exception(TENANT_PACKAGE_NOT_EXISTS);
    }
    // Disabled packages are rejected with the package name in the message.
    if (tenantPackage.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(TENANT_PACKAGE_DISABLE, tenantPackage.getName());
    }
    return tenantPackage;
}
// Validating a random, non-existent package id must raise
// TENANT_PACKAGE_NOT_EXISTS.
@Test
public void testValidTenantPackage_notExists() {
    // Prepare parameters
    Long id = randomLongId();

    // Invoke and assert the expected exception
    assertServiceException(() -> tenantPackageService.validTenantPackage(id),
        TENANT_PACKAGE_NOT_EXISTS);
}